PostgreSQL Source Code git master
Loading...
Searching...
No Matches
index_selfuncs.h File Reference
#include "access/amapi.h"
Include dependency graph for index_selfuncs.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Functions

void brincostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void btcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void hashcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void gistcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void spgcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void gincostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 

Function Documentation

◆ brincostestimate()

void brincostestimate ( struct PlannerInfo *root,
struct IndexPath *path,
double loop_count,
Cost *indexStartupCost,
Cost *indexTotalCost,
Selectivity *indexSelectivity,
double *indexCorrelation,
double *indexPages
)
extern

Definition at line 8992 of file selfuncs.c.

8996{
8997 IndexOptInfo *index = path->indexinfo;
8999 double numPages = index->pages;
9000 RelOptInfo *baserel = index->rel;
9003 Cost spc_random_page_cost;
9004 double qual_arg_cost;
9005 double qualSelectivity;
9007 double indexRanges;
9008 double minimalRanges;
9009 double estimatedRanges;
9010 double selec;
9011 Relation indexRel;
9012 ListCell *l;
9014
9015 Assert(rte->rtekind == RTE_RELATION);
9016
9017 /* fetch estimated page cost for the tablespace containing the index */
9018 get_tablespace_page_costs(index->reltablespace,
9019 &spc_random_page_cost,
9021
9022 /*
9023 * Obtain some data from the index itself, if possible. Otherwise invent
9024 * some plausible internal statistics based on the relation page count.
9025 */
9026 if (!index->hypothetical)
9027 {
9028 /*
9029 * A lock should have already been obtained on the index in plancat.c.
9030 */
9031 indexRel = index_open(index->indexoid, NoLock);
9032 brinGetStats(indexRel, &statsData);
9033 index_close(indexRel, NoLock);
9034
9035 /* work out the actual number of ranges in the index */
9036 indexRanges = Max(ceil((double) baserel->pages /
9037 statsData.pagesPerRange), 1.0);
9038 }
9039 else
9040 {
9041 /*
9042 * Assume default number of pages per range, and estimate the number
9043 * of ranges based on that.
9044 */
9045 indexRanges = Max(ceil((double) baserel->pages /
9047
9049 statsData.revmapNumPages = (indexRanges / REVMAP_PAGE_MAXITEMS) + 1;
9050 }
9051
9052 /*
9053 * Compute index correlation
9054 *
9055 * Because we can use all index quals equally when scanning, we can use
9056 * the largest correlation (in absolute value) among columns used by the
9057 * query. Start at zero, the worst possible case. If we cannot find any
9058 * correlation statistics, we will keep it as 0.
9059 */
9060 *indexCorrelation = 0;
9061
9062 foreach(l, path->indexclauses)
9063 {
9065 AttrNumber attnum = index->indexkeys[iclause->indexcol];
9066
9067 /* attempt to lookup stats in relation for this index column */
9068 if (attnum != 0)
9069 {
9070 /* Simple variable -- look to stats for the underlying table */
9073 {
9074 /*
9075 * The hook took control of acquiring a stats tuple. If it
9076 * did supply a tuple, it'd better have supplied a freefunc.
9077 */
9078 if (HeapTupleIsValid(vardata.statsTuple) && !vardata.freefunc)
9079 elog(ERROR,
9080 "no function provided to release variable stats with");
9081 }
9082 else
9083 {
9084 vardata.statsTuple =
9086 ObjectIdGetDatum(rte->relid),
9088 BoolGetDatum(false));
9089 vardata.freefunc = ReleaseSysCache;
9090 }
9091 }
9092 else
9093 {
9094 /*
9095 * Looks like we've found an expression column in the index. Let's
9096 * see if there's any stats for it.
9097 */
9098
9099 /* get the attnum from the 0-based index. */
9100 attnum = iclause->indexcol + 1;
9101
9103 (*get_index_stats_hook) (root, index->indexoid, attnum, &vardata))
9104 {
9105 /*
9106 * The hook took control of acquiring a stats tuple. If it
9107 * did supply a tuple, it'd better have supplied a freefunc.
9108 */
9109 if (HeapTupleIsValid(vardata.statsTuple) &&
9110 !vardata.freefunc)
9111 elog(ERROR, "no function provided to release variable stats with");
9112 }
9113 else
9114 {
9116 ObjectIdGetDatum(index->indexoid),
9118 BoolGetDatum(false));
9119 vardata.freefunc = ReleaseSysCache;
9120 }
9121 }
9122
9123 if (HeapTupleIsValid(vardata.statsTuple))
9124 {
9126
9127 if (get_attstatsslot(&sslot, vardata.statsTuple,
9130 {
9131 double varCorrelation = 0.0;
9132
9133 if (sslot.nnumbers > 0)
9134 varCorrelation = fabs(sslot.numbers[0]);
9135
9136 if (varCorrelation > *indexCorrelation)
9137 *indexCorrelation = varCorrelation;
9138
9140 }
9141 }
9142
9144 }
9145
9147 baserel->relid,
9148 JOIN_INNER, NULL);
9149
9150 /*
9151 * Now calculate the minimum possible ranges we could match with if all of
9152 * the rows were in the perfect order in the table's heap.
9153 */
9155
9156 /*
9157 * Now estimate the number of ranges that we'll touch by using the
9158 * indexCorrelation from the stats. Careful not to divide by zero (note
9159 * we're using the absolute value of the correlation).
9160 */
9161 if (*indexCorrelation < 1.0e-10)
9163 else
9164 estimatedRanges = Min(minimalRanges / *indexCorrelation, indexRanges);
9165
9166 /* we expect to visit this portion of the table */
9168
9170
9171 *indexSelectivity = selec;
9172
9173 /*
9174 * Compute the index qual costs, much as in genericcostestimate, to add to
9175 * the index costs. We can disregard indexorderbys, since BRIN doesn't
9176 * support those.
9177 */
9179
9180 /*
9181 * Compute the startup cost as the cost to read the whole revmap
9182 * sequentially, including the cost to execute the index quals.
9183 */
9184 *indexStartupCost =
9185 spc_seq_page_cost * statsData.revmapNumPages * loop_count;
9186 *indexStartupCost += qual_arg_cost;
9187
9188 /*
9189 * To read a BRIN index there might be a bit of back and forth over
9190 * regular pages, as revmap might point to them out of sequential order;
9191 * calculate the total cost as reading the whole index in random order.
9192 */
9193 *indexTotalCost = *indexStartupCost +
9194 spc_random_page_cost * (numPages - statsData.revmapNumPages) * loop_count;
9195
9196 /*
9197 * Charge a small amount per range tuple which we expect to match to. This
9198 * is meant to reflect the costs of manipulating the bitmap. The BRIN scan
9199 * will set a bit for each page in the range when we find a matching
9200 * range, so we must multiply the charge by the number of pages in the
9201 * range.
9202 */
9203 *indexTotalCost += 0.1 * cpu_operator_cost * estimatedRanges *
9204 statsData.pagesPerRange;
9205
9206 *indexPages = index->pages;
9207}
int16 AttrNumber
Definition attnum.h:21
void brinGetStats(Relation index, BrinStatsData *stats)
Definition brin.c:1650
#define BRIN_DEFAULT_PAGES_PER_RANGE
Definition brin.h:40
#define REVMAP_PAGE_MAXITEMS
Definition brin_page.h:93
#define Min(x, y)
Definition c.h:1019
#define Max(x, y)
Definition c.h:1013
#define Assert(condition)
Definition c.h:885
Selectivity clauselist_selectivity(PlannerInfo *root, List *clauses, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
Definition clausesel.c:100
double cpu_operator_cost
Definition costsize.c:134
#define ERROR
Definition elog.h:39
#define elog(elevel,...)
Definition elog.h:226
#define HeapTupleIsValid(tuple)
Definition htup.h:78
void index_close(Relation relation, LOCKMODE lockmode)
Definition indexam.c:177
Relation index_open(Oid relationId, LOCKMODE lockmode)
Definition indexam.c:133
#define NoLock
Definition lockdefs.h:34
void free_attstatsslot(AttStatsSlot *sslot)
Definition lsyscache.c:3496
bool get_attstatsslot(AttStatsSlot *sslot, HeapTuple statstuple, int reqkind, Oid reqop, int flags)
Definition lsyscache.c:3386
#define ATTSTATSSLOT_NUMBERS
Definition lsyscache.h:44
double Cost
Definition nodes.h:261
@ JOIN_INNER
Definition nodes.h:303
@ RTE_RELATION
#define planner_rt_fetch(rti, root)
Definition pathnodes.h:692
int16 attnum
#define lfirst_node(type, lc)
Definition pg_list.h:176
static Datum Int16GetDatum(int16 X)
Definition postgres.h:182
static Datum BoolGetDatum(bool X)
Definition postgres.h:112
static Datum ObjectIdGetDatum(Oid X)
Definition postgres.h:262
#define InvalidOid
static int fb(int x)
tree ctl root
Definition radixtree.h:1857
List * get_quals_from_indexclauses(List *indexclauses)
Definition selfuncs.c:7312
get_index_stats_hook_type get_index_stats_hook
Definition selfuncs.c:184
Cost index_other_operands_eval_cost(PlannerInfo *root, List *indexquals)
Definition selfuncs.c:7342
get_relation_stats_hook_type get_relation_stats_hook
Definition selfuncs.c:183
#define ReleaseVariableStats(vardata)
Definition selfuncs.h:101
#define CLAMP_PROBABILITY(p)
Definition selfuncs.h:63
void get_tablespace_page_costs(Oid spcid, double *spc_random_page_cost, double *spc_seq_page_cost)
Definition spccache.c:183
List * indexclauses
Definition pathnodes.h:2043
IndexOptInfo * indexinfo
Definition pathnodes.h:2042
Definition pg_list.h:54
Definition type.h:96
void ReleaseSysCache(HeapTuple tuple)
Definition syscache.c:264
HeapTuple SearchSysCache3(SysCacheIdentifier cacheId, Datum key1, Datum key2, Datum key3)
Definition syscache.c:240

References Assert, attnum, ATTSTATSSLOT_NUMBERS, BoolGetDatum(), BRIN_DEFAULT_PAGES_PER_RANGE, brinGetStats(), CLAMP_PROBABILITY, clauselist_selectivity(), cpu_operator_cost, elog, ERROR, fb(), free_attstatsslot(), get_attstatsslot(), get_index_stats_hook, get_quals_from_indexclauses(), get_relation_stats_hook, get_tablespace_page_costs(), HeapTupleIsValid, index_close(), index_open(), index_other_operands_eval_cost(), IndexPath::indexclauses, IndexPath::indexinfo, Int16GetDatum(), InvalidOid, JOIN_INNER, lfirst_node, Max, Min, NoLock, ObjectIdGetDatum(), planner_rt_fetch, ReleaseSysCache(), ReleaseVariableStats, REVMAP_PAGE_MAXITEMS, root, RTE_RELATION, and SearchSysCache3().

Referenced by brinhandler().

◆ btcostestimate()

void btcostestimate ( struct PlannerInfo *root,
struct IndexPath *path,
double loop_count,
Cost *indexStartupCost,
Cost *indexTotalCost,
Selectivity *indexSelectivity,
double *indexCorrelation,
double *indexPages
)
extern

Definition at line 7687 of file selfuncs.c.

7691{
7692 IndexOptInfo *index = path->indexinfo;
7693 GenericCosts costs = {0};
7695 double numIndexTuples;
7699 int indexcol;
7700 bool eqQualHere;
7701 bool found_row_compare;
7702 bool found_array;
7703 bool found_is_null_op;
7704 bool have_correlation = false;
7705 double num_sa_scans;
7706 double correlation = 0.0;
7707 ListCell *lc;
7708
7709 /*
7710 * For a btree scan, only leading '=' quals plus inequality quals for the
7711 * immediately next attribute contribute to index selectivity (these are
7712 * the "boundary quals" that determine the starting and stopping points of
7713 * the index scan). Additional quals can suppress visits to the heap, so
7714 * it's OK to count them in indexSelectivity, but they should not count
7715 * for estimating numIndexTuples. So we must examine the given indexquals
7716 * to find out which ones count as boundary quals. We rely on the
7717 * knowledge that they are given in index column order. Note that nbtree
7718 * preprocessing can add skip arrays that act as leading '=' quals in the
7719 * absence of ordinary input '=' quals, so in practice _most_ input quals
7720 * are able to act as index bound quals (which we take into account here).
7721 *
7722 * For a RowCompareExpr, we consider only the first column, just as
7723 * rowcomparesel() does.
7724 *
7725 * If there's a SAOP or skip array in the quals, we'll actually perform up
7726 * to N index descents (not just one), but the underlying array key's
7727 * operator can be considered to act the same as it normally does.
7728 */
7731 indexcol = 0;
7732 eqQualHere = false;
7733 found_row_compare = false;
7734 found_array = false;
7735 found_is_null_op = false;
7736 num_sa_scans = 1;
7737 foreach(lc, path->indexclauses)
7738 {
7740 ListCell *lc2;
7741
7742 if (indexcol < iclause->indexcol)
7743 {
7744 double num_sa_scans_prev_cols = num_sa_scans;
7745
7746 /*
7747 * Beginning of a new column's quals.
7748 *
7749 * Skip scans use skip arrays, which are ScalarArrayOp style
7750 * arrays that generate their elements procedurally and on demand.
7751 * Given a multi-column index on "(a, b)", and an SQL WHERE clause
7752 * "WHERE b = 42", a skip scan will effectively use an indexqual
7753 * "WHERE a = ANY('{every col a value}') AND b = 42". (Obviously,
7754 * the array on "a" must also return "IS NULL" matches, since our
7755 * WHERE clause used no strict operator on "a").
7756 *
7757 * Here we consider how nbtree will backfill skip arrays for any
7758 * index columns that lacked an '=' qual. This maintains our
7759 * num_sa_scans estimate, and determines if this new column (the
7760 * "iclause->indexcol" column, not the prior "indexcol" column)
7761 * can have its RestrictInfos/quals added to indexBoundQuals.
7762 *
7763 * We'll need to handle columns that have inequality quals, where
7764 * the skip array generates values from a range constrained by the
7765 * quals (not every possible value). We've been maintaining
7766 * indexSkipQuals to help with this; it will now contain all of
7767 * the prior column's quals (that is, indexcol's quals) when they
7768 * might be used for this.
7769 */
7771 {
7772 /*
7773 * Skip arrays can't be added after a RowCompare input qual
7774 * due to limitations in nbtree
7775 */
7776 break;
7777 }
7778 if (eqQualHere)
7779 {
7780 /*
7781 * Don't need to add a skip array for an indexcol that already
7782 * has an '=' qual/equality constraint
7783 */
7784 indexcol++;
7786 }
7787 eqQualHere = false;
7788
7789 while (indexcol < iclause->indexcol)
7790 {
7791 double ndistinct;
7792 bool isdefault = true;
7793
7794 found_array = true;
7795
7796 /*
7797 * A skipped attribute's ndistinct forms the basis of our
7798 * estimate of the total number of "array elements" used by
7799 * its skip array at runtime. Look that up first.
7800 */
7802 ndistinct = get_variable_numdistinct(&vardata, &isdefault);
7803
7804 if (indexcol == 0)
7805 {
7806 /*
7807 * Get an estimate of the leading column's correlation in
7808 * passing (avoids rereading variable stats below)
7809 */
7810 if (HeapTupleIsValid(vardata.statsTuple))
7812 have_correlation = true;
7813 }
7814
7816
7817 /*
7818 * If ndistinct is a default estimate, conservatively assume
7819 * that no skipping will happen at runtime
7820 */
7821 if (isdefault)
7822 {
7823 num_sa_scans = num_sa_scans_prev_cols;
7824 break; /* done building indexBoundQuals */
7825 }
7826
7827 /*
7828 * Apply indexcol's indexSkipQuals selectivity to ndistinct
7829 */
7830 if (indexSkipQuals != NIL)
7831 {
7834
7835 /*
7836 * If the index is partial, AND the index predicate with
7837 * the index-bound quals to produce a more accurate idea
7838 * of the number of distinct values for prior indexcol
7839 */
7842
7844 index->rel->relid,
7845 JOIN_INNER,
7846 NULL);
7847
7848 /*
7849 * If ndistinctfrac is selective (on its own), the scan is
7850 * unlikely to benefit from repositioning itself using
7851 * later quals. Do not allow iclause->indexcol's quals to
7852 * be added to indexBoundQuals (it would increase descent
7853 * costs, without lowering numIndexTuples costs by much).
7854 */
7856 {
7857 num_sa_scans = num_sa_scans_prev_cols;
7858 break; /* done building indexBoundQuals */
7859 }
7860
7861 /* Adjust ndistinct downward */
7862 ndistinct = rint(ndistinct * ndistinctfrac);
7863 ndistinct = Max(ndistinct, 1);
7864 }
7865
7866 /*
7867 * When there's no inequality quals, account for the need to
7868 * find an initial value by counting -inf/+inf as a value.
7869 *
7870 * We don't charge anything extra for possible next/prior key
7871 * index probes, which are sometimes used to find the next
7872 * valid skip array element (ahead of using the located
7873 * element value to relocate the scan to the next position
7874 * that might contain matching tuples). It seems hard to do
7875 * better here. Use of the skip support infrastructure often
7876 * avoids most next/prior key probes. But even when it can't,
7877 * there's a decent chance that most individual next/prior key
7878 * probes will locate a leaf page whose key space overlaps all
7879 * of the scan's keys (even the lower-order keys) -- which
7880 * also avoids the need for a separate, extra index descent.
7881 * Note also that these probes are much cheaper than non-probe
7882 * primitive index scans: they're reliably very selective.
7883 */
7884 if (indexSkipQuals == NIL)
7885 ndistinct += 1;
7886
7887 /*
7888 * Update num_sa_scans estimate by multiplying by ndistinct.
7889 *
7890 * We make the pessimistic assumption that there is no
7891 * naturally occurring cross-column correlation. This is
7892 * often wrong, but it seems best to err on the side of not
7893 * expecting skipping to be helpful...
7894 */
7895 num_sa_scans *= ndistinct;
7896
7897 /*
7898 * ...but back out of adding this latest group of 1 or more
7899 * skip arrays when num_sa_scans exceeds the total number of
7900 * index pages (revert to num_sa_scans from before indexcol).
7901 * This causes a sharp discontinuity in cost (as a function of
7902 * the indexcol's ndistinct), but that is representative of
7903 * actual runtime costs.
7904 *
7905 * Note that skipping is helpful when each primitive index
7906 * scan only manages to skip over 1 or 2 irrelevant leaf pages
7907 * on average. Skip arrays bring savings in CPU costs due to
7908 * the scan not needing to evaluate indexquals against every
7909 * tuple, which can greatly exceed any savings in I/O costs.
7910 * This test is a test of whether num_sa_scans implies that
7911 * we're past the point where the ability to skip ceases to
7912 * lower the scan's costs (even qual evaluation CPU costs).
7913 */
7914 if (index->pages < num_sa_scans)
7915 {
7916 num_sa_scans = num_sa_scans_prev_cols;
7917 break; /* done building indexBoundQuals */
7918 }
7919
7920 indexcol++;
7922 }
7923
7924 /*
7925 * Finished considering the need to add skip arrays to bridge an
7926 * initial eqQualHere gap between the old and new index columns
7927 * (or there was no initial eqQualHere gap in the first place).
7928 *
7929 * If an initial gap could not be bridged, then new column's quals
7930 * (i.e. iclause->indexcol's quals) won't go into indexBoundQuals,
7931 * and so won't affect our final numIndexTuples estimate.
7932 */
7933 if (indexcol != iclause->indexcol)
7934 break; /* done building indexBoundQuals */
7935 }
7936
7937 Assert(indexcol == iclause->indexcol);
7938
7939 /* Examine each indexqual associated with this index clause */
7940 foreach(lc2, iclause->indexquals)
7941 {
7943 Expr *clause = rinfo->clause;
7944 Oid clause_op = InvalidOid;
7945 int op_strategy;
7946
7947 if (IsA(clause, OpExpr))
7948 {
7949 OpExpr *op = (OpExpr *) clause;
7950
7951 clause_op = op->opno;
7952 }
7953 else if (IsA(clause, RowCompareExpr))
7954 {
7955 RowCompareExpr *rc = (RowCompareExpr *) clause;
7956
7957 clause_op = linitial_oid(rc->opnos);
7958 found_row_compare = true;
7959 }
7960 else if (IsA(clause, ScalarArrayOpExpr))
7961 {
7962 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
7963 Node *other_operand = (Node *) lsecond(saop->args);
7965
7966 clause_op = saop->opno;
7967 found_array = true;
7968 /* estimate SA descents by indexBoundQuals only */
7969 if (alength > 1)
7970 num_sa_scans *= alength;
7971 }
7972 else if (IsA(clause, NullTest))
7973 {
7974 NullTest *nt = (NullTest *) clause;
7975
7976 if (nt->nulltesttype == IS_NULL)
7977 {
7978 found_is_null_op = true;
7979 /* IS NULL is like = for selectivity/skip scan purposes */
7980 eqQualHere = true;
7981 }
7982 }
7983 else
7984 elog(ERROR, "unsupported indexqual type: %d",
7985 (int) nodeTag(clause));
7986
7987 /* check for equality operator */
7988 if (OidIsValid(clause_op))
7989 {
7990 op_strategy = get_op_opfamily_strategy(clause_op,
7991 index->opfamily[indexcol]);
7992 Assert(op_strategy != 0); /* not a member of opfamily?? */
7993 if (op_strategy == BTEqualStrategyNumber)
7994 eqQualHere = true;
7995 }
7996
7998
7999 /*
8000 * We apply inequality selectivities to estimate index descent
8001 * costs with scans that use skip arrays. Save this indexcol's
8002 * RestrictInfos if it looks like they'll be needed for that.
8003 */
8004 if (!eqQualHere && !found_row_compare &&
8005 indexcol < index->nkeycolumns - 1)
8007 }
8008 }
8009
8010 /*
8011 * If index is unique and we found an '=' clause for each column, we can
8012 * just assume numIndexTuples = 1 and skip the expensive
8013 * clauselist_selectivity calculations. However, an array or NullTest
8014 * always invalidates that theory (even when eqQualHere has been set).
8015 */
8016 if (index->unique &&
8017 indexcol == index->nkeycolumns - 1 &&
8018 eqQualHere &&
8019 !found_array &&
8021 numIndexTuples = 1.0;
8022 else
8023 {
8026
8027 /*
8028 * If the index is partial, AND the index predicate with the
8029 * index-bound quals to produce a more accurate idea of the number of
8030 * rows covered by the bound conditions.
8031 */
8033
8035 index->rel->relid,
8036 JOIN_INNER,
8037 NULL);
8038 numIndexTuples = btreeSelectivity * index->rel->tuples;
8039
8040 /*
8041 * btree automatically combines individual array element primitive
8042 * index scans whenever the tuples covered by the next set of array
8043 * keys are close to tuples covered by the current set. That puts a
8044 * natural ceiling on the worst case number of descents -- there
8045 * cannot possibly be more than one descent per leaf page scanned.
8046 *
8047 * Clamp the number of descents to at most 1/3 the number of index
8048 * pages. This avoids implausibly high estimates with low selectivity
8049 * paths, where scans usually require only one or two descents. This
8050 * is most likely to help when there are several SAOP clauses, where
8051 * naively accepting the total number of distinct combinations of
8052 * array elements as the number of descents would frequently lead to
8053 * wild overestimates.
8054 *
8055 * We somewhat arbitrarily don't just make the cutoff the total number
8056 * of leaf pages (we make it 1/3 the total number of pages instead) to
8057 * give the btree code credit for its ability to continue on the leaf
8058 * level with low selectivity scans.
8059 *
8060 * Note: num_sa_scans includes both ScalarArrayOp array elements and
8061 * skip array elements whose qual affects our numIndexTuples estimate.
8062 */
8063 num_sa_scans = Min(num_sa_scans, ceil(index->pages * 0.3333333));
8064 num_sa_scans = Max(num_sa_scans, 1);
8065
8066 /*
8067 * As in genericcostestimate(), we have to adjust for any array quals
8068 * included in indexBoundQuals, and then round to integer.
8069 *
8070 * It is tempting to make genericcostestimate behave as if array
8071 * clauses work in almost the same way as scalar operators during
8072 * btree scans, making the top-level scan look like a continuous scan
8073 * (as opposed to num_sa_scans-many primitive index scans). After
8074 * all, btree scans mostly work like that at runtime. However, such a
8075 * scheme would badly bias genericcostestimate's simplistic approach
8076 * to calculating numIndexPages through prorating.
8077 *
8078 * Stick with the approach taken by non-native SAOP scans for now.
8079 * genericcostestimate will use the Mackert-Lohman formula to
8080 * compensate for repeat page fetches, even though that definitely
8081 * won't happen during btree scans (not for leaf pages, at least).
8082 * We're usually very pessimistic about the number of primitive index
8083 * scans that will be required, but it's not clear how to do better.
8084 */
8085 numIndexTuples = rint(numIndexTuples / num_sa_scans);
8086 }
8087
8088 /*
8089 * Now do generic index cost estimation.
8090 */
8091 costs.numIndexTuples = numIndexTuples;
8092 costs.num_sa_scans = num_sa_scans;
8093
8094 genericcostestimate(root, path, loop_count, &costs);
8095
8096 /*
8097 * Add a CPU-cost component to represent the costs of initial btree
8098 * descent. We don't charge any I/O cost for touching upper btree levels,
8099 * since they tend to stay in cache, but we still have to do about log2(N)
8100 * comparisons to descend a btree of N leaf tuples. We charge one
8101 * cpu_operator_cost per comparison.
8102 *
8103 * If there are SAOP or skip array keys, charge this once per estimated
8104 * index descent. The ones after the first one are not startup cost so
8105 * far as the overall plan goes, so just add them to "total" cost.
8106 */
8107 if (index->tuples > 1) /* avoid computing log(0) */
8108 {
8109 descentCost = ceil(log(index->tuples) / log(2.0)) * cpu_operator_cost;
8111 costs.indexTotalCost += costs.num_sa_scans * descentCost;
8112 }
8113
8114 /*
8115 * Even though we're not charging I/O cost for touching upper btree pages,
8116 * it's still reasonable to charge some CPU cost per page descended
8117 * through. Moreover, if we had no such charge at all, bloated indexes
8118 * would appear to have the same search cost as unbloated ones, at least
8119 * in cases where only a single leaf page is expected to be visited. This
8120 * cost is somewhat arbitrarily set at 50x cpu_operator_cost per page
8121 * touched. The number of such pages is btree tree height plus one (ie,
8122 * we charge for the leaf page too). As above, charge once per estimated
8123 * SAOP/skip array descent.
8124 */
8127 costs.indexTotalCost += costs.num_sa_scans * descentCost;
8128
8129 if (!have_correlation)
8130 {
8132 if (HeapTupleIsValid(vardata.statsTuple))
8135 }
8136 else
8137 {
8138 /* btcost_correlation already called earlier on */
8140 }
8141
8142 *indexStartupCost = costs.indexStartupCost;
8143 *indexTotalCost = costs.indexTotalCost;
8144 *indexSelectivity = costs.indexSelectivity;
8145 *indexCorrelation = costs.indexCorrelation;
8146 *indexPages = costs.numIndexPages;
8147}
#define OidIsValid(objectId)
Definition c.h:800
List * lappend(List *list, void *datum)
Definition list.c:339
int get_op_opfamily_strategy(Oid opno, Oid opfamily)
Definition lsyscache.c:85
#define IsA(nodeptr, _type_)
Definition nodes.h:164
#define nodeTag(nodeptr)
Definition nodes.h:139
double Selectivity
Definition nodes.h:260
#define NIL
Definition pg_list.h:68
#define lsecond(l)
Definition pg_list.h:183
#define linitial_oid(l)
Definition pg_list.h:180
unsigned int Oid
@ IS_NULL
Definition primnodes.h:1978
List * add_predicate_to_index_quals(IndexOptInfo *index, List *indexQuals)
Definition selfuncs.c:7619
#define DEFAULT_PAGE_CPU_MULTIPLIER
Definition selfuncs.c:144
double estimate_array_length(PlannerInfo *root, Node *arrayexpr)
Definition selfuncs.c:2223
void genericcostestimate(PlannerInfo *root, IndexPath *path, double loop_count, GenericCosts *costs)
Definition selfuncs.c:7396
static void examine_indexcol_variable(PlannerInfo *root, IndexOptInfo *index, int indexcol, VariableStatData *vardata)
Definition selfuncs.c:6500
static double btcost_correlation(IndexOptInfo *index, VariableStatData *vardata)
Definition selfuncs.c:7650
double get_variable_numdistinct(VariableStatData *vardata, bool *isdefault)
Definition selfuncs.c:6603
#define DEFAULT_RANGE_INEQ_SEL
Definition selfuncs.h:40
#define BTEqualStrategyNumber
Definition stratnum.h:31
Selectivity indexSelectivity
Definition selfuncs.h:129
Cost indexStartupCost
Definition selfuncs.h:127
double indexCorrelation
Definition selfuncs.h:130
double num_sa_scans
Definition selfuncs.h:136
Cost indexTotalCost
Definition selfuncs.h:128
double numIndexPages
Definition selfuncs.h:133
double numIndexTuples
Definition selfuncs.h:134
Definition nodes.h:135
Oid opno
Definition primnodes.h:851
Expr * clause
Definition pathnodes.h:2886

References add_predicate_to_index_quals(), ScalarArrayOpExpr::args, Assert, btcost_correlation(), BTEqualStrategyNumber, RestrictInfo::clause, clauselist_selectivity(), cpu_operator_cost, DEFAULT_PAGE_CPU_MULTIPLIER, DEFAULT_RANGE_INEQ_SEL, elog, ERROR, estimate_array_length(), examine_indexcol_variable(), fb(), genericcostestimate(), get_op_opfamily_strategy(), get_variable_numdistinct(), HeapTupleIsValid, IndexPath::indexclauses, GenericCosts::indexCorrelation, IndexPath::indexinfo, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, InvalidOid, IS_NULL, IsA, JOIN_INNER, lappend(), lfirst_node, linitial_oid, lsecond, Max, Min, NIL, nodeTag, GenericCosts::num_sa_scans, GenericCosts::numIndexPages, GenericCosts::numIndexTuples, OidIsValid, OpExpr::opno, ScalarArrayOpExpr::opno, ReleaseVariableStats, and root.

Referenced by bthandler().

◆ gincostestimate()

void gincostestimate ( struct PlannerInfo *root,
struct IndexPath *path,
double loop_count,
Cost *indexStartupCost,
Cost *indexTotalCost,
Selectivity *indexSelectivity,
double *indexCorrelation,
double *indexPages
)
extern

Definition at line 8602 of file selfuncs.c.

8606{
8607 IndexOptInfo *index = path->indexinfo;
8610 double numPages = index->pages,
8611 numTuples = index->tuples;
8612 double numEntryPages,
8615 numEntries;
8616 GinQualCounts counts;
8617 bool matchPossible;
8618 bool fullIndexScan;
8619 double partialScale;
8620 double entryPagesFetched,
8623 double qual_op_cost,
8625 spc_random_page_cost,
8628 Relation indexRel;
8630 ListCell *lc;
8631 int i;
8632
8633 /*
8634 * Obtain statistical information from the meta page, if possible. Else
8635 * set ginStats to zeroes, and we'll cope below.
8636 */
8637 if (!index->hypothetical)
8638 {
8639 /* Lock should have already been obtained in plancat.c */
8640 indexRel = index_open(index->indexoid, NoLock);
8641 ginGetStats(indexRel, &ginStats);
8642 index_close(indexRel, NoLock);
8643 }
8644 else
8645 {
8646 memset(&ginStats, 0, sizeof(ginStats));
8647 }
8648
8649 /*
8650 * Assuming we got valid (nonzero) stats at all, nPendingPages can be
8651 * trusted, but the other fields are data as of the last VACUUM. We can
8652 * scale them up to account for growth since then, but that method only
8653 * goes so far; in the worst case, the stats might be for a completely
8654 * empty index, and scaling them will produce pretty bogus numbers.
8655 * Somewhat arbitrarily, set the cutoff for doing scaling at 4X growth; if
8656 * it's grown more than that, fall back to estimating things only from the
8657 * assumed-accurate index size. But we'll trust nPendingPages in any case
8658 * so long as it's not clearly insane, ie, more than the index size.
8659 */
8660 if (ginStats.nPendingPages < numPages)
8661 numPendingPages = ginStats.nPendingPages;
8662 else
8663 numPendingPages = 0;
8664
8665 if (numPages > 0 && ginStats.nTotalPages <= numPages &&
8666 ginStats.nTotalPages > numPages / 4 &&
8667 ginStats.nEntryPages > 0 && ginStats.nEntries > 0)
8668 {
8669 /*
8670 * OK, the stats seem close enough to sane to be trusted. But we
8671 * still need to scale them by the ratio numPages / nTotalPages to
8672 * account for growth since the last VACUUM.
8673 */
8674 double scale = numPages / ginStats.nTotalPages;
8675
8676 numEntryPages = ceil(ginStats.nEntryPages * scale);
8677 numDataPages = ceil(ginStats.nDataPages * scale);
8678 numEntries = ceil(ginStats.nEntries * scale);
8679 /* ensure we didn't round up too much */
8683 }
8684 else
8685 {
8686 /*
8687 * We might get here because it's a hypothetical index, or an index
8688 * created pre-9.1 and never vacuumed since upgrading (in which case
8689 * its stats would read as zeroes), or just because it's grown too
8690 * much since the last VACUUM for us to put our faith in scaling.
8691 *
8692 * Invent some plausible internal statistics based on the index page
8693 * count (and clamp that to at least 10 pages, just in case). We
8694 * estimate that 90% of the index is entry pages, and the rest is data
8695 * pages. Estimate 100 entries per entry page; this is rather bogus
8696 * since it'll depend on the size of the keys, but it's more robust
8697 * than trying to predict the number of entries per heap tuple.
8698 */
8699 numPages = Max(numPages, 10);
8703 }
8704
8705 /* In an empty index, numEntries could be zero. Avoid divide-by-zero */
8706 if (numEntries < 1)
8707 numEntries = 1;
8708
8709 /*
8710 * If the index is partial, AND the index predicate with the index-bound
8711 * quals to produce a more accurate idea of the number of rows covered by
8712 * the bound conditions.
8713 */
8715
8716 /* Estimate the fraction of main-table tuples that will be visited */
8717 *indexSelectivity = clauselist_selectivity(root, selectivityQuals,
8718 index->rel->relid,
8719 JOIN_INNER,
8720 NULL);
8721
8722 /* fetch estimated page cost for tablespace containing index */
8723 get_tablespace_page_costs(index->reltablespace,
8724 &spc_random_page_cost,
8725 NULL);
8726
8727 /*
8728 * Generic assumption about index correlation: there isn't any.
8729 */
8730 *indexCorrelation = 0.0;
8731
8732 /*
8733 * Examine quals to estimate number of search entries & partial matches
8734 */
8735 memset(&counts, 0, sizeof(counts));
8736 counts.arrayScans = 1;
8737 matchPossible = true;
8738
8739 foreach(lc, path->indexclauses)
8740 {
8742 ListCell *lc2;
8743
8744 foreach(lc2, iclause->indexquals)
8745 {
8747 Expr *clause = rinfo->clause;
8748
8749 if (IsA(clause, OpExpr))
8750 {
8752 index,
8753 iclause->indexcol,
8754 (OpExpr *) clause,
8755 &counts);
8756 if (!matchPossible)
8757 break;
8758 }
8759 else if (IsA(clause, ScalarArrayOpExpr))
8760 {
8762 index,
8763 iclause->indexcol,
8764 (ScalarArrayOpExpr *) clause,
8765 numEntries,
8766 &counts);
8767 if (!matchPossible)
8768 break;
8769 }
8770 else
8771 {
8772 /* shouldn't be anything else for a GIN index */
8773 elog(ERROR, "unsupported GIN indexqual type: %d",
8774 (int) nodeTag(clause));
8775 }
8776 }
8777 }
8778
8779 /* Fall out if there were any provably-unsatisfiable quals */
8780 if (!matchPossible)
8781 {
8782 *indexStartupCost = 0;
8783 *indexTotalCost = 0;
8784 *indexSelectivity = 0;
8785 return;
8786 }
8787
8788 /*
8789 * If attribute has a full scan and at the same time doesn't have normal
8790 * scan, then we'll have to scan all non-null entries of that attribute.
8791 * Currently, we don't have per-attribute statistics for GIN. Thus, we
8792 * must assume the whole GIN index has to be scanned in this case.
8793 */
8794 fullIndexScan = false;
8795 for (i = 0; i < index->nkeycolumns; i++)
8796 {
8797 if (counts.attHasFullScan[i] && !counts.attHasNormalScan[i])
8798 {
8799 fullIndexScan = true;
8800 break;
8801 }
8802 }
8803
8804 if (fullIndexScan || indexQuals == NIL)
8805 {
8806 /*
8807 * Full index scan will be required. We treat this as if every key in
8808 * the index had been listed in the query; is that reasonable?
8809 */
8810 counts.partialEntries = 0;
8811 counts.exactEntries = numEntries;
8812 counts.searchEntries = numEntries;
8813 }
8814
8815 /* Will we have more than one iteration of a nestloop scan? */
8817
8818 /*
8819 * Compute cost to begin scan, first of all, pay attention to pending
8820 * list.
8821 */
8823
8824 /*
8825 * Estimate number of entry pages read. We need to do
8826 * counts.searchEntries searches. Use a power function as it should be,
8827 * but tuples on leaf pages usually is much greater. Here we include all
8828 * searches in entry tree, including search of first entry in partial
8829 * match algorithm
8830 */
8832
8833 /*
8834 * Add an estimate of entry pages read by partial match algorithm. It's a
8835 * scan over leaf pages in entry tree. We haven't any useful stats here,
8836 * so estimate it as proportion. Because counts.partialEntries is really
8837 * pretty bogus (see code above), it's possible that it is more than
8838 * numEntries; clamp the proportion to ensure sanity.
8839 */
8842
8844
8845 /*
8846 * Partial match algorithm reads all data pages before doing actual scan,
8847 * so it's a startup cost. Again, we haven't any useful stats here, so
8848 * estimate it as proportion.
8849 */
8851
8852 *indexStartupCost = 0;
8853 *indexTotalCost = 0;
8854
8855 /*
8856 * Add a CPU-cost component to represent the costs of initial entry btree
8857 * descent. We don't charge any I/O cost for touching upper btree levels,
8858 * since they tend to stay in cache, but we still have to do about log2(N)
8859 * comparisons to descend a btree of N leaf tuples. We charge one
8860 * cpu_operator_cost per comparison.
8861 *
8862 * If there are ScalarArrayOpExprs, charge this once per SA scan. The
8863 * ones after the first one are not startup cost so far as the overall
8864 * plan is concerned, so add them only to "total" cost.
8865 */
8866 if (numEntries > 1) /* avoid computing log(0) */
8867 {
8869 *indexStartupCost += descentCost * counts.searchEntries;
8870 *indexTotalCost += counts.arrayScans * descentCost * counts.searchEntries;
8871 }
8872
8873 /*
8874 * Add a cpu cost per entry-page fetched. This is not amortized over a
8875 * loop.
8876 */
8879
8880 /*
8881 * Add a cpu cost per data-page fetched. This is also not amortized over a
8882 * loop. Since those are the data pages from the partial match algorithm,
8883 * charge them as startup cost.
8884 */
8886
8887 /*
8888 * Since we add the startup cost to the total cost later on, remove the
8889 * initial arrayscan from the total.
8890 */
8891 *indexTotalCost += dataPagesFetched * (counts.arrayScans - 1) * DEFAULT_PAGE_CPU_MULTIPLIER * cpu_operator_cost;
8892
8893 /*
8894 * Calculate cache effects if more than one scan due to nestloops or array
8895 * quals. The result is pro-rated per nestloop scan, but the array qual
8896 * factor shouldn't be pro-rated (compare genericcostestimate).
8897 */
8898 if (outer_scans > 1 || counts.arrayScans > 1)
8899 {
8910 }
8911
8912 /*
8913 * Here we use random page cost because logically-close pages could be far
8914 * apart on disk.
8915 */
8916 *indexStartupCost += (entryPagesFetched + dataPagesFetched) * spc_random_page_cost;
8917
8918 /*
8919 * Now compute the number of data pages fetched during the scan.
8920 *
8921 * We assume every entry to have the same number of items, and that there
8922 * is no overlap between them. (XXX: tsvector and array opclasses collect
8923 * statistics on the frequency of individual keys; it would be nice to use
8924 * those here.)
8925 */
8927
8928 /*
8929 * If there is a lot of overlap among the entries, in particular if one of
8930 * the entries is very frequent, the above calculation can grossly
8931 * under-estimate. As a simple cross-check, calculate a lower bound based
8932 * on the overall selectivity of the quals. At a minimum, we must read
8933 * one item pointer for each matching entry.
8934 *
8935 * The width of each item pointer varies, based on the level of
8936 * compression. We don't have statistics on that, but an average of
8937 * around 3 bytes per item is fairly typical.
8938 */
8939 dataPagesFetchedBySel = ceil(*indexSelectivity *
8940 (numTuples / (BLCKSZ / 3)));
8943
8944 /* Add one page cpu-cost to the startup cost */
8945 *indexStartupCost += DEFAULT_PAGE_CPU_MULTIPLIER * cpu_operator_cost * counts.searchEntries;
8946
8947 /*
8948 * Add once again a CPU-cost for those data pages, before amortizing for
8949 * cache.
8950 */
8952
8953 /* Account for cache effects, the same as above */
8954 if (outer_scans > 1 || counts.arrayScans > 1)
8955 {
8961 }
8962
8963 /* And apply random_page_cost as the cost per page */
8964 *indexTotalCost += *indexStartupCost +
8965 dataPagesFetched * spc_random_page_cost;
8966
8967 /*
8968 * Add on index qual eval costs, much as in genericcostestimate. We charge
8969 * cpu but we can disregard indexorderbys, since GIN doesn't support
8970 * those.
8971 */
8974
8975 *indexStartupCost += qual_arg_cost;
8976 *indexTotalCost += qual_arg_cost;
8977
8978 /*
8979 * Add a cpu cost per search entry, corresponding to the actual visited
8980 * entries.
8981 */
8982 *indexTotalCost += (counts.searchEntries * counts.arrayScans) * (qual_op_cost);
8983 /* Now add a cpu cost per tuple in the posting lists / trees */
8984 *indexTotalCost += (numTuples * *indexSelectivity) * (cpu_index_tuple_cost);
8986}
uint32 BlockNumber
Definition block.h:31
double index_pages_fetched(double tuples_fetched, BlockNumber pages, double index_pages, PlannerInfo *root)
Definition costsize.c:896
double cpu_index_tuple_cost
Definition costsize.c:133
void ginGetStats(Relation index, GinStatsData *stats)
Definition ginutil.c:591
int i
Definition isn.c:77
static int list_length(const List *l)
Definition pg_list.h:152
static int scale
Definition pgbench.c:182
static bool gincost_scalararrayopexpr(PlannerInfo *root, IndexOptInfo *index, int indexcol, ScalarArrayOpExpr *clause, double numIndexEntries, GinQualCounts *counts)
Definition selfuncs.c:8486
static bool gincost_opexpr(PlannerInfo *root, IndexOptInfo *index, int indexcol, OpExpr *clause, GinQualCounts *counts)
Definition selfuncs.c:8436
bool attHasNormalScan[INDEX_MAX_KEYS]
Definition selfuncs.c:8309
double exactEntries
Definition selfuncs.c:8311
double arrayScans
Definition selfuncs.c:8313
double partialEntries
Definition selfuncs.c:8310
bool attHasFullScan[INDEX_MAX_KEYS]
Definition selfuncs.c:8308
double searchEntries
Definition selfuncs.c:8312

References add_predicate_to_index_quals(), GinQualCounts::arrayScans, GinQualCounts::attHasFullScan, GinQualCounts::attHasNormalScan, RestrictInfo::clause, clauselist_selectivity(), cpu_index_tuple_cost, cpu_operator_cost, DEFAULT_PAGE_CPU_MULTIPLIER, elog, ERROR, GinQualCounts::exactEntries, fb(), get_quals_from_indexclauses(), get_tablespace_page_costs(), gincost_opexpr(), gincost_scalararrayopexpr(), ginGetStats(), i, index_close(), index_open(), index_other_operands_eval_cost(), index_pages_fetched(), IndexPath::indexclauses, IndexPath::indexinfo, IsA, JOIN_INNER, lfirst_node, list_length(), Max, Min, NIL, nodeTag, NoLock, GinQualCounts::partialEntries, root, scale, and GinQualCounts::searchEntries.

Referenced by ginhandler().

◆ gistcostestimate()

void gistcostestimate ( struct PlannerInfo root,
struct IndexPath path,
double  loop_count,
Cost indexStartupCost,
Cost indexTotalCost,
Selectivity indexSelectivity,
double indexCorrelation,
double indexPages 
)
extern

Definition at line 8192 of file selfuncs.c.

8196{
8197 IndexOptInfo *index = path->indexinfo;
8198 GenericCosts costs = {0};
8200
8201 genericcostestimate(root, path, loop_count, &costs);
8202
8203 /*
8204 * We model index descent costs similarly to those for btree, but to do
8205 * that we first need an idea of the tree height. We somewhat arbitrarily
8206 * assume that the fanout is 100, meaning the tree height is at most
8207 * log100(index->pages).
8208 *
8209 * Although this computation isn't really expensive enough to require
8210 * caching, we might as well use index->tree_height to cache it.
8211 */
8212 if (index->tree_height < 0) /* unknown? */
8213 {
8214 if (index->pages > 1) /* avoid computing log(0) */
8215 index->tree_height = (int) (log(index->pages) / log(100.0));
8216 else
8217 index->tree_height = 0;
8218 }
8219
8220 /*
8221 * Add a CPU-cost component to represent the costs of initial descent. We
8222 * just use log(N) here not log2(N) since the branching factor isn't
8223 * necessarily two anyway. As for btree, charge once per SA scan.
8224 */
8225 if (index->tuples > 1) /* avoid computing log(0) */
8226 {
8229 costs.indexTotalCost += costs.num_sa_scans * descentCost;
8230 }
8231
8232 /*
8233 * Likewise add a per-page charge, calculated the same as for btrees.
8234 */
8237 costs.indexTotalCost += costs.num_sa_scans * descentCost;
8238
8239 *indexStartupCost = costs.indexStartupCost;
8240 *indexTotalCost = costs.indexTotalCost;
8241 *indexSelectivity = costs.indexSelectivity;
8242 *indexCorrelation = costs.indexCorrelation;
8243 *indexPages = costs.numIndexPages;
8244}

References cpu_operator_cost, DEFAULT_PAGE_CPU_MULTIPLIER, fb(), genericcostestimate(), GenericCosts::indexCorrelation, IndexPath::indexinfo, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, GenericCosts::num_sa_scans, GenericCosts::numIndexPages, and root.

Referenced by gisthandler().

◆ hashcostestimate()

void hashcostestimate ( struct PlannerInfo root,
struct IndexPath path,
double  loop_count,
Cost indexStartupCost,
Cost indexTotalCost,
Selectivity indexSelectivity,
double indexCorrelation,
double indexPages 
)
extern

Definition at line 8150 of file selfuncs.c.

8154{
8155 GenericCosts costs = {0};
8156
8157 genericcostestimate(root, path, loop_count, &costs);
8158
8159 /*
8160 * A hash index has no descent costs as such, since the index AM can go
8161 * directly to the target bucket after computing the hash value. There
8162 * are a couple of other hash-specific costs that we could conceivably add
8163 * here, though:
8164 *
8165 * Ideally we'd charge spc_random_page_cost for each page in the target
8166 * bucket, not just the numIndexPages pages that genericcostestimate
8167 * thought we'd visit. However in most cases we don't know which bucket
8168 * that will be. There's no point in considering the average bucket size
8169 * because the hash AM makes sure that's always one page.
8170 *
8171 * Likewise, we could consider charging some CPU for each index tuple in
8172 * the bucket, if we knew how many there were. But the per-tuple cost is
8173 * just a hash value comparison, not a general datatype-dependent
8174 * comparison, so any such charge ought to be quite a bit less than
8175 * cpu_operator_cost; which makes it probably not worth worrying about.
8176 *
8177 * A bigger issue is that chance hash-value collisions will result in
8178 * wasted probes into the heap. We don't currently attempt to model this
8179 * cost on the grounds that it's rare, but maybe it's not rare enough.
8180 * (Any fix for this ought to consider the generic lossy-operator problem,
8181 * though; it's not entirely hash-specific.)
8182 */
8183
8184 *indexStartupCost = costs.indexStartupCost;
8185 *indexTotalCost = costs.indexTotalCost;
8186 *indexSelectivity = costs.indexSelectivity;
8187 *indexCorrelation = costs.indexCorrelation;
8188 *indexPages = costs.numIndexPages;
8189}

References fb(), genericcostestimate(), GenericCosts::indexCorrelation, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, GenericCosts::numIndexPages, and root.

Referenced by hashhandler().

◆ spgcostestimate()

void spgcostestimate ( struct PlannerInfo root,
struct IndexPath path,
double  loop_count,
Cost indexStartupCost,
Cost indexTotalCost,
Selectivity indexSelectivity,
double indexCorrelation,
double indexPages 
)
extern

Definition at line 8247 of file selfuncs.c.

8251{
8252 IndexOptInfo *index = path->indexinfo;
8253 GenericCosts costs = {0};
8255
8256 genericcostestimate(root, path, loop_count, &costs);
8257
8258 /*
8259 * We model index descent costs similarly to those for btree, but to do
8260 * that we first need an idea of the tree height. We somewhat arbitrarily
8261 * assume that the fanout is 100, meaning the tree height is at most
8262 * log100(index->pages).
8263 *
8264 * Although this computation isn't really expensive enough to require
8265 * caching, we might as well use index->tree_height to cache it.
8266 */
8267 if (index->tree_height < 0) /* unknown? */
8268 {
8269 if (index->pages > 1) /* avoid computing log(0) */
8270 index->tree_height = (int) (log(index->pages) / log(100.0));
8271 else
8272 index->tree_height = 0;
8273 }
8274
8275 /*
8276 * Add a CPU-cost component to represent the costs of initial descent. We
8277 * just use log(N) here not log2(N) since the branching factor isn't
8278 * necessarily two anyway. As for btree, charge once per SA scan.
8279 */
8280 if (index->tuples > 1) /* avoid computing log(0) */
8281 {
8284 costs.indexTotalCost += costs.num_sa_scans * descentCost;
8285 }
8286
8287 /*
8288 * Likewise add a per-page charge, calculated the same as for btrees.
8289 */
8292 costs.indexTotalCost += costs.num_sa_scans * descentCost;
8293
8294 *indexStartupCost = costs.indexStartupCost;
8295 *indexTotalCost = costs.indexTotalCost;
8296 *indexSelectivity = costs.indexSelectivity;
8297 *indexCorrelation = costs.indexCorrelation;
8298 *indexPages = costs.numIndexPages;
8299}

References cpu_operator_cost, DEFAULT_PAGE_CPU_MULTIPLIER, fb(), genericcostestimate(), GenericCosts::indexCorrelation, IndexPath::indexinfo, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, GenericCosts::num_sa_scans, GenericCosts::numIndexPages, and root.

Referenced by spghandler().