PostgreSQL Source Code (git master) — Doxygen-generated documentation
index_selfuncs.h File Reference
#include "access/amapi.h"
Include dependency graph for index_selfuncs.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Functions

void brincostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void btcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void hashcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void gistcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void spgcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void gincostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 

Function Documentation

◆ brincostestimate()

void brincostestimate ( struct PlannerInfo *root,
struct IndexPath *path,
double loop_count,
Cost *indexStartupCost,
Cost *indexTotalCost,
Selectivity *indexSelectivity,
double *indexCorrelation,
double *indexPages 
)
extern

Definition at line 9025 of file selfuncs.c.

9029{
9030 IndexOptInfo *index = path->indexinfo;
9032 double numPages = index->pages;
9033 RelOptInfo *baserel = index->rel;
9036 Cost spc_random_page_cost;
9037 double qual_arg_cost;
9038 double qualSelectivity;
9040 double indexRanges;
9041 double minimalRanges;
9042 double estimatedRanges;
9043 double selec;
9044 Relation indexRel;
9045 ListCell *l;
9047
9048 Assert(rte->rtekind == RTE_RELATION);
9049
9050 /* fetch estimated page cost for the tablespace containing the index */
9051 get_tablespace_page_costs(index->reltablespace,
9052 &spc_random_page_cost,
9054
9055 /*
9056 * Obtain some data from the index itself, if possible. Otherwise invent
9057 * some plausible internal statistics based on the relation page count.
9058 */
9059 if (!index->hypothetical)
9060 {
9061 /*
9062 * A lock should have already been obtained on the index in plancat.c.
9063 */
9064 indexRel = index_open(index->indexoid, NoLock);
9065 brinGetStats(indexRel, &statsData);
9066 index_close(indexRel, NoLock);
9067
9068 /* work out the actual number of ranges in the index */
9069 indexRanges = Max(ceil((double) baserel->pages /
9070 statsData.pagesPerRange), 1.0);
9071 }
9072 else
9073 {
9074 /*
9075 * Assume default number of pages per range, and estimate the number
9076 * of ranges based on that.
9077 */
9078 indexRanges = Max(ceil((double) baserel->pages /
9080
9082 statsData.revmapNumPages = (indexRanges / REVMAP_PAGE_MAXITEMS) + 1;
9083 }
9084
9085 /*
9086 * Compute index correlation
9087 *
9088 * Because we can use all index quals equally when scanning, we can use
9089 * the largest correlation (in absolute value) among columns used by the
9090 * query. Start at zero, the worst possible case. If we cannot find any
9091 * correlation statistics, we will keep it as 0.
9092 */
9093 *indexCorrelation = 0;
9094
9095 foreach(l, path->indexclauses)
9096 {
9098 AttrNumber attnum = index->indexkeys[iclause->indexcol];
9099
9100 /* attempt to lookup stats in relation for this index column */
9101 if (attnum != 0)
9102 {
9103 /* Simple variable -- look to stats for the underlying table */
9106 {
9107 /*
9108 * The hook took control of acquiring a stats tuple. If it
9109 * did supply a tuple, it'd better have supplied a freefunc.
9110 */
9111 if (HeapTupleIsValid(vardata.statsTuple) && !vardata.freefunc)
9112 elog(ERROR,
9113 "no function provided to release variable stats with");
9114 }
9115 else
9116 {
9117 vardata.statsTuple =
9119 ObjectIdGetDatum(rte->relid),
9121 BoolGetDatum(false));
9122 vardata.freefunc = ReleaseSysCache;
9123 }
9124 }
9125 else
9126 {
9127 /*
9128 * Looks like we've found an expression column in the index. Let's
9129 * see if there's any stats for it.
9130 */
9131
9132 /* get the attnum from the 0-based index. */
9133 attnum = iclause->indexcol + 1;
9134
9136 (*get_index_stats_hook) (root, index->indexoid, attnum, &vardata))
9137 {
9138 /*
9139 * The hook took control of acquiring a stats tuple. If it
9140 * did supply a tuple, it'd better have supplied a freefunc.
9141 */
9142 if (HeapTupleIsValid(vardata.statsTuple) &&
9143 !vardata.freefunc)
9144 elog(ERROR, "no function provided to release variable stats with");
9145 }
9146 else
9147 {
9149 ObjectIdGetDatum(index->indexoid),
9151 BoolGetDatum(false));
9152 vardata.freefunc = ReleaseSysCache;
9153 }
9154 }
9155
9156 if (HeapTupleIsValid(vardata.statsTuple))
9157 {
9159
9160 if (get_attstatsslot(&sslot, vardata.statsTuple,
9163 {
9164 double varCorrelation = 0.0;
9165
9166 if (sslot.nnumbers > 0)
9167 varCorrelation = fabs(sslot.numbers[0]);
9168
9169 if (varCorrelation > *indexCorrelation)
9170 *indexCorrelation = varCorrelation;
9171
9173 }
9174 }
9175
9177 }
9178
9180 baserel->relid,
9181 JOIN_INNER, NULL);
9182
9183 /*
9184 * Now calculate the minimum possible ranges we could match with if all of
9185 * the rows were in the perfect order in the table's heap.
9186 */
9188
9189 /*
9190 * Now estimate the number of ranges that we'll touch by using the
9191 * indexCorrelation from the stats. Careful not to divide by zero (note
9192 * we're using the absolute value of the correlation).
9193 */
9194 if (*indexCorrelation < 1.0e-10)
9196 else
9197 estimatedRanges = Min(minimalRanges / *indexCorrelation, indexRanges);
9198
9199 /* we expect to visit this portion of the table */
9201
9203
9204 *indexSelectivity = selec;
9205
9206 /*
9207 * Compute the index qual costs, much as in genericcostestimate, to add to
9208 * the index costs. We can disregard indexorderbys, since BRIN doesn't
9209 * support those.
9210 */
9212
9213 /*
9214 * Compute the startup cost as the cost to read the whole revmap
9215 * sequentially, including the cost to execute the index quals.
9216 */
9217 *indexStartupCost =
9218 spc_seq_page_cost * statsData.revmapNumPages * loop_count;
9219 *indexStartupCost += qual_arg_cost;
9220
9221 /*
9222 * To read a BRIN index there might be a bit of back and forth over
9223 * regular pages, as revmap might point to them out of sequential order;
9224 * calculate the total cost as reading the whole index in random order.
9225 */
9226 *indexTotalCost = *indexStartupCost +
9227 spc_random_page_cost * (numPages - statsData.revmapNumPages) * loop_count;
9228
9229 /*
9230 * Charge a small amount per range tuple which we expect to match to. This
9231 * is meant to reflect the costs of manipulating the bitmap. The BRIN scan
9232 * will set a bit for each page in the range when we find a matching
9233 * range, so we must multiply the charge by the number of pages in the
9234 * range.
9235 */
9236 *indexTotalCost += 0.1 * cpu_operator_cost * estimatedRanges *
9237 statsData.pagesPerRange;
9238
9239 *indexPages = index->pages;
9240}
int16 AttrNumber
Definition attnum.h:21
void brinGetStats(Relation index, BrinStatsData *stats)
Definition brin.c:1653
#define BRIN_DEFAULT_PAGES_PER_RANGE
Definition brin.h:40
#define REVMAP_PAGE_MAXITEMS
Definition brin_page.h:93
#define Min(x, y)
Definition c.h:1091
#define Max(x, y)
Definition c.h:1085
#define Assert(condition)
Definition c.h:943
Selectivity clauselist_selectivity(PlannerInfo *root, List *clauses, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
Definition clausesel.c:100
double cpu_operator_cost
Definition costsize.c:135
#define ERROR
Definition elog.h:40
#define elog(elevel,...)
Definition elog.h:228
#define HeapTupleIsValid(tuple)
Definition htup.h:78
void index_close(Relation relation, LOCKMODE lockmode)
Definition indexam.c:178
Relation index_open(Oid relationId, LOCKMODE lockmode)
Definition indexam.c:134
#define NoLock
Definition lockdefs.h:34
void free_attstatsslot(AttStatsSlot *sslot)
Definition lsyscache.c:3566
bool get_attstatsslot(AttStatsSlot *sslot, HeapTuple statstuple, int reqkind, Oid reqop, int flags)
Definition lsyscache.c:3456
#define ATTSTATSSLOT_NUMBERS
Definition lsyscache.h:44
double Cost
Definition nodes.h:261
@ JOIN_INNER
Definition nodes.h:303
@ RTE_RELATION
#define planner_rt_fetch(rti, root)
Definition pathnodes.h:704
int16 attnum
#define lfirst_node(type, lc)
Definition pg_list.h:176
static Datum Int16GetDatum(int16 X)
Definition postgres.h:172
static Datum BoolGetDatum(bool X)
Definition postgres.h:112
static Datum ObjectIdGetDatum(Oid X)
Definition postgres.h:252
#define InvalidOid
static int fb(int x)
tree ctl root
Definition radixtree.h:1857
List * get_quals_from_indexclauses(List *indexclauses)
Definition selfuncs.c:7321
get_index_stats_hook_type get_index_stats_hook
Definition selfuncs.c:184
Cost index_other_operands_eval_cost(PlannerInfo *root, List *indexquals)
Definition selfuncs.c:7351
get_relation_stats_hook_type get_relation_stats_hook
Definition selfuncs.c:183
#define ReleaseVariableStats(vardata)
Definition selfuncs.h:101
#define CLAMP_PROBABILITY(p)
Definition selfuncs.h:63
void get_tablespace_page_costs(Oid spcid, double *spc_random_page_cost, double *spc_seq_page_cost)
Definition spccache.c:183
List * indexclauses
Definition pathnodes.h:2057
IndexOptInfo * indexinfo
Definition pathnodes.h:2056
Definition pg_list.h:54
Definition type.h:96
void ReleaseSysCache(HeapTuple tuple)
Definition syscache.c:265
HeapTuple SearchSysCache3(SysCacheIdentifier cacheId, Datum key1, Datum key2, Datum key3)
Definition syscache.c:241

References Assert, attnum, ATTSTATSSLOT_NUMBERS, BoolGetDatum(), BRIN_DEFAULT_PAGES_PER_RANGE, brinGetStats(), CLAMP_PROBABILITY, clauselist_selectivity(), cpu_operator_cost, elog, ERROR, fb(), free_attstatsslot(), get_attstatsslot(), get_index_stats_hook, get_quals_from_indexclauses(), get_relation_stats_hook, get_tablespace_page_costs(), HeapTupleIsValid, index_close(), index_open(), index_other_operands_eval_cost(), IndexPath::indexclauses, IndexPath::indexinfo, Int16GetDatum(), InvalidOid, JOIN_INNER, lfirst_node, Max, Min, NoLock, ObjectIdGetDatum(), planner_rt_fetch, ReleaseSysCache(), ReleaseVariableStats, REVMAP_PAGE_MAXITEMS, root, RTE_RELATION, and SearchSysCache3().

Referenced by brinhandler().

◆ btcostestimate()

void btcostestimate ( struct PlannerInfo *root,
struct IndexPath *path,
double loop_count,
Cost *indexStartupCost,
Cost *indexTotalCost,
Selectivity *indexSelectivity,
double *indexCorrelation,
double *indexPages 
)
extern

Definition at line 7703 of file selfuncs.c.

7707{
7708 IndexOptInfo *index = path->indexinfo;
7709 GenericCosts costs = {0};
7711 double numIndexTuples;
7715 int indexcol;
7716 bool eqQualHere;
7717 bool found_row_compare;
7718 bool found_array;
7719 bool found_is_null_op;
7720 bool have_correlation = false;
7721 double num_sa_scans;
7722 double correlation = 0.0;
7723 ListCell *lc;
7724
7725 /*
7726 * For a btree scan, only leading '=' quals plus inequality quals for the
7727 * immediately next attribute contribute to index selectivity (these are
7728 * the "boundary quals" that determine the starting and stopping points of
7729 * the index scan). Additional quals can suppress visits to the heap, so
7730 * it's OK to count them in indexSelectivity, but they should not count
7731 * for estimating numIndexTuples. So we must examine the given indexquals
7732 * to find out which ones count as boundary quals. We rely on the
7733 * knowledge that they are given in index column order. Note that nbtree
7734 * preprocessing can add skip arrays that act as leading '=' quals in the
7735 * absence of ordinary input '=' quals, so in practice _most_ input quals
7736 * are able to act as index bound quals (which we take into account here).
7737 *
7738 * For a RowCompareExpr, we consider only the first column, just as
7739 * rowcomparesel() does.
7740 *
7741 * If there's a SAOP or skip array in the quals, we'll actually perform up
7742 * to N index descents (not just one), but the underlying array key's
7743 * operator can be considered to act the same as it normally does.
7744 */
7747 indexcol = 0;
7748 eqQualHere = false;
7749 found_row_compare = false;
7750 found_array = false;
7751 found_is_null_op = false;
7752 num_sa_scans = 1;
7753 foreach(lc, path->indexclauses)
7754 {
7756 ListCell *lc2;
7757
7758 if (indexcol < iclause->indexcol)
7759 {
7760 double num_sa_scans_prev_cols = num_sa_scans;
7761
7762 /*
7763 * Beginning of a new column's quals.
7764 *
7765 * Skip scans use skip arrays, which are ScalarArrayOp style
7766 * arrays that generate their elements procedurally and on demand.
7767 * Given a multi-column index on "(a, b)", and an SQL WHERE clause
7768 * "WHERE b = 42", a skip scan will effectively use an indexqual
7769 * "WHERE a = ANY('{every col a value}') AND b = 42". (Obviously,
7770 * the array on "a" must also return "IS NULL" matches, since our
7771 * WHERE clause used no strict operator on "a").
7772 *
7773 * Here we consider how nbtree will backfill skip arrays for any
7774 * index columns that lacked an '=' qual. This maintains our
7775 * num_sa_scans estimate, and determines if this new column (the
7776 * "iclause->indexcol" column, not the prior "indexcol" column)
7777 * can have its RestrictInfos/quals added to indexBoundQuals.
7778 *
7779 * We'll need to handle columns that have inequality quals, where
7780 * the skip array generates values from a range constrained by the
7781 * quals (not every possible value). We've been maintaining
7782 * indexSkipQuals to help with this; it will now contain all of
7783 * the prior column's quals (that is, indexcol's quals) when they
7784 * might be used for this.
7785 */
7787 {
7788 /*
7789 * Skip arrays can't be added after a RowCompare input qual
7790 * due to limitations in nbtree
7791 */
7792 break;
7793 }
7794 if (eqQualHere)
7795 {
7796 /*
7797 * Don't need to add a skip array for an indexcol that already
7798 * has an '=' qual/equality constraint
7799 */
7800 indexcol++;
7802 }
7803 eqQualHere = false;
7804
7805 while (indexcol < iclause->indexcol)
7806 {
7807 double ndistinct;
7808 bool isdefault = true;
7809
7810 found_array = true;
7811
7812 /*
7813 * A skipped attribute's ndistinct forms the basis of our
7814 * estimate of the total number of "array elements" used by
7815 * its skip array at runtime. Look that up first.
7816 */
7818 ndistinct = get_variable_numdistinct(&vardata, &isdefault);
7819
7820 if (indexcol == 0)
7821 {
7822 /*
7823 * Get an estimate of the leading column's correlation in
7824 * passing (avoids rereading variable stats below)
7825 */
7826 if (HeapTupleIsValid(vardata.statsTuple))
7828 have_correlation = true;
7829 }
7830
7832
7833 /*
7834 * If ndistinct is a default estimate, conservatively assume
7835 * that no skipping will happen at runtime
7836 */
7837 if (isdefault)
7838 {
7839 num_sa_scans = num_sa_scans_prev_cols;
7840 break; /* done building indexBoundQuals */
7841 }
7842
7843 /*
7844 * Apply indexcol's indexSkipQuals selectivity to ndistinct
7845 */
7846 if (indexSkipQuals != NIL)
7847 {
7850
7851 /*
7852 * If the index is partial, AND the index predicate with
7853 * the index-bound quals to produce a more accurate idea
7854 * of the number of distinct values for prior indexcol
7855 */
7858
7860 index->rel->relid,
7861 JOIN_INNER,
7862 NULL);
7863
7864 /*
7865 * If ndistinctfrac is selective (on its own), the scan is
7866 * unlikely to benefit from repositioning itself using
7867 * later quals. Do not allow iclause->indexcol's quals to
7868 * be added to indexBoundQuals (it would increase descent
7869 * costs, without lowering numIndexTuples costs by much).
7870 */
7872 {
7873 num_sa_scans = num_sa_scans_prev_cols;
7874 break; /* done building indexBoundQuals */
7875 }
7876
7877 /* Adjust ndistinct downward */
7878 ndistinct = rint(ndistinct * ndistinctfrac);
7879 ndistinct = Max(ndistinct, 1);
7880 }
7881
7882 /*
7883 * When there's no inequality quals, account for the need to
7884 * find an initial value by counting -inf/+inf as a value.
7885 *
7886 * We don't charge anything extra for possible next/prior key
7887 * index probes, which are sometimes used to find the next
7888 * valid skip array element (ahead of using the located
7889 * element value to relocate the scan to the next position
7890 * that might contain matching tuples). It seems hard to do
7891 * better here. Use of the skip support infrastructure often
7892 * avoids most next/prior key probes. But even when it can't,
7893 * there's a decent chance that most individual next/prior key
7894 * probes will locate a leaf page whose key space overlaps all
7895 * of the scan's keys (even the lower-order keys) -- which
7896 * also avoids the need for a separate, extra index descent.
7897 * Note also that these probes are much cheaper than non-probe
7898 * primitive index scans: they're reliably very selective.
7899 */
7900 if (indexSkipQuals == NIL)
7901 ndistinct += 1;
7902
7903 /*
7904 * Update num_sa_scans estimate by multiplying by ndistinct.
7905 *
7906 * We make the pessimistic assumption that there is no
7907 * naturally occurring cross-column correlation. This is
7908 * often wrong, but it seems best to err on the side of not
7909 * expecting skipping to be helpful...
7910 */
7911 num_sa_scans *= ndistinct;
7912
7913 /*
7914 * ...but back out of adding this latest group of 1 or more
7915 * skip arrays when num_sa_scans exceeds the total number of
7916 * index pages (revert to num_sa_scans from before indexcol).
7917 * This causes a sharp discontinuity in cost (as a function of
7918 * the indexcol's ndistinct), but that is representative of
7919 * actual runtime costs.
7920 *
7921 * Note that skipping is helpful when each primitive index
7922 * scan only manages to skip over 1 or 2 irrelevant leaf pages
7923 * on average. Skip arrays bring savings in CPU costs due to
7924 * the scan not needing to evaluate indexquals against every
7925 * tuple, which can greatly exceed any savings in I/O costs.
7926 * This test is a test of whether num_sa_scans implies that
7927 * we're past the point where the ability to skip ceases to
7928 * lower the scan's costs (even qual evaluation CPU costs).
7929 */
7930 if (index->pages < num_sa_scans)
7931 {
7932 num_sa_scans = num_sa_scans_prev_cols;
7933 break; /* done building indexBoundQuals */
7934 }
7935
7936 indexcol++;
7938 }
7939
7940 /*
7941 * Finished considering the need to add skip arrays to bridge an
7942 * initial eqQualHere gap between the old and new index columns
7943 * (or there was no initial eqQualHere gap in the first place).
7944 *
7945 * If an initial gap could not be bridged, then new column's quals
7946 * (i.e. iclause->indexcol's quals) won't go into indexBoundQuals,
7947 * and so won't affect our final numIndexTuples estimate.
7948 */
7949 if (indexcol != iclause->indexcol)
7950 break; /* done building indexBoundQuals */
7951 }
7952
7953 Assert(indexcol == iclause->indexcol);
7954
7955 /* Examine each indexqual associated with this index clause */
7956 foreach(lc2, iclause->indexquals)
7957 {
7959 Expr *clause = rinfo->clause;
7960 Oid clause_op = InvalidOid;
7961 int op_strategy;
7962
7963 if (IsA(clause, OpExpr))
7964 {
7965 OpExpr *op = (OpExpr *) clause;
7966
7967 clause_op = op->opno;
7968 }
7969 else if (IsA(clause, RowCompareExpr))
7970 {
7971 RowCompareExpr *rc = (RowCompareExpr *) clause;
7972
7973 clause_op = linitial_oid(rc->opnos);
7974 found_row_compare = true;
7975 }
7976 else if (IsA(clause, ScalarArrayOpExpr))
7977 {
7978 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
7979 Node *other_operand = (Node *) lsecond(saop->args);
7981
7982 clause_op = saop->opno;
7983 found_array = true;
7984 /* estimate SA descents by indexBoundQuals only */
7985 if (alength > 1)
7986 num_sa_scans *= alength;
7987 }
7988 else if (IsA(clause, NullTest))
7989 {
7990 NullTest *nt = (NullTest *) clause;
7991
7992 if (nt->nulltesttype == IS_NULL)
7993 {
7994 found_is_null_op = true;
7995 /* IS NULL is like = for selectivity/skip scan purposes */
7996 eqQualHere = true;
7997 }
7998 }
7999 else
8000 elog(ERROR, "unsupported indexqual type: %d",
8001 (int) nodeTag(clause));
8002
8003 /* check for equality operator */
8004 if (OidIsValid(clause_op))
8005 {
8006 op_strategy = get_op_opfamily_strategy(clause_op,
8007 index->opfamily[indexcol]);
8008 Assert(op_strategy != 0); /* not a member of opfamily?? */
8009 if (op_strategy == BTEqualStrategyNumber)
8010 eqQualHere = true;
8011 }
8012
8014
8015 /*
8016 * We apply inequality selectivities to estimate index descent
8017 * costs with scans that use skip arrays. Save this indexcol's
8018 * RestrictInfos if it looks like they'll be needed for that.
8019 */
8020 if (!eqQualHere && !found_row_compare &&
8021 indexcol < index->nkeycolumns - 1)
8023 }
8024 }
8025
8026 /*
8027 * If index is unique and we found an '=' clause for each column, we can
8028 * just assume numIndexTuples = 1 and skip the expensive
8029 * clauselist_selectivity calculations. However, an array or NullTest
8030 * always invalidates that theory (even when eqQualHere has been set).
8031 */
8032 if (index->unique &&
8033 indexcol == index->nkeycolumns - 1 &&
8034 eqQualHere &&
8035 !found_array &&
8037 numIndexTuples = 1.0;
8038 else
8039 {
8042
8043 /*
8044 * If the index is partial, AND the index predicate with the
8045 * index-bound quals to produce a more accurate idea of the number of
8046 * rows covered by the bound conditions.
8047 */
8049
8051 index->rel->relid,
8052 JOIN_INNER,
8053 NULL);
8054 numIndexTuples = btreeSelectivity * index->rel->tuples;
8055
8056 /*
8057 * btree automatically combines individual array element primitive
8058 * index scans whenever the tuples covered by the next set of array
8059 * keys are close to tuples covered by the current set. That puts a
8060 * natural ceiling on the worst case number of descents -- there
8061 * cannot possibly be more than one descent per leaf page scanned.
8062 *
8063 * Clamp the number of descents to at most 1/3 the number of index
8064 * pages. This avoids implausibly high estimates with low selectivity
8065 * paths, where scans usually require only one or two descents. This
8066 * is most likely to help when there are several SAOP clauses, where
8067 * naively accepting the total number of distinct combinations of
8068 * array elements as the number of descents would frequently lead to
8069 * wild overestimates.
8070 *
8071 * We somewhat arbitrarily don't just make the cutoff the total number
8072 * of leaf pages (we make it 1/3 the total number of pages instead) to
8073 * give the btree code credit for its ability to continue on the leaf
8074 * level with low selectivity scans.
8075 *
8076 * Note: num_sa_scans includes both ScalarArrayOp array elements and
8077 * skip array elements whose qual affects our numIndexTuples estimate.
8078 */
8079 num_sa_scans = Min(num_sa_scans, ceil(index->pages * 0.3333333));
8080 num_sa_scans = Max(num_sa_scans, 1);
8081
8082 /*
8083 * As in genericcostestimate(), we have to adjust for any array quals
8084 * included in indexBoundQuals, and then round to integer.
8085 *
8086 * It is tempting to make genericcostestimate behave as if array
8087 * clauses work in almost the same way as scalar operators during
8088 * btree scans, making the top-level scan look like a continuous scan
8089 * (as opposed to num_sa_scans-many primitive index scans). After
8090 * all, btree scans mostly work like that at runtime. However, such a
8091 * scheme would badly bias genericcostestimate's simplistic approach
8092 * to calculating numIndexPages through prorating.
8093 *
8094 * Stick with the approach taken by non-native SAOP scans for now.
8095 * genericcostestimate will use the Mackert-Lohman formula to
8096 * compensate for repeat page fetches, even though that definitely
8097 * won't happen during btree scans (not for leaf pages, at least).
8098 * We're usually very pessimistic about the number of primitive index
8099 * scans that will be required, but it's not clear how to do better.
8100 */
8101 numIndexTuples = rint(numIndexTuples / num_sa_scans);
8102 }
8103
8104 /*
8105 * Now do generic index cost estimation.
8106 *
8107 * While we expended effort to make realistic estimates of numIndexTuples
8108 * and num_sa_scans, we are content to count only the btree metapage as
8109 * non-leaf. btree fanout is typically high enough that upper pages are
8110 * few relative to leaf pages, so accounting for them would move the
8111 * estimates at most a percent or two. Given the uncertainty in just how
8112 * many upper pages exist in a particular index, we'll skip trying to
8113 * handle that.
8114 */
8115 costs.numIndexTuples = numIndexTuples;
8116 costs.num_sa_scans = num_sa_scans;
8117 costs.numNonLeafPages = 1;
8118
8119 genericcostestimate(root, path, loop_count, &costs);
8120
8121 /*
8122 * Add a CPU-cost component to represent the costs of initial btree
8123 * descent. We don't charge any I/O cost for touching upper btree levels,
8124 * since they tend to stay in cache, but we still have to do about log2(N)
8125 * comparisons to descend a btree of N leaf tuples. We charge one
8126 * cpu_operator_cost per comparison.
8127 *
8128 * If there are SAOP or skip array keys, charge this once per estimated
8129 * index descent. The ones after the first one are not startup cost so
8130 * far as the overall plan goes, so just add them to "total" cost.
8131 */
8132 if (index->tuples > 1) /* avoid computing log(0) */
8133 {
8134 descentCost = ceil(log(index->tuples) / log(2.0)) * cpu_operator_cost;
8136 costs.indexTotalCost += costs.num_sa_scans * descentCost;
8137 }
8138
8139 /*
8140 * Even though we're not charging I/O cost for touching upper btree pages,
8141 * it's still reasonable to charge some CPU cost per page descended
8142 * through. Moreover, if we had no such charge at all, bloated indexes
8143 * would appear to have the same search cost as unbloated ones, at least
8144 * in cases where only a single leaf page is expected to be visited. This
8145 * cost is somewhat arbitrarily set at 50x cpu_operator_cost per page
8146 * touched. The number of such pages is btree tree height plus one (ie,
8147 * we charge for the leaf page too). As above, charge once per estimated
8148 * SAOP/skip array descent.
8149 */
8152 costs.indexTotalCost += costs.num_sa_scans * descentCost;
8153
8154 if (!have_correlation)
8155 {
8157 if (HeapTupleIsValid(vardata.statsTuple))
8160 }
8161 else
8162 {
8163 /* btcost_correlation already called earlier on */
8165 }
8166
8167 *indexStartupCost = costs.indexStartupCost;
8168 *indexTotalCost = costs.indexTotalCost;
8169 *indexSelectivity = costs.indexSelectivity;
8170 *indexCorrelation = costs.indexCorrelation;
8171 *indexPages = costs.numIndexPages;
8172}
#define OidIsValid(objectId)
Definition c.h:858
List * lappend(List *list, void *datum)
Definition list.c:339
int get_op_opfamily_strategy(Oid opno, Oid opfamily)
Definition lsyscache.c:87
#define IsA(nodeptr, _type_)
Definition nodes.h:164
#define nodeTag(nodeptr)
Definition nodes.h:139
double Selectivity
Definition nodes.h:260
#define NIL
Definition pg_list.h:68
#define lsecond(l)
Definition pg_list.h:183
#define linitial_oid(l)
Definition pg_list.h:180
unsigned int Oid
@ IS_NULL
Definition primnodes.h:1980
List * add_predicate_to_index_quals(IndexOptInfo *index, List *indexQuals)
Definition selfuncs.c:7635
#define DEFAULT_PAGE_CPU_MULTIPLIER
Definition selfuncs.c:144
double estimate_array_length(PlannerInfo *root, Node *arrayexpr)
Definition selfuncs.c:2240
void genericcostestimate(PlannerInfo *root, IndexPath *path, double loop_count, GenericCosts *costs)
Definition selfuncs.c:7410
static void examine_indexcol_variable(PlannerInfo *root, IndexOptInfo *index, int indexcol, VariableStatData *vardata)
Definition selfuncs.c:6508
static double btcost_correlation(IndexOptInfo *index, VariableStatData *vardata)
Definition selfuncs.c:7666
double get_variable_numdistinct(VariableStatData *vardata, bool *isdefault)
Definition selfuncs.c:6611
#define DEFAULT_RANGE_INEQ_SEL
Definition selfuncs.h:40
#define BTEqualStrategyNumber
Definition stratnum.h:31
Selectivity indexSelectivity
Definition selfuncs.h:135
BlockNumber numNonLeafPages
Definition selfuncs.h:143
Cost indexStartupCost
Definition selfuncs.h:133
double indexCorrelation
Definition selfuncs.h:136
double num_sa_scans
Definition selfuncs.h:142
Cost indexTotalCost
Definition selfuncs.h:134
double numIndexPages
Definition selfuncs.h:139
double numIndexTuples
Definition selfuncs.h:140
Definition nodes.h:135
Oid opno
Definition primnodes.h:851
Expr * clause
Definition pathnodes.h:2901

References add_predicate_to_index_quals(), ScalarArrayOpExpr::args, Assert, btcost_correlation(), BTEqualStrategyNumber, RestrictInfo::clause, clauselist_selectivity(), cpu_operator_cost, DEFAULT_PAGE_CPU_MULTIPLIER, DEFAULT_RANGE_INEQ_SEL, elog, ERROR, estimate_array_length(), examine_indexcol_variable(), fb(), genericcostestimate(), get_op_opfamily_strategy(), get_variable_numdistinct(), HeapTupleIsValid, IndexPath::indexclauses, GenericCosts::indexCorrelation, IndexPath::indexinfo, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, InvalidOid, IS_NULL, IsA, JOIN_INNER, lappend(), lfirst_node, linitial_oid, lsecond, Max, Min, NIL, nodeTag, GenericCosts::num_sa_scans, GenericCosts::numIndexPages, GenericCosts::numIndexTuples, GenericCosts::numNonLeafPages, OidIsValid, OpExpr::opno, ScalarArrayOpExpr::opno, ReleaseVariableStats, and root.

Referenced by bthandler().

◆ gincostestimate()

void gincostestimate ( struct PlannerInfo *root,
struct IndexPath *path,
double loop_count,
Cost *indexStartupCost,
Cost *indexTotalCost,
Selectivity *indexSelectivity,
double *indexCorrelation,
double *indexPages 
)
extern

Definition at line 8635 of file selfuncs.c.

8639{
8640 IndexOptInfo *index = path->indexinfo;
8643 double numPages = index->pages,
8644 numTuples = index->tuples;
8645 double numEntryPages,
8648 numEntries;
8649 GinQualCounts counts;
8650 bool matchPossible;
8651 bool fullIndexScan;
8652 double partialScale;
8653 double entryPagesFetched,
8656 double qual_op_cost,
8658 spc_random_page_cost,
8661 Relation indexRel;
8663 ListCell *lc;
8664 int i;
8665
8666 /*
8667 * Obtain statistical information from the meta page, if possible. Else
8668 * set ginStats to zeroes, and we'll cope below.
8669 */
8670 if (!index->hypothetical)
8671 {
8672 /* Lock should have already been obtained in plancat.c */
8673 indexRel = index_open(index->indexoid, NoLock);
8674 ginGetStats(indexRel, &ginStats);
8675 index_close(indexRel, NoLock);
8676 }
8677 else
8678 {
8679 memset(&ginStats, 0, sizeof(ginStats));
8680 }
8681
8682 /*
8683 * Assuming we got valid (nonzero) stats at all, nPendingPages can be
8684 * trusted, but the other fields are data as of the last VACUUM. We can
8685 * scale them up to account for growth since then, but that method only
8686 * goes so far; in the worst case, the stats might be for a completely
8687 * empty index, and scaling them will produce pretty bogus numbers.
8688 * Somewhat arbitrarily, set the cutoff for doing scaling at 4X growth; if
8689 * it's grown more than that, fall back to estimating things only from the
8690 * assumed-accurate index size. But we'll trust nPendingPages in any case
8691 * so long as it's not clearly insane, ie, more than the index size.
8692 */
8693 if (ginStats.nPendingPages < numPages)
8694 numPendingPages = ginStats.nPendingPages;
8695 else
8696 numPendingPages = 0;
8697
8698 if (numPages > 0 && ginStats.nTotalPages <= numPages &&
8699 ginStats.nTotalPages > numPages / 4 &&
8700 ginStats.nEntryPages > 0 && ginStats.nEntries > 0)
8701 {
8702 /*
8703 * OK, the stats seem close enough to sane to be trusted. But we
8704 * still need to scale them by the ratio numPages / nTotalPages to
8705 * account for growth since the last VACUUM.
8706 */
8707 double scale = numPages / ginStats.nTotalPages;
8708
8709 numEntryPages = ceil(ginStats.nEntryPages * scale);
8710 numDataPages = ceil(ginStats.nDataPages * scale);
8711 numEntries = ceil(ginStats.nEntries * scale);
8712 /* ensure we didn't round up too much */
8716 }
8717 else
8718 {
8719 /*
8720 * We might get here because it's a hypothetical index, or an index
8721 * created pre-9.1 and never vacuumed since upgrading (in which case
8722 * its stats would read as zeroes), or just because it's grown too
8723 * much since the last VACUUM for us to put our faith in scaling.
8724 *
8725 * Invent some plausible internal statistics based on the index page
8726 * count (and clamp that to at least 10 pages, just in case). We
8727 * estimate that 90% of the index is entry pages, and the rest is data
8728 * pages. Estimate 100 entries per entry page; this is rather bogus
8729 * since it'll depend on the size of the keys, but it's more robust
8730 * than trying to predict the number of entries per heap tuple.
8731 */
8732 numPages = Max(numPages, 10);
8736 }
8737
8738 /* In an empty index, numEntries could be zero. Avoid divide-by-zero */
8739 if (numEntries < 1)
8740 numEntries = 1;
8741
8742 /*
8743 * If the index is partial, AND the index predicate with the index-bound
8744 * quals to produce a more accurate idea of the number of rows covered by
8745 * the bound conditions.
8746 */
8748
8749 /* Estimate the fraction of main-table tuples that will be visited */
8750 *indexSelectivity = clauselist_selectivity(root, selectivityQuals,
8751 index->rel->relid,
8752 JOIN_INNER,
8753 NULL);
8754
8755 /* fetch estimated page cost for tablespace containing index */
8756 get_tablespace_page_costs(index->reltablespace,
8757 &spc_random_page_cost,
8758 NULL);
8759
8760 /*
8761 * Generic assumption about index correlation: there isn't any.
8762 */
8763 *indexCorrelation = 0.0;
8764
8765 /*
8766 * Examine quals to estimate number of search entries & partial matches
8767 */
8768 memset(&counts, 0, sizeof(counts));
8769 counts.arrayScans = 1;
8770 matchPossible = true;
8771
8772 foreach(lc, path->indexclauses)
8773 {
8775 ListCell *lc2;
8776
8777 foreach(lc2, iclause->indexquals)
8778 {
8780 Expr *clause = rinfo->clause;
8781
8782 if (IsA(clause, OpExpr))
8783 {
8785 index,
8786 iclause->indexcol,
8787 (OpExpr *) clause,
8788 &counts);
8789 if (!matchPossible)
8790 break;
8791 }
8792 else if (IsA(clause, ScalarArrayOpExpr))
8793 {
8795 index,
8796 iclause->indexcol,
8797 (ScalarArrayOpExpr *) clause,
8798 numEntries,
8799 &counts);
8800 if (!matchPossible)
8801 break;
8802 }
8803 else
8804 {
8805 /* shouldn't be anything else for a GIN index */
8806 elog(ERROR, "unsupported GIN indexqual type: %d",
8807 (int) nodeTag(clause));
8808 }
8809 }
8810 }
8811
8812 /* Fall out if there were any provably-unsatisfiable quals */
8813 if (!matchPossible)
8814 {
8815 *indexStartupCost = 0;
8816 *indexTotalCost = 0;
8817 *indexSelectivity = 0;
8818 return;
8819 }
8820
8821 /*
8822 * If an attribute has a full scan and at the same time doesn't have a
8823 * normal scan, then we'll have to scan all non-null entries of that attribute.
8824 * Currently, we don't have per-attribute statistics for GIN. Thus, we
8825 * must assume the whole GIN index has to be scanned in this case.
8826 */
8827 fullIndexScan = false;
8828 for (i = 0; i < index->nkeycolumns; i++)
8829 {
8830 if (counts.attHasFullScan[i] && !counts.attHasNormalScan[i])
8831 {
8832 fullIndexScan = true;
8833 break;
8834 }
8835 }
8836
8837 if (fullIndexScan || indexQuals == NIL)
8838 {
8839 /*
8840 * Full index scan will be required. We treat this as if every key in
8841 * the index had been listed in the query; is that reasonable?
8842 */
8843 counts.partialEntries = 0;
8844 counts.exactEntries = numEntries;
8845 counts.searchEntries = numEntries;
8846 }
8847
8848 /* Will we have more than one iteration of a nestloop scan? */
8850
8851 /*
8852 * Compute cost to begin scan, first of all, pay attention to pending
8853 * list.
8854 */
8856
8857 /*
8858 * Estimate number of entry pages read. We need to do
8859 * counts.searchEntries searches. Use a power function as it should be,
8860 * but the number of tuples on leaf pages is usually much greater. Here we include all
8861 * searches in entry tree, including search of first entry in partial
8862 * match algorithm
8863 */
8865
8866 /*
8867 * Add an estimate of entry pages read by the partial match algorithm. It's a
8868 * scan over leaf pages in the entry tree. We don't have any useful stats here,
8869 * so estimate it as proportion. Because counts.partialEntries is really
8870 * pretty bogus (see code above), it's possible that it is more than
8871 * numEntries; clamp the proportion to ensure sanity.
8872 */
8875
8877
8878 /*
8879 * The partial match algorithm reads all data pages before doing the actual scan,
8880 * so it's a startup cost. Again, we don't have any useful stats here, so
8881 * estimate it as proportion.
8882 */
8884
8885 *indexStartupCost = 0;
8886 *indexTotalCost = 0;
8887
8888 /*
8889 * Add a CPU-cost component to represent the costs of initial entry btree
8890 * descent. We don't charge any I/O cost for touching upper btree levels,
8891 * since they tend to stay in cache, but we still have to do about log2(N)
8892 * comparisons to descend a btree of N leaf tuples. We charge one
8893 * cpu_operator_cost per comparison.
8894 *
8895 * If there are ScalarArrayOpExprs, charge this once per SA scan. The
8896 * ones after the first one are not startup cost so far as the overall
8897 * plan is concerned, so add them only to "total" cost.
8898 */
8899 if (numEntries > 1) /* avoid computing log(0) */
8900 {
8902 *indexStartupCost += descentCost * counts.searchEntries;
8903 *indexTotalCost += counts.arrayScans * descentCost * counts.searchEntries;
8904 }
8905
8906 /*
8907 * Add a cpu cost per entry-page fetched. This is not amortized over a
8908 * loop.
8909 */
8912
8913 /*
8914 * Add a cpu cost per data-page fetched. This is also not amortized over a
8915 * loop. Since those are the data pages from the partial match algorithm,
8916 * charge them as startup cost.
8917 */
8919
8920 /*
8921 * Since we add the startup cost to the total cost later on, remove the
8922 * initial arrayscan from the total.
8923 */
8924 *indexTotalCost += dataPagesFetched * (counts.arrayScans - 1) * DEFAULT_PAGE_CPU_MULTIPLIER * cpu_operator_cost;
8925
8926 /*
8927 * Calculate cache effects if more than one scan due to nestloops or array
8928 * quals. The result is pro-rated per nestloop scan, but the array qual
8929 * factor shouldn't be pro-rated (compare genericcostestimate).
8930 */
8931 if (outer_scans > 1 || counts.arrayScans > 1)
8932 {
8943 }
8944
8945 /*
8946 * Here we use random page cost because logically-close pages could be far
8947 * apart on disk.
8948 */
8949 *indexStartupCost += (entryPagesFetched + dataPagesFetched) * spc_random_page_cost;
8950
8951 /*
8952 * Now compute the number of data pages fetched during the scan.
8953 *
8954 * We assume every entry to have the same number of items, and that there
8955 * is no overlap between them. (XXX: tsvector and array opclasses collect
8956 * statistics on the frequency of individual keys; it would be nice to use
8957 * those here.)
8958 */
8960
8961 /*
8962 * If there is a lot of overlap among the entries, in particular if one of
8963 * the entries is very frequent, the above calculation can grossly
8964 * under-estimate. As a simple cross-check, calculate a lower bound based
8965 * on the overall selectivity of the quals. At a minimum, we must read
8966 * one item pointer for each matching entry.
8967 *
8968 * The width of each item pointer varies, based on the level of
8969 * compression. We don't have statistics on that, but an average of
8970 * around 3 bytes per item is fairly typical.
8971 */
8972 dataPagesFetchedBySel = ceil(*indexSelectivity *
8973 (numTuples / (BLCKSZ / 3)));
8976
8977 /* Add one page cpu-cost to the startup cost */
8978 *indexStartupCost += DEFAULT_PAGE_CPU_MULTIPLIER * cpu_operator_cost * counts.searchEntries;
8979
8980 /*
8981 * Add once again a CPU-cost for those data pages, before amortizing for
8982 * cache.
8983 */
8985
8986 /* Account for cache effects, the same as above */
8987 if (outer_scans > 1 || counts.arrayScans > 1)
8988 {
8994 }
8995
8996 /* And apply random_page_cost as the cost per page */
8997 *indexTotalCost += *indexStartupCost +
8998 dataPagesFetched * spc_random_page_cost;
8999
9000 /*
9001 * Add on index qual eval costs, much as in genericcostestimate. We charge
9002 * cpu but we can disregard indexorderbys, since GIN doesn't support
9003 * those.
9004 */
9007
9008 *indexStartupCost += qual_arg_cost;
9009 *indexTotalCost += qual_arg_cost;
9010
9011 /*
9012 * Add a cpu cost per search entry, corresponding to the actual visited
9013 * entries.
9014 */
9015 *indexTotalCost += (counts.searchEntries * counts.arrayScans) * (qual_op_cost);
9016 /* Now add a cpu cost per tuple in the posting lists / trees */
9017 *indexTotalCost += (numTuples * *indexSelectivity) * (cpu_index_tuple_cost);
9019}
uint32 BlockNumber
Definition block.h:31
double index_pages_fetched(double tuples_fetched, BlockNumber pages, double index_pages, PlannerInfo *root)
Definition costsize.c:897
double cpu_index_tuple_cost
Definition costsize.c:134
void ginGetStats(Relation index, GinStatsData *stats)
Definition ginutil.c:575
int i
Definition isn.c:77
static int list_length(const List *l)
Definition pg_list.h:152
static int scale
Definition pgbench.c:182
static bool gincost_scalararrayopexpr(PlannerInfo *root, IndexOptInfo *index, int indexcol, ScalarArrayOpExpr *clause, double numIndexEntries, GinQualCounts *counts)
Definition selfuncs.c:8519
static bool gincost_opexpr(PlannerInfo *root, IndexOptInfo *index, int indexcol, OpExpr *clause, GinQualCounts *counts)
Definition selfuncs.c:8469
bool attHasNormalScan[INDEX_MAX_KEYS]
Definition selfuncs.c:8342
double exactEntries
Definition selfuncs.c:8344
double arrayScans
Definition selfuncs.c:8346
double partialEntries
Definition selfuncs.c:8343
bool attHasFullScan[INDEX_MAX_KEYS]
Definition selfuncs.c:8341
double searchEntries
Definition selfuncs.c:8345

References add_predicate_to_index_quals(), GinQualCounts::arrayScans, GinQualCounts::attHasFullScan, GinQualCounts::attHasNormalScan, RestrictInfo::clause, clauselist_selectivity(), cpu_index_tuple_cost, cpu_operator_cost, DEFAULT_PAGE_CPU_MULTIPLIER, elog, ERROR, GinQualCounts::exactEntries, fb(), get_quals_from_indexclauses(), get_tablespace_page_costs(), gincost_opexpr(), gincost_scalararrayopexpr(), ginGetStats(), i, index_close(), index_open(), index_other_operands_eval_cost(), index_pages_fetched(), IndexPath::indexclauses, IndexPath::indexinfo, IsA, JOIN_INNER, lfirst_node, list_length(), Max, Min, NIL, nodeTag, NoLock, GinQualCounts::partialEntries, root, scale, and GinQualCounts::searchEntries.

Referenced by ginhandler().

◆ gistcostestimate()

void gistcostestimate ( struct PlannerInfo root,
struct IndexPath path,
double  loop_count,
Cost indexStartupCost,
Cost indexTotalCost,
Selectivity indexSelectivity,
double indexCorrelation,
double indexPages 
)
extern

Definition at line 8220 of file selfuncs.c.

8224{
8225 IndexOptInfo *index = path->indexinfo;
8226 GenericCosts costs = {0};
8228
8229 /* GiST has no metapage, so we treat all pages as leaf pages */
8230
8231 genericcostestimate(root, path, loop_count, &costs);
8232
8233 /*
8234 * We model index descent costs similarly to those for btree, but to do
8235 * that we first need an idea of the tree height. We somewhat arbitrarily
8236 * assume that the fanout is 100, meaning the tree height is at most
8237 * log100(index->pages).
8238 *
8239 * Although this computation isn't really expensive enough to require
8240 * caching, we might as well use index->tree_height to cache it.
8241 */
8242 if (index->tree_height < 0) /* unknown? */
8243 {
8244 if (index->pages > 1) /* avoid computing log(0) */
8245 index->tree_height = (int) (log(index->pages) / log(100.0));
8246 else
8247 index->tree_height = 0;
8248 }
8249
8250 /*
8251 * Add a CPU-cost component to represent the costs of initial descent. We
8252 * just use log(N) here not log2(N) since the branching factor isn't
8253 * necessarily two anyway. As for btree, charge once per SA scan.
8254 */
8255 if (index->tuples > 1) /* avoid computing log(0) */
8256 {
8259 costs.indexTotalCost += costs.num_sa_scans * descentCost;
8260 }
8261
8262 /*
8263 * Likewise add a per-page charge, calculated the same as for btrees.
8264 */
8267 costs.indexTotalCost += costs.num_sa_scans * descentCost;
8268
8269 *indexStartupCost = costs.indexStartupCost;
8270 *indexTotalCost = costs.indexTotalCost;
8271 *indexSelectivity = costs.indexSelectivity;
8272 *indexCorrelation = costs.indexCorrelation;
8273 *indexPages = costs.numIndexPages;
8274}

References cpu_operator_cost, DEFAULT_PAGE_CPU_MULTIPLIER, fb(), genericcostestimate(), GenericCosts::indexCorrelation, IndexPath::indexinfo, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, GenericCosts::num_sa_scans, GenericCosts::numIndexPages, and root.

Referenced by gisthandler().

◆ hashcostestimate()

void hashcostestimate ( struct PlannerInfo root,
struct IndexPath path,
double  loop_count,
Cost indexStartupCost,
Cost indexTotalCost,
Selectivity indexSelectivity,
double indexCorrelation,
double indexPages 
)
extern

Definition at line 8175 of file selfuncs.c.

8179{
8180 GenericCosts costs = {0};
8181
8182 /* As in btcostestimate, count only the metapage as non-leaf */
8183 costs.numNonLeafPages = 1;
8184
8185 genericcostestimate(root, path, loop_count, &costs);
8186
8187 /*
8188 * A hash index has no descent costs as such, since the index AM can go
8189 * directly to the target bucket after computing the hash value. There
8190 * are a couple of other hash-specific costs that we could conceivably add
8191 * here, though:
8192 *
8193 * Ideally we'd charge spc_random_page_cost for each page in the target
8194 * bucket, not just the numIndexPages pages that genericcostestimate
8195 * thought we'd visit. However in most cases we don't know which bucket
8196 * that will be. There's no point in considering the average bucket size
8197 * because the hash AM makes sure that's always one page.
8198 *
8199 * Likewise, we could consider charging some CPU for each index tuple in
8200 * the bucket, if we knew how many there were. But the per-tuple cost is
8201 * just a hash value comparison, not a general datatype-dependent
8202 * comparison, so any such charge ought to be quite a bit less than
8203 * cpu_operator_cost; which makes it probably not worth worrying about.
8204 *
8205 * A bigger issue is that chance hash-value collisions will result in
8206 * wasted probes into the heap. We don't currently attempt to model this
8207 * cost on the grounds that it's rare, but maybe it's not rare enough.
8208 * (Any fix for this ought to consider the generic lossy-operator problem,
8209 * though; it's not entirely hash-specific.)
8210 */
8211
8212 *indexStartupCost = costs.indexStartupCost;
8213 *indexTotalCost = costs.indexTotalCost;
8214 *indexSelectivity = costs.indexSelectivity;
8215 *indexCorrelation = costs.indexCorrelation;
8216 *indexPages = costs.numIndexPages;
8217}

References fb(), genericcostestimate(), GenericCosts::indexCorrelation, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, GenericCosts::numIndexPages, GenericCosts::numNonLeafPages, and root.

Referenced by hashhandler().

◆ spgcostestimate()

void spgcostestimate ( struct PlannerInfo root,
struct IndexPath path,
double  loop_count,
Cost indexStartupCost,
Cost indexTotalCost,
Selectivity indexSelectivity,
double indexCorrelation,
double indexPages 
)
extern

Definition at line 8277 of file selfuncs.c.

8281{
8282 IndexOptInfo *index = path->indexinfo;
8283 GenericCosts costs = {0};
8285
8286 /* As in btcostestimate, count only the metapage as non-leaf */
8287 costs.numNonLeafPages = 1;
8288
8289 genericcostestimate(root, path, loop_count, &costs);
8290
8291 /*
8292 * We model index descent costs similarly to those for btree, but to do
8293 * that we first need an idea of the tree height. We somewhat arbitrarily
8294 * assume that the fanout is 100, meaning the tree height is at most
8295 * log100(index->pages).
8296 *
8297 * Although this computation isn't really expensive enough to require
8298 * caching, we might as well use index->tree_height to cache it.
8299 */
8300 if (index->tree_height < 0) /* unknown? */
8301 {
8302 if (index->pages > 1) /* avoid computing log(0) */
8303 index->tree_height = (int) (log(index->pages) / log(100.0));
8304 else
8305 index->tree_height = 0;
8306 }
8307
8308 /*
8309 * Add a CPU-cost component to represent the costs of initial descent. We
8310 * just use log(N) here not log2(N) since the branching factor isn't
8311 * necessarily two anyway. As for btree, charge once per SA scan.
8312 */
8313 if (index->tuples > 1) /* avoid computing log(0) */
8314 {
8317 costs.indexTotalCost += costs.num_sa_scans * descentCost;
8318 }
8319
8320 /*
8321 * Likewise add a per-page charge, calculated the same as for btrees.
8322 */
8325 costs.indexTotalCost += costs.num_sa_scans * descentCost;
8326
8327 *indexStartupCost = costs.indexStartupCost;
8328 *indexTotalCost = costs.indexTotalCost;
8329 *indexSelectivity = costs.indexSelectivity;
8330 *indexCorrelation = costs.indexCorrelation;
8331 *indexPages = costs.numIndexPages;
8332}

References cpu_operator_cost, DEFAULT_PAGE_CPU_MULTIPLIER, fb(), genericcostestimate(), GenericCosts::indexCorrelation, IndexPath::indexinfo, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, GenericCosts::num_sa_scans, GenericCosts::numIndexPages, GenericCosts::numNonLeafPages, and root.

Referenced by spghandler().