PostgreSQL Source Code git master
index_selfuncs.h File Reference
#include "access/amapi.h"
Include dependency graph for index_selfuncs.h:
This graph shows which files directly or indirectly include this file:


Functions

void brincostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void btcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void hashcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void gistcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void spgcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void gincostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 

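All six estimators share the amcostestimate callback signature declared in access/amapi.h (the sole include above); the planner never calls them directly but reaches them through IndexAmRoutine->amcostestimate. The sketch below shows how an index access method handler would install one of them, using a hypothetical handler name; the in-core handlers (brinhandler(), bthandler(), and so on, listed under each function below) do the equivalent.

#include "postgres.h"

#include "access/amapi.h"
#include "fmgr.h"
#include "nodes/nodes.h"
#include "utils/index_selfuncs.h"

/*
 * Minimal sketch of wiring a cost estimator into an index AM handler.
 * "my_btree_like_handler" is a hypothetical name; only the cost-estimation
 * callback is shown, while a real handler must fill in every other
 * IndexAmRoutine field as well.
 */
PG_FUNCTION_INFO_V1(my_btree_like_handler);

Datum
my_btree_like_handler(PG_FUNCTION_ARGS)
{
    IndexAmRoutine *amroutine = makeNode(IndexAmRoutine);

    /* the planner calls this to cost scans of indexes built by this AM */
    amroutine->amcostestimate = btcostestimate;

    PG_RETURN_POINTER(amroutine);
}
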
Function Documentation

◆ brincostestimate()

void brincostestimate ( struct PlannerInfo *  root,
struct IndexPath *  path,
double  loop_count,
Cost *  indexStartupCost,
Cost *  indexTotalCost,
Selectivity *  indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 8991 of file selfuncs.c.

8995{
8996 IndexOptInfo *index = path->indexinfo;
8997 List *indexQuals = get_quals_from_indexclauses(path->indexclauses);
8998 double numPages = index->pages;
8999 RelOptInfo *baserel = index->rel;
9000 RangeTblEntry *rte = planner_rt_fetch(baserel->relid, root);
9001 Cost spc_seq_page_cost;
9002 Cost spc_random_page_cost;
9003 double qual_arg_cost;
9004 double qualSelectivity;
9005 BrinStatsData statsData;
9006 double indexRanges;
9007 double minimalRanges;
9008 double estimatedRanges;
9009 double selec;
9010 Relation indexRel;
9011 ListCell *l;
9012 VariableStatData vardata;
9013
9014 Assert(rte->rtekind == RTE_RELATION);
9015
9016 /* fetch estimated page cost for the tablespace containing the index */
9017 get_tablespace_page_costs(index->reltablespace,
9018 &spc_random_page_cost,
9019 &spc_seq_page_cost);
9020
9021 /*
9022 * Obtain some data from the index itself, if possible. Otherwise invent
9023 * some plausible internal statistics based on the relation page count.
9024 */
9025 if (!index->hypothetical)
9026 {
9027 /*
9028 * A lock should have already been obtained on the index in plancat.c.
9029 */
9030 indexRel = index_open(index->indexoid, NoLock);
9031 brinGetStats(indexRel, &statsData);
9032 index_close(indexRel, NoLock);
9033
9034 /* work out the actual number of ranges in the index */
9035 indexRanges = Max(ceil((double) baserel->pages /
9036 statsData.pagesPerRange), 1.0);
9037 }
9038 else
9039 {
9040 /*
9041 * Assume default number of pages per range, and estimate the number
9042 * of ranges based on that.
9043 */
9044 indexRanges = Max(ceil((double) baserel->pages /
9045 BRIN_DEFAULT_PAGES_PER_RANGE), 1.0);
9046
9047 statsData.pagesPerRange = BRIN_DEFAULT_PAGES_PER_RANGE;
9048 statsData.revmapNumPages = (indexRanges / REVMAP_PAGE_MAXITEMS) + 1;
9049 }
9050
9051 /*
9052 * Compute index correlation
9053 *
9054 * Because we can use all index quals equally when scanning, we can use
9055 * the largest correlation (in absolute value) among columns used by the
9056 * query. Start at zero, the worst possible case. If we cannot find any
9057 * correlation statistics, we will keep it as 0.
9058 */
9059 *indexCorrelation = 0;
9060
9061 foreach(l, path->indexclauses)
9062 {
9063 IndexClause *iclause = lfirst_node(IndexClause, l);
9064 AttrNumber attnum = index->indexkeys[iclause->indexcol];
9065
9066 /* attempt to lookup stats in relation for this index column */
9067 if (attnum != 0)
9068 {
9069 /* Simple variable -- look to stats for the underlying table */
9070 if (get_relation_stats_hook &&
9071 (*get_relation_stats_hook) (root, rte, attnum, &vardata))
9072 {
9073 /*
9074 * The hook took control of acquiring a stats tuple. If it
9075 * did supply a tuple, it'd better have supplied a freefunc.
9076 */
9077 if (HeapTupleIsValid(vardata.statsTuple) && !vardata.freefunc)
9078 elog(ERROR,
9079 "no function provided to release variable stats with");
9080 }
9081 else
9082 {
9083 vardata.statsTuple =
9084 SearchSysCache3(STATRELATTINH,
9085 ObjectIdGetDatum(rte->relid),
9086 Int16GetDatum(attnum),
9087 BoolGetDatum(false));
9088 vardata.freefunc = ReleaseSysCache;
9089 }
9090 }
9091 else
9092 {
9093 /*
9094 * Looks like we've found an expression column in the index. Let's
9095 * see if there's any stats for it.
9096 */
9097
9098 /* get the attnum from the 0-based index. */
9099 attnum = iclause->indexcol + 1;
9100
9101 if (get_index_stats_hook &&
9102 (*get_index_stats_hook) (root, index->indexoid, attnum, &vardata))
9103 {
9104 /*
9105 * The hook took control of acquiring a stats tuple. If it
9106 * did supply a tuple, it'd better have supplied a freefunc.
9107 */
9108 if (HeapTupleIsValid(vardata.statsTuple) &&
9109 !vardata.freefunc)
9110 elog(ERROR, "no function provided to release variable stats with");
9111 }
9112 else
9113 {
9114 vardata.statsTuple = SearchSysCache3(STATRELATTINH,
9115 ObjectIdGetDatum(index->indexoid),
9116 Int16GetDatum(attnum),
9117 BoolGetDatum(false));
9118 vardata.freefunc = ReleaseSysCache;
9119 }
9120 }
9121
9122 if (HeapTupleIsValid(vardata.statsTuple))
9123 {
9124 AttStatsSlot sslot;
9125
9126 if (get_attstatsslot(&sslot, vardata.statsTuple,
9127 STATISTIC_KIND_CORRELATION, InvalidOid,
9128 ATTSTATSSLOT_NUMBERS))
9129 {
9130 double varCorrelation = 0.0;
9131
9132 if (sslot.nnumbers > 0)
9133 varCorrelation = fabs(sslot.numbers[0]);
9134
9135 if (varCorrelation > *indexCorrelation)
9136 *indexCorrelation = varCorrelation;
9137
9138 free_attstatsslot(&sslot);
9139 }
9140 }
9141
9142 ReleaseVariableStats(vardata);
9143 }
9144
9145 qualSelectivity = clauselist_selectivity(root, indexQuals,
9146 baserel->relid,
9147 JOIN_INNER, NULL);
9148
9149 /*
9150 * Now calculate the minimum possible ranges we could match with if all of
9151 * the rows were in the perfect order in the table's heap.
9152 */
9153 minimalRanges = ceil(indexRanges * qualSelectivity);
9154
9155 /*
9156 * Now estimate the number of ranges that we'll touch by using the
9157 * indexCorrelation from the stats. Careful not to divide by zero (note
9158 * we're using the absolute value of the correlation).
9159 */
9160 if (*indexCorrelation < 1.0e-10)
9161 estimatedRanges = indexRanges;
9162 else
9163 estimatedRanges = Min(minimalRanges / *indexCorrelation, indexRanges);
9164
9165 /* we expect to visit this portion of the table */
9166 selec = estimatedRanges / indexRanges;
9167
9168 CLAMP_PROBABILITY(selec);
9169
9170 *indexSelectivity = selec;
9171
9172 /*
9173 * Compute the index qual costs, much as in genericcostestimate, to add to
9174 * the index costs. We can disregard indexorderbys, since BRIN doesn't
9175 * support those.
9176 */
9177 qual_arg_cost = index_other_operands_eval_cost(root, indexQuals);
9178
9179 /*
9180 * Compute the startup cost as the cost to read the whole revmap
9181 * sequentially, including the cost to execute the index quals.
9182 */
9183 *indexStartupCost =
9184 spc_seq_page_cost * statsData.revmapNumPages * loop_count;
9185 *indexStartupCost += qual_arg_cost;
9186
9187 /*
9188 * To read a BRIN index there might be a bit of back and forth over
9189 * regular pages, as revmap might point to them out of sequential order;
9190 * calculate the total cost as reading the whole index in random order.
9191 */
9192 *indexTotalCost = *indexStartupCost +
9193 spc_random_page_cost * (numPages - statsData.revmapNumPages) * loop_count;
9194
9195 /*
9196 * Charge a small amount per range tuple which we expect to match to. This
9197 * is meant to reflect the costs of manipulating the bitmap. The BRIN scan
9198 * will set a bit for each page in the range when we find a matching
9199 * range, so we must multiply the charge by the number of pages in the
9200 * range.
9201 */
9202 *indexTotalCost += 0.1 * cpu_operator_cost * estimatedRanges *
9203 statsData.pagesPerRange;
9204
9205 *indexPages = index->pages;
9206}
int16 AttrNumber
Definition: attnum.h:21
void brinGetStats(Relation index, BrinStatsData *stats)
Definition: brin.c:1649
#define BRIN_DEFAULT_PAGES_PER_RANGE
Definition: brin.h:40
#define REVMAP_PAGE_MAXITEMS
Definition: brin_page.h:93
#define Min(x, y)
Definition: c.h:1003
#define Max(x, y)
Definition: c.h:997
Selectivity clauselist_selectivity(PlannerInfo *root, List *clauses, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
Definition: clausesel.c:100
double cpu_operator_cost
Definition: costsize.c:134
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:226
Assert(PointerIsAligned(start, uint64))
#define HeapTupleIsValid(tuple)
Definition: htup.h:78
void index_close(Relation relation, LOCKMODE lockmode)
Definition: indexam.c:177
Relation index_open(Oid relationId, LOCKMODE lockmode)
Definition: indexam.c:133
#define NoLock
Definition: lockdefs.h:34
void free_attstatsslot(AttStatsSlot *sslot)
Definition: lsyscache.c:3494
bool get_attstatsslot(AttStatsSlot *sslot, HeapTuple statstuple, int reqkind, Oid reqop, int flags)
Definition: lsyscache.c:3384
#define ATTSTATSSLOT_NUMBERS
Definition: lsyscache.h:44
double Cost
Definition: nodes.h:261
@ JOIN_INNER
Definition: nodes.h:303
@ RTE_RELATION
Definition: parsenodes.h:1070
#define planner_rt_fetch(rti, root)
Definition: pathnodes.h:610
int16 attnum
Definition: pg_attribute.h:74
#define lfirst_node(type, lc)
Definition: pg_list.h:176
static Datum Int16GetDatum(int16 X)
Definition: postgres.h:182
static Datum BoolGetDatum(bool X)
Definition: postgres.h:112
static Datum ObjectIdGetDatum(Oid X)
Definition: postgres.h:262
#define InvalidOid
Definition: postgres_ext.h:37
tree ctl root
Definition: radixtree.h:1857
List * get_quals_from_indexclauses(List *indexclauses)
Definition: selfuncs.c:7311
get_index_stats_hook_type get_index_stats_hook
Definition: selfuncs.c:184
Cost index_other_operands_eval_cost(PlannerInfo *root, List *indexquals)
Definition: selfuncs.c:7341
get_relation_stats_hook_type get_relation_stats_hook
Definition: selfuncs.c:183
#define ReleaseVariableStats(vardata)
Definition: selfuncs.h:101
#define CLAMP_PROBABILITY(p)
Definition: selfuncs.h:63
void get_tablespace_page_costs(Oid spcid, double *spc_random_page_cost, double *spc_seq_page_cost)
Definition: spccache.c:182
float4 * numbers
Definition: lsyscache.h:57
int nnumbers
Definition: lsyscache.h:58
BlockNumber revmapNumPages
Definition: brin.h:36
BlockNumber pagesPerRange
Definition: brin.h:35
AttrNumber indexcol
Definition: pathnodes.h:2009
List * indexclauses
Definition: pathnodes.h:1959
IndexOptInfo * indexinfo
Definition: pathnodes.h:1958
Definition: pg_list.h:54
RTEKind rtekind
Definition: parsenodes.h:1105
Index relid
Definition: pathnodes.h:973
BlockNumber pages
Definition: pathnodes.h:999
HeapTuple statsTuple
Definition: selfuncs.h:89
void(* freefunc)(HeapTuple tuple)
Definition: selfuncs.h:91
Definition: type.h:96
void ReleaseSysCache(HeapTuple tuple)
Definition: syscache.c:264
HeapTuple SearchSysCache3(int cacheId, Datum key1, Datum key2, Datum key3)
Definition: syscache.c:240

References Assert(), attnum, ATTSTATSSLOT_NUMBERS, BoolGetDatum(), BRIN_DEFAULT_PAGES_PER_RANGE, brinGetStats(), CLAMP_PROBABILITY, clauselist_selectivity(), cpu_operator_cost, elog, ERROR, free_attstatsslot(), VariableStatData::freefunc, get_attstatsslot(), get_index_stats_hook, get_quals_from_indexclauses(), get_relation_stats_hook, get_tablespace_page_costs(), HeapTupleIsValid, index_close(), index_open(), index_other_operands_eval_cost(), IndexPath::indexclauses, IndexClause::indexcol, IndexPath::indexinfo, Int16GetDatum(), InvalidOid, JOIN_INNER, lfirst_node, Max, Min, AttStatsSlot::nnumbers, NoLock, AttStatsSlot::numbers, ObjectIdGetDatum(), RelOptInfo::pages, BrinStatsData::pagesPerRange, planner_rt_fetch, ReleaseSysCache(), ReleaseVariableStats, RelOptInfo::relid, REVMAP_PAGE_MAXITEMS, BrinStatsData::revmapNumPages, root, RTE_RELATION, RangeTblEntry::rtekind, SearchSysCache3(), and VariableStatData::statsTuple.

Referenced by brinhandler().
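
The range arithmetic above reduces to a few lines: indexRanges is the heap size divided by pagesPerRange, minimalRanges scales that by the qual selectivity, and estimatedRanges inflates the minimum by the inverse of the best column correlation while never exceeding the whole index. Below is a stand-alone sketch of that arithmetic with hypothetical inputs (100,000 heap pages, 128 pages per range, qual selectivity 0.01, correlation 0.5); fmax/fmin stand in for the Max/Min macros used in the listing.

#include <math.h>
#include <stdio.h>

/*
 * Stand-alone illustration of the BRIN range arithmetic, with hypothetical
 * numbers: a 100,000-page heap, 128 pages per range, qual selectivity 0.01,
 * and a best column correlation of 0.5.
 */
int
main(void)
{
    double  heapPages = 100000.0;
    double  pagesPerRange = 128.0;
    double  qualSelectivity = 0.01;
    double  indexCorrelation = 0.5;

    double  indexRanges = fmax(ceil(heapPages / pagesPerRange), 1.0);
    double  minimalRanges = ceil(indexRanges * qualSelectivity);
    double  estimatedRanges;
    double  selec;

    /* same guard against dividing by a near-zero correlation */
    if (indexCorrelation < 1.0e-10)
        estimatedRanges = indexRanges;
    else
        estimatedRanges = fmin(minimalRanges / indexCorrelation, indexRanges);

    selec = estimatedRanges / indexRanges;  /* fraction of the table visited */

    printf("ranges=%.0f minimal=%.0f estimated=%.0f selectivity=%.4f\n",
           indexRanges, minimalRanges, estimatedRanges, selec);
    return 0;
}

With these inputs the scan is estimated to visit 16 of 782 ranges, about 2% of the heap, which is double the perfectly-correlated minimum of 8 ranges.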

◆ btcostestimate()

void btcostestimate ( struct PlannerInfo *  root,
struct IndexPath *  path,
double  loop_count,
Cost *  indexStartupCost,
Cost *  indexTotalCost,
Selectivity *  indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 7686 of file selfuncs.c.

7690{
7691 IndexOptInfo *index = path->indexinfo;
7692 GenericCosts costs = {0};
7693 VariableStatData vardata = {0};
7694 double numIndexTuples;
7695 Cost descentCost;
7696 List *indexBoundQuals;
7697 List *indexSkipQuals;
7698 int indexcol;
7699 bool eqQualHere;
7700 bool found_row_compare;
7701 bool found_array;
7702 bool found_is_null_op;
7703 bool have_correlation = false;
7704 double num_sa_scans;
7705 double correlation = 0.0;
7706 ListCell *lc;
7707
7708 /*
7709 * For a btree scan, only leading '=' quals plus inequality quals for the
7710 * immediately next attribute contribute to index selectivity (these are
7711 * the "boundary quals" that determine the starting and stopping points of
7712 * the index scan). Additional quals can suppress visits to the heap, so
7713 * it's OK to count them in indexSelectivity, but they should not count
7714 * for estimating numIndexTuples. So we must examine the given indexquals
7715 * to find out which ones count as boundary quals. We rely on the
7716 * knowledge that they are given in index column order. Note that nbtree
7717 * preprocessing can add skip arrays that act as leading '=' quals in the
7718 * absence of ordinary input '=' quals, so in practice _most_ input quals
7719 * are able to act as index bound quals (which we take into account here).
7720 *
7721 * For a RowCompareExpr, we consider only the first column, just as
7722 * rowcomparesel() does.
7723 *
7724 * If there's a SAOP or skip array in the quals, we'll actually perform up
7725 * to N index descents (not just one), but the underlying array key's
7726 * operator can be considered to act the same as it normally does.
7727 */
7728 indexBoundQuals = NIL;
7729 indexSkipQuals = NIL;
7730 indexcol = 0;
7731 eqQualHere = false;
7732 found_row_compare = false;
7733 found_array = false;
7734 found_is_null_op = false;
7735 num_sa_scans = 1;
7736 foreach(lc, path->indexclauses)
7737 {
7738 IndexClause *iclause = lfirst_node(IndexClause, lc);
7739 ListCell *lc2;
7740
7741 if (indexcol < iclause->indexcol)
7742 {
7743 double num_sa_scans_prev_cols = num_sa_scans;
7744
7745 /*
7746 * Beginning of a new column's quals.
7747 *
7748 * Skip scans use skip arrays, which are ScalarArrayOp style
7749 * arrays that generate their elements procedurally and on demand.
7750 * Given a multi-column index on "(a, b)", and an SQL WHERE clause
7751 * "WHERE b = 42", a skip scan will effectively use an indexqual
7752 * "WHERE a = ANY('{every col a value}') AND b = 42". (Obviously,
7753 * the array on "a" must also return "IS NULL" matches, since our
7754 * WHERE clause used no strict operator on "a").
7755 *
7756 * Here we consider how nbtree will backfill skip arrays for any
7757 * index columns that lacked an '=' qual. This maintains our
7758 * num_sa_scans estimate, and determines if this new column (the
7759 * "iclause->indexcol" column, not the prior "indexcol" column)
7760 * can have its RestrictInfos/quals added to indexBoundQuals.
7761 *
7762 * We'll need to handle columns that have inequality quals, where
7763 * the skip array generates values from a range constrained by the
7764 * quals (not every possible value). We've been maintaining
7765 * indexSkipQuals to help with this; it will now contain all of
7766 * the prior column's quals (that is, indexcol's quals) when they
7767 * might be used for this.
7768 */
7769 if (found_row_compare)
7770 {
7771 /*
7772 * Skip arrays can't be added after a RowCompare input qual
7773 * due to limitations in nbtree
7774 */
7775 break;
7776 }
7777 if (eqQualHere)
7778 {
7779 /*
7780 * Don't need to add a skip array for an indexcol that already
7781 * has an '=' qual/equality constraint
7782 */
7783 indexcol++;
7784 indexSkipQuals = NIL;
7785 }
7786 eqQualHere = false;
7787
7788 while (indexcol < iclause->indexcol)
7789 {
7790 double ndistinct;
7791 bool isdefault = true;
7792
7793 found_array = true;
7794
7795 /*
7796 * A skipped attribute's ndistinct forms the basis of our
7797 * estimate of the total number of "array elements" used by
7798 * its skip array at runtime. Look that up first.
7799 */
7800 examine_indexcol_variable(root, index, indexcol, &vardata);
7801 ndistinct = get_variable_numdistinct(&vardata, &isdefault);
7802
7803 if (indexcol == 0)
7804 {
7805 /*
7806 * Get an estimate of the leading column's correlation in
7807 * passing (avoids rereading variable stats below)
7808 */
7809 if (HeapTupleIsValid(vardata.statsTuple))
7810 correlation = btcost_correlation(index, &vardata);
7811 have_correlation = true;
7812 }
7813
7814 ReleaseVariableStats(vardata);
7815
7816 /*
7817 * If ndistinct is a default estimate, conservatively assume
7818 * that no skipping will happen at runtime
7819 */
7820 if (isdefault)
7821 {
7822 num_sa_scans = num_sa_scans_prev_cols;
7823 break; /* done building indexBoundQuals */
7824 }
7825
7826 /*
7827 * Apply indexcol's indexSkipQuals selectivity to ndistinct
7828 */
7829 if (indexSkipQuals != NIL)
7830 {
7831 List *partialSkipQuals;
7832 Selectivity ndistinctfrac;
7833
7834 /*
7835 * If the index is partial, AND the index predicate with
7836 * the index-bound quals to produce a more accurate idea
7837 * of the number of distinct values for prior indexcol
7838 */
7839 partialSkipQuals = add_predicate_to_index_quals(index,
7840 indexSkipQuals);
7841
7842 ndistinctfrac = clauselist_selectivity(root, partialSkipQuals,
7843 index->rel->relid,
7844 JOIN_INNER,
7845 NULL);
7846
7847 /*
7848 * If ndistinctfrac is selective (on its own), the scan is
7849 * unlikely to benefit from repositioning itself using
7850 * later quals. Do not allow iclause->indexcol's quals to
7851 * be added to indexBoundQuals (it would increase descent
7852 * costs, without lowering numIndexTuples costs by much).
7853 */
7854 if (ndistinctfrac < DEFAULT_RANGE_INEQ_SEL)
7855 {
7856 num_sa_scans = num_sa_scans_prev_cols;
7857 break; /* done building indexBoundQuals */
7858 }
7859
7860 /* Adjust ndistinct downward */
7861 ndistinct = rint(ndistinct * ndistinctfrac);
7862 ndistinct = Max(ndistinct, 1);
7863 }
7864
7865 /*
7866 * When there's no inequality quals, account for the need to
7867 * find an initial value by counting -inf/+inf as a value.
7868 *
7869 * We don't charge anything extra for possible next/prior key
7870 * index probes, which are sometimes used to find the next
7871 * valid skip array element (ahead of using the located
7872 * element value to relocate the scan to the next position
7873 * that might contain matching tuples). It seems hard to do
7874 * better here. Use of the skip support infrastructure often
7875 * avoids most next/prior key probes. But even when it can't,
7876 * there's a decent chance that most individual next/prior key
7877 * probes will locate a leaf page whose key space overlaps all
7878 * of the scan's keys (even the lower-order keys) -- which
7879 * also avoids the need for a separate, extra index descent.
7880 * Note also that these probes are much cheaper than non-probe
7881 * primitive index scans: they're reliably very selective.
7882 */
7883 if (indexSkipQuals == NIL)
7884 ndistinct += 1;
7885
7886 /*
7887 * Update num_sa_scans estimate by multiplying by ndistinct.
7888 *
7889 * We make the pessimistic assumption that there is no
7890 * naturally occurring cross-column correlation. This is
7891 * often wrong, but it seems best to err on the side of not
7892 * expecting skipping to be helpful...
7893 */
7894 num_sa_scans *= ndistinct;
7895
7896 /*
7897 * ...but back out of adding this latest group of 1 or more
7898 * skip arrays when num_sa_scans exceeds the total number of
7899 * index pages (revert to num_sa_scans from before indexcol).
7900 * This causes a sharp discontinuity in cost (as a function of
7901 * the indexcol's ndistinct), but that is representative of
7902 * actual runtime costs.
7903 *
7904 * Note that skipping is helpful when each primitive index
7905 * scan only manages to skip over 1 or 2 irrelevant leaf pages
7906 * on average. Skip arrays bring savings in CPU costs due to
7907 * the scan not needing to evaluate indexquals against every
7908 * tuple, which can greatly exceed any savings in I/O costs.
7909 * This test is a test of whether num_sa_scans implies that
7910 * we're past the point where the ability to skip ceases to
7911 * lower the scan's costs (even qual evaluation CPU costs).
7912 */
7913 if (index->pages < num_sa_scans)
7914 {
7915 num_sa_scans = num_sa_scans_prev_cols;
7916 break; /* done building indexBoundQuals */
7917 }
7918
7919 indexcol++;
7920 indexSkipQuals = NIL;
7921 }
7922
7923 /*
7924 * Finished considering the need to add skip arrays to bridge an
7925 * initial eqQualHere gap between the old and new index columns
7926 * (or there was no initial eqQualHere gap in the first place).
7927 *
7928 * If an initial gap could not be bridged, then new column's quals
7929 * (i.e. iclause->indexcol's quals) won't go into indexBoundQuals,
7930 * and so won't affect our final numIndexTuples estimate.
7931 */
7932 if (indexcol != iclause->indexcol)
7933 break; /* done building indexBoundQuals */
7934 }
7935
7936 Assert(indexcol == iclause->indexcol);
7937
7938 /* Examine each indexqual associated with this index clause */
7939 foreach(lc2, iclause->indexquals)
7940 {
7941 RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc2);
7942 Expr *clause = rinfo->clause;
7943 Oid clause_op = InvalidOid;
7944 int op_strategy;
7945
7946 if (IsA(clause, OpExpr))
7947 {
7948 OpExpr *op = (OpExpr *) clause;
7949
7950 clause_op = op->opno;
7951 }
7952 else if (IsA(clause, RowCompareExpr))
7953 {
7954 RowCompareExpr *rc = (RowCompareExpr *) clause;
7955
7956 clause_op = linitial_oid(rc->opnos);
7957 found_row_compare = true;
7958 }
7959 else if (IsA(clause, ScalarArrayOpExpr))
7960 {
7961 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
7962 Node *other_operand = (Node *) lsecond(saop->args);
7963 double alength = estimate_array_length(root, other_operand);
7964
7965 clause_op = saop->opno;
7966 found_array = true;
7967 /* estimate SA descents by indexBoundQuals only */
7968 if (alength > 1)
7969 num_sa_scans *= alength;
7970 }
7971 else if (IsA(clause, NullTest))
7972 {
7973 NullTest *nt = (NullTest *) clause;
7974
7975 if (nt->nulltesttype == IS_NULL)
7976 {
7977 found_is_null_op = true;
7978 /* IS NULL is like = for selectivity/skip scan purposes */
7979 eqQualHere = true;
7980 }
7981 }
7982 else
7983 elog(ERROR, "unsupported indexqual type: %d",
7984 (int) nodeTag(clause));
7985
7986 /* check for equality operator */
7987 if (OidIsValid(clause_op))
7988 {
7989 op_strategy = get_op_opfamily_strategy(clause_op,
7990 index->opfamily[indexcol]);
7991 Assert(op_strategy != 0); /* not a member of opfamily?? */
7992 if (op_strategy == BTEqualStrategyNumber)
7993 eqQualHere = true;
7994 }
7995
7996 indexBoundQuals = lappend(indexBoundQuals, rinfo);
7997
7998 /*
7999 * We apply inequality selectivities to estimate index descent
8000 * costs with scans that use skip arrays. Save this indexcol's
8001 * RestrictInfos if it looks like they'll be needed for that.
8002 */
8003 if (!eqQualHere && !found_row_compare &&
8004 indexcol < index->nkeycolumns - 1)
8005 indexSkipQuals = lappend(indexSkipQuals, rinfo);
8006 }
8007 }
8008
8009 /*
8010 * If index is unique and we found an '=' clause for each column, we can
8011 * just assume numIndexTuples = 1 and skip the expensive
8012 * clauselist_selectivity calculations. However, an array or NullTest
8013 * always invalidates that theory (even when eqQualHere has been set).
8014 */
8015 if (index->unique &&
8016 indexcol == index->nkeycolumns - 1 &&
8017 eqQualHere &&
8018 !found_array &&
8019 !found_is_null_op)
8020 numIndexTuples = 1.0;
8021 else
8022 {
8023 List *selectivityQuals;
8024 Selectivity btreeSelectivity;
8025
8026 /*
8027 * If the index is partial, AND the index predicate with the
8028 * index-bound quals to produce a more accurate idea of the number of
8029 * rows covered by the bound conditions.
8030 */
8031 selectivityQuals = add_predicate_to_index_quals(index, indexBoundQuals);
8032
8033 btreeSelectivity = clauselist_selectivity(root, selectivityQuals,
8034 index->rel->relid,
8035 JOIN_INNER,
8036 NULL);
8037 numIndexTuples = btreeSelectivity * index->rel->tuples;
8038
8039 /*
8040 * btree automatically combines individual array element primitive
8041 * index scans whenever the tuples covered by the next set of array
8042 * keys are close to tuples covered by the current set. That puts a
8043 * natural ceiling on the worst case number of descents -- there
8044 * cannot possibly be more than one descent per leaf page scanned.
8045 *
8046 * Clamp the number of descents to at most 1/3 the number of index
8047 * pages. This avoids implausibly high estimates with low selectivity
8048 * paths, where scans usually require only one or two descents. This
8049 * is most likely to help when there are several SAOP clauses, where
8050 * naively accepting the total number of distinct combinations of
8051 * array elements as the number of descents would frequently lead to
8052 * wild overestimates.
8053 *
8054 * We somewhat arbitrarily don't just make the cutoff the total number
8055 * of leaf pages (we make it 1/3 the total number of pages instead) to
8056 * give the btree code credit for its ability to continue on the leaf
8057 * level with low selectivity scans.
8058 *
8059 * Note: num_sa_scans includes both ScalarArrayOp array elements and
8060 * skip array elements whose qual affects our numIndexTuples estimate.
8061 */
8062 num_sa_scans = Min(num_sa_scans, ceil(index->pages * 0.3333333));
8063 num_sa_scans = Max(num_sa_scans, 1);
8064
8065 /*
8066 * As in genericcostestimate(), we have to adjust for any array quals
8067 * included in indexBoundQuals, and then round to integer.
8068 *
8069 * It is tempting to make genericcostestimate behave as if array
8070 * clauses work in almost the same way as scalar operators during
8071 * btree scans, making the top-level scan look like a continuous scan
8072 * (as opposed to num_sa_scans-many primitive index scans). After
8073 * all, btree scans mostly work like that at runtime. However, such a
8074 * scheme would badly bias genericcostestimate's simplistic approach
8075 * to calculating numIndexPages through prorating.
8076 *
8077 * Stick with the approach taken by non-native SAOP scans for now.
8078 * genericcostestimate will use the Mackert-Lohman formula to
8079 * compensate for repeat page fetches, even though that definitely
8080 * won't happen during btree scans (not for leaf pages, at least).
8081 * We're usually very pessimistic about the number of primitive index
8082 * scans that will be required, but it's not clear how to do better.
8083 */
8084 numIndexTuples = rint(numIndexTuples / num_sa_scans);
8085 }
8086
8087 /*
8088 * Now do generic index cost estimation.
8089 */
8090 costs.numIndexTuples = numIndexTuples;
8091 costs.num_sa_scans = num_sa_scans;
8092
8093 genericcostestimate(root, path, loop_count, &costs);
8094
8095 /*
8096 * Add a CPU-cost component to represent the costs of initial btree
8097 * descent. We don't charge any I/O cost for touching upper btree levels,
8098 * since they tend to stay in cache, but we still have to do about log2(N)
8099 * comparisons to descend a btree of N leaf tuples. We charge one
8100 * cpu_operator_cost per comparison.
8101 *
8102 * If there are SAOP or skip array keys, charge this once per estimated
8103 * index descent. The ones after the first one are not startup cost so
8104 * far as the overall plan goes, so just add them to "total" cost.
8105 */
8106 if (index->tuples > 1) /* avoid computing log(0) */
8107 {
8108 descentCost = ceil(log(index->tuples) / log(2.0)) * cpu_operator_cost;
8109 costs.indexStartupCost += descentCost;
8110 costs.indexTotalCost += costs.num_sa_scans * descentCost;
8111 }
8112
8113 /*
8114 * Even though we're not charging I/O cost for touching upper btree pages,
8115 * it's still reasonable to charge some CPU cost per page descended
8116 * through. Moreover, if we had no such charge at all, bloated indexes
8117 * would appear to have the same search cost as unbloated ones, at least
8118 * in cases where only a single leaf page is expected to be visited. This
8119 * cost is somewhat arbitrarily set at 50x cpu_operator_cost per page
8120 * touched. The number of such pages is btree tree height plus one (ie,
8121 * we charge for the leaf page too). As above, charge once per estimated
8122 * SAOP/skip array descent.
8123 */
8124 descentCost = (index->tree_height + 1) * DEFAULT_PAGE_CPU_MULTIPLIER * cpu_operator_cost;
8125 costs.indexStartupCost += descentCost;
8126 costs.indexTotalCost += costs.num_sa_scans * descentCost;
8127
8128 if (!have_correlation)
8129 {
8130 examine_indexcol_variable(root, index, 0, &vardata);
8131 if (HeapTupleIsValid(vardata.statsTuple))
8132 costs.indexCorrelation = btcost_correlation(index, &vardata);
8133 ReleaseVariableStats(vardata);
8134 }
8135 else
8136 {
8137 /* btcost_correlation already called earlier on */
8138 costs.indexCorrelation = correlation;
8139 }
8140
8141 *indexStartupCost = costs.indexStartupCost;
8142 *indexTotalCost = costs.indexTotalCost;
8143 *indexSelectivity = costs.indexSelectivity;
8144 *indexCorrelation = costs.indexCorrelation;
8145 *indexPages = costs.numIndexPages;
8146}
#define OidIsValid(objectId)
Definition: c.h:794
List * lappend(List *list, void *datum)
Definition: list.c:339
int get_op_opfamily_strategy(Oid opno, Oid opfamily)
Definition: lsyscache.c:85
#define IsA(nodeptr, _type_)
Definition: nodes.h:164
#define nodeTag(nodeptr)
Definition: nodes.h:139
double Selectivity
Definition: nodes.h:260
#define NIL
Definition: pg_list.h:68
#define lsecond(l)
Definition: pg_list.h:183
#define linitial_oid(l)
Definition: pg_list.h:180
unsigned int Oid
Definition: postgres_ext.h:32
@ IS_NULL
Definition: primnodes.h:1977
List * add_predicate_to_index_quals(IndexOptInfo *index, List *indexQuals)
Definition: selfuncs.c:7618
#define DEFAULT_PAGE_CPU_MULTIPLIER
Definition: selfuncs.c:144
double estimate_array_length(PlannerInfo *root, Node *arrayexpr)
Definition: selfuncs.c:2223
void genericcostestimate(PlannerInfo *root, IndexPath *path, double loop_count, GenericCosts *costs)
Definition: selfuncs.c:7395
static void examine_indexcol_variable(PlannerInfo *root, IndexOptInfo *index, int indexcol, VariableStatData *vardata)
Definition: selfuncs.c:6499
static double btcost_correlation(IndexOptInfo *index, VariableStatData *vardata)
Definition: selfuncs.c:7649
double get_variable_numdistinct(VariableStatData *vardata, bool *isdefault)
Definition: selfuncs.c:6602
#define DEFAULT_RANGE_INEQ_SEL
Definition: selfuncs.h:40
#define BTEqualStrategyNumber
Definition: stratnum.h:31
Selectivity indexSelectivity
Definition: selfuncs.h:129
Cost indexStartupCost
Definition: selfuncs.h:127
double indexCorrelation
Definition: selfuncs.h:130
double num_sa_scans
Definition: selfuncs.h:136
Cost indexTotalCost
Definition: selfuncs.h:128
double numIndexPages
Definition: selfuncs.h:133
double numIndexTuples
Definition: selfuncs.h:134
List * indexquals
Definition: pathnodes.h:2007
Definition: nodes.h:135
NullTestType nulltesttype
Definition: primnodes.h:1984
Oid opno
Definition: primnodes.h:850
Expr * clause
Definition: pathnodes.h:2792

References add_predicate_to_index_quals(), ScalarArrayOpExpr::args, Assert(), btcost_correlation(), BTEqualStrategyNumber, RestrictInfo::clause, clauselist_selectivity(), cpu_operator_cost, DEFAULT_PAGE_CPU_MULTIPLIER, DEFAULT_RANGE_INEQ_SEL, elog, ERROR, estimate_array_length(), examine_indexcol_variable(), genericcostestimate(), get_op_opfamily_strategy(), get_variable_numdistinct(), HeapTupleIsValid, IndexPath::indexclauses, IndexClause::indexcol, GenericCosts::indexCorrelation, IndexPath::indexinfo, IndexClause::indexquals, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, InvalidOid, IS_NULL, IsA, JOIN_INNER, lappend(), lfirst_node, linitial_oid, lsecond, Max, Min, NIL, nodeTag, NullTest::nulltesttype, GenericCosts::num_sa_scans, GenericCosts::numIndexPages, GenericCosts::numIndexTuples, OidIsValid, OpExpr::opno, ScalarArrayOpExpr::opno, ReleaseVariableStats, root, and VariableStatData::statsTuple.

Referenced by bthandler().
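
The descent charges layered on top of genericcostestimate() are easy to see in isolation: about log2(N) comparisons per descent at one cpu_operator_cost each, plus a per-page CPU charge of 50x cpu_operator_cost (DEFAULT_PAGE_CPU_MULTIPLIER) for tree_height + 1 pages, each charged once per estimated SAOP or skip-array descent. The stand-alone sketch below uses hypothetical inputs: one million index tuples, tree height 2, the default cpu_operator_cost of 0.0025, and four primitive descents.

#include <math.h>
#include <stdio.h>

/*
 * Stand-alone illustration of the btree descent charges, with hypothetical
 * inputs: 1 million index tuples, tree height 2, cpu_operator_cost 0.0025,
 * a 50x per-page multiplier, and 4 primitive (SAOP / skip-array) descents.
 */
int
main(void)
{
    double  tuples = 1000000.0;
    int     tree_height = 2;
    double  cpu_operator_cost = 0.0025;
    double  page_cpu_multiplier = 50.0;
    double  num_sa_scans = 4.0;

    double  startup = 0.0;
    double  total = 0.0;
    double  descentCost;

    /* about log2(N) comparisons to descend to the leaf level */
    descentCost = ceil(log(tuples) / log(2.0)) * cpu_operator_cost;
    startup += descentCost;
    total += num_sa_scans * descentCost;

    /* per-page CPU charge for tree_height + 1 pages touched per descent */
    descentCost = (tree_height + 1) * page_cpu_multiplier * cpu_operator_cost;
    startup += descentCost;
    total += num_sa_scans * descentCost;

    printf("descent startup=%.4f, descent total=%.4f\n", startup, total);
    return 0;
}

With these numbers the descent charges add 0.425 to the startup cost and 1.7 to the total cost, on top of whatever genericcostestimate() produced.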

◆ gincostestimate()

void gincostestimate ( struct PlannerInfo *  root,
struct IndexPath *  path,
double  loop_count,
Cost *  indexStartupCost,
Cost *  indexTotalCost,
Selectivity *  indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 8601 of file selfuncs.c.

8605{
8606 IndexOptInfo *index = path->indexinfo;
8607 List *indexQuals = get_quals_from_indexclauses(path->indexclauses);
8608 List *selectivityQuals;
8609 double numPages = index->pages,
8610 numTuples = index->tuples;
8611 double numEntryPages,
8612 numDataPages,
8613 numPendingPages,
8614 numEntries;
8615 GinQualCounts counts;
8616 bool matchPossible;
8617 bool fullIndexScan;
8618 double partialScale;
8619 double entryPagesFetched,
8620 dataPagesFetched,
8621 dataPagesFetchedBySel;
8622 double qual_op_cost,
8623 qual_arg_cost,
8624 spc_random_page_cost,
8625 outer_scans;
8626 Cost descentCost;
8627 Relation indexRel;
8628 GinStatsData ginStats;
8629 ListCell *lc;
8630 int i;
8631
8632 /*
8633 * Obtain statistical information from the meta page, if possible. Else
8634 * set ginStats to zeroes, and we'll cope below.
8635 */
8636 if (!index->hypothetical)
8637 {
8638 /* Lock should have already been obtained in plancat.c */
8639 indexRel = index_open(index->indexoid, NoLock);
8640 ginGetStats(indexRel, &ginStats);
8641 index_close(indexRel, NoLock);
8642 }
8643 else
8644 {
8645 memset(&ginStats, 0, sizeof(ginStats));
8646 }
8647
8648 /*
8649 * Assuming we got valid (nonzero) stats at all, nPendingPages can be
8650 * trusted, but the other fields are data as of the last VACUUM. We can
8651 * scale them up to account for growth since then, but that method only
8652 * goes so far; in the worst case, the stats might be for a completely
8653 * empty index, and scaling them will produce pretty bogus numbers.
8654 * Somewhat arbitrarily, set the cutoff for doing scaling at 4X growth; if
8655 * it's grown more than that, fall back to estimating things only from the
8656 * assumed-accurate index size. But we'll trust nPendingPages in any case
8657 * so long as it's not clearly insane, ie, more than the index size.
8658 */
8659 if (ginStats.nPendingPages < numPages)
8660 numPendingPages = ginStats.nPendingPages;
8661 else
8662 numPendingPages = 0;
8663
8664 if (numPages > 0 && ginStats.nTotalPages <= numPages &&
8665 ginStats.nTotalPages > numPages / 4 &&
8666 ginStats.nEntryPages > 0 && ginStats.nEntries > 0)
8667 {
8668 /*
8669 * OK, the stats seem close enough to sane to be trusted. But we
8670 * still need to scale them by the ratio numPages / nTotalPages to
8671 * account for growth since the last VACUUM.
8672 */
8673 double scale = numPages / ginStats.nTotalPages;
8674
8675 numEntryPages = ceil(ginStats.nEntryPages * scale);
8676 numDataPages = ceil(ginStats.nDataPages * scale);
8677 numEntries = ceil(ginStats.nEntries * scale);
8678 /* ensure we didn't round up too much */
8679 numEntryPages = Min(numEntryPages, numPages - numPendingPages);
8680 numDataPages = Min(numDataPages,
8681 numPages - numPendingPages - numEntryPages);
8682 }
8683 else
8684 {
8685 /*
8686 * We might get here because it's a hypothetical index, or an index
8687 * created pre-9.1 and never vacuumed since upgrading (in which case
8688 * its stats would read as zeroes), or just because it's grown too
8689 * much since the last VACUUM for us to put our faith in scaling.
8690 *
8691 * Invent some plausible internal statistics based on the index page
8692 * count (and clamp that to at least 10 pages, just in case). We
8693 * estimate that 90% of the index is entry pages, and the rest is data
8694 * pages. Estimate 100 entries per entry page; this is rather bogus
8695 * since it'll depend on the size of the keys, but it's more robust
8696 * than trying to predict the number of entries per heap tuple.
8697 */
8698 numPages = Max(numPages, 10);
8699 numEntryPages = floor((numPages - numPendingPages) * 0.90);
8700 numDataPages = numPages - numPendingPages - numEntryPages;
8701 numEntries = floor(numEntryPages * 100);
8702 }
8703
8704 /* In an empty index, numEntries could be zero. Avoid divide-by-zero */
8705 if (numEntries < 1)
8706 numEntries = 1;
8707
8708 /*
8709 * If the index is partial, AND the index predicate with the index-bound
8710 * quals to produce a more accurate idea of the number of rows covered by
8711 * the bound conditions.
8712 */
8713 selectivityQuals = add_predicate_to_index_quals(index, indexQuals);
8714
8715 /* Estimate the fraction of main-table tuples that will be visited */
8716 *indexSelectivity = clauselist_selectivity(root, selectivityQuals,
8717 index->rel->relid,
8718 JOIN_INNER,
8719 NULL);
8720
8721 /* fetch estimated page cost for tablespace containing index */
8722 get_tablespace_page_costs(index->reltablespace,
8723 &spc_random_page_cost,
8724 NULL);
8725
8726 /*
8727 * Generic assumption about index correlation: there isn't any.
8728 */
8729 *indexCorrelation = 0.0;
8730
8731 /*
8732 * Examine quals to estimate number of search entries & partial matches
8733 */
8734 memset(&counts, 0, sizeof(counts));
8735 counts.arrayScans = 1;
8736 matchPossible = true;
8737
8738 foreach(lc, path->indexclauses)
8739 {
8740 IndexClause *iclause = lfirst_node(IndexClause, lc);
8741 ListCell *lc2;
8742
8743 foreach(lc2, iclause->indexquals)
8744 {
8745 RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc2);
8746 Expr *clause = rinfo->clause;
8747
8748 if (IsA(clause, OpExpr))
8749 {
8750 matchPossible = gincost_opexpr(root,
8751 index,
8752 iclause->indexcol,
8753 (OpExpr *) clause,
8754 &counts);
8755 if (!matchPossible)
8756 break;
8757 }
8758 else if (IsA(clause, ScalarArrayOpExpr))
8759 {
8760 matchPossible = gincost_scalararrayopexpr(root,
8761 index,
8762 iclause->indexcol,
8763 (ScalarArrayOpExpr *) clause,
8764 numEntries,
8765 &counts);
8766 if (!matchPossible)
8767 break;
8768 }
8769 else
8770 {
8771 /* shouldn't be anything else for a GIN index */
8772 elog(ERROR, "unsupported GIN indexqual type: %d",
8773 (int) nodeTag(clause));
8774 }
8775 }
8776 }
8777
8778 /* Fall out if there were any provably-unsatisfiable quals */
8779 if (!matchPossible)
8780 {
8781 *indexStartupCost = 0;
8782 *indexTotalCost = 0;
8783 *indexSelectivity = 0;
8784 return;
8785 }
8786
8787 /*
8788 * If attribute has a full scan and at the same time doesn't have normal
8789 * scan, then we'll have to scan all non-null entries of that attribute.
8790 * Currently, we don't have per-attribute statistics for GIN. Thus, we
8791 * must assume the whole GIN index has to be scanned in this case.
8792 */
8793 fullIndexScan = false;
8794 for (i = 0; i < index->nkeycolumns; i++)
8795 {
8796 if (counts.attHasFullScan[i] && !counts.attHasNormalScan[i])
8797 {
8798 fullIndexScan = true;
8799 break;
8800 }
8801 }
8802
8803 if (fullIndexScan || indexQuals == NIL)
8804 {
8805 /*
8806 * Full index scan will be required. We treat this as if every key in
8807 * the index had been listed in the query; is that reasonable?
8808 */
8809 counts.partialEntries = 0;
8810 counts.exactEntries = numEntries;
8811 counts.searchEntries = numEntries;
8812 }
8813
8814 /* Will we have more than one iteration of a nestloop scan? */
8815 outer_scans = loop_count;
8816
8817 /*
8818 * Compute cost to begin scan, first of all, pay attention to pending
8819 * list.
8820 */
8821 entryPagesFetched = numPendingPages;
8822
8823 /*
8824 * Estimate number of entry pages read. We need to do
8825 * counts.searchEntries searches. Use a power function as it should be,
8826 * but tuples on leaf pages usually is much greater. Here we include all
8827 * searches in entry tree, including search of first entry in partial
8828 * match algorithm
8829 */
8830 entryPagesFetched += ceil(counts.searchEntries * rint(pow(numEntryPages, 0.15)));
8831
8832 /*
8833 * Add an estimate of entry pages read by partial match algorithm. It's a
8834 * scan over leaf pages in entry tree. We haven't any useful stats here,
8835 * so estimate it as proportion. Because counts.partialEntries is really
8836 * pretty bogus (see code above), it's possible that it is more than
8837 * numEntries; clamp the proportion to ensure sanity.
8838 */
8839 partialScale = counts.partialEntries / numEntries;
8840 partialScale = Min(partialScale, 1.0);
8841
8842 entryPagesFetched += ceil(numEntryPages * partialScale);
8843
8844 /*
8845 * Partial match algorithm reads all data pages before doing actual scan,
8846 * so it's a startup cost. Again, we haven't any useful stats here, so
8847 * estimate it as proportion.
8848 */
8849 dataPagesFetched = ceil(numDataPages * partialScale);
8850
8851 *indexStartupCost = 0;
8852 *indexTotalCost = 0;
8853
8854 /*
8855 * Add a CPU-cost component to represent the costs of initial entry btree
8856 * descent. We don't charge any I/O cost for touching upper btree levels,
8857 * since they tend to stay in cache, but we still have to do about log2(N)
8858 * comparisons to descend a btree of N leaf tuples. We charge one
8859 * cpu_operator_cost per comparison.
8860 *
8861 * If there are ScalarArrayOpExprs, charge this once per SA scan. The
8862 * ones after the first one are not startup cost so far as the overall
8863 * plan is concerned, so add them only to "total" cost.
8864 */
8865 if (numEntries > 1) /* avoid computing log(0) */
8866 {
8867 descentCost = ceil(log(numEntries) / log(2.0)) * cpu_operator_cost;
8868 *indexStartupCost += descentCost * counts.searchEntries;
8869 *indexTotalCost += counts.arrayScans * descentCost * counts.searchEntries;
8870 }
8871
8872 /*
8873 * Add a cpu cost per entry-page fetched. This is not amortized over a
8874 * loop.
8875 */
8876 *indexStartupCost += entryPagesFetched * DEFAULT_PAGE_CPU_MULTIPLIER * cpu_operator_cost;
8877 *indexTotalCost += entryPagesFetched * counts.arrayScans * DEFAULT_PAGE_CPU_MULTIPLIER * cpu_operator_cost;
8878
8879 /*
8880 * Add a cpu cost per data-page fetched. This is also not amortized over a
8881 * loop. Since those are the data pages from the partial match algorithm,
8882 * charge them as startup cost.
8883 */
8884 *indexStartupCost += DEFAULT_PAGE_CPU_MULTIPLIER * cpu_operator_cost * dataPagesFetched;
8885
8886 /*
8887 * Since we add the startup cost to the total cost later on, remove the
8888 * initial arrayscan from the total.
8889 */
8890 *indexTotalCost += dataPagesFetched * (counts.arrayScans - 1) * DEFAULT_PAGE_CPU_MULTIPLIER * cpu_operator_cost;
8891
8892 /*
8893 * Calculate cache effects if more than one scan due to nestloops or array
8894 * quals. The result is pro-rated per nestloop scan, but the array qual
8895 * factor shouldn't be pro-rated (compare genericcostestimate).
8896 */
8897 if (outer_scans > 1 || counts.arrayScans > 1)
8898 {
8899 entryPagesFetched *= outer_scans * counts.arrayScans;
8900 entryPagesFetched = index_pages_fetched(entryPagesFetched,
8901 (BlockNumber) numEntryPages,
8902 numEntryPages, root);
8903 entryPagesFetched /= outer_scans;
8904 dataPagesFetched *= outer_scans * counts.arrayScans;
8905 dataPagesFetched = index_pages_fetched(dataPagesFetched,
8906 (BlockNumber) numDataPages,
8907 numDataPages, root);
8908 dataPagesFetched /= outer_scans;
8909 }
8910
8911 /*
8912 * Here we use random page cost because logically-close pages could be far
8913 * apart on disk.
8914 */
8915 *indexStartupCost += (entryPagesFetched + dataPagesFetched) * spc_random_page_cost;
8916
8917 /*
8918 * Now compute the number of data pages fetched during the scan.
8919 *
8920 * We assume every entry to have the same number of items, and that there
8921 * is no overlap between them. (XXX: tsvector and array opclasses collect
8922 * statistics on the frequency of individual keys; it would be nice to use
8923 * those here.)
8924 */
8925 dataPagesFetched = ceil(numDataPages * counts.exactEntries / numEntries);
8926
8927 /*
8928 * If there is a lot of overlap among the entries, in particular if one of
8929 * the entries is very frequent, the above calculation can grossly
8930 * under-estimate. As a simple cross-check, calculate a lower bound based
8931 * on the overall selectivity of the quals. At a minimum, we must read
8932 * one item pointer for each matching entry.
8933 *
8934 * The width of each item pointer varies, based on the level of
8935 * compression. We don't have statistics on that, but an average of
8936 * around 3 bytes per item is fairly typical.
8937 */
8938 dataPagesFetchedBySel = ceil(*indexSelectivity *
8939 (numTuples / (BLCKSZ / 3)));
8940 if (dataPagesFetchedBySel > dataPagesFetched)
8941 dataPagesFetched = dataPagesFetchedBySel;
8942
8943 /* Add one page cpu-cost to the startup cost */
8944 *indexStartupCost += DEFAULT_PAGE_CPU_MULTIPLIER * cpu_operator_cost * counts.searchEntries;
8945
8946 /*
8947 * Add once again a CPU-cost for those data pages, before amortizing for
8948 * cache.
8949 */
8950 *indexTotalCost += dataPagesFetched * counts.arrayScans * DEFAULT_PAGE_CPU_MULTIPLIER * cpu_operator_cost;
8951
8952 /* Account for cache effects, the same as above */
8953 if (outer_scans > 1 || counts.arrayScans > 1)
8954 {
8955 dataPagesFetched *= outer_scans * counts.arrayScans;
8956 dataPagesFetched = index_pages_fetched(dataPagesFetched,
8957 (BlockNumber) numDataPages,
8958 numDataPages, root);
8959 dataPagesFetched /= outer_scans;
8960 }
8961
8962 /* And apply random_page_cost as the cost per page */
8963 *indexTotalCost += *indexStartupCost +
8964 dataPagesFetched * spc_random_page_cost;
8965
8966 /*
8967 * Add on index qual eval costs, much as in genericcostestimate. We charge
8968 * cpu but we can disregard indexorderbys, since GIN doesn't support
8969 * those.
8970 */
8971 qual_arg_cost = index_other_operands_eval_cost(root, indexQuals);
8972 qual_op_cost = cpu_operator_cost * list_length(indexQuals);
8973
8974 *indexStartupCost += qual_arg_cost;
8975 *indexTotalCost += qual_arg_cost;
8976
8977 /*
8978 * Add a cpu cost per search entry, corresponding to the actual visited
8979 * entries.
8980 */
8981 *indexTotalCost += (counts.searchEntries * counts.arrayScans) * (qual_op_cost);
8982 /* Now add a cpu cost per tuple in the posting lists / trees */
8983 *indexTotalCost += (numTuples * *indexSelectivity) * (cpu_index_tuple_cost);
8984 *indexPages = dataPagesFetched;
8985}
uint32 BlockNumber
Definition: block.h:31
double index_pages_fetched(double tuples_fetched, BlockNumber pages, double index_pages, PlannerInfo *root)
Definition: costsize.c:882
double cpu_index_tuple_cost
Definition: costsize.c:133
void ginGetStats(Relation index, GinStatsData *stats)
Definition: ginutil.c:629
int i
Definition: isn.c:77
static int list_length(const List *l)
Definition: pg_list.h:152
static int scale
Definition: pgbench.c:182
static bool gincost_scalararrayopexpr(PlannerInfo *root, IndexOptInfo *index, int indexcol, ScalarArrayOpExpr *clause, double numIndexEntries, GinQualCounts *counts)
Definition: selfuncs.c:8485
static bool gincost_opexpr(PlannerInfo *root, IndexOptInfo *index, int indexcol, OpExpr *clause, GinQualCounts *counts)
Definition: selfuncs.c:8435
bool attHasNormalScan[INDEX_MAX_KEYS]
Definition: selfuncs.c:8308
double exactEntries
Definition: selfuncs.c:8310
double arrayScans
Definition: selfuncs.c:8312
double partialEntries
Definition: selfuncs.c:8309
bool attHasFullScan[INDEX_MAX_KEYS]
Definition: selfuncs.c:8307
double searchEntries
Definition: selfuncs.c:8311
BlockNumber nDataPages
Definition: gin.h:60
BlockNumber nPendingPages
Definition: gin.h:57
BlockNumber nEntryPages
Definition: gin.h:59
int64 nEntries
Definition: gin.h:61
BlockNumber nTotalPages
Definition: gin.h:58

References add_predicate_to_index_quals(), GinQualCounts::arrayScans, GinQualCounts::attHasFullScan, GinQualCounts::attHasNormalScan, RestrictInfo::clause, clauselist_selectivity(), cpu_index_tuple_cost, cpu_operator_cost, DEFAULT_PAGE_CPU_MULTIPLIER, elog, ERROR, GinQualCounts::exactEntries, get_quals_from_indexclauses(), get_tablespace_page_costs(), gincost_opexpr(), gincost_scalararrayopexpr(), ginGetStats(), i, index_close(), index_open(), index_other_operands_eval_cost(), index_pages_fetched(), IndexPath::indexclauses, IndexClause::indexcol, IndexPath::indexinfo, IndexClause::indexquals, IsA, JOIN_INNER, lfirst_node, list_length(), Max, Min, GinStatsData::nDataPages, GinStatsData::nEntries, GinStatsData::nEntryPages, NIL, nodeTag, NoLock, GinStatsData::nPendingPages, GinStatsData::nTotalPages, GinQualCounts::partialEntries, root, scale, and GinQualCounts::searchEntries.

Referenced by ginhandler().
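
The entry-page estimate at scan start is the sum of three terms: the pending-list pages, roughly numEntryPages^0.15 entry-tree pages per search entry, and a clamped fraction of the entry pages for partial matches. The stand-alone sketch below uses hypothetical statistics (10 pending pages, 900 entry pages, 50,000 entries, 3 search entries, 100 partial-match entries); fmin stands in for the Min macro used in the listing.

#include <math.h>
#include <stdio.h>

/*
 * Stand-alone illustration of the GIN entry-page estimate, with hypothetical
 * statistics: 10 pending pages, 900 entry pages, 50,000 entries, 3 search
 * entries, and 100 partial-match entries.
 */
int
main(void)
{
    double  numPendingPages = 10.0;
    double  numEntryPages = 900.0;
    double  numEntries = 50000.0;
    double  searchEntries = 3.0;
    double  partialEntries = 100.0;

    double  entryPagesFetched;
    double  partialScale;

    /* pending-list pages are always read at scan start */
    entryPagesFetched = numPendingPages;

    /* roughly numEntryPages^0.15 entry-tree pages per search entry */
    entryPagesFetched += ceil(searchEntries * rint(pow(numEntryPages, 0.15)));

    /* partial-match scans read a clamped proportion of the entry pages */
    partialScale = fmin(partialEntries / numEntries, 1.0);
    entryPagesFetched += ceil(numEntryPages * partialScale);

    printf("entry pages fetched: %.0f\n", entryPagesFetched);
    return 0;
}

Here 900^0.15 rounds to 3 pages per search entry, and the partial-match proportion adds 2 more, for 21 entry pages in total.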

◆ gistcostestimate()

void gistcostestimate ( struct PlannerInfo *  root,
struct IndexPath *  path,
double  loop_count,
Cost *  indexStartupCost,
Cost *  indexTotalCost,
Selectivity *  indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 8191 of file selfuncs.c.

8195{
8196 IndexOptInfo *index = path->indexinfo;
8197 GenericCosts costs = {0};
8198 Cost descentCost;
8199
8200 genericcostestimate(root, path, loop_count, &costs);
8201
8202 /*
8203 * We model index descent costs similarly to those for btree, but to do
8204 * that we first need an idea of the tree height. We somewhat arbitrarily
8205 * assume that the fanout is 100, meaning the tree height is at most
8206 * log100(index->pages).
8207 *
8208 * Although this computation isn't really expensive enough to require
8209 * caching, we might as well use index->tree_height to cache it.
8210 */
8211 if (index->tree_height < 0) /* unknown? */
8212 {
8213 if (index->pages > 1) /* avoid computing log(0) */
8214 index->tree_height = (int) (log(index->pages) / log(100.0));
8215 else
8216 index->tree_height = 0;
8217 }
8218
8219 /*
8220 * Add a CPU-cost component to represent the costs of initial descent. We
8221 * just use log(N) here not log2(N) since the branching factor isn't
8222 * necessarily two anyway. As for btree, charge once per SA scan.
8223 */
8224 if (index->tuples > 1) /* avoid computing log(0) */
8225 {
8226 descentCost = ceil(log(index->tuples)) * cpu_operator_cost;
8227 costs.indexStartupCost += descentCost;
8228 costs.indexTotalCost += costs.num_sa_scans * descentCost;
8229 }
8230
8231 /*
8232 * Likewise add a per-page charge, calculated the same as for btrees.
8233 */
8234 descentCost = (index->tree_height + 1) * DEFAULT_PAGE_CPU_MULTIPLIER * cpu_operator_cost;
8235 costs.indexStartupCost += descentCost;
8236 costs.indexTotalCost += costs.num_sa_scans * descentCost;
8237
8238 *indexStartupCost = costs.indexStartupCost;
8239 *indexTotalCost = costs.indexTotalCost;
8240 *indexSelectivity = costs.indexSelectivity;
8241 *indexCorrelation = costs.indexCorrelation;
8242 *indexPages = costs.numIndexPages;
8243}

References cpu_operator_cost, DEFAULT_PAGE_CPU_MULTIPLIER, genericcostestimate(), GenericCosts::indexCorrelation, IndexPath::indexinfo, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, GenericCosts::num_sa_scans, GenericCosts::numIndexPages, and root.

Referenced by gisthandler().
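
Beyond genericcostestimate(), the only GiST-specific work is the tree-height guess (fanout assumed to be about 100, so the height is roughly log(pages)/log(100)) and a log(N) descent charge; spgcostestimate() below repeats the identical computation. The stand-alone sketch below uses hypothetical inputs: 5,000 index pages, one million index tuples, and the default cpu_operator_cost of 0.0025.

#include <math.h>
#include <stdio.h>

/*
 * Stand-alone illustration of the GiST/SP-GiST tree-height guess and descent
 * charge, with hypothetical inputs: 5,000 index pages, 1 million index
 * tuples, and cpu_operator_cost 0.0025.
 */
int
main(void)
{
    double  pages = 5000.0;
    double  tuples = 1000000.0;
    double  cpu_operator_cost = 0.0025;
    int     tree_height;
    double  descentCost;

    /* assume a fanout of about 100, so height is roughly log100(pages) */
    if (pages > 1)
        tree_height = (int) (log(pages) / log(100.0));
    else
        tree_height = 0;

    /* natural log of the tuple count, charged as operator comparisons */
    descentCost = ceil(log(tuples)) * cpu_operator_cost;

    printf("tree_height=%d, descentCost=%.4f\n", tree_height, descentCost);
    return 0;
}

That gives a cached tree_height of 1 and a per-descent CPU charge of about 0.035, charged once per estimated SA scan.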

◆ hashcostestimate()

void hashcostestimate ( struct PlannerInfo *  root,
struct IndexPath *  path,
double  loop_count,
Cost *  indexStartupCost,
Cost *  indexTotalCost,
Selectivity *  indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 8149 of file selfuncs.c.

8153{
8154 GenericCosts costs = {0};
8155
8156 genericcostestimate(root, path, loop_count, &costs);
8157
8158 /*
8159 * A hash index has no descent costs as such, since the index AM can go
8160 * directly to the target bucket after computing the hash value. There
8161 * are a couple of other hash-specific costs that we could conceivably add
8162 * here, though:
8163 *
8164 * Ideally we'd charge spc_random_page_cost for each page in the target
8165 * bucket, not just the numIndexPages pages that genericcostestimate
8166 * thought we'd visit. However in most cases we don't know which bucket
8167 * that will be. There's no point in considering the average bucket size
8168 * because the hash AM makes sure that's always one page.
8169 *
8170 * Likewise, we could consider charging some CPU for each index tuple in
8171 * the bucket, if we knew how many there were. But the per-tuple cost is
8172 * just a hash value comparison, not a general datatype-dependent
8173 * comparison, so any such charge ought to be quite a bit less than
8174 * cpu_operator_cost; which makes it probably not worth worrying about.
8175 *
8176 * A bigger issue is that chance hash-value collisions will result in
8177 * wasted probes into the heap. We don't currently attempt to model this
8178 * cost on the grounds that it's rare, but maybe it's not rare enough.
8179 * (Any fix for this ought to consider the generic lossy-operator problem,
8180 * though; it's not entirely hash-specific.)
8181 */
8182
8183 *indexStartupCost = costs.indexStartupCost;
8184 *indexTotalCost = costs.indexTotalCost;
8185 *indexSelectivity = costs.indexSelectivity;
8186 *indexCorrelation = costs.indexCorrelation;
8187 *indexPages = costs.numIndexPages;
8188}

References genericcostestimate(), GenericCosts::indexCorrelation, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, GenericCosts::numIndexPages, and root.

Referenced by hashhandler().

◆ spgcostestimate()

void spgcostestimate ( struct PlannerInfo *  root,
struct IndexPath *  path,
double  loop_count,
Cost *  indexStartupCost,
Cost *  indexTotalCost,
Selectivity *  indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 8246 of file selfuncs.c.

8250{
8251 IndexOptInfo *index = path->indexinfo;
8252 GenericCosts costs = {0};
8253 Cost descentCost;
8254
8255 genericcostestimate(root, path, loop_count, &costs);
8256
8257 /*
8258 * We model index descent costs similarly to those for btree, but to do
8259 * that we first need an idea of the tree height. We somewhat arbitrarily
8260 * assume that the fanout is 100, meaning the tree height is at most
8261 * log100(index->pages).
8262 *
8263 * Although this computation isn't really expensive enough to require
8264 * caching, we might as well use index->tree_height to cache it.
8265 */
8266 if (index->tree_height < 0) /* unknown? */
8267 {
8268 if (index->pages > 1) /* avoid computing log(0) */
8269 index->tree_height = (int) (log(index->pages) / log(100.0));
8270 else
8271 index->tree_height = 0;
8272 }
8273
8274 /*
8275 * Add a CPU-cost component to represent the costs of initial descent. We
8276 * just use log(N) here not log2(N) since the branching factor isn't
8277 * necessarily two anyway. As for btree, charge once per SA scan.
8278 */
8279 if (index->tuples > 1) /* avoid computing log(0) */
8280 {
8281 descentCost = ceil(log(index->tuples)) * cpu_operator_cost;
8282 costs.indexStartupCost += descentCost;
8283 costs.indexTotalCost += costs.num_sa_scans * descentCost;
8284 }
8285
8286 /*
8287 * Likewise add a per-page charge, calculated the same as for btrees.
8288 */
8289 descentCost = (index->tree_height + 1) * DEFAULT_PAGE_CPU_MULTIPLIER * cpu_operator_cost;
8290 costs.indexStartupCost += descentCost;
8291 costs.indexTotalCost += costs.num_sa_scans * descentCost;
8292
8293 *indexStartupCost = costs.indexStartupCost;
8294 *indexTotalCost = costs.indexTotalCost;
8295 *indexSelectivity = costs.indexSelectivity;
8296 *indexCorrelation = costs.indexCorrelation;
8297 *indexPages = costs.numIndexPages;
8298}

References cpu_operator_cost, DEFAULT_PAGE_CPU_MULTIPLIER, genericcostestimate(), GenericCosts::indexCorrelation, IndexPath::indexinfo, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, GenericCosts::num_sa_scans, GenericCosts::numIndexPages, and root.

Referenced by spghandler().