PostgreSQL Source Code  git master
index_selfuncs.h File Reference
#include "access/amapi.h"


Functions

void brincostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void btcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void hashcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void gistcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void spgcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void gincostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 

Function Documentation
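All six estimators implement the planner's amcostestimate callback declared in access/amapi.h; an index access method exposes one of them (or its own estimator) through the amcostestimate field of the IndexAmRoutine returned by its handler. A minimal sketch of that wiring follows; "myhandler" is a hypothetical handler name, while makeNode(), IndexAmRoutine, and btcostestimate() are used the same way the in-core bthandler() uses them.

#include "postgres.h"
#include "fmgr.h"
#include "access/amapi.h"
#include "utils/index_selfuncs.h"

PG_FUNCTION_INFO_V1(myhandler);

Datum
myhandler(PG_FUNCTION_ARGS)
{
    IndexAmRoutine *amroutine = makeNode(IndexAmRoutine);

    /* ... the other IndexAmRoutine fields (aminsert, ambuild, etc.) would
     * be filled in here ... */

    /* the planner calls this to cost a scan of an index of this AM */
    amroutine->amcostestimate = btcostestimate;

    PG_RETURN_POINTER(amroutine);
}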

◆ brincostestimate()

void brincostestimate ( struct PlannerInfo *root,
struct IndexPath *path,
double loop_count,
Cost *indexStartupCost,
Cost *indexTotalCost,
Selectivity *indexSelectivity,
double *indexCorrelation,
double *indexPages 
)

Definition at line 7973 of file selfuncs.c.

7977 {
7978  IndexOptInfo *index = path->indexinfo;
7979  List *indexQuals = get_quals_from_indexclauses(path->indexclauses);
7980  double numPages = index->pages;
7981  RelOptInfo *baserel = index->rel;
7982  RangeTblEntry *rte = planner_rt_fetch(baserel->relid, root);
7983  Cost spc_seq_page_cost;
7984  Cost spc_random_page_cost;
7985  double qual_arg_cost;
7986  double qualSelectivity;
7987  BrinStatsData statsData;
7988  double indexRanges;
7989  double minimalRanges;
7990  double estimatedRanges;
7991  double selec;
7992  Relation indexRel;
7993  ListCell *l;
7994  VariableStatData vardata;
7995 
7996  Assert(rte->rtekind == RTE_RELATION);
7997 
7998  /* fetch estimated page cost for the tablespace containing the index */
7999  get_tablespace_page_costs(index->reltablespace,
8000  &spc_random_page_cost,
8001  &spc_seq_page_cost);
8002 
8003  /*
8004  * Obtain some data from the index itself, if possible. Otherwise invent
8005  * some plausible internal statistics based on the relation page count.
8006  */
8007  if (!index->hypothetical)
8008  {
8009  /*
8010  * A lock should have already been obtained on the index in plancat.c.
8011  */
8012  indexRel = index_open(index->indexoid, NoLock);
8013  brinGetStats(indexRel, &statsData);
8014  index_close(indexRel, NoLock);
8015 
8016  /* work out the actual number of ranges in the index */
8017  indexRanges = Max(ceil((double) baserel->pages /
8018  statsData.pagesPerRange), 1.0);
8019  }
8020  else
8021  {
8022  /*
8023  * Assume default number of pages per range, and estimate the number
8024  * of ranges based on that.
8025  */
8026  indexRanges = Max(ceil((double) baserel->pages /
8027  BRIN_DEFAULT_PAGES_PER_RANGE), 1.0);
8028 
8029  statsData.pagesPerRange = BRIN_DEFAULT_PAGES_PER_RANGE;
8030  statsData.revmapNumPages = (indexRanges / REVMAP_PAGE_MAXITEMS) + 1;
8031  }
8032 
8033  /*
8034  * Compute index correlation
8035  *
8036  * Because we can use all index quals equally when scanning, we can use
8037  * the largest correlation (in absolute value) among columns used by the
8038  * query. Start at zero, the worst possible case. If we cannot find any
8039  * correlation statistics, we will keep it as 0.
8040  */
8041  *indexCorrelation = 0;
8042 
8043  foreach(l, path->indexclauses)
8044  {
8045  IndexClause *iclause = lfirst_node(IndexClause, l);
8046  AttrNumber attnum = index->indexkeys[iclause->indexcol];
8047 
8048  /* attempt to lookup stats in relation for this index column */
8049  if (attnum != 0)
8050  {
8051  /* Simple variable -- look to stats for the underlying table */
8052  if (get_relation_stats_hook &&
8053  (*get_relation_stats_hook) (root, rte, attnum, &vardata))
8054  {
8055  /*
8056  * The hook took control of acquiring a stats tuple. If it
8057  * did supply a tuple, it'd better have supplied a freefunc.
8058  */
8059  if (HeapTupleIsValid(vardata.statsTuple) && !vardata.freefunc)
8060  elog(ERROR,
8061  "no function provided to release variable stats with");
8062  }
8063  else
8064  {
8065  vardata.statsTuple =
8066  SearchSysCache3(STATRELATTINH,
8067  ObjectIdGetDatum(rte->relid),
8068  Int16GetDatum(attnum),
8069  BoolGetDatum(false));
8070  vardata.freefunc = ReleaseSysCache;
8071  }
8072  }
8073  else
8074  {
8075  /*
8076  * Looks like we've found an expression column in the index. Let's
8077  * see if there's any stats for it.
8078  */
8079 
8080  /* get the attnum from the 0-based index. */
8081  attnum = iclause->indexcol + 1;
8082 
8083  if (get_index_stats_hook &&
8084  (*get_index_stats_hook) (root, index->indexoid, attnum, &vardata))
8085  {
8086  /*
8087  * The hook took control of acquiring a stats tuple. If it
8088  * did supply a tuple, it'd better have supplied a freefunc.
8089  */
8090  if (HeapTupleIsValid(vardata.statsTuple) &&
8091  !vardata.freefunc)
8092  elog(ERROR, "no function provided to release variable stats with");
8093  }
8094  else
8095  {
8096  vardata.statsTuple = SearchSysCache3(STATRELATTINH,
8097  ObjectIdGetDatum(index->indexoid),
8098  Int16GetDatum(attnum),
8099  BoolGetDatum(false));
8100  vardata.freefunc = ReleaseSysCache;
8101  }
8102  }
8103 
8104  if (HeapTupleIsValid(vardata.statsTuple))
8105  {
8106  AttStatsSlot sslot;
8107 
8108  if (get_attstatsslot(&sslot, vardata.statsTuple,
8109  STATISTIC_KIND_CORRELATION, InvalidOid,
8110  ATTSTATSSLOT_NUMBERS))
8111  {
8112  double varCorrelation = 0.0;
8113 
8114  if (sslot.nnumbers > 0)
8115  varCorrelation = fabs(sslot.numbers[0]);
8116 
8117  if (varCorrelation > *indexCorrelation)
8118  *indexCorrelation = varCorrelation;
8119 
8120  free_attstatsslot(&sslot);
8121  }
8122  }
8123 
8124  ReleaseVariableStats(vardata);
8125  }
8126 
8127  qualSelectivity = clauselist_selectivity(root, indexQuals,
8128  baserel->relid,
8129  JOIN_INNER, NULL);
8130 
8131  /*
8132  * Now calculate the minimum possible ranges we could match with if all of
8133  * the rows were in the perfect order in the table's heap.
8134  */
8135  minimalRanges = ceil(indexRanges * qualSelectivity);
8136 
8137  /*
8138  * Now estimate the number of ranges that we'll touch by using the
8139  * indexCorrelation from the stats. Careful not to divide by zero (note
8140  * we're using the absolute value of the correlation).
8141  */
8142  if (*indexCorrelation < 1.0e-10)
8143  estimatedRanges = indexRanges;
8144  else
8145  estimatedRanges = Min(minimalRanges / *indexCorrelation, indexRanges);
8146 
8147  /* we expect to visit this portion of the table */
8148  selec = estimatedRanges / indexRanges;
8149 
8150  CLAMP_PROBABILITY(selec);
8151 
8152  *indexSelectivity = selec;
8153 
8154  /*
8155  * Compute the index qual costs, much as in genericcostestimate, to add to
8156  * the index costs. We can disregard indexorderbys, since BRIN doesn't
8157  * support those.
8158  */
8159  qual_arg_cost = index_other_operands_eval_cost(root, indexQuals);
8160 
8161  /*
8162  * Compute the startup cost as the cost to read the whole revmap
8163  * sequentially, including the cost to execute the index quals.
8164  */
8165  *indexStartupCost =
8166  spc_seq_page_cost * statsData.revmapNumPages * loop_count;
8167  *indexStartupCost += qual_arg_cost;
8168 
8169  /*
8170  * To read a BRIN index there might be a bit of back and forth over
8171  * regular pages, as revmap might point to them out of sequential order;
8172  * calculate the total cost as reading the whole index in random order.
8173  */
8174  *indexTotalCost = *indexStartupCost +
8175  spc_random_page_cost * (numPages - statsData.revmapNumPages) * loop_count;
8176 
8177  /*
8178  * Charge a small amount per range tuple which we expect to match to. This
8179  * is meant to reflect the costs of manipulating the bitmap. The BRIN scan
8180  * will set a bit for each page in the range when we find a matching
8181  * range, so we must multiply the charge by the number of pages in the
8182  * range.
8183  */
8184  *indexTotalCost += 0.1 * cpu_operator_cost * estimatedRanges *
8185  statsData.pagesPerRange;
8186 
8187  *indexPages = index->pages;
8188 }
int16 AttrNumber
Definition: attnum.h:21
void brinGetStats(Relation index, BrinStatsData *stats)
Definition: brin.c:1635
#define BRIN_DEFAULT_PAGES_PER_RANGE
Definition: brin.h:39
#define REVMAP_PAGE_MAXITEMS
Definition: brin_page.h:93
#define Min(x, y)
Definition: c.h:1004
#define Max(x, y)
Definition: c.h:998
#define Assert(condition)
Definition: c.h:858
Selectivity clauselist_selectivity(PlannerInfo *root, List *clauses, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
Definition: clausesel.c:100
double cpu_operator_cost
Definition: costsize.c:123
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:224
#define HeapTupleIsValid(tuple)
Definition: htup.h:78
void index_close(Relation relation, LOCKMODE lockmode)
Definition: indexam.c:177
Relation index_open(Oid relationId, LOCKMODE lockmode)
Definition: indexam.c:133
#define NoLock
Definition: lockdefs.h:34
void free_attstatsslot(AttStatsSlot *sslot)
Definition: lsyscache.c:3344
bool get_attstatsslot(AttStatsSlot *sslot, HeapTuple statstuple, int reqkind, Oid reqop, int flags)
Definition: lsyscache.c:3234
#define ATTSTATSSLOT_NUMBERS
Definition: lsyscache.h:43
double Cost
Definition: nodes.h:251
@ JOIN_INNER
Definition: nodes.h:293
@ RTE_RELATION
Definition: parsenodes.h:1028
#define planner_rt_fetch(rti, root)
Definition: pathnodes.h:560
int16 attnum
Definition: pg_attribute.h:74
#define lfirst_node(type, lc)
Definition: pg_list.h:176
static Datum Int16GetDatum(int16 X)
Definition: postgres.h:172
static Datum BoolGetDatum(bool X)
Definition: postgres.h:102
static Datum ObjectIdGetDatum(Oid X)
Definition: postgres.h:252
#define InvalidOid
Definition: postgres_ext.h:36
tree ctl root
Definition: radixtree.h:1880
List * get_quals_from_indexclauses(List *indexclauses)
Definition: selfuncs.c:6460
get_index_stats_hook_type get_index_stats_hook
Definition: selfuncs.c:148
Cost index_other_operands_eval_cost(PlannerInfo *root, List *indexquals)
Definition: selfuncs.c:6490
get_relation_stats_hook_type get_relation_stats_hook
Definition: selfuncs.c:147
#define ReleaseVariableStats(vardata)
Definition: selfuncs.h:99
#define CLAMP_PROBABILITY(p)
Definition: selfuncs.h:63
void get_tablespace_page_costs(Oid spcid, double *spc_random_page_cost, double *spc_seq_page_cost)
Definition: spccache.c:182
float4 * numbers
Definition: lsyscache.h:56
int nnumbers
Definition: lsyscache.h:57
BlockNumber revmapNumPages
Definition: brin.h:35
BlockNumber pagesPerRange
Definition: brin.h:34
AttrNumber indexcol
Definition: pathnodes.h:1750
List * indexclauses
Definition: pathnodes.h:1700
IndexOptInfo * indexinfo
Definition: pathnodes.h:1699
Definition: pg_list.h:54
RTEKind rtekind
Definition: parsenodes.h:1057
Index relid
Definition: pathnodes.h:908
BlockNumber pages
Definition: pathnodes.h:938
HeapTuple statsTuple
Definition: selfuncs.h:89
void(* freefunc)(HeapTuple tuple)
Definition: selfuncs.h:91
Definition: type.h:95
void ReleaseSysCache(HeapTuple tuple)
Definition: syscache.c:266
HeapTuple SearchSysCache3(int cacheId, Datum key1, Datum key2, Datum key3)
Definition: syscache.c:240

References Assert, attnum, ATTSTATSSLOT_NUMBERS, BoolGetDatum(), BRIN_DEFAULT_PAGES_PER_RANGE, brinGetStats(), CLAMP_PROBABILITY, clauselist_selectivity(), cpu_operator_cost, elog, ERROR, free_attstatsslot(), VariableStatData::freefunc, get_attstatsslot(), get_index_stats_hook, get_quals_from_indexclauses(), get_relation_stats_hook, get_tablespace_page_costs(), HeapTupleIsValid, index_close(), index_open(), index_other_operands_eval_cost(), IndexPath::indexclauses, IndexClause::indexcol, IndexPath::indexinfo, Int16GetDatum(), InvalidOid, JOIN_INNER, lfirst_node, Max, Min, AttStatsSlot::nnumbers, NoLock, AttStatsSlot::numbers, ObjectIdGetDatum(), RelOptInfo::pages, BrinStatsData::pagesPerRange, planner_rt_fetch, ReleaseSysCache(), ReleaseVariableStats, RangeTblEntry::relid, RelOptInfo::relid, REVMAP_PAGE_MAXITEMS, BrinStatsData::revmapNumPages, root, RTE_RELATION, RangeTblEntry::rtekind, SearchSysCache3(), and VariableStatData::statsTuple.

Referenced by brinhandler().
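As a purely illustrative, hedged walk-through of the range arithmetic above (the numbers are invented, and Min() here stands in for the macro from c.h): a 10,000-page heap with the default 128 pages per range gives 79 ranges, so a 1% clause selectivity and a best column correlation of 0.5 lead to an expected 2 ranges visited.

#include <math.h>
#include <stdio.h>

#define Min(x, y) ((x) < (y) ? (x) : (y))

int
main(void)
{
    double indexRanges = ceil(10000.0 / 128);   /* 10,000 heap pages, 128 pages/range -> 79 ranges */
    double qualSelectivity = 0.01;              /* pretend clauselist_selectivity() returned 1% */
    double correlation = 0.5;                   /* best |correlation| among the indexed columns */

    double minimalRanges = ceil(indexRanges * qualSelectivity);             /* 1 range if perfectly ordered */
    double estimatedRanges = Min(minimalRanges / correlation, indexRanges); /* 2 ranges at correlation 0.5 */
    double selec = estimatedRanges / indexRanges;                           /* ~0.025 of the table visited */

    printf("estimated BRIN selectivity = %.4f\n", selec);
    return 0;
}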

◆ btcostestimate()

void btcostestimate ( struct PlannerInfo *root,
struct IndexPath *path,
double loop_count,
Cost *indexStartupCost,
Cost *indexTotalCost,
Selectivity *indexSelectivity,
double *indexCorrelation,
double *indexPages 
)

Definition at line 6788 of file selfuncs.c.

6792 {
6793  IndexOptInfo *index = path->indexinfo;
6794  GenericCosts costs = {0};
6795  Oid relid;
6796  AttrNumber colnum;
6797  VariableStatData vardata = {0};
6798  double numIndexTuples;
6799  Cost descentCost;
6800  List *indexBoundQuals;
6801  int indexcol;
6802  bool eqQualHere;
6803  bool found_saop;
6804  bool found_is_null_op;
6805  double num_sa_scans;
6806  ListCell *lc;
6807 
6808  /*
6809  * For a btree scan, only leading '=' quals plus inequality quals for the
6810  * immediately next attribute contribute to index selectivity (these are
6811  * the "boundary quals" that determine the starting and stopping points of
6812  * the index scan). Additional quals can suppress visits to the heap, so
6813  * it's OK to count them in indexSelectivity, but they should not count
6814  * for estimating numIndexTuples. So we must examine the given indexquals
6815  * to find out which ones count as boundary quals. We rely on the
6816  * knowledge that they are given in index column order.
6817  *
6818  * For a RowCompareExpr, we consider only the first column, just as
6819  * rowcomparesel() does.
6820  *
6821  * If there's a ScalarArrayOpExpr in the quals, we'll actually perform up
6822  * to N index descents (not just one), but the ScalarArrayOpExpr's
6823  * operator can be considered to act the same as it normally does.
6824  */
6825  indexBoundQuals = NIL;
6826  indexcol = 0;
6827  eqQualHere = false;
6828  found_saop = false;
6829  found_is_null_op = false;
6830  num_sa_scans = 1;
6831  foreach(lc, path->indexclauses)
6832  {
6833  IndexClause *iclause = lfirst_node(IndexClause, lc);
6834  ListCell *lc2;
6835 
6836  if (indexcol != iclause->indexcol)
6837  {
6838  /* Beginning of a new column's quals */
6839  if (!eqQualHere)
6840  break; /* done if no '=' qual for indexcol */
6841  eqQualHere = false;
6842  indexcol++;
6843  if (indexcol != iclause->indexcol)
6844  break; /* no quals at all for indexcol */
6845  }
6846 
6847  /* Examine each indexqual associated with this index clause */
6848  foreach(lc2, iclause->indexquals)
6849  {
6850  RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc2);
6851  Expr *clause = rinfo->clause;
6852  Oid clause_op = InvalidOid;
6853  int op_strategy;
6854 
6855  if (IsA(clause, OpExpr))
6856  {
6857  OpExpr *op = (OpExpr *) clause;
6858 
6859  clause_op = op->opno;
6860  }
6861  else if (IsA(clause, RowCompareExpr))
6862  {
6863  RowCompareExpr *rc = (RowCompareExpr *) clause;
6864 
6865  clause_op = linitial_oid(rc->opnos);
6866  }
6867  else if (IsA(clause, ScalarArrayOpExpr))
6868  {
6869  ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
6870  Node *other_operand = (Node *) lsecond(saop->args);
6871  double alength = estimate_array_length(root, other_operand);
6872 
6873  clause_op = saop->opno;
6874  found_saop = true;
6875  /* estimate SA descents by indexBoundQuals only */
6876  if (alength > 1)
6877  num_sa_scans *= alength;
6878  }
6879  else if (IsA(clause, NullTest))
6880  {
6881  NullTest *nt = (NullTest *) clause;
6882 
6883  if (nt->nulltesttype == IS_NULL)
6884  {
6885  found_is_null_op = true;
6886  /* IS NULL is like = for selectivity purposes */
6887  eqQualHere = true;
6888  }
6889  }
6890  else
6891  elog(ERROR, "unsupported indexqual type: %d",
6892  (int) nodeTag(clause));
6893 
6894  /* check for equality operator */
6895  if (OidIsValid(clause_op))
6896  {
6897  op_strategy = get_op_opfamily_strategy(clause_op,
6898  index->opfamily[indexcol]);
6899  Assert(op_strategy != 0); /* not a member of opfamily?? */
6900  if (op_strategy == BTEqualStrategyNumber)
6901  eqQualHere = true;
6902  }
6903 
6904  indexBoundQuals = lappend(indexBoundQuals, rinfo);
6905  }
6906  }
6907 
6908  /*
6909  * If index is unique and we found an '=' clause for each column, we can
6910  * just assume numIndexTuples = 1 and skip the expensive
6911  * clauselist_selectivity calculations. However, a ScalarArrayOp or
6912  * NullTest invalidates that theory, even though it sets eqQualHere.
6913  */
6914  if (index->unique &&
6915  indexcol == index->nkeycolumns - 1 &&
6916  eqQualHere &&
6917  !found_saop &&
6918  !found_is_null_op)
6919  numIndexTuples = 1.0;
6920  else
6921  {
6922  List *selectivityQuals;
6923  Selectivity btreeSelectivity;
6924 
6925  /*
6926  * If the index is partial, AND the index predicate with the
6927  * index-bound quals to produce a more accurate idea of the number of
6928  * rows covered by the bound conditions.
6929  */
6930  selectivityQuals = add_predicate_to_index_quals(index, indexBoundQuals);
6931 
6932  btreeSelectivity = clauselist_selectivity(root, selectivityQuals,
6933  index->rel->relid,
6934  JOIN_INNER,
6935  NULL);
6936  numIndexTuples = btreeSelectivity * index->rel->tuples;
6937 
6938  /*
6939  * btree automatically combines individual ScalarArrayOpExpr primitive
6940  * index scans whenever the tuples covered by the next set of array
6941  * keys are close to tuples covered by the current set. That puts a
6942  * natural ceiling on the worst case number of descents -- there
6943  * cannot possibly be more than one descent per leaf page scanned.
6944  *
6945  * Clamp the number of descents to at most 1/3 the number of index
6946  * pages. This avoids implausibly high estimates with low selectivity
6947  * paths, where scans usually require only one or two descents. This
6948  * is most likely to help when there are several SAOP clauses, where
6949  * naively accepting the total number of distinct combinations of
6950  * array elements as the number of descents would frequently lead to
6951  * wild overestimates.
6952  *
6953  * We somewhat arbitrarily don't just make the cutoff the total number
6954  * of leaf pages (we make it 1/3 the total number of pages instead) to
6955  * give the btree code credit for its ability to continue on the leaf
6956  * level with low selectivity scans.
6957  */
6958  num_sa_scans = Min(num_sa_scans, ceil(index->pages * 0.3333333));
6959  num_sa_scans = Max(num_sa_scans, 1);
6960 
6961  /*
6962  * As in genericcostestimate(), we have to adjust for any
6963  * ScalarArrayOpExpr quals included in indexBoundQuals, and then round
6964  * to integer.
6965  *
6966  * It is tempting to make genericcostestimate behave as if SAOP
6967  * clauses work in almost the same way as scalar operators during
6968  * btree scans, making the top-level scan look like a continuous scan
6969  * (as opposed to num_sa_scans-many primitive index scans). After
6970  * all, btree scans mostly work like that at runtime. However, such a
6971  * scheme would badly bias genericcostestimate's simplistic approach
6972  * to calculating numIndexPages through prorating.
6973  *
6974  * Stick with the approach taken by non-native SAOP scans for now.
6975  * genericcostestimate will use the Mackert-Lohman formula to
6976  * compensate for repeat page fetches, even though that definitely
6977  * won't happen during btree scans (not for leaf pages, at least).
6978  * We're usually very pessimistic about the number of primitive index
6979  * scans that will be required, but it's not clear how to do better.
6980  */
6981  numIndexTuples = rint(numIndexTuples / num_sa_scans);
6982  }
6983 
6984  /*
6985  * Now do generic index cost estimation.
6986  */
6987  costs.numIndexTuples = numIndexTuples;
6988  costs.num_sa_scans = num_sa_scans;
6989 
6990  genericcostestimate(root, path, loop_count, &costs);
6991 
6992  /*
6993  * Add a CPU-cost component to represent the costs of initial btree
6994  * descent. We don't charge any I/O cost for touching upper btree levels,
6995  * since they tend to stay in cache, but we still have to do about log2(N)
6996  * comparisons to descend a btree of N leaf tuples. We charge one
6997  * cpu_operator_cost per comparison.
6998  *
6999  * If there are ScalarArrayOpExprs, charge this once per estimated SA
7000  * index descent. The ones after the first one are not startup cost so
7001  * far as the overall plan goes, so just add them to "total" cost.
7002  */
7003  if (index->tuples > 1) /* avoid computing log(0) */
7004  {
7005  descentCost = ceil(log(index->tuples) / log(2.0)) * cpu_operator_cost;
7006  costs.indexStartupCost += descentCost;
7007  costs.indexTotalCost += costs.num_sa_scans * descentCost;
7008  }
7009 
7010  /*
7011  * Even though we're not charging I/O cost for touching upper btree pages,
7012  * it's still reasonable to charge some CPU cost per page descended
7013  * through. Moreover, if we had no such charge at all, bloated indexes
7014  * would appear to have the same search cost as unbloated ones, at least
7015  * in cases where only a single leaf page is expected to be visited. This
7016  * cost is somewhat arbitrarily set at 50x cpu_operator_cost per page
7017  * touched. The number of such pages is btree tree height plus one (ie,
7018  * we charge for the leaf page too). As above, charge once per estimated
7019  * SA index descent.
7020  */
7021  descentCost = (index->tree_height + 1) * DEFAULT_PAGE_CPU_MULTIPLIER * cpu_operator_cost;
7022  costs.indexStartupCost += descentCost;
7023  costs.indexTotalCost += costs.num_sa_scans * descentCost;
7024 
7025  /*
7026  * If we can get an estimate of the first column's ordering correlation C
7027  * from pg_statistic, estimate the index correlation as C for a
7028  * single-column index, or C * 0.75 for multiple columns. (The idea here
7029  * is that multiple columns dilute the importance of the first column's
7030  * ordering, but don't negate it entirely. Before 8.0 we divided the
7031  * correlation by the number of columns, but that seems too strong.)
7032  */
7033  if (index->indexkeys[0] != 0)
7034  {
7035  /* Simple variable --- look to stats for the underlying table */
7036  RangeTblEntry *rte = planner_rt_fetch(index->rel->relid, root);
7037 
7038  Assert(rte->rtekind == RTE_RELATION);
7039  relid = rte->relid;
7040  Assert(relid != InvalidOid);
7041  colnum = index->indexkeys[0];
7042 
7043  if (get_relation_stats_hook &&
7044  (*get_relation_stats_hook) (root, rte, colnum, &vardata))
7045  {
7046  /*
7047  * The hook took control of acquiring a stats tuple. If it did
7048  * supply a tuple, it'd better have supplied a freefunc.
7049  */
7050  if (HeapTupleIsValid(vardata.statsTuple) &&
7051  !vardata.freefunc)
7052  elog(ERROR, "no function provided to release variable stats with");
7053  }
7054  else
7055  {
7056  vardata.statsTuple = SearchSysCache3(STATRELATTINH,
7057  ObjectIdGetDatum(relid),
7058  Int16GetDatum(colnum),
7059  BoolGetDatum(rte->inh));
7060  vardata.freefunc = ReleaseSysCache;
7061  }
7062  }
7063  else
7064  {
7065  /* Expression --- maybe there are stats for the index itself */
7066  relid = index->indexoid;
7067  colnum = 1;
7068 
7069  if (get_index_stats_hook &&
7070  (*get_index_stats_hook) (root, relid, colnum, &vardata))
7071  {
7072  /*
7073  * The hook took control of acquiring a stats tuple. If it did
7074  * supply a tuple, it'd better have supplied a freefunc.
7075  */
7076  if (HeapTupleIsValid(vardata.statsTuple) &&
7077  !vardata.freefunc)
7078  elog(ERROR, "no function provided to release variable stats with");
7079  }
7080  else
7081  {
7082  vardata.statsTuple = SearchSysCache3(STATRELATTINH,
7083  ObjectIdGetDatum(relid),
7084  Int16GetDatum(colnum),
7085  BoolGetDatum(false));
7086  vardata.freefunc = ReleaseSysCache;
7087  }
7088  }
7089 
7090  if (HeapTupleIsValid(vardata.statsTuple))
7091  {
7092  Oid sortop;
7093  AttStatsSlot sslot;
7094 
7095  sortop = get_opfamily_member(index->opfamily[0],
7096  index->opcintype[0],
7097  index->opcintype[0],
7098  BTLessStrategyNumber);
7099  if (OidIsValid(sortop) &&
7100  get_attstatsslot(&sslot, vardata.statsTuple,
7101  STATISTIC_KIND_CORRELATION, sortop,
7102  ATTSTATSSLOT_NUMBERS))
7103  {
7104  double varCorrelation;
7105 
7106  Assert(sslot.nnumbers == 1);
7107  varCorrelation = sslot.numbers[0];
7108 
7109  if (index->reverse_sort[0])
7110  varCorrelation = -varCorrelation;
7111 
7112  if (index->nkeycolumns > 1)
7113  costs.indexCorrelation = varCorrelation * 0.75;
7114  else
7115  costs.indexCorrelation = varCorrelation;
7116 
7117  free_attstatsslot(&sslot);
7118  }
7119  }
7120 
7121  ReleaseVariableStats(vardata);
7122 
7123  *indexStartupCost = costs.indexStartupCost;
7124  *indexTotalCost = costs.indexTotalCost;
7125  *indexSelectivity = costs.indexSelectivity;
7126  *indexCorrelation = costs.indexCorrelation;
7127  *indexPages = costs.numIndexPages;
7128 }
#define OidIsValid(objectId)
Definition: c.h:775
List * lappend(List *list, void *datum)
Definition: list.c:339
int get_op_opfamily_strategy(Oid opno, Oid opfamily)
Definition: lsyscache.c:83
Oid get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype, int16 strategy)
Definition: lsyscache.c:166
#define IsA(nodeptr, _type_)
Definition: nodes.h:158
#define nodeTag(nodeptr)
Definition: nodes.h:133
double Selectivity
Definition: nodes.h:250
#define NIL
Definition: pg_list.h:68
#define lsecond(l)
Definition: pg_list.h:183
#define linitial_oid(l)
Definition: pg_list.h:180
unsigned int Oid
Definition: postgres_ext.h:31
@ IS_NULL
Definition: primnodes.h:1924
#define DEFAULT_PAGE_CPU_MULTIPLIER
Definition: selfuncs.c:144
double estimate_array_length(PlannerInfo *root, Node *arrayexpr)
Definition: selfuncs.c:2136
void genericcostestimate(PlannerInfo *root, IndexPath *path, double loop_count, GenericCosts *costs)
Definition: selfuncs.c:6544
List * add_predicate_to_index_quals(IndexOptInfo *index, List *indexQuals)
Definition: selfuncs.c:6767
#define BTLessStrategyNumber
Definition: stratnum.h:29
#define BTEqualStrategyNumber
Definition: stratnum.h:31
Selectivity indexSelectivity
Definition: selfuncs.h:127
Cost indexStartupCost
Definition: selfuncs.h:125
double indexCorrelation
Definition: selfuncs.h:128
double num_sa_scans
Definition: selfuncs.h:134
Cost indexTotalCost
Definition: selfuncs.h:126
double numIndexPages
Definition: selfuncs.h:131
double numIndexTuples
Definition: selfuncs.h:132
List * indexquals
Definition: pathnodes.h:1748
Definition: nodes.h:129
NullTestType nulltesttype
Definition: primnodes.h:1931
Oid opno
Definition: primnodes.h:788
Expr * clause
Definition: pathnodes.h:2552

References add_predicate_to_index_quals(), ScalarArrayOpExpr::args, Assert, ATTSTATSSLOT_NUMBERS, BoolGetDatum(), BTEqualStrategyNumber, BTLessStrategyNumber, RestrictInfo::clause, clauselist_selectivity(), cpu_operator_cost, DEFAULT_PAGE_CPU_MULTIPLIER, elog, ERROR, estimate_array_length(), free_attstatsslot(), VariableStatData::freefunc, genericcostestimate(), get_attstatsslot(), get_index_stats_hook, get_op_opfamily_strategy(), get_opfamily_member(), get_relation_stats_hook, HeapTupleIsValid, IndexPath::indexclauses, IndexClause::indexcol, GenericCosts::indexCorrelation, IndexPath::indexinfo, IndexClause::indexquals, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, RangeTblEntry::inh, Int16GetDatum(), InvalidOid, IS_NULL, IsA, JOIN_INNER, lappend(), lfirst_node, linitial_oid, lsecond, Max, Min, NIL, AttStatsSlot::nnumbers, nodeTag, NullTest::nulltesttype, GenericCosts::num_sa_scans, AttStatsSlot::numbers, GenericCosts::numIndexPages, GenericCosts::numIndexTuples, ObjectIdGetDatum(), OidIsValid, OpExpr::opno, ScalarArrayOpExpr::opno, planner_rt_fetch, ReleaseSysCache(), ReleaseVariableStats, RangeTblEntry::relid, root, RTE_RELATION, RangeTblEntry::rtekind, SearchSysCache3(), and VariableStatData::statsTuple.

Referenced by bthandler().
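A hedged numeric illustration of the descent charges above, using the default cpu_operator_cost of 0.0025 and the 50x per-page multiplier (DEFAULT_PAGE_CPU_MULTIPLIER); the tuple count and tree height are invented.

#include <math.h>
#include <stdio.h>

int
main(void)
{
    double cpu_operator_cost = 0.0025;   /* default GUC value */
    double page_multiplier = 50.0;       /* DEFAULT_PAGE_CPU_MULTIPLIER */
    double tuples = 1000000.0;           /* invented leaf tuple count */
    int    tree_height = 3;              /* invented btree height */

    /* about log2(N) comparisons to descend to a leaf page */
    double comparisonCost = ceil(log(tuples) / log(2.0)) * cpu_operator_cost;  /* 20 * 0.0025 = 0.05 */
    /* plus a per-page CPU charge for tree_height + 1 pages */
    double pageCost = (tree_height + 1) * page_multiplier * cpu_operator_cost; /* 4 * 50 * 0.0025 = 0.5 */

    printf("btree descent CPU cost per index scan = %.4f\n", comparisonCost + pageCost);
    return 0;
}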

◆ gincostestimate()

void gincostestimate ( struct PlannerInfo *root,
struct IndexPath *path,
double loop_count,
Cost *indexStartupCost,
Cost *indexTotalCost,
Selectivity *indexSelectivity,
double *indexCorrelation,
double *indexPages 
)

Definition at line 7583 of file selfuncs.c.

7587 {
7588  IndexOptInfo *index = path->indexinfo;
7589  List *indexQuals = get_quals_from_indexclauses(path->indexclauses);
7590  List *selectivityQuals;
7591  double numPages = index->pages,
7592  numTuples = index->tuples;
7593  double numEntryPages,
7594  numDataPages,
7595  numPendingPages,
7596  numEntries;
7597  GinQualCounts counts;
7598  bool matchPossible;
7599  bool fullIndexScan;
7600  double partialScale;
7601  double entryPagesFetched,
7602  dataPagesFetched,
7603  dataPagesFetchedBySel;
7604  double qual_op_cost,
7605  qual_arg_cost,
7606  spc_random_page_cost,
7607  outer_scans;
7608  Cost descentCost;
7609  Relation indexRel;
7610  GinStatsData ginStats;
7611  ListCell *lc;
7612  int i;
7613 
7614  /*
7615  * Obtain statistical information from the meta page, if possible. Else
7616  * set ginStats to zeroes, and we'll cope below.
7617  */
7618  if (!index->hypothetical)
7619  {
7620  /* Lock should have already been obtained in plancat.c */
7621  indexRel = index_open(index->indexoid, NoLock);
7622  ginGetStats(indexRel, &ginStats);
7623  index_close(indexRel, NoLock);
7624  }
7625  else
7626  {
7627  memset(&ginStats, 0, sizeof(ginStats));
7628  }
7629 
7630  /*
7631  * Assuming we got valid (nonzero) stats at all, nPendingPages can be
7632  * trusted, but the other fields are data as of the last VACUUM. We can
7633  * scale them up to account for growth since then, but that method only
7634  * goes so far; in the worst case, the stats might be for a completely
7635  * empty index, and scaling them will produce pretty bogus numbers.
7636  * Somewhat arbitrarily, set the cutoff for doing scaling at 4X growth; if
7637  * it's grown more than that, fall back to estimating things only from the
7638  * assumed-accurate index size. But we'll trust nPendingPages in any case
7639  * so long as it's not clearly insane, ie, more than the index size.
7640  */
7641  if (ginStats.nPendingPages < numPages)
7642  numPendingPages = ginStats.nPendingPages;
7643  else
7644  numPendingPages = 0;
7645 
7646  if (numPages > 0 && ginStats.nTotalPages <= numPages &&
7647  ginStats.nTotalPages > numPages / 4 &&
7648  ginStats.nEntryPages > 0 && ginStats.nEntries > 0)
7649  {
7650  /*
7651  * OK, the stats seem close enough to sane to be trusted. But we
7652  * still need to scale them by the ratio numPages / nTotalPages to
7653  * account for growth since the last VACUUM.
7654  */
7655  double scale = numPages / ginStats.nTotalPages;
7656 
7657  numEntryPages = ceil(ginStats.nEntryPages * scale);
7658  numDataPages = ceil(ginStats.nDataPages * scale);
7659  numEntries = ceil(ginStats.nEntries * scale);
7660  /* ensure we didn't round up too much */
7661  numEntryPages = Min(numEntryPages, numPages - numPendingPages);
7662  numDataPages = Min(numDataPages,
7663  numPages - numPendingPages - numEntryPages);
7664  }
7665  else
7666  {
7667  /*
7668  * We might get here because it's a hypothetical index, or an index
7669  * created pre-9.1 and never vacuumed since upgrading (in which case
7670  * its stats would read as zeroes), or just because it's grown too
7671  * much since the last VACUUM for us to put our faith in scaling.
7672  *
7673  * Invent some plausible internal statistics based on the index page
7674  * count (and clamp that to at least 10 pages, just in case). We
7675  * estimate that 90% of the index is entry pages, and the rest is data
7676  * pages. Estimate 100 entries per entry page; this is rather bogus
7677  * since it'll depend on the size of the keys, but it's more robust
7678  * than trying to predict the number of entries per heap tuple.
7679  */
7680  numPages = Max(numPages, 10);
7681  numEntryPages = floor((numPages - numPendingPages) * 0.90);
7682  numDataPages = numPages - numPendingPages - numEntryPages;
7683  numEntries = floor(numEntryPages * 100);
7684  }
7685 
7686  /* In an empty index, numEntries could be zero. Avoid divide-by-zero */
7687  if (numEntries < 1)
7688  numEntries = 1;
7689 
7690  /*
7691  * If the index is partial, AND the index predicate with the index-bound
7692  * quals to produce a more accurate idea of the number of rows covered by
7693  * the bound conditions.
7694  */
7695  selectivityQuals = add_predicate_to_index_quals(index, indexQuals);
7696 
7697  /* Estimate the fraction of main-table tuples that will be visited */
7698  *indexSelectivity = clauselist_selectivity(root, selectivityQuals,
7699  index->rel->relid,
7700  JOIN_INNER,
7701  NULL);
7702 
7703  /* fetch estimated page cost for tablespace containing index */
7704  get_tablespace_page_costs(index->reltablespace,
7705  &spc_random_page_cost,
7706  NULL);
7707 
7708  /*
7709  * Generic assumption about index correlation: there isn't any.
7710  */
7711  *indexCorrelation = 0.0;
7712 
7713  /*
7714  * Examine quals to estimate number of search entries & partial matches
7715  */
7716  memset(&counts, 0, sizeof(counts));
7717  counts.arrayScans = 1;
7718  matchPossible = true;
7719 
7720  foreach(lc, path->indexclauses)
7721  {
7722  IndexClause *iclause = lfirst_node(IndexClause, lc);
7723  ListCell *lc2;
7724 
7725  foreach(lc2, iclause->indexquals)
7726  {
7727  RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc2);
7728  Expr *clause = rinfo->clause;
7729 
7730  if (IsA(clause, OpExpr))
7731  {
7732  matchPossible = gincost_opexpr(root,
7733  index,
7734  iclause->indexcol,
7735  (OpExpr *) clause,
7736  &counts);
7737  if (!matchPossible)
7738  break;
7739  }
7740  else if (IsA(clause, ScalarArrayOpExpr))
7741  {
7742  matchPossible = gincost_scalararrayopexpr(root,
7743  index,
7744  iclause->indexcol,
7745  (ScalarArrayOpExpr *) clause,
7746  numEntries,
7747  &counts);
7748  if (!matchPossible)
7749  break;
7750  }
7751  else
7752  {
7753  /* shouldn't be anything else for a GIN index */
7754  elog(ERROR, "unsupported GIN indexqual type: %d",
7755  (int) nodeTag(clause));
7756  }
7757  }
7758  }
7759 
7760  /* Fall out if there were any provably-unsatisfiable quals */
7761  if (!matchPossible)
7762  {
7763  *indexStartupCost = 0;
7764  *indexTotalCost = 0;
7765  *indexSelectivity = 0;
7766  return;
7767  }
7768 
7769  /*
7770  * If attribute has a full scan and at the same time doesn't have normal
7771  * scan, then we'll have to scan all non-null entries of that attribute.
7772  * Currently, we don't have per-attribute statistics for GIN. Thus, we
7773  * must assume the whole GIN index has to be scanned in this case.
7774  */
7775  fullIndexScan = false;
7776  for (i = 0; i < index->nkeycolumns; i++)
7777  {
7778  if (counts.attHasFullScan[i] && !counts.attHasNormalScan[i])
7779  {
7780  fullIndexScan = true;
7781  break;
7782  }
7783  }
7784 
7785  if (fullIndexScan || indexQuals == NIL)
7786  {
7787  /*
7788  * Full index scan will be required. We treat this as if every key in
7789  * the index had been listed in the query; is that reasonable?
7790  */
7791  counts.partialEntries = 0;
7792  counts.exactEntries = numEntries;
7793  counts.searchEntries = numEntries;
7794  }
7795 
7796  /* Will we have more than one iteration of a nestloop scan? */
7797  outer_scans = loop_count;
7798 
7799  /*
7800  * Compute cost to begin scan, first of all, pay attention to pending
7801  * list.
7802  */
7803  entryPagesFetched = numPendingPages;
7804 
7805  /*
7806  * Estimate number of entry pages read. We need to do
7807  * counts.searchEntries searches. Use a power function as it should be,
7808  * but tuples on leaf pages usually is much greater. Here we include all
7809  * searches in entry tree, including search of first entry in partial
7810  * match algorithm
7811  */
7812  entryPagesFetched += ceil(counts.searchEntries * rint(pow(numEntryPages, 0.15)));
7813 
7814  /*
7815  * Add an estimate of entry pages read by partial match algorithm. It's a
7816  * scan over leaf pages in entry tree. We haven't any useful stats here,
7817  * so estimate it as proportion. Because counts.partialEntries is really
7818  * pretty bogus (see code above), it's possible that it is more than
7819  * numEntries; clamp the proportion to ensure sanity.
7820  */
7821  partialScale = counts.partialEntries / numEntries;
7822  partialScale = Min(partialScale, 1.0);
7823 
7824  entryPagesFetched += ceil(numEntryPages * partialScale);
7825 
7826  /*
7827  * Partial match algorithm reads all data pages before doing actual scan,
7828  * so it's a startup cost. Again, we haven't any useful stats here, so
7829  * estimate it as proportion.
7830  */
7831  dataPagesFetched = ceil(numDataPages * partialScale);
7832 
7833  *indexStartupCost = 0;
7834  *indexTotalCost = 0;
7835 
7836  /*
7837  * Add a CPU-cost component to represent the costs of initial entry btree
7838  * descent. We don't charge any I/O cost for touching upper btree levels,
7839  * since they tend to stay in cache, but we still have to do about log2(N)
7840  * comparisons to descend a btree of N leaf tuples. We charge one
7841  * cpu_operator_cost per comparison.
7842  *
7843  * If there are ScalarArrayOpExprs, charge this once per SA scan. The
7844  * ones after the first one are not startup cost so far as the overall
7845  * plan is concerned, so add them only to "total" cost.
7846  */
7847  if (numEntries > 1) /* avoid computing log(0) */
7848  {
7849  descentCost = ceil(log(numEntries) / log(2.0)) * cpu_operator_cost;
7850  *indexStartupCost += descentCost * counts.searchEntries;
7851  *indexTotalCost += counts.arrayScans * descentCost * counts.searchEntries;
7852  }
7853 
7854  /*
7855  * Add a cpu cost per entry-page fetched. This is not amortized over a
7856  * loop.
7857  */
7858  *indexStartupCost += entryPagesFetched * DEFAULT_PAGE_CPU_MULTIPLIER * cpu_operator_cost;
7859  *indexTotalCost += entryPagesFetched * counts.arrayScans * DEFAULT_PAGE_CPU_MULTIPLIER * cpu_operator_cost;
7860 
7861  /*
7862  * Add a cpu cost per data-page fetched. This is also not amortized over a
7863  * loop. Since those are the data pages from the partial match algorithm,
7864  * charge them as startup cost.
7865  */
7866  *indexStartupCost += DEFAULT_PAGE_CPU_MULTIPLIER * cpu_operator_cost * dataPagesFetched;
7867 
7868  /*
7869  * Since we add the startup cost to the total cost later on, remove the
7870  * initial arrayscan from the total.
7871  */
7872  *indexTotalCost += dataPagesFetched * (counts.arrayScans - 1) * DEFAULT_PAGE_CPU_MULTIPLIER * cpu_operator_cost;
7873 
7874  /*
7875  * Calculate cache effects if more than one scan due to nestloops or array
7876  * quals. The result is pro-rated per nestloop scan, but the array qual
7877  * factor shouldn't be pro-rated (compare genericcostestimate).
7878  */
7879  if (outer_scans > 1 || counts.arrayScans > 1)
7880  {
7881  entryPagesFetched *= outer_scans * counts.arrayScans;
7882  entryPagesFetched = index_pages_fetched(entryPagesFetched,
7883  (BlockNumber) numEntryPages,
7884  numEntryPages, root);
7885  entryPagesFetched /= outer_scans;
7886  dataPagesFetched *= outer_scans * counts.arrayScans;
7887  dataPagesFetched = index_pages_fetched(dataPagesFetched,
7888  (BlockNumber) numDataPages,
7889  numDataPages, root);
7890  dataPagesFetched /= outer_scans;
7891  }
7892 
7893  /*
7894  * Here we use random page cost because logically-close pages could be far
7895  * apart on disk.
7896  */
7897  *indexStartupCost += (entryPagesFetched + dataPagesFetched) * spc_random_page_cost;
7898 
7899  /*
7900  * Now compute the number of data pages fetched during the scan.
7901  *
7902  * We assume every entry to have the same number of items, and that there
7903  * is no overlap between them. (XXX: tsvector and array opclasses collect
7904  * statistics on the frequency of individual keys; it would be nice to use
7905  * those here.)
7906  */
7907  dataPagesFetched = ceil(numDataPages * counts.exactEntries / numEntries);
7908 
7909  /*
7910  * If there is a lot of overlap among the entries, in particular if one of
7911  * the entries is very frequent, the above calculation can grossly
7912  * under-estimate. As a simple cross-check, calculate a lower bound based
7913  * on the overall selectivity of the quals. At a minimum, we must read
7914  * one item pointer for each matching entry.
7915  *
7916  * The width of each item pointer varies, based on the level of
7917  * compression. We don't have statistics on that, but an average of
7918  * around 3 bytes per item is fairly typical.
7919  */
7920  dataPagesFetchedBySel = ceil(*indexSelectivity *
7921  (numTuples / (BLCKSZ / 3)));
7922  if (dataPagesFetchedBySel > dataPagesFetched)
7923  dataPagesFetched = dataPagesFetchedBySel;
7924 
7925  /* Add one page cpu-cost to the startup cost */
7926  *indexStartupCost += DEFAULT_PAGE_CPU_MULTIPLIER * cpu_operator_cost * counts.searchEntries;
7927 
7928  /*
7929  * Add once again a CPU-cost for those data pages, before amortizing for
7930  * cache.
7931  */
7932  *indexTotalCost += dataPagesFetched * counts.arrayScans * DEFAULT_PAGE_CPU_MULTIPLIER * cpu_operator_cost;
7933 
7934  /* Account for cache effects, the same as above */
7935  if (outer_scans > 1 || counts.arrayScans > 1)
7936  {
7937  dataPagesFetched *= outer_scans * counts.arrayScans;
7938  dataPagesFetched = index_pages_fetched(dataPagesFetched,
7939  (BlockNumber) numDataPages,
7940  numDataPages, root);
7941  dataPagesFetched /= outer_scans;
7942  }
7943 
7944  /* And apply random_page_cost as the cost per page */
7945  *indexTotalCost += *indexStartupCost +
7946  dataPagesFetched * spc_random_page_cost;
7947 
7948  /*
7949  * Add on index qual eval costs, much as in genericcostestimate. We charge
7950  * cpu but we can disregard indexorderbys, since GIN doesn't support
7951  * those.
7952  */
7953  qual_arg_cost = index_other_operands_eval_cost(root, indexQuals);
7954  qual_op_cost = cpu_operator_cost * list_length(indexQuals);
7955 
7956  *indexStartupCost += qual_arg_cost;
7957  *indexTotalCost += qual_arg_cost;
7958 
7959  /*
7960  * Add a cpu cost per search entry, corresponding to the actual visited
7961  * entries.
7962  */
7963  *indexTotalCost += (counts.searchEntries * counts.arrayScans) * (qual_op_cost);
7964  /* Now add a cpu cost per tuple in the posting lists / trees */
7965  *indexTotalCost += (numTuples * *indexSelectivity) * (cpu_index_tuple_cost);
7966  *indexPages = dataPagesFetched;
7967 }
uint32 BlockNumber
Definition: block.h:31
double index_pages_fetched(double tuples_fetched, BlockNumber pages, double index_pages, PlannerInfo *root)
Definition: costsize.c:898
double cpu_index_tuple_cost
Definition: costsize.c:122
void ginGetStats(Relation index, GinStatsData *stats)
Definition: ginutil.c:623
int i
Definition: isn.c:73
static int list_length(const List *l)
Definition: pg_list.h:152
int scale
Definition: pgbench.c:181
static bool gincost_scalararrayopexpr(PlannerInfo *root, IndexOptInfo *index, int indexcol, ScalarArrayOpExpr *clause, double numIndexEntries, GinQualCounts *counts)
Definition: selfuncs.c:7467
static bool gincost_opexpr(PlannerInfo *root, IndexOptInfo *index, int indexcol, OpExpr *clause, GinQualCounts *counts)
Definition: selfuncs.c:7417
bool attHasNormalScan[INDEX_MAX_KEYS]
Definition: selfuncs.c:7290
double exactEntries
Definition: selfuncs.c:7292
double arrayScans
Definition: selfuncs.c:7294
double partialEntries
Definition: selfuncs.c:7291
bool attHasFullScan[INDEX_MAX_KEYS]
Definition: selfuncs.c:7289
double searchEntries
Definition: selfuncs.c:7293
BlockNumber nDataPages
Definition: gin.h:47
BlockNumber nPendingPages
Definition: gin.h:44
BlockNumber nEntryPages
Definition: gin.h:46
int64 nEntries
Definition: gin.h:48
BlockNumber nTotalPages
Definition: gin.h:45

References add_predicate_to_index_quals(), GinQualCounts::arrayScans, GinQualCounts::attHasFullScan, GinQualCounts::attHasNormalScan, RestrictInfo::clause, clauselist_selectivity(), cpu_index_tuple_cost, cpu_operator_cost, DEFAULT_PAGE_CPU_MULTIPLIER, elog, ERROR, GinQualCounts::exactEntries, get_quals_from_indexclauses(), get_tablespace_page_costs(), gincost_opexpr(), gincost_scalararrayopexpr(), ginGetStats(), i, index_close(), index_open(), index_other_operands_eval_cost(), index_pages_fetched(), IndexPath::indexclauses, IndexClause::indexcol, IndexPath::indexinfo, IndexClause::indexquals, IsA, JOIN_INNER, lfirst_node, list_length(), Max, Min, GinStatsData::nDataPages, GinStatsData::nEntries, GinStatsData::nEntryPages, NIL, nodeTag, NoLock, GinStatsData::nPendingPages, GinStatsData::nTotalPages, GinQualCounts::partialEntries, root, scale, and GinQualCounts::searchEntries.

Referenced by ginhandler().
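A hedged illustration of two of the GIN estimates above, with invented statistics and the default BLCKSZ of 8192: roughly numEntryPages^0.15 entry pages are charged per searched key, and the posting-page estimate is floored by one roughly 3-byte item pointer per matching tuple.

#include <math.h>
#include <stdio.h>

int
main(void)
{
    double numEntryPages = 500.0;    /* invented entry-tree size */
    double numTuples = 1000000.0;    /* invented heap tuple count */
    double searchEntries = 4.0;      /* distinct keys the quals will look up */
    double selectivity = 0.005;      /* pretend *indexSelectivity came out at 0.5% */

    /* entry-tree lookups: ~ searchEntries * numEntryPages^0.15 pages */
    double entryPagesFetched = ceil(searchEntries * rint(pow(numEntryPages, 0.15)));  /* 4 * 3 = 12 */

    /* lower bound on posting pages: ~3 bytes per matching item pointer */
    double dataPagesFetchedBySel = ceil(selectivity * (numTuples / (8192 / 3)));      /* ceil(1.83) = 2 */

    printf("entry pages ~ %.0f, data-page floor ~ %.0f\n",
           entryPagesFetched, dataPagesFetchedBySel);
    return 0;
}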

◆ gistcostestimate()

void gistcostestimate ( struct PlannerInfo *root,
struct IndexPath *path,
double loop_count,
Cost *indexStartupCost,
Cost *indexTotalCost,
Selectivity *indexSelectivity,
double *indexCorrelation,
double *indexPages 
)

Definition at line 7173 of file selfuncs.c.

7177 {
7178  IndexOptInfo *index = path->indexinfo;
7179  GenericCosts costs = {0};
7180  Cost descentCost;
7181 
7182  genericcostestimate(root, path, loop_count, &costs);
7183 
7184  /*
7185  * We model index descent costs similarly to those for btree, but to do
7186  * that we first need an idea of the tree height. We somewhat arbitrarily
7187  * assume that the fanout is 100, meaning the tree height is at most
7188  * log100(index->pages).
7189  *
7190  * Although this computation isn't really expensive enough to require
7191  * caching, we might as well use index->tree_height to cache it.
7192  */
7193  if (index->tree_height < 0) /* unknown? */
7194  {
7195  if (index->pages > 1) /* avoid computing log(0) */
7196  index->tree_height = (int) (log(index->pages) / log(100.0));
7197  else
7198  index->tree_height = 0;
7199  }
7200 
7201  /*
7202  * Add a CPU-cost component to represent the costs of initial descent. We
7203  * just use log(N) here not log2(N) since the branching factor isn't
7204  * necessarily two anyway. As for btree, charge once per SA scan.
7205  */
7206  if (index->tuples > 1) /* avoid computing log(0) */
7207  {
7208  descentCost = ceil(log(index->tuples)) * cpu_operator_cost;
7209  costs.indexStartupCost += descentCost;
7210  costs.indexTotalCost += costs.num_sa_scans * descentCost;
7211  }
7212 
7213  /*
7214  * Likewise add a per-page charge, calculated the same as for btrees.
7215  */
7216  descentCost = (index->tree_height + 1) * DEFAULT_PAGE_CPU_MULTIPLIER * cpu_operator_cost;
7217  costs.indexStartupCost += descentCost;
7218  costs.indexTotalCost += costs.num_sa_scans * descentCost;
7219 
7220  *indexStartupCost = costs.indexStartupCost;
7221  *indexTotalCost = costs.indexTotalCost;
7222  *indexSelectivity = costs.indexSelectivity;
7223  *indexCorrelation = costs.indexCorrelation;
7224  *indexPages = costs.numIndexPages;
7225 }

References cpu_operator_cost, DEFAULT_PAGE_CPU_MULTIPLIER, genericcostestimate(), GenericCosts::indexCorrelation, IndexPath::indexinfo, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, GenericCosts::num_sa_scans, GenericCosts::numIndexPages, and root.

Referenced by gisthandler().
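A hedged numeric sketch of the GiST (and identical SP-GiST) descent model above: with the assumed fanout of 100, a 1,000,000-page index gets a cached tree_height of 3, and the CPU descent charge uses the natural log of the (invented) tuple count plus the same 50x per-page multiplier as btree.

#include <math.h>
#include <stdio.h>

int
main(void)
{
    double cpu_operator_cost = 0.0025;   /* default GUC value */
    double pages = 1000000.0;            /* invented index page count */
    double tuples = 10000000.0;          /* invented index tuple count */

    int tree_height = (int) (log(pages) / log(100.0));              /* log100(1e6) = 3 */
    double descentCost = ceil(log(tuples)) * cpu_operator_cost;     /* 17 * 0.0025 = 0.0425 */
    double pageCost = (tree_height + 1) * 50.0 * cpu_operator_cost; /* 4 * 50 * 0.0025 = 0.5 */

    printf("GiST descent CPU cost per scan = %.4f\n", descentCost + pageCost);
    return 0;
}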

◆ hashcostestimate()

void hashcostestimate ( struct PlannerInfo *root,
struct IndexPath *path,
double loop_count,
Cost *indexStartupCost,
Cost *indexTotalCost,
Selectivity *indexSelectivity,
double *indexCorrelation,
double *indexPages 
)

Definition at line 7131 of file selfuncs.c.

7135 {
7136  GenericCosts costs = {0};
7137 
7138  genericcostestimate(root, path, loop_count, &costs);
7139 
7140  /*
7141  * A hash index has no descent costs as such, since the index AM can go
7142  * directly to the target bucket after computing the hash value. There
7143  * are a couple of other hash-specific costs that we could conceivably add
7144  * here, though:
7145  *
7146  * Ideally we'd charge spc_random_page_cost for each page in the target
7147  * bucket, not just the numIndexPages pages that genericcostestimate
7148  * thought we'd visit. However in most cases we don't know which bucket
7149  * that will be. There's no point in considering the average bucket size
7150  * because the hash AM makes sure that's always one page.
7151  *
7152  * Likewise, we could consider charging some CPU for each index tuple in
7153  * the bucket, if we knew how many there were. But the per-tuple cost is
7154  * just a hash value comparison, not a general datatype-dependent
7155  * comparison, so any such charge ought to be quite a bit less than
7156  * cpu_operator_cost; which makes it probably not worth worrying about.
7157  *
7158  * A bigger issue is that chance hash-value collisions will result in
7159  * wasted probes into the heap. We don't currently attempt to model this
7160  * cost on the grounds that it's rare, but maybe it's not rare enough.
7161  * (Any fix for this ought to consider the generic lossy-operator problem,
7162  * though; it's not entirely hash-specific.)
7163  */
7164 
7165  *indexStartupCost = costs.indexStartupCost;
7166  *indexTotalCost = costs.indexTotalCost;
7167  *indexSelectivity = costs.indexSelectivity;
7168  *indexCorrelation = costs.indexCorrelation;
7169  *indexPages = costs.numIndexPages;
7170 }

References genericcostestimate(), GenericCosts::indexCorrelation, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, GenericCosts::numIndexPages, and root.

Referenced by hashhandler().

◆ spgcostestimate()

void spgcostestimate ( struct PlannerInfo *root,
struct IndexPath *path,
double loop_count,
Cost *indexStartupCost,
Cost *indexTotalCost,
Selectivity *indexSelectivity,
double *indexCorrelation,
double *indexPages 
)

Definition at line 7228 of file selfuncs.c.

7232 {
7233  IndexOptInfo *index = path->indexinfo;
7234  GenericCosts costs = {0};
7235  Cost descentCost;
7236 
7237  genericcostestimate(root, path, loop_count, &costs);
7238 
7239  /*
7240  * We model index descent costs similarly to those for btree, but to do
7241  * that we first need an idea of the tree height. We somewhat arbitrarily
7242  * assume that the fanout is 100, meaning the tree height is at most
7243  * log100(index->pages).
7244  *
7245  * Although this computation isn't really expensive enough to require
7246  * caching, we might as well use index->tree_height to cache it.
7247  */
7248  if (index->tree_height < 0) /* unknown? */
7249  {
7250  if (index->pages > 1) /* avoid computing log(0) */
7251  index->tree_height = (int) (log(index->pages) / log(100.0));
7252  else
7253  index->tree_height = 0;
7254  }
7255 
7256  /*
7257  * Add a CPU-cost component to represent the costs of initial descent. We
7258  * just use log(N) here not log2(N) since the branching factor isn't
7259  * necessarily two anyway. As for btree, charge once per SA scan.
7260  */
7261  if (index->tuples > 1) /* avoid computing log(0) */
7262  {
7263  descentCost = ceil(log(index->tuples)) * cpu_operator_cost;
7264  costs.indexStartupCost += descentCost;
7265  costs.indexTotalCost += costs.num_sa_scans * descentCost;
7266  }
7267 
7268  /*
7269  * Likewise add a per-page charge, calculated the same as for btrees.
7270  */
7271  descentCost = (index->tree_height + 1) * DEFAULT_PAGE_CPU_MULTIPLIER * cpu_operator_cost;
7272  costs.indexStartupCost += descentCost;
7273  costs.indexTotalCost += costs.num_sa_scans * descentCost;
7274 
7275  *indexStartupCost = costs.indexStartupCost;
7276  *indexTotalCost = costs.indexTotalCost;
7277  *indexSelectivity = costs.indexSelectivity;
7278  *indexCorrelation = costs.indexCorrelation;
7279  *indexPages = costs.numIndexPages;
7280 }

References cpu_operator_cost, DEFAULT_PAGE_CPU_MULTIPLIER, genericcostestimate(), GenericCosts::indexCorrelation, IndexPath::indexinfo, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, GenericCosts::num_sa_scans, GenericCosts::numIndexPages, and root.

Referenced by spghandler().