PostgreSQL Source Code (git master)
index_selfuncs.h File Reference
#include "access/amapi.h"
Include dependency graph for index_selfuncs.h: (graph omitted)
This graph shows which files directly or indirectly include this file: (graph omitted)


Functions

void brincostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void btcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void hashcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void gistcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void spgcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void gincostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 

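All six estimators share this signature because each implements the amcostestimate entry point that an index access method publishes in its IndexAmRoutine. For reference, a sketch of the function-pointer type that access/amapi.h (included above) declares for this purpose:

typedef void (*amcostestimate_function) (struct PlannerInfo *root,
                                         struct IndexPath *path,
                                         double loop_count,
                                         Cost *indexStartupCost,
                                         Cost *indexTotalCost,
                                         Selectivity *indexSelectivity,
                                         double *indexCorrelation,
                                         double *indexPages);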
Function Documentation

◆ brincostestimate()

void brincostestimate ( struct PlannerInfo *  root,
struct IndexPath *  path,
double  loop_count,
Cost *  indexStartupCost,
Cost *  indexTotalCost,
Selectivity *  indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 7703 of file selfuncs.c.

7707 {
7708  IndexOptInfo *index = path->indexinfo;
7709  List *indexQuals = get_quals_from_indexclauses(path->indexclauses);
7710  double numPages = index->pages;
7711  RelOptInfo *baserel = index->rel;
7712  RangeTblEntry *rte = planner_rt_fetch(baserel->relid, root);
7713  Cost spc_seq_page_cost;
7714  Cost spc_random_page_cost;
7715  double qual_arg_cost;
7716  double qualSelectivity;
7717  BrinStatsData statsData;
7718  double indexRanges;
7719  double minimalRanges;
7720  double estimatedRanges;
7721  double selec;
7722  Relation indexRel;
7723  ListCell *l;
7724  VariableStatData vardata;
7725 
7726  Assert(rte->rtekind == RTE_RELATION);
7727 
7728  /* fetch estimated page cost for the tablespace containing the index */
7729  get_tablespace_page_costs(index->reltablespace,
7730  &spc_random_page_cost,
7731  &spc_seq_page_cost);
7732 
7733  /*
7734  * Obtain some data from the index itself, if possible. Otherwise invent
7735  * some plausible internal statistics based on the relation page count.
7736  */
7737  if (!index->hypothetical)
7738  {
7739  /*
7740  * A lock should have already been obtained on the index in plancat.c.
7741  */
7742  indexRel = index_open(index->indexoid, NoLock);
7743  brinGetStats(indexRel, &statsData);
7744  index_close(indexRel, NoLock);
7745 
7746  /* work out the actual number of ranges in the index */
7747  indexRanges = Max(ceil((double) baserel->pages /
7748  statsData.pagesPerRange), 1.0);
7749  }
7750  else
7751  {
7752  /*
7753  * Assume default number of pages per range, and estimate the number
7754  * of ranges based on that.
7755  */
7756  indexRanges = Max(ceil((double) baserel->pages /
7757  BRIN_DEFAULT_PAGES_PER_RANGE), 1.0);
7758 
7759  statsData.pagesPerRange = BRIN_DEFAULT_PAGES_PER_RANGE;
7760  statsData.revmapNumPages = (indexRanges / REVMAP_PAGE_MAXITEMS) + 1;
7761  }
7762 
7763  /*
7764  * Compute index correlation
7765  *
7766  * Because we can use all index quals equally when scanning, we can use
7767  * the largest correlation (in absolute value) among columns used by the
7768  * query. Start at zero, the worst possible case. If we cannot find any
7769  * correlation statistics, we will keep it as 0.
7770  */
7771  *indexCorrelation = 0;
7772 
7773  foreach(l, path->indexclauses)
7774  {
7775  IndexClause *iclause = lfirst_node(IndexClause, l);
7776  AttrNumber attnum = index->indexkeys[iclause->indexcol];
7777 
7778  /* attempt to lookup stats in relation for this index column */
7779  if (attnum != 0)
7780  {
7781  /* Simple variable -- look to stats for the underlying table */
7782  if (get_relation_stats_hook &&
7783  (*get_relation_stats_hook) (root, rte, attnum, &vardata))
7784  {
7785  /*
7786  * The hook took control of acquiring a stats tuple. If it
7787  * did supply a tuple, it'd better have supplied a freefunc.
7788  */
7789  if (HeapTupleIsValid(vardata.statsTuple) && !vardata.freefunc)
7790  elog(ERROR,
7791  "no function provided to release variable stats with");
7792  }
7793  else
7794  {
7795  vardata.statsTuple =
7796  SearchSysCache3(STATRELATTINH,
7797  ObjectIdGetDatum(rte->relid),
7798  Int16GetDatum(attnum),
7799  BoolGetDatum(false));
7800  vardata.freefunc = ReleaseSysCache;
7801  }
7802  }
7803  else
7804  {
7805  /*
7806  * Looks like we've found an expression column in the index. Let's
7807  * see if there's any stats for it.
7808  */
7809 
7810  /* get the attnum from the 0-based index. */
7811  attnum = iclause->indexcol + 1;
7812 
7813  if (get_index_stats_hook &&
7814  (*get_index_stats_hook) (root, index->indexoid, attnum, &vardata))
7815  {
7816  /*
7817  * The hook took control of acquiring a stats tuple. If it
7818  * did supply a tuple, it'd better have supplied a freefunc.
7819  */
7820  if (HeapTupleIsValid(vardata.statsTuple) &&
7821  !vardata.freefunc)
7822  elog(ERROR, "no function provided to release variable stats with");
7823  }
7824  else
7825  {
7826  vardata.statsTuple = SearchSysCache3(STATRELATTINH,
7827  ObjectIdGetDatum(index->indexoid),
7828  Int16GetDatum(attnum),
7829  BoolGetDatum(false));
7830  vardata.freefunc = ReleaseSysCache;
7831  }
7832  }
7833 
7834  if (HeapTupleIsValid(vardata.statsTuple))
7835  {
7836  AttStatsSlot sslot;
7837 
7838  if (get_attstatsslot(&sslot, vardata.statsTuple,
7839  STATISTIC_KIND_CORRELATION, InvalidOid,
7840  ATTSTATSSLOT_NUMBERS))
7841  {
7842  double varCorrelation = 0.0;
7843 
7844  if (sslot.nnumbers > 0)
7845  varCorrelation = Abs(sslot.numbers[0]);
7846 
7847  if (varCorrelation > *indexCorrelation)
7848  *indexCorrelation = varCorrelation;
7849 
7850  free_attstatsslot(&sslot);
7851  }
7852  }
7853 
7854  ReleaseVariableStats(vardata);
7855  }
7856 
7857  qualSelectivity = clauselist_selectivity(root, indexQuals,
7858  baserel->relid,
7859  JOIN_INNER, NULL);
7860 
7861  /*
7862  * Now calculate the minimum possible ranges we could match with if all of
7863  * the rows were in the perfect order in the table's heap.
7864  */
7865  minimalRanges = ceil(indexRanges * qualSelectivity);
7866 
7867  /*
7868  * Now estimate the number of ranges that we'll touch by using the
7869  * indexCorrelation from the stats. Careful not to divide by zero (note
7870  * we're using the absolute value of the correlation).
7871  */
7872  if (*indexCorrelation < 1.0e-10)
7873  estimatedRanges = indexRanges;
7874  else
7875  estimatedRanges = Min(minimalRanges / *indexCorrelation, indexRanges);
7876 
7877  /* we expect to visit this portion of the table */
7878  selec = estimatedRanges / indexRanges;
7879 
7880  CLAMP_PROBABILITY(selec);
7881 
7882  *indexSelectivity = selec;
7883 
7884  /*
7885  * Compute the index qual costs, much as in genericcostestimate, to add to
7886  * the index costs. We can disregard indexorderbys, since BRIN doesn't
7887  * support those.
7888  */
7889  qual_arg_cost = index_other_operands_eval_cost(root, indexQuals);
7890 
7891  /*
7892  * Compute the startup cost as the cost to read the whole revmap
7893  * sequentially, including the cost to execute the index quals.
7894  */
7895  *indexStartupCost =
7896  spc_seq_page_cost * statsData.revmapNumPages * loop_count;
7897  *indexStartupCost += qual_arg_cost;
7898 
7899  /*
7900  * To read a BRIN index there might be a bit of back and forth over
7901  * regular pages, as revmap might point to them out of sequential order;
7902  * calculate the total cost as reading the whole index in random order.
7903  */
7904  *indexTotalCost = *indexStartupCost +
7905  spc_random_page_cost * (numPages - statsData.revmapNumPages) * loop_count;
7906 
7907  /*
7908  * Charge a small amount per range tuple which we expect to match to. This
7909  * is meant to reflect the costs of manipulating the bitmap. The BRIN scan
7910  * will set a bit for each page in the range when we find a matching
7911  * range, so we must multiply the charge by the number of pages in the
7912  * range.
7913  */
7914  *indexTotalCost += 0.1 * cpu_operator_cost * estimatedRanges *
7915  statsData.pagesPerRange;
7916 
7917  *indexPages = index->pages;
7918 }
int16 AttrNumber
Definition: attnum.h:21
void brinGetStats(Relation index, BrinStatsData *stats)
Definition: brin.c:1228
#define BRIN_DEFAULT_PAGES_PER_RANGE
Definition: brin.h:38
#define REVMAP_PAGE_MAXITEMS
Definition: brin_page.h:93
#define Min(x, y)
Definition: c.h:986
#define Max(x, y)
Definition: c.h:980
#define Abs(x)
Definition: c.h:992
Selectivity clauselist_selectivity(PlannerInfo *root, List *clauses, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
Definition: clausesel.c:102
double cpu_operator_cost
Definition: costsize.c:123
#define ERROR
Definition: elog.h:33
#define elog(elevel,...)
Definition: elog.h:218
#define HeapTupleIsValid(tuple)
Definition: htup.h:78
void index_close(Relation relation, LOCKMODE lockmode)
Definition: indexam.c:158
Relation index_open(Oid relationId, LOCKMODE lockmode)
Definition: indexam.c:132
Assert(fmt[strlen(fmt) - 1] != '\n')
#define NoLock
Definition: lockdefs.h:34
void free_attstatsslot(AttStatsSlot *sslot)
Definition: lsyscache.c:3294
bool get_attstatsslot(AttStatsSlot *sslot, HeapTuple statstuple, int reqkind, Oid reqop, int flags)
Definition: lsyscache.c:3177
#define ATTSTATSSLOT_NUMBERS
Definition: lsyscache.h:43
double Cost
Definition: nodes.h:672
@ JOIN_INNER
Definition: nodes.h:712
@ RTE_RELATION
Definition: parsenodes.h:990
#define planner_rt_fetch(rti, root)
Definition: pathnodes.h:388
int16 attnum
Definition: pg_attribute.h:83
#define lfirst_node(type, lc)
Definition: pg_list.h:172
#define ObjectIdGetDatum(X)
Definition: postgres.h:551
#define BoolGetDatum(X)
Definition: postgres.h:446
#define Int16GetDatum(X)
Definition: postgres.h:495
#define InvalidOid
Definition: postgres_ext.h:36
List * get_quals_from_indexclauses(List *indexclauses)
Definition: selfuncs.c:6285
get_index_stats_hook_type get_index_stats_hook
Definition: selfuncs.c:145
Cost index_other_operands_eval_cost(PlannerInfo *root, List *indexquals)
Definition: selfuncs.c:6315
get_relation_stats_hook_type get_relation_stats_hook
Definition: selfuncs.c:144
#define ReleaseVariableStats(vardata)
Definition: selfuncs.h:99
#define CLAMP_PROBABILITY(p)
Definition: selfuncs.h:63
void get_tablespace_page_costs(Oid spcid, double *spc_random_page_cost, double *spc_seq_page_cost)
Definition: spccache.c:181
float4 * numbers
Definition: lsyscache.h:56
int nnumbers
Definition: lsyscache.h:57
BlockNumber revmapNumPages
Definition: brin.h:34
BlockNumber pagesPerRange
Definition: brin.h:33
AttrNumber indexcol
Definition: pathnodes.h:1295
List * indexclauses
Definition: pathnodes.h:1247
IndexOptInfo * indexinfo
Definition: pathnodes.h:1246
Definition: pg_list.h:51
RTEKind rtekind
Definition: parsenodes.h:1007
Index relid
Definition: pathnodes.h:709
BlockNumber pages
Definition: pathnodes.h:720
HeapTuple statsTuple
Definition: selfuncs.h:89
void(* freefunc)(HeapTuple tuple)
Definition: selfuncs.h:91
Definition: type.h:90
void ReleaseSysCache(HeapTuple tuple)
Definition: syscache.c:1198
HeapTuple SearchSysCache3(int cacheId, Datum key1, Datum key2, Datum key3)
Definition: syscache.c:1172
@ STATRELATTINH
Definition: syscache.h:95

References Abs, Assert(), attnum, ATTSTATSSLOT_NUMBERS, BoolGetDatum, BRIN_DEFAULT_PAGES_PER_RANGE, brinGetStats(), CLAMP_PROBABILITY, clauselist_selectivity(), cpu_operator_cost, elog, ERROR, free_attstatsslot(), VariableStatData::freefunc, get_attstatsslot(), get_index_stats_hook, get_quals_from_indexclauses(), get_relation_stats_hook, get_tablespace_page_costs(), HeapTupleIsValid, index_close(), index_open(), index_other_operands_eval_cost(), IndexPath::indexclauses, IndexClause::indexcol, IndexPath::indexinfo, Int16GetDatum, InvalidOid, JOIN_INNER, lfirst_node, Max, Min, AttStatsSlot::nnumbers, NoLock, AttStatsSlot::numbers, ObjectIdGetDatum, RelOptInfo::pages, BrinStatsData::pagesPerRange, planner_rt_fetch, ReleaseSysCache(), ReleaseVariableStats, RangeTblEntry::relid, RelOptInfo::relid, REVMAP_PAGE_MAXITEMS, BrinStatsData::revmapNumPages, RTE_RELATION, RangeTblEntry::rtekind, SearchSysCache3(), STATRELATTINH, and VariableStatData::statsTuple.

Referenced by brinhandler().
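To see the range arithmetic in action, here is a small standalone C sketch (not PostgreSQL code; every input is a made-up illustrative value) that reproduces the indexRanges, minimalRanges, estimatedRanges, and selectivity computation from the listing above:

#include <math.h>
#include <stdio.h>

int
main(void)
{
    double      heapPages = 10000.0;    /* stands in for baserel->pages */
    double      pagesPerRange = 128.0;  /* BRIN_DEFAULT_PAGES_PER_RANGE */
    double      qualSelectivity = 0.01; /* stands in for clauselist_selectivity() */
    double      indexCorrelation = 0.5; /* |correlation| found in the stats */

    /* one summary range per pagesPerRange heap pages, at least one */
    double      indexRanges = fmax(ceil(heapPages / pagesPerRange), 1.0);

    /* fewest ranges we could match if the heap were perfectly ordered */
    double      minimalRanges = ceil(indexRanges * qualSelectivity);

    /* scale up by 1/|correlation|, but never beyond the whole index */
    double      estimatedRanges = (indexCorrelation < 1.0e-10) ?
        indexRanges : fmin(minimalRanges / indexCorrelation, indexRanges);
    double      selec = estimatedRanges / indexRanges;

    /* prints: ranges=79 minimal=1 estimated=2 selectivity=0.025 */
    printf("ranges=%.0f minimal=%.0f estimated=%.0f selectivity=%.3f\n",
           indexRanges, minimalRanges, estimatedRanges, selec);
    return 0;
}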

◆ btcostestimate()

void btcostestimate ( struct PlannerInfo *  root,
struct IndexPath *  path,
double  loop_count,
Cost *  indexStartupCost,
Cost *  indexTotalCost,
Selectivity *  indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 6608 of file selfuncs.c.

6612 {
6613  IndexOptInfo *index = path->indexinfo;
6614  GenericCosts costs;
6615  Oid relid;
6616  AttrNumber colnum;
6617  VariableStatData vardata;
6618  double numIndexTuples;
6619  Cost descentCost;
6620  List *indexBoundQuals;
6621  int indexcol;
6622  bool eqQualHere;
6623  bool found_saop;
6624  bool found_is_null_op;
6625  double num_sa_scans;
6626  ListCell *lc;
6627 
6628  /*
6629  * For a btree scan, only leading '=' quals plus inequality quals for the
6630  * immediately next attribute contribute to index selectivity (these are
6631  * the "boundary quals" that determine the starting and stopping points of
6632  * the index scan). Additional quals can suppress visits to the heap, so
6633  * it's OK to count them in indexSelectivity, but they should not count
6634  * for estimating numIndexTuples. So we must examine the given indexquals
6635  * to find out which ones count as boundary quals. We rely on the
6636  * knowledge that they are given in index column order.
6637  *
6638  * For a RowCompareExpr, we consider only the first column, just as
6639  * rowcomparesel() does.
6640  *
6641  * If there's a ScalarArrayOpExpr in the quals, we'll actually perform N
6642  * index scans not one, but the ScalarArrayOpExpr's operator can be
6643  * considered to act the same as it normally does.
6644  */
6645  indexBoundQuals = NIL;
6646  indexcol = 0;
6647  eqQualHere = false;
6648  found_saop = false;
6649  found_is_null_op = false;
6650  num_sa_scans = 1;
6651  foreach(lc, path->indexclauses)
6652  {
6653  IndexClause *iclause = lfirst_node(IndexClause, lc);
6654  ListCell *lc2;
6655 
6656  if (indexcol != iclause->indexcol)
6657  {
6658  /* Beginning of a new column's quals */
6659  if (!eqQualHere)
6660  break; /* done if no '=' qual for indexcol */
6661  eqQualHere = false;
6662  indexcol++;
6663  if (indexcol != iclause->indexcol)
6664  break; /* no quals at all for indexcol */
6665  }
6666 
6667  /* Examine each indexqual associated with this index clause */
6668  foreach(lc2, iclause->indexquals)
6669  {
6670  RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc2);
6671  Expr *clause = rinfo->clause;
6672  Oid clause_op = InvalidOid;
6673  int op_strategy;
6674 
6675  if (IsA(clause, OpExpr))
6676  {
6677  OpExpr *op = (OpExpr *) clause;
6678 
6679  clause_op = op->opno;
6680  }
6681  else if (IsA(clause, RowCompareExpr))
6682  {
6683  RowCompareExpr *rc = (RowCompareExpr *) clause;
6684 
6685  clause_op = linitial_oid(rc->opnos);
6686  }
6687  else if (IsA(clause, ScalarArrayOpExpr))
6688  {
6689  ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
6690  Node *other_operand = (Node *) lsecond(saop->args);
6691  int alength = estimate_array_length(other_operand);
6692 
6693  clause_op = saop->opno;
6694  found_saop = true;
6695  /* count number of SA scans induced by indexBoundQuals only */
6696  if (alength > 1)
6697  num_sa_scans *= alength;
6698  }
6699  else if (IsA(clause, NullTest))
6700  {
6701  NullTest *nt = (NullTest *) clause;
6702 
6703  if (nt->nulltesttype == IS_NULL)
6704  {
6705  found_is_null_op = true;
6706  /* IS NULL is like = for selectivity purposes */
6707  eqQualHere = true;
6708  }
6709  }
6710  else
6711  elog(ERROR, "unsupported indexqual type: %d",
6712  (int) nodeTag(clause));
6713 
6714  /* check for equality operator */
6715  if (OidIsValid(clause_op))
6716  {
6717  op_strategy = get_op_opfamily_strategy(clause_op,
6718  index->opfamily[indexcol]);
6719  Assert(op_strategy != 0); /* not a member of opfamily?? */
6720  if (op_strategy == BTEqualStrategyNumber)
6721  eqQualHere = true;
6722  }
6723 
6724  indexBoundQuals = lappend(indexBoundQuals, rinfo);
6725  }
6726  }
6727 
6728  /*
6729  * If index is unique and we found an '=' clause for each column, we can
6730  * just assume numIndexTuples = 1 and skip the expensive
6731  * clauselist_selectivity calculations. However, a ScalarArrayOp or
6732  * NullTest invalidates that theory, even though it sets eqQualHere.
6733  */
6734  if (index->unique &&
6735  indexcol == index->nkeycolumns - 1 &&
6736  eqQualHere &&
6737  !found_saop &&
6738  !found_is_null_op)
6739  numIndexTuples = 1.0;
6740  else
6741  {
6742  List *selectivityQuals;
6743  Selectivity btreeSelectivity;
6744 
6745  /*
6746  * If the index is partial, AND the index predicate with the
6747  * index-bound quals to produce a more accurate idea of the number of
6748  * rows covered by the bound conditions.
6749  */
6750  selectivityQuals = add_predicate_to_index_quals(index, indexBoundQuals);
6751 
6752  btreeSelectivity = clauselist_selectivity(root, selectivityQuals,
6753  index->rel->relid,
6754  JOIN_INNER,
6755  NULL);
6756  numIndexTuples = btreeSelectivity * index->rel->tuples;
6757 
6758  /*
6759  * As in genericcostestimate(), we have to adjust for any
6760  * ScalarArrayOpExpr quals included in indexBoundQuals, and then round
6761  * to integer.
6762  */
6763  numIndexTuples = rint(numIndexTuples / num_sa_scans);
6764  }
6765 
6766  /*
6767  * Now do generic index cost estimation.
6768  */
6769  MemSet(&costs, 0, sizeof(costs));
6770  costs.numIndexTuples = numIndexTuples;
6771 
6772  genericcostestimate(root, path, loop_count, &costs);
6773 
6774  /*
6775  * Add a CPU-cost component to represent the costs of initial btree
6776  * descent. We don't charge any I/O cost for touching upper btree levels,
6777  * since they tend to stay in cache, but we still have to do about log2(N)
6778  * comparisons to descend a btree of N leaf tuples. We charge one
6779  * cpu_operator_cost per comparison.
6780  *
6781  * If there are ScalarArrayOpExprs, charge this once per SA scan. The
6782  * ones after the first one are not startup cost so far as the overall
6783  * plan is concerned, so add them only to "total" cost.
6784  */
6785  if (index->tuples > 1) /* avoid computing log(0) */
6786  {
6787  descentCost = ceil(log(index->tuples) / log(2.0)) * cpu_operator_cost;
6788  costs.indexStartupCost += descentCost;
6789  costs.indexTotalCost += costs.num_sa_scans * descentCost;
6790  }
6791 
6792  /*
6793  * Even though we're not charging I/O cost for touching upper btree pages,
6794  * it's still reasonable to charge some CPU cost per page descended
6795  * through. Moreover, if we had no such charge at all, bloated indexes
6796  * would appear to have the same search cost as unbloated ones, at least
6797  * in cases where only a single leaf page is expected to be visited. This
6798  * cost is somewhat arbitrarily set at 50x cpu_operator_cost per page
6799  * touched. The number of such pages is btree tree height plus one (ie,
6800  * we charge for the leaf page too). As above, charge once per SA scan.
6801  */
6802  descentCost = (index->tree_height + 1) * 50.0 * cpu_operator_cost;
6803  costs.indexStartupCost += descentCost;
6804  costs.indexTotalCost += costs.num_sa_scans * descentCost;
6805 
6806  /*
6807  * If we can get an estimate of the first column's ordering correlation C
6808  * from pg_statistic, estimate the index correlation as C for a
6809  * single-column index, or C * 0.75 for multiple columns. (The idea here
6810  * is that multiple columns dilute the importance of the first column's
6811  * ordering, but don't negate it entirely. Before 8.0 we divided the
6812  * correlation by the number of columns, but that seems too strong.)
6813  */
6814  MemSet(&vardata, 0, sizeof(vardata));
6815 
6816  if (index->indexkeys[0] != 0)
6817  {
6818  /* Simple variable --- look to stats for the underlying table */
6819  RangeTblEntry *rte = planner_rt_fetch(index->rel->relid, root);
6820 
6821  Assert(rte->rtekind == RTE_RELATION);
6822  relid = rte->relid;
6823  Assert(relid != InvalidOid);
6824  colnum = index->indexkeys[0];
6825 
6826  if (get_relation_stats_hook &&
6827  (*get_relation_stats_hook) (root, rte, colnum, &vardata))
6828  {
6829  /*
6830  * The hook took control of acquiring a stats tuple. If it did
6831  * supply a tuple, it'd better have supplied a freefunc.
6832  */
6833  if (HeapTupleIsValid(vardata.statsTuple) &&
6834  !vardata.freefunc)
6835  elog(ERROR, "no function provided to release variable stats with");
6836  }
6837  else
6838  {
6839  vardata.statsTuple = SearchSysCache3(STATRELATTINH,
6840  ObjectIdGetDatum(relid),
6841  Int16GetDatum(colnum),
6842  BoolGetDatum(rte->inh));
6843  vardata.freefunc = ReleaseSysCache;
6844  }
6845  }
6846  else
6847  {
6848  /* Expression --- maybe there are stats for the index itself */
6849  relid = index->indexoid;
6850  colnum = 1;
6851 
6852  if (get_index_stats_hook &&
6853  (*get_index_stats_hook) (root, relid, colnum, &vardata))
6854  {
6855  /*
6856  * The hook took control of acquiring a stats tuple. If it did
6857  * supply a tuple, it'd better have supplied a freefunc.
6858  */
6859  if (HeapTupleIsValid(vardata.statsTuple) &&
6860  !vardata.freefunc)
6861  elog(ERROR, "no function provided to release variable stats with");
6862  }
6863  else
6864  {
6865  vardata.statsTuple = SearchSysCache3(STATRELATTINH,
6866  ObjectIdGetDatum(relid),
6867  Int16GetDatum(colnum),
6868  BoolGetDatum(false));
6869  vardata.freefunc = ReleaseSysCache;
6870  }
6871  }
6872 
6873  if (HeapTupleIsValid(vardata.statsTuple))
6874  {
6875  Oid sortop;
6876  AttStatsSlot sslot;
6877 
6878  sortop = get_opfamily_member(index->opfamily[0],
6879  index->opcintype[0],
6880  index->opcintype[0],
6881  BTLessStrategyNumber);
6882  if (OidIsValid(sortop) &&
6883  get_attstatsslot(&sslot, vardata.statsTuple,
6884  STATISTIC_KIND_CORRELATION, sortop,
6885  ATTSTATSSLOT_NUMBERS))
6886  {
6887  double varCorrelation;
6888 
6889  Assert(sslot.nnumbers == 1);
6890  varCorrelation = sslot.numbers[0];
6891 
6892  if (index->reverse_sort[0])
6893  varCorrelation = -varCorrelation;
6894 
6895  if (index->nkeycolumns > 1)
6896  costs.indexCorrelation = varCorrelation * 0.75;
6897  else
6898  costs.indexCorrelation = varCorrelation;
6899 
6900  free_attstatsslot(&sslot);
6901  }
6902  }
6903 
6904  ReleaseVariableStats(vardata);
6905 
6906  *indexStartupCost = costs.indexStartupCost;
6907  *indexTotalCost = costs.indexTotalCost;
6908  *indexSelectivity = costs.indexSelectivity;
6909  *indexCorrelation = costs.indexCorrelation;
6910  *indexPages = costs.numIndexPages;
6911 }
#define MemSet(start, val, len)
Definition: c.h:1008
#define OidIsValid(objectId)
Definition: c.h:710
List * lappend(List *list, void *datum)
Definition: list.c:336
int get_op_opfamily_strategy(Oid opno, Oid opfamily)
Definition: lsyscache.c:81
Oid get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype, int16 strategy)
Definition: lsyscache.c:164
#define IsA(nodeptr, _type_)
Definition: nodes.h:589
#define nodeTag(nodeptr)
Definition: nodes.h:543
double Selectivity
Definition: nodes.h:671
#define NIL
Definition: pg_list.h:65
#define lsecond(l)
Definition: pg_list.h:179
#define linitial_oid(l)
Definition: pg_list.h:176
unsigned int Oid
Definition: postgres_ext.h:31
@ IS_NULL
Definition: primnodes.h:1259
int estimate_array_length(Node *arrayexpr)
Definition: selfuncs.c:2132
void genericcostestimate(PlannerInfo *root, IndexPath *path, double loop_count, GenericCosts *costs)
Definition: selfuncs.c:6369
List * add_predicate_to_index_quals(IndexOptInfo *index, List *indexQuals)
Definition: selfuncs.c:6587
#define BTLessStrategyNumber
Definition: stratnum.h:29
#define BTEqualStrategyNumber
Definition: stratnum.h:31
Selectivity indexSelectivity
Definition: selfuncs.h:124
Cost indexStartupCost
Definition: selfuncs.h:122
double indexCorrelation
Definition: selfuncs.h:125
double num_sa_scans
Definition: selfuncs.h:131
Cost indexTotalCost
Definition: selfuncs.h:123
double numIndexPages
Definition: selfuncs.h:128
double numIndexTuples
Definition: selfuncs.h:129
List * indexquals
Definition: pathnodes.h:1293
Definition: nodes.h:539
NullTestType nulltesttype
Definition: primnodes.h:1266
Oid opno
Definition: primnodes.h:542
Expr * clause
Definition: pathnodes.h:2059

References add_predicate_to_index_quals(), ScalarArrayOpExpr::args, Assert(), ATTSTATSSLOT_NUMBERS, BoolGetDatum, BTEqualStrategyNumber, BTLessStrategyNumber, RestrictInfo::clause, clauselist_selectivity(), cpu_operator_cost, elog, ERROR, estimate_array_length(), free_attstatsslot(), VariableStatData::freefunc, genericcostestimate(), get_attstatsslot(), get_index_stats_hook, get_op_opfamily_strategy(), get_opfamily_member(), get_relation_stats_hook, HeapTupleIsValid, IndexPath::indexclauses, IndexClause::indexcol, GenericCosts::indexCorrelation, IndexPath::indexinfo, IndexClause::indexquals, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, RangeTblEntry::inh, Int16GetDatum, InvalidOid, IS_NULL, IsA, JOIN_INNER, lappend(), lfirst_node, linitial_oid, lsecond, MemSet, NIL, AttStatsSlot::nnumbers, nodeTag, NullTest::nulltesttype, GenericCosts::num_sa_scans, AttStatsSlot::numbers, GenericCosts::numIndexPages, GenericCosts::numIndexTuples, ObjectIdGetDatum, OidIsValid, OpExpr::opno, ScalarArrayOpExpr::opno, RowCompareExpr::opnos, planner_rt_fetch, ReleaseSysCache(), ReleaseVariableStats, RangeTblEntry::relid, RTE_RELATION, RangeTblEntry::rtekind, SearchSysCache3(), STATRELATTINH, and VariableStatData::statsTuple.

Referenced by bthandler().
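The two descent charges added above are easy to reproduce by hand. A standalone C sketch with illustrative numbers (0.0025 is the documented default for the cpu_operator_cost GUC):

#include <math.h>
#include <stdio.h>

int
main(void)
{
    double      tuples = 1000000.0; /* leaf tuples in the index */
    int         tree_height = 2;    /* btree levels above the leaf */
    double      cpu_operator_cost = 0.0025; /* planner GUC default */

    /* about log2(N) comparisons to descend to the target leaf */
    double      cmpCost = ceil(log(tuples) / log(2.0)) * cpu_operator_cost;

    /* 50x cpu_operator_cost per page descended through, leaf included */
    double      pageCost = (tree_height + 1) * 50.0 * cpu_operator_cost;

    /* prints: comparison descent: 0.0500, per-page descent: 0.3750 */
    printf("comparison descent: %.4f, per-page descent: %.4f\n",
           cmpCost, pageCost);
    return 0;
}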

◆ gincostestimate()

void gincostestimate ( struct PlannerInfo *  root,
struct IndexPath *  path,
double  loop_count,
Cost *  indexStartupCost,
Cost *  indexTotalCost,
Selectivity *  indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 7372 of file selfuncs.c.

7376 {
7377  IndexOptInfo *index = path->indexinfo;
7378  List *indexQuals = get_quals_from_indexclauses(path->indexclauses);
7379  List *selectivityQuals;
7380  double numPages = index->pages,
7381  numTuples = index->tuples;
7382  double numEntryPages,
7383  numDataPages,
7384  numPendingPages,
7385  numEntries;
7386  GinQualCounts counts;
7387  bool matchPossible;
7388  bool fullIndexScan;
7389  double partialScale;
7390  double entryPagesFetched,
7391  dataPagesFetched,
7392  dataPagesFetchedBySel;
7393  double qual_op_cost,
7394  qual_arg_cost,
7395  spc_random_page_cost,
7396  outer_scans;
7397  Relation indexRel;
7398  GinStatsData ginStats;
7399  ListCell *lc;
7400  int i;
7401 
7402  /*
7403  * Obtain statistical information from the meta page, if possible. Else
7404  * set ginStats to zeroes, and we'll cope below.
7405  */
7406  if (!index->hypothetical)
7407  {
7408  /* Lock should have already been obtained in plancat.c */
7409  indexRel = index_open(index->indexoid, NoLock);
7410  ginGetStats(indexRel, &ginStats);
7411  index_close(indexRel, NoLock);
7412  }
7413  else
7414  {
7415  memset(&ginStats, 0, sizeof(ginStats));
7416  }
7417 
7418  /*
7419  * Assuming we got valid (nonzero) stats at all, nPendingPages can be
7420  * trusted, but the other fields are data as of the last VACUUM. We can
7421  * scale them up to account for growth since then, but that method only
7422  * goes so far; in the worst case, the stats might be for a completely
7423  * empty index, and scaling them will produce pretty bogus numbers.
7424  * Somewhat arbitrarily, set the cutoff for doing scaling at 4X growth; if
7425  * it's grown more than that, fall back to estimating things only from the
7426  * assumed-accurate index size. But we'll trust nPendingPages in any case
7427  * so long as it's not clearly insane, ie, more than the index size.
7428  */
7429  if (ginStats.nPendingPages < numPages)
7430  numPendingPages = ginStats.nPendingPages;
7431  else
7432  numPendingPages = 0;
7433 
7434  if (numPages > 0 && ginStats.nTotalPages <= numPages &&
7435  ginStats.nTotalPages > numPages / 4 &&
7436  ginStats.nEntryPages > 0 && ginStats.nEntries > 0)
7437  {
7438  /*
7439  * OK, the stats seem close enough to sane to be trusted. But we
7440  * still need to scale them by the ratio numPages / nTotalPages to
7441  * account for growth since the last VACUUM.
7442  */
7443  double scale = numPages / ginStats.nTotalPages;
7444 
7445  numEntryPages = ceil(ginStats.nEntryPages * scale);
7446  numDataPages = ceil(ginStats.nDataPages * scale);
7447  numEntries = ceil(ginStats.nEntries * scale);
7448  /* ensure we didn't round up too much */
7449  numEntryPages = Min(numEntryPages, numPages - numPendingPages);
7450  numDataPages = Min(numDataPages,
7451  numPages - numPendingPages - numEntryPages);
7452  }
7453  else
7454  {
7455  /*
7456  * We might get here because it's a hypothetical index, or an index
7457  * created pre-9.1 and never vacuumed since upgrading (in which case
7458  * its stats would read as zeroes), or just because it's grown too
7459  * much since the last VACUUM for us to put our faith in scaling.
7460  *
7461  * Invent some plausible internal statistics based on the index page
7462  * count (and clamp that to at least 10 pages, just in case). We
7463  * estimate that 90% of the index is entry pages, and the rest is data
7464  * pages. Estimate 100 entries per entry page; this is rather bogus
7465  * since it'll depend on the size of the keys, but it's more robust
7466  * than trying to predict the number of entries per heap tuple.
7467  */
7468  numPages = Max(numPages, 10);
7469  numEntryPages = floor((numPages - numPendingPages) * 0.90);
7470  numDataPages = numPages - numPendingPages - numEntryPages;
7471  numEntries = floor(numEntryPages * 100);
7472  }
7473 
7474  /* In an empty index, numEntries could be zero. Avoid divide-by-zero */
7475  if (numEntries < 1)
7476  numEntries = 1;
7477 
7478  /*
7479  * If the index is partial, AND the index predicate with the index-bound
7480  * quals to produce a more accurate idea of the number of rows covered by
7481  * the bound conditions.
7482  */
7483  selectivityQuals = add_predicate_to_index_quals(index, indexQuals);
7484 
7485  /* Estimate the fraction of main-table tuples that will be visited */
7486  *indexSelectivity = clauselist_selectivity(root, selectivityQuals,
7487  index->rel->relid,
7488  JOIN_INNER,
7489  NULL);
7490 
7491  /* fetch estimated page cost for tablespace containing index */
7492  get_tablespace_page_costs(index->reltablespace,
7493  &spc_random_page_cost,
7494  NULL);
7495 
7496  /*
7497  * Generic assumption about index correlation: there isn't any.
7498  */
7499  *indexCorrelation = 0.0;
7500 
7501  /*
7502  * Examine quals to estimate number of search entries & partial matches
7503  */
7504  memset(&counts, 0, sizeof(counts));
7505  counts.arrayScans = 1;
7506  matchPossible = true;
7507 
7508  foreach(lc, path->indexclauses)
7509  {
7510  IndexClause *iclause = lfirst_node(IndexClause, lc);
7511  ListCell *lc2;
7512 
7513  foreach(lc2, iclause->indexquals)
7514  {
7515  RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc2);
7516  Expr *clause = rinfo->clause;
7517 
7518  if (IsA(clause, OpExpr))
7519  {
7520  matchPossible = gincost_opexpr(root,
7521  index,
7522  iclause->indexcol,
7523  (OpExpr *) clause,
7524  &counts);
7525  if (!matchPossible)
7526  break;
7527  }
7528  else if (IsA(clause, ScalarArrayOpExpr))
7529  {
7530  matchPossible = gincost_scalararrayopexpr(root,
7531  index,
7532  iclause->indexcol,
7533  (ScalarArrayOpExpr *) clause,
7534  numEntries,
7535  &counts);
7536  if (!matchPossible)
7537  break;
7538  }
7539  else
7540  {
7541  /* shouldn't be anything else for a GIN index */
7542  elog(ERROR, "unsupported GIN indexqual type: %d",
7543  (int) nodeTag(clause));
7544  }
7545  }
7546  }
7547 
7548  /* Fall out if there were any provably-unsatisfiable quals */
7549  if (!matchPossible)
7550  {
7551  *indexStartupCost = 0;
7552  *indexTotalCost = 0;
7553  *indexSelectivity = 0;
7554  return;
7555  }
7556 
7557  /*
7558  * If attribute has a full scan and at the same time doesn't have normal
7559  * scan, then we'll have to scan all non-null entries of that attribute.
7560  * Currently, we don't have per-attribute statistics for GIN. Thus, we
7561  * must assume the whole GIN index has to be scanned in this case.
7562  */
7563  fullIndexScan = false;
7564  for (i = 0; i < index->nkeycolumns; i++)
7565  {
7566  if (counts.attHasFullScan[i] && !counts.attHasNormalScan[i])
7567  {
7568  fullIndexScan = true;
7569  break;
7570  }
7571  }
7572 
7573  if (fullIndexScan || indexQuals == NIL)
7574  {
7575  /*
7576  * Full index scan will be required. We treat this as if every key in
7577  * the index had been listed in the query; is that reasonable?
7578  */
7579  counts.partialEntries = 0;
7580  counts.exactEntries = numEntries;
7581  counts.searchEntries = numEntries;
7582  }
7583 
7584  /* Will we have more than one iteration of a nestloop scan? */
7585  outer_scans = loop_count;
7586 
7587  /*
7588  * Compute cost to begin scan, first of all, pay attention to pending
7589  * list.
7590  */
7591  entryPagesFetched = numPendingPages;
7592 
7593  /*
7594  * Estimate number of entry pages read. We need to do
7595  * counts.searchEntries searches. Use a power function as it should be,
7596  * but tuples on leaf pages usually is much greater. Here we include all
7597  * searches in entry tree, including search of first entry in partial
7598  * match algorithm
7599  */
7600  entryPagesFetched += ceil(counts.searchEntries * rint(pow(numEntryPages, 0.15)));
7601 
7602  /*
7603  * Add an estimate of entry pages read by partial match algorithm. It's a
7604  * scan over leaf pages in entry tree. We haven't any useful stats here,
7605  * so estimate it as proportion. Because counts.partialEntries is really
7606  * pretty bogus (see code above), it's possible that it is more than
7607  * numEntries; clamp the proportion to ensure sanity.
7608  */
7609  partialScale = counts.partialEntries / numEntries;
7610  partialScale = Min(partialScale, 1.0);
7611 
7612  entryPagesFetched += ceil(numEntryPages * partialScale);
7613 
7614  /*
7615  * Partial match algorithm reads all data pages before doing actual scan,
7616  * so it's a startup cost. Again, we haven't any useful stats here, so
7617  * estimate it as proportion.
7618  */
7619  dataPagesFetched = ceil(numDataPages * partialScale);
7620 
7621  /*
7622  * Calculate cache effects if more than one scan due to nestloops or array
7623  * quals. The result is pro-rated per nestloop scan, but the array qual
7624  * factor shouldn't be pro-rated (compare genericcostestimate).
7625  */
7626  if (outer_scans > 1 || counts.arrayScans > 1)
7627  {
7628  entryPagesFetched *= outer_scans * counts.arrayScans;
7629  entryPagesFetched = index_pages_fetched(entryPagesFetched,
7630  (BlockNumber) numEntryPages,
7631  numEntryPages, root);
7632  entryPagesFetched /= outer_scans;
7633  dataPagesFetched *= outer_scans * counts.arrayScans;
7634  dataPagesFetched = index_pages_fetched(dataPagesFetched,
7635  (BlockNumber) numDataPages,
7636  numDataPages, root);
7637  dataPagesFetched /= outer_scans;
7638  }
7639 
7640  /*
7641  * Here we use random page cost because logically-close pages could be far
7642  * apart on disk.
7643  */
7644  *indexStartupCost = (entryPagesFetched + dataPagesFetched) * spc_random_page_cost;
7645 
7646  /*
7647  * Now compute the number of data pages fetched during the scan.
7648  *
7649  * We assume every entry to have the same number of items, and that there
7650  * is no overlap between them. (XXX: tsvector and array opclasses collect
7651  * statistics on the frequency of individual keys; it would be nice to use
7652  * those here.)
7653  */
7654  dataPagesFetched = ceil(numDataPages * counts.exactEntries / numEntries);
7655 
7656  /*
7657  * If there is a lot of overlap among the entries, in particular if one of
7658  * the entries is very frequent, the above calculation can grossly
7659  * under-estimate. As a simple cross-check, calculate a lower bound based
7660  * on the overall selectivity of the quals. At a minimum, we must read
7661  * one item pointer for each matching entry.
7662  *
7663  * The width of each item pointer varies, based on the level of
7664  * compression. We don't have statistics on that, but an average of
7665  * around 3 bytes per item is fairly typical.
7666  */
7667  dataPagesFetchedBySel = ceil(*indexSelectivity *
7668  (numTuples / (BLCKSZ / 3)));
7669  if (dataPagesFetchedBySel > dataPagesFetched)
7670  dataPagesFetched = dataPagesFetchedBySel;
7671 
7672  /* Account for cache effects, the same as above */
7673  if (outer_scans > 1 || counts.arrayScans > 1)
7674  {
7675  dataPagesFetched *= outer_scans * counts.arrayScans;
7676  dataPagesFetched = index_pages_fetched(dataPagesFetched,
7677  (BlockNumber) numDataPages,
7678  numDataPages, root);
7679  dataPagesFetched /= outer_scans;
7680  }
7681 
7682  /* And apply random_page_cost as the cost per page */
7683  *indexTotalCost = *indexStartupCost +
7684  dataPagesFetched * spc_random_page_cost;
7685 
7686  /*
7687  * Add on index qual eval costs, much as in genericcostestimate. But we
7688  * can disregard indexorderbys, since GIN doesn't support those.
7689  */
7690  qual_arg_cost = index_other_operands_eval_cost(root, indexQuals);
7691  qual_op_cost = cpu_operator_cost * list_length(indexQuals);
7692 
7693  *indexStartupCost += qual_arg_cost;
7694  *indexTotalCost += qual_arg_cost;
7695  *indexTotalCost += (numTuples * *indexSelectivity) * (cpu_index_tuple_cost + qual_op_cost);
7696  *indexPages = dataPagesFetched;
7697 }
uint32 BlockNumber
Definition: block.h:31
double index_pages_fetched(double tuples_fetched, BlockNumber pages, double index_pages, PlannerInfo *root)
Definition: costsize.c:840
double cpu_index_tuple_cost
Definition: costsize.c:122
void ginGetStats(Relation index, GinStatsData *stats)
Definition: ginutil.c:630
int i
Definition: isn.c:73
static int list_length(const List *l)
Definition: pg_list.h:149
int scale
Definition: pgbench.c:192
static bool gincost_scalararrayopexpr(PlannerInfo *root, IndexOptInfo *index, int indexcol, ScalarArrayOpExpr *clause, double numIndexEntries, GinQualCounts *counts)
Definition: selfuncs.c:7256
static bool gincost_opexpr(PlannerInfo *root, IndexOptInfo *index, int indexcol, OpExpr *clause, GinQualCounts *counts)
Definition: selfuncs.c:7206
bool attHasNormalScan[INDEX_MAX_KEYS]
Definition: selfuncs.c:7079
double exactEntries
Definition: selfuncs.c:7081
double arrayScans
Definition: selfuncs.c:7083
double partialEntries
Definition: selfuncs.c:7080
bool attHasFullScan[INDEX_MAX_KEYS]
Definition: selfuncs.c:7078
double searchEntries
Definition: selfuncs.c:7082
BlockNumber nDataPages
Definition: gin.h:47
BlockNumber nPendingPages
Definition: gin.h:44
BlockNumber nEntryPages
Definition: gin.h:46
int64 nEntries
Definition: gin.h:48
BlockNumber nTotalPages
Definition: gin.h:45

References add_predicate_to_index_quals(), GinQualCounts::arrayScans, GinQualCounts::attHasFullScan, GinQualCounts::attHasNormalScan, RestrictInfo::clause, clauselist_selectivity(), cpu_index_tuple_cost, cpu_operator_cost, elog, ERROR, GinQualCounts::exactEntries, get_quals_from_indexclauses(), get_tablespace_page_costs(), gincost_opexpr(), gincost_scalararrayopexpr(), ginGetStats(), i, index_close(), index_open(), index_other_operands_eval_cost(), index_pages_fetched(), IndexPath::indexclauses, IndexClause::indexcol, IndexPath::indexinfo, IndexClause::indexquals, IsA, JOIN_INNER, lfirst_node, list_length(), Max, Min, GinStatsData::nDataPages, GinStatsData::nEntries, GinStatsData::nEntryPages, NIL, nodeTag, NoLock, GinStatsData::nPendingPages, GinStatsData::nTotalPages, GinQualCounts::partialEntries, scale, and GinQualCounts::searchEntries.

Referenced by ginhandler().
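The selectivity-based lower bound on data pages is the least obvious step above; here is a standalone C sketch with illustrative inputs (8192 is the default BLCKSZ, and roughly 3 bytes per item pointer is the average the source comment assumes):

#include <math.h>
#include <stdio.h>

int
main(void)
{
    double      blcksz = 8192.0;        /* default PostgreSQL block size */
    double      numTuples = 5000000.0;  /* heap tuples (made up) */
    double      selectivity = 0.02;     /* overall qual selectivity (made up) */

    /* one ~3-byte item pointer per matching tuple, packed into pages */
    double      dataPagesFetchedBySel =
        ceil(selectivity * (numTuples / (blcksz / 3)));

    /* prints: lower bound on data pages fetched: 37 */
    printf("lower bound on data pages fetched: %.0f\n", dataPagesFetchedBySel);
    return 0;
}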

◆ gistcostestimate()

void gistcostestimate ( struct PlannerInfo *  root,
struct IndexPath *  path,
double  loop_count,
Cost *  indexStartupCost,
Cost *  indexTotalCost,
Selectivity *  indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 6958 of file selfuncs.c.

6962 {
6963  IndexOptInfo *index = path->indexinfo;
6964  GenericCosts costs;
6965  Cost descentCost;
6966 
6967  MemSet(&costs, 0, sizeof(costs));
6968 
6969  genericcostestimate(root, path, loop_count, &costs);
6970 
6971  /*
6972  * We model index descent costs similarly to those for btree, but to do
6973  * that we first need an idea of the tree height. We somewhat arbitrarily
6974  * assume that the fanout is 100, meaning the tree height is at most
6975  * log100(index->pages).
6976  *
6977  * Although this computation isn't really expensive enough to require
6978  * caching, we might as well use index->tree_height to cache it.
6979  */
6980  if (index->tree_height < 0) /* unknown? */
6981  {
6982  if (index->pages > 1) /* avoid computing log(0) */
6983  index->tree_height = (int) (log(index->pages) / log(100.0));
6984  else
6985  index->tree_height = 0;
6986  }
6987 
6988  /*
6989  * Add a CPU-cost component to represent the costs of initial descent. We
6990  * just use log(N) here not log2(N) since the branching factor isn't
6991  * necessarily two anyway. As for btree, charge once per SA scan.
6992  */
6993  if (index->tuples > 1) /* avoid computing log(0) */
6994  {
6995  descentCost = ceil(log(index->tuples)) * cpu_operator_cost;
6996  costs.indexStartupCost += descentCost;
6997  costs.indexTotalCost += costs.num_sa_scans * descentCost;
6998  }
6999 
7000  /*
7001  * Likewise add a per-page charge, calculated the same as for btrees.
7002  */
7003  descentCost = (index->tree_height + 1) * 50.0 * cpu_operator_cost;
7004  costs.indexStartupCost += descentCost;
7005  costs.indexTotalCost += costs.num_sa_scans * descentCost;
7006 
7007  *indexStartupCost = costs.indexStartupCost;
7008  *indexTotalCost = costs.indexTotalCost;
7009  *indexSelectivity = costs.indexSelectivity;
7010  *indexCorrelation = costs.indexCorrelation;
7011  *indexPages = costs.numIndexPages;
7012 }

References cpu_operator_cost, genericcostestimate(), GenericCosts::indexCorrelation, IndexPath::indexinfo, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, MemSet, GenericCosts::num_sa_scans, and GenericCosts::numIndexPages.

Referenced by gisthandler().
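The fanout-100 height guess is simple enough to check directly. A standalone C sketch (page count made up):

#include <math.h>
#include <stdio.h>

int
main(void)
{
    double      pages = 2000000.0;  /* index pages (made up) */

    /* height is at most log100(pages); clamp the degenerate cases */
    int         tree_height = (pages > 1) ? (int) (log(pages) / log(100.0)) : 0;

    /* prints: assumed tree height: 3 */
    printf("assumed tree height: %d\n", tree_height);
    return 0;
}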

◆ hashcostestimate()

void hashcostestimate ( struct PlannerInfo *  root,
struct IndexPath *  path,
double  loop_count,
Cost *  indexStartupCost,
Cost *  indexTotalCost,
Selectivity *  indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 6914 of file selfuncs.c.

6918 {
6919  GenericCosts costs;
6920 
6921  MemSet(&costs, 0, sizeof(costs));
6922 
6923  genericcostestimate(root, path, loop_count, &costs);
6924 
6925  /*
6926  * A hash index has no descent costs as such, since the index AM can go
6927  * directly to the target bucket after computing the hash value. There
6928  * are a couple of other hash-specific costs that we could conceivably add
6929  * here, though:
6930  *
6931  * Ideally we'd charge spc_random_page_cost for each page in the target
6932  * bucket, not just the numIndexPages pages that genericcostestimate
6933  * thought we'd visit. However in most cases we don't know which bucket
6934  * that will be. There's no point in considering the average bucket size
6935  * because the hash AM makes sure that's always one page.
6936  *
6937  * Likewise, we could consider charging some CPU for each index tuple in
6938  * the bucket, if we knew how many there were. But the per-tuple cost is
6939  * just a hash value comparison, not a general datatype-dependent
6940  * comparison, so any such charge ought to be quite a bit less than
6941  * cpu_operator_cost; which makes it probably not worth worrying about.
6942  *
6943  * A bigger issue is that chance hash-value collisions will result in
6944  * wasted probes into the heap. We don't currently attempt to model this
6945  * cost on the grounds that it's rare, but maybe it's not rare enough.
6946  * (Any fix for this ought to consider the generic lossy-operator problem,
6947  * though; it's not entirely hash-specific.)
6948  */
6949 
6950  *indexStartupCost = costs.indexStartupCost;
6951  *indexTotalCost = costs.indexTotalCost;
6952  *indexSelectivity = costs.indexSelectivity;
6953  *indexCorrelation = costs.indexCorrelation;
6954  *indexPages = costs.numIndexPages;
6955 }

References genericcostestimate(), GenericCosts::indexCorrelation, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, MemSet, and GenericCosts::numIndexPages.

Referenced by hashhandler().
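Since hashcostestimate() adds nothing of its own, its body doubles as the minimal template for an amcostestimate implementation. A sketch of a delegating estimator for a hypothetical access method (mycostestimate is an invented name, and this compiles only inside the PostgreSQL source tree):

#include "postgres.h"
#include "utils/selfuncs.h"

void
mycostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
               Cost *indexStartupCost, Cost *indexTotalCost,
               Selectivity *indexSelectivity, double *indexCorrelation,
               double *indexPages)
{
    GenericCosts costs;

    /* let the generic machinery estimate pages, tuples and selectivity */
    MemSet(&costs, 0, sizeof(costs));
    genericcostestimate(root, path, loop_count, &costs);

    /* copy the results into the caller's output parameters */
    *indexStartupCost = costs.indexStartupCost;
    *indexTotalCost = costs.indexTotalCost;
    *indexSelectivity = costs.indexSelectivity;
    *indexCorrelation = costs.indexCorrelation;
    *indexPages = costs.numIndexPages;
}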

◆ spgcostestimate()

void spgcostestimate ( struct PlannerInfo *  root,
struct IndexPath *  path,
double  loop_count,
Cost *  indexStartupCost,
Cost *  indexTotalCost,
Selectivity *  indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 7015 of file selfuncs.c.

7019 {
7020  IndexOptInfo *index = path->indexinfo;
7021  GenericCosts costs;
7022  Cost descentCost;
7023 
7024  MemSet(&costs, 0, sizeof(costs));
7025 
7026  genericcostestimate(root, path, loop_count, &costs);
7027 
7028  /*
7029  * We model index descent costs similarly to those for btree, but to do
7030  * that we first need an idea of the tree height. We somewhat arbitrarily
7031  * assume that the fanout is 100, meaning the tree height is at most
7032  * log100(index->pages).
7033  *
7034  * Although this computation isn't really expensive enough to require
7035  * caching, we might as well use index->tree_height to cache it.
7036  */
7037  if (index->tree_height < 0) /* unknown? */
7038  {
7039  if (index->pages > 1) /* avoid computing log(0) */
7040  index->tree_height = (int) (log(index->pages) / log(100.0));
7041  else
7042  index->tree_height = 0;
7043  }
7044 
7045  /*
7046  * Add a CPU-cost component to represent the costs of initial descent. We
7047  * just use log(N) here not log2(N) since the branching factor isn't
7048  * necessarily two anyway. As for btree, charge once per SA scan.
7049  */
7050  if (index->tuples > 1) /* avoid computing log(0) */
7051  {
7052  descentCost = ceil(log(index->tuples)) * cpu_operator_cost;
7053  costs.indexStartupCost += descentCost;
7054  costs.indexTotalCost += costs.num_sa_scans * descentCost;
7055  }
7056 
7057  /*
7058  * Likewise add a per-page charge, calculated the same as for btrees.
7059  */
7060  descentCost = (index->tree_height + 1) * 50.0 * cpu_operator_cost;
7061  costs.indexStartupCost += descentCost;
7062  costs.indexTotalCost += costs.num_sa_scans * descentCost;
7063 
7064  *indexStartupCost = costs.indexStartupCost;
7065  *indexTotalCost = costs.indexTotalCost;
7066  *indexSelectivity = costs.indexSelectivity;
7067  *indexCorrelation = costs.indexCorrelation;
7068  *indexPages = costs.numIndexPages;
7069 }

References cpu_operator_cost, genericcostestimate(), GenericCosts::indexCorrelation, IndexPath::indexinfo, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, MemSet, GenericCosts::num_sa_scans, and GenericCosts::numIndexPages.

Referenced by spghandler().
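As the cross-references note, each of these estimators reaches the planner only through its access method's handler, which stores the pointer in the IndexAmRoutine it returns. An excerpt-style sketch of that wiring (the real spghandler() fills in many more fields):

    IndexAmRoutine *amroutine = makeNode(IndexAmRoutine);

    /* ... the handler populates the other am* fields here ... */
    amroutine->amcostestimate = spgcostestimate;

    PG_RETURN_POINTER(amroutine);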