PostgreSQL Source Code  git master
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros
index_selfuncs.h File Reference
#include "access/amapi.h"
Include dependency graph for index_selfuncs.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Functions

void brincostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void btcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void hashcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void gistcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void spgcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void gincostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 

Function Documentation

void brincostestimate ( struct PlannerInfo *root,
struct IndexPath *path,
double  loop_count,
Cost *indexStartupCost,
Cost *indexTotalCost,
Selectivity *indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 7537 of file selfuncs.c.

References clauselist_selectivity(), cpu_index_tuple_cost, cpu_operator_cost, deconstruct_indexquals(), get_tablespace_page_costs(), IndexPath::indexinfo, IndexPath::indexorderbys, IndexPath::indexquals, JOIN_INNER, list_length(), NULL, orderby_operands_eval_cost(), other_operands_eval_cost(), IndexOptInfo::pages, IndexOptInfo::rel, RelOptInfo::relid, IndexOptInfo::reltablespace, and IndexOptInfo::tuples.

Referenced by brinhandler().

7541 {
7542  IndexOptInfo *index = path->indexinfo;
7543  List *indexQuals = path->indexquals;
7544  List *indexOrderBys = path->indexorderbys;
7545  double numPages = index->pages;
7546  double numTuples = index->tuples;
7547  List *qinfos;
7548  Cost spc_seq_page_cost;
7549  Cost spc_random_page_cost;
7550  double qual_op_cost;
7551  double qual_arg_cost;
7552 
7553  /* Do preliminary analysis of indexquals */
7554  qinfos = deconstruct_indexquals(path);
7555 
7556  /* fetch estimated page cost for tablespace containing index */
7558  &spc_random_page_cost,
7559  &spc_seq_page_cost);
7560 
7561  /*
7562  * BRIN indexes are always read in full; use that as startup cost.
7563  *
7564  * XXX maybe only include revmap pages here?
7565  */
7566  *indexStartupCost = spc_seq_page_cost * numPages * loop_count;
7567 
7568  /*
7569  * To read a BRIN index there might be a bit of back and forth over
7570  * regular pages, as revmap might point to them out of sequential order;
7571  * calculate this as reading the whole index in random order.
7572  */
7573  *indexTotalCost = spc_random_page_cost * numPages * loop_count;
7574 
7575  *indexSelectivity =
7576  clauselist_selectivity(root, indexQuals,
7577  path->indexinfo->rel->relid,
7578  JOIN_INNER, NULL);
7579  *indexCorrelation = 1;
7580 
7581  /*
7582  * Add on index qual eval costs, much as in genericcostestimate.
7583  */
7584  qual_arg_cost = other_operands_eval_cost(root, qinfos) +
7585  orderby_operands_eval_cost(root, path);
7586  qual_op_cost = cpu_operator_cost *
7587  (list_length(indexQuals) + list_length(indexOrderBys));
7588 
7589  *indexStartupCost += qual_arg_cost;
7590  *indexTotalCost += qual_arg_cost;
7591  *indexTotalCost += (numTuples * *indexSelectivity) * (cpu_index_tuple_cost + qual_op_cost);
7592  *indexPages = index->pages;
7593 
7594  /* XXX what about pages_per_range? */
7595 }
IndexOptInfo * indexinfo
Definition: relation.h:972
Oid reltablespace
Definition: relation.h:589
static Cost other_operands_eval_cost(PlannerInfo *root, List *qinfos)
Definition: selfuncs.c:6158
double tuples
Definition: relation.h:594
List * deconstruct_indexquals(IndexPath *path)
Definition: selfuncs.c:6063
static Cost orderby_operands_eval_cost(PlannerInfo *root, IndexPath *path)
Definition: selfuncs.c:6183
Definition: type.h:90
BlockNumber pages
Definition: relation.h:593
List * indexquals
Definition: relation.h:974
RelOptInfo * rel
Definition: relation.h:590
double cpu_operator_cost
Definition: costsize.c:108
void get_tablespace_page_costs(Oid spcid, double *spc_random_page_cost, double *spc_seq_page_cost)
Definition: spccache.c:178
Index relid
Definition: relation.h:518
List * indexorderbys
Definition: relation.h:976
#define NULL
Definition: c.h:226
static int list_length(const List *l)
Definition: pg_list.h:89
Selectivity clauselist_selectivity(PlannerInfo *root, List *clauses, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
Definition: clausesel.c:92
Definition: pg_list.h:45
double cpu_index_tuple_cost
Definition: costsize.c:107
double Cost
Definition: nodes.h:632
void btcostestimate ( struct PlannerInfo *root,
struct IndexPath *path,
double  loop_count,
Cost *indexStartupCost,
Cost *indexTotalCost,
Selectivity *indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 6453 of file selfuncs.c.

References add_predicate_to_quals(), Assert, BoolGetDatum, BTEqualStrategyNumber, BTLessStrategyNumber, RestrictInfo::clause, IndexQualInfo::clause_op, clauselist_selectivity(), cpu_operator_cost, deconstruct_indexquals(), elog, ERROR, estimate_array_length(), free_attstatsslot(), VariableStatData::freefunc, genericcostestimate(), get_attstatsslot(), get_index_stats_hook, get_op_opfamily_strategy(), get_opfamily_member(), get_relation_stats_hook, HeapTupleIsValid, IndexQualInfo::indexcol, GenericCosts::indexCorrelation, IndexPath::indexinfo, IndexOptInfo::indexkeys, IndexOptInfo::indexoid, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, RangeTblEntry::inh, Int16GetDatum, InvalidOid, IS_NULL, IsA, JOIN_INNER, lappend(), lfirst, MemSet, IndexOptInfo::ncolumns, NIL, NULL, NullTest::nulltesttype, GenericCosts::num_sa_scans, GenericCosts::numIndexPages, GenericCosts::numIndexTuples, ObjectIdGetDatum, OidIsValid, IndexOptInfo::opcintype, IndexOptInfo::opfamily, IndexQualInfo::other_operand, planner_rt_fetch, IndexOptInfo::rel, ReleaseSysCache(), ReleaseVariableStats, RelOptInfo::relid, RangeTblEntry::relid, IndexOptInfo::reverse_sort, IndexQualInfo::rinfo, rint(), RTE_RELATION, RangeTblEntry::rtekind, SearchSysCache3, STATISTIC_KIND_CORRELATION, STATRELATTINH, VariableStatData::statsTuple, IndexOptInfo::tree_height, RelOptInfo::tuples, IndexOptInfo::tuples, and IndexOptInfo::unique.

Referenced by bthandler().

6457 {
6458  IndexOptInfo *index = path->indexinfo;
6459  List *qinfos;
6460  GenericCosts costs;
6461  Oid relid;
6462  AttrNumber colnum;
6463  VariableStatData vardata;
6464  double numIndexTuples;
6465  Cost descentCost;
6466  List *indexBoundQuals;
6467  int indexcol;
6468  bool eqQualHere;
6469  bool found_saop;
6470  bool found_is_null_op;
6471  double num_sa_scans;
6472  ListCell *lc;
6473 
6474  /* Do preliminary analysis of indexquals */
6475  qinfos = deconstruct_indexquals(path);
6476 
6477  /*
6478  * For a btree scan, only leading '=' quals plus inequality quals for the
6479  * immediately next attribute contribute to index selectivity (these are
6480  * the "boundary quals" that determine the starting and stopping points of
6481  * the index scan). Additional quals can suppress visits to the heap, so
6482  * it's OK to count them in indexSelectivity, but they should not count
6483  * for estimating numIndexTuples. So we must examine the given indexquals
6484  * to find out which ones count as boundary quals. We rely on the
6485  * knowledge that they are given in index column order.
6486  *
6487  * For a RowCompareExpr, we consider only the first column, just as
6488  * rowcomparesel() does.
6489  *
6490  * If there's a ScalarArrayOpExpr in the quals, we'll actually perform N
6491  * index scans not one, but the ScalarArrayOpExpr's operator can be
6492  * considered to act the same as it normally does.
6493  */
6494  indexBoundQuals = NIL;
6495  indexcol = 0;
6496  eqQualHere = false;
6497  found_saop = false;
6498  found_is_null_op = false;
6499  num_sa_scans = 1;
6500  foreach(lc, qinfos)
6501  {
6502  IndexQualInfo *qinfo = (IndexQualInfo *) lfirst(lc);
6503  RestrictInfo *rinfo = qinfo->rinfo;
6504  Expr *clause = rinfo->clause;
6505  Oid clause_op;
6506  int op_strategy;
6507 
6508  if (indexcol != qinfo->indexcol)
6509  {
6510  /* Beginning of a new column's quals */
6511  if (!eqQualHere)
6512  break; /* done if no '=' qual for indexcol */
6513  eqQualHere = false;
6514  indexcol++;
6515  if (indexcol != qinfo->indexcol)
6516  break; /* no quals at all for indexcol */
6517  }
6518 
6519  if (IsA(clause, ScalarArrayOpExpr))
6520  {
6521  int alength = estimate_array_length(qinfo->other_operand);
6522 
6523  found_saop = true;
6524  /* count up number of SA scans induced by indexBoundQuals only */
6525  if (alength > 1)
6526  num_sa_scans *= alength;
6527  }
6528  else if (IsA(clause, NullTest))
6529  {
6530  NullTest *nt = (NullTest *) clause;
6531 
6532  if (nt->nulltesttype == IS_NULL)
6533  {
6534  found_is_null_op = true;
6535  /* IS NULL is like = for selectivity determination purposes */
6536  eqQualHere = true;
6537  }
6538  }
6539 
6540  /*
6541  * We would need to commute the clause_op if not varonleft, except
6542  * that we only care if it's equality or not, so that refinement is
6543  * unnecessary.
6544  */
6545  clause_op = qinfo->clause_op;
6546 
6547  /* check for equality operator */
6548  if (OidIsValid(clause_op))
6549  {
6550  op_strategy = get_op_opfamily_strategy(clause_op,
6551  index->opfamily[indexcol]);
6552  Assert(op_strategy != 0); /* not a member of opfamily?? */
6553  if (op_strategy == BTEqualStrategyNumber)
6554  eqQualHere = true;
6555  }
6556 
6557  indexBoundQuals = lappend(indexBoundQuals, rinfo);
6558  }
6559 
6560  /*
6561  * If index is unique and we found an '=' clause for each column, we can
6562  * just assume numIndexTuples = 1 and skip the expensive
6563  * clauselist_selectivity calculations. However, a ScalarArrayOp or
6564  * NullTest invalidates that theory, even though it sets eqQualHere.
6565  */
6566  if (index->unique &&
6567  indexcol == index->ncolumns - 1 &&
6568  eqQualHere &&
6569  !found_saop &&
6570  !found_is_null_op)
6571  numIndexTuples = 1.0;
6572  else
6573  {
6574  List *selectivityQuals;
6575  Selectivity btreeSelectivity;
6576 
6577  /*
6578  * If the index is partial, AND the index predicate with the
6579  * index-bound quals to produce a more accurate idea of the number of
6580  * rows covered by the bound conditions.
6581  */
6582  selectivityQuals = add_predicate_to_quals(index, indexBoundQuals);
6583 
6584  btreeSelectivity = clauselist_selectivity(root, selectivityQuals,
6585  index->rel->relid,
6586  JOIN_INNER,
6587  NULL);
6588  numIndexTuples = btreeSelectivity * index->rel->tuples;
6589 
6590  /*
6591  * As in genericcostestimate(), we have to adjust for any
6592  * ScalarArrayOpExpr quals included in indexBoundQuals, and then round
6593  * to integer.
6594  */
6595  numIndexTuples = rint(numIndexTuples / num_sa_scans);
6596  }
6597 
6598  /*
6599  * Now do generic index cost estimation.
6600  */
6601  MemSet(&costs, 0, sizeof(costs));
6602  costs.numIndexTuples = numIndexTuples;
6603 
6604  genericcostestimate(root, path, loop_count, qinfos, &costs);
6605 
6606  /*
6607  * Add a CPU-cost component to represent the costs of initial btree
6608  * descent. We don't charge any I/O cost for touching upper btree levels,
6609  * since they tend to stay in cache, but we still have to do about log2(N)
6610  * comparisons to descend a btree of N leaf tuples. We charge one
6611  * cpu_operator_cost per comparison.
6612  *
6613  * If there are ScalarArrayOpExprs, charge this once per SA scan. The
6614  * ones after the first one are not startup cost so far as the overall
6615  * plan is concerned, so add them only to "total" cost.
6616  */
6617  if (index->tuples > 1) /* avoid computing log(0) */
6618  {
6619  descentCost = ceil(log(index->tuples) / log(2.0)) * cpu_operator_cost;
6620  costs.indexStartupCost += descentCost;
6621  costs.indexTotalCost += costs.num_sa_scans * descentCost;
6622  }
6623 
6624  /*
6625  * Even though we're not charging I/O cost for touching upper btree pages,
6626  * it's still reasonable to charge some CPU cost per page descended
6627  * through. Moreover, if we had no such charge at all, bloated indexes
6628  * would appear to have the same search cost as unbloated ones, at least
6629  * in cases where only a single leaf page is expected to be visited. This
6630  * cost is somewhat arbitrarily set at 50x cpu_operator_cost per page
6631  * touched. The number of such pages is btree tree height plus one (ie,
6632  * we charge for the leaf page too). As above, charge once per SA scan.
6633  */
6634  descentCost = (index->tree_height + 1) * 50.0 * cpu_operator_cost;
6635  costs.indexStartupCost += descentCost;
6636  costs.indexTotalCost += costs.num_sa_scans * descentCost;
6637 
6638  /*
6639  * If we can get an estimate of the first column's ordering correlation C
6640  * from pg_statistic, estimate the index correlation as C for a
6641  * single-column index, or C * 0.75 for multiple columns. (The idea here
6642  * is that multiple columns dilute the importance of the first column's
6643  * ordering, but don't negate it entirely. Before 8.0 we divided the
6644  * correlation by the number of columns, but that seems too strong.)
6645  */
6646  MemSet(&vardata, 0, sizeof(vardata));
6647 
6648  if (index->indexkeys[0] != 0)
6649  {
6650  /* Simple variable --- look to stats for the underlying table */
6651  RangeTblEntry *rte = planner_rt_fetch(index->rel->relid, root);
6652 
6653  Assert(rte->rtekind == RTE_RELATION);
6654  relid = rte->relid;
6655  Assert(relid != InvalidOid);
6656  colnum = index->indexkeys[0];
6657 
6659  (*get_relation_stats_hook) (root, rte, colnum, &vardata))
6660  {
6661  /*
6662  * The hook took control of acquiring a stats tuple. If it did
6663  * supply a tuple, it'd better have supplied a freefunc.
6664  */
6665  if (HeapTupleIsValid(vardata.statsTuple) &&
6666  !vardata.freefunc)
6667  elog(ERROR, "no function provided to release variable stats with");
6668  }
6669  else
6670  {
6672  ObjectIdGetDatum(relid),
6673  Int16GetDatum(colnum),
6674  BoolGetDatum(rte->inh));
6675  vardata.freefunc = ReleaseSysCache;
6676  }
6677  }
6678  else
6679  {
6680  /* Expression --- maybe there are stats for the index itself */
6681  relid = index->indexoid;
6682  colnum = 1;
6683 
6684  if (get_index_stats_hook &&
6685  (*get_index_stats_hook) (root, relid, colnum, &vardata))
6686  {
6687  /*
6688  * The hook took control of acquiring a stats tuple. If it did
6689  * supply a tuple, it'd better have supplied a freefunc.
6690  */
6691  if (HeapTupleIsValid(vardata.statsTuple) &&
6692  !vardata.freefunc)
6693  elog(ERROR, "no function provided to release variable stats with");
6694  }
6695  else
6696  {
6698  ObjectIdGetDatum(relid),
6699  Int16GetDatum(colnum),
6700  BoolGetDatum(false));
6701  vardata.freefunc = ReleaseSysCache;
6702  }
6703  }
6704 
6705  if (HeapTupleIsValid(vardata.statsTuple))
6706  {
6707  Oid sortop;
6708  float4 *numbers;
6709  int nnumbers;
6710 
6711  sortop = get_opfamily_member(index->opfamily[0],
6712  index->opcintype[0],
6713  index->opcintype[0],
6715  if (OidIsValid(sortop) &&
6718  sortop,
6719  NULL,
6720  NULL, NULL,
6721  &numbers, &nnumbers))
6722  {
6723  double varCorrelation;
6724 
6725  Assert(nnumbers == 1);
6726  varCorrelation = numbers[0];
6727 
6728  if (index->reverse_sort[0])
6729  varCorrelation = -varCorrelation;
6730 
6731  if (index->ncolumns > 1)
6732  costs.indexCorrelation = varCorrelation * 0.75;
6733  else
6734  costs.indexCorrelation = varCorrelation;
6735 
6736  free_attstatsslot(InvalidOid, NULL, 0, numbers, nnumbers);
6737  }
6738  }
6739 
6740  ReleaseVariableStats(vardata);
6741 
6742  *indexStartupCost = costs.indexStartupCost;
6743  *indexTotalCost = costs.indexTotalCost;
6744  *indexSelectivity = costs.indexSelectivity;
6745  *indexCorrelation = costs.indexCorrelation;
6746  *indexPages = costs.numIndexPages;
6747 }
Selectivity indexSelectivity
Definition: selfuncs.h:130
#define NIL
Definition: pg_list.h:69
#define IsA(nodeptr, _type_)
Definition: nodes.h:559
IndexOptInfo * indexinfo
Definition: relation.h:972
HeapTuple statsTuple
Definition: selfuncs.h:71
double tuples
Definition: relation.h:529
static List * add_predicate_to_quals(IndexOptInfo *index, List *indexQuals)
Definition: selfuncs.c:6431
#define Int16GetDatum(X)
Definition: postgres.h:459
#define MemSet(start, val, len)
Definition: c.h:853
double Selectivity
Definition: nodes.h:631
bool get_attstatsslot(HeapTuple statstuple, Oid atttype, int32 atttypmod, int reqkind, Oid reqop, Oid *actualop, Datum **values, int *nvalues, float4 **numbers, int *nnumbers)
Definition: lsyscache.c:2854
double tuples
Definition: relation.h:594
unsigned int Oid
Definition: postgres_ext.h:31
int tree_height
Definition: relation.h:595
#define OidIsValid(objectId)
Definition: c.h:534
RestrictInfo * rinfo
Definition: selfuncs.h:105
List * deconstruct_indexquals(IndexPath *path)
Definition: selfuncs.c:6063
bool unique
Definition: relation.h:621
Definition: type.h:90
int estimate_array_length(Node *arrayexpr)
Definition: selfuncs.c:2078
RelOptInfo * rel
Definition: relation.h:590
#define planner_rt_fetch(rti, root)
Definition: relation.h:320
#define ObjectIdGetDatum(X)
Definition: postgres.h:515
#define ERROR
Definition: elog.h:43
double num_sa_scans
Definition: selfuncs.h:137
#define STATISTIC_KIND_CORRELATION
Definition: pg_statistic.h:233
Oid get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype, int16 strategy)
Definition: lsyscache.c:163
double cpu_operator_cost
Definition: costsize.c:108
Cost indexTotalCost
Definition: selfuncs.h:129
get_relation_stats_hook_type get_relation_stats_hook
Definition: selfuncs.c:148
double rint(double x)
Definition: rint.c:22
int ncolumns
Definition: relation.h:598
Index relid
Definition: relation.h:518
List * lappend(List *list, void *datum)
Definition: list.c:128
Expr * clause
Definition: relation.h:1637
float float4
Definition: c.h:377
double indexCorrelation
Definition: selfuncs.h:131
void ReleaseSysCache(HeapTuple tuple)
Definition: syscache.c:1083
NullTestType nulltesttype
Definition: primnodes.h:1157
#define BoolGetDatum(X)
Definition: postgres.h:410
#define InvalidOid
Definition: postgres_ext.h:36
double numIndexTuples
Definition: selfuncs.h:135
#define HeapTupleIsValid(tuple)
Definition: htup.h:77
#define NULL
Definition: c.h:226
#define Assert(condition)
Definition: c.h:671
#define lfirst(lc)
Definition: pg_list.h:106
get_index_stats_hook_type get_index_stats_hook
Definition: selfuncs.c:149
Oid * opcintype
Definition: relation.h:602
Cost indexStartupCost
Definition: selfuncs.h:128
Oid * opfamily
Definition: relation.h:601
RTEKind rtekind
Definition: parsenodes.h:882
int get_op_opfamily_strategy(Oid opno, Oid opfamily)
Definition: lsyscache.c:80
#define ReleaseVariableStats(vardata)
Definition: selfuncs.h:80
Node * other_operand
Definition: selfuncs.h:109
#define SearchSysCache3(cacheId, key1, key2, key3)
Definition: syscache.h:153
int * indexkeys
Definition: relation.h:599
#define elog
Definition: elog.h:219
Oid indexoid
Definition: relation.h:588
Selectivity clauselist_selectivity(PlannerInfo *root, List *clauses, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
Definition: clausesel.c:92
void(* freefunc)(HeapTuple tuple)
Definition: selfuncs.h:73
bool * reverse_sort
Definition: relation.h:604
#define BTLessStrategyNumber
Definition: stratnum.h:29
Definition: pg_list.h:45
int16 AttrNumber
Definition: attnum.h:21
void free_attstatsslot(Oid atttype, Datum *values, int nvalues, float4 *numbers, int nnumbers)
Definition: lsyscache.c:2978
#define BTEqualStrategyNumber
Definition: stratnum.h:31
double Cost
Definition: nodes.h:632
void genericcostestimate(PlannerInfo *root, IndexPath *path, double loop_count, List *qinfos, GenericCosts *costs)
Definition: selfuncs.c:6212
double numIndexPages
Definition: selfuncs.h:134
void gincostestimate ( struct PlannerInfo *root,
struct IndexPath *path,
double  loop_count,
Cost *indexStartupCost,
Cost *indexTotalCost,
Selectivity *indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 7212 of file selfuncs.c.

References AccessShareLock, GinQualCounts::arrayScans, RestrictInfo::clause, clauselist_selectivity(), cpu_index_tuple_cost, cpu_operator_cost, deconstruct_indexquals(), elog, ERROR, GinQualCounts::exactEntries, get_tablespace_page_costs(), gincost_opexpr(), gincost_scalararrayopexpr(), ginGetStats(), GinQualCounts::haveFullScan, IndexOptInfo::hypothetical, index_close(), index_open(), index_pages_fetched(), IndexPath::indexinfo, IndexOptInfo::indexoid, IndexPath::indexorderbys, IndexPath::indexquals, IndexOptInfo::indpred, IsA, JOIN_INNER, lfirst, list_concat(), list_length(), list_make1, Max, Min, GinStatsData::nDataPages, GinStatsData::nEntries, GinStatsData::nEntryPages, NIL, nodeTag, GinStatsData::nPendingPages, GinStatsData::nTotalPages, NULL, orderby_operands_eval_cost(), other_operands_eval_cost(), IndexOptInfo::pages, GinQualCounts::partialEntries, predicate_implied_by(), IndexOptInfo::rel, RelOptInfo::relid, IndexOptInfo::reltablespace, IndexQualInfo::rinfo, rint(), scale, GinQualCounts::searchEntries, and IndexOptInfo::tuples.

Referenced by ginhandler().

7216 {
7217  IndexOptInfo *index = path->indexinfo;
7218  List *indexQuals = path->indexquals;
7219  List *indexOrderBys = path->indexorderbys;
7220  List *qinfos;
7221  ListCell *l;
7222  List *selectivityQuals;
7223  double numPages = index->pages,
7224  numTuples = index->tuples;
7225  double numEntryPages,
7226  numDataPages,
7227  numPendingPages,
7228  numEntries;
7229  GinQualCounts counts;
7230  bool matchPossible;
7231  double partialScale;
7232  double entryPagesFetched,
7233  dataPagesFetched,
7234  dataPagesFetchedBySel;
7235  double qual_op_cost,
7236  qual_arg_cost,
7237  spc_random_page_cost,
7238  outer_scans;
7239  Relation indexRel;
7240  GinStatsData ginStats;
7241 
7242  /* Do preliminary analysis of indexquals */
7243  qinfos = deconstruct_indexquals(path);
7244 
7245  /*
7246  * Obtain statistical information from the meta page, if possible. Else
7247  * set ginStats to zeroes, and we'll cope below.
7248  */
7249  if (!index->hypothetical)
7250  {
7251  indexRel = index_open(index->indexoid, AccessShareLock);
7252  ginGetStats(indexRel, &ginStats);
7253  index_close(indexRel, AccessShareLock);
7254  }
7255  else
7256  {
7257  memset(&ginStats, 0, sizeof(ginStats));
7258  }
7259 
7260  /*
7261  * Assuming we got valid (nonzero) stats at all, nPendingPages can be
7262  * trusted, but the other fields are data as of the last VACUUM. We can
7263  * scale them up to account for growth since then, but that method only
7264  * goes so far; in the worst case, the stats might be for a completely
7265  * empty index, and scaling them will produce pretty bogus numbers.
7266  * Somewhat arbitrarily, set the cutoff for doing scaling at 4X growth; if
7267  * it's grown more than that, fall back to estimating things only from the
7268  * assumed-accurate index size. But we'll trust nPendingPages in any case
7269  * so long as it's not clearly insane, ie, more than the index size.
7270  */
7271  if (ginStats.nPendingPages < numPages)
7272  numPendingPages = ginStats.nPendingPages;
7273  else
7274  numPendingPages = 0;
7275 
7276  if (numPages > 0 && ginStats.nTotalPages <= numPages &&
7277  ginStats.nTotalPages > numPages / 4 &&
7278  ginStats.nEntryPages > 0 && ginStats.nEntries > 0)
7279  {
7280  /*
7281  * OK, the stats seem close enough to sane to be trusted. But we
7282  * still need to scale them by the ratio numPages / nTotalPages to
7283  * account for growth since the last VACUUM.
7284  */
7285  double scale = numPages / ginStats.nTotalPages;
7286 
7287  numEntryPages = ceil(ginStats.nEntryPages * scale);
7288  numDataPages = ceil(ginStats.nDataPages * scale);
7289  numEntries = ceil(ginStats.nEntries * scale);
7290  /* ensure we didn't round up too much */
7291  numEntryPages = Min(numEntryPages, numPages - numPendingPages);
7292  numDataPages = Min(numDataPages,
7293  numPages - numPendingPages - numEntryPages);
7294  }
7295  else
7296  {
7297  /*
7298  * We might get here because it's a hypothetical index, or an index
7299  * created pre-9.1 and never vacuumed since upgrading (in which case
7300  * its stats would read as zeroes), or just because it's grown too
7301  * much since the last VACUUM for us to put our faith in scaling.
7302  *
7303  * Invent some plausible internal statistics based on the index page
7304  * count (and clamp that to at least 10 pages, just in case). We
7305  * estimate that 90% of the index is entry pages, and the rest is data
7306  * pages. Estimate 100 entries per entry page; this is rather bogus
7307  * since it'll depend on the size of the keys, but it's more robust
7308  * than trying to predict the number of entries per heap tuple.
7309  */
7310  numPages = Max(numPages, 10);
7311  numEntryPages = floor((numPages - numPendingPages) * 0.90);
7312  numDataPages = numPages - numPendingPages - numEntryPages;
7313  numEntries = floor(numEntryPages * 100);
7314  }
7315 
7316  /* In an empty index, numEntries could be zero. Avoid divide-by-zero */
7317  if (numEntries < 1)
7318  numEntries = 1;
7319 
7320  /*
7321  * Include predicate in selectivityQuals (should match
7322  * genericcostestimate)
7323  */
7324  if (index->indpred != NIL)
7325  {
7326  List *predExtraQuals = NIL;
7327 
7328  foreach(l, index->indpred)
7329  {
7330  Node *predQual = (Node *) lfirst(l);
7331  List *oneQual = list_make1(predQual);
7332 
7333  if (!predicate_implied_by(oneQual, indexQuals))
7334  predExtraQuals = list_concat(predExtraQuals, oneQual);
7335  }
7336  /* list_concat avoids modifying the passed-in indexQuals list */
7337  selectivityQuals = list_concat(predExtraQuals, indexQuals);
7338  }
7339  else
7340  selectivityQuals = indexQuals;
7341 
7342  /* Estimate the fraction of main-table tuples that will be visited */
7343  *indexSelectivity = clauselist_selectivity(root, selectivityQuals,
7344  index->rel->relid,
7345  JOIN_INNER,
7346  NULL);
7347 
7348  /* fetch estimated page cost for tablespace containing index */
7350  &spc_random_page_cost,
7351  NULL);
7352 
7353  /*
7354  * Generic assumption about index correlation: there isn't any.
7355  */
7356  *indexCorrelation = 0.0;
7357 
7358  /*
7359  * Examine quals to estimate number of search entries & partial matches
7360  */
7361  memset(&counts, 0, sizeof(counts));
7362  counts.arrayScans = 1;
7363  matchPossible = true;
7364 
7365  foreach(l, qinfos)
7366  {
7367  IndexQualInfo *qinfo = (IndexQualInfo *) lfirst(l);
7368  Expr *clause = qinfo->rinfo->clause;
7369 
7370  if (IsA(clause, OpExpr))
7371  {
7372  matchPossible = gincost_opexpr(root,
7373  index,
7374  qinfo,
7375  &counts);
7376  if (!matchPossible)
7377  break;
7378  }
7379  else if (IsA(clause, ScalarArrayOpExpr))
7380  {
7381  matchPossible = gincost_scalararrayopexpr(root,
7382  index,
7383  qinfo,
7384  numEntries,
7385  &counts);
7386  if (!matchPossible)
7387  break;
7388  }
7389  else
7390  {
7391  /* shouldn't be anything else for a GIN index */
7392  elog(ERROR, "unsupported GIN indexqual type: %d",
7393  (int) nodeTag(clause));
7394  }
7395  }
7396 
7397  /* Fall out if there were any provably-unsatisfiable quals */
7398  if (!matchPossible)
7399  {
7400  *indexStartupCost = 0;
7401  *indexTotalCost = 0;
7402  *indexSelectivity = 0;
7403  return;
7404  }
7405 
7406  if (counts.haveFullScan || indexQuals == NIL)
7407  {
7408  /*
7409  * Full index scan will be required. We treat this as if every key in
7410  * the index had been listed in the query; is that reasonable?
7411  */
7412  counts.partialEntries = 0;
7413  counts.exactEntries = numEntries;
7414  counts.searchEntries = numEntries;
7415  }
7416 
7417  /* Will we have more than one iteration of a nestloop scan? */
7418  outer_scans = loop_count;
7419 
7420  /*
7421  * Compute cost to begin scan, first of all, pay attention to pending
7422  * list.
7423  */
7424  entryPagesFetched = numPendingPages;
7425 
7426  /*
7427  * Estimate number of entry pages read. We need to do
7428  * counts.searchEntries searches. Use a power function as it should be,
7429  * but tuples on leaf pages usually is much greater. Here we include all
7430  * searches in entry tree, including search of first entry in partial
7431  * match algorithm
7432  */
7433  entryPagesFetched += ceil(counts.searchEntries * rint(pow(numEntryPages, 0.15)));
7434 
7435  /*
7436  * Add an estimate of entry pages read by partial match algorithm. It's a
7437  * scan over leaf pages in entry tree. We haven't any useful stats here,
7438  * so estimate it as proportion. Because counts.partialEntries is really
7439  * pretty bogus (see code above), it's possible that it is more than
7440  * numEntries; clamp the proportion to ensure sanity.
7441  */
7442  partialScale = counts.partialEntries / numEntries;
7443  partialScale = Min(partialScale, 1.0);
7444 
7445  entryPagesFetched += ceil(numEntryPages * partialScale);
7446 
7447  /*
7448  * Partial match algorithm reads all data pages before doing actual scan,
7449  * so it's a startup cost. Again, we haven't any useful stats here, so
7450  * estimate it as proportion.
7451  */
7452  dataPagesFetched = ceil(numDataPages * partialScale);
7453 
7454  /*
7455  * Calculate cache effects if more than one scan due to nestloops or array
7456  * quals. The result is pro-rated per nestloop scan, but the array qual
7457  * factor shouldn't be pro-rated (compare genericcostestimate).
7458  */
7459  if (outer_scans > 1 || counts.arrayScans > 1)
7460  {
7461  entryPagesFetched *= outer_scans * counts.arrayScans;
7462  entryPagesFetched = index_pages_fetched(entryPagesFetched,
7463  (BlockNumber) numEntryPages,
7464  numEntryPages, root);
7465  entryPagesFetched /= outer_scans;
7466  dataPagesFetched *= outer_scans * counts.arrayScans;
7467  dataPagesFetched = index_pages_fetched(dataPagesFetched,
7468  (BlockNumber) numDataPages,
7469  numDataPages, root);
7470  dataPagesFetched /= outer_scans;
7471  }
7472 
7473  /*
7474  * Here we use random page cost because logically-close pages could be far
7475  * apart on disk.
7476  */
7477  *indexStartupCost = (entryPagesFetched + dataPagesFetched) * spc_random_page_cost;
7478 
7479  /*
7480  * Now compute the number of data pages fetched during the scan.
7481  *
7482  * We assume every entry to have the same number of items, and that there
7483  * is no overlap between them. (XXX: tsvector and array opclasses collect
7484  * statistics on the frequency of individual keys; it would be nice to use
7485  * those here.)
7486  */
7487  dataPagesFetched = ceil(numDataPages * counts.exactEntries / numEntries);
7488 
7489  /*
7490  * If there is a lot of overlap among the entries, in particular if one of
7491  * the entries is very frequent, the above calculation can grossly
7492  * under-estimate. As a simple cross-check, calculate a lower bound based
7493  * on the overall selectivity of the quals. At a minimum, we must read
7494  * one item pointer for each matching entry.
7495  *
7496  * The width of each item pointer varies, based on the level of
7497  * compression. We don't have statistics on that, but an average of
7498  * around 3 bytes per item is fairly typical.
7499  */
7500  dataPagesFetchedBySel = ceil(*indexSelectivity *
7501  (numTuples / (BLCKSZ / 3)));
7502  if (dataPagesFetchedBySel > dataPagesFetched)
7503  dataPagesFetched = dataPagesFetchedBySel;
7504 
7505  /* Account for cache effects, the same as above */
7506  if (outer_scans > 1 || counts.arrayScans > 1)
7507  {
7508  dataPagesFetched *= outer_scans * counts.arrayScans;
7509  dataPagesFetched = index_pages_fetched(dataPagesFetched,
7510  (BlockNumber) numDataPages,
7511  numDataPages, root);
7512  dataPagesFetched /= outer_scans;
7513  }
7514 
7515  /* And apply random_page_cost as the cost per page */
7516  *indexTotalCost = *indexStartupCost +
7517  dataPagesFetched * spc_random_page_cost;
7518 
7519  /*
7520  * Add on index qual eval costs, much as in genericcostestimate
7521  */
7522  qual_arg_cost = other_operands_eval_cost(root, qinfos) +
7523  orderby_operands_eval_cost(root, path);
7524  qual_op_cost = cpu_operator_cost *
7525  (list_length(indexQuals) + list_length(indexOrderBys));
7526 
7527  *indexStartupCost += qual_arg_cost;
7528  *indexTotalCost += qual_arg_cost;
7529  *indexTotalCost += (numTuples * *indexSelectivity) * (cpu_index_tuple_cost + qual_op_cost);
7530  *indexPages = dataPagesFetched;
7531 }
#define NIL
Definition: pg_list.h:69
BlockNumber nEntryPages
Definition: gin.h:45
#define IsA(nodeptr, _type_)
Definition: nodes.h:559
IndexOptInfo * indexinfo
Definition: relation.h:972
bool predicate_implied_by(List *predicate_list, List *restrictinfo_list)
Definition: predtest.c:128
#define Min(x, y)
Definition: c.h:802
#define AccessShareLock
Definition: lockdefs.h:36
double partialEntries
Definition: selfuncs.c:6927
Definition: nodes.h:508
double searchEntries
Definition: selfuncs.c:6929
Oid reltablespace
Definition: relation.h:589
static Cost other_operands_eval_cost(PlannerInfo *root, List *qinfos)
Definition: selfuncs.c:6158
int scale
Definition: pgbench.c:106
int64 nEntries
Definition: gin.h:47
List * list_concat(List *list1, List *list2)
Definition: list.c:321
uint32 BlockNumber
Definition: block.h:31
bool hypothetical
Definition: relation.h:623
double tuples
Definition: relation.h:594
RestrictInfo * rinfo
Definition: selfuncs.h:105
List * deconstruct_indexquals(IndexPath *path)
Definition: selfuncs.c:6063
static Cost orderby_operands_eval_cost(PlannerInfo *root, IndexPath *path)
Definition: selfuncs.c:6183
Definition: type.h:90
double exactEntries
Definition: selfuncs.c:6928
BlockNumber pages
Definition: relation.h:593
#define list_make1(x1)
Definition: pg_list.h:133
List * indexquals
Definition: relation.h:974
RelOptInfo * rel
Definition: relation.h:590
#define ERROR
Definition: elog.h:43
static bool gincost_scalararrayopexpr(PlannerInfo *root, IndexOptInfo *index, IndexQualInfo *qinfo, double numIndexEntries, GinQualCounts *counts)
Definition: selfuncs.c:7097
bool haveFullScan
Definition: selfuncs.c:6926
double cpu_operator_cost
Definition: costsize.c:108
double rint(double x)
Definition: rint.c:22
void get_tablespace_page_costs(Oid spcid, double *spc_random_page_cost, double *spc_seq_page_cost)
Definition: spccache.c:178
Index relid
Definition: relation.h:518
Expr * clause
Definition: relation.h:1637
BlockNumber nPendingPages
Definition: gin.h:43
double arrayScans
Definition: selfuncs.c:6930
List * indexorderbys
Definition: relation.h:976
void ginGetStats(Relation index, GinStatsData *stats)
Definition: ginutil.c:632
static bool gincost_opexpr(PlannerInfo *root, IndexOptInfo *index, IndexQualInfo *qinfo, GinQualCounts *counts)
Definition: selfuncs.c:7041
BlockNumber nDataPages
Definition: gin.h:46
#define Max(x, y)
Definition: c.h:796
#define NULL
Definition: c.h:226
#define lfirst(lc)
Definition: pg_list.h:106
static int list_length(const List *l)
Definition: pg_list.h:89
BlockNumber nTotalPages
Definition: gin.h:44
#define nodeTag(nodeptr)
Definition: nodes.h:513
void index_close(Relation relation, LOCKMODE lockmode)
Definition: indexam.c:176
#define elog
Definition: elog.h:219
Oid indexoid
Definition: relation.h:588
Selectivity clauselist_selectivity(PlannerInfo *root, List *clauses, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
Definition: clausesel.c:92
List * indpred
Definition: relation.h:611
Definition: pg_list.h:45
double cpu_index_tuple_cost
Definition: costsize.c:107
Relation index_open(Oid relationId, LOCKMODE lockmode)
Definition: indexam.c:151
double index_pages_fetched(double tuples_fetched, BlockNumber pages, double index_pages, PlannerInfo *root)
Definition: costsize.c:738
void gistcostestimate ( struct PlannerInfo root,
struct IndexPath path,
double  loop_count,
Cost indexStartupCost,
Cost indexTotalCost,
Selectivity indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 6798 of file selfuncs.c.

References cpu_operator_cost, deconstruct_indexquals(), genericcostestimate(), GenericCosts::indexCorrelation, IndexPath::indexinfo, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, MemSet, GenericCosts::num_sa_scans, GenericCosts::numIndexPages, IndexOptInfo::pages, IndexOptInfo::tree_height, and IndexOptInfo::tuples.

Referenced by gisthandler().

6802 {
6803  IndexOptInfo *index = path->indexinfo;
6804  List *qinfos;
6805  GenericCosts costs;
6806  Cost descentCost;
6807 
6808  /* Do preliminary analysis of indexquals */
6809  qinfos = deconstruct_indexquals(path);
6810 
6811  MemSet(&costs, 0, sizeof(costs));
6812 
6813  genericcostestimate(root, path, loop_count, qinfos, &costs);
6814 
6815  /*
6816  * We model index descent costs similarly to those for btree, but to do
6817  * that we first need an idea of the tree height. We somewhat arbitrarily
6818  * assume that the fanout is 100, meaning the tree height is at most
6819  * log100(index->pages).
6820  *
6821  * Although this computation isn't really expensive enough to require
6822  * caching, we might as well use index->tree_height to cache it.
6823  */
6824  if (index->tree_height < 0) /* unknown? */
6825  {
6826  if (index->pages > 1) /* avoid computing log(0) */
6827  index->tree_height = (int) (log(index->pages) / log(100.0));
6828  else
6829  index->tree_height = 0;
6830  }
6831 
6832  /*
6833  * Add a CPU-cost component to represent the costs of initial descent. We
6834  * just use log(N) here not log2(N) since the branching factor isn't
6835  * necessarily two anyway. As for btree, charge once per SA scan.
6836  */
6837  if (index->tuples > 1) /* avoid computing log(0) */
6838  {
6839  descentCost = ceil(log(index->tuples)) * cpu_operator_cost;
6840  costs.indexStartupCost += descentCost;
6841  costs.indexTotalCost += costs.num_sa_scans * descentCost;
6842  }
6843 
6844  /*
6845  * Likewise add a per-page charge, calculated the same as for btrees.
6846  */
6847  descentCost = (index->tree_height + 1) * 50.0 * cpu_operator_cost;
6848  costs.indexStartupCost += descentCost;
6849  costs.indexTotalCost += costs.num_sa_scans * descentCost;
6850 
6851  *indexStartupCost = costs.indexStartupCost;
6852  *indexTotalCost = costs.indexTotalCost;
6853  *indexSelectivity = costs.indexSelectivity;
6854  *indexCorrelation = costs.indexCorrelation;
6855  *indexPages = costs.numIndexPages;
6856 }
Selectivity indexSelectivity
Definition: selfuncs.h:130
IndexOptInfo * indexinfo
Definition: relation.h:972
#define MemSet(start, val, len)
Definition: c.h:853
double tuples
Definition: relation.h:594
int tree_height
Definition: relation.h:595
List * deconstruct_indexquals(IndexPath *path)
Definition: selfuncs.c:6063
Definition: type.h:90
BlockNumber pages
Definition: relation.h:593
double num_sa_scans
Definition: selfuncs.h:137
double cpu_operator_cost
Definition: costsize.c:108
Cost indexTotalCost
Definition: selfuncs.h:129
double indexCorrelation
Definition: selfuncs.h:131
Cost indexStartupCost
Definition: selfuncs.h:128
Definition: pg_list.h:45
double Cost
Definition: nodes.h:632
void genericcostestimate(PlannerInfo *root, IndexPath *path, double loop_count, List *qinfos, GenericCosts *costs)
Definition: selfuncs.c:6212
double numIndexPages
Definition: selfuncs.h:134
void hashcostestimate ( struct PlannerInfo root,
struct IndexPath path,
double  loop_count,
Cost indexStartupCost,
Cost indexTotalCost,
Selectivity indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 6750 of file selfuncs.c.

References deconstruct_indexquals(), genericcostestimate(), GenericCosts::indexCorrelation, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, MemSet, and GenericCosts::numIndexPages.

Referenced by hashhandler().

6754 {
6755  List *qinfos;
6756  GenericCosts costs;
6757 
6758  /* Do preliminary analysis of indexquals */
6759  qinfos = deconstruct_indexquals(path);
6760 
6761  MemSet(&costs, 0, sizeof(costs));
6762 
6763  genericcostestimate(root, path, loop_count, qinfos, &costs);
6764 
6765  /*
6766  * A hash index has no descent costs as such, since the index AM can go
6767  * directly to the target bucket after computing the hash value. There
6768  * are a couple of other hash-specific costs that we could conceivably add
6769  * here, though:
6770  *
6771  * Ideally we'd charge spc_random_page_cost for each page in the target
6772  * bucket, not just the numIndexPages pages that genericcostestimate
6773  * thought we'd visit. However in most cases we don't know which bucket
6774  * that will be. There's no point in considering the average bucket size
6775  * because the hash AM makes sure that's always one page.
6776  *
6777  * Likewise, we could consider charging some CPU for each index tuple in
6778  * the bucket, if we knew how many there were. But the per-tuple cost is
6779  * just a hash value comparison, not a general datatype-dependent
6780  * comparison, so any such charge ought to be quite a bit less than
6781  * cpu_operator_cost; which makes it probably not worth worrying about.
6782  *
6783  * A bigger issue is that chance hash-value collisions will result in
6784  * wasted probes into the heap. We don't currently attempt to model this
6785  * cost on the grounds that it's rare, but maybe it's not rare enough.
6786  * (Any fix for this ought to consider the generic lossy-operator problem,
6787  * though; it's not entirely hash-specific.)
6788  */
6789 
6790  *indexStartupCost = costs.indexStartupCost;
6791  *indexTotalCost = costs.indexTotalCost;
6792  *indexSelectivity = costs.indexSelectivity;
6793  *indexCorrelation = costs.indexCorrelation;
6794  *indexPages = costs.numIndexPages;
6795 }
Selectivity indexSelectivity
Definition: selfuncs.h:130
#define MemSet(start, val, len)
Definition: c.h:853
List * deconstruct_indexquals(IndexPath *path)
Definition: selfuncs.c:6063
Cost indexTotalCost
Definition: selfuncs.h:129
double indexCorrelation
Definition: selfuncs.h:131
Cost indexStartupCost
Definition: selfuncs.h:128
Definition: pg_list.h:45
void genericcostestimate(PlannerInfo *root, IndexPath *path, double loop_count, List *qinfos, GenericCosts *costs)
Definition: selfuncs.c:6212
double numIndexPages
Definition: selfuncs.h:134
void spgcostestimate ( struct PlannerInfo root,
struct IndexPath path,
double  loop_count,
Cost indexStartupCost,
Cost indexTotalCost,
Selectivity indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 6859 of file selfuncs.c.

References cpu_operator_cost, deconstruct_indexquals(), genericcostestimate(), GenericCosts::indexCorrelation, IndexPath::indexinfo, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, MemSet, GenericCosts::num_sa_scans, GenericCosts::numIndexPages, IndexOptInfo::pages, IndexOptInfo::tree_height, and IndexOptInfo::tuples.

Referenced by spghandler().

6863 {
6864  IndexOptInfo *index = path->indexinfo;
6865  List *qinfos;
6866  GenericCosts costs;
6867  Cost descentCost;
6868 
6869  /* Do preliminary analysis of indexquals */
6870  qinfos = deconstruct_indexquals(path);
6871 
6872  MemSet(&costs, 0, sizeof(costs));
6873 
6874  genericcostestimate(root, path, loop_count, qinfos, &costs);
6875 
6876  /*
6877  * We model index descent costs similarly to those for btree, but to do
6878  * that we first need an idea of the tree height. We somewhat arbitrarily
6879  * assume that the fanout is 100, meaning the tree height is at most
6880  * log100(index->pages).
6881  *
6882  * Although this computation isn't really expensive enough to require
6883  * caching, we might as well use index->tree_height to cache it.
6884  */
6885  if (index->tree_height < 0) /* unknown? */
6886  {
6887  if (index->pages > 1) /* avoid computing log(0) */
6888  index->tree_height = (int) (log(index->pages) / log(100.0));
6889  else
6890  index->tree_height = 0;
6891  }
6892 
6893  /*
6894  * Add a CPU-cost component to represent the costs of initial descent. We
6895  * just use log(N) here not log2(N) since the branching factor isn't
6896  * necessarily two anyway. As for btree, charge once per SA scan.
6897  */
6898  if (index->tuples > 1) /* avoid computing log(0) */
6899  {
6900  descentCost = ceil(log(index->tuples)) * cpu_operator_cost;
6901  costs.indexStartupCost += descentCost;
6902  costs.indexTotalCost += costs.num_sa_scans * descentCost;
6903  }
6904 
6905  /*
6906  * Likewise add a per-page charge, calculated the same as for btrees.
6907  */
6908  descentCost = (index->tree_height + 1) * 50.0 * cpu_operator_cost;
6909  costs.indexStartupCost += descentCost;
6910  costs.indexTotalCost += costs.num_sa_scans * descentCost;
6911 
6912  *indexStartupCost = costs.indexStartupCost;
6913  *indexTotalCost = costs.indexTotalCost;
6914  *indexSelectivity = costs.indexSelectivity;
6915  *indexCorrelation = costs.indexCorrelation;
6916  *indexPages = costs.numIndexPages;
6917 }
Selectivity indexSelectivity
Definition: selfuncs.h:130
IndexOptInfo * indexinfo
Definition: relation.h:972
#define MemSet(start, val, len)
Definition: c.h:853
double tuples
Definition: relation.h:594
int tree_height
Definition: relation.h:595
List * deconstruct_indexquals(IndexPath *path)
Definition: selfuncs.c:6063
Definition: type.h:90
BlockNumber pages
Definition: relation.h:593
double num_sa_scans
Definition: selfuncs.h:137
double cpu_operator_cost
Definition: costsize.c:108
Cost indexTotalCost
Definition: selfuncs.h:129
double indexCorrelation
Definition: selfuncs.h:131
Cost indexStartupCost
Definition: selfuncs.h:128
Definition: pg_list.h:45
double Cost
Definition: nodes.h:632
void genericcostestimate(PlannerInfo *root, IndexPath *path, double loop_count, List *qinfos, GenericCosts *costs)
Definition: selfuncs.c:6212
double numIndexPages
Definition: selfuncs.h:134