PostgreSQL Source Code  git master
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros
index_selfuncs.h File Reference
#include "access/amapi.h"
Include dependency graph for index_selfuncs.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Functions

void brincostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void btcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void hashcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void gistcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void spgcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void gincostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 

Function Documentation

void brincostestimate ( struct PlannerInfo *  root,
struct IndexPath *  path,
double  loop_count,
Cost *  indexStartupCost,
Cost *  indexTotalCost,
Selectivity *  indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 7710 of file selfuncs.c.

References clauselist_selectivity(), cpu_index_tuple_cost, cpu_operator_cost, deconstruct_indexquals(), get_tablespace_page_costs(), IndexPath::indexinfo, IndexPath::indexorderbys, IndexPath::indexquals, JOIN_INNER, list_length(), NULL, orderby_operands_eval_cost(), other_operands_eval_cost(), IndexOptInfo::pages, IndexOptInfo::rel, RelOptInfo::relid, IndexOptInfo::reltablespace, and IndexOptInfo::tuples.

Referenced by brinhandler().

7714 {
7715  IndexOptInfo *index = path->indexinfo;
7716  List *indexQuals = path->indexquals;
7717  List *indexOrderBys = path->indexorderbys;
7718  double numPages = index->pages;
7719  double numTuples = index->tuples;
7720  List *qinfos;
7721  Cost spc_seq_page_cost;
7722  Cost spc_random_page_cost;
7723  double qual_op_cost;
7724  double qual_arg_cost;
7725 
7726  /* Do preliminary analysis of indexquals */
7727  qinfos = deconstruct_indexquals(path);
7728 
7729  /* fetch estimated page cost for tablespace containing index */
7730  get_tablespace_page_costs(index->reltablespace,
7731  &spc_random_page_cost,
7732  &spc_seq_page_cost);
7733 
7734  /*
7735  * BRIN indexes are always read in full; use that as startup cost.
7736  *
7737  * XXX maybe only include revmap pages here?
7738  */
7739  *indexStartupCost = spc_seq_page_cost * numPages * loop_count;
7740 
7741  /*
7742  * To read a BRIN index there might be a bit of back and forth over
7743  * regular pages, as revmap might point to them out of sequential order;
7744  * calculate this as reading the whole index in random order.
7745  */
7746  *indexTotalCost = spc_random_page_cost * numPages * loop_count;
7747 
7748  *indexSelectivity =
7749  clauselist_selectivity(root, indexQuals,
7750  path->indexinfo->rel->relid,
7751  JOIN_INNER, NULL);
7752  *indexCorrelation = 1;
7753 
7754  /*
7755  * Add on index qual eval costs, much as in genericcostestimate.
7756  */
7757  qual_arg_cost = other_operands_eval_cost(root, qinfos) +
7758  orderby_operands_eval_cost(root, path);
7759  qual_op_cost = cpu_operator_cost *
7760  (list_length(indexQuals) + list_length(indexOrderBys));
7761 
7762  *indexStartupCost += qual_arg_cost;
7763  *indexTotalCost += qual_arg_cost;
7764  *indexTotalCost += (numTuples * *indexSelectivity) * (cpu_index_tuple_cost + qual_op_cost);
7765  *indexPages = index->pages;
7766 
7767  /* XXX what about pages_per_range? */
7768 }
IndexOptInfo * indexinfo
Definition: relation.h:995
Oid reltablespace
Definition: relation.h:594
static Cost other_operands_eval_cost(PlannerInfo *root, List *qinfos)
Definition: selfuncs.c:6331
double tuples
Definition: relation.h:599
List * deconstruct_indexquals(IndexPath *path)
Definition: selfuncs.c:6236
static Cost orderby_operands_eval_cost(PlannerInfo *root, IndexPath *path)
Definition: selfuncs.c:6356
Definition: type.h:90
BlockNumber pages
Definition: relation.h:598
List * indexquals
Definition: relation.h:997
RelOptInfo * rel
Definition: relation.h:595
double cpu_operator_cost
Definition: costsize.c:108
void get_tablespace_page_costs(Oid spcid, double *spc_random_page_cost, double *spc_seq_page_cost)
Definition: spccache.c:182
Index relid
Definition: relation.h:522
List * indexorderbys
Definition: relation.h:999
#define NULL
Definition: c.h:229
static int list_length(const List *l)
Definition: pg_list.h:89
Selectivity clauselist_selectivity(PlannerInfo *root, List *clauses, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
Definition: clausesel.c:92
Definition: pg_list.h:45
double cpu_index_tuple_cost
Definition: costsize.c:107
double Cost
Definition: nodes.h:636
void btcostestimate ( struct PlannerInfo *  root,
struct IndexPath *  path,
double  loop_count,
Cost *  indexStartupCost,
Cost *  indexTotalCost,
Selectivity *  indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 6626 of file selfuncs.c.

References add_predicate_to_quals(), Assert, BoolGetDatum, BTEqualStrategyNumber, BTLessStrategyNumber, RestrictInfo::clause, IndexQualInfo::clause_op, clauselist_selectivity(), cpu_operator_cost, deconstruct_indexquals(), elog, ERROR, estimate_array_length(), free_attstatsslot(), VariableStatData::freefunc, genericcostestimate(), get_attstatsslot(), get_index_stats_hook, get_op_opfamily_strategy(), get_opfamily_member(), get_relation_stats_hook, HeapTupleIsValid, IndexQualInfo::indexcol, GenericCosts::indexCorrelation, IndexPath::indexinfo, IndexOptInfo::indexkeys, IndexOptInfo::indexoid, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, RangeTblEntry::inh, Int16GetDatum, InvalidOid, IS_NULL, IsA, JOIN_INNER, lappend(), lfirst, MemSet, IndexOptInfo::ncolumns, NIL, NULL, NullTest::nulltesttype, GenericCosts::num_sa_scans, GenericCosts::numIndexPages, GenericCosts::numIndexTuples, ObjectIdGetDatum, OidIsValid, IndexOptInfo::opcintype, IndexOptInfo::opfamily, IndexQualInfo::other_operand, planner_rt_fetch, IndexOptInfo::rel, ReleaseSysCache(), ReleaseVariableStats, RelOptInfo::relid, RangeTblEntry::relid, IndexOptInfo::reverse_sort, IndexQualInfo::rinfo, rint(), RTE_RELATION, RangeTblEntry::rtekind, SearchSysCache3, STATISTIC_KIND_CORRELATION, STATRELATTINH, VariableStatData::statsTuple, IndexOptInfo::tree_height, RelOptInfo::tuples, IndexOptInfo::tuples, and IndexOptInfo::unique.

Referenced by bthandler().

6630 {
6631  IndexOptInfo *index = path->indexinfo;
6632  List *qinfos;
6633  GenericCosts costs;
6634  Oid relid;
6635  AttrNumber colnum;
6636  VariableStatData vardata;
6637  double numIndexTuples;
6638  Cost descentCost;
6639  List *indexBoundQuals;
6640  int indexcol;
6641  bool eqQualHere;
6642  bool found_saop;
6643  bool found_is_null_op;
6644  double num_sa_scans;
6645  ListCell *lc;
6646 
6647  /* Do preliminary analysis of indexquals */
6648  qinfos = deconstruct_indexquals(path);
6649 
6650  /*
6651  * For a btree scan, only leading '=' quals plus inequality quals for the
6652  * immediately next attribute contribute to index selectivity (these are
6653  * the "boundary quals" that determine the starting and stopping points of
6654  * the index scan). Additional quals can suppress visits to the heap, so
6655  * it's OK to count them in indexSelectivity, but they should not count
6656  * for estimating numIndexTuples. So we must examine the given indexquals
6657  * to find out which ones count as boundary quals. We rely on the
6658  * knowledge that they are given in index column order.
6659  *
6660  * For a RowCompareExpr, we consider only the first column, just as
6661  * rowcomparesel() does.
6662  *
6663  * If there's a ScalarArrayOpExpr in the quals, we'll actually perform N
6664  * index scans not one, but the ScalarArrayOpExpr's operator can be
6665  * considered to act the same as it normally does.
6666  */
6667  indexBoundQuals = NIL;
6668  indexcol = 0;
6669  eqQualHere = false;
6670  found_saop = false;
6671  found_is_null_op = false;
6672  num_sa_scans = 1;
6673  foreach(lc, qinfos)
6674  {
6675  IndexQualInfo *qinfo = (IndexQualInfo *) lfirst(lc);
6676  RestrictInfo *rinfo = qinfo->rinfo;
6677  Expr *clause = rinfo->clause;
6678  Oid clause_op;
6679  int op_strategy;
6680 
6681  if (indexcol != qinfo->indexcol)
6682  {
6683  /* Beginning of a new column's quals */
6684  if (!eqQualHere)
6685  break; /* done if no '=' qual for indexcol */
6686  eqQualHere = false;
6687  indexcol++;
6688  if (indexcol != qinfo->indexcol)
6689  break; /* no quals at all for indexcol */
6690  }
6691 
6692  if (IsA(clause, ScalarArrayOpExpr))
6693  {
6694  int alength = estimate_array_length(qinfo->other_operand);
6695 
6696  found_saop = true;
6697  /* count up number of SA scans induced by indexBoundQuals only */
6698  if (alength > 1)
6699  num_sa_scans *= alength;
6700  }
6701  else if (IsA(clause, NullTest))
6702  {
6703  NullTest *nt = (NullTest *) clause;
6704 
6705  if (nt->nulltesttype == IS_NULL)
6706  {
6707  found_is_null_op = true;
6708  /* IS NULL is like = for selectivity determination purposes */
6709  eqQualHere = true;
6710  }
6711  }
6712 
6713  /*
6714  * We would need to commute the clause_op if not varonleft, except
6715  * that we only care if it's equality or not, so that refinement is
6716  * unnecessary.
6717  */
6718  clause_op = qinfo->clause_op;
6719 
6720  /* check for equality operator */
6721  if (OidIsValid(clause_op))
6722  {
6723  op_strategy = get_op_opfamily_strategy(clause_op,
6724  index->opfamily[indexcol]);
6725  Assert(op_strategy != 0); /* not a member of opfamily?? */
6726  if (op_strategy == BTEqualStrategyNumber)
6727  eqQualHere = true;
6728  }
6729 
6730  indexBoundQuals = lappend(indexBoundQuals, rinfo);
6731  }
6732 
6733  /*
6734  * If index is unique and we found an '=' clause for each column, we can
6735  * just assume numIndexTuples = 1 and skip the expensive
6736  * clauselist_selectivity calculations. However, a ScalarArrayOp or
6737  * NullTest invalidates that theory, even though it sets eqQualHere.
6738  */
6739  if (index->unique &&
6740  indexcol == index->ncolumns - 1 &&
6741  eqQualHere &&
6742  !found_saop &&
6743  !found_is_null_op)
6744  numIndexTuples = 1.0;
6745  else
6746  {
6747  List *selectivityQuals;
6748  Selectivity btreeSelectivity;
6749 
6750  /*
6751  * If the index is partial, AND the index predicate with the
6752  * index-bound quals to produce a more accurate idea of the number of
6753  * rows covered by the bound conditions.
6754  */
6755  selectivityQuals = add_predicate_to_quals(index, indexBoundQuals);
6756 
6757  btreeSelectivity = clauselist_selectivity(root, selectivityQuals,
6758  index->rel->relid,
6759  JOIN_INNER,
6760  NULL);
6761  numIndexTuples = btreeSelectivity * index->rel->tuples;
6762 
6763  /*
6764  * As in genericcostestimate(), we have to adjust for any
6765  * ScalarArrayOpExpr quals included in indexBoundQuals, and then round
6766  * to integer.
6767  */
6768  numIndexTuples = rint(numIndexTuples / num_sa_scans);
6769  }
6770 
6771  /*
6772  * Now do generic index cost estimation.
6773  */
6774  MemSet(&costs, 0, sizeof(costs));
6775  costs.numIndexTuples = numIndexTuples;
6776 
6777  genericcostestimate(root, path, loop_count, qinfos, &costs);
6778 
6779  /*
6780  * Add a CPU-cost component to represent the costs of initial btree
6781  * descent. We don't charge any I/O cost for touching upper btree levels,
6782  * since they tend to stay in cache, but we still have to do about log2(N)
6783  * comparisons to descend a btree of N leaf tuples. We charge one
6784  * cpu_operator_cost per comparison.
6785  *
6786  * If there are ScalarArrayOpExprs, charge this once per SA scan. The
6787  * ones after the first one are not startup cost so far as the overall
6788  * plan is concerned, so add them only to "total" cost.
6789  */
6790  if (index->tuples > 1) /* avoid computing log(0) */
6791  {
6792  descentCost = ceil(log(index->tuples) / log(2.0)) * cpu_operator_cost;
6793  costs.indexStartupCost += descentCost;
6794  costs.indexTotalCost += costs.num_sa_scans * descentCost;
6795  }
6796 
6797  /*
6798  * Even though we're not charging I/O cost for touching upper btree pages,
6799  * it's still reasonable to charge some CPU cost per page descended
6800  * through. Moreover, if we had no such charge at all, bloated indexes
6801  * would appear to have the same search cost as unbloated ones, at least
6802  * in cases where only a single leaf page is expected to be visited. This
6803  * cost is somewhat arbitrarily set at 50x cpu_operator_cost per page
6804  * touched. The number of such pages is btree tree height plus one (ie,
6805  * we charge for the leaf page too). As above, charge once per SA scan.
6806  */
6807  descentCost = (index->tree_height + 1) * 50.0 * cpu_operator_cost;
6808  costs.indexStartupCost += descentCost;
6809  costs.indexTotalCost += costs.num_sa_scans * descentCost;
6810 
6811  /*
6812  * If we can get an estimate of the first column's ordering correlation C
6813  * from pg_statistic, estimate the index correlation as C for a
6814  * single-column index, or C * 0.75 for multiple columns. (The idea here
6815  * is that multiple columns dilute the importance of the first column's
6816  * ordering, but don't negate it entirely. Before 8.0 we divided the
6817  * correlation by the number of columns, but that seems too strong.)
6818  */
6819  MemSet(&vardata, 0, sizeof(vardata));
6820 
6821  if (index->indexkeys[0] != 0)
6822  {
6823  /* Simple variable --- look to stats for the underlying table */
6824  RangeTblEntry *rte = planner_rt_fetch(index->rel->relid, root);
6825 
6826  Assert(rte->rtekind == RTE_RELATION);
6827  relid = rte->relid;
6828  Assert(relid != InvalidOid);
6829  colnum = index->indexkeys[0];
6830 
6831  if (get_relation_stats_hook &&
6832  (*get_relation_stats_hook) (root, rte, colnum, &vardata))
6833  {
6834  /*
6835  * The hook took control of acquiring a stats tuple. If it did
6836  * supply a tuple, it'd better have supplied a freefunc.
6837  */
6838  if (HeapTupleIsValid(vardata.statsTuple) &&
6839  !vardata.freefunc)
6840  elog(ERROR, "no function provided to release variable stats with");
6841  }
6842  else
6843  {
6844  vardata.statsTuple = SearchSysCache3(STATRELATTINH,
6845  ObjectIdGetDatum(relid),
6846  Int16GetDatum(colnum),
6847  BoolGetDatum(rte->inh));
6848  vardata.freefunc = ReleaseSysCache;
6849  }
6850  }
6851  else
6852  {
6853  /* Expression --- maybe there are stats for the index itself */
6854  relid = index->indexoid;
6855  colnum = 1;
6856 
6857  if (get_index_stats_hook &&
6858  (*get_index_stats_hook) (root, relid, colnum, &vardata))
6859  {
6860  /*
6861  * The hook took control of acquiring a stats tuple. If it did
6862  * supply a tuple, it'd better have supplied a freefunc.
6863  */
6864  if (HeapTupleIsValid(vardata.statsTuple) &&
6865  !vardata.freefunc)
6866  elog(ERROR, "no function provided to release variable stats with");
6867  }
6868  else
6869  {
6870  vardata.statsTuple = SearchSysCache3(STATRELATTINH,
6871  ObjectIdGetDatum(relid),
6872  Int16GetDatum(colnum),
6873  BoolGetDatum(false));
6874  vardata.freefunc = ReleaseSysCache;
6875  }
6876  }
6877 
6878  if (HeapTupleIsValid(vardata.statsTuple))
6879  {
6880  Oid sortop;
6881  float4 *numbers;
6882  int nnumbers;
6883 
6884  sortop = get_opfamily_member(index->opfamily[0],
6885  index->opcintype[0],
6886  index->opcintype[0],
6887  BTLessStrategyNumber);
6888  if (OidIsValid(sortop) &&
6889  get_attstatsslot(vardata.statsTuple, InvalidOid, 0,
6890  STATISTIC_KIND_CORRELATION,
6891  sortop,
6892  NULL,
6893  NULL, NULL,
6894  &numbers, &nnumbers))
6895  {
6896  double varCorrelation;
6897 
6898  Assert(nnumbers == 1);
6899  varCorrelation = numbers[0];
6900 
6901  if (index->reverse_sort[0])
6902  varCorrelation = -varCorrelation;
6903 
6904  if (index->ncolumns > 1)
6905  costs.indexCorrelation = varCorrelation * 0.75;
6906  else
6907  costs.indexCorrelation = varCorrelation;
6908 
6909  free_attstatsslot(InvalidOid, NULL, 0, numbers, nnumbers);
6910  }
6911  }
6912 
6913  ReleaseVariableStats(vardata);
6914 
6915  *indexStartupCost = costs.indexStartupCost;
6916  *indexTotalCost = costs.indexTotalCost;
6917  *indexSelectivity = costs.indexSelectivity;
6918  *indexCorrelation = costs.indexCorrelation;
6919  *indexPages = costs.numIndexPages;
6920 }
Selectivity indexSelectivity
Definition: selfuncs.h:130
#define NIL
Definition: pg_list.h:69
#define IsA(nodeptr, _type_)
Definition: nodes.h:557
IndexOptInfo * indexinfo
Definition: relation.h:995
HeapTuple statsTuple
Definition: selfuncs.h:71
double tuples
Definition: relation.h:534
static List * add_predicate_to_quals(IndexOptInfo *index, List *indexQuals)
Definition: selfuncs.c:6604
#define Int16GetDatum(X)
Definition: postgres.h:457
#define MemSet(start, val, len)
Definition: c.h:857
double Selectivity
Definition: nodes.h:635
bool get_attstatsslot(HeapTuple statstuple, Oid atttype, int32 atttypmod, int reqkind, Oid reqop, Oid *actualop, Datum **values, int *nvalues, float4 **numbers, int *nnumbers)
Definition: lsyscache.c:2854
double tuples
Definition: relation.h:599
unsigned int Oid
Definition: postgres_ext.h:31
int tree_height
Definition: relation.h:600
#define OidIsValid(objectId)
Definition: c.h:538
RestrictInfo * rinfo
Definition: selfuncs.h:105
List * deconstruct_indexquals(IndexPath *path)
Definition: selfuncs.c:6236
bool unique
Definition: relation.h:626
Definition: type.h:90
int estimate_array_length(Node *arrayexpr)
Definition: selfuncs.c:2083
RelOptInfo * rel
Definition: relation.h:595
#define planner_rt_fetch(rti, root)
Definition: relation.h:324
#define ObjectIdGetDatum(X)
Definition: postgres.h:513
#define ERROR
Definition: elog.h:43
double num_sa_scans
Definition: selfuncs.h:137
#define STATISTIC_KIND_CORRELATION
Definition: pg_statistic.h:233
Oid get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype, int16 strategy)
Definition: lsyscache.c:163
double cpu_operator_cost
Definition: costsize.c:108
Cost indexTotalCost
Definition: selfuncs.h:129
get_relation_stats_hook_type get_relation_stats_hook
Definition: selfuncs.c:151
double rint(double x)
Definition: rint.c:22
int ncolumns
Definition: relation.h:603
Index relid
Definition: relation.h:522
List * lappend(List *list, void *datum)
Definition: list.c:128
Expr * clause
Definition: relation.h:1699
float float4
Definition: c.h:380
double indexCorrelation
Definition: selfuncs.h:131
void ReleaseSysCache(HeapTuple tuple)
Definition: syscache.c:1116
NullTestType nulltesttype
Definition: primnodes.h:1179
#define BoolGetDatum(X)
Definition: postgres.h:408
#define InvalidOid
Definition: postgres_ext.h:36
double numIndexTuples
Definition: selfuncs.h:135
#define HeapTupleIsValid(tuple)
Definition: htup.h:77
#define NULL
Definition: c.h:229
#define Assert(condition)
Definition: c.h:675
#define lfirst(lc)
Definition: pg_list.h:106
get_index_stats_hook_type get_index_stats_hook
Definition: selfuncs.c:152
Oid * opcintype
Definition: relation.h:607
Cost indexStartupCost
Definition: selfuncs.h:128
Oid * opfamily
Definition: relation.h:606
RTEKind rtekind
Definition: parsenodes.h:916
int get_op_opfamily_strategy(Oid opno, Oid opfamily)
Definition: lsyscache.c:80
#define ReleaseVariableStats(vardata)
Definition: selfuncs.h:80
Node * other_operand
Definition: selfuncs.h:109
#define SearchSysCache3(cacheId, key1, key2, key3)
Definition: syscache.h:156
int * indexkeys
Definition: relation.h:604
#define elog
Definition: elog.h:219
Oid indexoid
Definition: relation.h:593
Selectivity clauselist_selectivity(PlannerInfo *root, List *clauses, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
Definition: clausesel.c:92
void(* freefunc)(HeapTuple tuple)
Definition: selfuncs.h:73
bool * reverse_sort
Definition: relation.h:609
#define BTLessStrategyNumber
Definition: stratnum.h:29
Definition: pg_list.h:45
int16 AttrNumber
Definition: attnum.h:21
void free_attstatsslot(Oid atttype, Datum *values, int nvalues, float4 *numbers, int nnumbers)
Definition: lsyscache.c:2978
#define BTEqualStrategyNumber
Definition: stratnum.h:31
double Cost
Definition: nodes.h:636
void genericcostestimate(PlannerInfo *root, IndexPath *path, double loop_count, List *qinfos, GenericCosts *costs)
Definition: selfuncs.c:6385
double numIndexPages
Definition: selfuncs.h:134
void gincostestimate ( struct PlannerInfo *  root,
struct IndexPath *  path,
double  loop_count,
Cost *  indexStartupCost,
Cost *  indexTotalCost,
Selectivity *  indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 7385 of file selfuncs.c.

References AccessShareLock, GinQualCounts::arrayScans, RestrictInfo::clause, clauselist_selectivity(), cpu_index_tuple_cost, cpu_operator_cost, deconstruct_indexquals(), elog, ERROR, GinQualCounts::exactEntries, get_tablespace_page_costs(), gincost_opexpr(), gincost_scalararrayopexpr(), ginGetStats(), GinQualCounts::haveFullScan, IndexOptInfo::hypothetical, index_close(), index_open(), index_pages_fetched(), IndexPath::indexinfo, IndexOptInfo::indexoid, IndexPath::indexorderbys, IndexPath::indexquals, IndexOptInfo::indpred, IsA, JOIN_INNER, lfirst, list_concat(), list_length(), list_make1, Max, Min, GinStatsData::nDataPages, GinStatsData::nEntries, GinStatsData::nEntryPages, NIL, nodeTag, GinStatsData::nPendingPages, GinStatsData::nTotalPages, NULL, orderby_operands_eval_cost(), other_operands_eval_cost(), IndexOptInfo::pages, GinQualCounts::partialEntries, predicate_implied_by(), IndexOptInfo::rel, RelOptInfo::relid, IndexOptInfo::reltablespace, IndexQualInfo::rinfo, rint(), scale, GinQualCounts::searchEntries, and IndexOptInfo::tuples.

Referenced by ginhandler().

7389 {
7390  IndexOptInfo *index = path->indexinfo;
7391  List *indexQuals = path->indexquals;
7392  List *indexOrderBys = path->indexorderbys;
7393  List *qinfos;
7394  ListCell *l;
7395  List *selectivityQuals;
7396  double numPages = index->pages,
7397  numTuples = index->tuples;
7398  double numEntryPages,
7399  numDataPages,
7400  numPendingPages,
7401  numEntries;
7402  GinQualCounts counts;
7403  bool matchPossible;
7404  double partialScale;
7405  double entryPagesFetched,
7406  dataPagesFetched,
7407  dataPagesFetchedBySel;
7408  double qual_op_cost,
7409  qual_arg_cost,
7410  spc_random_page_cost,
7411  outer_scans;
7412  Relation indexRel;
7413  GinStatsData ginStats;
7414 
7415  /* Do preliminary analysis of indexquals */
7416  qinfos = deconstruct_indexquals(path);
7417 
7418  /*
7419  * Obtain statistical information from the meta page, if possible. Else
7420  * set ginStats to zeroes, and we'll cope below.
7421  */
7422  if (!index->hypothetical)
7423  {
7424  indexRel = index_open(index->indexoid, AccessShareLock);
7425  ginGetStats(indexRel, &ginStats);
7426  index_close(indexRel, AccessShareLock);
7427  }
7428  else
7429  {
7430  memset(&ginStats, 0, sizeof(ginStats));
7431  }
7432 
7433  /*
7434  * Assuming we got valid (nonzero) stats at all, nPendingPages can be
7435  * trusted, but the other fields are data as of the last VACUUM. We can
7436  * scale them up to account for growth since then, but that method only
7437  * goes so far; in the worst case, the stats might be for a completely
7438  * empty index, and scaling them will produce pretty bogus numbers.
7439  * Somewhat arbitrarily, set the cutoff for doing scaling at 4X growth; if
7440  * it's grown more than that, fall back to estimating things only from the
7441  * assumed-accurate index size. But we'll trust nPendingPages in any case
7442  * so long as it's not clearly insane, ie, more than the index size.
7443  */
7444  if (ginStats.nPendingPages < numPages)
7445  numPendingPages = ginStats.nPendingPages;
7446  else
7447  numPendingPages = 0;
7448 
7449  if (numPages > 0 && ginStats.nTotalPages <= numPages &&
7450  ginStats.nTotalPages > numPages / 4 &&
7451  ginStats.nEntryPages > 0 && ginStats.nEntries > 0)
7452  {
7453  /*
7454  * OK, the stats seem close enough to sane to be trusted. But we
7455  * still need to scale them by the ratio numPages / nTotalPages to
7456  * account for growth since the last VACUUM.
7457  */
7458  double scale = numPages / ginStats.nTotalPages;
7459 
7460  numEntryPages = ceil(ginStats.nEntryPages * scale);
7461  numDataPages = ceil(ginStats.nDataPages * scale);
7462  numEntries = ceil(ginStats.nEntries * scale);
7463  /* ensure we didn't round up too much */
7464  numEntryPages = Min(numEntryPages, numPages - numPendingPages);
7465  numDataPages = Min(numDataPages,
7466  numPages - numPendingPages - numEntryPages);
7467  }
7468  else
7469  {
7470  /*
7471  * We might get here because it's a hypothetical index, or an index
7472  * created pre-9.1 and never vacuumed since upgrading (in which case
7473  * its stats would read as zeroes), or just because it's grown too
7474  * much since the last VACUUM for us to put our faith in scaling.
7475  *
7476  * Invent some plausible internal statistics based on the index page
7477  * count (and clamp that to at least 10 pages, just in case). We
7478  * estimate that 90% of the index is entry pages, and the rest is data
7479  * pages. Estimate 100 entries per entry page; this is rather bogus
7480  * since it'll depend on the size of the keys, but it's more robust
7481  * than trying to predict the number of entries per heap tuple.
7482  */
7483  numPages = Max(numPages, 10);
7484  numEntryPages = floor((numPages - numPendingPages) * 0.90);
7485  numDataPages = numPages - numPendingPages - numEntryPages;
7486  numEntries = floor(numEntryPages * 100);
7487  }
7488 
7489  /* In an empty index, numEntries could be zero. Avoid divide-by-zero */
7490  if (numEntries < 1)
7491  numEntries = 1;
7492 
7493  /*
7494  * Include predicate in selectivityQuals (should match
7495  * genericcostestimate)
7496  */
7497  if (index->indpred != NIL)
7498  {
7499  List *predExtraQuals = NIL;
7500 
7501  foreach(l, index->indpred)
7502  {
7503  Node *predQual = (Node *) lfirst(l);
7504  List *oneQual = list_make1(predQual);
7505 
7506  if (!predicate_implied_by(oneQual, indexQuals))
7507  predExtraQuals = list_concat(predExtraQuals, oneQual);
7508  }
7509  /* list_concat avoids modifying the passed-in indexQuals list */
7510  selectivityQuals = list_concat(predExtraQuals, indexQuals);
7511  }
7512  else
7513  selectivityQuals = indexQuals;
7514 
7515  /* Estimate the fraction of main-table tuples that will be visited */
7516  *indexSelectivity = clauselist_selectivity(root, selectivityQuals,
7517  index->rel->relid,
7518  JOIN_INNER,
7519  NULL);
7520 
7521  /* fetch estimated page cost for tablespace containing index */
7522  get_tablespace_page_costs(index->reltablespace,
7523  &spc_random_page_cost,
7524  NULL);
7525 
7526  /*
7527  * Generic assumption about index correlation: there isn't any.
7528  */
7529  *indexCorrelation = 0.0;
7530 
7531  /*
7532  * Examine quals to estimate number of search entries & partial matches
7533  */
7534  memset(&counts, 0, sizeof(counts));
7535  counts.arrayScans = 1;
7536  matchPossible = true;
7537 
7538  foreach(l, qinfos)
7539  {
7540  IndexQualInfo *qinfo = (IndexQualInfo *) lfirst(l);
7541  Expr *clause = qinfo->rinfo->clause;
7542 
7543  if (IsA(clause, OpExpr))
7544  {
7545  matchPossible = gincost_opexpr(root,
7546  index,
7547  qinfo,
7548  &counts);
7549  if (!matchPossible)
7550  break;
7551  }
7552  else if (IsA(clause, ScalarArrayOpExpr))
7553  {
7554  matchPossible = gincost_scalararrayopexpr(root,
7555  index,
7556  qinfo,
7557  numEntries,
7558  &counts);
7559  if (!matchPossible)
7560  break;
7561  }
7562  else
7563  {
7564  /* shouldn't be anything else for a GIN index */
7565  elog(ERROR, "unsupported GIN indexqual type: %d",
7566  (int) nodeTag(clause));
7567  }
7568  }
7569 
7570  /* Fall out if there were any provably-unsatisfiable quals */
7571  if (!matchPossible)
7572  {
7573  *indexStartupCost = 0;
7574  *indexTotalCost = 0;
7575  *indexSelectivity = 0;
7576  return;
7577  }
7578 
7579  if (counts.haveFullScan || indexQuals == NIL)
7580  {
7581  /*
7582  * Full index scan will be required. We treat this as if every key in
7583  * the index had been listed in the query; is that reasonable?
7584  */
7585  counts.partialEntries = 0;
7586  counts.exactEntries = numEntries;
7587  counts.searchEntries = numEntries;
7588  }
7589 
7590  /* Will we have more than one iteration of a nestloop scan? */
7591  outer_scans = loop_count;
7592 
7593  /*
7594  * Compute cost to begin scan, first of all, pay attention to pending
7595  * list.
7596  */
7597  entryPagesFetched = numPendingPages;
7598 
7599  /*
7600  * Estimate number of entry pages read. We need to do
7601  * counts.searchEntries searches. Use a power function as it should be,
7602  * but tuples on leaf pages usually is much greater. Here we include all
7603  * searches in entry tree, including search of first entry in partial
7604  * match algorithm
7605  */
7606  entryPagesFetched += ceil(counts.searchEntries * rint(pow(numEntryPages, 0.15)));
7607 
7608  /*
7609  * Add an estimate of entry pages read by partial match algorithm. It's a
7610  * scan over leaf pages in entry tree. We haven't any useful stats here,
7611  * so estimate it as proportion. Because counts.partialEntries is really
7612  * pretty bogus (see code above), it's possible that it is more than
7613  * numEntries; clamp the proportion to ensure sanity.
7614  */
7615  partialScale = counts.partialEntries / numEntries;
7616  partialScale = Min(partialScale, 1.0);
7617 
7618  entryPagesFetched += ceil(numEntryPages * partialScale);
7619 
7620  /*
7621  * Partial match algorithm reads all data pages before doing actual scan,
7622  * so it's a startup cost. Again, we haven't any useful stats here, so
7623  * estimate it as proportion.
7624  */
7625  dataPagesFetched = ceil(numDataPages * partialScale);
7626 
7627  /*
7628  * Calculate cache effects if more than one scan due to nestloops or array
7629  * quals. The result is pro-rated per nestloop scan, but the array qual
7630  * factor shouldn't be pro-rated (compare genericcostestimate).
7631  */
7632  if (outer_scans > 1 || counts.arrayScans > 1)
7633  {
7634  entryPagesFetched *= outer_scans * counts.arrayScans;
7635  entryPagesFetched = index_pages_fetched(entryPagesFetched,
7636  (BlockNumber) numEntryPages,
7637  numEntryPages, root);
7638  entryPagesFetched /= outer_scans;
7639  dataPagesFetched *= outer_scans * counts.arrayScans;
7640  dataPagesFetched = index_pages_fetched(dataPagesFetched,
7641  (BlockNumber) numDataPages,
7642  numDataPages, root);
7643  dataPagesFetched /= outer_scans;
7644  }
7645 
7646  /*
7647  * Here we use random page cost because logically-close pages could be far
7648  * apart on disk.
7649  */
7650  *indexStartupCost = (entryPagesFetched + dataPagesFetched) * spc_random_page_cost;
7651 
7652  /*
7653  * Now compute the number of data pages fetched during the scan.
7654  *
7655  * We assume every entry to have the same number of items, and that there
7656  * is no overlap between them. (XXX: tsvector and array opclasses collect
7657  * statistics on the frequency of individual keys; it would be nice to use
7658  * those here.)
7659  */
7660  dataPagesFetched = ceil(numDataPages * counts.exactEntries / numEntries);
7661 
7662  /*
7663  * If there is a lot of overlap among the entries, in particular if one of
7664  * the entries is very frequent, the above calculation can grossly
7665  * under-estimate. As a simple cross-check, calculate a lower bound based
7666  * on the overall selectivity of the quals. At a minimum, we must read
7667  * one item pointer for each matching entry.
7668  *
7669  * The width of each item pointer varies, based on the level of
7670  * compression. We don't have statistics on that, but an average of
7671  * around 3 bytes per item is fairly typical.
7672  */
7673  dataPagesFetchedBySel = ceil(*indexSelectivity *
7674  (numTuples / (BLCKSZ / 3)));
7675  if (dataPagesFetchedBySel > dataPagesFetched)
7676  dataPagesFetched = dataPagesFetchedBySel;
7677 
7678  /* Account for cache effects, the same as above */
7679  if (outer_scans > 1 || counts.arrayScans > 1)
7680  {
7681  dataPagesFetched *= outer_scans * counts.arrayScans;
7682  dataPagesFetched = index_pages_fetched(dataPagesFetched,
7683  (BlockNumber) numDataPages,
7684  numDataPages, root);
7685  dataPagesFetched /= outer_scans;
7686  }
7687 
7688  /* And apply random_page_cost as the cost per page */
7689  *indexTotalCost = *indexStartupCost +
7690  dataPagesFetched * spc_random_page_cost;
7691 
7692  /*
7693  * Add on index qual eval costs, much as in genericcostestimate
7694  */
7695  qual_arg_cost = other_operands_eval_cost(root, qinfos) +
7696  orderby_operands_eval_cost(root, path);
7697  qual_op_cost = cpu_operator_cost *
7698  (list_length(indexQuals) + list_length(indexOrderBys));
7699 
7700  *indexStartupCost += qual_arg_cost;
7701  *indexTotalCost += qual_arg_cost;
7702  *indexTotalCost += (numTuples * *indexSelectivity) * (cpu_index_tuple_cost + qual_op_cost);
7703  *indexPages = dataPagesFetched;
7704 }
#define NIL
Definition: pg_list.h:69
BlockNumber nEntryPages
Definition: gin.h:45
#define IsA(nodeptr, _type_)
Definition: nodes.h:557
IndexOptInfo * indexinfo
Definition: relation.h:995
bool predicate_implied_by(List *predicate_list, List *restrictinfo_list)
Definition: predtest.c:128
#define Min(x, y)
Definition: c.h:806
#define AccessShareLock
Definition: lockdefs.h:36
double partialEntries
Definition: selfuncs.c:7100
Definition: nodes.h:506
double searchEntries
Definition: selfuncs.c:7102
Oid reltablespace
Definition: relation.h:594
static Cost other_operands_eval_cost(PlannerInfo *root, List *qinfos)
Definition: selfuncs.c:6331
int scale
Definition: pgbench.c:106
int64 nEntries
Definition: gin.h:47
List * list_concat(List *list1, List *list2)
Definition: list.c:321
uint32 BlockNumber
Definition: block.h:31
bool hypothetical
Definition: relation.h:628
double tuples
Definition: relation.h:599
RestrictInfo * rinfo
Definition: selfuncs.h:105
List * deconstruct_indexquals(IndexPath *path)
Definition: selfuncs.c:6236
static Cost orderby_operands_eval_cost(PlannerInfo *root, IndexPath *path)
Definition: selfuncs.c:6356
Definition: type.h:90
double exactEntries
Definition: selfuncs.c:7101
BlockNumber pages
Definition: relation.h:598
#define list_make1(x1)
Definition: pg_list.h:133
List * indexquals
Definition: relation.h:997
RelOptInfo * rel
Definition: relation.h:595
#define ERROR
Definition: elog.h:43
static bool gincost_scalararrayopexpr(PlannerInfo *root, IndexOptInfo *index, IndexQualInfo *qinfo, double numIndexEntries, GinQualCounts *counts)
Definition: selfuncs.c:7270
bool haveFullScan
Definition: selfuncs.c:7099
double cpu_operator_cost
Definition: costsize.c:108
double rint(double x)
Definition: rint.c:22
void get_tablespace_page_costs(Oid spcid, double *spc_random_page_cost, double *spc_seq_page_cost)
Definition: spccache.c:182
Index relid
Definition: relation.h:522
Expr * clause
Definition: relation.h:1699
BlockNumber nPendingPages
Definition: gin.h:43
double arrayScans
Definition: selfuncs.c:7103
List * indexorderbys
Definition: relation.h:999
void ginGetStats(Relation index, GinStatsData *stats)
Definition: ginutil.c:632
static bool gincost_opexpr(PlannerInfo *root, IndexOptInfo *index, IndexQualInfo *qinfo, GinQualCounts *counts)
Definition: selfuncs.c:7214
BlockNumber nDataPages
Definition: gin.h:46
#define Max(x, y)
Definition: c.h:800
#define NULL
Definition: c.h:229
#define lfirst(lc)
Definition: pg_list.h:106
static int list_length(const List *l)
Definition: pg_list.h:89
BlockNumber nTotalPages
Definition: gin.h:44
#define nodeTag(nodeptr)
Definition: nodes.h:511
void index_close(Relation relation, LOCKMODE lockmode)
Definition: indexam.c:176
#define elog
Definition: elog.h:219
Oid indexoid
Definition: relation.h:593
Selectivity clauselist_selectivity(PlannerInfo *root, List *clauses, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
Definition: clausesel.c:92
List * indpred
Definition: relation.h:616
Definition: pg_list.h:45
double cpu_index_tuple_cost
Definition: costsize.c:107
Relation index_open(Oid relationId, LOCKMODE lockmode)
Definition: indexam.c:151
double index_pages_fetched(double tuples_fetched, BlockNumber pages, double index_pages, PlannerInfo *root)
Definition: costsize.c:813
void gistcostestimate ( struct PlannerInfo root,
struct IndexPath path,
double  loop_count,
Cost indexStartupCost,
Cost indexTotalCost,
Selectivity indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 6971 of file selfuncs.c.

References cpu_operator_cost, deconstruct_indexquals(), genericcostestimate(), GenericCosts::indexCorrelation, IndexPath::indexinfo, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, MemSet, GenericCosts::num_sa_scans, GenericCosts::numIndexPages, IndexOptInfo::pages, IndexOptInfo::tree_height, and IndexOptInfo::tuples.

Referenced by gisthandler().

/*
 * gistcostestimate -- planner cost estimate for scanning a GiST index.
 *
 * Starts from genericcostestimate() and then adds a descent-cost model:
 * tree height is taken as log100(index->pages) (fanout arbitrarily assumed
 * to be 100), with index->tree_height used as a cache for the computed value.
 * Results are handed back through the out-parameters indexStartupCost,
 * indexTotalCost, indexSelectivity, indexCorrelation and indexPages.
 * (Listing rendered from selfuncs.c; the leading numbers are the original
 * source line numbers, not code.)
 */
6975 {
6976  IndexOptInfo *index = path->indexinfo;
6977  List *qinfos;
6978  GenericCosts costs;
6979  Cost descentCost;
6980 
6981  /* Do preliminary analysis of indexquals */
6982  qinfos = deconstruct_indexquals(path);
6983 
6984  MemSet(&costs, 0, sizeof(costs));
6985 
6986  genericcostestimate(root, path, loop_count, qinfos, &costs);
6987 
6988  /*
6989  * We model index descent costs similarly to those for btree, but to do
6990  * that we first need an idea of the tree height. We somewhat arbitrarily
6991  * assume that the fanout is 100, meaning the tree height is at most
6992  * log100(index->pages).
6993  *
6994  * Although this computation isn't really expensive enough to require
6995  * caching, we might as well use index->tree_height to cache it.
6996  */
6997  if (index->tree_height < 0) /* unknown? */
6998  {
6999  if (index->pages > 1) /* avoid computing log(0) */
7000  index->tree_height = (int) (log(index->pages) / log(100.0));
7001  else
7002  index->tree_height = 0;
7003  }
7004 
7005  /*
7006  * Add a CPU-cost component to represent the costs of initial descent. We
7007  * just use log(N) here not log2(N) since the branching factor isn't
7008  * necessarily two anyway. As for btree, charge once per SA scan.
7009  */
7010  if (index->tuples > 1) /* avoid computing log(0) */
7011  {
7012  descentCost = ceil(log(index->tuples)) * cpu_operator_cost;
7013  costs.indexStartupCost += descentCost;
7014  costs.indexTotalCost += costs.num_sa_scans * descentCost;
7015  }
7016 
7017  /*
7018  * Likewise add a per-page charge, calculated the same as for btrees.
7019  */
7020  descentCost = (index->tree_height + 1) * 50.0 * cpu_operator_cost;
7021  costs.indexStartupCost += descentCost;
7022  costs.indexTotalCost += costs.num_sa_scans * descentCost;
7023 
/* Copy the accumulated generic + descent costs into the out-parameters. */
7024  *indexStartupCost = costs.indexStartupCost;
7025  *indexTotalCost = costs.indexTotalCost;
7026  *indexSelectivity = costs.indexSelectivity;
7027  *indexCorrelation = costs.indexCorrelation;
7028  *indexPages = costs.numIndexPages;
7029 }
Selectivity indexSelectivity
Definition: selfuncs.h:130
IndexOptInfo * indexinfo
Definition: relation.h:995
#define MemSet(start, val, len)
Definition: c.h:857
double tuples
Definition: relation.h:599
int tree_height
Definition: relation.h:600
List * deconstruct_indexquals(IndexPath *path)
Definition: selfuncs.c:6236
Definition: type.h:90
BlockNumber pages
Definition: relation.h:598
double num_sa_scans
Definition: selfuncs.h:137
double cpu_operator_cost
Definition: costsize.c:108
Cost indexTotalCost
Definition: selfuncs.h:129
double indexCorrelation
Definition: selfuncs.h:131
Cost indexStartupCost
Definition: selfuncs.h:128
Definition: pg_list.h:45
double Cost
Definition: nodes.h:636
void genericcostestimate(PlannerInfo *root, IndexPath *path, double loop_count, List *qinfos, GenericCosts *costs)
Definition: selfuncs.c:6385
double numIndexPages
Definition: selfuncs.h:134
void hashcostestimate ( struct PlannerInfo root,
struct IndexPath path,
double  loop_count,
Cost indexStartupCost,
Cost indexTotalCost,
Selectivity indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 6923 of file selfuncs.c.

References deconstruct_indexquals(), genericcostestimate(), GenericCosts::indexCorrelation, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, MemSet, and GenericCosts::numIndexPages.

Referenced by hashhandler().

/*
 * hashcostestimate -- planner cost estimate for scanning a hash index.
 *
 * Simply delegates to genericcostestimate(); unlike the btree/GiST/SP-GiST
 * estimators it adds no descent-cost component, for the reasons detailed in
 * the comment below.  Results are handed back through the out-parameters
 * indexStartupCost, indexTotalCost, indexSelectivity, indexCorrelation and
 * indexPages.
 * (Listing rendered from selfuncs.c; the leading numbers are the original
 * source line numbers, not code.)
 */
6927 {
6928  List *qinfos;
6929  GenericCosts costs;
6930 
6931  /* Do preliminary analysis of indexquals */
6932  qinfos = deconstruct_indexquals(path);
6933 
6934  MemSet(&costs, 0, sizeof(costs));
6935 
6936  genericcostestimate(root, path, loop_count, qinfos, &costs);
6937 
6938  /*
6939  * A hash index has no descent costs as such, since the index AM can go
6940  * directly to the target bucket after computing the hash value. There
6941  * are a couple of other hash-specific costs that we could conceivably add
6942  * here, though:
6943  *
6944  * Ideally we'd charge spc_random_page_cost for each page in the target
6945  * bucket, not just the numIndexPages pages that genericcostestimate
6946  * thought we'd visit. However in most cases we don't know which bucket
6947  * that will be. There's no point in considering the average bucket size
6948  * because the hash AM makes sure that's always one page.
6949  *
6950  * Likewise, we could consider charging some CPU for each index tuple in
6951  * the bucket, if we knew how many there were. But the per-tuple cost is
6952  * just a hash value comparison, not a general datatype-dependent
6953  * comparison, so any such charge ought to be quite a bit less than
6954  * cpu_operator_cost; which makes it probably not worth worrying about.
6955  *
6956  * A bigger issue is that chance hash-value collisions will result in
6957  * wasted probes into the heap. We don't currently attempt to model this
6958  * cost on the grounds that it's rare, but maybe it's not rare enough.
6959  * (Any fix for this ought to consider the generic lossy-operator problem,
6960  * though; it's not entirely hash-specific.)
6961  */
6962 
/* Pass the generic estimates through unchanged to the out-parameters. */
6963  *indexStartupCost = costs.indexStartupCost;
6964  *indexTotalCost = costs.indexTotalCost;
6965  *indexSelectivity = costs.indexSelectivity;
6966  *indexCorrelation = costs.indexCorrelation;
6967  *indexPages = costs.numIndexPages;
6968 }
Selectivity indexSelectivity
Definition: selfuncs.h:130
#define MemSet(start, val, len)
Definition: c.h:857
List * deconstruct_indexquals(IndexPath *path)
Definition: selfuncs.c:6236
Cost indexTotalCost
Definition: selfuncs.h:129
double indexCorrelation
Definition: selfuncs.h:131
Cost indexStartupCost
Definition: selfuncs.h:128
Definition: pg_list.h:45
void genericcostestimate(PlannerInfo *root, IndexPath *path, double loop_count, List *qinfos, GenericCosts *costs)
Definition: selfuncs.c:6385
double numIndexPages
Definition: selfuncs.h:134
void spgcostestimate ( struct PlannerInfo root,
struct IndexPath path,
double  loop_count,
Cost indexStartupCost,
Cost indexTotalCost,
Selectivity indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 7032 of file selfuncs.c.

References cpu_operator_cost, deconstruct_indexquals(), genericcostestimate(), GenericCosts::indexCorrelation, IndexPath::indexinfo, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, MemSet, GenericCosts::num_sa_scans, GenericCosts::numIndexPages, IndexOptInfo::pages, IndexOptInfo::tree_height, and IndexOptInfo::tuples.

Referenced by spghandler().

/*
 * spgcostestimate -- planner cost estimate for scanning an SP-GiST index.
 *
 * Structurally identical to gistcostestimate (see the definition above on
 * this page): genericcostestimate() plus a descent-cost model that assumes
 * fanout 100 (tree height at most log100(index->pages)), caching the height
 * in index->tree_height.  Results are handed back through the out-parameters
 * indexStartupCost, indexTotalCost, indexSelectivity, indexCorrelation and
 * indexPages.
 * (Listing rendered from selfuncs.c; the leading numbers are the original
 * source line numbers, not code.)
 */
7036 {
7037  IndexOptInfo *index = path->indexinfo;
7038  List *qinfos;
7039  GenericCosts costs;
7040  Cost descentCost;
7041 
7042  /* Do preliminary analysis of indexquals */
7043  qinfos = deconstruct_indexquals(path);
7044 
7045  MemSet(&costs, 0, sizeof(costs));
7046 
7047  genericcostestimate(root, path, loop_count, qinfos, &costs);
7048 
7049  /*
7050  * We model index descent costs similarly to those for btree, but to do
7051  * that we first need an idea of the tree height. We somewhat arbitrarily
7052  * assume that the fanout is 100, meaning the tree height is at most
7053  * log100(index->pages).
7054  *
7055  * Although this computation isn't really expensive enough to require
7056  * caching, we might as well use index->tree_height to cache it.
7057  */
7058  if (index->tree_height < 0) /* unknown? */
7059  {
7060  if (index->pages > 1) /* avoid computing log(0) */
7061  index->tree_height = (int) (log(index->pages) / log(100.0));
7062  else
7063  index->tree_height = 0;
7064  }
7065 
7066  /*
7067  * Add a CPU-cost component to represent the costs of initial descent. We
7068  * just use log(N) here not log2(N) since the branching factor isn't
7069  * necessarily two anyway. As for btree, charge once per SA scan.
7070  */
7071  if (index->tuples > 1) /* avoid computing log(0) */
7072  {
7073  descentCost = ceil(log(index->tuples)) * cpu_operator_cost;
7074  costs.indexStartupCost += descentCost;
7075  costs.indexTotalCost += costs.num_sa_scans * descentCost;
7076  }
7077 
7078  /*
7079  * Likewise add a per-page charge, calculated the same as for btrees.
7080  */
7081  descentCost = (index->tree_height + 1) * 50.0 * cpu_operator_cost;
7082  costs.indexStartupCost += descentCost;
7083  costs.indexTotalCost += costs.num_sa_scans * descentCost;
7084 
/* Copy the accumulated generic + descent costs into the out-parameters. */
7085  *indexStartupCost = costs.indexStartupCost;
7086  *indexTotalCost = costs.indexTotalCost;
7087  *indexSelectivity = costs.indexSelectivity;
7088  *indexCorrelation = costs.indexCorrelation;
7089  *indexPages = costs.numIndexPages;
7090 }
Selectivity indexSelectivity
Definition: selfuncs.h:130
IndexOptInfo * indexinfo
Definition: relation.h:995
#define MemSet(start, val, len)
Definition: c.h:857
double tuples
Definition: relation.h:599
int tree_height
Definition: relation.h:600
List * deconstruct_indexquals(IndexPath *path)
Definition: selfuncs.c:6236
Definition: type.h:90
BlockNumber pages
Definition: relation.h:598
double num_sa_scans
Definition: selfuncs.h:137
double cpu_operator_cost
Definition: costsize.c:108
Cost indexTotalCost
Definition: selfuncs.h:129
double indexCorrelation
Definition: selfuncs.h:131
Cost indexStartupCost
Definition: selfuncs.h:128
Definition: pg_list.h:45
double Cost
Definition: nodes.h:636
void genericcostestimate(PlannerInfo *root, IndexPath *path, double loop_count, List *qinfos, GenericCosts *costs)
Definition: selfuncs.c:6385
double numIndexPages
Definition: selfuncs.h:134