PostgreSQL Source Code  git master
index_selfuncs.h File Reference
#include "access/amapi.h"
[Include dependency graph and "included by" graph for index_selfuncs.h omitted.]

Go to the source code of this file.

Functions

void brincostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void btcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void hashcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void gistcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void spgcostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void gincostestimate (struct PlannerInfo *root, struct IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 

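None of these estimators is called directly; each index access method hands its estimator to the planner through the amcostestimate field of its IndexAmRoutine (see the "Referenced by" notes below: brinhandler(), bthandler(), and so on). As an abridged sketch of that wiring for btree, with the unrelated fields elided:

    Datum
    bthandler(PG_FUNCTION_ARGS)
    {
        IndexAmRoutine *amroutine = makeNode(IndexAmRoutine);

        /* ... capability flags, strategy numbers, scan callbacks ... */
        amroutine->amcostestimate = btcostestimate; /* planner cost hook */

        PG_RETURN_POINTER(amroutine);
    }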
Function Documentation

◆ brincostestimate()

void brincostestimate ( struct PlannerInfo *  root,
struct IndexPath *  path,
double  loop_count,
Cost *  indexStartupCost,
Cost *  indexTotalCost,
Selectivity *  indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 7734 of file selfuncs.c.

7738 {
7739  IndexOptInfo *index = path->indexinfo;
7740  List *indexQuals = get_quals_from_indexclauses(path->indexclauses);
7741  double numPages = index->pages;
7742  RelOptInfo *baserel = index->rel;
7743  RangeTblEntry *rte = planner_rt_fetch(baserel->relid, root);
7744  Cost spc_seq_page_cost;
7745  Cost spc_random_page_cost;
7746  double qual_arg_cost;
7747  double qualSelectivity;
7748  BrinStatsData statsData;
7749  double indexRanges;
7750  double minimalRanges;
7751  double estimatedRanges;
7752  double selec;
7753  Relation indexRel;
7754  ListCell *l;
7755  VariableStatData vardata;
7756 
7757  Assert(rte->rtekind == RTE_RELATION);
7758 
7759  /* fetch estimated page cost for the tablespace containing the index */
7760  get_tablespace_page_costs(index->reltablespace,
7761  &spc_random_page_cost,
7762  &spc_seq_page_cost);
7763 
7764  /*
7765  * Obtain some data from the index itself, if possible. Otherwise invent
7766  * some plausible internal statistics based on the relation page count.
7767  */
7768  if (!index->hypothetical)
7769  {
7770  /*
7771  * A lock should have already been obtained on the index in plancat.c.
7772  */
7773  indexRel = index_open(index->indexoid, NoLock);
7774  brinGetStats(indexRel, &statsData);
7775  index_close(indexRel, NoLock);
7776 
7777  /* work out the actual number of ranges in the index */
7778  indexRanges = Max(ceil((double) baserel->pages /
7779  statsData.pagesPerRange), 1.0);
7780  }
7781  else
7782  {
7783  /*
7784  * Assume default number of pages per range, and estimate the number
7785  * of ranges based on that.
7786  */
7787  indexRanges = Max(ceil((double) baserel->pages /
7788  BRIN_DEFAULT_PAGES_PER_RANGE), 1.0);
7789 
7790  statsData.pagesPerRange = BRIN_DEFAULT_PAGES_PER_RANGE;
7791  statsData.revmapNumPages = (indexRanges / REVMAP_PAGE_MAXITEMS) + 1;
7792  }
7793 
7794  /*
7795  * Compute index correlation
7796  *
7797  * Because we can use all index quals equally when scanning, we can use
7798  * the largest correlation (in absolute value) among columns used by the
7799  * query. Start at zero, the worst possible case. If we cannot find any
7800  * correlation statistics, we will keep it as 0.
7801  */
7802  *indexCorrelation = 0;
7803 
7804  foreach(l, path->indexclauses)
7805  {
7806  IndexClause *iclause = lfirst_node(IndexClause, l);
7807  AttrNumber attnum = index->indexkeys[iclause->indexcol];
7808 
7809  /* attempt to lookup stats in relation for this index column */
7810  if (attnum != 0)
7811  {
7812  /* Simple variable -- look to stats for the underlying table */
7813  if (get_relation_stats_hook &&
7814  (*get_relation_stats_hook) (root, rte, attnum, &vardata))
7815  {
7816  /*
7817  * The hook took control of acquiring a stats tuple. If it
7818  * did supply a tuple, it'd better have supplied a freefunc.
7819  */
7820  if (HeapTupleIsValid(vardata.statsTuple) && !vardata.freefunc)
7821  elog(ERROR,
7822  "no function provided to release variable stats with");
7823  }
7824  else
7825  {
7826  vardata.statsTuple =
7827  SearchSysCache3(STATRELATTINH,
7828  ObjectIdGetDatum(rte->relid),
7829  Int16GetDatum(attnum),
7830  BoolGetDatum(false));
7831  vardata.freefunc = ReleaseSysCache;
7832  }
7833  }
7834  else
7835  {
7836  /*
7837  * Looks like we've found an expression column in the index. Let's
7838  * see if there's any stats for it.
7839  */
7840 
7841  /* get the attnum from the 0-based index. */
7842  attnum = iclause->indexcol + 1;
7843 
7844  if (get_index_stats_hook &&
7845  (*get_index_stats_hook) (root, index->indexoid, attnum, &vardata))
7846  {
7847  /*
7848  * The hook took control of acquiring a stats tuple. If it
7849  * did supply a tuple, it'd better have supplied a freefunc.
7850  */
7851  if (HeapTupleIsValid(vardata.statsTuple) &&
7852  !vardata.freefunc)
7853  elog(ERROR, "no function provided to release variable stats with");
7854  }
7855  else
7856  {
7857  vardata.statsTuple = SearchSysCache3(STATRELATTINH,
7858  ObjectIdGetDatum(index->indexoid),
7859  Int16GetDatum(attnum),
7860  BoolGetDatum(false));
7861  vardata.freefunc = ReleaseSysCache;
7862  }
7863  }
7864 
7865  if (HeapTupleIsValid(vardata.statsTuple))
7866  {
7867  AttStatsSlot sslot;
7868 
7869  if (get_attstatsslot(&sslot, vardata.statsTuple,
7870  STATISTIC_KIND_CORRELATION, InvalidOid,
7871  ATTSTATSSLOT_NUMBERS))
7872  {
7873  double varCorrelation = 0.0;
7874 
7875  if (sslot.nnumbers > 0)
7876  varCorrelation = Abs(sslot.numbers[0]);
7877 
7878  if (varCorrelation > *indexCorrelation)
7879  *indexCorrelation = varCorrelation;
7880 
7881  free_attstatsslot(&sslot);
7882  }
7883  }
7884 
7885  ReleaseVariableStats(vardata);
7886  }
7887 
7888  qualSelectivity = clauselist_selectivity(root, indexQuals,
7889  baserel->relid,
7890  JOIN_INNER, NULL);
7891 
7892  /*
7893  * Now calculate the minimum possible ranges we could match with if all of
7894  * the rows were in the perfect order in the table's heap.
7895  */
7896  minimalRanges = ceil(indexRanges * qualSelectivity);
7897 
7898  /*
7899  * Now estimate the number of ranges that we'll touch by using the
7900  * indexCorrelation from the stats. Careful not to divide by zero (note
7901  * we're using the absolute value of the correlation).
7902  */
7903  if (*indexCorrelation < 1.0e-10)
7904  estimatedRanges = indexRanges;
7905  else
7906  estimatedRanges = Min(minimalRanges / *indexCorrelation, indexRanges);
7907 
7908  /* we expect to visit this portion of the table */
7909  selec = estimatedRanges / indexRanges;
7910 
7911  CLAMP_PROBABILITY(selec);
7912 
7913  *indexSelectivity = selec;
7914 
7915  /*
7916  * Compute the index qual costs, much as in genericcostestimate, to add to
7917  * the index costs. We can disregard indexorderbys, since BRIN doesn't
7918  * support those.
7919  */
7920  qual_arg_cost = index_other_operands_eval_cost(root, indexQuals);
7921 
7922  /*
7923  * Compute the startup cost as the cost to read the whole revmap
7924  * sequentially, including the cost to execute the index quals.
7925  */
7926  *indexStartupCost =
7927  spc_seq_page_cost * statsData.revmapNumPages * loop_count;
7928  *indexStartupCost += qual_arg_cost;
7929 
7930  /*
7931  * To read a BRIN index there might be a bit of back and forth over
7932  * regular pages, as revmap might point to them out of sequential order;
7933  * calculate the total cost as reading the whole index in random order.
7934  */
7935  *indexTotalCost = *indexStartupCost +
7936  spc_random_page_cost * (numPages - statsData.revmapNumPages) * loop_count;
7937 
7938  /*
7939  * Charge a small amount per range tuple which we expect to match to. This
7940  * is meant to reflect the costs of manipulating the bitmap. The BRIN scan
7941  * will set a bit for each page in the range when we find a matching
7942  * range, so we must multiply the charge by the number of pages in the
7943  * range.
7944  */
7945  *indexTotalCost += 0.1 * cpu_operator_cost * estimatedRanges *
7946  statsData.pagesPerRange;
7947 
7948  *indexPages = index->pages;
7949 }

References Abs, Assert(), attnum, ATTSTATSSLOT_NUMBERS, BoolGetDatum, BRIN_DEFAULT_PAGES_PER_RANGE, brinGetStats(), CLAMP_PROBABILITY, clauselist_selectivity(), cpu_operator_cost, elog(), ERROR, free_attstatsslot(), VariableStatData::freefunc, get_attstatsslot(), get_index_stats_hook, get_quals_from_indexclauses(), get_relation_stats_hook, get_tablespace_page_costs(), HeapTupleIsValid, index_close(), index_open(), index_other_operands_eval_cost(), IndexPath::indexclauses, IndexClause::indexcol, IndexPath::indexinfo, Int16GetDatum, InvalidOid, JOIN_INNER, lfirst_node, Max, Min, AttStatsSlot::nnumbers, NoLock, AttStatsSlot::numbers, ObjectIdGetDatum, RelOptInfo::pages, BrinStatsData::pagesPerRange, planner_rt_fetch, ReleaseSysCache(), ReleaseVariableStats, RangeTblEntry::relid, RelOptInfo::relid, REVMAP_PAGE_MAXITEMS, BrinStatsData::revmapNumPages, RTE_RELATION, RangeTblEntry::rtekind, SearchSysCache3(), STATRELATTINH, and VariableStatData::statsTuple.

Referenced by brinhandler().
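To make the range arithmetic concrete, here is a small standalone sketch (illustrative numbers only, not PostgreSQL code): with 1000 ranges, a 1% qual selectivity, and a best absolute correlation of 0.5, the estimator expects to visit 20 ranges, i.e. 2% of the table.

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double indexRanges = 1000.0;    /* ceil(heap pages / pagesPerRange) */
        double qualSelectivity = 0.01;  /* from clauselist_selectivity() */
        double correlation = 0.5;       /* largest |correlation| among quals */

        /* fewest ranges that could hold the matches, if perfectly ordered */
        double minimalRanges = ceil(indexRanges * qualSelectivity);   /* 10 */

        /* weaker correlation spreads matching rows over more ranges */
        double estimatedRanges = (correlation < 1.0e-10)
            ? indexRanges
            : fmin(minimalRanges / correlation, indexRanges);         /* 20 */

        printf("selectivity = %g\n", estimatedRanges / indexRanges); /* 0.02 */
        return 0;
    }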

◆ btcostestimate()

void btcostestimate ( struct PlannerInfo *  root,
struct IndexPath *  path,
double  loop_count,
Cost *  indexStartupCost,
Cost *  indexTotalCost,
Selectivity *  indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 6639 of file selfuncs.c.

6643 {
6644  IndexOptInfo *index = path->indexinfo;
6645  GenericCosts costs;
6646  Oid relid;
6647  AttrNumber colnum;
6648  VariableStatData vardata;
6649  double numIndexTuples;
6650  Cost descentCost;
6651  List *indexBoundQuals;
6652  int indexcol;
6653  bool eqQualHere;
6654  bool found_saop;
6655  bool found_is_null_op;
6656  double num_sa_scans;
6657  ListCell *lc;
6658 
6659  /*
6660  * For a btree scan, only leading '=' quals plus inequality quals for the
6661  * immediately next attribute contribute to index selectivity (these are
6662  * the "boundary quals" that determine the starting and stopping points of
6663  * the index scan). Additional quals can suppress visits to the heap, so
6664  * it's OK to count them in indexSelectivity, but they should not count
6665  * for estimating numIndexTuples. So we must examine the given indexquals
6666  * to find out which ones count as boundary quals. We rely on the
6667  * knowledge that they are given in index column order.
6668  *
6669  * For a RowCompareExpr, we consider only the first column, just as
6670  * rowcomparesel() does.
6671  *
6672  * If there's a ScalarArrayOpExpr in the quals, we'll actually perform N
6673  * index scans not one, but the ScalarArrayOpExpr's operator can be
6674  * considered to act the same as it normally does.
6675  */
6676  indexBoundQuals = NIL;
6677  indexcol = 0;
6678  eqQualHere = false;
6679  found_saop = false;
6680  found_is_null_op = false;
6681  num_sa_scans = 1;
6682  foreach(lc, path->indexclauses)
6683  {
6684  IndexClause *iclause = lfirst_node(IndexClause, lc);
6685  ListCell *lc2;
6686 
6687  if (indexcol != iclause->indexcol)
6688  {
6689  /* Beginning of a new column's quals */
6690  if (!eqQualHere)
6691  break; /* done if no '=' qual for indexcol */
6692  eqQualHere = false;
6693  indexcol++;
6694  if (indexcol != iclause->indexcol)
6695  break; /* no quals at all for indexcol */
6696  }
6697 
6698  /* Examine each indexqual associated with this index clause */
6699  foreach(lc2, iclause->indexquals)
6700  {
6701  RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc2);
6702  Expr *clause = rinfo->clause;
6703  Oid clause_op = InvalidOid;
6704  int op_strategy;
6705 
6706  if (IsA(clause, OpExpr))
6707  {
6708  OpExpr *op = (OpExpr *) clause;
6709 
6710  clause_op = op->opno;
6711  }
6712  else if (IsA(clause, RowCompareExpr))
6713  {
6714  RowCompareExpr *rc = (RowCompareExpr *) clause;
6715 
6716  clause_op = linitial_oid(rc->opnos);
6717  }
6718  else if (IsA(clause, ScalarArrayOpExpr))
6719  {
6720  ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
6721  Node *other_operand = (Node *) lsecond(saop->args);
6722  int alength = estimate_array_length(other_operand);
6723 
6724  clause_op = saop->opno;
6725  found_saop = true;
6726  /* count number of SA scans induced by indexBoundQuals only */
6727  if (alength > 1)
6728  num_sa_scans *= alength;
6729  }
6730  else if (IsA(clause, NullTest))
6731  {
6732  NullTest *nt = (NullTest *) clause;
6733 
6734  if (nt->nulltesttype == IS_NULL)
6735  {
6736  found_is_null_op = true;
6737  /* IS NULL is like = for selectivity purposes */
6738  eqQualHere = true;
6739  }
6740  }
6741  else
6742  elog(ERROR, "unsupported indexqual type: %d",
6743  (int) nodeTag(clause));
6744 
6745  /* check for equality operator */
6746  if (OidIsValid(clause_op))
6747  {
6748  op_strategy = get_op_opfamily_strategy(clause_op,
6749  index->opfamily[indexcol]);
6750  Assert(op_strategy != 0); /* not a member of opfamily?? */
6751  if (op_strategy == BTEqualStrategyNumber)
6752  eqQualHere = true;
6753  }
6754 
6755  indexBoundQuals = lappend(indexBoundQuals, rinfo);
6756  }
6757  }
6758 
6759  /*
6760  * If index is unique and we found an '=' clause for each column, we can
6761  * just assume numIndexTuples = 1 and skip the expensive
6762  * clauselist_selectivity calculations. However, a ScalarArrayOp or
6763  * NullTest invalidates that theory, even though it sets eqQualHere.
6764  */
6765  if (index->unique &&
6766  indexcol == index->nkeycolumns - 1 &&
6767  eqQualHere &&
6768  !found_saop &&
6769  !found_is_null_op)
6770  numIndexTuples = 1.0;
6771  else
6772  {
6773  List *selectivityQuals;
6774  Selectivity btreeSelectivity;
6775 
6776  /*
6777  * If the index is partial, AND the index predicate with the
6778  * index-bound quals to produce a more accurate idea of the number of
6779  * rows covered by the bound conditions.
6780  */
6781  selectivityQuals = add_predicate_to_index_quals(index, indexBoundQuals);
6782 
6783  btreeSelectivity = clauselist_selectivity(root, selectivityQuals,
6784  index->rel->relid,
6785  JOIN_INNER,
6786  NULL);
6787  numIndexTuples = btreeSelectivity * index->rel->tuples;
6788 
6789  /*
6790  * As in genericcostestimate(), we have to adjust for any
6791  * ScalarArrayOpExpr quals included in indexBoundQuals, and then round
6792  * to integer.
6793  */
6794  numIndexTuples = rint(numIndexTuples / num_sa_scans);
6795  }
6796 
6797  /*
6798  * Now do generic index cost estimation.
6799  */
6800  MemSet(&costs, 0, sizeof(costs));
6801  costs.numIndexTuples = numIndexTuples;
6802 
6803  genericcostestimate(root, path, loop_count, &costs);
6804 
6805  /*
6806  * Add a CPU-cost component to represent the costs of initial btree
6807  * descent. We don't charge any I/O cost for touching upper btree levels,
6808  * since they tend to stay in cache, but we still have to do about log2(N)
6809  * comparisons to descend a btree of N leaf tuples. We charge one
6810  * cpu_operator_cost per comparison.
6811  *
6812  * If there are ScalarArrayOpExprs, charge this once per SA scan. The
6813  * ones after the first one are not startup cost so far as the overall
6814  * plan is concerned, so add them only to "total" cost.
6815  */
6816  if (index->tuples > 1) /* avoid computing log(0) */
6817  {
6818  descentCost = ceil(log(index->tuples) / log(2.0)) * cpu_operator_cost;
6819  costs.indexStartupCost += descentCost;
6820  costs.indexTotalCost += costs.num_sa_scans * descentCost;
6821  }
6822 
6823  /*
6824  * Even though we're not charging I/O cost for touching upper btree pages,
6825  * it's still reasonable to charge some CPU cost per page descended
6826  * through. Moreover, if we had no such charge at all, bloated indexes
6827  * would appear to have the same search cost as unbloated ones, at least
6828  * in cases where only a single leaf page is expected to be visited. This
6829  * cost is somewhat arbitrarily set at 50x cpu_operator_cost per page
6830  * touched. The number of such pages is btree tree height plus one (ie,
6831  * we charge for the leaf page too). As above, charge once per SA scan.
6832  */
6833  descentCost = (index->tree_height + 1) * 50.0 * cpu_operator_cost;
6834  costs.indexStartupCost += descentCost;
6835  costs.indexTotalCost += costs.num_sa_scans * descentCost;
6836 
6837  /*
6838  * If we can get an estimate of the first column's ordering correlation C
6839  * from pg_statistic, estimate the index correlation as C for a
6840  * single-column index, or C * 0.75 for multiple columns. (The idea here
6841  * is that multiple columns dilute the importance of the first column's
6842  * ordering, but don't negate it entirely. Before 8.0 we divided the
6843  * correlation by the number of columns, but that seems too strong.)
6844  */
6845  MemSet(&vardata, 0, sizeof(vardata));
6846 
6847  if (index->indexkeys[0] != 0)
6848  {
6849  /* Simple variable --- look to stats for the underlying table */
6850  RangeTblEntry *rte = planner_rt_fetch(index->rel->relid, root);
6851 
6852  Assert(rte->rtekind == RTE_RELATION);
6853  relid = rte->relid;
6854  Assert(relid != InvalidOid);
6855  colnum = index->indexkeys[0];
6856 
6857  if (get_relation_stats_hook &&
6858  (*get_relation_stats_hook) (root, rte, colnum, &vardata))
6859  {
6860  /*
6861  * The hook took control of acquiring a stats tuple. If it did
6862  * supply a tuple, it'd better have supplied a freefunc.
6863  */
6864  if (HeapTupleIsValid(vardata.statsTuple) &&
6865  !vardata.freefunc)
6866  elog(ERROR, "no function provided to release variable stats with");
6867  }
6868  else
6869  {
6870  vardata.statsTuple = SearchSysCache3(STATRELATTINH,
6871  ObjectIdGetDatum(relid),
6872  Int16GetDatum(colnum),
6873  BoolGetDatum(rte->inh));
6874  vardata.freefunc = ReleaseSysCache;
6875  }
6876  }
6877  else
6878  {
6879  /* Expression --- maybe there are stats for the index itself */
6880  relid = index->indexoid;
6881  colnum = 1;
6882 
6883  if (get_index_stats_hook &&
6884  (*get_index_stats_hook) (root, relid, colnum, &vardata))
6885  {
6886  /*
6887  * The hook took control of acquiring a stats tuple. If it did
6888  * supply a tuple, it'd better have supplied a freefunc.
6889  */
6890  if (HeapTupleIsValid(vardata.statsTuple) &&
6891  !vardata.freefunc)
6892  elog(ERROR, "no function provided to release variable stats with");
6893  }
6894  else
6895  {
6896  vardata.statsTuple = SearchSysCache3(STATRELATTINH,
6897  ObjectIdGetDatum(relid),
6898  Int16GetDatum(colnum),
6899  BoolGetDatum(false));
6900  vardata.freefunc = ReleaseSysCache;
6901  }
6902  }
6903 
6904  if (HeapTupleIsValid(vardata.statsTuple))
6905  {
6906  Oid sortop;
6907  AttStatsSlot sslot;
6908 
6909  sortop = get_opfamily_member(index->opfamily[0],
6910  index->opcintype[0],
6911  index->opcintype[0],
6912  BTLessStrategyNumber);
6913  if (OidIsValid(sortop) &&
6914  get_attstatsslot(&sslot, vardata.statsTuple,
6915  STATISTIC_KIND_CORRELATION, sortop,
6916  ATTSTATSSLOT_NUMBERS))
6917  {
6918  double varCorrelation;
6919 
6920  Assert(sslot.nnumbers == 1);
6921  varCorrelation = sslot.numbers[0];
6922 
6923  if (index->reverse_sort[0])
6924  varCorrelation = -varCorrelation;
6925 
6926  if (index->nkeycolumns > 1)
6927  costs.indexCorrelation = varCorrelation * 0.75;
6928  else
6929  costs.indexCorrelation = varCorrelation;
6930 
6931  free_attstatsslot(&sslot);
6932  }
6933  }
6934 
6935  ReleaseVariableStats(vardata);
6936 
6937  *indexStartupCost = costs.indexStartupCost;
6938  *indexTotalCost = costs.indexTotalCost;
6939  *indexSelectivity = costs.indexSelectivity;
6940  *indexCorrelation = costs.indexCorrelation;
6941  *indexPages = costs.numIndexPages;
6942 }

References add_predicate_to_index_quals(), ScalarArrayOpExpr::args, Assert(), ATTSTATSSLOT_NUMBERS, BoolGetDatum, BTEqualStrategyNumber, BTLessStrategyNumber, RestrictInfo::clause, clauselist_selectivity(), cpu_operator_cost, elog(), ERROR, estimate_array_length(), free_attstatsslot(), VariableStatData::freefunc, genericcostestimate(), get_attstatsslot(), get_index_stats_hook, get_op_opfamily_strategy(), get_opfamily_member(), get_relation_stats_hook, HeapTupleIsValid, IndexPath::indexclauses, IndexClause::indexcol, GenericCosts::indexCorrelation, IndexPath::indexinfo, IndexClause::indexquals, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, RangeTblEntry::inh, Int16GetDatum, InvalidOid, IS_NULL, IsA, JOIN_INNER, lappend(), lfirst_node, linitial_oid, lsecond, MemSet, NIL, AttStatsSlot::nnumbers, nodeTag, NullTest::nulltesttype, GenericCosts::num_sa_scans, AttStatsSlot::numbers, GenericCosts::numIndexPages, GenericCosts::numIndexTuples, ObjectIdGetDatum, OidIsValid, OpExpr::opno, ScalarArrayOpExpr::opno, RowCompareExpr::opnos, planner_rt_fetch, ReleaseSysCache(), ReleaseVariableStats, RangeTblEntry::relid, RTE_RELATION, RangeTblEntry::rtekind, SearchSysCache3(), STATRELATTINH, and VariableStatData::statsTuple.

Referenced by bthandler().
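The two descent charges are easy to check by hand. A standalone sketch with assumed inputs (one million leaf tuples, tree height 2, and the default cpu_operator_cost of 0.0025):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double cpu_operator_cost = 0.0025;  /* PostgreSQL's default setting */
        double tuples = 1000000.0;          /* leaf tuples in the index */
        int    tree_height = 2;             /* levels above the leaf */

        /* about log2(N) comparisons to descend to the correct leaf */
        double cmp = ceil(log(tuples) / log(2.0)) * cpu_operator_cost; /* 0.05 */

        /* 50x cpu_operator_cost per page touched, leaf page included */
        double pg = (tree_height + 1) * 50.0 * cpu_operator_cost;    /* 0.375 */

        printf("descent cost = %g\n", cmp + pg);                     /* 0.425 */
        return 0;
    }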

◆ gincostestimate()

void gincostestimate ( struct PlannerInfo *  root,
struct IndexPath *  path,
double  loop_count,
Cost *  indexStartupCost,
Cost *  indexTotalCost,
Selectivity *  indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 7403 of file selfuncs.c.

7407 {
7408  IndexOptInfo *index = path->indexinfo;
7409  List *indexQuals = get_quals_from_indexclauses(path->indexclauses);
7410  List *selectivityQuals;
7411  double numPages = index->pages,
7412  numTuples = index->tuples;
7413  double numEntryPages,
7414  numDataPages,
7415  numPendingPages,
7416  numEntries;
7417  GinQualCounts counts;
7418  bool matchPossible;
7419  bool fullIndexScan;
7420  double partialScale;
7421  double entryPagesFetched,
7422  dataPagesFetched,
7423  dataPagesFetchedBySel;
7424  double qual_op_cost,
7425  qual_arg_cost,
7426  spc_random_page_cost,
7427  outer_scans;
7428  Relation indexRel;
7429  GinStatsData ginStats;
7430  ListCell *lc;
7431  int i;
7432 
7433  /*
7434  * Obtain statistical information from the meta page, if possible. Else
7435  * set ginStats to zeroes, and we'll cope below.
7436  */
7437  if (!index->hypothetical)
7438  {
7439  /* Lock should have already been obtained in plancat.c */
7440  indexRel = index_open(index->indexoid, NoLock);
7441  ginGetStats(indexRel, &ginStats);
7442  index_close(indexRel, NoLock);
7443  }
7444  else
7445  {
7446  memset(&ginStats, 0, sizeof(ginStats));
7447  }
7448 
7449  /*
7450  * Assuming we got valid (nonzero) stats at all, nPendingPages can be
7451  * trusted, but the other fields are data as of the last VACUUM. We can
7452  * scale them up to account for growth since then, but that method only
7453  * goes so far; in the worst case, the stats might be for a completely
7454  * empty index, and scaling them will produce pretty bogus numbers.
7455  * Somewhat arbitrarily, set the cutoff for doing scaling at 4X growth; if
7456  * it's grown more than that, fall back to estimating things only from the
7457  * assumed-accurate index size. But we'll trust nPendingPages in any case
7458  * so long as it's not clearly insane, ie, more than the index size.
7459  */
7460  if (ginStats.nPendingPages < numPages)
7461  numPendingPages = ginStats.nPendingPages;
7462  else
7463  numPendingPages = 0;
7464 
7465  if (numPages > 0 && ginStats.nTotalPages <= numPages &&
7466  ginStats.nTotalPages > numPages / 4 &&
7467  ginStats.nEntryPages > 0 && ginStats.nEntries > 0)
7468  {
7469  /*
7470  * OK, the stats seem close enough to sane to be trusted. But we
7471  * still need to scale them by the ratio numPages / nTotalPages to
7472  * account for growth since the last VACUUM.
7473  */
7474  double scale = numPages / ginStats.nTotalPages;
7475 
7476  numEntryPages = ceil(ginStats.nEntryPages * scale);
7477  numDataPages = ceil(ginStats.nDataPages * scale);
7478  numEntries = ceil(ginStats.nEntries * scale);
7479  /* ensure we didn't round up too much */
7480  numEntryPages = Min(numEntryPages, numPages - numPendingPages);
7481  numDataPages = Min(numDataPages,
7482  numPages - numPendingPages - numEntryPages);
7483  }
7484  else
7485  {
7486  /*
7487  * We might get here because it's a hypothetical index, or an index
7488  * created pre-9.1 and never vacuumed since upgrading (in which case
7489  * its stats would read as zeroes), or just because it's grown too
7490  * much since the last VACUUM for us to put our faith in scaling.
7491  *
7492  * Invent some plausible internal statistics based on the index page
7493  * count (and clamp that to at least 10 pages, just in case). We
7494  * estimate that 90% of the index is entry pages, and the rest is data
7495  * pages. Estimate 100 entries per entry page; this is rather bogus
7496  * since it'll depend on the size of the keys, but it's more robust
7497  * than trying to predict the number of entries per heap tuple.
7498  */
7499  numPages = Max(numPages, 10);
7500  numEntryPages = floor((numPages - numPendingPages) * 0.90);
7501  numDataPages = numPages - numPendingPages - numEntryPages;
7502  numEntries = floor(numEntryPages * 100);
7503  }
7504 
7505  /* In an empty index, numEntries could be zero. Avoid divide-by-zero */
7506  if (numEntries < 1)
7507  numEntries = 1;
7508 
7509  /*
7510  * If the index is partial, AND the index predicate with the index-bound
7511  * quals to produce a more accurate idea of the number of rows covered by
7512  * the bound conditions.
7513  */
7514  selectivityQuals = add_predicate_to_index_quals(index, indexQuals);
7515 
7516  /* Estimate the fraction of main-table tuples that will be visited */
7517  *indexSelectivity = clauselist_selectivity(root, selectivityQuals,
7518  index->rel->relid,
7519  JOIN_INNER,
7520  NULL);
7521 
7522  /* fetch estimated page cost for tablespace containing index */
7523  get_tablespace_page_costs(index->reltablespace,
7524  &spc_random_page_cost,
7525  NULL);
7526 
7527  /*
7528  * Generic assumption about index correlation: there isn't any.
7529  */
7530  *indexCorrelation = 0.0;
7531 
7532  /*
7533  * Examine quals to estimate number of search entries & partial matches
7534  */
7535  memset(&counts, 0, sizeof(counts));
7536  counts.arrayScans = 1;
7537  matchPossible = true;
7538 
7539  foreach(lc, path->indexclauses)
7540  {
7541  IndexClause *iclause = lfirst_node(IndexClause, lc);
7542  ListCell *lc2;
7543 
7544  foreach(lc2, iclause->indexquals)
7545  {
7546  RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc2);
7547  Expr *clause = rinfo->clause;
7548 
7549  if (IsA(clause, OpExpr))
7550  {
7551  matchPossible = gincost_opexpr(root,
7552  index,
7553  iclause->indexcol,
7554  (OpExpr *) clause,
7555  &counts);
7556  if (!matchPossible)
7557  break;
7558  }
7559  else if (IsA(clause, ScalarArrayOpExpr))
7560  {
7561  matchPossible = gincost_scalararrayopexpr(root,
7562  index,
7563  iclause->indexcol,
7564  (ScalarArrayOpExpr *) clause,
7565  numEntries,
7566  &counts);
7567  if (!matchPossible)
7568  break;
7569  }
7570  else
7571  {
7572  /* shouldn't be anything else for a GIN index */
7573  elog(ERROR, "unsupported GIN indexqual type: %d",
7574  (int) nodeTag(clause));
7575  }
7576  }
7577  }
7578 
7579  /* Fall out if there were any provably-unsatisfiable quals */
7580  if (!matchPossible)
7581  {
7582  *indexStartupCost = 0;
7583  *indexTotalCost = 0;
7584  *indexSelectivity = 0;
7585  return;
7586  }
7587 
7588  /*
7589  * If attribute has a full scan and at the same time doesn't have normal
7590  * scan, then we'll have to scan all non-null entries of that attribute.
7591  * Currently, we don't have per-attribute statistics for GIN. Thus, we
7592  * must assume the whole GIN index has to be scanned in this case.
7593  */
7594  fullIndexScan = false;
7595  for (i = 0; i < index->nkeycolumns; i++)
7596  {
7597  if (counts.attHasFullScan[i] && !counts.attHasNormalScan[i])
7598  {
7599  fullIndexScan = true;
7600  break;
7601  }
7602  }
7603 
7604  if (fullIndexScan || indexQuals == NIL)
7605  {
7606  /*
7607  * Full index scan will be required. We treat this as if every key in
7608  * the index had been listed in the query; is that reasonable?
7609  */
7610  counts.partialEntries = 0;
7611  counts.exactEntries = numEntries;
7612  counts.searchEntries = numEntries;
7613  }
7614 
7615  /* Will we have more than one iteration of a nestloop scan? */
7616  outer_scans = loop_count;
7617 
7618  /*
7619  * Compute cost to begin scan, first of all, pay attention to pending
7620  * list.
7621  */
7622  entryPagesFetched = numPendingPages;
7623 
7624  /*
7625  * Estimate number of entry pages read. We need to do
7626  * counts.searchEntries searches. Use a power function as it should be,
7627  * but tuples on leaf pages usually is much greater. Here we include all
7628  * searches in entry tree, including search of first entry in partial
7629  * match algorithm
7630  */
7631  entryPagesFetched += ceil(counts.searchEntries * rint(pow(numEntryPages, 0.15)));
7632 
7633  /*
7634  * Add an estimate of entry pages read by partial match algorithm. It's a
7635  * scan over leaf pages in entry tree. We haven't any useful stats here,
7636  * so estimate it as proportion. Because counts.partialEntries is really
7637  * pretty bogus (see code above), it's possible that it is more than
7638  * numEntries; clamp the proportion to ensure sanity.
7639  */
7640  partialScale = counts.partialEntries / numEntries;
7641  partialScale = Min(partialScale, 1.0);
7642 
7643  entryPagesFetched += ceil(numEntryPages * partialScale);
7644 
7645  /*
7646  * Partial match algorithm reads all data pages before doing actual scan,
7647  * so it's a startup cost. Again, we haven't any useful stats here, so
7648  * estimate it as proportion.
7649  */
7650  dataPagesFetched = ceil(numDataPages * partialScale);
7651 
7652  /*
7653  * Calculate cache effects if more than one scan due to nestloops or array
7654  * quals. The result is pro-rated per nestloop scan, but the array qual
7655  * factor shouldn't be pro-rated (compare genericcostestimate).
7656  */
7657  if (outer_scans > 1 || counts.arrayScans > 1)
7658  {
7659  entryPagesFetched *= outer_scans * counts.arrayScans;
7660  entryPagesFetched = index_pages_fetched(entryPagesFetched,
7661  (BlockNumber) numEntryPages,
7662  numEntryPages, root);
7663  entryPagesFetched /= outer_scans;
7664  dataPagesFetched *= outer_scans * counts.arrayScans;
7665  dataPagesFetched = index_pages_fetched(dataPagesFetched,
7666  (BlockNumber) numDataPages,
7667  numDataPages, root);
7668  dataPagesFetched /= outer_scans;
7669  }
7670 
7671  /*
7672  * Here we use random page cost because logically-close pages could be far
7673  * apart on disk.
7674  */
7675  *indexStartupCost = (entryPagesFetched + dataPagesFetched) * spc_random_page_cost;
7676 
7677  /*
7678  * Now compute the number of data pages fetched during the scan.
7679  *
7680  * We assume every entry to have the same number of items, and that there
7681  * is no overlap between them. (XXX: tsvector and array opclasses collect
7682  * statistics on the frequency of individual keys; it would be nice to use
7683  * those here.)
7684  */
7685  dataPagesFetched = ceil(numDataPages * counts.exactEntries / numEntries);
7686 
7687  /*
7688  * If there is a lot of overlap among the entries, in particular if one of
7689  * the entries is very frequent, the above calculation can grossly
7690  * under-estimate. As a simple cross-check, calculate a lower bound based
7691  * on the overall selectivity of the quals. At a minimum, we must read
7692  * one item pointer for each matching entry.
7693  *
7694  * The width of each item pointer varies, based on the level of
7695  * compression. We don't have statistics on that, but an average of
7696  * around 3 bytes per item is fairly typical.
7697  */
7698  dataPagesFetchedBySel = ceil(*indexSelectivity *
7699  (numTuples / (BLCKSZ / 3)));
7700  if (dataPagesFetchedBySel > dataPagesFetched)
7701  dataPagesFetched = dataPagesFetchedBySel;
7702 
7703  /* Account for cache effects, the same as above */
7704  if (outer_scans > 1 || counts.arrayScans > 1)
7705  {
7706  dataPagesFetched *= outer_scans * counts.arrayScans;
7707  dataPagesFetched = index_pages_fetched(dataPagesFetched,
7708  (BlockNumber) numDataPages,
7709  numDataPages, root);
7710  dataPagesFetched /= outer_scans;
7711  }
7712 
7713  /* And apply random_page_cost as the cost per page */
7714  *indexTotalCost = *indexStartupCost +
7715  dataPagesFetched * spc_random_page_cost;
7716 
7717  /*
7718  * Add on index qual eval costs, much as in genericcostestimate. But we
7719  * can disregard indexorderbys, since GIN doesn't support those.
7720  */
7721  qual_arg_cost = index_other_operands_eval_cost(root, indexQuals);
7722  qual_op_cost = cpu_operator_cost * list_length(indexQuals);
7723 
7724  *indexStartupCost += qual_arg_cost;
7725  *indexTotalCost += qual_arg_cost;
7726  *indexTotalCost += (numTuples * *indexSelectivity) * (cpu_index_tuple_cost + qual_op_cost);
7727  *indexPages = dataPagesFetched;
7728 }

References add_predicate_to_index_quals(), GinQualCounts::arrayScans, GinQualCounts::attHasFullScan, GinQualCounts::attHasNormalScan, RestrictInfo::clause, clauselist_selectivity(), cpu_index_tuple_cost, cpu_operator_cost, elog(), ERROR, GinQualCounts::exactEntries, get_quals_from_indexclauses(), get_tablespace_page_costs(), gincost_opexpr(), gincost_scalararrayopexpr(), ginGetStats(), i, index_close(), index_open(), index_other_operands_eval_cost(), index_pages_fetched(), IndexPath::indexclauses, IndexClause::indexcol, IndexPath::indexinfo, IndexClause::indexquals, IsA, JOIN_INNER, lfirst_node, list_length(), Max, Min, GinStatsData::nDataPages, GinStatsData::nEntries, GinStatsData::nEntryPages, NIL, nodeTag, NoLock, GinStatsData::nPendingPages, GinStatsData::nTotalPages, GinQualCounts::partialEntries, scale, and GinQualCounts::searchEntries.

Referenced by ginhandler().
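The entry-tree arithmetic is heuristic, so a worked example helps. Assuming 900 entry pages, 90000 entries, 5 search entries, and 50 partial-match entries (all invented numbers), this standalone sketch reproduces the two entryPagesFetched terms from the listing:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double numEntryPages  = 900.0;    /* scaled from GinStatsData */
        double numEntries     = 90000.0;
        double searchEntries  = 5.0;      /* keys probed by the quals */
        double partialEntries = 50.0;     /* entries needing partial match */

        /* per-search descent: a damped power of the entry-tree size */
        double entryPagesFetched =
            ceil(searchEntries * rint(pow(numEntryPages, 0.15))); /* 5*3 = 15 */

        /* partial match scans a clamped fraction of the entry leaf pages */
        double partialScale = fmin(partialEntries / numEntries, 1.0);
        entryPagesFetched += ceil(numEntryPages * partialScale);  /* +1 */

        printf("entry pages fetched = %g\n", entryPagesFetched);  /* 16 */
        return 0;
    }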

◆ gistcostestimate()

void gistcostestimate ( struct PlannerInfo *  root,
struct IndexPath *  path,
double  loop_count,
Cost *  indexStartupCost,
Cost *  indexTotalCost,
Selectivity *  indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 6989 of file selfuncs.c.

6993 {
6994  IndexOptInfo *index = path->indexinfo;
6995  GenericCosts costs;
6996  Cost descentCost;
6997 
6998  MemSet(&costs, 0, sizeof(costs));
6999 
7000  genericcostestimate(root, path, loop_count, &costs);
7001 
7002  /*
7003  * We model index descent costs similarly to those for btree, but to do
7004  * that we first need an idea of the tree height. We somewhat arbitrarily
7005  * assume that the fanout is 100, meaning the tree height is at most
7006  * log100(index->pages).
7007  *
7008  * Although this computation isn't really expensive enough to require
7009  * caching, we might as well use index->tree_height to cache it.
7010  */
7011  if (index->tree_height < 0) /* unknown? */
7012  {
7013  if (index->pages > 1) /* avoid computing log(0) */
7014  index->tree_height = (int) (log(index->pages) / log(100.0));
7015  else
7016  index->tree_height = 0;
7017  }
7018 
7019  /*
7020  * Add a CPU-cost component to represent the costs of initial descent. We
7021  * just use log(N) here not log2(N) since the branching factor isn't
7022  * necessarily two anyway. As for btree, charge once per SA scan.
7023  */
7024  if (index->tuples > 1) /* avoid computing log(0) */
7025  {
7026  descentCost = ceil(log(index->tuples)) * cpu_operator_cost;
7027  costs.indexStartupCost += descentCost;
7028  costs.indexTotalCost += costs.num_sa_scans * descentCost;
7029  }
7030 
7031  /*
7032  * Likewise add a per-page charge, calculated the same as for btrees.
7033  */
7034  descentCost = (index->tree_height + 1) * 50.0 * cpu_operator_cost;
7035  costs.indexStartupCost += descentCost;
7036  costs.indexTotalCost += costs.num_sa_scans * descentCost;
7037 
7038  *indexStartupCost = costs.indexStartupCost;
7039  *indexTotalCost = costs.indexTotalCost;
7040  *indexSelectivity = costs.indexSelectivity;
7041  *indexCorrelation = costs.indexCorrelation;
7042  *indexPages = costs.numIndexPages;
7043 }

References cpu_operator_cost, genericcostestimate(), GenericCosts::indexCorrelation, IndexPath::indexinfo, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, MemSet, GenericCosts::num_sa_scans, and GenericCosts::numIndexPages.

Referenced by gisthandler().
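With the assumed fanout of 100, the cached tree height grows very slowly with index size. A quick standalone sketch of the estimate (mirroring the truncating cast in the listing above):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        /* tree_height ~= log100(pages); heights stay tiny even for huge indexes */
        for (double pages = 100.0; pages <= 1e8; pages *= 100.0)
            printf("%12.0f pages -> height %d\n",
                   pages, (int) (log(pages) / log(100.0)));
        return 0;
    }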

◆ hashcostestimate()

void hashcostestimate ( struct PlannerInfo *  root,
struct IndexPath *  path,
double  loop_count,
Cost *  indexStartupCost,
Cost *  indexTotalCost,
Selectivity *  indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 6945 of file selfuncs.c.

6949 {
6950  GenericCosts costs;
6951 
6952  MemSet(&costs, 0, sizeof(costs));
6953 
6954  genericcostestimate(root, path, loop_count, &costs);
6955 
6956  /*
6957  * A hash index has no descent costs as such, since the index AM can go
6958  * directly to the target bucket after computing the hash value. There
6959  * are a couple of other hash-specific costs that we could conceivably add
6960  * here, though:
6961  *
6962  * Ideally we'd charge spc_random_page_cost for each page in the target
6963  * bucket, not just the numIndexPages pages that genericcostestimate
6964  * thought we'd visit. However in most cases we don't know which bucket
6965  * that will be. There's no point in considering the average bucket size
6966  * because the hash AM makes sure that's always one page.
6967  *
6968  * Likewise, we could consider charging some CPU for each index tuple in
6969  * the bucket, if we knew how many there were. But the per-tuple cost is
6970  * just a hash value comparison, not a general datatype-dependent
6971  * comparison, so any such charge ought to be quite a bit less than
6972  * cpu_operator_cost; which makes it probably not worth worrying about.
6973  *
6974  * A bigger issue is that chance hash-value collisions will result in
6975  * wasted probes into the heap. We don't currently attempt to model this
6976  * cost on the grounds that it's rare, but maybe it's not rare enough.
6977  * (Any fix for this ought to consider the generic lossy-operator problem,
6978  * though; it's not entirely hash-specific.)
6979  */
6980 
6981  *indexStartupCost = costs.indexStartupCost;
6982  *indexTotalCost = costs.indexTotalCost;
6983  *indexSelectivity = costs.indexSelectivity;
6984  *indexCorrelation = costs.indexCorrelation;
6985  *indexPages = costs.numIndexPages;
6986 }

References genericcostestimate(), GenericCosts::indexCorrelation, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, MemSet, and GenericCosts::numIndexPages.

Referenced by hashhandler().

◆ spgcostestimate()

void spgcostestimate ( struct PlannerInfo *  root,
struct IndexPath *  path,
double  loop_count,
Cost *  indexStartupCost,
Cost *  indexTotalCost,
Selectivity *  indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 7046 of file selfuncs.c.

7050 {
7051  IndexOptInfo *index = path->indexinfo;
7052  GenericCosts costs;
7053  Cost descentCost;
7054 
7055  MemSet(&costs, 0, sizeof(costs));
7056 
7057  genericcostestimate(root, path, loop_count, &costs);
7058 
7059  /*
7060  * We model index descent costs similarly to those for btree, but to do
7061  * that we first need an idea of the tree height. We somewhat arbitrarily
7062  * assume that the fanout is 100, meaning the tree height is at most
7063  * log100(index->pages).
7064  *
7065  * Although this computation isn't really expensive enough to require
7066  * caching, we might as well use index->tree_height to cache it.
7067  */
7068  if (index->tree_height < 0) /* unknown? */
7069  {
7070  if (index->pages > 1) /* avoid computing log(0) */
7071  index->tree_height = (int) (log(index->pages) / log(100.0));
7072  else
7073  index->tree_height = 0;
7074  }
7075 
7076  /*
7077  * Add a CPU-cost component to represent the costs of initial descent. We
7078  * just use log(N) here not log2(N) since the branching factor isn't
7079  * necessarily two anyway. As for btree, charge once per SA scan.
7080  */
7081  if (index->tuples > 1) /* avoid computing log(0) */
7082  {
7083  descentCost = ceil(log(index->tuples)) * cpu_operator_cost;
7084  costs.indexStartupCost += descentCost;
7085  costs.indexTotalCost += costs.num_sa_scans * descentCost;
7086  }
7087 
7088  /*
7089  * Likewise add a per-page charge, calculated the same as for btrees.
7090  */
7091  descentCost = (index->tree_height + 1) * 50.0 * cpu_operator_cost;
7092  costs.indexStartupCost += descentCost;
7093  costs.indexTotalCost += costs.num_sa_scans * descentCost;
7094 
7095  *indexStartupCost = costs.indexStartupCost;
7096  *indexTotalCost = costs.indexTotalCost;
7097  *indexSelectivity = costs.indexSelectivity;
7098  *indexCorrelation = costs.indexCorrelation;
7099  *indexPages = costs.numIndexPages;
7100 }

References cpu_operator_cost, genericcostestimate(), GenericCosts::indexCorrelation, IndexPath::indexinfo, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, MemSet, GenericCosts::num_sa_scans, and GenericCosts::numIndexPages.

Referenced by spghandler().