PostgreSQL Source Code  git master
costsize.c File Reference
#include "postgres.h"
#include <math.h>
#include "access/amapi.h"
#include "access/htup_details.h"
#include "access/tsmapi.h"
#include "executor/executor.h"
#include "executor/nodeHash.h"
#include "miscadmin.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/placeholder.h"
#include "optimizer/plancat.h"
#include "optimizer/planmain.h"
#include "optimizer/restrictinfo.h"
#include "parser/parsetree.h"
#include "utils/lsyscache.h"
#include "utils/selfuncs.h"
#include "utils/spccache.h"
#include "utils/tuplesort.h"
Include dependency graph for costsize.c:

Go to the source code of this file.

Data Structures

struct  cost_qual_eval_context
 

Macros

#define LOG2(x)   (log(x) / 0.693147180559945)
 
#define APPEND_CPU_COST_MULTIPLIER   0.5
 

Functions

static List * extract_nonindex_conditions (List *qual_clauses, List *indexquals)
 
static MergeScanSelCache * cached_scansel (PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
 
static void cost_rescan (PlannerInfo *root, Path *path, Cost *rescan_startup_cost, Cost *rescan_total_cost)
 
static bool cost_qual_eval_walker (Node *node, cost_qual_eval_context *context)
 
static void get_restriction_qual_cost (PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info, QualCost *qpqual_cost)
 
static bool has_indexed_join_quals (NestPath *joinpath)
 
static double approx_tuple_count (PlannerInfo *root, JoinPath *path, List *quals)
 
static double calc_joinrel_size_estimate (PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, double outer_rows, double inner_rows, SpecialJoinInfo *sjinfo, List *restrictlist)
 
static Selectivity get_foreign_key_join_selectivity (PlannerInfo *root, Relids outer_relids, Relids inner_relids, SpecialJoinInfo *sjinfo, List **restrictlist)
 
static Cost append_nonpartial_cost (List *subpaths, int numpaths, int parallel_workers)
 
static void set_rel_width (PlannerInfo *root, RelOptInfo *rel)
 
static double relation_byte_size (double tuples, int width)
 
static double page_size (double tuples, int width)
 
static double get_parallel_divisor (Path *path)
 
double clamp_row_est (double nrows)
 
void cost_seqscan (Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
 
void cost_samplescan (Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
 
void cost_gather (GatherPath *path, PlannerInfo *root, RelOptInfo *rel, ParamPathInfo *param_info, double *rows)
 
void cost_gather_merge (GatherMergePath *path, PlannerInfo *root, RelOptInfo *rel, ParamPathInfo *param_info, Cost input_startup_cost, Cost input_total_cost, double *rows)
 
void cost_index (IndexPath *path, PlannerInfo *root, double loop_count, bool partial_path)
 
double index_pages_fetched (double tuples_fetched, BlockNumber pages, double index_pages, PlannerInfo *root)
 
static double get_indexpath_pages (Path *bitmapqual)
 
void cost_bitmap_heap_scan (Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info, Path *bitmapqual, double loop_count)
 
void cost_bitmap_tree_node (Path *path, Cost *cost, Selectivity *selec)
 
void cost_bitmap_and_node (BitmapAndPath *path, PlannerInfo *root)
 
void cost_bitmap_or_node (BitmapOrPath *path, PlannerInfo *root)
 
void cost_tidscan (Path *path, PlannerInfo *root, RelOptInfo *baserel, List *tidquals, ParamPathInfo *param_info)
 
void cost_subqueryscan (SubqueryScanPath *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
 
void cost_functionscan (Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
 
void cost_tablefuncscan (Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
 
void cost_valuesscan (Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
 
void cost_ctescan (Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
 
void cost_namedtuplestorescan (Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
 
void cost_recursive_union (Path *runion, Path *nrterm, Path *rterm)
 
void cost_sort (Path *path, PlannerInfo *root, List *pathkeys, Cost input_cost, double tuples, int width, Cost comparison_cost, int sort_mem, double limit_tuples)
 
void cost_append (AppendPath *apath)
 
void cost_merge_append (Path *path, PlannerInfo *root, List *pathkeys, int n_streams, Cost input_startup_cost, Cost input_total_cost, double tuples)
 
void cost_material (Path *path, Cost input_startup_cost, Cost input_total_cost, double tuples, int width)
 
void cost_agg (Path *path, PlannerInfo *root, AggStrategy aggstrategy, const AggClauseCosts *aggcosts, int numGroupCols, double numGroups, List *quals, Cost input_startup_cost, Cost input_total_cost, double input_tuples)
 
void cost_windowagg (Path *path, PlannerInfo *root, List *windowFuncs, int numPartCols, int numOrderCols, Cost input_startup_cost, Cost input_total_cost, double input_tuples)
 
void cost_group (Path *path, PlannerInfo *root, int numGroupCols, double numGroups, List *quals, Cost input_startup_cost, Cost input_total_cost, double input_tuples)
 
void initial_cost_nestloop (PlannerInfo *root, JoinCostWorkspace *workspace, JoinType jointype, Path *outer_path, Path *inner_path, JoinPathExtraData *extra)
 
void final_cost_nestloop (PlannerInfo *root, NestPath *path, JoinCostWorkspace *workspace, JoinPathExtraData *extra)
 
void initial_cost_mergejoin (PlannerInfo *root, JoinCostWorkspace *workspace, JoinType jointype, List *mergeclauses, Path *outer_path, Path *inner_path, List *outersortkeys, List *innersortkeys, JoinPathExtraData *extra)
 
void final_cost_mergejoin (PlannerInfo *root, MergePath *path, JoinCostWorkspace *workspace, JoinPathExtraData *extra)
 
void initial_cost_hashjoin (PlannerInfo *root, JoinCostWorkspace *workspace, JoinType jointype, List *hashclauses, Path *outer_path, Path *inner_path, JoinPathExtraData *extra, bool parallel_hash)
 
void final_cost_hashjoin (PlannerInfo *root, HashPath *path, JoinCostWorkspace *workspace, JoinPathExtraData *extra)
 
void cost_subplan (PlannerInfo *root, SubPlan *subplan, Plan *plan)
 
void cost_qual_eval (QualCost *cost, List *quals, PlannerInfo *root)
 
void cost_qual_eval_node (QualCost *cost, Node *qual, PlannerInfo *root)
 
void compute_semi_anti_join_factors (PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *outerrel, RelOptInfo *innerrel, JoinType jointype, SpecialJoinInfo *sjinfo, List *restrictlist, SemiAntiJoinFactors *semifactors)
 
void set_baserel_size_estimates (PlannerInfo *root, RelOptInfo *rel)
 
double get_parameterized_baserel_size (PlannerInfo *root, RelOptInfo *rel, List *param_clauses)
 
void set_joinrel_size_estimates (PlannerInfo *root, RelOptInfo *rel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo, List *restrictlist)
 
double get_parameterized_joinrel_size (PlannerInfo *root, RelOptInfo *rel, Path *outer_path, Path *inner_path, SpecialJoinInfo *sjinfo, List *restrict_clauses)
 
void set_subquery_size_estimates (PlannerInfo *root, RelOptInfo *rel)
 
void set_function_size_estimates (PlannerInfo *root, RelOptInfo *rel)
 
void set_tablefunc_size_estimates (PlannerInfo *root, RelOptInfo *rel)
 
void set_values_size_estimates (PlannerInfo *root, RelOptInfo *rel)
 
void set_cte_size_estimates (PlannerInfo *root, RelOptInfo *rel, double cte_rows)
 
void set_namedtuplestore_size_estimates (PlannerInfo *root, RelOptInfo *rel)
 
void set_foreign_size_estimates (PlannerInfo *root, RelOptInfo *rel)
 
PathTarget * set_pathtarget_cost_width (PlannerInfo *root, PathTarget *target)
 
double compute_bitmap_pages (PlannerInfo *root, RelOptInfo *baserel, Path *bitmapqual, int loop_count, Cost *cost, double *tuple)
 

Variables

double seq_page_cost = DEFAULT_SEQ_PAGE_COST
 
double random_page_cost = DEFAULT_RANDOM_PAGE_COST
 
double cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST
 
double cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST
 
double cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST
 
double parallel_tuple_cost = DEFAULT_PARALLEL_TUPLE_COST
 
double parallel_setup_cost = DEFAULT_PARALLEL_SETUP_COST
 
int effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE
 
Cost disable_cost = 1.0e10
 
int max_parallel_workers_per_gather = 2
 
bool enable_seqscan = true
 
bool enable_indexscan = true
 
bool enable_indexonlyscan = true
 
bool enable_bitmapscan = true
 
bool enable_tidscan = true
 
bool enable_sort = true
 
bool enable_hashagg = true
 
bool enable_nestloop = true
 
bool enable_material = true
 
bool enable_mergejoin = true
 
bool enable_hashjoin = true
 
bool enable_gathermerge = true
 
bool enable_partitionwise_join = false
 
bool enable_partitionwise_aggregate = false
 
bool enable_parallel_append = true
 
bool enable_parallel_hash = true
 
bool enable_partition_pruning = true
 

Macro Definition Documentation

◆ APPEND_CPU_COST_MULTIPLIER

#define APPEND_CPU_COST_MULTIPLIER   0.5

Definition at line 108 of file costsize.c.

Referenced by cost_append(), and cost_merge_append().

◆ LOG2

#define LOG2(x)   (log(x) / 0.693147180559945)

Definition at line 101 of file costsize.c.

Referenced by cost_gather_merge(), cost_merge_append(), and cost_sort().
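As a quick sanity check (this example is not part of the generated page), the constant 0.693147180559945 is ln 2, so LOG2(x) computes log(x)/ln(2), i.e. the base-2 logarithm; a minimal standalone C sketch:

    #include <math.h>
    #include <stdio.h>

    #define LOG2(x)   (log(x) / 0.693147180559945)

    int
    main(void)
    {
        /* log2(1024) is exactly 10; the macro agrees up to rounding error */
        printf("%f\n", LOG2(1024.0));   /* prints 10.000000 */
        return 0;
    }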

Function Documentation

◆ append_nonpartial_cost()

static Cost append_nonpartial_cost ( List * subpaths,
int  numpaths,
int  parallel_workers 
)
static

Definition at line 1767 of file costsize.c.

References for_each_cell, i, lfirst, Min, palloc(), subpath(), and Path::total_cost.

Referenced by cost_append().

1768 {
1769  Cost *costarr;
1770  int arrlen;
1771  ListCell *l;
1772  ListCell *cell;
1773  int i;
1774  int path_index;
1775  int min_index;
1776  int max_index;
1777 
1778  if (numpaths == 0)
1779  return 0;
1780 
1781  /*
1782  * Array length is number of workers or number of relevant paths,
1783  * whichever is less.
1784  */
1785  arrlen = Min(parallel_workers, numpaths);
1786  costarr = (Cost *) palloc(sizeof(Cost) * arrlen);
1787 
1788  /* The first few paths will each be claimed by a different worker. */
1789  path_index = 0;
1790  foreach(cell, subpaths)
1791  {
1792  Path *subpath = (Path *) lfirst(cell);
1793 
1794  if (path_index == arrlen)
1795  break;
1796  costarr[path_index++] = subpath->total_cost;
1797  }
1798 
1799  /*
1800  * Since subpaths are sorted by decreasing cost, the last one will have
1801  * the minimum cost.
1802  */
1803  min_index = arrlen - 1;
1804 
1805  /*
1806  * For each of the remaining subpaths, add its cost to the array element
1807  * with minimum cost.
1808  */
1809  for_each_cell(l, cell)
1810  {
1811  Path *subpath = (Path *) lfirst(l);
1812  int i;
1813 
1814  /* Consider only the non-partial paths */
1815  if (path_index++ == numpaths)
1816  break;
1817 
1818  costarr[min_index] += subpath->total_cost;
1819 
1820  /* Update the new min cost array index */
1821  for (min_index = i = 0; i < arrlen; i++)
1822  {
1823  if (costarr[i] < costarr[min_index])
1824  min_index = i;
1825  }
1826  }
1827 
1828  /* Return the highest cost from the array */
1829  for (max_index = i = 0; i < arrlen; i++)
1830  {
1831  if (costarr[i] > costarr[max_index])
1832  max_index = i;
1833  }
1834 
1835  return costarr[max_index];
1836 }
#define Min(x, y)
Definition: c.h:857
Cost total_cost
Definition: relation.h:1090
#define lfirst(lc)
Definition: pg_list.h:106
#define for_each_cell(cell, initcell)
Definition: pg_list.h:169
void * palloc(Size size)
Definition: mcxt.c:924
int i
double Cost
Definition: nodes.h:648
Datum subpath(PG_FUNCTION_ARGS)
Definition: ltree_op.c:234
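To illustrate the greedy spreading done above (an illustrative standalone sketch; the toy costs and the helper name are assumptions, not part of costsize.c): with non-partial subpath costs {10, 8, 6, 4, 2}, sorted by decreasing cost, and 3 parallel workers, the per-worker cost array starts as {10, 8, 6}, the remaining costs 4 and 2 are each added to whichever slot is currently cheapest, giving {10, 10, 10}, and the returned estimate is the maximum, 10:

    #include <stdio.h>

    /* Toy re-implementation of the greedy spreading in append_nonpartial_cost();
     * costs[] must be sorted by decreasing cost, as Append subpaths are. */
    static double
    nonpartial_cost_estimate(const double *costs, int numpaths, int workers)
    {
        double  costarr[64];        /* assumed large enough for this sketch */
        int     arrlen = (workers < numpaths) ? workers : numpaths;
        int     i, min_index, max_index;

        if (numpaths == 0)
            return 0;

        /* The first few paths each go to a different worker. */
        for (i = 0; i < arrlen; i++)
            costarr[i] = costs[i];

        /* Remaining paths go to whichever worker is currently cheapest. */
        min_index = arrlen - 1;
        for (; i < numpaths; i++)
        {
            int j;

            costarr[min_index] += costs[i];
            for (min_index = j = 0; j < arrlen; j++)
                if (costarr[j] < costarr[min_index])
                    min_index = j;
        }

        /* The estimate is the most heavily loaded worker. */
        for (max_index = i = 0; i < arrlen; i++)
            if (costarr[i] > costarr[max_index])
                max_index = i;

        return costarr[max_index];
    }

    int
    main(void)
    {
        double costs[] = {10, 8, 6, 4, 2};

        printf("%.1f\n", nonpartial_cost_estimate(costs, 5, 3));   /* prints 10.0 */
        return 0;
    }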

◆ approx_tuple_count()

static double approx_tuple_count ( PlannerInfo * root,
JoinPath * path,
List * quals 
)
static

Definition at line 4239 of file costsize.c.

References clamp_row_est(), clause_selectivity(), SpecialJoinInfo::delay_upper_joins, JoinPath::innerjoinpath, JOIN_INNER, SpecialJoinInfo::jointype, lfirst, SpecialJoinInfo::lhs_strict, SpecialJoinInfo::min_lefthand, SpecialJoinInfo::min_righthand, NIL, JoinPath::outerjoinpath, Path::parent, RelOptInfo::relids, Path::rows, SpecialJoinInfo::semi_can_btree, SpecialJoinInfo::semi_can_hash, SpecialJoinInfo::semi_operators, SpecialJoinInfo::semi_rhs_exprs, SpecialJoinInfo::syn_lefthand, SpecialJoinInfo::syn_righthand, T_SpecialJoinInfo, and SpecialJoinInfo::type.

Referenced by final_cost_hashjoin(), and final_cost_mergejoin().

4240 {
4241  double tuples;
4242  double outer_tuples = path->outerjoinpath->rows;
4243  double inner_tuples = path->innerjoinpath->rows;
4244  SpecialJoinInfo sjinfo;
4245  Selectivity selec = 1.0;
4246  ListCell *l;
4247 
4248  /*
4249  * Make up a SpecialJoinInfo for JOIN_INNER semantics.
4250  */
4251  sjinfo.type = T_SpecialJoinInfo;
4252  sjinfo.min_lefthand = path->outerjoinpath->parent->relids;
4253  sjinfo.min_righthand = path->innerjoinpath->parent->relids;
4254  sjinfo.syn_lefthand = path->outerjoinpath->parent->relids;
4255  sjinfo.syn_righthand = path->innerjoinpath->parent->relids;
4256  sjinfo.jointype = JOIN_INNER;
4257  /* we don't bother trying to make the remaining fields valid */
4258  sjinfo.lhs_strict = false;
4259  sjinfo.delay_upper_joins = false;
4260  sjinfo.semi_can_btree = false;
4261  sjinfo.semi_can_hash = false;
4262  sjinfo.semi_operators = NIL;
4263  sjinfo.semi_rhs_exprs = NIL;
4264 
4265  /* Get the approximate selectivity */
4266  foreach(l, quals)
4267  {
4268  Node *qual = (Node *) lfirst(l);
4269 
4270  /* Note that clause_selectivity will be able to cache its result */
4271  selec *= clause_selectivity(root, qual, 0, JOIN_INNER, &sjinfo);
4272  }
4273 
4274  /* Apply it to the input relation sizes */
4275  tuples = selec * outer_tuples * inner_tuples;
4276 
4277  return clamp_row_est(tuples);
4278 }
#define NIL
Definition: pg_list.h:69
bool semi_can_btree
Definition: relation.h:2074
Relids min_righthand
Definition: relation.h:2067
Path * innerjoinpath
Definition: relation.h:1427
NodeTag type
Definition: relation.h:2065
Definition: nodes.h:517
double Selectivity
Definition: nodes.h:647
Relids syn_lefthand
Definition: relation.h:2068
Relids syn_righthand
Definition: relation.h:2069
List * semi_rhs_exprs
Definition: relation.h:2077
bool semi_can_hash
Definition: relation.h:2075
RelOptInfo * parent
Definition: relation.h:1078
Selectivity clause_selectivity(PlannerInfo *root, Node *clause, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
Definition: clausesel.c:574
Relids relids
Definition: relation.h:612
Path * outerjoinpath
Definition: relation.h:1426
bool delay_upper_joins
Definition: relation.h:2072
#define lfirst(lc)
Definition: pg_list.h:106
double rows
Definition: relation.h:1088
JoinType jointype
Definition: relation.h:2070
List * semi_operators
Definition: relation.h:2076
double clamp_row_est(double nrows)
Definition: costsize.c:188
Relids min_lefthand
Definition: relation.h:2066
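Worked example (illustrative numbers, not from the page): with outer_tuples = 1000, inner_tuples = 500 and two quals of selectivity 0.1 and 0.2, selec = 0.1 * 0.2 = 0.02 and the function returns clamp_row_est(0.02 * 1000 * 500) = 10000.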

◆ cached_scansel()

static MergeScanSelCache * cached_scansel ( PlannerInfo * root,
RestrictInfo * rinfo,
PathKey * pathkey 
)
static

Definition at line 3074 of file costsize.c.

References RestrictInfo::clause, MergeScanSelCache::collation, EquivalenceClass::ec_collation, lappend(), MergeScanSelCache::leftendsel, MergeScanSelCache::leftstartsel, lfirst, MemoryContextSwitchTo(), mergejoinscansel(), MergeScanSelCache::nulls_first, MergeScanSelCache::opfamily, palloc(), PathKey::pk_eclass, PathKey::pk_nulls_first, PathKey::pk_opfamily, PathKey::pk_strategy, PlannerInfo::planner_cxt, MergeScanSelCache::rightendsel, MergeScanSelCache::rightstartsel, RestrictInfo::scansel_cache, and MergeScanSelCache::strategy.

Referenced by initial_cost_mergejoin().

3075 {
3076  MergeScanSelCache *cache;
3077  ListCell *lc;
3078  Selectivity leftstartsel,
3079  leftendsel,
3080  rightstartsel,
3081  rightendsel;
3082  MemoryContext oldcontext;
3083 
3084  /* Do we have this result already? */
3085  foreach(lc, rinfo->scansel_cache)
3086  {
3087  cache = (MergeScanSelCache *) lfirst(lc);
3088  if (cache->opfamily == pathkey->pk_opfamily &&
3089  cache->collation == pathkey->pk_eclass->ec_collation &&
3090  cache->strategy == pathkey->pk_strategy &&
3091  cache->nulls_first == pathkey->pk_nulls_first)
3092  return cache;
3093  }
3094 
3095  /* Nope, do the computation */
3096  mergejoinscansel(root,
3097  (Node *) rinfo->clause,
3098  pathkey->pk_opfamily,
3099  pathkey->pk_strategy,
3100  pathkey->pk_nulls_first,
3101  &leftstartsel,
3102  &leftendsel,
3103  &rightstartsel,
3104  &rightendsel);
3105 
3106  /* Cache the result in suitably long-lived workspace */
3107  oldcontext = MemoryContextSwitchTo(root->planner_cxt);
3108 
3109  cache = (MergeScanSelCache *) palloc(sizeof(MergeScanSelCache));
3110  cache->opfamily = pathkey->pk_opfamily;
3111  cache->collation = pathkey->pk_eclass->ec_collation;
3112  cache->strategy = pathkey->pk_strategy;
3113  cache->nulls_first = pathkey->pk_nulls_first;
3114  cache->leftstartsel = leftstartsel;
3115  cache->leftendsel = leftendsel;
3116  cache->rightstartsel = rightstartsel;
3117  cache->rightendsel = rightendsel;
3118 
3119  rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);
3120 
3121  MemoryContextSwitchTo(oldcontext);
3122 
3123  return cache;
3124 }
Selectivity leftendsel
Definition: relation.h:1977
void mergejoinscansel(PlannerInfo *root, Node *clause, Oid opfamily, int strategy, bool nulls_first, Selectivity *leftstart, Selectivity *leftend, Selectivity *rightstart, Selectivity *rightend)
Definition: selfuncs.c:2995
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:109
Definition: nodes.h:517
double Selectivity
Definition: nodes.h:647
int pk_strategy
Definition: relation.h:977
bool pk_nulls_first
Definition: relation.h:978
Selectivity rightstartsel
Definition: relation.h:1978
List * lappend(List *list, void *datum)
Definition: list.c:128
Expr * clause
Definition: relation.h:1880
#define lfirst(lc)
Definition: pg_list.h:106
EquivalenceClass * pk_eclass
Definition: relation.h:975
Oid pk_opfamily
Definition: relation.h:976
void * palloc(Size size)
Definition: mcxt.c:924
MemoryContext planner_cxt
Definition: relation.h:302
Selectivity rightendsel
Definition: relation.h:1979
List * scansel_cache
Definition: relation.h:1932
Selectivity leftstartsel
Definition: relation.h:1976

◆ calc_joinrel_size_estimate()

static double calc_joinrel_size_estimate ( PlannerInfo * root,
RelOptInfo * joinrel,
RelOptInfo * outer_rel,
RelOptInfo * inner_rel,
double  outer_rows,
double  inner_rows,
SpecialJoinInfo * sjinfo,
List * restrictlist_in 
)
static

Definition at line 4448 of file costsize.c.

References clamp_row_est(), clauselist_selectivity(), elog, ERROR, get_foreign_key_join_selectivity(), IS_OUTER_JOIN, JOIN_ANTI, JOIN_FULL, JOIN_INNER, JOIN_LEFT, JOIN_SEMI, SpecialJoinInfo::jointype, lappend(), lfirst_node, list_free(), NIL, RelOptInfo::relids, and RINFO_IS_PUSHED_DOWN.

Referenced by get_parameterized_joinrel_size(), and set_joinrel_size_estimates().

4456 {
4457  /* This apparently-useless variable dodges a compiler bug in VS2013: */
4458  List *restrictlist = restrictlist_in;
4459  JoinType jointype = sjinfo->jointype;
4460  Selectivity fkselec;
4461  Selectivity jselec;
4462  Selectivity pselec;
4463  double nrows;
4464 
4465  /*
4466  * Compute joinclause selectivity. Note that we are only considering
4467  * clauses that become restriction clauses at this join level; we are not
4468  * double-counting them because they were not considered in estimating the
4469  * sizes of the component rels.
4470  *
4471  * First, see whether any of the joinclauses can be matched to known FK
4472  * constraints. If so, drop those clauses from the restrictlist, and
4473  * instead estimate their selectivity using FK semantics. (We do this
4474  * without regard to whether said clauses are local or "pushed down".
4475  * Probably, an FK-matching clause could never be seen as pushed down at
4476  * an outer join, since it would be strict and hence would be grounds for
4477  * join strength reduction.) fkselec gets the net selectivity for
4478  * FK-matching clauses, or 1.0 if there are none.
4479  */
4480  fkselec = get_foreign_key_join_selectivity(root,
4481  outer_rel->relids,
4482  inner_rel->relids,
4483  sjinfo,
4484  &restrictlist);
4485 
4486  /*
4487  * For an outer join, we have to distinguish the selectivity of the join's
4488  * own clauses (JOIN/ON conditions) from any clauses that were "pushed
4489  * down". For inner joins we just count them all as joinclauses.
4490  */
4491  if (IS_OUTER_JOIN(jointype))
4492  {
4493  List *joinquals = NIL;
4494  List *pushedquals = NIL;
4495  ListCell *l;
4496 
4497  /* Grovel through the clauses to separate into two lists */
4498  foreach(l, restrictlist)
4499  {
4500  RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
4501 
4502  if (RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
4503  pushedquals = lappend(pushedquals, rinfo);
4504  else
4505  joinquals = lappend(joinquals, rinfo);
4506  }
4507 
4508  /* Get the separate selectivities */
4509  jselec = clauselist_selectivity(root,
4510  joinquals,
4511  0,
4512  jointype,
4513  sjinfo);
4514  pselec = clauselist_selectivity(root,
4515  pushedquals,
4516  0,
4517  jointype,
4518  sjinfo);
4519 
4520  /* Avoid leaking a lot of ListCells */
4521  list_free(joinquals);
4522  list_free(pushedquals);
4523  }
4524  else
4525  {
4526  jselec = clauselist_selectivity(root,
4527  restrictlist,
4528  0,
4529  jointype,
4530  sjinfo);
4531  pselec = 0.0; /* not used, keep compiler quiet */
4532  }
4533 
4534  /*
4535  * Basically, we multiply size of Cartesian product by selectivity.
4536  *
4537  * If we are doing an outer join, take that into account: the joinqual
4538  * selectivity has to be clamped using the knowledge that the output must
4539  * be at least as large as the non-nullable input. However, any
4540  * pushed-down quals are applied after the outer join, so their
4541  * selectivity applies fully.
4542  *
4543  * For JOIN_SEMI and JOIN_ANTI, the selectivity is defined as the fraction
4544  * of LHS rows that have matches, and we apply that straightforwardly.
4545  */
4546  switch (jointype)
4547  {
4548  case JOIN_INNER:
4549  nrows = outer_rows * inner_rows * fkselec * jselec;
4550  /* pselec not used */
4551  break;
4552  case JOIN_LEFT:
4553  nrows = outer_rows * inner_rows * fkselec * jselec;
4554  if (nrows < outer_rows)
4555  nrows = outer_rows;
4556  nrows *= pselec;
4557  break;
4558  case JOIN_FULL:
4559  nrows = outer_rows * inner_rows * fkselec * jselec;
4560  if (nrows < outer_rows)
4561  nrows = outer_rows;
4562  if (nrows < inner_rows)
4563  nrows = inner_rows;
4564  nrows *= pselec;
4565  break;
4566  case JOIN_SEMI:
4567  nrows = outer_rows * fkselec * jselec;
4568  /* pselec not used */
4569  break;
4570  case JOIN_ANTI:
4571  nrows = outer_rows * (1.0 - fkselec * jselec);
4572  nrows *= pselec;
4573  break;
4574  default:
4575  /* other values not expected here */
4576  elog(ERROR, "unrecognized join type: %d", (int) jointype);
4577  nrows = 0; /* keep compiler quiet */
4578  break;
4579  }
4580 
4581  return clamp_row_est(nrows);
4582 }
#define NIL
Definition: pg_list.h:69
#define IS_OUTER_JOIN(jointype)
Definition: nodes.h:730
double Selectivity
Definition: nodes.h:647
JoinType
Definition: nodes.h:681
static Selectivity get_foreign_key_join_selectivity(PlannerInfo *root, Relids outer_relids, Relids inner_relids, SpecialJoinInfo *sjinfo, List **restrictlist)
Definition: costsize.c:4600
#define RINFO_IS_PUSHED_DOWN(rinfo, joinrelids)
Definition: relation.h:1957
#define ERROR
Definition: elog.h:43
#define lfirst_node(type, lc)
Definition: pg_list.h:109
Relids relids
Definition: relation.h:612
List * lappend(List *list, void *datum)
Definition: list.c:128
JoinType jointype
Definition: relation.h:2070
void list_free(List *list)
Definition: list.c:1133
#define elog
Definition: elog.h:219
Selectivity clauselist_selectivity(PlannerInfo *root, List *clauses, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
Definition: clausesel.c:99
double clamp_row_est(double nrows)
Definition: costsize.c:188
Definition: pg_list.h:45
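Worked example (illustrative numbers, not from the page): with outer_rows = 1000, inner_rows = 200, fkselec = 1.0, jselec = 0.005 and pselec = 0.5, the switch above yields 1000 * 200 * 0.005 = 1000 rows for JOIN_INNER; for JOIN_LEFT that product is already at least outer_rows, so it is kept and multiplied by pselec, giving 500; JOIN_SEMI gives 1000 * 0.005 = 5; and JOIN_ANTI gives 1000 * (1 - 0.005) * 0.5 = 497.5, which clamp_row_est() rounds to 498.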

◆ clamp_row_est()

double clamp_row_est ( double  nrows)

Definition at line 188 of file costsize.c.

References rint().

Referenced by approx_tuple_count(), bernoulli_samplescangetsamplesize(), calc_joinrel_size_estimate(), compute_bitmap_pages(), cost_agg(), cost_append(), cost_bitmap_heap_scan(), cost_group(), cost_index(), cost_seqscan(), cost_subplan(), create_bitmap_subplan(), create_limit_path(), estimate_hash_bucket_stats(), estimate_num_groups(), estimate_path_cost_size(), estimate_size(), expression_returns_set_rows(), final_cost_hashjoin(), final_cost_mergejoin(), final_cost_nestloop(), get_parameterized_baserel_size(), get_variable_numdistinct(), initial_cost_mergejoin(), set_baserel_size_estimates(), set_foreign_size(), system_rows_samplescangetsamplesize(), system_samplescangetsamplesize(), and system_time_samplescangetsamplesize().

189 {
190  /*
191  * Force estimate to be at least one row, to make explain output look
192  * better and to avoid possible divide-by-zero when interpolating costs.
193  * Make it an integer, too.
194  */
195  if (nrows <= 1.0)
196  nrows = 1.0;
197  else
198  nrows = rint(nrows);
199 
200  return nrows;
201 }
double rint(double x)
Definition: rint.c:22
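For instance (illustrative values), clamp_row_est(0.3) returns 1.0, while clamp_row_est(42.7) returns 43.0.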

◆ compute_bitmap_pages()

double compute_bitmap_pages ( PlannerInfo * root,
RelOptInfo * baserel,
Path * bitmapqual,
int  loop_count,
Cost * cost,
double *  tuple 
)

Definition at line 5383 of file costsize.c.

References clamp_row_est(), cost_bitmap_tree_node(), get_indexpath_pages(), index_pages_fetched(), Max, Min, RelOptInfo::pages, T, tbm_calculate_entries(), RelOptInfo::tuples, and work_mem.

Referenced by cost_bitmap_heap_scan(), and create_partial_bitmap_paths().

5385 {
5386  Cost indexTotalCost;
5387  Selectivity indexSelectivity;
5388  double T;
5389  double pages_fetched;
5390  double tuples_fetched;
5391  double heap_pages;
5392  long maxentries;
5393 
5394  /*
5395  * Fetch total cost of obtaining the bitmap, as well as its total
5396  * selectivity.
5397  */
5398  cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
5399 
5400  /*
5401  * Estimate number of main-table pages fetched.
5402  */
5403  tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
5404 
5405  T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
5406 
5407  /*
5408  * For a single scan, the number of heap pages that need to be fetched is
5409  * the same as the Mackert and Lohman formula for the case T <= b (ie, no
5410  * re-reads needed).
5411  */
5412  pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
5413 
5414  /*
5415  * Calculate the number of pages fetched from the heap. Then based on
5416  * current work_mem estimate get the estimated maxentries in the bitmap.
5417  * (Note that we always do this calculation based on the number of pages
5418  * that would be fetched in a single iteration, even if loop_count > 1.
5419  * That's correct, because only that number of entries will be stored in
5420  * the bitmap at one time.)
5421  */
5422  heap_pages = Min(pages_fetched, baserel->pages);
5423  maxentries = tbm_calculate_entries(work_mem * 1024L);
5424 
5425  if (loop_count > 1)
5426  {
5427  /*
5428  * For repeated bitmap scans, scale up the number of tuples fetched in
5429  * the Mackert and Lohman formula by the number of scans, so that we
5430  * estimate the number of pages fetched by all the scans. Then
5431  * pro-rate for one scan.
5432  */
5433  pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
5434  baserel->pages,
5435  get_indexpath_pages(bitmapqual),
5436  root);
5437  pages_fetched /= loop_count;
5438  }
5439 
5440  if (pages_fetched >= T)
5441  pages_fetched = T;
5442  else
5443  pages_fetched = ceil(pages_fetched);
5444 
5445  if (maxentries < heap_pages)
5446  {
5447  double exact_pages;
5448  double lossy_pages;
5449 
5450  /*
5451  * Crude approximation of the number of lossy pages. Because of the
5452  * way tbm_lossify() is coded, the number of lossy pages increases
5453  * very sharply as soon as we run short of memory; this formula has
5454  * that property and seems to perform adequately in testing, but it's
5455  * possible we could do better somehow.
5456  */
5457  lossy_pages = Max(0, heap_pages - maxentries / 2);
5458  exact_pages = heap_pages - lossy_pages;
5459 
5460  /*
5461  * If there are lossy pages then recompute the number of tuples
5462  * processed by the bitmap heap node. We assume here that the chance
5463  * of a given tuple coming from an exact page is the same as the
5464  * chance that a given page is exact. This might not be true, but
5465  * it's not clear how we can do any better.
5466  */
5467  if (lossy_pages > 0)
5468  tuples_fetched =
5469  clamp_row_est(indexSelectivity *
5470  (exact_pages / heap_pages) * baserel->tuples +
5471  (lossy_pages / heap_pages) * baserel->tuples);
5472  }
5473 
5474  if (cost)
5475  *cost = indexTotalCost;
5476  if (tuple)
5477  *tuple = tuples_fetched;
5478 
5479  return pages_fetched;
5480 }
double tuples
Definition: relation.h:652
#define Min(x, y)
Definition: c.h:857
double Selectivity
Definition: nodes.h:647
static const uint32 T[65]
Definition: md5.c:101
int work_mem
Definition: globals.c:122
#define Max(x, y)
Definition: c.h:851
BlockNumber pages
Definition: relation.h:651
static double get_indexpath_pages(Path *bitmapqual)
Definition: costsize.c:895
long tbm_calculate_entries(double maxbytes)
Definition: tidbitmap.c:1545
double clamp_row_est(double nrows)
Definition: costsize.c:188
double index_pages_fetched(double tuples_fetched, BlockNumber pages, double index_pages, PlannerInfo *root)
Definition: costsize.c:830
double Cost
Definition: nodes.h:648
void cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
Definition: costsize.c:1046
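A minimal standalone sketch of the single-scan heap page estimate used above (the table size and tuple count are toy assumptions, not from the page):

    #include <stdio.h>

    int
    main(void)
    {
        double T = 10000.0;             /* pages in the table (toy value) */
        double tuples_fetched = 5000.0; /* tuples expected to match (toy value) */

        /* Mackert-Lohman estimate for a single scan (the T <= b case above) */
        double pages_fetched = (2.0 * T * tuples_fetched) /
                               (2.0 * T + tuples_fetched);

        printf("%.0f\n", pages_fetched);    /* prints 4000 */
        return 0;
    }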

◆ compute_semi_anti_join_factors()

void compute_semi_anti_join_factors ( PlannerInfo * root,
RelOptInfo * joinrel,
RelOptInfo * outerrel,
RelOptInfo * innerrel,
JoinType  jointype,
SpecialJoinInfo * sjinfo,
List * restrictlist,
SemiAntiJoinFactors * semifactors 
)

Definition at line 4037 of file costsize.c.

References clauselist_selectivity(), SpecialJoinInfo::delay_upper_joins, IS_OUTER_JOIN, JOIN_ANTI, JOIN_INNER, JOIN_SEMI, SpecialJoinInfo::jointype, lappend(), lfirst_node, SpecialJoinInfo::lhs_strict, list_free(), SemiAntiJoinFactors::match_count, Max, SpecialJoinInfo::min_lefthand, SpecialJoinInfo::min_righthand, NIL, SemiAntiJoinFactors::outer_match_frac, RelOptInfo::relids, RINFO_IS_PUSHED_DOWN, RelOptInfo::rows, SpecialJoinInfo::semi_can_btree, SpecialJoinInfo::semi_can_hash, SpecialJoinInfo::semi_operators, SpecialJoinInfo::semi_rhs_exprs, SpecialJoinInfo::syn_lefthand, SpecialJoinInfo::syn_righthand, T_SpecialJoinInfo, and SpecialJoinInfo::type.

Referenced by add_paths_to_joinrel().

4045 {
4046  Selectivity jselec;
4047  Selectivity nselec;
4048  Selectivity avgmatch;
4049  SpecialJoinInfo norm_sjinfo;
4050  List *joinquals;
4051  ListCell *l;
4052 
4053  /*
4054  * In an ANTI join, we must ignore clauses that are "pushed down", since
4055  * those won't affect the match logic. In a SEMI join, we do not
4056  * distinguish joinquals from "pushed down" quals, so just use the whole
4057  * restrictinfo list. For other outer join types, we should consider only
4058  * non-pushed-down quals, so that this devolves to an IS_OUTER_JOIN check.
4059  */
4060  if (IS_OUTER_JOIN(jointype))
4061  {
4062  joinquals = NIL;
4063  foreach(l, restrictlist)
4064  {
4065  RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
4066 
4067  if (!RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
4068  joinquals = lappend(joinquals, rinfo);
4069  }
4070  }
4071  else
4072  joinquals = restrictlist;
4073 
4074  /*
4075  * Get the JOIN_SEMI or JOIN_ANTI selectivity of the join clauses.
4076  */
4077  jselec = clauselist_selectivity(root,
4078  joinquals,
4079  0,
4080  (jointype == JOIN_ANTI) ? JOIN_ANTI : JOIN_SEMI,
4081  sjinfo);
4082 
4083  /*
4084  * Also get the normal inner-join selectivity of the join clauses.
4085  */
4086  norm_sjinfo.type = T_SpecialJoinInfo;
4087  norm_sjinfo.min_lefthand = outerrel->relids;
4088  norm_sjinfo.min_righthand = innerrel->relids;
4089  norm_sjinfo.syn_lefthand = outerrel->relids;
4090  norm_sjinfo.syn_righthand = innerrel->relids;
4091  norm_sjinfo.jointype = JOIN_INNER;
4092  /* we don't bother trying to make the remaining fields valid */
4093  norm_sjinfo.lhs_strict = false;
4094  norm_sjinfo.delay_upper_joins = false;
4095  norm_sjinfo.semi_can_btree = false;
4096  norm_sjinfo.semi_can_hash = false;
4097  norm_sjinfo.semi_operators = NIL;
4098  norm_sjinfo.semi_rhs_exprs = NIL;
4099 
4100  nselec = clauselist_selectivity(root,
4101  joinquals,
4102  0,
4103  JOIN_INNER,
4104  &norm_sjinfo);
4105 
4106  /* Avoid leaking a lot of ListCells */
4107  if (IS_OUTER_JOIN(jointype))
4108  list_free(joinquals);
4109 
4110  /*
4111  * jselec can be interpreted as the fraction of outer-rel rows that have
4112  * any matches (this is true for both SEMI and ANTI cases). And nselec is
4113  * the fraction of the Cartesian product that matches. So, the average
4114  * number of matches for each outer-rel row that has at least one match is
4115  * nselec * inner_rows / jselec.
4116  *
4117  * Note: it is correct to use the inner rel's "rows" count here, even
4118  * though we might later be considering a parameterized inner path with
4119  * fewer rows. This is because we have included all the join clauses in
4120  * the selectivity estimate.
4121  */
4122  if (jselec > 0) /* protect against zero divide */
4123  {
4124  avgmatch = nselec * innerrel->rows / jselec;
4125  /* Clamp to sane range */
4126  avgmatch = Max(1.0, avgmatch);
4127  }
4128  else
4129  avgmatch = 1.0;
4130 
4131  semifactors->outer_match_frac = jselec;
4132  semifactors->match_count = avgmatch;
4133 }
#define NIL
Definition: pg_list.h:69
bool semi_can_btree
Definition: relation.h:2074
Relids min_righthand
Definition: relation.h:2067
Selectivity outer_match_frac
Definition: relation.h:2291
NodeTag type
Definition: relation.h:2065
#define IS_OUTER_JOIN(jointype)
Definition: nodes.h:730
double Selectivity
Definition: nodes.h:647
Relids syn_lefthand
Definition: relation.h:2068
Relids syn_righthand
Definition: relation.h:2069
#define RINFO_IS_PUSHED_DOWN(rinfo, joinrelids)
Definition: relation.h:1957
List * semi_rhs_exprs
Definition: relation.h:2077
bool semi_can_hash
Definition: relation.h:2075
#define lfirst_node(type, lc)
Definition: pg_list.h:109
Relids relids
Definition: relation.h:612
List * lappend(List *list, void *datum)
Definition: list.c:128
bool delay_upper_joins
Definition: relation.h:2072
double rows
Definition: relation.h:615
#define Max(x, y)
Definition: c.h:851
JoinType jointype
Definition: relation.h:2070
Selectivity match_count
Definition: relation.h:2292
List * semi_operators
Definition: relation.h:2076
void list_free(List *list)
Definition: list.c:1133
Selectivity clauselist_selectivity(PlannerInfo *root, List *clauses, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
Definition: clausesel.c:99
Definition: pg_list.h:45
Relids min_lefthand
Definition: relation.h:2066
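Worked example (illustrative numbers, not from the page): if jselec = 0.2 (20% of outer rows have at least one match), nselec = 0.001 and the inner rel has 1000 rows, then avgmatch = 0.001 * 1000 / 0.2 = 5, i.e. each matched outer row is expected to find about five inner matches; outer_match_frac is set to 0.2 and match_count to 5.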

◆ cost_agg()

void cost_agg ( Path * path,
PlannerInfo * root,
AggStrategy  aggstrategy,
const AggClauseCosts * aggcosts,
int  numGroupCols,
double  numGroups,
List * quals,
Cost  input_startup_cost,
Cost  input_total_cost,
double  input_tuples 
)

Definition at line 2060 of file costsize.c.

References AGG_HASHED, AGG_MIXED, AGG_PLAIN, AGG_SORTED, Assert, clamp_row_est(), clauselist_selectivity(), cost_qual_eval(), cpu_operator_cost, cpu_tuple_cost, disable_cost, enable_hashagg, AggClauseCosts::finalCost, JOIN_INNER, MemSet, QualCost::per_tuple, Path::rows, QualCost::startup, Path::startup_cost, Path::total_cost, and AggClauseCosts::transCost.

Referenced by choose_hashed_setop(), create_agg_path(), create_groupingsets_path(), and create_unique_path().

2066 {
2067  double output_tuples;
2068  Cost startup_cost;
2069  Cost total_cost;
2070  AggClauseCosts dummy_aggcosts;
2071 
2072  /* Use all-zero per-aggregate costs if NULL is passed */
2073  if (aggcosts == NULL)
2074  {
2075  Assert(aggstrategy == AGG_HASHED);
2076  MemSet(&dummy_aggcosts, 0, sizeof(AggClauseCosts));
2077  aggcosts = &dummy_aggcosts;
2078  }
2079 
2080  /*
2081  * The transCost.per_tuple component of aggcosts should be charged once
2082  * per input tuple, corresponding to the costs of evaluating the aggregate
2083  * transfns and their input expressions (with any startup cost of course
2084  * charged but once). The finalCost component is charged once per output
2085  * tuple, corresponding to the costs of evaluating the finalfns.
2086  *
2087  * If we are grouping, we charge an additional cpu_operator_cost per
2088  * grouping column per input tuple for grouping comparisons.
2089  *
2090  * We will produce a single output tuple if not grouping, and a tuple per
2091  * group otherwise. We charge cpu_tuple_cost for each output tuple.
2092  *
2093  * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
2094  * same total CPU cost, but AGG_SORTED has lower startup cost. If the
2095  * input path is already sorted appropriately, AGG_SORTED should be
2096  * preferred (since it has no risk of memory overflow). This will happen
2097  * as long as the computed total costs are indeed exactly equal --- but if
2098  * there's roundoff error we might do the wrong thing. So be sure that
2099  * the computations below form the same intermediate values in the same
2100  * order.
2101  */
2102  if (aggstrategy == AGG_PLAIN)
2103  {
2104  startup_cost = input_total_cost;
2105  startup_cost += aggcosts->transCost.startup;
2106  startup_cost += aggcosts->transCost.per_tuple * input_tuples;
2107  startup_cost += aggcosts->finalCost;
2108  /* we aren't grouping */
2109  total_cost = startup_cost + cpu_tuple_cost;
2110  output_tuples = 1;
2111  }
2112  else if (aggstrategy == AGG_SORTED || aggstrategy == AGG_MIXED)
2113  {
2114  /* Here we are able to deliver output on-the-fly */
2115  startup_cost = input_startup_cost;
2116  total_cost = input_total_cost;
2117  if (aggstrategy == AGG_MIXED && !enable_hashagg)
2118  {
2119  startup_cost += disable_cost;
2120  total_cost += disable_cost;
2121  }
2122  /* calcs phrased this way to match HASHED case, see note above */
2123  total_cost += aggcosts->transCost.startup;
2124  total_cost += aggcosts->transCost.per_tuple * input_tuples;
2125  total_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
2126  total_cost += aggcosts->finalCost * numGroups;
2127  total_cost += cpu_tuple_cost * numGroups;
2128  output_tuples = numGroups;
2129  }
2130  else
2131  {
2132  /* must be AGG_HASHED */
2133  startup_cost = input_total_cost;
2134  if (!enable_hashagg)
2135  startup_cost += disable_cost;
2136  startup_cost += aggcosts->transCost.startup;
2137  startup_cost += aggcosts->transCost.per_tuple * input_tuples;
2138  startup_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
2139  total_cost = startup_cost;
2140  total_cost += aggcosts->finalCost * numGroups;
2141  total_cost += cpu_tuple_cost * numGroups;
2142  output_tuples = numGroups;
2143  }
2144 
2145  /*
2146  * If there are quals (HAVING quals), account for their cost and
2147  * selectivity.
2148  */
2149  if (quals)
2150  {
2151  QualCost qual_cost;
2152 
2153  cost_qual_eval(&qual_cost, quals, root);
2154  startup_cost += qual_cost.startup;
2155  total_cost += qual_cost.startup + output_tuples * qual_cost.per_tuple;
2156 
2157  output_tuples = clamp_row_est(output_tuples *
2158  clauselist_selectivity(root,
2159  quals,
2160  0,
2161  JOIN_INNER,
2162  NULL));
2163  }
2164 
2165  path->rows = output_tuples;
2166  path->startup_cost = startup_cost;
2167  path->total_cost = total_cost;
2168 }
#define MemSet(start, val, len)
Definition: c.h:908
QualCost transCost
Definition: relation.h:63
Cost startup
Definition: relation.h:46
Cost per_tuple
Definition: relation.h:47
void cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
Definition: costsize.c:3716
Cost startup_cost
Definition: relation.h:1089
Cost disable_cost
Definition: costsize.c:121
double cpu_operator_cost
Definition: costsize.c:115
Cost finalCost
Definition: relation.h:64
Cost total_cost
Definition: relation.h:1090
#define Assert(condition)
Definition: c.h:699
double rows
Definition: relation.h:1088
double cpu_tuple_cost
Definition: costsize.c:113
bool enable_hashagg
Definition: costsize.c:131
Selectivity clauselist_selectivity(PlannerInfo *root, List *clauses, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
Definition: clausesel.c:99
double clamp_row_est(double nrows)
Definition: costsize.c:188
double Cost
Definition: nodes.h:648
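Worked example for the AGG_HASHED branch (the input sizes are illustrative, and the default cost parameters cpu_operator_cost = 0.0025 and cpu_tuple_cost = 0.01 are assumed): with 100000 input tuples, 2 grouping columns, 100 groups, transCost.per_tuple = 0.0025 and finalCost = 0.0025, startup_cost = input_total_cost + 0.0025 * 100000 + (0.0025 * 2) * 100000 = input_total_cost + 750, and total_cost adds a further 0.0025 * 100 + 0.01 * 100 = 1.25 for the output groups.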

◆ cost_append()

void cost_append ( AppendPath * apath)

Definition at line 1843 of file costsize.c.

References APPEND_CPU_COST_MULTIPLIER, append_nonpartial_cost(), clamp_row_est(), cpu_tuple_cost, AppendPath::first_partial_path, get_parallel_divisor(), i, lfirst, linitial, Min, NIL, Path::parallel_aware, Path::parallel_workers, AppendPath::path, Path::rows, Path::startup_cost, subpath(), AppendPath::subpaths, and Path::total_cost.

Referenced by create_append_path().

1844 {
1845  ListCell *l;
1846 
1847  apath->path.startup_cost = 0;
1848  apath->path.total_cost = 0;
1849 
1850  if (apath->subpaths == NIL)
1851  return;
1852 
1853  if (!apath->path.parallel_aware)
1854  {
1855  Path *subpath = (Path *) linitial(apath->subpaths);
1856 
1857  /*
1858  * Startup cost of non-parallel-aware Append is the startup cost of
1859  * first subpath.
1860  */
1861  apath->path.startup_cost = subpath->startup_cost;
1862 
1863  /* Compute rows and costs as sums of subplan rows and costs. */
1864  foreach(l, apath->subpaths)
1865  {
1866  Path *subpath = (Path *) lfirst(l);
1867 
1868  apath->path.rows += subpath->rows;
1869  apath->path.total_cost += subpath->total_cost;
1870  }
1871  }
1872  else /* parallel-aware */
1873  {
1874  int i = 0;
1875  double parallel_divisor = get_parallel_divisor(&apath->path);
1876 
1877  /* Calculate startup cost. */
1878  foreach(l, apath->subpaths)
1879  {
1880  Path *subpath = (Path *) lfirst(l);
1881 
1882  /*
1883  * Append will start returning tuples when the child node having
1884  * lowest startup cost is done setting up. We consider only the
1885  * first few subplans that immediately get a worker assigned.
1886  */
1887  if (i == 0)
1888  apath->path.startup_cost = subpath->startup_cost;
1889  else if (i < apath->path.parallel_workers)
1890  apath->path.startup_cost = Min(apath->path.startup_cost,
1891  subpath->startup_cost);
1892 
1893  /*
1894  * Apply parallel divisor to subpaths. Scale the number of rows
1895  * for each partial subpath based on the ratio of the parallel
1896  * divisor originally used for the subpath to the one we adopted.
1897  * Also add the cost of partial paths to the total cost, but
1898  * ignore non-partial paths for now.
1899  */
1900  if (i < apath->first_partial_path)
1901  apath->path.rows += subpath->rows / parallel_divisor;
1902  else
1903  {
1904  double subpath_parallel_divisor;
1905 
1906  subpath_parallel_divisor = get_parallel_divisor(subpath);
1907  apath->path.rows += subpath->rows * (subpath_parallel_divisor /
1908  parallel_divisor);
1909  apath->path.total_cost += subpath->total_cost;
1910  }
1911 
1912  apath->path.rows = clamp_row_est(apath->path.rows);
1913 
1914  i++;
1915  }
1916 
1917  /* Add cost for non-partial subpaths. */
1918  apath->path.total_cost +=
1919  append_nonpartial_cost(apath->subpaths,
1920  apath->first_partial_path,
1921  apath->path.parallel_workers);
1922  }
1923 
1924  /*
1925  * Although Append does not do any selection or projection, it's not free;
1926  * add a small per-tuple overhead.
1927  */
1928  apath->path.total_cost +=
1929  cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * apath->path.rows;
1930 }
#define NIL
Definition: pg_list.h:69
#define Min(x, y)
Definition: c.h:857
int parallel_workers
Definition: relation.h:1085
static Cost append_nonpartial_cost(List *subpaths, int numpaths, int parallel_workers)
Definition: costsize.c:1767
Path path
Definition: relation.h:1304
int first_partial_path
Definition: relation.h:1310
List * subpaths
Definition: relation.h:1307
#define linitial(l)
Definition: pg_list.h:111
Cost startup_cost
Definition: relation.h:1089
static double get_parallel_divisor(Path *path)
Definition: costsize.c:5350
#define APPEND_CPU_COST_MULTIPLIER
Definition: costsize.c:108
Cost total_cost
Definition: relation.h:1090
#define lfirst(lc)
Definition: pg_list.h:106
double rows
Definition: relation.h:1088
double cpu_tuple_cost
Definition: costsize.c:113
int i
bool parallel_aware
Definition: relation.h:1083
double clamp_row_est(double nrows)
Definition: costsize.c:188
Datum subpath(PG_FUNCTION_ARGS)
Definition: ltree_op.c:234
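As a rough sense of scale for that final per-tuple overhead (the row count is an assumption, and the default cpu_tuple_cost = 0.01 is assumed): an Append returning 1,000,000 rows is charged 0.01 * APPEND_CPU_COST_MULTIPLIER * 1000000 = 5000 cost units.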

◆ cost_bitmap_and_node()

void cost_bitmap_and_node ( BitmapAndPath * path,
PlannerInfo * root 
)

Definition at line 1089 of file costsize.c.

References BitmapAndPath::bitmapquals, BitmapAndPath::bitmapselectivity, cost_bitmap_tree_node(), cpu_operator_cost, lfirst, list_head(), BitmapAndPath::path, Path::rows, Path::startup_cost, subpath(), and Path::total_cost.

Referenced by bitmap_and_cost_est(), and create_bitmap_and_path().

1090 {
1091  Cost totalCost;
1092  Selectivity selec;
1093  ListCell *l;
1094 
1095  /*
1096  * We estimate AND selectivity on the assumption that the inputs are
1097  * independent. This is probably often wrong, but we don't have the info
1098  * to do better.
1099  *
1100  * The runtime cost of the BitmapAnd itself is estimated at 100x
1101  * cpu_operator_cost for each tbm_intersect needed. Probably too small,
1102  * definitely too simplistic?
1103  */
1104  totalCost = 0.0;
1105  selec = 1.0;
1106  foreach(l, path->bitmapquals)
1107  {
1108  Path *subpath = (Path *) lfirst(l);
1109  Cost subCost;
1110  Selectivity subselec;
1111 
1112  cost_bitmap_tree_node(subpath, &subCost, &subselec);
1113 
1114  selec *= subselec;
1115 
1116  totalCost += subCost;
1117  if (l != list_head(path->bitmapquals))
1118  totalCost += 100.0 * cpu_operator_cost;
1119  }
1120  path->bitmapselectivity = selec;
1121  path->path.rows = 0; /* per above, not used */
1122  path->path.startup_cost = totalCost;
1123  path->path.total_cost = totalCost;
1124 }
double Selectivity
Definition: nodes.h:647
Selectivity bitmapselectivity
Definition: relation.h:1199
List * bitmapquals
Definition: relation.h:1198
Cost startup_cost
Definition: relation.h:1089
static ListCell * list_head(const List *l)
Definition: pg_list.h:77
double cpu_operator_cost
Definition: costsize.c:115
Cost total_cost
Definition: relation.h:1090
#define lfirst(lc)
Definition: pg_list.h:106
double rows
Definition: relation.h:1088
double Cost
Definition: nodes.h:648
Datum subpath(PG_FUNCTION_ARGS)
Definition: ltree_op.c:234
void cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
Definition: costsize.c:1046
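Worked example (illustrative selectivities): ANDing two bitmap inputs with selectivities 0.1 and 0.2 gives bitmapselectivity = 0.1 * 0.2 = 0.02, and the single tbm_intersect needed adds 100 * cpu_operator_cost (0.25 with the assumed default cpu_operator_cost = 0.0025) on top of the two input costs.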

◆ cost_bitmap_heap_scan()

void cost_bitmap_heap_scan ( Path * path,
PlannerInfo * root,
RelOptInfo * baserel,
ParamPathInfo * param_info,
Path * bitmapqual,
double  loop_count 
)

Definition at line 945 of file costsize.c.

References Assert, clamp_row_est(), compute_bitmap_pages(), PathTarget::cost, cpu_tuple_cost, disable_cost, enable_bitmapscan, get_parallel_divisor(), get_restriction_qual_cost(), get_tablespace_page_costs(), IsA, RelOptInfo::pages, Path::parallel_workers, Path::pathtarget, QualCost::per_tuple, ParamPathInfo::ppi_rows, RelOptInfo::relid, RelOptInfo::reltablespace, RelOptInfo::rows, Path::rows, RTE_RELATION, RelOptInfo::rtekind, QualCost::startup, Path::startup_cost, T, and Path::total_cost.

Referenced by bitmap_and_cost_est(), bitmap_scan_cost_est(), and create_bitmap_heap_path().

948 {
949  Cost startup_cost = 0;
950  Cost run_cost = 0;
951  Cost indexTotalCost;
952  QualCost qpqual_cost;
953  Cost cpu_per_tuple;
954  Cost cost_per_page;
955  Cost cpu_run_cost;
956  double tuples_fetched;
957  double pages_fetched;
958  double spc_seq_page_cost,
959  spc_random_page_cost;
960  double T;
961 
962  /* Should only be applied to base relations */
963  Assert(IsA(baserel, RelOptInfo));
964  Assert(baserel->relid > 0);
965  Assert(baserel->rtekind == RTE_RELATION);
966 
967  /* Mark the path with the correct row estimate */
968  if (param_info)
969  path->rows = param_info->ppi_rows;
970  else
971  path->rows = baserel->rows;
972 
973  if (!enable_bitmapscan)
974  startup_cost += disable_cost;
975 
976  pages_fetched = compute_bitmap_pages(root, baserel, bitmapqual,
977  loop_count, &indexTotalCost,
978  &tuples_fetched);
979 
980  startup_cost += indexTotalCost;
981  T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
982 
983  /* Fetch estimated page costs for tablespace containing table. */
984  get_tablespace_page_costs(baserel->reltablespace,
985  &spc_random_page_cost,
986  &spc_seq_page_cost);
987 
988  /*
989  * For small numbers of pages we should charge spc_random_page_cost
990  * apiece, while if nearly all the table's pages are being read, it's more
991  * appropriate to charge spc_seq_page_cost apiece. The effect is
992  * nonlinear, too. For lack of a better idea, interpolate like this to
993  * determine the cost per page.
994  */
995  if (pages_fetched >= 2.0)
996  cost_per_page = spc_random_page_cost -
997  (spc_random_page_cost - spc_seq_page_cost)
998  * sqrt(pages_fetched / T);
999  else
1000  cost_per_page = spc_random_page_cost;
1001 
1002  run_cost += pages_fetched * cost_per_page;
1003 
1004  /*
1005  * Estimate CPU costs per tuple.
1006  *
1007  * Often the indexquals don't need to be rechecked at each tuple ... but
1008  * not always, especially not if there are enough tuples involved that the
1009  * bitmaps become lossy. For the moment, just assume they will be
1010  * rechecked always. This means we charge the full freight for all the
1011  * scan clauses.
1012  */
1013  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1014 
1015  startup_cost += qpqual_cost.startup;
1016  cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1017  cpu_run_cost = cpu_per_tuple * tuples_fetched;
1018 
1019  /* Adjust costing for parallelism, if used. */
1020  if (path->parallel_workers > 0)
1021  {
1022  double parallel_divisor = get_parallel_divisor(path);
1023 
1024  /* The CPU cost is divided among all the workers. */
1025  cpu_run_cost /= parallel_divisor;
1026 
1027  path->rows = clamp_row_est(path->rows / parallel_divisor);
1028  }
1029 
1030 
1031  run_cost += cpu_run_cost;
1032 
1033  /* tlist eval costs are paid per output row, not per tuple scanned */
1034  startup_cost += path->pathtarget->cost.startup;
1035  run_cost += path->pathtarget->cost.per_tuple * path->rows;
1036 
1037  path->startup_cost = startup_cost;
1038  path->total_cost = startup_cost + run_cost;
1039 }
#define IsA(nodeptr, _type_)
Definition: nodes.h:568
PathTarget * pathtarget
Definition: relation.h:1079
Oid reltablespace
Definition: relation.h:641
int parallel_workers
Definition: relation.h:1085
Cost startup
Definition: relation.h:46
Cost per_tuple
Definition: relation.h:47
Cost startup_cost
Definition: relation.h:1089
Cost disable_cost
Definition: costsize.c:121
static const uint32 T[65]
Definition: md5.c:101
static double get_parallel_divisor(Path *path)
Definition: costsize.c:5350
void get_tablespace_page_costs(Oid spcid, double *spc_random_page_cost, double *spc_seq_page_cost)
Definition: spccache.c:182
Index relid
Definition: relation.h:640
bool enable_bitmapscan
Definition: costsize.c:128
RTEKind rtekind
Definition: relation.h:642
double rows
Definition: relation.h:615
Cost total_cost
Definition: relation.h:1090
static void get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info, QualCost *qpqual_cost)
Definition: costsize.c:3995
BlockNumber pages
Definition: relation.h:651
#define Assert(condition)
Definition: c.h:699
double rows
Definition: relation.h:1088
QualCost cost
Definition: relation.h:1010
double cpu_tuple_cost
Definition: costsize.c:113
double ppi_rows
Definition: relation.h:1038
double clamp_row_est(double nrows)
Definition: costsize.c:188
double compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel, Path *bitmapqual, int loop_count, Cost *cost, double *tuple)
Definition: costsize.c:5383
double Cost
Definition: nodes.h:648
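Worked example of the per-page interpolation above (illustrative numbers, not from the page): with spc_random_page_cost = 4.0, spc_seq_page_cost = 1.0, T = 10000 pages and pages_fetched = 2500, cost_per_page = 4.0 - (4.0 - 1.0) * sqrt(2500 / 10000) = 4.0 - 3.0 * 0.5 = 2.5.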

◆ cost_bitmap_or_node()

void cost_bitmap_or_node ( BitmapOrPath * path,
PlannerInfo * root 
)

Definition at line 1133 of file costsize.c.

References BitmapOrPath::bitmapquals, BitmapOrPath::bitmapselectivity, cost_bitmap_tree_node(), cpu_operator_cost, IsA, lfirst, list_head(), Min, BitmapOrPath::path, Path::rows, Path::startup_cost, subpath(), and Path::total_cost.

Referenced by create_bitmap_or_path().

1134 {
1135  Cost totalCost;
1136  Selectivity selec;
1137  ListCell *l;
1138 
1139  /*
1140  * We estimate OR selectivity on the assumption that the inputs are
1141  * non-overlapping, since that's often the case in "x IN (list)" type
1142  * situations. Of course, we clamp to 1.0 at the end.
1143  *
1144  * The runtime cost of the BitmapOr itself is estimated at 100x
1145  * cpu_operator_cost for each tbm_union needed. Probably too small,
1146  * definitely too simplistic? We are aware that the tbm_unions are
1147  * optimized out when the inputs are BitmapIndexScans.
1148  */
1149  totalCost = 0.0;
1150  selec = 0.0;
1151  foreach(l, path->bitmapquals)
1152  {
1153  Path *subpath = (Path *) lfirst(l);
1154  Cost subCost;
1155  Selectivity subselec;
1156 
1157  cost_bitmap_tree_node(subpath, &subCost, &subselec);
1158 
1159  selec += subselec;
1160 
1161  totalCost += subCost;
1162  if (l != list_head(path->bitmapquals) &&
1163  !IsA(subpath, IndexPath))
1164  totalCost += 100.0 * cpu_operator_cost;
1165  }
1166  path->bitmapselectivity = Min(selec, 1.0);
1167  path->path.rows = 0; /* per above, not used */
1168  path->path.startup_cost = totalCost;
1169  path->path.total_cost = totalCost;
1170 }
#define IsA(nodeptr, _type_)
Definition: nodes.h:568
#define Min(x, y)
Definition: c.h:857
double Selectivity
Definition: nodes.h:647
List * bitmapquals
Definition: relation.h:1211
Cost startup_cost
Definition: relation.h:1089
static ListCell * list_head(const List *l)
Definition: pg_list.h:77
double cpu_operator_cost
Definition: costsize.c:115
Selectivity bitmapselectivity
Definition: relation.h:1212
Cost total_cost
Definition: relation.h:1090
#define lfirst(lc)
Definition: pg_list.h:106
double rows
Definition: relation.h:1088
double Cost
Definition: nodes.h:648
Datum subpath(PG_FUNCTION_ARGS)
Definition: ltree_op.c:234
void cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
Definition: costsize.c:1046

◆ cost_bitmap_tree_node()

void cost_bitmap_tree_node ( Path * path,
Cost * cost,
Selectivity * selec 
)

Definition at line 1046 of file costsize.c.

References cpu_operator_cost, elog, ERROR, IsA, nodeTag, Path::rows, and Path::total_cost.

Referenced by choose_bitmap_and(), compute_bitmap_pages(), cost_bitmap_and_node(), cost_bitmap_or_node(), and path_usage_comparator().

1047 {
1048  if (IsA(path, IndexPath))
1049  {
1050  *cost = ((IndexPath *) path)->indextotalcost;
1051  *selec = ((IndexPath *) path)->indexselectivity;
1052 
1053  /*
1054  * Charge a small amount per retrieved tuple to reflect the costs of
1055  * manipulating the bitmap. This is mostly to make sure that a bitmap
1056  * scan doesn't look to be the same cost as an indexscan to retrieve a
1057  * single tuple.
1058  */
1059  *cost += 0.1 * cpu_operator_cost * path->rows;
1060  }
1061  else if (IsA(path, BitmapAndPath))
1062  {
1063  *cost = path->total_cost;
1064  *selec = ((BitmapAndPath *) path)->bitmapselectivity;
1065  }
1066  else if (IsA(path, BitmapOrPath))
1067  {
1068  *cost = path->total_cost;
1069  *selec = ((BitmapOrPath *) path)->bitmapselectivity;
1070  }
1071  else
1072  {
1073  elog(ERROR, "unrecognized node type: %d", nodeTag(path));
1074  *cost = *selec = 0; /* keep compiler quiet */
1075  }
1076 }
#define IsA(nodeptr, _type_)
Definition: nodes.h:568
#define ERROR
Definition: elog.h:43
double cpu_operator_cost
Definition: costsize.c:115
Cost total_cost
Definition: relation.h:1090
double rows
Definition: relation.h:1088
#define nodeTag(nodeptr)
Definition: nodes.h:522
#define elog
Definition: elog.h:219

◆ cost_ctescan()

void cost_ctescan ( Path * path,
PlannerInfo * root,
RelOptInfo * baserel,
ParamPathInfo * param_info 
)

Definition at line 1500 of file costsize.c.

References Assert, PathTarget::cost, cpu_tuple_cost, get_restriction_qual_cost(), Path::pathtarget, QualCost::per_tuple, ParamPathInfo::ppi_rows, RelOptInfo::relid, RelOptInfo::rows, Path::rows, RTE_CTE, RelOptInfo::rtekind, QualCost::startup, Path::startup_cost, Path::total_cost, and RelOptInfo::tuples.

Referenced by create_ctescan_path(), and create_worktablescan_path().

1502 {
1503  Cost startup_cost = 0;
1504  Cost run_cost = 0;
1505  QualCost qpqual_cost;
1506  Cost cpu_per_tuple;
1507 
1508  /* Should only be applied to base relations that are CTEs */
1509  Assert(baserel->relid > 0);
1510  Assert(baserel->rtekind == RTE_CTE);
1511 
1512  /* Mark the path with the correct row estimate */
1513  if (param_info)
1514  path->rows = param_info->ppi_rows;
1515  else
1516  path->rows = baserel->rows;
1517 
1518  /* Charge one CPU tuple cost per row for tuplestore manipulation */
1519  cpu_per_tuple = cpu_tuple_cost;
1520 
1521  /* Add scanning CPU costs */
1522  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1523 
1524  startup_cost += qpqual_cost.startup;
1525  cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1526  run_cost += cpu_per_tuple * baserel->tuples;
1527 
1528  /* tlist eval costs are paid per output row, not per tuple scanned */
1529  startup_cost += path->pathtarget->cost.startup;
1530  run_cost += path->pathtarget->cost.per_tuple * path->rows;
1531 
1532  path->startup_cost = startup_cost;
1533  path->total_cost = startup_cost + run_cost;
1534 }

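Note that a CTE scan effectively pays two cpu_tuple_cost charges per row: one for pulling the row out of the tuplestore and one for the scan node itself, plus whatever the restriction quals cost. A minimal standalone sketch of the arithmetic (not PostgreSQL code; cpu_tuple_cost = 0.01 is the assumed default, and the row count and qual cost are invented):

#include <stdio.h>

int main(void)
{
    double cpu_tuple_cost = 0.01;
    double tuples = 10000.0;        /* rows produced by the CTE */
    double qual_per_tuple = 0.0025; /* restriction-clause cost per row */
    double qual_startup = 0.0;

    /* one cpu_tuple_cost for the tuplestore read, one for the scan node */
    double cpu_per_tuple = cpu_tuple_cost + cpu_tuple_cost + qual_per_tuple;

    double startup_cost = qual_startup;
    double run_cost = cpu_per_tuple * tuples;

    printf("startup=%.2f total=%.2f\n", startup_cost, startup_cost + run_cost);
    return 0;
}
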
◆ cost_functionscan()

void cost_functionscan ( Path path,
PlannerInfo root,
RelOptInfo baserel,
ParamPathInfo param_info 
)

Definition at line 1333 of file costsize.c.

References Assert, PathTarget::cost, cost_qual_eval_node(), cpu_tuple_cost, RangeTblEntry::functions, get_restriction_qual_cost(), Path::pathtarget, QualCost::per_tuple, planner_rt_fetch, ParamPathInfo::ppi_rows, RelOptInfo::relid, RelOptInfo::rows, Path::rows, RTE_FUNCTION, RangeTblEntry::rtekind, QualCost::startup, Path::startup_cost, Path::total_cost, and RelOptInfo::tuples.

Referenced by create_functionscan_path().

1335 {
1336  Cost startup_cost = 0;
1337  Cost run_cost = 0;
1338  QualCost qpqual_cost;
1339  Cost cpu_per_tuple;
1340  RangeTblEntry *rte;
1341  QualCost exprcost;
1342 
1343  /* Should only be applied to base relations that are functions */
1344  Assert(baserel->relid > 0);
1345  rte = planner_rt_fetch(baserel->relid, root);
1346  Assert(rte->rtekind == RTE_FUNCTION);
1347 
1348  /* Mark the path with the correct row estimate */
1349  if (param_info)
1350  path->rows = param_info->ppi_rows;
1351  else
1352  path->rows = baserel->rows;
1353 
1354  /*
1355  * Estimate costs of executing the function expression(s).
1356  *
1357  * Currently, nodeFunctionscan.c always executes the functions to
1358  * completion before returning any rows, and caches the results in a
1359  * tuplestore. So the function eval cost is all startup cost, and per-row
1360  * costs are minimal.
1361  *
1362  * XXX in principle we ought to charge tuplestore spill costs if the
1363  * number of rows is large. However, given how phony our rowcount
1364  * estimates for functions tend to be, there's not a lot of point in that
1365  * refinement right now.
1366  */
1367  cost_qual_eval_node(&exprcost, (Node *) rte->functions, root);
1368 
1369  startup_cost += exprcost.startup + exprcost.per_tuple;
1370 
1371  /* Add scanning CPU costs */
1372  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1373 
1374  startup_cost += qpqual_cost.startup;
1375  cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1376  run_cost += cpu_per_tuple * baserel->tuples;
1377 
1378  /* tlist eval costs are paid per output row, not per tuple scanned */
1379  startup_cost += path->pathtarget->cost.startup;
1380  run_cost += path->pathtarget->cost.per_tuple * path->rows;
1381 
1382  path->startup_cost = startup_cost;
1383  path->total_cost = startup_cost + run_cost;
1384 }

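Because nodeFunctionscan.c materializes the whole function result before returning anything, the function-evaluation cost is charged entirely to startup; only tuplestore reads and qual evaluation recur per row. A minimal standalone sketch (not PostgreSQL code; cpu_tuple_cost = 0.01 is the assumed default, the other numbers are invented):

#include <stdio.h>

int main(void)
{
    double cpu_tuple_cost = 0.01;
    double func_eval_cost = 25.0;   /* startup + per-tuple cost of the function expression */
    double tuples = 1000.0;         /* estimated rows returned */
    double qual_per_tuple = 0.0025;

    double startup_cost = func_eval_cost;                       /* all startup */
    double run_cost = (cpu_tuple_cost + qual_per_tuple) * tuples;

    printf("startup=%.2f total=%.2f\n", startup_cost, startup_cost + run_cost);
    return 0;
}
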
◆ cost_gather()

void cost_gather ( GatherPath path,
PlannerInfo root,
RelOptInfo rel,
ParamPathInfo param_info,
double *  rows 
)

Definition at line 364 of file costsize.c.

References parallel_setup_cost, parallel_tuple_cost, GatherPath::path, ParamPathInfo::ppi_rows, RelOptInfo::rows, Path::rows, Path::startup_cost, GatherPath::subpath, and Path::total_cost.

Referenced by create_gather_path().

367 {
368  Cost startup_cost = 0;
369  Cost run_cost = 0;
370 
371  /* Mark the path with the correct row estimate */
372  if (rows)
373  path->path.rows = *rows;
374  else if (param_info)
375  path->path.rows = param_info->ppi_rows;
376  else
377  path->path.rows = rel->rows;
378 
379  startup_cost = path->subpath->startup_cost;
380 
381  run_cost = path->subpath->total_cost - path->subpath->startup_cost;
382 
383  /* Parallel setup and communication cost. */
384  startup_cost += parallel_setup_cost;
385  run_cost += parallel_tuple_cost * path->path.rows;
386 
387  path->path.startup_cost = startup_cost;
388  path->path.total_cost = (startup_cost + run_cost);
389 }

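Gather therefore adds a flat parallel_setup_cost to the subpath's startup cost and parallel_tuple_cost per row passed through it, while the subpath's own run cost is carried over unchanged. A minimal standalone sketch (not PostgreSQL code; parallel_setup_cost = 1000 and parallel_tuple_cost = 0.1 are the assumed defaults, the subpath costs and row count are invented):

#include <stdio.h>

int main(void)
{
    double parallel_setup_cost = 1000.0;
    double parallel_tuple_cost = 0.1;
    double sub_startup = 50.0;      /* hypothetical subpath costs */
    double sub_total = 2050.0;
    double rows = 10000.0;          /* rows passed up through the Gather */

    double startup_cost = sub_startup + parallel_setup_cost;
    double run_cost = (sub_total - sub_startup) + parallel_tuple_cost * rows;

    printf("startup=%.2f total=%.2f\n", startup_cost, startup_cost + run_cost);
    return 0;
}
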
◆ cost_gather_merge()

void cost_gather_merge ( GatherMergePath path,
PlannerInfo root,
RelOptInfo rel,
ParamPathInfo param_info,
Cost  input_startup_cost,
Cost  input_total_cost,
double *  rows 
)

Definition at line 402 of file costsize.c.

References Assert, cpu_operator_cost, disable_cost, enable_gathermerge, LOG2, GatherMergePath::num_workers, parallel_setup_cost, parallel_tuple_cost, GatherMergePath::path, ParamPathInfo::ppi_rows, RelOptInfo::rows, Path::rows, Path::startup_cost, and Path::total_cost.

Referenced by create_gather_merge_path().

406 {
407  Cost startup_cost = 0;
408  Cost run_cost = 0;
409  Cost comparison_cost;
410  double N;
411  double logN;
412 
413  /* Mark the path with the correct row estimate */
414  if (rows)
415  path->path.rows = *rows;
416  else if (param_info)
417  path->path.rows = param_info->ppi_rows;
418  else
419  path->path.rows = rel->rows;
420 
421  if (!enable_gathermerge)
422  startup_cost += disable_cost;
423 
424  /*
425  * Add one to the number of workers to account for the leader. This might
426  * be overgenerous since the leader will do less work than other workers
427  * in typical cases, but we'll go with it for now.
428  */
429  Assert(path->num_workers > 0);
430  N = (double) path->num_workers + 1;
431  logN = LOG2(N);
432 
433  /* Assumed cost per tuple comparison */
434  comparison_cost = 2.0 * cpu_operator_cost;
435 
436  /* Heap creation cost */
437  startup_cost += comparison_cost * N * logN;
438 
439  /* Per-tuple heap maintenance cost */
440  run_cost += path->path.rows * comparison_cost * logN;
441 
442  /* small cost for heap management, like cost_merge_append */
443  run_cost += cpu_operator_cost * path->path.rows;
444 
445  /*
446  * Parallel setup and communication cost. Since Gather Merge, unlike
447  * Gather, requires us to block until a tuple is available from every
448  * worker, we bump the IPC cost up a little bit as compared with Gather.
449  * For lack of a better idea, charge an extra 5%.
450  */
451  startup_cost += parallel_setup_cost;
452  run_cost += parallel_tuple_cost * path->path.rows * 1.05;
453 
454  path->path.startup_cost = startup_cost + input_startup_cost;
455  path->path.total_cost = (startup_cost + run_cost + input_total_cost);
456 }

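The binary-heap arithmetic above has the same shape as in cost_merge_append: building the heap costs comparison_cost * N * log2(N), each row sifted through it costs comparison_cost * log2(N), with N counting the leader as an extra worker, plus a Gather-style IPC charge bumped by 5%. A minimal standalone sketch (not PostgreSQL code; the GUC defaults cpu_operator_cost = 0.0025, parallel_setup_cost = 1000 and parallel_tuple_cost = 0.1 are assumed, and the worker and row counts are invented):

#include <math.h>
#include <stdio.h>

int main(void)
{
    double cpu_operator_cost = 0.0025;
    double parallel_setup_cost = 1000.0;
    double parallel_tuple_cost = 0.1;
    int    num_workers = 2;
    double rows = 100000.0;

    double N = (double) num_workers + 1;     /* leader counts as one more */
    double logN = log(N) / log(2.0);
    double comparison_cost = 2.0 * cpu_operator_cost;

    double startup = comparison_cost * N * logN       /* heap creation */
                   + parallel_setup_cost;
    double run = rows * comparison_cost * logN         /* heap maintenance */
               + cpu_operator_cost * rows              /* heap management  */
               + parallel_tuple_cost * rows * 1.05;    /* IPC, +5% vs Gather */

    /* the input path's startup/total costs are added on top of these */
    printf("startup=%.2f run=%.2f\n", startup, run);
    return 0;
}
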
◆ cost_group()

void cost_group ( Path path,
PlannerInfo root,
int  numGroupCols,
double  numGroups,
List quals,
Cost  input_startup_cost,
Cost  input_total_cost,
double  input_tuples 
)

Definition at line 2248 of file costsize.c.

References clamp_row_est(), clauselist_selectivity(), cost_qual_eval(), cpu_operator_cost, JOIN_INNER, QualCost::per_tuple, Path::rows, QualCost::startup, Path::startup_cost, and Path::total_cost.

Referenced by choose_hashed_setop(), and create_group_path().

2253 {
2254  double output_tuples;
2255  Cost startup_cost;
2256  Cost total_cost;
2257 
2258  output_tuples = numGroups;
2259  startup_cost = input_startup_cost;
2260  total_cost = input_total_cost;
2261 
2262  /*
2263  * Charge one cpu_operator_cost per comparison per input tuple. We assume
2264  * all columns get compared at most of the tuples.
2265  */
2266  total_cost += cpu_operator_cost * input_tuples * numGroupCols;
2267 
2268  /*
2269  * If there are quals (HAVING quals), account for their cost and
2270  * selectivity.
2271  */
2272  if (quals)
2273  {
2274  QualCost qual_cost;
2275 
2276  cost_qual_eval(&qual_cost, quals, root);
2277  startup_cost += qual_cost.startup;
2278  total_cost += qual_cost.startup + output_tuples * qual_cost.per_tuple;
2279 
 2280  output_tuples = clamp_row_est(output_tuples *
 2281  clauselist_selectivity(root,
 2282  quals,
2283  0,
2284  JOIN_INNER,
2285  NULL));
2286  }
2287 
2288  path->rows = output_tuples;
2289  path->startup_cost = startup_cost;
2290  path->total_cost = total_cost;
2291 }

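Grouping is charged as one cpu_operator_cost comparison per grouping column per input tuple; any HAVING quals are then costed per output group and scale the output row count by their selectivity. A minimal standalone sketch of the comparison charge (not PostgreSQL code; cpu_operator_cost = 0.0025 is the assumed default, the tuple and group counts are invented):

#include <stdio.h>

int main(void)
{
    double cpu_operator_cost = 0.0025;
    double input_tuples = 100000.0;
    int    numGroupCols = 2;
    double numGroups = 500.0;       /* estimated output groups */

    double total_cost = 0.0;

    /* one comparison per grouping column per input tuple */
    total_cost += cpu_operator_cost * input_tuples * numGroupCols;

    /* HAVING quals, if present, would add their eval cost per output group
     * and reduce numGroups by their selectivity */
    printf("comparison cost=%.2f output rows=%.0f\n", total_cost, numGroups);
    return 0;
}
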
◆ cost_index()

void cost_index ( IndexPath path,
PlannerInfo root,
double  loop_count,
bool  partial_path 
)

Definition at line 477 of file costsize.c.

References RelOptInfo::allvisfrac, IndexOptInfo::amcostestimate, Assert, clamp_row_est(), compute_parallel_worker(), PathTarget::cost, cost_qual_eval(), cpu_tuple_cost, disable_cost, enable_indexscan, extract_nonindex_conditions(), get_parallel_divisor(), get_tablespace_page_costs(), index_pages_fetched(), IndexPath::indexinfo, IndexPath::indexquals, IndexPath::indexselectivity, IndexPath::indextotalcost, IndexOptInfo::indrestrictinfo, IsA, list_concat(), max_parallel_workers_per_gather, RelOptInfo::pages, IndexOptInfo::pages, Path::parallel_aware, Path::parallel_workers, Path::param_info, IndexPath::path, Path::pathtarget, Path::pathtype, QualCost::per_tuple, ParamPathInfo::ppi_clauses, ParamPathInfo::ppi_rows, IndexOptInfo::rel, RelOptInfo::relid, RelOptInfo::reltablespace, RelOptInfo::rows, Path::rows, RTE_RELATION, RelOptInfo::rtekind, QualCost::startup, Path::startup_cost, T_IndexOnlyScan, Path::total_cost, and RelOptInfo::tuples.

Referenced by create_index_path(), and reparameterize_path().

479 {
480  IndexOptInfo *index = path->indexinfo;
481  RelOptInfo *baserel = index->rel;
482  bool indexonly = (path->path.pathtype == T_IndexOnlyScan);
483  amcostestimate_function amcostestimate;
484  List *qpquals;
485  Cost startup_cost = 0;
486  Cost run_cost = 0;
487  Cost cpu_run_cost = 0;
488  Cost indexStartupCost;
489  Cost indexTotalCost;
490  Selectivity indexSelectivity;
491  double indexCorrelation,
492  csquared;
493  double spc_seq_page_cost,
494  spc_random_page_cost;
495  Cost min_IO_cost,
496  max_IO_cost;
497  QualCost qpqual_cost;
498  Cost cpu_per_tuple;
499  double tuples_fetched;
500  double pages_fetched;
501  double rand_heap_pages;
502  double index_pages;
503 
504  /* Should only be applied to base relations */
505  Assert(IsA(baserel, RelOptInfo) &&
506  IsA(index, IndexOptInfo));
507  Assert(baserel->relid > 0);
508  Assert(baserel->rtekind == RTE_RELATION);
509 
510  /*
511  * Mark the path with the correct row estimate, and identify which quals
512  * will need to be enforced as qpquals. We need not check any quals that
513  * are implied by the index's predicate, so we can use indrestrictinfo not
514  * baserestrictinfo as the list of relevant restriction clauses for the
515  * rel.
516  */
517  if (path->path.param_info)
518  {
519  path->path.rows = path->path.param_info->ppi_rows;
520  /* qpquals come from the rel's restriction clauses and ppi_clauses */
 521  qpquals = list_concat(
 522  extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
 523  path->indexquals),
 524  extract_nonindex_conditions(path->path.param_info->ppi_clauses,
 525  path->indexquals));
526  }
527  else
528  {
529  path->path.rows = baserel->rows;
530  /* qpquals come from just the rel's restriction clauses */
 531  qpquals = extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
 532  path->indexquals);
533  }
534 
535  if (!enable_indexscan)
536  startup_cost += disable_cost;
537  /* we don't need to check enable_indexonlyscan; indxpath.c does that */
538 
539  /*
540  * Call index-access-method-specific code to estimate the processing cost
541  * for scanning the index, as well as the selectivity of the index (ie,
542  * the fraction of main-table tuples we will have to retrieve) and its
543  * correlation to the main-table tuple order. We need a cast here because
544  * relation.h uses a weak function type to avoid including amapi.h.
545  */
546  amcostestimate = (amcostestimate_function) index->amcostestimate;
547  amcostestimate(root, path, loop_count,
548  &indexStartupCost, &indexTotalCost,
549  &indexSelectivity, &indexCorrelation,
550  &index_pages);
551 
552  /*
553  * Save amcostestimate's results for possible use in bitmap scan planning.
554  * We don't bother to save indexStartupCost or indexCorrelation, because a
555  * bitmap scan doesn't care about either.
556  */
557  path->indextotalcost = indexTotalCost;
558  path->indexselectivity = indexSelectivity;
559 
560  /* all costs for touching index itself included here */
561  startup_cost += indexStartupCost;
562  run_cost += indexTotalCost - indexStartupCost;
563 
564  /* estimate number of main-table tuples fetched */
565  tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
566 
567  /* fetch estimated page costs for tablespace containing table */
 568  get_tablespace_page_costs(baserel->reltablespace,
 569  &spc_random_page_cost,
570  &spc_seq_page_cost);
571 
572  /*----------
573  * Estimate number of main-table pages fetched, and compute I/O cost.
574  *
575  * When the index ordering is uncorrelated with the table ordering,
576  * we use an approximation proposed by Mackert and Lohman (see
577  * index_pages_fetched() for details) to compute the number of pages
578  * fetched, and then charge spc_random_page_cost per page fetched.
579  *
580  * When the index ordering is exactly correlated with the table ordering
581  * (just after a CLUSTER, for example), the number of pages fetched should
582  * be exactly selectivity * table_size. What's more, all but the first
583  * will be sequential fetches, not the random fetches that occur in the
584  * uncorrelated case. So if the number of pages is more than 1, we
585  * ought to charge
586  * spc_random_page_cost + (pages_fetched - 1) * spc_seq_page_cost
587  * For partially-correlated indexes, we ought to charge somewhere between
588  * these two estimates. We currently interpolate linearly between the
589  * estimates based on the correlation squared (XXX is that appropriate?).
590  *
591  * If it's an index-only scan, then we will not need to fetch any heap
592  * pages for which the visibility map shows all tuples are visible.
593  * Hence, reduce the estimated number of heap fetches accordingly.
594  * We use the measured fraction of the entire heap that is all-visible,
595  * which might not be particularly relevant to the subset of the heap
596  * that this query will fetch; but it's not clear how to do better.
597  *----------
598  */
599  if (loop_count > 1)
600  {
601  /*
602  * For repeated indexscans, the appropriate estimate for the
603  * uncorrelated case is to scale up the number of tuples fetched in
604  * the Mackert and Lohman formula by the number of scans, so that we
605  * estimate the number of pages fetched by all the scans; then
606  * pro-rate the costs for one scan. In this case we assume all the
607  * fetches are random accesses.
608  */
609  pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
610  baserel->pages,
611  (double) index->pages,
612  root);
613 
614  if (indexonly)
615  pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
616 
617  rand_heap_pages = pages_fetched;
618 
619  max_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
620 
621  /*
622  * In the perfectly correlated case, the number of pages touched by
623  * each scan is selectivity * table_size, and we can use the Mackert
624  * and Lohman formula at the page level to estimate how much work is
625  * saved by caching across scans. We still assume all the fetches are
626  * random, though, which is an overestimate that's hard to correct for
627  * without double-counting the cache effects. (But in most cases
628  * where such a plan is actually interesting, only one page would get
629  * fetched per scan anyway, so it shouldn't matter much.)
630  */
631  pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
632 
633  pages_fetched = index_pages_fetched(pages_fetched * loop_count,
634  baserel->pages,
635  (double) index->pages,
636  root);
637 
638  if (indexonly)
639  pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
640 
641  min_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
642  }
643  else
644  {
645  /*
646  * Normal case: apply the Mackert and Lohman formula, and then
647  * interpolate between that and the correlation-derived result.
648  */
649  pages_fetched = index_pages_fetched(tuples_fetched,
650  baserel->pages,
651  (double) index->pages,
652  root);
653 
654  if (indexonly)
655  pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
656 
657  rand_heap_pages = pages_fetched;
658 
659  /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
660  max_IO_cost = pages_fetched * spc_random_page_cost;
661 
662  /* min_IO_cost is for the perfectly correlated case (csquared=1) */
663  pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
664 
665  if (indexonly)
666  pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
667 
668  if (pages_fetched > 0)
669  {
670  min_IO_cost = spc_random_page_cost;
671  if (pages_fetched > 1)
672  min_IO_cost += (pages_fetched - 1) * spc_seq_page_cost;
673  }
674  else
675  min_IO_cost = 0;
676  }
677 
678  if (partial_path)
679  {
680  /*
681  * For index only scans compute workers based on number of index pages
682  * fetched; the number of heap pages we fetch might be so small as to
683  * effectively rule out parallelism, which we don't want to do.
684  */
685  if (indexonly)
686  rand_heap_pages = -1;
687 
688  /*
689  * Estimate the number of parallel workers required to scan index. Use
690  * the number of heap pages computed considering heap fetches won't be
691  * sequential as for parallel scans the pages are accessed in random
692  * order.
693  */
 694  path->path.parallel_workers = compute_parallel_worker(baserel,
 695  rand_heap_pages,
 696  index_pages,
 697  max_parallel_workers_per_gather);
 698 
699  /*
700  * Fall out if workers can't be assigned for parallel scan, because in
701  * such a case this path will be rejected. So there is no benefit in
702  * doing extra computation.
703  */
704  if (path->path.parallel_workers <= 0)
705  return;
706 
707  path->path.parallel_aware = true;
708  }
709 
710  /*
711  * Now interpolate based on estimated index order correlation to get total
712  * disk I/O cost for main table accesses.
713  */
714  csquared = indexCorrelation * indexCorrelation;
715 
716  run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
717 
718  /*
719  * Estimate CPU costs per tuple.
720  *
721  * What we want here is cpu_tuple_cost plus the evaluation costs of any
722  * qual clauses that we have to evaluate as qpquals.
723  */
724  cost_qual_eval(&qpqual_cost, qpquals, root);
725 
726  startup_cost += qpqual_cost.startup;
727  cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
728 
729  cpu_run_cost += cpu_per_tuple * tuples_fetched;
730 
731  /* tlist eval costs are paid per output row, not per tuple scanned */
732  startup_cost += path->path.pathtarget->cost.startup;
733  cpu_run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
734 
735  /* Adjust costing for parallelism, if used. */
736  if (path->path.parallel_workers > 0)
737  {
738  double parallel_divisor = get_parallel_divisor(&path->path);
739 
740  path->path.rows = clamp_row_est(path->path.rows / parallel_divisor);
741 
742  /* The CPU cost is divided among all the workers. */
743  cpu_run_cost /= parallel_divisor;
744  }
745 
746  run_cost += cpu_run_cost;
747 
748  path->path.startup_cost = startup_cost;
749  path->path.total_cost = startup_cost + run_cost;
750 }

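The heart of the I/O estimate is the interpolation between the uncorrelated case (all random fetches, Mackert-Lohman page count) and the perfectly correlated case (one random fetch followed by sequential ones), weighted by the square of the index correlation. A minimal standalone sketch of that interpolation for a single scan (not PostgreSQL code; the tablespace page costs are assumed to be at their defaults, random 4.0 and seq 1.0, and the page counts and correlation are invented):

#include <stdio.h>

int main(void)
{
    double spc_random_page_cost = 4.0;
    double spc_seq_page_cost = 1.0;
    double pages_uncorrelated = 800.0;  /* Mackert-Lohman fetch estimate */
    double pages_correlated = 200.0;    /* selectivity * table pages */
    double indexCorrelation = 0.7;

    /* csquared = 0 -> all random fetches; csquared = 1 -> one random fetch
     * followed by sequential fetches */
    double max_IO_cost = pages_uncorrelated * spc_random_page_cost;
    double min_IO_cost = spc_random_page_cost +
        (pages_correlated - 1) * spc_seq_page_cost;

    double csquared = indexCorrelation * indexCorrelation;
    double io_cost = max_IO_cost + csquared * (min_IO_cost - max_IO_cost);

    printf("max=%.1f min=%.1f interpolated=%.1f\n",
           max_IO_cost, min_IO_cost, io_cost);
    return 0;
}
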
◆ cost_material()

void cost_material ( Path path,
Cost  input_startup_cost,
Cost  input_total_cost,
double  tuples,
int  width 
)

Definition at line 2006 of file costsize.c.

References cpu_operator_cost, relation_byte_size(), Path::rows, seq_page_cost, Path::startup_cost, Path::total_cost, and work_mem.

Referenced by create_material_path(), and materialize_finished_plan().

2009 {
2010  Cost startup_cost = input_startup_cost;
2011  Cost run_cost = input_total_cost - input_startup_cost;
2012  double nbytes = relation_byte_size(tuples, width);
2013  long work_mem_bytes = work_mem * 1024L;
2014 
2015  path->rows = tuples;
2016 
2017  /*
2018  * Whether spilling or not, charge 2x cpu_operator_cost per tuple to
2019  * reflect bookkeeping overhead. (This rate must be more than what
2020  * cost_rescan charges for materialize, ie, cpu_operator_cost per tuple;
2021  * if it is exactly the same then there will be a cost tie between
2022  * nestloop with A outer, materialized B inner and nestloop with B outer,
2023  * materialized A inner. The extra cost ensures we'll prefer
2024  * materializing the smaller rel.) Note that this is normally a good deal
2025  * less than cpu_tuple_cost; which is OK because a Material plan node
2026  * doesn't do qual-checking or projection, so it's got less overhead than
2027  * most plan nodes.
2028  */
2029  run_cost += 2 * cpu_operator_cost * tuples;
2030 
2031  /*
2032  * If we will spill to disk, charge at the rate of seq_page_cost per page.
2033  * This cost is assumed to be evenly spread through the plan run phase,
2034  * which isn't exactly accurate but our cost model doesn't allow for
2035  * nonuniform costs within the run phase.
2036  */
2037  if (nbytes > work_mem_bytes)
2038  {
2039  double npages = ceil(nbytes / BLCKSZ);
2040 
2041  run_cost += seq_page_cost * npages;
2042  }
2043 
2044  path->startup_cost = startup_cost;
2045  path->total_cost = startup_cost + run_cost;
2046 }

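A minimal standalone sketch of the Material surcharge (not PostgreSQL code): 2 * cpu_operator_cost per tuple for bookkeeping, plus seq_page_cost per page once the stored result exceeds work_mem. The defaults cpu_operator_cost = 0.0025, seq_page_cost = 1.0, work_mem = 4096 kB and BLCKSZ = 8192 are assumed; the row count and width are invented, and nbytes is a simplified stand-in for relation_byte_size(), which also adds per-tuple header overhead.

#include <math.h>
#include <stdio.h>

int main(void)
{
    double cpu_operator_cost = 0.0025;
    double seq_page_cost = 1.0;
    long   work_mem_bytes = 4096L * 1024L;
    double blcksz = 8192.0;

    double tuples = 200000.0;
    int    width = 64;                       /* rough bytes per row */
    double nbytes = tuples * width;          /* stand-in for relation_byte_size */

    double run_cost = 2 * cpu_operator_cost * tuples;   /* bookkeeping charge */

    if (nbytes > work_mem_bytes)             /* result will spill to disk */
        run_cost += seq_page_cost * ceil(nbytes / blcksz);

    printf("material surcharge=%.2f\n", run_cost);
    return 0;
}
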
◆ cost_merge_append()

void cost_merge_append ( Path path,
PlannerInfo root,
List pathkeys,
int  n_streams,
Cost  input_startup_cost,
Cost  input_total_cost,
double  tuples 
)

Definition at line 1957 of file costsize.c.

References APPEND_CPU_COST_MULTIPLIER, cpu_operator_cost, cpu_tuple_cost, LOG2, Path::startup_cost, and Path::total_cost.

Referenced by create_merge_append_path().

1961 {
1962  Cost startup_cost = 0;
1963  Cost run_cost = 0;
1964  Cost comparison_cost;
1965  double N;
1966  double logN;
1967 
1968  /*
1969  * Avoid log(0)...
1970  */
1971  N = (n_streams < 2) ? 2.0 : (double) n_streams;
1972  logN = LOG2(N);
1973 
1974  /* Assumed cost per tuple comparison */
1975  comparison_cost = 2.0 * cpu_operator_cost;
1976 
1977  /* Heap creation cost */
1978  startup_cost += comparison_cost * N * logN;
1979 
1980  /* Per-tuple heap maintenance cost */
1981  run_cost += tuples * comparison_cost * logN;
1982 
1983  /*
1984  * Although MergeAppend does not do any selection or projection, it's not
1985  * free; add a small per-tuple overhead.
1986  */
1987  run_cost += cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * tuples;
1988 
1989  path->startup_cost = startup_cost + input_startup_cost;
1990  path->total_cost = startup_cost + run_cost + input_total_cost;
1991 }

◆ cost_namedtuplestorescan()

void cost_namedtuplestorescan ( Path path,
PlannerInfo root,
RelOptInfo baserel,
ParamPathInfo param_info 
)

Definition at line 1541 of file costsize.c.

References Assert, cpu_tuple_cost, get_restriction_qual_cost(), QualCost::per_tuple, ParamPathInfo::ppi_rows, RelOptInfo::relid, RelOptInfo::rows, Path::rows, RTE_NAMEDTUPLESTORE, RelOptInfo::rtekind, QualCost::startup, Path::startup_cost, Path::total_cost, and RelOptInfo::tuples.

Referenced by create_namedtuplestorescan_path().

1543 {
1544  Cost startup_cost = 0;
1545  Cost run_cost = 0;
1546  QualCost qpqual_cost;
1547  Cost cpu_per_tuple;
1548 
1549  /* Should only be applied to base relations that are Tuplestores */
1550  Assert(baserel->relid > 0);
1551  Assert(baserel->rtekind == RTE_NAMEDTUPLESTORE);
1552 
1553  /* Mark the path with the correct row estimate */
1554  if (param_info)
1555  path->rows = param_info->ppi_rows;
1556  else
1557  path->rows = baserel->rows;
1558 
1559  /* Charge one CPU tuple cost per row for tuplestore manipulation */
1560  cpu_per_tuple = cpu_tuple_cost;
1561 
1562  /* Add scanning CPU costs */
1563  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1564 
1565  startup_cost += qpqual_cost.startup;
1566  cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1567  run_cost += cpu_per_tuple * baserel->tuples;
1568 
1569  path->startup_cost = startup_cost;
1570  path->total_cost = startup_cost + run_cost;
1571 }

◆ cost_qual_eval()

void cost_qual_eval ( QualCost cost,
List quals,
PlannerInfo root 
)

Definition at line 3716 of file costsize.c.

References cost_qual_eval_walker(), lfirst, QualCost::per_tuple, cost_qual_eval_context::root, QualCost::startup, and cost_qual_eval_context::total.

Referenced by cost_agg(), cost_group(), cost_index(), cost_subplan(), cost_tidscan(), create_minmaxagg_path(), create_result_path(), estimate_path_cost_size(), final_cost_hashjoin(), final_cost_mergejoin(), final_cost_nestloop(), get_restriction_qual_cost(), inline_function(), IsProjectionFunctionalIndex(), plan_cluster_use_sort(), postgresGetForeignJoinPaths(), postgresGetForeignRelSize(), set_baserel_size_estimates(), and set_foreign_size_estimates().

3717 {
3718  cost_qual_eval_context context;
3719  ListCell *l;
3720 
3721  context.root = root;
3722  context.total.startup = 0;
3723  context.total.per_tuple = 0;
3724 
3725  /* We don't charge any cost for the implicit ANDing at top level ... */
3726 
3727  foreach(l, quals)
3728  {
3729  Node *qual = (Node *) lfirst(l);
3730 
3731  cost_qual_eval_walker(qual, &context);
3732  }
3733 
3734  *cost = context.total;
3735 }

◆ cost_qual_eval_node()

void cost_qual_eval_node ( QualCost cost,
Node qual,
PlannerInfo root 
)

Definition at line 3742 of file costsize.c.

References cost_qual_eval_walker(), QualCost::per_tuple, cost_qual_eval_context::root, QualCost::startup, and cost_qual_eval_context::total.

Referenced by add_placeholders_to_joinrel(), cost_functionscan(), cost_qual_eval_walker(), cost_tablefuncscan(), cost_windowagg(), get_agg_clause_costs_walker(), make_sort_input_target(), order_qual_clauses(), orderby_operands_eval_cost(), other_operands_eval_cost(), set_pathtarget_cost_width(), and set_rel_width().

3743 {
3744  cost_qual_eval_context context;
3745 
3746  context.root = root;
3747  context.total.startup = 0;
3748  context.total.per_tuple = 0;
3749 
3750  cost_qual_eval_walker(qual, &context);
3751 
3752  *cost = context.total;
3753 }

◆ cost_qual_eval_walker()

static bool cost_qual_eval_walker ( Node node,
cost_qual_eval_context context 
)
static

Definition at line 3756 of file costsize.c.

References CoerceViaIO::arg, ArrayCoerceExpr::arg, ScalarArrayOpExpr::args, RestrictInfo::clause, cost_qual_eval_node(), cpu_operator_cost, disable_cost, ArrayCoerceExpr::elemexpr, elog, ERROR, estimate_array_length(), RestrictInfo::eval_cost, expression_tree_walker(), exprType(), get_func_cost(), get_opcode(), getTypeInputInfo(), getTypeOutputInfo(), IsA, lfirst_oid, linitial, lsecond, ScalarArrayOpExpr::opfuncid, RowCompareExpr::opnos, RestrictInfo::orclause, SubPlan::per_call_cost, QualCost::per_tuple, RestrictInfo::pseudoconstant, CoerceViaIO::resulttype, cost_qual_eval_context::root, set_opfuncid(), set_sa_opfuncid(), QualCost::startup, SubPlan::startup_cost, AlternativeSubPlan::subplans, and cost_qual_eval_context::total.

Referenced by cost_qual_eval(), and cost_qual_eval_node().

3757 {
3758  if (node == NULL)
3759  return false;
3760 
3761  /*
3762  * RestrictInfo nodes contain an eval_cost field reserved for this
3763  * routine's use, so that it's not necessary to evaluate the qual clause's
3764  * cost more than once. If the clause's cost hasn't been computed yet,
3765  * the field's startup value will contain -1.
3766  */
3767  if (IsA(node, RestrictInfo))
3768  {
3769  RestrictInfo *rinfo = (RestrictInfo *) node;
3770 
3771  if (rinfo->eval_cost.startup < 0)
3772  {
3773  cost_qual_eval_context locContext;
3774 
3775  locContext.root = context->root;
3776  locContext.total.startup = 0;
3777  locContext.total.per_tuple = 0;
3778 
3779  /*
3780  * For an OR clause, recurse into the marked-up tree so that we
3781  * set the eval_cost for contained RestrictInfos too.
3782  */
3783  if (rinfo->orclause)
3784  cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
3785  else
3786  cost_qual_eval_walker((Node *) rinfo->clause, &locContext);
3787 
3788  /*
3789  * If the RestrictInfo is marked pseudoconstant, it will be tested
3790  * only once, so treat its cost as all startup cost.
3791  */
3792  if (rinfo->pseudoconstant)
3793  {
3794  /* count one execution during startup */
3795  locContext.total.startup += locContext.total.per_tuple;
3796  locContext.total.per_tuple = 0;
3797  }
3798  rinfo->eval_cost = locContext.total;
3799  }
3800  context->total.startup += rinfo->eval_cost.startup;
3801  context->total.per_tuple += rinfo->eval_cost.per_tuple;
3802  /* do NOT recurse into children */
3803  return false;
3804  }
3805 
3806  /*
3807  * For each operator or function node in the given tree, we charge the
3808  * estimated execution cost given by pg_proc.procost (remember to multiply
3809  * this by cpu_operator_cost).
3810  *
3811  * Vars and Consts are charged zero, and so are boolean operators (AND,
3812  * OR, NOT). Simplistic, but a lot better than no model at all.
3813  *
3814  * Should we try to account for the possibility of short-circuit
3815  * evaluation of AND/OR? Probably *not*, because that would make the
3816  * results depend on the clause ordering, and we are not in any position
3817  * to expect that the current ordering of the clauses is the one that's
3818  * going to end up being used. The above per-RestrictInfo caching would
3819  * not mix well with trying to re-order clauses anyway.
3820  *
3821  * Another issue that is entirely ignored here is that if a set-returning
3822  * function is below top level in the tree, the functions/operators above
3823  * it will need to be evaluated multiple times. In practical use, such
3824  * cases arise so seldom as to not be worth the added complexity needed;
3825  * moreover, since our rowcount estimates for functions tend to be pretty
3826  * phony, the results would also be pretty phony.
3827  */
3828  if (IsA(node, FuncExpr))
3829  {
3830  context->total.per_tuple +=
3831  get_func_cost(((FuncExpr *) node)->funcid) * cpu_operator_cost;
3832  }
3833  else if (IsA(node, OpExpr) ||
3834  IsA(node, DistinctExpr) ||
3835  IsA(node, NullIfExpr))
3836  {
3837  /* rely on struct equivalence to treat these all alike */
3838  set_opfuncid((OpExpr *) node);
3839  context->total.per_tuple +=
3840  get_func_cost(((OpExpr *) node)->opfuncid) * cpu_operator_cost;
3841  }
3842  else if (IsA(node, ScalarArrayOpExpr))
3843  {
3844  /*
3845  * Estimate that the operator will be applied to about half of the
3846  * array elements before the answer is determined.
3847  */
3848  ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
3849  Node *arraynode = (Node *) lsecond(saop->args);
3850 
3851  set_sa_opfuncid(saop);
3852  context->total.per_tuple += get_func_cost(saop->opfuncid) *
3853  cpu_operator_cost * estimate_array_length(arraynode) * 0.5;
3854  }
3855  else if (IsA(node, Aggref) ||
3856  IsA(node, WindowFunc))
3857  {
3858  /*
3859  * Aggref and WindowFunc nodes are (and should be) treated like Vars,
3860  * ie, zero execution cost in the current model, because they behave
3861  * essentially like Vars at execution. We disregard the costs of
3862  * their input expressions for the same reason. The actual execution
3863  * costs of the aggregate/window functions and their arguments have to
3864  * be factored into plan-node-specific costing of the Agg or WindowAgg
3865  * plan node.
3866  */
3867  return false; /* don't recurse into children */
3868  }
3869  else if (IsA(node, CoerceViaIO))
3870  {
3871  CoerceViaIO *iocoerce = (CoerceViaIO *) node;
3872  Oid iofunc;
3873  Oid typioparam;
3874  bool typisvarlena;
3875 
3876  /* check the result type's input function */
3877  getTypeInputInfo(iocoerce->resulttype,
3878  &iofunc, &typioparam);
3879  context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
3880  /* check the input type's output function */
3881  getTypeOutputInfo(exprType((Node *) iocoerce->arg),
3882  &iofunc, &typisvarlena);
3883  context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
3884  }
3885  else if (IsA(node, ArrayCoerceExpr))
3886  {
3887  ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
3888  QualCost perelemcost;
3889 
3890  cost_qual_eval_node(&perelemcost, (Node *) acoerce->elemexpr,
3891  context->root);
3892  context->total.startup += perelemcost.startup;
3893  if (perelemcost.per_tuple > 0)
3894  context->total.per_tuple += perelemcost.per_tuple *
3895  estimate_array_length((Node *) acoerce->arg);
3896  }
3897  else if (IsA(node, RowCompareExpr))
3898  {
3899  /* Conservatively assume we will check all the columns */
3900  RowCompareExpr *rcexpr = (RowCompareExpr *) node;
3901  ListCell *lc;
3902 
3903  foreach(lc, rcexpr->opnos)
3904  {
3905  Oid opid = lfirst_oid(lc);
3906 
3907  context->total.per_tuple += get_func_cost(get_opcode(opid)) *
 3908  cpu_operator_cost;
 3909  }
3910  }
3911  else if (IsA(node, MinMaxExpr) ||
3912  IsA(node, SQLValueFunction) ||
3913  IsA(node, XmlExpr) ||
3914  IsA(node, CoerceToDomain) ||
3915  IsA(node, NextValueExpr))
3916  {
3917  /* Treat all these as having cost 1 */
3918  context->total.per_tuple += cpu_operator_cost;
3919  }
3920  else if (IsA(node, CurrentOfExpr))
3921  {
3922  /* Report high cost to prevent selection of anything but TID scan */
3923  context->total.startup += disable_cost;
3924  }
3925  else if (IsA(node, SubLink))
3926  {
3927  /* This routine should not be applied to un-planned expressions */
3928  elog(ERROR, "cannot handle unplanned sub-select");
3929  }
3930  else if (IsA(node, SubPlan))
3931  {
3932  /*
3933  * A subplan node in an expression typically indicates that the
3934  * subplan will be executed on each evaluation, so charge accordingly.
3935  * (Sub-selects that can be executed as InitPlans have already been
3936  * removed from the expression.)
3937  */
3938  SubPlan *subplan = (SubPlan *) node;
3939 
3940  context->total.startup += subplan->startup_cost;
3941  context->total.per_tuple += subplan->per_call_cost;
3942 
3943  /*
3944  * We don't want to recurse into the testexpr, because it was already
3945  * counted in the SubPlan node's costs. So we're done.
3946  */
3947  return false;
3948  }
3949  else if (IsA(node, AlternativeSubPlan))
3950  {
3951  /*
3952  * Arbitrarily use the first alternative plan for costing. (We should
3953  * certainly only include one alternative, and we don't yet have
3954  * enough information to know which one the executor is most likely to
3955  * use.)
3956  */
3957  AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;
3958 
3959  return cost_qual_eval_walker((Node *) linitial(asplan->subplans),
3960  context);
3961  }
3962  else if (IsA(node, PlaceHolderVar))
3963  {
3964  /*
3965  * A PlaceHolderVar should be given cost zero when considering general
3966  * expression evaluation costs. The expense of doing the contained
3967  * expression is charged as part of the tlist eval costs of the scan
3968  * or join where the PHV is first computed (see set_rel_width and
3969  * add_placeholders_to_joinrel). If we charged it again here, we'd be
3970  * double-counting the cost for each level of plan that the PHV
3971  * bubbles up through. Hence, return without recursing into the
3972  * phexpr.
3973  */
3974  return false;
3975  }
3976 
3977  /* recurse into children */
 3978  return expression_tree_walker(node, cost_qual_eval_walker,
 3979  (void *) context);
3980 }

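The walker's basic charging rule is pg_proc.procost scaled by cpu_operator_cost for every function or operator node, with a ScalarArrayOpExpr assumed to apply its operator to about half of the array elements. A minimal standalone sketch (not PostgreSQL code; cpu_operator_cost = 0.0025 is the assumed default, and the procost values and array length are invented):

#include <stdio.h>

int main(void)
{
    double cpu_operator_cost = 0.0025;

    double opexpr_procost = 1.0;     /* e.g. a simple comparison operator */
    double funcexpr_procost = 100.0; /* e.g. an expensive function in the qual */
    int    array_len = 10;           /* elements in a ScalarArrayOpExpr array */

    double per_tuple = 0.0;
    per_tuple += opexpr_procost * cpu_operator_cost;           /* OpExpr   */
    per_tuple += funcexpr_procost * cpu_operator_cost;         /* FuncExpr */
    /* ScalarArrayOpExpr: assume the operator stops after ~half the array */
    per_tuple += opexpr_procost * cpu_operator_cost * array_len * 0.5;

    printf("per-tuple qual cost=%.4f\n", per_tuple);
    return 0;
}
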
◆ cost_recursive_union()

void cost_recursive_union ( Path runion,
Path nrterm,
Path rterm 
)

Definition at line 1581 of file costsize.c.

References cpu_tuple_cost, Max, Path::pathtarget, Path::rows, Path::startup_cost, Path::total_cost, and PathTarget::width.

Referenced by create_recursiveunion_path().

1582 {
1583  Cost startup_cost;
1584  Cost total_cost;
1585  double total_rows;
1586 
1587  /* We probably have decent estimates for the non-recursive term */
1588  startup_cost = nrterm->startup_cost;
1589  total_cost = nrterm->total_cost;
1590  total_rows = nrterm->rows;
1591 
1592  /*
1593  * We arbitrarily assume that about 10 recursive iterations will be
1594  * needed, and that we've managed to get a good fix on the cost and output
1595  * size of each one of them. These are mighty shaky assumptions but it's
1596  * hard to see how to do better.
1597  */
1598  total_cost += 10 * rterm->total_cost;
1599  total_rows += 10 * rterm->rows;
1600 
1601  /*
1602  * Also charge cpu_tuple_cost per row to account for the costs of
1603  * manipulating the tuplestores. (We don't worry about possible
1604  * spill-to-disk costs.)
1605  */
1606  total_cost += cpu_tuple_cost * total_rows;
1607 
1608  runion->startup_cost = startup_cost;
1609  runion->total_cost = total_cost;
1610  runion->rows = total_rows;
1611  runion->pathtarget->width = Max(nrterm->pathtarget->width,
1612  rterm->pathtarget->width);
1613 }

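A minimal standalone sketch of the "about 10 recursive iterations" assumption (not PostgreSQL code; cpu_tuple_cost = 0.01 is the assumed default, and the per-term costs and row counts are invented):

#include <stdio.h>

int main(void)
{
    double cpu_tuple_cost = 0.01;
    double nrterm_cost = 100.0, nrterm_rows = 1000.0;  /* non-recursive term */
    double rterm_cost = 50.0, rterm_rows = 200.0;      /* one recursive pass */

    double total_cost = nrterm_cost + 10 * rterm_cost;
    double total_rows = nrterm_rows + 10 * rterm_rows;

    /* tuplestore manipulation, charged per row */
    total_cost += cpu_tuple_cost * total_rows;

    printf("rows=%.0f total=%.2f\n", total_rows, total_cost);
    return 0;
}
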
◆ cost_rescan()

static void cost_rescan ( PlannerInfo root,
Path path,
Cost rescan_startup_cost,
Cost rescan_total_cost 
)
static

Definition at line 3609 of file costsize.c.

References cpu_operator_cost, cpu_tuple_cost, Path::pathtype, relation_byte_size(), seq_page_cost, Path::startup_cost, T_CteScan, T_FunctionScan, T_HashJoin, T_Material, T_Sort, T_WorkTableScan, Path::total_cost, and work_mem.

Referenced by initial_cost_nestloop().

3612 {
3613  switch (path->pathtype)
3614  {
3615  case T_FunctionScan:
3616 
3617  /*
3618  * Currently, nodeFunctionscan.c always executes the function to
3619  * completion before returning any rows, and caches the results in
3620  * a tuplestore. So the function eval cost is all startup cost
3621  * and isn't paid over again on rescans. However, all run costs
3622  * will be paid over again.
3623  */
3624  *rescan_startup_cost = 0;
3625  *rescan_total_cost = path->total_cost - path->startup_cost;
3626  break;
3627  case T_HashJoin:
3628 
3629  /*
3630  * If it's a single-batch join, we don't need to rebuild the hash
3631  * table during a rescan.
3632  */
3633  if (((HashPath *) path)->num_batches == 1)
3634  {
3635  /* Startup cost is exactly the cost of hash table building */
3636  *rescan_startup_cost = 0;
3637  *rescan_total_cost = path->total_cost - path->startup_cost;
3638  }
3639  else
3640  {
3641  /* Otherwise, no special treatment */
3642  *rescan_startup_cost = path->startup_cost;
3643  *rescan_total_cost = path->total_cost;
3644  }
3645  break;
3646  case T_CteScan:
3647  case T_WorkTableScan:
3648  {
3649  /*
3650  * These plan types materialize their final result in a
3651  * tuplestore or tuplesort object. So the rescan cost is only
3652  * cpu_tuple_cost per tuple, unless the result is large enough
3653  * to spill to disk.
3654  */
3655  Cost run_cost = cpu_tuple_cost * path->rows;
3656  double nbytes = relation_byte_size(path->rows,
3657  path->pathtarget->width);
3658  long work_mem_bytes = work_mem * 1024L;
3659 
3660  if (nbytes > work_mem_bytes)
3661  {
3662  /* It will spill, so account for re-read cost */
3663  double npages = ceil(nbytes / BLCKSZ);
3664 
3665  run_cost += seq_page_cost * npages;
3666  }
3667  *rescan_startup_cost = 0;
3668  *rescan_total_cost = run_cost;
3669  }
3670  break;
3671  case T_Material:
3672  case T_Sort:
3673  {
3674  /*
3675  * These plan types not only materialize their results, but do
3676  * not implement qual filtering or projection. So they are
3677  * even cheaper to rescan than the ones above. We charge only
3678  * cpu_operator_cost per tuple. (Note: keep that in sync with
3679  * the run_cost charge in cost_sort, and also see comments in
3680  * cost_material before you change it.)
3681  */
3682  Cost run_cost = cpu_operator_cost * path->rows;
3683  double nbytes = relation_byte_size(path->rows,
3684  path->pathtarget->width);
3685  long work_mem_bytes = work_mem * 1024L;
3686 
3687  if (nbytes > work_mem_bytes)
3688  {
3689  /* It will spill, so account for re-read cost */
3690  double npages = ceil(nbytes / BLCKSZ);
3691 
3692  run_cost += seq_page_cost * npages;
3693  }
3694  *rescan_startup_cost = 0;
3695  *rescan_total_cost = run_cost;
3696  }
3697  break;
3698  default:
3699  *rescan_startup_cost = path->startup_cost;
3700  *rescan_total_cost = path->total_cost;
3701  break;
3702  }
3703 }

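For Material and Sort, a rescan is charged only cpu_operator_cost per tuple, plus seq_page_cost per page re-read if the stored result spills past work_mem. A minimal standalone sketch of that branch (not PostgreSQL code; cpu_operator_cost = 0.0025, seq_page_cost = 1.0, work_mem = 4096 kB and BLCKSZ = 8192 are assumed defaults, the row count and width are invented, and nbytes stands in for relation_byte_size()):

#include <math.h>
#include <stdio.h>

int main(void)
{
    double cpu_operator_cost = 0.0025;
    double seq_page_cost = 1.0;
    long   work_mem_bytes = 4096L * 1024L;

    double rows = 500000.0;
    int    width = 40;
    double nbytes = rows * width;          /* stand-in for relation_byte_size */

    double rescan_total = cpu_operator_cost * rows;
    if (nbytes > work_mem_bytes)           /* spilled result must be re-read */
        rescan_total += seq_page_cost * ceil(nbytes / 8192.0);

    printf("rescan startup=0 total=%.2f\n", rescan_total);
    return 0;
}
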
◆ cost_samplescan()

void cost_samplescan ( Path path,
PlannerInfo root,
RelOptInfo baserel,
ParamPathInfo param_info 
)

Definition at line 289 of file costsize.c.

References Assert, PathTarget::cost, cpu_tuple_cost, get_restriction_qual_cost(), get_tablespace_page_costs(), GetTsmRoutine(), TsmRoutine::NextSampleBlock, RelOptInfo::pages, Path::pathtarget, QualCost::per_tuple, planner_rt_fetch, ParamPathInfo::ppi_rows, RelOptInfo::relid, RelOptInfo::reltablespace, RelOptInfo::rows, Path::rows, RTE_RELATION, RangeTblEntry::rtekind, QualCost::startup, Path::startup_cost, RangeTblEntry::tablesample, Path::total_cost, TableSampleClause::tsmhandler, and RelOptInfo::tuples.

Referenced by create_samplescan_path().

291 {
292  Cost startup_cost = 0;
293  Cost run_cost = 0;
294  RangeTblEntry *rte;
295  TableSampleClause *tsc;
296  TsmRoutine *tsm;
297  double spc_seq_page_cost,
298  spc_random_page_cost,
299  spc_page_cost;
300  QualCost qpqual_cost;
301  Cost cpu_per_tuple;
302 
303  /* Should only be applied to base relations with tablesample clauses */
304  Assert(baserel->relid > 0);
305  rte = planner_rt_fetch(baserel->relid, root);
306  Assert(rte->rtekind == RTE_RELATION);
307  tsc = rte->tablesample;
308  Assert(tsc != NULL);
309  tsm = GetTsmRoutine(tsc->tsmhandler);
310 
311  /* Mark the path with the correct row estimate */
312  if (param_info)
313  path->rows = param_info->ppi_rows;
314  else
315  path->rows = baserel->rows;
316 
317  /* fetch estimated page cost for tablespace containing table */
 318  get_tablespace_page_costs(baserel->reltablespace,
 319  &spc_random_page_cost,
320  &spc_seq_page_cost);
321 
322  /* if NextSampleBlock is used, assume random access, else sequential */
323  spc_page_cost = (tsm->NextSampleBlock != NULL) ?
324  spc_random_page_cost : spc_seq_page_cost;
325 
326  /*
327  * disk costs (recall that baserel->pages has already been set to the
328  * number of pages the sampling method will visit)
329  */
330  run_cost += spc_page_cost * baserel->pages;
331 
332  /*
333  * CPU costs (recall that baserel->tuples has already been set to the
334  * number of tuples the sampling method will select). Note that we ignore
335  * execution cost of the TABLESAMPLE parameter expressions; they will be
336  * evaluated only once per scan, and in most usages they'll likely be
337  * simple constants anyway. We also don't charge anything for the
338  * calculations the sampling method might do internally.
339  */
340  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
341 
342  startup_cost += qpqual_cost.startup;
343  cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
344  run_cost += cpu_per_tuple * baserel->tuples;
345  /* tlist eval costs are paid per output row, not per tuple scanned */
346  startup_cost += path->pathtarget->cost.startup;
347  run_cost += path->pathtarget->cost.per_tuple * path->rows;
348 
349  path->startup_cost = startup_cost;
350  path->total_cost = startup_cost + run_cost;
351 }

◆ cost_seqscan()

void cost_seqscan ( Path path,
PlannerInfo root,
RelOptInfo baserel,
ParamPathInfo param_info 
)

Definition at line 212 of file costsize.c.

References Assert, clamp_row_est(), PathTarget::cost, cpu_tuple_cost, disable_cost, enable_seqscan, get_parallel_divisor(), get_restriction_qual_cost(), get_tablespace_page_costs(), RelOptInfo::pages, Path::parallel_workers, Path::pathtarget, QualCost::per_tuple, ParamPathInfo::ppi_rows, RelOptInfo::relid, RelOptInfo::reltablespace, RelOptInfo::rows, Path::rows, RTE_RELATION, RelOptInfo::rtekind, QualCost::startup, Path::startup_cost, Path::total_cost, and RelOptInfo::tuples.

Referenced by create_seqscan_path().

214 {
215  Cost startup_cost = 0;
216  Cost cpu_run_cost;
217  Cost disk_run_cost;
218  double spc_seq_page_cost;
219  QualCost qpqual_cost;
220  Cost cpu_per_tuple;
221 
222  /* Should only be applied to base relations */
223  Assert(baserel->relid > 0);
224  Assert(baserel->rtekind == RTE_RELATION);
225 
226  /* Mark the path with the correct row estimate */
227  if (param_info)
228  path->rows = param_info->ppi_rows;
229  else
230  path->rows = baserel->rows;
231 
232  if (!enable_seqscan)
233  startup_cost += disable_cost;
234 
235  /* fetch estimated page cost for tablespace containing table */
 236  get_tablespace_page_costs(baserel->reltablespace,
 237  NULL,
238  &spc_seq_page_cost);
239 
240  /*
241  * disk costs
242  */
243  disk_run_cost = spc_seq_page_cost * baserel->pages;
244 
245  /* CPU costs */
246  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
247 
248  startup_cost += qpqual_cost.startup;
249  cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
250  cpu_run_cost = cpu_per_tuple * baserel->tuples;
251  /* tlist eval costs are paid per output row, not per tuple scanned */
252  startup_cost += path->pathtarget->cost.startup;
253  cpu_run_cost += path->pathtarget->cost.per_tuple * path->rows;
254 
255  /* Adjust costing for parallelism, if used. */
256  if (path->parallel_workers > 0)
257  {
258  double parallel_divisor = get_parallel_divisor(path);
259 
260  /* The CPU cost is divided among all the workers. */
261  cpu_run_cost /= parallel_divisor;
262 
263  /*
264  * It may be possible to amortize some of the I/O cost, but probably
265  * not very much, because most operating systems already do aggressive
266  * prefetching. For now, we assume that the disk run cost can't be
267  * amortized at all.
268  */
269 
270  /*
271  * In the case of a parallel plan, the row count needs to represent
272  * the number of tuples processed per worker.
273  */
274  path->rows = clamp_row_est(path->rows / parallel_divisor);
275  }
276 
277  path->startup_cost = startup_cost;
278  path->total_cost = startup_cost + cpu_run_cost + disk_run_cost;
279 }
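
Illustrative worked example (not part of costsize.c): for a hypothetical 10,000-page, 1,000,000-tuple table with no restriction clauses, no tlist expression costs, and the default seq_page_cost = 1.0 and cpu_tuple_cost = 0.01, the formula above yields a disk component of 10,000 and a CPU component of 10,000. A minimal standalone sketch of that arithmetic, ignoring disable_cost, tablespace overrides, and parallelism:

/*
 * Sketch only, not PostgreSQL source: reproduces the cost_seqscan()
 * arithmetic under the stated assumptions.
 */
#include <stdio.h>

int
main(void)
{
	double		spc_seq_page_cost = 1.0;	/* default seq_page_cost */
	double		cpu_tuple_cost = 0.01;		/* default */
	double		pages = 10000.0;			/* hypothetical baserel->pages */
	double		tuples = 1000000.0;			/* hypothetical baserel->tuples */
	double		disk_run_cost = spc_seq_page_cost * pages;	/* 10000 */
	double		cpu_run_cost = cpu_tuple_cost * tuples;		/* 10000 */

	/* startup_cost stays 0 with no quals and no tlist expression costs */
	printf("total_cost = %.0f\n", disk_run_cost + cpu_run_cost);	/* 20000 */
	return 0;
}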

◆ cost_sort()

void cost_sort ( Path path,
PlannerInfo root,
List pathkeys,
Cost  input_cost,
double  tuples,
int  width,
Cost  comparison_cost,
int  sort_mem,
double  limit_tuples 
)

Definition at line 1661 of file costsize.c.

References cpu_operator_cost, disable_cost, enable_sort, LOG2, random_page_cost, relation_byte_size(), Path::rows, seq_page_cost, Path::startup_cost, Path::total_cost, and tuplesort_merge_order().

Referenced by choose_hashed_setop(), create_gather_merge_path(), create_groupingsets_path(), create_merge_append_path(), create_sort_path(), create_unique_path(), initial_cost_mergejoin(), label_sort_with_costsize(), and plan_cluster_use_sort().

1665 {
1666  Cost startup_cost = input_cost;
1667  Cost run_cost = 0;
1668  double input_bytes = relation_byte_size(tuples, width);
1669  double output_bytes;
1670  double output_tuples;
1671  long sort_mem_bytes = sort_mem * 1024L;
1672 
1673  if (!enable_sort)
1674  startup_cost += disable_cost;
1675 
1676  path->rows = tuples;
1677 
1678  /*
1679  * We want to be sure the cost of a sort is never estimated as zero, even
1680  * if passed-in tuple count is zero. Besides, mustn't do log(0)...
1681  */
1682  if (tuples < 2.0)
1683  tuples = 2.0;
1684 
1685  /* Include the default cost-per-comparison */
1686  comparison_cost += 2.0 * cpu_operator_cost;
1687 
1688  /* Do we have a useful LIMIT? */
1689  if (limit_tuples > 0 && limit_tuples < tuples)
1690  {
1691  output_tuples = limit_tuples;
1692  output_bytes = relation_byte_size(output_tuples, width);
1693  }
1694  else
1695  {
1696  output_tuples = tuples;
1697  output_bytes = input_bytes;
1698  }
1699 
1700  if (output_bytes > sort_mem_bytes)
1701  {
1702  /*
1703  * We'll have to use a disk-based sort of all the tuples
1704  */
1705  double npages = ceil(input_bytes / BLCKSZ);
1706  double nruns = input_bytes / sort_mem_bytes;
1707  double mergeorder = tuplesort_merge_order(sort_mem_bytes);
1708  double log_runs;
1709  double npageaccesses;
1710 
1711  /*
1712  * CPU costs
1713  *
1714  * Assume about N log2 N comparisons
1715  */
1716  startup_cost += comparison_cost * tuples * LOG2(tuples);
1717 
1718  /* Disk costs */
1719 
1720  /* Compute logM(r) as log(r) / log(M) */
1721  if (nruns > mergeorder)
1722  log_runs = ceil(log(nruns) / log(mergeorder));
1723  else
1724  log_runs = 1.0;
1725  npageaccesses = 2.0 * npages * log_runs;
1726  /* Assume 3/4ths of accesses are sequential, 1/4th are not */
1727  startup_cost += npageaccesses *
1728  (seq_page_cost * 0.75 + random_page_cost * 0.25);
1729  }
1730  else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)
1731  {
1732  /*
1733  * We'll use a bounded heap-sort keeping just K tuples in memory, for
1734  * a total number of tuple comparisons of N log2 K; but the constant
1735  * factor is a bit higher than for quicksort. Tweak it so that the
1736  * cost curve is continuous at the crossover point.
1737  */
1738  startup_cost += comparison_cost * tuples * LOG2(2.0 * output_tuples);
1739  }
1740  else
1741  {
1742  /* We'll use plain quicksort on all the input tuples */
1743  startup_cost += comparison_cost * tuples * LOG2(tuples);
1744  }
1745 
1746  /*
1747  * Also charge a small amount (arbitrarily set equal to operator cost) per
1748  * extracted tuple. We don't charge cpu_tuple_cost because a Sort node
1749  * doesn't do qual-checking or projection, so it has less overhead than
1750  * most plan nodes. Note it's correct to use tuples not output_tuples
1751  * here --- the upper LIMIT will pro-rate the run cost so we'd be double
1752  * counting the LIMIT otherwise.
1753  */
1754  run_cost += cpu_operator_cost * tuples;
1755 
1756  path->startup_cost = startup_cost;
1757  path->total_cost = startup_cost + run_cost;
1758 }
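
Illustrative worked example (not part of costsize.c): the external-sort branch above can be traced for a hypothetical 1,000,000-tuple, 100-byte-wide input sorted in 4 MB of sort_mem, assuming the default cpu_operator_cost = 0.0025, seq_page_cost = 1.0 and random_page_cost = 4.0, BLCKSZ = 8192, a hypothetical tuplesort_merge_order() result of 6, and relation_byte_size() approximated as tuples * width:

/*
 * Sketch only, not PostgreSQL source: mirrors the disk-sort cost
 * arithmetic of cost_sort() under the stated assumptions.
 */
#include <math.h>
#include <stdio.h>

int
main(void)
{
	double		tuples = 1000000.0;
	double		width = 100.0;
	double		sort_mem_bytes = 4096.0 * 1024.0;	/* 4 MB */
	double		comparison_cost = 2.0 * 0.0025;		/* 2 * cpu_operator_cost */
	double		input_bytes = tuples * width;		/* crude relation_byte_size() */
	double		npages = ceil(input_bytes / 8192.0);
	double		nruns = input_bytes / sort_mem_bytes;
	double		mergeorder = 6.0;					/* assumed tuplesort_merge_order() */
	double		log_runs;
	double		npageaccesses;
	double		cpu_cost;
	double		disk_cost;

	log_runs = (nruns > mergeorder) ? ceil(log(nruns) / log(mergeorder)) : 1.0;
	npageaccesses = 2.0 * npages * log_runs;

	cpu_cost = comparison_cost * tuples * (log(tuples) / 0.693147180559945);	/* N log2 N */
	disk_cost = npageaccesses * (1.0 * 0.75 + 4.0 * 0.25);	/* 3/4 seq, 1/4 random */

	printf("sort startup_cost ~= %.0f (cpu %.0f + disk %.0f)\n",
		   cpu_cost + disk_cost, cpu_cost, disk_cost);
	return 0;
}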

◆ cost_subplan()

void cost_subplan ( PlannerInfo root,
SubPlan subplan,
Plan plan 
)

Definition at line 3516 of file costsize.c.

References ALL_SUBLINK, ANY_SUBLINK, clamp_row_est(), cost_qual_eval(), cpu_operator_cost, ExecMaterializesOutput(), EXISTS_SUBLINK, make_ands_implicit(), NIL, nodeTag, SubPlan::parParam, SubPlan::per_call_cost, QualCost::per_tuple, Plan::plan_rows, QualCost::startup, Plan::startup_cost, SubPlan::startup_cost, SubPlan::subLinkType, SubPlan::testexpr, Plan::total_cost, and SubPlan::useHashTable.

Referenced by build_subplan(), SS_make_initplan_from_plan(), and SS_process_ctes().

3517 {
3518  QualCost sp_cost;
3519 
3520  /* Figure any cost for evaluating the testexpr */
3521  cost_qual_eval(&sp_cost,
3522  make_ands_implicit((Expr *) subplan->testexpr),
3523  root);
3524 
3525  if (subplan->useHashTable)
3526  {
3527  /*
3528  * If we are using a hash table for the subquery outputs, then the
3529  * cost of evaluating the query is a one-time cost. We charge one
3530  * cpu_operator_cost per tuple for the work of loading the hashtable,
3531  * too.
3532  */
3533  sp_cost.startup += plan->total_cost +
3534  cpu_operator_cost * plan->plan_rows;
3535 
3536  /*
3537  * The per-tuple costs include the cost of evaluating the lefthand
3538  * expressions, plus the cost of probing the hashtable. We already
3539  * accounted for the lefthand expressions as part of the testexpr, and
3540  * will also have counted one cpu_operator_cost for each comparison
3541  * operator. That is probably too low for the probing cost, but it's
3542  * hard to make a better estimate, so live with it for now.
3543  */
3544  }
3545  else
3546  {
3547  /*
3548  * Otherwise we will be rescanning the subplan output on each
3549  * evaluation. We need to estimate how much of the output we will
3550  * actually need to scan. NOTE: this logic should agree with the
3551  * tuple_fraction estimates used by make_subplan() in
3552  * plan/subselect.c.
3553  */
3554  Cost plan_run_cost = plan->total_cost - plan->startup_cost;
3555 
3556  if (subplan->subLinkType == EXISTS_SUBLINK)
3557  {
3558  /* we only need to fetch 1 tuple; clamp to avoid zero divide */
3559  sp_cost.per_tuple += plan_run_cost / clamp_row_est(plan->plan_rows);
3560  }
3561  else if (subplan->subLinkType == ALL_SUBLINK ||
3562  subplan->subLinkType == ANY_SUBLINK)
3563  {
3564  /* assume we need 50% of the tuples */
3565  sp_cost.per_tuple += 0.50 * plan_run_cost;
3566  /* also charge a cpu_operator_cost per row examined */
3567  sp_cost.per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
3568  }
3569  else
3570  {
3571  /* assume we need all tuples */
3572  sp_cost.per_tuple += plan_run_cost;
3573  }
3574 
3575  /*
3576  * Also account for subplan's startup cost. If the subplan is
3577  * uncorrelated or undirect correlated, AND its topmost node is one
3578  * that materializes its output, assume that we'll only need to pay
3579  * its startup cost once; otherwise assume we pay the startup cost
3580  * every time.
3581  */
3582  if (subplan->parParam == NIL &&
 3583  ExecMaterializesOutput(nodeTag(plan)))
 3584  sp_cost.startup += plan->startup_cost;
3585  else
3586  sp_cost.per_tuple += plan->startup_cost;
3587  }
3588 
3589  subplan->startup_cost = sp_cost.startup;
3590  subplan->per_call_cost = sp_cost.per_tuple;
3591 }
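
Worked example with hypothetical numbers: an uncorrelated EXISTS subplan whose plan has startup_cost = 100, total_cost = 1100 and plan_rows = 500 is charged a per_call_cost of roughly (1100 - 100) / 500 = 2, since only one tuple needs to be fetched; if its top node materializes its output, the startup cost of 100 is paid once (added to startup_cost) rather than on every call. The same plan under an ANY sublink would instead be charged 50% of the run cost plus 0.5 * 500 * cpu_operator_cost per call.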

◆ cost_subqueryscan()

void cost_subqueryscan ( SubqueryScanPath path,
PlannerInfo root,
RelOptInfo baserel,
ParamPathInfo param_info 
)

Definition at line 1284 of file costsize.c.

References Assert, PathTarget::cost, cpu_tuple_cost, get_restriction_qual_cost(), SubqueryScanPath::path, Path::pathtarget, QualCost::per_tuple, ParamPathInfo::ppi_rows, RelOptInfo::relid, RelOptInfo::rows, Path::rows, RTE_SUBQUERY, RelOptInfo::rtekind, QualCost::startup, Path::startup_cost, SubqueryScanPath::subpath, Path::total_cost, and RelOptInfo::tuples.

Referenced by create_subqueryscan_path().

1286 {
1287  Cost startup_cost;
1288  Cost run_cost;
1289  QualCost qpqual_cost;
1290  Cost cpu_per_tuple;
1291 
1292  /* Should only be applied to base relations that are subqueries */
1293  Assert(baserel->relid > 0);
1294  Assert(baserel->rtekind == RTE_SUBQUERY);
1295 
1296  /* Mark the path with the correct row estimate */
1297  if (param_info)
1298  path->path.rows = param_info->ppi_rows;
1299  else
1300  path->path.rows = baserel->rows;
1301 
1302  /*
1303  * Cost of path is cost of evaluating the subplan, plus cost of evaluating
1304  * any restriction clauses and tlist that will be attached to the
1305  * SubqueryScan node, plus cpu_tuple_cost to account for selection and
1306  * projection overhead.
1307  */
1308  path->path.startup_cost = path->subpath->startup_cost;
1309  path->path.total_cost = path->subpath->total_cost;
1310 
1311  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1312 
1313  startup_cost = qpqual_cost.startup;
1314  cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1315  run_cost = cpu_per_tuple * baserel->tuples;
1316 
1317  /* tlist eval costs are paid per output row, not per tuple scanned */
1318  startup_cost += path->path.pathtarget->cost.startup;
1319  run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
1320 
1321  path->path.startup_cost += startup_cost;
1322  path->path.total_cost += startup_cost + run_cost;
1323 }
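
Worked example with hypothetical numbers: if the subpath has startup_cost = 5 and total_cost = 105, and the subquery relation is expected to return 1,000 tuples with no extra quals, the SubqueryScan layer adds cpu_tuple_cost * 1000 = 10 of run cost, giving a path with startup_cost = 5 and total_cost = 115 under the default cpu_tuple_cost = 0.01.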

◆ cost_tablefuncscan()

void cost_tablefuncscan ( Path path,
PlannerInfo root,
RelOptInfo baserel,
ParamPathInfo param_info 
)

Definition at line 1394 of file costsize.c.

References Assert, PathTarget::cost, cost_qual_eval_node(), cpu_tuple_cost, get_restriction_qual_cost(), Path::pathtarget, QualCost::per_tuple, planner_rt_fetch, ParamPathInfo::ppi_rows, RelOptInfo::relid, RelOptInfo::rows, Path::rows, RTE_TABLEFUNC, RangeTblEntry::rtekind, QualCost::startup, Path::startup_cost, RangeTblEntry::tablefunc, Path::total_cost, and RelOptInfo::tuples.

Referenced by create_tablefuncscan_path().

1396 {
1397  Cost startup_cost = 0;
1398  Cost run_cost = 0;
1399  QualCost qpqual_cost;
1400  Cost cpu_per_tuple;
1401  RangeTblEntry *rte;
1402  QualCost exprcost;
1403 
1404  /* Should only be applied to base relations that are functions */
1405  Assert(baserel->relid > 0);
1406  rte = planner_rt_fetch(baserel->relid, root);
1407  Assert(rte->rtekind == RTE_TABLEFUNC);
1408 
1409  /* Mark the path with the correct row estimate */
1410  if (param_info)
1411  path->rows = param_info->ppi_rows;
1412  else
1413  path->rows = baserel->rows;
1414 
1415  /*
1416  * Estimate costs of executing the table func expression(s).
1417  *
1418  * XXX in principle we ought to charge tuplestore spill costs if the
1419  * number of rows is large. However, given how phony our rowcount
1420  * estimates for tablefuncs tend to be, there's not a lot of point in that
1421  * refinement right now.
1422  */
1423  cost_qual_eval_node(&exprcost, (Node *) rte->tablefunc, root);
1424 
1425  startup_cost += exprcost.startup + exprcost.per_tuple;
1426 
1427  /* Add scanning CPU costs */
1428  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1429 
1430  startup_cost += qpqual_cost.startup;
1431  cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1432  run_cost += cpu_per_tuple * baserel->tuples;
1433 
1434  /* tlist eval costs are paid per output row, not per tuple scanned */
1435  startup_cost += path->pathtarget->cost.startup;
1436  run_cost += path->pathtarget->cost.per_tuple * path->rows;
1437 
1438  path->startup_cost = startup_cost;
1439  path->total_cost = startup_cost + run_cost;
1440 }

◆ cost_tidscan()

void cost_tidscan ( Path path,
PlannerInfo root,
RelOptInfo baserel,
List tidquals,
ParamPathInfo param_info 
)

Definition at line 1181 of file costsize.c.

References ScalarArrayOpExpr::args, Assert, RelOptInfo::baserestrictcost, PathTarget::cost, cost_qual_eval(), cpu_tuple_cost, disable_cost, enable_tidscan, estimate_array_length(), get_restriction_qual_cost(), get_tablespace_page_costs(), IsA, lfirst, lsecond, Path::pathtarget, QualCost::per_tuple, ParamPathInfo::ppi_rows, RelOptInfo::relid, RelOptInfo::reltablespace, RelOptInfo::rows, Path::rows, RTE_RELATION, RelOptInfo::rtekind, QualCost::startup, Path::startup_cost, and Path::total_cost.

Referenced by create_tidscan_path().

1183 {
1184  Cost startup_cost = 0;
1185  Cost run_cost = 0;
1186  bool isCurrentOf = false;
1187  QualCost qpqual_cost;
1188  Cost cpu_per_tuple;
1189  QualCost tid_qual_cost;
1190  int ntuples;
1191  ListCell *l;
1192  double spc_random_page_cost;
1193 
1194  /* Should only be applied to base relations */
1195  Assert(baserel->relid > 0);
1196  Assert(baserel->rtekind == RTE_RELATION);
1197 
1198  /* Mark the path with the correct row estimate */
1199  if (param_info)
1200  path->rows = param_info->ppi_rows;
1201  else
1202  path->rows = baserel->rows;
1203 
1204  /* Count how many tuples we expect to retrieve */
1205  ntuples = 0;
1206  foreach(l, tidquals)
1207  {
1208  if (IsA(lfirst(l), ScalarArrayOpExpr))
1209  {
1210  /* Each element of the array yields 1 tuple */
 1211  ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) lfirst(l);
 1212  Node *arraynode = (Node *) lsecond(saop->args);
1213 
1214  ntuples += estimate_array_length(arraynode);
1215  }
1216  else if (IsA(lfirst(l), CurrentOfExpr))
1217  {
1218  /* CURRENT OF yields 1 tuple */
1219  isCurrentOf = true;
1220  ntuples++;
1221  }
1222  else
1223  {
1224  /* It's just CTID = something, count 1 tuple */
1225  ntuples++;
1226  }
1227  }
1228 
1229  /*
1230  * We must force TID scan for WHERE CURRENT OF, because only nodeTidscan.c
1231  * understands how to do it correctly. Therefore, honor enable_tidscan
1232  * only when CURRENT OF isn't present. Also note that cost_qual_eval
1233  * counts a CurrentOfExpr as having startup cost disable_cost, which we
1234  * subtract off here; that's to prevent other plan types such as seqscan
1235  * from winning.
1236  */
1237  if (isCurrentOf)
1238  {
 1239  Assert(baserel->baserestrictcost.startup >= disable_cost);
 1240  startup_cost -= disable_cost;
1241  }
1242  else if (!enable_tidscan)
1243  startup_cost += disable_cost;
1244 
1245  /*
1246  * The TID qual expressions will be computed once, any other baserestrict
1247  * quals once per retrieved tuple.
1248  */
1249  cost_qual_eval(&tid_qual_cost, tidquals, root);
1250 
1251  /* fetch estimated page cost for tablespace containing table */
 1252  get_tablespace_page_costs(baserel->reltablespace,
 1253  &spc_random_page_cost,
1254  NULL);
1255 
1256  /* disk costs --- assume each tuple on a different page */
1257  run_cost += spc_random_page_cost * ntuples;
1258 
1259  /* Add scanning CPU costs */
1260  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1261 
1262  /* XXX currently we assume TID quals are a subset of qpquals */
1263  startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
1264  cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
1265  tid_qual_cost.per_tuple;
1266  run_cost += cpu_per_tuple * ntuples;
1267 
1268  /* tlist eval costs are paid per output row, not per tuple scanned */
1269  startup_cost += path->pathtarget->cost.startup;
1270  run_cost += path->pathtarget->cost.per_tuple * path->rows;
1271 
1272  path->startup_cost = startup_cost;
1273  path->total_cost = startup_cost + run_cost;
1274 }
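
Worked example with hypothetical numbers: a tidquals list holding one ScalarArrayOpExpr whose array is estimated at five elements plus one plain CTID equality clause gives ntuples = 6, so the disk component is 6 * spc_random_page_cost, reflecting the assumption that each fetched tuple lives on a different page.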

◆ cost_valuesscan()

void cost_valuesscan ( Path path,
PlannerInfo root,
RelOptInfo baserel,
ParamPathInfo param_info 
)

Definition at line 1450 of file costsize.c.

References Assert, PathTarget::cost, cpu_operator_cost, cpu_tuple_cost, get_restriction_qual_cost(), Path::pathtarget, QualCost::per_tuple, ParamPathInfo::ppi_rows, RelOptInfo::relid, RelOptInfo::rows, Path::rows, RTE_VALUES, RelOptInfo::rtekind, QualCost::startup, Path::startup_cost, Path::total_cost, and RelOptInfo::tuples.

Referenced by create_valuesscan_path().

1452 {
1453  Cost startup_cost = 0;
1454  Cost run_cost = 0;
1455  QualCost qpqual_cost;
1456  Cost cpu_per_tuple;
1457 
1458  /* Should only be applied to base relations that are values lists */
1459  Assert(baserel->relid > 0);
1460  Assert(baserel->rtekind == RTE_VALUES);
1461 
1462  /* Mark the path with the correct row estimate */
1463  if (param_info)
1464  path->rows = param_info->ppi_rows;
1465  else
1466  path->rows = baserel->rows;
1467 
1468  /*
1469  * For now, estimate list evaluation cost at one operator eval per list
1470  * (probably pretty bogus, but is it worth being smarter?)
1471  */
1472  cpu_per_tuple = cpu_operator_cost;
1473 
1474  /* Add scanning CPU costs */
1475  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1476 
1477  startup_cost += qpqual_cost.startup;
1478  cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1479  run_cost += cpu_per_tuple * baserel->tuples;
1480 
1481  /* tlist eval costs are paid per output row, not per tuple scanned */
1482  startup_cost += path->pathtarget->cost.startup;
1483  run_cost += path->pathtarget->cost.per_tuple * path->rows;
1484 
1485  path->startup_cost = startup_cost;
1486  path->total_cost = startup_cost + run_cost;
1487 }
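
Worked example with hypothetical numbers: a 1,000-row VALUES list with no quals is charged (cpu_operator_cost + cpu_tuple_cost) per row, i.e. about (0.0025 + 0.01) * 1000 = 12.5 of run cost under the default settings, plus any tlist evaluation costs.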

◆ cost_windowagg()

void cost_windowagg ( Path path,
PlannerInfo root,
List windowFuncs,
int  numPartCols,
int  numOrderCols,
Cost  input_startup_cost,
Cost  input_total_cost,
double  input_tuples 
)

Definition at line 2178 of file costsize.c.

References WindowFunc::aggfilter, WindowFunc::args, cost_qual_eval_node(), cpu_operator_cost, cpu_tuple_cost, get_func_cost(), lfirst_node, QualCost::per_tuple, Path::rows, QualCost::startup, Path::startup_cost, Path::total_cost, and WindowFunc::winfnoid.

Referenced by create_windowagg_path().

2182 {
2183  Cost startup_cost;
2184  Cost total_cost;
2185  ListCell *lc;
2186 
2187  startup_cost = input_startup_cost;
2188  total_cost = input_total_cost;
2189 
2190  /*
2191  * Window functions are assumed to cost their stated execution cost, plus
2192  * the cost of evaluating their input expressions, per tuple. Since they
2193  * may in fact evaluate their inputs at multiple rows during each cycle,
2194  * this could be a drastic underestimate; but without a way to know how
2195  * many rows the window function will fetch, it's hard to do better. In
2196  * any case, it's a good estimate for all the built-in window functions,
2197  * so we'll just do this for now.
2198  */
2199  foreach(lc, windowFuncs)
2200  {
2201  WindowFunc *wfunc = lfirst_node(WindowFunc, lc);
2202  Cost wfunccost;
2203  QualCost argcosts;
2204 
2205  wfunccost = get_func_cost(wfunc->winfnoid) * cpu_operator_cost;
2206 
2207  /* also add the input expressions' cost to per-input-row costs */
2208  cost_qual_eval_node(&argcosts, (Node *) wfunc->args, root);
2209  startup_cost += argcosts.startup;
2210  wfunccost += argcosts.per_tuple;
2211 
2212  /*
2213  * Add the filter's cost to per-input-row costs. XXX We should reduce
2214  * input expression costs according to filter selectivity.
2215  */
2216  cost_qual_eval_node(&argcosts, (Node *) wfunc->aggfilter, root);
2217  startup_cost += argcosts.startup;
2218  wfunccost += argcosts.per_tuple;
2219 
2220  total_cost += wfunccost * input_tuples;
2221  }
2222 
2223  /*
2224  * We also charge cpu_operator_cost per grouping column per tuple for
2225  * grouping comparisons, plus cpu_tuple_cost per tuple for general
2226  * overhead.
2227  *
2228  * XXX this neglects costs of spooling the data to disk when it overflows
2229  * work_mem. Sooner or later that should get accounted for.
2230  */
2231  total_cost += cpu_operator_cost * (numPartCols + numOrderCols) * input_tuples;
2232  total_cost += cpu_tuple_cost * input_tuples;
2233 
2234  path->rows = input_tuples;
2235  path->startup_cost = startup_cost;
2236  path->total_cost = total_cost;
2237 }
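
Worked example with hypothetical numbers: one window function with a catalog cost of 1 (so 1 * cpu_operator_cost per row) and negligible argument and filter costs, applied to 10,000 input tuples with one partition column and one ordering column, adds roughly 10,000 * cpu_operator_cost for the function itself, 10,000 * 2 * cpu_operator_cost for grouping comparisons, and 10,000 * cpu_tuple_cost of general overhead on top of the input plan's cost.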

◆ extract_nonindex_conditions()

static List * extract_nonindex_conditions ( List qual_clauses,
List indexquals 
)
static

Definition at line 770 of file costsize.c.

References is_redundant_derived_clause(), lappend(), lfirst_node, list_member_ptr(), NIL, and RestrictInfo::pseudoconstant.

Referenced by cost_index().

771 {
772  List *result = NIL;
773  ListCell *lc;
774 
775  foreach(lc, qual_clauses)
776  {
777  RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
778 
779  if (rinfo->pseudoconstant)
780  continue; /* we may drop pseudoconstants here */
781  if (list_member_ptr(indexquals, rinfo))
782  continue; /* simple duplicate */
783  if (is_redundant_derived_clause(rinfo, indexquals))
784  continue; /* derived from same EquivalenceClass */
785  /* ... skip the predicate proof attempt createplan.c will try ... */
786  result = lappend(result, rinfo);
787  }
788  return result;
789 }

◆ final_cost_hashjoin()

void final_cost_hashjoin ( PlannerInfo root,
HashPath path,
JoinCostWorkspace workspace,
JoinPathExtraData extra 
)

Definition at line 3261 of file costsize.c.

References approx_tuple_count(), Assert, bms_is_subset(), clamp_row_est(), RestrictInfo::clause, PathTarget::cost, cost_qual_eval(), cpu_tuple_cost, disable_cost, enable_hashjoin, estimate_hash_bucket_stats(), get_leftop(), get_parallel_divisor(), get_rightop(), HashPath::inner_rows_total, JoinCostWorkspace::inner_rows_total, JoinPathExtraData::inner_unique, JoinPath::innerjoinpath, IsA, JOIN_ANTI, JOIN_SEMI, JoinPath::joinrestrictinfo, JoinPath::jointype, HashPath::jpath, RestrictInfo::left_bucketsize, RestrictInfo::left_mcvfreq, RestrictInfo::left_relids, lfirst_node, SemiAntiJoinFactors::match_count, HashPath::num_batches, JoinCostWorkspace::numbatches, JoinCostWorkspace::numbuckets, SemiAntiJoinFactors::outer_match_frac, JoinPath::outerjoinpath, Path::parallel_workers, Path::param_info, Path::parent, JoinPath::path, HashPath::path_hashclauses, Path::pathtarget, QualCost::per_tuple, ParamPathInfo::ppi_rows, relation_byte_size(), RelOptInfo::relids, RestrictInfo::right_bucketsize, RestrictInfo::right_mcvfreq, RestrictInfo::right_relids, rint(), RelOptInfo::rows, Path::rows, JoinCostWorkspace::run_cost, JoinPathExtraData::semifactors, QualCost::startup, Path::startup_cost, JoinCostWorkspace::startup_cost, Path::total_cost, PathTarget::width, and work_mem.

Referenced by create_hashjoin_path().

3264 {
3265  Path *outer_path = path->jpath.outerjoinpath;
3266  Path *inner_path = path->jpath.innerjoinpath;
3267  double outer_path_rows = outer_path->rows;
3268  double inner_path_rows = inner_path->rows;
3269  double inner_path_rows_total = workspace->inner_rows_total;
3270  List *hashclauses = path->path_hashclauses;
3271  Cost startup_cost = workspace->startup_cost;
3272  Cost run_cost = workspace->run_cost;
3273  int numbuckets = workspace->numbuckets;
3274  int numbatches = workspace->numbatches;
3275  Cost cpu_per_tuple;
3276  QualCost hash_qual_cost;
3277  QualCost qp_qual_cost;
3278  double hashjointuples;
3279  double virtualbuckets;
3280  Selectivity innerbucketsize;
3281  Selectivity innermcvfreq;
3282  ListCell *hcl;
3283 
3284  /* Mark the path with the correct row estimate */
3285  if (path->jpath.path.param_info)
3286  path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
3287  else
3288  path->jpath.path.rows = path->jpath.path.parent->rows;
3289 
3290  /* For partial paths, scale row estimate. */
3291  if (path->jpath.path.parallel_workers > 0)
3292  {
3293  double parallel_divisor = get_parallel_divisor(&path->jpath.path);
3294 
3295  path->jpath.path.rows =
3296  clamp_row_est(path->jpath.path.rows / parallel_divisor);
3297  }
3298 
3299  /*
3300  * We could include disable_cost in the preliminary estimate, but that
3301  * would amount to optimizing for the case where the join method is
3302  * disabled, which doesn't seem like the way to bet.
3303  */
3304  if (!enable_hashjoin)
3305  startup_cost += disable_cost;
3306 
3307  /* mark the path with estimated # of batches */
3308  path->num_batches = numbatches;
3309 
3310  /* store the total number of tuples (sum of partial row estimates) */
3311  path->inner_rows_total = inner_path_rows_total;
3312 
3313  /* and compute the number of "virtual" buckets in the whole join */
3314  virtualbuckets = (double) numbuckets * (double) numbatches;
3315 
3316  /*
3317  * Determine bucketsize fraction and MCV frequency for the inner relation.
3318  * We use the smallest bucketsize or MCV frequency estimated for any
3319  * individual hashclause; this is undoubtedly conservative.
3320  *
3321  * BUT: if inner relation has been unique-ified, we can assume it's good
3322  * for hashing. This is important both because it's the right answer, and
3323  * because we avoid contaminating the cache with a value that's wrong for
3324  * non-unique-ified paths.
3325  */
3326  if (IsA(inner_path, UniquePath))
3327  {
3328  innerbucketsize = 1.0 / virtualbuckets;
3329  innermcvfreq = 0.0;
3330  }
3331  else
3332  {
3333  innerbucketsize = 1.0;
3334  innermcvfreq = 1.0;
3335  foreach(hcl, hashclauses)
3336  {
3337  RestrictInfo *restrictinfo = lfirst_node(RestrictInfo, hcl);
3338  Selectivity thisbucketsize;
3339  Selectivity thismcvfreq;
3340 
3341  /*
3342  * First we have to figure out which side of the hashjoin clause
3343  * is the inner side.
3344  *
3345  * Since we tend to visit the same clauses over and over when
3346  * planning a large query, we cache the bucket stats estimates in
3347  * the RestrictInfo node to avoid repeated lookups of statistics.
3348  */
3349  if (bms_is_subset(restrictinfo->right_relids,
3350  inner_path->parent->relids))
3351  {
3352  /* righthand side is inner */
3353  thisbucketsize = restrictinfo->right_bucketsize;
3354  if (thisbucketsize < 0)
3355  {
3356  /* not cached yet */
 3357  estimate_hash_bucket_stats(root,
 3358  get_rightop(restrictinfo->clause),
3359  virtualbuckets,
3360  &restrictinfo->right_mcvfreq,
3361  &restrictinfo->right_bucketsize);
3362  thisbucketsize = restrictinfo->right_bucketsize;
3363  }
3364  thismcvfreq = restrictinfo->right_mcvfreq;
3365  }
3366  else
3367  {
3368  Assert(bms_is_subset(restrictinfo->left_relids,
3369  inner_path->parent->relids));
3370  /* lefthand side is inner */
3371  thisbucketsize = restrictinfo->left_bucketsize;
3372  if (thisbucketsize < 0)
3373  {
3374  /* not cached yet */
 3375  estimate_hash_bucket_stats(root,
 3376  get_leftop(restrictinfo->clause),
3377  virtualbuckets,
3378  &restrictinfo->left_mcvfreq,
3379  &restrictinfo->left_bucketsize);
3380  thisbucketsize = restrictinfo->left_bucketsize;
3381  }
3382  thismcvfreq = restrictinfo->left_mcvfreq;
3383  }
3384 
3385  if (innerbucketsize > thisbucketsize)
3386  innerbucketsize = thisbucketsize;
3387  if (innermcvfreq > thismcvfreq)
3388  innermcvfreq = thismcvfreq;
3389  }
3390  }
3391 
3392  /*
3393  * If the bucket holding the inner MCV would exceed work_mem, we don't
3394  * want to hash unless there is really no other alternative, so apply
3395  * disable_cost. (The executor normally copes with excessive memory usage
3396  * by splitting batches, but obviously it cannot separate equal values
3397  * that way, so it will be unable to drive the batch size below work_mem
3398  * when this is true.)
3399  */
3400  if (relation_byte_size(clamp_row_est(inner_path_rows * innermcvfreq),
3401  inner_path->pathtarget->width) >
3402  (work_mem * 1024L))
3403  startup_cost += disable_cost;
3404 
3405  /*
3406  * Compute cost of the hashquals and qpquals (other restriction clauses)
3407  * separately.
3408  */
3409  cost_qual_eval(&hash_qual_cost, hashclauses, root);
3410  cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
3411  qp_qual_cost.startup -= hash_qual_cost.startup;
3412  qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
3413 
3414  /* CPU costs */
3415 
3416  if (path->jpath.jointype == JOIN_SEMI ||
3417  path->jpath.jointype == JOIN_ANTI ||
3418  extra->inner_unique)
3419  {
3420  double outer_matched_rows;
3421  Selectivity inner_scan_frac;
3422 
3423  /*
3424  * With a SEMI or ANTI join, or if the innerrel is known unique, the
3425  * executor will stop after the first match.
3426  *
3427  * For an outer-rel row that has at least one match, we can expect the
3428  * bucket scan to stop after a fraction 1/(match_count+1) of the
3429  * bucket's rows, if the matches are evenly distributed. Since they
3430  * probably aren't quite evenly distributed, we apply a fuzz factor of
3431  * 2.0 to that fraction. (If we used a larger fuzz factor, we'd have
3432  * to clamp inner_scan_frac to at most 1.0; but since match_count is
3433  * at least 1, no such clamp is needed now.)
3434  */
3435  outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
3436  inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
3437 
3438  startup_cost += hash_qual_cost.startup;
3439  run_cost += hash_qual_cost.per_tuple * outer_matched_rows *
3440  clamp_row_est(inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;
3441 
3442  /*
3443  * For unmatched outer-rel rows, the picture is quite a lot different.
3444  * In the first place, there is no reason to assume that these rows
3445  * preferentially hit heavily-populated buckets; instead assume they
3446  * are uncorrelated with the inner distribution and so they see an
3447  * average bucket size of inner_path_rows / virtualbuckets. In the
3448  * second place, it seems likely that they will have few if any exact
3449  * hash-code matches and so very few of the tuples in the bucket will
3450  * actually require eval of the hash quals. We don't have any good
3451  * way to estimate how many will, but for the moment assume that the
3452  * effective cost per bucket entry is one-tenth what it is for
3453  * matchable tuples.
3454  */
3455  run_cost += hash_qual_cost.per_tuple *
3456  (outer_path_rows - outer_matched_rows) *
3457  clamp_row_est(inner_path_rows / virtualbuckets) * 0.05;
3458 
3459  /* Get # of tuples that will pass the basic join */
3460  if (path->jpath.jointype == JOIN_SEMI)
3461  hashjointuples = outer_matched_rows;
3462  else
3463  hashjointuples = outer_path_rows - outer_matched_rows;
3464  }
3465  else
3466  {
3467  /*
3468  * The number of tuple comparisons needed is the number of outer
3469  * tuples times the typical number of tuples in a hash bucket, which
3470  * is the inner relation size times its bucketsize fraction. At each
3471  * one, we need to evaluate the hashjoin quals. But actually,
3472  * charging the full qual eval cost at each tuple is pessimistic,
3473  * since we don't evaluate the quals unless the hash values match
3474  * exactly. For lack of a better idea, halve the cost estimate to
3475  * allow for that.
3476  */
3477  startup_cost += hash_qual_cost.startup;
3478  run_cost += hash_qual_cost.per_tuple * outer_path_rows *
3479  clamp_row_est(inner_path_rows * innerbucketsize) * 0.5;
3480 
3481  /*
3482  * Get approx # tuples passing the hashquals. We use
3483  * approx_tuple_count here because we need an estimate done with
3484  * JOIN_INNER semantics.
3485  */
3486  hashjointuples = approx_tuple_count(root, &path->jpath, hashclauses);
3487  }
3488 
3489  /*
3490  * For each tuple that gets through the hashjoin proper, we charge
3491  * cpu_tuple_cost plus the cost of evaluating additional restriction
3492  * clauses that are to be applied at the join. (This is pessimistic since
3493  * not all of the quals may get evaluated at each tuple.)
3494  */
3495  startup_cost += qp_qual_cost.startup;
3496  cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
3497  run_cost += cpu_per_tuple * hashjointuples;
3498 
3499  /* tlist eval costs are paid per output row, not per tuple scanned */
3500  startup_cost += path->jpath.path.pathtarget->cost.startup;
3501  run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
3502 
3503  path->jpath.path.startup_cost = startup_cost;
3504  path->jpath.path.total_cost = startup_cost + run_cost;
3505 }
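
Worked example with hypothetical numbers for the non-SEMI branch: with 10,000 outer rows, 1,000,000 inner rows and innerbucketsize = 1/1024, each outer tuple is compared against about clamp_row_est(1000000 / 1024) = 977 bucket entries, and after halving to allow for the hash-value pre-check the hash-qual term comes to hash_qual_cost.per_tuple * 10000 * 977 * 0.5.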

◆ final_cost_mergejoin()

void final_cost_mergejoin ( PlannerInfo root,
MergePath path,
JoinCostWorkspace workspace,
JoinPathExtraData extra 
)

Definition at line 2830 of file costsize.c.

References approx_tuple_count(), clamp_row_est(), PathTarget::cost, cost_qual_eval(), cpu_operator_cost, cpu_tuple_cost, disable_cost, enable_material, enable_mergejoin, ExecSupportsMarkRestore(), get_parallel_divisor(), JoinCostWorkspace::inner_rows, JoinCostWorkspace::inner_run_cost, JoinCostWorkspace::inner_skip_rows, JoinPathExtraData::inner_unique, JoinPath::innerjoinpath, MergePath::innersortkeys, IsA, JOIN_ANTI, JOIN_SEMI, JoinPath::joinrestrictinfo, JoinPath::jointype, MergePath::jpath, list_length(), MergePath::materialize_inner, NIL, JoinCostWorkspace::outer_rows, JoinCostWorkspace::outer_skip_rows, JoinPath::outerjoinpath, Path::parallel_workers, Path::param_info, Path::parent, JoinPath::path, MergePath::path_mergeclauses, Path::pathtarget, QualCost::per_tuple, ParamPathInfo::ppi_rows, relation_byte_size(), RelOptInfo::rows, Path::rows, JoinCostWorkspace::run_cost, MergePath::skip_mark_restore, QualCost::startup, Path::startup_cost, JoinCostWorkspace::startup_cost, Path::total_cost, PathTarget::width, and work_mem.

Referenced by create_mergejoin_path().

2833 {
2834  Path *outer_path = path->jpath.outerjoinpath;
2835  Path *inner_path = path->jpath.innerjoinpath;
2836  double inner_path_rows = inner_path->rows;
2837  List *mergeclauses = path->path_mergeclauses;
2838  List *innersortkeys = path->innersortkeys;
2839  Cost startup_cost = workspace->startup_cost;
2840  Cost run_cost = workspace->run_cost;
2841  Cost inner_run_cost = workspace->inner_run_cost;
2842  double outer_rows = workspace->outer_rows;
2843  double inner_rows = workspace->inner_rows;
2844  double outer_skip_rows = workspace->outer_skip_rows;
2845  double inner_skip_rows = workspace->inner_skip_rows;
2846  Cost cpu_per_tuple,
2847  bare_inner_cost,
2848  mat_inner_cost;
2849  QualCost merge_qual_cost;
2850  QualCost qp_qual_cost;
2851  double mergejointuples,
2852  rescannedtuples;
2853  double rescanratio;
2854 
2855  /* Protect some assumptions below that rowcounts aren't zero or NaN */
2856  if (inner_path_rows <= 0 || isnan(inner_path_rows))
2857  inner_path_rows = 1;
2858 
2859  /* Mark the path with the correct row estimate */
2860  if (path->jpath.path.param_info)
2861  path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
2862  else
2863  path->jpath.path.rows = path->jpath.path.parent->rows;
2864 
2865  /* For partial paths, scale row estimate. */
2866  if (path->jpath.path.parallel_workers > 0)
2867  {
2868  double parallel_divisor = get_parallel_divisor(&path->jpath.path);
2869 
2870  path->jpath.path.rows =
2871  clamp_row_est(path->jpath.path.rows / parallel_divisor);
2872  }
2873 
2874  /*
2875  * We could include disable_cost in the preliminary estimate, but that
2876  * would amount to optimizing for the case where the join method is
2877  * disabled, which doesn't seem like the way to bet.
2878  */
2879  if (!enable_mergejoin)
2880  startup_cost += disable_cost;
2881 
2882  /*
2883  * Compute cost of the mergequals and qpquals (other restriction clauses)
2884  * separately.
2885  */
2886  cost_qual_eval(&merge_qual_cost, mergeclauses, root);
2887  cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
2888  qp_qual_cost.startup -= merge_qual_cost.startup;
2889  qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
2890 
2891  /*
2892  * With a SEMI or ANTI join, or if the innerrel is known unique, the
2893  * executor will stop scanning for matches after the first match. When
2894  * all the joinclauses are merge clauses, this means we don't ever need to
2895  * back up the merge, and so we can skip mark/restore overhead.
2896  */
2897  if ((path->jpath.jointype == JOIN_SEMI ||
2898  path->jpath.jointype == JOIN_ANTI ||
2899  extra->inner_unique) &&
 2900  (list_length(path->jpath.joinrestrictinfo) ==
 2901  list_length(path->path_mergeclauses)))
 2902  path->skip_mark_restore = true;
2903  else
2904  path->skip_mark_restore = false;
2905 
2906  /*
2907  * Get approx # tuples passing the mergequals. We use approx_tuple_count
2908  * here because we need an estimate done with JOIN_INNER semantics.
2909  */
2910  mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);
2911 
2912  /*
2913  * When there are equal merge keys in the outer relation, the mergejoin
2914  * must rescan any matching tuples in the inner relation. This means
2915  * re-fetching inner tuples; we have to estimate how often that happens.
2916  *
2917  * For regular inner and outer joins, the number of re-fetches can be
2918  * estimated approximately as size of merge join output minus size of
2919  * inner relation. Assume that the distinct key values are 1, 2, ..., and
2920  * denote the number of values of each key in the outer relation as m1,
2921  * m2, ...; in the inner relation, n1, n2, ... Then we have
2922  *
2923  * size of join = m1 * n1 + m2 * n2 + ...
2924  *
2925  * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
2926  * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
2927  * relation
2928  *
2929  * This equation works correctly for outer tuples having no inner match
2930  * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
2931  * are effectively subtracting those from the number of rescanned tuples,
2932  * when we should not. Can we do better without expensive selectivity
2933  * computations?
2934  *
2935  * The whole issue is moot if we are working from a unique-ified outer
2936  * input, or if we know we don't need to mark/restore at all.
2937  */
 2938  if (IsA(outer_path, UniquePath) || path->skip_mark_restore)
2939  rescannedtuples = 0;
2940  else
2941  {
2942  rescannedtuples = mergejointuples - inner_path_rows;
2943  /* Must clamp because of possible underestimate */
2944  if (rescannedtuples < 0)
2945  rescannedtuples = 0;
2946  }
2947  /* We'll inflate various costs this much to account for rescanning */
2948  rescanratio = 1.0 + (rescannedtuples / inner_path_rows);
2949 
2950  /*
2951  * Decide whether we want to materialize the inner input to shield it from
2952  * mark/restore and performing re-fetches. Our cost model for regular
2953  * re-fetches is that a re-fetch costs the same as an original fetch,
2954  * which is probably an overestimate; but on the other hand we ignore the
2955  * bookkeeping costs of mark/restore. Not clear if it's worth developing
2956  * a more refined model. So we just need to inflate the inner run cost by
2957  * rescanratio.
2958  */
2959  bare_inner_cost = inner_run_cost * rescanratio;
2960 
2961  /*
2962  * When we interpose a Material node the re-fetch cost is assumed to be
2963  * just cpu_operator_cost per tuple, independently of the underlying
2964  * plan's cost; and we charge an extra cpu_operator_cost per original
2965  * fetch as well. Note that we're assuming the materialize node will
2966  * never spill to disk, since it only has to remember tuples back to the
2967  * last mark. (If there are a huge number of duplicates, our other cost
2968  * factors will make the path so expensive that it probably won't get
2969  * chosen anyway.) So we don't use cost_rescan here.
2970  *
2971  * Note: keep this estimate in sync with create_mergejoin_plan's labeling
2972  * of the generated Material node.
2973  */
2974  mat_inner_cost = inner_run_cost +
2975  cpu_operator_cost * inner_path_rows * rescanratio;
2976 
2977  /*
2978  * If we don't need mark/restore at all, we don't need materialization.
2979  */
2980  if (path->skip_mark_restore)
2981  path->materialize_inner = false;
2982 
2983  /*
2984  * Prefer materializing if it looks cheaper, unless the user has asked to
2985  * suppress materialization.
2986  */
2987  else if (enable_material && mat_inner_cost < bare_inner_cost)
2988  path->materialize_inner = true;
2989 
2990  /*
2991  * Even if materializing doesn't look cheaper, we *must* do it if the
2992  * inner path is to be used directly (without sorting) and it doesn't
2993  * support mark/restore.
2994  *
2995  * Since the inner side must be ordered, and only Sorts and IndexScans can
2996  * create order to begin with, and they both support mark/restore, you
2997  * might think there's no problem --- but you'd be wrong. Nestloop and
2998  * merge joins can *preserve* the order of their inputs, so they can be
2999  * selected as the input of a mergejoin, and they don't support
3000  * mark/restore at present.
3001  *
3002  * We don't test the value of enable_material here, because
3003  * materialization is required for correctness in this case, and turning
3004  * it off does not entitle us to deliver an invalid plan.
3005  */
3006  else if (innersortkeys == NIL &&
3007  !ExecSupportsMarkRestore(inner_path))
3008  path->materialize_inner = true;
3009 
3010  /*
3011  * Also, force materializing if the inner path is to be sorted and the
3012  * sort is expected to spill to disk. This is because the final merge
3013  * pass can be done on-the-fly if it doesn't have to support mark/restore.
3014  * We don't try to adjust the cost estimates for this consideration,
3015  * though.
3016  *
3017  * Since materialization is a performance optimization in this case,
3018  * rather than necessary for correctness, we skip it if enable_material is
3019  * off.
3020  */
3021  else if (enable_material && innersortkeys != NIL &&
3022  relation_byte_size(inner_path_rows,
3023  inner_path->pathtarget->width) >
3024  (work_mem * 1024L))
3025  path->materialize_inner = true;
3026  else
3027  path->materialize_inner = false;
3028 
3029  /* Charge the right incremental cost for the chosen case */
3030  if (path->materialize_inner)
3031  run_cost += mat_inner_cost;
3032  else
3033  run_cost += bare_inner_cost;
3034 
3035  /* CPU costs */
3036 
3037  /*
3038  * The number of tuple comparisons needed is approximately number of outer
3039  * rows plus number of inner rows plus number of rescanned tuples (can we
3040  * refine this?). At each one, we need to evaluate the mergejoin quals.
3041  */
3042  startup_cost += merge_qual_cost.startup;
3043  startup_cost += merge_qual_cost.per_tuple *
3044  (outer_skip_rows + inner_skip_rows * rescanratio);
3045  run_cost += merge_qual_cost.per_tuple *
3046  ((outer_rows - outer_skip_rows) +
3047  (inner_rows - inner_skip_rows) * rescanratio);
3048 
3049  /*
3050  * For each tuple that gets through the mergejoin proper, we charge
3051  * cpu_tuple_cost plus the cost of evaluating additional restriction
3052  * clauses that are to be applied at the join. (This is pessimistic since
3053  * not all of the quals may get evaluated at each tuple.)
3054  *
3055  * Note: we could adjust for SEMI/ANTI joins skipping some qual
3056  * evaluations here, but it's probably not worth the trouble.
3057  */
3058  startup_cost += qp_qual_cost.startup;
3059  cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
3060  run_cost += cpu_per_tuple * mergejointuples;
3061 
3062  /* tlist eval costs are paid per output row, not per tuple scanned */
3063  startup_cost += path->jpath.path.pathtarget->cost.startup;
3064  run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
3065 
3066  path->jpath.path.startup_cost = startup_cost;
3067  path->jpath.path.total_cost = startup_cost + run_cost;
3068 }
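
Worked instance of the re-fetch estimate derived in the comments above: if the outer relation contains key multiplicities m1 = 2 and m2 = 3 and the inner relation n1 = 4 and n2 = 1, the join produces 2*4 + 3*1 = 11 tuples while the inner relation holds 5, so rescannedtuples = 11 - 5 = 6, matching (m1 - 1)*n1 + (m2 - 1)*n2.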

◆ final_cost_nestloop()

void final_cost_nestloop ( PlannerInfo *  root,
NestPath *  path,
JoinCostWorkspace *  workspace,
JoinPathExtraData *  extra 
)

Definition at line 2393 of file costsize.c.

References clamp_row_est(), PathTarget::cost, cost_qual_eval(), cpu_tuple_cost, disable_cost, enable_nestloop, get_parallel_divisor(), has_indexed_join_quals(), JoinCostWorkspace::inner_rescan_run_cost, JoinCostWorkspace::inner_run_cost, JoinPathExtraData::inner_unique, JoinPath::innerjoinpath, JOIN_ANTI, JOIN_SEMI, JoinPath::joinrestrictinfo, JoinPath::jointype, SemiAntiJoinFactors::match_count, SemiAntiJoinFactors::outer_match_frac, JoinPath::outerjoinpath, Path::parallel_workers, Path::param_info, Path::parent, JoinPath::path, Path::pathtarget, QualCost::per_tuple, ParamPathInfo::ppi_rows, rint(), RelOptInfo::rows, Path::rows, JoinCostWorkspace::run_cost, JoinPathExtraData::semifactors, QualCost::startup, Path::startup_cost, JoinCostWorkspace::startup_cost, and Path::total_cost.

Referenced by create_nestloop_path().

2396 {
2397  Path *outer_path = path->outerjoinpath;
2398  Path *inner_path = path->innerjoinpath;
2399  double outer_path_rows = outer_path->rows;
2400  double inner_path_rows = inner_path->rows;
2401  Cost startup_cost = workspace->startup_cost;
2402  Cost run_cost = workspace->run_cost;
2403  Cost cpu_per_tuple;
2404  QualCost restrict_qual_cost;
2405  double ntuples;
2406 
2407  /* Protect some assumptions below that rowcounts aren't zero or NaN */
2408  if (outer_path_rows <= 0 || isnan(outer_path_rows))
2409  outer_path_rows = 1;
2410  if (inner_path_rows <= 0 || isnan(inner_path_rows))
2411  inner_path_rows = 1;
2412 
2413  /* Mark the path with the correct row estimate */
2414  if (path->path.param_info)
2415  path->path.rows = path->path.param_info->ppi_rows;
2416  else
2417  path->path.rows = path->path.parent->rows;
2418 
2419  /* For partial paths, scale row estimate. */
2420  if (path->path.parallel_workers > 0)
2421  {
2422  double parallel_divisor = get_parallel_divisor(&path->path);
2423 
2424  path->path.rows =
2425  clamp_row_est(path->path.rows / parallel_divisor);
2426  }
2427 
2428  /*
2429  * We could include disable_cost in the preliminary estimate, but that
2430  * would amount to optimizing for the case where the join method is
2431  * disabled, which doesn't seem like the way to bet.
2432  */
2433  if (!enable_nestloop)
2434  startup_cost += disable_cost;
2435 
2436  /* cost of inner-relation source data (we already dealt with outer rel) */
2437 
2438  if (path->jointype == JOIN_SEMI || path->jointype == JOIN_ANTI ||
2439  extra->inner_unique)
2440  {
2441  /*
2442  * With a SEMI or ANTI join, or if the innerrel is known unique, the
2443  * executor will stop after the first match.
2444  */
2445  Cost inner_run_cost = workspace->inner_run_cost;
2446  Cost inner_rescan_run_cost = workspace->inner_rescan_run_cost;
2447  double outer_matched_rows;
2448  double outer_unmatched_rows;
2449  Selectivity inner_scan_frac;
2450 
2451  /*
2452  * For an outer-rel row that has at least one match, we can expect the
2453  * inner scan to stop after a fraction 1/(match_count+1) of the inner
2454  * rows, if the matches are evenly distributed. Since they probably
2455  * aren't quite evenly distributed, we apply a fuzz factor of 2.0 to
2456  * that fraction. (If we used a larger fuzz factor, we'd have to
2457  * clamp inner_scan_frac to at most 1.0; but since match_count is at
2458  * least 1, no such clamp is needed now.)
2459  */
2460  outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
2461  outer_unmatched_rows = outer_path_rows - outer_matched_rows;
2462  inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
2463 
2464  /*
2465  * Compute number of tuples processed (not number emitted!). First,
2466  * account for successfully-matched outer rows.
2467  */
2468  ntuples = outer_matched_rows * inner_path_rows * inner_scan_frac;
2469 
2470  /*
2471  * Now we need to estimate the actual costs of scanning the inner
2472  * relation, which may be quite a bit less than N times inner_run_cost
2473  * due to early scan stops. We consider two cases. If the inner path
2474  * is an indexscan using all the joinquals as indexquals, then an
2475  * unmatched outer row results in an indexscan returning no rows,
2476  * which is probably quite cheap. Otherwise, the executor will have
2477  * to scan the whole inner rel for an unmatched row; not so cheap.
2478  */
2479  if (has_indexed_join_quals(path))
2480  {
2481  /*
2482  * Successfully-matched outer rows will only require scanning
2483  * inner_scan_frac of the inner relation. In this case, we don't
2484  * need to charge the full inner_run_cost even when that's more
2485  * than inner_rescan_run_cost, because we can assume that none of
2486  * the inner scans ever scan the whole inner relation. So it's
2487  * okay to assume that all the inner scan executions can be
2488  * fractions of the full cost, even if materialization is reducing
2489  * the rescan cost. At this writing, it's impossible to get here
2490  * for a materialized inner scan, so inner_run_cost and
2491  * inner_rescan_run_cost will be the same anyway; but just in
2492  * case, use inner_run_cost for the first matched tuple and
2493  * inner_rescan_run_cost for additional ones.
2494  */
2495  run_cost += inner_run_cost * inner_scan_frac;
2496  if (outer_matched_rows > 1)
2497  run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
2498 
2499  /*
2500  * Add the cost of inner-scan executions for unmatched outer rows.
2501  * We estimate this as the same cost as returning the first tuple
2502  * of a nonempty scan. We consider that these are all rescans,
2503  * since we used inner_run_cost once already.
2504  */
2505  run_cost += outer_unmatched_rows *
2506  inner_rescan_run_cost / inner_path_rows;
2507 
2508  /*
2509  * We won't be evaluating any quals at all for unmatched rows, so
2510  * don't add them to ntuples.
2511  */
2512  }
2513  else
2514  {
2515  /*
2516  * Here, a complicating factor is that rescans may be cheaper than
2517  * first scans. If we never scan all the way to the end of the
2518  * inner rel, it might be (depending on the plan type) that we'd
2519  * never pay the whole inner first-scan run cost. However it is
2520  * difficult to estimate whether that will happen (and it could
2521  * not happen if there are any unmatched outer rows!), so be
2522  * conservative and always charge the whole first-scan cost once.
2523  * We consider this charge to correspond to the first unmatched
2524  * outer row, unless there isn't one in our estimate, in which
2525  * case blame it on the first matched row.
2526  */
2527 
2528  /* First, count all unmatched join tuples as being processed */
2529  ntuples += outer_unmatched_rows * inner_path_rows;
2530 
2531  /* Now add the forced full scan, and decrement appropriate count */
2532  run_cost += inner_run_cost;
2533  if (outer_unmatched_rows >= 1)
2534  outer_unmatched_rows -= 1;
2535  else
2536  outer_matched_rows -= 1;
2537 
2538  /* Add inner run cost for additional outer tuples having matches */
2539  if (outer_matched_rows > 0)
2540  run_cost += outer_matched_rows * inner_rescan_run_cost * inner_scan_frac;
2541 
2542  /* Add inner run cost for additional unmatched outer tuples */
2543  if (outer_unmatched_rows > 0)
2544  run_cost += outer_unmatched_rows * inner_rescan_run_cost;
2545  }
2546  }
2547  else
2548  {
2549  /* Normal-case source costs were included in preliminary estimate */
2550 
2551  /* Compute number of tuples processed (not number emitted!) */
2552  ntuples = outer_path_rows * inner_path_rows;
2553  }
2554 
2555  /* CPU costs */
2556  cost_qual_eval(&restrict_qual_cost, path->joinrestrictinfo, root);
2557  startup_cost += restrict_qual_cost.startup;
2558  cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
2559  run_cost += cpu_per_tuple * ntuples;
2560 
2561  /* tlist eval costs are paid per output row, not per tuple scanned */
2562  startup_cost += path->path.pathtarget->cost.startup;
2563  run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
2564 
2565  path->path.startup_cost = startup_cost;
2566  path->path.total_cost = startup_cost + run_cost;
2567 }
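
The SEMI/ANTI early-stop estimate above is the least obvious part of this function, so the following self-contained sketch (not part of costsize.c; all inputs are assumed example values) reproduces the tuples-processed and run-cost arithmetic for the indexed-join-quals branch. The names mirror the locals in final_cost_nestloop.

/*
 * Minimal sketch of the SEMI/ANTI early-stop estimate in
 * final_cost_nestloop, indexed-join-quals case.  Hypothetical inputs;
 * not part of costsize.c.
 */
#include <math.h>
#include <stdio.h>

int
main(void)
{
	/* assumed example inputs */
	double	outer_path_rows = 1000;
	double	inner_path_rows = 500;
	double	outer_match_frac = 0.2;		/* from extra->semifactors */
	double	match_count = 4.0;			/* avg inner matches per outer row */
	double	inner_run_cost = 25.0;		/* from the preliminary workspace */
	double	inner_rescan_run_cost = 20.0;

	double	outer_matched_rows = rint(outer_path_rows * outer_match_frac);
	double	outer_unmatched_rows = outer_path_rows - outer_matched_rows;
	/* expect to scan 1/(match_count+1) of the inner rows, fuzzed by 2.0 */
	double	inner_scan_frac = 2.0 / (match_count + 1.0);

	/* tuples processed for matched outer rows (not tuples emitted) */
	double	ntuples = outer_matched_rows * inner_path_rows * inner_scan_frac;

	/* indexed join quals: every inner scan can stop early */
	double	run_cost = inner_run_cost * inner_scan_frac;
	if (outer_matched_rows > 1)
		run_cost += (outer_matched_rows - 1) *
			inner_rescan_run_cost * inner_scan_frac;
	/* unmatched outer rows: charge only a first-tuple's worth per rescan */
	run_cost += outer_unmatched_rows *
		inner_rescan_run_cost / inner_path_rows;

	printf("ntuples=%g run_cost=%g\n", ntuples, run_cost);
	return 0;
}

Compiled with the math library (cc sketch.c -lm), this should report about ntuples=40000 and run_cost=1634 for the assumed inputs: 200 matched outer rows each scanning 40% of the inner relation, plus a cheap first-tuple charge for each of the 800 unmatched rows.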

◆ get_foreign_key_join_selectivity()