PostgreSQL Source Code  git master
costsize.c
1 /*-------------------------------------------------------------------------
2  *
3  * costsize.c
4  * Routines to compute (and set) relation sizes and path costs
5  *
6  * Path costs are measured in arbitrary units established by these basic
7  * parameters:
8  *
9  * seq_page_cost Cost of a sequential page fetch
10  * random_page_cost Cost of a non-sequential page fetch
11  * cpu_tuple_cost Cost of typical CPU time to process a tuple
12  * cpu_index_tuple_cost Cost of typical CPU time to process an index tuple
13  * cpu_operator_cost Cost of CPU time to execute an operator or function
14  * parallel_tuple_cost Cost of CPU time to pass a tuple from worker to master backend
15  * parallel_setup_cost Cost of setting up shared memory for parallelism
16  *
17  * We expect that the kernel will typically do some amount of read-ahead
18  * optimization; this in conjunction with seek costs means that seq_page_cost
19  * is normally considerably less than random_page_cost. (However, if the
20  * database is fully cached in RAM, it is reasonable to set them equal.)
21  *
22  * We also use a rough estimate "effective_cache_size" of the number of
23  * disk pages in Postgres + OS-level disk cache. (We can't simply use
24  * NBuffers for this purpose because that would ignore the effects of
25  * the kernel's disk cache.)
26  *
27  * Obviously, taking constants for these values is an oversimplification,
28  * but it's tough enough to get any useful estimates even at this level of
29  * detail. Note that all of these parameters are user-settable, in case
30  * the default values are drastically off for a particular platform.
31  *
32  * seq_page_cost and random_page_cost can also be overridden for an individual
33  * tablespace, in case some data is on a fast disk and other data is on a slow
34  * disk. Per-tablespace overrides never apply to temporary work files such as
35  * an external sort or a materialize node that overflows work_mem.
36  *
37  * We compute two separate costs for each path:
38  * total_cost: total estimated cost to fetch all tuples
39  * startup_cost: cost that is expended before first tuple is fetched
40  * In some scenarios, such as when there is a LIMIT or we are implementing
41  * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
42  * path's result. A caller can estimate the cost of fetching a partial
43  * result by interpolating between startup_cost and total_cost. In detail:
44  * actual_cost = startup_cost +
45  * (total_cost - startup_cost) * tuples_to_fetch / path->rows;
46  * Note that a base relation's rows count (and, by extension, plan_rows for
47  * plan nodes below the LIMIT node) are set without regard to any LIMIT, so
48  * that this equation works properly. (Note: while path->rows is never zero
49  * for ordinary relations, it is zero for paths for provably-empty relations,
50  * so beware of division-by-zero.) The LIMIT is applied as a top-level
51  * plan node.
52  *
53  * For largely historical reasons, most of the routines in this module use
54  * the passed result Path only to store their results (rows, startup_cost and
55  * total_cost) into. All the input data they need is passed as separate
56  * parameters, even though much of it could be extracted from the Path.
57  * An exception is made for the cost_XXXjoin() routines, which expect all
58  * the other fields of the passed XXXPath to be filled in, and similarly
59  * cost_index() assumes the passed IndexPath is valid except for its output
60  * values.
61  *
62  *
63  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
64  * Portions Copyright (c) 1994, Regents of the University of California
65  *
66  * IDENTIFICATION
67  * src/backend/optimizer/path/costsize.c
68  *
69  *-------------------------------------------------------------------------
70  */
71 
72 #include "postgres.h"
73 
74 #ifdef _MSC_VER
75 #include <float.h> /* for _isnan */
76 #endif
77 #include <math.h>
78 
79 #include "access/amapi.h"
80 #include "access/htup_details.h"
81 #include "access/tsmapi.h"
82 #include "executor/executor.h"
83 #include "executor/nodeHash.h"
84 #include "miscadmin.h"
85 #include "nodes/nodeFuncs.h"
86 #include "optimizer/clauses.h"
87 #include "optimizer/cost.h"
88 #include "optimizer/pathnode.h"
89 #include "optimizer/paths.h"
90 #include "optimizer/placeholder.h"
91 #include "optimizer/plancat.h"
92 #include "optimizer/planmain.h"
93 #include "optimizer/restrictinfo.h"
94 #include "parser/parsetree.h"
95 #include "utils/lsyscache.h"
96 #include "utils/selfuncs.h"
97 #include "utils/spccache.h"
98 #include "utils/tuplesort.h"
99 
100 
101 #define LOG2(x) (log(x) / 0.693147180559945)
102 
103 
104 double seq_page_cost = DEFAULT_SEQ_PAGE_COST;
105 double random_page_cost = DEFAULT_RANDOM_PAGE_COST;
106 double cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
107 double cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
108 double cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
109 double parallel_tuple_cost = DEFAULT_PARALLEL_TUPLE_COST;
110 double parallel_setup_cost = DEFAULT_PARALLEL_SETUP_COST;
111 
112 int effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
113 
114 Cost disable_cost = 1.0e10;
115 
116 int max_parallel_workers_per_gather = 2;
117 
118 bool enable_seqscan = true;
119 bool enable_indexscan = true;
120 bool enable_indexonlyscan = true;
121 bool enable_bitmapscan = true;
122 bool enable_tidscan = true;
123 bool enable_sort = true;
124 bool enable_hashagg = true;
125 bool enable_nestloop = true;
126 bool enable_material = true;
127 bool enable_mergejoin = true;
128 bool enable_hashjoin = true;
129 bool enable_gathermerge = true;
130 
131 typedef struct
132 {
133  PlannerInfo *root;
134  QualCost total;
135 } cost_qual_eval_context;
136 
137 static List *extract_nonindex_conditions(List *qual_clauses, List *indexquals);
138 static MergeScanSelCache *cached_scansel(PlannerInfo *root,
139  RestrictInfo *rinfo,
140  PathKey *pathkey);
141 static void cost_rescan(PlannerInfo *root, Path *path,
142  Cost *rescan_startup_cost, Cost *rescan_total_cost);
143 static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
144 static void get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
145  ParamPathInfo *param_info,
146  QualCost *qpqual_cost);
147 static bool has_indexed_join_quals(NestPath *joinpath);
148 static double approx_tuple_count(PlannerInfo *root, JoinPath *path,
149  List *quals);
150 static double calc_joinrel_size_estimate(PlannerInfo *root,
151  RelOptInfo *outer_rel,
152  RelOptInfo *inner_rel,
153  double outer_rows,
154  double inner_rows,
155  SpecialJoinInfo *sjinfo,
156  List *restrictlist);
157 static Selectivity get_foreign_key_join_selectivity(PlannerInfo *root,
158  Relids outer_relids,
159  Relids inner_relids,
160  SpecialJoinInfo *sjinfo,
161  List **restrictlist);
162 static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
163 static double relation_byte_size(double tuples, int width);
164 static double page_size(double tuples, int width);
165 static double get_parallel_divisor(Path *path);
166 
167 
168 /*
169  * clamp_row_est
170  * Force a row-count estimate to a sane value.
171  */
172 double
173 clamp_row_est(double nrows)
174 {
175  /*
176  * Force estimate to be at least one row, to make explain output look
177  * better and to avoid possible divide-by-zero when interpolating costs.
178  * Make it an integer, too.
179  */
180  if (nrows <= 1.0)
181  nrows = 1.0;
182  else
183  nrows = rint(nrows);
184 
185  return nrows;
186 }
187 
188 
189 /*
190  * cost_seqscan
191  * Determines and returns the cost of scanning a relation sequentially.
192  *
193  * 'baserel' is the relation to be scanned
194  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
195  */
196 void
197 cost_seqscan(Path *path, PlannerInfo *root,
198  RelOptInfo *baserel, ParamPathInfo *param_info)
199 {
200  Cost startup_cost = 0;
201  Cost cpu_run_cost;
202  Cost disk_run_cost;
203  double spc_seq_page_cost;
204  QualCost qpqual_cost;
205  Cost cpu_per_tuple;
206 
207  /* Should only be applied to base relations */
208  Assert(baserel->relid > 0);
209  Assert(baserel->rtekind == RTE_RELATION);
210 
211  /* Mark the path with the correct row estimate */
212  if (param_info)
213  path->rows = param_info->ppi_rows;
214  else
215  path->rows = baserel->rows;
216 
217  if (!enable_seqscan)
218  startup_cost += disable_cost;
219 
220  /* fetch estimated page cost for tablespace containing table */
221  get_tablespace_page_costs(baserel->reltablespace,
222  NULL,
223  &spc_seq_page_cost);
224 
225  /*
226  * disk costs
227  */
228  disk_run_cost = spc_seq_page_cost * baserel->pages;
229 
230  /* CPU costs */
231  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
232 
233  startup_cost += qpqual_cost.startup;
234  cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
235  cpu_run_cost = cpu_per_tuple * baserel->tuples;
236  /* tlist eval costs are paid per output row, not per tuple scanned */
237  startup_cost += path->pathtarget->cost.startup;
238  cpu_run_cost += path->pathtarget->cost.per_tuple * path->rows;
239 
240  /* Adjust costing for parallelism, if used. */
241  if (path->parallel_workers > 0)
242  {
243  double parallel_divisor = get_parallel_divisor(path);
244 
245  /* The CPU cost is divided among all the workers. */
246  cpu_run_cost /= parallel_divisor;
247 
248  /*
249  * It may be possible to amortize some of the I/O cost, but probably
250  * not very much, because most operating systems already do aggressive
251  * prefetching. For now, we assume that the disk run cost can't be
252  * amortized at all.
253  */
254 
255  /*
256  * In the case of a parallel plan, the row count needs to represent
257  * the number of tuples processed per worker.
258  */
259  path->rows = clamp_row_est(path->rows / parallel_divisor);
260  }
261 
262  path->startup_cost = startup_cost;
263  path->total_cost = startup_cost + cpu_run_cost + disk_run_cost;
264 }
265 
266 /*
267  * cost_samplescan
268  * Determines and returns the cost of scanning a relation using sampling.
269  *
270  * 'baserel' is the relation to be scanned
271  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
272  */
273 void
274 cost_samplescan(Path *path, PlannerInfo *root,
275  RelOptInfo *baserel, ParamPathInfo *param_info)
276 {
277  Cost startup_cost = 0;
278  Cost run_cost = 0;
279  RangeTblEntry *rte;
280  TableSampleClause *tsc;
281  TsmRoutine *tsm;
282  double spc_seq_page_cost,
283  spc_random_page_cost,
284  spc_page_cost;
285  QualCost qpqual_cost;
286  Cost cpu_per_tuple;
287 
288  /* Should only be applied to base relations with tablesample clauses */
289  Assert(baserel->relid > 0);
290  rte = planner_rt_fetch(baserel->relid, root);
291  Assert(rte->rtekind == RTE_RELATION);
292  tsc = rte->tablesample;
293  Assert(tsc != NULL);
294  tsm = GetTsmRoutine(tsc->tsmhandler);
295 
296  /* Mark the path with the correct row estimate */
297  if (param_info)
298  path->rows = param_info->ppi_rows;
299  else
300  path->rows = baserel->rows;
301 
302  /* fetch estimated page cost for tablespace containing table */
303  get_tablespace_page_costs(baserel->reltablespace,
304  &spc_random_page_cost,
305  &spc_seq_page_cost);
306 
307  /* if NextSampleBlock is used, assume random access, else sequential */
308  spc_page_cost = (tsm->NextSampleBlock != NULL) ?
309  spc_random_page_cost : spc_seq_page_cost;
310 
311  /*
312  * disk costs (recall that baserel->pages has already been set to the
313  * number of pages the sampling method will visit)
314  */
315  run_cost += spc_page_cost * baserel->pages;
316 
317  /*
318  * CPU costs (recall that baserel->tuples has already been set to the
319  * number of tuples the sampling method will select). Note that we ignore
320  * execution cost of the TABLESAMPLE parameter expressions; they will be
321  * evaluated only once per scan, and in most usages they'll likely be
322  * simple constants anyway. We also don't charge anything for the
323  * calculations the sampling method might do internally.
324  */
325  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
326 
327  startup_cost += qpqual_cost.startup;
328  cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
329  run_cost += cpu_per_tuple * baserel->tuples;
330  /* tlist eval costs are paid per output row, not per tuple scanned */
331  startup_cost += path->pathtarget->cost.startup;
332  run_cost += path->pathtarget->cost.per_tuple * path->rows;
333 
334  path->startup_cost = startup_cost;
335  path->total_cost = startup_cost + run_cost;
336 }
337 
338 /*
339  * cost_gather
340  * Determines and returns the cost of gather path.
341  *
342  * 'rel' is the relation to be operated upon
343  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
344  * 'rows' may be used to point to a row estimate; if non-NULL, it overrides
345  * both 'rel' and 'param_info'. This is useful when the path doesn't exactly
346  * correspond to any particular RelOptInfo.
347  */
348 void
349 cost_gather(GatherPath *path, PlannerInfo *root,
350  RelOptInfo *rel, ParamPathInfo *param_info,
351  double *rows)
352 {
353  Cost startup_cost = 0;
354  Cost run_cost = 0;
355 
356  /* Mark the path with the correct row estimate */
357  if (rows)
358  path->path.rows = *rows;
359  else if (param_info)
360  path->path.rows = param_info->ppi_rows;
361  else
362  path->path.rows = rel->rows;
363 
364  startup_cost = path->subpath->startup_cost;
365 
366  run_cost = path->subpath->total_cost - path->subpath->startup_cost;
367 
368  /* Parallel setup and communication cost. */
369  startup_cost += parallel_setup_cost;
370  run_cost += parallel_tuple_cost * path->path.rows;
371 
372  path->path.startup_cost = startup_cost;
373  path->path.total_cost = (startup_cost + run_cost);
374 }
375 
376 /*
377  * cost_gather_merge
378  * Determines and returns the cost of gather merge path.
379  *
380  * GatherMerge merges several pre-sorted input streams, using a heap that at
381  * any given instant holds the next tuple from each stream. If there are N
382  * streams, we need about N*log2(N) tuple comparisons to construct the heap at
383  * startup, and then for each output tuple, about log2(N) comparisons to
384  * replace the top heap entry with the next tuple from the same stream.
385  */
386 void
387 cost_gather_merge(GatherMergePath *path, PlannerInfo *root,
388  RelOptInfo *rel, ParamPathInfo *param_info,
389  Cost input_startup_cost, Cost input_total_cost,
390  double *rows)
391 {
392  Cost startup_cost = 0;
393  Cost run_cost = 0;
394  Cost comparison_cost;
395  double N;
396  double logN;
397 
398  /* Mark the path with the correct row estimate */
399  if (rows)
400  path->path.rows = *rows;
401  else if (param_info)
402  path->path.rows = param_info->ppi_rows;
403  else
404  path->path.rows = rel->rows;
405 
406  if (!enable_gathermerge)
407  startup_cost += disable_cost;
408 
409  /*
410  * Add one to the number of workers to account for the leader. This might
411  * be overgenerous since the leader will do less work than other workers
412  * in typical cases, but we'll go with it for now.
413  */
414  Assert(path->num_workers > 0);
415  N = (double) path->num_workers + 1;
416  logN = LOG2(N);
417 
418  /* Assumed cost per tuple comparison */
419  comparison_cost = 2.0 * cpu_operator_cost;
420 
421  /* Heap creation cost */
422  startup_cost += comparison_cost * N * logN;
423 
424  /* Per-tuple heap maintenance cost */
425  run_cost += path->path.rows * comparison_cost * logN;
426 
427  /* small cost for heap management, like cost_merge_append */
428  run_cost += cpu_operator_cost * path->path.rows;
429 
430  /*
431  * Parallel setup and communication cost. Since Gather Merge, unlike
432  * Gather, requires us to block until a tuple is available from every
433  * worker, we bump the IPC cost up a little bit as compared with Gather.
434  * For lack of a better idea, charge an extra 5%.
435  */
436  startup_cost += parallel_setup_cost;
437  run_cost += parallel_tuple_cost * path->path.rows * 1.05;
438 
439  path->path.startup_cost = startup_cost + input_startup_cost;
440  path->path.total_cost = (startup_cost + run_cost + input_total_cost);
441 }
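To make the heap arithmetic used by cost_gather_merge above concrete, here is a small standalone sketch with assumed inputs (2 workers plus the leader, 10000 output rows, and the default cpu_operator_cost of 0.0025):

/* Illustrative sketch only; not part of costsize.c. */
#include <math.h>
#include <stdio.h>

int
main(void)
{
	double		cpu_operator_cost = 0.0025;	/* default GUC value */
	double		rows = 10000.0;				/* assumed output row count */
	double		N = 2 + 1;					/* workers plus leader */
	double		logN = log(N) / log(2.0);
	double		comparison_cost = 2.0 * cpu_operator_cost;
	double		heap_build = comparison_cost * N * logN;	/* startup */
	double		heap_maint = rows * comparison_cost * logN;	/* per tuple */
	double		heap_mgmt = cpu_operator_cost * rows;		/* bookkeeping */

	printf("startup %.4f, run %.4f\n",
		   heap_build, heap_maint + heap_mgmt);
	return 0;
}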
442 
443 /*
444  * cost_index
445  * Determines and returns the cost of scanning a relation using an index.
446  *
447  * 'path' describes the indexscan under consideration, and is complete
448  * except for the fields to be set by this routine
449  * 'loop_count' is the number of repetitions of the indexscan to factor into
450  * estimates of caching behavior
451  *
452  * In addition to rows, startup_cost and total_cost, cost_index() sets the
453  * path's indextotalcost and indexselectivity fields. These values will be
454  * needed if the IndexPath is used in a BitmapIndexScan.
455  *
456  * NOTE: path->indexquals must contain only clauses usable as index
457  * restrictions. Any additional quals evaluated as qpquals may reduce the
458  * number of returned tuples, but they won't reduce the number of tuples
459  * we have to fetch from the table, so they don't reduce the scan cost.
460  */
461 void
462 cost_index(IndexPath *path, PlannerInfo *root, double loop_count,
463  bool partial_path)
464 {
465  IndexOptInfo *index = path->indexinfo;
466  RelOptInfo *baserel = index->rel;
467  bool indexonly = (path->path.pathtype == T_IndexOnlyScan);
468  amcostestimate_function amcostestimate;
469  List *qpquals;
470  Cost startup_cost = 0;
471  Cost run_cost = 0;
472  Cost cpu_run_cost = 0;
473  Cost indexStartupCost;
474  Cost indexTotalCost;
475  Selectivity indexSelectivity;
476  double indexCorrelation,
477  csquared;
478  double spc_seq_page_cost,
479  spc_random_page_cost;
480  Cost min_IO_cost,
481  max_IO_cost;
482  QualCost qpqual_cost;
483  Cost cpu_per_tuple;
484  double tuples_fetched;
485  double pages_fetched;
486  double rand_heap_pages;
487  double index_pages;
488 
489  /* Should only be applied to base relations */
490  Assert(IsA(baserel, RelOptInfo) &&
491  IsA(index, IndexOptInfo));
492  Assert(baserel->relid > 0);
493  Assert(baserel->rtekind == RTE_RELATION);
494 
495  /*
496  * Mark the path with the correct row estimate, and identify which quals
497  * will need to be enforced as qpquals. We need not check any quals that
498  * are implied by the index's predicate, so we can use indrestrictinfo not
499  * baserestrictinfo as the list of relevant restriction clauses for the
500  * rel.
501  */
502  if (path->path.param_info)
503  {
504  path->path.rows = path->path.param_info->ppi_rows;
505  /* qpquals come from the rel's restriction clauses and ppi_clauses */
506  qpquals = list_concat(
507  extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
508  path->indexquals),
509  extract_nonindex_conditions(path->path.param_info->ppi_clauses,
510  path->indexquals));
511  }
512  else
513  {
514  path->path.rows = baserel->rows;
515  /* qpquals come from just the rel's restriction clauses */
516  qpquals = extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
517  path->indexquals);
518  }
519 
520  if (!enable_indexscan)
521  startup_cost += disable_cost;
522  /* we don't need to check enable_indexonlyscan; indxpath.c does that */
523 
524  /*
525  * Call index-access-method-specific code to estimate the processing cost
526  * for scanning the index, as well as the selectivity of the index (ie,
527  * the fraction of main-table tuples we will have to retrieve) and its
528  * correlation to the main-table tuple order. We need a cast here because
529  * relation.h uses a weak function type to avoid including amapi.h.
530  */
531  amcostestimate = (amcostestimate_function) index->amcostestimate;
532  amcostestimate(root, path, loop_count,
533  &indexStartupCost, &indexTotalCost,
534  &indexSelectivity, &indexCorrelation,
535  &index_pages);
536 
537  /*
538  * Save amcostestimate's results for possible use in bitmap scan planning.
539  * We don't bother to save indexStartupCost or indexCorrelation, because a
540  * bitmap scan doesn't care about either.
541  */
542  path->indextotalcost = indexTotalCost;
543  path->indexselectivity = indexSelectivity;
544 
545  /* all costs for touching index itself included here */
546  startup_cost += indexStartupCost;
547  run_cost += indexTotalCost - indexStartupCost;
548 
549  /* estimate number of main-table tuples fetched */
550  tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
551 
552  /* fetch estimated page costs for tablespace containing table */
553  get_tablespace_page_costs(baserel->reltablespace,
554  &spc_random_page_cost,
555  &spc_seq_page_cost);
556 
557  /*----------
558  * Estimate number of main-table pages fetched, and compute I/O cost.
559  *
560  * When the index ordering is uncorrelated with the table ordering,
561  * we use an approximation proposed by Mackert and Lohman (see
562  * index_pages_fetched() for details) to compute the number of pages
563  * fetched, and then charge spc_random_page_cost per page fetched.
564  *
565  * When the index ordering is exactly correlated with the table ordering
566  * (just after a CLUSTER, for example), the number of pages fetched should
567  * be exactly selectivity * table_size. What's more, all but the first
568  * will be sequential fetches, not the random fetches that occur in the
569  * uncorrelated case. So if the number of pages is more than 1, we
570  * ought to charge
571  * spc_random_page_cost + (pages_fetched - 1) * spc_seq_page_cost
572  * For partially-correlated indexes, we ought to charge somewhere between
573  * these two estimates. We currently interpolate linearly between the
574  * estimates based on the correlation squared (XXX is that appropriate?).
575  *
576  * If it's an index-only scan, then we will not need to fetch any heap
577  * pages for which the visibility map shows all tuples are visible.
578  * Hence, reduce the estimated number of heap fetches accordingly.
579  * We use the measured fraction of the entire heap that is all-visible,
580  * which might not be particularly relevant to the subset of the heap
581  * that this query will fetch; but it's not clear how to do better.
582  *----------
583  */
584  if (loop_count > 1)
585  {
586  /*
587  * For repeated indexscans, the appropriate estimate for the
588  * uncorrelated case is to scale up the number of tuples fetched in
589  * the Mackert and Lohman formula by the number of scans, so that we
590  * estimate the number of pages fetched by all the scans; then
591  * pro-rate the costs for one scan. In this case we assume all the
592  * fetches are random accesses.
593  */
594  pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
595  baserel->pages,
596  (double) index->pages,
597  root);
598 
599  if (indexonly)
600  pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
601 
602  rand_heap_pages = pages_fetched;
603 
604  max_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
605 
606  /*
607  * In the perfectly correlated case, the number of pages touched by
608  * each scan is selectivity * table_size, and we can use the Mackert
609  * and Lohman formula at the page level to estimate how much work is
610  * saved by caching across scans. We still assume all the fetches are
611  * random, though, which is an overestimate that's hard to correct for
612  * without double-counting the cache effects. (But in most cases
613  * where such a plan is actually interesting, only one page would get
614  * fetched per scan anyway, so it shouldn't matter much.)
615  */
616  pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
617 
618  pages_fetched = index_pages_fetched(pages_fetched * loop_count,
619  baserel->pages,
620  (double) index->pages,
621  root);
622 
623  if (indexonly)
624  pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
625 
626  min_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
627  }
628  else
629  {
630  /*
631  * Normal case: apply the Mackert and Lohman formula, and then
632  * interpolate between that and the correlation-derived result.
633  */
634  pages_fetched = index_pages_fetched(tuples_fetched,
635  baserel->pages,
636  (double) index->pages,
637  root);
638 
639  if (indexonly)
640  pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
641 
642  rand_heap_pages = pages_fetched;
643 
644  /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
645  max_IO_cost = pages_fetched * spc_random_page_cost;
646 
647  /* min_IO_cost is for the perfectly correlated case (csquared=1) */
648  pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
649 
650  if (indexonly)
651  pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
652 
653  if (pages_fetched > 0)
654  {
655  min_IO_cost = spc_random_page_cost;
656  if (pages_fetched > 1)
657  min_IO_cost += (pages_fetched - 1) * spc_seq_page_cost;
658  }
659  else
660  min_IO_cost = 0;
661  }
662 
663  if (partial_path)
664  {
665  /*
666  * For index only scans compute workers based on number of index pages
667  * fetched; the number of heap pages we fetch might be so small as to
668  * effectively rule out parallelism, which we don't want to do.
669  */
670  if (indexonly)
671  rand_heap_pages = -1;
672 
673  /*
674  * Estimate the number of parallel workers required to scan index. Use
675  * the number of heap pages computed considering heap fetches won't be
676  * sequential as for parallel scans the pages are accessed in random
677  * order.
678  */
679  path->path.parallel_workers = compute_parallel_worker(baserel,
680  rand_heap_pages, index_pages);
681 
682  /*
683  * Fall out if workers can't be assigned for parallel scan, because in
684  * such a case this path will be rejected. So there is no benefit in
685  * doing extra computation.
686  */
687  if (path->path.parallel_workers <= 0)
688  return;
689 
690  path->path.parallel_aware = true;
691  }
692 
693  /*
694  * Now interpolate based on estimated index order correlation to get total
695  * disk I/O cost for main table accesses.
696  */
697  csquared = indexCorrelation * indexCorrelation;
698 
699  run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
700 
701  /*
702  * Estimate CPU costs per tuple.
703  *
704  * What we want here is cpu_tuple_cost plus the evaluation costs of any
705  * qual clauses that we have to evaluate as qpquals.
706  */
707  cost_qual_eval(&qpqual_cost, qpquals, root);
708 
709  startup_cost += qpqual_cost.startup;
710  cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
711 
712  cpu_run_cost += cpu_per_tuple * tuples_fetched;
713 
714  /* tlist eval costs are paid per output row, not per tuple scanned */
715  startup_cost += path->path.pathtarget->cost.startup;
716  cpu_run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
717 
718  /* Adjust costing for parallelism, if used. */
719  if (path->path.parallel_workers > 0)
720  {
721  double parallel_divisor = get_parallel_divisor(&path->path);
722 
723  path->path.rows = clamp_row_est(path->path.rows / parallel_divisor);
724 
725  /* The CPU cost is divided among all the workers. */
726  cpu_run_cost /= parallel_divisor;
727  }
728 
729  run_cost += cpu_run_cost;
730 
731  path->path.startup_cost = startup_cost;
732  path->path.total_cost = startup_cost + run_cost;
733 }
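The correlation-based interpolation that cost_index performs above can be evaluated in isolation with assumed numbers (a fully uncorrelated I/O cost of 400, a perfectly correlated cost of 40, and an index correlation of 0.9):

/* Illustrative sketch only; not part of costsize.c. */
#include <stdio.h>

int
main(void)
{
	double		max_IO_cost = 400.0;	/* assumed: csquared = 0 case */
	double		min_IO_cost = 40.0;		/* assumed: csquared = 1 case */
	double		indexCorrelation = 0.9; /* assumed index correlation */
	double		csquared = indexCorrelation * indexCorrelation;
	double		io_cost;

	io_cost = max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
	printf("interpolated I/O cost: %.1f\n", io_cost);	/* 108.4 */
	return 0;
}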
734 
735 /*
736  * extract_nonindex_conditions
737  *
738  * Given a list of quals to be enforced in an indexscan, extract the ones that
739  * will have to be applied as qpquals (ie, the index machinery won't handle
740  * them). The actual rules for this appear in create_indexscan_plan() in
741  * createplan.c, but the full rules are fairly expensive and we don't want to
742  * go to that much effort for index paths that don't get selected for the
743  * final plan. So we approximate it as quals that don't appear directly in
744  * indexquals and also are not redundant children of the same EquivalenceClass
745  * as some indexqual. This method neglects some infrequently-relevant
746  * considerations, specifically clauses that needn't be checked because they
747  * are implied by an indexqual. It does not seem worth the cycles to try to
748  * factor that in at this stage, even though createplan.c will take pains to
749  * remove such unnecessary clauses from the qpquals list if this path is
750  * selected for use.
751  */
752 static List *
753 extract_nonindex_conditions(List *qual_clauses, List *indexquals)
754 {
755  List *result = NIL;
756  ListCell *lc;
757 
758  foreach(lc, qual_clauses)
759  {
760  RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
761 
762  if (rinfo->pseudoconstant)
763  continue; /* we may drop pseudoconstants here */
764  if (list_member_ptr(indexquals, rinfo))
765  continue; /* simple duplicate */
766  if (is_redundant_derived_clause(rinfo, indexquals))
767  continue; /* derived from same EquivalenceClass */
768  /* ... skip the predicate proof attempt createplan.c will try ... */
769  result = lappend(result, rinfo);
770  }
771  return result;
772 }
773 
774 /*
775  * index_pages_fetched
776  * Estimate the number of pages actually fetched after accounting for
777  * cache effects.
778  *
779  * We use an approximation proposed by Mackert and Lohman, "Index Scans
780  * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
781  * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
782  * The Mackert and Lohman approximation is that the number of pages
783  * fetched is
784  * PF =
785  * min(2TNs/(2T+Ns), T) when T <= b
786  * 2TNs/(2T+Ns) when T > b and Ns <= 2Tb/(2T-b)
787  * b + (Ns - 2Tb/(2T-b))*(T-b)/T when T > b and Ns > 2Tb/(2T-b)
788  * where
789  * T = # pages in table
790  * N = # tuples in table
791  * s = selectivity = fraction of table to be scanned
792  * b = # buffer pages available (we include kernel space here)
793  *
794  * We assume that effective_cache_size is the total number of buffer pages
795  * available for the whole query, and pro-rate that space across all the
796  * tables in the query and the index currently under consideration. (This
797  * ignores space needed for other indexes used by the query, but since we
798  * don't know which indexes will get used, we can't estimate that very well;
799  * and in any case counting all the tables may well be an overestimate, since
800  * depending on the join plan not all the tables may be scanned concurrently.)
801  *
802  * The product Ns is the number of tuples fetched; we pass in that
803  * product rather than calculating it here. "pages" is the number of pages
804  * in the object under consideration (either an index or a table).
805  * "index_pages" is the amount to add to the total table space, which was
806  * computed for us by query_planner.
807  *
808  * Caller is expected to have ensured that tuples_fetched is greater than zero
809  * and rounded to integer (see clamp_row_est). The result will likewise be
810  * greater than zero and integral.
811  */
812 double
813 index_pages_fetched(double tuples_fetched, BlockNumber pages,
814  double index_pages, PlannerInfo *root)
815 {
816  double pages_fetched;
817  double total_pages;
818  double T,
819  b;
820 
821  /* T is # pages in table, but don't allow it to be zero */
822  T = (pages > 1) ? (double) pages : 1.0;
823 
824  /* Compute number of pages assumed to be competing for cache space */
825  total_pages = root->total_table_pages + index_pages;
826  total_pages = Max(total_pages, 1.0);
827  Assert(T <= total_pages);
828 
829  /* b is pro-rated share of effective_cache_size */
830  b = (double) effective_cache_size *T / total_pages;
831 
832  /* force it positive and integral */
833  if (b <= 1.0)
834  b = 1.0;
835  else
836  b = ceil(b);
837 
838  /* This part is the Mackert and Lohman formula */
839  if (T <= b)
840  {
841  pages_fetched =
842  (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
843  if (pages_fetched >= T)
844  pages_fetched = T;
845  else
846  pages_fetched = ceil(pages_fetched);
847  }
848  else
849  {
850  double lim;
851 
852  lim = (2.0 * T * b) / (2.0 * T - b);
853  if (tuples_fetched <= lim)
854  {
855  pages_fetched =
856  (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
857  }
858  else
859  {
860  pages_fetched =
861  b + (tuples_fetched - lim) * (T - b) / T;
862  }
863  pages_fetched = ceil(pages_fetched);
864  }
865  return pages_fetched;
866 }
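For reference, the Mackert and Lohman approximation above can also be evaluated standalone; the table size, buffer-page count, and tuples-fetched figures below are assumptions chosen only to show the shape of the estimate:

/* Illustrative sketch only; restates the formula outside the planner. */
#include <math.h>
#include <stdio.h>

static double
ml_pages_fetched(double Ns, double T, double b)
{
	double		pages;

	if (T <= b)
	{
		pages = (2.0 * T * Ns) / (2.0 * T + Ns);
		return (pages >= T) ? T : ceil(pages);
	}
	else
	{
		double		lim = (2.0 * T * b) / (2.0 * T - b);

		if (Ns <= lim)
			pages = (2.0 * T * Ns) / (2.0 * T + Ns);
		else
			pages = b + (Ns - lim) * (T - b) / T;
		return ceil(pages);
	}
}

int
main(void)
{
	/* assumed: 10000-page table, 2000 buffer pages, 5000 tuples fetched */
	printf("pages fetched: %.0f\n",
		   ml_pages_fetched(5000.0, 10000.0, 2000.0));
	return 0;
}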
867 
868 /*
869  * get_indexpath_pages
870  * Determine the total size of the indexes used in a bitmap index path.
871  *
872  * Note: if the same index is used more than once in a bitmap tree, we will
873  * count it multiple times, which perhaps is the wrong thing ... but it's
874  * not completely clear, and detecting duplicates is difficult, so ignore it
875  * for now.
876  */
877 static double
878 get_indexpath_pages(Path *bitmapqual)
879 {
880  double result = 0;
881  ListCell *l;
882 
883  if (IsA(bitmapqual, BitmapAndPath))
884  {
885  BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
886 
887  foreach(l, apath->bitmapquals)
888  {
889  result += get_indexpath_pages((Path *) lfirst(l));
890  }
891  }
892  else if (IsA(bitmapqual, BitmapOrPath))
893  {
894  BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
895 
896  foreach(l, opath->bitmapquals)
897  {
898  result += get_indexpath_pages((Path *) lfirst(l));
899  }
900  }
901  else if (IsA(bitmapqual, IndexPath))
902  {
903  IndexPath *ipath = (IndexPath *) bitmapqual;
904 
905  result = (double) ipath->indexinfo->pages;
906  }
907  else
908  elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
909 
910  return result;
911 }
912 
913 /*
914  * cost_bitmap_heap_scan
915  * Determines and returns the cost of scanning a relation using a bitmap
916  * index-then-heap plan.
917  *
918  * 'baserel' is the relation to be scanned
919  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
920  * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
921  * 'loop_count' is the number of repetitions of the indexscan to factor into
922  * estimates of caching behavior
923  *
924  * Note: the component IndexPaths in bitmapqual should have been costed
925  * using the same loop_count.
926  */
927 void
928 cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
929  ParamPathInfo *param_info,
930  Path *bitmapqual, double loop_count)
931 {
932  Cost startup_cost = 0;
933  Cost run_cost = 0;
934  Cost indexTotalCost;
935  QualCost qpqual_cost;
936  Cost cpu_per_tuple;
937  Cost cost_per_page;
938  Cost cpu_run_cost;
939  double tuples_fetched;
940  double pages_fetched;
941  double spc_seq_page_cost,
942  spc_random_page_cost;
943  double T;
944 
945  /* Should only be applied to base relations */
946  Assert(IsA(baserel, RelOptInfo));
947  Assert(baserel->relid > 0);
948  Assert(baserel->rtekind == RTE_RELATION);
949 
950  /* Mark the path with the correct row estimate */
951  if (param_info)
952  path->rows = param_info->ppi_rows;
953  else
954  path->rows = baserel->rows;
955 
956  if (!enable_bitmapscan)
957  startup_cost += disable_cost;
958 
959  pages_fetched = compute_bitmap_pages(root, baserel, bitmapqual,
960  loop_count, &indexTotalCost,
961  &tuples_fetched);
962 
963  startup_cost += indexTotalCost;
964  T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
965 
966  /* Fetch estimated page costs for tablespace containing table. */
967  get_tablespace_page_costs(baserel->reltablespace,
968  &spc_random_page_cost,
969  &spc_seq_page_cost);
970 
971  /*
972  * For small numbers of pages we should charge spc_random_page_cost
973  * apiece, while if nearly all the table's pages are being read, it's more
974  * appropriate to charge spc_seq_page_cost apiece. The effect is
975  * nonlinear, too. For lack of a better idea, interpolate like this to
976  * determine the cost per page.
977  */
978  if (pages_fetched >= 2.0)
979  cost_per_page = spc_random_page_cost -
980  (spc_random_page_cost - spc_seq_page_cost)
981  * sqrt(pages_fetched / T);
982  else
983  cost_per_page = spc_random_page_cost;
984 
985  run_cost += pages_fetched * cost_per_page;
986 
987  /*
988  * Estimate CPU costs per tuple.
989  *
990  * Often the indexquals don't need to be rechecked at each tuple ... but
991  * not always, especially not if there are enough tuples involved that the
992  * bitmaps become lossy. For the moment, just assume they will be
993  * rechecked always. This means we charge the full freight for all the
994  * scan clauses.
995  */
996  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
997 
998  startup_cost += qpqual_cost.startup;
999  cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1000  cpu_run_cost = cpu_per_tuple * tuples_fetched;
1001 
1002  /* Adjust costing for parallelism, if used. */
1003  if (path->parallel_workers > 0)
1004  {
1005  double parallel_divisor = get_parallel_divisor(path);
1006 
1007  /* The CPU cost is divided among all the workers. */
1008  cpu_run_cost /= parallel_divisor;
1009 
1010  path->rows = clamp_row_est(path->rows / parallel_divisor);
1011  }
1012 
1013 
1014  run_cost += cpu_run_cost;
1015 
1016  /* tlist eval costs are paid per output row, not per tuple scanned */
1017  startup_cost += path->pathtarget->cost.startup;
1018  run_cost += path->pathtarget->cost.per_tuple * path->rows;
1019 
1020  path->startup_cost = startup_cost;
1021  path->total_cost = startup_cost + run_cost;
1022 }
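The nonlinear per-page charge computed above can be illustrated with assumed figures (the default random_page_cost of 4.0 and seq_page_cost of 1.0, with 500 pages fetched out of a 10000-page table):

/* Illustrative sketch only; not part of costsize.c. */
#include <math.h>
#include <stdio.h>

int
main(void)
{
	double		spc_random_page_cost = 4.0; /* default random_page_cost */
	double		spc_seq_page_cost = 1.0;	/* default seq_page_cost */
	double		T = 10000.0;				/* pages in table */
	double		pages_fetched = 500.0;		/* assumed bitmap fetch count */
	double		cost_per_page;

	cost_per_page = spc_random_page_cost -
		(spc_random_page_cost - spc_seq_page_cost) * sqrt(pages_fetched / T);
	printf("cost per page: %.3f\n", cost_per_page); /* about 3.33 */
	return 0;
}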
1023 
1024 /*
1025  * cost_bitmap_tree_node
1026  * Extract cost and selectivity from a bitmap tree node (index/and/or)
1027  */
1028 void
1029 cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
1030 {
1031  if (IsA(path, IndexPath))
1032  {
1033  *cost = ((IndexPath *) path)->indextotalcost;
1034  *selec = ((IndexPath *) path)->indexselectivity;
1035 
1036  /*
1037  * Charge a small amount per retrieved tuple to reflect the costs of
1038  * manipulating the bitmap. This is mostly to make sure that a bitmap
1039  * scan doesn't look to be the same cost as an indexscan to retrieve a
1040  * single tuple.
1041  */
1042  *cost += 0.1 * cpu_operator_cost * path->rows;
1043  }
1044  else if (IsA(path, BitmapAndPath))
1045  {
1046  *cost = path->total_cost;
1047  *selec = ((BitmapAndPath *) path)->bitmapselectivity;
1048  }
1049  else if (IsA(path, BitmapOrPath))
1050  {
1051  *cost = path->total_cost;
1052  *selec = ((BitmapOrPath *) path)->bitmapselectivity;
1053  }
1054  else
1055  {
1056  elog(ERROR, "unrecognized node type: %d", nodeTag(path));
1057  *cost = *selec = 0; /* keep compiler quiet */
1058  }
1059 }
1060 
1061 /*
1062  * cost_bitmap_and_node
1063  * Estimate the cost of a BitmapAnd node
1064  *
1065  * Note that this considers only the costs of index scanning and bitmap
1066  * creation, not the eventual heap access. In that sense the object isn't
1067  * truly a Path, but it has enough path-like properties (costs in particular)
1068  * to warrant treating it as one. We don't bother to set the path rows field,
1069  * however.
1070  */
1071 void
1072 cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
1073 {
1074  Cost totalCost;
1075  Selectivity selec;
1076  ListCell *l;
1077 
1078  /*
1079  * We estimate AND selectivity on the assumption that the inputs are
1080  * independent. This is probably often wrong, but we don't have the info
1081  * to do better.
1082  *
1083  * The runtime cost of the BitmapAnd itself is estimated at 100x
1084  * cpu_operator_cost for each tbm_intersect needed. Probably too small,
1085  * definitely too simplistic?
1086  */
1087  totalCost = 0.0;
1088  selec = 1.0;
1089  foreach(l, path->bitmapquals)
1090  {
1091  Path *subpath = (Path *) lfirst(l);
1092  Cost subCost;
1093  Selectivity subselec;
1094 
1095  cost_bitmap_tree_node(subpath, &subCost, &subselec);
1096 
1097  selec *= subselec;
1098 
1099  totalCost += subCost;
1100  if (l != list_head(path->bitmapquals))
1101  totalCost += 100.0 * cpu_operator_cost;
1102  }
1103  path->bitmapselectivity = selec;
1104  path->path.rows = 0; /* per above, not used */
1105  path->path.startup_cost = totalCost;
1106  path->path.total_cost = totalCost;
1107 }
1108 
1109 /*
1110  * cost_bitmap_or_node
1111  * Estimate the cost of a BitmapOr node
1112  *
1113  * See comments for cost_bitmap_and_node.
1114  */
1115 void
1116 cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
1117 {
1118  Cost totalCost;
1119  Selectivity selec;
1120  ListCell *l;
1121 
1122  /*
1123  * We estimate OR selectivity on the assumption that the inputs are
1124  * non-overlapping, since that's often the case in "x IN (list)" type
1125  * situations. Of course, we clamp to 1.0 at the end.
1126  *
1127  * The runtime cost of the BitmapOr itself is estimated at 100x
1128  * cpu_operator_cost for each tbm_union needed. Probably too small,
1129  * definitely too simplistic? We are aware that the tbm_unions are
1130  * optimized out when the inputs are BitmapIndexScans.
1131  */
1132  totalCost = 0.0;
1133  selec = 0.0;
1134  foreach(l, path->bitmapquals)
1135  {
1136  Path *subpath = (Path *) lfirst(l);
1137  Cost subCost;
1138  Selectivity subselec;
1139 
1140  cost_bitmap_tree_node(subpath, &subCost, &subselec);
1141 
1142  selec += subselec;
1143 
1144  totalCost += subCost;
1145  if (l != list_head(path->bitmapquals) &&
1146  !IsA(subpath, IndexPath))
1147  totalCost += 100.0 * cpu_operator_cost;
1148  }
1149  path->bitmapselectivity = Min(selec, 1.0);
1150  path->path.rows = 0; /* per above, not used */
1151  path->path.startup_cost = totalCost;
1152  path->path.total_cost = totalCost;
1153 }
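The selectivity-combination assumptions used by cost_bitmap_and_node and cost_bitmap_or_node above (independence for AND, non-overlap for OR) amount to the following arithmetic; the input selectivities are assumed values:

/* Illustrative sketch only; not part of costsize.c. */
#include <stdio.h>

int
main(void)
{
	double		s1 = 0.05;		/* assumed selectivity of first input */
	double		s2 = 0.10;		/* assumed selectivity of second input */
	double		and_selec = s1 * s2;	/* inputs treated as independent */
	double		or_selec = s1 + s2; /* inputs treated as non-overlapping */

	if (or_selec > 1.0)
		or_selec = 1.0;			/* clamp, as cost_bitmap_or_node does */
	printf("AND: %.4f  OR: %.4f\n", and_selec, or_selec);
	return 0;
}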
1154 
1155 /*
1156  * cost_tidscan
1157  * Determines and returns the cost of scanning a relation using TIDs.
1158  *
1159  * 'baserel' is the relation to be scanned
1160  * 'tidquals' is the list of TID-checkable quals
1161  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1162  */
1163 void
1164 cost_tidscan(Path *path, PlannerInfo *root,
1165  RelOptInfo *baserel, List *tidquals, ParamPathInfo *param_info)
1166 {
1167  Cost startup_cost = 0;
1168  Cost run_cost = 0;
1169  bool isCurrentOf = false;
1170  QualCost qpqual_cost;
1171  Cost cpu_per_tuple;
1172  QualCost tid_qual_cost;
1173  int ntuples;
1174  ListCell *l;
1175  double spc_random_page_cost;
1176 
1177  /* Should only be applied to base relations */
1178  Assert(baserel->relid > 0);
1179  Assert(baserel->rtekind == RTE_RELATION);
1180 
1181  /* Mark the path with the correct row estimate */
1182  if (param_info)
1183  path->rows = param_info->ppi_rows;
1184  else
1185  path->rows = baserel->rows;
1186 
1187  /* Count how many tuples we expect to retrieve */
1188  ntuples = 0;
1189  foreach(l, tidquals)
1190  {
1191  if (IsA(lfirst(l), ScalarArrayOpExpr))
1192  {
1193  /* Each element of the array yields 1 tuple */
1194  ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) lfirst(l);
1195  Node *arraynode = (Node *) lsecond(saop->args);
1196 
1197  ntuples += estimate_array_length(arraynode);
1198  }
1199  else if (IsA(lfirst(l), CurrentOfExpr))
1200  {
1201  /* CURRENT OF yields 1 tuple */
1202  isCurrentOf = true;
1203  ntuples++;
1204  }
1205  else
1206  {
1207  /* It's just CTID = something, count 1 tuple */
1208  ntuples++;
1209  }
1210  }
1211 
1212  /*
1213  * We must force TID scan for WHERE CURRENT OF, because only nodeTidscan.c
1214  * understands how to do it correctly. Therefore, honor enable_tidscan
1215  * only when CURRENT OF isn't present. Also note that cost_qual_eval
1216  * counts a CurrentOfExpr as having startup cost disable_cost, which we
1217  * subtract off here; that's to prevent other plan types such as seqscan
1218  * from winning.
1219  */
1220  if (isCurrentOf)
1221  {
1222  Assert(baserel->baserestrictcost.startup >= disable_cost);
1223  startup_cost -= disable_cost;
1224  }
1225  else if (!enable_tidscan)
1226  startup_cost += disable_cost;
1227 
1228  /*
1229  * The TID qual expressions will be computed once, any other baserestrict
1230  * quals once per retrieved tuple.
1231  */
1232  cost_qual_eval(&tid_qual_cost, tidquals, root);
1233 
1234  /* fetch estimated page cost for tablespace containing table */
1235  get_tablespace_page_costs(baserel->reltablespace,
1236  &spc_random_page_cost,
1237  NULL);
1238 
1239  /* disk costs --- assume each tuple on a different page */
1240  run_cost += spc_random_page_cost * ntuples;
1241 
1242  /* Add scanning CPU costs */
1243  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1244 
1245  /* XXX currently we assume TID quals are a subset of qpquals */
1246  startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
1247  cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
1248  tid_qual_cost.per_tuple;
1249  run_cost += cpu_per_tuple * ntuples;
1250 
1251  /* tlist eval costs are paid per output row, not per tuple scanned */
1252  startup_cost += path->pathtarget->cost.startup;
1253  run_cost += path->pathtarget->cost.per_tuple * path->rows;
1254 
1255  path->startup_cost = startup_cost;
1256  path->total_cost = startup_cost + run_cost;
1257 }
1258 
1259 /*
1260  * cost_subqueryscan
1261  * Determines and returns the cost of scanning a subquery RTE.
1262  *
1263  * 'baserel' is the relation to be scanned
1264  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1265  */
1266 void
1267 cost_subqueryscan(SubqueryScanPath *path, PlannerInfo *root,
1268  RelOptInfo *baserel, ParamPathInfo *param_info)
1269 {
1270  Cost startup_cost;
1271  Cost run_cost;
1272  QualCost qpqual_cost;
1273  Cost cpu_per_tuple;
1274 
1275  /* Should only be applied to base relations that are subqueries */
1276  Assert(baserel->relid > 0);
1277  Assert(baserel->rtekind == RTE_SUBQUERY);
1278 
1279  /* Mark the path with the correct row estimate */
1280  if (param_info)
1281  path->path.rows = param_info->ppi_rows;
1282  else
1283  path->path.rows = baserel->rows;
1284 
1285  /*
1286  * Cost of path is cost of evaluating the subplan, plus cost of evaluating
1287  * any restriction clauses and tlist that will be attached to the
1288  * SubqueryScan node, plus cpu_tuple_cost to account for selection and
1289  * projection overhead.
1290  */
1291  path->path.startup_cost = path->subpath->startup_cost;
1292  path->path.total_cost = path->subpath->total_cost;
1293 
1294  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1295 
1296  startup_cost = qpqual_cost.startup;
1297  cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1298  run_cost = cpu_per_tuple * baserel->tuples;
1299 
1300  /* tlist eval costs are paid per output row, not per tuple scanned */
1301  startup_cost += path->path.pathtarget->cost.startup;
1302  run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
1303 
1304  path->path.startup_cost += startup_cost;
1305  path->path.total_cost += startup_cost + run_cost;
1306 }
1307 
1308 /*
1309  * cost_functionscan
1310  * Determines and returns the cost of scanning a function RTE.
1311  *
1312  * 'baserel' is the relation to be scanned
1313  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1314  */
1315 void
1316 cost_functionscan(Path *path, PlannerInfo *root,
1317  RelOptInfo *baserel, ParamPathInfo *param_info)
1318 {
1319  Cost startup_cost = 0;
1320  Cost run_cost = 0;
1321  QualCost qpqual_cost;
1322  Cost cpu_per_tuple;
1323  RangeTblEntry *rte;
1324  QualCost exprcost;
1325 
1326  /* Should only be applied to base relations that are functions */
1327  Assert(baserel->relid > 0);
1328  rte = planner_rt_fetch(baserel->relid, root);
1329  Assert(rte->rtekind == RTE_FUNCTION);
1330 
1331  /* Mark the path with the correct row estimate */
1332  if (param_info)
1333  path->rows = param_info->ppi_rows;
1334  else
1335  path->rows = baserel->rows;
1336 
1337  /*
1338  * Estimate costs of executing the function expression(s).
1339  *
1340  * Currently, nodeFunctionscan.c always executes the functions to
1341  * completion before returning any rows, and caches the results in a
1342  * tuplestore. So the function eval cost is all startup cost, and per-row
1343  * costs are minimal.
1344  *
1345  * XXX in principle we ought to charge tuplestore spill costs if the
1346  * number of rows is large. However, given how phony our rowcount
1347  * estimates for functions tend to be, there's not a lot of point in that
1348  * refinement right now.
1349  */
1350  cost_qual_eval_node(&exprcost, (Node *) rte->functions, root);
1351 
1352  startup_cost += exprcost.startup + exprcost.per_tuple;
1353 
1354  /* Add scanning CPU costs */
1355  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1356 
1357  startup_cost += qpqual_cost.startup;
1358  cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1359  run_cost += cpu_per_tuple * baserel->tuples;
1360 
1361  /* tlist eval costs are paid per output row, not per tuple scanned */
1362  startup_cost += path->pathtarget->cost.startup;
1363  run_cost += path->pathtarget->cost.per_tuple * path->rows;
1364 
1365  path->startup_cost = startup_cost;
1366  path->total_cost = startup_cost + run_cost;
1367 }
1368 
1369 /*
1370  * cost_tablefuncscan
1371  * Determines and returns the cost of scanning a table function.
1372  *
1373  * 'baserel' is the relation to be scanned
1374  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1375  */
1376 void
1377 cost_tablefuncscan(Path *path, PlannerInfo *root,
1378  RelOptInfo *baserel, ParamPathInfo *param_info)
1379 {
1380  Cost startup_cost = 0;
1381  Cost run_cost = 0;
1382  QualCost qpqual_cost;
1383  Cost cpu_per_tuple;
1384  RangeTblEntry *rte;
1385  QualCost exprcost;
1386 
1387  /* Should only be applied to base relations that are functions */
1388  Assert(baserel->relid > 0);
1389  rte = planner_rt_fetch(baserel->relid, root);
1390  Assert(rte->rtekind == RTE_TABLEFUNC);
1391 
1392  /* Mark the path with the correct row estimate */
1393  if (param_info)
1394  path->rows = param_info->ppi_rows;
1395  else
1396  path->rows = baserel->rows;
1397 
1398  /*
1399  * Estimate costs of executing the table func expression(s).
1400  *
1401  * XXX in principle we ought to charge tuplestore spill costs if the
1402  * number of rows is large. However, given how phony our rowcount
1403  * estimates for tablefuncs tend to be, there's not a lot of point in that
1404  * refinement right now.
1405  */
1406  cost_qual_eval_node(&exprcost, (Node *) rte->tablefunc, root);
1407 
1408  startup_cost += exprcost.startup + exprcost.per_tuple;
1409 
1410  /* Add scanning CPU costs */
1411  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1412 
1413  startup_cost += qpqual_cost.startup;
1414  cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1415  run_cost += cpu_per_tuple * baserel->tuples;
1416 
1417  /* tlist eval costs are paid per output row, not per tuple scanned */
1418  startup_cost += path->pathtarget->cost.startup;
1419  run_cost += path->pathtarget->cost.per_tuple * path->rows;
1420 
1421  path->startup_cost = startup_cost;
1422  path->total_cost = startup_cost + run_cost;
1423 }
1424 
1425 /*
1426  * cost_valuesscan
1427  * Determines and returns the cost of scanning a VALUES RTE.
1428  *
1429  * 'baserel' is the relation to be scanned
1430  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1431  */
1432 void
1433 cost_valuesscan(Path *path, PlannerInfo *root,
1434  RelOptInfo *baserel, ParamPathInfo *param_info)
1435 {
1436  Cost startup_cost = 0;
1437  Cost run_cost = 0;
1438  QualCost qpqual_cost;
1439  Cost cpu_per_tuple;
1440 
1441  /* Should only be applied to base relations that are values lists */
1442  Assert(baserel->relid > 0);
1443  Assert(baserel->rtekind == RTE_VALUES);
1444 
1445  /* Mark the path with the correct row estimate */
1446  if (param_info)
1447  path->rows = param_info->ppi_rows;
1448  else
1449  path->rows = baserel->rows;
1450 
1451  /*
1452  * For now, estimate list evaluation cost at one operator eval per list
1453  * (probably pretty bogus, but is it worth being smarter?)
1454  */
1455  cpu_per_tuple = cpu_operator_cost;
1456 
1457  /* Add scanning CPU costs */
1458  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1459 
1460  startup_cost += qpqual_cost.startup;
1461  cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1462  run_cost += cpu_per_tuple * baserel->tuples;
1463 
1464  /* tlist eval costs are paid per output row, not per tuple scanned */
1465  startup_cost += path->pathtarget->cost.startup;
1466  run_cost += path->pathtarget->cost.per_tuple * path->rows;
1467 
1468  path->startup_cost = startup_cost;
1469  path->total_cost = startup_cost + run_cost;
1470 }
1471 
1472 /*
1473  * cost_ctescan
1474  * Determines and returns the cost of scanning a CTE RTE.
1475  *
1476  * Note: this is used for both self-reference and regular CTEs; the
1477  * possible cost differences are below the threshold of what we could
1478  * estimate accurately anyway. Note that the costs of evaluating the
1479  * referenced CTE query are added into the final plan as initplan costs,
1480  * and should NOT be counted here.
1481  */
1482 void
1483 cost_ctescan(Path *path, PlannerInfo *root,
1484  RelOptInfo *baserel, ParamPathInfo *param_info)
1485 {
1486  Cost startup_cost = 0;
1487  Cost run_cost = 0;
1488  QualCost qpqual_cost;
1489  Cost cpu_per_tuple;
1490 
1491  /* Should only be applied to base relations that are CTEs */
1492  Assert(baserel->relid > 0);
1493  Assert(baserel->rtekind == RTE_CTE);
1494 
1495  /* Mark the path with the correct row estimate */
1496  if (param_info)
1497  path->rows = param_info->ppi_rows;
1498  else
1499  path->rows = baserel->rows;
1500 
1501  /* Charge one CPU tuple cost per row for tuplestore manipulation */
1502  cpu_per_tuple = cpu_tuple_cost;
1503 
1504  /* Add scanning CPU costs */
1505  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1506 
1507  startup_cost += qpqual_cost.startup;
1508  cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1509  run_cost += cpu_per_tuple * baserel->tuples;
1510 
1511  /* tlist eval costs are paid per output row, not per tuple scanned */
1512  startup_cost += path->pathtarget->cost.startup;
1513  run_cost += path->pathtarget->cost.per_tuple * path->rows;
1514 
1515  path->startup_cost = startup_cost;
1516  path->total_cost = startup_cost + run_cost;
1517 }
1518 
1519 /*
1520  * cost_namedtuplestorescan
1521  * Determines and returns the cost of scanning a named tuplestore.
1522  */
1523 void
1524 cost_namedtuplestorescan(Path *path, PlannerInfo *root,
1525  RelOptInfo *baserel, ParamPathInfo *param_info)
1526 {
1527  Cost startup_cost = 0;
1528  Cost run_cost = 0;
1529  QualCost qpqual_cost;
1530  Cost cpu_per_tuple;
1531 
1532  /* Should only be applied to base relations that are Tuplestores */
1533  Assert(baserel->relid > 0);
1534  Assert(baserel->rtekind == RTE_NAMEDTUPLESTORE);
1535 
1536  /* Mark the path with the correct row estimate */
1537  if (param_info)
1538  path->rows = param_info->ppi_rows;
1539  else
1540  path->rows = baserel->rows;
1541 
1542  /* Charge one CPU tuple cost per row for tuplestore manipulation */
1543  cpu_per_tuple = cpu_tuple_cost;
1544 
1545  /* Add scanning CPU costs */
1546  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1547 
1548  startup_cost += qpqual_cost.startup;
1549  cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1550  run_cost += cpu_per_tuple * baserel->tuples;
1551 
1552  path->startup_cost = startup_cost;
1553  path->total_cost = startup_cost + run_cost;
1554 }
1555 
1556 /*
1557  * cost_recursive_union
1558  * Determines and returns the cost of performing a recursive union,
1559  * and also the estimated output size.
1560  *
1561  * We are given Paths for the nonrecursive and recursive terms.
1562  */
1563 void
1564 cost_recursive_union(Path *runion, Path *nrterm, Path *rterm)
1565 {
1566  Cost startup_cost;
1567  Cost total_cost;
1568  double total_rows;
1569 
1570  /* We probably have decent estimates for the non-recursive term */
1571  startup_cost = nrterm->startup_cost;
1572  total_cost = nrterm->total_cost;
1573  total_rows = nrterm->rows;
1574 
1575  /*
1576  * We arbitrarily assume that about 10 recursive iterations will be
1577  * needed, and that we've managed to get a good fix on the cost and output
1578  * size of each one of them. These are mighty shaky assumptions but it's
1579  * hard to see how to do better.
1580  */
1581  total_cost += 10 * rterm->total_cost;
1582  total_rows += 10 * rterm->rows;
1583 
1584  /*
1585  * Also charge cpu_tuple_cost per row to account for the costs of
1586  * manipulating the tuplestores. (We don't worry about possible
1587  * spill-to-disk costs.)
1588  */
1589  total_cost += cpu_tuple_cost * total_rows;
1590 
1591  runion->startup_cost = startup_cost;
1592  runion->total_cost = total_cost;
1593  runion->rows = total_rows;
1594  runion->pathtarget->width = Max(nrterm->pathtarget->width,
1595  rterm->pathtarget->width);
1596 }
1597 
1598 /*
1599  * cost_sort
1600  * Determines and returns the cost of sorting a relation, including
1601  * the cost of reading the input data.
1602  *
1603  * If the total volume of data to sort is less than sort_mem, we will do
1604  * an in-memory sort, which requires no I/O and about t*log2(t) tuple
1605  * comparisons for t tuples.
1606  *
1607  * If the total volume exceeds sort_mem, we switch to a tape-style merge
1608  * algorithm. There will still be about t*log2(t) tuple comparisons in
1609  * total, but we will also need to write and read each tuple once per
1610  * merge pass. We expect about ceil(logM(r)) merge passes where r is the
1611  * number of initial runs formed and M is the merge order used by tuplesort.c.
1612  * Since the average initial run should be about sort_mem, we have
1613  * disk traffic = 2 * relsize * ceil(logM(p / sort_mem))
1614  * cpu = comparison_cost * t * log2(t)
1615  *
1616  * If the sort is bounded (i.e., only the first k result tuples are needed)
1617  * and k tuples can fit into sort_mem, we use a heap method that keeps only
1618  * k tuples in the heap; this will require about t*log2(k) tuple comparisons.
1619  *
1620  * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
1621  * accesses (XXX can't we refine that guess?)
1622  *
1623  * By default, we charge two operator evals per tuple comparison, which should
1624  * be in the right ballpark in most cases. The caller can tweak this by
1625  * specifying nonzero comparison_cost; typically that's used for any extra
1626  * work that has to be done to prepare the inputs to the comparison operators.
1627  *
1628  * 'pathkeys' is a list of sort keys
1629  * 'input_cost' is the total cost for reading the input data
1630  * 'tuples' is the number of tuples in the relation
1631  * 'width' is the average tuple width in bytes
1632  * 'comparison_cost' is the extra cost per comparison, if any
1633  * 'sort_mem' is the number of kilobytes of work memory allowed for the sort
1634  * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
1635  *
1636  * NOTE: some callers currently pass NIL for pathkeys because they
1637  * can't conveniently supply the sort keys. Since this routine doesn't
1638  * currently do anything with pathkeys anyway, that doesn't matter...
1639  * but if it ever does, it should react gracefully to lack of key data.
1640  * (Actually, the thing we'd most likely be interested in is just the number
1641  * of sort keys, which all callers *could* supply.)
1642  */
1643 void
1644 cost_sort(Path *path, PlannerInfo *root,
1645  List *pathkeys, Cost input_cost, double tuples, int width,
1646  Cost comparison_cost, int sort_mem,
1647  double limit_tuples)
1648 {
1649  Cost startup_cost = input_cost;
1650  Cost run_cost = 0;
1651  double input_bytes = relation_byte_size(tuples, width);
1652  double output_bytes;
1653  double output_tuples;
1654  long sort_mem_bytes = sort_mem * 1024L;
1655 
1656  if (!enable_sort)
1657  startup_cost += disable_cost;
1658 
1659  path->rows = tuples;
1660 
1661  /*
1662  * We want to be sure the cost of a sort is never estimated as zero, even
1663  * if passed-in tuple count is zero. Besides, mustn't do log(0)...
1664  */
1665  if (tuples < 2.0)
1666  tuples = 2.0;
1667 
1668  /* Include the default cost-per-comparison */
1669  comparison_cost += 2.0 * cpu_operator_cost;
1670 
1671  /* Do we have a useful LIMIT? */
1672  if (limit_tuples > 0 && limit_tuples < tuples)
1673  {
1674  output_tuples = limit_tuples;
1675  output_bytes = relation_byte_size(output_tuples, width);
1676  }
1677  else
1678  {
1679  output_tuples = tuples;
1680  output_bytes = input_bytes;
1681  }
1682 
1683  if (output_bytes > sort_mem_bytes)
1684  {
1685  /*
1686  * We'll have to use a disk-based sort of all the tuples
1687  */
1688  double npages = ceil(input_bytes / BLCKSZ);
1689  double nruns = input_bytes / sort_mem_bytes;
1690  double mergeorder = tuplesort_merge_order(sort_mem_bytes);
1691  double log_runs;
1692  double npageaccesses;
1693 
1694  /*
1695  * CPU costs
1696  *
1697  * Assume about N log2 N comparisons
1698  */
1699  startup_cost += comparison_cost * tuples * LOG2(tuples);
1700 
1701  /* Disk costs */
1702 
1703  /* Compute logM(r) as log(r) / log(M) */
1704  if (nruns > mergeorder)
1705  log_runs = ceil(log(nruns) / log(mergeorder));
1706  else
1707  log_runs = 1.0;
1708  npageaccesses = 2.0 * npages * log_runs;
1709  /* Assume 3/4ths of accesses are sequential, 1/4th are not */
1710  startup_cost += npageaccesses *
1711  (seq_page_cost * 0.75 + random_page_cost * 0.25);
1712  }
1713  else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)
1714  {
1715  /*
1716  * We'll use a bounded heap-sort keeping just K tuples in memory, for
1717  * a total number of tuple comparisons of N log2 K; but the constant
1718  * factor is a bit higher than for quicksort. Tweak it so that the
1719  * cost curve is continuous at the crossover point.
1720  */
1721  startup_cost += comparison_cost * tuples * LOG2(2.0 * output_tuples);
1722  }
1723  else
1724  {
1725  /* We'll use plain quicksort on all the input tuples */
1726  startup_cost += comparison_cost * tuples * LOG2(tuples);
1727  }
1728 
1729  /*
1730  * Also charge a small amount (arbitrarily set equal to operator cost) per
1731  * extracted tuple. We don't charge cpu_tuple_cost because a Sort node
1732  * doesn't do qual-checking or projection, so it has less overhead than
1733  * most plan nodes. Note it's correct to use tuples not output_tuples
1734  * here --- the upper LIMIT will pro-rate the run cost so we'd be double
1735  * counting the LIMIT otherwise.
1736  */
1737  run_cost += cpu_operator_cost * tuples;
1738 
1739  path->startup_cost = startup_cost;
1740  path->total_cost = startup_cost + run_cost;
1741 }
1742 
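The external-sort branch above is plain arithmetic once the cost settings are fixed. The following standalone sketch (not taken from costsize.c) recomputes it with the usual default cost parameters, an 8 kB block size, a hypothetical merge order standing in for tuplesort_merge_order(), and tuples * width as a crude stand-in for relation_byte_size():

#include <math.h>
#include <stdio.h>

/* Disk-based sort estimate, following the formulas in cost_sort() above. */
int
main(void)
{
    double  cpu_operator_cost = 0.0025;     /* assumed default GUC value */
    double  seq_page_cost = 1.0;            /* assumed default GUC value */
    double  random_page_cost = 4.0;         /* assumed default GUC value */
    double  comparison_cost = 2.0 * cpu_operator_cost;
    double  tuples = 1e6;                   /* hypothetical input size */
    double  width = 100.0;                  /* hypothetical row width, bytes */
    double  sort_mem_bytes = 4096.0 * 1024; /* default 4MB work_mem */
    double  mergeorder = 500.0;             /* hypothetical merge order */
    double  blcksz = 8192.0;                /* standard BLCKSZ */

    /* ignores the per-tuple overhead that relation_byte_size() adds */
    double  input_bytes = tuples * width;
    double  npages = ceil(input_bytes / blcksz);
    double  nruns = input_bytes / sort_mem_bytes;
    double  log_runs = (nruns > mergeorder) ?
        ceil(log(nruns) / log(mergeorder)) : 1.0;
    double  npageaccesses = 2.0 * npages * log_runs;

    double  cpu_cost = comparison_cost * tuples * (log(tuples) / log(2.0));
    double  disk_cost = npageaccesses *
        (seq_page_cost * 0.75 + random_page_cost * 0.25);

    printf("cpu = %.0f, disk = %.0f\n", cpu_cost, disk_cost);
    return 0;
}

With these inputs the data forms fewer runs than the merge order, so a single merge pass suffices and the disk charge is 2 * npages * (0.75 * seq_page_cost + 0.25 * random_page_cost).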
1743 /*
1744  * cost_merge_append
1745  * Determines and returns the cost of a MergeAppend node.
1746  *
1747  * MergeAppend merges several pre-sorted input streams, using a heap that
1748  * at any given instant holds the next tuple from each stream. If there
1749  * are N streams, we need about N*log2(N) tuple comparisons to construct
1750  * the heap at startup, and then for each output tuple, about log2(N)
1751  * comparisons to replace the top entry.
1752  *
1753  * (The effective value of N will drop once some of the input streams are
1754  * exhausted, but it seems unlikely to be worth trying to account for that.)
1755  *
1756  * The heap is never spilled to disk, since we assume N is not very large.
1757  * So this is much simpler than cost_sort.
1758  *
1759  * As in cost_sort, we charge two operator evals per tuple comparison.
1760  *
1761  * 'pathkeys' is a list of sort keys
1762  * 'n_streams' is the number of input streams
1763  * 'input_startup_cost' is the sum of the input streams' startup costs
1764  * 'input_total_cost' is the sum of the input streams' total costs
1765  * 'tuples' is the number of tuples in all the streams
1766  */
1767 void
1768 cost_merge_append(Path *path, PlannerInfo *root,
1769  List *pathkeys, int n_streams,
1770  Cost input_startup_cost, Cost input_total_cost,
1771  double tuples)
1772 {
1773  Cost startup_cost = 0;
1774  Cost run_cost = 0;
1775  Cost comparison_cost;
1776  double N;
1777  double logN;
1778 
1779  /*
1780  * Avoid log(0)...
1781  */
1782  N = (n_streams < 2) ? 2.0 : (double) n_streams;
1783  logN = LOG2(N);
1784 
1785  /* Assumed cost per tuple comparison */
1786  comparison_cost = 2.0 * cpu_operator_cost;
1787 
1788  /* Heap creation cost */
1789  startup_cost += comparison_cost * N * logN;
1790 
1791  /* Per-tuple heap maintenance cost */
1792  run_cost += tuples * comparison_cost * logN;
1793 
1794  /*
1795  * Also charge a small amount (arbitrarily set equal to operator cost) per
1796  * extracted tuple. We don't charge cpu_tuple_cost because a MergeAppend
1797  * node doesn't do qual-checking or projection, so it has less overhead
1798  * than most plan nodes.
1799  */
1800  run_cost += cpu_operator_cost * tuples;
1801 
1802  path->startup_cost = startup_cost + input_startup_cost;
1803  path->total_cost = startup_cost + run_cost + input_total_cost;
1804 }
1805 
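The MergeAppend arithmetic is small enough to restate directly. A standalone sketch with a hypothetical stream count and row total, assuming the usual default cpu_operator_cost:

#include <math.h>
#include <stdio.h>

/* Heap construction and maintenance charges, per cost_merge_append() above. */
int
main(void)
{
    double  cpu_operator_cost = 0.0025; /* assumed default GUC value */
    double  comparison_cost = 2.0 * cpu_operator_cost;
    double  N = 4.0;                    /* hypothetical number of input streams */
    double  tuples = 100000.0;          /* hypothetical total rows across streams */
    double  logN = log(N) / log(2.0);

    double  startup_cost = comparison_cost * N * logN;
    double  run_cost = tuples * comparison_cost * logN
        + cpu_operator_cost * tuples;   /* per-extracted-tuple charge */

    printf("startup = %.2f, run = %.2f\n", startup_cost, run_cost);
    return 0;
}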
1806 /*
1807  * cost_material
1808  * Determines and returns the cost of materializing a relation, including
1809  * the cost of reading the input data.
1810  *
1811  * If the total volume of data to materialize exceeds work_mem, we will need
1812  * to write it to disk, so the cost is much higher in that case.
1813  *
1814  * Note that here we are estimating the costs for the first scan of the
1815  * relation, so the materialization is all overhead --- any savings will
1816  * occur only on rescan, which is estimated in cost_rescan.
1817  */
1818 void
1819 cost_material(Path *path,
1820  Cost input_startup_cost, Cost input_total_cost,
1821  double tuples, int width)
1822 {
1823  Cost startup_cost = input_startup_cost;
1824  Cost run_cost = input_total_cost - input_startup_cost;
1825  double nbytes = relation_byte_size(tuples, width);
1826  long work_mem_bytes = work_mem * 1024L;
1827 
1828  path->rows = tuples;
1829 
1830  /*
1831  * Whether spilling or not, charge 2x cpu_operator_cost per tuple to
1832  * reflect bookkeeping overhead. (This rate must be more than what
1833  * cost_rescan charges for materialize, ie, cpu_operator_cost per tuple;
1834  * if it is exactly the same then there will be a cost tie between
1835  * nestloop with A outer, materialized B inner and nestloop with B outer,
1836  * materialized A inner. The extra cost ensures we'll prefer
1837  * materializing the smaller rel.) Note that this is normally a good deal
1838  * less than cpu_tuple_cost; which is OK because a Material plan node
1839  * doesn't do qual-checking or projection, so it's got less overhead than
1840  * most plan nodes.
1841  */
1842  run_cost += 2 * cpu_operator_cost * tuples;
1843 
1844  /*
1845  * If we will spill to disk, charge at the rate of seq_page_cost per page.
1846  * This cost is assumed to be evenly spread through the plan run phase,
1847  * which isn't exactly accurate but our cost model doesn't allow for
1848  * nonuniform costs within the run phase.
1849  */
1850  if (nbytes > work_mem_bytes)
1851  {
1852  double npages = ceil(nbytes / BLCKSZ);
1853 
1854  run_cost += seq_page_cost * npages;
1855  }
1856 
1857  path->startup_cost = startup_cost;
1858  path->total_cost = startup_cost + run_cost;
1859 }
1860 
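A standalone sketch of the charges a Material node adds on top of its input, assuming the usual defaults (cpu_operator_cost = 0.0025, seq_page_cost = 1.0, 4MB work_mem) and using tuples * width as a rough stand-in for relation_byte_size():

#include <math.h>
#include <stdio.h>

/* Extra charges added on top of the input path, per cost_material() above. */
int
main(void)
{
    double  cpu_operator_cost = 0.0025;     /* assumed default GUC value */
    double  seq_page_cost = 1.0;            /* assumed default GUC value */
    double  work_mem_bytes = 4096.0 * 1024; /* default 4MB work_mem */
    double  tuples = 500000.0;              /* hypothetical */
    double  width = 64.0;                   /* hypothetical bytes per row */
    double  nbytes = tuples * width;        /* crude stand-in for relation_byte_size() */
    double  extra_run_cost = 2 * cpu_operator_cost * tuples;

    if (nbytes > work_mem_bytes)            /* will spill: charge sequential I/O */
        extra_run_cost += seq_page_cost * ceil(nbytes / 8192.0);

    printf("extra run cost = %.2f\n", extra_run_cost);
    return 0;
}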
1861 /*
1862  * cost_agg
1863  * Determines and returns the cost of performing an Agg plan node,
1864  * including the cost of its input.
1865  *
1866  * aggcosts can be NULL when there are no actual aggregate functions (i.e.,
1867  * we are using a hashed Agg node just to do grouping).
1868  *
1869  * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
1870  * are for appropriately-sorted input.
1871  */
1872 void
1873 cost_agg(Path *path, PlannerInfo *root,
1874  AggStrategy aggstrategy, const AggClauseCosts *aggcosts,
1875  int numGroupCols, double numGroups,
1876  Cost input_startup_cost, Cost input_total_cost,
1877  double input_tuples)
1878 {
1879  double output_tuples;
1880  Cost startup_cost;
1881  Cost total_cost;
1882  AggClauseCosts dummy_aggcosts;
1883 
1884  /* Use all-zero per-aggregate costs if NULL is passed */
1885  if (aggcosts == NULL)
1886  {
1887  Assert(aggstrategy == AGG_HASHED);
1888  MemSet(&dummy_aggcosts, 0, sizeof(AggClauseCosts));
1889  aggcosts = &dummy_aggcosts;
1890  }
1891 
1892  /*
1893  * The transCost.per_tuple component of aggcosts should be charged once
1894  * per input tuple, corresponding to the costs of evaluating the aggregate
1895  * transfns and their input expressions (with any startup cost of course
1896  * charged but once). The finalCost component is charged once per output
1897  * tuple, corresponding to the costs of evaluating the finalfns.
1898  *
1899  * If we are grouping, we charge an additional cpu_operator_cost per
1900  * grouping column per input tuple for grouping comparisons.
1901  *
1902  * We will produce a single output tuple if not grouping, and a tuple per
1903  * group otherwise. We charge cpu_tuple_cost for each output tuple.
1904  *
1905  * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
1906  * same total CPU cost, but AGG_SORTED has lower startup cost. If the
1907  * input path is already sorted appropriately, AGG_SORTED should be
1908  * preferred (since it has no risk of memory overflow). This will happen
1909  * as long as the computed total costs are indeed exactly equal --- but if
1910  * there's roundoff error we might do the wrong thing. So be sure that
1911  * the computations below form the same intermediate values in the same
1912  * order.
1913  */
1914  if (aggstrategy == AGG_PLAIN)
1915  {
1916  startup_cost = input_total_cost;
1917  startup_cost += aggcosts->transCost.startup;
1918  startup_cost += aggcosts->transCost.per_tuple * input_tuples;
1919  startup_cost += aggcosts->finalCost;
1920  /* we aren't grouping */
1921  total_cost = startup_cost + cpu_tuple_cost;
1922  output_tuples = 1;
1923  }
1924  else if (aggstrategy == AGG_SORTED || aggstrategy == AGG_MIXED)
1925  {
1926  /* Here we are able to deliver output on-the-fly */
1927  startup_cost = input_startup_cost;
1928  total_cost = input_total_cost;
1929  if (aggstrategy == AGG_MIXED && !enable_hashagg)
1930  {
1931  startup_cost += disable_cost;
1932  total_cost += disable_cost;
1933  }
1934  /* calcs phrased this way to match HASHED case, see note above */
1935  total_cost += aggcosts->transCost.startup;
1936  total_cost += aggcosts->transCost.per_tuple * input_tuples;
1937  total_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
1938  total_cost += aggcosts->finalCost * numGroups;
1939  total_cost += cpu_tuple_cost * numGroups;
1940  output_tuples = numGroups;
1941  }
1942  else
1943  {
1944  /* must be AGG_HASHED */
1945  startup_cost = input_total_cost;
1946  if (!enable_hashagg)
1947  startup_cost += disable_cost;
1948  startup_cost += aggcosts->transCost.startup;
1949  startup_cost += aggcosts->transCost.per_tuple * input_tuples;
1950  startup_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
1951  total_cost = startup_cost;
1952  total_cost += aggcosts->finalCost * numGroups;
1953  total_cost += cpu_tuple_cost * numGroups;
1954  output_tuples = numGroups;
1955  }
1956 
1957  path->rows = output_tuples;
1958  path->startup_cost = startup_cost;
1959  path->total_cost = total_cost;
1960 }
1961 
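To make the sorted-versus-hashed comparison in the comment concrete, here is a standalone sketch that evaluates both branches on the same hypothetical inputs. The transition and final costs are invented numbers, not real AggClauseCosts, and the default cpu cost settings are assumed:

#include <stdio.h>

/* Compare AGG_SORTED and AGG_HASHED charges, per cost_agg() above. */
int
main(void)
{
    double  cpu_operator_cost = 0.0025; /* assumed default GUC value */
    double  cpu_tuple_cost = 0.01;      /* assumed default GUC value */
    double  input_startup = 0.0, input_total = 10000.0; /* hypothetical input path */
    double  input_tuples = 100000.0, numGroups = 100.0; /* hypothetical */
    int     numGroupCols = 1;
    double  trans_per_tuple = 0.01, final_per_group = 0.01; /* invented */

    /* AGG_SORTED: output is delivered on the fly */
    double  sorted_startup = input_startup;
    double  sorted_total = input_total
        + trans_per_tuple * input_tuples
        + (cpu_operator_cost * numGroupCols) * input_tuples
        + final_per_group * numGroups
        + cpu_tuple_cost * numGroups;

    /* AGG_HASHED: everything before the first output row is startup */
    double  hashed_startup = input_total
        + trans_per_tuple * input_tuples
        + (cpu_operator_cost * numGroupCols) * input_tuples;
    double  hashed_total = hashed_startup
        + final_per_group * numGroups
        + cpu_tuple_cost * numGroups;

    printf("sorted: %.2f/%.2f  hashed: %.2f/%.2f\n",
           sorted_startup, sorted_total, hashed_startup, hashed_total);
    return 0;
}

With these inputs both strategies total 11,252, but the hashed branch pushes 11,250 of that into startup, which is exactly the equal-total, higher-startup behavior the comment relies on for tie-breaking.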
1962 /*
1963  * cost_windowagg
1964  * Determines and returns the cost of performing a WindowAgg plan node,
1965  * including the cost of its input.
1966  *
1967  * Input is assumed already properly sorted.
1968  */
1969 void
1970 cost_windowagg(Path *path, PlannerInfo *root,
1971  List *windowFuncs, int numPartCols, int numOrderCols,
1972  Cost input_startup_cost, Cost input_total_cost,
1973  double input_tuples)
1974 {
1975  Cost startup_cost;
1976  Cost total_cost;
1977  ListCell *lc;
1978 
1979  startup_cost = input_startup_cost;
1980  total_cost = input_total_cost;
1981 
1982  /*
1983  * Window functions are assumed to cost their stated execution cost, plus
1984  * the cost of evaluating their input expressions, per tuple. Since they
1985  * may in fact evaluate their inputs at multiple rows during each cycle,
1986  * this could be a drastic underestimate; but without a way to know how
1987  * many rows the window function will fetch, it's hard to do better. In
1988  * any case, it's a good estimate for all the built-in window functions,
1989  * so we'll just do this for now.
1990  */
1991  foreach(lc, windowFuncs)
1992  {
1993  WindowFunc *wfunc = lfirst_node(WindowFunc, lc);
1994  Cost wfunccost;
1995  QualCost argcosts;
1996 
1997  wfunccost = get_func_cost(wfunc->winfnoid) * cpu_operator_cost;
1998 
1999  /* also add the input expressions' cost to per-input-row costs */
2000  cost_qual_eval_node(&argcosts, (Node *) wfunc->args, root);
2001  startup_cost += argcosts.startup;
2002  wfunccost += argcosts.per_tuple;
2003 
2004  /*
2005  * Add the filter's cost to per-input-row costs. XXX We should reduce
2006  * input expression costs according to filter selectivity.
2007  */
2008  cost_qual_eval_node(&argcosts, (Node *) wfunc->aggfilter, root);
2009  startup_cost += argcosts.startup;
2010  wfunccost += argcosts.per_tuple;
2011 
2012  total_cost += wfunccost * input_tuples;
2013  }
2014 
2015  /*
2016  * We also charge cpu_operator_cost per grouping column per tuple for
2017  * grouping comparisons, plus cpu_tuple_cost per tuple for general
2018  * overhead.
2019  *
2020  * XXX this neglects costs of spooling the data to disk when it overflows
2021  * work_mem. Sooner or later that should get accounted for.
2022  */
2023  total_cost += cpu_operator_cost * (numPartCols + numOrderCols) * input_tuples;
2024  total_cost += cpu_tuple_cost * input_tuples;
2025 
2026  path->rows = input_tuples;
2027  path->startup_cost = startup_cost;
2028  path->total_cost = total_cost;
2029 }
2030 
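As a worked per-tuple example (hypothetical function, usual default cost settings): a window function whose procedure cost is 1 unit contributes 1 × cpu_operator_cost = 0.0025 per row; with zero-cost arguments, no filter, one partition column and one order column, each input row is charged 0.0025 + 2 × 0.0025 + 0.01 = 0.0175, so 100,000 input rows add 1,750 to total_cost.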
2031 /*
2032  * cost_group
2033  * Determines and returns the cost of performing a Group plan node,
2034  * including the cost of its input.
2035  *
2036  * Note: caller must ensure that input costs are for appropriately-sorted
2037  * input.
2038  */
2039 void
2040 cost_group(Path *path, PlannerInfo *root,
2041  int numGroupCols, double numGroups,
2042  Cost input_startup_cost, Cost input_total_cost,
2043  double input_tuples)
2044 {
2045  Cost startup_cost;
2046  Cost total_cost;
2047 
2048  startup_cost = input_startup_cost;
2049  total_cost = input_total_cost;
2050 
2051  /*
2052  * Charge one cpu_operator_cost per comparison per input tuple. We assume
2053  * all columns get compared for most of the tuples.
2054  */
2055  total_cost += cpu_operator_cost * input_tuples * numGroupCols;
2056 
2057  path->rows = numGroups;
2058  path->startup_cost = startup_cost;
2059  path->total_cost = total_cost;
2060 }
2061 
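For example (default cpu_operator_cost of 0.0025): grouping 1,000,000 pre-sorted input rows on two columns adds 1,000,000 × 2 × 0.0025 = 5,000 to total_cost on top of the input path's own cost.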
2062 /*
2063  * initial_cost_nestloop
2064  * Preliminary estimate of the cost of a nestloop join path.
2065  *
2066  * This must quickly produce lower-bound estimates of the path's startup and
2067  * total costs. If we are unable to eliminate the proposed path from
2068  * consideration using the lower bounds, final_cost_nestloop will be called
2069  * to obtain the final estimates.
2070  *
2071  * The exact division of labor between this function and final_cost_nestloop
2072  * is private to them, and represents a tradeoff between speed of the initial
2073  * estimate and getting a tight lower bound. We choose to not examine the
2074  * join quals here, since that's by far the most expensive part of the
2075  * calculations. The end result is that CPU-cost considerations must be
2076  * left for the second phase; and for SEMI/ANTI joins, we must also postpone
2077  * incorporation of the inner path's run cost.
2078  *
2079  * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
2080  * other data to be used by final_cost_nestloop
2081  * 'jointype' is the type of join to be performed
2082  * 'outer_path' is the outer input to the join
2083  * 'inner_path' is the inner input to the join
2084  * 'extra' contains miscellaneous information about the join
2085  */
2086 void
2087 initial_cost_nestloop(PlannerInfo *root, JoinCostWorkspace *workspace,
2088  JoinType jointype,
2089  Path *outer_path, Path *inner_path,
2090  JoinPathExtraData *extra)
2091 {
2092  Cost startup_cost = 0;
2093  Cost run_cost = 0;
2094  double outer_path_rows = outer_path->rows;
2095  Cost inner_rescan_start_cost;
2096  Cost inner_rescan_total_cost;
2097  Cost inner_run_cost;
2098  Cost inner_rescan_run_cost;
2099 
2100  /* estimate costs to rescan the inner relation */
2101  cost_rescan(root, inner_path,
2102  &inner_rescan_start_cost,
2103  &inner_rescan_total_cost);
2104 
2105  /* cost of source data */
2106 
2107  /*
2108  * NOTE: clearly, we must pay both outer and inner paths' startup_cost
2109  * before we can start returning tuples, so the join's startup cost is
2110  * their sum. We'll also pay the inner path's rescan startup cost
2111  * multiple times.
2112  */
2113  startup_cost += outer_path->startup_cost + inner_path->startup_cost;
2114  run_cost += outer_path->total_cost - outer_path->startup_cost;
2115  if (outer_path_rows > 1)
2116  run_cost += (outer_path_rows - 1) * inner_rescan_start_cost;
2117 
2118  inner_run_cost = inner_path->total_cost - inner_path->startup_cost;
2119  inner_rescan_run_cost = inner_rescan_total_cost - inner_rescan_start_cost;
2120 
2121  if (jointype == JOIN_SEMI || jointype == JOIN_ANTI ||
2122  extra->inner_unique)
2123  {
2124  /*
2125  * With a SEMI or ANTI join, or if the innerrel is known unique, the
2126  * executor will stop after the first match.
2127  *
2128  * Getting decent estimates requires inspection of the join quals,
2129  * which we choose to postpone to final_cost_nestloop.
2130  */
2131 
2132  /* Save private data for final_cost_nestloop */
2133  workspace->inner_run_cost = inner_run_cost;
2134  workspace->inner_rescan_run_cost = inner_rescan_run_cost;
2135  }
2136  else
2137  {
2138  /* Normal case; we'll scan whole input rel for each outer row */
2139  run_cost += inner_run_cost;
2140  if (outer_path_rows > 1)
2141  run_cost += (outer_path_rows - 1) * inner_rescan_run_cost;
2142  }
2143 
2144  /* CPU costs left for later */
2145 
2146  /* Public result fields */
2147  workspace->startup_cost = startup_cost;
2148  workspace->total_cost = startup_cost + run_cost;
2149  /* Save private data for final_cost_nestloop */
2150  workspace->run_cost = run_cost;
2151 }
2152 
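To see the shape of the lower bound (hypothetical inputs): an outer path with 1,000 rows and run cost 500, and an inner path whose first scan has run cost 20 while rescans cost 5 to start and 10 to run, gives, in the non-SEMI/ANTI branch, run_cost = 500 + 999 × 5 + 20 + 999 × 10 = 15,505 before final_cost_nestloop adds any join-qual CPU charges.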
2153 /*
2154  * final_cost_nestloop
2155  * Final estimate of the cost and result size of a nestloop join path.
2156  *
2157  * 'path' is already filled in except for the rows and cost fields
2158  * 'workspace' is the result from initial_cost_nestloop
2159  * 'extra' contains miscellaneous information about the join
2160  */
2161 void
2162 final_cost_nestloop(PlannerInfo *root, NestPath *path,
2163  JoinCostWorkspace *workspace,
2164  JoinPathExtraData *extra)
2165 {
2166  Path *outer_path = path->outerjoinpath;
2167  Path *inner_path = path->innerjoinpath;
2168  double outer_path_rows = outer_path->rows;
2169  double inner_path_rows = inner_path->rows;
2170  Cost startup_cost = workspace->startup_cost;
2171  Cost run_cost = workspace->run_cost;
2172  Cost cpu_per_tuple;
2173  QualCost restrict_qual_cost;
2174  double ntuples;
2175 
2176  /* Protect some assumptions below that rowcounts aren't zero or NaN */
2177  if (outer_path_rows <= 0 || isnan(outer_path_rows))
2178  outer_path_rows = 1;
2179  if (inner_path_rows <= 0 || isnan(inner_path_rows))
2180  inner_path_rows = 1;
2181 
2182  /* Mark the path with the correct row estimate */
2183  if (path->path.param_info)
2184  path->path.rows = path->path.param_info->ppi_rows;
2185  else
2186  path->path.rows = path->path.parent->rows;
2187 
2188  /* For partial paths, scale row estimate. */
2189  if (path->path.parallel_workers > 0)
2190  {
2191  double parallel_divisor = get_parallel_divisor(&path->path);
2192 
2193  path->path.rows =
2194  clamp_row_est(path->path.rows / parallel_divisor);
2195  }
2196 
2197  /*
2198  * We could include disable_cost in the preliminary estimate, but that
2199  * would amount to optimizing for the case where the join method is
2200  * disabled, which doesn't seem like the way to bet.
2201  */
2202  if (!enable_nestloop)
2203  startup_cost += disable_cost;
2204 
2205  /* cost of inner-relation source data (we already dealt with outer rel) */
2206 
2207  if (path->jointype == JOIN_SEMI || path->jointype == JOIN_ANTI ||
2208  extra->inner_unique)
2209  {
2210  /*
2211  * With a SEMI or ANTI join, or if the innerrel is known unique, the
2212  * executor will stop after the first match.
2213  */
2214  Cost inner_run_cost = workspace->inner_run_cost;
2215  Cost inner_rescan_run_cost = workspace->inner_rescan_run_cost;
2216  double outer_matched_rows;
2217  Selectivity inner_scan_frac;
2218 
2219  /*
2220  * For an outer-rel row that has at least one match, we can expect the
2221  * inner scan to stop after a fraction 1/(match_count+1) of the inner
2222  * rows, if the matches are evenly distributed. Since they probably
2223  * aren't quite evenly distributed, we apply a fuzz factor of 2.0 to
2224  * that fraction. (If we used a larger fuzz factor, we'd have to
2225  * clamp inner_scan_frac to at most 1.0; but since match_count is at
2226  * least 1, no such clamp is needed now.)
2227  */
2228  outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
2229  inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
2230 
2231  /*
2232  * Compute number of tuples processed (not number emitted!). First,
2233  * account for successfully-matched outer rows.
2234  */
2235  ntuples = outer_matched_rows * inner_path_rows * inner_scan_frac;
2236 
2237  /*
2238  * Now we need to estimate the actual costs of scanning the inner
2239  * relation, which may be quite a bit less than N times inner_run_cost
2240  * due to early scan stops. We consider two cases. If the inner path
2241  * is an indexscan using all the joinquals as indexquals, then an
2242  * unmatched outer row results in an indexscan returning no rows,
2243  * which is probably quite cheap. Otherwise, the executor will have
2244  * to scan the whole inner rel for an unmatched row; not so cheap.
2245  */
2246  if (has_indexed_join_quals(path))
2247  {
2248  /*
2249  * Successfully-matched outer rows will only require scanning
2250  * inner_scan_frac of the inner relation. In this case, we don't
2251  * need to charge the full inner_run_cost even when that's more
2252  * than inner_rescan_run_cost, because we can assume that none of
2253  * the inner scans ever scan the whole inner relation. So it's
2254  * okay to assume that all the inner scan executions can be
2255  * fractions of the full cost, even if materialization is reducing
2256  * the rescan cost. At this writing, it's impossible to get here
2257  * for a materialized inner scan, so inner_run_cost and
2258  * inner_rescan_run_cost will be the same anyway; but just in
2259  * case, use inner_run_cost for the first matched tuple and
2260  * inner_rescan_run_cost for additional ones.
2261  */
2262  run_cost += inner_run_cost * inner_scan_frac;
2263  if (outer_matched_rows > 1)
2264  run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
2265 
2266  /*
2267  * Add the cost of inner-scan executions for unmatched outer rows.
2268  * We estimate this as the same cost as returning the first tuple
2269  * of a nonempty scan. We consider that these are all rescans,
2270  * since we used inner_run_cost once already.
2271  */
2272  run_cost += (outer_path_rows - outer_matched_rows) *
2273  inner_rescan_run_cost / inner_path_rows;
2274 
2275  /*
2276  * We won't be evaluating any quals at all for unmatched rows, so
2277  * don't add them to ntuples.
2278  */
2279  }
2280  else
2281  {
2282  /*
2283  * Here, a complicating factor is that rescans may be cheaper than
2284  * first scans. If we never scan all the way to the end of the
2285  * inner rel, it might be (depending on the plan type) that we'd
2286  * never pay the whole inner first-scan run cost. However it is
2287  * difficult to estimate whether that will happen (and it could
2288  * not happen if there are any unmatched outer rows!), so be
2289  * conservative and always charge the whole first-scan cost once.
2290  */
2291  run_cost += inner_run_cost;
2292 
2293  /* Add inner run cost for additional outer tuples having matches */
2294  if (outer_matched_rows > 1)
2295  run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
2296 
2297  /* Add inner run cost for unmatched outer tuples */
2298  run_cost += (outer_path_rows - outer_matched_rows) *
2299  inner_rescan_run_cost;
2300 
2301  /* And count the unmatched join tuples as being processed */
2302  ntuples += (outer_path_rows - outer_matched_rows) *
2303  inner_path_rows;
2304  }
2305  }
2306  else
2307  {
2308  /* Normal-case source costs were included in preliminary estimate */
2309 
2310  /* Compute number of tuples processed (not number emitted!) */
2311  ntuples = outer_path_rows * inner_path_rows;
2312  }
2313 
2314  /* CPU costs */
2315  cost_qual_eval(&restrict_qual_cost, path->joinrestrictinfo, root);
2316  startup_cost += restrict_qual_cost.startup;
2317  cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
2318  run_cost += cpu_per_tuple * ntuples;
2319 
2320  /* tlist eval costs are paid per output row, not per tuple scanned */
2321  startup_cost += path->path.pathtarget->cost.startup;
2322  run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
2323 
2324  path->path.startup_cost = startup_cost;
2325  path->path.total_cost = startup_cost + run_cost;
2326 }
2327 
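The SEMI/ANTI arithmetic above is easier to see with numbers. A standalone sketch of just the "tuples processed" accounting, using invented selectivities in place of the extra->semifactors fields and assuming the inner path is not an indexscan on the join quals:

#include <math.h>
#include <stdio.h>

/* Tuples processed by a semi/anti nestloop, per final_cost_nestloop() above. */
int
main(void)
{
    double  outer_path_rows = 10000.0;  /* hypothetical */
    double  inner_path_rows = 5000.0;   /* hypothetical */
    double  outer_match_frac = 0.2;     /* invented stand-in for semifactors */
    double  match_count = 3.0;          /* invented stand-in for semifactors */
    double  outer_matched_rows = rint(outer_path_rows * outer_match_frac);
    double  inner_scan_frac = 2.0 / (match_count + 1.0);

    /* matched outer rows stop partway through the inner scan */
    double  ntuples = outer_matched_rows * inner_path_rows * inner_scan_frac;

    /* unmatched outer rows scan the whole inner relation */
    ntuples += (outer_path_rows - outer_matched_rows) * inner_path_rows;

    printf("matched = %.0f, scan_frac = %.2f, ntuples = %.0f\n",
           outer_matched_rows, inner_scan_frac, ntuples);
    return 0;
}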
2328 /*
2329  * initial_cost_mergejoin
2330  * Preliminary estimate of the cost of a mergejoin path.
2331  *
2332  * This must quickly produce lower-bound estimates of the path's startup and
2333  * total costs. If we are unable to eliminate the proposed path from
2334  * consideration using the lower bounds, final_cost_mergejoin will be called
2335  * to obtain the final estimates.
2336  *
2337  * The exact division of labor between this function and final_cost_mergejoin
2338  * is private to them, and represents a tradeoff between speed of the initial
2339  * estimate and getting a tight lower bound. We choose to not examine the
2340  * join quals here, except for obtaining the scan selectivity estimate which
2341  * is really essential (but fortunately, use of caching keeps the cost of
2342  * getting that down to something reasonable).
2343  * We also assume that cost_sort is cheap enough to use here.
2344  *
2345  * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
2346  * other data to be used by final_cost_mergejoin
2347  * 'jointype' is the type of join to be performed
2348  * 'mergeclauses' is the list of joinclauses to be used as merge clauses
2349  * 'outer_path' is the outer input to the join
2350  * 'inner_path' is the inner input to the join
2351  * 'outersortkeys' is the list of sort keys for the outer path
2352  * 'innersortkeys' is the list of sort keys for the inner path
2353  * 'extra' contains miscellaneous information about the join
2354  *
2355  * Note: outersortkeys and innersortkeys should be NIL if no explicit
2356  * sort is needed because the respective source path is already ordered.
2357  */
2358 void
2359 initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace,
2360  JoinType jointype,
2361  List *mergeclauses,
2362  Path *outer_path, Path *inner_path,
2363  List *outersortkeys, List *innersortkeys,
2364  JoinPathExtraData *extra)
2365 {
2366  Cost startup_cost = 0;
2367  Cost run_cost = 0;
2368  double outer_path_rows = outer_path->rows;
2369  double inner_path_rows = inner_path->rows;
2370  Cost inner_run_cost;
2371  double outer_rows,
2372  inner_rows,
2373  outer_skip_rows,
2374  inner_skip_rows;
2375  Selectivity outerstartsel,
2376  outerendsel,
2377  innerstartsel,
2378  innerendsel;
2379  Path sort_path; /* dummy for result of cost_sort */
2380 
2381  /* Protect some assumptions below that rowcounts aren't zero or NaN */
2382  if (outer_path_rows <= 0 || isnan(outer_path_rows))
2383  outer_path_rows = 1;
2384  if (inner_path_rows <= 0 || isnan(inner_path_rows))
2385  inner_path_rows = 1;
2386 
2387  /*
2388  * A merge join will stop as soon as it exhausts either input stream
2389  * (unless it's an outer join, in which case the outer side has to be
2390  * scanned all the way anyway). Estimate fraction of the left and right
2391  * inputs that will actually need to be scanned. Likewise, we can
2392  * estimate the number of rows that will be skipped before the first join
2393  * pair is found, which should be factored into startup cost. We use only
2394  * the first (most significant) merge clause for this purpose. Since
2395  * mergejoinscansel() is a fairly expensive computation, we cache the
2396  * results in the merge clause RestrictInfo.
2397  */
2398  if (mergeclauses && jointype != JOIN_FULL)
2399  {
2400  RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
2401  List *opathkeys;
2402  List *ipathkeys;
2403  PathKey *opathkey;
2404  PathKey *ipathkey;
2405  MergeScanSelCache *cache;
2406 
2407  /* Get the input pathkeys to determine the sort-order details */
2408  opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
2409  ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
2410  Assert(opathkeys);
2411  Assert(ipathkeys);
2412  opathkey = (PathKey *) linitial(opathkeys);
2413  ipathkey = (PathKey *) linitial(ipathkeys);
2414  /* debugging check */
2415  if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
2416  opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation ||
2417  opathkey->pk_strategy != ipathkey->pk_strategy ||
2418  opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
2419  elog(ERROR, "left and right pathkeys do not match in mergejoin");
2420 
2421  /* Get the selectivity with caching */
2422  cache = cached_scansel(root, firstclause, opathkey);
2423 
2424  if (bms_is_subset(firstclause->left_relids,
2425  outer_path->parent->relids))
2426  {
2427  /* left side of clause is outer */
2428  outerstartsel = cache->leftstartsel;
2429  outerendsel = cache->leftendsel;
2430  innerstartsel = cache->rightstartsel;
2431  innerendsel = cache->rightendsel;
2432  }
2433  else
2434  {
2435  /* left side of clause is inner */
2436  outerstartsel = cache->rightstartsel;
2437  outerendsel = cache->rightendsel;
2438  innerstartsel = cache->leftstartsel;
2439  innerendsel = cache->leftendsel;
2440  }
2441  if (jointype == JOIN_LEFT ||
2442  jointype == JOIN_ANTI)
2443  {
2444  outerstartsel = 0.0;
2445  outerendsel = 1.0;
2446  }
2447  else if (jointype == JOIN_RIGHT)
2448  {
2449  innerstartsel = 0.0;
2450  innerendsel = 1.0;
2451  }
2452  }
2453  else
2454  {
2455  /* cope with clauseless or full mergejoin */
2456  outerstartsel = innerstartsel = 0.0;
2457  outerendsel = innerendsel = 1.0;
2458  }
2459 
2460  /*
2461  * Convert selectivities to row counts. We force outer_rows and
2462  * inner_rows to be at least 1, but the skip_rows estimates can be zero.
2463  */
2464  outer_skip_rows = rint(outer_path_rows * outerstartsel);
2465  inner_skip_rows = rint(inner_path_rows * innerstartsel);
2466  outer_rows = clamp_row_est(outer_path_rows * outerendsel);
2467  inner_rows = clamp_row_est(inner_path_rows * innerendsel);
2468 
2469  Assert(outer_skip_rows <= outer_rows);
2470  Assert(inner_skip_rows <= inner_rows);
2471 
2472  /*
2473  * Readjust scan selectivities to account for above rounding. This is
2474  * normally an insignificant effect, but when there are only a few rows in
2475  * the inputs, failing to do this makes for a large percentage error.
2476  */
2477  outerstartsel = outer_skip_rows / outer_path_rows;
2478  innerstartsel = inner_skip_rows / inner_path_rows;
2479  outerendsel = outer_rows / outer_path_rows;
2480  innerendsel = inner_rows / inner_path_rows;
2481 
2482  Assert(outerstartsel <= outerendsel);
2483  Assert(innerstartsel <= innerendsel);
2484 
2485  /* cost of source data */
2486 
2487  if (outersortkeys) /* do we need to sort outer? */
2488  {
2489  cost_sort(&sort_path,
2490  root,
2491  outersortkeys,
2492  outer_path->total_cost,
2493  outer_path_rows,
2494  outer_path->pathtarget->width,
2495  0.0,
2496  work_mem,
2497  -1.0);
2498  startup_cost += sort_path.startup_cost;
2499  startup_cost += (sort_path.total_cost - sort_path.startup_cost)
2500  * outerstartsel;
2501  run_cost += (sort_path.total_cost - sort_path.startup_cost)
2502  * (outerendsel - outerstartsel);
2503  }
2504  else
2505  {
2506  startup_cost += outer_path->startup_cost;
2507  startup_cost += (outer_path->total_cost - outer_path->startup_cost)
2508  * outerstartsel;
2509  run_cost += (outer_path->total_cost - outer_path->startup_cost)
2510  * (outerendsel - outerstartsel);
2511  }
2512 
2513  if (innersortkeys) /* do we need to sort inner? */
2514  {
2515  cost_sort(&sort_path,
2516  root,
2517  innersortkeys,
2518  inner_path->total_cost,
2519  inner_path_rows,
2520  inner_path->pathtarget->width,
2521  0.0,
2522  work_mem,
2523  -1.0);
2524  startup_cost += sort_path.startup_cost;
2525  startup_cost += (sort_path.total_cost - sort_path.startup_cost)
2526  * innerstartsel;
2527  inner_run_cost = (sort_path.total_cost - sort_path.startup_cost)
2528  * (innerendsel - innerstartsel);
2529  }
2530  else
2531  {
2532  startup_cost += inner_path->startup_cost;
2533  startup_cost += (inner_path->total_cost - inner_path->startup_cost)
2534  * innerstartsel;
2535  inner_run_cost = (inner_path->total_cost - inner_path->startup_cost)
2536  * (innerendsel - innerstartsel);
2537  }
2538 
2539  /*
2540  * We can't yet determine whether rescanning occurs, or whether
2541  * materialization of the inner input should be done. The minimum
2542  * possible inner input cost, regardless of rescan and materialization
2543  * considerations, is inner_run_cost. We include that in
2544  * workspace->total_cost, but not yet in run_cost.
2545  */
2546 
2547  /* CPU costs left for later */
2548 
2549  /* Public result fields */
2550  workspace->startup_cost = startup_cost;
2551  workspace->total_cost = startup_cost + run_cost + inner_run_cost;
2552  /* Save private data for final_cost_mergejoin */
2553  workspace->run_cost = run_cost;
2554  workspace->inner_run_cost = inner_run_cost;
2555  workspace->outer_rows = outer_rows;
2556  workspace->inner_rows = inner_rows;
2557  workspace->outer_skip_rows = outer_skip_rows;
2558  workspace->inner_skip_rows = inner_skip_rows;
2559 }
2560 
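As a worked example of the selectivity-to-rows conversion (hypothetical numbers): if the cached estimate says the outer scan starts at 10% and ends at 60% of a 10,000-row outer input, then outer_skip_rows = rint(10,000 × 0.10) = 1,000 and outer_rows = clamp_row_est(10,000 × 0.60) = 6,000; after readjustment the fractions are again 0.10 and 0.60, so 10% of the outer input's run cost is charged to startup, 50% (0.60 − 0.10) to run cost, and the remaining 40% is never paid because the merge stops early.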
2561 /*
2562  * final_cost_mergejoin
2563  * Final estimate of the cost and result size of a mergejoin path.
2564  *
2565  * Unlike other costsize functions, this routine makes two actual decisions:
2566  * whether the executor will need to do mark/restore, and whether we should
2567  * materialize the inner path. It would be logically cleaner to build
2568  * separate paths testing these alternatives, but that would require repeating
2569  * most of the cost calculations, which are not all that cheap. Since the
2570  * choice will not affect output pathkeys or startup cost, only total cost,
2571  * there is no possibility of wanting to keep more than one path. So it seems
2572  * best to make the decisions here and record them in the path's
2573  * skip_mark_restore and materialize_inner fields.
2574  *
2575  * Mark/restore overhead is usually required, but can be skipped if we know
2576  * that the executor need find only one match per outer tuple, and that the
2577  * mergeclauses are sufficient to identify a match.
2578  *
2579  * We materialize the inner path if we need mark/restore and either the inner
2580  * path can't support mark/restore, or it's cheaper to use an interposed
2581  * Material node to handle mark/restore.
2582  *
2583  * 'path' is already filled in except for the rows and cost fields and
2584  * skip_mark_restore and materialize_inner
2585  * 'workspace' is the result from initial_cost_mergejoin
2586  * 'extra' contains miscellaneous information about the join
2587  */
2588 void
2589 final_cost_mergejoin(PlannerInfo *root, MergePath *path,
2590  JoinCostWorkspace *workspace,
2591  JoinPathExtraData *extra)
2592 {
2593  Path *outer_path = path->jpath.outerjoinpath;
2594  Path *inner_path = path->jpath.innerjoinpath;
2595  double inner_path_rows = inner_path->rows;
2596  List *mergeclauses = path->path_mergeclauses;
2597  List *innersortkeys = path->innersortkeys;
2598  Cost startup_cost = workspace->startup_cost;
2599  Cost run_cost = workspace->run_cost;
2600  Cost inner_run_cost = workspace->inner_run_cost;
2601  double outer_rows = workspace->outer_rows;
2602  double inner_rows = workspace->inner_rows;
2603  double outer_skip_rows = workspace->outer_skip_rows;
2604  double inner_skip_rows = workspace->inner_skip_rows;
2605  Cost cpu_per_tuple,
2606  bare_inner_cost,
2607  mat_inner_cost;
2608  QualCost merge_qual_cost;
2609  QualCost qp_qual_cost;
2610  double mergejointuples,
2611  rescannedtuples;
2612  double rescanratio;
2613 
2614  /* Protect some assumptions below that rowcounts aren't zero or NaN */
2615  if (inner_path_rows <= 0 || isnan(inner_path_rows))
2616  inner_path_rows = 1;
2617 
2618  /* Mark the path with the correct row estimate */
2619  if (path->jpath.path.param_info)
2620  path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
2621  else
2622  path->jpath.path.rows = path->jpath.path.parent->rows;
2623 
2624  /* For partial paths, scale row estimate. */
2625  if (path->jpath.path.parallel_workers > 0)
2626  {
2627  double parallel_divisor = get_parallel_divisor(&path->jpath.path);
2628 
2629  path->jpath.path.rows =
2630  clamp_row_est(path->jpath.path.rows / parallel_divisor);
2631  }
2632 
2633  /*
2634  * We could include disable_cost in the preliminary estimate, but that
2635  * would amount to optimizing for the case where the join method is
2636  * disabled, which doesn't seem like the way to bet.
2637  */
2638  if (!enable_mergejoin)
2639  startup_cost += disable_cost;
2640 
2641  /*
2642  * Compute cost of the mergequals and qpquals (other restriction clauses)
2643  * separately.
2644  */
2645  cost_qual_eval(&merge_qual_cost, mergeclauses, root);
2646  cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
2647  qp_qual_cost.startup -= merge_qual_cost.startup;
2648  qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
2649 
2650  /*
2651  * With a SEMI or ANTI join, or if the innerrel is known unique, the
2652  * executor will stop scanning for matches after the first match. When
2653  * all the joinclauses are merge clauses, this means we don't ever need to
2654  * back up the merge, and so we can skip mark/restore overhead.
2655  */
2656  if ((path->jpath.jointype == JOIN_SEMI ||
2657  path->jpath.jointype == JOIN_ANTI ||
2658  extra->inner_unique) &&
2659  (list_length(path->jpath.joinrestrictinfo) ==
2660  list_length(path->path_mergeclauses)))
2661  path->skip_mark_restore = true;
2662  else
2663  path->skip_mark_restore = false;
2664 
2665  /*
2666  * Get approx # tuples passing the mergequals. We use approx_tuple_count
2667  * here because we need an estimate done with JOIN_INNER semantics.
2668  */
2669  mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);
2670 
2671  /*
2672  * When there are equal merge keys in the outer relation, the mergejoin
2673  * must rescan any matching tuples in the inner relation. This means
2674  * re-fetching inner tuples; we have to estimate how often that happens.
2675  *
2676  * For regular inner and outer joins, the number of re-fetches can be
2677  * estimated approximately as size of merge join output minus size of
2678  * inner relation. Assume that the distinct key values are 1, 2, ..., and
2679  * denote the number of values of each key in the outer relation as m1,
2680  * m2, ...; in the inner relation, n1, n2, ... Then we have
2681  *
2682  * size of join = m1 * n1 + m2 * n2 + ...
2683  *
2684  * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
2685  * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
2686  * relation
2687  *
2688  * This equation works correctly for outer tuples having no inner match
2689  * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
2690  * are effectively subtracting those from the number of rescanned tuples,
2691  * when we should not. Can we do better without expensive selectivity
2692  * computations?
2693  *
2694  * The whole issue is moot if we are working from a unique-ified outer
2695  * input, or if we know we don't need to mark/restore at all.
2696  */
2697  if (IsA(outer_path, UniquePath) || path->skip_mark_restore)
2698  rescannedtuples = 0;
2699  else
2700  {
2701  rescannedtuples = mergejointuples - inner_path_rows;
2702  /* Must clamp because of possible underestimate */
2703  if (rescannedtuples < 0)
2704  rescannedtuples = 0;
2705  }
2706  /* We'll inflate various costs this much to account for rescanning */
2707  rescanratio = 1.0 + (rescannedtuples / inner_path_rows);
2708 
2709  /*
2710  * Decide whether we want to materialize the inner input to shield it from
2711  * mark/restore and performing re-fetches. Our cost model for regular
2712  * re-fetches is that a re-fetch costs the same as an original fetch,
2713  * which is probably an overestimate; but on the other hand we ignore the
2714  * bookkeeping costs of mark/restore. Not clear if it's worth developing
2715  * a more refined model. So we just need to inflate the inner run cost by
2716  * rescanratio.
2717  */
2718  bare_inner_cost = inner_run_cost * rescanratio;
2719 
2720  /*
2721  * When we interpose a Material node the re-fetch cost is assumed to be
2722  * just cpu_operator_cost per tuple, independently of the underlying
2723  * plan's cost; and we charge an extra cpu_operator_cost per original
2724  * fetch as well. Note that we're assuming the materialize node will
2725  * never spill to disk, since it only has to remember tuples back to the
2726  * last mark. (If there are a huge number of duplicates, our other cost
2727  * factors will make the path so expensive that it probably won't get
2728  * chosen anyway.) So we don't use cost_rescan here.
2729  *
2730  * Note: keep this estimate in sync with create_mergejoin_plan's labeling
2731  * of the generated Material node.
2732  */
2733  mat_inner_cost = inner_run_cost +
2734  cpu_operator_cost * inner_path_rows * rescanratio;
2735 
2736  /*
2737  * If we don't need mark/restore at all, we don't need materialization.
2738  */
2739  if (path->skip_mark_restore)
2740  path->materialize_inner = false;
2741 
2742  /*
2743  * Prefer materializing if it looks cheaper, unless the user has asked to
2744  * suppress materialization.
2745  */
2746  else if (enable_material && mat_inner_cost < bare_inner_cost)
2747  path->materialize_inner = true;
2748 
2749  /*
2750  * Even if materializing doesn't look cheaper, we *must* do it if the
2751  * inner path is to be used directly (without sorting) and it doesn't
2752  * support mark/restore.
2753  *
2754  * Since the inner side must be ordered, and only Sorts and IndexScans can
2755  * create order to begin with, and they both support mark/restore, you
2756  * might think there's no problem --- but you'd be wrong. Nestloop and
2757  * merge joins can *preserve* the order of their inputs, so they can be
2758  * selected as the input of a mergejoin, and they don't support
2759  * mark/restore at present.
2760  *
2761  * We don't test the value of enable_material here, because
2762  * materialization is required for correctness in this case, and turning
2763  * it off does not entitle us to deliver an invalid plan.
2764  */
2765  else if (innersortkeys == NIL &&
2766  !ExecSupportsMarkRestore(inner_path))
2767  path->materialize_inner = true;
2768 
2769  /*
2770  * Also, force materializing if the inner path is to be sorted and the
2771  * sort is expected to spill to disk. This is because the final merge
2772  * pass can be done on-the-fly if it doesn't have to support mark/restore.
2773  * We don't try to adjust the cost estimates for this consideration,
2774  * though.
2775  *
2776  * Since materialization is a performance optimization in this case,
2777  * rather than necessary for correctness, we skip it if enable_material is
2778  * off.
2779  */
2780  else if (enable_material && innersortkeys != NIL &&
2781  relation_byte_size(inner_path_rows,
2782  inner_path->pathtarget->width) >
2783  (work_mem * 1024L))
2784  path->materialize_inner = true;
2785  else
2786  path->materialize_inner = false;
2787 
2788  /* Charge the right incremental cost for the chosen case */
2789  if (path->materialize_inner)
2790  run_cost += mat_inner_cost;
2791  else
2792  run_cost += bare_inner_cost;
2793 
2794  /* CPU costs */
2795 
2796  /*
2797  * The number of tuple comparisons needed is approximately number of outer
2798  * rows plus number of inner rows plus number of rescanned tuples (can we
2799  * refine this?). At each one, we need to evaluate the mergejoin quals.
2800  */
2801  startup_cost += merge_qual_cost.startup;
2802  startup_cost += merge_qual_cost.per_tuple *
2803  (outer_skip_rows + inner_skip_rows * rescanratio);
2804  run_cost += merge_qual_cost.per_tuple *
2805  ((outer_rows - outer_skip_rows) +
2806  (inner_rows - inner_skip_rows) * rescanratio);
2807 
2808  /*
2809  * For each tuple that gets through the mergejoin proper, we charge
2810  * cpu_tuple_cost plus the cost of evaluating additional restriction
2811  * clauses that are to be applied at the join. (This is pessimistic since
2812  * not all of the quals may get evaluated at each tuple.)
2813  *
2814  * Note: we could adjust for SEMI/ANTI joins skipping some qual
2815  * evaluations here, but it's probably not worth the trouble.
2816  */
2817  startup_cost += qp_qual_cost.startup;
2818  cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
2819  run_cost += cpu_per_tuple * mergejointuples;
2820 
2821  /* tlist eval costs are paid per output row, not per tuple scanned */
2822  startup_cost += path->jpath.path.pathtarget->cost.startup;
2823  run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
2824 
2825  path->jpath.path.startup_cost = startup_cost;
2826  path->jpath.path.total_cost = startup_cost + run_cost;
2827 }
2828 
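A standalone sketch of the re-fetch accounting and the materialize-or-not comparison above, with invented row counts and an invented inner run cost (the real mergejointuples comes from approx_tuple_count()):

#include <stdio.h>

/* Rescan ratio and materialization decision, per final_cost_mergejoin() above. */
int
main(void)
{
    double  cpu_operator_cost = 0.0025; /* assumed default GUC value */
    double  inner_path_rows = 20000.0;  /* hypothetical */
    double  mergejointuples = 50000.0;  /* hypothetical join output size */
    double  inner_run_cost = 800.0;     /* hypothetical */
    double  rescannedtuples = mergejointuples - inner_path_rows;
    double  rescanratio;
    double  bare_inner_cost;
    double  mat_inner_cost;

    if (rescannedtuples < 0)            /* clamp possible underestimate */
        rescannedtuples = 0;
    rescanratio = 1.0 + (rescannedtuples / inner_path_rows);

    /* re-fetch from the underlying plan vs. from an interposed Material node */
    bare_inner_cost = inner_run_cost * rescanratio;
    mat_inner_cost = inner_run_cost +
        cpu_operator_cost * inner_path_rows * rescanratio;

    printf("rescanratio = %.2f, bare = %.2f, mat = %.2f -> %s\n",
           rescanratio, bare_inner_cost, mat_inner_cost,
           mat_inner_cost < bare_inner_cost ? "materialize" : "no materialize");
    return 0;
}

With these inputs the ratio is 2.5, the bare re-fetch cost is 2,000, and the materialized cost is 925, so materialization would be chosen when enable_material allows it.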
2829 /*
2830  * run mergejoinscansel() with caching
2831  */
2832 static MergeScanSelCache *
2833 cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
2834 {
2835  MergeScanSelCache *cache;
2836  ListCell *lc;
2837  Selectivity leftstartsel,
2838  leftendsel,
2839  rightstartsel,
2840  rightendsel;
2841  MemoryContext oldcontext;
2842 
2843  /* Do we have this result already? */
2844  foreach(lc, rinfo->scansel_cache)
2845  {
2846  cache = (MergeScanSelCache *) lfirst(lc);
2847  if (cache->opfamily == pathkey->pk_opfamily &&
2848  cache->collation == pathkey->pk_eclass->ec_collation &&
2849  cache->strategy == pathkey->pk_strategy &&
2850  cache->nulls_first == pathkey->pk_nulls_first)
2851  return cache;
2852  }
2853 
2854  /* Nope, do the computation */
2855  mergejoinscansel(root,
2856  (Node *) rinfo->clause,
2857  pathkey->pk_opfamily,
2858  pathkey->pk_strategy,
2859  pathkey->pk_nulls_first,
2860  &leftstartsel,
2861  &leftendsel,
2862  &rightstartsel,
2863  &rightendsel);
2864 
2865  /* Cache the result in suitably long-lived workspace */
2866  oldcontext = MemoryContextSwitchTo(root->planner_cxt);
2867 
2868  cache = (MergeScanSelCache *) palloc(sizeof(MergeScanSelCache));
2869  cache->opfamily = pathkey->pk_opfamily;
2870  cache->collation = pathkey->pk_eclass->ec_collation;
2871  cache->strategy = pathkey->pk_strategy;
2872  cache->nulls_first = pathkey->pk_nulls_first;
2873  cache->leftstartsel = leftstartsel;
2874  cache->leftendsel = leftendsel;
2875  cache->rightstartsel = rightstartsel;
2876  cache->rightendsel = rightendsel;
2877 
2878  rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);
2879 
2880  MemoryContextSwitchTo(oldcontext);
2881 
2882  return cache;
2883 }
2884 
2885 /*
2886  * initial_cost_hashjoin
2887  * Preliminary estimate of the cost of a hashjoin path.
2888  *
2889  * This must quickly produce lower-bound estimates of the path's startup and
2890  * total costs. If we are unable to eliminate the proposed path from
2891  * consideration using the lower bounds, final_cost_hashjoin will be called
2892  * to obtain the final estimates.
2893  *
2894  * The exact division of labor between this function and final_cost_hashjoin
2895  * is private to them, and represents a tradeoff between speed of the initial
2896  * estimate and getting a tight lower bound. We choose to not examine the
2897  * join quals here (other than by counting the number of hash clauses),
2898  * so we can't do much with CPU costs. We do assume that
2899  * ExecChooseHashTableSize is cheap enough to use here.
2900  *
2901  * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
2902  * other data to be used by final_cost_hashjoin
2903  * 'jointype' is the type of join to be performed
2904  * 'hashclauses' is the list of joinclauses to be used as hash clauses
2905  * 'outer_path' is the outer input to the join
2906  * 'inner_path' is the inner input to the join
2907  * 'extra' contains miscellaneous information about the join
2908  */
2909 void
2910 initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace,
2911  JoinType jointype,
2912  List *hashclauses,
2913  Path *outer_path, Path *inner_path,
2914  JoinPathExtraData *extra)
2915 {
2916  Cost startup_cost = 0;
2917  Cost run_cost = 0;
2918  double outer_path_rows = outer_path->rows;
2919  double inner_path_rows = inner_path->rows;
2920  int num_hashclauses = list_length(hashclauses);
2921  int numbuckets;
2922  int numbatches;
2923  int num_skew_mcvs;
2924 
2925  /* cost of source data */
2926  startup_cost += outer_path->startup_cost;
2927  run_cost += outer_path->total_cost - outer_path->startup_cost;
2928  startup_cost += inner_path->total_cost;
2929 
2930  /*
2931  * Cost of computing hash function: must do it once per input tuple. We
2932  * charge one cpu_operator_cost for each column's hash function. Also,
2933  * tack on one cpu_tuple_cost per inner row, to model the costs of
2934  * inserting the row into the hashtable.
2935  *
2936  * XXX when a hashclause is more complex than a single operator, we really
2937  * should charge the extra eval costs of the left or right side, as
2938  * appropriate, here. This seems more work than it's worth at the moment.
2939  */
2940  startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
2941  * inner_path_rows;
2942  run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
2943 
2944  /*
2945  * Get hash table size that executor would use for inner relation.
2946  *
2947  * XXX for the moment, always assume that skew optimization will be
2948  * performed. As long as SKEW_WORK_MEM_PERCENT is small, it's not worth
2949  * trying to determine that for sure.
2950  *
2951  * XXX at some point it might be interesting to try to account for skew
2952  * optimization in the cost estimate, but for now, we don't.
2953  */
2954  ExecChooseHashTableSize(inner_path_rows,
2955  inner_path->pathtarget->width,
2956  true, /* useskew */
2957  &numbuckets,
2958  &numbatches,
2959  &num_skew_mcvs);
2960 
2961  /*
2962  * If inner relation is too big then we will need to "batch" the join,
2963  * which implies writing and reading most of the tuples to disk an extra
2964  * time. Charge seq_page_cost per page, since the I/O should be nice and
2965  * sequential. Writing the inner rel counts as startup cost, all the rest
2966  * as run cost.
2967  */
2968  if (numbatches > 1)
2969  {
2970  double outerpages = page_size(outer_path_rows,
2971  outer_path->pathtarget->width);
2972  double innerpages = page_size(inner_path_rows,
2973  inner_path->pathtarget->width);
2974 
2975  startup_cost += seq_page_cost * innerpages;
2976  run_cost += seq_page_cost * (innerpages + 2 * outerpages);
2977  }
2978 
2979  /* CPU costs left for later */
2980 
2981  /* Public result fields */
2982  workspace->startup_cost = startup_cost;
2983  workspace->total_cost = startup_cost + run_cost;
2984  /* Save private data for final_cost_hashjoin */
2985  workspace->run_cost = run_cost;
2986  workspace->numbuckets = numbuckets;
2987  workspace->numbatches = numbatches;
2988 }
2989 
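A standalone sketch of the hashing and batching charges above, assuming the usual default cost parameters and treating the batch count and page counts as given (in the planner they come from ExecChooseHashTableSize() and page_size()):

#include <math.h>
#include <stdio.h>

/* Hash-build and batching charges, per initial_cost_hashjoin() above. */
int
main(void)
{
    double  cpu_operator_cost = 0.0025; /* assumed default GUC value */
    double  cpu_tuple_cost = 0.01;      /* assumed default GUC value */
    double  seq_page_cost = 1.0;        /* assumed default GUC value */
    double  outer_path_rows = 1e6, inner_path_rows = 1e5;   /* hypothetical */
    int     num_hashclauses = 1;
    int     numbatches = 4;             /* hypothetical ExecChooseHashTableSize() result */
    double  outerpages = 12500.0, innerpages = 1250.0;      /* hypothetical page counts */
    double  startup_cost = 0.0, run_cost = 0.0;

    /* hash-function evaluation plus hashtable insertion of inner rows */
    startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
        * inner_path_rows;
    run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;

    /* batching: write and reread most tuples once, sequentially */
    if (numbatches > 1)
    {
        startup_cost += seq_page_cost * innerpages;
        run_cost += seq_page_cost * (innerpages + 2 * outerpages);
    }

    printf("startup = %.2f, run = %.2f\n", startup_cost, run_cost);
    return 0;
}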
2990 /*
2991  * final_cost_hashjoin
2992  * Final estimate of the cost and result size of a hashjoin path.
2993  *
2994  * Note: the numbatches estimate is also saved into 'path' for use later
2995  *
2996  * 'path' is already filled in except for the rows and cost fields and
2997  * num_batches
2998  * 'workspace' is the result from initial_cost_hashjoin
2999  * 'extra' contains miscellaneous information about the join
3000  */
3001 void
3002 final_cost_hashjoin(PlannerInfo *root, HashPath *path,
3003  JoinCostWorkspace *workspace,
3004  JoinPathExtraData *extra)
3005 {
3006  Path *outer_path = path->jpath.outerjoinpath;
3007  Path *inner_path = path->jpath.innerjoinpath;
3008  double outer_path_rows = outer_path->rows;
3009  double inner_path_rows = inner_path->rows;
3010  List *hashclauses = path->path_hashclauses;
3011  Cost startup_cost = workspace->startup_cost;
3012  Cost run_cost = workspace->run_cost;
3013  int numbuckets = workspace->numbuckets;
3014  int numbatches = workspace->numbatches;
3015  Cost cpu_per_tuple;
3016  QualCost hash_qual_cost;
3017  QualCost qp_qual_cost;
3018  double hashjointuples;
3019  double virtualbuckets;
3020  Selectivity innerbucketsize;
3021  ListCell *hcl;
3022 
3023  /* Mark the path with the correct row estimate */
3024  if (path->jpath.path.param_info)
3025  path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
3026  else
3027  path->jpath.path.rows = path->jpath.path.parent->rows;
3028 
3029  /* For partial paths, scale row estimate. */
3030  if (path->jpath.path.parallel_workers > 0)
3031  {
3032  double parallel_divisor = get_parallel_divisor(&path->jpath.path);
3033 
3034  path->jpath.path.rows =
3035  clamp_row_est(path->jpath.path.rows / parallel_divisor);
3036  }
3037 
3038  /*
3039  * We could include disable_cost in the preliminary estimate, but that
3040  * would amount to optimizing for the case where the join method is
3041  * disabled, which doesn't seem like the way to bet.
3042  */
3043  if (!enable_hashjoin)
3044  startup_cost += disable_cost;
3045 
3046  /* mark the path with estimated # of batches */
3047  path->num_batches = numbatches;
3048 
3049  /* and compute the number of "virtual" buckets in the whole join */
3050  virtualbuckets = (double) numbuckets * (double) numbatches;
3051 
3052  /*
3053  * Determine bucketsize fraction for inner relation. We use the smallest
3054  * bucketsize estimated for any individual hashclause; this is undoubtedly
3055  * conservative.
3056  *
3057  * BUT: if inner relation has been unique-ified, we can assume it's good
3058  * for hashing. This is important both because it's the right answer, and
3059  * because we avoid contaminating the cache with a value that's wrong for
3060  * non-unique-ified paths.
3061  */
3062  if (IsA(inner_path, UniquePath))
3063  innerbucketsize = 1.0 / virtualbuckets;
3064  else
3065  {
3066  innerbucketsize = 1.0;
3067  foreach(hcl, hashclauses)
3068  {
3069  RestrictInfo *restrictinfo = lfirst_node(RestrictInfo, hcl);
3070  Selectivity thisbucketsize;
3071 
3072  /*
3073  * First we have to figure out which side of the hashjoin clause
3074  * is the inner side.
3075  *
3076  * Since we tend to visit the same clauses over and over when
3077  * planning a large query, we cache the bucketsize estimate in the
3078  * RestrictInfo node to avoid repeated lookups of statistics.
3079  */
3080  if (bms_is_subset(restrictinfo->right_relids,
3081  inner_path->parent->relids))
3082  {
3083  /* righthand side is inner */
3084  thisbucketsize = restrictinfo->right_bucketsize;
3085  if (thisbucketsize < 0)
3086  {
3087  /* not cached yet */
3088  thisbucketsize =
3089  estimate_hash_bucketsize(root,
3090  get_rightop(restrictinfo->clause),
3091  virtualbuckets);
3092  restrictinfo->right_bucketsize = thisbucketsize;
3093  }
3094  }
3095  else
3096  {
3097  Assert(bms_is_subset(restrictinfo->left_relids,
3098  inner_path->parent->relids));
3099  /* lefthand side is inner */
3100  thisbucketsize = restrictinfo->left_bucketsize;
3101  if (thisbucketsize < 0)
3102  {
3103  /* not cached yet */
3104  thisbucketsize =
3105  estimate_hash_bucketsize(root,
3106  get_leftop(restrictinfo->clause),
3107  virtualbuckets);
3108  restrictinfo->left_bucketsize = thisbucketsize;
3109  }
3110  }
3111 
3112  if (innerbucketsize > thisbucketsize)
3113  innerbucketsize = thisbucketsize;
3114  }
3115  }
3116 
3117  /*
3118  * Compute cost of the hashquals and qpquals (other restriction clauses)
3119  * separately.
3120  */
3121  cost_qual_eval(&hash_qual_cost, hashclauses, root);
3122  cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
3123  qp_qual_cost.startup -= hash_qual_cost.startup;
3124  qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
3125 
3126  /* CPU costs */
3127 
3128  if (path->jpath.jointype == JOIN_SEMI ||
3129  path->jpath.jointype == JOIN_ANTI ||
3130  extra->inner_unique)
3131  {
3132  double outer_matched_rows;
3133  Selectivity inner_scan_frac;
3134 
3135  /*
3136  * With a SEMI or ANTI join, or if the innerrel is known unique, the
3137  * executor will stop after the first match.
3138  *
3139  * For an outer-rel row that has at least one match, we can expect the
3140  * bucket scan to stop after a fraction 1/(match_count+1) of the
3141  * bucket's rows, if the matches are evenly distributed. Since they
3142  * probably aren't quite evenly distributed, we apply a fuzz factor of
3143  * 2.0 to that fraction. (If we used a larger fuzz factor, we'd have
3144  * to clamp inner_scan_frac to at most 1.0; but since match_count is
3145  * at least 1, no such clamp is needed now.)
3146  */
3147  outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
3148  inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
3149 
3150  startup_cost += hash_qual_cost.startup;
3151  run_cost += hash_qual_cost.per_tuple * outer_matched_rows *
3152  clamp_row_est(inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;
3153 
3154  /*
3155  * For unmatched outer-rel rows, the picture is quite a lot different.
3156  * In the first place, there is no reason to assume that these rows
3157  * preferentially hit heavily-populated buckets; instead assume they
3158  * are uncorrelated with the inner distribution and so they see an
3159  * average bucket size of inner_path_rows / virtualbuckets. In the
3160  * second place, it seems likely that they will have few if any exact
3161  * hash-code matches and so very few of the tuples in the bucket will
3162  * actually require eval of the hash quals. We don't have any good
3163  * way to estimate how many will, but for the moment assume that the
3164  * effective cost per bucket entry is one-tenth what it is for
3165  * matchable tuples.
3166  */
3167  run_cost += hash_qual_cost.per_tuple *
3168  (outer_path_rows - outer_matched_rows) *
3169  clamp_row_est(inner_path_rows / virtualbuckets) * 0.05;
3170 
3171  /* Get # of tuples that will pass the basic join */
3172  if (path->jpath.jointype == JOIN_SEMI)
3173  hashjointuples = outer_matched_rows;
3174  else
3175  hashjointuples = outer_path_rows - outer_matched_rows;
3176  }
3177  else
3178  {
3179  /*
3180  * The number of tuple comparisons needed is the number of outer
3181  * tuples times the typical number of tuples in a hash bucket, which
3182  * is the inner relation size times its bucketsize fraction. At each
3183  * one, we need to evaluate the hashjoin quals. But actually,
3184  * charging the full qual eval cost at each tuple is pessimistic,
3185  * since we don't evaluate the quals unless the hash values match
3186  * exactly. For lack of a better idea, halve the cost estimate to
3187  * allow for that.
3188  */
3189  startup_cost += hash_qual_cost.startup;
3190  run_cost += hash_qual_cost.per_tuple * outer_path_rows *
3191  clamp_row_est(inner_path_rows * innerbucketsize) * 0.5;
3192 
3193  /*
3194  * Get approx # tuples passing the hashquals. We use
3195  * approx_tuple_count here because we need an estimate done with
3196  * JOIN_INNER semantics.
3197  */
3198  hashjointuples = approx_tuple_count(root, &path->jpath, hashclauses);
3199  }
3200 
3201  /*
3202  * For each tuple that gets through the hashjoin proper, we charge
3203  * cpu_tuple_cost plus the cost of evaluating additional restriction
3204  * clauses that are to be applied at the join. (This is pessimistic since
3205  * not all of the quals may get evaluated at each tuple.)
3206  */
3207  startup_cost += qp_qual_cost.startup;
3208  cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
3209  run_cost += cpu_per_tuple * hashjointuples;
3210 
3211  /* tlist eval costs are paid per output row, not per tuple scanned */
3212  startup_cost += path->jpath.path.pathtarget->cost.startup;
3213  run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
3214 
3215  path->jpath.path.startup_cost = startup_cost;
3216  path->jpath.path.total_cost = startup_cost + run_cost;
3217 }
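
A rough worked illustration of the non-SEMI/ANTI hashqual charge computed above. The figures are invented and assume a single hashclause whose operator has procost 1, with the default cpu_operator_cost of 0.0025:

/* Hypothetical numbers, not from the source:
 *   outer_path_rows = 1000, inner_path_rows = 100000, innerbucketsize = 0.001
 *   hash_qual_cost.per_tuple = 1 * cpu_operator_cost = 0.0025
 *
 *   expected bucket scan length = clamp_row_est(100000 * 0.001) = 100
 *   run_cost += 0.0025 * 1000 * 100 * 0.5 = 125 cost units
 */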
3218 
3219 
3220 /*
3221  * cost_subplan
3222  * Figure the costs for a SubPlan (or initplan).
3223  *
3224  * Note: we could dig the subplan's Plan out of the root list, but in practice
3225  * all callers have it handy already, so we make them pass it.
3226  */
3227 void
3228 cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
3229 {
3230  QualCost sp_cost;
3231 
3232  /* Figure any cost for evaluating the testexpr */
3233  cost_qual_eval(&sp_cost,
3234  make_ands_implicit((Expr *) subplan->testexpr),
3235  root);
3236 
3237  if (subplan->useHashTable)
3238  {
3239  /*
3240  * If we are using a hash table for the subquery outputs, then the
3241  * cost of evaluating the query is a one-time cost. We charge one
3242  * cpu_operator_cost per tuple for the work of loading the hashtable,
3243  * too.
3244  */
3245  sp_cost.startup += plan->total_cost +
3246  cpu_operator_cost * plan->plan_rows;
3247 
3248  /*
3249  * The per-tuple costs include the cost of evaluating the lefthand
3250  * expressions, plus the cost of probing the hashtable. We already
3251  * accounted for the lefthand expressions as part of the testexpr, and
3252  * will also have counted one cpu_operator_cost for each comparison
3253  * operator. That is probably too low for the probing cost, but it's
3254  * hard to make a better estimate, so live with it for now.
3255  */
3256  }
3257  else
3258  {
3259  /*
3260  * Otherwise we will be rescanning the subplan output on each
3261  * evaluation. We need to estimate how much of the output we will
3262  * actually need to scan. NOTE: this logic should agree with the
3263  * tuple_fraction estimates used by make_subplan() in
3264  * plan/subselect.c.
3265  */
3266  Cost plan_run_cost = plan->total_cost - plan->startup_cost;
3267 
3268  if (subplan->subLinkType == EXISTS_SUBLINK)
3269  {
3270  /* we only need to fetch 1 tuple; clamp to avoid zero divide */
3271  sp_cost.per_tuple += plan_run_cost / clamp_row_est(plan->plan_rows);
3272  }
3273  else if (subplan->subLinkType == ALL_SUBLINK ||
3274  subplan->subLinkType == ANY_SUBLINK)
3275  {
3276  /* assume we need 50% of the tuples */
3277  sp_cost.per_tuple += 0.50 * plan_run_cost;
3278  /* also charge a cpu_operator_cost per row examined */
3279  sp_cost.per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
3280  }
3281  else
3282  {
3283  /* assume we need all tuples */
3284  sp_cost.per_tuple += plan_run_cost;
3285  }
3286 
3287  /*
3288  * Also account for subplan's startup cost. If the subplan is
3289  * uncorrelated or undirect correlated, AND its topmost node is one
3290  * that materializes its output, assume that we'll only need to pay
3291  * its startup cost once; otherwise assume we pay the startup cost
3292  * every time.
3293  */
3294  if (subplan->parParam == NIL &&
3295  ExecMaterializesOutput(nodeTag(plan)))
3296  sp_cost.startup += plan->startup_cost;
3297  else
3298  sp_cost.per_tuple += plan->startup_cost;
3299  }
3300 
3301  subplan->startup_cost = sp_cost.startup;
3302  subplan->per_call_cost = sp_cost.per_tuple;
3303 }
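
A hedged sketch of how the EXISTS branch above works out for a hypothetical correlated subplan (all numbers invented):

/* Suppose plan->startup_cost = 5, plan->total_cost = 205, plan->plan_rows = 1000,
 * and subplan->parParam != NIL (correlated, so startup is paid on every call):
 *   plan_run_cost = 205 - 5 = 200
 *   per_tuple += 200 / clamp_row_est(1000) = 0.2    (we fetch only one tuple)
 *   per_tuple += plan->startup_cost = 5             (startup paid on each call)
 * giving subplan->per_call_cost of about 5.2.
 */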
3304 
3305 
3306 /*
3307  * cost_rescan
3308  * Given a finished Path, estimate the costs of rescanning it after
3309  * having done so the first time. For some Path types a rescan is
3310  * cheaper than an original scan (if no parameters change), and this
3311  * function embodies knowledge about that. The default is to return
3312  * the same costs stored in the Path. (Note that the cost estimates
3313  * actually stored in Paths are always for first scans.)
3314  *
3315  * This function is not currently intended to model effects such as rescans
3316  * being cheaper due to disk block caching; what we are concerned with is
3317  * plan types wherein the executor caches results explicitly, or doesn't
3318  * redo startup calculations, etc.
3319  */
3320 static void
3321 cost_rescan(PlannerInfo *root, Path *path,
3322  Cost *rescan_startup_cost, /* output parameters */
3323  Cost *rescan_total_cost)
3324 {
3325  switch (path->pathtype)
3326  {
3327  case T_FunctionScan:
3328 
3329  /*
3330  * Currently, nodeFunctionscan.c always executes the function to
3331  * completion before returning any rows, and caches the results in
3332  * a tuplestore. So the function eval cost is all startup cost
3333  * and isn't paid over again on rescans. However, all run costs
3334  * will be paid over again.
3335  */
3336  *rescan_startup_cost = 0;
3337  *rescan_total_cost = path->total_cost - path->startup_cost;
3338  break;
3339  case T_HashJoin:
3340 
3341  /*
3342  * If it's a single-batch join, we don't need to rebuild the hash
3343  * table during a rescan.
3344  */
3345  if (((HashPath *) path)->num_batches == 1)
3346  {
3347  /* Startup cost is exactly the cost of hash table building */
3348  *rescan_startup_cost = 0;
3349  *rescan_total_cost = path->total_cost - path->startup_cost;
3350  }
3351  else
3352  {
3353  /* Otherwise, no special treatment */
3354  *rescan_startup_cost = path->startup_cost;
3355  *rescan_total_cost = path->total_cost;
3356  }
3357  break;
3358  case T_CteScan:
3359  case T_WorkTableScan:
3360  {
3361  /*
3362  * These plan types materialize their final result in a
3363  * tuplestore or tuplesort object. So the rescan cost is only
3364  * cpu_tuple_cost per tuple, unless the result is large enough
3365  * to spill to disk.
3366  */
3367  Cost run_cost = cpu_tuple_cost * path->rows;
3368  double nbytes = relation_byte_size(path->rows,
3369  path->pathtarget->width);
3370  long work_mem_bytes = work_mem * 1024L;
3371 
3372  if (nbytes > work_mem_bytes)
3373  {
3374  /* It will spill, so account for re-read cost */
3375  double npages = ceil(nbytes / BLCKSZ);
3376 
3377  run_cost += seq_page_cost * npages;
3378  }
3379  *rescan_startup_cost = 0;
3380  *rescan_total_cost = run_cost;
3381  }
3382  break;
3383  case T_Material:
3384  case T_Sort:
3385  {
3386  /*
3387  * These plan types not only materialize their results, but do
3388  * not implement qual filtering or projection. So they are
3389  * even cheaper to rescan than the ones above. We charge only
3390  * cpu_operator_cost per tuple. (Note: keep that in sync with
3391  * the run_cost charge in cost_sort, and also see comments in
3392  * cost_material before you change it.)
3393  */
3394  Cost run_cost = cpu_operator_cost * path->rows;
3395  double nbytes = relation_byte_size(path->rows,
3396  path->pathtarget->width);
3397  long work_mem_bytes = work_mem * 1024L;
3398 
3399  if (nbytes > work_mem_bytes)
3400  {
3401  /* It will spill, so account for re-read cost */
3402  double npages = ceil(nbytes / BLCKSZ);
3403 
3404  run_cost += seq_page_cost * npages;
3405  }
3406  *rescan_startup_cost = 0;
3407  *rescan_total_cost = run_cost;
3408  }
3409  break;
3410  default:
3411  *rescan_startup_cost = path->startup_cost;
3412  *rescan_total_cost = path->total_cost;
3413  break;
3414  }
3415 }
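
For concreteness, a hypothetical rescan of a Material node under default settings (cpu_operator_cost = 0.0025, work_mem = 4MB); the numbers are illustrative only:

/* path->rows = 10000, pathtarget->width = 40:
 *   run_cost = cpu_operator_cost * 10000 = 25
 *   nbytes  ~= 10000 * (40 bytes + per-tuple overhead), well under 4MB,
 *   so there is no seq_page_cost surcharge and
 *   rescan_startup_cost = 0, rescan_total_cost = 25.
 */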
3416 
3417 
3418 /*
3419  * cost_qual_eval
3420  * Estimate the CPU costs of evaluating a WHERE clause.
3421  * The input can be either an implicitly-ANDed list of boolean
3422  * expressions, or a list of RestrictInfo nodes. (The latter is
3423  * preferred since it allows caching of the results.)
3424  * The result includes both a one-time (startup) component,
3425  * and a per-evaluation component.
3426  */
3427 void
3428 cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
3429 {
3430  cost_qual_eval_context context;
3431  ListCell *l;
3432 
3433  context.root = root;
3434  context.total.startup = 0;
3435  context.total.per_tuple = 0;
3436 
3437  /* We don't charge any cost for the implicit ANDing at top level ... */
3438 
3439  foreach(l, quals)
3440  {
3441  Node *qual = (Node *) lfirst(l);
3442 
3443  cost_qual_eval_walker(qual, &context);
3444  }
3445 
3446  *cost = context.total;
3447 }
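
A minimal caller-side sketch of how cost_qual_eval's output is typically consumed by the scan-costing routines in this file; example_charge_quals is a hypothetical helper, not part of costsize.c:

static void
example_charge_quals(PlannerInfo *root, RelOptInfo *rel,
                     Cost *startup_cost, Cost *run_cost)
{
    QualCost    qpqual_cost;
    Cost        cpu_per_tuple;

    /* one-time and per-tuple components of evaluating the rel's quals */
    cost_qual_eval(&qpqual_cost, rel->baserestrictinfo, root);
    *startup_cost += qpqual_cost.startup;
    cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
    *run_cost += cpu_per_tuple * rel->tuples;
}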
3448 
3449 /*
3450  * cost_qual_eval_node
3451  * As above, for a single RestrictInfo or expression.
3452  */
3453 void
3454 cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
3455 {
3456  cost_qual_eval_context context;
3457 
3458  context.root = root;
3459  context.total.startup = 0;
3460  context.total.per_tuple = 0;
3461 
3462  cost_qual_eval_walker(qual, &context);
3463 
3464  *cost = context.total;
3465 }
3466 
3467 static bool
3468 cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
3469 {
3470  if (node == NULL)
3471  return false;
3472 
3473  /*
3474  * RestrictInfo nodes contain an eval_cost field reserved for this
3475  * routine's use, so that it's not necessary to evaluate the qual clause's
3476  * cost more than once. If the clause's cost hasn't been computed yet,
3477  * the field's startup value will contain -1.
3478  */
3479  if (IsA(node, RestrictInfo))
3480  {
3481  RestrictInfo *rinfo = (RestrictInfo *) node;
3482 
3483  if (rinfo->eval_cost.startup < 0)
3484  {
3485  cost_qual_eval_context locContext;
3486 
3487  locContext.root = context->root;
3488  locContext.total.startup = 0;
3489  locContext.total.per_tuple = 0;
3490 
3491  /*
3492  * For an OR clause, recurse into the marked-up tree so that we
3493  * set the eval_cost for contained RestrictInfos too.
3494  */
3495  if (rinfo->orclause)
3496  cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
3497  else
3498  cost_qual_eval_walker((Node *) rinfo->clause, &locContext);
3499 
3500  /*
3501  * If the RestrictInfo is marked pseudoconstant, it will be tested
3502  * only once, so treat its cost as all startup cost.
3503  */
3504  if (rinfo->pseudoconstant)
3505  {
3506  /* count one execution during startup */
3507  locContext.total.startup += locContext.total.per_tuple;
3508  locContext.total.per_tuple = 0;
3509  }
3510  rinfo->eval_cost = locContext.total;
3511  }
3512  context->total.startup += rinfo->eval_cost.startup;
3513  context->total.per_tuple += rinfo->eval_cost.per_tuple;
3514  /* do NOT recurse into children */
3515  return false;
3516  }
3517 
3518  /*
3519  * For each operator or function node in the given tree, we charge the
3520  * estimated execution cost given by pg_proc.procost (remember to multiply
3521  * this by cpu_operator_cost).
3522  *
3523  * Vars and Consts are charged zero, and so are boolean operators (AND,
3524  * OR, NOT). Simplistic, but a lot better than no model at all.
3525  *
3526  * Should we try to account for the possibility of short-circuit
3527  * evaluation of AND/OR? Probably *not*, because that would make the
3528  * results depend on the clause ordering, and we are not in any position
3529  * to expect that the current ordering of the clauses is the one that's
3530  * going to end up being used. The above per-RestrictInfo caching would
3531  * not mix well with trying to re-order clauses anyway.
3532  *
3533  * Another issue that is entirely ignored here is that if a set-returning
3534  * function is below top level in the tree, the functions/operators above
3535  * it will need to be evaluated multiple times. In practical use, such
3536  * cases arise so seldom as to not be worth the added complexity needed;
3537  * moreover, since our rowcount estimates for functions tend to be pretty
3538  * phony, the results would also be pretty phony.
3539  */
3540  if (IsA(node, FuncExpr))
3541  {
3542  context->total.per_tuple +=
3543  get_func_cost(((FuncExpr *) node)->funcid) * cpu_operator_cost;
3544  }
3545  else if (IsA(node, OpExpr) ||
3546  IsA(node, DistinctExpr) ||
3547  IsA(node, NullIfExpr))
3548  {
3549  /* rely on struct equivalence to treat these all alike */
3550  set_opfuncid((OpExpr *) node);
3551  context->total.per_tuple +=
3552  get_func_cost(((OpExpr *) node)->opfuncid) * cpu_operator_cost;
3553  }
3554  else if (IsA(node, ScalarArrayOpExpr))
3555  {
3556  /*
3557  * Estimate that the operator will be applied to about half of the
3558  * array elements before the answer is determined.
3559  */
3560  ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
3561  Node *arraynode = (Node *) lsecond(saop->args);
3562 
3563  set_sa_opfuncid(saop);
3564  context->total.per_tuple += get_func_cost(saop->opfuncid) *
3565  cpu_operator_cost * estimate_array_length(arraynode) * 0.5;
3566  }
3567  else if (IsA(node, Aggref) ||
3568  IsA(node, WindowFunc))
3569  {
3570  /*
3571  * Aggref and WindowFunc nodes are (and should be) treated like Vars,
3572  * ie, zero execution cost in the current model, because they behave
3573  * essentially like Vars at execution. We disregard the costs of
3574  * their input expressions for the same reason. The actual execution
3575  * costs of the aggregate/window functions and their arguments have to
3576  * be factored into plan-node-specific costing of the Agg or WindowAgg
3577  * plan node.
3578  */
3579  return false; /* don't recurse into children */
3580  }
3581  else if (IsA(node, CoerceViaIO))
3582  {
3583  CoerceViaIO *iocoerce = (CoerceViaIO *) node;
3584  Oid iofunc;
3585  Oid typioparam;
3586  bool typisvarlena;
3587 
3588  /* check the result type's input function */
3589  getTypeInputInfo(iocoerce->resulttype,
3590  &iofunc, &typioparam);
3591  context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
3592  /* check the input type's output function */
3593  getTypeOutputInfo(exprType((Node *) iocoerce->arg),
3594  &iofunc, &typisvarlena);
3595  context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
3596  }
3597  else if (IsA(node, ArrayCoerceExpr))
3598  {
3599  ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
3600  Node *arraynode = (Node *) acoerce->arg;
3601 
3602  if (OidIsValid(acoerce->elemfuncid))
3603  context->total.per_tuple += get_func_cost(acoerce->elemfuncid) *
3604  cpu_operator_cost * estimate_array_length(arraynode);
3605  }
3606  else if (IsA(node, RowCompareExpr))
3607  {
3608  /* Conservatively assume we will check all the columns */
3609  RowCompareExpr *rcexpr = (RowCompareExpr *) node;
3610  ListCell *lc;
3611 
3612  foreach(lc, rcexpr->opnos)
3613  {
3614  Oid opid = lfirst_oid(lc);
3615 
3616  context->total.per_tuple += get_func_cost(get_opcode(opid)) *
3617  cpu_operator_cost;
3618  }
3619  }
3620  else if (IsA(node, CurrentOfExpr))
3621  {
3622  /* Report high cost to prevent selection of anything but TID scan */
3623  context->total.startup += disable_cost;
3624  }
3625  else if (IsA(node, SubLink))
3626  {
3627  /* This routine should not be applied to un-planned expressions */
3628  elog(ERROR, "cannot handle unplanned sub-select");
3629  }
3630  else if (IsA(node, SubPlan))
3631  {
3632  /*
3633  * A subplan node in an expression typically indicates that the
3634  * subplan will be executed on each evaluation, so charge accordingly.
3635  * (Sub-selects that can be executed as InitPlans have already been
3636  * removed from the expression.)
3637  */
3638  SubPlan *subplan = (SubPlan *) node;
3639 
3640  context->total.startup += subplan->startup_cost;
3641  context->total.per_tuple += subplan->per_call_cost;
3642 
3643  /*
3644  * We don't want to recurse into the testexpr, because it was already
3645  * counted in the SubPlan node's costs. So we're done.
3646  */
3647  return false;
3648  }
3649  else if (IsA(node, AlternativeSubPlan))
3650  {
3651  /*
3652  * Arbitrarily use the first alternative plan for costing. (We should
3653  * certainly only include one alternative, and we don't yet have
3654  * enough information to know which one the executor is most likely to
3655  * use.)
3656  */
3657  AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;
3658 
3659  return cost_qual_eval_walker((Node *) linitial(asplan->subplans),
3660  context);
3661  }
3662  else if (IsA(node, PlaceHolderVar))
3663  {
3664  /*
3665  * A PlaceHolderVar should be given cost zero when considering general
3666  * expression evaluation costs. The expense of doing the contained
3667  * expression is charged as part of the tlist eval costs of the scan
3668  * or join where the PHV is first computed (see set_rel_width and
3669  * add_placeholders_to_joinrel). If we charged it again here, we'd be
3670  * double-counting the cost for each level of plan that the PHV
3671  * bubbles up through. Hence, return without recursing into the
3672  * phexpr.
3673  */
3674  return false;
3675  }
3676 
3677  /* recurse into children */
3678  return expression_tree_walker(node, cost_qual_eval_walker,
3679  (void *) context);
3680 }
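
As a concrete instance of the ScalarArrayOpExpr rule above, consider a clause such as "x = ANY ('{...}'::int4[])" with a ten-element constant array; the arithmetic below assumes int4eq has procost 1 and the default cpu_operator_cost:

/* get_func_cost(opfuncid) * cpu_operator_cost * estimate_array_length(arraynode) * 0.5
 *   = 1 * 0.0025 * 10 * 0.5 = 0.0125 per tuple,
 * i.e. the walker charges about five operator evaluations per input row.
 */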
3681 
3682 /*
3683  * get_restriction_qual_cost
3684  * Compute evaluation costs of a baserel's restriction quals, plus any
3685  * movable join quals that have been pushed down to the scan.
3686  * Results are returned into *qpqual_cost.
3687  *
3688  * This is a convenience subroutine that works for seqscans and other cases
3689  * where all the given quals will be evaluated the hard way. It's not useful
3690  * for cost_index(), for example, where the index machinery takes care of
3691  * some of the quals. We assume baserestrictcost was previously set by
3692  * set_baserel_size_estimates().
3693  */
3694 static void
3695 get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
3696  ParamPathInfo *param_info,
3697  QualCost *qpqual_cost)
3698 {
3699  if (param_info)
3700  {
3701  /* Include costs of pushed-down clauses */
3702  cost_qual_eval(qpqual_cost, param_info->ppi_clauses, root);
3703 
3704  qpqual_cost->startup += baserel->baserestrictcost.startup;
3705  qpqual_cost->per_tuple += baserel->baserestrictcost.per_tuple;
3706  }
3707  else
3708  *qpqual_cost = baserel->baserestrictcost;
3709 }
3710 
3711 
3712 /*
3713  * compute_semi_anti_join_factors
3714  * Estimate how much of the inner input a SEMI, ANTI, or inner_unique join
3715  * can be expected to scan.
3716  *
3717  * In a hash or nestloop SEMI/ANTI join, the executor will stop scanning
3718  * inner rows as soon as it finds a match to the current outer row.
3719  * The same happens if we have detected the inner rel is unique.
3720  * We should therefore adjust some of the cost components for this effect.
3721  * This function computes some estimates needed for these adjustments.
3722  * These estimates will be the same regardless of the particular paths used
3723  * for the outer and inner relation, so we compute these once and then pass
3724  * them to all the join cost estimation functions.
3725  *
3726  * Input parameters:
3727  * outerrel: outer relation under consideration
3728  * innerrel: inner relation under consideration
3729  * jointype: if not JOIN_SEMI or JOIN_ANTI, we assume it's inner_unique
3730  * sjinfo: SpecialJoinInfo relevant to this join
3731  * restrictlist: join quals
3732  * Output parameters:
3733  * *semifactors is filled in (see relation.h for field definitions)
3734  */
3735 void
3736 compute_semi_anti_join_factors(PlannerInfo *root,
3737  RelOptInfo *outerrel,
3738  RelOptInfo *innerrel,
3739  JoinType jointype,
3740  SpecialJoinInfo *sjinfo,
3741  List *restrictlist,
3742  SemiAntiJoinFactors *semifactors)
3743 {
3744  Selectivity jselec;
3745  Selectivity nselec;
3746  Selectivity avgmatch;
3747  SpecialJoinInfo norm_sjinfo;
3748  List *joinquals;
3749  ListCell *l;
3750 
3751  /*
3752  * In an ANTI join, we must ignore clauses that are "pushed down", since
3753  * those won't affect the match logic. In a SEMI join, we do not
3754  * distinguish joinquals from "pushed down" quals, so just use the whole
3755  * restrictinfo list. For other outer join types, we should consider only
3756  * non-pushed-down quals, so that this devolves to an IS_OUTER_JOIN check.
3757  */
3758  if (IS_OUTER_JOIN(jointype))
3759  {
3760  joinquals = NIL;
3761  foreach(l, restrictlist)
3762  {
3763  RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
3764 
3765  if (!rinfo->is_pushed_down)
3766  joinquals = lappend(joinquals, rinfo);
3767  }
3768  }
3769  else
3770  joinquals = restrictlist;
3771 
3772  /*
3773  * Get the JOIN_SEMI or JOIN_ANTI selectivity of the join clauses.
3774  */
3775  jselec = clauselist_selectivity(root,
3776  joinquals,
3777  0,
3778  (jointype == JOIN_ANTI) ? JOIN_ANTI : JOIN_SEMI,
3779  sjinfo);
3780 
3781  /*
3782  * Also get the normal inner-join selectivity of the join clauses.
3783  */
3784  norm_sjinfo.type = T_SpecialJoinInfo;
3785  norm_sjinfo.min_lefthand = outerrel->relids;
3786  norm_sjinfo.min_righthand = innerrel->relids;
3787  norm_sjinfo.syn_lefthand = outerrel->relids;
3788  norm_sjinfo.syn_righthand = innerrel->relids;
3789  norm_sjinfo.jointype = JOIN_INNER;
3790  /* we don't bother trying to make the remaining fields valid */
3791  norm_sjinfo.lhs_strict = false;
3792  norm_sjinfo.delay_upper_joins = false;
3793  norm_sjinfo.semi_can_btree = false;
3794  norm_sjinfo.semi_can_hash = false;
3795  norm_sjinfo.semi_operators = NIL;
3796  norm_sjinfo.semi_rhs_exprs = NIL;
3797 
3798  nselec = clauselist_selectivity(root,
3799  joinquals,
3800  0,
3801  JOIN_INNER,
3802  &norm_sjinfo);
3803 
3804  /* Avoid leaking a lot of ListCells */
3805  if (IS_OUTER_JOIN(jointype))
3806  list_free(joinquals);
3807 
3808  /*
3809  * jselec can be interpreted as the fraction of outer-rel rows that have
3810  * any matches (this is true for both SEMI and ANTI cases). And nselec is
3811  * the fraction of the Cartesian product that matches. So, the average
3812  * number of matches for each outer-rel row that has at least one match is
3813  * nselec * inner_rows / jselec.
3814  *
3815  * Note: it is correct to use the inner rel's "rows" count here, even
3816  * though we might later be considering a parameterized inner path with
3817  * fewer rows. This is because we have included all the join clauses in
3818  * the selectivity estimate.
3819  */
3820  if (jselec > 0) /* protect against zero divide */
3821  {
3822  avgmatch = nselec * innerrel->rows / jselec;
3823  /* Clamp to sane range */
3824  avgmatch = Max(1.0, avgmatch);
3825  }
3826  else
3827  avgmatch = 1.0;
3828 
3829  semifactors->outer_match_frac = jselec;
3830  semifactors->match_count = avgmatch;
3831 }
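
A worked example with invented selectivities, showing how the two outputs relate:

/* jselec (SEMI/ANTI selectivity) = 0.10, nselec (inner-join selectivity) = 0.005,
 * innerrel->rows = 400:
 *   avgmatch = nselec * inner_rows / jselec = 0.005 * 400 / 0.10 = 20
 * so semifactors->outer_match_frac = 0.10 and semifactors->match_count = 20,
 * i.e. 10% of the outer rows have matches, averaging 20 matches apiece.
 */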
3832 
3833 /*
3834  * has_indexed_join_quals
3835  * Check whether all the joinquals of a nestloop join are used as
3836  * inner index quals.
3837  *
3838  * If the inner path of a SEMI/ANTI join is an indexscan (including bitmap
3839  * indexscan) that uses all the joinquals as indexquals, we can assume that an
3840  * unmatched outer tuple is cheap to process, whereas otherwise it's probably
3841  * expensive.
3842  */
3843 static bool
3844 has_indexed_join_quals(NestPath *joinpath)
3845 {
3846  Relids joinrelids = joinpath->path.parent->relids;
3847  Path *innerpath = joinpath->innerjoinpath;
3848  List *indexclauses;
3849  bool found_one;
3850  ListCell *lc;
3851 
3852  /* If join still has quals to evaluate, it's not fast */
3853  if (joinpath->joinrestrictinfo != NIL)
3854  return false;
3855  /* Nor if the inner path isn't parameterized at all */
3856  if (innerpath->param_info == NULL)
3857  return false;
3858 
3859  /* Find the indexclauses list for the inner scan */
3860  switch (innerpath->pathtype)
3861  {
3862  case T_IndexScan:
3863  case T_IndexOnlyScan:
3864  indexclauses = ((IndexPath *) innerpath)->indexclauses;
3865  break;
3866  case T_BitmapHeapScan:
3867  {
3868  /* Accept only a simple bitmap scan, not AND/OR cases */
3869  Path *bmqual = ((BitmapHeapPath *) innerpath)->bitmapqual;
3870 
3871  if (IsA(bmqual, IndexPath))
3872  indexclauses = ((IndexPath *) bmqual)->indexclauses;
3873  else
3874  return false;
3875  break;
3876  }
3877  default:
3878 
3879  /*
3880  * If it's not a simple indexscan, it probably doesn't run quickly
3881  * for zero rows out, even if it's a parameterized path using all
3882  * the joinquals.
3883  */
3884  return false;
3885  }
3886 
3887  /*
3888  * Examine the inner path's param clauses. Any that are from the outer
3889  * path must be found in the indexclauses list, either exactly or in an
3890  * equivalent form generated by equivclass.c. Also, we must find at least
3891  * one such clause, else it's a clauseless join which isn't fast.
3892  */
3893  found_one = false;
3894  foreach(lc, innerpath->param_info->ppi_clauses)
3895  {
3896  RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
3897 
3898  if (join_clause_is_movable_into(rinfo,
3899  innerpath->parent->relids,
3900  joinrelids))
3901  {
3902  if (!(list_member_ptr(indexclauses, rinfo) ||
3903  is_redundant_derived_clause(rinfo, indexclauses)))
3904  return false;
3905  found_one = true;
3906  }
3907  }
3908  return found_one;
3909 }
3910 
3911 
3912 /*
3913  * approx_tuple_count
3914  * Quick-and-dirty estimation of the number of join rows passing
3915  * a set of qual conditions.
3916  *
3917  * The quals can be either an implicitly-ANDed list of boolean expressions,
3918  * or a list of RestrictInfo nodes (typically the latter).
3919  *
3920  * We intentionally compute the selectivity under JOIN_INNER rules, even
3921  * if it's some type of outer join. This is appropriate because we are
3922  * trying to figure out how many tuples pass the initial merge or hash
3923  * join step.
3924  *
3925  * This is quick-and-dirty because we bypass clauselist_selectivity, and
3926  * simply multiply the independent clause selectivities together. Now
3927  * clauselist_selectivity often can't do any better than that anyhow, but
3928  * for some situations (such as range constraints) it is smarter. However,
3929  * we can't effectively cache the results of clauselist_selectivity, whereas
3930  * the individual clause selectivities can be and are cached.
3931  *
3932  * Since we are only using the results to estimate how many potential
3933  * output tuples are generated and passed through qpqual checking, it
3934  * seems OK to live with the approximation.
3935  */
3936 static double
3937 approx_tuple_count(PlannerInfo *root, JoinPath *path, List *quals)
3938 {
3939  double tuples;
3940  double outer_tuples = path->outerjoinpath->rows;
3941  double inner_tuples = path->innerjoinpath->rows;
3942  SpecialJoinInfo sjinfo;
3943  Selectivity selec = 1.0;
3944  ListCell *l;
3945 
3946  /*
3947  * Make up a SpecialJoinInfo for JOIN_INNER semantics.
3948  */
3949  sjinfo.type = T_SpecialJoinInfo;
3950  sjinfo.min_lefthand = path->outerjoinpath->parent->relids;
3951  sjinfo.min_righthand = path->innerjoinpath->parent->relids;
3952  sjinfo.syn_lefthand = path->outerjoinpath->parent->relids;
3953  sjinfo.syn_righthand = path->innerjoinpath->parent->relids;
3954  sjinfo.jointype = JOIN_INNER;
3955  /* we don't bother trying to make the remaining fields valid */
3956  sjinfo.lhs_strict = false;
3957  sjinfo.delay_upper_joins = false;
3958  sjinfo.semi_can_btree = false;
3959  sjinfo.semi_can_hash = false;
3960  sjinfo.semi_operators = NIL;
3961  sjinfo.semi_rhs_exprs = NIL;
3962 
3963  /* Get the approximate selectivity */
3964  foreach(l, quals)
3965  {
3966  Node *qual = (Node *) lfirst(l);
3967 
3968  /* Note that clause_selectivity will be able to cache its result */
3969  selec *= clause_selectivity(root, qual, 0, JOIN_INNER, &sjinfo);
3970  }
3971 
3972  /* Apply it to the input relation sizes */
3973  tuples = selec * outer_tuples * inner_tuples;
3974 
3975  return clamp_row_est(tuples);
3976 }
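
An illustrative computation, with the clause selectivities invented:

/* Two clauses with cached selectivities 0.1 and 0.02, outer = 1000, inner = 5000:
 *   selec  = 0.1 * 0.02 = 0.002
 *   tuples = 0.002 * 1000 * 5000 = 10000   (then clamp_row_est is applied)
 */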
3977 
3978 
3979 /*
3980  * set_baserel_size_estimates
3981  * Set the size estimates for the given base relation.
3982  *
3983  * The rel's targetlist and restrictinfo list must have been constructed
3984  * already, and rel->tuples must be set.
3985  *
3986  * We set the following fields of the rel node:
3987  * rows: the estimated number of output tuples (after applying
3988  * restriction clauses).
3989  * width: the estimated average output tuple width in bytes.
3990  * baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
3991  */
3992 void
3993 set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
3994 {
3995  double nrows;
3996 
3997  /* Should only be applied to base relations */
3998  Assert(rel->relid > 0);
3999 
4000  nrows = rel->tuples *
4001  clauselist_selectivity(root,
4002  rel->baserestrictinfo,
4003  0,
4004  JOIN_INNER,
4005  NULL);
4006 
4007  rel->rows = clamp_row_est(nrows);
4008 
4009  cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
4010 
4011  set_rel_width(root, rel);
4012 }
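
A small numeric example of what this routine sets (values invented):

/* rel->tuples = 100000 and restriction clauses with combined selectivity 0.05:
 *   rel->rows = clamp_row_est(100000 * 0.05) = 5000
 * baserestrictcost and width are then filled in by cost_qual_eval and
 * set_rel_width respectively.
 */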
4013 
4014 /*
4015  * get_parameterized_baserel_size
4016  * Make a size estimate for a parameterized scan of a base relation.
4017  *
4018  * 'param_clauses' lists the additional join clauses to be used.
4019  *
4020  * set_baserel_size_estimates must have been applied already.
4021  */
4022 double
4023 get_parameterized_baserel_size(PlannerInfo *root, RelOptInfo *rel,
4024  List *param_clauses)
4025 {
4026  List *allclauses;
4027  double nrows;
4028 
4029  /*
4030  * Estimate the number of rows returned by the parameterized scan, knowing
4031  * that it will apply all the extra join clauses as well as the rel's own
4032  * restriction clauses. Note that we force the clauses to be treated as
4033  * non-join clauses during selectivity estimation.
4034  */
4035  allclauses = list_concat(list_copy(param_clauses),
4036  rel->baserestrictinfo);
4037  nrows = rel->tuples *
4038  clauselist_selectivity(root,
4039  allclauses,
4040  rel->relid, /* do not use 0! */
4041  JOIN_INNER,
4042  NULL);
4043  nrows = clamp_row_est(nrows);
4044  /* For safety, make sure result is not more than the base estimate */
4045  if (nrows > rel->rows)
4046  nrows = rel->rows;
4047  return nrows;
4048 }
4049 
4050 /*
4051  * set_joinrel_size_estimates
4052  * Set the size estimates for the given join relation.
4053  *
4054  * The rel's targetlist must have been constructed already, and a
4055  * restriction clause list that matches the given component rels must
4056  * be provided.
4057  *
4058  * Since there is more than one way to make a joinrel for more than two
4059  * base relations, the results we get here could depend on which component
4060  * rel pair is provided. In theory we should get the same answers no matter
4061  * which pair is provided; in practice, since the selectivity estimation
4062  * routines don't handle all cases equally well, we might not. But there's
4063  * not much to be done about it. (Would it make sense to repeat the
4064  * calculations for each pair of input rels that's encountered, and somehow
4065  * average the results? Probably way more trouble than it's worth, and
4066  * anyway we must keep the rowcount estimate the same for all paths for the
4067  * joinrel.)
4068  *
4069  * We set only the rows field here. The reltarget field was already set by
4070  * build_joinrel_tlist, and baserestrictcost is not used for join rels.
4071  */
4072 void
4073 set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
4074  RelOptInfo *outer_rel,
4075  RelOptInfo *inner_rel,
4076  SpecialJoinInfo *sjinfo,
4077  List *restrictlist)
4078 {
4079  rel->rows = calc_joinrel_size_estimate(root,
4080  outer_rel,
4081  inner_rel,
4082  outer_rel->rows,
4083  inner_rel->rows,
4084  sjinfo,
4085  restrictlist);
4086 }
4087 
4088 /*
4089  * get_parameterized_joinrel_size
4090  * Make a size estimate for a parameterized scan of a join relation.
4091  *
4092  * 'rel' is the joinrel under consideration.
4093  * 'outer_path', 'inner_path' are (probably also parameterized) Paths that
4094  * produce the relations being joined.
4095  * 'sjinfo' is any SpecialJoinInfo relevant to this join.
4096  * 'restrict_clauses' lists the join clauses that need to be applied at the
4097  * join node (including any movable clauses that were moved down to this join,
4098  * and not including any movable clauses that were pushed down into the
4099  * child paths).
4100  *
4101  * set_joinrel_size_estimates must have been applied already.
4102  */
4103 double
4104 get_parameterized_joinrel_size(PlannerInfo *root, RelOptInfo *rel,
4105  Path *outer_path,
4106  Path *inner_path,
4107  SpecialJoinInfo *sjinfo,
4108  List *restrict_clauses)
4109 {
4110  double nrows;
4111 
4112  /*
4113  * Estimate the number of rows returned by the parameterized join as the
4114  * sizes of the input paths times the selectivity of the clauses that have
4115  * ended up at this join node.
4116  *
4117  * As with set_joinrel_size_estimates, the rowcount estimate could depend
4118  * on the pair of input paths provided, though ideally we'd get the same
4119  * estimate for any pair with the same parameterization.
4120  */
4121  nrows = calc_joinrel_size_estimate(root,
4122  outer_path->parent,
4123  inner_path->parent,
4124  outer_path->rows,
4125  inner_path->rows,
4126  sjinfo,
4127  restrict_clauses);
4128  /* For safety, make sure result is not more than the base estimate */
4129  if (nrows > rel->rows)
4130  nrows = rel->rows;
4131  return nrows;
4132 }
4133 
4134 /*
4135  * calc_joinrel_size_estimate
4136  * Workhorse for set_joinrel_size_estimates and
4137  * get_parameterized_joinrel_size.
4138  *
4139  * outer_rel/inner_rel are the relations being joined, but they should be
4140  * assumed to have sizes outer_rows/inner_rows; those numbers might be less
4141  * than what rel->rows says, when we are considering parameterized paths.
4142  */
4143 static double
4144 calc_joinrel_size_estimate(PlannerInfo *root,
4145  RelOptInfo *outer_rel,
4146  RelOptInfo *inner_rel,
4147  double outer_rows,
4148  double inner_rows,
4149  SpecialJoinInfo *sjinfo,
4150  List *restrictlist_in)
4151 {
4152  /* This apparently-useless variable dodges a compiler bug in VS2013: */
4153  List *restrictlist = restrictlist_in;
4154  JoinType jointype = sjinfo->jointype;
4155  Selectivity fkselec;
4156  Selectivity jselec;
4157  Selectivity pselec;
4158  double nrows;
4159 
4160  /*
4161  * Compute joinclause selectivity. Note that we are only considering
4162  * clauses that become restriction clauses at this join level; we are not
4163  * double-counting them because they were not considered in estimating the
4164  * sizes of the component rels.
4165  *
4166  * First, see whether any of the joinclauses can be matched to known FK
4167  * constraints. If so, drop those clauses from the restrictlist, and
4168  * instead estimate their selectivity using FK semantics. (We do this
4169  * without regard to whether said clauses are local or "pushed down".
4170  * Probably, an FK-matching clause could never be seen as pushed down at
4171  * an outer join, since it would be strict and hence would be grounds for
4172  * join strength reduction.) fkselec gets the net selectivity for
4173  * FK-matching clauses, or 1.0 if there are none.
4174  */
4175  fkselec = get_foreign_key_join_selectivity(root,
4176  outer_rel->relids,
4177  inner_rel->relids,
4178  sjinfo,
4179  &restrictlist);
4180 
4181  /*
4182  * For an outer join, we have to distinguish the selectivity of the join's
4183  * own clauses (JOIN/ON conditions) from any clauses that were "pushed
4184  * down". For inner joins we just count them all as joinclauses.
4185  */
4186  if (IS_OUTER_JOIN(jointype))
4187  {
4188  List *joinquals = NIL;
4189  List *pushedquals = NIL;
4190  ListCell *l;
4191 
4192  /* Grovel through the clauses to separate into two lists */
4193  foreach(l, restrictlist)
4194  {
4195  RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
4196 
4197  if (rinfo->is_pushed_down)
4198  pushedquals = lappend(pushedquals, rinfo);
4199  else
4200  joinquals = lappend(joinquals, rinfo);
4201  }
4202 
4203  /* Get the separate selectivities */
4204  jselec = clauselist_selectivity(root,
4205  joinquals,
4206  0,
4207  jointype,
4208  sjinfo);
4209  pselec = clauselist_selectivity(root,
4210  pushedquals,
4211  0,
4212  jointype,
4213  sjinfo);
4214 
4215  /* Avoid leaking a lot of ListCells */
4216  list_free(joinquals);
4217  list_free(pushedquals);
4218  }
4219  else
4220  {
4221  jselec = clauselist_selectivity(root,
4222  restrictlist,
4223  0,
4224  jointype,
4225  sjinfo);
4226  pselec = 0.0; /* not used, keep compiler quiet */
4227  }
4228 
4229  /*
4230  * Basically, we multiply size of Cartesian product by selectivity.
4231  *
4232  * If we are doing an outer join, take that into account: the joinqual
4233  * selectivity has to be clamped using the knowledge that the output must
4234  * be at least as large as the non-nullable input. However, any
4235  * pushed-down quals are applied after the outer join, so their
4236  * selectivity applies fully.
4237  *
4238  * For JOIN_SEMI and JOIN_ANTI, the selectivity is defined as the fraction
4239  * of LHS rows that have matches, and we apply that straightforwardly.
4240  */
4241  switch (jointype)
4242  {
4243  case JOIN_INNER:
4244  nrows = outer_rows * inner_rows * fkselec * jselec;
4245  /* pselec not used */
4246  break;
4247  case JOIN_LEFT:
4248  nrows = outer_rows * inner_rows * fkselec * jselec;
4249  if (nrows < outer_rows)
4250  nrows = outer_rows;
4251  nrows *= pselec;
4252  break;
4253  case JOIN_FULL:
4254  nrows = outer_rows * inner_rows * fkselec * jselec;
4255  if (nrows < outer_rows)
4256  nrows = outer_rows;
4257  if (nrows < inner_rows)
4258  nrows = inner_rows;
4259  nrows *= pselec;
4260  break;
4261  case JOIN_SEMI:
4262  nrows = outer_rows * fkselec * jselec;
4263  /* pselec not used */
4264  break;
4265  case JOIN_ANTI:
4266  nrows = outer_rows * (1.0 - fkselec * jselec);
4267  nrows *= pselec;
4268  break;
4269  default:
4270  /* other values not expected here */
4271  elog(ERROR, "unrecognized join type: %d", (int) jointype);
4272  nrows = 0; /* keep compiler quiet */
4273  break;
4274  }
4275 
4276  return clamp_row_est(nrows);
4277 }
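
A made-up example of the JOIN_LEFT branch, showing the clamp against the outer size and the separate treatment of pushed-down quals:

/* outer_rows = 1000, inner_rows = 50, fkselec = 1.0, jselec = 0.01, pselec = 0.5:
 *   raw nrows = 1000 * 50 * 0.01 = 500     (smaller than outer_rows)
 *   clamp:      nrows = 1000               (every outer row appears at least once)
 *   pushed-down quals apply afterwards: nrows = 1000 * 0.5 = 500
 *   result: clamp_row_est(500) = 500
 */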
4278 
4279 /*
4280  * get_foreign_key_join_selectivity
4281  * Estimate join selectivity for foreign-key-related clauses.
4282  *
4283  * Remove any clauses that can be matched to FK constraints from *restrictlist,
4284  * and return a substitute estimate of their selectivity. 1.0 is returned
4285  * when there are no such clauses.
4286  *
4287  * The reason for treating such clauses specially is that we can get better
4288  * estimates this way than by relying on clauselist_selectivity(), especially
4289  * for multi-column FKs where that function's assumption that the clauses are
4290  * independent falls down badly. But even with single-column FKs, we may be
4291  * able to get a better answer when the pg_statistic stats are missing or out
4292  * of date.
4293  */
4294 static Selectivity
4295 get_foreign_key_join_selectivity(PlannerInfo *root,
4296  Relids outer_relids,
4297  Relids inner_relids,
4298  SpecialJoinInfo *sjinfo,
4299  List **restrictlist)
4300 {
4301  Selectivity fkselec = 1.0;
4302  JoinType jointype = sjinfo->jointype;
4303  List *worklist = *restrictlist;
4304  ListCell *lc;
4305 
4306  /* Consider each FK constraint that is known to match the query */
4307  foreach(lc, root->fkey_list)
4308  {
4309  ForeignKeyOptInfo *fkinfo = (ForeignKeyOptInfo *) lfirst(lc);
4310  bool ref_is_outer;
4311  bool use_smallest_selectivity = false;
4312  List *removedlist;
4313  ListCell *cell;
4314  ListCell *prev;
4315  ListCell *next;
4316 
4317  /*
4318  * This FK is not relevant unless it connects a baserel on one side of
4319  * this join to a baserel on the other side.
4320  */
4321  if (bms_is_member(fkinfo->con_relid, outer_relids) &&
4322  bms_is_member(fkinfo->ref_relid, inner_relids))
4323  ref_is_outer = false;
4324  else if (bms_is_member(fkinfo->ref_relid, outer_relids) &&
4325  bms_is_member(fkinfo->con_relid, inner_relids))
4326  ref_is_outer = true;
4327  else
4328  continue;
4329 
4330  /*
4331  * Modify the restrictlist by removing clauses that match the FK (and
4332  * putting them into removedlist instead). It seems unsafe to modify
4333  * the originally-passed List structure, so we make a shallow copy the
4334  * first time through.
4335  */
4336  if (worklist == *restrictlist)
4337  worklist = list_copy(worklist);
4338 
4339  removedlist = NIL;
4340  prev = NULL;
4341  for (cell = list_head(worklist); cell; cell = next)
4342  {
4343  RestrictInfo *rinfo = (RestrictInfo *) lfirst(cell);
4344  bool remove_it = false;
4345  int i;
4346 
4347  next = lnext(cell);
4348  /* Drop this clause if it matches any column of the FK */
4349  for (i = 0; i < fkinfo->nkeys; i++)
4350  {
4351  if (rinfo->parent_ec)
4352  {
4353  /*
4354  * EC-derived clauses can only match by EC. It is okay to
4355  * consider any clause derived from the same EC as
4356  * matching the FK: even if equivclass.c chose to generate
4357  * a clause equating some other pair of Vars, it could
4358  * have generated one equating the FK's Vars. So for
4359  * purposes of estimation, we can act as though it did so.
4360  *
4361  * Note: checking parent_ec is a bit of a cheat because
4362  * there are EC-derived clauses that don't have parent_ec
4363  * set; but such clauses must compare expressions that
4364  * aren't just Vars, so they cannot match the FK anyway.
4365  */
4366  if (fkinfo->eclass[i] == rinfo->parent_ec)
4367  {
4368  remove_it = true;
4369  break;
4370  }
4371  }
4372  else
4373  {
4374  /*
4375  * Otherwise, see if rinfo was previously matched to FK as
4376  * a "loose" clause.
4377  */
4378  if (list_member_ptr(fkinfo->rinfos[i], rinfo))
4379  {
4380  remove_it = true;
4381  break;
4382  }
4383  }
4384  }
4385  if (remove_it)
4386  {
4387  worklist = list_delete_cell(worklist, cell, prev);
4388  removedlist = lappend(removedlist, rinfo);
4389  }
4390  else
4391  prev = cell;
4392  }
4393 
4394  /*
4395  * If we failed to remove all the matching clauses we expected to
4396  * find, chicken out and ignore this FK; applying its selectivity
4397  * might result in double-counting. Put any clauses we did manage to
4398  * remove back into the worklist.
4399  *
4400  * Since the matching clauses are known not outerjoin-delayed, they
4401  * should certainly have appeared in the initial joinclause list. If
4402  * we didn't find them, they must have been matched to, and removed
4403  * by, some other FK in a previous iteration of this loop. (A likely
4404  * case is that two FKs are matched to the same EC; there will be only
4405  * one EC-derived clause in the initial list, so the first FK will
4406  * consume it.) Applying both FKs' selectivity independently risks
4407  * underestimating the join size; in particular, this would undo one
4408  * of the main things that ECs were invented for, namely to avoid
4409  * double-counting the selectivity of redundant equality conditions.
4410  * Later we might think of a reasonable way to combine the estimates,
4411  * but for now, just punt, since this is a fairly uncommon situation.
4412  */
4413  if (list_length(removedlist) !=
4414  (fkinfo->nmatched_ec + fkinfo->nmatched_ri))
4415  {
4416  worklist = list_concat(worklist, removedlist);
4417  continue;
4418  }
4419 
4420  /*
4421  * Finally we get to the payoff: estimate selectivity using the
4422  * knowledge that each referencing row will match exactly one row in
4423  * the referenced table.
4424  *
4425  * XXX that's not true in the presence of nulls in the referencing
4426  * column(s), so in principle we should derate the estimate for those.
4427  * However (1) if there are any strict restriction clauses for the
4428  * referencing column(s) elsewhere in the query, derating here would
4429  * be double-counting the null fraction, and (2) it's not very clear
4430  * how to combine null fractions for multiple referencing columns.
4431  *
4432  * In the use_smallest_selectivity code below, null derating is done
4433  * implicitly by relying on clause_selectivity(); in the other cases,
4434  * we do nothing for now about correcting for nulls.
4435  *
4436  * XXX another point here is that if either side of an FK constraint
4437  * is an inheritance parent, we estimate as though the constraint
4438  * covers all its children as well. This is not an unreasonable
4439  * assumption for a referencing table, ie the user probably applied
4440  * identical constraints to all child tables (though perhaps we ought
4441  * to check that). But it's not possible to have done that for a
4442  * referenced table. Fortunately, precisely because that doesn't
4443  * work, it is uncommon in practice to have an FK referencing a parent
4444  * table. So, at least for now, disregard inheritance here.
4445  */
4446  if (ref_is_outer && jointype != JOIN_INNER)
4447  {
4448  /*
4449  * When the referenced table is on the outer side of a non-inner
4450  * join, knowing that each inner row has exactly one match is not
4451  * as useful as one could wish, since we really need to know the
4452  * fraction of outer rows with a match. Still, we can avoid the
4453  * folly of multiplying the per-column estimates together. Take
4454  * the smallest per-column selectivity, instead. (This should
4455  * correspond to the FK column with the most nulls.)
4456  */
4457  use_smallest_selectivity = true;
4458  }
4459  else if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
4460  {
4461  /*
4462  * For JOIN_SEMI and JOIN_ANTI, the selectivity is defined as the
4463  * fraction of LHS rows that have matches. The referenced table
4464  * is on the inner side (we already handled the other case above),
4465  * so the FK implies that every LHS row has a match *in the
4466  * referenced table*. But any restriction or join clauses below
4467  * here will reduce the number of matches.
4468  */
4469  if (bms_membership(inner_relids) == BMS_SINGLETON)
4470  {
4471  /*
4472  * When the inner side of the semi/anti join is just the
4473  * referenced table, we may take the FK selectivity as equal
4474  * to the selectivity of the table's restriction clauses.
4475  */
4476  RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
4477  double ref_tuples = Max(ref_rel->tuples, 1.0);
4478 
4479  fkselec *= ref_rel->rows / ref_tuples;
4480  }
4481  else
4482  {
4483  /*
4484  * When the inner side of the semi/anti join is itself a join,
4485  * it's hard to guess what fraction of the referenced table
4486  * will get through the join. But we still don't want to
4487  * multiply per-column estimates together. Take the smallest
4488  * per-column selectivity, instead.
4489  */
4490  use_smallest_selectivity = true;
4491  }
4492  }
4493  else
4494  {
4495  /*
4496  * Otherwise, selectivity is exactly 1/referenced-table-size; but
4497  * guard against tuples == 0. Note we should use the raw table
4498  * tuple count, not any estimate of its filtered or joined size.
4499  */
4500  RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
4501  double ref_tuples = Max(ref_rel->tuples, 1.0);
4502 
4503  fkselec *= 1.0 / ref_tuples;
4504  }
4505 
4506  /*
4507  * Common code for cases where we should use the smallest selectivity
4508  * that would be computed for any one of the FK's clauses.
4509  */
4510  if (use_smallest_selectivity)
4511  {
4512  Selectivity thisfksel = 1.0;
4513 
4514  foreach(cell, removedlist)
4515  {
4516  RestrictInfo *rinfo = (RestrictInfo *) lfirst(cell);
4517  Selectivity csel;
4518 
4519  csel = clause_selectivity(root, (Node *) rinfo,
4520  0, jointype, sjinfo);
4521  thisfksel = Min(thisfksel, csel);
4522  }
4523  fkselec *= thisfksel;
4524  }
4525  }
4526 
4527  *restrictlist = worklist;
4528  return fkselec;
4529 }
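
An illustrative scenario (the table and statistics are hypothetical) of why FK-aware estimation beats multiplying per-column selectivities:

/* A two-column FK references a table of 100000 rows whose key columns have
 * roughly 10000 and 500 distinct values. Treating the two equality clauses as
 * independent gives about (1/10000) * (1/500) = 2e-7, but the FK guarantees that
 * each referencing row matches exactly one referenced row, so
 * fkselec = 1/100000 = 1e-5; the independent estimate would undersize the join
 * by a factor of about 50.
 */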
4530 
4531 /*
4532  * set_subquery_size_estimates
4533  * Set the size estimates for a base relation that is a subquery.
4534  *
4535  * The rel's targetlist and restrictinfo list must have been constructed
4536  * already, and the Paths for the subquery must have been completed.
4537  * We look at the subquery's PlannerInfo to extract data.
4538  *
4539  * We set the same fields as set_baserel_size_estimates.
4540  */
4541 void
4542 set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
4543 {
4544  PlannerInfo *subroot = rel->subroot;
4545  RelOptInfo *sub_final_rel;
4546  RangeTblEntry *rte PG_USED_FOR_ASSERTS_ONLY;
4547  ListCell *lc;
4548 
4549  /* Should only be applied to base relations that are subqueries */
4550  Assert(rel->relid > 0);
4551 #ifdef USE_ASSERT_CHECKING
4552  rte = planner_rt_fetch(rel->relid, root);
4553  Assert(rte->rtekind == RTE_SUBQUERY);
4554 #endif
4555 
4556  /*
4557  * Copy raw number of output rows from subquery. All of its paths should
4558  * have the same output rowcount, so just look at cheapest-total.
4559  */
4560  sub_final_rel = fetch_upper_rel(subroot, UPPERREL_FINAL, NULL);
4561  rel->tuples = sub_final_rel->cheapest_total_path->rows;
4562 
4563  /*
4564  * Compute per-output-column width estimates by examining the subquery's
4565  * targetlist. For any output that is a plain Var, get the width estimate
4566  * that was made while planning the subquery. Otherwise, we leave it to
4567  * set_rel_width to fill in a datatype-based default estimate.
4568  */
4569  foreach(lc, subroot->parse->targetList)
4570  {
4571  TargetEntry *te = lfirst_node(TargetEntry, lc);
4572  Node *texpr = (Node *) te->expr;
4573  int32 item_width = 0;
4574 
4575  /* junk columns aren't visible to upper query */
4576  if (te->resjunk)
4577  continue;
4578 
4579  /*
4580  * The subquery could be an expansion of a view that's had columns
4581  * added to it since the current query was parsed, so that there are
4582  * non-junk tlist columns in it that don't correspond to any column
4583  * visible at our query level. Ignore such columns.
4584  */
4585  if (te->resno < rel->min_attr || te->resno > rel->max_attr)
4586  continue;
4587 
4588  /*
4589  * XXX This currently doesn't work for subqueries containing set
4590  * operations, because the Vars in their tlists are bogus references
4591  * to the first leaf subquery, which wouldn't give the right answer
4592  * even if we could still get to its PlannerInfo.
4593  *
4594  * Also, the subquery could be an appendrel for which all branches are
4595  * known empty due to constraint exclusion, in which case
4596  * set_append_rel_pathlist will have left the attr_widths set to zero.
4597  *
4598  * In either case, we just leave the width estimate zero until
4599  * set_rel_width fixes it.
4600  */
4601  if (IsA(texpr, Var) &&
4602  subroot->parse->setOperations == NULL)
4603  {
4604  Var *var = (Var *) texpr;
4605  RelOptInfo *subrel = find_base_rel(subroot, var->varno);
4606 
4607  item_width = subrel->attr_widths[var->varattno - subrel->min_attr];
4608  }
4609  rel->attr_widths[te->resno - rel->min_attr] = item_width;
4610  }
4611 
4612  /* Now estimate number of output rows, etc */
4613  set_baserel_size_estimates(root, rel);
4614 }
4615 
4616 /*
4617  * set_function_size_estimates
4618  * Set the size estimates for a base relation that is a function call.
4619  *
4620  * The rel's targetlist and restrictinfo list must have been constructed
4621  * already.
4622  *
4623  * We set the same fields as set_baserel_size_estimates.
4624  */
4625 void
4626 set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
4627 {
4628  RangeTblEntry *rte;
4629  ListCell *lc;
4630 
4631  /* Should only be applied to base relations that are functions */
4632  Assert(rel->relid > 0);
4633  rte = planner_rt_fetch(rel->relid, root);
4634  Assert(rte->rtekind == RTE_FUNCTION);
4635 
4636  /*
4637  * Estimate number of rows the functions will return. The rowcount of the
4638  * node is that of the largest function result.
4639  */
4640  rel->tuples = 0;
4641  foreach(lc, rte->functions)
4642  {
4643  RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
4644  double ntup = expression_returns_set_rows(rtfunc->funcexpr);
4645 
4646  if (ntup > rel->tuples)
4647  rel->tuples = ntup;
4648  }
4649 
4650  /* Now estimate number of output rows, etc */
4651  set_baserel_size_estimates(root, rel);
4652 }
4653 
4654 /*
4655  * set_tablefunc_size_estimates
4656  * Set the size estimates for a base relation that is a tablefunc reference.
4657  *
4658  * The rel's targetlist and restrictinfo list must have been constructed
4659  * already.
4660  *
4661  * We set the same fields as set_baserel_size_estimates.
4662  */
4663 void
4664 set_tablefunc_size_estimates(PlannerInfo *root, RelOptInfo *rel)
4665 {
4666  RangeTblEntry *rte PG_USED_FOR_ASSERTS_ONLY;
4667 
4668  /* Should only be applied to base relations that are functions */
4669  Assert(rel->relid > 0);
4670 #ifdef USE_ASSERT_CHECKING
4671  rte = planner_rt_fetch(rel->relid, root);
4672  Assert(rte->rtekind == RTE_TABLEFUNC);
4673 #endif
4674 
4675  rel->tuples = 100;
4676 
4677  /* Now estimate number of output rows, etc */
4678  set_baserel_size_estimates(root, rel);
4679 }
4680 
4681 /*
4682  * set_values_size_estimates
4683  * Set the size estimates for a base relation that is a values list.
4684  *
4685  * The rel's targetlist and restrictinfo list must have been constructed
4686  * already.
4687  *
4688  * We set the same fields as set_baserel_size_estimates.
4689  */
4690 void
4691 set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
4692 {
4693  RangeTblEntry *rte;
4694 
4695  /* Should only be applied to base relations that are values lists */
4696  Assert(rel->relid > 0);
4697  rte = planner_rt_fetch(rel->relid, root);
4698  Assert(rte->rtekind == RTE_VALUES);
4699 
4700  /*
4701  * Estimate number of rows the values list will return. We know this
4702  * precisely based on the list length (well, barring set-returning
4703  * functions in list items, but that's a refinement not catered for
4704  * anywhere else either).
4705  */
4706  rel->tuples = list_length(rte->values_lists);
4707 
4708  /* Now estimate number of output rows, etc */
4709  set_baserel_size_estimates(root, rel);
4710 }
4711 
4712 /*
4713  * set_cte_size_estimates
4714  * Set the size estimates for a base relation that is a CTE reference.
4715  *
4716  * The rel's targetlist and restrictinfo list must have been constructed
4717  * already, and we need an estimate of the number of rows returned by the CTE
4718  * (if a regular CTE) or the non-recursive term (if a self-reference).
4719  *
4720  * We set the same fields as set_baserel_size_estimates.
4721  */
4722 void
4723 set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, double cte_rows)
4724 {
4725  RangeTblEntry *rte;
4726 
4727  /* Should only be applied to base relations that are CTE references */
4728  Assert(rel->relid > 0);
4729  rte = planner_rt_fetch(rel->relid, root);
4730  Assert(rte->rtekind == RTE_CTE);
4731 
4732  if (rte->self_reference)
4733  {
4734  /*
4735  * In a self-reference, arbitrarily assume the average worktable size
4736  * is about 10 times the nonrecursive term's size.
4737  */
4738  rel->tuples = 10 * cte_rows;
4739  }
4740  else
4741  {
4742  /* Otherwise just believe the CTE's rowcount estimate */
4743  rel->tuples = cte_rows;
4744  }
4745 
4746  /* Now estimate number of output rows, etc */
4747  set_baserel_size_estimates(root, rel);
4748 }
4749 
4750 /*
4751  * set_namedtuplestore_size_estimates
4752  * Set the size estimates for a base relation that is a tuplestore reference.
4753  *
4754  * The rel's targetlist and restrictinfo list must have been constructed
4755  * already.
4756  *
4757  * We set the same fields as set_baserel_size_estimates.
4758  */
4759 void
4760 set_namedtuplestore_size_estimates(PlannerInfo *root, RelOptInfo *rel)
4761 {
4762  RangeTblEntry *rte;
4763 
4764  /* Should only be applied to base relations that are tuplestore references */
4765  Assert(rel->relid > 0);
4766  rte = planner_rt_fetch(rel->relid, root);
4767  Assert(rte->rtekind == RTE_NAMEDTUPLESTORE);
4768 
4769  /*
4770  * Use the estimate provided by the code which is generating the named
4771  * tuplestore. In some cases, the actual number might be available; in
4772  * others the same plan will be re-used, so a "typical" value might be
4773  * estimated and used.
4774  */
4775  rel->tuples = rte->enrtuples;
4776  if (rel->tuples < 0)
4777  rel->tuples = 1000;
4778 
4779  /* Now estimate number of output rows, etc */
4780  set_baserel_size_estimates(root, rel);
4781 }
4782 
4783 /*
4784  * set_foreign_size_estimates
4785  * Set the size estimates for a base relation that is a foreign table.
4786  *
4787  * There is not a whole lot that we can do here; the foreign-data wrapper
4788  * is responsible for producing useful estimates. We can do a decent job
4789  * of estimating baserestrictcost, so we set that, and we also set up width
4790  * using what will be purely datatype-driven estimates from the targetlist.
4791  * There is no way to do anything sane with the rows value, so we just put
4792  * a default estimate and hope that the wrapper can improve on it. The
4793  * wrapper's GetForeignRelSize function will be called momentarily.
4794  *
4795  * The rel's targetlist and restrictinfo list must have been constructed
4796  * already.
4797  */
4798 void
4799 set_foreign_size_estimates(PlannerInfo *root, RelOptInfo *rel)
4800 {
4801  /* Should only be applied to base relations */
4802  Assert(rel->relid > 0);
4803 
4804  rel->rows = 1000; /* entirely bogus default estimate */
4805 
4806  cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
4807 
4808  set_rel_width(root, rel);
4809 }
4810 
4811 
4812 /*
4813  * set_rel_width
4814  * Set the estimated output width of a base relation.
4815  *
4816  * The estimated output width is the sum of the per-attribute width estimates
4817  * for the actually-referenced columns, plus any PHVs or other expressions
4818  * that have to be calculated at this relation. This is the amount of data
4819  * we'd need to pass upwards in case of a sort, hash, etc.
4820  *
4821  * This function also sets reltarget->cost, so it's a bit misnamed now.
4822  *
4823  * NB: this works best on plain relations because it prefers to look at
4824  * real Vars. For subqueries, set_subquery_size_estimates will already have
4825  * copied up whatever per-column estimates were made within the subquery,
4826  * and for other types of rels there isn't much we can do anyway. We fall
4827  * back on (fairly stupid) datatype-based width estimates if we can't get
4828  * any better number.
4829  *
4830  * The per-attribute width estimates are cached for possible re-use while
4831  * building join relations or post-scan/join pathtargets.
4832  */
4833 static void
4834 set_rel_width(PlannerInfo *root, RelOptInfo *rel)
4835 {
4836  Oid reloid = planner_rt_fetch(rel->relid, root)->relid;
4837  int32 tuple_width = 0;
4838  bool have_wholerow_var = false;
4839  ListCell *lc;
4840 
4841  /* Vars are assumed to have cost zero, but other exprs do not */
4842  rel->reltarget->cost.startup = 0;
4843  rel->reltarget->cost.per_tuple = 0;
4844 
4845  foreach(lc, rel->reltarget->exprs)
4846  {
4847  Node *node = (Node *) lfirst(lc);
4848 
4849  /*
4850  * Ordinarily, a Var in a rel's targetlist must belong to that rel;
4851  * but there are corner cases involving LATERAL references where that
4852  * isn't so. If the Var has the wrong varno, fall through to the
4853  * generic case (it doesn't seem worth the trouble to be any smarter).
4854  */
4855  if (IsA(node, Var) &&
4856  ((Var *) node)->varno == rel->relid)
4857  {
4858  Var *var = (Var *) node;
4859  int ndx;
4860  int32 item_width;
4861 
4862  Assert(var->varattno >= rel->min_attr);
4863  Assert(var->varattno <= rel->max_attr);
4864 
4865  ndx = var->varattno - rel->min_attr;
4866 
4867  /*
4868  * If it's a whole-row Var, we'll deal with it below after we have
4869  * already cached as many attr widths as possible.
4870  */
4871  if (var->varattno == 0)
4872  {
4873  have_wholerow_var = true;
4874  continue;
4875  }
4876 
4877  /*
4878  * The width may have been cached already (especially if it's a
4879  * subquery), so don't duplicate effort.
4880  */
4881  if (rel->attr_widths[ndx] > 0)
4882  {
4883  tuple_width += rel->attr_widths[ndx];
4884  continue;
4885  }
4886 
4887  /* Try to get column width from statistics */
4888  if (reloid != InvalidOid && var->varattno > 0)
4889  {
4890  item_width = get_attavgwidth(reloid, var->varattno);
4891  if (item_width > 0)
4892  {
4893  rel->attr_widths[ndx] = item_width;
4894  tuple_width += item_width;
4895  continue;
4896  }
4897  }
4898 
4899  /*
4900  * Not a plain relation, or can't find statistics for it. Estimate
4901  * using just the type info.
4902  */
4903  item_width = get_typavgwidth(var->vartype, var->vartypmod);
4904  Assert(item_width > 0);
4905  rel->attr_widths[ndx] = item_width;
4906  tuple_width += item_width;
4907  }
4908  else if (IsA(node, PlaceHolderVar))
4909  {
4910  /*
4911  * We will need to evaluate the PHV's contained expression while
4912  * scanning this rel, so be sure to include it in reltarget->cost.
4913  */
4914  PlaceHolderVar *phv = (PlaceHolderVar *) node;
4915  PlaceHolderInfo *phinfo = find_placeholder_info(root, phv, false);
4916  QualCost cost;
4917 
4918  tuple_width += phinfo->ph_width;
4919  cost_qual_eval_node(&cost, (Node *) phv->phexpr, root);
4920  rel->reltarget->cost.startup += cost.startup;
4921  rel->reltarget->cost.per_tuple += cost.per_tuple;
4922  }
4923  else
4924  {
4925  /*
4926  * We could be looking at an expression pulled up from a subquery,
4927  * or a ROW() representing a whole-row child Var, etc. Do what we
4928  * can using the expression type information.
4929  */
4930  int32 item_width;
4931  QualCost cost;
4932 
4933  item_width = get_typavgwidth(exprType(node), exprTypmod(node));
4934  Assert(item_width > 0);
4935  tuple_width += item_width;
4936  /* Not entirely clear if we need to account for cost, but do so */
4937  cost_qual_eval_node(&cost, node, root);
4938  rel->reltarget->cost.startup += cost.startup;
4939  rel->reltarget->cost.per_tuple += cost.per_tuple;
4940  }
4941  }
4942 
4943  /*
4944  * If we have a whole-row reference, estimate its width as the sum of
4945  * per-column widths plus heap tuple header overhead.
4946  */
4947  if (have_wholerow_var)
4948  {
4949  int32 wholerow_width = MAXALIGN(SizeofHeapTupleHeader);
4950 
4951  if (reloid != InvalidOid)
4952  {
4953  /* Real relation, so estimate true tuple width */
4954  wholerow_width += get_relation_data_width(reloid,
4955  rel->attr_widths - rel->min_attr);
4956  }
4957  else
4958  {
4959  /* Do what we can with info for a phony rel */
4960  AttrNumber i;
4961 
4962  for (i = 1; i <= rel->max_attr; i++)
4963  wholerow_width += rel->attr_widths[i - rel->min_attr];
4964  }
4965 
4966  rel->attr_widths[0 - rel->min_attr] = wholerow_width;
4967 
4968  /*
4969  * Include the whole-row Var as part of the output tuple. Yes, that
4970  * really is what happens at runtime.
4971  */
4972  tuple_width += wholerow_width;
4973  }
4974 
4975  Assert(tuple_width >= 0);
4976  rel->reltarget->width = tuple_width;
4977 }
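To make the whole-row arithmetic above concrete, here is a minimal standalone sketch (not part of costsize.c) that assumes 8-byte MAXALIGN and a 23-byte heap tuple header, plus hypothetical per-column widths for a three-column relation:

#include <stdio.h>

#define EXAMPLE_MAXALIGN(len)	(((len) + 7) & ~((long) 7))

int
main(void)
{
	/* hypothetical cached attr widths: int4, int8, text averaging 37 bytes */
	int			attr_widths[] = {4, 8, 37};
	long		wholerow_width = EXAMPLE_MAXALIGN(23);	/* header overhead -> 24 */
	int			i;

	for (i = 0; i < 3; i++)
		wholerow_width += attr_widths[i];
	printf("whole-row width estimate: %ld bytes\n", wholerow_width);	/* 73 */
	return 0;
}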
4978 
4979 /*
4980  * set_pathtarget_cost_width
4981  * Set the estimated eval cost and output width of a PathTarget tlist.
4982  *
4983  * As a notational convenience, returns the same PathTarget pointer passed in.
4984  *
4985  * Most, though not quite all, uses of this function occur after we've run
4986  * set_rel_width() for base relations; so we can usually obtain cached width
4987  * estimates for Vars. If we can't, fall back on datatype-based width
4988  * estimates. Present early-planning uses of PathTargets don't need accurate
4989  * widths badly enough to justify going to the catalogs for better data.
4990  */
4991 PathTarget *
4992 set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
4993 {
4994  int32 tuple_width = 0;
4995  ListCell *lc;
4996 
4997  /* Vars are assumed to have cost zero, but other exprs do not */
4998  target->cost.startup = 0;
4999  target->cost.per_tuple = 0;
5000 
5001  foreach(lc, target->exprs)
5002  {
5003  Node *node = (Node *) lfirst(lc);
5004 
5005  if (IsA(node, Var))
5006  {
5007  Var *var = (Var *) node;
5008  int32 item_width;
5009 
5010  /* We should not see any upper-level Vars here */
5011  Assert(var->varlevelsup == 0);
5012 
5013  /* Try to get data from RelOptInfo cache */
5014  if (var->varno < root->simple_rel_array_size)
5015  {
5016  RelOptInfo *rel = root->simple_rel_array[var->varno];
5017 
5018  if (rel != NULL &&
5019  var->varattno >= rel->min_attr &&
5020  var->varattno <= rel->max_attr)
5021  {
5022  int ndx = var->varattno - rel->min_attr;
5023 
5024  if (rel->attr_widths[ndx] > 0)
5025  {
5026  tuple_width += rel->attr_widths[ndx];
5027  continue;
5028  }
5029  }
5030  }
5031 
5032  /*
5033  * No cached data available, so estimate using just the type info.
5034  */
5035  item_width = get_typavgwidth(var->vartype, var->vartypmod);
5036  Assert(item_width > 0);
5037  tuple_width += item_width;
5038  }
5039  else
5040  {
5041  /*
5042  * Handle general expressions using type info.
5043  */
5044  int32 item_width;
5045  QualCost cost;
5046 
5047  item_width = get_typavgwidth(exprType(node), exprTypmod(node));
5048  Assert(item_width > 0);
5049  tuple_width += item_width;
5050 
5051  /* Account for cost, too */
5052  cost_qual_eval_node(&cost, node, root);
5053  target->cost.startup += cost.startup;
5054  target->cost.per_tuple += cost.per_tuple;
5055  }
5056  }
5057 
5058  Assert(tuple_width >= 0);
5059  target->width = tuple_width;
5060 
5061  return target;
5062 }
5063 
5064 /*
5065  * relation_byte_size
5066  * Estimate the storage space in bytes for a given number of tuples
5067  * of a given width (size in bytes).
5068  */
5069 static double
5070 relation_byte_size(double tuples, int width)
5071 {
5072  return tuples * (MAXALIGN(width) + MAXALIGN(SizeofHeapTupleHeader));
5073 }
5074 
5075 /*
5076  * page_size
5077  * Returns an estimate of the number of pages covered by a given
5078  * number of tuples of a given width (size in bytes).
5079  */
5080 static double
5081 page_size(double tuples, int width)
5082 {
5083  return ceil(relation_byte_size(tuples, width) / BLCKSZ);
5084 }
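A minimal standalone sketch of how these two helpers combine, using assumed values for the PostgreSQL constants (8 kB blocks, 8-byte MAXALIGN, 23-byte heap tuple header):

#include <math.h>
#include <stdio.h>

#define EXAMPLE_BLCKSZ			8192
#define EXAMPLE_MAXALIGN(len)	(((len) + 7) & ~((long) 7))
#define EXAMPLE_TUPLE_HEADER	23

int
main(void)
{
	/* 1 million 100-byte-wide tuples: (104 + 24) bytes apiece */
	double		bytes = 1e6 * (EXAMPLE_MAXALIGN(100) +
							   EXAMPLE_MAXALIGN(EXAMPLE_TUPLE_HEADER));

	printf("bytes = %.0f, pages = %.0f\n",
		   bytes, ceil(bytes / EXAMPLE_BLCKSZ));	/* 128000000, 15625 */
	return 0;
}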
5085 
5086 /*
5087  * Estimate the fraction of the work that each worker will do given the
5088  * number of workers budgeted for the path.
5089  */
5090 static double
5091 get_parallel_divisor(Path *path)
5092 {
5093  double parallel_divisor = path->parallel_workers;
5094  double leader_contribution;
5095 
5096  /*
5097  * Early experience with parallel query suggests that when there is only
5098  * one worker, the leader often makes a very substantial contribution to
5099  * executing the parallel portion of the plan, but as more workers are
5100  * added, it does less and less, because it's busy reading tuples from the
5101  * workers and doing whatever non-parallel post-processing is needed. By
5102  * the time we reach 4 workers, the leader no longer makes a meaningful
5103  * contribution. Thus, for now, estimate that the leader spends 30% of
5104  * its time servicing each worker, and the remainder executing the
5105  * parallel plan.
5106  */
5107  leader_contribution = 1.0 - (0.3 * path->parallel_workers);
5108  if (leader_contribution > 0)
5109  parallel_divisor += leader_contribution;
5110 
5111  return parallel_divisor;
5112 }
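A worked example of the heuristic above: with the assumed 30% leader overhead per worker, one worker gives a divisor of 1.7, two give 2.4, three give 3.1, and from four workers on the leader contributes nothing, so the divisor is just the worker count. A minimal standalone sketch of the same arithmetic:

#include <stdio.h>

static double
parallel_divisor_example(int parallel_workers)
{
	double		divisor = parallel_workers;
	double		leader_contribution = 1.0 - 0.3 * parallel_workers;

	if (leader_contribution > 0)
		divisor += leader_contribution;
	return divisor;
}

int
main(void)
{
	int			w;

	for (w = 1; w <= 5; w++)
		printf("%d workers: divisor %.1f\n", w, parallel_divisor_example(w));
	return 0;
}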
5113 
5114 /*
5115  * compute_bitmap_pages
5116  *
5117  * compute number of pages fetched from heap in bitmap heap scan.
5118  */
5119 double
5120 compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel, Path *bitmapqual,
5121  int loop_count, Cost *cost, double *tuple)
5122 {
5123  Cost indexTotalCost;
5124  Selectivity indexSelectivity;
5125  double T;
5126  double pages_fetched;
5127  double tuples_fetched;
5128 
5129  /*
5130  * Fetch total cost of obtaining the bitmap, as well as its total
5131  * selectivity.
5132  */
5133  cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
5134 
5135  /*
5136  * Estimate number of main-table pages fetched.
5137  */
5138  tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
5139 
5140  T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
5141 
5142  if (loop_count > 1)
5143  {
5144  /*
5145  * For repeated bitmap scans, scale up the number of tuples fetched in
5146  * the Mackert and Lohman formula by the number of scans, so that we
5147  * estimate the number of pages fetched by all the scans. Then
5148  * pro-rate for one scan.
5149  */
5150  pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
5151  baserel->pages,
5152  get_indexpath_pages(bitmapqual),
5153  root);
5154  pages_fetched /= loop_count;
5155  }
5156  else
5157  {
5158  /*
5159  * For a single scan, the number of heap pages that need to be fetched
5160  * is the same as the Mackert and Lohman formula for the case T <= b
5161  * (ie, no re-reads needed).
5162  */
5163  pages_fetched =
5164  (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
5165  }
5166 
5167  if (pages_fetched >= T)
5168  pages_fetched = T;
5169  else
5170  pages_fetched = ceil(pages_fetched);
5171 
5172  if (cost)
5173  *cost = indexTotalCost;
5174  if (tuple)
5175  *tuple = tuples_fetched;
5176 
5177  return pages_fetched;
5178 }
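For the single-scan branch above, the Mackert-Lohman no-re-read estimate is pages = 2*T*N / (2*T + N), clamped to T and rounded up. A minimal standalone sketch with assumed numbers (a 10000-page table):

#include <math.h>
#include <stdio.h>

static double
bitmap_pages_example(double T, double tuples_fetched)
{
	double		pages = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);

	return (pages >= T) ? T : ceil(pages);
}

int
main(void)
{
	printf("%.0f\n", bitmap_pages_example(10000, 1000));		/* ~953 pages */
	printf("%.0f\n", bitmap_pages_example(10000, 1000000));	/* capped at 10000 */
	return 0;
}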