PostgreSQL Source Code  git master
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros
costsize.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * costsize.c
4  * Routines to compute (and set) relation sizes and path costs
5  *
6  * Path costs are measured in arbitrary units established by these basic
7  * parameters:
8  *
9  * seq_page_cost Cost of a sequential page fetch
10  * random_page_cost Cost of a non-sequential page fetch
11  * cpu_tuple_cost Cost of typical CPU time to process a tuple
12  * cpu_index_tuple_cost Cost of typical CPU time to process an index tuple
13  * cpu_operator_cost Cost of CPU time to execute an operator or function
14  * parallel_tuple_cost Cost of CPU time to pass a tuple from worker to master backend
15  * parallel_setup_cost Cost of setting up shared memory for parallelism
16  *
17  * We expect that the kernel will typically do some amount of read-ahead
18  * optimization; this in conjunction with seek costs means that seq_page_cost
19  * is normally considerably less than random_page_cost. (However, if the
20  * database is fully cached in RAM, it is reasonable to set them equal.)
21  *
22  * We also use a rough estimate "effective_cache_size" of the number of
23  * disk pages in Postgres + OS-level disk cache. (We can't simply use
24  * NBuffers for this purpose because that would ignore the effects of
25  * the kernel's disk cache.)
26  *
27  * Obviously, taking constants for these values is an oversimplification,
28  * but it's tough enough to get any useful estimates even at this level of
29  * detail. Note that all of these parameters are user-settable, in case
30  * the default values are drastically off for a particular platform.
31  *
32  * seq_page_cost and random_page_cost can also be overridden for an individual
33  * tablespace, in case some data is on a fast disk and other data is on a slow
34  * disk. Per-tablespace overrides never apply to temporary work files such as
35  * an external sort or a materialize node that overflows work_mem.
36  *
37  * We compute two separate costs for each path:
38  * total_cost: total estimated cost to fetch all tuples
39  * startup_cost: cost that is expended before first tuple is fetched
40  * In some scenarios, such as when there is a LIMIT or we are implementing
41  * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
42  * path's result. A caller can estimate the cost of fetching a partial
43  * result by interpolating between startup_cost and total_cost. In detail:
44  * actual_cost = startup_cost +
45  * (total_cost - startup_cost) * tuples_to_fetch / path->rows;
46  * Note that a base relation's rows count (and, by extension, plan_rows for
47  * plan nodes below the LIMIT node) are set without regard to any LIMIT, so
48  * that this equation works properly. (Note: while path->rows is never zero
49  * for ordinary relations, it is zero for paths for provably-empty relations,
50  * so beware of division-by-zero.) The LIMIT is applied as a top-level
51  * plan node.
52  *
53  * For largely historical reasons, most of the routines in this module use
54  * the passed result Path only to store their results (rows, startup_cost and
55  * total_cost) into. All the input data they need is passed as separate
56  * parameters, even though much of it could be extracted from the Path.
57  * An exception is made for the cost_XXXjoin() routines, which expect all
58  * the other fields of the passed XXXPath to be filled in, and similarly
59  * cost_index() assumes the passed IndexPath is valid except for its output
60  * values.
61  *
62  *
63  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
64  * Portions Copyright (c) 1994, Regents of the University of California
65  *
66  * IDENTIFICATION
67  * src/backend/optimizer/path/costsize.c
68  *
69  *-------------------------------------------------------------------------
70  */
71 
72 #include "postgres.h"
73 
74 #ifdef _MSC_VER
75 #include <float.h> /* for _isnan */
76 #endif
77 #include <math.h>
78 
79 #include "access/amapi.h"
80 #include "access/htup_details.h"
81 #include "access/tsmapi.h"
82 #include "executor/executor.h"
83 #include "executor/nodeHash.h"
84 #include "miscadmin.h"
85 #include "nodes/nodeFuncs.h"
86 #include "optimizer/clauses.h"
87 #include "optimizer/cost.h"
88 #include "optimizer/pathnode.h"
89 #include "optimizer/paths.h"
90 #include "optimizer/placeholder.h"
91 #include "optimizer/plancat.h"
92 #include "optimizer/planmain.h"
93 #include "optimizer/restrictinfo.h"
94 #include "parser/parsetree.h"
95 #include "utils/lsyscache.h"
96 #include "utils/selfuncs.h"
97 #include "utils/spccache.h"
98 #include "utils/tuplesort.h"
99 
100 
101 #define LOG2(x) (log(x) / 0.693147180559945)
102 
103 
111 
113 
115 
117 
118 bool enable_seqscan = true;
119 bool enable_indexscan = true;
121 bool enable_bitmapscan = true;
122 bool enable_tidscan = true;
123 bool enable_sort = true;
124 bool enable_hashagg = true;
125 bool enable_nestloop = true;
126 bool enable_material = true;
127 bool enable_mergejoin = true;
128 bool enable_hashjoin = true;
129 bool enable_gathermerge = true;
130 
131 typedef struct
132 {
136 
137 static List *extract_nonindex_conditions(List *qual_clauses, List *indexquals);
139  RestrictInfo *rinfo,
140  PathKey *pathkey);
141 static void cost_rescan(PlannerInfo *root, Path *path,
142  Cost *rescan_startup_cost, Cost *rescan_total_cost);
143 static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
144 static void get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
145  ParamPathInfo *param_info,
146  QualCost *qpqual_cost);
147 static bool has_indexed_join_quals(NestPath *joinpath);
148 static double approx_tuple_count(PlannerInfo *root, JoinPath *path,
149  List *quals);
150 static double calc_joinrel_size_estimate(PlannerInfo *root,
151  RelOptInfo *outer_rel,
152  RelOptInfo *inner_rel,
153  double outer_rows,
154  double inner_rows,
155  SpecialJoinInfo *sjinfo,
156  List *restrictlist);
158  Relids outer_relids,
159  Relids inner_relids,
160  SpecialJoinInfo *sjinfo,
161  List **restrictlist);
162 static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
163 static double relation_byte_size(double tuples, int width);
164 static double page_size(double tuples, int width);
165 static double get_parallel_divisor(Path *path);
166 
167 
168 /*
169  * clamp_row_est
170  * Force a row-count estimate to a sane value.
171  */
172 double
173 clamp_row_est(double nrows)
174 {
175  /*
176  * Force estimate to be at least one row, to make explain output look
177  * better and to avoid possible divide-by-zero when interpolating costs.
178  * Make it an integer, too.
179  */
180  if (nrows <= 1.0)
181  nrows = 1.0;
182  else
183  nrows = rint(nrows);
184 
185  return nrows;
186 }
187 
188 
189 /*
190  * cost_seqscan
191  * Determines and returns the cost of scanning a relation sequentially.
192  *
193  * 'baserel' is the relation to be scanned
194  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
195  */
196 void
198  RelOptInfo *baserel, ParamPathInfo *param_info)
199 {
200  Cost startup_cost = 0;
201  Cost cpu_run_cost;
202  Cost disk_run_cost;
203  double spc_seq_page_cost;
204  QualCost qpqual_cost;
205  Cost cpu_per_tuple;
206 
207  /* Should only be applied to base relations */
208  Assert(baserel->relid > 0);
209  Assert(baserel->rtekind == RTE_RELATION);
210 
211  /* Mark the path with the correct row estimate */
212  if (param_info)
213  path->rows = param_info->ppi_rows;
214  else
215  path->rows = baserel->rows;
216 
217  if (!enable_seqscan)
218  startup_cost += disable_cost;
219 
220  /* fetch estimated page cost for tablespace containing table */
222  NULL,
223  &spc_seq_page_cost);
224 
225  /*
226  * disk costs
227  */
228  disk_run_cost = spc_seq_page_cost * baserel->pages;
229 
230  /* CPU costs */
231  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
232 
233  startup_cost += qpqual_cost.startup;
234  cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
235  cpu_run_cost = cpu_per_tuple * baserel->tuples;
236  /* tlist eval costs are paid per output row, not per tuple scanned */
237  startup_cost += path->pathtarget->cost.startup;
238  cpu_run_cost += path->pathtarget->cost.per_tuple * path->rows;
239 
240  /* Adjust costing for parallelism, if used. */
241  if (path->parallel_workers > 0)
242  {
243  double parallel_divisor = get_parallel_divisor(path);
244 
245  /* The CPU cost is divided among all the workers. */
246  cpu_run_cost /= parallel_divisor;
247 
248  /*
249  * It may be possible to amortize some of the I/O cost, but probably
250  * not very much, because most operating systems already do aggressive
251  * prefetching. For now, we assume that the disk run cost can't be
252  * amortized at all.
253  */
254 
255  /*
256  * In the case of a parallel plan, the row count needs to represent
257  * the number of tuples processed per worker.
258  */
259  path->rows = clamp_row_est(path->rows / parallel_divisor);
260  }
261 
262  path->startup_cost = startup_cost;
263  path->total_cost = startup_cost + cpu_run_cost + disk_run_cost;
264 }
265 
266 /*
267  * cost_samplescan
268  * Determines and returns the cost of scanning a relation using sampling.
269  *
270  * 'baserel' is the relation to be scanned
271  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
272  */
273 void
275  RelOptInfo *baserel, ParamPathInfo *param_info)
276 {
277  Cost startup_cost = 0;
278  Cost run_cost = 0;
279  RangeTblEntry *rte;
280  TableSampleClause *tsc;
281  TsmRoutine *tsm;
282  double spc_seq_page_cost,
283  spc_random_page_cost,
284  spc_page_cost;
285  QualCost qpqual_cost;
286  Cost cpu_per_tuple;
287 
288  /* Should only be applied to base relations with tablesample clauses */
289  Assert(baserel->relid > 0);
290  rte = planner_rt_fetch(baserel->relid, root);
291  Assert(rte->rtekind == RTE_RELATION);
292  tsc = rte->tablesample;
293  Assert(tsc != NULL);
294  tsm = GetTsmRoutine(tsc->tsmhandler);
295 
296  /* Mark the path with the correct row estimate */
297  if (param_info)
298  path->rows = param_info->ppi_rows;
299  else
300  path->rows = baserel->rows;
301 
302  /* fetch estimated page cost for tablespace containing table */
304  &spc_random_page_cost,
305  &spc_seq_page_cost);
306 
307  /* if NextSampleBlock is used, assume random access, else sequential */
308  spc_page_cost = (tsm->NextSampleBlock != NULL) ?
309  spc_random_page_cost : spc_seq_page_cost;
310 
311  /*
312  * disk costs (recall that baserel->pages has already been set to the
313  * number of pages the sampling method will visit)
314  */
315  run_cost += spc_page_cost * baserel->pages;
316 
317  /*
318  * CPU costs (recall that baserel->tuples has already been set to the
319  * number of tuples the sampling method will select). Note that we ignore
320  * execution cost of the TABLESAMPLE parameter expressions; they will be
321  * evaluated only once per scan, and in most usages they'll likely be
322  * simple constants anyway. We also don't charge anything for the
323  * calculations the sampling method might do internally.
324  */
325  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
326 
327  startup_cost += qpqual_cost.startup;
328  cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
329  run_cost += cpu_per_tuple * baserel->tuples;
330  /* tlist eval costs are paid per output row, not per tuple scanned */
331  startup_cost += path->pathtarget->cost.startup;
332  run_cost += path->pathtarget->cost.per_tuple * path->rows;
333 
334  path->startup_cost = startup_cost;
335  path->total_cost = startup_cost + run_cost;
336 }
337 
338 /*
339  * cost_gather
340  * Determines and returns the cost of gather path.
341  *
342  * 'rel' is the relation to be operated upon
343  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
344  * 'rows' may be used to point to a row estimate; if non-NULL, it overrides
345  * both 'rel' and 'param_info'. This is useful when the path doesn't exactly
346  * correspond to any particular RelOptInfo.
347  */
348 void
350  RelOptInfo *rel, ParamPathInfo *param_info,
351  double *rows)
352 {
353  Cost startup_cost = 0;
354  Cost run_cost = 0;
355 
356  /* Mark the path with the correct row estimate */
357  if (rows)
358  path->path.rows = *rows;
359  else if (param_info)
360  path->path.rows = param_info->ppi_rows;
361  else
362  path->path.rows = rel->rows;
363 
364  startup_cost = path->subpath->startup_cost;
365 
366  run_cost = path->subpath->total_cost - path->subpath->startup_cost;
367 
368  /* Parallel setup and communication cost. */
369  startup_cost += parallel_setup_cost;
370  run_cost += parallel_tuple_cost * path->path.rows;
371 
372  path->path.startup_cost = startup_cost;
373  path->path.total_cost = (startup_cost + run_cost);
374 }
375 
376 /*
377  * cost_gather_merge
378  * Determines and returns the cost of gather merge path.
379  *
380  * GatherMerge merges several pre-sorted input streams, using a heap that at
381  * any given instant holds the next tuple from each stream. If there are N
382  * streams, we need about N*log2(N) tuple comparisons to construct the heap at
383  * startup, and then for each output tuple, about log2(N) comparisons to
384  * replace the top heap entry with the next tuple from the same stream.
385  */
386 void
388  RelOptInfo *rel, ParamPathInfo *param_info,
389  Cost input_startup_cost, Cost input_total_cost,
390  double *rows)
391 {
392  Cost startup_cost = 0;
393  Cost run_cost = 0;
394  Cost comparison_cost;
395  double N;
396  double logN;
397 
398  /* Mark the path with the correct row estimate */
399  if (rows)
400  path->path.rows = *rows;
401  else if (param_info)
402  path->path.rows = param_info->ppi_rows;
403  else
404  path->path.rows = rel->rows;
405 
406  if (!enable_gathermerge)
407  startup_cost += disable_cost;
408 
409  /*
410  * Add one to the number of workers to account for the leader. This might
411  * be overgenerous since the leader will do less work than other workers
412  * in typical cases, but we'll go with it for now.
413  */
414  Assert(path->num_workers > 0);
415  N = (double) path->num_workers + 1;
416  logN = LOG2(N);
417 
418  /* Assumed cost per tuple comparison */
419  comparison_cost = 2.0 * cpu_operator_cost;
420 
421  /* Heap creation cost */
422  startup_cost += comparison_cost * N * logN;
423 
424  /* Per-tuple heap maintenance cost */
425  run_cost += path->path.rows * comparison_cost * logN;
426 
427  /* small cost for heap management, like cost_merge_append */
428  run_cost += cpu_operator_cost * path->path.rows;
429 
430  /*
431  * Parallel setup and communication cost. Since Gather Merge, unlike
432  * Gather, requires us to block until a tuple is available from every
433  * worker, we bump the IPC cost up a little bit as compared with Gather.
434  * For lack of a better idea, charge an extra 5%.
435  */
436  startup_cost += parallel_setup_cost;
437  run_cost += parallel_tuple_cost * path->path.rows * 1.05;
438 
439  path->path.startup_cost = startup_cost + input_startup_cost;
440  path->path.total_cost = (startup_cost + run_cost + input_total_cost);
441 }
442 
443 /*
444  * cost_index
445  * Determines and returns the cost of scanning a relation using an index.
446  *
447  * 'path' describes the indexscan under consideration, and is complete
448  * except for the fields to be set by this routine
449  * 'loop_count' is the number of repetitions of the indexscan to factor into
450  * estimates of caching behavior
451  *
452  * In addition to rows, startup_cost and total_cost, cost_index() sets the
453  * path's indextotalcost and indexselectivity fields. These values will be
454  * needed if the IndexPath is used in a BitmapIndexScan.
455  *
456  * NOTE: path->indexquals must contain only clauses usable as index
457  * restrictions. Any additional quals evaluated as qpquals may reduce the
458  * number of returned tuples, but they won't reduce the number of tuples
459  * we have to fetch from the table, so they don't reduce the scan cost.
460  */
461 void
462 cost_index(IndexPath *path, PlannerInfo *root, double loop_count,
463  bool partial_path)
464 {
465  IndexOptInfo *index = path->indexinfo;
466  RelOptInfo *baserel = index->rel;
467  bool indexonly = (path->path.pathtype == T_IndexOnlyScan);
468  amcostestimate_function amcostestimate;
469  List *qpquals;
470  Cost startup_cost = 0;
471  Cost run_cost = 0;
472  Cost cpu_run_cost = 0;
473  Cost indexStartupCost;
474  Cost indexTotalCost;
475  Selectivity indexSelectivity;
476  double indexCorrelation,
477  csquared;
478  double spc_seq_page_cost,
479  spc_random_page_cost;
480  Cost min_IO_cost,
481  max_IO_cost;
482  QualCost qpqual_cost;
483  Cost cpu_per_tuple;
484  double tuples_fetched;
485  double pages_fetched;
486  double rand_heap_pages;
487  double index_pages;
488 
489  /* Should only be applied to base relations */
490  Assert(IsA(baserel, RelOptInfo) &&
491  IsA(index, IndexOptInfo));
492  Assert(baserel->relid > 0);
493  Assert(baserel->rtekind == RTE_RELATION);
494 
495  /*
496  * Mark the path with the correct row estimate, and identify which quals
497  * will need to be enforced as qpquals. We need not check any quals that
498  * are implied by the index's predicate, so we can use indrestrictinfo not
499  * baserestrictinfo as the list of relevant restriction clauses for the
500  * rel.
501  */
502  if (path->path.param_info)
503  {
504  path->path.rows = path->path.param_info->ppi_rows;
505  /* qpquals come from the rel's restriction clauses and ppi_clauses */
506  qpquals = list_concat(
508  path->indexquals),
510  path->indexquals));
511  }
512  else
513  {
514  path->path.rows = baserel->rows;
515  /* qpquals come from just the rel's restriction clauses */
517  path->indexquals);
518  }
519 
520  if (!enable_indexscan)
521  startup_cost += disable_cost;
522  /* we don't need to check enable_indexonlyscan; indxpath.c does that */
523 
524  /*
525  * Call index-access-method-specific code to estimate the processing cost
526  * for scanning the index, as well as the selectivity of the index (ie,
527  * the fraction of main-table tuples we will have to retrieve) and its
528  * correlation to the main-table tuple order. We need a cast here because
529  * relation.h uses a weak function type to avoid including amapi.h.
530  */
531  amcostestimate = (amcostestimate_function) index->amcostestimate;
532  amcostestimate(root, path, loop_count,
533  &indexStartupCost, &indexTotalCost,
534  &indexSelectivity, &indexCorrelation,
535  &index_pages);
536 
537  /*
538  * Save amcostestimate's results for possible use in bitmap scan planning.
539  * We don't bother to save indexStartupCost or indexCorrelation, because a
540  * bitmap scan doesn't care about either.
541  */
542  path->indextotalcost = indexTotalCost;
543  path->indexselectivity = indexSelectivity;
544 
545  /* all costs for touching index itself included here */
546  startup_cost += indexStartupCost;
547  run_cost += indexTotalCost - indexStartupCost;
548 
549  /* estimate number of main-table tuples fetched */
550  tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
551 
552  /* fetch estimated page costs for tablespace containing table */
554  &spc_random_page_cost,
555  &spc_seq_page_cost);
556 
557  /*----------
558  * Estimate number of main-table pages fetched, and compute I/O cost.
559  *
560  * When the index ordering is uncorrelated with the table ordering,
561  * we use an approximation proposed by Mackert and Lohman (see
562  * index_pages_fetched() for details) to compute the number of pages
563  * fetched, and then charge spc_random_page_cost per page fetched.
564  *
565  * When the index ordering is exactly correlated with the table ordering
566  * (just after a CLUSTER, for example), the number of pages fetched should
567  * be exactly selectivity * table_size. What's more, all but the first
568  * will be sequential fetches, not the random fetches that occur in the
569  * uncorrelated case. So if the number of pages is more than 1, we
570  * ought to charge
571  * spc_random_page_cost + (pages_fetched - 1) * spc_seq_page_cost
572  * For partially-correlated indexes, we ought to charge somewhere between
573  * these two estimates. We currently interpolate linearly between the
574  * estimates based on the correlation squared (XXX is that appropriate?).
575  *
576  * If it's an index-only scan, then we will not need to fetch any heap
577  * pages for which the visibility map shows all tuples are visible.
578  * Hence, reduce the estimated number of heap fetches accordingly.
579  * We use the measured fraction of the entire heap that is all-visible,
580  * which might not be particularly relevant to the subset of the heap
581  * that this query will fetch; but it's not clear how to do better.
582  *----------
583  */
584  if (loop_count > 1)
585  {
586  /*
587  * For repeated indexscans, the appropriate estimate for the
588  * uncorrelated case is to scale up the number of tuples fetched in
589  * the Mackert and Lohman formula by the number of scans, so that we
590  * estimate the number of pages fetched by all the scans; then
591  * pro-rate the costs for one scan. In this case we assume all the
592  * fetches are random accesses.
593  */
594  pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
595  baserel->pages,
596  (double) index->pages,
597  root);
598 
599  if (indexonly)
600  pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
601 
602  rand_heap_pages = pages_fetched;
603 
604  max_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
605 
606  /*
607  * In the perfectly correlated case, the number of pages touched by
608  * each scan is selectivity * table_size, and we can use the Mackert
609  * and Lohman formula at the page level to estimate how much work is
610  * saved by caching across scans. We still assume all the fetches are
611  * random, though, which is an overestimate that's hard to correct for
612  * without double-counting the cache effects. (But in most cases
613  * where such a plan is actually interesting, only one page would get
614  * fetched per scan anyway, so it shouldn't matter much.)
615  */
616  pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
617 
618  pages_fetched = index_pages_fetched(pages_fetched * loop_count,
619  baserel->pages,
620  (double) index->pages,
621  root);
622 
623  if (indexonly)
624  pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
625 
626  min_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
627  }
628  else
629  {
630  /*
631  * Normal case: apply the Mackert and Lohman formula, and then
632  * interpolate between that and the correlation-derived result.
633  */
634  pages_fetched = index_pages_fetched(tuples_fetched,
635  baserel->pages,
636  (double) index->pages,
637  root);
638 
639  if (indexonly)
640  pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
641 
642  rand_heap_pages = pages_fetched;
643 
644  /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
645  max_IO_cost = pages_fetched * spc_random_page_cost;
646 
647  /* min_IO_cost is for the perfectly correlated case (csquared=1) */
648  pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
649 
650  if (indexonly)
651  pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
652 
653  if (pages_fetched > 0)
654  {
655  min_IO_cost = spc_random_page_cost;
656  if (pages_fetched > 1)
657  min_IO_cost += (pages_fetched - 1) * spc_seq_page_cost;
658  }
659  else
660  min_IO_cost = 0;
661  }
662 
663  if (partial_path)
664  {
665  /*
666  * For index only scans compute workers based on number of index pages
667  * fetched; the number of heap pages we fetch might be so small as to
668  * effectively rule out parallelism, which we don't want to do.
669  */
670  if (indexonly)
671  rand_heap_pages = -1;
672 
673  /*
674  * Estimate the number of parallel workers required to scan index. Use
675  * the number of heap pages computed considering heap fetches won't be
676  * sequential as for parallel scans the pages are accessed in random
677  * order.
678  */
680  rand_heap_pages, index_pages);
681 
682  /*
683  * Fall out if workers can't be assigned for parallel scan, because in
684  * such a case this path will be rejected. So there is no benefit in
685  * doing extra computation.
686  */
687  if (path->path.parallel_workers <= 0)
688  return;
689 
690  path->path.parallel_aware = true;
691  }
692 
693  /*
694  * Now interpolate based on estimated index order correlation to get total
695  * disk I/O cost for main table accesses.
696  */
697  csquared = indexCorrelation * indexCorrelation;
698 
699  run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
700 
701  /*
702  * Estimate CPU costs per tuple.
703  *
704  * What we want here is cpu_tuple_cost plus the evaluation costs of any
705  * qual clauses that we have to evaluate as qpquals.
706  */
707  cost_qual_eval(&qpqual_cost, qpquals, root);
708 
709  startup_cost += qpqual_cost.startup;
710  cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
711 
712  cpu_run_cost += cpu_per_tuple * tuples_fetched;
713 
714  /* tlist eval costs are paid per output row, not per tuple scanned */
715  startup_cost += path->path.pathtarget->cost.startup;
716  cpu_run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
717 
718  /* Adjust costing for parallelism, if used. */
719  if (path->path.parallel_workers > 0)
720  {
721  double parallel_divisor = get_parallel_divisor(&path->path);
722 
723  path->path.rows = clamp_row_est(path->path.rows / parallel_divisor);
724 
725  /* The CPU cost is divided among all the workers. */
726  cpu_run_cost /= parallel_divisor;
727  }
728 
729  run_cost += cpu_run_cost;
730 
731  path->path.startup_cost = startup_cost;
732  path->path.total_cost = startup_cost + run_cost;
733 }
734 
735 /*
736  * extract_nonindex_conditions
737  *
738  * Given a list of quals to be enforced in an indexscan, extract the ones that
739  * will have to be applied as qpquals (ie, the index machinery won't handle
740  * them). The actual rules for this appear in create_indexscan_plan() in
741  * createplan.c, but the full rules are fairly expensive and we don't want to
742  * go to that much effort for index paths that don't get selected for the
743  * final plan. So we approximate it as quals that don't appear directly in
744  * indexquals and also are not redundant children of the same EquivalenceClass
745  * as some indexqual. This method neglects some infrequently-relevant
746  * considerations, specifically clauses that needn't be checked because they
747  * are implied by an indexqual. It does not seem worth the cycles to try to
748  * factor that in at this stage, even though createplan.c will take pains to
749  * remove such unnecessary clauses from the qpquals list if this path is
750  * selected for use.
751  */
752 static List *
753 extract_nonindex_conditions(List *qual_clauses, List *indexquals)
754 {
755  List *result = NIL;
756  ListCell *lc;
757 
758  foreach(lc, qual_clauses)
759  {
760  RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
761 
762  if (rinfo->pseudoconstant)
763  continue; /* we may drop pseudoconstants here */
764  if (list_member_ptr(indexquals, rinfo))
765  continue; /* simple duplicate */
766  if (is_redundant_derived_clause(rinfo, indexquals))
767  continue; /* derived from same EquivalenceClass */
768  /* ... skip the predicate proof attempt createplan.c will try ... */
769  result = lappend(result, rinfo);
770  }
771  return result;
772 }
773 
774 /*
775  * index_pages_fetched
776  * Estimate the number of pages actually fetched after accounting for
777  * cache effects.
778  *
779  * We use an approximation proposed by Mackert and Lohman, "Index Scans
780  * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
781  * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
782  * The Mackert and Lohman approximation is that the number of pages
783  * fetched is
784  * PF =
785  * min(2TNs/(2T+Ns), T) when T <= b
786  * 2TNs/(2T+Ns) when T > b and Ns <= 2Tb/(2T-b)
787  * b + (Ns - 2Tb/(2T-b))*(T-b)/T when T > b and Ns > 2Tb/(2T-b)
788  * where
789  * T = # pages in table
790  * N = # tuples in table
791  * s = selectivity = fraction of table to be scanned
792  * b = # buffer pages available (we include kernel space here)
793  *
794  * We assume that effective_cache_size is the total number of buffer pages
795  * available for the whole query, and pro-rate that space across all the
796  * tables in the query and the index currently under consideration. (This
797  * ignores space needed for other indexes used by the query, but since we
798  * don't know which indexes will get used, we can't estimate that very well;
799  * and in any case counting all the tables may well be an overestimate, since
800  * depending on the join plan not all the tables may be scanned concurrently.)
801  *
802  * The product Ns is the number of tuples fetched; we pass in that
803  * product rather than calculating it here. "pages" is the number of pages
804  * in the object under consideration (either an index or a table).
805  * "index_pages" is the amount to add to the total table space, which was
806  * computed for us by query_planner.
807  *
808  * Caller is expected to have ensured that tuples_fetched is greater than zero
809  * and rounded to integer (see clamp_row_est). The result will likewise be
810  * greater than zero and integral.
811  */
812 double
813 index_pages_fetched(double tuples_fetched, BlockNumber pages,
814  double index_pages, PlannerInfo *root)
815 {
816  double pages_fetched;
817  double total_pages;
818  double T,
819  b;
820 
821  /* T is # pages in table, but don't allow it to be zero */
822  T = (pages > 1) ? (double) pages : 1.0;
823 
824  /* Compute number of pages assumed to be competing for cache space */
825  total_pages = root->total_table_pages + index_pages;
826  total_pages = Max(total_pages, 1.0);
827  Assert(T <= total_pages);
828 
829  /* b is pro-rated share of effective_cache_size */
830  b = (double) effective_cache_size * T / total_pages;
831 
832  /* force it positive and integral */
833  if (b <= 1.0)
834  b = 1.0;
835  else
836  b = ceil(b);
837 
838  /* This part is the Mackert and Lohman formula */
839  if (T <= b)
840  {
841  pages_fetched =
842  (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
843  if (pages_fetched >= T)
844  pages_fetched = T;
845  else
846  pages_fetched = ceil(pages_fetched);
847  }
848  else
849  {
850  double lim;
851 
852  lim = (2.0 * T * b) / (2.0 * T - b);
853  if (tuples_fetched <= lim)
854  {
855  pages_fetched =
856  (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
857  }
858  else
859  {
860  pages_fetched =
861  b + (tuples_fetched - lim) * (T - b) / T;
862  }
863  pages_fetched = ceil(pages_fetched);
864  }
865  return pages_fetched;
866 }
867 
868 /*
869  * get_indexpath_pages
870  * Determine the total size of the indexes used in a bitmap index path.
871  *
872  * Note: if the same index is used more than once in a bitmap tree, we will
873  * count it multiple times, which perhaps is the wrong thing ... but it's
874  * not completely clear, and detecting duplicates is difficult, so ignore it
875  * for now.
876  */
877 static double
879 {
880  double result = 0;
881  ListCell *l;
882 
883  if (IsA(bitmapqual, BitmapAndPath))
884  {
885  BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
886 
887  foreach(l, apath->bitmapquals)
888  {
889  result += get_indexpath_pages((Path *) lfirst(l));
890  }
891  }
892  else if (IsA(bitmapqual, BitmapOrPath))
893  {
894  BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
895 
896  foreach(l, opath->bitmapquals)
897  {
898  result += get_indexpath_pages((Path *) lfirst(l));
899  }
900  }
901  else if (IsA(bitmapqual, IndexPath))
902  {
903  IndexPath *ipath = (IndexPath *) bitmapqual;
904 
905  result = (double) ipath->indexinfo->pages;
906  }
907  else
908  elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
909 
910  return result;
911 }
912 
913 /*
914  * cost_bitmap_heap_scan
915  * Determines and returns the cost of scanning a relation using a bitmap
916  * index-then-heap plan.
917  *
918  * 'baserel' is the relation to be scanned
919  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
920  * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
921  * 'loop_count' is the number of repetitions of the indexscan to factor into
922  * estimates of caching behavior
923  *
924  * Note: the component IndexPaths in bitmapqual should have been costed
925  * using the same loop_count.
926  */
927 void
929  ParamPathInfo *param_info,
930  Path *bitmapqual, double loop_count)
931 {
932  Cost startup_cost = 0;
933  Cost run_cost = 0;
934  Cost indexTotalCost;
935  QualCost qpqual_cost;
936  Cost cpu_per_tuple;
937  Cost cost_per_page;
938  Cost cpu_run_cost;
939  double tuples_fetched;
940  double pages_fetched;
941  double spc_seq_page_cost,
942  spc_random_page_cost;
943  double T;
944 
945  /* Should only be applied to base relations */
946  Assert(IsA(baserel, RelOptInfo));
947  Assert(baserel->relid > 0);
948  Assert(baserel->rtekind == RTE_RELATION);
949 
950  /* Mark the path with the correct row estimate */
951  if (param_info)
952  path->rows = param_info->ppi_rows;
953  else
954  path->rows = baserel->rows;
955 
956  if (!enable_bitmapscan)
957  startup_cost += disable_cost;
958 
959  pages_fetched = compute_bitmap_pages(root, baserel, bitmapqual,
960  loop_count, &indexTotalCost,
961  &tuples_fetched);
962 
963  startup_cost += indexTotalCost;
964  T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
965 
966  /* Fetch estimated page costs for tablespace containing table. */
968  &spc_random_page_cost,
969  &spc_seq_page_cost);
970 
971  /*
972  * For small numbers of pages we should charge spc_random_page_cost
973  * apiece, while if nearly all the table's pages are being read, it's more
974  * appropriate to charge spc_seq_page_cost apiece. The effect is
975  * nonlinear, too. For lack of a better idea, interpolate like this to
976  * determine the cost per page.
977  */
978  if (pages_fetched >= 2.0)
979  cost_per_page = spc_random_page_cost -
980  (spc_random_page_cost - spc_seq_page_cost)
981  * sqrt(pages_fetched / T);
982  else
983  cost_per_page = spc_random_page_cost;
984 
985  run_cost += pages_fetched * cost_per_page;
986 
987  /*
988  * Estimate CPU costs per tuple.
989  *
990  * Often the indexquals don't need to be rechecked at each tuple ... but
991  * not always, especially not if there are enough tuples involved that the
992  * bitmaps become lossy. For the moment, just assume they will be
993  * rechecked always. This means we charge the full freight for all the
994  * scan clauses.
995  */
996  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
997 
998  startup_cost += qpqual_cost.startup;
999  cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1000  cpu_run_cost = cpu_per_tuple * tuples_fetched;
1001 
1002  /* Adjust costing for parallelism, if used. */
1003  if (path->parallel_workers > 0)
1004  {
1005  double parallel_divisor = get_parallel_divisor(path);
1006 
1007  /* The CPU cost is divided among all the workers. */
1008  cpu_run_cost /= parallel_divisor;
1009 
1010  path->rows = clamp_row_est(path->rows / parallel_divisor);
1011  }
1012 
1013 
1014  run_cost += cpu_run_cost;
1015 
1016  /* tlist eval costs are paid per output row, not per tuple scanned */
1017  startup_cost += path->pathtarget->cost.startup;
1018  run_cost += path->pathtarget->cost.per_tuple * path->rows;
1019 
1020  path->startup_cost = startup_cost;
1021  path->total_cost = startup_cost + run_cost;
1022 }
1023 
1024 /*
1025  * cost_bitmap_tree_node
1026  * Extract cost and selectivity from a bitmap tree node (index/and/or)
1027  */
1028 void
1030 {
1031  if (IsA(path, IndexPath))
1032  {
1033  *cost = ((IndexPath *) path)->indextotalcost;
1034  *selec = ((IndexPath *) path)->indexselectivity;
1035 
1036  /*
1037  * Charge a small amount per retrieved tuple to reflect the costs of
1038  * manipulating the bitmap. This is mostly to make sure that a bitmap
1039  * scan doesn't look to be the same cost as an indexscan to retrieve a
1040  * single tuple.
1041  */
1042  *cost += 0.1 * cpu_operator_cost * path->rows;
1043  }
1044  else if (IsA(path, BitmapAndPath))
1045  {
1046  *cost = path->total_cost;
1047  *selec = ((BitmapAndPath *) path)->bitmapselectivity;
1048  }
1049  else if (IsA(path, BitmapOrPath))
1050  {
1051  *cost = path->total_cost;
1052  *selec = ((BitmapOrPath *) path)->bitmapselectivity;
1053  }
1054  else
1055  {
1056  elog(ERROR, "unrecognized node type: %d", nodeTag(path));
1057  *cost = *selec = 0; /* keep compiler quiet */
1058  }
1059 }
1060 
1061 /*
1062  * cost_bitmap_and_node
1063  * Estimate the cost of a BitmapAnd node
1064  *
1065  * Note that this considers only the costs of index scanning and bitmap
1066  * creation, not the eventual heap access. In that sense the object isn't
1067  * truly a Path, but it has enough path-like properties (costs in particular)
1068  * to warrant treating it as one. We don't bother to set the path rows field,
1069  * however.
1070  */
1071 void
1073 {
1074  Cost totalCost;
1075  Selectivity selec;
1076  ListCell *l;
1077 
1078  /*
1079  * We estimate AND selectivity on the assumption that the inputs are
1080  * independent. This is probably often wrong, but we don't have the info
1081  * to do better.
1082  *
1083  * The runtime cost of the BitmapAnd itself is estimated at 100x
1084  * cpu_operator_cost for each tbm_intersect needed. Probably too small,
1085  * definitely too simplistic?
1086  */
1087  totalCost = 0.0;
1088  selec = 1.0;
1089  foreach(l, path->bitmapquals)
1090  {
1091  Path *subpath = (Path *) lfirst(l);
1092  Cost subCost;
1093  Selectivity subselec;
1094 
1095  cost_bitmap_tree_node(subpath, &subCost, &subselec);
1096 
1097  selec *= subselec;
1098 
1099  totalCost += subCost;
1100  if (l != list_head(path->bitmapquals))
1101  totalCost += 100.0 * cpu_operator_cost;
1102  }
1103  path->bitmapselectivity = selec;
1104  path->path.rows = 0; /* per above, not used */
1105  path->path.startup_cost = totalCost;
1106  path->path.total_cost = totalCost;
1107 }
1108 
1109 /*
1110  * cost_bitmap_or_node
1111  * Estimate the cost of a BitmapOr node
1112  *
1113  * See comments for cost_bitmap_and_node.
1114  */
1115 void
1117 {
1118  Cost totalCost;
1119  Selectivity selec;
1120  ListCell *l;
1121 
1122  /*
1123  * We estimate OR selectivity on the assumption that the inputs are
1124  * non-overlapping, since that's often the case in "x IN (list)" type
1125  * situations. Of course, we clamp to 1.0 at the end.
1126  *
1127  * The runtime cost of the BitmapOr itself is estimated at 100x
1128  * cpu_operator_cost for each tbm_union needed. Probably too small,
1129  * definitely too simplistic? We are aware that the tbm_unions are
1130  * optimized out when the inputs are BitmapIndexScans.
1131  */
1132  totalCost = 0.0;
1133  selec = 0.0;
1134  foreach(l, path->bitmapquals)
1135  {
1136  Path *subpath = (Path *) lfirst(l);
1137  Cost subCost;
1138  Selectivity subselec;
1139 
1140  cost_bitmap_tree_node(subpath, &subCost, &subselec);
1141 
1142  selec += subselec;
1143 
1144  totalCost += subCost;
1145  if (l != list_head(path->bitmapquals) &&
1146  !IsA(subpath, IndexPath))
1147  totalCost += 100.0 * cpu_operator_cost;
1148  }
1149  path->bitmapselectivity = Min(selec, 1.0);
1150  path->path.rows = 0; /* per above, not used */
1151  path->path.startup_cost = totalCost;
1152  path->path.total_cost = totalCost;
1153 }
1154 
1155 /*
1156  * cost_tidscan
1157  * Determines and returns the cost of scanning a relation using TIDs.
1158  *
1159  * 'baserel' is the relation to be scanned
1160  * 'tidquals' is the list of TID-checkable quals
1161  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1162  */
1163 void
1165  RelOptInfo *baserel, List *tidquals, ParamPathInfo *param_info)
1166 {
1167  Cost startup_cost = 0;
1168  Cost run_cost = 0;
1169  bool isCurrentOf = false;
1170  QualCost qpqual_cost;
1171  Cost cpu_per_tuple;
1172  QualCost tid_qual_cost;
1173  int ntuples;
1174  ListCell *l;
1175  double spc_random_page_cost;
1176 
1177  /* Should only be applied to base relations */
1178  Assert(baserel->relid > 0);
1179  Assert(baserel->rtekind == RTE_RELATION);
1180 
1181  /* Mark the path with the correct row estimate */
1182  if (param_info)
1183  path->rows = param_info->ppi_rows;
1184  else
1185  path->rows = baserel->rows;
1186 
1187  /* Count how many tuples we expect to retrieve */
1188  ntuples = 0;
1189  foreach(l, tidquals)
1190  {
1191  if (IsA(lfirst(l), ScalarArrayOpExpr))
1192  {
1193  /* Each element of the array yields 1 tuple */
1195  Node *arraynode = (Node *) lsecond(saop->args);
1196 
1197  ntuples += estimate_array_length(arraynode);
1198  }
1199  else if (IsA(lfirst(l), CurrentOfExpr))
1200  {
1201  /* CURRENT OF yields 1 tuple */
1202  isCurrentOf = true;
1203  ntuples++;
1204  }
1205  else
1206  {
1207  /* It's just CTID = something, count 1 tuple */
1208  ntuples++;
1209  }
1210  }
1211 
1212  /*
1213  * We must force TID scan for WHERE CURRENT OF, because only nodeTidscan.c
1214  * understands how to do it correctly. Therefore, honor enable_tidscan
1215  * only when CURRENT OF isn't present. Also note that cost_qual_eval
1216  * counts a CurrentOfExpr as having startup cost disable_cost, which we
1217  * subtract off here; that's to prevent other plan types such as seqscan
1218  * from winning.
1219  */
1220  if (isCurrentOf)
1221  {
1223  startup_cost -= disable_cost;
1224  }
1225  else if (!enable_tidscan)
1226  startup_cost += disable_cost;
1227 
1228  /*
1229  * The TID qual expressions will be computed once, any other baserestrict
1230  * quals once per retrieved tuple.
1231  */
1232  cost_qual_eval(&tid_qual_cost, tidquals, root);
1233 
1234  /* fetch estimated page cost for tablespace containing table */
1236  &spc_random_page_cost,
1237  NULL);
1238 
1239  /* disk costs --- assume each tuple on a different page */
1240  run_cost += spc_random_page_cost * ntuples;
1241 
1242  /* Add scanning CPU costs */
1243  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1244 
1245  /* XXX currently we assume TID quals are a subset of qpquals */
1246  startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
1247  cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
1248  tid_qual_cost.per_tuple;
1249  run_cost += cpu_per_tuple * ntuples;
1250 
1251  /* tlist eval costs are paid per output row, not per tuple scanned */
1252  startup_cost += path->pathtarget->cost.startup;
1253  run_cost += path->pathtarget->cost.per_tuple * path->rows;
1254 
1255  path->startup_cost = startup_cost;
1256  path->total_cost = startup_cost + run_cost;
1257 }
1258 
1259 /*
1260  * cost_subqueryscan
1261  * Determines and returns the cost of scanning a subquery RTE.
1262  *
1263  * 'baserel' is the relation to be scanned
1264  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1265  */
1266 void
1268  RelOptInfo *baserel, ParamPathInfo *param_info)
1269 {
1270  Cost startup_cost;
1271  Cost run_cost;
1272  QualCost qpqual_cost;
1273  Cost cpu_per_tuple;
1274 
1275  /* Should only be applied to base relations that are subqueries */
1276  Assert(baserel->relid > 0);
1277  Assert(baserel->rtekind == RTE_SUBQUERY);
1278 
1279  /* Mark the path with the correct row estimate */
1280  if (param_info)
1281  path->path.rows = param_info->ppi_rows;
1282  else
1283  path->path.rows = baserel->rows;
1284 
1285  /*
1286  * Cost of path is cost of evaluating the subplan, plus cost of evaluating
1287  * any restriction clauses and tlist that will be attached to the
1288  * SubqueryScan node, plus cpu_tuple_cost to account for selection and
1289  * projection overhead.
1290  */
1291  path->path.startup_cost = path->subpath->startup_cost;
1292  path->path.total_cost = path->subpath->total_cost;
1293 
1294  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1295 
1296  startup_cost = qpqual_cost.startup;
1297  cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1298  run_cost = cpu_per_tuple * baserel->tuples;
1299 
1300  /* tlist eval costs are paid per output row, not per tuple scanned */
1301  startup_cost += path->path.pathtarget->cost.startup;
1302  run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
1303 
1304  path->path.startup_cost += startup_cost;
1305  path->path.total_cost += startup_cost + run_cost;
1306 }
1307 
1308 /*
1309  * cost_functionscan
1310  * Determines and returns the cost of scanning a function RTE.
1311  *
1312  * 'baserel' is the relation to be scanned
1313  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1314  */
1315 void
1317  RelOptInfo *baserel, ParamPathInfo *param_info)
1318 {
1319  Cost startup_cost = 0;
1320  Cost run_cost = 0;
1321  QualCost qpqual_cost;
1322  Cost cpu_per_tuple;
1323  RangeTblEntry *rte;
1324  QualCost exprcost;
1325 
1326  /* Should only be applied to base relations that are functions */
1327  Assert(baserel->relid > 0);
1328  rte = planner_rt_fetch(baserel->relid, root);
1329  Assert(rte->rtekind == RTE_FUNCTION);
1330 
1331  /* Mark the path with the correct row estimate */
1332  if (param_info)
1333  path->rows = param_info->ppi_rows;
1334  else
1335  path->rows = baserel->rows;
1336 
1337  /*
1338  * Estimate costs of executing the function expression(s).
1339  *
1340  * Currently, nodeFunctionscan.c always executes the functions to
1341  * completion before returning any rows, and caches the results in a
1342  * tuplestore. So the function eval cost is all startup cost, and per-row
1343  * costs are minimal.
1344  *
1345  * XXX in principle we ought to charge tuplestore spill costs if the
1346  * number of rows is large. However, given how phony our rowcount
1347  * estimates for functions tend to be, there's not a lot of point in that
1348  * refinement right now.
1349  */
1350  cost_qual_eval_node(&exprcost, (Node *) rte->functions, root);
1351 
1352  startup_cost += exprcost.startup + exprcost.per_tuple;
1353 
1354  /* Add scanning CPU costs */
1355  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1356 
1357  startup_cost += qpqual_cost.startup;
1358  cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1359  run_cost += cpu_per_tuple * baserel->tuples;
1360 
1361  /* tlist eval costs are paid per output row, not per tuple scanned */
1362  startup_cost += path->pathtarget->cost.startup;
1363  run_cost += path->pathtarget->cost.per_tuple * path->rows;
1364 
1365  path->startup_cost = startup_cost;
1366  path->total_cost = startup_cost + run_cost;
1367 }
1368 
1369 /*
1370  * cost_tablefuncscan
1371  * Determines and returns the cost of scanning a table function.
1372  *
1373  * 'baserel' is the relation to be scanned
1374  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1375  */
1376 void
1378  RelOptInfo *baserel, ParamPathInfo *param_info)
1379 {
1380  Cost startup_cost = 0;
1381  Cost run_cost = 0;
1382  QualCost qpqual_cost;
1383  Cost cpu_per_tuple;
1384  RangeTblEntry *rte;
1385  QualCost exprcost;
1386 
1387  /* Should only be applied to base relations that are functions */
1388  Assert(baserel->relid > 0);
1389  rte = planner_rt_fetch(baserel->relid, root);
1390  Assert(rte->rtekind == RTE_TABLEFUNC);
1391 
1392  /* Mark the path with the correct row estimate */
1393  if (param_info)
1394  path->rows = param_info->ppi_rows;
1395  else
1396  path->rows = baserel->rows;
1397 
1398  /*
1399  * Estimate costs of executing the table func expression(s).
1400  *
1401  * XXX in principle we ought to charge tuplestore spill costs if the
1402  * number of rows is large. However, given how phony our rowcount
1403  * estimates for tablefuncs tend to be, there's not a lot of point in that
1404  * refinement right now.
1405  */
1406  cost_qual_eval_node(&exprcost, (Node *) rte->tablefunc, root);
1407 
1408  startup_cost += exprcost.startup + exprcost.per_tuple;
1409 
1410  /* Add scanning CPU costs */
1411  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1412 
1413  startup_cost += qpqual_cost.startup;
1414  cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1415  run_cost += cpu_per_tuple * baserel->tuples;
1416 
1417  /* tlist eval costs are paid per output row, not per tuple scanned */
1418  startup_cost += path->pathtarget->cost.startup;
1419  run_cost += path->pathtarget->cost.per_tuple * path->rows;
1420 
1421  path->startup_cost = startup_cost;
1422  path->total_cost = startup_cost + run_cost;
1423 }
1424 
1425 /*
1426  * cost_valuesscan
1427  * Determines and returns the cost of scanning a VALUES RTE.
1428  *
1429  * 'baserel' is the relation to be scanned
1430  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1431  */
1432 void
1434  RelOptInfo *baserel, ParamPathInfo *param_info)
1435 {
1436  Cost startup_cost = 0;
1437  Cost run_cost = 0;
1438  QualCost qpqual_cost;
1439  Cost cpu_per_tuple;
1440 
1441  /* Should only be applied to base relations that are values lists */
1442  Assert(baserel->relid > 0);
1443  Assert(baserel->rtekind == RTE_VALUES);
1444 
1445  /* Mark the path with the correct row estimate */
1446  if (param_info)
1447  path->rows = param_info->ppi_rows;
1448  else
1449  path->rows = baserel->rows;
1450 
1451  /*
1452  * For now, estimate list evaluation cost at one operator eval per list
1453  * (probably pretty bogus, but is it worth being smarter?)
1454  */
1455  cpu_per_tuple = cpu_operator_cost;
1456 
1457  /* Add scanning CPU costs */
1458  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1459 
1460  startup_cost += qpqual_cost.startup;
1461  cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1462  run_cost += cpu_per_tuple * baserel->tuples;
1463 
1464  /* tlist eval costs are paid per output row, not per tuple scanned */
1465  startup_cost += path->pathtarget->cost.startup;
1466  run_cost += path->pathtarget->cost.per_tuple * path->rows;
1467 
1468  path->startup_cost = startup_cost;
1469  path->total_cost = startup_cost + run_cost;
1470 }
1471 
1472 /*
1473  * cost_ctescan
1474  * Determines and returns the cost of scanning a CTE RTE.
1475  *
1476  * Note: this is used for both self-reference and regular CTEs; the
1477  * possible cost differences are below the threshold of what we could
1478  * estimate accurately anyway. Note that the costs of evaluating the
1479  * referenced CTE query are added into the final plan as initplan costs,
1480  * and should NOT be counted here.
1481  */
1482 void
1484  RelOptInfo *baserel, ParamPathInfo *param_info)
1485 {
1486  Cost startup_cost = 0;
1487  Cost run_cost = 0;
1488  QualCost qpqual_cost;
1489  Cost cpu_per_tuple;
1490 
1491  /* Should only be applied to base relations that are CTEs */
1492  Assert(baserel->relid > 0);
1493  Assert(baserel->rtekind == RTE_CTE);
1494 
1495  /* Mark the path with the correct row estimate */
1496  if (param_info)
1497  path->rows = param_info->ppi_rows;
1498  else
1499  path->rows = baserel->rows;
1500 
1501  /* Charge one CPU tuple cost per row for tuplestore manipulation */
1502  cpu_per_tuple = cpu_tuple_cost;
1503 
1504  /* Add scanning CPU costs */
1505  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1506 
1507  startup_cost += qpqual_cost.startup;
1508  cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1509  run_cost += cpu_per_tuple * baserel->tuples;
1510 
1511  /* tlist eval costs are paid per output row, not per tuple scanned */
1512  startup_cost += path->pathtarget->cost.startup;
1513  run_cost += path->pathtarget->cost.per_tuple * path->rows;
1514 
1515  path->startup_cost = startup_cost;
1516  path->total_cost = startup_cost + run_cost;
1517 }
1518 
1519 /*
1520  * cost_namedtuplestorescan
1521  * Determines and returns the cost of scanning a named tuplestore.
1522  */
1523 void
1525  RelOptInfo *baserel, ParamPathInfo *param_info)
1526 {
1527  Cost startup_cost = 0;
1528  Cost run_cost = 0;
1529  QualCost qpqual_cost;
1530  Cost cpu_per_tuple;
1531 
1532  /* Should only be applied to base relations that are Tuplestores */
1533  Assert(baserel->relid > 0);
1534  Assert(baserel->rtekind == RTE_NAMEDTUPLESTORE);
1535 
1536  /* Mark the path with the correct row estimate */
1537  if (param_info)
1538  path->rows = param_info->ppi_rows;
1539  else
1540  path->rows = baserel->rows;
1541 
1542  /* Charge one CPU tuple cost per row for tuplestore manipulation */
1543  cpu_per_tuple = cpu_tuple_cost;
1544 
1545  /* Add scanning CPU costs */
1546  get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1547 
1548  startup_cost += qpqual_cost.startup;
1549  cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1550  run_cost += cpu_per_tuple * baserel->tuples;
1551 
1552  path->startup_cost = startup_cost;
1553  path->total_cost = startup_cost + run_cost;
1554 }
1555 
1556 /*
1557  * cost_recursive_union
1558  * Determines and returns the cost of performing a recursive union,
1559  * and also the estimated output size.
1560  *
1561  * We are given Paths for the nonrecursive and recursive terms.
1562  */
1563 void
1564 cost_recursive_union(Path *runion, Path *nrterm, Path *rterm)
1565 {
1566  Cost startup_cost;
1567  Cost total_cost;
1568  double total_rows;
1569 
1570  /* We probably have decent estimates for the non-recursive term */
1571  startup_cost = nrterm->startup_cost;
1572  total_cost = nrterm->total_cost;
1573  total_rows = nrterm->rows;
1574 
1575  /*
1576  * We arbitrarily assume that about 10 recursive iterations will be
1577  * needed, and that we've managed to get a good fix on the cost and output
1578  * size of each one of them. These are mighty shaky assumptions but it's
1579  * hard to see how to do better.
1580  */
1581  total_cost += 10 * rterm->total_cost;
1582  total_rows += 10 * rterm->rows;
1583 
1584  /*
1585  * Also charge cpu_tuple_cost per row to account for the costs of
1586  * manipulating the tuplestores. (We don't worry about possible
1587  * spill-to-disk costs.)
1588  */
1589  total_cost += cpu_tuple_cost * total_rows;
1590 
1591  runion->startup_cost = startup_cost;
1592  runion->total_cost = total_cost;
1593  runion->rows = total_rows;
1594  runion->pathtarget->width = Max(nrterm->pathtarget->width,
1595  rterm->pathtarget->width);
1596 }
1597 
1598 /*
1599  * cost_sort
1600  * Determines and returns the cost of sorting a relation, including
1601  * the cost of reading the input data.
1602  *
1603  * If the total volume of data to sort is less than sort_mem, we will do
1604  * an in-memory sort, which requires no I/O and about t*log2(t) tuple
1605  * comparisons for t tuples.
1606  *
1607  * If the total volume exceeds sort_mem, we switch to a tape-style merge
1608  * algorithm. There will still be about t*log2(t) tuple comparisons in
1609  * total, but we will also need to write and read each tuple once per
1610  * merge pass. We expect about ceil(logM(r)) merge passes where r is the
1611  * number of initial runs formed and M is the merge order used by tuplesort.c.
1612  * Since the average initial run should be about sort_mem, we have
1613  * disk traffic = 2 * relsize * ceil(logM(p / sort_mem))
1614  * cpu = comparison_cost * t * log2(t)
1615  *
1616  * If the sort is bounded (i.e., only the first k result tuples are needed)
1617  * and k tuples can fit into sort_mem, we use a heap method that keeps only
1618  * k tuples in the heap; this will require about t*log2(k) tuple comparisons.
1619  *
1620  * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
1621  * accesses (XXX can't we refine that guess?)
1622  *
1623  * By default, we charge two operator evals per tuple comparison, which should
1624  * be in the right ballpark in most cases. The caller can tweak this by
1625  * specifying nonzero comparison_cost; typically that's used for any extra
1626  * work that has to be done to prepare the inputs to the comparison operators.
1627  *
1628  * 'pathkeys' is a list of sort keys
1629  * 'input_cost' is the total cost for reading the input data
1630  * 'tuples' is the number of tuples in the relation
1631  * 'width' is the average tuple width in bytes
1632  * 'comparison_cost' is the extra cost per comparison, if any
1633  * 'sort_mem' is the number of kilobytes of work memory allowed for the sort
1634  * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
1635  *
1636  * NOTE: some callers currently pass NIL for pathkeys because they
1637  * can't conveniently supply the sort keys. Since this routine doesn't
1638  * currently do anything with pathkeys anyway, that doesn't matter...
1639  * but if it ever does, it should react gracefully to lack of key data.
1640  * (Actually, the thing we'd most likely be interested in is just the number
1641  * of sort keys, which all callers *could* supply.)
1642  */
1643 void
1645  List *pathkeys, Cost input_cost, double tuples, int width,
1646  Cost comparison_cost, int sort_mem,
1647  double limit_tuples)
1648 {
1649  Cost startup_cost = input_cost;
1650  Cost run_cost = 0;
1651  double input_bytes = relation_byte_size(tuples, width);
1652  double output_bytes;
1653  double output_tuples;
1654  long sort_mem_bytes = sort_mem * 1024L;
1655 
1656  if (!enable_sort)
1657  startup_cost += disable_cost;
1658 
1659  path->rows = tuples;
1660 
1661  /*
1662  * We want to be sure the cost of a sort is never estimated as zero, even
1663  * if passed-in tuple count is zero. Besides, mustn't do log(0)...
1664  */
1665  if (tuples < 2.0)
1666  tuples = 2.0;
1667 
1668  /* Include the default cost-per-comparison */
1669  comparison_cost += 2.0 * cpu_operator_cost;
1670 
1671  /* Do we have a useful LIMIT? */
1672  if (limit_tuples > 0 && limit_tuples < tuples)
1673  {
1674  output_tuples = limit_tuples;
1675  output_bytes = relation_byte_size(output_tuples, width);
1676  }
1677  else
1678  {
1679  output_tuples = tuples;
1680  output_bytes = input_bytes;
1681  }
1682 
1683  if (output_bytes > sort_mem_bytes)
1684  {
1685  /*
1686  * We'll have to use a disk-based sort of all the tuples
1687  */
1688  double npages = ceil(input_bytes / BLCKSZ);
1689  double nruns = input_bytes / sort_mem_bytes;
1690  double mergeorder = tuplesort_merge_order(sort_mem_bytes);
1691  double log_runs;
1692  double npageaccesses;
1693 
1694  /*
1695  * CPU costs
1696  *
1697  * Assume about N log2 N comparisons
1698  */
1699  startup_cost += comparison_cost * tuples * LOG2(tuples);
1700 
1701  /* Disk costs */
1702 
1703  /* Compute logM(r) as log(r) / log(M) */
1704  if (nruns > mergeorder)
1705  log_runs = ceil(log(nruns) / log(mergeorder));
1706  else
1707  log_runs = 1.0;
1708  npageaccesses = 2.0 * npages * log_runs;
1709  /* Assume 3/4ths of accesses are sequential, 1/4th are not */
1710  startup_cost += npageaccesses *
1711  (seq_page_cost * 0.75 + random_page_cost * 0.25);
1712  }
1713  else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)
1714  {
1715  /*
1716  * We'll use a bounded heap-sort keeping just K tuples in memory, for
1717  * a total number of tuple comparisons of N log2 K; but the constant
1718  * factor is a bit higher than for quicksort. Tweak it so that the
1719  * cost curve is continuous at the crossover point.
1720  */
1721  startup_cost += comparison_cost * tuples * LOG2(2.0 * output_tuples);
1722  }
1723  else
1724  {
1725  /* We'll use plain quicksort on all the input tuples */
1726  startup_cost += comparison_cost * tuples * LOG2(tuples);
1727  }
1728 
1729  /*
1730  * Also charge a small amount (arbitrarily set equal to operator cost) per
1731  * extracted tuple. We don't charge cpu_tuple_cost because a Sort node
1732  * doesn't do qual-checking or projection, so it has less overhead than
1733  * most plan nodes. Note it's correct to use tuples not output_tuples
1734  * here --- the upper LIMIT will pro-rate the run cost so we'd be double
1735  * counting the LIMIT otherwise.
1736  */
1737  run_cost += cpu_operator_cost * tuples;
1738 
1739  path->startup_cost = startup_cost;
1740  path->total_cost = startup_cost + run_cost;
1741 }
1742 
1743 /*
1744  * cost_merge_append
1745  * Determines and returns the cost of a MergeAppend node.
1746  *
1747  * MergeAppend merges several pre-sorted input streams, using a heap that
1748  * at any given instant holds the next tuple from each stream. If there
1749  * are N streams, we need about N*log2(N) tuple comparisons to construct
1750  * the heap at startup, and then for each output tuple, about log2(N)
1751  * comparisons to replace the top entry.
1752  *
1753  * (The effective value of N will drop once some of the input streams are
1754  * exhausted, but it seems unlikely to be worth trying to account for that.)
1755  *
1756  * The heap is never spilled to disk, since we assume N is not very large.
1757  * So this is much simpler than cost_sort.
1758  *
1759  * As in cost_sort, we charge two operator evals per tuple comparison.
1760  *
1761  * 'pathkeys' is a list of sort keys
1762  * 'n_streams' is the number of input streams
1763  * 'input_startup_cost' is the sum of the input streams' startup costs
1764  * 'input_total_cost' is the sum of the input streams' total costs
1765  * 'tuples' is the number of tuples in all the streams
1766  */
1767 void
1769  List *pathkeys, int n_streams,
1770  Cost input_startup_cost, Cost input_total_cost,
1771  double tuples)
1772 {
1773  Cost startup_cost = 0;
1774  Cost run_cost = 0;
1775  Cost comparison_cost;
1776  double N;
1777  double logN;
1778 
1779  /*
1780  * Avoid log(0)...
1781  */
1782  N = (n_streams < 2) ? 2.0 : (double) n_streams;
1783  logN = LOG2(N);
1784 
1785  /* Assumed cost per tuple comparison */
1786  comparison_cost = 2.0 * cpu_operator_cost;
1787 
1788  /* Heap creation cost */
1789  startup_cost += comparison_cost * N * logN;
1790 
1791  /* Per-tuple heap maintenance cost */
1792  run_cost += tuples * comparison_cost * logN;
1793 
1794  /*
1795  * Also charge a small amount (arbitrarily set equal to operator cost) per
1796  * extracted tuple. We don't charge cpu_tuple_cost because a MergeAppend
1797  * node doesn't do qual-checking or projection, so it has less overhead
1798  * than most plan nodes.
1799  */
1800  run_cost += cpu_operator_cost * tuples;
1801 
1802  path->startup_cost = startup_cost + input_startup_cost;
1803  path->total_cost = startup_cost + run_cost + input_total_cost;
1804 }
1805 
1806 /*
1807  * cost_material
1808  * Determines and returns the cost of materializing a relation, including
1809  * the cost of reading the input data.
1810  *
1811  * If the total volume of data to materialize exceeds work_mem, we will need
1812  * to write it to disk, so the cost is much higher in that case.
1813  *
1814  * Note that here we are estimating the costs for the first scan of the
1815  * relation, so the materialization is all overhead --- any savings will
1816  * occur only on rescan, which is estimated in cost_rescan.
1817  */
1818 void
1820  Cost input_startup_cost, Cost input_total_cost,
1821  double tuples, int width)
1822 {
1823  Cost startup_cost = input_startup_cost;
1824  Cost run_cost = input_total_cost - input_startup_cost;
1825  double nbytes = relation_byte_size(tuples, width);
1826  long work_mem_bytes = work_mem * 1024L;
1827 
1828  path->rows = tuples;
1829 
1830  /*
1831  * Whether spilling or not, charge 2x cpu_operator_cost per tuple to
1832  * reflect bookkeeping overhead. (This rate must be more than what
1833  * cost_rescan charges for materialize, ie, cpu_operator_cost per tuple;
1834  * if it is exactly the same then there will be a cost tie between
1835  * nestloop with A outer, materialized B inner and nestloop with B outer,
1836  * materialized A inner. The extra cost ensures we'll prefer
1837  * materializing the smaller rel.) Note that this is normally a good deal
1838  * less than cpu_tuple_cost; which is OK because a Material plan node
1839  * doesn't do qual-checking or projection, so it's got less overhead than
1840  * most plan nodes.
1841  */
1842  run_cost += 2 * cpu_operator_cost * tuples;
1843 
1844  /*
1845  * If we will spill to disk, charge at the rate of seq_page_cost per page.
1846  * This cost is assumed to be evenly spread through the plan run phase,
1847  * which isn't exactly accurate but our cost model doesn't allow for
1848  * nonuniform costs within the run phase.
1849  */
1850  if (nbytes > work_mem_bytes)
1851  {
1852  double npages = ceil(nbytes / BLCKSZ);
1853 
1854  run_cost += seq_page_cost * npages;
1855  }
1856 
1857  path->startup_cost = startup_cost;
1858  path->total_cost = startup_cost + run_cost;
1859 }
1860 
1861 /*
1862  * cost_agg
1863  * Determines and returns the cost of performing an Agg plan node,
1864  * including the cost of its input.
1865  *
1866  * aggcosts can be NULL when there are no actual aggregate functions (i.e.,
1867  * we are using a hashed Agg node just to do grouping).
1868  *
1869  * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
1870  * are for appropriately-sorted input.
1871  */
1872 void
1874  AggStrategy aggstrategy, const AggClauseCosts *aggcosts,
1875  int numGroupCols, double numGroups,
1876  Cost input_startup_cost, Cost input_total_cost,
1877  double input_tuples)
1878 {
1879  double output_tuples;
1880  Cost startup_cost;
1881  Cost total_cost;
1882  AggClauseCosts dummy_aggcosts;
1883 
1884  /* Use all-zero per-aggregate costs if NULL is passed */
1885  if (aggcosts == NULL)
1886  {
1887  Assert(aggstrategy == AGG_HASHED);
1888  MemSet(&dummy_aggcosts, 0, sizeof(AggClauseCosts));
1889  aggcosts = &dummy_aggcosts;
1890  }
1891 
1892  /*
1893  * The transCost.per_tuple component of aggcosts should be charged once
1894  * per input tuple, corresponding to the costs of evaluating the aggregate
1895  * transfns and their input expressions (with any startup cost of course
1896  * charged but once). The finalCost component is charged once per output
1897  * tuple, corresponding to the costs of evaluating the finalfns.
1898  *
1899  * If we are grouping, we charge an additional cpu_operator_cost per
1900  * grouping column per input tuple for grouping comparisons.
1901  *
1902  * We will produce a single output tuple if not grouping, and a tuple per
1903  * group otherwise. We charge cpu_tuple_cost for each output tuple.
1904  *
1905  * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
1906  * same total CPU cost, but AGG_SORTED has lower startup cost. If the
1907  * input path is already sorted appropriately, AGG_SORTED should be
1908  * preferred (since it has no risk of memory overflow). This will happen
1909  * as long as the computed total costs are indeed exactly equal --- but if
1910  * there's roundoff error we might do the wrong thing. So be sure that
1911  * the computations below form the same intermediate values in the same
1912  * order.
1913  */
1914  if (aggstrategy == AGG_PLAIN)
1915  {
1916  startup_cost = input_total_cost;
1917  startup_cost += aggcosts->transCost.startup;
1918  startup_cost += aggcosts->transCost.per_tuple * input_tuples;
1919  startup_cost += aggcosts->finalCost;
1920  /* we aren't grouping */
1921  total_cost = startup_cost + cpu_tuple_cost;
1922  output_tuples = 1;
1923  }
1924  else if (aggstrategy == AGG_SORTED || aggstrategy == AGG_MIXED)
1925  {
1926  /* Here we are able to deliver output on-the-fly */
1927  startup_cost = input_startup_cost;
1928  total_cost = input_total_cost;
1929  if (aggstrategy == AGG_MIXED && !enable_hashagg)
1930  {
1931  startup_cost += disable_cost;
1932  total_cost += disable_cost;
1933  }
1934  /* calcs phrased this way to match HASHED case, see note above */
1935  total_cost += aggcosts->transCost.startup;
1936  total_cost += aggcosts->transCost.per_tuple * input_tuples;
1937  total_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
1938  total_cost += aggcosts->finalCost * numGroups;
1939  total_cost += cpu_tuple_cost * numGroups;
1940  output_tuples = numGroups;
1941  }
1942  else
1943  {
1944  /* must be AGG_HASHED */
1945  startup_cost = input_total_cost;
1946  if (!enable_hashagg)
1947  startup_cost += disable_cost;
1948  startup_cost += aggcosts->transCost.startup;
1949  startup_cost += aggcosts->transCost.per_tuple * input_tuples;
1950  startup_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
1951  total_cost = startup_cost;
1952  total_cost += aggcosts->finalCost * numGroups;
1953  total_cost += cpu_tuple_cost * numGroups;
1954  output_tuples = numGroups;
1955  }
1956 
1957  path->rows = output_tuples;
1958  path->startup_cost = startup_cost;
1959  path->total_cost = total_cost;
1960 }
1961 
1962 /*
1963  * cost_windowagg
1964  * Determines and returns the cost of performing a WindowAgg plan node,
1965  * including the cost of its input.
1966  *
1967  * Input is assumed already properly sorted.
1968  */
1969 void
1971  List *windowFuncs, int numPartCols, int numOrderCols,
1972  Cost input_startup_cost, Cost input_total_cost,
1973  double input_tuples)
1974 {
1975  Cost startup_cost;
1976  Cost total_cost;
1977  ListCell *lc;
1978 
1979  startup_cost = input_startup_cost;
1980  total_cost = input_total_cost;
1981 
1982  /*
1983  * Window functions are assumed to cost their stated execution cost, plus
1984  * the cost of evaluating their input expressions, per tuple. Since they
1985  * may in fact evaluate their inputs at multiple rows during each cycle,
1986  * this could be a drastic underestimate; but without a way to know how
1987  * many rows the window function will fetch, it's hard to do better. In
1988  * any case, it's a good estimate for all the built-in window functions,
1989  * so we'll just do this for now.
1990  */
1991  foreach(lc, windowFuncs)
1992  {
1993  WindowFunc *wfunc = lfirst_node(WindowFunc, lc);
1994  Cost wfunccost;
1995  QualCost argcosts;
1996 
1997  wfunccost = get_func_cost(wfunc->winfnoid) * cpu_operator_cost;
1998 
1999  /* also add the input expressions' cost to per-input-row costs */
2000  cost_qual_eval_node(&argcosts, (Node *) wfunc->args, root);
2001  startup_cost += argcosts.startup;
2002  wfunccost += argcosts.per_tuple;
2003 
2004  /*
2005  * Add the filter's cost to per-input-row costs. XXX We should reduce
2006  * input expression costs according to filter selectivity.
2007  */
2008  cost_qual_eval_node(&argcosts, (Node *) wfunc->aggfilter, root);
2009  startup_cost += argcosts.startup;
2010  wfunccost += argcosts.per_tuple;
2011 
2012  total_cost += wfunccost * input_tuples;
2013  }
2014 
2015  /*
2016  * We also charge cpu_operator_cost per grouping column per tuple for
2017  * grouping comparisons, plus cpu_tuple_cost per tuple for general
2018  * overhead.
2019  *
2020  * XXX this neglects costs of spooling the data to disk when it overflows
2021  * work_mem. Sooner or later that should get accounted for.
2022  */
2023  total_cost += cpu_operator_cost * (numPartCols + numOrderCols) * input_tuples;
2024  total_cost += cpu_tuple_cost * input_tuples;
2025 
2026  path->rows = input_tuples;
2027  path->startup_cost = startup_cost;
2028  path->total_cost = total_cost;
2029 }
2030 
2031 /*
2032  * cost_group
2033  * Determines and returns the cost of performing a Group plan node,
2034  * including the cost of its input.
2035  *
2036  * Note: caller must ensure that input costs are for appropriately-sorted
2037  * input.
2038  */
2039 void
2041  int numGroupCols, double numGroups,
2042  Cost input_startup_cost, Cost input_total_cost,
2043  double input_tuples)
2044 {
2045  Cost startup_cost;
2046  Cost total_cost;
2047 
2048  startup_cost = input_startup_cost;
2049  total_cost = input_total_cost;
2050 
2051  /*
2052  * Charge one cpu_operator_cost per comparison per input tuple. We assume
2053  * all columns get compared at most of the tuples.
2054  */
2055  total_cost += cpu_operator_cost * input_tuples * numGroupCols;
2056 
2057  path->rows = numGroups;
2058  path->startup_cost = startup_cost;
2059  path->total_cost = total_cost;
2060 }
2061 
2062 /*
2063  * initial_cost_nestloop
2064  * Preliminary estimate of the cost of a nestloop join path.
2065  *
2066  * This must quickly produce lower-bound estimates of the path's startup and
2067  * total costs. If we are unable to eliminate the proposed path from
2068  * consideration using the lower bounds, final_cost_nestloop will be called
2069  * to obtain the final estimates.
2070  *
2071  * The exact division of labor between this function and final_cost_nestloop
2072  * is private to them, and represents a tradeoff between speed of the initial
2073  * estimate and getting a tight lower bound. We choose to not examine the
2074  * join quals here, since that's by far the most expensive part of the
2075  * calculations. The end result is that CPU-cost considerations must be
2076  * left for the second phase; and for SEMI/ANTI joins, we must also postpone
2077  * incorporation of the inner path's run cost.
2078  *
2079  * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
2080  * other data to be used by final_cost_nestloop
2081  * 'jointype' is the type of join to be performed
2082  * 'outer_path' is the outer input to the join
2083  * 'inner_path' is the inner input to the join
2084  * 'extra' contains miscellaneous information about the join
2085  */
2086 void
2088  JoinType jointype,
2089  Path *outer_path, Path *inner_path,
2090  JoinPathExtraData *extra)
2091 {
2092  Cost startup_cost = 0;
2093  Cost run_cost = 0;
2094  double outer_path_rows = outer_path->rows;
2095  Cost inner_rescan_start_cost;
2096  Cost inner_rescan_total_cost;
2097  Cost inner_run_cost;
2098  Cost inner_rescan_run_cost;
2099 
2100  /* estimate costs to rescan the inner relation */
2101  cost_rescan(root, inner_path,
2102  &inner_rescan_start_cost,
2103  &inner_rescan_total_cost);
2104 
2105  /* cost of source data */
2106 
2107  /*
2108  * NOTE: clearly, we must pay both outer and inner paths' startup_cost
2109  * before we can start returning tuples, so the join's startup cost is
2110  * their sum. We'll also pay the inner path's rescan startup cost
2111  * multiple times.
2112  */
2113  startup_cost += outer_path->startup_cost + inner_path->startup_cost;
2114  run_cost += outer_path->total_cost - outer_path->startup_cost;
2115  if (outer_path_rows > 1)
2116  run_cost += (outer_path_rows - 1) * inner_rescan_start_cost;
2117 
2118  inner_run_cost = inner_path->total_cost - inner_path->startup_cost;
2119  inner_rescan_run_cost = inner_rescan_total_cost - inner_rescan_start_cost;
2120 
2121  if (jointype == JOIN_SEMI || jointype == JOIN_ANTI ||
2122  extra->inner_unique)
2123  {
2124  /*
2125  * With a SEMI or ANTI join, or if the innerrel is known unique, the
2126  * executor will stop after the first match.
2127  *
2128  * Getting decent estimates requires inspection of the join quals,
2129  * which we choose to postpone to final_cost_nestloop.
2130  */
2131 
2132  /* Save private data for final_cost_nestloop */
2133  workspace->inner_run_cost = inner_run_cost;
2134  workspace->inner_rescan_run_cost = inner_rescan_run_cost;
2135  }
2136  else
2137  {
2138  /* Normal case; we'll scan whole input rel for each outer row */
2139  run_cost += inner_run_cost;
2140  if (outer_path_rows > 1)
2141  run_cost += (outer_path_rows - 1) * inner_rescan_run_cost;
2142  }
2143 
2144  /* CPU costs left for later */
2145 
2146  /* Public result fields */
2147  workspace->startup_cost = startup_cost;
2148  workspace->total_cost = startup_cost + run_cost;
2149  /* Save private data for final_cost_nestloop */
2150  workspace->run_cost = run_cost;
2151 }
2152 
2153 /*
2154  * final_cost_nestloop
2155  * Final estimate of the cost and result size of a nestloop join path.
2156  *
2157  * 'path' is already filled in except for the rows and cost fields
2158  * 'workspace' is the result from initial_cost_nestloop
2159  * 'extra' contains miscellaneous information about the join
2160  */
2161 void
2163  JoinCostWorkspace *workspace,
2164  JoinPathExtraData *extra)
2165 {
2166  Path *outer_path = path->outerjoinpath;
2167  Path *inner_path = path->innerjoinpath;
2168  double outer_path_rows = outer_path->rows;
2169  double inner_path_rows = inner_path->rows;
2170  Cost startup_cost = workspace->startup_cost;
2171  Cost run_cost = workspace->run_cost;
2172  Cost cpu_per_tuple;
2173  QualCost restrict_qual_cost;
2174  double ntuples;
2175 
2176  /* Protect some assumptions below that rowcounts aren't zero or NaN */
2177  if (outer_path_rows <= 0 || isnan(outer_path_rows))
2178  outer_path_rows = 1;
2179  if (inner_path_rows <= 0 || isnan(inner_path_rows))
2180  inner_path_rows = 1;
2181 
2182  /* Mark the path with the correct row estimate */
2183  if (path->path.param_info)
2184  path->path.rows = path->path.param_info->ppi_rows;
2185  else
2186  path->path.rows = path->path.parent->rows;
2187 
2188  /* For partial paths, scale row estimate. */
2189  if (path->path.parallel_workers > 0)
2190  {
2191  double parallel_divisor = get_parallel_divisor(&path->path);
2192 
2193  path->path.rows =
2194  clamp_row_est(path->path.rows / parallel_divisor);
2195  }
2196 
2197  /*
2198  * We could include disable_cost in the preliminary estimate, but that
2199  * would amount to optimizing for the case where the join method is
2200  * disabled, which doesn't seem like the way to bet.
2201  */
2202  if (!enable_nestloop)
2203  startup_cost += disable_cost;
2204 
2205  /* cost of inner-relation source data (we already dealt with outer rel) */
2206 
2207  if (path->jointype == JOIN_SEMI || path->jointype == JOIN_ANTI ||
2208  extra->inner_unique)
2209  {
2210  /*
2211  * With a SEMI or ANTI join, or if the innerrel is known unique, the
2212  * executor will stop after the first match.
2213  */
2214  Cost inner_run_cost = workspace->inner_run_cost;
2215  Cost inner_rescan_run_cost = workspace->inner_rescan_run_cost;
2216  double outer_matched_rows;
2217  double outer_unmatched_rows;
2218  Selectivity inner_scan_frac;
2219 
2220  /*
2221  * For an outer-rel row that has at least one match, we can expect the
2222  * inner scan to stop after a fraction 1/(match_count+1) of the inner
2223  * rows, if the matches are evenly distributed. Since they probably
2224  * aren't quite evenly distributed, we apply a fuzz factor of 2.0 to
2225  * that fraction. (If we used a larger fuzz factor, we'd have to
2226  * clamp inner_scan_frac to at most 1.0; but since match_count is at
2227  * least 1, no such clamp is needed now.)
2228  */
2229  outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
2230  outer_unmatched_rows = outer_path_rows - outer_matched_rows;
2231  inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
2232 
2233  /*
2234  * Compute number of tuples processed (not number emitted!). First,
2235  * account for successfully-matched outer rows.
2236  */
2237  ntuples = outer_matched_rows * inner_path_rows * inner_scan_frac;
2238 
2239  /*
2240  * Now we need to estimate the actual costs of scanning the inner
2241  * relation, which may be quite a bit less than N times inner_run_cost
2242  * due to early scan stops. We consider two cases. If the inner path
2243  * is an indexscan using all the joinquals as indexquals, then an
2244  * unmatched outer row results in an indexscan returning no rows,
2245  * which is probably quite cheap. Otherwise, the executor will have
2246  * to scan the whole inner rel for an unmatched row; not so cheap.
2247  */
2248  if (has_indexed_join_quals(path))
2249  {
2250  /*
2251  * Successfully-matched outer rows will only require scanning
2252  * inner_scan_frac of the inner relation. In this case, we don't
2253  * need to charge the full inner_run_cost even when that's more
2254  * than inner_rescan_run_cost, because we can assume that none of
2255  * the inner scans ever scan the whole inner relation. So it's
2256  * okay to assume that all the inner scan executions can be
2257  * fractions of the full cost, even if materialization is reducing
2258  * the rescan cost. At this writing, it's impossible to get here
2259  * for a materialized inner scan, so inner_run_cost and
2260  * inner_rescan_run_cost will be the same anyway; but just in
2261  * case, use inner_run_cost for the first matched tuple and
2262  * inner_rescan_run_cost for additional ones.
2263  */
2264  run_cost += inner_run_cost * inner_scan_frac;
2265  if (outer_matched_rows > 1)
2266  run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
2267 
2268  /*
2269  * Add the cost of inner-scan executions for unmatched outer rows.
2270  * We estimate this as the same cost as returning the first tuple
2271  * of a nonempty scan. We consider that these are all rescans,
2272  * since we used inner_run_cost once already.
2273  */
2274  run_cost += outer_unmatched_rows *
2275  inner_rescan_run_cost / inner_path_rows;
2276 
2277  /*
2278  * We won't be evaluating any quals at all for unmatched rows, so
2279  * don't add them to ntuples.
2280  */
2281  }
2282  else
2283  {
2284  /*
2285  * Here, a complicating factor is that rescans may be cheaper than
2286  * first scans. If we never scan all the way to the end of the
2287  * inner rel, it might be (depending on the plan type) that we'd
2288  * never pay the whole inner first-scan run cost. However it is
2289  * difficult to estimate whether that will happen (and it could
2290  * not happen if there are any unmatched outer rows!), so be
2291  * conservative and always charge the whole first-scan cost once.
2292  * We consider this charge to correspond to the first unmatched
2293  * outer row, unless there isn't one in our estimate, in which
2294  * case blame it on the first matched row.
2295  */
2296 
2297  /* First, count all unmatched join tuples as being processed */
2298  ntuples += outer_unmatched_rows * inner_path_rows;
2299 
2300  /* Now add the forced full scan, and decrement appropriate count */
2301  run_cost += inner_run_cost;
2302  if (outer_unmatched_rows >= 1)
2303  outer_unmatched_rows -= 1;
2304  else
2305  outer_matched_rows -= 1;
2306 
2307  /* Add inner run cost for additional outer tuples having matches */
2308  if (outer_matched_rows > 0)
2309  run_cost += outer_matched_rows * inner_rescan_run_cost * inner_scan_frac;
2310 
2311  /* Add inner run cost for additional unmatched outer tuples */
2312  if (outer_unmatched_rows > 0)
2313  run_cost += outer_unmatched_rows * inner_rescan_run_cost;
2314  }
2315  }
2316  else
2317  {
2318  /* Normal-case source costs were included in preliminary estimate */
2319 
2320  /* Compute number of tuples processed (not number emitted!) */
2321  ntuples = outer_path_rows * inner_path_rows;
2322  }
2323 
2324  /* CPU costs */
2325  cost_qual_eval(&restrict_qual_cost, path->joinrestrictinfo, root);
2326  startup_cost += restrict_qual_cost.startup;
2327  cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
2328  run_cost += cpu_per_tuple * ntuples;
2329 
2330  /* tlist eval costs are paid per output row, not per tuple scanned */
2331  startup_cost += path->path.pathtarget->cost.startup;
2332  run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
2333 
2334  path->path.startup_cost = startup_cost;
2335  path->path.total_cost = startup_cost + run_cost;
2336 }
2337 
2338 /*
2339  * initial_cost_mergejoin
2340  * Preliminary estimate of the cost of a mergejoin path.
2341  *
2342  * This must quickly produce lower-bound estimates of the path's startup and
2343  * total costs. If we are unable to eliminate the proposed path from
2344  * consideration using the lower bounds, final_cost_mergejoin will be called
2345  * to obtain the final estimates.
2346  *
2347  * The exact division of labor between this function and final_cost_mergejoin
2348  * is private to them, and represents a tradeoff between speed of the initial
2349  * estimate and getting a tight lower bound. We choose to not examine the
2350  * join quals here, except for obtaining the scan selectivity estimate which
2351  * is really essential (but fortunately, use of caching keeps the cost of
2352  * getting that down to something reasonable).
2353  * We also assume that cost_sort is cheap enough to use here.
2354  *
2355  * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
2356  * other data to be used by final_cost_mergejoin
2357  * 'jointype' is the type of join to be performed
2358  * 'mergeclauses' is the list of joinclauses to be used as merge clauses
2359  * 'outer_path' is the outer input to the join
2360  * 'inner_path' is the inner input to the join
2361  * 'outersortkeys' is the list of sort keys for the outer path
2362  * 'innersortkeys' is the list of sort keys for the inner path
2363  * 'extra' contains miscellaneous information about the join
2364  *
2365  * Note: outersortkeys and innersortkeys should be NIL if no explicit
2366  * sort is needed because the respective source path is already ordered.
2367  */
2368 void
2370  JoinType jointype,
2371  List *mergeclauses,
2372  Path *outer_path, Path *inner_path,
2373  List *outersortkeys, List *innersortkeys,
2374  JoinPathExtraData *extra)
2375 {
2376  Cost startup_cost = 0;
2377  Cost run_cost = 0;
2378  double outer_path_rows = outer_path->rows;
2379  double inner_path_rows = inner_path->rows;
2380  Cost inner_run_cost;
2381  double outer_rows,
2382  inner_rows,
2383  outer_skip_rows,
2384  inner_skip_rows;
2385  Selectivity outerstartsel,
2386  outerendsel,
2387  innerstartsel,
2388  innerendsel;
2389  Path sort_path; /* dummy for result of cost_sort */
2390 
2391  /* Protect some assumptions below that rowcounts aren't zero or NaN */
2392  if (outer_path_rows <= 0 || isnan(outer_path_rows))
2393  outer_path_rows = 1;
2394  if (inner_path_rows <= 0 || isnan(inner_path_rows))
2395  inner_path_rows = 1;
2396 
2397  /*
2398  * A merge join will stop as soon as it exhausts either input stream
2399  * (unless it's an outer join, in which case the outer side has to be
2400  * scanned all the way anyway). Estimate fraction of the left and right
2401  * inputs that will actually need to be scanned. Likewise, we can
2402  * estimate the number of rows that will be skipped before the first join
2403  * pair is found, which should be factored into startup cost. We use only
2404  * the first (most significant) merge clause for this purpose. Since
2405  * mergejoinscansel() is a fairly expensive computation, we cache the
2406  * results in the merge clause RestrictInfo.
2407  */
2408  if (mergeclauses && jointype != JOIN_FULL)
2409  {
2410  RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
2411  List *opathkeys;
2412  List *ipathkeys;
2413  PathKey *opathkey;
2414  PathKey *ipathkey;
2415  MergeScanSelCache *cache;
2416 
2417  /* Get the input pathkeys to determine the sort-order details */
2418  opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
2419  ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
2420  Assert(opathkeys);
2421  Assert(ipathkeys);
2422  opathkey = (PathKey *) linitial(opathkeys);
2423  ipathkey = (PathKey *) linitial(ipathkeys);
2424  /* debugging check */
2425  if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
2426  opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation ||
2427  opathkey->pk_strategy != ipathkey->pk_strategy ||
2428  opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
2429  elog(ERROR, "left and right pathkeys do not match in mergejoin");
2430 
2431  /* Get the selectivity with caching */
2432  cache = cached_scansel(root, firstclause, opathkey);
2433 
2434  if (bms_is_subset(firstclause->left_relids,
2435  outer_path->parent->relids))
2436  {
2437  /* left side of clause is outer */
2438  outerstartsel = cache->leftstartsel;
2439  outerendsel = cache->leftendsel;
2440  innerstartsel = cache->rightstartsel;
2441  innerendsel = cache->rightendsel;
2442  }
2443  else
2444  {
2445  /* left side of clause is inner */
2446  outerstartsel = cache->rightstartsel;
2447  outerendsel = cache->rightendsel;
2448  innerstartsel = cache->leftstartsel;
2449  innerendsel = cache->leftendsel;
2450  }
2451  if (jointype == JOIN_LEFT ||
2452  jointype == JOIN_ANTI)
2453  {
2454  outerstartsel = 0.0;
2455  outerendsel = 1.0;
2456  }
2457  else if (jointype == JOIN_RIGHT)
2458  {
2459  innerstartsel = 0.0;
2460  innerendsel = 1.0;
2461  }
2462  }
2463  else
2464  {
2465  /* cope with clauseless or full mergejoin */
2466  outerstartsel = innerstartsel = 0.0;
2467  outerendsel = innerendsel = 1.0;
2468  }
2469 
2470  /*
2471  * Convert selectivities to row counts. We force outer_rows and
2472  * inner_rows to be at least 1, but the skip_rows estimates can be zero.
2473  */
2474  outer_skip_rows = rint(outer_path_rows * outerstartsel);
2475  inner_skip_rows = rint(inner_path_rows * innerstartsel);
2476  outer_rows = clamp_row_est(outer_path_rows * outerendsel);
2477  inner_rows = clamp_row_est(inner_path_rows * innerendsel);
2478 
2479  Assert(outer_skip_rows <= outer_rows);
2480  Assert(inner_skip_rows <= inner_rows);
2481 
2482  /*
2483  * Readjust scan selectivities to account for above rounding. This is
2484  * normally an insignificant effect, but when there are only a few rows in
2485  * the inputs, failing to do this makes for a large percentage error.
2486  */
2487  outerstartsel = outer_skip_rows / outer_path_rows;
2488  innerstartsel = inner_skip_rows / inner_path_rows;
2489  outerendsel = outer_rows / outer_path_rows;
2490  innerendsel = inner_rows / inner_path_rows;
2491 
2492  Assert(outerstartsel <= outerendsel);
2493  Assert(innerstartsel <= innerendsel);
2494 
2495  /* cost of source data */
2496 
2497  if (outersortkeys) /* do we need to sort outer? */
2498  {
2499  cost_sort(&sort_path,
2500  root,
2501  outersortkeys,
2502  outer_path->total_cost,
2503  outer_path_rows,
2504  outer_path->pathtarget->width,
2505  0.0,
2506  work_mem,
2507  -1.0);
2508  startup_cost += sort_path.startup_cost;
2509  startup_cost += (sort_path.total_cost - sort_path.startup_cost)
2510  * outerstartsel;
2511  run_cost += (sort_path.total_cost - sort_path.startup_cost)
2512  * (outerendsel - outerstartsel);
2513  }
2514  else
2515  {
2516  startup_cost += outer_path->startup_cost;
2517  startup_cost += (outer_path->total_cost - outer_path->startup_cost)
2518  * outerstartsel;
2519  run_cost += (outer_path->total_cost - outer_path->startup_cost)
2520  * (outerendsel - outerstartsel);
2521  }
2522 
2523  if (innersortkeys) /* do we need to sort inner? */
2524  {
2525  cost_sort(&sort_path,
2526  root,
2527  innersortkeys,
2528  inner_path->total_cost,
2529  inner_path_rows,
2530  inner_path->pathtarget->width,
2531  0.0,
2532  work_mem,
2533  -1.0);
2534  startup_cost += sort_path.startup_cost;
2535  startup_cost += (sort_path.total_cost - sort_path.startup_cost)
2536  * innerstartsel;
2537  inner_run_cost = (sort_path.total_cost - sort_path.startup_cost)
2538  * (innerendsel - innerstartsel);
2539  }
2540  else
2541  {
2542  startup_cost += inner_path->startup_cost;
2543  startup_cost += (inner_path->total_cost - inner_path->startup_cost)
2544  * innerstartsel;
2545  inner_run_cost = (inner_path->total_cost - inner_path->startup_cost)
2546  * (innerendsel - innerstartsel);
2547  }
2548 
2549  /*
2550  * We can't yet determine whether rescanning occurs, or whether
2551  * materialization of the inner input should be done. The minimum
2552  * possible inner input cost, regardless of rescan and materialization
2553  * considerations, is inner_run_cost. We include that in
2554  * workspace->total_cost, but not yet in run_cost.
2555  */
2556 
2557  /* CPU costs left for later */
2558 
2559  /* Public result fields */
2560  workspace->startup_cost = startup_cost;
2561  workspace->total_cost = startup_cost + run_cost + inner_run_cost;
2562  /* Save private data for final_cost_mergejoin */
2563  workspace->run_cost = run_cost;
2564  workspace->inner_run_cost = inner_run_cost;
2565  workspace->outer_rows = outer_rows;
2566  workspace->inner_rows = inner_rows;
2567  workspace->outer_skip_rows = outer_skip_rows;
2568  workspace->inner_skip_rows = inner_skip_rows;
2569 }
2570 
2571 /*
2572  * final_cost_mergejoin
2573  * Final estimate of the cost and result size of a mergejoin path.
2574  *
2575  * Unlike other costsize functions, this routine makes two actual decisions:
2576  * whether the executor will need to do mark/restore, and whether we should
2577  * materialize the inner path. It would be logically cleaner to build
2578  * separate paths testing these alternatives, but that would require repeating
2579  * most of the cost calculations, which are not all that cheap. Since the
2580  * choice will not affect output pathkeys or startup cost, only total cost,
2581  * there is no possibility of wanting to keep more than one path. So it seems
2582  * best to make the decisions here and record them in the path's
2583  * skip_mark_restore and materialize_inner fields.
2584  *
2585  * Mark/restore overhead is usually required, but can be skipped if we know
2586  * that the executor need find only one match per outer tuple, and that the
2587  * mergeclauses are sufficient to identify a match.
2588  *
2589  * We materialize the inner path if we need mark/restore and either the inner
2590  * path can't support mark/restore, or it's cheaper to use an interposed
2591  * Material node to handle mark/restore.
2592  *
2593  * 'path' is already filled in except for the rows and cost fields and
2594  * skip_mark_restore and materialize_inner
2595  * 'workspace' is the result from initial_cost_mergejoin
2596  * 'extra' contains miscellaneous information about the join
2597  */
2598 void
2600  JoinCostWorkspace *workspace,
2601  JoinPathExtraData *extra)
2602 {
2603  Path *outer_path = path->jpath.outerjoinpath;
2604  Path *inner_path = path->jpath.innerjoinpath;
2605  double inner_path_rows = inner_path->rows;
2606  List *mergeclauses = path->path_mergeclauses;
2607  List *innersortkeys = path->innersortkeys;
2608  Cost startup_cost = workspace->startup_cost;
2609  Cost run_cost = workspace->run_cost;
2610  Cost inner_run_cost = workspace->inner_run_cost;
2611  double outer_rows = workspace->outer_rows;
2612  double inner_rows = workspace->inner_rows;
2613  double outer_skip_rows = workspace->outer_skip_rows;
2614  double inner_skip_rows = workspace->inner_skip_rows;
2615  Cost cpu_per_tuple,
2616  bare_inner_cost,
2617  mat_inner_cost;
2618  QualCost merge_qual_cost;
2619  QualCost qp_qual_cost;
2620  double mergejointuples,
2621  rescannedtuples;
2622  double rescanratio;
2623 
2624  /* Protect some assumptions below that rowcounts aren't zero or NaN */
2625  if (inner_path_rows <= 0 || isnan(inner_path_rows))
2626  inner_path_rows = 1;
2627 
2628  /* Mark the path with the correct row estimate */
2629  if (path->jpath.path.param_info)
2630  path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
2631  else
2632  path->jpath.path.rows = path->jpath.path.parent->rows;
2633 
2634  /* For partial paths, scale row estimate. */
2635  if (path->jpath.path.parallel_workers > 0)
2636  {
2637  double parallel_divisor = get_parallel_divisor(&path->jpath.path);
2638 
2639  path->jpath.path.rows =
2640  clamp_row_est(path->jpath.path.rows / parallel_divisor);
2641  }
2642 
2643  /*
2644  * We could include disable_cost in the preliminary estimate, but that
2645  * would amount to optimizing for the case where the join method is
2646  * disabled, which doesn't seem like the way to bet.
2647  */
2648  if (!enable_mergejoin)
2649  startup_cost += disable_cost;
2650 
2651  /*
2652  * Compute cost of the mergequals and qpquals (other restriction clauses)
2653  * separately.
2654  */
2655  cost_qual_eval(&merge_qual_cost, mergeclauses, root);
2656  cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
2657  qp_qual_cost.startup -= merge_qual_cost.startup;
2658  qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
2659 
2660  /*
2661  * With a SEMI or ANTI join, or if the innerrel is known unique, the
2662  * executor will stop scanning for matches after the first match. When
2663  * all the joinclauses are merge clauses, this means we don't ever need to
2664  * back up the merge, and so we can skip mark/restore overhead.
2665  */
2666  if ((path->jpath.jointype == JOIN_SEMI ||
2667  path->jpath.jointype == JOIN_ANTI ||
2668  extra->inner_unique) &&
2671  path->skip_mark_restore = true;
2672  else
2673  path->skip_mark_restore = false;
2674 
2675  /*
2676  * Get approx # tuples passing the mergequals. We use approx_tuple_count
2677  * here because we need an estimate done with JOIN_INNER semantics.
2678  */
2679  mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);
2680 
2681  /*
2682  * When there are equal merge keys in the outer relation, the mergejoin
2683  * must rescan any matching tuples in the inner relation. This means
2684  * re-fetching inner tuples; we have to estimate how often that happens.
2685  *
2686  * For regular inner and outer joins, the number of re-fetches can be
2687  * estimated approximately as size of merge join output minus size of
2688  * inner relation. Assume that the distinct key values are 1, 2, ..., and
2689  * denote the number of values of each key in the outer relation as m1,
2690  * m2, ...; in the inner relation, n1, n2, ... Then we have
2691  *
2692  * size of join = m1 * n1 + m2 * n2 + ...
2693  *
2694  * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
2695  * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
2696  * relation
2697  *
2698  * This equation works correctly for outer tuples having no inner match
2699  * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
2700  * are effectively subtracting those from the number of rescanned tuples,
2701  * when we should not. Can we do better without expensive selectivity
2702  * computations?
2703  *
2704  * The whole issue is moot if we are working from a unique-ified outer
2705  * input, or if we know we don't need to mark/restore at all.
2706  */
2707  if (IsA(outer_path, UniquePath) ||path->skip_mark_restore)
2708  rescannedtuples = 0;
2709  else
2710  {
2711  rescannedtuples = mergejointuples - inner_path_rows;
2712  /* Must clamp because of possible underestimate */
2713  if (rescannedtuples < 0)
2714  rescannedtuples = 0;
2715  }
2716  /* We'll inflate various costs this much to account for rescanning */
2717  rescanratio = 1.0 + (rescannedtuples / inner_path_rows);
2718 
2719  /*
2720  * Decide whether we want to materialize the inner input to shield it from
2721  * mark/restore and performing re-fetches. Our cost model for regular
2722  * re-fetches is that a re-fetch costs the same as an original fetch,
2723  * which is probably an overestimate; but on the other hand we ignore the
2724  * bookkeeping costs of mark/restore. Not clear if it's worth developing
2725  * a more refined model. So we just need to inflate the inner run cost by
2726  * rescanratio.
2727  */
2728  bare_inner_cost = inner_run_cost * rescanratio;
2729 
2730  /*
2731  * When we interpose a Material node the re-fetch cost is assumed to be
2732  * just cpu_operator_cost per tuple, independently of the underlying
2733  * plan's cost; and we charge an extra cpu_operator_cost per original
2734  * fetch as well. Note that we're assuming the materialize node will
2735  * never spill to disk, since it only has to remember tuples back to the
2736  * last mark. (If there are a huge number of duplicates, our other cost
2737  * factors will make the path so expensive that it probably won't get
2738  * chosen anyway.) So we don't use cost_rescan here.
2739  *
2740  * Note: keep this estimate in sync with create_mergejoin_plan's labeling
2741  * of the generated Material node.
2742  */
2743  mat_inner_cost = inner_run_cost +
2744  cpu_operator_cost * inner_path_rows * rescanratio;
2745 
2746  /*
2747  * If we don't need mark/restore at all, we don't need materialization.
2748  */
2749  if (path->skip_mark_restore)
2750  path->materialize_inner = false;
2751 
2752  /*
2753  * Prefer materializing if it looks cheaper, unless the user has asked to
2754  * suppress materialization.
2755  */
2756  else if (enable_material && mat_inner_cost < bare_inner_cost)
2757  path->materialize_inner = true;
2758 
2759  /*
2760  * Even if materializing doesn't look cheaper, we *must* do it if the
2761  * inner path is to be used directly (without sorting) and it doesn't
2762  * support mark/restore.
2763  *
2764  * Since the inner side must be ordered, and only Sorts and IndexScans can
2765  * create order to begin with, and they both support mark/restore, you
2766  * might think there's no problem --- but you'd be wrong. Nestloop and
2767  * merge joins can *preserve* the order of their inputs, so they can be
2768  * selected as the input of a mergejoin, and they don't support
2769  * mark/restore at present.
2770  *
2771  * We don't test the value of enable_material here, because
2772  * materialization is required for correctness in this case, and turning
2773  * it off does not entitle us to deliver an invalid plan.
2774  */
2775  else if (innersortkeys == NIL &&
2776  !ExecSupportsMarkRestore(inner_path))
2777  path->materialize_inner = true;
2778 
2779  /*
2780  * Also, force materializing if the inner path is to be sorted and the
2781  * sort is expected to spill to disk. This is because the final merge
2782  * pass can be done on-the-fly if it doesn't have to support mark/restore.
2783  * We don't try to adjust the cost estimates for this consideration,
2784  * though.
2785  *
2786  * Since materialization is a performance optimization in this case,
2787  * rather than necessary for correctness, we skip it if enable_material is
2788  * off.
2789  */
2790  else if (enable_material && innersortkeys != NIL &&
2791  relation_byte_size(inner_path_rows,
2792  inner_path->pathtarget->width) >
2793  (work_mem * 1024L))
2794  path->materialize_inner = true;
2795  else
2796  path->materialize_inner = false;
2797 
2798  /* Charge the right incremental cost for the chosen case */
2799  if (path->materialize_inner)
2800  run_cost += mat_inner_cost;
2801  else
2802  run_cost += bare_inner_cost;
2803 
2804  /* CPU costs */
2805 
2806  /*
2807  * The number of tuple comparisons needed is approximately number of outer
2808  * rows plus number of inner rows plus number of rescanned tuples (can we
2809  * refine this?). At each one, we need to evaluate the mergejoin quals.
2810  */
2811  startup_cost += merge_qual_cost.startup;
2812  startup_cost += merge_qual_cost.per_tuple *
2813  (outer_skip_rows + inner_skip_rows * rescanratio);
2814  run_cost += merge_qual_cost.per_tuple *
2815  ((outer_rows - outer_skip_rows) +
2816  (inner_rows - inner_skip_rows) * rescanratio);
2817 
2818  /*
2819  * For each tuple that gets through the mergejoin proper, we charge
2820  * cpu_tuple_cost plus the cost of evaluating additional restriction
2821  * clauses that are to be applied at the join. (This is pessimistic since
2822  * not all of the quals may get evaluated at each tuple.)
2823  *
2824  * Note: we could adjust for SEMI/ANTI joins skipping some qual
2825  * evaluations here, but it's probably not worth the trouble.
2826  */
2827  startup_cost += qp_qual_cost.startup;
2828  cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
2829  run_cost += cpu_per_tuple * mergejointuples;
2830 
2831  /* tlist eval costs are paid per output row, not per tuple scanned */
2832  startup_cost += path->jpath.path.pathtarget->cost.startup;
2833  run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
2834 
2835  path->jpath.path.startup_cost = startup_cost;
2836  path->jpath.path.total_cost = startup_cost + run_cost;
2837 }
2838 
2839 /*
2840  * run mergejoinscansel() with caching
2841  */
2842 static MergeScanSelCache *
2844 {
2845  MergeScanSelCache *cache;
2846  ListCell *lc;
2847  Selectivity leftstartsel,
2848  leftendsel,
2849  rightstartsel,
2850  rightendsel;
2851  MemoryContext oldcontext;
2852 
2853  /* Do we have this result already? */
2854  foreach(lc, rinfo->scansel_cache)
2855  {
2856  cache = (MergeScanSelCache *) lfirst(lc);
2857  if (cache->opfamily == pathkey->pk_opfamily &&
2858  cache->collation == pathkey->pk_eclass->ec_collation &&
2859  cache->strategy == pathkey->pk_strategy &&
2860  cache->nulls_first == pathkey->pk_nulls_first)
2861  return cache;
2862  }
2863 
2864  /* Nope, do the computation */
2865  mergejoinscansel(root,
2866  (Node *) rinfo->clause,
2867  pathkey->pk_opfamily,
2868  pathkey->pk_strategy,
2869  pathkey->pk_nulls_first,
2870  &leftstartsel,
2871  &leftendsel,
2872  &rightstartsel,
2873  &rightendsel);
2874 
2875  /* Cache the result in suitably long-lived workspace */
2876  oldcontext = MemoryContextSwitchTo(root->planner_cxt);
2877 
2878  cache = (MergeScanSelCache *) palloc(sizeof(MergeScanSelCache));
2879  cache->opfamily = pathkey->pk_opfamily;
2880  cache->collation = pathkey->pk_eclass->ec_collation;
2881  cache->strategy = pathkey->pk_strategy;
2882  cache->nulls_first = pathkey->pk_nulls_first;
2883  cache->leftstartsel = leftstartsel;
2884  cache->leftendsel = leftendsel;
2885  cache->rightstartsel = rightstartsel;
2886  cache->rightendsel = rightendsel;
2887 
2888  rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);
2889 
2890  MemoryContextSwitchTo(oldcontext);
2891 
2892  return cache;
2893 }
2894 
2895 /*
2896  * initial_cost_hashjoin
2897  * Preliminary estimate of the cost of a hashjoin path.
2898  *
2899  * This must quickly produce lower-bound estimates of the path's startup and
2900  * total costs. If we are unable to eliminate the proposed path from
2901  * consideration using the lower bounds, final_cost_hashjoin will be called
2902  * to obtain the final estimates.
2903  *
2904  * The exact division of labor between this function and final_cost_hashjoin
2905  * is private to them, and represents a tradeoff between speed of the initial
2906  * estimate and getting a tight lower bound. We choose to not examine the
2907  * join quals here (other than by counting the number of hash clauses),
2908  * so we can't do much with CPU costs. We do assume that
2909  * ExecChooseHashTableSize is cheap enough to use here.
2910  *
2911  * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
2912  * other data to be used by final_cost_hashjoin
2913  * 'jointype' is the type of join to be performed
2914  * 'hashclauses' is the list of joinclauses to be used as hash clauses
2915  * 'outer_path' is the outer input to the join
2916  * 'inner_path' is the inner input to the join
2917  * 'extra' contains miscellaneous information about the join
2918  */
2919 void
2921  JoinType jointype,
2922  List *hashclauses,
2923  Path *outer_path, Path *inner_path,
2924  JoinPathExtraData *extra)
2925 {
2926  Cost startup_cost = 0;
2927  Cost run_cost = 0;
2928  double outer_path_rows = outer_path->rows;
2929  double inner_path_rows = inner_path->rows;
2930  int num_hashclauses = list_length(hashclauses);
2931  int numbuckets;
2932  int numbatches;
2933  int num_skew_mcvs;
2934 
2935  /* cost of source data */
2936  startup_cost += outer_path->startup_cost;
2937  run_cost += outer_path->total_cost - outer_path->startup_cost;
2938  startup_cost += inner_path->total_cost;
2939 
2940  /*
2941  * Cost of computing hash function: must do it once per input tuple. We
2942  * charge one cpu_operator_cost for each column's hash function. Also,
2943  * tack on one cpu_tuple_cost per inner row, to model the costs of
2944  * inserting the row into the hashtable.
2945  *
2946  * XXX when a hashclause is more complex than a single operator, we really
2947  * should charge the extra eval costs of the left or right side, as
2948  * appropriate, here. This seems more work than it's worth at the moment.
2949  */
2950  startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
2951  * inner_path_rows;
2952  run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
2953 
2954  /*
2955  * Get hash table size that executor would use for inner relation.
2956  *
2957  * XXX for the moment, always assume that skew optimization will be
2958  * performed. As long as SKEW_WORK_MEM_PERCENT is small, it's not worth
2959  * trying to determine that for sure.
2960  *
2961  * XXX at some point it might be interesting to try to account for skew
2962  * optimization in the cost estimate, but for now, we don't.
2963  */
2964  ExecChooseHashTableSize(inner_path_rows,
2965  inner_path->pathtarget->width,
2966  true, /* useskew */
2967  &numbuckets,
2968  &numbatches,
2969  &num_skew_mcvs);
2970 
2971  /*
2972  * If inner relation is too big then we will need to "batch" the join,
2973  * which implies writing and reading most of the tuples to disk an extra
2974  * time. Charge seq_page_cost per page, since the I/O should be nice and
2975  * sequential. Writing the inner rel counts as startup cost, all the rest
2976  * as run cost.
2977  */
2978  if (numbatches > 1)
2979  {
2980  double outerpages = page_size(outer_path_rows,
2981  outer_path->pathtarget->width);
2982  double innerpages = page_size(inner_path_rows,
2983  inner_path->pathtarget->width);
2984 
2985  startup_cost += seq_page_cost * innerpages;
2986  run_cost += seq_page_cost * (innerpages + 2 * outerpages);
2987  }
2988 
2989  /* CPU costs left for later */
2990 
2991  /* Public result fields */
2992  workspace->startup_cost = startup_cost;
2993  workspace->total_cost = startup_cost + run_cost;
2994  /* Save private data for final_cost_hashjoin */
2995  workspace->run_cost = run_cost;
2996  workspace->numbuckets = numbuckets;
2997  workspace->numbatches = numbatches;
2998 }
2999 
3000 /*
3001  * final_cost_hashjoin
3002  * Final estimate of the cost and result size of a hashjoin path.
3003  *
3004  * Note: the numbatches estimate is also saved into 'path' for use later
3005  *
3006  * 'path' is already filled in except for the rows and cost fields and
3007  * num_batches
3008  * 'workspace' is the result from initial_cost_hashjoin
3009  * 'extra' contains miscellaneous information about the join
3010  */
3011 void
3013  JoinCostWorkspace *workspace,
3014  JoinPathExtraData *extra)
3015 {
3016  Path *outer_path = path->jpath.outerjoinpath;
3017  Path *inner_path = path->jpath.innerjoinpath;
3018  double outer_path_rows = outer_path->rows;
3019  double inner_path_rows = inner_path->rows;
3020  List *hashclauses = path->path_hashclauses;
3021  Cost startup_cost = workspace->startup_cost;
3022  Cost run_cost = workspace->run_cost;
3023  int numbuckets = workspace->numbuckets;
3024  int numbatches = workspace->numbatches;
3025  Cost cpu_per_tuple;
3026  QualCost hash_qual_cost;
3027  QualCost qp_qual_cost;
3028  double hashjointuples;
3029  double virtualbuckets;
3030  Selectivity innerbucketsize;
3031  Selectivity innermcvfreq;
3032  ListCell *hcl;
3033 
3034  /* Mark the path with the correct row estimate */
3035  if (path->jpath.path.param_info)
3036  path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
3037  else
3038  path->jpath.path.rows = path->jpath.path.parent->rows;
3039 
3040  /* For partial paths, scale row estimate. */
3041  if (path->jpath.path.parallel_workers > 0)
3042  {
3043  double parallel_divisor = get_parallel_divisor(&path->jpath.path);
3044 
3045  path->jpath.path.rows =
3046  clamp_row_est(path->jpath.path.rows / parallel_divisor);
3047  }
3048 
3049  /*
3050  * We could include disable_cost in the preliminary estimate, but that
3051  * would amount to optimizing for the case where the join method is
3052  * disabled, which doesn't seem like the way to bet.
3053  */
3054  if (!enable_hashjoin)
3055  startup_cost += disable_cost;
3056 
3057  /* mark the path with estimated # of batches */
3058  path->num_batches = numbatches;
3059 
3060  /* and compute the number of "virtual" buckets in the whole join */
3061  virtualbuckets = (double) numbuckets * (double) numbatches;
3062 
3063  /*
3064  * Determine bucketsize fraction and MCV frequency for the inner relation.
3065  * We use the smallest bucketsize or MCV frequency estimated for any
3066  * individual hashclause; this is undoubtedly conservative.
3067  *
3068  * BUT: if inner relation has been unique-ified, we can assume it's good
3069  * for hashing. This is important both because it's the right answer, and
3070  * because we avoid contaminating the cache with a value that's wrong for
3071  * non-unique-ified paths.
3072  */
3073  if (IsA(inner_path, UniquePath))
3074  {
3075  innerbucketsize = 1.0 / virtualbuckets;
3076  innermcvfreq = 0.0;
3077  }
3078  else
3079  {
3080  innerbucketsize = 1.0;
3081  innermcvfreq = 1.0;
3082  foreach(hcl, hashclauses)
3083  {
3084  RestrictInfo *restrictinfo = lfirst_node(RestrictInfo, hcl);
3085  Selectivity thisbucketsize;
3086  Selectivity thismcvfreq;
3087 
3088  /*
3089  * First we have to figure out which side of the hashjoin clause
3090  * is the inner side.
3091  *
3092  * Since we tend to visit the same clauses over and over when
3093  * planning a large query, we cache the bucket stats estimates in
3094  * the RestrictInfo node to avoid repeated lookups of statistics.
3095  */
3096  if (bms_is_subset(restrictinfo->right_relids,
3097  inner_path->parent->relids))
3098  {
3099  /* righthand side is inner */
3100  thisbucketsize = restrictinfo->right_bucketsize;
3101  if (thisbucketsize < 0)
3102  {
3103  /* not cached yet */
3105  get_rightop(restrictinfo->clause),
3106  virtualbuckets,
3107  &restrictinfo->right_mcvfreq,
3108  &restrictinfo->right_bucketsize);
3109  thisbucketsize = restrictinfo->right_bucketsize;
3110  }
3111  thismcvfreq = restrictinfo->right_mcvfreq;
3112  }
3113  else
3114  {
3115  Assert(bms_is_subset(restrictinfo->left_relids,
3116  inner_path->parent->relids));
3117  /* lefthand side is inner */
3118  thisbucketsize = restrictinfo->left_bucketsize;
3119  if (thisbucketsize < 0)
3120  {
3121  /* not cached yet */
3123  get_leftop(restrictinfo->clause),
3124  virtualbuckets,
3125  &restrictinfo->left_mcvfreq,
3126  &restrictinfo->left_bucketsize);
3127  thisbucketsize = restrictinfo->left_bucketsize;
3128  }
3129  thismcvfreq = restrictinfo->left_mcvfreq;
3130  }
3131 
3132  if (innerbucketsize > thisbucketsize)
3133  innerbucketsize = thisbucketsize;
3134  if (innermcvfreq > thismcvfreq)
3135  innermcvfreq = thismcvfreq;
3136  }
3137  }
3138 
3139  /*
3140  * If the bucket holding the inner MCV would exceed work_mem, we don't
3141  * want to hash unless there is really no other alternative, so apply
3142  * disable_cost. (The executor normally copes with excessive memory usage
3143  * by splitting batches, but obviously it cannot separate equal values
3144  * that way, so it will be unable to drive the batch size below work_mem
3145  * when this is true.)
3146  */
3147  if (relation_byte_size(clamp_row_est(inner_path_rows * innermcvfreq),
3148  inner_path->pathtarget->width) >
3149  (work_mem * 1024L))
3150  startup_cost += disable_cost;
3151 
3152  /*
3153  * Compute cost of the hashquals and qpquals (other restriction clauses)
3154  * separately.
3155  */
3156  cost_qual_eval(&hash_qual_cost, hashclauses, root);
3157  cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
3158  qp_qual_cost.startup -= hash_qual_cost.startup;
3159  qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
3160 
3161  /* CPU costs */
3162 
3163  if (path->jpath.jointype == JOIN_SEMI ||
3164  path->jpath.jointype == JOIN_ANTI ||
3165  extra->inner_unique)
3166  {
3167  double outer_matched_rows;
3168  Selectivity inner_scan_frac;
3169 
3170  /*
3171  * With a SEMI or ANTI join, or if the innerrel is known unique, the
3172  * executor will stop after the first match.
3173  *
3174  * For an outer-rel row that has at least one match, we can expect the
3175  * bucket scan to stop after a fraction 1/(match_count+1) of the
3176  * bucket's rows, if the matches are evenly distributed. Since they
3177  * probably aren't quite evenly distributed, we apply a fuzz factor of
3178  * 2.0 to that fraction. (If we used a larger fuzz factor, we'd have
3179  * to clamp inner_scan_frac to at most 1.0; but since match_count is
3180  * at least 1, no such clamp is needed now.)
3181  */
3182  outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
3183  inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
3184 
3185  startup_cost += hash_qual_cost.startup;
3186  run_cost += hash_qual_cost.per_tuple * outer_matched_rows *
3187  clamp_row_est(inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;
3188 
3189  /*
3190  * For unmatched outer-rel rows, the picture is quite a lot different.
3191  * In the first place, there is no reason to assume that these rows
3192  * preferentially hit heavily-populated buckets; instead assume they
3193  * are uncorrelated with the inner distribution and so they see an
3194  * average bucket size of inner_path_rows / virtualbuckets. In the
3195  * second place, it seems likely that they will have few if any exact
3196  * hash-code matches and so very few of the tuples in the bucket will
3197  * actually require eval of the hash quals. We don't have any good
3198  * way to estimate how many will, but for the moment assume that the
3199  * effective cost per bucket entry is one-tenth what it is for
3200  * matchable tuples.
3201  */
3202  run_cost += hash_qual_cost.per_tuple *
3203  (outer_path_rows - outer_matched_rows) *
3204  clamp_row_est(inner_path_rows / virtualbuckets) * 0.05;
3205 
3206  /* Get # of tuples that will pass the basic join */
3207  if (path->jpath.jointype == JOIN_SEMI)
3208  hashjointuples = outer_matched_rows;
3209  else
3210  hashjointuples = outer_path_rows - outer_matched_rows;
3211  }
3212  else
3213  {
3214  /*
3215  * The number of tuple comparisons needed is the number of outer
3216  * tuples times the typical number of tuples in a hash bucket, which
3217  * is the inner relation size times its bucketsize fraction. At each
3218  * one, we need to evaluate the hashjoin quals. But actually,
3219  * charging the full qual eval cost at each tuple is pessimistic,
3220  * since we don't evaluate the quals unless the hash values match
3221  * exactly. For lack of a better idea, halve the cost estimate to
3222  * allow for that.
3223  */
3224  startup_cost += hash_qual_cost.startup;
3225  run_cost += hash_qual_cost.per_tuple * outer_path_rows *
3226  clamp_row_est(inner_path_rows * innerbucketsize) * 0.5;
3227 
3228  /*
3229  * Get approx # tuples passing the hashquals. We use
3230  * approx_tuple_count here because we need an estimate done with
3231  * JOIN_INNER semantics.
3232  */
3233  hashjointuples = approx_tuple_count(root, &path->jpath, hashclauses);
3234  }
3235 
3236  /*
3237  * For each tuple that gets through the hashjoin proper, we charge
3238  * cpu_tuple_cost plus the cost of evaluating additional restriction
3239  * clauses that are to be applied at the join. (This is pessimistic since
3240  * not all of the quals may get evaluated at each tuple.)
3241  */
3242  startup_cost += qp_qual_cost.startup;
3243  cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
3244  run_cost += cpu_per_tuple * hashjointuples;
3245 
3246  /* tlist eval costs are paid per output row, not per tuple scanned */
3247  startup_cost += path->jpath.path.pathtarget->cost.startup;
3248  run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
3249 
3250  path->jpath.path.startup_cost = startup_cost;
3251  path->jpath.path.total_cost = startup_cost + run_cost;
3252 }
3253 
3254 
3255 /*
3256  * cost_subplan
3257  * Figure the costs for a SubPlan (or initplan).
3258  *
3259  * Note: we could dig the subplan's Plan out of the root list, but in practice
3260  * all callers have it handy already, so we make them pass it.
3261  */
3262 void
3263 cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
3264 {
3265  QualCost sp_cost;
3266 
3267  /* Figure any cost for evaluating the testexpr */
3268  cost_qual_eval(&sp_cost,
3269  make_ands_implicit((Expr *) subplan->testexpr),
3270  root);
3271 
3272  if (subplan->useHashTable)
3273  {
3274  /*
3275  * If we are using a hash table for the subquery outputs, then the
3276  * cost of evaluating the query is a one-time cost. We charge one
3277  * cpu_operator_cost per tuple for the work of loading the hashtable,
3278  * too.
3279  */
3280  sp_cost.startup += plan->total_cost +
3281  cpu_operator_cost * plan->plan_rows;
3282 
3283  /*
3284  * The per-tuple costs include the cost of evaluating the lefthand
3285  * expressions, plus the cost of probing the hashtable. We already
3286  * accounted for the lefthand expressions as part of the testexpr, and
3287  * will also have counted one cpu_operator_cost for each comparison
3288  * operator. That is probably too low for the probing cost, but it's
3289  * hard to make a better estimate, so live with it for now.
3290  */
3291  }
3292  else
3293  {
3294  /*
3295  * Otherwise we will be rescanning the subplan output on each
3296  * evaluation. We need to estimate how much of the output we will
3297  * actually need to scan. NOTE: this logic should agree with the
3298  * tuple_fraction estimates used by make_subplan() in
3299  * plan/subselect.c.
3300  */
3301  Cost plan_run_cost = plan->total_cost - plan->startup_cost;
3302 
3303  if (subplan->subLinkType == EXISTS_SUBLINK)
3304  {
3305  /* we only need to fetch 1 tuple; clamp to avoid zero divide */
3306  sp_cost.per_tuple += plan_run_cost / clamp_row_est(plan->plan_rows);
3307  }
3308  else if (subplan->subLinkType == ALL_SUBLINK ||
3309  subplan->subLinkType == ANY_SUBLINK)
3310  {
3311  /* assume we need 50% of the tuples */
3312  sp_cost.per_tuple += 0.50 * plan_run_cost;
3313  /* also charge a cpu_operator_cost per row examined */
3314  sp_cost.per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
3315  }
3316  else
3317  {
3318  /* assume we need all tuples */
3319  sp_cost.per_tuple += plan_run_cost;
3320  }
3321 
3322  /*
3323  * Also account for subplan's startup cost. If the subplan is
3324  * uncorrelated or undirect correlated, AND its topmost node is one
3325  * that materializes its output, assume that we'll only need to pay
3326  * its startup cost once; otherwise assume we pay the startup cost
3327  * every time.
3328  */
3329  if (subplan->parParam == NIL &&
3331  sp_cost.startup += plan->startup_cost;
3332  else
3333  sp_cost.per_tuple += plan->startup_cost;
3334  }
3335 
3336  subplan->startup_cost = sp_cost.startup;
3337  subplan->per_call_cost = sp_cost.per_tuple;
3338 }
3339 
3340 
3341 /*
3342  * cost_rescan
3343  * Given a finished Path, estimate the costs of rescanning it after
3344  * having done so the first time. For some Path types a rescan is
3345  * cheaper than an original scan (if no parameters change), and this
3346  * function embodies knowledge about that. The default is to return
3347  * the same costs stored in the Path. (Note that the cost estimates
3348  * actually stored in Paths are always for first scans.)
3349  *
3350  * This function is not currently intended to model effects such as rescans
3351  * being cheaper due to disk block caching; what we are concerned with is
3352  * plan types wherein the executor caches results explicitly, or doesn't
3353  * redo startup calculations, etc.
3354  */
3355 static void
3357  Cost *rescan_startup_cost, /* output parameters */
3358  Cost *rescan_total_cost)
3359 {
3360  switch (path->pathtype)
3361  {
3362  case T_FunctionScan:
3363 
3364  /*
3365  * Currently, nodeFunctionscan.c always executes the function to
3366  * completion before returning any rows, and caches the results in
3367  * a tuplestore. So the function eval cost is all startup cost
3368  * and isn't paid over again on rescans. However, all run costs
3369  * will be paid over again.
3370  */
3371  *rescan_startup_cost = 0;
3372  *rescan_total_cost = path->total_cost - path->startup_cost;
3373  break;
3374  case T_HashJoin:
3375 
3376  /*
3377  * If it's a single-batch join, we don't need to rebuild the hash
3378  * table during a rescan.
3379  */
3380  if (((HashPath *) path)->num_batches == 1)
3381  {
3382  /* Startup cost is exactly the cost of hash table building */
3383  *rescan_startup_cost = 0;
3384  *rescan_total_cost = path->total_cost - path->startup_cost;
3385  }
3386  else
3387  {
3388  /* Otherwise, no special treatment */
3389  *rescan_startup_cost = path->startup_cost;
3390  *rescan_total_cost = path->total_cost;
3391  }
3392  break;
3393  case T_CteScan:
3394  case T_WorkTableScan:
3395  {
3396  /*
3397  * These plan types materialize their final result in a
3398  * tuplestore or tuplesort object. So the rescan cost is only
3399  * cpu_tuple_cost per tuple, unless the result is large enough
3400  * to spill to disk.
3401  */
3402  Cost run_cost = cpu_tuple_cost * path->rows;
3403  double nbytes = relation_byte_size(path->rows,
3404  path->pathtarget->width);
3405  long work_mem_bytes = work_mem * 1024L;
3406 
3407  if (nbytes > work_mem_bytes)
3408  {
3409  /* It will spill, so account for re-read cost */
3410  double npages = ceil(nbytes / BLCKSZ);
3411 
3412  run_cost += seq_page_cost * npages;
3413  }
3414  *rescan_startup_cost = 0;
3415  *rescan_total_cost = run_cost;
3416  }
3417  break;
3418  case T_Material:
3419  case T_Sort:
3420  {
3421  /*
3422  * These plan types not only materialize their results, but do
3423  * not implement qual filtering or projection. So they are
3424  * even cheaper to rescan than the ones above. We charge only
3425  * cpu_operator_cost per tuple. (Note: keep that in sync with
3426  * the run_cost charge in cost_sort, and also see comments in
3427  * cost_material before you change it.)
3428  */
3429  Cost run_cost = cpu_operator_cost * path->rows;
3430  double nbytes = relation_byte_size(path->rows,
3431  path->pathtarget->width);
3432  long work_mem_bytes = work_mem * 1024L;
3433 
3434  if (nbytes > work_mem_bytes)
3435  {
3436  /* It will spill, so account for re-read cost */
3437  double npages = ceil(nbytes / BLCKSZ);
3438 
3439  run_cost += seq_page_cost * npages;
3440  }
3441  *rescan_startup_cost = 0;
3442  *rescan_total_cost = run_cost;
3443  }
3444  break;
3445  default:
3446  *rescan_startup_cost = path->startup_cost;
3447  *rescan_total_cost = path->total_cost;
3448  break;
3449  }
3450 }
3451 
3452 
3453 /*
3454  * cost_qual_eval
3455  * Estimate the CPU costs of evaluating a WHERE clause.
3456  * The input can be either an implicitly-ANDed list of boolean
3457  * expressions, or a list of RestrictInfo nodes. (The latter is
3458  * preferred since it allows caching of the results.)
3459  * The result includes both a one-time (startup) component,
3460  * and a per-evaluation component.
3461  */
3462 void
3464 {
3465  cost_qual_eval_context context;
3466  ListCell *l;
3467 
3468  context.root = root;
3469  context.total.startup = 0;
3470  context.total.per_tuple = 0;
3471 
3472  /* We don't charge any cost for the implicit ANDing at top level ... */
3473 
3474  foreach(l, quals)
3475  {
3476  Node *qual = (Node *) lfirst(l);
3477 
3478  cost_qual_eval_walker(qual, &context);
3479  }
3480 
3481  *cost = context.total;
3482 }
3483 
3484 /*
3485  * cost_qual_eval_node
3486  * As above, for a single RestrictInfo or expression.
3487  */
3488 void
3490 {
3491  cost_qual_eval_context context;
3492 
3493  context.root = root;
3494  context.total.startup = 0;
3495  context.total.per_tuple = 0;
3496 
3497  cost_qual_eval_walker(qual, &context);
3498 
3499  *cost = context.total;
3500 }
3501 
3502 static bool
3504 {
3505  if (node == NULL)
3506  return false;
3507 
3508  /*
3509  * RestrictInfo nodes contain an eval_cost field reserved for this
3510  * routine's use, so that it's not necessary to evaluate the qual clause's
3511  * cost more than once. If the clause's cost hasn't been computed yet,
3512  * the field's startup value will contain -1.
3513  */
3514  if (IsA(node, RestrictInfo))
3515  {
3516  RestrictInfo *rinfo = (RestrictInfo *) node;
3517 
3518  if (rinfo->eval_cost.startup < 0)
3519  {
3520  cost_qual_eval_context locContext;
3521 
3522  locContext.root = context->root;
3523  locContext.total.startup = 0;
3524  locContext.total.per_tuple = 0;
3525 
3526  /*
3527  * For an OR clause, recurse into the marked-up tree so that we
3528  * set the eval_cost for contained RestrictInfos too.
3529  */
3530  if (rinfo->orclause)
3531  cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
3532  else
3533  cost_qual_eval_walker((Node *) rinfo->clause, &locContext);
3534 
3535  /*
3536  * If the RestrictInfo is marked pseudoconstant, it will be tested
3537  * only once, so treat its cost as all startup cost.
3538  */
3539  if (rinfo->pseudoconstant)
3540  {
3541  /* count one execution during startup */
3542  locContext.total.startup += locContext.total.per_tuple;
3543  locContext.total.per_tuple = 0;
3544  }
3545  rinfo->eval_cost = locContext.total;
3546  }
3547  context->total.startup += rinfo->eval_cost.startup;
3548  context->total.per_tuple += rinfo->eval_cost.per_tuple;
3549  /* do NOT recurse into children */
3550  return false;
3551  }
3552 
3553  /*
3554  * For each operator or function node in the given tree, we charge the
3555  * estimated execution cost given by pg_proc.procost (remember to multiply
3556  * this by cpu_operator_cost).
3557  *
3558  * Vars and Consts are charged zero, and so are boolean operators (AND,
3559  * OR, NOT). Simplistic, but a lot better than no model at all.
3560  *
3561  * Should we try to account for the possibility of short-circuit
3562  * evaluation of AND/OR? Probably *not*, because that would make the
3563  * results depend on the clause ordering, and we are not in any position
3564  * to expect that the current ordering of the clauses is the one that's
3565  * going to end up being used. The above per-RestrictInfo caching would
3566  * not mix well with trying to re-order clauses anyway.
3567  *
3568  * Another issue that is entirely ignored here is that if a set-returning
3569  * function is below top level in the tree, the functions/operators above
3570  * it will need to be evaluated multiple times. In practical use, such
3571  * cases arise so seldom as to not be worth the added complexity needed;
3572  * moreover, since our rowcount estimates for functions tend to be pretty
3573  * phony, the results would also be pretty phony.
3574  */
3575  if (IsA(node, FuncExpr))
3576  {
3577  context->total.per_tuple +=
3578  get_func_cost(((FuncExpr *) node)->funcid) * cpu_operator_cost;
3579  }
3580  else if (IsA(node, OpExpr) ||
3581  IsA(node, DistinctExpr) ||
3582  IsA(node, NullIfExpr))
3583  {
3584  /* rely on struct equivalence to treat these all alike */
3585  set_opfuncid((OpExpr *) node);
3586  context->total.per_tuple +=
3587  get_func_cost(((OpExpr *) node)->opfuncid) * cpu_operator_cost;
3588  }
3589  else if (IsA(node, ScalarArrayOpExpr))
3590  {
3591  /*
3592  * Estimate that the operator will be applied to about half of the
3593  * array elements before the answer is determined.
3594  */
3595  ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
3596  Node *arraynode = (Node *) lsecond(saop->args);
3597 
3598  set_sa_opfuncid(saop);
3599  context->total.per_tuple += get_func_cost(saop->opfuncid) *
3600  cpu_operator_cost * estimate_array_length(arraynode) * 0.5;
3601  }
3602  else if (IsA(node, Aggref) ||
3603  IsA(node, WindowFunc))
3604  {
3605  /*
3606  * Aggref and WindowFunc nodes are (and should be) treated like Vars,
3607  * ie, zero execution cost in the current model, because they behave
3608  * essentially like Vars at execution. We disregard the costs of
3609  * their input expressions for the same reason. The actual execution
3610  * costs of the aggregate/window functions and their arguments have to
3611  * be factored into plan-node-specific costing of the Agg or WindowAgg
3612  * plan node.
3613  */
3614  return false; /* don't recurse into children */
3615  }
3616  else if (IsA(node, CoerceViaIO))
3617  {
3618  CoerceViaIO *iocoerce = (CoerceViaIO *) node;
3619  Oid iofunc;
3620  Oid typioparam;
3621  bool typisvarlena;
3622 
3623  /* check the result type's input function */
3624  getTypeInputInfo(iocoerce->resulttype,
3625  &iofunc, &typioparam);
3626  context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
3627  /* check the input type's output function */
3628  getTypeOutputInfo(exprType((Node *) iocoerce->arg),
3629  &iofunc, &typisvarlena);
3630  context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
3631  }
3632  else if (IsA(node, ArrayCoerceExpr))
3633  {
3634  ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
3635  Node *arraynode = (Node *) acoerce->arg;
3636 
3637  if (OidIsValid(acoerce->elemfuncid))
3638  context->total.per_tuple += get_func_cost(acoerce->elemfuncid) *
3640  }
3641  else if (IsA(node, RowCompareExpr))
3642  {
3643  /* Conservatively assume we will check all the columns */
3644  RowCompareExpr *rcexpr = (RowCompareExpr *) node;
3645  ListCell *lc;
3646 
3647  foreach(lc, rcexpr->opnos)
3648  {
3649  Oid opid = lfirst_oid(lc);
3650 
3651  context->total.per_tuple += get_func_cost(get_opcode(opid)) *
3653  }
3654  }
3655  else if (IsA(node, MinMaxExpr) ||
3656  IsA(node, SQLValueFunction) ||
3657  IsA(node, XmlExpr) ||
3658  IsA(node, CoerceToDomain) ||
3659  IsA(node, NextValueExpr))
3660  {
3661  /* Treat all these as having cost 1 */
3662  context->total.per_tuple += cpu_operator_cost;
3663  }
3664  else if (IsA(node, CurrentOfExpr))
3665  {
3666  /* Report high cost to prevent selection of anything but TID scan */
3667  context->total.startup += disable_cost;
3668  }
3669  else if (IsA(node, SubLink))
3670  {
3671  /* This routine should not be applied to un-planned expressions */
3672  elog(ERROR, "cannot handle unplanned sub-select");
3673  }
3674  else if (IsA(node, SubPlan))
3675  {
3676  /*
3677  * A subplan node in an expression typically indicates that the
3678  * subplan will be executed on each evaluation, so charge accordingly.
3679  * (Sub-selects that can be executed as InitPlans have already been
3680  * removed from the expression.)
3681  */
3682  SubPlan *subplan = (SubPlan *) node;
3683 
3684  context->total.startup += subplan->startup_cost;
3685  context->total.per_tuple += subplan->per_call_cost;
3686 
3687  /*
3688  * We don't want to recurse into the testexpr, because it was already
3689  * counted in the SubPlan node's costs. So we're done.
3690  */
3691  return false;
3692  }
3693  else if (IsA(node, AlternativeSubPlan))
3694  {
3695  /*
3696  * Arbitrarily use the first alternative plan for costing. (We should
3697  * certainly only include one alternative, and we don't yet have
3698  * enough information to know which one the executor is most likely to
3699  * use.)
3700  */
3701  AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;
3702 
3703  return cost_qual_eval_walker((Node *) linitial(asplan->subplans),
3704  context);
3705  }
3706  else if (IsA(node, PlaceHolderVar))
3707  {
3708  /*
3709  * A PlaceHolderVar should be given cost zero when considering general
3710  * expression evaluation costs. The expense of doing the contained
3711  * expression is charged as part of the tlist eval costs of the scan
3712  * or join where the PHV is first computed (see set_rel_width and
3713  * add_placeholders_to_joinrel). If we charged it again here, we'd be
3714  * double-counting the cost for each level of plan that the PHV
3715  * bubbles up through. Hence, return without recursing into the
3716  * phexpr.
3717  */
3718  return false;
3719  }
3720 
3721  /* recurse into children */
3723  (void *) context);
3724 }
3725 
3726 /*
3727  * get_restriction_qual_cost
3728  * Compute evaluation costs of a baserel's restriction quals, plus any
3729  * movable join quals that have been pushed down to the scan.
3730  * Results are returned into *qpqual_cost.
3731  *
3732  * This is a convenience subroutine that works for seqscans and other cases
3733  * where all the given quals will be evaluated the hard way. It's not useful
3734  * for cost_index(), for example, where the index machinery takes care of
3735  * some of the quals. We assume baserestrictcost was previously set by
3736  * set_baserel_size_estimates().
3737  */
3738 static void
3740  ParamPathInfo *param_info,
3741  QualCost *qpqual_cost)
3742 {
3743  if (param_info)
3744  {
3745  /* Include costs of pushed-down clauses */
3746  cost_qual_eval(qpqual_cost, param_info->ppi_clauses, root);
3747 
3748  qpqual_cost->startup += baserel->baserestrictcost.startup;
3749  qpqual_cost->per_tuple += baserel->baserestrictcost.per_tuple;
3750  }
3751  else
3752  *qpqual_cost = baserel->baserestrictcost;
3753 }
3754 
3755 
3756 /*
3757  * compute_semi_anti_join_factors
3758  * Estimate how much of the inner input a SEMI, ANTI, or inner_unique join
3759  * can be expected to scan.
3760  *
3761  * In a hash or nestloop SEMI/ANTI join, the executor will stop scanning
3762  * inner rows as soon as it finds a match to the current outer row.
3763  * The same happens if we have detected the inner rel is unique.
3764  * We should therefore adjust some of the cost components for this effect.
3765  * This function computes some estimates needed for these adjustments.
3766  * These estimates will be the same regardless of the particular paths used
3767  * for the outer and inner relation, so we compute these once and then pass
3768  * them to all the join cost estimation functions.
3769  *
3770  * Input parameters:
3771  * outerrel: outer relation under consideration
3772  * innerrel: inner relation under consideration
3773  * jointype: if not JOIN_SEMI or JOIN_ANTI, we assume it's inner_unique
3774  * sjinfo: SpecialJoinInfo relevant to this join
3775  * restrictlist: join quals
3776  * Output parameters:
3777  * *semifactors is filled in (see relation.h for field definitions)
3778  */
3779 void
3781  RelOptInfo *outerrel,
3782  RelOptInfo *innerrel,
3783  JoinType jointype,
3784  SpecialJoinInfo *sjinfo,
3785  List *restrictlist,
3786  SemiAntiJoinFactors *semifactors)
3787 {
3788  Selectivity jselec;
3789  Selectivity nselec;
3790  Selectivity avgmatch;
3791  SpecialJoinInfo norm_sjinfo;
3792  List *joinquals;
3793  ListCell *l;
3794 
3795  /*
3796  * In an ANTI join, we must ignore clauses that are "pushed down", since
3797  * those won't affect the match logic. In a SEMI join, we do not
3798  * distinguish joinquals from "pushed down" quals, so just use the whole
3799  * restrictinfo list. For other outer join types, we should consider only
3800  * non-pushed-down quals, so that this devolves to an IS_OUTER_JOIN check.
3801  */
3802  if (IS_OUTER_JOIN(jointype))
3803  {
3804  joinquals = NIL;
3805  foreach(l, restrictlist)
3806  {
3807  RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
3808 
3809  if (!rinfo->is_pushed_down)
3810  joinquals = lappend(joinquals, rinfo);
3811  }
3812  }
3813  else
3814  joinquals = restrictlist;
3815 
3816  /*
3817  * Get the JOIN_SEMI or JOIN_ANTI selectivity of the join clauses.
3818  */
3819  jselec = clauselist_selectivity(root,
3820  joinquals,
3821  0,
3822  (jointype == JOIN_ANTI) ? JOIN_ANTI : JOIN_SEMI,
3823  sjinfo);
3824 
3825  /*
3826  * Also get the normal inner-join selectivity of the join clauses.
3827  */
3828  norm_sjinfo.type = T_SpecialJoinInfo;
3829  norm_sjinfo.min_lefthand = outerrel->relids;
3830  norm_sjinfo.min_righthand = innerrel->relids;
3831  norm_sjinfo.syn_lefthand = outerrel->relids;
3832  norm_sjinfo.syn_righthand = innerrel->relids;
3833  norm_sjinfo.jointype = JOIN_INNER;
3834  /* we don't bother trying to make the remaining fields valid */
3835  norm_sjinfo.lhs_strict = false;
3836  norm_sjinfo.delay_upper_joins = false;
3837  norm_sjinfo.semi_can_btree = false;
3838  norm_sjinfo.semi_can_hash = false;
3839  norm_sjinfo.semi_operators = NIL;
3840  norm_sjinfo.semi_rhs_exprs = NIL;
3841 
3842  nselec = clauselist_selectivity(root,
3843  joinquals,
3844  0,
3845  JOIN_INNER,
3846  &norm_sjinfo);
3847 
3848  /* Avoid leaking a lot of ListCells */
3849  if (IS_OUTER_JOIN(jointype))
3850  list_free(joinquals);
3851 
3852  /*
3853  * jselec can be interpreted as the fraction of outer-rel rows that have
3854  * any matches (this is true for both SEMI and ANTI cases). And nselec is
3855  * the fraction of the Cartesian product that matches. So, the average
3856  * number of matches for each outer-rel row that has at least one match is
3857  * nselec * inner_rows / jselec.
3858  *
3859  * Note: it is correct to use the inner rel's "rows" count here, even
3860  * though we might later be considering a parameterized inner path with
3861  * fewer rows. This is because we have included all the join clauses in
3862  * the selectivity estimate.
3863  */
3864  if (jselec > 0) /* protect against zero divide */
3865  {
3866  avgmatch = nselec * innerrel->rows / jselec;
3867  /* Clamp to sane range */
3868  avgmatch = Max(1.0, avgmatch);
3869  }
3870  else
3871  avgmatch = 1.0;
3872 
3873  semifactors->outer_match_frac = jselec;
3874  semifactors->match_count = avgmatch;
3875 }
3876 
3877 /*
3878  * has_indexed_join_quals
3879  * Check whether all the joinquals of a nestloop join are used as
3880  * inner index quals.
3881  *
3882  * If the inner path of a SEMI/ANTI join is an indexscan (including bitmap
3883  * indexscan) that uses all the joinquals as indexquals, we can assume that an
3884  * unmatched outer tuple is cheap to process, whereas otherwise it's probably
3885  * expensive.
3886  */
3887 static bool
3889 {
3890  Relids joinrelids = joinpath->path.parent->relids;
3891  Path *innerpath = joinpath->innerjoinpath;
3892  List *indexclauses;
3893  bool found_one;
3894  ListCell *lc;
3895 
3896  /* If join still has quals to evaluate, it's not fast */
3897  if (joinpath->joinrestrictinfo != NIL)
3898  return false;
3899  /* Nor if the inner path isn't parameterized at all */
3900  if (innerpath->param_info == NULL)
3901  return false;
3902 
3903  /* Find the indexclauses list for the inner scan */
3904  switch (innerpath->pathtype)
3905  {
3906  case T_IndexScan:
3907  case T_IndexOnlyScan:
3908  indexclauses = ((IndexPath *) innerpath)->indexclauses;
3909  break;
3910  case T_BitmapHeapScan:
3911  {
3912  /* Accept only a simple bitmap scan, not AND/OR cases */
3913  Path *bmqual = ((BitmapHeapPath *) innerpath)->bitmapqual;
3914 
3915  if (IsA(bmqual, IndexPath))
3916  indexclauses = ((IndexPath *) bmqual)->indexclauses;
3917  else
3918  return false;
3919  break;
3920  }
3921  default:
3922 
3923  /*
3924  * If it's not a simple indexscan, it probably doesn't run quickly
3925  * for zero rows out, even if it's a parameterized path using all
3926  * the joinquals.
3927  */
3928  return false;
3929  }
3930 
3931  /*
3932  * Examine the inner path's param clauses. Any that are from the outer
3933  * path must be found in the indexclauses list, either exactly or in an
3934  * equivalent form generated by equivclass.c. Also, we must find at least
3935  * one such clause, else it's a clauseless join which isn't fast.
3936  */
3937  found_one = false;
3938  foreach(lc, innerpath->param_info->ppi_clauses)
3939  {
3940  RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
3941 
3942  if (join_clause_is_movable_into(rinfo,
3943  innerpath->parent->relids,
3944  joinrelids))
3945  {
3946  if (!(list_member_ptr(indexclauses, rinfo) ||
3947  is_redundant_derived_clause(rinfo, indexclauses)))
3948  return false;
3949  found_one = true;
3950  }
3951  }
3952  return found_one;
3953 }
3954 
3955 
3956 /*
3957  * approx_tuple_count
3958  * Quick-and-dirty estimation of the number of join rows passing
3959  * a set of qual conditions.
3960  *
3961  * The quals can be either an implicitly-ANDed list of boolean expressions,
3962  * or a list of RestrictInfo nodes (typically the latter).
3963  *
3964  * We intentionally compute the selectivity under JOIN_INNER rules, even
3965  * if it's some type of outer join. This is appropriate because we are
3966  * trying to figure out how many tuples pass the initial merge or hash
3967  * join step.
3968  *
3969  * This is quick-and-dirty because we bypass clauselist_selectivity, and
3970  * simply multiply the independent clause selectivities together. Now
3971  * clauselist_selectivity often can't do any better than that anyhow, but
3972  * for some situations (such as range constraints) it is smarter. However,
3973  * we can't effectively cache the results of clauselist_selectivity, whereas
3974  * the individual clause selectivities can be and are cached.
3975  *
3976  * Since we are only using the results to estimate how many potential
3977  * output tuples are generated and passed through qpqual checking, it
3978  * seems OK to live with the approximation.
3979  */
3980 static double
3982 {
3983  double tuples;
3984  double outer_tuples = path->outerjoinpath->rows;
3985  double inner_tuples = path->innerjoinpath->rows;
3986  SpecialJoinInfo sjinfo;
3987  Selectivity selec = 1.0;
3988  ListCell *l;
3989 
3990  /*
3991  * Make up a SpecialJoinInfo for JOIN_INNER semantics.
3992  */
3993  sjinfo.type = T_SpecialJoinInfo;
3994  sjinfo.min_lefthand = path->outerjoinpath->parent->relids;
3995  sjinfo.min_righthand = path->innerjoinpath->parent->relids;
3996  sjinfo.syn_lefthand = path->outerjoinpath->parent->relids;
3997  sjinfo.syn_righthand = path->innerjoinpath->parent->relids;
3998  sjinfo.jointype = JOIN_INNER;
3999  /* we don't bother trying to make the remaining fields valid */
4000  sjinfo.lhs_strict = false;
4001  sjinfo.delay_upper_joins = false;
4002  sjinfo.semi_can_btree = false;
4003  sjinfo.semi_can_hash = false;
4004  sjinfo.semi_operators = NIL;
4005  sjinfo.semi_rhs_exprs = NIL;
4006 
4007  /* Get the approximate selectivity */
4008  foreach(l, quals)
4009  {
4010  Node *qual = (Node *) lfirst(l);
4011 
4012  /* Note that clause_selectivity will be able to cache its result */
4013  selec *= clause_selectivity(root, qual, 0, JOIN_INNER, &sjinfo);
4014  }
4015 
4016  /* Apply it to the input relation sizes */
4017  tuples = selec * outer_tuples * inner_tuples;
4018 
4019  return clamp_row_est(tuples);
4020 }
4021 
4022 
4023 /*
4024  * set_baserel_size_estimates
4025  * Set the size estimates for the given base relation.
4026  *
4027  * The rel's targetlist and restrictinfo list must have been constructed
4028  * already, and rel->tuples must be set.
4029  *
4030  * We set the following fields of the rel node:
4031  * rows: the estimated number of output tuples (after applying
4032  * restriction clauses).
4033  * width: the estimated average output tuple width in bytes.
4034  * baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
4035  */
4036 void
4038 {
4039  double nrows;
4040 
4041  /* Should only be applied to base relations */
4042  Assert(rel->relid > 0);
4043 
4044  nrows = rel->tuples *
4046  rel->baserestrictinfo,
4047  0,
4048  JOIN_INNER,
4049  NULL);
4050 
4051  rel->rows = clamp_row_est(nrows);
4052 
4054 
4055  set_rel_width(root, rel);
4056 }
4057 
4058 /*
4059  * get_parameterized_baserel_size
4060  * Make a size estimate for a parameterized scan of a base relation.
4061  *
4062  * 'param_clauses' lists the additional join clauses to be used.
4063  *
4064  * set_baserel_size_estimates must have been applied already.
4065  */
4066 double
4068  List *param_clauses)
4069 {
4070  List *allclauses;
4071  double nrows;
4072 
4073  /*
4074  * Estimate the number of rows returned by the parameterized scan, knowing
4075  * that it will apply all the extra join clauses as well as the rel's own
4076  * restriction clauses. Note that we force the clauses to be treated as
4077  * non-join clauses during selectivity estimation.
4078  */
4079  allclauses = list_concat(list_copy(param_clauses),
4080  rel->baserestrictinfo);
4081  nrows = rel->tuples *
4083  allclauses,
4084  rel->relid, /* do not use 0! */
4085  JOIN_INNER,
4086  NULL);
4087  nrows = clamp_row_est(nrows);
4088  /* For safety, make sure result is not more than the base estimate */
4089  if (nrows > rel->rows)
4090  nrows = rel->rows;
4091  return nrows;
4092 }
4093 
4094 /*
4095  * set_joinrel_size_estimates
4096  * Set the size estimates for the given join relation.
4097  *
4098  * The rel's targetlist must have been constructed already, and a
4099  * restriction clause list that matches the given component rels must
4100  * be provided.
4101  *
4102  * Since there is more than one way to make a joinrel for more than two
4103  * base relations, the results we get here could depend on which component
4104  * rel pair is provided. In theory we should get the same answers no matter
4105  * which pair is provided; in practice, since the selectivity estimation
4106  * routines don't handle all cases equally well, we might not. But there's
4107  * not much to be done about it. (Would it make sense to repeat the
4108  * calculations for each pair of input rels that's encountered, and somehow
4109  * average the results? Probably way more trouble than it's worth, and
4110  * anyway we must keep the rowcount estimate the same for all paths for the
4111  * joinrel.)
4112  *
4113  * We set only the rows field here. The reltarget field was already set by
4114  * build_joinrel_tlist, and baserestrictcost is not used for join rels.
4115  */
4116 void
4118  RelOptInfo *outer_rel,
4119  RelOptInfo *inner_rel,
4120  SpecialJoinInfo *sjinfo,
4121  List *restrictlist)
4122 {
4123  rel->rows = calc_joinrel_size_estimate(root,
4124  outer_rel,
4125  inner_rel,
4126  outer_rel->rows,
4127  inner_rel->rows,
4128  sjinfo,
4129  restrictlist);
4130 }
4131 
4132 /*
4133  * get_parameterized_joinrel_size
4134  * Make a size estimate for a parameterized scan of a join relation.
4135  *
4136  * 'rel' is the joinrel under consideration.
4137  * 'outer_path', 'inner_path' are (probably also parameterized) Paths that
4138  * produce the relations being joined.
4139  * 'sjinfo' is any SpecialJoinInfo relevant to this join.
4140  * 'restrict_clauses' lists the join clauses that need to be applied at the
4141  * join node (including any movable clauses that were moved down to this join,
4142  * and not including any movable clauses that were pushed down into the
4143  * child paths).
4144  *
4145  * set_joinrel_size_estimates must have been applied already.
4146  */
4147 double
4149  Path *outer_path,
4150  Path *inner_path,
4151  SpecialJoinInfo *sjinfo,
4152  List *restrict_clauses)
4153 {
4154  double nrows;
4155 
4156  /*
4157  * Estimate the number of rows returned by the parameterized join as the
4158  * sizes of the input paths times the selectivity of the clauses that have
4159  * ended up at this join node.
4160  *
4161  * As with set_joinrel_size_estimates, the rowcount estimate could depend
4162  * on the pair of input paths provided, though ideally we'd get the same
4163  * estimate for any pair with the same parameterization.
4164  */
4165  nrows = calc_joinrel_size_estimate(root,
4166  outer_path->parent,
4167  inner_path->parent,
4168  outer_path->rows,
4169  inner_path->rows,
4170  sjinfo,
4171  restrict_clauses);
4172  /* For safety, make sure result is not more than the base estimate */
4173  if (nrows > rel->rows)
4174  nrows = rel->rows;
4175  return nrows;
4176 }
4177 
4178 /*
4179  * calc_joinrel_size_estimate
4180  * Workhorse for set_joinrel_size_estimates and
4181  * get_parameterized_joinrel_size.
4182  *
4183  * outer_rel/inner_rel are the relations being joined, but they should be
4184  * assumed to have sizes outer_rows/inner_rows; those numbers might be less
4185  * than what rel->rows says, when we are considering parameterized paths.
4186  */
4187 static double
4189  RelOptInfo *outer_rel,
4190  RelOptInfo *inner_rel,
4191  double outer_rows,
4192  double inner_rows,
4193  SpecialJoinInfo *sjinfo,
4194  List *restrictlist_in)
4195 {
4196  /* This apparently-useless variable dodges a compiler bug in VS2013: */
4197  List *restrictlist = restrictlist_in;
4198  JoinType jointype = sjinfo->jointype;
4199  Selectivity fkselec;
4200  Selectivity jselec;
4201  Selectivity pselec;
4202  double nrows;
4203 
4204  /*
4205  * Compute joinclause selectivity. Note that we are only considering
4206  * clauses that become restriction clauses at this join level; we are not
4207  * double-counting them because they were not considered in estimating the
4208  * sizes of the component rels.
4209  *
4210  * First, see whether any of the joinclauses can be matched to known FK
4211  * constraints. If so, drop those clauses from the restrictlist, and
4212  * instead estimate their selectivity using FK semantics. (We do this
4213  * without regard to whether said clauses are local or "pushed down".
4214  * Probably, an FK-matching clause could never be seen as pushed down at
4215  * an outer join, since it would be strict and hence would be grounds for
4216  * join strength reduction.) fkselec gets the net selectivity for
4217  * FK-matching clauses, or 1.0 if there are none.
4218  */
4219  fkselec = get_foreign_key_join_selectivity(root,
4220  outer_rel->relids,
4221  inner_rel->relids,
4222  sjinfo,
4223  &restrictlist);
4224 
4225  /*
4226  * For an outer join, we have to distinguish the selectivity of the join's
4227  * own clauses (JOIN/ON conditions) from any clauses that were "pushed
4228  * down". For inner joins we just count them all as joinclauses.
4229  */
4230  if (IS_OUTER_JOIN(jointype))
4231  {
4232  List *joinquals = NIL;
4233  List *pushedquals = NIL;
4234  ListCell *l;
4235 
4236  /* Grovel through the clauses to separate into two lists */
4237  foreach(l, restrictlist)
4238  {
4239  RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
4240 
4241  if (rinfo->is_pushed_down)
4242  pushedquals = lappend(pushedquals, rinfo);
4243  else
4244  joinquals = lappend(joinquals, rinfo);
4245  }
4246 
4247  /* Get the separate selectivities */
4248  jselec = clauselist_selectivity(root,
4249  joinquals,
4250  0,
4251  jointype,
4252  sjinfo);
4253  pselec = clauselist_selectivity(root,
4254  pushedquals,
4255  0,
4256  jointype,
4257  sjinfo);
4258 
4259  /* Avoid leaking a lot of ListCells */
4260  list_free(joinquals);
4261  list_free(pushedquals);
4262  }
4263  else
4264  {
4265  jselec = clauselist_selectivity(root,
4266  restrictlist,
4267  0,
4268  jointype,
4269  sjinfo);
4270  pselec = 0.0; /* not used, keep compiler quiet */
4271  }
4272 
4273  /*
4274  * Basically, we multiply size of Cartesian product by selectivity.
4275  *
4276  * If we are doing an outer join, take that into account: the joinqual
4277  * selectivity has to be clamped using the knowledge that the output must
4278  * be at least as large as the non-nullable input. However, any
4279  * pushed-down quals are applied after the outer join, so their
4280  * selectivity applies fully.
4281  *
4282  * For JOIN_SEMI and JOIN_ANTI, the selectivity is defined as the fraction
4283  * of LHS rows that have matches, and we apply that straightforwardly.
4284  */
4285  switch (jointype)
4286  {
4287  case JOIN_INNER:
4288  nrows = outer_rows * inner_rows * fkselec * jselec;
4289  /* pselec not used */
4290  break;
4291  case JOIN_LEFT:
4292  nrows = outer_rows * inner_rows * fkselec * jselec;
4293  if (nrows < outer_rows)
4294  nrows = outer_rows;
4295  nrows *= pselec;
4296  break;
4297  case JOIN_FULL:
4298  nrows = outer_rows * inner_rows * fkselec * jselec;
4299  if (nrows < outer_rows)
4300  nrows = outer_rows;
4301  if (nrows < inner_rows)
4302  nrows = inner_rows;
4303  nrows *= pselec;
4304  break;
4305  case JOIN_SEMI:
4306  nrows = outer_rows * fkselec * jselec;
4307  /* pselec not used */
4308  break;
4309  case JOIN_ANTI:
4310  nrows = outer_rows * (1.0 - fkselec * jselec);
4311  nrows *= pselec;
4312  break;
4313  default:
4314  /* other values not expected here */
4315  elog(ERROR, "unrecognized join type: %d", (int) jointype);
4316  nrows = 0; /* keep compiler quiet */
4317  break;
4318  }
4319 
4320  return clamp_row_est(nrows);
4321 }
4322 
4323 /*
4324  * get_foreign_key_join_selectivity
4325  * Estimate join selectivity for foreign-key-related clauses.
4326  *
4327  * Remove any clauses that can be matched to FK constraints from *restrictlist,
4328  * and return a substitute estimate of their selectivity. 1.0 is returned
4329  * when there are no such clauses.
4330  *
4331  * The reason for treating such clauses specially is that we can get better
4332  * estimates this way than by relying on clauselist_selectivity(), especially
4333  * for multi-column FKs where that function's assumption that the clauses are
4334  * independent falls down badly. But even with single-column FKs, we may be
4335  * able to get a better answer when the pg_statistic stats are missing or out
4336  * of date.
4337  */
4338 static Selectivity
4340  Relids outer_relids,
4341  Relids inner_relids,
4342  SpecialJoinInfo *sjinfo,
4343  List **restrictlist)
4344 {
4345  Selectivity fkselec = 1.0;
4346  JoinType jointype = sjinfo->jointype;
4347  List *worklist = *restrictlist;
4348  ListCell *lc;
4349 
4350  /* Consider each FK constraint that is known to match the query */
4351  foreach(lc, root->fkey_list)
4352  {
4353  ForeignKeyOptInfo *fkinfo = (ForeignKeyOptInfo *) lfirst(lc);
4354  bool ref_is_outer;
4355  List *removedlist;
4356  ListCell *cell;
4357  ListCell *prev;
4358  ListCell *next;
4359 
4360  /*
4361  * This FK is not relevant unless it connects a baserel on one side of
4362  * this join to a baserel on the other side.
4363  */
4364  if (bms_is_member(fkinfo->con_relid, outer_relids) &&
4365  bms_is_member(fkinfo->ref_relid, inner_relids))
4366  ref_is_outer = false;
4367  else if (bms_is_member(fkinfo->ref_relid, outer_relids) &&
4368  bms_is_member(fkinfo->con_relid, inner_relids))
4369  ref_is_outer = true;
4370  else
4371  continue;
4372 
4373  /*
4374  * If we're dealing with a semi/anti join, and the FK's referenced
4375  * relation is on the outside, then knowledge of the FK doesn't help
4376  * us figure out what we need to know (which is the fraction of outer
4377  * rows that have matches). On the other hand, if the referenced rel
4378  * is on the inside, then all outer rows must have matches in the
4379  * referenced table (ignoring nulls). But any restriction or join
4380  * clauses that filter that table will reduce the fraction of matches.
4381  * We can account for restriction clauses, but it's too hard to guess
4382  * how many table rows would get through a join that's inside the RHS.
4383  * Hence, if either case applies, punt and ignore the FK.
4384  */
4385  if ((jointype == JOIN_SEMI || jointype == JOIN_ANTI) &&
4386  (ref_is_outer || bms_membership(inner_relids) != BMS_SINGLETON))
4387  continue;
4388 
4389  /*
4390  * Modify the restrictlist by removing clauses that match the FK (and
4391  * putting them into removedlist instead). It seems unsafe to modify
4392  * the originally-passed List structure, so we make a shallow copy the
4393  * first time through.
4394  */
4395  if (worklist == *restrictlist)
4396  worklist = list_copy(worklist);
4397 
4398  removedlist = NIL;
4399  prev = NULL;
4400  for (cell = list_head(worklist); cell; cell = next)
4401  {
4402  RestrictInfo *rinfo = (RestrictInfo *) lfirst(cell);
4403  bool remove_it = false;
4404  int i;
4405 
4406  next = lnext(cell);
4407  /* Drop this clause if it matches any column of the FK */
4408  for (i = 0; i < fkinfo->nkeys; i++)
4409  {
4410  if (rinfo->parent_ec)
4411  {
4412  /*
4413  * EC-derived clauses can only match by EC. It is okay to
4414  * consider any clause derived from the same EC as
4415  * matching the FK: even if equivclass.c chose to generate
4416  * a clause equating some other pair of Vars, it could
4417  * have generated one equating the FK's Vars. So for
4418  * purposes of estimation, we can act as though it did so.
4419  *
4420  * Note: checking parent_ec is a bit of a cheat because
4421  * there are EC-derived clauses that don't have parent_ec
4422  * set; but such clauses must compare expressions that
4423  * aren't just Vars, so they cannot match the FK anyway.
4424  */
4425  if (fkinfo->eclass[i] == rinfo->parent_ec)
4426  {
4427  remove_it = true;
4428  break;
4429  }
4430  }
4431  else
4432  {
4433  /*
4434  * Otherwise, see if rinfo was previously matched to FK as
4435  * a "loose" clause.
4436  */
4437  if (list_member_ptr(fkinfo->rinfos[i], rinfo))
4438  {
4439  remove_it = true;
4440  break;
4441  }
4442  }
4443  }
4444  if (remove_it)
4445  {
4446  worklist = list_delete_cell(worklist, cell, prev);
4447  removedlist = lappend(removedlist, rinfo);
4448  }
4449  else
4450  prev = cell;
4451  }
4452 
4453  /*
4454  * If we failed to remove all the matching clauses we expected to
4455  * find, chicken out and ignore this FK; applying its selectivity
4456  * might result in double-counting. Put any clauses we did manage to
4457  * remove back into the worklist.
4458  *
4459  * Since the matching clauses are known not outerjoin-delayed, they
4460  * should certainly have appeared in the initial joinclause list. If
4461  * we didn't find them, they must have been matched to, and removed
4462  * by, some other FK in a previous iteration of this loop. (A likely
4463  * case is that two FKs are matched to the same EC; there will be only
4464  * one EC-derived clause in the initial list, so the first FK will
4465  * consume it.) Applying both FKs' selectivity independently risks
4466  * underestimating the join size; in particular, this would undo one
4467  * of the main things that ECs were invented for, namely to avoid
4468  * double-counting the selectivity of redundant equality conditions.
4469  * Later we might think of a reasonable way to combine the estimates,
4470  * but for now, just punt, since this is a fairly uncommon situation.
4471  */
4472  if (list_length(removedlist) !=
4473  (fkinfo->nmatched_ec + fkinfo->nmatched_ri))
4474  {
4475  worklist = list_concat(worklist, removedlist);
4476  continue;
4477  }
4478 
4479  /*
4480  * Finally we get to the payoff: estimate selectivity using the
4481  * knowledge that each referencing row will match exactly one row in
4482  * the referenced table.
4483  *
4484  * XXX that's not true in the presence of nulls in the referencing
4485  * column(s), so in principle we should derate the estimate for those.
4486  * However (1) if there are any strict restriction clauses for the
4487  * referencing column(s) elsewhere in the query, derating here would
4488  * be double-counting the null fraction, and (2) it's not very clear
4489  * how to combine null fractions for multiple referencing columns. So
4490  * we do nothing for now about correcting for nulls.
4491  *
4492  * XXX another point here is that if either side of an FK constraint
4493  * is an inheritance parent, we estimate as though the constraint
4494  * covers all its children as well. This is not an unreasonable
4495  * assumption for a referencing table, ie the user probably applied
4496  * identical constraints to all child tables (though perhaps we ought
4497  * to check that). But it's not possible to have done that for a
4498  * referenced table. Fortunately, precisely because that doesn't
4499  * work, it is uncommon in practice to have an FK referencing a parent
4500  * table. So, at least for now, disregard inheritance here.
4501  */
4502  if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
4503  {
4504  /*
4505  * For JOIN_SEMI and JOIN_ANTI, we only get here when the FK's
4506  * referenced table is exactly the inside of the join. The join
4507  * selectivity is defined as the fraction of LHS rows that have
4508  * matches. The FK implies that every LHS row has a match *in the
4509  * referenced table*; but any restriction clauses on it will
4510  * reduce the number of matches. Hence we take the join
4511  * selectivity as equal to the selectivity of the table's
4512  * restriction clauses, which is rows / tuples; but we must guard
4513  * against tuples == 0.
4514  */
4515  RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
4516  double ref_tuples = Max(ref_rel->tuples, 1.0);
4517 
4518  fkselec *= ref_rel->rows / ref_tuples;
4519  }
4520  else
4521  {
4522  /*
4523  * Otherwise, selectivity is exactly 1/referenced-table-size; but
4524  * guard against tuples == 0. Note we should use the raw table
4525  * tuple count, not any estimate of its filtered or joined size.
4526  */
4527  RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
4528  double ref_tuples = Max(ref_rel->tuples, 1.0);
4529 
4530  fkselec *= 1.0 / ref_tuples;
4531  }
4532  }
4533 
4534  *restrictlist = worklist;
4535  return fkselec;
4536 }
4537 
4538 /*
4539  * set_subquery_size_estimates
4540  * Set the size estimates for a base relation that is a subquery.
4541  *
4542  * The rel's targetlist and restrictinfo list must have been constructed
4543  * already, and the Paths for the subquery must have been completed.
4544  * We look at the subquery's PlannerInfo to extract data.
4545  *
4546  * We set the same fields as set_baserel_size_estimates.
4547  */
4548 void
4550 {
4551  PlannerInfo *subroot = rel->subroot;
4552  RelOptInfo *sub_final_rel;
4554  ListCell *lc;
4555 
4556  /* Should only be applied to base relations that are subqueries */
4557  Assert(rel->relid > 0);
4558 #ifdef USE_ASSERT_CHECKING
4559  rte = planner_rt_fetch(rel->relid, root);
4560  Assert(rte->rtekind == RTE_SUBQUERY);
4561 #endif
4562 
4563  /*
4564  * Copy raw number of output rows from subquery. All of its paths should
4565  * have the same output rowcount, so just look at cheapest-total.
4566  */
4567  sub_final_rel = fetch_upper_rel(subroot, UPPERREL_FINAL, NULL);
4568  rel->tuples = sub_final_rel->cheapest_total_path->rows;
4569 
4570  /*
4571  * Compute per-output-column width estimates by examining the subquery's
4572  * targetlist. For any output that is a plain Var, get the width estimate
4573  * that was made while planning the subquery. Otherwise, we leave it to
4574  * set_rel_width to fill in a datatype-based default estimate.
4575  */
4576  foreach(lc, subroot->parse->targetList)
4577  {
4578  TargetEntry *te = lfirst_node(TargetEntry, lc);
4579  Node *texpr = (Node *) te->expr;
4580  int32 item_width = 0;
4581 
4582  /* junk columns aren't visible to upper query */
4583  if (te->resjunk)
4584  continue;
4585 
4586  /*
4587  * The subquery could be an expansion of a view that's had columns
4588  * added to it since the current query was parsed, so that there are
4589  * non-junk tlist columns in it that don't correspond to any column
4590  * visible at our query level. Ignore such columns.
4591  */
4592  if (te->resno < rel->min_attr || te->resno > rel->max_attr)
4593  continue;
4594 
4595  /*
4596  * XXX This currently doesn't work for subqueries containing set
4597  * operations, because the Vars in their tlists are bogus references
4598  * to the first leaf subquery, which wouldn't give the right answer
4599  * even if we could still get to its PlannerInfo.
4600  *
4601  * Also, the subquery could be an appendrel for which all branches are
4602  * known empty due to constraint exclusion, in which case
4603  * set_append_rel_pathlist will have left the attr_widths set to zero.
4604  *
4605  * In either case, we just leave the width estimate zero until
4606  * set_rel_width fixes it.
4607  */
4608  if (IsA(texpr, Var) &&
4609  subroot->parse->setOperations == NULL)
4610  {
4611  Var *var = (Var *) texpr;
4612  RelOptInfo *subrel = find_base_rel(subroot, var->varno);
4613 
4614  item_width = subrel->attr_widths[var->varattno - subrel->min_attr];
4615  }
4616  rel->attr_widths[te->resno - rel->min_attr] = item_width;
4617  }
4618 
4619  /* Now estimate number of output rows, etc */
4620  set_baserel_size_estimates(root, rel);
4621 }
4622 
4623 /*
4624  * set_function_size_estimates
4625  * Set the size estimates for a base relation that is a function call.
4626  *
4627  * The rel's targetlist and restrictinfo list must have been constructed
4628  * already.
4629  *
4630  * We set the same fields as set_baserel_size_estimates.
4631  */
4632 void
4634 {
4635  RangeTblEntry *rte;
4636  ListCell *lc;
4637 
4638  /* Should only be applied to base relations that are functions */
4639  Assert(rel->relid > 0);
4640  rte = planner_rt_fetch(rel->relid, root);
4641  Assert(rte->rtekind == RTE_FUNCTION);
4642 
4643  /*
4644  * Estimate number of rows the functions will return. The rowcount of the
4645  * node is that of the largest function result.
4646  */
4647  rel->tuples = 0;
4648  foreach(lc, rte->functions)
4649  {
4650  RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
4651  double ntup = expression_returns_set_rows(rtfunc->funcexpr);
4652 
4653  if (ntup > rel->tuples)
4654  rel->tuples = ntup;
4655  }
4656 
4657  /* Now estimate number of output rows, etc */
4658  set_baserel_size_estimates(root, rel);
4659 }
4660 
4661 /*
4662  * set_function_size_estimates
4663  * Set the size estimates for a base relation that is a function call.
4664  *
4665  * The rel's targetlist and restrictinfo list must have been constructed
4666  * already.
4667  *
4668  * We set the same fields as set_tablefunc_size_estimates.
4669  */
4670 void
4672 {
4674 
4675  /* Should only be applied to base relations that are functions */
4676  Assert(rel->relid > 0);
4677 #ifdef USE_ASSERT_CHECKING
4678  rte = planner_rt_fetch(rel->relid, root);
4679  Assert(rte->rtekind == RTE_TABLEFUNC);
4680 #endif
4681 
4682  rel->tuples = 100;
4683 
4684  /* Now estimate number of output rows, etc */
4685  set_baserel_size_estimates(root, rel);
4686 }
4687 
4688 /*
4689  * set_values_size_estimates
4690  * Set the size estimates for a base relation that is a values list.
4691  *
4692  * The rel's targetlist and restrictinfo list must have been constructed
4693  * already.
4694  *
4695  * We set the same fields as set_baserel_size_estimates.
4696  */
4697 void
4699 {
4700  RangeTblEntry *rte;
4701 
4702  /* Should only be applied to base relations that are values lists */
4703  Assert(rel->relid > 0);
4704  rte = planner_rt_fetch(rel->relid, root);
4705  Assert(rte->rtekind == RTE_VALUES);
4706 
4707  /*
4708  * Estimate number of rows the values list will return. We know this
4709  * precisely based on the list length (well, barring set-returning
4710  * functions in list items, but that's a refinement not catered for
4711  * anywhere else either).
4712  */
4713  rel->tuples = list_length(rte->values_lists);
4714 
4715  /* Now estimate number of output rows, etc */
4716  set_baserel_size_estimates(root, rel);
4717 }
4718 
4719 /*
4720  * set_cte_size_estimates
4721  * Set the size estimates for a base relation that is a CTE reference.
4722  *
4723  * The rel's targetlist and restrictinfo list must have been constructed
4724  * already, and we need an estimate of the number of rows returned by the CTE
4725  * (if a regular CTE) or the non-recursive term (if a self-reference).
4726  *
4727  * We set the same fields as set_baserel_size_estimates.
4728  */
4729 void
4730 set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, double cte_rows)
4731 {
4732  RangeTblEntry *rte;
4733 
4734  /* Should only be applied to base relations that are CTE references */
4735  Assert(rel->relid > 0);
4736  rte = planner_rt_fetch(rel->relid, root);
4737  Assert(rte->rtekind == RTE_CTE);
4738 
4739  if (rte->self_reference)
4740  {
4741  /*
4742  * In a self-reference, arbitrarily assume the average worktable size
4743  * is about 10 times the nonrecursive term's size.
4744  */
4745  rel->tuples = 10 * cte_rows;
4746  }
4747  else
4748  {
4749  /* Otherwise just believe the CTE's rowcount estimate */
4750  rel->tuples = cte_rows;
4751  }
4752 
4753  /* Now estimate number of output rows, etc */
4754  set_baserel_size_estimates(root, rel);
4755 }
4756 
4757 /*
4758  * set_namedtuplestore_size_estimates
4759  * Set the size estimates for a base relation that is a tuplestore reference.
4760  *
4761  * The rel's targetlist and restrictinfo list must have been constructed
4762  * already.
4763  *
4764  * We set the same fields as set_baserel_size_estimates.
4765  */
4766 void
4768 {
4769  RangeTblEntry *rte;
4770 
4771  /* Should only be applied to base relations that are tuplestore references */
4772  Assert(rel->relid > 0);
4773  rte = planner_rt_fetch(rel->relid, root);
4775 
4776  /*
4777  * Use the estimate provided by the code which is generating the named
4778  * tuplestore. In some cases, the actual number might be available; in
4779  * others the same plan will be re-used, so a "typical" value might be
4780  * estimated and used.
4781  */
4782  rel->tuples = rte->enrtuples;
4783  if (rel->tuples < 0)
4784  rel->tuples = 1000;
4785 
4786  /* Now estimate number of output rows, etc */
4787  set_baserel_size_estimates(root, rel);
4788 }
4789 
4790 /*
4791  * set_foreign_size_estimates
4792  * Set the size estimates for a base relation that is a foreign table.
4793  *
4794  * There is not a whole lot that we can do here; the foreign-data wrapper
4795  * is responsible for producing useful estimates. We can do a decent job
4796  * of estimating baserestrictcost, so we set that, and we also set up width
4797  * using what will be purely datatype-driven estimates from the targetlist.
4798  * There is no way to do anything sane with the rows value, so we just put
4799  * a default estimate and hope that the wrapper can improve on it. The
4800  * wrapper's GetForeignRelSize function will be called momentarily.
4801  *
4802  * The rel's targetlist and restrictinfo list must have been constructed
4803  * already.
4804  */
4805 void
4807 {
4808  /* Should only be applied to base relations */
4809  Assert(rel->relid > 0);
4810 
4811  rel->rows = 1000; /* entirely bogus default estimate */
4812 
4814 
4815  set_rel_width(root, rel);
4816 }
4817 
4818 
4819 /*
4820  * set_rel_width
4821  * Set the estimated output width of a base relation.
4822  *
4823  * The estimated output width is the sum of the per-attribute width estimates
4824  * for the actually-referenced columns, plus any PHVs or other expressions
4825  * that have to be calculated at this relation. This is the amount of data
4826  * we'd need to pass upwards in case of a sort, hash, etc.
4827  *
4828  * This function also sets reltarget->cost, so it's a bit misnamed now.
4829  *
4830  * NB: this works best on plain relations because it prefers to look at
4831  * real Vars. For subqueries, set_subquery_size_estimates will already have
4832  * copied up whatever per-column estimates were made within the subquery,
4833  * and for other types of rels there isn't much we can do anyway. We fall
4834  * back on (fairly stupid) datatype-based width estimates if we can't get
4835  * any better number.
4836  *
4837  * The per-attribute width estimates are cached for possible re-use while
4838  * building join relations or post-scan/join pathtargets.
4839  */
4840 static void
4842 {
4843  Oid reloid = planner_rt_fetch(rel->relid, root)->relid;
4844  int32 tuple_width = 0;
4845  bool have_wholerow_var = false;
4846  ListCell *lc;
4847 
4848  /* Vars are assumed to have cost zero, but other exprs do not */
4849  rel->reltarget->cost.startup = 0;
4850  rel->reltarget->cost.per_tuple = 0;
4851 
4852  foreach(lc, rel->reltarget->exprs)
4853  {
4854  Node *node = (Node *) lfirst(lc);
4855 
4856  /*
4857  * Ordinarily, a Var in a rel's targetlist must belong to that rel;
4858  * but there are corner cases involving LATERAL references where that
4859  * isn't so. If the Var has the wrong varno, fall through to the
4860  * generic case (it doesn't seem worth the trouble to be any smarter).
4861  */
4862  if (IsA(node, Var) &&
4863  ((Var *) node)->varno == rel->relid)
4864  {
4865  Var *var = (Var *) node;
4866  int ndx;
4867  int32 item_width;
4868 
4869  Assert(var->varattno >= rel->min_attr);
4870  Assert(var->varattno <= rel->max_attr);
4871 
4872  ndx = var->varattno - rel->min_attr;
4873 
4874  /*
4875  * If it's a whole-row Var, we'll deal with it below after we have
4876  * already cached as many attr widths as possible.
4877  */
4878  if (var->varattno == 0)
4879  {
4880  have_wholerow_var = true;
4881  continue;
4882  }
4883 
4884  /*
4885  * The width may have been cached already (especially if it's a
4886  * subquery), so don't duplicate effort.
4887  */
4888  if (rel->attr_widths[ndx] > 0)
4889  {
4890  tuple_width += rel->attr_widths[ndx];
4891  continue;
4892  }
4893 
4894  /* Try to get column width from statistics */
4895  if (reloid != InvalidOid && var->varattno > 0)
4896  {
4897  item_width = get_attavgwidth(reloid, var->varattno);
4898  if (item_width > 0)
4899  {
4900  rel->attr_widths[ndx] = item_width;
4901  tuple_width += item_width;
4902  continue;
4903  }
4904  }
4905 
4906  /*
4907  * Not a plain relation, or can't find statistics for it. Estimate
4908  * using just the type info.
4909  */
4910  item_width = get_typavgwidth(var->vartype, var->vartypmod);
4911  Assert(item_width > 0);
4912  rel->attr_widths[ndx] = item_width;
4913  tuple_width += item_width;
4914  }
4915  else if (IsA(node, PlaceHolderVar))
4916  {
4917  /*
4918  * We will need to evaluate the PHV's contained expression while
4919  * scanning this rel, so be sure to include it in reltarget->cost.
4920  */
4921  PlaceHolderVar *phv = (PlaceHolderVar *) node;
4922  PlaceHolderInfo *phinfo = find_placeholder_info(root, phv, false);
4923  QualCost cost;
4924 
4925  tuple_width += phinfo->ph_width;
4926  cost_qual_eval_node(&cost, (Node *) phv->phexpr, root);
4927  rel->reltarget->cost.startup += cost.startup;
4928  rel->reltarget->cost.per_tuple += cost.per_tuple;
4929  }
4930  else
4931  {
4932  /*
4933  * We could be looking at an expression pulled up from a subquery,
4934  * or a ROW() representing a whole-row child Var, etc. Do what we
4935  * can using the expression type information.
4936  */
4937  int32 item_width;
4938  QualCost cost;
4939 
4940  item_width = get_typavgwidth(exprType(node), exprTypmod(node));
4941  Assert(item_width > 0);
4942  tuple_width += item_width;
4943  /* Not entirely clear if we need to account for cost, but do so */
4944  cost_qual_eval_node(&cost, node, root);
4945  rel->reltarget->cost.startup += cost.startup;
4946  rel->reltarget->cost.per_tuple += cost.per_tuple;
4947  }
4948  }
4949 
4950  /*
4951  * If we have a whole-row reference, estimate its width as the sum of
4952  * per-column widths plus heap tuple header overhead.
4953  */
4954  if (have_wholerow_var)
4955  {
4956  int32 wholerow_width = MAXALIGN(SizeofHeapTupleHeader);
4957 
4958  if (reloid != InvalidOid)
4959  {
4960  /* Real relation, so estimate true tuple width */
4961  wholerow_width += get_relation_data_width(reloid,
4962  rel->attr_widths - rel->min_attr);
4963  }
4964  else
4965  {
4966  /* Do what we can with info for a phony rel */
4967  AttrNumber i;
4968 
4969  for (i = 1; i <= rel->max_attr; i++)
4970  wholerow_width += rel->attr_widths[i - rel->min_attr];
4971  }
4972 
4973  rel->attr_widths[0 - rel->min_attr] = wholerow_width;
4974 
4975  /*
4976  * Include the whole-row Var as part of the output tuple. Yes, that
4977  * really is what happens at runtime.
4978  */
4979  tuple_width += wholerow_width;
4980  }
4981 
4982  Assert(tuple_width >= 0);
4983  rel->reltarget->width = tuple_width;
4984 }
4985 
4986 /*
4987  * set_pathtarget_cost_width
4988  * Set the estimated eval cost and output width of a PathTarget tlist.
4989  *
4990  * As a notational convenience, returns the same PathTarget pointer passed in.
4991  *
4992  * Most, though not quite all, uses of this function occur after we've run
4993  * set_rel_width() for base relations; so we can usually obtain cached width
4994  * estimates for Vars. If we can't, fall back on datatype-based width
4995  * estimates. Present early-planning uses of PathTargets don't need accurate
4996  * widths badly enough to justify going to the catalogs for better data.
4997  */
4998 PathTarget *
5000 {
5001  int32 tuple_width = 0;
5002  ListCell *lc;
5003 
5004  /* Vars are assumed to have cost zero, but other exprs do not */
5005  target->cost.startup = 0;
5006  target->cost.per_tuple = 0;
5007 
5008  foreach(lc, target->exprs)
5009  {
5010  Node *node = (Node *) lfirst(lc);
5011 
5012  if (IsA(node, Var))
5013  {
5014  Var *var = (Var *) node;
5015  int32 item_width;
5016 
5017  /* We should not see any upper-level Vars here */
5018  Assert(var->varlevelsup == 0);
5019 
5020  /* Try to get data from RelOptInfo cache */
5021  if (var->varno < root->simple_rel_array_size)
5022  {
5023  RelOptInfo *rel = root->simple_rel_array[var->varno];
5024 
5025  if (rel != NULL &&
5026  var->varattno >= rel->min_attr &&
5027  var->varattno <= rel->max_attr)
5028  {
5029  int ndx = var->varattno - rel->min_attr;
5030 
5031  if (rel->attr_widths[ndx] > 0)
5032  {
5033  tuple_width += rel->attr_widths[ndx];
5034  continue;
5035  }
5036  }
5037  }
5038 
5039  /*
5040  * No cached data available, so estimate using just the type info.
5041  */
5042  item_width = get_typavgwidth(var->vartype, var->vartypmod);
5043  Assert(item_width > 0);
5044  tuple_width += item_width;
5045  }
5046  else
5047  {
5048  /*
5049  * Handle general expressions using type info.
5050  */
5051  int32 item_width;
5052  QualCost cost;
5053 
5054  item_width = get_typavgwidth(exprType(node), exprTypmod(node));
5055  Assert(item_width > 0);
5056  tuple_width += item_width;
5057 
5058  /* Account for cost, too */
5059  cost_qual_eval_node(&cost, node, root);
5060  target->cost.startup += cost.startup;
5061  target->cost.per_tuple += cost.per_tuple;
5062  }
5063  }
5064 
5065  Assert(tuple_width >= 0);
5066  target->width = tuple_width;
5067 
5068  return target;
5069 }
5070 
5071 /*
5072  * relation_byte_size
5073  * Estimate the storage space in bytes for a given number of tuples
5074  * of a given width (size in bytes).
5075  */
5076 static double
5077 relation_byte_size(double tuples, int width)
5078 {
5079  return tuples * (MAXALIGN(width) + MAXALIGN(SizeofHeapTupleHeader));
5080 }
5081 
5082 /*
5083  * page_size
5084  * Returns an estimate of the number of pages covered by a given
5085  * number of tuples of a given width (size in bytes).
5086  */
5087 static double
5088 page_size(double tuples, int width)
5089 {
5090  return ceil(relation_byte_size(tuples, width) / BLCKSZ);
5091 }
5092 
5093 /*
5094  * Estimate the fraction of the work that each worker will do given the
5095  * number of workers budgeted for the path.
5096  */
5097 static double
5099 {
5100  double parallel_divisor = path->parallel_workers;
5101  double leader_contribution;
5102 
5103  /*
5104  * Early experience with parallel query suggests that when there is only
5105  * one worker, the leader often makes a very substantial contribution to
5106  * executing the parallel portion of the plan, but as more workers are
5107  * added, it does less and less, because it's busy reading tuples from the
5108  * workers and doing whatever non-parallel post-processing is needed. By
5109  * the time we reach 4 workers, the leader no longer makes a meaningful
5110  * contribution. Thus, for now, estimate that the leader spends 30% of
5111  * its time servicing each worker, and the remainder executing the
5112  * parallel plan.
5113  */
5114  leader_contribution = 1.0 - (0.3 * path->parallel_workers);
5115  if (leader_contribution > 0)
5116  parallel_divisor += leader_contribution;
5117 
5118  return parallel_divisor;
5119 }
5120 
5121 /*
5122  * compute_bitmap_pages
5123  *
5124  * compute number of pages fetched from heap in bitmap heap scan.
5125  */
5126 double
5127 compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel, Path *bitmapqual,
5128  int loop_count, Cost *cost, double *tuple)
5129 {
5130  Cost indexTotalCost;
5131  Selectivity indexSelectivity;
5132  double T;
5133  double pages_fetched;
5134  double tuples_fetched;
5135 
5136  /*
5137  * Fetch total cost of obtaining the bitmap, as well as its total
5138  * selectivity.
5139  */
5140  cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
5141 
5142  /*
5143  * Estimate number of main-table pages fetched.
5144  */
5145  tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
5146 
5147  T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
5148 
5149  if (loop_count > 1)
5150  {
5151  /*
5152  * For repeated bitmap scans, scale up the number of tuples fetched in
5153  * the Mackert and Lohman formula by the number of scans, so that we
5154  * estimate the number of pages fetched by all the scans. Then
5155  * pro-rate for one scan.
5156  */
5157  pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
5158  baserel->pages,
5159  get_indexpath_pages(bitmapqual),
5160  root);
5161  pages_fetched /= loop_count;
5162  }
5163  else
5164  {
5165  /*
5166  * For a single scan, the number of heap pages that need to be fetched
5167  * is the same as the Mackert and Lohman formula for the case T <= b
5168  * (ie, no re-reads needed).
5169  */
5170  pages_fetched =
5171  (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
5172  }
5173 
5174  if (pages_fetched >= T)
5175  pages_fetched = T;
5176  else
5177  pages_fetched = ceil(pages_fetched);
5178 
5179  if (cost)
5180  *cost = indexTotalCost;
5181  if (tuple)
5182  *tuple = tuples_fetched;
5183 
5184  return pages_fetched;
5185 }
QualCost eval_cost
Definition: relation.h:1784
void set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
Definition: costsize.c:4549
void cost_group(Path *path, PlannerInfo *root, int numGroupCols, double numGroups, Cost input_startup_cost, Cost input_total_cost, double input_tuples)
Definition: costsize.c:2040
static void cost_rescan(PlannerInfo *root, Path *path, Cost *rescan_startup_cost, Cost *rescan_total_cost)
Definition: costsize.c:3356
void cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
Definition: costsize.c:3489
#define NIL
Definition: pg_list.h:69
void final_cost_hashjoin(PlannerInfo *root, HashPath *path, JoinCostWorkspace *workspace, JoinPathExtraData *extra)
Definition: costsize.c:3012
bool semi_can_btree
Definition: relation.h:1927
Selectivity leftendsel
Definition: relation.h:1830
#define SizeofHeapTupleHeader
Definition: htup_details.h:170
List * path_mergeclauses
Definition: relation.h:1352
double plan_rows
Definition: plannodes.h:131
#define IsA(nodeptr, _type_)
Definition: nodes.h:560
JoinPath jpath
Definition: relation.h:1370
PlannerInfo * root
Definition: costsize.c:133
PathTarget * pathtarget
Definition: relation.h:955
Query * parse
Definition: relation.h:155
bool enable_tidscan
Definition: costsize.c:122
bool ExecSupportsMarkRestore(Path *pathnode)
Definition: execAmi.c:405
void cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info, Path *bitmapqual, double loop_count)
Definition: costsize.c:928
Index varlevelsup
Definition: primnodes.h:173
void getTypeOutputInfo(Oid type, Oid *typOutput, bool *typIsVarlena)
Definition: lsyscache.c:2632
Path path
Definition: relation.h:1030
bool is_redundant_derived_clause(RestrictInfo *rinfo, List *clauselist)
Definition: equivclass.c:2456
IndexOptInfo * indexinfo
Definition: relation.h:1031
SemiAntiJoinFactors semifactors
Definition: relation.h:2186
void cost_tidscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, List *tidquals, ParamPathInfo *param_info)
Definition: costsize.c:1164
static int32 next
Definition: blutils.c:210
void cost_windowagg(Path *path, PlannerInfo *root, List *windowFuncs, int numPartCols, int numOrderCols, Cost input_startup_cost, Cost input_total_cost, double input_tuples)
Definition: costsize.c:1970
bool enable_nestloop
Definition: costsize.c:125
int num_batches
Definition: relation.h:1372
PathTarget * set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
Definition: costsize.c:4999
List * args
Definition: primnodes.h:359
Relids min_righthand
Definition: relation.h:1920
bool materialize_inner
Definition: relation.h:1356
#define DEFAULT_CPU_TUPLE_COST
Definition: cost.h:26
void cost_gather_merge(GatherMergePath *path, PlannerInfo *root, RelOptInfo *rel, ParamPathInfo *param_info, Cost input_startup_cost, Cost input_total_cost, double *rows)
Definition: costsize.c:387
void mergejoinscansel(PlannerInfo *root, Node *clause, Oid opfamily, int strategy, bool nulls_first, Selectivity *leftstart, Selectivity *leftend, Selectivity *rightstart, Selectivity *rightend)
Definition: selfuncs.c:2927
Selectivity right_mcvfreq
Definition: relation.h:1811
double expression_returns_set_rows(Node *clause)
Definition: clauses.c:802
Expr * orclause
Definition: relation.h:1778
int32 exprTypmod(const Node *expr)
Definition: nodeFuncs.c:276
Selectivity outer_match_frac
Definition: relation.h:2163
Path * innerjoinpath
Definition: relation.h:1297
static MergeScanSelCache * cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
Definition: costsize.c:2843
static double approx_tuple_count(PlannerInfo *root, JoinPath *path, List *quals)
Definition: costsize.c:3981
void set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo, List *restrictlist)
Definition: costsize.c:4117
void set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, double cte_rows)
Definition: costsize.c:4730
void cost_namedtuplestorescan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1524
double tuples
Definition: relation.h:565
Oid reltablespace
Definition: relation.h:554
List * baserestrictinfo
Definition: relation.h:585
Oid resulttype
Definition: primnodes.h:812
NodeTag type
Definition: relation.h:1918
#define DEFAULT_PARALLEL_SETUP_COST
Definition: cost.h:30
#define Min(x, y)
Definition: c.h:795
int parallel_workers
Definition: relation.h:961
bool pseudoconstant
Definition: relation.h:1755
void cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
Definition: costsize.c:1072
int effective_cache_size
Definition: costsize.c:112
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:109
SubLinkType subLinkType
Definition: primnodes.h:684
#define IS_OUTER_JOIN(jointype)
Definition: nodes.h:722
ParamPathInfo * param_info
Definition: relation.h:957
void initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace, JoinType jointype, List *hashclauses, Path *outer_path, Path *inner_path, JoinPathExtraData *extra)
Definition: costsize.c:2920
List * list_copy(const List *oldlist)
Definition: list.c:1160
Definition: nodes.h:509
#define MemSet(start, val, len)
Definition: c.h:846
Relids left_relids
Definition: relation.h:1774
AttrNumber varattno
Definition: primnodes.h:168
void cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1433
static double page_size(double tuples, int width)
Definition: costsize.c:5088
void cost_ctescan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1483
List * list_concat(List *list1, List *list2)
Definition: list.c:321
double parallel_setup_cost
Definition: costsize.c:110
return result
Definition: formatting.c:1633
uint32 BlockNumber
Definition: block.h:31
static List * extract_nonindex_conditions(List *qual_clauses, List *indexquals)
Definition: costsize.c:753
Definition: nodes.h:75
double Selectivity
Definition: nodes.h:639
void ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew, int *numbuckets, int *numbatches, int *num_skew_mcvs)
Definition: nodeHash.c:401
QualCost transCost
Definition: relation.h:62
float4 get_func_cost(Oid funcid)
Definition: lsyscache.c:1641
Selectivity bitmapselectivity
Definition: relation.h:1075
unsigned int Oid
Definition: postgres_ext.h:31
Definition: primnodes.h:163
bool enable_seqscan
Definition: costsize.c:118
List * fkey_list
Definition: relation.h:260
#define DEFAULT_EFFECTIVE_CACHE_SIZE
Definition: cost.h:32
#define OidIsValid(objectId)
Definition: c.h:532
#define DEFAULT_CPU_OPERATOR_COST
Definition: cost.h:28
void initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace, JoinType jointype, List *mergeclauses, Path *outer_path, Path *inner_path, List *outersortkeys, List *innersortkeys, JoinPathExtraData *extra)
Definition: costsize.c:2369
Cost inner_rescan_run_cost
Definition: relation.h:2213
List * values_lists
Definition: parsenodes.h:1010
#define lsecond(l)
Definition: pg_list.h:116
Relids syn_lefthand
Definition: relation.h:1921
int pk_strategy
Definition: relation.h:853
Cost startup
Definition: relation.h:45
double allvisfrac
Definition: relation.h:566
signed int int32
Definition: c.h:246
List * bitmapquals
Definition: relation.h:1074
JoinType
Definition: nodes.h:673
List * targetList
Definition: parsenodes.h:138
struct RelOptInfo ** simple_rel_array
Definition: relation.h:179
List * bitmapquals
Definition: relation.h:1087
Definition: type.h:89
BlockNumber pages
Definition: relation.h:636
NodeTag pathtype
Definition: relation.h:952
Relids syn_righthand
Definition: relation.h:1922
PlannerInfo * subroot
Definition: relation.h:567
bool enable_sort
Definition: costsize.c:123
void final_cost_nestloop(PlannerInfo *root, NestPath *path, JoinCostWorkspace *workspace, JoinPathExtraData *extra)
Definition: costsize.c:2162
double random_page_cost
Definition: costsize.c:105
void cost_seqscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:197
static Selectivity get_foreign_key_join_selectivity(PlannerInfo *root, Relids outer_relids, Relids inner_relids, SpecialJoinInfo *sjinfo, List **restrictlist)
Definition: costsize.c:4339
Cost per_tuple
Definition: relation.h:46
List * indexquals
Definition: relation.h:1033
int estimate_array_length(Node *arrayexpr)
Definition: selfuncs.c:2158
bool skip_mark_restore
Definition: relation.h:1355
static void set_rel_width(PlannerInfo *root, RelOptInfo *rel)
Definition: costsize.c:4841
RelOptInfo * rel
Definition: relation.h:633
bool resjunk
Definition: primnodes.h:1375
#define linitial(l)
Definition: pg_list.h:111
#define planner_rt_fetch(rti, root)
Definition: relation.h:325
List * make_ands_implicit(Expr *clause)
Definition: clauses.c:378
bool pk_nulls_first
Definition: relation.h:854
#define ERROR
Definition: elog.h:43
Expr * phexpr
Definition: relation.h:1852
TableFunc * tablefunc
Definition: parsenodes.h:1005
Oid vartype
Definition: primnodes.h:170
void cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
Definition: costsize.c:3463
Node * get_leftop(const Expr *clause)
Definition: clauses.c:199
Cost indextotalcost
Definition: relation.h:1038
Cost startup_cost
Definition: relation.h:965
Cost disable_cost
Definition: costsize.c:114
List * semi_rhs_exprs
Definition: relation.h:1930
void cost_subqueryscan(SubqueryScanPath *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1267
#define DEFAULT_RANDOM_PAGE_COST
Definition: cost.h:25
bool semi_can_hash
Definition: relation.h:1928
List * joinrestrictinfo
Definition: relation.h:1299
EquivalenceClass * parent_ec
Definition: relation.h:1781
RelOptInfo * parent
Definition: relation.h:954
bool bms_is_subset(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:308
RelOptInfo * fetch_upper_rel(PlannerInfo *root, UpperRelationKind kind, Relids relids)
Definition: relnode.c:919
#define lfirst_node(type, lc)
Definition: pg_list.h:109
static const uint32 T[65]
Definition: md5.c:101
double get_parameterized_joinrel_size(PlannerInfo *root, RelOptInfo *rel, Path *outer_path, Path *inner_path, SpecialJoinInfo *sjinfo, List *restrict_clauses)
Definition: costsize.c:4148
struct Path * cheapest_total_path
Definition: relation.h:543
Selectivity rightstartsel
Definition: relation.h:1831
Selectivity indexselectivity
Definition: relation.h:1039
Selectivity clause_selectivity(PlannerInfo *root, Node *clause, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
Definition: clausesel.c:574
Cost startup_cost
Definition: plannodes.h:125
PlaceHolderInfo * find_placeholder_info(PlannerInfo *root, PlaceHolderVar *phv, bool create_new_ph)
Definition: placeholder.c:69
AttrNumber resno
Definition: primnodes.h:1369
void cost_agg(Path *path, PlannerInfo *root, AggStrategy aggstrategy, const AggClauseCosts *aggcosts, int numGroupCols, double numGroups, Cost input_startup_cost, Cost input_total_cost, double input_tuples)
Definition: costsize.c:1873
static ListCell * list_head(const List *l)
Definition: pg_list.h:77
static double get_parallel_divisor(Path *path)
Definition: costsize.c:5098
Relids relids
Definition: relation.h:525
double cpu_operator_cost
Definition: costsize.c:108
Path * subpath
Definition: relation.h:1265
Oid winfnoid
Definition: primnodes.h:355
Expr * arg
Definition: primnodes.h:811
double total_table_pages
Definition: relation.h:289
Selectivity bitmapselectivity
Definition: relation.h:1088
NextSampleBlock_function NextSampleBlock
Definition: tsmapi.h:72
int simple_rel_array_size
Definition: relation.h:180
double rint(double x)
Definition: rint.c:22
void cost_samplescan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:274
#define lnext(lc)
Definition: pg_list.h:105
void getTypeInputInfo(Oid type, Oid *typInput, Oid *typIOParam)
Definition: lsyscache.c:2599
bool join_clause_is_movable_into(RestrictInfo *rinfo, Relids currentrelids, Relids current_and_outer)
Definition: restrictinfo.c:510
void get_tablespace_page_costs(Oid spcid, double *spc_random_page_cost, double *spc_seq_page_cost)
Definition: spccache.c:182
Index relid
Definition: relation.h:553
int32 get_relation_data_width(Oid relid, int32 *attr_widths)
Definition: plancat.c:1110
List * lappend(List *list, void *datum)
Definition: list.c:128
bool enable_bitmapscan
Definition: costsize.c:121
Expr * clause
Definition: relation.h:1747
struct EquivalenceClass * eclass[INDEX_MAX_KEYS]
Definition: relation.h:706
Index varno
Definition: primnodes.h:166
static double relation_byte_size(double tuples, int width)
Definition: costsize.c:5077
List * exprs
Definition: relation.h:884
List * indrestrictinfo
Definition: relation.h:658
List * list_delete_cell(List *list, ListCell *cell, ListCell *prev)
Definition: list.c:528
Path * outerjoinpath
Definition: relation.h:1296
void set_namedtuplestore_size_estimates(PlannerInfo *root, RelOptInfo *rel)
Definition: costsize.c:4767
void cost_index(IndexPath *path, PlannerInfo *root, double loop_count, bool partial_path)
Definition: costsize.c:462
BMS_Membership bms_membership(const Bitmapset *a)
Definition: bitmapset.c:634
bool delay_upper_joins
Definition: relation.h:1925
void cost_recursive_union(Path *runion, Path *nrterm, Path *rterm)
Definition: costsize.c:1564
int compute_parallel_worker(RelOptInfo *rel, double heap_pages, double index_pages)
Definition: allpaths.c:3103
bool self_reference
Definition: parsenodes.h:1017
void set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
Definition: costsize.c:4037
double get_parameterized_baserel_size(PlannerInfo *root, RelOptInfo *rel, List *param_clauses)
Definition: costsize.c:4067
void cost_sort(Path *path, PlannerInfo *root, List *pathkeys, Cost input_cost, double tuples, int width, Cost comparison_cost, int sort_mem, double limit_tuples)
Definition: costsize.c:1644
Node * testexpr
Definition: primnodes.h:686
int work_mem
Definition: globals.c:113
RTEKind rtekind
Definition: relation.h:555
Cost per_call_cost
Definition: primnodes.h:713
int32 get_typavgwidth(Oid typid, int32 typmod)
Definition: lsyscache.c:2328
double rows
Definition: relation.h:528
#define InvalidOid
Definition: postgres_ext.h:36
Cost finalCost
Definition: relation.h:63
RegProcedure get_opcode(Oid opno)
Definition: lsyscache.c:1094
static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
Definition: costsize.c:3503
bool is_pushed_down
Definition: relation.h:1749
bool list_member_ptr(const List *list, const void *datum)
Definition: list.c:465
Cost total_cost
Definition: relation.h:966
void cost_material(Path *path, Cost input_startup_cost, Cost input_total_cost, double tuples, int width)
Definition: costsize.c:1819
Selectivity left_bucketsize
Definition: relation.h:1808
double outer_skip_rows
Definition: relation.h:2218
List * pathkeys
Definition: relation.h:968
static void get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info, QualCost *qpqual_cost)
Definition: costsize.c:3739
TsmRoutine * GetTsmRoutine(Oid tsmhandler)
Definition: tablesample.c:27
#define Max(x, y)
Definition: c.h:789
void cost_tablefuncscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1377
bool enable_mergejoin
Definition: costsize.c:127
void cost_merge_append(Path *path, PlannerInfo *root, List *pathkeys, int n_streams, Cost input_startup_cost, Cost input_total_cost, double tuples)
Definition: costsize.c:1768
Relids right_relids
Definition: relation.h:1775
#define LOG2(x)
Definition: costsize.c:101
BlockNumber pages
Definition: relation.h:564
Path path
Definition: relation.h:1289
#define Assert(condition)
Definition: c.h:664
#define lfirst(lc)
Definition: pg_list.h:106
static bool has_indexed_join_quals(NestPath *joinpath)
Definition: costsize.c:3888
List * functions
Definition: parsenodes.h:999
Expr * aggfilter
Definition: primnodes.h:360
void compute_semi_anti_join_factors(PlannerInfo *root, RelOptInfo *outerrel, RelOptInfo *innerrel, JoinType jointype, SpecialJoinInfo *sjinfo, List *restrictlist, SemiAntiJoinFactors *semifactors)
Definition: costsize.c:3780
double rows
Definition: relation.h:964
Expr * expr
Definition: primnodes.h:1368
void cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
Definition: costsize.c:3263
static double get_indexpath_pages(Path *bitmapqual)
Definition: costsize.c:878
JoinType jointype
Definition: relation.h:1923
EquivalenceClass * pk_eclass
Definition: relation.h:851
Selectivity left_mcvfreq
Definition: relation.h:1810
List * ppi_clauses
Definition: relation.h:915
QualCost cost
Definition: relation.h:886
Oid exprType(const Node *expr)
Definition: nodeFuncs.c:42
bool expression_tree_walker(Node *node, bool(*walker)(), void *context)
Definition: nodeFuncs.c:1843
static int list_length(const List *l)
Definition: pg_list.h:89
int tuplesort_merge_order(int64 allowedMem)
Definition: tuplesort.c:2305
#define MAXALIGN(LEN)
Definition: c.h:576
List * parParam
Definition: primnodes.h:709
#define DEFAULT_CPU_INDEX_TUPLE_COST
Definition: cost.h:27
List * innersortkeys
Definition: relation.h:1354
double cpu_tuple_cost
Definition: costsize.c:106
Oid pk_opfamily
Definition: relation.h:852
void cost_gather(GatherPath *path, PlannerInfo *root, RelOptInfo *rel, ParamPathInfo *param_info, double *rows)
Definition: costsize.c:349
#define nodeTag(nodeptr)
Definition: nodes.h:514
Node * get_rightop(const Expr *clause)
Definition: clauses.c:216
double ppi_rows
Definition: relation.h:914
bool enable_hashjoin
Definition: costsize.c:128
RTEKind rtekind
Definition: parsenodes.h:945
bool enable_hashagg
Definition: costsize.c:124
Node * setOperations
Definition: parsenodes.h:163
int width
Definition: relation.h:887