costsize.c
1/*-------------------------------------------------------------------------
2 *
3 * costsize.c
4 * Routines to compute (and set) relation sizes and path costs
5 *
6 * Path costs are measured in arbitrary units established by these basic
7 * parameters:
8 *
9 * seq_page_cost Cost of a sequential page fetch
10 * random_page_cost Cost of a non-sequential page fetch
11 * cpu_tuple_cost Cost of typical CPU time to process a tuple
12 * cpu_index_tuple_cost Cost of typical CPU time to process an index tuple
13 * cpu_operator_cost Cost of CPU time to execute an operator or function
14 * parallel_tuple_cost Cost of CPU time to pass a tuple from worker to leader backend
15 * parallel_setup_cost Cost of setting up shared memory for parallelism
16 *
17 * We expect that the kernel will typically do some amount of read-ahead
18 * optimization; this in conjunction with seek costs means that seq_page_cost
19 * is normally considerably less than random_page_cost. (However, if the
20 * database is fully cached in RAM, it is reasonable to set them equal.)
21 *
22 * We also use a rough estimate "effective_cache_size" of the number of
23 * disk pages in Postgres + OS-level disk cache. (We can't simply use
24 * NBuffers for this purpose because that would ignore the effects of
25 * the kernel's disk cache.)
26 *
27 * Obviously, taking constants for these values is an oversimplification,
28 * but it's tough enough to get any useful estimates even at this level of
29 * detail. Note that all of these parameters are user-settable, in case
30 * the default values are drastically off for a particular platform.
31 *
32 * seq_page_cost and random_page_cost can also be overridden for an individual
33 * tablespace, in case some data is on a fast disk and other data is on a slow
34 * disk. Per-tablespace overrides never apply to temporary work files such as
35 * an external sort or a materialize node that overflows work_mem.
36 *
37 * We compute two separate costs for each path:
38 * total_cost: total estimated cost to fetch all tuples
39 * startup_cost: cost that is expended before first tuple is fetched
40 * In some scenarios, such as when there is a LIMIT or we are implementing
41 * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
42 * path's result. A caller can estimate the cost of fetching a partial
43 * result by interpolating between startup_cost and total_cost. In detail:
44 * actual_cost = startup_cost +
45 * (total_cost - startup_cost) * tuples_to_fetch / path->rows;
46 * Note that a base relation's rows count (and, by extension, plan_rows for
47 * plan nodes below the LIMIT node) are set without regard to any LIMIT, so
48 * that this equation works properly. (Note: while path->rows is never zero
49 * for ordinary relations, it is zero for paths for provably-empty relations,
50 * so beware of division-by-zero.) The LIMIT is applied as a top-level
51 * plan node.
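 * For example, applying this formula with startup_cost = 10, total_cost = 110
 * and path->rows = 1000 (illustrative numbers only), fetching the first 100
 * tuples is estimated to cost 10 + (110 - 10) * 100 / 1000 = 20.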
52 *
53 * Each path stores the total number of disabled nodes that exist at or
54 * below that point in the plan tree. This is regarded as a component of
55 * the cost, and paths with fewer disabled nodes should be regarded as
56 * cheaper than those with more. Disabled nodes occur when the user sets
57 * a GUC like enable_seqscan=false. We can't necessarily respect such a
58 * setting in every part of the plan tree, but we want to respect it in as many
59 * parts of the plan tree as possible. Simpler schemes like storing a Boolean
60 * here rather than a count fail to do that. We used to disable nodes by
61 * adding a large constant to the startup cost, but that distorted planning
62 * in other ways.
63 *
64 * For largely historical reasons, most of the routines in this module use
65 * the passed result Path only to store their results (rows, startup_cost and
66 * total_cost) into. All the input data they need is passed as separate
67 * parameters, even though much of it could be extracted from the Path.
68 * An exception is made for the cost_XXXjoin() routines, which expect all
69 * the other fields of the passed XXXPath to be filled in, and similarly
70 * cost_index() assumes the passed IndexPath is valid except for its output
71 * values.
72 *
73 *
74 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
75 * Portions Copyright (c) 1994, Regents of the University of California
76 *
77 * IDENTIFICATION
78 * src/backend/optimizer/path/costsize.c
79 *
80 *-------------------------------------------------------------------------
81 */
82
83#include "postgres.h"
84
85#include <limits.h>
86#include <math.h>
87
88#include "access/amapi.h"
89#include "access/htup_details.h"
90#include "access/tsmapi.h"
91#include "executor/executor.h"
92#include "executor/nodeAgg.h"
93#include "executor/nodeHash.h"
95#include "miscadmin.h"
96#include "nodes/makefuncs.h"
97#include "nodes/nodeFuncs.h"
98#include "optimizer/clauses.h"
99#include "optimizer/cost.h"
100#include "optimizer/optimizer.h"
101#include "optimizer/pathnode.h"
102#include "optimizer/paths.h"
104#include "optimizer/plancat.h"
106#include "parser/parsetree.h"
107#include "utils/lsyscache.h"
108#include "utils/selfuncs.h"
109#include "utils/spccache.h"
110#include "utils/tuplesort.h"
111
112
113#define LOG2(x) (log(x) / 0.693147180559945)
114
115/*
116 * Append and MergeAppend nodes are less expensive than some other operations
117 * which use cpu_tuple_cost; instead of adding a separate GUC, estimate the
118 * per-tuple cost as cpu_tuple_cost multiplied by this value.
119 */
120#define APPEND_CPU_COST_MULTIPLIER 0.5
121
122/*
123 * Maximum value for row estimates. We cap row estimates to this to help
124 * ensure that costs based on these estimates remain within the range of what
125 * double can represent. add_path() wouldn't act sanely given infinite or NaN
126 * cost values.
127 */
128#define MAXIMUM_ROWCOUNT 1e100
129
138
140
142
144
145bool enable_seqscan = true;
149bool enable_tidscan = true;
150bool enable_sort = true;
152bool enable_hashagg = true;
153bool enable_nestloop = true;
154bool enable_material = true;
155bool enable_memoize = true;
157bool enable_hashjoin = true;
166
172
173static List *extract_nonindex_conditions(List *qual_clauses, List *indexclauses);
175 RestrictInfo *rinfo,
177static void cost_rescan(PlannerInfo *root, Path *path,
179static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
183static bool has_indexed_join_quals(NestPath *path);
184static double approx_tuple_count(PlannerInfo *root, JoinPath *path,
185 List *quals);
187 RelOptInfo *joinrel,
190 double outer_rows,
191 double inner_rows,
192 SpecialJoinInfo *sjinfo,
193 List *restrictlist);
195 Relids outer_relids,
197 SpecialJoinInfo *sjinfo,
198 List **restrictlist);
199static Cost append_nonpartial_cost(List *subpaths, int numpaths,
200 int parallel_workers);
201static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
202static int32 get_expr_width(PlannerInfo *root, const Node *expr);
203static double relation_byte_size(double tuples, int width);
204static double page_size(double tuples, int width);
205static double get_parallel_divisor(Path *path);
206
207
208/*
209 * clamp_row_est
210 * Force a row-count estimate to a sane value.
211 */
212double
213clamp_row_est(double nrows)
214{
215 /*
216 * Avoid infinite and NaN row estimates. Costs derived from such values
217 * are going to be useless. Also force the estimate to be at least one
218 * row, to make explain output look better and to avoid possible
219 * divide-by-zero when interpolating costs. Make it an integer, too.
220 */
221 if (nrows > MAXIMUM_ROWCOUNT || isnan(nrows))
222 nrows = MAXIMUM_ROWCOUNT;
223 else if (nrows <= 1.0)
224 nrows = 1.0;
225 else
226 nrows = rint(nrows);
227
228 return nrows;
229}
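
/*
 * Illustrative behavior of clamp_row_est() with made-up inputs: an estimate
 * of 0.3 is forced up to 1.0, 10.7 is rounded to 11, and an infinite or NaN
 * input is clamped to MAXIMUM_ROWCOUNT (1e100), keeping later cost
 * arithmetic finite.
 */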
230
231/*
232 * clamp_width_est
233 * Force a tuple-width estimate to a sane value.
234 *
235 * The planner represents datatype width and tuple width estimates as int32.
236 * When summing column width estimates to create a tuple width estimate,
237 * it's possible to reach integer overflow in edge cases. To ensure sane
238 * behavior, we form such sums in int64 arithmetic and then apply this routine
239 * to clamp to int32 range.
240 */
241int32
242clamp_width_est(int64 tuple_width)
243{
244 /*
245 * Anything more than MaxAllocSize is clearly bogus, since we could not
246 * create a tuple that large.
247 */
249 return (int32) MaxAllocSize;
250
251 /*
252 * Unlike clamp_row_est, we just Assert that the value isn't negative,
253 * rather than masking such errors.
254 */
255 Assert(tuple_width >= 0);
256
257 return (int32) tuple_width;
258}
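
/*
 * A minimal sketch (not from the PostgreSQL sources) of the usage pattern
 * described above: column width estimates are accumulated in int64 so the
 * intermediate sum cannot overflow int32, and the total is clamped at the
 * end.  The function name example_sum_column_widths() and its simple array
 * input are assumptions made for this illustration.
 */
static int32
example_sum_column_widths(const int32 *col_widths, int ncols)
{
	int64		tuple_width = 0;

	for (int i = 0; i < ncols; i++)
		tuple_width += col_widths[i];	/* int64 arithmetic, no overflow */

	return clamp_width_est(tuple_width);
}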
259
260
261/*
262 * cost_seqscan
263 * Determines and returns the cost of scanning a relation sequentially.
264 *
265 * 'baserel' is the relation to be scanned
266 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
267 */
268void
269cost_seqscan(Path *path, PlannerInfo *root,
270 RelOptInfo *baserel, ParamPathInfo *param_info)
271{
272 Cost startup_cost = 0;
275 double spc_seq_page_cost;
279
280 /* Should only be applied to base relations */
281 Assert(baserel->relid > 0);
282 Assert(baserel->rtekind == RTE_RELATION);
283
284 /* Mark the path with the correct row estimate */
285 if (param_info)
286 path->rows = param_info->ppi_rows;
287 else
288 path->rows = baserel->rows;
289
290 /* fetch estimated page cost for tablespace containing table */
291 get_tablespace_page_costs(baserel->reltablespace,
292 NULL,
294
295 /*
296 * disk costs
297 */
299
300 /* CPU costs */
302
303 startup_cost += qpqual_cost.startup;
306 /* tlist eval costs are paid per output row, not per tuple scanned */
307 startup_cost += path->pathtarget->cost.startup;
308 cpu_run_cost += path->pathtarget->cost.per_tuple * path->rows;
309
310 /* Adjust costing for parallelism, if used. */
311 if (path->parallel_workers > 0)
312 {
314
315 /* The CPU cost is divided among all the workers. */
317
318 /*
319 * It may be possible to amortize some of the I/O cost, but probably
320 * not very much, because most operating systems already do aggressive
321 * prefetching. For now, we assume that the disk run cost can't be
322 * amortized at all.
323 */
324
325 /*
326 * In the case of a parallel plan, the row count needs to represent
327 * the number of tuples processed per worker.
328 */
329 path->rows = clamp_row_est(path->rows / parallel_divisor);
330 }
331 else
333
334 path->disabled_nodes =
335 (baserel->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
336 path->startup_cost = startup_cost;
337 path->total_cost = startup_cost + cpu_run_cost + disk_run_cost;
338}
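
/*
 * A simplified standalone sketch (not from the PostgreSQL sources) of the
 * sequential-scan arithmetic above, ignoring qual and pathtarget evaluation
 * and disabled-node tracking: disk cost is one sequential page fetch per
 * page, CPU cost is one cpu_tuple_cost per tuple, and only the CPU portion
 * is divided among parallel workers.  The function name and scalar
 * parameters are assumptions made for this illustration.
 */
static void
example_seqscan_cost(double pages, double tuples,
					 double seq_page_cost, double cpu_tuple_cost,
					 double parallel_divisor,
					 double *startup_cost, double *total_cost)
{
	double		disk_run_cost = seq_page_cost * pages;
	double		cpu_run_cost = cpu_tuple_cost * tuples;

	/* CPU work is shared among workers; disk cost is assumed not amortizable */
	if (parallel_divisor > 1.0)
		cpu_run_cost /= parallel_divisor;

	*startup_cost = 0.0;
	*total_cost = disk_run_cost + cpu_run_cost;
}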
339
340/*
341 * cost_samplescan
342 * Determines and returns the cost of scanning a relation using sampling.
343 *
344 * 'baserel' is the relation to be scanned
345 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
346 */
347void
350{
351 Cost startup_cost = 0;
352 Cost run_cost = 0;
356 double spc_seq_page_cost,
357 spc_random_page_cost,
362
363 /* Should only be applied to base relations with tablesample clauses */
364 Assert(baserel->relid > 0);
365 rte = planner_rt_fetch(baserel->relid, root);
366 Assert(rte->rtekind == RTE_RELATION);
367 tsc = rte->tablesample;
368 Assert(tsc != NULL);
369 tsm = GetTsmRoutine(tsc->tsmhandler);
370
371 /* Mark the path with the correct row estimate */
372 if (param_info)
373 path->rows = param_info->ppi_rows;
374 else
375 path->rows = baserel->rows;
376
377 /* fetch estimated page cost for tablespace containing table */
378 get_tablespace_page_costs(baserel->reltablespace,
379 &spc_random_page_cost,
381
382 /* if NextSampleBlock is used, assume random access, else sequential */
383 spc_page_cost = (tsm->NextSampleBlock != NULL) ?
384 spc_random_page_cost : spc_seq_page_cost;
385
386 /*
387 * disk costs (recall that baserel->pages has already been set to the
388 * number of pages the sampling method will visit)
389 */
390 run_cost += spc_page_cost * baserel->pages;
391
392 /*
393 * CPU costs (recall that baserel->tuples has already been set to the
394 * number of tuples the sampling method will select). Note that we ignore
395 * execution cost of the TABLESAMPLE parameter expressions; they will be
396 * evaluated only once per scan, and in most usages they'll likely be
397 * simple constants anyway. We also don't charge anything for the
398 * calculations the sampling method might do internally.
399 */
401
402 startup_cost += qpqual_cost.startup;
404 run_cost += cpu_per_tuple * baserel->tuples;
405 /* tlist eval costs are paid per output row, not per tuple scanned */
406 startup_cost += path->pathtarget->cost.startup;
407 run_cost += path->pathtarget->cost.per_tuple * path->rows;
408
409 if (path->parallel_workers == 0)
411
412 path->disabled_nodes =
413 (baserel->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
414 path->startup_cost = startup_cost;
415 path->total_cost = startup_cost + run_cost;
416}
417
418/*
419 * cost_gather
420 * Determines and returns the cost of gather path.
421 *
422 * 'rel' is the relation to be operated upon
423 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
424 * 'rows' may be used to point to a row estimate; if non-NULL, it overrides
425 * both 'rel' and 'param_info'. This is useful when the path doesn't exactly
426 * correspond to any particular RelOptInfo.
427 */
428void
431 double *rows)
432{
433 Cost startup_cost = 0;
434 Cost run_cost = 0;
435
436 /* Mark the path with the correct row estimate */
437 if (rows)
438 path->path.rows = *rows;
439 else if (param_info)
440 path->path.rows = param_info->ppi_rows;
441 else
442 path->path.rows = rel->rows;
443
444 startup_cost = path->subpath->startup_cost;
445
446 run_cost = path->subpath->total_cost - path->subpath->startup_cost;
447
448 /* Parallel setup and communication cost. */
449 startup_cost += parallel_setup_cost;
450 run_cost += parallel_tuple_cost * path->path.rows;
451
453 + ((rel->pgs_mask & PGS_GATHER) != 0 ? 0 : 1);
454 path->path.startup_cost = startup_cost;
455 path->path.total_cost = (startup_cost + run_cost);
456}
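
/*
 * In rough, illustrative form, the Gather charge above reduces to the
 * subpath's cost plus the two parallelism GUCs:
 *
 *	startup = subpath_startup + parallel_setup_cost
 *	total   = subpath_total + parallel_setup_cost
 *			  + parallel_tuple_cost * output_rows
 *
 * For example (made-up numbers), with parallel_setup_cost = 1000,
 * parallel_tuple_cost = 0.1 and 10000 output rows, Gather adds
 * 1000 + 0.1 * 10000 = 2000 on top of the subpath's total cost.
 */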
457
458/*
459 * cost_gather_merge
460 * Determines and returns the cost of gather merge path.
461 *
462 * GatherMerge merges several pre-sorted input streams, using a heap that at
463 * any given instant holds the next tuple from each stream. If there are N
464 * streams, we need about N*log2(N) tuple comparisons to construct the heap at
465 * startup, and then for each output tuple, about log2(N) comparisons to
466 * replace the top heap entry with the next tuple from the same stream.
467 */
468void
473 double *rows)
474{
475 Cost startup_cost = 0;
476 Cost run_cost = 0;
478 double N;
479 double logN;
480
481 /* Mark the path with the correct row estimate */
482 if (rows)
483 path->path.rows = *rows;
484 else if (param_info)
485 path->path.rows = param_info->ppi_rows;
486 else
487 path->path.rows = rel->rows;
488
489 /*
490 * Add one to the number of workers to account for the leader. This might
491 * be overgenerous since the leader will do less work than other workers
492 * in typical cases, but we'll go with it for now.
493 */
494 Assert(path->num_workers > 0);
495 N = (double) path->num_workers + 1;
496 logN = LOG2(N);
497
498 /* Assumed cost per tuple comparison */
500
501 /* Heap creation cost */
502 startup_cost += comparison_cost * N * logN;
503
504 /* Per-tuple heap maintenance cost */
505 run_cost += path->path.rows * comparison_cost * logN;
506
507 /* small cost for heap management, like cost_merge_append */
508 run_cost += cpu_operator_cost * path->path.rows;
509
510 /*
511 * Parallel setup and communication cost. Since Gather Merge, unlike
512 * Gather, requires us to block until a tuple is available from every
513 * worker, we bump the IPC cost up a little bit as compared with Gather.
514 * For lack of a better idea, charge an extra 5%.
515 */
516 startup_cost += parallel_setup_cost;
517 run_cost += parallel_tuple_cost * path->path.rows * 1.05;
518
520 + ((rel->pgs_mask & PGS_GATHER_MERGE) != 0 ? 0 : 1);
521 path->path.startup_cost = startup_cost + input_startup_cost;
522 path->path.total_cost = (startup_cost + run_cost + input_total_cost);
523}
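
/*
 * A standalone sketch (not from the PostgreSQL sources) of the heap
 * arithmetic above: with N = num_workers + 1 sorted streams, building the
 * heap costs about N * log2(N) comparisons and each output row costs about
 * log2(N) comparisons, plus the 5% surcharge on the per-tuple IPC cost.
 * The function name and bare double parameters are assumptions made for
 * this illustration; LOG2() is the macro defined near the top of this file.
 */
static void
example_gather_merge_cost(double num_workers, double rows,
						  double comparison_cost,
						  double parallel_setup_cost,
						  double parallel_tuple_cost,
						  double *startup_cost, double *run_cost)
{
	double		N = num_workers + 1;	/* count the leader too */
	double		logN = LOG2(N);

	*startup_cost = comparison_cost * N * logN + parallel_setup_cost;
	*run_cost = rows * comparison_cost * logN	/* heap maintenance */
		+ parallel_tuple_cost * rows * 1.05;	/* IPC, bumped by 5% */
}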
524
525/*
526 * cost_index
527 * Determines and returns the cost of scanning a relation using an index.
528 *
529 * 'path' describes the indexscan under consideration, and is complete
530 * except for the fields to be set by this routine
531 * 'loop_count' is the number of repetitions of the indexscan to factor into
532 * estimates of caching behavior
533 *
534 * In addition to rows, startup_cost and total_cost, cost_index() sets the
535 * path's indextotalcost and indexselectivity fields. These values will be
536 * needed if the IndexPath is used in a BitmapIndexScan.
537 *
538 * NOTE: path->indexquals must contain only clauses usable as index
539 * restrictions. Any additional quals evaluated as qpquals may reduce the
540 * number of returned tuples, but they won't reduce the number of tuples
541 * we have to fetch from the table, so they don't reduce the scan cost.
542 */
543void
545 bool partial_path)
546{
548 RelOptInfo *baserel = index->rel;
549 bool indexonly = (path->path.pathtype == T_IndexOnlyScan);
550 amcostestimate_function amcostestimate;
551 List *qpquals;
552 Cost startup_cost = 0;
553 Cost run_cost = 0;
554 Cost cpu_run_cost = 0;
555 Cost indexStartupCost;
556 Cost indexTotalCost;
557 Selectivity indexSelectivity;
558 double indexCorrelation,
559 csquared;
560 double spc_seq_page_cost,
561 spc_random_page_cost;
566 double tuples_fetched;
567 double pages_fetched;
568 double rand_heap_pages;
569 double index_pages;
571
572 /* Should only be applied to base relations */
575 Assert(baserel->relid > 0);
576 Assert(baserel->rtekind == RTE_RELATION);
577
578 /*
579 * Mark the path with the correct row estimate, and identify which quals
580 * will need to be enforced as qpquals. We need not check any quals that
581 * are implied by the index's predicate, so we can use indrestrictinfo not
582 * baserestrictinfo as the list of relevant restriction clauses for the
583 * rel.
584 */
585 if (path->path.param_info)
586 {
587 path->path.rows = path->path.param_info->ppi_rows;
588 /* qpquals come from the rel's restriction clauses and ppi_clauses */
590 path->indexclauses),
591 extract_nonindex_conditions(path->path.param_info->ppi_clauses,
592 path->indexclauses));
593 }
594 else
595 {
596 path->path.rows = baserel->rows;
597 /* qpquals come from just the rel's restriction clauses */
599 path->indexclauses);
600 }
601
602 /* is this scan type disabled? */
605 path->path.disabled_nodes =
606 (baserel->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
607
608 /*
609 * Call index-access-method-specific code to estimate the processing cost
610 * for scanning the index, as well as the selectivity of the index (ie,
611 * the fraction of main-table tuples we will have to retrieve) and its
612 * correlation to the main-table tuple order. We need a cast here because
613 * pathnodes.h uses a weak function type to avoid including amapi.h.
614 */
615 amcostestimate = (amcostestimate_function) index->amcostestimate;
616 amcostestimate(root, path, loop_count,
617 &indexStartupCost, &indexTotalCost,
618 &indexSelectivity, &indexCorrelation,
619 &index_pages);
620
621 /*
622 * Save amcostestimate's results for possible use in bitmap scan planning.
623 * We don't bother to save indexStartupCost or indexCorrelation, because a
624 * bitmap scan doesn't care about either.
625 */
626 path->indextotalcost = indexTotalCost;
627 path->indexselectivity = indexSelectivity;
628
629 /* all costs for touching index itself included here */
630 startup_cost += indexStartupCost;
631 run_cost += indexTotalCost - indexStartupCost;
632
633 /* estimate number of main-table tuples fetched */
634 tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
635
636 /* fetch estimated page costs for tablespace containing table */
637 get_tablespace_page_costs(baserel->reltablespace,
638 &spc_random_page_cost,
640
641 /*----------
642 * Estimate number of main-table pages fetched, and compute I/O cost.
643 *
644 * When the index ordering is uncorrelated with the table ordering,
645 * we use an approximation proposed by Mackert and Lohman (see
646 * index_pages_fetched() for details) to compute the number of pages
647 * fetched, and then charge spc_random_page_cost per page fetched.
648 *
649 * When the index ordering is exactly correlated with the table ordering
650 * (just after a CLUSTER, for example), the number of pages fetched should
651 * be exactly selectivity * table_size. What's more, all but the first
652 * will be sequential fetches, not the random fetches that occur in the
653 * uncorrelated case. So if the number of pages is more than 1, we
654 * ought to charge
655 * spc_random_page_cost + (pages_fetched - 1) * spc_seq_page_cost
656 * For partially-correlated indexes, we ought to charge somewhere between
657 * these two estimates. We currently interpolate linearly between the
658 * estimates based on the correlation squared (XXX is that appropriate?).
659 *
660 * If it's an index-only scan, then we will not need to fetch any heap
661 * pages for which the visibility map shows all tuples are visible.
662 * Hence, reduce the estimated number of heap fetches accordingly.
663 * We use the measured fraction of the entire heap that is all-visible,
664 * which might not be particularly relevant to the subset of the heap
665 * that this query will fetch; but it's not clear how to do better.
666 *----------
667 */
668 if (loop_count > 1)
669 {
670 /*
671 * For repeated indexscans, the appropriate estimate for the
672 * uncorrelated case is to scale up the number of tuples fetched in
673 * the Mackert and Lohman formula by the number of scans, so that we
674 * estimate the number of pages fetched by all the scans; then
675 * pro-rate the costs for one scan. In this case we assume all the
676 * fetches are random accesses.
677 */
679 baserel->pages,
680 (double) index->pages,
681 root);
682
683 if (indexonly)
684 pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
685
687
688 max_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
689
690 /*
691 * In the perfectly correlated case, the number of pages touched by
692 * each scan is selectivity * table_size, and we can use the Mackert
693 * and Lohman formula at the page level to estimate how much work is
694 * saved by caching across scans. We still assume all the fetches are
695 * random, though, which is an overestimate that's hard to correct for
696 * without double-counting the cache effects. (But in most cases
697 * where such a plan is actually interesting, only one page would get
698 * fetched per scan anyway, so it shouldn't matter much.)
699 */
700 pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
701
703 baserel->pages,
704 (double) index->pages,
705 root);
706
707 if (indexonly)
708 pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
709
710 min_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
711 }
712 else
713 {
714 /*
715 * Normal case: apply the Mackert and Lohman formula, and then
716 * interpolate between that and the correlation-derived result.
717 */
718 pages_fetched = index_pages_fetched(tuples_fetched,
719 baserel->pages,
720 (double) index->pages,
721 root);
722
723 if (indexonly)
724 pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
725
727
728 /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
729 max_IO_cost = pages_fetched * spc_random_page_cost;
730
731 /* min_IO_cost is for the perfectly correlated case (csquared=1) */
732 pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
733
734 if (indexonly)
735 pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
736
737 if (pages_fetched > 0)
738 {
739 min_IO_cost = spc_random_page_cost;
740 if (pages_fetched > 1)
742 }
743 else
744 min_IO_cost = 0;
745 }
746
747 if (partial_path)
748 {
749 /*
750 * For index only scans compute workers based on number of index pages
751 * fetched; the number of heap pages we fetch might be so small as to
752 * effectively rule out parallelism, which we don't want to do.
753 */
754 if (indexonly)
755 rand_heap_pages = -1;
756
757 /*
758 * Estimate the number of parallel workers required to scan the index.
759 * Use the number of heap pages computed above; heap fetches won't be
760 * sequential, since for parallel scans the pages are accessed in
761 * random order.
762 */
767
768 /*
769 * Fall out if workers can't be assigned for parallel scan, because in
770 * such a case this path will be rejected. So there is no benefit in
771 * doing extra computation.
772 */
773 if (path->path.parallel_workers <= 0)
774 return;
775
776 path->path.parallel_aware = true;
777 }
778
779 /*
780 * Now interpolate based on estimated index order correlation to get total
781 * disk I/O cost for main table accesses.
782 */
783 csquared = indexCorrelation * indexCorrelation;
784
785 run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
786
787 /*
788 * Estimate CPU costs per tuple.
789 *
790 * What we want here is cpu_tuple_cost plus the evaluation costs of any
791 * qual clauses that we have to evaluate as qpquals.
792 */
794
795 startup_cost += qpqual_cost.startup;
797
798 cpu_run_cost += cpu_per_tuple * tuples_fetched;
799
800 /* tlist eval costs are paid per output row, not per tuple scanned */
801 startup_cost += path->path.pathtarget->cost.startup;
802 cpu_run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
803
804 /* Adjust costing for parallelism, if used. */
805 if (path->path.parallel_workers > 0)
806 {
808
810
811 /* The CPU cost is divided among all the workers. */
813 }
814
815 run_cost += cpu_run_cost;
816
817 path->path.startup_cost = startup_cost;
818 path->path.total_cost = startup_cost + run_cost;
819}
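
/*
 * A minimal sketch (not from the PostgreSQL sources) of the correlation
 * interpolation used above: csquared slides the heap I/O charge between the
 * worst case of fully random fetches and the best case of one seek followed
 * by sequential reads.  The function name is an assumption made for this
 * illustration.
 */
static double
example_index_heap_io_cost(double max_IO_cost, double min_IO_cost,
						   double indexCorrelation)
{
	double		csquared = indexCorrelation * indexCorrelation;

	/* csquared = 0 yields max_IO_cost; csquared = 1 yields min_IO_cost */
	return max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
}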
820
821/*
822 * extract_nonindex_conditions
823 *
824 * Given a list of quals to be enforced in an indexscan, extract the ones that
825 * will have to be applied as qpquals (ie, the index machinery won't handle
826 * them). Here we detect only whether a qual clause is directly redundant
827 * with some indexclause. If the index path is chosen for use, createplan.c
828 * will try a bit harder to get rid of redundant qual conditions; specifically
829 * it will see if quals can be proven to be implied by the indexquals. But
830 * it does not seem worth the cycles to try to factor that in at this stage,
831 * since we're only trying to estimate qual eval costs. Otherwise this must
832 * match the logic in create_indexscan_plan().
833 *
834 * qual_clauses, and the result, are lists of RestrictInfos.
835 * indexclauses is a list of IndexClauses.
836 */
837static List *
839{
840 List *result = NIL;
841 ListCell *lc;
842
843 foreach(lc, qual_clauses)
844 {
846
847 if (rinfo->pseudoconstant)
848 continue; /* we may drop pseudoconstants here */
849 if (is_redundant_with_indexclauses(rinfo, indexclauses))
850 continue; /* dup or derived from same EquivalenceClass */
851 /* ... skip the predicate proof attempt createplan.c will try ... */
852 result = lappend(result, rinfo);
853 }
854 return result;
855}
856
857/*
858 * index_pages_fetched
859 * Estimate the number of pages actually fetched after accounting for
860 * cache effects.
861 *
862 * We use an approximation proposed by Mackert and Lohman, "Index Scans
863 * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
864 * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
865 * The Mackert and Lohman approximation is that the number of pages
866 * fetched is
867 * PF =
868 * min(2TNs/(2T+Ns), T) when T <= b
869 * 2TNs/(2T+Ns) when T > b and Ns <= 2Tb/(2T-b)
870 * b + (Ns - 2Tb/(2T-b))*(T-b)/T when T > b and Ns > 2Tb/(2T-b)
871 * where
872 * T = # pages in table
873 * N = # tuples in table
874 * s = selectivity = fraction of table to be scanned
875 * b = # buffer pages available (we include kernel space here)
876 *
877 * We assume that effective_cache_size is the total number of buffer pages
878 * available for the whole query, and pro-rate that space across all the
879 * tables in the query and the index currently under consideration. (This
880 * ignores space needed for other indexes used by the query, but since we
881 * don't know which indexes will get used, we can't estimate that very well;
882 * and in any case counting all the tables may well be an overestimate, since
883 * depending on the join plan not all the tables may be scanned concurrently.)
884 *
885 * The product Ns is the number of tuples fetched; we pass in that
886 * product rather than calculating it here. "pages" is the number of pages
887 * in the object under consideration (either an index or a table).
888 * "index_pages" is the amount to add to the total table space, which was
889 * computed for us by make_one_rel.
890 *
891 * Caller is expected to have ensured that tuples_fetched is greater than zero
892 * and rounded to integer (see clamp_row_est). The result will likewise be
893 * greater than zero and integral.
894 */
895double
896index_pages_fetched(double tuples_fetched, BlockNumber pages,
897 double index_pages, PlannerInfo *root)
898{
899 double pages_fetched;
900 double total_pages;
901 double T,
902 b;
903
904 /* T is # pages in table, but don't allow it to be zero */
905 T = (pages > 1) ? (double) pages : 1.0;
906
907 /* Compute number of pages assumed to be competing for cache space */
908 total_pages = root->total_table_pages + index_pages;
910 Assert(T <= total_pages);
911
912 /* b is pro-rated share of effective_cache_size */
914
915 /* force it positive and integral */
916 if (b <= 1.0)
917 b = 1.0;
918 else
919 b = ceil(b);
920
921 /* This part is the Mackert and Lohman formula */
922 if (T <= b)
923 {
925 (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
926 if (pages_fetched >= T)
928 else
930 }
931 else
932 {
933 double lim;
934
935 lim = (2.0 * T * b) / (2.0 * T - b);
936 if (tuples_fetched <= lim)
937 {
939 (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
940 }
941 else
942 {
944 b + (tuples_fetched - lim) * (T - b) / T;
945 }
947 }
948 return pages_fetched;
949}
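
/*
 * A worked example with made-up numbers: for T = 1000 pages and b = 100
 * buffer pages, lim = 2*1000*100 / (2*1000 - 100) ~= 105.3.  Fetching
 * Ns = 50 tuples (Ns <= lim) gives PF = 2*1000*50 / (2*1000 + 50) ~= 48.8,
 * rounded up to 49 pages; fetching Ns = 500 tuples (Ns > lim) gives
 * PF = 100 + (500 - 105.3) * (1000 - 100) / 1000 ~= 455.3, rounded up to
 * 456 pages.
 */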
950
951/*
952 * get_indexpath_pages
953 * Determine the total size of the indexes used in a bitmap index path.
954 *
955 * Note: if the same index is used more than once in a bitmap tree, we will
956 * count it multiple times, which perhaps is the wrong thing ... but it's
957 * not completely clear, and detecting duplicates is difficult, so ignore it
958 * for now.
959 */
960static double
962{
963 double result = 0;
964 ListCell *l;
965
966 if (IsA(bitmapqual, BitmapAndPath))
967 {
968 BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
969
970 foreach(l, apath->bitmapquals)
971 {
972 result += get_indexpath_pages((Path *) lfirst(l));
973 }
974 }
975 else if (IsA(bitmapqual, BitmapOrPath))
976 {
977 BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
978
979 foreach(l, opath->bitmapquals)
980 {
981 result += get_indexpath_pages((Path *) lfirst(l));
982 }
983 }
984 else if (IsA(bitmapqual, IndexPath))
985 {
986 IndexPath *ipath = (IndexPath *) bitmapqual;
987
988 result = (double) ipath->indexinfo->pages;
989 }
990 else
991 elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
992
993 return result;
994}
995
996/*
997 * cost_bitmap_heap_scan
998 * Determines and returns the cost of scanning a relation using a bitmap
999 * index-then-heap plan.
1000 *
1001 * 'baserel' is the relation to be scanned
1002 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1003 * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
1004 * 'loop_count' is the number of repetitions of the indexscan to factor into
1005 * estimates of caching behavior
1006 *
1007 * Note: the component IndexPaths in bitmapqual should have been costed
1008 * using the same loop_count.
1009 */
1010void
1013 Path *bitmapqual, double loop_count)
1014{
1015 Cost startup_cost = 0;
1016 Cost run_cost = 0;
1017 Cost indexTotalCost;
1022 double tuples_fetched;
1023 double pages_fetched;
1024 double spc_seq_page_cost,
1025 spc_random_page_cost;
1026 double T;
1028
1029 /* Should only be applied to base relations */
1031 Assert(baserel->relid > 0);
1032 Assert(baserel->rtekind == RTE_RELATION);
1033
1034 /* Mark the path with the correct row estimate */
1035 if (param_info)
1036 path->rows = param_info->ppi_rows;
1037 else
1038 path->rows = baserel->rows;
1039
1041 loop_count, &indexTotalCost,
1042 &tuples_fetched);
1043
1044 startup_cost += indexTotalCost;
1045 T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
1046
1047 /* Fetch estimated page costs for tablespace containing table. */
1048 get_tablespace_page_costs(baserel->reltablespace,
1049 &spc_random_page_cost,
1051
1052 /*
1053 * For small numbers of pages we should charge spc_random_page_cost
1054 * apiece, while if nearly all the table's pages are being read, it's more
1055 * appropriate to charge spc_seq_page_cost apiece. The effect is
1056 * nonlinear, too. For lack of a better idea, interpolate like this to
1057 * determine the cost per page.
1058 */
1059 if (pages_fetched >= 2.0)
1060 cost_per_page = spc_random_page_cost -
1061 (spc_random_page_cost - spc_seq_page_cost)
1062 * sqrt(pages_fetched / T);
1063 else
1064 cost_per_page = spc_random_page_cost;
1065
1066 run_cost += pages_fetched * cost_per_page;
1067
1068 /*
1069 * Estimate CPU costs per tuple.
1070 *
1071 * Often the indexquals don't need to be rechecked at each tuple ... but
1072 * not always, especially not if there are enough tuples involved that the
1073 * bitmaps become lossy. For the moment, just assume they will be
1074 * rechecked always. This means we charge the full freight for all the
1075 * scan clauses.
1076 */
1078
1079 startup_cost += qpqual_cost.startup;
1081 cpu_run_cost = cpu_per_tuple * tuples_fetched;
1082
1083 /* Adjust costing for parallelism, if used. */
1084 if (path->parallel_workers > 0)
1085 {
1087
1088 /* The CPU cost is divided among all the workers. */
1090
1091 path->rows = clamp_row_est(path->rows / parallel_divisor);
1092 }
1093 else
1095
1096
1097 run_cost += cpu_run_cost;
1098
1099 /* tlist eval costs are paid per output row, not per tuple scanned */
1100 startup_cost += path->pathtarget->cost.startup;
1101 run_cost += path->pathtarget->cost.per_tuple * path->rows;
1102
1103 path->disabled_nodes =
1104 (baserel->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
1105 path->startup_cost = startup_cost;
1106 path->total_cost = startup_cost + run_cost;
1107}
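
/*
 * A minimal sketch (not from the PostgreSQL sources) of the per-page charge
 * interpolation used above: as the fraction of the table being read grows,
 * the charge slides from spc_random_page_cost toward spc_seq_page_cost along
 * a square-root curve.  The function name is an assumption made for this
 * illustration; sqrt() comes from <math.h>, included at the top of the file.
 */
static double
example_bitmap_heap_cost_per_page(double pages_fetched, double T,
								  double spc_random_page_cost,
								  double spc_seq_page_cost)
{
	if (pages_fetched >= 2.0)
		return spc_random_page_cost -
			(spc_random_page_cost - spc_seq_page_cost) *
			sqrt(pages_fetched / T);

	return spc_random_page_cost;
}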
1108
1109/*
1110 * cost_bitmap_tree_node
1111 * Extract cost and selectivity from a bitmap tree node (index/and/or)
1112 */
1113void
1115{
1116 if (IsA(path, IndexPath))
1117 {
1118 *cost = ((IndexPath *) path)->indextotalcost;
1119 *selec = ((IndexPath *) path)->indexselectivity;
1120
1121 /*
1122 * Charge a small amount per retrieved tuple to reflect the costs of
1123 * manipulating the bitmap. This is mostly to make sure that a bitmap
1124 * scan doesn't look to be the same cost as an indexscan to retrieve a
1125 * single tuple.
1126 */
1127 *cost += 0.1 * cpu_operator_cost * path->rows;
1128 }
1129 else if (IsA(path, BitmapAndPath))
1130 {
1131 *cost = path->total_cost;
1132 *selec = ((BitmapAndPath *) path)->bitmapselectivity;
1133 }
1134 else if (IsA(path, BitmapOrPath))
1135 {
1136 *cost = path->total_cost;
1137 *selec = ((BitmapOrPath *) path)->bitmapselectivity;
1138 }
1139 else
1140 {
1141 elog(ERROR, "unrecognized node type: %d", nodeTag(path));
1142 *cost = *selec = 0; /* keep compiler quiet */
1143 }
1144}
1145
1146/*
1147 * cost_bitmap_and_node
1148 * Estimate the cost of a BitmapAnd node
1149 *
1150 * Note that this considers only the costs of index scanning and bitmap
1151 * creation, not the eventual heap access. In that sense the object isn't
1152 * truly a Path, but it has enough path-like properties (costs in particular)
1153 * to warrant treating it as one. We don't bother to set the path rows field,
1154 * however.
1155 */
1156void
1158{
1161 ListCell *l;
1162
1163 /*
1164 * We estimate AND selectivity on the assumption that the inputs are
1165 * independent. This is probably often wrong, but we don't have the info
1166 * to do better.
1167 *
1168 * The runtime cost of the BitmapAnd itself is estimated at 100x
1169 * cpu_operator_cost for each tbm_intersect needed. Probably too small,
1170 * definitely too simplistic?
1171 */
1172 totalCost = 0.0;
1173 selec = 1.0;
1174 foreach(l, path->bitmapquals)
1175 {
1176 Path *subpath = (Path *) lfirst(l);
1177 Cost subCost;
1179
1181
1182 selec *= subselec;
1183
1184 totalCost += subCost;
1185 if (l != list_head(path->bitmapquals))
1186 totalCost += 100.0 * cpu_operator_cost;
1187 }
1188 path->bitmapselectivity = selec;
1189 path->path.rows = 0; /* per above, not used */
1190 path->path.disabled_nodes = 0;
1191 path->path.startup_cost = totalCost;
1192 path->path.total_cost = totalCost;
1193}
1194
1195/*
1196 * cost_bitmap_or_node
1197 * Estimate the cost of a BitmapOr node
1198 *
1199 * See comments for cost_bitmap_and_node.
1200 */
1201void
1203{
1206 ListCell *l;
1207
1208 /*
1209 * We estimate OR selectivity on the assumption that the inputs are
1210 * non-overlapping, since that's often the case in "x IN (list)" type
1211 * situations. Of course, we clamp to 1.0 at the end.
1212 *
1213 * The runtime cost of the BitmapOr itself is estimated at 100x
1214 * cpu_operator_cost for each tbm_union needed. Probably too small,
1215 * definitely too simplistic? We are aware that the tbm_unions are
1216 * optimized out when the inputs are BitmapIndexScans.
1217 */
1218 totalCost = 0.0;
1219 selec = 0.0;
1220 foreach(l, path->bitmapquals)
1221 {
1222 Path *subpath = (Path *) lfirst(l);
1223 Cost subCost;
1225
1227
1228 selec += subselec;
1229
1230 totalCost += subCost;
1231 if (l != list_head(path->bitmapquals) &&
1233 totalCost += 100.0 * cpu_operator_cost;
1234 }
1235 path->bitmapselectivity = Min(selec, 1.0);
1236 path->path.rows = 0; /* per above, not used */
1237 path->path.startup_cost = totalCost;
1238 path->path.total_cost = totalCost;
1239}
1240
1241/*
1242 * cost_tidscan
1243 * Determines and returns the cost of scanning a relation using TIDs.
1244 *
1245 * 'baserel' is the relation to be scanned
1246 * 'tidquals' is the list of TID-checkable quals
1247 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1248 */
1249void
1252{
1253 Cost startup_cost = 0;
1254 Cost run_cost = 0;
1258 double ntuples;
1259 ListCell *l;
1260 double spc_random_page_cost;
1261 uint64 enable_mask = 0;
1262
1263 /* Should only be applied to base relations */
1264 Assert(baserel->relid > 0);
1265 Assert(baserel->rtekind == RTE_RELATION);
1266 Assert(tidquals != NIL);
1267
1268 /* Mark the path with the correct row estimate */
1269 if (param_info)
1270 path->rows = param_info->ppi_rows;
1271 else
1272 path->rows = baserel->rows;
1273
1274 /* Count how many tuples we expect to retrieve */
1275 ntuples = 0;
1276 foreach(l, tidquals)
1277 {
1279 Expr *qual = rinfo->clause;
1280
1281 /*
1282 * We must use a TID scan for CurrentOfExpr; in any other case, we
1283 * should be generating a TID scan only if TID scans are allowed.
1284 * Also, if CurrentOfExpr is the qual, there should be only one.
1285 */
1286 Assert((baserel->pgs_mask & PGS_TIDSCAN) != 0 || IsA(qual, CurrentOfExpr));
1287 Assert(list_length(tidquals) == 1 || !IsA(qual, CurrentOfExpr));
1288
1289 if (IsA(qual, ScalarArrayOpExpr))
1290 {
1291 /* Each element of the array yields 1 tuple */
1292 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) qual;
1293 Node *arraynode = (Node *) lsecond(saop->args);
1294
1296 }
1297 else if (IsA(qual, CurrentOfExpr))
1298 {
1299 /* CURRENT OF yields 1 tuple */
1300 ntuples++;
1301 }
1302 else
1303 {
1304 /* It's just CTID = something, count 1 tuple */
1305 ntuples++;
1306 }
1307 }
1308
1309 /*
1310 * The TID qual expressions will be computed once, any other baserestrict
1311 * quals once per retrieved tuple.
1312 */
1313 cost_qual_eval(&tid_qual_cost, tidquals, root);
1314
1315 /* fetch estimated page cost for tablespace containing table */
1316 get_tablespace_page_costs(baserel->reltablespace,
1317 &spc_random_page_cost,
1318 NULL);
1319
1320 /* disk costs --- assume each tuple on a different page */
1321 run_cost += spc_random_page_cost * ntuples;
1322
1323 /* Add scanning CPU costs */
1325
1326 /* XXX currently we assume TID quals are a subset of qpquals */
1327 startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
1329 tid_qual_cost.per_tuple;
1330 run_cost += cpu_per_tuple * ntuples;
1331
1332 /* tlist eval costs are paid per output row, not per tuple scanned */
1333 startup_cost += path->pathtarget->cost.startup;
1334 run_cost += path->pathtarget->cost.per_tuple * path->rows;
1335
1336 /*
1337 * There are assertions above verifying that we only reach this function
1338 * either when baserel->pgs_mask includes PGS_TIDSCAN or when the TID scan
1339 * is the only legal path, so we only need to consider the effects of
1340 * PGS_CONSIDER_NONPARTIAL here.
1341 */
1342 if (path->parallel_workers == 0)
1344 path->disabled_nodes =
1345 (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
1346 path->startup_cost = startup_cost;
1347 path->total_cost = startup_cost + run_cost;
1348}
1349
1350/*
1351 * cost_tidrangescan
1352 * Determines and sets the costs of scanning a relation using a range of
1353 * TIDs for 'path'
1354 *
1355 * 'baserel' is the relation to be scanned
1356 * 'tidrangequals' is the list of TID-checkable range quals
1357 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1358 */
1359void
1361 RelOptInfo *baserel, List *tidrangequals,
1363{
1364 Selectivity selectivity;
1365 double pages;
1366 Cost startup_cost;
1372 double ntuples;
1373 double nseqpages;
1374 double spc_random_page_cost;
1375 double spc_seq_page_cost;
1377
1378 /* Should only be applied to base relations */
1379 Assert(baserel->relid > 0);
1380 Assert(baserel->rtekind == RTE_RELATION);
1381
1382 /* Mark the path with the correct row estimate */
1383 if (param_info)
1384 path->rows = param_info->ppi_rows;
1385 else
1386 path->rows = baserel->rows;
1387
1388 /* Count how many tuples and pages we expect to scan */
1389 selectivity = clauselist_selectivity(root, tidrangequals, baserel->relid,
1390 JOIN_INNER, NULL);
1391 pages = ceil(selectivity * baserel->pages);
1392
1393 if (pages <= 0.0)
1394 pages = 1.0;
1395
1396 /*
1397 * The first page in a range requires a random seek, but each subsequent
1398 * page is just a normal sequential page read. NOTE: it's desirable for
1399 * TID Range Scans to cost more than the equivalent Sequential Scans,
1400 * because Seq Scans have some performance advantages such as scan
1401 * synchronization, and we'd prefer one of them to be picked unless a TID
1402 * Range Scan really is better.
1403 */
1404 ntuples = selectivity * baserel->tuples;
1405 nseqpages = pages - 1.0;
1406
1407 /*
1408 * The TID qual expressions will be computed once, any other baserestrict
1409 * quals once per retrieved tuple.
1410 */
1411 cost_qual_eval(&tid_qual_cost, tidrangequals, root);
1412
1413 /* fetch estimated page cost for tablespace containing table */
1414 get_tablespace_page_costs(baserel->reltablespace,
1415 &spc_random_page_cost,
1417
1418 /* disk costs; 1 random page and the remainder as seq pages */
1419 disk_run_cost = spc_random_page_cost + spc_seq_page_cost * nseqpages;
1420
1421 /* Add scanning CPU costs */
1423
1424 /*
1425 * XXX currently we assume TID quals are a subset of qpquals at this
1426 * point; they will be removed (if possible) when we create the plan, so
1427 * we subtract their cost from the total qpqual cost. (If the TID quals
1428 * can't be removed, this is a mistake and we're going to underestimate
1429 * the CPU cost a bit.)
1430 */
1431 startup_cost = qpqual_cost.startup + tid_qual_cost.per_tuple;
1433 tid_qual_cost.per_tuple;
1434 cpu_run_cost = cpu_per_tuple * ntuples;
1435
1436 /* tlist eval costs are paid per output row, not per tuple scanned */
1437 startup_cost += path->pathtarget->cost.startup;
1438 cpu_run_cost += path->pathtarget->cost.per_tuple * path->rows;
1439
1440 /* Adjust costing for parallelism, if used. */
1441 if (path->parallel_workers > 0)
1442 {
1444
1445 /* The CPU cost is divided among all the workers. */
1447
1448 /*
1449 * In the case of a parallel plan, the row count needs to represent
1450 * the number of tuples processed per worker.
1451 */
1452 path->rows = clamp_row_est(path->rows / parallel_divisor);
1453 }
1454
1455 /*
1456 * We should not generate this path type when PGS_TIDSCAN is unset, but we
1457 * might need to disable this path due to PGS_CONSIDER_NONPARTIAL.
1458 */
1459 Assert((baserel->pgs_mask & PGS_TIDSCAN) != 0);
1460 if (path->parallel_workers == 0)
1462 path->disabled_nodes =
1463 (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
1464 path->startup_cost = startup_cost;
1465 path->total_cost = startup_cost + cpu_run_cost + disk_run_cost;
1466}
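
/*
 * A worked example with made-up numbers: a TID range expected to touch 10
 * pages, with spc_random_page_cost = 4.0 and spc_seq_page_cost = 1.0, is
 * charged 4.0 + 1.0 * 9 = 13.0 for disk access under the formula above.
 */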
1467
1468/*
1469 * cost_subqueryscan
1470 * Determines and returns the cost of scanning a subquery RTE.
1471 *
1472 * 'baserel' is the relation to be scanned
1473 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1474 * 'trivial_pathtarget' is true if the pathtarget is believed to be trivial.
1475 */
1476void
1479 bool trivial_pathtarget)
1480{
1481 Cost startup_cost;
1482 Cost run_cost;
1483 List *qpquals;
1486 uint64 enable_mask = 0;
1487
1488 /* Should only be applied to base relations that are subqueries */
1489 Assert(baserel->relid > 0);
1490 Assert(baserel->rtekind == RTE_SUBQUERY);
1491
1492 /*
1493 * We compute the rowcount estimate as the subplan's estimate times the
1494 * selectivity of relevant restriction clauses. In simple cases this will
1495 * come out the same as baserel->rows; but when dealing with parallelized
1496 * paths we must do it like this to get the right answer.
1497 */
1498 if (param_info)
1499 qpquals = list_concat_copy(param_info->ppi_clauses,
1500 baserel->baserestrictinfo);
1501 else
1502 qpquals = baserel->baserestrictinfo;
1503
1504 path->path.rows = clamp_row_est(path->subpath->rows *
1506 qpquals,
1507 0,
1508 JOIN_INNER,
1509 NULL));
1510
1511 /*
1512 * Cost of path is cost of evaluating the subplan, plus cost of evaluating
1513 * any restriction clauses and tlist that will be attached to the
1514 * SubqueryScan node, plus cpu_tuple_cost to account for selection and
1515 * projection overhead.
1516 */
1517 if (path->path.parallel_workers == 0)
1520 + (((baserel->pgs_mask & enable_mask) != enable_mask) ? 1 : 0);
1521 path->path.startup_cost = path->subpath->startup_cost;
1522 path->path.total_cost = path->subpath->total_cost;
1523
1524 /*
1525 * However, if there are no relevant restriction clauses and the
1526 * pathtarget is trivial, then we expect that setrefs.c will optimize away
1527 * the SubqueryScan plan node altogether, so we should just make its cost
1528 * and rowcount equal to the input path's.
1529 *
1530 * Note: there are some edge cases where createplan.c will apply a
1531 * different targetlist to the SubqueryScan node, thus falsifying our
1532 * current estimate of whether the target is trivial, and making the cost
1533 * estimate (though not the rowcount) wrong. It does not seem worth the
1534 * extra complication to try to account for that exactly, especially since
1535 * that behavior falsifies other cost estimates as well.
1536 */
1537 if (qpquals == NIL && trivial_pathtarget)
1538 return;
1539
1541
1542 startup_cost = qpqual_cost.startup;
1544 run_cost = cpu_per_tuple * path->subpath->rows;
1545
1546 /* tlist eval costs are paid per output row, not per tuple scanned */
1547 startup_cost += path->path.pathtarget->cost.startup;
1548 run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
1549
1550 path->path.startup_cost += startup_cost;
1551 path->path.total_cost += startup_cost + run_cost;
1552}
1553
1554/*
1555 * cost_functionscan
1556 * Determines and returns the cost of scanning a function RTE.
1557 *
1558 * 'baserel' is the relation to be scanned
1559 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1560 */
1561void
1564{
1565 Cost startup_cost = 0;
1566 Cost run_cost = 0;
1571 uint64 enable_mask = 0;
1572
1573 /* Should only be applied to base relations that are functions */
1574 Assert(baserel->relid > 0);
1575 rte = planner_rt_fetch(baserel->relid, root);
1576 Assert(rte->rtekind == RTE_FUNCTION);
1577
1578 /* Mark the path with the correct row estimate */
1579 if (param_info)
1580 path->rows = param_info->ppi_rows;
1581 else
1582 path->rows = baserel->rows;
1583
1584 /*
1585 * Estimate costs of executing the function expression(s).
1586 *
1587 * Currently, nodeFunctionscan.c always executes the functions to
1588 * completion before returning any rows, and caches the results in a
1589 * tuplestore. So the function eval cost is all startup cost, and per-row
1590 * costs are minimal.
1591 *
1592 * XXX in principle we ought to charge tuplestore spill costs if the
1593 * number of rows is large. However, given how phony our rowcount
1594 * estimates for functions tend to be, there's not a lot of point in that
1595 * refinement right now.
1596 */
1597 cost_qual_eval_node(&exprcost, (Node *) rte->functions, root);
1598
1599 startup_cost += exprcost.startup + exprcost.per_tuple;
1600
1601 /* Add scanning CPU costs */
1603
1604 startup_cost += qpqual_cost.startup;
1606 run_cost += cpu_per_tuple * baserel->tuples;
1607
1608 /* tlist eval costs are paid per output row, not per tuple scanned */
1609 startup_cost += path->pathtarget->cost.startup;
1610 run_cost += path->pathtarget->cost.per_tuple * path->rows;
1611
1612 if (path->parallel_workers == 0)
1614 path->disabled_nodes =
1615 (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
1616 path->startup_cost = startup_cost;
1617 path->total_cost = startup_cost + run_cost;
1618}
1619
1620/*
1621 * cost_tablefuncscan
1622 * Determines and returns the cost of scanning a table function.
1623 *
1624 * 'baserel' is the relation to be scanned
1625 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1626 */
1627void
1630{
1631 Cost startup_cost = 0;
1632 Cost run_cost = 0;
1637 uint64 enable_mask = 0;
1638
1639 /* Should only be applied to base relations that are functions */
1640 Assert(baserel->relid > 0);
1641 rte = planner_rt_fetch(baserel->relid, root);
1642 Assert(rte->rtekind == RTE_TABLEFUNC);
1643
1644 /* Mark the path with the correct row estimate */
1645 if (param_info)
1646 path->rows = param_info->ppi_rows;
1647 else
1648 path->rows = baserel->rows;
1649
1650 /*
1651 * Estimate costs of executing the table func expression(s).
1652 *
1653 * XXX in principle we ought to charge tuplestore spill costs if the
1654 * number of rows is large. However, given how phony our rowcount
1655 * estimates for tablefuncs tend to be, there's not a lot of point in that
1656 * refinement right now.
1657 */
1658 cost_qual_eval_node(&exprcost, (Node *) rte->tablefunc, root);
1659
1660 startup_cost += exprcost.startup + exprcost.per_tuple;
1661
1662 /* Add scanning CPU costs */
1664
1665 startup_cost += qpqual_cost.startup;
1667 run_cost += cpu_per_tuple * baserel->tuples;
1668
1669 /* tlist eval costs are paid per output row, not per tuple scanned */
1670 startup_cost += path->pathtarget->cost.startup;
1671 run_cost += path->pathtarget->cost.per_tuple * path->rows;
1672
1673 if (path->parallel_workers == 0)
1675 path->disabled_nodes =
1676 (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
1677 path->startup_cost = startup_cost;
1678 path->total_cost = startup_cost + run_cost;
1679}
1680
1681/*
1682 * cost_valuesscan
1683 * Determines and returns the cost of scanning a VALUES RTE.
1684 *
1685 * 'baserel' is the relation to be scanned
1686 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1687 */
1688void
1691{
1692 Cost startup_cost = 0;
1693 Cost run_cost = 0;
1696 uint64 enable_mask = 0;
1697
1698 /* Should only be applied to base relations that are values lists */
1699 Assert(baserel->relid > 0);
1700 Assert(baserel->rtekind == RTE_VALUES);
1701
1702 /* Mark the path with the correct row estimate */
1703 if (param_info)
1704 path->rows = param_info->ppi_rows;
1705 else
1706 path->rows = baserel->rows;
1707
1708 /*
1709 * For now, estimate list evaluation cost at one operator eval per list
1710 * (probably pretty bogus, but is it worth being smarter?)
1711 */
1713
1714 /* Add scanning CPU costs */
1716
1717 startup_cost += qpqual_cost.startup;
1719 run_cost += cpu_per_tuple * baserel->tuples;
1720
1721 /* tlist eval costs are paid per output row, not per tuple scanned */
1722 startup_cost += path->pathtarget->cost.startup;
1723 run_cost += path->pathtarget->cost.per_tuple * path->rows;
1724
1725 if (path->parallel_workers == 0)
1727 path->disabled_nodes =
1728 (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
1729 path->startup_cost = startup_cost;
1730 path->total_cost = startup_cost + run_cost;
1731}
1732
1733/*
1734 * cost_ctescan
1735 * Determines and returns the cost of scanning a CTE RTE.
1736 *
1737 * Note: this is used for both self-reference and regular CTEs; the
1738 * possible cost differences are below the threshold of what we could
1739 * estimate accurately anyway. Note that the costs of evaluating the
1740 * referenced CTE query are added into the final plan as initplan costs,
1741 * and should NOT be counted here.
1742 */
1743void
1746{
1747 Cost startup_cost = 0;
1748 Cost run_cost = 0;
1751 uint64 enable_mask = 0;
1752
1753 /* Should only be applied to base relations that are CTEs */
1754 Assert(baserel->relid > 0);
1755 Assert(baserel->rtekind == RTE_CTE);
1756
1757 /* Mark the path with the correct row estimate */
1758 if (param_info)
1759 path->rows = param_info->ppi_rows;
1760 else
1761 path->rows = baserel->rows;
1762
1763 /* Charge one CPU tuple cost per row for tuplestore manipulation */
1765
1766 /* Add scanning CPU costs */
1768
1769 startup_cost += qpqual_cost.startup;
1771 run_cost += cpu_per_tuple * baserel->tuples;
1772
1773 /* tlist eval costs are paid per output row, not per tuple scanned */
1774 startup_cost += path->pathtarget->cost.startup;
1775 run_cost += path->pathtarget->cost.per_tuple * path->rows;
1776
1777 if (path->parallel_workers == 0)
1779 path->disabled_nodes =
1780 (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
1781 path->startup_cost = startup_cost;
1782 path->total_cost = startup_cost + run_cost;
1783}
1784
1785/*
1786 * cost_namedtuplestorescan
1787 * Determines and returns the cost of scanning a named tuplestore.
1788 */
1789void
1792{
1793 Cost startup_cost = 0;
1794 Cost run_cost = 0;
1797 uint64 enable_mask = 0;
1798
1799 /* Should only be applied to base relations that are Tuplestores */
1800 Assert(baserel->relid > 0);
1801 Assert(baserel->rtekind == RTE_NAMEDTUPLESTORE);
1802
1803 /* Mark the path with the correct row estimate */
1804 if (param_info)
1805 path->rows = param_info->ppi_rows;
1806 else
1807 path->rows = baserel->rows;
1808
1809 /* Charge one CPU tuple cost per row for tuplestore manipulation */
1811
1812 /* Add scanning CPU costs */
1814
1815 startup_cost += qpqual_cost.startup;
1817 run_cost += cpu_per_tuple * baserel->tuples;
1818
1819 if (path->parallel_workers == 0)
1821 path->disabled_nodes =
1822 (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
1823 path->startup_cost = startup_cost;
1824 path->total_cost = startup_cost + run_cost;
1825}
1826
1827/*
1828 * cost_resultscan
1829 * Determines and returns the cost of scanning an RTE_RESULT relation.
1830 */
1831void
1834{
1835 Cost startup_cost = 0;
1836 Cost run_cost = 0;
1839 uint64 enable_mask = 0;
1840
1841 /* Should only be applied to RTE_RESULT base relations */
1842 Assert(baserel->relid > 0);
1843 Assert(baserel->rtekind == RTE_RESULT);
1844
1845 /* Mark the path with the correct row estimate */
1846 if (param_info)
1847 path->rows = param_info->ppi_rows;
1848 else
1849 path->rows = baserel->rows;
1850
1851 /* We charge qual cost plus cpu_tuple_cost */
1853
1854 startup_cost += qpqual_cost.startup;
1856 run_cost += cpu_per_tuple * baserel->tuples;
1857
1858 if (path->parallel_workers == 0)
1860 path->disabled_nodes =
1861 (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
1862 path->startup_cost = startup_cost;
1863 path->total_cost = startup_cost + run_cost;
1864}
1865
1866/*
1867 * cost_recursive_union
1868 * Determines and returns the cost of performing a recursive union,
1869 * and also the estimated output size.
1870 *
1871 * We are given Paths for the nonrecursive and recursive terms.
1872 */
1873void
1875{
1876 Cost startup_cost;
1877 Cost total_cost;
1878 double total_rows;
1879 uint64 enable_mask = 0;
1880
1881 /* We probably have decent estimates for the non-recursive term */
1882 startup_cost = nrterm->startup_cost;
1883 total_cost = nrterm->total_cost;
1884 total_rows = nrterm->rows;
1885
1886 /*
1887 * We arbitrarily assume that about 10 recursive iterations will be
1888 * needed, and that we've managed to get a good fix on the cost and output
1889 * size of each one of them. These are mighty shaky assumptions but it's
1890 * hard to see how to do better.
1891 */
1892 total_cost += 10 * rterm->total_cost;
1893 total_rows += 10 * rterm->rows;
1894
1895 /*
1896 * Also charge cpu_tuple_cost per row to account for the costs of
1897 * manipulating the tuplestores. (We don't worry about possible
1898 * spill-to-disk costs.)
1899 */
1900 total_cost += cpu_tuple_cost * total_rows;
1901
1902 if (runion->parallel_workers == 0)
1904 runion->disabled_nodes =
1905 (runion->parent->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
1906 runion->startup_cost = startup_cost;
1907 runion->total_cost = total_cost;
1908 runion->rows = total_rows;
1909 runion->pathtarget->width = Max(nrterm->pathtarget->width,
1910 rterm->pathtarget->width);
1911}
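/*
 * Illustrative sketch, not part of costsize.c: the recursive-union
 * estimate above in closed form.  cpu_tuple_cost is hard-coded to its
 * assumed GUC default of 0.01; the factor of 10 is the arbitrary
 * iteration count documented in the comment above.
 */
static Cost
recursive_union_cost_sketch(Cost nrterm_total, double nrterm_rows,
                            Cost rterm_total, double rterm_rows)
{
    double      total_rows = nrterm_rows + 10 * rterm_rows;

    return nrterm_total + 10 * rterm_total + 0.01 * total_rows;
}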
1912
1913/*
1914 * cost_tuplesort
1915 * Determines and returns the cost of sorting a relation using tuplesort,
1916 * not including the cost of reading the input data.
1917 *
1918 * If the total volume of data to sort is less than sort_mem, we will do
1919 * an in-memory sort, which requires no I/O and about t*log2(t) tuple
1920 * comparisons for t tuples.
1921 *
1922 * If the total volume exceeds sort_mem, we switch to a tape-style merge
1923 * algorithm. There will still be about t*log2(t) tuple comparisons in
1924 * total, but we will also need to write and read each tuple once per
1925 * merge pass. We expect about ceil(logM(r)) merge passes where r is the
1926 * number of initial runs formed and M is the merge order used by tuplesort.c.
1927 * Since the average initial run should be about sort_mem, we have
1928 * disk traffic = 2 * relsize * ceil(logM(relsize / sort_mem))
1929 * cpu = comparison_cost * t * log2(t)
1930 *
1931 * If the sort is bounded (i.e., only the first k result tuples are needed)
1932 * and k tuples can fit into sort_mem, we use a heap method that keeps only
1933 * k tuples in the heap; this will require about t*log2(k) tuple comparisons.
1934 *
1935 * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
1936 * accesses (XXX can't we refine that guess?)
1937 *
1938 * By default, we charge two operator evals per tuple comparison, which should
1939 * be in the right ballpark in most cases. The caller can tweak this by
1940 * specifying nonzero comparison_cost; typically that's used for any extra
1941 * work that has to be done to prepare the inputs to the comparison operators.
1942 *
1943 * 'tuples' is the number of tuples in the relation
1944 * 'width' is the average tuple width in bytes
1945 * 'comparison_cost' is the extra cost per comparison, if any
1946 * 'sort_mem' is the number of kilobytes of work memory allowed for the sort
1947 * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
1948 */
1949static void
1950cost_tuplesort(Cost *startup_cost, Cost *run_cost,
1951 double tuples, int width,
1952               Cost comparison_cost, int sort_mem,
1953               double limit_tuples)
1954{
1955 double input_bytes = relation_byte_size(tuples, width);
1956 double output_bytes;
1957 double output_tuples;
1959
1960 /*
1961 * We want to be sure the cost of a sort is never estimated as zero, even
1962 * if passed-in tuple count is zero. Besides, mustn't do log(0)...
1963 */
1964 if (tuples < 2.0)
1965 tuples = 2.0;
1966
1967 /* Include the default cost-per-comparison */
1969
1970 /* Do we have a useful LIMIT? */
1971 if (limit_tuples > 0 && limit_tuples < tuples)
1972 {
1973 output_tuples = limit_tuples;
1975 }
1976 else
1977 {
1978 output_tuples = tuples;
1980 }
1981
1983 {
1984 /*
1985 * We'll have to use a disk-based sort of all the tuples
1986 */
1987 double npages = ceil(input_bytes / BLCKSZ);
1988 double nruns = input_bytes / sort_mem_bytes;
1990 double log_runs;
1991 double npageaccesses;
1992
1993 /*
1994 * CPU costs
1995 *
1996 * Assume about N log2 N comparisons
1997 */
1998 *startup_cost = comparison_cost * tuples * LOG2(tuples);
1999
2000 /* Disk costs */
2001
2002 /* Compute logM(r) as log(r) / log(M) */
2003 if (nruns > mergeorder)
2005 else
2006 log_runs = 1.0;
2007 npageaccesses = 2.0 * npages * log_runs;
2008 /* Assume 3/4ths of accesses are sequential, 1/4th are not */
2009 *startup_cost += npageaccesses *
2010 (seq_page_cost * 0.75 + random_page_cost * 0.25);
2011 }
2012 else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)
2013 {
2014 /*
2015 * We'll use a bounded heap-sort keeping just K tuples in memory, for
2016 * a total number of tuple comparisons of N log2 K; but the constant
2017 * factor is a bit higher than for quicksort. Tweak it so that the
2018 * cost curve is continuous at the crossover point.
2019 */
2020 *startup_cost = comparison_cost * tuples * LOG2(2.0 * output_tuples);
2021 }
2022 else
2023 {
2024 /* We'll use plain quicksort on all the input tuples */
2025 *startup_cost = comparison_cost * tuples * LOG2(tuples);
2026 }
2027
2028 /*
2029 * Also charge a small amount (arbitrarily set equal to operator cost) per
2030 * extracted tuple. We don't charge cpu_tuple_cost because a Sort node
2031 * doesn't do qual-checking or projection, so it has less overhead than
2032 * most plan nodes. Note it's correct to use tuples not output_tuples
2033 * here --- the upper LIMIT will pro-rate the run cost so we'd be double
2034 * counting the LIMIT otherwise.
2035 */
2036 *run_cost = cpu_operator_cost * tuples;
2037}
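/*
 * Illustrative sketch, not part of costsize.c: the comparison counts that
 * drive the branches above, using the file's LOG2() macro.  For t = 1e6
 * input tuples and a LIMIT of k = 100, the bounded heap-sort needs about
 * t * log2(2k) ~= 7.6 million comparisons versus t * log2(t) ~= 20 million
 * for a full quicksort or external merge.
 */
static double
tuplesort_comparisons_sketch(double tuples, double limit_tuples)
{
    if (limit_tuples > 0 && tuples > 2 * limit_tuples)
        return tuples * LOG2(2.0 * limit_tuples);   /* bounded heap-sort */
    else
        return tuples * LOG2(tuples);   /* quicksort or external merge */
}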
2038
2039/*
2040 * cost_incremental_sort
2041 * Determines and returns the cost of sorting a relation incrementally, when
2042 * the input path is presorted by a prefix of the pathkeys.
2043 *
2044 * 'presorted_keys' is the number of leading pathkeys by which the input path
2045 * is sorted.
2046 *
2047 * We estimate the number of groups into which the relation is divided by the
2048 * leading pathkeys, and then calculate the cost of sorting a single group
2049 * with tuplesort using cost_tuplesort().
2050 */
2051void
2052cost_incremental_sort(Path *path,
2053                      PlannerInfo *root, List *pathkeys, int presorted_keys,
2054                      int input_disabled_nodes,
2055                      Cost input_startup_cost, Cost input_total_cost,
2056                      double input_tuples, int width, Cost comparison_cost, int sort_mem,
2057 double limit_tuples)
2058{
2059 Cost startup_cost,
2060 run_cost,
2062 double group_tuples,
2068 ListCell *l;
2069 bool unknown_varno = false;
2070
2071 Assert(presorted_keys > 0 && presorted_keys < list_length(pathkeys));
2072
2073 /*
2074 * We want to be sure the cost of a sort is never estimated as zero, even
2075 * if passed-in tuple count is zero. Besides, mustn't do log(0)...
2076 */
2077 if (input_tuples < 2.0)
2078 input_tuples = 2.0;
2079
2080 /* Default estimate of number of groups, capped to one group per row. */
2081 input_groups = Min(input_tuples, DEFAULT_NUM_DISTINCT);
2082
2083 /*
2084 * Extract presorted keys as list of expressions.
2085 *
2086 * We need to be careful about Vars containing "varno 0" which might have
2087 * been introduced by generate_append_tlist, which would confuse
2088 * estimate_num_groups (in fact it'd fail for such expressions). See
2089 * recurse_set_operations which has to deal with the same issue.
2090 *
2091 * Unlike recurse_set_operations we can't access the original target list
2092 * here, and even if we could it's not very clear how useful that would be
2093 * for a set operation combining multiple tables. So we simply detect if
2094 * there are any expressions with "varno 0" and use the default
2095 * DEFAULT_NUM_DISTINCT in that case.
2096 *
2097 * We might also use either 1.0 (a single group) or input_tuples (each row
2098 * being a separate group), pretty much the worst and best case for
2099 * incremental sort. But those are extreme cases and using something in
2100 * between seems reasonable. Furthermore, generate_append_tlist is used
2101 * for set operations, which are likely to produce mostly unique output
2102 * anyway - from that standpoint the DEFAULT_NUM_DISTINCT is defensive
2103 * while maintaining lower startup cost.
2104 */
2105 foreach(l, pathkeys)
2106 {
2107 PathKey *key = (PathKey *) lfirst(l);
2109 linitial(key->pk_eclass->ec_members);
2110
2111 /*
2112 * Check if the expression contains Var with "varno 0" so that we
2113 * don't call estimate_num_groups in that case.
2114 */
2115 if (bms_is_member(0, pull_varnos(root, (Node *) member->em_expr)))
2116 {
2117 unknown_varno = true;
2118 break;
2119 }
2120
2121 /* expression not containing any Vars with "varno 0" */
2123
2124 if (foreach_current_index(l) + 1 >= presorted_keys)
2125 break;
2126 }
2127
2128 /* Estimate the number of groups with equal presorted keys. */
2129 if (!unknown_varno)
2131 NULL, NULL);
2132
2133 group_tuples = input_tuples / input_groups;
2135
2136 /*
2137 * Estimate the average cost of sorting of one group where presorted keys
2138 * are equal.
2139 */
2142 limit_tuples);
2143
2144 /*
2145 * Startup cost of incremental sort is the startup cost of its first group
2146 * plus the cost of its input.
2147 */
2148 startup_cost = group_startup_cost + input_startup_cost +
2150
2151 /*
2152 * After we started producing tuples from the first group, the cost of
2153 * producing all the tuples is given by the cost to finish processing this
2154 * group, plus the total cost to process the remaining groups, plus the
2155 * remaining cost of input.
2156 */
2159
2160 /*
2161 * Incremental sort adds some overhead by itself. Firstly, it has to
2162 * detect the sort groups. This is roughly equal to one extra copy and
2163 * comparison per tuple.
2164 */
2165 run_cost += (cpu_tuple_cost + comparison_cost) * input_tuples;
2166
2167 /*
2168 * Additionally, we charge double cpu_tuple_cost for each input group to
2169 * account for the tuplesort_reset that's performed after each group.
2170 */
2171 run_cost += 2.0 * cpu_tuple_cost * input_groups;
2172
2173 path->rows = input_tuples;
2174
2175 /*
2176 * We should not generate these paths when enable_incremental_sort=false.
2177 * We can ignore PGS_CONSIDER_NONPARTIAL here, because if it's relevant,
2178 * it will have already affected the input path.
2179 */
2182
2183 path->startup_cost = startup_cost;
2184 path->total_cost = startup_cost + run_cost;
2185}
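/*
 * Illustrative sketch, not part of costsize.c: how the per-group tuplesort
 * estimate is scaled up to the whole input in the function above.  This is
 * a reading of the surrounding code, with the group_* values taken from
 * cost_tuplesort() on one average group and group_input_run_cost being the
 * share of the input run cost attributable to a single group.
 */
static void
incremental_sort_cost_sketch(double input_groups,
                             Cost group_startup_cost, Cost group_run_cost,
                             Cost group_input_run_cost,
                             Cost input_startup_cost,
                             Cost *startup_cost, Cost *run_cost)
{
    /* sort and read the first group before the first row comes out */
    *startup_cost = group_startup_cost + input_startup_cost +
        group_input_run_cost;

    /* then finish this group and sort/read the remaining groups */
    *run_cost = group_run_cost +
        (group_run_cost + group_startup_cost) * (input_groups - 1) +
        group_input_run_cost * (input_groups - 1);
}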
2186
2187/*
2188 * cost_sort
2189 * Determines and returns the cost of sorting a relation, including
2190 * the cost of reading the input data.
2191 *
2192 * NOTE: some callers currently pass NIL for pathkeys because they
2193 * can't conveniently supply the sort keys. Since this routine doesn't
2194 * currently do anything with pathkeys anyway, that doesn't matter...
2195 * but if it ever does, it should react gracefully to lack of key data.
2196 * (Actually, the thing we'd most likely be interested in is just the number
2197 * of sort keys, which all callers *could* supply.)
2198 */
2199void
2200cost_sort(Path *path, PlannerInfo *root,
2201          List *pathkeys, int input_disabled_nodes,
2202          Cost input_cost, double tuples, int width,
2203          Cost comparison_cost, int sort_mem,
2204          double limit_tuples)
2205
2206{
2207 Cost startup_cost;
2208 Cost run_cost;
2209
2210 cost_tuplesort(&startup_cost, &run_cost,
2211 tuples, width,
2213 limit_tuples);
2214
2215 startup_cost += input_cost;
2216
2217 /*
2218 * We can ignore PGS_CONSIDER_NONPARTIAL here, because if it's relevant,
2219 * it will have already affected the input path.
2220 */
2221 path->rows = tuples;
2223 path->startup_cost = startup_cost;
2224 path->total_cost = startup_cost + run_cost;
2225}
2226
2227/*
2228 * append_nonpartial_cost
2229 * Estimate the cost of the non-partial paths in a Parallel Append.
2230 * The non-partial paths are assumed to be the first "numpaths" paths
2231 * from the subpaths list, and to be in order of decreasing cost.
2232 */
2233static Cost
2234append_nonpartial_cost(List *subpaths, int numpaths, int parallel_workers)
2235{
2236 Cost *costarr;
2237 int arrlen;
2238 ListCell *l;
2239 ListCell *cell;
2240 int path_index;
2241 int min_index;
2242 int max_index;
2243
2244 if (numpaths == 0)
2245 return 0;
2246
2247 /*
2248 * Array length is number of workers or number of relevant paths,
2249 * whichever is less.
2250 */
2251 arrlen = Min(parallel_workers, numpaths);
2253
2254 /* The first few paths will each be claimed by a different worker. */
2255 path_index = 0;
2256 foreach(cell, subpaths)
2257 {
2258 Path *subpath = (Path *) lfirst(cell);
2259
2260 if (path_index == arrlen)
2261 break;
2263 }
2264
2265 /*
2266 * Since subpaths are sorted by decreasing cost, the last one will have
2267 * the minimum cost.
2268 */
2269 min_index = arrlen - 1;
2270
2271 /*
2272 * For each of the remaining subpaths, add its cost to the array element
2273 * with minimum cost.
2274 */
2275 for_each_cell(l, subpaths, cell)
2276 {
2277 Path *subpath = (Path *) lfirst(l);
2278
2279 /* Consider only the non-partial paths */
2280 if (path_index++ == numpaths)
2281 break;
2282
2284
2285 /* Update the new min cost array index */
2286 min_index = 0;
2287 for (int i = 0; i < arrlen; i++)
2288 {
2289 if (costarr[i] < costarr[min_index])
2290 min_index = i;
2291 }
2292 }
2293
2294 /* Return the highest cost from the array */
2295 max_index = 0;
2296 for (int i = 0; i < arrlen; i++)
2297 {
2298 if (costarr[i] > costarr[max_index])
2299 max_index = i;
2300 }
2301
2302 return costarr[max_index];
2303}
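/*
 * Illustrative sketch, not part of costsize.c: a standalone rerun of the
 * greedy assignment above.  With 2 workers and non-partial subpath costs
 * {20, 10, 5, 5} (already sorted by decreasing cost) the result is
 * max(20, 10 + 5 + 5) = 20.
 */
static Cost
append_nonpartial_cost_sketch(void)
{
    Cost        costs[] = {20.0, 10.0, 5.0, 5.0};
    Cost        workers[2] = {costs[0], costs[1]};  /* first paths claimed */

    for (int i = 2; i < 4; i++)
    {
        int         min = (workers[0] <= workers[1]) ? 0 : 1;

        workers[min] += costs[i];   /* pile onto the cheapest worker */
    }
    return Max(workers[0], workers[1]); /* 20.0 */
}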
2304
2305/*
2306 * cost_append
2307 * Determines and returns the cost of an Append node.
2308 */
2309void
2310cost_append(AppendPath *apath, PlannerInfo *root)
2311{
2312 RelOptInfo *rel = apath->path.parent;
2313 ListCell *l;
2315
2316 if (apath->path.parallel_workers == 0)
2318
2319 apath->path.disabled_nodes =
2320 (rel->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
2321 apath->path.startup_cost = 0;
2322 apath->path.total_cost = 0;
2323 apath->path.rows = 0;
2324
2325 if (apath->subpaths == NIL)
2326 return;
2327
2328 if (!apath->path.parallel_aware)
2329 {
2330 List *pathkeys = apath->path.pathkeys;
2331
2332 if (pathkeys == NIL)
2333 {
2334 Path *firstsubpath = (Path *) linitial(apath->subpaths);
2335
2336 /*
2337 * For an unordered, non-parallel-aware Append we take the startup
2338 * cost as the startup cost of the first subpath.
2339 */
2340 apath->path.startup_cost = firstsubpath->startup_cost;
2341
2342 /*
2343 * Compute rows, number of disabled nodes, and total cost as sums
2344 * of underlying subplan values.
2345 */
2346 foreach(l, apath->subpaths)
2347 {
2348 Path *subpath = (Path *) lfirst(l);
2349
2350 apath->path.rows += subpath->rows;
2351 apath->path.disabled_nodes += subpath->disabled_nodes;
2352 apath->path.total_cost += subpath->total_cost;
2353 }
2354 }
2355 else
2356 {
2357 /*
2358 * For an ordered, non-parallel-aware Append we take the startup
2359 * cost as the sum of the subpath startup costs. This ensures
2360 * that we don't underestimate the startup cost when a query's
2361 * LIMIT is such that several of the children have to be run to
2362 * satisfy it. This might be overkill --- another plausible hack
2363 * would be to take the Append's startup cost as the maximum of
2364 * the child startup costs. But we don't want to risk believing
2365 * that an ORDER BY LIMIT query can be satisfied at small cost
2366 * when the first child has small startup cost but later ones
2367 * don't. (If we had the ability to deal with nonlinear cost
2368 * interpolation for partial retrievals, we would not need to be
2369 * so conservative about this.)
2370 *
2371 * This case is also different from the above in that we have to
2372 * account for possibly injecting sorts into subpaths that aren't
2373 * natively ordered.
2374 */
2375 foreach(l, apath->subpaths)
2376 {
2377 Path *subpath = (Path *) lfirst(l);
2378 int presorted_keys;
2379 Path sort_path; /* dummy for result of
2380 * cost_sort/cost_incremental_sort */
2381
2382 if (!pathkeys_count_contained_in(pathkeys, subpath->pathkeys,
2383 &presorted_keys))
2384 {
2385 /*
2386 * We'll need to insert a Sort node, so include costs for
2387 * that. We choose to use incremental sort if it is
2388 * enabled and there are presorted keys; otherwise we use
2389 * full sort.
2390 *
2391 * We can use the parent's LIMIT if any, since we
2392 * certainly won't pull more than that many tuples from
2393 * any child.
2394 */
2395 if (enable_incremental_sort && presorted_keys > 0)
2396 {
2398 root,
2399 pathkeys,
2400 presorted_keys,
2401 subpath->disabled_nodes,
2402 subpath->startup_cost,
2403 subpath->total_cost,
2404 subpath->rows,
2405 subpath->pathtarget->width,
2406 0.0,
2407 work_mem,
2408 apath->limit_tuples);
2409 }
2410 else
2411 {
2413 root,
2414 pathkeys,
2415 subpath->disabled_nodes,
2416 subpath->total_cost,
2417 subpath->rows,
2418 subpath->pathtarget->width,
2419 0.0,
2420 work_mem,
2421 apath->limit_tuples);
2422 }
2423
2424 subpath = &sort_path;
2425 }
2426
2427 apath->path.rows += subpath->rows;
2428 apath->path.disabled_nodes += subpath->disabled_nodes;
2429 apath->path.startup_cost += subpath->startup_cost;
2430 apath->path.total_cost += subpath->total_cost;
2431 }
2432 }
2433 }
2434 else /* parallel-aware */
2435 {
2436 int i = 0;
2438
2439 /* Parallel-aware Append never produces ordered output. */
2440 Assert(apath->path.pathkeys == NIL);
2441
2442 /* Calculate startup cost. */
2443 foreach(l, apath->subpaths)
2444 {
2445 Path *subpath = (Path *) lfirst(l);
2446
2447 /*
2448 * Append will start returning tuples when the child node having
2449 * lowest startup cost is done setting up. We consider only the
2450 * first few subplans that immediately get a worker assigned.
2451 */
2452 if (i == 0)
2453 apath->path.startup_cost = subpath->startup_cost;
2454 else if (i < apath->path.parallel_workers)
2455 apath->path.startup_cost = Min(apath->path.startup_cost,
2456 subpath->startup_cost);
2457
2458 /*
2459 * Apply parallel divisor to subpaths. Scale the number of rows
2460 * for each partial subpath based on the ratio of the parallel
2461 * divisor originally used for the subpath to the one we adopted.
2462 * Also add the cost of partial paths to the total cost, but
2463 * ignore non-partial paths for now.
2464 */
2465 if (i < apath->first_partial_path)
2466 apath->path.rows += subpath->rows / parallel_divisor;
2467 else
2468 {
2470
2472 apath->path.rows += subpath->rows * (subpath_parallel_divisor /
2474 apath->path.total_cost += subpath->total_cost;
2475 }
2476
2477 apath->path.disabled_nodes += subpath->disabled_nodes;
2478 apath->path.rows = clamp_row_est(apath->path.rows);
2479
2480 i++;
2481 }
2482
2483 /* Add cost for non-partial subpaths. */
2484 apath->path.total_cost +=
2485 append_nonpartial_cost(apath->subpaths,
2486 apath->first_partial_path,
2487 apath->path.parallel_workers);
2488 }
2489
2490 /*
2491 * Although Append does not do any selection or projection, it's not free;
2492 * add a small per-tuple overhead.
2493 */
2494 apath->path.total_cost +=
2496}
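/*
 * Illustrative sketch, not part of costsize.c: how a partial subpath's row
 * estimate is rescaled when the Parallel Append runs with a different
 * number of workers than the subpath was costed for.  The divisor formula
 * mirrors get_parallel_divisor() with parallel_leader_participation on
 * (the leader contributes 1 - 0.3 * workers while that stays positive).
 */
static double
parallel_append_row_scale_sketch(double subpath_rows,
                                 int subpath_workers, int append_workers)
{
    double      subpath_div = subpath_workers;
    double      append_div = append_workers;

    if (1.0 - 0.3 * subpath_workers > 0)
        subpath_div += 1.0 - 0.3 * subpath_workers;
    if (1.0 - 0.3 * append_workers > 0)
        append_div += 1.0 - 0.3 * append_workers;

    /* undo the divisor the subpath was costed with, apply the Append's */
    return subpath_rows * (subpath_div / append_div);
}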
2497
2498/*
2499 * cost_merge_append
2500 * Determines and returns the cost of a MergeAppend node.
2501 *
2502 * MergeAppend merges several pre-sorted input streams, using a heap that
2503 * at any given instant holds the next tuple from each stream. If there
2504 * are N streams, we need about N*log2(N) tuple comparisons to construct
2505 * the heap at startup, and then for each output tuple, about log2(N)
2506 * comparisons to replace the top entry.
2507 *
2508 * (The effective value of N will drop once some of the input streams are
2509 * exhausted, but it seems unlikely to be worth trying to account for that.)
2510 *
2511 * The heap is never spilled to disk, since we assume N is not very large.
2512 * So this is much simpler than cost_sort.
2513 *
2514 * As in cost_sort, we charge two operator evals per tuple comparison.
2515 *
2516 * 'pathkeys' is a list of sort keys
2517 * 'n_streams' is the number of input streams
2518 * 'input_disabled_nodes' is the sum of the input streams' disabled node counts
2519 * 'input_startup_cost' is the sum of the input streams' startup costs
2520 * 'input_total_cost' is the sum of the input streams' total costs
2521 * 'tuples' is the number of tuples in all the streams
2522 */
2523void
2524cost_merge_append(Path *path, PlannerInfo *root,
2525                  List *pathkeys, int n_streams,
2526                  int input_disabled_nodes,
2527                  Cost input_startup_cost, Cost input_total_cost,
2528                  double tuples)
2529{
2530 RelOptInfo *rel = path->parent;
2531 Cost startup_cost = 0;
2532 Cost run_cost = 0;
2534 double N;
2535 double logN;
2537
2538 if (path->parallel_workers == 0)
2540
2541 /*
2542 * Avoid log(0)...
2543 */
2544 N = (n_streams < 2) ? 2.0 : (double) n_streams;
2545 logN = LOG2(N);
2546
2547 /* Assumed cost per tuple comparison */
2549
2550 /* Heap creation cost */
2551 startup_cost += comparison_cost * N * logN;
2552
2553 /* Per-tuple heap maintenance cost */
2554 run_cost += tuples * comparison_cost * logN;
2555
2556 /*
2557 * Although MergeAppend does not do any selection or projection, it's not
2558 * free; add a small per-tuple overhead.
2559 */
2560 run_cost += cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * tuples;
2561
2562 path->disabled_nodes =
2563 (rel->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
2565 path->startup_cost = startup_cost + input_startup_cost;
2566 path->total_cost = startup_cost + run_cost + input_total_cost;
2567}
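/*
 * Illustrative sketch, not part of costsize.c: the formulas above for
 * N = 4 presorted streams and 1000 total tuples, assuming the GUC defaults
 * cpu_operator_cost = 0.0025 and cpu_tuple_cost = 0.01, and an
 * APPEND_CPU_COST_MULTIPLIER of 0.5.
 */
static void
merge_append_cost_sketch(Cost *startup_cost, Cost *run_cost)
{
    double      N = 4.0;
    double      tuples = 1000.0;
    double      logN = LOG2(N);             /* 2.0 */
    Cost        comparison_cost = 2.0 * 0.0025; /* two operator evals */

    *startup_cost = comparison_cost * N * logN;     /* heap build: 0.04 */
    *run_cost = tuples * comparison_cost * logN;    /* heap sift-down: 10.0 */
    *run_cost += 0.01 * 0.5 * tuples;               /* per-tuple overhead: 5.0 */
}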
2568
2569/*
2570 * cost_material
2571 * Determines and returns the cost of materializing a relation, including
2572 * the cost of reading the input data.
2573 *
2574 * If the total volume of data to materialize exceeds work_mem, we will need
2575 * to write it to disk, so the cost is much higher in that case.
2576 *
2577 * Note that here we are estimating the costs for the first scan of the
2578 * relation, so the materialization is all overhead --- any savings will
2579 * occur only on rescan, which is estimated in cost_rescan.
2580 */
2581void
2582cost_material(Path *path,
2583              bool enabled, int input_disabled_nodes,
2584              Cost input_startup_cost, Cost input_total_cost,
2585              double tuples, int width)
2586{
2587 Cost startup_cost = input_startup_cost;
2589 double nbytes = relation_byte_size(tuples, width);
2590 double work_mem_bytes = work_mem * (Size) 1024;
2591
2592 path->rows = tuples;
2593
2594 /*
2595 * Whether spilling or not, charge 2x cpu_operator_cost per tuple to
2596 * reflect bookkeeping overhead. (This rate must be more than what
2597 * cost_rescan charges for materialize, ie, cpu_operator_cost per tuple;
2598 * if it is exactly the same then there will be a cost tie between
2599 * nestloop with A outer, materialized B inner and nestloop with B outer,
2600 * materialized A inner. The extra cost ensures we'll prefer
2601 * materializing the smaller rel.) Note that this is normally a good deal
2602 * less than cpu_tuple_cost; which is OK because a Material plan node
2603 * doesn't do qual-checking or projection, so it's got less overhead than
2604 * most plan nodes.
2605 */
2606 run_cost += 2 * cpu_operator_cost * tuples;
2607
2608 /*
2609 * If we will spill to disk, charge at the rate of seq_page_cost per page.
2610 * This cost is assumed to be evenly spread through the plan run phase,
2611 * which isn't exactly accurate but our cost model doesn't allow for
2612 * nonuniform costs within the run phase.
2613 */
2614 if (nbytes > work_mem_bytes)
2615 {
2616 double npages = ceil(nbytes / BLCKSZ);
2617
2618 run_cost += seq_page_cost * npages;
2619 }
2620
2621 path->disabled_nodes = input_disabled_nodes + (enabled ? 0 : 1);
2622 path->startup_cost = startup_cost;
2623 path->total_cost = startup_cost + run_cost;
2624}
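/*
 * Illustrative sketch, not part of costsize.c: materializing 1 million
 * tuples of roughly 128 bytes each (an assumed approximation of the
 * relation_byte_size() result) with work_mem set to 4MB.  Assumes the GUC
 * defaults cpu_operator_cost = 0.0025 and seq_page_cost = 1.0, and
 * BLCKSZ = 8192.
 */
static Cost
material_run_cost_sketch(void)
{
    double      tuples = 1000000.0;
    double      nbytes = tuples * 128.0;
    double      work_mem_bytes = 4096.0 * 1024.0;
    Cost        run_cost = 2 * 0.0025 * tuples; /* bookkeeping: 5000 */

    if (nbytes > work_mem_bytes)    /* 128MB > 4MB, so we spill */
        run_cost += 1.0 * ceil(nbytes / 8192.0);    /* 15625 pages written */

    return run_cost;    /* 20625 */
}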
2625
2626/*
2627 * cost_memoize_rescan
2628 * Determines the estimated cost of rescanning a Memoize node.
2629 *
2630 * In order to estimate this, we must gain knowledge of how often we expect to
2631 * be called and how many distinct sets of parameters we are likely to be
2632 * called with. If we expect a good cache hit ratio, then we can set our
2633 * costs to account for that hit ratio, plus a little bit of cost for the
2634 * caching itself. Caching will not work out well if we expect to be called
2635 * with too many distinct parameter values. The worst-case here is that we
2636 * never see any parameter value twice, in which case we'd never get a cache
2637 * hit and caching would be a complete waste of effort.
2638 */
2639static void
2640cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
2641                    Cost *rescan_startup_cost, Cost *rescan_total_cost)
2642{
2644 ListCell *lc;
2645 Cost input_startup_cost = mpath->subpath->startup_cost;
2646 Cost input_total_cost = mpath->subpath->total_cost;
2647 double tuples = mpath->subpath->rows;
2648 Cardinality est_calls = mpath->est_calls;
2649 int width = mpath->subpath->pathtarget->width;
2650
2651 double hash_mem_bytes;
2652 double est_entry_bytes;
2654 Cardinality ndistinct;
2655 double evict_ratio;
2656 double hit_ratio;
2657 Cost startup_cost;
2658 Cost total_cost;
2659
2660 /* available cache space */
2662
2663 /*
2664 * Set the number of bytes each cache entry should consume in the cache.
2665 * To provide us with better estimations on how many cache entries we can
2666 * store at once, we make a call to the executor here to ask it what
2667 * memory overheads there are for a single cache entry.
2668 */
2669 est_entry_bytes = relation_byte_size(tuples, width) +
2671
2672 /* include the estimated width for the cache keys */
2673 foreach(lc, mpath->param_exprs)
2675
2676 /* estimate on the upper limit of cache entries we can hold at once */
2678
2679 /* estimate on the distinct number of parameter values */
2680 ndistinct = estimate_num_groups(root, mpath->param_exprs, est_calls, NULL,
2681 &estinfo);
2682
2683 /*
2684 * When the estimation fell back on using a default value, it's a bit too
2685 * risky to assume that it's ok to use a Memoize node. The use of a
2686 * default could cause us to use a Memoize node when it's really
2687 * inappropriate to do so. If we see that this has been done, then we'll
2688 * assume that every call will have unique parameters, which will almost
2689 * certainly mean a MemoizePath will never survive add_path().
2690 */
2691 if ((estinfo.flags & SELFLAG_USED_DEFAULT) != 0)
2692 ndistinct = est_calls;
2693
2694 /* Remember the ndistinct estimate for EXPLAIN */
2695 mpath->est_unique_keys = ndistinct;
2696
2697 /*
2698 * Since we've already estimated the maximum number of entries we can
2699 * store at once and know the estimated number of distinct values we'll be
2700 * called with, we'll take this opportunity to set the path's est_entries.
2701 * This will ultimately determine the hash table size that the executor
2702 * will use. If we leave this at zero, the executor will just choose the
2703 * size itself. Really this is not the right place to do this, but it's
2704 * convenient since everything is already calculated.
2705 */
2706 mpath->est_entries = Min(Min(ndistinct, est_cache_entries),
2708
2709 /*
2710 * When the number of distinct parameter values is above the amount we can
2711 * store in the cache, then we'll have to evict some entries from the
2712 * cache. This is not free. Here we estimate how often we'll incur the
2713 * cost of that eviction.
2714 */
2715 evict_ratio = 1.0 - Min(est_cache_entries, ndistinct) / ndistinct;
2716
2717 /*
2718 * In order to estimate how costly a single scan will be, we need to
2719 * attempt to estimate what the cache hit ratio will be. To do that we
2720 * must look at how many scans are estimated in total for this node and
2721 * how many of those scans we expect to get a cache hit.
2722 */
2723 hit_ratio = ((est_calls - ndistinct) / est_calls) *
2724 (est_cache_entries / Max(ndistinct, est_cache_entries));
2725
2726 /* Remember the hit ratio estimate for EXPLAIN */
2727 mpath->est_hit_ratio = hit_ratio;
2728
2729 Assert(hit_ratio >= 0 && hit_ratio <= 1.0);
2730
2731 /*
2732 * Set the total_cost accounting for the expected cache hit ratio. We
2733 * also add on a cpu_operator_cost to account for a cache lookup. This
2734 * will happen regardless of whether it's a cache hit or not.
2735 */
2736 total_cost = input_total_cost * (1.0 - hit_ratio) + cpu_operator_cost;
2737
2738 /* Now adjust the total cost to account for cache evictions */
2739
2740 /* Charge a cpu_tuple_cost for evicting the actual cache entry */
2741 total_cost += cpu_tuple_cost * evict_ratio;
2742
2743 /*
2744 * Charge a 10th of cpu_operator_cost to evict every tuple in that entry.
2745 * The per-tuple eviction is really just a pfree, so charging a whole
2746 * cpu_operator_cost seems a little excessive.
2747 */
2748 total_cost += cpu_operator_cost / 10.0 * evict_ratio * tuples;
2749
2750 /*
2751 * Now adjust for storing things in the cache, since that's not free
2752 * either. Everything must go in the cache. We don't proportion this
2753 * over any ratio, just apply it once for the scan. We charge a
2754 * cpu_tuple_cost for the creation of the cache entry and also a
2755 * cpu_operator_cost for each tuple we expect to cache.
2756 */
2757 total_cost += cpu_tuple_cost + cpu_operator_cost * tuples;
2758
2759 /*
2760 * Getting the first row must also be proportioned according to the
2761 * expected cache hit ratio.
2762 */
2763 startup_cost = input_startup_cost * (1.0 - hit_ratio);
2764
2765 /*
2766 * Additionally we charge a cpu_tuple_cost to account for cache lookups,
2767 * which we'll do regardless of whether it was a cache hit or not.
2768 */
2769 startup_cost += cpu_tuple_cost;
2770
2771 *rescan_startup_cost = startup_cost;
2772 *rescan_total_cost = total_cost;
2773}
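/*
 * Illustrative sketch, not part of costsize.c: the hit-ratio and
 * evict-ratio formulas above with est_calls = 1000, ndistinct = 200 and
 * room for est_cache_entries = 100 entries (Min/Max are the usual c.h
 * macros).
 */
static void
memoize_ratio_sketch(double *hit_ratio, double *evict_ratio)
{
    double      est_calls = 1000.0;
    double      ndistinct = 200.0;
    double      est_cache_entries = 100.0;

    /* repeated-key fraction, discounted by how much of it we can cache */
    *hit_ratio = ((est_calls - ndistinct) / est_calls) *
        (est_cache_entries / Max(ndistinct, est_cache_entries));   /* 0.4 */

    /* share of distinct keys that won't fit and will force evictions */
    *evict_ratio = 1.0 - Min(est_cache_entries, ndistinct) / ndistinct;    /* 0.5 */
}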
2774
2775/*
2776 * cost_agg
2777 * Determines and returns the cost of performing an Agg plan node,
2778 * including the cost of its input.
2779 *
2780 * aggcosts can be NULL when there are no actual aggregate functions (i.e.,
2781 * we are using a hashed Agg node just to do grouping).
2782 *
2783 * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
2784 * are for appropriately-sorted input.
2785 */
2786void
2787cost_agg(Path *path, PlannerInfo *root,
2788         AggStrategy aggstrategy, const AggClauseCosts *aggcosts,
2789 int numGroupCols, double numGroups,
2790 List *quals,
2791 int disabled_nodes,
2792         Cost input_startup_cost, Cost input_total_cost,
2793         double input_tuples, double input_width)
2794{
2795 double output_tuples;
2796 Cost startup_cost;
2797 Cost total_cost;
2798 const AggClauseCosts dummy_aggcosts = {0};
2799
2800 /* Use all-zero per-aggregate costs if NULL is passed */
2801 if (aggcosts == NULL)
2802 {
2803 Assert(aggstrategy == AGG_HASHED);
2805 }
2806
2807 /*
2808 * The transCost.per_tuple component of aggcosts should be charged once
2809 * per input tuple, corresponding to the costs of evaluating the aggregate
2810 * transfns and their input expressions. The finalCost.per_tuple component
2811 * is charged once per output tuple, corresponding to the costs of
2812 * evaluating the finalfns. Startup costs are of course charged but once.
2813 *
2814 * If we are grouping, we charge an additional cpu_operator_cost per
2815 * grouping column per input tuple for grouping comparisons.
2816 *
2817 * We will produce a single output tuple if not grouping, and a tuple per
2818 * group otherwise. We charge cpu_tuple_cost for each output tuple.
2819 *
2820 * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
2821 * same total CPU cost, but AGG_SORTED has lower startup cost. If the
2822 * input path is already sorted appropriately, AGG_SORTED should be
2823 * preferred (since it has no risk of memory overflow). This will happen
2824 * as long as the computed total costs are indeed exactly equal --- but if
2825 * there's roundoff error we might do the wrong thing. So be sure that
2826 * the computations below form the same intermediate values in the same
2827 * order.
2828 */
2829 if (aggstrategy == AGG_PLAIN)
2830 {
2831 startup_cost = input_total_cost;
2832 startup_cost += aggcosts->transCost.startup;
2833 startup_cost += aggcosts->transCost.per_tuple * input_tuples;
2834 startup_cost += aggcosts->finalCost.startup;
2835 startup_cost += aggcosts->finalCost.per_tuple;
2836 /* we aren't grouping */
2837 total_cost = startup_cost + cpu_tuple_cost;
2838 output_tuples = 1;
2839 }
2840 else if (aggstrategy == AGG_SORTED || aggstrategy == AGG_MIXED)
2841 {
2842 /* Here we are able to deliver output on-the-fly */
2843 startup_cost = input_startup_cost;
2844 total_cost = input_total_cost;
2845 if (aggstrategy == AGG_MIXED && !enable_hashagg)
2846 ++disabled_nodes;
2847 /* calcs phrased this way to match HASHED case, see note above */
2848 total_cost += aggcosts->transCost.startup;
2849 total_cost += aggcosts->transCost.per_tuple * input_tuples;
2850 total_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
2851 total_cost += aggcosts->finalCost.startup;
2852 total_cost += aggcosts->finalCost.per_tuple * numGroups;
2853 total_cost += cpu_tuple_cost * numGroups;
2854 output_tuples = numGroups;
2855 }
2856 else
2857 {
2858 /* must be AGG_HASHED */
2859 startup_cost = input_total_cost;
2860 if (!enable_hashagg)
2861 ++disabled_nodes;
2862 startup_cost += aggcosts->transCost.startup;
2863 startup_cost += aggcosts->transCost.per_tuple * input_tuples;
2864 /* cost of computing hash value */
2865 startup_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
2866 startup_cost += aggcosts->finalCost.startup;
2867
2868 total_cost = startup_cost;
2869 total_cost += aggcosts->finalCost.per_tuple * numGroups;
2870 /* cost of retrieving from hash table */
2871 total_cost += cpu_tuple_cost * numGroups;
2872 output_tuples = numGroups;
2873 }
2874
2875 /*
2876 * Add the disk costs of hash aggregation that spills to disk.
2877 *
2878 * Groups that go into the hash table stay in memory until finalized, so
2879 * spilling and reprocessing tuples doesn't incur additional invocations
2880 * of transCost or finalCost. Furthermore, the computed hash value is
2881 * stored with the spilled tuples, so we don't incur extra invocations of
2882 * the hash function.
2883 *
2884 * Hash Agg begins returning tuples after the first batch is complete.
2885 * Accrue writes (spilled tuples) to startup_cost and to total_cost;
2886 * accrue reads only to total_cost.
2887 */
2888 if (aggstrategy == AGG_HASHED || aggstrategy == AGG_MIXED)
2889 {
2890 double pages;
2891 double pages_written = 0.0;
2892 double pages_read = 0.0;
2893 double spill_cost;
2894 double hashentrysize;
2895 double nbatches;
2896 Size mem_limit;
2898 int num_partitions;
2899 int depth;
2900
2901 /*
2902 * Estimate number of batches based on the computed limits. If less
2903 * than or equal to one, all groups are expected to fit in memory;
2904 * otherwise we expect to spill.
2905 */
2906 hashentrysize = hash_agg_entry_size(list_length(root->aggtransinfos),
2908 aggcosts->transitionSpace);
2909 hash_agg_set_limits(hashentrysize, numGroups, 0, &mem_limit,
2910 &ngroups_limit, &num_partitions);
2911
2912 nbatches = Max((numGroups * hashentrysize) / mem_limit,
2913 numGroups / ngroups_limit);
2914
2915 nbatches = Max(ceil(nbatches), 1.0);
2916 num_partitions = Max(num_partitions, 2);
2917
2918 /*
2919 * The number of partitions can change at different levels of
2920 * recursion; but for the purposes of this calculation assume it stays
2921 * constant.
2922 */
2923 depth = ceil(log(nbatches) / log(num_partitions));
2924
2925 /*
2926 * Estimate number of pages read and written. For each level of
2927 * recursion, a tuple must be written and then later read.
2928 */
2929 pages = relation_byte_size(input_tuples, input_width) / BLCKSZ;
2930 pages_written = pages_read = pages * depth;
2931
2932 /*
2933 * HashAgg has somewhat worse IO behavior than Sort on typical
2934 * hardware/OS combinations. Account for this with a generic penalty.
2935 */
2936 pages_read *= 2.0;
2937 pages_written *= 2.0;
2938
2939 startup_cost += pages_written * random_page_cost;
2940 total_cost += pages_written * random_page_cost;
2941 total_cost += pages_read * seq_page_cost;
2942
2943 /* account for CPU cost of spilling a tuple and reading it back */
2944 spill_cost = depth * input_tuples * 2.0 * cpu_tuple_cost;
2945 startup_cost += spill_cost;
2946 total_cost += spill_cost;
2947 }
2948
2949 /*
2950 * If there are quals (HAVING quals), account for their cost and
2951 * selectivity.
2952 */
2953 if (quals)
2954 {
2956
2957 cost_qual_eval(&qual_cost, quals, root);
2958 startup_cost += qual_cost.startup;
2959 total_cost += qual_cost.startup + output_tuples * qual_cost.per_tuple;
2960
2963 quals,
2964 0,
2965 JOIN_INNER,
2966 NULL));
2967 }
2968
2969 path->rows = output_tuples;
2970 path->disabled_nodes = disabled_nodes;
2971 path->startup_cost = startup_cost;
2972 path->total_cost = total_cost;
2973}
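/*
 * Illustrative sketch, not part of costsize.c: the batch and
 * recursion-depth arithmetic used for spilling hash aggregation, with
 * made-up numbers: 10 million groups of 64 bytes against a 4MB memory
 * limit and 32-way partitioning.  (The real code also folds a
 * ngroups_limit term into the batch estimate; it is omitted here.)
 */
static int
hashagg_spill_depth_sketch(void)
{
    double      numGroups = 10000000.0;
    double      hashentrysize = 64.0;
    double      mem_limit = 4096.0 * 1024.0;
    int         num_partitions = 32;
    double      nbatches;

    nbatches = Max(ceil((numGroups * hashentrysize) / mem_limit), 1.0); /* 153 */

    /* each recursion level splits a batch into num_partitions pieces */
    return (int) ceil(log(nbatches) / log(num_partitions));    /* 2 */
}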
2974
2975/*
2976 * get_windowclause_startup_tuples
2977 * Estimate how many tuples we'll need to fetch from a WindowAgg's
2978 * subnode before we can output the first WindowAgg tuple.
2979 *
2980 * How many tuples need to be read depends on the WindowClause. For example,
2981 * a WindowClause with no PARTITION BY and no ORDER BY requires that all
2982 * subnode tuples are read and aggregated before the WindowAgg can output
2983 * anything. If there's a PARTITION BY, then we only need to look at tuples
2984 * in the first partition. Here we attempt to estimate just how many
2985 * 'input_tuples' the WindowAgg will need to read for the given WindowClause
2986 * before the first tuple can be output.
2987 */
2988static double
2989get_windowclause_startup_tuples(PlannerInfo *root, WindowClause *wc,
2990                                double input_tuples)
2991{
2992 int frameOptions = wc->frameOptions;
2993 double partition_tuples;
2994 double return_tuples;
2995 double peer_tuples;
2996
2997 /*
2998 * First, figure out how many partitions there are likely to be and set
2999 * partition_tuples according to that estimate.
3000 */
3001 if (wc->partitionClause != NIL)
3002 {
3003 double num_partitions;
3005 root->parse->targetList);
3006
3007 num_partitions = estimate_num_groups(root, partexprs, input_tuples,
3008 NULL, NULL);
3009 list_free(partexprs);
3010
3011 partition_tuples = input_tuples / num_partitions;
3012 }
3013 else
3014 {
3015 /* all tuples belong to the same partition */
3016 partition_tuples = input_tuples;
3017 }
3018
3019 /* estimate the number of tuples in each peer group */
3020 if (wc->orderClause != NIL)
3021 {
3022 double num_groups;
3024
3026 root->parse->targetList);
3027
3028 /* estimate how many peer groups there are in the partition */
3031 NULL);
3034 }
3035 else
3036 {
3037 /* no ORDER BY so only 1 tuple belongs in each peer group */
3038 peer_tuples = 1.0;
3039 }
3040
3041 if (frameOptions & FRAMEOPTION_END_UNBOUNDED_FOLLOWING)
3042 {
3043 /* include all partition rows */
3045 }
3046 else if (frameOptions & FRAMEOPTION_END_CURRENT_ROW)
3047 {
3048 if (frameOptions & FRAMEOPTION_ROWS)
3049 {
3050 /* just count the current row */
3051 return_tuples = 1.0;
3052 }
3053 else if (frameOptions & (FRAMEOPTION_RANGE | FRAMEOPTION_GROUPS))
3054 {
3055 /*
3056 * When in RANGE/GROUPS mode, it's more complex. If there's no
3057 * ORDER BY, then all rows in the partition are peers, otherwise
3058 * we'll need to read the first group of peers.
3059 */
3060 if (wc->orderClause == NIL)
3062 else
3064 }
3065 else
3066 {
3067 /*
3068 * Something new we don't support yet? This needs attention.
3069 * We'll just return 1.0 in the meantime.
3070 */
3071 Assert(false);
3072 return_tuples = 1.0;
3073 }
3074 }
3075 else if (frameOptions & FRAMEOPTION_END_OFFSET_PRECEDING)
3076 {
3077 /*
3078 * BETWEEN ... AND N PRECEDING will only need to read the WindowAgg's
3079 * subnode after N ROWS/RANGES/GROUPS. N can be 0, but not negative,
3080 * so we'll just assume only the current row needs to be read to fetch
3081 * the first WindowAgg row.
3082 */
3083 return_tuples = 1.0;
3084 }
3085 else if (frameOptions & FRAMEOPTION_END_OFFSET_FOLLOWING)
3086 {
3087 Const *endOffset = (Const *) wc->endOffset;
3088 double end_offset_value;
3089
3090 /* try to figure out the value specified in the endOffset. */
3091 if (IsA(endOffset, Const))
3092 {
3093 if (endOffset->constisnull)
3094 {
3095 /*
3096 * NULLs are not allowed, but currently, there's no code to
3097 * error out if there's a NULL Const. We'll only discover
3098 * this during execution. For now, just pretend everything is
3099 * fine and assume that just the first row/range/group will be
3100 * needed.
3101 */
3102 end_offset_value = 1.0;
3103 }
3104 else
3105 {
3106 switch (endOffset->consttype)
3107 {
3108 case INT2OID:
3110 (double) DatumGetInt16(endOffset->constvalue);
3111 break;
3112 case INT4OID:
3114 (double) DatumGetInt32(endOffset->constvalue);
3115 break;
3116 case INT8OID:
3118 (double) DatumGetInt64(endOffset->constvalue);
3119 break;
3120 default:
3124 break;
3125 }
3126 }
3127 }
3128 else
3129 {
3130 /*
3131 * When the end bound is not a Const, we'll just need to guess. We
3132 * just make use of DEFAULT_INEQ_SEL.
3133 */
3136 }
3137
3138 if (frameOptions & FRAMEOPTION_ROWS)
3139 {
3140 /* include the N FOLLOWING and the current row */
3142 }
3143 else if (frameOptions & (FRAMEOPTION_RANGE | FRAMEOPTION_GROUPS))
3144 {
3145 /* include N FOLLOWING ranges/group and the initial range/group */
3147 }
3148 else
3149 {
3150 /*
3151 * Something new we don't support yet? This needs attention.
3152 * We'll just return 1.0 in the meantime.
3153 */
3154 Assert(false);
3155 return_tuples = 1.0;
3156 }
3157 }
3158 else
3159 {
3160 /*
3161 * Something new we don't support yet? This needs attention. We'll
3162 * just return 1.0 in the meantime.
3163 */
3164 Assert(false);
3165 return_tuples = 1.0;
3166 }
3167
3168 if (wc->partitionClause != NIL || wc->orderClause != NIL)
3169 {
3170 /*
3171 * Cap the return value to the estimated partition tuples and account
3172 * for the extra tuple WindowAgg will need to read to confirm the next
3173 * tuple does not belong to the same partition or peer group.
3174 */
3176 }
3177 else
3178 {
3179 /*
3180 * Cap the return value so it's never higher than the expected tuples
3181 * in the partition.
3182 */
3184 }
3185
3186 /*
3187 * We needn't worry about any EXCLUDE options as those only exclude rows
3188 * from being aggregated, not from being read from the WindowAgg's
3189 * subnode.
3190 */
3191
3193}
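/*
 * Illustrative sketch, not part of costsize.c: the startup-tuple estimate
 * for "OVER (PARTITION BY p ORDER BY o ROWS BETWEEN UNBOUNDED PRECEDING
 * AND CURRENT ROW)" with 100000 input rows and roughly 100 partitions.
 * In ROWS mode with an END_CURRENT_ROW frame only the current row is
 * needed, plus one extra tuple to see that the partition has ended,
 * capped to the partition size.
 */
static double
window_startup_tuples_sketch(void)
{
    double      partition_tuples = 100000.0 / 100.0;    /* ~1000 rows each */
    double      return_tuples = 1.0;    /* ROWS ... AND CURRENT ROW */

    return Min(return_tuples + 1.0, partition_tuples);  /* 2.0 */
}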
3194
3195/*
3196 * cost_windowagg
3197 * Determines and returns the cost of performing a WindowAgg plan node,
3198 * including the cost of its input.
3199 *
3200 * Input is assumed already properly sorted.
3201 */
3202void
3203cost_windowagg(Path *path, PlannerInfo *root,
3204               List *windowFuncs, WindowClause *winclause,
3205               int input_disabled_nodes,
3206               Cost input_startup_cost, Cost input_total_cost,
3207               double input_tuples)
3208{
3209 Cost startup_cost;
3210 Cost total_cost;
3211 double startup_tuples;
3212 int numPartCols;
3213 int numOrderCols;
3214 ListCell *lc;
3215
3217 numOrderCols = list_length(winclause->orderClause);
3218
3219 startup_cost = input_startup_cost;
3220 total_cost = input_total_cost;
3221
3222 /*
3223 * Window functions are assumed to cost their stated execution cost, plus
3224 * the cost of evaluating their input expressions, per tuple. Since they
3225 * may in fact evaluate their inputs at multiple rows during each cycle,
3226 * this could be a drastic underestimate; but without a way to know how
3227 * many rows the window function will fetch, it's hard to do better. In
3228 * any case, it's a good estimate for all the built-in window functions,
3229 * so we'll just do this for now.
3230 */
3231 foreach(lc, windowFuncs)
3232 {
3236
3237 argcosts.startup = argcosts.per_tuple = 0;
3238 add_function_cost(root, wfunc->winfnoid, (Node *) wfunc,
3239 &argcosts);
3240 startup_cost += argcosts.startup;
3241 wfunccost = argcosts.per_tuple;
3242
3243 /* also add the input expressions' cost to per-input-row costs */
3244 cost_qual_eval_node(&argcosts, (Node *) wfunc->args, root);
3245 startup_cost += argcosts.startup;
3246 wfunccost += argcosts.per_tuple;
3247
3248 /*
3249 * Add the filter's cost to per-input-row costs. XXX We should reduce
3250 * input expression costs according to filter selectivity.
3251 */
3253 startup_cost += argcosts.startup;
3254 wfunccost += argcosts.per_tuple;
3255
3256 total_cost += wfunccost * input_tuples;
3257 }
3258
3259 /*
3260 * We also charge cpu_operator_cost per grouping column per tuple for
3261 * grouping comparisons, plus cpu_tuple_cost per tuple for general
3262 * overhead.
3263 *
3264 * XXX this neglects costs of spooling the data to disk when it overflows
3265 * work_mem. Sooner or later that should get accounted for.
3266 */
3267 total_cost += cpu_operator_cost * (numPartCols + numOrderCols) * input_tuples;
3268 total_cost += cpu_tuple_cost * input_tuples;
3269
3270 path->rows = input_tuples;
3272 path->startup_cost = startup_cost;
3273 path->total_cost = total_cost;
3274
3275 /*
3276 * Also, take into account how many tuples we need to read from the
3277 * subnode in order to produce the first tuple from the WindowAgg. To do
3278 * this we proportion the run cost (total cost not including startup cost)
3279 * over the estimated startup tuples. We already included the startup
3280 * cost of the subnode, so we only need to do this when the estimated
3281 * number of startup tuples is above 1.0.
3282 */
3284 input_tuples);
3285
3286 if (startup_tuples > 1.0)
3287 path->startup_cost += (total_cost - startup_cost) / input_tuples *
3288 (startup_tuples - 1.0);
3289}
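/*
 * Illustrative sketch, not part of costsize.c: the startup-cost proration
 * performed at the end of cost_windowagg().  If the WindowAgg must read
 * half of its input before emitting the first row, about half of the run
 * cost migrates into startup_cost.
 */
static Cost
windowagg_startup_proration_sketch(Cost startup_cost, Cost total_cost,
                                   double input_tuples, double startup_tuples)
{
    if (startup_tuples > 1.0)
        startup_cost += (total_cost - startup_cost) / input_tuples *
            (startup_tuples - 1.0);
    return startup_cost;
}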
3290
3291/*
3292 * cost_group
3293 * Determines and returns the cost of performing a Group plan node,
3294 * including the cost of its input.
3295 *
3296 * Note: caller must ensure that input costs are for appropriately-sorted
3297 * input.
3298 */
3299void
3300cost_group(Path *path, PlannerInfo *root,
3301           int numGroupCols, double numGroups,
3302           List *quals,
3303           int input_disabled_nodes,
3304           Cost input_startup_cost, Cost input_total_cost,
3305           double input_tuples)
3306{
3307 double output_tuples;
3308 Cost startup_cost;
3309 Cost total_cost;
3310
3311 output_tuples = numGroups;
3312 startup_cost = input_startup_cost;
3313 total_cost = input_total_cost;
3314
3315 /*
3316 * Charge one cpu_operator_cost per comparison per input tuple. We assume
3317 * all columns get compared for most of the tuples.
3318 */
3319 total_cost += cpu_operator_cost * input_tuples * numGroupCols;
3320
3321 /*
3322 * If there are quals (HAVING quals), account for their cost and
3323 * selectivity.
3324 */
3325 if (quals)
3326 {
3328
3329 cost_qual_eval(&qual_cost, quals, root);
3330 startup_cost += qual_cost.startup;
3331 total_cost += qual_cost.startup + output_tuples * qual_cost.per_tuple;
3332
3335 quals,
3336 0,
3337 JOIN_INNER,
3338 NULL));
3339 }
3340
3341 path->rows = output_tuples;
3343 path->startup_cost = startup_cost;
3344 path->total_cost = total_cost;
3345}
3346
3347/*
3348 * initial_cost_nestloop
3349 * Preliminary estimate of the cost of a nestloop join path.
3350 *
3351 * This must quickly produce lower-bound estimates of the path's startup and
3352 * total costs. If we are unable to eliminate the proposed path from
3353 * consideration using the lower bounds, final_cost_nestloop will be called
3354 * to obtain the final estimates.
3355 *
3356 * The exact division of labor between this function and final_cost_nestloop
3357 * is private to them, and represents a tradeoff between speed of the initial
3358 * estimate and getting a tight lower bound. We choose to not examine the
3359 * join quals here, since that's by far the most expensive part of the
3360 * calculations. The end result is that CPU-cost considerations must be
3361 * left for the second phase; and for SEMI/ANTI joins, we must also postpone
3362 * incorporation of the inner path's run cost.
3363 *
3364 * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
3365 * other data to be used by final_cost_nestloop
3366 * 'jointype' is the type of join to be performed
3367 * 'outer_path' is the outer input to the join
3368 * 'inner_path' is the inner input to the join
3369 * 'extra' contains miscellaneous information about the join
3370 */
3371void
3372initial_cost_nestloop(PlannerInfo *root, JoinCostWorkspace *workspace,
3373                      JoinType jointype, uint64 enable_mask,
3374                      Path *outer_path, Path *inner_path,
3375                      JoinPathExtraData *extra)
3376{
3377 int disabled_nodes;
3378 Cost startup_cost = 0;
3379 Cost run_cost = 0;
3380 double outer_path_rows = outer_path->rows;
3383 Cost inner_run_cost;
3384 Cost inner_rescan_run_cost;
3385
3386 /* Count up disabled nodes. */
3387 disabled_nodes = (extra->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
3388 disabled_nodes += inner_path->disabled_nodes;
3389 disabled_nodes += outer_path->disabled_nodes;
3390
3391 /* estimate costs to rescan the inner relation */
3395
3396 /* cost of source data */
3397
3398 /*
3399 * NOTE: clearly, we must pay both outer and inner paths' startup_cost
3400 * before we can start returning tuples, so the join's startup cost is
3401 * their sum. We'll also pay the inner path's rescan startup cost
3402 * multiple times.
3403 */
3404 startup_cost += outer_path->startup_cost + inner_path->startup_cost;
3405 run_cost += outer_path->total_cost - outer_path->startup_cost;
3406 if (outer_path_rows > 1)
3407 run_cost += (outer_path_rows - 1) * inner_rescan_start_cost;
3408
3409 inner_run_cost = inner_path->total_cost - inner_path->startup_cost;
3410 inner_rescan_run_cost = inner_rescan_total_cost - inner_rescan_start_cost;
3411
3412 if (jointype == JOIN_SEMI || jointype == JOIN_ANTI ||
3413 extra->inner_unique)
3414 {
3415 /*
3416 * With a SEMI or ANTI join, or if the innerrel is known unique, the
3417 * executor will stop after the first match.
3418 *
3419 * Getting decent estimates requires inspection of the join quals,
3420 * which we choose to postpone to final_cost_nestloop.
3421 */
3422
3423 /* Save private data for final_cost_nestloop */
3424 workspace->inner_run_cost = inner_run_cost;
3425 workspace->inner_rescan_run_cost = inner_rescan_run_cost;
3426 }
3427 else
3428 {
3429 /* Normal case; we'll scan whole input rel for each outer row */
3430 run_cost += inner_run_cost;
3431 if (outer_path_rows > 1)
3432 run_cost += (outer_path_rows - 1) * inner_rescan_run_cost;
3433 }
3434
3435 /* CPU costs left for later */
3436
3437 /* Public result fields */
3438 workspace->disabled_nodes = disabled_nodes;
3439 workspace->startup_cost = startup_cost;
3440 workspace->total_cost = startup_cost + run_cost;
3441 /* Save private data for final_cost_nestloop */
3442 workspace->run_cost = run_cost;
3443}
3444
3445/*
3446 * final_cost_nestloop
3447 * Final estimate of the cost and result size of a nestloop join path.
3448 *
3449 * 'path' is already filled in except for the rows and cost fields
3450 * 'workspace' is the result from initial_cost_nestloop
3451 * 'extra' contains miscellaneous information about the join
3452 */
3453void
3454final_cost_nestloop(PlannerInfo *root, NestPath *path,
3455                    JoinCostWorkspace *workspace,
3456 JoinPathExtraData *extra)
3457{
3461 double inner_path_rows = inner_path->rows;
3462 Cost startup_cost = workspace->startup_cost;
3463 Cost run_cost = workspace->run_cost;
3466 double ntuples;
3467
3468 /* Set the number of disabled nodes. */
3469 path->jpath.path.disabled_nodes = workspace->disabled_nodes;
3470
3471 /* Protect some assumptions below that rowcounts aren't zero */
3472 if (outer_path_rows <= 0)
3473 outer_path_rows = 1;
3474 if (inner_path_rows <= 0)
3475 inner_path_rows = 1;
3476 /* Mark the path with the correct row estimate */
3477 if (path->jpath.path.param_info)
3478 path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
3479 else
3480 path->jpath.path.rows = path->jpath.path.parent->rows;
3481
3482 /* For partial paths, scale row estimate. */
3483 if (path->jpath.path.parallel_workers > 0)
3484 {
3485 double parallel_divisor = get_parallel_divisor(&path->jpath.path);
3486
3487 path->jpath.path.rows =
3488 clamp_row_est(path->jpath.path.rows / parallel_divisor);
3489 }
3490
3491 /* cost of inner-relation source data (we already dealt with outer rel) */
3492
3493 if (path->jpath.jointype == JOIN_SEMI || path->jpath.jointype == JOIN_ANTI ||
3494 extra->inner_unique)
3495 {
3496 /*
3497 * With a SEMI or ANTI join, or if the innerrel is known unique, the
3498 * executor will stop after the first match.
3499 */
3500 Cost inner_run_cost = workspace->inner_run_cost;
3501 Cost inner_rescan_run_cost = workspace->inner_rescan_run_cost;
3502 double outer_matched_rows;
3503 double outer_unmatched_rows;
3505
3506 /*
3507 * For an outer-rel row that has at least one match, we can expect the
3508 * inner scan to stop after a fraction 1/(match_count+1) of the inner
3509 * rows, if the matches are evenly distributed. Since they probably
3510 * aren't quite evenly distributed, we apply a fuzz factor of 2.0 to
3511 * that fraction. (If we used a larger fuzz factor, we'd have to
3512 * clamp inner_scan_frac to at most 1.0; but since match_count is at
3513 * least 1, no such clamp is needed now.)
3514 */
3517 inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
3518
3519 /*
3520 * Compute number of tuples processed (not number emitted!). First,
3521 * account for successfully-matched outer rows.
3522 */
3524
3525 /*
3526 * Now we need to estimate the actual costs of scanning the inner
3527 * relation, which may be quite a bit less than N times inner_run_cost
3528 * due to early scan stops. We consider two cases. If the inner path
3529 * is an indexscan using all the joinquals as indexquals, then an
3530 * unmatched outer row results in an indexscan returning no rows,
3531 * which is probably quite cheap. Otherwise, the executor will have
3532 * to scan the whole inner rel for an unmatched row; not so cheap.
3533 */
3534 if (has_indexed_join_quals(path))
3535 {
3536 /*
3537 * Successfully-matched outer rows will only require scanning
3538 * inner_scan_frac of the inner relation. In this case, we don't
3539 * need to charge the full inner_run_cost even when that's more
3540 * than inner_rescan_run_cost, because we can assume that none of
3541 * the inner scans ever scan the whole inner relation. So it's
3542 * okay to assume that all the inner scan executions can be
3543 * fractions of the full cost, even if materialization is reducing
3544 * the rescan cost. At this writing, it's impossible to get here
3545 * for a materialized inner scan, so inner_run_cost and
3546 * inner_rescan_run_cost will be the same anyway; but just in
3547 * case, use inner_run_cost for the first matched tuple and
3548 * inner_rescan_run_cost for additional ones.
3549 */
3550 run_cost += inner_run_cost * inner_scan_frac;
3551 if (outer_matched_rows > 1)
3552 run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
3553
3554 /*
3555 * Add the cost of inner-scan executions for unmatched outer rows.
3556 * We estimate this as the same cost as returning the first tuple
3557 * of a nonempty scan. We consider that these are all rescans,
3558 * since we used inner_run_cost once already.
3559 */
3560 run_cost += outer_unmatched_rows *
3561 inner_rescan_run_cost / inner_path_rows;
3562
3563 /*
3564 * We won't be evaluating any quals at all for unmatched rows, so
3565 * don't add them to ntuples.
3566 */
3567 }
3568 else
3569 {
3570 /*
3571 * Here, a complicating factor is that rescans may be cheaper than
3572 * first scans. If we never scan all the way to the end of the
3573 * inner rel, it might be (depending on the plan type) that we'd
3574 * never pay the whole inner first-scan run cost. However it is
3575 * difficult to estimate whether that will happen (and it could
3576 * not happen if there are any unmatched outer rows!), so be
3577 * conservative and always charge the whole first-scan cost once.
3578 * We consider this charge to correspond to the first unmatched
3579 * outer row, unless there isn't one in our estimate, in which
3580 * case blame it on the first matched row.
3581 */
3582
3583 /* First, count all unmatched join tuples as being processed */
3585
3586 /* Now add the forced full scan, and decrement appropriate count */
3587 run_cost += inner_run_cost;
3588 if (outer_unmatched_rows >= 1)
3590 else
3591 outer_matched_rows -= 1;
3592
3593 /* Add inner run cost for additional outer tuples having matches */
3594 if (outer_matched_rows > 0)
3595 run_cost += outer_matched_rows * inner_rescan_run_cost * inner_scan_frac;
3596
3597 /* Add inner run cost for additional unmatched outer tuples */
3598 if (outer_unmatched_rows > 0)
3599 run_cost += outer_unmatched_rows * inner_rescan_run_cost;
3600 }
3601 }
3602 else
3603 {
3604 /* Normal-case source costs were included in preliminary estimate */
3605
3606 /* Compute number of tuples processed (not number emitted!) */
3607 ntuples = outer_path_rows * inner_path_rows;
3608 }
3609
3610 /* CPU costs */
3612 startup_cost += restrict_qual_cost.startup;
3614 run_cost += cpu_per_tuple * ntuples;
3615
3616 /* tlist eval costs are paid per output row, not per tuple scanned */
3617 startup_cost += path->jpath.path.pathtarget->cost.startup;
3618 run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
3619
3620 path->jpath.path.startup_cost = startup_cost;
3621 path->jpath.path.total_cost = startup_cost + run_cost;
3622}
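/*
 * Illustrative sketch, not part of costsize.c: the early-stop fraction
 * used above for SEMI/ANTI joins and provably-unique inners.  With an
 * average of 3 inner matches per outer row, each matched outer row is
 * expected to scan 2 / (3 + 1) = 50% of the inner rows.
 */
static double
semijoin_inner_scan_frac_sketch(double match_count)
{
    /* fuzz factor of 2.0 over the idealized 1 / (match_count + 1) */
    return 2.0 / (match_count + 1.0);
}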
3623
3624/*
3625 * initial_cost_mergejoin
3626 * Preliminary estimate of the cost of a mergejoin path.
3627 *
3628 * This must quickly produce lower-bound estimates of the path's startup and
3629 * total costs. If we are unable to eliminate the proposed path from
3630 * consideration using the lower bounds, final_cost_mergejoin will be called
3631 * to obtain the final estimates.
3632 *
3633 * The exact division of labor between this function and final_cost_mergejoin
3634 * is private to them, and represents a tradeoff between speed of the initial
3635 * estimate and getting a tight lower bound. We choose to not examine the
3636 * join quals here, except for obtaining the scan selectivity estimate which
3637 * is really essential (but fortunately, use of caching keeps the cost of
3638 * getting that down to something reasonable).
3639 * We also assume that cost_sort/cost_incremental_sort is cheap enough to use
3640 * here.
3641 *
3642 * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
3643 * other data to be used by final_cost_mergejoin
3644 * 'jointype' is the type of join to be performed
3645 * 'mergeclauses' is the list of joinclauses to be used as merge clauses
3646 * 'outer_path' is the outer input to the join
3647 * 'inner_path' is the inner input to the join
3648 * 'outersortkeys' is the list of sort keys for the outer path
3649 * 'innersortkeys' is the list of sort keys for the inner path
3650 * 'outer_presorted_keys' is the number of presorted keys of the outer path
3651 * 'extra' contains miscellaneous information about the join
3652 *
3653 * Note: outersortkeys and innersortkeys should be NIL if no explicit
3654 * sort is needed because the respective source path is already ordered.
3655 */
3656void
3657 initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace,
3658 JoinType jointype,
3659 List *mergeclauses,
3660 Path *outer_path, Path *inner_path,
3661 List *outersortkeys, List *innersortkeys,
3662 int outer_presorted_keys,
3663 JoinPathExtraData *extra)
3664{
3665 int disabled_nodes;
3666 Cost startup_cost = 0;
3667 Cost run_cost = 0;
3668 double outer_path_rows = outer_path->rows;
3669 double inner_path_rows = inner_path->rows;
3670 Cost inner_run_cost;
3671 double outer_rows,
3672 inner_rows,
3673 outer_skip_rows,
3674 inner_skip_rows;
3675 Selectivity outerstartsel,
3676 outerendsel,
3677 innerstartsel,
3678 innerendsel;
3679 Path sort_path; /* dummy for result of
3680 * cost_sort/cost_incremental_sort */
3681
3682 /* Protect some assumptions below that rowcounts aren't zero */
3683 if (outer_path_rows <= 0)
3684 outer_path_rows = 1;
3685 if (inner_path_rows <= 0)
3686 inner_path_rows = 1;
3687
3688 /*
3689 * A merge join will stop as soon as it exhausts either input stream
3690 * (unless it's an outer join, in which case the outer side has to be
3691 * scanned all the way anyway). Estimate fraction of the left and right
3692 * inputs that will actually need to be scanned. Likewise, we can
3693 * estimate the number of rows that will be skipped before the first join
3694 * pair is found, which should be factored into startup cost. We use only
3695 * the first (most significant) merge clause for this purpose. Since
3696 * mergejoinscansel() is a fairly expensive computation, we cache the
3697 * results in the merge clause RestrictInfo.
3698 */
3699 if (mergeclauses && jointype != JOIN_FULL)
3700 {
3701 RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
3702 List *opathkeys;
3703 List *ipathkeys;
3704 PathKey *opathkey;
3705 PathKey *ipathkey;
3706 MergeScanSelCache *cache;
3707
3708 /* Get the input pathkeys to determine the sort-order details */
3709 opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
3710 ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
3711 Assert(opathkeys);
3712 Assert(ipathkeys);
3713 opathkey = (PathKey *) linitial(opathkeys);
3714 ipathkey = (PathKey *) linitial(ipathkeys);
3715 /* debugging check */
3716 if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
3717 opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation ||
3718 opathkey->pk_cmptype != ipathkey->pk_cmptype ||
3719 opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
3720 elog(ERROR, "left and right pathkeys do not match in mergejoin");
3721
3722 /* Get the selectivity with caching */
3723 cache = cached_scansel(root, firstclause, opathkey);
3724
3725 if (bms_is_subset(firstclause->left_relids,
3726 outer_path->parent->relids))
3727 {
3728 /* left side of clause is outer */
3729 outerstartsel = cache->leftstartsel;
3730 outerendsel = cache->leftendsel;
3731 innerstartsel = cache->rightstartsel;
3732 innerendsel = cache->rightendsel;
3733 }
3734 else
3735 {
3736 /* left side of clause is inner */
3737 outerstartsel = cache->rightstartsel;
3738 outerendsel = cache->rightendsel;
3739 innerstartsel = cache->leftstartsel;
3740 innerendsel = cache->leftendsel;
3741 }
3742 if (jointype == JOIN_LEFT ||
3743 jointype == JOIN_ANTI)
3744 {
3745 outerstartsel = 0.0;
3746 outerendsel = 1.0;
3747 }
3748 else if (jointype == JOIN_RIGHT ||
3749 jointype == JOIN_RIGHT_ANTI)
3750 {
3751 innerstartsel = 0.0;
3752 innerendsel = 1.0;
3753 }
3754 }
3755 else
3756 {
3757 /* cope with clauseless or full mergejoin */
3758 outerstartsel = innerstartsel = 0.0;
3759 outerendsel = innerendsel = 1.0;
3760 }
3761
3762 /*
3763 * Convert selectivities to row counts. We force outer_rows and
3764 * inner_rows to be at least 1, but the skip_rows estimates can be zero.
3765 */
3766 outer_skip_rows = rint(outer_path_rows * outerstartsel);
3767 inner_skip_rows = rint(inner_path_rows * innerstartsel);
3768
3769 outer_rows = clamp_row_est(outer_path_rows * outerendsel);
3770 inner_rows = clamp_row_est(inner_path_rows * innerendsel);
3771 Assert(outer_skip_rows <= outer_rows);
3772 Assert(inner_skip_rows <= inner_rows);
3773
3774 /*
3775 * Readjust scan selectivities to account for above rounding. This is
3776 * normally an insignificant effect, but when there are only a few rows in
3777 * the inputs, failing to do this makes for a large percentage error.
3778 */
3779 outerstartsel = outer_skip_rows / outer_path_rows;
3780 innerstartsel = inner_skip_rows / inner_path_rows;
3781 outerendsel = outer_rows / outer_path_rows;
3782 innerendsel = inner_rows / inner_path_rows;
3783
3784 Assert(outerstartsel <= outerendsel);
3785 Assert(innerstartsel <= innerendsel);
3786
3787 /*
3788 * We don't decide whether to materialize the inner path until we get to
3789 * final_cost_mergejoin(), so we don't know whether to check the pgs_mask
3790 * against PGS_MERGEJOIN_PLAIN or PGS_MERGEJOIN_MATERIALIZE. Instead, we
3791 * just account for any child nodes here and assume that this node is not
3792 * itself disabled; we can sort out the details in final_cost_mergejoin().
3793 *
3794 * (We could be more precise here by setting disabled_nodes to 1 at this
3795 * stage if both PGS_MERGEJOIN_PLAIN and PGS_MERGEJOIN_MATERIALIZE are
3796 * disabled, but that seems to go against the idea of making this function
3797 * produce a quick, optimistic approximation of the final cost.)
3798 */
3799 disabled_nodes = 0;
3800
3801 /* cost of source data */
3802
3803 if (outersortkeys) /* do we need to sort outer? */
3804 {
3805 /*
3806 * We can assert that the outer path is not already ordered
3807 * appropriately for the mergejoin; otherwise, outersortkeys would
3808 * have been set to NIL.
3809 */
3810 Assert(!pathkeys_contained_in(outersortkeys, outer_path->pathkeys));
3811
3812 /*
3813 * We choose to use incremental sort if it is enabled and there are
3814 * presorted keys; otherwise we use full sort.
3815 */
3816 if (enable_incremental_sort && outer_presorted_keys > 0)
3817 {
3818 cost_incremental_sort(&sort_path,
3819 root,
3820 outersortkeys,
3821 outer_presorted_keys,
3822 outer_path->disabled_nodes,
3823 outer_path->startup_cost,
3824 outer_path->total_cost,
3825 outer_path_rows,
3826 outer_path->pathtarget->width,
3827 0.0,
3828 work_mem,
3829 -1.0);
3830 }
3831 else
3832 {
3833 cost_sort(&sort_path,
3834 root,
3835 outersortkeys,
3836 outer_path->disabled_nodes,
3837 outer_path->total_cost,
3838 outer_path_rows,
3839 outer_path->pathtarget->width,
3840 0.0,
3841 work_mem,
3842 -1.0);
3843 }
3844
3845 disabled_nodes += sort_path.disabled_nodes;
3846 startup_cost += sort_path.startup_cost;
3847 startup_cost += (sort_path.total_cost - sort_path.startup_cost)
3848 * outerstartsel;
3849 run_cost += (sort_path.total_cost - sort_path.startup_cost)
3850 * (outerendsel - outerstartsel);
3851 }
3852 else
3853 {
3854 disabled_nodes += outer_path->disabled_nodes;
3855 startup_cost += outer_path->startup_cost;
3856 startup_cost += (outer_path->total_cost - outer_path->startup_cost)
3857 * outerstartsel;
3858 run_cost += (outer_path->total_cost - outer_path->startup_cost)
3859 * (outerendsel - outerstartsel);
3860 }
3861
3862 if (innersortkeys) /* do we need to sort inner? */
3863 {
3864 /*
3865 * We can assert that the inner path is not already ordered
3866 * appropriately for the mergejoin; otherwise, innersortkeys would
3867 * have been set to NIL.
3868 */
3869 Assert(!pathkeys_contained_in(innersortkeys, inner_path->pathkeys));
3870
3871 /*
3872 * We do not consider incremental sort for inner path, because
3873 * incremental sort does not support mark/restore.
3874 */
3875
3876 cost_sort(&sort_path,
3877 root,
3878 innersortkeys,
3879 inner_path->disabled_nodes,
3880 inner_path->total_cost,
3881 inner_path_rows,
3882 inner_path->pathtarget->width,
3883 0.0,
3884 work_mem,
3885 -1.0);
3886 disabled_nodes += sort_path.disabled_nodes;
3887 startup_cost += sort_path.startup_cost;
3888 startup_cost += (sort_path.total_cost - sort_path.startup_cost)
3889 * innerstartsel;
3890 inner_run_cost = (sort_path.total_cost - sort_path.startup_cost)
3891 * (innerendsel - innerstartsel);
3892 }
3893 else
3894 {
3895 disabled_nodes += inner_path->disabled_nodes;
3896 startup_cost += inner_path->startup_cost;
3897 startup_cost += (inner_path->total_cost - inner_path->startup_cost)
3898 * innerstartsel;
3899 inner_run_cost = (inner_path->total_cost - inner_path->startup_cost)
3900 * (innerendsel - innerstartsel);
3901 }
3902
3903 /*
3904 * We can't yet determine whether rescanning occurs, or whether
3905 * materialization of the inner input should be done. The minimum
3906 * possible inner input cost, regardless of rescan and materialization
3907 * considerations, is inner_run_cost. We include that in
3908 * workspace->total_cost, but not yet in run_cost.
3909 */
3910
3911 /* CPU costs left for later */
3912
3913 /* Public result fields */
3914 workspace->disabled_nodes = disabled_nodes;
3915 workspace->startup_cost = startup_cost;
3916 workspace->total_cost = startup_cost + run_cost + inner_run_cost;
3917 /* Save private data for final_cost_mergejoin */
3918 workspace->run_cost = run_cost;
3919 workspace->inner_run_cost = inner_run_cost;
3920 workspace->outer_rows = outer_rows;
3921 workspace->inner_rows = inner_rows;
3922 workspace->outer_skip_rows = outer_skip_rows;
3923 workspace->inner_skip_rows = inner_skip_rows;
3924}
3925
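A minimal sketch of how initial_cost_mergejoin apportions a sorted input's cost between startup_cost and run_cost using the start/end selectivities. The sort costs and selectivities below are hypothetical stand-ins for the dummy sort_path and for the cached mergejoinscansel() results:

#include <stdio.h>

int
main(void)
{
	/* hypothetical cost of sorting the outer input */
	double		sort_startup = 100.0;
	double		sort_total = 400.0;

	/* hypothetical scan selectivities for the outer side */
	double		outerstartsel = 0.10;	/* fraction skipped before the first join pair */
	double		outerendsel = 0.60;		/* fraction scanned before the merge can stop */

	double		startup_cost = 0.0;
	double		run_cost = 0.0;

	/* same apportionment as the outersortkeys branch above */
	startup_cost += sort_startup;
	startup_cost += (sort_total - sort_startup) * outerstartsel;
	run_cost += (sort_total - sort_startup) * (outerendsel - outerstartsel);

	printf("startup_cost = %.1f, run_cost = %.1f\n", startup_cost, run_cost);
	return 0;
}

Here the startup charge is 130 and the run charge is 150; the remaining 40% of the sorted output is never scanned because the merge is expected to stop early.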
3926/*
3927 * final_cost_mergejoin
3928 * Final estimate of the cost and result size of a mergejoin path.
3929 *
3930 * Unlike other costsize functions, this routine makes two actual decisions:
3931 * whether the executor will need to do mark/restore, and whether we should
3932 * materialize the inner path. It would be logically cleaner to build
3933 * separate paths testing these alternatives, but that would require repeating
3934 * most of the cost calculations, which are not all that cheap. Since the
3935 * choice will not affect output pathkeys or startup cost, only total cost,
3936 * there is no possibility of wanting to keep more than one path. So it seems
3937 * best to make the decisions here and record them in the path's
3938 * skip_mark_restore and materialize_inner fields.
3939 *
3940 * Mark/restore overhead is usually required, but can be skipped if we know
3941 * that the executor need find only one match per outer tuple, and that the
3942 * mergeclauses are sufficient to identify a match.
3943 *
3944 * We materialize the inner path if we need mark/restore and either the inner
3945 * path can't support mark/restore, or it's cheaper to use an interposed
3946 * Material node to handle mark/restore.
3947 *
3948 * 'path' is already filled in except for the rows and cost fields and
3949 * skip_mark_restore and materialize_inner
3950 * 'workspace' is the result from initial_cost_mergejoin
3951 * 'extra' contains miscellaneous information about the join
3952 */
3953void
3954 final_cost_mergejoin(PlannerInfo *root, MergePath *path,
3955 JoinCostWorkspace *workspace,
3956 JoinPathExtraData *extra)
3957{
3958 Path *outer_path = path->jpath.outerjoinpath;
3959 Path *inner_path = path->jpath.innerjoinpath;
3960 double inner_path_rows = inner_path->rows;
3961 List *mergeclauses = path->path_mergeclauses;
3962 List *innersortkeys = path->innersortkeys;
3963 Cost startup_cost = workspace->startup_cost;
3964 Cost run_cost = workspace->run_cost;
3965 Cost inner_run_cost = workspace->inner_run_cost;
3966 double outer_rows = workspace->outer_rows;
3967 double inner_rows = workspace->inner_rows;
3968 double outer_skip_rows = workspace->outer_skip_rows;
3969 double inner_skip_rows = workspace->inner_skip_rows;
3970 Cost cpu_per_tuple,
3971 bare_inner_cost,
3972 mat_inner_cost;
3973 QualCost merge_qual_cost;
3974 QualCost qp_qual_cost;
3975 double mergejointuples,
3976 rescannedtuples;
3977 double rescanratio;
3978 uint64 enable_mask = 0;
3979
3980 /* Protect some assumptions below that rowcounts aren't zero */
3981 if (inner_path_rows <= 0)
3982 inner_path_rows = 1;
3983
3984 /* Mark the path with the correct row estimate */
3985 if (path->jpath.path.param_info)
3986 path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
3987 else
3988 path->jpath.path.rows = path->jpath.path.parent->rows;
3989
3990 /* For partial paths, scale row estimate. */
3991 if (path->jpath.path.parallel_workers > 0)
3992 {
3993 double parallel_divisor = get_parallel_divisor(&path->jpath.path);
3994
3995 path->jpath.path.rows =
3996 clamp_row_est(path->jpath.path.rows / parallel_divisor);
3997 }
3998
3999 /*
4000 * Compute cost of the mergequals and qpquals (other restriction clauses)
4001 * separately.
4002 */
4003 cost_qual_eval(&merge_qual_cost, mergeclauses, root);
4004 cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
4005 qp_qual_cost.startup -= merge_qual_cost.startup;
4006 qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
4007
4008 /*
4009 * With a SEMI or ANTI join, or if the innerrel is known unique, the
4010 * executor will stop scanning for matches after the first match. When
4011 * all the joinclauses are merge clauses, this means we don't ever need to
4012 * back up the merge, and so we can skip mark/restore overhead.
4013 */
4014 if ((path->jpath.jointype == JOIN_SEMI ||
4015 path->jpath.jointype == JOIN_ANTI ||
4016 extra->inner_unique) &&
4017 (list_length(path->jpath.joinrestrictinfo) ==
4018 list_length(path->path_mergeclauses)))
4019 path->skip_mark_restore = true;
4020 else
4021 path->skip_mark_restore = false;
4022
4023 /*
4024 * Get approx # tuples passing the mergequals. We use approx_tuple_count
4025 * here because we need an estimate done with JOIN_INNER semantics.
4026 */
4027 mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);
4028
4029 /*
4030 * When there are equal merge keys in the outer relation, the mergejoin
4031 * must rescan any matching tuples in the inner relation. This means
4032 * re-fetching inner tuples; we have to estimate how often that happens.
4033 *
4034 * For regular inner and outer joins, the number of re-fetches can be
4035 * estimated approximately as size of merge join output minus size of
4036 * inner relation. Assume that the distinct key values are 1, 2, ..., and
4037 * denote the number of values of each key in the outer relation as m1,
4038 * m2, ...; in the inner relation, n1, n2, ... Then we have
4039 *
4040 * size of join = m1 * n1 + m2 * n2 + ...
4041 *
4042 * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
4043 * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
4044 * relation
4045 *
4046 * This equation works correctly for outer tuples having no inner match
4047 * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
4048 * are effectively subtracting those from the number of rescanned tuples,
4049 * when we should not. Can we do better without expensive selectivity
4050 * computations?
4051 *
4052 * The whole issue is moot if we know we don't need to mark/restore at
4053 * all, or if we are working from a unique-ified outer input.
4054 */
4055 if (path->skip_mark_restore ||
4056 RELATION_WAS_MADE_UNIQUE(outer_path->parent, extra->sjinfo,
4057 path->jpath.jointype))
4058 rescannedtuples = 0;
4059 else
4060 {
4061 rescannedtuples = mergejointuples - inner_path_rows;
4062 /* Must clamp because of possible underestimate */
4063 if (rescannedtuples < 0)
4064 rescannedtuples = 0;
4065 }
4066
4067 /*
4068 * We'll inflate various costs this much to account for rescanning. Note
4069 * that this is to be multiplied by something involving inner_rows, or
4070 * another number related to the portion of the inner rel we'll scan.
4071 */
4072 rescanratio = 1.0 + (rescannedtuples / inner_rows);
4073
4074 /*
4075 * Decide whether we want to materialize the inner input to shield it from
4076 * mark/restore and performing re-fetches. Our cost model for regular
4077 * re-fetches is that a re-fetch costs the same as an original fetch,
4078 * which is probably an overestimate; but on the other hand we ignore the
4079 * bookkeeping costs of mark/restore. Not clear if it's worth developing
4080 * a more refined model. So we just need to inflate the inner run cost by
4081 * rescanratio.
4082 */
4083 bare_inner_cost = inner_run_cost * rescanratio;
4084
4085 /*
4086 * When we interpose a Material node the re-fetch cost is assumed to be
4087 * just cpu_operator_cost per tuple, independently of the underlying
4088 * plan's cost; and we charge an extra cpu_operator_cost per original
4089 * fetch as well. Note that we're assuming the materialize node will
4090 * never spill to disk, since it only has to remember tuples back to the
4091 * last mark. (If there are a huge number of duplicates, our other cost
4092 * factors will make the path so expensive that it probably won't get
4093 * chosen anyway.) So we don't use cost_rescan here.
4094 *
4095 * Note: keep this estimate in sync with create_mergejoin_plan's labeling
4096 * of the generated Material node.
4097 */
4098 mat_inner_cost = inner_run_cost +
4099 cpu_operator_cost * inner_rows * rescanratio;
4100
4101 /*
4102 * If we don't need mark/restore at all, we don't need materialization.
4103 */
4104 if (path->skip_mark_restore)
4105 path->materialize_inner = false;
4106
4107 /*
4108 * If merge joins with materialization are enabled, then choose
4109 * materialization if either (a) it looks cheaper or (b) merge joins
4110 * without materialization are disabled.
4111 */
4112 else if ((extra->pgs_mask & PGS_MERGEJOIN_MATERIALIZE) != 0 &&
4113 (mat_inner_cost < bare_inner_cost ||
4114 (extra->pgs_mask & PGS_MERGEJOIN_PLAIN) == 0))
4115 path->materialize_inner = true;
4116
4117 /*
4118 * Regardless of what plan shapes are enabled and what the costs seem to
4119 * be, we *must* materialize it if the inner path is to be used directly
4120 * (without sorting) and it doesn't support mark/restore. Planner failure
4121 * is not an option!
4122 *
4123 * Since the inner side must be ordered, and only Sorts and IndexScans can
4124 * create order to begin with, and they both support mark/restore, you
4125 * might think there's no problem --- but you'd be wrong. Nestloop and
4126 * merge joins can *preserve* the order of their inputs, so they can be
4127 * selected as the input of a mergejoin, and they don't support
4128 * mark/restore at present.
4129 */
4130 else if (innersortkeys == NIL &&
4131 !ExecSupportsMarkRestore(inner_path))
4132 path->materialize_inner = true;
4133
4134 /*
4135 * Also, force materializing if the inner path is to be sorted and the
4136 * sort is expected to spill to disk. This is because the final merge
4137 * pass can be done on-the-fly if it doesn't have to support mark/restore.
4138 * We don't try to adjust the cost estimates for this consideration,
4139 * though.
4140 *
4141 * Since materialization is a performance optimization in this case,
4142 * rather than necessary for correctness, we skip it if materialization is
4143 * switched off.
4144 */
4145 else if ((extra->pgs_mask & PGS_MERGEJOIN_MATERIALIZE) != 0 &&
4146 innersortkeys != NIL &&
4147 relation_byte_size(inner_path_rows,
4148 inner_path->pathtarget->width) >
4149 work_mem * (Size) 1024)
4150 path->materialize_inner = true;
4151 else
4152 path->materialize_inner = false;
4153
4154 /* Get the number of disabled nodes, not yet including this one. */
4155 path->jpath.path.disabled_nodes = workspace->disabled_nodes;
4156
4157 /*
4158 * Charge the right incremental cost for the chosen case, and update
4159 * enable_mask as appropriate.
4160 */
4161 if (path->materialize_inner)
4162 {
4163 run_cost += mat_inner_cost;
4164 enable_mask |= PGS_MERGEJOIN_MATERIALIZE;
4165 }
4166 else
4167 {
4168 run_cost += bare_inner_cost;
4169 enable_mask |= PGS_MERGEJOIN_PLAIN;
4170 }
4171
4172 /* Incremental count of disabled nodes if this node is disabled. */
4173 if (path->jpath.path.parallel_workers == 0)
4175 if ((extra->pgs_mask & enable_mask) != enable_mask)
4176 ++path->jpath.path.disabled_nodes;
4177
4178 /* CPU costs */
4179
4180 /*
4181 * The number of tuple comparisons needed is approximately number of outer
4182 * rows plus number of inner rows plus number of rescanned tuples (can we
4183 * refine this?). At each one, we need to evaluate the mergejoin quals.
4184 */
4185 startup_cost += merge_qual_cost.startup;
4186 startup_cost += merge_qual_cost.per_tuple *
4187 (outer_skip_rows + inner_skip_rows * rescanratio);
4188 run_cost += merge_qual_cost.per_tuple *
4189 ((outer_rows - outer_skip_rows) +
4190 (inner_rows - inner_skip_rows) * rescanratio);
4191
4192 /*
4193 * For each tuple that gets through the mergejoin proper, we charge
4194 * cpu_tuple_cost plus the cost of evaluating additional restriction
4195 * clauses that are to be applied at the join. (This is pessimistic since
4196 * not all of the quals may get evaluated at each tuple.)
4197 *
4198 * Note: we could adjust for SEMI/ANTI joins skipping some qual
4199 * evaluations here, but it's probably not worth the trouble.
4200 */
4201 startup_cost += qp_qual_cost.startup;
4202 cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
4203 run_cost += cpu_per_tuple * mergejointuples;
4204
4205 /* tlist eval costs are paid per output row, not per tuple scanned */
4206 startup_cost += path->jpath.path.pathtarget->cost.startup;
4207 run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
4208
4209 path->jpath.path.startup_cost = startup_cost;
4210 path->jpath.path.total_cost = startup_cost + run_cost;
4211}
4212
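The rescanned-tuples estimate derived in the comments of final_cost_mergejoin can be checked with a tiny worked example. The key multiplicities below are hypothetical: two distinct merge keys, appearing m = {2, 1} times in the outer relation and n = {3, 2} times in the inner relation.

#include <stdio.h>

int
main(void)
{
	double		m[] = {2, 1};	/* per-key counts in the outer relation */
	double		n[] = {3, 2};	/* per-key counts in the inner relation */
	double		mergejointuples = 0;	/* m1*n1 + m2*n2 + ... */
	double		inner_rows = 0; /* n1 + n2 + ... */
	double		rescannedtuples;
	double		rescanratio;
	int			i;

	for (i = 0; i < 2; i++)
	{
		mergejointuples += m[i] * n[i];
		inner_rows += n[i];
	}

	/* size of join minus size of inner relation, clamped at zero */
	rescannedtuples = mergejointuples - inner_rows;
	if (rescannedtuples < 0)
		rescannedtuples = 0;

	/* inflation factor applied to the inner input's run cost */
	rescanratio = 1.0 + (rescannedtuples / inner_rows);

	printf("join size = %.0f, rescanned = %.0f, rescanratio = %.2f\n",
		   mergejointuples, rescannedtuples, rescanratio);
	return 0;
}

The join produces 2*3 + 1*2 = 8 tuples from 5 inner tuples, so 3 inner tuples must be re-fetched and the inner run cost is inflated by a factor of 1.6.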
4213/*
4214 * run mergejoinscansel() with caching
4215 */
4216static MergeScanSelCache *
4217 cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
4218{
4219 MergeScanSelCache *cache;
4220 ListCell *lc;
4221 Selectivity leftstartsel,
4222 leftendsel,
4223 rightstartsel,
4224 rightendsel;
4225 MemoryContext oldcontext;
4226
4227 /* Do we have this result already? */
4228 foreach(lc, rinfo->scansel_cache)
4229 {
4230 cache = (MergeScanSelCache *) lfirst(lc);
4231 if (cache->opfamily == pathkey->pk_opfamily &&
4232 cache->collation == pathkey->pk_eclass->ec_collation &&
4233 cache->cmptype == pathkey->pk_cmptype &&
4234 cache->nulls_first == pathkey->pk_nulls_first)
4235 return cache;
4236 }
4237
4238 /* Nope, do the computation */
4239 mergejoinscansel(root,
4240 (Node *) rinfo->clause,
4241 pathkey->pk_opfamily,
4242 pathkey->pk_cmptype,
4243 pathkey->pk_nulls_first,
4244 &leftstartsel,
4245 &leftendsel,
4246 &rightstartsel,
4247 &rightendsel);
4248
4249 /* Cache the result in suitably long-lived workspace */
4250 oldcontext = MemoryContextSwitchTo(root->planner_cxt);
4251
4252 cache = (MergeScanSelCache *) palloc(sizeof(MergeScanSelCache));
4253 cache->opfamily = pathkey->pk_opfamily;
4254 cache->collation = pathkey->pk_eclass->ec_collation;
4255 cache->cmptype = pathkey->pk_cmptype;
4256 cache->nulls_first = pathkey->pk_nulls_first;
4257 cache->leftstartsel = leftstartsel;
4258 cache->leftendsel = leftendsel;
4259 cache->rightstartsel = rightstartsel;
4260 cache->rightendsel = rightendsel;
4261
4262 rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);
4263
4264 MemoryContextSwitchTo(oldcontext);
4265
4266 return cache;
4267}
4268
4269/*
4270 * initial_cost_hashjoin
4271 * Preliminary estimate of the cost of a hashjoin path.
4272 *
4273 * This must quickly produce lower-bound estimates of the path's startup and
4274 * total costs. If we are unable to eliminate the proposed path from
4275 * consideration using the lower bounds, final_cost_hashjoin will be called
4276 * to obtain the final estimates.
4277 *
4278 * The exact division of labor between this function and final_cost_hashjoin
4279 * is private to them, and represents a tradeoff between speed of the initial
4280 * estimate and getting a tight lower bound. We choose to not examine the
4281 * join quals here (other than by counting the number of hash clauses),
4282 * so we can't do much with CPU costs. We do assume that
4283 * ExecChooseHashTableSize is cheap enough to use here.
4284 *
4285 * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
4286 * other data to be used by final_cost_hashjoin
4287 * 'jointype' is the type of join to be performed
4288 * 'hashclauses' is the list of joinclauses to be used as hash clauses
4289 * 'outer_path' is the outer input to the join
4290 * 'inner_path' is the inner input to the join
4291 * 'extra' contains miscellaneous information about the join
4292 * 'parallel_hash' indicates that inner_path is partial and that a shared
4293 * hash table will be built in parallel
4294 */
4295void
4296 initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace,
4297 JoinType jointype,
4298 List *hashclauses,
4299 Path *outer_path, Path *inner_path,
4300 JoinPathExtraData *extra,
4301 bool parallel_hash)
4302{
4303 int disabled_nodes;
4304 Cost startup_cost = 0;
4305 Cost run_cost = 0;
4306 double outer_path_rows = outer_path->rows;
4307 double inner_path_rows = inner_path->rows;
4308 double inner_path_rows_total = inner_path_rows;
4309 int num_hashclauses = list_length(hashclauses);
4310 int numbuckets;
4311 int numbatches;
4312 int num_skew_mcvs;
4313 size_t space_allowed; /* unused */
4315
4316 if (outer_path->parallel_workers == 0)
4318
4319 /* Count up disabled nodes. */
4320 disabled_nodes = (extra->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
4321 disabled_nodes += inner_path->disabled_nodes;
4322 disabled_nodes += outer_path->disabled_nodes;
4323
4324 /* cost of source data */
4325 startup_cost += outer_path->startup_cost;
4326 run_cost += outer_path->total_cost - outer_path->startup_cost;
4327 startup_cost += inner_path->total_cost;
4328
4329 /*
4330 * Cost of computing hash function: must do it once per input tuple. We
4331 * charge one cpu_operator_cost for each column's hash function. Also,
4332 * tack on one cpu_tuple_cost per inner row, to model the costs of
4333 * inserting the row into the hashtable.
4334 *
4335 * XXX when a hashclause is more complex than a single operator, we really
4336 * should charge the extra eval costs of the left or right side, as
4337 * appropriate, here. This seems more work than it's worth at the moment.
4338 */
4339 startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
4340 * inner_path_rows;
4341 run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
4342
4343 /*
4344 * If this is a parallel hash build, then the value we have for
4345 * inner_rows_total currently refers only to the rows returned by each
4346 * participant. For shared hash table size estimation, we need the total
4347 * number, so we need to undo the division.
4348 */
4349 if (parallel_hash)
4350 inner_path_rows_total *= get_parallel_divisor(inner_path);
4351
4352 /*
4353 * Get hash table size that executor would use for inner relation.
4354 *
4355 * XXX for the moment, always assume that skew optimization will be
4356 * performed. As long as SKEW_HASH_MEM_PERCENT is small, it's not worth
4357 * trying to determine that for sure.
4358 *
4359 * XXX at some point it might be interesting to try to account for skew
4360 * optimization in the cost estimate, but for now, we don't.
4361 */
4362 ExecChooseHashTableSize(inner_path_rows_total,
4363 inner_path->pathtarget->width,
4364 true, /* useskew */
4365 parallel_hash, /* try_combined_hash_mem */
4366 outer_path->parallel_workers,
4367 &space_allowed,
4368 &numbuckets,
4369 &numbatches,
4370 &num_skew_mcvs);
4371
4372 /*
4373 * If inner relation is too big then we will need to "batch" the join,
4374 * which implies writing and reading most of the tuples to disk an extra
4375 * time. Charge seq_page_cost per page, since the I/O should be nice and
4376 * sequential. Writing the inner rel counts as startup cost, all the rest
4377 * as run cost.
4378 */
4379 if (numbatches > 1)
4380 {
4381 double outerpages = page_size(outer_path_rows,
4382 outer_path->pathtarget->width);
4383 double innerpages = page_size(inner_path_rows,
4384 inner_path->pathtarget->width);
4385
4386 startup_cost += seq_page_cost * innerpages;
4387 run_cost += seq_page_cost * (innerpages + 2 * outerpages);
4388 }
4389
4390 /* CPU costs left for later */
4391
4392 /* Public result fields */
4393 workspace->disabled_nodes = disabled_nodes;
4394 workspace->startup_cost = startup_cost;
4395 workspace->total_cost = startup_cost + run_cost;
4396 /* Save private data for final_cost_hashjoin */
4397 workspace->run_cost = run_cost;
4398 workspace->numbuckets = numbuckets;
4399 workspace->numbatches = numbatches;
4400 workspace->inner_rows_total = inner_path_rows_total;
4401}
4402
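A minimal sketch of the extra I/O that initial_cost_hashjoin charges when the hash join is expected to need more than one batch. The page counts are hypothetical; in the real code they come from page_size() applied to the row counts and widths of the two inputs:

#include <stdio.h>

int
main(void)
{
	double		outerpages = 1000.0;	/* hypothetical outer relation size in pages */
	double		innerpages = 400.0;		/* hypothetical inner relation size in pages */
	double		seq_page_cost = 1.0;	/* default value of the GUC */
	int			numbatches = 4;			/* anything > 1 triggers the charge */

	double		startup_cost = 0.0;
	double		run_cost = 0.0;

	if (numbatches > 1)
	{
		/*
		 * Writing the inner relation out counts as startup cost; re-reading
		 * it, plus writing and re-reading the outer relation, counts as run
		 * cost.
		 */
		startup_cost += seq_page_cost * innerpages;
		run_cost += seq_page_cost * (innerpages + 2 * outerpages);
	}

	printf("batch I/O: startup += %.0f, run += %.0f\n", startup_cost, run_cost);
	return 0;
}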
4403/*
4404 * final_cost_hashjoin
4405 * Final estimate of the cost and result size of a hashjoin path.
4406 *
4407 * Note: the numbatches estimate is also saved into 'path' for use later
4408 *
4409 * 'path' is already filled in except for the rows and cost fields and
4410 * num_batches
4411 * 'workspace' is the result from initial_cost_hashjoin
4412 * 'extra' contains miscellaneous information about the join
4413 */
4414void
4415 final_cost_hashjoin(PlannerInfo *root, HashPath *path,
4416 JoinCostWorkspace *workspace,
4417 JoinPathExtraData *extra)
4418{
4419 Path *outer_path = path->jpath.outerjoinpath;
4420 Path *inner_path = path->jpath.innerjoinpath;
4421 double outer_path_rows = outer_path->rows;
4422 double inner_path_rows = inner_path->rows;
4423 double inner_path_rows_total = workspace->inner_rows_total;
4424 List *hashclauses = path->path_hashclauses;
4425 Cost startup_cost = workspace->startup_cost;
4426 Cost run_cost = workspace->run_cost;
4427 int numbuckets = workspace->numbuckets;
4428 int numbatches = workspace->numbatches;
4429 Cost cpu_per_tuple;
4430 QualCost hash_qual_cost;
4431 QualCost qp_qual_cost;
4432 double hashjointuples;
4433 double virtualbuckets;
4434 Selectivity innerbucketsize;
4435 Selectivity innermcvfreq;
4436 ListCell *hcl;
4437
4438 /* Set the number of disabled nodes. */
4439 path->jpath.path.disabled_nodes = workspace->disabled_nodes;
4440
4441 /* Mark the path with the correct row estimate */
4442 if (path->jpath.path.param_info)
4443 path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
4444 else
4445 path->jpath.path.rows = path->jpath.path.parent->rows;
4446
4447 /* For partial paths, scale row estimate. */
4448 if (path->jpath.path.parallel_workers > 0)
4449 {
4450 double parallel_divisor = get_parallel_divisor(&path->jpath.path);
4451
4452 path->jpath.path.rows =
4453 clamp_row_est(path->jpath.path.rows / parallel_divisor);
4454 }
4455
4456 /* mark the path with estimated # of batches */
4457 path->num_batches = numbatches;
4458
4459 /* store the total number of tuples (sum of partial row estimates) */
4460 path->inner_rows_total = inner_path_rows_total;
4461
4462 /* and compute the number of "virtual" buckets in the whole join */
4463 virtualbuckets = (double) numbuckets * (double) numbatches;
4464
4465 /*
4466 * Determine bucketsize fraction and MCV frequency for the inner relation.
4467 * We use the smallest bucketsize or MCV frequency estimated for any
4468 * individual hashclause; this is undoubtedly conservative.
4469 *
4470 * BUT: if inner relation has been unique-ified, we can assume it's good
4471 * for hashing. This is important both because it's the right answer, and
4472 * because we avoid contaminating the cache with a value that's wrong for
4473 * non-unique-ified paths.
4474 */
4475 if (RELATION_WAS_MADE_UNIQUE(inner_path->parent, extra->sjinfo,
4476 path->jpath.jointype))
4477 {
4478 innerbucketsize = 1.0 / virtualbuckets;
4479 innermcvfreq = 0.0;
4480 }
4481 else
4482 {
4483 List *otherclauses;
4484
4485 innerbucketsize = 1.0;
4486 innermcvfreq = 1.0;
4487
4488 /* At first, try to estimate bucket size using extended statistics. */
4489 otherclauses = estimate_multivariate_bucketsize(root,
4490 inner_path->parent,
4491 hashclauses,
4492 &innerbucketsize);
4493
4494 /* Pass through the remaining clauses */
4495 foreach(hcl, otherclauses)
4496 {
4497 RestrictInfo *restrictinfo = lfirst_node(RestrictInfo, hcl);
4498 Selectivity thisbucketsize;
4499 Selectivity thismcvfreq;
4500
4501 /*
4502 * First we have to figure out which side of the hashjoin clause
4503 * is the inner side.
4504 *
4505 * Since we tend to visit the same clauses over and over when
4506 * planning a large query, we cache the bucket stats estimates in
4507 * the RestrictInfo node to avoid repeated lookups of statistics.
4508 */
4509 if (bms_is_subset(restrictinfo->right_relids,
4510 inner_path->parent->relids))
4511 {
4512 /* righthand side is inner */
4513 thisbucketsize = restrictinfo->right_bucketsize;
4514 if (thisbucketsize < 0)
4515 {
4516 /* not cached yet */
4517 estimate_hash_bucket_stats(root,
4518 get_rightop(restrictinfo->clause),
4519 virtualbuckets,
4520 &restrictinfo->right_mcvfreq,
4521 &restrictinfo->right_bucketsize);
4522 thisbucketsize = restrictinfo->right_bucketsize;
4523 }
4524 thismcvfreq = restrictinfo->right_mcvfreq;
4525 }
4526 else
4527 {
4528 Assert(bms_is_subset(restrictinfo->left_relids,
4529 inner_path->parent->relids));
4530 /* lefthand side is inner */
4531 thisbucketsize = restrictinfo->left_bucketsize;
4532 if (thisbucketsize < 0)
4533 {
4534 /* not cached yet */
4535 estimate_hash_bucket_stats(root,
4536 get_leftop(restrictinfo->clause),
4537 virtualbuckets,
4538 &restrictinfo->left_mcvfreq,
4539 &restrictinfo->left_bucketsize);
4540 thisbucketsize = restrictinfo->left_bucketsize;
4541 }
4542 thismcvfreq = restrictinfo->left_mcvfreq;
4543 }
4544
4545 if (innerbucketsize > thisbucketsize)
4546 innerbucketsize = thisbucketsize;
4547 /* Disregard zero for MCV freq, it means we have no data */
4548 if (thismcvfreq > 0.0 && innermcvfreq > thismcvfreq)
4549 innermcvfreq = thismcvfreq;
4550 }
4551 }
4552
4553 /*
4554 * If the bucket holding the inner MCV would exceed hash_mem, we don't
4555 * want to hash unless there is really no other alternative, so apply
4556 * disable_cost. (The executor normally copes with excessive memory usage
4557 * by splitting batches, but obviously it cannot separate equal values
4558 * that way, so it will be unable to drive the batch size below hash_mem
4559 * when this is true.)
4560 */
4561 if (relation_byte_size(clamp_row_est(inner_path_rows * innermcvfreq),
4562 inner_path->pathtarget->width) > get_hash_memory_limit())
4563 startup_cost += disable_cost;
4564
4565 /*
4566 * Compute cost of the hashquals and qpquals (other restriction clauses)
4567 * separately.
4568 */
4569 cost_qual_eval(&hash_qual_cost, hashclauses, root);
4570 cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
4571 qp_qual_cost.startup -= hash_qual_cost.startup;
4572 qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
4573
4574 /* CPU costs */
4575
4576 if (path->jpath.jointype == JOIN_SEMI ||
4577 path->jpath.jointype == JOIN_ANTI ||
4578 extra->inner_unique)
4579 {
4580 double outer_matched_rows;
4581 Selectivity inner_scan_frac;
4582
4583 /*
4584 * With a SEMI or ANTI join, or if the innerrel is known unique, the
4585 * executor will stop after the first match.
4586 *
4587 * For an outer-rel row that has at least one match, we can expect the
4588 * bucket scan to stop after a fraction 1/(match_count+1) of the
4589 * bucket's rows, if the matches are evenly distributed. Since they
4590 * probably aren't quite evenly distributed, we apply a fuzz factor of
4591 * 2.0 to that fraction. (If we used a larger fuzz factor, we'd have
4592 * to clamp inner_scan_frac to at most 1.0; but since match_count is
4593 * at least 1, no such clamp is needed now.)
4594 */
4595 outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
4596 inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
4597
4598 startup_cost += hash_qual_cost.startup;
4599 run_cost += hash_qual_cost.per_tuple * outer_matched_rows *
4600 clamp_row_est(inner_path_rows * innerbucketsize) * inner_scan_frac * 0.5;
4601
4602 /*
4603 * For unmatched outer-rel rows, the picture is quite a lot different.
4604 * In the first place, there is no reason to assume that these rows
4605 * preferentially hit heavily-populated buckets; instead assume they
4606 * are uncorrelated with the inner distribution and so they see an
4607 * average bucket size of inner_path_rows / virtualbuckets. In the
4608 * second place, it seems likely that they will have few if any exact
4609 * hash-code matches and so very few of the tuples in the bucket will
4610 * actually require eval of the hash quals. We don't have any good
4611 * way to estimate how many will, but for the moment assume that the
4612 * effective cost per bucket entry is one-tenth what it is for
4613 * matchable tuples.
4614 */
4615 run_cost += hash_qual_cost.per_tuple *
4616 (outer_path_rows - outer_matched_rows) *
4617 clamp_row_est(inner_path_rows / virtualbuckets) * 0.05;
4618
4619 /* Get # of tuples that will pass the basic join */
4620 if (path->jpath.jointype == JOIN_ANTI)
4621 hashjointuples = outer_path_rows - outer_matched_rows;
4622 else
4623 hashjointuples = outer_matched_rows;
4624 }
4625 else
4626 {
4627 /*
4628 * The number of tuple comparisons needed is the number of outer
4629 * tuples times the typical number of tuples in a hash bucket, which
4630 * is the inner relation size times its bucketsize fraction. At each
4631 * one, we need to evaluate the hashjoin quals. But actually,
4632 * charging the full qual eval cost at each tuple is pessimistic,
4633 * since we don't evaluate the quals unless the hash values match
4634 * exactly. For lack of a better idea, halve the cost estimate to
4635 * allow for that.
4636 */
4637 startup_cost += hash_qual_cost.startup;
4638 run_cost += hash_qual_cost.per_tuple * outer_path_rows *
4639 clamp_row_est(inner_path_rows * innerbucketsize) * 0.5;
4640
4641 /*
4642 * Get approx # tuples passing the hashquals. We use
4643 * approx_tuple_count here because we need an estimate done with
4644 * JOIN_INNER semantics.
4645 */
4646 hashjointuples = approx_tuple_count(root, &path->jpath, hashclauses);
4647 }
4648
4649 /*
4650 * For each tuple that gets through the hashjoin proper, we charge
4651 * cpu_tuple_cost plus the cost of evaluating additional restriction
4652 * clauses that are to be applied at the join. (This is pessimistic since
4653 * not all of the quals may get evaluated at each tuple.)
4654 */
4655 startup_cost += qp_qual_cost.startup;
4656 cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
4657 run_cost += cpu_per_tuple * hashjointuples;
4658
4659 /* tlist eval costs are paid per output row, not per tuple scanned */
4660 startup_cost += path->jpath.path.pathtarget->cost.startup;
4661 run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
4662
4663 path->jpath.path.startup_cost = startup_cost;
4664 path->jpath.path.total_cost = startup_cost + run_cost;
4665}
4666
4667
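To illustrate the SEMI/ANTI arm of final_cost_hashjoin, here is a small standalone sketch with made-up statistics. The 0.5 factor is the same "hash values rarely match exactly" discount used for matched rows above, and 0.05 is the one-tenth-of-that charge for unmatched rows; clamp_row_est() is omitted for brevity.

#include <stdio.h>
#include <math.h>

int
main(void)
{
	/* hypothetical inputs */
	double		outer_path_rows = 10000.0;
	double		inner_path_rows = 5000.0;
	double		virtualbuckets = 1024.0;	/* numbuckets * numbatches */
	double		innerbucketsize = 0.002;	/* bucketsize fraction for matched rows */
	double		outer_match_frac = 0.3;		/* fraction of outer rows having a match */
	double		match_count = 2.0;			/* average matches per matched outer row */
	double		qual_per_tuple = 0.0025;	/* cost of one hash-qual evaluation */

	double		outer_matched_rows = rint(outer_path_rows * outer_match_frac);
	double		inner_scan_frac = 2.0 / (match_count + 1.0);
	double		run_cost = 0.0;

	/* matched outer rows scan part of a (possibly skewed) bucket */
	run_cost += qual_per_tuple * outer_matched_rows *
		(inner_path_rows * innerbucketsize) * inner_scan_frac * 0.5;

	/*
	 * Unmatched outer rows see an average-sized bucket and rarely find exact
	 * hash-code matches, hence the much smaller per-entry charge.
	 */
	run_cost += qual_per_tuple *
		(outer_path_rows - outer_matched_rows) *
		(inner_path_rows / virtualbuckets) * 0.05;

	printf("matched = %.0f, inner_scan_frac = %.3f, hash-qual run cost = %.3f\n",
		   outer_matched_rows, inner_scan_frac, run_cost);
	return 0;
}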
4668/*
4669 * cost_subplan
4670 * Figure the costs for a SubPlan (or initplan).
4671 *
4672 * Note: we could dig the subplan's Plan out of the root list, but in practice
4673 * all callers have it handy already, so we make them pass it.
4674 */
4675void
4676 cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
4677{
4678 QualCost sp_cost;
4679
4680 /*
4681 * Figure any cost for evaluating the testexpr.
4682 *
4683 * Usually, SubPlan nodes are built very early, before we have constructed
4684 * any RelOptInfos for the parent query level, which means the parent root
4685 * does not yet contain enough information to safely consult statistics.
4686 * Therefore, we pass root as NULL here. cost_qual_eval() is already
4687 * well-equipped to handle a NULL root.
4688 *
4689 * One exception is SubPlan nodes built for the initplans of MIN/MAX
4690 * aggregates from indexes (cf. SS_make_initplan_from_plan). In this
4691 * case, having a NULL root is safe because testexpr will be NULL.
4692 * Besides, an initplan will by definition not consult anything from the
4693 * parent plan.
4694 */
4695 cost_qual_eval(&sp_cost,
4696 make_ands_implicit((Expr *) subplan->testexpr),
4697 NULL);
4698
4699 if (subplan->useHashTable)
4700 {
4701 /*
4702 * If we are using a hash table for the subquery outputs, then the
4703 * cost of evaluating the query is a one-time cost. We charge one
4704 * cpu_operator_cost per tuple for the work of loading the hashtable,
4705 * too.
4706 */
4707 sp_cost.startup += plan->total_cost +
4708 cpu_operator_cost * plan->plan_rows;
4709
4710 /*
4711 * The per-tuple costs include the cost of evaluating the lefthand
4712 * expressions, plus the cost of probing the hashtable. We already
4713 * accounted for the lefthand expressions as part of the testexpr, and
4714 * will also have counted one cpu_operator_cost for each comparison
4715 * operator. That is probably too low for the probing cost, but it's
4716 * hard to make a better estimate, so live with it for now.
4717 */
4718 }
4719 else
4720 {
4721 /*
4722 * Otherwise we will be rescanning the subplan output on each
4723 * evaluation. We need to estimate how much of the output we will
4724 * actually need to scan. NOTE: this logic should agree with the
4725 * tuple_fraction estimates used by make_subplan() in
4726 * plan/subselect.c.
4727 */
4728 Cost plan_run_cost = plan->total_cost - plan->startup_cost;
4729
4730 if (subplan->subLinkType == EXISTS_SUBLINK)
4731 {
4732 /* we only need to fetch 1 tuple; clamp to avoid zero divide */
4733 sp_cost.per_tuple += plan_run_cost / clamp_row_est(plan->plan_rows);
4734 }
4735 else if (subplan->subLinkType == ALL_SUBLINK ||
4736 subplan->subLinkType == ANY_SUBLINK)
4737 {
4738 /* assume we need 50% of the tuples */
4739 sp_cost.per_tuple += 0.50 * plan_run_cost;
4740 /* also charge a cpu_operator_cost per row examined */
4741 sp_cost.per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
4742 }
4743 else
4744 {
4745 /* assume we need all tuples */
4746 sp_cost.per_tuple += plan_run_cost;
4747 }
4748
4749 /*
4750 * Also account for subplan's startup cost. If the subplan is
4751 * uncorrelated or undirect correlated, AND its topmost node is one
4752 * that materializes its output, assume that we'll only need to pay
4753 * its startup cost once; otherwise assume we pay the startup cost
4754 * every time.
4755 */
4756 if (subplan->parParam == NIL &&
4757 ExecMaterializesOutput(nodeTag(plan)))
4758 sp_cost.startup += plan->startup_cost;
4759 else
4760 sp_cost.per_tuple += plan->startup_cost;
4761 }
4762
4763 subplan->startup_cost = sp_cost.startup;
4764 subplan->per_call_cost = sp_cost.per_tuple;
4765}
4766
4767
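The rescanning branch of cost_subplan reduces to a simple scaling of the subplan's run cost by sublink type. A minimal sketch with hypothetical plan costs (the constants mirror the one-tuple, 50%, and all-tuples assumptions above):

#include <stdio.h>

int
main(void)
{
	/* hypothetical subplan figures */
	double		plan_startup = 10.0;
	double		plan_total = 210.0;
	double		plan_rows = 1000.0;
	double		cpu_operator_cost = 0.0025; /* default value of the GUC */

	double		plan_run_cost = plan_total - plan_startup;
	double		exists_cost;
	double		any_cost;
	double		other_cost;

	/* EXISTS(...) only needs the first tuple */
	exists_cost = plan_run_cost / plan_rows;

	/* ANY/ALL assume half the output, plus one operator per row examined */
	any_cost = 0.50 * plan_run_cost + 0.50 * plan_rows * cpu_operator_cost;

	/* other sublink types are assumed to need all tuples */
	other_cost = plan_run_cost;

	printf("per-call run cost: EXISTS %.3f, ANY/ALL %.3f, other %.3f\n",
		   exists_cost, any_cost, other_cost);
	return 0;
}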
4768/*
4769 * cost_rescan
4770 * Given a finished Path, estimate the costs of rescanning it after
4771 * having done so the first time. For some Path types a rescan is
4772 * cheaper than an original scan (if no parameters change), and this
4773 * function embodies knowledge about that. The default is to return
4774 * the same costs stored in the Path. (Note that the cost estimates
4775 * actually stored in Paths are always for first scans.)
4776 *
4777 * This function is not currently intended to model effects such as rescans
4778 * being cheaper due to disk block caching; what we are concerned with is
4779 * plan types wherein the executor caches results explicitly, or doesn't
4780 * redo startup calculations, etc.
4781 */
4782static void
4783 cost_rescan(PlannerInfo *root, Path *path,
4784 Cost *rescan_startup_cost, /* output parameters */
4785 Cost *rescan_total_cost)
4786{
4787 switch (path->pathtype)
4788 {
4789 case T_FunctionScan:
4790
4791 /*
4792 * Currently, nodeFunctionscan.c always executes the function to
4793 * completion before returning any rows, and caches the results in
4794 * a tuplestore. So the function eval cost is all startup cost
4795 * and isn't paid over again on rescans. However, all run costs
4796 * will be paid over again.
4797 */
4798 *rescan_startup_cost = 0;
4799 *rescan_total_cost = path->total_cost - path->startup_cost;
4800 break;
4801 case T_HashJoin:
4802
4803 /*
4804 * If it's a single-batch join, we don't need to rebuild the hash
4805 * table during a rescan.
4806 */
4807 if (((HashPath *) path)->num_batches == 1)
4808 {
4809 /* Startup cost is exactly the cost of hash table building */
4810 *rescan_startup_cost = 0;
4811 *rescan_total_cost = path->total_cost - path->startup_cost;
4812 }
4813 else
4814 {
4815 /* Otherwise, no special treatment */
4816 *rescan_startup_cost = path->startup_cost;
4817 *rescan_total_cost = path->total_cost;
4818 }
4819 break;
4820 case T_CteScan:
4821 case T_WorkTableScan:
4822 {
4823 /*
4824 * These plan types materialize their final result in a
4825 * tuplestore or tuplesort object. So the rescan cost is only
4826 * cpu_tuple_cost per tuple, unless the result is large enough
4827 * to spill to disk.
4828 */
4829 Cost run_cost = cpu_tuple_cost * path->rows;
4830 double nbytes = relation_byte_size(path->rows,
4831 path->pathtarget->width);
4832 double work_mem_bytes = work_mem * (Size) 1024;
4833
4834 if (nbytes > work_mem_bytes)
4835 {
4836 /* It will spill, so account for re-read cost */
4837 double npages = ceil(nbytes / BLCKSZ);
4838
4839 run_cost += seq_page_cost * npages;
4840 }
4841 *rescan_startup_cost = 0;
4842 *rescan_total_cost = run_cost;
4843 }
4844 break;
4845 case T_Material:
4846 case T_Sort:
4847 {
4848 /*
4849 * These plan types not only materialize their results, but do
4850 * not implement qual filtering or projection. So they are
4851 * even cheaper to rescan than the ones above. We charge only
4852 * cpu_operator_cost per tuple. (Note: keep that in sync with
4853 * the run_cost charge in cost_sort, and also see comments in
4854 * cost_material before you change it.)
4855 */
4856 Cost run_cost = cpu_operator_cost * path->rows;
4857 double nbytes = relation_byte_size(path->rows,
4858 path->pathtarget->width);
4859 double work_mem_bytes = work_mem * (Size) 1024;
4860
4861 if (nbytes > work_mem_bytes)
4862 {
4863 /* It will spill, so account for re-read cost */
4864 double npages = ceil(nbytes / BLCKSZ);
4865
4866 run_cost += seq_page_cost * npages;
4867 }
4868 *rescan_startup_cost = 0;
4869 *rescan_total_cost = run_cost;
4870 }
4871 break;
4872 case T_Memoize:
4873 /* All the hard work is done by cost_memoize_rescan */
4874 cost_memoize_rescan(root, (MemoizePath *) path,
4875 rescan_startup_cost, rescan_total_cost);
4876 break;
4877 default:
4878 *rescan_startup_cost = path->startup_cost;
4879 *rescan_total_cost = path->total_cost;
4880 break;
4881 }
4882}
4883
4884
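For the Material/Sort arm of cost_rescan, the rescan charge is just cpu_operator_cost per row, plus a sequential re-read if the materialized result is expected to spill past work_mem. A minimal sketch with hypothetical sizes; the row width here ignores the per-tuple overhead that relation_byte_size() adds, and BLCKSZ is assumed to be the usual 8192 bytes:

#include <stdio.h>
#include <math.h>

int
main(void)
{
	/* hypothetical materialized result */
	double		rows = 200000.0;
	double		width = 64.0;			/* bytes per row, overhead ignored */
	double		work_mem_kb = 4096.0;	/* 4MB */

	double		cpu_operator_cost = 0.0025; /* default value of the GUC */
	double		seq_page_cost = 1.0;		/* default value of the GUC */
	double		blcksz = 8192.0;

	double		run_cost = cpu_operator_cost * rows;
	double		nbytes = rows * width;
	double		work_mem_bytes = work_mem_kb * 1024.0;

	if (nbytes > work_mem_bytes)
	{
		/* the result spills, so charge for re-reading it sequentially */
		double		npages = ceil(nbytes / blcksz);

		run_cost += seq_page_cost * npages;
	}

	printf("rescan: startup = 0, total = %.1f\n", run_cost);
	return 0;
}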
4885/*
4886 * cost_qual_eval
4887 * Estimate the CPU costs of evaluating a WHERE clause.
4888 * The input can be either an implicitly-ANDed list of boolean
4889 * expressions, or a list of RestrictInfo nodes. (The latter is
4890 * preferred since it allows caching of the results.)
4891 * The result includes both a one-time (startup) component,
4892 * and a per-evaluation component.
4893 *
4894 * Note: in some code paths root can be passed as NULL, resulting in
4895 * slightly worse estimates.
4896 */
4897void
4898 cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
4899{
4900 cost_qual_eval_context context;
4901 ListCell *l;
4902
4903 context.root = root;
4904 context.total.startup = 0;
4905 context.total.per_tuple = 0;
4906
4907 /* We don't charge any cost for the implicit ANDing at top level ... */
4908
4909 foreach(l, quals)
4910 {
4911 Node *qual = (Node *) lfirst(l);
4912
4913 cost_qual_eval_walker(qual, &context);
4914 }
4915
4916 *cost = context.total;
4917}
4918
4919/*
4920 * cost_qual_eval_node
4921 * As above, for a single RestrictInfo or expression.
4922 */
4923void
4924 cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
4925{
4926 cost_qual_eval_context context;
4927
4928 context.root = root;
4929 context.total.startup = 0;
4930 context.total.per_tuple = 0;
4931
4932 cost_qual_eval_walker(qual, &context);
4933
4934 *cost = context.total;
4935}
4936
4937static bool
4938 cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
4939{
4940 if (node == NULL)
4941 return false;
4942
4943 /*
4944 * RestrictInfo nodes contain an eval_cost field reserved for this
4945 * routine's use, so that it's not necessary to evaluate the qual clause's
4946 * cost more than once. If the clause's cost hasn't been computed yet,
4947 * the field's startup value will contain -1.
4948 */
4949 if (IsA(node, RestrictInfo))
4950 {
4951 RestrictInfo *rinfo = (RestrictInfo *) node;
4952
4953 if (rinfo->eval_cost.startup < 0)
4954 {
4955 cost_qual_eval_context locContext;
4956
4957 locContext.root = context->root;
4958 locContext.total.startup = 0;
4959 locContext.total.per_tuple = 0;
4960
4961 /*
4962 * For an OR clause, recurse into the marked-up tree so that we
4963 * set the eval_cost for contained RestrictInfos too.
4964 */
4965 if (rinfo->orclause)
4966 cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
4967 else
4968 cost_qual_eval_walker((Node *) rinfo->clause, &locContext);
4969
4970 /*
4971 * If the RestrictInfo is marked pseudoconstant, it will be tested
4972 * only once, so treat its cost as all startup cost.
4973 */
4974 if (rinfo->pseudoconstant)
4975 {
4976 /* count one execution during startup */
4977 locContext.total.startup += locContext.total.per_tuple;
4978 locContext.total.per_tuple = 0;
4979 }
4980 rinfo->eval_cost = locContext.total;
4981 }
4982 context->total.startup += rinfo->eval_cost.startup;
4983 context->total.per_tuple += rinfo->eval_cost.per_tuple;
4984 /* do NOT recurse into children */
4985 return false;
4986 }
4987
4988 /*
4989 * For each operator or function node in the given tree, we charge the
4990 * estimated execution cost given by pg_proc.procost (remember to multiply
4991 * this by cpu_operator_cost).
4992 *
4993 * Vars and Consts are charged zero, and so are boolean operators (AND,
4994 * OR, NOT). Simplistic, but a lot better than no model at all.
4995 *
4996 * Should we try to account for the possibility of short-circuit
4997 * evaluation of AND/OR? Probably *not*, because that would make the
4998 * results depend on the clause ordering, and we are not in any position
4999 * to expect that the current ordering of the clauses is the one that's
5000 * going to end up being used. The above per-RestrictInfo caching would
5001 * not mix well with trying to re-order clauses anyway.
5002 *
5003 * Another issue that is entirely ignored here is that if a set-returning
5004 * function is below top level in the tree, the functions/operators above
5005 * it will need to be evaluated multiple times. In practical use, such
5006 * cases arise so seldom as to not be worth the added complexity needed;
5007 * moreover, since our rowcount estimates for functions tend to be pretty
5008 * phony, the results would also be pretty phony.
5009 */
5010 if (IsA(node, FuncExpr))
5011 {
5012 add_function_cost(context->root, ((FuncExpr *) node)->funcid, node,
5013 &context->total);
5014 }
5015 else if (IsA(node, OpExpr) ||
5016 IsA(node, DistinctExpr) ||
5017 IsA(node, NullIfExpr))
5018 {
5019 /* rely on struct equivalence to treat these all alike */
5020 set_opfuncid((OpExpr *) node);
5021 add_function_cost(context->root, ((OpExpr *) node)->opfuncid, node,
5022 &context->total);
5023 }
5024 else if (IsA(node, ScalarArrayOpExpr))
5025 {
5026 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
5027 Node *arraynode = (Node *) lsecond(saop->args);
5028 QualCost sacosts;
5029 QualCost hcosts;
5030 double estarraylen = estimate_array_length(context->root, arraynode);
5031
5032 set_sa_opfuncid(saop);
5033 sacosts.startup = sacosts.per_tuple = 0;
5034 add_function_cost(context->root, saop->opfuncid, NULL,
5035 &sacosts);
5036
5037 if (OidIsValid(saop->hashfuncid))
5038 {
5039 /* Handle costs for hashed ScalarArrayOpExpr */
5040 hcosts.startup = hcosts.per_tuple = 0;
5041
5042 add_function_cost(context->root, saop->hashfuncid, NULL, &hcosts);
5043 context->total.startup += sacosts.startup + hcosts.startup;
5044
5045 /* Estimate the cost of building the hashtable. */
5046 context->total.startup += estarraylen * hcosts.per_tuple;
5047
5048 /*
5049 * XXX should we charge a little bit for sacosts.per_tuple when
5050 * building the table, or is it ok to assume there will be zero
5051 * hash collision?
5052 */
5053
5054 /*
5055 * Charge for hashtable lookups. Charge a single hash and a
5056 * single comparison.
5057 */
5058 context->total.per_tuple += hcosts.per_tuple + sacosts.per_tuple;
5059 }
5060 else
5061 {
5062 /*
5063 * Estimate that the operator will be applied to about half of the
5064 * array elements before the answer is determined.
5065 */
5066 context->total.startup += sacosts.startup;
5067 context->total.per_tuple += sacosts.per_tuple *
5068 estimate_array_length(context->root, arraynode) * 0.5;
5069 }
5070 }
5071 else if (IsA(node, Aggref) ||
5072 IsA(node, WindowFunc))
5073 {
5074 /*
5075 * Aggref and WindowFunc nodes are (and should be) treated like Vars,
5076 * ie, zero execution cost in the current model, because they behave
5077 * essentially like Vars at execution. We disregard the costs of
5078 * their input expressions for the same reason. The actual execution
5079 * costs of the aggregate/window functions and their arguments have to
5080 * be factored into plan-node-specific costing of the Agg or WindowAgg
5081 * plan node.
5082 */
5083 return false; /* don't recurse into children */
5084 }
5085 else if (IsA(node, GroupingFunc))
5086 {
5087 /* Treat this as having cost 1 */
5088 context->total.per_tuple += cpu_operator_cost;
5089 return false; /* don't recurse into children */
5090 }
5091 else if (IsA(node, CoerceViaIO))
5092 {
5093 CoerceViaIO *iocoerce = (CoerceViaIO *) node;
5094 Oid iofunc;
5095 Oid typioparam;
5096 bool typisvarlena;
5097
5098 /* check the result type's input function */
5099 getTypeInputInfo(iocoerce->resulttype,
5100 &iofunc, &typioparam);
5101 add_function_cost(context->root, iofunc, NULL,
5102 &context->total);
5103 /* check the input type's output function */
5104 getTypeOutputInfo(exprType((Node *) iocoerce->arg),
5105 &iofunc, &typisvarlena);
5106 add_function_cost(context->root, iofunc, NULL,
5107 &context->total);
5108 }
5109 else if (IsA(node, ArrayCoerceExpr))
5110 {
5111 ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
5112 QualCost perelemcost;
5113
5114 cost_qual_eval_node(&perelemcost, (Node *) acoerce->elemexpr,
5115 context->root);
5116 context->total.startup += perelemcost.startup;
5117 if (perelemcost.per_tuple > 0)
5118 context->total.per_tuple += perelemcost.per_tuple *
5119 estimate_array_length(context->root, (Node *) acoerce->arg);
5120 }
5121 else if (IsA(node, RowCompareExpr))
5122 {
5123 /* Conservatively assume we will check all the columns */
5124 RowCompareExpr *rcexpr = (RowCompareExpr *) node;
5125 ListCell *lc;
5126
5127 foreach(lc, rcexpr->opnos)
5128 {
5129 Oid opid = lfirst_oid(lc);
5130
5131 add_function_cost(context->root, get_opcode(opid), NULL,
5132 &context->total);
5133 }
5134 }
5135 else if (IsA(node, MinMaxExpr) ||
5136 IsA(node, SQLValueFunction) ||
5137 IsA(node, XmlExpr) ||
5138 IsA(node, CoerceToDomain) ||
5139 IsA(node, NextValueExpr) ||
5140 IsA(node, JsonExpr))
5141 {
5142 /* Treat all these as having cost 1 */
5143 context->total.per_tuple += cpu_operator_cost;
5144 }
5145 else if (IsA(node, SubLink))
5146 {
5147 /* This routine should not be applied to un-planned expressions */
5148 elog(ERROR, "cannot handle unplanned sub-select");
5149 }
5150 else if (IsA(node, SubPlan))
5151 {
5152 /*
5153 * A subplan node in an expression typically indicates that the
5154 * subplan will be executed on each evaluation, so charge accordingly.
5155 * (Sub-selects that can be executed as InitPlans have already been
5156 * removed from the expression.)
5157 */
5158 SubPlan *subplan = (SubPlan *) node;
5159
5160 context->total.startup += subplan->startup_cost;
5161 context->total.per_tuple += subplan->per_call_cost;
5162
5163 /*
5164 * We don't want to recurse into the testexpr, because it was already
5165 * counted in the SubPlan node's costs. So we're done.
5166 */
5167 return false;
5168 }
5169 else if (IsA(node, AlternativeSubPlan))
5170 {
5171 /*
5172 * Arbitrarily use the first alternative plan for costing. (We should
5173 * certainly only include one alternative, and we don't yet have
5174 * enough information to know which one the executor is most likely to
5175 * use.)
5176 */
5177 AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;
5178
5179 return cost_qual_eval_walker((Node *) linitial(asplan->subplans),
5180 context);
5181 }
5182 else if (IsA(node, PlaceHolderVar))
5183 {
5184 /*
5185 * A PlaceHolderVar should be given cost zero when considering general
5186 * expression evaluation costs. The expense of doing the contained
5187 * expression is charged as part of the tlist eval costs of the scan
5188 * or join where the PHV is first computed (see set_rel_width and
5189 * add_placeholders_to_joinrel). If we charged it again here, we'd be
5190 * double-counting the cost for each level of plan that the PHV
5191 * bubbles up through. Hence, return without recursing into the
5192 * phexpr.
5193 */
5194 return false;
5195 }
5196
5197 /* recurse into children */
5198 return expression_tree_walker(node, cost_qual_eval_walker, context);
5199}
5200
5201/*
5202 * get_restriction_qual_cost
5203 * Compute evaluation costs of a baserel's restriction quals, plus any
5204 * movable join quals that have been pushed down to the scan.
5205 * Results are returned into *qpqual_cost.
5206 *
5207 * This is a convenience subroutine that works for seqscans and other cases
5208 * where all the given quals will be evaluated the hard way. It's not useful
5209 * for cost_index(), for example, where the index machinery takes care of
5210 * some of the quals. We assume baserestrictcost was previously set by
5211 * set_baserel_size_estimates().
5212 */
5213static void
5214 get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
5215 ParamPathInfo *param_info,
5216 QualCost *qpqual_cost)
5217{
5218 if (param_info)
5219 {
5220 /* Include costs of pushed-down clauses */
5221 cost_qual_eval(qpqual_cost, param_info->ppi_clauses, root);
5222
5223 qpqual_cost->startup += baserel->baserestrictcost.startup;
5224 qpqual_cost->per_tuple += baserel->baserestrictcost.per_tuple;
5225 }
5226 else
5227 *qpqual_cost = baserel->baserestrictcost;
5228}
5229
5230
5231/*
5232 * compute_semi_anti_join_factors
5233 * Estimate how much of the inner input a SEMI, ANTI, or inner_unique join
5234 * can be expected to scan.
5235 *
5236 * In a hash or nestloop SEMI/ANTI join, the executor will stop scanning
5237 * inner rows as soon as it finds a match to the current outer row.
5238 * The same happens if we have detected the inner rel is unique.
5239 * We should therefore adjust some of the cost components for this effect.
5240 * This function computes some estimates needed for these adjustments.
5241 * These estimates will be the same regardless of the particular paths used
5242 * for the outer and inner relation, so we compute these once and then pass
5243 * them to all the join cost estimation functions.
5244 *
5245 * Input parameters:
5246 * joinrel: join relation under consideration
5247 * outerrel: outer relation under consideration
5248 * innerrel: inner relation under consideration
5249 * jointype: if not JOIN_SEMI or JOIN_ANTI, we assume it's inner_unique
5250 * sjinfo: SpecialJoinInfo relevant to this join
5251 * restrictlist: join quals
5252 * Output parameters:
5253 * *semifactors is filled in (see pathnodes.h for field definitions)
5254 */
5255void
5256 compute_semi_anti_join_factors(PlannerInfo *root,
5257 RelOptInfo *joinrel,
5258 RelOptInfo *outerrel,
5259 RelOptInfo *innerrel,
5260 JoinType jointype,
5261 SpecialJoinInfo *sjinfo,
5262 List *restrictlist,
5263 SemiAntiJoinFactors *semifactors)
5264{
5265 Selectivity jselec;
5266 Selectivity nselec;
5267 Selectivity avgmatch;
5268 SpecialJoinInfo norm_sjinfo;
5269 List *joinquals;
5270 ListCell *l;
5271
5272 /*
5273 * In an ANTI join, we must ignore clauses that are "pushed down", since
5274 * those won't affect the match logic. In a SEMI join, we do not
5275 * distinguish joinquals from "pushed down" quals, so just use the whole
5276 * restrictinfo list. For other outer join types, we should consider only
5277 * non-pushed-down quals, so that this devolves to an IS_OUTER_JOIN check.
5278 */
5279 if (IS_OUTER_JOIN(jointype))
5280 {
5281 joinquals = NIL;
5282 foreach(l, restrictlist)
5283 {
5284 RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
5285
5286 if (!RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
5287 joinquals = lappend(joinquals, rinfo);
5288 }
5289 }
5290 else
5291 joinquals = restrictlist;
5292
5293 /*
5294 * Get the JOIN_SEMI or JOIN_ANTI selectivity of the join clauses.
5295 */
5296 jselec = clauselist_selectivity(root,
5297 joinquals,
5298 0,
5299 (jointype == JOIN_ANTI) ? JOIN_ANTI : JOIN_SEMI,
5300 sjinfo);
5301
5302 /*
5303 * Also get the normal inner-join selectivity of the join clauses.
5304 */
5305 init_dummy_sjinfo(&norm_sjinfo, outerrel->relids, innerrel->relids);
5306
5307 nselec = clauselist_selectivity(root,
5308 joinquals,
5309 0,
5310 JOIN_INNER,
5311 &norm_sjinfo);
5312
5313 /* Avoid leaking a lot of ListCells */
5314 if (IS_OUTER_JOIN(jointype))
5315 list_free(joinquals);
5316
5317 /*
5318 * jselec can be interpreted as the fraction of outer-rel rows that have
5319 * any matches (this is true for both SEMI and ANTI cases). And nselec is
5320 * the fraction of the Cartesian product that matches. So, the average
5321 * number of matches for each outer-rel row that has at least one match is
5322 * nselec * inner_rows / jselec.
5323 *
5324 * Note: it is correct to use the inner rel's "rows" count here, even
5325 * though we might later be considering a parameterized inner path with
5326 * fewer rows. This is because we have included all the join clauses in
5327 * the selectivity estimate.
5328 */
5329 if (jselec > 0) /* protect against zero divide */
5330 {
5331 avgmatch = nselec * innerrel->rows / jselec;
5332 /* Clamp to sane range */
5333 avgmatch = Max(1.0, avgmatch);
5334 }
5335 else
5336 avgmatch = 1.0;
5337
5338 semifactors->outer_match_frac = jselec;
5339 semifactors->match_count = avgmatch;
5340}
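/*
 * Worked example (illustrative, made-up numbers; not part of costsize.c):
 * suppose jselec = 0.2 (20% of outer rows have at least one match),
 * nselec = 0.001 (fraction of the Cartesian product that matches), and
 * innerrel->rows = 1000.  Then
 *     avgmatch = nselec * inner_rows / jselec = 0.001 * 1000 / 0.2 = 5.0
 * i.e. each outer row that matches at all is expected to match about five
 * inner rows; the Max(1.0, ...) clamp only matters when this comes out
 * below one.
 */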
5341
5342/*
5343 * has_indexed_join_quals
5344 * Check whether all the joinquals of a nestloop join are used as
5345 * inner index quals.
5346 *
5347 * If the inner path of a SEMI/ANTI join is an indexscan (including bitmap
5348 * indexscan) that uses all the joinquals as indexquals, we can assume that an
5349 * unmatched outer tuple is cheap to process, whereas otherwise it's probably
5350 * expensive.
5351 */
5352static bool
5353has_indexed_join_quals(NestPath *path)
5354{
5355 JoinPath *joinpath = &path->jpath;
5356 Relids joinrelids = joinpath->path.parent->relids;
5357 Path *innerpath = joinpath->innerjoinpath;
5358 List *indexclauses;
5359 bool found_one;
5360 ListCell *lc;
5361
5362 /* If join still has quals to evaluate, it's not fast */
5363 if (joinpath->joinrestrictinfo != NIL)
5364 return false;
5365 /* Nor if the inner path isn't parameterized at all */
5366 if (innerpath->param_info == NULL)
5367 return false;
5368
5369 /* Find the indexclauses list for the inner scan */
5370 switch (innerpath->pathtype)
5371 {
5372 case T_IndexScan:
5373 case T_IndexOnlyScan:
5374 indexclauses = ((IndexPath *) innerpath)->indexclauses;
5375 break;
5376 case T_BitmapHeapScan:
5377 {
5378 /* Accept only a simple bitmap scan, not AND/OR cases */
5379 Path *bmqual = ((BitmapHeapPath *) innerpath)->bitmapqual;
5380
5381 if (IsA(bmqual, IndexPath))
5382 indexclauses = ((IndexPath *) bmqual)->indexclauses;
5383 else
5384 return false;
5385 break;
5386 }
5387 default:
5388
5389 /*
5390 * If it's not a simple indexscan, it probably doesn't run quickly
5391 * for zero rows out, even if it's a parameterized path using all
5392 * the joinquals.
5393 */
5394 return false;
5395 }
5396
5397 /*
5398 * Examine the inner path's param clauses. Any that are from the outer
5399 * path must be found in the indexclauses list, either exactly or in an
5400 * equivalent form generated by equivclass.c. Also, we must find at least
5401 * one such clause, else it's a clauseless join which isn't fast.
5402 */
5403 found_one = false;
5404 foreach(lc, innerpath->param_info->ppi_clauses)
5405 {
5406 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
5407
5408 if (join_clause_is_movable_into(rinfo,
5409 innerpath->parent->relids,
5410 joinrelids))
5411 {
5412 if (!is_redundant_with_indexclauses(rinfo, indexclauses))
5413 return false;
5414 found_one = true;
5415 }
5416 }
5417 return found_one;
5418}
5419
5420
5421/*
5422 * approx_tuple_count
5423 * Quick-and-dirty estimation of the number of join rows passing
5424 * a set of qual conditions.
5425 *
5426 * The quals can be either an implicitly-ANDed list of boolean expressions,
5427 * or a list of RestrictInfo nodes (typically the latter).
5428 *
5429 * We intentionally compute the selectivity under JOIN_INNER rules, even
5430 * if it's some type of outer join. This is appropriate because we are
5431 * trying to figure out how many tuples pass the initial merge or hash
5432 * join step.
5433 *
5434 * This is quick-and-dirty because we bypass clauselist_selectivity, and
5435 * simply multiply the independent clause selectivities together. Now
5436 * clauselist_selectivity often can't do any better than that anyhow, but
5437 * for some situations (such as range constraints) it is smarter. However,
5438 * we can't effectively cache the results of clauselist_selectivity, whereas
5439 * the individual clause selectivities can be and are cached.
5440 *
5441 * Since we are only using the results to estimate how many potential
5442 * output tuples are generated and passed through qpqual checking, it
5443 * seems OK to live with the approximation.
5444 */
5445static double
5446approx_tuple_count(PlannerInfo *root, JoinPath *path, List *quals)
5447{
5448 double tuples;
5449 double outer_tuples = path->outerjoinpath->rows;
5450 double inner_tuples = path->innerjoinpath->rows;
5451 SpecialJoinInfo sjinfo;
5452 Selectivity selec = 1.0;
5453 ListCell *l;
5454
5455 /*
5456 * Make up a SpecialJoinInfo for JOIN_INNER semantics.
5457 */
5458 init_dummy_sjinfo(&sjinfo, path->outerjoinpath->parent->relids,
5459 path->innerjoinpath->parent->relids);
5460
5461 /* Get the approximate selectivity */
5462 foreach(l, quals)
5463 {
5464 Node *qual = (Node *) lfirst(l);
5465
5466 /* Note that clause_selectivity will be able to cache its result */
5467 selec *= clause_selectivity(root, qual, 0, JOIN_INNER, &sjinfo);
5468 }
5469
5470 /* Apply it to the input relation sizes */
5471 tuples = selec * outer_tuples * inner_tuples;
5472
5473 return clamp_row_est(tuples);
5474}
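/*
 * Worked example (illustrative, made-up numbers; not part of costsize.c):
 * with two quals whose cached selectivities are 0.1 and 0.05, the loop above
 * computes selec = 0.1 * 0.05 = 0.005 (independence assumed).  For
 * outer_tuples = 10000 and inner_tuples = 1000 the estimate is
 * 0.005 * 10000 * 1000 = 50000 rows, before clamp_row_est().
 */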
5475
5476
5477/*
5478 * set_baserel_size_estimates
5479 * Set the size estimates for the given base relation.
5480 *
5481 * The rel's targetlist and restrictinfo list must have been constructed
5482 * already, and rel->tuples must be set.
5483 *
5484 * We set the following fields of the rel node:
5485 * rows: the estimated number of output tuples (after applying
5486 * restriction clauses).
5487 * width: the estimated average output tuple width in bytes.
5488 * baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
5489 */
5490void
5491set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5492{
5493 double nrows;
5494
5495 /* Should only be applied to base relations */
5496 Assert(rel->relid > 0);
5497
5498 nrows = rel->tuples *
5499 clauselist_selectivity(root,
5500 rel->baserestrictinfo,
5501 0,
5502 JOIN_INNER,
5503 NULL);
5504
5505 rel->rows = clamp_row_est(nrows);
5506
5507 cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
5508
5509 set_rel_width(root, rel);
5510}
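/*
 * Worked example (illustrative, made-up numbers; not part of costsize.c):
 * for a table with rel->tuples = 1000000 whose baserestrictinfo clauses have
 * a combined selectivity of 0.01, the estimate above is
 *     rel->rows = clamp_row_est(1000000 * 0.01) = 10000.
 */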
5511
5512/*
5513 * get_parameterized_baserel_size
5514 * Make a size estimate for a parameterized scan of a base relation.
5515 *
5516 * 'param_clauses' lists the additional join clauses to be used.
5517 *
5518 * set_baserel_size_estimates must have been applied already.
5519 */
5520double
5521get_parameterized_baserel_size(PlannerInfo *root, RelOptInfo *rel,
5522 List *param_clauses)
5523{
5524 List *allclauses;
5525 double nrows;
5526
5527 /*
5528 * Estimate the number of rows returned by the parameterized scan, knowing
5529 * that it will apply all the extra join clauses as well as the rel's own
5530 * restriction clauses. Note that we force the clauses to be treated as
5531 * non-join clauses during selectivity estimation.
5532 */
5533 allclauses = list_concat_copy(param_clauses, rel->baserestrictinfo);
5534 nrows = rel->tuples *
5535 clauselist_selectivity(root,
5536 allclauses,
5537 rel->relid, /* do not use 0! */
5538 JOIN_INNER,
5539 NULL);
5540 nrows = clamp_row_est(nrows);
5541 /* For safety, make sure result is not more than the base estimate */
5542 if (nrows > rel->rows)
5543 nrows = rel->rows;
5544 return nrows;
5545}
5546
5547/*
5548 * set_joinrel_size_estimates
5549 * Set the size estimates for the given join relation.
5550 *
5551 * The rel's targetlist must have been constructed already, and a
5552 * restriction clause list that matches the given component rels must
5553 * be provided.
5554 *
5555 * Since there is more than one way to make a joinrel for more than two
5556 * base relations, the results we get here could depend on which component
5557 * rel pair is provided. In theory we should get the same answers no matter
5558 * which pair is provided; in practice, since the selectivity estimation
5559 * routines don't handle all cases equally well, we might not. But there's
5560 * not much to be done about it. (Would it make sense to repeat the
5561 * calculations for each pair of input rels that's encountered, and somehow
5562 * average the results? Probably way more trouble than it's worth, and
5563 * anyway we must keep the rowcount estimate the same for all paths for the
5564 * joinrel.)
5565 *
5566 * We set only the rows field here. The reltarget field was already set by
5567 * build_joinrel_tlist, and baserestrictcost is not used for join rels.
5568 */
5569void
5570set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
5571 RelOptInfo *outer_rel,
5572 RelOptInfo *inner_rel,
5573 SpecialJoinInfo *sjinfo,
5574 List *restrictlist)
5575{
5576 rel->rows = calc_joinrel_size_estimate(root,
5577 rel,
5578 outer_rel,
5579 inner_rel,
5580 outer_rel->rows,
5581 inner_rel->rows,
5582 sjinfo,
5583 restrictlist);
5584}
5585
5586/*
5587 * get_parameterized_joinrel_size
5588 * Make a size estimate for a parameterized scan of a join relation.
5589 *
5590 * 'rel' is the joinrel under consideration.
5591 * 'outer_path', 'inner_path' are (probably also parameterized) Paths that
5592 * produce the relations being joined.
5593 * 'sjinfo' is any SpecialJoinInfo relevant to this join.
5594 * 'restrict_clauses' lists the join clauses that need to be applied at the
5595 * join node (including any movable clauses that were moved down to this join,
5596 * and not including any movable clauses that were pushed down into the
5597 * child paths).
5598 *
5599 * set_joinrel_size_estimates must have been applied already.
5600 */
5601double
5602get_parameterized_joinrel_size(PlannerInfo *root, RelOptInfo *rel,
5603 Path *outer_path,
5604 Path *inner_path,
5605 SpecialJoinInfo *sjinfo,
5606 List *restrict_clauses)
5607{
5608 double nrows;
5609
5610 /*
5611 * Estimate the number of rows returned by the parameterized join as the
5612 * sizes of the input paths times the selectivity of the clauses that have
5613 * ended up at this join node.
5614 *
5615 * As with set_joinrel_size_estimates, the rowcount estimate could depend
5616 * on the pair of input paths provided, though ideally we'd get the same
5617 * estimate for any pair with the same parameterization.
5618 */
5619 nrows = calc_joinrel_size_estimate(root,
5620 rel,
5621 outer_path->parent,
5622 inner_path->parent,
5623 outer_path->rows,
5624 inner_path->rows,
5625 sjinfo,
5626 restrict_clauses);
5627 /* For safety, make sure result is not more than the base estimate */
5628 if (nrows > rel->rows)
5629 nrows = rel->rows;
5630 return nrows;
5631}
5632
5633/*
5634 * calc_joinrel_size_estimate
5635 * Workhorse for set_joinrel_size_estimates and
5636 * get_parameterized_joinrel_size.
5637 *
5638 * outer_rel/inner_rel are the relations being joined, but they should be
5639 * assumed to have sizes outer_rows/inner_rows; those numbers might be less
5640 * than what rel->rows says, when we are considering parameterized paths.
5641 */
5642static double
5643calc_joinrel_size_estimate(PlannerInfo *root,
5644 RelOptInfo *joinrel,
5645 RelOptInfo *outer_rel,
5646 RelOptInfo *inner_rel,
5647 double outer_rows,
5648 double inner_rows,
5649 SpecialJoinInfo *sjinfo,
5650 List *restrictlist)
5651{
5652 JoinType jointype = sjinfo->jointype;
5653 Selectivity fkselec;
5654 Selectivity jselec;
5655 Selectivity pselec;
5656 double nrows;
5657
5658 /*
5659 * Compute joinclause selectivity. Note that we are only considering
5660 * clauses that become restriction clauses at this join level; we are not
5661 * double-counting them because they were not considered in estimating the
5662 * sizes of the component rels.
5663 *
5664 * First, see whether any of the joinclauses can be matched to known FK
5665 * constraints. If so, drop those clauses from the restrictlist, and
5666 * instead estimate their selectivity using FK semantics. (We do this
5667 * without regard to whether said clauses are local or "pushed down".
5668 * Probably, an FK-matching clause could never be seen as pushed down at
5669 * an outer join, since it would be strict and hence would be grounds for
5670 * join strength reduction.) fkselec gets the net selectivity for
5671 * FK-matching clauses, or 1.0 if there are none.
5672 */
5673 fkselec = get_foreign_key_join_selectivity(root,
5674 outer_rel->relids,
5675 inner_rel->relids,
5676 sjinfo,
5677 &restrictlist);
5678
5679 /*
5680 * For an outer join, we have to distinguish the selectivity of the join's
5681 * own clauses (JOIN/ON conditions) from any clauses that were "pushed
5682 * down". For inner joins we just count them all as joinclauses.
5683 */
5684 if (IS_OUTER_JOIN(jointype))
5685 {
5686 List *joinquals = NIL;
5687 List *pushedquals = NIL;
5688 ListCell *l;
5689
5690 /* Grovel through the clauses to separate into two lists */
5691 foreach(l, restrictlist)
5692 {
5693 RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
5694
5695 if (RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
5696 pushedquals = lappend(pushedquals, rinfo);
5697 else
5698 joinquals = lappend(joinquals, rinfo);
5699 }
5700
5701 /* Get the separate selectivities */
5702 jselec = clauselist_selectivity(root,
5703 joinquals,
5704 0,
5705 jointype,
5706 sjinfo);
5707 pselec = clauselist_selectivity(root,
5708 pushedquals,
5709 0,
5710 jointype,
5711 sjinfo);
5712
5713 /* Avoid leaking a lot of ListCells */
5714 list_free(joinquals);
5715 list_free(pushedquals);
5716 }
5717 else
5718 {
5719 jselec = clauselist_selectivity(root,
5720 restrictlist,
5721 0,
5722 jointype,
5723 sjinfo);
5724 pselec = 0.0; /* not used, keep compiler quiet */
5725 }
5726
5727 /*
5728 * Basically, we multiply size of Cartesian product by selectivity.
5729 *
5730 * If we are doing an outer join, take that into account: the joinqual
5731 * selectivity has to be clamped using the knowledge that the output must
5732 * be at least as large as the non-nullable input. However, any
5733 * pushed-down quals are applied after the outer join, so their
5734 * selectivity applies fully.
5735 *
5736 * For JOIN_SEMI and JOIN_ANTI, the selectivity is defined as the fraction
5737 * of LHS rows that have matches, and we apply that straightforwardly.
5738 */
5739 switch (jointype)
5740 {
5741 case JOIN_INNER:
5742 nrows = outer_rows * inner_rows * fkselec * jselec;
5743 /* pselec not used */
5744 break;
5745 case JOIN_LEFT:
5746 nrows = outer_rows * inner_rows * fkselec * jselec;
5747 if (nrows < outer_rows)
5748 nrows = outer_rows;
5749 nrows *= pselec;
5750 break;
5751 case JOIN_FULL:
5752 nrows = outer_rows * inner_rows * fkselec * jselec;
5753 if (nrows < outer_rows)
5754 nrows = outer_rows;
5755 if (nrows < inner_rows)
5756 nrows = inner_rows;
5757 nrows *= pselec;
5758 break;
5759 case JOIN_SEMI:
5760 nrows = outer_rows * fkselec * jselec;
5761 /* pselec not used */
5762 break;
5763 case JOIN_ANTI:
5764 nrows = outer_rows * (1.0 - fkselec * jselec);
5765 nrows *= pselec;
5766 break;
5767 default:
5768 /* other values not expected here */
5769 elog(ERROR, "unrecognized join type: %d", (int) jointype);
5770 nrows = 0; /* keep compiler quiet */
5771 break;
5772 }
5773
5774 return clamp_row_est(nrows);
5775}
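/*
 * Worked example (illustrative, made-up numbers; not part of costsize.c) for
 * the JOIN_LEFT branch above.  With outer_rows = 1000, inner_rows = 100,
 * fkselec = 1.0, jselec = 0.002 and pselec = 0.5:
 *     nrows = 1000 * 100 * 1.0 * 0.002 = 200
 *     clamp: 200 < 1000, so nrows = 1000 (every outer row appears at least once)
 *     pushed-down quals apply afterwards: nrows = 1000 * 0.5 = 500
 */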
5776
5777/*
5778 * get_foreign_key_join_selectivity
5779 * Estimate join selectivity for foreign-key-related clauses.
5780 *
5781 * Remove any clauses that can be matched to FK constraints from *restrictlist,
5782 * and return a substitute estimate of their selectivity. 1.0 is returned
5783 * when there are no such clauses.
5784 *
5785 * The reason for treating such clauses specially is that we can get better
5786 * estimates this way than by relying on clauselist_selectivity(), especially
5787 * for multi-column FKs where that function's assumption that the clauses are
5788 * independent falls down badly. But even with single-column FKs, we may be
5789 * able to get a better answer when the pg_statistic stats are missing or out
5790 * of date.
5791 */
5792static Selectivity
5793get_foreign_key_join_selectivity(PlannerInfo *root,
5794 Relids outer_relids,
5795 Relids inner_relids,
5796 SpecialJoinInfo *sjinfo,
5797 List **restrictlist)
5798{
5799 Selectivity fkselec = 1.0;
5800 JoinType jointype = sjinfo->jointype;
5801 List *worklist = *restrictlist;
5802 ListCell *lc;
5803
5804 /* Consider each FK constraint that is known to match the query */
5805 foreach(lc, root->fkey_list)
5806 {
5807 ForeignKeyOptInfo *fkinfo = (ForeignKeyOptInfo *) lfirst(lc);
5808 bool ref_is_outer;
5809 List *removedlist;
5810 ListCell *cell;
5811
5812 /*
5813 * This FK is not relevant unless it connects a baserel on one side of
5814 * this join to a baserel on the other side.
5815 */
5816 if (bms_is_member(fkinfo->con_relid, outer_relids) &&
5817 bms_is_member(fkinfo->ref_relid, inner_relids))
5818 ref_is_outer = false;
5819 else if (bms_is_member(fkinfo->ref_relid, outer_relids) &&
5820 bms_is_member(fkinfo->con_relid, inner_relids))
5821 ref_is_outer = true;
5822 else
5823 continue;
5824
5825 /*
5826 * If we're dealing with a semi/anti join, and the FK's referenced
5827 * relation is on the outside, then knowledge of the FK doesn't help
5828 * us figure out what we need to know (which is the fraction of outer
5829 * rows that have matches). On the other hand, if the referenced rel
5830 * is on the inside, then all outer rows must have matches in the
5831 * referenced table (ignoring nulls). But any restriction or join
5832 * clauses that filter that table will reduce the fraction of matches.
5833 * We can account for restriction clauses, but it's too hard to guess
5834 * how many table rows would get through a join that's inside the RHS.
5835 * Hence, if either case applies, punt and ignore the FK.
5836 */
5837 if ((jointype == JOIN_SEMI || jointype == JOIN_ANTI) &&
5838 ref_is_outer)
5839 continue;
5840
5841 /*
5842 * Modify the restrictlist by removing clauses that match the FK (and
5843 * putting them into removedlist instead). It seems unsafe to modify
5844 * the originally-passed List structure, so we make a shallow copy the
5845 * first time through.
5846 */
5847 if (worklist == *restrictlist)
5848 worklist = list_copy(worklist);
5849
5850 removedlist = NIL;
5851 foreach(cell, worklist)
5852 {
5853 RestrictInfo *rinfo = (RestrictInfo *) lfirst(cell);
5854 bool remove_it = false;
5855 int i;
5856
5857 /* Drop this clause if it matches any column of the FK */
5858 for (i = 0; i < fkinfo->nkeys; i++)
5859 {
5860 if (rinfo->parent_ec)
5861 {
5862 /*
5863 * EC-derived clauses can only match by EC. It is okay to
5864 * consider any clause derived from the same EC as
5865 * matching the FK: even if equivclass.c chose to generate
5866 * a clause equating some other pair of Vars, it could
5867 * have generated one equating the FK's Vars. So for
5868 * purposes of estimation, we can act as though it did so.
5869 *
5870 * Note: checking parent_ec is a bit of a cheat because
5871 * there are EC-derived clauses that don't have parent_ec
5872 * set; but such clauses must compare expressions that
5873 * aren't just Vars, so they cannot match the FK anyway.
5874 */
5875 if (fkinfo->eclass[i] == rinfo->parent_ec)
5876 {
5877 remove_it = true;
5878 break;
5879 }
5880 }
5881 else
5882 {
5883 /*
5884 * Otherwise, see if rinfo was previously matched to FK as
5885 * a "loose" clause.
5886 */
5887 if (list_member_ptr(fkinfo->rinfos[i], rinfo))
5888 {
5889 remove_it = true;
5890 break;
5891 }
5892 }
5893 }
5894 if (remove_it)
5895 {
5896 worklist = foreach_delete_current(worklist, cell);
5897 removedlist = lappend(removedlist, rinfo);
5898 }
5899 }
5900
5901 /*
5902 * If we failed to remove all the matching clauses we expected to
5903 * find, chicken out and ignore this FK; applying its selectivity
5904 * might result in double-counting. Put any clauses we did manage to
5905 * remove back into the worklist.
5906 *
5907 * Since the matching clauses are known not outerjoin-delayed, they
5908 * would normally have appeared in the initial joinclause list. If we
5909 * didn't find them, there are two possibilities:
5910 *
5911 * 1. If the FK match is based on an EC that is ec_has_const, it won't
5912 * have generated any join clauses at all. We discount such ECs while
5913 * checking to see if we have "all" the clauses. (Below, we'll adjust
5914 * the selectivity estimate for this case.)
5915 *
5916 * 2. The clauses were matched to some other FK in a previous
5917 * iteration of this loop, and thus removed from worklist. (A likely
5918 * case is that two FKs are matched to the same EC; there will be only
5919 * one EC-derived clause in the initial list, so the first FK will
5920 * consume it.) Applying both FKs' selectivity independently risks
5921 * underestimating the join size; in particular, this would undo one
5922 * of the main things that ECs were invented for, namely to avoid
5923 * double-counting the selectivity of redundant equality conditions.
5924 * Later we might think of a reasonable way to combine the estimates,
5925 * but for now, just punt, since this is a fairly uncommon situation.
5926 */
5927 if (removedlist == NIL ||
5928 list_length(removedlist) !=
5929 (fkinfo->nmatched_ec - fkinfo->nconst_ec + fkinfo->nmatched_ri))
5930 {
5931 worklist = list_concat(worklist, removedlist);
5932 continue;
5933 }
5934
5935 /*
5936 * Finally we get to the payoff: estimate selectivity using the
5937 * knowledge that each referencing row will match exactly one row in
5938 * the referenced table.
5939 *
5940 * XXX that's not true in the presence of nulls in the referencing
5941 * column(s), so in principle we should derate the estimate for those.
5942 * However (1) if there are any strict restriction clauses for the
5943 * referencing column(s) elsewhere in the query, derating here would
5944 * be double-counting the null fraction, and (2) it's not very clear
5945 * how to combine null fractions for multiple referencing columns. So
5946 * we do nothing for now about correcting for nulls.
5947 *
5948 * XXX another point here is that if either side of an FK constraint
5949 * is an inheritance parent, we estimate as though the constraint
5950 * covers all its children as well. This is not an unreasonable
5951 * assumption for a referencing table, ie the user probably applied
5952 * identical constraints to all child tables (though perhaps we ought
5953 * to check that). But it's not possible to have done that for a
5954 * referenced table. Fortunately, precisely because that doesn't
5955 * work, it is uncommon in practice to have an FK referencing a parent
5956 * table. So, at least for now, disregard inheritance here.
5957 */
5958 if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
5959 {
5960 /*
5961 * For JOIN_SEMI and JOIN_ANTI, we only get here when the FK's
5962 * referenced table is exactly the inside of the join. The join
5963 * selectivity is defined as the fraction of LHS rows that have
5964 * matches. The FK implies that every LHS row has a match *in the
5965 * referenced table*; but any restriction clauses on it will
5966 * reduce the number of matches. Hence we take the join
5967 * selectivity as equal to the selectivity of the table's
5968 * restriction clauses, which is rows / tuples; but we must guard
5969 * against tuples == 0.
5970 */
5971 RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
5972 double ref_tuples = Max(ref_rel->tuples, 1.0);
5973
5974 fkselec *= ref_rel->rows / ref_tuples;
5975 }
5976 else
5977 {
5978 /*
5979 * Otherwise, selectivity is exactly 1/referenced-table-size; but
5980 * guard against tuples == 0. Note we should use the raw table
5981 * tuple count, not any estimate of its filtered or joined size.
5982 */
5983 RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
5984 double ref_tuples = Max(ref_rel->tuples, 1.0);
5985
5986 fkselec *= 1.0 / ref_tuples;
5987 }
5988
5989 /*
5990 * If any of the FK columns participated in ec_has_const ECs, then
5991 * equivclass.c will have generated "var = const" restrictions for
5992 * each side of the join, thus reducing the sizes of both input
5993 * relations. Taking the fkselec at face value would amount to
5994 * double-counting the selectivity of the constant restriction for the
5995 * referencing Var. Hence, look for the restriction clause(s) that
5996 * were applied to the referencing Var(s), and divide out their
5997 * selectivity to correct for this.
5998 */
5999 if (fkinfo->nconst_ec > 0)
6000 {
6001 for (int i = 0; i < fkinfo->nkeys; i++)
6002 {
6003 EquivalenceClass *ec = fkinfo->eclass[i];
6004
6005 if (ec && ec->ec_has_const)
6006 {
6007 EquivalenceMember *em = fkinfo->fk_eclass_member[i];
6008 RestrictInfo *rinfo = find_derived_clause_for_ec_member(root,
6009 ec,
6010 em);
6011
6012 if (rinfo)
6013 {
6014 Selectivity s0;
6015
6016 s0 = clause_selectivity(root,
6017 (Node *) rinfo,
6018 0,
6019 jointype,
6020 sjinfo);
6021 if (s0 > 0)
6022 fkselec /= s0;
6023 }
6024 }
6025 }
6026 }
6027 }
6028
6029 *restrictlist = worklist;
6030 CLAMP_PROBABILITY(fkselec);
6031 return fkselec;
6032}
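/*
 * Worked example (illustrative, made-up numbers; not part of costsize.c):
 * for a plain inner join whose only join clause matches a foreign key, the
 * code above yields fkselec = 1.0 / referenced-table-size.  E.g. with a
 * referenced table of 50000 tuples, fkselec = 1/50000 = 0.00002, so joining
 * a 10000-row referencing input gives 10000 * 50000 * 0.00002 = 10000 rows,
 * i.e. one match per referencing row, which is what the FK guarantees.
 */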
6033
6034/*
6035 * set_subquery_size_estimates
6036 * Set the size estimates for a base relation that is a subquery.
6037 *
6038 * The rel's targetlist and restrictinfo list must have been constructed
6039 * already, and the Paths for the subquery must have been completed.
6040 * We look at the subquery's PlannerInfo to extract data.
6041 *
6042 * We set the same fields as set_baserel_size_estimates.
6043 */
6044void
6045set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6046{
6047 PlannerInfo *subroot = rel->subroot;
6048 RelOptInfo *sub_final_rel;
6049 ListCell *lc;
6050
6051 /* Should only be applied to base relations that are subqueries */
6052 Assert(rel->relid > 0);
6053 Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_SUBQUERY);
6054
6055 /*
6056 * Copy raw number of output rows from subquery. All of its paths should
6057 * have the same output rowcount, so just look at cheapest-total.
6058 */
6059 sub_final_rel = fetch_upper_rel(subroot, UPPERREL_FINAL, NULL);
6060 rel->tuples = sub_final_rel->cheapest_total_path->rows;
6061
6062 /*
6063 * Compute per-output-column width estimates by examining the subquery's
6064 * targetlist. For any output that is a plain Var, get the width estimate
6065 * that was made while planning the subquery. Otherwise, we leave it to
6066 * set_rel_width to fill in a datatype-based default estimate.
6067 */
6068 foreach(lc, subroot->parse->targetList)
6069 {
6070 TargetEntry *te = lfirst_node(TargetEntry, lc);
6071 Node *texpr = (Node *) te->expr;
6072 int32 item_width = 0;
6073
6074 /* junk columns aren't visible to upper query */
6075 if (te->resjunk)
6076 continue;
6077
6078 /*
6079 * The subquery could be an expansion of a view that's had columns
6080 * added to it since the current query was parsed, so that there are
6081 * non-junk tlist columns in it that don't correspond to any column
6082 * visible at our query level. Ignore such columns.
6083 */
6084 if (te->resno < rel->min_attr || te->resno > rel->max_attr)
6085 continue;
6086
6087 /*
6088 * XXX This currently doesn't work for subqueries containing set
6089 * operations, because the Vars in their tlists are bogus references
6090 * to the first leaf subquery, which wouldn't give the right answer
6091 * even if we could still get to its PlannerInfo.
6092 *
6093 * Also, the subquery could be an appendrel for which all branches are
6094 * known empty due to constraint exclusion, in which case
6095 * set_append_rel_pathlist will have left the attr_widths set to zero.
6096 *
6097 * In either case, we just leave the width estimate zero until
6098 * set_rel_width fixes it.
6099 */
6100 if (IsA(texpr, Var) &&
6101 subroot->parse->setOperations == NULL)
6102 {
6103 Var *var = (Var *) texpr;
6104 RelOptInfo *subrel = find_base_rel(subroot, var->varno);
6105
6106 item_width = subrel->attr_widths[var->varattno - subrel->min_attr];
6107 }
6108 rel->attr_widths[te->resno - rel->min_attr] = item_width;
6109 }
6110
6111 /* Now estimate number of output rows, etc */
6112 set_baserel_size_estimates(root, rel);
6113}
6114
6115/*
6116 * set_function_size_estimates
6117 * Set the size estimates for a base relation that is a function call.
6118 *
6119 * The rel's targetlist and restrictinfo list must have been constructed
6120 * already.
6121 *
6122 * We set the same fields as set_baserel_size_estimates.
6123 */
6124void
6125set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6126{
6127 RangeTblEntry *rte;
6128 ListCell *lc;
6129
6130 /* Should only be applied to base relations that are functions */
6131 Assert(rel->relid > 0);
6132 rte = planner_rt_fetch(rel->relid, root);
6133 Assert(rte->rtekind == RTE_FUNCTION);
6134
6135 /*
6136 * Estimate number of rows the functions will return. The rowcount of the
6137 * node is that of the largest function result.
6138 */
6139 rel->tuples = 0;
6140 foreach(lc, rte->functions)
6141 {
6142 RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
6143 double ntup = expression_returns_set_rows(root, rtfunc->funcexpr);
6144
6145 if (ntup > rel->tuples)
6146 rel->tuples = ntup;
6147 }
6148
6149 /* Now estimate number of output rows, etc */
6150 set_baserel_size_estimates(root, rel);
6151}
6152
6153/*
6154 * set_tablefunc_size_estimates
6155 * Set the size estimates for a base relation that is a table function call.
6156 *
6157 * The rel's targetlist and restrictinfo list must have been constructed
6158 * already.
6159 *
6160 * We set the same fields as set_baserel_size_estimates.
6161 */
6162void
6163set_tablefunc_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6164{
6165 /* Should only be applied to base relations that are functions */
6166 Assert(rel->relid > 0);
6167 Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_TABLEFUNC);
6168
6169 rel->tuples = 100;
6170
6171 /* Now estimate number of output rows, etc */
6172 set_baserel_size_estimates(root, rel);
6173}
6174
6175/*
6176 * set_values_size_estimates
6177 * Set the size estimates for a base relation that is a values list.
6178 *
6179 * The rel's targetlist and restrictinfo list must have been constructed
6180 * already.
6181 *
6182 * We set the same fields as set_baserel_size_estimates.
6183 */
6184void
6185set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6186{
6187 RangeTblEntry *rte;
6188
6189 /* Should only be applied to base relations that are values lists */
6190 Assert(rel->relid > 0);
6191 rte = planner_rt_fetch(rel->relid, root);
6192 Assert(rte->rtekind == RTE_VALUES);
6193
6194 /*
6195 * Estimate number of rows the values list will return. We know this
6196 * precisely based on the list length (well, barring set-returning
6197 * functions in list items, but that's a refinement not catered for
6198 * anywhere else either).
6199 */
6200 rel->tuples = list_length(rte->values_lists);
6201
6202 /* Now estimate number of output rows, etc */
6203 set_baserel_size_estimates(root, rel);
6204}
6205
6206/*
6207 * set_cte_size_estimates
6208 * Set the size estimates for a base relation that is a CTE reference.
6209 *
6210 * The rel's targetlist and restrictinfo list must have been constructed
6211 * already, and we need an estimate of the number of rows returned by the CTE
6212 * (if a regular CTE) or the non-recursive term (if a self-reference).
6213 *
6214 * We set the same fields as set_baserel_size_estimates.
6215 */
6216void
6217set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, double cte_rows)
6218{
6219 RangeTblEntry *rte;
6220
6221 /* Should only be applied to base relations that are CTE references */
6222 Assert(rel->relid > 0);
6223 rte = planner_rt_fetch(rel->relid, root);
6224 Assert(rte->rtekind == RTE_CTE);
6225
6226 if (rte->self_reference)
6227 {
6228 /*
6229 * In a self-reference, we assume the average worktable size is a
6230 * multiple of the nonrecursive term's size. The best multiplier will
6231 * vary depending on query "fan-out", so make its value adjustable.
6232 */
6233 rel->tuples = clamp_row_est(recursive_worktable_factor * cte_rows);
6234 }
6235 else
6236 {
6237 /* Otherwise just believe the CTE's rowcount estimate */
6238 rel->tuples = cte_rows;
6239 }
6240
6241 /* Now estimate number of output rows, etc */
6242 set_baserel_size_estimates(root, rel);
6243}
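/*
 * Worked example (illustrative, made-up numbers; not part of costsize.c):
 * with the default recursive_worktable_factor of 10 and a nonrecursive term
 * estimated at 500 rows, a self-referencing CTE scan is sized at
 * clamp_row_est(10 * 500) = 5000 tuples.
 */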
6244
6245/*
6246 * set_namedtuplestore_size_estimates
6247 * Set the size estimates for a base relation that is a tuplestore reference.
6248 *
6249 * The rel's targetlist and restrictinfo list must have been constructed
6250 * already.
6251 *
6252 * We set the same fields as set_baserel_size_estimates.
6253 */
6254void
6255set_namedtuplestore_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6256{
6257 RangeTblEntry *rte;
6258
6259 /* Should only be applied to base relations that are tuplestore references */
6260 Assert(rel->relid > 0);
6261 rte = planner_rt_fetch(rel->relid, root);
6262 Assert(rte->rtekind == RTE_NAMEDTUPLESTORE);
6263
6264 /*
6265 * Use the estimate provided by the code which is generating the named
6266 * tuplestore. In some cases, the actual number might be available; in
6267 * others the same plan will be re-used, so a "typical" value might be
6268 * estimated and used.
6269 */
6270 rel->tuples = rte->enrtuples;
6271 if (rel->tuples < 0)
6272 rel->tuples = 1000;
6273
6274 /* Now estimate number of output rows, etc */
6275 set_baserel_size_estimates(root, rel);
6276}
6277
6278/*
6279 * set_result_size_estimates
6280 * Set the size estimates for an RTE_RESULT base relation
6281 *
6282 * The rel's targetlist and restrictinfo list must have been constructed
6283 * already.
6284 *
6285 * We set the same fields as set_baserel_size_estimates.
6286 */
6287void
6288set_result_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6289{
6290 /* Should only be applied to RTE_RESULT base relations */
6291 Assert(rel->relid > 0);
6292 Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_RESULT);
6293
6294 /* RTE_RESULT always generates a single row, natively */
6295 rel->tuples = 1;
6296
6297 /* Now estimate number of output rows, etc */
6298 set_baserel_size_estimates(root, rel);
6299}
6300
6301/*
6302 * set_foreign_size_estimates
6303 * Set the size estimates for a base relation that is a foreign table.
6304 *
6305 * There is not a whole lot that we can do here; the foreign-data wrapper
6306 * is responsible for producing useful estimates. We can do a decent job
6307 * of estimating baserestrictcost, so we set that, and we also set up width
6308 * using what will be purely datatype-driven estimates from the targetlist.
6309 * There is no way to do anything sane with the rows value, so we just put
6310 * a default estimate and hope that the wrapper can improve on it. The
6311 * wrapper's GetForeignRelSize function will be called momentarily.
6312 *
6313 * The rel's targetlist and restrictinfo list must have been constructed
6314 * already.
6315 */
6316void
6317set_foreign_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6318{
6319 /* Should only be applied to base relations */
6320 Assert(rel->relid > 0);
6321
6322 rel->rows = 1000; /* entirely bogus default estimate */
6323
6324 cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
6325
6326 set_rel_width(root, rel);
6327}
6328
6329
6330/*
6331 * set_rel_width
6332 * Set the estimated output width of a base relation.
6333 *
6334 * The estimated output width is the sum of the per-attribute width estimates
6335 * for the actually-referenced columns, plus any PHVs or other expressions
6336 * that have to be calculated at this relation. This is the amount of data
6337 * we'd need to pass upwards in case of a sort, hash, etc.
6338 *
6339 * This function also sets reltarget->cost, so it's a bit misnamed now.
6340 *
6341 * NB: this works best on plain relations because it prefers to look at
6342 * real Vars. For subqueries, set_subquery_size_estimates will already have
6343 * copied up whatever per-column estimates were made within the subquery,
6344 * and for other types of rels there isn't much we can do anyway. We fall
6345 * back on (fairly stupid) datatype-based width estimates if we can't get
6346 * any better number.
6347 *
6348 * The per-attribute width estimates are cached for possible re-use while
6349 * building join relations or post-scan/join pathtargets.
6350 */
6351static void
6352set_rel_width(PlannerInfo *root, RelOptInfo *rel)
6353{
6354 Oid reloid = planner_rt_fetch(rel->relid, root)->relid;
6355 int64 tuple_width = 0;
6356 bool have_wholerow_var = false;
6357 ListCell *lc;
6358
6359 /* Vars are assumed to have cost zero, but other exprs do not */
6360 rel->reltarget->cost.startup = 0;
6361 rel->reltarget->cost.per_tuple = 0;
6362
6363 foreach(lc, rel->reltarget->exprs)
6364 {
6365 Node *node = (Node *) lfirst(lc);
6366
6367 /*
6368 * Ordinarily, a Var in a rel's targetlist must belong to that rel;
6369 * but there are corner cases involving LATERAL references where that
6370 * isn't so. If the Var has the wrong varno, fall through to the
6371 * generic case (it doesn't seem worth the trouble to be any smarter).
6372 */
6373 if (IsA(node, Var) &&
6374 ((Var *) node)->varno == rel->relid)
6375 {
6376 Var *var = (Var *) node;
6377 int ndx;
6378 int32 item_width;
6379
6380 Assert(var->varattno >= rel->min_attr);
6381 Assert(var->varattno <= rel->max_attr);
6382
6383 ndx = var->varattno - rel->min_attr;
6384
6385 /*
6386 * If it's a whole-row Var, we'll deal with it below after we have
6387 * already cached as many attr widths as possible.
6388 */
6389 if (var->varattno == 0)
6390 {
6391 have_wholerow_var = true;
6392 continue;
6393 }
6394
6395 /*
6396 * The width may have been cached already (especially if it's a
6397 * subquery), so don't duplicate effort.
6398 */
6399 if (rel->attr_widths[ndx] > 0)
6400 {
6401 tuple_width += rel->attr_widths[ndx];
6402 continue;
6403 }
6404
6405 /* Try to get column width from statistics */
6406 if (reloid != InvalidOid && var->varattno > 0)
6407 {
6408 item_width = get_attavgwidth(reloid, var->varattno);
6409 if (item_width > 0)
6410 {
6411 rel->attr_widths[ndx] = item_width;
6412 tuple_width += item_width;
6413 continue;
6414 }
6415 }
6416
6417 /*
6418 * Not a plain relation, or can't find statistics for it. Estimate
6419 * using just the type info.
6420 */
6421 item_width = get_typavgwidth(var->vartype, var->vartypmod);
6422 Assert(item_width > 0);
6423 rel->attr_widths[ndx] = item_width;
6424 tuple_width += item_width;
6425 }
6426 else if (IsA(node, PlaceHolderVar))
6427 {
6428 /*
6429 * We will need to evaluate the PHV's contained expression while
6430 * scanning this rel, so be sure to include it in reltarget->cost.
6431 */
6432 PlaceHolderVar *phv = (PlaceHolderVar *) node;
6433 PlaceHolderInfo *phinfo = find_placeholder_info(root, phv);
6434 QualCost cost;
6435
6436 tuple_width += phinfo->ph_width;
6437 cost_qual_eval_node(&cost, (Node *) phv->phexpr, root);
6438 rel->reltarget->cost.startup += cost.startup;
6439 rel->reltarget->cost.per_tuple += cost.per_tuple;
6440 }
6441 else
6442 {
6443 /*
6444 * We could be looking at an expression pulled up from a subquery,
6445 * or a ROW() representing a whole-row child Var, etc. Do what we
6446 * can using the expression type information.
6447 */
6448 int32 item_width;
6449 QualCost cost;
6450
6451 item_width = get_expr_width(root, node);
6452 Assert(item_width > 0);
6453 tuple_width += item_width;
6454 /* Not entirely clear if we need to account for cost, but do so */
6455 cost_qual_eval_node(&cost, node, root);
6456 rel->reltarget->cost.startup += cost.startup;
6457 rel->reltarget->cost.per_tuple += cost.per_tuple;
6458 }
6459 }
6460
6461 /*
6462 * If we have a whole-row reference, estimate its width as the sum of
6463 * per-column widths plus heap tuple header overhead.
6464 */
6465 if (have_wholerow_var)
6466 {
6467 int64 wholerow_width = MAXALIGN(SizeofHeapTupleHeader);
6468
6469 if (reloid != InvalidOid)
6470 {
6471 /* Real relation, so estimate true tuple width */
6472 wholerow_width += get_relation_data_width(reloid,
6473 rel->attr_widths - rel->min_attr);
6474 }
6475 else
6476 {
6477 /* Do what we can with info for a phony rel */
6478 AttrNumber i;
6479
6480 for (i = 1; i <= rel->max_attr; i++)
6481 wholerow_width += rel->attr_widths[i - rel->min_attr];
6482 }
6483
6484 rel->attr_widths[0 - rel->min_attr] = clamp_width_est(wholerow_width);
6485
6486 /*
6487 * Include the whole-row Var as part of the output tuple. Yes, that
6488 * really is what happens at runtime.
6489 */
6490 tuple_width += wholerow_width;
6491 }
6492
6493 rel->reltarget->width = clamp_width_est(tuple_width);
6494}
6495
6496/*
6497 * set_pathtarget_cost_width
6498 * Set the estimated eval cost and output width of a PathTarget tlist.
6499 *
6500 * As a notational convenience, returns the same PathTarget pointer passed in.
6501 *
6502 * Most, though not quite all, uses of this function occur after we've run
6503 * set_rel_width() for base relations; so we can usually obtain cached width
6504 * estimates for Vars. If we can't, fall back on datatype-based width
6505 * estimates. Present early-planning uses of PathTargets don't need accurate
6506 * widths badly enough to justify going to the catalogs for better data.
6507 */
6508PathTarget *
6509set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
6510{
6511 int64 tuple_width = 0;
6512 ListCell *lc;
6513
6514 /* Vars are assumed to have cost zero, but other exprs do not */
6515 target->cost.startup = 0;
6516 target->cost.per_tuple = 0;
6517
6518 foreach(lc, target->exprs)
6519 {
6520 Node *node = (Node *) lfirst(lc);
6521
6522 tuple_width += get_expr_width(root, node);
6523
6524 /* For non-Vars, account for evaluation cost */
6525 if (!IsA(node, Var))
6526 {
6527 QualCost cost;
6528
6529 cost_qual_eval_node(&cost, node, root);
6530 target->cost.startup += cost.startup;
6531 target->cost.per_tuple += cost.per_tuple;
6532 }
6533 }
6534
6535 target->width = clamp_width_est(tuple_width);
6536
6537 return target;
6538}
6539
6540/*
6541 * get_expr_width
6542 * Estimate the width of the given expr attempting to use the width
6543 * cached in a Var's owning RelOptInfo, else fallback on the type's
6544 * average width when unable to or when the given Node is not a Var.
6545 */
6546static int32
6547get_expr_width(PlannerInfo *root, const Node *expr)
6548{
6549 int32 width;
6550
6551 if (IsA(expr, Var))
6552 {
6553 const Var *var = (const Var *) expr;
6554
6555 /* We should not see any upper-level Vars here */
6556 Assert(var->varlevelsup == 0);
6557
6558 /* Try to get data from RelOptInfo cache */
6559 if (!IS_SPECIAL_VARNO(var->varno) &&
6560 var->varno < root->simple_rel_array_size)
6561 {
6562 RelOptInfo *rel = root->simple_rel_array[var->varno];
6563
6564 if (rel != NULL &&
6565 var->varattno >= rel->min_attr &&
6566 var->varattno <= rel->max_attr)
6567 {
6568 int ndx = var->varattno - rel->min_attr;
6569
6570 if (rel->attr_widths[ndx] > 0)
6571 return rel->attr_widths[ndx];
6572 }
6573 }
6574
6575 /*
6576 * No cached data available, so estimate using just the type info.
6577 */
6578 width = get_typavgwidth(var->vartype, var->vartypmod);
6579 Assert(width > 0);
6580
6581 return width;
6582 }
6583
6584 width = get_typavgwidth(exprType(expr), exprTypmod(expr));
6585 Assert(width > 0);
6586 return width;
6587}
6588
6589/*
6590 * relation_byte_size
6591 * Estimate the storage space in bytes for a given number of tuples
6592 * of a given width (size in bytes).
6593 */
6594static double
6595relation_byte_size(double tuples, int width)
6596{
6597 return tuples * (MAXALIGN(width) + MAXALIGN(SizeofHeapTupleHeader));
6598}
6599
6600/*
6601 * page_size
6602 * Returns an estimate of the number of pages covered by a given
6603 * number of tuples of a given width (size in bytes).
6604 */
6605static double
6606page_size(double tuples, int width)
6607{
6608 return ceil(relation_byte_size(tuples, width) / BLCKSZ);
6609}
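/*
 * Worked example (illustrative, made-up numbers; not part of costsize.c):
 * 1000 tuples of width 100 occupy roughly
 *     relation_byte_size(1000, 100) = 1000 * (MAXALIGN(100) + MAXALIGN(23))
 *                                   = 1000 * (104 + 24) = 128000 bytes
 * (64-bit MAXALIGN, 23-byte heap tuple header), and
 *     page_size(1000, 100) = ceil(128000 / 8192) = 16 pages
 * with the default 8kB BLCKSZ.
 */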
6610
6611/*
6612 * Estimate the fraction of the work that each worker will do given the
6613 * number of workers budgeted for the path.
6614 */
6615static double
6616get_parallel_divisor(Path *path)
6617{
6618 double parallel_divisor = path->parallel_workers;
6619
6620 /*
6621 * Early experience with parallel query suggests that when there is only
6622 * one worker, the leader often makes a very substantial contribution to
6623 * executing the parallel portion of the plan, but as more workers are
6624 * added, it does less and less, because it's busy reading tuples from the
6625 * workers and doing whatever non-parallel post-processing is needed. By
6626 * the time we reach 4 workers, the leader no longer makes a meaningful
6627 * contribution. Thus, for now, estimate that the leader spends 30% of
6628 * its time servicing each worker, and the remainder executing the
6629 * parallel plan.
6630 */
6631 if (parallel_leader_participation)
6632 {
6633 double leader_contribution;
6634
6635 leader_contribution = 1.0 - (0.3 * path->parallel_workers);
6636 if (leader_contribution > 0)
6637 parallel_divisor += leader_contribution;
6638 }
6639
6640 return parallel_divisor;
6641}
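/*
 * Worked example (illustrative, made-up numbers; not part of costsize.c):
 * with 2 budgeted workers and parallel_leader_participation enabled, the
 * leader is assumed to contribute 1.0 - 0.3 * 2 = 0.4 of a worker, so the
 * divisor is 2 + 0.4 = 2.4; at 4 or more workers the leader term drops to
 * zero and the divisor is just the worker count.
 */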
6642
6643/*
6644 * compute_bitmap_pages
6645 * Estimate number of pages fetched from heap in a bitmap heap scan.
6646 *
6647 * 'baserel' is the relation to be scanned
6648 * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
6649 * 'loop_count' is the number of repetitions of the indexscan to factor into
6650 * estimates of caching behavior
6651 *
6652 * If cost_p isn't NULL, the indexTotalCost estimate is returned in *cost_p.
6653 * If tuples_p isn't NULL, the tuples_fetched estimate is returned in *tuples_p.
6654 */
6655double
6656compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel,
6657 Path *bitmapqual, double loop_count,
6658 Cost *cost_p, double *tuples_p)
6659{
6660 Cost indexTotalCost;
6661 Selectivity indexSelectivity;
6662 double T;
6663 double pages_fetched;
6664 double tuples_fetched;
6665 double heap_pages;
6666 double maxentries;
6667
6668 /*
6669 * Fetch total cost of obtaining the bitmap, as well as its total
6670 * selectivity.
6671 */
6672 cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
6673
6674 /*
6675 * Estimate number of main-table pages fetched.
6676 */
6677 tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
6678
6679 T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
6680
6681 /*
6682 * For a single scan, the number of heap pages that need to be fetched is
6683 * the same as the Mackert and Lohman formula for the case T <= b (ie, no
6684 * re-reads needed).
6685 */
6686 pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
6687
6688 /*
6689 * Calculate the number of pages fetched from the heap. Then based on
6690 * current work_mem estimate get the estimated maxentries in the bitmap.
6691 * (Note that we always do this calculation based on the number of pages
6692 * that would be fetched in a single iteration, even if loop_count > 1.
6693 * That's correct, because only that number of entries will be stored in
6694 * the bitmap at one time.)
6695 */
6696 heap_pages = Min(pages_fetched, baserel->pages);
6697 maxentries = tbm_calculate_entries(work_mem * (Size) 1024);
6698
6699 if (loop_count > 1)
6700 {
6701 /*
6702 * For repeated bitmap scans, scale up the number of tuples fetched in
6703 * the Mackert and Lohman formula by the number of scans, so that we
6704 * estimate the number of pages fetched by all the scans. Then
6705 * pro-rate for one scan.
6706 */
6707 pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
6708 baserel->pages,
6709 get_indexpath_pages(bitmapqual),
6710 root);
6711 pages_fetched /= loop_count;
6712 }
6713
6714 if (pages_fetched >= T)
6715 pages_fetched = T;
6716 else
6717 pages_fetched = ceil(pages_fetched);
6718
6719 if (maxentries < heap_pages)
6720 {
6721 double exact_pages;
6722 double lossy_pages;
6723
6724 /*
6725 * Crude approximation of the number of lossy pages. Because of the
6726 * way tbm_lossify() is coded, the number of lossy pages increases
6727 * very sharply as soon as we run short of memory; this formula has
6728 * that property and seems to perform adequately in testing, but it's
6729 * possible we could do better somehow.
6730 */
6731 lossy_pages = Max(0, heap_pages - maxentries / 2);
6732 exact_pages = heap_pages - lossy_pages;
6733
6734 /*
6735 * If there are lossy pages then recompute the number of tuples
6736 * processed by the bitmap heap node. We assume here that the chance
6737 * of a given tuple coming from an exact page is the same as the
6738 * chance that a given page is exact. This might not be true, but
6739 * it's not clear how we can do any better.
6740 */
6741 if (lossy_pages > 0)
6742 tuples_fetched =
6743 clamp_row_est(indexSelectivity *
6744 (exact_pages / heap_pages) * baserel->tuples +
6745 (lossy_pages / heap_pages) * baserel->tuples);
6746 }
6747
6748 if (cost_p)
6749 *cost_p = indexTotalCost;
6750 if (tuples_p)
6751 *tuples_p = tuples_fetched;
6752
6753 return pages_fetched;
6754}
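/*
 * Worked example (illustrative, made-up numbers; not part of costsize.c):
 * the single-scan formula above is the Mackert-Lohman estimate for the
 * no-reread case, pages_fetched = 2*T*N / (2*T + N).  For a table of
 * T = 10000 pages and N = 5000 tuples fetched by the bitmap, that gives
 * (2 * 10000 * 5000) / (2 * 10000 + 5000) = 100000000 / 25000 = 4000 pages,
 * well below the worst case of one page per tuple.
 */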
6755
6756/*
6757 * compute_gather_rows
6758 * Estimate number of rows for gather (merge) nodes.
6759 *
6760 * In a parallel plan, each worker's row estimate is determined by dividing the
6761 * total number of rows by parallel_divisor, which accounts for the leader's
6762 * contribution in addition to the number of workers. Accordingly, when
6763 * estimating the number of rows for gather (merge) nodes, we multiply the rows
6764 * per worker by the same parallel_divisor to undo the division.
6765 */
6766double
6767compute_gather_rows(Path *path)
6768{
6769 Assert(path->parallel_workers > 0);
6770
6771 return clamp_row_est(path->rows * get_parallel_divisor(path));
6772}
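/*
 * Worked example (illustrative, made-up numbers; not part of costsize.c):
 * if a partial path with 2 workers was sized at rows = 10000 per worker
 * (i.e. the total was divided by the 2.4 divisor from get_parallel_divisor),
 * the Gather estimate is clamp_row_est(10000 * 2.4) = 24000 rows, restoring
 * the pre-division total.
 */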
Definition makefuncs.c:810
static const uint32 T[65]
Definition md5.c:119
Size hash_agg_entry_size(int numTrans, Size tupleWidth, Size transitionSpace)
Definition nodeAgg.c:1698
void hash_agg_set_limits(double hashentrysize, double input_groups, int used_bits, Size *mem_limit, uint64 *ngroups_limit, int *num_partitions)
Definition nodeAgg.c:1806
Oid exprType(const Node *expr)
Definition nodeFuncs.c:42
int32 exprTypmod(const Node *expr)
Definition nodeFuncs.c:301
void set_sa_opfuncid(ScalarArrayOpExpr *opexpr)
Definition nodeFuncs.c:1882
void set_opfuncid(OpExpr *opexpr)
Definition nodeFuncs.c:1871
static Node * get_rightop(const void *clause)
Definition nodeFuncs.h:95
#define expression_tree_walker(n, w, c)
Definition nodeFuncs.h:153
static Node * get_leftop(const void *clause)
Definition nodeFuncs.h:83
void ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew, bool try_combined_hash_mem, int parallel_workers, size_t *space_allowed, int *numbuckets, int *numbatches, int *num_skew_mcvs)
Definition nodeHash.c:657
size_t get_hash_memory_limit(void)
Definition nodeHash.c:3621
double ExecEstimateCacheEntryOverheadBytes(double ntuples)
#define IsA(nodeptr, _type_)
Definition nodes.h:164
double Cost
Definition nodes.h:261
#define nodeTag(nodeptr)
Definition nodes.h:139
#define IS_OUTER_JOIN(jointype)
Definition nodes.h:348
double Cardinality
Definition nodes.h:262
AggStrategy
Definition nodes.h:363
@ AGG_SORTED
Definition nodes.h:365
@ AGG_HASHED
Definition nodes.h:366
@ AGG_MIXED
Definition nodes.h:367
@ AGG_PLAIN
Definition nodes.h:364
double Selectivity
Definition nodes.h:260
JoinType
Definition nodes.h:298
@ JOIN_SEMI
Definition nodes.h:317
@ JOIN_FULL
Definition nodes.h:305
@ JOIN_INNER
Definition nodes.h:303
@ JOIN_RIGHT
Definition nodes.h:306
@ JOIN_LEFT
Definition nodes.h:304
@ JOIN_RIGHT_ANTI
Definition nodes.h:320
@ JOIN_ANTI
Definition nodes.h:318
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition palloc.h:124
#define FRAMEOPTION_END_CURRENT_ROW
Definition parsenodes.h:619
#define FRAMEOPTION_END_OFFSET_PRECEDING
Definition parsenodes.h:621
@ RTE_CTE
@ RTE_NAMEDTUPLESTORE
@ RTE_VALUES
@ RTE_SUBQUERY
@ RTE_RESULT
@ RTE_FUNCTION
@ RTE_TABLEFUNC
@ RTE_RELATION
#define FRAMEOPTION_END_OFFSET_FOLLOWING
Definition parsenodes.h:623
#define FRAMEOPTION_RANGE
Definition parsenodes.h:610
#define FRAMEOPTION_GROUPS
Definition parsenodes.h:612
#define FRAMEOPTION_END_UNBOUNDED_FOLLOWING
Definition parsenodes.h:617
#define FRAMEOPTION_ROWS
Definition parsenodes.h:611
bool pathkeys_count_contained_in(List *keys1, List *keys2, int *n_common)
Definition pathkeys.c:558
bool pathkeys_contained_in(List *keys1, List *keys2)
Definition pathkeys.c:343
#define PGS_TIDSCAN
Definition pathnodes.h:70
#define PGS_APPEND
Definition pathnodes.h:78
#define PGS_MERGE_APPEND
Definition pathnodes.h:79
#define RINFO_IS_PUSHED_DOWN(rinfo, joinrelids)
Definition pathnodes.h:3043
#define PGS_SEQSCAN
Definition pathnodes.h:66
#define PGS_MERGEJOIN_PLAIN
Definition pathnodes.h:72
#define PGS_MERGEJOIN_MATERIALIZE
Definition pathnodes.h:73
#define PGS_HASHJOIN
Definition pathnodes.h:77
#define PGS_CONSIDER_NONPARTIAL
Definition pathnodes.h:84
#define PGS_BITMAPSCAN
Definition pathnodes.h:69
#define planner_rt_fetch(rti, root)
Definition pathnodes.h:692
#define PGS_GATHER
Definition pathnodes.h:80
#define RELATION_WAS_MADE_UNIQUE(rel, sjinfo, nominal_jointype)
Definition pathnodes.h:1238
#define PGS_GATHER_MERGE
Definition pathnodes.h:81
@ UPPERREL_FINAL
Definition pathnodes.h:152
#define PGS_INDEXONLYSCAN
Definition pathnodes.h:68
#define PGS_INDEXSCAN
Definition pathnodes.h:67
#define lfirst(lc)
Definition pg_list.h:172
#define lfirst_node(type, lc)
Definition pg_list.h:176
static int list_length(const List *l)
Definition pg_list.h:152
#define NIL
Definition pg_list.h:68
#define foreach_current_index(var_or_cell)
Definition pg_list.h:403
#define foreach_delete_current(lst, var_or_cell)
Definition pg_list.h:391
#define for_each_cell(cell, lst, initcell)
Definition pg_list.h:438
#define linitial(l)
Definition pg_list.h:178
#define lsecond(l)
Definition pg_list.h:183
static ListCell * list_head(const List *l)
Definition pg_list.h:128
#define lfirst_oid(lc)
Definition pg_list.h:174
#define plan(x)
Definition pg_regress.c:161
PlaceHolderInfo * find_placeholder_info(PlannerInfo *root, PlaceHolderVar *phv)
Definition placeholder.c:83
void add_function_cost(PlannerInfo *root, Oid funcid, Node *node, QualCost *cost)
Definition plancat.c:2369
int32 get_relation_data_width(Oid relid, int32 *attr_widths)
Definition plancat.c:1486
bool parallel_leader_participation
Definition planner.c:70
static int64 DatumGetInt64(Datum X)
Definition postgres.h:413
static int16 DatumGetInt16(Datum X)
Definition postgres.h:172
static int32 DatumGetInt32(Datum X)
Definition postgres.h:212
#define InvalidOid
unsigned int Oid
@ ANY_SUBLINK
Definition primnodes.h:1032
@ ALL_SUBLINK
Definition primnodes.h:1031
@ EXISTS_SUBLINK
Definition primnodes.h:1030
#define IS_SPECIAL_VARNO(varno)
Definition primnodes.h:248
RelOptInfo * find_base_rel(PlannerInfo *root, int relid)
Definition relnode.c:533
RelOptInfo * fetch_upper_rel(PlannerInfo *root, UpperRelationKind kind, Relids relids)
Definition relnode.c:1606
bool join_clause_is_movable_into(RestrictInfo *rinfo, Relids currentrelids, Relids current_and_outer)
void mergejoinscansel(PlannerInfo *root, Node *clause, Oid opfamily, CompareType cmptype, bool nulls_first, Selectivity *leftstart, Selectivity *leftend, Selectivity *rightstart, Selectivity *rightend)
Definition selfuncs.c:3285
double estimate_array_length(PlannerInfo *root, Node *arrayexpr)
Definition selfuncs.c:2223
double estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, List **pgset, EstimationInfo *estinfo)
Definition selfuncs.c:3771
List * estimate_multivariate_bucketsize(PlannerInfo *root, RelOptInfo *inner, List *hashclauses, Selectivity *innerbucketsize)
Definition selfuncs.c:4123
void estimate_hash_bucket_stats(PlannerInfo *root, Node *hashkey, double nbuckets, Selectivity *mcv_freq, Selectivity *bucketsize_frac)
Definition selfuncs.c:4390
#define CLAMP_PROBABILITY(p)
Definition selfuncs.h:63
#define DEFAULT_INEQ_SEL
Definition selfuncs.h:37
#define DEFAULT_NUM_DISTINCT
Definition selfuncs.h:52
#define SELFLAG_USED_DEFAULT
Definition selfuncs.h:76
void get_tablespace_page_costs(Oid spcid, double *spc_random_page_cost, double *spc_seq_page_cost)
Definition spccache.c:182
Selectivity bitmapselectivity
Definition pathnodes.h:2130
List * bitmapquals
Definition pathnodes.h:2129
Selectivity bitmapselectivity
Definition pathnodes.h:2143
List * bitmapquals
Definition pathnodes.h:2142
Oid consttype
Definition primnodes.h:330
Path * subpath
Definition pathnodes.h:2358
List * path_hashclauses
Definition pathnodes.h:2476
Cardinality inner_rows_total
Definition pathnodes.h:2478
int num_batches
Definition pathnodes.h:2477
JoinPath jpath
Definition pathnodes.h:2475
List * indrestrictinfo
Definition pathnodes.h:1405
List * indexclauses
Definition pathnodes.h:2043
Selectivity indexselectivity
Definition pathnodes.h:2048
Cost indextotalcost
Definition pathnodes.h:2047
IndexOptInfo * indexinfo
Definition pathnodes.h:2042
Cardinality inner_rows
Definition pathnodes.h:3711
Cardinality outer_rows
Definition pathnodes.h:3710
Cardinality inner_skip_rows
Definition pathnodes.h:3713
Cardinality inner_rows_total
Definition pathnodes.h:3718
Cardinality outer_skip_rows
Definition pathnodes.h:3712
SemiAntiJoinFactors semifactors
Definition pathnodes.h:3595
SpecialJoinInfo * sjinfo
Definition pathnodes.h:3594
Path * outerjoinpath
Definition pathnodes.h:2390
Path * innerjoinpath
Definition pathnodes.h:2391
JoinType jointype
Definition pathnodes.h:2385
List * joinrestrictinfo
Definition pathnodes.h:2393
Definition pg_list.h:54
bool skip_mark_restore
Definition pathnodes.h:2460
List * innersortkeys
Definition pathnodes.h:2457
JoinPath jpath
Definition pathnodes.h:2454
bool materialize_inner
Definition pathnodes.h:2461
List * path_mergeclauses
Definition pathnodes.h:2455
Selectivity leftstartsel
Definition pathnodes.h:3062
Selectivity leftendsel
Definition pathnodes.h:3063
CompareType cmptype
Definition pathnodes.h:3059
Selectivity rightendsel
Definition pathnodes.h:3065
Selectivity rightstartsel
Definition pathnodes.h:3064
JoinPath jpath
Definition pathnodes.h:2408
Definition nodes.h:135
List * exprs
Definition pathnodes.h:1864
QualCost cost
Definition pathnodes.h:1870
NodeTag pathtype
Definition pathnodes.h:1957
Cardinality rows
Definition pathnodes.h:1991
Cost startup_cost
Definition pathnodes.h:1993
int parallel_workers
Definition pathnodes.h:1988
int disabled_nodes
Definition pathnodes.h:1992
Cost total_cost
Definition pathnodes.h:1994
bool parallel_aware
Definition pathnodes.h:1984
Query * parse
Definition pathnodes.h:309
Cost per_tuple
Definition pathnodes.h:121
Cost startup
Definition pathnodes.h:120
Node * setOperations
Definition parsenodes.h:236
List * targetList
Definition parsenodes.h:198
List * baserestrictinfo
Definition pathnodes.h:1130
Relids relids
Definition pathnodes.h:1009
struct PathTarget * reltarget
Definition pathnodes.h:1033
Index relid
Definition pathnodes.h:1057
uint64 pgs_mask
Definition pathnodes.h:1027
Cardinality tuples
Definition pathnodes.h:1084
QualCost baserestrictcost
Definition pathnodes.h:1132
PlannerInfo * subroot
Definition pathnodes.h:1088
AttrNumber max_attr
Definition pathnodes.h:1065
Cardinality rows
Definition pathnodes.h:1015
AttrNumber min_attr
Definition pathnodes.h:1063
Expr * clause
Definition pathnodes.h:2886
Selectivity outer_match_frac
Definition pathnodes.h:3571
Selectivity match_count
Definition pathnodes.h:3572
JoinType jointype
Definition pathnodes.h:3215
bool useHashTable
Definition primnodes.h:1113
Node * testexpr
Definition primnodes.h:1100
List * parParam
Definition primnodes.h:1124
Cost startup_cost
Definition primnodes.h:1127
Cost per_call_cost
Definition primnodes.h:1128
SubLinkType subLinkType
Definition primnodes.h:1098
Expr * expr
Definition primnodes.h:2240
AttrNumber resno
Definition primnodes.h:2242
AttrNumber varattno
Definition primnodes.h:275
int varno
Definition primnodes.h:270
Index varlevelsup
Definition primnodes.h:295
List * partitionClause
Node * endOffset
List * orderClause
List * args
Definition primnodes.h:606
Expr * aggfilter
Definition primnodes.h:608
PlannerInfo * root
Definition costsize.c:169
Definition type.h:96
TsmRoutine * GetTsmRoutine(Oid tsmhandler)
Definition tablesample.c:27
int tbm_calculate_entries(Size maxbytes)
Definition tidbitmap.c:1542
List * get_sortgrouplist_exprs(List *sgClauses, List *targetList)
Definition tlist.c:401
int tuplesort_merge_order(int64 allowedMem)
Definition tuplesort.c:1674
Relids pull_varnos(PlannerInfo *root, Node *node)
Definition var.c:114
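For illustration only: the smallest helper listed above, clamp_row_est() (declared above, costsize.c:213), sanitizes a row-count estimate before it is stored in a Path. The following is a minimal standalone sketch of that clamping behaviour, not a copy of the PostgreSQL source; the function name example_clamp_row_est and the 1e100 ceiling standing in for MAXIMUM_ROWCOUNT are assumptions made for this example.

/*
 * Illustrative sketch: approximate row-estimate clamping in the style of
 * clamp_row_est().  example_clamp_row_est and the 1e100 ceiling are
 * assumptions for this example, not values taken from the sources.
 */
#include <math.h>

static double
example_clamp_row_est(double nrows)
{
    const double example_max_rowcount = 1e100; /* assumed ceiling */

    if (isnan(nrows) || nrows >= example_max_rowcount)
        nrows = example_max_rowcount; /* reject runaway or undefined estimates */
    else if (nrows <= 1.0)
        nrows = 1.0;                  /* never report fewer than one row */
    else
        nrows = rint(nrows);          /* round to a whole number of rows */

    return nrows;
}

A caller would apply such a helper to every selectivity-derived product (for example, tuples * selectivity) so that later divisions by the row count are safe and EXPLAIN output stays readable.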