costsize.c
1/*-------------------------------------------------------------------------
2 *
3 * costsize.c
4 * Routines to compute (and set) relation sizes and path costs
5 *
6 * Path costs are measured in arbitrary units established by these basic
7 * parameters:
8 *
9 * seq_page_cost Cost of a sequential page fetch
10 * random_page_cost Cost of a non-sequential page fetch
11 * cpu_tuple_cost Cost of typical CPU time to process a tuple
12 * cpu_index_tuple_cost Cost of typical CPU time to process an index tuple
13 * cpu_operator_cost Cost of CPU time to execute an operator or function
14 * parallel_tuple_cost Cost of CPU time to pass a tuple from worker to leader backend
15 * parallel_setup_cost Cost of setting up shared memory for parallelism
16 *
17 * We expect that the kernel will typically do some amount of read-ahead
18 * optimization; this in conjunction with seek costs means that seq_page_cost
19 * is normally considerably less than random_page_cost. (However, if the
20 * database is fully cached in RAM, it is reasonable to set them equal.)
21 *
22 * We also use a rough estimate "effective_cache_size" of the number of
23 * disk pages in Postgres + OS-level disk cache. (We can't simply use
24 * NBuffers for this purpose because that would ignore the effects of
25 * the kernel's disk cache.)
26 *
27 * Obviously, taking constants for these values is an oversimplification,
28 * but it's tough enough to get any useful estimates even at this level of
29 * detail. Note that all of these parameters are user-settable, in case
30 * the default values are drastically off for a particular platform.
31 *
32 * seq_page_cost and random_page_cost can also be overridden for an individual
33 * tablespace, in case some data is on a fast disk and other data is on a slow
34 * disk. Per-tablespace overrides never apply to temporary work files such as
35 * an external sort or a materialize node that overflows work_mem.
36 *
37 * We compute two separate costs for each path:
38 * total_cost: total estimated cost to fetch all tuples
39 * startup_cost: cost that is expended before first tuple is fetched
40 * In some scenarios, such as when there is a LIMIT or we are implementing
41 * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
42 * path's result. A caller can estimate the cost of fetching a partial
43 * result by interpolating between startup_cost and total_cost. In detail:
44 * actual_cost = startup_cost +
45 * (total_cost - startup_cost) * tuples_to_fetch / path->rows;
46 * Note that a base relation's rows count (and, by extension, plan_rows for
47 * plan nodes below the LIMIT node) are set without regard to any LIMIT, so
48 * that this equation works properly. (Note: while path->rows is never zero
49 * for ordinary relations, it is zero for paths for provably-empty relations,
50 * so beware of division-by-zero.) The LIMIT is applied as a top-level
51 * plan node.
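 * For example (illustrative figures only, not from the source): with
 * startup_cost = 10, total_cost = 1010 and path->rows = 1000, fetching
 * 50 tuples is estimated at 10 + (1010 - 10) * 50 / 1000 = 60.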
52 *
53 * Each path stores the total number of disabled nodes that exist at or
54 * below that point in the plan tree. This is regarded as a component of
55 * the cost, and paths with fewer disabled nodes should be regarded as
56 * cheaper than those with more. Disabled nodes occur when the user sets
57 * a GUC like enable_seqscan=false. We can't necessarily respect such a
 58 * setting in every part of the plan tree, but we want to respect it in as many
59 * parts of the plan tree as possible. Simpler schemes like storing a Boolean
60 * here rather than a count fail to do that. We used to disable nodes by
61 * adding a large constant to the startup cost, but that distorted planning
62 * in other ways.
63 *
64 * For largely historical reasons, most of the routines in this module use
65 * the passed result Path only to store their results (rows, startup_cost and
66 * total_cost) into. All the input data they need is passed as separate
67 * parameters, even though much of it could be extracted from the Path.
68 * An exception is made for the cost_XXXjoin() routines, which expect all
69 * the other fields of the passed XXXPath to be filled in, and similarly
70 * cost_index() assumes the passed IndexPath is valid except for its output
71 * values.
72 *
73 *
74 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
75 * Portions Copyright (c) 1994, Regents of the University of California
76 *
77 * IDENTIFICATION
78 * src/backend/optimizer/path/costsize.c
79 *
80 *-------------------------------------------------------------------------
81 */
82
83#include "postgres.h"
84
85#include <limits.h>
86#include <math.h>
87
88#include "access/amapi.h"
89#include "access/htup_details.h"
90#include "access/tsmapi.h"
91#include "executor/executor.h"
92#include "executor/nodeAgg.h"
93#include "executor/nodeHash.h"
95#include "miscadmin.h"
96#include "nodes/makefuncs.h"
97#include "nodes/nodeFuncs.h"
98#include "nodes/tidbitmap.h"
99#include "optimizer/clauses.h"
100#include "optimizer/cost.h"
101#include "optimizer/optimizer.h"
102#include "optimizer/pathnode.h"
103#include "optimizer/paths.h"
105#include "optimizer/plancat.h"
107#include "parser/parsetree.h"
108#include "utils/lsyscache.h"
109#include "utils/selfuncs.h"
110#include "utils/spccache.h"
111#include "utils/tuplesort.h"
112
113
114#define LOG2(x) (log(x) / 0.693147180559945)
115
116/*
117 * Append and MergeAppend nodes are less expensive than some other operations
118 * which use cpu_tuple_cost; instead of adding a separate GUC, estimate the
119 * per-tuple cost as cpu_tuple_cost multiplied by this value.
120 */
121#define APPEND_CPU_COST_MULTIPLIER 0.5
122
123/*
124 * Maximum value for row estimates. We cap row estimates to this to help
125 * ensure that costs based on these estimates remain within the range of what
126 * double can represent. add_path() wouldn't act sanely given infinite or NaN
127 * cost values.
128 */
129#define MAXIMUM_ROWCOUNT 1e100
130
139
141
143
145
146bool enable_seqscan = true;
150bool enable_tidscan = true;
151bool enable_sort = true;
153bool enable_hashagg = true;
154bool enable_nestloop = true;
155bool enable_material = true;
156bool enable_memoize = true;
158bool enable_hashjoin = true;
167
173
174static List *extract_nonindex_conditions(List *qual_clauses, List *indexclauses);
176 RestrictInfo *rinfo,
178static void cost_rescan(PlannerInfo *root, Path *path,
180static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
184static bool has_indexed_join_quals(NestPath *path);
185static double approx_tuple_count(PlannerInfo *root, JoinPath *path,
186 List *quals);
188 RelOptInfo *joinrel,
191 double outer_rows,
192 double inner_rows,
193 SpecialJoinInfo *sjinfo,
194 List *restrictlist);
196 Relids outer_relids,
198 SpecialJoinInfo *sjinfo,
199 List **restrictlist);
200static Cost append_nonpartial_cost(List *subpaths, int numpaths,
201 int parallel_workers);
202static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
203static int32 get_expr_width(PlannerInfo *root, const Node *expr);
204static double relation_byte_size(double tuples, int width);
205static double page_size(double tuples, int width);
206static double get_parallel_divisor(Path *path);
207
208
209/*
210 * clamp_row_est
211 * Force a row-count estimate to a sane value.
212 */
213double
214clamp_row_est(double nrows)
215{
216 /*
217 * Avoid infinite and NaN row estimates. Costs derived from such values
218 * are going to be useless. Also force the estimate to be at least one
219 * row, to make explain output look better and to avoid possible
220 * divide-by-zero when interpolating costs. Make it an integer, too.
221 */
222 if (nrows > MAXIMUM_ROWCOUNT || isnan(nrows))
223 nrows = MAXIMUM_ROWCOUNT;
224 else if (nrows <= 1.0)
225 nrows = 1.0;
226 else
227 nrows = rint(nrows);
228
229 return nrows;
230}
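/*
 * Illustrative examples (editor's addition, not part of costsize.c):
 *	clamp_row_est(0.3)    -> 1.0	forced up to at least one row
 *	clamp_row_est(123.6)  -> 124.0	rounded to an integer
 *	clamp_row_est(1e200)  -> 1e100	capped at MAXIMUM_ROWCOUNT
 */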
231
232/*
233 * clamp_width_est
234 * Force a tuple-width estimate to a sane value.
235 *
236 * The planner represents datatype width and tuple width estimates as int32.
237 * When summing column width estimates to create a tuple width estimate,
238 * it's possible to reach integer overflow in edge cases. To ensure sane
239 * behavior, we form such sums in int64 arithmetic and then apply this routine
240 * to clamp to int32 range.
241 */
242int32
244{
245 /*
246 * Anything more than MaxAllocSize is clearly bogus, since we could not
247 * create a tuple that large.
248 */
250 return (int32) MaxAllocSize;
251
252 /*
253 * Unlike clamp_row_est, we just Assert that the value isn't negative,
254 * rather than masking such errors.
255 */
256 Assert(tuple_width >= 0);
257
258 return (int32) tuple_width;
259}
260
261
262/*
263 * cost_seqscan
264 * Determines and returns the cost of scanning a relation sequentially.
265 *
266 * 'baserel' is the relation to be scanned
267 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
268 */
269void
272{
273 Cost startup_cost = 0;
276 double spc_seq_page_cost;
280
281 /* Should only be applied to base relations */
282 Assert(baserel->relid > 0);
283 Assert(baserel->rtekind == RTE_RELATION);
284
285 /* Mark the path with the correct row estimate */
286 if (param_info)
287 path->rows = param_info->ppi_rows;
288 else
289 path->rows = baserel->rows;
290
291 /* fetch estimated page cost for tablespace containing table */
292 get_tablespace_page_costs(baserel->reltablespace,
293 NULL,
295
296 /*
297 * disk costs
298 */
300
301 /* CPU costs */
303
304 startup_cost += qpqual_cost.startup;
307 /* tlist eval costs are paid per output row, not per tuple scanned */
308 startup_cost += path->pathtarget->cost.startup;
309 cpu_run_cost += path->pathtarget->cost.per_tuple * path->rows;
310
311 /* Adjust costing for parallelism, if used. */
312 if (path->parallel_workers > 0)
313 {
315
316 /* The CPU cost is divided among all the workers. */
318
319 /*
320 * It may be possible to amortize some of the I/O cost, but probably
321 * not very much, because most operating systems already do aggressive
322 * prefetching. For now, we assume that the disk run cost can't be
323 * amortized at all.
324 */
325
326 /*
327 * In the case of a parallel plan, the row count needs to represent
328 * the number of tuples processed per worker.
329 */
330 path->rows = clamp_row_est(path->rows / parallel_divisor);
331 }
332 else
334
335 path->disabled_nodes =
336 (baserel->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
337 path->startup_cost = startup_cost;
338 path->total_cost = startup_cost + cpu_run_cost + disk_run_cost;
339}
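/*
 * Illustrative sketch (editor's addition, not part of costsize.c): the core
 * arithmetic of cost_seqscan() restated with plain doubles.  qual_startup and
 * qual_per_tuple stand in for the scan quals' QualCost, parallel_divisor is
 * taken as a given rather than computed by get_parallel_divisor(), and the
 * pathtarget (tlist) evaluation costs are omitted.
 */
static void
seqscan_cost_sketch(double pages, double tuples,
					double spc_seq_page_cost, double cpu_tuple_cost,
					double qual_startup, double qual_per_tuple,
					double parallel_divisor,
					double *startup_cost, double *total_cost)
{
	double		disk_run_cost = spc_seq_page_cost * pages;
	double		cpu_run_cost = (cpu_tuple_cost + qual_per_tuple) * tuples;

	/* CPU work is shared among workers; disk cost is assumed not amortized */
	if (parallel_divisor > 1.0)
		cpu_run_cost /= parallel_divisor;

	*startup_cost = qual_startup;
	*total_cost = qual_startup + cpu_run_cost + disk_run_cost;
}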
340
341/*
342 * cost_samplescan
343 * Determines and returns the cost of scanning a relation using sampling.
344 *
345 * 'baserel' is the relation to be scanned
346 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
347 */
348void
351{
352 Cost startup_cost = 0;
353 Cost run_cost = 0;
357 double spc_seq_page_cost,
358 spc_random_page_cost,
363
364 /* Should only be applied to base relations with tablesample clauses */
365 Assert(baserel->relid > 0);
366 rte = planner_rt_fetch(baserel->relid, root);
367 Assert(rte->rtekind == RTE_RELATION);
368 tsc = rte->tablesample;
369 Assert(tsc != NULL);
370 tsm = GetTsmRoutine(tsc->tsmhandler);
371
372 /* Mark the path with the correct row estimate */
373 if (param_info)
374 path->rows = param_info->ppi_rows;
375 else
376 path->rows = baserel->rows;
377
378 /* fetch estimated page cost for tablespace containing table */
379 get_tablespace_page_costs(baserel->reltablespace,
380 &spc_random_page_cost,
382
383 /* if NextSampleBlock is used, assume random access, else sequential */
384 spc_page_cost = (tsm->NextSampleBlock != NULL) ?
385 spc_random_page_cost : spc_seq_page_cost;
386
387 /*
388 * disk costs (recall that baserel->pages has already been set to the
389 * number of pages the sampling method will visit)
390 */
391 run_cost += spc_page_cost * baserel->pages;
392
393 /*
394 * CPU costs (recall that baserel->tuples has already been set to the
395 * number of tuples the sampling method will select). Note that we ignore
396 * execution cost of the TABLESAMPLE parameter expressions; they will be
397 * evaluated only once per scan, and in most usages they'll likely be
398 * simple constants anyway. We also don't charge anything for the
399 * calculations the sampling method might do internally.
400 */
402
403 startup_cost += qpqual_cost.startup;
405 run_cost += cpu_per_tuple * baserel->tuples;
406 /* tlist eval costs are paid per output row, not per tuple scanned */
407 startup_cost += path->pathtarget->cost.startup;
408 run_cost += path->pathtarget->cost.per_tuple * path->rows;
409
410 if (path->parallel_workers == 0)
412
413 path->disabled_nodes =
414 (baserel->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
415 path->startup_cost = startup_cost;
416 path->total_cost = startup_cost + run_cost;
417}
418
419/*
420 * cost_gather
421 * Determines and returns the cost of gather path.
422 *
423 * 'rel' is the relation to be operated upon
424 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
425 * 'rows' may be used to point to a row estimate; if non-NULL, it overrides
426 * both 'rel' and 'param_info'. This is useful when the path doesn't exactly
427 * correspond to any particular RelOptInfo.
428 */
429void
432 double *rows)
433{
434 Cost startup_cost = 0;
435 Cost run_cost = 0;
436
437 /* Mark the path with the correct row estimate */
438 if (rows)
439 path->path.rows = *rows;
440 else if (param_info)
441 path->path.rows = param_info->ppi_rows;
442 else
443 path->path.rows = rel->rows;
444
445 startup_cost = path->subpath->startup_cost;
446
447 run_cost = path->subpath->total_cost - path->subpath->startup_cost;
448
449 /* Parallel setup and communication cost. */
450 startup_cost += parallel_setup_cost;
451 run_cost += parallel_tuple_cost * path->path.rows;
452
454 + ((rel->pgs_mask & PGS_GATHER) != 0 ? 0 : 1);
455 path->path.startup_cost = startup_cost;
456 path->path.total_cost = (startup_cost + run_cost);
457}
458
459/*
460 * cost_gather_merge
461 * Determines and returns the cost of gather merge path.
462 *
463 * GatherMerge merges several pre-sorted input streams, using a heap that at
464 * any given instant holds the next tuple from each stream. If there are N
465 * streams, we need about N*log2(N) tuple comparisons to construct the heap at
466 * startup, and then for each output tuple, about log2(N) comparisons to
467 * replace the top heap entry with the next tuple from the same stream.
468 */
469void
474 double *rows)
475{
476 Cost startup_cost = 0;
477 Cost run_cost = 0;
479 double N;
480 double logN;
481
482 /* Mark the path with the correct row estimate */
483 if (rows)
484 path->path.rows = *rows;
485 else if (param_info)
486 path->path.rows = param_info->ppi_rows;
487 else
488 path->path.rows = rel->rows;
489
490 /*
491 * Add one to the number of workers to account for the leader. This might
492 * be overgenerous since the leader will do less work than other workers
493 * in typical cases, but we'll go with it for now.
494 */
495 Assert(path->num_workers > 0);
496 N = (double) path->num_workers + 1;
497 logN = LOG2(N);
498
499 /* Assumed cost per tuple comparison */
501
502 /* Heap creation cost */
503 startup_cost += comparison_cost * N * logN;
504
505 /* Per-tuple heap maintenance cost */
506 run_cost += path->path.rows * comparison_cost * logN;
507
508 /* small cost for heap management, like cost_merge_append */
509 run_cost += cpu_operator_cost * path->path.rows;
510
511 /*
512 * Parallel setup and communication cost. Since Gather Merge, unlike
513 * Gather, requires us to block until a tuple is available from every
514 * worker, we bump the IPC cost up a little bit as compared with Gather.
515 * For lack of a better idea, charge an extra 5%.
516 */
517 startup_cost += parallel_setup_cost;
518 run_cost += parallel_tuple_cost * path->path.rows * 1.05;
519
521 + ((rel->pgs_mask & PGS_GATHER_MERGE) != 0 ? 0 : 1);
522 path->path.startup_cost = startup_cost + input_startup_cost;
523 path->path.total_cost = (startup_cost + run_cost + input_total_cost);
524}
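/*
 * Illustrative sketch (editor's addition, not part of costsize.c): the Gather
 * Merge heap arithmetic described above, with N = num_workers + 1 streams.
 * It reuses the LOG2 macro defined near the top of this file; comparison_cost
 * is a parameter here, whereas the real function derives it from
 * cpu_operator_cost.  Input path costs and disabled_nodes bookkeeping are
 * omitted.
 */
static void
gather_merge_cost_sketch(int num_workers, double rows,
						 double comparison_cost, double cpu_operator_cost,
						 double parallel_setup_cost,
						 double parallel_tuple_cost,
						 double *startup_cost, double *run_cost)
{
	double		N = (double) num_workers + 1;
	double		logN = LOG2(N);

	*startup_cost = comparison_cost * N * logN;	/* build the heap */
	*run_cost = rows * comparison_cost * logN;	/* replace top entry per row */
	*run_cost += cpu_operator_cost * rows;		/* small heap-management charge */

	/* IPC costs: 5% surcharge over plain Gather, as explained above */
	*startup_cost += parallel_setup_cost;
	*run_cost += parallel_tuple_cost * rows * 1.05;
}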
525
526/*
527 * cost_index
528 * Determines and returns the cost of scanning a relation using an index.
529 *
530 * 'path' describes the indexscan under consideration, and is complete
531 * except for the fields to be set by this routine
532 * 'loop_count' is the number of repetitions of the indexscan to factor into
533 * estimates of caching behavior
534 *
535 * In addition to rows, startup_cost and total_cost, cost_index() sets the
536 * path's indextotalcost and indexselectivity fields. These values will be
537 * needed if the IndexPath is used in a BitmapIndexScan.
538 *
539 * NOTE: path->indexquals must contain only clauses usable as index
540 * restrictions. Any additional quals evaluated as qpquals may reduce the
541 * number of returned tuples, but they won't reduce the number of tuples
542 * we have to fetch from the table, so they don't reduce the scan cost.
543 */
544void
546 bool partial_path)
547{
549 RelOptInfo *baserel = index->rel;
550 bool indexonly = (path->path.pathtype == T_IndexOnlyScan);
551 amcostestimate_function amcostestimate;
552 List *qpquals;
553 Cost startup_cost = 0;
554 Cost run_cost = 0;
555 Cost cpu_run_cost = 0;
556 Cost indexStartupCost;
557 Cost indexTotalCost;
558 Selectivity indexSelectivity;
559 double indexCorrelation,
560 csquared;
561 double spc_seq_page_cost,
562 spc_random_page_cost;
567 double tuples_fetched;
568 double pages_fetched;
569 double rand_heap_pages;
570 double index_pages;
572
573 /* Should only be applied to base relations */
576 Assert(baserel->relid > 0);
577 Assert(baserel->rtekind == RTE_RELATION);
578
579 /*
580 * Mark the path with the correct row estimate, and identify which quals
581 * will need to be enforced as qpquals. We need not check any quals that
582 * are implied by the index's predicate, so we can use indrestrictinfo not
583 * baserestrictinfo as the list of relevant restriction clauses for the
584 * rel.
585 */
586 if (path->path.param_info)
587 {
588 path->path.rows = path->path.param_info->ppi_rows;
589 /* qpquals come from the rel's restriction clauses and ppi_clauses */
591 path->indexclauses),
592 extract_nonindex_conditions(path->path.param_info->ppi_clauses,
593 path->indexclauses));
594 }
595 else
596 {
597 path->path.rows = baserel->rows;
598 /* qpquals come from just the rel's restriction clauses */
600 path->indexclauses);
601 }
602
603 /* is this scan type disabled? */
606 path->path.disabled_nodes =
607 (baserel->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
608
609 /*
610 * Call index-access-method-specific code to estimate the processing cost
611 * for scanning the index, as well as the selectivity of the index (ie,
612 * the fraction of main-table tuples we will have to retrieve) and its
613 * correlation to the main-table tuple order. We need a cast here because
614 * pathnodes.h uses a weak function type to avoid including amapi.h.
615 */
616 amcostestimate = (amcostestimate_function) index->amcostestimate;
617 amcostestimate(root, path, loop_count,
618 &indexStartupCost, &indexTotalCost,
619 &indexSelectivity, &indexCorrelation,
620 &index_pages);
621
622 /*
623 * Save amcostestimate's results for possible use in bitmap scan planning.
624 * We don't bother to save indexStartupCost or indexCorrelation, because a
625 * bitmap scan doesn't care about either.
626 */
627 path->indextotalcost = indexTotalCost;
628 path->indexselectivity = indexSelectivity;
629
630 /* all costs for touching index itself included here */
631 startup_cost += indexStartupCost;
632 run_cost += indexTotalCost - indexStartupCost;
633
634 /* estimate number of main-table tuples fetched */
635 tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
636
637 /* fetch estimated page costs for tablespace containing table */
638 get_tablespace_page_costs(baserel->reltablespace,
639 &spc_random_page_cost,
641
642 /*----------
643 * Estimate number of main-table pages fetched, and compute I/O cost.
644 *
645 * When the index ordering is uncorrelated with the table ordering,
646 * we use an approximation proposed by Mackert and Lohman (see
647 * index_pages_fetched() for details) to compute the number of pages
648 * fetched, and then charge spc_random_page_cost per page fetched.
649 *
650 * When the index ordering is exactly correlated with the table ordering
651 * (just after a CLUSTER, for example), the number of pages fetched should
652 * be exactly selectivity * table_size. What's more, all but the first
653 * will be sequential fetches, not the random fetches that occur in the
654 * uncorrelated case. So if the number of pages is more than 1, we
655 * ought to charge
656 * spc_random_page_cost + (pages_fetched - 1) * spc_seq_page_cost
657 * For partially-correlated indexes, we ought to charge somewhere between
658 * these two estimates. We currently interpolate linearly between the
659 * estimates based on the correlation squared (XXX is that appropriate?).
660 *
661 * If it's an index-only scan, then we will not need to fetch any heap
662 * pages for which the visibility map shows all tuples are visible.
663 * Hence, reduce the estimated number of heap fetches accordingly.
664 * We use the measured fraction of the entire heap that is all-visible,
665 * which might not be particularly relevant to the subset of the heap
666 * that this query will fetch; but it's not clear how to do better.
667 *----------
668 */
669 if (loop_count > 1)
670 {
671 /*
672 * For repeated indexscans, the appropriate estimate for the
673 * uncorrelated case is to scale up the number of tuples fetched in
674 * the Mackert and Lohman formula by the number of scans, so that we
675 * estimate the number of pages fetched by all the scans; then
676 * pro-rate the costs for one scan. In this case we assume all the
677 * fetches are random accesses.
678 */
680 baserel->pages,
681 (double) index->pages,
682 root);
683
684 if (indexonly)
685 pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
686
688
689 max_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
690
691 /*
692 * In the perfectly correlated case, the number of pages touched by
693 * each scan is selectivity * table_size, and we can use the Mackert
694 * and Lohman formula at the page level to estimate how much work is
695 * saved by caching across scans. We still assume all the fetches are
696 * random, though, which is an overestimate that's hard to correct for
697 * without double-counting the cache effects. (But in most cases
698 * where such a plan is actually interesting, only one page would get
699 * fetched per scan anyway, so it shouldn't matter much.)
700 */
701 pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
702
704 baserel->pages,
705 (double) index->pages,
706 root);
707
708 if (indexonly)
709 pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
710
711 min_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
712 }
713 else
714 {
715 /*
716 * Normal case: apply the Mackert and Lohman formula, and then
717 * interpolate between that and the correlation-derived result.
718 */
719 pages_fetched = index_pages_fetched(tuples_fetched,
720 baserel->pages,
721 (double) index->pages,
722 root);
723
724 if (indexonly)
725 pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
726
728
729 /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
730 max_IO_cost = pages_fetched * spc_random_page_cost;
731
732 /* min_IO_cost is for the perfectly correlated case (csquared=1) */
733 pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
734
735 if (indexonly)
736 pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
737
738 if (pages_fetched > 0)
739 {
740 min_IO_cost = spc_random_page_cost;
741 if (pages_fetched > 1)
743 }
744 else
745 min_IO_cost = 0;
746 }
747
748 if (partial_path)
749 {
750 /*
751 * For index only scans compute workers based on number of index pages
752 * fetched; the number of heap pages we fetch might be so small as to
753 * effectively rule out parallelism, which we don't want to do.
754 */
755 if (indexonly)
756 rand_heap_pages = -1;
757
758 /*
759 * Estimate the number of parallel workers required to scan index. Use
760 * the number of heap pages computed considering heap fetches won't be
761 * sequential as for parallel scans the pages are accessed in random
762 * order.
763 */
768
769 /*
770 * Fall out if workers can't be assigned for parallel scan, because in
771 * such a case this path will be rejected. So there is no benefit in
772 * doing extra computation.
773 */
774 if (path->path.parallel_workers <= 0)
775 return;
776
777 path->path.parallel_aware = true;
778 }
779
780 /*
781 * Now interpolate based on estimated index order correlation to get total
782 * disk I/O cost for main table accesses.
783 */
784 csquared = indexCorrelation * indexCorrelation;
785
786 run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
787
788 /*
789 * Estimate CPU costs per tuple.
790 *
791 * What we want here is cpu_tuple_cost plus the evaluation costs of any
792 * qual clauses that we have to evaluate as qpquals.
793 */
795
796 startup_cost += qpqual_cost.startup;
798
799 cpu_run_cost += cpu_per_tuple * tuples_fetched;
800
801 /* tlist eval costs are paid per output row, not per tuple scanned */
802 startup_cost += path->path.pathtarget->cost.startup;
803 cpu_run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
804
805 /* Adjust costing for parallelism, if used. */
806 if (path->path.parallel_workers > 0)
807 {
809
811
812 /* The CPU cost is divided among all the workers. */
814 }
815
816 run_cost += cpu_run_cost;
817
818 path->path.startup_cost = startup_cost;
819 path->path.total_cost = startup_cost + run_cost;
820}
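/*
 * Illustrative sketch (editor's addition, not part of costsize.c): the
 * correlation-based interpolation at the end of cost_index().  max_io_cost is
 * the estimate for a perfectly uncorrelated index, min_io_cost for a
 * perfectly correlated one; we slide between them using the square of the
 * index correlation.
 */
static double
indexscan_io_interpolation(double min_io_cost, double max_io_cost,
						   double index_correlation)
{
	double		csquared = index_correlation * index_correlation;

	return max_io_cost + csquared * (min_io_cost - max_io_cost);
}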
821
822/*
823 * extract_nonindex_conditions
824 *
825 * Given a list of quals to be enforced in an indexscan, extract the ones that
826 * will have to be applied as qpquals (ie, the index machinery won't handle
827 * them). Here we detect only whether a qual clause is directly redundant
828 * with some indexclause. If the index path is chosen for use, createplan.c
829 * will try a bit harder to get rid of redundant qual conditions; specifically
830 * it will see if quals can be proven to be implied by the indexquals. But
831 * it does not seem worth the cycles to try to factor that in at this stage,
832 * since we're only trying to estimate qual eval costs. Otherwise this must
833 * match the logic in create_indexscan_plan().
834 *
835 * qual_clauses, and the result, are lists of RestrictInfos.
836 * indexclauses is a list of IndexClauses.
837 */
838static List *
840{
841 List *result = NIL;
842 ListCell *lc;
843
844 foreach(lc, qual_clauses)
845 {
847
848 if (rinfo->pseudoconstant)
849 continue; /* we may drop pseudoconstants here */
850 if (is_redundant_with_indexclauses(rinfo, indexclauses))
851 continue; /* dup or derived from same EquivalenceClass */
852 /* ... skip the predicate proof attempt createplan.c will try ... */
853 result = lappend(result, rinfo);
854 }
855 return result;
856}
857
858/*
859 * index_pages_fetched
860 * Estimate the number of pages actually fetched after accounting for
861 * cache effects.
862 *
863 * We use an approximation proposed by Mackert and Lohman, "Index Scans
864 * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
865 * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
866 * The Mackert and Lohman approximation is that the number of pages
867 * fetched is
868 * PF =
869 * min(2TNs/(2T+Ns), T) when T <= b
870 * 2TNs/(2T+Ns) when T > b and Ns <= 2Tb/(2T-b)
871 * b + (Ns - 2Tb/(2T-b))*(T-b)/T when T > b and Ns > 2Tb/(2T-b)
872 * where
873 * T = # pages in table
874 * N = # tuples in table
875 * s = selectivity = fraction of table to be scanned
876 * b = # buffer pages available (we include kernel space here)
877 *
878 * We assume that effective_cache_size is the total number of buffer pages
879 * available for the whole query, and pro-rate that space across all the
880 * tables in the query and the index currently under consideration. (This
881 * ignores space needed for other indexes used by the query, but since we
882 * don't know which indexes will get used, we can't estimate that very well;
883 * and in any case counting all the tables may well be an overestimate, since
884 * depending on the join plan not all the tables may be scanned concurrently.)
885 *
886 * The product Ns is the number of tuples fetched; we pass in that
887 * product rather than calculating it here. "pages" is the number of pages
888 * in the object under consideration (either an index or a table).
889 * "index_pages" is the amount to add to the total table space, which was
890 * computed for us by make_one_rel.
891 *
892 * Caller is expected to have ensured that tuples_fetched is greater than zero
893 * and rounded to integer (see clamp_row_est). The result will likewise be
894 * greater than zero and integral.
895 */
896double
897index_pages_fetched(double tuples_fetched, BlockNumber pages,
899{
900 double pages_fetched;
901 double total_pages;
902 double T,
903 b;
904
905 /* T is # pages in table, but don't allow it to be zero */
906 T = (pages > 1) ? (double) pages : 1.0;
907
908 /* Compute number of pages assumed to be competing for cache space */
909 total_pages = root->total_table_pages + index_pages;
911 Assert(T <= total_pages);
912
913 /* b is pro-rated share of effective_cache_size */
915
916 /* force it positive and integral */
917 if (b <= 1.0)
918 b = 1.0;
919 else
920 b = ceil(b);
921
922 /* This part is the Mackert and Lohman formula */
923 if (T <= b)
924 {
926 (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
927 if (pages_fetched >= T)
929 else
931 }
932 else
933 {
934 double lim;
935
936 lim = (2.0 * T * b) / (2.0 * T - b);
937 if (tuples_fetched <= lim)
938 {
940 (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
941 }
942 else
943 {
945 b + (tuples_fetched - lim) * (T - b) / T;
946 }
948 }
949 return pages_fetched;
950}
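/*
 * Worked example (editor's addition, illustrative numbers only): with
 * T = 10000 pages, b = 1000 buffer pages, and tuples_fetched = 5000, we have
 * T > b and lim = 2*T*b/(2*T - b) = 20000000/19000 ~= 1052.6.  Since
 * 5000 > lim, pages_fetched = b + (5000 - lim)*(T - b)/T ~= 1000 + 3552.6,
 * which the final ceil() rounds up to 4553.
 */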
951
952/*
953 * get_indexpath_pages
954 * Determine the total size of the indexes used in a bitmap index path.
955 *
956 * Note: if the same index is used more than once in a bitmap tree, we will
957 * count it multiple times, which perhaps is the wrong thing ... but it's
958 * not completely clear, and detecting duplicates is difficult, so ignore it
959 * for now.
960 */
961static double
963{
964 double result = 0;
965 ListCell *l;
966
967 if (IsA(bitmapqual, BitmapAndPath))
968 {
969 BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
970
971 foreach(l, apath->bitmapquals)
972 {
973 result += get_indexpath_pages((Path *) lfirst(l));
974 }
975 }
976 else if (IsA(bitmapqual, BitmapOrPath))
977 {
978 BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
979
980 foreach(l, opath->bitmapquals)
981 {
982 result += get_indexpath_pages((Path *) lfirst(l));
983 }
984 }
985 else if (IsA(bitmapqual, IndexPath))
986 {
987 IndexPath *ipath = (IndexPath *) bitmapqual;
988
989 result = (double) ipath->indexinfo->pages;
990 }
991 else
992 elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
993
994 return result;
995}
996
997/*
998 * cost_bitmap_heap_scan
999 * Determines and returns the cost of scanning a relation using a bitmap
1000 * index-then-heap plan.
1001 *
1002 * 'baserel' is the relation to be scanned
1003 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1004 * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
1005 * 'loop_count' is the number of repetitions of the indexscan to factor into
1006 * estimates of caching behavior
1007 *
1008 * Note: the component IndexPaths in bitmapqual should have been costed
1009 * using the same loop_count.
1010 */
1011void
1014 Path *bitmapqual, double loop_count)
1015{
1016 Cost startup_cost = 0;
1017 Cost run_cost = 0;
1018 Cost indexTotalCost;
1023 double tuples_fetched;
1024 double pages_fetched;
1025 double spc_seq_page_cost,
1026 spc_random_page_cost;
1027 double T;
1029
1030 /* Should only be applied to base relations */
1032 Assert(baserel->relid > 0);
1033 Assert(baserel->rtekind == RTE_RELATION);
1034
1035 /* Mark the path with the correct row estimate */
1036 if (param_info)
1037 path->rows = param_info->ppi_rows;
1038 else
1039 path->rows = baserel->rows;
1040
1042 loop_count, &indexTotalCost,
1043 &tuples_fetched);
1044
1045 startup_cost += indexTotalCost;
1046 T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
1047
1048 /* Fetch estimated page costs for tablespace containing table. */
1049 get_tablespace_page_costs(baserel->reltablespace,
1050 &spc_random_page_cost,
1052
1053 /*
1054 * For small numbers of pages we should charge spc_random_page_cost
1055 * apiece, while if nearly all the table's pages are being read, it's more
1056 * appropriate to charge spc_seq_page_cost apiece. The effect is
1057 * nonlinear, too. For lack of a better idea, interpolate like this to
1058 * determine the cost per page.
1059 */
1060 if (pages_fetched >= 2.0)
1061 cost_per_page = spc_random_page_cost -
1062 (spc_random_page_cost - spc_seq_page_cost)
1063 * sqrt(pages_fetched / T);
1064 else
1065 cost_per_page = spc_random_page_cost;
1066
1067 run_cost += pages_fetched * cost_per_page;
1068
1069 /*
1070 * Estimate CPU costs per tuple.
1071 *
1072 * Often the indexquals don't need to be rechecked at each tuple ... but
1073 * not always, especially not if there are enough tuples involved that the
1074 * bitmaps become lossy. For the moment, just assume they will be
1075 * rechecked always. This means we charge the full freight for all the
1076 * scan clauses.
1077 */
1079
1080 startup_cost += qpqual_cost.startup;
1082 cpu_run_cost = cpu_per_tuple * tuples_fetched;
1083
1084 /* Adjust costing for parallelism, if used. */
1085 if (path->parallel_workers > 0)
1086 {
1088
1089 /* The CPU cost is divided among all the workers. */
1091
1092 path->rows = clamp_row_est(path->rows / parallel_divisor);
1093 }
1094 else
1096
1097
1098 run_cost += cpu_run_cost;
1099
1100 /* tlist eval costs are paid per output row, not per tuple scanned */
1101 startup_cost += path->pathtarget->cost.startup;
1102 run_cost += path->pathtarget->cost.per_tuple * path->rows;
1103
1104 path->disabled_nodes =
1105 (baserel->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
1106 path->startup_cost = startup_cost;
1107 path->total_cost = startup_cost + run_cost;
1108}
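/*
 * Illustrative sketch (editor's addition, not part of costsize.c): the
 * nonlinear per-page charge used by cost_bitmap_heap_scan() above, where T is
 * the table size in pages.  Fetching nearly the whole table costs close to
 * spc_seq_page_cost per page, while fetching only a few pages costs close to
 * spc_random_page_cost per page.
 */
static double
bitmap_heap_cost_per_page(double pages_fetched, double T,
						  double spc_random_page_cost,
						  double spc_seq_page_cost)
{
	if (pages_fetched >= 2.0)
		return spc_random_page_cost -
			(spc_random_page_cost - spc_seq_page_cost) *
			sqrt(pages_fetched / T);

	return spc_random_page_cost;
}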
1109
1110/*
1111 * cost_bitmap_tree_node
1112 * Extract cost and selectivity from a bitmap tree node (index/and/or)
1113 */
1114void
1116{
1117 if (IsA(path, IndexPath))
1118 {
1119 *cost = ((IndexPath *) path)->indextotalcost;
1120 *selec = ((IndexPath *) path)->indexselectivity;
1121
1122 /*
1123 * Charge a small amount per retrieved tuple to reflect the costs of
1124 * manipulating the bitmap. This is mostly to make sure that a bitmap
1125 * scan doesn't look to be the same cost as an indexscan to retrieve a
1126 * single tuple.
1127 */
1128 *cost += 0.1 * cpu_operator_cost * path->rows;
1129 }
1130 else if (IsA(path, BitmapAndPath))
1131 {
1132 *cost = path->total_cost;
1133 *selec = ((BitmapAndPath *) path)->bitmapselectivity;
1134 }
1135 else if (IsA(path, BitmapOrPath))
1136 {
1137 *cost = path->total_cost;
1138 *selec = ((BitmapOrPath *) path)->bitmapselectivity;
1139 }
1140 else
1141 {
1142 elog(ERROR, "unrecognized node type: %d", nodeTag(path));
1143 *cost = *selec = 0; /* keep compiler quiet */
1144 }
1145}
1146
1147/*
1148 * cost_bitmap_and_node
1149 * Estimate the cost of a BitmapAnd node
1150 *
1151 * Note that this considers only the costs of index scanning and bitmap
1152 * creation, not the eventual heap access. In that sense the object isn't
1153 * truly a Path, but it has enough path-like properties (costs in particular)
1154 * to warrant treating it as one. We don't bother to set the path rows field,
1155 * however.
1156 */
1157void
1159{
1162 ListCell *l;
1163
1164 /*
1165 * We estimate AND selectivity on the assumption that the inputs are
1166 * independent. This is probably often wrong, but we don't have the info
1167 * to do better.
1168 *
1169 * The runtime cost of the BitmapAnd itself is estimated at 100x
1170 * cpu_operator_cost for each tbm_intersect needed. Probably too small,
1171 * definitely too simplistic?
1172 */
1173 totalCost = 0.0;
1174 selec = 1.0;
1175 foreach(l, path->bitmapquals)
1176 {
1177 Path *subpath = (Path *) lfirst(l);
1178 Cost subCost;
1180
1182
1183 selec *= subselec;
1184
1185 totalCost += subCost;
1186 if (l != list_head(path->bitmapquals))
1187 totalCost += 100.0 * cpu_operator_cost;
1188 }
1189 path->bitmapselectivity = selec;
1190 path->path.rows = 0; /* per above, not used */
1191 path->path.disabled_nodes = 0;
1192 path->path.startup_cost = totalCost;
1193 path->path.total_cost = totalCost;
1194}
1195
1196/*
1197 * cost_bitmap_or_node
1198 * Estimate the cost of a BitmapOr node
1199 *
1200 * See comments for cost_bitmap_and_node.
1201 */
1202void
1204{
1207 ListCell *l;
1208
1209 /*
1210 * We estimate OR selectivity on the assumption that the inputs are
1211 * non-overlapping, since that's often the case in "x IN (list)" type
1212 * situations. Of course, we clamp to 1.0 at the end.
1213 *
1214 * The runtime cost of the BitmapOr itself is estimated at 100x
1215 * cpu_operator_cost for each tbm_union needed. Probably too small,
1216 * definitely too simplistic? We are aware that the tbm_unions are
1217 * optimized out when the inputs are BitmapIndexScans.
1218 */
1219 totalCost = 0.0;
1220 selec = 0.0;
1221 foreach(l, path->bitmapquals)
1222 {
1223 Path *subpath = (Path *) lfirst(l);
1224 Cost subCost;
1226
1228
1229 selec += subselec;
1230
1231 totalCost += subCost;
1232 if (l != list_head(path->bitmapquals) &&
1234 totalCost += 100.0 * cpu_operator_cost;
1235 }
1236 path->bitmapselectivity = Min(selec, 1.0);
1237 path->path.rows = 0; /* per above, not used */
1238 path->path.startup_cost = totalCost;
1239 path->path.total_cost = totalCost;
1240}
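/*
 * Illustrative sketch (editor's addition, not part of costsize.c): how the
 * two bitmap combiners above fold child selectivities.  AND assumes the
 * inputs are independent and multiplies; OR assumes they are non-overlapping
 * and adds, clamping to 1.0.  Each combine step after the first child is
 * charged 100 * cpu_operator_cost; the real OR node skips that charge when
 * the input is a plain bitmap index scan, a detail omitted here.
 */
static void
bitmap_combine_sketch(const double *child_selec, int nchildren, bool is_and,
					  double cpu_operator_cost,
					  double *selectivity, double *combine_cost)
{
	double		selec = is_and ? 1.0 : 0.0;
	double		cost = 0.0;

	for (int i = 0; i < nchildren; i++)
	{
		if (is_and)
			selec *= child_selec[i];
		else
			selec += child_selec[i];
		if (i > 0)
			cost += 100.0 * cpu_operator_cost;
	}
	if (!is_and && selec > 1.0)
		selec = 1.0;

	*selectivity = selec;
	*combine_cost = cost;
}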
1241
1242/*
1243 * cost_tidscan
1244 * Determines and returns the cost of scanning a relation using TIDs.
1245 *
1246 * 'baserel' is the relation to be scanned
1247 * 'tidquals' is the list of TID-checkable quals
1248 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1249 */
1250void
1253{
1254 Cost startup_cost = 0;
1255 Cost run_cost = 0;
1259 double ntuples;
1260 ListCell *l;
1261 double spc_random_page_cost;
1262 uint64 enable_mask = 0;
1263
1264 /* Should only be applied to base relations */
1265 Assert(baserel->relid > 0);
1266 Assert(baserel->rtekind == RTE_RELATION);
1267 Assert(tidquals != NIL);
1268
1269 /* Mark the path with the correct row estimate */
1270 if (param_info)
1271 path->rows = param_info->ppi_rows;
1272 else
1273 path->rows = baserel->rows;
1274
1275 /* Count how many tuples we expect to retrieve */
1276 ntuples = 0;
1277 foreach(l, tidquals)
1278 {
1280 Expr *qual = rinfo->clause;
1281
1282 /*
1283 * We must use a TID scan for CurrentOfExpr; in any other case, we
1284 * should be generating a TID scan only if TID scans are allowed.
1285 * Also, if CurrentOfExpr is the qual, there should be only one.
1286 */
1287 Assert((baserel->pgs_mask & PGS_TIDSCAN) != 0 || IsA(qual, CurrentOfExpr));
1288 Assert(list_length(tidquals) == 1 || !IsA(qual, CurrentOfExpr));
1289
1290 if (IsA(qual, ScalarArrayOpExpr))
1291 {
1292 /* Each element of the array yields 1 tuple */
1293 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) qual;
1294 Node *arraynode = (Node *) lsecond(saop->args);
1295
1297 }
1298 else if (IsA(qual, CurrentOfExpr))
1299 {
1300 /* CURRENT OF yields 1 tuple */
1301 ntuples++;
1302 }
1303 else
1304 {
1305 /* It's just CTID = something, count 1 tuple */
1306 ntuples++;
1307 }
1308 }
1309
1310 /*
1311 * The TID qual expressions will be computed once, any other baserestrict
1312 * quals once per retrieved tuple.
1313 */
1314 cost_qual_eval(&tid_qual_cost, tidquals, root);
1315
1316 /* fetch estimated page cost for tablespace containing table */
1317 get_tablespace_page_costs(baserel->reltablespace,
1318 &spc_random_page_cost,
1319 NULL);
1320
1321 /* disk costs --- assume each tuple on a different page */
1322 run_cost += spc_random_page_cost * ntuples;
1323
1324 /* Add scanning CPU costs */
1326
1327 /* XXX currently we assume TID quals are a subset of qpquals */
1328 startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
1330 tid_qual_cost.per_tuple;
1331 run_cost += cpu_per_tuple * ntuples;
1332
1333 /* tlist eval costs are paid per output row, not per tuple scanned */
1334 startup_cost += path->pathtarget->cost.startup;
1335 run_cost += path->pathtarget->cost.per_tuple * path->rows;
1336
1337 /*
1338 * There are assertions above verifying that we only reach this function
1339 * either when baserel->pgs_mask includes PGS_TIDSCAN or when the TID scan
1340 * is the only legal path, so we only need to consider the effects of
1341 * PGS_CONSIDER_NONPARTIAL here.
1342 */
1343 if (path->parallel_workers == 0)
1345 path->disabled_nodes =
1346 (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
1347 path->startup_cost = startup_cost;
1348 path->total_cost = startup_cost + run_cost;
1349}
1350
1351/*
1352 * cost_tidrangescan
1353 * Determines and sets the costs of scanning a relation using a range of
1354 * TIDs for 'path'
1355 *
1356 * 'baserel' is the relation to be scanned
1357 * 'tidrangequals' is the list of TID-checkable range quals
1358 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1359 */
1360void
1362 RelOptInfo *baserel, List *tidrangequals,
1364{
1365 Selectivity selectivity;
1366 double pages;
1367 Cost startup_cost;
1373 double ntuples;
1374 double nseqpages;
1375 double spc_random_page_cost;
1376 double spc_seq_page_cost;
1378
1379 /* Should only be applied to base relations */
1380 Assert(baserel->relid > 0);
1381 Assert(baserel->rtekind == RTE_RELATION);
1382
1383 /* Mark the path with the correct row estimate */
1384 if (param_info)
1385 path->rows = param_info->ppi_rows;
1386 else
1387 path->rows = baserel->rows;
1388
1389 /* Count how many tuples and pages we expect to scan */
1390 selectivity = clauselist_selectivity(root, tidrangequals, baserel->relid,
1391 JOIN_INNER, NULL);
1392 pages = ceil(selectivity * baserel->pages);
1393
1394 if (pages <= 0.0)
1395 pages = 1.0;
1396
1397 /*
1398 * The first page in a range requires a random seek, but each subsequent
1399 * page is just a normal sequential page read. NOTE: it's desirable for
1400 * TID Range Scans to cost more than the equivalent Sequential Scans,
1401 * because Seq Scans have some performance advantages such as scan
1402 * synchronization, and we'd prefer one of them to be picked unless a TID
1403 * Range Scan really is better.
1404 */
1405 ntuples = selectivity * baserel->tuples;
1406 nseqpages = pages - 1.0;
1407
1408 /*
1409 * The TID qual expressions will be computed once, any other baserestrict
1410 * quals once per retrieved tuple.
1411 */
1412 cost_qual_eval(&tid_qual_cost, tidrangequals, root);
1413
1414 /* fetch estimated page cost for tablespace containing table */
1415 get_tablespace_page_costs(baserel->reltablespace,
1416 &spc_random_page_cost,
1418
1419 /* disk costs; 1 random page and the remainder as seq pages */
1420 disk_run_cost = spc_random_page_cost + spc_seq_page_cost * nseqpages;
1421
1422 /* Add scanning CPU costs */
1424
1425 /*
1426 * XXX currently we assume TID quals are a subset of qpquals at this
1427 * point; they will be removed (if possible) when we create the plan, so
1428 * we subtract their cost from the total qpqual cost. (If the TID quals
1429 * can't be removed, this is a mistake and we're going to underestimate
1430 * the CPU cost a bit.)
1431 */
1432 startup_cost = qpqual_cost.startup + tid_qual_cost.per_tuple;
1434 tid_qual_cost.per_tuple;
1435 cpu_run_cost = cpu_per_tuple * ntuples;
1436
1437 /* tlist eval costs are paid per output row, not per tuple scanned */
1438 startup_cost += path->pathtarget->cost.startup;
1439 cpu_run_cost += path->pathtarget->cost.per_tuple * path->rows;
1440
1441 /* Adjust costing for parallelism, if used. */
1442 if (path->parallel_workers > 0)
1443 {
1445
1446 /* The CPU cost is divided among all the workers. */
1448
1449 /*
1450 * In the case of a parallel plan, the row count needs to represent
1451 * the number of tuples processed per worker.
1452 */
1453 path->rows = clamp_row_est(path->rows / parallel_divisor);
1454 }
1455
1456 /*
1457 * We should not generate this path type when PGS_TIDSCAN is unset, but we
1458 * might need to disable this path due to PGS_CONSIDER_NONPARTIAL.
1459 */
1460 Assert((baserel->pgs_mask & PGS_TIDSCAN) != 0);
1461 if (path->parallel_workers == 0)
1463 path->disabled_nodes =
1464 (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
1465 path->startup_cost = startup_cost;
1466 path->total_cost = startup_cost + cpu_run_cost + disk_run_cost;
1467}
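/*
 * Illustrative sketch (editor's addition, not part of costsize.c): the disk
 * charge for a TID Range Scan as computed above -- one random fetch for the
 * first page in the range, then sequential reads for the remainder.  (A plain
 * TID Scan instead charges a random fetch per expected tuple.)
 */
static double
tidrange_disk_cost_sketch(double pages, double spc_random_page_cost,
						  double spc_seq_page_cost)
{
	double		nseqpages;

	if (pages <= 0.0)
		pages = 1.0;
	nseqpages = pages - 1.0;

	return spc_random_page_cost + spc_seq_page_cost * nseqpages;
}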
1468
1469/*
1470 * cost_subqueryscan
1471 * Determines and returns the cost of scanning a subquery RTE.
1472 *
1473 * 'baserel' is the relation to be scanned
1474 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1475 * 'trivial_pathtarget' is true if the pathtarget is believed to be trivial.
1476 */
1477void
1480 bool trivial_pathtarget)
1481{
1482 Cost startup_cost;
1483 Cost run_cost;
1484 List *qpquals;
1487 uint64 enable_mask = 0;
1488
1489 /* Should only be applied to base relations that are subqueries */
1490 Assert(baserel->relid > 0);
1491 Assert(baserel->rtekind == RTE_SUBQUERY);
1492
1493 /*
1494 * We compute the rowcount estimate as the subplan's estimate times the
1495 * selectivity of relevant restriction clauses. In simple cases this will
1496 * come out the same as baserel->rows; but when dealing with parallelized
1497 * paths we must do it like this to get the right answer.
1498 */
1499 if (param_info)
1500 qpquals = list_concat_copy(param_info->ppi_clauses,
1501 baserel->baserestrictinfo);
1502 else
1503 qpquals = baserel->baserestrictinfo;
1504
1505 path->path.rows = clamp_row_est(path->subpath->rows *
1507 qpquals,
1508 0,
1509 JOIN_INNER,
1510 NULL));
1511
1512 /*
1513 * Cost of path is cost of evaluating the subplan, plus cost of evaluating
1514 * any restriction clauses and tlist that will be attached to the
1515 * SubqueryScan node, plus cpu_tuple_cost to account for selection and
1516 * projection overhead.
1517 */
1518 if (path->path.parallel_workers == 0)
1521 + (((baserel->pgs_mask & enable_mask) != enable_mask) ? 1 : 0);
1522 path->path.startup_cost = path->subpath->startup_cost;
1523 path->path.total_cost = path->subpath->total_cost;
1524
1525 /*
1526 * However, if there are no relevant restriction clauses and the
1527 * pathtarget is trivial, then we expect that setrefs.c will optimize away
1528 * the SubqueryScan plan node altogether, so we should just make its cost
1529 * and rowcount equal to the input path's.
1530 *
1531 * Note: there are some edge cases where createplan.c will apply a
1532 * different targetlist to the SubqueryScan node, thus falsifying our
1533 * current estimate of whether the target is trivial, and making the cost
1534 * estimate (though not the rowcount) wrong. It does not seem worth the
1535 * extra complication to try to account for that exactly, especially since
1536 * that behavior falsifies other cost estimates as well.
1537 */
1538 if (qpquals == NIL && trivial_pathtarget)
1539 return;
1540
1542
1543 startup_cost = qpqual_cost.startup;
1545 run_cost = cpu_per_tuple * path->subpath->rows;
1546
1547 /* tlist eval costs are paid per output row, not per tuple scanned */
1548 startup_cost += path->path.pathtarget->cost.startup;
1549 run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
1550
1551 path->path.startup_cost += startup_cost;
1552 path->path.total_cost += startup_cost + run_cost;
1553}
1554
1555/*
1556 * cost_functionscan
1557 * Determines and returns the cost of scanning a function RTE.
1558 *
1559 * 'baserel' is the relation to be scanned
1560 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1561 */
1562void
1565{
1566 Cost startup_cost = 0;
1567 Cost run_cost = 0;
1572 uint64 enable_mask = 0;
1573
1574 /* Should only be applied to base relations that are functions */
1575 Assert(baserel->relid > 0);
1576 rte = planner_rt_fetch(baserel->relid, root);
1577 Assert(rte->rtekind == RTE_FUNCTION);
1578
1579 /* Mark the path with the correct row estimate */
1580 if (param_info)
1581 path->rows = param_info->ppi_rows;
1582 else
1583 path->rows = baserel->rows;
1584
1585 /*
1586 * Estimate costs of executing the function expression(s).
1587 *
1588 * Currently, nodeFunctionscan.c always executes the functions to
1589 * completion before returning any rows, and caches the results in a
1590 * tuplestore. So the function eval cost is all startup cost, and per-row
1591 * costs are minimal.
1592 *
1593 * XXX in principle we ought to charge tuplestore spill costs if the
1594 * number of rows is large. However, given how phony our rowcount
1595 * estimates for functions tend to be, there's not a lot of point in that
1596 * refinement right now.
1597 */
1598 cost_qual_eval_node(&exprcost, (Node *) rte->functions, root);
1599
1600 startup_cost += exprcost.startup + exprcost.per_tuple;
1601
1602 /* Add scanning CPU costs */
1604
1605 startup_cost += qpqual_cost.startup;
1607 run_cost += cpu_per_tuple * baserel->tuples;
1608
1609 /* tlist eval costs are paid per output row, not per tuple scanned */
1610 startup_cost += path->pathtarget->cost.startup;
1611 run_cost += path->pathtarget->cost.per_tuple * path->rows;
1612
1613 if (path->parallel_workers == 0)
1615 path->disabled_nodes =
1616 (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
1617 path->startup_cost = startup_cost;
1618 path->total_cost = startup_cost + run_cost;
1619}
1620
1621/*
1622 * cost_tablefuncscan
1623 * Determines and returns the cost of scanning a table function.
1624 *
1625 * 'baserel' is the relation to be scanned
1626 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1627 */
1628void
1631{
1632 Cost startup_cost = 0;
1633 Cost run_cost = 0;
1638 uint64 enable_mask = 0;
1639
1640 /* Should only be applied to base relations that are functions */
1641 Assert(baserel->relid > 0);
1642 rte = planner_rt_fetch(baserel->relid, root);
1643 Assert(rte->rtekind == RTE_TABLEFUNC);
1644
1645 /* Mark the path with the correct row estimate */
1646 if (param_info)
1647 path->rows = param_info->ppi_rows;
1648 else
1649 path->rows = baserel->rows;
1650
1651 /*
1652 * Estimate costs of executing the table func expression(s).
1653 *
1654 * XXX in principle we ought to charge tuplestore spill costs if the
1655 * number of rows is large. However, given how phony our rowcount
1656 * estimates for tablefuncs tend to be, there's not a lot of point in that
1657 * refinement right now.
1658 */
1659 cost_qual_eval_node(&exprcost, (Node *) rte->tablefunc, root);
1660
1661 startup_cost += exprcost.startup + exprcost.per_tuple;
1662
1663 /* Add scanning CPU costs */
1665
1666 startup_cost += qpqual_cost.startup;
1668 run_cost += cpu_per_tuple * baserel->tuples;
1669
1670 /* tlist eval costs are paid per output row, not per tuple scanned */
1671 startup_cost += path->pathtarget->cost.startup;
1672 run_cost += path->pathtarget->cost.per_tuple * path->rows;
1673
1674 if (path->parallel_workers == 0)
1676 path->disabled_nodes =
1677 (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
1678 path->startup_cost = startup_cost;
1679 path->total_cost = startup_cost + run_cost;
1680}
1681
1682/*
1683 * cost_valuesscan
1684 * Determines and returns the cost of scanning a VALUES RTE.
1685 *
1686 * 'baserel' is the relation to be scanned
1687 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1688 */
1689void
1692{
1693 Cost startup_cost = 0;
1694 Cost run_cost = 0;
1697 uint64 enable_mask = 0;
1698
1699 /* Should only be applied to base relations that are values lists */
1700 Assert(baserel->relid > 0);
1701 Assert(baserel->rtekind == RTE_VALUES);
1702
1703 /* Mark the path with the correct row estimate */
1704 if (param_info)
1705 path->rows = param_info->ppi_rows;
1706 else
1707 path->rows = baserel->rows;
1708
1709 /*
1710 * For now, estimate list evaluation cost at one operator eval per list
1711 * (probably pretty bogus, but is it worth being smarter?)
1712 */
1714
1715 /* Add scanning CPU costs */
1717
1718 startup_cost += qpqual_cost.startup;
1720 run_cost += cpu_per_tuple * baserel->tuples;
1721
1722 /* tlist eval costs are paid per output row, not per tuple scanned */
1723 startup_cost += path->pathtarget->cost.startup;
1724 run_cost += path->pathtarget->cost.per_tuple * path->rows;
1725
1726 if (path->parallel_workers == 0)
1728 path->disabled_nodes =
1729 (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
1730 path->startup_cost = startup_cost;
1731 path->total_cost = startup_cost + run_cost;
1732}
1733
1734/*
1735 * cost_ctescan
1736 * Determines and returns the cost of scanning a CTE RTE.
1737 *
1738 * Note: this is used for both self-reference and regular CTEs; the
1739 * possible cost differences are below the threshold of what we could
1740 * estimate accurately anyway. Note that the costs of evaluating the
1741 * referenced CTE query are added into the final plan as initplan costs,
1742 * and should NOT be counted here.
1743 */
1744void
1747{
1748 Cost startup_cost = 0;
1749 Cost run_cost = 0;
1752 uint64 enable_mask = 0;
1753
1754 /* Should only be applied to base relations that are CTEs */
1755 Assert(baserel->relid > 0);
1756 Assert(baserel->rtekind == RTE_CTE);
1757
1758 /* Mark the path with the correct row estimate */
1759 if (param_info)
1760 path->rows = param_info->ppi_rows;
1761 else
1762 path->rows = baserel->rows;
1763
1764 /* Charge one CPU tuple cost per row for tuplestore manipulation */
1766
1767 /* Add scanning CPU costs */
1769
1770 startup_cost += qpqual_cost.startup;
1772 run_cost += cpu_per_tuple * baserel->tuples;
1773
1774 /* tlist eval costs are paid per output row, not per tuple scanned */
1775 startup_cost += path->pathtarget->cost.startup;
1776 run_cost += path->pathtarget->cost.per_tuple * path->rows;
1777
1778 if (path->parallel_workers == 0)
1780 path->disabled_nodes =
1781 (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
1782 path->startup_cost = startup_cost;
1783 path->total_cost = startup_cost + run_cost;
1784}
1785
1786/*
1787 * cost_namedtuplestorescan
1788 * Determines and returns the cost of scanning a named tuplestore.
1789 */
1790void
1793{
1794 Cost startup_cost = 0;
1795 Cost run_cost = 0;
1798 uint64 enable_mask = 0;
1799
1800 /* Should only be applied to base relations that are Tuplestores */
1801 Assert(baserel->relid > 0);
1802 Assert(baserel->rtekind == RTE_NAMEDTUPLESTORE);
1803
1804 /* Mark the path with the correct row estimate */
1805 if (param_info)
1806 path->rows = param_info->ppi_rows;
1807 else
1808 path->rows = baserel->rows;
1809
1810 /* Charge one CPU tuple cost per row for tuplestore manipulation */
1812
1813 /* Add scanning CPU costs */
1815
1816 startup_cost += qpqual_cost.startup;
1818 run_cost += cpu_per_tuple * baserel->tuples;
1819
1820 if (path->parallel_workers == 0)
1822 path->disabled_nodes =
1823 (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
1824 path->startup_cost = startup_cost;
1825 path->total_cost = startup_cost + run_cost;
1826}
1827
1828/*
1829 * cost_resultscan
1830 * Determines and returns the cost of scanning an RTE_RESULT relation.
1831 */
1832void
1835{
1836 Cost startup_cost = 0;
1837 Cost run_cost = 0;
1840 uint64 enable_mask = 0;
1841
1842 /* Should only be applied to RTE_RESULT base relations */
1843 Assert(baserel->relid > 0);
1844 Assert(baserel->rtekind == RTE_RESULT);
1845
1846 /* Mark the path with the correct row estimate */
1847 if (param_info)
1848 path->rows = param_info->ppi_rows;
1849 else
1850 path->rows = baserel->rows;
1851
1852 /* We charge qual cost plus cpu_tuple_cost */
1854
1855 startup_cost += qpqual_cost.startup;
1857 run_cost += cpu_per_tuple * baserel->tuples;
1858
1859 if (path->parallel_workers == 0)
1861 path->disabled_nodes =
1862 (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
1863 path->startup_cost = startup_cost;
1864 path->total_cost = startup_cost + run_cost;
1865}
1866
1867/*
1868 * cost_recursive_union
1869 * Determines and returns the cost of performing a recursive union,
1870 * and also the estimated output size.
1871 *
1872 * We are given Paths for the nonrecursive and recursive terms.
1873 */
1874void
1876{
1877 Cost startup_cost;
1878 Cost total_cost;
1879 double total_rows;
1880 uint64 enable_mask = 0;
1881
1882 /* We probably have decent estimates for the non-recursive term */
1883 startup_cost = nrterm->startup_cost;
1884 total_cost = nrterm->total_cost;
1885 total_rows = nrterm->rows;
1886
1887 /*
1888 * We arbitrarily assume that about 10 recursive iterations will be
1889 * needed, and that we've managed to get a good fix on the cost and output
1890 * size of each one of them. These are mighty shaky assumptions but it's
1891 * hard to see how to do better.
1892 */
1893 total_cost += 10 * rterm->total_cost;
1894 total_rows += 10 * rterm->rows;
1895
1896 /*
1897 * Also charge cpu_tuple_cost per row to account for the costs of
1898 * manipulating the tuplestores. (We don't worry about possible
1899 * spill-to-disk costs.)
1900 */
1901 total_cost += cpu_tuple_cost * total_rows;
1902
1903 if (runion->parallel_workers == 0)
1904 enable_mask |= PGS_CONSIDER_NONPARTIAL;
1905 runion->disabled_nodes =
1906 (runion->parent->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
1907 runion->startup_cost = startup_cost;
1908 runion->total_cost = total_cost;
1909 runion->rows = total_rows;
1910 runion->pathtarget->width = Max(nrterm->pathtarget->width,
1911 rterm->pathtarget->width);
1912}
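/*
 * For illustration (a sketch using the default cpu_tuple_cost of 0.01): if
 * the nonrecursive term is estimated at 1000 rows with total cost 100, and
 * the recursive term at 200 rows with total cost 50 per iteration, the
 * assumed 10 iterations give total_rows = 1000 + 10 * 200 = 3000 and
 * total_cost = 100 + 10 * 50 + 0.01 * 3000 = 630.
 */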
1913
1914/*
1915 * cost_tuplesort
1916 * Determines and returns the cost of sorting a relation using tuplesort,
1917 * not including the cost of reading the input data.
1918 *
1919 * If the total volume of data to sort is less than sort_mem, we will do
1920 * an in-memory sort, which requires no I/O and about t*log2(t) tuple
1921 * comparisons for t tuples.
1922 *
1923 * If the total volume exceeds sort_mem, we switch to a tape-style merge
1924 * algorithm. There will still be about t*log2(t) tuple comparisons in
1925 * total, but we will also need to write and read each tuple once per
1926 * merge pass. We expect about ceil(logM(r)) merge passes where r is the
1927 * number of initial runs formed and M is the merge order used by tuplesort.c.
1928 * Since the average initial run should be about sort_mem, we have
1929 * disk traffic = 2 * relsize * ceil(logM(p / sort_mem))
1930 * cpu = comparison_cost * t * log2(t)
1931 *
1932 * If the sort is bounded (i.e., only the first k result tuples are needed)
1933 * and k tuples can fit into sort_mem, we use a heap method that keeps only
1934 * k tuples in the heap; this will require about t*log2(k) tuple comparisons.
1935 *
1936 * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
1937 * accesses (XXX can't we refine that guess?)
1938 *
1939 * By default, we charge two operator evals per tuple comparison, which should
1940 * be in the right ballpark in most cases. The caller can tweak this by
1941 * specifying nonzero comparison_cost; typically that's used for any extra
1942 * work that has to be done to prepare the inputs to the comparison operators.
1943 *
1944 * 'tuples' is the number of tuples in the relation
1945 * 'width' is the average tuple width in bytes
1946 * 'comparison_cost' is the extra cost per comparison, if any
1947 * 'sort_mem' is the number of kilobytes of work memory allowed for the sort
1948 * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
1949 */
1950static void
1951cost_tuplesort(Cost *startup_cost, Cost *run_cost,
1952 double tuples, int width,
1953 Cost comparison_cost, int sort_mem,
1954 double limit_tuples)
1955{
1956 double input_bytes = relation_byte_size(tuples, width);
1957 double output_bytes;
1958 double output_tuples;
1959 double sort_mem_bytes = sort_mem * (Size) 1024;
1960
1961 /*
1962 * We want to be sure the cost of a sort is never estimated as zero, even
1963 * if passed-in tuple count is zero. Besides, mustn't do log(0)...
1964 */
1965 if (tuples < 2.0)
1966 tuples = 2.0;
1967
1968 /* Include the default cost-per-comparison */
1969 comparison_cost += 2.0 * cpu_operator_cost;
1970
1971 /* Do we have a useful LIMIT? */
1972 if (limit_tuples > 0 && limit_tuples < tuples)
1973 {
1974 output_tuples = limit_tuples;
1975 output_bytes = relation_byte_size(output_tuples, width);
1976 }
1977 else
1978 {
1979 output_tuples = tuples;
1980 output_bytes = input_bytes;
1981 }
1982
1983 if (output_bytes > sort_mem_bytes)
1984 {
1985 /*
1986 * We'll have to use a disk-based sort of all the tuples
1987 */
1988 double npages = ceil(input_bytes / BLCKSZ);
1989 double nruns = input_bytes / sort_mem_bytes;
1990 double mergeorder = tuplesort_merge_order(sort_mem_bytes);
1991 double log_runs;
1992 double npageaccesses;
1993
1994 /*
1995 * CPU costs
1996 *
1997 * Assume about N log2 N comparisons
1998 */
1999 *startup_cost = comparison_cost * tuples * LOG2(tuples);
2000
2001 /* Disk costs */
2002
2003 /* Compute logM(r) as log(r) / log(M) */
2004 if (nruns > mergeorder)
2005 log_runs = ceil(log(nruns) / log(mergeorder));
2006 else
2007 log_runs = 1.0;
2008 npageaccesses = 2.0 * npages * log_runs;
2009 /* Assume 3/4ths of accesses are sequential, 1/4th are not */
2010 *startup_cost += npageaccesses *
2011 (seq_page_cost * 0.75 + random_page_cost * 0.25);
2012 }
2013 else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)
2014 {
2015 /*
2016 * We'll use a bounded heap-sort keeping just K tuples in memory, for
2017 * a total number of tuple comparisons of N log2 K; but the constant
2018 * factor is a bit higher than for quicksort. Tweak it so that the
2019 * cost curve is continuous at the crossover point.
2020 */
2021 *startup_cost = comparison_cost * tuples * LOG2(2.0 * output_tuples);
2022 }
2023 else
2024 {
2025 /* We'll use plain quicksort on all the input tuples */
2026 *startup_cost = comparison_cost * tuples * LOG2(tuples);
2027 }
2028
2029 /*
2030 * Also charge a small amount (arbitrarily set equal to operator cost) per
2031 * extracted tuple. We don't charge cpu_tuple_cost because a Sort node
2032 * doesn't do qual-checking or projection, so it has less overhead than
2033 * most plan nodes. Note it's correct to use tuples not output_tuples
2034 * here --- the upper LIMIT will pro-rate the run cost so we'd be double
2035 * counting the LIMIT otherwise.
2036 */
2037 *run_cost = cpu_operator_cost * tuples;
2038}
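/*
 * Worked example of the external-sort arithmetic above, assuming the
 * default page costs (seq_page_cost = 1.0, random_page_cost = 4.0) and
 * BLCKSZ = 8192: if input_bytes is about 100MB and sort_mem is 4MB, then
 * npages is about 12800 and nruns is about 25.  If the merge order were,
 * say, 28, log_runs = ceil(log(25)/log(28)) = 1, so npageaccesses =
 * 2 * 12800 = 25600 and the disk component is
 * 25600 * (0.75 * 1.0 + 0.25 * 4.0) = 44800, on top of the
 * comparison_cost * t * log2(t) CPU component.
 */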
2039
2040/*
2041 * cost_incremental_sort
2042 * Determines and returns the cost of sorting a relation incrementally, when
2043 * the input path is presorted by a prefix of the pathkeys.
2044 *
2045 * 'presorted_keys' is the number of leading pathkeys by which the input path
2046 * is sorted.
2047 *
2048 * We estimate the number of groups into which the relation is divided by the
2049 * leading pathkeys, and then calculate the cost of sorting a single group
2050 * with tuplesort using cost_tuplesort().
2051 */
2052void
2053cost_incremental_sort(Path *path,
2054 PlannerInfo *root, List *pathkeys, int presorted_keys,
2055 int input_disabled_nodes,
2056 Cost input_startup_cost, Cost input_total_cost,
2057 double input_tuples, int width, Cost comparison_cost, int sort_mem,
2058 double limit_tuples)
2059{
2060 Cost startup_cost,
2061 run_cost,
2062 input_run_cost = input_total_cost - input_startup_cost;
2063 double group_tuples,
2064 input_groups;
2065 Cost group_startup_cost,
2066 group_run_cost,
2067 group_input_run_cost;
2068 List *presortedExprs = NIL;
2069 ListCell *l;
2070 bool unknown_varno = false;
2071
2072 Assert(presorted_keys > 0 && presorted_keys < list_length(pathkeys));
2073
2074 /*
2075 * We want to be sure the cost of a sort is never estimated as zero, even
2076 * if passed-in tuple count is zero. Besides, mustn't do log(0)...
2077 */
2078 if (input_tuples < 2.0)
2079 input_tuples = 2.0;
2080
2081 /* Default estimate of number of groups, capped to one group per row. */
2082 input_groups = Min(input_tuples, DEFAULT_NUM_DISTINCT);
2083
2084 /*
2085 * Extract presorted keys as list of expressions.
2086 *
2087 * We need to be careful about Vars containing "varno 0" which might have
2088 * been introduced by generate_append_tlist, which would confuse
2089 * estimate_num_groups (in fact it'd fail for such expressions). See
2090 * recurse_set_operations which has to deal with the same issue.
2091 *
2092 * Unlike recurse_set_operations we can't access the original target list
2093 * here, and even if we could it's not very clear how useful would that be
2094 * for a set operation combining multiple tables. So we simply detect if
2095 * there are any expressions with "varno 0" and use the default
2096 * DEFAULT_NUM_DISTINCT in that case.
2097 *
2098 * We might also use either 1.0 (a single group) or input_tuples (each row
2099 * being a separate group), pretty much the worst and best case for
2100 * incremental sort. But those are extreme cases and using something in
2101 * between seems reasonable. Furthermore, generate_append_tlist is used
2102 * for set operations, which are likely to produce mostly unique output
2103 * anyway - from that standpoint the DEFAULT_NUM_DISTINCT is defensive
2104 * while maintaining lower startup cost.
2105 */
2106 foreach(l, pathkeys)
2107 {
2108 PathKey *key = (PathKey *) lfirst(l);
2109 EquivalenceMember *member = (EquivalenceMember *)
2110 linitial(key->pk_eclass->ec_members);
2111
2112 /*
2113 * Check if the expression contains Var with "varno 0" so that we
2114 * don't call estimate_num_groups in that case.
2115 */
2116 if (bms_is_member(0, pull_varnos(root, (Node *) member->em_expr)))
2117 {
2118 unknown_varno = true;
2119 break;
2120 }
2121
2122 /* expression not containing any Vars with "varno 0" */
2123 presortedExprs = lappend(presortedExprs, member->em_expr);
2124
2125 if (foreach_current_index(l) + 1 >= presorted_keys)
2126 break;
2127 }
2128
2129 /* Estimate the number of groups with equal presorted keys. */
2130 if (!unknown_varno)
2131 input_groups = estimate_num_groups(root, presortedExprs, input_tuples,
2132 NULL, NULL);
2133
2134 group_tuples = input_tuples / input_groups;
2135 group_input_run_cost = input_run_cost / input_groups;
2136
2137 /*
2138 * Estimate the average cost of sorting of one group where presorted keys
2139 * are equal.
2140 */
2141 cost_tuplesort(&group_startup_cost, &group_run_cost,
2142 group_tuples, width, comparison_cost, sort_mem,
2143 limit_tuples);
2144
2145 /*
2146 * Startup cost of incremental sort is the startup cost of its first group
2147 * plus the cost of its input.
2148 */
2149 startup_cost = group_startup_cost + input_startup_cost +
2150 group_input_run_cost;
2151
2152 /*
2153 * After we started producing tuples from the first group, the cost of
2154 * producing all the tuples is given by the cost to finish processing this
2155 * group, plus the total cost to process the remaining groups, plus the
2156 * remaining cost of input.
2157 */
2158 run_cost = group_run_cost + (group_run_cost + group_startup_cost) *
2159 (input_groups - 1) + group_input_run_cost * (input_groups - 1);
2160
2161 /*
2162 * Incremental sort adds some overhead by itself. Firstly, it has to
2163 * detect the sort groups. This is roughly equal to one extra copy and
2164 * comparison per tuple.
2165 */
2166 run_cost += (cpu_tuple_cost + comparison_cost) * input_tuples;
2167
2168 /*
2169 * Additionally, we charge double cpu_tuple_cost for each input group to
2170 * account for the tuplesort_reset that's performed after each group.
2171 */
2172 run_cost += 2.0 * cpu_tuple_cost * input_groups;
2173
2174 path->rows = input_tuples;
2175
2176 /*
2177 * We should not generate these paths when enable_incremental_sort=false.
2178 * We can ignore PGS_CONSIDER_NONPARTIAL here, because if it's relevant,
2179 * it will have already affected the input path.
2180 */
2183
2184 path->startup_cost = startup_cost;
2185 path->total_cost = startup_cost + run_cost;
2186}
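/*
 * For example, if input_tuples = 10000 and the presorted keys are estimated
 * to form input_groups = 100 groups, each group is costed as a tuplesort of
 * group_tuples = 100 rows.  The startup cost covers only the first group
 * (plus the input's startup cost and 1/100th of its run cost), which is why
 * an incremental sort can be much cheaper to start returning rows than a
 * full sort when a LIMIT is present.
 */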
2187
2188/*
2189 * cost_sort
2190 * Determines and returns the cost of sorting a relation, including
2191 * the cost of reading the input data.
2192 *
2193 * NOTE: some callers currently pass NIL for pathkeys because they
2194 * can't conveniently supply the sort keys. Since this routine doesn't
2195 * currently do anything with pathkeys anyway, that doesn't matter...
2196 * but if it ever does, it should react gracefully to lack of key data.
2197 * (Actually, the thing we'd most likely be interested in is just the number
2198 * of sort keys, which all callers *could* supply.)
2199 */
2200void
2201cost_sort(Path *path, PlannerInfo *root,
2202 List *pathkeys, int input_disabled_nodes,
2203 Cost input_cost, double tuples, int width,
2204 Cost comparison_cost, int sort_mem,
2205 double limit_tuples)
2206
2207{
2208 Cost startup_cost;
2209 Cost run_cost;
2210
2211 cost_tuplesort(&startup_cost, &run_cost,
2212 tuples, width,
2213 comparison_cost, sort_mem,
2214 limit_tuples);
2215
2216 startup_cost += input_cost;
2217
2218 /*
2219 * We can ignore PGS_CONSIDER_NONPARTIAL here, because if it's relevant,
2220 * it will have already affected the input path.
2221 */
2222 path->rows = tuples;
2224 path->startup_cost = startup_cost;
2225 path->total_cost = startup_cost + run_cost;
2226}
2227
2228/*
2229 * append_nonpartial_cost
2230 * Estimate the cost of the non-partial paths in a Parallel Append.
2231 * The non-partial paths are assumed to be the first "numpaths" paths
2232 * from the subpaths list, and to be in order of decreasing cost.
2233 */
2234static Cost
2235append_nonpartial_cost(List *subpaths, int numpaths, int parallel_workers)
2236{
2237 Cost *costarr;
2238 int arrlen;
2239 ListCell *l;
2240 ListCell *cell;
2241 int path_index;
2242 int min_index;
2243 int max_index;
2244
2245 if (numpaths == 0)
2246 return 0;
2247
2248 /*
2249 * Array length is number of workers or number of relevant paths,
2250 * whichever is less.
2251 */
2252 arrlen = Min(parallel_workers, numpaths);
2253 costarr = (Cost *) palloc(sizeof(Cost) * arrlen);
2254
2255 /* The first few paths will each be claimed by a different worker. */
2256 path_index = 0;
2257 foreach(cell, subpaths)
2258 {
2259 Path *subpath = (Path *) lfirst(cell);
2260
2261 if (path_index == arrlen)
2262 break;
2263 costarr[path_index++] = subpath->total_cost;
2264 }
2265
2266 /*
2267 * Since subpaths are sorted by decreasing cost, the last one will have
2268 * the minimum cost.
2269 */
2270 min_index = arrlen - 1;
2271
2272 /*
2273 * For each of the remaining subpaths, add its cost to the array element
2274 * with minimum cost.
2275 */
2276 for_each_cell(l, subpaths, cell)
2277 {
2278 Path *subpath = (Path *) lfirst(l);
2279
2280 /* Consider only the non-partial paths */
2281 if (path_index++ == numpaths)
2282 break;
2283
2284 costarr[min_index] += subpath->total_cost;
2285
2286 /* Update the new min cost array index */
2287 min_index = 0;
2288 for (int i = 0; i < arrlen; i++)
2289 {
2290 if (costarr[i] < costarr[min_index])
2291 min_index = i;
2292 }
2293 }
2294
2295 /* Return the highest cost from the array */
2296 max_index = 0;
2297 for (int i = 0; i < arrlen; i++)
2298 {
2299 if (costarr[i] > costarr[max_index])
2300 max_index = i;
2301 }
2302
2303 return costarr[max_index];
2304}
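/*
 * For instance, with parallel_workers = 3 and four non-partial subpaths
 * costing 30, 20, 10 and 8: the first three fill costarr as {30, 20, 10},
 * the remaining path is added to the cheapest slot giving {30, 20, 18},
 * and the function returns the maximum, 30, as the estimated time before
 * the last worker finishes its non-partial work.
 */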
2305
2306/*
2307 * cost_append
2308 * Determines and returns the cost of an Append node.
2309 */
2310void
2311cost_append(AppendPath *apath, PlannerInfo *root)
2312{
2313 RelOptInfo *rel = apath->path.parent;
2314 ListCell *l;
2316
2317 if (apath->path.parallel_workers == 0)
2319
2320 apath->path.disabled_nodes =
2321 (rel->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
2322 apath->path.startup_cost = 0;
2323 apath->path.total_cost = 0;
2324 apath->path.rows = 0;
2325
2326 if (apath->subpaths == NIL)
2327 return;
2328
2329 if (!apath->path.parallel_aware)
2330 {
2331 List *pathkeys = apath->path.pathkeys;
2332
2333 if (pathkeys == NIL)
2334 {
2335 Path *firstsubpath = (Path *) linitial(apath->subpaths);
2336
2337 /*
2338 * For an unordered, non-parallel-aware Append we take the startup
2339 * cost as the startup cost of the first subpath.
2340 */
2341 apath->path.startup_cost = firstsubpath->startup_cost;
2342
2343 /*
2344 * Compute rows, number of disabled nodes, and total cost as sums
2345 * of underlying subplan values.
2346 */
2347 foreach(l, apath->subpaths)
2348 {
2349 Path *subpath = (Path *) lfirst(l);
2350
2351 apath->path.rows += subpath->rows;
2352 apath->path.disabled_nodes += subpath->disabled_nodes;
2353 apath->path.total_cost += subpath->total_cost;
2354 }
2355 }
2356 else
2357 {
2358 /*
2359 * For an ordered, non-parallel-aware Append we take the startup
2360 * cost as the sum of the subpath startup costs. This ensures
2361 * that we don't underestimate the startup cost when a query's
2362 * LIMIT is such that several of the children have to be run to
2363 * satisfy it. This might be overkill --- another plausible hack
2364 * would be to take the Append's startup cost as the maximum of
2365 * the child startup costs. But we don't want to risk believing
2366 * that an ORDER BY LIMIT query can be satisfied at small cost
2367 * when the first child has small startup cost but later ones
2368 * don't. (If we had the ability to deal with nonlinear cost
2369 * interpolation for partial retrievals, we would not need to be
2370 * so conservative about this.)
2371 *
2372 * This case is also different from the above in that we have to
2373 * account for possibly injecting sorts into subpaths that aren't
2374 * natively ordered.
2375 */
2376 foreach(l, apath->subpaths)
2377 {
2378 Path *subpath = (Path *) lfirst(l);
2379 int presorted_keys;
2380 Path sort_path; /* dummy for result of
2381 * cost_sort/cost_incremental_sort */
2382
2383 if (!pathkeys_count_contained_in(pathkeys, subpath->pathkeys,
2384 &presorted_keys))
2385 {
2386 /*
2387 * We'll need to insert a Sort node, so include costs for
2388 * that. We choose to use incremental sort if it is
2389 * enabled and there are presorted keys; otherwise we use
2390 * full sort.
2391 *
2392 * We can use the parent's LIMIT if any, since we
2393 * certainly won't pull more than that many tuples from
2394 * any child.
2395 */
2396 if (enable_incremental_sort && presorted_keys > 0)
2397 {
2398 cost_incremental_sort(&sort_path,
2399 root,
2400 pathkeys,
2401 presorted_keys,
2402 subpath->disabled_nodes,
2403 subpath->startup_cost,
2404 subpath->total_cost,
2405 subpath->rows,
2406 subpath->pathtarget->width,
2407 0.0,
2408 work_mem,
2409 apath->limit_tuples);
2410 }
2411 else
2412 {
2413 cost_sort(&sort_path,
2414 root,
2415 pathkeys,
2416 subpath->disabled_nodes,
2417 subpath->total_cost,
2418 subpath->rows,
2419 subpath->pathtarget->width,
2420 0.0,
2421 work_mem,
2422 apath->limit_tuples);
2423 }
2424
2425 subpath = &sort_path;
2426 }
2427
2428 apath->path.rows += subpath->rows;
2429 apath->path.disabled_nodes += subpath->disabled_nodes;
2430 apath->path.startup_cost += subpath->startup_cost;
2431 apath->path.total_cost += subpath->total_cost;
2432 }
2433 }
2434 }
2435 else /* parallel-aware */
2436 {
2437 int i = 0;
2438 double parallel_divisor = get_parallel_divisor(&apath->path);
2439
2440 /* Parallel-aware Append never produces ordered output. */
2441 Assert(apath->path.pathkeys == NIL);
2442
2443 /* Calculate startup cost. */
2444 foreach(l, apath->subpaths)
2445 {
2446 Path *subpath = (Path *) lfirst(l);
2447
2448 /*
2449 * Append will start returning tuples when the child node having
2450 * lowest startup cost is done setting up. We consider only the
2451 * first few subplans that immediately get a worker assigned.
2452 */
2453 if (i == 0)
2454 apath->path.startup_cost = subpath->startup_cost;
2455 else if (i < apath->path.parallel_workers)
2456 apath->path.startup_cost = Min(apath->path.startup_cost,
2457 subpath->startup_cost);
2458
2459 /*
2460 * Apply parallel divisor to subpaths. Scale the number of rows
2461 * for each partial subpath based on the ratio of the parallel
2462 * divisor originally used for the subpath to the one we adopted.
2463 * Also add the cost of partial paths to the total cost, but
2464 * ignore non-partial paths for now.
2465 */
2466 if (i < apath->first_partial_path)
2467 apath->path.rows += subpath->rows / parallel_divisor;
2468 else
2469 {
2470 double subpath_parallel_divisor;
2471
2472 subpath_parallel_divisor = get_parallel_divisor(subpath);
2473 apath->path.rows += subpath->rows * (subpath_parallel_divisor /
2474 parallel_divisor);
2475 apath->path.total_cost += subpath->total_cost;
2476 }
2477
2478 apath->path.disabled_nodes += subpath->disabled_nodes;
2479 apath->path.rows = clamp_row_est(apath->path.rows);
2480
2481 i++;
2482 }
2483
2484 /* Add cost for non-partial subpaths. */
2485 apath->path.total_cost +=
2486 append_nonpartial_cost(apath->subpaths,
2487 apath->first_partial_path,
2488 apath->path.parallel_workers);
2489 }
2490
2491 /*
2492 * Although Append does not do any selection or projection, it's not free;
2493 * add a small per-tuple overhead.
2494 */
2495 apath->path.total_cost +=
2496 cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * apath->path.rows;
2497}
2498
2499/*
2500 * cost_merge_append
2501 * Determines and returns the cost of a MergeAppend node.
2502 *
2503 * MergeAppend merges several pre-sorted input streams, using a heap that
2504 * at any given instant holds the next tuple from each stream. If there
2505 * are N streams, we need about N*log2(N) tuple comparisons to construct
2506 * the heap at startup, and then for each output tuple, about log2(N)
2507 * comparisons to replace the top entry.
2508 *
2509 * (The effective value of N will drop once some of the input streams are
2510 * exhausted, but it seems unlikely to be worth trying to account for that.)
2511 *
2512 * The heap is never spilled to disk, since we assume N is not very large.
2513 * So this is much simpler than cost_sort.
2514 *
2515 * As in cost_sort, we charge two operator evals per tuple comparison.
2516 *
2517 * 'pathkeys' is a list of sort keys
2518 * 'n_streams' is the number of input streams
2519 * 'input_disabled_nodes' is the sum of the input streams' disabled node counts
2520 * 'input_startup_cost' is the sum of the input streams' startup costs
2521 * 'input_total_cost' is the sum of the input streams' total costs
2522 * 'tuples' is the number of tuples in all the streams
2523 */
2524void
2525cost_merge_append(Path *path, PlannerInfo *root,
2526 List *pathkeys, int n_streams,
2527 int input_disabled_nodes,
2528 Cost input_startup_cost, Cost input_total_cost,
2529 double tuples)
2530{
2531 RelOptInfo *rel = path->parent;
2532 Cost startup_cost = 0;
2533 Cost run_cost = 0;
2534 Cost comparison_cost;
2535 double N;
2536 double logN;
2538
2539 if (path->parallel_workers == 0)
2541
2542 /*
2543 * Avoid log(0)...
2544 */
2545 N = (n_streams < 2) ? 2.0 : (double) n_streams;
2546 logN = LOG2(N);
2547
2548 /* Assumed cost per tuple comparison */
2549 comparison_cost = 2.0 * cpu_operator_cost;
2550
2551 /* Heap creation cost */
2552 startup_cost += comparison_cost * N * logN;
2553
2554 /* Per-tuple heap maintenance cost */
2555 run_cost += tuples * comparison_cost * logN;
2556
2557 /*
2558 * Although MergeAppend does not do any selection or projection, it's not
2559 * free; add a small per-tuple overhead.
2560 */
2561 run_cost += cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * tuples;
2562
2563 path->disabled_nodes =
2564 (rel->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
2566 path->startup_cost = startup_cost + input_startup_cost;
2567 path->total_cost = startup_cost + run_cost + input_total_cost;
2568}
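/*
 * For example, merging N = 4 presorted streams with the default
 * cpu_operator_cost of 0.0025 gives comparison_cost = 0.005, a heap build
 * cost of 0.005 * 4 * log2(4) = 0.04, and a per-output-tuple heap
 * maintenance cost of 0.005 * log2(4) = 0.01, plus the
 * APPEND_CPU_COST_MULTIPLIER overhead charged per tuple.
 */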
2569
2570/*
2571 * cost_material
2572 * Determines and returns the cost of materializing a relation, including
2573 * the cost of reading the input data.
2574 *
2575 * If the total volume of data to materialize exceeds work_mem, we will need
2576 * to write it to disk, so the cost is much higher in that case.
2577 *
2578 * Note that here we are estimating the costs for the first scan of the
2579 * relation, so the materialization is all overhead --- any savings will
2580 * occur only on rescan, which is estimated in cost_rescan.
2581 */
2582void
2583cost_material(Path *path,
2584 bool enabled, int input_disabled_nodes,
2585 Cost input_startup_cost, Cost input_total_cost,
2586 double tuples, int width)
2587{
2588 Cost startup_cost = input_startup_cost;
2589 Cost run_cost = input_total_cost - input_startup_cost;
2590 double nbytes = relation_byte_size(tuples, width);
2591 double work_mem_bytes = work_mem * (Size) 1024;
2592
2593 path->rows = tuples;
2594
2595 /*
2596 * Whether spilling or not, charge 2x cpu_operator_cost per tuple to
2597 * reflect bookkeeping overhead. (This rate must be more than what
2598 * cost_rescan charges for materialize, ie, cpu_operator_cost per tuple;
2599 * if it is exactly the same then there will be a cost tie between
2600 * nestloop with A outer, materialized B inner and nestloop with B outer,
2601 * materialized A inner. The extra cost ensures we'll prefer
2602 * materializing the smaller rel.) Note that this is normally a good deal
2603 * less than cpu_tuple_cost; which is OK because a Material plan node
2604 * doesn't do qual-checking or projection, so it's got less overhead than
2605 * most plan nodes.
2606 */
2607 run_cost += 2 * cpu_operator_cost * tuples;
2608
2609 /*
2610 * If we will spill to disk, charge at the rate of seq_page_cost per page.
2611 * This cost is assumed to be evenly spread through the plan run phase,
2612 * which isn't exactly accurate but our cost model doesn't allow for
2613 * nonuniform costs within the run phase.
2614 */
2615 if (nbytes > work_mem_bytes)
2616 {
2617 double npages = ceil(nbytes / BLCKSZ);
2618
2619 run_cost += seq_page_cost * npages;
2620 }
2621
2622 path->disabled_nodes = input_disabled_nodes + (enabled ? 0 : 1);
2623 path->startup_cost = startup_cost;
2624 path->total_cost = startup_cost + run_cost;
2625}
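/*
 * For example, with work_mem = 64MB, materializing 100000 tuples of width
 * 500 (roughly 50MB plus per-tuple overhead) is expected to stay in memory,
 * so only the 2 * cpu_operator_cost charge per tuple applies; if the
 * estimate instead came to 100MB, about 12800 pages would be written,
 * adding 12800 * seq_page_cost to the run cost.
 */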
2626
2627/*
2628 * cost_memoize_rescan
2629 * Determines the estimated cost of rescanning a Memoize node.
2630 *
2631 * In order to estimate this, we must gain knowledge of how often we expect to
2632 * be called and how many distinct sets of parameters we are likely to be
2633 * called with. If we expect a good cache hit ratio, then we can set our
2634 * costs to account for that hit ratio, plus a little bit of cost for the
2635 * caching itself. Caching will not work out well if we expect to be called
2636 * with too many distinct parameter values. The worst-case here is that we
2637 * never see any parameter value twice, in which case we'd never get a cache
2638 * hit and caching would be a complete waste of effort.
2639 */
2640static void
2641cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
2642 Cost *rescan_startup_cost, Cost *rescan_total_cost)
2643{
2644 EstimationInfo estinfo;
2645 ListCell *lc;
2646 Cost input_startup_cost = mpath->subpath->startup_cost;
2647 Cost input_total_cost = mpath->subpath->total_cost;
2648 double tuples = mpath->subpath->rows;
2649 Cardinality est_calls = mpath->est_calls;
2650 int width = mpath->subpath->pathtarget->width;
2651
2652 double hash_mem_bytes;
2653 double est_entry_bytes;
2654 double est_cache_entries;
2655 Cardinality ndistinct;
2656 double evict_ratio;
2657 double hit_ratio;
2658 Cost startup_cost;
2659 Cost total_cost;
2660
2661 /* available cache space */
2662 hash_mem_bytes = get_hash_memory_limit();
2663
2664 /*
2665 * Set the number of bytes each cache entry should consume in the cache.
2666 * To provide us with better estimations on how many cache entries we can
2667 * store at once, we make a call to the executor here to ask it what
2668 * memory overheads there are for a single cache entry.
2669 */
2670 est_entry_bytes = relation_byte_size(tuples, width) +
2671 ExecEstimateCacheEntryOverheadBytes(tuples);
2672
2673 /* include the estimated width for the cache keys */
2674 foreach(lc, mpath->param_exprs)
2675 est_entry_bytes += get_expr_width(root, (Node *) lfirst(lc));
2676
2677 /* estimate on the upper limit of cache entries we can hold at once */
2678 est_cache_entries = floor(hash_mem_bytes / est_entry_bytes);
2679
2680 /* estimate on the distinct number of parameter values */
2681 ndistinct = estimate_num_groups(root, mpath->param_exprs, est_calls, NULL,
2682 &estinfo);
2683
2684 /*
2685 * When the estimation fell back on using a default value, it's a bit too
2686 * risky to assume that it's ok to use a Memoize node. The use of a
2687 * default could cause us to use a Memoize node when it's really
2688 * inappropriate to do so. If we see that this has been done, then we'll
2689 * assume that every call will have unique parameters, which will almost
2690 * certainly mean a MemoizePath will never survive add_path().
2691 */
2692 if ((estinfo.flags & SELFLAG_USED_DEFAULT) != 0)
2693 ndistinct = est_calls;
2694
2695 /* Remember the ndistinct estimate for EXPLAIN */
2696 mpath->est_unique_keys = ndistinct;
2697
2698 /*
2699 * Since we've already estimated the maximum number of entries we can
2700 * store at once and know the estimated number of distinct values we'll be
2701 * called with, we'll take this opportunity to set the path's est_entries.
2702 * This will ultimately determine the hash table size that the executor
2703 * will use. If we leave this at zero, the executor will just choose the
2704 * size itself. Really this is not the right place to do this, but it's
2705 * convenient since everything is already calculated.
2706 */
2707 mpath->est_entries = Min(Min(ndistinct, est_cache_entries),
2708 PG_UINT32_MAX);
2709
2710 /*
2711 * When the number of distinct parameter values is above the amount we can
2712 * store in the cache, then we'll have to evict some entries from the
2713 * cache. This is not free. Here we estimate how often we'll incur the
2714 * cost of that eviction.
2715 */
2716 evict_ratio = 1.0 - Min(est_cache_entries, ndistinct) / ndistinct;
2717
2718 /*
2719 * In order to estimate how costly a single scan will be, we need to
2720 * attempt to estimate what the cache hit ratio will be. To do that we
2721 * must look at how many scans are estimated in total for this node and
2722 * how many of those scans we expect to get a cache hit.
2723 */
2724 hit_ratio = ((est_calls - ndistinct) / est_calls) *
2725 (est_cache_entries / Max(ndistinct, est_cache_entries));
2726
2727 /* Remember the hit ratio estimate for EXPLAIN */
2728 mpath->est_hit_ratio = hit_ratio;
2729
2730 Assert(hit_ratio >= 0 && hit_ratio <= 1.0);
2731
2732 /*
2733 * Set the total_cost accounting for the expected cache hit ratio. We
2734 * also add on a cpu_operator_cost to account for a cache lookup. This
2735 * will happen regardless of whether it's a cache hit or not.
2736 */
2737 total_cost = input_total_cost * (1.0 - hit_ratio) + cpu_operator_cost;
2738
2739 /* Now adjust the total cost to account for cache evictions */
2740
2741 /* Charge a cpu_tuple_cost for evicting the actual cache entry */
2742 total_cost += cpu_tuple_cost * evict_ratio;
2743
2744 /*
2745 * Charge a 10th of cpu_operator_cost to evict every tuple in that entry.
2746 * The per-tuple eviction is really just a pfree, so charging a whole
2747 * cpu_operator_cost seems a little excessive.
2748 */
2749 total_cost += cpu_operator_cost / 10.0 * evict_ratio * tuples;
2750
2751 /*
2752 * Now adjust for storing things in the cache, since that's not free
2753 * either. Everything must go in the cache. We don't proportion this
2754 * over any ratio, just apply it once for the scan. We charge a
2755 * cpu_tuple_cost for the creation of the cache entry and also a
2756 * cpu_operator_cost for each tuple we expect to cache.
2757 */
2758 total_cost += cpu_tuple_cost + cpu_operator_cost * tuples;
2759
2760 /*
2761 * Getting the first row must also be proportioned according to the
2762 * expected cache hit ratio.
2763 */
2764 startup_cost = input_startup_cost * (1.0 - hit_ratio);
2765
2766 /*
2767 * Additionally we charge a cpu_tuple_cost to account for cache lookups,
2768 * which we'll do regardless of whether it was a cache hit or not.
2769 */
2770 startup_cost += cpu_tuple_cost;
2771
2772 *rescan_startup_cost = startup_cost;
2773 *rescan_total_cost = total_cost;
2774}
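/*
 * As a concrete instance of the hit-ratio formula above: with est_calls =
 * 1000, ndistinct = 100 and room for est_cache_entries = 500 entries,
 * hit_ratio = ((1000 - 100) / 1000) * (500 / 500) = 0.9 and evict_ratio = 0,
 * so the rescan total cost is roughly one tenth of the subpath's total cost
 * plus the per-lookup and caching charges.
 */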
2775
2776/*
2777 * cost_agg
2778 * Determines and returns the cost of performing an Agg plan node,
2779 * including the cost of its input.
2780 *
2781 * aggcosts can be NULL when there are no actual aggregate functions (i.e.,
2782 * we are using a hashed Agg node just to do grouping).
2783 *
2784 * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
2785 * are for appropriately-sorted input.
2786 */
2787void
2788cost_agg(Path *path, PlannerInfo *root,
2789 AggStrategy aggstrategy, const AggClauseCosts *aggcosts,
2790 int numGroupCols, double numGroups,
2791 List *quals,
2792 int disabled_nodes,
2793 Cost input_startup_cost, Cost input_total_cost,
2794 double input_tuples, double input_width)
2795{
2796 double output_tuples;
2797 Cost startup_cost;
2798 Cost total_cost;
2799 const AggClauseCosts dummy_aggcosts = {0};
2800
2801 /* Use all-zero per-aggregate costs if NULL is passed */
2802 if (aggcosts == NULL)
2803 {
2804 Assert(aggstrategy == AGG_HASHED);
2805 aggcosts = &dummy_aggcosts;
2806 }
2807
2808 /*
2809 * The transCost.per_tuple component of aggcosts should be charged once
2810 * per input tuple, corresponding to the costs of evaluating the aggregate
2811 * transfns and their input expressions. The finalCost.per_tuple component
2812 * is charged once per output tuple, corresponding to the costs of
2813 * evaluating the finalfns. Startup costs are of course charged but once.
2814 *
2815 * If we are grouping, we charge an additional cpu_operator_cost per
2816 * grouping column per input tuple for grouping comparisons.
2817 *
2818 * We will produce a single output tuple if not grouping, and a tuple per
2819 * group otherwise. We charge cpu_tuple_cost for each output tuple.
2820 *
2821 * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
2822 * same total CPU cost, but AGG_SORTED has lower startup cost. If the
2823 * input path is already sorted appropriately, AGG_SORTED should be
2824 * preferred (since it has no risk of memory overflow). This will happen
2825 * as long as the computed total costs are indeed exactly equal --- but if
2826 * there's roundoff error we might do the wrong thing. So be sure that
2827 * the computations below form the same intermediate values in the same
2828 * order.
2829 */
2830 if (aggstrategy == AGG_PLAIN)
2831 {
2832 startup_cost = input_total_cost;
2833 startup_cost += aggcosts->transCost.startup;
2834 startup_cost += aggcosts->transCost.per_tuple * input_tuples;
2835 startup_cost += aggcosts->finalCost.startup;
2836 startup_cost += aggcosts->finalCost.per_tuple;
2837 /* we aren't grouping */
2838 total_cost = startup_cost + cpu_tuple_cost;
2839 output_tuples = 1;
2840 }
2841 else if (aggstrategy == AGG_SORTED || aggstrategy == AGG_MIXED)
2842 {
2843 /* Here we are able to deliver output on-the-fly */
2844 startup_cost = input_startup_cost;
2845 total_cost = input_total_cost;
2846 if (aggstrategy == AGG_MIXED && !enable_hashagg)
2847 ++disabled_nodes;
2848 /* calcs phrased this way to match HASHED case, see note above */
2849 total_cost += aggcosts->transCost.startup;
2850 total_cost += aggcosts->transCost.per_tuple * input_tuples;
2851 total_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
2852 total_cost += aggcosts->finalCost.startup;
2853 total_cost += aggcosts->finalCost.per_tuple * numGroups;
2854 total_cost += cpu_tuple_cost * numGroups;
2855 output_tuples = numGroups;
2856 }
2857 else
2858 {
2859 /* must be AGG_HASHED */
2860 startup_cost = input_total_cost;
2861 if (!enable_hashagg)
2862 ++disabled_nodes;
2863 startup_cost += aggcosts->transCost.startup;
2864 startup_cost += aggcosts->transCost.per_tuple * input_tuples;
2865 /* cost of computing hash value */
2866 startup_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
2867 startup_cost += aggcosts->finalCost.startup;
2868
2869 total_cost = startup_cost;
2870 total_cost += aggcosts->finalCost.per_tuple * numGroups;
2871 /* cost of retrieving from hash table */
2872 total_cost += cpu_tuple_cost * numGroups;
2873 output_tuples = numGroups;
2874 }
2875
2876 /*
2877 * Add the disk costs of hash aggregation that spills to disk.
2878 *
2879 * Groups that go into the hash table stay in memory until finalized, so
2880 * spilling and reprocessing tuples doesn't incur additional invocations
2881 * of transCost or finalCost. Furthermore, the computed hash value is
2882 * stored with the spilled tuples, so we don't incur extra invocations of
2883 * the hash function.
2884 *
2885 * Hash Agg begins returning tuples after the first batch is complete.
2886 * Accrue writes (spilled tuples) to startup_cost and to total_cost;
2887 * accrue reads only to total_cost.
2888 */
2889 if (aggstrategy == AGG_HASHED || aggstrategy == AGG_MIXED)
2890 {
2891 double pages;
2892 double pages_written = 0.0;
2893 double pages_read = 0.0;
2894 double spill_cost;
2895 double hashentrysize;
2896 double nbatches;
2897 Size mem_limit;
2898 uint64 ngroups_limit;
2899 int num_partitions;
2900 int depth;
2901
2902 /*
2903 * Estimate number of batches based on the computed limits. If less
2904 * than or equal to one, all groups are expected to fit in memory;
2905 * otherwise we expect to spill.
2906 */
2907 hashentrysize = hash_agg_entry_size(list_length(root->aggtransinfos),
2908 input_width,
2909 aggcosts->transitionSpace);
2910 hash_agg_set_limits(hashentrysize, numGroups, 0, &mem_limit,
2911 &ngroups_limit, &num_partitions);
2912
2913 nbatches = Max((numGroups * hashentrysize) / mem_limit,
2914 numGroups / ngroups_limit);
2915
2916 nbatches = Max(ceil(nbatches), 1.0);
2917 num_partitions = Max(num_partitions, 2);
2918
2919 /*
2920 * The number of partitions can change at different levels of
2921 * recursion; but for the purposes of this calculation assume it stays
2922 * constant.
2923 */
2924 depth = ceil(log(nbatches) / log(num_partitions));
2925
2926 /*
2927 * Estimate number of pages read and written. For each level of
2928 * recursion, a tuple must be written and then later read.
2929 */
2930 pages = relation_byte_size(input_tuples, input_width) / BLCKSZ;
2931 pages_written = pages_read = pages * depth;
2932
2933 /*
2934 * HashAgg has somewhat worse IO behavior than Sort on typical
2935 * hardware/OS combinations. Account for this with a generic penalty.
2936 */
2937 pages_read *= 2.0;
2938 pages_written *= 2.0;
2939
2940 startup_cost += pages_written * random_page_cost;
2941 total_cost += pages_written * random_page_cost;
2942 total_cost += pages_read * seq_page_cost;
2943
2944 /* account for CPU cost of spilling a tuple and reading it back */
2945 spill_cost = depth * input_tuples * 2.0 * cpu_tuple_cost;
2946 startup_cost += spill_cost;
2947 total_cost += spill_cost;
2948 }
2949
2950 /*
2951 * If there are quals (HAVING quals), account for their cost and
2952 * selectivity.
2953 */
2954 if (quals)
2955 {
2956 QualCost qual_cost;
2957
2958 cost_qual_eval(&qual_cost, quals, root);
2959 startup_cost += qual_cost.startup;
2960 total_cost += qual_cost.startup + output_tuples * qual_cost.per_tuple;
2961
2962 output_tuples = clamp_row_est(output_tuples *
2963 clauselist_selectivity(root,
2964 quals,
2965 0,
2966 JOIN_INNER,
2967 NULL));
2968 }
2969
2970 path->rows = output_tuples;
2971 path->disabled_nodes = disabled_nodes;
2972 path->startup_cost = startup_cost;
2973 path->total_cost = total_cost;
2974}
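/*
 * Example of the spill arithmetic above: if nbatches works out to 64 and
 * num_partitions to 4, depth = ceil(log(64)/log(4)) = 3, so each input page
 * is written and read three times; after the generic 2x penalty,
 * pages_written = pages_read = 6 * pages, charged at random_page_cost for
 * the writes and seq_page_cost for the reads.
 */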
2975
2976/*
2977 * get_windowclause_startup_tuples
2978 * Estimate how many tuples we'll need to fetch from a WindowAgg's
2979 * subnode before we can output the first WindowAgg tuple.
2980 *
2981 * How many tuples need to be read depends on the WindowClause. For example,
2982 * a WindowClause with no PARTITION BY and no ORDER BY requires that all
2983 * subnode tuples are read and aggregated before the WindowAgg can output
2984 * anything. If there's a PARTITION BY, then we only need to look at tuples
2985 * in the first partition. Here we attempt to estimate just how many
2986 * 'input_tuples' the WindowAgg will need to read for the given WindowClause
2987 * before the first tuple can be output.
2988 */
2989static double
2990get_windowclause_startup_tuples(PlannerInfo *root, WindowClause *wc,
2991 double input_tuples)
2992{
2993 int frameOptions = wc->frameOptions;
2994 double partition_tuples;
2995 double return_tuples;
2996 double peer_tuples;
2997
2998 /*
2999 * First, figure out how many partitions there are likely to be and set
3000 * partition_tuples according to that estimate.
3001 */
3002 if (wc->partitionClause != NIL)
3003 {
3004 double num_partitions;
3005 List *partexprs = get_sortgrouplist_exprs(wc->partitionClause,
3006 root->parse->targetList);
3007
3008 num_partitions = estimate_num_groups(root, partexprs, input_tuples,
3009 NULL, NULL);
3010 list_free(partexprs);
3011
3012 partition_tuples = input_tuples / num_partitions;
3013 }
3014 else
3015 {
3016 /* all tuples belong to the same partition */
3017 partition_tuples = input_tuples;
3018 }
3019
3020 /* estimate the number of tuples in each peer group */
3021 if (wc->orderClause != NIL)
3022 {
3023 double num_groups;
3024 List *orderexprs;
3025
3026 orderexprs = get_sortgrouplist_exprs(wc->orderClause,
3027 root->parse->targetList);
3028
3029 /* estimate out how many peer groups there are in the partition */
3030 num_groups = estimate_num_groups(root, orderexprs,
3031 partition_tuples, NULL,
3032 NULL);
3033 list_free(orderexprs);
3034 peer_tuples = partition_tuples / num_groups;
3035 }
3036 else
3037 {
3038 /* no ORDER BY so only 1 tuple belongs in each peer group */
3039 peer_tuples = 1.0;
3040 }
3041
3042 if (frameOptions & FRAMEOPTION_END_UNBOUNDED_FOLLOWING)
3043 {
3044 /* include all partition rows */
3045 return_tuples = partition_tuples;
3046 }
3047 else if (frameOptions & FRAMEOPTION_END_CURRENT_ROW)
3048 {
3049 if (frameOptions & FRAMEOPTION_ROWS)
3050 {
3051 /* just count the current row */
3052 return_tuples = 1.0;
3053 }
3054 else if (frameOptions & (FRAMEOPTION_RANGE | FRAMEOPTION_GROUPS))
3055 {
3056 /*
3057 * When in RANGE/GROUPS mode, it's more complex. If there's no
3058 * ORDER BY, then all rows in the partition are peers, otherwise
3059 * we'll need to read the first group of peers.
3060 */
3061 if (wc->orderClause == NIL)
3062 return_tuples = partition_tuples;
3063 else
3064 return_tuples = peer_tuples;
3065 }
3066 else
3067 {
3068 /*
3069 * Something new we don't support yet? This needs attention.
3070 * We'll just return 1.0 in the meantime.
3071 */
3072 Assert(false);
3073 return_tuples = 1.0;
3074 }
3075 }
3076 else if (frameOptions & FRAMEOPTION_END_OFFSET_PRECEDING)
3077 {
3078 /*
3079 * BETWEEN ... AND N PRECEDING will only need to read the WindowAgg's
3080 * subnode after N ROWS/RANGES/GROUPS. N can be 0, but not negative,
3081 * so we'll just assume only the current row needs to be read to fetch
3082 * the first WindowAgg row.
3083 */
3084 return_tuples = 1.0;
3085 }
3086 else if (frameOptions & FRAMEOPTION_END_OFFSET_FOLLOWING)
3087 {
3088 Const *endOffset = (Const *) wc->endOffset;
3089 double end_offset_value;
3090
3091 /* try and figure out the value specified in the endOffset. */
3092 if (IsA(endOffset, Const))
3093 {
3094 if (endOffset->constisnull)
3095 {
3096 /*
3097 * NULLs are not allowed, but currently, there's no code to
3098 * error out if there's a NULL Const. We'll only discover
3099 * this during execution. For now, just pretend everything is
3100 * fine and assume that just the first row/range/group will be
3101 * needed.
3102 */
3103 end_offset_value = 1.0;
3104 }
3105 else
3106 {
3107 switch (endOffset->consttype)
3108 {
3109 case INT2OID:
3110 end_offset_value =
3111 (double) DatumGetInt16(endOffset->constvalue);
3112 break;
3113 case INT4OID:
3114 end_offset_value =
3115 (double) DatumGetInt32(endOffset->constvalue);
3116 break;
3117 case INT8OID:
3118 end_offset_value =
3119 (double) DatumGetInt64(endOffset->constvalue);
3120 break;
3121 default:
3122 end_offset_value =
3123 partition_tuples / peer_tuples *
3124 DEFAULT_INEQ_SEL;
3125 break;
3126 }
3127 }
3128 }
3129 else
3130 {
3131 /*
3132 * When the end bound is not a Const, we'll just need to guess. We
3133 * just make use of DEFAULT_INEQ_SEL.
3134 */
3135 end_offset_value =
3136 partition_tuples / peer_tuples * DEFAULT_INEQ_SEL;
3137 }
3138
3139 if (frameOptions & FRAMEOPTION_ROWS)
3140 {
3141 /* include the N FOLLOWING and the current row */
3142 return_tuples = end_offset_value + 1.0;
3143 }
3144 else if (frameOptions & (FRAMEOPTION_RANGE | FRAMEOPTION_GROUPS))
3145 {
3146 /* include N FOLLOWING ranges/group and the initial range/group */
3147 return_tuples = peer_tuples * (end_offset_value + 1.0);
3148 }
3149 else
3150 {
3151 /*
3152 * Something new we don't support yet? This needs attention.
3153 * We'll just return 1.0 in the meantime.
3154 */
3155 Assert(false);
3156 return_tuples = 1.0;
3157 }
3158 }
3159 else
3160 {
3161 /*
3162 * Something new we don't support yet? This needs attention. We'll
3163 * just return 1.0 in the meantime.
3164 */
3165 Assert(false);
3166 return_tuples = 1.0;
3167 }
3168
3169 if (wc->partitionClause != NIL || wc->orderClause != NIL)
3170 {
3171 /*
3172 * Cap the return value to the estimated partition tuples and account
3173 * for the extra tuple WindowAgg will need to read to confirm the next
3174 * tuple does not belong to the same partition or peer group.
3175 */
3176 return_tuples = Min(return_tuples + 1.0, partition_tuples);
3177 }
3178 else
3179 {
3180 /*
3181 * Cap the return value so it's never higher than the expected tuples
3182 * in the partition.
3183 */
3184 return_tuples = Min(return_tuples, partition_tuples);
3185 }
3186
3187 /*
3188 * We needn't worry about any EXCLUDE options as those only exclude rows
3189 * from being aggregated, not from being read from the WindowAgg's
3190 * subnode.
3191 */
3192
3193 return clamp_row_est(return_tuples);
3194}
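/*
 * For example, with input_tuples = 10000, a PARTITION BY producing 100
 * partitions and an ORDER BY producing 20 peer groups per partition:
 * partition_tuples = 100 and peer_tuples = 5.  For a frame ending at
 * CURRENT ROW in RANGE mode the estimate is the first peer group, 5 tuples,
 * bumped by one (and capped at partition_tuples) to 6 to account for
 * reading the tuple that ends the group.
 */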
3195
3196/*
3197 * cost_windowagg
3198 * Determines and returns the cost of performing a WindowAgg plan node,
3199 * including the cost of its input.
3200 *
3201 * Input is assumed already properly sorted.
3202 */
3203void
3204cost_windowagg(Path *path, PlannerInfo *root,
3205 List *windowFuncs, WindowClause *winclause,
3206 int input_disabled_nodes,
3207 Cost input_startup_cost, Cost input_total_cost,
3208 double input_tuples)
3209{
3210 Cost startup_cost;
3211 Cost total_cost;
3212 double startup_tuples;
3213 int numPartCols;
3214 int numOrderCols;
3215 ListCell *lc;
3216
3217 numPartCols = list_length(winclause->partitionClause);
3218 numOrderCols = list_length(winclause->orderClause);
3219
3220 startup_cost = input_startup_cost;
3221 total_cost = input_total_cost;
3222
3223 /*
3224 * Window functions are assumed to cost their stated execution cost, plus
3225 * the cost of evaluating their input expressions, per tuple. Since they
3226 * may in fact evaluate their inputs at multiple rows during each cycle,
3227 * this could be a drastic underestimate; but without a way to know how
3228 * many rows the window function will fetch, it's hard to do better. In
3229 * any case, it's a good estimate for all the built-in window functions,
3230 * so we'll just do this for now.
3231 */
3232 foreach(lc, windowFuncs)
3233 {
3234 WindowFunc *wfunc = lfirst_node(WindowFunc, lc);
3235 Cost wfunccost;
3236 QualCost argcosts;
3237
3238 argcosts.startup = argcosts.per_tuple = 0;
3239 add_function_cost(root, wfunc->winfnoid, (Node *) wfunc,
3240 &argcosts);
3241 startup_cost += argcosts.startup;
3242 wfunccost = argcosts.per_tuple;
3243
3244 /* also add the input expressions' cost to per-input-row costs */
3245 cost_qual_eval_node(&argcosts, (Node *) wfunc->args, root);
3246 startup_cost += argcosts.startup;
3247 wfunccost += argcosts.per_tuple;
3248
3249 /*
3250 * Add the filter's cost to per-input-row costs. XXX We should reduce
3251 * input expression costs according to filter selectivity.
3252 */
3253 cost_qual_eval_node(&argcosts, (Node *) wfunc->aggfilter, root);
3254 startup_cost += argcosts.startup;
3255 wfunccost += argcosts.per_tuple;
3256
3257 total_cost += wfunccost * input_tuples;
3258 }
3259
3260 /*
3261 * We also charge cpu_operator_cost per grouping column per tuple for
3262 * grouping comparisons, plus cpu_tuple_cost per tuple for general
3263 * overhead.
3264 *
3265 * XXX this neglects costs of spooling the data to disk when it overflows
3266 * work_mem. Sooner or later that should get accounted for.
3267 */
3268 total_cost += cpu_operator_cost * (numPartCols + numOrderCols) * input_tuples;
3269 total_cost += cpu_tuple_cost * input_tuples;
3270
3271 path->rows = input_tuples;
3272 path->disabled_nodes = input_disabled_nodes;
3273 path->startup_cost = startup_cost;
3274 path->total_cost = total_cost;
3275
3276 /*
3277 * Also, take into account how many tuples we need to read from the
3278 * subnode in order to produce the first tuple from the WindowAgg. To do
3279 * this we proportion the run cost (total cost not including startup cost)
3280 * over the estimated startup tuples. We already included the startup
3281 * cost of the subnode, so we only need to do this when the estimated
3282 * startup tuples is above 1.0.
3283 */
3284 startup_tuples = get_windowclause_startup_tuples(root, winclause,
3285 input_tuples);
3286
3287 if (startup_tuples > 1.0)
3288 path->startup_cost += (total_cost - startup_cost) / input_tuples *
3289 (startup_tuples - 1.0);
3290}
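/*
 * For instance, if get_windowclause_startup_tuples() estimates that 100 of
 * 10000 input tuples must be read before the first output row, 99/10000ths
 * of the node's run cost is shifted into its startup cost, leaving the
 * total cost unchanged.
 */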
3291
3292/*
3293 * cost_group
3294 * Determines and returns the cost of performing a Group plan node,
3295 * including the cost of its input.
3296 *
3297 * Note: caller must ensure that input costs are for appropriately-sorted
3298 * input.
3299 */
3300void
3301cost_group(Path *path, PlannerInfo *root,
3302 int numGroupCols, double numGroups,
3303 List *quals,
3304 int input_disabled_nodes,
3305 Cost input_startup_cost, Cost input_total_cost,
3306 double input_tuples)
3307{
3308 double output_tuples;
3309 Cost startup_cost;
3310 Cost total_cost;
3311
3312 output_tuples = numGroups;
3313 startup_cost = input_startup_cost;
3314 total_cost = input_total_cost;
3315
3316 /*
3317 * Charge one cpu_operator_cost per comparison per input tuple. We assume
3318 * all columns get compared for most of the tuples.
3319 */
3320 total_cost += cpu_operator_cost * input_tuples * numGroupCols;
3321
3322 /*
3323 * If there are quals (HAVING quals), account for their cost and
3324 * selectivity.
3325 */
3326 if (quals)
3327 {
3328 QualCost qual_cost;
3329
3330 cost_qual_eval(&qual_cost, quals, root);
3331 startup_cost += qual_cost.startup;
3332 total_cost += qual_cost.startup + output_tuples * qual_cost.per_tuple;
3333
3334 output_tuples = clamp_row_est(output_tuples *
3335 clauselist_selectivity(root,
3336 quals,
3337 0,
3338 JOIN_INNER,
3339 NULL));
3340 }
3341
3342 path->rows = output_tuples;
3343 path->disabled_nodes = input_disabled_nodes;
3344 path->startup_cost = startup_cost;
3345 path->total_cost = total_cost;
3346}
3347
3348/*
3349 * initial_cost_nestloop
3350 * Preliminary estimate of the cost of a nestloop join path.
3351 *
3352 * This must quickly produce lower-bound estimates of the path's startup and
3353 * total costs. If we are unable to eliminate the proposed path from
3354 * consideration using the lower bounds, final_cost_nestloop will be called
3355 * to obtain the final estimates.
3356 *
3357 * The exact division of labor between this function and final_cost_nestloop
3358 * is private to them, and represents a tradeoff between speed of the initial
3359 * estimate and getting a tight lower bound. We choose to not examine the
3360 * join quals here, since that's by far the most expensive part of the
3361 * calculations. The end result is that CPU-cost considerations must be
3362 * left for the second phase; and for SEMI/ANTI joins, we must also postpone
3363 * incorporation of the inner path's run cost.
3364 *
3365 * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
3366 * other data to be used by final_cost_nestloop
3367 * 'jointype' is the type of join to be performed
3368 * 'outer_path' is the outer input to the join
3369 * 'inner_path' is the inner input to the join
3370 * 'extra' contains miscellaneous information about the join
3371 */
3372void
3373initial_cost_nestloop(PlannerInfo *root, JoinCostWorkspace *workspace,
3374 JoinType jointype, uint64 enable_mask,
3375 Path *outer_path, Path *inner_path,
3376 JoinPathExtraData *extra)
3377{
3378 int disabled_nodes;
3379 Cost startup_cost = 0;
3380 Cost run_cost = 0;
3381 double outer_path_rows = outer_path->rows;
3382 Cost inner_rescan_start_cost;
3383 Cost inner_rescan_total_cost;
3384 Cost inner_run_cost;
3385 Cost inner_rescan_run_cost;
3386
3387 /* Count up disabled nodes. */
3388 disabled_nodes = (extra->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
3389 disabled_nodes += inner_path->disabled_nodes;
3390 disabled_nodes += outer_path->disabled_nodes;
3391
3392 /* estimate costs to rescan the inner relation */
3393 cost_rescan(root, inner_path,
3394 &inner_rescan_start_cost,
3395 &inner_rescan_total_cost);
3396
3397 /* cost of source data */
3398
3399 /*
3400 * NOTE: clearly, we must pay both outer and inner paths' startup_cost
3401 * before we can start returning tuples, so the join's startup cost is
3402 * their sum. We'll also pay the inner path's rescan startup cost
3403 * multiple times.
3404 */
3405 startup_cost += outer_path->startup_cost + inner_path->startup_cost;
3406 run_cost += outer_path->total_cost - outer_path->startup_cost;
3407 if (outer_path_rows > 1)
3408 run_cost += (outer_path_rows - 1) * inner_rescan_start_cost;
3409
3410 inner_run_cost = inner_path->total_cost - inner_path->startup_cost;
3411 inner_rescan_run_cost = inner_rescan_total_cost - inner_rescan_start_cost;
3412
3413 if (jointype == JOIN_SEMI || jointype == JOIN_ANTI ||
3414 extra->inner_unique)
3415 {
3416 /*
3417 * With a SEMI or ANTI join, or if the innerrel is known unique, the
3418 * executor will stop after the first match.
3419 *
3420 * Getting decent estimates requires inspection of the join quals,
3421 * which we choose to postpone to final_cost_nestloop.
3422 */
3423
3424 /* Save private data for final_cost_nestloop */
3425 workspace->inner_run_cost = inner_run_cost;
3426 workspace->inner_rescan_run_cost = inner_rescan_run_cost;
3427 }
3428 else
3429 {
3430 /* Normal case; we'll scan whole input rel for each outer row */
3431 run_cost += inner_run_cost;
3432 if (outer_path_rows > 1)
3433 run_cost += (outer_path_rows - 1) * inner_rescan_run_cost;
3434 }
3435
3436 /* CPU costs left for later */
3437
3438 /* Public result fields */
3439 workspace->disabled_nodes = disabled_nodes;
3440 workspace->startup_cost = startup_cost;
3441 workspace->total_cost = startup_cost + run_cost;
3442 /* Save private data for final_cost_nestloop */
3443 workspace->run_cost = run_cost;
3444}
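/*
 * For example, with 1000 outer rows the preliminary (non-SEMI/ANTI)
 * estimate charges the outer path's run cost, one full inner run cost,
 * 999 inner rescan run costs and 999 inner rescan startup costs; join-qual
 * CPU costs are deliberately left for final_cost_nestloop.
 */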
3445
3446/*
3447 * final_cost_nestloop
3448 * Final estimate of the cost and result size of a nestloop join path.
3449 *
3450 * 'path' is already filled in except for the rows and cost fields
3451 * 'workspace' is the result from initial_cost_nestloop
3452 * 'extra' contains miscellaneous information about the join
3453 */
3454void
3455final_cost_nestloop(PlannerInfo *root, NestPath *path,
3456 JoinCostWorkspace *workspace,
3457 JoinPathExtraData *extra)
3458{
3459 Path *outer_path = path->jpath.outerjoinpath;
3460 Path *inner_path = path->jpath.innerjoinpath;
3461 double outer_path_rows = outer_path->rows;
3462 double inner_path_rows = inner_path->rows;
3463 Cost startup_cost = workspace->startup_cost;
3464 Cost run_cost = workspace->run_cost;
3465 Cost cpu_per_tuple;
3466 QualCost restrict_qual_cost;
3467 double ntuples;
3468
3469 /* Set the number of disabled nodes. */
3470 path->jpath.path.disabled_nodes = workspace->disabled_nodes;
3471
3472 /* Protect some assumptions below that rowcounts aren't zero */
3473 if (outer_path_rows <= 0)
3474 outer_path_rows = 1;
3475 if (inner_path_rows <= 0)
3476 inner_path_rows = 1;
3477 /* Mark the path with the correct row estimate */
3478 if (path->jpath.path.param_info)
3479 path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
3480 else
3481 path->jpath.path.rows = path->jpath.path.parent->rows;
3482
3483 /* For partial paths, scale row estimate. */
3484 if (path->jpath.path.parallel_workers > 0)
3485 {
3486 double parallel_divisor = get_parallel_divisor(&path->jpath.path);
3487
3488 path->jpath.path.rows =
3489 clamp_row_est(path->jpath.path.rows / parallel_divisor);
3490 }
3491
3492 /* cost of inner-relation source data (we already dealt with outer rel) */
3493
3494 if (path->jpath.jointype == JOIN_SEMI || path->jpath.jointype == JOIN_ANTI ||
3495 extra->inner_unique)
3496 {
3497 /*
3498 * With a SEMI or ANTI join, or if the innerrel is known unique, the
3499 * executor will stop after the first match.
3500 */
3501 Cost inner_run_cost = workspace->inner_run_cost;
3502 Cost inner_rescan_run_cost = workspace->inner_rescan_run_cost;
3503 double outer_matched_rows;
3504 double outer_unmatched_rows;
3505 Selectivity inner_scan_frac;
3506
3507 /*
3508 * For an outer-rel row that has at least one match, we can expect the
3509 * inner scan to stop after a fraction 1/(match_count+1) of the inner
3510 * rows, if the matches are evenly distributed. Since they probably
3511 * aren't quite evenly distributed, we apply a fuzz factor of 2.0 to
3512 * that fraction. (If we used a larger fuzz factor, we'd have to
3513 * clamp inner_scan_frac to at most 1.0; but since match_count is at
3514 * least 1, no such clamp is needed now.)
3515 */
3516 outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
3517 outer_unmatched_rows = outer_path_rows - outer_matched_rows;
3518 inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
3519
3520 /*
3521 * Compute number of tuples processed (not number emitted!). First,
3522 * account for successfully-matched outer rows.
3523 */
3524 ntuples = outer_matched_rows * inner_path_rows * inner_scan_frac;
3525
3526 /*
3527 * Now we need to estimate the actual costs of scanning the inner
3528 * relation, which may be quite a bit less than N times inner_run_cost
3529 * due to early scan stops. We consider two cases. If the inner path
3530 * is an indexscan using all the joinquals as indexquals, then an
3531 * unmatched outer row results in an indexscan returning no rows,
3532 * which is probably quite cheap. Otherwise, the executor will have
3533 * to scan the whole inner rel for an unmatched row; not so cheap.
3534 */
3535 if (has_indexed_join_quals(path))
3536 {
3537 /*
3538 * Successfully-matched outer rows will only require scanning
3539 * inner_scan_frac of the inner relation. In this case, we don't
3540 * need to charge the full inner_run_cost even when that's more
3541 * than inner_rescan_run_cost, because we can assume that none of
3542 * the inner scans ever scan the whole inner relation. So it's
3543 * okay to assume that all the inner scan executions can be
3544 * fractions of the full cost, even if materialization is reducing
3545 * the rescan cost. At this writing, it's impossible to get here
3546 * for a materialized inner scan, so inner_run_cost and
3547 * inner_rescan_run_cost will be the same anyway; but just in
3548 * case, use inner_run_cost for the first matched tuple and
3549 * inner_rescan_run_cost for additional ones.
3550 */
3551 run_cost += inner_run_cost * inner_scan_frac;
3552 if (outer_matched_rows > 1)
3553 run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
3554
3555 /*
3556 * Add the cost of inner-scan executions for unmatched outer rows.
3557 * We estimate this as the same cost as returning the first tuple
3558 * of a nonempty scan. We consider that these are all rescans,
3559 * since we used inner_run_cost once already.
3560 */
3561 run_cost += outer_unmatched_rows *
3562 inner_rescan_run_cost / inner_path_rows;
3563
3564 /*
3565 * We won't be evaluating any quals at all for unmatched rows, so
3566 * don't add them to ntuples.
3567 */
3568 }
3569 else
3570 {
3571 /*
3572 * Here, a complicating factor is that rescans may be cheaper than
3573 * first scans. If we never scan all the way to the end of the
3574 * inner rel, it might be (depending on the plan type) that we'd
3575 * never pay the whole inner first-scan run cost. However it is
3576 * difficult to estimate whether that will happen (and it could
3577 * not happen if there are any unmatched outer rows!), so be
3578 * conservative and always charge the whole first-scan cost once.
3579 * We consider this charge to correspond to the first unmatched
3580 * outer row, unless there isn't one in our estimate, in which
3581 * case blame it on the first matched row.
3582 */
3583
3584 /* First, count all unmatched join tuples as being processed */
3586
3587 /* Now add the forced full scan, and decrement appropriate count */
3588 run_cost += inner_run_cost;
3589 if (outer_unmatched_rows >= 1)
3590 outer_unmatched_rows -= 1;
3591 else
3592 outer_matched_rows -= 1;
3593
3594 /* Add inner run cost for additional outer tuples having matches */
3595 if (outer_matched_rows > 0)
3596 run_cost += outer_matched_rows * inner_rescan_run_cost * inner_scan_frac;
3597
3598 /* Add inner run cost for additional unmatched outer tuples */
3599 if (outer_unmatched_rows > 0)
3600 run_cost += outer_unmatched_rows * inner_rescan_run_cost;
3601 }
3602 }
3603 else
3604 {
3605 /* Normal-case source costs were included in preliminary estimate */
3606
3607 /* Compute number of tuples processed (not number emitted!) */
3608 ntuples = outer_path_rows * inner_path_rows;
3609 }
3610
3611 /* CPU costs */
3612 cost_qual_eval(&restrict_qual_cost, path->jpath.joinrestrictinfo, root);
3613 startup_cost += restrict_qual_cost.startup;
3614 cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
3615 run_cost += cpu_per_tuple * ntuples;
3616
3617 /* tlist eval costs are paid per output row, not per tuple scanned */
3618 startup_cost += path->jpath.path.pathtarget->cost.startup;
3619 run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
3620
3621 path->jpath.path.startup_cost = startup_cost;
3622 path->jpath.path.total_cost = startup_cost + run_cost;
3623}
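/*
 * As an example of the SEMI/ANTI adjustment above: if match_count is 3,
 * inner_scan_frac = 2.0 / (3 + 1) = 0.5, so each matched outer row is
 * assumed to scan about half of the inner relation before the executor
 * stops at the first match.
 */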
3624
3625/*
3626 * initial_cost_mergejoin
3627 * Preliminary estimate of the cost of a mergejoin path.
3628 *
3629 * This must quickly produce lower-bound estimates of the path's startup and
3630 * total costs. If we are unable to eliminate the proposed path from
3631 * consideration using the lower bounds, final_cost_mergejoin will be called
3632 * to obtain the final estimates.
3633 *
3634 * The exact division of labor between this function and final_cost_mergejoin
3635 * is private to them, and represents a tradeoff between speed of the initial
3636 * estimate and getting a tight lower bound. We choose to not examine the
3637 * join quals here, except for obtaining the scan selectivity estimate which
3638 * is really essential (but fortunately, use of caching keeps the cost of
3639 * getting that down to something reasonable).
3640 * We also assume that cost_sort/cost_incremental_sort is cheap enough to use
3641 * here.
3642 *
3643 * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
3644 * other data to be used by final_cost_mergejoin
3645 * 'jointype' is the type of join to be performed
3646 * 'mergeclauses' is the list of joinclauses to be used as merge clauses
3647 * 'outer_path' is the outer input to the join
3648 * 'inner_path' is the inner input to the join
3649 * 'outersortkeys' is the list of sort keys for the outer path
3650 * 'innersortkeys' is the list of sort keys for the inner path
3651 * 'outer_presorted_keys' is the number of presorted keys of the outer path
3652 * 'extra' contains miscellaneous information about the join
3653 *
3654 * Note: outersortkeys and innersortkeys should be NIL if no explicit
3655 * sort is needed because the respective source path is already ordered.
3656 */
3657void
3658initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace,
3659 JoinType jointype,
3660 List *mergeclauses,
3661 Path *outer_path, Path *inner_path,
3662 List *outersortkeys, List *innersortkeys,
3663 int outer_presorted_keys,
3664 JoinPathExtraData *extra)
3665{
3666 int disabled_nodes;
3667 Cost startup_cost = 0;
3668 Cost run_cost = 0;
3669 double outer_path_rows = outer_path->rows;
3670 double inner_path_rows = inner_path->rows;
3671 Cost inner_run_cost;
3672 double outer_rows,
3673 inner_rows,
3674 outer_skip_rows,
3675 inner_skip_rows;
3676 Selectivity outerstartsel,
3677 outerendsel,
3678 innerstartsel,
3679 innerendsel;
3680 Path sort_path; /* dummy for result of
3681 * cost_sort/cost_incremental_sort */
3682
3683 /* Protect some assumptions below that rowcounts aren't zero */
3684 if (outer_path_rows <= 0)
3685 outer_path_rows = 1;
3686 if (inner_path_rows <= 0)
3687 inner_path_rows = 1;
3688
3689 /*
3690 * A merge join will stop as soon as it exhausts either input stream
3691 * (unless it's an outer join, in which case the outer side has to be
3692 * scanned all the way anyway). Estimate fraction of the left and right
3693 * inputs that will actually need to be scanned. Likewise, we can
3694 * estimate the number of rows that will be skipped before the first join
3695 * pair is found, which should be factored into startup cost. We use only
3696 * the first (most significant) merge clause for this purpose. Since
3697 * mergejoinscansel() is a fairly expensive computation, we cache the
3698 * results in the merge clause RestrictInfo.
3699 */
3700 if (mergeclauses && jointype != JOIN_FULL)
3701 {
3702 RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
3703 List *opathkeys;
3704 List *ipathkeys;
3705 PathKey *opathkey;
3706 PathKey *ipathkey;
3707 MergeScanSelCache *cache;
3708
3709 /* Get the input pathkeys to determine the sort-order details */
3710 opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
3711 ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
3712 Assert(opathkeys);
3713 Assert(ipathkeys);
3714 opathkey = (PathKey *) linitial(opathkeys);
3715 ipathkey = (PathKey *) linitial(ipathkeys);
3716 /* debugging check */
3717 if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
3718 opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation ||
3719 opathkey->pk_cmptype != ipathkey->pk_cmptype ||
3720 opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
3721 elog(ERROR, "left and right pathkeys do not match in mergejoin");
3722
3723 /* Get the selectivity with caching */
3724 cache = cached_scansel(root, firstclause, opathkey);
3725
3726 if (bms_is_subset(firstclause->left_relids,
3727 outer_path->parent->relids))
3728 {
3729 /* left side of clause is outer */
3730 outerstartsel = cache->leftstartsel;
3731 outerendsel = cache->leftendsel;
3732 innerstartsel = cache->rightstartsel;
3733 innerendsel = cache->rightendsel;
3734 }
3735 else
3736 {
3737 /* left side of clause is inner */
3738 outerstartsel = cache->rightstartsel;
3739 outerendsel = cache->rightendsel;
3740 innerstartsel = cache->leftstartsel;
3741 innerendsel = cache->leftendsel;
3742 }
3743 if (jointype == JOIN_LEFT ||
3744 jointype == JOIN_ANTI)
3745 {
3746 outerstartsel = 0.0;
3747 outerendsel = 1.0;
3748 }
3749 else if (jointype == JOIN_RIGHT ||
3750 jointype == JOIN_RIGHT_ANTI)
3751 {
3752 innerstartsel = 0.0;
3753 innerendsel = 1.0;
3754 }
3755 }
3756 else
3757 {
3758 /* cope with clauseless or full mergejoin */
3759 outerstartsel = innerstartsel = 0.0;
3760 outerendsel = innerendsel = 1.0;
3761 }
3762
3763 /*
3764 * Convert selectivities to row counts. We force outer_rows and
3765 * inner_rows to be at least 1, but the skip_rows estimates can be zero.
3766 */
3767 outer_skip_rows = rint(outer_path_rows * outerstartsel);
3768 inner_skip_rows = rint(inner_path_rows * innerstartsel);
3769 outer_rows = clamp_row_est(outer_path_rows * outerendsel);
3770 inner_rows = clamp_row_est(inner_path_rows * innerendsel);
3771
3772 Assert(outer_skip_rows <= outer_rows);
3773 Assert(inner_skip_rows <= inner_rows);
3774
3775 /*
3776 * Readjust scan selectivities to account for above rounding. This is
3777 * normally an insignificant effect, but when there are only a few rows in
3778 * the inputs, failing to do this makes for a large percentage error.
3779 */
3780 outerstartsel = outer_skip_rows / outer_path_rows;
3781 innerstartsel = inner_skip_rows / inner_path_rows;
3782 outerendsel = outer_rows / outer_path_rows;
3783 innerendsel = inner_rows / inner_path_rows;
3784
3784
3785 Assert(outerstartsel <= outerendsel);
3786 Assert(innerstartsel <= innerendsel);
3787
3788 /*
3789 * We don't decide whether to materialize the inner path until we get to
3790 * final_cost_mergejoin(), so we don't know whether to check the pgs_mask
3791 * against PGS_MERGEJOIN_PLAIN or PGS_MERGEJOIN_MATERIALIZE. Instead, we
3792 * just account for any child nodes here and assume that this node is not
3793 * itself disabled; we can sort out the details in final_cost_mergejoin().
3794 *
3795 * (We could be more precise here by setting disabled_nodes to 1 at this
3796 * stage if both PGS_MERGEJOIN_PLAIN and PGS_MERGEJOIN_MATERIALIZE are
3797 * disabled, but that seems to go against the idea of making this function
3798 * produce a quick, optimistic approximation of the final cost.)
3799 */
3800 disabled_nodes = 0;
3801
3802 /* cost of source data */
3803
3804 if (outersortkeys) /* do we need to sort outer? */
3805 {
3806 /*
3807 * We can assert that the outer path is not already ordered
3808 * appropriately for the mergejoin; otherwise, outersortkeys would
3809 * have been set to NIL.
3810 */
3811 Assert(!pathkeys_contained_in(outersortkeys, outer_path->pathkeys));
3812
3813 /*
3814 * We choose to use incremental sort if it is enabled and there are
3815 * presorted keys; otherwise we use full sort.
3816 */
3817 if (enable_incremental_sort && outer_presorted_keys > 0)
3818 {
3819 cost_incremental_sort(&sort_path,
3820 root,
3821 outersortkeys,
3822 outer_presorted_keys,
3823 outer_path->disabled_nodes,
3824 outer_path->startup_cost,
3825 outer_path->total_cost,
3826 outer_path_rows,
3827 outer_path->pathtarget->width,
3828 0.0,
3829 work_mem,
3830 -1.0);
3831 }
3832 else
3833 {
3834 cost_sort(&sort_path,
3835 root,
3836 outersortkeys,
3837 outer_path->disabled_nodes,
3838 outer_path->total_cost,
3839 outer_path_rows,
3840 outer_path->pathtarget->width,
3841 0.0,
3842 work_mem,
3843 -1.0);
3844 }
3845
3846 disabled_nodes += sort_path.disabled_nodes;
3847 startup_cost += sort_path.startup_cost;
3848 startup_cost += (sort_path.total_cost - sort_path.startup_cost)
3849 * outerstartsel;
3850 run_cost += (sort_path.total_cost - sort_path.startup_cost)
3851 * (outerendsel - outerstartsel);
3852 }
3853 else
3854 {
3855 disabled_nodes += outer_path->disabled_nodes;
3856 startup_cost += outer_path->startup_cost;
3857 startup_cost += (outer_path->total_cost - outer_path->startup_cost)
3858 * outerstartsel;
3859 run_cost += (outer_path->total_cost - outer_path->startup_cost)
3860 * (outerendsel - outerstartsel);
3861 }
3862
3863 if (innersortkeys) /* do we need to sort inner? */
3864 {
3865 /*
3866 * We can assert that the inner path is not already ordered
3867 * appropriately for the mergejoin; otherwise, innersortkeys would
3868 * have been set to NIL.
3869 */
3870 Assert(!pathkeys_contained_in(innersortkeys, inner_path->pathkeys));
3871
3872 /*
3873 * We do not consider incremental sort for inner path, because
3874 * incremental sort does not support mark/restore.
3875 */
3876
3877 cost_sort(&sort_path,
3878 root,
3879 innersortkeys,
3880 inner_path->disabled_nodes,
3881 inner_path->total_cost,
3882 inner_path_rows,
3883 inner_path->pathtarget->width,
3884 0.0,
3885 work_mem,
3886 -1.0);
3887 disabled_nodes += sort_path.disabled_nodes;
3888 startup_cost += sort_path.startup_cost;
3889 startup_cost += (sort_path.total_cost - sort_path.startup_cost)
3890 * innerstartsel;
3891 inner_run_cost = (sort_path.total_cost - sort_path.startup_cost)
3892 * (innerendsel - innerstartsel);
3893 }
3894 else
3895 {
3896 disabled_nodes += inner_path->disabled_nodes;
3897 startup_cost += inner_path->startup_cost;
3898 startup_cost += (inner_path->total_cost - inner_path->startup_cost)
3899 * innerstartsel;
3900 inner_run_cost = (inner_path->total_cost - inner_path->startup_cost)
3901 * (innerendsel - innerstartsel);
3902 }
3903
3904 /*
3905 * We can't yet determine whether rescanning occurs, or whether
3906 * materialization of the inner input should be done. The minimum
3907 * possible inner input cost, regardless of rescan and materialization
3908 * considerations, is inner_run_cost. We include that in
3909 * workspace->total_cost, but not yet in run_cost.
3910 */
3911
3912 /* CPU costs left for later */
3913
3914 /* Public result fields */
3915 workspace->disabled_nodes = disabled_nodes;
3916 workspace->startup_cost = startup_cost;
3917 workspace->total_cost = startup_cost + run_cost + inner_run_cost;
3918 /* Save private data for final_cost_mergejoin */
3919 workspace->run_cost = run_cost;
3920 workspace->inner_run_cost = inner_run_cost;
3921 workspace->outer_rows = outer_rows;
3922 workspace->inner_rows = inner_rows;
3923 workspace->outer_skip_rows = outer_skip_rows;
3924 workspace->inner_skip_rows = inner_skip_rows;
3925}
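The conversion from mergejoinscansel()-style start/end selectivities to skipped and scanned row counts, and the re-derivation of the selectivities from the rounded counts, can be tried in isolation. The following standalone sketch uses invented inputs and a simplified stand-in for clamp_row_est().

#include <math.h>
#include <stdio.h>

/* crude stand-in for clamp_row_est(): round and force at least one row */
static double
clamp_rows(double rows)
{
	return rows < 1.0 ? 1.0 : rint(rows);
}

int
main(void)
{
	double		outer_path_rows = 10000.0;	/* invented input sizes */
	double		inner_path_rows = 500.0;
	double		outerstartsel = 0.12;	/* invented scan selectivities */
	double		outerendsel = 0.83;
	double		innerstartsel = 0.0;
	double		innerendsel = 1.0;
	double		outer_skip_rows = rint(outer_path_rows * outerstartsel);
	double		inner_skip_rows = rint(inner_path_rows * innerstartsel);
	double		outer_rows = clamp_rows(outer_path_rows * outerendsel);
	double		inner_rows = clamp_rows(inner_path_rows * innerendsel);

	/* re-derive the selectivities from the rounded row counts */
	outerstartsel = outer_skip_rows / outer_path_rows;
	outerendsel = outer_rows / outer_path_rows;
	innerstartsel = inner_skip_rows / inner_path_rows;
	innerendsel = inner_rows / inner_path_rows;

	printf("outer: skip %.0f, scan to %.0f (sel %.4f .. %.4f)\n",
		   outer_skip_rows, outer_rows, outerstartsel, outerendsel);
	printf("inner: skip %.0f, scan to %.0f (sel %.4f .. %.4f)\n",
		   inner_skip_rows, inner_rows, innerstartsel, innerendsel);
	return 0;
}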
3926
3927/*
3928 * final_cost_mergejoin
3929 * Final estimate of the cost and result size of a mergejoin path.
3930 *
3931 * Unlike other costsize functions, this routine makes two actual decisions:
3932 * whether the executor will need to do mark/restore, and whether we should
3933 * materialize the inner path. It would be logically cleaner to build
3934 * separate paths testing these alternatives, but that would require repeating
3935 * most of the cost calculations, which are not all that cheap. Since the
3936 * choice will not affect output pathkeys or startup cost, only total cost,
3937 * there is no possibility of wanting to keep more than one path. So it seems
3938 * best to make the decisions here and record them in the path's
3939 * skip_mark_restore and materialize_inner fields.
3940 *
3941 * Mark/restore overhead is usually required, but can be skipped if we know
3942 * that the executor need find only one match per outer tuple, and that the
3943 * mergeclauses are sufficient to identify a match.
3944 *
3945 * We materialize the inner path if we need mark/restore and either the inner
3946 * path can't support mark/restore, or it's cheaper to use an interposed
3947 * Material node to handle mark/restore.
3948 *
3949 * 'path' is already filled in except for the rows and cost fields and
3950 * skip_mark_restore and materialize_inner
3951 * 'workspace' is the result from initial_cost_mergejoin
3952 * 'extra' contains miscellaneous information about the join
3953 */
3954void
3955final_cost_mergejoin(PlannerInfo *root, MergePath *path,
3956 JoinCostWorkspace *workspace,
3957 JoinPathExtraData *extra)
3958{
3959 Path *outer_path = path->jpath.outerjoinpath;
3960 Path *inner_path = path->jpath.innerjoinpath;
3961 double inner_path_rows = inner_path->rows;
3962 List *mergeclauses = path->path_mergeclauses;
3963 List *innersortkeys = path->innersortkeys;
3964 Cost startup_cost = workspace->startup_cost;
3965 Cost run_cost = workspace->run_cost;
3966 Cost inner_run_cost = workspace->inner_run_cost;
3967 double outer_rows = workspace->outer_rows;
3968 double inner_rows = workspace->inner_rows;
3969 double outer_skip_rows = workspace->outer_skip_rows;
3970 double inner_skip_rows = workspace->inner_skip_rows;
3971 Cost cpu_per_tuple,
3972 bare_inner_cost,
3973 mat_inner_cost;
3974 QualCost merge_qual_cost;
3975 QualCost qp_qual_cost;
3976 double mergejointuples,
3977 rescannedtuples;
3978 double rescanratio;
3979 uint64 enable_mask = 0;
3980
3981 /* Protect some assumptions below that rowcounts aren't zero */
3982 if (inner_path_rows <= 0)
3983 inner_path_rows = 1;
3984
3985 /* Mark the path with the correct row estimate */
3986 if (path->jpath.path.param_info)
3987 path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
3988 else
3989 path->jpath.path.rows = path->jpath.path.parent->rows;
3990
3991 /* For partial paths, scale row estimate. */
3992 if (path->jpath.path.parallel_workers > 0)
3993 {
3994 double parallel_divisor = get_parallel_divisor(&path->jpath.path);
3995
3996 path->jpath.path.rows =
3997 clamp_row_est(path->jpath.path.rows / parallel_divisor);
3998 }
3999
4000 /*
4001 * Compute cost of the mergequals and qpquals (other restriction clauses)
4002 * separately.
4003 */
4004 cost_qual_eval(&merge_qual_cost, mergeclauses, root);
4005 cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
4006 qp_qual_cost.startup -= merge_qual_cost.startup;
4007 qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
4008
4009 /*
4010 * With a SEMI or ANTI join, or if the innerrel is known unique, the
4011 * executor will stop scanning for matches after the first match. When
4012 * all the joinclauses are merge clauses, this means we don't ever need to
4013 * back up the merge, and so we can skip mark/restore overhead.
4014 */
4015 if ((path->jpath.jointype == JOIN_SEMI ||
4016 path->jpath.jointype == JOIN_ANTI ||
4017 extra->inner_unique) &&
4018 (list_length(path->jpath.joinrestrictinfo) ==
4019 list_length(path->path_mergeclauses)))
4020 path->skip_mark_restore = true;
4021 else
4022 path->skip_mark_restore = false;
4023
4024 /*
4025 * Get approx # tuples passing the mergequals. We use approx_tuple_count
4026 * here because we need an estimate done with JOIN_INNER semantics.
4027 */
4028 mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);
4029
4030 /*
4031 * When there are equal merge keys in the outer relation, the mergejoin
4032 * must rescan any matching tuples in the inner relation. This means
4033 * re-fetching inner tuples; we have to estimate how often that happens.
4034 *
4035 * For regular inner and outer joins, the number of re-fetches can be
4036 * estimated approximately as size of merge join output minus size of
4037 * inner relation. Assume that the distinct key values are 1, 2, ..., and
4038 * denote the number of values of each key in the outer relation as m1,
4039 * m2, ...; in the inner relation, n1, n2, ... Then we have
4040 *
4041 * size of join = m1 * n1 + m2 * n2 + ...
4042 *
4043 * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
4044 * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
4045 * relation
4046 *
4047 * This equation works correctly for outer tuples having no inner match
4048 * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
4049 * are effectively subtracting those from the number of rescanned tuples,
4050 * when we should not. Can we do better without expensive selectivity
4051 * computations?
4052 *
4053 * The whole issue is moot if we know we don't need to mark/restore at
4054 * all, or if we are working from a unique-ified outer input.
4055 */
4056 if (path->skip_mark_restore ||
4057 RELATION_WAS_MADE_UNIQUE(outer_path->parent, extra->sjinfo,
4058 path->jpath.jointype))
4059 rescannedtuples = 0;
4060 else
4061 {
4062 rescannedtuples = mergejointuples - inner_path_rows;
4063 /* Must clamp because of possible underestimate */
4064 if (rescannedtuples < 0)
4065 rescannedtuples = 0;
4066 }
4067
4068 /*
4069 * We'll inflate various costs this much to account for rescanning. Note
4070 * that this is to be multiplied by something involving inner_rows, or
4071 * another number related to the portion of the inner rel we'll scan.
4072 */
4073 rescanratio = 1.0 + (rescannedtuples / inner_rows);
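A quick worked instance of the re-fetch identity above, using invented per-key counts (all outer counts are nonzero, so the identity is exact):

#include <stdio.h>

int
main(void)
{
	/* invented example: three distinct merge key values */
	double		m[] = {2, 1, 3};	/* occurrences of each key in the outer rel */
	double		n[] = {3, 2, 0};	/* occurrences of each key in the inner rel */
	double		join_size = 0.0;
	double		inner_size = 0.0;
	int			i;

	for (i = 0; i < 3; i++)
	{
		join_size += m[i] * n[i];	/* m1*n1 + m2*n2 + ... */
		inner_size += n[i];			/* n1 + n2 + ... */
	}

	/* rescanned tuples = size of join - size of inner relation */
	printf("join size %.0f, inner size %.0f, rescanned %.0f, rescanratio %.2f\n",
		   join_size, inner_size, join_size - inner_size,
		   1.0 + (join_size - inner_size) / inner_size);
	return 0;
}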
4074
4075 /*
4076 * Decide whether we want to materialize the inner input to shield it from
4077 * mark/restore and performing re-fetches. Our cost model for regular
4078 * re-fetches is that a re-fetch costs the same as an original fetch,
4079 * which is probably an overestimate; but on the other hand we ignore the
4080 * bookkeeping costs of mark/restore. Not clear if it's worth developing
4081 * a more refined model. So we just need to inflate the inner run cost by
4082 * rescanratio.
4083 */
4084 bare_inner_cost = inner_run_cost * rescanratio;
4085
4086 /*
4087 * When we interpose a Material node the re-fetch cost is assumed to be
4088 * just cpu_operator_cost per tuple, independently of the underlying
4089 * plan's cost; and we charge an extra cpu_operator_cost per original
4090 * fetch as well. Note that we're assuming the materialize node will
4091 * never spill to disk, since it only has to remember tuples back to the
4092 * last mark. (If there are a huge number of duplicates, our other cost
4093 * factors will make the path so expensive that it probably won't get
4094 * chosen anyway.) So we don't use cost_rescan here.
4095 *
4096 * Note: keep this estimate in sync with create_mergejoin_plan's labeling
4097 * of the generated Material node.
4098 */
4099 mat_inner_cost = inner_run_cost +
4100 cpu_operator_cost * inner_rows * rescanratio;
4101
4102 /*
4103 * If we don't need mark/restore at all, we don't need materialization.
4104 */
4105 if (path->skip_mark_restore)
4106 path->materialize_inner = false;
4107
4108 /*
4109 * If merge joins with materialization are enabled, then choose
4110 * materialization if either (a) it looks cheaper or (b) merge joins
4111 * without materialization are disabled.
4112 */
4113 else if ((extra->pgs_mask & PGS_MERGEJOIN_MATERIALIZE) != 0 &&
4114 (mat_inner_cost < bare_inner_cost ||
4115 (extra->pgs_mask & PGS_MERGEJOIN_PLAIN) == 0))
4116 path->materialize_inner = true;
4117
4118 /*
4119 * Regardless of what plan shapes are enabled and what the costs seem to
4120 * be, we *must* materialize it if the inner path is to be used directly
4121 * (without sorting) and it doesn't support mark/restore. Planner failure
4122 * is not an option!
4123 *
4124 * Since the inner side must be ordered, and only Sorts and IndexScans can
4125 * create order to begin with, and they both support mark/restore, you
4126 * might think there's no problem --- but you'd be wrong. Nestloop and
4127 * merge joins can *preserve* the order of their inputs, so they can be
4128 * selected as the input of a mergejoin, and they don't support
4129 * mark/restore at present.
4130 */
4131 else if (innersortkeys == NIL &&
4132 !ExecSupportsMarkRestore(inner_path))
4133 path->materialize_inner = true;
4134
4135 /*
4136 * Also, force materializing if the inner path is to be sorted and the
4137 * sort is expected to spill to disk. This is because the final merge
4138 * pass can be done on-the-fly if it doesn't have to support mark/restore.
4139 * We don't try to adjust the cost estimates for this consideration,
4140 * though.
4141 *
4142 * Since materialization is a performance optimization in this case,
4143 * rather than necessary for correctness, we skip it if materialization is
4144 * switched off.
4145 */
4146 else if ((extra->pgs_mask & PGS_MERGEJOIN_MATERIALIZE) != 0 &&
4147 innersortkeys != NIL &&
4148 relation_byte_size(inner_path_rows,
4149 inner_path->pathtarget->width) >
4150 work_mem * (Size) 1024)
4151 path->materialize_inner = true;
4152 else
4153 path->materialize_inner = false;
4154
4155 /* Get the number of disabled nodes, not yet including this one. */
4156 path->jpath.path.disabled_nodes = workspace->disabled_nodes;
4157
4158 /*
4159 * Charge the right incremental cost for the chosen case, and update
4160 * enable_mask as appropriate.
4161 */
4162 if (path->materialize_inner)
4163 {
4164 run_cost += mat_inner_cost;
4165 enable_mask |= PGS_MERGEJOIN_MATERIALIZE;
4166 }
4167 else
4168 {
4169 run_cost += bare_inner_cost;
4170 enable_mask |= PGS_MERGEJOIN_PLAIN;
4171 }
4172
4173 /* Incremental count of disabled nodes if this node is disabled. */
4174 if (path->jpath.path.parallel_workers == 0)
4176 if ((extra->pgs_mask & enable_mask) != enable_mask)
4177 ++path->jpath.path.disabled_nodes;
4178
4179 /* CPU costs */
4180
4181 /*
4182 * The number of tuple comparisons needed is approximately number of outer
4183 * rows plus number of inner rows plus number of rescanned tuples (can we
4184 * refine this?). At each one, we need to evaluate the mergejoin quals.
4185 */
4186 startup_cost += merge_qual_cost.startup;
4187 startup_cost += merge_qual_cost.per_tuple *
4188 (outer_skip_rows + inner_skip_rows * rescanratio);
4189 run_cost += merge_qual_cost.per_tuple *
4190 ((outer_rows - outer_skip_rows) +
4191 (inner_rows - inner_skip_rows) * rescanratio);
4192
4193 /*
4194 * For each tuple that gets through the mergejoin proper, we charge
4195 * cpu_tuple_cost plus the cost of evaluating additional restriction
4196 * clauses that are to be applied at the join. (This is pessimistic since
4197 * not all of the quals may get evaluated at each tuple.)
4198 *
4199 * Note: we could adjust for SEMI/ANTI joins skipping some qual
4200 * evaluations here, but it's probably not worth the trouble.
4201 */
4202 startup_cost += qp_qual_cost.startup;
4203 cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
4204 run_cost += cpu_per_tuple * mergejointuples;
4205
4206 /* tlist eval costs are paid per output row, not per tuple scanned */
4207 startup_cost += path->jpath.path.pathtarget->cost.startup;
4208 run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
4209
4210 path->jpath.path.startup_cost = startup_cost;
4211 path->jpath.path.total_cost = startup_cost + run_cost;
4212}
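The materialize-or-not comparison reduces to two numbers; this standalone sketch plugs in invented figures (cpu_operator_cost is shown at its default of 0.0025) to show how the Material variant wins once re-fetches are numerous.

#include <stdio.h>

int
main(void)
{
	/* Invented figures for one candidate mergejoin */
	double		inner_run_cost = 400.0;
	double		inner_rows = 20000.0;
	double		rescannedtuples = 5000.0;
	double		cpu_operator_cost = 0.0025;	/* default GUC value */
	double		rescanratio = 1.0 + rescannedtuples / inner_rows;	/* 1.25 */

	/* re-fetching from the underlying plan costs as much as the original fetch */
	double		bare_inner_cost = inner_run_cost * rescanratio;
	/* with a Material node, re-fetches cost only cpu_operator_cost per tuple */
	double		mat_inner_cost = inner_run_cost +
		cpu_operator_cost * inner_rows * rescanratio;

	printf("bare=%.2f mat=%.2f -> %s\n", bare_inner_cost, mat_inner_cost,
		   mat_inner_cost < bare_inner_cost ? "materialize" : "don't materialize");
	return 0;
}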
4213
4214/*
4215 * run mergejoinscansel() with caching
4216 */
4217static MergeScanSelCache *
4218cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
4219{
4220 MergeScanSelCache *cache;
4221 ListCell *lc;
4222 Selectivity leftstartsel,
4223 leftendsel,
4224 rightstartsel,
4225 rightendsel;
4226 MemoryContext oldcontext;
4227
4228 /* Do we have this result already? */
4229 foreach(lc, rinfo->scansel_cache)
4230 {
4231 cache = (MergeScanSelCache *) lfirst(lc);
4232 if (cache->opfamily == pathkey->pk_opfamily &&
4233 cache->collation == pathkey->pk_eclass->ec_collation &&
4234 cache->cmptype == pathkey->pk_cmptype &&
4235 cache->nulls_first == pathkey->pk_nulls_first)
4236 return cache;
4237 }
4238
4239 /* Nope, do the computation */
4240 mergejoinscansel(root,
4241 (Node *) rinfo->clause,
4242 pathkey->pk_opfamily,
4243 pathkey->pk_cmptype,
4244 pathkey->pk_nulls_first,
4245 &leftstartsel,
4246 &leftendsel,
4247 &rightstartsel,
4248 &rightendsel);
4249
4250 /* Cache the result in suitably long-lived workspace */
4251 oldcontext = MemoryContextSwitchTo(root->planner_cxt);
4252
4253 cache = (MergeScanSelCache *) palloc(sizeof(MergeScanSelCache));
4254 cache->opfamily = pathkey->pk_opfamily;
4255 cache->collation = pathkey->pk_eclass->ec_collation;
4256 cache->cmptype = pathkey->pk_cmptype;
4257 cache->nulls_first = pathkey->pk_nulls_first;
4258 cache->leftstartsel = leftstartsel;
4259 cache->leftendsel = leftendsel;
4260 cache->rightstartsel = rightstartsel;
4261 cache->rightendsel = rightendsel;
4262
4263 rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);
4264
4265 MemoryContextSwitchTo(oldcontext);
4266
4267 return cache;
4268}
4269
4270/*
4271 * initial_cost_hashjoin
4272 * Preliminary estimate of the cost of a hashjoin path.
4273 *
4274 * This must quickly produce lower-bound estimates of the path's startup and
4275 * total costs. If we are unable to eliminate the proposed path from
4276 * consideration using the lower bounds, final_cost_hashjoin will be called
4277 * to obtain the final estimates.
4278 *
4279 * The exact division of labor between this function and final_cost_hashjoin
4280 * is private to them, and represents a tradeoff between speed of the initial
4281 * estimate and getting a tight lower bound. We choose to not examine the
4282 * join quals here (other than by counting the number of hash clauses),
4283 * so we can't do much with CPU costs. We do assume that
4284 * ExecChooseHashTableSize is cheap enough to use here.
4285 *
4286 * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
4287 * other data to be used by final_cost_hashjoin
4288 * 'jointype' is the type of join to be performed
4289 * 'hashclauses' is the list of joinclauses to be used as hash clauses
4290 * 'outer_path' is the outer input to the join
4291 * 'inner_path' is the inner input to the join
4292 * 'extra' contains miscellaneous information about the join
4293 * 'parallel_hash' indicates that inner_path is partial and that a shared
4294 * hash table will be built in parallel
4295 */
4296void
4297initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace,
4298 JoinType jointype,
4299 List *hashclauses,
4300 Path *outer_path, Path *inner_path,
4301 JoinPathExtraData *extra,
4302 bool parallel_hash)
4303{
4304 int disabled_nodes;
4305 Cost startup_cost = 0;
4306 Cost run_cost = 0;
4307 double outer_path_rows = outer_path->rows;
4308 double inner_path_rows = inner_path->rows;
4309 double inner_path_rows_total = inner_path_rows;
4310 int num_hashclauses = list_length(hashclauses);
4311 int numbuckets;
4312 int numbatches;
4313 int num_skew_mcvs;
4314 size_t space_allowed; /* unused */
4316
4317 if (outer_path->parallel_workers == 0)
4319
4320 /* Count up disabled nodes. */
4321 disabled_nodes = (extra->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
4322 disabled_nodes += inner_path->disabled_nodes;
4323 disabled_nodes += outer_path->disabled_nodes;
4324
4325 /* cost of source data */
4326 startup_cost += outer_path->startup_cost;
4327 run_cost += outer_path->total_cost - outer_path->startup_cost;
4328 startup_cost += inner_path->total_cost;
4329
4330 /*
4331 * Cost of computing hash function: must do it once per input tuple. We
4332 * charge one cpu_operator_cost for each column's hash function. Also,
4333 * tack on one cpu_tuple_cost per inner row, to model the costs of
4334 * inserting the row into the hashtable.
4335 *
4336 * XXX when a hashclause is more complex than a single operator, we really
4337 * should charge the extra eval costs of the left or right side, as
4338 * appropriate, here. This seems more work than it's worth at the moment.
4339 */
4340 startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
4341 * inner_path_rows;
4342 run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
4343
4344 /*
4345 * If this is a parallel hash build, then the value we have for
4346 * inner_rows_total currently refers only to the rows returned by each
4347 * participant. For shared hash table size estimation, we need the total
4348 * number, so we need to undo the division.
4349 */
4350 if (parallel_hash)
4351 inner_path_rows_total *= get_parallel_divisor(inner_path);
4352
4353 /*
4354 * Get hash table size that executor would use for inner relation.
4355 *
4356 * XXX for the moment, always assume that skew optimization will be
4357 * performed. As long as SKEW_HASH_MEM_PERCENT is small, it's not worth
4358 * trying to determine that for sure.
4359 *
4360 * XXX at some point it might be interesting to try to account for skew
4361 * optimization in the cost estimate, but for now, we don't.
4362 */
4363 ExecChooseHashTableSize(inner_path_rows_total,
4364 inner_path->pathtarget->width,
4365 true, /* useskew */
4366 parallel_hash, /* try_combined_hash_mem */
4367 outer_path->parallel_workers,
4368 &space_allowed,
4369 &numbuckets,
4370 &numbatches,
4371 &num_skew_mcvs);
4372
4373 /*
4374 * If inner relation is too big then we will need to "batch" the join,
4375 * which implies writing and reading most of the tuples to disk an extra
4376 * time. Charge seq_page_cost per page, since the I/O should be nice and
4377 * sequential. Writing the inner rel counts as startup cost, all the rest
4378 * as run cost.
4379 */
4380 if (numbatches > 1)
4381 {
4382 double outerpages = page_size(outer_path_rows,
4383 outer_path->pathtarget->width);
4384 double innerpages = page_size(inner_path_rows,
4385 inner_path->pathtarget->width);
4386
4387 startup_cost += seq_page_cost * innerpages;
4388 run_cost += seq_page_cost * (innerpages + 2 * outerpages);
4389 }
4390
4391 /* CPU costs left for later */
4392
4393 /* Public result fields */
4394 workspace->disabled_nodes = disabled_nodes;
4395 workspace->startup_cost = startup_cost;
4396 workspace->total_cost = startup_cost + run_cost;
4397 /* Save private data for final_cost_hashjoin */
4398 workspace->run_cost = run_cost;
4399 workspace->numbuckets = numbuckets;
4400 workspace->numbatches = numbatches;
4401 workspace->inner_rows_total = inner_path_rows_total;
4402}
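A standalone sketch of the preliminary hash-join charges described above, with invented row counts and a crude stand-in for page_size()/relation_byte_size():

#include <math.h>
#include <stdio.h>

/* crude stand-in for page_size(): bytes of tuples divided into 8K pages */
static double
pages_for(double tuples, int width)
{
	return ceil(tuples * (width + 24) / 8192.0);	/* 24 = assumed tuple overhead */
}

int
main(void)
{
	double		outer_rows = 1000000.0, inner_rows = 200000.0;	/* invented */
	int			outer_width = 64, inner_width = 32;
	int			num_hashclauses = 1, numbatches = 4;
	double		cpu_operator_cost = 0.0025, cpu_tuple_cost = 0.01, seq_page_cost = 1.0;
	double		startup_cost = 0.0, run_cost = 0.0;

	/* hashing the inner rows and loading the table is startup work */
	startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost) * inner_rows;
	/* hashing each probing outer row is run-time work */
	run_cost += cpu_operator_cost * num_hashclauses * outer_rows;

	if (numbatches > 1)
	{
		double		outerpages = pages_for(outer_rows, outer_width);
		double		innerpages = pages_for(inner_rows, inner_width);

		/* write inner rel at startup; re-read it and spool outer rel at run time */
		startup_cost += seq_page_cost * innerpages;
		run_cost += seq_page_cost * (innerpages + 2 * outerpages);
	}

	printf("startup_cost = %.0f, run_cost = %.0f\n", startup_cost, run_cost);
	return 0;
}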
4403
4404/*
4405 * final_cost_hashjoin
4406 * Final estimate of the cost and result size of a hashjoin path.
4407 *
4408 * Note: the numbatches estimate is also saved into 'path' for use later
4409 *
4410 * 'path' is already filled in except for the rows and cost fields and
4411 * num_batches
4412 * 'workspace' is the result from initial_cost_hashjoin
4413 * 'extra' contains miscellaneous information about the join
4414 */
4415void
4416final_cost_hashjoin(PlannerInfo *root, HashPath *path,
4417 JoinCostWorkspace *workspace,
4418 JoinPathExtraData *extra)
4419{
4420 Path *outer_path = path->jpath.outerjoinpath;
4421 Path *inner_path = path->jpath.innerjoinpath;
4422 double outer_path_rows = outer_path->rows;
4423 double inner_path_rows = inner_path->rows;
4424 double inner_path_rows_total = workspace->inner_rows_total;
4425 List *hashclauses = path->path_hashclauses;
4426 Cost startup_cost = workspace->startup_cost;
4427 Cost run_cost = workspace->run_cost;
4428 int numbuckets = workspace->numbuckets;
4429 int numbatches = workspace->numbatches;
4430 Cost cpu_per_tuple;
4431 QualCost hash_qual_cost;
4432 QualCost qp_qual_cost;
4433 double hashjointuples;
4434 double virtualbuckets;
4435 Selectivity innerbucketsize;
4436 Selectivity innermcvfreq;
4437 ListCell *hcl;
4438
4439 /* Set the number of disabled nodes. */
4440 path->jpath.path.disabled_nodes = workspace->disabled_nodes;
4441
4442 /* Mark the path with the correct row estimate */
4443 if (path->jpath.path.param_info)
4444 path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
4445 else
4446 path->jpath.path.rows = path->jpath.path.parent->rows;
4447
4448 /* For partial paths, scale row estimate. */
4449 if (path->jpath.path.parallel_workers > 0)
4450 {
4451 double parallel_divisor = get_parallel_divisor(&path->jpath.path);
4452
4453 path->jpath.path.rows =
4454 clamp_row_est(path->jpath.path.rows / parallel_divisor);
4455 }
4456
4457 /* mark the path with estimated # of batches */
4458 path->num_batches = numbatches;
4459
4460 /* store the total number of tuples (sum of partial row estimates) */
4461 path->inner_rows_total = inner_path_rows_total;
4462
4463 /* and compute the number of "virtual" buckets in the whole join */
4464 virtualbuckets = (double) numbuckets * (double) numbatches;
4465
4466 /*
4467 * Determine bucketsize fraction and MCV frequency for the inner relation.
4468 * We use the smallest bucketsize or MCV frequency estimated for any
4469 * individual hashclause; this is undoubtedly conservative.
4470 *
4471 * BUT: if inner relation has been unique-ified, we can assume it's good
4472 * for hashing. This is important both because it's the right answer, and
4473 * because we avoid contaminating the cache with a value that's wrong for
4474 * non-unique-ified paths.
4475 */
4476 if (RELATION_WAS_MADE_UNIQUE(inner_path->parent, extra->sjinfo,
4477 path->jpath.jointype))
4478 {
4479 innerbucketsize = 1.0 / virtualbuckets;
4480 innermcvfreq = 0.0;
4481 }
4482 else
4483 {
4484 List *otherclauses;
4485
4486 innerbucketsize = 1.0;
4487 innermcvfreq = 1.0;
4488
4489 /* At first, try to estimate bucket size using extended statistics. */
4490 otherclauses = estimate_multivariate_bucketsize(root,
4491 inner_path->parent,
4492 hashclauses,
4493 &innerbucketsize);
4494
4495 /* Pass through the remaining clauses */
4496 foreach(hcl, otherclauses)
4497 {
4498 RestrictInfo *restrictinfo = lfirst_node(RestrictInfo, hcl);
4499 Selectivity thisbucketsize;
4500 Selectivity thismcvfreq;
4501
4502 /*
4503 * First we have to figure out which side of the hashjoin clause
4504 * is the inner side.
4505 *
4506 * Since we tend to visit the same clauses over and over when
4507 * planning a large query, we cache the bucket stats estimates in
4508 * the RestrictInfo node to avoid repeated lookups of statistics.
4509 */
4510 if (bms_is_subset(restrictinfo->right_relids,
4511 inner_path->parent->relids))
4512 {
4513 /* righthand side is inner */
4514 thisbucketsize = restrictinfo->right_bucketsize;
4515 if (thisbucketsize < 0)
4516 {
4517 /* not cached yet */
4518 estimate_hash_bucket_stats(root,
4519 get_rightop(restrictinfo->clause),
4520 virtualbuckets,
4521 &restrictinfo->right_mcvfreq,
4522 &restrictinfo->right_bucketsize);
4523 thisbucketsize = restrictinfo->right_bucketsize;
4524 }
4525 thismcvfreq = restrictinfo->right_mcvfreq;
4526 }
4527 else
4528 {
4529 Assert(bms_is_subset(restrictinfo->left_relids,
4530 inner_path->parent->relids));
4531 /* lefthand side is inner */
4532 thisbucketsize = restrictinfo->left_bucketsize;
4533 if (thisbucketsize < 0)
4534 {
4535 /* not cached yet */
4536 estimate_hash_bucket_stats(root,
4537 get_leftop(restrictinfo->clause),
4538 virtualbuckets,
4539 &restrictinfo->left_mcvfreq,
4540 &restrictinfo->left_bucketsize);
4541 thisbucketsize = restrictinfo->left_bucketsize;
4542 }
4543 thismcvfreq = restrictinfo->left_mcvfreq;
4544 }
4545
4546 if (innerbucketsize > thisbucketsize)
4547 innerbucketsize = thisbucketsize;
4548 /* Disregard zero for MCV freq, it means we have no data */
4549 if (thismcvfreq > 0.0 && innermcvfreq > thismcvfreq)
4550 innermcvfreq = thismcvfreq;
4551 }
4552 }
4553
4554 /*
4555 * If the bucket holding the inner MCV would exceed hash_mem, we don't
4556 * want to hash unless there is really no other alternative, so apply
4557 * disable_cost. (The executor normally copes with excessive memory usage
4558 * by splitting batches, but obviously it cannot separate equal values
4559 * that way, so it will be unable to drive the batch size below hash_mem
4560 * when this is true.)
4561 */
4562 if (relation_byte_size(clamp_row_est(inner_path_rows * innermcvfreq),
4563 inner_path->pathtarget->width) > get_hash_memory_limit())
4564 startup_cost += disable_cost;
4565
4566 /*
4567 * Compute cost of the hashquals and qpquals (other restriction clauses)
4568 * separately.
4569 */
4570 cost_qual_eval(&hash_qual_cost, hashclauses, root);
4571 cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
4572 qp_qual_cost.startup -= hash_qual_cost.startup;
4573 qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
4574
4575 /* CPU costs */
4576
4577 if (path->jpath.jointype == JOIN_SEMI ||
4578 path->jpath.jointype == JOIN_ANTI ||
4579 extra->inner_unique)
4580 {
4581 double outer_matched_rows;
4582 Selectivity inner_scan_frac;
4583
4584 /*
4585 * With a SEMI or ANTI join, or if the innerrel is known unique, the
4586 * executor will stop after the first match.
4587 *
4588 * For an outer-rel row that has at least one match, we can expect the
4589 * bucket scan to stop after a fraction 1/(match_count+1) of the
4590 * bucket's rows, if the matches are evenly distributed. Since they
4591 * probably aren't quite evenly distributed, we apply a fuzz factor of
4592 * 2.0 to that fraction. (If we used a larger fuzz factor, we'd have
4593 * to clamp inner_scan_frac to at most 1.0; but since match_count is
4594 * at least 1, no such clamp is needed now.)
4595 */
4596 outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
4597 inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
4598
4599 startup_cost += hash_qual_cost.startup;
4600 run_cost += hash_qual_cost.per_tuple * outer_matched_rows *
4601 clamp_row_est(inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;
4602
4603 /*
4604 * For unmatched outer-rel rows, the picture is quite a lot different.
4605 * In the first place, there is no reason to assume that these rows
4606 * preferentially hit heavily-populated buckets; instead assume they
4607 * are uncorrelated with the inner distribution and so they see an
4608 * average bucket size of inner_path_rows / virtualbuckets. In the
4609 * second place, it seems likely that they will have few if any exact
4610 * hash-code matches and so very few of the tuples in the bucket will
4611 * actually require eval of the hash quals. We don't have any good
4612 * way to estimate how many will, but for the moment assume that the
4613 * effective cost per bucket entry is one-tenth what it is for
4614 * matchable tuples.
4615 */
4616 run_cost += hash_qual_cost.per_tuple *
4617 (outer_path_rows - outer_matched_rows) *
4618 clamp_row_est(inner_path_rows / virtualbuckets) * 0.05;
4619
4620 /* Get # of tuples that will pass the basic join */
4621 if (path->jpath.jointype == JOIN_ANTI)
4622 hashjointuples = outer_path_rows - outer_matched_rows;
4623 else
4624 hashjointuples = outer_matched_rows;
4625 }
4626 else
4627 {
4628 /*
4629 * The number of tuple comparisons needed is the number of outer
4630 * tuples times the typical number of tuples in a hash bucket, which
4631 * is the inner relation size times its bucketsize fraction. At each
4632 * one, we need to evaluate the hashjoin quals. But actually,
4633 * charging the full qual eval cost at each tuple is pessimistic,
4634 * since we don't evaluate the quals unless the hash values match
4635 * exactly. For lack of a better idea, halve the cost estimate to
4636 * allow for that.
4637 */
4638 startup_cost += hash_qual_cost.startup;
4639 run_cost += hash_qual_cost.per_tuple * outer_path_rows *
4640 clamp_row_est(inner_path_rows * innerbucketsize) * 0.5;
4641
4642 /*
4643 * Get approx # tuples passing the hashquals. We use
4644 * approx_tuple_count here because we need an estimate done with
4645 * JOIN_INNER semantics.
4646 */
4647 hashjointuples = approx_tuple_count(root, &path->jpath, hashclauses);
4648 }
4649
4650 /*
4651 * For each tuple that gets through the hashjoin proper, we charge
4652 * cpu_tuple_cost plus the cost of evaluating additional restriction
4653 * clauses that are to be applied at the join. (This is pessimistic since
4654 * not all of the quals may get evaluated at each tuple.)
4655 */
4656 startup_cost += qp_qual_cost.startup;
4657 cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
4658 run_cost += cpu_per_tuple * hashjointuples;
4659
4660 /* tlist eval costs are paid per output row, not per tuple scanned */
4661 startup_cost += path->jpath.path.pathtarget->cost.startup;
4662 run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
4663
4664 path->jpath.path.startup_cost = startup_cost;
4665 path->jpath.path.total_cost = startup_cost + run_cost;
4666}
4667
4668
4669/*
4670 * cost_subplan
4671 * Figure the costs for a SubPlan (or initplan).
4672 *
4673 * Note: we could dig the subplan's Plan out of the root list, but in practice
4674 * all callers have it handy already, so we make them pass it.
4675 */
4676void
4677cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
4678{
4679 QualCost sp_cost;
4680
4681 /*
4682 * Figure any cost for evaluating the testexpr.
4683 *
4684 * Usually, SubPlan nodes are built very early, before we have constructed
4685 * any RelOptInfos for the parent query level, which means the parent root
4686 * does not yet contain enough information to safely consult statistics.
4687 * Therefore, we pass root as NULL here. cost_qual_eval() is already
4688 * well-equipped to handle a NULL root.
4689 *
4690 * One exception is SubPlan nodes built for the initplans of MIN/MAX
4691 * aggregates from indexes (cf. SS_make_initplan_from_plan). In this
4692 * case, having a NULL root is safe because testexpr will be NULL.
4693 * Besides, an initplan will by definition not consult anything from the
4694 * parent plan.
4695 */
4696 cost_qual_eval(&sp_cost,
4697 make_ands_implicit((Expr *) subplan->testexpr),
4698 NULL);
4699
4700 if (subplan->useHashTable)
4701 {
4702 /*
4703 * If we are using a hash table for the subquery outputs, then the
4704 * cost of evaluating the query is a one-time cost. We charge one
4705 * cpu_operator_cost per tuple for the work of loading the hashtable,
4706 * too.
4707 */
4708 sp_cost.startup += plan->total_cost +
4709 cpu_operator_cost * plan->plan_rows;
4710
4711 /*
4712 * The per-tuple costs include the cost of evaluating the lefthand
4713 * expressions, plus the cost of probing the hashtable. We already
4714 * accounted for the lefthand expressions as part of the testexpr, and
4715 * will also have counted one cpu_operator_cost for each comparison
4716 * operator. That is probably too low for the probing cost, but it's
4717 * hard to make a better estimate, so live with it for now.
4718 */
4719 }
4720 else
4721 {
4722 /*
4723 * Otherwise we will be rescanning the subplan output on each
4724 * evaluation. We need to estimate how much of the output we will
4725 * actually need to scan. NOTE: this logic should agree with the
4726 * tuple_fraction estimates used by make_subplan() in
4727 * plan/subselect.c.
4728 */
4729 Cost plan_run_cost = plan->total_cost - plan->startup_cost;
4730
4731 if (subplan->subLinkType == EXISTS_SUBLINK)
4732 {
4733 /* we only need to fetch 1 tuple; clamp to avoid zero divide */
4734 sp_cost.per_tuple += plan_run_cost / clamp_row_est(plan->plan_rows);
4735 }
4736 else if (subplan->subLinkType == ALL_SUBLINK ||
4737 subplan->subLinkType == ANY_SUBLINK)
4738 {
4739 /* assume we need 50% of the tuples */
4740 sp_cost.per_tuple += 0.50 * plan_run_cost;
4741 /* also charge a cpu_operator_cost per row examined */
4742 sp_cost.per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
4743 }
4744 else
4745 {
4746 /* assume we need all tuples */
4747 sp_cost.per_tuple += plan_run_cost;
4748 }
4749
4750 /*
4751 * Also account for subplan's startup cost. If the subplan is
4752 * uncorrelated or undirect correlated, AND its topmost node is one
4753 * that materializes its output, assume that we'll only need to pay
4754 * its startup cost once; otherwise assume we pay the startup cost
4755 * every time.
4756 */
4757 if (subplan->parParam == NIL &&
4758 ExecMaterializesOutput(nodeTag(plan)))
4759 sp_cost.startup += plan->startup_cost;
4760 else
4761 sp_cost.per_tuple += plan->startup_cost;
4762 }
4763
4764 subplan->disabled_nodes = plan->disabled_nodes;
4765 subplan->startup_cost = sp_cost.startup;
4766 subplan->per_call_cost = sp_cost.per_tuple;
4767}
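The EXISTS versus ANY/ALL split above amounts to charging a different fraction of the subplan's run cost per call; illustrated here with invented plan figures:

#include <stdio.h>

int
main(void)
{
	/* Invented subplan figures */
	double		startup_cost = 10.0;
	double		total_cost = 1010.0;
	double		plan_rows = 5000.0;
	double		cpu_operator_cost = 0.0025;
	double		plan_run_cost = total_cost - startup_cost;

	/* EXISTS: expect to fetch only the first row */
	double		exists_per_call = plan_run_cost / plan_rows;
	/* ANY/ALL: assume half the rows are scanned, plus a comparison per row */
	double		any_per_call = 0.50 * plan_run_cost +
		0.50 * plan_rows * cpu_operator_cost;

	printf("EXISTS per call ~%.3f, ANY per call ~%.3f\n",
		   exists_per_call, any_per_call);
	return 0;
}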
4768
4769
4770/*
4771 * cost_rescan
4772 * Given a finished Path, estimate the costs of rescanning it after
4773 * having done so the first time. For some Path types a rescan is
4774 * cheaper than an original scan (if no parameters change), and this
4775 * function embodies knowledge about that. The default is to return
4776 * the same costs stored in the Path. (Note that the cost estimates
4777 * actually stored in Paths are always for first scans.)
4778 *
4779 * This function is not currently intended to model effects such as rescans
4780 * being cheaper due to disk block caching; what we are concerned with is
4781 * plan types wherein the executor caches results explicitly, or doesn't
4782 * redo startup calculations, etc.
4783 */
4784static void
4785cost_rescan(PlannerInfo *root, Path *path,
4786 Cost *rescan_startup_cost, /* output parameters */
4787 Cost *rescan_total_cost)
4788{
4789 switch (path->pathtype)
4790 {
4791 case T_FunctionScan:
4792
4793 /*
4794 * Currently, nodeFunctionscan.c always executes the function to
4795 * completion before returning any rows, and caches the results in
4796 * a tuplestore. So the function eval cost is all startup cost
4797 * and isn't paid over again on rescans. However, all run costs
4798 * will be paid over again.
4799 */
4800 *rescan_startup_cost = 0;
4801 *rescan_total_cost = path->total_cost - path->startup_cost;
4802 break;
4803 case T_HashJoin:
4804
4805 /*
4806 * If it's a single-batch join, we don't need to rebuild the hash
4807 * table during a rescan.
4808 */
4809 if (((HashPath *) path)->num_batches == 1)
4810 {
4811 /* Startup cost is exactly the cost of hash table building */
4812 *rescan_startup_cost = 0;
4813 *rescan_total_cost = path->total_cost - path->startup_cost;
4814 }
4815 else
4816 {
4817 /* Otherwise, no special treatment */
4818 *rescan_startup_cost = path->startup_cost;
4819 *rescan_total_cost = path->total_cost;
4820 }
4821 break;
4822 case T_CteScan:
4823 case T_WorkTableScan:
4824 {
4825 /*
4826 * These plan types materialize their final result in a
4827 * tuplestore or tuplesort object. So the rescan cost is only
4828 * cpu_tuple_cost per tuple, unless the result is large enough
4829 * to spill to disk.
4830 */
4831 Cost run_cost = cpu_tuple_cost * path->rows;
4832 double nbytes = relation_byte_size(path->rows,
4833 path->pathtarget->width);
4834 double work_mem_bytes = work_mem * (Size) 1024;
4835
4836 if (nbytes > work_mem_bytes)
4837 {
4838 /* It will spill, so account for re-read cost */
4839 double npages = ceil(nbytes / BLCKSZ);
4840
4841 run_cost += seq_page_cost * npages;
4842 }
4843 *rescan_startup_cost = 0;
4844 *rescan_total_cost = run_cost;
4845 }
4846 break;
4847 case T_Material:
4848 case T_Sort:
4849 {
4850 /*
4851 * These plan types not only materialize their results, but do
4852 * not implement qual filtering or projection. So they are
4853 * even cheaper to rescan than the ones above. We charge only
4854 * cpu_operator_cost per tuple. (Note: keep that in sync with
4855 * the run_cost charge in cost_sort, and also see comments in
4856 * cost_material before you change it.)
4857 */
4858 Cost run_cost = cpu_operator_cost * path->rows;
4859 double nbytes = relation_byte_size(path->rows,
4860 path->pathtarget->width);
4861 double work_mem_bytes = work_mem * (Size) 1024;
4862
4863 if (nbytes > work_mem_bytes)
4864 {
4865 /* It will spill, so account for re-read cost */
4866 double npages = ceil(nbytes / BLCKSZ);
4867
4868 run_cost += seq_page_cost * npages;
4869 }
4870 *rescan_startup_cost = 0;
4871 *rescan_total_cost = run_cost;
4872 }
4873 break;
4874 case T_Memoize:
4875 /* All the hard work is done by cost_memoize_rescan */
4876 cost_memoize_rescan(root, (MemoizePath *) path,
4877 rescan_startup_cost, rescan_total_cost);
4878 break;
4879 default:
4880 *rescan_startup_cost = path->startup_cost;
4881 *rescan_total_cost = path->total_cost;
4882 break;
4883 }
4884}
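The tuplestore/tuplesort rescan model above (cheap re-read unless the result spills past work_mem) can be tried with invented numbers; BLCKSZ is assumed to be 8192 and the per-tuple overhead is a rough stand-in for relation_byte_size().

#include <math.h>
#include <stdio.h>

int
main(void)
{
	/* Invented figures for a Material node's output */
	double		rows = 1000000.0;
	int			width = 100;	/* pathtarget width */
	double		cpu_operator_cost = 0.0025;
	double		seq_page_cost = 1.0;
	double		work_mem_kb = 4096.0;	/* 4MB work_mem */
	double		nbytes = rows * (width + 24);	/* crude relation_byte_size() stand-in */
	double		run_cost = cpu_operator_cost * rows;

	if (nbytes > work_mem_kb * 1024.0)
	{
		/* tuplestore spills, so rescans must re-read it from disk */
		double		npages = ceil(nbytes / 8192.0);	/* assumed BLCKSZ */

		run_cost += seq_page_cost * npages;
	}

	printf("rescan startup = 0, rescan total ~%.0f\n", run_cost);
	return 0;
}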
4885
4886
4887/*
4888 * cost_qual_eval
4889 * Estimate the CPU costs of evaluating a WHERE clause.
4890 * The input can be either an implicitly-ANDed list of boolean
4891 * expressions, or a list of RestrictInfo nodes. (The latter is
4892 * preferred since it allows caching of the results.)
4893 * The result includes both a one-time (startup) component,
4894 * and a per-evaluation component.
4895 *
4896 * Note: in some code paths root can be passed as NULL, resulting in
4897 * slightly worse estimates.
4898 */
4899void
4900cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
4901{
4902 cost_qual_eval_context context;
4903 ListCell *l;
4904
4905 context.root = root;
4906 context.total.startup = 0;
4907 context.total.per_tuple = 0;
4908
4909 /* We don't charge any cost for the implicit ANDing at top level ... */
4910
4911 foreach(l, quals)
4912 {
4913 Node *qual = (Node *) lfirst(l);
4914
4915 cost_qual_eval_walker(qual, &context);
4916 }
4917
4918 *cost = context.total;
4919}
4920
4921/*
4922 * cost_qual_eval_node
4923 * As above, for a single RestrictInfo or expression.
4924 */
4925void
4926cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
4927{
4928 cost_qual_eval_context context;
4929
4930 context.root = root;
4931 context.total.startup = 0;
4932 context.total.per_tuple = 0;
4933
4934 cost_qual_eval_walker(qual, &context);
4935
4936 *cost = context.total;
4937}
4938
4939static bool
4940cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
4941{
4942 if (node == NULL)
4943 return false;
4944
4945 /*
4946 * RestrictInfo nodes contain an eval_cost field reserved for this
4947 * routine's use, so that it's not necessary to evaluate the qual clause's
4948 * cost more than once. If the clause's cost hasn't been computed yet,
4949 * the field's startup value will contain -1.
4950 */
4951 if (IsA(node, RestrictInfo))
4952 {
4953 RestrictInfo *rinfo = (RestrictInfo *) node;
4954
4955 if (rinfo->eval_cost.startup < 0)
4956 {
4957 cost_qual_eval_context locContext;
4958
4959 locContext.root = context->root;
4960 locContext.total.startup = 0;
4961 locContext.total.per_tuple = 0;
4962
4963 /*
4964 * For an OR clause, recurse into the marked-up tree so that we
4965 * set the eval_cost for contained RestrictInfos too.
4966 */
4967 if (rinfo->orclause)
4968 cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
4969 else
4970 cost_qual_eval_walker((Node *) rinfo->clause, &locContext);
4971
4972 /*
4973 * If the RestrictInfo is marked pseudoconstant, it will be tested
4974 * only once, so treat its cost as all startup cost.
4975 */
4976 if (rinfo->pseudoconstant)
4977 {
4978 /* count one execution during startup */
4979 locContext.total.startup += locContext.total.per_tuple;
4980 locContext.total.per_tuple = 0;
4981 }
4982 rinfo->eval_cost = locContext.total;
4983 }
4984 context->total.startup += rinfo->eval_cost.startup;
4985 context->total.per_tuple += rinfo->eval_cost.per_tuple;
4986 /* do NOT recurse into children */
4987 return false;
4988 }
4989
4990 /*
4991 * For each operator or function node in the given tree, we charge the
4992 * estimated execution cost given by pg_proc.procost (remember to multiply
4993 * this by cpu_operator_cost).
4994 *
4995 * Vars and Consts are charged zero, and so are boolean operators (AND,
4996 * OR, NOT). Simplistic, but a lot better than no model at all.
4997 *
4998 * Should we try to account for the possibility of short-circuit
4999 * evaluation of AND/OR? Probably *not*, because that would make the
5000 * results depend on the clause ordering, and we are not in any position
5001 * to expect that the current ordering of the clauses is the one that's
5002 * going to end up being used. The above per-RestrictInfo caching would
5003 * not mix well with trying to re-order clauses anyway.
5004 *
5005 * Another issue that is entirely ignored here is that if a set-returning
5006 * function is below top level in the tree, the functions/operators above
5007 * it will need to be evaluated multiple times. In practical use, such
5008 * cases arise so seldom as to not be worth the added complexity needed;
5009 * moreover, since our rowcount estimates for functions tend to be pretty
5010 * phony, the results would also be pretty phony.
5011 */
5012 if (IsA(node, FuncExpr))
5013 {
5014 add_function_cost(context->root, ((FuncExpr *) node)->funcid, node,
5015 &context->total);
5016 }
5017 else if (IsA(node, OpExpr) ||
5018 IsA(node, DistinctExpr) ||
5019 IsA(node, NullIfExpr))
5020 {
5021 /* rely on struct equivalence to treat these all alike */
5022 set_opfuncid((OpExpr *) node);
5023 add_function_cost(context->root, ((OpExpr *) node)->opfuncid, node,
5024 &context->total);
5025 }
5026 else if (IsA(node, ScalarArrayOpExpr))
5027 {
5028 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
5029 Node *arraynode = (Node *) lsecond(saop->args);
5030 QualCost sacosts;
5031 QualCost hcosts;
5032 double estarraylen = estimate_array_length(context->root, arraynode);
5033
5034 set_sa_opfuncid(saop);
5035 sacosts.startup = sacosts.per_tuple = 0;
5036 add_function_cost(context->root, saop->opfuncid, NULL,
5037 &sacosts);
5038
5039 if (OidIsValid(saop->hashfuncid))
5040 {
5041 /* Handle costs for hashed ScalarArrayOpExpr */
5042 hcosts.startup = hcosts.per_tuple = 0;
5043
5044 add_function_cost(context->root, saop->hashfuncid, NULL, &hcosts);
5045 context->total.startup += sacosts.startup + hcosts.startup;
5046
5047 /* Estimate the cost of building the hashtable. */
5048 context->total.startup += estarraylen * hcosts.per_tuple;
5049
5050 /*
5051 * XXX should we charge a little bit for sacosts.per_tuple when
5052 * building the table, or is it ok to assume there will be zero
5053 * hash collision?
5054 */
5055
5056 /*
5057 * Charge for hashtable lookups. Charge a single hash and a
5058 * single comparison.
5059 */
5060 context->total.per_tuple += hcosts.per_tuple + sacosts.per_tuple;
5061 }
5062 else
5063 {
5064 /*
5065 * Estimate that the operator will be applied to about half of the
5066 * array elements before the answer is determined.
5067 */
5068 context->total.startup += sacosts.startup;
5069 context->total.per_tuple += sacosts.per_tuple *
5070 estimate_array_length(context->root, arraynode) * 0.5;
5071 }
5072 }
5073 else if (IsA(node, Aggref) ||
5074 IsA(node, WindowFunc))
5075 {
5076 /*
5077 * Aggref and WindowFunc nodes are (and should be) treated like Vars,
5078 * ie, zero execution cost in the current model, because they behave
5079 * essentially like Vars at execution. We disregard the costs of
5080 * their input expressions for the same reason. The actual execution
5081 * costs of the aggregate/window functions and their arguments have to
5082 * be factored into plan-node-specific costing of the Agg or WindowAgg
5083 * plan node.
5084 */
5085 return false; /* don't recurse into children */
5086 }
5087 else if (IsA(node, GroupingFunc))
5088 {
5089 /* Treat this as having cost 1 */
5090 context->total.per_tuple += cpu_operator_cost;
5091 return false; /* don't recurse into children */
5092 }
5093 else if (IsA(node, CoerceViaIO))
5094 {
5095 CoerceViaIO *iocoerce = (CoerceViaIO *) node;
5096 Oid iofunc;
5097 Oid typioparam;
5098 bool typisvarlena;
5099
5100 /* check the result type's input function */
5101 getTypeInputInfo(iocoerce->resulttype,
5102 &iofunc, &typioparam);
5103 add_function_cost(context->root, iofunc, NULL,
5104 &context->total);
5105 /* check the input type's output function */
5106 getTypeOutputInfo(exprType((Node *) iocoerce->arg),
5107 &iofunc, &typisvarlena);
5108 add_function_cost(context->root, iofunc, NULL,
5109 &context->total);
5110 }
5111 else if (IsA(node, ArrayCoerceExpr))
5112 {
5113 ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
5114 QualCost perelemcost;
5115
5116 cost_qual_eval_node(&perelemcost, (Node *) acoerce->elemexpr,
5117 context->root);
5118 context->total.startup += perelemcost.startup;
5119 if (perelemcost.per_tuple > 0)
5120 context->total.per_tuple += perelemcost.per_tuple *
5121 estimate_array_length(context->root, (Node *) acoerce->arg);
5122 }
5123 else if (IsA(node, RowCompareExpr))
5124 {
5125 /* Conservatively assume we will check all the columns */
5126 RowCompareExpr *rcexpr = (RowCompareExpr *) node;
5127 ListCell *lc;
5128
5129 foreach(lc, rcexpr->opnos)
5130 {
5131 Oid opid = lfirst_oid(lc);
5132
5133 add_function_cost(context->root, get_opcode(opid), NULL,
5134 &context->total);
5135 }
5136 }
5137 else if (IsA(node, MinMaxExpr) ||
5138 IsA(node, SQLValueFunction) ||
5139 IsA(node, XmlExpr) ||
5140 IsA(node, CoerceToDomain) ||
5141 IsA(node, NextValueExpr) ||
5142 IsA(node, JsonExpr))
5143 {
5144 /* Treat all these as having cost 1 */
5145 context->total.per_tuple += cpu_operator_cost;
5146 }
5147 else if (IsA(node, SubLink))
5148 {
5149 /* This routine should not be applied to un-planned expressions */
5150 elog(ERROR, "cannot handle unplanned sub-select");
5151 }
5152 else if (IsA(node, SubPlan))
5153 {
5154 /*
5155 * A subplan node in an expression typically indicates that the
5156 * subplan will be executed on each evaluation, so charge accordingly.
5157 * (Sub-selects that can be executed as InitPlans have already been
5158 * removed from the expression.)
5159 */
5160 SubPlan *subplan = (SubPlan *) node;
5161
5162 context->total.startup += subplan->startup_cost;
5163 context->total.per_tuple += subplan->per_call_cost;
5164
5165 /*
5166 * We don't want to recurse into the testexpr, because it was already
5167 * counted in the SubPlan node's costs. So we're done.
5168 */
5169 return false;
5170 }
5171 else if (IsA(node, AlternativeSubPlan))
5172 {
5173 /*
5174 * Arbitrarily use the first alternative plan for costing. (We should
5175 * certainly only include one alternative, and we don't yet have
5176 * enough information to know which one the executor is most likely to
5177 * use.)
5178 */
5179 AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;
5180
5181 return cost_qual_eval_walker((Node *) linitial(asplan->subplans),
5182 context);
5183 }
5184 else if (IsA(node, PlaceHolderVar))
5185 {
5186 /*
5187 * A PlaceHolderVar should be given cost zero when considering general
5188 * expression evaluation costs. The expense of doing the contained
5189 * expression is charged as part of the tlist eval costs of the scan
5190 * or join where the PHV is first computed (see set_rel_width and
5191 * add_placeholders_to_joinrel). If we charged it again here, we'd be
5192 * double-counting the cost for each level of plan that the PHV
5193 * bubbles up through. Hence, return without recursing into the
5194 * phexpr.
5195 */
5196 return false;
5197 }
5198
5199 /* recurse into children */
5200 return expression_tree_walker(node, cost_qual_eval_walker, context);
5201}
5202
5203/*
5204 * get_restriction_qual_cost
5205 * Compute evaluation costs of a baserel's restriction quals, plus any
5206 * movable join quals that have been pushed down to the scan.
5207 * Results are returned into *qpqual_cost.
5208 *
5209 * This is a convenience subroutine that works for seqscans and other cases
5210 * where all the given quals will be evaluated the hard way. It's not useful
5211 * for cost_index(), for example, where the index machinery takes care of
5212 * some of the quals. We assume baserestrictcost was previously set by
5213 * set_baserel_size_estimates().
5214 */
5215static void
5216get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
5217 ParamPathInfo *param_info,
5218 QualCost *qpqual_cost)
5219{
5220 if (param_info)
5221 {
5222 /* Include costs of pushed-down clauses */
5223 cost_qual_eval(qpqual_cost, param_info->ppi_clauses, root);
5224
5225 qpqual_cost->startup += baserel->baserestrictcost.startup;
5226 qpqual_cost->per_tuple += baserel->baserestrictcost.per_tuple;
5227 }
5228 else
5229 *qpqual_cost = baserel->baserestrictcost;
5230}
5231
5232
5233/*
5234 * compute_semi_anti_join_factors
5235 * Estimate how much of the inner input a SEMI, ANTI, or inner_unique join
5236 * can be expected to scan.
5237 *
5238 * In a hash or nestloop SEMI/ANTI join, the executor will stop scanning
5239 * inner rows as soon as it finds a match to the current outer row.
5240 * The same happens if we have detected the inner rel is unique.
5241 * We should therefore adjust some of the cost components for this effect.
5242 * This function computes some estimates needed for these adjustments.
5243 * These estimates will be the same regardless of the particular paths used
5244 * for the outer and inner relation, so we compute these once and then pass
5245 * them to all the join cost estimation functions.
5246 *
5247 * Input parameters:
5248 * joinrel: join relation under consideration
5249 * outerrel: outer relation under consideration
5250 * innerrel: inner relation under consideration
5251 * jointype: if not JOIN_SEMI or JOIN_ANTI, we assume it's inner_unique
5252 * sjinfo: SpecialJoinInfo relevant to this join
5253 * restrictlist: join quals
5254 * Output parameters:
5255 * *semifactors is filled in (see pathnodes.h for field definitions)
5256 */
5257void
5258compute_semi_anti_join_factors(PlannerInfo *root,
5259 RelOptInfo *joinrel,
5260 RelOptInfo *outerrel,
5261 RelOptInfo *innerrel,
5262 JoinType jointype,
5263 SpecialJoinInfo *sjinfo,
5264 List *restrictlist,
5265 SemiAntiJoinFactors *semifactors)
5266{
5267 Selectivity jselec;
5268 Selectivity nselec;
5269 Selectivity avgmatch;
5270 SpecialJoinInfo norm_sjinfo;
5271 List *joinquals;
5272 ListCell *l;
5273
5274 /*
5275 * In an ANTI join, we must ignore clauses that are "pushed down", since
5276 * those won't affect the match logic. In a SEMI join, we do not
5277 * distinguish joinquals from "pushed down" quals, so just use the whole
5278 * restrictinfo list. For other outer join types, we should consider only
5279 * non-pushed-down quals, so that this devolves to an IS_OUTER_JOIN check.
5280 */
5281 if (IS_OUTER_JOIN(jointype))
5282 {
5283 joinquals = NIL;
5284 foreach(l, restrictlist)
5285 {
5286 RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
5287
5288 if (!RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
5289 joinquals = lappend(joinquals, rinfo);
5290 }
5291 }
5292 else
5293 joinquals = restrictlist;
5294
5295 /*
5296 * Get the JOIN_SEMI or JOIN_ANTI selectivity of the join clauses.
5297 */
5298 jselec = clauselist_selectivity(root,
5299 joinquals,
5300 0,
5301 (jointype == JOIN_ANTI) ? JOIN_ANTI : JOIN_SEMI,
5302 sjinfo);
5303
5304 /*
5305 * Also get the normal inner-join selectivity of the join clauses.
5306 */
5307 init_dummy_sjinfo(&norm_sjinfo, outerrel->relids, innerrel->relids);
5308
5309 nselec = clauselist_selectivity(root,
5310 joinquals,
5311 0,
5312 JOIN_INNER,
5313 &norm_sjinfo);
5314
5315 /* Avoid leaking a lot of ListCells */
5316 if (IS_OUTER_JOIN(jointype))
5317 list_free(joinquals);
5318
5319 /*
5320 * jselec can be interpreted as the fraction of outer-rel rows that have
5321 * any matches (this is true for both SEMI and ANTI cases). And nselec is
5322 * the fraction of the Cartesian product that matches. So, the average
5323 * number of matches for each outer-rel row that has at least one match is
5324 * nselec * inner_rows / jselec.
5325 *
5326 * Note: it is correct to use the inner rel's "rows" count here, even
5327 * though we might later be considering a parameterized inner path with
5328 * fewer rows. This is because we have included all the join clauses in
5329 * the selectivity estimate.
5330 */
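 /*
  * Illustrative example (hypothetical numbers): if jselec = 0.2 (20% of
  * outer rows have at least one match), nselec = 0.01, and the inner rel
  * has 1000 rows, then the average number of matching inner rows per
  * matched outer row works out to 0.01 * 1000 / 0.2 = 50; values below
  * 1.0 are clamped up just below.
  */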
5331 if (jselec > 0) /* protect against zero divide */
5332 {
5333 avgmatch = nselec * innerrel->rows / jselec;
5334 /* Clamp to sane range */
5335 avgmatch = Max(1.0, avgmatch);
5336 }
5337 else
5338 avgmatch = 1.0;
5339
5340 semifactors->outer_match_frac = jselec;
5341 semifactors->match_count = avgmatch;
5342}
5343
5344/*
5345 * has_indexed_join_quals
5346 * Check whether all the joinquals of a nestloop join are used as
5347 * inner index quals.
5348 *
5349 * If the inner path of a SEMI/ANTI join is an indexscan (including bitmap
5350 * indexscan) that uses all the joinquals as indexquals, we can assume that an
5351 * unmatched outer tuple is cheap to process, whereas otherwise it's probably
5352 * expensive.
5353 */
5354static bool
5355has_indexed_join_quals(NestPath *path)
5356{
5357 JoinPath *joinpath = &path->jpath;
5358 Relids joinrelids = joinpath->path.parent->relids;
5359 Path *innerpath = joinpath->innerjoinpath;
5360 List *indexclauses;
5361 bool found_one;
5362 ListCell *lc;
5363
5364 /* If join still has quals to evaluate, it's not fast */
5365 if (joinpath->joinrestrictinfo != NIL)
5366 return false;
5367 /* Nor if the inner path isn't parameterized at all */
5368 if (innerpath->param_info == NULL)
5369 return false;
5370
5371 /* Find the indexclauses list for the inner scan */
5372 switch (innerpath->pathtype)
5373 {
5374 case T_IndexScan:
5375 case T_IndexOnlyScan:
5376 indexclauses = ((IndexPath *) innerpath)->indexclauses;
5377 break;
5378 case T_BitmapHeapScan:
5379 {
5380 /* Accept only a simple bitmap scan, not AND/OR cases */
5381 Path *bmqual = ((BitmapHeapPath *) innerpath)->bitmapqual;
5382
5383 if (IsA(bmqual, IndexPath))
5384 indexclauses = ((IndexPath *) bmqual)->indexclauses;
5385 else
5386 return false;
5387 break;
5388 }
5389 default:
5390
5391 /*
5392 * If it's not a simple indexscan, it probably doesn't run quickly
5393 * for zero rows out, even if it's a parameterized path using all
5394 * the joinquals.
5395 */
5396 return false;
5397 }
5398
5399 /*
5400 * Examine the inner path's param clauses. Any that are from the outer
5401 * path must be found in the indexclauses list, either exactly or in an
5402 * equivalent form generated by equivclass.c. Also, we must find at least
5403 * one such clause, else it's a clauseless join which isn't fast.
5404 */
5405 found_one = false;
5406 foreach(lc, innerpath->param_info->ppi_clauses)
5407 {
5408 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
5409
5410 if (join_clause_is_movable_into(rinfo,
5411 innerpath->parent->relids,
5412 joinrelids))
5413 {
5414 if (!is_redundant_with_indexclauses(rinfo, indexclauses))
5415 return false;
5416 found_one = true;
5417 }
5418 }
5419 return found_one;
5420}
5421
5422
5423/*
5424 * approx_tuple_count
5425 * Quick-and-dirty estimation of the number of join rows passing
5426 * a set of qual conditions.
5427 *
5428 * The quals can be either an implicitly-ANDed list of boolean expressions,
5429 * or a list of RestrictInfo nodes (typically the latter).
5430 *
5431 * We intentionally compute the selectivity under JOIN_INNER rules, even
5432 * if it's some type of outer join. This is appropriate because we are
5433 * trying to figure out how many tuples pass the initial merge or hash
5434 * join step.
5435 *
5436 * This is quick-and-dirty because we bypass clauselist_selectivity, and
5437 * simply multiply the independent clause selectivities together. Now
5438 * clauselist_selectivity often can't do any better than that anyhow, but
5439 * for some situations (such as range constraints) it is smarter. However,
5440 * we can't effectively cache the results of clauselist_selectivity, whereas
5441 * the individual clause selectivities can be and are cached.
5442 *
5443 * Since we are only using the results to estimate how many potential
5444 * output tuples are generated and passed through qpqual checking, it
5445 * seems OK to live with the approximation.
5446 */
5447static double
5448approx_tuple_count(PlannerInfo *root, JoinPath *path, List *quals)
5449{
5450 double tuples;
5451 double outer_tuples = path->outerjoinpath->rows;
5452 double inner_tuples = path->innerjoinpath->rows;
5453 SpecialJoinInfo sjinfo;
5454 Selectivity selec = 1.0;
5455 ListCell *l;
5456
5457 /*
5458 * Make up a SpecialJoinInfo for JOIN_INNER semantics.
5459 */
5460 init_dummy_sjinfo(&sjinfo, path->outerjoinpath->parent->relids,
5461 path->innerjoinpath->parent->relids);
5462
5463 /* Get the approximate selectivity */
5464 foreach(l, quals)
5465 {
5466 Node *qual = (Node *) lfirst(l);
5467
5468 /* Note that clause_selectivity will be able to cache its result */
5469 selec *= clause_selectivity(root, qual, 0, JOIN_INNER, &sjinfo);
5470 }
5471
5472 /* Apply it to the input relation sizes */
5473 tuples = selec * outer_tuples * inner_tuples;
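 /*
  * Illustrative example (hypothetical numbers): two quals with cached
  * selectivities 0.1 and 0.5 give selec = 0.05, so 10000 outer tuples
  * joined to 2000 inner tuples are estimated at 0.05 * 10000 * 2000 =
  * 1,000,000 rows out of the initial merge or hash step.
  */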
5474
5475 return clamp_row_est(tuples);
5476}
5477
5478
5479/*
5480 * set_baserel_size_estimates
5481 * Set the size estimates for the given base relation.
5482 *
5483 * The rel's targetlist and restrictinfo list must have been constructed
5484 * already, and rel->tuples must be set.
5485 *
5486 * We set the following fields of the rel node:
5487 * rows: the estimated number of output tuples (after applying
5488 * restriction clauses).
5489 * width: the estimated average output tuple width in bytes.
5490 * baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
5491 */
5492void
5493set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5494{
5495 double nrows;
5496
5497 /* Should only be applied to base relations */
5498 Assert(rel->relid > 0);
5499
5500 nrows = rel->tuples *
5501 clauselist_selectivity(root,
5502 rel->baserestrictinfo,
5503 0,
5504 JOIN_INNER,
5505 NULL);
5506
5507 rel->rows = clamp_row_est(nrows);
5508
5509 cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
5510
5511 set_rel_width(root, rel);
5512}
5513
5514/*
5515 * get_parameterized_baserel_size
5516 * Make a size estimate for a parameterized scan of a base relation.
5517 *
5518 * 'param_clauses' lists the additional join clauses to be used.
5519 *
5520 * set_baserel_size_estimates must have been applied already.
5521 */
5522double
5523get_parameterized_baserel_size(PlannerInfo *root, RelOptInfo *rel,
5524 List *param_clauses)
5525{
5526 List *allclauses;
5527 double nrows;
5528
5529 /*
5530 * Estimate the number of rows returned by the parameterized scan, knowing
5531 * that it will apply all the extra join clauses as well as the rel's own
5532 * restriction clauses. Note that we force the clauses to be treated as
5533 * non-join clauses during selectivity estimation.
5534 */
5535 allclauses = list_concat_copy(param_clauses, rel->baserestrictinfo);
5536 nrows = rel->tuples *
5537 clauselist_selectivity(root,
5538 allclauses,
5539 rel->relid, /* do not use 0! */
5540 JOIN_INNER,
5541 NULL);
5542 nrows = clamp_row_est(nrows);
5543 /* For safety, make sure result is not more than the base estimate */
5544 if (nrows > rel->rows)
5545 nrows = rel->rows;
5546 return nrows;
5547}
5548
5549/*
5550 * set_joinrel_size_estimates
5551 * Set the size estimates for the given join relation.
5552 *
5553 * The rel's targetlist must have been constructed already, and a
5554 * restriction clause list that matches the given component rels must
5555 * be provided.
5556 *
5557 * Since there is more than one way to make a joinrel for more than two
5558 * base relations, the results we get here could depend on which component
5559 * rel pair is provided. In theory we should get the same answers no matter
5560 * which pair is provided; in practice, since the selectivity estimation
5561 * routines don't handle all cases equally well, we might not. But there's
5562 * not much to be done about it. (Would it make sense to repeat the
5563 * calculations for each pair of input rels that's encountered, and somehow
5564 * average the results? Probably way more trouble than it's worth, and
5565 * anyway we must keep the rowcount estimate the same for all paths for the
5566 * joinrel.)
5567 *
5568 * We set only the rows field here. The reltarget field was already set by
5569 * build_joinrel_tlist, and baserestrictcost is not used for join rels.
5570 */
5571void
5572set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
5573 RelOptInfo *outer_rel,
5574 RelOptInfo *inner_rel,
5575 SpecialJoinInfo *sjinfo,
5576 List *restrictlist)
5577{
5578 rel->rows = calc_joinrel_size_estimate(root,
5579 rel,
5580 outer_rel,
5581 inner_rel,
5582 outer_rel->rows,
5583 inner_rel->rows,
5584 sjinfo,
5585 restrictlist);
5586}
5587
5588/*
5589 * get_parameterized_joinrel_size
5590 * Make a size estimate for a parameterized scan of a join relation.
5591 *
5592 * 'rel' is the joinrel under consideration.
5593 * 'outer_path', 'inner_path' are (probably also parameterized) Paths that
5594 * produce the relations being joined.
5595 * 'sjinfo' is any SpecialJoinInfo relevant to this join.
5596 * 'restrict_clauses' lists the join clauses that need to be applied at the
5597 * join node (including any movable clauses that were moved down to this join,
5598 * and not including any movable clauses that were pushed down into the
5599 * child paths).
5600 *
5601 * set_joinrel_size_estimates must have been applied already.
5602 */
5603double
5604get_parameterized_joinrel_size(PlannerInfo *root, RelOptInfo *rel,
5605 Path *outer_path,
5606 Path *inner_path,
5607 SpecialJoinInfo *sjinfo,
5608 List *restrict_clauses)
5609{
5610 double nrows;
5611
5612 /*
5613 * Estimate the number of rows returned by the parameterized join as the
5614 * sizes of the input paths times the selectivity of the clauses that have
5615 * ended up at this join node.
5616 *
5617 * As with set_joinrel_size_estimates, the rowcount estimate could depend
5618 * on the pair of input paths provided, though ideally we'd get the same
5619 * estimate for any pair with the same parameterization.
5620 */
5621 nrows = calc_joinrel_size_estimate(root,
5622 rel,
5623 outer_path->parent,
5624 inner_path->parent,
5625 outer_path->rows,
5626 inner_path->rows,
5627 sjinfo,
5628 restrict_clauses);
5629 /* For safety, make sure result is not more than the base estimate */
5630 if (nrows > rel->rows)
5631 nrows = rel->rows;
5632 return nrows;
5633}
5634
5635/*
5636 * calc_joinrel_size_estimate
5637 * Workhorse for set_joinrel_size_estimates and
5638 * get_parameterized_joinrel_size.
5639 *
5640 * outer_rel/inner_rel are the relations being joined, but they should be
5641 * assumed to have sizes outer_rows/inner_rows; those numbers might be less
5642 * than what rel->rows says, when we are considering parameterized paths.
5643 */
5644static double
5645calc_joinrel_size_estimate(PlannerInfo *root,
5646 RelOptInfo *joinrel,
5647 RelOptInfo *outer_rel,
5648 RelOptInfo *inner_rel,
5649 double outer_rows,
5650 double inner_rows,
5651 SpecialJoinInfo *sjinfo,
5652 List *restrictlist)
5653{
5654 JoinType jointype = sjinfo->jointype;
5655 Selectivity fkselec;
5656 Selectivity jselec;
5657 Selectivity pselec;
5658 double nrows;
5659
5660 /*
5661 * Compute joinclause selectivity. Note that we are only considering
5662 * clauses that become restriction clauses at this join level; we are not
5663 * double-counting them because they were not considered in estimating the
5664 * sizes of the component rels.
5665 *
5666 * First, see whether any of the joinclauses can be matched to known FK
5667 * constraints. If so, drop those clauses from the restrictlist, and
5668 * instead estimate their selectivity using FK semantics. (We do this
5669 * without regard to whether said clauses are local or "pushed down".
5670 * Probably, an FK-matching clause could never be seen as pushed down at
5671 * an outer join, since it would be strict and hence would be grounds for
5672 * join strength reduction.) fkselec gets the net selectivity for
5673 * FK-matching clauses, or 1.0 if there are none.
5674 */
5675 fkselec = get_foreign_key_join_selectivity(root,
5676 outer_rel->relids,
5677 inner_rel->relids,
5678 sjinfo,
5679 &restrictlist);
5680
5681 /*
5682 * For an outer join, we have to distinguish the selectivity of the join's
5683 * own clauses (JOIN/ON conditions) from any clauses that were "pushed
5684 * down". For inner joins we just count them all as joinclauses.
5685 */
5686 if (IS_OUTER_JOIN(jointype))
5687 {
5688 List *joinquals = NIL;
5689 List *pushedquals = NIL;
5690 ListCell *l;
5691
5692 /* Grovel through the clauses to separate into two lists */
5693 foreach(l, restrictlist)
5694 {
5695 RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
5696
5697 if (RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
5698 pushedquals = lappend(pushedquals, rinfo);
5699 else
5700 joinquals = lappend(joinquals, rinfo);
5701 }
5702
5703 /* Get the separate selectivities */
5704 jselec = clauselist_selectivity(root,
5705 joinquals,
5706 0,
5707 jointype,
5708 sjinfo);
5709 pselec = clauselist_selectivity(root,
5710 pushedquals,
5711 0,
5712 jointype,
5713 sjinfo);
5714
5715 /* Avoid leaking a lot of ListCells */
5716 list_free(joinquals);
5717 list_free(pushedquals);
5718 }
5719 else
5720 {
5721 jselec = clauselist_selectivity(root,
5722 restrictlist,
5723 0,
5724 jointype,
5725 sjinfo);
5726 pselec = 0.0; /* not used, keep compiler quiet */
5727 }
5728
5729 /*
5730 * Basically, we multiply size of Cartesian product by selectivity.
5731 *
5732 * If we are doing an outer join, take that into account: the joinqual
5733 * selectivity has to be clamped using the knowledge that the output must
5734 * be at least as large as the non-nullable input. However, any
5735 * pushed-down quals are applied after the outer join, so their
5736 * selectivity applies fully.
5737 *
5738 * For JOIN_SEMI and JOIN_ANTI, the selectivity is defined as the fraction
5739 * of LHS rows that have matches, and we apply that straightforwardly.
5740 */
5741 switch (jointype)
5742 {
5743 case JOIN_INNER:
5744 nrows = outer_rows * inner_rows * fkselec * jselec;
5745 /* pselec not used */
5746 break;
5747 case JOIN_LEFT:
5748 nrows = outer_rows * inner_rows * fkselec * jselec;
5749 if (nrows < outer_rows)
5750 nrows = outer_rows;
5751 nrows *= pselec;
5752 break;
5753 case JOIN_FULL:
5754 nrows = outer_rows * inner_rows * fkselec * jselec;
5755 if (nrows < outer_rows)
5756 nrows = outer_rows;
5757 if (nrows < inner_rows)
5758 nrows = inner_rows;
5759 nrows *= pselec;
5760 break;
5761 case JOIN_SEMI:
5762 nrows = outer_rows * fkselec * jselec;
5763 /* pselec not used */
5764 break;
5765 case JOIN_ANTI:
5766 nrows = outer_rows * (1.0 - fkselec * jselec);
5767 nrows *= pselec;
5768 break;
5769 default:
5770 /* other values not expected here */
5771 elog(ERROR, "unrecognized join type: %d", (int) jointype);
5772 nrows = 0; /* keep compiler quiet */
5773 break;
5774 }
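 /*
  * Illustrative example (hypothetical numbers): with outer_rows = 1000,
  * inner_rows = 500, fkselec = 1.0, jselec = 0.0005 and no pushed-down
  * quals, an inner join is estimated at 1000 * 500 * 0.0005 = 250 rows;
  * a left join is clamped up to the 1000 outer rows; a semi join gets
  * 1000 * 0.0005 = 0.5, which clamp_row_est() below raises to 1; and an
  * anti join gets 1000 * (1 - 0.0005), i.e. nearly all outer rows.
  */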
5775
5776 return clamp_row_est(nrows);
5777}
5778
5779/*
5780 * get_foreign_key_join_selectivity
5781 * Estimate join selectivity for foreign-key-related clauses.
5782 *
5783 * Remove any clauses that can be matched to FK constraints from *restrictlist,
5784 * and return a substitute estimate of their selectivity. 1.0 is returned
5785 * when there are no such clauses.
5786 *
5787 * The reason for treating such clauses specially is that we can get better
5788 * estimates this way than by relying on clauselist_selectivity(), especially
5789 * for multi-column FKs where that function's assumption that the clauses are
5790 * independent falls down badly. But even with single-column FKs, we may be
5791 * able to get a better answer when the pg_statistic stats are missing or out
5792 * of date.
5793 */
5794static Selectivity
5795get_foreign_key_join_selectivity(PlannerInfo *root,
5796 Relids outer_relids,
5797 Relids inner_relids,
5798 SpecialJoinInfo *sjinfo,
5799 List **restrictlist)
5800{
5801 Selectivity fkselec = 1.0;
5802 JoinType jointype = sjinfo->jointype;
5803 List *worklist = *restrictlist;
5804 ListCell *lc;
5805
5806 /* Consider each FK constraint that is known to match the query */
5807 foreach(lc, root->fkey_list)
5808 {
5809 ForeignKeyOptInfo *fkinfo = (ForeignKeyOptInfo *) lfirst(lc);
5810 bool ref_is_outer;
5811 List *removedlist;
5812 ListCell *cell;
5813
5814 /*
5815 * This FK is not relevant unless it connects a baserel on one side of
5816 * this join to a baserel on the other side.
5817 */
5818 if (bms_is_member(fkinfo->con_relid, outer_relids) &&
5819 bms_is_member(fkinfo->ref_relid, inner_relids))
5820 ref_is_outer = false;
5821 else if (bms_is_member(fkinfo->ref_relid, outer_relids) &&
5822 bms_is_member(fkinfo->con_relid, inner_relids))
5823 ref_is_outer = true;
5824 else
5825 continue;
5826
5827 /*
5828 * If we're dealing with a semi/anti join, and the FK's referenced
5829 * relation is on the outside, then knowledge of the FK doesn't help
5830 * us figure out what we need to know (which is the fraction of outer
5831 * rows that have matches). On the other hand, if the referenced rel
5832 * is on the inside, then all outer rows must have matches in the
5833 * referenced table (ignoring nulls). But any restriction or join
5834 * clauses that filter that table will reduce the fraction of matches.
5835 * We can account for restriction clauses, but it's too hard to guess
5836 * how many table rows would get through a join that's inside the RHS.
5837 * Hence, if either case applies, punt and ignore the FK.
5838 */
5839 if ((jointype == JOIN_SEMI || jointype == JOIN_ANTI) &&
5840 (ref_is_outer || bms_membership(inner_relids) != BMS_SINGLETON))
5841 continue;
5842
5843 /*
5844 * Modify the restrictlist by removing clauses that match the FK (and
5845 * putting them into removedlist instead). It seems unsafe to modify
5846 * the originally-passed List structure, so we make a shallow copy the
5847 * first time through.
5848 */
5849 if (worklist == *restrictlist)
5850 worklist = list_copy(worklist);
5851
5852 removedlist = NIL;
5853 foreach(cell, worklist)
5854 {
5855 RestrictInfo *rinfo = (RestrictInfo *) lfirst(cell);
5856 bool remove_it = false;
5857 int i;
5858
5859 /* Drop this clause if it matches any column of the FK */
5860 for (i = 0; i < fkinfo->nkeys; i++)
5861 {
5862 if (rinfo->parent_ec)
5863 {
5864 /*
5865 * EC-derived clauses can only match by EC. It is okay to
5866 * consider any clause derived from the same EC as
5867 * matching the FK: even if equivclass.c chose to generate
5868 * a clause equating some other pair of Vars, it could
5869 * have generated one equating the FK's Vars. So for
5870 * purposes of estimation, we can act as though it did so.
5871 *
5872 * Note: checking parent_ec is a bit of a cheat because
5873 * there are EC-derived clauses that don't have parent_ec
5874 * set; but such clauses must compare expressions that
5875 * aren't just Vars, so they cannot match the FK anyway.
5876 */
5877 if (fkinfo->eclass[i] == rinfo->parent_ec)
5878 {
5879 remove_it = true;
5880 break;
5881 }
5882 }
5883 else
5884 {
5885 /*
5886 * Otherwise, see if rinfo was previously matched to FK as
5887 * a "loose" clause.
5888 */
5889 if (list_member_ptr(fkinfo->rinfos[i], rinfo))
5890 {
5891 remove_it = true;
5892 break;
5893 }
5894 }
5895 }
5896 if (remove_it)
5897 {
5898 worklist = foreach_delete_current(worklist, cell);
5899 removedlist = lappend(removedlist, rinfo);
5900 }
5901 }
5902
5903 /*
5904 * If we failed to remove all the matching clauses we expected to
5905 * find, chicken out and ignore this FK; applying its selectivity
5906 * might result in double-counting. Put any clauses we did manage to
5907 * remove back into the worklist.
5908 *
5909 * Since the matching clauses are known not outerjoin-delayed, they
5910 * would normally have appeared in the initial joinclause list. If we
5911 * didn't find them, there are two possibilities:
5912 *
5913 * 1. If the FK match is based on an EC that is ec_has_const, it won't
5914 * have generated any join clauses at all. We discount such ECs while
5915 * checking to see if we have "all" the clauses. (Below, we'll adjust
5916 * the selectivity estimate for this case.)
5917 *
5918 * 2. The clauses were matched to some other FK in a previous
5919 * iteration of this loop, and thus removed from worklist. (A likely
5920 * case is that two FKs are matched to the same EC; there will be only
5921 * one EC-derived clause in the initial list, so the first FK will
5922 * consume it.) Applying both FKs' selectivity independently risks
5923 * underestimating the join size; in particular, this would undo one
5924 * of the main things that ECs were invented for, namely to avoid
5925 * double-counting the selectivity of redundant equality conditions.
5926 * Later we might think of a reasonable way to combine the estimates,
5927 * but for now, just punt, since this is a fairly uncommon situation.
5928 */
5929 if (removedlist == NIL ||
5930 list_length(removedlist) !=
5931 (fkinfo->nmatched_ec - fkinfo->nconst_ec + fkinfo->nmatched_ri))
5932 {
5933 worklist = list_concat(worklist, removedlist);
5934 continue;
5935 }
5936
5937 /*
5938 * Finally we get to the payoff: estimate selectivity using the
5939 * knowledge that each referencing row will match exactly one row in
5940 * the referenced table.
5941 *
5942 * XXX that's not true in the presence of nulls in the referencing
5943 * column(s), so in principle we should derate the estimate for those.
5944 * However (1) if there are any strict restriction clauses for the
5945 * referencing column(s) elsewhere in the query, derating here would
5946 * be double-counting the null fraction, and (2) it's not very clear
5947 * how to combine null fractions for multiple referencing columns. So
5948 * we do nothing for now about correcting for nulls.
5949 *
5950 * XXX another point here is that if either side of an FK constraint
5951 * is an inheritance parent, we estimate as though the constraint
5952 * covers all its children as well. This is not an unreasonable
5953 * assumption for a referencing table, ie the user probably applied
5954 * identical constraints to all child tables (though perhaps we ought
5955 * to check that). But it's not possible to have done that for a
5956 * referenced table. Fortunately, precisely because that doesn't
5957 * work, it is uncommon in practice to have an FK referencing a parent
5958 * table. So, at least for now, disregard inheritance here.
5959 */
5960 if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
5961 {
5962 /*
5963 * For JOIN_SEMI and JOIN_ANTI, we only get here when the FK's
5964 * referenced table is exactly the inside of the join. The join
5965 * selectivity is defined as the fraction of LHS rows that have
5966 * matches. The FK implies that every LHS row has a match *in the
5967 * referenced table*; but any restriction clauses on it will
5968 * reduce the number of matches. Hence we take the join
5969 * selectivity as equal to the selectivity of the table's
5970 * restriction clauses, which is rows / tuples; but we must guard
5971 * against tuples == 0.
5972 */
5973 RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
5974 double ref_tuples = Max(ref_rel->tuples, 1.0);
5975
5976 fkselec *= ref_rel->rows / ref_tuples;
5977 }
5978 else
5979 {
5980 /*
5981 * Otherwise, selectivity is exactly 1/referenced-table-size; but
5982 * guard against tuples == 0. Note we should use the raw table
5983 * tuple count, not any estimate of its filtered or joined size.
5984 */
5985 RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
5986 double ref_tuples = Max(ref_rel->tuples, 1.0);
5987
5988 fkselec *= 1.0 / ref_tuples;
5989 }
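 /*
  * Illustrative example (hypothetical relations): for a join of "orders"
  * to "customers" on an FK whose referenced table "customers" holds
  * 10000 tuples, the matched clause contributes fkselec = 1/10000 =
  * 0.0001; in the semi/anti case, if restriction clauses let 2500 of
  * those 10000 rows through, the contribution is instead 2500/10000 =
  * 0.25.
  */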
5990
5991 /*
5992 * If any of the FK columns participated in ec_has_const ECs, then
5993 * equivclass.c will have generated "var = const" restrictions for
5994 * each side of the join, thus reducing the sizes of both input
5995 * relations. Taking the fkselec at face value would amount to
5996 * double-counting the selectivity of the constant restriction for the
5997 * referencing Var. Hence, look for the restriction clause(s) that
5998 * were applied to the referencing Var(s), and divide out their
5999 * selectivity to correct for this.
6000 */
6001 if (fkinfo->nconst_ec > 0)
6002 {
6003 for (int i = 0; i < fkinfo->nkeys; i++)
6004 {
6005 EquivalenceClass *ec = fkinfo->eclass[i];
6006
6007 if (ec && ec->ec_has_const)
6008 {
6009 EquivalenceMember *em = fkinfo->fk_eclass_member[i];
6010 RestrictInfo *rinfo = find_derived_clause_for_ec_member(root,
6011 ec,
6012 em);
6013
6014 if (rinfo)
6015 {
6016 Selectivity s0;
6017
6018 s0 = clause_selectivity(root,
6019 (Node *) rinfo,
6020 0,
6021 jointype,
6022 sjinfo);
6023 if (s0 > 0)
6024 fkselec /= s0;
6025 }
6026 }
6027 }
6028 }
6029 }
6030
6031 *restrictlist = worklist;
6033 return fkselec;
6034}
6035
6036/*
6037 * set_subquery_size_estimates
6038 * Set the size estimates for a base relation that is a subquery.
6039 *
6040 * The rel's targetlist and restrictinfo list must have been constructed
6041 * already, and the Paths for the subquery must have been completed.
6042 * We look at the subquery's PlannerInfo to extract data.
6043 *
6044 * We set the same fields as set_baserel_size_estimates.
6045 */
6046void
6047set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6048{
6049 PlannerInfo *subroot = rel->subroot;
6050 RelOptInfo *sub_final_rel;
6051 ListCell *lc;
6052
6053 /* Should only be applied to base relations that are subqueries */
6054 Assert(rel->relid > 0);
6055 Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_SUBQUERY);
6056
6057 /*
6058 * Copy raw number of output rows from subquery. All of its paths should
6059 * have the same output rowcount, so just look at cheapest-total.
6060 */
6061 sub_final_rel = fetch_upper_rel(subroot, UPPERREL_FINAL, NULL);
6062 rel->tuples = sub_final_rel->cheapest_total_path->rows;
6063
6064 /*
6065 * Compute per-output-column width estimates by examining the subquery's
6066 * targetlist. For any output that is a plain Var, get the width estimate
6067 * that was made while planning the subquery. Otherwise, we leave it to
6068 * set_rel_width to fill in a datatype-based default estimate.
6069 */
6070 foreach(lc, subroot->parse->targetList)
6071 {
6072 TargetEntry *te = lfirst_node(TargetEntry, lc);
6073 Node *texpr = (Node *) te->expr;
6074 int32 item_width = 0;
6075
6076 /* junk columns aren't visible to upper query */
6077 if (te->resjunk)
6078 continue;
6079
6080 /*
6081 * The subquery could be an expansion of a view that's had columns
6082 * added to it since the current query was parsed, so that there are
6083 * non-junk tlist columns in it that don't correspond to any column
6084 * visible at our query level. Ignore such columns.
6085 */
6086 if (te->resno < rel->min_attr || te->resno > rel->max_attr)
6087 continue;
6088
6089 /*
6090 * XXX This currently doesn't work for subqueries containing set
6091 * operations, because the Vars in their tlists are bogus references
6092 * to the first leaf subquery, which wouldn't give the right answer
6093 * even if we could still get to its PlannerInfo.
6094 *
6095 * Also, the subquery could be an appendrel for which all branches are
6096 * known empty due to constraint exclusion, in which case
6097 * set_append_rel_pathlist will have left the attr_widths set to zero.
6098 *
6099 * In either case, we just leave the width estimate zero until
6100 * set_rel_width fixes it.
6101 */
6102 if (IsA(texpr, Var) &&
6103 subroot->parse->setOperations == NULL)
6104 {
6105 Var *var = (Var *) texpr;
6106 RelOptInfo *subrel = find_base_rel(subroot, var->varno);
6107
6108 item_width = subrel->attr_widths[var->varattno - subrel->min_attr];
6109 }
6110 rel->attr_widths[te->resno - rel->min_attr] = item_width;
6111 }
6112
6113 /* Now estimate number of output rows, etc */
6114 set_baserel_size_estimates(root, rel);
6115}
6116
6117/*
6118 * set_function_size_estimates
6119 * Set the size estimates for a base relation that is a function call.
6120 *
6121 * The rel's targetlist and restrictinfo list must have been constructed
6122 * already.
6123 *
6124 * We set the same fields as set_baserel_size_estimates.
6125 */
6126void
6127set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6128{
6129 RangeTblEntry *rte;
6130 ListCell *lc;
6131
6132 /* Should only be applied to base relations that are functions */
6133 Assert(rel->relid > 0);
6134 rte = planner_rt_fetch(rel->relid, root);
6135 Assert(rte->rtekind == RTE_FUNCTION);
6136
6137 /*
6138 * Estimate number of rows the functions will return. The rowcount of the
6139 * node is that of the largest function result.
6140 */
6141 rel->tuples = 0;
6142 foreach(lc, rte->functions)
6143 {
6144 RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
6145 double ntup = expression_returns_set_rows(root, rtfunc->funcexpr);
6146
6147 if (ntup > rel->tuples)
6148 rel->tuples = ntup;
6149 }
6150
6151 /* Now estimate number of output rows, etc */
6152 set_baserel_size_estimates(root, rel);
6153}
6154
6155/*
6156 * set_tablefunc_size_estimates
6157 * Set the size estimates for a base relation that is a table function call.
6158 *
6159 * The rel's targetlist and restrictinfo list must have been constructed
6160 * already.
6161 *
6162 * We set the same fields as set_baserel_size_estimates.
6163 */
6164void
6165set_tablefunc_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6166{
6167 /* Should only be applied to base relations that are functions */
6168 Assert(rel->relid > 0);
6169 Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_TABLEFUNC);
6170
6171 rel->tuples = 100;
6172
6173 /* Now estimate number of output rows, etc */
6174 set_baserel_size_estimates(root, rel);
6175}
6176
6177/*
6178 * set_values_size_estimates
6179 * Set the size estimates for a base relation that is a values list.
6180 *
6181 * The rel's targetlist and restrictinfo list must have been constructed
6182 * already.
6183 *
6184 * We set the same fields as set_baserel_size_estimates.
6185 */
6186void
6187set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6188{
6189 RangeTblEntry *rte;
6190
6191 /* Should only be applied to base relations that are values lists */
6192 Assert(rel->relid > 0);
6193 rte = planner_rt_fetch(rel->relid, root);
6194 Assert(rte->rtekind == RTE_VALUES);
6195
6196 /*
6197 * Estimate number of rows the values list will return. We know this
6198 * precisely based on the list length (well, barring set-returning
6199 * functions in list items, but that's a refinement not catered for
6200 * anywhere else either).
6201 */
6202 rel->tuples = list_length(rte->values_lists);
6203
6204 /* Now estimate number of output rows, etc */
6205 set_baserel_size_estimates(root, rel);
6206}
6207
6208/*
6209 * set_cte_size_estimates
6210 * Set the size estimates for a base relation that is a CTE reference.
6211 *
6212 * The rel's targetlist and restrictinfo list must have been constructed
6213 * already, and we need an estimate of the number of rows returned by the CTE
6214 * (if a regular CTE) or the non-recursive term (if a self-reference).
6215 *
6216 * We set the same fields as set_baserel_size_estimates.
6217 */
6218void
6219set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, double cte_rows)
6220{
6221 RangeTblEntry *rte;
6222
6223 /* Should only be applied to base relations that are CTE references */
6224 Assert(rel->relid > 0);
6225 rte = planner_rt_fetch(rel->relid, root);
6226 Assert(rte->rtekind == RTE_CTE);
6227
6228 if (rte->self_reference)
6229 {
6230 /*
6231 * In a self-reference, we assume the average worktable size is a
6232 * multiple of the nonrecursive term's size. The best multiplier will
6233 * vary depending on query "fan-out", so make its value adjustable.
6234 */
6235 rel->tuples = clamp_row_est(recursive_worktable_factor * cte_rows);
6236 }
6237 else
6238 {
6239 /* Otherwise just believe the CTE's rowcount estimate */
6240 rel->tuples = cte_rows;
6241 }
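 /*
  * Illustrative example for the self-reference case (hypothetical
  * numbers): with the default recursive_worktable_factor of 10 and a
  * nonrecursive term estimated at 100 rows, the worktable is assumed to
  * average about 1000 rows per iteration.
  */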
6242
6243 /* Now estimate number of output rows, etc */
6244 set_baserel_size_estimates(root, rel);
6245}
6246
6247/*
6248 * set_namedtuplestore_size_estimates
6249 * Set the size estimates for a base relation that is a tuplestore reference.
6250 *
6251 * The rel's targetlist and restrictinfo list must have been constructed
6252 * already.
6253 *
6254 * We set the same fields as set_baserel_size_estimates.
6255 */
6256void
6257set_namedtuplestore_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6258{
6259 RangeTblEntry *rte;
6260
6261 /* Should only be applied to base relations that are tuplestore references */
6262 Assert(rel->relid > 0);
6263 rte = planner_rt_fetch(rel->relid, root);
6264 Assert(rte->rtekind == RTE_NAMEDTUPLESTORE);
6265
6266 /*
6267 * Use the estimate provided by the code which is generating the named
6268 * tuplestore. In some cases, the actual number might be available; in
6269 * others the same plan will be re-used, so a "typical" value might be
6270 * estimated and used.
6271 */
6272 rel->tuples = rte->enrtuples;
6273 if (rel->tuples < 0)
6274 rel->tuples = 1000;
6275
6276 /* Now estimate number of output rows, etc */
6277 set_baserel_size_estimates(root, rel);
6278}
6279
6280/*
6281 * set_result_size_estimates
6282 * Set the size estimates for an RTE_RESULT base relation
6283 *
6284 * The rel's targetlist and restrictinfo list must have been constructed
6285 * already.
6286 *
6287 * We set the same fields as set_baserel_size_estimates.
6288 */
6289void
6290set_result_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6291{
6292 /* Should only be applied to RTE_RESULT base relations */
6293 Assert(rel->relid > 0);
6294 Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_RESULT);
6295
6296 /* RTE_RESULT always generates a single row, natively */
6297 rel->tuples = 1;
6298
6299 /* Now estimate number of output rows, etc */
6300 set_baserel_size_estimates(root, rel);
6301}
6302
6303/*
6304 * set_foreign_size_estimates
6305 * Set the size estimates for a base relation that is a foreign table.
6306 *
6307 * There is not a whole lot that we can do here; the foreign-data wrapper
6308 * is responsible for producing useful estimates. We can do a decent job
6309 * of estimating baserestrictcost, so we set that, and we also set up width
6310 * using what will be purely datatype-driven estimates from the targetlist.
6311 * There is no way to do anything sane with the rows value, so we just put
6312 * a default estimate and hope that the wrapper can improve on it. The
6313 * wrapper's GetForeignRelSize function will be called momentarily.
6314 *
6315 * The rel's targetlist and restrictinfo list must have been constructed
6316 * already.
6317 */
6318void
6319set_foreign_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6320{
6321 /* Should only be applied to base relations */
6322 Assert(rel->relid > 0);
6323
6324 rel->rows = 1000; /* entirely bogus default estimate */
6325
6326 cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
6327
6328 set_rel_width(root, rel);
6329}
6330
6331
6332/*
6333 * set_rel_width
6334 * Set the estimated output width of a base relation.
6335 *
6336 * The estimated output width is the sum of the per-attribute width estimates
6337 * for the actually-referenced columns, plus any PHVs or other expressions
6338 * that have to be calculated at this relation. This is the amount of data
6339 * we'd need to pass upwards in case of a sort, hash, etc.
6340 *
6341 * This function also sets reltarget->cost, so it's a bit misnamed now.
6342 *
6343 * NB: this works best on plain relations because it prefers to look at
6344 * real Vars. For subqueries, set_subquery_size_estimates will already have
6345 * copied up whatever per-column estimates were made within the subquery,
6346 * and for other types of rels there isn't much we can do anyway. We fall
6347 * back on (fairly stupid) datatype-based width estimates if we can't get
6348 * any better number.
6349 *
6350 * The per-attribute width estimates are cached for possible re-use while
6351 * building join relations or post-scan/join pathtargets.
6352 */
6353static void
6354set_rel_width(PlannerInfo *root, RelOptInfo *rel)
6355{
6356 Oid reloid = planner_rt_fetch(rel->relid, root)->relid;
6357 int64 tuple_width = 0;
6358 bool have_wholerow_var = false;
6359 ListCell *lc;
6360
6361 /* Vars are assumed to have cost zero, but other exprs do not */
6362 rel->reltarget->cost.startup = 0;
6363 rel->reltarget->cost.per_tuple = 0;
6364
6365 foreach(lc, rel->reltarget->exprs)
6366 {
6367 Node *node = (Node *) lfirst(lc);
6368
6369 /*
6370 * Ordinarily, a Var in a rel's targetlist must belong to that rel;
6371 * but there are corner cases involving LATERAL references where that
6372 * isn't so. If the Var has the wrong varno, fall through to the
6373 * generic case (it doesn't seem worth the trouble to be any smarter).
6374 */
6375 if (IsA(node, Var) &&
6376 ((Var *) node)->varno == rel->relid)
6377 {
6378 Var *var = (Var *) node;
6379 int ndx;
6380 int32 item_width;
6381
6382 Assert(var->varattno >= rel->min_attr);
6383 Assert(var->varattno <= rel->max_attr);
6384
6385 ndx = var->varattno - rel->min_attr;
6386
6387 /*
6388 * If it's a whole-row Var, we'll deal with it below after we have
6389 * already cached as many attr widths as possible.
6390 */
6391 if (var->varattno == 0)
6392 {
6393 have_wholerow_var = true;
6394 continue;
6395 }
6396
6397 /*
6398 * The width may have been cached already (especially if it's a
6399 * subquery), so don't duplicate effort.
6400 */
6401 if (rel->attr_widths[ndx] > 0)
6402 {
6403 tuple_width += rel->attr_widths[ndx];
6404 continue;
6405 }
6406
6407 /* Try to get column width from statistics */
6408 if (reloid != InvalidOid && var->varattno > 0)
6409 {
6410 item_width = get_attavgwidth(reloid, var->varattno);
6411 if (item_width > 0)
6412 {
6413 rel->attr_widths[ndx] = item_width;
6414 tuple_width += item_width;
6415 continue;
6416 }
6417 }
6418
6419 /*
6420 * Not a plain relation, or can't find statistics for it. Estimate
6421 * using just the type info.
6422 */
6423 item_width = get_typavgwidth(var->vartype, var->vartypmod);
6424 Assert(item_width > 0);
6425 rel->attr_widths[ndx] = item_width;
6426 tuple_width += item_width;
6427 }
6428 else if (IsA(node, PlaceHolderVar))
6429 {
6430 /*
6431 * We will need to evaluate the PHV's contained expression while
6432 * scanning this rel, so be sure to include it in reltarget->cost.
6433 */
6434 PlaceHolderVar *phv = (PlaceHolderVar *) node;
6435 PlaceHolderInfo *phinfo = find_placeholder_info(root, phv);
6436 QualCost cost;
6437
6438 tuple_width += phinfo->ph_width;
6439 cost_qual_eval_node(&cost, (Node *) phv->phexpr, root);
6440 rel->reltarget->cost.startup += cost.startup;
6441 rel->reltarget->cost.per_tuple += cost.per_tuple;
6442 }
6443 else
6444 {
6445 /*
6446 * We could be looking at an expression pulled up from a subquery,
6447 * or a ROW() representing a whole-row child Var, etc. Do what we
6448 * can using the expression type information.
6449 */
6450 int32 item_width;
6451 QualCost cost;
6452
6453 item_width = get_typavgwidth(exprType(node), exprTypmod(node));
6454 Assert(item_width > 0);
6455 tuple_width += item_width;
6456 /* Not entirely clear if we need to account for cost, but do so */
6457 cost_qual_eval_node(&cost, node, root);
6458 rel->reltarget->cost.startup += cost.startup;
6459 rel->reltarget->cost.per_tuple += cost.per_tuple;
6460 }
6461 }
6462
6463 /*
6464 * If we have a whole-row reference, estimate its width as the sum of
6465 * per-column widths plus heap tuple header overhead.
6466 */
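 /*
  * Illustrative example (hypothetical numbers): three referenced columns
  * with average widths 4, 8 and 16 plus a MAXALIGN'd heap tuple header
  * (typically 24 bytes) give a whole-row width estimate of 52 bytes.
  */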
6467 if (have_wholerow_var)
6468 {
6469 int64 wholerow_width = MAXALIGN(SizeofHeapTupleHeader);
6470
6471 if (reloid != InvalidOid)
6472 {
6473 /* Real relation, so estimate true tuple width */
6474 wholerow_width += get_relation_data_width(reloid,
6475 rel->attr_widths - rel->min_attr);
6476 }
6477 else
6478 {
6479 /* Do what we can with info for a phony rel */
6480 AttrNumber i;
6481
6482 for (i = 1; i <= rel->max_attr; i++)
6483 wholerow_width += rel->attr_widths[i - rel->min_attr];
6484 }
6485
6486 rel->attr_widths[0 - rel->min_attr] = clamp_width_est(wholerow_width);
6487
6488 /*
6489 * Include the whole-row Var as part of the output tuple. Yes, that
6490 * really is what happens at runtime.
6491 */
6492 tuple_width += wholerow_width;
6493 }
6494
6495 rel->reltarget->width = clamp_width_est(tuple_width);
6496}
6497
6498/*
6499 * set_pathtarget_cost_width
6500 * Set the estimated eval cost and output width of a PathTarget tlist.
6501 *
6502 * As a notational convenience, returns the same PathTarget pointer passed in.
6503 *
6504 * Most, though not quite all, uses of this function occur after we've run
6505 * set_rel_width() for base relations; so we can usually obtain cached width
6506 * estimates for Vars. If we can't, fall back on datatype-based width
6507 * estimates. Present early-planning uses of PathTargets don't need accurate
6508 * widths badly enough to justify going to the catalogs for better data.
6509 */
6510PathTarget *
6511set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
6512{
6513 int64 tuple_width = 0;
6514 ListCell *lc;
6515
6516 /* Vars are assumed to have cost zero, but other exprs do not */
6517 target->cost.startup = 0;
6518 target->cost.per_tuple = 0;
6519
6520 foreach(lc, target->exprs)
6521 {
6522 Node *node = (Node *) lfirst(lc);
6523
6524 tuple_width += get_expr_width(root, node);
6525
6526 /* For non-Vars, account for evaluation cost */
6527 if (!IsA(node, Var))
6528 {
6529 QualCost cost;
6530
6531 cost_qual_eval_node(&cost, node, root);
6532 target->cost.startup += cost.startup;
6533 target->cost.per_tuple += cost.per_tuple;
6534 }
6535 }
6536
6537 target->width = clamp_width_est(tuple_width);
6538
6539 return target;
6540}
6541
6542/*
6543 * get_expr_width
6544 * Estimate the width of the given expr attempting to use the width
6545 * cached in a Var's owning RelOptInfo, else fallback on the type's
6546 * average width when unable to or when the given Node is not a Var.
6547 */
6548static int32
6549get_expr_width(PlannerInfo *root, const Node *expr)
6550{
6551 int32 width;
6552
6553 if (IsA(expr, Var))
6554 {
6555 const Var *var = (const Var *) expr;
6556
6557 /* We should not see any upper-level Vars here */
6558 Assert(var->varlevelsup == 0);
6559
6560 /* Try to get data from RelOptInfo cache */
6561 if (!IS_SPECIAL_VARNO(var->varno) &&
6562 var->varno < root->simple_rel_array_size)
6563 {
6564 RelOptInfo *rel = root->simple_rel_array[var->varno];
6565
6566 if (rel != NULL &&
6567 var->varattno >= rel->min_attr &&
6568 var->varattno <= rel->max_attr)
6569 {
6570 int ndx = var->varattno - rel->min_attr;
6571
6572 if (rel->attr_widths[ndx] > 0)
6573 return rel->attr_widths[ndx];
6574 }
6575 }
6576
6577 /*
6578 * No cached data available, so estimate using just the type info.
6579 */
6580 width = get_typavgwidth(var->vartype, var->vartypmod);
6581 Assert(width > 0);
6582
6583 return width;
6584 }
6585
6586 width = get_typavgwidth(exprType(expr), exprTypmod(expr));
6587 Assert(width > 0);
6588 return width;
6589}
6590
6591/*
6592 * relation_byte_size
6593 * Estimate the storage space in bytes for a given number of tuples
6594 * of a given width (size in bytes).
6595 */
6596static double
6597relation_byte_size(double tuples, int width)
6598{
6599 return tuples * (MAXALIGN(width) + MAXALIGN(SizeofHeapTupleHeader));
6600}
6601
6602/*
6603 * page_size
6604 * Returns an estimate of the number of pages covered by a given
6605 * number of tuples of a given width (size in bytes).
6606 */
6607static double
6608page_size(double tuples, int width)
6609{
6610 return ceil(relation_byte_size(tuples, width) / BLCKSZ);
6611}
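/*
 * Illustrative example (hypothetical numbers): 1,000,000 tuples of width 40
 * with a MAXALIGN'd header of typically 24 bytes occupy about
 * 1e6 * (40 + 24) = 64,000,000 bytes, i.e. roughly 7813 pages with the
 * default 8 kB BLCKSZ.
 */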
6612
6613/*
6614 * Estimate the fraction of the work that each worker will do given the
6615 * number of workers budgeted for the path.
6616 */
6617static double
6618get_parallel_divisor(Path *path)
6619{
6620 double parallel_divisor = path->parallel_workers;
6621
6622 /*
6623 * Early experience with parallel query suggests that when there is only
6624 * one worker, the leader often makes a very substantial contribution to
6625 * executing the parallel portion of the plan, but as more workers are
6626 * added, it does less and less, because it's busy reading tuples from the
6627 * workers and doing whatever non-parallel post-processing is needed. By
6628 * the time we reach 4 workers, the leader no longer makes a meaningful
6629 * contribution. Thus, for now, estimate that the leader spends 30% of
6630 * its time servicing each worker, and the remainder executing the
6631 * parallel plan.
6632 */
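 /*
  * Illustrative example (hypothetical numbers): with 2 planned workers
  * and a participating leader, leader_contribution = 1.0 - 0.3 * 2 = 0.4,
  * so the divisor becomes 2.4; with 4 or more workers the leader's
  * contribution drops to zero and the divisor is just the worker count.
  */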
6633 if (parallel_leader_participation)
6634 {
6635 double leader_contribution;
6636
6637 leader_contribution = 1.0 - (0.3 * path->parallel_workers);
6638 if (leader_contribution > 0)
6639 parallel_divisor += leader_contribution;
6640 }
6641
6642 return parallel_divisor;
6643}
6644
6645/*
6646 * compute_bitmap_pages
6647 * Estimate number of pages fetched from heap in a bitmap heap scan.
6648 *
6649 * 'baserel' is the relation to be scanned
6650 * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
6651 * 'loop_count' is the number of repetitions of the indexscan to factor into
6652 * estimates of caching behavior
6653 *
6654 * If cost_p isn't NULL, the indexTotalCost estimate is returned in *cost_p.
6655 * If tuples_p isn't NULL, the tuples_fetched estimate is returned in *tuples_p.
6656 */
6657double
6658compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel,
6659 Path *bitmapqual, double loop_count,
6660 Cost *cost_p, double *tuples_p)
6661{
6662 Cost indexTotalCost;
6663 Selectivity indexSelectivity;
6664 double T;
6665 double pages_fetched;
6666 double tuples_fetched;
6667 double heap_pages;
6668 double maxentries;
6669
6670 /*
6671 * Fetch total cost of obtaining the bitmap, as well as its total
6672 * selectivity.
6673 */
6674 cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
6675
6676 /*
6677 * Estimate number of main-table pages fetched.
6678 */
6679 tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
6680
6681 T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
6682
6683 /*
6684 * For a single scan, the number of heap pages that need to be fetched is
6685 * the same as the Mackert and Lohman formula for the case T <= b (ie, no
6686 * re-reads needed).
6687 */
6688 pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
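 /*
  * Illustrative example (hypothetical numbers): with T = 10000 heap pages
  * and 5000 tuples fetched, this gives 2*10000*5000 / (2*10000 + 5000) =
  * 4000 pages, reflecting the expectation that some of the fetched tuples
  * share pages.
  */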
6689
6690 /*
6691 * Calculate the number of pages fetched from the heap. Then based on
6692 * current work_mem estimate get the estimated maxentries in the bitmap.
6693 * (Note that we always do this calculation based on the number of pages
6694 * that would be fetched in a single iteration, even if loop_count > 1.
6695 * That's correct, because only that number of entries will be stored in
6696 * the bitmap at one time.)
6697 */
6698 heap_pages = Min(pages_fetched, baserel->pages);
6699 maxentries = tbm_calculate_entries(work_mem * (Size) 1024);
6700
6701 if (loop_count > 1)
6702 {
6703 /*
6704 * For repeated bitmap scans, scale up the number of tuples fetched in
6705 * the Mackert and Lohman formula by the number of scans, so that we
6706 * estimate the number of pages fetched by all the scans. Then
6707 * pro-rate for one scan.
6708 */
6709 pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
6710 baserel->pages,
6711 get_indexpath_pages(bitmapqual),
6712 root);
6713 pages_fetched /= loop_count;
6714 }
6715
6716 if (pages_fetched >= T)
6717 pages_fetched = T;
6718 else
6719 pages_fetched = ceil(pages_fetched);
6720
6721 if (maxentries < heap_pages)
6722 {
6723 double exact_pages;
6724 double lossy_pages;
6725
6726 /*
6727 * Crude approximation of the number of lossy pages. Because of the
6728 * way tbm_lossify() is coded, the number of lossy pages increases
6729 * very sharply as soon as we run short of memory; this formula has
6730 * that property and seems to perform adequately in testing, but it's
6731 * possible we could do better somehow.
6732 */
6733 lossy_pages = Max(0, heap_pages - maxentries / 2);
6734 exact_pages = heap_pages - lossy_pages;
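 /*
  * Illustrative example (hypothetical numbers): with heap_pages = 1000
  * and room for only maxentries = 800 exact entries, the estimate is
  * lossy_pages = Max(0, 1000 - 800/2) = 600 and exact_pages = 400.
  */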
6735
6736 /*
6737 * If there are lossy pages then recompute the number of tuples
6738 * processed by the bitmap heap node. We assume here that the chance
6739 * of a given tuple coming from an exact page is the same as the
6740 * chance that a given page is exact. This might not be true, but
6741 * it's not clear how we can do any better.
6742 */
6743 if (lossy_pages > 0)
6744 tuples_fetched =
6745 clamp_row_est(indexSelectivity *
6746 (exact_pages / heap_pages) * baserel->tuples +
6747 (lossy_pages / heap_pages) * baserel->tuples);
6748 }
6749
6750 if (cost_p)
6751 *cost_p = indexTotalCost;
6752 if (tuples_p)
6753 *tuples_p = tuples_fetched;
6754
6755 return pages_fetched;
6756}
6757
6758/*
6759 * compute_gather_rows
6760 * Estimate number of rows for gather (merge) nodes.
6761 *
6762 * In a parallel plan, each worker's row estimate is determined by dividing the
6763 * total number of rows by parallel_divisor, which accounts for the leader's
6764 * contribution in addition to the number of workers. Accordingly, when
6765 * estimating the number of rows for gather (merge) nodes, we multiply the rows
6766 * per worker by the same parallel_divisor to undo the division.
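 *
 * Illustrative example (hypothetical numbers): a partial path estimated at
 * 1000 rows per worker with 2 workers (parallel divisor 2.4, see
 * get_parallel_divisor) is estimated to deliver about 2400 rows through the
 * Gather or Gather Merge node.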
6767 */
6768double
6769compute_gather_rows(Path *path)
6770{
6771 Assert(path->parallel_workers > 0);
6772
6773 return clamp_row_est(path->rows * get_parallel_divisor(path));
6774}
int compute_parallel_worker(RelOptInfo *rel, double heap_pages, double index_pages, int max_workers)
Definition allpaths.c:4794
void(* amcostestimate_function)(PlannerInfo *root, IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
Definition amapi.h:148
int16 AttrNumber
Definition attnum.h:21
bool bms_is_subset(const Bitmapset *a, const Bitmapset *b)
Definition bitmapset.c:412
bool bms_is_member(int x, const Bitmapset *a)
Definition bitmapset.c:510
BMS_Membership bms_membership(const Bitmapset *a)
Definition bitmapset.c:765
@ BMS_SINGLETON
Definition bitmapset.h:72
uint32 BlockNumber
Definition block.h:31
#define Min(x, y)
Definition c.h:1093
#define MAXALIGN(LEN)
Definition c.h:898
#define PG_UINT32_MAX
Definition c.h:676
#define Max(x, y)
Definition c.h:1087
#define Assert(condition)
Definition c.h:945
int64_t int64
Definition c.h:615
int32_t int32
Definition c.h:614
uint64_t uint64
Definition c.h:619
#define OidIsValid(objectId)
Definition c.h:860
size_t Size
Definition c.h:691
double expression_returns_set_rows(PlannerInfo *root, Node *clause)
Definition clauses.c:300
Selectivity clauselist_selectivity(PlannerInfo *root, List *clauses, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
Definition clausesel.c:100
Selectivity clause_selectivity(PlannerInfo *root, Node *clause, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
Definition clausesel.c:667
#define DEFAULT_PARALLEL_TUPLE_COST
Definition cost.h:29
#define DEFAULT_PARALLEL_SETUP_COST
Definition cost.h:30
#define DEFAULT_CPU_INDEX_TUPLE_COST
Definition cost.h:27
#define DEFAULT_CPU_TUPLE_COST
Definition cost.h:26
#define DEFAULT_RANDOM_PAGE_COST
Definition cost.h:25
#define DEFAULT_RECURSIVE_WORKTABLE_FACTOR
Definition cost.h:33
#define DEFAULT_EFFECTIVE_CACHE_SIZE
Definition cost.h:34
#define DEFAULT_SEQ_PAGE_COST
Definition cost.h:24
#define DEFAULT_CPU_OPERATOR_COST
Definition cost.h:28
double random_page_cost
Definition costsize.c:132
#define APPEND_CPU_COST_MULTIPLIER
Definition costsize.c:121
void set_namedtuplestore_size_estimates(PlannerInfo *root, RelOptInfo *rel)
Definition costsize.c:6257
double cpu_operator_cost
Definition costsize.c:135
static double get_windowclause_startup_tuples(PlannerInfo *root, WindowClause *wc, double input_tuples)
Definition costsize.c:2990
bool enable_partitionwise_aggregate
Definition costsize.c:161
void final_cost_hashjoin(PlannerInfo *root, HashPath *path, JoinCostWorkspace *workspace, JoinPathExtraData *extra)
Definition costsize.c:4416
double index_pages_fetched(double tuples_fetched, BlockNumber pages, double index_pages, PlannerInfo *root)
Definition costsize.c:897
void cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
Definition costsize.c:1115
double get_parameterized_baserel_size(PlannerInfo *root, RelOptInfo *rel, List *param_clauses)
Definition costsize.c:5523
bool enable_seqscan
Definition costsize.c:146
static void get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info, QualCost *qpqual_cost)
Definition costsize.c:5216
static double page_size(double tuples, int width)
Definition costsize.c:6608
int max_parallel_workers_per_gather
Definition costsize.c:144
double get_parameterized_joinrel_size(PlannerInfo *root, RelOptInfo *rel, Path *outer_path, Path *inner_path, SpecialJoinInfo *sjinfo, List *restrict_clauses)
Definition costsize.c:5604
void final_cost_mergejoin(PlannerInfo *root, MergePath *path, JoinCostWorkspace *workspace, JoinPathExtraData *extra)
Definition costsize.c:3955
static List * extract_nonindex_conditions(List *qual_clauses, List *indexclauses)
Definition costsize.c:839
void cost_material(Path *path, bool enabled, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double tuples, int width)
Definition costsize.c:2583
static void set_rel_width(PlannerInfo *root, RelOptInfo *rel)
Definition costsize.c:6354
bool enable_memoize
Definition costsize.c:156
void compute_semi_anti_join_factors(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *outerrel, RelOptInfo *innerrel, JoinType jointype, SpecialJoinInfo *sjinfo, List *restrictlist, SemiAntiJoinFactors *semifactors)
Definition costsize.c:5258
static double get_indexpath_pages(Path *bitmapqual)
Definition costsize.c:962
double parallel_setup_cost
Definition costsize.c:137
static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
Definition costsize.c:4940
#define LOG2(x)
Definition costsize.c:114
void set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
Definition costsize.c:5493
void cost_windowagg(Path *path, PlannerInfo *root, List *windowFuncs, WindowClause *winclause, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double input_tuples)
Definition costsize.c:3204
double recursive_worktable_factor
Definition costsize.c:138
bool enable_gathermerge
Definition costsize.c:159
void cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition costsize.c:1563
void initial_cost_nestloop(PlannerInfo *root, JoinCostWorkspace *workspace, JoinType jointype, uint64 enable_mask, Path *outer_path, Path *inner_path, JoinPathExtraData *extra)
Definition costsize.c:3373
void cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info, Path *bitmapqual, double loop_count)
Definition costsize.c:1012
void cost_tidrangescan(Path *path, PlannerInfo *root, RelOptInfo *baserel, List *tidrangequals, ParamPathInfo *param_info)
Definition costsize.c:1361
static double relation_byte_size(double tuples, int width)
Definition costsize.c:6597
double parallel_tuple_cost
Definition costsize.c:136
void set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
Definition costsize.c:6127
void cost_agg(Path *path, PlannerInfo *root, AggStrategy aggstrategy, const AggClauseCosts *aggcosts, int numGroupCols, double numGroups, List *quals, int disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double input_tuples, double input_width)
Definition costsize.c:2788
void cost_sort(Path *path, PlannerInfo *root, List *pathkeys, int input_disabled_nodes, Cost input_cost, double tuples, int width, Cost comparison_cost, int sort_mem, double limit_tuples)
Definition costsize.c:2201
static double calc_joinrel_size_estimate(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, double outer_rows, double inner_rows, SpecialJoinInfo *sjinfo, List *restrictlist)
Definition costsize.c:5645
static MergeScanSelCache * cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
Definition costsize.c:4218
static void cost_rescan(PlannerInfo *root, Path *path, Cost *rescan_startup_cost, Cost *rescan_total_cost)
Definition costsize.c:4785
bool enable_indexonlyscan
Definition costsize.c:148
void final_cost_nestloop(PlannerInfo *root, NestPath *path, JoinCostWorkspace *workspace, JoinPathExtraData *extra)
Definition costsize.c:3455
void cost_gather_merge(GatherMergePath *path, PlannerInfo *root, RelOptInfo *rel, ParamPathInfo *param_info, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double *rows)
Definition costsize.c:470
void cost_recursive_union(Path *runion, Path *nrterm, Path *rterm)
Definition costsize.c:1875
bool enable_tidscan
Definition costsize.c:150
static void cost_tuplesort(Cost *startup_cost, Cost *run_cost, double tuples, int width, Cost comparison_cost, int sort_mem, double limit_tuples)
Definition costsize.c:1951
void cost_tablefuncscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition costsize.c:1629
double cpu_tuple_cost
Definition costsize.c:133
bool enable_material
Definition costsize.c:155
void initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace, JoinType jointype, List *hashclauses, Path *outer_path, Path *inner_path, JoinPathExtraData *extra, bool parallel_hash)
Definition costsize.c:4297
bool enable_hashjoin
Definition costsize.c:158
void initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace, JoinType jointype, List *mergeclauses, Path *outer_path, Path *inner_path, List *outersortkeys, List *innersortkeys, int outer_presorted_keys, JoinPathExtraData *extra)
Definition costsize.c:3658
void cost_samplescan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition costsize.c:349
void cost_gather(GatherPath *path, PlannerInfo *root, RelOptInfo *rel, ParamPathInfo *param_info, double *rows)
Definition costsize.c:430
void set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, double cte_rows)
Definition costsize.c:6219
void set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo, List *restrictlist)
Definition costsize.c:5572
bool enable_mergejoin
Definition costsize.c:157
void cost_append(AppendPath *apath, PlannerInfo *root)
Definition costsize.c:2311
double compute_gather_rows(Path *path)
Definition costsize.c:6769
void cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
Definition costsize.c:4926
void cost_namedtuplestorescan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition costsize.c:1791
void cost_seqscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition costsize.c:270
PathTarget * set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
Definition costsize.c:6511
void cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition costsize.c:1690
void cost_incremental_sort(Path *path, PlannerInfo *root, List *pathkeys, int presorted_keys, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double input_tuples, int width, Cost comparison_cost, int sort_mem, double limit_tuples)
Definition costsize.c:2053
void cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
Definition costsize.c:4900
bool enable_presorted_aggregate
Definition costsize.c:165
void set_result_size_estimates(PlannerInfo *root, RelOptInfo *rel)
Definition costsize.c:6290
static bool has_indexed_join_quals(NestPath *path)
Definition costsize.c:5355
bool enable_parallel_hash
Definition costsize.c:163
bool enable_partitionwise_join
Definition costsize.c:160
void cost_group(Path *path, PlannerInfo *root, int numGroupCols, double numGroups, List *quals, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double input_tuples)
Definition costsize.c:3301
void cost_resultscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition costsize.c:1833
static Cost append_nonpartial_cost(List *subpaths, int numpaths, int parallel_workers)
Definition costsize.c:2235
double compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel, Path *bitmapqual, double loop_count, Cost *cost_p, double *tuples_p)
Definition costsize.c:6658
void cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
Definition costsize.c:1158
bool enable_async_append
Definition costsize.c:166
void set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
Definition costsize.c:6047
double seq_page_cost
Definition costsize.c:131
bool enable_parallel_append
Definition costsize.c:162
void set_foreign_size_estimates(PlannerInfo *root, RelOptInfo *rel)
Definition costsize.c:6319
bool enable_nestloop
Definition costsize.c:154
void cost_tidscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, List *tidquals, ParamPathInfo *param_info)
Definition costsize.c:1251
bool enable_bitmapscan
Definition costsize.c:149
static double approx_tuple_count(PlannerInfo *root, JoinPath *path, List *quals)
Definition costsize.c:5448
void cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
Definition costsize.c:4677
static void cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath, Cost *rescan_startup_cost, Cost *rescan_total_cost)
Definition costsize.c:2641
void cost_merge_append(Path *path, PlannerInfo *root, List *pathkeys, int n_streams, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double tuples)
Definition costsize.c:2525
bool enable_hashagg
Definition costsize.c:153
double clamp_row_est(double nrows)
Definition costsize.c:214
static double get_parallel_divisor(Path *path)
Definition costsize.c:6618
void cost_subqueryscan(SubqueryScanPath *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info, bool trivial_pathtarget)
Definition costsize.c:1478
Cost disable_cost
Definition costsize.c:142
void cost_ctescan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition costsize.c:1745
void cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
Definition costsize.c:1203
bool enable_partition_pruning
Definition costsize.c:164
bool enable_sort
Definition costsize.c:151
int32 clamp_width_est(int64 tuple_width)
Definition costsize.c:243
int effective_cache_size
Definition costsize.c:140
double cpu_index_tuple_cost
Definition costsize.c:134
void set_tablefunc_size_estimates(PlannerInfo *root, RelOptInfo *rel)
Definition costsize.c:6165
void cost_index(IndexPath *path, PlannerInfo *root, double loop_count, bool partial_path)
Definition costsize.c:545
bool enable_indexscan
Definition costsize.c:147
void set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
Definition costsize.c:6187
static Selectivity get_foreign_key_join_selectivity(PlannerInfo *root, Relids outer_relids, Relids inner_relids, SpecialJoinInfo *sjinfo, List **restrictlist)
Definition costsize.c:5795
bool enable_incremental_sort
Definition costsize.c:152
static int32 get_expr_width(PlannerInfo *root, const Node *expr)
Definition costsize.c:6549
#define MAXIMUM_ROWCOUNT
Definition costsize.c:129
#define ERROR
Definition elog.h:39
#define elog(elevel,...)
Definition elog.h:226
bool is_redundant_with_indexclauses(RestrictInfo *rinfo, List *indexclauses)
RestrictInfo * find_derived_clause_for_ec_member(PlannerInfo *root, EquivalenceClass *ec, EquivalenceMember *em)
bool ExecSupportsMarkRestore(Path *pathnode)
Definition execAmi.c:419
bool ExecMaterializesOutput(NodeTag plantype)
Definition execAmi.c:636
#define palloc_object(type)
Definition fe_memutils.h:74
#define MaxAllocSize
Definition fe_memutils.h:22
#define palloc_array(type, count)
Definition fe_memutils.h:76
int work_mem
Definition globals.c:131
#define SizeofHeapTupleHeader
void init_dummy_sjinfo(SpecialJoinInfo *sjinfo, Relids left_relids, Relids right_relids)
Definition joinrels.c:664
List * lappend(List *list, void *datum)
Definition list.c:339
List * list_concat(List *list1, const List *list2)
Definition list.c:561
List * list_concat_copy(const List *list1, const List *list2)
Definition list.c:598
List * list_copy(const List *oldlist)
Definition list.c:1573
bool list_member_ptr(const List *list, const void *datum)
Definition list.c:682
void list_free(List *list)
Definition list.c:1546
void getTypeOutputInfo(Oid type, Oid *typOutput, bool *typIsVarlena)
Definition lsyscache.c:3129
int32 get_attavgwidth(Oid relid, AttrNumber attnum)
Definition lsyscache.c:3380
RegProcedure get_opcode(Oid opno)
Definition lsyscache.c:1505
void getTypeInputInfo(Oid type, Oid *typInput, Oid *typIOParam)
Definition lsyscache.c:3096
int32 get_typavgwidth(Oid typid, int32 typmod)
Definition lsyscache.c:2800
List * make_ands_implicit(Expr *clause)
Definition makefuncs.c:810
Size hash_agg_entry_size(int numTrans, Size tupleWidth, Size transitionSpace)
Definition nodeAgg.c:1700
void hash_agg_set_limits(double hashentrysize, double input_groups, int used_bits, Size *mem_limit, uint64 *ngroups_limit, int *num_partitions)
Definition nodeAgg.c:1808
Oid exprType(const Node *expr)
Definition nodeFuncs.c:42
int32 exprTypmod(const Node *expr)
Definition nodeFuncs.c:304
void set_sa_opfuncid(ScalarArrayOpExpr *opexpr)
Definition nodeFuncs.c:1890
void set_opfuncid(OpExpr *opexpr)
Definition nodeFuncs.c:1879
static Node * get_rightop(const void *clause)
Definition nodeFuncs.h:95
#define expression_tree_walker(n, w, c)
Definition nodeFuncs.h:153
static Node * get_leftop(const void *clause)
Definition nodeFuncs.h:83
void ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew, bool try_combined_hash_mem, int parallel_workers, size_t *space_allowed, int *numbuckets, int *numbatches, int *num_skew_mcvs)
Definition nodeHash.c:683
size_t get_hash_memory_limit(void)
Definition nodeHash.c:3680
double ExecEstimateCacheEntryOverheadBytes(double ntuples)
#define IsA(nodeptr, _type_)
Definition nodes.h:164
double Cost
Definition nodes.h:261
#define nodeTag(nodeptr)
Definition nodes.h:139
#define IS_OUTER_JOIN(jointype)
Definition nodes.h:348
double Cardinality
Definition nodes.h:262
AggStrategy
Definition nodes.h:363
@ AGG_SORTED
Definition nodes.h:365
@ AGG_HASHED
Definition nodes.h:366
@ AGG_MIXED
Definition nodes.h:367
@ AGG_PLAIN
Definition nodes.h:364
double Selectivity
Definition nodes.h:260
JoinType
Definition nodes.h:298
@ JOIN_SEMI
Definition nodes.h:317
@ JOIN_FULL
Definition nodes.h:305
@ JOIN_INNER
Definition nodes.h:303
@ JOIN_RIGHT
Definition nodes.h:306
@ JOIN_LEFT
Definition nodes.h:304
@ JOIN_RIGHT_ANTI
Definition nodes.h:320
@ JOIN_ANTI
Definition nodes.h:318
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition palloc.h:124
#define FRAMEOPTION_END_CURRENT_ROW
Definition parsenodes.h:619
#define FRAMEOPTION_END_OFFSET_PRECEDING
Definition parsenodes.h:621
@ RTE_CTE
@ RTE_NAMEDTUPLESTORE
@ RTE_VALUES
@ RTE_SUBQUERY
@ RTE_RESULT
@ RTE_FUNCTION
@ RTE_TABLEFUNC
@ RTE_RELATION
#define FRAMEOPTION_END_OFFSET_FOLLOWING
Definition parsenodes.h:623
#define FRAMEOPTION_RANGE
Definition parsenodes.h:610
#define FRAMEOPTION_GROUPS
Definition parsenodes.h:612
#define FRAMEOPTION_END_UNBOUNDED_FOLLOWING
Definition parsenodes.h:617
#define FRAMEOPTION_ROWS
Definition parsenodes.h:611
bool pathkeys_count_contained_in(List *keys1, List *keys2, int *n_common)
Definition pathkeys.c:558
bool pathkeys_contained_in(List *keys1, List *keys2)
Definition pathkeys.c:343
#define PGS_TIDSCAN
Definition pathnodes.h:70
#define PGS_APPEND
Definition pathnodes.h:78
#define PGS_MERGE_APPEND
Definition pathnodes.h:79
#define RINFO_IS_PUSHED_DOWN(rinfo, joinrelids)
Definition pathnodes.h:3057
#define PGS_SEQSCAN
Definition pathnodes.h:66
#define PGS_MERGEJOIN_PLAIN
Definition pathnodes.h:72
#define PGS_MERGEJOIN_MATERIALIZE
Definition pathnodes.h:73
#define PGS_HASHJOIN
Definition pathnodes.h:77
#define PGS_CONSIDER_NONPARTIAL
Definition pathnodes.h:84
#define PGS_BITMAPSCAN
Definition pathnodes.h:69
#define planner_rt_fetch(rti, root)
Definition pathnodes.h:704
#define PGS_GATHER
Definition pathnodes.h:80
#define RELATION_WAS_MADE_UNIQUE(rel, sjinfo, nominal_jointype)
Definition pathnodes.h:1250
#define PGS_GATHER_MERGE
Definition pathnodes.h:81
@ UPPERREL_FINAL
Definition pathnodes.h:152
#define PGS_INDEXONLYSCAN
Definition pathnodes.h:68
#define PGS_INDEXSCAN
Definition pathnodes.h:67
#define lfirst(lc)
Definition pg_list.h:172
#define lfirst_node(type, lc)
Definition pg_list.h:176
static int list_length(const List *l)
Definition pg_list.h:152
#define NIL
Definition pg_list.h:68
#define foreach_current_index(var_or_cell)
Definition pg_list.h:435
#define foreach_delete_current(lst, var_or_cell)
Definition pg_list.h:423
#define for_each_cell(cell, lst, initcell)
Definition pg_list.h:470
#define linitial(l)
Definition pg_list.h:178
#define lsecond(l)
Definition pg_list.h:183
static ListCell * list_head(const List *l)
Definition pg_list.h:128
#define lfirst_oid(lc)
Definition pg_list.h:174
PlaceHolderInfo * find_placeholder_info(PlannerInfo *root, PlaceHolderVar *phv)
Definition placeholder.c:83
void add_function_cost(PlannerInfo *root, Oid funcid, Node *node, QualCost *cost)
Definition plancat.c:2355
int32 get_relation_data_width(Oid relid, int32 *attr_widths)
Definition plancat.c:1472
bool parallel_leader_participation
Definition planner.c:70
static int64 DatumGetInt64(Datum X)
Definition postgres.h:403
static int16 DatumGetInt16(Datum X)
Definition postgres.h:162
static int32 DatumGetInt32(Datum X)
Definition postgres.h:202
#define InvalidOid
unsigned int Oid
@ ANY_SUBLINK
Definition primnodes.h:1032
@ ALL_SUBLINK
Definition primnodes.h:1031
@ EXISTS_SUBLINK
Definition primnodes.h:1030
#define IS_SPECIAL_VARNO(varno)
Definition primnodes.h:248
RelOptInfo * find_base_rel(PlannerInfo *root, int relid)
Definition relnode.c:544
RelOptInfo * fetch_upper_rel(PlannerInfo *root, UpperRelationKind kind, Relids relids)
Definition relnode.c:1617
bool join_clause_is_movable_into(RestrictInfo *rinfo, Relids currentrelids, Relids current_and_outer)
void mergejoinscansel(PlannerInfo *root, Node *clause, Oid opfamily, CompareType cmptype, bool nulls_first, Selectivity *leftstart, Selectivity *leftend, Selectivity *rightstart, Selectivity *rightend)
Definition selfuncs.c:3302
double estimate_array_length(PlannerInfo *root, Node *arrayexpr)
Definition selfuncs.c:2240
double estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, List **pgset, EstimationInfo *estinfo)
Definition selfuncs.c:3788
List * estimate_multivariate_bucketsize(PlannerInfo *root, RelOptInfo *inner, List *hashclauses, Selectivity *innerbucketsize)
Definition selfuncs.c:4140
void estimate_hash_bucket_stats(PlannerInfo *root, Node *hashkey, double nbuckets, Selectivity *mcv_freq, Selectivity *bucketsize_frac)
Definition selfuncs.c:4408
#define CLAMP_PROBABILITY(p)
Definition selfuncs.h:63
#define DEFAULT_INEQ_SEL
Definition selfuncs.h:37
#define DEFAULT_NUM_DISTINCT
Definition selfuncs.h:52
#define SELFLAG_USED_DEFAULT
Definition selfuncs.h:76
void get_tablespace_page_costs(Oid spcid, double *spc_random_page_cost, double *spc_seq_page_cost)
Definition spccache.c:183
Selectivity bitmapselectivity
Definition pathnodes.h:2144
List * bitmapquals
Definition pathnodes.h:2143
Selectivity bitmapselectivity
Definition pathnodes.h:2157
List * bitmapquals
Definition pathnodes.h:2156
Oid consttype
Definition primnodes.h:330
Path * subpath
Definition pathnodes.h:2372
List * path_hashclauses
Definition pathnodes.h:2490
Cardinality inner_rows_total
Definition pathnodes.h:2492
int num_batches
Definition pathnodes.h:2491
JoinPath jpath
Definition pathnodes.h:2489
List * indrestrictinfo
Definition pathnodes.h:1417
List * indexclauses
Definition pathnodes.h:2057
Selectivity indexselectivity
Definition pathnodes.h:2062
Cost indextotalcost
Definition pathnodes.h:2061
IndexOptInfo * indexinfo
Definition pathnodes.h:2056
Cardinality inner_rows
Definition pathnodes.h:3725
Cardinality outer_rows
Definition pathnodes.h:3724
Cardinality inner_skip_rows
Definition pathnodes.h:3727
Cardinality inner_rows_total
Definition pathnodes.h:3732
Cardinality outer_skip_rows
Definition pathnodes.h:3726
SemiAntiJoinFactors semifactors
Definition pathnodes.h:3609
SpecialJoinInfo * sjinfo
Definition pathnodes.h:3608
Path * outerjoinpath
Definition pathnodes.h:2404
Path * innerjoinpath
Definition pathnodes.h:2405
JoinType jointype
Definition pathnodes.h:2399
List * joinrestrictinfo
Definition pathnodes.h:2407
Definition pg_list.h:54
bool skip_mark_restore
Definition pathnodes.h:2474
List * innersortkeys
Definition pathnodes.h:2471
JoinPath jpath
Definition pathnodes.h:2468
bool materialize_inner
Definition pathnodes.h:2475
List * path_mergeclauses
Definition pathnodes.h:2469
Selectivity leftstartsel
Definition pathnodes.h:3076
Selectivity leftendsel
Definition pathnodes.h:3077
CompareType cmptype
Definition pathnodes.h:3073
Selectivity rightendsel
Definition pathnodes.h:3079
Selectivity rightstartsel
Definition pathnodes.h:3078
JoinPath jpath
Definition pathnodes.h:2422
Definition nodes.h:135
List * exprs
Definition pathnodes.h:1878
QualCost cost
Definition pathnodes.h:1884
NodeTag pathtype
Definition pathnodes.h:1971
Cardinality rows
Definition pathnodes.h:2005
Cost startup_cost
Definition pathnodes.h:2007
int parallel_workers
Definition pathnodes.h:2002
int disabled_nodes
Definition pathnodes.h:2006
Cost total_cost
Definition pathnodes.h:2008
bool parallel_aware
Definition pathnodes.h:1998
Query * parse
Definition pathnodes.h:309
Cost per_tuple
Definition pathnodes.h:121
Cost startup
Definition pathnodes.h:120
Node * setOperations
Definition parsenodes.h:236
List * targetList
Definition parsenodes.h:198
List * baserestrictinfo
Definition pathnodes.h:1142
Relids relids
Definition pathnodes.h:1021
struct PathTarget * reltarget
Definition pathnodes.h:1045
Index relid
Definition pathnodes.h:1069
uint64 pgs_mask
Definition pathnodes.h:1039
Cardinality tuples
Definition pathnodes.h:1096
QualCost baserestrictcost
Definition pathnodes.h:1144
PlannerInfo * subroot
Definition pathnodes.h:1100
AttrNumber max_attr
Definition pathnodes.h:1077
Cardinality rows
Definition pathnodes.h:1027
AttrNumber min_attr
Definition pathnodes.h:1075
Expr * clause
Definition pathnodes.h:2900
Selectivity outer_match_frac
Definition pathnodes.h:3585
Selectivity match_count
Definition pathnodes.h:3586
JoinType jointype
Definition pathnodes.h:3229
int disabled_nodes
Definition primnodes.h:1127
bool useHashTable
Definition primnodes.h:1113
Node * testexpr
Definition primnodes.h:1100
List * parParam
Definition primnodes.h:1124
Cost startup_cost
Definition primnodes.h:1128
Cost per_call_cost
Definition primnodes.h:1129
SubLinkType subLinkType
Definition primnodes.h:1098
Expr * expr
Definition primnodes.h:2266
AttrNumber resno
Definition primnodes.h:2268
AttrNumber varattno
Definition primnodes.h:275
int varno
Definition primnodes.h:270
Index varlevelsup
Definition primnodes.h:295
List * partitionClause
Node * endOffset
List * orderClause
List * args
Definition primnodes.h:606
Expr * aggfilter
Definition primnodes.h:608
PlannerInfo * root
Definition costsize.c:170
Definition type.h:96
TsmRoutine * GetTsmRoutine(Oid tsmhandler)
Definition tablesample.c:27
int tbm_calculate_entries(Size maxbytes)
Definition tidbitmap.c:1542
List * get_sortgrouplist_exprs(List *sgClauses, List *targetList)
Definition tlist.c:401
int tuplesort_merge_order(int64 allowedMem)
Definition tuplesort.c:1674
Relids pull_varnos(PlannerInfo *root, Node *node)
Definition var.c:114