allpaths.c
1/*-------------------------------------------------------------------------
2 *
3 * allpaths.c
4 * Routines to find possible search paths for processing a query
5 *
6 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 *
10 * IDENTIFICATION
11 * src/backend/optimizer/path/allpaths.c
12 *
13 *-------------------------------------------------------------------------
14 */
15
16#include "postgres.h"
17
18#include <limits.h>
19#include <math.h>
20
21#include "access/sysattr.h"
22#include "access/tsmapi.h"
23#include "catalog/pg_class.h"
24#include "catalog/pg_operator.h"
25#include "catalog/pg_proc.h"
26#include "foreign/fdwapi.h"
27#include "miscadmin.h"
28#include "nodes/makefuncs.h"
29#include "nodes/nodeFuncs.h"
30#include "nodes/supportnodes.h"
31#ifdef OPTIMIZER_DEBUG
32#include "nodes/print.h"
33#endif
34#include "optimizer/appendinfo.h"
35#include "optimizer/clauses.h"
36#include "optimizer/cost.h"
37#include "optimizer/geqo.h"
38#include "optimizer/optimizer.h"
39#include "optimizer/pathnode.h"
40#include "optimizer/paths.h"
41#include "optimizer/plancat.h"
42#include "optimizer/planner.h"
43#include "optimizer/tlist.h"
44#include "parser/parse_clause.h"
45#include "parser/parsetree.h"
46#include "partitioning/partbounds.h"
47#include "port/pg_bitutils.h"
48#include "rewrite/rewriteManip.h"
49#include "utils/lsyscache.h"
50
51
52/* Bitmask flags for pushdown_safety_info.unsafeFlags */
53#define UNSAFE_HAS_VOLATILE_FUNC (1 << 0)
54#define UNSAFE_HAS_SET_FUNC (1 << 1)
55#define UNSAFE_NOTIN_DISTINCTON_CLAUSE (1 << 2)
56#define UNSAFE_NOTIN_PARTITIONBY_CLAUSE (1 << 3)
57#define UNSAFE_TYPE_MISMATCH (1 << 4)
58
59/* results of subquery_is_pushdown_safe */
60typedef struct pushdown_safety_info
61{
62 unsigned char *unsafeFlags; /* bitmask of reasons why this target list
63 * column is unsafe for qual pushdown, or 0 if
64 * no reason. */
65 bool unsafeVolatile; /* don't push down volatile quals */
66 bool unsafeLeaky; /* don't push down leaky quals */
67} pushdown_safety_info;
68
69/* Return type for qual_is_pushdown_safe */
70typedef enum pushdown_safe_type
71{
72 PUSHDOWN_UNSAFE, /* unsafe to push qual into subquery */
73 PUSHDOWN_SAFE, /* safe to push qual into subquery */
74 PUSHDOWN_WINDOWCLAUSE_RUNCOND, /* unsafe, but may work as WindowClause
75 * run condition */
76} pushdown_safe_type;
77
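/*
 * Editor's note --- illustrative examples, not part of the original file.
 * Assuming a table t(a int), the flags above classify cases such as:
 *
 *	-- safe: "ss.a > 10" can be pushed down into the subquery
 *	SELECT * FROM (SELECT a FROM t) ss WHERE ss.a > 10;
 *
 *	-- UNSAFE_HAS_VOLATILE_FUNC: pushing "ss.r < 0.5" into the subquery
 *	-- would re-evaluate random() and change which rows are returned
 *	SELECT * FROM (SELECT a, random() AS r FROM t) ss WHERE ss.r < 0.5;
 */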
78/* These parameters are set by GUC */
79bool enable_geqo = false; /* just in case GUC doesn't set it */
80int geqo_threshold;
81int min_parallel_table_scan_size;
82int min_parallel_index_scan_size;
83
84/* Hook for plugins to get control in set_rel_pathlist() */
85set_rel_pathlist_hook_type set_rel_pathlist_hook = NULL;
86
87/* Hook for plugins to replace standard_join_search() */
88join_search_hook_type join_search_hook = NULL;
89
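/*
 * Editor's note --- an illustrative sketch, not part of the original file.
 * An extension would typically install these hooks from its _PG_init(),
 * saving and chaining to any hook installed earlier.  The "my_" names
 * below are hypothetical:
 *
 *	static set_rel_pathlist_hook_type prev_set_rel_pathlist_hook = NULL;
 *
 *	static void
 *	my_set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
 *						Index rti, RangeTblEntry *rte)
 *	{
 *		if (prev_set_rel_pathlist_hook)
 *			prev_set_rel_pathlist_hook(root, rel, rti, rte);
 *		... add_path(rel, ...) or edit rel->pathlist here ...
 *	}
 *
 *	void
 *	_PG_init(void)
 *	{
 *		prev_set_rel_pathlist_hook = set_rel_pathlist_hook;
 *		set_rel_pathlist_hook = my_set_rel_pathlist;
 *	}
 */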
90
91static void set_base_rel_consider_startup(PlannerInfo *root);
92static void set_base_rel_sizes(PlannerInfo *root);
93static void set_base_rel_pathlists(PlannerInfo *root);
94static void set_rel_size(PlannerInfo *root, RelOptInfo *rel,
95 Index rti, RangeTblEntry *rte);
96static void set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
97 Index rti, RangeTblEntry *rte);
98static void set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel,
99 RangeTblEntry *rte);
100static void create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel);
101static void set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
102 RangeTblEntry *rte);
103static void set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
104 RangeTblEntry *rte);
105static void set_tablesample_rel_size(PlannerInfo *root, RelOptInfo *rel,
106 RangeTblEntry *rte);
107static void set_tablesample_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
108 RangeTblEntry *rte);
109static void set_foreign_size(PlannerInfo *root, RelOptInfo *rel,
110 RangeTblEntry *rte);
111static void set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel,
112 RangeTblEntry *rte);
113static void set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
114 Index rti, RangeTblEntry *rte);
115static void set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
116 Index rti, RangeTblEntry *rte);
117static void generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel,
118 List *live_childrels,
119 List *all_child_pathkeys);
120static Path *get_cheapest_parameterized_child_path(PlannerInfo *root,
121 RelOptInfo *rel,
122 Relids required_outer);
123static void accumulate_append_subpath(Path *path,
124 List **subpaths,
125 List **special_subpaths);
126static Path *get_singleton_append_subpath(Path *path);
127static void set_dummy_rel_pathlist(RelOptInfo *rel);
128static void set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
129 Index rti, RangeTblEntry *rte);
130static void set_function_pathlist(PlannerInfo *root, RelOptInfo *rel,
131 RangeTblEntry *rte);
132static void set_values_pathlist(PlannerInfo *root, RelOptInfo *rel,
133 RangeTblEntry *rte);
134static void set_tablefunc_pathlist(PlannerInfo *root, RelOptInfo *rel,
135 RangeTblEntry *rte);
136static void set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel,
137 RangeTblEntry *rte);
138static void set_namedtuplestore_pathlist(PlannerInfo *root, RelOptInfo *rel,
139 RangeTblEntry *rte);
140static void set_result_pathlist(PlannerInfo *root, RelOptInfo *rel,
141 RangeTblEntry *rte);
142static void set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel,
143 RangeTblEntry *rte);
144static RelOptInfo *make_rel_from_joinlist(PlannerInfo *root, List *joinlist);
145static bool subquery_is_pushdown_safe(Query *subquery, Query *topquery,
146 pushdown_safety_info *safetyInfo);
147static bool recurse_pushdown_safe(Node *setOp, Query *topquery,
148 pushdown_safety_info *safetyInfo);
149static void check_output_expressions(Query *subquery,
150 pushdown_safety_info *safetyInfo);
151static void compare_tlist_datatypes(List *tlist, List *colTypes,
152 pushdown_safety_info *safetyInfo);
153static bool targetIsInAllPartitionLists(TargetEntry *tle, Query *query);
154static pushdown_safe_type qual_is_pushdown_safe(Query *subquery, Index rti,
155 RestrictInfo *rinfo,
156 pushdown_safety_info *safetyInfo);
157static void subquery_push_qual(Query *subquery,
158 RangeTblEntry *rte, Index rti, Node *qual);
159static void recurse_push_qual(Node *setOp, Query *topquery,
160 RangeTblEntry *rte, Index rti, Node *qual);
161static void remove_unused_subquery_outputs(Query *subquery, RelOptInfo *rel,
162 Bitmapset *extra_used_attrs);
163
164
165/*
166 * make_one_rel
167 * Finds all possible access paths for executing a query, returning a
168 * single rel that represents the join of all base rels in the query.
169 */
170RelOptInfo *
171make_one_rel(PlannerInfo *root, List *joinlist)
172{
173 RelOptInfo *rel;
174 Index rti;
175 double total_pages;
176
177 /* Mark base rels as to whether we care about fast-start plans */
178 set_base_rel_consider_startup(root);
179
180 /*
181 * Compute size estimates and consider_parallel flags for each base rel.
182 */
183 set_base_rel_sizes(root);
184
185 /*
186 * We should now have size estimates for every actual table involved in
187 * the query, and we also know which if any have been deleted from the
188 * query by join removal, pruned by partition pruning, or eliminated by
189 * constraint exclusion. So we can now compute total_table_pages.
190 *
191 * Note that appendrels are not double-counted here, even though we don't
192 * bother to distinguish RelOptInfos for appendrel parents, because the
193 * parents will have pages = 0.
194 *
195 * XXX if a table is self-joined, we will count it once per appearance,
196 * which perhaps is the wrong thing ... but that's not completely clear,
197 * and detecting self-joins here is difficult, so ignore it for now.
198 */
199 total_pages = 0;
200 for (rti = 1; rti < root->simple_rel_array_size; rti++)
201 {
202 RelOptInfo *brel = root->simple_rel_array[rti];
203
204 /* there may be empty slots corresponding to non-baserel RTEs */
205 if (brel == NULL)
206 continue;
207
208 Assert(brel->relid == rti); /* sanity check on array */
209
210 if (IS_DUMMY_REL(brel))
211 continue;
212
213 if (IS_SIMPLE_REL(brel))
214 total_pages += (double) brel->pages;
215 }
216 root->total_table_pages = total_pages;
217
218 /*
219 * Generate access paths for each base rel.
220 */
221 set_base_rel_pathlists(root);
222
223 /*
224 * Generate access paths for the entire join tree.
225 */
226 rel = make_rel_from_joinlist(root, joinlist);
227
228 /*
229 * The result should join all and only the query's base + outer-join rels.
230 */
231 Assert(bms_equal(rel->relids, root->all_query_rels));
232
233 return rel;
234}
235
236/*
237 * set_base_rel_consider_startup
238 * Set the consider_[param_]startup flags for each base-relation entry.
239 *
240 * For the moment, we only deal with consider_param_startup here; because the
241 * logic for consider_startup is pretty trivial and is the same for every base
242 * relation, we just let build_simple_rel() initialize that flag correctly to
243 * start with. If that logic ever gets more complicated it would probably
244 * be better to move it here.
245 */
246static void
247set_base_rel_consider_startup(PlannerInfo *root)
248{
249 /*
250 * Since parameterized paths can only be used on the inside of a nestloop
251 * join plan, there is usually little value in considering fast-start
252 * plans for them. However, for relations that are on the RHS of a SEMI
253 * or ANTI join, a fast-start plan can be useful because we're only going
254 * to care about fetching one tuple anyway.
255 *
256 * To minimize growth of planning time, we currently restrict this to
257 * cases where the RHS is a single base relation, not a join; there is no
258 * provision for consider_param_startup to get set at all on joinrels.
259 * Also we don't worry about appendrels. costsize.c's costing rules for
260 * nestloop semi/antijoins don't consider such cases either.
261 */
262 ListCell *lc;
263
264 foreach(lc, root->join_info_list)
265 {
266 SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(lc);
267 int varno;
268
269 if ((sjinfo->jointype == JOIN_SEMI || sjinfo->jointype == JOIN_ANTI) &&
270 bms_get_singleton_member(sjinfo->syn_righthand, &varno))
271 {
272 RelOptInfo *rel = find_base_rel(root, varno);
273
274 rel->consider_param_startup = true;
275 }
276 }
277}
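/*
 * Editor's note --- illustrative example, not part of the original file.
 * Assuming tables t1 and t2, the EXISTS below becomes a SEMI join with t2
 * alone on its RHS, so t2 gets consider_param_startup = true: a
 * parameterized fast-start path on t2 only needs to fetch one matching
 * tuple per outer row.
 *
 *	SELECT * FROM t1 WHERE EXISTS (SELECT 1 FROM t2 WHERE t2.a = t1.a);
 */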
278
279/*
280 * set_base_rel_sizes
281 * Set the size estimates (rows and widths) for each base-relation entry.
282 * Also determine whether to consider parallel paths for base relations.
283 *
284 * We do this in a separate pass over the base rels so that rowcount
285 * estimates are available for parameterized path generation, and also so
286 * that each rel's consider_parallel flag is set correctly before we begin to
287 * generate paths.
288 */
289static void
290set_base_rel_sizes(PlannerInfo *root)
291{
292 Index rti;
293
294 for (rti = 1; rti < root->simple_rel_array_size; rti++)
295 {
296 RelOptInfo *rel = root->simple_rel_array[rti];
297 RangeTblEntry *rte;
298
299 /* there may be empty slots corresponding to non-baserel RTEs */
300 if (rel == NULL)
301 continue;
302
303 Assert(rel->relid == rti); /* sanity check on array */
304
305 /* ignore RTEs that are "other rels" */
306 if (rel->reloptkind != RELOPT_BASEREL)
307 continue;
308
309 rte = root->simple_rte_array[rti];
310
311 /*
312 * If parallelism is allowable for this query in general, see whether
313 * it's allowable for this rel in particular. We have to do this
314 * before set_rel_size(), because (a) if this rel is an inheritance
315 * parent, set_append_rel_size() will use and perhaps change the rel's
316 * consider_parallel flag, and (b) for some RTE types, set_rel_size()
317 * goes ahead and makes paths immediately.
318 */
319 if (root->glob->parallelModeOK)
320 set_rel_consider_parallel(root, rel, rte);
321
322 set_rel_size(root, rel, rti, rte);
323 }
324}
325
326/*
327 * set_base_rel_pathlists
328 * Finds all paths available for scanning each base-relation entry.
329 * Sequential scan and any available indices are considered.
330 * Each useful path is attached to its relation's 'pathlist' field.
331 */
332static void
333set_base_rel_pathlists(PlannerInfo *root)
334{
335 Index rti;
336
337 for (rti = 1; rti < root->simple_rel_array_size; rti++)
338 {
339 RelOptInfo *rel = root->simple_rel_array[rti];
340
341 /* there may be empty slots corresponding to non-baserel RTEs */
342 if (rel == NULL)
343 continue;
344
345 Assert(rel->relid == rti); /* sanity check on array */
346
347 /* ignore RTEs that are "other rels" */
348 if (rel->reloptkind != RELOPT_BASEREL)
349 continue;
350
351 set_rel_pathlist(root, rel, rti, root->simple_rte_array[rti]);
352 }
353}
354
355/*
356 * set_rel_size
357 * Set size estimates for a base relation
358 */
359static void
360set_rel_size(PlannerInfo *root, RelOptInfo *rel,
361 Index rti, RangeTblEntry *rte)
362{
363 if (rel->reloptkind == RELOPT_BASEREL &&
364 relation_excluded_by_constraints(root, rel, rte))
365 {
366 /*
367 * We proved we don't need to scan the rel via constraint exclusion,
368 * so set up a single dummy path for it. Here we only check this for
369 * regular baserels; if it's an otherrel, CE was already checked in
370 * set_append_rel_size().
371 *
372 * In this case, we go ahead and set up the relation's path right away
373 * instead of leaving it for set_rel_pathlist to do. This is because
374 * we don't have a convention for marking a rel as dummy except by
375 * assigning a dummy path to it.
376 */
377 set_dummy_rel_pathlist(rel);
378 }
379 else if (rte->inh)
380 {
381 /* It's an "append relation", process accordingly */
382 set_append_rel_size(root, rel, rti, rte);
383 }
384 else
385 {
386 switch (rel->rtekind)
387 {
388 case RTE_RELATION:
389 if (rte->relkind == RELKIND_FOREIGN_TABLE)
390 {
391 /* Foreign table */
392 set_foreign_size(root, rel, rte);
393 }
394 else if (rte->relkind == RELKIND_PARTITIONED_TABLE)
395 {
396 /*
397 * We could get here if asked to scan a partitioned table
398 * with ONLY. In that case we shouldn't scan any of the
399 * partitions, so mark it as a dummy rel.
400 */
401 set_dummy_rel_pathlist(rel);
402 }
403 else if (rte->tablesample != NULL)
404 {
405 /* Sampled relation */
406 set_tablesample_rel_size(root, rel, rte);
407 }
408 else
409 {
410 /* Plain relation */
411 set_plain_rel_size(root, rel, rte);
412 }
413 break;
414 case RTE_SUBQUERY:
415
416 /*
417 * Subqueries don't support making a choice between
418 * parameterized and unparameterized paths, so just go ahead
419 * and build their paths immediately.
420 */
421 set_subquery_pathlist(root, rel, rti, rte);
422 break;
423 case RTE_FUNCTION:
424 set_function_size_estimates(root, rel);
425 break;
426 case RTE_TABLEFUNC:
427 set_tablefunc_size_estimates(root, rel);
428 break;
429 case RTE_VALUES:
430 set_values_size_estimates(root, rel);
431 break;
432 case RTE_CTE:
433
434 /*
435 * CTEs don't support making a choice between parameterized
436 * and unparameterized paths, so just go ahead and build their
437 * paths immediately.
438 */
439 if (rte->self_reference)
440 set_worktable_pathlist(root, rel, rte);
441 else
442 set_cte_pathlist(root, rel, rte);
443 break;
444 case RTE_NAMEDTUPLESTORE:
445 /* Might as well just build the path immediately */
446 set_namedtuplestore_pathlist(root, rel, rte);
447 break;
448 case RTE_RESULT:
449 /* Might as well just build the path immediately */
450 set_result_pathlist(root, rel, rte);
451 break;
452 default:
453 elog(ERROR, "unexpected rtekind: %d", (int) rel->rtekind);
454 break;
455 }
456 }
457
458 /*
459 * We insist that all non-dummy rels have a nonzero rowcount estimate.
460 */
461 Assert(rel->rows > 0 || IS_DUMMY_REL(rel));
462}
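/*
 * Editor's note --- illustrative example, not part of the original file.
 * Assuming a partitioned table "parted", scanning it with ONLY yields a
 * dummy rel, since the partitioned parent itself holds no rows:
 *
 *	SELECT * FROM ONLY parted;	-- produces no rows; the rel is marked dummy
 */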
463
464/*
465 * set_rel_pathlist
466 * Build access paths for a base relation
467 */
468static void
469set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
470 Index rti, RangeTblEntry *rte)
471{
472 if (IS_DUMMY_REL(rel))
473 {
474 /* We already proved the relation empty, so nothing more to do */
475 }
476 else if (rte->inh)
477 {
478 /* It's an "append relation", process accordingly */
479 set_append_rel_pathlist(root, rel, rti, rte);
480 }
481 else
482 {
483 switch (rel->rtekind)
484 {
485 case RTE_RELATION:
486 if (rte->relkind == RELKIND_FOREIGN_TABLE)
487 {
488 /* Foreign table */
489 set_foreign_pathlist(root, rel, rte);
490 }
491 else if (rte->tablesample != NULL)
492 {
493 /* Sampled relation */
494 set_tablesample_rel_pathlist(root, rel, rte);
495 }
496 else
497 {
498 /* Plain relation */
499 set_plain_rel_pathlist(root, rel, rte);
500 }
501 break;
502 case RTE_SUBQUERY:
503 /* Subquery --- fully handled during set_rel_size */
504 break;
505 case RTE_FUNCTION:
506 /* RangeFunction */
507 set_function_pathlist(root, rel, rte);
508 break;
509 case RTE_TABLEFUNC:
510 /* Table Function */
511 set_tablefunc_pathlist(root, rel, rte);
512 break;
513 case RTE_VALUES:
514 /* Values list */
515 set_values_pathlist(root, rel, rte);
516 break;
517 case RTE_CTE:
518 /* CTE reference --- fully handled during set_rel_size */
519 break;
520 case RTE_NAMEDTUPLESTORE:
521 /* tuplestore reference --- fully handled during set_rel_size */
522 break;
523 case RTE_RESULT:
524 /* simple Result --- fully handled during set_rel_size */
525 break;
526 default:
527 elog(ERROR, "unexpected rtekind: %d", (int) rel->rtekind);
528 break;
529 }
530 }
531
532 /*
533 * Allow a plugin to editorialize on the set of Paths for this base
534 * relation. It could add new paths (such as CustomPaths) by calling
535 * add_path(), or add_partial_path() if parallel aware. It could also
536 * delete or modify paths added by the core code.
537 */
538 if (set_rel_pathlist_hook)
539 (*set_rel_pathlist_hook) (root, rel, rti, rte);
540
541 /*
542 * If this is a baserel, we should normally consider gathering any partial
543 * paths we may have created for it. We have to do this after calling the
544 * set_rel_pathlist_hook, else it cannot add partial paths to be included
545 * here.
546 *
547 * However, if this is an inheritance child, skip it. Otherwise, we could
548 * end up with a very large number of gather nodes, each trying to grab
549 * its own pool of workers. Instead, we'll consider gathering partial
550 * paths for the parent appendrel.
551 *
552 * Also, if this is the topmost scan/join rel, we postpone gathering until
553 * the final scan/join targetlist is available (see grouping_planner).
554 */
555 if (rel->reloptkind == RELOPT_BASEREL &&
556 !bms_equal(rel->relids, root->all_query_rels))
557 generate_useful_gather_paths(root, rel, false);
558
559 /* Now find the cheapest of the paths for this rel */
560 set_cheapest(rel);
561
562#ifdef OPTIMIZER_DEBUG
563 pprint(rel);
564#endif
565}
566
567/*
568 * set_plain_rel_size
569 * Set size estimates for a plain relation (no subquery, no inheritance)
570 */
571static void
572set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
573{
574 /*
575 * Test any partial indexes of rel for applicability. We must do this
576 * first since partial unique indexes can affect size estimates.
577 */
578 check_index_predicates(root, rel);
579
580 /* Mark rel with estimated output rows, width, etc */
581 set_baserel_size_estimates(root, rel);
582}
583
584/*
585 * If this relation could possibly be scanned from within a worker, then set
586 * its consider_parallel flag.
587 */
588static void
589set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
590 RangeTblEntry *rte)
591{
592 /*
593 * The flag has previously been initialized to false, so we can just
594 * return if it becomes clear that we can't safely set it.
595 */
596 Assert(!rel->consider_parallel);
597
598 /* Don't call this if parallelism is disallowed for the entire query. */
599 Assert(root->glob->parallelModeOK);
600
601 /* This should only be called for baserels and appendrel children. */
602 Assert(IS_SIMPLE_REL(rel));
603
604 /* Assorted checks based on rtekind. */
605 switch (rte->rtekind)
606 {
607 case RTE_RELATION:
608
609 /*
610 * Currently, parallel workers can't access the leader's temporary
611 * tables. We could possibly relax this if we wrote all of its
612 * local buffers at the start of the query and made no changes
613 * thereafter (maybe we could allow hint bit changes), and if we
614 * taught the workers to read them. Writing a large number of
615 * temporary buffers could be expensive, though, and we don't have
616 * the rest of the necessary infrastructure right now anyway. So
617 * for now, bail out if we see a temporary table.
618 */
619 if (get_rel_persistence(rte->relid) == RELPERSISTENCE_TEMP)
620 return;
621
622 /*
623 * Table sampling can be pushed down to workers if the sample
624 * function and its arguments are safe.
625 */
626 if (rte->tablesample != NULL)
627 {
628 char proparallel = func_parallel(rte->tablesample->tsmhandler);
629
630 if (proparallel != PROPARALLEL_SAFE)
631 return;
632 if (!is_parallel_safe(root, (Node *) rte->tablesample->args))
633 return;
634 }
635
636 /*
637 * Ask FDWs whether they can support performing a ForeignScan
638 * within a worker. Most often, the answer will be no. For
639 * example, if the nature of the FDW is such that it opens a TCP
640 * connection with a remote server, each parallel worker would end
641 * up with a separate connection, and these connections might not
642 * be appropriately coordinated between workers and the leader.
643 */
644 if (rte->relkind == RELKIND_FOREIGN_TABLE)
645 {
646 Assert(rel->fdwroutine);
647 if (!rel->fdwroutine->IsForeignScanParallelSafe)
648 return;
649 if (!rel->fdwroutine->IsForeignScanParallelSafe(root, rel, rte))
650 return;
651 }
652
653 /*
654 * There are additional considerations for appendrels, which we'll
655 * deal with in set_append_rel_size and set_append_rel_pathlist.
656 * For now, just set consider_parallel based on the rel's own
657 * quals and targetlist.
658 */
659 break;
660
661 case RTE_SUBQUERY:
662
663 /*
664 * There's no intrinsic problem with scanning a subquery-in-FROM
665 * (as distinct from a SubPlan or InitPlan) in a parallel worker.
666 * If the subquery doesn't happen to have any parallel-safe paths,
667 * then flagging it as consider_parallel won't change anything,
668 * but that's true for plain tables, too. We must set
669 * consider_parallel based on the rel's own quals and targetlist,
670 * so that if a subquery path is parallel-safe but the quals and
671 * projection we're sticking onto it are not, we correctly mark
672 * the SubqueryScanPath as not parallel-safe. (Note that
673 * set_subquery_pathlist() might push some of these quals down
674 * into the subquery itself, but that doesn't change anything.)
675 *
676 * We can't push a sub-select containing LIMIT/OFFSET to workers, as
677 * there is no guarantee that the row order will be fully
678 * deterministic, and applying LIMIT/OFFSET will lead to
679 * inconsistent results at the top-level. (In some cases, where
680 * the result is ordered, we could relax this restriction. But it
681 * doesn't currently seem worth expending extra effort to do so.)
682 */
683 {
684 Query *subquery = castNode(Query, rte->subquery);
685
686 if (limit_needed(subquery))
687 return;
688 }
689 break;
690
691 case RTE_JOIN:
692 /* Shouldn't happen; we're only considering baserels here. */
693 Assert(false);
694 return;
695
696 case RTE_FUNCTION:
697 /* Check for parallel-restricted functions. */
698 if (!is_parallel_safe(root, (Node *) rte->functions))
699 return;
700 break;
701
702 case RTE_TABLEFUNC:
703 /* not parallel safe */
704 return;
705
706 case RTE_VALUES:
707 /* Check for parallel-restricted functions. */
708 if (!is_parallel_safe(root, (Node *) rte->values_lists))
709 return;
710 break;
711
712 case RTE_CTE:
713
714 /*
715 * CTE tuplestores aren't shared among parallel workers, so we
716 * force all CTE scans to happen in the leader. Also, populating
717 * the CTE would require executing a subplan that's not available
718 * in the worker, might be parallel-restricted, and must get
719 * executed only once.
720 */
721 return;
722
722
723 case RTE_NAMEDTUPLESTORE:
724
725 /*
726 * tuplestore cannot be shared, at least without more
727 * infrastructure to support that.
728 */
729 return;
730
731 case RTE_RESULT:
732 /* RESULT RTEs, in themselves, are no problem. */
733 break;
734 case RTE_GROUP:
735 /* Shouldn't happen; we're only considering baserels here. */
736 Assert(false);
737 return;
738 }
739
740 /*
741 * If there's anything in baserestrictinfo that's parallel-restricted, we
742 * give up on parallelizing access to this relation. We could consider
743 * instead postponing application of the restricted quals until we're
744 * above all the parallelism in the plan tree, but it's not clear that
745 * that would be a win in very many cases, and it might be tricky to make
746 * outer join clauses work correctly. It would likely break equivalence
747 * classes, too.
748 */
749 if (!is_parallel_safe(root, (Node *) rel->baserestrictinfo))
750 return;
751
752 /*
753 * Likewise, if the relation's outputs are not parallel-safe, give up.
754 * (Usually, they're just Vars, but sometimes they're not.)
755 */
756 if (!is_parallel_safe(root, (Node *) rel->reltarget->exprs))
757 return;
758
759 /* We have a winner. */
760 rel->consider_parallel = true;
761}
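/*
 * Editor's note --- illustrative example, not part of the original file.
 * A temporary table never gets consider_parallel, since workers cannot
 * read the leader's local buffers:
 *
 *	CREATE TEMP TABLE tt AS SELECT g FROM generate_series(1, 1000000) g;
 *	SELECT count(*) FROM tt;	-- will not use a parallel plan
 */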
762
763/*
764 * set_plain_rel_pathlist
765 * Build access paths for a plain relation (no subquery, no inheritance)
766 */
767static void
768set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
769{
770 Relids required_outer;
771
772 /*
773 * We don't support pushing join clauses into the quals of a seqscan, but
774 * it could still have required parameterization due to LATERAL refs in
775 * its tlist.
776 */
777 required_outer = rel->lateral_relids;
778
779 /*
780 * Consider TID scans.
781 *
782 * If create_tidscan_paths returns true, then a TID scan path is forced.
783 * This happens when rel->baserestrictinfo contains CurrentOfExpr, because
784 * the executor can't handle any other type of path for such queries.
785 * Hence, we return without adding any other paths.
786 */
787 if (create_tidscan_paths(root, rel))
788 return;
789
790 /* Consider sequential scan */
791 add_path(rel, create_seqscan_path(root, rel, required_outer, 0));
792
793 /* If appropriate, consider parallel sequential scan */
794 if (rel->consider_parallel && required_outer == NULL)
795 create_plain_partial_paths(root, rel);
796
797 /* Consider index scans */
798 create_index_paths(root, rel);
799}
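/*
 * Editor's note --- illustrative example, not part of the original file.
 * A WHERE CURRENT OF qual arrives here as a CurrentOfExpr in
 * baserestrictinfo, so create_tidscan_paths() returns true and the TID
 * scan is the only path generated.  Assuming a cursor c over table t:
 *
 *	UPDATE t SET b = 0 WHERE CURRENT OF c;
 */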
800
801/*
802 * create_plain_partial_paths
803 * Build partial access paths for parallel scan of a plain relation
804 */
805static void
806create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel)
807{
808 int parallel_workers;
809
810 parallel_workers = compute_parallel_worker(rel, rel->pages, -1,
811 max_parallel_workers_per_gather);
812
813 /* If any limit was set to zero, the user doesn't want a parallel scan. */
814 if (parallel_workers <= 0)
815 return;
816
817 /* Add an unordered partial path based on a parallel sequential scan. */
818 add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers));
819}
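/*
 * Editor's note --- a worked example, not part of the original file.
 * With the default min_parallel_table_scan_size of 8MB (1024 8kB pages),
 * compute_parallel_worker() requests one worker once the relation reaches
 * that size and one more for each further factor of three, capped by
 * max_parallel_workers_per_gather:
 *
 *	pages <  1024	-> 0 workers (no partial path is added)
 *	pages >= 1024	-> 1 worker
 *	pages >= 3072	-> 2 workers
 *	pages >= 9216	-> 3 workers
 */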
820
821/*
822 * set_tablesample_rel_size
823 * Set size estimates for a sampled relation
824 */
825static void
826set_tablesample_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
827{
828 TableSampleClause *tsc = rte->tablesample;
829 TsmRoutine *tsm;
830 BlockNumber pages;
831 double tuples;
832
833 /*
834 * Test any partial indexes of rel for applicability. We must do this
835 * first since partial unique indexes can affect size estimates.
836 */
837 check_index_predicates(root, rel);
838
839 /*
840 * Call the sampling method's estimation function to estimate the number
841 * of pages it will read and the number of tuples it will return. (Note:
842 * we assume the function returns sane values.)
843 */
844 tsm = GetTsmRoutine(tsc->tsmhandler);
845 tsm->SampleScanGetSampleSize(root, rel, tsc->args,
846 &pages, &tuples);
847
848 /*
849 * For the moment, because we will only consider a SampleScan path for the
850 * rel, it's okay to just overwrite the pages and tuples estimates for the
851 * whole relation. If we ever consider multiple path types for sampled
852 * rels, we'll need more complication.
853 */
854 rel->pages = pages;
855 rel->tuples = tuples;
856
857 /* Mark rel with estimated output rows, width, etc */
858 set_baserel_size_estimates(root, rel);
859}
860
861/*
862 * set_tablesample_rel_pathlist
863 * Build access paths for a sampled relation
864 */
865static void
866set_tablesample_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
867{
868 Relids required_outer;
869 Path *path;
870
871 /*
872 * We don't support pushing join clauses into the quals of a samplescan,
873 * but it could still have required parameterization due to LATERAL refs
874 * in its tlist or TABLESAMPLE arguments.
875 */
876 required_outer = rel->lateral_relids;
877
878 /* Consider sampled scan */
879 path = create_samplescan_path(root, rel, required_outer);
880
881 /*
882 * If the sampling method does not support repeatable scans, we must avoid
883 * plans that would scan the rel multiple times. Ideally, we'd simply
884 * avoid putting the rel on the inside of a nestloop join; but adding such
885 * a consideration to the planner seems like a great deal of complication
886 * to support an uncommon usage of second-rate sampling methods. Instead,
887 * if there is a risk that the query might perform an unsafe join, just
888 * wrap the SampleScan in a Materialize node. We can check for joins by
889 * counting the membership of all_query_rels (note that this correctly
890 * counts inheritance trees as single rels). If we're inside a subquery,
891 * we can't easily check whether a join might occur in the outer query, so
892 * just assume one is possible.
893 *
894 * GetTsmRoutine is relatively expensive compared to the other tests here,
895 * so check repeatable_across_scans last, even though that's a bit odd.
896 */
897 if ((root->query_level > 1 ||
898 bms_membership(root->all_query_rels) != BMS_SINGLETON) &&
899 !(GetTsmRoutine(rte->tablesample->tsmhandler)->repeatable_across_scans))
900 {
901 path = (Path *) create_material_path(rel, path);
902 }
903
904 add_path(rel, path);
905
906 /* For the moment, at least, there are no other paths to consider */
907}
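/*
 * Editor's note --- illustrative example, not part of the original file.
 * The built-in SYSTEM and BERNOULLI sampling methods are repeatable
 * across scans, so no Materialize wrapper is needed for, e.g.:
 *
 *	SELECT * FROM t TABLESAMPLE SYSTEM (1) REPEATABLE (42);
 *
 * A method whose repeatable_across_scans flag is false would instead be
 * materialized if the query could scan it more than once.
 */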
908
909/*
910 * set_foreign_size
911 * Set size estimates for a foreign table RTE
912 */
913static void
914set_foreign_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
915{
916 /* Mark rel with estimated output rows, width, etc */
917 set_foreign_size_estimates(root, rel);
918
919 /* Let FDW adjust the size estimates, if it can */
920 rel->fdwroutine->GetForeignRelSize(root, rel, rte->relid);
921
922 /* ... but do not let it set the rows estimate to zero */
923 rel->rows = clamp_row_est(rel->rows);
924
925 /*
926 * Also, make sure rel->tuples is not insane relative to rel->rows.
927 * Notably, this ensures sanity if pg_class.reltuples contains -1 and the
928 * FDW doesn't do anything to replace that.
929 */
930 rel->tuples = Max(rel->tuples, rel->rows);
931}
932
933/*
934 * set_foreign_pathlist
935 * Build access paths for a foreign table RTE
936 */
937static void
938set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
939{
940 /* Call the FDW's GetForeignPaths function to generate path(s) */
941 rel->fdwroutine->GetForeignPaths(root, rel, rte->relid);
942}
943
944/*
945 * set_append_rel_size
946 * Set size estimates for a simple "append relation"
947 *
948 * The passed-in rel and RTE represent the entire append relation. The
949 * relation's contents are computed by appending together the output of the
950 * individual member relations. Note that in the non-partitioned inheritance
951 * case, the first member relation is actually the same table as is mentioned
952 * in the parent RTE ... but it has a different RTE and RelOptInfo. This is
953 * a good thing because their outputs are not the same size.
954 */
955static void
956set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
957 Index rti, RangeTblEntry *rte)
958{
959 int parentRTindex = rti;
960 bool has_live_children;
961 double parent_tuples;
962 double parent_rows;
963 double parent_size;
964 double *parent_attrsizes;
965 int nattrs;
966 ListCell *l;
967
968 /* Guard against stack overflow due to overly deep inheritance tree. */
969 check_stack_depth();
970
971 Assert(IS_SIMPLE_REL(rel));
972
973 /*
974 * If this is a partitioned baserel, set the consider_partitionwise_join
975 * flag; currently, we only consider partitionwise joins with the baserel
976 * if its targetlist doesn't contain a whole-row Var.
977 */
978 if (enable_partitionwise_join &&
979 rel->reloptkind == RELOPT_BASEREL &&
980 rte->relkind == RELKIND_PARTITIONED_TABLE &&
981 bms_is_empty(rel->attr_needed[InvalidAttrNumber - rel->min_attr]))
982 rel->consider_partitionwise_join = true;
983
984 /*
985 * Initialize to compute size estimates for whole append relation.
986 *
987 * We handle tuples estimates by setting "tuples" to the total number of
988 * tuples accumulated from each live child, rather than using "rows".
989 * Although an appendrel itself doesn't directly enforce any quals, its
990 * child relations may. Therefore, setting "tuples" equal to "rows" for
991 * an appendrel isn't always appropriate, and can lead to inaccurate cost
992 * estimates. For example, when estimating the number of distinct values
993 * from an appendrel, we would be unable to adjust the estimate based on
994 * the restriction selectivity (see estimate_num_groups).
995 *
996 * We handle width estimates by weighting the widths of different child
997 * rels proportionally to their number of rows. This is sensible because
998 * the use of width estimates is mainly to compute the total relation
999 * "footprint" if we have to sort or hash it. To do this, we sum the
1000 * total equivalent size (in "double" arithmetic) and then divide by the
1001 * total rowcount estimate. This is done separately for the total rel
1002 * width and each attribute.
1003 *
1004 * Note: if you consider changing this logic, beware that child rels could
1005 * have zero rows and/or width, if they were excluded by constraints.
1006 */
1007 has_live_children = false;
1008 parent_tuples = 0;
1009 parent_rows = 0;
1010 parent_size = 0;
1011 nattrs = rel->max_attr - rel->min_attr + 1;
1012 parent_attrsizes = (double *) palloc0(nattrs * sizeof(double));
1013
1014 foreach(l, root->append_rel_list)
1015 {
1016 AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
1017 int childRTindex;
1018 RangeTblEntry *childRTE;
1019 RelOptInfo *childrel;
1020 List *childrinfos;
1021 ListCell *parentvars;
1022 ListCell *childvars;
1023 ListCell *lc;
1024
1025 /* append_rel_list contains all append rels; ignore others */
1026 if (appinfo->parent_relid != parentRTindex)
1027 continue;
1028
1029 childRTindex = appinfo->child_relid;
1030 childRTE = root->simple_rte_array[childRTindex];
1031
1032 /*
1033 * The child rel's RelOptInfo was already created during
1034 * add_other_rels_to_query.
1035 */
1036 childrel = find_base_rel(root, childRTindex);
1037 Assert(childrel->reloptkind == RELOPT_OTHER_MEMBER_REL);
1038
1039 /* We may have already proven the child to be dummy. */
1040 if (IS_DUMMY_REL(childrel))
1041 continue;
1042
1043 /*
1044 * We have to copy the parent's targetlist and quals to the child,
1045 * with appropriate substitution of variables. However, the
1046 * baserestrictinfo quals were already copied/substituted when the
1047 * child RelOptInfo was built. So we don't need any additional setup
1048 * before applying constraint exclusion.
1049 */
1050 if (relation_excluded_by_constraints(root, childrel, childRTE))
1051 {
1052 /*
1053 * This child need not be scanned, so we can omit it from the
1054 * appendrel.
1055 */
1056 set_dummy_rel_pathlist(childrel);
1057 continue;
1058 }
1059
1060 /*
1061 * Constraint exclusion failed, so copy the parent's join quals and
1062 * targetlist to the child, with appropriate variable substitutions.
1063 *
1064 * We skip join quals that came from above outer joins that can null
1065 * this rel, since they would be of no value while generating paths
1066 * for the child. This saves some effort while processing the child
1067 * rel, and it also avoids an implementation restriction in
1068 * adjust_appendrel_attrs (it can't apply nullingrels to a non-Var).
1069 */
1070 childrinfos = NIL;
1071 foreach(lc, rel->joininfo)
1072 {
1073 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1074
1075 if (!bms_overlap(rinfo->clause_relids, rel->nulling_relids))
1076 childrinfos = lappend(childrinfos,
1077 adjust_appendrel_attrs(root,
1078 (Node *) rinfo,
1079 1, &appinfo));
1080 }
1081 childrel->joininfo = childrinfos;
1082
1083 /*
1084 * Now for the child's targetlist.
1085 *
1086 * NB: the resulting childrel->reltarget->exprs may contain arbitrary
1087 * expressions, which otherwise would not occur in a rel's targetlist.
1088 * Code that might be looking at an appendrel child must cope with
1089 * such. (Normally, a rel's targetlist would only include Vars and
1090 * PlaceHolderVars.) XXX we do not bother to update the cost or width
1091 * fields of childrel->reltarget; not clear if that would be useful.
1092 */
1093 childrel->reltarget->exprs = (List *)
1094 adjust_appendrel_attrs(root,
1095 (Node *) rel->reltarget->exprs,
1096 1, &appinfo);
1097
1098 /*
1099 * We have to make child entries in the EquivalenceClass data
1100 * structures as well. This is needed either if the parent
1101 * participates in some eclass joins (because we will want to consider
1102 * inner-indexscan joins on the individual children) or if the parent
1103 * has useful pathkeys (because we should try to build MergeAppend
1104 * paths that produce those sort orderings).
1105 */
1106 if (rel->has_eclass_joins || has_useful_pathkeys(root, rel))
1107 add_child_rel_equivalences(root, appinfo, rel, childrel);
1108 childrel->has_eclass_joins = rel->has_eclass_joins;
1109
1110 /*
1111 * Note: we could compute appropriate attr_needed data for the child's
1112 * variables, by transforming the parent's attr_needed through the
1113 * translated_vars mapping. However, currently there's no need
1114 * because attr_needed is only examined for base relations not
1115 * otherrels. So we just leave the child's attr_needed empty.
1116 */
1117
1118 /*
1119 * If we consider partitionwise joins with the parent rel, do the same
1120 * for partitioned child rels.
1121 *
1122 * Note: here we abuse the consider_partitionwise_join flag by setting
1123 * it for child rels that are not themselves partitioned. We do so to
1124 * tell try_partitionwise_join() that the child rel is sufficiently
1125 * valid to be used as a per-partition input, even if it later gets
1126 * proven to be dummy. (It's not usable until we've set up the
1127 * reltarget and EC entries, which we just did.)
1128 */
1129 if (rel->consider_partitionwise_join)
1130 childrel->consider_partitionwise_join = true;
1131
1132 /*
1133 * If parallelism is allowable for this query in general, see whether
1134 * it's allowable for this childrel in particular. But if we've
1135 * already decided the appendrel is not parallel-safe as a whole,
1136 * there's no point in considering parallelism for this child. For
1137 * consistency, do this before calling set_rel_size() for the child.
1138 */
1139 if (root->glob->parallelModeOK && rel->consider_parallel)
1140 set_rel_consider_parallel(root, childrel, childRTE);
1141
1142 /*
1143 * Compute the child's size.
1144 */
1145 set_rel_size(root, childrel, childRTindex, childRTE);
1146
1147 /*
1148 * It is possible that constraint exclusion detected a contradiction
1149 * within a child subquery, even though we didn't prove one above. If
1150 * so, we can skip this child.
1151 */
1152 if (IS_DUMMY_REL(childrel))
1153 continue;
1154
1155 /* We have at least one live child. */
1156 has_live_children = true;
1157
1158 /*
1159 * If any live child is not parallel-safe, treat the whole appendrel
1160 * as not parallel-safe. In future we might be able to generate plans
1161 * in which some children are farmed out to workers while others are
1162 * not; but we don't have that today, so it's a waste to consider
1163 * partial paths anywhere in the appendrel unless it's all safe.
1164 * (Child rels visited before this one will be unmarked in
1165 * set_append_rel_pathlist().)
1166 */
1167 if (!childrel->consider_parallel)
1168 rel->consider_parallel = false;
1169
1170 /*
1171 * Accumulate size information from each live child.
1172 */
1173 Assert(childrel->rows > 0);
1174
1175 parent_tuples += childrel->tuples;
1176 parent_rows += childrel->rows;
1177 parent_size += childrel->reltarget->width * childrel->rows;
1178
1179 /*
1180 * Accumulate per-column estimates too. We need not do anything for
1181 * PlaceHolderVars in the parent list. If the child expression isn't a
1182 * Var, or we didn't record a width estimate for it, we have to fall
1183 * back on a datatype-based estimate.
1184 *
1185 * By construction, child's targetlist is 1-to-1 with parent's.
1186 */
1187 forboth(parentvars, rel->reltarget->exprs,
1188 childvars, childrel->reltarget->exprs)
1189 {
1190 Var *parentvar = (Var *) lfirst(parentvars);
1191 Node *childvar = (Node *) lfirst(childvars);
1192
1193 if (IsA(parentvar, Var) && parentvar->varno == parentRTindex)
1194 {
1195 int pndx = parentvar->varattno - rel->min_attr;
1196 int32 child_width = 0;
1197
1198 if (IsA(childvar, Var) &&
1199 ((Var *) childvar)->varno == childrel->relid)
1200 {
1201 int cndx = ((Var *) childvar)->varattno - childrel->min_attr;
1202
1203 child_width = childrel->attr_widths[cndx];
1204 }
1205 if (child_width <= 0)
1206 child_width = get_typavgwidth(exprType(childvar),
1207 exprTypmod(childvar));
1208 Assert(child_width > 0);
1209 parent_attrsizes[pndx] += child_width * childrel->rows;
1210 }
1211 }
1212 }
1213
1214 if (has_live_children)
1215 {
1216 /*
1217 * Save the finished size estimates.
1218 */
1219 int i;
1220
1221 Assert(parent_rows > 0);
1222 rel->tuples = parent_tuples;
1223 rel->rows = parent_rows;
1224 rel->reltarget->width = rint(parent_size / parent_rows);
1225 for (i = 0; i < nattrs; i++)
1226 rel->attr_widths[i] = rint(parent_attrsizes[i] / parent_rows);
1227
1228 /*
1229 * Note that we leave rel->pages as zero; this is important to avoid
1230 * double-counting the appendrel tree in total_table_pages.
1231 */
1232 }
1233 else
1234 {
1235 /*
1236 * All children were excluded by constraints, so mark the whole
1237 * appendrel dummy. We must do this in this phase so that the rel's
1238 * dummy-ness is visible when we generate paths for other rels.
1239 */
1240 set_dummy_rel_pathlist(rel);
1241 }
1242
1243 pfree(parent_attrsizes);
1244}
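/*
 * Editor's note --- a worked example with hypothetical numbers, not part
 * of the original file.  Two live children with 1000 rows at width 40 and
 * 3000 rows at width 60 give the parent
 *
 *	width = rint((1000*40 + 3000*60) / 4000) = 55
 *
 * i.e. each child's width is weighted by its share of the total rows.
 */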
1245
1246/*
1247 * set_append_rel_pathlist
1248 * Build access paths for an "append relation"
1249 */
1250static void
1251set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
1252 Index rti, RangeTblEntry *rte)
1253{
1254 int parentRTindex = rti;
1255 List *live_childrels = NIL;
1256 ListCell *l;
1257
1258 /*
1259 * Generate access paths for each member relation, and remember the
1260 * non-dummy children.
1261 */
1262 foreach(l, root->append_rel_list)
1263 {
1264 AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
1265 int childRTindex;
1266 RangeTblEntry *childRTE;
1267 RelOptInfo *childrel;
1268
1269 /* append_rel_list contains all append rels; ignore others */
1270 if (appinfo->parent_relid != parentRTindex)
1271 continue;
1272
1273 /* Re-locate the child RTE and RelOptInfo */
1274 childRTindex = appinfo->child_relid;
1275 childRTE = root->simple_rte_array[childRTindex];
1276 childrel = root->simple_rel_array[childRTindex];
1277
1278 /*
1279 * If set_append_rel_size() decided the parent appendrel was
1280 * parallel-unsafe at some point after visiting this child rel, we
1281 * need to propagate the unsafety marking down to the child, so that
1282 * we don't generate useless partial paths for it.
1283 */
1284 if (!rel->consider_parallel)
1285 childrel->consider_parallel = false;
1286
1287 /*
1288 * Compute the child's access paths.
1289 */
1290 set_rel_pathlist(root, childrel, childRTindex, childRTE);
1291
1292 /*
1293 * If child is dummy, ignore it.
1294 */
1295 if (IS_DUMMY_REL(childrel))
1296 continue;
1297
1298 /*
1299 * Child is live, so add it to the live_childrels list for use below.
1300 */
1301 live_childrels = lappend(live_childrels, childrel);
1302 }
1303
1304 /* Add paths to the append relation. */
1305 add_paths_to_append_rel(root, rel, live_childrels);
1306}
1307
1308
1309/*
1310 * add_paths_to_append_rel
1311 * Generate paths for the given append relation given the set of non-dummy
1312 * child rels.
1313 *
1314 * The function collects all parameterizations and orderings supported by the
1315 * non-dummy children. For every such parameterization or ordering, it creates
1316 * an append path collecting one path from each non-dummy child with given
1317 * parameterization or ordering. Similarly it collects partial paths from
1318 * non-dummy children to create partial append paths.
1319 */
1320void
1321add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel,
1322 List *live_childrels)
1323{
1324 List *subpaths = NIL;
1325 bool subpaths_valid = true;
1326 List *startup_subpaths = NIL;
1327 bool startup_subpaths_valid = true;
1328 List *partial_subpaths = NIL;
1329 List *pa_partial_subpaths = NIL;
1330 List *pa_nonpartial_subpaths = NIL;
1331 bool partial_subpaths_valid = true;
1332 bool pa_subpaths_valid;
1333 List *all_child_pathkeys = NIL;
1334 List *all_child_outers = NIL;
1335 ListCell *l;
1336 double partial_rows = -1;
1337
1338 /* If appropriate, consider parallel append */
1339 pa_subpaths_valid = enable_parallel_append && rel->consider_parallel;
1340
1341 /*
1342 * For every non-dummy child, remember the cheapest path. Also, identify
1343 * all pathkeys (orderings) and parameterizations (required_outer sets)
1344 * available for the non-dummy member relations.
1345 */
1346 foreach(l, live_childrels)
1347 {
1348 RelOptInfo *childrel = lfirst(l);
1349 ListCell *lcp;
1350 Path *cheapest_partial_path = NULL;
1351
1352 /*
1353 * If child has an unparameterized cheapest-total path, add that to
1354 * the unparameterized Append path we are constructing for the parent.
1355 * If not, there's no workable unparameterized path.
1356 *
1357 * With partitionwise aggregates, the child rel's pathlist may be
1358 * empty, so don't assume that a path exists here.
1359 */
1360 if (childrel->pathlist != NIL &&
1361 childrel->cheapest_total_path->param_info == NULL)
1362 accumulate_append_subpath(childrel->cheapest_total_path,
1363 &subpaths, NULL);
1364 else
1365 subpaths_valid = false;
1366
1367 /*
1368 * When the planner is considering cheap startup plans, we'll also
1369 * collect all the cheapest_startup_paths (if set) and build an
1370 * AppendPath containing those as subpaths.
1371 */
1372 if (rel->consider_startup && childrel->cheapest_startup_path != NULL)
1373 {
1374 Path *cheapest_path;
1375
1376 /*
1377 * With an indication of how many tuples the query should provide,
1378 * the optimizer tries to choose the path optimal for that
1379 * specific number of tuples.
1380 */
1381 if (root->tuple_fraction > 0.0)
1382 cheapest_path =
1383 get_cheapest_fractional_path(childrel,
1384 root->tuple_fraction);
1385 else
1386 cheapest_path = childrel->cheapest_startup_path;
1387
1388 /* cheapest_startup_path must not be a parameterized path. */
1389 Assert(cheapest_path->param_info == NULL);
1390 accumulate_append_subpath(cheapest_path,
1391 &startup_subpaths,
1392 NULL);
1393 }
1394 else
1395 startup_subpaths_valid = false;
1396
1397
1398 /* Same idea, but for a partial plan. */
1399 if (childrel->partial_pathlist != NIL)
1400 {
1401 cheapest_partial_path = linitial(childrel->partial_pathlist);
1402 accumulate_append_subpath(cheapest_partial_path,
1403 &partial_subpaths, NULL);
1404 }
1405 else
1406 partial_subpaths_valid = false;
1407
1408 /*
1409 * Same idea, but for a parallel append mixing partial and non-partial
1410 * paths.
1411 */
1412 if (pa_subpaths_valid)
1413 {
1414 Path *nppath = NULL;
1415
1416 nppath =
1417 get_cheapest_parallel_safe_total_inner(childrel->pathlist);
1418
1419 if (cheapest_partial_path == NULL && nppath == NULL)
1420 {
1421 /* Neither a partial nor a parallel-safe path? Forget it. */
1422 pa_subpaths_valid = false;
1423 }
1424 else if (nppath == NULL ||
1425 (cheapest_partial_path != NULL &&
1426 cheapest_partial_path->total_cost < nppath->total_cost))
1427 {
1428 /* Partial path is cheaper or the only option. */
1429 Assert(cheapest_partial_path != NULL);
1430 accumulate_append_subpath(cheapest_partial_path,
1431 &pa_partial_subpaths,
1432 &pa_nonpartial_subpaths);
1433 }
1434 else
1435 {
1436 /*
1437 * Either we've got only a non-partial path, or we think that
1438 * a single backend can execute the best non-partial path
1439 * faster than all the parallel backends working together can
1440 * execute the best partial path.
1441 *
1442 * It might make sense to be more aggressive here. Even if
1443 * the best non-partial path is more expensive than the best
1444 * partial path, it could still be better to choose the
1445 * non-partial path if there are several such paths that can
1446 * be given to different workers. For now, we don't try to
1447 * figure that out.
1448 */
1449 accumulate_append_subpath(nppath,
1450 &pa_nonpartial_subpaths,
1451 NULL);
1452 }
1453 }
1454
1455 /*
1456 * Collect lists of all the available path orderings and
1457 * parameterizations for all the children. We use these as a
1458 * heuristic to indicate which sort orderings and parameterizations we
1459 * should build Append and MergeAppend paths for.
1460 */
1461 foreach(lcp, childrel->pathlist)
1462 {
1463 Path *childpath = (Path *) lfirst(lcp);
1464 List *childkeys = childpath->pathkeys;
1465 Relids childouter = PATH_REQ_OUTER(childpath);
1466
1467 /* Unsorted paths don't contribute to pathkey list */
1468 if (childkeys != NIL)
1469 {
1470 ListCell *lpk;
1471 bool found = false;
1472
1473 /* Have we already seen this ordering? */
1474 foreach(lpk, all_child_pathkeys)
1475 {
1476 List *existing_pathkeys = (List *) lfirst(lpk);
1477
1478 if (compare_pathkeys(existing_pathkeys,
1479 childkeys) == PATHKEYS_EQUAL)
1480 {
1481 found = true;
1482 break;
1483 }
1484 }
1485 if (!found)
1486 {
1487 /* No, so add it to all_child_pathkeys */
1488 all_child_pathkeys = lappend(all_child_pathkeys,
1489 childkeys);
1490 }
1491 }
1492
1493 /* Unparameterized paths don't contribute to param-set list */
1494 if (childouter)
1495 {
1496 ListCell *lco;
1497 bool found = false;
1498
1499 /* Have we already seen this param set? */
1500 foreach(lco, all_child_outers)
1501 {
1502 Relids existing_outers = (Relids) lfirst(lco);
1503
1504 if (bms_equal(existing_outers, childouter))
1505 {
1506 found = true;
1507 break;
1508 }
1509 }
1510 if (!found)
1511 {
1512 /* No, so add it to all_child_outers */
1513 all_child_outers = lappend(all_child_outers,
1514 childouter);
1515 }
1516 }
1517 }
1518 }
1519
1520 /*
1521 * If we found unparameterized paths for all children, build an unordered,
1522 * unparameterized Append path for the rel. (Note: this is correct even
1523 * if we have zero or one live subpath due to constraint exclusion.)
1524 */
1525 if (subpaths_valid)
1526 add_path(rel, (Path *) create_append_path(root, rel, subpaths, NIL,
1527 NIL, NULL, 0, false,
1528 -1));
1529
1530 /* build an AppendPath for the cheap startup paths, if valid */
1531 if (startup_subpaths_valid)
1532 add_path(rel, (Path *) create_append_path(root, rel, startup_subpaths,
1533 NIL, NIL, NULL, 0, false, -1));
1534
1535 /*
1536 * Consider an append of unordered, unparameterized partial paths. Make
1537 * it parallel-aware if possible.
1538 */
1539 if (partial_subpaths_valid && partial_subpaths != NIL)
1540 {
1541 AppendPath *appendpath;
1542 ListCell *lc;
1543 int parallel_workers = 0;
1544
1545 /* Find the highest number of workers requested for any subpath. */
1546 foreach(lc, partial_subpaths)
1547 {
1548 Path *path = lfirst(lc);
1549
1550 parallel_workers = Max(parallel_workers, path->parallel_workers);
1551 }
1552 Assert(parallel_workers > 0);
1553
1554 /*
1555 * If the use of parallel append is permitted, always request at least
1556 * log2(# of children) workers. We assume it can be useful to have
1557 * extra workers in this case because they will be spread out across
1558 * the children. The precise formula is just a guess, but we don't
1559 * want to end up with a radically different answer for a table with N
1560 * partitions vs. an unpartitioned table with the same data, so the
1561 * use of some kind of log-scaling here seems to make some sense.
1562 */
1563 if (enable_parallel_append)
1564 {
1565 parallel_workers = Max(parallel_workers,
1566 pg_leftmost_one_pos32(list_length(live_childrels)) + 1);
1567 parallel_workers = Min(parallel_workers,
1568 max_parallel_workers_per_gather);
1569 }
1570 Assert(parallel_workers > 0);
1571
1572 /* Generate a partial append path. */
1573 appendpath = create_append_path(root, rel, NIL, partial_subpaths,
1574 NIL, NULL, parallel_workers,
1575 enable_parallel_append,
1576 -1);
1577
1578 /*
1579 * Make sure any subsequent partial paths use the same row count
1580 * estimate.
1581 */
1582 partial_rows = appendpath->path.rows;
1583
1584 /* Add the path. */
1585 add_partial_path(rel, (Path *) appendpath);
1586 }
1587
1588 /*
1589 * Consider a parallel-aware append using a mix of partial and non-partial
1590 * paths. (This only makes sense if there's at least one child which has
1591 * a non-partial path that is substantially cheaper than any partial path;
1592 * otherwise, we should use the append path added in the previous step.)
1593 */
1594 if (pa_subpaths_valid && pa_nonpartial_subpaths != NIL)
1595 {
1596 AppendPath *appendpath;
1597 ListCell *lc;
1598 int parallel_workers = 0;
1599
1600 /*
1601 * Find the highest number of workers requested for any partial
1602 * subpath.
1603 */
1604 foreach(lc, pa_partial_subpaths)
1605 {
1606 Path *path = lfirst(lc);
1607
1608 parallel_workers = Max(parallel_workers, path->parallel_workers);
1609 }
1610
1611 /*
1612 * Same formula here as above. It's even more important in this
1613 * instance because the non-partial paths won't contribute anything to
1614 * the planned number of parallel workers.
1615 */
1616 parallel_workers = Max(parallel_workers,
1617 pg_leftmost_one_pos32(list_length(live_childrels)) + 1);
1618 parallel_workers = Min(parallel_workers,
1619 max_parallel_workers_per_gather);
1620 Assert(parallel_workers > 0);
1621
1622 appendpath = create_append_path(root, rel, pa_nonpartial_subpaths,
1623 pa_partial_subpaths,
1624 NIL, NULL, parallel_workers, true,
1625 partial_rows);
1626 add_partial_path(rel, (Path *) appendpath);
1627 }
1628
1629 /*
1630 * Also build unparameterized ordered append paths based on the collected
1631 * list of child pathkeys.
1632 */
1633 if (subpaths_valid)
1634 generate_orderedappend_paths(root, rel, live_childrels,
1635 all_child_pathkeys);
1636
1637 /*
1638 * Build Append paths for each parameterization seen among the child rels.
1639 * (This may look pretty expensive, but in most cases of practical
1640 * interest, the child rels will expose mostly the same parameterizations,
1641 * so that not that many cases actually get considered here.)
1642 *
1643 * The Append node itself cannot enforce quals, so all qual checking must
1644 * be done in the child paths. This means that to have a parameterized
1645 * Append path, we must have the exact same parameterization for each
1646 * child path; otherwise some children might be failing to check the
1647 * moved-down quals. To make them match up, we can try to increase the
1648 * parameterization of lesser-parameterized paths.
1649 */
1650 foreach(l, all_child_outers)
1651 {
1652 Relids required_outer = (Relids) lfirst(l);
1653 ListCell *lcr;
1654
1655 /* Select the child paths for an Append with this parameterization */
1656 subpaths = NIL;
1657 subpaths_valid = true;
1658 foreach(lcr, live_childrels)
1659 {
1660 RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr);
1661 Path *subpath;
1662
1663 if (childrel->pathlist == NIL)
1664 {
1665 /* failed to make a suitable path for this child */
1666 subpaths_valid = false;
1667 break;
1668 }
1669
1670 subpath = get_cheapest_parameterized_child_path(root,
1671 childrel,
1672 required_outer);
1673 if (subpath == NULL)
1674 {
1675 /* failed to make a suitable path for this child */
1676 subpaths_valid = false;
1677 break;
1678 }
1679 accumulate_append_subpath(subpath, &subpaths, NULL);
1680 }
1681
1682 if (subpaths_valid)
1683 add_path(rel, (Path *)
1684 create_append_path(root, rel, subpaths, NIL,
1685 NIL, required_outer, 0, false,
1686 -1));
1687 }
1688
1689 /*
1690 * When there is only a single child relation, the Append path can inherit
1691 * any ordering available for the child rel's path, so that it's useful to
1692 * consider ordered partial paths. Above we only considered the cheapest
1693 * partial path for each child, but let's also make paths using any
1694 * partial paths that have pathkeys.
1695 */
1696 if (list_length(live_childrels) == 1)
1697 {
1698 RelOptInfo *childrel = (RelOptInfo *) linitial(live_childrels);
1699
1700 /* skip the cheapest partial path, since we already used that above */
1701 for_each_from(l, childrel->partial_pathlist, 1)
1702 {
1703 Path *path = (Path *) lfirst(l);
1704 AppendPath *appendpath;
1705
1706 /* skip paths with no pathkeys. */
1707 if (path->pathkeys == NIL)
1708 continue;
1709
1710 appendpath = create_append_path(root, rel, NIL, list_make1(path),
1711 NIL, NULL,
1712 path->parallel_workers, true,
1713 partial_rows);
1714 add_partial_path(rel, (Path *) appendpath);
1715 }
1716 }
1717}
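/*
 * Editor's note --- a worked example with hypothetical numbers, not part
 * of the original file.  For a parallel append over 8 live children whose
 * cheapest partial subpaths each request one worker, the log-scaling
 * above yields Max(1, pg_leftmost_one_pos32(8) + 1) = 4 workers, subject
 * to the max_parallel_workers_per_gather cap.
 */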
1718
1719/*
1720 * generate_orderedappend_paths
1721 * Generate ordered append paths for an append relation
1722 *
1723 * Usually we generate MergeAppend paths here, but there are some special
1724 * cases where we can generate simple Append paths, because the subpaths
1725 * can provide tuples in the required order already.
1726 *
1727 * We generate a path for each ordering (pathkey list) appearing in
1728 * all_child_pathkeys.
1729 *
1730 * We consider both cheapest-startup and cheapest-total cases, ie, for each
1731 * interesting ordering, collect all the cheapest startup subpaths and all the
1732 * cheapest total paths, and build a suitable path for each case.
1733 *
1734 * We don't currently generate any parameterized ordered paths here. While
1735 * it would not take much more code here to do so, it's very unclear that it
1736 * is worth the planning cycles to investigate such paths: there's little
1737 * use for an ordered path on the inside of a nestloop. In fact, it's likely
1738 * that the current coding of add_path would reject such paths out of hand,
1739 * because add_path gives no credit for sort ordering of parameterized paths,
1740 * and a parameterized MergeAppend is going to be more expensive than the
1741 * corresponding parameterized Append path. If we ever try harder to support
1742 * parameterized mergejoin plans, it might be worth adding support for
1743 * parameterized paths here to feed such joins. (See notes in
1744 * optimizer/README for why that might not ever happen, though.)
1745 */
1746static void
1747generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel,
1748 List *live_childrels,
1749 List *all_child_pathkeys)
1750{
1751 ListCell *lcp;
1752 List *partition_pathkeys = NIL;
1753 List *partition_pathkeys_desc = NIL;
1754 bool partition_pathkeys_partial = true;
1755 bool partition_pathkeys_desc_partial = true;
1756
1757 /*
1758 * Some partitioned table setups may allow us to use an Append node
1759 * instead of a MergeAppend. This is possible in cases such as RANGE
1760 * partitioned tables where it's guaranteed that an earlier partition must
1761 * contain rows which come earlier in the sort order. To detect whether
1762 * this is relevant, build pathkey descriptions of the partition ordering,
1763 * for both forward and reverse scans.
1764 */
1765 if (rel->part_scheme != NULL && IS_SIMPLE_REL(rel) &&
1766 partitions_are_ordered(rel->boundinfo, rel->live_parts))
1767 {
1768 partition_pathkeys = build_partition_pathkeys(root, rel,
1769 ForwardScanDirection,
1770 &partition_pathkeys_partial);
1771
1772 partition_pathkeys_desc = build_partition_pathkeys(root, rel,
1773 BackwardScanDirection,
1774 &partition_pathkeys_desc_partial);
1775
1776 /*
1777 * You might think we should truncate_useless_pathkeys here, but
1778 * allowing partition keys which are a subset of the query's pathkeys
1779 * can often be useful. For example, consider a table partitioned by
1780 * RANGE (a, b), and a query with ORDER BY a, b, c. If we have child
1781 * paths that can produce the a, b, c ordering (perhaps via indexes on
1782 * (a, b, c)) then it works to consider the appendrel output as
1783 * ordered by a, b, c.
1784 */
1785 }
1786
1787 /* Now consider each interesting sort ordering */
1788 foreach(lcp, all_child_pathkeys)
1789 {
1790 List *pathkeys = (List *) lfirst(lcp);
1791 List *startup_subpaths = NIL;
1792 List *total_subpaths = NIL;
1793 List *fractional_subpaths = NIL;
1794 bool startup_neq_total = false;
1795 bool match_partition_order;
1796 bool match_partition_order_desc;
1797 int end_index;
1798 int first_index;
1799 int direction;
1800
1801 /*
1802 * Determine if this sort ordering matches any partition pathkeys we
1803 * have, for both ascending and descending partition order. If the
1804 * partition pathkeys happen to be contained in pathkeys then it still
1805 * works, as described above, providing that the partition pathkeys
1806 * are complete and not just a prefix of the partition keys. (In such
1807 * cases we'll be relying on the child paths to have sorted the
1808 * lower-order columns of the required pathkeys.)
1809 */
1810 match_partition_order =
1811 pathkeys_contained_in(pathkeys, partition_pathkeys) ||
1812 (!partition_pathkeys_partial &&
1813 pathkeys_contained_in(partition_pathkeys, pathkeys));
1814
1815 match_partition_order_desc = !match_partition_order &&
1816 (pathkeys_contained_in(pathkeys, partition_pathkeys_desc) ||
1817 (!partition_pathkeys_desc_partial &&
1818 pathkeys_contained_in(partition_pathkeys_desc, pathkeys)));
1819
1820 /*
1821 * When the required pathkeys match the reverse of the partition
1822 * order, we must build the list of paths in reverse, starting with
1823 * the last matching partition. We can get away without making any
1824 * special cases for this in the loop below by just looping backward
1825 * over the child relations in this case.
1826 */
1827 if (match_partition_order_desc)
1828 {
1829 /* loop backward */
1830 first_index = list_length(live_childrels) - 1;
1831 end_index = -1;
1832 direction = -1;
1833
1834 /*
1835 * Set this to true to save us having to check for
1836 * match_partition_order_desc in the loop below.
1837 */
1838 match_partition_order = true;
1839 }
1840 else
1841 {
1842 /* for all other cases, loop forward */
1843 first_index = 0;
1844 end_index = list_length(live_childrels);
1845 direction = 1;
1846 }
1847
1848 /* Select the child paths for this ordering... */
1849 for (int i = first_index; i != end_index; i += direction)
1850 {
1851 RelOptInfo *childrel = list_nth_node(RelOptInfo, live_childrels, i);
1852 Path *cheapest_startup,
1853 *cheapest_total,
1854 *cheapest_fractional = NULL;
1855
1856 /* Locate the right paths, if they are available. */
1857 cheapest_startup =
1858 get_cheapest_path_for_pathkeys(childrel->pathlist,
1859 pathkeys,
1860 NULL,
1861 STARTUP_COST,
1862 false);
1863 cheapest_total =
1864 get_cheapest_path_for_pathkeys(childrel->pathlist,
1865 pathkeys,
1866 NULL,
1867 TOTAL_COST,
1868 false);
1869
1870 /*
1871 * If we can't find any paths with the right order just use the
1872 * cheapest-total path; we'll have to sort it later.
1873 */
1874 if (cheapest_startup == NULL || cheapest_total == NULL)
1875 {
1876 cheapest_startup = cheapest_total =
1877 childrel->cheapest_total_path;
1878 /* Assert we do have an unparameterized path for this child */
1879 Assert(cheapest_total->param_info == NULL);
1880 }
1881
1882 /*
1883 * When building a fractional path, determine a cheapest
1884 * fractional path for each child relation too. Looking at startup
1885 * and total costs is not enough, because the cheapest fractional
1886 * path may be dominated by two separate paths (one for startup,
1887 * one for total).
1888 *
1889 * We only need to do this when a fractional path is actually being
1890 * requested, i.e. when root->tuple_fraction is set.
1891 */
1892 if (root->tuple_fraction > 0)
1893 {
1894 double path_fraction = (1.0 / root->tuple_fraction);
1895
1896 cheapest_fractional =
1897 get_cheapest_fractional_path_for_pathkeys(childrel->pathlist,
1898 pathkeys,
1899 NULL,
1900 path_fraction);
1901
1902 /*
1903 * If we found no path with matching pathkeys, use the
1904 * cheapest total path instead.
1905 *
1906 * XXX We might consider partially sorted paths too (with an
1907 * incremental sort on top). But we'd have to build all the
1908 * incremental paths, do the costing etc.
1909 */
1910 if (!cheapest_fractional)
1911 cheapest_fractional = cheapest_total;
1912 }
1913
1914 /*
1915 * Notice whether we actually have different paths for the
1916 * "cheapest" and "total" cases; frequently there will be no point
1917 * in two create_merge_append_path() calls.
1918 */
1919 if (cheapest_startup != cheapest_total)
1920 startup_neq_total = true;
1921
1922 /*
1923 * Collect the appropriate child paths. The required logic varies
1924 * for the Append and MergeAppend cases.
1925 */
1926 if (match_partition_order)
1927 {
1928 /*
1929 * We're going to make a plain Append path. We don't need
1930 * most of what accumulate_append_subpath would do, but we do
1931 * want to cut out child Appends or MergeAppends if they have
1932 * just a single subpath (and hence aren't doing anything
1933 * useful).
1934 */
1935 cheapest_startup = get_singleton_append_subpath(cheapest_startup);
1936 cheapest_total = get_singleton_append_subpath(cheapest_total);
1937
1938 startup_subpaths = lappend(startup_subpaths, cheapest_startup);
1939 total_subpaths = lappend(total_subpaths, cheapest_total);
1940
1941 if (cheapest_fractional)
1942 {
1943 cheapest_fractional = get_singleton_append_subpath(cheapest_fractional);
1944 fractional_subpaths = lappend(fractional_subpaths, cheapest_fractional);
1945 }
1946 }
1947 else
1948 {
1949 /*
1950 * Otherwise, rely on accumulate_append_subpath to collect the
1951 * child paths for the MergeAppend.
1952 */
1953 accumulate_append_subpath(cheapest_startup,
1954 &startup_subpaths, NULL);
1955 accumulate_append_subpath(cheapest_total,
1956 &total_subpaths, NULL);
1957
1958 if (cheapest_fractional)
1959 accumulate_append_subpath(cheapest_fractional,
1960 &fractional_subpaths, NULL);
1961 }
1962 }
1963
1964 /* ... and build the Append or MergeAppend paths */
1965 if (match_partition_order)
1966 {
1967 /* We only need Append */
1968 add_path(rel, (Path *) create_append_path(root,
1969 rel,
1970 startup_subpaths,
1971 NIL,
1972 pathkeys,
1973 NULL,
1974 0,
1975 false,
1976 -1));
1977 if (startup_neq_total)
1978 add_path(rel, (Path *) create_append_path(root,
1979 rel,
1980 total_subpaths,
1981 NIL,
1982 pathkeys,
1983 NULL,
1984 0,
1985 false,
1986 -1));
1987
1988 if (fractional_subpaths)
1989 add_path(rel, (Path *) create_append_path(root,
1990 rel,
1991 fractional_subpaths,
1992 NIL,
1993 pathkeys,
1994 NULL,
1995 0,
1996 false,
1997 -1));
1998 }
1999 else
2000 {
2001 /* We need MergeAppend */
2002 add_path(rel, (Path *) create_merge_append_path(root,
2003 rel,
2004 startup_subpaths,
2005 pathkeys,
2006 NULL));
2007 if (startup_neq_total)
2008 add_path(rel, (Path *) create_merge_append_path(root,
2009 rel,
2010 total_subpaths,
2011 pathkeys,
2012 NULL));
2013
2014 if (fractional_subpaths)
2015 add_path(rel, (Path *) create_merge_append_path(root,
2016 rel,
2017 fractional_subpaths,
2018 pathkeys,
2019 NULL));
2020 }
2021 }
2022}
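/*
 * Editor's note: a minimal, self-contained sketch (toy arrays, not
 * PostgreSQL structures) of the direction-controlled loop used above:
 * the same loop body walks the children forward for ascending partition
 * order and backward for descending order, controlled only by
 * first_index, end_index, and direction.
 */
#include <stdio.h>

static void
walk_children(int nchildren, int descending)
{
    int first_index = descending ? nchildren - 1 : 0;
    int end_index = descending ? -1 : nchildren;
    int direction = descending ? -1 : 1;

    for (int i = first_index; i != end_index; i += direction)
        printf("visit child %d\n", i);
}

int
main(void)
{
    walk_children(3, 0);        /* visits 0, 1, 2 */
    walk_children(3, 1);        /* visits 2, 1, 0 */
    return 0;
}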
2023
2024/*
2025 * get_cheapest_parameterized_child_path
2026 * Get cheapest path for this relation that has exactly the requested
2027 * parameterization.
2028 *
2029 * Returns NULL if unable to create such a path.
2030 */
2031static Path *
2032get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel,
2033 Relids required_outer)
2034{
2035 Path *cheapest;
2036 ListCell *lc;
2037
2038 /*
2039 * Look up the cheapest existing path with no more than the needed
2040 * parameterization. If it has exactly the needed parameterization, we're
2041 * done.
2042 */
2043 cheapest = get_cheapest_path_for_pathkeys(rel->pathlist,
2044 NIL,
2045 required_outer,
2046 TOTAL_COST,
2047 false);
2048 Assert(cheapest != NULL);
2049 if (bms_equal(PATH_REQ_OUTER(cheapest), required_outer))
2050 return cheapest;
2051
2052 /*
2053 * Otherwise, we can "reparameterize" an existing path to match the given
2054 * parameterization, which effectively means pushing down additional
2055 * joinquals to be checked within the path's scan. However, some existing
2056 * paths might check the available joinquals already while others don't;
2057 * therefore, it's not clear which existing path will be cheapest after
2058 * reparameterization. We have to go through them all and find out.
2059 */
2060 cheapest = NULL;
2061 foreach(lc, rel->pathlist)
2062 {
2063 Path *path = (Path *) lfirst(lc);
2064
2065 /* Can't use it if it needs more than requested parameterization */
2066 if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer))
2067 continue;
2068
2069 /*
2070 * Reparameterization can only increase the path's cost, so if it's
2071 * already more expensive than the current cheapest, forget it.
2072 */
2073 if (cheapest != NULL &&
2074 compare_path_costs(cheapest, path, TOTAL_COST) <= 0)
2075 continue;
2076
2077 /* Reparameterize if needed, then recheck cost */
2078 if (!bms_equal(PATH_REQ_OUTER(path), required_outer))
2079 {
2080 path = reparameterize_path(root, path, required_outer, 1.0);
2081 if (path == NULL)
2082 continue; /* failed to reparameterize this one */
2083 Assert(bms_equal(PATH_REQ_OUTER(path), required_outer));
2084
2085 if (cheapest != NULL &&
2086 compare_path_costs(cheapest, path, TOTAL_COST) <= 0)
2087 continue;
2088 }
2089
2090 /* We have a new best path */
2091 cheapest = path;
2092 }
2093
2094 /* Return the best path, or NULL if we found no suitable candidate */
2095 return cheapest;
2096}
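/*
 * Editor's note: a self-contained sketch of the selection loop above,
 * using a toy cost structure rather than PostgreSQL's Path type. The
 * pruning rule is the one the comments describe: adjustment (standing in
 * for reparameterization) can only increase cost, so a candidate already
 * costlier than the best so far can be skipped both before and after
 * adjusting.
 */
#include <stddef.h>

typedef struct ToyPath
{
    double total_cost;
    int needs_adjust;
} ToyPath;

/* stands in for reparameterize_path(); assumed to only increase cost */
static ToyPath *
adjust_path(ToyPath *p)
{
    p->total_cost += 1.0;
    p->needs_adjust = 0;
    return p;                   /* a real version could fail and return NULL */
}

static ToyPath *
cheapest_adjusted(ToyPath *candidates, int ncandidates)
{
    ToyPath *cheapest = NULL;

    for (int i = 0; i < ncandidates; i++)
    {
        ToyPath *p = &candidates[i];

        if (cheapest != NULL && cheapest->total_cost <= p->total_cost)
            continue;           /* already beaten; adjusting won't help */

        if (p->needs_adjust)
        {
            p = adjust_path(p);
            if (p == NULL)
                continue;       /* adjustment failed */
            if (cheapest != NULL && cheapest->total_cost <= p->total_cost)
                continue;       /* recheck cost after adjusting */
        }

        cheapest = p;           /* new best candidate */
    }

    return cheapest;
}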
2097
2098/*
2099 * accumulate_append_subpath
2100 * Add a subpath to the list being built for an Append or MergeAppend.
2101 *
2102 * It's possible that the child is itself an Append or MergeAppend path, in
2103 * which case we can "cut out the middleman" and just add its child paths to
2104 * our own list. (We don't try to do this earlier because we need to apply
2105 * both levels of transformation to the quals.)
2106 *
2107 * Note that if we omit a child MergeAppend in this way, we are effectively
2108 * omitting a sort step, which seems fine: if the parent is to be an Append,
2109 * its result would be unsorted anyway, while if the parent is to be a
2110 * MergeAppend, there's no point in a separate sort on a child.
2111 *
2112 * Normally, either path is a partial path and subpaths is a list of partial
2113 * paths, or else path is a non-partial plan and subpaths is a list of those.
2114 * However, if path is a parallel-aware Append, then we add its partial path
2115 * children to subpaths and the rest to special_subpaths. If the latter is
2116 * NULL, we don't flatten the path at all (unless it contains only partial
2117 * paths).
2118 */
2119static void
2120accumulate_append_subpath(Path *path, List **subpaths, List **special_subpaths)
2121{
2122 if (IsA(path, AppendPath))
2123 {
2124 AppendPath *apath = (AppendPath *) path;
2125
2126 if (!apath->path.parallel_aware || apath->first_partial_path == 0)
2127 {
2128 *subpaths = list_concat(*subpaths, apath->subpaths);
2129 return;
2130 }
2131 else if (special_subpaths != NULL)
2132 {
2133 List *new_special_subpaths;
2134
2135 /* Split Parallel Append into partial and non-partial subpaths */
2136 *subpaths = list_concat(*subpaths,
2137 list_copy_tail(apath->subpaths,
2138 apath->first_partial_path));
2139 new_special_subpaths = list_copy_head(apath->subpaths,
2140 apath->first_partial_path);
2141 *special_subpaths = list_concat(*special_subpaths,
2142 new_special_subpaths);
2143 return;
2144 }
2145 }
2146 else if (IsA(path, MergeAppendPath))
2147 {
2148 MergeAppendPath *mpath = (MergeAppendPath *) path;
2149
2150 *subpaths = list_concat(*subpaths, mpath->subpaths);
2151 return;
2152 }
2153
2154 *subpaths = lappend(*subpaths, path);
2155}
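/*
 * Editor's note: the interesting case above is the parallel-aware Append,
 * whose children are stored with the non-partial ones first and
 * first_partial_path marking the boundary. A toy version of that split,
 * mirroring the list_copy_head()/list_copy_tail() calls:
 */
#include <stdio.h>

static void
split_children(const int *children, int nchildren, int first_partial)
{
    /* children before the boundary are non-partial: kept separate */
    for (int i = 0; i < first_partial; i++)
        printf("special (non-partial) subpath %d\n", children[i]);

    /* children at or after the boundary are partial: flattened directly */
    for (int i = first_partial; i < nchildren; i++)
        printf("flattened (partial) subpath %d\n", children[i]);
}

int
main(void)
{
    int children[] = {1, 2, 3, 4};

    split_children(children, 4, 2);
    return 0;
}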
2156
2157/*
2158 * get_singleton_append_subpath
2159 * Returns the single subpath of an Append/MergeAppend, or just
2160 * 'path' itself if it's not a single-subpath Append/MergeAppend.
2161 *
2162 * Note: 'path' must not be a parallel-aware path.
2163 */
2164static Path *
2165get_singleton_append_subpath(Path *path)
2166{
2167 Assert(!path->parallel_aware);
2168
2169 if (IsA(path, AppendPath))
2170 {
2171 AppendPath *apath = (AppendPath *) path;
2172
2173 if (list_length(apath->subpaths) == 1)
2174 return (Path *) linitial(apath->subpaths);
2175 }
2176 else if (IsA(path, MergeAppendPath))
2177 {
2178 MergeAppendPath *mpath = (MergeAppendPath *) path;
2179
2180 if (list_length(mpath->subpaths) == 1)
2181 return (Path *) linitial(mpath->subpaths);
2182 }
2183
2184 return path;
2185}
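/*
 * Editor's note: a minimal sketch of the unwrap rule above with a toy
 * container type (not PostgreSQL's Path): a wrapper holding exactly one
 * child contributes nothing, so return the child instead.
 */
typedef struct ToyNode
{
    int is_container;
    int nchildren;
    struct ToyNode **children;
} ToyNode;

static ToyNode *
unwrap_singleton(ToyNode *node)
{
    if (node->is_container && node->nchildren == 1)
        return node->children[0];
    return node;
}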
2186
2187/*
2188 * set_dummy_rel_pathlist
2189 * Build a dummy path for a relation that's been excluded by constraints
2190 *
2191 * Rather than inventing a special "dummy" path type, we represent this as an
2192 * AppendPath with no members (see also IS_DUMMY_APPEND/IS_DUMMY_REL macros).
2193 *
2194 * (See also mark_dummy_rel, which does basically the same thing, but is
2195 * typically used to change a rel into dummy state after we already made
2196 * paths for it.)
2197 */
2198static void
2199set_dummy_rel_pathlist(RelOptInfo *rel)
2200{
2201 /* Set dummy size estimates --- we leave attr_widths[] as zeroes */
2202 rel->rows = 0;
2203 rel->reltarget->width = 0;
2204
2205 /* Discard any pre-existing paths; no further need for them */
2206 rel->pathlist = NIL;
2207 rel->partial_pathlist = NIL;
2208
2209 /* Set up the dummy path */
2210 add_path(rel, (Path *) create_append_path(NULL, rel, NIL, NIL,
2211 NIL, rel->lateral_relids,
2212 0, false, -1));
2213
2214 /*
2215 * We set the cheapest-path fields immediately, just in case they were
2216 * pointing at some discarded path. This is redundant in current usage
2217 * because set_rel_pathlist will do it later, but it's cheap so we keep it
2218 * for safety and consistency with mark_dummy_rel.
2219 */
2220 set_cheapest(rel);
2221}
2222
2223/*
2224 * find_window_run_conditions
2225 * Determine if 'wfunc' is really a WindowFunc and call its prosupport
2226 * function to determine the function's monotonic properties. We then
2227 * see if 'opexpr' can be used to short-circuit execution.
2228 *
2229 * For example row_number() over (order by ...) always produces a value one
2230 * higher than the previous. If someone has a window function in a subquery
2231 * and has a WHERE clause in the outer query to filter rows <= 10, then we may
2232 * as well stop processing the windowagg once the row number reaches 11. Here
2233 * we check if 'opexpr' might help us to stop doing needless extra processing
2234 * in WindowAgg nodes.
2235 *
2236 * '*keep_original' is set to true if the caller should also use 'opexpr' for
2237 * its original purpose. This is set to false if the caller can assume that
2238 * the run condition will handle all of the required filtering.
2239 *
2240 * Returns true if 'opexpr' was found to be useful and was added to the
2241 * WindowFunc's runCondition. We also set *keep_original accordingly and add
2242 * 'attno' to *run_cond_attrs offset by FirstLowInvalidHeapAttributeNumber.
2243 * If the 'opexpr' cannot be used then we set *keep_original to true and
2244 * return false.
2245 */
2246static bool
2247find_window_run_conditions(Query *subquery, RangeTblEntry *rte, Index rti,
2248 AttrNumber attno, WindowFunc *wfunc, OpExpr *opexpr,
2249 bool wfunc_left, bool *keep_original,
2250 Bitmapset **run_cond_attrs)
2251{
2252 Oid prosupport;
2253 Expr *otherexpr;
2254 SupportRequestWFuncMonotonic req;
2255 SupportRequestWFuncMonotonic *res;
2256 WindowClause *wclause;
2257 List *opinfos;
2258 OpExpr *runopexpr;
2259 Oid runoperator;
2260 ListCell *lc;
2261
2262 *keep_original = true;
2263
2264 while (IsA(wfunc, RelabelType))
2265 wfunc = (WindowFunc *) ((RelabelType *) wfunc)->arg;
2266
2267 /* we can only work with window functions */
2268 if (!IsA(wfunc, WindowFunc))
2269 return false;
2270
2271 /* can't use it if there are subplans in the WindowFunc */
2272 if (contain_subplans((Node *) wfunc))
2273 return false;
2274
2275 prosupport = get_func_support(wfunc->winfnoid);
2276
2277 /* Check if there's a support function for 'wfunc' */
2278 if (!OidIsValid(prosupport))
2279 return false;
2280
2281 /* get the Expr from the other side of the OpExpr */
2282 if (wfunc_left)
2283 otherexpr = lsecond(opexpr->args);
2284 else
2285 otherexpr = linitial(opexpr->args);
2286
2287 /*
2288 * The value being compared must not change during the evaluation of the
2289 * window partition.
2290 */
2291 if (!is_pseudo_constant_clause((Node *) otherexpr))
2292 return false;
2293
2294 /* find the window clause belonging to the window function */
2295 wclause = (WindowClause *) list_nth(subquery->windowClause,
2296 wfunc->winref - 1);
2297
2298 req.type = T_SupportRequestWFuncMonotonic;
2299 req.window_func = wfunc;
2300 req.window_clause = wclause;
2301
2302 /* call the support function */
2303 res = (SupportRequestWFuncMonotonic *)
2304 DatumGetPointer(OidFunctionCall1(prosupport,
2305 PointerGetDatum(&req)));
2306
2307 /*
2308 * Nothing to do if the function is neither monotonically increasing nor
2309 * monotonically decreasing.
2310 */
2311 if (res == NULL || res->monotonic == MONOTONICFUNC_NONE)
2312 return false;
2313
2314 runopexpr = NULL;
2315 runoperator = InvalidOid;
2316 opinfos = get_op_index_interpretation(opexpr->opno);
2317
2318 foreach(lc, opinfos)
2319 {
2320 OpIndexInterpretation *opinfo = lfirst(lc);
2321 CompareType cmptype = opinfo->cmptype;
2322
2323 /* handle < / <= */
2324 if (cmptype == COMPARE_LT || cmptype == COMPARE_LE)
2325 {
2326 /*
2327 * < / <= is supported for monotonically increasing functions in
2328 * the form <wfunc> op <pseudoconst> and <pseudoconst> op <wfunc>
2329 * for monotonically decreasing functions.
2330 */
2331 if ((wfunc_left && (res->monotonic & MONOTONICFUNC_INCREASING)) ||
2332 (!wfunc_left && (res->monotonic & MONOTONICFUNC_DECREASING)))
2333 {
2334 *keep_original = false;
2335 runopexpr = opexpr;
2336 runoperator = opexpr->opno;
2337 }
2338 break;
2339 }
2340 /* handle > / >= */
2341 else if (cmptype == COMPARE_GT || cmptype == COMPARE_GE)
2342 {
2343 /*
2344 * > / >= is supported for monotonically decreasing functions in
2345 * the form <wfunc> op <pseudoconst> and <pseudoconst> op <wfunc>
2346 * for monotonically increasing functions.
2347 */
2348 if ((wfunc_left && (res->monotonic & MONOTONICFUNC_DECREASING)) ||
2349 (!wfunc_left && (res->monotonic & MONOTONICFUNC_INCREASING)))
2350 {
2351 *keep_original = false;
2352 runopexpr = opexpr;
2353 runoperator = opexpr->opno;
2354 }
2355 break;
2356 }
2357 /* handle = */
2358 else if (cmptype == COMPARE_EQ)
2359 {
2360 CompareType newcmptype;
2361
2362 /*
2363 * When both monotonically increasing and decreasing then the
2364 * return value of the window function will be the same each time.
2365 * We can simply use 'opexpr' as the run condition without
2366 * modifying it.
2367 */
2368 if (res->monotonic == MONOTONICFUNC_BOTH)
2369 {
2370 *keep_original = false;
2371 runopexpr = opexpr;
2372 runoperator = opexpr->opno;
2373 break;
2374 }
2375
2376 /*
2377 * When monotonically increasing we make a qual with <wfunc> <=
2378 * <value> or <value> >= <wfunc> in order to filter out values
2379 * which are above the value in the equality condition. For
2380 * monotonically decreasing functions we want to filter values
2381 * below the value in the equality condition.
2382 */
2383 if (res->monotonic & MONOTONICFUNC_INCREASING)
2384 newcmptype = wfunc_left ? COMPARE_LE : COMPARE_GE;
2385 else
2386 newcmptype = wfunc_left ? COMPARE_GE : COMPARE_LE;
2387
2388 /* We must keep the original equality qual */
2389 *keep_original = true;
2390 runopexpr = opexpr;
2391
2392 /* determine the operator to use for the WindowFuncRunCondition */
2393 runoperator = get_opfamily_member_for_cmptype(opinfo->opfamily_id,
2394 opinfo->oplefttype,
2395 opinfo->oprighttype,
2396 newcmptype);
2397 break;
2398 }
2399 }
2400
2401 if (runopexpr != NULL)
2402 {
2403 WindowFuncRunCondition *wfuncrc;
2404
2405 wfuncrc = makeNode(WindowFuncRunCondition);
2406 wfuncrc->opno = runoperator;
2407 wfuncrc->inputcollid = runopexpr->inputcollid;
2408 wfuncrc->wfunc_left = wfunc_left;
2409 wfuncrc->arg = copyObject(otherexpr);
2410
2411 wfunc->runCondition = lappend(wfunc->runCondition, wfuncrc);
2412
2413 /* record that this attno was used in a run condition */
2414 *run_cond_attrs = bms_add_member(*run_cond_attrs,
2415 attno - FirstLowInvalidHeapAttributeNumber);
2416 return true;
2417 }
2418
2419 /* unsupported OpExpr */
2420 return false;
2421}
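/*
 * Editor's note: a freestanding illustration (plain C, made-up data) of
 * what a run condition buys at execution time. For a monotonically
 * increasing value such as row_number(), a qual like "rn <= 3" can never
 * become true again once it fails, so the scan can stop at the first
 * failing row instead of computing the window function for the whole
 * partition.
 */
#include <stdio.h>

int
main(void)
{
    int partition_size = 1000000;

    for (int rn = 1; rn <= partition_size; rn++)
    {
        if (!(rn <= 3))
            break;              /* run condition is false from here on */
        printf("emit row %d\n", rn);
    }
    return 0;
}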
2422
2423/*
2424 * check_and_push_window_quals
2425 * Check if 'clause' is a qual that can be pushed into a WindowFunc
2426 * as a 'runCondition' qual. These, when present, allow some unnecessary
2427 * work to be skipped during execution.
2428 *
2429 * 'run_cond_attrs' will be populated with all targetlist resnos of subquery
2430 * targets (offset by FirstLowInvalidHeapAttributeNumber) that we pushed
2431 * window quals for.
2432 *
2433 * Returns true if the caller still must keep the original qual or false if
2434 * the caller can safely ignore the original qual because the WindowAgg node
2435 * will use the runCondition to stop returning tuples.
2436 */
2437static bool
2438check_and_push_window_quals(Query *subquery, RangeTblEntry *rte, Index rti,
2439 Node *clause, Bitmapset **run_cond_attrs)
2440{
2441 OpExpr *opexpr = (OpExpr *) clause;
2442 bool keep_original = true;
2443 Var *var1;
2444 Var *var2;
2445
2446 /* We're only able to use OpExprs with 2 operands */
2447 if (!IsA(opexpr, OpExpr))
2448 return true;
2449
2450 if (list_length(opexpr->args) != 2)
2451 return true;
2452
2453 /*
2454 * Currently, we restrict this optimization to strict OpExprs. The reason
2455 * for this is that during execution, once the runcondition becomes false,
2456 * we stop evaluating WindowFuncs. To avoid leaving around stale window
2457 * function result values, we set them to NULL. Having only strict
2458 * OpExprs here ensures that we properly filter out the tuples with NULLs
2459 * in the top-level WindowAgg.
2460 */
2461 set_opfuncid(opexpr);
2462 if (!func_strict(opexpr->opfuncid))
2463 return true;
2464
2465 /*
2466 * Check for plain Vars that reference window functions in the subquery.
2467 * If we find any, we'll ask find_window_run_conditions() if 'opexpr' can
2468 * be used as part of the run condition.
2469 */
2470
2471 /* Check the left side of the OpExpr */
2472 var1 = linitial(opexpr->args);
2473 if (IsA(var1, Var) && var1->varattno > 0)
2474 {
2475 TargetEntry *tle = list_nth(subquery->targetList, var1->varattno - 1);
2476 WindowFunc *wfunc = (WindowFunc *) tle->expr;
2477
2478 if (find_window_run_conditions(subquery, rte, rti, tle->resno, wfunc,
2479 opexpr, true, &keep_original,
2480 run_cond_attrs))
2481 return keep_original;
2482 }
2483
2484 /* and check the right side */
2485 var2 = lsecond(opexpr->args);
2486 if (IsA(var2, Var) && var2->varattno > 0)
2487 {
2488 TargetEntry *tle = list_nth(subquery->targetList, var2->varattno - 1);
2489 WindowFunc *wfunc = (WindowFunc *) tle->expr;
2490
2491 if (find_window_run_conditions(subquery, rte, rti, tle->resno, wfunc,
2492 opexpr, false, &keep_original,
2493 run_cond_attrs))
2494 return keep_original;
2495 }
2496
2497 return true;
2498}
2499
2500/*
2501 * set_subquery_pathlist
2502 * Generate SubqueryScan access paths for a subquery RTE
2503 *
2504 * We don't currently support generating parameterized paths for subqueries
2505 * by pushing join clauses down into them; it seems too expensive to re-plan
2506 * the subquery multiple times to consider different alternatives.
2507 * (XXX that could stand to be reconsidered, now that we use Paths.)
2508 * So the paths made here will be parameterized if the subquery contains
2509 * LATERAL references, otherwise not. As long as that's true, there's no need
2510 * for a separate set_subquery_size phase: just make the paths right away.
2511 */
2512static void
2513set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
2514 Index rti, RangeTblEntry *rte)
2515{
2516 Query *parse = root->parse;
2517 Query *subquery = rte->subquery;
2518 bool trivial_pathtarget;
2519 Relids required_outer;
2520 pushdown_safety_info safetyInfo;
2521 double tuple_fraction;
2522 RelOptInfo *sub_final_rel;
2523 Bitmapset *run_cond_attrs = NULL;
2524 ListCell *lc;
2525
2526 /*
2527 * Must copy the Query so that planning doesn't mess up the RTE contents
2528 * (really really need to fix the planner to not scribble on its input,
2529 * someday ... but see remove_unused_subquery_outputs to start with).
2530 */
2531 subquery = copyObject(subquery);
2532
2533 /*
2534 * If it's a LATERAL subquery, it might contain some Vars of the current
2535 * query level, requiring it to be treated as parameterized, even though
2536 * we don't support pushing down join quals into subqueries.
2537 */
2538 required_outer = rel->lateral_relids;
2539
2540 /*
2541 * Zero out result area for subquery_is_pushdown_safe, so that it can set
2542 * flags as needed while recursing. In particular, we need a workspace
2543 * for keeping track of the reasons why columns are unsafe to reference.
2544 * These reasons are stored in the bits inside unsafeFlags[i] when we
2545 * discover reasons that column i of the subquery is unsafe to be used in
2546 * a pushed-down qual.
2547 */
2548 memset(&safetyInfo, 0, sizeof(safetyInfo));
2549 safetyInfo.unsafeFlags = (unsigned char *)
2550 palloc0((list_length(subquery->targetList) + 1) * sizeof(unsigned char));
2551
2552 /*
2553 * If the subquery has the "security_barrier" flag, it means the subquery
2554 * originated from a view that must enforce row-level security. Then we
2555 * must not push down quals that contain leaky functions. (Ideally this
2556 * would be checked inside subquery_is_pushdown_safe, but since we don't
2557 * currently pass the RTE to that function, we must do it here.)
2558 */
2559 safetyInfo.unsafeLeaky = rte->security_barrier;
2560
2561 /*
2562 * If there are any restriction clauses that have been attached to the
2563 * subquery relation, consider pushing them down to become WHERE or HAVING
2564 * quals of the subquery itself. This transformation is useful because it
2565 * may allow us to generate a better plan for the subquery than evaluating
2566 * all the subquery output rows and then filtering them.
2567 *
2568 * There are several cases where we cannot push down clauses. Restrictions
2569 * involving the subquery are checked by subquery_is_pushdown_safe().
2570 * Restrictions on individual clauses are checked by
2571 * qual_is_pushdown_safe(). Also, we don't want to push down
2572 * pseudoconstant clauses; better to have the gating node above the
2573 * subquery.
2574 *
2575 * Non-pushed-down clauses will get evaluated as qpquals of the
2576 * SubqueryScan node.
2577 *
2578 * XXX Are there any cases where we want to make a policy decision not to
2579 * push down a pushable qual, because it'd result in a worse plan?
2580 */
2581 if (rel->baserestrictinfo != NIL &&
2582 subquery_is_pushdown_safe(subquery, subquery, &safetyInfo))
2583 {
2584 /* OK to consider pushing down individual quals */
2585 List *upperrestrictlist = NIL;
2586 ListCell *l;
2587
2588 foreach(l, rel->baserestrictinfo)
2589 {
2590 RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
2591 Node *clause = (Node *) rinfo->clause;
2592
2593 if (rinfo->pseudoconstant)
2594 {
2595 upperrestrictlist = lappend(upperrestrictlist, rinfo);
2596 continue;
2597 }
2598
2599 switch (qual_is_pushdown_safe(subquery, rti, rinfo, &safetyInfo))
2600 {
2601 case PUSHDOWN_SAFE:
2602 /* Push it down */
2603 subquery_push_qual(subquery, rte, rti, clause);
2604 break;
2605
2606 case PUSHDOWN_WINDOWCLAUSE_RUNCOND:
2607
2608 /*
2609 * Since we can't push the qual down into the subquery,
2610 * check if it happens to reference a window function. If
2611 * so then it might be useful to use for the WindowAgg's
2612 * runCondition.
2613 */
2614 if (!subquery->hasWindowFuncs ||
2615 check_and_push_window_quals(subquery, rte, rti, clause,
2616 &run_cond_attrs))
2617 {
2618 /*
2619 * subquery has no window funcs or the clause is not a
2620 * suitable window run condition qual or it is, but
2621 * the original must also be kept in the upper query.
2622 */
2623 upperrestrictlist = lappend(upperrestrictlist, rinfo);
2624 }
2625 break;
2626
2627 case PUSHDOWN_UNSAFE:
2628 upperrestrictlist = lappend(upperrestrictlist, rinfo);
2629 break;
2630 }
2631 }
2632 rel->baserestrictinfo = upperrestrictlist;
2633 /* We don't bother recomputing baserestrict_min_security */
2634 }
2635
2636 pfree(safetyInfo.unsafeFlags);
2637
2638 /*
2639 * The upper query might not use all the subquery's output columns; if
2640 * not, we can simplify. Pass the attributes that were pushed down into
2641 * WindowAgg run conditions to ensure we don't accidentally think those
2642 * are unused.
2643 */
2644 remove_unused_subquery_outputs(subquery, rel, run_cond_attrs);
2645
2646 /*
2647 * We can safely pass the outer tuple_fraction down to the subquery if the
2648 * outer level has no joining, aggregation, or sorting to do. Otherwise
2649 * we'd better tell the subquery to plan for full retrieval. (XXX This
2650 * could probably be made more intelligent ...)
2651 */
2652 if (parse->hasAggs ||
2653 parse->groupClause ||
2654 parse->groupingSets ||
2655 root->hasHavingQual ||
2656 parse->distinctClause ||
2657 parse->sortClause ||
2658 bms_membership(root->all_baserels) == BMS_MULTIPLE)
2659 tuple_fraction = 0.0; /* default case */
2660 else
2661 tuple_fraction = root->tuple_fraction;
2662
2663 /* plan_params should not be in use in current query level */
2664 Assert(root->plan_params == NIL);
2665
2666 /* Generate a subroot and Paths for the subquery */
2667 rel->subroot = subquery_planner(root->glob, subquery, root, false,
2668 tuple_fraction, NULL);
2669
2670 /* Isolate the params needed by this specific subplan */
2671 rel->subplan_params = root->plan_params;
2672 root->plan_params = NIL;
2673
2674 /*
2675 * It's possible that constraint exclusion proved the subquery empty. If
2676 * so, it's desirable to produce an unadorned dummy path so that we will
2677 * recognize appropriate optimizations at this query level.
2678 */
2679 sub_final_rel = fetch_upper_rel(rel->subroot, UPPERREL_FINAL, NULL);
2680
2681 if (IS_DUMMY_REL(sub_final_rel))
2682 {
2683 set_dummy_rel_pathlist(rel);
2684 return;
2685 }
2686
2687 /*
2688 * Mark rel with estimated output rows, width, etc. Note that we have to
2689 * do this before generating outer-query paths, else cost_subqueryscan is
2690 * not happy.
2691 */
2692 set_subquery_size_estimates(root, rel);
2693
2694 /*
2695 * Also detect whether the reltarget is trivial, so that we can pass that
2696 * info to cost_subqueryscan (rather than re-deriving it multiple times).
2697 * It's trivial if it fetches all the subplan output columns in order.
2698 */
2699 if (list_length(rel->reltarget->exprs) != list_length(subquery->targetList))
2700 trivial_pathtarget = false;
2701 else
2702 {
2703 trivial_pathtarget = true;
2704 foreach(lc, rel->reltarget->exprs)
2705 {
2706 Node *node = (Node *) lfirst(lc);
2707 Var *var;
2708
2709 if (!IsA(node, Var))
2710 {
2711 trivial_pathtarget = false;
2712 break;
2713 }
2714 var = (Var *) node;
2715 if (var->varno != rti ||
2716 var->varattno != foreach_current_index(lc) + 1)
2717 {
2718 trivial_pathtarget = false;
2719 break;
2720 }
2721 }
2722 }
2723
2724 /*
2725 * For each Path that subquery_planner produced, make a SubqueryScanPath
2726 * in the outer query.
2727 */
2728 foreach(lc, sub_final_rel->pathlist)
2729 {
2730 Path *subpath = (Path *) lfirst(lc);
2731 List *pathkeys;
2732
2733 /* Convert subpath's pathkeys to outer representation */
2734 pathkeys = convert_subquery_pathkeys(root,
2735 rel,
2736 subpath->pathkeys,
2737 make_tlist_from_pathtarget(subpath->pathtarget));
2738
2739 /* Generate outer path using this subpath */
2740 add_path(rel, (Path *)
2741 create_subqueryscan_path(root, rel, subpath,
2742 trivial_pathtarget,
2743 pathkeys, required_outer));
2744 }
2745
2746 /* If outer rel allows parallelism, do same for partial paths. */
2747 if (rel->consider_parallel && bms_is_empty(required_outer))
2748 {
2749 /* If consider_parallel is false, there should be no partial paths. */
2750 Assert(sub_final_rel->consider_parallel ||
2751 sub_final_rel->partial_pathlist == NIL);
2752
2753 /* Same for partial paths. */
2754 foreach(lc, sub_final_rel->partial_pathlist)
2755 {
2756 Path *subpath = (Path *) lfirst(lc);
2757 List *pathkeys;
2758
2759 /* Convert subpath's pathkeys to outer representation */
2760 pathkeys = convert_subquery_pathkeys(root,
2761 rel,
2762 subpath->pathkeys,
2763 make_tlist_from_pathtarget(subpath->pathtarget));
2764
2765 /* Generate outer path using this subpath */
2766 add_partial_path(rel, (Path *)
2767 create_subqueryscan_path(root, rel, subpath,
2768 trivial_pathtarget,
2769 pathkeys,
2770 required_outer));
2771 }
2772 }
2773}
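/*
 * Editor's note: the trivial_pathtarget test above amounts to an
 * identity-projection check. A toy version over an array of attribute
 * numbers (hypothetical representation, not PostgreSQL's Var nodes):
 * the target is trivial only if it fetches every subquery output
 * column, in order.
 */
#include <stdbool.h>

static bool
is_identity_projection(const int *varattnos, int nexprs, int nsubquery_cols)
{
    if (nexprs != nsubquery_cols)
        return false;           /* must fetch all columns */

    for (int i = 0; i < nexprs; i++)
    {
        if (varattnos[i] != i + 1)
            return false;       /* must fetch column i+1 at position i */
    }
    return true;
}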
2774
2775/*
2776 * set_function_pathlist
2777 * Build the (single) access path for a function RTE
2778 */
2779static void
2780set_function_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
2781{
2782 Relids required_outer;
2783 List *pathkeys = NIL;
2784
2785 /*
2786 * We don't support pushing join clauses into the quals of a function
2787 * scan, but it could still have required parameterization due to LATERAL
2788 * refs in the function expression.
2789 */
2790 required_outer = rel->lateral_relids;
2791
2792 /*
2793 * The result is considered unordered unless ORDINALITY was used, in which
2794 * case it is ordered by the ordinal column (the last one). See if we
2795 * care, by checking for uses of that Var in equivalence classes.
2796 */
2797 if (rte->funcordinality)
2798 {
2799 AttrNumber ordattno = rel->max_attr;
2800 Var *var = NULL;
2801 ListCell *lc;
2802
2803 /*
2804 * Is there a Var for it in rel's targetlist? If not, the query did
2805 * not reference the ordinality column, or at least not in any way
2806 * that would be interesting for sorting.
2807 */
2808 foreach(lc, rel->reltarget->exprs)
2809 {
2810 Var *node = (Var *) lfirst(lc);
2811
2812 /* checking varno/varlevelsup is just paranoia */
2813 if (IsA(node, Var) &&
2814 node->varattno == ordattno &&
2815 node->varno == rel->relid &&
2816 node->varlevelsup == 0)
2817 {
2818 var = node;
2819 break;
2820 }
2821 }
2822
2823 /*
2824 * Try to build pathkeys for this Var with int8 sorting. We tell
2825 * build_expression_pathkey not to build any new equivalence class; if
2826 * the Var isn't already mentioned in some EC, it means that nothing
2827 * cares about the ordering.
2828 */
2829 if (var)
2830 pathkeys = build_expression_pathkey(root,
2831 (Expr *) var,
2832 Int8LessOperator,
2833 rel->relids,
2834 false);
2835 }
2836
2837 /* Generate appropriate path */
2838 add_path(rel, create_functionscan_path(root, rel,
2839 pathkeys, required_outer));
2840}
2841
2842/*
2843 * set_values_pathlist
2844 * Build the (single) access path for a VALUES RTE
2845 */
2846static void
2847set_values_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
2848{
2849 Relids required_outer;
2850
2851 /*
2852 * We don't support pushing join clauses into the quals of a values scan,
2853 * but it could still have required parameterization due to LATERAL refs
2854 * in the values expressions.
2855 */
2856 required_outer = rel->lateral_relids;
2857
2858 /* Generate appropriate path */
2859 add_path(rel, create_valuesscan_path(root, rel, required_outer));
2860}
2861
2862/*
2863 * set_tablefunc_pathlist
2864 * Build the (single) access path for a table func RTE
2865 */
2866static void
2867set_tablefunc_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
2868{
2869 Relids required_outer;
2870
2871 /*
2872 * We don't support pushing join clauses into the quals of a tablefunc
2873 * scan, but it could still have required parameterization due to LATERAL
2874 * refs in the function expression.
2875 */
2876 required_outer = rel->lateral_relids;
2877
2878 /* Generate appropriate path */
2879 add_path(rel, create_tablefuncscan_path(root, rel,
2880 required_outer));
2881}
2882
2883/*
2884 * set_cte_pathlist
2885 * Build the (single) access path for a non-self-reference CTE RTE
2886 *
2887 * There's no need for a separate set_cte_size phase, since we don't
2888 * support join-qual-parameterized paths for CTEs.
2889 */
2890static void
2891set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
2892{
2893 Path *ctepath;
2894 Plan *cteplan;
2895 PlannerInfo *cteroot;
2896 Index levelsup;
2897 List *pathkeys;
2898 int ndx;
2899 ListCell *lc;
2900 int plan_id;
2901 Relids required_outer;
2902
2903 /*
2904 * Find the referenced CTE, and locate the path and plan previously made
2905 * for it.
2906 */
2907 levelsup = rte->ctelevelsup;
2908 cteroot = root;
2909 while (levelsup-- > 0)
2910 {
2911 cteroot = cteroot->parent_root;
2912 if (!cteroot) /* shouldn't happen */
2913 elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
2914 }
2915
2916 /*
2917 * Note: cte_plan_ids can be shorter than cteList, if we are still working
2918 * on planning the CTEs (ie, this is a side-reference from another CTE).
2919 * So we mustn't use forboth here.
2920 */
2921 ndx = 0;
2922 foreach(lc, cteroot->parse->cteList)
2923 {
2924 CommonTableExpr *cte = (CommonTableExpr *) lfirst(lc);
2925
2926 if (strcmp(cte->ctename, rte->ctename) == 0)
2927 break;
2928 ndx++;
2929 }
2930 if (lc == NULL) /* shouldn't happen */
2931 elog(ERROR, "could not find CTE \"%s\"", rte->ctename);
2932 if (ndx >= list_length(cteroot->cte_plan_ids))
2933 elog(ERROR, "could not find plan for CTE \"%s\"", rte->ctename);
2934 plan_id = list_nth_int(cteroot->cte_plan_ids, ndx);
2935 if (plan_id <= 0)
2936 elog(ERROR, "no plan was made for CTE \"%s\"", rte->ctename);
2937
2938 Assert(list_length(root->glob->subpaths) == list_length(root->glob->subplans));
2939 ctepath = (Path *) list_nth(root->glob->subpaths, plan_id - 1);
2940 cteplan = (Plan *) list_nth(root->glob->subplans, plan_id - 1);
2941
2942 /* Mark rel with estimated output rows, width, etc */
2943 set_cte_size_estimates(root, rel, cteplan->plan_rows);
2944
2945 /* Convert the ctepath's pathkeys to outer query's representation */
2946 pathkeys = convert_subquery_pathkeys(root,
2947 rel,
2948 ctepath->pathkeys,
2949 cteplan->targetlist);
2950
2951 /*
2952 * We don't support pushing join clauses into the quals of a CTE scan, but
2953 * it could still have required parameterization due to LATERAL refs in
2954 * its tlist.
2955 */
2956 required_outer = rel->lateral_relids;
2957
2958 /* Generate appropriate path */
2959 add_path(rel, create_ctescan_path(root, rel, pathkeys, required_outer));
2960}
2961
2962/*
2963 * set_namedtuplestore_pathlist
2964 * Build the (single) access path for a named tuplestore RTE
2965 *
2966 * There's no need for a separate set_namedtuplestore_size phase, since we
2967 * don't support join-qual-parameterized paths for tuplestores.
2968 */
2969static void
2970set_namedtuplestore_pathlist(PlannerInfo *root, RelOptInfo *rel,
2971 RangeTblEntry *rte)
2972{
2973 Relids required_outer;
2974
2975 /* Mark rel with estimated output rows, width, etc */
2976 set_namedtuplestore_size_estimates(root, rel);
2977
2978 /*
2979 * We don't support pushing join clauses into the quals of a tuplestore
2980 * scan, but it could still have required parameterization due to LATERAL
2981 * refs in its tlist.
2982 */
2983 required_outer = rel->lateral_relids;
2984
2985 /* Generate appropriate path */
2986 add_path(rel, create_namedtuplestorescan_path(root, rel, required_outer));
2987}
2988
2989/*
2990 * set_result_pathlist
2991 * Build the (single) access path for an RTE_RESULT RTE
2992 *
2993 * There's no need for a separate set_result_size phase, since we
2994 * don't support join-qual-parameterized paths for these RTEs.
2995 */
2996static void
2997set_result_pathlist(PlannerInfo *root, RelOptInfo *rel,
2998 RangeTblEntry *rte)
2999{
3000 Relids required_outer;
3001
3002 /* Mark rel with estimated output rows, width, etc */
3003 set_result_size_estimates(root, rel);
3004
3005 /*
3006 * We don't support pushing join clauses into the quals of a Result scan,
3007 * but it could still have required parameterization due to LATERAL refs
3008 * in its tlist.
3009 */
3010 required_outer = rel->lateral_relids;
3011
3012 /* Generate appropriate path */
3013 add_path(rel, create_resultscan_path(root, rel, required_outer));
3014}
3015
3016/*
3017 * set_worktable_pathlist
3018 * Build the (single) access path for a self-reference CTE RTE
3019 *
3020 * There's no need for a separate set_worktable_size phase, since we don't
3021 * support join-qual-parameterized paths for CTEs.
3022 */
3023static void
3024set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
3025{
3026 Path *ctepath;
3027 PlannerInfo *cteroot;
3028 Index levelsup;
3029 Relids required_outer;
3030
3031 /*
3032 * We need to find the non-recursive term's path, which is in the plan
3033 * level that's processing the recursive UNION, which is one level *below*
3034 * where the CTE comes from.
3035 */
3036 levelsup = rte->ctelevelsup;
3037 if (levelsup == 0) /* shouldn't happen */
3038 elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
3039 levelsup--;
3040 cteroot = root;
3041 while (levelsup-- > 0)
3042 {
3043 cteroot = cteroot->parent_root;
3044 if (!cteroot) /* shouldn't happen */
3045 elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
3046 }
3047 ctepath = cteroot->non_recursive_path;
3048 if (!ctepath) /* shouldn't happen */
3049 elog(ERROR, "could not find path for CTE \"%s\"", rte->ctename);
3050
3051 /* Mark rel with estimated output rows, width, etc */
3052 set_cte_size_estimates(root, rel, ctepath->rows);
3053
3054 /*
3055 * We don't support pushing join clauses into the quals of a worktable
3056 * scan, but it could still have required parameterization due to LATERAL
3057 * refs in its tlist. (I'm not sure this is actually possible given the
3058 * restrictions on recursive references, but it's easy enough to support.)
3059 */
3060 required_outer = rel->lateral_relids;
3061
3062 /* Generate appropriate path */
3063 add_path(rel, create_worktablescan_path(root, rel, required_outer));
3064}
3065
3066/*
3067 * generate_gather_paths
3068 * Generate parallel access paths for a relation by pushing a Gather or
3069 * Gather Merge on top of a partial path.
3070 *
3071 * This must not be called until after we're done creating all partial paths
3072 * for the specified relation. (Otherwise, add_partial_path might delete a
3073 * path that some GatherPath or GatherMergePath has a reference to.)
3074 *
3075 * If we're generating paths for a scan or join relation, override_rows will
3076 * be false, and we'll just use the relation's size estimate. When we're
3077 * being called for a partially-grouped or partially-distinct path, though, we
3078 * need to override the rowcount estimate. (It's not clear that the
3079 * particular value we're using here is actually best, but the underlying rel
3080 * has no estimate so we must do something.)
3081 */
3082void
3083generate_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
3084{
3085 Path *cheapest_partial_path;
3086 Path *simple_gather_path;
3087 ListCell *lc;
3088 double rows;
3089 double *rowsp = NULL;
3090
3091 /* If there are no partial paths, there's nothing to do here. */
3092 if (rel->partial_pathlist == NIL)
3093 return;
3094
3095 /* Should we override the rel's rowcount estimate? */
3096 if (override_rows)
3097 rowsp = &rows;
3098
3099 /*
3100 * The output of Gather is always unsorted, so there's only one partial
3101 * path of interest: the cheapest one. That will be the one at the front
3102 * of partial_pathlist because of the way add_partial_path works.
3103 */
3104 cheapest_partial_path = linitial(rel->partial_pathlist);
3105 rows = compute_gather_rows(cheapest_partial_path);
3106 simple_gather_path = (Path *)
3107 create_gather_path(root, rel, cheapest_partial_path, rel->reltarget,
3108 NULL, rowsp);
3109 add_path(rel, simple_gather_path);
3110
3111 /*
3112 * For each useful ordering, we can consider an order-preserving Gather
3113 * Merge.
3114 */
3115 foreach(lc, rel->partial_pathlist)
3116 {
3117 Path *subpath = (Path *) lfirst(lc);
3118 GatherMergePath *path;
3119
3120 if (subpath->pathkeys == NIL)
3121 continue;
3122
3123 rows = compute_gather_rows(subpath);
3124 path = create_gather_merge_path(root, rel, subpath, rel->reltarget,
3125 subpath->pathkeys, NULL, rowsp);
3126 add_path(rel, &path->path);
3127 }
3128}
3129
3130/*
3131 * get_useful_pathkeys_for_relation
3132 * Determine which orderings of a relation might be useful.
3133 *
3134 * Getting data in sorted order can be useful either because the requested
3135 * order matches the final output ordering for the overall query we're
3136 * planning, or because it enables an efficient merge join. Here, we try
3137 * to figure out which pathkeys to consider.
3138 *
3139 * This allows us to do incremental sort on top of an index scan under a gather
3140 * merge node, i.e. to parallelize the sort.
3141 *
3142 * If 'require_parallel_safe' is true, we also require the expressions to
3143 * be parallel safe (which allows pushing the sort below Gather Merge).
3144 *
3145 * XXX At the moment this can only ever return a list with a single element,
3146 * because it looks at query_pathkeys only. So we might return the pathkeys
3147 * directly, but it seems plausible we'll want to consider other orderings
3148 * in the future. For example, we might want to consider pathkeys useful for
3149 * merge joins.
3150 */
3151static List *
3152get_useful_pathkeys_for_relation(PlannerInfo *root, RelOptInfo *rel,
3153 bool require_parallel_safe)
3154{
3155 List *useful_pathkeys_list = NIL;
3156
3157 /*
3158 * Considering query_pathkeys is always worth it, because it might allow
3159 * us to avoid a total sort when we have a partially presorted path
3160 * available or to push the total sort into the parallel portion of the
3161 * query.
3162 */
3163 if (root->query_pathkeys)
3164 {
3165 ListCell *lc;
3166 int npathkeys = 0; /* useful pathkeys */
3167
3168 foreach(lc, root->query_pathkeys)
3169 {
3170 PathKey *pathkey = (PathKey *) lfirst(lc);
3171 EquivalenceClass *pathkey_ec = pathkey->pk_eclass;
3172
3173 /*
3174 * We can only build a sort for pathkeys that contain a
3175 * safe-to-compute-early EC member computable from the current
3176 * relation's reltarget, so ignore the remainder of the list as
3177 * soon as we find a pathkey without such a member.
3178 *
3179 * It's still worthwhile to return any prefix of the pathkeys list
3180 * that meets this requirement, as we may be able to do an
3181 * incremental sort.
3182 *
3183 * If requested, ensure the sort expression is parallel-safe too.
3184 */
3185 if (!relation_can_be_sorted_early(root, rel, pathkey_ec,
3186 require_parallel_safe))
3187 break;
3188
3189 npathkeys++;
3190 }
3191
3192 /*
3193 * The whole query_pathkeys list matches, so append it directly, to
3194 * allow comparing pathkeys easily by comparing list pointer. If we
3195 * have to truncate the pathkeys, we must make a copy though.
3196 */
3197 if (npathkeys == list_length(root->query_pathkeys))
3198 useful_pathkeys_list = lappend(useful_pathkeys_list,
3199 root->query_pathkeys);
3200 else if (npathkeys > 0)
3201 useful_pathkeys_list = lappend(useful_pathkeys_list,
3202 list_copy_head(root->query_pathkeys,
3203 npathkeys));
3204 }
3205
3206 return useful_pathkeys_list;
3207}
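/*
 * Editor's note: the loop above computes the longest usable prefix of
 * query_pathkeys. Reduced to a toy (hypothetical can_sort_early()
 * predicate standing in for relation_can_be_sorted_early()):
 */
#include <stdbool.h>

static bool
can_sort_early(int pathkey)
{
    return pathkey < 100;       /* stand-in for the real EC-member test */
}

static int
usable_prefix_length(const int *pathkeys, int npathkeys)
{
    int n = 0;

    /* stop at the first key we can't compute from this relation */
    while (n < npathkeys && can_sort_early(pathkeys[n]))
        n++;

    return n;                   /* caller truncates the list to n entries */
}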
3208
3209/*
3210 * generate_useful_gather_paths
3211 * Generate parallel access paths for a relation by pushing a Gather or
3212 * Gather Merge on top of a partial path.
3213 *
3214 * Unlike plain generate_gather_paths, this looks both at pathkeys of input
3215 * paths (aiming to preserve the ordering), but also considers ordering that
3216 * might be useful for nodes above the gather merge node, and tries to add
3217 * a sort (regular or incremental) to provide that.
3218 */
3219void
3220generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
3221{
3222 ListCell *lc;
3223 double rows;
3224 double *rowsp = NULL;
3225 List *useful_pathkeys_list = NIL;
3226 Path *cheapest_partial_path = NULL;
3227
3228 /* If there are no partial paths, there's nothing to do here. */
3229 if (rel->partial_pathlist == NIL)
3230 return;
3231
3232 /* Should we override the rel's rowcount estimate? */
3233 if (override_rows)
3234 rowsp = &rows;
3235
3236 /* generate the regular gather (merge) paths */
3237 generate_gather_paths(root, rel, override_rows);
3238
3239 /* consider incremental sort for interesting orderings */
3240 useful_pathkeys_list = get_useful_pathkeys_for_relation(root, rel, true);
3241
3242 /* used for explicit (full) sort paths */
3243 cheapest_partial_path = linitial(rel->partial_pathlist);
3244
3245 /*
3246 * Consider sorted paths for each interesting ordering. We generate both
3247 * incremental and full sort.
3248 */
3249 foreach(lc, useful_pathkeys_list)
3250 {
3251 List *useful_pathkeys = lfirst(lc);
3252 ListCell *lc2;
3253 bool is_sorted;
3254 int presorted_keys;
3255
3256 foreach(lc2, rel->partial_pathlist)
3257 {
3258 Path *subpath = (Path *) lfirst(lc2);
3259 GatherMergePath *path;
3260
3261 is_sorted = pathkeys_count_contained_in(useful_pathkeys,
3262 subpath->pathkeys,
3263 &presorted_keys);
3264
3265 /*
3266 * We don't need to consider the case where a subpath is already
3267 * fully sorted because generate_gather_paths already creates a
3268 * gather merge path for every subpath that has pathkeys present.
3269 *
3270 * But since the subpath is already sorted, we know we don't need
3271 * to consider adding a sort (full or incremental) on top of it,
3272 * so we can continue here.
3273 */
3274 if (is_sorted)
3275 continue;
3276
3277 /*
3278 * Try at least sorting the cheapest path and also try
3279 * incrementally sorting any path which is partially sorted
3280 * already (no need to deal with paths which have presorted keys
3281 * when incremental sort is disabled unless it's the cheapest
3282 * input path).
3283 */
3284 if (subpath != cheapest_partial_path &&
3285 (presorted_keys == 0 || !enable_incremental_sort))
3286 continue;
3287
3288 /*
3289 * Consider regular sort for any path that's not presorted or if
3290 * incremental sort is disabled. We've no need to consider both
3291 * sort and incremental sort on the same path. We assume that
3292 * incremental sort is always faster when there are presorted
3293 * keys.
3294 *
3295 * This is not redundant with the gather paths created in
3296 * generate_gather_paths, because that doesn't generate ordered
3297 * output. Here we add an explicit sort to match the useful
3298 * ordering.
3299 */
3300 if (presorted_keys == 0 || !enable_incremental_sort)
3301 {
3302 subpath = (Path *) create_sort_path(root,
3303 rel,
3304 subpath,
3305 useful_pathkeys,
3306 -1.0);
3307 }
3308 else
3309 subpath = (Path *) create_incremental_sort_path(root,
3310 rel,
3311 subpath,
3312 useful_pathkeys,
3313 presorted_keys,
3314 -1);
3315
3316 path = create_gather_merge_path(root, rel,
3317 subpath,
3318 rel->reltarget,
3319 subpath->pathkeys,
3320 NULL,
3321 rowsp);
3322
3323 add_path(rel, &path->path);
3324 }
3325 }
3326}
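/*
 * Editor's note: the sort-selection rule above, stated in isolation with
 * toy types: a full sort when nothing is presorted or incremental sort
 * is disabled, otherwise an incremental sort, on the stated assumption
 * that an incremental sort is cheaper whenever presorted keys exist.
 */
typedef enum SortChoice
{
    SORT_FULL,
    SORT_INCREMENTAL
} SortChoice;

static SortChoice
choose_sort(int presorted_keys, int enable_incr_sort)
{
    if (presorted_keys == 0 || !enable_incr_sort)
        return SORT_FULL;
    return SORT_INCREMENTAL;    /* reuse the presorted prefix */
}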
3327
3328/*
3329 * make_rel_from_joinlist
3330 * Build access paths using a "joinlist" to guide the join path search.
3331 *
3332 * See comments for deconstruct_jointree() for definition of the joinlist
3333 * data structure.
3334 */
3335static RelOptInfo *
3336make_rel_from_joinlist(PlannerInfo *root, List *joinlist)
3337{
3338 int levels_needed;
3339 List *initial_rels;
3340 ListCell *jl;
3341
3342 /*
3343 * Count the number of child joinlist nodes. This is the depth of the
3344 * dynamic-programming algorithm we must employ to consider all ways of
3345 * joining the child nodes.
3346 */
3347 levels_needed = list_length(joinlist);
3348
3349 if (levels_needed <= 0)
3350 return NULL; /* nothing to do? */
3351
3352 /*
3353 * Construct a list of rels corresponding to the child joinlist nodes.
3354 * This may contain both base rels and rels constructed according to
3355 * sub-joinlists.
3356 */
3357 initial_rels = NIL;
3358 foreach(jl, joinlist)
3359 {
3360 Node *jlnode = (Node *) lfirst(jl);
3361 RelOptInfo *thisrel;
3362
3363 if (IsA(jlnode, RangeTblRef))
3364 {
3365 int varno = ((RangeTblRef *) jlnode)->rtindex;
3366
3367 thisrel = find_base_rel(root, varno);
3368 }
3369 else if (IsA(jlnode, List))
3370 {
3371 /* Recurse to handle subproblem */
3372 thisrel = make_rel_from_joinlist(root, (List *) jlnode);
3373 }
3374 else
3375 {
3376 elog(ERROR, "unrecognized joinlist node type: %d",
3377 (int) nodeTag(jlnode));
3378 thisrel = NULL; /* keep compiler quiet */
3379 }
3380
3381 initial_rels = lappend(initial_rels, thisrel);
3382 }
3383
3384 if (levels_needed == 1)
3385 {
3386 /*
3387 * Single joinlist node, so we're done.
3388 */
3389 return (RelOptInfo *) linitial(initial_rels);
3390 }
3391 else
3392 {
3393 /*
3394 * Consider the different orders in which we could join the rels,
3395 * using a plugin, GEQO, or the regular join search code.
3396 *
3397 * We put the initial_rels list into a PlannerInfo field because
3398 * has_legal_joinclause() needs to look at it (ugly :-().
3399 */
3400 root->initial_rels = initial_rels;
3401
3402 if (join_search_hook)
3403 return (*join_search_hook) (root, levels_needed, initial_rels);
3404 else if (enable_geqo && levels_needed >= geqo_threshold)
3405 return geqo(root, levels_needed, initial_rels);
3406 else
3407 return standard_join_search(root, levels_needed, initial_rels);
3408 }
3409}
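/*
 * Editor's note: the three-way dispatch at the bottom of
 * make_rel_from_joinlist follows PostgreSQL's usual hook pattern. A
 * freestanding sketch with toy stand-ins (the names below are
 * illustrative, not the real planner API):
 */
#include <stddef.h>

typedef int (*toy_search_hook_type) (int levels_needed);

static toy_search_hook_type toy_search_hook = NULL; /* set by a plugin */
static int toy_threshold = 12;      /* plays the role of geqo_threshold */
static int toy_enable_heuristic = 1;    /* plays the role of enable_geqo */

static int toy_heuristic_search(int levels) { return -levels; }
static int toy_standard_search(int levels) { return levels; }

static int
toy_run_search(int levels_needed)
{
    if (toy_search_hook)
        return toy_search_hook(levels_needed);  /* plugin takes over */
    else if (toy_enable_heuristic && levels_needed >= toy_threshold)
        return toy_heuristic_search(levels_needed);
    else
        return toy_standard_search(levels_needed);
}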
3410
3411/*
3412 * standard_join_search
3413 * Find possible joinpaths for a query by successively finding ways
3414 * to join component relations into join relations.
3415 *
3416 * 'levels_needed' is the number of iterations needed, ie, the number of
3417 * independent jointree items in the query. This is > 1.
3418 *
3419 * 'initial_rels' is a list of RelOptInfo nodes for each independent
3420 * jointree item. These are the components to be joined together.
3421 * Note that levels_needed == list_length(initial_rels).
3422 *
3423 * Returns the final level of join relations, i.e., the relation that is
3424 * the result of joining all the original relations together.
3425 * At least one implementation path must be provided for this relation and
3426 * all required sub-relations.
3427 *
3428 * To support loadable plugins that modify planner behavior by changing the
3429 * join searching algorithm, we provide a hook variable that lets a plugin
3430 * replace or supplement this function. Any such hook must return the same
3431 * final join relation as the standard code would, but it might have a
3432 * different set of implementation paths attached, and only the sub-joinrels
3433 * needed for these paths need have been instantiated.
3434 *
3435 * Note to plugin authors: the functions invoked during standard_join_search()
3436 * modify root->join_rel_list and root->join_rel_hash. If you want to do more
3437 * than one join-order search, you'll probably need to save and restore the
3438 * original states of those data structures. See geqo_eval() for an example.
3439 */
3440RelOptInfo *
3441standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
3442{
3443 int lev;
3444 RelOptInfo *rel;
3445
3446 /*
3447 * This function cannot be invoked recursively within any one planning
3448 * problem, so join_rel_level[] can't be in use already.
3449 */
3450 Assert(root->join_rel_level == NULL);
3451
3452 /*
3453 * We employ a simple "dynamic programming" algorithm: we first find all
3454 * ways to build joins of two jointree items, then all ways to build joins
3455 * of three items (from two-item joins and single items), then four-item
3456 * joins, and so on until we have considered all ways to join all the
3457 * items into one rel.
3458 *
3459 * root->join_rel_level[j] is a list of all the j-item rels. Initially we
3460 * set root->join_rel_level[1] to represent all the single-jointree-item
3461 * relations.
3462 */
3463 root->join_rel_level = (List **) palloc0((levels_needed + 1) * sizeof(List *));
3464
3465 root->join_rel_level[1] = initial_rels;
3466
3467 for (lev = 2; lev <= levels_needed; lev++)
3468 {
3469 ListCell *lc;
3470
3471 /*
3472 * Determine all possible pairs of relations to be joined at this
3473 * level, and build paths for making each one from every available
3474 * pair of lower-level relations.
3475 */
3476 join_search_one_level(root, lev);
3477
3478 /*
3479 * Run generate_partitionwise_join_paths() and
3480 * generate_useful_gather_paths() for each just-processed joinrel. We
3481 * could not do this earlier because both regular and partial paths
3482 * can get added to a particular joinrel at multiple times within
3483 * join_search_one_level.
3484 *
3485 * After that, we're done creating paths for the joinrel, so run
3486 * set_cheapest().
3487 */
3488 foreach(lc, root->join_rel_level[lev])
3489 {
3490 rel = (RelOptInfo *) lfirst(lc);
3491
3492 /* Create paths for partitionwise joins. */
3493 generate_partitionwise_join_paths(root, rel);
3494
3495 /*
3496 * Except for the topmost scan/join rel, consider gathering
3497 * partial paths. We'll do the same for the topmost scan/join rel
3498 * once we know the final targetlist (see grouping_planner and its
3499 * call to apply_scanjoin_target_to_paths).
3500 */
3501 if (!bms_equal(rel->relids, root->all_query_rels))
3502 generate_useful_gather_paths(root, rel, false);
3503
3504 /* Find and save the cheapest paths for this rel */
3505 set_cheapest(rel);
3506
3507#ifdef OPTIMIZER_DEBUG
3508 pprint(rel);
3509#endif
3510 }
3511 }
3512
3513 /*
3514 * We should have a single rel at the final level.
3515 */
3516 if (root->join_rel_level[levels_needed] == NIL)
3517 elog(ERROR, "failed to build any %d-way joins", levels_needed);
3518 Assert(list_length(root->join_rel_level[levels_needed]) == 1);
3519
3520 rel = (RelOptInfo *) linitial(root->join_rel_level[levels_needed]);
3521
3522 root->join_rel_level = NULL;
3523
3524 return rel;
3525}
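/*
 * Editor's note: the dynamic program above, stripped to its shape: level
 * 1 holds the single-item relations, and each level lev is built by
 * combining lower levels whose sizes sum to lev. This toy program just
 * prints which level pairs could feed each level.
 */
#include <stdio.h>

int
main(void)
{
    int levels_needed = 4;

    for (int lev = 2; lev <= levels_needed; lev++)
    {
        /* a lev-item joinrel combines a j-item rel with a (lev-j)-item rel */
        for (int j = 1; j <= lev / 2; j++)
            printf("level %d: join level %d with level %d\n",
                   lev, j, lev - j);
    }
    return 0;
}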
3526
3527/*****************************************************************************
3528 * PUSHING QUALS DOWN INTO SUBQUERIES
3529 *****************************************************************************/
3530
3531/*
3532 * subquery_is_pushdown_safe - is a subquery safe for pushing down quals?
3533 *
3534 * subquery is the particular component query being checked. topquery
3535 * is the top component of a set-operations tree (the same Query if no
3536 * set-op is involved).
3537 *
3538 * Conditions checked here:
3539 *
3540 * 1. If the subquery has a LIMIT clause, we must not push down any quals,
3541 * since that could change the set of rows returned.
3542 *
3543 * 2. If the subquery contains EXCEPT or EXCEPT ALL set ops we cannot push
3544 * quals into it, because that could change the results.
3545 *
3546 * 3. If the subquery uses DISTINCT, we cannot push volatile quals into it.
3547 * This is because upper-level quals should semantically be evaluated only
3548 * once per distinct row, not once per original row, and if the qual is
3549 * volatile then extra evaluations could change the results. (This issue
3550 * does not apply to other forms of aggregation such as GROUP BY, because
3551 * when those are present we push into HAVING not WHERE, so that the quals
3552 * are still applied after aggregation.)
3553 *
3554 * 4. If the subquery contains window functions, we cannot push volatile quals
3555 * into it. The issue here is a bit different from DISTINCT: a volatile qual
3556 * might succeed for some rows of a window partition and fail for others,
3557 * thereby changing the partition contents and thus the window functions'
3558 * results for rows that remain.
3559 *
3560 * 5. If the subquery contains any set-returning functions in its targetlist,
3561 * we cannot push volatile quals into it. That would push them below the SRFs
3562 * and thereby change the number of times they are evaluated. Also, a
3563 * volatile qual could succeed for some SRF output rows and fail for others,
3564 * a behavior that cannot occur if it's evaluated before SRF expansion.
3565 *
3566 * 6. If the subquery has nonempty grouping sets, we cannot push down any
3567 * quals. The concern here is that a qual referencing a "constant" grouping
3568 * column could get constant-folded, which would be improper because the value
3569 * is potentially nullable by grouping-set expansion. This restriction could
3570 * be removed if we had a parsetree representation that shows that such
3571 * grouping columns are not really constant. (There are other ideas that
3572 * could be used to relax this restriction, but that's the approach most
3573 * likely to get taken in the future. Note that there's not much to be gained
3574 * so long as subquery_planner can't move HAVING clauses to WHERE within such
3575 * a subquery.)
3576 *
3577 * In addition, we make several checks on the subquery's output columns to see
3578 * if it is safe to reference them in pushed-down quals. If output column k
3579 * is found to be unsafe to reference, we set the reason for that inside
3580 * safetyInfo->unsafeFlags[k], but we don't reject the subquery overall since
3581 * column k might not be referenced by some/all quals. The unsafeFlags[]
3582 * array will be consulted later by qual_is_pushdown_safe(). It's better to
3583 * do it this way than to make the checks directly in qual_is_pushdown_safe(),
3584 * because when the subquery involves set operations we have to check the
3585 * output expressions in each arm of the set op.
3586 *
3587 * Note: pushing quals into a DISTINCT subquery is theoretically dubious:
3588 * we're effectively assuming that the quals cannot distinguish values that
3589 * the DISTINCT's equality operator sees as equal, yet there are many
3590 * counterexamples to that assumption. However, use of such a qual with a
3591 * DISTINCT subquery would be unsafe anyway, since there's no guarantee which
3592 * "equal" value will be chosen as the output value by the DISTINCT operation.
3593 * So we don't worry too much about that. Another objection is that if the
3594 * qual is expensive to evaluate, running it for each original row might cost
3595 * more than we save by eliminating rows before the DISTINCT step. But it
3596 * would be very hard to estimate that at this stage, and in practice pushdown
3597 * seldom seems to make things worse, so we ignore that problem too.
3598 *
3599 * Note: likewise, pushing quals into a subquery with window functions is a
3600 * bit dubious: the quals might remove some rows of a window partition while
3601 * leaving others, causing changes in the window functions' results for the
3602 * surviving rows. We insist that such a qual reference only partitioning
3603 * columns, but again that only protects us if the qual does not distinguish
3604 * values that the partitioning equality operator sees as equal. The risks
3605 * here are perhaps larger than for DISTINCT, since no de-duplication of rows
3606 * occurs and thus there is no theoretical problem with such a qual. But
3607 * we'll do this anyway because the potential performance benefits are very
3608 * large, and we've seen no field complaints about the longstanding comparable
3609 * behavior with DISTINCT.
3610 */
3611static bool
3612subquery_is_pushdown_safe(Query *subquery, Query *topquery,
3613 pushdown_safety_info *safetyInfo)
3614{
3615 SetOperationStmt *topop;
3616
3617 /* Check point 1 */
3618 if (subquery->limitOffset != NULL || subquery->limitCount != NULL)
3619 return false;
3620
3621 /* Check point 6 */
3622 if (subquery->groupClause && subquery->groupingSets)
3623 return false;
3624
3625 /* Check points 3, 4, and 5 */
3626 if (subquery->distinctClause ||
3627 subquery->hasWindowFuncs ||
3628 subquery->hasTargetSRFs)
3629 safetyInfo->unsafeVolatile = true;
3630
3631 /*
3632 * If we're at a leaf query, check for unsafe expressions in its target
3633 * list, and mark any reasons why they're unsafe in unsafeFlags[].
3634 * (Non-leaf nodes in setop trees have only simple Vars in their tlists,
3635 * so no need to check them.)
3636 */
3637 if (subquery->setOperations == NULL)
3638 check_output_expressions(subquery, safetyInfo);
3639
3640 /* Are we at top level, or looking at a setop component? */
3641 if (subquery == topquery)
3642 {
3643 /* Top level, so check any component queries */
3644 if (subquery->setOperations != NULL)
3645 if (!recurse_pushdown_safe(subquery->setOperations, topquery,
3646 safetyInfo))
3647 return false;
3648 }
3649 else
3650 {
3651 /* Setop component must not have more components (too weird) */
3652 if (subquery->setOperations != NULL)
3653 return false;
3654 /* Check whether setop component output types match top level */
3655 topop = castNode(SetOperationStmt, topquery->setOperations);
3656 Assert(topop);
3657 compare_tlist_datatypes(subquery->targetList,
3658 topop->colTypes,
3659 safetyInfo);
3660 }
3661 return true;
3662}
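
/*
 * Illustrative sketch, not part of allpaths.c: roughly how a caller such
 * as set_subquery_pathlist() prepares a pushdown_safety_info before using
 * the routines above.  unsafeFlags[] is indexed by 1-based column number,
 * hence the "+ 1" in the allocation.  The function and macro names
 * (example_check_pushdown, PUSHDOWN_SAFETY_EXAMPLE) are hypothetical.
 */
#ifdef PUSHDOWN_SAFETY_EXAMPLE
static bool
example_check_pushdown(Query *subquery, RangeTblEntry *rte)
{
    pushdown_safety_info safetyInfo;

    /* zero the workspace, then allocate one flags byte per output column */
    memset(&safetyInfo, 0, sizeof(safetyInfo));
    safetyInfo.unsafeFlags = (unsigned char *)
        palloc0((list_length(subquery->targetList) + 1) *
                sizeof(unsigned char));

    /* leaky quals must not be pushed into a security-barrier subquery */
    safetyInfo.unsafeLeaky = rte->security_barrier;

    return subquery_is_pushdown_safe(subquery, subquery, &safetyInfo);
}
#endif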
3663
3664/*
3665 * Helper routine to recurse through setOperations tree
3666 */
3667static bool
3668recurse_pushdown_safe(Node *setOp, Query *topquery,
3669 pushdown_safety_info *safetyInfo)
3670{
3671 if (IsA(setOp, RangeTblRef))
3672 {
3673 RangeTblRef *rtr = (RangeTblRef *) setOp;
3674 RangeTblEntry *rte = rt_fetch(rtr->rtindex, topquery->rtable);
3675 Query *subquery = rte->subquery;
3676
3677 Assert(subquery != NULL);
3678 return subquery_is_pushdown_safe(subquery, topquery, safetyInfo);
3679 }
3680 else if (IsA(setOp, SetOperationStmt))
3681 {
3682 SetOperationStmt *op = (SetOperationStmt *) setOp;
3683
3684 /* EXCEPT is no good (point 2 for subquery_is_pushdown_safe) */
3685 if (op->op == SETOP_EXCEPT)
3686 return false;
3687 /* Else recurse */
3688 if (!recurse_pushdown_safe(op->larg, topquery, safetyInfo))
3689 return false;
3690 if (!recurse_pushdown_safe(op->rarg, topquery, safetyInfo))
3691 return false;
3692 }
3693 else
3694 {
3695 elog(ERROR, "unrecognized node type: %d",
3696 (int) nodeTag(setOp));
3697 }
3698 return true;
3699}
3700
3701/*
3702 * check_output_expressions - check subquery's output expressions for safety
3703 *
3704 * There are several cases in which it's unsafe to push down an upper-level
3705 * qual if it references a particular output column of a subquery. We check
3706 * each output column of the subquery and set flags in unsafeFlags[k] when we
3707 * see that column is unsafe for a pushed-down qual to reference. The
3708 * conditions checked here are:
3709 *
3710 * 1. We must not push down any quals that refer to subselect outputs that
3711 * return sets, else we'd introduce functions-returning-sets into the
3712 * subquery's WHERE/HAVING quals.
3713 *
3714 * 2. We must not push down any quals that refer to subselect outputs that
3715 * contain volatile functions, for fear of introducing strange results due
3716 * to multiple evaluation of a volatile function.
3717 *
3718 * 3. If the subquery uses DISTINCT ON, we must not push down any quals that
3719 * refer to non-DISTINCT output columns, because that could change the set
3720 * of rows returned. (This condition is vacuous for DISTINCT, because then
3721 * there are no non-DISTINCT output columns, so we needn't check. Note that
3722 * subquery_is_pushdown_safe already reported that we can't use volatile
3723 * quals if there's DISTINCT or DISTINCT ON.)
3724 *
3725 * 4. If the subquery has any window functions, we must not push down quals
3726 * that reference any output columns that are not listed in all the subquery's
3727 * window PARTITION BY clauses. We can push down quals that use only
3728 * partitioning columns because they should succeed or fail identically for
3729 * every row of any one window partition, and totally excluding some
3730 * partitions will not change a window function's results for remaining
3731 * partitions. (Again, this also requires nonvolatile quals, but
3732 * subquery_is_pushdown_safe handles that.) Subquery columns marked as
3733 * unsafe for this reason can still have WindowClause run conditions pushed
3734 * down.
3735 */
3736static void
3737check_output_expressions(Query *subquery, pushdown_safety_info *safetyInfo)
3738{
3739 ListCell *lc;
3740
3741 foreach(lc, subquery->targetList)
3742 {
3743 TargetEntry *tle = (TargetEntry *) lfirst(lc);
3744
3745 if (tle->resjunk)
3746 continue; /* ignore resjunk columns */
3747
3748 /* Functions returning sets are unsafe (point 1) */
3749 if (subquery->hasTargetSRFs &&
3750 (safetyInfo->unsafeFlags[tle->resno] &
3751 UNSAFE_HAS_SET_FUNC) == 0 &&
3752 expression_returns_set((Node *) tle->expr))
3753 {
3754 safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_HAS_SET_FUNC;
3755 continue;
3756 }
3757
3758 /* Volatile functions are unsafe (point 2) */
3759 if ((safetyInfo->unsafeFlags[tle->resno] &
3760 UNSAFE_HAS_VOLATILE_FUNC) == 0 &&
3761 contain_volatile_functions((Node *) tle->expr))
3762 {
3763 safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_HAS_VOLATILE_FUNC;
3764 continue;
3765 }
3766
3767 /* If subquery uses DISTINCT ON, check point 3 */
3768 if (subquery->hasDistinctOn &&
3769 (safetyInfo->unsafeFlags[tle->resno] &
3770 UNSAFE_NOTIN_DISTINCTON_CLAUSE) == 0 &&
3771 !targetIsInSortList(tle, InvalidOid, subquery->distinctClause))
3772 {
3773 /* non-DISTINCT column, so mark it unsafe */
3774 safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_NOTIN_DISTINCTON_CLAUSE;
3775 continue;
3776 }
3777
3778 /* If subquery uses window functions, check point 4 */
3779 if (subquery->hasWindowFuncs &&
3780 (safetyInfo->unsafeFlags[tle->resno] &
3781 UNSAFE_NOTIN_PARTITIONBY_CLAUSE) == 0 &&
3782 !targetIsInAllPartitionLists(tle, subquery))
3783 {
3784 /* not present in all PARTITION BY clauses, so mark it unsafe */
3785 safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_NOTIN_PARTITIONBY_CLAUSE;
3786 continue;
3787 }
3788 }
3789}
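
/*
 * Example (illustrative, not part of allpaths.c): for the subquery
 *
 *     SELECT DISTINCT ON (a) a, b FROM t ORDER BY a, b
 *
 * column "b" is not in the DISTINCT ON list, so point 3 above sets
 * UNSAFE_NOTIN_DISTINCTON_CLAUSE in unsafeFlags[2]; an outer qual on "b"
 * is not pushed down, while a qual on "a" still can be.
 */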
3790
3791/*
3792 * For subqueries using UNION/UNION ALL/INTERSECT/INTERSECT ALL, we can
3793 * push quals into each component query, but the quals can only reference
3794 * subquery columns that suffer no type coercions in the set operation.
3795 * Otherwise there are possible semantic gotchas. So, we check the
3796 * component queries to see if any of them have output types different from
3797 * the top-level setop outputs. We set the UNSAFE_TYPE_MISMATCH bit in
3798 * unsafeFlags[k] if column k has different type in any component.
3799 *
3800 * We don't have to care about typmods here: the only allowed difference
3801 * between set-op input and output typmods is input is a specific typmod
3802 * and output is -1, and that does not require a coercion.
3803 *
3804 * tlist is a subquery tlist.
3805 * colTypes is an OID list of the top-level setop's output column types.
3806 * safetyInfo is the pushdown_safety_info to set unsafeFlags[] for.
3807 */
3808static void
3809compare_tlist_datatypes(List *tlist, List *colTypes,
3810 pushdown_safety_info *safetyInfo)
3811{
3812 ListCell *l;
3813 ListCell *colType = list_head(colTypes);
3814
3815 foreach(l, tlist)
3816 {
3817 TargetEntry *tle = (TargetEntry *) lfirst(l);
3818
3819 if (tle->resjunk)
3820 continue; /* ignore resjunk columns */
3821 if (colType == NULL)
3822 elog(ERROR, "wrong number of tlist entries");
3823 if (exprType((Node *) tle->expr) != lfirst_oid(colType))
3824 safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_TYPE_MISMATCH;
3825 colType = lnext(colTypes, colType);
3826 }
3827 if (colType != NULL)
3828 elog(ERROR, "wrong number of tlist entries");
3829}
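
/*
 * Example (illustrative, not part of allpaths.c; table and column names
 * hypothetical): in
 *
 *     SELECT i4 FROM t1 UNION ALL SELECT i8 FROM t2
 *
 * where i4 is integer and i8 is bigint, the setop output column resolves
 * to bigint, so the first arm's tlist type differs from colTypes and the
 * column is marked UNSAFE_TYPE_MISMATCH.  Since unsafeFlags[] is shared
 * across all arms, an outer qual on that column is then not pushed into
 * any arm.
 */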
3830
3831/*
3832 * targetIsInAllPartitionLists
3833 * True if the TargetEntry is listed in the PARTITION BY clause
3834 * of every window defined in the query.
3835 *
3836 * It would be safe to ignore windows not actually used by any window
3837 * function, but it's not easy to get that info at this stage; and it's
3838 * unlikely to be useful to spend any extra cycles getting it, since
3839 * unreferenced window definitions are probably infrequent in practice.
3840 */
3841static bool
3842targetIsInAllPartitionLists(TargetEntry *tle, Query *query)
3843{
3844 ListCell *lc;
3845
3846 foreach(lc, query->windowClause)
3847 {
3848 WindowClause *wc = (WindowClause *) lfirst(lc);
3849
3850 if (!targetIsInSortList(tle, InvalidOid, wc->partitionClause))
3851 return false;
3852 }
3853 return true;
3854}
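
/*
 * Example (illustrative, not part of allpaths.c): in
 *
 *     SELECT a, b, row_number() OVER (PARTITION BY a ORDER BY b) AS rn
 *     FROM t
 *
 * targetIsInAllPartitionLists() is true only for "a", so an outer qual on
 * "a" can be pushed down (it merely removes whole partitions).  "b" and
 * "rn" get UNSAFE_NOTIN_PARTITIONBY_CLAUSE; a qual on "b" is not pushed
 * down at all, while something like "rn <= 10" can still become a
 * WindowClause run condition because row_number() is monotonically
 * increasing.
 */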
3855
3856/*
3857 * qual_is_pushdown_safe - is a particular rinfo safe to push down?
3858 *
3859 * rinfo is a restriction clause applying to the given subquery (whose RTE
3860 * has index rti in the parent query).
3861 *
3862 * Conditions checked here:
3863 *
3864 * 1. rinfo's clause must not contain any SubPlans (mainly because it's
3865 * unclear that it will work correctly: SubLinks will already have been
3866 * transformed into SubPlans in the qual, but not in the subquery). Note that
3867 * SubLinks that transform to initplans are safe, and will be accepted here
3868 * because what we'll see in the qual is just a Param referencing the initplan
3869 * output.
3870 *
3871 * 2. If unsafeVolatile is set, rinfo's clause must not contain any volatile
3872 * functions.
3873 *
3874 * 3. If unsafeLeaky is set, rinfo's clause must not contain any leaky
3875 * functions that are passed Var nodes, and therefore might reveal values from
3876 * the subquery as side effects.
3877 *
3878 * 4. rinfo's clause must not refer to the whole-row output of the subquery
3879 * (since there is no easy way to name that within the subquery itself).
3880 *
3881 * 5. rinfo's clause must not refer to any subquery output columns that were
3882 * found to be unsafe to reference by subquery_is_pushdown_safe().
3883 */
3884static pushdown_safe_type
3885qual_is_pushdown_safe(Query *subquery, Index rti, RestrictInfo *rinfo,
3886 pushdown_safety_info *safetyInfo)
3887{
3888 pushdown_safe_type safe = PUSHDOWN_SAFE;
3889 Node *qual = (Node *) rinfo->clause;
3890 List *vars;
3891 ListCell *vl;
3892
3893 /* Refuse subselects (point 1) */
3894 if (contain_subplans(qual))
3895 return PUSHDOWN_UNSAFE;
3896
3897 /* Refuse volatile quals if we found they'd be unsafe (point 2) */
3898 if (safetyInfo->unsafeVolatile &&
3899 contain_volatile_functions((Node *) rinfo))
3900 return PUSHDOWN_UNSAFE;
3901
3902 /* Refuse leaky quals if told to (point 3) */
3903 if (safetyInfo->unsafeLeaky &&
3904 contain_leaked_vars(qual))
3905 return PUSHDOWN_UNSAFE;
3906
3907 /*
3908 * Examine all Vars used in clause. Since it's a restriction clause, all
3909 * such Vars must refer to subselect output columns ... unless this is
3910 * part of a LATERAL subquery, in which case there could be lateral
3911 * references.
3912 *
3913 * By omitting the relevant flags, this also gives us a cheap sanity check
3914 * that no aggregates or window functions appear in the qual. Those would
3915 * be unsafe to push down, but at least for the moment we could never see
3916 * any in a qual anyhow.
3917 */
3918 vars = pull_var_clause(qual, PVC_INCLUDE_PLACEHOLDERS);
3919 foreach(vl, vars)
3920 {
3921 Var *var = (Var *) lfirst(vl);
3922
3923 /*
3924 * XXX Punt if we find any PlaceHolderVars in the restriction clause.
3925 * It's not clear whether a PHV could safely be pushed down, and even
3926 * less clear whether such a situation could arise in any cases of
3927 * practical interest anyway. So for the moment, just refuse to push
3928 * down.
3929 */
3930 if (!IsA(var, Var))
3931 {
3932 safe = PUSHDOWN_UNSAFE;
3933 break;
3934 }
3935
3936 /*
3937 * Punt if we find any lateral references. It would be safe to push
3938 * these down, but we'd have to convert them into outer references,
3939 * which subquery_push_qual lacks the infrastructure to do. The case
3940 * arises so seldom that it doesn't seem worth working hard on.
3941 */
3942 if (var->varno != rti)
3943 {
3944 safe = PUSHDOWN_UNSAFE;
3945 break;
3946 }
3947
3948 /* Subqueries have no system columns */
3949 Assert(var->varattno >= 0);
3950
3951 /* Check point 4 */
3952 if (var->varattno == 0)
3953 {
3954 safe = PUSHDOWN_UNSAFE;
3955 break;
3956 }
3957
3958 /* Check point 5 */
3959 if (safetyInfo->unsafeFlags[var->varattno] != 0)
3960 {
3961 if (safetyInfo->unsafeFlags[var->varattno] &
3962 (UNSAFE_HAS_VOLATILE_FUNC | UNSAFE_HAS_SET_FUNC |
3963 UNSAFE_NOTIN_DISTINCTON_CLAUSE | UNSAFE_TYPE_MISMATCH))
3964 {
3965 safe = PUSHDOWN_UNSAFE;
3966 break;
3967 }
3968 else
3969 {
3970 /* UNSAFE_NOTIN_PARTITIONBY_CLAUSE is ok for run conditions */
3971 safe = PUSHDOWN_WINDOWCLAUSE_RUNCOND;
3972 /* don't break, we might find another Var that's unsafe */
3973 }
3974 }
3975 }
3976
3977 list_free(vars);
3978
3979 return safe;
3980}
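
/*
 * Example (illustrative, not part of allpaths.c): for
 *
 *     SELECT * FROM (SELECT a, b FROM t) ss
 *     WHERE ss.a > 10 AND row_to_json(ss) IS NOT NULL
 *
 * "ss.a > 10" references only output column "a" and, assuming that column
 * was not flagged by check_output_expressions, is PUSHDOWN_SAFE; the
 * second qual uses the whole-row Var (varattno 0) and is PUSHDOWN_UNSAFE
 * per point 4.
 */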
3981
3982/*
3983 * subquery_push_qual - push down a qual that we have determined is safe
3984 */
3985static void
3986subquery_push_qual(Query *subquery, RangeTblEntry *rte, Index rti, Node *qual)
3987{
3988 if (subquery->setOperations != NULL)
3989 {
3990 /* Recurse to push it separately to each component query */
3991 recurse_push_qual(subquery->setOperations, subquery,
3992 rte, rti, qual);
3993 }
3994 else
3995 {
3996 /*
3997 * We need to replace Vars in the qual (which must refer to outputs of
3998 * the subquery) with copies of the subquery's targetlist expressions.
3999 * Note that at this point, any uplevel Vars in the qual should have
4000 * been replaced with Params, so they need no work.
4001 *
4002 * This step also ensures that when we are pushing into a setop tree,
4003 * each component query gets its own copy of the qual.
4004 */
4005 qual = ReplaceVarsFromTargetList(qual, rti, 0, rte,
4006 subquery->targetList,
4007 subquery->resultRelation,
4008 REPLACEVARS_REPORT_ERROR, 0,
4009 &subquery->hasSubLinks);
4010
4011 /*
4012 * Now attach the qual to the proper place: normally WHERE, but if the
4013 * subquery uses grouping or aggregation, put it in HAVING (since the
4014 * qual really refers to the group-result rows).
4015 */
4016 if (subquery->hasAggs || subquery->groupClause || subquery->groupingSets || subquery->havingQual)
4017 subquery->havingQual = make_and_qual(subquery->havingQual, qual);
4018 else
4019 subquery->jointree->quals =
4020 make_and_qual(subquery->jointree->quals, qual);
4021
4022 /*
4023 * We need not change the subquery's hasAggs or hasSubLinks flags,
4024 * since we can't be pushing down any aggregates that weren't there
4025 * before, and we don't push down subselects at all.
4026 */
4027 }
4028}
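
/*
 * Example (illustrative, not part of allpaths.c): pushing the outer qual
 * "cnt > 10" into
 *
 *     SELECT a, count(*) AS cnt FROM t GROUP BY a
 *
 * first replaces the Var for "cnt" with the count(*) expression, then,
 * because the subquery has grouping, attaches the result to havingQual,
 * yielding the equivalent of ... GROUP BY a HAVING count(*) > 10.
 * Attaching it to the jointree instead would be semantically wrong, since
 * the qual refers to group-result rows.
 */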
4029
4030/*
4031 * Helper routine to recurse through setOperations tree
4032 */
4033static void
4034recurse_push_qual(Node *setOp, Query *topquery,
4035 RangeTblEntry *rte, Index rti, Node *qual)
4036{
4037 if (IsA(setOp, RangeTblRef))
4038 {
4039 RangeTblRef *rtr = (RangeTblRef *) setOp;
4040 RangeTblEntry *subrte = rt_fetch(rtr->rtindex, topquery->rtable);
4041 Query *subquery = subrte->subquery;
4042
4043 Assert(subquery != NULL);
4044 subquery_push_qual(subquery, rte, rti, qual);
4045 }
4046 else if (IsA(setOp, SetOperationStmt))
4047 {
4048 SetOperationStmt *op = (SetOperationStmt *) setOp;
4049
4050 recurse_push_qual(op->larg, topquery, rte, rti, qual);
4051 recurse_push_qual(op->rarg, topquery, rte, rti, qual);
4052 }
4053 else
4054 {
4055 elog(ERROR, "unrecognized node type: %d",
4056 (int) nodeTag(setOp));
4057 }
4058}
4059
4060/*****************************************************************************
4061 * SIMPLIFYING SUBQUERY TARGETLISTS
4062 *****************************************************************************/
4063
4064/*
4065 * remove_unused_subquery_outputs
4066 * Remove subquery targetlist items we don't need
4067 *
4068 * It's possible, even likely, that the upper query does not read all the
4069 * output columns of the subquery. We can remove any such outputs that are
4070 * not needed by the subquery itself (e.g., as sort/group columns) and do not
4071 * affect semantics otherwise (e.g., volatile functions can't be removed).
4072 * This is useful not only because we might be able to remove expensive-to-
4073 * compute expressions, but because deletion of output columns might allow
4074 * optimizations such as join removal to occur within the subquery.
4075 *
4076 * extra_used_attrs can be passed as non-NULL to mark any columns (offset by
4077 * FirstLowInvalidHeapAttributeNumber) that we should not remove. This
4078 * parameter is modified by the function, so callers must make a copy if they
4079 * need to use the passed in Bitmapset after calling this function.
4080 *
4081 * To avoid affecting column numbering in the targetlist, we don't physically
4082 * remove unused tlist entries, but rather replace their expressions with NULL
4083 * constants. This is implemented by modifying subquery->targetList.
4084 */
4085static void
4086remove_unused_subquery_outputs(Query *subquery, RelOptInfo *rel,
4087 Bitmapset *extra_used_attrs)
4088{
4089 Bitmapset *attrs_used;
4090 ListCell *lc;
4091
4092 /*
4093 * Just point directly to extra_used_attrs. No need to bms_copy as none of
4094 * the current callers use the Bitmapset after calling this function.
4095 */
4096 attrs_used = extra_used_attrs;
4097
4098 /*
4099 * Do nothing if subquery has UNION/INTERSECT/EXCEPT: in principle we
4100 * could update all the child SELECTs' tlists, but it seems not worth the
4101 * trouble presently.
4102 */
4103 if (subquery->setOperations)
4104 return;
4105
4106 /*
4107 * If subquery has regular DISTINCT (not DISTINCT ON), we're wasting our
4108 * time: all its output columns must be used in the distinctClause.
4109 */
4110 if (subquery->distinctClause && !subquery->hasDistinctOn)
4111 return;
4112
4113 /*
4114 * Collect a bitmap of all the output column numbers used by the upper
4115 * query.
4116 *
4117 * Add all the attributes needed for joins or final output. Note: we must
4118 * look at rel's targetlist, not the attr_needed data, because attr_needed
4119 * isn't computed for inheritance child rels, cf set_append_rel_size().
4120 * (XXX might be worth changing that sometime.)
4121 */
4122 pull_varattnos((Node *) rel->reltarget->exprs, rel->relid, &attrs_used);
4123
4124 /* Add all the attributes used by un-pushed-down restriction clauses. */
4125 foreach(lc, rel->baserestrictinfo)
4126 {
4127 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
4128
4129 pull_varattnos((Node *) rinfo->clause, rel->relid, &attrs_used);
4130 }
4131
4132 /*
4133 * If there's a whole-row reference to the subquery, we can't remove
4134 * anything.
4135 */
4136 if (bms_is_member(0 - FirstLowInvalidHeapAttributeNumber, attrs_used))
4137 return;
4138
4139 /*
4140 * Run through the tlist and zap entries we don't need. It's okay to
4141 * modify the tlist items in-place because set_subquery_pathlist made a
4142 * copy of the subquery.
4143 */
4144 foreach(lc, subquery->targetList)
4145 {
4146 TargetEntry *tle = (TargetEntry *) lfirst(lc);
4147 Node *texpr = (Node *) tle->expr;
4148
4149 /*
4150 * If it has a sortgroupref number, it's used in some sort/group
4151 * clause so we'd better not remove it. Also, don't remove any
4152 * resjunk columns, since their reason for being has nothing to do
4153 * with anybody reading the subquery's output. (It's likely that
4154 * resjunk columns in a sub-SELECT would always have ressortgroupref
4155 * set, but even if they don't, it seems imprudent to remove them.)
4156 */
4157 if (tle->ressortgroupref || tle->resjunk)
4158 continue;
4159
4160 /*
4161 * If it's used by the upper query, we can't remove it.
4162 */
4163 if (bms_is_member(tle->resno - FirstLowInvalidHeapAttributeNumber,
4164 attrs_used))
4165 continue;
4166
4167 /*
4168 * If it contains a set-returning function, we can't remove it since
4169 * that could change the number of rows returned by the subquery.
4170 */
4171 if (subquery->hasTargetSRFs &&
4172 expression_returns_set(texpr))
4173 continue;
4174
4175 /*
4176 * If it contains volatile functions, we daren't remove it for fear
4177 * that the user is expecting their side-effects to happen.
4178 */
4179 if (contain_volatile_functions(texpr))
4180 continue;
4181
4182 /*
4183 * OK, we don't need it. Replace the expression with a NULL constant.
4184 * Preserve the exposed type of the expression, in case something
4185 * looks at the rowtype of the subquery's result.
4186 */
4187 tle->expr = (Expr *) makeNullConst(exprType(texpr),
4188 exprTypmod(texpr),
4189 exprCollation(texpr));
4190 }
4191}
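
/*
 * Example (illustrative, not part of allpaths.c; slow_func is a
 * hypothetical stable function): if the outer query reads only "a" from
 *
 *     SELECT a, slow_func(b) AS fb FROM t
 *
 * and "fb" has no ressortgroupref, its expression is replaced by a NULL
 * constant of the same type; dropping the reference to "b" may in turn
 * enable optimizations such as join removal inside the subquery.
 */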
4192
4193/*
4194 * create_partial_bitmap_paths
4195 * Build partial bitmap heap path for the relation
4196 */
4197void
4198create_partial_bitmap_paths(PlannerInfo *root, RelOptInfo *rel,
4199 Path *bitmapqual)
4200{
4201 int parallel_workers;
4202 double pages_fetched;
4203
4204 /* Compute heap pages for bitmap heap scan */
4205 pages_fetched = compute_bitmap_pages(root, rel, bitmapqual, 1.0,
4206 NULL, NULL);
4207
4208 parallel_workers = compute_parallel_worker(rel, pages_fetched, -1,
4209 max_parallel_workers_per_gather);
4210
4211 if (parallel_workers <= 0)
4212 return;
4213
4214 add_partial_path(rel, (Path *) create_bitmap_heap_path(root, rel,
4215 bitmapqual, rel->lateral_relids, 1.0, parallel_workers));
4216}
4217
4218/*
4219 * Compute the number of parallel workers that should be used to scan a
4220 * relation. We compute the parallel workers based on the size of the heap to
4221 * be scanned and the size of the index to be scanned, then choose a minimum
4222 * of those.
4223 *
4224 * "heap_pages" is the number of pages from the table that we expect to scan, or
4225 * -1 if we don't expect to scan any.
4226 *
4227 * "index_pages" is the number of pages from the index that we expect to scan, or
4228 * -1 if we don't expect to scan any.
4229 *
4230 * "max_workers" is caller's limit on the number of workers. This typically
4231 * comes from a GUC.
4232 */
4233int
4234compute_parallel_worker(RelOptInfo *rel, double heap_pages, double index_pages,
4235 int max_workers)
4236{
4237 int parallel_workers = 0;
4238
4239 /*
4240 * If the user has set the parallel_workers reloption, use that; otherwise
4241 * select a default number of workers.
4242 */
4243 if (rel->rel_parallel_workers != -1)
4244 parallel_workers = rel->rel_parallel_workers;
4245 else
4246 {
4247 /*
4248 * If the number of pages being scanned is insufficient to justify a
4249 * parallel scan, just return zero ... unless it's an inheritance
4250 * child. In that case, we want to generate a parallel path here
4251 * anyway. It might not be worthwhile just for this relation, but
4252 * when combined with all of its inheritance siblings it may well pay
4253 * off.
4254 */
4255 if (rel->reloptkind == RELOPT_BASEREL &&
4256 ((heap_pages >= 0 && heap_pages < min_parallel_table_scan_size) ||
4257 (index_pages >= 0 && index_pages < min_parallel_index_scan_size)))
4258 return 0;
4259
4260 if (heap_pages >= 0)
4261 {
4262 int heap_parallel_threshold;
4263 int heap_parallel_workers = 1;
4264
4265 /*
4266 * Select the number of workers based on the log of the size of
4267 * the relation. This probably needs to be a good deal more
4268 * sophisticated, but we need something here for now. Note that
4269 * the upper limit of the min_parallel_table_scan_size GUC is
4270 * chosen to prevent overflow here.
4271 */
4272 heap_parallel_threshold = Max(min_parallel_table_scan_size, 1);
4273 while (heap_pages >= (BlockNumber) (heap_parallel_threshold * 3))
4274 {
4275 heap_parallel_workers++;
4276 heap_parallel_threshold *= 3;
4277 if (heap_parallel_threshold > INT_MAX / 3)
4278 break; /* avoid overflow */
4279 }
4280
4281 parallel_workers = heap_parallel_workers;
4282 }
4283
4284 if (index_pages >= 0)
4285 {
4286 int index_parallel_workers = 1;
4287 int index_parallel_threshold;
4288
4289 /* same calculation as for heap_pages above */
4290 index_parallel_threshold = Max(min_parallel_index_scan_size, 1);
4291 while (index_pages >= (BlockNumber) (index_parallel_threshold * 3))
4292 {
4293 index_parallel_workers++;
4294 index_parallel_threshold *= 3;
4295 if (index_parallel_threshold > INT_MAX / 3)
4296 break; /* avoid overflow */
4297 }
4298
4299 if (parallel_workers > 0)
4300 parallel_workers = Min(parallel_workers, index_parallel_workers);
4301 else
4302 parallel_workers = index_parallel_workers;
4303 }
4304 }
4305
4306 /* In no case use more than caller supplied maximum number of workers */
4307 parallel_workers = Min(parallel_workers, max_workers);
4308
4309 return parallel_workers;
4310}
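
/*
 * Worked example (illustrative, not part of allpaths.c): with the default
 * min_parallel_table_scan_size of 8MB (1024 pages of 8kB) and no
 * parallel_workers reloption, the heap-size loop above yields, before the
 * max_workers cap is applied:
 *
 *     heap_pages < 1024            0 workers (plain base rel)
 *     1024 <= heap_pages < 3072    1 worker
 *     3072 <= heap_pages < 9216    2 workers
 *     9216 <= heap_pages < 27648   3 workers
 *
 * i.e. one additional worker per tripling of the table size.
 */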
4311
4312/*
4313 * generate_partitionwise_join_paths
4314 * Create paths representing partitionwise join for given partitioned
4315 * join relation.
4316 *
4317 * This must not be called until after we are done adding paths for all
4318 * child-joins. Otherwise, add_path might delete a path to which some path
4319 * generated here has a reference.
4320 */
4321void
4322generate_partitionwise_join_paths(PlannerInfo *root, RelOptInfo *rel)
4323{
4324 List *live_children = NIL;
4325 int cnt_parts;
4326 int num_parts;
4327 RelOptInfo **part_rels;
4328
4329 /* Handle only join relations here. */
4330 if (!IS_JOIN_REL(rel))
4331 return;
4332
4333 /* We've nothing to do if the relation is not partitioned. */
4334 if (!IS_PARTITIONED_REL(rel))
4335 return;
4336
4337 /* The relation should have consider_partitionwise_join set. */
4338 Assert(rel->consider_partitionwise_join);
4339
4340 /* Guard against stack overflow due to overly deep partition hierarchy. */
4341 check_stack_depth();
4342
4343 num_parts = rel->nparts;
4344 part_rels = rel->part_rels;
4345
4346 /* Collect non-dummy child-joins. */
4347 for (cnt_parts = 0; cnt_parts < num_parts; cnt_parts++)
4348 {
4349 RelOptInfo *child_rel = part_rels[cnt_parts];
4350
4351 /* If it's been pruned entirely, it's certainly dummy. */
4352 if (child_rel == NULL)
4353 continue;
4354
4355 /* Make partitionwise join paths for this partitioned child-join. */
4356 generate_partitionwise_join_paths(root, child_rel);
4357
4358 /* If we failed to make any path for this child, we must give up. */
4359 if (child_rel->pathlist == NIL)
4360 {
4361 /*
4362 * Mark the parent joinrel as unpartitioned so that later
4363 * functions treat it correctly.
4364 */
4365 rel->nparts = 0;
4366 return;
4367 }
4368
4369 /* Else, identify the cheapest path for it. */
4370 set_cheapest(child_rel);
4371
4372 /* Dummy children need not be scanned, so ignore those. */
4373 if (IS_DUMMY_REL(child_rel))
4374 continue;
4375
4376#ifdef OPTIMIZER_DEBUG
4377 pprint(child_rel);
4378#endif
4379
4380 live_children = lappend(live_children, child_rel);
4381 }
4382
4383 /* If all child-joins are dummy, parent join is also dummy. */
4384 if (!live_children)
4385 {
4386 mark_dummy_rel(rel);
4387 return;
4388 }
4389
4390 /* Build additional paths for this rel from child-join paths. */
4391 add_paths_to_append_rel(root, rel, live_children);
4392 list_free(live_children);
4393}