/*-------------------------------------------------------------------------
 *
 * allpaths.c
 *    Routines to find possible search paths for processing a query
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/optimizer/path/allpaths.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include <limits.h>
#include <math.h>

#include "access/sysattr.h"
#include "access/tsmapi.h"
#include "catalog/pg_class.h"
#include "catalog/pg_operator.h"
#include "catalog/pg_proc.h"
#include "foreign/fdwapi.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "nodes/supportnodes.h"
#ifdef OPTIMIZER_DEBUG
#include "nodes/print.h"
#endif
#include "optimizer/appendinfo.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/geqo.h"
#include "optimizer/optimizer.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/plancat.h"
#include "optimizer/planner.h"
#include "optimizer/prep.h"
#include "optimizer/tlist.h"
#include "parser/parse_clause.h"
#include "parser/parsetree.h"
#include "partitioning/partbounds.h"
#include "port/pg_bitutils.h"
#include "rewrite/rewriteManip.h"
#include "utils/lsyscache.h"
#include "utils/selfuncs.h"


/* Bitmask flags for pushdown_safety_info.unsafeFlags */
#define UNSAFE_HAS_VOLATILE_FUNC        (1 << 0)
#define UNSAFE_HAS_SET_FUNC             (1 << 1)
#define UNSAFE_NOTIN_DISTINCTON_CLAUSE  (1 << 2)
#define UNSAFE_NOTIN_PARTITIONBY_CLAUSE (1 << 3)
#define UNSAFE_TYPE_MISMATCH            (1 << 4)

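/*
 * The flags combine as an ordinary bitmask.  For example, a subquery output
 * column containing a volatile function that also fails to appear in a
 * DISTINCT ON clause would have its unsafeFlags entry set to
 * (UNSAFE_HAS_VOLATILE_FUNC | UNSAFE_NOTIN_DISTINCTON_CLAUSE), and callers
 * test for a particular reason with a bitwise AND.
 */
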
/* results of subquery_is_pushdown_safe */
typedef struct pushdown_safety_info
{
    unsigned char *unsafeFlags; /* bitmask of reasons why this target list
                                 * column is unsafe for qual pushdown, or 0 if
                                 * no reason. */
    bool        unsafeVolatile; /* don't push down volatile quals */
    bool        unsafeLeaky;    /* don't push down leaky quals */
} pushdown_safety_info;

/* Return type for qual_is_pushdown_safe */
typedef enum pushdown_safe_type
{
    PUSHDOWN_UNSAFE,            /* unsafe to push qual into subquery */
    PUSHDOWN_SAFE,              /* safe to push qual into subquery */
    PUSHDOWN_WINDOWCLAUSE_RUNCOND,  /* unsafe, but may work as WindowClause
                                     * run condition */
} pushdown_safe_type;

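/*
 * As an example of the run-condition case: in
 *   SELECT * FROM (SELECT x, row_number() OVER (ORDER BY x) AS rn
 *                  FROM tab) ss
 *   WHERE rn <= 10;
 * the qual "rn <= 10" is unsafe to push into the subquery, but it can serve
 * as a WindowClause run condition allowing the WindowAgg to stop early.
 */
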
/* These parameters are set by GUC */
bool        enable_geqo = false;    /* just in case GUC doesn't set it */
int         geqo_threshold;
int         min_parallel_table_scan_size;
int         min_parallel_index_scan_size;

/* Hook for plugins to get control in set_rel_pathlist() */
set_rel_pathlist_hook_type set_rel_pathlist_hook = NULL;

/* Hook for plugins to replace standard_join_search() */
join_search_hook_type join_search_hook = NULL;


static void set_base_rel_consider_startup(PlannerInfo *root);
static void set_base_rel_sizes(PlannerInfo *root);
static void setup_simple_grouped_rels(PlannerInfo *root);
static void set_base_rel_pathlists(PlannerInfo *root);
static void set_rel_size(PlannerInfo *root, RelOptInfo *rel,
                         Index rti, RangeTblEntry *rte);
static void set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
                             Index rti, RangeTblEntry *rte);
static void set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel,
                               RangeTblEntry *rte);
static void set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
                                      RangeTblEntry *rte);
static void set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                   RangeTblEntry *rte);
static void create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel);
static void set_tablesample_rel_size(PlannerInfo *root, RelOptInfo *rel,
                                     RangeTblEntry *rte);
static void set_tablesample_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                         RangeTblEntry *rte);
static void set_foreign_size(PlannerInfo *root, RelOptInfo *rel,
                             RangeTblEntry *rte);
static void set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                 RangeTblEntry *rte);
static void set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
                                Index rti, RangeTblEntry *rte);
static void set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                    Index rti, RangeTblEntry *rte);
static void set_grouped_rel_pathlist(PlannerInfo *root, RelOptInfo *rel);
static void generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel,
                                         List *live_childrels,
                                         List *all_child_pathkeys);
static Path *get_cheapest_parameterized_child_path(PlannerInfo *root,
                                                   RelOptInfo *rel,
                                                   Relids required_outer);
static void accumulate_append_subpath(Path *path,
                                      List **subpaths,
                                      List **special_subpaths);
static Path *get_singleton_append_subpath(Path *path);
static void set_dummy_rel_pathlist(RelOptInfo *rel);
static void set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                  Index rti, RangeTblEntry *rte);
static void set_function_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                  RangeTblEntry *rte);
static void set_values_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                RangeTblEntry *rte);
static void set_tablefunc_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                   RangeTblEntry *rte);
static void set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel,
                             RangeTblEntry *rte);
static void set_namedtuplestore_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                         RangeTblEntry *rte);
static void set_result_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                RangeTblEntry *rte);
static void set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                   RangeTblEntry *rte);
static RelOptInfo *make_rel_from_joinlist(PlannerInfo *root, List *joinlist);
static bool subquery_is_pushdown_safe(Query *subquery, Query *topquery,
                                      pushdown_safety_info *safetyInfo);
static bool recurse_pushdown_safe(Node *setOp, Query *topquery,
                                  pushdown_safety_info *safetyInfo);
static void check_output_expressions(Query *subquery,
                                     pushdown_safety_info *safetyInfo);
static void compare_tlist_datatypes(List *tlist, List *colTypes,
                                    pushdown_safety_info *safetyInfo);
static bool targetIsInAllPartitionLists(TargetEntry *tle, Query *query);
static pushdown_safe_type qual_is_pushdown_safe(Query *subquery, Index rti,
                                                RestrictInfo *rinfo,
                                                pushdown_safety_info *safetyInfo);
static void subquery_push_qual(Query *subquery,
                               RangeTblEntry *rte, Index rti, Node *qual);
static void recurse_push_qual(Node *setOp, Query *topquery,
                              RangeTblEntry *rte, Index rti, Node *qual);
static void remove_unused_subquery_outputs(Query *subquery, RelOptInfo *rel,
                                           Bitmapset *extra_used_attrs);


/*
 * make_one_rel
 *    Finds all possible access paths for executing a query, returning a
 *    single rel that represents the join of all base rels in the query.
 */
RelOptInfo *
make_one_rel(PlannerInfo *root, List *joinlist)
{
    RelOptInfo *rel;
    Index       rti;
    double      total_pages;

    /* Mark base rels as to whether we care about fast-start plans */
    set_base_rel_consider_startup(root);

    /*
     * Compute size estimates and consider_parallel flags for each base rel.
     */
    set_base_rel_sizes(root);

    /*
     * Build grouped relations for simple rels (i.e., base or "other" member
     * relations) where possible.
     */
    setup_simple_grouped_rels(root);

    /*
     * We should now have size estimates for every actual table involved in
     * the query, and we also know which if any have been deleted from the
     * query by join removal, pruned by partition pruning, or eliminated by
     * constraint exclusion.  So we can now compute total_table_pages.
     *
     * Note that appendrels are not double-counted here, even though we don't
     * bother to distinguish RelOptInfos for appendrel parents, because the
     * parents will have pages = 0.
     *
     * XXX if a table is self-joined, we will count it once per appearance,
     * which perhaps is the wrong thing ... but that's not completely clear,
     * and detecting self-joins here is difficult, so ignore it for now.
     */
    total_pages = 0;
    for (rti = 1; rti < root->simple_rel_array_size; rti++)
    {
        RelOptInfo *brel = root->simple_rel_array[rti];

        /* there may be empty slots corresponding to non-baserel RTEs */
        if (brel == NULL)
            continue;

        Assert(brel->relid == rti); /* sanity check on array */

        if (IS_DUMMY_REL(brel))
            continue;

        if (IS_SIMPLE_REL(brel))
            total_pages += (double) brel->pages;
    }
    root->total_table_pages = total_pages;

    /*
     * Generate access paths for each base rel.
     */
    set_base_rel_pathlists(root);

    /*
     * Generate access paths for the entire join tree.
     */
    rel = make_rel_from_joinlist(root, joinlist);

    /*
     * The result should join all and only the query's base + outer-join rels.
     */
    Assert(bms_equal(rel->relids, root->all_query_rels));

    return rel;
}

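/*
 * (For context: root->total_table_pages computed above is consumed by
 * costsize.c --- see index_pages_fetched() --- where it feeds the
 * Mackert-Lohman formula estimating how much of a table is likely to stay
 * cached across repeated page fetches.)
 */
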
/*
 * set_base_rel_consider_startup
 *    Set the consider_[param_]startup flags for each base-relation entry.
 *
 * For the moment, we only deal with consider_param_startup here; because the
 * logic for consider_startup is pretty trivial and is the same for every base
 * relation, we just let build_simple_rel() initialize that flag correctly to
 * start with.  If that logic ever gets more complicated it would probably
 * be better to move it here.
 */
static void
set_base_rel_consider_startup(PlannerInfo *root)
{
    /*
     * Since parameterized paths can only be used on the inside of a nestloop
     * join plan, there is usually little value in considering fast-start
     * plans for them.  However, for relations that are on the RHS of a SEMI
     * or ANTI join, a fast-start plan can be useful because we're only going
     * to care about fetching one tuple anyway.
     *
     * To minimize growth of planning time, we currently restrict this to
     * cases where the RHS is a single base relation, not a join; there is no
     * provision for consider_param_startup to get set at all on joinrels.
     * Also we don't worry about appendrels.  costsize.c's costing rules for
     * nestloop semi/antijoins don't consider such cases either.
     */
    ListCell   *lc;

    foreach(lc, root->join_info_list)
    {
        SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(lc);
        int         varno;

        if ((sjinfo->jointype == JOIN_SEMI || sjinfo->jointype == JOIN_ANTI) &&
            bms_get_singleton_member(sjinfo->min_righthand, &varno))
        {
            RelOptInfo *rel = find_base_rel(root, varno);

            rel->consider_param_startup = true;
        }
    }
}

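/*
 * For example, a query such as
 *   SELECT * FROM t1 WHERE EXISTS (SELECT 1 FROM t2 WHERE t2.x = t1.x);
 * is planned as a semijoin with t2 alone on the RHS, so t2 gets
 * consider_param_startup = true: a parameterized fast-start path on t2 is
 * attractive because each outer row needs at most one matching inner tuple.
 */
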
/*
 * set_base_rel_sizes
 *    Set the size estimates (rows and widths) for each base-relation entry.
 *    Also determine whether to consider parallel paths for base relations.
 *
 * We do this in a separate pass over the base rels so that rowcount
 * estimates are available for parameterized path generation, and also so
 * that each rel's consider_parallel flag is set correctly before we begin to
 * generate paths.
 */
static void
set_base_rel_sizes(PlannerInfo *root)
{
    Index       rti;

    for (rti = 1; rti < root->simple_rel_array_size; rti++)
    {
        RelOptInfo *rel = root->simple_rel_array[rti];
        RangeTblEntry *rte;

        /* there may be empty slots corresponding to non-baserel RTEs */
        if (rel == NULL)
            continue;

        Assert(rel->relid == rti);  /* sanity check on array */

        /* ignore RTEs that are "other rels" */
        if (rel->reloptkind != RELOPT_BASEREL)
            continue;

        rte = root->simple_rte_array[rti];

        /*
         * If parallelism is allowable for this query in general, see whether
         * it's allowable for this rel in particular.  We have to do this
         * before set_rel_size(), because (a) if this rel is an inheritance
         * parent, set_append_rel_size() will use and perhaps change the rel's
         * consider_parallel flag, and (b) for some RTE types, set_rel_size()
         * goes ahead and makes paths immediately.
         */
        if (root->glob->parallelModeOK)
            set_rel_consider_parallel(root, rel, rte);

        set_rel_size(root, rel, rti, rte);
    }
}

/*
 * setup_simple_grouped_rels
 *    For each simple relation, build a grouped simple relation if eager
 *    aggregation is possible and if this relation can produce grouped paths.
 */
static void
setup_simple_grouped_rels(PlannerInfo *root)
{
    Index       rti;

    /*
     * If there are no aggregate expressions or grouping expressions, eager
     * aggregation is not possible.
     */
    if (root->agg_clause_list == NIL ||
        root->group_expr_list == NIL)
        return;

    for (rti = 1; rti < root->simple_rel_array_size; rti++)
    {
        RelOptInfo *rel = root->simple_rel_array[rti];

        /* there may be empty slots corresponding to non-baserel RTEs */
        if (rel == NULL)
            continue;

        Assert(rel->relid == rti);  /* sanity check on array */
        Assert(IS_SIMPLE_REL(rel)); /* sanity check on rel */

        (void) build_simple_grouped_rel(root, rel);
    }
}

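/*
 * As an illustration of eager aggregation (hypothetical query): given
 *   SELECT a.x, sum(b.y) FROM a JOIN b ON a.x = b.x GROUP BY a.x;
 * building a grouped relation for "b" lets the planner consider partially
 * aggregating sum(b.y) below the join, so the join then sees one
 * partially-aggregated row per value of b.x rather than every row of b.
 */
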
/*
 * set_base_rel_pathlists
 *    Finds all paths available for scanning each base-relation entry.
 *    Sequential scan and any available indices are considered.
 *    Each useful path is attached to its relation's 'pathlist' field.
 */
static void
set_base_rel_pathlists(PlannerInfo *root)
{
    Index       rti;

    for (rti = 1; rti < root->simple_rel_array_size; rti++)
    {
        RelOptInfo *rel = root->simple_rel_array[rti];

        /* there may be empty slots corresponding to non-baserel RTEs */
        if (rel == NULL)
            continue;

        Assert(rel->relid == rti);  /* sanity check on array */

        /* ignore RTEs that are "other rels" */
        if (rel->reloptkind != RELOPT_BASEREL)
            continue;

        set_rel_pathlist(root, rel, rti, root->simple_rte_array[rti]);
    }
}

/*
 * set_rel_size
 *    Set size estimates for a base relation
 */
static void
set_rel_size(PlannerInfo *root, RelOptInfo *rel,
             Index rti, RangeTblEntry *rte)
{
    if (rel->reloptkind == RELOPT_BASEREL &&
        relation_excluded_by_constraints(root, rel, rte))
    {
        /*
         * We proved we don't need to scan the rel via constraint exclusion,
         * so set up a single dummy path for it.  Here we only check this for
         * regular baserels; if it's an otherrel, CE was already checked in
         * set_append_rel_size().
         *
         * In this case, we go ahead and set up the relation's path right away
         * instead of leaving it for set_rel_pathlist to do.  This is because
         * we don't have a convention for marking a rel as dummy except by
         * assigning a dummy path to it.
         */
        set_dummy_rel_pathlist(rel);
    }
    else if (rte->inh)
    {
        /* It's an "append relation", process accordingly */
        set_append_rel_size(root, rel, rti, rte);
    }
    else
    {
        switch (rel->rtekind)
        {
            case RTE_RELATION:
                if (rte->relkind == RELKIND_FOREIGN_TABLE)
                {
                    /* Foreign table */
                    set_foreign_size(root, rel, rte);
                }
                else if (rte->relkind == RELKIND_PARTITIONED_TABLE)
                {
                    /*
                     * We could get here if asked to scan a partitioned table
                     * with ONLY.  In that case we shouldn't scan any of the
                     * partitions, so mark it as a dummy rel.
                     */
                    set_dummy_rel_pathlist(rel);
                }
                else if (rte->tablesample != NULL)
                {
                    /* Sampled relation */
                    set_tablesample_rel_size(root, rel, rte);
                }
                else
                {
                    /* Plain relation */
                    set_plain_rel_size(root, rel, rte);
                }
                break;
            case RTE_SUBQUERY:

                /*
                 * Subqueries don't support making a choice between
                 * parameterized and unparameterized paths, so just go ahead
                 * and build their paths immediately.
                 */
                set_subquery_pathlist(root, rel, rti, rte);
                break;
            case RTE_FUNCTION:
                set_function_size_estimates(root, rel);
                break;
            case RTE_TABLEFUNC:
                set_tablefunc_size_estimates(root, rel);
                break;
            case RTE_VALUES:
                set_values_size_estimates(root, rel);
                break;
            case RTE_CTE:

                /*
                 * CTEs don't support making a choice between parameterized
                 * and unparameterized paths, so just go ahead and build their
                 * paths immediately.
                 */
                if (rte->self_reference)
                    set_worktable_pathlist(root, rel, rte);
                else
                    set_cte_pathlist(root, rel, rte);
                break;
            case RTE_NAMEDTUPLESTORE:
                /* Might as well just build the path immediately */
                set_namedtuplestore_pathlist(root, rel, rte);
                break;
            case RTE_RESULT:
                /* Might as well just build the path immediately */
                set_result_pathlist(root, rel, rte);
                break;
            default:
                elog(ERROR, "unexpected rtekind: %d", (int) rel->rtekind);
                break;
        }
    }

    /*
     * We insist that all non-dummy rels have a nonzero rowcount estimate.
     */
    Assert(rel->rows > 0 || IS_DUMMY_REL(rel));
}

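/*
 * As an example of the constraint-exclusion case above: for a table with
 * CHECK (x > 0), a query qual x < 0 contradicts the constraint, so
 * relation_excluded_by_constraints() reports true and the rel is given a
 * dummy (provably empty) path instead of ever being scanned.
 */
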
/*
 * set_rel_pathlist
 *    Build access paths for a base relation
 */
static void
set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
                 Index rti, RangeTblEntry *rte)
{
    if (IS_DUMMY_REL(rel))
    {
        /* We already proved the relation empty, so nothing more to do */
    }
    else if (rte->inh)
    {
        /* It's an "append relation", process accordingly */
        set_append_rel_pathlist(root, rel, rti, rte);
    }
    else
    {
        switch (rel->rtekind)
        {
            case RTE_RELATION:
                if (rte->relkind == RELKIND_FOREIGN_TABLE)
                {
                    /* Foreign table */
                    set_foreign_pathlist(root, rel, rte);
                }
                else if (rte->tablesample != NULL)
                {
                    /* Sampled relation */
                    set_tablesample_rel_pathlist(root, rel, rte);
                }
                else
                {
                    /* Plain relation */
                    set_plain_rel_pathlist(root, rel, rte);
                }
                break;
            case RTE_SUBQUERY:
                /* Subquery --- fully handled during set_rel_size */
                break;
            case RTE_FUNCTION:
                /* RangeFunction */
                set_function_pathlist(root, rel, rte);
                break;
            case RTE_TABLEFUNC:
                /* Table Function */
                set_tablefunc_pathlist(root, rel, rte);
                break;
            case RTE_VALUES:
                /* Values list */
                set_values_pathlist(root, rel, rte);
                break;
            case RTE_CTE:
                /* CTE reference --- fully handled during set_rel_size */
                break;
            case RTE_NAMEDTUPLESTORE:
                /* tuplestore reference --- fully handled during set_rel_size */
                break;
            case RTE_RESULT:
                /* simple Result --- fully handled during set_rel_size */
                break;
            default:
                elog(ERROR, "unexpected rtekind: %d", (int) rel->rtekind);
                break;
        }
    }

    /*
     * Allow a plugin to editorialize on the set of Paths for this base
     * relation.  It could add new paths (such as CustomPaths) by calling
     * add_path(), or add_partial_path() if parallel aware.  It could also
     * delete or modify paths added by the core code.
     */
    if (set_rel_pathlist_hook)
        (*set_rel_pathlist_hook) (root, rel, rti, rte);

    /*
     * If this is a baserel, we should normally consider gathering any partial
     * paths we may have created for it.  We have to do this after calling the
     * set_rel_pathlist_hook, else it cannot add partial paths to be included
     * here.
     *
     * However, if this is an inheritance child, skip it.  Otherwise, we could
     * end up with a very large number of gather nodes, each trying to grab
     * its own pool of workers.  Instead, we'll consider gathering partial
     * paths for the parent appendrel.
     *
     * Also, if this is the topmost scan/join rel, we postpone gathering until
     * the final scan/join targetlist is available (see grouping_planner).
     */
    if (rel->reloptkind == RELOPT_BASEREL &&
        !bms_equal(rel->relids, root->all_query_rels))
        generate_useful_gather_paths(root, rel, false);

    /* Now find the cheapest of the paths for this rel */
    set_cheapest(rel);

    /*
     * If a grouped relation for this rel exists, build partial aggregation
     * paths for it.
     *
     * Note that this can only happen after we've called set_cheapest() for
     * this base rel, because we need its cheapest paths.
     */
    set_grouped_rel_pathlist(root, rel);

#ifdef OPTIMIZER_DEBUG
    pprint(rel);
#endif
}

/*
 * set_plain_rel_size
 *    Set size estimates for a plain relation (no subquery, no inheritance)
 */
static void
set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
    /*
     * Test any partial indexes of rel for applicability.  We must do this
     * first since partial unique indexes can affect size estimates.
     */
    check_index_predicates(root, rel);

    /* Mark rel with estimated output rows, width, etc */
    set_baserel_size_estimates(root, rel);
}

/*
 * If this relation could possibly be scanned from within a worker, then set
 * its consider_parallel flag.
 */
static void
set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
                          RangeTblEntry *rte)
{
    /*
     * The flag has previously been initialized to false, so we can just
     * return if it becomes clear that we can't safely set it.
     */
    Assert(!rel->consider_parallel);

    /* Don't call this if parallelism is disallowed for the entire query. */
    Assert(root->glob->parallelModeOK);

    /* This should only be called for baserels and appendrel children. */
    Assert(IS_SIMPLE_REL(rel));

    /* Assorted checks based on rtekind. */
    switch (rte->rtekind)
    {
        case RTE_RELATION:

            /*
             * Currently, parallel workers can't access the leader's temporary
             * tables.  We could possibly relax this if we wrote all of its
             * local buffers at the start of the query and made no changes
             * thereafter (maybe we could allow hint bit changes), and if we
             * taught the workers to read them.  Writing a large number of
             * temporary buffers could be expensive, though, and we don't have
             * the rest of the necessary infrastructure right now anyway.  So
             * for now, bail out if we see a temporary table.
             */
            if (get_rel_persistence(rte->relid) == RELPERSISTENCE_TEMP)
                return;

            /*
             * Table sampling can be pushed down to workers if the sample
             * function and its arguments are safe.
             */
            if (rte->tablesample != NULL)
            {
                char        proparallel = func_parallel(rte->tablesample->tsmhandler);

                if (proparallel != PROPARALLEL_SAFE)
                    return;
                if (!is_parallel_safe(root, (Node *) rte->tablesample->args))
                    return;
            }

            /*
             * Ask FDWs whether they can support performing a ForeignScan
             * within a worker.  Most often, the answer will be no.  For
             * example, if the nature of the FDW is such that it opens a TCP
             * connection with a remote server, each parallel worker would end
             * up with a separate connection, and these connections might not
             * be appropriately coordinated between workers and the leader.
             */
            if (rte->relkind == RELKIND_FOREIGN_TABLE)
            {
                Assert(rel->fdwroutine);
                if (!rel->fdwroutine->IsForeignScanParallelSafe)
                    return;
                if (!rel->fdwroutine->IsForeignScanParallelSafe(root, rel, rte))
                    return;
            }

            /*
             * There are additional considerations for appendrels, which we'll
             * deal with in set_append_rel_size and set_append_rel_pathlist.
             * For now, just set consider_parallel based on the rel's own
             * quals and targetlist.
             */
            break;

        case RTE_SUBQUERY:

            /*
             * There's no intrinsic problem with scanning a subquery-in-FROM
             * (as distinct from a SubPlan or InitPlan) in a parallel worker.
             * If the subquery doesn't happen to have any parallel-safe paths,
             * then flagging it as consider_parallel won't change anything,
             * but that's true for plain tables, too.  We must set
             * consider_parallel based on the rel's own quals and targetlist,
             * so that if a subquery path is parallel-safe but the quals and
             * projection we're sticking onto it are not, we correctly mark
             * the SubqueryScanPath as not parallel-safe.  (Note that
             * set_subquery_pathlist() might push some of these quals down
             * into the subquery itself, but that doesn't change anything.)
             *
             * We can't push sub-select containing LIMIT/OFFSET to workers as
             * there is no guarantee that the row order will be fully
             * deterministic, and applying LIMIT/OFFSET will lead to
             * inconsistent results at the top-level.  (In some cases, where
             * the result is ordered, we could relax this restriction.  But it
             * doesn't currently seem worth expending extra effort to do so.)
             */
            {
                Query      *subquery = castNode(Query, rte->subquery);

                if (limit_needed(subquery))
                    return;
            }
            break;

        case RTE_JOIN:
            /* Shouldn't happen; we're only considering baserels here. */
            Assert(false);
            return;

        case RTE_FUNCTION:
            /* Check for parallel-restricted functions. */
            if (!is_parallel_safe(root, (Node *) rte->functions))
                return;
            break;

        case RTE_TABLEFUNC:
            /* not parallel safe */
            return;

        case RTE_VALUES:
            /* Check for parallel-restricted functions. */
            if (!is_parallel_safe(root, (Node *) rte->values_lists))
                return;
            break;

        case RTE_CTE:

            /*
             * CTE tuplestores aren't shared among parallel workers, so we
             * force all CTE scans to happen in the leader.  Also, populating
             * the CTE would require executing a subplan that's not available
             * in the worker, might be parallel-restricted, and must get
             * executed only once.
             */
            return;

        case RTE_NAMEDTUPLESTORE:

            /*
             * tuplestore cannot be shared, at least without more
             * infrastructure to support that.
             */
            return;

        case RTE_RESULT:
            /* RESULT RTEs, in themselves, are no problem. */
            break;
        case RTE_GROUP:
            /* Shouldn't happen; we're only considering baserels here. */
            Assert(false);
            return;
    }

    /*
     * If there's anything in baserestrictinfo that's parallel-restricted, we
     * give up on parallelizing access to this relation.  We could consider
     * instead postponing application of the restricted quals until we're
     * above all the parallelism in the plan tree, but it's not clear that
     * that would be a win in very many cases, and it might be tricky to make
     * outer join clauses work correctly.  It would likely break equivalence
     * classes, too.
     */
    if (!is_parallel_safe(root, (Node *) rel->baserestrictinfo))
        return;

    /*
     * Likewise, if the relation's outputs are not parallel-safe, give up.
     * (Usually, they're just Vars, but sometimes they're not.)
     */
    if (!is_parallel_safe(root, (Node *) rel->reltarget->exprs))
        return;

    /* We have a winner. */
    rel->consider_parallel = true;
}

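/*
 * For example, under the rules above a temporary table's rel is never
 * marked consider_parallel, and neither is a rel whose baserestrictinfo
 * quals or output expressions contain parallel-restricted or
 * parallel-unsafe functions, per the is_parallel_safe() checks at the end.
 */
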
/*
 * set_plain_rel_pathlist
 *    Build access paths for a plain relation (no subquery, no inheritance)
 */
static void
set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
    Relids      required_outer;

    /*
     * We don't support pushing join clauses into the quals of a seqscan, but
     * it could still have required parameterization due to LATERAL refs in
     * its tlist.
     */
    required_outer = rel->lateral_relids;

    /*
     * Consider TID scans.
     *
     * If create_tidscan_paths returns true, then a TID scan path is forced.
     * This happens when rel->baserestrictinfo contains CurrentOfExpr, because
     * the executor can't handle any other type of path for such queries.
     * Hence, we return without adding any other paths.
     */
    if (create_tidscan_paths(root, rel))
        return;

    /* Consider sequential scan */
    add_path(rel, create_seqscan_path(root, rel, required_outer, 0));

    /* If appropriate, consider parallel sequential scan */
    if (rel->consider_parallel && required_outer == NULL)
        create_plain_partial_paths(root, rel);

    /* Consider index scans */
    create_index_paths(root, rel);
}

/*
 * create_plain_partial_paths
 *    Build partial access paths for parallel scan of a plain relation
 */
static void
create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel)
{
    int         parallel_workers;

    parallel_workers = compute_parallel_worker(rel, rel->pages, -1,
                                               max_parallel_workers_per_gather);

    /* If any limit was set to zero, the user doesn't want a parallel scan. */
    if (parallel_workers <= 0)
        return;

    /* Add an unordered partial path based on a parallel sequential scan. */
    add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers));
}

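/*
 * Roughly (see compute_parallel_worker() for the authoritative logic): the
 * worker count grows logarithmically with table size.  With the default
 * min_parallel_table_scan_size of 8MB (1024 pages), a 1024-page heap gets
 * one worker and each tripling in size adds another, capped by
 * max_parallel_workers_per_gather.
 */
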
/*
 * set_tablesample_rel_size
 *    Set size estimates for a sampled relation
 */
static void
set_tablesample_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
    TableSampleClause *tsc = rte->tablesample;
    TsmRoutine *tsm;
    BlockNumber pages;
    double      tuples;

    /*
     * Test any partial indexes of rel for applicability.  We must do this
     * first since partial unique indexes can affect size estimates.
     */
    check_index_predicates(root, rel);

    /*
     * Call the sampling method's estimation function to estimate the number
     * of pages it will read and the number of tuples it will return.  (Note:
     * we assume the function returns sane values.)
     */
    tsm = GetTsmRoutine(tsc->tsmhandler);
    tsm->SampleScanGetSampleSize(root, rel, tsc->args,
                                 &pages, &tuples);

    /*
     * For the moment, because we will only consider a SampleScan path for the
     * rel, it's okay to just overwrite the pages and tuples estimates for the
     * whole relation.  If we ever consider multiple path types for sampled
     * rels, we'll need more complication.
     */
    rel->pages = pages;
    rel->tuples = tuples;

    /* Mark rel with estimated output rows, width, etc */
    set_baserel_size_estimates(root, rel);
}

/*
 * set_tablesample_rel_pathlist
 *    Build access paths for a sampled relation
 */
static void
set_tablesample_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
    Relids      required_outer;
    Path       *path;

    /*
     * We don't support pushing join clauses into the quals of a samplescan,
     * but it could still have required parameterization due to LATERAL refs
     * in its tlist or TABLESAMPLE arguments.
     */
    required_outer = rel->lateral_relids;

    /* Consider sampled scan */
    path = create_samplescan_path(root, rel, required_outer);

    /*
     * If the sampling method does not support repeatable scans, we must avoid
     * plans that would scan the rel multiple times.  Ideally, we'd simply
     * avoid putting the rel on the inside of a nestloop join; but adding such
     * a consideration to the planner seems like a great deal of complication
     * to support an uncommon usage of second-rate sampling methods.  Instead,
     * if there is a risk that the query might perform an unsafe join, just
     * wrap the SampleScan in a Materialize node.  We can check for joins by
     * counting the membership of all_query_rels (note that this correctly
     * counts inheritance trees as single rels).  If we're inside a subquery,
     * we can't easily check whether a join might occur in the outer query, so
     * just assume one is possible.
     *
     * GetTsmRoutine is relatively expensive compared to the other tests here,
     * so check repeatable_across_scans last, even though that's a bit odd.
     */
    if ((root->query_level > 1 ||
         bms_membership(root->all_query_rels) != BMS_SINGLETON) &&
        !(GetTsmRoutine(rte->tablesample->tsmhandler)->repeatable_across_scans))
    {
        path = (Path *) create_material_path(rel, path);
    }

    add_path(rel, path);

    /* For the moment, at least, there are no other paths to consider */
}

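/*
 * For example, the built-in SYSTEM and BERNOULLI sampling methods are
 * repeatable across scans, so their SampleScans never need this treatment;
 * a method that reports repeatable_across_scans = false (the contrib
 * module tsm_system_time is one such case) gets its SampleScan wrapped in
 * a Materialize node whenever a join might rescan it.
 */
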
/*
 * set_foreign_size
 *    Set size estimates for a foreign table RTE
 */
static void
set_foreign_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
    /* Mark rel with estimated output rows, width, etc */
    set_foreign_size_estimates(root, rel);

    /* Let FDW adjust the size estimates, if it can */
    rel->fdwroutine->GetForeignRelSize(root, rel, rte->relid);

    /* ... but do not let it set the rows estimate to zero */
    rel->rows = clamp_row_est(rel->rows);

    /*
     * Also, make sure rel->tuples is not insane relative to rel->rows.
     * Notably, this ensures sanity if pg_class.reltuples contains -1 and the
     * FDW doesn't do anything to replace that.
     */
    rel->tuples = Max(rel->tuples, rel->rows);
}

/*
 * set_foreign_pathlist
 *    Build access paths for a foreign table RTE
 */
static void
set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
    /* Call the FDW's GetForeignPaths function to generate path(s) */
    rel->fdwroutine->GetForeignPaths(root, rel, rte->relid);
}

/*
 * set_append_rel_size
 *    Set size estimates for a simple "append relation"
 *
 * The passed-in rel and RTE represent the entire append relation.  The
 * relation's contents are computed by appending together the output of the
 * individual member relations.  Note that in the non-partitioned inheritance
 * case, the first member relation is actually the same table as is mentioned
 * in the parent RTE ... but it has a different RTE and RelOptInfo.  This is
 * a good thing because their outputs are not the same size.
 */
static void
set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
                    Index rti, RangeTblEntry *rte)
{
    int         parentRTindex = rti;
    bool        has_live_children;
    double      parent_tuples;
    double      parent_rows;
    double      parent_size;
    double     *parent_attrsizes;
    int         nattrs;
    ListCell   *l;

    /* Guard against stack overflow due to overly deep inheritance tree. */
    check_stack_depth();

    Assert(IS_SIMPLE_REL(rel));

    /*
     * If this is a partitioned baserel, set the consider_partitionwise_join
     * flag; currently, we only consider partitionwise joins with the baserel
     * if its targetlist doesn't contain a whole-row Var.
     */
    if (enable_partitionwise_join &&
        rel->reloptkind == RELOPT_BASEREL &&
        rte->relkind == RELKIND_PARTITIONED_TABLE &&
        bms_is_empty(rel->attr_needed[InvalidAttrNumber - rel->min_attr]))
        rel->consider_partitionwise_join = true;

    /*
     * Initialize to compute size estimates for whole append relation.
     *
     * We handle tuples estimates by setting "tuples" to the total number of
     * tuples accumulated from each live child, rather than using "rows".
     * Although an appendrel itself doesn't directly enforce any quals, its
     * child relations may.  Therefore, setting "tuples" equal to "rows" for
     * an appendrel isn't always appropriate, and can lead to inaccurate cost
     * estimates.  For example, when estimating the number of distinct values
     * from an appendrel, we would be unable to adjust the estimate based on
     * the restriction selectivity (see estimate_num_groups).
     *
     * We handle width estimates by weighting the widths of different child
     * rels proportionally to their number of rows.  This is sensible because
     * the use of width estimates is mainly to compute the total relation
     * "footprint" if we have to sort or hash it.  To do this, we sum the
     * total equivalent size (in "double" arithmetic) and then divide by the
     * total rowcount estimate.  This is done separately for the total rel
     * width and each attribute.
     *
     * Note: if you consider changing this logic, beware that child rels could
     * have zero rows and/or width, if they were excluded by constraints.
     */
    has_live_children = false;
    parent_tuples = 0;
    parent_rows = 0;
    parent_size = 0;
    nattrs = rel->max_attr - rel->min_attr + 1;
    parent_attrsizes = (double *) palloc0(nattrs * sizeof(double));

    foreach(l, root->append_rel_list)
    {
        AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
        int         childRTindex;
        RangeTblEntry *childRTE;
        RelOptInfo *childrel;
        List       *childrinfos;
        ListCell   *parentvars;
        ListCell   *childvars;
        ListCell   *lc;

        /* append_rel_list contains all append rels; ignore others */
        if (appinfo->parent_relid != parentRTindex)
            continue;

        childRTindex = appinfo->child_relid;
        childRTE = root->simple_rte_array[childRTindex];

        /*
         * The child rel's RelOptInfo was already created during
         * add_other_rels_to_query.
         */
        childrel = find_base_rel(root, childRTindex);
        Assert(childrel->reloptkind == RELOPT_OTHER_MEMBER_REL);

        /* We may have already proven the child to be dummy. */
        if (IS_DUMMY_REL(childrel))
            continue;

        /*
         * We have to copy the parent's targetlist and quals to the child,
         * with appropriate substitution of variables.  However, the
         * baserestrictinfo quals were already copied/substituted when the
         * child RelOptInfo was built.  So we don't need any additional setup
         * before applying constraint exclusion.
         */
        if (relation_excluded_by_constraints(root, childrel, childRTE))
        {
            /*
             * This child need not be scanned, so we can omit it from the
             * appendrel.
             */
            set_dummy_rel_pathlist(childrel);
            continue;
        }

        /*
         * Constraint exclusion failed, so copy the parent's join quals and
         * targetlist to the child, with appropriate variable substitutions.
         *
         * We skip join quals that came from above outer joins that can null
         * this rel, since they would be of no value while generating paths
         * for the child.  This saves some effort while processing the child
         * rel, and it also avoids an implementation restriction in
         * adjust_appendrel_attrs (it can't apply nullingrels to a non-Var).
         */
        childrinfos = NIL;
        foreach(lc, rel->joininfo)
        {
            RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);

            if (!bms_overlap(rinfo->clause_relids, rel->nulling_relids))
                childrinfos = lappend(childrinfos,
                                      adjust_appendrel_attrs(root,
                                                             (Node *) rinfo,
                                                             1, &appinfo));
        }
        childrel->joininfo = childrinfos;

        /*
         * Now for the child's targetlist.
         *
         * NB: the resulting childrel->reltarget->exprs may contain arbitrary
         * expressions, which otherwise would not occur in a rel's targetlist.
         * Code that might be looking at an appendrel child must cope with
         * such.  (Normally, a rel's targetlist would only include Vars and
         * PlaceHolderVars.)  XXX we do not bother to update the cost or width
         * fields of childrel->reltarget; not clear if that would be useful.
         */
        childrel->reltarget->exprs = (List *)
            adjust_appendrel_attrs(root,
                                   (Node *) rel->reltarget->exprs,
                                   1, &appinfo);

        /*
         * We have to make child entries in the EquivalenceClass data
         * structures as well.  This is needed either if the parent
         * participates in some eclass joins (because we will want to consider
         * inner-indexscan joins on the individual children) or if the parent
         * has useful pathkeys (because we should try to build MergeAppend
         * paths that produce those sort orderings).
         */
        if (rel->has_eclass_joins || has_useful_pathkeys(root, rel))
            add_child_rel_equivalences(root, appinfo, rel, childrel);
        childrel->has_eclass_joins = rel->has_eclass_joins;

        /*
         * Note: we could compute appropriate attr_needed data for the child's
         * variables, by transforming the parent's attr_needed through the
         * translated_vars mapping.  However, currently there's no need
         * because attr_needed is only examined for base relations not
         * otherrels.  So we just leave the child's attr_needed empty.
         */

        /*
         * If we consider partitionwise joins with the parent rel, do the same
         * for partitioned child rels.
         *
         * Note: here we abuse the consider_partitionwise_join flag by setting
         * it for child rels that are not themselves partitioned.  We do so to
         * tell try_partitionwise_join() that the child rel is sufficiently
         * valid to be used as a per-partition input, even if it later gets
         * proven to be dummy.  (It's not usable until we've set up the
         * reltarget and EC entries, which we just did.)
         */
        if (rel->consider_partitionwise_join)
            childrel->consider_partitionwise_join = true;

        /*
         * If parallelism is allowable for this query in general, see whether
         * it's allowable for this childrel in particular.  But if we've
         * already decided the appendrel is not parallel-safe as a whole,
         * there's no point in considering parallelism for this child.  For
         * consistency, do this before calling set_rel_size() for the child.
         */
        if (root->glob->parallelModeOK && rel->consider_parallel)
            set_rel_consider_parallel(root, childrel, childRTE);

        /*
         * Compute the child's size.
         */
        set_rel_size(root, childrel, childRTindex, childRTE);

        /*
         * It is possible that constraint exclusion detected a contradiction
         * within a child subquery, even though we didn't prove one above. If
         * so, we can skip this child.
         */
        if (IS_DUMMY_REL(childrel))
            continue;

        /* We have at least one live child. */
        has_live_children = true;

        /*
         * If any live child is not parallel-safe, treat the whole appendrel
         * as not parallel-safe.  In future we might be able to generate plans
         * in which some children are farmed out to workers while others are
         * not; but we don't have that today, so it's a waste to consider
         * partial paths anywhere in the appendrel unless it's all safe.
         * (Child rels visited before this one will be unmarked in
         * set_append_rel_pathlist().)
         */
        if (!childrel->consider_parallel)
            rel->consider_parallel = false;

        /*
         * Accumulate size information from each live child.
         */
        Assert(childrel->rows > 0);

        parent_tuples += childrel->tuples;
        parent_rows += childrel->rows;
        parent_size += childrel->reltarget->width * childrel->rows;

        /*
         * Accumulate per-column estimates too.  We need not do anything for
         * PlaceHolderVars in the parent list.  If child expression isn't a
         * Var, or we didn't record a width estimate for it, we have to fall
         * back on a datatype-based estimate.
         *
         * By construction, child's targetlist is 1-to-1 with parent's.
         */
        forboth(parentvars, rel->reltarget->exprs,
                childvars, childrel->reltarget->exprs)
        {
            Var        *parentvar = (Var *) lfirst(parentvars);
            Node       *childvar = (Node *) lfirst(childvars);

            if (IsA(parentvar, Var) && parentvar->varno == parentRTindex)
            {
                int         pndx = parentvar->varattno - rel->min_attr;
                int32       child_width = 0;

                if (IsA(childvar, Var) &&
                    ((Var *) childvar)->varno == childrel->relid)
                {
                    int         cndx = ((Var *) childvar)->varattno - childrel->min_attr;

                    child_width = childrel->attr_widths[cndx];
                }
                if (child_width <= 0)
                    child_width = get_typavgwidth(exprType(childvar),
                                                  exprTypmod(childvar));
                Assert(child_width > 0);
                parent_attrsizes[pndx] += child_width * childrel->rows;
            }
        }
    }

    if (has_live_children)
    {
        /*
         * Save the finished size estimates.
         */
        int         i;

        Assert(parent_rows > 0);
        rel->tuples = parent_tuples;
        rel->rows = parent_rows;
        rel->reltarget->width = rint(parent_size / parent_rows);
        for (i = 0; i < nattrs; i++)
            rel->attr_widths[i] = rint(parent_attrsizes[i] / parent_rows);

        /*
         * Note that we leave rel->pages as zero; this is important to avoid
         * double-counting the appendrel tree in total_table_pages.
         */
    }
    else
    {
        /*
         * All children were excluded by constraints, so mark the whole
         * appendrel dummy.  We must do this in this phase so that the rel's
         * dummy-ness is visible when we generate paths for other rels.
         */
        set_dummy_rel_pathlist(rel);
    }

    pfree(parent_attrsizes);
}

/*
 * set_append_rel_pathlist
 *    Build access paths for an "append relation"
 */
static void
set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
                        Index rti, RangeTblEntry *rte)
{
    int         parentRTindex = rti;
    List       *live_childrels = NIL;
    ListCell   *l;

    /*
     * Generate access paths for each member relation, and remember the
     * non-dummy children.
     */
    foreach(l, root->append_rel_list)
    {
        AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
        int         childRTindex;
        RangeTblEntry *childRTE;
        RelOptInfo *childrel;

        /* append_rel_list contains all append rels; ignore others */
        if (appinfo->parent_relid != parentRTindex)
            continue;

        /* Re-locate the child RTE and RelOptInfo */
        childRTindex = appinfo->child_relid;
        childRTE = root->simple_rte_array[childRTindex];
        childrel = root->simple_rel_array[childRTindex];

        /*
         * If set_append_rel_size() decided the parent appendrel was
         * parallel-unsafe at some point after visiting this child rel, we
         * need to propagate the unsafety marking down to the child, so that
         * we don't generate useless partial paths for it.
         */
        if (!rel->consider_parallel)
            childrel->consider_parallel = false;

        /*
         * Compute the child's access paths.
         */
        set_rel_pathlist(root, childrel, childRTindex, childRTE);

        /*
         * If child is dummy, ignore it.
         */
        if (IS_DUMMY_REL(childrel))
            continue;

        /*
         * Child is live, so add it to the live_childrels list for use below.
         */
        live_childrels = lappend(live_childrels, childrel);
    }

    /* Add paths to the append relation. */
    add_paths_to_append_rel(root, rel, live_childrels);
}

/*
 * set_grouped_rel_pathlist
 *    If a grouped relation for the given 'rel' exists, build partial
 *    aggregation paths for it.
 */
static void
set_grouped_rel_pathlist(PlannerInfo *root, RelOptInfo *rel)
{
    RelOptInfo *grouped_rel;

    /*
     * If there are no aggregate expressions or grouping expressions, eager
     * aggregation is not possible.
     */
    if (root->agg_clause_list == NIL ||
        root->group_expr_list == NIL)
        return;

    /* Add paths to the grouped base relation if one exists. */
    grouped_rel = rel->grouped_rel;
    if (grouped_rel)
    {
        Assert(IS_GROUPED_REL(grouped_rel));

        generate_grouped_paths(root, grouped_rel, rel);
        set_cheapest(grouped_rel);
    }
}


/*
 * add_paths_to_append_rel
 *    Generate paths for the given append relation given the set of non-dummy
 *    child rels.
 *
 * The function collects all parameterizations and orderings supported by the
 * non-dummy children.  For every such parameterization or ordering, it creates
 * an append path collecting one path from each non-dummy child with given
 * parameterization or ordering.  Similarly it collects partial paths from
 * non-dummy children to create partial append paths.
 */
void
add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel,
                        List *live_childrels)
{
    List       *subpaths = NIL;
    bool        subpaths_valid = true;
    List       *startup_subpaths = NIL;
    bool        startup_subpaths_valid = true;
    List       *partial_subpaths = NIL;
    List       *pa_partial_subpaths = NIL;
    List       *pa_nonpartial_subpaths = NIL;
    bool        partial_subpaths_valid = true;
    bool        pa_subpaths_valid;
    List       *all_child_pathkeys = NIL;
    List       *all_child_outers = NIL;
    ListCell   *l;
    double      partial_rows = -1;

    /* If appropriate, consider parallel append */
    pa_subpaths_valid = enable_parallel_append && rel->consider_parallel;

    /*
     * For every non-dummy child, remember the cheapest path.  Also, identify
     * all pathkeys (orderings) and parameterizations (required_outer sets)
     * available for the non-dummy member relations.
     */
    foreach(l, live_childrels)
    {
        RelOptInfo *childrel = lfirst(l);
        ListCell   *lcp;
        Path       *cheapest_partial_path = NULL;

        /*
         * If child has an unparameterized cheapest-total path, add that to
         * the unparameterized Append path we are constructing for the parent.
         * If not, there's no workable unparameterized path.
         *
         * With partitionwise aggregates, the child rel's pathlist may be
         * empty, so don't assume that a path exists here.
         */
        if (childrel->pathlist != NIL &&
            childrel->cheapest_total_path->param_info == NULL)
            accumulate_append_subpath(childrel->cheapest_total_path,
                                      &subpaths, NULL);
        else
            subpaths_valid = false;

        /*
         * When the planner is considering cheap startup plans, we'll also
         * collect all the cheapest_startup_paths (if set) and build an
         * AppendPath containing those as subpaths.
         */
        if (rel->consider_startup && childrel->cheapest_startup_path != NULL)
        {
            Path       *cheapest_path;

            /*
             * With an indication of how many tuples the query should provide,
             * the optimizer tries to choose the path optimal for that
             * specific number of tuples.
             */
            if (root->tuple_fraction > 0.0)
                cheapest_path =
                    get_cheapest_fractional_path(childrel,
                                                 root->tuple_fraction);
            else
                cheapest_path = childrel->cheapest_startup_path;

            /* cheapest_startup_path must not be a parameterized path. */
            Assert(cheapest_path->param_info == NULL);
            accumulate_append_subpath(cheapest_path,
                                      &startup_subpaths,
                                      NULL);
        }
        else
            startup_subpaths_valid = false;


        /* Same idea, but for a partial plan. */
        if (childrel->partial_pathlist != NIL)
        {
            cheapest_partial_path = linitial(childrel->partial_pathlist);
            accumulate_append_subpath(cheapest_partial_path,
                                      &partial_subpaths, NULL);
        }
        else
            partial_subpaths_valid = false;

        /*
         * Same idea, but for a parallel append mixing partial and non-partial
         * paths.
         */
        if (pa_subpaths_valid)
        {
            Path       *nppath = NULL;

            nppath =
                get_cheapest_parallel_safe_total_inner(childrel->pathlist);

            if (cheapest_partial_path == NULL && nppath == NULL)
            {
                /* Neither a partial nor a parallel-safe path?  Forget it. */
                pa_subpaths_valid = false;
            }
            else if (nppath == NULL ||
                     (cheapest_partial_path != NULL &&
                      cheapest_partial_path->total_cost < nppath->total_cost))
            {
                /* Partial path is cheaper or the only option. */
                Assert(cheapest_partial_path != NULL);
                accumulate_append_subpath(cheapest_partial_path,
                                          &pa_partial_subpaths,
                                          &pa_nonpartial_subpaths);
            }
            else
            {
                /*
                 * Either we've got only a non-partial path, or we think that
                 * a single backend can execute the best non-partial path
                 * faster than all the parallel backends working together can
                 * execute the best partial path.
                 *
                 * It might make sense to be more aggressive here.  Even if
                 * the best non-partial path is more expensive than the best
                 * partial path, it could still be better to choose the
                 * non-partial path if there are several such paths that can
                 * be given to different workers.  For now, we don't try to
                 * figure that out.
                 */
                accumulate_append_subpath(nppath,
                                          &pa_nonpartial_subpaths,
                                          NULL);
            }
        }

        /*
         * Collect lists of all the available path orderings and
         * parameterizations for all the children.  We use these as a
         * heuristic to indicate which sort orderings and parameterizations we
         * should build Append and MergeAppend paths for.
         */
        foreach(lcp, childrel->pathlist)
        {
            Path       *childpath = (Path *) lfirst(lcp);
            List       *childkeys = childpath->pathkeys;
            Relids      childouter = PATH_REQ_OUTER(childpath);

            /* Unsorted paths don't contribute to pathkey list */
            if (childkeys != NIL)
            {
                ListCell   *lpk;
                bool        found = false;

                /* Have we already seen this ordering? */
                foreach(lpk, all_child_pathkeys)
                {
                    List       *existing_pathkeys = (List *) lfirst(lpk);

                    if (compare_pathkeys(existing_pathkeys,
                                         childkeys) == PATHKEYS_EQUAL)
                    {
                        found = true;
                        break;
                    }
                }
                if (!found)
                {
                    /* No, so add it to all_child_pathkeys */
                    all_child_pathkeys = lappend(all_child_pathkeys,
                                                 childkeys);
                }
            }

            /* Unparameterized paths don't contribute to param-set list */
            if (childouter)
            {
                ListCell   *lco;
                bool        found = false;

                /* Have we already seen this param set? */
                foreach(lco, all_child_outers)
                {
                    Relids      existing_outers = (Relids) lfirst(lco);

                    if (bms_equal(existing_outers, childouter))
                    {
                        found = true;
                        break;
                    }
                }
                if (!found)
                {
                    /* No, so add it to all_child_outers */
                    all_child_outers = lappend(all_child_outers,
                                               childouter);
                }
            }
        }
    }

    /*
     * If we found unparameterized paths for all children, build an unordered,
     * unparameterized Append path for the rel.  (Note: this is correct even
     * if we have zero or one live subpath due to constraint exclusion.)
     */
    if (subpaths_valid)
        add_path(rel, (Path *) create_append_path(root, rel, subpaths, NIL,
                                                  NIL, NULL, 0, false,
                                                  -1));

    /* build an AppendPath for the cheap startup paths, if valid */
    if (startup_subpaths_valid)
        add_path(rel, (Path *) create_append_path(root, rel, startup_subpaths,
                                                  NIL, NIL, NULL, 0, false, -1));

    /*
     * Consider an append of unordered, unparameterized partial paths.  Make
     * it parallel-aware if possible.
     */
    if (partial_subpaths_valid && partial_subpaths != NIL)
    {
        AppendPath *appendpath;
        ListCell   *lc;
        int         parallel_workers = 0;

        /* Find the highest number of workers requested for any subpath. */
        foreach(lc, partial_subpaths)
        {
            Path       *path = lfirst(lc);

            parallel_workers = Max(parallel_workers, path->parallel_workers);
        }
        Assert(parallel_workers > 0);

        /*
         * If the use of parallel append is permitted, always request at least
         * log2(# of children) workers.  We assume it can be useful to have
         * extra workers in this case because they will be spread out across
         * the children.  The precise formula is just a guess, but we don't
         * want to end up with a radically different answer for a table with N
         * partitions vs. an unpartitioned table with the same data, so the
         * use of some kind of log-scaling here seems to make some sense.
         */
        if (enable_parallel_append)
        {
            parallel_workers = Max(parallel_workers,
                                   pg_leftmost_one_pos32(list_length(live_childrels)) + 1);
            parallel_workers = Min(parallel_workers,
                                   max_parallel_workers_per_gather);
        }
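        /*
         * For example, with 8 live children this requests at least
         * pg_leftmost_one_pos32(8) + 1 = 3 + 1 = 4 workers, subject to the
         * max_parallel_workers_per_gather cap applied just above.
         */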
        Assert(parallel_workers > 0);

        /* Generate a partial append path. */
        appendpath = create_append_path(root, rel, NIL, partial_subpaths,
                                        NIL, NULL, parallel_workers,
                                        enable_parallel_append,
                                        -1);

        /*
         * Make sure any subsequent partial paths use the same row count
         * estimate.
         */
        partial_rows = appendpath->path.rows;

        /* Add the path. */
        add_partial_path(rel, (Path *) appendpath);
    }

    /*
     * Consider a parallel-aware append using a mix of partial and non-partial
     * paths.  (This only makes sense if there's at least one child which has
     * a non-partial path that is substantially cheaper than any partial path;
     * otherwise, we should use the append path added in the previous step.)
     */
    if (pa_subpaths_valid && pa_nonpartial_subpaths != NIL)
    {
        AppendPath *appendpath;
        ListCell   *lc;
        int         parallel_workers = 0;

        /*
         * Find the highest number of workers requested for any partial
         * subpath.
         */
        foreach(lc, pa_partial_subpaths)
        {
            Path       *path = lfirst(lc);

            parallel_workers = Max(parallel_workers, path->parallel_workers);
        }

        /*
         * Same formula here as above.  It's even more important in this
         * instance because the non-partial paths won't contribute anything to
         * the planned number of parallel workers.
         */
        parallel_workers = Max(parallel_workers,
                               pg_leftmost_one_pos32(list_length(live_childrels)) + 1);
        parallel_workers = Min(parallel_workers,
                               max_parallel_workers_per_gather);
        Assert(parallel_workers > 0);

        appendpath = create_append_path(root, rel, pa_nonpartial_subpaths,
                                        pa_partial_subpaths,
                                        NIL, NULL, parallel_workers, true,
                                        partial_rows);
        add_partial_path(rel, (Path *) appendpath);
    }

    /*
     * Also build unparameterized ordered append paths based on the collected
     * list of child pathkeys.
     */
    if (subpaths_valid)
        generate_orderedappend_paths(root, rel, live_childrels,
                                     all_child_pathkeys);

    /*
     * Build Append paths for each parameterization seen among the child rels.
     * (This may look pretty expensive, but in most cases of practical
     * interest, the child rels will expose mostly the same parameterizations,
     * so that not that many cases actually get considered here.)
     *
     * The Append node itself cannot enforce quals, so all qual checking must
     * be done in the child paths.  This means that to have a parameterized
     * Append path, we must have the exact same parameterization for each
     * child path; otherwise some children might be failing to check the
     * moved-down quals.  To make them match up, we can try to increase the
     * parameterization of lesser-parameterized paths.
     */
    foreach(l, all_child_outers)
    {
        Relids      required_outer = (Relids) lfirst(l);
        ListCell   *lcr;

        /* Select the child paths for an Append with this parameterization */
        subpaths = NIL;
        subpaths_valid = true;
        foreach(lcr, live_childrels)
        {
            RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr);
            Path       *subpath;

            if (childrel->pathlist == NIL)
            {
                /* failed to make a suitable path for this child */
                subpaths_valid = false;
                break;
            }

            subpath = get_cheapest_parameterized_child_path(root,
                                                            childrel,
                                                            required_outer);
            if (subpath == NULL)
            {
                /* failed to make a suitable path for this child */
                subpaths_valid = false;
                break;
            }
            accumulate_append_subpath(subpath, &subpaths, NULL);
        }

        if (subpaths_valid)
            add_path(rel, (Path *)
                     create_append_path(root, rel, subpaths, NIL,
                                        NIL, required_outer, 0, false,
                                        -1));
    }

    /*
     * When there is only a single child relation, the Append path can inherit
     * any ordering available for the child rel's path, so that it's useful to
     * consider ordered partial paths.  Above we only considered the cheapest
     * partial path for each child, but let's also make paths using any
     * partial paths that have pathkeys.
     */
    if (list_length(live_childrels) == 1)
    {
        RelOptInfo *childrel = (RelOptInfo *) linitial(live_childrels);

        /* skip the cheapest partial path, since we already used that above */
        for_each_from(l, childrel->partial_pathlist, 1)
        {
            Path       *path = (Path *) lfirst(l);
            AppendPath *appendpath;

            /* skip paths with no pathkeys. */
            if (path->pathkeys == NIL)
                continue;

            appendpath = create_append_path(root, rel, NIL, list_make1(path),
                                            NIL, NULL,
                                            path->parallel_workers, true,
                                            partial_rows);
            add_partial_path(rel, (Path *) appendpath);
        }
    }
}

1802/*
1803 * generate_orderedappend_paths
1804 * Generate ordered append paths for an append relation
1805 *
1806 * Usually we generate MergeAppend paths here, but there are some special
1807 * cases where we can generate simple Append paths, because the subpaths
1808 * can provide tuples in the required order already.
1809 *
1810 * We generate a path for each ordering (pathkey list) appearing in
1811 * all_child_pathkeys.
1812 *
1813 * We consider the cheapest-startup and cheapest-total cases, and also the
1814 * cheapest-fractional case when not all tuples need to be retrieved. For each
1815 * interesting ordering, we collect all the cheapest startup subpaths, all the
1816 * cheapest total paths, and, if applicable, all the cheapest fractional paths,
1817 * and build a suitable path for each case.
1818 *
1819 * We don't currently generate any parameterized ordered paths here. While
1820 * it would not take much more code here to do so, it's very unclear that it
1821 * is worth the planning cycles to investigate such paths: there's little
1822 * use for an ordered path on the inside of a nestloop. In fact, it's likely
1823 * that the current coding of add_path would reject such paths out of hand,
1824 * because add_path gives no credit for sort ordering of parameterized paths,
1825 * and a parameterized MergeAppend is going to be more expensive than the
1826 * corresponding parameterized Append path. If we ever try harder to support
1827 * parameterized mergejoin plans, it might be worth adding support for
1828 * parameterized paths here to feed such joins. (See notes in
1829 * optimizer/README for why that might not ever happen, though.)
1830 */
1831static void
1833 List *live_childrels,
1834 List *all_child_pathkeys)
1835{
1836 ListCell *lcp;
1837 List *partition_pathkeys = NIL;
1838 List *partition_pathkeys_desc = NIL;
1839 bool partition_pathkeys_partial = true;
1840 bool partition_pathkeys_desc_partial = true;
1841
1842 /*
1843 * Some partitioned table setups may allow us to use an Append node
1844 * instead of a MergeAppend. This is possible in cases such as RANGE
1845 * partitioned tables where it's guaranteed that an earlier partition must
1846 * contain rows which come earlier in the sort order. To detect whether
1847 * this is relevant, build pathkey descriptions of the partition ordering,
1848 * for both forward and reverse scans.
1849 */
1850 if (rel->part_scheme != NULL && IS_SIMPLE_REL(rel) &&
1851 partitions_are_ordered(rel->boundinfo, rel->live_parts))
1852 {
1853 partition_pathkeys = build_partition_pathkeys(root, rel,
1854 ForwardScanDirection,
1855 &partition_pathkeys_partial);
1856
1857 partition_pathkeys_desc = build_partition_pathkeys(root, rel,
1858 BackwardScanDirection,
1859 &partition_pathkeys_desc_partial);
1860
1861 /*
1862 * You might think we should truncate_useless_pathkeys here, but
1863 * allowing partition keys which are a subset of the query's pathkeys
1864 * can often be useful. For example, consider a table partitioned by
1865 * RANGE (a, b), and a query with ORDER BY a, b, c. If we have child
1866 * paths that can produce the a, b, c ordering (perhaps via indexes on
1867 * (a, b, c)) then it works to consider the appendrel output as
1868 * ordered by a, b, c.
1869 */
1870 }
1871
1872 /* Now consider each interesting sort ordering */
1873 foreach(lcp, all_child_pathkeys)
1874 {
1875 List *pathkeys = (List *) lfirst(lcp);
1876 List *startup_subpaths = NIL;
1877 List *total_subpaths = NIL;
1878 List *fractional_subpaths = NIL;
1879 bool startup_neq_total = false;
1880 bool fraction_neq_total = false;
1881 bool match_partition_order;
1882 bool match_partition_order_desc;
1883 int end_index;
1884 int first_index;
1885 int direction;
1886
1887 /*
1888 * Determine if this sort ordering matches any partition pathkeys we
1889 * have, for both ascending and descending partition order. If the
1890 * partition pathkeys happen to be contained in pathkeys then it still
1891 * works, as described above, providing that the partition pathkeys
1892 * are complete and not just a prefix of the partition keys. (In such
1893 * cases we'll be relying on the child paths to have sorted the
1894 * lower-order columns of the required pathkeys.)
1895 */
1896 match_partition_order =
1897 pathkeys_contained_in(pathkeys, partition_pathkeys) ||
1898 (!partition_pathkeys_partial &&
1899 pathkeys_contained_in(partition_pathkeys, pathkeys));
1900
1901 match_partition_order_desc = !match_partition_order &&
1902 (pathkeys_contained_in(pathkeys, partition_pathkeys_desc) ||
1903 (!partition_pathkeys_desc_partial &&
1904 pathkeys_contained_in(partition_pathkeys_desc, pathkeys)));
1905
1906 /*
1907 * When the required pathkeys match the reverse of the partition
1908 * order, we must build the list of paths in reverse starting with the
1909 * last matching partition first. We can get away without making any
1910 * special cases for this in the loop below by just looping backward
1911 * over the child relations in this case.
1912 */
1913 if (match_partition_order_desc)
1914 {
1915 /* loop backward */
1916 first_index = list_length(live_childrels) - 1;
1917 end_index = -1;
1918 direction = -1;
1919
1920 /*
1921 * Set this to true to save us having to check for
1922 * match_partition_order_desc in the loop below.
1923 */
1924 match_partition_order = true;
1925 }
1926 else
1927 {
1928 /* for all other cases, loop forward */
1929 first_index = 0;
1930 end_index = list_length(live_childrels);
1931 direction = 1;
1932 }
1933
1934 /* Select the child paths for this ordering... */
1935 for (int i = first_index; i != end_index; i += direction)
1936 {
1937 RelOptInfo *childrel = list_nth_node(RelOptInfo, live_childrels, i);
1938 Path *cheapest_startup,
1939 *cheapest_total,
1940 *cheapest_fractional = NULL;
1941
1942 /* Locate the right paths, if they are available. */
1943 cheapest_startup =
1944 get_cheapest_path_for_pathkeys(childrel->pathlist,
1945 pathkeys,
1946 NULL,
1947 STARTUP_COST,
1948 false);
1949 cheapest_total =
1951 pathkeys,
1952 NULL,
1953 TOTAL_COST,
1954 false);
1955
1956 /*
1957 * If we can't find any paths with the right order just use the
1958 * cheapest-total path; we'll have to sort it later.
1959 */
1960 if (cheapest_startup == NULL || cheapest_total == NULL)
1961 {
1962 cheapest_startup = cheapest_total =
1963 childrel->cheapest_total_path;
1964 /* Assert we do have an unparameterized path for this child */
1965 Assert(cheapest_total->param_info == NULL);
1966 }
1967
1968 /*
1969 * When building a fractional path, determine a cheapest
1970 * fractional path for each child relation too. Looking at startup
1971 * and total costs is not enough, because the cheapest fractional
1972 * path may be dominated by two separate paths (one for startup,
1973 * one for total).
1977 */
1978 if (root->tuple_fraction > 0)
1979 {
1980 double path_fraction = root->tuple_fraction;
1981
1982 /*
1983 * We should not have a dummy child relation here. However,
1984 * we cannot use childrel->rows to compute the tuple fraction,
1985 * as childrel can be an upper relation with an unset row
1986 * estimate. Instead, we use the row estimate from the
1987 * cheapest_total path, which should already have been forced
1988 * to a sane value.
1989 */
1990 Assert(cheapest_total->rows > 0);
1991
1992 /* Convert absolute limit to a path fraction */
1993 if (path_fraction >= 1.0)
1994 path_fraction /= cheapest_total->rows;
1995
1996 cheapest_fractional =
1997 get_cheapest_fractional_path_for_pathkeys(childrel->pathlist,
1998 pathkeys,
1999 NULL,
2000 path_fraction);
2001
2002 /*
2003 * If we found no path with matching pathkeys, use the
2004 * cheapest total path instead.
2005 *
2006 * XXX We might consider partially sorted paths too (with an
2007 * incremental sort on top). But we'd have to build all the
2008 * incremental paths, do the costing etc.
2009 *
2010 * Also, notice whether we actually have different paths for
2011 * the "fractional" and "total" cases. This helps avoid
2012 * generating two identical ordered append paths.
2013 */
2014 if (cheapest_fractional == NULL)
2015 cheapest_fractional = cheapest_total;
2016 else if (cheapest_fractional != cheapest_total)
2017 fraction_neq_total = true;
2018 }
2019
2020 /*
2021 * Notice whether we actually have different paths for the
2022 * "startup" and "total" cases. This helps avoid generating two
2023 * identical ordered append paths.
2024 */
2025 if (cheapest_startup != cheapest_total)
2026 startup_neq_total = true;
2027
2028 /*
2029 * Collect the appropriate child paths. The required logic varies
2030 * for the Append and MergeAppend cases.
2031 */
2032 if (match_partition_order)
2033 {
2034 /*
2035 * We're going to make a plain Append path. We don't need
2036 * most of what accumulate_append_subpath would do, but we do
2037 * want to cut out child Appends or MergeAppends if they have
2038 * just a single subpath (and hence aren't doing anything
2039 * useful).
2040 */
2041 cheapest_startup = get_singleton_append_subpath(cheapest_startup);
2042 cheapest_total = get_singleton_append_subpath(cheapest_total);
2043
2044 startup_subpaths = lappend(startup_subpaths, cheapest_startup);
2045 total_subpaths = lappend(total_subpaths, cheapest_total);
2046
2047 if (cheapest_fractional)
2048 {
2049 cheapest_fractional = get_singleton_append_subpath(cheapest_fractional);
2050 fractional_subpaths = lappend(fractional_subpaths, cheapest_fractional);
2051 }
2052 }
2053 else
2054 {
2055 /*
2056 * Otherwise, rely on accumulate_append_subpath to collect the
2057 * child paths for the MergeAppend.
2058 */
2059 accumulate_append_subpath(cheapest_startup,
2060 &startup_subpaths, NULL);
2061 accumulate_append_subpath(cheapest_total,
2062 &total_subpaths, NULL);
2063
2064 if (cheapest_fractional)
2065 accumulate_append_subpath(cheapest_fractional,
2066 &fractional_subpaths, NULL);
2067 }
2068 }
2069
2070 /* ... and build the Append or MergeAppend paths */
2071 if (match_partition_order)
2072 {
2073 /* We only need Append */
2074 add_path(rel, (Path *) create_append_path(root,
2075 rel,
2076 startup_subpaths,
2077 NIL,
2078 pathkeys,
2079 NULL,
2080 0,
2081 false,
2082 -1));
2083 if (startup_neq_total)
2084 add_path(rel, (Path *) create_append_path(root,
2085 rel,
2086 total_subpaths,
2087 NIL,
2088 pathkeys,
2089 NULL,
2090 0,
2091 false,
2092 -1));
2093
2094 if (fractional_subpaths && fraction_neq_total)
2095 add_path(rel, (Path *) create_append_path(root,
2096 rel,
2097 fractional_subpaths,
2098 NIL,
2099 pathkeys,
2100 NULL,
2101 0,
2102 false,
2103 -1));
2104 }
2105 else
2106 {
2107 /* We need MergeAppend */
2108 add_path(rel, (Path *) create_merge_append_path(root,
2109 rel,
2110 startup_subpaths,
2111 pathkeys,
2112 NULL));
2113 if (startup_neq_total)
2114 add_path(rel, (Path *) create_merge_append_path(root,
2115 rel,
2116 total_subpaths,
2117 pathkeys,
2118 NULL));
2119
2120 if (fractional_subpaths && fraction_neq_total)
2121 add_path(rel, (Path *) create_merge_append_path(root,
2122 rel,
2123 fractional_subpaths,
2124 pathkeys,
2125 NULL));
2126 }
2127 }
2128}
2129
2130/*
2131 * get_cheapest_parameterized_child_path
2132 * Get cheapest path for this relation that has exactly the requested
2133 * parameterization.
2134 *
2135 * Returns NULL if unable to create such a path.
2136 */
2137static Path *
2138get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel,
2139 Relids required_outer)
2140{
2141 Path *cheapest;
2142 ListCell *lc;
2143
2144 /*
2145 * Look up the cheapest existing path with no more than the needed
2146 * parameterization. If it has exactly the needed parameterization, we're
2147 * done.
2148 */
2149 cheapest = get_cheapest_path_for_pathkeys(rel->pathlist,
2150 NIL,
2151 required_outer,
2152 TOTAL_COST,
2153 false);
2154 Assert(cheapest != NULL);
2155 if (bms_equal(PATH_REQ_OUTER(cheapest), required_outer))
2156 return cheapest;
2157
2158 /*
2159 * Otherwise, we can "reparameterize" an existing path to match the given
2160 * parameterization, which effectively means pushing down additional
2161 * joinquals to be checked within the path's scan. However, some existing
2162 * paths might check the available joinquals already while others don't;
2163 * therefore, it's not clear which existing path will be cheapest after
2164 * reparameterization. We have to go through them all and find out.
2165 */
2166 cheapest = NULL;
2167 foreach(lc, rel->pathlist)
2168 {
2169 Path *path = (Path *) lfirst(lc);
2170
2171 /* Can't use it if it needs more than requested parameterization */
2172 if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer))
2173 continue;
2174
2175 /*
2176 * Reparameterization can only increase the path's cost, so if it's
2177 * already more expensive than the current cheapest, forget it.
2178 */
2179 if (cheapest != NULL &&
2180 compare_path_costs(cheapest, path, TOTAL_COST) <= 0)
2181 continue;
2182
2183 /* Reparameterize if needed, then recheck cost */
2184 if (!bms_equal(PATH_REQ_OUTER(path), required_outer))
2185 {
2186 path = reparameterize_path(root, path, required_outer, 1.0);
2187 if (path == NULL)
2188 continue; /* failed to reparameterize this one */
2189 Assert(bms_equal(PATH_REQ_OUTER(path), required_outer));
2190
2191 if (cheapest != NULL &&
2192 compare_path_costs(cheapest, path, TOTAL_COST) <= 0)
2193 continue;
2194 }
2195
2196 /* We have a new best path */
2197 cheapest = path;
2198 }
2199
2200 /* Return the best path, or NULL if we found no suitable candidate */
2201 return cheapest;
2202}
2203
2204/*
2205 * accumulate_append_subpath
2206 * Add a subpath to the list being built for an Append or MergeAppend.
2207 *
2208 * It's possible that the child is itself an Append or MergeAppend path, in
2209 * which case we can "cut out the middleman" and just add its child paths to
2210 * our own list. (We don't try to do this earlier because we need to apply
2211 * both levels of transformation to the quals.)
2212 *
2213 * Note that if we omit a child MergeAppend in this way, we are effectively
2214 * omitting a sort step, which seems fine: if the parent is to be an Append,
2215 * its result would be unsorted anyway, while if the parent is to be a
2216 * MergeAppend, there's no point in a separate sort on a child.
2217 *
2218 * Normally, either path is a partial path and subpaths is a list of partial
2219 * paths, or else path is a non-partial path and subpaths is a list of those.
2220 * However, if path is a parallel-aware Append, then we add its partial path
2221 * children to subpaths and the rest to special_subpaths. If the latter is
2222 * NULL, we don't flatten the path at all (unless it contains only partial
2223 * paths).
2224 */
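/*
 * A small illustration of the flattening: if one child path is itself
 * Append(p1, p2), collecting it for a parent Append yields the flat list
 * [..., p1, p2, ...] rather than [..., Append(p1, p2), ...], so the final
 * plan has a single Append level. (Sketch only; the parallel-aware split
 * into subpaths and special_subpaths is handled in the code below.)
 */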
2225static void
2226accumulate_append_subpath(Path *path, List **subpaths, List **special_subpaths)
2227{
2228 if (IsA(path, AppendPath))
2229 {
2230 AppendPath *apath = (AppendPath *) path;
2231
2232 if (!apath->path.parallel_aware || apath->first_partial_path == 0)
2233 {
2234 *subpaths = list_concat(*subpaths, apath->subpaths);
2235 return;
2236 }
2237 else if (special_subpaths != NULL)
2238 {
2239 List *new_special_subpaths;
2240
2241 /* Split Parallel Append into partial and non-partial subpaths */
2242 *subpaths = list_concat(*subpaths,
2243 list_copy_tail(apath->subpaths,
2244 apath->first_partial_path));
2245 new_special_subpaths = list_copy_head(apath->subpaths,
2246 apath->first_partial_path);
2247 *special_subpaths = list_concat(*special_subpaths,
2248 new_special_subpaths);
2249 return;
2250 }
2251 }
2252 else if (IsA(path, MergeAppendPath))
2253 {
2254 MergeAppendPath *mpath = (MergeAppendPath *) path;
2255
2256 *subpaths = list_concat(*subpaths, mpath->subpaths);
2257 return;
2258 }
2259
2260 *subpaths = lappend(*subpaths, path);
2261}
2262
2263/*
2264 * get_singleton_append_subpath
2265 * Returns the single subpath of an Append/MergeAppend, or just
2266 * returns 'path' if it's not a single sub-path Append/MergeAppend.
2267 *
2268 * Note: 'path' must not be a parallel-aware path.
2269 */
2270static Path *
2271get_singleton_append_subpath(Path *path)
2272{
2273 Assert(!path->parallel_aware);
2274
2275 if (IsA(path, AppendPath))
2276 {
2277 AppendPath *apath = (AppendPath *) path;
2278
2279 if (list_length(apath->subpaths) == 1)
2280 return (Path *) linitial(apath->subpaths);
2281 }
2282 else if (IsA(path, MergeAppendPath))
2283 {
2284 MergeAppendPath *mpath = (MergeAppendPath *) path;
2285
2286 if (list_length(mpath->subpaths) == 1)
2287 return (Path *) linitial(mpath->subpaths);
2288 }
2289
2290 return path;
2291}
2292
2293/*
2294 * set_dummy_rel_pathlist
2295 * Build a dummy path for a relation that's been excluded by constraints
2296 *
2297 * Rather than inventing a special "dummy" path type, we represent this as an
2298 * AppendPath with no members (see also IS_DUMMY_APPEND/IS_DUMMY_REL macros).
2299 *
2300 * (See also mark_dummy_rel, which does basically the same thing, but is
2301 * typically used to change a rel into dummy state after we already made
2302 * paths for it.)
2303 */
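/*
 * For example (hypothetical schema): if a child table carries
 * CHECK (a < 100) and the query says WHERE a >= 200, constraint exclusion
 * can prove the child empty, and the child rel then gets this zero-row
 * dummy Append path in place of any real scan paths.
 */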
2304static void
2305set_dummy_rel_pathlist(RelOptInfo *rel)
2306{
2307 /* Set dummy size estimates --- we leave attr_widths[] as zeroes */
2308 rel->rows = 0;
2309 rel->reltarget->width = 0;
2310
2311 /* Discard any pre-existing paths; no further need for them */
2312 rel->pathlist = NIL;
2313 rel->partial_pathlist = NIL;
2314
2315 /* Set up the dummy path */
2316 add_path(rel, (Path *) create_append_path(NULL, rel, NIL, NIL,
2317 NIL, rel->lateral_relids,
2318 0, false, -1));
2319
2320 /*
2321 * We set the cheapest-path fields immediately, just in case they were
2322 * pointing at some discarded path. This is redundant in current usage
2323 * because set_rel_pathlist will do it later, but it's cheap so we keep it
2324 * for safety and consistency with mark_dummy_rel.
2325 */
2326 set_cheapest(rel);
2327}
2328
2329/*
2330 * find_window_run_conditions
2331 * Determine if 'wfunc' is really a WindowFunc and call its prosupport
2332 * function to determine the function's monotonic properties. We then
2333 * see if 'opexpr' can be used to short-circuit execution.
2334 *
2335 * For example row_number() over (order by ...) always produces a value one
2336 * higher than in the previous row. If someone has a window function in a subquery
2337 * and has a WHERE clause in the outer query to filter rows <= 10, then we may
2338 * as well stop processing the windowagg once the row number reaches 11. Here
2339 * we check if 'opexpr' might help us to stop doing needless extra processing
2340 * in WindowAgg nodes.
2341 *
2342 * '*keep_original' is set to true if the caller should also use 'opexpr' for
2343 * its original purpose. This is set to false if the caller can assume that
2344 * the run condition will handle all of the required filtering.
2345 *
2346 * Returns true if 'opexpr' was found to be useful and was added to the
2347 * WindowFunc's runCondition. We also set *keep_original accordingly and add
2348 * 'attno' to *run_cond_attrs offset by FirstLowInvalidHeapAttributeNumber.
2349 * If the 'opexpr' cannot be used then we set *keep_original to true and
2350 * return false.
2351 */
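/*
 * Illustrative example (hypothetical query):
 *
 *     SELECT * FROM (SELECT a, row_number() OVER (ORDER BY a) AS rn
 *                    FROM t) AS sub
 *     WHERE rn <= 10;
 *
 * Since row_number() is monotonically increasing, "rn <= 10" can be made a
 * run condition, letting the WindowAgg stop once rn reaches 11 instead of
 * numbering every input row.
 */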
2352static bool
2353find_window_run_conditions(Query *subquery, AttrNumber attno,
2354 WindowFunc *wfunc, OpExpr *opexpr, bool wfunc_left,
2355 bool *keep_original, Bitmapset **run_cond_attrs)
2356{
2357 Oid prosupport;
2358 Expr *otherexpr;
2359 SupportRequestWFuncMonotonic req;
2360 SupportRequestWFuncMonotonic *res;
2361 WindowClause *wclause;
2362 List *opinfos;
2363 OpExpr *runopexpr;
2364 Oid runoperator;
2365 ListCell *lc;
2366
2367 *keep_original = true;
2368
2369 while (IsA(wfunc, RelabelType))
2370 wfunc = (WindowFunc *) ((RelabelType *) wfunc)->arg;
2371
2372 /* we can only work with window functions */
2373 if (!IsA(wfunc, WindowFunc))
2374 return false;
2375
2376 /* can't use it if there are subplans in the WindowFunc */
2377 if (contain_subplans((Node *) wfunc))
2378 return false;
2379
2380 prosupport = get_func_support(wfunc->winfnoid);
2381
2382 /* Check if there's a support function for 'wfunc' */
2383 if (!OidIsValid(prosupport))
2384 return false;
2385
2386 /* get the Expr from the other side of the OpExpr */
2387 if (wfunc_left)
2388 otherexpr = lsecond(opexpr->args);
2389 else
2390 otherexpr = linitial(opexpr->args);
2391
2392 /*
2393 * The value being compared must not change during the evaluation of the
2394 * window partition.
2395 */
2396 if (!is_pseudo_constant_clause((Node *) otherexpr))
2397 return false;
2398
2399 /* find the window clause belonging to the window function */
2400 wclause = (WindowClause *) list_nth(subquery->windowClause,
2401 wfunc->winref - 1);
2402
2403 req.type = T_SupportRequestWFuncMonotonic;
2404 req.window_func = wfunc;
2405 req.window_clause = wclause;
2406
2407 /* call the support function */
2408 res = (SupportRequestWFuncMonotonic *)
2409 DatumGetPointer(OidFunctionCall1(prosupport,
2410 PointerGetDatum(&req)));
2411
2412 /*
2413 * Nothing to do if the function is neither monotonically increasing nor
2414 * monotonically decreasing.
2415 */
2416 if (res == NULL || res->monotonic == MONOTONICFUNC_NONE)
2417 return false;
2418
2419 runopexpr = NULL;
2420 runoperator = InvalidOid;
2421 opinfos = get_op_index_interpretation(opexpr->opno);
2422
2423 foreach(lc, opinfos)
2424 {
2425 OpIndexInterpretation *opinfo = (OpIndexInterpretation *) lfirst(lc);
2426 CompareType cmptype = opinfo->cmptype;
2427
2428 /* handle < / <= */
2429 if (cmptype == COMPARE_LT || cmptype == COMPARE_LE)
2430 {
2431 /*
2432 * < / <= is supported for monotonically increasing functions in
2433 * the form <wfunc> op <pseudoconst> and <pseudoconst> op <wfunc>
2434 * for monotonically decreasing functions.
2435 */
2436 if ((wfunc_left && (res->monotonic & MONOTONICFUNC_INCREASING)) ||
2437 (!wfunc_left && (res->monotonic & MONOTONICFUNC_DECREASING)))
2438 {
2439 *keep_original = false;
2440 runopexpr = opexpr;
2441 runoperator = opexpr->opno;
2442 }
2443 break;
2444 }
2445 /* handle > / >= */
2446 else if (cmptype == COMPARE_GT || cmptype == COMPARE_GE)
2447 {
2448 /*
2449 * > / >= is supported for monotonically decreasing functions in
2450 * the form <wfunc> op <pseudoconst> and <pseudoconst> op <wfunc>
2451 * for monotonically increasing functions.
2452 */
2453 if ((wfunc_left && (res->monotonic & MONOTONICFUNC_DECREASING)) ||
2454 (!wfunc_left && (res->monotonic & MONOTONICFUNC_INCREASING)))
2455 {
2456 *keep_original = false;
2457 runopexpr = opexpr;
2458 runoperator = opexpr->opno;
2459 }
2460 break;
2461 }
2462 /* handle = */
2463 else if (cmptype == COMPARE_EQ)
2464 {
2465 CompareType newcmptype;
2466
2467 /*
2468 * When both monotonically increasing and decreasing then the
2469 * return value of the window function will be the same each time.
2470 * We can simply use 'opexpr' as the run condition without
2471 * modifying it.
2472 */
2473 if ((res->monotonic & MONOTONICFUNC_BOTH) == MONOTONICFUNC_BOTH)
2474 {
2475 *keep_original = false;
2476 runopexpr = opexpr;
2477 runoperator = opexpr->opno;
2478 break;
2479 }
2480
2481 /*
2482 * When monotonically increasing we make a qual with <wfunc> <=
2483 * <value> or <value> >= <wfunc> in order to filter out values
2484 * which are above the value in the equality condition. For
2485 * monotonically decreasing functions we want to filter values
2486 * below the value in the equality condition.
2487 */
2488 if (res->monotonic & MONOTONICFUNC_INCREASING)
2489 newcmptype = wfunc_left ? COMPARE_LE : COMPARE_GE;
2490 else
2491 newcmptype = wfunc_left ? COMPARE_GE : COMPARE_LE;
2492
2493 /* We must keep the original equality qual */
2494 *keep_original = true;
2495 runopexpr = opexpr;
2496
2497 /* determine the operator to use for the WindowFuncRunCondition */
2498 runoperator = get_opfamily_member_for_cmptype(opinfo->opfamily_id,
2499 opinfo->oplefttype,
2500 opinfo->oprighttype,
2501 newcmptype);
2502 break;
2503 }
2504 }
2505
2506 if (runopexpr != NULL)
2507 {
2508 WindowFuncRunCondition *wfuncrc;
2509
2510 wfuncrc = makeNode(WindowFuncRunCondition);
2511 wfuncrc->opno = runoperator;
2512 wfuncrc->inputcollid = runopexpr->inputcollid;
2513 wfuncrc->wfunc_left = wfunc_left;
2514 wfuncrc->arg = copyObject(otherexpr);
2515
2516 wfunc->runCondition = lappend(wfunc->runCondition, wfuncrc);
2517
2518 /* record that this attno was used in a run condition */
2519 *run_cond_attrs = bms_add_member(*run_cond_attrs,
2520 attno - FirstLowInvalidHeapAttributeNumber);
2521 return true;
2522 }
2523
2524 /* unsupported OpExpr */
2525 return false;
2526}
2527
2528/*
2529 * check_and_push_window_quals
2530 * Check if 'clause' is a qual that can be pushed into a WindowFunc
2531 * as a 'runCondition' qual. These, when present, allow some unnecessary
2532 * work to be skipped during execution.
2533 *
2534 * 'run_cond_attrs' will be populated with all targetlist resnos of subquery
2535 * targets (offset by FirstLowInvalidHeapAttributeNumber) that we pushed
2536 * window quals for.
2537 *
2538 * Returns true if the caller still must keep the original qual or false if
2539 * the caller can safely ignore the original qual because the WindowAgg node
2540 * will use the runCondition to stop returning tuples.
2541 */
2542static bool
2543check_and_push_window_quals(Query *subquery, Node *clause,
2544 Bitmapset **run_cond_attrs)
2545{
2546 OpExpr *opexpr = (OpExpr *) clause;
2547 bool keep_original = true;
2548 Var *var1;
2549 Var *var2;
2550
2551 /* We're only able to use OpExprs with 2 operands */
2552 if (!IsA(opexpr, OpExpr))
2553 return true;
2554
2555 if (list_length(opexpr->args) != 2)
2556 return true;
2557
2558 /*
2559 * Currently, we restrict this optimization to strict OpExprs. The reason
2560 * for this is that during execution, once the runcondition becomes false,
2561 * we stop evaluating WindowFuncs. To avoid leaving around stale window
2562 * function result values, we set them to NULL. Having only strict
2563 * OpExprs here ensures that we properly filter out the tuples with NULLs
2564 * in the top-level WindowAgg.
2565 */
2566 set_opfuncid(opexpr);
2567 if (!func_strict(opexpr->opfuncid))
2568 return true;
2569
2570 /*
2571 * Check for plain Vars that reference window functions in the subquery.
2572 * If we find any, we'll ask find_window_run_conditions() if 'opexpr' can
2573 * be used as part of the run condition.
2574 */
2575
2576 /* Check the left side of the OpExpr */
2577 var1 = linitial(opexpr->args);
2578 if (IsA(var1, Var) && var1->varattno > 0)
2579 {
2580 TargetEntry *tle = list_nth(subquery->targetList, var1->varattno - 1);
2581 WindowFunc *wfunc = (WindowFunc *) tle->expr;
2582
2583 if (find_window_run_conditions(subquery, tle->resno, wfunc, opexpr,
2584 true, &keep_original, run_cond_attrs))
2585 return keep_original;
2586 }
2587
2588 /* and check the right side */
2589 var2 = lsecond(opexpr->args);
2590 if (IsA(var2, Var) && var2->varattno > 0)
2591 {
2592 TargetEntry *tle = list_nth(subquery->targetList, var2->varattno - 1);
2593 WindowFunc *wfunc = (WindowFunc *) tle->expr;
2594
2595 if (find_window_run_conditions(subquery, tle->resno, wfunc, opexpr,
2596 false, &keep_original, run_cond_attrs))
2597 return keep_original;
2598 }
2599
2600 return true;
2601}
2602
2603/*
2604 * set_subquery_pathlist
2605 * Generate SubqueryScan access paths for a subquery RTE
2606 *
2607 * We don't currently support generating parameterized paths for subqueries
2608 * by pushing join clauses down into them; it seems too expensive to re-plan
2609 * the subquery multiple times to consider different alternatives.
2610 * (XXX that could stand to be reconsidered, now that we use Paths.)
2611 * So the paths made here will be parameterized if the subquery contains
2612 * LATERAL references, otherwise not. As long as that's true, there's no need
2613 * for a separate set_subquery_size phase: just make the paths right away.
2614 */
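/*
 * Illustrative example (hypothetical query): given
 *
 *     SELECT * FROM (SELECT a, sum(b) AS s FROM t GROUP BY a) AS sub
 *     WHERE sub.a = 42;
 *
 * the outer "a = 42" can usually be pushed down to become a WHERE qual of
 * the subquery, so it scans only the matching rows instead of aggregating
 * all of "t" and filtering afterwards.
 */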
2615static void
2616set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
2617 Index rti, RangeTblEntry *rte)
2618{
2619 Query *parse = root->parse;
2620 Query *subquery = rte->subquery;
2621 bool trivial_pathtarget;
2622 Relids required_outer;
2623 pushdown_safety_info safetyInfo;
2624 double tuple_fraction;
2625 RelOptInfo *sub_final_rel;
2626 Bitmapset *run_cond_attrs = NULL;
2627 ListCell *lc;
2628 char *plan_name;
2629
2630 /*
2631 * Must copy the Query so that planning doesn't mess up the RTE contents
2632 * (really really need to fix the planner to not scribble on its input,
2633 * someday ... but see remove_unused_subquery_outputs to start with).
2634 */
2635 subquery = copyObject(subquery);
2636
2637 /*
2638 * If it's a LATERAL subquery, it might contain some Vars of the current
2639 * query level, requiring it to be treated as parameterized, even though
2640 * we don't support pushing down join quals into subqueries.
2641 */
2642 required_outer = rel->lateral_relids;
2643
2644 /*
2645 * Zero out result area for subquery_is_pushdown_safe, so that it can set
2646 * flags as needed while recursing. In particular, we need a workspace
2647 * for keeping track of the reasons why columns are unsafe to reference.
2648 * These reasons are stored in the bits inside unsafeFlags[i] when we
2649 * discover reasons that column i of the subquery is unsafe to be used in
2650 * a pushed-down qual.
2651 */
2652 memset(&safetyInfo, 0, sizeof(safetyInfo));
2653 safetyInfo.unsafeFlags = (unsigned char *)
2654 palloc0((list_length(subquery->targetList) + 1) * sizeof(unsigned char));
2655
2656 /*
2657 * If the subquery has the "security_barrier" flag, it means the subquery
2658 * originated from a view that must enforce row-level security. Then we
2659 * must not push down quals that contain leaky functions. (Ideally this
2660 * would be checked inside subquery_is_pushdown_safe, but since we don't
2661 * currently pass the RTE to that function, we must do it here.)
2662 */
2663 safetyInfo.unsafeLeaky = rte->security_barrier;
2664
2665 /*
2666 * If there are any restriction clauses that have been attached to the
2667 * subquery relation, consider pushing them down to become WHERE or HAVING
2668 * quals of the subquery itself. This transformation is useful because it
2669 * may allow us to generate a better plan for the subquery than evaluating
2670 * all the subquery output rows and then filtering them.
2671 *
2672 * There are several cases where we cannot push down clauses. Restrictions
2673 * involving the subquery are checked by subquery_is_pushdown_safe().
2674 * Restrictions on individual clauses are checked by
2675 * qual_is_pushdown_safe(). Also, we don't want to push down
2676 * pseudoconstant clauses; better to have the gating node above the
2677 * subquery.
2678 *
2679 * Non-pushed-down clauses will get evaluated as qpquals of the
2680 * SubqueryScan node.
2681 *
2682 * XXX Are there any cases where we want to make a policy decision not to
2683 * push down a pushable qual, because it'd result in a worse plan?
2684 */
2685 if (rel->baserestrictinfo != NIL &&
2686 subquery_is_pushdown_safe(subquery, subquery, &safetyInfo))
2687 {
2688 /* OK to consider pushing down individual quals */
2689 List *upperrestrictlist = NIL;
2690 ListCell *l;
2691
2692 foreach(l, rel->baserestrictinfo)
2693 {
2694 RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
2695 Node *clause = (Node *) rinfo->clause;
2696
2697 if (rinfo->pseudoconstant)
2698 {
2699 upperrestrictlist = lappend(upperrestrictlist, rinfo);
2700 continue;
2701 }
2702
2703 switch (qual_is_pushdown_safe(subquery, rti, rinfo, &safetyInfo))
2704 {
2705 case PUSHDOWN_SAFE:
2706 /* Push it down */
2707 subquery_push_qual(subquery, rte, rti, clause);
2708 break;
2709
2710 case PUSHDOWN_WINDOWCLAUSE_RUNCOND:
2711
2712 /*
2713 * Since we can't push the qual down into the subquery,
2714 * check if it happens to reference a window function. If
2715 * so then it might be useful to use for the WindowAgg's
2716 * runCondition.
2717 */
2718 if (!subquery->hasWindowFuncs ||
2719 check_and_push_window_quals(subquery, clause,
2720 &run_cond_attrs))
2721 {
2722 /*
2723 * subquery has no window funcs or the clause is not a
2724 * suitable window run condition qual or it is, but
2725 * the original must also be kept in the upper query.
2726 */
2727 upperrestrictlist = lappend(upperrestrictlist, rinfo);
2728 }
2729 break;
2730
2731 case PUSHDOWN_UNSAFE:
2732 upperrestrictlist = lappend(upperrestrictlist, rinfo);
2733 break;
2734 }
2735 }
2736 rel->baserestrictinfo = upperrestrictlist;
2737 /* We don't bother recomputing baserestrict_min_security */
2738 }
2739
2740 pfree(safetyInfo.unsafeFlags);
2741
2742 /*
2743 * The upper query might not use all the subquery's output columns; if
2744 * not, we can simplify. Pass the attributes that were pushed down into
2745 * WindowAgg run conditions to ensure we don't accidentally think those
2746 * are unused.
2747 */
2748 remove_unused_subquery_outputs(subquery, rel, run_cond_attrs);
2749
2750 /*
2751 * We can safely pass the outer tuple_fraction down to the subquery if the
2752 * outer level has no joining, aggregation, or sorting to do. Otherwise
2753 * we'd better tell the subquery to plan for full retrieval. (XXX This
2754 * could probably be made more intelligent ...)
2755 */
2756 if (parse->hasAggs ||
2757 parse->groupClause ||
2758 parse->groupingSets ||
2759 root->hasHavingQual ||
2760 parse->distinctClause ||
2761 parse->sortClause ||
2762 bms_membership(root->all_baserels) == BMS_MULTIPLE)
2763 tuple_fraction = 0.0; /* default case */
2764 else
2765 tuple_fraction = root->tuple_fraction;
2766
2767 /* plan_params should not be in use in current query level */
2768 Assert(root->plan_params == NIL);
2769
2770 /* Generate a subroot and Paths for the subquery */
2771 plan_name = choose_plan_name(root->glob, rte->eref->aliasname, false);
2772 rel->subroot = subquery_planner(root->glob, subquery, plan_name,
2773 root, false, tuple_fraction, NULL);
2774
2775 /* Isolate the params needed by this specific subplan */
2776 rel->subplan_params = root->plan_params;
2777 root->plan_params = NIL;
2778
2779 /*
2780 * It's possible that constraint exclusion proved the subquery empty. If
2781 * so, it's desirable to produce an unadorned dummy path so that we will
2782 * recognize appropriate optimizations at this query level.
2783 */
2784 sub_final_rel = fetch_upper_rel(rel->subroot, UPPERREL_FINAL, NULL);
2785
2786 if (IS_DUMMY_REL(sub_final_rel))
2787 {
2788 set_dummy_rel_pathlist(rel);
2789 return;
2790 }
2791
2792 /*
2793 * Mark rel with estimated output rows, width, etc. Note that we have to
2794 * do this before generating outer-query paths, else cost_subqueryscan is
2795 * not happy.
2796 */
2797 set_subquery_size_estimates(root, rel);
2798
2799 /*
2800 * Also detect whether the reltarget is trivial, so that we can pass that
2801 * info to cost_subqueryscan (rather than re-deriving it multiple times).
2802 * It's trivial if it fetches all the subplan output columns in order.
2803 */
2804 if (list_length(rel->reltarget->exprs) != list_length(subquery->targetList))
2805 trivial_pathtarget = false;
2806 else
2807 {
2808 trivial_pathtarget = true;
2809 foreach(lc, rel->reltarget->exprs)
2810 {
2811 Node *node = (Node *) lfirst(lc);
2812 Var *var;
2813
2814 if (!IsA(node, Var))
2815 {
2816 trivial_pathtarget = false;
2817 break;
2818 }
2819 var = (Var *) node;
2820 if (var->varno != rti ||
2821 var->varattno != foreach_current_index(lc) + 1)
2822 {
2823 trivial_pathtarget = false;
2824 break;
2825 }
2826 }
2827 }
2828
2829 /*
2830 * For each Path that subquery_planner produced, make a SubqueryScanPath
2831 * in the outer query.
2832 */
2833 foreach(lc, sub_final_rel->pathlist)
2834 {
2835 Path *subpath = (Path *) lfirst(lc);
2836 List *pathkeys;
2837
2838 /* Convert subpath's pathkeys to outer representation */
2839 pathkeys = convert_subquery_pathkeys(root,
2840 rel,
2841 subpath->pathkeys,
2842 make_tlist_from_pathtarget(subpath->pathtarget));
2843
2844 /* Generate outer path using this subpath */
2845 add_path(rel, (Path *)
2846 create_subqueryscan_path(root, rel, subpath,
2847 trivial_pathtarget,
2848 pathkeys, required_outer));
2849 }
2850
2851 /* If outer rel allows parallelism, do same for partial paths. */
2852 if (rel->consider_parallel && bms_is_empty(required_outer))
2853 {
2854 /* If consider_parallel is false, there should be no partial paths. */
2855 Assert(sub_final_rel->consider_parallel ||
2856 sub_final_rel->partial_pathlist == NIL);
2857
2858 /* Same for partial paths. */
2859 foreach(lc, sub_final_rel->partial_pathlist)
2860 {
2861 Path *subpath = (Path *) lfirst(lc);
2862 List *pathkeys;
2863
2864 /* Convert subpath's pathkeys to outer representation */
2865 pathkeys = convert_subquery_pathkeys(root,
2866 rel,
2867 subpath->pathkeys,
2868 make_tlist_from_pathtarget(subpath->pathtarget));
2869
2870 /* Generate outer path using this subpath */
2871 add_partial_path(rel, (Path *)
2872 create_subqueryscan_path(root, rel, subpath,
2873 trivial_pathtarget,
2874 pathkeys,
2875 required_outer));
2876 }
2877 }
2878}
2879
2880/*
2881 * set_function_pathlist
2882 * Build the (single) access path for a function RTE
2883 */
2884static void
2885set_function_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
2886{
2887 Relids required_outer;
2888 List *pathkeys = NIL;
2889
2890 /*
2891 * We don't support pushing join clauses into the quals of a function
2892 * scan, but it could still have required parameterization due to LATERAL
2893 * refs in the function expression.
2894 */
2895 required_outer = rel->lateral_relids;
2896
2897 /*
2898 * The result is considered unordered unless ORDINALITY was used, in which
2899 * case it is ordered by the ordinal column (the last one). See if we
2900 * care, by checking for uses of that Var in equivalence classes.
2901 */
2902 if (rte->funcordinality)
2903 {
2904 AttrNumber ordattno = rel->max_attr;
2905 Var *var = NULL;
2906 ListCell *lc;
2907
2908 /*
2909 * Is there a Var for it in rel's targetlist? If not, the query did
2910 * not reference the ordinality column, or at least not in any way
2911 * that would be interesting for sorting.
2912 */
2913 foreach(lc, rel->reltarget->exprs)
2914 {
2915 Var *node = (Var *) lfirst(lc);
2916
2917 /* checking varno/varlevelsup is just paranoia */
2918 if (IsA(node, Var) &&
2919 node->varattno == ordattno &&
2920 node->varno == rel->relid &&
2921 node->varlevelsup == 0)
2922 {
2923 var = node;
2924 break;
2925 }
2926 }
2927
2928 /*
2929 * Try to build pathkeys for this Var with int8 sorting. We tell
2930 * build_expression_pathkey not to build any new equivalence class; if
2931 * the Var isn't already mentioned in some EC, it means that nothing
2932 * cares about the ordering.
2933 */
2934 if (var)
2935 pathkeys = build_expression_pathkey(root,
2936 (Expr *) var,
2937 Int8LessOperator,
2938 rel->relids,
2939 false);
2940 }
2941
2942 /* Generate appropriate path */
2943 add_path(rel, create_functionscan_path(root, rel,
2944 pathkeys, required_outer));
2945}
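/*
 * As an illustration of the ordinality case above (hypothetical query):
 *
 *     SELECT * FROM unnest(ARRAY[30,10,20]) WITH ORDINALITY AS u(x, ord)
 *     ORDER BY u.ord;
 *
 * the function scan already emits rows in ordinal order, so when "ord" is
 * mentioned in an equivalence class the path built here carries matching
 * pathkeys and the planner may omit an explicit sort.
 */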
2946
2947/*
2948 * set_values_pathlist
2949 * Build the (single) access path for a VALUES RTE
2950 */
2951static void
2952set_values_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
2953{
2954 Relids required_outer;
2955
2956 /*
2957 * We don't support pushing join clauses into the quals of a values scan,
2958 * but it could still have required parameterization due to LATERAL refs
2959 * in the values expressions.
2960 */
2961 required_outer = rel->lateral_relids;
2962
2963 /* Generate appropriate path */
2964 add_path(rel, create_valuesscan_path(root, rel, required_outer));
2965}
2966
2967/*
2968 * set_tablefunc_pathlist
2969 * Build the (single) access path for a table func RTE
2970 */
2971static void
2972set_tablefunc_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
2973{
2974 Relids required_outer;
2975
2976 /*
2977 * We don't support pushing join clauses into the quals of a tablefunc
2978 * scan, but it could still have required parameterization due to LATERAL
2979 * refs in the function expression.
2980 */
2981 required_outer = rel->lateral_relids;
2982
2983 /* Generate appropriate path */
2984 add_path(rel, create_tablefuncscan_path(root, rel,
2985 required_outer));
2986}
2987
2988/*
2989 * set_cte_pathlist
2990 * Build the (single) access path for a non-self-reference CTE RTE
2991 *
2992 * There's no need for a separate set_cte_size phase, since we don't
2993 * support join-qual-parameterized paths for CTEs.
2994 */
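/*
 * For example (hypothetical query), in
 *
 *     WITH c AS MATERIALIZED (SELECT ...)
 *     SELECT * FROM c JOIN c AS c2 USING (id);
 *
 * both references to "c" share the one plan already made for the CTE; this
 * function merely locates that plan and wraps it in a CTE scan path.
 */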
2995static void
2996set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
2997{
2998 Path *ctepath;
2999 Plan *cteplan;
3000 PlannerInfo *cteroot;
3001 Index levelsup;
3002 List *pathkeys;
3003 int ndx;
3004 ListCell *lc;
3005 int plan_id;
3006 Relids required_outer;
3007
3008 /*
3009 * Find the referenced CTE, and locate the path and plan previously made
3010 * for it.
3011 */
3012 levelsup = rte->ctelevelsup;
3013 cteroot = root;
3014 while (levelsup-- > 0)
3015 {
3016 cteroot = cteroot->parent_root;
3017 if (!cteroot) /* shouldn't happen */
3018 elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
3019 }
3020
3021 /*
3022 * Note: cte_plan_ids can be shorter than cteList, if we are still working
3023 * on planning the CTEs (ie, this is a side-reference from another CTE).
3024 * So we mustn't use forboth here.
3025 */
3026 ndx = 0;
3027 foreach(lc, cteroot->parse->cteList)
3028 {
3029 CommonTableExpr *cte = (CommonTableExpr *) lfirst(lc);
3030
3031 if (strcmp(cte->ctename, rte->ctename) == 0)
3032 break;
3033 ndx++;
3034 }
3035 if (lc == NULL) /* shouldn't happen */
3036 elog(ERROR, "could not find CTE \"%s\"", rte->ctename);
3037 if (ndx >= list_length(cteroot->cte_plan_ids))
3038 elog(ERROR, "could not find plan for CTE \"%s\"", rte->ctename);
3039 plan_id = list_nth_int(cteroot->cte_plan_ids, ndx);
3040 if (plan_id <= 0)
3041 elog(ERROR, "no plan was made for CTE \"%s\"", rte->ctename);
3042
3043 Assert(list_length(root->glob->subpaths) == list_length(root->glob->subplans));
3044 ctepath = (Path *) list_nth(root->glob->subpaths, plan_id - 1);
3045 cteplan = (Plan *) list_nth(root->glob->subplans, plan_id - 1);
3046
3047 /* Mark rel with estimated output rows, width, etc */
3048 set_cte_size_estimates(root, rel, cteplan->plan_rows);
3049
3050 /* Convert the ctepath's pathkeys to outer query's representation */
3051 pathkeys = convert_subquery_pathkeys(root,
3052 rel,
3053 ctepath->pathkeys,
3054 cteplan->targetlist);
3055
3056 /*
3057 * We don't support pushing join clauses into the quals of a CTE scan, but
3058 * it could still have required parameterization due to LATERAL refs in
3059 * its tlist.
3060 */
3061 required_outer = rel->lateral_relids;
3062
3063 /* Generate appropriate path */
3064 add_path(rel, create_ctescan_path(root, rel, pathkeys, required_outer));
3065}
3066
3067/*
3068 * set_namedtuplestore_pathlist
3069 * Build the (single) access path for a named tuplestore RTE
3070 *
3071 * There's no need for a separate set_namedtuplestore_size phase, since we
3072 * don't support join-qual-parameterized paths for tuplestores.
3073 */
3074static void
3075set_namedtuplestore_pathlist(PlannerInfo *root, RelOptInfo *rel,
3076 RangeTblEntry *rte)
3077{
3078 Relids required_outer;
3079
3080 /* Mark rel with estimated output rows, width, etc */
3081 set_namedtuplestore_size_estimates(root, rel);
3082
3083 /*
3084 * We don't support pushing join clauses into the quals of a tuplestore
3085 * scan, but it could still have required parameterization due to LATERAL
3086 * refs in its tlist.
3087 */
3088 required_outer = rel->lateral_relids;
3089
3090 /* Generate appropriate path */
3091 add_path(rel, create_namedtuplestorescan_path(root, rel, required_outer));
3092}
3093
3094/*
3095 * set_result_pathlist
3096 * Build the (single) access path for an RTE_RESULT RTE
3097 *
3098 * There's no need for a separate set_result_size phase, since we
3099 * don't support join-qual-parameterized paths for these RTEs.
3100 */
3101static void
3102set_result_pathlist(PlannerInfo *root, RelOptInfo *rel,
3103 RangeTblEntry *rte)
3104{
3105 Relids required_outer;
3106
3107 /* Mark rel with estimated output rows, width, etc */
3108 set_result_size_estimates(root, rel);
3109
3110 /*
3111 * We don't support pushing join clauses into the quals of a Result scan,
3112 * but it could still have required parameterization due to LATERAL refs
3113 * in its tlist.
3114 */
3115 required_outer = rel->lateral_relids;
3116
3117 /* Generate appropriate path */
3118 add_path(rel, create_resultscan_path(root, rel, required_outer));
3119}
3120
3121/*
3122 * set_worktable_pathlist
3123 * Build the (single) access path for a self-reference CTE RTE
3124 *
3125 * There's no need for a separate set_worktable_size phase, since we don't
3126 * support join-qual-parameterized paths for CTEs.
3127 */
3128static void
3129set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
3130{
3131 Path *ctepath;
3132 PlannerInfo *cteroot;
3133 Index levelsup;
3134 Relids required_outer;
3135
3136 /*
3137 * We need to find the non-recursive term's path, which is in the plan
3138 * level that's processing the recursive UNION, which is one level *below*
3139 * where the CTE comes from.
3140 */
3141 levelsup = rte->ctelevelsup;
3142 if (levelsup == 0) /* shouldn't happen */
3143 elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
3144 levelsup--;
3145 cteroot = root;
3146 while (levelsup-- > 0)
3147 {
3148 cteroot = cteroot->parent_root;
3149 if (!cteroot) /* shouldn't happen */
3150 elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
3151 }
3152 ctepath = cteroot->non_recursive_path;
3153 if (!ctepath) /* shouldn't happen */
3154 elog(ERROR, "could not find path for CTE \"%s\"", rte->ctename);
3155
3156 /* Mark rel with estimated output rows, width, etc */
3157 set_cte_size_estimates(root, rel, ctepath->rows);
3158
3159 /*
3160 * We don't support pushing join clauses into the quals of a worktable
3161 * scan, but it could still have required parameterization due to LATERAL
3162 * refs in its tlist. (I'm not sure this is actually possible given the
3163 * restrictions on recursive references, but it's easy enough to support.)
3164 */
3165 required_outer = rel->lateral_relids;
3166
3167 /* Generate appropriate path */
3168 add_path(rel, create_worktablescan_path(root, rel, required_outer));
3169}
3170
3171/*
3172 * generate_gather_paths
3173 * Generate parallel access paths for a relation by pushing a Gather or
3174 * Gather Merge on top of a partial path.
3175 *
3176 * This must not be called until after we're done creating all partial paths
3177 * for the specified relation. (Otherwise, add_partial_path might delete a
3178 * path that some GatherPath or GatherMergePath has a reference to.)
3179 *
3180 * If we're generating paths for a scan or join relation, override_rows will
3181 * be false, and we'll just use the relation's size estimate. When we're
3182 * being called for a partially-grouped or partially-distinct path, though, we
3183 * need to override the rowcount estimate. (It's not clear that the
3184 * particular value we're using here is actually best, but the underlying rel
3185 * has no estimate so we must do something.)
3186 */
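/*
 * The simple Gather case typically yields a plan shape like (illustrative):
 *
 *     Gather
 *       ->  Parallel Seq Scan on t
 *
 * while each sorted partial path additionally gets a Gather Merge that
 * preserves the subpath's ordering while merging the workers' streams.
 */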
3187void
3188generate_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
3189{
3190 Path *cheapest_partial_path;
3191 Path *simple_gather_path;
3192 ListCell *lc;
3193 double rows;
3194 double *rowsp = NULL;
3195
3196 /* If there are no partial paths, there's nothing to do here. */
3197 if (rel->partial_pathlist == NIL)
3198 return;
3199
3200 /* Should we override the rel's rowcount estimate? */
3201 if (override_rows)
3202 rowsp = &rows;
3203
3204 /*
3205 * The output of Gather is always unsorted, so there's only one partial
3206 * path of interest: the cheapest one. That will be the one at the front
3207 * of partial_pathlist because of the way add_partial_path works.
3208 */
3209 cheapest_partial_path = linitial(rel->partial_pathlist);
3210 rows = compute_gather_rows(cheapest_partial_path);
3211 simple_gather_path = (Path *)
3212 create_gather_path(root, rel, cheapest_partial_path, rel->reltarget,
3213 NULL, rowsp);
3214 add_path(rel, simple_gather_path);
3215
3216 /*
3217 * For each useful ordering, we can consider an order-preserving Gather
3218 * Merge.
3219 */
3220 foreach(lc, rel->partial_pathlist)
3221 {
3222 Path *subpath = (Path *) lfirst(lc);
3223 GatherMergePath *path;
3224
3225 if (subpath->pathkeys == NIL)
3226 continue;
3227
3228 rows = compute_gather_rows(subpath);
3229 path = create_gather_merge_path(root, rel, subpath, rel->reltarget,
3230 subpath->pathkeys, NULL, rowsp);
3231 add_path(rel, &path->path);
3232 }
3233}
3234
3235/*
3236 * get_useful_pathkeys_for_relation
3237 * Determine which orderings of a relation might be useful.
3238 *
3239 * Getting data in sorted order can be useful either because the requested
3240 * order matches the final output ordering for the overall query we're
3241 * planning, or because it enables an efficient merge join. Here, we try
3242 * to figure out which pathkeys to consider.
3243 *
3244 * This allows us to do incremental sort on top of an index scan under a gather
3245 * merge node, i.e. parallelized.
3246 *
3247 * If require_parallel_safe is true, we also require the expressions to
3248 * be parallel safe (which allows pushing the sort below Gather Merge).
3249 *
3250 * XXX At the moment this can only ever return a list with a single element,
3251 * because it looks at query_pathkeys only. So we might return the pathkeys
3252 * directly, but it seems plausible we'll want to consider other orderings
3253 * in the future. For example, we might want to consider pathkeys useful for
3254 * merge joins.
3255 */
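/*
 * For example (hypothetical query): with ORDER BY a, b, c where only "a"
 * and "b" can be computed and sorted below a Gather Merge, this returns
 * the prefix (a, b); rows then arrive mergeable on (a, b) and an
 * incremental sort above can add "c", instead of giving up on a parallel
 * sort entirely.
 */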
3256static List *
3257get_useful_pathkeys_for_relation(PlannerInfo *root, RelOptInfo *rel,
3258 bool require_parallel_safe)
3259{
3260 List *useful_pathkeys_list = NIL;
3261
3262 /*
3263 * Considering query_pathkeys is always worth it, because it might allow
3264 * us to avoid a total sort when we have a partially presorted path
3265 * available or to push the total sort into the parallel portion of the
3266 * query.
3267 */
3268 if (root->query_pathkeys)
3269 {
3270 ListCell *lc;
3271 int npathkeys = 0; /* useful pathkeys */
3272
3273 foreach(lc, root->query_pathkeys)
3274 {
3275 PathKey *pathkey = (PathKey *) lfirst(lc);
3276 EquivalenceClass *pathkey_ec = pathkey->pk_eclass;
3277
3278 /*
3279 * We can only build a sort for pathkeys that contain a
3280 * safe-to-compute-early EC member computable from the current
3281 * relation's reltarget, so ignore the remainder of the list as
3282 * soon as we find a pathkey without such a member.
3283 *
3284 * It's still worthwhile to return any prefix of the pathkeys list
3285 * that meets this requirement, as we may be able to do an
3286 * incremental sort.
3287 *
3288 * If requested, ensure the sort expression is parallel-safe too.
3289 */
3290 if (!relation_can_be_sorted_early(root, rel, pathkey_ec,
3291 require_parallel_safe))
3292 break;
3293
3294 npathkeys++;
3295 }
3296
3297 /*
3298 * The whole query_pathkeys list matches, so append it directly, to
3299 * allow comparing pathkeys easily by comparing list pointer. If we
3300 * have to truncate the pathkeys, we must make a copy, though.
3301 */
3302 if (npathkeys == list_length(root->query_pathkeys))
3303 useful_pathkeys_list = lappend(useful_pathkeys_list,
3304 root->query_pathkeys);
3305 else if (npathkeys > 0)
3306 useful_pathkeys_list = lappend(useful_pathkeys_list,
3307 list_copy_head(root->query_pathkeys,
3308 npathkeys));
3309 }
3310
3311 return useful_pathkeys_list;
3312}
3313
3314/*
3315 * generate_useful_gather_paths
3316 * Generate parallel access paths for a relation by pushing a Gather or
3317 * Gather Merge on top of a partial path.
3318 *
3319 * Unlike plain generate_gather_paths, this looks both at pathkeys of input
3320 * paths (aiming to preserve the ordering), but also considers ordering that
3321 * might be useful for nodes above the gather merge node, and tries to add
3322 * a sort (regular or incremental) to provide that.
3323 */
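/*
 * A typical shape added here (illustrative):
 *
 *     Gather Merge
 *       ->  Incremental Sort
 *             Sort Key: a, b
 *             Presorted Key: a
 *             ->  Parallel Index Scan using t_a_idx on t
 *
 * that is, the sort required for the query's ordering is pushed into the
 * parallel portion of the plan, below the Gather Merge.
 */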
3324void
3325generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
3326{
3327 ListCell *lc;
3328 double rows;
3329 double *rowsp = NULL;
3330 List *useful_pathkeys_list = NIL;
3331 Path *cheapest_partial_path = NULL;
3332
3333 /* If there are no partial paths, there's nothing to do here. */
3334 if (rel->partial_pathlist == NIL)
3335 return;
3336
3337 /* Should we override the rel's rowcount estimate? */
3338 if (override_rows)
3339 rowsp = &rows;
3340
3341 /* generate the regular gather (merge) paths */
3342 generate_gather_paths(root, rel, override_rows);
3343
3344 /* consider incremental sort for interesting orderings */
3345 useful_pathkeys_list = get_useful_pathkeys_for_relation(root, rel, true);
3346
3347 /* used for explicit (full) sort paths */
3348 cheapest_partial_path = linitial(rel->partial_pathlist);
3349
3350 /*
3351 * Consider sorted paths for each interesting ordering. We generate both
3352 * incremental and full sort.
3353 */
3354 foreach(lc, useful_pathkeys_list)
3355 {
3356 List *useful_pathkeys = lfirst(lc);
3357 ListCell *lc2;
3358 bool is_sorted;
3359 int presorted_keys;
3360
3361 foreach(lc2, rel->partial_pathlist)
3362 {
3363 Path *subpath = (Path *) lfirst(lc2);
3364 GatherMergePath *path;
3365
3366 is_sorted = pathkeys_count_contained_in(useful_pathkeys,
3367 subpath->pathkeys,
3368 &presorted_keys);
3369
3370 /*
3371 * We don't need to consider the case where a subpath is already
3372 * fully sorted because generate_gather_paths already creates a
3373 * gather merge path for every subpath that has pathkeys present.
3374 *
3375 * But since the subpath is already sorted, we know we don't need
3376 * to consider adding a sort (full or incremental) on top of it,
3377 * so we can continue here.
3378 */
3379 if (is_sorted)
3380 continue;
3381
3382 /*
3383 * Try at least sorting the cheapest path and also try
3384 * incrementally sorting any path which is partially sorted
3385 * already (no need to deal with paths which have presorted keys
3386 * when incremental sort is disabled unless it's the cheapest
3387 * input path).
3388 */
3389 if (subpath != cheapest_partial_path &&
3390 (presorted_keys == 0 || !enable_incremental_sort))
3391 continue;
3392
3393 /*
3394 * Consider regular sort for any path that's not presorted or if
3395 * incremental sort is disabled. We've no need to consider both
3396 * sort and incremental sort on the same path. We assume that
3397 * incremental sort is always faster when there are presorted
3398 * keys.
3399 *
3400 * This is not redundant with the gather paths created in
3401 * generate_gather_paths, because that doesn't generate ordered
3402 * output. Here we add an explicit sort to match the useful
3403 * ordering.
3404 */
3405 if (presorted_keys == 0 || !enable_incremental_sort)
3406 {
3407 subpath = (Path *) create_sort_path(root,
3408 rel,
3409 subpath,
3410 useful_pathkeys,
3411 -1.0);
3412 }
3413 else
3414 subpath = (Path *) create_incremental_sort_path(root,
3415 rel,
3416 subpath,
3417 useful_pathkeys,
3418 presorted_keys,
3419 -1);
3420 rows = compute_gather_rows(subpath);
3421 path = create_gather_merge_path(root, rel,
3422 subpath,
3423 rel->reltarget,
3424 subpath->pathkeys,
3425 NULL,
3426 rowsp);
3427
3428 add_path(rel, &path->path);
3429 }
3430 }
3431}
3432
3433/*
3434 * generate_grouped_paths
3435 * Generate paths for a grouped relation by adding sorted and hashed
3436 * partial aggregation paths on top of paths of the ungrouped relation.
3437 *
3438 * The information needed is provided by the RelAggInfo structure stored in
3439 * "grouped_rel".
3440 */
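/*
 * Illustrative example (hypothetical query): for
 *
 *     SELECT a.x, sum(b.v) FROM a JOIN b ON a.id = b.aid GROUP BY a.x;
 *
 * eager aggregation may partially aggregate "b" on b.aid before the join,
 * so the join sees one row per group rather than one per "b" row; the
 * partial results are then finalized above the join.
 */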
3441void
3442generate_grouped_paths(PlannerInfo *root, RelOptInfo *grouped_rel,
3443 RelOptInfo *rel)
3444{
3445 RelAggInfo *agg_info = grouped_rel->agg_info;
3446 AggClauseCosts agg_costs;
3447 bool can_hash;
3448 bool can_sort;
3449 Path *cheapest_total_path = NULL;
3450 Path *cheapest_partial_path = NULL;
3451 double dNumGroups = 0;
3452 double dNumPartialGroups = 0;
3453 List *group_pathkeys = NIL;
3454
3455 if (IS_DUMMY_REL(rel))
3456 {
3457 mark_dummy_rel(grouped_rel);
3458 return;
3459 }
3460
3461 /*
3462 * We push partial aggregation only to the lowest possible level in the
3463 * join tree that is deemed useful.
3464 */
3465 if (!bms_equal(agg_info->apply_agg_at, rel->relids) ||
3466 !agg_info->agg_useful)
3467 return;
3468
3469 MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
3470 get_agg_clause_costs(root, AGGSPLIT_INITIAL_SERIAL, &agg_costs);
3471
3472 /*
3473 * Determine whether it's possible to perform sort-based implementations
3474 * of grouping, and generate the pathkeys that represent the grouping
3475 * requirements in that case.
3476 */
3477 can_sort = grouping_is_sortable(agg_info->group_clauses);
3478 if (can_sort)
3479 {
3480 RelOptInfo *top_grouped_rel;
3481 List *top_group_tlist;
3482
3483 top_grouped_rel = IS_OTHER_REL(rel) ?
3484 rel->top_parent->grouped_rel : grouped_rel;
3485 top_group_tlist =
3486 make_tlist_from_pathtarget(top_grouped_rel->agg_info->target);
3487
3488 group_pathkeys =
3489 make_pathkeys_for_sortclauses(root, agg_info->group_clauses,
3490 top_group_tlist);
3491 }
3492
3493 /*
3494 * Determine whether we should consider hash-based implementations of
3495 * grouping.
3496 */
3497 Assert(root->numOrderedAggs == 0);
3498 can_hash = (agg_info->group_clauses != NIL &&
3499 grouping_is_hashable(agg_info->group_clauses));
3500
3501 /*
3502 * Consider whether we should generate partially aggregated non-partial
3503 * paths. We can only do this if we have a non-partial path.
3504 */
3505 if (rel->pathlist != NIL)
3506 {
3507 cheapest_total_path = rel->cheapest_total_path;
3508 Assert(cheapest_total_path != NULL);
3509 }
3510
3511 /*
3512 * If parallelism is possible for grouped_rel, then we should consider
3513 * generating partially-grouped partial paths. However, if the ungrouped
3514 * rel has no partial paths, then we can't.
3515 */
3516 if (grouped_rel->consider_parallel && rel->partial_pathlist != NIL)
3517 {
3518 cheapest_partial_path = linitial(rel->partial_pathlist);
3519 Assert(cheapest_partial_path != NULL);
3520 }
3521
3522 /* Estimate number of partial groups. */
3523 if (cheapest_total_path != NULL)
3524 dNumGroups = estimate_num_groups(root,
3525 agg_info->group_exprs,
3526 cheapest_total_path->rows,
3527 NULL, NULL);
3528 if (cheapest_partial_path != NULL)
3529 dNumPartialGroups = estimate_num_groups(root,
3530 agg_info->group_exprs,
3531 cheapest_partial_path->rows,
3532 NULL, NULL);
3533
3534 if (can_sort && cheapest_total_path != NULL)
3535 {
3536 ListCell *lc;
3537
3538 /*
3539 * Use any available suitably-sorted path as input, and also consider
3540 * sorting the cheapest-total path and incremental sort on any paths
3541 * with presorted keys.
3542 *
3543 * To save planning time, we ignore parameterized input paths unless
3544 * they are the cheapest-total path.
3545 */
3546 foreach(lc, rel->pathlist)
3547 {
3548 Path *input_path = (Path *) lfirst(lc);
3549 Path *path;
3550 bool is_sorted;
3551 int presorted_keys;
3552
3553 /*
3554 * Ignore parameterized paths that are not the cheapest-total
3555 * path.
3556 */
3557 if (input_path->param_info &&
3558 input_path != cheapest_total_path)
3559 continue;
3560
3561 is_sorted = pathkeys_count_contained_in(group_pathkeys,
3562 input_path->pathkeys,
3563 &presorted_keys);
3564
3565 /*
3566 * Ignore paths that are not suitably or partially sorted, unless
3567 * they are the cheapest total path (no need to deal with paths
3568 * which have presorted keys when incremental sort is disabled).
3569 */
3570 if (!is_sorted && input_path != cheapest_total_path &&
3571 (presorted_keys == 0 || !enable_incremental_sort))
3572 continue;
3573
3574 /*
3575 * Since the path originates from a non-grouped relation that is
3576 * not aware of eager aggregation, we must ensure that it provides
3577 * the correct input for partial aggregation.
3578 */
3579 path = (Path *) create_projection_path(root,
3580 grouped_rel,
3581 input_path,
3582 agg_info->agg_input);
3583
3584 if (!is_sorted)
3585 {
3586 /*
3587 * We've no need to consider both a sort and incremental sort.
3588 * We'll just do a sort if there are no presorted keys and an
3589 * incremental sort when there are presorted keys.
3590 */
3591 if (presorted_keys == 0 || !enable_incremental_sort)
3592 path = (Path *) create_sort_path(root,
3593 grouped_rel,
3594 path,
3595 group_pathkeys,
3596 -1.0);
3597 else
3598 path = (Path *) create_incremental_sort_path(root,
3599 grouped_rel,
3600 path,
3601 group_pathkeys,
3602 presorted_keys,
3603 -1.0);
3604 }
3605
3606 /*
3607 * qual is NIL because the HAVING clause cannot be evaluated until
3608 * the final value of the aggregate is known.
3609 */
3610 path = (Path *) create_agg_path(root,
3611 grouped_rel,
3612 path,
3613 agg_info->target,
3614 AGG_SORTED,
3615 AGGSPLIT_INITIAL_SERIAL,
3616 agg_info->group_clauses,
3617 NIL,
3618 &agg_costs,
3619 dNumGroups);
3620
3621 add_path(grouped_rel, path);
3622 }
3623 }
3624
3625 if (can_sort && cheapest_partial_path != NULL)
3626 {
3627 ListCell *lc;
3628
3629 /* Similar to above logic, but for partial paths. */
3630 foreach(lc, rel->partial_pathlist)
3631 {
3632 Path *input_path = (Path *) lfirst(lc);
3633 Path *path;
3634 bool is_sorted;
3635 int presorted_keys;
3636
3637 is_sorted = pathkeys_count_contained_in(group_pathkeys,
3638 input_path->pathkeys,
3639 &presorted_keys);
3640
3641 /*
3642 * Ignore paths that are not suitably or partially sorted, unless
3643 * they are the cheapest partial path (no need to deal with paths
3644 * which have presorted keys when incremental sort is disabled).
3645 */
3646 if (!is_sorted && input_path != cheapest_partial_path &&
3647 (presorted_keys == 0 || !enable_incremental_sort))
3648 continue;
3649
3650 /*
3651 * Since the path originates from a non-grouped relation that is
3652 * not aware of eager aggregation, we must ensure that it provides
3653 * the correct input for partial aggregation.
3654 */
3655 path = (Path *) create_projection_path(root,
3656 grouped_rel,
3657 input_path,
3658 agg_info->agg_input);
3659
3660 if (!is_sorted)
3661 {
3662 /*
3663 * We've no need to consider both a sort and incremental sort.
3664 * We'll just do a sort if there are no presorted keys and an
3665 * incremental sort when there are presorted keys.
3666 */
3667 if (presorted_keys == 0 || !enable_incremental_sort)
3668 path = (Path *) create_sort_path(root,
3669 grouped_rel,
3670 path,
3671 group_pathkeys,
3672 -1.0);
3673 else
3674 path = (Path *) create_incremental_sort_path(root,
3675 grouped_rel,
3676 path,
3677 group_pathkeys,
3678 presorted_keys,
3679 -1.0);
3680 }
3681
3682 /*
3683 * qual is NIL because the HAVING clause cannot be evaluated until
3684 * the final value of the aggregate is known.
3685 */
3686 path = (Path *) create_agg_path(root,
3687 grouped_rel,
3688 path,
3689 agg_info->target,
3690 AGG_SORTED,
3691 AGGSPLIT_INITIAL_SERIAL,
3692 agg_info->group_clauses,
3693 NIL,
3694 &agg_costs,
3695 dNumPartialGroups);
3696
3697 add_partial_path(grouped_rel, path);
3698 }
3699 }
3700
3701 /*
3702 * Add a partially-grouped HashAgg Path where possible
3703 */
3704 if (can_hash && cheapest_total_path != NULL)
3705 {
3706 Path *path;
3707
3708 /*
3709 * Since the path originates from a non-grouped relation that is not
3710 * aware of eager aggregation, we must ensure that it provides the
3711 * correct input for partial aggregation.
3712 */
3713 path = (Path *) create_projection_path(root,
3714 grouped_rel,
3715 cheapest_total_path,
3716 agg_info->agg_input);
3717
3718 /*
3719 * qual is NIL because the HAVING clause cannot be evaluated until the
3720 * final value of the aggregate is known.
3721 */
3722 path = (Path *) create_agg_path(root,
3723 grouped_rel,
3724 path,
3725 agg_info->target,
3726 AGG_HASHED,
3727 AGGSPLIT_INITIAL_SERIAL,
3728 agg_info->group_clauses,
3729 NIL,
3730 &agg_costs,
3731 dNumGroups);
3732
3733 add_path(grouped_rel, path);
3734 }
3735
3736 /*
3737 * Now add a partially-grouped HashAgg partial Path where possible
3738 */
3739 if (can_hash && cheapest_partial_path != NULL)
3740 {
3741 Path *path;
3742
3743 /*
3744 * Since the path originates from a non-grouped relation that is not
3745 * aware of eager aggregation, we must ensure that it provides the
3746 * correct input for partial aggregation.
3747 */
3748 path = (Path *) create_projection_path(root,
3749 grouped_rel,
3750 cheapest_partial_path,
3751 agg_info->agg_input);
3752
3753 /*
3754 * qual is NIL because the HAVING clause cannot be evaluated until the
3755 * final value of the aggregate is known.
3756 */
3757 path = (Path *) create_agg_path(root,
3758 grouped_rel,
3759 path,
3760 agg_info->target,
3761 AGG_HASHED,
3762 AGGSPLIT_INITIAL_SERIAL,
3763 agg_info->group_clauses,
3764 NIL,
3765 &agg_costs,
3766 dNumPartialGroups);
3767
3768 add_partial_path(grouped_rel, path);
3769 }
3770}
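/*
 * Illustrative example (not from the original sources; t1, t2 are
 * hypothetical tables): with enable_eager_aggregate on, a query like
 *
 *     SELECT t1.a, sum(t2.x)
 *     FROM t1 JOIN t2 ON t1.id = t2.id
 *     GROUP BY t1.a;
 *
 * can have partial-aggregation paths (AGG_SORTED or AGG_HASHED, split
 * AGGSPLIT_INITIAL_SERIAL) built here on top of a non-grouped rel's
 * paths, pushing aggregation below the join; any HAVING qual waits
 * for the finalization step, as the comments above note.
 */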
3771
3772/*
3773 * make_rel_from_joinlist
3774 * Build access paths using a "joinlist" to guide the join path search.
3775 *
3776 * See comments for deconstruct_jointree() for definition of the joinlist
3777 * data structure.
3778 */
3779static RelOptInfo *
3780make_rel_from_joinlist(PlannerInfo *root, List *joinlist)
3781{
3782 int levels_needed;
3783 List *initial_rels;
3784 ListCell *jl;
3785
3786 /*
3787 * Count the number of child joinlist nodes. This is the depth of the
3788 * dynamic-programming algorithm we must employ to consider all ways of
3789 * joining the child nodes.
3790 */
3791 levels_needed = list_length(joinlist);
3792
3793 if (levels_needed <= 0)
3794 return NULL; /* nothing to do? */
3795
3796 /*
3797 * Construct a list of rels corresponding to the child joinlist nodes.
3798 * This may contain both base rels and rels constructed according to
3799 * sub-joinlists.
3800 */
3801 initial_rels = NIL;
3802 foreach(jl, joinlist)
3803 {
3804 Node *jlnode = (Node *) lfirst(jl);
3805 RelOptInfo *thisrel;
3806
3807 if (IsA(jlnode, RangeTblRef))
3808 {
3809 int varno = ((RangeTblRef *) jlnode)->rtindex;
3810
3811 thisrel = find_base_rel(root, varno);
3812 }
3813 else if (IsA(jlnode, List))
3814 {
3815 /* Recurse to handle subproblem */
3816 thisrel = make_rel_from_joinlist(root, (List *) jlnode);
3817 }
3818 else
3819 {
3820 elog(ERROR, "unrecognized joinlist node type: %d",
3821 (int) nodeTag(jlnode));
3822 thisrel = NULL; /* keep compiler quiet */
3823 }
3824
3825 initial_rels = lappend(initial_rels, thisrel);
3826 }
3827
3828 if (levels_needed == 1)
3829 {
3830 /*
3831 * Single joinlist node, so we're done.
3832 */
3833 return (RelOptInfo *) linitial(initial_rels);
3834 }
3835 else
3836 {
3837 /*
3838 * Consider the different orders in which we could join the rels,
3839 * using a plugin, GEQO, or the regular join search code.
3840 *
3841 * We put the initial_rels list into a PlannerInfo field because
3842 * has_legal_joinclause() needs to look at it (ugly :-().
3843 */
3844 root->initial_rels = initial_rels;
3845
3846 if (join_search_hook)
3847 return (*join_search_hook) (root, levels_needed, initial_rels);
3848 else if (enable_geqo && levels_needed >= geqo_threshold)
3849 return geqo(root, levels_needed, initial_rels);
3850 else
3851 return standard_join_search(root, levels_needed, initial_rels);
3852 }
3853}
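/*
 * For example (illustrative, not from the original sources): for
 * "SELECT ... FROM a, b, c" the joinlist is (A B C), so levels_needed
 * is 3 and the rels are joined by the dynamic-programming search
 * below.  A nested joinlist such as ((A B) C), as produced when
 * deconstruct_jointree keeps a subproblem separate (e.g. when
 * from_collapse_limit would be exceeded), makes the recursive call
 * plan (A B) first and then treats its result as a single rel.
 */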
3854
3855/*
3856 * standard_join_search
3857 * Find possible joinpaths for a query by successively finding ways
3858 * to join component relations into join relations.
3859 *
3860 * 'levels_needed' is the number of iterations needed, ie, the number of
3861 * independent jointree items in the query. This is > 1.
3862 *
3863 * 'initial_rels' is a list of RelOptInfo nodes for each independent
3864 * jointree item. These are the components to be joined together.
3865 * Note that levels_needed == list_length(initial_rels).
3866 *
3867 * Returns the final level of join relations, i.e., the relation that is
3868 * the result of joining all the original relations together.
3869 * At least one implementation path must be provided for this relation and
3870 * all required sub-relations.
3871 *
3872 * To support loadable plugins that modify planner behavior by changing the
3873 * join searching algorithm, we provide a hook variable that lets a plugin
3874 * replace or supplement this function. Any such hook must return the same
3875 * final join relation as the standard code would, but it might have a
3876 * different set of implementation paths attached, and only the sub-joinrels
3877 * needed for these paths need have been instantiated.
3878 *
3879 * Note to plugin authors: the functions invoked during standard_join_search()
3880 * modify root->join_rel_list and root->join_rel_hash. If you want to do more
3881 * than one join-order search, you'll probably need to save and restore the
3882 * original states of those data structures. See geqo_eval() for an example.
3883 */
3884RelOptInfo *
3885standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
3886{
3887 int lev;
3888 RelOptInfo *rel;
3889
3890 /*
3891 * This function cannot be invoked recursively within any one planning
3892 * problem, so join_rel_level[] can't be in use already.
3893 */
3894 Assert(root->join_rel_level == NULL);
3895
3896 /*
3897 * We employ a simple "dynamic programming" algorithm: we first find all
3898 * ways to build joins of two jointree items, then all ways to build joins
3899 * of three items (from two-item joins and single items), then four-item
3900 * joins, and so on until we have considered all ways to join all the
3901 * items into one rel.
3902 *
3903 * root->join_rel_level[j] is a list of all the j-item rels. Initially we
3904 * set root->join_rel_level[1] to represent all the single-jointree-item
3905 * relations.
3906 */
3907 root->join_rel_level = (List **) palloc0((levels_needed + 1) * sizeof(List *));
3908
3909 root->join_rel_level[1] = initial_rels;
3910
3911 for (lev = 2; lev <= levels_needed; lev++)
3912 {
3913 ListCell *lc;
3914
3915 /*
3916 * Determine all possible pairs of relations to be joined at this
3917 * level, and build paths for making each one from every available
3918 * pair of lower-level relations.
3919 */
3920 join_search_one_level(root, lev);
3921
3922 /*
3923 * Run generate_partitionwise_join_paths() and
3924 * generate_useful_gather_paths() for each just-processed joinrel. We
3925 * could not do this earlier because both regular and partial paths
3926 * can get added to a particular joinrel at multiple times within
3927 * join_search_one_level.
3928 *
3929 * After that, we're done creating paths for the joinrel, so run
3930 * set_cheapest().
3931 *
3932 * In addition, we also run generate_grouped_paths() for the grouped
3933 * relation of each just-processed joinrel, and run set_cheapest() for
3934 * the grouped relation afterwards.
3935 */
3936 foreach(lc, root->join_rel_level[lev])
3937 {
3938 bool is_top_rel;
3939
3940 rel = (RelOptInfo *) lfirst(lc);
3941
3942 is_top_rel = bms_equal(rel->relids, root->all_query_rels);
3943
3944 /* Create paths for partitionwise joins. */
3945 generate_partitionwise_join_paths(root, rel);
3946
3947 /*
3948 * Except for the topmost scan/join rel, consider gathering
3949 * partial paths. We'll do the same for the topmost scan/join rel
3950 * once we know the final targetlist (see grouping_planner's call
3951 * to apply_scanjoin_target_to_paths).
3952 */
3953 if (!is_top_rel)
3954 generate_useful_gather_paths(root, rel, false);
3955
3956 /* Find and save the cheapest paths for this rel */
3957 set_cheapest(rel);
3958
3959 /*
3960 * Except for the topmost scan/join rel, consider generating
3961 * partial aggregation paths for the grouped relation on top of
3962 * the paths of this rel. After that, we're done creating paths
3963 * for the grouped relation, so run set_cheapest().
3964 */
3965 if (rel->grouped_rel != NULL && !is_top_rel)
3966 {
3967 RelOptInfo *grouped_rel = rel->grouped_rel;
3968
3969 Assert(IS_GROUPED_REL(grouped_rel));
3970
3971 generate_grouped_paths(root, grouped_rel, rel);
3972 set_cheapest(grouped_rel);
3973 }
3974
3975#ifdef OPTIMIZER_DEBUG
3976 pprint(rel);
3977#endif
3978 }
3979 }
3980
3981 /*
3982 * We should have a single rel at the final level.
3983 */
3984 if (root->join_rel_level[levels_needed] == NIL)
3985 elog(ERROR, "failed to build any %d-way joins", levels_needed);
3986 Assert(list_length(root->join_rel_level[levels_needed]) == 1);
3987
3988 rel = (RelOptInfo *) linitial(root->join_rel_level[levels_needed]);
3989
3990 root->join_rel_level = NULL;
3991
3992 return rel;
3993}
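/*
 * Worked example (illustrative): with initial_rels = {A, B, C, D},
 * join_rel_level[1] = {A, B, C, D}; level 2 holds the feasible two-way
 * joins (AB, AC, ...); level 3 combines level-2 rels with level-1
 * rels; and level 4 must contain exactly one rel, ABCD, which is
 * returned.  If enable_geqo is on and levels_needed >= geqo_threshold,
 * the caller uses the genetic search in geqo() instead of this
 * function.
 */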
3994
3995/*****************************************************************************
3996 * PUSHING QUALS DOWN INTO SUBQUERIES
3997 *****************************************************************************/
3998
3999/*
4000 * subquery_is_pushdown_safe - is a subquery safe for pushing down quals?
4001 *
4002 * subquery is the particular component query being checked. topquery
4003 * is the top component of a set-operations tree (the same Query if no
4004 * set-op is involved).
4005 *
4006 * Conditions checked here:
4007 *
4008 * 1. If the subquery has a LIMIT clause, we must not push down any quals,
4009 * since that could change the set of rows returned.
4010 *
4011 * 2. If the subquery contains EXCEPT or EXCEPT ALL set ops we cannot push
4012 * quals into it, because that could change the results.
4013 *
4014 * 3. If the subquery uses DISTINCT, we cannot push volatile quals into it.
4015 * This is because upper-level quals should semantically be evaluated only
4016 * once per distinct row, not once per original row, and if the qual is
4017 * volatile then extra evaluations could change the results. (This issue
4018 * does not apply to other forms of aggregation such as GROUP BY, because
4019 * when those are present we push into HAVING not WHERE, so that the quals
4020 * are still applied after aggregation.)
4021 *
4022 * 4. If the subquery contains window functions, we cannot push volatile quals
4023 * into it. The issue here is a bit different from DISTINCT: a volatile qual
4024 * might succeed for some rows of a window partition and fail for others,
4025 * thereby changing the partition contents and thus the window functions'
4026 * results for rows that remain.
4027 *
4028 * 5. If the subquery contains any set-returning functions in its targetlist,
4029 * we cannot push volatile quals into it. That would push them below the SRFs
4030 * and thereby change the number of times they are evaluated. Also, a
4031 * volatile qual could succeed for some SRF output rows and fail for others,
4032 * a behavior that cannot occur if it's evaluated before SRF expansion.
4033 *
4034 * 6. If the subquery has nonempty grouping sets, we cannot push down any
4035 * quals. The concern here is that a qual referencing a "constant" grouping
4036 * column could get constant-folded, which would be improper because the value
4037 * is potentially nullable by grouping-set expansion. This restriction could
4038 * be removed if we had a parsetree representation that shows that such
4039 * grouping columns are not really constant. (There are other ideas that
4040 * could be used to relax this restriction, but that's the approach most
4041 * likely to get taken in the future. Note that there's not much to be gained
4042 * so long as subquery_planner can't move HAVING clauses to WHERE within such
4043 * a subquery.)
4044 *
4045 * In addition, we make several checks on the subquery's output columns to see
4046 * if it is safe to reference them in pushed-down quals. If output column k
4047 * is found to be unsafe to reference, we set the reason for that inside
4048 * safetyInfo->unsafeFlags[k], but we don't reject the subquery overall since
4049 * column k might not be referenced by some/all quals. The unsafeFlags[]
4050 * array will be consulted later by qual_is_pushdown_safe(). It's better to
4051 * do it this way than to make the checks directly in qual_is_pushdown_safe(),
4052 * because when the subquery involves set operations we have to check the
4053 * output expressions in each arm of the set op.
4054 *
4055 * Note: pushing quals into a DISTINCT subquery is theoretically dubious:
4056 * we're effectively assuming that the quals cannot distinguish values that
4057 * the DISTINCT's equality operator sees as equal, yet there are many
4058 * counterexamples to that assumption. However use of such a qual with a
4059 * DISTINCT subquery would be unsafe anyway, since there's no guarantee which
4060 * "equal" value will be chosen as the output value by the DISTINCT operation.
4061 * So we don't worry too much about that. Another objection is that if the
4062 * qual is expensive to evaluate, running it for each original row might cost
4063 * more than we save by eliminating rows before the DISTINCT step. But it
4064 * would be very hard to estimate that at this stage, and in practice pushdown
4065 * seldom seems to make things worse, so we ignore that problem too.
4066 *
4067 * Note: likewise, pushing quals into a subquery with window functions is a
4068 * bit dubious: the quals might remove some rows of a window partition while
4069 * leaving others, causing changes in the window functions' results for the
4070 * surviving rows. We insist that such a qual reference only partitioning
4071 * columns, but again that only protects us if the qual does not distinguish
4072 * values that the partitioning equality operator sees as equal. The risks
4073 * here are perhaps larger than for DISTINCT, since no de-duplication of rows
4074 * occurs and thus there is no theoretical problem with such a qual. But
4075 * we'll do this anyway because the potential performance benefits are very
4076 * large, and we've seen no field complaints about the longstanding comparable
4077 * behavior with DISTINCT.
4078 */
4079static bool
4080subquery_is_pushdown_safe(Query *subquery, Query *topquery,
4081 pushdown_safety_info *safetyInfo)
4082{
4083 SetOperationStmt *topop;
4084
4085 /* Check point 1 */
4086 if (subquery->limitOffset != NULL || subquery->limitCount != NULL)
4087 return false;
4088
4089 /* Check point 6 */
4090 if (subquery->groupClause && subquery->groupingSets)
4091 return false;
4092
4093 /* Check points 3, 4, and 5 */
4094 if (subquery->distinctClause ||
4095 subquery->hasWindowFuncs ||
4096 subquery->hasTargetSRFs)
4097 safetyInfo->unsafeVolatile = true;
4098
4099 /*
4100 * If we're at a leaf query, check for unsafe expressions in its target
4101 * list, and mark any reasons why they're unsafe in unsafeFlags[].
4102 * (Non-leaf nodes in setop trees have only simple Vars in their tlists,
4103 * so no need to check them.)
4104 */
4105 if (subquery->setOperations == NULL)
4106 check_output_expressions(subquery, safetyInfo);
4107
4108 /* Are we at top level, or looking at a setop component? */
4109 if (subquery == topquery)
4110 {
4111 /* Top level, so check any component queries */
4112 if (subquery->setOperations != NULL)
4113 if (!recurse_pushdown_safe(subquery->setOperations, topquery,
4114 safetyInfo))
4115 return false;
4116 }
4117 else
4118 {
4119 /* Setop component must not have more components (too weird) */
4120 if (subquery->setOperations != NULL)
4121 return false;
4122 /* Check whether setop component output types match top level */
4123 topop = castNode(SetOperationStmt, topquery->setOperations);
4124 Assert(topop);
4125 compare_tlist_datatypes(subquery->targetList,
4126 topop->colTypes,
4127 safetyInfo);
4128 }
4129 return true;
4130}
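/*
 * Illustrative example (hypothetical table t, not from the original
 * sources): in
 *
 *     SELECT * FROM (SELECT a FROM t LIMIT 10) ss WHERE ss.a > 0;
 *
 * the qual "a > 0" cannot be pushed into the subquery: filtering
 * before the LIMIT is applied could change which ten rows are
 * returned (point 1 above).
 */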
4131
4132/*
4133 * Helper routine to recurse through setOperations tree
4134 */
4135static bool
4136recurse_pushdown_safe(Node *setOp, Query *topquery,
4137 pushdown_safety_info *safetyInfo)
4138{
4139 if (IsA(setOp, RangeTblRef))
4140 {
4141 RangeTblRef *rtr = (RangeTblRef *) setOp;
4142 RangeTblEntry *rte = rt_fetch(rtr->rtindex, topquery->rtable);
4143 Query *subquery = rte->subquery;
4144
4145 Assert(subquery != NULL);
4146 return subquery_is_pushdown_safe(subquery, topquery, safetyInfo);
4147 }
4148 else if (IsA(setOp, SetOperationStmt))
4149 {
4150 SetOperationStmt *op = (SetOperationStmt *) setOp;
4151
4152 /* EXCEPT is no good (point 2 for subquery_is_pushdown_safe) */
4153 if (op->op == SETOP_EXCEPT)
4154 return false;
4155 /* Else recurse */
4156 if (!recurse_pushdown_safe(op->larg, topquery, safetyInfo))
4157 return false;
4158 if (!recurse_pushdown_safe(op->rarg, topquery, safetyInfo))
4159 return false;
4160 }
4161 else
4162 {
4163 elog(ERROR, "unrecognized node type: %d",
4164 (int) nodeTag(setOp));
4165 }
4166 return true;
4167}
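/*
 * Illustrative sketch (hypothetical tables t, u): for
 *
 *     SELECT * FROM (SELECT a FROM t EXCEPT SELECT a FROM u) ss
 *     WHERE ss.a > 0;
 *
 * no pushdown is attempted: a qual can distinguish values that the
 * EXCEPT's equality operator treats as equal, so filtering u's side
 * could leave behind t rows that an unfiltered EXCEPT would have
 * cancelled (point 2 for subquery_is_pushdown_safe).
 */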
4168
4169/*
4170 * check_output_expressions - check subquery's output expressions for safety
4171 *
4172 * There are several cases in which it's unsafe to push down an upper-level
4173 * qual if it references a particular output column of a subquery. We check
4174 * each output column of the subquery and set flags in unsafeFlags[k] when we
4175 * see that column is unsafe for a pushed-down qual to reference. The
4176 * conditions checked here are:
4177 *
4178 * 1. We must not push down any quals that refer to subselect outputs that
4179 * return sets, else we'd introduce functions-returning-sets into the
4180 * subquery's WHERE/HAVING quals.
4181 *
4182 * 2. We must not push down any quals that refer to subselect outputs that
4183 * contain volatile functions, for fear of introducing strange results due
4184 * to multiple evaluation of a volatile function.
4185 *
4186 * 3. If the subquery uses DISTINCT ON, we must not push down any quals that
4187 * refer to non-DISTINCT output columns, because that could change the set
4188 * of rows returned. (This condition is vacuous for DISTINCT, because then
4189 * there are no non-DISTINCT output columns, so we needn't check. Note that
4190 * subquery_is_pushdown_safe already reported that we can't use volatile
4191 * quals if there's DISTINCT or DISTINCT ON.)
4192 *
4193 * 4. If the subquery has any window functions, we must not push down quals
4194 * that reference any output columns that are not listed in all the subquery's
4195 * window PARTITION BY clauses. We can push down quals that use only
4196 * partitioning columns because they should succeed or fail identically for
4197 * every row of any one window partition, and totally excluding some
4198 * partitions will not change a window function's results for remaining
4199 * partitions. (Again, this also requires nonvolatile quals, but
4200 * subquery_is_pushdown_safe handles that.) Subquery columns marked as
4201 * unsafe for this reason can still have WindowClause run conditions pushed
4202 * down.
4203 */
4204static void
4205check_output_expressions(Query *subquery, pushdown_safety_info *safetyInfo)
4206{
4207 ListCell *lc;
4208
4209 foreach(lc, subquery->targetList)
4210 {
4211 TargetEntry *tle = (TargetEntry *) lfirst(lc);
4212
4213 if (tle->resjunk)
4214 continue; /* ignore resjunk columns */
4215
4216 /* Functions returning sets are unsafe (point 1) */
4217 if (subquery->hasTargetSRFs &&
4218 (safetyInfo->unsafeFlags[tle->resno] &
4219 UNSAFE_HAS_SET_FUNC) == 0 &&
4220 expression_returns_set((Node *) tle->expr))
4221 {
4222 safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_HAS_SET_FUNC;
4223 continue;
4224 }
4225
4226 /* Volatile functions are unsafe (point 2) */
4227 if ((safetyInfo->unsafeFlags[tle->resno] &
4228 UNSAFE_HAS_VOLATILE_FUNC) == 0 &&
4229 contain_volatile_functions((Node *) tle->expr))
4230 {
4231 safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_HAS_VOLATILE_FUNC;
4232 continue;
4233 }
4234
4235 /* If subquery uses DISTINCT ON, check point 3 */
4236 if (subquery->hasDistinctOn &&
4237 (safetyInfo->unsafeFlags[tle->resno] &
4238 UNSAFE_NOTIN_DISTINCTON_CLAUSE) == 0 &&
4239 !targetIsInSortList(tle, InvalidOid, subquery->distinctClause))
4240 {
4241 /* non-DISTINCT column, so mark it unsafe */
4242 safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_NOTIN_DISTINCTON_CLAUSE;
4243 continue;
4244 }
4245
4246 /* If subquery uses window functions, check point 4 */
4247 if (subquery->hasWindowFuncs &&
4248 (safetyInfo->unsafeFlags[tle->resno] &
4249 UNSAFE_NOTIN_PARTITIONBY_CLAUSE) == 0 &&
4250 !targetIsInAllPartitionLists(tle, subquery))
4251 {
4252 /* not present in all PARTITION BY clauses, so mark it unsafe */
4253 safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_NOTIN_PARTITIONBY_CLAUSE;
4254 continue;
4255 }
4256 }
4257}
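/*
 * Illustrative example (hypothetical table t): for
 *
 *     SELECT * FROM (SELECT DISTINCT ON (a) a, b FROM t
 *                    ORDER BY a, b) ss
 *     WHERE ss.b = 1;
 *
 * column b gets UNSAFE_NOTIN_DISTINCTON_CLAUSE, since filtering on b
 * before the DISTINCT ON step could change which row is kept for each
 * value of a (point 3 above); a qual on a alone remains pushable.
 */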
4258
4259/*
4260 * For subqueries using UNION/UNION ALL/INTERSECT/INTERSECT ALL, we can
4261 * push quals into each component query, but the quals can only reference
4262 * subquery columns that suffer no type coercions in the set operation.
4263 * Otherwise there are possible semantic gotchas. So, we check the
4264 * component queries to see if any of them have output types different from
4265 * the top-level setop outputs. We set the UNSAFE_TYPE_MISMATCH bit in
4266 * unsafeFlags[k] if column k has different type in any component.
4267 *
4268 * We don't have to care about typmods here: the only allowed difference
4269 * between set-op input and output typmods is input is a specific typmod
4270 * and output is -1, and that does not require a coercion.
4271 *
4272 * tlist is a subquery tlist.
4273 * colTypes is an OID list of the top-level setop's output column types.
4274 * safetyInfo is the pushdown_safety_info to set unsafeFlags[] for.
4275 */
4276static void
4277compare_tlist_datatypes(List *tlist, List *colTypes,
4278 pushdown_safety_info *safetyInfo)
4279{
4280 ListCell *l;
4281 ListCell *colType = list_head(colTypes);
4282
4283 foreach(l, tlist)
4284 {
4285 TargetEntry *tle = (TargetEntry *) lfirst(l);
4286
4287 if (tle->resjunk)
4288 continue; /* ignore resjunk columns */
4289 if (colType == NULL)
4290 elog(ERROR, "wrong number of tlist entries");
4291 if (exprType((Node *) tle->expr) != lfirst_oid(colType))
4292 safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_TYPE_MISMATCH;
4293 colType = lnext(colTypes, colType);
4294 }
4295 if (colType != NULL)
4296 elog(ERROR, "wrong number of tlist entries");
4297}
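/*
 * Illustrative example (hypothetical tables t1, t2): in
 *
 *     SELECT * FROM (SELECT a::smallint FROM t1
 *                    UNION ALL
 *                    SELECT a::integer FROM t2) ss
 *     WHERE ss.a = 42;
 *
 * the setop output column is integer, so t1's arm (smallint) gets
 * UNSAFE_TYPE_MISMATCH: pushing the qual into that arm would evaluate
 * it against the pre-coercion value.
 */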
4298
4299/*
4300 * targetIsInAllPartitionLists
4301 * True if the TargetEntry is listed in the PARTITION BY clause
4302 * of every window defined in the query.
4303 *
4304 * It would be safe to ignore windows not actually used by any window
4305 * function, but it's not easy to get that info at this stage; and it's
4306 * unlikely to be useful to spend any extra cycles getting it, since
4307 * unreferenced window definitions are probably infrequent in practice.
4308 */
4309static bool
4310targetIsInAllPartitionLists(TargetEntry *tle, Query *query)
4311{
4312 ListCell *lc;
4313
4314 foreach(lc, query->windowClause)
4315 {
4316 WindowClause *wc = (WindowClause *) lfirst(lc);
4317
4318 if (!targetIsInSortList(tle, InvalidOid, wc->partitionClause))
4319 return false;
4320 }
4321 return true;
4322}
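/*
 * Illustrative example (hypothetical table t): in
 *
 *     SELECT * FROM (SELECT a, b,
 *                           rank() OVER (PARTITION BY a ORDER BY b) rnk
 *                    FROM t) ss
 *     WHERE ss.a = 10;
 *
 * "a = 10" only removes whole partitions, so it can be pushed down,
 * while a qual on b would remove rows from within partitions and
 * change rnk for the surviving rows, so b is not pushable.
 */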
4323
4324/*
4325 * qual_is_pushdown_safe - is a particular rinfo safe to push down?
4326 *
4327 * rinfo is a restriction clause applying to the given subquery (whose RTE
4328 * has index rti in the parent query).
4329 *
4330 * Conditions checked here:
4331 *
4332 * 1. rinfo's clause must not contain any SubPlans (mainly because it's
4333 * unclear that it will work correctly: SubLinks will already have been
4334 * transformed into SubPlans in the qual, but not in the subquery). Note that
4335 * SubLinks that transform to initplans are safe, and will be accepted here
4336 * because what we'll see in the qual is just a Param referencing the initplan
4337 * output.
4338 *
4339 * 2. If unsafeVolatile is set, rinfo's clause must not contain any volatile
4340 * functions.
4341 *
4342 * 3. If unsafeLeaky is set, rinfo's clause must not contain any leaky
4343 * functions that are passed Var nodes, and therefore might reveal values from
4344 * the subquery as side effects.
4345 *
4346 * 4. rinfo's clause must not refer to the whole-row output of the subquery
4347 * (since there is no easy way to name that within the subquery itself).
4348 *
4349 * 5. rinfo's clause must not refer to any subquery output columns that were
4350 * found to be unsafe to reference by subquery_is_pushdown_safe().
4351 */
4352static pushdown_safe_type
4353qual_is_pushdown_safe(Query *subquery, Index rti, RestrictInfo *rinfo,
4354 pushdown_safety_info *safetyInfo)
4355{
4356 pushdown_safe_type safe = PUSHDOWN_SAFE;
4357 Node *qual = (Node *) rinfo->clause;
4358 List *vars;
4359 ListCell *vl;
4360
4361 /* Refuse subselects (point 1) */
4362 if (contain_subplans(qual))
4363 return PUSHDOWN_UNSAFE;
4364
4365 /* Refuse volatile quals if we found they'd be unsafe (point 2) */
4366 if (safetyInfo->unsafeVolatile &&
4367 contain_volatile_functions((Node *) rinfo->clause))
4368 return PUSHDOWN_UNSAFE;
4369
4370 /* Refuse leaky quals if told to (point 3) */
4371 if (safetyInfo->unsafeLeaky &&
4372 contain_leaked_vars(qual))
4373 return PUSHDOWN_UNSAFE;
4374
4375 /*
4376 * Examine all Vars used in clause. Since it's a restriction clause, all
4377 * such Vars must refer to subselect output columns ... unless this is
4378 * part of a LATERAL subquery, in which case there could be lateral
4379 * references.
4380 *
4381 * By omitting the relevant flags, this also gives us a cheap sanity check
4382 * that no aggregates or window functions appear in the qual. Those would
4383 * be unsafe to push down, but at least for the moment we could never see
4384 * any in a qual anyhow.
4385 */
4386 vars = pull_var_clause(qual, PVC_INCLUDE_PLACEHOLDERS);
4387 foreach(vl, vars)
4388 {
4389 Var *var = (Var *) lfirst(vl);
4390
4391 /*
4392 * XXX Punt if we find any PlaceHolderVars in the restriction clause.
4393 * It's not clear whether a PHV could safely be pushed down, and even
4394 * less clear whether such a situation could arise in any cases of
4395 * practical interest anyway. So for the moment, just refuse to push
4396 * down.
4397 */
4398 if (!IsA(var, Var))
4399 {
4400 safe = PUSHDOWN_UNSAFE;
4401 break;
4402 }
4403
4404 /*
4405 * Punt if we find any lateral references. It would be safe to push
4406 * these down, but we'd have to convert them into outer references,
4407 * which subquery_push_qual lacks the infrastructure to do. The case
4408 * arises so seldom that it doesn't seem worth working hard on.
4409 */
4410 if (var->varno != rti)
4411 {
4412 safe = PUSHDOWN_UNSAFE;
4413 break;
4414 }
4415
4416 /* Subqueries have no system columns */
4417 Assert(var->varattno >= 0);
4418
4419 /* Check point 4 */
4420 if (var->varattno == 0)
4421 {
4422 safe = PUSHDOWN_UNSAFE;
4423 break;
4424 }
4425
4426 /* Check point 5 */
4427 if (safetyInfo->unsafeFlags[var->varattno] != 0)
4428 {
4429 if (safetyInfo->unsafeFlags[var->varattno] &
4430 (UNSAFE_HAS_VOLATILE_FUNC | UNSAFE_HAS_SET_FUNC |
4431 UNSAFE_NOTIN_DISTINCTON_CLAUSE | UNSAFE_TYPE_MISMATCH))
4432 {
4433 safe = PUSHDOWN_UNSAFE;
4434 break;
4435 }
4436 else
4437 {
4438 /* UNSAFE_NOTIN_PARTITIONBY_CLAUSE is ok for run conditions */
4439 safe = PUSHDOWN_WINDOWCLAUSE_RUNCOND;
4440 /* don't break, we might find another Var that's unsafe */
4441 }
4442 }
4443 }
4444
4445 list_free(vars);
4446
4447 return safe;
4448}
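/*
 * For example (illustrative, hypothetical table t): a qual such as
 * "(ss.*)::text = '(1,2)'" over subquery ss references the whole-row
 * output (varattno == 0), which has no equivalent inside the
 * subquery, so it is rejected at point 4 above.
 */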
4449
4450/*
4451 * subquery_push_qual - push down a qual that we have determined is safe
4452 */
4453static void
4454subquery_push_qual(Query *subquery, RangeTblEntry *rte, Index rti, Node *qual)
4455{
4456 if (subquery->setOperations != NULL)
4457 {
4458 /* Recurse to push it separately to each component query */
4459 recurse_push_qual(subquery->setOperations, subquery,
4460 rte, rti, qual);
4461 }
4462 else
4463 {
4464 /*
4465 * We need to replace Vars in the qual (which must refer to outputs of
4466 * the subquery) with copies of the subquery's targetlist expressions.
4467 * Note that at this point, any uplevel Vars in the qual should have
4468 * been replaced with Params, so they need no work.
4469 *
4470 * This step also ensures that when we are pushing into a setop tree,
4471 * each component query gets its own copy of the qual.
4472 */
4473 qual = ReplaceVarsFromTargetList(qual, rti, 0, rte,
4474 subquery->targetList,
4475 subquery->resultRelation,
4476 REPLACEVARS_REPORT_ERROR, 0,
4477 &subquery->hasSubLinks);
4478
4479 /*
4480 * Now attach the qual to the proper place: normally WHERE, but if the
4481 * subquery uses grouping or aggregation, put it in HAVING (since the
4482 * qual really refers to the group-result rows).
4483 */
4484 if (subquery->hasAggs || subquery->groupClause || subquery->groupingSets || subquery->havingQual)
4485 subquery->havingQual = make_and_qual(subquery->havingQual, qual);
4486 else
4487 subquery->jointree->quals =
4488 make_and_qual(subquery->jointree->quals, qual);
4489
4490 /*
4491 * We need not change the subquery's hasAggs or hasSubLinks flags,
4492 * since we can't be pushing down any aggregates that weren't there
4493 * before, and we don't push down subselects at all.
4494 */
4495 }
4496}
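/*
 * For example (illustrative, hypothetical table t): pushing "a = 1"
 * into "SELECT a, count(*) AS c FROM t GROUP BY a" attaches it as
 * "HAVING a = 1", since the qual refers to group-result rows; for an
 * ungrouped subquery it would instead be ANDed into jointree->quals.
 */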
4497
4498/*
4499 * Helper routine to recurse through setOperations tree
4500 */
4501static void
4502recurse_push_qual(Node *setOp, Query *topquery,
4503 RangeTblEntry *rte, Index rti, Node *qual)
4504{
4505 if (IsA(setOp, RangeTblRef))
4506 {
4507 RangeTblRef *rtr = (RangeTblRef *) setOp;
4508 RangeTblEntry *subrte = rt_fetch(rtr->rtindex, topquery->rtable);
4509 Query *subquery = subrte->subquery;
4510
4511 Assert(subquery != NULL);
4512 subquery_push_qual(subquery, rte, rti, qual);
4513 }
4514 else if (IsA(setOp, SetOperationStmt))
4515 {
4516 SetOperationStmt *op = (SetOperationStmt *) setOp;
4517
4518 recurse_push_qual(op->larg, topquery, rte, rti, qual);
4519 recurse_push_qual(op->rarg, topquery, rte, rti, qual);
4520 }
4521 else
4522 {
4523 elog(ERROR, "unrecognized node type: %d",
4524 (int) nodeTag(setOp));
4525 }
4526}
4527
4528/*****************************************************************************
4529 * SIMPLIFYING SUBQUERY TARGETLISTS
4530 *****************************************************************************/
4531
4532/*
4533 * remove_unused_subquery_outputs
4534 * Remove subquery targetlist items we don't need
4535 *
4536 * It's possible, even likely, that the upper query does not read all the
4537 * output columns of the subquery. We can remove any such outputs that are
4538 * not needed by the subquery itself (e.g., as sort/group columns) and do not
4539 * affect semantics otherwise (e.g., volatile functions can't be removed).
4540 * This is useful not only because we might be able to remove expensive-to-
4541 * compute expressions, but because deletion of output columns might allow
4542 * optimizations such as join removal to occur within the subquery.
4543 *
4544 * extra_used_attrs can be passed as non-NULL to mark any columns (offset by
4545 * FirstLowInvalidHeapAttributeNumber) that we should not remove. This
4546 * parameter is modified by the function, so callers must make a copy if they
4547 * need to use the passed in Bitmapset after calling this function.
4548 *
4549 * To avoid affecting column numbering in the targetlist, we don't physically
4550 * remove unused tlist entries, but rather replace their expressions with NULL
4551 * constants. This is implemented by modifying subquery->targetList.
4552 */
4553static void
4554remove_unused_subquery_outputs(Query *subquery, RelOptInfo *rel,
4555 Bitmapset *extra_used_attrs)
4556{
4557 Bitmapset *attrs_used;
4558 ListCell *lc;
4559
4560 /*
4561 * Just point directly to extra_used_attrs. No need to bms_copy as none of
4562 * the current callers use the Bitmapset after calling this function.
4563 */
4564 attrs_used = extra_used_attrs;
4565
4566 /*
4567 * Do nothing if subquery has UNION/INTERSECT/EXCEPT: in principle we
4568 * could update all the child SELECTs' tlists, but it seems not worth the
4569 * trouble presently.
4570 */
4571 if (subquery->setOperations)
4572 return;
4573
4574 /*
4575 * If subquery has regular DISTINCT (not DISTINCT ON), we're wasting our
4576 * time: all its output columns must be used in the distinctClause.
4577 */
4578 if (subquery->distinctClause && !subquery->hasDistinctOn)
4579 return;
4580
4581 /*
4582 * Collect a bitmap of all the output column numbers used by the upper
4583 * query.
4584 *
4585 * Add all the attributes needed for joins or final output. Note: we must
4586 * look at rel's targetlist, not the attr_needed data, because attr_needed
4587 * isn't computed for inheritance child rels, cf set_append_rel_size().
4588 * (XXX might be worth changing that sometime.)
4589 */
4590 pull_varattnos((Node *) rel->reltarget->exprs, rel->relid, &attrs_used);
4591
4592 /* Add all the attributes used by un-pushed-down restriction clauses. */
4593 foreach(lc, rel->baserestrictinfo)
4594 {
4595 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
4596
4597 pull_varattnos((Node *) rinfo->clause, rel->relid, &attrs_used);
4598 }
4599
4600 /*
4601 * If there's a whole-row reference to the subquery, we can't remove
4602 * anything.
4603 */
4604 if (bms_is_member(0 - FirstLowInvalidHeapAttributeNumber, attrs_used))
4605 return;
4606
4607 /*
4608 * Run through the tlist and zap entries we don't need. It's okay to
4609 * modify the tlist items in-place because set_subquery_pathlist made a
4610 * copy of the subquery.
4611 */
4612 foreach(lc, subquery->targetList)
4613 {
4614 TargetEntry *tle = (TargetEntry *) lfirst(lc);
4615 Node *texpr = (Node *) tle->expr;
4616
4617 /*
4618 * If it has a sortgroupref number, it's used in some sort/group
4619 * clause so we'd better not remove it. Also, don't remove any
4620 * resjunk columns, since their reason for being has nothing to do
4621 * with anybody reading the subquery's output. (It's likely that
4622 * resjunk columns in a sub-SELECT would always have ressortgroupref
4623 * set, but even if they don't, it seems imprudent to remove them.)
4624 */
4625 if (tle->ressortgroupref || tle->resjunk)
4626 continue;
4627
4628 /*
4629 * If it's used by the upper query, we can't remove it.
4630 */
4631 if (bms_is_member(tle->resno - FirstLowInvalidHeapAttributeNumber,
4632 attrs_used))
4633 continue;
4634
4635 /*
4636 * If it contains a set-returning function, we can't remove it since
4637 * that could change the number of rows returned by the subquery.
4638 */
4639 if (subquery->hasTargetSRFs &&
4640 expression_returns_set(texpr))
4641 continue;
4642
4643 /*
4644 * If it contains volatile functions, we daren't remove it for fear
4645 * that the user is expecting their side-effects to happen.
4646 */
4647 if (contain_volatile_functions(texpr))
4648 continue;
4649
4650 /*
4651 * OK, we don't need it. Replace the expression with a NULL constant.
4652 * Preserve the exposed type of the expression, in case something
4653 * looks at the rowtype of the subquery's result.
4654 */
4655 tle->expr = (Expr *) makeNullConst(exprType(texpr),
4656 exprTypmod(texpr),
4657 exprCollation(texpr));
4658 }
4659}
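/*
 * Illustrative example (hypothetical table t and non-volatile,
 * non-set-returning function f, not from the original sources): in
 *
 *     SELECT a FROM (SELECT a, f(b) AS fb FROM t) ss;
 *
 * the upper query never reads fb, so its tlist entry is replaced by a
 * NULL constant of the same type; that can save evaluating f() and
 * may unlock optimizations such as join removal inside the subquery.
 */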
4660
4661/*
4662 * create_partial_bitmap_paths
4663 * Build partial bitmap heap path for the relation
4664 */
4665void
4666create_partial_bitmap_paths(PlannerInfo *root, RelOptInfo *rel,
4667 Path *bitmapqual)
4668{
4669 int parallel_workers;
4670 double pages_fetched;
4671
4672 /* Compute heap pages for bitmap heap scan */
4673 pages_fetched = compute_bitmap_pages(root, rel, bitmapqual, 1.0,
4674 NULL, NULL);
4675
4676 parallel_workers = compute_parallel_worker(rel, pages_fetched, -1,
4677 max_parallel_workers_per_gather);
4678
4679 if (parallel_workers <= 0)
4680 return;
4681
4682 add_partial_path(rel, (Path *) create_bitmap_heap_path(root, rel,
4683 bitmapqual, rel->lateral_relids, 1.0, parallel_workers));
4684}
4685
4686/*
4687 * Compute the number of parallel workers that should be used to scan a
4688 * relation. We compute the parallel workers based on the size of the heap to
4689 * be scanned and the size of the index to be scanned, then choose a minimum
4690 * of those.
4691 *
4692 * "heap_pages" is the number of pages from the table that we expect to scan, or
4693 * -1 if we don't expect to scan any.
4694 *
4695 * "index_pages" is the number of pages from the index that we expect to scan, or
4696 * -1 if we don't expect to scan any.
4697 *
4698 * "max_workers" is caller's limit on the number of workers. This typically
4699 * comes from a GUC.
4700 */
4701int
4702compute_parallel_worker(RelOptInfo *rel, double heap_pages, double index_pages,
4703 int max_workers)
4704{
4705 int parallel_workers = 0;
4706
4707 /*
4708 * If the user has set the parallel_workers reloption, use that; otherwise
4709 * select a default number of workers.
4710 */
4711 if (rel->rel_parallel_workers != -1)
4712 parallel_workers = rel->rel_parallel_workers;
4713 else
4714 {
4715 /*
4716 * If the number of pages being scanned is insufficient to justify a
4717 * parallel scan, just return zero ... unless it's an inheritance
4718 * child. In that case, we want to generate a parallel path here
4719 * anyway. It might not be worthwhile just for this relation, but
4720 * when combined with all of its inheritance siblings it may well pay
4721 * off.
4722 */
4723 if (rel->reloptkind == RELOPT_BASEREL &&
4724 ((heap_pages >= 0 && heap_pages < min_parallel_table_scan_size) ||
4725 (index_pages >= 0 && index_pages < min_parallel_index_scan_size)))
4726 return 0;
4727
4728 if (heap_pages >= 0)
4729 {
4730 int heap_parallel_threshold;
4731 int heap_parallel_workers = 1;
4732
4733 /*
4734 * Select the number of workers based on the log of the size of
4735 * the relation. This probably needs to be a good deal more
4736 * sophisticated, but we need something here for now. Note that
4737 * the upper limit of the min_parallel_table_scan_size GUC is
4738 * chosen to prevent overflow here.
4739 */
4740 heap_parallel_threshold = Max(min_parallel_table_scan_size, 1);
4741 while (heap_pages >= (BlockNumber) (heap_parallel_threshold * 3))
4742 {
4743 heap_parallel_workers++;
4744 heap_parallel_threshold *= 3;
4745 if (heap_parallel_threshold > INT_MAX / 3)
4746 break; /* avoid overflow */
4747 }
4748
4749 parallel_workers = heap_parallel_workers;
4750 }
4751
4752 if (index_pages >= 0)
4753 {
4754 int index_parallel_workers = 1;
4755 int index_parallel_threshold;
4756
4757 /* same calculation as for heap_pages above */
4758 index_parallel_threshold = Max(min_parallel_index_scan_size, 1);
4759 while (index_pages >= (BlockNumber) (index_parallel_threshold * 3))
4760 {
4761 index_parallel_workers++;
4762 index_parallel_threshold *= 3;
4763 if (index_parallel_threshold > INT_MAX / 3)
4764 break; /* avoid overflow */
4765 }
4766
4767 if (parallel_workers > 0)
4768 parallel_workers = Min(parallel_workers, index_parallel_workers);
4769 else
4770 parallel_workers = index_parallel_workers;
4771 }
4772 }
4773
4774 /* In no case use more than caller supplied maximum number of workers */
4775 parallel_workers = Min(parallel_workers, max_workers);
4776
4777 return parallel_workers;
4778}
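/*
 * Worked example (illustrative): with min_parallel_table_scan_size at
 * its default of 1024 pages (8MB) and no parallel_workers reloption, a
 * 2000-page heap gets 1 worker (2000 < 3 * 1024), 3072 pages get 2,
 * 9216 pages get 3, and so on: each tripling of the relation size adds
 * one worker, up to the caller-supplied max_workers.
 */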
4779
4780/*
4781 * generate_partitionwise_join_paths
4782 * Create paths representing partitionwise join for given partitioned
4783 * join relation.
4784 *
4785 * This must not be called until after we are done adding paths for all
4786 * child-joins. Otherwise, add_path might delete a path to which some path
4787 * generated here has a reference.
4788 */
4789void
4790generate_partitionwise_join_paths(PlannerInfo *root, RelOptInfo *rel)
4791{
4792 List *live_children = NIL;
4793 int cnt_parts;
4794 int num_parts;
4795 RelOptInfo **part_rels;
4796
4797 /* Handle only join relations here. */
4798 if (!IS_JOIN_REL(rel))
4799 return;
4800
4801 /* We've nothing to do if the relation is not partitioned. */
4802 if (!IS_PARTITIONED_REL(rel))
4803 return;
4804
4805 /* The relation should have consider_partitionwise_join set. */
4806 Assert(rel->consider_partitionwise_join);
4807
4808 /* Guard against stack overflow due to overly deep partition hierarchy. */
4809 check_stack_depth();
4810
4811 num_parts = rel->nparts;
4812 part_rels = rel->part_rels;
4813
4814 /* Collect non-dummy child-joins. */
4815 for (cnt_parts = 0; cnt_parts < num_parts; cnt_parts++)
4816 {
4817 RelOptInfo *child_rel = part_rels[cnt_parts];
4818
4819 /* If it's been pruned entirely, it's certainly dummy. */
4820 if (child_rel == NULL)
4821 continue;
4822
4823 /* Make partitionwise join paths for this partitioned child-join. */
4824 generate_partitionwise_join_paths(root, child_rel);
4825
4826 /* If we failed to make any path for this child, we must give up. */
4827 if (child_rel->pathlist == NIL)
4828 {
4829 /*
4830 * Mark the parent joinrel as unpartitioned so that later
4831 * functions treat it correctly.
4832 */
4833 rel->nparts = 0;
4834 return;
4835 }
4836
4837 /* Else, identify the cheapest path for it. */
4838 set_cheapest(child_rel);
4839
4840 /* Dummy children need not be scanned, so ignore those. */
4841 if (IS_DUMMY_REL(child_rel))
4842 continue;
4843
4844 /*
4845 * Except for the topmost scan/join rel, consider generating partial
4846 * aggregation paths for the grouped relation on top of the paths of
4847 * this partitioned child-join. After that, we're done creating paths
4848 * for the grouped relation, so run set_cheapest().
4849 */
4850 if (child_rel->grouped_rel != NULL &&
4851 !bms_equal(IS_OTHER_REL(rel) ?
4852 rel->top_parent_relids : rel->relids,
4853 root->all_query_rels))
4854 {
4855 RelOptInfo *grouped_rel = child_rel->grouped_rel;
4856
4857 Assert(IS_GROUPED_REL(grouped_rel));
4858
4859 generate_grouped_paths(root, grouped_rel, child_rel);
4860 set_cheapest(grouped_rel);
4861 }
4862
4863#ifdef OPTIMIZER_DEBUG
4864 pprint(child_rel);
4865#endif
4866
4867 live_children = lappend(live_children, child_rel);
4868 }
4869
4870 /* If all child-joins are dummy, parent join is also dummy. */
4871 if (!live_children)
4872 {
4873 mark_dummy_rel(rel);
4874 return;
4875 }
4876
4877 /* Build additional paths for this rel from child-join paths. */
4878 add_paths_to_append_rel(root, rel, live_children);
4879 list_free(live_children);
4880}
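/*
 * Illustrative example (hypothetical tables, not from the original
 * sources): if t1 and t2 are hash-partitioned on id with matching
 * partition sets, then with enable_partitionwise_join on, the joinrel
 * for "t1 JOIN t2 USING (id)" arrives here with one child-join per
 * partition pair (t1_p0/t2_p0, t1_p1/t2_p1, ...); the surviving
 * children's paths are combined into Append paths for the parent join
 * by add_paths_to_append_rel().
 */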
static void set_base_rel_sizes(PlannerInfo *root)
Definition: allpaths.c:302
static List * get_useful_pathkeys_for_relation(PlannerInfo *root, RelOptInfo *rel, bool require_parallel_safe)
Definition: allpaths.c:3257
static void set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeTblEntry *rte)
Definition: allpaths.c:2616
#define UNSAFE_TYPE_MISMATCH
Definition: allpaths.c:59
static Path * get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
Definition: allpaths.c:2138
static void set_namedtuplestore_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition: allpaths.c:3075
static void subquery_push_qual(Query *subquery, RangeTblEntry *rte, Index rti, Node *qual)
Definition: allpaths.c:4454
void generate_partitionwise_join_paths(PlannerInfo *root, RelOptInfo *rel)
Definition: allpaths.c:4790
void generate_grouped_paths(PlannerInfo *root, RelOptInfo *grouped_rel, RelOptInfo *rel)
Definition: allpaths.c:3442
static void set_base_rel_consider_startup(PlannerInfo *root)
Definition: allpaths.c:259
#define UNSAFE_HAS_VOLATILE_FUNC
Definition: allpaths.c:55
#define UNSAFE_NOTIN_DISTINCTON_CLAUSE
Definition: allpaths.c:57
static void set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeTblEntry *rte)
Definition: allpaths.c:514
static Path * get_singleton_append_subpath(Path *path)
Definition: allpaths.c:2271
static void set_tablesample_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition: allpaths.c:880
static void set_tablesample_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition: allpaths.c:920
static void set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition: allpaths.c:992
static void set_base_rel_pathlists(PlannerInfo *root)
Definition: allpaths.c:378
RelOptInfo * standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
Definition: allpaths.c:3885
int geqo_threshold
Definition: allpaths.c:83
static void set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeTblEntry *rte)
Definition: allpaths.c:1305
static pushdown_safe_type qual_is_pushdown_safe(Query *subquery, Index rti, RestrictInfo *rinfo, pushdown_safety_info *safetyInfo)
Definition: allpaths.c:4353
static void set_result_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition: allpaths.c:3102
int compute_parallel_worker(RelOptInfo *rel, double heap_pages, double index_pages, int max_workers)
Definition: allpaths.c:4702
static bool check_and_push_window_quals(Query *subquery, Node *clause, Bitmapset **run_cond_attrs)
Definition: allpaths.c:2543
void generate_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
Definition: allpaths.c:3188
static void set_dummy_rel_pathlist(RelOptInfo *rel)
Definition: allpaths.c:2305
static void compare_tlist_datatypes(List *tlist, List *colTypes, pushdown_safety_info *safetyInfo)
Definition: allpaths.c:4277
static void set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition: allpaths.c:3129
static bool targetIsInAllPartitionLists(TargetEntry *tle, Query *query)
Definition: allpaths.c:4310
static void create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel)
Definition: allpaths.c:860
static bool subquery_is_pushdown_safe(Query *subquery, Query *topquery, pushdown_safety_info *safetyInfo)
Definition: allpaths.c:4080
join_search_hook_type join_search_hook
Definition: allpaths.c:92
bool enable_geqo
Definition: allpaths.c:81
void generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
Definition: allpaths.c:3325
static RelOptInfo * make_rel_from_joinlist(PlannerInfo *root, List *joinlist)
Definition: allpaths.c:3780
static void set_function_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition: allpaths.c:2885
static void recurse_push_qual(Node *setOp, Query *topquery, RangeTblEntry *rte, Index rti, Node *qual)
Definition: allpaths.c:4502
static void set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition: allpaths.c:822
static void setup_simple_grouped_rels(PlannerInfo *root)
Definition: allpaths.c:344
static void remove_unused_subquery_outputs(Query *subquery, RelOptInfo *rel, Bitmapset *extra_used_attrs)
Definition: allpaths.c:4554
static void check_output_expressions(Query *subquery, pushdown_safety_info *safetyInfo)
Definition: allpaths.c:4205
static void set_tablefunc_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition: allpaths.c:2972
static void set_foreign_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition: allpaths.c:968
set_rel_pathlist_hook_type set_rel_pathlist_hook
Definition: allpaths.c:89
static void set_values_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition: allpaths.c:2952
struct pushdown_safety_info pushdown_safety_info
RelOptInfo * make_one_rel(PlannerInfo *root, List *joinlist)
Definition: allpaths.c:177
#define UNSAFE_HAS_SET_FUNC
Definition: allpaths.c:56
static void set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition: allpaths.c:2996
bool enable_eager_aggregate
Definition: allpaths.c:82
static void set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition: allpaths.c:643
static void set_grouped_rel_pathlist(PlannerInfo *root, RelOptInfo *rel)
Definition: allpaths.c:1368
static void set_append_rel_size(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeTblEntry *rte)
Definition: allpaths.c:1010
void create_partial_bitmap_paths(PlannerInfo *root, RelOptInfo *rel, Path *bitmapqual)
Definition: allpaths.c:4666
static void set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition: allpaths.c:626
double min_eager_agg_group_size
Definition: allpaths.c:84
void add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, List *live_childrels)
Definition: allpaths.c:1404
static void set_rel_size(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeTblEntry *rte)
Definition: allpaths.c:405
static void generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel, List *live_childrels, List *all_child_pathkeys)
Definition: allpaths.c:1832
#define UNSAFE_NOTIN_PARTITIONBY_CLAUSE
Definition: allpaths.c:58
static bool find_window_run_conditions(Query *subquery, AttrNumber attno, WindowFunc *wfunc, OpExpr *opexpr, bool wfunc_left, bool *keep_original, Bitmapset **run_cond_attrs)
Definition: allpaths.c:2353
static bool recurse_pushdown_safe(Node *setOp, Query *topquery, pushdown_safety_info *safetyInfo)
Definition: allpaths.c:4136
pushdown_safe_type
Definition: allpaths.c:73
@ PUSHDOWN_WINDOWCLAUSE_RUNCOND
Definition: allpaths.c:76
@ PUSHDOWN_UNSAFE
Definition: allpaths.c:74
@ PUSHDOWN_SAFE
Definition: allpaths.c:75
static void accumulate_append_subpath(Path *path, List **subpaths, List **special_subpaths)
Definition: allpaths.c:2226
int min_parallel_index_scan_size
Definition: allpaths.c:86
int min_parallel_table_scan_size
Definition: allpaths.c:85
Node * adjust_appendrel_attrs(PlannerInfo *root, Node *node, int nappinfos, AppendRelInfo **appinfos)
Definition: appendinfo.c:200
int16 AttrNumber
Definition: attnum.h:21
#define InvalidAttrNumber
Definition: attnum.h:23
void pprint(const void *obj)
Definition: print.c:54
bool bms_equal(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:142
bool bms_is_subset(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:412
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:510
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:815
BMS_Membership bms_membership(const Bitmapset *a)
Definition: bitmapset.c:781
bool bms_overlap(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:582
bool bms_get_singleton_member(const Bitmapset *a, int *member)
Definition: bitmapset.c:715
#define bms_is_empty(a)
Definition: bitmapset.h:118
@ BMS_SINGLETON
Definition: bitmapset.h:72
@ BMS_MULTIPLE
Definition: bitmapset.h:73
uint32 BlockNumber
Definition: block.h:31
#define Min(x, y)
Definition: c.h:1007
#define Max(x, y)
Definition: c.h:1001
int32_t int32
Definition: c.h:538
unsigned int Index
Definition: c.h:623
#define MemSet(start, val, len)
Definition: c.h:1023
#define OidIsValid(objectId)
Definition: c.h:778
bool is_pseudo_constant_clause(Node *clause)
Definition: clauses.c:2095
bool contain_leaked_vars(Node *clause)
Definition: clauses.c:1269
bool is_parallel_safe(PlannerInfo *root, Node *node)
Definition: clauses.c:757
bool contain_subplans(Node *clause)
Definition: clauses.c:334
bool contain_volatile_functions(Node *clause)
Definition: clauses.c:542
CompareType
Definition: cmptype.h:32
@ COMPARE_LE
Definition: cmptype.h:35
@ COMPARE_GT
Definition: cmptype.h:38
@ COMPARE_EQ
Definition: cmptype.h:36
@ COMPARE_GE
Definition: cmptype.h:37
@ COMPARE_LT
Definition: cmptype.h:34
void set_namedtuplestore_size_estimates(PlannerInfo *root, RelOptInfo *rel)
Definition: costsize.c:6122
int max_parallel_workers_per_gather
Definition: costsize.c:143
void set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
Definition: costsize.c:5358
void set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
Definition: costsize.c:5992
void set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, double cte_rows)
Definition: costsize.c:6084
double compute_gather_rows(Path *path)
Definition: costsize.c:6634
void set_result_size_estimates(PlannerInfo *root, RelOptInfo *rel)
Definition: costsize.c:6155
bool enable_partitionwise_join
Definition: costsize.c:159
double compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel, Path *bitmapqual, double loop_count, Cost *cost_p, double *tuples_p)
Definition: costsize.c:6523
void set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
Definition: costsize.c:5912
bool enable_parallel_append
Definition: costsize.c:161
void set_foreign_size_estimates(PlannerInfo *root, RelOptInfo *rel)
Definition: costsize.c:6184
double clamp_row_est(double nrows)
Definition: costsize.c:213
void set_tablefunc_size_estimates(PlannerInfo *root, RelOptInfo *rel)
Definition: costsize.c:6030
void set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
Definition: costsize.c:6052
bool enable_incremental_sort
Definition: costsize.c:151
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:226
void add_child_rel_equivalences(PlannerInfo *root, AppendRelInfo *appinfo, RelOptInfo *parent_rel, RelOptInfo *child_rel)
Definition: equivclass.c:2833
bool relation_can_be_sorted_early(PlannerInfo *root, RelOptInfo *rel, EquivalenceClass *ec, bool require_parallel_safe)
Definition: equivclass.c:1077
#define OidFunctionCall1(functionId, arg1)
Definition: fmgr.h:720
RelOptInfo * geqo(PlannerInfo *root, int number_of_rels, List *initial_rels)
Definition: geqo_main.c:74
Assert(PointerIsAligned(start, uint64))
void check_index_predicates(PlannerInfo *root, RelOptInfo *rel)
Definition: indxpath.c:3967
void create_index_paths(PlannerInfo *root, RelOptInfo *rel)
Definition: indxpath.c:240
int i
Definition: isn.c:77
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:81
void join_search_one_level(PlannerInfo *root, int level)
Definition: joinrels.c:78
void mark_dummy_rel(RelOptInfo *rel)
Definition: joinrels.c:1513
List * lappend(List *list, void *datum)
Definition: list.c:339
List * list_copy_tail(const List *oldlist, int nskip)
Definition: list.c:1613
List * list_concat(List *list1, const List *list2)
Definition: list.c:561
void list_free(List *list)
Definition: list.c:1546
List * list_copy_head(const List *oldlist, int len)
Definition: list.c:1593
char get_rel_persistence(Oid relid)
Definition: lsyscache.c:2245
char func_parallel(Oid funcid)
Definition: lsyscache.c:1966
Oid get_opfamily_member_for_cmptype(Oid opfamily, Oid lefttype, Oid righttype, CompareType cmptype)
Definition: lsyscache.c:197
RegProcedure get_func_support(Oid funcid)
Definition: lsyscache.c:2025
bool func_strict(Oid funcid)
Definition: lsyscache.c:1928
List * get_op_index_interpretation(Oid opno)
Definition: lsyscache.c:673
int32 get_typavgwidth(Oid typid, int32 typmod)
Definition: lsyscache.c:2745
Datum subpath(PG_FUNCTION_ARGS)
Definition: ltree_op.c:311
Const * makeNullConst(Oid consttype, int32 consttypmod, Oid constcollid)
Definition: makefuncs.c:388
Node * make_and_qual(Node *qual1, Node *qual2)
Definition: makefuncs.c:780
void pfree(void *pointer)
Definition: mcxt.c:1594
void * palloc0(Size size)
Definition: mcxt.c:1395
Oid exprType(const Node *expr)
Definition: nodeFuncs.c:42
int32 exprTypmod(const Node *expr)
Definition: nodeFuncs.c:301
Oid exprCollation(const Node *expr)
Definition: nodeFuncs.c:821
bool expression_returns_set(Node *clause)
Definition: nodeFuncs.c:763
void set_opfuncid(OpExpr *opexpr)
Definition: nodeFuncs.c:1868
#define IsA(nodeptr, _type_)
Definition: nodes.h:164
#define copyObject(obj)
Definition: nodes.h:232
#define nodeTag(nodeptr)
Definition: nodes.h:139
@ AGG_SORTED
Definition: nodes.h:365
@ AGG_HASHED
Definition: nodes.h:366
@ AGGSPLIT_INITIAL_SERIAL
Definition: nodes.h:389
#define makeNode(_type_)
Definition: nodes.h:161
#define castNode(_type_, nodeptr)
Definition: nodes.h:182
@ JOIN_SEMI
Definition: nodes.h:317
@ JOIN_ANTI
Definition: nodes.h:318
#define PVC_INCLUDE_PLACEHOLDERS
Definition: optimizer.h:189
bool targetIsInSortList(TargetEntry *tle, Oid sortop, List *sortList)
@ SETOP_EXCEPT
Definition: parsenodes.h:2179
@ RTE_JOIN
Definition: parsenodes.h:1045
@ RTE_CTE
Definition: parsenodes.h:1049
@ RTE_NAMEDTUPLESTORE
Definition: parsenodes.h:1050
@ RTE_VALUES
Definition: parsenodes.h:1048
@ RTE_SUBQUERY
Definition: parsenodes.h:1044
@ RTE_RESULT
Definition: parsenodes.h:1051
@ RTE_FUNCTION
Definition: parsenodes.h:1046
@ RTE_TABLEFUNC
Definition: parsenodes.h:1047
@ RTE_GROUP
Definition: parsenodes.h:1054
@ RTE_RELATION
Definition: parsenodes.h:1043
#define rt_fetch(rangetable_index, rangetable)
Definition: parsetree.h:31
bool partitions_are_ordered(PartitionBoundInfo boundinfo, Bitmapset *live_parts)
Definition: partbounds.c:2853
Path * get_cheapest_fractional_path_for_pathkeys(List *paths, List *pathkeys, Relids required_outer, double fraction)
Definition: pathkeys.c:666
Path * get_cheapest_path_for_pathkeys(List *paths, List *pathkeys, Relids required_outer, CostSelector cost_criterion, bool require_parallel_safe)
Definition: pathkeys.c:620
bool pathkeys_count_contained_in(List *keys1, List *keys2, int *n_common)
Definition: pathkeys.c:558
bool has_useful_pathkeys(PlannerInfo *root, RelOptInfo *rel)
Definition: pathkeys.c:2291
List * make_pathkeys_for_sortclauses(PlannerInfo *root, List *sortclauses, List *tlist)
Definition: pathkeys.c:1336
List * build_expression_pathkey(PlannerInfo *root, Expr *expr, Oid opno, Relids rel, bool create_it)
Definition: pathkeys.c:1000
List * build_partition_pathkeys(PlannerInfo *root, RelOptInfo *partrel, ScanDirection scandir, bool *partialkeys)
Definition: pathkeys.c:919
List * convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel, List *subquery_pathkeys, List *subquery_tlist)
Definition: pathkeys.c:1054
bool pathkeys_contained_in(List *keys1, List *keys2)
Definition: pathkeys.c:343
PathKeysComparison compare_pathkeys(List *keys1, List *keys2)
Definition: pathkeys.c:304
Path * get_cheapest_parallel_safe_total_inner(List *paths)
Definition: pathkeys.c:699
Path * create_functionscan_path(PlannerInfo *root, RelOptInfo *rel, List *pathkeys, Relids required_outer)
Definition: pathnode.c:1876
Path * create_valuesscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
Definition: pathnode.c:1928
Path * create_worktablescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
Definition: pathnode.c:2032
ProjectionPath * create_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2524
Path * create_seqscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer, int parallel_workers)
Definition: pathnode.c:983
GatherMergePath * create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *pathkeys, Relids required_outer, double *rows)
Definition: pathnode.c:1750
void set_cheapest(RelOptInfo *parent_rel)
Definition: pathnode.c:270
void add_partial_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:795
AppendPath * create_append_path(PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *partial_subpaths, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows)
Definition: pathnode.c:1300
Path * create_namedtuplestorescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
Definition: pathnode.c:1980
SubqueryScanPath * create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, bool trivial_pathtarget, List *pathkeys, Relids required_outer)
Definition: pathnode.c:1846
IncrementalSortPath * create_incremental_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, int presorted_keys, double limit_tuples)
Definition: pathnode.c:2792
BitmapHeapPath * create_bitmap_heap_path(PlannerInfo *root, RelOptInfo *rel, Path *bitmapqual, Relids required_outer, double loop_count, int parallel_degree)
Definition: pathnode.c:1098
Path * create_tablefuncscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
Definition: pathnode.c:1902
SortPath * create_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, double limit_tuples)
Definition: pathnode.c:2841
GatherPath * create_gather_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, Relids required_outer, double *rows)
Definition: pathnode.c:1802
Path * create_samplescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
Definition: pathnode.c:1008
MaterialPath * create_material_path(RelOptInfo *rel, Path *subpath)
Definition: pathnode.c:1657
void add_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:461
int compare_path_costs(Path *path1, Path *path2, CostSelector criterion)
Definition: pathnode.c:70
Path * create_resultscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
Definition: pathnode.c:2006
Path * create_ctescan_path(PlannerInfo *root, RelOptInfo *rel, List *pathkeys, Relids required_outer)
Definition: pathnode.c:1954
AggPath * create_agg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, AggStrategy aggstrategy, AggSplit aggsplit, List *groupClause, List *qual, const AggClauseCosts *aggcosts, double numGroups)
Definition: pathnode.c:2994
MergeAppendPath * create_merge_append_path(PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *pathkeys, Relids required_outer)
Definition: pathnode.c:1471
Path * reparameterize_path(PlannerInfo *root, Path *path, Relids required_outer, double loop_count)
Definition: pathnode.c:3853
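The path-construction functions above all feed the same protocol: build
candidate paths, hand them to add_path()/add_partial_path() (which discard
dominated candidates), and finish with set_cheapest(). A hedged sketch of
that flow (hypothetical function; the worker count is arbitrary):

#include "postgres.h"
#include "optimizer/pathnode.h"

static void
build_seqscan_paths(PlannerInfo *root, RelOptInfo *rel)
{
	/* unparameterized, non-parallel sequential scan */
	add_path(rel, create_seqscan_path(root, rel, NULL, 0));

	/*
	 * A partial path, if parallelism is permitted for this rel.  Real
	 * callers size the worker count from the relation; 2 is illustrative.
	 */
	if (rel->consider_parallel)
		add_partial_path(rel, create_seqscan_path(root, rel, NULL, 2));

	/* fix cheapest_startup_path / cheapest_total_path for the rel */
	set_cheapest(rel);
}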
#define IS_SIMPLE_REL(rel)
Definition: pathnodes.h:895
#define IS_DUMMY_REL(r)
Definition: pathnodes.h:2193
#define IS_JOIN_REL(rel)
Definition: pathnodes.h:900
@ TOTAL_COST
Definition: pathnodes.h:38
@ STARTUP_COST
Definition: pathnodes.h:38
#define IS_PARTITIONED_REL(rel)
Definition: pathnodes.h:1135
#define IS_GROUPED_REL(rel)
Definition: pathnodes.h:1161
#define PATH_REQ_OUTER(path)
Definition: pathnodes.h:1916
Bitmapset * Relids
Definition: pathnodes.h:30
@ UPPERREL_FINAL
Definition: pathnodes.h:79
@ RELOPT_BASEREL
Definition: pathnodes.h:883
@ RELOPT_OTHER_MEMBER_REL
Definition: pathnodes.h:885
#define IS_OTHER_REL(rel)
Definition: pathnodes.h:910
void(* set_rel_pathlist_hook_type)(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeTblEntry *rte)
Definition: paths.h:32
RelOptInfo *(* join_search_hook_type)(PlannerInfo *root, int levels_needed, List *initial_rels)
Definition: paths.h:48
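These two hook types are the extension points into base-relation path
generation and the join search. A minimal extension sketch, assuming only
the hook signatures shown above (module and function names are invented,
and the added path is purely illustrative):

#include "postgres.h"
#include "fmgr.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"

PG_MODULE_MAGIC;

static set_rel_pathlist_hook_type prev_set_rel_pathlist_hook = NULL;

static void
demo_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
				  Index rti, RangeTblEntry *rte)
{
	/* always chain to any previously installed hook */
	if (prev_set_rel_pathlist_hook)
		prev_set_rel_pathlist_hook(root, rel, rti, rte);

	/*
	 * Illustration only: offer a materialized variant of the first
	 * existing path; add_path() keeps it only if it is competitive.
	 */
	if (rel->pathlist != NIL)
		add_path(rel, (Path *)
				 create_material_path(rel, (Path *) linitial(rel->pathlist)));
}

void
_PG_init(void)
{
	prev_set_rel_pathlist_hook = set_rel_pathlist_hook;
	set_rel_pathlist_hook = demo_rel_pathlist;
}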
@ PATHKEYS_EQUAL
Definition: paths.h:212
void * arg
static int pg_leftmost_one_pos32(uint32 word)
Definition: pg_bitutils.h:41
#define lfirst(lc)
Definition: pg_list.h:172
static int list_length(const List *l)
Definition: pg_list.h:152
#define NIL
Definition: pg_list.h:68
#define forboth(cell1, list1, cell2, list2)
Definition: pg_list.h:518
#define foreach_current_index(var_or_cell)
Definition: pg_list.h:403
#define list_make1(x1)
Definition: pg_list.h:212
#define for_each_from(cell, lst, N)
Definition: pg_list.h:414
static void * list_nth(const List *list, int n)
Definition: pg_list.h:299
#define linitial(l)
Definition: pg_list.h:178
#define lsecond(l)
Definition: pg_list.h:183
static ListCell * list_head(const List *l)
Definition: pg_list.h:128
#define list_nth_node(type, list, n)
Definition: pg_list.h:327
static ListCell * lnext(const List *l, const ListCell *c)
Definition: pg_list.h:343
#define lfirst_oid(lc)
Definition: pg_list.h:174
static int list_nth_int(const List *list, int n)
Definition: pg_list.h:310
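The pg_list.h accessors above recur throughout the planner. A small
self-contained sketch of the common idioms (hypothetical function, not
from allpaths.c): forboth() walks two lists in lockstep and stops at the
shorter one, lfirst() reads a cell, NIL is the empty list.

#include "postgres.h"
#include "nodes/pg_list.h"

static int
count_pairwise_equal(List *l1, List *l2)
{
	ListCell   *lc1,
			   *lc2;
	int			n = 0;

	if (l1 == NIL || l2 == NIL)
		return 0;				/* nothing to compare */

	forboth(lc1, l1, lc2, l2)
	{
		if (lfirst(lc1) == lfirst(lc2))
			n++;
	}
	return n;
}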
bool relation_excluded_by_constraints(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition: plancat.c:1705
char * choose_plan_name(PlannerGlobal *glob, const char *name, bool always_number)
Definition: planner.c:8961
PlannerInfo * subquery_planner(PlannerGlobal *glob, Query *parse, char *plan_name, PlannerInfo *parent_root, bool hasRecursion, double tuple_fraction, SetOperationStmt *setops)
Definition: planner.c:693
Path * get_cheapest_fractional_path(RelOptInfo *rel, double tuple_fraction)
Definition: planner.c:6601
bool limit_needed(Query *parse)
Definition: planner.c:2790
@ MONOTONICFUNC_NONE
Definition: plannodes.h:1818
@ MONOTONICFUNC_DECREASING
Definition: plannodes.h:1820
@ MONOTONICFUNC_INCREASING
Definition: plannodes.h:1819
@ MONOTONICFUNC_BOTH
Definition: plannodes.h:1821
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:332
static Pointer DatumGetPointer(Datum X)
Definition: postgres.h:322
#define InvalidOid
Definition: postgres_ext.h:37
unsigned int Oid
Definition: postgres_ext.h:32
void get_agg_clause_costs(PlannerInfo *root, AggSplit aggsplit, AggClauseCosts *costs)
Definition: prepagg.c:559
RelOptInfo * find_base_rel(PlannerInfo *root, int relid)
Definition: relnode.c:529
RelOptInfo * build_simple_grouped_rel(PlannerInfo *root, RelOptInfo *rel)
Definition: relnode.c:433
RelOptInfo * fetch_upper_rel(PlannerInfo *root, UpperRelationKind kind, Relids relids)
Definition: relnode.c:1581
Node * ReplaceVarsFromTargetList(Node *node, int target_varno, int sublevels_up, RangeTblEntry *target_rte, List *targetlist, int result_relation, ReplaceVarsNoMatchOption nomatch_option, int nomatch_varno, bool *outer_hasSubLinks)
@ REPLACEVARS_REPORT_ERROR
Definition: rewriteManip.h:39
@ BackwardScanDirection
Definition: sdir.h:26
@ ForwardScanDirection
Definition: sdir.h:28
double estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, List **pgset, EstimationInfo *estinfo)
Definition: selfuncs.c:3456
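The usual call shape for the estimator above, as a hypothetical wrapper
(groupExprs is a List of expression nodes; the NULL arguments decline the
optional grouping-set and estimation-detail outputs):

#include "postgres.h"
#include "utils/selfuncs.h"

static double
groups_for(PlannerInfo *root, RelOptInfo *rel, List *groupExprs)
{
	/* distinct-group estimate over the rel's estimated row count */
	return estimate_num_groups(root, groupExprs, rel->rows,
							   NULL,	/* no grouping sets */
							   NULL);	/* no EstimationInfo wanted */
}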
void check_stack_depth(void)
Definition: stack_depth.c:95
int first_partial_path
Definition: pathnodes.h:2181
List * subpaths
Definition: pathnodes.h:2179
Index child_relid
Definition: pathnodes.h:3192
Index parent_relid
Definition: pathnodes.h:3191
Node * quals
Definition: primnodes.h:2358
struct List
Definition: pg_list.h:54
Definition: nodes.h:135
Oid opno
Definition: primnodes.h:850
List * args
Definition: primnodes.h:868
CompareType cmptype
Definition: lsyscache.h:28
List * exprs
Definition: pathnodes.h:1779
List * pathkeys
Definition: pathnodes.h:1912
Cardinality rows
Definition: pathnodes.h:1906
int parallel_workers
Definition: pathnodes.h:1903
Cost total_cost
Definition: pathnodes.h:1909
bool parallel_aware
Definition: pathnodes.h:1899
Cardinality plan_rows
Definition: plannodes.h:205
List * targetlist
Definition: plannodes.h:229
List * cte_plan_ids
Definition: pathnodes.h:333
struct Path * non_recursive_path
Definition: pathnodes.h:577
Query * parse
Definition: pathnodes.h:227
Node * limitCount
Definition: parsenodes.h:231
FromExpr * jointree
Definition: parsenodes.h:182
Node * setOperations
Definition: parsenodes.h:236
List * cteList
Definition: parsenodes.h:173
List * groupClause
Definition: parsenodes.h:216
Node * havingQual
Definition: parsenodes.h:222
List * rtable
Definition: parsenodes.h:175
Node * limitOffset
Definition: parsenodes.h:230
List * windowClause
Definition: parsenodes.h:224
List * targetList
Definition: parsenodes.h:198
List * groupingSets
Definition: parsenodes.h:220
List * distinctClause
Definition: parsenodes.h:226
char * ctename
Definition: parsenodes.h:1227
Index ctelevelsup
Definition: parsenodes.h:1229
bool funcordinality
Definition: parsenodes.h:1210
struct TableSampleClause * tablesample
Definition: parsenodes.h:1129
Query * subquery
Definition: parsenodes.h:1135
List * values_lists
Definition: parsenodes.h:1221
List * functions
Definition: parsenodes.h:1208
RTEKind rtekind
Definition: parsenodes.h:1078
Relids apply_agg_at
Definition: pathnodes.h:1206
List * group_exprs
Definition: pathnodes.h:1203
bool agg_useful
Definition: pathnodes.h:1212
List * group_clauses
Definition: pathnodes.h:1201
struct PathTarget * agg_input
Definition: pathnodes.h:1198
struct PathTarget * target
Definition: pathnodes.h:1195
List * baserestrictinfo
Definition: pathnodes.h:1046
bool consider_param_startup
Definition: pathnodes.h:941
List * subplan_params
Definition: pathnodes.h:1005
List * joininfo
Definition: pathnodes.h:1052
Relids relids
Definition: pathnodes.h:927
struct PathTarget * reltarget
Definition: pathnodes.h:949
Index relid
Definition: pathnodes.h:973
struct RelAggInfo * agg_info
Definition: pathnodes.h:1066
Cardinality tuples
Definition: pathnodes.h:1000
bool consider_parallel
Definition: pathnodes.h:943
Relids top_parent_relids
Definition: pathnodes.h:1078
BlockNumber pages
Definition: pathnodes.h:999
Relids lateral_relids
Definition: pathnodes.h:968
List * pathlist
Definition: pathnodes.h:954
RelOptKind reloptkind
Definition: pathnodes.h:921
struct Path * cheapest_startup_path
Definition: pathnodes.h:957
struct Path * cheapest_total_path
Definition: pathnodes.h:958
struct RelOptInfo * grouped_rel
Definition: pathnodes.h:1068
bool has_eclass_joins
Definition: pathnodes.h:1054
bool consider_startup
Definition: pathnodes.h:939
Bitmapset * live_parts
Definition: pathnodes.h:1108
int rel_parallel_workers
Definition: pathnodes.h:1007
bool consider_partitionwise_join
Definition: pathnodes.h:1060
List * partial_pathlist
Definition: pathnodes.h:956
PlannerInfo * subroot
Definition: pathnodes.h:1004
AttrNumber max_attr
Definition: pathnodes.h:981
Relids nulling_relids
Definition: pathnodes.h:989
Cardinality rows
Definition: pathnodes.h:933
AttrNumber min_attr
Definition: pathnodes.h:979
RTEKind rtekind
Definition: pathnodes.h:977
Expr * clause
Definition: pathnodes.h:2791
SetOperation op
Definition: parsenodes.h:2255
JoinType jointype
Definition: pathnodes.h:3120
Relids syn_righthand
Definition: pathnodes.h:3119
MonotonicFunction monotonic
Definition: supportnodes.h:299
Expr * expr
Definition: primnodes.h:2239
AttrNumber resno
Definition: primnodes.h:2241
Index ressortgroupref
Definition: primnodes.h:2245
bool repeatable_across_scans
Definition: tsmapi.h:65
SampleScanGetSampleSize_function SampleScanGetSampleSize
Definition: tsmapi.h:68
struct Var
Definition: primnodes.h:262
AttrNumber varattno
Definition: primnodes.h:274
int varno
Definition: primnodes.h:269
Index varlevelsup
Definition: primnodes.h:294
List * partitionClause
Definition: parsenodes.h:1574
Index winref
Definition: primnodes.h:611
Oid winfnoid
Definition: primnodes.h:597
unsigned char * unsafeFlags
Definition: allpaths.c:64
#define FirstLowInvalidHeapAttributeNumber
Definition: sysattr.h:27
TsmRoutine * GetTsmRoutine(Oid tsmhandler)
Definition: tablesample.c:27
bool create_tidscan_paths(PlannerInfo *root, RelOptInfo *rel)
Definition: tidpath.c:498
bool grouping_is_sortable(List *groupClause)
Definition: tlist.c:540
List * make_tlist_from_pathtarget(PathTarget *target)
Definition: tlist.c:624
bool grouping_is_hashable(List *groupClause)
Definition: tlist.c:560
List * pull_var_clause(Node *node, int flags)
Definition: var.c:653
void pull_varattnos(Node *node, Index varno, Bitmapset **varattnos)
Definition: var.c:296
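A closing sketch for the Var-extraction helpers above (hypothetical
wrapper; PVC_INCLUDE_PLACEHOLDERS makes whole PlaceHolderVars come back as
opaque list items instead of being recursed into):

#include "postgres.h"
#include "optimizer/optimizer.h"

static List *
vars_used(Node *expr)
{
	/* returns a list of Var (and PlaceHolderVar) nodes in expr */
	return pull_var_clause(expr, PVC_INCLUDE_PLACEHOLDERS);
}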