/*-------------------------------------------------------------------------
 *
 * allpaths.c
 *    Routines to find possible search paths for processing a query
 *
 * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/optimizer/path/allpaths.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include <limits.h>
#include <math.h>

#include "access/sysattr.h"
#include "access/tsmapi.h"
#include "catalog/pg_class.h"
#include "catalog/pg_operator.h"
#include "catalog/pg_proc.h"
#include "foreign/fdwapi.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "nodes/supportnodes.h"
#ifdef OPTIMIZER_DEBUG
#include "nodes/print.h"
#endif
#include "optimizer/appendinfo.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/geqo.h"
#include "optimizer/optimizer.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/plancat.h"
#include "optimizer/planner.h"
#include "optimizer/tlist.h"
#include "parser/parse_clause.h"
#include "parser/parsetree.h"
#include "partitioning/partbounds.h"
#include "port/pg_bitutils.h"
#include "rewrite/rewriteManip.h"
#include "utils/lsyscache.h"


/* Bitmask flags for pushdown_safety_info.unsafeFlags */
#define UNSAFE_HAS_VOLATILE_FUNC        (1 << 0)
#define UNSAFE_HAS_SET_FUNC             (1 << 1)
#define UNSAFE_NOTIN_DISTINCTON_CLAUSE  (1 << 2)
#define UNSAFE_NOTIN_PARTITIONBY_CLAUSE (1 << 3)
#define UNSAFE_TYPE_MISMATCH            (1 << 4)
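
/*
 * As an illustration of how these flags are used: if a subquery output
 * column's expression contains a volatile function, check_output_expressions()
 * records
 *     unsafeFlags[resno] |= UNSAFE_HAS_VOLATILE_FUNC;
 * for that column, and qual_is_pushdown_safe() will then refuse to push any
 * outer qual referencing that column down into the subquery.
 */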

/* results of subquery_is_pushdown_safe */
typedef struct pushdown_safety_info
{
    unsigned char *unsafeFlags; /* bitmask of reasons why this target list
                                 * column is unsafe for qual pushdown, or 0 if
                                 * no reason. */
    bool        unsafeVolatile; /* don't push down volatile quals */
    bool        unsafeLeaky;    /* don't push down leaky quals */
} pushdown_safety_info;

/* Return type for qual_is_pushdown_safe */
typedef enum pushdown_safe_type
{
    PUSHDOWN_UNSAFE,            /* unsafe to push qual into subquery */
    PUSHDOWN_SAFE,              /* safe to push qual into subquery */
    PUSHDOWN_WINDOWCLAUSE_RUNCOND,  /* unsafe, but may work as WindowClause
                                     * run condition */
} pushdown_safe_type;

/* These parameters are set by GUC */
bool        enable_geqo = false;    /* just in case GUC doesn't set it */
int         geqo_threshold;
int         min_parallel_table_scan_size;
int         min_parallel_index_scan_size;

/* Hook for plugins to get control in set_rel_pathlist() */
set_rel_pathlist_hook_type set_rel_pathlist_hook = NULL;

/* Hook for plugins to replace standard_join_search() */
join_search_hook_type join_search_hook = NULL;
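
/*
 * A minimal sketch of how an extension might use set_rel_pathlist_hook
 * (illustrative only; the names my_rel_pathlist_hook and
 * prev_rel_pathlist_hook are invented for this example):
 *
 *     static set_rel_pathlist_hook_type prev_rel_pathlist_hook = NULL;
 *
 *     static void
 *     my_rel_pathlist_hook(PlannerInfo *root, RelOptInfo *rel,
 *                          Index rti, RangeTblEntry *rte)
 *     {
 *         if (prev_rel_pathlist_hook)
 *             prev_rel_pathlist_hook(root, rel, rti, rte);
 *         // add, modify, or remove paths for rel here
 *     }
 *
 *     // in the extension's _PG_init():
 *     prev_rel_pathlist_hook = set_rel_pathlist_hook;
 *     set_rel_pathlist_hook = my_rel_pathlist_hook;
 *
 * Saving and chaining any previously installed hook, as above, is the usual
 * convention so that multiple extensions can coexist.
 */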

static void set_base_rel_consider_startup(PlannerInfo *root);
static void set_base_rel_sizes(PlannerInfo *root);
static void set_base_rel_pathlists(PlannerInfo *root);
static void set_rel_size(PlannerInfo *root, RelOptInfo *rel,
                         Index rti, RangeTblEntry *rte);
static void set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
                             Index rti, RangeTblEntry *rte);
static void set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel,
                               RangeTblEntry *rte);
static void create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel);
static void set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
                                      RangeTblEntry *rte);
static void set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                   RangeTblEntry *rte);
static void set_tablesample_rel_size(PlannerInfo *root, RelOptInfo *rel,
                                     RangeTblEntry *rte);
static void set_tablesample_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                         RangeTblEntry *rte);
static void set_foreign_size(PlannerInfo *root, RelOptInfo *rel,
                             RangeTblEntry *rte);
static void set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                 RangeTblEntry *rte);
static void set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
                                Index rti, RangeTblEntry *rte);
static void set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                    Index rti, RangeTblEntry *rte);
static void generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel,
                                         List *live_childrels,
                                         List *all_child_pathkeys);
static Path *get_cheapest_parameterized_child_path(PlannerInfo *root,
                                                   RelOptInfo *rel,
                                                   Relids required_outer);
static void accumulate_append_subpath(Path *path,
                                      List **subpaths,
                                      List **special_subpaths);
static Path *get_singleton_append_subpath(Path *path);
static void set_dummy_rel_pathlist(RelOptInfo *rel);
static void set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                  Index rti, RangeTblEntry *rte);
static void set_function_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                  RangeTblEntry *rte);
static void set_values_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                RangeTblEntry *rte);
static void set_tablefunc_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                   RangeTblEntry *rte);
static void set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel,
                             RangeTblEntry *rte);
static void set_namedtuplestore_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                         RangeTblEntry *rte);
static void set_result_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                RangeTblEntry *rte);
static void set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                   RangeTblEntry *rte);
static RelOptInfo *make_rel_from_joinlist(PlannerInfo *root, List *joinlist);
static bool subquery_is_pushdown_safe(Query *subquery, Query *topquery,
                                      pushdown_safety_info *safetyInfo);
static bool recurse_pushdown_safe(Node *setOp, Query *topquery,
                                  pushdown_safety_info *safetyInfo);
static void check_output_expressions(Query *subquery,
                                     pushdown_safety_info *safetyInfo);
static void compare_tlist_datatypes(List *tlist, List *colTypes,
                                    pushdown_safety_info *safetyInfo);
static bool targetIsInAllPartitionLists(TargetEntry *tle, Query *query);
static pushdown_safe_type qual_is_pushdown_safe(Query *subquery, Index rti,
                                                RestrictInfo *rinfo,
                                                pushdown_safety_info *safetyInfo);
static void subquery_push_qual(Query *subquery,
                               RangeTblEntry *rte, Index rti, Node *qual);
static void recurse_push_qual(Node *setOp, Query *topquery,
                              RangeTblEntry *rte, Index rti, Node *qual);
static void remove_unused_subquery_outputs(Query *subquery, RelOptInfo *rel,
                                           Bitmapset *extra_used_attrs);


/*
 * make_one_rel
 *    Finds all possible access paths for executing a query, returning a
 *    single rel that represents the join of all base rels in the query.
 */
RelOptInfo *
make_one_rel(PlannerInfo *root, List *joinlist)
{
    RelOptInfo *rel;
    Index       rti;
    double      total_pages;

    /* Mark base rels as to whether we care about fast-start plans */
    set_base_rel_consider_startup(root);

    /*
     * Compute size estimates and consider_parallel flags for each base rel.
     */
    set_base_rel_sizes(root);

    /*
     * We should now have size estimates for every actual table involved in
     * the query, and we also know which if any have been deleted from the
     * query by join removal, pruned by partition pruning, or eliminated by
     * constraint exclusion.  So we can now compute total_table_pages.
     *
     * Note that appendrels are not double-counted here, even though we don't
     * bother to distinguish RelOptInfos for appendrel parents, because the
     * parents will have pages = 0.
     *
     * XXX if a table is self-joined, we will count it once per appearance,
     * which perhaps is the wrong thing ... but that's not completely clear,
     * and detecting self-joins here is difficult, so ignore it for now.
     */
    total_pages = 0;
    for (rti = 1; rti < root->simple_rel_array_size; rti++)
    {
        RelOptInfo *brel = root->simple_rel_array[rti];

        /* there may be empty slots corresponding to non-baserel RTEs */
        if (brel == NULL)
            continue;

        Assert(brel->relid == rti); /* sanity check on array */

        if (IS_DUMMY_REL(brel))
            continue;

        if (IS_SIMPLE_REL(brel))
            total_pages += (double) brel->pages;
    }
    root->total_table_pages = total_pages;

    /*
     * Generate access paths for each base rel.
     */
    set_base_rel_pathlists(root);

    /*
     * Generate access paths for the entire join tree.
     */
    rel = make_rel_from_joinlist(root, joinlist);

    /*
     * The result should join all and only the query's base + outer-join rels.
     */
    Assert(bms_equal(rel->relids, root->all_query_rels));

    return rel;
}

/*
 * set_base_rel_consider_startup
 *    Set the consider_[param_]startup flags for each base-relation entry.
 *
 * For the moment, we only deal with consider_param_startup here; because the
 * logic for consider_startup is pretty trivial and is the same for every base
 * relation, we just let build_simple_rel() initialize that flag correctly to
 * start with.  If that logic ever gets more complicated it would probably
 * be better to move it here.
 */
static void
set_base_rel_consider_startup(PlannerInfo *root)
{
    /*
     * Since parameterized paths can only be used on the inside of a nestloop
     * join plan, there is usually little value in considering fast-start
     * plans for them.  However, for relations that are on the RHS of a SEMI
     * or ANTI join, a fast-start plan can be useful because we're only going
     * to care about fetching one tuple anyway.
     *
     * To minimize growth of planning time, we currently restrict this to
     * cases where the RHS is a single base relation, not a join; there is no
     * provision for consider_param_startup to get set at all on joinrels.
     * Also we don't worry about appendrels.  costsize.c's costing rules for
     * nestloop semi/antijoins don't consider such cases either.
     */
    ListCell   *lc;

    foreach(lc, root->join_info_list)
    {
        SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(lc);
        int         varno;

        if ((sjinfo->jointype == JOIN_SEMI || sjinfo->jointype == JOIN_ANTI) &&
            bms_get_singleton_member(sjinfo->min_righthand, &varno))
        {
            RelOptInfo *rel = find_base_rel(root, varno);

            rel->consider_param_startup = true;
        }
    }
}
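
/*
 * For example, in
 *     SELECT * FROM a WHERE EXISTS (SELECT 1 FROM b WHERE b.x = a.x);
 * "b" is the single base relation on the RHS of a semijoin, so it gets
 * consider_param_startup = true: a path on b parameterized by a.x only
 * needs to fetch its first matching tuple cheaply.
 */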

/*
 * set_base_rel_sizes
 *    Set the size estimates (rows and widths) for each base-relation entry.
 *    Also determine whether to consider parallel paths for base relations.
 *
 * We do this in a separate pass over the base rels so that rowcount
 * estimates are available for parameterized path generation, and also so
 * that each rel's consider_parallel flag is set correctly before we begin to
 * generate paths.
 */
static void
set_base_rel_sizes(PlannerInfo *root)
{
    Index       rti;

    for (rti = 1; rti < root->simple_rel_array_size; rti++)
    {
        RelOptInfo *rel = root->simple_rel_array[rti];
        RangeTblEntry *rte;

        /* there may be empty slots corresponding to non-baserel RTEs */
        if (rel == NULL)
            continue;

        Assert(rel->relid == rti);  /* sanity check on array */

        /* ignore RTEs that are "other rels" */
        if (rel->reloptkind != RELOPT_BASEREL)
            continue;

        rte = root->simple_rte_array[rti];

        /*
         * If parallelism is allowable for this query in general, see whether
         * it's allowable for this rel in particular.  We have to do this
         * before set_rel_size(), because (a) if this rel is an inheritance
         * parent, set_append_rel_size() will use and perhaps change the rel's
         * consider_parallel flag, and (b) for some RTE types, set_rel_size()
         * goes ahead and makes paths immediately.
         */
        if (root->glob->parallelModeOK)
            set_rel_consider_parallel(root, rel, rte);

        set_rel_size(root, rel, rti, rte);
    }
}

/*
 * set_base_rel_pathlists
 *    Finds all paths available for scanning each base-relation entry.
 *    Sequential scan and any available indices are considered.
 *    Each useful path is attached to its relation's 'pathlist' field.
 */
static void
set_base_rel_pathlists(PlannerInfo *root)
{
    Index       rti;

    for (rti = 1; rti < root->simple_rel_array_size; rti++)
    {
        RelOptInfo *rel = root->simple_rel_array[rti];

        /* there may be empty slots corresponding to non-baserel RTEs */
        if (rel == NULL)
            continue;

        Assert(rel->relid == rti);  /* sanity check on array */

        /* ignore RTEs that are "other rels" */
        if (rel->reloptkind != RELOPT_BASEREL)
            continue;

        set_rel_pathlist(root, rel, rti, root->simple_rte_array[rti]);
    }
}

/*
 * set_rel_size
 *    Set size estimates for a base relation
 */
static void
set_rel_size(PlannerInfo *root, RelOptInfo *rel,
             Index rti, RangeTblEntry *rte)
{
    if (rel->reloptkind == RELOPT_BASEREL &&
        relation_excluded_by_constraints(root, rel, rte))
    {
        /*
         * We proved we don't need to scan the rel via constraint exclusion,
         * so set up a single dummy path for it.  Here we only check this for
         * regular baserels; if it's an otherrel, CE was already checked in
         * set_append_rel_size().
         *
         * In this case, we go ahead and set up the relation's path right away
         * instead of leaving it for set_rel_pathlist to do.  This is because
         * we don't have a convention for marking a rel as dummy except by
         * assigning a dummy path to it.
         */
        set_dummy_rel_pathlist(rel);
    }
    else if (rte->inh)
    {
        /* It's an "append relation", process accordingly */
        set_append_rel_size(root, rel, rti, rte);
    }
    else
    {
        switch (rel->rtekind)
        {
            case RTE_RELATION:
                if (rte->relkind == RELKIND_FOREIGN_TABLE)
                {
                    /* Foreign table */
                    set_foreign_size(root, rel, rte);
                }
                else if (rte->relkind == RELKIND_PARTITIONED_TABLE)
                {
                    /*
                     * We could get here if asked to scan a partitioned table
                     * with ONLY.  In that case we shouldn't scan any of the
                     * partitions, so mark it as a dummy rel.
                     */
                    set_dummy_rel_pathlist(rel);
                }
                else if (rte->tablesample != NULL)
                {
                    /* Sampled relation */
                    set_tablesample_rel_size(root, rel, rte);
                }
                else
                {
                    /* Plain relation */
                    set_plain_rel_size(root, rel, rte);
                }
                break;
            case RTE_SUBQUERY:

                /*
                 * Subqueries don't support making a choice between
                 * parameterized and unparameterized paths, so just go ahead
                 * and build their paths immediately.
                 */
                set_subquery_pathlist(root, rel, rti, rte);
                break;
            case RTE_FUNCTION:
                set_function_size_estimates(root, rel);
                break;
            case RTE_TABLEFUNC:
                set_tablefunc_size_estimates(root, rel);
                break;
            case RTE_VALUES:
                set_values_size_estimates(root, rel);
                break;
            case RTE_CTE:

                /*
                 * CTEs don't support making a choice between parameterized
                 * and unparameterized paths, so just go ahead and build their
                 * paths immediately.
                 */
                if (rte->self_reference)
                    set_worktable_pathlist(root, rel, rte);
                else
                    set_cte_pathlist(root, rel, rte);
                break;
            case RTE_NAMEDTUPLESTORE:
                /* Might as well just build the path immediately */
                set_namedtuplestore_pathlist(root, rel, rte);
                break;
            case RTE_RESULT:
                /* Might as well just build the path immediately */
                set_result_pathlist(root, rel, rte);
                break;
            default:
                elog(ERROR, "unexpected rtekind: %d", (int) rel->rtekind);
                break;
        }
    }

    /*
     * We insist that all non-dummy rels have a nonzero rowcount estimate.
     */
    Assert(rel->rows > 0 || IS_DUMMY_REL(rel));
}

/*
 * set_rel_pathlist
 *    Build access paths for a base relation
 */
static void
set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
                 Index rti, RangeTblEntry *rte)
{
    if (IS_DUMMY_REL(rel))
    {
        /* We already proved the relation empty, so nothing more to do */
    }
    else if (rte->inh)
    {
        /* It's an "append relation", process accordingly */
        set_append_rel_pathlist(root, rel, rti, rte);
    }
    else
    {
        switch (rel->rtekind)
        {
            case RTE_RELATION:
                if (rte->relkind == RELKIND_FOREIGN_TABLE)
                {
                    /* Foreign table */
                    set_foreign_pathlist(root, rel, rte);
                }
                else if (rte->tablesample != NULL)
                {
                    /* Sampled relation */
                    set_tablesample_rel_pathlist(root, rel, rte);
                }
                else
                {
                    /* Plain relation */
                    set_plain_rel_pathlist(root, rel, rte);
                }
                break;
            case RTE_SUBQUERY:
                /* Subquery --- fully handled during set_rel_size */
                break;
            case RTE_FUNCTION:
                /* RangeFunction */
                set_function_pathlist(root, rel, rte);
                break;
            case RTE_TABLEFUNC:
                /* Table Function */
                set_tablefunc_pathlist(root, rel, rte);
                break;
            case RTE_VALUES:
                /* Values list */
                set_values_pathlist(root, rel, rte);
                break;
            case RTE_CTE:
                /* CTE reference --- fully handled during set_rel_size */
                break;
            case RTE_NAMEDTUPLESTORE:
                /* tuplestore reference --- fully handled during set_rel_size */
                break;
            case RTE_RESULT:
                /* simple Result --- fully handled during set_rel_size */
                break;
            default:
                elog(ERROR, "unexpected rtekind: %d", (int) rel->rtekind);
                break;
        }
    }

    /*
     * Allow a plugin to editorialize on the set of Paths for this base
     * relation.  It could add new paths (such as CustomPaths) by calling
     * add_path(), or add_partial_path() if parallel aware.  It could also
     * delete or modify paths added by the core code.
     */
    if (set_rel_pathlist_hook)
        (*set_rel_pathlist_hook) (root, rel, rti, rte);

    /*
     * If this is a baserel, we should normally consider gathering any partial
     * paths we may have created for it.  We have to do this after calling the
     * set_rel_pathlist_hook, else it cannot add partial paths to be included
     * here.
     *
     * However, if this is an inheritance child, skip it.  Otherwise, we could
     * end up with a very large number of gather nodes, each trying to grab
     * its own pool of workers.  Instead, we'll consider gathering partial
     * paths for the parent appendrel.
     *
     * Also, if this is the topmost scan/join rel, we postpone gathering until
     * the final scan/join targetlist is available (see grouping_planner).
     */
    if (rel->reloptkind == RELOPT_BASEREL &&
        !bms_equal(rel->relids, root->all_query_rels))
        generate_useful_gather_paths(root, rel, false);

    /* Now find the cheapest of the paths for this rel */
    set_cheapest(rel);

#ifdef OPTIMIZER_DEBUG
    pprint(rel);
#endif
}

/*
 * set_plain_rel_size
 *    Set size estimates for a plain relation (no subquery, no inheritance)
 */
static void
set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
    /*
     * Test any partial indexes of rel for applicability.  We must do this
     * first since partial unique indexes can affect size estimates.
     */
    check_index_predicates(root, rel);

    /* Mark rel with estimated output rows, width, etc */
    set_baserel_size_estimates(root, rel);
}

/*
 * If this relation could possibly be scanned from within a worker, then set
 * its consider_parallel flag.
 */
static void
set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
                          RangeTblEntry *rte)
{
    /*
     * The flag has previously been initialized to false, so we can just
     * return if it becomes clear that we can't safely set it.
     */
    Assert(!rel->consider_parallel);

    /* Don't call this if parallelism is disallowed for the entire query. */
    Assert(root->glob->parallelModeOK);

    /* This should only be called for baserels and appendrel children. */
    Assert(IS_SIMPLE_REL(rel));

    /* Assorted checks based on rtekind. */
    switch (rte->rtekind)
    {
        case RTE_RELATION:

            /*
             * Currently, parallel workers can't access the leader's temporary
             * tables.  We could possibly relax this if we wrote all of its
             * local buffers at the start of the query and made no changes
             * thereafter (maybe we could allow hint bit changes), and if we
             * taught the workers to read them.  Writing a large number of
             * temporary buffers could be expensive, though, and we don't have
             * the rest of the necessary infrastructure right now anyway.  So
             * for now, bail out if we see a temporary table.
             */
            if (get_rel_persistence(rte->relid) == RELPERSISTENCE_TEMP)
                return;

            /*
             * Table sampling can be pushed down to workers if the sample
             * function and its arguments are safe.
             */
            if (rte->tablesample != NULL)
            {
                char        proparallel = func_parallel(rte->tablesample->tsmhandler);

                if (proparallel != PROPARALLEL_SAFE)
                    return;
                if (!is_parallel_safe(root, (Node *) rte->tablesample->args))
                    return;
            }

            /*
             * Ask FDWs whether they can support performing a ForeignScan
             * within a worker.  Most often, the answer will be no.  For
             * example, if the nature of the FDW is such that it opens a TCP
             * connection with a remote server, each parallel worker would end
             * up with a separate connection, and these connections might not
             * be appropriately coordinated between workers and the leader.
             */
            if (rte->relkind == RELKIND_FOREIGN_TABLE)
            {
                Assert(rel->fdwroutine);
                if (!rel->fdwroutine->IsForeignScanParallelSafe)
                    return;
                if (!rel->fdwroutine->IsForeignScanParallelSafe(root, rel, rte))
                    return;
            }

            /*
             * There are additional considerations for appendrels, which we'll
             * deal with in set_append_rel_size and set_append_rel_pathlist.
             * For now, just set consider_parallel based on the rel's own
             * quals and targetlist.
             */
            break;

        case RTE_SUBQUERY:

            /*
             * There's no intrinsic problem with scanning a subquery-in-FROM
             * (as distinct from a SubPlan or InitPlan) in a parallel worker.
             * If the subquery doesn't happen to have any parallel-safe paths,
             * then flagging it as consider_parallel won't change anything,
             * but that's true for plain tables, too.  We must set
             * consider_parallel based on the rel's own quals and targetlist,
             * so that if a subquery path is parallel-safe but the quals and
             * projection we're sticking onto it are not, we correctly mark
             * the SubqueryScanPath as not parallel-safe.  (Note that
             * set_subquery_pathlist() might push some of these quals down
             * into the subquery itself, but that doesn't change anything.)
             *
             * We can't push sub-select containing LIMIT/OFFSET to workers as
             * there is no guarantee that the row order will be fully
             * deterministic, and applying LIMIT/OFFSET will lead to
             * inconsistent results at the top-level.  (In some cases, where
             * the result is ordered, we could relax this restriction.  But it
             * doesn't currently seem worth expending extra effort to do so.)
             */
            {
                Query      *subquery = castNode(Query, rte->subquery);

                if (limit_needed(subquery))
                    return;
            }
            break;

        case RTE_JOIN:
            /* Shouldn't happen; we're only considering baserels here. */
            Assert(false);
            return;

        case RTE_FUNCTION:
            /* Check for parallel-restricted functions. */
            if (!is_parallel_safe(root, (Node *) rte->functions))
                return;
            break;

        case RTE_TABLEFUNC:
            /* not parallel safe */
            return;

        case RTE_VALUES:
            /* Check for parallel-restricted functions. */
            if (!is_parallel_safe(root, (Node *) rte->values_lists))
                return;
            break;

        case RTE_CTE:

            /*
             * CTE tuplestores aren't shared among parallel workers, so we
             * force all CTE scans to happen in the leader.  Also, populating
             * the CTE would require executing a subplan that's not available
             * in the worker, might be parallel-restricted, and must get
             * executed only once.
             */
            return;

        case RTE_NAMEDTUPLESTORE:

            /*
             * tuplestore cannot be shared, at least without more
             * infrastructure to support that.
             */
            return;

        case RTE_RESULT:
            /* RESULT RTEs, in themselves, are no problem. */
            break;
        case RTE_GROUP:
            /* Shouldn't happen; we're only considering baserels here. */
            Assert(false);
            return;
    }

    /*
     * If there's anything in baserestrictinfo that's parallel-restricted, we
     * give up on parallelizing access to this relation.  We could consider
     * instead postponing application of the restricted quals until we're
     * above all the parallelism in the plan tree, but it's not clear that
     * that would be a win in very many cases, and it might be tricky to make
     * outer join clauses work correctly.  It would likely break equivalence
     * classes, too.
     */
    if (!is_parallel_safe(root, (Node *) rel->baserestrictinfo))
        return;

    /*
     * Likewise, if the relation's outputs are not parallel-safe, give up.
     * (Usually, they're just Vars, but sometimes they're not.)
     */
    if (!is_parallel_safe(root, (Node *) rel->reltarget->exprs))
        return;

    /* We have a winner. */
    rel->consider_parallel = true;
}
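
/*
 * Two common consequences of the rules above, as illustrations:
 *
 *     CREATE TEMP TABLE t AS SELECT ...;
 *     SELECT count(*) FROM t;          -- t is never scanned in parallel
 *
 *     SELECT * FROM (SELECT ... LIMIT 10) ss, big;
 *                                      -- the LIMIT blocks parallelism for ss
 */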

/*
 * set_plain_rel_pathlist
 *    Build access paths for a plain relation (no subquery, no inheritance)
 */
static void
set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
    Relids      required_outer;

    /*
     * We don't support pushing join clauses into the quals of a seqscan, but
     * it could still have required parameterization due to LATERAL refs in
     * its tlist.
     */
    required_outer = rel->lateral_relids;

    /*
     * Consider TID scans.
     *
     * If create_tidscan_paths returns true, then a TID scan path is forced.
     * This happens when rel->baserestrictinfo contains CurrentOfExpr, because
     * the executor can't handle any other type of path for such queries.
     * Hence, we return without adding any other paths.
     */
    if (create_tidscan_paths(root, rel))
        return;

    /* Consider sequential scan */
    add_path(rel, create_seqscan_path(root, rel, required_outer, 0));

    /* If appropriate, consider parallel sequential scan */
    if (rel->consider_parallel && required_outer == NULL)
        create_plain_partial_paths(root, rel);

    /* Consider index scans */
    create_index_paths(root, rel);
}

/*
 * create_plain_partial_paths
 *    Build partial access paths for parallel scan of a plain relation
 */
static void
create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel)
{
    int         parallel_workers;

    parallel_workers = compute_parallel_worker(rel, rel->pages, -1,
                                               max_parallel_workers_per_gather);

    /* If any limit was set to zero, the user doesn't want a parallel scan. */
    if (parallel_workers <= 0)
        return;

    /* Add an unordered partial path based on a parallel sequential scan. */
    add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers));
}
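
/*
 * With the default min_parallel_table_scan_size of 8MB, compute_parallel_worker()
 * picks one worker once the heap reaches that size and adds another for each
 * tripling of it: roughly 8MB -> 1 worker, 24MB -> 2, 72MB -> 3, capped by
 * max_parallel_workers_per_gather.  (Illustrative numbers; see
 * compute_parallel_worker() for the exact rule.)
 */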

/*
 * set_tablesample_rel_size
 *    Set size estimates for a sampled relation
 */
static void
set_tablesample_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
    TableSampleClause *tsc = rte->tablesample;
    TsmRoutine *tsm;
    BlockNumber pages;
    double      tuples;

    /*
     * Test any partial indexes of rel for applicability.  We must do this
     * first since partial unique indexes can affect size estimates.
     */
    check_index_predicates(root, rel);

    /*
     * Call the sampling method's estimation function to estimate the number
     * of pages it will read and the number of tuples it will return.  (Note:
     * we assume the function returns sane values.)
     */
    tsm = GetTsmRoutine(tsc->tsmhandler);
    tsm->SampleScanGetSampleSize(root, rel, tsc->args,
                                 &pages, &tuples);

    /*
     * For the moment, because we will only consider a SampleScan path for the
     * rel, it's okay to just overwrite the pages and tuples estimates for the
     * whole relation.  If we ever consider multiple path types for sampled
     * rels, we'll need more complication.
     */
    rel->pages = pages;
    rel->tuples = tuples;

    /* Mark rel with estimated output rows, width, etc */
    set_baserel_size_estimates(root, rel);
}

/*
 * set_tablesample_rel_pathlist
 *    Build access paths for a sampled relation
 */
static void
set_tablesample_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
    Relids      required_outer;
    Path       *path;

    /*
     * We don't support pushing join clauses into the quals of a samplescan,
     * but it could still have required parameterization due to LATERAL refs
     * in its tlist or TABLESAMPLE arguments.
     */
    required_outer = rel->lateral_relids;

    /* Consider sampled scan */
    path = create_samplescan_path(root, rel, required_outer);

    /*
     * If the sampling method does not support repeatable scans, we must avoid
     * plans that would scan the rel multiple times.  Ideally, we'd simply
     * avoid putting the rel on the inside of a nestloop join; but adding such
     * a consideration to the planner seems like a great deal of complication
     * to support an uncommon usage of second-rate sampling methods.  Instead,
     * if there is a risk that the query might perform an unsafe join, just
     * wrap the SampleScan in a Materialize node.  We can check for joins by
     * counting the membership of all_query_rels (note that this correctly
     * counts inheritance trees as single rels).  If we're inside a subquery,
     * we can't easily check whether a join might occur in the outer query, so
     * just assume one is possible.
     *
     * GetTsmRoutine is relatively expensive compared to the other tests here,
     * so check repeatable_across_scans last, even though that's a bit odd.
     */
    if ((root->query_level > 1 ||
         bms_membership(root->all_query_rels) != BMS_SINGLETON) &&
        !(GetTsmRoutine(rte->tablesample->tsmhandler)->repeatable_across_scans))
    {
        path = (Path *) create_material_path(rel, path);
    }

    add_path(rel, path);

    /* For the moment, at least, there are no other paths to consider */
}
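
/*
 * For instance, the core SYSTEM and BERNOULLI sampling methods advertise
 * repeatable_across_scans, so
 *     SELECT * FROM t TABLESAMPLE SYSTEM (10);
 * can be joined without a Materialize node; contrib's tsm_system_rows and
 * tsm_system_time do not, so their SampleScans get materialized here when a
 * join is possible.
 */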

/*
 * set_foreign_size
 *    Set size estimates for a foreign table RTE
 */
static void
set_foreign_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
    /* Mark rel with estimated output rows, width, etc */
    set_foreign_size_estimates(root, rel);

    /* Let FDW adjust the size estimates, if it can */
    rel->fdwroutine->GetForeignRelSize(root, rel, rte->relid);

    /* ... but do not let it set the rows estimate to zero */
    rel->rows = clamp_row_est(rel->rows);

    /*
     * Also, make sure rel->tuples is not insane relative to rel->rows.
     * Notably, this ensures sanity if pg_class.reltuples contains -1 and the
     * FDW doesn't do anything to replace that.
     */
    rel->tuples = Max(rel->tuples, rel->rows);
}

/*
 * set_foreign_pathlist
 *    Build access paths for a foreign table RTE
 */
static void
set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
    /* Call the FDW's GetForeignPaths function to generate path(s) */
    rel->fdwroutine->GetForeignPaths(root, rel, rte->relid);
}
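
/*
 * For example, postgres_fdw's GetForeignPaths callback adds a basic
 * ForeignPath for a remote scan (and possibly parameterized variants).  An
 * FDW that adds no path here leaves the rel with an empty pathlist, which
 * makes planning fail outright, so every FDW is expected to supply at least
 * one path.
 */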

/*
 * set_append_rel_size
 *    Set size estimates for a simple "append relation"
 *
 * The passed-in rel and RTE represent the entire append relation.  The
 * relation's contents are computed by appending together the output of the
 * individual member relations.  Note that in the non-partitioned inheritance
 * case, the first member relation is actually the same table as is mentioned
 * in the parent RTE ... but it has a different RTE and RelOptInfo.  This is
 * a good thing because their outputs are not the same size.
 */
static void
set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
                    Index rti, RangeTblEntry *rte)
{
    int         parentRTindex = rti;
    bool        has_live_children;
    double      parent_rows;
    double      parent_size;
    double     *parent_attrsizes;
    int         nattrs;
    ListCell   *l;

    /* Guard against stack overflow due to overly deep inheritance tree. */
    check_stack_depth();

    Assert(IS_SIMPLE_REL(rel));

    /*
     * If this is a partitioned baserel, set the consider_partitionwise_join
     * flag; currently, we only consider partitionwise joins with the baserel
     * if its targetlist doesn't contain a whole-row Var.
     */
    if (enable_partitionwise_join &&
        rel->reloptkind == RELOPT_BASEREL &&
        rte->relkind == RELKIND_PARTITIONED_TABLE &&
        bms_is_empty(rel->attr_needed[InvalidAttrNumber - rel->min_attr]))
        rel->consider_partitionwise_join = true;

    /*
     * Initialize to compute size estimates for whole append relation.
     *
     * We handle width estimates by weighting the widths of different child
     * rels proportionally to their number of rows.  This is sensible because
     * the use of width estimates is mainly to compute the total relation
     * "footprint" if we have to sort or hash it.  To do this, we sum the
     * total equivalent size (in "double" arithmetic) and then divide by the
     * total rowcount estimate.  This is done separately for the total rel
     * width and each attribute.
     *
     * Note: if you consider changing this logic, beware that child rels could
     * have zero rows and/or width, if they were excluded by constraints.
     */
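
    /*
     * A worked example of the weighting (illustrative numbers): with child A
     * (1000 rows, width 40) and child B (3000 rows, width 60), the parent
     * width becomes (40*1000 + 60*3000) / 4000 = 55, not the unweighted 50.
     */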
    has_live_children = false;
    parent_rows = 0;
    parent_size = 0;
    nattrs = rel->max_attr - rel->min_attr + 1;
    parent_attrsizes = (double *) palloc0(nattrs * sizeof(double));

    foreach(l, root->append_rel_list)
    {
        AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
        int         childRTindex;
        RangeTblEntry *childRTE;
        RelOptInfo *childrel;
        List       *childrinfos;
        ListCell   *parentvars;
        ListCell   *childvars;
        ListCell   *lc;

        /* append_rel_list contains all append rels; ignore others */
        if (appinfo->parent_relid != parentRTindex)
            continue;

        childRTindex = appinfo->child_relid;
        childRTE = root->simple_rte_array[childRTindex];

        /*
         * The child rel's RelOptInfo was already created during
         * add_other_rels_to_query.
         */
        childrel = find_base_rel(root, childRTindex);
        Assert(childrel->reloptkind == RELOPT_OTHER_MEMBER_REL);

        /* We may have already proven the child to be dummy. */
        if (IS_DUMMY_REL(childrel))
            continue;

        /*
         * We have to copy the parent's targetlist and quals to the child,
         * with appropriate substitution of variables.  However, the
         * baserestrictinfo quals were already copied/substituted when the
         * child RelOptInfo was built.  So we don't need any additional setup
         * before applying constraint exclusion.
         */
        if (relation_excluded_by_constraints(root, childrel, childRTE))
        {
            /*
             * This child need not be scanned, so we can omit it from the
             * appendrel.
             */
            set_dummy_rel_pathlist(childrel);
            continue;
        }

        /*
         * Constraint exclusion failed, so copy the parent's join quals and
         * targetlist to the child, with appropriate variable substitutions.
         *
         * We skip join quals that came from above outer joins that can null
         * this rel, since they would be of no value while generating paths
         * for the child.  This saves some effort while processing the child
         * rel, and it also avoids an implementation restriction in
         * adjust_appendrel_attrs (it can't apply nullingrels to a non-Var).
         */
        childrinfos = NIL;
        foreach(lc, rel->joininfo)
        {
            RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);

            if (!bms_overlap(rinfo->clause_relids, rel->nulling_relids))
                childrinfos = lappend(childrinfos,
                                      adjust_appendrel_attrs(root,
                                                             (Node *) rinfo,
                                                             1, &appinfo));
        }
        childrel->joininfo = childrinfos;

        /*
         * Now for the child's targetlist.
         *
         * NB: the resulting childrel->reltarget->exprs may contain arbitrary
         * expressions, which otherwise would not occur in a rel's targetlist.
         * Code that might be looking at an appendrel child must cope with
         * such.  (Normally, a rel's targetlist would only include Vars and
         * PlaceHolderVars.)  XXX we do not bother to update the cost or width
         * fields of childrel->reltarget; not clear if that would be useful.
         */
        childrel->reltarget->exprs = (List *)
            adjust_appendrel_attrs(root,
                                   (Node *) rel->reltarget->exprs,
                                   1, &appinfo);

        /*
         * We have to make child entries in the EquivalenceClass data
         * structures as well.  This is needed either if the parent
         * participates in some eclass joins (because we will want to consider
         * inner-indexscan joins on the individual children) or if the parent
         * has useful pathkeys (because we should try to build MergeAppend
         * paths that produce those sort orderings).
         */
        if (rel->has_eclass_joins || has_useful_pathkeys(root, rel))
            add_child_rel_equivalences(root, appinfo, rel, childrel);
        childrel->has_eclass_joins = rel->has_eclass_joins;

        /*
         * Note: we could compute appropriate attr_needed data for the child's
         * variables, by transforming the parent's attr_needed through the
         * translated_vars mapping.  However, currently there's no need
         * because attr_needed is only examined for base relations not
         * otherrels.  So we just leave the child's attr_needed empty.
         */

        /*
         * If we consider partitionwise joins with the parent rel, do the same
         * for partitioned child rels.
         *
         * Note: here we abuse the consider_partitionwise_join flag by setting
         * it for child rels that are not themselves partitioned.  We do so to
         * tell try_partitionwise_join() that the child rel is sufficiently
         * valid to be used as a per-partition input, even if it later gets
         * proven to be dummy.  (It's not usable until we've set up the
         * reltarget and EC entries, which we just did.)
         */
        if (rel->consider_partitionwise_join)
            childrel->consider_partitionwise_join = true;

        /*
         * If parallelism is allowable for this query in general, see whether
         * it's allowable for this childrel in particular.  But if we've
         * already decided the appendrel is not parallel-safe as a whole,
         * there's no point in considering parallelism for this child.  For
         * consistency, do this before calling set_rel_size() for the child.
         */
        if (root->glob->parallelModeOK && rel->consider_parallel)
            set_rel_consider_parallel(root, childrel, childRTE);

        /*
         * Compute the child's size.
         */
        set_rel_size(root, childrel, childRTindex, childRTE);

        /*
         * It is possible that constraint exclusion detected a contradiction
         * within a child subquery, even though we didn't prove one above. If
         * so, we can skip this child.
         */
        if (IS_DUMMY_REL(childrel))
            continue;

        /* We have at least one live child. */
        has_live_children = true;

        /*
         * If any live child is not parallel-safe, treat the whole appendrel
         * as not parallel-safe.  In future we might be able to generate plans
         * in which some children are farmed out to workers while others are
         * not; but we don't have that today, so it's a waste to consider
         * partial paths anywhere in the appendrel unless it's all safe.
         * (Child rels visited before this one will be unmarked in
         * set_append_rel_pathlist().)
         */
        if (!childrel->consider_parallel)
            rel->consider_parallel = false;

        /*
         * Accumulate size information from each live child.
         */
        Assert(childrel->rows > 0);

        parent_rows += childrel->rows;
        parent_size += childrel->reltarget->width * childrel->rows;

        /*
         * Accumulate per-column estimates too.  We need not do anything for
         * PlaceHolderVars in the parent list.  If child expression isn't a
         * Var, or we didn't record a width estimate for it, we have to fall
         * back on a datatype-based estimate.
         *
         * By construction, child's targetlist is 1-to-1 with parent's.
         */
        forboth(parentvars, rel->reltarget->exprs,
                childvars, childrel->reltarget->exprs)
        {
            Var        *parentvar = (Var *) lfirst(parentvars);
            Node       *childvar = (Node *) lfirst(childvars);

            if (IsA(parentvar, Var) && parentvar->varno == parentRTindex)
            {
                int         pndx = parentvar->varattno - rel->min_attr;
                int32       child_width = 0;

                if (IsA(childvar, Var) &&
                    ((Var *) childvar)->varno == childrel->relid)
                {
                    int         cndx = ((Var *) childvar)->varattno - childrel->min_attr;

                    child_width = childrel->attr_widths[cndx];
                }
                if (child_width <= 0)
                    child_width = get_typavgwidth(exprType(childvar),
                                                  exprTypmod(childvar));
                Assert(child_width > 0);
                parent_attrsizes[pndx] += child_width * childrel->rows;
            }
        }
    }

    if (has_live_children)
    {
        /*
         * Save the finished size estimates.
         */
        int         i;

        Assert(parent_rows > 0);
        rel->rows = parent_rows;
        rel->reltarget->width = rint(parent_size / parent_rows);
        for (i = 0; i < nattrs; i++)
            rel->attr_widths[i] = rint(parent_attrsizes[i] / parent_rows);

        /*
         * Set "raw tuples" count equal to "rows" for the appendrel; needed
         * because some places assume rel->tuples is valid for any baserel.
         */
        rel->tuples = parent_rows;

        /*
         * Note that we leave rel->pages as zero; this is important to avoid
         * double-counting the appendrel tree in total_table_pages.
         */
    }
    else
    {
        /*
         * All children were excluded by constraints, so mark the whole
         * appendrel dummy.  We must do this in this phase so that the rel's
         * dummy-ness is visible when we generate paths for other rels.
         */
        set_dummy_rel_pathlist(rel);
    }

    pfree(parent_attrsizes);
}

/*
 * set_append_rel_pathlist
 *    Build access paths for an "append relation"
 */
static void
set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
                        Index rti, RangeTblEntry *rte)
{
    int         parentRTindex = rti;
    List       *live_childrels = NIL;
    ListCell   *l;

    /*
     * Generate access paths for each member relation, and remember the
     * non-dummy children.
     */
    foreach(l, root->append_rel_list)
    {
        AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
        int         childRTindex;
        RangeTblEntry *childRTE;
        RelOptInfo *childrel;

        /* append_rel_list contains all append rels; ignore others */
        if (appinfo->parent_relid != parentRTindex)
            continue;

        /* Re-locate the child RTE and RelOptInfo */
        childRTindex = appinfo->child_relid;
        childRTE = root->simple_rte_array[childRTindex];
        childrel = root->simple_rel_array[childRTindex];

        /*
         * If set_append_rel_size() decided the parent appendrel was
         * parallel-unsafe at some point after visiting this child rel, we
         * need to propagate the unsafety marking down to the child, so that
         * we don't generate useless partial paths for it.
         */
        if (!rel->consider_parallel)
            childrel->consider_parallel = false;

        /*
         * Compute the child's access paths.
         */
        set_rel_pathlist(root, childrel, childRTindex, childRTE);

        /*
         * If child is dummy, ignore it.
         */
        if (IS_DUMMY_REL(childrel))
            continue;

        /*
         * Child is live, so add it to the live_childrels list for use below.
         */
        live_childrels = lappend(live_childrels, childrel);
    }

    /* Add paths to the append relation. */
    add_paths_to_append_rel(root, rel, live_childrels);
}


/*
 * add_paths_to_append_rel
 *    Generate paths for the given append relation given the set of non-dummy
 *    child rels.
 *
 * The function collects all parameterizations and orderings supported by the
 * non-dummy children. For every such parameterization or ordering, it creates
 * an append path collecting one path from each non-dummy child with given
 * parameterization or ordering. Similarly it collects partial paths from
 * non-dummy children to create partial append paths.
 */
void
add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel,
                        List *live_childrels)
{
    List       *subpaths = NIL;
    bool        subpaths_valid = true;
    List       *startup_subpaths = NIL;
    bool        startup_subpaths_valid = true;
    List       *partial_subpaths = NIL;
    List       *pa_partial_subpaths = NIL;
    List       *pa_nonpartial_subpaths = NIL;
    bool        partial_subpaths_valid = true;
    bool        pa_subpaths_valid;
    List       *all_child_pathkeys = NIL;
    List       *all_child_outers = NIL;
    ListCell   *l;
    double      partial_rows = -1;

    /* If appropriate, consider parallel append */
    pa_subpaths_valid = enable_parallel_append && rel->consider_parallel;

    /*
     * For every non-dummy child, remember the cheapest path.  Also, identify
     * all pathkeys (orderings) and parameterizations (required_outer sets)
     * available for the non-dummy member relations.
     */
    foreach(l, live_childrels)
    {
        RelOptInfo *childrel = lfirst(l);
        ListCell   *lcp;
        Path       *cheapest_partial_path = NULL;

        /*
         * If child has an unparameterized cheapest-total path, add that to
         * the unparameterized Append path we are constructing for the parent.
         * If not, there's no workable unparameterized path.
         *
         * With partitionwise aggregates, the child rel's pathlist may be
         * empty, so don't assume that a path exists here.
         */
        if (childrel->pathlist != NIL &&
            childrel->cheapest_total_path->param_info == NULL)
            accumulate_append_subpath(childrel->cheapest_total_path,
                                      &subpaths, NULL);
        else
            subpaths_valid = false;

        /*
         * When the planner is considering cheap startup plans, we'll also
         * collect all the cheapest_startup_paths (if set) and build an
         * AppendPath containing those as subpaths.
         */
        if (rel->consider_startup && childrel->cheapest_startup_path != NULL)
        {
            /* cheapest_startup_path must not be a parameterized path. */
            Assert(childrel->cheapest_startup_path->param_info == NULL);
            accumulate_append_subpath(childrel->cheapest_startup_path,
                                      &startup_subpaths,
                                      NULL);
        }
        else
            startup_subpaths_valid = false;


        /* Same idea, but for a partial plan. */
        if (childrel->partial_pathlist != NIL)
        {
            cheapest_partial_path = linitial(childrel->partial_pathlist);
            accumulate_append_subpath(cheapest_partial_path,
                                      &partial_subpaths, NULL);
        }
        else
            partial_subpaths_valid = false;

        /*
         * Same idea, but for a parallel append mixing partial and non-partial
         * paths.
         */
        if (pa_subpaths_valid)
        {
            Path       *nppath = NULL;

            nppath =
                get_cheapest_parallel_safe_total_inner(childrel->pathlist);

            if (cheapest_partial_path == NULL && nppath == NULL)
            {
                /* Neither a partial nor a parallel-safe path?  Forget it. */
                pa_subpaths_valid = false;
            }
            else if (nppath == NULL ||
                     (cheapest_partial_path != NULL &&
                      cheapest_partial_path->total_cost < nppath->total_cost))
            {
                /* Partial path is cheaper or the only option. */
                Assert(cheapest_partial_path != NULL);
                accumulate_append_subpath(cheapest_partial_path,
                                          &pa_partial_subpaths,
                                          &pa_nonpartial_subpaths);
            }
            else
            {
                /*
                 * Either we've got only a non-partial path, or we think that
                 * a single backend can execute the best non-partial path
                 * faster than all the parallel backends working together can
                 * execute the best partial path.
                 *
                 * It might make sense to be more aggressive here.  Even if
                 * the best non-partial path is more expensive than the best
                 * partial path, it could still be better to choose the
                 * non-partial path if there are several such paths that can
                 * be given to different workers.  For now, we don't try to
                 * figure that out.
                 */
                accumulate_append_subpath(nppath,
                                          &pa_nonpartial_subpaths,
                                          NULL);
            }
        }

        /*
         * Collect lists of all the available path orderings and
         * parameterizations for all the children.  We use these as a
         * heuristic to indicate which sort orderings and parameterizations we
         * should build Append and MergeAppend paths for.
         */
        foreach(lcp, childrel->pathlist)
        {
            Path       *childpath = (Path *) lfirst(lcp);
            List       *childkeys = childpath->pathkeys;
            Relids      childouter = PATH_REQ_OUTER(childpath);

            /* Unsorted paths don't contribute to pathkey list */
            if (childkeys != NIL)
            {
                ListCell   *lpk;
                bool        found = false;

                /* Have we already seen this ordering? */
                foreach(lpk, all_child_pathkeys)
                {
                    List       *existing_pathkeys = (List *) lfirst(lpk);

                    if (compare_pathkeys(existing_pathkeys,
                                         childkeys) == PATHKEYS_EQUAL)
                    {
                        found = true;
                        break;
                    }
                }
                if (!found)
                {
                    /* No, so add it to all_child_pathkeys */
                    all_child_pathkeys = lappend(all_child_pathkeys,
                                                 childkeys);
                }
            }

            /* Unparameterized paths don't contribute to param-set list */
            if (childouter)
            {
                ListCell   *lco;
                bool        found = false;

                /* Have we already seen this param set? */
                foreach(lco, all_child_outers)
                {
                    Relids      existing_outers = (Relids) lfirst(lco);

                    if (bms_equal(existing_outers, childouter))
                    {
                        found = true;
                        break;
                    }
                }
                if (!found)
                {
                    /* No, so add it to all_child_outers */
                    all_child_outers = lappend(all_child_outers,
                                               childouter);
                }
            }
        }
    }

    /*
     * If we found unparameterized paths for all children, build an unordered,
     * unparameterized Append path for the rel.  (Note: this is correct even
     * if we have zero or one live subpath due to constraint exclusion.)
     */
    if (subpaths_valid)
        add_path(rel, (Path *) create_append_path(root, rel, subpaths, NIL,
                                                  NIL, NULL, 0, false,
                                                  -1));

    /* build an AppendPath for the cheap startup paths, if valid */
    if (startup_subpaths_valid)
        add_path(rel, (Path *) create_append_path(root, rel, startup_subpaths,
                                                  NIL, NIL, NULL, 0, false, -1));

    /*
     * Consider an append of unordered, unparameterized partial paths.  Make
     * it parallel-aware if possible.
     */
    if (partial_subpaths_valid && partial_subpaths != NIL)
    {
        AppendPath *appendpath;
        ListCell   *lc;
        int         parallel_workers = 0;

        /* Find the highest number of workers requested for any subpath. */
        foreach(lc, partial_subpaths)
        {
            Path       *path = lfirst(lc);

            parallel_workers = Max(parallel_workers, path->parallel_workers);
        }
        Assert(parallel_workers > 0);

        /*
         * If the use of parallel append is permitted, always request at least
         * log2(# of children) workers.  We assume it can be useful to have
         * extra workers in this case because they will be spread out across
         * the children.  The precise formula is just a guess, but we don't
         * want to end up with a radically different answer for a table with N
         * partitions vs. an unpartitioned table with the same data, so the
         * use of some kind of log-scaling here seems to make some sense.
         */
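
        /*
         * For example, with 8 live children pg_leftmost_one_pos32(8) + 1 = 4,
         * so at least four workers are requested; with 5 children the floor
         * is 3.  The Min() below then caps the result at
         * max_parallel_workers_per_gather.
         */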
        if (enable_parallel_append)
        {
            parallel_workers = Max(parallel_workers,
                                   pg_leftmost_one_pos32(list_length(live_childrels)) + 1);
            parallel_workers = Min(parallel_workers,
                                   max_parallel_workers_per_gather);
        }
        Assert(parallel_workers > 0);

        /* Generate a partial append path. */
        appendpath = create_append_path(root, rel, NIL, partial_subpaths,
                                        NIL, NULL, parallel_workers,
                                        enable_parallel_append,
                                        -1);

        /*
         * Make sure any subsequent partial paths use the same row count
         * estimate.
         */
        partial_rows = appendpath->path.rows;

        /* Add the path. */
        add_partial_path(rel, (Path *) appendpath);
    }

    /*
     * Consider a parallel-aware append using a mix of partial and non-partial
     * paths.  (This only makes sense if there's at least one child which has
     * a non-partial path that is substantially cheaper than any partial path;
     * otherwise, we should use the append path added in the previous step.)
     */
    if (pa_subpaths_valid && pa_nonpartial_subpaths != NIL)
    {
        AppendPath *appendpath;
        ListCell   *lc;
        int         parallel_workers = 0;

        /*
         * Find the highest number of workers requested for any partial
         * subpath.
         */
        foreach(lc, pa_partial_subpaths)
        {
            Path       *path = lfirst(lc);

            parallel_workers = Max(parallel_workers, path->parallel_workers);
        }

        /*
         * Same formula here as above.  It's even more important in this
         * instance because the non-partial paths won't contribute anything to
         * the planned number of parallel workers.
         */
        parallel_workers = Max(parallel_workers,
                               pg_leftmost_one_pos32(list_length(live_childrels)) + 1);
        parallel_workers = Min(parallel_workers,
                               max_parallel_workers_per_gather);
        Assert(parallel_workers > 0);

        appendpath = create_append_path(root, rel, pa_nonpartial_subpaths,
                                        pa_partial_subpaths,
                                        NIL, NULL, parallel_workers, true,
                                        partial_rows);
        add_partial_path(rel, (Path *) appendpath);
    }

    /*
     * Also build unparameterized ordered append paths based on the collected
     * list of child pathkeys.
     */
    if (subpaths_valid)
        generate_orderedappend_paths(root, rel, live_childrels,
                                     all_child_pathkeys);

    /*
     * Build Append paths for each parameterization seen among the child rels.
     * (This may look pretty expensive, but in most cases of practical
     * interest, the child rels will expose mostly the same parameterizations,
     * so that not that many cases actually get considered here.)
     *
     * The Append node itself cannot enforce quals, so all qual checking must
     * be done in the child paths.  This means that to have a parameterized
     * Append path, we must have the exact same parameterization for each
     * child path; otherwise some children might be failing to check the
     * moved-down quals.  To make them match up, we can try to increase the
     * parameterization of lesser-parameterized paths.
     */
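
    /*
     * E.g. for a nestloop plan of
     *     SELECT ... FROM small s JOIN parted p ON p.key = s.key
     * each child of p may offer an index path parameterized by s; the loop
     * below assembles those into a single Append parameterized by s, so the
     * whole partitioned table can sit on the inside of the nestloop.
     */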
    foreach(l, all_child_outers)
    {
        Relids      required_outer = (Relids) lfirst(l);
        ListCell   *lcr;

        /* Select the child paths for an Append with this parameterization */
        subpaths = NIL;
        subpaths_valid = true;
        foreach(lcr, live_childrels)
        {
            RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr);
            Path       *subpath;

            if (childrel->pathlist == NIL)
            {
                /* failed to make a suitable path for this child */
                subpaths_valid = false;
                break;
            }

            subpath = get_cheapest_parameterized_child_path(root,
                                                            childrel,
                                                            required_outer);
            if (subpath == NULL)
            {
                /* failed to make a suitable path for this child */
                subpaths_valid = false;
                break;
            }
            accumulate_append_subpath(subpath, &subpaths, NULL);
        }

        if (subpaths_valid)
            add_path(rel, (Path *)
                     create_append_path(root, rel, subpaths, NIL,
                                        NIL, required_outer, 0, false,
                                        -1));
    }

    /*
     * When there is only a single child relation, the Append path can inherit
     * any ordering available for the child rel's path, so that it's useful to
     * consider ordered partial paths.  Above we only considered the cheapest
     * partial path for each child, but let's also make paths using any
     * partial paths that have pathkeys.
     */
    if (list_length(live_childrels) == 1)
    {
        RelOptInfo *childrel = (RelOptInfo *) linitial(live_childrels);

        /* skip the cheapest partial path, since we already used that above */
        for_each_from(l, childrel->partial_pathlist, 1)
        {
            Path       *path = (Path *) lfirst(l);
            AppendPath *appendpath;

            /* skip paths with no pathkeys. */
            if (path->pathkeys == NIL)
                continue;

            appendpath = create_append_path(root, rel, NIL, list_make1(path),
                                            NIL, NULL,
                                            path->parallel_workers, true,
                                            partial_rows);
            add_partial_path(rel, (Path *) appendpath);
        }
    }
}

/*
 * generate_orderedappend_paths
 *    Generate ordered append paths for an append relation
 *
 * Usually we generate MergeAppend paths here, but there are some special
 * cases where we can generate simple Append paths, because the subpaths
 * can provide tuples in the required order already.
 *
 * We generate a path for each ordering (pathkey list) appearing in
 * all_child_pathkeys.
 *
 * We consider both cheapest-startup and cheapest-total cases, ie, for each
 * interesting ordering, collect all the cheapest startup subpaths and all the
 * cheapest total paths, and build a suitable path for each case.
 *
 * We don't currently generate any parameterized ordered paths here.  While
 * it would not take much more code here to do so, it's very unclear that it
 * is worth the planning cycles to investigate such paths: there's little
 * use for an ordered path on the inside of a nestloop.  In fact, it's likely
 * that the current coding of add_path would reject such paths out of hand,
 * because add_path gives no credit for sort ordering of parameterized paths,
 * and a parameterized MergeAppend is going to be more expensive than the
 * corresponding parameterized Append path.  If we ever try harder to support
 * parameterized mergejoin plans, it might be worth adding support for
 * parameterized paths here to feed such joins.  (See notes in
 * optimizer/README for why that might not ever happen, though.)
 */
static void
generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel,
                             List *live_childrels,
                             List *all_child_pathkeys)
{
    ListCell   *lcp;
    List       *partition_pathkeys = NIL;
    List       *partition_pathkeys_desc = NIL;
    bool        partition_pathkeys_partial = true;
    bool        partition_pathkeys_desc_partial = true;

    /*
     * Some partitioned table setups may allow us to use an Append node
     * instead of a MergeAppend.  This is possible in cases such as RANGE
     * partitioned tables where it's guaranteed that an earlier partition must
     * contain rows which come earlier in the sort order.  To detect whether
     * this is relevant, build pathkey descriptions of the partition ordering,
     * for both forward and reverse scans.
     */
    if (rel->part_scheme != NULL && IS_SIMPLE_REL(rel) &&
        partitions_are_ordered(rel->boundinfo, rel->live_parts))
    {
        partition_pathkeys = build_partition_pathkeys(root, rel,
                                                      ForwardScanDirection,
                                                      &partition_pathkeys_partial);

        partition_pathkeys_desc = build_partition_pathkeys(root, rel,
                                                           BackwardScanDirection,
                                                           &partition_pathkeys_desc_partial);

        /*
         * You might think we should truncate_useless_pathkeys here, but
         * allowing partition keys which are a subset of the query's pathkeys
         * can often be useful.  For example, consider a table partitioned by
         * RANGE (a, b), and a query with ORDER BY a, b, c.  If we have child
         * paths that can produce the a, b, c ordering (perhaps via indexes on
         * (a, b, c)) then it works to consider the appendrel output as
         * ordered by a, b, c.
         */
1765
1766 /* Now consider each interesting sort ordering */
1767 foreach(lcp, all_child_pathkeys)
1768 {
1769 List *pathkeys = (List *) lfirst(lcp);
1770 List *startup_subpaths = NIL;
1771 List *total_subpaths = NIL;
1772 List *fractional_subpaths = NIL;
1773 bool startup_neq_total = false;
1774 bool match_partition_order;
1775 bool match_partition_order_desc;
1776 int end_index;
1777 int first_index;
1778 int direction;
1779
1780 /*
1781 * Determine if this sort ordering matches any partition pathkeys we
1782 * have, for both ascending and descending partition order. If the
1783 * partition pathkeys happen to be contained in pathkeys then it still
1784 * works, as described above, providing that the partition pathkeys
1785 * are complete and not just a prefix of the partition keys. (In such
1786 * cases we'll be relying on the child paths to have sorted the
1787 * lower-order columns of the required pathkeys.)
1788 */
1789 match_partition_order =
1790 pathkeys_contained_in(pathkeys, partition_pathkeys) ||
1791 (!partition_pathkeys_partial &&
1792 pathkeys_contained_in(partition_pathkeys, pathkeys));
1793
1794 match_partition_order_desc = !match_partition_order &&
1795 (pathkeys_contained_in(pathkeys, partition_pathkeys_desc) ||
1796 (!partition_pathkeys_desc_partial &&
1797 pathkeys_contained_in(partition_pathkeys_desc, pathkeys)));
1798
1799 /*
1800 * When the required pathkeys match the reverse of the partition
1801 * order, we must build the list of paths in reverse starting with the
1802 * last matching partition first. We can get away without making any
1803 * special cases for this in the loop below by just looping backward
1804 * over the child relations in this case.
1805 */
1806 if (match_partition_order_desc)
1807 {
1808 /* loop backward */
1809 first_index = list_length(live_childrels) - 1;
1810 end_index = -1;
1811 direction = -1;
1812
1813 /*
1814 * Set this to true to save us having to check for
1815 * match_partition_order_desc in the loop below.
1816 */
1817 match_partition_order = true;
1818 }
1819 else
1820 {
1821 /* for all other cases, loop forward */
1822 first_index = 0;
1823 end_index = list_length(live_childrels);
1824 direction = 1;
1825 }
1826
1827 /* Select the child paths for this ordering... */
1828 for (int i = first_index; i != end_index; i += direction)
1829 {
1830 RelOptInfo *childrel = list_nth_node(RelOptInfo, live_childrels, i);
1831 Path *cheapest_startup,
1832 *cheapest_total,
1833 *cheapest_fractional = NULL;
1834
1835 /* Locate the right paths, if they are available. */
1836 cheapest_startup =
1837 get_cheapest_path_for_pathkeys(childrel->pathlist,
1838 pathkeys,
1839 NULL,
1840 STARTUP_COST,
1841 false);
1842 cheapest_total =
1843 get_cheapest_path_for_pathkeys(childrel->pathlist,
1844 pathkeys,
1845 NULL,
1846 TOTAL_COST,
1847 false);
1848
1849 /*
1850 * If we can't find any paths with the right order, just use the
1851 * cheapest-total path; we'll have to sort it later.
1852 */
1853 if (cheapest_startup == NULL || cheapest_total == NULL)
1854 {
1855 cheapest_startup = cheapest_total =
1856 childrel->cheapest_total_path;
1857 /* Assert we do have an unparameterized path for this child */
1858 Assert(cheapest_total->param_info == NULL);
1859 }
1860
1861 /*
1862 * When building a fractional path, determine a cheapest
1863 * fractional path for each child relation too. Looking at startup
1864 * and total costs is not enough, because the cheapest fractional
1865 * path may be dominated by two separate paths (one for startup,
1866 * one for total).
1870 */
1871 if (root->tuple_fraction > 0)
1872 {
1873 double path_fraction = (1.0 / root->tuple_fraction);
1874
1875 cheapest_fractional =
1876 get_cheapest_fractional_path_for_pathkeys(childrel->pathlist,
1877 pathkeys,
1878 NULL,
1879 path_fraction);
1880
1881 /*
1882 * If we found no path with matching pathkeys, use the
1883 * cheapest total path instead.
1884 *
1885 * XXX We might consider partially sorted paths too (with an
1886 * incremental sort on top). But we'd have to build all the
1887 * incremental paths, do the costing etc.
1888 */
1889 if (!cheapest_fractional)
1890 cheapest_fractional = cheapest_total;
1891 }
1892
1893 /*
1894 * Notice whether we actually have different paths for the
1895 * "cheapest" and "total" cases; frequently there will be no point
1896 * in two create_merge_append_path() calls.
1897 */
1898 if (cheapest_startup != cheapest_total)
1899 startup_neq_total = true;
1900
1901 /*
1902 * Collect the appropriate child paths. The required logic varies
1903 * for the Append and MergeAppend cases.
1904 */
1905 if (match_partition_order)
1906 {
1907 /*
1908 * We're going to make a plain Append path. We don't need
1909 * most of what accumulate_append_subpath would do, but we do
1910 * want to cut out child Appends or MergeAppends if they have
1911 * just a single subpath (and hence aren't doing anything
1912 * useful).
1913 */
1914 cheapest_startup = get_singleton_append_subpath(cheapest_startup);
1915 cheapest_total = get_singleton_append_subpath(cheapest_total);
1916
1917 startup_subpaths = lappend(startup_subpaths, cheapest_startup);
1918 total_subpaths = lappend(total_subpaths, cheapest_total);
1919
1920 if (cheapest_fractional)
1921 {
1922 cheapest_fractional = get_singleton_append_subpath(cheapest_fractional);
1923 fractional_subpaths = lappend(fractional_subpaths, cheapest_fractional);
1924 }
1925 }
1926 else
1927 {
1928 /*
1929 * Otherwise, rely on accumulate_append_subpath to collect the
1930 * child paths for the MergeAppend.
1931 */
1932 accumulate_append_subpath(cheapest_startup,
1933 &startup_subpaths, NULL);
1934 accumulate_append_subpath(cheapest_total,
1935 &total_subpaths, NULL);
1936
1937 if (cheapest_fractional)
1938 accumulate_append_subpath(cheapest_fractional,
1939 &fractional_subpaths, NULL);
1940 }
1941 }
1942
1943 /* ... and build the Append or MergeAppend paths */
1944 if (match_partition_order)
1945 {
1946 /* We only need Append */
1947 add_path(rel, (Path *) create_append_path(root,
1948 rel,
1949 startup_subpaths,
1950 NIL,
1951 pathkeys,
1952 NULL,
1953 0,
1954 false,
1955 -1));
1956 if (startup_neq_total)
1957 add_path(rel, (Path *) create_append_path(root,
1958 rel,
1959 total_subpaths,
1960 NIL,
1961 pathkeys,
1962 NULL,
1963 0,
1964 false,
1965 -1));
1966
1967 if (fractional_subpaths)
1968 add_path(rel, (Path *) create_append_path(root,
1969 rel,
1970 fractional_subpaths,
1971 NIL,
1972 pathkeys,
1973 NULL,
1974 0,
1975 false,
1976 -1));
1977 }
1978 else
1979 {
1980 /* We need MergeAppend */
1981 add_path(rel, (Path *) create_merge_append_path(root,
1982 rel,
1983 startup_subpaths,
1984 pathkeys,
1985 NULL));
1986 if (startup_neq_total)
1987 add_path(rel, (Path *) create_merge_append_path(root,
1988 rel,
1989 total_subpaths,
1990 pathkeys,
1991 NULL));
1992
1993 if (fractional_subpaths)
1994 add_path(rel, (Path *) create_merge_append_path(root,
1995 rel,
1996 fractional_subpaths,
1997 pathkeys,
1998 NULL));
1999 }
2000 }
2001}
2002
2003/*
2004 * get_cheapest_parameterized_child_path
2005 * Get cheapest path for this relation that has exactly the requested
2006 * parameterization.
2007 *
2008 * Returns NULL if unable to create such a path.
2009 */
2010static Path *
2011get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel,
2012 Relids required_outer)
2013{
2014 Path *cheapest;
2015 ListCell *lc;
2016
2017 /*
2018 * Look up the cheapest existing path with no more than the needed
2019 * parameterization. If it has exactly the needed parameterization, we're
2020 * done.
2021 */
2022 cheapest = get_cheapest_path_for_pathkeys(rel->pathlist,
2023 NIL,
2024 required_outer,
2025 TOTAL_COST,
2026 false);
2027 Assert(cheapest != NULL);
2028 if (bms_equal(PATH_REQ_OUTER(cheapest), required_outer))
2029 return cheapest;
2030
2031 /*
2032 * Otherwise, we can "reparameterize" an existing path to match the given
2033 * parameterization, which effectively means pushing down additional
2034 * joinquals to be checked within the path's scan. However, some existing
2035 * paths might check the available joinquals already while others don't;
2036 * therefore, it's not clear which existing path will be cheapest after
2037 * reparameterization. We have to go through them all and find out.
2038 */
2039 cheapest = NULL;
2040 foreach(lc, rel->pathlist)
2041 {
2042 Path *path = (Path *) lfirst(lc);
2043
2044 /* Can't use it if it needs more than requested parameterization */
2045 if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer))
2046 continue;
2047
2048 /*
2049 * Reparameterization can only increase the path's cost, so if it's
2050 * already more expensive than the current cheapest, forget it.
2051 */
2052 if (cheapest != NULL &&
2053 compare_path_costs(cheapest, path, TOTAL_COST) <= 0)
2054 continue;
2055
2056 /* Reparameterize if needed, then recheck cost */
2057 if (!bms_equal(PATH_REQ_OUTER(path), required_outer))
2058 {
2059 path = reparameterize_path(root, path, required_outer, 1.0);
2060 if (path == NULL)
2061 continue; /* failed to reparameterize this one */
2062 Assert(bms_equal(PATH_REQ_OUTER(path), required_outer));
2063
2064 if (cheapest != NULL &&
2065 compare_path_costs(cheapest, path, TOTAL_COST) <= 0)
2066 continue;
2067 }
2068
2069 /* We have a new best path */
2070 cheapest = path;
2071 }
2072
2073 /* Return the best path, or NULL if we found no suitable candidate */
2074 return cheapest;
2075}
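/*
 * For illustration (hypothetical schema): in
 *
 *		SELECT * FROM big b JOIN part p ON p.x = b.x
 *
 * where "part" is partitioned and indexed on x, each child of "part" may
 * have an index path parameterized by b (required_outer = {b}); the
 * function above picks, per child, the cheapest path with exactly that
 * parameterization, reparameterizing other paths where needed.
 */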
2076
2077/*
2078 * accumulate_append_subpath
2079 * Add a subpath to the list being built for an Append or MergeAppend.
2080 *
2081 * It's possible that the child is itself an Append or MergeAppend path, in
2082 * which case we can "cut out the middleman" and just add its child paths to
2083 * our own list. (We don't try to do this earlier because we need to apply
2084 * both levels of transformation to the quals.)
2085 *
2086 * Note that if we omit a child MergeAppend in this way, we are effectively
2087 * omitting a sort step, which seems fine: if the parent is to be an Append,
2088 * its result would be unsorted anyway, while if the parent is to be a
2089 * MergeAppend, there's no point in a separate sort on a child.
2090 *
2091 * Normally, either path is a partial path and subpaths is a list of partial
2092 * paths, or else path is a non-partial plan and subpaths is a list of those.
2093 * However, if path is a parallel-aware Append, then we add its partial path
2094 * children to subpaths and the rest to special_subpaths. If the latter is
2095 * NULL, we don't flatten the path at all (unless it contains only partial
2096 * paths).
2097 */
2098static void
2099accumulate_append_subpath(Path *path, List **subpaths, List **special_subpaths)
2100{
2101 if (IsA(path, AppendPath))
2102 {
2103 AppendPath *apath = (AppendPath *) path;
2104
2105 if (!apath->path.parallel_aware || apath->first_partial_path == 0)
2106 {
2107 *subpaths = list_concat(*subpaths, apath->subpaths);
2108 return;
2109 }
2110 else if (special_subpaths != NULL)
2111 {
2112 List *new_special_subpaths;
2113
2114 /* Split Parallel Append into partial and non-partial subpaths */
2115 *subpaths = list_concat(*subpaths,
2116 list_copy_tail(apath->subpaths,
2117 apath->first_partial_path));
2118 new_special_subpaths = list_copy_head(apath->subpaths,
2119 apath->first_partial_path);
2120 *special_subpaths = list_concat(*special_subpaths,
2121 new_special_subpaths);
2122 return;
2123 }
2124 }
2125 else if (IsA(path, MergeAppendPath))
2126 {
2127 MergeAppendPath *mpath = (MergeAppendPath *) path;
2128
2129 *subpaths = list_concat(*subpaths, mpath->subpaths);
2130 return;
2131 }
2132
2133 *subpaths = lappend(*subpaths, path);
2134}
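/*
 * For illustration (hypothetical tables): a query such as
 *
 *		SELECT * FROM part_tab UNION ALL SELECT * FROM plain_tab
 *
 * would naively yield an Append whose first child is itself the Append
 * over part_tab's partitions; the flattening above typically collapses
 * this into a single Append over all the leaf subpaths.
 */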
2135
2136/*
2137 * get_singleton_append_subpath
2138 * Returns the single subpath of an Append/MergeAppend, or just
2139 * return 'path' if it's not a single sub-path Append/MergeAppend.
2140 *
2141 * Note: 'path' must not be a parallel-aware path.
2142 */
2143static Path *
2144get_singleton_append_subpath(Path *path)
2145{
2146 Assert(!path->parallel_aware);
2147
2148 if (IsA(path, AppendPath))
2149 {
2150 AppendPath *apath = (AppendPath *) path;
2151
2152 if (list_length(apath->subpaths) == 1)
2153 return (Path *) linitial(apath->subpaths);
2154 }
2155 else if (IsA(path, MergeAppendPath))
2156 {
2157 MergeAppendPath *mpath = (MergeAppendPath *) path;
2158
2159 if (list_length(mpath->subpaths) == 1)
2160 return (Path *) linitial(mpath->subpaths);
2161 }
2162
2163 return path;
2164}
2165
2166/*
2167 * set_dummy_rel_pathlist
2168 * Build a dummy path for a relation that's been excluded by constraints
2169 *
2170 * Rather than inventing a special "dummy" path type, we represent this as an
2171 * AppendPath with no members (see also IS_DUMMY_APPEND/IS_DUMMY_REL macros).
2172 *
2173 * (See also mark_dummy_rel, which does basically the same thing, but is
2174 * typically used to change a rel into dummy state after we already made
2175 * paths for it.)
2176 */
2177static void
2178set_dummy_rel_pathlist(RelOptInfo *rel)
2179{
2180 /* Set dummy size estimates --- we leave attr_widths[] as zeroes */
2181 rel->rows = 0;
2182 rel->reltarget->width = 0;
2183
2184 /* Discard any pre-existing paths; no further need for them */
2185 rel->pathlist = NIL;
2186 rel->partial_pathlist = NIL;
2187
2188 /* Set up the dummy path */
2189 add_path(rel, (Path *) create_append_path(NULL, rel, NIL, NIL,
2190 NIL, rel->lateral_relids,
2191 0, false, -1));
2192
2193 /*
2194 * We set the cheapest-path fields immediately, just in case they were
2195 * pointing at some discarded path. This is redundant in current usage
2196 * because set_rel_pathlist will do it later, but it's cheap so we keep it
2197 * for safety and consistency with mark_dummy_rel.
2198 */
2199 set_cheapest(rel);
2200}
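/*
 * For illustration (hypothetical table, with constraint_exclusion
 * enabled): in
 *
 *		CREATE TABLE t (x int CHECK (x > 0));
 *		SELECT * FROM t WHERE x < 0;
 *
 * the qual contradicts the CHECK constraint, so t becomes a dummy rel;
 * EXPLAIN typically shows this as a Result node with
 * "One-Time Filter: false".
 */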
2201
2202/*
2203 * find_window_run_conditions
2204 * Determine if 'wfunc' is really a WindowFunc and call its prosupport
2205 * function to determine the function's monotonic properties. We then
2206 * see if 'opexpr' can be used to short-circuit execution.
2207 *
2208 * For example row_number() over (order by ...) always produces a value one
2209 * higher than the previous. If someone has a window function in a subquery
2210 * and has a WHERE clause in the outer query to filter rows <= 10, then we may
2211 * as well stop processing the windowagg once the row number reaches 11. Here
2212 * we check if 'opexpr' might help us to stop doing needless extra processing
2213 * in WindowAgg nodes.
2214 *
2215 * '*keep_original' is set to true if the caller should also use 'opexpr' for
2216 * its original purpose. This is set to false if the caller can assume that
2217 * the run condition will handle all of the required filtering.
2218 *
2219 * Returns true if 'opexpr' was found to be useful and was added to the
2220 * WindowFunc's runCondition. We also set *keep_original accordingly and add
2221 * 'attno' to *run_cond_attrs offset by FirstLowInvalidHeapAttributeNumber.
2222 * If the 'opexpr' cannot be used then we set *keep_original to true and
2223 * return false.
2224 */
2225static bool
2226find_window_run_conditions(Query *subquery, RangeTblEntry *rte, Index rti,
2227 AttrNumber attno, WindowFunc *wfunc, OpExpr *opexpr,
2228 bool wfunc_left, bool *keep_original,
2229 Bitmapset **run_cond_attrs)
2230{
2231 Oid prosupport;
2232 Expr *otherexpr;
2233 SupportRequestWFuncMonotonic req;
2234 SupportRequestWFuncMonotonic *res;
2235 WindowClause *wclause;
2236 List *opinfos;
2237 OpExpr *runopexpr;
2238 Oid runoperator;
2239 ListCell *lc;
2240
2241 *keep_original = true;
2242
2243 while (IsA(wfunc, RelabelType))
2244 wfunc = (WindowFunc *) ((RelabelType *) wfunc)->arg;
2245
2246 /* we can only work with window functions */
2247 if (!IsA(wfunc, WindowFunc))
2248 return false;
2249
2250 /* can't use it if there are subplans in the WindowFunc */
2251 if (contain_subplans((Node *) wfunc))
2252 return false;
2253
2254 prosupport = get_func_support(wfunc->winfnoid);
2255
2256 /* Check if there's a support function for 'wfunc' */
2257 if (!OidIsValid(prosupport))
2258 return false;
2259
2260 /* get the Expr from the other side of the OpExpr */
2261 if (wfunc_left)
2262 otherexpr = lsecond(opexpr->args);
2263 else
2264 otherexpr = linitial(opexpr->args);
2265
2266 /*
2267 * The value being compared must not change during the evaluation of the
2268 * window partition.
2269 */
2270 if (!is_pseudo_constant_clause((Node *) otherexpr))
2271 return false;
2272
2273 /* find the window clause belonging to the window function */
2274 wclause = (WindowClause *) list_nth(subquery->windowClause,
2275 wfunc->winref - 1);
2276
2277 req.type = T_SupportRequestWFuncMonotonic;
2278 req.window_func = wfunc;
2279 req.window_clause = wclause;
2280
2281 /* call the support function */
2282 res = (SupportRequestWFuncMonotonic *)
2283 DatumGetPointer(OidFunctionCall1(prosupport,
2284 PointerGetDatum(&req)));
2285
2286 /*
2287 * Nothing to do if the function is neither monotonically increasing nor
2288 * monotonically decreasing.
2289 */
2290 if (res == NULL || res->monotonic == MONOTONICFUNC_NONE)
2291 return false;
2292
2293 runopexpr = NULL;
2294 runoperator = InvalidOid;
2295 opinfos = get_op_btree_interpretation(opexpr->opno);
2296
2297 foreach(lc, opinfos)
2298 {
2299 OpBtreeInterpretation *opinfo = (OpBtreeInterpretation *) lfirst(lc);
2300 int strategy = opinfo->strategy;
2301
2302 /* handle < / <= */
2303 if (strategy == BTLessStrategyNumber ||
2304 strategy == BTLessEqualStrategyNumber)
2305 {
2306 /*
2307 * < / <= is supported for monotonically increasing functions in
2308 * the form <wfunc> op <pseudoconst> and <pseudoconst> op <wfunc>
2309 * for monotonically decreasing functions.
2310 */
2311 if ((wfunc_left && (res->monotonic & MONOTONICFUNC_INCREASING)) ||
2312 (!wfunc_left && (res->monotonic & MONOTONICFUNC_DECREASING)))
2313 {
2314 *keep_original = false;
2315 runopexpr = opexpr;
2316 runoperator = opexpr->opno;
2317 }
2318 break;
2319 }
2320 /* handle > / >= */
2321 else if (strategy == BTGreaterStrategyNumber ||
2322 strategy == BTGreaterEqualStrategyNumber)
2323 {
2324 /*
2325 * > / >= is supported for monotonically decreasing functions in
2326 * the form <wfunc> op <pseudoconst> and <pseudoconst> op <wfunc>
2327 * for monotonically increasing functions.
2328 */
2329 if ((wfunc_left && (res->monotonic & MONOTONICFUNC_DECREASING)) ||
2330 (!wfunc_left && (res->monotonic & MONOTONICFUNC_INCREASING)))
2331 {
2332 *keep_original = false;
2333 runopexpr = opexpr;
2334 runoperator = opexpr->opno;
2335 }
2336 break;
2337 }
2338 /* handle = */
2339 else if (strategy == BTEqualStrategyNumber)
2340 {
2341 int16 newstrategy;
2342
2343 /*
2344 * When both monotonically increasing and decreasing then the
2345 * return value of the window function will be the same each time.
2346 * We can simply use 'opexpr' as the run condition without
2347 * modifying it.
2348 */
2349 if ((res->monotonic & MONOTONICFUNC_BOTH) == MONOTONICFUNC_BOTH)
2350 {
2351 *keep_original = false;
2352 runopexpr = opexpr;
2353 runoperator = opexpr->opno;
2354 break;
2355 }
2356
2357 /*
2358 * When monotonically increasing we make a qual with <wfunc> <=
2359 * <value> or <value> >= <wfunc> in order to filter out values
2360 * which are above the value in the equality condition. For
2361 * monotonically decreasing functions we want to filter values
2362 * below the value in the equality condition.
2363 */
2364 if (res->monotonic & MONOTONICFUNC_INCREASING)
2365 newstrategy = wfunc_left ? BTLessEqualStrategyNumber : BTGreaterEqualStrategyNumber;
2366 else
2367 newstrategy = wfunc_left ? BTGreaterEqualStrategyNumber : BTLessEqualStrategyNumber;
2368
2369 /* We must keep the original equality qual */
2370 *keep_original = true;
2371 runopexpr = opexpr;
2372
2373 /* determine the operator to use for the WindowFuncRunCondition */
2374 runoperator = get_opfamily_member(opinfo->opfamily_id,
2375 opinfo->oplefttype,
2376 opinfo->oprighttype,
2377 newstrategy);
2378 break;
2379 }
2380 }
2381
2382 if (runopexpr != NULL)
2383 {
2384 WindowFuncRunCondition *wfuncrc;
2385
2386 wfuncrc = makeNode(WindowFuncRunCondition);
2387 wfuncrc->opno = runoperator;
2388 wfuncrc->inputcollid = runopexpr->inputcollid;
2389 wfuncrc->wfunc_left = wfunc_left;
2390 wfuncrc->arg = copyObject(otherexpr);
2391
2392 wfunc->runCondition = lappend(wfunc->runCondition, wfuncrc);
2393
2394 /* record that this attno was used in a run condition */
2395 *run_cond_attrs = bms_add_member(*run_cond_attrs,
2396 attno - FirstLowInvalidHeapAttributeNumber);
2397 return true;
2398 }
2399
2400 /* unsupported OpExpr */
2401 return false;
2402}
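/*
 * For illustration (hypothetical names), extending the header comment's
 * row_number() example:
 *
 *		SELECT * FROM (SELECT a, row_number() OVER (ORDER BY a) rn
 *					   FROM t) s
 *		WHERE rn <= 10;
 *
 * row_number() is monotonically increasing, so "rn <= 10" can become a
 * run condition, letting the WindowAgg stop producing rows once the
 * value passes 10.
 */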
2403
2404/*
2405 * check_and_push_window_quals
2406 * Check if 'clause' is a qual that can be pushed into a WindowFunc
2407 * as a 'runCondition' qual. These, when present, allow some unnecessary
2408 * work to be skipped during execution.
2409 *
2410 * 'run_cond_attrs' will be populated with all targetlist resnos of subquery
2411 * targets (offset by FirstLowInvalidHeapAttributeNumber) that we pushed
2412 * window quals for.
2413 *
2414 * Returns true if the caller still must keep the original qual or false if
2415 * the caller can safely ignore the original qual because the WindowAgg node
2416 * will use the runCondition to stop returning tuples.
2417 */
2418static bool
2419check_and_push_window_quals(Query *subquery, RangeTblEntry *rte, Index rti,
2420 Node *clause, Bitmapset **run_cond_attrs)
2421{
2422 OpExpr *opexpr = (OpExpr *) clause;
2423 bool keep_original = true;
2424 Var *var1;
2425 Var *var2;
2426
2427 /* We're only able to use OpExprs with 2 operands */
2428 if (!IsA(opexpr, OpExpr))
2429 return true;
2430
2431 if (list_length(opexpr->args) != 2)
2432 return true;
2433
2434 /*
2435 * Currently, we restrict this optimization to strict OpExprs. The reason
2436 * for this is that during execution, once the runcondition becomes false,
2437 * we stop evaluating WindowFuncs. To avoid leaving around stale window
2438 * function result values, we set them to NULL. Having only strict
2439 * OpExprs here ensures that we properly filter out the tuples with NULLs
2440 * in the top-level WindowAgg.
2441 */
2442 set_opfuncid(opexpr);
2443 if (!func_strict(opexpr->opfuncid))
2444 return true;
2445
2446 /*
2447 * Check for plain Vars that reference window functions in the subquery.
2448 * If we find any, we'll ask find_window_run_conditions() if 'opexpr' can
2449 * be used as part of the run condition.
2450 */
2451
2452 /* Check the left side of the OpExpr */
2453 var1 = linitial(opexpr->args);
2454 if (IsA(var1, Var) && var1->varattno > 0)
2455 {
2456 TargetEntry *tle = list_nth(subquery->targetList, var1->varattno - 1);
2457 WindowFunc *wfunc = (WindowFunc *) tle->expr;
2458
2459 if (find_window_run_conditions(subquery, rte, rti, tle->resno, wfunc,
2460 opexpr, true, &keep_original,
2461 run_cond_attrs))
2462 return keep_original;
2463 }
2464
2465 /* and check the right side */
2466 var2 = lsecond(opexpr->args);
2467 if (IsA(var2, Var) && var2->varattno > 0)
2468 {
2469 TargetEntry *tle = list_nth(subquery->targetList, var2->varattno - 1);
2470 WindowFunc *wfunc = (WindowFunc *) tle->expr;
2471
2472 if (find_window_run_conditions(subquery, rte, rti, tle->resno, wfunc,
2473 opexpr, false, &keep_original,
2474 run_cond_attrs))
2475 return keep_original;
2476 }
2477
2478 return true;
2479}
2480
2481/*
2482 * set_subquery_pathlist
2483 * Generate SubqueryScan access paths for a subquery RTE
2484 *
2485 * We don't currently support generating parameterized paths for subqueries
2486 * by pushing join clauses down into them; it seems too expensive to re-plan
2487 * the subquery multiple times to consider different alternatives.
2488 * (XXX that could stand to be reconsidered, now that we use Paths.)
2489 * So the paths made here will be parameterized if the subquery contains
2490 * LATERAL references, otherwise not. As long as that's true, there's no need
2491 * for a separate set_subquery_size phase: just make the paths right away.
2492 */
2493static void
2494set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
2495 Index rti, RangeTblEntry *rte)
2496{
2497 Query *parse = root->parse;
2498 Query *subquery = rte->subquery;
2499 bool trivial_pathtarget;
2500 Relids required_outer;
2501 pushdown_safety_info safetyInfo;
2502 double tuple_fraction;
2503 RelOptInfo *sub_final_rel;
2504 Bitmapset *run_cond_attrs = NULL;
2505 ListCell *lc;
2506
2507 /*
2508 * Must copy the Query so that planning doesn't mess up the RTE contents
2509 * (really really need to fix the planner to not scribble on its input,
2510 * someday ... but see remove_unused_subquery_outputs to start with).
2511 */
2512 subquery = copyObject(subquery);
2513
2514 /*
2515 * If it's a LATERAL subquery, it might contain some Vars of the current
2516 * query level, requiring it to be treated as parameterized, even though
2517 * we don't support pushing down join quals into subqueries.
2518 */
2519 required_outer = rel->lateral_relids;
2520
2521 /*
2522 * Zero out result area for subquery_is_pushdown_safe, so that it can set
2523 * flags as needed while recursing. In particular, we need a workspace
2524 * for keeping track of the reasons why columns are unsafe to reference.
2525 * These reasons are stored in the bits inside unsafeFlags[i] when we
2526 * discover reasons that column i of the subquery is unsafe to be used in
2527 * a pushed-down qual.
2528 */
2529 memset(&safetyInfo, 0, sizeof(safetyInfo));
2530 safetyInfo.unsafeFlags = (unsigned char *)
2531 palloc0((list_length(subquery->targetList) + 1) * sizeof(unsigned char));
2532
2533 /*
2534 * If the subquery has the "security_barrier" flag, it means the subquery
2535 * originated from a view that must enforce row-level security. Then we
2536 * must not push down quals that contain leaky functions. (Ideally this
2537 * would be checked inside subquery_is_pushdown_safe, but since we don't
2538 * currently pass the RTE to that function, we must do it here.)
2539 */
2540 safetyInfo.unsafeLeaky = rte->security_barrier;
2541
2542 /*
2543 * If there are any restriction clauses that have been attached to the
2544 * subquery relation, consider pushing them down to become WHERE or HAVING
2545 * quals of the subquery itself. This transformation is useful because it
2546 * may allow us to generate a better plan for the subquery than evaluating
2547 * all the subquery output rows and then filtering them.
2548 *
2549 * There are several cases where we cannot push down clauses. Restrictions
2550 * involving the subquery are checked by subquery_is_pushdown_safe().
2551 * Restrictions on individual clauses are checked by
2552 * qual_is_pushdown_safe(). Also, we don't want to push down
2553 * pseudoconstant clauses; better to have the gating node above the
2554 * subquery.
2555 *
2556 * Non-pushed-down clauses will get evaluated as qpquals of the
2557 * SubqueryScan node.
2558 *
2559 * XXX Are there any cases where we want to make a policy decision not to
2560 * push down a pushable qual, because it'd result in a worse plan?
2561 */
2562 if (rel->baserestrictinfo != NIL &&
2563 subquery_is_pushdown_safe(subquery, subquery, &safetyInfo))
2564 {
2565 /* OK to consider pushing down individual quals */
2566 List *upperrestrictlist = NIL;
2567 ListCell *l;
2568
2569 foreach(l, rel->baserestrictinfo)
2570 {
2571 RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
2572 Node *clause = (Node *) rinfo->clause;
2573
2574 if (rinfo->pseudoconstant)
2575 {
2576 upperrestrictlist = lappend(upperrestrictlist, rinfo);
2577 continue;
2578 }
2579
2580 switch (qual_is_pushdown_safe(subquery, rti, rinfo, &safetyInfo))
2581 {
2582 case PUSHDOWN_SAFE:
2583 /* Push it down */
2584 subquery_push_qual(subquery, rte, rti, clause);
2585 break;
2586
2587 case PUSHDOWN_WINDOWCLAUSE_RUNCOND:
2588
2589 /*
2590 * Since we can't push the qual down into the subquery,
2591 * check if it happens to reference a window function. If
2592 * so then it might be useful to use for the WindowAgg's
2593 * runCondition.
2594 */
2595 if (!subquery->hasWindowFuncs ||
2596 check_and_push_window_quals(subquery, rte, rti, clause,
2597 &run_cond_attrs))
2598 {
2599 /*
2600 * The subquery has no window functions, or the clause is
2601 * not a suitable window run condition qual, or it is but
2602 * the original must also be kept in the upper query.
2603 */
2604 upperrestrictlist = lappend(upperrestrictlist, rinfo);
2605 }
2606 break;
2607
2608 case PUSHDOWN_UNSAFE:
2609 upperrestrictlist = lappend(upperrestrictlist, rinfo);
2610 break;
2611 }
2612 }
2613 rel->baserestrictinfo = upperrestrictlist;
2614 /* We don't bother recomputing baserestrict_min_security */
2615 }
2616
2617 pfree(safetyInfo.unsafeFlags);
2618
2619 /*
2620 * The upper query might not use all the subquery's output columns; if
2621 * not, we can simplify. Pass the attributes that were pushed down into
2622 * WindowAgg run conditions to ensure we don't accidentally think those
2623 * are unused.
2624 */
2625 remove_unused_subquery_outputs(subquery, rel, run_cond_attrs);
2626
2627 /*
2628 * We can safely pass the outer tuple_fraction down to the subquery if the
2629 * outer level has no joining, aggregation, or sorting to do. Otherwise
2630 * we'd better tell the subquery to plan for full retrieval. (XXX This
2631 * could probably be made more intelligent ...)
2632 */
2633 if (parse->hasAggs ||
2634 parse->groupClause ||
2635 parse->groupingSets ||
2636 root->hasHavingQual ||
2637 parse->distinctClause ||
2638 parse->sortClause ||
2639 bms_membership(root->all_baserels) == BMS_MULTIPLE)
2640 tuple_fraction = 0.0; /* default case */
2641 else
2642 tuple_fraction = root->tuple_fraction;
2643
2644 /* plan_params should not be in use in current query level */
2645 Assert(root->plan_params == NIL);
2646
2647 /* Generate a subroot and Paths for the subquery */
2648 rel->subroot = subquery_planner(root->glob, subquery, root, false,
2649 tuple_fraction, NULL);
2650
2651 /* Isolate the params needed by this specific subplan */
2652 rel->subplan_params = root->plan_params;
2653 root->plan_params = NIL;
2654
2655 /*
2656 * It's possible that constraint exclusion proved the subquery empty. If
2657 * so, it's desirable to produce an unadorned dummy path so that we will
2658 * recognize appropriate optimizations at this query level.
2659 */
2660 sub_final_rel = fetch_upper_rel(rel->subroot, UPPERREL_FINAL, NULL);
2661
2662 if (IS_DUMMY_REL(sub_final_rel))
2663 {
2664 set_dummy_rel_pathlist(rel);
2665 return;
2666 }
2667
2668 /*
2669 * Mark rel with estimated output rows, width, etc. Note that we have to
2670 * do this before generating outer-query paths, else cost_subqueryscan is
2671 * not happy.
2672 */
2673 set_subquery_size_estimates(root, rel);
2674
2675 /*
2676 * Also detect whether the reltarget is trivial, so that we can pass that
2677 * info to cost_subqueryscan (rather than re-deriving it multiple times).
2678 * It's trivial if it fetches all the subplan output columns in order.
2679 */
2680 if (list_length(rel->reltarget->exprs) != list_length(subquery->targetList))
2681 trivial_pathtarget = false;
2682 else
2683 {
2684 trivial_pathtarget = true;
2685 foreach(lc, rel->reltarget->exprs)
2686 {
2687 Node *node = (Node *) lfirst(lc);
2688 Var *var;
2689
2690 if (!IsA(node, Var))
2691 {
2692 trivial_pathtarget = false;
2693 break;
2694 }
2695 var = (Var *) node;
2696 if (var->varno != rti ||
2697 var->varattno != foreach_current_index(lc) + 1)
2698 {
2699 trivial_pathtarget = false;
2700 break;
2701 }
2702 }
2703 }
2704
2705 /*
2706 * For each Path that subquery_planner produced, make a SubqueryScanPath
2707 * in the outer query.
2708 */
2709 foreach(lc, sub_final_rel->pathlist)
2710 {
2711 Path *subpath = (Path *) lfirst(lc);
2712 List *pathkeys;
2713
2714 /* Convert subpath's pathkeys to outer representation */
2715 pathkeys = convert_subquery_pathkeys(root,
2716 rel,
2717 subpath->pathkeys,
2718 make_tlist_from_pathtarget(subpath->pathtarget));
2719
2720 /* Generate outer path using this subpath */
2721 add_path(rel, (Path *)
2722 create_subqueryscan_path(root, rel, subpath,
2723 trivial_pathtarget,
2724 pathkeys, required_outer));
2725 }
2726
2727 /* If outer rel allows parallelism, do same for partial paths. */
2728 if (rel->consider_parallel && bms_is_empty(required_outer))
2729 {
2730 /* If consider_parallel is false, there should be no partial paths. */
2731 Assert(sub_final_rel->consider_parallel ||
2732 sub_final_rel->partial_pathlist == NIL);
2733
2734 /* Build a partial SubqueryScan path from each subquery partial path. */
2735 foreach(lc, sub_final_rel->partial_pathlist)
2736 {
2737 Path *subpath = (Path *) lfirst(lc);
2738 List *pathkeys;
2739
2740 /* Convert subpath's pathkeys to outer representation */
2741 pathkeys = convert_subquery_pathkeys(root,
2742 rel,
2743 subpath->pathkeys,
2744 make_tlist_from_pathtarget(subpath->pathtarget));
2745
2746 /* Generate outer path using this subpath */
2747 add_partial_path(rel, (Path *)
2748 create_subqueryscan_path(root, rel, subpath,
2749 trivial_pathtarget,
2750 pathkeys,
2751 required_outer));
2752 }
2753 }
2754}
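/*
 * For illustration (hypothetical names): in
 *
 *		SELECT * FROM (SELECT a, b FROM t) ss WHERE ss.a = 42;
 *
 * the outer qual is typically pushdown-safe, so it is evaluated as
 * "WHERE a = 42" inside the subquery, possibly enabling an index scan on
 * t instead of filtering above a SubqueryScan.
 */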
2755
2756/*
2757 * set_function_pathlist
2758 * Build the (single) access path for a function RTE
2759 */
2760static void
2761set_function_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
2762{
2763 Relids required_outer;
2764 List *pathkeys = NIL;
2765
2766 /*
2767 * We don't support pushing join clauses into the quals of a function
2768 * scan, but it could still have required parameterization due to LATERAL
2769 * refs in the function expression.
2770 */
2771 required_outer = rel->lateral_relids;
2772
2773 /*
2774 * The result is considered unordered unless ORDINALITY was used, in which
2775 * case it is ordered by the ordinal column (the last one). See if we
2776 * care, by checking for uses of that Var in equivalence classes.
2777 */
2778 if (rte->funcordinality)
2779 {
2780 AttrNumber ordattno = rel->max_attr;
2781 Var *var = NULL;
2782 ListCell *lc;
2783
2784 /*
2785 * Is there a Var for it in rel's targetlist? If not, the query did
2786 * not reference the ordinality column, or at least not in any way
2787 * that would be interesting for sorting.
2788 */
2789 foreach(lc, rel->reltarget->exprs)
2790 {
2791 Var *node = (Var *) lfirst(lc);
2792
2793 /* checking varno/varlevelsup is just paranoia */
2794 if (IsA(node, Var) &&
2795 node->varattno == ordattno &&
2796 node->varno == rel->relid &&
2797 node->varlevelsup == 0)
2798 {
2799 var = node;
2800 break;
2801 }
2802 }
2803
2804 /*
2805 * Try to build pathkeys for this Var with int8 sorting. We tell
2806 * build_expression_pathkey not to build any new equivalence class; if
2807 * the Var isn't already mentioned in some EC, it means that nothing
2808 * cares about the ordering.
2809 */
2810 if (var)
2811 pathkeys = build_expression_pathkey(root,
2812 (Expr *) var,
2813 Int8LessOperator,
2814 rel->relids,
2815 false);
2816 }
2817
2818 /* Generate appropriate path */
2819 add_path(rel, create_functionscan_path(root, rel,
2820 pathkeys, required_outer));
2821}
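/*
 * For illustration: in
 *
 *		SELECT * FROM generate_series(1, 1000) WITH ORDINALITY AS g(x, ord)
 *		ORDER BY ord;
 *
 * the ordinality column ascends with the function output, so the
 * pathkeys built above may let the planner omit an explicit Sort.
 */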
2822
2823/*
2824 * set_values_pathlist
2825 * Build the (single) access path for a VALUES RTE
2826 */
2827static void
2828set_values_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
2829{
2830 Relids required_outer;
2831
2832 /*
2833 * We don't support pushing join clauses into the quals of a values scan,
2834 * but it could still have required parameterization due to LATERAL refs
2835 * in the values expressions.
2836 */
2837 required_outer = rel->lateral_relids;
2838
2839 /* Generate appropriate path */
2840 add_path(rel, create_valuesscan_path(root, rel, required_outer));
2841}
2842
2843/*
2844 * set_tablefunc_pathlist
2845 * Build the (single) access path for a table func RTE
2846 */
2847static void
2848set_tablefunc_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
2849{
2850 Relids required_outer;
2851
2852 /*
2853 * We don't support pushing join clauses into the quals of a tablefunc
2854 * scan, but it could still have required parameterization due to LATERAL
2855 * refs in the function expression.
2856 */
2857 required_outer = rel->lateral_relids;
2858
2859 /* Generate appropriate path */
2860 add_path(rel, create_tablefuncscan_path(root, rel,
2861 required_outer));
2862}
2863
2864/*
2865 * set_cte_pathlist
2866 * Build the (single) access path for a non-self-reference CTE RTE
2867 *
2868 * There's no need for a separate set_cte_size phase, since we don't
2869 * support join-qual-parameterized paths for CTEs.
2870 */
2871static void
2872set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
2873{
2874 Path *ctepath;
2875 Plan *cteplan;
2876 PlannerInfo *cteroot;
2877 Index levelsup;
2878 List *pathkeys;
2879 int ndx;
2880 ListCell *lc;
2881 int plan_id;
2882 Relids required_outer;
2883
2884 /*
2885 * Find the referenced CTE, and locate the path and plan previously made
2886 * for it.
2887 */
2888 levelsup = rte->ctelevelsup;
2889 cteroot = root;
2890 while (levelsup-- > 0)
2891 {
2892 cteroot = cteroot->parent_root;
2893 if (!cteroot) /* shouldn't happen */
2894 elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
2895 }
2896
2897 /*
2898 * Note: cte_plan_ids can be shorter than cteList, if we are still working
2899 * on planning the CTEs (ie, this is a side-reference from another CTE).
2900 * So we mustn't use forboth here.
2901 */
2902 ndx = 0;
2903 foreach(lc, cteroot->parse->cteList)
2904 {
2905 CommonTableExpr *cte = (CommonTableExpr *) lfirst(lc);
2906
2907 if (strcmp(cte->ctename, rte->ctename) == 0)
2908 break;
2909 ndx++;
2910 }
2911 if (lc == NULL) /* shouldn't happen */
2912 elog(ERROR, "could not find CTE \"%s\"", rte->ctename);
2913 if (ndx >= list_length(cteroot->cte_plan_ids))
2914 elog(ERROR, "could not find plan for CTE \"%s\"", rte->ctename);
2915 plan_id = list_nth_int(cteroot->cte_plan_ids, ndx);
2916 if (plan_id <= 0)
2917 elog(ERROR, "no plan was made for CTE \"%s\"", rte->ctename);
2918
2919 Assert(list_length(root->glob->subpaths) == list_length(root->glob->subplans));
2920 ctepath = (Path *) list_nth(root->glob->subpaths, plan_id - 1);
2921 cteplan = (Plan *) list_nth(root->glob->subplans, plan_id - 1);
2922
2923 /* Mark rel with estimated output rows, width, etc */
2924 set_cte_size_estimates(root, rel, cteplan->plan_rows);
2925
2926 /* Convert the ctepath's pathkeys to outer query's representation */
2927 pathkeys = convert_subquery_pathkeys(root,
2928 rel,
2929 ctepath->pathkeys,
2930 cteplan->targetlist);
2931
2932 /*
2933 * We don't support pushing join clauses into the quals of a CTE scan, but
2934 * it could still have required parameterization due to LATERAL refs in
2935 * its tlist.
2936 */
2937 required_outer = rel->lateral_relids;
2938
2939 /* Generate appropriate path */
2940 add_path(rel, create_ctescan_path(root, rel, pathkeys, required_outer));
2941}
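/*
 * For illustration (hypothetical names): in
 *
 *		WITH c AS MATERIALIZED (SELECT a FROM t ORDER BY a)
 *		SELECT * FROM c ORDER BY a;
 *
 * the CTE is planned once, and the converted pathkeys may let the outer
 * ORDER BY reuse the CTE's ordering rather than sorting again.
 */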
2942
2943/*
2944 * set_namedtuplestore_pathlist
2945 * Build the (single) access path for a named tuplestore RTE
2946 *
2947 * There's no need for a separate set_namedtuplestore_size phase, since we
2948 * don't support join-qual-parameterized paths for tuplestores.
2949 */
2950static void
2951set_namedtuplestore_pathlist(PlannerInfo *root, RelOptInfo *rel,
2952 RangeTblEntry *rte)
2953{
2954 Relids required_outer;
2955
2956 /* Mark rel with estimated output rows, width, etc */
2957 set_namedtuplestore_size_estimates(root, rel);
2958
2959 /*
2960 * We don't support pushing join clauses into the quals of a tuplestore
2961 * scan, but it could still have required parameterization due to LATERAL
2962 * refs in its tlist.
2963 */
2964 required_outer = rel->lateral_relids;
2965
2966 /* Generate appropriate path */
2967 add_path(rel, create_namedtuplestorescan_path(root, rel, required_outer));
2968}
2969
2970/*
2971 * set_result_pathlist
2972 * Build the (single) access path for an RTE_RESULT RTE
2973 *
2974 * There's no need for a separate set_result_size phase, since we
2975 * don't support join-qual-parameterized paths for these RTEs.
2976 */
2977static void
2978set_result_pathlist(PlannerInfo *root, RelOptInfo *rel,
2979 RangeTblEntry *rte)
2980{
2981 Relids required_outer;
2982
2983 /* Mark rel with estimated output rows, width, etc */
2984 set_result_size_estimates(root, rel);
2985
2986 /*
2987 * We don't support pushing join clauses into the quals of a Result scan,
2988 * but it could still have required parameterization due to LATERAL refs
2989 * in its tlist.
2990 */
2991 required_outer = rel->lateral_relids;
2992
2993 /* Generate appropriate path */
2994 add_path(rel, create_resultscan_path(root, rel, required_outer));
2995}
2996
2997/*
2998 * set_worktable_pathlist
2999 * Build the (single) access path for a self-reference CTE RTE
3000 *
3001 * There's no need for a separate set_worktable_size phase, since we don't
3002 * support join-qual-parameterized paths for CTEs.
3003 */
3004static void
3005set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
3006{
3007 Path *ctepath;
3008 PlannerInfo *cteroot;
3009 Index levelsup;
3010 Relids required_outer;
3011
3012 /*
3013 * We need to find the non-recursive term's path, which is in the plan
3014 * level that's processing the recursive UNION, which is one level *below*
3015 * where the CTE comes from.
3016 */
3017 levelsup = rte->ctelevelsup;
3018 if (levelsup == 0) /* shouldn't happen */
3019 elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
3020 levelsup--;
3021 cteroot = root;
3022 while (levelsup-- > 0)
3023 {
3024 cteroot = cteroot->parent_root;
3025 if (!cteroot) /* shouldn't happen */
3026 elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
3027 }
3028 ctepath = cteroot->non_recursive_path;
3029 if (!ctepath) /* shouldn't happen */
3030 elog(ERROR, "could not find path for CTE \"%s\"", rte->ctename);
3031
3032 /* Mark rel with estimated output rows, width, etc */
3033 set_cte_size_estimates(root, rel, ctepath->rows);
3034
3035 /*
3036 * We don't support pushing join clauses into the quals of a worktable
3037 * scan, but it could still have required parameterization due to LATERAL
3038 * refs in its tlist. (I'm not sure this is actually possible given the
3039 * restrictions on recursive references, but it's easy enough to support.)
3040 */
3041 required_outer = rel->lateral_relids;
3042
3043 /* Generate appropriate path */
3044 add_path(rel, create_worktablescan_path(root, rel, required_outer));
3045}
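/*
 * For illustration: in a recursive query such as
 *
 *		WITH RECURSIVE r(n) AS (
 *			SELECT 1
 *			UNION ALL
 *			SELECT n + 1 FROM r WHERE n < 10
 *		)
 *		SELECT * FROM r;
 *
 * the self-reference to r in the recursive term is scanned via the
 * WorkTableScan path built here, with its size estimated from the
 * non-recursive term.
 */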
3046
3047/*
3048 * generate_gather_paths
3049 * Generate parallel access paths for a relation by pushing a Gather or
3050 * Gather Merge on top of a partial path.
3051 *
3052 * This must not be called until after we're done creating all partial paths
3053 * for the specified relation. (Otherwise, add_partial_path might delete a
3054 * path that some GatherPath or GatherMergePath has a reference to.)
3055 *
3056 * If we're generating paths for a scan or join relation, override_rows will
3057 * be false, and we'll just use the relation's size estimate. When we're
3058 * being called for a partially-grouped or partially-distinct path, though, we
3059 * need to override the rowcount estimate. (It's not clear that the
3060 * particular value we're using here is actually best, but the underlying rel
3061 * has no estimate so we must do something.)
3062 */
3063void
3064generate_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
3065{
3066 Path *cheapest_partial_path;
3067 Path *simple_gather_path;
3068 ListCell *lc;
3069 double rows;
3070 double *rowsp = NULL;
3071
3072 /* If there are no partial paths, there's nothing to do here. */
3073 if (rel->partial_pathlist == NIL)
3074 return;
3075
3076 /* Should we override the rel's rowcount estimate? */
3077 if (override_rows)
3078 rowsp = &rows;
3079
3080 /*
3081 * The output of Gather is always unsorted, so there's only one partial
3082 * path of interest: the cheapest one. That will be the one at the front
3083 * of partial_pathlist because of the way add_partial_path works.
3084 */
3085 cheapest_partial_path = linitial(rel->partial_pathlist);
3086 rows = compute_gather_rows(cheapest_partial_path);
3087 simple_gather_path = (Path *)
3088 create_gather_path(root, rel, cheapest_partial_path, rel->reltarget,
3089 NULL, rowsp);
3090 add_path(rel, simple_gather_path);
3091
3092 /*
3093 * For each useful ordering, we can consider an order-preserving Gather
3094 * Merge.
3095 */
3096 foreach(lc, rel->partial_pathlist)
3097 {
3098 Path *subpath = (Path *) lfirst(lc);
3099 GatherMergePath *path;
3100
3101 if (subpath->pathkeys == NIL)
3102 continue;
3103
3104 rows = compute_gather_rows(subpath);
3105 path = create_gather_merge_path(root, rel, subpath, rel->reltarget,
3106 subpath->pathkeys, NULL, rowsp);
3107 add_path(rel, &path->path);
3108 }
3109}
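/*
 * For illustration (hypothetical table): given a partial path for t, this
 * may produce a plan shaped like
 *
 *		Gather
 *		  ->  Parallel Seq Scan on t
 *
 * plus, for any partial path that is already sorted, a Gather Merge
 * preserving that ordering.
 */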
3110
3111/*
3112 * get_useful_pathkeys_for_relation
3113 * Determine which orderings of a relation might be useful.
3114 *
3115 * Getting data in sorted order can be useful either because the requested
3116 * order matches the final output ordering for the overall query we're
3117 * planning, or because it enables an efficient merge join. Here, we try
3118 * to figure out which pathkeys to consider.
3119 *
3120 * This allows us to do incremental sort on top of an index scan under a gather
3121 * merge node, i.e. parallelized.
3122 *
3123 * If require_parallel_safe is true, we also require the expressions to
3124 * be parallel safe (which allows pushing the sort below Gather Merge).
3125 *
3126 * XXX At the moment this can only ever return a list with a single element,
3127 * because it looks at query_pathkeys only. So we might return the pathkeys
3128 * directly, but it seems plausible we'll want to consider other orderings
3129 * in the future. For example, we might want to consider pathkeys useful for
3130 * merge joins.
3131 */
3132static List *
3133get_useful_pathkeys_for_relation(PlannerInfo *root, RelOptInfo *rel,
3134 bool require_parallel_safe)
3135{
3136 List *useful_pathkeys_list = NIL;
3137
3138 /*
3139 * Considering query_pathkeys is always worth it, because it might allow
3140 * us to avoid a total sort when we have a partially presorted path
3141 * available or to push the total sort into the parallel portion of the
3142 * query.
3143 */
3144 if (root->query_pathkeys)
3145 {
3146 ListCell *lc;
3147 int npathkeys = 0; /* useful pathkeys */
3148
3149 foreach(lc, root->query_pathkeys)
3150 {
3151 PathKey *pathkey = (PathKey *) lfirst(lc);
3152 EquivalenceClass *pathkey_ec = pathkey->pk_eclass;
3153
3154 /*
3155 * We can only build a sort for pathkeys that contain a
3156 * safe-to-compute-early EC member computable from the current
3157 * relation's reltarget, so ignore the remainder of the list as
3158 * soon as we find a pathkey without such a member.
3159 *
3160 * It's still worthwhile to return any prefix of the pathkeys list
3161 * that meets this requirement, as we may be able to do an
3162 * incremental sort.
3163 *
3164 * If requested, ensure the sort expression is parallel-safe too.
3165 */
3166 if (!relation_can_be_sorted_early(root, rel, pathkey_ec,
3167 require_parallel_safe))
3168 break;
3169
3170 npathkeys++;
3171 }
3172
3173 /*
3174 * The whole query_pathkeys list matches, so append it directly, to
3175 * allow comparing pathkeys easily by comparing list pointer. If we
3176 * have to truncate the pathkeys, we need to make a copy, though.
3177 */
3178 if (npathkeys == list_length(root->query_pathkeys))
3179 useful_pathkeys_list = lappend(useful_pathkeys_list,
3180 root->query_pathkeys);
3181 else if (npathkeys > 0)
3182 useful_pathkeys_list = lappend(useful_pathkeys_list,
3183 list_copy_head(root->query_pathkeys,
3184 npathkeys));
3185 }
3186
3187 return useful_pathkeys_list;
3188}
3189
3190/*
3191 * generate_useful_gather_paths
3192 * Generate parallel access paths for a relation by pushing a Gather or
3193 * Gather Merge on top of a partial path.
3194 *
3195 * Unlike plain generate_gather_paths, this not only looks at the pathkeys
3196 * of the input paths (aiming to preserve the ordering) but also considers
3197 * orderings that might be useful above the gather merge node, and tries to add
3198 * a sort (regular or incremental) to provide that.
3199 */
3200void
3201generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
3202{
3203 ListCell *lc;
3204 double rows;
3205 double *rowsp = NULL;
3206 List *useful_pathkeys_list = NIL;
3207 Path *cheapest_partial_path = NULL;
3208
3209 /* If there are no partial paths, there's nothing to do here. */
3210 if (rel->partial_pathlist == NIL)
3211 return;
3212
3213 /* Should we override the rel's rowcount estimate? */
3214 if (override_rows)
3215 rowsp = &rows;
3216
3217 /* generate the regular gather (merge) paths */
3218 generate_gather_paths(root, rel, override_rows);
3219
3220 /* consider incremental sort for interesting orderings */
3221 useful_pathkeys_list = get_useful_pathkeys_for_relation(root, rel, true);
3222
3223 /* used for explicit (full) sort paths */
3224 cheapest_partial_path = linitial(rel->partial_pathlist);
3225
3226 /*
3227 * Consider sorted paths for each interesting ordering. We generate both
3228 * incremental and full sort.
3229 */
3230 foreach(lc, useful_pathkeys_list)
3231 {
3232 List *useful_pathkeys = lfirst(lc);
3233 ListCell *lc2;
3234 bool is_sorted;
3235 int presorted_keys;
3236
3237 foreach(lc2, rel->partial_pathlist)
3238 {
3239 Path *subpath = (Path *) lfirst(lc2);
3240 GatherMergePath *path;
3241
3242 is_sorted = pathkeys_count_contained_in(useful_pathkeys,
3243 subpath->pathkeys,
3244 &presorted_keys);
3245
3246 /*
3247 * We don't need to consider the case where a subpath is already
3248 * fully sorted because generate_gather_paths already creates a
3249 * gather merge path for every subpath that has pathkeys present.
3250 *
3251 * But since the subpath is already sorted, we know we don't need
3252 * to consider adding a sort (full or incremental) on top of it,
3253 * so we can continue here.
3254 */
3255 if (is_sorted)
3256 continue;
3257
3258 /*
3259 * Try at least sorting the cheapest path and also try
3260 * incrementally sorting any path which is partially sorted
3261 * already (no need to deal with paths which have presorted keys
3262 * when incremental sort is disabled unless it's the cheapest
3263 * input path).
3264 */
3265 if (subpath != cheapest_partial_path &&
3266 (presorted_keys == 0 || !enable_incremental_sort))
3267 continue;
3268
3269 /*
3270 * Consider regular sort for any path that's not presorted or if
3271 * incremental sort is disabled. We've no need to consider both
3272 * sort and incremental sort on the same path. We assume that
3273 * incremental sort is always faster when there are presorted
3274 * keys.
3275 *
3276 * This is not redundant with the gather paths created in
3277 * generate_gather_paths, because that doesn't generate ordered
3278 * output. Here we add an explicit sort to match the useful
3279 * ordering.
3280 */
3281 if (presorted_keys == 0 || !enable_incremental_sort)
3282 {
3283 subpath = (Path *) create_sort_path(root,
3284 rel,
3285 subpath,
3286 useful_pathkeys,
3287 -1.0);
3288 }
3289 else
3290 subpath = (Path *) create_incremental_sort_path(root,
3291 rel,
3292 subpath,
3293 useful_pathkeys,
3294 presorted_keys,
3295 -1);
3297 path = create_gather_merge_path(root, rel,
3298 subpath,
3299 rel->reltarget,
3300 subpath->pathkeys,
3301 NULL,
3302 rowsp);
3303
3304 add_path(rel, &path->path);
3305 }
3306 }
3307}
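/*
 * For illustration (hypothetical names): for "SELECT * FROM t ORDER BY a"
 * with only an unsorted partial path available, this may produce
 *
 *		Gather Merge
 *		  ->  Sort (sort key: a)
 *				->  Parallel Seq Scan on t
 *
 * pushing the sort into the parallel workers, whereas plain
 * generate_gather_paths could only offer an unordered Gather here.
 */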
3308
3309/*
3310 * make_rel_from_joinlist
3311 * Build access paths using a "joinlist" to guide the join path search.
3312 *
3313 * See comments for deconstruct_jointree() for definition of the joinlist
3314 * data structure.
3315 */
3316static RelOptInfo *
3317make_rel_from_joinlist(PlannerInfo *root, List *joinlist)
3318{
3319 int levels_needed;
3320 List *initial_rels;
3321 ListCell *jl;
3322
3323 /*
3324 * Count the number of child joinlist nodes. This is the depth of the
3325 * dynamic-programming algorithm we must employ to consider all ways of
3326 * joining the child nodes.
3327 */
3328 levels_needed = list_length(joinlist);
3329
3330 if (levels_needed <= 0)
3331 return NULL; /* nothing to do? */
3332
3333 /*
3334 * Construct a list of rels corresponding to the child joinlist nodes.
3335 * This may contain both base rels and rels constructed according to
3336 * sub-joinlists.
3337 */
3338 initial_rels = NIL;
3339 foreach(jl, joinlist)
3340 {
3341 Node *jlnode = (Node *) lfirst(jl);
3342 RelOptInfo *thisrel;
3343
3344 if (IsA(jlnode, RangeTblRef))
3345 {
3346 int varno = ((RangeTblRef *) jlnode)->rtindex;
3347
3348 thisrel = find_base_rel(root, varno);
3349 }
3350 else if (IsA(jlnode, List))
3351 {
3352 /* Recurse to handle subproblem */
3353 thisrel = make_rel_from_joinlist(root, (List *) jlnode);
3354 }
3355 else
3356 {
3357 elog(ERROR, "unrecognized joinlist node type: %d",
3358 (int) nodeTag(jlnode));
3359 thisrel = NULL; /* keep compiler quiet */
3360 }
3361
3362 initial_rels = lappend(initial_rels, thisrel);
3363 }
3364
3365 if (levels_needed == 1)
3366 {
3367 /*
3368 * Single joinlist node, so we're done.
3369 */
3370 return (RelOptInfo *) linitial(initial_rels);
3371 }
3372 else
3373 {
3374 /*
3375 * Consider the different orders in which we could join the rels,
3376 * using a plugin, GEQO, or the regular join search code.
3377 *
3378 * We put the initial_rels list into a PlannerInfo field because
3379 * has_legal_joinclause() needs to look at it (ugly :-().
3380 */
3381 root->initial_rels = initial_rels;
3382
3383 if (join_search_hook)
3384 return (*join_search_hook) (root, levels_needed, initial_rels);
3385 else if (enable_geqo && levels_needed >= geqo_threshold)
3386 return geqo(root, levels_needed, initial_rels);
3387 else
3388 return standard_join_search(root, levels_needed, initial_rels);
3389 }
3390}
3391
3392/*
3393 * standard_join_search
3394 * Find possible joinpaths for a query by successively finding ways
3395 * to join component relations into join relations.
3396 *
3397 * 'levels_needed' is the number of iterations needed, ie, the number of
3398 * independent jointree items in the query. This is > 1.
3399 *
3400 * 'initial_rels' is a list of RelOptInfo nodes for each independent
3401 * jointree item. These are the components to be joined together.
3402 * Note that levels_needed == list_length(initial_rels).
3403 *
3404 * Returns the final level of join relations, i.e., the relation that is
3405 * the result of joining all the original relations together.
3406 * At least one implementation path must be provided for this relation and
3407 * all required sub-relations.
3408 *
3409 * To support loadable plugins that modify planner behavior by changing the
3410 * join searching algorithm, we provide a hook variable that lets a plugin
3411 * replace or supplement this function. Any such hook must return the same
3412 * final join relation as the standard code would, but it might have a
3413 * different set of implementation paths attached, and only the sub-joinrels
3414 * needed for these paths need have been instantiated.
3415 *
3416 * Note to plugin authors: the functions invoked during standard_join_search()
3417 * modify root->join_rel_list and root->join_rel_hash. If you want to do more
3418 * than one join-order search, you'll probably need to save and restore the
3419 * original states of those data structures. See geqo_eval() for an example.
3420 */
3421RelOptInfo *
3422standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
3423{
3424 int lev;
3425 RelOptInfo *rel;
3426
3427 /*
3428 * This function cannot be invoked recursively within any one planning
3429 * problem, so join_rel_level[] can't be in use already.
3430 */
3431 Assert(root->join_rel_level == NULL);
3432
3433 /*
3434 * We employ a simple "dynamic programming" algorithm: we first find all
3435 * ways to build joins of two jointree items, then all ways to build joins
3436 * of three items (from two-item joins and single items), then four-item
3437 * joins, and so on until we have considered all ways to join all the
3438 * items into one rel.
3439 *
3440 * root->join_rel_level[j] is a list of all the j-item rels. Initially we
3441 * set root->join_rel_level[1] to represent all the single-jointree-item
3442 * relations.
3443 */
3444 root->join_rel_level = (List **) palloc0((levels_needed + 1) * sizeof(List *));
3445
3446 root->join_rel_level[1] = initial_rels;
3447
3448 for (lev = 2; lev <= levels_needed; lev++)
3449 {
3450 ListCell *lc;
3451
3452 /*
3453 * Determine all possible pairs of relations to be joined at this
3454 * level, and build paths for making each one from every available
3455 * pair of lower-level relations.
3456 */
3457 join_search_one_level(root, lev);
3458
3459 /*
3460 * Run generate_partitionwise_join_paths() and
3461 * generate_useful_gather_paths() for each just-processed joinrel. We
3462 * could not do this earlier because both regular and partial paths
3463 * can get added to a particular joinrel at multiple times within
3464 * join_search_one_level.
3465 *
3466 * After that, we're done creating paths for the joinrel, so run
3467 * set_cheapest().
3468 */
3469 foreach(lc, root->join_rel_level[lev])
3470 {
3471 rel = (RelOptInfo *) lfirst(lc);
3472
3473 /* Create paths for partitionwise joins. */
3474 generate_partitionwise_join_paths(root, rel);
3475
3476 /*
3477 * Except for the topmost scan/join rel, consider gathering
3478 * partial paths. We'll do the same for the topmost scan/join rel
3479 * once we know the final targetlist (see grouping_planner's call
3480 * to apply_scanjoin_target_to_paths).
3481 */
3482 if (!bms_equal(rel->relids, root->all_query_rels))
3483 generate_useful_gather_paths(root, rel, false);
3484
3485 /* Find and save the cheapest paths for this rel */
3486 set_cheapest(rel);
3487
3488#ifdef OPTIMIZER_DEBUG
3489 pprint(rel);
3490#endif
3491 }
3492 }
3493
3494 /*
3495 * We should have a single rel at the final level.
3496 */
3497 if (root->join_rel_level[levels_needed] == NIL)
3498 elog(ERROR, "failed to build any %d-way joins", levels_needed);
3499 Assert(list_length(root->join_rel_level[levels_needed]) == 1);
3500
3501 rel = (RelOptInfo *) linitial(root->join_rel_level[levels_needed]);
3502
3503 root->join_rel_level = NULL;
3504
3505 return rel;
3506}
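/*
 * For illustration: for "SELECT ... FROM a, b, c WHERE ...",
 * levels_needed is 3 and the search proceeds roughly as
 *
 *		level 1: {a} {b} {c}			(initial_rels)
 *		level 2: {a b} {a c} {b c}		(where join clauses permit)
 *		level 3: {a b c}
 *
 * with paths for the final joinrel built from each usable lower-level
 * pair.
 */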
3507
3508/*****************************************************************************
3509 * PUSHING QUALS DOWN INTO SUBQUERIES
3510 *****************************************************************************/
3511
3512/*
3513 * subquery_is_pushdown_safe - is a subquery safe for pushing down quals?
3514 *
3515 * subquery is the particular component query being checked. topquery
3516 * is the top component of a set-operations tree (the same Query if no
3517 * set-op is involved).
3518 *
3519 * Conditions checked here:
3520 *
3521 * 1. If the subquery has a LIMIT clause, we must not push down any quals,
3522 * since that could change the set of rows returned.
3523 *
3524 * 2. If the subquery contains EXCEPT or EXCEPT ALL set ops we cannot push
3525 * quals into it, because that could change the results.
3526 *
3527 * 3. If the subquery uses DISTINCT, we cannot push volatile quals into it.
3528 * This is because upper-level quals should semantically be evaluated only
3529 * once per distinct row, not once per original row, and if the qual is
3530 * volatile then extra evaluations could change the results. (This issue
3531 * does not apply to other forms of aggregation such as GROUP BY, because
3532 * when those are present we push into HAVING not WHERE, so that the quals
3533 * are still applied after aggregation.)
3534 *
3535 * 4. If the subquery contains window functions, we cannot push volatile quals
3536 * into it. The issue here is a bit different from DISTINCT: a volatile qual
3537 * might succeed for some rows of a window partition and fail for others,
3538 * thereby changing the partition contents and thus the window functions'
3539 * results for rows that remain.
3540 *
3541 * 5. If the subquery contains any set-returning functions in its targetlist,
3542 * we cannot push volatile quals into it. That would push them below the SRFs
3543 * and thereby change the number of times they are evaluated. Also, a
3544 * volatile qual could succeed for some SRF output rows and fail for others,
3545 * a behavior that cannot occur if it's evaluated before SRF expansion.
3546 *
3547 * 6. If the subquery has nonempty grouping sets, we cannot push down any
3548 * quals. The concern here is that a qual referencing a "constant" grouping
3549 * column could get constant-folded, which would be improper because the value
3550 * is potentially nullable by grouping-set expansion. This restriction could
3551 * be removed if we had a parsetree representation that shows that such
3552 * grouping columns are not really constant. (There are other ideas that
3553 * could be used to relax this restriction, but that's the approach most
3554 * likely to get taken in the future. Note that there's not much to be gained
3555 * so long as subquery_planner can't move HAVING clauses to WHERE within such
3556 * a subquery.)
3557 *
3558 * In addition, we make several checks on the subquery's output columns to see
3559 * if it is safe to reference them in pushed-down quals. If output column k
3560 * is found to be unsafe to reference, we set the reason for that inside
3561 * safetyInfo->unsafeFlags[k], but we don't reject the subquery overall since
3562 * column k might not be referenced by some/all quals. The unsafeFlags[]
3563 * array will be consulted later by qual_is_pushdown_safe(). It's better to
3564 * do it this way than to make the checks directly in qual_is_pushdown_safe(),
3565 * because when the subquery involves set operations we have to check the
3566 * output expressions in each arm of the set op.
3567 *
3568 * Note: pushing quals into a DISTINCT subquery is theoretically dubious:
3569 * we're effectively assuming that the quals cannot distinguish values that
3570 * the DISTINCT's equality operator sees as equal, yet there are many
3571 * counterexamples to that assumption. However use of such a qual with a
3572 * DISTINCT subquery would be unsafe anyway, since there's no guarantee which
3573 * "equal" value will be chosen as the output value by the DISTINCT operation.
3574 * So we don't worry too much about that. Another objection is that if the
3575 * qual is expensive to evaluate, running it for each original row might cost
3576 * more than we save by eliminating rows before the DISTINCT step. But it
3577 * would be very hard to estimate that at this stage, and in practice pushdown
3578 * seldom seems to make things worse, so we ignore that problem too.
3579 *
3580 * Note: likewise, pushing quals into a subquery with window functions is a
3581 * bit dubious: the quals might remove some rows of a window partition while
3582 * leaving others, causing changes in the window functions' results for the
3583 * surviving rows. We insist that such a qual reference only partitioning
3584 * columns, but again that only protects us if the qual does not distinguish
3585 * values that the partitioning equality operator sees as equal. The risks
3586 * here are perhaps larger than for DISTINCT: since no de-duplication of rows
3587 * occurs, such a qual would otherwise be perfectly well-defined. But
3588 * we'll do this anyway because the potential performance benefits are very
3589 * large, and we've seen no field complaints about the longstanding comparable
3590 * behavior with DISTINCT.
3591 */
3592static bool
3593subquery_is_pushdown_safe(Query *subquery, Query *topquery,
 3594 pushdown_safety_info *safetyInfo)
3595{
3596 SetOperationStmt *topop;
3597
3598 /* Check point 1 */
3599 if (subquery->limitOffset != NULL || subquery->limitCount != NULL)
3600 return false;
3601
3602 /* Check point 6 */
3603 if (subquery->groupClause && subquery->groupingSets)
3604 return false;
3605
3606 /* Check points 3, 4, and 5 */
3607 if (subquery->distinctClause ||
3608 subquery->hasWindowFuncs ||
3609 subquery->hasTargetSRFs)
3610 safetyInfo->unsafeVolatile = true;
3611
3612 /*
3613 * If we're at a leaf query, check for unsafe expressions in its target
3614 * list, and mark any reasons why they're unsafe in unsafeFlags[].
3615 * (Non-leaf nodes in setop trees have only simple Vars in their tlists,
3616 * so no need to check them.)
3617 */
3618 if (subquery->setOperations == NULL)
3619 check_output_expressions(subquery, safetyInfo);
3620
3621 /* Are we at top level, or looking at a setop component? */
3622 if (subquery == topquery)
3623 {
3624 /* Top level, so check any component queries */
3625 if (subquery->setOperations != NULL)
3626 if (!recurse_pushdown_safe(subquery->setOperations, topquery,
3627 safetyInfo))
3628 return false;
3629 }
3630 else
3631 {
3632 /* Setop component must not have more components (too weird) */
3633 if (subquery->setOperations != NULL)
3634 return false;
3635 /* Check whether setop component output types match top level */
3636 topop = castNode(SetOperationStmt, topquery->setOperations);
3637 Assert(topop);
 3638 compare_tlist_datatypes(subquery->targetList,
 3639 topop->colTypes,
3640 safetyInfo);
3641 }
3642 return true;
3643}
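For illustration (an editorial example, not part of the source; table and column names are hypothetical), the first and third conditions above play out like this:

/*
 *	-- Point 1: pushing "ss.x = 42" below the LIMIT could select a
 *	-- different ten rows, so nothing may be pushed down here.
 *	SELECT * FROM (SELECT x FROM t LIMIT 10) ss WHERE ss.x = 42;
 *
 *	-- Point 3: random() is volatile; pushed below the DISTINCT it would
 *	-- run once per original row instead of once per distinct row.
 *	SELECT * FROM (SELECT DISTINCT x FROM t) ss WHERE random() < 0.5;
 */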
3644
3645/*
3646 * Helper routine to recurse through setOperations tree
3647 */
3648static bool
3649recurse_pushdown_safe(Node *setOp, Query *topquery,
 3650 pushdown_safety_info *safetyInfo)
3651{
3652 if (IsA(setOp, RangeTblRef))
3653 {
3654 RangeTblRef *rtr = (RangeTblRef *) setOp;
3655 RangeTblEntry *rte = rt_fetch(rtr->rtindex, topquery->rtable);
3656 Query *subquery = rte->subquery;
3657
3658 Assert(subquery != NULL);
3659 return subquery_is_pushdown_safe(subquery, topquery, safetyInfo);
3660 }
3661 else if (IsA(setOp, SetOperationStmt))
3662 {
3663 SetOperationStmt *op = (SetOperationStmt *) setOp;
3664
3665 /* EXCEPT is no good (point 2 for subquery_is_pushdown_safe) */
3666 if (op->op == SETOP_EXCEPT)
3667 return false;
3668 /* Else recurse */
3669 if (!recurse_pushdown_safe(op->larg, topquery, safetyInfo))
3670 return false;
3671 if (!recurse_pushdown_safe(op->rarg, topquery, safetyInfo))
3672 return false;
3673 }
3674 else
3675 {
3676 elog(ERROR, "unrecognized node type: %d",
3677 (int) nodeTag(setOp));
3678 }
3679 return true;
3680}
3681
3682/*
3683 * check_output_expressions - check subquery's output expressions for safety
3684 *
3685 * There are several cases in which it's unsafe to push down an upper-level
3686 * qual if it references a particular output column of a subquery. We check
3687 * each output column of the subquery and set flags in unsafeFlags[k] when we
3688 * see that column is unsafe for a pushed-down qual to reference. The
3689 * conditions checked here are:
3690 *
3691 * 1. We must not push down any quals that refer to subselect outputs that
3692 * return sets, else we'd introduce functions-returning-sets into the
3693 * subquery's WHERE/HAVING quals.
3694 *
3695 * 2. We must not push down any quals that refer to subselect outputs that
3696 * contain volatile functions, for fear of introducing strange results due
3697 * to multiple evaluation of a volatile function.
3698 *
3699 * 3. If the subquery uses DISTINCT ON, we must not push down any quals that
3700 * refer to non-DISTINCT output columns, because that could change the set
3701 * of rows returned. (This condition is vacuous for DISTINCT, because then
3702 * there are no non-DISTINCT output columns, so we needn't check. Note that
3703 * subquery_is_pushdown_safe already reported that we can't use volatile
3704 * quals if there's DISTINCT or DISTINCT ON.)
3705 *
3706 * 4. If the subquery has any window functions, we must not push down quals
3707 * that reference any output columns that are not listed in all the subquery's
3708 * window PARTITION BY clauses. We can push down quals that use only
3709 * partitioning columns because they should succeed or fail identically for
3710 * every row of any one window partition, and totally excluding some
3711 * partitions will not change a window function's results for remaining
3712 * partitions. (Again, this also requires nonvolatile quals, but
3713 * subquery_is_pushdown_safe handles that.) Subquery columns marked as
3714 * unsafe for this reason can still have WindowClause run conditions pushed
3715 * down.
3716 */
3717static void
3718check_output_expressions(Query *subquery, pushdown_safety_info *safetyInfo)
3719{
3720 ListCell *lc;
3721
3722 foreach(lc, subquery->targetList)
3723 {
3724 TargetEntry *tle = (TargetEntry *) lfirst(lc);
3725
3726 if (tle->resjunk)
3727 continue; /* ignore resjunk columns */
3728
3729 /* Functions returning sets are unsafe (point 1) */
3730 if (subquery->hasTargetSRFs &&
3731 (safetyInfo->unsafeFlags[tle->resno] &
3732 UNSAFE_HAS_SET_FUNC) == 0 &&
 3733 expression_returns_set((Node *) tle->expr))
 3734 {
3735 safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_HAS_SET_FUNC;
3736 continue;
3737 }
3738
3739 /* Volatile functions are unsafe (point 2) */
3740 if ((safetyInfo->unsafeFlags[tle->resno] &
 3741 UNSAFE_HAS_VOLATILE_FUNC) == 0 &&
 3742 contain_volatile_functions((Node *) tle->expr))
 3743 {
3744 safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_HAS_VOLATILE_FUNC;
3745 continue;
3746 }
3747
3748 /* If subquery uses DISTINCT ON, check point 3 */
3749 if (subquery->hasDistinctOn &&
3750 (safetyInfo->unsafeFlags[tle->resno] &
 3751 UNSAFE_NOTIN_DISTINCTON_CLAUSE) == 0 &&
 3752 !targetIsInSortList(tle, InvalidOid, subquery->distinctClause))
 3753 {
3754 /* non-DISTINCT column, so mark it unsafe */
 3755 safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_NOTIN_DISTINCTON_CLAUSE;
 3756 continue;
3757 }
3758
3759 /* If subquery uses window functions, check point 4 */
3760 if (subquery->hasWindowFuncs &&
3761 (safetyInfo->unsafeFlags[tle->resno] &
 3762 UNSAFE_NOTIN_PARTITIONBY_CLAUSE) == 0 &&
 3763 !targetIsInAllPartitionLists(tle, subquery))
3764 {
3765 /* not present in all PARTITION BY clauses, so mark it unsafe */
 3766 safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_NOTIN_PARTITIONBY_CLAUSE;
 3767 continue;
3768 }
3769 }
3770}
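The bookkeeping above boils down to one flag byte per output column, indexed by resno. A minimal standalone sketch of the same technique (the DEMO_* names are hypothetical, not PostgreSQL APIs):

#include <stdio.h>
#include <stdlib.h>

#define DEMO_UNSAFE_VOLATILE (1 << 0)	/* cf. UNSAFE_HAS_VOLATILE_FUNC */
#define DEMO_UNSAFE_SET_FUNC (1 << 1)	/* cf. UNSAFE_HAS_SET_FUNC */

int
main(void)
{
	int			ncols = 4;
	/* one flag byte per column; entry 0 is unused, as resnos count from 1 */
	unsigned char *flags = calloc(ncols + 1, 1);

	flags[2] |= DEMO_UNSAFE_VOLATILE;	/* column 2 uses a volatile function */
	flags[3] |= DEMO_UNSAFE_SET_FUNC;	/* column 3 returns a set */

	for (int col = 1; col <= ncols; col++)
		printf("column %d: %s\n", col,
			   flags[col] == 0 ? "safe to reference" : "unsafe");

	free(flags);
	return 0;
}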
3771
3772/*
3773 * For subqueries using UNION/UNION ALL/INTERSECT/INTERSECT ALL, we can
3774 * push quals into each component query, but the quals can only reference
3775 * subquery columns that suffer no type coercions in the set operation.
3776 * Otherwise there are possible semantic gotchas. So, we check the
3777 * component queries to see if any of them have output types different from
3778 * the top-level setop outputs. We set the UNSAFE_TYPE_MISMATCH bit in
3779 * unsafeFlags[k] if column k has different type in any component.
3780 *
3781 * We don't have to care about typmods here: the only allowed difference
3782 * between set-op input and output typmods is that the input has a specific
3783 * typmod while the output is -1, and that does not require a coercion.
3784 *
3785 * tlist is a subquery tlist.
3786 * colTypes is an OID list of the top-level setop's output column types.
3787 * safetyInfo is the pushdown_safety_info to set unsafeFlags[] for.
3788 */
3789static void
3790compare_tlist_datatypes(List *tlist, List *colTypes,
 3791 pushdown_safety_info *safetyInfo)
3792{
3793 ListCell *l;
3794 ListCell *colType = list_head(colTypes);
3795
3796 foreach(l, tlist)
3797 {
3798 TargetEntry *tle = (TargetEntry *) lfirst(l);
3799
3800 if (tle->resjunk)
3801 continue; /* ignore resjunk columns */
3802 if (colType == NULL)
3803 elog(ERROR, "wrong number of tlist entries");
3804 if (exprType((Node *) tle->expr) != lfirst_oid(colType))
3805 safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_TYPE_MISMATCH;
3806 colType = lnext(colTypes, colType);
3807 }
3808 if (colType != NULL)
3809 elog(ERROR, "wrong number of tlist entries");
3810}
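As an editorial illustration (hypothetical tables and column types): in

/*
 *	SELECT int4_col FROM t1
 *	UNION ALL
 *	SELECT int8_col FROM t2
 *
 * the setop's output column resolves as int8, so the first arm's int4
 * tlist entry differs from the top-level colTypes entry; column 1 gets
 * UNSAFE_TYPE_MISMATCH and quals referencing it stay above the set
 * operation.
 */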
3811
3812/*
3813 * targetIsInAllPartitionLists
3814 * True if the TargetEntry is listed in the PARTITION BY clause
3815 * of every window defined in the query.
3816 *
3817 * It would be safe to ignore windows not actually used by any window
3818 * function, but it's not easy to get that info at this stage; and it's
3819 * unlikely to be useful to spend any extra cycles getting it, since
3820 * unreferenced window definitions are probably infrequent in practice.
3821 */
3822static bool
3823targetIsInAllPartitionLists(TargetEntry *tle, Query *query)
3824{
3825 ListCell *lc;
3826
3827 foreach(lc, query->windowClause)
3828 {
3829 WindowClause *wc = (WindowClause *) lfirst(lc);
3830
 3831 if (!targetIsInSortList(tle, InvalidOid, wc->partitionClause))
 3832 return false;
3833 }
3834 return true;
3835}
3836
3837/*
3838 * qual_is_pushdown_safe - is a particular rinfo safe to push down?
3839 *
3840 * rinfo is a restriction clause applying to the given subquery (whose RTE
3841 * has index rti in the parent query).
3842 *
3843 * Conditions checked here:
3844 *
3845 * 1. rinfo's clause must not contain any SubPlans (mainly because it's
3846 * unclear that it will work correctly: SubLinks will already have been
3847 * transformed into SubPlans in the qual, but not in the subquery). Note that
3848 * SubLinks that transform to initplans are safe, and will be accepted here
3849 * because what we'll see in the qual is just a Param referencing the initplan
3850 * output.
3851 *
3852 * 2. If unsafeVolatile is set, rinfo's clause must not contain any volatile
3853 * functions.
3854 *
3855 * 3. If unsafeLeaky is set, rinfo's clause must not contain any leaky
3856 * functions that are passed Var nodes, and therefore might reveal values from
3857 * the subquery as side effects.
3858 *
3859 * 4. rinfo's clause must not refer to the whole-row output of the subquery
3860 * (since there is no easy way to name that within the subquery itself).
3861 *
3862 * 5. rinfo's clause must not refer to any subquery output columns that were
3863 * found to be unsafe to reference by subquery_is_pushdown_safe().
3864 */
3865static pushdown_safe_type
3866qual_is_pushdown_safe(Query *subquery, Index rti, RestrictInfo *rinfo,
 3867 pushdown_safety_info *safetyInfo)
3868{
 3869 pushdown_safe_type safe = PUSHDOWN_SAFE;
 3870 Node *qual = (Node *) rinfo->clause;
3871 List *vars;
3872 ListCell *vl;
3873
3874 /* Refuse subselects (point 1) */
3875 if (contain_subplans(qual))
3876 return PUSHDOWN_UNSAFE;
3877
3878 /* Refuse volatile quals if we found they'd be unsafe (point 2) */
3879 if (safetyInfo->unsafeVolatile &&
 3880 contain_volatile_functions((Node *) rinfo))
 3881 return PUSHDOWN_UNSAFE;
3882
3883 /* Refuse leaky quals if told to (point 3) */
3884 if (safetyInfo->unsafeLeaky &&
3885 contain_leaked_vars(qual))
3886 return PUSHDOWN_UNSAFE;
3887
3888 /*
3889 * Examine all Vars used in clause. Since it's a restriction clause, all
3890 * such Vars must refer to subselect output columns ... unless this is
3891 * part of a LATERAL subquery, in which case there could be lateral
3892 * references.
3893 *
3894 * By omitting the relevant flags, this also gives us a cheap sanity check
3895 * that no aggregates or window functions appear in the qual. Those would
3896 * be unsafe to push down, but at least for the moment we could never see
3897 * any in a qual anyhow.
3898 */
 3899 vars = pull_var_clause(qual, PVC_INCLUDE_PLACEHOLDERS);
 3900 foreach(vl, vars)
3901 {
3902 Var *var = (Var *) lfirst(vl);
3903
3904 /*
3905 * XXX Punt if we find any PlaceHolderVars in the restriction clause.
3906 * It's not clear whether a PHV could safely be pushed down, and even
3907 * less clear whether such a situation could arise in any cases of
3908 * practical interest anyway. So for the moment, just refuse to push
3909 * down.
3910 */
3911 if (!IsA(var, Var))
3912 {
3913 safe = PUSHDOWN_UNSAFE;
3914 break;
3915 }
3916
3917 /*
3918 * Punt if we find any lateral references. It would be safe to push
3919 * these down, but we'd have to convert them into outer references,
3920 * which subquery_push_qual lacks the infrastructure to do. The case
3921 * arises so seldom that it doesn't seem worth working hard on.
3922 */
3923 if (var->varno != rti)
3924 {
3925 safe = PUSHDOWN_UNSAFE;
3926 break;
3927 }
3928
3929 /* Subqueries have no system columns */
3930 Assert(var->varattno >= 0);
3931
3932 /* Check point 4 */
3933 if (var->varattno == 0)
3934 {
3935 safe = PUSHDOWN_UNSAFE;
3936 break;
3937 }
3938
3939 /* Check point 5 */
3940 if (safetyInfo->unsafeFlags[var->varattno] != 0)
3941 {
3942 if (safetyInfo->unsafeFlags[var->varattno] &
 3943 (UNSAFE_HAS_VOLATILE_FUNC | UNSAFE_HAS_SET_FUNC |
 3944 UNSAFE_NOTIN_DISTINCTON_CLAUSE | UNSAFE_TYPE_MISMATCH))
 3945 {
3946 safe = PUSHDOWN_UNSAFE;
3947 break;
3948 }
3949 else
3950 {
3951 /* UNSAFE_NOTIN_PARTITIONBY_CLAUSE is ok for run conditions */
 3952 safe = PUSHDOWN_WINDOWCLAUSE_RUNCOND;
 3953 /* don't break, we might find another Var that's unsafe */
3954 }
3955 }
3956 }
3957
3958 list_free(vars);
3959
3960 return safe;
3961}
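An editorial example of the window-function case (hypothetical names):

/*
 *	SELECT * FROM (SELECT dept, salary,
 *	                      rank() OVER (PARTITION BY dept
 *	                                   ORDER BY salary) AS rnk
 *	               FROM emp) ss
 *	WHERE ss.dept = 10 AND ss.rnk <= 3;
 *
 * "dept" appears in the (only) PARTITION BY, so "ss.dept = 10" is
 * PUSHDOWN_SAFE; "rnk" carries UNSAFE_NOTIN_PARTITIONBY_CLAUSE, so
 * "ss.rnk <= 3" is reported as PUSHDOWN_WINDOWCLAUSE_RUNCOND and can be
 * used only as a WindowClause run condition, not as an ordinary
 * pushed-down qual.
 */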
3962
3963/*
3964 * subquery_push_qual - push down a qual that we have determined is safe
3965 */
3966static void
3967subquery_push_qual(Query *subquery, RangeTblEntry *rte, Index rti, Node *qual)
3968{
3969 if (subquery->setOperations != NULL)
3970 {
3971 /* Recurse to push it separately to each component query */
3972 recurse_push_qual(subquery->setOperations, subquery,
3973 rte, rti, qual);
3974 }
3975 else
3976 {
3977 /*
3978 * We need to replace Vars in the qual (which must refer to outputs of
3979 * the subquery) with copies of the subquery's targetlist expressions.
3980 * Note that at this point, any uplevel Vars in the qual should have
3981 * been replaced with Params, so they need no work.
3982 *
3983 * This step also ensures that when we are pushing into a setop tree,
3984 * each component query gets its own copy of the qual.
3985 */
3986 qual = ReplaceVarsFromTargetList(qual, rti, 0, rte,
3987 subquery->targetList,
 3988 REPLACEVARS_REPORT_ERROR, 0,
 3989 &subquery->hasSubLinks);
3990
3991 /*
3992 * Now attach the qual to the proper place: normally WHERE, but if the
3993 * subquery uses grouping or aggregation, put it in HAVING (since the
3994 * qual really refers to the group-result rows).
3995 */
3996 if (subquery->hasAggs || subquery->groupClause || subquery->groupingSets || subquery->havingQual)
3997 subquery->havingQual = make_and_qual(subquery->havingQual, qual);
3998 else
3999 subquery->jointree->quals =
4000 make_and_qual(subquery->jointree->quals, qual);
4001
4002 /*
4003 * We need not change the subquery's hasAggs or hasSubLinks flags,
4004 * since we can't be pushing down any aggregates that weren't there
4005 * before, and we don't push down subselects at all.
4006 */
4007 }
4008}
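For illustration (editorial, hypothetical names), given

/*
 *	SELECT * FROM (SELECT dept, sum(salary) AS s
 *	               FROM emp GROUP BY dept) ss
 *	WHERE ss.dept = 10;
 *
 * the subquery uses grouping, so the pushed copy of "dept = 10" lands in
 * HAVING rather than WHERE and is still evaluated against group-result
 * rows. (subquery_planner may later move a HAVING clause that contains
 * no aggregates down to WHERE.)
 */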
4009
4010/*
4011 * Helper routine to recurse through setOperations tree
4012 */
4013static void
4014recurse_push_qual(Node *setOp, Query *topquery,
4015 RangeTblEntry *rte, Index rti, Node *qual)
4016{
4017 if (IsA(setOp, RangeTblRef))
4018 {
4019 RangeTblRef *rtr = (RangeTblRef *) setOp;
4020 RangeTblEntry *subrte = rt_fetch(rtr->rtindex, topquery->rtable);
4021 Query *subquery = subrte->subquery;
4022
4023 Assert(subquery != NULL);
4024 subquery_push_qual(subquery, rte, rti, qual);
4025 }
4026 else if (IsA(setOp, SetOperationStmt))
4027 {
4028 SetOperationStmt *op = (SetOperationStmt *) setOp;
4029
4030 recurse_push_qual(op->larg, topquery, rte, rti, qual);
4031 recurse_push_qual(op->rarg, topquery, rte, rti, qual);
4032 }
4033 else
4034 {
4035 elog(ERROR, "unrecognized node type: %d",
4036 (int) nodeTag(setOp));
4037 }
4038}
4039
4040/*****************************************************************************
4041 * SIMPLIFYING SUBQUERY TARGETLISTS
4042 *****************************************************************************/
4043
4044/*
4045 * remove_unused_subquery_outputs
4046 * Remove subquery targetlist items we don't need
4047 *
4048 * It's possible, even likely, that the upper query does not read all the
4049 * output columns of the subquery. We can remove any such outputs that are
4050 * not needed by the subquery itself (e.g., as sort/group columns) and do not
4051 * affect semantics otherwise (e.g., volatile functions can't be removed).
4052 * This is useful not only because we might be able to remove expensive-to-
4053 * compute expressions, but because deletion of output columns might allow
4054 * optimizations such as join removal to occur within the subquery.
4055 *
4056 * extra_used_attrs can be passed as non-NULL to mark any columns (offset by
4057 * FirstLowInvalidHeapAttributeNumber) that we should not remove. This
4058 * parameter is modified by the function, so callers must make a copy if they
4059 * need to use the passed in Bitmapset after calling this function.
4060 *
4061 * To avoid affecting column numbering in the targetlist, we don't physically
4062 * remove unused tlist entries, but rather replace their expressions with NULL
4063 * constants. This is implemented by modifying subquery->targetList.
4064 */
4065static void
4066remove_unused_subquery_outputs(Query *subquery, RelOptInfo *rel,
 4067 Bitmapset *extra_used_attrs)
4068{
4069 Bitmapset *attrs_used;
4070 ListCell *lc;
4071
4072 /*
4073 * Just point directly to extra_used_attrs. No need to bms_copy as none of
4074 * the current callers use the Bitmapset after calling this function.
4075 */
4076 attrs_used = extra_used_attrs;
4077
4078 /*
4079 * Do nothing if subquery has UNION/INTERSECT/EXCEPT: in principle we
4080 * could update all the child SELECTs' tlists, but it seems not worth the
4081 * trouble presently.
4082 */
4083 if (subquery->setOperations)
4084 return;
4085
4086 /*
4087 * If subquery has regular DISTINCT (not DISTINCT ON), we're wasting our
4088 * time: all its output columns must be used in the distinctClause.
4089 */
4090 if (subquery->distinctClause && !subquery->hasDistinctOn)
4091 return;
4092
4093 /*
4094 * Collect a bitmap of all the output column numbers used by the upper
4095 * query.
4096 *
4097 * Add all the attributes needed for joins or final output. Note: we must
4098 * look at rel's targetlist, not the attr_needed data, because attr_needed
4099 * isn't computed for inheritance child rels, cf set_append_rel_size().
4100 * (XXX might be worth changing that sometime.)
4101 */
4102 pull_varattnos((Node *) rel->reltarget->exprs, rel->relid, &attrs_used);
4103
4104 /* Add all the attributes used by un-pushed-down restriction clauses. */
4105 foreach(lc, rel->baserestrictinfo)
4106 {
4107 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
4108
4109 pull_varattnos((Node *) rinfo->clause, rel->relid, &attrs_used);
4110 }
4111
4112 /*
4113 * If there's a whole-row reference to the subquery, we can't remove
4114 * anything.
4115 */
 4116 if (bms_is_member(0 - FirstLowInvalidHeapAttributeNumber, attrs_used))
 4117 return;
4118
4119 /*
4120 * Run through the tlist and zap entries we don't need. It's okay to
4121 * modify the tlist items in-place because set_subquery_pathlist made a
4122 * copy of the subquery.
4123 */
4124 foreach(lc, subquery->targetList)
4125 {
4126 TargetEntry *tle = (TargetEntry *) lfirst(lc);
4127 Node *texpr = (Node *) tle->expr;
4128
4129 /*
4130 * If it has a sortgroupref number, it's used in some sort/group
4131 * clause so we'd better not remove it. Also, don't remove any
4132 * resjunk columns, since their reason for being has nothing to do
4133 * with anybody reading the subquery's output. (It's likely that
4134 * resjunk columns in a sub-SELECT would always have ressortgroupref
4135 * set, but even if they don't, it seems imprudent to remove them.)
4136 */
4137 if (tle->ressortgroupref || tle->resjunk)
4138 continue;
4139
4140 /*
4141 * If it's used by the upper query, we can't remove it.
4142 */
 4143 if (bms_is_member(tle->resno - FirstLowInvalidHeapAttributeNumber,
 4144 attrs_used))
4145 continue;
4146
4147 /*
4148 * If it contains a set-returning function, we can't remove it since
4149 * that could change the number of rows returned by the subquery.
4150 */
4151 if (subquery->hasTargetSRFs &&
 4152 expression_returns_set(texpr))
 4153 continue;
4154
4155 /*
4156 * If it contains volatile functions, we daren't remove it for fear
4157 * that the user is expecting their side-effects to happen.
4158 */
4159 if (contain_volatile_functions(texpr))
4160 continue;
4161
4162 /*
4163 * OK, we don't need it. Replace the expression with a NULL constant.
4164 * Preserve the exposed type of the expression, in case something
4165 * looks at the rowtype of the subquery's result.
4166 */
4167 tle->expr = (Expr *) makeNullConst(exprType(texpr),
4168 exprTypmod(texpr),
4169 exprCollation(texpr));
4170 }
4171}
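An editorial example (expensive_fn is hypothetical):

/*
 *	SELECT x FROM (SELECT x, expensive_fn(y) AS e FROM t) ss;
 *
 * ss.e is never read by the upper query, so if expensive_fn neither
 * returns a set nor is volatile, its tlist entry becomes a NULL constant
 * of the same type and the function is never evaluated.
 */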
4172
4173/*
4174 * create_partial_bitmap_paths
4175 * Build partial bitmap heap path for the relation
4176 */
4177void
4178create_partial_bitmap_paths(PlannerInfo *root, RelOptInfo *rel,
 4179 Path *bitmapqual)
4180{
4181 int parallel_workers;
4182 double pages_fetched;
4183
4184 /* Compute heap pages for bitmap heap scan */
4185 pages_fetched = compute_bitmap_pages(root, rel, bitmapqual, 1.0,
4186 NULL, NULL);
4187
4188 parallel_workers = compute_parallel_worker(rel, pages_fetched, -1,
 4189 max_parallel_workers_per_gather);
 4190
4191 if (parallel_workers <= 0)
4192 return;
4193
 4194 add_partial_path(rel, (Path *) create_bitmap_heap_path(root, rel,
 4195 bitmapqual, rel->lateral_relids, 1.0, parallel_workers));
4196}
4197
4198/*
4199 * Compute the number of parallel workers that should be used to scan a
4200 * relation. We compute the parallel workers based on the size of the heap to
4201 * be scanned and the size of the index to be scanned, then choose the
4202 * minimum of the two.
4203 *
4204 * "heap_pages" is the number of pages from the table that we expect to scan, or
4205 * -1 if we don't expect to scan any.
4206 *
4207 * "index_pages" is the number of pages from the index that we expect to scan, or
4208 * -1 if we don't expect to scan any.
4209 *
4210 * "max_workers" is caller's limit on the number of workers. This typically
4211 * comes from a GUC.
4212 */
4213int
4214compute_parallel_worker(RelOptInfo *rel, double heap_pages, double index_pages,
4215 int max_workers)
4216{
4217 int parallel_workers = 0;
4218
4219 /*
4220 * If the user has set the parallel_workers reloption, use that; otherwise
4221 * select a default number of workers.
4222 */
4223 if (rel->rel_parallel_workers != -1)
4224 parallel_workers = rel->rel_parallel_workers;
4225 else
4226 {
4227 /*
4228 * If the number of pages being scanned is insufficient to justify a
4229 * parallel scan, just return zero ... unless it's an inheritance
4230 * child. In that case, we want to generate a parallel path here
4231 * anyway. It might not be worthwhile just for this relation, but
4232 * when combined with all of its inheritance siblings it may well pay
4233 * off.
4234 */
4235 if (rel->reloptkind == RELOPT_BASEREL &&
4236 ((heap_pages >= 0 && heap_pages < min_parallel_table_scan_size) ||
4237 (index_pages >= 0 && index_pages < min_parallel_index_scan_size)))
4238 return 0;
4239
4240 if (heap_pages >= 0)
4241 {
4242 int heap_parallel_threshold;
4243 int heap_parallel_workers = 1;
4244
4245 /*
4246 * Select the number of workers based on the log of the size of
4247 * the relation. This probably needs to be a good deal more
4248 * sophisticated, but we need something here for now. Note that
4249 * the upper limit of the min_parallel_table_scan_size GUC is
4250 * chosen to prevent overflow here.
4251 */
4252 heap_parallel_threshold = Max(min_parallel_table_scan_size, 1);
4253 while (heap_pages >= (BlockNumber) (heap_parallel_threshold * 3))
4254 {
4255 heap_parallel_workers++;
4256 heap_parallel_threshold *= 3;
4257 if (heap_parallel_threshold > INT_MAX / 3)
4258 break; /* avoid overflow */
4259 }
4260
4261 parallel_workers = heap_parallel_workers;
4262 }
4263
4264 if (index_pages >= 0)
4265 {
4266 int index_parallel_workers = 1;
4267 int index_parallel_threshold;
4268
4269 /* same calculation as for heap_pages above */
4270 index_parallel_threshold = Max(min_parallel_index_scan_size, 1);
4271 while (index_pages >= (BlockNumber) (index_parallel_threshold * 3))
4272 {
4273 index_parallel_workers++;
4274 index_parallel_threshold *= 3;
4275 if (index_parallel_threshold > INT_MAX / 3)
4276 break; /* avoid overflow */
4277 }
4278
4279 if (parallel_workers > 0)
4280 parallel_workers = Min(parallel_workers, index_parallel_workers);
4281 else
4282 parallel_workers = index_parallel_workers;
4283 }
4284 }
4285
4286 /* In no case use more than caller supplied maximum number of workers */
4287 parallel_workers = Min(parallel_workers, max_workers);
4288
4289 return parallel_workers;
4290}
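The heap and index loops above implement a log-base-3 ramp: one worker once the threshold is reached, plus one more each time the page count triples. A standalone sketch of that rule (the 1024-page threshold is illustrative; it matches the default min_parallel_table_scan_size of 8MB in 8kB pages):

#include <stdio.h>

/* one worker at `threshold` pages, one more per tripling, as in the loops
 * above; the overflow guard and caller-supplied cap are omitted here */
static int
demo_workers(double pages, int threshold)
{
	int			workers = 1;

	if (pages < threshold)
		return 0;				/* too small to bother parallelizing */
	while (pages >= (double) threshold * 3)
	{
		workers++;
		threshold *= 3;
	}
	return workers;
}

int
main(void)
{
	double		sizes[] = {512, 1024, 3072, 9216, 27648};

	for (int i = 0; i < 5; i++)
		printf("%6.0f pages -> %d workers\n",
			   sizes[i], demo_workers(sizes[i], 1024));
	return 0;
}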
4291
4292/*
4293 * generate_partitionwise_join_paths
4294 * Create paths representing partitionwise join for given partitioned
4295 * join relation.
4296 *
4297 * This must not be called until after we are done adding paths for all
4298 * child-joins. Otherwise, add_path might delete a path to which some path
4299 * generated here has a reference.
4300 */
4301void
4302generate_partitionwise_join_paths(PlannerInfo *root, RelOptInfo *rel)
4303{
4304 List *live_children = NIL;
4305 int cnt_parts;
4306 int num_parts;
4307 RelOptInfo **part_rels;
4308
4309 /* Handle only join relations here. */
4310 if (!IS_JOIN_REL(rel))
4311 return;
4312
4313 /* We've nothing to do if the relation is not partitioned. */
4314 if (!IS_PARTITIONED_REL(rel))
4315 return;
4316
4317 /* The relation should have consider_partitionwise_join set. */
 4318 Assert(rel->consider_partitionwise_join);
 4319
4320 /* Guard against stack overflow due to overly deep partition hierarchy. */
 4321 check_stack_depth();
 4322
4323 num_parts = rel->nparts;
4324 part_rels = rel->part_rels;
4325
4326 /* Collect non-dummy child-joins. */
4327 for (cnt_parts = 0; cnt_parts < num_parts; cnt_parts++)
4328 {
4329 RelOptInfo *child_rel = part_rels[cnt_parts];
4330
4331 /* If it's been pruned entirely, it's certainly dummy. */
4332 if (child_rel == NULL)
4333 continue;
4334
4335 /* Make partitionwise join paths for this partitioned child-join. */
 4336 generate_partitionwise_join_paths(root, child_rel);
 4337
4338 /* If we failed to make any path for this child, we must give up. */
4339 if (child_rel->pathlist == NIL)
4340 {
4341 /*
4342 * Mark the parent joinrel as unpartitioned so that later
4343 * functions treat it correctly.
4344 */
4345 rel->nparts = 0;
4346 return;
4347 }
4348
4349 /* Else, identify the cheapest path for it. */
4350 set_cheapest(child_rel);
4351
4352 /* Dummy children need not be scanned, so ignore those. */
4353 if (IS_DUMMY_REL(child_rel))
4354 continue;
4355
4356#ifdef OPTIMIZER_DEBUG
4357 pprint(child_rel);
4358#endif
4359
4360 live_children = lappend(live_children, child_rel);
4361 }
4362
4363 /* If all child-joins are dummy, parent join is also dummy. */
4364 if (!live_children)
4365 {
4366 mark_dummy_rel(rel);
4367 return;
4368 }
4369
4370 /* Build additional paths for this rel from child-join paths. */
4371 add_paths_to_append_rel(root, rel, live_children);
4372 list_free(live_children);
4373}
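An editorial end-to-end example (hypothetical tables t1 and t2, partitioned identically, say BY RANGE (id)):

/*
 *	SET enable_partitionwise_join = on;
 *	SELECT * FROM t1 JOIN t2 USING (id);
 *
 * each pair of matching partitions is planned as a child-join; the
 * function above gathers the surviving child-joins and appends their
 * paths into paths for the parent join relation via
 * add_paths_to_append_rel().
 */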