/*-------------------------------------------------------------------------
 *
 * allpaths.c
 *	  Routines to find possible search paths for processing a query
 *
 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/optimizer/path/allpaths.c
 *
 *-------------------------------------------------------------------------
 */
15
#include "postgres.h"

#include <limits.h>
#include <math.h>

#include "access/sysattr.h"
#include "access/tsmapi.h"
#include "catalog/pg_class.h"
#include "catalog/pg_operator.h"
#include "catalog/pg_proc.h"
#include "foreign/fdwapi.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "nodes/supportnodes.h"
#ifdef OPTIMIZER_DEBUG
#include "nodes/print.h"
#endif
#include "optimizer/appendinfo.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/geqo.h"
#include "optimizer/optimizer.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/plancat.h"
#include "optimizer/planner.h"
#include "optimizer/prep.h"
#include "optimizer/tlist.h"
#include "parser/parse_clause.h"
#include "parser/parsetree.h"
#include "partitioning/partbounds.h"
#include "port/pg_bitutils.h"
#include "rewrite/rewriteManip.h"
#include "utils/lsyscache.h"
#include "utils/selfuncs.h"
52
53
54/* Bitmask flags for pushdown_safety_info.unsafeFlags */
55#define UNSAFE_HAS_VOLATILE_FUNC (1 << 0)
56#define UNSAFE_HAS_SET_FUNC (1 << 1)
57#define UNSAFE_NOTIN_DISTINCTON_CLAUSE (1 << 2)
58#define UNSAFE_NOTIN_PARTITIONBY_CLAUSE (1 << 3)
59#define UNSAFE_TYPE_MISMATCH (1 << 4)
60
61/* results of subquery_is_pushdown_safe */
63{
64 unsigned char *unsafeFlags; /* bitmask of reasons why this target list
65 * column is unsafe for qual pushdown, or 0 if
66 * no reason. */
67 bool unsafeVolatile; /* don't push down volatile quals */
68 bool unsafeLeaky; /* don't push down leaky quals */
70
/* Return type for qual_is_pushdown_safe */
typedef enum pushdown_safe_type
{
	PUSHDOWN_UNSAFE,			/* unsafe to push qual into subquery */
	PUSHDOWN_SAFE,				/* safe to push qual into subquery */
	PUSHDOWN_WINDOWCLAUSE_RUNCOND,	/* unsafe, but may work as WindowClause
									 * run condition */
} pushdown_safe_type;

80/* These parameters are set by GUC */
81bool enable_geqo = false; /* just in case GUC doesn't set it */
87
88/* Hook for plugins to get control in set_rel_pathlist() */
90
91/* Hook for plugins to replace standard_join_search() */
93
94
99static void set_rel_size(PlannerInfo *root, RelOptInfo *rel,
100 Index rti, RangeTblEntry *rte);
102 Index rti, RangeTblEntry *rte);
119 Index rti, RangeTblEntry *rte);
121 Index rti, RangeTblEntry *rte);
127 RelOptInfo *rel,
129static void accumulate_append_subpath(Path *path,
130 List **subpaths,
132 List **child_append_relid_sets);
134 List **child_append_relid_sets);
135static void set_dummy_rel_pathlist(RelOptInfo *rel);
137 Index rti, RangeTblEntry *rte);
153static bool subquery_is_pushdown_safe(Query *subquery, Query *topquery,
157static void check_output_expressions(Query *subquery,
159static void compare_tlist_datatypes(List *tlist, List *colTypes,
163 RestrictInfo *rinfo,
165static void subquery_push_qual(Query *subquery,
166 RangeTblEntry *rte, Index rti, Node *qual);
168 RangeTblEntry *rte, Index rti, Node *qual);
169static void remove_unused_subquery_outputs(Query *subquery, RelOptInfo *rel,
171
172
173/*
174 * make_one_rel
175 * Finds all possible access paths for executing a query, returning a
176 * single rel that represents the join of all base rels in the query.
177 */
180{
181 RelOptInfo *rel;
182 Index rti;
183 double total_pages;
184
185 /* Mark base rels as to whether we care about fast-start plans */
187
188 /*
189 * Compute size estimates and consider_parallel flags for each base rel.
190 */
192
193 /*
194 * Build grouped relations for simple rels (i.e., base or "other" member
195 * relations) where possible.
196 */
198
199 /*
200 * We should now have size estimates for every actual table involved in
201 * the query, and we also know which if any have been deleted from the
202 * query by join removal, pruned by partition pruning, or eliminated by
203 * constraint exclusion. So we can now compute total_table_pages.
204 *
205 * Note that appendrels are not double-counted here, even though we don't
206 * bother to distinguish RelOptInfos for appendrel parents, because the
207 * parents will have pages = 0.
208 *
209 * XXX if a table is self-joined, we will count it once per appearance,
210 * which perhaps is the wrong thing ... but that's not completely clear,
211 * and detecting self-joins here is difficult, so ignore it for now.
212 */
213 total_pages = 0;
214 for (rti = 1; rti < root->simple_rel_array_size; rti++)
215 {
216 RelOptInfo *brel = root->simple_rel_array[rti];
217
218 /* there may be empty slots corresponding to non-baserel RTEs */
219 if (brel == NULL)
220 continue;
221
222 Assert(brel->relid == rti); /* sanity check on array */
223
224 if (IS_DUMMY_REL(brel))
225 continue;
226
227 if (IS_SIMPLE_REL(brel))
228 total_pages += (double) brel->pages;
229 }
230 root->total_table_pages = total_pages;
231
232 /*
233 * Generate access paths for each base rel.
234 */
236
237 /*
238 * Generate access paths for the entire join tree.
239 */
241
242 /*
243 * The result should join all and only the query's base + outer-join rels.
244 */
245 Assert(bms_equal(rel->relids, root->all_query_rels));
246
247 return rel;
248}
249
250/*
251 * set_base_rel_consider_startup
252 * Set the consider_[param_]startup flags for each base-relation entry.
253 *
254 * For the moment, we only deal with consider_param_startup here; because the
255 * logic for consider_startup is pretty trivial and is the same for every base
256 * relation, we just let build_simple_rel() initialize that flag correctly to
257 * start with. If that logic ever gets more complicated it would probably
258 * be better to move it here.
259 */
260static void
262{
263 /*
264 * Since parameterized paths can only be used on the inside of a nestloop
265 * join plan, there is usually little value in considering fast-start
266 * plans for them. However, for relations that are on the RHS of a SEMI
267 * or ANTI join, a fast-start plan can be useful because we're only going
268 * to care about fetching one tuple anyway.
269 *
270 * To minimize growth of planning time, we currently restrict this to
271 * cases where the RHS is a single base relation, not a join; there is no
272 * provision for consider_param_startup to get set at all on joinrels.
273 * Also we don't worry about appendrels. costsize.c's costing rules for
274 * nestloop semi/antijoins don't consider such cases either.
275 */
276 ListCell *lc;
277
278 foreach(lc, root->join_info_list)
279 {
281 int varno;
282
283 if ((sjinfo->jointype == JOIN_SEMI || sjinfo->jointype == JOIN_ANTI) &&
285 {
286 RelOptInfo *rel = find_base_rel(root, varno);
287
288 rel->consider_param_startup = true;
289 }
290 }
291}
292
293/*
294 * set_base_rel_sizes
295 * Set the size estimates (rows and widths) for each base-relation entry.
296 * Also determine whether to consider parallel paths for base relations.
297 *
298 * We do this in a separate pass over the base rels so that rowcount
299 * estimates are available for parameterized path generation, and also so
300 * that each rel's consider_parallel flag is set correctly before we begin to
301 * generate paths.
302 */
303static void
305{
306 Index rti;
307
308 for (rti = 1; rti < root->simple_rel_array_size; rti++)
309 {
310 RelOptInfo *rel = root->simple_rel_array[rti];
312
313 /* there may be empty slots corresponding to non-baserel RTEs */
314 if (rel == NULL)
315 continue;
316
317 Assert(rel->relid == rti); /* sanity check on array */
318
319 /* ignore RTEs that are "other rels" */
320 if (rel->reloptkind != RELOPT_BASEREL)
321 continue;
322
323 rte = root->simple_rte_array[rti];
324
325 /*
326 * If parallelism is allowable for this query in general, see whether
327 * it's allowable for this rel in particular. We have to do this
328 * before set_rel_size(), because (a) if this rel is an inheritance
329 * parent, set_append_rel_size() will use and perhaps change the rel's
330 * consider_parallel flag, and (b) for some RTE types, set_rel_size()
331 * goes ahead and makes paths immediately.
332 */
333 if (root->glob->parallelModeOK)
335
336 set_rel_size(root, rel, rti, rte);
337 }
338}
339
340/*
341 * setup_simple_grouped_rels
342 * For each simple relation, build a grouped simple relation if eager
343 * aggregation is possible and if this relation can produce grouped paths.
344 */
345static void
347{
348 Index rti;
349
350 /*
351 * If there are no aggregate expressions or grouping expressions, eager
352 * aggregation is not possible.
353 */
354 if (root->agg_clause_list == NIL ||
355 root->group_expr_list == NIL)
356 return;
357
358 for (rti = 1; rti < root->simple_rel_array_size; rti++)
359 {
360 RelOptInfo *rel = root->simple_rel_array[rti];
361
362 /* there may be empty slots corresponding to non-baserel RTEs */
363 if (rel == NULL)
364 continue;
365
366 Assert(rel->relid == rti); /* sanity check on array */
367 Assert(IS_SIMPLE_REL(rel)); /* sanity check on rel */
368
370 }
371}
372
373/*
374 * set_base_rel_pathlists
375 * Finds all paths available for scanning each base-relation entry.
376 * Sequential scan and any available indices are considered.
377 * Each useful path is attached to its relation's 'pathlist' field.
378 */
379static void
381{
382 Index rti;
383
384 for (rti = 1; rti < root->simple_rel_array_size; rti++)
385 {
386 RelOptInfo *rel = root->simple_rel_array[rti];
387
388 /* there may be empty slots corresponding to non-baserel RTEs */
389 if (rel == NULL)
390 continue;
391
392 Assert(rel->relid == rti); /* sanity check on array */
393
394 /* ignore RTEs that are "other rels" */
395 if (rel->reloptkind != RELOPT_BASEREL)
396 continue;
397
398 set_rel_pathlist(root, rel, rti, root->simple_rte_array[rti]);
399 }
400}
401
402/*
403 * set_rel_size
404 * Set size estimates for a base relation
405 */
406static void
408 Index rti, RangeTblEntry *rte)
409{
410 if (rel->reloptkind == RELOPT_BASEREL &&
412 {
413 /*
414 * We proved we don't need to scan the rel via constraint exclusion,
415 * so set up a single dummy path for it. Here we only check this for
416 * regular baserels; if it's an otherrel, CE was already checked in
417 * set_append_rel_size().
418 *
419 * In this case, we go ahead and set up the relation's path right away
420 * instead of leaving it for set_rel_pathlist to do. This is because
421 * we don't have a convention for marking a rel as dummy except by
422 * assigning a dummy path to it.
423 */
425 }
426 else if (rte->inh)
427 {
428 /* It's an "append relation", process accordingly */
429 set_append_rel_size(root, rel, rti, rte);
430 }
431 else
432 {
433 switch (rel->rtekind)
434 {
435 case RTE_RELATION:
436 if (rte->relkind == RELKIND_FOREIGN_TABLE)
437 {
438 /* Foreign table */
440 }
441 else if (rte->relkind == RELKIND_PARTITIONED_TABLE)
442 {
443 /*
444 * We could get here if asked to scan a partitioned table
445 * with ONLY. In that case we shouldn't scan any of the
446 * partitions, so mark it as a dummy rel.
447 */
449 }
450 else if (rte->tablesample != NULL)
451 {
452 /* Sampled relation */
454 }
455 else
456 {
457 /* Plain relation */
459 }
460 break;
461 case RTE_SUBQUERY:
462
463 /*
464 * Subqueries don't support making a choice between
465 * parameterized and unparameterized paths, so just go ahead
466 * and build their paths immediately.
467 */
468 set_subquery_pathlist(root, rel, rti, rte);
469 break;
470 case RTE_FUNCTION:
472 break;
473 case RTE_TABLEFUNC:
475 break;
476 case RTE_VALUES:
478 break;
479 case RTE_CTE:
480
481 /*
482 * CTEs don't support making a choice between parameterized
483 * and unparameterized paths, so just go ahead and build their
484 * paths immediately.
485 */
486 if (rte->self_reference)
488 else
490 break;
492 /* Might as well just build the path immediately */
494 break;
495 case RTE_RESULT:
496 /* Might as well just build the path immediately */
498 break;
499 default:
500 elog(ERROR, "unexpected rtekind: %d", (int) rel->rtekind);
501 break;
502 }
503 }
504
505 /*
506 * We insist that all non-dummy rels have a nonzero rowcount estimate.
507 */
508 Assert(rel->rows > 0 || IS_DUMMY_REL(rel));
509}
510
511/*
512 * set_rel_pathlist
513 * Build access paths for a base relation
514 */
515static void
517 Index rti, RangeTblEntry *rte)
518{
519 if (IS_DUMMY_REL(rel))
520 {
521 /* We already proved the relation empty, so nothing more to do */
522 }
523 else if (rte->inh)
524 {
525 /* It's an "append relation", process accordingly */
526 set_append_rel_pathlist(root, rel, rti, rte);
527 }
528 else
529 {
530 switch (rel->rtekind)
531 {
532 case RTE_RELATION:
533 if (rte->relkind == RELKIND_FOREIGN_TABLE)
534 {
535 /* Foreign table */
537 }
538 else if (rte->tablesample != NULL)
539 {
540 /* Sampled relation */
542 }
543 else
544 {
545 /* Plain relation */
547 }
548 break;
549 case RTE_SUBQUERY:
550 /* Subquery --- fully handled during set_rel_size */
551 break;
552 case RTE_FUNCTION:
553 /* RangeFunction */
555 break;
556 case RTE_TABLEFUNC:
557 /* Table Function */
559 break;
560 case RTE_VALUES:
561 /* Values list */
563 break;
564 case RTE_CTE:
565 /* CTE reference --- fully handled during set_rel_size */
566 break;
568 /* tuplestore reference --- fully handled during set_rel_size */
569 break;
570 case RTE_RESULT:
571 /* simple Result --- fully handled during set_rel_size */
572 break;
573 default:
574 elog(ERROR, "unexpected rtekind: %d", (int) rel->rtekind);
575 break;
576 }
577 }
578
579 /*
580 * Allow a plugin to editorialize on the set of Paths for this base
581 * relation. It could add new paths (such as CustomPaths) by calling
582 * add_path(), or add_partial_path() if parallel aware. It could also
583 * delete or modify paths added by the core code.
584 */
586 (*set_rel_pathlist_hook) (root, rel, rti, rte);
587
588 /*
589 * If this is a baserel, we should normally consider gathering any partial
590 * paths we may have created for it. We have to do this after calling the
591 * set_rel_pathlist_hook, else it cannot add partial paths to be included
592 * here.
593 *
594 * However, if this is an inheritance child, skip it. Otherwise, we could
595 * end up with a very large number of gather nodes, each trying to grab
596 * its own pool of workers. Instead, we'll consider gathering partial
597 * paths for the parent appendrel.
598 *
599 * Also, if this is the topmost scan/join rel, we postpone gathering until
600 * the final scan/join targetlist is available (see grouping_planner).
601 */
602 if (rel->reloptkind == RELOPT_BASEREL &&
603 !bms_equal(rel->relids, root->all_query_rels))
605
606 /* Now find the cheapest of the paths for this rel */
607 set_cheapest(rel);
608
609 /*
610 * If a grouped relation for this rel exists, build partial aggregation
611 * paths for it.
612 *
613 * Note that this can only happen after we've called set_cheapest() for
614 * this base rel, because we need its cheapest paths.
615 */
617
618#ifdef OPTIMIZER_DEBUG
619 pprint(rel);
620#endif
621}
622
623/*
624 * set_plain_rel_size
625 * Set size estimates for a plain relation (no subquery, no inheritance)
626 */
627static void
629{
630 /*
631 * Test any partial indexes of rel for applicability. We must do this
632 * first since partial unique indexes can affect size estimates.
633 */
635
636 /* Mark rel with estimated output rows, width, etc */
638}
639
640/*
641 * If this relation could possibly be scanned from within a worker, then set
642 * its consider_parallel flag.
643 */
644static void
647{
648 /*
649 * The flag has previously been initialized to false, so we can just
650 * return if it becomes clear that we can't safely set it.
651 */
653
654 /* Don't call this if parallelism is disallowed for the entire query. */
655 Assert(root->glob->parallelModeOK);
656
657 /* This should only be called for baserels and appendrel children. */
658 Assert(IS_SIMPLE_REL(rel));
659
660 /* Assorted checks based on rtekind. */
661 switch (rte->rtekind)
662 {
663 case RTE_RELATION:
664
665 /*
666 * Currently, parallel workers can't access the leader's temporary
667 * tables. We could possibly relax this if we wrote all of its
668 * local buffers at the start of the query and made no changes
669 * thereafter (maybe we could allow hint bit changes), and if we
670 * taught the workers to read them. Writing a large number of
671 * temporary buffers could be expensive, though, and we don't have
672 * the rest of the necessary infrastructure right now anyway. So
673 * for now, bail out if we see a temporary table.
674 */
676 return;
677
678 /*
679 * Table sampling can be pushed down to workers if the sample
680 * function and its arguments are safe.
681 */
682 if (rte->tablesample != NULL)
683 {
684 char proparallel = func_parallel(rte->tablesample->tsmhandler);
685
687 return;
688 if (!is_parallel_safe(root, (Node *) rte->tablesample->args))
689 return;
690 }
691
692 /*
693 * Ask FDWs whether they can support performing a ForeignScan
694 * within a worker. Most often, the answer will be no. For
695 * example, if the nature of the FDW is such that it opens a TCP
696 * connection with a remote server, each parallel worker would end
697 * up with a separate connection, and these connections might not
698 * be appropriately coordinated between workers and the leader.
699 */
700 if (rte->relkind == RELKIND_FOREIGN_TABLE)
701 {
702 Assert(rel->fdwroutine);
703 if (!rel->fdwroutine->IsForeignScanParallelSafe)
704 return;
705 if (!rel->fdwroutine->IsForeignScanParallelSafe(root, rel, rte))
706 return;
707 }
708
709 /*
710 * There are additional considerations for appendrels, which we'll
711 * deal with in set_append_rel_size and set_append_rel_pathlist.
712 * For now, just set consider_parallel based on the rel's own
713 * quals and targetlist.
714 */
715 break;
716
717 case RTE_SUBQUERY:
718
719 /*
720 * There's no intrinsic problem with scanning a subquery-in-FROM
721 * (as distinct from a SubPlan or InitPlan) in a parallel worker.
722 * If the subquery doesn't happen to have any parallel-safe paths,
723 * then flagging it as consider_parallel won't change anything,
724 * but that's true for plain tables, too. We must set
725 * consider_parallel based on the rel's own quals and targetlist,
726 * so that if a subquery path is parallel-safe but the quals and
727 * projection we're sticking onto it are not, we correctly mark
728 * the SubqueryScanPath as not parallel-safe. (Note that
729 * set_subquery_pathlist() might push some of these quals down
730 * into the subquery itself, but that doesn't change anything.)
731 *
732 * We can't push sub-select containing LIMIT/OFFSET to workers as
733 * there is no guarantee that the row order will be fully
734 * deterministic, and applying LIMIT/OFFSET will lead to
735 * inconsistent results at the top-level. (In some cases, where
736 * the result is ordered, we could relax this restriction. But it
737 * doesn't currently seem worth expending extra effort to do so.)
738 */
739 {
740 Query *subquery = castNode(Query, rte->subquery);
741
742 if (limit_needed(subquery))
743 return;
744 }
745 break;
746
747 case RTE_JOIN:
748 /* Shouldn't happen; we're only considering baserels here. */
749 Assert(false);
750 return;
751
752 case RTE_FUNCTION:
753 /* Check for parallel-restricted functions. */
754 if (!is_parallel_safe(root, (Node *) rte->functions))
755 return;
756 break;
757
758 case RTE_TABLEFUNC:
759 /* not parallel safe */
760 return;
761
762 case RTE_VALUES:
763 /* Check for parallel-restricted functions. */
764 if (!is_parallel_safe(root, (Node *) rte->values_lists))
765 return;
766 break;
767
768 case RTE_CTE:
769
770 /*
771 * CTE tuplestores aren't shared among parallel workers, so we
772 * force all CTE scans to happen in the leader. Also, populating
773 * the CTE would require executing a subplan that's not available
774 * in the worker, might be parallel-restricted, and must get
775 * executed only once.
776 */
777 return;
778
780
781 /*
782 * tuplestore cannot be shared, at least without more
783 * infrastructure to support that.
784 */
785 return;
786
787 case RTE_RESULT:
788 /* RESULT RTEs, in themselves, are no problem. */
789 break;
790
791 case RTE_GRAPH_TABLE:
792
793 /*
794 * Shouldn't happen since these are replaced by subquery RTEs when
795 * rewriting queries.
796 */
797 Assert(false);
798 return;
799
800 case RTE_GROUP:
801 /* Shouldn't happen; we're only considering baserels here. */
802 Assert(false);
803 return;
804 }
805
806 /*
807 * If there's anything in baserestrictinfo that's parallel-restricted, we
808 * give up on parallelizing access to this relation. We could consider
809 * instead postponing application of the restricted quals until we're
810 * above all the parallelism in the plan tree, but it's not clear that
811 * that would be a win in very many cases, and it might be tricky to make
812 * outer join clauses work correctly. It would likely break equivalence
813 * classes, too.
814 */
816 return;
817
818 /*
819 * Likewise, if the relation's outputs are not parallel-safe, give up.
820 * (Usually, they're just Vars, but sometimes they're not.)
821 */
822 if (!is_parallel_safe(root, (Node *) rel->reltarget->exprs))
823 return;
824
825 /* We have a winner. */
826 rel->consider_parallel = true;
827}
828
829/*
830 * set_plain_rel_pathlist
831 * Build access paths for a plain relation (no subquery, no inheritance)
832 */
833static void
835{
837
838 /*
839 * We don't support pushing join clauses into the quals of a seqscan, but
840 * it could still have required parameterization due to LATERAL refs in
841 * its tlist.
842 */
844
845 /*
846 * Consider TID scans.
847 *
848 * If create_tidscan_paths returns true, then a TID scan path is forced.
849 * This happens when rel->baserestrictinfo contains CurrentOfExpr, because
850 * the executor can't handle any other type of path for such queries.
851 * Hence, we return without adding any other paths.
852 */
853 if (create_tidscan_paths(root, rel))
854 return;
855
856 /* Consider sequential scan */
858
859 /* If appropriate, consider parallel sequential scan */
862
863 /* Consider index scans */
865}
866
867/*
868 * create_plain_partial_paths
869 * Build partial access paths for parallel scan of a plain relation
870 */
871static void
873{
874 int parallel_workers;
875
876 parallel_workers = compute_parallel_worker(rel, rel->pages, -1,
878
879 /* If any limit was set to zero, the user doesn't want a parallel scan. */
880 if (parallel_workers <= 0)
881 return;
882
883 /* Add an unordered partial path based on a parallel sequential scan. */
884 add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers));
885}
886
887/*
888 * set_tablesample_rel_size
889 * Set size estimates for a sampled relation
890 */
891static void
893{
894 TableSampleClause *tsc = rte->tablesample;
896 BlockNumber pages;
897 double tuples;
898
899 /*
900 * Test any partial indexes of rel for applicability. We must do this
901 * first since partial unique indexes can affect size estimates.
902 */
904
905 /*
906 * Call the sampling method's estimation function to estimate the number
907 * of pages it will read and the number of tuples it will return. (Note:
908 * we assume the function returns sane values.)
909 */
910 tsm = GetTsmRoutine(tsc->tsmhandler);
911 tsm->SampleScanGetSampleSize(root, rel, tsc->args,
912 &pages, &tuples);
913
914 /*
915 * For the moment, because we will only consider a SampleScan path for the
916 * rel, it's okay to just overwrite the pages and tuples estimates for the
917 * whole relation. If we ever consider multiple path types for sampled
918 * rels, we'll need more complication.
919 */
920 rel->pages = pages;
921 rel->tuples = tuples;
922
923 /* Mark rel with estimated output rows, width, etc */
925}
926
927/*
928 * set_tablesample_rel_pathlist
929 * Build access paths for a sampled relation
930 */
931static void
933{
935 Path *path;
936
937 /*
938 * We don't support pushing join clauses into the quals of a samplescan,
939 * but it could still have required parameterization due to LATERAL refs
940 * in its tlist or TABLESAMPLE arguments.
941 */
943
944 /* Consider sampled scan */
946
947 /*
948 * If the sampling method does not support repeatable scans, we must avoid
949 * plans that would scan the rel multiple times. Ideally, we'd simply
950 * avoid putting the rel on the inside of a nestloop join; but adding such
951 * a consideration to the planner seems like a great deal of complication
952 * to support an uncommon usage of second-rate sampling methods. Instead,
953 * if there is a risk that the query might perform an unsafe join, just
954 * wrap the SampleScan in a Materialize node. We can check for joins by
955 * counting the membership of all_query_rels (note that this correctly
956 * counts inheritance trees as single rels). If we're inside a subquery,
957 * we can't easily check whether a join might occur in the outer query, so
958 * just assume one is possible.
959 *
960 * GetTsmRoutine is relatively expensive compared to the other tests here,
961 * so check repeatable_across_scans last, even though that's a bit odd.
962 */
963 if ((root->query_level > 1 ||
964 bms_membership(root->all_query_rels) != BMS_SINGLETON) &&
965 !(GetTsmRoutine(rte->tablesample->tsmhandler)->repeatable_across_scans))
966 {
967 path = (Path *) create_material_path(rel, path, true);
968 }
969
970 add_path(rel, path);
971
972 /* For the moment, at least, there are no other paths to consider */
973}
974
975/*
976 * set_foreign_size
977 * Set size estimates for a foreign table RTE
978 */
979static void
981{
982 /* Mark rel with estimated output rows, width, etc */
984
985 /* Let FDW adjust the size estimates, if it can */
986 rel->fdwroutine->GetForeignRelSize(root, rel, rte->relid);
987
988 /* ... but do not let it set the rows estimate to zero */
989 rel->rows = clamp_row_est(rel->rows);
990
991 /*
992 * Also, make sure rel->tuples is not insane relative to rel->rows.
993 * Notably, this ensures sanity if pg_class.reltuples contains -1 and the
994 * FDW doesn't do anything to replace that.
995 */
996 rel->tuples = Max(rel->tuples, rel->rows);
997}
998
999/*
1000 * set_foreign_pathlist
1001 * Build access paths for a foreign table RTE
1002 */
1003static void
1005{
1006 /* Call the FDW's GetForeignPaths function to generate path(s) */
1007 rel->fdwroutine->GetForeignPaths(root, rel, rte->relid);
1008}
1009
1010/*
1011 * set_append_rel_size
1012 * Set size estimates for a simple "append relation"
1013 *
1014 * The passed-in rel and RTE represent the entire append relation. The
1015 * relation's contents are computed by appending together the output of the
1016 * individual member relations. Note that in the non-partitioned inheritance
1017 * case, the first member relation is actually the same table as is mentioned
1018 * in the parent RTE ... but it has a different RTE and RelOptInfo. This is
1019 * a good thing because their outputs are not the same size.
1020 */
1021static void
1023 Index rti, RangeTblEntry *rte)
1024{
1025 int parentRTindex = rti;
1026 bool has_live_children;
1027 double parent_tuples;
1028 double parent_rows;
1029 double parent_size;
1030 double *parent_attrsizes;
1031 int nattrs;
1032 ListCell *l;
1033
1034 /* Guard against stack overflow due to overly deep inheritance tree. */
1036
1037 Assert(IS_SIMPLE_REL(rel));
1038
1039 /*
1040 * If this is a partitioned baserel, set the consider_partitionwise_join
1041 * flag; currently, we only consider partitionwise joins with the baserel
1042 * if its targetlist doesn't contain a whole-row Var.
1043 */
1045 rel->reloptkind == RELOPT_BASEREL &&
1046 rte->relkind == RELKIND_PARTITIONED_TABLE &&
1047 bms_is_empty(rel->attr_needed[InvalidAttrNumber - rel->min_attr]))
1048 rel->consider_partitionwise_join = true;
1049
1050 /*
1051 * Initialize to compute size estimates for whole append relation.
1052 *
1053 * We handle tuples estimates by setting "tuples" to the total number of
1054 * tuples accumulated from each live child, rather than using "rows".
1055 * Although an appendrel itself doesn't directly enforce any quals, its
1056 * child relations may. Therefore, setting "tuples" equal to "rows" for
1057 * an appendrel isn't always appropriate, and can lead to inaccurate cost
1058 * estimates. For example, when estimating the number of distinct values
1059 * from an appendrel, we would be unable to adjust the estimate based on
1060 * the restriction selectivity (see estimate_num_groups).
1061 *
1062 * We handle width estimates by weighting the widths of different child
1063 * rels proportionally to their number of rows. This is sensible because
1064 * the use of width estimates is mainly to compute the total relation
1065 * "footprint" if we have to sort or hash it. To do this, we sum the
1066 * total equivalent size (in "double" arithmetic) and then divide by the
1067 * total rowcount estimate. This is done separately for the total rel
1068 * width and each attribute.
1069 *
1070 * Note: if you consider changing this logic, beware that child rels could
1071 * have zero rows and/or width, if they were excluded by constraints.
1072 */
1073 has_live_children = false;
1074 parent_tuples = 0;
1075 parent_rows = 0;
1076 parent_size = 0;
1077 nattrs = rel->max_attr - rel->min_attr + 1;
1078 parent_attrsizes = (double *) palloc0(nattrs * sizeof(double));
1079
1080 foreach(l, root->append_rel_list)
1081 {
1083 int childRTindex;
1089 ListCell *lc;
1090
1091 /* append_rel_list contains all append rels; ignore others */
1092 if (appinfo->parent_relid != parentRTindex)
1093 continue;
1094
1095 childRTindex = appinfo->child_relid;
1096 childRTE = root->simple_rte_array[childRTindex];
1097
1098 /*
1099 * The child rel's RelOptInfo was already created during
1100 * add_other_rels_to_query.
1101 */
1103 Assert(childrel->reloptkind == RELOPT_OTHER_MEMBER_REL);
1104
1105 /* We may have already proven the child to be dummy. */
1107 continue;
1108
1109 /*
1110 * We have to copy the parent's targetlist and quals to the child,
1111 * with appropriate substitution of variables. However, the
1112 * baserestrictinfo quals were already copied/substituted when the
1113 * child RelOptInfo was built. So we don't need any additional setup
1114 * before applying constraint exclusion.
1115 */
1117 {
1118 /*
1119 * This child need not be scanned, so we can omit it from the
1120 * appendrel.
1121 */
1123 continue;
1124 }
1125
1126 /*
1127 * Constraint exclusion failed, so copy the parent's join quals and
1128 * targetlist to the child, with appropriate variable substitutions.
1129 *
1130 * We skip join quals that came from above outer joins that can null
1131 * this rel, since they would be of no value while generating paths
1132 * for the child. This saves some effort while processing the child
1133 * rel, and it also avoids an implementation restriction in
1134 * adjust_appendrel_attrs (it can't apply nullingrels to a non-Var).
1135 */
1136 childrinfos = NIL;
1137 foreach(lc, rel->joininfo)
1138 {
1139 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1140
1141 if (!bms_overlap(rinfo->clause_relids, rel->nulling_relids))
1144 (Node *) rinfo,
1145 1, &appinfo));
1146 }
1147 childrel->joininfo = childrinfos;
1148
1149 /*
1150 * Now for the child's targetlist.
1151 *
1152 * NB: the resulting childrel->reltarget->exprs may contain arbitrary
1153 * expressions, which otherwise would not occur in a rel's targetlist.
1154 * Code that might be looking at an appendrel child must cope with
1155 * such. (Normally, a rel's targetlist would only include Vars and
1156 * PlaceHolderVars.) XXX we do not bother to update the cost or width
1157 * fields of childrel->reltarget; not clear if that would be useful.
1158 */
1159 childrel->reltarget->exprs = (List *)
1161 (Node *) rel->reltarget->exprs,
1162 1, &appinfo);
1163
1164 /*
1165 * We have to make child entries in the EquivalenceClass data
1166 * structures as well. This is needed either if the parent
1167 * participates in some eclass joins (because we will want to consider
1168 * inner-indexscan joins on the individual children) or if the parent
1169 * has useful pathkeys (because we should try to build MergeAppend
1170 * paths that produce those sort orderings).
1171 */
1172 if (rel->has_eclass_joins || has_useful_pathkeys(root, rel))
1174 childrel->has_eclass_joins = rel->has_eclass_joins;
1175
1176 /*
1177 * Note: we could compute appropriate attr_needed data for the child's
1178 * variables, by transforming the parent's attr_needed through the
1179 * translated_vars mapping. However, currently there's no need
1180 * because attr_needed is only examined for base relations not
1181 * otherrels. So we just leave the child's attr_needed empty.
1182 */
1183
1184 /*
1185 * If we consider partitionwise joins with the parent rel, do the same
1186 * for partitioned child rels.
1187 *
1188 * Note: here we abuse the consider_partitionwise_join flag by setting
1189 * it for child rels that are not themselves partitioned. We do so to
1190 * tell try_partitionwise_join() that the child rel is sufficiently
1191 * valid to be used as a per-partition input, even if it later gets
1192 * proven to be dummy. (It's not usable until we've set up the
1193 * reltarget and EC entries, which we just did.)
1194 */
1196 childrel->consider_partitionwise_join = true;
1197
1198 /*
1199 * If parallelism is allowable for this query in general, see whether
1200 * it's allowable for this childrel in particular. But if we've
1201 * already decided the appendrel is not parallel-safe as a whole,
1202 * there's no point in considering parallelism for this child. For
1203 * consistency, do this before calling set_rel_size() for the child.
1204 */
1205 if (root->glob->parallelModeOK && rel->consider_parallel)
1207
1208 /*
1209 * Compute the child's size.
1210 */
1212
1213 /*
1214 * It is possible that constraint exclusion detected a contradiction
1215 * within a child subquery, even though we didn't prove one above. If
1216 * so, we can skip this child.
1217 */
1219 continue;
1220
1221 /* We have at least one live child. */
1222 has_live_children = true;
1223
1224 /*
1225 * If any live child is not parallel-safe, treat the whole appendrel
1226 * as not parallel-safe. In future we might be able to generate plans
1227 * in which some children are farmed out to workers while others are
1228 * not; but we don't have that today, so it's a waste to consider
1229 * partial paths anywhere in the appendrel unless it's all safe.
1230 * (Child rels visited before this one will be unmarked in
1231 * set_append_rel_pathlist().)
1232 */
1233 if (!childrel->consider_parallel)
1234 rel->consider_parallel = false;
1235
1236 /*
1237 * Accumulate size information from each live child.
1238 */
1239 Assert(childrel->rows > 0);
1240
1241 parent_tuples += childrel->tuples;
1242 parent_rows += childrel->rows;
1243 parent_size += childrel->reltarget->width * childrel->rows;
1244
1245 /*
1246 * Accumulate per-column estimates too. We need not do anything for
1247 * PlaceHolderVars in the parent list. If child expression isn't a
1248 * Var, or we didn't record a width estimate for it, we have to fall
1249 * back on a datatype-based estimate.
1250 *
1251 * By construction, child's targetlist is 1-to-1 with parent's.
1252 */
1254 childvars, childrel->reltarget->exprs)
1255 {
1258
1259 if (IsA(parentvar, Var) && parentvar->varno == parentRTindex)
1260 {
1261 int pndx = parentvar->varattno - rel->min_attr;
1262 int32 child_width = 0;
1263
1264 if (IsA(childvar, Var) &&
1265 ((Var *) childvar)->varno == childrel->relid)
1266 {
1267 int cndx = ((Var *) childvar)->varattno - childrel->min_attr;
1268
1269 child_width = childrel->attr_widths[cndx];
1270 }
1271 if (child_width <= 0)
1274 Assert(child_width > 0);
1276 }
1277 }
1278 }
1279
1281 {
1282 /*
1283 * Save the finished size estimates.
1284 */
1285 int i;
1286
1287 Assert(parent_rows > 0);
1288 rel->tuples = parent_tuples;
1289 rel->rows = parent_rows;
1291 for (i = 0; i < nattrs; i++)
1292 rel->attr_widths[i] = rint(parent_attrsizes[i] / parent_rows);
1293
1294 /*
1295 * Note that we leave rel->pages as zero; this is important to avoid
1296 * double-counting the appendrel tree in total_table_pages.
1297 */
1298 }
1299 else
1300 {
1301 /*
1302 * All children were excluded by constraints, so mark the whole
1303 * appendrel dummy. We must do this in this phase so that the rel's
1304 * dummy-ness is visible when we generate paths for other rels.
1305 */
1307 }
1308
1310}
1311
1312/*
1313 * set_append_rel_pathlist
1314 * Build access paths for an "append relation"
1315 */
1316static void
1318 Index rti, RangeTblEntry *rte)
1319{
1320 int parentRTindex = rti;
1322 ListCell *l;
1323
1324 /*
1325 * Generate access paths for each member relation, and remember the
1326 * non-dummy children.
1327 */
1328 foreach(l, root->append_rel_list)
1329 {
1331 int childRTindex;
1334
1335 /* append_rel_list contains all append rels; ignore others */
1336 if (appinfo->parent_relid != parentRTindex)
1337 continue;
1338
1339 /* Re-locate the child RTE and RelOptInfo */
1340 childRTindex = appinfo->child_relid;
1341 childRTE = root->simple_rte_array[childRTindex];
1342 childrel = root->simple_rel_array[childRTindex];
1343
1344 /*
1345 * If set_append_rel_size() decided the parent appendrel was
1346 * parallel-unsafe at some point after visiting this child rel, we
1347 * need to propagate the unsafety marking down to the child, so that
1348 * we don't generate useless partial paths for it.
1349 */
1350 if (!rel->consider_parallel)
1351 childrel->consider_parallel = false;
1352
1353 /*
1354 * Compute the child's access paths.
1355 */
1357
1358 /*
1359 * If child is dummy, ignore it.
1360 */
1362 continue;
1363
1364 /*
1365 * Child is live, so add it to the live_childrels list for use below.
1366 */
1368 }
1369
1370 /* Add paths to the append relation. */
1372}
1373
1374/*
1375 * set_grouped_rel_pathlist
1376 * If a grouped relation for the given 'rel' exists, build partial
1377 * aggregation paths for it.
1378 */
1379static void
1381{
1382 RelOptInfo *grouped_rel;
1383
1384 /*
1385 * If there are no aggregate expressions or grouping expressions, eager
1386 * aggregation is not possible.
1387 */
1388 if (root->agg_clause_list == NIL ||
1389 root->group_expr_list == NIL)
1390 return;
1391
1392 /* Add paths to the grouped base relation if one exists. */
1393 grouped_rel = rel->grouped_rel;
1394 if (grouped_rel)
1395 {
1396 Assert(IS_GROUPED_REL(grouped_rel));
1397
1398 generate_grouped_paths(root, grouped_rel, rel);
1399 set_cheapest(grouped_rel);
1400 }
1401}
1402
1403
1404/*
1405 * add_paths_to_append_rel
1406 * Generate paths for the given append relation given the set of non-dummy
1407 * child rels.
1408 *
1409 * The function collects all parameterizations and orderings supported by the
1410 * non-dummy children. For every such parameterization or ordering, it creates
1411 * an append path collecting one path from each non-dummy child with given
1412 * parameterization or ordering. Similarly it collects partial paths from
1413 * non-dummy children to create partial append paths.
1414 */
1415 void
1418 {
	/*
	 * NOTE(review): this listing came from a doxygen HTML export; every line
	 * that carried hyperlinked identifiers was dropped (signature, several
	 * declarations such as the other AppendPathInput locals, the
	 * all_child_pathkeys/all_child_outers lists, and many call lines), and
	 * each surviving line still carries its embedded doxygen line number.
	 * Restore this function from the upstream file before compiling; the
	 * text below is kept byte-for-byte as found.
	 */
1420 AppendPathInput startup = {0};
1423 bool unparameterized_valid = true;
1424 bool startup_valid = true;
1425 bool partial_only_valid = true;
1426 bool parallel_append_valid = true;
1429 ListCell *l;
1430 double partial_rows = -1;
1431
1432 /* If appropriate, consider parallel append */
1434
1435 /*
1436 * For every non-dummy child, remember the cheapest path. Also, identify
1437 * all pathkeys (orderings) and parameterizations (required_outer sets)
1438 * available for the non-dummy member relations.
1439 */
1440 foreach(l, live_childrels)
1441 {
1443 ListCell *lcp;
1445
1446 /*
1447 * If child has an unparameterized cheapest-total path, add that to
1448 * the unparameterized Append path we are constructing for the parent.
1449 * If not, there's no workable unparameterized path.
1450 *
1451 * With partitionwise aggregates, the child rel's pathlist may be
1452 * empty, so don't assume that a path exists here.
1453 */
1454 if (childrel->pathlist != NIL &&
1455 childrel->cheapest_total_path->param_info == NULL)
1456 accumulate_append_subpath(childrel->cheapest_total_path,
1457 &unparameterized.subpaths, NULL, &unparameterized.child_append_relid_sets);
1458 else
1459 unparameterized_valid = false;
1460
1461 /*
1462 * When the planner is considering cheap startup plans, we'll also
1463 * collect all the cheapest_startup_paths (if set) and build an
1464 * AppendPath containing those as subpaths.
1465 */
1466 if (rel->consider_startup && childrel->cheapest_startup_path != NULL)
1467 {
1469
1470 /*
1471 * With an indication of how many tuples the query should provide,
1472 * the optimizer tries to choose the path optimal for that
1473 * specific number of tuples.
1474 */
1475 if (root->tuple_fraction > 0.0)
1478 root->tuple_fraction);
1479 else
1480 cheapest_path = childrel->cheapest_startup_path;
1481
1482 /* cheapest_startup_path must not be a parameterized path. */
1483 Assert(cheapest_path->param_info == NULL);
1485 &startup.subpaths,
1486 NULL,
1487 &startup.child_append_relid_sets);
1488 }
1489 else
1490 startup_valid = false;
1491
1492
1493 /* Same idea, but for a partial plan. */
1494 if (childrel->partial_pathlist != NIL)
1495 {
1496 cheapest_partial_path = linitial(childrel->partial_pathlist);
1498 &partial_only.partial_subpaths, NULL,
1499 &partial_only.child_append_relid_sets);
1500 }
1501 else
1502 partial_only_valid = false;
1503
1504 /*
1505 * Same idea, but for a parallel append mixing partial and non-partial
1506 * paths.
1507 */
1509 {
1510 Path *nppath = NULL;
1511
1512 nppath =
1514
1516 {
1517 /* Neither a partial nor a parallel-safe path? Forget it. */
1518 parallel_append_valid = false;
1519 }
1520 else if (nppath == NULL ||
1522 cheapest_partial_path->total_cost < nppath->total_cost))
1523 {
1524 /* Partial path is cheaper or the only option. */
1527 &parallel_append.partial_subpaths,
1528 &parallel_append.subpaths,
1529 &parallel_append.child_append_relid_sets);
1530 }
1531 else
1532 {
1533 /*
1534 * Either we've got only a non-partial path, or we think that
1535 * a single backend can execute the best non-partial path
1536 * faster than all the parallel backends working together can
1537 * execute the best partial path.
1538 *
1539 * It might make sense to be more aggressive here. Even if
1540 * the best non-partial path is more expensive than the best
1541 * partial path, it could still be better to choose the
1542 * non-partial path if there are several such paths that can
1543 * be given to different workers. For now, we don't try to
1544 * figure that out.
1545 */
1547 &parallel_append.subpaths,
1548 NULL,
1549 &parallel_append.child_append_relid_sets);
1550 }
1551 }
1552
1553 /*
1554 * Collect lists of all the available path orderings and
1555 * parameterizations for all the children. We use these as a
1556 * heuristic to indicate which sort orderings and parameterizations we
1557 * should build Append and MergeAppend paths for.
1558 */
1559 foreach(lcp, childrel->pathlist)
1560 {
1561 Path *childpath = (Path *) lfirst(lcp);
1562 List *childkeys = childpath->pathkeys;
1564
1565 /* Unsorted paths don't contribute to pathkey list */
1566 if (childkeys != NIL)
1567 {
1568 ListCell *lpk;
1569 bool found = false;
1570
1571 /* Have we already seen this ordering? */
1572 foreach(lpk, all_child_pathkeys)
1573 {
1575
1578 {
1579 found = true;
1580 break;
1581 }
1582 }
1583 if (!found)
1584 {
1585 /* No, so add it to all_child_pathkeys */
1587 childkeys);
1588 }
1589 }
1590
1591 /* Unparameterized paths don't contribute to param-set list */
1592 if (childouter)
1593 {
1594 ListCell *lco;
1595 bool found = false;
1596
1597 /* Have we already seen this param set? */
1598 foreach(lco, all_child_outers)
1599 {
1601
1603 {
1604 found = true;
1605 break;
1606 }
1607 }
1608 if (!found)
1609 {
1610 /* No, so add it to all_child_outers */
1612 childouter);
1613 }
1614 }
1615 }
1616 }
1617
1618 /*
1619 * If we found unparameterized paths for all children, build an unordered,
1620 * unparameterized Append path for the rel. (Note: this is correct even
1621 * if we have zero or one live subpath due to constraint exclusion.)
1622 */
1625 NIL, NULL, 0, false,
1626 -1));
1627
1628 /* build an AppendPath for the cheap startup paths, if valid */
1629 if (startup_valid)
1630 add_path(rel, (Path *) create_append_path(root, rel, startup,
1631 NIL, NULL, 0, false, -1));
1632
1633 /*
1634 * Consider an append of unordered, unparameterized partial paths. Make
1635 * it parallel-aware if possible.
1636 */
1637 if (partial_only_valid && partial_only.partial_subpaths != NIL)
1638 {
1640 ListCell *lc;
1641 int parallel_workers = 0;
1642
1643 /* Find the highest number of workers requested for any subpath. */
1644 foreach(lc, partial_only.partial_subpaths)
1645 {
1646 Path *path = lfirst(lc);
1647
1648 parallel_workers = Max(parallel_workers, path->parallel_workers);
1649 }
1650 Assert(parallel_workers > 0);
1651
1652 /*
1653 * If the use of parallel append is permitted, always request at least
1654 * log2(# of children) workers. We assume it can be useful to have
1655 * extra workers in this case because they will be spread out across
1656 * the children. The precise formula is just a guess, but we don't
1657 * want to end up with a radically different answer for a table with N
1658 * partitions vs. an unpartitioned table with the same data, so the
1659 * use of some kind of log-scaling here seems to make some sense.
1660 */
1662 {
1663 parallel_workers = Max(parallel_workers,
1665 parallel_workers = Min(parallel_workers,
1667 }
1668 Assert(parallel_workers > 0);
1669
1670 /* Generate a partial append path. */
1672 NIL, NULL, parallel_workers,
1674 -1);
1675
1676 /*
1677 * Make sure any subsequent partial paths use the same row count
1678 * estimate.
1679 */
1680 partial_rows = appendpath->path.rows;
1681
1682 /* Add the path. */
1684 }
1685
1686 /*
1687 * Consider a parallel-aware append using a mix of partial and non-partial
1688 * paths. (This only makes sense if there's at least one child which has
1689 * a non-partial path that is substantially cheaper than any partial path;
1690 * otherwise, we should use the append path added in the previous step.)
1691 */
1692 if (parallel_append_valid && parallel_append.subpaths != NIL)
1693 {
1695 ListCell *lc;
1696 int parallel_workers = 0;
1697
1698 /*
1699 * Find the highest number of workers requested for any partial
1700 * subpath.
1701 */
1702 foreach(lc, parallel_append.partial_subpaths)
1703 {
1704 Path *path = lfirst(lc);
1705
1706 parallel_workers = Max(parallel_workers, path->parallel_workers);
1707 }
1708
1709 /*
1710 * Same formula here as above. It's even more important in this
1711 * instance because the non-partial paths won't contribute anything to
1712 * the planned number of parallel workers.
1713 */
1714 parallel_workers = Max(parallel_workers,
1716 parallel_workers = Min(parallel_workers,
1718 Assert(parallel_workers > 0);
1719
1721 NIL, NULL, parallel_workers, true,
1722 partial_rows);
1724 }
1725
1726 /*
1727 * Also build unparameterized ordered append paths based on the collected
1728 * list of child pathkeys.
1729 */
1733
1734 /*
1735 * Build Append paths for each parameterization seen among the child rels.
1736 * (This may look pretty expensive, but in most cases of practical
1737 * interest, the child rels will expose mostly the same parameterizations,
1738 * so that not that many cases actually get considered here.)
1739 *
1740 * The Append node itself cannot enforce quals, so all qual checking must
1741 * be done in the child paths. This means that to have a parameterized
1742 * Append path, we must have the exact same parameterization for each
1743 * child path; otherwise some children might be failing to check the
1744 * moved-down quals. To make them match up, we can try to increase the
1745 * parameterization of lesser-parameterized paths.
1746 */
1747 foreach(l, all_child_outers)
1748 {
1750 ListCell *lcr;
1752 bool parameterized_valid = true;
1753
1754 /* Select the child paths for an Append with this parameterization */
1755 foreach(lcr, live_childrels)
1756 {
1758 Path *subpath;
1759
1760 if (childrel->pathlist == NIL)
1761 {
1762 /* failed to make a suitable path for this child */
1763 parameterized_valid = false;
1764 break;
1765 }
1766
1768 childrel,
1770 if (subpath == NULL)
1771 {
1772 /* failed to make a suitable path for this child */
1773 parameterized_valid = false;
1774 break;
1775 }
1777 &parameterized.child_append_relid_sets);
1778 }
1779
1781 add_path(rel, (Path *)
1783 NIL, required_outer, 0, false,
1784 -1));
1785 }
1786
1787 /*
1788 * When there is only a single child relation, the Append path can inherit
1789 * any ordering available for the child rel's path, so that it's useful to
1790 * consider ordered partial paths. Above we only considered the cheapest
1791 * partial path for each child, but let's also make paths using any
1792 * partial paths that have pathkeys.
1793 */
1794 if (list_length(live_childrels) == 1)
1795 {
1797
1798 /* skip the cheapest partial path, since we already used that above */
1799 for_each_from(l, childrel->partial_pathlist, 1)
1800 {
1801 Path *path = (Path *) lfirst(l);
1803 AppendPathInput append = {0};
1804
1805 /* skip paths with no pathkeys. */
1806 if (path->pathkeys == NIL)
1807 continue;
1808
1811 path->parallel_workers, true,
1812 partial_rows);
1814 }
1815 }
1816}
1817
1818/*
1819 * generate_orderedappend_paths
1820 * Generate ordered append paths for an append relation
1821 *
1822 * Usually we generate MergeAppend paths here, but there are some special
1823 * cases where we can generate simple Append paths, because the subpaths
1824 * can provide tuples in the required order already.
1825 *
1826 * We generate a path for each ordering (pathkey list) appearing in
1827 * all_child_pathkeys.
1828 *
1829 * We consider the cheapest-startup and cheapest-total cases, and also the
1830 * cheapest-fractional case when not all tuples need to be retrieved. For each
1831 * interesting ordering, we collect all the cheapest startup subpaths, all the
1832 * cheapest total paths, and, if applicable, all the cheapest fractional paths,
1833 * and build a suitable path for each case.
1834 *
1835 * We don't currently generate any parameterized ordered paths here. While
1836 * it would not take much more code here to do so, it's very unclear that it
1837 * is worth the planning cycles to investigate such paths: there's little
1838 * use for an ordered path on the inside of a nestloop. In fact, it's likely
1839 * that the current coding of add_path would reject such paths out of hand,
1840 * because add_path gives no credit for sort ordering of parameterized paths,
1841 * and a parameterized MergeAppend is going to be more expensive than the
1842 * corresponding parameterized Append path. If we ever try harder to support
1843 * parameterized mergejoin plans, it might be worth adding support for
1844 * parameterized paths here to feed such joins. (See notes in
1845 * optimizer/README for why that might not ever happen, though.)
1846 */
1847static void
1851{
	/*
	 * NOTE(review): this listing came from a doxygen HTML export; every line
	 * that carried hyperlinked identifiers was dropped (signature, the
	 * partition-pathkey construction, the get_cheapest_*_path_for_pathkeys
	 * call heads, and several declarations), and each surviving line still
	 * carries its embedded doxygen line number.  Restore this function from
	 * the upstream file before compiling; the text below is kept
	 * byte-for-byte as found.
	 */
1852 ListCell *lcp;
1855 bool partition_pathkeys_partial = true;
1857
1858 /*
1859 * Some partitioned table setups may allow us to use an Append node
1860 * instead of a MergeAppend. This is possible in cases such as RANGE
1861 * partitioned tables where it's guaranteed that an earlier partition must
1862 * contain rows which come earlier in the sort order. To detect whether
1863 * this is relevant, build pathkey descriptions of the partition ordering,
1864 * for both forward and reverse scans.
1865 */
1866 if (rel->part_scheme != NULL && IS_SIMPLE_REL(rel) &&
1867 partitions_are_ordered(rel->boundinfo, rel->live_parts))
1868 {
1872
1876
1877 /*
1878 * You might think we should truncate_useless_pathkeys here, but
1879 * allowing partition keys which are a subset of the query's pathkeys
1880 * can often be useful. For example, consider a table partitioned by
1881 * RANGE (a, b), and a query with ORDER BY a, b, c. If we have child
1882 * paths that can produce the a, b, c ordering (perhaps via indexes on
1883 * (a, b, c)) then it works to consider the appendrel output as
1884 * ordered by a, b, c.
1885 */
1886 }
1887
1888 /* Now consider each interesting sort ordering */
1889 foreach(lcp, all_child_pathkeys)
1890 {
1891 List *pathkeys = (List *) lfirst(lcp);
1892 AppendPathInput startup = {0};
1893 AppendPathInput total = {0};
1895 bool startup_neq_total = false;
1896 bool fraction_neq_total = false;
1899 int end_index;
1900 int first_index;
1901 int direction;
1902
1903 /*
1904 * Determine if this sort ordering matches any partition pathkeys we
1905 * have, for both ascending and descending partition order. If the
1906 * partition pathkeys happen to be contained in pathkeys then it still
1907 * works, as described above, providing that the partition pathkeys
1908 * are complete and not just a prefix of the partition keys. (In such
1909 * cases we'll be relying on the child paths to have sorted the
1910 * lower-order columns of the required pathkeys.)
1911 */
1916
1921
1922 /*
1923 * When the required pathkeys match the reverse of the partition
1924 * order, we must build the list of paths in reverse starting with the
1925 * last matching partition first. We can get away without making any
1926 * special cases for this in the loop below by just looping backward
1927 * over the child relations in this case.
1928 */
1930 {
1931 /* loop backward */
1933 end_index = -1;
1934 direction = -1;
1935
1936 /*
1937 * Set this to true to save us having to check for
1938 * match_partition_order_desc in the loop below.
1939 */
1940 match_partition_order = true;
1941 }
1942 else
1943 {
1944 /* for all other case, loop forward */
1945 first_index = 0;
1947 direction = 1;
1948 }
1949
1950 /* Select the child paths for this ordering... */
1951 for (int i = first_index; i != end_index; i += direction)
1952 {
1957
1958 /* Locate the right paths, if they are available. */
1961 pathkeys,
1962 NULL,
1964 false);
1967 pathkeys,
1968 NULL,
1969 TOTAL_COST,
1970 false);
1971
1972 /*
1973 * If we can't find any paths with the right order just use the
1974 * cheapest-total path; we'll have to sort it later.
1975 */
1977 {
1979 childrel->cheapest_total_path;
1980 /* Assert we do have an unparameterized path for this child */
1981 Assert(cheapest_total->param_info == NULL);
1982 }
1983
1984 /*
1985 * When building a fractional path, determine a cheapest
1986 * fractional path for each child relation too. Looking at startup
1987 * and total costs is not enough, because the cheapest fractional
1988 * path may be dominated by two separate paths (one for startup,
1989 * one for total).
1990 *
1991 * When needed (building fractional path), determine the cheapest
1992 * fractional path too.
1993 */
1994 if (root->tuple_fraction > 0)
1995 {
1996 double path_fraction = root->tuple_fraction;
1997
1998 /*
1999 * We should not have a dummy child relation here. However,
2000 * we cannot use childrel->rows to compute the tuple fraction,
2001 * as childrel can be an upper relation with an unset row
2002 * estimate. Instead, we use the row estimate from the
2003 * cheapest_total path, which should already have been forced
2004 * to a sane value.
2005 */
2006 Assert(cheapest_total->rows > 0);
2007
2008 /* Convert absolute limit to a path fraction */
2009 if (path_fraction >= 1.0)
2011
2014 pathkeys,
2015 NULL,
2017
2018 /*
2019 * If we found no path with matching pathkeys, use the
2020 * cheapest total path instead.
2021 *
2022 * XXX We might consider partially sorted paths too (with an
2023 * incremental sort on top). But we'd have to build all the
2024 * incremental paths, do the costing etc.
2025 *
2026 * Also, notice whether we actually have different paths for
2027 * the "fractional" and "total" cases. This helps avoid
2028 * generating two identical ordered append paths.
2029 */
2033 fraction_neq_total = true;
2034 }
2035
2036 /*
2037 * Notice whether we actually have different paths for the
2038 * "cheapest" and "total" cases. This helps avoid generating two
2039 * identical ordered append paths.
2040 */
2042 startup_neq_total = true;
2043
2044 /*
2045 * Collect the appropriate child paths. The required logic varies
2046 * for the Append and MergeAppend cases.
2047 */
2049 {
2050 /*
2051 * We're going to make a plain Append path. We don't need
2052 * most of what accumulate_append_subpath would do, but we do
2053 * want to cut out child Appends or MergeAppends if they have
2054 * just a single subpath (and hence aren't doing anything
2055 * useful).
2056 */
2059 &startup.child_append_relid_sets);
2063
2064 startup.subpaths = lappend(startup.subpaths, cheapest_startup);
2065 total.subpaths = lappend(total.subpaths, cheapest_total);
2066
2068 {
2071 &fractional.child_append_relid_sets);
2072 fractional.subpaths =
2074 }
2075 }
2076 else
2077 {
2078 /*
2079 * Otherwise, rely on accumulate_append_subpath to collect the
2080 * child paths for the MergeAppend.
2081 */
2083 &startup.subpaths, NULL,
2084 &startup.child_append_relid_sets);
2086 &total.subpaths, NULL,
2088
2091 &fractional.subpaths, NULL,
2092 &fractional.child_append_relid_sets);
2093 }
2094 }
2095
2096 /* ... and build the Append or MergeAppend paths */
2098 {
2099 /* We only need Append */
2101 rel,
2102 startup,
2103 pathkeys,
2104 NULL,
2105 0,
2106 false,
2107 -1));
2110 rel,
2111 total,
2112 pathkeys,
2113 NULL,
2114 0,
2115 false,
2116 -1));
2117
2118 if (fractional.subpaths && fraction_neq_total)
2120 rel,
2121 fractional,
2122 pathkeys,
2123 NULL,
2124 0,
2125 false,
2126 -1));
2127 }
2128 else
2129 {
2130 /* We need MergeAppend */
2132 rel,
2133 startup.subpaths,
2135 pathkeys,
2136 NULL));
2139 rel,
2140 total.subpaths,
2142 pathkeys,
2143 NULL));
2144
2145 if (fractional.subpaths && fraction_neq_total)
2147 rel,
2148 fractional.subpaths,
2149 fractional.child_append_relid_sets,
2150 pathkeys,
2151 NULL));
2152 }
2153 }
2154}
2155
2156/*
2157 * get_cheapest_parameterized_child_path
2158 * Get cheapest path for this relation that has exactly the requested
2159 * parameterization.
2160 *
2161 * Returns NULL if unable to create such a path.
2162 */
2163static Path *
2166{
2167 Path *cheapest;
2168 ListCell *lc;
2169
2170 /*
2171 * Look up the cheapest existing path with no more than the needed
2172 * parameterization. If it has exactly the needed parameterization, we're
2173 * done.
2174 */
2176 NIL,
2178 TOTAL_COST,
2179 false);
2180 Assert(cheapest != NULL);
2182 return cheapest;
2183
2184 /*
2185 * Otherwise, we can "reparameterize" an existing path to match the given
2186 * parameterization, which effectively means pushing down additional
2187 * joinquals to be checked within the path's scan. However, some existing
2188 * paths might check the available joinquals already while others don't;
2189 * therefore, it's not clear which existing path will be cheapest after
2190 * reparameterization. We have to go through them all and find out.
2191 */
2192 cheapest = NULL;
2193 foreach(lc, rel->pathlist)
2194 {
2195 Path *path = (Path *) lfirst(lc);
2196
2197 /* Can't use it if it needs more than requested parameterization */
2199 continue;
2200
2201 /*
2202 * Reparameterization can only increase the path's cost, so if it's
2203 * already more expensive than the current cheapest, forget it.
2204 */
2205 if (cheapest != NULL &&
2207 continue;
2208
2209 /* Reparameterize if needed, then recheck cost */
2211 {
2212 path = reparameterize_path(root, path, required_outer, 1.0);
2213 if (path == NULL)
2214 continue; /* failed to reparameterize this one */
2216
2217 if (cheapest != NULL &&
2219 continue;
2220 }
2221
2222 /* We have a new best path */
2223 cheapest = path;
2224 }
2225
2226 /* Return the best path, or NULL if we found no suitable candidate */
2227 return cheapest;
2228}
2229
2230/*
2231 * accumulate_append_subpath
2232 * Add a subpath to the list being built for an Append or MergeAppend.
2233 *
2234 * It's possible that the child is itself an Append or MergeAppend path, in
2235 * which case we can "cut out the middleman" and just add its child paths to
2236 * our own list. (We don't try to do this earlier because we need to apply
2237 * both levels of transformation to the quals.)
2238 *
2239 * Note that if we omit a child MergeAppend in this way, we are effectively
2240 * omitting a sort step, which seems fine: if the parent is to be an Append,
2241 * its result would be unsorted anyway, while if the parent is to be a
2242 * MergeAppend, there's no point in a separate sort on a child.
2243 *
2244 * Normally, either path is a partial path and subpaths is a list of partial
2245 * paths, or else path is a non-partial plan and subpaths is a list of those.
2246 * However, if path is a parallel-aware Append, then we add its partial path
2247 * children to subpaths and the rest to special_subpaths. If the latter is
2248 * NULL, we don't flatten the path at all (unless it contains only partial
2249 * paths).
2250 */
2251static void
2253 List **child_append_relid_sets)
2254{
2255 if (IsA(path, AppendPath))
2256 {
2257 AppendPath *apath = (AppendPath *) path;
2258
2259 if (!apath->path.parallel_aware || apath->first_partial_path == 0)
2260 {
2261 *subpaths = list_concat(*subpaths, apath->subpaths);
2262 *child_append_relid_sets =
2263 lappend(*child_append_relid_sets, path->parent->relids);
2264 *child_append_relid_sets =
2265 list_concat(*child_append_relid_sets,
2266 apath->child_append_relid_sets);
2267 return;
2268 }
2269 else if (special_subpaths != NULL)
2270 {
2272
2273 /* Split Parallel Append into partial and non-partial subpaths */
2274 *subpaths = list_concat(*subpaths,
2275 list_copy_tail(apath->subpaths,
2276 apath->first_partial_path));
2278 apath->first_partial_path);
2281 *child_append_relid_sets =
2282 lappend(*child_append_relid_sets, path->parent->relids);
2283 *child_append_relid_sets =
2284 list_concat(*child_append_relid_sets,
2285 apath->child_append_relid_sets);
2286 return;
2287 }
2288 }
2289 else if (IsA(path, MergeAppendPath))
2290 {
2292
2293 *subpaths = list_concat(*subpaths, mpath->subpaths);
2294 *child_append_relid_sets =
2295 lappend(*child_append_relid_sets, path->parent->relids);
2296 *child_append_relid_sets =
2297 list_concat(*child_append_relid_sets,
2298 mpath->child_append_relid_sets);
2299 return;
2300 }
2301
2302 *subpaths = lappend(*subpaths, path);
2303}
2304
2305/*
2306 * get_singleton_append_subpath
2307 * Returns the single subpath of an Append/MergeAppend, or just
2308 * return 'path' if it's not a single sub-path Append/MergeAppend.
2309 *
2310 * As a side effect, whenever we return a single subpath rather than the
2311 * original path, add the relid sets for the original path to
2312 * child_append_relid_sets, so that those relids don't entirely disappear
2313 * from the final plan.
2314 *
2315 * Note: 'path' must not be a parallel-aware path.
2316 */
/*
 * NOTE(review): doxygen-extracted listing; lines that were hyperlinks in the
 * HTML were dropped (visible as gaps in the embedded line numbers).  Restore
 * missing lines from upstream allpaths.c before compiling.
 */
2317static Path *
2318get_singleton_append_subpath(Path *path, List **child_append_relid_sets)
2319{
2320 Assert(!path->parallel_aware);
2321
2322 if (IsA(path, AppendPath))
2323 {
2324 AppendPath *apath = (AppendPath *) path;
2325
2326 if (list_length(apath->subpaths) == 1)
2327 {
 /* Eliding the Append: record its relids so they aren't lost */
2328 *child_append_relid_sets =
2329 lappend(*child_append_relid_sets, path->parent->relids);
2330 *child_append_relid_sets =
2331 list_concat(*child_append_relid_sets,
2332 apath->child_append_relid_sets);
2333 return (Path *) linitial(apath->subpaths);
2334 }
2335 }
2336 else if (IsA(path, MergeAppendPath))
2337 {
 /*
  * NOTE(review): extraction lost doxygen line 2338 here -- presumably
  * "MergeAppendPath *mpath = (MergeAppendPath *) path;" (it must
  * declare "mpath", which is used below); confirm against upstream.
  */
2339
2340 if (list_length(mpath->subpaths) == 1)
2341 {
 /* Same bookkeeping as the Append case above */
2342 *child_append_relid_sets =
2343 lappend(*child_append_relid_sets, path->parent->relids);
2344 *child_append_relid_sets =
2345 list_concat(*child_append_relid_sets,
2346 mpath->child_append_relid_sets);
2347 return (Path *) linitial(mpath->subpaths);
2348 }
2349 }
2350
 /* Not a single-subpath Append/MergeAppend: hand back the original */
2351 return path;
2352}
2353
2354/*
2355 * set_dummy_rel_pathlist
2356 * Build a dummy path for a relation that's been excluded by constraints
2357 *
2358 * Rather than inventing a special "dummy" path type, we represent this as an
2359 * AppendPath with no members (see also IS_DUMMY_APPEND/IS_DUMMY_REL macros).
2360 *
2361 * (See also mark_dummy_rel, which does basically the same thing, but is
2362 * typically used to change a rel into dummy state after we already made
2363 * paths for it.)
2364 */
2365static void
/*
 * NOTE(review): the signature line (doxygen 2366) was lost in extraction --
 * presumably "set_dummy_rel_pathlist(RelOptInfo *rel)" (the body only uses
 * "rel", and passes NULL as root to create_append_path); confirm upstream.
 */
2367{
2368 AppendPathInput in = {0};
2369
2370 /* Set dummy size estimates --- we leave attr_widths[] as zeroes */
2371 rel->rows = 0;
2372 rel->reltarget->width = 0;
2373
2374 /* Discard any pre-existing paths; no further need for them */
2375 rel->pathlist = NIL;
2376 rel->partial_pathlist = NIL;
2377
2378 /* Set up the dummy path */
2379 add_path(rel, (Path *) create_append_path(NULL, rel, in,
2380 NIL, rel->lateral_relids,
2381 0, false, -1));
2382
2383 /*
2384 * We set the cheapest-path fields immediately, just in case they were
2385 * pointing at some discarded path. This is redundant in current usage
2386 * because set_rel_pathlist will do it later, but it's cheap so we keep it
2387 * for safety and consistency with mark_dummy_rel.
2388 */
2389 set_cheapest(rel);
2390}
2391
2392/*
2393 * find_window_run_conditions
2394 * Determine if 'wfunc' is really a WindowFunc and call its prosupport
2395 * function to determine the function's monotonic properties. We then
2396 * see if 'opexpr' can be used to short-circuit execution.
2397 *
2398 * For example row_number() over (order by ...) always produces a value one
2399 * higher than the previous. If someone has a window function in a subquery
2400 * and has a WHERE clause in the outer query to filter rows <= 10, then we may
2401 * as well stop processing the windowagg once the row number reaches 11. Here
2402 * we check if 'opexpr' might help us to stop doing needless extra processing
2403 * in WindowAgg nodes.
2404 *
2405 * '*keep_original' is set to true if the caller should also use 'opexpr' for
2406 * its original purpose. This is set to false if the caller can assume that
2407 * the run condition will handle all of the required filtering.
2408 *
2409 * Returns true if 'opexpr' was found to be useful and was added to the
2410 * WindowFunc's runCondition. We also set *keep_original accordingly and add
2411 * 'attno' to *run_cond_attrs offset by FirstLowInvalidHeapAttributeNumber.
2412 * If the 'opexpr' cannot be used then we set *keep_original to true and
2413 * return false.
2414 */
/*
 * NOTE(review): doxygen-extracted listing; hyperlinked lines were dropped
 * (gaps in the embedded numbering: 2416, 2418, 2420, 2422-2424, 2426-2427,
 * 2443, 2465-2466, 2471-2472, 2483-2484, 2488, 2528, 2536, 2551, 2561,
 * 2571, 2573, 2577, 2582-2583).  These include the rest of the signature and
 * most local declarations ("prosupport", "wclause", "req", "res",
 * "runopexpr", "runoperator", "opinfo", "newcmptype", "wfuncrc").  Restore
 * from upstream allpaths.c before compiling.
 */
2415static bool
2417 WindowFunc *wfunc, OpExpr *opexpr, bool wfunc_left,
2419{
2421 Expr *otherexpr;
2425 List *opinfos;
2428 ListCell *lc;
2429
2430 *keep_original = true;
2431
 /* Strip any RelabelType wrappers to find the underlying node */
2432 while (IsA(wfunc, RelabelType))
2433 wfunc = (WindowFunc *) ((RelabelType *) wfunc)->arg;
2434
2435 /* we can only work with window functions */
2436 if (!IsA(wfunc, WindowFunc))
2437 return false;
2438
2439 /* can't use it if there are subplans in the WindowFunc */
2440 if (contain_subplans((Node *) wfunc))
2441 return false;
2442
 /* NOTE(review): line 2443 lost -- it must assign "prosupport"; confirm. */
2444
2445 /* Check if there's a support function for 'wfunc' */
2446 if (!OidIsValid(prosupport))
2447 return false;
2448
2449 /* get the Expr from the other side of the OpExpr */
2450 if (wfunc_left)
2451 otherexpr = lsecond(opexpr->args);
2452 else
2453 otherexpr = linitial(opexpr->args);
2454
2455 /*
2456 * The value being compared must not change during the evaluation of the
2457 * window partition.
2458 */
 /* NOTE(review): line 2459 (the pseudoconstant test) lost; confirm. */
2460 return false;
2461
2462 /* find the window clause belonging to the window function */
2463 wclause = (WindowClause *) list_nth(subquery->windowClause,
2464 wfunc->winref - 1);
 /* NOTE(review): lines 2465-2466 lost -- presumably set req.type; confirm. */
2466
2467 req.window_func = wfunc;
2468 req.window_clause = wclause;
2469
2470 /* call the support function */
 /* NOTE(review): lines 2471-2472 lost -- the call assigning "res"; confirm. */
2473 PointerGetDatum(&req)));
2474
2475 /*
2476 * Nothing to do if the function is neither monotonically increasing nor
2477 * monotonically decreasing.
2478 */
2479 if (res == NULL || res->monotonic == MONOTONICFUNC_NONE)
2480 return false;
2481
2482 runopexpr = NULL;
 /* NOTE(review): lines 2483-2484 lost -- init runoperator / fetch opinfos. */
2485
2486 foreach(lc, opinfos)
2487 {
 /* NOTE(review): line 2488 lost -- the "opinfo" declaration; confirm. */
2489 CompareType cmptype = opinfo->cmptype;
2490
2491 /* handle < / <= */
2492 if (cmptype == COMPARE_LT || cmptype == COMPARE_LE)
2493 {
2494 /*
2495 * < / <= is supported for monotonically increasing functions in
2496 * the form <wfunc> op <pseudoconst> and <pseudoconst> op <wfunc>
2497 * for monotonically decreasing functions.
2498 */
2499 if ((wfunc_left && (res->monotonic & MONOTONICFUNC_INCREASING)) ||
2500 (!wfunc_left && (res->monotonic & MONOTONICFUNC_DECREASING)))
2501 {
2502 *keep_original = false;
2503 runopexpr = opexpr;
2504 runoperator = opexpr->opno;
2505 }
2506 break;
2507 }
2508 /* handle > / >= */
2509 else if (cmptype == COMPARE_GT || cmptype == COMPARE_GE)
2510 {
2511 /*
2512 * > / >= is supported for monotonically decreasing functions in
2513 * the form <wfunc> op <pseudoconst> and <pseudoconst> op <wfunc>
2514 * for monotonically increasing functions.
2515 */
2516 if ((wfunc_left && (res->monotonic & MONOTONICFUNC_DECREASING)) ||
2517 (!wfunc_left && (res->monotonic & MONOTONICFUNC_INCREASING)))
2518 {
2519 *keep_original = false;
2520 runopexpr = opexpr;
2521 runoperator = opexpr->opno;
2522 }
2523 break;
2524 }
2525 /* handle = */
2526 else if (cmptype == COMPARE_EQ)
2527 {
 /* NOTE(review): line 2528 lost -- "newcmptype" declaration; confirm. */
2529
2530 /*
2531 * When both monotonically increasing and decreasing then the
2532 * return value of the window function will be the same each time.
2533 * We can simply use 'opexpr' as the run condition without
2534 * modifying it.
2535 */
 /* NOTE(review): line 2536 lost -- the "both directions" test; confirm. */
2537 {
2538 *keep_original = false;
2539 runopexpr = opexpr;
2540 runoperator = opexpr->opno;
2541 break;
2542 }
2543
2544 /*
2545 * When monotonically increasing we make a qual with <wfunc> <=
2546 * <value> or <value> >= <wfunc> in order to filter out values
2547 * which are above the value in the equality condition. For
2548 * monotonically decreasing functions we want to filter values
2549 * below the value in the equality condition.
2550 */
 /* NOTE(review): line 2551 lost -- the increasing-direction test; confirm. */
2552 newcmptype = wfunc_left ? COMPARE_LE : COMPARE_GE;
2553 else
2554 newcmptype = wfunc_left ? COMPARE_GE : COMPARE_LE;
2555
2556 /* We must keep the original equality qual */
2557 *keep_original = true;
2558 runopexpr = opexpr;
2559
2560 /* determine the operator to use for the WindowFuncRunCondition */
 /* NOTE(review): line 2561 lost -- the opfamily lookup assigning
  * "runoperator"; confirm against upstream. */
2562 opinfo->oplefttype,
2563 opinfo->oprighttype,
2564 newcmptype);
2565 break;
2566 }
2567 }
2568
2569 if (runopexpr != NULL)
2570 {
 /* NOTE(review): lines 2571/2573 lost -- "wfuncrc" decl/makeNode; confirm. */
2572
2574 wfuncrc->opno = runoperator;
2575 wfuncrc->inputcollid = runopexpr->inputcollid;
2576 wfuncrc->wfunc_left = wfunc_left;
 /* NOTE(review): line 2577 lost -- presumably stores "otherexpr" into
  * wfuncrc->arg; confirm against upstream. */
2578
2579 wfunc->runCondition = lappend(wfunc->runCondition, wfuncrc);
2580
2581 /* record that this attno was used in a run condition */
 /* NOTE(review): lines 2582-2583 lost -- the bms_add_member() call that
  * actually records attno in *run_cond_attrs; confirm upstream. */
2584 return true;
2585 }
2586
2587 /* unsupported OpExpr */
2588 return false;
2589}
2590
2591/*
2592 * check_and_push_window_quals
2593 * Check if 'clause' is a qual that can be pushed into a WindowFunc
2594 * as a 'runCondition' qual. These, when present, allow some unnecessary
2595 * work to be skipped during execution.
2596 *
2597 * 'run_cond_attrs' will be populated with all targetlist resnos of subquery
2598 * targets (offset by FirstLowInvalidHeapAttributeNumber) that we pushed
2599 * window quals for.
2600 *
2601 * Returns true if the caller still must keep the original qual or false if
2602 * the caller can safely ignore the original qual because the WindowAgg node
2603 * will use the runCondition to stop returning tuples.
2604 */
2605static bool
/*
 * NOTE(review): signature lines (doxygen 2606-2607) lost in extraction --
 * presumably "check_and_push_window_quals(Query *subquery, Node *clause,
 * Bitmapset **run_cond_attrs)" given the body's uses; confirm upstream.
 */
2608{
2609 OpExpr *opexpr = (OpExpr *) clause;
2610 bool keep_original = true;
2611 Var *var1;
2612 Var *var2;
2613
2614 /* We're only able to use OpExprs with 2 operands */
2615 if (!IsA(opexpr, OpExpr))
2616 return true;
2617
2618 if (list_length(opexpr->args) != 2)
2619 return true;
2620
2621 /*
2622 * Currently, we restrict this optimization to strict OpExprs. The reason
2623 * for this is that during execution, once the runcondition becomes false,
2624 * we stop evaluating WindowFuncs. To avoid leaving around stale window
2625 * function result values, we set them to NULL. Having only strict
2626 * OpExprs here ensures that we properly filter out the tuples with NULLs
2627 * in the top-level WindowAgg.
2628 */
2629 set_opfuncid(opexpr);
2630 if (!func_strict(opexpr->opfuncid))
2631 return true;
2632
2633 /*
2634 * Check for plain Vars that reference window functions in the subquery.
2635 * If we find any, we'll ask find_window_run_conditions() if 'opexpr' can
2636 * be used as part of the run condition.
2637 */
2638
2639 /* Check the left side of the OpExpr */
2640 var1 = linitial(opexpr->args);
2641 if (IsA(var1, Var) && var1->varattno > 0)
2642 {
2643 TargetEntry *tle = list_nth(subquery->targetList, var1->varattno - 1);
2644 WindowFunc *wfunc = (WindowFunc *) tle->expr;
2645
2646 if (find_window_run_conditions(subquery, tle->resno, wfunc, opexpr,
 /* NOTE(review): line 2647 lost -- the remaining arguments (compare the
  * parallel call below, which passes false, &keep_original,
  * run_cond_attrs); this one presumably passes true for wfunc_left. */
2648 return keep_original;
2649 }
2650
2651 /* and check the right side */
2652 var2 = lsecond(opexpr->args);
2653 if (IsA(var2, Var) && var2->varattno > 0)
2654 {
2655 TargetEntry *tle = list_nth(subquery->targetList, var2->varattno - 1);
2656 WindowFunc *wfunc = (WindowFunc *) tle->expr;
2657
2658 if (find_window_run_conditions(subquery, tle->resno, wfunc, opexpr,
2659 false, &keep_original, run_cond_attrs))
2660 return keep_original;
2661 }
2662
 /* Neither side is a suitable Var: caller must keep the original qual */
2663 return true;
2664}
2665
2666/*
2667 * set_subquery_pathlist
2668 * Generate SubqueryScan access paths for a subquery RTE
2669 *
2670 * We don't currently support generating parameterized paths for subqueries
2671 * by pushing join clauses down into them; it seems too expensive to re-plan
2672 * the subquery multiple times to consider different alternatives.
2673 * (XXX that could stand to be reconsidered, now that we use Paths.)
2674 * So the paths made here will be parameterized if the subquery contains
2675 * LATERAL references, otherwise not. As long as that's true, there's no need
2676 * for a separate set_subquery_size phase: just make the paths right away.
2677 */
/*
 * NOTE(review): doxygen-extracted listing with many hyperlinked lines
 * dropped (gaps at 2679, 2685-2686, 2688-2689, 2705, 2752, 2762, 2773,
 * 2783, 2790, 2795, 2799, 2811, 2847, 2849, 2851, 2860, 2902, 2909-2910,
 * 2915, 2928, 2935-2936, 2938).  Missing content includes the first
 * signature line, the "required_outer"/"safetyInfo"/"sub_final_rel"/
 * "run_cond_attrs" declarations, the "upperrestrictlist" bookkeeping in the
 * pushdown loop, and several project-function calls.  Restore from upstream
 * allpaths.c before compiling.
 */
2678static void
2680 Index rti, RangeTblEntry *rte)
2681{
2682 Query *parse = root->parse;
2683 Query *subquery = rte->subquery;
2684 bool trivial_pathtarget;
2687 double tuple_fraction;
2690 ListCell *lc;
2691 char *plan_name;
2692
2693 /*
2694 * Must copy the Query so that planning doesn't mess up the RTE contents
2695 * (really really need to fix the planner to not scribble on its input,
2696 * someday ... but see remove_unused_subquery_outputs to start with).
2697 */
2698 subquery = copyObject(subquery);
2699
2700 /*
2701 * If it's a LATERAL subquery, it might contain some Vars of the current
2702 * query level, requiring it to be treated as parameterized, even though
2703 * we don't support pushing down join quals into subqueries.
2704 */
 /* NOTE(review): line 2705 lost -- presumably assigns required_outer from
  * rel->lateral_relids; confirm upstream. */
2706
2707 /*
2708 * Zero out result area for subquery_is_pushdown_safe, so that it can set
2709 * flags as needed while recursing. In particular, we need a workspace
2710 * for keeping track of the reasons why columns are unsafe to reference.
2711 * These reasons are stored in the bits inside unsafeFlags[i] when we
2712 * discover reasons that column i of the subquery is unsafe to be used in
2713 * a pushed-down qual.
2714 */
2715 memset(&safetyInfo, 0, sizeof(safetyInfo));
2716 safetyInfo.unsafeFlags = (unsigned char *)
2717 palloc0((list_length(subquery->targetList) + 1) * sizeof(unsigned char));
2718
2719 /*
2720 * If the subquery has the "security_barrier" flag, it means the subquery
2721 * originated from a view that must enforce row-level security. Then we
2722 * must not push down quals that contain leaky functions. (Ideally this
2723 * would be checked inside subquery_is_pushdown_safe, but since we don't
2724 * currently pass the RTE to that function, we must do it here.)
2725 */
2726 safetyInfo.unsafeLeaky = rte->security_barrier;
2727
2728 /*
2729 * If there are any restriction clauses that have been attached to the
2730 * subquery relation, consider pushing them down to become WHERE or HAVING
2731 * quals of the subquery itself. This transformation is useful because it
2732 * may allow us to generate a better plan for the subquery than evaluating
2733 * all the subquery output rows and then filtering them.
2734 *
2735 * There are several cases where we cannot push down clauses. Restrictions
2736 * involving the subquery are checked by subquery_is_pushdown_safe().
2737 * Restrictions on individual clauses are checked by
2738 * qual_is_pushdown_safe(). Also, we don't want to push down
2739 * pseudoconstant clauses; better to have the gating node above the
2740 * subquery.
2741 *
2742 * Non-pushed-down clauses will get evaluated as qpquals of the
2743 * SubqueryScan node.
2744 *
2745 * XXX Are there any cases where we want to make a policy decision not to
2746 * push down a pushable qual, because it'd result in a worse plan?
2747 */
2748 if (rel->baserestrictinfo != NIL &&
2749 subquery_is_pushdown_safe(subquery, subquery, &safetyInfo))
2750 {
2751 /* OK to consider pushing down individual quals */
2753 ListCell *l;
2754
2755 foreach(l, rel->baserestrictinfo)
2756 {
2757 RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
2758 Node *clause = (Node *) rinfo->clause;
2759
2760 if (rinfo->pseudoconstant)
2761 {
 /* NOTE(review): line 2762 lost -- presumably keeps rinfo in the
  * not-pushed-down list; confirm upstream. */
2763 continue;
2764 }
2765
2766 switch (qual_is_pushdown_safe(subquery, rti, rinfo, &safetyInfo))
2767 {
2768 case PUSHDOWN_SAFE:
2769 /* Push it down */
2770 subquery_push_qual(subquery, rte, rti, clause);
2771 break;
2772
 /* NOTE(review): line 2773 lost -- the case label for the
  * window-run-condition pushdown result; confirm upstream. */
2774
2775 /*
2776 * Since we can't push the qual down into the subquery,
2777 * check if it happens to reference a window function. If
2778 * so then it might be useful to use for the WindowAgg's
2779 * runCondition.
2780 */
2781 if (!subquery->hasWindowFuncs ||
2782 check_and_push_window_quals(subquery, clause,
2784 {
2785 /*
2786 * subquery has no window funcs or the clause is not a
2787 * suitable window run condition qual or it is, but
2788 * the original must also be kept in the upper query.
2789 */
2791 }
2792 break;
2793
2794 case PUSHDOWN_UNSAFE:
 /* NOTE(review): line 2795 lost -- presumably keeps rinfo in the
  * not-pushed-down list; confirm upstream. */
2796 break;
2797 }
2798 }
 /* NOTE(review): line 2799 lost -- presumably reassigns
  * rel->baserestrictinfo to the retained quals; confirm upstream. */
2800 /* We don't bother recomputing baserestrict_min_security */
2801 }
2802
2803 pfree(safetyInfo.unsafeFlags);
2804
2805 /*
2806 * The upper query might not use all the subquery's output columns; if
2807 * not, we can simplify. Pass the attributes that were pushed down into
2808 * WindowAgg run conditions to ensure we don't accidentally think those
2809 * are unused.
2810 */
 /* NOTE(review): line 2811 lost -- presumably the
  * remove_unused_subquery_outputs() call; confirm upstream. */
2812
2813 /*
2814 * We can safely pass the outer tuple_fraction down to the subquery if the
2815 * outer level has no joining, aggregation, or sorting to do. Otherwise
2816 * we'd better tell the subquery to plan for full retrieval. (XXX This
2817 * could probably be made more intelligent ...)
2818 */
2819 if (parse->hasAggs ||
2820 parse->groupClause ||
2821 parse->groupingSets ||
2822 root->hasHavingQual ||
2823 parse->distinctClause ||
2824 parse->sortClause ||
2825 bms_membership(root->all_baserels) == BMS_MULTIPLE)
2826 tuple_fraction = 0.0; /* default case */
2827 else
2828 tuple_fraction = root->tuple_fraction;
2829
2830 /* plan_params should not be in use in current query level */
2831 Assert(root->plan_params == NIL);
2832
2833 /* Generate a subroot and Paths for the subquery */
2834 plan_name = choose_plan_name(root->glob, rte->eref->aliasname, false);
2835 rel->subroot = subquery_planner(root->glob, subquery, plan_name,
2836 root, false, tuple_fraction, NULL);
2837
2838 /* Isolate the params needed by this specific subplan */
2839 rel->subplan_params = root->plan_params;
2840 root->plan_params = NIL;
2841
2842 /*
2843 * It's possible that constraint exclusion proved the subquery empty. If
2844 * so, it's desirable to produce an unadorned dummy path so that we will
2845 * recognize appropriate optimizations at this query level.
2846 */
 /* NOTE(review): lines 2847/2849/2851 lost -- presumably fetch the
  * subquery's final upper rel, test IS_DUMMY_REL on it, and call
  * set_dummy_rel_pathlist(rel); confirm upstream. */
2848
2850 {
2852 return;
2853 }
2854
2855 /*
2856 * Mark rel with estimated output rows, width, etc. Note that we have to
2857 * do this before generating outer-query paths, else cost_subqueryscan is
2858 * not happy.
2859 */
 /* NOTE(review): line 2860 lost -- presumably the size-estimate call. */
2861
2862 /*
2863 * Also detect whether the reltarget is trivial, so that we can pass that
2864 * info to cost_subqueryscan (rather than re-deriving it multiple times).
2865 * It's trivial if it fetches all the subplan output columns in order.
2866 */
2867 if (list_length(rel->reltarget->exprs) != list_length(subquery->targetList))
2868 trivial_pathtarget = false;
2869 else
2870 {
2871 trivial_pathtarget = true;
2872 foreach(lc, rel->reltarget->exprs)
2873 {
2874 Node *node = (Node *) lfirst(lc);
2875 Var *var;
2876
2877 if (!IsA(node, Var))
2878 {
2879 trivial_pathtarget = false;
2880 break;
2881 }
2882 var = (Var *) node;
2883 if (var->varno != rti ||
2884 var->varattno != foreach_current_index(lc) + 1)
2885 {
2886 trivial_pathtarget = false;
2887 break;
2888 }
2889 }
2890 }
2891
2892 /*
2893 * For each Path that subquery_planner produced, make a SubqueryScanPath
2894 * in the outer query.
2895 */
2896 foreach(lc, sub_final_rel->pathlist)
2897 {
2898 Path *subpath = (Path *) lfirst(lc);
2899 List *pathkeys;
2900
2901 /* Convert subpath's pathkeys to outer representation */
 /* NOTE(review): line 2902 lost -- the assignment head of the pathkeys
  * conversion call; confirm upstream. */
2903 rel,
2904 subpath->pathkeys,
2905 make_tlist_from_pathtarget(subpath->pathtarget));
2906
2907 /* Generate outer path using this subpath */
2908 add_path(rel, (Path *)
 /* NOTE(review): lines 2909-2910 lost -- the subqueryscan path
  * constructor call head; confirm upstream. */
2911 pathkeys, required_outer));
2912 }
2913
2914 /* If outer rel allows parallelism, do same for partial paths. */
 /* NOTE(review): line 2915 lost -- the guarding if-condition; confirm. */
2916 {
2917 /* If consider_parallel is false, there should be no partial paths. */
2918 Assert(sub_final_rel->consider_parallel ||
2919 sub_final_rel->partial_pathlist == NIL);
2920
2921 /* Same for partial paths. */
2922 foreach(lc, sub_final_rel->partial_pathlist)
2923 {
2924 Path *subpath = (Path *) lfirst(lc);
2925 List *pathkeys;
2926
2927 /* Convert subpath's pathkeys to outer representation */
2929 rel,
2930 subpath->pathkeys,
2931 make_tlist_from_pathtarget(subpath->pathtarget));
2932
2933 /* Generate outer path using this subpath */
2934 add_partial_path(rel, (Path *)
2937 pathkeys,
2939 }
2940 }
2941}
2942
2943/*
2944 * set_function_pathlist
2945 * Build the (single) access path for a function RTE
2946 */
2947static void
/*
 * NOTE(review): doxygen-extracted listing; lost lines 2948 (signature),
 * 2950 (required_outer declaration), 2958 (its assignment), 2967 (the
 * ordinal-column attno, used as "ordattno" below), 3000 (the int8 sort
 * operator argument), and 3006 (the add_path/create_functionscan_path call
 * head).  Restore from upstream allpaths.c before compiling.
 */
2949{
2951 List *pathkeys = NIL;
2952
2953 /*
2954 * We don't support pushing join clauses into the quals of a function
2955 * scan, but it could still have required parameterization due to LATERAL
2956 * refs in the function expression.
2957 */
2959
2960 /*
2961 * The result is considered unordered unless ORDINALITY was used, in which
2962 * case it is ordered by the ordinal column (the last one). See if we
2963 * care, by checking for uses of that Var in equivalence classes.
2964 */
2965 if (rte->funcordinality)
2966 {
2968 Var *var = NULL;
2969 ListCell *lc;
2970
2971 /*
2972 * Is there a Var for it in rel's targetlist? If not, the query did
2973 * not reference the ordinality column, or at least not in any way
2974 * that would be interesting for sorting.
2975 */
2976 foreach(lc, rel->reltarget->exprs)
2977 {
2978 Var *node = (Var *) lfirst(lc);
2979
2980 /* checking varno/varlevelsup is just paranoia */
2981 if (IsA(node, Var) &&
2982 node->varattno == ordattno &&
2983 node->varno == rel->relid &&
2984 node->varlevelsup == 0)
2985 {
2986 var = node;
2987 break;
2988 }
2989 }
2990
2991 /*
2992 * Try to build pathkeys for this Var with int8 sorting. We tell
2993 * build_expression_pathkey not to build any new equivalence class; if
2994 * the Var isn't already mentioned in some EC, it means that nothing
2995 * cares about the ordering.
2996 */
2997 if (var)
2998 pathkeys = build_expression_pathkey(root,
2999 (Expr *) var,
3001 rel->relids,
3002 false);
3003 }
3004
3005 /* Generate appropriate path */
3007 pathkeys, required_outer));
3008}
3009
3010/*
3011 * set_values_pathlist
3012 * Build the (single) access path for a VALUES RTE
3013 */
3014static void
/*
 * NOTE(review): doxygen-extracted listing; lost lines 3015 (signature),
 * 3017 (required_outer declaration), 3024 (its assignment), and 3027 (the
 * add_path/create_valuesscan_path call).  Restore from upstream allpaths.c.
 */
3016{
3018
3019 /*
3020 * We don't support pushing join clauses into the quals of a values scan,
3021 * but it could still have required parameterization due to LATERAL refs
3022 * in the values expressions.
3023 */
3025
3026 /* Generate appropriate path */
3028}
3029
3030/*
3031 * set_tablefunc_pathlist
3032 * Build the (single) access path for a table func RTE
3033 */
3034static void
/*
 * NOTE(review): doxygen-extracted listing; lost lines 3035 (signature),
 * 3037 (required_outer declaration), 3044 (its assignment), and 3047-3048
 * (the add_path/create_tablefuncscan_path call).  Restore from upstream
 * allpaths.c.
 */
3036{
3038
3039 /*
3040 * We don't support pushing join clauses into the quals of a tablefunc
3041 * scan, but it could still have required parameterization due to LATERAL
3042 * refs in the function expression.
3043 */
3045
3046 /* Generate appropriate path */
3049}
3050
3051/*
3052 * set_cte_pathlist
3053 * Build the (single) access path for a non-self-reference CTE RTE
3054 *
3055 * There's no need for a separate set_cte_size phase, since we don't
3056 * support join-qual-parameterized paths for CTEs.
3057 */
3058static void
/*
 * NOTE(review): doxygen-extracted listing; lost lines 3059 (signature),
 * 3063 ("cteroot" declaration), 3069 ("required_outer" declaration), 3092
 * (the "cte" declaration in the search loop), 3114 (the pathkeys-conversion
 * call head), and 3124 (the required_outer assignment).  Restore from
 * upstream allpaths.c before compiling.
 */
3060{
3061 Path *ctepath;
3062 Plan *cteplan;
3064 Index levelsup;
3065 List *pathkeys;
3066 int ndx;
3067 ListCell *lc;
3068 int plan_id;
3070
3071 /*
3072 * Find the referenced CTE, and locate the path and plan previously made
3073 * for it.
3074 */
3075 levelsup = rte->ctelevelsup;
3076 cteroot = root;
3077 while (levelsup-- > 0)
3078 {
3079 cteroot = cteroot->parent_root;
3080 if (!cteroot) /* shouldn't happen */
3081 elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
3082 }
3083
3084 /*
3085 * Note: cte_plan_ids can be shorter than cteList, if we are still working
3086 * on planning the CTEs (ie, this is a side-reference from another CTE).
3087 * So we mustn't use forboth here.
3088 */
3089 ndx = 0;
3090 foreach(lc, cteroot->parse->cteList)
3091 {
3093
3094 if (strcmp(cte->ctename, rte->ctename) == 0)
3095 break;
3096 ndx++;
3097 }
3098 if (lc == NULL) /* shouldn't happen */
3099 elog(ERROR, "could not find CTE \"%s\"", rte->ctename);
3100 if (ndx >= list_length(cteroot->cte_plan_ids))
3101 elog(ERROR, "could not find plan for CTE \"%s\"", rte->ctename);
3102 plan_id = list_nth_int(cteroot->cte_plan_ids, ndx);
3103 if (plan_id <= 0)
3104 elog(ERROR, "no plan was made for CTE \"%s\"", rte->ctename);
3105
3106 Assert(list_length(root->glob->subpaths) == list_length(root->glob->subplans));
3107 ctepath = (Path *) list_nth(root->glob->subpaths, plan_id - 1);
3108 cteplan = (Plan *) list_nth(root->glob->subplans, plan_id - 1);
3109
3110 /* Mark rel with estimated output rows, width, etc */
3111 set_cte_size_estimates(root, rel, cteplan->plan_rows);
3112
3113 /* Convert the ctepath's pathkeys to outer query's representation */
3115 rel,
3116 ctepath->pathkeys,
3117 cteplan->targetlist);
3118
3119 /*
3120 * We don't support pushing join clauses into the quals of a CTE scan, but
3121 * it could still have required parameterization due to LATERAL refs in
3122 * its tlist.
3123 */
3125
3126 /* Generate appropriate path */
3127 add_path(rel, create_ctescan_path(root, rel, pathkeys, required_outer));
3128}
3129
3130/*
3131 * set_namedtuplestore_pathlist
3132 * Build the (single) access path for a named tuplestore RTE
3133 *
3134 * There's no need for a separate set_namedtuplestore_size phase, since we
3135 * don't support join-qual-parameterized paths for tuplestores.
3136 */
3137static void
/*
 * NOTE(review): doxygen-extracted listing; lost lines 3138-3139 (signature),
 * 3141 (required_outer declaration), 3144 (the size-estimate call), 3151
 * (the required_outer assignment), and 3154 (the add_path call).  Restore
 * from upstream allpaths.c.
 */
3140{
3142
3143 /* Mark rel with estimated output rows, width, etc */
3145
3146 /*
3147 * We don't support pushing join clauses into the quals of a tuplestore
3148 * scan, but it could still have required parameterization due to LATERAL
3149 * refs in its tlist.
3150 */
3152
3153 /* Generate appropriate path */
3155}
3156
3157/*
3158 * set_result_pathlist
3159 * Build the (single) access path for an RTE_RESULT RTE
3160 *
3161 * There's no need for a separate set_result_size phase, since we
3162 * don't support join-qual-parameterized paths for these RTEs.
3163 */
3164static void
/*
 * NOTE(review): doxygen-extracted listing; lost lines 3165-3166 (signature),
 * 3168 (required_outer declaration), 3171 (the size-estimate call), 3178
 * (the required_outer assignment), and 3181 (the add_path call).  Restore
 * from upstream allpaths.c.
 */
3167{
3169
3170 /* Mark rel with estimated output rows, width, etc */
3172
3173 /*
3174 * We don't support pushing join clauses into the quals of a Result scan,
3175 * but it could still have required parameterization due to LATERAL refs
3176 * in its tlist.
3177 */
3179
3180 /* Generate appropriate path */
3182}
3183
3184/*
3185 * set_worktable_pathlist
3186 * Build the (single) access path for a self-reference CTE RTE
3187 *
3188 * There's no need for a separate set_worktable_size phase, since we don't
3189 * support join-qual-parameterized paths for CTEs.
3190 */
3191static void
/*
 * NOTE(review): doxygen-extracted listing; lost lines 3192 (signature),
 * 3195 ("cteroot" declaration), 3197 ("required_outer" declaration), 3220
 * (the size-estimate call), 3228 (the required_outer assignment), and 3231
 * (the add_path call).  Restore from upstream allpaths.c.
 */
3193{
3194 Path *ctepath;
3196 Index levelsup;
3198
3199 /*
3200 * We need to find the non-recursive term's path, which is in the plan
3201 * level that's processing the recursive UNION, which is one level *below*
3202 * where the CTE comes from.
3203 */
3204 levelsup = rte->ctelevelsup;
3205 if (levelsup == 0) /* shouldn't happen */
3206 elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
3207 levelsup--;
3208 cteroot = root;
3209 while (levelsup-- > 0)
3210 {
3211 cteroot = cteroot->parent_root;
3212 if (!cteroot) /* shouldn't happen */
3213 elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
3214 }
3215 ctepath = cteroot->non_recursive_path;
3216 if (!ctepath) /* shouldn't happen */
3217 elog(ERROR, "could not find path for CTE \"%s\"", rte->ctename);
3218
3219 /* Mark rel with estimated output rows, width, etc */
3221
3222 /*
3223 * We don't support pushing join clauses into the quals of a worktable
3224 * scan, but it could still have required parameterization due to LATERAL
3225 * refs in its tlist. (I'm not sure this is actually possible given the
3226 * restrictions on recursive references, but it's easy enough to support.)
3227 */
3229
3230 /* Generate appropriate path */
3232}
3233
3234/*
3235 * generate_gather_paths
3236 * Generate parallel access paths for a relation by pushing a Gather or
3237 * Gather Merge on top of a partial path.
3238 *
3239 * This must not be called until after we're done creating all partial paths
3240 * for the specified relation. (Otherwise, add_partial_path might delete a
3241 * path that some GatherPath or GatherMergePath has a reference to.)
3242 *
3243 * If we're generating paths for a scan or join relation, override_rows will
3244 * be false, and we'll just use the relation's size estimate. When we're
3245 * being called for a partially-grouped or partially-distinct path, though, we
3246 * need to override the rowcount estimate. (It's not clear that the
3247 * particular value we're using here is actually best, but the underlying rel
3248 * has no estimate so we must do something.)
3249 */
3250void
/*
 * NOTE(review): doxygen-extracted listing; lost lines 3251 (signature),
 * 3253-3254 (the cheapest-partial-path and gather-path declarations),
 * 3272-3275 (fetching the cheapest partial path and building the plain
 * Gather path), 3277 (its add_path), and 3291-3292 (the rows estimate and
 * the create_gather_merge_path call head).  Restore from upstream
 * allpaths.c before compiling.
 */
3252{
3255 ListCell *lc;
3256 double rows;
3257 double *rowsp = NULL;
3258
3259 /* If there are no partial paths, there's nothing to do here. */
3260 if (rel->partial_pathlist == NIL)
3261 return;
3262
3263 /* Should we override the rel's rowcount estimate? */
3264 if (override_rows)
3265 rowsp = &rows;
3266
3267 /*
3268 * The output of Gather is always unsorted, so there's only one partial
3269 * path of interest: the cheapest one. That will be the one at the front
3270 * of partial_pathlist because of the way add_partial_path works.
3271 */
3276 NULL, rowsp);
3278
3279 /*
3280 * For each useful ordering, we can consider an order-preserving Gather
3281 * Merge.
3282 */
3283 foreach(lc, rel->partial_pathlist)
3284 {
3285 Path *subpath = (Path *) lfirst(lc);
3286 GatherMergePath *path;
3287
3288 if (subpath->pathkeys == NIL)
3289 continue;
3290
3293 subpath->pathkeys, NULL, rowsp);
3294 add_path(rel, &path->path);
3295 }
3296}
3297
3298/*
3299 * get_useful_pathkeys_for_relation
3300 * Determine which orderings of a relation might be useful.
3301 *
3302 * Getting data in sorted order can be useful either because the requested
3303 * order matches the final output ordering for the overall query we're
3304 * planning, or because it enables an efficient merge join. Here, we try
3305 * to figure out which pathkeys to consider.
3306 *
3307 * This allows us to do incremental sort on top of an index scan under a gather
3308 * merge node, i.e. parallelized.
3309 *
3310 * If the require_parallel_safe is true, we also require the expressions to
3311 * be parallel safe (which allows pushing the sort below Gather Merge).
3312 *
3313 * XXX At the moment this can only ever return a list with a single element,
3314 * because it looks at query_pathkeys only. So we might return the pathkeys
3315 * directly, but it seems plausible we'll want to consider other orderings
3316 * in the future. For example, we might want to consider pathkeys useful for
3317 * merge joins.
3318 */
3319static List *
/*
 * NOTE(review): doxygen-extracted listing; lost lines 3320-3321 (signature,
 * presumably taking root, rel and require_parallel_safe), 3323 (the
 * "useful_pathkeys_list" initialization), 3338 (the "pathkey" declaration),
 * 3353-3354 (the per-pathkey EC-member eligibility test that breaks the
 * loop), and 3366/3369 (the lappend call heads).  Restore from upstream
 * allpaths.c before compiling.
 */
3322{
3324
3325 /*
3326 * Considering query_pathkeys is always worth it, because it might allow
3327 * us to avoid a total sort when we have a partially presorted path
3328 * available or to push the total sort into the parallel portion of the
3329 * query.
3330 */
3331 if (root->query_pathkeys)
3332 {
3333 ListCell *lc;
3334 int npathkeys = 0; /* useful pathkeys */
3335
3336 foreach(lc, root->query_pathkeys)
3337 {
3339 EquivalenceClass *pathkey_ec = pathkey->pk_eclass;
3340
3341 /*
3342 * We can only build a sort for pathkeys that contain a
3343 * safe-to-compute-early EC member computable from the current
3344 * relation's reltarget, so ignore the remainder of the list as
3345 * soon as we find a pathkey without such a member.
3346 *
3347 * It's still worthwhile to return any prefix of the pathkeys list
3348 * that meets this requirement, as we may be able to do an
3349 * incremental sort.
3350 *
3351 * If requested, ensure the sort expression is parallel-safe too.
3352 */
3355 break;
3356
3357 npathkeys++;
3358 }
3359
3360 /*
3361 * The whole query_pathkeys list matches, so append it directly, to
3362 * allow comparing pathkeys easily by comparing list pointer. If we
3363 * have to truncate the pathkeys, we gotta do a copy though.
3364 */
3365 if (npathkeys == list_length(root->query_pathkeys))
3367 root->query_pathkeys);
3368 else if (npathkeys > 0)
3370 list_copy_head(root->query_pathkeys,
3371 npathkeys));
3372 }
3373
3374 return useful_pathkeys_list;
3375}
3376
3377/*
3378 * generate_useful_gather_paths
3379 * Generate parallel access paths for a relation by pushing a Gather or
3380 * Gather Merge on top of a partial path.
3381 *
3382 * Unlike plain generate_gather_paths, this looks both at pathkeys of input
3383 * paths (aiming to preserve the ordering), but also considers ordering that
3384 * might be useful for nodes above the gather merge node, and tries to add
3385 * a sort (regular or incremental) to provide that.
3386 */
3387void
/*
 * NOTE(review): doxygen-extracted listing; lost lines 3388 (signature),
 * 3393-3394 (the "useful_pathkeys_list" and "cheapest_partial_path"
 * declarations), 3405 (the generate_gather_paths call), 3408 (the
 * get_useful_pathkeys_for_relation call), 3411 (fetching the cheapest
 * partial path), 3419 (the per-iteration "useful_pathkeys" declaration),
 * 3429 (the pathkeys_count_contained_in call head assigning is_sorted),
 * 3452 (the cheapest-path test), 3470/3473 (the create_sort_path call head
 * and its pathkeys argument), 3477/3480 (likewise for
 * create_incremental_sort_path), and 3483.  Restore from upstream
 * allpaths.c before compiling.
 */
3389{
3390 ListCell *lc;
3391 double rows;
3392 double *rowsp = NULL;
3395
3396 /* If there are no partial paths, there's nothing to do here. */
3397 if (rel->partial_pathlist == NIL)
3398 return;
3399
3400 /* Should we override the rel's rowcount estimate? */
3401 if (override_rows)
3402 rowsp = &rows;
3403
3404 /* generate the regular gather (merge) paths */
3406
3407 /* consider incremental sort for interesting orderings */
3409
3410 /* used for explicit (full) sort paths */
3412
3413 /*
3414 * Consider sorted paths for each interesting ordering. We generate both
3415 * incremental and full sort.
3416 */
3417 foreach(lc, useful_pathkeys_list)
3418 {
3420 ListCell *lc2;
3421 bool is_sorted;
3422 int presorted_keys;
3423
3424 foreach(lc2, rel->partial_pathlist)
3425 {
3426 Path *subpath = (Path *) lfirst(lc2);
3427 GatherMergePath *path;
3428
3430 subpath->pathkeys,
3431 &presorted_keys);
3432
3433 /*
3434 * We don't need to consider the case where a subpath is already
3435 * fully sorted because generate_gather_paths already creates a
3436 * gather merge path for every subpath that has pathkeys present.
3437 *
3438 * But since the subpath is already sorted, we know we don't need
3439 * to consider adding a sort (full or incremental) on top of it,
3440 * so we can continue here.
3441 */
3442 if (is_sorted)
3443 continue;
3444
3445 /*
3446 * Try at least sorting the cheapest path and also try
3447 * incrementally sorting any path which is partially sorted
3448 * already (no need to deal with paths which have presorted keys
3449 * when incremental sort is disabled unless it's the cheapest
3450 * input path).
3451 */
3453 (presorted_keys == 0 || !enable_incremental_sort))
3454 continue;
3455
3456 /*
3457 * Consider regular sort for any path that's not presorted or if
3458 * incremental sort is disabled. We've no need to consider both
3459 * sort and incremental sort on the same path. We assume that
3460 * incremental sort is always faster when there are presorted
3461 * keys.
3462 *
3463 * This is not redundant with the gather paths created in
3464 * generate_gather_paths, because that doesn't generate ordered
3465 * output. Here we add an explicit sort to match the useful
3466 * ordering.
3467 */
3468 if (presorted_keys == 0 || !enable_incremental_sort)
3469 {
3471 rel,
3472 subpath,
3474 -1.0);
3475 }
3476 else
3478 rel,
3479 subpath,
3481 presorted_keys,
3482 -1);
3484 path = create_gather_merge_path(root, rel,
3485 subpath,
3486 rel->reltarget,
3487 subpath->pathkeys,
3488 NULL,
3489 rowsp);
3490
3491 add_path(rel, &path->path);
3492 }
3493 }
3494}
3495
3496/*
3497 * generate_grouped_paths
3498 * Generate paths for a grouped relation by adding sorted and hashed
3499 * partial aggregation paths on top of paths of the ungrouped relation.
3500 *
3501 * The information needed is provided by the RelAggInfo structure stored in
3502 * "grouped_rel".
3503 */
3504void
3506 RelOptInfo *rel)
3507{
3508 RelAggInfo *agg_info = grouped_rel->agg_info;
3510 bool can_hash;
3511 bool can_sort;
3512 Path *cheapest_total_path = NULL;
3514 double dNumGroups = 0;
3515 double dNumPartialGroups = 0;
3516 List *group_pathkeys = NIL;
3517
 /* An empty (dummy) input relation makes the grouped relation empty too. */
3518 if (IS_DUMMY_REL(rel))
3519 {
3520 mark_dummy_rel(grouped_rel);
3521 return;
3522 }
3523
3524 /*
3525 * We push partial aggregation only to the lowest possible level in the
3526 * join tree that is deemed useful.
3527 */
3528 if (!bms_equal(agg_info->apply_agg_at, rel->relids) ||
3529 !agg_info->agg_useful)
3530 return;
3531
3532 MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
3534
3535 /*
3536 * Determine whether it's possible to perform sort-based implementations
3537 * of grouping, and generate the pathkeys that represent the grouping
3538 * requirements in that case.
3539 */
3541 if (can_sort)
3542 {
3545
3547 rel->top_parent->grouped_rel : grouped_rel;
3550
3551 group_pathkeys =
3554 }
3555
3556 /*
3557 * Determine whether we should consider hash-based implementations of
3558 * grouping.
3559 */
 /* Ordered aggregates cannot be handled here, so none must exist. */
3560 Assert(root->numOrderedAggs == 0);
3561 can_hash = (agg_info->group_clauses != NIL &&
3563
3564 /*
3565 * Consider whether we should generate partially aggregated non-partial
3566 * paths. We can only do this if we have a non-partial path.
3567 */
3568 if (rel->pathlist != NIL)
3569 {
3570 cheapest_total_path = rel->cheapest_total_path;
3571 Assert(cheapest_total_path != NULL);
3572 }
3573
3574 /*
3575 * If parallelism is possible for grouped_rel, then we should consider
3576 * generating partially-grouped partial paths. However, if the ungrouped
3577 * rel has no partial paths, then we can't.
3578 */
3579 if (grouped_rel->consider_parallel && rel->partial_pathlist != NIL)
3580 {
3583 }
3584
3585 /* Estimate number of partial groups. */
3586 if (cheapest_total_path != NULL)
3588 agg_info->group_exprs,
3589 cheapest_total_path->rows,
3590 NULL, NULL);
3593 agg_info->group_exprs,
3595 NULL, NULL);
3596
3597 if (can_sort && cheapest_total_path != NULL)
3598 {
3599 ListCell *lc;
3600
3601 /*
3602 * Use any available suitably-sorted path as input, and also consider
3603 * sorting the cheapest-total path and incremental sort on any paths
3604 * with presorted keys.
3605 *
3606 * To save planning time, we ignore parameterized input paths unless
3607 * they are the cheapest-total path.
3608 */
3609 foreach(lc, rel->pathlist)
3610 {
3611 Path *input_path = (Path *) lfirst(lc);
3612 Path *path;
3613 bool is_sorted;
3614 int presorted_keys;
3615
3616 /*
3617 * Ignore parameterized paths that are not the cheapest-total
3618 * path.
3619 */
3620 if (input_path->param_info &&
3621 input_path != cheapest_total_path)
3622 continue;
3623
3624 is_sorted = pathkeys_count_contained_in(group_pathkeys,
3625 input_path->pathkeys,
3626 &presorted_keys);
3627
3628 /*
3629 * Ignore paths that are not suitably or partially sorted, unless
3630 * they are the cheapest total path (no need to deal with paths
3631 * which have presorted keys when incremental sort is disabled).
3632 */
3633 if (!is_sorted && input_path != cheapest_total_path &&
3634 (presorted_keys == 0 || !enable_incremental_sort))
3635 continue;
3636
3637 /*
3638 * Since the path originates from a non-grouped relation that is
3639 * not aware of eager aggregation, we must ensure that it provides
3640 * the correct input for partial aggregation.
3641 */
3642 path = (Path *) create_projection_path(root,
3643 grouped_rel,
3644 input_path,
3645 agg_info->agg_input);
3646
3647 if (!is_sorted)
3648 {
3649 /*
3650 * We've no need to consider both a sort and incremental sort.
3651 * We'll just do a sort if there are no presorted keys and an
3652 * incremental sort when there are presorted keys.
3653 */
3654 if (presorted_keys == 0 || !enable_incremental_sort)
3655 path = (Path *) create_sort_path(root,
3656 grouped_rel,
3657 path,
3658 group_pathkeys,
3659 -1.0);
3660 else
3662 grouped_rel,
3663 path,
3664 group_pathkeys,
3665 presorted_keys,
3666 -1.0);
3667 }
3668
3669 /*
3670 * qual is NIL because the HAVING clause cannot be evaluated until
3671 * the final value of the aggregate is known.
3672 */
3673 path = (Path *) create_agg_path(root,
3674 grouped_rel,
3675 path,
3676 agg_info->target,
3677 AGG_SORTED,
3679 agg_info->group_clauses,
3680 NIL,
3681 &agg_costs,
3682 dNumGroups);
3683
3684 add_path(grouped_rel, path);
3685 }
3686 }
3687
3689 {
3690 ListCell *lc;
3691
3692 /* Similar to above logic, but for partial paths. */
3693 foreach(lc, rel->partial_pathlist)
3694 {
3695 Path *input_path = (Path *) lfirst(lc);
3696 Path *path;
3697 bool is_sorted;
3698 int presorted_keys;
3699
3700 is_sorted = pathkeys_count_contained_in(group_pathkeys,
3701 input_path->pathkeys,
3702 &presorted_keys);
3703
3704 /*
3705 * Ignore paths that are not suitably or partially sorted, unless
3706 * they are the cheapest partial path (no need to deal with paths
3707 * which have presorted keys when incremental sort is disabled).
3708 */
3710 (presorted_keys == 0 || !enable_incremental_sort))
3711 continue;
3712
3713 /*
3714 * Since the path originates from a non-grouped relation that is
3715 * not aware of eager aggregation, we must ensure that it provides
3716 * the correct input for partial aggregation.
3717 */
3718 path = (Path *) create_projection_path(root,
3719 grouped_rel,
3720 input_path,
3721 agg_info->agg_input);
3722
3723 if (!is_sorted)
3724 {
3725 /*
3726 * We've no need to consider both a sort and incremental sort.
3727 * We'll just do a sort if there are no presorted keys and an
3728 * incremental sort when there are presorted keys.
3729 */
3730 if (presorted_keys == 0 || !enable_incremental_sort)
3731 path = (Path *) create_sort_path(root,
3732 grouped_rel,
3733 path,
3734 group_pathkeys,
3735 -1.0);
3736 else
3738 grouped_rel,
3739 path,
3740 group_pathkeys,
3741 presorted_keys,
3742 -1.0);
3743 }
3744
3745 /*
3746 * qual is NIL because the HAVING clause cannot be evaluated until
3747 * the final value of the aggregate is known.
3748 */
3749 path = (Path *) create_agg_path(root,
3750 grouped_rel,
3751 path,
3752 agg_info->target,
3753 AGG_SORTED,
3755 agg_info->group_clauses,
3756 NIL,
3757 &agg_costs,
3759
3760 add_partial_path(grouped_rel, path);
3761 }
3762 }
3763
3764 /*
3765 * Add a partially-grouped HashAgg Path where possible
3766 */
3767 if (can_hash && cheapest_total_path != NULL)
3768 {
3769 Path *path;
3770
3771 /*
3772 * Since the path originates from a non-grouped relation that is not
3773 * aware of eager aggregation, we must ensure that it provides the
3774 * correct input for partial aggregation.
3775 */
3776 path = (Path *) create_projection_path(root,
3777 grouped_rel,
3778 cheapest_total_path,
3779 agg_info->agg_input);
3780
3781 /*
3782 * qual is NIL because the HAVING clause cannot be evaluated until the
3783 * final value of the aggregate is known.
3784 */
3785 path = (Path *) create_agg_path(root,
3786 grouped_rel,
3787 path,
3788 agg_info->target,
3789 AGG_HASHED,
3791 agg_info->group_clauses,
3792 NIL,
3793 &agg_costs,
3794 dNumGroups);
3795
3796 add_path(grouped_rel, path);
3797 }
3798
3799 /*
3800 * Now add a partially-grouped HashAgg partial Path where possible
3801 */
3803 {
3804 Path *path;
3805
3806 /*
3807 * Since the path originates from a non-grouped relation that is not
3808 * aware of eager aggregation, we must ensure that it provides the
3809 * correct input for partial aggregation.
3810 */
3811 path = (Path *) create_projection_path(root,
3812 grouped_rel,
3814 agg_info->agg_input);
3815
3816 /*
3817 * qual is NIL because the HAVING clause cannot be evaluated until the
3818 * final value of the aggregate is known.
3819 */
3820 path = (Path *) create_agg_path(root,
3821 grouped_rel,
3822 path,
3823 agg_info->target,
3824 AGG_HASHED,
3826 agg_info->group_clauses,
3827 NIL,
3828 &agg_costs,
3830
3831 add_partial_path(grouped_rel, path);
3832 }
3833}
3834
3835/*
3836 * make_rel_from_joinlist
3837 * Build access paths using a "joinlist" to guide the join path search.
3838 *
3839 * See comments for deconstruct_jointree() for definition of the joinlist
3840 * data structure.
3841 */
3842static RelOptInfo *
3844{
3845 int levels_needed;
3846 List *initial_rels;
3847 ListCell *jl;
3848
3849 /*
3850 * Count the number of child joinlist nodes. This is the depth of the
3851 * dynamic-programming algorithm we must employ to consider all ways of
3852 * joining the child nodes.
3853 */
3855
3856 if (levels_needed <= 0)
3857 return NULL; /* nothing to do? */
3858
3859 /*
3860 * Construct a list of rels corresponding to the child joinlist nodes.
3861 * This may contain both base rels and rels constructed according to
3862 * sub-joinlists.
3863 */
3864 initial_rels = NIL;
3865 foreach(jl, joinlist)
3866 {
3867 Node *jlnode = (Node *) lfirst(jl);
3869
3870 if (IsA(jlnode, RangeTblRef))
3871 {
3872 int varno = ((RangeTblRef *) jlnode)->rtindex;
3873
3874 thisrel = find_base_rel(root, varno);
3875 }
3876 else if (IsA(jlnode, List))
3877 {
3878 /* Recurse to handle subproblem */
3880 }
3881 else
3882 {
3883 elog(ERROR, "unrecognized joinlist node type: %d",
3884 (int) nodeTag(jlnode));
3885 thisrel = NULL; /* keep compiler quiet */
3886 }
3887
3888 initial_rels = lappend(initial_rels, thisrel);
3889 }
3890
3891 if (levels_needed == 1)
3892 {
3893 /*
3894 * Single joinlist node, so we're done.
3895 */
3896 return (RelOptInfo *) linitial(initial_rels);
3897 }
3898 else
3899 {
3900 /*
3901 * Consider the different orders in which we could join the rels,
3902 * using a plugin, GEQO, or the regular join search code.
3903 *
3904 * We put the initial_rels list into a PlannerInfo field because
3905 * has_legal_joinclause() needs to look at it (ugly :-().
3906 */
3907 root->initial_rels = initial_rels;
3908
 /* A plugin-provided search takes precedence over GEQO and the
 * standard search. */
3909 if (join_search_hook)
3910 return (*join_search_hook) (root, levels_needed, initial_rels);
3912 return geqo(root, levels_needed, initial_rels);
3913 else
3914 return standard_join_search(root, levels_needed, initial_rels);
3915 }
3916}
3917
3918/*
3919 * standard_join_search
3920 * Find possible joinpaths for a query by successively finding ways
3921 * to join component relations into join relations.
3922 *
3923 * 'levels_needed' is the number of iterations needed, ie, the number of
3924 * independent jointree items in the query. This is > 1.
3925 *
3926 * 'initial_rels' is a list of RelOptInfo nodes for each independent
3927 * jointree item. These are the components to be joined together.
3928 * Note that levels_needed == list_length(initial_rels).
3929 *
3930 * Returns the final level of join relations, i.e., the relation that is
3931 * the result of joining all the original relations together.
3932 * At least one implementation path must be provided for this relation and
3933 * all required sub-relations.
3934 *
3935 * To support loadable plugins that modify planner behavior by changing the
3936 * join searching algorithm, we provide a hook variable that lets a plugin
3937 * replace or supplement this function. Any such hook must return the same
3938 * final join relation as the standard code would, but it might have a
3939 * different set of implementation paths attached, and only the sub-joinrels
3940 * needed for these paths need have been instantiated.
3941 *
3942 * Note to plugin authors: the functions invoked during standard_join_search()
3943 * modify root->join_rel_list and root->join_rel_hash. If you want to do more
3944 * than one join-order search, you'll probably need to save and restore the
3945 * original states of those data structures. See geqo_eval() for an example.
3946 */
3947RelOptInfo *
3949{
3950 int lev;
3951 RelOptInfo *rel;
3952
3953 /*
3954 * This function cannot be invoked recursively within any one planning
3955 * problem, so join_rel_level[] can't be in use already.
3956 */
3957 Assert(root->join_rel_level == NULL);
3958
3959 /*
3960 * We employ a simple "dynamic programming" algorithm: we first find all
3961 * ways to build joins of two jointree items, then all ways to build joins
3962 * of three items (from two-item joins and single items), then four-item
3963 * joins, and so on until we have considered all ways to join all the
3964 * items into one rel.
3965 *
3966 * root->join_rel_level[j] is a list of all the j-item rels. Initially we
3967 * set root->join_rel_level[1] to represent all the single-jointree-item
3968 * relations.
3969 */
 /* Slot 0 is unused; levels run 1..levels_needed, hence the +1. */
3970 root->join_rel_level = (List **) palloc0((levels_needed + 1) * sizeof(List *));
3971
3972 root->join_rel_level[1] = initial_rels;
3973
3974 for (lev = 2; lev <= levels_needed; lev++)
3975 {
3976 ListCell *lc;
3977
3978 /*
3979 * Determine all possible pairs of relations to be joined at this
3980 * level, and build paths for making each one from every available
3981 * pair of lower-level relations.
3982 */
3984
3985 /*
3986 * Run generate_partitionwise_join_paths() and
3987 * generate_useful_gather_paths() for each just-processed joinrel. We
3988 * could not do this earlier because both regular and partial paths
3989 * can get added to a particular joinrel at multiple times within
3990 * join_search_one_level.
3991 *
3992 * After that, we're done creating paths for the joinrel, so run
3993 * set_cheapest().
3994 *
3995 * In addition, we also run generate_grouped_paths() for the grouped
3996 * relation of each just-processed joinrel, and run set_cheapest() for
3997 * the grouped relation afterwards.
3998 */
3999 foreach(lc, root->join_rel_level[lev])
4000 {
4001 bool is_top_rel;
4002
4003 rel = (RelOptInfo *) lfirst(lc);
4004
4005 is_top_rel = bms_equal(rel->relids, root->all_query_rels);
4006
4007 /* Create paths for partitionwise joins. */
4009
4010 /*
4011 * Except for the topmost scan/join rel, consider gathering
4012 * partial paths. We'll do the same for the topmost scan/join rel
4013 * once we know the final targetlist (see grouping_planner's and
4014 * its call to apply_scanjoin_target_to_paths).
4015 */
4016 if (!is_top_rel)
4018
4019 /* Find and save the cheapest paths for this rel */
4020 set_cheapest(rel);
4021
4022 /*
4023 * Except for the topmost scan/join rel, consider generating
4024 * partial aggregation paths for the grouped relation on top of
4025 * the paths of this rel. After that, we're done creating paths
4026 * for the grouped relation, so run set_cheapest().
4027 */
4028 if (rel->grouped_rel != NULL && !is_top_rel)
4029 {
4030 RelOptInfo *grouped_rel = rel->grouped_rel;
4031
4032 Assert(IS_GROUPED_REL(grouped_rel));
4033
4034 generate_grouped_paths(root, grouped_rel, rel);
4035 set_cheapest(grouped_rel);
4036 }
4037
4038#ifdef OPTIMIZER_DEBUG
4039 pprint(rel);
4040#endif
4041 }
4042 }
4043
4044 /*
4045 * We should have a single rel at the final level.
4046 */
4047 if (root->join_rel_level[levels_needed] == NIL)
4048 elog(ERROR, "failed to build any %d-way joins", levels_needed);
4049 Assert(list_length(root->join_rel_level[levels_needed]) == 1);
4050
4051 rel = (RelOptInfo *) linitial(root->join_rel_level[levels_needed]);
4052
4053 root->join_rel_level = NULL;
4054
4055 return rel;
4056}
4057
4058/*****************************************************************************
4059 * PUSHING QUALS DOWN INTO SUBQUERIES
4060 *****************************************************************************/
4061
4062/*
4063 * subquery_is_pushdown_safe - is a subquery safe for pushing down quals?
4064 *
4065 * subquery is the particular component query being checked. topquery
4066 * is the top component of a set-operations tree (the same Query if no
4067 * set-op is involved).
4068 *
4069 * Conditions checked here:
4070 *
4071 * 1. If the subquery has a LIMIT clause, we must not push down any quals,
4072 * since that could change the set of rows returned.
4073 *
4074 * 2. If the subquery contains EXCEPT or EXCEPT ALL set ops we cannot push
4075 * quals into it, because that could change the results.
4076 *
4077 * 3. If the subquery uses DISTINCT, we cannot push volatile quals into it.
4078 * This is because upper-level quals should semantically be evaluated only
4079 * once per distinct row, not once per original row, and if the qual is
4080 * volatile then extra evaluations could change the results. (This issue
4081 * does not apply to other forms of aggregation such as GROUP BY, because
4082 * when those are present we push into HAVING not WHERE, so that the quals
4083 * are still applied after aggregation.)
4084 *
4085 * 4. If the subquery contains window functions, we cannot push volatile quals
4086 * into it. The issue here is a bit different from DISTINCT: a volatile qual
4087 * might succeed for some rows of a window partition and fail for others,
4088 * thereby changing the partition contents and thus the window functions'
4089 * results for rows that remain.
4090 *
4091 * 5. If the subquery contains any set-returning functions in its targetlist,
4092 * we cannot push volatile quals into it. That would push them below the SRFs
4093 * and thereby change the number of times they are evaluated. Also, a
4094 * volatile qual could succeed for some SRF output rows and fail for others,
4095 * a behavior that cannot occur if it's evaluated before SRF expansion.
4096 *
4097 * 6. If the subquery has nonempty grouping sets, we cannot push down any
4098 * quals. The concern here is that a qual referencing a "constant" grouping
4099 * column could get constant-folded, which would be improper because the value
4100 * is potentially nullable by grouping-set expansion. This restriction could
4101 * be removed if we had a parsetree representation that shows that such
4102 * grouping columns are not really constant. (There are other ideas that
4103 * could be used to relax this restriction, but that's the approach most
4104 * likely to get taken in the future. Note that there's not much to be gained
4105 * so long as subquery_planner can't move HAVING clauses to WHERE within such
4106 * a subquery.)
4107 *
4108 * In addition, we make several checks on the subquery's output columns to see
4109 * if it is safe to reference them in pushed-down quals. If output column k
4110 * is found to be unsafe to reference, we set the reason for that inside
4111 * safetyInfo->unsafeFlags[k], but we don't reject the subquery overall since
4112 * column k might not be referenced by some/all quals. The unsafeFlags[]
4113 * array will be consulted later by qual_is_pushdown_safe(). It's better to
4114 * do it this way than to make the checks directly in qual_is_pushdown_safe(),
4115 * because when the subquery involves set operations we have to check the
4116 * output expressions in each arm of the set op.
4117 *
4118 * Note: pushing quals into a DISTINCT subquery is theoretically dubious:
4119 * we're effectively assuming that the quals cannot distinguish values that
4120 * the DISTINCT's equality operator sees as equal, yet there are many
4121 * counterexamples to that assumption. However use of such a qual with a
4122 * DISTINCT subquery would be unsafe anyway, since there's no guarantee which
4123 * "equal" value will be chosen as the output value by the DISTINCT operation.
4124 * So we don't worry too much about that. Another objection is that if the
4125 * qual is expensive to evaluate, running it for each original row might cost
4126 * more than we save by eliminating rows before the DISTINCT step. But it
4127 * would be very hard to estimate that at this stage, and in practice pushdown
4128 * seldom seems to make things worse, so we ignore that problem too.
4129 *
4130 * Note: likewise, pushing quals into a subquery with window functions is a
4131 * bit dubious: the quals might remove some rows of a window partition while
4132 * leaving others, causing changes in the window functions' results for the
4133 * surviving rows. We insist that such a qual reference only partitioning
4134 * columns, but again that only protects us if the qual does not distinguish
4135 * values that the partitioning equality operator sees as equal. The risks
4136 * here are perhaps larger than for DISTINCT, since no de-duplication of rows
4137 * occurs and thus there is no theoretical problem with such a qual. But
4138 * we'll do this anyway because the potential performance benefits are very
4139 * large, and we've seen no field complaints about the longstanding comparable
4140 * behavior with DISTINCT.
4141 */
4142static bool
4145{
4147
4148 /* Check point 1 */
4149 if (subquery->limitOffset != NULL || subquery->limitCount != NULL)
4150 return false;
4151
4152 /* Check point 6 */
4153 if (subquery->groupClause && subquery->groupingSets)
4154 return false;
4155
4156 /* Check points 3, 4, and 5 */
4157 if (subquery->distinctClause ||
4158 subquery->hasWindowFuncs ||
4159 subquery->hasTargetSRFs)
4160 safetyInfo->unsafeVolatile = true;
4161
4162 /*
4163 * If we're at a leaf query, check for unsafe expressions in its target
4164 * list, and mark any reasons why they're unsafe in unsafeFlags[].
4165 * (Non-leaf nodes in setop trees have only simple Vars in their tlists,
4166 * so no need to check them.)
4167 */
4168 if (subquery->setOperations == NULL)
4170
4171 /* Are we at top level, or looking at a setop component? */
4172 if (subquery == topquery)
4173 {
4174 /* Top level, so check any component queries */
4175 if (subquery->setOperations != NULL)
4177 safetyInfo))
4178 return false;
4179 }
4180 else
4181 {
4182 /* Setop component must not have more components (too weird) */
4183 if (subquery->setOperations != NULL)
4184 return false;
4185 /* Check whether setop component output types match top level */
4186 topop = castNode(SetOperationStmt, topquery->setOperations);
4187 Assert(topop);
4189 topop->colTypes,
4190 safetyInfo);
4191 }
 /*
 * Passed every whole-subquery check; column-level hazards, if any,
 * have been recorded in safetyInfo->unsafeFlags[].
 */
4192 return true;
4193}
4194
4195/*
4196 * Helper routine to recurse through setOperations tree
4197 */
4198static bool
4201{
 /* Leaf of the setop tree: check the component subquery directly. */
4202 if (IsA(setOp, RangeTblRef))
4203 {
4205 RangeTblEntry *rte = rt_fetch(rtr->rtindex, topquery->rtable);
4206 Query *subquery = rte->subquery;
4207
4208 Assert(subquery != NULL);
4210 }
4211 else if (IsA(setOp, SetOperationStmt))
4212 {
4214
4215 /* EXCEPT is no good (point 2 for subquery_is_pushdown_safe) */
4216 if (op->op == SETOP_EXCEPT)
4217 return false;
4218 /* Else recurse */
4220 return false;
4222 return false;
4223 }
4224 else
4225 {
4226 elog(ERROR, "unrecognized node type: %d",
4227 (int) nodeTag(setOp));
4228 }
4229 return true;
4230}
4231
4232/*
4233 * check_output_expressions - check subquery's output expressions for safety
4234 *
4235 * There are several cases in which it's unsafe to push down an upper-level
4236 * qual if it references a particular output column of a subquery. We check
4237 * each output column of the subquery and set flags in unsafeFlags[k] when we
4238 * see that column is unsafe for a pushed-down qual to reference. The
4239 * conditions checked here are:
4240 *
4241 * 1. We must not push down any quals that refer to subselect outputs that
4242 * return sets, else we'd introduce functions-returning-sets into the
4243 * subquery's WHERE/HAVING quals.
4244 *
4245 * 2. We must not push down any quals that refer to subselect outputs that
4246 * contain volatile functions, for fear of introducing strange results due
4247 * to multiple evaluation of a volatile function.
4248 *
4249 * 3. If the subquery uses DISTINCT ON, we must not push down any quals that
4250 * refer to non-DISTINCT output columns, because that could change the set
4251 * of rows returned. (This condition is vacuous for DISTINCT, because then
4252 * there are no non-DISTINCT output columns, so we needn't check. Note that
4253 * subquery_is_pushdown_safe already reported that we can't use volatile
4254 * quals if there's DISTINCT or DISTINCT ON.)
4255 *
4256 * 4. If the subquery has any window functions, we must not push down quals
4257 * that reference any output columns that are not listed in all the subquery's
4258 * window PARTITION BY clauses. We can push down quals that use only
4259 * partitioning columns because they should succeed or fail identically for
4260 * every row of any one window partition, and totally excluding some
4261 * partitions will not change a window function's results for remaining
4262 * partitions. (Again, this also requires nonvolatile quals, but
4263 * subquery_is_pushdown_safe handles that.). Subquery columns marked as
4264 * unsafe for this reason can still have WindowClause run conditions pushed
4265 * down.
4266 */
4267static void
4269{
4271 ListCell *lc;
4272
4273 /*
4274 * We must be careful with grouping Vars and join alias Vars in the
4275 * subquery's outputs, as they hide the underlying expressions.
4276 *
4277 * We need to expand grouping Vars to their underlying expressions (the
4278 * grouping clauses) because the grouping expressions themselves might be
4279 * volatile or set-returning. However, we do not need to expand join
4280 * alias Vars, as their underlying structure does not introduce volatile
4281 * or set-returning functions at the current level.
4282 *
4283 * In neither case do we need to recursively examine the Vars contained in
4284 * these underlying expressions. Even if they reference outputs from
4285 * lower-level subqueries (at any depth), those references are guaranteed
4286 * not to expand to volatile or set-returning functions, because
4287 * subqueries containing such functions in their targetlists are never
4288 * pulled up.
4289 */
4290 if (subquery->hasGroupRTE)
4291 {
4292 /*
4293 * We can safely pass NULL for the root here. This function uses the
4294 * expanded expressions solely to check for volatile or set-returning
4295 * functions, which is independent of the Vars' nullingrels.
4296 */
4298 flatten_group_exprs(NULL, subquery, (Node *) subquery->targetList);
4299 }
4300
 /* Examine each non-junk output column and accumulate unsafe-reasons. */
4301 foreach(lc, flattened_targetList)
4302 {
4304
4305 if (tle->resjunk)
4306 continue; /* ignore resjunk columns */
4307
4308 /* Functions returning sets are unsafe (point 1) */
4309 if (subquery->hasTargetSRFs &&
4310 (safetyInfo->unsafeFlags[tle->resno] &
4311 UNSAFE_HAS_SET_FUNC) == 0 &&
4312 expression_returns_set((Node *) tle->expr))
4313 {
4314 safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_HAS_SET_FUNC;
4315 continue;
4316 }
4317
4318 /* Volatile functions are unsafe (point 2) */
4319 if ((safetyInfo->unsafeFlags[tle->resno] &
4322 {
4323 safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_HAS_VOLATILE_FUNC;
4324 continue;
4325 }
4326
4327 /* If subquery uses DISTINCT ON, check point 3 */
4328 if (subquery->hasDistinctOn &&
4329 (safetyInfo->unsafeFlags[tle->resno] &
4332 {
4333 /* non-DISTINCT column, so mark it unsafe */
4334 safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_NOTIN_DISTINCTON_CLAUSE;
4335 continue;
4336 }
4337
4338 /* If subquery uses window functions, check point 4 */
4339 if (subquery->hasWindowFuncs &&
4340 (safetyInfo->unsafeFlags[tle->resno] &
4342 !targetIsInAllPartitionLists(tle, subquery))
4343 {
4344 /* not present in all PARTITION BY clauses, so mark it unsafe */
4345 safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_NOTIN_PARTITIONBY_CLAUSE;
4346 continue;
4347 }
4348 }
4349}
4350
4351/*
4352 * For subqueries using UNION/UNION ALL/INTERSECT/INTERSECT ALL, we can
4353 * push quals into each component query, but the quals can only reference
4354 * subquery columns that suffer no type coercions in the set operation.
4355 * Otherwise there are possible semantic gotchas. So, we check the
4356 * component queries to see if any of them have output types different from
4357 * the top-level setop outputs. We set the UNSAFE_TYPE_MISMATCH bit in
4358 * unsafeFlags[k] if column k has different type in any component.
4359 *
4360 * We don't have to care about typmods here: the only allowed difference
4361 * between set-op input and output typmods is input is a specific typmod
4362 * and output is -1, and that does not require a coercion.
4363 *
4364 * tlist is a subquery tlist.
4365 * colTypes is an OID list of the top-level setop's output column types.
4366 * safetyInfo is the pushdown_safety_info to set unsafeFlags[] for.
4367 */
4368static void
4371{
4372 ListCell *l;
4374
 /* Walk the tlist and colTypes lists in lockstep; lengths must match,
 * as enforced by the elog() checks below. */
4375 foreach(l, tlist)
4376 {
4378
4379 if (tle->resjunk)
4380 continue; /* ignore resjunk columns */
4381 if (colType == NULL)
4382 elog(ERROR, "wrong number of tlist entries");
4383 if (exprType((Node *) tle->expr) != lfirst_oid(colType))
4384 safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_TYPE_MISMATCH;
4386 }
4387 if (colType != NULL)
4388 elog(ERROR, "wrong number of tlist entries");
4389}
4390
4391/*
4392 * targetIsInAllPartitionLists
4393 * True if the TargetEntry is listed in the PARTITION BY clause
4394 * of every window defined in the query.
4395 *
4396 * It would be safe to ignore windows not actually used by any window
4397 * function, but it's not easy to get that info at this stage; and it's
4398 * unlikely to be useful to spend any extra cycles getting it, since
4399 * unreferenced window definitions are probably infrequent in practice.
4400 */
4401static bool
4403{
4404 ListCell *lc;
4405
4406 foreach(lc, query->windowClause)
4407 {
4409
4411 return false;
4412 }
 /* Either no windows exist, or the tle appeared in every PARTITION BY. */
4413 return true;
4414}
4415
4416/*
4417 * qual_is_pushdown_safe - is a particular rinfo safe to push down?
4418 *
4419 * rinfo is a restriction clause applying to the given subquery (whose RTE
4420 * has index rti in the parent query).
4421 *
4422 * Conditions checked here:
4423 *
4424 * 1. rinfo's clause must not contain any SubPlans (mainly because it's
4425 * unclear that it will work correctly: SubLinks will already have been
4426 * transformed into SubPlans in the qual, but not in the subquery). Note that
4427 * SubLinks that transform to initplans are safe, and will be accepted here
4428 * because what we'll see in the qual is just a Param referencing the initplan
4429 * output.
4430 *
4431 * 2. If unsafeVolatile is set, rinfo's clause must not contain any volatile
4432 * functions.
4433 *
4434 * 3. If unsafeLeaky is set, rinfo's clause must not contain any leaky
4435 * functions that are passed Var nodes, and therefore might reveal values from
4436 * the subquery as side effects.
4437 *
4438 * 4. rinfo's clause must not refer to the whole-row output of the subquery
4439 * (since there is no easy way to name that within the subquery itself).
4440 *
4441 * 5. rinfo's clause must not refer to any subquery output columns that were
4442 * found to be unsafe to reference by subquery_is_pushdown_safe().
4443 */
4444static pushdown_safe_type
/* NOTE(review): lines 4445-4446 (parameter list) were elided; per the index
 * the signature is (Query *subquery, Index rti, RestrictInfo *rinfo,
 * pushdown_safety_info *safetyInfo). */
4447{
 /* NOTE(review): line 4448 elided — presumably the declaration/initializer
  * of the local "safe" returned at the bottom (likely PUSHDOWN_SAFE). */
4449 Node *qual = (Node *) rinfo->clause;
4450 List *vars;
4451 ListCell *vl;
4452
4453 /* Refuse subselects (point 1) */
4454 if (contain_subplans(qual))
4455 return PUSHDOWN_UNSAFE;
4456
4457 /* Refuse volatile quals if we found they'd be unsafe (point 2) */
4458 if (safetyInfo->unsafeVolatile &&
 /* NOTE(review): line 4459 elided — presumably
  * contain_volatile_functions((Node *) rinfo->clause)) */
4460 return PUSHDOWN_UNSAFE;
4461
4462 /* Refuse leaky quals if told to (point 3) */
4463 if (safetyInfo->unsafeLeaky &&
4464 contain_leaked_vars(qual))
4465 return PUSHDOWN_UNSAFE;
4466
4467 /*
4468 * Examine all Vars used in clause. Since it's a restriction clause, all
4469 * such Vars must refer to subselect output columns ... unless this is
4470 * part of a LATERAL subquery, in which case there could be lateral
4471 * references.
4472 *
4473 * By omitting the relevant flags, this also gives us a cheap sanity check
4474 * that no aggregates or window functions appear in the qual. Those would
4475 * be unsafe to push down, but at least for the moment we could never see
4476 * any in a qual anyhow.
4477 */
 /* NOTE(review): line 4478 elided — presumably
  * vars = pull_var_clause(qual, PVC_INCLUDE_PLACEHOLDERS); which matches
  * the comment above and the list_free(vars) below. */
4479 foreach(vl, vars)
4480 {
4481 Var *var = (Var *) lfirst(vl);
4482
4483 /*
4484 * XXX Punt if we find any PlaceHolderVars in the restriction clause.
4485 * It's not clear whether a PHV could safely be pushed down, and even
4486 * less clear whether such a situation could arise in any cases of
4487 * practical interest anyway. So for the moment, just refuse to push
4488 * down.
4489 */
4490 if (!IsA(var, Var))
4491 {
 /* NOTE(review): line 4492 elided — presumably safe = PUSHDOWN_UNSAFE; */
4493 break;
4494 }
4495
4496 /*
4497 * Punt if we find any lateral references. It would be safe to push
4498 * these down, but we'd have to convert them into outer references,
4499 * which subquery_push_qual lacks the infrastructure to do. The case
4500 * arises so seldom that it doesn't seem worth working hard on.
4501 */
4502 if (var->varno != rti)
4503 {
 /* NOTE(review): line 4504 elided — presumably safe = PUSHDOWN_UNSAFE; */
4505 break;
4506 }
4507
4508 /* Subqueries have no system columns */
4509 Assert(var->varattno >= 0);
4510
4511 /* Check point 4 */
4512 if (var->varattno == 0)
4513 {
 /* NOTE(review): line 4514 elided — presumably safe = PUSHDOWN_UNSAFE; */
4515 break;
4516 }
4517
4518 /* Check point 5 */
4519 if (safetyInfo->unsafeFlags[var->varattno] != 0)
4520 {
4521 if (safetyInfo->unsafeFlags[var->varattno] &
 /* NOTE(review): lines 4522-4523 elided — presumably the mask of
  * hard-unsafe reasons (volatile/set-returning/DISTINCT ON/type
  * mismatch flags) tested against the column's flags. */
4524 {
 /* NOTE(review): line 4525 elided — presumably safe = PUSHDOWN_UNSAFE; */
4526 break;
4527 }
4528 else
4529 {
4530 /* UNSAFE_NOTIN_PARTITIONBY_CLAUSE is ok for run conditions */
 /* NOTE(review): line 4531 elided — presumably
  * safe = PUSHDOWN_WINDOWCLAUSE_RUNCOND; */
4532 /* don't break, we might find another Var that's unsafe */
4533 }
4534 }
4535 }
4536
4537 list_free(vars);
4538
4539 return safe;
4540}
4541
4542/*
4543 * subquery_push_qual - push down a qual that we have determined is safe
4544 */
4545static void
/* NOTE(review): line 4546 (parameter list) elided; per the index the
 * signature is (Query *subquery, RangeTblEntry *rte, Index rti, Node *qual). */
4547{
 /* A set-operation subquery gets the qual pushed into each leaf SELECT. */
4548 if (subquery->setOperations != NULL)
4549 {
4550 /* Recurse to push it separately to each component query */
4551 recurse_push_qual(subquery->setOperations, subquery,
4552 rte, rti, qual);
4553 }
4554 else
4555 {
4556 /*
4557 * We need to replace Vars in the qual (which must refer to outputs of
4558 * the subquery) with copies of the subquery's targetlist expressions.
4559 * Note that at this point, any uplevel Vars in the qual should have
4560 * been replaced with Params, so they need no work.
4561 *
4562 * This step also ensures that when we are pushing into a setop tree,
4563 * each component query gets its own copy of the qual.
4564 */
4565 qual = ReplaceVarsFromTargetList(qual, rti, 0, rte,
4566 subquery->targetList,
4567 subquery->resultRelation,
 /* NOTE(review): line 4568 elided — one or more further arguments to
  * ReplaceVarsFromTargetList (the nomatch behavior/varno args in the
  * real source) are missing here. */
4569 &subquery->hasSubLinks);
4570
4571 /*
4572 * Now attach the qual to the proper place: normally WHERE, but if the
4573 * subquery uses grouping or aggregation, put it in HAVING (since the
4574 * qual really refers to the group-result rows).
4575 */
4576 if (subquery->hasAggs || subquery->groupClause || subquery->groupingSets || subquery->havingQual)
4577 subquery->havingQual = make_and_qual(subquery->havingQual, qual);
4578 else
4579 subquery->jointree->quals =
4580 make_and_qual(subquery->jointree->quals, qual);
4581
4582 /*
4583 * We need not change the subquery's hasAggs or hasSubLinks flags,
4584 * since we can't be pushing down any aggregates that weren't there
4585 * before, and we don't push down subselects at all.
4586 */
4587 }
4588}
4589
4590/*
4591 * Helper routine to recurse through setOperations tree
4592 *
4593 * Walks a UNION/INTERSECT/EXCEPT tree, pushing the qual into each leaf
4594 * component query via subquery_push_qual.
4595 */
4593static void
/* NOTE(review): line 4594 (first signature line) elided; per the index the
 * signature is (Node *setOp, Query *topquery, RangeTblEntry *rte,
 * Index rti, Node *qual) — the second line survives below. */
4595 RangeTblEntry *rte, Index rti, Node *qual)
4596{
4597 if (IsA(setOp, RangeTblRef))
4598 {
 /* NOTE(review): line 4599 elided — presumably
  * RangeTblRef *rtr = (RangeTblRef *) setOp; (rtr is used just below). */
4600 RangeTblEntry *subrte = rt_fetch(rtr->rtindex, topquery->rtable);
4601 Query *subquery = subrte->subquery;
4602
4603 Assert(subquery != NULL);
4604 subquery_push_qual(subquery, rte, rti, qual);
4605 }
4606 else if (IsA(setOp, SetOperationStmt))
4607 {
 /* NOTE(review): line 4608 elided — presumably
  * SetOperationStmt *op = (SetOperationStmt *) setOp; (op used below). */
4609
4610 recurse_push_qual(op->larg, topquery, rte, rti, qual);
4611 recurse_push_qual(op->rarg, topquery, rte, rti, qual);
4612 }
4613 else
4614 {
 /* Defensive: no other node type should appear in a setOperations tree. */
4615 elog(ERROR, "unrecognized node type: %d",
4616 (int) nodeTag(setOp));
4617 }
4618}
4619
4620/*****************************************************************************
4621 * SIMPLIFYING SUBQUERY TARGETLISTS
4622 *****************************************************************************/
4623
4624/*
4625 * remove_unused_subquery_outputs
4626 * Remove subquery targetlist items we don't need
4627 *
4628 * It's possible, even likely, that the upper query does not read all the
4629 * output columns of the subquery. We can remove any such outputs that are
4630 * not needed by the subquery itself (e.g., as sort/group columns) and do not
4631 * affect semantics otherwise (e.g., volatile functions can't be removed).
4632 * This is useful not only because we might be able to remove expensive-to-
4633 * compute expressions, but because deletion of output columns might allow
4634 * optimizations such as join removal to occur within the subquery.
4635 *
4636 * extra_used_attrs can be passed as non-NULL to mark any columns (offset by
4637 * FirstLowInvalidHeapAttributeNumber) that we should not remove. This
4638 * parameter is modified by the function, so callers must make a copy if they
4639 * need to use the passed in Bitmapset after calling this function.
4640 *
4641 * To avoid affecting column numbering in the targetlist, we don't physically
4642 * remove unused tlist entries, but rather replace their expressions with NULL
4643 * constants. This is implemented by modifying subquery->targetList.
4644 */
4645static void
/* NOTE(review): lines 4646-4647 (parameter list) elided; per the index the
 * signature is (Query *subquery, RelOptInfo *rel, Bitmapset *extra_used_attrs). */
4648{
4649 Bitmapset *attrs_used;
4650 ListCell *lc;
4651
4652 /*
4653 * Just point directly to extra_used_attrs. No need to bms_copy as none of
4654 * the current callers use the Bitmapset after calling this function.
4655 */
4656 attrs_used = extra_used_attrs;
4657
4658 /*
4659 * Do nothing if subquery has UNION/INTERSECT/EXCEPT: in principle we
4660 * could update all the child SELECTs' tlists, but it seems not worth the
4661 * trouble presently.
4662 */
4663 if (subquery->setOperations)
4664 return;
4665
4666 /*
4667 * If subquery has regular DISTINCT (not DISTINCT ON), we're wasting our
4668 * time: all its output columns must be used in the distinctClause.
4669 */
4670 if (subquery->distinctClause && !subquery->hasDistinctOn)
4671 return;
4672
4673 /*
4674 * Collect a bitmap of all the output column numbers used by the upper
4675 * query.
4676 *
4677 * Add all the attributes needed for joins or final output. Note: we must
4678 * look at rel's targetlist, not the attr_needed data, because attr_needed
4679 * isn't computed for inheritance child rels, cf set_append_rel_size().
4680 * (XXX might be worth changing that sometime.)
4681 */
4682 pull_varattnos((Node *) rel->reltarget->exprs, rel->relid, &attrs_used);
4683
4684 /* Add all the attributes used by un-pushed-down restriction clauses. */
4685 foreach(lc, rel->baserestrictinfo)
4686 {
4687 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
4688
4689 pull_varattnos((Node *) rinfo->clause, rel->relid, &attrs_used);
4690 }
4691
4692 /*
4693 * If there's a whole-row reference to the subquery, we can't remove
4694 * anything.
4695 */
 /* NOTE(review): line 4696 elided — presumably the condition
  * if (bms_is_member(0 - FirstLowInvalidHeapAttributeNumber, attrs_used))
  * guarding this early return; verify against the real source. */
4697 return;
4698
4699 /*
4700 * Run through the tlist and zap entries we don't need. It's okay to
4701 * modify the tlist items in-place because set_subquery_pathlist made a
4702 * copy of the subquery.
4703 */
4704 foreach(lc, subquery->targetList)
4705 {
 /* NOTE(review): line 4706 elided — presumably
  * TargetEntry *tle = (TargetEntry *) lfirst(lc); (tle used below). */
4707 Node *texpr = (Node *) tle->expr;
4708
4709 /*
4710 * If it has a sortgroupref number, it's used in some sort/group
4711 * clause so we'd better not remove it. Also, don't remove any
4712 * resjunk columns, since their reason for being has nothing to do
4713 * with anybody reading the subquery's output. (It's likely that
4714 * resjunk columns in a sub-SELECT would always have ressortgroupref
4715 * set, but even if they don't, it seems imprudent to remove them.)
4716 */
4717 if (tle->ressortgroupref || tle->resjunk)
4718 continue;
4719
4720 /*
4721 * If it's used by the upper query, we can't remove it.
4722 */
 /* NOTE(review): line 4723 elided — presumably
  * if (bms_is_member(tle->resno - FirstLowInvalidHeapAttributeNumber,
  * completing the two-line condition whose second line follows. */
4724 attrs_used))
4725 continue;
4726
4727 /*
4728 * If it contains a set-returning function, we can't remove it since
4729 * that could change the number of rows returned by the subquery.
4730 */
4731 if (subquery->hasTargetSRFs &&
 /* NOTE(review): line 4732 elided — presumably
  * expression_returns_set(texpr)) */
4733 continue;
4734
4735 /*
4736 * If it contains volatile functions, we daren't remove it for fear
4737 * that the user is expecting their side-effects to happen.
4738 */
 /* NOTE(review): line 4739 elided — presumably
  * if (contain_volatile_functions(texpr)) */
4740 continue;
4741
4742 /*
4743 * OK, we don't need it. Replace the expression with a NULL constant.
4744 * Preserve the exposed type of the expression, in case something
4745 * looks at the rowtype of the subquery's result.
4746 */
4747 tle->expr = (Expr *) makeNullConst(exprType(texpr),
 /* NOTE(review): lines 4748-4749 elided — presumably
  * exprTypmod(texpr), exprCollation(texpr)); matching makeNullConst's
  * three-argument signature visible in the index. */
4750 }
4751}
4752
4753/*
4754 * create_partial_bitmap_paths
4755 * Build partial bitmap heap path for the relation
4756 */
4757void
/* NOTE(review): line 4758 (first signature line) elided; per the index the
 * signature is (PlannerInfo *root, RelOptInfo *rel, Path *bitmapqual) —
 * the trailing fragment survives below. */
4759 Path *bitmapqual)
4760{
4761 int parallel_workers;
4762 double pages_fetched;
4763
4764 /* Compute heap pages for bitmap heap scan */
4765 pages_fetched = compute_bitmap_pages(root, rel, bitmapqual, 1.0,
4766 NULL, NULL);
4767
 /* Size the worker count from the estimated heap pages; -1 means no
  * index pages are separately scanned here. */
4768 parallel_workers = compute_parallel_worker(rel, pages_fetched, -1,
 /* NOTE(review): line 4769 elided — presumably
  * max_parallel_workers_per_gather); closing the call. */
4770
 /* Not worth parallelizing: add no partial path. */
4771 if (parallel_workers <= 0)
4772 return;
4773
 /* NOTE(review): line 4774 elided — presumably
  * add_partial_path(rel, (Path *) create_bitmap_heap_path(root, rel,
  * whose continuation is the next line. */
4775 bitmapqual, rel->lateral_relids, 1.0, parallel_workers));
4776}
4777
4778/*
4779 * Compute the number of parallel workers that should be used to scan a
4780 * relation. We compute the parallel workers based on the size of the heap to
4781 * be scanned and the size of the index to be scanned, then choose a minimum
4782 * of those.
4783 *
4784 * "heap_pages" is the number of pages from the table that we expect to scan, or
4785 * -1 if we don't expect to scan any.
4786 *
4787 * "index_pages" is the number of pages from the index that we expect to scan, or
4788 * -1 if we don't expect to scan any.
4789 *
4790 * "max_workers" is caller's limit on the number of workers. This typically
4791 * comes from a GUC.
4792 */
4793int
/* NOTE(review): line 4794 (first signature line) elided; per the index the
 * signature is (RelOptInfo *rel, double heap_pages, double index_pages,
 * int max_workers) — the trailing fragment survives below. */
4795 int max_workers)
4796{
4797 int parallel_workers = 0;
4798
4799 /*
4800 * If the user has set the parallel_workers reloption, use that; otherwise
4801 * select a default number of workers.
4802 */
4803 if (rel->rel_parallel_workers != -1)
4804 parallel_workers = rel->rel_parallel_workers;
4805 else
4806 {
4807 /*
4808 * If the number of pages being scanned is insufficient to justify a
4809 * parallel scan, just return zero ... unless it's an inheritance
4810 * child. In that case, we want to generate a parallel path here
4811 * anyway. It might not be worthwhile just for this relation, but
4812 * when combined with all of its inheritance siblings it may well pay
4813 * off.
4814 */
4815 if (rel->reloptkind == RELOPT_BASEREL &&
 /* NOTE(review): lines 4816-4817 elided — presumably the comparison of
  * heap_pages/index_pages against min_parallel_table_scan_size and
  * min_parallel_index_scan_size; verify against the real source. */
4818 return 0;
4819
4820 if (heap_pages >= 0)
4821 {
 /* NOTE(review): line 4822 elided — presumably the declaration of the
  * heap-pages threshold variable used by the loop below. */
4823 int heap_parallel_workers = 1;
4824
4825 /*
4826 * Select the number of workers based on the log of the size of
4827 * the relation. This probably needs to be a good deal more
4828 * sophisticated, but we need something here for now. Note that
4829 * the upper limit of the min_parallel_table_scan_size GUC is
4830 * chosen to prevent overflow here.
4831 */
 /* NOTE(review): lines 4832-4833 elided — presumably the while-loop
  * header tripling the threshold per extra worker, e.g.
  * while (heap_pages >= (BlockNumber) (heap_parallel_threshold * 3)). */
4834 {
 /* NOTE(review): lines 4835-4837 elided — presumably increment of
  * heap_parallel_workers, threshold *= 3, and an overflow guard
  * preceding the break below. */
4838 break; /* avoid overflow */
4839 }
4840
4841 parallel_workers = heap_parallel_workers;
4842 }
4843
4844 if (index_pages >= 0)
4845 {
4846 int index_parallel_workers = 1;
 /* NOTE(review): line 4847 elided — presumably the index-pages
  * threshold variable, mirroring the heap case above. */
4848
4849 /* same calculation as for heap_pages above */
 /* NOTE(review): lines 4850-4851 elided — presumably the analogous
  * while-loop header over index_pages. */
4852 {
 /* NOTE(review): lines 4853-4855 elided — presumably increment,
  * threshold *= 3, and overflow guard, as in the heap branch. */
4856 break; /* avoid overflow */
4857 }
4858
 /* Take the minimum of heap- and index-derived worker counts. */
4859 if (parallel_workers > 0)
4860 parallel_workers = Min(parallel_workers, index_parallel_workers);
4861 else
4862 parallel_workers = index_parallel_workers;
4863 }
4864 }
4865
4866 /* In no case use more than caller supplied maximum number of workers */
4867 parallel_workers = Min(parallel_workers, max_workers);
4868
4869 return parallel_workers;
4870}
4871
4872/*
4873 * generate_partitionwise_join_paths
4874 * Create paths representing partitionwise join for given partitioned
4875 * join relation.
4876 *
4877 * This must not be called until after we are done adding paths for all
4878 * child-joins. Otherwise, add_path might delete a path to which some path
4879 * generated here has a reference.
4880 */
4881void
/* NOTE(review): line 4882 (parameter list) elided; per the index the
 * signature is (PlannerInfo *root, RelOptInfo *rel). */
4883{
 /* NOTE(review): line 4884 elided — presumably the live_children List
  * declaration (live_children is used near the bottom). */
4885 int cnt_parts;
4886 int num_parts;
 /* NOTE(review): line 4887 elided — presumably
  * RelOptInfo **part_rels; (assigned from rel->part_rels below). */
4888
4889 /* Handle only join relations here. */
4890 if (!IS_JOIN_REL(rel))
4891 return;
4892
4893 /* We've nothing to do if the relation is not partitioned. */
4894 if (!IS_PARTITIONED_REL(rel))
4895 return;
4896
4897 /* The relation should have consider_partitionwise_join set. */
 /* NOTE(review): line 4898 elided — presumably
  * Assert(rel->consider_partitionwise_join); matching the comment. */
4899
4900 /* Guard against stack overflow due to overly deep partition hierarchy. */
 /* NOTE(review): line 4901 elided — presumably check_stack_depth(); */
4902
4903 num_parts = rel->nparts;
4904 part_rels = rel->part_rels;
4905
4906 /* Collect non-dummy child-joins. */
4907 for (cnt_parts = 0; cnt_parts < num_parts; cnt_parts++)
4908 {
 /* NOTE(review): line 4909 elided — presumably
  * RelOptInfo *child_rel = part_rels[cnt_parts]; */
4910
4911 /* If it's been pruned entirely, it's certainly dummy. */
4912 if (child_rel == NULL)
4913 continue;
4914
4915 /* Make partitionwise join paths for this partitioned child-join. */
 /* NOTE(review): line 4916 elided — presumably the recursive call
  * generate_partitionwise_join_paths(root, child_rel); */
4917
4918 /* If we failed to make any path for this child, we must give up. */
4919 if (child_rel->pathlist == NIL)
4920 {
4921 /*
4922 * Mark the parent joinrel as unpartitioned so that later
4923 * functions treat it correctly.
4924 */
4925 rel->nparts = 0;
4926 return;
4927 }
4928
4929 /* Else, identify the cheapest path for it. */
 /* NOTE(review): line 4930 elided — presumably set_cheapest(child_rel); */
4931
4932 /* Dummy children need not be scanned, so ignore those. */
 /* NOTE(review): line 4933 elided — presumably
  * if (IS_DUMMY_REL(child_rel)) guarding the continue below. */
4934 continue;
4935
4936 /*
4937 * Except for the topmost scan/join rel, consider generating partial
4938 * aggregation paths for the grouped relation on top of the paths of
4939 * this partitioned child-join. After that, we're done creating paths
4940 * for the grouped relation, so run set_cheapest().
4941 */
4942 if (child_rel->grouped_rel != NULL &&
4943 !bms_equal(IS_OTHER_REL(rel) ?
4944 rel->top_parent_relids : rel->relids,
4945 root->all_query_rels))
4946 {
4947 RelOptInfo *grouped_rel = child_rel->grouped_rel;
4948
4949 Assert(IS_GROUPED_REL(grouped_rel));
4950
4951 generate_grouped_paths(root, grouped_rel, child_rel);
4952 set_cheapest(grouped_rel);
4953 }
4954
4955#ifdef OPTIMIZER_DEBUG
 /* NOTE(review): line 4956 elided — presumably a debug print of the
  * child rel's paths (pprint/debug_print_rel). */
4957#endif
4958
 /* NOTE(review): line 4959 elided — presumably
  * live_children = lappend(live_children, child_rel); */
4960 }
4961
4962 /* If all child-joins are dummy, parent join is also dummy. */
4963 if (!live_children)
4964 {
4965 mark_dummy_rel(rel);
4966 return;
4967 }
4968
4969 /* Build additional paths for this rel from child-join paths. */
 /* NOTE(review): lines 4970-4971 elided — presumably
  * add_paths_to_append_rel(root, rel, live_children); and
  * list_free(live_children); closing the function. */
4972}
static void set_base_rel_sizes(PlannerInfo *root)
Definition allpaths.c:304
static List * get_useful_pathkeys_for_relation(PlannerInfo *root, RelOptInfo *rel, bool require_parallel_safe)
Definition allpaths.c:3320
static void set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeTblEntry *rte)
Definition allpaths.c:2679
#define UNSAFE_TYPE_MISMATCH
Definition allpaths.c:59
static Path * get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
Definition allpaths.c:2164
static void set_namedtuplestore_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition allpaths.c:3138
static void subquery_push_qual(Query *subquery, RangeTblEntry *rte, Index rti, Node *qual)
Definition allpaths.c:4546
void generate_partitionwise_join_paths(PlannerInfo *root, RelOptInfo *rel)
Definition allpaths.c:4882
void generate_grouped_paths(PlannerInfo *root, RelOptInfo *grouped_rel, RelOptInfo *rel)
Definition allpaths.c:3505
static void set_base_rel_consider_startup(PlannerInfo *root)
Definition allpaths.c:261
#define UNSAFE_HAS_VOLATILE_FUNC
Definition allpaths.c:55
#define UNSAFE_NOTIN_DISTINCTON_CLAUSE
Definition allpaths.c:57
static void set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeTblEntry *rte)
Definition allpaths.c:516
static void set_tablesample_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition allpaths.c:892
static void set_tablesample_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition allpaths.c:932
static void set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition allpaths.c:1004
static void set_base_rel_pathlists(PlannerInfo *root)
Definition allpaths.c:380
RelOptInfo * standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
Definition allpaths.c:3948
int geqo_threshold
Definition allpaths.c:83
static void set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeTblEntry *rte)
Definition allpaths.c:1317
static pushdown_safe_type qual_is_pushdown_safe(Query *subquery, Index rti, RestrictInfo *rinfo, pushdown_safety_info *safetyInfo)
Definition allpaths.c:4445
static void set_result_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition allpaths.c:3165
int compute_parallel_worker(RelOptInfo *rel, double heap_pages, double index_pages, int max_workers)
Definition allpaths.c:4794
static bool check_and_push_window_quals(Query *subquery, Node *clause, Bitmapset **run_cond_attrs)
Definition allpaths.c:2606
void generate_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
Definition allpaths.c:3251
static void set_dummy_rel_pathlist(RelOptInfo *rel)
Definition allpaths.c:2366
static void compare_tlist_datatypes(List *tlist, List *colTypes, pushdown_safety_info *safetyInfo)
Definition allpaths.c:4369
static void set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition allpaths.c:3192
static bool targetIsInAllPartitionLists(TargetEntry *tle, Query *query)
Definition allpaths.c:4402
static void create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel)
Definition allpaths.c:872
static bool subquery_is_pushdown_safe(Query *subquery, Query *topquery, pushdown_safety_info *safetyInfo)
Definition allpaths.c:4143
join_search_hook_type join_search_hook
Definition allpaths.c:92
bool enable_geqo
Definition allpaths.c:81
static Path * get_singleton_append_subpath(Path *path, List **child_append_relid_sets)
Definition allpaths.c:2318
void generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
Definition allpaths.c:3388
static RelOptInfo * make_rel_from_joinlist(PlannerInfo *root, List *joinlist)
Definition allpaths.c:3843
static void set_function_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition allpaths.c:2948
static void recurse_push_qual(Node *setOp, Query *topquery, RangeTblEntry *rte, Index rti, Node *qual)
Definition allpaths.c:4594
static void set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition allpaths.c:834
static void setup_simple_grouped_rels(PlannerInfo *root)
Definition allpaths.c:346
static void remove_unused_subquery_outputs(Query *subquery, RelOptInfo *rel, Bitmapset *extra_used_attrs)
Definition allpaths.c:4646
static void check_output_expressions(Query *subquery, pushdown_safety_info *safetyInfo)
Definition allpaths.c:4268
static void set_tablefunc_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition allpaths.c:3035
static void set_foreign_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition allpaths.c:980
set_rel_pathlist_hook_type set_rel_pathlist_hook
Definition allpaths.c:89
static void set_values_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition allpaths.c:3015
RelOptInfo * make_one_rel(PlannerInfo *root, List *joinlist)
Definition allpaths.c:179
#define UNSAFE_HAS_SET_FUNC
Definition allpaths.c:56
static void set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition allpaths.c:3059
bool enable_eager_aggregate
Definition allpaths.c:82
static void set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition allpaths.c:645
static void set_grouped_rel_pathlist(PlannerInfo *root, RelOptInfo *rel)
Definition allpaths.c:1380
static void set_append_rel_size(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeTblEntry *rte)
Definition allpaths.c:1022
void create_partial_bitmap_paths(PlannerInfo *root, RelOptInfo *rel, Path *bitmapqual)
Definition allpaths.c:4758
static void set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition allpaths.c:628
double min_eager_agg_group_size
Definition allpaths.c:84
void add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, List *live_childrels)
Definition allpaths.c:1416
static void accumulate_append_subpath(Path *path, List **subpaths, List **special_subpaths, List **child_append_relid_sets)
Definition allpaths.c:2252
static void set_rel_size(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeTblEntry *rte)
Definition allpaths.c:407
static void generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel, List *live_childrels, List *all_child_pathkeys)
Definition allpaths.c:1848
#define UNSAFE_NOTIN_PARTITIONBY_CLAUSE
Definition allpaths.c:58
static bool find_window_run_conditions(Query *subquery, AttrNumber attno, WindowFunc *wfunc, OpExpr *opexpr, bool wfunc_left, bool *keep_original, Bitmapset **run_cond_attrs)
Definition allpaths.c:2416
static bool recurse_pushdown_safe(Node *setOp, Query *topquery, pushdown_safety_info *safetyInfo)
Definition allpaths.c:4199
pushdown_safe_type
Definition allpaths.c:73
@ PUSHDOWN_WINDOWCLAUSE_RUNCOND
Definition allpaths.c:76
@ PUSHDOWN_UNSAFE
Definition allpaths.c:74
@ PUSHDOWN_SAFE
Definition allpaths.c:75
int min_parallel_index_scan_size
Definition allpaths.c:86
int min_parallel_table_scan_size
Definition allpaths.c:85
Node * adjust_appendrel_attrs(PlannerInfo *root, Node *node, int nappinfos, AppendRelInfo **appinfos)
Definition appendinfo.c:201
int16 AttrNumber
Definition attnum.h:21
#define InvalidAttrNumber
Definition attnum.h:23
void pprint(const void *obj)
Definition print.c:54
bool bms_equal(const Bitmapset *a, const Bitmapset *b)
Definition bitmapset.c:142
bool bms_is_subset(const Bitmapset *a, const Bitmapset *b)
Definition bitmapset.c:412
bool bms_is_member(int x, const Bitmapset *a)
Definition bitmapset.c:510
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition bitmapset.c:799
BMS_Membership bms_membership(const Bitmapset *a)
Definition bitmapset.c:765
bool bms_overlap(const Bitmapset *a, const Bitmapset *b)
Definition bitmapset.c:575
bool bms_get_singleton_member(const Bitmapset *a, int *member)
Definition bitmapset.c:708
#define bms_is_empty(a)
Definition bitmapset.h:118
@ BMS_SINGLETON
Definition bitmapset.h:72
@ BMS_MULTIPLE
Definition bitmapset.h:73
uint32 BlockNumber
Definition block.h:31
#define Min(x, y)
Definition c.h:1093
#define Max(x, y)
Definition c.h:1087
#define Assert(condition)
Definition c.h:945
int32_t int32
Definition c.h:614
unsigned int Index
Definition c.h:700
#define MemSet(start, val, len)
Definition c.h:1109
#define OidIsValid(objectId)
Definition c.h:860
bool is_pseudo_constant_clause(Node *clause)
Definition clauses.c:2331
bool contain_leaked_vars(Node *clause)
Definition clauses.c:1276
bool is_parallel_safe(PlannerInfo *root, Node *node)
Definition clauses.c:764
bool contain_subplans(Node *clause)
Definition clauses.c:341
bool contain_volatile_functions(Node *clause)
Definition clauses.c:549
CompareType
Definition cmptype.h:32
@ COMPARE_LE
Definition cmptype.h:35
@ COMPARE_GT
Definition cmptype.h:38
@ COMPARE_EQ
Definition cmptype.h:36
@ COMPARE_GE
Definition cmptype.h:37
@ COMPARE_LT
Definition cmptype.h:34
void set_namedtuplestore_size_estimates(PlannerInfo *root, RelOptInfo *rel)
Definition costsize.c:6256
int max_parallel_workers_per_gather
Definition costsize.c:144
void set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
Definition costsize.c:5492
void set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
Definition costsize.c:6126
void set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, double cte_rows)
Definition costsize.c:6218
double compute_gather_rows(Path *path)
Definition costsize.c:6768
void set_result_size_estimates(PlannerInfo *root, RelOptInfo *rel)
Definition costsize.c:6289
bool enable_partitionwise_join
Definition costsize.c:160
double compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel, Path *bitmapqual, double loop_count, Cost *cost_p, double *tuples_p)
Definition costsize.c:6657
void set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
Definition costsize.c:6046
bool enable_parallel_append
Definition costsize.c:162
void set_foreign_size_estimates(PlannerInfo *root, RelOptInfo *rel)
Definition costsize.c:6318
double clamp_row_est(double nrows)
Definition costsize.c:214
void set_tablefunc_size_estimates(PlannerInfo *root, RelOptInfo *rel)
Definition costsize.c:6164
void set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
Definition costsize.c:6186
bool enable_incremental_sort
Definition costsize.c:152
Datum arg
Definition elog.c:1322
#define ERROR
Definition elog.h:39
#define elog(elevel,...)
Definition elog.h:226
void add_child_rel_equivalences(PlannerInfo *root, AppendRelInfo *appinfo, RelOptInfo *parent_rel, RelOptInfo *child_rel)
bool relation_can_be_sorted_early(PlannerInfo *root, RelOptInfo *rel, EquivalenceClass *ec, bool require_parallel_safe)
#define OidFunctionCall1(functionId, arg1)
Definition fmgr.h:722
RelOptInfo * geqo(PlannerInfo *root, int number_of_rels, List *initial_rels)
Definition geqo_main.c:74
void parse(int)
Definition parse.c:49
void check_index_predicates(PlannerInfo *root, RelOptInfo *rel)
Definition indxpath.c:3941
void create_index_paths(PlannerInfo *root, RelOptInfo *rel)
Definition indxpath.c:240
int i
Definition isn.c:77
void join_search_one_level(PlannerInfo *root, int level)
Definition joinrels.c:78
void mark_dummy_rel(RelOptInfo *rel)
Definition joinrels.c:1513
List * lappend(List *list, void *datum)
Definition list.c:339
List * list_copy_tail(const List *oldlist, int nskip)
Definition list.c:1613
List * list_concat(List *list1, const List *list2)
Definition list.c:561
void list_free(List *list)
Definition list.c:1546
List * list_copy_head(const List *oldlist, int len)
Definition list.c:1593
char get_rel_persistence(Oid relid)
Definition lsyscache.c:2298
char func_parallel(Oid funcid)
Definition lsyscache.c:2019
Oid get_opfamily_member_for_cmptype(Oid opfamily, Oid lefttype, Oid righttype, CompareType cmptype)
Definition lsyscache.c:199
RegProcedure get_func_support(Oid funcid)
Definition lsyscache.c:2078
bool func_strict(Oid funcid)
Definition lsyscache.c:1981
List * get_op_index_interpretation(Oid opno)
Definition lsyscache.c:668
int32 get_typavgwidth(Oid typid, int32 typmod)
Definition lsyscache.c:2800
Datum subpath(PG_FUNCTION_ARGS)
Definition ltree_op.c:311
Const * makeNullConst(Oid consttype, int32 consttypmod, Oid constcollid)
Definition makefuncs.c:388
Node * make_and_qual(Node *qual1, Node *qual2)
Definition makefuncs.c:780
void pfree(void *pointer)
Definition mcxt.c:1616
void * palloc0(Size size)
Definition mcxt.c:1417
Oid exprType(const Node *expr)
Definition nodeFuncs.c:42
int32 exprTypmod(const Node *expr)
Definition nodeFuncs.c:304
Oid exprCollation(const Node *expr)
Definition nodeFuncs.c:826
bool expression_returns_set(Node *clause)
Definition nodeFuncs.c:768
void set_opfuncid(OpExpr *opexpr)
Definition nodeFuncs.c:1879
#define IsA(nodeptr, _type_)
Definition nodes.h:164
#define copyObject(obj)
Definition nodes.h:232
#define nodeTag(nodeptr)
Definition nodes.h:139
@ AGG_SORTED
Definition nodes.h:365
@ AGG_HASHED
Definition nodes.h:366
@ AGGSPLIT_INITIAL_SERIAL
Definition nodes.h:389
#define makeNode(_type_)
Definition nodes.h:161
#define castNode(_type_, nodeptr)
Definition nodes.h:182
@ JOIN_SEMI
Definition nodes.h:317
@ JOIN_ANTI
Definition nodes.h:318
#define PVC_INCLUDE_PLACEHOLDERS
Definition optimizer.h:201
bool targetIsInSortList(TargetEntry *tle, Oid sortop, List *sortList)
@ SETOP_EXCEPT
@ RTE_JOIN
@ RTE_CTE
@ RTE_NAMEDTUPLESTORE
@ RTE_VALUES
@ RTE_SUBQUERY
@ RTE_RESULT
@ RTE_FUNCTION
@ RTE_TABLEFUNC
@ RTE_GROUP
@ RTE_GRAPH_TABLE
@ RTE_RELATION
#define rt_fetch(rangetable_index, rangetable)
Definition parsetree.h:31
bool partitions_are_ordered(PartitionBoundInfo boundinfo, Bitmapset *live_parts)
Path * get_cheapest_fractional_path_for_pathkeys(List *paths, List *pathkeys, Relids required_outer, double fraction)
Definition pathkeys.c:666
Path * get_cheapest_path_for_pathkeys(List *paths, List *pathkeys, Relids required_outer, CostSelector cost_criterion, bool require_parallel_safe)
Definition pathkeys.c:620
bool pathkeys_count_contained_in(List *keys1, List *keys2, int *n_common)
Definition pathkeys.c:558
bool has_useful_pathkeys(PlannerInfo *root, RelOptInfo *rel)
Definition pathkeys.c:2291
List * make_pathkeys_for_sortclauses(PlannerInfo *root, List *sortclauses, List *tlist)
Definition pathkeys.c:1336
List * build_expression_pathkey(PlannerInfo *root, Expr *expr, Oid opno, Relids rel, bool create_it)
Definition pathkeys.c:1000
List * build_partition_pathkeys(PlannerInfo *root, RelOptInfo *partrel, ScanDirection scandir, bool *partialkeys)
Definition pathkeys.c:919
List * convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel, List *subquery_pathkeys, List *subquery_tlist)
Definition pathkeys.c:1054
bool pathkeys_contained_in(List *keys1, List *keys2)
Definition pathkeys.c:343
PathKeysComparison compare_pathkeys(List *keys1, List *keys2)
Definition pathkeys.c:304
Path * get_cheapest_parallel_safe_total_inner(List *paths)
Definition pathkeys.c:699
Path * create_functionscan_path(PlannerInfo *root, RelOptInfo *rel, List *pathkeys, Relids required_outer)
Definition pathnode.c:1939
Path * create_valuesscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
Definition pathnode.c:1991
MaterialPath * create_material_path(RelOptInfo *rel, Path *subpath, bool enabled)
Definition pathnode.c:1712
Path * create_worktablescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
Definition pathnode.c:2095
ProjectionPath * create_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition pathnode.c:2587
Path * create_seqscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer, int parallel_workers)
Definition pathnode.c:1026
GatherMergePath * create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *pathkeys, Relids required_outer, double *rows)
Definition pathnode.c:1813
void set_cheapest(RelOptInfo *parent_rel)
Definition pathnode.c:268
void add_partial_path(RelOptInfo *parent_rel, Path *new_path)
Definition pathnode.c:793
Path * create_namedtuplestorescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
Definition pathnode.c:2043
SubqueryScanPath * create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, bool trivial_pathtarget, List *pathkeys, Relids required_outer)
Definition pathnode.c:1909
IncrementalSortPath * create_incremental_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, int presorted_keys, double limit_tuples)
Definition pathnode.c:2855
BitmapHeapPath * create_bitmap_heap_path(PlannerInfo *root, RelOptInfo *rel, Path *bitmapqual, Relids required_outer, double loop_count, int parallel_degree)
Definition pathnode.c:1149
Path * create_tablefuncscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
Definition pathnode.c:1965
SortPath * create_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, double limit_tuples)
Definition pathnode.c:2904
MergeAppendPath * create_merge_append_path(PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *child_append_relid_sets, List *pathkeys, Relids required_outer)
Definition pathnode.c:1524
GatherPath * create_gather_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, Relids required_outer, double *rows)
Definition pathnode.c:1865
Path * create_samplescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
Definition pathnode.c:1051
void add_path(RelOptInfo *parent_rel, Path *new_path)
Definition pathnode.c:459
int compare_path_costs(Path *path1, Path *path2, CostSelector criterion)
Definition pathnode.c:68
Path * create_resultscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
Definition pathnode.c:2069
AppendPath * create_append_path(PlannerInfo *root, RelOptInfo *rel, AppendPathInput input, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows)
Definition pathnode.c:1352
Path * create_ctescan_path(PlannerInfo *root, RelOptInfo *rel, List *pathkeys, Relids required_outer)
Definition pathnode.c:2017
AggPath * create_agg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, AggStrategy aggstrategy, AggSplit aggsplit, List *groupClause, List *qual, const AggClauseCosts *aggcosts, double numGroups)
Definition pathnode.c:3057
Path * reparameterize_path(PlannerInfo *root, Path *path, Relids required_outer, double loop_count)
Definition pathnode.c:3916
#define IS_SIMPLE_REL(rel)
Definition pathnodes.h:977
#define IS_DUMMY_REL(r)
Definition pathnodes.h:2287
#define IS_JOIN_REL(rel)
Definition pathnodes.h:982
@ TOTAL_COST
Definition pathnodes.h:111
@ STARTUP_COST
Definition pathnodes.h:111
#define IS_PARTITIONED_REL(rel)
Definition pathnodes.h:1219
#define IS_GROUPED_REL(rel)
Definition pathnodes.h:1245
#define PATH_REQ_OUTER(path)
Definition pathnodes.h:2003
Bitmapset * Relids
Definition pathnodes.h:103
@ UPPERREL_FINAL
Definition pathnodes.h:152
@ RELOPT_BASEREL
Definition pathnodes.h:965
@ RELOPT_OTHER_MEMBER_REL
Definition pathnodes.h:967
#define IS_OTHER_REL(rel)
Definition pathnodes.h:992
void(* set_rel_pathlist_hook_type)(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeTblEntry *rte)
Definition paths.h:39
RelOptInfo *(* join_search_hook_type)(PlannerInfo *root, int levels_needed, List *initial_rels)
Definition paths.h:55
@ PATHKEYS_EQUAL
Definition paths.h:220
static int pg_leftmost_one_pos32(uint32 word)
Definition pg_bitutils.h:41
#define lfirst(lc)
Definition pg_list.h:172
static int list_length(const List *l)
Definition pg_list.h:152
#define NIL
Definition pg_list.h:68
#define forboth(cell1, list1, cell2, list2)
Definition pg_list.h:518
#define foreach_current_index(var_or_cell)
Definition pg_list.h:403
#define list_make1(x1)
Definition pg_list.h:212
#define for_each_from(cell, lst, N)
Definition pg_list.h:414
static void * list_nth(const List *list, int n)
Definition pg_list.h:299
#define linitial(l)
Definition pg_list.h:178
#define lsecond(l)
Definition pg_list.h:183
static ListCell * list_head(const List *l)
Definition pg_list.h:128
#define list_nth_node(type, list, n)
Definition pg_list.h:327
static ListCell * lnext(const List *l, const ListCell *c)
Definition pg_list.h:343
#define lfirst_oid(lc)
Definition pg_list.h:174
static int list_nth_int(const List *list, int n)
Definition pg_list.h:310
bool relation_excluded_by_constraints(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
Definition plancat.c:1849
char * choose_plan_name(PlannerGlobal *glob, const char *name, bool always_number)
Definition planner.c:9024
PlannerInfo * subquery_planner(PlannerGlobal *glob, Query *parse, char *plan_name, PlannerInfo *parent_root, bool hasRecursion, double tuple_fraction, SetOperationStmt *setops)
Definition planner.c:743
Path * get_cheapest_fractional_path(RelOptInfo *rel, double tuple_fraction)
Definition planner.c:6657
bool limit_needed(Query *parse)
Definition planner.c:2841
@ MONOTONICFUNC_NONE
Definition plannodes.h:1835
@ MONOTONICFUNC_DECREASING
Definition plannodes.h:1837
@ MONOTONICFUNC_INCREASING
Definition plannodes.h:1836
@ MONOTONICFUNC_BOTH
Definition plannodes.h:1838
static Datum PointerGetDatum(const void *X)
Definition postgres.h:342
static Pointer DatumGetPointer(Datum X)
Definition postgres.h:332
#define InvalidOid
unsigned int Oid
void get_agg_clause_costs(PlannerInfo *root, AggSplit aggsplit, AggClauseCosts *costs)
Definition prepagg.c:559
static int fb(int x)
tree ctl root
Definition radixtree.h:1857
RelOptInfo * find_base_rel(PlannerInfo *root, int relid)
Definition relnode.c:544
RelOptInfo * build_simple_grouped_rel(PlannerInfo *root, RelOptInfo *rel)
Definition relnode.c:448
RelOptInfo * fetch_upper_rel(PlannerInfo *root, UpperRelationKind kind, Relids relids)
Definition relnode.c:1617
Node * ReplaceVarsFromTargetList(Node *node, int target_varno, int sublevels_up, RangeTblEntry *target_rte, List *targetlist, int result_relation, ReplaceVarsNoMatchOption nomatch_option, int nomatch_varno, bool *outer_hasSubLinks)
@ REPLACEVARS_REPORT_ERROR
@ BackwardScanDirection
Definition sdir.h:26
@ ForwardScanDirection
Definition sdir.h:28
double estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, List **pgset, EstimationInfo *estinfo)
Definition selfuncs.c:3788
void check_stack_depth(void)
Definition stack_depth.c:95
List * subpaths
Definition pathnode.h:36
List * child_append_relid_sets
Definition pathnode.h:38
List * partial_subpaths
Definition pathnode.h:37
Node * quals
Definition primnodes.h:2384
Definition pg_list.h:54
Definition nodes.h:135
Oid opno
Definition primnodes.h:851
List * args
Definition primnodes.h:869
CompareType cmptype
Definition lsyscache.h:28
List * exprs
Definition pathnodes.h:1866
List * pathkeys
Definition pathnodes.h:1999
Cardinality rows
Definition pathnodes.h:1993
int parallel_workers
Definition pathnodes.h:1990
bool parallel_aware
Definition pathnodes.h:1986
Node * limitCount
Definition parsenodes.h:231
FromExpr * jointree
Definition parsenodes.h:182
Node * setOperations
Definition parsenodes.h:236
List * groupClause
Definition parsenodes.h:216
Node * havingQual
Definition parsenodes.h:222
Node * limitOffset
Definition parsenodes.h:230
List * windowClause
Definition parsenodes.h:224
List * targetList
Definition parsenodes.h:198
List * groupingSets
Definition parsenodes.h:220
List * distinctClause
Definition parsenodes.h:226
Relids apply_agg_at
Definition pathnodes.h:1290
List * group_exprs
Definition pathnodes.h:1287
bool agg_useful
Definition pathnodes.h:1296
List * group_clauses
Definition pathnodes.h:1285
struct PathTarget * agg_input
Definition pathnodes.h:1282
struct PathTarget * target
Definition pathnodes.h:1279
List * baserestrictinfo
Definition pathnodes.h:1130
bool consider_param_startup
Definition pathnodes.h:1023
List * subplan_params
Definition pathnodes.h:1089
List * joininfo
Definition pathnodes.h:1136
Relids relids
Definition pathnodes.h:1009
struct PathTarget * reltarget
Definition pathnodes.h:1033
Index relid
Definition pathnodes.h:1057
struct RelAggInfo * agg_info
Definition pathnodes.h:1150
Cardinality tuples
Definition pathnodes.h:1084
bool consider_parallel
Definition pathnodes.h:1025
Relids top_parent_relids
Definition pathnodes.h:1162
BlockNumber pages
Definition pathnodes.h:1083
Relids lateral_relids
Definition pathnodes.h:1052
List * pathlist
Definition pathnodes.h:1038
RelOptKind reloptkind
Definition pathnodes.h:1003
struct Path * cheapest_total_path
Definition pathnodes.h:1042
struct RelOptInfo * grouped_rel
Definition pathnodes.h:1152
bool has_eclass_joins
Definition pathnodes.h:1138
bool consider_startup
Definition pathnodes.h:1021
Bitmapset * live_parts
Definition pathnodes.h:1192
int rel_parallel_workers
Definition pathnodes.h:1091
bool consider_partitionwise_join
Definition pathnodes.h:1144
List * partial_pathlist
Definition pathnodes.h:1040
PlannerInfo * subroot
Definition pathnodes.h:1088
AttrNumber max_attr
Definition pathnodes.h:1065
Relids nulling_relids
Definition pathnodes.h:1073
Cardinality rows
Definition pathnodes.h:1015
AttrNumber min_attr
Definition pathnodes.h:1063
RTEKind rtekind
Definition pathnodes.h:1061
Expr * clause
Definition pathnodes.h:2888
SetOperation op
JoinType jointype
Definition pathnodes.h:3217
Relids syn_righthand
Definition pathnodes.h:3216
MonotonicFunction monotonic
bool repeatable_across_scans
Definition tsmapi.h:65
AttrNumber varattno
Definition primnodes.h:275
int varno
Definition primnodes.h:270
Index varlevelsup
Definition primnodes.h:295
List * partitionClause
Index winref
Definition primnodes.h:612
unsigned char * unsafeFlags
Definition allpaths.c:64
#define FirstLowInvalidHeapAttributeNumber
Definition sysattr.h:27
TsmRoutine * GetTsmRoutine(Oid tsmhandler)
Definition tablesample.c:27
bool create_tidscan_paths(PlannerInfo *root, RelOptInfo *rel)
Definition tidpath.c:497
bool grouping_is_sortable(List *groupClause)
Definition tlist.c:549
List * make_tlist_from_pathtarget(PathTarget *target)
Definition tlist.c:633
bool grouping_is_hashable(List *groupClause)
Definition tlist.c:569
Node * flatten_group_exprs(PlannerInfo *root, Query *query, Node *node)
Definition var.c:999
List * pull_var_clause(Node *node, int flags)
Definition var.c:653
void pull_varattnos(Node *node, Index varno, Bitmapset **varattnos)
Definition var.c:296