/*-------------------------------------------------------------------------
 *
 * allpaths.c
 *    Routines to find possible search paths for processing a query
 *
 * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/optimizer/path/allpaths.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include <limits.h>
#include <math.h>

#include "access/sysattr.h"
#include "access/tsmapi.h"
#include "catalog/pg_class.h"
#include "catalog/pg_operator.h"
#include "catalog/pg_proc.h"
#include "foreign/fdwapi.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#ifdef OPTIMIZER_DEBUG
#include "nodes/print.h"
#endif
#include "optimizer/appendinfo.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/geqo.h"
#include "optimizer/inherit.h"
#include "optimizer/optimizer.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/plancat.h"
#include "optimizer/planner.h"
#include "optimizer/restrictinfo.h"
#include "optimizer/tlist.h"
#include "parser/parse_clause.h"
#include "parser/parsetree.h"
#include "partitioning/partbounds.h"
#include "partitioning/partprune.h"
#include "rewrite/rewriteManip.h"
#include "utils/lsyscache.h"


/* results of subquery_is_pushdown_safe */
typedef struct pushdown_safety_info
{
    bool       *unsafeColumns;  /* which output columns are unsafe to use */
    bool        unsafeVolatile; /* don't push down volatile quals */
    bool        unsafeLeaky;    /* don't push down leaky quals */
} pushdown_safety_info;

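/*
 * For example, in
 *
 *     SELECT * FROM (SELECT x, random() AS r FROM t) ss WHERE r < 0.5;
 *
 * the subquery output "r" is computed with a volatile function, so it gets
 * marked unsafe in unsafeColumns: pushing the qual on "r" down into the
 * subquery would re-evaluate random() at a different point and change the
 * query's results.
 */
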
/* These parameters are set by GUC */
bool        enable_geqo = false;    /* just in case GUC doesn't set it */
int         geqo_threshold;
int         min_parallel_table_scan_size;
int         min_parallel_index_scan_size;

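/*
 * For example, with the default geqo_threshold of 12, a query that must
 * join 12 or more FROM items is planned by the genetic optimizer (GEQO)
 * rather than the exhaustive standard join search; see
 * make_rel_from_joinlist() below.
 */
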
/* Hook for plugins to get control in set_rel_pathlist() */
set_rel_pathlist_hook_type set_rel_pathlist_hook = NULL;

/* Hook for plugins to replace standard_join_search() */
join_search_hook_type join_search_hook = NULL;


static void set_base_rel_sizes(PlannerInfo *root);
static void set_base_rel_pathlists(PlannerInfo *root);
static void set_rel_size(PlannerInfo *root, RelOptInfo *rel,
                         Index rti, RangeTblEntry *rte);
static void set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
                             Index rti, RangeTblEntry *rte);
static void set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel,
                               RangeTblEntry *rte);
static void create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel);
static void set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
                                      RangeTblEntry *rte);
static void set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                   RangeTblEntry *rte);
static void set_tablesample_rel_size(PlannerInfo *root, RelOptInfo *rel,
                                     RangeTblEntry *rte);
static void set_tablesample_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                         RangeTblEntry *rte);
static void set_foreign_size(PlannerInfo *root, RelOptInfo *rel,
                             RangeTblEntry *rte);
static void set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                 RangeTblEntry *rte);
static void set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
                                Index rti, RangeTblEntry *rte);
static void set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                    Index rti, RangeTblEntry *rte);
static void generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel,
                                         List *live_childrels,
                                         List *all_child_pathkeys,
                                         List *partitioned_rels);
static Path *get_cheapest_parameterized_child_path(PlannerInfo *root,
                                                   RelOptInfo *rel,
                                                   Relids required_outer);
static List *accumulate_partitioned_rels(List *partitioned_rels,
                                         List *sub_partitioned_rels,
                                         bool flatten_partitioned_rels);
static void accumulate_append_subpath(Path *path,
                                      List **subpaths, List **special_subpaths,
                                      List **partitioned_rels,
                                      bool flatten_partitioned_rels);
static Path *get_singleton_append_subpath(Path *path);
static void set_dummy_rel_pathlist(RelOptInfo *rel);
static void set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                  Index rti, RangeTblEntry *rte);
static void set_function_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                  RangeTblEntry *rte);
static void set_values_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                RangeTblEntry *rte);
static void set_tablefunc_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                   RangeTblEntry *rte);
static void set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel,
                             RangeTblEntry *rte);
static void set_namedtuplestore_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                         RangeTblEntry *rte);
static void set_result_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                RangeTblEntry *rte);
static void set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                   RangeTblEntry *rte);
static RelOptInfo *make_rel_from_joinlist(PlannerInfo *root, List *joinlist);
static bool subquery_is_pushdown_safe(Query *subquery, Query *topquery,
                                      pushdown_safety_info *safetyInfo);
static bool recurse_pushdown_safe(Node *setOp, Query *topquery,
                                  pushdown_safety_info *safetyInfo);
static void check_output_expressions(Query *subquery,
                                     pushdown_safety_info *safetyInfo);
static void compare_tlist_datatypes(List *tlist, List *colTypes,
                                    pushdown_safety_info *safetyInfo);
static bool targetIsInAllPartitionLists(TargetEntry *tle, Query *query);
static bool qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual,
                                  pushdown_safety_info *safetyInfo);
static void subquery_push_qual(Query *subquery,
                               RangeTblEntry *rte, Index rti, Node *qual);
static void recurse_push_qual(Node *setOp, Query *topquery,
                              RangeTblEntry *rte, Index rti, Node *qual);
static void remove_unused_subquery_outputs(Query *subquery, RelOptInfo *rel);


/*
 * make_one_rel
 *    Finds all possible access paths for executing a query, returning a
 *    single rel that represents the join of all base rels in the query.
 */
RelOptInfo *
make_one_rel(PlannerInfo *root, List *joinlist)
{
    RelOptInfo *rel;
    Index       rti;
    double      total_pages;

    /*
     * Construct the all_baserels Relids set.
     */
    root->all_baserels = NULL;
    for (rti = 1; rti < root->simple_rel_array_size; rti++)
    {
        RelOptInfo *brel = root->simple_rel_array[rti];

        /* there may be empty slots corresponding to non-baserel RTEs */
        if (brel == NULL)
            continue;

        Assert(brel->relid == rti); /* sanity check on array */

        /* ignore RTEs that are "other rels" */
        if (brel->reloptkind != RELOPT_BASEREL)
            continue;

        root->all_baserels = bms_add_member(root->all_baserels, brel->relid);
    }

    /* Mark base rels as to whether we care about fast-start plans */
    set_base_rel_consider_startup(root);

    /*
     * Compute size estimates and consider_parallel flags for each base rel.
     */
    set_base_rel_sizes(root);

    /*
     * We should now have size estimates for every actual table involved in
     * the query, and we also know which if any have been deleted from the
     * query by join removal, pruned by partition pruning, or eliminated by
     * constraint exclusion.  So we can now compute total_table_pages.
     *
     * Note that appendrels are not double-counted here, even though we don't
     * bother to distinguish RelOptInfos for appendrel parents, because the
     * parents will have pages = 0.
     *
     * XXX if a table is self-joined, we will count it once per appearance,
     * which perhaps is the wrong thing ... but that's not completely clear,
     * and detecting self-joins here is difficult, so ignore it for now.
     */
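    /*
     * For example, if the query scans two tables of 1000 and 200 pages,
     * total_table_pages ends up as 1200; costsize.c uses this figure when
     * estimating how much of an index is likely to remain cached across
     * repeated fetches (see index_pages_fetched()).
     */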
    total_pages = 0;
    for (rti = 1; rti < root->simple_rel_array_size; rti++)
    {
        RelOptInfo *brel = root->simple_rel_array[rti];

        if (brel == NULL)
            continue;

        Assert(brel->relid == rti); /* sanity check on array */

        if (IS_DUMMY_REL(brel))
            continue;

        if (IS_SIMPLE_REL(brel))
            total_pages += (double) brel->pages;
    }
    root->total_table_pages = total_pages;

    /*
     * Generate access paths for each base rel.
     */
    set_base_rel_pathlists(root);

    /*
     * Generate access paths for the entire join tree.
     */
    rel = make_rel_from_joinlist(root, joinlist);

    /*
     * The result should join all and only the query's base rels.
     */
    Assert(bms_equal(rel->relids, root->all_baserels));

    return rel;
}

/*
 * set_base_rel_consider_startup
 *    Set the consider_[param_]startup flags for each base-relation entry.
 *
 * For the moment, we only deal with consider_param_startup here; because the
 * logic for consider_startup is pretty trivial and is the same for every base
 * relation, we just let build_simple_rel() initialize that flag correctly to
 * start with.  If that logic ever gets more complicated it would probably
 * be better to move it here.
 */
static void
set_base_rel_consider_startup(PlannerInfo *root)
{
    /*
     * Since parameterized paths can only be used on the inside of a nestloop
     * join plan, there is usually little value in considering fast-start
     * plans for them.  However, for relations that are on the RHS of a SEMI
     * or ANTI join, a fast-start plan can be useful because we're only going
     * to care about fetching one tuple anyway.
     *
     * To minimize growth of planning time, we currently restrict this to
     * cases where the RHS is a single base relation, not a join; there is no
     * provision for consider_param_startup to get set at all on joinrels.
     * Also we don't worry about appendrels.  costsize.c's costing rules for
     * nestloop semi/antijoins don't consider such cases either.
     */
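    /*
     * For example, in
     *
     *     SELECT * FROM a WHERE EXISTS (SELECT 1 FROM b WHERE b.x = a.x);
     *
     * "b" is the single base relation on the RHS of a semijoin, so a
     * fast-start parameterized path on b.x is worth considering: each outer
     * row needs at most one match.
     */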
    ListCell   *lc;

    foreach(lc, root->join_info_list)
    {
        SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(lc);
        int         varno;

        if ((sjinfo->jointype == JOIN_SEMI || sjinfo->jointype == JOIN_ANTI) &&
            bms_get_singleton_member(sjinfo->syn_righthand, &varno))
        {
            RelOptInfo *rel = find_base_rel(root, varno);

            rel->consider_param_startup = true;
        }
    }
}

/*
 * set_base_rel_sizes
 *    Set the size estimates (rows and widths) for each base-relation entry.
 *    Also determine whether to consider parallel paths for base relations.
 *
 * We do this in a separate pass over the base rels so that rowcount
 * estimates are available for parameterized path generation, and also so
 * that each rel's consider_parallel flag is set correctly before we begin to
 * generate paths.
 */
static void
set_base_rel_sizes(PlannerInfo *root)
{
    Index       rti;

    for (rti = 1; rti < root->simple_rel_array_size; rti++)
    {
        RelOptInfo *rel = root->simple_rel_array[rti];
        RangeTblEntry *rte;

        /* there may be empty slots corresponding to non-baserel RTEs */
        if (rel == NULL)
            continue;

        Assert(rel->relid == rti);  /* sanity check on array */

        /* ignore RTEs that are "other rels" */
        if (rel->reloptkind != RELOPT_BASEREL)
            continue;

        rte = root->simple_rte_array[rti];

        /*
         * If parallelism is allowable for this query in general, see whether
         * it's allowable for this rel in particular.  We have to do this
         * before set_rel_size(), because (a) if this rel is an inheritance
         * parent, set_append_rel_size() will use and perhaps change the rel's
         * consider_parallel flag, and (b) for some RTE types, set_rel_size()
         * goes ahead and makes paths immediately.
         */
        if (root->glob->parallelModeOK)
            set_rel_consider_parallel(root, rel, rte);

        set_rel_size(root, rel, rti, rte);
    }
}

/*
 * set_base_rel_pathlists
 *    Finds all paths available for scanning each base-relation entry.
 *    Sequential scan and any available indices are considered.
 *    Each useful path is attached to its relation's 'pathlist' field.
 */
static void
set_base_rel_pathlists(PlannerInfo *root)
{
    Index       rti;

    for (rti = 1; rti < root->simple_rel_array_size; rti++)
    {
        RelOptInfo *rel = root->simple_rel_array[rti];

        /* there may be empty slots corresponding to non-baserel RTEs */
        if (rel == NULL)
            continue;

        Assert(rel->relid == rti);  /* sanity check on array */

        /* ignore RTEs that are "other rels" */
        if (rel->reloptkind != RELOPT_BASEREL)
            continue;

        set_rel_pathlist(root, rel, rti, root->simple_rte_array[rti]);
    }
}

/*
 * set_rel_size
 *    Set size estimates for a base relation
 */
static void
set_rel_size(PlannerInfo *root, RelOptInfo *rel,
             Index rti, RangeTblEntry *rte)
{
    if (rel->reloptkind == RELOPT_BASEREL &&
        relation_excluded_by_constraints(root, rel, rte))
    {
        /*
         * We proved we don't need to scan the rel via constraint exclusion,
         * so set up a single dummy path for it.  Here we only check this for
         * regular baserels; if it's an otherrel, CE was already checked in
         * set_append_rel_size().
         *
         * In this case, we go ahead and set up the relation's path right away
         * instead of leaving it for set_rel_pathlist to do.  This is because
         * we don't have a convention for marking a rel as dummy except by
         * assigning a dummy path to it.
         */
        set_dummy_rel_pathlist(rel);
    }
    else if (rte->inh)
    {
        /* It's an "append relation", process accordingly */
        set_append_rel_size(root, rel, rti, rte);
    }
    else
    {
        switch (rel->rtekind)
        {
            case RTE_RELATION:
                if (rte->relkind == RELKIND_FOREIGN_TABLE)
                {
                    /* Foreign table */
                    set_foreign_size(root, rel, rte);
                }
                else if (rte->relkind == RELKIND_PARTITIONED_TABLE)
                {
                    /*
                     * We could get here if asked to scan a partitioned table
                     * with ONLY.  In that case we shouldn't scan any of the
                     * partitions, so mark it as a dummy rel.
                     */
                    set_dummy_rel_pathlist(rel);
                }
                else if (rte->tablesample != NULL)
                {
                    /* Sampled relation */
                    set_tablesample_rel_size(root, rel, rte);
                }
                else
                {
                    /* Plain relation */
                    set_plain_rel_size(root, rel, rte);
                }
                break;
            case RTE_SUBQUERY:

                /*
                 * Subqueries don't support making a choice between
                 * parameterized and unparameterized paths, so just go ahead
                 * and build their paths immediately.
                 */
                set_subquery_pathlist(root, rel, rti, rte);
                break;
            case RTE_FUNCTION:
                set_function_size_estimates(root, rel);
                break;
            case RTE_TABLEFUNC:
                set_tablefunc_size_estimates(root, rel);
                break;
            case RTE_VALUES:
                set_values_size_estimates(root, rel);
                break;
            case RTE_CTE:

                /*
                 * CTEs don't support making a choice between parameterized
                 * and unparameterized paths, so just go ahead and build their
                 * paths immediately.
                 */
                if (rte->self_reference)
                    set_worktable_pathlist(root, rel, rte);
                else
                    set_cte_pathlist(root, rel, rte);
                break;
            case RTE_NAMEDTUPLESTORE:
                /* Might as well just build the path immediately */
                set_namedtuplestore_pathlist(root, rel, rte);
                break;
            case RTE_RESULT:
                /* Might as well just build the path immediately */
                set_result_pathlist(root, rel, rte);
                break;
            default:
                elog(ERROR, "unexpected rtekind: %d", (int) rel->rtekind);
                break;
        }
    }

    /*
     * We insist that all non-dummy rels have a nonzero rowcount estimate.
     */
    Assert(rel->rows > 0 || IS_DUMMY_REL(rel));
}

/*
 * set_rel_pathlist
 *    Build access paths for a base relation
 */
static void
set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
                 Index rti, RangeTblEntry *rte)
{
    if (IS_DUMMY_REL(rel))
    {
        /* We already proved the relation empty, so nothing more to do */
    }
    else if (rte->inh)
    {
        /* It's an "append relation", process accordingly */
        set_append_rel_pathlist(root, rel, rti, rte);
    }
    else
    {
        switch (rel->rtekind)
        {
            case RTE_RELATION:
                if (rte->relkind == RELKIND_FOREIGN_TABLE)
                {
                    /* Foreign table */
                    set_foreign_pathlist(root, rel, rte);
                }
                else if (rte->tablesample != NULL)
                {
                    /* Sampled relation */
                    set_tablesample_rel_pathlist(root, rel, rte);
                }
                else
                {
                    /* Plain relation */
                    set_plain_rel_pathlist(root, rel, rte);
                }
                break;
            case RTE_SUBQUERY:
                /* Subquery --- fully handled during set_rel_size */
                break;
            case RTE_FUNCTION:
                /* RangeFunction */
                set_function_pathlist(root, rel, rte);
                break;
            case RTE_TABLEFUNC:
                /* Table Function */
                set_tablefunc_pathlist(root, rel, rte);
                break;
            case RTE_VALUES:
                /* Values list */
                set_values_pathlist(root, rel, rte);
                break;
            case RTE_CTE:
                /* CTE reference --- fully handled during set_rel_size */
                break;
            case RTE_NAMEDTUPLESTORE:
                /* tuplestore reference --- fully handled during set_rel_size */
                break;
            case RTE_RESULT:
                /* simple Result --- fully handled during set_rel_size */
                break;
            default:
                elog(ERROR, "unexpected rtekind: %d", (int) rel->rtekind);
                break;
        }
    }

    /*
     * Allow a plugin to editorialize on the set of Paths for this base
     * relation.  It could add new paths (such as CustomPaths) by calling
     * add_path(), or add_partial_path() if parallel aware.  It could also
     * delete or modify paths added by the core code.
     */
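    /*
     * A minimal sketch of installing such a hook (illustrative only; the
     * names prev_hook and my_rel_pathlist are hypothetical):
     *
     *     static set_rel_pathlist_hook_type prev_hook = NULL;
     *
     *     void _PG_init(void)
     *     {
     *         prev_hook = set_rel_pathlist_hook;
     *         set_rel_pathlist_hook = my_rel_pathlist;
     *     }
     *
     * where my_rel_pathlist() would first call prev_hook, if set, and then
     * use add_path() to offer its own paths for the rel.
     */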
    if (set_rel_pathlist_hook)
        (*set_rel_pathlist_hook) (root, rel, rti, rte);

    /*
     * If this is a baserel, we should normally consider gathering any partial
     * paths we may have created for it.  We have to do this after calling the
     * set_rel_pathlist_hook, else it cannot add partial paths to be included
     * here.
     *
     * However, if this is an inheritance child, skip it.  Otherwise, we could
     * end up with a very large number of gather nodes, each trying to grab
     * its own pool of workers.  Instead, we'll consider gathering partial
     * paths for the parent appendrel.
     *
     * Also, if this is the topmost scan/join rel (that is, the only baserel),
     * we postpone gathering until the final scan/join targetlist is available
     * (see grouping_planner).
     */
    if (rel->reloptkind == RELOPT_BASEREL &&
        bms_membership(root->all_baserels) != BMS_SINGLETON)
        generate_useful_gather_paths(root, rel, false);

    /* Now find the cheapest of the paths for this rel */
    set_cheapest(rel);

#ifdef OPTIMIZER_DEBUG
    debug_print_rel(root, rel);
#endif
}

/*
 * set_plain_rel_size
 *    Set size estimates for a plain relation (no subquery, no inheritance)
 */
static void
set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
    /*
     * Test any partial indexes of rel for applicability.  We must do this
     * first since partial unique indexes can affect size estimates.
     */
    check_index_predicates(root, rel);

    /* Mark rel with estimated output rows, width, etc */
    set_baserel_size_estimates(root, rel);
}

/*
 * If this relation could possibly be scanned from within a worker, then set
 * its consider_parallel flag.
 */
static void
set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
                          RangeTblEntry *rte)
{
    /*
     * The flag has previously been initialized to false, so we can just
     * return if it becomes clear that we can't safely set it.
     */
    Assert(!rel->consider_parallel);

    /* Don't call this if parallelism is disallowed for the entire query. */
    Assert(root->glob->parallelModeOK);

    /* This should only be called for baserels and appendrel children. */
    Assert(IS_SIMPLE_REL(rel));

    /* Assorted checks based on rtekind. */
    switch (rte->rtekind)
    {
        case RTE_RELATION:

            /*
             * Currently, parallel workers can't access the leader's temporary
             * tables.  We could possibly relax this if we wrote all of its
             * local buffers at the start of the query and made no changes
             * thereafter (maybe we could allow hint bit changes), and if we
             * taught the workers to read them.  Writing a large number of
             * temporary buffers could be expensive, though, and we don't have
             * the rest of the necessary infrastructure right now anyway.  So
             * for now, bail out if we see a temporary table.
             */
            if (get_rel_persistence(rte->relid) == RELPERSISTENCE_TEMP)
                return;

            /*
             * Table sampling can be pushed down to workers if the sample
             * function and its arguments are safe.
             */
            if (rte->tablesample != NULL)
            {
                char        proparallel = func_parallel(rte->tablesample->tsmhandler);

                if (proparallel != PROPARALLEL_SAFE)
                    return;
                if (!is_parallel_safe(root, (Node *) rte->tablesample->args))
                    return;
            }

            /*
             * Ask FDWs whether they can support performing a ForeignScan
             * within a worker.  Most often, the answer will be no.  For
             * example, if the nature of the FDW is such that it opens a TCP
             * connection with a remote server, each parallel worker would end
             * up with a separate connection, and these connections might not
             * be appropriately coordinated between workers and the leader.
             */
            if (rte->relkind == RELKIND_FOREIGN_TABLE)
            {
                Assert(rel->fdwroutine);
                if (!rel->fdwroutine->IsForeignScanParallelSafe)
                    return;
                if (!rel->fdwroutine->IsForeignScanParallelSafe(root, rel, rte))
                    return;
            }

            /*
             * There are additional considerations for appendrels, which we'll
             * deal with in set_append_rel_size and set_append_rel_pathlist.
             * For now, just set consider_parallel based on the rel's own
             * quals and targetlist.
             */
            break;

        case RTE_SUBQUERY:

            /*
             * There's no intrinsic problem with scanning a subquery-in-FROM
             * (as distinct from a SubPlan or InitPlan) in a parallel worker.
             * If the subquery doesn't happen to have any parallel-safe paths,
             * then flagging it as consider_parallel won't change anything,
             * but that's true for plain tables, too.  We must set
             * consider_parallel based on the rel's own quals and targetlist,
             * so that if a subquery path is parallel-safe but the quals and
             * projection we're sticking onto it are not, we correctly mark
             * the SubqueryScanPath as not parallel-safe.  (Note that
             * set_subquery_pathlist() might push some of these quals down
             * into the subquery itself, but that doesn't change anything.)
             *
             * We can't push a sub-select containing LIMIT/OFFSET to workers,
             * as there is no guarantee that the row order will be fully
             * deterministic, and applying LIMIT/OFFSET will lead to
             * inconsistent results at the top-level.  (In some cases, where
             * the result is ordered, we could relax this restriction.  But it
             * doesn't currently seem worth expending extra effort to do so.)
             */
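            /*
             * For example, in
             *
             *     SELECT * FROM (SELECT * FROM t LIMIT 10) ss;
             *
             * two workers scanning t in parallel could each produce a
             * different, equally valid set of 10 rows, so the overall result
             * would not be stable from run to run.
             */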
            {
                Query      *subquery = castNode(Query, rte->subquery);

                if (limit_needed(subquery))
                    return;
            }
            break;

        case RTE_JOIN:
            /* Shouldn't happen; we're only considering baserels here. */
            Assert(false);
            return;

        case RTE_FUNCTION:
            /* Check for parallel-restricted functions. */
            if (!is_parallel_safe(root, (Node *) rte->functions))
                return;
            break;

        case RTE_TABLEFUNC:
            /* not parallel safe */
            return;

        case RTE_VALUES:
            /* Check for parallel-restricted functions. */
            if (!is_parallel_safe(root, (Node *) rte->values_lists))
                return;
            break;

        case RTE_CTE:

            /*
             * CTE tuplestores aren't shared among parallel workers, so we
             * force all CTE scans to happen in the leader.  Also, populating
             * the CTE would require executing a subplan that's not available
             * in the worker, might be parallel-restricted, and must get
             * executed only once.
             */
            return;

        case RTE_NAMEDTUPLESTORE:

            /*
             * tuplestore cannot be shared, at least without more
             * infrastructure to support that.
             */
            return;

        case RTE_RESULT:
            /* RESULT RTEs, in themselves, are no problem. */
            break;
    }

    /*
     * If there's anything in baserestrictinfo that's parallel-restricted, we
     * give up on parallelizing access to this relation.  We could consider
     * instead postponing application of the restricted quals until we're
     * above all the parallelism in the plan tree, but it's not clear that
     * that would be a win in very many cases, and it might be tricky to make
     * outer join clauses work correctly.  It would likely break equivalence
     * classes, too.
     */
    if (!is_parallel_safe(root, (Node *) rel->baserestrictinfo))
        return;

    /*
     * Likewise, if the relation's outputs are not parallel-safe, give up.
     * (Usually, they're just Vars, but sometimes they're not.)
     */
    if (!is_parallel_safe(root, (Node *) rel->reltarget->exprs))
        return;

    /* We have a winner. */
    rel->consider_parallel = true;
}

/*
 * set_plain_rel_pathlist
 *    Build access paths for a plain relation (no subquery, no inheritance)
 */
static void
set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
    Relids      required_outer;

    /*
     * We don't support pushing join clauses into the quals of a seqscan, but
     * it could still have required parameterization due to LATERAL refs in
     * its tlist.
     */
    required_outer = rel->lateral_relids;

    /* Consider sequential scan */
    add_path(rel, create_seqscan_path(root, rel, required_outer, 0));

    /* If appropriate, consider parallel sequential scan */
    if (rel->consider_parallel && required_outer == NULL)
        create_plain_partial_paths(root, rel);

    /* Consider index scans */
    create_index_paths(root, rel);

    /* Consider TID scans */
    create_tidscan_paths(root, rel);
}

/*
 * create_plain_partial_paths
 *    Build partial access paths for parallel scan of a plain relation
 */
static void
create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel)
{
    int         parallel_workers;

    parallel_workers = compute_parallel_worker(rel, rel->pages, -1,
                                               max_parallel_workers_per_gather);

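    /*
     * With default settings, compute_parallel_worker() picks roughly one
     * worker once the table reaches min_parallel_table_scan_size (8MB, i.e.
     * 1024 pages), and one more each time the size triples beyond that:
     * e.g. ~1024 pages -> 1 worker, ~3072 -> 2, ~9216 -> 3, capped by
     * max_parallel_workers_per_gather.
     */
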
    /* If any limit was set to zero, the user doesn't want a parallel scan. */
    if (parallel_workers <= 0)
        return;

    /* Add an unordered partial path based on a parallel sequential scan. */
    add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers));
}

/*
 * set_tablesample_rel_size
 *    Set size estimates for a sampled relation
 */
static void
set_tablesample_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
    TableSampleClause *tsc = rte->tablesample;
    TsmRoutine *tsm;
    BlockNumber pages;
    double      tuples;

    /*
     * Test any partial indexes of rel for applicability.  We must do this
     * first since partial unique indexes can affect size estimates.
     */
    check_index_predicates(root, rel);

    /*
     * Call the sampling method's estimation function to estimate the number
     * of pages it will read and the number of tuples it will return.  (Note:
     * we assume the function returns sane values.)
     */
    tsm = GetTsmRoutine(tsc->tsmhandler);
    tsm->SampleScanGetSampleSize(root, rel, tsc->args,
                                 &pages, &tuples);

    /*
     * For the moment, because we will only consider a SampleScan path for the
     * rel, it's okay to just overwrite the pages and tuples estimates for the
     * whole relation.  If we ever consider multiple path types for sampled
     * rels, we'll need more complication.
     */
    rel->pages = pages;
    rel->tuples = tuples;

    /* Mark rel with estimated output rows, width, etc */
    set_baserel_size_estimates(root, rel);
}

/*
 * set_tablesample_rel_pathlist
 *    Build access paths for a sampled relation
 */
static void
set_tablesample_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
    Relids      required_outer;
    Path       *path;

    /*
     * We don't support pushing join clauses into the quals of a samplescan,
     * but it could still have required parameterization due to LATERAL refs
     * in its tlist or TABLESAMPLE arguments.
     */
    required_outer = rel->lateral_relids;

    /* Consider sampled scan */
    path = create_samplescan_path(root, rel, required_outer);

    /*
     * If the sampling method does not support repeatable scans, we must avoid
     * plans that would scan the rel multiple times.  Ideally, we'd simply
     * avoid putting the rel on the inside of a nestloop join; but adding such
     * a consideration to the planner seems like a great deal of complication
     * to support an uncommon usage of second-rate sampling methods.  Instead,
     * if there is a risk that the query might perform an unsafe join, just
     * wrap the SampleScan in a Materialize node.  We can check for joins by
     * counting the membership of all_baserels (note that this correctly
     * counts inheritance trees as single rels).  If we're inside a subquery,
     * we can't easily check whether a join might occur in the outer query, so
     * just assume one is possible.
     *
     * GetTsmRoutine is relatively expensive compared to the other tests here,
     * so check repeatable_across_scans last, even though that's a bit odd.
     */
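    /*
     * For example, the built-in SYSTEM and BERNOULLI sampling methods are
     * repeatable across scans, while contrib's tsm_system_rows and
     * tsm_system_time are not, so only the latter need the Materialize
     * shield added here.
     */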
    if ((root->query_level > 1 ||
         bms_membership(root->all_baserels) != BMS_SINGLETON) &&
        !(GetTsmRoutine(rte->tablesample->tsmhandler)->repeatable_across_scans))
    {
        path = (Path *) create_material_path(rel, path);
    }

    add_path(rel, path);

    /* For the moment, at least, there are no other paths to consider */
}

/*
 * set_foreign_size
 *    Set size estimates for a foreign table RTE
 */
static void
set_foreign_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
    /* Mark rel with estimated output rows, width, etc */
    set_foreign_size_estimates(root, rel);

    /* Let FDW adjust the size estimates, if it can */
    rel->fdwroutine->GetForeignRelSize(root, rel, rte->relid);

    /* ... but do not let it set the rows estimate to zero */
    rel->rows = clamp_row_est(rel->rows);

    /*
     * Also, make sure rel->tuples is not insane relative to rel->rows.
     * Notably, this ensures sanity if pg_class.reltuples contains -1 and the
     * FDW doesn't do anything to replace that.
     */
    rel->tuples = Max(rel->tuples, rel->rows);
}

/*
 * set_foreign_pathlist
 *    Build access paths for a foreign table RTE
 */
static void
set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
    /* Call the FDW's GetForeignPaths function to generate path(s) */
    rel->fdwroutine->GetForeignPaths(root, rel, rte->relid);
}

/*
 * set_append_rel_size
 *    Set size estimates for a simple "append relation"
 *
 * The passed-in rel and RTE represent the entire append relation.  The
 * relation's contents are computed by appending together the output of the
 * individual member relations.  Note that in the non-partitioned inheritance
 * case, the first member relation is actually the same table as is mentioned
 * in the parent RTE ... but it has a different RTE and RelOptInfo.  This is
 * a good thing because their outputs are not the same size.
 */
static void
set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
                    Index rti, RangeTblEntry *rte)
{
    int         parentRTindex = rti;
    bool        has_live_children;
    double      parent_rows;
    double      parent_size;
    double     *parent_attrsizes;
    int         nattrs;
    ListCell   *l;

    /* Guard against stack overflow due to overly deep inheritance tree. */
    check_stack_depth();

    Assert(IS_SIMPLE_REL(rel));

    /*
     * If this is a partitioned baserel, set the consider_partitionwise_join
     * flag; currently, we only consider partitionwise joins with the baserel
     * if its targetlist doesn't contain a whole-row Var.
     */
    if (enable_partitionwise_join &&
        rel->reloptkind == RELOPT_BASEREL &&
        rte->relkind == RELKIND_PARTITIONED_TABLE &&
        rel->attr_needed[InvalidAttrNumber - rel->min_attr] == NULL)
        rel->consider_partitionwise_join = true;

    /*
     * Initialize to compute size estimates for the whole append relation.
     *
     * We handle width estimates by weighting the widths of different child
     * rels proportionally to their number of rows.  This is sensible because
     * the use of width estimates is mainly to compute the total relation
     * "footprint" if we have to sort or hash it.  To do this, we sum the
     * total equivalent size (in "double" arithmetic) and then divide by the
     * total rowcount estimate.  This is done separately for the total rel
     * width and each attribute.
     *
     * Note: if you consider changing this logic, beware that child rels could
     * have zero rows and/or width, if they were excluded by constraints.
     */
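    /*
     * For example, a child with 1000 rows of width 40 and a child with 3000
     * rows of width 80 yield a parent width of
     * (1000*40 + 3000*80) / 4000 = 70.
     */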
    has_live_children = false;
    parent_rows = 0;
    parent_size = 0;
    nattrs = rel->max_attr - rel->min_attr + 1;
    parent_attrsizes = (double *) palloc0(nattrs * sizeof(double));

    foreach(l, root->append_rel_list)
    {
        AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
        int         childRTindex;
        RangeTblEntry *childRTE;
        RelOptInfo *childrel;
        ListCell   *parentvars;
        ListCell   *childvars;

        /* append_rel_list contains all append rels; ignore others */
        if (appinfo->parent_relid != parentRTindex)
            continue;

        childRTindex = appinfo->child_relid;
        childRTE = root->simple_rte_array[childRTindex];

        /*
         * The child rel's RelOptInfo was already created during
         * add_other_rels_to_query.
         */
        childrel = find_base_rel(root, childRTindex);
        Assert(childrel->reloptkind == RELOPT_OTHER_MEMBER_REL);

        /* We may have already proven the child to be dummy. */
        if (IS_DUMMY_REL(childrel))
            continue;

        /*
         * We have to copy the parent's targetlist and quals to the child,
         * with appropriate substitution of variables.  However, the
         * baserestrictinfo quals were already copied/substituted when the
         * child RelOptInfo was built.  So we don't need any additional setup
         * before applying constraint exclusion.
         */
        if (relation_excluded_by_constraints(root, childrel, childRTE))
        {
            /*
             * This child need not be scanned, so we can omit it from the
             * appendrel.
             */
            set_dummy_rel_pathlist(childrel);
            continue;
        }

        /*
         * Constraint exclusion failed, so copy the parent's join quals and
         * targetlist to the child, with appropriate variable substitutions.
         *
         * NB: the resulting childrel->reltarget->exprs may contain arbitrary
         * expressions, which otherwise would not occur in a rel's targetlist.
         * Code that might be looking at an appendrel child must cope with
         * such.  (Normally, a rel's targetlist would only include Vars and
         * PlaceHolderVars.)  XXX we do not bother to update the cost or width
         * fields of childrel->reltarget; not clear if that would be useful.
         */
        childrel->joininfo = (List *)
            adjust_appendrel_attrs(root,
                                   (Node *) rel->joininfo,
                                   1, &appinfo);
        childrel->reltarget->exprs = (List *)
            adjust_appendrel_attrs(root,
                                   (Node *) rel->reltarget->exprs,
                                   1, &appinfo);

        /*
         * We have to make child entries in the EquivalenceClass data
         * structures as well.  This is needed either if the parent
         * participates in some eclass joins (because we will want to consider
         * inner-indexscan joins on the individual children) or if the parent
         * has useful pathkeys (because we should try to build MergeAppend
         * paths that produce those sort orderings).
         */
        if (rel->has_eclass_joins || has_useful_pathkeys(root, rel))
            add_child_rel_equivalences(root, appinfo, rel, childrel);
        childrel->has_eclass_joins = rel->has_eclass_joins;

        /*
         * Note: we could compute appropriate attr_needed data for the child's
         * variables, by transforming the parent's attr_needed through the
         * translated_vars mapping.  However, currently there's no need
         * because attr_needed is only examined for base relations not
         * otherrels.  So we just leave the child's attr_needed empty.
         */

        /*
         * If we consider partitionwise joins with the parent rel, do the same
         * for partitioned child rels.
         *
         * Note: here we abuse the consider_partitionwise_join flag by setting
         * it for child rels that are not themselves partitioned.  We do so to
         * tell try_partitionwise_join() that the child rel is sufficiently
         * valid to be used as a per-partition input, even if it later gets
         * proven to be dummy.  (It's not usable until we've set up the
         * reltarget and EC entries, which we just did.)
         */
        if (rel->consider_partitionwise_join)
            childrel->consider_partitionwise_join = true;

        /*
         * If parallelism is allowable for this query in general, see whether
         * it's allowable for this childrel in particular.  But if we've
         * already decided the appendrel is not parallel-safe as a whole,
         * there's no point in considering parallelism for this child.  For
         * consistency, do this before calling set_rel_size() for the child.
         */
        if (root->glob->parallelModeOK && rel->consider_parallel)
            set_rel_consider_parallel(root, childrel, childRTE);

        /*
         * Compute the child's size.
         */
        set_rel_size(root, childrel, childRTindex, childRTE);

        /*
         * It is possible that constraint exclusion detected a contradiction
         * within a child subquery, even though we didn't prove one above.  If
         * so, we can skip this child.
         */
        if (IS_DUMMY_REL(childrel))
            continue;

        /* We have at least one live child. */
        has_live_children = true;

        /*
         * If any live child is not parallel-safe, treat the whole appendrel
         * as not parallel-safe.  In future we might be able to generate plans
         * in which some children are farmed out to workers while others are
         * not; but we don't have that today, so it's a waste to consider
         * partial paths anywhere in the appendrel unless it's all safe.
         * (Child rels visited before this one will be unmarked in
         * set_append_rel_pathlist().)
         */
        if (!childrel->consider_parallel)
            rel->consider_parallel = false;

        /*
         * Accumulate size information from each live child.
         */
        Assert(childrel->rows > 0);

        parent_rows += childrel->rows;
        parent_size += childrel->reltarget->width * childrel->rows;

        /*
         * Accumulate per-column estimates too.  We need not do anything for
         * PlaceHolderVars in the parent list.  If a child expression isn't a
         * Var, or we didn't record a width estimate for it, we have to fall
         * back on a datatype-based estimate.
         *
         * By construction, the child's targetlist is 1-to-1 with the parent's.
         */
        forboth(parentvars, rel->reltarget->exprs,
                childvars, childrel->reltarget->exprs)
        {
            Var        *parentvar = (Var *) lfirst(parentvars);
            Node       *childvar = (Node *) lfirst(childvars);

            if (IsA(parentvar, Var))
            {
                int         pndx = parentvar->varattno - rel->min_attr;
                int32       child_width = 0;

                if (IsA(childvar, Var) &&
                    ((Var *) childvar)->varno == childrel->relid)
                {
                    int         cndx = ((Var *) childvar)->varattno - childrel->min_attr;

                    child_width = childrel->attr_widths[cndx];
                }
                if (child_width <= 0)
                    child_width = get_typavgwidth(exprType(childvar),
                                                  exprTypmod(childvar));
                Assert(child_width > 0);
                parent_attrsizes[pndx] += child_width * childrel->rows;
            }
        }
    }

    if (has_live_children)
    {
        /*
         * Save the finished size estimates.
         */
        int         i;

        Assert(parent_rows > 0);
        rel->rows = parent_rows;
        rel->reltarget->width = rint(parent_size / parent_rows);
        for (i = 0; i < nattrs; i++)
            rel->attr_widths[i] = rint(parent_attrsizes[i] / parent_rows);

        /*
         * Set "raw tuples" count equal to "rows" for the appendrel; needed
         * because some places assume rel->tuples is valid for any baserel.
         */
        rel->tuples = parent_rows;

        /*
         * Note that we leave rel->pages as zero; this is important to avoid
         * double-counting the appendrel tree in total_table_pages.
         */
    }
    else
    {
        /*
         * All children were excluded by constraints, so mark the whole
         * appendrel dummy.  We must do this in this phase so that the rel's
         * dummy-ness is visible when we generate paths for other rels.
         */
        set_dummy_rel_pathlist(rel);
    }

    pfree(parent_attrsizes);
}

/*
 * set_append_rel_pathlist
 *    Build access paths for an "append relation"
 */
static void
set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
                        Index rti, RangeTblEntry *rte)
{
    int         parentRTindex = rti;
    List       *live_childrels = NIL;
    ListCell   *l;

    /*
     * Generate access paths for each member relation, and remember the
     * non-dummy children.
     */
    foreach(l, root->append_rel_list)
    {
        AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
        int         childRTindex;
        RangeTblEntry *childRTE;
        RelOptInfo *childrel;

        /* append_rel_list contains all append rels; ignore others */
        if (appinfo->parent_relid != parentRTindex)
            continue;

        /* Re-locate the child RTE and RelOptInfo */
        childRTindex = appinfo->child_relid;
        childRTE = root->simple_rte_array[childRTindex];
        childrel = root->simple_rel_array[childRTindex];

        /*
         * If set_append_rel_size() decided the parent appendrel was
         * parallel-unsafe at some point after visiting this child rel, we
         * need to propagate the unsafety marking down to the child, so that
         * we don't generate useless partial paths for it.
         */
        if (!rel->consider_parallel)
            childrel->consider_parallel = false;

        /*
         * Compute the child's access paths.
         */
        set_rel_pathlist(root, childrel, childRTindex, childRTE);

        /*
         * If the child is dummy, ignore it.
         */
        if (IS_DUMMY_REL(childrel))
            continue;

        /*
         * Child is live, so add it to the live_childrels list for use below.
         */
        live_childrels = lappend(live_childrels, childrel);
    }

    /* Add paths to the append relation. */
    add_paths_to_append_rel(root, rel, live_childrels);
}


/*
 * add_paths_to_append_rel
 *    Generate paths for the given append relation given the set of non-dummy
 *    child rels.
 *
 * The function collects all parameterizations and orderings supported by the
 * non-dummy children.  For every such parameterization or ordering, it creates
 * an append path collecting one path from each non-dummy child with the given
 * parameterization or ordering.  Similarly it collects partial paths from
 * non-dummy children to create partial append paths.
 */
void
add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel,
                        List *live_childrels)
{
    List       *subpaths = NIL;
    bool        subpaths_valid = true;
    List       *partial_subpaths = NIL;
    List       *pa_partial_subpaths = NIL;
    List       *pa_nonpartial_subpaths = NIL;
    bool        partial_subpaths_valid = true;
    bool        pa_subpaths_valid;
    List       *all_child_pathkeys = NIL;
    List       *all_child_outers = NIL;
    ListCell   *l;
    List       *partitioned_rels = NIL;
    List       *partial_partitioned_rels = NIL;
    List       *pa_partitioned_rels = NIL;
    double      partial_rows = -1;
    bool        flatten_partitioned_rels;

    /* If appropriate, consider parallel append */
    pa_subpaths_valid = enable_parallel_append && rel->consider_parallel;

    /* What we do with the partitioned_rels list is different for UNION ALL */
    flatten_partitioned_rels = (rel->rtekind != RTE_SUBQUERY);

    /*
     * For partitioned tables, we accumulate a list of Relids of each
     * partitioned table which has at least one of its subpartitions directly
     * present as a subpath in this Append.  This is used later for run-time
     * partition pruning.  We must maintain separate lists for each Append
     * Path that we create, as some paths that we create here can't flatten
     * sub-Appends and sub-MergeAppends into the top-level Append.  We needn't
     * bother doing this for join rels as no run-time pruning is done on
     * those.
     */
    if (rel->reloptkind != RELOPT_JOINREL && rel->part_scheme != NULL)
    {
        partitioned_rels = list_make1(bms_make_singleton(rel->relid));
        partial_partitioned_rels = list_make1(bms_make_singleton(rel->relid));

        /* skip this one if we're not going to make a Parallel Append path */
        if (pa_subpaths_valid)
            pa_partitioned_rels = list_make1(bms_make_singleton(rel->relid));
    }

    /*
     * For every non-dummy child, remember the cheapest path.  Also, identify
     * all pathkeys (orderings) and parameterizations (required_outer sets)
     * available for the non-dummy member relations.
     */
    foreach(l, live_childrels)
    {
        RelOptInfo *childrel = lfirst(l);
        ListCell   *lcp;
        Path       *cheapest_partial_path = NULL;

        /*
         * If the child has an unparameterized cheapest-total path, add that
         * to the unparameterized Append path we are constructing for the
         * parent.  If not, there's no workable unparameterized path.
         *
         * With partitionwise aggregates, the child rel's pathlist may be
         * empty, so don't assume that a path exists here.
         */
        if (childrel->pathlist != NIL &&
            childrel->cheapest_total_path->param_info == NULL)
            accumulate_append_subpath(childrel->cheapest_total_path,
                                      &subpaths, NULL, &partitioned_rels,
                                      flatten_partitioned_rels);
        else
            subpaths_valid = false;

        /* Same idea, but for a partial plan. */
        if (childrel->partial_pathlist != NIL)
        {
            cheapest_partial_path = linitial(childrel->partial_pathlist);
            accumulate_append_subpath(cheapest_partial_path,
                                      &partial_subpaths, NULL,
                                      &partial_partitioned_rels,
                                      flatten_partitioned_rels);
        }
        else
            partial_subpaths_valid = false;

        /*
         * Same idea, but for a parallel append mixing partial and non-partial
         * paths.
         */
        if (pa_subpaths_valid)
        {
            Path       *nppath = NULL;

            nppath =
                get_cheapest_parallel_safe_total_inner(childrel->pathlist);

            if (cheapest_partial_path == NULL && nppath == NULL)
            {
                /* Neither a partial nor a parallel-safe path?  Forget it. */
                pa_subpaths_valid = false;
            }
            else if (nppath == NULL ||
                     (cheapest_partial_path != NULL &&
                      cheapest_partial_path->total_cost < nppath->total_cost))
            {
                /* Partial path is cheaper or the only option. */
                Assert(cheapest_partial_path != NULL);
                accumulate_append_subpath(cheapest_partial_path,
                                          &pa_partial_subpaths,
                                          &pa_nonpartial_subpaths,
                                          &pa_partitioned_rels,
                                          flatten_partitioned_rels);
            }
            else
            {
                /*
                 * Either we've got only a non-partial path, or we think that
                 * a single backend can execute the best non-partial path
                 * faster than all the parallel backends working together can
                 * execute the best partial path.
                 *
                 * It might make sense to be more aggressive here.  Even if
                 * the best non-partial path is more expensive than the best
                 * partial path, it could still be better to choose the
                 * non-partial path if there are several such paths that can
                 * be given to different workers.  For now, we don't try to
                 * figure that out.
                 */
                accumulate_append_subpath(nppath,
                                          &pa_nonpartial_subpaths,
                                          NULL,
                                          &pa_partitioned_rels,
                                          flatten_partitioned_rels);
            }
        }

        /*
         * Collect lists of all the available path orderings and
         * parameterizations for all the children.  We use these as a
         * heuristic to indicate which sort orderings and parameterizations we
         * should build Append and MergeAppend paths for.
         */
        foreach(lcp, childrel->pathlist)
        {
            Path       *childpath = (Path *) lfirst(lcp);
            List       *childkeys = childpath->pathkeys;
            Relids      childouter = PATH_REQ_OUTER(childpath);

            /* Unsorted paths don't contribute to pathkey list */
            if (childkeys != NIL)
            {
                ListCell   *lpk;
                bool        found = false;

                /* Have we already seen this ordering? */
                foreach(lpk, all_child_pathkeys)
                {
                    List       *existing_pathkeys = (List *) lfirst(lpk);

                    if (compare_pathkeys(existing_pathkeys,
                                         childkeys) == PATHKEYS_EQUAL)
                    {
                        found = true;
                        break;
                    }
                }
                if (!found)
                {
                    /* No, so add it to all_child_pathkeys */
                    all_child_pathkeys = lappend(all_child_pathkeys,
                                                 childkeys);
                }
            }

            /* Unparameterized paths don't contribute to param-set list */
            if (childouter)
            {
                ListCell   *lco;
                bool        found = false;

                /* Have we already seen this param set? */
                foreach(lco, all_child_outers)
                {
                    Relids      existing_outers = (Relids) lfirst(lco);

                    if (bms_equal(existing_outers, childouter))
                    {
                        found = true;
                        break;
                    }
                }
                if (!found)
                {
                    /* No, so add it to all_child_outers */
                    all_child_outers = lappend(all_child_outers,
                                               childouter);
                }
            }
        }
    }

    /*
     * If we found unparameterized paths for all children, build an unordered,
     * unparameterized Append path for the rel.  (Note: this is correct even
     * if we have zero or one live subpath due to constraint exclusion.)
     */
    if (subpaths_valid)
        add_path(rel, (Path *) create_append_path(root, rel, subpaths, NIL,
                                                  NIL, NULL, 0, false,
                                                  partitioned_rels, -1));

    /*
     * Consider an append of unordered, unparameterized partial paths.  Make
     * it parallel-aware if possible.
     */
    if (partial_subpaths_valid && partial_subpaths != NIL)
    {
        AppendPath *appendpath;
        ListCell   *lc;
        int         parallel_workers = 0;

        /* Find the highest number of workers requested for any subpath. */
        foreach(lc, partial_subpaths)
        {
            Path       *path = lfirst(lc);

            parallel_workers = Max(parallel_workers, path->parallel_workers);
        }
        Assert(parallel_workers > 0);

        /*
         * If the use of parallel append is permitted, always request at least
         * log2(# of children) workers.  We assume it can be useful to have
         * extra workers in this case because they will be spread out across
         * the children.  The precise formula is just a guess, but we don't
         * want to end up with a radically different answer for a table with N
         * partitions vs. an unpartitioned table with the same data, so the
         * use of some kind of log-scaling here seems to make some sense.
         */
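        /*
         * For example, fls() returns the position of the most significant
         * set bit, so an Append over 100 children requests at least
         * fls(100) = 7 workers, subject to the max_parallel_workers_per_gather
         * cap applied just below.
         */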
        if (enable_parallel_append)
        {
            parallel_workers = Max(parallel_workers,
                                   fls(list_length(live_childrels)));
            parallel_workers = Min(parallel_workers,
                                   max_parallel_workers_per_gather);
        }
        Assert(parallel_workers > 0);

        /* Generate a partial append path. */
        appendpath = create_append_path(root, rel, NIL, partial_subpaths,
                                        NIL, NULL, parallel_workers,
                                        enable_parallel_append,
                                        partial_partitioned_rels, -1);

        /*
         * Make sure any subsequent partial paths use the same row count
         * estimate.
         */
        partial_rows = appendpath->path.rows;

        /* Add the path. */
        add_partial_path(rel, (Path *) appendpath);
    }

    /*
     * Consider a parallel-aware append using a mix of partial and non-partial
     * paths.  (This only makes sense if there's at least one child which has
     * a non-partial path that is substantially cheaper than any partial path;
     * otherwise, we should use the append path added in the previous step.)
     */
    if (pa_subpaths_valid && pa_nonpartial_subpaths != NIL)
    {
        AppendPath *appendpath;
        ListCell   *lc;
        int         parallel_workers = 0;

        /*
         * Find the highest number of workers requested for any partial
         * subpath.
         */
        foreach(lc, pa_partial_subpaths)
        {
            Path       *path = lfirst(lc);

            parallel_workers = Max(parallel_workers, path->parallel_workers);
        }

        /*
         * Same formula here as above.  It's even more important in this
         * instance because the non-partial paths won't contribute anything to
         * the planned number of parallel workers.
         */
        parallel_workers = Max(parallel_workers,
                               fls(list_length(live_childrels)));
        parallel_workers = Min(parallel_workers,
                               max_parallel_workers_per_gather);
        Assert(parallel_workers > 0);

        appendpath = create_append_path(root, rel, pa_nonpartial_subpaths,
                                        pa_partial_subpaths,
                                        NIL, NULL, parallel_workers, true,
                                        pa_partitioned_rels, partial_rows);
        add_partial_path(rel, (Path *) appendpath);
    }

    /*
     * Also build unparameterized ordered append paths based on the collected
     * list of child pathkeys.
     */
    if (subpaths_valid)
        generate_orderedappend_paths(root, rel, live_childrels,
                                     all_child_pathkeys,
                                     partitioned_rels);

    /*
     * Build Append paths for each parameterization seen among the child rels.
     * (This may look pretty expensive, but in most cases of practical
     * interest, the child rels will expose mostly the same parameterizations,
     * so that not that many cases actually get considered here.)
     *
     * The Append node itself cannot enforce quals, so all qual checking must
     * be done in the child paths.  This means that to have a parameterized
     * Append path, we must have the exact same parameterization for each
     * child path; otherwise some children might be failing to check the
     * moved-down quals.  To make them match up, we can try to increase the
     * parameterization of lesser-parameterized paths.
     */
    foreach(l, all_child_outers)
    {
        Relids      required_outer = (Relids) lfirst(l);
        ListCell   *lcr;
        List       *part_rels = NIL;

        if (rel->reloptkind != RELOPT_JOINREL && rel->part_scheme != NULL)
            part_rels = list_make1(bms_make_singleton(rel->relid));

        /* Select the child paths for an Append with this parameterization */
        subpaths = NIL;
        subpaths_valid = true;
        foreach(lcr, live_childrels)
        {
            RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr);
            Path       *subpath;

            if (childrel->pathlist == NIL)
            {
                /* failed to make a suitable path for this child */
                subpaths_valid = false;
                break;
            }

            subpath = get_cheapest_parameterized_child_path(root,
                                                            childrel,
                                                            required_outer);
            if (subpath == NULL)
            {
                /* failed to make a suitable path for this child */
                subpaths_valid = false;
                break;
            }
            accumulate_append_subpath(subpath, &subpaths, NULL, &part_rels,
                                      flatten_partitioned_rels);
        }

        if (subpaths_valid)
            add_path(rel, (Path *)
                     create_append_path(root, rel, subpaths, NIL,
                                        NIL, required_outer, 0, false,
                                        part_rels, -1));
    }

    /*
     * When there is only a single child relation, the Append path can inherit
     * any ordering available for the child rel's path, so that it's useful to
     * consider ordered partial paths.  Above we only considered the cheapest
     * partial path for each child, but let's also make paths using any
     * partial paths that have pathkeys.
     */
    if (list_length(live_childrels) == 1)
    {
        RelOptInfo *childrel = (RelOptInfo *) linitial(live_childrels);

        /* skip the cheapest partial path, since we already used that above */
        for_each_from(l, childrel->partial_pathlist, 1)
        {
            Path       *path = (Path *) lfirst(l);
            AppendPath *appendpath;

            /* skip paths with no pathkeys. */
            if (path->pathkeys == NIL)
                continue;

            appendpath = create_append_path(root, rel, NIL, list_make1(path),
                                            NIL, NULL,
                                            path->parallel_workers, true,
                                            partitioned_rels, partial_rows);
            add_partial_path(rel, (Path *) appendpath);
        }
    }
}
1689 
1690 /*
1691  * generate_orderedappend_paths
1692  * Generate ordered append paths for an append relation
1693  *
1694  * Usually we generate MergeAppend paths here, but there are some special
1695  * cases where we can generate simple Append paths, because the subpaths
1696  * can provide tuples in the required order already.
1697  *
1698  * We generate a path for each ordering (pathkey list) appearing in
1699  * all_child_pathkeys.
1700  *
1701  * We consider both cheapest-startup and cheapest-total cases, ie, for each
1702  * interesting ordering, collect all the cheapest startup subpaths and all the
1703  * cheapest total paths, and build a suitable path for each case.
1704  *
1705  * We don't currently generate any parameterized ordered paths here. While
1706  * it would not take much more code here to do so, it's very unclear that it
1707  * is worth the planning cycles to investigate such paths: there's little
1708  * use for an ordered path on the inside of a nestloop. In fact, it's likely
1709  * that the current coding of add_path would reject such paths out of hand,
1710  * because add_path gives no credit for sort ordering of parameterized paths,
1711  * and a parameterized MergeAppend is going to be more expensive than the
1712  * corresponding parameterized Append path. If we ever try harder to support
1713  * parameterized mergejoin plans, it might be worth adding support for
1714  * parameterized paths here to feed such joins. (See notes in
1715  * optimizer/README for why that might not ever happen, though.)
1716  */
1717 static void
1718 generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel,
1719  List *live_childrels,
1720  List *all_child_pathkeys,
1721  List *partitioned_rels)
1722 {
1723  ListCell *lcp;
1724  List *partition_pathkeys = NIL;
1725  List *partition_pathkeys_desc = NIL;
1726  bool partition_pathkeys_partial = true;
1727  bool partition_pathkeys_desc_partial = true;
1728  List *startup_partitioned_rels = NIL;
1729  List *total_partitioned_rels = NIL;
1730  bool flatten_partitioned_rels;
1731 
1732  /* Set up the method for building the partitioned rels lists */
1733  flatten_partitioned_rels = (rel->rtekind != RTE_SUBQUERY);
1734 
1735  if (rel->reloptkind != RELOPT_JOINREL && rel->part_scheme != NULL)
1736  {
1737  startup_partitioned_rels = list_make1(bms_make_singleton(rel->relid));
1738  total_partitioned_rels = list_make1(bms_make_singleton(rel->relid));
1739  }
1740 
1741  /*
1742  * Some partitioned table setups may allow us to use an Append node
1743  * instead of a MergeAppend. This is possible in cases such as RANGE
1744  * partitioned tables where it's guaranteed that an earlier partition must
1745  * contain rows which come earlier in the sort order. To detect whether
1746  * this is relevant, build pathkey descriptions of the partition ordering,
1747  * for both forward and reverse scans.
1748  */
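 /*
  * As a hypothetical example, a table partitioned BY RANGE (a), with one
  * partition for a < 10 and another for a >= 10, can satisfy ORDER BY a
  * with a plain Append of ordered child paths, since every row of the
  * first partition sorts before every row of the second.
  */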
1749  if (rel->part_scheme != NULL && IS_SIMPLE_REL(rel) &&
1750  partitions_are_ordered(rel->boundinfo, rel->nparts))
1751  {
1752  partition_pathkeys = build_partition_pathkeys(root, rel,
1753  ForwardScanDirection,
1754  &partition_pathkeys_partial);
1755 
1756  partition_pathkeys_desc = build_partition_pathkeys(root, rel,
1757  BackwardScanDirection,
1758  &partition_pathkeys_desc_partial);
1759 
1760  /*
1761  * You might think we should truncate_useless_pathkeys here, but
1762  * allowing partition keys which are a subset of the query's pathkeys
1763  * can often be useful. For example, consider a table partitioned by
1764  * RANGE (a, b), and a query with ORDER BY a, b, c. If we have child
1765  * paths that can produce the a, b, c ordering (perhaps via indexes on
1766  * (a, b, c)) then it works to consider the appendrel output as
1767  * ordered by a, b, c.
1768  */
1769  }
1770 
1771  /* Now consider each interesting sort ordering */
1772  foreach(lcp, all_child_pathkeys)
1773  {
1774  List *pathkeys = (List *) lfirst(lcp);
1775  List *startup_subpaths = NIL;
1776  List *total_subpaths = NIL;
1777  bool startup_neq_total = false;
1778  ListCell *lcr;
1779  bool match_partition_order;
1780  bool match_partition_order_desc;
1781 
1782  /*
1783  * Determine if this sort ordering matches any partition pathkeys we
1784  * have, for both ascending and descending partition order. If the
1785  * partition pathkeys happen to be contained in pathkeys then it still
1786  * works, as described above, providing that the partition pathkeys
1787  * are complete and not just a prefix of the partition keys. (In such
1788  * cases we'll be relying on the child paths to have sorted the
1789  * lower-order columns of the required pathkeys.)
1790  */
1791  match_partition_order =
1792  pathkeys_contained_in(pathkeys, partition_pathkeys) ||
1793  (!partition_pathkeys_partial &&
1794  pathkeys_contained_in(partition_pathkeys, pathkeys));
1795 
1796  match_partition_order_desc = !match_partition_order &&
1797  (pathkeys_contained_in(pathkeys, partition_pathkeys_desc) ||
1798  (!partition_pathkeys_desc_partial &&
1799  pathkeys_contained_in(partition_pathkeys_desc, pathkeys)));
1800 
1801  /* Select the child paths for this ordering... */
1802  foreach(lcr, live_childrels)
1803  {
1804  RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr);
1805  Path *cheapest_startup,
1806  *cheapest_total;
1807 
1808  /* Locate the right paths, if they are available. */
1809  cheapest_startup =
1810  get_cheapest_path_for_pathkeys(childrel->pathlist,
1811  pathkeys,
1812  NULL,
1813  STARTUP_COST,
1814  false);
1815  cheapest_total =
1816  get_cheapest_path_for_pathkeys(childrel->pathlist,
1817  pathkeys,
1818  NULL,
1819  TOTAL_COST,
1820  false);
1821 
1822  /*
1823  * If we can't find any paths with the right order just use the
1824  * cheapest-total path; we'll have to sort it later.
1825  */
1826  if (cheapest_startup == NULL || cheapest_total == NULL)
1827  {
1828  cheapest_startup = cheapest_total =
1829  childrel->cheapest_total_path;
1830  /* Assert we do have an unparameterized path for this child */
1831  Assert(cheapest_total->param_info == NULL);
1832  }
1833 
1834  /*
1835  * Notice whether we actually have different paths for the
1836  * "cheapest" and "total" cases; frequently there will be no point
1837  * in two create_merge_append_path() calls.
1838  */
1839  if (cheapest_startup != cheapest_total)
1840  startup_neq_total = true;
1841 
1842  /*
1843  * Collect the appropriate child paths. The required logic varies
1844  * for the Append and MergeAppend cases.
1845  */
1846  if (match_partition_order)
1847  {
1848  /*
1849  * We're going to make a plain Append path. We don't need
1850  * most of what accumulate_append_subpath would do, but we do
1851  * want to cut out child Appends or MergeAppends if they have
1852  * just a single subpath (and hence aren't doing anything
1853  * useful).
1854  */
1855  cheapest_startup = get_singleton_append_subpath(cheapest_startup);
1856  cheapest_total = get_singleton_append_subpath(cheapest_total);
1857 
1858  startup_subpaths = lappend(startup_subpaths, cheapest_startup);
1859  total_subpaths = lappend(total_subpaths, cheapest_total);
1860  }
1861  else if (match_partition_order_desc)
1862  {
1863  /*
1864  * As above, but we need to reverse the order of the children,
1865  * because nodeAppend.c doesn't know anything about reverse
1866  * ordering and will scan the children in the order presented.
1867  */
1868  cheapest_startup = get_singleton_append_subpath(cheapest_startup);
1869  cheapest_total = get_singleton_append_subpath(cheapest_total);
1870 
1871  startup_subpaths = lcons(cheapest_startup, startup_subpaths);
1872  total_subpaths = lcons(cheapest_total, total_subpaths);
1873  }
1874  else
1875  {
1876  /*
1877  * Otherwise, rely on accumulate_append_subpath to collect the
1878  * child paths for the MergeAppend.
1879  */
1880  accumulate_append_subpath(cheapest_startup,
1881  &startup_subpaths, NULL,
1882  &startup_partitioned_rels,
1883  flatten_partitioned_rels);
1884  accumulate_append_subpath(cheapest_total,
1885  &total_subpaths, NULL,
1886  &total_partitioned_rels,
1887  flatten_partitioned_rels);
1888  }
1889  }
1890 
1891  /* ... and build the Append or MergeAppend paths */
1892  if (match_partition_order || match_partition_order_desc)
1893  {
1894  /* We only need Append */
1895  add_path(rel, (Path *) create_append_path(root,
1896  rel,
1897  startup_subpaths,
1898  NIL,
1899  pathkeys,
1900  NULL,
1901  0,
1902  false,
1903  startup_partitioned_rels,
1904  -1));
1905  if (startup_neq_total)
1906  add_path(rel, (Path *) create_append_path(root,
1907  rel,
1908  total_subpaths,
1909  NIL,
1910  pathkeys,
1911  NULL,
1912  0,
1913  false,
1914  total_partitioned_rels,
1915  -1));
1916  }
1917  else
1918  {
1919  /* We need MergeAppend */
1920  add_path(rel, (Path *) create_merge_append_path(root,
1921  rel,
1922  startup_subpaths,
1923  pathkeys,
1924  NULL,
1925  startup_partitioned_rels));
1926  if (startup_neq_total)
1927  add_path(rel, (Path *) create_merge_append_path(root,
1928  rel,
1929  total_subpaths,
1930  pathkeys,
1931  NULL,
1932  total_partitioned_rels));
1933  }
1934  }
1935 }
1936 
1937 /*
1938  * get_cheapest_parameterized_child_path
1939  * Get cheapest path for this relation that has exactly the requested
1940  * parameterization.
1941  *
1942  * Returns NULL if unable to create such a path.
1943  */
1944 static Path *
1945 get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel,
1946  Relids required_outer)
1947 {
1948  Path *cheapest;
1949  ListCell *lc;
1950 
1951  /*
1952  * Look up the cheapest existing path with no more than the needed
1953  * parameterization. If it has exactly the needed parameterization, we're
1954  * done.
1955  */
1956  cheapest = get_cheapest_path_for_pathkeys(rel->pathlist,
1957  NIL,
1958  required_outer,
1959  TOTAL_COST,
1960  false);
1961  Assert(cheapest != NULL);
1962  if (bms_equal(PATH_REQ_OUTER(cheapest), required_outer))
1963  return cheapest;
1964 
1965  /*
1966  * Otherwise, we can "reparameterize" an existing path to match the given
1967  * parameterization, which effectively means pushing down additional
1968  * joinquals to be checked within the path's scan. However, some existing
1969  * paths might check the available joinquals already while others don't;
1970  * therefore, it's not clear which existing path will be cheapest after
1971  * reparameterization. We have to go through them all and find out.
1972  */
1973  cheapest = NULL;
1974  foreach(lc, rel->pathlist)
1975  {
1976  Path *path = (Path *) lfirst(lc);
1977 
1978  /* Can't use it if it needs more than requested parameterization */
1979  if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer))
1980  continue;
1981 
1982  /*
1983  * Reparameterization can only increase the path's cost, so if it's
1984  * already more expensive than the current cheapest, forget it.
1985  */
1986  if (cheapest != NULL &&
1987  compare_path_costs(cheapest, path, TOTAL_COST) <= 0)
1988  continue;
1989 
1990  /* Reparameterize if needed, then recheck cost */
1991  if (!bms_equal(PATH_REQ_OUTER(path), required_outer))
1992  {
1993  path = reparameterize_path(root, path, required_outer, 1.0);
1994  if (path == NULL)
1995  continue; /* failed to reparameterize this one */
1996  Assert(bms_equal(PATH_REQ_OUTER(path), required_outer));
1997 
1998  if (cheapest != NULL &&
1999  compare_path_costs(cheapest, path, TOTAL_COST) <= 0)
2000  continue;
2001  }
2002 
2003  /* We have a new best path */
2004  cheapest = path;
2005  }
2006 
2007  /* Return the best path, or NULL if we found no suitable candidate */
2008  return cheapest;
2009 }
2010 
2011 /*
2012  * accumulate_partitioned_rels
2013  * Record 'sub_partitioned_rels' in the 'partitioned_rels' list,
2014  * flattening as appropriate.
2015  */
2016 static List *
2017 accumulate_partitioned_rels(List *partitioned_rels,
2018  List *sub_partitioned_rels,
2019  bool flatten)
2020 {
2021  if (flatten)
2022  {
2023  /*
2024  * We're only called with flatten == true when the partitioned_rels
2025  * list has at most 1 element. So we can just add the members from
2026  * sub list's first element onto the first element of
2027  * partitioned_rels. Only later in planning when doing UNION ALL
2028  * Append processing will we see flatten == false. partitioned_rels
2029  * may end up with more than 1 element then, but we never expect to be
2030  * called with flatten == true again after that, so we needn't bother
2031  * doing anything here for anything but the initial element.
2032  */
2033  if (partitioned_rels != NIL && sub_partitioned_rels != NIL)
2034  {
2035  Relids partrels = (Relids) linitial(partitioned_rels);
2036  Relids subpartrels = (Relids) linitial(sub_partitioned_rels);
2037 
2038  /* Ensure the above comment holds true */
2039  Assert(list_length(partitioned_rels) == 1);
2040  Assert(list_length(sub_partitioned_rels) == 1);
2041 
2042  linitial(partitioned_rels) = bms_add_members(partrels, subpartrels);
2043  }
2044  }
2045  else
2046  {
2047  /*
2048  * Handle UNION ALL to partitioned tables. This always occurs after
2049  * we've done the accumulation for sub-partitioned tables, so there's
2050  * no need to consider how adding multiple elements to the top level
2051  * list affects the flatten == true case above.
2052  */
2053  partitioned_rels = list_concat(partitioned_rels, sub_partitioned_rels);
2054  }
2055 
2056  return partitioned_rels;
2057 }
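/*
 * A sketch of the two modes above: with flatten == true, singleton lists
 * such as ((1 2)) and ((3)) merge into ((1 2 3)), i.e. one combined
 * Relids; with flatten == false they concatenate into ((1 2) (3)), one
 * entry per UNION ALL branch.
 */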
2058 
2059 /*
2060  * accumulate_append_subpath
2061  * Add a subpath to the list being built for an Append or MergeAppend.
2062  *
2063  * It's possible that the child is itself an Append or MergeAppend path, in
2064  * which case we can "cut out the middleman" and just add its child paths to
2065  * our own list. (We don't try to do this earlier because we need to apply
2066  * both levels of transformation to the quals.)
2067  *
2068  * Note that if we omit a child MergeAppend in this way, we are effectively
2069  * omitting a sort step, which seems fine: if the parent is to be an Append,
2070  * its result would be unsorted anyway, while if the parent is to be a
2071  * MergeAppend, there's no point in a separate sort on a child.
2072  *
2073  * Normally, either path is a partial path and subpaths is a list of partial
2074  * paths, or else path is a non-partial plan and subpaths is a list of those.
2075  * However, if path is a parallel-aware Append, then we add its partial path
2076  * children to subpaths and the rest to special_subpaths. If the latter is
2077  * NULL, we don't flatten the path at all (unless it contains only partial
2078  * paths).
2079  *
2080  * When pulling up sub-Appends and sub-Merge Appends, we also gather the
2081  * path's list of partitioned tables and store in 'partitioned_rels'. The
2082  * exact behavior here depends on the value of 'flatten_partitioned_rels'.
2083  *
2084  * When 'flatten_partitioned_rels' is true, 'partitioned_rels' will contain at
2085  * most one element which is a Relids of the partitioned relations for which
2086  * there are subpaths. In this case, we just add the RT indexes of the
2087  * partitioned tables for the subpath we're pulling up to the single entry in
2088  * 'partitioned_rels'. When 'flatten_partitioned_rels' is false, we
2089  * concatenate the path's partitioned rel list onto the top-level list. This
2090  * is done for UNION ALLs, which could have a partitioned table in each union
2091  * branch.
2092  */
2093 static void
2094 accumulate_append_subpath(Path *path, List **subpaths, List **special_subpaths,
2095  List **partitioned_rels,
2096  bool flatten_partitioned_rels)
2097 {
2098  if (IsA(path, AppendPath))
2099  {
2100  AppendPath *apath = (AppendPath *) path;
2101 
2102  if (!apath->path.parallel_aware || apath->first_partial_path == 0)
2103  {
2104  *subpaths = list_concat(*subpaths, apath->subpaths);
2105  *partitioned_rels = accumulate_partitioned_rels(*partitioned_rels,
2106  apath->partitioned_rels,
2107  flatten_partitioned_rels);
2108  return;
2109  }
2110  else if (special_subpaths != NULL)
2111  {
2112  List *new_special_subpaths;
2113 
2114  /* Split Parallel Append into partial and non-partial subpaths */
2115  *subpaths = list_concat(*subpaths,
2116  list_copy_tail(apath->subpaths,
2117  apath->first_partial_path));
2118  new_special_subpaths =
2119  list_truncate(list_copy(apath->subpaths),
2120  apath->first_partial_path);
2121  *special_subpaths = list_concat(*special_subpaths,
2122  new_special_subpaths);
2123  *partitioned_rels = accumulate_partitioned_rels(*partitioned_rels,
2124  apath->partitioned_rels,
2125  flatten_partitioned_rels);
2126  return;
2127  }
2128  }
2129  else if (IsA(path, MergeAppendPath))
2130  {
2131  MergeAppendPath *mpath = (MergeAppendPath *) path;
2132 
2133  *subpaths = list_concat(*subpaths, mpath->subpaths);
2134  *partitioned_rels = accumulate_partitioned_rels(*partitioned_rels,
2135  mpath->partitioned_rels,
2136  flatten_partitioned_rels);
2137  return;
2138  }
2139 
2140  *subpaths = lappend(*subpaths, path);
2141 }
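/*
 * To illustrate the flattening above: if the parent is collecting
 * (p0, Append(p1, p2), p3), the accumulated subpaths list becomes
 * (p0, p1, p2, p3), leaving no intermediate Append node in the tree.
 */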
2142 
2143 /*
2144  * get_singleton_append_subpath
2145  * Returns the single subpath of an Append/MergeAppend, or just
2146  returns 'path' if it's not a single sub-path Append/MergeAppend.
2147  *
2148  * Note: 'path' must not be a parallel-aware path.
2149  */
2150 static Path *
2151 get_singleton_append_subpath(Path *path)
2152 {
2153  Assert(!path->parallel_aware);
2154 
2155  if (IsA(path, AppendPath))
2156  {
2157  AppendPath *apath = (AppendPath *) path;
2158 
2159  if (list_length(apath->subpaths) == 1)
2160  return (Path *) linitial(apath->subpaths);
2161  }
2162  else if (IsA(path, MergeAppendPath))
2163  {
2164  MergeAppendPath *mpath = (MergeAppendPath *) path;
2165 
2166  if (list_length(mpath->subpaths) == 1)
2167  return (Path *) linitial(mpath->subpaths);
2168  }
2169 
2170  return path;
2171 }
2172 
2173 /*
2174  * set_dummy_rel_pathlist
2175  * Build a dummy path for a relation that's been excluded by constraints
2176  *
2177  * Rather than inventing a special "dummy" path type, we represent this as an
2178  * AppendPath with no members (see also IS_DUMMY_APPEND/IS_DUMMY_REL macros).
2179  *
2180  * (See also mark_dummy_rel, which does basically the same thing, but is
2181  * typically used to change a rel into dummy state after we already made
2182  * paths for it.)
2183  */
2184 static void
2185 set_dummy_rel_pathlist(RelOptInfo *rel)
2186 {
2187  /* Set dummy size estimates --- we leave attr_widths[] as zeroes */
2188  rel->rows = 0;
2189  rel->reltarget->width = 0;
2190 
2191  /* Discard any pre-existing paths; no further need for them */
2192  rel->pathlist = NIL;
2193  rel->partial_pathlist = NIL;
2194 
2195  /* Set up the dummy path */
2196  add_path(rel, (Path *) create_append_path(NULL, rel, NIL, NIL,
2197  NIL, rel->lateral_relids,
2198  0, false, NIL, -1));
2199 
2200  /*
2201  * We set the cheapest-path fields immediately, just in case they were
2202  * pointing at some discarded path. This is redundant when we're called
2203  * from set_rel_size(), but not when called from elsewhere, and doing it
2204  * twice is harmless anyway.
2205  */
2206  set_cheapest(rel);
2207 }
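/*
 * A rel typically becomes dummy when its quals are provably false, as in
 * the hypothetical query
 *		SELECT * FROM tab WHERE false;
 * or when constraint exclusion proves a child partition cannot contain
 * any matching rows; the zero-subpath Append represents "scan nothing".
 */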
2208 
2209 /* quick-and-dirty test to see if any joining is needed */
2210 static bool
2211 has_multiple_baserels(PlannerInfo *root)
2212 {
2213  int num_base_rels = 0;
2214  Index rti;
2215 
2216  for (rti = 1; rti < root->simple_rel_array_size; rti++)
2217  {
2218  RelOptInfo *brel = root->simple_rel_array[rti];
2219 
2220  if (brel == NULL)
2221  continue;
2222 
2223  /* ignore RTEs that are "other rels" */
2224  if (brel->reloptkind == RELOPT_BASEREL)
2225  if (++num_base_rels > 1)
2226  return true;
2227  }
2228  return false;
2229 }
2230 
2231 /*
2232  * set_subquery_pathlist
2233  * Generate SubqueryScan access paths for a subquery RTE
2234  *
2235  * We don't currently support generating parameterized paths for subqueries
2236  * by pushing join clauses down into them; it seems too expensive to re-plan
2237  * the subquery multiple times to consider different alternatives.
2238  * (XXX that could stand to be reconsidered, now that we use Paths.)
2239  * So the paths made here will be parameterized if the subquery contains
2240  * LATERAL references, otherwise not. As long as that's true, there's no need
2241  * for a separate set_subquery_size phase: just make the paths right away.
2242  */
2243 static void
2244 set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
2245  Index rti, RangeTblEntry *rte)
2246 {
2247  Query *parse = root->parse;
2248  Query *subquery = rte->subquery;
2249  Relids required_outer;
2250  pushdown_safety_info safetyInfo;
2251  double tuple_fraction;
2252  RelOptInfo *sub_final_rel;
2253  ListCell *lc;
2254 
2255  /*
2256  * Must copy the Query so that planning doesn't mess up the RTE contents
2257  * (really really need to fix the planner to not scribble on its input,
2258  * someday ... but see remove_unused_subquery_outputs to start with).
2259  */
2260  subquery = copyObject(subquery);
2261 
2262  /*
2263  * If it's a LATERAL subquery, it might contain some Vars of the current
2264  * query level, requiring it to be treated as parameterized, even though
2265  * we don't support pushing down join quals into subqueries.
2266  */
2267  required_outer = rel->lateral_relids;
2268 
2269  /*
2270  * Zero out result area for subquery_is_pushdown_safe, so that it can set
2271  * flags as needed while recursing. In particular, we need a workspace
2272  * for keeping track of unsafe-to-reference columns. unsafeColumns[i]
2273  * will be set true if we find that output column i of the subquery is
2274  * unsafe to use in a pushed-down qual.
2275  */
2276  memset(&safetyInfo, 0, sizeof(safetyInfo));
2277  safetyInfo.unsafeColumns = (bool *)
2278  palloc0((list_length(subquery->targetList) + 1) * sizeof(bool));
2279 
2280  /*
2281  * If the subquery has the "security_barrier" flag, it means the subquery
2282  * originated from a view that must enforce row level security. Then we
2283  * must not push down quals that contain leaky functions. (Ideally this
2284  * would be checked inside subquery_is_pushdown_safe, but since we don't
2285  * currently pass the RTE to that function, we must do it here.)
2286  */
2287  safetyInfo.unsafeLeaky = rte->security_barrier;
2288 
2289  /*
2290  * If there are any restriction clauses that have been attached to the
2291  * subquery relation, consider pushing them down to become WHERE or HAVING
2292  * quals of the subquery itself. This transformation is useful because it
2293  * may allow us to generate a better plan for the subquery than evaluating
2294  * all the subquery output rows and then filtering them.
2295  *
2296  * There are several cases where we cannot push down clauses. Restrictions
2297  * involving the subquery are checked by subquery_is_pushdown_safe().
2298  * Restrictions on individual clauses are checked by
2299  * qual_is_pushdown_safe(). Also, we don't want to push down
2300  * pseudoconstant clauses; better to have the gating node above the
2301  * subquery.
2302  *
2303  * Non-pushed-down clauses will get evaluated as qpquals of the
2304  * SubqueryScan node.
2305  *
2306  * XXX Are there any cases where we want to make a policy decision not to
2307  * push down a pushable qual, because it'd result in a worse plan?
2308  */
2309  if (rel->baserestrictinfo != NIL &&
2310  subquery_is_pushdown_safe(subquery, subquery, &safetyInfo))
2311  {
2312  /* OK to consider pushing down individual quals */
2313  List *upperrestrictlist = NIL;
2314  ListCell *l;
2315 
2316  foreach(l, rel->baserestrictinfo)
2317  {
2318  RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
2319  Node *clause = (Node *) rinfo->clause;
2320 
2321  if (!rinfo->pseudoconstant &&
2322  qual_is_pushdown_safe(subquery, rti, clause, &safetyInfo))
2323  {
2324  /* Push it down */
2325  subquery_push_qual(subquery, rte, rti, clause);
2326  }
2327  else
2328  {
2329  /* Keep it in the upper query */
2330  upperrestrictlist = lappend(upperrestrictlist, rinfo);
2331  }
2332  }
2333  rel->baserestrictinfo = upperrestrictlist;
2334  /* We don't bother recomputing baserestrict_min_security */
2335  }
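 /*
  * For example (hypothetical query), in
  *		SELECT * FROM (SELECT a, b FROM tab GROUP BY a, b) ss
  *		WHERE a = 42;
  * the "a = 42" clause can normally be pushed down to become a HAVING
  * qual of the subquery (and perhaps a WHERE qual from there), so rows
  * are filtered before grouping rather than afterwards.
  */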
2336 
2337  pfree(safetyInfo.unsafeColumns);
2338 
2339  /*
2340  * The upper query might not use all the subquery's output columns; if
2341  * not, we can simplify.
2342  */
2343  remove_unused_subquery_outputs(subquery, rel);
2344 
2345  /*
2346  * We can safely pass the outer tuple_fraction down to the subquery if the
2347  * outer level has no joining, aggregation, or sorting to do. Otherwise
2348  * we'd better tell the subquery to plan for full retrieval. (XXX This
2349  * could probably be made more intelligent ...)
2350  */
2351  if (parse->hasAggs ||
2352  parse->groupClause ||
2353  parse->groupingSets ||
2354  parse->havingQual ||
2355  parse->distinctClause ||
2356  parse->sortClause ||
2357  has_multiple_baserels(root))
2358  tuple_fraction = 0.0; /* default case */
2359  else
2360  tuple_fraction = root->tuple_fraction;
2361 
2362  /* plan_params should not be in use in current query level */
2363  Assert(root->plan_params == NIL);
2364 
2365  /* Generate a subroot and Paths for the subquery */
2366  rel->subroot = subquery_planner(root->glob, subquery,
2367  root,
2368  false, tuple_fraction);
2369 
2370  /* Isolate the params needed by this specific subplan */
2371  rel->subplan_params = root->plan_params;
2372  root->plan_params = NIL;
2373 
2374  /*
2375  * It's possible that constraint exclusion proved the subquery empty. If
2376  * so, it's desirable to produce an unadorned dummy path so that we will
2377  * recognize appropriate optimizations at this query level.
2378  */
2379  sub_final_rel = fetch_upper_rel(rel->subroot, UPPERREL_FINAL, NULL);
2380 
2381  if (IS_DUMMY_REL(sub_final_rel))
2382  {
2383  set_dummy_rel_pathlist(rel);
2384  return;
2385  }
2386 
2387  /*
2388  * Mark rel with estimated output rows, width, etc. Note that we have to
2389  * do this before generating outer-query paths, else cost_subqueryscan is
2390  * not happy.
2391  */
2392  set_subquery_size_estimates(root, rel);
2393 
2394  /*
2395  * For each Path that subquery_planner produced, make a SubqueryScanPath
2396  * in the outer query.
2397  */
2398  foreach(lc, sub_final_rel->pathlist)
2399  {
2400  Path *subpath = (Path *) lfirst(lc);
2401  List *pathkeys;
2402 
2403  /* Convert subpath's pathkeys to outer representation */
2404  pathkeys = convert_subquery_pathkeys(root,
2405  rel,
2406  subpath->pathkeys,
2407  make_tlist_from_pathtarget(subpath->pathtarget));
2408 
2409  /* Generate outer path using this subpath */
2410  add_path(rel, (Path *)
2411  create_subqueryscan_path(root, rel, subpath,
2412  pathkeys, required_outer));
2413  }
2414 
2415  /* If outer rel allows parallelism, do same for partial paths. */
2416  if (rel->consider_parallel && bms_is_empty(required_outer))
2417  {
2418  /* If consider_parallel is false, there should be no partial paths. */
2419  Assert(sub_final_rel->consider_parallel ||
2420  sub_final_rel->partial_pathlist == NIL);
2421 
2422  /* Same for partial paths. */
2423  foreach(lc, sub_final_rel->partial_pathlist)
2424  {
2425  Path *subpath = (Path *) lfirst(lc);
2426  List *pathkeys;
2427 
2428  /* Convert subpath's pathkeys to outer representation */
2429  pathkeys = convert_subquery_pathkeys(root,
2430  rel,
2431  subpath->pathkeys,
2432  make_tlist_from_pathtarget(subpath->pathtarget));
2433 
2434  /* Generate outer path using this subpath */
2435  add_partial_path(rel, (Path *)
2436  create_subqueryscan_path(root, rel, subpath,
2437  pathkeys,
2438  required_outer));
2439  }
2440  }
2441 }
2442 
2443 /*
2444  * set_function_pathlist
2445  * Build the (single) access path for a function RTE
2446  */
2447 static void
2448 set_function_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
2449 {
2450  Relids required_outer;
2451  List *pathkeys = NIL;
2452 
2453  /*
2454  * We don't support pushing join clauses into the quals of a function
2455  * scan, but it could still have required parameterization due to LATERAL
2456  * refs in the function expression.
2457  */
2458  required_outer = rel->lateral_relids;
2459 
2460  /*
2461  * The result is considered unordered unless ORDINALITY was used, in which
2462  * case it is ordered by the ordinal column (the last one). See if we
2463  * care, by checking for uses of that Var in equivalence classes.
2464  */
2465  if (rte->funcordinality)
2466  {
2467  AttrNumber ordattno = rel->max_attr;
2468  Var *var = NULL;
2469  ListCell *lc;
2470 
2471  /*
2472  * Is there a Var for it in rel's targetlist? If not, the query did
2473  * not reference the ordinality column, or at least not in any way
2474  * that would be interesting for sorting.
2475  */
2476  foreach(lc, rel->reltarget->exprs)
2477  {
2478  Var *node = (Var *) lfirst(lc);
2479 
2480  /* checking varno/varlevelsup is just paranoia */
2481  if (IsA(node, Var) &&
2482  node->varattno == ordattno &&
2483  node->varno == rel->relid &&
2484  node->varlevelsup == 0)
2485  {
2486  var = node;
2487  break;
2488  }
2489  }
2490 
2491  /*
2492  * Try to build pathkeys for this Var with int8 sorting. We tell
2493  * build_expression_pathkey not to build any new equivalence class; if
2494  * the Var isn't already mentioned in some EC, it means that nothing
2495  * cares about the ordering.
2496  */
2497  if (var)
2498  pathkeys = build_expression_pathkey(root,
2499  (Expr *) var,
2500  NULL, /* below outer joins */
2501  Int8LessOperator,
2502  rel->relids,
2503  false);
2504  }
2505 
2506  /* Generate appropriate path */
2507  add_path(rel, create_functionscan_path(root, rel,
2508  pathkeys, required_outer));
2509 }
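/*
 * For a hypothetical query such as
 *		SELECT * FROM unnest(ARRAY[30, 10, 20]) WITH ORDINALITY AS u(x, n)
 *		ORDER BY n;
 * the function scan is already ordered by the ordinality column, so the
 * pathkeys built above may let the planner skip an explicit Sort.
 */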
2510 
2511 /*
2512  * set_values_pathlist
2513  * Build the (single) access path for a VALUES RTE
2514  */
2515 static void
2516 set_values_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
2517 {
2518  Relids required_outer;
2519 
2520  /*
2521  * We don't support pushing join clauses into the quals of a values scan,
2522  * but it could still have required parameterization due to LATERAL refs
2523  * in the values expressions.
2524  */
2525  required_outer = rel->lateral_relids;
2526 
2527  /* Generate appropriate path */
2528  add_path(rel, create_valuesscan_path(root, rel, required_outer));
2529 }
2530 
2531 /*
2532  * set_tablefunc_pathlist
2533  * Build the (single) access path for a table func RTE
2534  */
2535 static void
2536 set_tablefunc_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
2537 {
2538  Relids required_outer;
2539 
2540  /*
2541  * We don't support pushing join clauses into the quals of a tablefunc
2542  * scan, but it could still have required parameterization due to LATERAL
2543  * refs in the function expression.
2544  */
2545  required_outer = rel->lateral_relids;
2546 
2547  /* Generate appropriate path */
2548  add_path(rel, create_tablefuncscan_path(root, rel,
2549  required_outer));
2550 }
2551 
2552 /*
2553  * set_cte_pathlist
2554  * Build the (single) access path for a non-self-reference CTE RTE
2555  *
2556  * There's no need for a separate set_cte_size phase, since we don't
2557  * support join-qual-parameterized paths for CTEs.
2558  */
2559 static void
2560 set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
2561 {
2562  Plan *cteplan;
2563  PlannerInfo *cteroot;
2564  Index levelsup;
2565  int ndx;
2566  ListCell *lc;
2567  int plan_id;
2568  Relids required_outer;
2569 
2570  /*
2571  * Find the referenced CTE, and locate the plan previously made for it.
2572  */
2573  levelsup = rte->ctelevelsup;
2574  cteroot = root;
2575  while (levelsup-- > 0)
2576  {
2577  cteroot = cteroot->parent_root;
2578  if (!cteroot) /* shouldn't happen */
2579  elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
2580  }
2581 
2582  /*
2583  * Note: cte_plan_ids can be shorter than cteList, if we are still working
2584  * on planning the CTEs (ie, this is a side-reference from another CTE).
2585  * So we mustn't use forboth here.
2586  */
2587  ndx = 0;
2588  foreach(lc, cteroot->parse->cteList)
2589  {
2590  CommonTableExpr *cte = (CommonTableExpr *) lfirst(lc);
2591 
2592  if (strcmp(cte->ctename, rte->ctename) == 0)
2593  break;
2594  ndx++;
2595  }
2596  if (lc == NULL) /* shouldn't happen */
2597  elog(ERROR, "could not find CTE \"%s\"", rte->ctename);
2598  if (ndx >= list_length(cteroot->cte_plan_ids))
2599  elog(ERROR, "could not find plan for CTE \"%s\"", rte->ctename);
2600  plan_id = list_nth_int(cteroot->cte_plan_ids, ndx);
2601  Assert(plan_id > 0);
2602  cteplan = (Plan *) list_nth(root->glob->subplans, plan_id - 1);
2603 
2604  /* Mark rel with estimated output rows, width, etc */
2605  set_cte_size_estimates(root, rel, cteplan->plan_rows);
2606 
2607  /*
2608  * We don't support pushing join clauses into the quals of a CTE scan, but
2609  * it could still have required parameterization due to LATERAL refs in
2610  * its tlist.
2611  */
2612  required_outer = rel->lateral_relids;
2613 
2614  /* Generate appropriate path */
2615  add_path(rel, create_ctescan_path(root, rel, required_outer));
2616 }
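/*
 * For a hypothetical query such as
 *		WITH w AS MATERIALIZED (SELECT a FROM tab)
 *		SELECT * FROM w;
 * the reference to "w" is planned here as a CTE scan over the subplan
 * already made for the CTE, found via cteroot->cte_plan_ids above.
 */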
2617 
2618 /*
2619  * set_namedtuplestore_pathlist
2620  * Build the (single) access path for a named tuplestore RTE
2621  *
2622  * There's no need for a separate set_namedtuplestore_size phase, since we
2623  * don't support join-qual-parameterized paths for tuplestores.
2624  */
2625 static void
2626 set_namedtuplestore_pathlist(PlannerInfo *root, RelOptInfo *rel,
2627  RangeTblEntry *rte)
2628 {
2629  Relids required_outer;
2630 
2631  /* Mark rel with estimated output rows, width, etc */
2632  set_namedtuplestore_size_estimates(root, rel);
2633 
2634  /*
2635  * We don't support pushing join clauses into the quals of a tuplestore
2636  * scan, but it could still have required parameterization due to LATERAL
2637  * refs in its tlist.
2638  */
2639  required_outer = rel->lateral_relids;
2640 
2641  /* Generate appropriate path */
2642  add_path(rel, create_namedtuplestorescan_path(root, rel, required_outer));
2643 
2644  /* Select cheapest path (pretty easy in this case...) */
2645  set_cheapest(rel);
2646 }
2647 
2648 /*
2649  * set_result_pathlist
2650  * Build the (single) access path for an RTE_RESULT RTE
2651  *
2652  * There's no need for a separate set_result_size phase, since we
2653  * don't support join-qual-parameterized paths for these RTEs.
2654  */
2655 static void
2656 set_result_pathlist(PlannerInfo *root, RelOptInfo *rel,
2657  RangeTblEntry *rte)
2658 {
2659  Relids required_outer;
2660 
2661  /* Mark rel with estimated output rows, width, etc */
2662  set_result_size_estimates(root, rel);
2663 
2664  /*
2665  * We don't support pushing join clauses into the quals of a Result scan,
2666  * but it could still have required parameterization due to LATERAL refs
2667  * in its tlist.
2668  */
2669  required_outer = rel->lateral_relids;
2670 
2671  /* Generate appropriate path */
2672  add_path(rel, create_resultscan_path(root, rel, required_outer));
2673 
2674  /* Select cheapest path (pretty easy in this case...) */
2675  set_cheapest(rel);
2676 }
2677 
2678 /*
2679  * set_worktable_pathlist
2680  * Build the (single) access path for a self-reference CTE RTE
2681  *
2682  * There's no need for a separate set_worktable_size phase, since we don't
2683  * support join-qual-parameterized paths for CTEs.
2684  */
2685 static void
2686 set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
2687 {
2688  Path *ctepath;
2689  PlannerInfo *cteroot;
2690  Index levelsup;
2691  Relids required_outer;
2692 
2693  /*
2694  * We need to find the non-recursive term's path, which is in the plan
2695  * level that's processing the recursive UNION, which is one level *below*
2696  * where the CTE comes from.
2697  */
2698  levelsup = rte->ctelevelsup;
2699  if (levelsup == 0) /* shouldn't happen */
2700  elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
2701  levelsup--;
2702  cteroot = root;
2703  while (levelsup-- > 0)
2704  {
2705  cteroot = cteroot->parent_root;
2706  if (!cteroot) /* shouldn't happen */
2707  elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
2708  }
2709  ctepath = cteroot->non_recursive_path;
2710  if (!ctepath) /* shouldn't happen */
2711  elog(ERROR, "could not find path for CTE \"%s\"", rte->ctename);
2712 
2713  /* Mark rel with estimated output rows, width, etc */
2714  set_cte_size_estimates(root, rel, ctepath->rows);
2715 
2716  /*
2717  * We don't support pushing join clauses into the quals of a worktable
2718  * scan, but it could still have required parameterization due to LATERAL
2719  * refs in its tlist. (I'm not sure this is actually possible given the
2720  * restrictions on recursive references, but it's easy enough to support.)
2721  */
2722  required_outer = rel->lateral_relids;
2723 
2724  /* Generate appropriate path */
2725  add_path(rel, create_worktablescan_path(root, rel, required_outer));
2726 }
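/*
 * The worktable scan built here is the self-reference of a recursive
 * CTE; e.g. in the hypothetical query
 *		WITH RECURSIVE t(n) AS (
 *			VALUES (1)
 *			UNION ALL
 *			SELECT n + 1 FROM t WHERE n < 100
 *		)
 *		SELECT * FROM t;
 * the "FROM t" in the recursive arm becomes a WorkTableScan, sized from
 * the non-recursive term's path as above.
 */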
2727 
2728 /*
2729  * generate_gather_paths
2730  * Generate parallel access paths for a relation by pushing a Gather or
2731  * Gather Merge on top of a partial path.
2732  *
2733  * This must not be called until after we're done creating all partial paths
2734  * for the specified relation. (Otherwise, add_partial_path might delete a
2735  * path that some GatherPath or GatherMergePath has a reference to.)
2736  *
2737  * If we're generating paths for a scan or join relation, override_rows will
2738  * be false, and we'll just use the relation's size estimate. When we're
2739  * being called for a partially-grouped path, though, we need to override
2740  * the rowcount estimate. (It's not clear that the particular value we're
2741  * using here is actually best, but the underlying rel has no estimate so
2742  * we must do something.)
2743  */
2744 void
2745 generate_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
2746 {
2747  Path *cheapest_partial_path;
2748  Path *simple_gather_path;
2749  ListCell *lc;
2750  double rows;
2751  double *rowsp = NULL;
2752 
2753  /* If there are no partial paths, there's nothing to do here. */
2754  if (rel->partial_pathlist == NIL)
2755  return;
2756 
2757  /* Should we override the rel's rowcount estimate? */
2758  if (override_rows)
2759  rowsp = &rows;
2760 
2761  /*
2762  * The output of Gather is always unsorted, so there's only one partial
2763  * path of interest: the cheapest one. That will be the one at the front
2764  * of partial_pathlist because of the way add_partial_path works.
2765  */
2766  cheapest_partial_path = linitial(rel->partial_pathlist);
2767  rows =
2768  cheapest_partial_path->rows * cheapest_partial_path->parallel_workers;
2769  simple_gather_path = (Path *)
2770  create_gather_path(root, rel, cheapest_partial_path, rel->reltarget,
2771  NULL, rowsp);
2772  add_path(rel, simple_gather_path);
2773 
2774  /*
2775  * For each useful ordering, we can consider an order-preserving Gather
2776  * Merge.
2777  */
2778  foreach(lc, rel->partial_pathlist)
2779  {
2780  Path *subpath = (Path *) lfirst(lc);
2781  GatherMergePath *path;
2782 
2783  if (subpath->pathkeys == NIL)
2784  continue;
2785 
2786  rows = subpath->rows * subpath->parallel_workers;
2787  path = create_gather_merge_path(root, rel, subpath, rel->reltarget,
2788  subpath->pathkeys, NULL, rowsp);
2789  add_path(rel, &path->path);
2790  }
2791 }
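/*
 * A plan shape this produces (sketch, costs omitted):
 *		Gather
 *		  Workers Planned: 2
 *		  ->  Parallel Seq Scan on tab
 * plus, for each ordered partial path, a Gather Merge that preserves
 * the workers' sort order.
 */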
2792 
2793 /*
2794  * get_useful_pathkeys_for_relation
2795  * Determine which orderings of a relation might be useful.
2796  *
2797  * Getting data in sorted order can be useful either because the requested
2798  * order matches the final output ordering for the overall query we're
2799  * planning, or because it enables an efficient merge join. Here, we try
2800  * to figure out which pathkeys to consider.
2801  *
2802  * This allows us to do incremental sort on top of an index scan under a gather
2803  * merge node, i.e. parallelized.
2804  *
2805  * XXX At the moment this can only ever return a list with a single element,
2806  * because it looks at query_pathkeys only. So we might return the pathkeys
2807  * directly, but it seems plausible we'll want to consider other orderings
2808  * in the future. For example, we might want to consider pathkeys useful for
2809  * merge joins.
2810  */
2811 static List *
2812 get_useful_pathkeys_for_relation(PlannerInfo *root, RelOptInfo *rel)
2813 {
2814  List *useful_pathkeys_list = NIL;
2815 
2816  /*
2817  * Considering query_pathkeys is always worth it, because it might allow
2818  * us to avoid a total sort when we have a partially presorted path
2819  * available or to push the total sort into the parallel portion of the
2820  * query.
2821  */
2822  if (root->query_pathkeys)
2823  {
2824  ListCell *lc;
2825  int npathkeys = 0; /* useful pathkeys */
2826 
2827  foreach(lc, root->query_pathkeys)
2828  {
2829  PathKey *pathkey = (PathKey *) lfirst(lc);
2830  EquivalenceClass *pathkey_ec = pathkey->pk_eclass;
2831 
2832  /*
2833  * We can only build a sort for pathkeys which contain an EC
2834  * member in the current relation's target, so ignore any suffix
2835  * of the list as soon as we find a pathkey without an EC member
2836  * in the relation.
2837  *
2838  * By still returning the prefix of the pathkeys list that does
2839  * meet criteria of EC membership in the current relation, we
2840  * enable not just an incremental sort on the entirety of
2841  * query_pathkeys but also incremental sort below a JOIN.
2842  */
2843  if (!find_em_expr_usable_for_sorting_rel(pathkey_ec, rel))
2844  break;
2845 
2846  npathkeys++;
2847  }
2848 
2849  /*
2850  * The whole query_pathkeys list matches, so append it directly, to
2851  * allow comparing pathkeys easily by comparing list pointer. If we
2852  * have to truncate the pathkeys, we must make a copy, though.
2853  */
2854  if (npathkeys == list_length(root->query_pathkeys))
2855  useful_pathkeys_list = lappend(useful_pathkeys_list,
2856  root->query_pathkeys);
2857  else if (npathkeys > 0)
2858  useful_pathkeys_list = lappend(useful_pathkeys_list,
2859  list_truncate(list_copy(root->query_pathkeys),
2860  npathkeys));
2861  }
2862 
2863  return useful_pathkeys_list;
2864 }
2865 
2866 /*
2867  * generate_useful_gather_paths
2868  * Generate parallel access paths for a relation by pushing a Gather or
2869  * Gather Merge on top of a partial path.
2870  *
2871  * Unlike plain generate_gather_paths, this looks both at pathkeys of input
2872  * paths (aiming to preserve the ordering), but also considers ordering that
2873  * might be useful for nodes above the gather merge node, and tries to add
2874  * a sort (regular or incremental) to provide that.
2875  */
2876 void
2877 generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
2878 {
2879  ListCell *lc;
2880  double rows;
2881  double *rowsp = NULL;
2882  List *useful_pathkeys_list = NIL;
2883  Path *cheapest_partial_path = NULL;
2884 
2885  /* If there are no partial paths, there's nothing to do here. */
2886  if (rel->partial_pathlist == NIL)
2887  return;
2888 
2889  /* Should we override the rel's rowcount estimate? */
2890  if (override_rows)
2891  rowsp = &rows;
2892 
2893  /* generate the regular gather (merge) paths */
2894  generate_gather_paths(root, rel, override_rows);
2895 
2896  /* consider incremental sort for interesting orderings */
2897  useful_pathkeys_list = get_useful_pathkeys_for_relation(root, rel);
2898 
2899  /* used for explicit (full) sort paths */
2900  cheapest_partial_path = linitial(rel->partial_pathlist);
2901 
2902  /*
2903  * Consider incremental sort paths for each interesting ordering.
2904  */
2905  foreach(lc, useful_pathkeys_list)
2906  {
2907  List *useful_pathkeys = lfirst(lc);
2908  ListCell *lc2;
2909  bool is_sorted;
2910  int presorted_keys;
2911 
2912  foreach(lc2, rel->partial_pathlist)
2913  {
2914  Path *subpath = (Path *) lfirst(lc2);
2915  GatherMergePath *path;
2916 
2917  /*
2918  * If the path has no ordering at all, then we can neither use an
2919  * incremental sort nor rely on implicit sorting with a gather
2920  * merge.
2921  */
2922  if (subpath->pathkeys == NIL)
2923  continue;
2924 
2925  is_sorted = pathkeys_count_contained_in(useful_pathkeys,
2926  subpath->pathkeys,
2927  &presorted_keys);
2928 
2929  /*
2930  * We don't need to consider the case where a subpath is already
2931  * fully sorted because generate_gather_paths already creates a
2932  * gather merge path for every subpath that has pathkeys present.
2933  *
2934  * But since the subpath is already sorted, we know we don't need
2935  * to consider adding a sort (of either kind) on top of it, so
2936  * we can continue here.
2937  */
2938  if (is_sorted)
2939  continue;
2940 
2941  /*
2942  * Consider regular sort for the cheapest partial path (for each
2943  * useful pathkeys). We know the path is not sorted, because we'd
2944  * not get here otherwise.
2945  *
2946  * This is not redundant with the gather paths created in
2947  * generate_gather_paths, because that doesn't generate ordered
2948  * output. Here we add an explicit sort to match the useful
2949  * ordering.
2950  */
2951  if (cheapest_partial_path == subpath)
2952  {
2953  Path *tmp;
2954 
2955  tmp = (Path *) create_sort_path(root,
2956  rel,
2957  subpath,
2958  useful_pathkeys,
2959  -1.0);
2960 
2961  rows = tmp->rows * tmp->parallel_workers;
2962 
2963  path = create_gather_merge_path(root, rel,
2964  tmp,
2965  rel->reltarget,
2966  tmp->pathkeys,
2967  NULL,
2968  rowsp);
2969 
2970  add_path(rel, &path->path);
2971 
2972  /* Fall through */
2973  }
2974 
2975  /*
2976  * Consider incremental sort, but only when the subpath is already
2977  * partially sorted on a pathkey prefix.
2978  */
2979  if (enable_incremental_sort && presorted_keys > 0)
2980  {
2981  Path *tmp;
2982 
2983  /*
2984  * We should have already excluded pathkeys of length 1
2985  * because then presorted_keys > 0 would imply is_sorted was
2986  * true.
2987  */
2988  Assert(list_length(useful_pathkeys) != 1);
2989 
2990  tmp = (Path *) create_incremental_sort_path(root,
2991  rel,
2992  subpath,
2993  useful_pathkeys,
2994  presorted_keys,
2995  -1);
2996 
2997  path = create_gather_merge_path(root, rel,
2998  tmp,
2999  rel->reltarget,
3000  tmp->pathkeys,
3001  NULL,
3002  rowsp);
3003 
3004  add_path(rel, &path->path);
3005  }
3006  }
3007  }
3008 }
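/*
 * A plan shape this aims to enable (sketch, assuming an index on (a) and
 * a query with ORDER BY a, b):
 *		Gather Merge
 *		  ->  Incremental Sort
 *				Sort Key: a, b
 *				Presorted Key: a
 *				->  Parallel Index Scan using tab_a_idx on tab
 * so the bulk of the sorting happens in the parallel workers.
 */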
3009 
3010 /*
3011  * make_rel_from_joinlist
3012  * Build access paths using a "joinlist" to guide the join path search.
3013  *
3014  * See comments for deconstruct_jointree() for definition of the joinlist
3015  * data structure.
3016  */
3017 static RelOptInfo *
3018 make_rel_from_joinlist(PlannerInfo *root, List *joinlist)
3019 {
3020  int levels_needed;
3021  List *initial_rels;
3022  ListCell *jl;
3023 
3024  /*
3025  * Count the number of child joinlist nodes. This is the depth of the
3026  * dynamic-programming algorithm we must employ to consider all ways of
3027  * joining the child nodes.
3028  */
3029  levels_needed = list_length(joinlist);
3030 
3031  if (levels_needed <= 0)
3032  return NULL; /* nothing to do? */
3033 
3034  /*
3035  * Construct a list of rels corresponding to the child joinlist nodes.
3036  * This may contain both base rels and rels constructed according to
3037  * sub-joinlists.
3038  */
3039  initial_rels = NIL;
3040  foreach(jl, joinlist)
3041  {
3042  Node *jlnode = (Node *) lfirst(jl);
3043  RelOptInfo *thisrel;
3044 
3045  if (IsA(jlnode, RangeTblRef))
3046  {
3047  int varno = ((RangeTblRef *) jlnode)->rtindex;
3048 
3049  thisrel = find_base_rel(root, varno);
3050  }
3051  else if (IsA(jlnode, List))
3052  {
3053  /* Recurse to handle subproblem */
3054  thisrel = make_rel_from_joinlist(root, (List *) jlnode);
3055  }
3056  else
3057  {
3058  elog(ERROR, "unrecognized joinlist node type: %d",
3059  (int) nodeTag(jlnode));
3060  thisrel = NULL; /* keep compiler quiet */
3061  }
3062 
3063  initial_rels = lappend(initial_rels, thisrel);
3064  }
3065 
3066  if (levels_needed == 1)
3067  {
3068  /*
3069  * Single joinlist node, so we're done.
3070  */
3071  return (RelOptInfo *) linitial(initial_rels);
3072  }
3073  else
3074  {
3075  /*
3076  * Consider the different orders in which we could join the rels,
3077  * using a plugin, GEQO, or the regular join search code.
3078  *
3079  * We put the initial_rels list into a PlannerInfo field because
3080  * has_legal_joinclause() needs to look at it (ugly :-().
3081  */
3082  root->initial_rels = initial_rels;
3083 
3084  if (join_search_hook)
3085  return (*join_search_hook) (root, levels_needed, initial_rels);
3086  else if (enable_geqo && levels_needed >= geqo_threshold)
3087  return geqo(root, levels_needed, initial_rels);
3088  else
3089  return standard_join_search(root, levels_needed, initial_rels);
3090  }
3091 }
3092 
3093 /*
3094  * standard_join_search
3095  * Find possible joinpaths for a query by successively finding ways
3096  * to join component relations into join relations.
3097  *
3098  * 'levels_needed' is the number of iterations needed, ie, the number of
3099  * independent jointree items in the query. This is > 1.
3100  *
3101  * 'initial_rels' is a list of RelOptInfo nodes for each independent
3102  * jointree item. These are the components to be joined together.
3103  * Note that levels_needed == list_length(initial_rels).
3104  *
3105  * Returns the final level of join relations, i.e., the relation that is
3106  * the result of joining all the original relations together.
3107  * At least one implementation path must be provided for this relation and
3108  * all required sub-relations.
3109  *
3110  * To support loadable plugins that modify planner behavior by changing the
3111  * join searching algorithm, we provide a hook variable that lets a plugin
3112  * replace or supplement this function. Any such hook must return the same
3113  * final join relation as the standard code would, but it might have a
3114  * different set of implementation paths attached, and only the sub-joinrels
3115  * needed for these paths need have been instantiated.
3116  *
3117  * Note to plugin authors: the functions invoked during standard_join_search()
3118  * modify root->join_rel_list and root->join_rel_hash. If you want to do more
3119  * than one join-order search, you'll probably need to save and restore the
3120  * original states of those data structures. See geqo_eval() for an example.
3121  */
3122 RelOptInfo *
3123 standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
3124 {
3125  int lev;
3126  RelOptInfo *rel;
3127 
3128  /*
3129  * This function cannot be invoked recursively within any one planning
3130  * problem, so join_rel_level[] can't be in use already.
3131  */
3132  Assert(root->join_rel_level == NULL);
3133 
3134  /*
3135  * We employ a simple "dynamic programming" algorithm: we first find all
3136  * ways to build joins of two jointree items, then all ways to build joins
3137  * of three items (from two-item joins and single items), then four-item
3138  * joins, and so on until we have considered all ways to join all the
3139  * items into one rel.
3140  *
3141  * root->join_rel_level[j] is a list of all the j-item rels. Initially we
3142  * set root->join_rel_level[1] to represent all the single-jointree-item
3143  * relations.
3144  */
3145  root->join_rel_level = (List **) palloc0((levels_needed + 1) * sizeof(List *));
3146 
3147  root->join_rel_level[1] = initial_rels;
3148 
3149  for (lev = 2; lev <= levels_needed; lev++)
3150  {
3151  ListCell *lc;
3152 
3153  /*
3154  * Determine all possible pairs of relations to be joined at this
3155  * level, and build paths for making each one from every available
3156  * pair of lower-level relations.
3157  */
3158  join_search_one_level(root, lev);
3159 
3160  /*
3161  * Run generate_partitionwise_join_paths() and generate_gather_paths()
3162  * for each just-processed joinrel. We could not do this earlier
3163  * because both regular and partial paths can get added to a
3164  * particular joinrel at multiple times within join_search_one_level.
3165  *
3166  * After that, we're done creating paths for the joinrel, so run
3167  * set_cheapest().
3168  */
3169  foreach(lc, root->join_rel_level[lev])
3170  {
3171  rel = (RelOptInfo *) lfirst(lc);
3172 
3173  /* Create paths for partitionwise joins. */
3174  generate_partitionwise_join_paths(root, rel);
3175 
3176  /*
3177  * Except for the topmost scan/join rel, consider gathering
3178  * partial paths. We'll do the same for the topmost scan/join rel
3179  * once we know the final targetlist (see grouping_planner).
3180  */
3181  if (lev < levels_needed)
3182  generate_useful_gather_paths(root, rel, false);
3183 
3184  /* Find and save the cheapest paths for this rel */
3185  set_cheapest(rel);
3186 
3187 #ifdef OPTIMIZER_DEBUG
3188  debug_print_rel(root, rel);
3189 #endif
3190  }
3191  }
3192 
3193  /*
3194  * We should have a single rel at the final level.
3195  */
3196  if (root->join_rel_level[levels_needed] == NIL)
3197  elog(ERROR, "failed to build any %d-way joins", levels_needed);
3198  Assert(list_length(root->join_rel_level[levels_needed]) == 1);
3199 
3200  rel = (RelOptInfo *) linitial(root->join_rel_level[levels_needed]);
3201 
3202  root->join_rel_level = NULL;
3203 
3204  return rel;
3205 }
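/*
 * To sketch the algorithm above for three base rels A, B and C:
 * join_rel_level[1] holds {A}, {B}, {C}; level 2 gets those pairs with
 * usable join clauses, e.g. {A B} and {B C}; and level 3 builds {A B C}
 * from each level-2 rel joined to the remaining base rel, keeping the
 * cheapest paths found for each relation set.
 */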
3206 
3207 /*****************************************************************************
3208  * PUSHING QUALS DOWN INTO SUBQUERIES
3209  *****************************************************************************/
3210 
3211 /*
3212  * subquery_is_pushdown_safe - is a subquery safe for pushing down quals?
3213  *
3214  * subquery is the particular component query being checked. topquery
3215  * is the top component of a set-operations tree (the same Query if no
3216  * set-op is involved).
3217  *
3218  * Conditions checked here:
3219  *
3220  * 1. If the subquery has a LIMIT clause, we must not push down any quals,
3221  * since that could change the set of rows returned.
3222  *
3223  * 2. If the subquery contains EXCEPT or EXCEPT ALL set ops we cannot push
3224  * quals into it, because that could change the results.
3225  *
3226  * 3. If the subquery uses DISTINCT, we cannot push volatile quals into it.
3227  * This is because upper-level quals should semantically be evaluated only
3228  * once per distinct row, not once per original row, and if the qual is
3229  * volatile then extra evaluations could change the results. (This issue
3230  * does not apply to other forms of aggregation such as GROUP BY, because
3231  * when those are present we push into HAVING not WHERE, so that the quals
3232  * are still applied after aggregation.)
3233  *
3234  * 4. If the subquery contains window functions, we cannot push volatile quals
3235  * into it. The issue here is a bit different from DISTINCT: a volatile qual
3236  * might succeed for some rows of a window partition and fail for others,
3237  * thereby changing the partition contents and thus the window functions'
3238  * results for rows that remain.
3239  *
3240  * 5. If the subquery contains any set-returning functions in its targetlist,
3241  * we cannot push volatile quals into it. That would push them below the SRFs
3242  * and thereby change the number of times they are evaluated. Also, a
3243  * volatile qual could succeed for some SRF output rows and fail for others,
3244  * a behavior that cannot occur if it's evaluated before SRF expansion.
3245  *
3246  * 6. If the subquery has nonempty grouping sets, we cannot push down any
3247  * quals. The concern here is that a qual referencing a "constant" grouping
3248  * column could get constant-folded, which would be improper because the value
3249  * is potentially nullable by grouping-set expansion. This restriction could
3250  * be removed if we had a parsetree representation that shows that such
3251  * grouping columns are not really constant. (There are other ideas that
3252  * could be used to relax this restriction, but that's the approach most
3253  * likely to get taken in the future. Note that there's not much to be gained
3254  * so long as subquery_planner can't move HAVING clauses to WHERE within such
3255  * a subquery.)
3256  *
3257  * In addition, we make several checks on the subquery's output columns to see
3258  * if it is safe to reference them in pushed-down quals. If output column k
3259  * is found to be unsafe to reference, we set safetyInfo->unsafeColumns[k]
3260  * to true, but we don't reject the subquery overall since column k might not
3261  * be referenced by some/all quals. The unsafeColumns[] array will be
3262  * consulted later by qual_is_pushdown_safe(). It's better to do it this way
3263  * than to make the checks directly in qual_is_pushdown_safe(), because when
3264  * the subquery involves set operations we have to check the output
3265  * expressions in each arm of the set op.
3266  *
3267  * Note: pushing quals into a DISTINCT subquery is theoretically dubious:
3268  * we're effectively assuming that the quals cannot distinguish values that
3269  * the DISTINCT's equality operator sees as equal, yet there are many
3270  * counterexamples to that assumption. However use of such a qual with a
3271  * DISTINCT subquery would be unsafe anyway, since there's no guarantee which
3272  * "equal" value will be chosen as the output value by the DISTINCT operation.
3273  * So we don't worry too much about that. Another objection is that if the
3274  * qual is expensive to evaluate, running it for each original row might cost
3275  * more than we save by eliminating rows before the DISTINCT step. But it
3276  * would be very hard to estimate that at this stage, and in practice pushdown
3277  * seldom seems to make things worse, so we ignore that problem too.
3278  *
3279  * Note: likewise, pushing quals into a subquery with window functions is a
3280  * bit dubious: the quals might remove some rows of a window partition while
3281  * leaving others, causing changes in the window functions' results for the
3282  * surviving rows. We insist that such a qual reference only partitioning
3283  * columns, but again that only protects us if the qual does not distinguish
3284  * values that the partitioning equality operator sees as equal. The risks
3285  * here are perhaps larger than for DISTINCT, since no de-duplication of rows
3286  * occurs and thus there is no theoretical problem with such a qual. But
3287  * we'll do this anyway because the potential performance benefits are very
3288  * large, and we've seen no field complaints about the longstanding comparable
3289  * behavior with DISTINCT.
3290  */
3291 static bool
3292 subquery_is_pushdown_safe(Query *subquery, Query *topquery,
3293  pushdown_safety_info *safetyInfo)
3294 {
3295  SetOperationStmt *topop;
3296 
3297  /* Check point 1 */
3298  if (subquery->limitOffset != NULL || subquery->limitCount != NULL)
3299  return false;
3300 
3301  /* Check point 6 */
3302  if (subquery->groupClause && subquery->groupingSets)
3303  return false;
3304 
3305  /* Check points 3, 4, and 5 */
3306  if (subquery->distinctClause ||
3307  subquery->hasWindowFuncs ||
3308  subquery->hasTargetSRFs)
3309  safetyInfo->unsafeVolatile = true;
3310 
3311  /*
3312  * If we're at a leaf query, check for unsafe expressions in its target
3313  * list, and mark any unsafe ones in unsafeColumns[]. (Non-leaf nodes in
3314  * setop trees have only simple Vars in their tlists, so no need to check
3315  * them.)
3316  */
3317  if (subquery->setOperations == NULL)
3318  check_output_expressions(subquery, safetyInfo);
3319 
3320  /* Are we at top level, or looking at a setop component? */
3321  if (subquery == topquery)
3322  {
3323  /* Top level, so check any component queries */
3324  if (subquery->setOperations != NULL)
3325  if (!recurse_pushdown_safe(subquery->setOperations, topquery,
3326  safetyInfo))
3327  return false;
3328  }
3329  else
3330  {
3331  /* Setop component must not have more components (too weird) */
3332  if (subquery->setOperations != NULL)
3333  return false;
3334  /* Check whether setop component output types match top level */
3335  topop = castNode(SetOperationStmt, topquery->setOperations);
3336  Assert(topop);
3337  compare_tlist_datatypes(subquery->targetList,
3338  topop->colTypes,
3339  safetyInfo);
3340  }
3341  return true;
3342 }
3343 
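/*
 * [Editor's illustration -- not part of allpaths.c] Callers of
 * subquery_is_pushdown_safe() zero the pushdown_safety_info struct and size
 * unsafeColumns[] as one bool per output column plus one, because resnos are
 * 1-based.  A minimal standalone sketch of that bookkeeping, using plain C
 * and hypothetical toy_* names rather than the real planner APIs:
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct toy_safety_info
{
    bool       *unsafe_columns; /* indexed by 1-based output column number */
    bool        unsafe_volatile;
    bool        unsafe_leaky;
} toy_safety_info;

int
main(void)
{
    int         ncols = 3;
    toy_safety_info info = {0};

    /* one extra slot because column numbers start at 1 */
    info.unsafe_columns = calloc(ncols + 1, sizeof(bool));
    /* e.g. set when pushing into a security-barrier view */
    info.unsafe_leaky = true;
    /* ... per-column checks would set individual flags here ... */
    info.unsafe_columns[2] = true;

    for (int k = 1; k <= ncols; k++)
        printf("column %d: %s for pushed-down quals\n",
               k, info.unsafe_columns[k] ? "unsafe" : "safe");
    free(info.unsafe_columns);
    return 0;
}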
3344 /*
3345  * Helper routine to recurse through setOperations tree
3346  */
3347 static bool
3348 recurse_pushdown_safe(Node *setOp, Query *topquery,
3349  pushdown_safety_info *safetyInfo)
3350 {
3351  if (IsA(setOp, RangeTblRef))
3352  {
3353  RangeTblRef *rtr = (RangeTblRef *) setOp;
3354  RangeTblEntry *rte = rt_fetch(rtr->rtindex, topquery->rtable);
3355  Query *subquery = rte->subquery;
3356 
3357  Assert(subquery != NULL);
3358  return subquery_is_pushdown_safe(subquery, topquery, safetyInfo);
3359  }
3360  else if (IsA(setOp, SetOperationStmt))
3361  {
3362  SetOperationStmt *op = (SetOperationStmt *) setOp;
3363 
3364  /* EXCEPT is no good (point 2 for subquery_is_pushdown_safe) */
3365  if (op->op == SETOP_EXCEPT)
3366  return false;
3367  /* Else recurse */
3368  if (!recurse_pushdown_safe(op->larg, topquery, safetyInfo))
3369  return false;
3370  if (!recurse_pushdown_safe(op->rarg, topquery, safetyInfo))
3371  return false;
3372  }
3373  else
3374  {
3375  elog(ERROR, "unrecognized node type: %d",
3376  (int) nodeTag(setOp));
3377  }
3378  return true;
3379 }
3380 
3381 /*
3382  * check_output_expressions - check subquery's output expressions for safety
3383  *
3384  * There are several cases in which it's unsafe to push down an upper-level
3385  * qual if it references a particular output column of a subquery. We check
3386  * each output column of the subquery and set unsafeColumns[k] to true if
3387  * that column is unsafe for a pushed-down qual to reference. The conditions
3388  * checked here are:
3389  *
3390  * 1. We must not push down any quals that refer to subselect outputs that
3391  * return sets, else we'd introduce functions-returning-sets into the
3392  * subquery's WHERE/HAVING quals.
3393  *
3394  * 2. We must not push down any quals that refer to subselect outputs that
3395  * contain volatile functions, for fear of introducing strange results due
3396  * to multiple evaluation of a volatile function.
3397  *
3398  * 3. If the subquery uses DISTINCT ON, we must not push down any quals that
3399  * refer to non-DISTINCT output columns, because that could change the set
3400  * of rows returned. (This condition is vacuous for DISTINCT, because then
3401  * there are no non-DISTINCT output columns, so we needn't check. Note that
3402  * subquery_is_pushdown_safe already reported that we can't use volatile
3403  * quals if there's DISTINCT or DISTINCT ON.)
3404  *
3405  * 4. If the subquery has any window functions, we must not push down quals
3406  * that reference any output columns that are not listed in all the subquery's
3407  * window PARTITION BY clauses. We can push down quals that use only
3408  * partitioning columns because they should succeed or fail identically for
3409  * every row of any one window partition, and totally excluding some
3410  * partitions will not change a window function's results for remaining
3411  * partitions. (Again, this also requires nonvolatile quals, but
3412  * subquery_is_pushdown_safe handles that.)
3413  */
3414 static void
3415 check_output_expressions(Query *subquery, pushdown_safety_info *safetyInfo)
3416 {
3417  ListCell *lc;
3418 
3419  foreach(lc, subquery->targetList)
3420  {
3421  TargetEntry *tle = (TargetEntry *) lfirst(lc);
3422 
3423  if (tle->resjunk)
3424  continue; /* ignore resjunk columns */
3425 
3426  /* We need not check further if output col is already known unsafe */
3427  if (safetyInfo->unsafeColumns[tle->resno])
3428  continue;
3429 
3430  /* Functions returning sets are unsafe (point 1) */
3431  if (subquery->hasTargetSRFs &&
3432  expression_returns_set((Node *) tle->expr))
3433  {
3434  safetyInfo->unsafeColumns[tle->resno] = true;
3435  continue;
3436  }
3437 
3438  /* Volatile functions are unsafe (point 2) */
3439  if (contain_volatile_functions((Node *) tle->expr))
3440  {
3441  safetyInfo->unsafeColumns[tle->resno] = true;
3442  continue;
3443  }
3444 
3445  /* If subquery uses DISTINCT ON, check point 3 */
3446  if (subquery->hasDistinctOn &&
3447  !targetIsInSortList(tle, InvalidOid, subquery->distinctClause))
3448  {
3449  /* non-DISTINCT column, so mark it unsafe */
3450  safetyInfo->unsafeColumns[tle->resno] = true;
3451  continue;
3452  }
3453 
3454  /* If subquery uses window functions, check point 4 */
3455  if (subquery->hasWindowFuncs &&
3456  !targetIsInAllPartitionLists(tle, subquery))
3457  {
3458  /* not present in all PARTITION BY clauses, so mark it unsafe */
3459  safetyInfo->unsafeColumns[tle->resno] = true;
3460  continue;
3461  }
3462  }
3463 }
3464 
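/*
 * [Editor's illustration -- not part of allpaths.c] The four conditions above
 * reduce to per-column boolean tests.  For instance, pushing "x = 42" into
 * "SELECT x, sum(y) OVER (PARTITION BY x) FROM t" is allowed because x
 * appears in every PARTITION BY list, while a qual on y would be refused.
 * A standalone sketch with simplified, hypothetical structures:
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct toy_tle
{
    int         resno;          /* 1-based output column number */
    bool        returns_set;    /* condition 1: SRF in the expression */
    bool        is_volatile;    /* condition 2: volatile function */
    bool        in_distinct_on; /* condition 3: listed in DISTINCT ON */
    bool        in_all_partition_lists; /* condition 4 */
} toy_tle;

static void
toy_check_output_expressions(const toy_tle *tlist, int ncols,
                             bool has_distinct_on, bool has_window_funcs,
                             bool *unsafe /* indexed by resno */ )
{
    for (int i = 0; i < ncols; i++)
    {
        const toy_tle *tle = &tlist[i];

        if (tle->returns_set || tle->is_volatile ||
            (has_distinct_on && !tle->in_distinct_on) ||
            (has_window_funcs && !tle->in_all_partition_lists))
            unsafe[tle->resno] = true;
    }
}

int
main(void)
{
    toy_tle     tlist[] = {
        {1, false, false, true, true},  /* partitioning column: stays safe */
        {2, false, false, true, false}, /* not in every PARTITION BY list */
    };
    bool        unsafe[3] = {false};

    toy_check_output_expressions(tlist, 2, false, true, unsafe);
    printf("col 1 unsafe: %d, col 2 unsafe: %d\n", unsafe[1], unsafe[2]);
    return 0;
}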
3465 /*
3466  * For subqueries using UNION/UNION ALL/INTERSECT/INTERSECT ALL, we can
3467  * push quals into each component query, but the quals can only reference
3468  * subquery columns that suffer no type coercions in the set operation.
3469  * Otherwise there are possible semantic gotchas. So, we check the
3470  * component queries to see if any of them have output types different from
3471  * the top-level setop outputs. unsafeColumns[k] is set true if column k
3472  * has different type in any component.
3473  *
3474  * We don't have to care about typmods here: the only allowed difference
3475  * between set-op input and output typmods is input is a specific typmod
3476  * and output is -1, and that does not require a coercion.
3477  *
3478  * tlist is a subquery tlist.
3479  * colTypes is an OID list of the top-level setop's output column types.
3480  * safetyInfo->unsafeColumns[] is the result array.
3481  */
3482 static void
3483 compare_tlist_datatypes(List *tlist, List *colTypes,
3484  pushdown_safety_info *safetyInfo)
3485 {
3486  ListCell *l;
3487  ListCell *colType = list_head(colTypes);
3488 
3489  foreach(l, tlist)
3490  {
3491  TargetEntry *tle = (TargetEntry *) lfirst(l);
3492 
3493  if (tle->resjunk)
3494  continue; /* ignore resjunk columns */
3495  if (colType == NULL)
3496  elog(ERROR, "wrong number of tlist entries");
3497  if (exprType((Node *) tle->expr) != lfirst_oid(colType))
3498  safetyInfo->unsafeColumns[tle->resno] = true;
3499  colType = lnext(colTypes, colType);
3500  }
3501  if (colType != NULL)
3502  elog(ERROR, "wrong number of tlist entries");
3503 }
3504 
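/*
 * [Editor's illustration -- not part of allpaths.c] The check above is just a
 * walk over two parallel lists, flagging any column whose type in this setop
 * arm differs from the setop's output type.  A standalone analogue with type
 * OIDs reduced to plain ints (toy_* names are hypothetical):
 */
#include <stdbool.h>
#include <stdio.h>

static void
toy_compare_types(const int *arm_types, const int *top_types, int ncols,
                  bool *unsafe /* indexed by 1-based column number */ )
{
    for (int i = 0; i < ncols; i++)
    {
        /* any difference implies a coercion, so mark the column unsafe */
        if (arm_types[i] != top_types[i])
            unsafe[i + 1] = true;
    }
}

int
main(void)
{
    int         arm[] = {23, 25};   /* int4, text */
    int         top[] = {20, 25};   /* int8, text: column 1 was coerced */
    bool        unsafe[3] = {false};

    toy_compare_types(arm, top, 2, unsafe);
    printf("col 1 unsafe: %d, col 2 unsafe: %d\n", unsafe[1], unsafe[2]);
    return 0;
}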
3505 /*
3506  * targetIsInAllPartitionLists
3507  * True if the TargetEntry is listed in the PARTITION BY clause
3508  * of every window defined in the query.
3509  *
3510  * It would be safe to ignore windows not actually used by any window
3511  * function, but it's not easy to get that info at this stage; and it's
3512  * unlikely to be useful to spend any extra cycles getting it, since
3513  * unreferenced window definitions are probably infrequent in practice.
3514  */
3515 static bool
3516 targetIsInAllPartitionLists(TargetEntry *tle, Query *query)
3517 {
3518  ListCell *lc;
3519 
3520  foreach(lc, query->windowClause)
3521  {
3522  WindowClause *wc = (WindowClause *) lfirst(lc);
3523 
3525  return false;
3526  }
3527  return true;
3528 }
3529 
3530 /*
3531  * qual_is_pushdown_safe - is a particular qual safe to push down?
3532  *
3533  * qual is a restriction clause applying to the given subquery (whose RTE
3534  * has index rti in the parent query).
3535  *
3536  * Conditions checked here:
3537  *
3538  * 1. The qual must not contain any SubPlans (mainly because I'm not sure
3539  * it will work correctly: SubLinks will already have been transformed into
3540  * SubPlans in the qual, but not in the subquery). Note that SubLinks that
3541  * transform to initplans are safe, and will be accepted here because what
3542  * we'll see in the qual is just a Param referencing the initplan output.
3543  *
3544  * 2. If unsafeVolatile is set, the qual must not contain any volatile
3545  * functions.
3546  *
3547  * 3. If unsafeLeaky is set, the qual must not contain any leaky functions
3548  * that are passed Var nodes, and therefore might reveal values from the
3549  * subquery as side effects.
3550  *
3551  * 4. The qual must not refer to the whole-row output of the subquery
3552  * (since there is no easy way to name that within the subquery itself).
3553  *
3554  * 5. The qual must not refer to any subquery output columns that were
3555  * found to be unsafe to reference by subquery_is_pushdown_safe().
3556  */
3557 static bool
3558 qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual,
3559  pushdown_safety_info *safetyInfo)
3560 {
3561  bool safe = true;
3562  List *vars;
3563  ListCell *vl;
3564 
3565  /* Refuse subselects (point 1) */
3566  if (contain_subplans(qual))
3567  return false;
3568 
3569  /* Refuse volatile quals if we found they'd be unsafe (point 2) */
3570  if (safetyInfo->unsafeVolatile &&
3571  contain_volatile_functions(qual))
3572  return false;
3573 
3574  /* Refuse leaky quals if told to (point 3) */
3575  if (safetyInfo->unsafeLeaky &&
3576  contain_leaked_vars(qual))
3577  return false;
3578 
3579  /*
3580  * It would be unsafe to push down window function calls, but at least for
3581  * the moment we could never see any in a qual anyhow. (The same applies
3582  * to aggregates, which we check for in pull_var_clause below.)
3583  */
3584  Assert(!contain_window_function(qual));
3585 
3586  /*
3587  * Examine all Vars used in clause. Since it's a restriction clause, all
3588  * such Vars must refer to subselect output columns ... unless this is
3589  * part of a LATERAL subquery, in which case there could be lateral
3590  * references.
3591  */
3592  vars = pull_var_clause(qual, PVC_INCLUDE_PLACEHOLDERS);
3593  foreach(vl, vars)
3594  {
3595  Var *var = (Var *) lfirst(vl);
3596 
3597  /*
3598  * XXX Punt if we find any PlaceHolderVars in the restriction clause.
3599  * It's not clear whether a PHV could safely be pushed down, and even
3600  * less clear whether such a situation could arise in any cases of
3601  * practical interest anyway. So for the moment, just refuse to push
3602  * down.
3603  */
3604  if (!IsA(var, Var))
3605  {
3606  safe = false;
3607  break;
3608  }
3609 
3610  /*
3611  * Punt if we find any lateral references. It would be safe to push
3612  * these down, but we'd have to convert them into outer references,
3613  * which subquery_push_qual lacks the infrastructure to do. The case
3614  * arises so seldom that it doesn't seem worth working hard on.
3615  */
3616  if (var->varno != rti)
3617  {
3618  safe = false;
3619  break;
3620  }
3621 
3622  /* Subqueries have no system columns */
3623  Assert(var->varattno >= 0);
3624 
3625  /* Check point 4 */
3626  if (var->varattno == 0)
3627  {
3628  safe = false;
3629  break;
3630  }
3631 
3632  /* Check point 5 */
3633  if (safetyInfo->unsafeColumns[var->varattno])
3634  {
3635  safe = false;
3636  break;
3637  }
3638  }
3639 
3640  list_free(vars);
3641 
3642  return safe;
3643 }
3644 
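/*
 * [Editor's illustration -- not part of allpaths.c] After the clause-level
 * tests, the Var loop above enforces three per-Var rules: no lateral
 * references (wrong varno), no whole-row references (varattno 0), and no
 * columns previously marked unsafe.  A standalone sketch (toy_* names are
 * hypothetical):
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct toy_var
{
    int         varno;      /* range-table index the Var refers to */
    int         varattno;   /* column number; 0 means whole-row */
} toy_var;

static bool
toy_qual_vars_safe(const toy_var *vars, int nvars, int rti,
                   const bool *unsafe_columns)
{
    for (int i = 0; i < nvars; i++)
    {
        if (vars[i].varno != rti)   /* lateral reference: punt */
            return false;
        if (vars[i].varattno == 0)  /* whole-row reference (point 4) */
            return false;
        if (unsafe_columns[vars[i].varattno])   /* point 5 */
            return false;
    }
    return true;
}

int
main(void)
{
    bool        unsafe[4] = {false, false, true, false};   /* col 2 unsafe */
    toy_var     qual_vars[] = {{1, 1}, {1, 3}};

    printf("qual is %s\n",
           toy_qual_vars_safe(qual_vars, 2, 1, unsafe) ? "safe" : "unsafe");
    return 0;
}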
3645 /*
3646  * subquery_push_qual - push down a qual that we have determined is safe
3647  */
3648 static void
3649 subquery_push_qual(Query *subquery, RangeTblEntry *rte, Index rti, Node *qual)
3650 {
3651  if (subquery->setOperations != NULL)
3652  {
3653  /* Recurse to push it separately to each component query */
3654  recurse_push_qual(subquery->setOperations, subquery,
3655  rte, rti, qual);
3656  }
3657  else
3658  {
3659  /*
3660  * We need to replace Vars in the qual (which must refer to outputs of
3661  * the subquery) with copies of the subquery's targetlist expressions.
3662  * Note that at this point, any uplevel Vars in the qual should have
3663  * been replaced with Params, so they need no work.
3664  *
3665  * This step also ensures that when we are pushing into a setop tree,
3666  * each component query gets its own copy of the qual.
3667  */
3668  qual = ReplaceVarsFromTargetList(qual, rti, 0, rte,
3669  subquery->targetList,
3670  REPLACEVARS_REPORT_ERROR, 0,
3671  &subquery->hasSubLinks);
3672 
3673  /*
3674  * Now attach the qual to the proper place: normally WHERE, but if the
3675  * subquery uses grouping or aggregation, put it in HAVING (since the
3676  * qual really refers to the group-result rows).
3677  */
3678  if (subquery->hasAggs || subquery->groupClause || subquery->groupingSets || subquery->havingQual)
3679  subquery->havingQual = make_and_qual(subquery->havingQual, qual);
3680  else
3681  subquery->jointree->quals =
3682  make_and_qual(subquery->jointree->quals, qual);
3683 
3684  /*
3685  * We need not change the subquery's hasAggs or hasSubLinks flags,
3686  * since we can't be pushing down any aggregates that weren't there
3687  * before, and we don't push down subselects at all.
3688  */
3689  }
3690 }
3691 
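/*
 * [Editor's illustration -- not part of allpaths.c] In SQL terms: pushing the
 * outer qual "g = 42" into "SELECT g, sum(x) AS s FROM t GROUP BY g" attaches
 * it to HAVING, because the subquery aggregates; without grouping it would
 * land in WHERE.  The destination choice is just the flag test above,
 * sketched standalone here (hypothetical names):
 */
#include <stdbool.h>
#include <stdio.h>

static const char *
toy_qual_destination(bool has_aggs, bool has_group_clause,
                     bool has_grouping_sets, bool has_having_qual)
{
    if (has_aggs || has_group_clause || has_grouping_sets || has_having_qual)
        return "HAVING";
    return "WHERE";
}

int
main(void)
{
    printf("plain subquery: qual goes to %s\n",
           toy_qual_destination(false, false, false, false));
    printf("grouped subquery: qual goes to %s\n",
           toy_qual_destination(true, true, false, false));
    return 0;
}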
3692 /*
3693  * Helper routine to recurse through setOperations tree
3694  */
3695 static void
3696 recurse_push_qual(Node *setOp, Query *topquery,
3697  RangeTblEntry *rte, Index rti, Node *qual)
3698 {
3699  if (IsA(setOp, RangeTblRef))
3700  {
3701  RangeTblRef *rtr = (RangeTblRef *) setOp;
3702  RangeTblEntry *subrte = rt_fetch(rtr->rtindex, topquery->rtable);
3703  Query *subquery = subrte->subquery;
3704 
3705  Assert(subquery != NULL);
3706  subquery_push_qual(subquery, rte, rti, qual);
3707  }
3708  else if (IsA(setOp, SetOperationStmt))
3709  {
3710  SetOperationStmt *op = (SetOperationStmt *) setOp;
3711 
3712  recurse_push_qual(op->larg, topquery, rte, rti, qual);
3713  recurse_push_qual(op->rarg, topquery, rte, rti, qual);
3714  }
3715  else
3716  {
3717  elog(ERROR, "unrecognized node type: %d",
3718  (int) nodeTag(setOp));
3719  }
3720 }
3721 
3722 /*****************************************************************************
3723  * SIMPLIFYING SUBQUERY TARGETLISTS
3724  *****************************************************************************/
3725 
3726 /*
3727  * remove_unused_subquery_outputs
3728  * Remove subquery targetlist items we don't need
3729  *
3730  * It's possible, even likely, that the upper query does not read all the
3731  * output columns of the subquery. We can remove any such outputs that are
3732  * not needed by the subquery itself (e.g., as sort/group columns) and do not
3733  * affect semantics otherwise (e.g., volatile functions can't be removed).
3734  * This is useful not only because we might be able to remove expensive-to-
3735  * compute expressions, but because deletion of output columns might allow
3736  * optimizations such as join removal to occur within the subquery.
3737  *
3738  * To avoid affecting column numbering in the targetlist, we don't physically
3739  * remove unused tlist entries, but rather replace their expressions with NULL
3740  * constants. This is implemented by modifying subquery->targetList.
3741  */
3742 static void
3743 remove_unused_subquery_outputs(Query *subquery, RelOptInfo *rel)
3744 {
3745  Bitmapset *attrs_used = NULL;
3746  ListCell *lc;
3747 
3748  /*
3749  * Do nothing if subquery has UNION/INTERSECT/EXCEPT: in principle we
3750  * could update all the child SELECTs' tlists, but it seems not worth the
3751  * trouble presently.
3752  */
3753  if (subquery->setOperations)
3754  return;
3755 
3756  /*
3757  * If subquery has regular DISTINCT (not DISTINCT ON), we're wasting our
3758  * time: all its output columns must be used in the distinctClause.
3759  */
3760  if (subquery->distinctClause && !subquery->hasDistinctOn)
3761  return;
3762 
3763  /*
3764  * Collect a bitmap of all the output column numbers used by the upper
3765  * query.
3766  *
3767  * Add all the attributes needed for joins or final output. Note: we must
3768  * look at rel's targetlist, not the attr_needed data, because attr_needed
3769  * isn't computed for inheritance child rels, cf set_append_rel_size().
3770  * (XXX might be worth changing that sometime.)
3771  */
3772  pull_varattnos((Node *) rel->reltarget->exprs, rel->relid, &attrs_used);
3773 
3774  /* Add all the attributes used by un-pushed-down restriction clauses. */
3775  foreach(lc, rel->baserestrictinfo)
3776  {
3777  RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
3778 
3779  pull_varattnos((Node *) rinfo->clause, rel->relid, &attrs_used);
3780  }
3781 
3782  /*
3783  * If there's a whole-row reference to the subquery, we can't remove
3784  * anything.
3785  */
3786  if (bms_is_member(0 - FirstLowInvalidHeapAttributeNumber, attrs_used))
3787  return;
3788 
3789  /*
3790  * Run through the tlist and zap entries we don't need. It's okay to
3791  * modify the tlist items in-place because set_subquery_pathlist made a
3792  * copy of the subquery.
3793  */
3794  foreach(lc, subquery->targetList)
3795  {
3796  TargetEntry *tle = (TargetEntry *) lfirst(lc);
3797  Node *texpr = (Node *) tle->expr;
3798 
3799  /*
3800  * If it has a sortgroupref number, it's used in some sort/group
3801  * clause so we'd better not remove it. Also, don't remove any
3802  * resjunk columns, since their reason for being has nothing to do
3803  * with anybody reading the subquery's output. (It's likely that
3804  * resjunk columns in a sub-SELECT would always have ressortgroupref
3805  * set, but even if they don't, it seems imprudent to remove them.)
3806  */
3807  if (tle->ressortgroupref || tle->resjunk)
3808  continue;
3809 
3810  /*
3811  * If it's used by the upper query, we can't remove it.
3812  */
3813  if (bms_is_member(tle->resno - FirstLowInvalidHeapAttributeNumber,
3814  attrs_used))
3815  continue;
3816 
3817  /*
3818  * If it contains a set-returning function, we can't remove it since
3819  * that could change the number of rows returned by the subquery.
3820  */
3821  if (subquery->hasTargetSRFs &&
3822  expression_returns_set(texpr))
3823  continue;
3824 
3825  /*
3826  * If it contains volatile functions, we daren't remove it for fear
3827  * that the user is expecting their side-effects to happen.
3828  */
3829  if (contain_volatile_functions(texpr))
3830  continue;
3831 
3832  /*
3833  * OK, we don't need it. Replace the expression with a NULL constant.
3834  * Preserve the exposed type of the expression, in case something
3835  * looks at the rowtype of the subquery's result.
3836  */
3837  tle->expr = (Expr *) makeNullConst(exprType(texpr),
3838  exprTypmod(texpr),
3839  exprCollation(texpr));
3840  }
3841 }
3842 
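/*
 * [Editor's illustration -- not part of allpaths.c] The routine above amounts
 * to: build the set of column numbers the upper query references, then
 * replace every unreferenced output with a NULL constant unless it must stay
 * (sort/group column, set-returning, or volatile).  A toy version using a
 * plain bitmask for the attribute set (names are hypothetical):
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct toy_output
{
    const char *expr;           /* stand-in for the tlist expression */
    bool        keep_anyway;    /* sortgroupref, SRF, or volatile */
} toy_output;

int
main(void)
{
    toy_output  tlist[] = {
        {"a + b", false},
        {"random()", true},     /* volatile: must not be removed */
        {"expensive_fn(c)", false},
    };
    unsigned    attrs_used = 1u << 1;   /* upper query reads column 1 only */

    for (int k = 1; k <= 3; k++)
    {
        toy_output *tle = &tlist[k - 1];

        if ((attrs_used & (1u << k)) == 0 && !tle->keep_anyway)
            tle->expr = "NULL"; /* replaced by a null constant */
        printf("column %d: %s\n", k, tle->expr);
    }
    return 0;
}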
3843 /*
3844  * create_partial_bitmap_paths
3845  * Build partial bitmap heap path for the relation
3846  */
3847 void
3848 create_partial_bitmap_paths(PlannerInfo *root, RelOptInfo *rel,
3849  Path *bitmapqual)
3850 {
3851  int parallel_workers;
3852  double pages_fetched;
3853 
3854  /* Compute heap pages for bitmap heap scan */
3855  pages_fetched = compute_bitmap_pages(root, rel, bitmapqual, 1.0,
3856  NULL, NULL);
3857 
3858  parallel_workers = compute_parallel_worker(rel, pages_fetched, -1,
3859  max_parallel_workers_per_gather);
3860 
3861  if (parallel_workers <= 0)
3862  return;
3863 
3864  add_partial_path(rel, (Path *) create_bitmap_heap_path(root, rel,
3865  bitmapqual, rel->lateral_relids, 1.0, parallel_workers));
3866 }
3867 
3868 /*
3869  * Compute the number of parallel workers that should be used to scan a
3870  * relation. We compute the parallel workers based on the size of the heap to
3871  * be scanned and the size of the index to be scanned, then choose a minimum
3872  * of those.
3873  *
3874  * "heap_pages" is the number of pages from the table that we expect to scan, or
3875  * -1 if we don't expect to scan any.
3876  *
3877  * "index_pages" is the number of pages from the index that we expect to scan, or
3878  * -1 if we don't expect to scan any.
3879  *
3880  * "max_workers" is caller's limit on the number of workers. This typically
3881  * comes from a GUC.
3882  */
3883 int
3884 compute_parallel_worker(RelOptInfo *rel, double heap_pages, double index_pages,
3885  int max_workers)
3886 {
3887  int parallel_workers = 0;
3888 
3889  /*
3890  * If the user has set the parallel_workers reloption, use that; otherwise
3891  * select a default number of workers.
3892  */
3893  if (rel->rel_parallel_workers != -1)
3894  parallel_workers = rel->rel_parallel_workers;
3895  else
3896  {
3897  /*
3898  * If the number of pages being scanned is insufficient to justify a
3899  * parallel scan, just return zero ... unless it's an inheritance
3900  * child. In that case, we want to generate a parallel path here
3901  * anyway. It might not be worthwhile just for this relation, but
3902  * when combined with all of its inheritance siblings it may well pay
3903  * off.
3904  */
3905  if (rel->reloptkind == RELOPT_BASEREL &&
3906  ((heap_pages >= 0 && heap_pages < min_parallel_table_scan_size) ||
3907  (index_pages >= 0 && index_pages < min_parallel_index_scan_size)))
3908  return 0;
3909 
3910  if (heap_pages >= 0)
3911  {
3912  int heap_parallel_threshold;
3913  int heap_parallel_workers = 1;
3914 
3915  /*
3916  * Select the number of workers based on the log of the size of
3917  * the relation. This probably needs to be a good deal more
3918  * sophisticated, but we need something here for now. Note that
3919  * the upper limit of the min_parallel_table_scan_size GUC is
3920  * chosen to prevent overflow here.
3921  */
3922  heap_parallel_threshold = Max(min_parallel_table_scan_size, 1);
3923  while (heap_pages >= (BlockNumber) (heap_parallel_threshold * 3))
3924  {
3925  heap_parallel_workers++;
3926  heap_parallel_threshold *= 3;
3927  if (heap_parallel_threshold > INT_MAX / 3)
3928  break; /* avoid overflow */
3929  }
3930 
3931  parallel_workers = heap_parallel_workers;
3932  }
3933 
3934  if (index_pages >= 0)
3935  {
3936  int index_parallel_workers = 1;
3937  int index_parallel_threshold;
3938 
3939  /* same calculation as for heap_pages above */
3940  index_parallel_threshold = Max(min_parallel_index_scan_size, 1);
3941  while (index_pages >= (BlockNumber) (index_parallel_threshold * 3))
3942  {
3943  index_parallel_workers++;
3944  index_parallel_threshold *= 3;
3945  if (index_parallel_threshold > INT_MAX / 3)
3946  break; /* avoid overflow */
3947  }
3948 
3949  if (parallel_workers > 0)
3950  parallel_workers = Min(parallel_workers, index_parallel_workers);
3951  else
3952  parallel_workers = index_parallel_workers;
3953  }
3954  }
3955 
3956  /* In no case use more than caller supplied maximum number of workers */
3957  parallel_workers = Min(parallel_workers, max_workers);
3958 
3959  return parallel_workers;
3960 }
3961 
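/*
 * [Editor's illustration -- not part of allpaths.c] The worker count grows
 * logarithmically: one worker once the scan reaches the threshold, plus one
 * more each time the page count triples.  With the default
 * min_parallel_table_scan_size of 1024 pages (8MB), a 10000-page heap gets 3
 * workers, since 10000 >= 1024*3 and 10000 >= 1024*9 but 10000 < 1024*27.
 * A standalone version of the loop (overflow guard omitted for brevity):
 */
#include <stdio.h>

static int
toy_workers_for_pages(double pages, int threshold)
{
    int         workers = 1;

    while (pages >= (double) threshold * 3)
    {
        workers++;
        threshold *= 3;
    }
    return workers;
}

int
main(void)
{
    printf("%d\n", toy_workers_for_pages(10000.0, 1024));  /* prints 3 */
    return 0;
}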
3962 /*
3963  * generate_partitionwise_join_paths
3964  * Create paths representing partitionwise join for given partitioned
3965  * join relation.
3966  *
3967  * This must not be called until after we are done adding paths for all
3968  * child-joins. Otherwise, add_path might delete a path to which some path
3969  * generated here has a reference.
3970  */
3971 void
3972 generate_partitionwise_join_paths(PlannerInfo *root, RelOptInfo *rel)
3973 {
3974  List *live_children = NIL;
3975  int cnt_parts;
3976  int num_parts;
3977  RelOptInfo **part_rels;
3978 
3979  /* Handle only join relations here. */
3980  if (!IS_JOIN_REL(rel))
3981  return;
3982 
3983  /* We've nothing to do if the relation is not partitioned. */
3984  if (!IS_PARTITIONED_REL(rel))
3985  return;
3986 
3987  /* The relation should have consider_partitionwise_join set. */
3988  Assert(rel->consider_partitionwise_join);
3989 
3990  /* Guard against stack overflow due to overly deep partition hierarchy. */
3991  check_stack_depth();
3992 
3993  num_parts = rel->nparts;
3994  part_rels = rel->part_rels;
3995 
3996  /* Collect non-dummy child-joins. */
3997  for (cnt_parts = 0; cnt_parts < num_parts; cnt_parts++)
3998  {
3999  RelOptInfo *child_rel = part_rels[cnt_parts];
4000 
4001  /* If it's been pruned entirely, it's certainly dummy. */
4002  if (child_rel == NULL)
4003  continue;
4004 
4005  /* Add partitionwise join paths for partitioned child-joins. */
4006  generate_partitionwise_join_paths(root, child_rel);
4007 
4008  set_cheapest(child_rel);
4009 
4010  /* Dummy children will not be scanned, so ignore those. */
4011  if (IS_DUMMY_REL(child_rel))
4012  continue;
4013 
4014 #ifdef OPTIMIZER_DEBUG
4015  debug_print_rel(root, child_rel);
4016 #endif
4017 
4018  live_children = lappend(live_children, child_rel);
4019  }
4020 
4021  /* If all child-joins are dummy, parent join is also dummy. */
4022  if (!live_children)
4023  {
4024  mark_dummy_rel(rel);
4025  return;
4026  }
4027 
4028  /* Build additional paths for this rel from child-join paths. */
4029  add_paths_to_append_rel(root, rel, live_children);
4030  list_free(live_children);
4031 }
4032 
4033 
4034 /*****************************************************************************
4035  * DEBUG SUPPORT
4036  *****************************************************************************/
4037 
4038 #ifdef OPTIMIZER_DEBUG
4039 
4040 static void
4041 print_relids(PlannerInfo *root, Relids relids)
4042 {
4043  int x;
4044  bool first = true;
4045 
4046  x = -1;
4047  while ((x = bms_next_member(relids, x)) >= 0)
4048  {
4049  if (!first)
4050  printf(" ");
4051  if (x < root->simple_rel_array_size &&
4052  root->simple_rte_array[x])
4053  printf("%s", root->simple_rte_array[x]->eref->aliasname);
4054  else
4055  printf("%d", x);
4056  first = false;
4057  }
4058 }
4059 
4060 static void
4061 print_restrictclauses(PlannerInfo *root, List *clauses)
4062 {
4063  ListCell *l;
4064 
4065  foreach(l, clauses)
4066  {
4067  RestrictInfo *c = lfirst(l);
4068 
4069  print_expr((Node *) c->clause, root->parse->rtable);
4070  if (lnext(clauses, l))
4071  printf(", ");
4072  }
4073 }
4074 
4075 static void
4076 print_path(PlannerInfo *root, Path *path, int indent)
4077 {
4078  const char *ptype;
4079  bool join = false;
4080  Path *subpath = NULL;
4081  int i;
4082 
4083  switch (nodeTag(path))
4084  {
4085  case T_Path:
4086  switch (path->pathtype)
4087  {
4088  case T_SeqScan:
4089  ptype = "SeqScan";
4090  break;
4091  case T_SampleScan:
4092  ptype = "SampleScan";
4093  break;
4094  case T_FunctionScan:
4095  ptype = "FunctionScan";
4096  break;
4097  case T_TableFuncScan:
4098  ptype = "TableFuncScan";
4099  break;
4100  case T_ValuesScan:
4101  ptype = "ValuesScan";
4102  break;
4103  case T_CteScan:
4104  ptype = "CteScan";
4105  break;
4106  case T_NamedTuplestoreScan:
4107  ptype = "NamedTuplestoreScan";
4108  break;
4109  case T_Result:
4110  ptype = "Result";
4111  break;
4112  case T_WorkTableScan:
4113  ptype = "WorkTableScan";
4114  break;
4115  default:
4116  ptype = "???Path";
4117  break;
4118  }
4119  break;
4120  case T_IndexPath:
4121  ptype = "IdxScan";
4122  break;
4123  case T_BitmapHeapPath:
4124  ptype = "BitmapHeapScan";
4125  break;
4126  case T_BitmapAndPath:
4127  ptype = "BitmapAndPath";
4128  break;
4129  case T_BitmapOrPath:
4130  ptype = "BitmapOrPath";
4131  break;
4132  case T_TidPath:
4133  ptype = "TidScan";
4134  break;
4135  case T_SubqueryScanPath:
4136  ptype = "SubqueryScan";
4137  break;
4138  case T_ForeignPath:
4139  ptype = "ForeignScan";
4140  break;
4141  case T_CustomPath:
4142  ptype = "CustomScan";
4143  break;
4144  case T_NestPath:
4145  ptype = "NestLoop";
4146  join = true;
4147  break;
4148  case T_MergePath:
4149  ptype = "MergeJoin";
4150  join = true;
4151  break;
4152  case T_HashPath:
4153  ptype = "HashJoin";
4154  join = true;
4155  break;
4156  case T_AppendPath:
4157  ptype = "Append";
4158  break;
4159  case T_MergeAppendPath:
4160  ptype = "MergeAppend";
4161  break;
4162  case T_GroupResultPath:
4163  ptype = "GroupResult";
4164  break;
4165  case T_MaterialPath:
4166  ptype = "Material";
4167  subpath = ((MaterialPath *) path)->subpath;
4168  break;
4169  case T_UniquePath:
4170  ptype = "Unique";
4171  subpath = ((UniquePath *) path)->subpath;
4172  break;
4173  case T_GatherPath:
4174  ptype = "Gather";
4175  subpath = ((GatherPath *) path)->subpath;
4176  break;
4177  case T_GatherMergePath:
4178  ptype = "GatherMerge";
4179  subpath = ((GatherMergePath *) path)->subpath;
4180  break;
4181  case T_ProjectionPath:
4182  ptype = "Projection";
4183  subpath = ((ProjectionPath *) path)->subpath;
4184  break;
4185  case T_ProjectSetPath:
4186  ptype = "ProjectSet";
4187  subpath = ((ProjectSetPath *) path)->subpath;
4188  break;
4189  case T_SortPath:
4190  ptype = "Sort";
4191  subpath = ((SortPath *) path)->subpath;
4192  break;
4193  case T_IncrementalSortPath:
4194  ptype = "IncrementalSort";
4195  subpath = ((SortPath *) path)->subpath;
4196  break;
4197  case T_GroupPath:
4198  ptype = "Group";
4199  subpath = ((GroupPath *) path)->subpath;
4200  break;
4201  case T_UpperUniquePath:
4202  ptype = "UpperUnique";
4203  subpath = ((UpperUniquePath *) path)->subpath;
4204  break;
4205  case T_AggPath:
4206  ptype = "Agg";
4207  subpath = ((AggPath *) path)->subpath;
4208  break;
4209  case T_GroupingSetsPath:
4210  ptype = "GroupingSets";
4211  subpath = ((GroupingSetsPath *) path)->subpath;
4212  break;
4213  case T_MinMaxAggPath:
4214  ptype = "MinMaxAgg";
4215  break;
4216  case T_WindowAggPath:
4217  ptype = "WindowAgg";
4218  subpath = ((WindowAggPath *) path)->subpath;
4219  break;
4220  case T_SetOpPath:
4221  ptype = "SetOp";
4222  subpath = ((SetOpPath *) path)->subpath;
4223  break;
4224  case T_RecursiveUnionPath:
4225  ptype = "RecursiveUnion";
4226  break;
4227  case T_LockRowsPath:
4228  ptype = "LockRows";
4229  subpath = ((LockRowsPath *) path)->subpath;
4230  break;
4231  case T_ModifyTablePath:
4232  ptype = "ModifyTable";
4233  break;
4234  case T_LimitPath:
4235  ptype = "Limit";
4236  subpath = ((LimitPath *) path)->subpath;
4237  break;
4238  default:
4239  ptype = "???Path";
4240  break;
4241  }
4242 
4243  for (i = 0; i < indent; i++)
4244  printf("\t");
4245  printf("%s", ptype);
4246 
4247  if (path->parent)
4248  {
4249  printf("(");
4250  print_relids(root, path->parent->relids);
4251  printf(")");
4252  }
4253  if (path->param_info)
4254  {
4255  printf(" required_outer (");
4256  print_relids(root, path->param_info->ppi_req_outer);
4257  printf(")");
4258  }
4259  printf(" rows=%.0f cost=%.2f..%.2f\n",
4260  path->rows, path->startup_cost, path->total_cost);
4261 
4262  if (path->pathkeys)
4263  {
4264  for (i = 0; i < indent; i++)
4265  printf("\t");
4266  printf(" pathkeys: ");
4267  print_pathkeys(path->pathkeys, root->parse->rtable);
4268  }
4269 
4270  if (join)
4271  {
4272  JoinPath *jp = (JoinPath *) path;
4273 
4274  for (i = 0; i < indent; i++)
4275  printf("\t");
4276  printf(" clauses: ");
4277  print_restrictclauses(root, jp->joinrestrictinfo);
4278  printf("\n");
4279 
4280  if (IsA(path, MergePath))
4281  {
4282  MergePath *mp = (MergePath *) path;
4283 
4284  for (i = 0; i < indent; i++)
4285  printf("\t");
4286  printf(" sortouter=%d sortinner=%d materializeinner=%d\n",
4287  ((mp->outersortkeys) ? 1 : 0),
4288  ((mp->innersortkeys) ? 1 : 0),
4289  ((mp->materialize_inner) ? 1 : 0));
4290  }
4291 
4292  print_path(root, jp->outerjoinpath, indent + 1);
4293  print_path(root, jp->innerjoinpath, indent + 1);
4294  }
4295 
4296  if (subpath)
4297  print_path(root, subpath, indent + 1);
4298 }
4299 
4300 void
4301 debug_print_rel(PlannerInfo *root, RelOptInfo *rel)
4302 {
4303  ListCell *l;
4304 
4305  printf("RELOPTINFO (");
4306  print_relids(root, rel->relids);
4307  printf("): rows=%.0f width=%d\n", rel->rows, rel->reltarget->width);
4308 
4309  if (rel->baserestrictinfo)
4310  {
4311  printf("\tbaserestrictinfo: ");
4312  print_restrictclauses(root, rel->baserestrictinfo);
4313  printf("\n");
4314  }
4315 
4316  if (rel->joininfo)
4317  {
4318  printf("\tjoininfo: ");
4319  print_restrictclauses(root, rel->joininfo);
4320  printf("\n");
4321  }
4322 
4323  printf("\tpath list:\n");
4324  foreach(l, rel->pathlist)
4325  print_path(root, lfirst(l), 1);
4326  if (rel->cheapest_parameterized_paths)
4327  {
4328  printf("\n\tcheapest parameterized paths:\n");
4329  foreach(l, rel->cheapest_parameterized_paths)
4330  print_path(root, lfirst(l), 1);
4331  }
4332  if (rel->cheapest_startup_path)
4333  {
4334  printf("\n\tcheapest startup path:\n");
4335  print_path(root, rel->cheapest_startup_path, 1);
4336  }
4337  if (rel->cheapest_total_path)
4338  {
4339  printf("\n\tcheapest total path:\n");
4340  print_path(root, rel->cheapest_total_path, 1);
4341  }
4342  printf("\n");
4343  fflush(stdout);
4344 }
4345 
4346 #endif /* OPTIMIZER_DEBUG */