PostgreSQL Source Code  git master
pathnode.c
1 /*-------------------------------------------------------------------------
2  *
3  * pathnode.c
4  * Routines to manipulate pathlists and create path nodes
5  *
6  * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/optimizer/util/pathnode.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 #include "postgres.h"
16 
17 #include <math.h>
18 
19 #include "foreign/fdwapi.h"
20 #include "miscadmin.h"
21 #include "nodes/extensible.h"
22 #include "nodes/nodeFuncs.h"
23 #include "optimizer/appendinfo.h"
24 #include "optimizer/clauses.h"
25 #include "optimizer/cost.h"
26 #include "optimizer/optimizer.h"
27 #include "optimizer/pathnode.h"
28 #include "optimizer/paths.h"
29 #include "optimizer/planmain.h"
30 #include "optimizer/prep.h"
31 #include "optimizer/restrictinfo.h"
32 #include "optimizer/tlist.h"
33 #include "parser/parsetree.h"
34 #include "utils/lsyscache.h"
35 #include "utils/memutils.h"
36 #include "utils/selfuncs.h"
37 
38 typedef enum
39 {
40  COSTS_EQUAL, /* path costs are fuzzily equal */
41  COSTS_BETTER1, /* first path is cheaper than second */
42  COSTS_BETTER2, /* second path is cheaper than first */
43  COSTS_DIFFERENT /* neither path dominates the other on cost */
 44 } PathCostComparison;
 45 
46 /*
47  * STD_FUZZ_FACTOR is the normal fuzz factor for compare_path_costs_fuzzily.
48  * XXX is it worth making this user-controllable? It provides a tradeoff
49  * between planner runtime and the accuracy of path cost comparisons.
50  */
51 #define STD_FUZZ_FACTOR 1.01
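/*
 * For illustration: with STD_FUZZ_FACTOR = 1.01, total costs of 100.0 and
 * 100.9 compare as fuzzily equal (100.9 <= 100.0 * 1.01), while 100.0 vs.
 * 102.0 do not (102.0 > 101.0), so only in the latter case can one path be
 * discarded on total cost alone.
 */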
52 
53 static List *translate_sub_tlist(List *tlist, int relid);
54 static int append_total_cost_compare(const ListCell *a, const ListCell *b);
55 static int append_startup_cost_compare(const ListCell *a, const ListCell *b);
 56 static List *reparameterize_pathlist_by_child(PlannerInfo *root,
 57  List *pathlist,
58  RelOptInfo *child_rel);
59 
60 
61 /*****************************************************************************
62  * MISC. PATH UTILITIES
63  *****************************************************************************/
64 
65 /*
66  * compare_path_costs
67  * Return -1, 0, or +1 according as path1 is cheaper, the same cost,
68  * or more expensive than path2 for the specified criterion.
69  */
70 int
71 compare_path_costs(Path *path1, Path *path2, CostSelector criterion)
72 {
73  if (criterion == STARTUP_COST)
74  {
75  if (path1->startup_cost < path2->startup_cost)
76  return -1;
77  if (path1->startup_cost > path2->startup_cost)
78  return +1;
79 
80  /*
81  * If paths have the same startup cost (not at all unlikely), order
82  * them by total cost.
83  */
84  if (path1->total_cost < path2->total_cost)
85  return -1;
86  if (path1->total_cost > path2->total_cost)
87  return +1;
88  }
89  else
90  {
91  if (path1->total_cost < path2->total_cost)
92  return -1;
93  if (path1->total_cost > path2->total_cost)
94  return +1;
95 
96  /*
97  * If paths have the same total cost, order them by startup cost.
98  */
99  if (path1->startup_cost < path2->startup_cost)
100  return -1;
101  if (path1->startup_cost > path2->startup_cost)
102  return +1;
103  }
104  return 0;
105 }
106 
107 /*
 108  * compare_fractional_path_costs
109  * Return -1, 0, or +1 according as path1 is cheaper, the same cost,
110  * or more expensive than path2 for fetching the specified fraction
111  * of the total tuples.
112  *
113  * If fraction is <= 0 or > 1, we interpret it as 1, ie, we select the
114  * path with the cheaper total_cost.
115  */
 116 int
 117 compare_fractional_path_costs(Path *path1, Path *path2,
 118  double fraction)
119 {
120  Cost cost1,
121  cost2;
122 
123  if (fraction <= 0.0 || fraction >= 1.0)
124  return compare_path_costs(path1, path2, TOTAL_COST);
125  cost1 = path1->startup_cost +
126  fraction * (path1->total_cost - path1->startup_cost);
127  cost2 = path2->startup_cost +
128  fraction * (path2->total_cost - path2->startup_cost);
129  if (cost1 < cost2)
130  return -1;
131  if (cost1 > cost2)
132  return +1;
133  return 0;
134 }
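/*
 * For illustration: with fraction = 0.1, a path costing (startup 0,
 * total 1000) is charged 0 + 0.1 * (1000 - 0) = 100, while one costing
 * (startup 90, total 200) is charged 90 + 0.1 * (200 - 90) = 101; the
 * first path wins for fetching 10% of the tuples despite its much larger
 * total cost.
 */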
135 
136 /*
137  * compare_path_costs_fuzzily
138  * Compare the costs of two paths to see if either can be said to
139  * dominate the other.
140  *
141  * We use fuzzy comparisons so that add_path() can avoid keeping both of
142  * a pair of paths that really have insignificantly different cost.
143  *
144  * The fuzz_factor argument must be 1.0 plus delta, where delta is the
145  * fraction of the smaller cost that is considered to be a significant
146  * difference. For example, fuzz_factor = 1.01 makes the fuzziness limit
147  * be 1% of the smaller cost.
148  *
149  * The two paths are said to have "equal" costs if both startup and total
150  * costs are fuzzily the same. Path1 is said to be better than path2 if
151  * it has fuzzily better startup cost and fuzzily no worse total cost,
152  * or if it has fuzzily better total cost and fuzzily no worse startup cost.
153  * Path2 is better than path1 if the reverse holds. Finally, if one path
154  * is fuzzily better than the other on startup cost and fuzzily worse on
155  * total cost, we just say that their costs are "different", since neither
156  * dominates the other across the whole performance spectrum.
157  *
158  * This function also enforces a policy rule that paths for which the relevant
159  * one of parent->consider_startup and parent->consider_param_startup is false
160  * cannot survive comparisons solely on the grounds of good startup cost, so
161  * we never return COSTS_DIFFERENT when that is true for the total-cost loser.
162  * (But if total costs are fuzzily equal, we compare startup costs anyway,
163  * in hopes of eliminating one path or the other.)
164  */
165 static PathCostComparison
166 compare_path_costs_fuzzily(Path *path1, Path *path2, double fuzz_factor)
167 {
168 #define CONSIDER_PATH_STARTUP_COST(p) \
169  ((p)->param_info == NULL ? (p)->parent->consider_startup : (p)->parent->consider_param_startup)
170 
171  /*
172  * Check total cost first since it's more likely to be different; many
173  * paths have zero startup cost.
174  */
175  if (path1->total_cost > path2->total_cost * fuzz_factor)
176  {
177  /* path1 fuzzily worse on total cost */
178  if (CONSIDER_PATH_STARTUP_COST(path1) &&
179  path2->startup_cost > path1->startup_cost * fuzz_factor)
180  {
181  /* ... but path2 fuzzily worse on startup, so DIFFERENT */
182  return COSTS_DIFFERENT;
183  }
184  /* else path2 dominates */
185  return COSTS_BETTER2;
186  }
187  if (path2->total_cost > path1->total_cost * fuzz_factor)
188  {
189  /* path2 fuzzily worse on total cost */
190  if (CONSIDER_PATH_STARTUP_COST(path2) &&
191  path1->startup_cost > path2->startup_cost * fuzz_factor)
192  {
193  /* ... but path1 fuzzily worse on startup, so DIFFERENT */
194  return COSTS_DIFFERENT;
195  }
196  /* else path1 dominates */
197  return COSTS_BETTER1;
198  }
199  /* fuzzily the same on total cost ... */
200  if (path1->startup_cost > path2->startup_cost * fuzz_factor)
201  {
202  /* ... but path1 fuzzily worse on startup, so path2 wins */
203  return COSTS_BETTER2;
204  }
205  if (path2->startup_cost > path1->startup_cost * fuzz_factor)
206  {
207  /* ... but path2 fuzzily worse on startup, so path1 wins */
208  return COSTS_BETTER1;
209  }
210  /* fuzzily the same on both costs */
211  return COSTS_EQUAL;
212 
213 #undef CONSIDER_PATH_STARTUP_COST
214 }
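/*
 * For illustration, at the standard fuzz factor of 1.01: comparing a path
 * with costs (startup 0, total 100) against one with (startup 50,
 * total 100.5) yields COSTS_BETTER1, because the totals are fuzzily equal
 * and the first path has fuzzily better startup cost. Against a path with
 * (startup 50, total 90) the result is COSTS_DIFFERENT when the relevant
 * consider_startup flag is true (better startup but worse total), and
 * COSTS_BETTER2 when it is false.
 */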
215 
216 /*
217  * set_cheapest
218  * Find the minimum-cost paths from among a relation's paths,
219  * and save them in the rel's cheapest-path fields.
220  *
221  * cheapest_total_path is normally the cheapest-total-cost unparameterized
222  * path; but if there are no unparameterized paths, we assign it to be the
223  * best (cheapest least-parameterized) parameterized path. However, only
224  * unparameterized paths are considered candidates for cheapest_startup_path,
225  * so that will be NULL if there are no unparameterized paths.
226  *
227  * The cheapest_parameterized_paths list collects all parameterized paths
228  * that have survived the add_path() tournament for this relation. (Since
229  * add_path ignores pathkeys for a parameterized path, these will be paths
230  * that have best cost or best row count for their parameterization. We
 231  * may also have both a parallel-safe and a non-parallel-safe path for the
 232  * same parameterization in some cases, but this should be
233  * relatively rare since, most typically, all paths for the same relation
234  * will be parallel-safe or none of them will.)
235  *
236  * cheapest_parameterized_paths always includes the cheapest-total
237  * unparameterized path, too, if there is one; the users of that list find
238  * it more convenient if that's included.
239  *
240  * This is normally called only after we've finished constructing the path
241  * list for the rel node.
242  */
243 void
 244 set_cheapest(RelOptInfo *parent_rel)
 245 {
246  Path *cheapest_startup_path;
247  Path *cheapest_total_path;
248  Path *best_param_path;
249  List *parameterized_paths;
250  ListCell *p;
251 
252  Assert(IsA(parent_rel, RelOptInfo));
253 
254  if (parent_rel->pathlist == NIL)
255  elog(ERROR, "could not devise a query plan for the given query");
256 
257  cheapest_startup_path = cheapest_total_path = best_param_path = NULL;
258  parameterized_paths = NIL;
259 
260  foreach(p, parent_rel->pathlist)
261  {
262  Path *path = (Path *) lfirst(p);
263  int cmp;
264 
265  if (path->param_info)
266  {
267  /* Parameterized path, so add it to parameterized_paths */
268  parameterized_paths = lappend(parameterized_paths, path);
269 
270  /*
271  * If we have an unparameterized cheapest-total, we no longer care
272  * about finding the best parameterized path, so move on.
273  */
274  if (cheapest_total_path)
275  continue;
276 
277  /*
278  * Otherwise, track the best parameterized path, which is the one
279  * with least total cost among those of the minimum
280  * parameterization.
281  */
282  if (best_param_path == NULL)
283  best_param_path = path;
284  else
285  {
286  switch (bms_subset_compare(PATH_REQ_OUTER(path),
287  PATH_REQ_OUTER(best_param_path)))
288  {
289  case BMS_EQUAL:
290  /* keep the cheaper one */
291  if (compare_path_costs(path, best_param_path,
292  TOTAL_COST) < 0)
293  best_param_path = path;
294  break;
295  case BMS_SUBSET1:
296  /* new path is less-parameterized */
297  best_param_path = path;
298  break;
299  case BMS_SUBSET2:
300  /* old path is less-parameterized, keep it */
301  break;
302  case BMS_DIFFERENT:
303 
304  /*
305  * This means that neither path has the least possible
306  * parameterization for the rel. We'll sit on the old
307  * path until something better comes along.
308  */
309  break;
310  }
311  }
312  }
313  else
314  {
315  /* Unparameterized path, so consider it for cheapest slots */
316  if (cheapest_total_path == NULL)
317  {
318  cheapest_startup_path = cheapest_total_path = path;
319  continue;
320  }
321 
322  /*
323  * If we find two paths of identical costs, try to keep the
324  * better-sorted one. The paths might have unrelated sort
325  * orderings, in which case we can only guess which might be
326  * better to keep, but if one is superior then we definitely
327  * should keep that one.
328  */
329  cmp = compare_path_costs(cheapest_startup_path, path, STARTUP_COST);
330  if (cmp > 0 ||
331  (cmp == 0 &&
332  compare_pathkeys(cheapest_startup_path->pathkeys,
333  path->pathkeys) == PATHKEYS_BETTER2))
334  cheapest_startup_path = path;
335 
336  cmp = compare_path_costs(cheapest_total_path, path, TOTAL_COST);
337  if (cmp > 0 ||
338  (cmp == 0 &&
339  compare_pathkeys(cheapest_total_path->pathkeys,
340  path->pathkeys) == PATHKEYS_BETTER2))
341  cheapest_total_path = path;
342  }
343  }
344 
345  /* Add cheapest unparameterized path, if any, to parameterized_paths */
346  if (cheapest_total_path)
347  parameterized_paths = lcons(cheapest_total_path, parameterized_paths);
348 
349  /*
350  * If there is no unparameterized path, use the best parameterized path as
351  * cheapest_total_path (but not as cheapest_startup_path).
352  */
353  if (cheapest_total_path == NULL)
354  cheapest_total_path = best_param_path;
355  Assert(cheapest_total_path != NULL);
356 
357  parent_rel->cheapest_startup_path = cheapest_startup_path;
358  parent_rel->cheapest_total_path = cheapest_total_path;
359  parent_rel->cheapest_unique_path = NULL; /* computed only if needed */
360  parent_rel->cheapest_parameterized_paths = parameterized_paths;
361 }
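/*
 * For illustration: if the only surviving paths for a rel are parameterized
 * by {A} and by {A,B}, with no unparameterized path at all, the {A} path is
 * the least-parameterized candidate and becomes cheapest_total_path, while
 * cheapest_startup_path is left NULL.
 */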
362 
363 /*
364  * add_path
365  * Consider a potential implementation path for the specified parent rel,
366  * and add it to the rel's pathlist if it is worthy of consideration.
367  * A path is worthy if it has a better sort order (better pathkeys) or
368  * cheaper cost (on either dimension), or generates fewer rows, than any
369  * existing path that has the same or superset parameterization rels.
370  * We also consider parallel-safe paths more worthy than others.
371  *
372  * We also remove from the rel's pathlist any old paths that are dominated
373  * by new_path --- that is, new_path is cheaper, at least as well ordered,
374  * generates no more rows, requires no outer rels not required by the old
375  * path, and is no less parallel-safe.
376  *
377  * In most cases, a path with a superset parameterization will generate
378  * fewer rows (since it has more join clauses to apply), so that those two
379  * figures of merit move in opposite directions; this means that a path of
380  * one parameterization can seldom dominate a path of another. But such
381  * cases do arise, so we make the full set of checks anyway.
382  *
383  * There are two policy decisions embedded in this function, along with
384  * its sibling add_path_precheck. First, we treat all parameterized paths
385  * as having NIL pathkeys, so that they cannot win comparisons on the
386  * basis of sort order. This is to reduce the number of parameterized
387  * paths that are kept; see discussion in src/backend/optimizer/README.
388  *
389  * Second, we only consider cheap startup cost to be interesting if
390  * parent_rel->consider_startup is true for an unparameterized path, or
391  * parent_rel->consider_param_startup is true for a parameterized one.
392  * Again, this allows discarding useless paths sooner.
393  *
394  * The pathlist is kept sorted by total_cost, with cheaper paths
395  * at the front. Within this routine, that's simply a speed hack:
396  * doing it that way makes it more likely that we will reject an inferior
397  * path after a few comparisons, rather than many comparisons.
398  * However, add_path_precheck relies on this ordering to exit early
399  * when possible.
400  *
401  * NOTE: discarded Path objects are immediately pfree'd to reduce planner
402  * memory consumption. We dare not try to free the substructure of a Path,
403  * since much of it may be shared with other Paths or the query tree itself;
404  * but just recycling discarded Path nodes is a very useful savings in
405  * a large join tree. We can recycle the List nodes of pathlist, too.
406  *
407  * As noted in optimizer/README, deleting a previously-accepted Path is
408  * safe because we know that Paths of this rel cannot yet be referenced
409  * from any other rel, such as a higher-level join. However, in some cases
410  * it is possible that a Path is referenced by another Path for its own
411  * rel; we must not delete such a Path, even if it is dominated by the new
412  * Path. Currently this occurs only for IndexPath objects, which may be
413  * referenced as children of BitmapHeapPaths as well as being paths in
414  * their own right. Hence, we don't pfree IndexPaths when rejecting them.
415  *
416  * 'parent_rel' is the relation entry to which the path corresponds.
417  * 'new_path' is a potential path for parent_rel.
418  *
419  * Returns nothing, but modifies parent_rel->pathlist.
420  */
421 void
422 add_path(RelOptInfo *parent_rel, Path *new_path)
423 {
424  bool accept_new = true; /* unless we find a superior old path */
425  int insert_at = 0; /* where to insert new item */
426  List *new_path_pathkeys;
427  ListCell *p1;
428 
429  /*
430  * This is a convenient place to check for query cancel --- no part of the
431  * planner goes very long without calling add_path().
432  */
 433  CHECK_FOR_INTERRUPTS();
 434 
435  /* Pretend parameterized paths have no pathkeys, per comment above */
436  new_path_pathkeys = new_path->param_info ? NIL : new_path->pathkeys;
437 
438  /*
439  * Loop to check proposed new path against old paths. Note it is possible
440  * for more than one old path to be tossed out because new_path dominates
441  * it.
442  */
443  foreach(p1, parent_rel->pathlist)
444  {
445  Path *old_path = (Path *) lfirst(p1);
446  bool remove_old = false; /* unless new proves superior */
447  PathCostComparison costcmp;
448  PathKeysComparison keyscmp;
449  BMS_Comparison outercmp;
450 
451  /*
452  * Do a fuzzy cost comparison with standard fuzziness limit.
453  */
454  costcmp = compare_path_costs_fuzzily(new_path, old_path,
 455  STD_FUZZ_FACTOR);
 456 
457  /*
458  * If the two paths compare differently for startup and total cost,
459  * then we want to keep both, and we can skip comparing pathkeys and
460  * required_outer rels. If they compare the same, proceed with the
461  * other comparisons. Row count is checked last. (We make the tests
462  * in this order because the cost comparison is most likely to turn
463  * out "different", and the pathkeys comparison next most likely. As
464  * explained above, row count very seldom makes a difference, so even
465  * though it's cheap to compare there's not much point in checking it
466  * earlier.)
467  */
468  if (costcmp != COSTS_DIFFERENT)
469  {
470  /* Similarly check to see if either dominates on pathkeys */
471  List *old_path_pathkeys;
472 
473  old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
474  keyscmp = compare_pathkeys(new_path_pathkeys,
475  old_path_pathkeys);
476  if (keyscmp != PATHKEYS_DIFFERENT)
477  {
478  switch (costcmp)
479  {
480  case COSTS_EQUAL:
481  outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
482  PATH_REQ_OUTER(old_path));
483  if (keyscmp == PATHKEYS_BETTER1)
484  {
485  if ((outercmp == BMS_EQUAL ||
486  outercmp == BMS_SUBSET1) &&
487  new_path->rows <= old_path->rows &&
488  new_path->parallel_safe >= old_path->parallel_safe)
489  remove_old = true; /* new dominates old */
490  }
491  else if (keyscmp == PATHKEYS_BETTER2)
492  {
493  if ((outercmp == BMS_EQUAL ||
494  outercmp == BMS_SUBSET2) &&
495  new_path->rows >= old_path->rows &&
496  new_path->parallel_safe <= old_path->parallel_safe)
497  accept_new = false; /* old dominates new */
498  }
499  else /* keyscmp == PATHKEYS_EQUAL */
500  {
501  if (outercmp == BMS_EQUAL)
502  {
503  /*
504  * Same pathkeys and outer rels, and fuzzily
505  * the same cost, so keep just one; to decide
506  * which, first check parallel-safety, then
507  * rows, then do a fuzzy cost comparison with
508  * very small fuzz limit. (We used to do an
509  * exact cost comparison, but that results in
510  * annoying platform-specific plan variations
511  * due to roundoff in the cost estimates.) If
512  * things are still tied, arbitrarily keep
513  * only the old path. Notice that we will
514  * keep only the old path even if the
515  * less-fuzzy comparison decides the startup
516  * and total costs compare differently.
517  */
518  if (new_path->parallel_safe >
519  old_path->parallel_safe)
520  remove_old = true; /* new dominates old */
521  else if (new_path->parallel_safe <
522  old_path->parallel_safe)
523  accept_new = false; /* old dominates new */
524  else if (new_path->rows < old_path->rows)
525  remove_old = true; /* new dominates old */
526  else if (new_path->rows > old_path->rows)
527  accept_new = false; /* old dominates new */
528  else if (compare_path_costs_fuzzily(new_path,
529  old_path,
530  1.0000000001) == COSTS_BETTER1)
531  remove_old = true; /* new dominates old */
532  else
533  accept_new = false; /* old equals or
534  * dominates new */
535  }
536  else if (outercmp == BMS_SUBSET1 &&
537  new_path->rows <= old_path->rows &&
538  new_path->parallel_safe >= old_path->parallel_safe)
539  remove_old = true; /* new dominates old */
540  else if (outercmp == BMS_SUBSET2 &&
541  new_path->rows >= old_path->rows &&
542  new_path->parallel_safe <= old_path->parallel_safe)
543  accept_new = false; /* old dominates new */
544  /* else different parameterizations, keep both */
545  }
546  break;
547  case COSTS_BETTER1:
548  if (keyscmp != PATHKEYS_BETTER2)
549  {
550  outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
551  PATH_REQ_OUTER(old_path));
552  if ((outercmp == BMS_EQUAL ||
553  outercmp == BMS_SUBSET1) &&
554  new_path->rows <= old_path->rows &&
555  new_path->parallel_safe >= old_path->parallel_safe)
556  remove_old = true; /* new dominates old */
557  }
558  break;
559  case COSTS_BETTER2:
560  if (keyscmp != PATHKEYS_BETTER1)
561  {
562  outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
563  PATH_REQ_OUTER(old_path));
564  if ((outercmp == BMS_EQUAL ||
565  outercmp == BMS_SUBSET2) &&
566  new_path->rows >= old_path->rows &&
567  new_path->parallel_safe <= old_path->parallel_safe)
568  accept_new = false; /* old dominates new */
569  }
570  break;
571  case COSTS_DIFFERENT:
572 
573  /*
574  * can't get here, but keep this case to keep compiler
575  * quiet
576  */
577  break;
578  }
579  }
580  }
581 
582  /*
583  * Remove current element from pathlist if dominated by new.
584  */
585  if (remove_old)
586  {
587  parent_rel->pathlist = foreach_delete_current(parent_rel->pathlist,
588  p1);
589 
590  /*
591  * Delete the data pointed-to by the deleted cell, if possible
592  */
593  if (!IsA(old_path, IndexPath))
594  pfree(old_path);
595  }
596  else
597  {
598  /* new belongs after this old path if it has cost >= old's */
599  if (new_path->total_cost >= old_path->total_cost)
600  insert_at = foreach_current_index(p1) + 1;
601  }
602 
603  /*
604  * If we found an old path that dominates new_path, we can quit
605  * scanning the pathlist; we will not add new_path, and we assume
606  * new_path cannot dominate any other elements of the pathlist.
607  */
608  if (!accept_new)
609  break;
610  }
611 
612  if (accept_new)
613  {
614  /* Accept the new path: insert it at proper place in pathlist */
615  parent_rel->pathlist =
616  list_insert_nth(parent_rel->pathlist, insert_at, new_path);
617  }
618  else
619  {
620  /* Reject and recycle the new path */
621  if (!IsA(new_path, IndexPath))
622  pfree(new_path);
623  }
624 }
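/*
 * Usage sketch (simplified; "tidquals" and the other locals are assumed):
 * the scan-path builders under optimizer/path/ offer each candidate to
 * add_path(), which quietly discards dominated ones, and set_cheapest()
 * runs once the pathlist is complete:
 *
 *     add_path(rel, create_seqscan_path(root, rel, NULL, 0));
 *     add_path(rel, (Path *) create_tidscan_path(root, rel, tidquals, NULL));
 *     ...
 *     set_cheapest(rel);
 */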
625 
626 /*
627  * add_path_precheck
628  * Check whether a proposed new path could possibly get accepted.
629  * We assume we know the path's pathkeys and parameterization accurately,
630  * and have lower bounds for its costs.
631  *
632  * Note that we do not know the path's rowcount, since getting an estimate for
633  * that is too expensive to do before prechecking. We assume here that paths
634  * of a superset parameterization will generate fewer rows; if that holds,
635  * then paths with different parameterizations cannot dominate each other
636  * and so we can simply ignore existing paths of another parameterization.
637  * (In the infrequent cases where that rule of thumb fails, add_path will
638  * get rid of the inferior path.)
639  *
640  * At the time this is called, we haven't actually built a Path structure,
641  * so the required information has to be passed piecemeal.
642  */
643 bool
 644 add_path_precheck(RelOptInfo *parent_rel,
 645  Cost startup_cost, Cost total_cost,
646  List *pathkeys, Relids required_outer)
647 {
648  List *new_path_pathkeys;
649  bool consider_startup;
650  ListCell *p1;
651 
652  /* Pretend parameterized paths have no pathkeys, per add_path policy */
653  new_path_pathkeys = required_outer ? NIL : pathkeys;
654 
655  /* Decide whether new path's startup cost is interesting */
656  consider_startup = required_outer ? parent_rel->consider_param_startup : parent_rel->consider_startup;
657 
658  foreach(p1, parent_rel->pathlist)
659  {
660  Path *old_path = (Path *) lfirst(p1);
661  PathKeysComparison keyscmp;
662 
663  /*
664  * We are looking for an old_path with the same parameterization (and
665  * by assumption the same rowcount) that dominates the new path on
666  * pathkeys as well as both cost metrics. If we find one, we can
667  * reject the new path.
668  *
669  * Cost comparisons here should match compare_path_costs_fuzzily.
670  */
671  if (total_cost > old_path->total_cost * STD_FUZZ_FACTOR)
672  {
673  /* new path can win on startup cost only if consider_startup */
674  if (startup_cost > old_path->startup_cost * STD_FUZZ_FACTOR ||
675  !consider_startup)
676  {
677  /* new path loses on cost, so check pathkeys... */
678  List *old_path_pathkeys;
679 
680  old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
681  keyscmp = compare_pathkeys(new_path_pathkeys,
682  old_path_pathkeys);
683  if (keyscmp == PATHKEYS_EQUAL ||
684  keyscmp == PATHKEYS_BETTER2)
685  {
686  /* new path does not win on pathkeys... */
687  if (bms_equal(required_outer, PATH_REQ_OUTER(old_path)))
688  {
689  /* Found an old path that dominates the new one */
690  return false;
691  }
692  }
693  }
694  }
695  else
696  {
697  /*
698  * Since the pathlist is sorted by total_cost, we can stop looking
699  * once we reach a path with a total_cost larger than the new
700  * path's.
701  */
702  break;
703  }
704  }
705 
706  return true;
707 }
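/*
 * Usage sketch (simplified, in the spirit of the join-path builders in
 * optimizer/path/joinpath.c; "workspace" and the other locals are assumed):
 * lower-bound costs are estimated first, and the full join path is built
 * only if the precheck says it could survive add_path():
 *
 *     initial_cost_nestloop(root, &workspace, jointype,
 *                           outer_path, inner_path, extra);
 *     if (add_path_precheck(joinrel,
 *                           workspace.startup_cost, workspace.total_cost,
 *                           pathkeys, required_outer))
 *         add_path(joinrel, (Path *) create_nestloop_path(...));
 */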
708 
709 /*
710  * add_partial_path
711  * Like add_path, our goal here is to consider whether a path is worthy
712  * of being kept around, but the considerations here are a bit different.
713  * A partial path is one which can be executed in any number of workers in
714  * parallel such that each worker will generate a subset of the path's
715  * overall result.
716  *
717  * As in add_path, the partial_pathlist is kept sorted with the cheapest
718  * total path in front. This is depended on by multiple places, which
719  * just take the front entry as the cheapest path without searching.
720  *
721  * We don't generate parameterized partial paths for several reasons. Most
722  * importantly, they're not safe to execute, because there's nothing to
723  * make sure that a parallel scan within the parameterized portion of the
724  * plan is running with the same value in every worker at the same time.
725  * Fortunately, it seems unlikely to be worthwhile anyway, because having
726  * each worker scan the entire outer relation and a subset of the inner
727  * relation will generally be a terrible plan. The inner (parameterized)
728  * side of the plan will be small anyway. There could be rare cases where
729  * this wins big - e.g. if join order constraints put a 1-row relation on
730  * the outer side of the topmost join with a parameterized plan on the inner
731  * side - but we'll have to be content not to handle such cases until
732  * somebody builds an executor infrastructure that can cope with them.
733  *
734  * Because we don't consider parameterized paths here, we also don't
735  * need to consider the row counts as a measure of quality: every path will
736  * produce the same number of rows. Neither do we need to consider startup
737  * costs: parallelism is only used for plans that will be run to completion.
738  * Therefore, this routine is much simpler than add_path: it needs to
739  * consider only pathkeys and total cost.
740  *
741  * As with add_path, we pfree paths that are found to be dominated by
742  * another partial path; this requires that there be no other references to
743  * such paths yet. Hence, GatherPaths must not be created for a rel until
744  * we're done creating all partial paths for it. Unlike add_path, we don't
745  * take an exception for IndexPaths as partial index paths won't be
746  * referenced by partial BitmapHeapPaths.
747  */
748 void
749 add_partial_path(RelOptInfo *parent_rel, Path *new_path)
750 {
751  bool accept_new = true; /* unless we find a superior old path */
752  int insert_at = 0; /* where to insert new item */
753  ListCell *p1;
754 
755  /* Check for query cancel. */
 756  CHECK_FOR_INTERRUPTS();
 757 
758  /* Path to be added must be parallel safe. */
759  Assert(new_path->parallel_safe);
760 
761  /* Relation should be OK for parallelism, too. */
762  Assert(parent_rel->consider_parallel);
763 
764  /*
765  * As in add_path, throw out any paths which are dominated by the new
766  * path, but throw out the new path if some existing path dominates it.
767  */
768  foreach(p1, parent_rel->partial_pathlist)
769  {
770  Path *old_path = (Path *) lfirst(p1);
771  bool remove_old = false; /* unless new proves superior */
772  PathKeysComparison keyscmp;
773 
774  /* Compare pathkeys. */
775  keyscmp = compare_pathkeys(new_path->pathkeys, old_path->pathkeys);
776 
777  /* Unless pathkeys are incompatible, keep just one of the two paths. */
778  if (keyscmp != PATHKEYS_DIFFERENT)
779  {
780  if (new_path->total_cost > old_path->total_cost * STD_FUZZ_FACTOR)
781  {
782  /* New path costs more; keep it only if pathkeys are better. */
783  if (keyscmp != PATHKEYS_BETTER1)
784  accept_new = false;
785  }
786  else if (old_path->total_cost > new_path->total_cost
787  * STD_FUZZ_FACTOR)
788  {
789  /* Old path costs more; keep it only if pathkeys are better. */
790  if (keyscmp != PATHKEYS_BETTER2)
791  remove_old = true;
792  }
793  else if (keyscmp == PATHKEYS_BETTER1)
794  {
795  /* Costs are about the same, new path has better pathkeys. */
796  remove_old = true;
797  }
798  else if (keyscmp == PATHKEYS_BETTER2)
799  {
800  /* Costs are about the same, old path has better pathkeys. */
801  accept_new = false;
802  }
803  else if (old_path->total_cost > new_path->total_cost * 1.0000000001)
804  {
805  /* Pathkeys are the same, and the old path costs more. */
806  remove_old = true;
807  }
808  else
809  {
810  /*
811  * Pathkeys are the same, and new path isn't materially
812  * cheaper.
813  */
814  accept_new = false;
815  }
816  }
817 
818  /*
819  * Remove current element from partial_pathlist if dominated by new.
820  */
821  if (remove_old)
822  {
823  parent_rel->partial_pathlist =
824  foreach_delete_current(parent_rel->partial_pathlist, p1);
825  pfree(old_path);
826  }
827  else
828  {
829  /* new belongs after this old path if it has cost >= old's */
830  if (new_path->total_cost >= old_path->total_cost)
831  insert_at = foreach_current_index(p1) + 1;
832  }
833 
834  /*
835  * If we found an old path that dominates new_path, we can quit
836  * scanning the partial_pathlist; we will not add new_path, and we
837  * assume new_path cannot dominate any later path.
838  */
839  if (!accept_new)
840  break;
841  }
842 
843  if (accept_new)
844  {
845  /* Accept the new path: insert it at proper place */
846  parent_rel->partial_pathlist =
847  list_insert_nth(parent_rel->partial_pathlist, insert_at, new_path);
848  }
849  else
850  {
851  /* Reject and recycle the new path */
852  pfree(new_path);
853  }
854 }
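/*
 * Usage sketch (simplified; "parallel_workers" is assumed): partial paths
 * are accumulated first, and only after the list is final is a Gather path
 * built on the cheapest one, which by the ordering rule above is the front
 * list entry:
 *
 *     add_partial_path(rel, create_seqscan_path(root, rel, NULL,
 *                                               parallel_workers));
 *     ...
 *     cheapest_partial = (Path *) linitial(rel->partial_pathlist);
 *     add_path(rel, (Path *) create_gather_path(root, rel, cheapest_partial,
 *                                               rel->reltarget, NULL, NULL));
 */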
855 
856 /*
857  * add_partial_path_precheck
858  * Check whether a proposed new partial path could possibly get accepted.
859  *
860  * Unlike add_path_precheck, we can ignore startup cost and parameterization,
861  * since they don't matter for partial paths (see add_partial_path). But
862  * we do want to make sure we don't add a partial path if there's already
863  * a complete path that dominates it, since in that case the proposed path
864  * is surely a loser.
865  */
866 bool
867 add_partial_path_precheck(RelOptInfo *parent_rel, Cost total_cost,
868  List *pathkeys)
869 {
870  ListCell *p1;
871 
872  /*
873  * Our goal here is twofold. First, we want to find out whether this path
874  * is clearly inferior to some existing partial path. If so, we want to
875  * reject it immediately. Second, we want to find out whether this path
876  * is clearly superior to some existing partial path -- at least, modulo
877  * final cost computations. If so, we definitely want to consider it.
878  *
879  * Unlike add_path(), we always compare pathkeys here. This is because we
880  * expect partial_pathlist to be very short, and getting a definitive
881  * answer at this stage avoids the need to call add_path_precheck.
882  */
883  foreach(p1, parent_rel->partial_pathlist)
884  {
885  Path *old_path = (Path *) lfirst(p1);
886  PathKeysComparison keyscmp;
887 
888  keyscmp = compare_pathkeys(pathkeys, old_path->pathkeys);
889  if (keyscmp != PATHKEYS_DIFFERENT)
890  {
891  if (total_cost > old_path->total_cost * STD_FUZZ_FACTOR &&
892  keyscmp != PATHKEYS_BETTER1)
893  return false;
894  if (old_path->total_cost > total_cost * STD_FUZZ_FACTOR &&
895  keyscmp != PATHKEYS_BETTER2)
896  return true;
897  }
898  }
899 
900  /*
901  * This path is neither clearly inferior to an existing partial path nor
902  * clearly good enough that it might replace one. Compare it to
903  * non-parallel plans. If it loses even before accounting for the cost of
904  * the Gather node, we should definitely reject it.
905  *
906  * Note that we pass the total_cost to add_path_precheck twice. This is
907  * because it's never advantageous to consider the startup cost of a
908  * partial path; the resulting plans, if run in parallel, will be run to
909  * completion.
910  */
911  if (!add_path_precheck(parent_rel, total_cost, total_cost, pathkeys,
912  NULL))
913  return false;
914 
915  return true;
916 }
917 
918 
919 /*****************************************************************************
920  * PATH NODE CREATION ROUTINES
921  *****************************************************************************/
922 
923 /*
924  * create_seqscan_path
925  * Creates a path corresponding to a sequential scan, returning the
926  * pathnode.
927  */
928 Path *
 929 create_seqscan_path(PlannerInfo *root, RelOptInfo *rel,
 930  Relids required_outer, int parallel_workers)
931 {
932  Path *pathnode = makeNode(Path);
933 
934  pathnode->pathtype = T_SeqScan;
935  pathnode->parent = rel;
936  pathnode->pathtarget = rel->reltarget;
937  pathnode->param_info = get_baserel_parampathinfo(root, rel,
938  required_outer);
939  pathnode->parallel_aware = parallel_workers > 0 ? true : false;
940  pathnode->parallel_safe = rel->consider_parallel;
941  pathnode->parallel_workers = parallel_workers;
942  pathnode->pathkeys = NIL; /* seqscan has unordered result */
943 
944  cost_seqscan(pathnode, root, rel, pathnode->param_info);
945 
946  return pathnode;
947 }
948 
949 /*
950  * create_samplescan_path
951  * Creates a path node for a sampled table scan.
952  */
953 Path *
 954 create_samplescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
 955 {
956  Path *pathnode = makeNode(Path);
957 
958  pathnode->pathtype = T_SampleScan;
959  pathnode->parent = rel;
960  pathnode->pathtarget = rel->reltarget;
961  pathnode->param_info = get_baserel_parampathinfo(root, rel,
962  required_outer);
963  pathnode->parallel_aware = false;
964  pathnode->parallel_safe = rel->consider_parallel;
965  pathnode->parallel_workers = 0;
966  pathnode->pathkeys = NIL; /* samplescan has unordered result */
967 
968  cost_samplescan(pathnode, root, rel, pathnode->param_info);
969 
970  return pathnode;
971 }
972 
973 /*
974  * create_index_path
975  * Creates a path node for an index scan.
976  *
977  * 'index' is a usable index.
978  * 'indexclauses' is a list of IndexClause nodes representing clauses
979  * to be enforced as qual conditions in the scan.
980  * 'indexorderbys' is a list of bare expressions (no RestrictInfos)
981  * to be used as index ordering operators in the scan.
982  * 'indexorderbycols' is an integer list of index column numbers (zero based)
983  * the ordering operators can be used with.
984  * 'pathkeys' describes the ordering of the path.
985  * 'indexscandir' is ForwardScanDirection or BackwardScanDirection
986  * for an ordered index, or NoMovementScanDirection for
987  * an unordered index.
988  * 'indexonly' is true if an index-only scan is wanted.
989  * 'required_outer' is the set of outer relids for a parameterized path.
990  * 'loop_count' is the number of repetitions of the indexscan to factor into
991  * estimates of caching behavior.
992  * 'partial_path' is true if constructing a parallel index scan path.
993  *
994  * Returns the new path node.
995  */
996 IndexPath *
 997 create_index_path(PlannerInfo *root,
 998  IndexOptInfo *index,
 999  List *indexclauses,
1000  List *indexorderbys,
1001  List *indexorderbycols,
1002  List *pathkeys,
1003  ScanDirection indexscandir,
1004  bool indexonly,
1005  Relids required_outer,
1006  double loop_count,
1007  bool partial_path)
1008 {
1009  IndexPath *pathnode = makeNode(IndexPath);
1010  RelOptInfo *rel = index->rel;
1011 
1012  pathnode->path.pathtype = indexonly ? T_IndexOnlyScan : T_IndexScan;
1013  pathnode->path.parent = rel;
1014  pathnode->path.pathtarget = rel->reltarget;
1015  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1016  required_outer);
1017  pathnode->path.parallel_aware = false;
1018  pathnode->path.parallel_safe = rel->consider_parallel;
1019  pathnode->path.parallel_workers = 0;
1020  pathnode->path.pathkeys = pathkeys;
1021 
1022  pathnode->indexinfo = index;
1023  pathnode->indexclauses = indexclauses;
1024  pathnode->indexorderbys = indexorderbys;
1025  pathnode->indexorderbycols = indexorderbycols;
1026  pathnode->indexscandir = indexscandir;
1027 
1028  cost_index(pathnode, root, loop_count, partial_path);
1029 
1030  return pathnode;
1031 }
1032 
1033 /*
1034  * create_bitmap_heap_path
1035  * Creates a path node for a bitmap scan.
1036  *
1037  * 'bitmapqual' is a tree of IndexPath, BitmapAndPath, and BitmapOrPath nodes.
1038  * 'required_outer' is the set of outer relids for a parameterized path.
1039  * 'loop_count' is the number of repetitions of the indexscan to factor into
1040  * estimates of caching behavior.
1041  *
1042  * loop_count should match the value used when creating the component
1043  * IndexPaths.
1044  */
 1045 BitmapHeapPath *
 1046 create_bitmap_heap_path(PlannerInfo *root,
 1047  RelOptInfo *rel,
1048  Path *bitmapqual,
1049  Relids required_outer,
1050  double loop_count,
1051  int parallel_degree)
1052 {
1053  BitmapHeapPath *pathnode = makeNode(BitmapHeapPath);
1054 
1055  pathnode->path.pathtype = T_BitmapHeapScan;
1056  pathnode->path.parent = rel;
1057  pathnode->path.pathtarget = rel->reltarget;
1058  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1059  required_outer);
1060  pathnode->path.parallel_aware = parallel_degree > 0 ? true : false;
1061  pathnode->path.parallel_safe = rel->consider_parallel;
1062  pathnode->path.parallel_workers = parallel_degree;
1063  pathnode->path.pathkeys = NIL; /* always unordered */
1064 
1065  pathnode->bitmapqual = bitmapqual;
1066 
1067  cost_bitmap_heap_scan(&pathnode->path, root, rel,
1068  pathnode->path.param_info,
1069  bitmapqual, loop_count);
1070 
1071  return pathnode;
1072 }
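/*
 * Usage sketch (simplified, following the pattern in
 * optimizer/path/indxpath.c; "bitindexpaths" is assumed): the bitmapqual
 * argument is typically the best AND/OR combination of previously built
 * index paths:
 *
 *     bitmapqual = choose_bitmap_and(root, rel, bitindexpaths);
 *     bpath = create_bitmap_heap_path(root, rel, bitmapqual,
 *                                     rel->lateral_relids, 1.0, 0);
 *     add_path(rel, (Path *) bpath);
 */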
1073 
1074 /*
1075  * create_bitmap_and_path
1076  * Creates a path node representing a BitmapAnd.
1077  */
1078 BitmapAndPath *
 1079 create_bitmap_and_path(PlannerInfo *root,
 1080  RelOptInfo *rel,
1081  List *bitmapquals)
1082 {
1083  BitmapAndPath *pathnode = makeNode(BitmapAndPath);
1084 
1085  pathnode->path.pathtype = T_BitmapAnd;
1086  pathnode->path.parent = rel;
1087  pathnode->path.pathtarget = rel->reltarget;
1088  pathnode->path.param_info = NULL; /* not used in bitmap trees */
1089 
1090  /*
1091  * Currently, a BitmapHeapPath, BitmapAndPath, or BitmapOrPath will be
1092  * parallel-safe if and only if rel->consider_parallel is set. So, we can
1093  * set the flag for this path based only on the relation-level flag,
1094  * without actually iterating over the list of children.
1095  */
1096  pathnode->path.parallel_aware = false;
1097  pathnode->path.parallel_safe = rel->consider_parallel;
1098  pathnode->path.parallel_workers = 0;
1099 
1100  pathnode->path.pathkeys = NIL; /* always unordered */
1101 
1102  pathnode->bitmapquals = bitmapquals;
1103 
1104  /* this sets bitmapselectivity as well as the regular cost fields: */
1105  cost_bitmap_and_node(pathnode, root);
1106 
1107  return pathnode;
1108 }
1109 
1110 /*
1111  * create_bitmap_or_path
1112  * Creates a path node representing a BitmapOr.
1113  */
1114 BitmapOrPath *
 1115 create_bitmap_or_path(PlannerInfo *root,
 1116  RelOptInfo *rel,
1117  List *bitmapquals)
1118 {
1119  BitmapOrPath *pathnode = makeNode(BitmapOrPath);
1120 
1121  pathnode->path.pathtype = T_BitmapOr;
1122  pathnode->path.parent = rel;
1123  pathnode->path.pathtarget = rel->reltarget;
1124  pathnode->path.param_info = NULL; /* not used in bitmap trees */
1125 
1126  /*
1127  * Currently, a BitmapHeapPath, BitmapAndPath, or BitmapOrPath will be
1128  * parallel-safe if and only if rel->consider_parallel is set. So, we can
1129  * set the flag for this path based only on the relation-level flag,
1130  * without actually iterating over the list of children.
1131  */
1132  pathnode->path.parallel_aware = false;
1133  pathnode->path.parallel_safe = rel->consider_parallel;
1134  pathnode->path.parallel_workers = 0;
1135 
1136  pathnode->path.pathkeys = NIL; /* always unordered */
1137 
1138  pathnode->bitmapquals = bitmapquals;
1139 
1140  /* this sets bitmapselectivity as well as the regular cost fields: */
1141  cost_bitmap_or_node(pathnode, root);
1142 
1143  return pathnode;
1144 }
1145 
1146 /*
1147  * create_tidscan_path
1148  * Creates a path corresponding to a scan by TID, returning the pathnode.
1149  */
1150 TidPath *
 1151 create_tidscan_path(PlannerInfo *root, RelOptInfo *rel, List *tidquals,
 1152  Relids required_outer)
1153 {
1154  TidPath *pathnode = makeNode(TidPath);
1155 
1156  pathnode->path.pathtype = T_TidScan;
1157  pathnode->path.parent = rel;
1158  pathnode->path.pathtarget = rel->reltarget;
1159  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1160  required_outer);
1161  pathnode->path.parallel_aware = false;
1162  pathnode->path.parallel_safe = rel->consider_parallel;
1163  pathnode->path.parallel_workers = 0;
1164  pathnode->path.pathkeys = NIL; /* always unordered */
1165 
1166  pathnode->tidquals = tidquals;
1167 
1168  cost_tidscan(&pathnode->path, root, rel, tidquals,
1169  pathnode->path.param_info);
1170 
1171  return pathnode;
1172 }
1173 
1174 /*
1175  * create_append_path
1176  * Creates a path corresponding to an Append plan, returning the
1177  * pathnode.
1178  *
1179  * Note that we must handle subpaths = NIL, representing a dummy access path.
1180  * Also, there are callers that pass root = NULL.
1181  */
1182 AppendPath *
 1183 create_append_path(PlannerInfo *root,
 1184  RelOptInfo *rel,
1185  List *subpaths, List *partial_subpaths,
1186  List *pathkeys, Relids required_outer,
1187  int parallel_workers, bool parallel_aware,
1188  List *partitioned_rels, double rows)
1189 {
1190  AppendPath *pathnode = makeNode(AppendPath);
1191  ListCell *l;
1192 
1193  Assert(!parallel_aware || parallel_workers > 0);
1194 
1195  pathnode->path.pathtype = T_Append;
1196  pathnode->path.parent = rel;
1197  pathnode->path.pathtarget = rel->reltarget;
1198 
1199  /*
1200  * When generating an Append path for a partitioned table, there may be
1201  * parameters that are useful so we can eliminate certain partitions
1202  * during execution. Here we'll go all the way and fully populate the
1203  * parameter info data as we do for normal base relations. However, we
1204  * need only bother doing this for RELOPT_BASEREL rels, as
1205  * RELOPT_OTHER_MEMBER_REL's Append paths are merged into the base rel's
 1206  * Append subpaths. It would do no harm to do it for them too; we just
 1207  * skip it to avoid wasting effort.
1208  */
1209  if (partitioned_rels != NIL && root && rel->reloptkind == RELOPT_BASEREL)
1210  pathnode->path.param_info = get_baserel_parampathinfo(root,
1211  rel,
1212  required_outer);
1213  else
 1214  pathnode->path.param_info = get_appendrel_parampathinfo(rel,
 1215  required_outer);
1216 
1217  pathnode->path.parallel_aware = parallel_aware;
1218  pathnode->path.parallel_safe = rel->consider_parallel;
1219  pathnode->path.parallel_workers = parallel_workers;
1220  pathnode->path.pathkeys = pathkeys;
1221  pathnode->partitioned_rels = list_copy(partitioned_rels);
1222 
1223  /*
1224  * For parallel append, non-partial paths are sorted by descending total
1225  * costs. That way, the total time to finish all non-partial paths is
1226  * minimized. Also, the partial paths are sorted by descending startup
 1227  * costs. There may be some paths whose startup work must be done by a
 1228  * single worker. In such cases, it's better for the workers to choose the
 1229  * expensive ones first, whereas the leader should choose the plan with
 1230  * the cheapest startup cost.
1231  */
1232  if (pathnode->path.parallel_aware)
1233  {
1234  /*
1235  * We mustn't fiddle with the order of subpaths when the Append has
1236  * pathkeys. The order they're listed in is critical to keeping the
1237  * pathkeys valid.
1238  */
1239  Assert(pathkeys == NIL);
1240 
 1241  list_sort(subpaths, append_total_cost_compare);
 1242  list_sort(partial_subpaths, append_startup_cost_compare);
1243  }
1244  pathnode->first_partial_path = list_length(subpaths);
1245  pathnode->subpaths = list_concat(subpaths, partial_subpaths);
1246 
1247  /*
1248  * Apply query-wide LIMIT if known and path is for sole base relation.
1249  * (Handling this at this low level is a bit klugy.)
1250  */
1251  if (root != NULL && bms_equal(rel->relids, root->all_baserels))
1252  pathnode->limit_tuples = root->limit_tuples;
1253  else
1254  pathnode->limit_tuples = -1.0;
1255 
1256  foreach(l, pathnode->subpaths)
1257  {
1258  Path *subpath = (Path *) lfirst(l);
1259 
1260  pathnode->path.parallel_safe = pathnode->path.parallel_safe &&
1261  subpath->parallel_safe;
1262 
1263  /* All child paths must have same parameterization */
1264  Assert(bms_equal(PATH_REQ_OUTER(subpath), required_outer));
1265  }
1266 
1267  Assert(!parallel_aware || pathnode->path.parallel_safe);
1268 
1269  /*
1270  * If there's exactly one child path, the Append is a no-op and will be
1271  * discarded later (in setrefs.c); therefore, we can inherit the child's
1272  * size and cost, as well as its pathkeys if any (overriding whatever the
1273  * caller might've said). Otherwise, we must do the normal costsize
1274  * calculation.
1275  */
1276  if (list_length(pathnode->subpaths) == 1)
1277  {
1278  Path *child = (Path *) linitial(pathnode->subpaths);
1279 
1280  pathnode->path.rows = child->rows;
1281  pathnode->path.startup_cost = child->startup_cost;
1282  pathnode->path.total_cost = child->total_cost;
1283  pathnode->path.pathkeys = child->pathkeys;
1284  }
1285  else
1286  cost_append(pathnode);
1287 
1288  /* If the caller provided a row estimate, override the computed value. */
1289  if (rows >= 0)
1290  pathnode->path.rows = rows;
1291 
1292  return pathnode;
1293 }
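/*
 * Usage sketch (simplified, in the spirit of add_paths_to_append_rel();
 * "subpaths" and "partitioned_rels" are assumed): an unordered,
 * non-parallel Append over the cheapest child paths is added like this:
 *
 *     add_path(rel, (Path *) create_append_path(root, rel, subpaths, NIL,
 *                                               NIL, NULL, 0, false,
 *                                               partitioned_rels, -1));
 */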
1294 
1295 /*
1296  * append_total_cost_compare
1297  * list_sort comparator for sorting append child paths
1298  * by total_cost descending
1299  *
1300  * For equal total costs, we fall back to comparing startup costs; if those
1301  * are equal too, break ties using bms_compare on the paths' relids.
1302  * (This is to avoid getting unpredictable results from list_sort.)
1303  */
1304 static int
 1305 append_total_cost_compare(const ListCell *a, const ListCell *b)
 1306 {
1307  Path *path1 = (Path *) lfirst(a);
1308  Path *path2 = (Path *) lfirst(b);
1309  int cmp;
1310 
1311  cmp = compare_path_costs(path1, path2, TOTAL_COST);
1312  if (cmp != 0)
1313  return -cmp;
1314  return bms_compare(path1->parent->relids, path2->parent->relids);
1315 }
1316 
1317 /*
1318  * append_startup_cost_compare
1319  * list_sort comparator for sorting append child paths
1320  * by startup_cost descending
1321  *
1322  * For equal startup costs, we fall back to comparing total costs; if those
1323  * are equal too, break ties using bms_compare on the paths' relids.
1324  * (This is to avoid getting unpredictable results from list_sort.)
1325  */
1326 static int
 1327 append_startup_cost_compare(const ListCell *a, const ListCell *b)
 1328 {
1329  Path *path1 = (Path *) lfirst(a);
1330  Path *path2 = (Path *) lfirst(b);
1331  int cmp;
1332 
1333  cmp = compare_path_costs(path1, path2, STARTUP_COST);
1334  if (cmp != 0)
1335  return -cmp;
1336  return bms_compare(path1->parent->relids, path2->parent->relids);
1337 }
1338 
1339 /*
1340  * create_merge_append_path
1341  * Creates a path corresponding to a MergeAppend plan, returning the
1342  * pathnode.
1343  */
 1344 MergeAppendPath *
 1345 create_merge_append_path(PlannerInfo *root,
 1346  RelOptInfo *rel,
1347  List *subpaths,
1348  List *pathkeys,
1349  Relids required_outer,
1350  List *partitioned_rels)
1351 {
 1352  MergeAppendPath *pathnode = makeNode(MergeAppendPath);
 1353  Cost input_startup_cost;
1354  Cost input_total_cost;
1355  ListCell *l;
1356 
1357  pathnode->path.pathtype = T_MergeAppend;
1358  pathnode->path.parent = rel;
1359  pathnode->path.pathtarget = rel->reltarget;
 1360  pathnode->path.param_info = get_appendrel_parampathinfo(rel,
 1361  required_outer);
1362  pathnode->path.parallel_aware = false;
1363  pathnode->path.parallel_safe = rel->consider_parallel;
1364  pathnode->path.parallel_workers = 0;
1365  pathnode->path.pathkeys = pathkeys;
1366  pathnode->partitioned_rels = list_copy(partitioned_rels);
1367  pathnode->subpaths = subpaths;
1368 
1369  /*
1370  * Apply query-wide LIMIT if known and path is for sole base relation.
1371  * (Handling this at this low level is a bit klugy.)
1372  */
1373  if (bms_equal(rel->relids, root->all_baserels))
1374  pathnode->limit_tuples = root->limit_tuples;
1375  else
1376  pathnode->limit_tuples = -1.0;
1377 
1378  /*
1379  * Add up the sizes and costs of the input paths.
1380  */
1381  pathnode->path.rows = 0;
1382  input_startup_cost = 0;
1383  input_total_cost = 0;
1384  foreach(l, subpaths)
1385  {
1386  Path *subpath = (Path *) lfirst(l);
1387 
1388  pathnode->path.rows += subpath->rows;
1389  pathnode->path.parallel_safe = pathnode->path.parallel_safe &&
1390  subpath->parallel_safe;
1391 
1392  if (pathkeys_contained_in(pathkeys, subpath->pathkeys))
1393  {
1394  /* Subpath is adequately ordered, we won't need to sort it */
1395  input_startup_cost += subpath->startup_cost;
1396  input_total_cost += subpath->total_cost;
1397  }
1398  else
1399  {
1400  /* We'll need to insert a Sort node, so include cost for that */
1401  Path sort_path; /* dummy for result of cost_sort */
1402 
1403  cost_sort(&sort_path,
1404  root,
1405  pathkeys,
1406  subpath->total_cost,
1407  subpath->parent->tuples,
1408  subpath->pathtarget->width,
1409  0.0,
1410  work_mem,
1411  pathnode->limit_tuples);
1412  input_startup_cost += sort_path.startup_cost;
1413  input_total_cost += sort_path.total_cost;
1414  }
1415 
1416  /* All child paths must have same parameterization */
1417  Assert(bms_equal(PATH_REQ_OUTER(subpath), required_outer));
1418  }
1419 
1420  /*
1421  * Now we can compute total costs of the MergeAppend. If there's exactly
1422  * one child path, the MergeAppend is a no-op and will be discarded later
1423  * (in setrefs.c); otherwise we do the normal cost calculation.
1424  */
1425  if (list_length(subpaths) == 1)
1426  {
1427  pathnode->path.startup_cost = input_startup_cost;
1428  pathnode->path.total_cost = input_total_cost;
1429  }
1430  else
1431  cost_merge_append(&pathnode->path, root,
1432  pathkeys, list_length(subpaths),
1433  input_startup_cost, input_total_cost,
1434  pathnode->path.rows);
1435 
1436  return pathnode;
1437 }
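/*
 * Usage sketch (simplified, in the spirit of generate_mergeappend_paths();
 * "startup_subpaths" and "partitioned_rels" are assumed): one MergeAppend
 * is added per useful set of pathkeys:
 *
 *     add_path(rel, (Path *) create_merge_append_path(root, rel,
 *                                                     startup_subpaths,
 *                                                     pathkeys, NULL,
 *                                                     partitioned_rels));
 */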
1438 
1439 /*
1440  * create_group_result_path
1441  * Creates a path representing a Result-and-nothing-else plan.
1442  *
1443  * This is only used for degenerate grouping cases, in which we know we
1444  * need to produce one result row, possibly filtered by a HAVING qual.
1445  */
 1446 GroupResultPath *
 1447 create_group_result_path(PlannerInfo *root, RelOptInfo *rel,
 1448  PathTarget *target, List *havingqual)
1449 {
 1450  GroupResultPath *pathnode = makeNode(GroupResultPath);
 1451 
1452  pathnode->path.pathtype = T_Result;
1453  pathnode->path.parent = rel;
1454  pathnode->path.pathtarget = target;
1455  pathnode->path.param_info = NULL; /* there are no other rels... */
1456  pathnode->path.parallel_aware = false;
1457  pathnode->path.parallel_safe = rel->consider_parallel;
1458  pathnode->path.parallel_workers = 0;
1459  pathnode->path.pathkeys = NIL;
1460  pathnode->quals = havingqual;
1461 
1462  /*
1463  * We can't quite use cost_resultscan() because the quals we want to
1464  * account for are not baserestrict quals of the rel. Might as well just
1465  * hack it here.
1466  */
1467  pathnode->path.rows = 1;
1468  pathnode->path.startup_cost = target->cost.startup;
1469  pathnode->path.total_cost = target->cost.startup +
1470  cpu_tuple_cost + target->cost.per_tuple;
1471 
1472  /*
1473  * Add cost of qual, if any --- but we ignore its selectivity, since our
1474  * rowcount estimate should be 1 no matter what the qual is.
1475  */
1476  if (havingqual)
1477  {
1478  QualCost qual_cost;
1479 
1480  cost_qual_eval(&qual_cost, havingqual, root);
1481  /* havingqual is evaluated once at startup */
1482  pathnode->path.startup_cost += qual_cost.startup + qual_cost.per_tuple;
1483  pathnode->path.total_cost += qual_cost.startup + qual_cost.per_tuple;
1484  }
1485 
1486  return pathnode;
1487 }
1488 
1489 /*
1490  * create_material_path
1491  * Creates a path corresponding to a Material plan, returning the
1492  * pathnode.
1493  */
1494 MaterialPath *
 1495 create_material_path(RelOptInfo *rel, Path *subpath)
 1496 {
1497  MaterialPath *pathnode = makeNode(MaterialPath);
1498 
1499  Assert(subpath->parent == rel);
1500 
1501  pathnode->path.pathtype = T_Material;
1502  pathnode->path.parent = rel;
1503  pathnode->path.pathtarget = rel->reltarget;
1504  pathnode->path.param_info = subpath->param_info;
1505  pathnode->path.parallel_aware = false;
1506  pathnode->path.parallel_safe = rel->consider_parallel &&
1507  subpath->parallel_safe;
1508  pathnode->path.parallel_workers = subpath->parallel_workers;
1509  pathnode->path.pathkeys = subpath->pathkeys;
1510 
1511  pathnode->subpath = subpath;
1512 
1513  cost_material(&pathnode->path,
1514  subpath->startup_cost,
1515  subpath->total_cost,
1516  subpath->rows,
1517  subpath->pathtarget->width);
1518 
1519  return pathnode;
1520 }
1521 
1522 /*
1523  * create_unique_path
1524  * Creates a path representing elimination of distinct rows from the
1525  * input data. Distinct-ness is defined according to the needs of the
1526  * semijoin represented by sjinfo. If it is not possible to identify
1527  * how to make the data unique, NULL is returned.
1528  *
1529  * If used at all, this is likely to be called repeatedly on the same rel;
1530  * and the input subpath should always be the same (the cheapest_total path
1531  * for the rel). So we cache the result.
1532  */
1533 UniquePath *
 1534 create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
 1535  SpecialJoinInfo *sjinfo)
1536 {
1537  UniquePath *pathnode;
1538  Path sort_path; /* dummy for result of cost_sort */
1539  Path agg_path; /* dummy for result of cost_agg */
1540  MemoryContext oldcontext;
1541  int numCols;
1542 
1543  /* Caller made a mistake if subpath isn't cheapest_total ... */
1544  Assert(subpath == rel->cheapest_total_path);
1545  Assert(subpath->parent == rel);
1546  /* ... or if SpecialJoinInfo is the wrong one */
1547  Assert(sjinfo->jointype == JOIN_SEMI);
1548  Assert(bms_equal(rel->relids, sjinfo->syn_righthand));
1549 
1550  /* If result already cached, return it */
1551  if (rel->cheapest_unique_path)
1552  return (UniquePath *) rel->cheapest_unique_path;
1553 
1554  /* If it's not possible to unique-ify, return NULL */
1555  if (!(sjinfo->semi_can_btree || sjinfo->semi_can_hash))
1556  return NULL;
1557 
1558  /*
1559  * When called during GEQO join planning, we are in a short-lived memory
1560  * context. We must make sure that the path and any subsidiary data
1561  * structures created for a baserel survive the GEQO cycle, else the
1562  * baserel is trashed for future GEQO cycles. On the other hand, when we
1563  * are creating those for a joinrel during GEQO, we don't want them to
1564  * clutter the main planning context. Upshot is that the best solution is
1565  * to explicitly allocate memory in the same context the given RelOptInfo
1566  * is in.
1567  */
1568  oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
1569 
1570  pathnode = makeNode(UniquePath);
1571 
1572  pathnode->path.pathtype = T_Unique;
1573  pathnode->path.parent = rel;
1574  pathnode->path.pathtarget = rel->reltarget;
1575  pathnode->path.param_info = subpath->param_info;
1576  pathnode->path.parallel_aware = false;
1577  pathnode->path.parallel_safe = rel->consider_parallel &&
1578  subpath->parallel_safe;
1579  pathnode->path.parallel_workers = subpath->parallel_workers;
1580 
1581  /*
1582  * Assume the output is unsorted, since we don't necessarily have pathkeys
1583  * to represent it. (This might get overridden below.)
1584  */
1585  pathnode->path.pathkeys = NIL;
1586 
1587  pathnode->subpath = subpath;
1588  pathnode->in_operators = sjinfo->semi_operators;
1589  pathnode->uniq_exprs = sjinfo->semi_rhs_exprs;
1590 
1591  /*
1592  * If the input is a relation and it has a unique index that proves the
1593  * semi_rhs_exprs are unique, then we don't need to do anything. Note
1594  * that relation_has_unique_index_for automatically considers restriction
1595  * clauses for the rel, as well.
1596  */
1597  if (rel->rtekind == RTE_RELATION && sjinfo->semi_can_btree &&
 1598  relation_has_unique_index_for(root, rel, NIL,
 1599  sjinfo->semi_rhs_exprs,
1600  sjinfo->semi_operators))
1601  {
1602  pathnode->umethod = UNIQUE_PATH_NOOP;
1603  pathnode->path.rows = rel->rows;
1604  pathnode->path.startup_cost = subpath->startup_cost;
1605  pathnode->path.total_cost = subpath->total_cost;
1606  pathnode->path.pathkeys = subpath->pathkeys;
1607 
1608  rel->cheapest_unique_path = (Path *) pathnode;
1609 
1610  MemoryContextSwitchTo(oldcontext);
1611 
1612  return pathnode;
1613  }
1614 
1615  /*
1616  * If the input is a subquery whose output must be unique already, then we
1617  * don't need to do anything. The test for uniqueness has to consider
1618  * exactly which columns we are extracting; for example "SELECT DISTINCT
1619  * x,y" doesn't guarantee that x alone is distinct. So we cannot check for
1620  * this optimization unless semi_rhs_exprs consists only of simple Vars
1621  * referencing subquery outputs. (Possibly we could do something with
1622  * expressions in the subquery outputs, too, but for now keep it simple.)
1623  */
1624  if (rel->rtekind == RTE_SUBQUERY)
1625  {
1626  RangeTblEntry *rte = planner_rt_fetch(rel->relid, root);
1627 
1628  if (query_supports_distinctness(rte->subquery))
1629  {
1630  List *sub_tlist_colnos;
1631 
1632  sub_tlist_colnos = translate_sub_tlist(sjinfo->semi_rhs_exprs,
1633  rel->relid);
1634 
1635  if (sub_tlist_colnos &&
1636  query_is_distinct_for(rte->subquery,
1637  sub_tlist_colnos,
1638  sjinfo->semi_operators))
1639  {
1640  pathnode->umethod = UNIQUE_PATH_NOOP;
1641  pathnode->path.rows = rel->rows;
1642  pathnode->path.startup_cost = subpath->startup_cost;
1643  pathnode->path.total_cost = subpath->total_cost;
1644  pathnode->path.pathkeys = subpath->pathkeys;
1645 
1646  rel->cheapest_unique_path = (Path *) pathnode;
1647 
1648  MemoryContextSwitchTo(oldcontext);
1649 
1650  return pathnode;
1651  }
1652  }
1653  }
1654 
1655  /* Estimate number of output rows */
1656  pathnode->path.rows = estimate_num_groups(root,
1657  sjinfo->semi_rhs_exprs,
1658  rel->rows,
1659  NULL);
1660  numCols = list_length(sjinfo->semi_rhs_exprs);
1661 
1662  if (sjinfo->semi_can_btree)
1663  {
1664  /*
1665  * Estimate cost for sort+unique implementation
1666  */
1667  cost_sort(&sort_path, root, NIL,
1668  subpath->total_cost,
1669  rel->rows,
1670  subpath->pathtarget->width,
1671  0.0,
1672  work_mem,
1673  -1.0);
1674 
1675  /*
1676  * Charge one cpu_operator_cost per comparison per input tuple. We
1677  * assume all columns get compared for most of the tuples. (XXX
1678  * probably this is an overestimate.) This should agree with
1679  * create_upper_unique_path.
1680  */
1681  sort_path.total_cost += cpu_operator_cost * rel->rows * numCols;
1682  }
1683 
1684  if (sjinfo->semi_can_hash)
1685  {
1686  /*
1687  * Estimate the overhead per hashtable entry at 64 bytes (same as in
1688  * planner.c).
1689  */
1690  int hashentrysize = subpath->pathtarget->width + 64;
1691 
1692  if (hashentrysize * pathnode->path.rows > work_mem * 1024L)
1693  {
1694  /*
1695  * We should not try to hash. Hack the SpecialJoinInfo to
1696  * remember this, in case we come through here again.
1697  */
1698  sjinfo->semi_can_hash = false;
1699  }
1700  else
1701  cost_agg(&agg_path, root,
1702  AGG_HASHED, NULL,
1703  numCols, pathnode->path.rows,
1704  NIL,
1705  subpath->startup_cost,
1706  subpath->total_cost,
1707  rel->rows);
1708  }
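/*
 * Editor's note (illustrative, not part of pathnode.c): a worked example of
 * the memory test above, assuming the default work_mem of 4MB (4096kB).
 * With a subpath tuple width of 40 bytes, hashentrysize is 40 + 64 = 104
 * bytes; at an estimated 100,000 output rows the hash table would need
 * about 10.4MB, which exceeds work_mem * 1024 = 4,194,304 bytes, so
 * semi_can_hash is cleared and only the sort-based method remains.
 */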
1709 
1710  if (sjinfo->semi_can_btree && sjinfo->semi_can_hash)
1711  {
1712  if (agg_path.total_cost < sort_path.total_cost)
1713  pathnode->umethod = UNIQUE_PATH_HASH;
1714  else
1715  pathnode->umethod = UNIQUE_PATH_SORT;
1716  }
1717  else if (sjinfo->semi_can_btree)
1718  pathnode->umethod = UNIQUE_PATH_SORT;
1719  else if (sjinfo->semi_can_hash)
1720  pathnode->umethod = UNIQUE_PATH_HASH;
1721  else
1722  {
1723  /* we can get here only if we abandoned hashing above */
1724  MemoryContextSwitchTo(oldcontext);
1725  return NULL;
1726  }
1727 
1728  if (pathnode->umethod == UNIQUE_PATH_HASH)
1729  {
1730  pathnode->path.startup_cost = agg_path.startup_cost;
1731  pathnode->path.total_cost = agg_path.total_cost;
1732  }
1733  else
1734  {
1735  pathnode->path.startup_cost = sort_path.startup_cost;
1736  pathnode->path.total_cost = sort_path.total_cost;
1737  }
1738 
1739  rel->cheapest_unique_path = (Path *) pathnode;
1740 
1741  MemoryContextSwitchTo(oldcontext);
1742 
1743  return pathnode;
1744 }
1745 
1746 /*
1747  * create_gather_merge_path
1748  *
1749  * Creates a path corresponding to a gather merge scan, returning
1750  * the pathnode.
1751  */
1752 GatherMergePath *
1753 create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
1754  PathTarget *target, List *pathkeys,
1755  Relids required_outer, double *rows)
1756 {
1757  GatherMergePath *pathnode = makeNode(GatherMergePath);
1758  Cost input_startup_cost = 0;
1759  Cost input_total_cost = 0;
1760 
1761  Assert(subpath->parallel_safe);
1762  Assert(pathkeys);
1763 
1764  pathnode->path.pathtype = T_GatherMerge;
1765  pathnode->path.parent = rel;
1766  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1767  required_outer);
1768  pathnode->path.parallel_aware = false;
1769 
1770  pathnode->subpath = subpath;
1771  pathnode->num_workers = subpath->parallel_workers;
1772  pathnode->path.pathkeys = pathkeys;
1773  pathnode->path.pathtarget = target ? target : rel->reltarget;
1774  pathnode->path.rows += subpath->rows;
1775 
1776  if (pathkeys_contained_in(pathkeys, subpath->pathkeys))
1777  {
1778  /* Subpath is adequately ordered, we won't need to sort it */
1779  input_startup_cost += subpath->startup_cost;
1780  input_total_cost += subpath->total_cost;
1781  }
1782  else
1783  {
1784  /* We'll need to insert a Sort node, so include cost for that */
1785  Path sort_path; /* dummy for result of cost_sort */
1786 
1787  cost_sort(&sort_path,
1788  root,
1789  pathkeys,
1790  subpath->total_cost,
1791  subpath->rows,
1792  subpath->pathtarget->width,
1793  0.0,
1794  work_mem,
1795  -1);
1796  input_startup_cost += sort_path.startup_cost;
1797  input_total_cost += sort_path.total_cost;
1798  }
1799 
1800  cost_gather_merge(pathnode, root, rel, pathnode->path.param_info,
1801  input_startup_cost, input_total_cost, rows);
1802 
1803  return pathnode;
1804 }
1805 
1806 /*
1807  * translate_sub_tlist - get subquery column numbers represented by tlist
1808  *
1809  * The given targetlist usually contains only Vars referencing the given relid.
1810  * Extract their varattnos (ie, the column numbers of the subquery) and return
1811  * as an integer List.
1812  *
1813  * If any of the tlist items is not a simple Var, we cannot determine whether
1814  * the subquery's uniqueness condition (if any) matches ours, so punt and
1815  * return NIL.
1816  */
1817 static List *
1818 translate_sub_tlist(List *tlist, int relid)
1819 {
1820  List *result = NIL;
1821  ListCell *l;
1822 
1823  foreach(l, tlist)
1824  {
1825  Var *var = (Var *) lfirst(l);
1826 
1827  if (!var || !IsA(var, Var) ||
1828  var->varno != relid)
1829  return NIL; /* punt */
1830 
1831  result = lappend_int(result, var->varattno);
1832  }
1833  return result;
1834 }
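/*
 * Editor's note (illustrative, not part of pathnode.c): for example, given a
 * tlist containing Vars (varno 3, varattno 2) and (varno 3, varattno 5) with
 * relid = 3, translate_sub_tlist returns the integer list (2 5); if any entry
 * were an expression rather than a plain Var of that relid, it would punt and
 * return NIL.
 */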
1835 
1836 /*
1837  * create_gather_path
1838  * Creates a path corresponding to a gather scan, returning the
1839  * pathnode.
1840  *
1841  * 'rows' may optionally be set to override row estimates from other sources.
1842  */
1843 GatherPath *
1844 create_gather_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
1845  PathTarget *target, Relids required_outer, double *rows)
1846 {
1847  GatherPath *pathnode = makeNode(GatherPath);
1848 
1849  Assert(subpath->parallel_safe);
1850 
1851  pathnode->path.pathtype = T_Gather;
1852  pathnode->path.parent = rel;
1853  pathnode->path.pathtarget = target;
1854  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1855  required_outer);
1856  pathnode->path.parallel_aware = false;
1857  pathnode->path.parallel_safe = false;
1858  pathnode->path.parallel_workers = 0;
1859  pathnode->path.pathkeys = NIL; /* Gather has unordered result */
1860 
1861  pathnode->subpath = subpath;
1862  pathnode->num_workers = subpath->parallel_workers;
1863  pathnode->single_copy = false;
1864 
1865  if (pathnode->num_workers == 0)
1866  {
1867  pathnode->path.pathkeys = subpath->pathkeys;
1868  pathnode->num_workers = 1;
1869  pathnode->single_copy = true;
1870  }
1871 
1872  cost_gather(pathnode, root, rel, pathnode->path.param_info, rows);
1873 
1874  return pathnode;
1875 }
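/*
 * Editor's sketch (not part of pathnode.c): a typical calling pattern for
 * create_gather_path, modeled on the way generate_gather_paths() builds a
 * Gather over a rel's cheapest partial path.  The function name is
 * illustrative only.  Note that create_gather_path does not default the
 * target, so rel->reltarget is passed explicitly.
 */
static void
example_add_gather_path(PlannerInfo *root, RelOptInfo *rel)
{
	Path	   *cheapest_partial_path;
	GatherPath *gpath;

	if (rel->partial_pathlist == NIL)
		return;					/* nothing to gather */

	cheapest_partial_path = (Path *) linitial(rel->partial_pathlist);

	gpath = create_gather_path(root, rel, cheapest_partial_path,
							   rel->reltarget,
							   NULL,	/* no required outer rels */
							   NULL);	/* rows: use default estimate */
	add_path(rel, (Path *) gpath);
}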
1876 
1877 /*
1878  * create_subqueryscan_path
1879  * Creates a path corresponding to a scan of a subquery,
1880  * returning the pathnode.
1881  */
1882 SubqueryScanPath *
1883 create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
1884  List *pathkeys, Relids required_outer)
1885 {
1886  SubqueryScanPath *pathnode = makeNode(SubqueryScanPath);
1887 
1888  pathnode->path.pathtype = T_SubqueryScan;
1889  pathnode->path.parent = rel;
1890  pathnode->path.pathtarget = rel->reltarget;
1891  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1892  required_outer);
1893  pathnode->path.parallel_aware = false;
1894  pathnode->path.parallel_safe = rel->consider_parallel &&
1895  subpath->parallel_safe;
1896  pathnode->path.parallel_workers = subpath->parallel_workers;
1897  pathnode->path.pathkeys = pathkeys;
1898  pathnode->subpath = subpath;
1899 
1900  cost_subqueryscan(pathnode, root, rel, pathnode->path.param_info);
1901 
1902  return pathnode;
1903 }
1904 
1905 /*
1906  * create_functionscan_path
1907  * Creates a path corresponding to a sequential scan of a function,
1908  * returning the pathnode.
1909  */
1910 Path *
1911 create_functionscan_path(PlannerInfo *root, RelOptInfo *rel,
1912  List *pathkeys, Relids required_outer)
1913 {
1914  Path *pathnode = makeNode(Path);
1915 
1916  pathnode->pathtype = T_FunctionScan;
1917  pathnode->parent = rel;
1918  pathnode->pathtarget = rel->reltarget;
1919  pathnode->param_info = get_baserel_parampathinfo(root, rel,
1920  required_outer);
1921  pathnode->parallel_aware = false;
1922  pathnode->parallel_safe = rel->consider_parallel;
1923  pathnode->parallel_workers = 0;
1924  pathnode->pathkeys = pathkeys;
1925 
1926  cost_functionscan(pathnode, root, rel, pathnode->param_info);
1927 
1928  return pathnode;
1929 }
1930 
1931 /*
1932  * create_tablefuncscan_path
1933  * Creates a path corresponding to a sequential scan of a table function,
1934  * returning the pathnode.
1935  */
1936 Path *
1937 create_tablefuncscan_path(PlannerInfo *root, RelOptInfo *rel,
1938  Relids required_outer)
1939 {
1940  Path *pathnode = makeNode(Path);
1941 
1942  pathnode->pathtype = T_TableFuncScan;
1943  pathnode->parent = rel;
1944  pathnode->pathtarget = rel->reltarget;
1945  pathnode->param_info = get_baserel_parampathinfo(root, rel,
1946  required_outer);
1947  pathnode->parallel_aware = false;
1948  pathnode->parallel_safe = rel->consider_parallel;
1949  pathnode->parallel_workers = 0;
1950  pathnode->pathkeys = NIL; /* result is always unordered */
1951 
1952  cost_tablefuncscan(pathnode, root, rel, pathnode->param_info);
1953 
1954  return pathnode;
1955 }
1956 
1957 /*
1958  * create_valuesscan_path
1959  * Creates a path corresponding to a scan of a VALUES list,
1960  * returning the pathnode.
1961  */
1962 Path *
1963 create_valuesscan_path(PlannerInfo *root, RelOptInfo *rel,
1964  Relids required_outer)
1965 {
1966  Path *pathnode = makeNode(Path);
1967 
1968  pathnode->pathtype = T_ValuesScan;
1969  pathnode->parent = rel;
1970  pathnode->pathtarget = rel->reltarget;
1971  pathnode->param_info = get_baserel_parampathinfo(root, rel,
1972  required_outer);
1973  pathnode->parallel_aware = false;
1974  pathnode->parallel_safe = rel->consider_parallel;
1975  pathnode->parallel_workers = 0;
1976  pathnode->pathkeys = NIL; /* result is always unordered */
1977 
1978  cost_valuesscan(pathnode, root, rel, pathnode->param_info);
1979 
1980  return pathnode;
1981 }
1982 
1983 /*
1984  * create_ctescan_path
1985  * Creates a path corresponding to a scan of a non-self-reference CTE,
1986  * returning the pathnode.
1987  */
1988 Path *
1989 create_ctescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
1990 {
1991  Path *pathnode = makeNode(Path);
1992 
1993  pathnode->pathtype = T_CteScan;
1994  pathnode->parent = rel;
1995  pathnode->pathtarget = rel->reltarget;
1996  pathnode->param_info = get_baserel_parampathinfo(root, rel,
1997  required_outer);
1998  pathnode->parallel_aware = false;
1999  pathnode->parallel_safe = rel->consider_parallel;
2000  pathnode->parallel_workers = 0;
2001  pathnode->pathkeys = NIL; /* XXX for now, result is always unordered */
2002 
2003  cost_ctescan(pathnode, root, rel, pathnode->param_info);
2004 
2005  return pathnode;
2006 }
2007 
2008 /*
2009  * create_namedtuplestorescan_path
2010  * Creates a path corresponding to a scan of a named tuplestore, returning
2011  * the pathnode.
2012  */
2013 Path *
2014 create_namedtuplestorescan_path(PlannerInfo *root, RelOptInfo *rel,
2015  Relids required_outer)
2016 {
2017  Path *pathnode = makeNode(Path);
2018 
2019  pathnode->pathtype = T_NamedTuplestoreScan;
2020  pathnode->parent = rel;
2021  pathnode->pathtarget = rel->reltarget;
2022  pathnode->param_info = get_baserel_parampathinfo(root, rel,
2023  required_outer);
2024  pathnode->parallel_aware = false;
2025  pathnode->parallel_safe = rel->consider_parallel;
2026  pathnode->parallel_workers = 0;
2027  pathnode->pathkeys = NIL; /* result is always unordered */
2028 
2029  cost_namedtuplestorescan(pathnode, root, rel, pathnode->param_info);
2030 
2031  return pathnode;
2032 }
2033 
2034 /*
2035  * create_resultscan_path
2036  * Creates a path corresponding to a scan of an RTE_RESULT relation,
2037  * returning the pathnode.
2038  */
2039 Path *
2040 create_resultscan_path(PlannerInfo *root, RelOptInfo *rel,
2041  Relids required_outer)
2042 {
2043  Path *pathnode = makeNode(Path);
2044 
2045  pathnode->pathtype = T_Result;
2046  pathnode->parent = rel;
2047  pathnode->pathtarget = rel->reltarget;
2048  pathnode->param_info = get_baserel_parampathinfo(root, rel,
2049  required_outer);
2050  pathnode->parallel_aware = false;
2051  pathnode->parallel_safe = rel->consider_parallel;
2052  pathnode->parallel_workers = 0;
2053  pathnode->pathkeys = NIL; /* result is always unordered */
2054 
2055  cost_resultscan(pathnode, root, rel, pathnode->param_info);
2056 
2057  return pathnode;
2058 }
2059 
2060 /*
2061  * create_worktablescan_path
2062  * Creates a path corresponding to a scan of a self-reference CTE,
2063  * returning the pathnode.
2064  */
2065 Path *
2066 create_worktablescan_path(PlannerInfo *root, RelOptInfo *rel,
2067  Relids required_outer)
2068 {
2069  Path *pathnode = makeNode(Path);
2070 
2071  pathnode->pathtype = T_WorkTableScan;
2072  pathnode->parent = rel;
2073  pathnode->pathtarget = rel->reltarget;
2074  pathnode->param_info = get_baserel_parampathinfo(root, rel,
2075  required_outer);
2076  pathnode->parallel_aware = false;
2077  pathnode->parallel_safe = rel->consider_parallel;
2078  pathnode->parallel_workers = 0;
2079  pathnode->pathkeys = NIL; /* result is always unordered */
2080 
2081  /* Cost is the same as for a regular CTE scan */
2082  cost_ctescan(pathnode, root, rel, pathnode->param_info);
2083 
2084  return pathnode;
2085 }
2086 
2087 /*
2088  * create_foreignscan_path
2089  * Creates a path corresponding to a scan of a foreign base table,
2090  * returning the pathnode.
2091  *
2092  * This function is never called from core Postgres; rather, it's expected
2093  * to be called by the GetForeignPaths function of a foreign data wrapper.
2094  * We make the FDW supply all fields of the path, since we do not have any way
2095  * to calculate them in core. However, there is a usually-sane default for
2096  * the pathtarget (rel->reltarget), so we let a NULL for "target" select that.
2097  */
2098 ForeignPath *
2099 create_foreignscan_path(PlannerInfo *root, RelOptInfo *rel,
2100  PathTarget *target,
2101  double rows, Cost startup_cost, Cost total_cost,
2102  List *pathkeys,
2103  Relids required_outer,
2104  Path *fdw_outerpath,
2105  List *fdw_private)
2106 {
2107  ForeignPath *pathnode = makeNode(ForeignPath);
2108 
2109  /* Historically some FDWs were confused about when to use this */
2110  Assert(IS_SIMPLE_REL(rel));
2111 
2112  pathnode->path.pathtype = T_ForeignScan;
2113  pathnode->path.parent = rel;
2114  pathnode->path.pathtarget = target ? target : rel->reltarget;
2115  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
2116  required_outer);
2117  pathnode->path.parallel_aware = false;
2118  pathnode->path.parallel_safe = rel->consider_parallel;
2119  pathnode->path.parallel_workers = 0;
2120  pathnode->path.rows = rows;
2121  pathnode->path.startup_cost = startup_cost;
2122  pathnode->path.total_cost = total_cost;
2123  pathnode->path.pathkeys = pathkeys;
2124 
2125  pathnode->fdw_outerpath = fdw_outerpath;
2126  pathnode->fdw_private = fdw_private;
2127 
2128  return pathnode;
2129 }
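/*
 * Editor's sketch (not part of pathnode.c): how an FDW's GetForeignPaths
 * callback might use create_foreignscan_path.  The function name and the
 * cost/row figures are hypothetical placeholders; a real FDW would derive
 * them from its own statistics or a remote estimate.
 */
static void
exampleGetForeignPaths(PlannerInfo *root, RelOptInfo *baserel,
					   Oid foreigntableid)
{
	ForeignPath *path;

	path = create_foreignscan_path(root, baserel,
								   NULL,	/* default pathtarget (rel->reltarget) */
								   baserel->rows,
								   10.0,	/* assumed startup cost */
								   baserel->rows + 10.0,	/* assumed total cost */
								   NIL,		/* no pathkeys */
								   NULL,	/* no required outer rels */
								   NULL,	/* no fdw_outerpath */
								   NIL);	/* no fdw_private data */
	add_path(baserel, (Path *) path);
}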
2130 
2131 /*
2132  * create_foreign_join_path
2133  * Creates a path corresponding to a scan of a foreign join,
2134  * returning the pathnode.
2135  *
2136  * This function is never called from core Postgres; rather, it's expected
2137  * to be called by the GetForeignJoinPaths function of a foreign data wrapper.
2138  * We make the FDW supply all fields of the path, since we do not have any way
2139  * to calculate them in core. However, there is a usually-sane default for
2140  * the pathtarget (rel->reltarget), so we let a NULL for "target" select that.
2141  */
2142 ForeignPath *
2143 create_foreign_join_path(PlannerInfo *root, RelOptInfo *rel,
2144  PathTarget *target,
2145  double rows, Cost startup_cost, Cost total_cost,
2146  List *pathkeys,
2147  Relids required_outer,
2148  Path *fdw_outerpath,
2149  List *fdw_private)
2150 {
2151  ForeignPath *pathnode = makeNode(ForeignPath);
2152 
2153  /*
2154  * We should use get_joinrel_parampathinfo to handle parameterized paths,
2155  * but the API of this function doesn't support it, and existing
2156  * extensions aren't yet trying to build such paths anyway. For the
2157  * moment just throw an error if someone tries it; eventually we should
2158  * revisit this.
2159  */
2160  if (!bms_is_empty(required_outer) || !bms_is_empty(rel->lateral_relids))
2161  elog(ERROR, "parameterized foreign joins are not supported yet");
2162 
2163  pathnode->path.pathtype = T_ForeignScan;
2164  pathnode->path.parent = rel;
2165  pathnode->path.pathtarget = target ? target : rel->reltarget;
2166  pathnode->path.param_info = NULL; /* XXX see above */
2167  pathnode->path.parallel_aware = false;
2168  pathnode->path.parallel_safe = rel->consider_parallel;
2169  pathnode->path.parallel_workers = 0;
2170  pathnode->path.rows = rows;
2171  pathnode->path.startup_cost = startup_cost;
2172  pathnode->path.total_cost = total_cost;
2173  pathnode->path.pathkeys = pathkeys;
2174 
2175  pathnode->fdw_outerpath = fdw_outerpath;
2176  pathnode->fdw_private = fdw_private;
2177 
2178  return pathnode;
2179 }
2180 
2181 /*
2182  * create_foreign_upper_path
2183  * Creates a path corresponding to an upper relation that's computed
2184  * directly by an FDW, returning the pathnode.
2185  *
2186  * This function is never called from core Postgres; rather, it's expected to
2187  * be called by the GetForeignUpperPaths function of a foreign data wrapper.
2188  * We make the FDW supply all fields of the path, since we do not have any way
2189  * to calculate them in core. However, there is a usually-sane default for
2190  * the pathtarget (rel->reltarget), so we let a NULL for "target" select that.
2191  */
2192 ForeignPath *
2193 create_foreign_upper_path(PlannerInfo *root, RelOptInfo *rel,
2194  PathTarget *target,
2195  double rows, Cost startup_cost, Cost total_cost,
2196  List *pathkeys,
2197  Path *fdw_outerpath,
2198  List *fdw_private)
2199 {
2200  ForeignPath *pathnode = makeNode(ForeignPath);
2201 
2202  /*
2203  * Upper relations should never have any lateral references, since joining
2204  * is complete.
2205  */
2206  Assert(bms_is_empty(rel->lateral_relids));
2207 
2208  pathnode->path.pathtype = T_ForeignScan;
2209  pathnode->path.parent = rel;
2210  pathnode->path.pathtarget = target ? target : rel->reltarget;
2211  pathnode->path.param_info = NULL;
2212  pathnode->path.parallel_aware = false;
2213  pathnode->path.parallel_safe = rel->consider_parallel;
2214  pathnode->path.parallel_workers = 0;
2215  pathnode->path.rows = rows;
2216  pathnode->path.startup_cost = startup_cost;
2217  pathnode->path.total_cost = total_cost;
2218  pathnode->path.pathkeys = pathkeys;
2219 
2220  pathnode->fdw_outerpath = fdw_outerpath;
2221  pathnode->fdw_private = fdw_private;
2222 
2223  return pathnode;
2224 }
2225 
2226 /*
2227  * calc_nestloop_required_outer
2228  * Compute the required_outer set for a nestloop join path
2229  *
2230  * Note: result must not share storage with either input
2231  */
2232 Relids
2233 calc_nestloop_required_outer(Relids outerrelids,
2234  Relids outer_paramrels,
2235  Relids innerrelids,
2236  Relids inner_paramrels)
2237 {
2238  Relids required_outer;
2239 
2240  /* inner_path can require rels from outer path, but not vice versa */
2241  Assert(!bms_overlap(outer_paramrels, innerrelids));
2242  /* easy case if inner path is not parameterized */
2243  if (!inner_paramrels)
2244  return bms_copy(outer_paramrels);
2245  /* else, form the union ... */
2246  required_outer = bms_union(outer_paramrels, inner_paramrels);
2247  /* ... and remove any mention of now-satisfied outer rels */
2248  required_outer = bms_del_members(required_outer,
2249  outerrelids);
2250  /* maintain invariant that required_outer is exactly NULL if empty */
2251  if (bms_is_empty(required_outer))
2252  {
2253  bms_free(required_outer);
2254  required_outer = NULL;
2255  }
2256  return required_outer;
2257 }
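/*
 * Editor's note (illustrative, not part of pathnode.c): a worked example.
 * Suppose the outer path scans rel {1} and itself requires outer rel {3},
 * while the inner path scans rel {2} and requires {1, 3}.  The union of the
 * parameter sets is {1, 3}; deleting the outer path's own relids {1} leaves
 * {3}, which is what the resulting nestloop still requires from above.
 */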
2258 
2259 /*
2260  * calc_non_nestloop_required_outer
2261  * Compute the required_outer set for a merge or hash join path
2262  *
2263  * Note: result must not share storage with either input
2264  */
2265 Relids
2266 calc_non_nestloop_required_outer(Path *outer_path, Path *inner_path)
2267 {
2268  Relids outer_paramrels = PATH_REQ_OUTER(outer_path);
2269  Relids inner_paramrels = PATH_REQ_OUTER(inner_path);
2270  Relids required_outer;
2271 
2272  /* neither path can require rels from the other */
2273  Assert(!bms_overlap(outer_paramrels, inner_path->parent->relids));
2274  Assert(!bms_overlap(inner_paramrels, outer_path->parent->relids));
2275  /* form the union ... */
2276  required_outer = bms_union(outer_paramrels, inner_paramrels);
2277  /* we do not need an explicit test for empty; bms_union gets it right */
2278  return required_outer;
2279 }
2280 
2281 /*
2282  * create_nestloop_path
2283  * Creates a pathnode corresponding to a nestloop join between two
2284  * relations.
2285  *
2286  * 'joinrel' is the join relation.
2287  * 'jointype' is the type of join required
2288  * 'workspace' is the result from initial_cost_nestloop
2289  * 'extra' contains various information about the join
2290  * 'outer_path' is the outer path
2291  * 'inner_path' is the inner path
2292  * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
2293  * 'pathkeys' are the path keys of the new join path
2294  * 'required_outer' is the set of required outer rels
2295  *
2296  * Returns the resulting path node.
2297  */
2298 NestPath *
2299 create_nestloop_path(PlannerInfo *root,
2300  RelOptInfo *joinrel,
2301  JoinType jointype,
2302  JoinCostWorkspace *workspace,
2303  JoinPathExtraData *extra,
2304  Path *outer_path,
2305  Path *inner_path,
2306  List *restrict_clauses,
2307  List *pathkeys,
2308  Relids required_outer)
2309 {
2310  NestPath *pathnode = makeNode(NestPath);
2311  Relids inner_req_outer = PATH_REQ_OUTER(inner_path);
2312 
2313  /*
2314  * If the inner path is parameterized by the outer, we must drop any
2315  * restrict_clauses that are due to be moved into the inner path. We have
2316  * to do this now, rather than postpone the work till createplan time,
2317  * because the restrict_clauses list can affect the size and cost
2318  * estimates for this path.
2319  */
2320  if (bms_overlap(inner_req_outer, outer_path->parent->relids))
2321  {
2322  Relids inner_and_outer = bms_union(inner_path->parent->relids,
2323  inner_req_outer);
2324  List *jclauses = NIL;
2325  ListCell *lc;
2326 
2327  foreach(lc, restrict_clauses)
2328  {
2329  RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
2330 
2331  if (!join_clause_is_movable_into(rinfo,
2332  inner_path->parent->relids,
2333  inner_and_outer))
2334  jclauses = lappend(jclauses, rinfo);
2335  }
2336  restrict_clauses = jclauses;
2337  }
2338 
2339  pathnode->path.pathtype = T_NestLoop;
2340  pathnode->path.parent = joinrel;
2341  pathnode->path.pathtarget = joinrel->reltarget;
2342  pathnode->path.param_info =
2343  get_joinrel_parampathinfo(root,
2344  joinrel,
2345  outer_path,
2346  inner_path,
2347  extra->sjinfo,
2348  required_outer,
2349  &restrict_clauses);
2350  pathnode->path.parallel_aware = false;
2351  pathnode->path.parallel_safe = joinrel->consider_parallel &&
2352  outer_path->parallel_safe && inner_path->parallel_safe;
2353  /* This is a foolish way to estimate parallel_workers, but for now... */
2354  pathnode->path.parallel_workers = outer_path->parallel_workers;
2355  pathnode->path.pathkeys = pathkeys;
2356  pathnode->jointype = jointype;
2357  pathnode->inner_unique = extra->inner_unique;
2358  pathnode->outerjoinpath = outer_path;
2359  pathnode->innerjoinpath = inner_path;
2360  pathnode->joinrestrictinfo = restrict_clauses;
2361 
2362  final_cost_nestloop(root, pathnode, workspace, extra);
2363 
2364  return pathnode;
2365 }
2366 
2367 /*
2368  * create_mergejoin_path
2369  * Creates a pathnode corresponding to a mergejoin join between
2370  * two relations
2371  *
2372  * 'joinrel' is the join relation
2373  * 'jointype' is the type of join required
2374  * 'workspace' is the result from initial_cost_mergejoin
2375  * 'extra' contains various information about the join
2376  * 'outer_path' is the outer path
2377  * 'inner_path' is the inner path
2378  * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
2379  * 'pathkeys' are the path keys of the new join path
2380  * 'required_outer' is the set of required outer rels
2381  * 'mergeclauses' are the RestrictInfo nodes to use as merge clauses
2382  * (this should be a subset of the restrict_clauses list)
2383  * 'outersortkeys' are the sort varkeys for the outer relation
2384  * 'innersortkeys' are the sort varkeys for the inner relation
2385  */
2386 MergePath *
2387 create_mergejoin_path(PlannerInfo *root,
2388  RelOptInfo *joinrel,
2389  JoinType jointype,
2390  JoinCostWorkspace *workspace,
2391  JoinPathExtraData *extra,
2392  Path *outer_path,
2393  Path *inner_path,
2394  List *restrict_clauses,
2395  List *pathkeys,
2396  Relids required_outer,
2397  List *mergeclauses,
2398  List *outersortkeys,
2399  List *innersortkeys)
2400 {
2401  MergePath *pathnode = makeNode(MergePath);
2402 
2403  pathnode->jpath.path.pathtype = T_MergeJoin;
2404  pathnode->jpath.path.parent = joinrel;
2405  pathnode->jpath.path.pathtarget = joinrel->reltarget;
2406  pathnode->jpath.path.param_info =
2407  get_joinrel_parampathinfo(root,
2408  joinrel,
2409  outer_path,
2410  inner_path,
2411  extra->sjinfo,
2412  required_outer,
2413  &restrict_clauses);
2414  pathnode->jpath.path.parallel_aware = false;
2415  pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2416  outer_path->parallel_safe && inner_path->parallel_safe;
2417  /* This is a foolish way to estimate parallel_workers, but for now... */
2418  pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2419  pathnode->jpath.path.pathkeys = pathkeys;
2420  pathnode->jpath.jointype = jointype;
2421  pathnode->jpath.inner_unique = extra->inner_unique;
2422  pathnode->jpath.outerjoinpath = outer_path;
2423  pathnode->jpath.innerjoinpath = inner_path;
2424  pathnode->jpath.joinrestrictinfo = restrict_clauses;
2425  pathnode->path_mergeclauses = mergeclauses;
2426  pathnode->outersortkeys = outersortkeys;
2427  pathnode->innersortkeys = innersortkeys;
2428  /* pathnode->skip_mark_restore will be set by final_cost_mergejoin */
2429  /* pathnode->materialize_inner will be set by final_cost_mergejoin */
2430 
2431  final_cost_mergejoin(root, pathnode, workspace, extra);
2432 
2433  return pathnode;
2434 }
2435 
2436 /*
2437  * create_hashjoin_path
2438  * Creates a pathnode corresponding to a hash join between two relations.
2439  *
2440  * 'joinrel' is the join relation
2441  * 'jointype' is the type of join required
2442  * 'workspace' is the result from initial_cost_hashjoin
2443  * 'extra' contains various information about the join
2444  * 'outer_path' is the cheapest outer path
2445  * 'inner_path' is the cheapest inner path
2446  * 'parallel_hash' to select Parallel Hash of inner path (shared hash table)
2447  * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
2448  * 'required_outer' is the set of required outer rels
2449  * 'hashclauses' are the RestrictInfo nodes to use as hash clauses
2450  * (this should be a subset of the restrict_clauses list)
2451  */
2452 HashPath *
2453 create_hashjoin_path(PlannerInfo *root,
2454  RelOptInfo *joinrel,
2455  JoinType jointype,
2456  JoinCostWorkspace *workspace,
2457  JoinPathExtraData *extra,
2458  Path *outer_path,
2459  Path *inner_path,
2460  bool parallel_hash,
2461  List *restrict_clauses,
2462  Relids required_outer,
2463  List *hashclauses)
2464 {
2465  HashPath *pathnode = makeNode(HashPath);
2466 
2467  pathnode->jpath.path.pathtype = T_HashJoin;
2468  pathnode->jpath.path.parent = joinrel;
2469  pathnode->jpath.path.pathtarget = joinrel->reltarget;
2470  pathnode->jpath.path.param_info =
2471  get_joinrel_parampathinfo(root,
2472  joinrel,
2473  outer_path,
2474  inner_path,
2475  extra->sjinfo,
2476  required_outer,
2477  &restrict_clauses);
2478  pathnode->jpath.path.parallel_aware =
2479  joinrel->consider_parallel && parallel_hash;
2480  pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2481  outer_path->parallel_safe && inner_path->parallel_safe;
2482  /* This is a foolish way to estimate parallel_workers, but for now... */
2483  pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2484 
2485  /*
2486  * A hashjoin never has pathkeys, since its output ordering is
2487  * unpredictable due to possible batching. XXX If the inner relation is
2488  * small enough, we could instruct the executor that it must not batch,
2489  * and then we could assume that the output inherits the outer relation's
2490  * ordering, which might save a sort step. However there is considerable
2491  * downside if our estimate of the inner relation size is badly off. For
2492  * the moment we don't risk it. (Note also that if we wanted to take this
2493  * seriously, joinpath.c would have to consider many more paths for the
2494  * outer rel than it does now.)
2495  */
2496  pathnode->jpath.path.pathkeys = NIL;
2497  pathnode->jpath.jointype = jointype;
2498  pathnode->jpath.inner_unique = extra->inner_unique;
2499  pathnode->jpath.outerjoinpath = outer_path;
2500  pathnode->jpath.innerjoinpath = inner_path;
2501  pathnode->jpath.joinrestrictinfo = restrict_clauses;
2502  pathnode->path_hashclauses = hashclauses;
2503  /* final_cost_hashjoin will fill in pathnode->num_batches */
2504 
2505  final_cost_hashjoin(root, pathnode, workspace, extra);
2506 
2507  return pathnode;
2508 }
2509 
2510 /*
2511  * create_projection_path
2512  * Creates a pathnode that represents performing a projection.
2513  *
2514  * 'rel' is the parent relation associated with the result
2515  * 'subpath' is the path representing the source of data
2516  * 'target' is the PathTarget to be computed
2517  */
2518 ProjectionPath *
2519 create_projection_path(PlannerInfo *root,
2520  RelOptInfo *rel,
2521  Path *subpath,
2522  PathTarget *target)
2523 {
2524  ProjectionPath *pathnode = makeNode(ProjectionPath);
2525  PathTarget *oldtarget = subpath->pathtarget;
2526 
2527  pathnode->path.pathtype = T_Result;
2528  pathnode->path.parent = rel;
2529  pathnode->path.pathtarget = target;
2530  /* For now, assume we are above any joins, so no parameterization */
2531  pathnode->path.param_info = NULL;
2532  pathnode->path.parallel_aware = false;
2533  pathnode->path.parallel_safe = rel->consider_parallel &&
2534  subpath->parallel_safe &&
2535  is_parallel_safe(root, (Node *) target->exprs);
2536  pathnode->path.parallel_workers = subpath->parallel_workers;
2537  /* Projection does not change the sort order */
2538  pathnode->path.pathkeys = subpath->pathkeys;
2539 
2540  pathnode->subpath = subpath;
2541 
2542  /*
2543  * We might not need a separate Result node. If the input plan node type
2544  * can project, we can just tell it to project something else. Or, if it
2545  * can't project but the desired target has the same expression list as
2546  * what the input will produce anyway, we can still give it the desired
2547  * tlist (possibly changing its ressortgroupref labels, but nothing else).
2548  * Note: in the latter case, create_projection_plan has to recheck our
2549  * conclusion; see comments therein.
2550  */
2551  if (is_projection_capable_path(subpath) ||
2552  equal(oldtarget->exprs, target->exprs))
2553  {
2554  /* No separate Result node needed */
2555  pathnode->dummypp = true;
2556 
2557  /*
2558  * Set cost of plan as subpath's cost, adjusted for tlist replacement.
2559  */
2560  pathnode->path.rows = subpath->rows;
2561  pathnode->path.startup_cost = subpath->startup_cost +
2562  (target->cost.startup - oldtarget->cost.startup);
2563  pathnode->path.total_cost = subpath->total_cost +
2564  (target->cost.startup - oldtarget->cost.startup) +
2565  (target->cost.per_tuple - oldtarget->cost.per_tuple) * subpath->rows;
2566  }
2567  else
2568  {
2569  /* We really do need the Result node */
2570  pathnode->dummypp = false;
2571 
2572  /*
2573  * The Result node's cost is cpu_tuple_cost per row, plus the cost of
2574  * evaluating the tlist. There is no qual to worry about.
2575  */
2576  pathnode->path.rows = subpath->rows;
2577  pathnode->path.startup_cost = subpath->startup_cost +
2578  target->cost.startup;
2579  pathnode->path.total_cost = subpath->total_cost +
2580  target->cost.startup +
2581  (cpu_tuple_cost + target->cost.per_tuple) * subpath->rows;
2582  }
2583 
2584  return pathnode;
2585 }
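/*
 * Editor's note (illustrative, not part of pathnode.c): in the non-dummy
 * case above, with the default cpu_tuple_cost of 0.01 and a target costing,
 * say, 0.005 per tuple, the Result node adds (0.01 + 0.005) * subpath->rows
 * to the total cost, plus the target's startup cost.
 */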
2586 
2587 /*
2588  * apply_projection_to_path
2589  * Add a projection step, or just apply the target directly to given path.
2590  *
2591  * This has the same net effect as create_projection_path(), except that if
2592  * a separate Result plan node isn't needed, we just replace the given path's
2593  * pathtarget with the desired one. This must be used only when the caller
2594  * knows that the given path isn't referenced elsewhere and so can be modified
2595  * in-place.
2596  *
2597  * If the input path is a GatherPath or GatherMergePath, we try to push the
2598  * new target down to its input as well; this is a yet more invasive
2599  * modification of the input path, which create_projection_path() can't do.
2600  *
2601  * Note that we mustn't change the source path's parent link; so when it is
2602  * add_path'd to "rel" things will be a bit inconsistent. So far that has
2603  * not caused any trouble.
2604  *
2605  * 'rel' is the parent relation associated with the result
2606  * 'path' is the path representing the source of data
2607  * 'target' is the PathTarget to be computed
2608  */
2609 Path *
2610 apply_projection_to_path(PlannerInfo *root,
2611  RelOptInfo *rel,
2612  Path *path,
2613  PathTarget *target)
2614 {
2615  QualCost oldcost;
2616 
2617  /*
2618  * If given path can't project, we might need a Result node, so make a
2619  * separate ProjectionPath.
2620  */
2621  if (!is_projection_capable_path(path))
2622  return (Path *) create_projection_path(root, rel, path, target);
2623 
2624  /*
2625  * We can just jam the desired tlist into the existing path, being sure to
2626  * update its cost estimates appropriately.
2627  */
2628  oldcost = path->pathtarget->cost;
2629  path->pathtarget = target;
2630 
2631  path->startup_cost += target->cost.startup - oldcost.startup;
2632  path->total_cost += target->cost.startup - oldcost.startup +
2633  (target->cost.per_tuple - oldcost.per_tuple) * path->rows;
2634 
2635  /*
2636  * If the path happens to be a Gather or GatherMerge path, we'd like to
2637  * arrange for the subpath to return the required target list so that
2638  * workers can help project. But if there is something that is not
2639  * parallel-safe in the target expressions, then we can't.
2640  */
2641  if ((IsA(path, GatherPath) || IsA(path, GatherMergePath)) &&
2642  is_parallel_safe(root, (Node *) target->exprs))
2643  {
2644  /*
2645  * We always use create_projection_path here, even if the subpath is
2646  * projection-capable, so as to avoid modifying the subpath in place.
2647  * It seems unlikely at present that there could be any other
2648  * references to the subpath, but better safe than sorry.
2649  *
2650  * Note that we don't change the parallel path's cost estimates; it
2651  * might be appropriate to do so, to reflect the fact that the bulk of
2652  * the target evaluation will happen in workers.
2653  */
2654  if (IsA(path, GatherPath))
2655  {
2656  GatherPath *gpath = (GatherPath *) path;
2657 
2658  gpath->subpath = (Path *)
2659  create_projection_path(root,
2660  gpath->subpath->parent,
2661  gpath->subpath,
2662  target);
2663  }
2664  else
2665  {
2666  GatherMergePath *gmpath = (GatherMergePath *) path;
2667 
2668  gmpath->subpath = (Path *)
2669  create_projection_path(root,
2670  gmpath->subpath->parent,
2671  gmpath->subpath,
2672  target);
2673  }
2674  }
2675  else if (path->parallel_safe &&
2676  !is_parallel_safe(root, (Node *) target->exprs))
2677  {
2678  /*
2679  * We're inserting a parallel-restricted target list into a path
2680  * currently marked parallel-safe, so we have to mark it as no longer
2681  * safe.
2682  */
2683  path->parallel_safe = false;
2684  }
2685 
2686  return path;
2687 }
2688 
2689 /*
2690  * create_set_projection_path
2691  * Creates a pathnode that represents performing a projection that
2692  * includes set-returning functions.
2693  *
2694  * 'rel' is the parent relation associated with the result
2695  * 'subpath' is the path representing the source of data
2696  * 'target' is the PathTarget to be computed
2697  */
2698 ProjectSetPath *
2699 create_set_projection_path(PlannerInfo *root,
2700  RelOptInfo *rel,
2701  Path *subpath,
2702  PathTarget *target)
2703 {
2704  ProjectSetPath *pathnode = makeNode(ProjectSetPath);
2705  double tlist_rows;
2706  ListCell *lc;
2707 
2708  pathnode->path.pathtype = T_ProjectSet;
2709  pathnode->path.parent = rel;
2710  pathnode->path.pathtarget = target;
2711  /* For now, assume we are above any joins, so no parameterization */
2712  pathnode->path.param_info = NULL;
2713  pathnode->path.parallel_aware = false;
2714  pathnode->path.parallel_safe = rel->consider_parallel &&
2715  subpath->parallel_safe &&
2716  is_parallel_safe(root, (Node *) target->exprs);
2717  pathnode->path.parallel_workers = subpath->parallel_workers;
2718  /* Projection does not change the sort order XXX? */
2719  pathnode->path.pathkeys = subpath->pathkeys;
2720 
2721  pathnode->subpath = subpath;
2722 
2723  /*
2724  * Estimate number of rows produced by SRFs for each row of input; if
2725  * there's more than one in this node, use the maximum.
2726  */
2727  tlist_rows = 1;
2728  foreach(lc, target->exprs)
2729  {
2730  Node *node = (Node *) lfirst(lc);
2731  double itemrows;
2732 
2733  itemrows = expression_returns_set_rows(root, node);
2734  if (tlist_rows < itemrows)
2735  tlist_rows = itemrows;
2736  }
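/*
 * Editor's note (illustrative, not part of pathnode.c): if one SRF in the
 * target list is estimated to return 10 rows per input row and another 3,
 * tlist_rows ends up as 10, and the path's row estimate below becomes
 * subpath->rows * 10.
 */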
2737 
2738  /*
2739  * In addition to the cost of evaluating the tlist, charge cpu_tuple_cost
2740  * per input row, and half of cpu_tuple_cost for each added output row.
2741  * This is slightly bizarre maybe, but it's what 9.6 did; we may revisit
2742  * this estimate later.
2743  */
2744  pathnode->path.rows = subpath->rows * tlist_rows;
2745  pathnode->path.startup_cost = subpath->startup_cost +
2746  target->cost.startup;
2747  pathnode->path.total_cost = subpath->total_cost +
2748  target->cost.startup +
2749  (cpu_tuple_cost + target->cost.per_tuple) * subpath->rows +
2750  (pathnode->path.rows - subpath->rows) * cpu_tuple_cost / 2;
2751 
2752  return pathnode;
2753 }
2754 
2755 /*
2756  * create_sort_path
2757  * Creates a pathnode that represents performing an explicit sort.
2758  *
2759  * 'rel' is the parent relation associated with the result
2760  * 'subpath' is the path representing the source of data
2761  * 'pathkeys' represents the desired sort order
2762  * 'limit_tuples' is the estimated bound on the number of output tuples,
2763  * or -1 if no LIMIT or couldn't estimate
2764  */
2765 SortPath *
2766 create_sort_path(PlannerInfo *root,
2767  RelOptInfo *rel,
2768  Path *subpath,
2769  List *pathkeys,
2770  double limit_tuples)
2771 {
2772  SortPath *pathnode = makeNode(SortPath);
2773 
2774  pathnode->path.pathtype = T_Sort;
2775  pathnode->path.parent = rel;
2776  /* Sort doesn't project, so use source path's pathtarget */
2777  pathnode->path.pathtarget = subpath->pathtarget;
2778  /* For now, assume we are above any joins, so no parameterization */
2779  pathnode->path.param_info = NULL;
2780  pathnode->path.parallel_aware = false;
2781  pathnode->path.parallel_safe = rel->consider_parallel &&
2782  subpath->parallel_safe;
2783  pathnode->path.parallel_workers = subpath->parallel_workers;
2784  pathnode->path.pathkeys = pathkeys;
2785 
2786  pathnode->subpath = subpath;
2787 
2788  cost_sort(&pathnode->path, root, pathkeys,
2789  subpath->total_cost,
2790  subpath->rows,
2791  subpath->pathtarget->width,
2792  0.0, /* XXX comparison_cost shouldn't be 0? */
2793  work_mem, limit_tuples);
2794 
2795  return pathnode;
2796 }
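/*
 * Editor's sketch (not part of pathnode.c): typical use of create_sort_path,
 * adding an explicit sort only when an input path is not already ordered as
 * required.  The function name and arguments are illustrative.
 */
static Path *
example_ensure_sorted(PlannerInfo *root, RelOptInfo *rel,
					  Path *input_path, List *required_pathkeys)
{
	if (pathkeys_contained_in(required_pathkeys, input_path->pathkeys))
		return input_path;		/* already sufficiently sorted */

	return (Path *) create_sort_path(root, rel, input_path,
									 required_pathkeys,
									 -1.0 /* no LIMIT bound known */ );
}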
2797 
2798 /*
2799  * create_group_path
2800  * Creates a pathnode that represents performing grouping of presorted input
2801  *
2802  * 'rel' is the parent relation associated with the result
2803  * 'subpath' is the path representing the source of data
2804  * 'target' is the PathTarget to be computed
2805  * 'groupClause' is a list of SortGroupClause's representing the grouping
2806  * 'qual' is the HAVING quals if any
2807  * 'numGroups' is the estimated number of groups
2808  */
2809 GroupPath *
2810 create_group_path(PlannerInfo *root,
2811  RelOptInfo *rel,
2812  Path *subpath,
2813  List *groupClause,
2814  List *qual,
2815  double numGroups)
2816 {
2817  GroupPath *pathnode = makeNode(GroupPath);
2818  PathTarget *target = rel->reltarget;
2819 
2820  pathnode->path.pathtype = T_Group;
2821  pathnode->path.parent = rel;
2822  pathnode->path.pathtarget = target;
2823  /* For now, assume we are above any joins, so no parameterization */
2824  pathnode->path.param_info = NULL;
2825  pathnode->path.parallel_aware = false;
2826  pathnode->path.parallel_safe = rel->consider_parallel &&
2827  subpath->parallel_safe;
2828  pathnode->path.parallel_workers = subpath->parallel_workers;
2829  /* Group doesn't change sort ordering */
2830  pathnode->path.pathkeys = subpath->pathkeys;
2831 
2832  pathnode->subpath = subpath;
2833 
2834  pathnode->groupClause = groupClause;
2835  pathnode->qual = qual;
2836 
2837  cost_group(&pathnode->path, root,
2838  list_length(groupClause),
2839  numGroups,
2840  qual,
2841  subpath->startup_cost, subpath->total_cost,
2842  subpath->rows);
2843 
2844  /* add tlist eval cost for each output row */
2845  pathnode->path.startup_cost += target->cost.startup;
2846  pathnode->path.total_cost += target->cost.startup +
2847  target->cost.per_tuple * pathnode->path.rows;
2848 
2849  return pathnode;
2850 }
2851 
2852 /*
2853  * create_upper_unique_path
2854  * Creates a pathnode that represents performing an explicit Unique step
2855  * on presorted input.
2856  *
2857  * This produces a Unique plan node, but the use-case is so different from
2858  * create_unique_path that it doesn't seem worth trying to merge the two.
2859  *
2860  * 'rel' is the parent relation associated with the result
2861  * 'subpath' is the path representing the source of data
2862  * 'numCols' is the number of grouping columns
2863  * 'numGroups' is the estimated number of groups
2864  *
2865  * The input path must be sorted on the grouping columns, plus possibly
2866  * additional columns; so the first numCols pathkeys are the grouping columns
2867  */
2868 UpperUniquePath *
2869 create_upper_unique_path(PlannerInfo *root,
2870  RelOptInfo *rel,
2871  Path *subpath,
2872  int numCols,
2873  double numGroups)
2874 {
2875  UpperUniquePath *pathnode = makeNode(UpperUniquePath);
2876 
2877  pathnode->path.pathtype = T_Unique;
2878  pathnode->path.parent = rel;
2879  /* Unique doesn't project, so use source path's pathtarget */
2880  pathnode->path.pathtarget = subpath->pathtarget;
2881  /* For now, assume we are above any joins, so no parameterization */
2882  pathnode->path.param_info = NULL;
2883  pathnode->path.parallel_aware = false;
2884  pathnode->path.parallel_safe = rel->consider_parallel &&
2885  subpath->parallel_safe;
2886  pathnode->path.parallel_workers = subpath->parallel_workers;
2887  /* Unique doesn't change the input ordering */
2888  pathnode->path.pathkeys = subpath->pathkeys;
2889 
2890  pathnode->subpath = subpath;
2891  pathnode->numkeys = numCols;
2892 
2893  /*
2894  * Charge one cpu_operator_cost per comparison per input tuple. We assume
2895  * all columns get compared for most of the tuples. (XXX probably this is
2896  * an overestimate.)
2897  */
2898  pathnode->path.startup_cost = subpath->startup_cost;
2899  pathnode->path.total_cost = subpath->total_cost +
2900  cpu_operator_cost * subpath->rows * numCols;
2901  pathnode->path.rows = numGroups;
2902 
2903  return pathnode;
2904 }
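/*
 * Editor's note (illustrative, not part of pathnode.c): a worked example of
 * the costing above, assuming the default cpu_operator_cost of 0.0025.  With
 * 100,000 input rows and numCols = 2, the Unique step adds
 * 0.0025 * 100000 * 2 = 500 to the subpath's total cost, while the startup
 * cost is unchanged.
 */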
2905 
2906 /*
2907  * create_agg_path
2908  * Creates a pathnode that represents performing aggregation/grouping
2909  *
2910  * 'rel' is the parent relation associated with the result
2911  * 'subpath' is the path representing the source of data
2912  * 'target' is the PathTarget to be computed
2913  * 'aggstrategy' is the Agg node's basic implementation strategy
2914  * 'aggsplit' is the Agg node's aggregate-splitting mode
2915  * 'groupClause' is a list of SortGroupClause's representing the grouping
2916  * 'qual' is the HAVING quals if any
2917  * 'aggcosts' contains cost info about the aggregate functions to be computed
2918  * 'numGroups' is the estimated number of groups (1 if not grouping)
2919  */
2920 AggPath *
2921 create_agg_path(PlannerInfo *root,
2922  RelOptInfo *rel,
2923  Path *subpath,
2924  PathTarget *target,
2925  AggStrategy aggstrategy,
2926  AggSplit aggsplit,
2927  List *groupClause,
2928  List *qual,
2929  const AggClauseCosts *aggcosts,
2930  double numGroups)
2931 {
2932  AggPath *pathnode = makeNode(AggPath);
2933 
2934  pathnode->path.pathtype = T_Agg;
2935  pathnode->path.parent = rel;
2936  pathnode->path.pathtarget = target;
2937  /* For now, assume we are above any joins, so no parameterization */
2938  pathnode->path.param_info = NULL;
2939  pathnode->path.parallel_aware = false;
2940  pathnode->path.parallel_safe = rel->consider_parallel &&
2941  subpath->parallel_safe;
2942  pathnode->path.parallel_workers = subpath->parallel_workers;
2943  if (aggstrategy == AGG_SORTED)
2944  pathnode->path.pathkeys = subpath->pathkeys; /* preserves order */
2945  else
2946  pathnode->path.pathkeys = NIL; /* output is unordered */
2947  pathnode->subpath = subpath;
2948 
2949  pathnode->aggstrategy = aggstrategy;
2950  pathnode->aggsplit = aggsplit;
2951  pathnode->numGroups = numGroups;
2952  pathnode->groupClause = groupClause;
2953  pathnode->qual = qual;
2954 
2955  cost_agg(&pathnode->path, root,
2956  aggstrategy, aggcosts,
2957  list_length(groupClause), numGroups,
2958  qual,
2959  subpath->startup_cost, subpath->total_cost,
2960  subpath->rows);
2961 
2962  /* add tlist eval cost for each output row */
2963  pathnode->path.startup_cost += target->cost.startup;
2964  pathnode->path.total_cost += target->cost.startup +
2965  target->cost.per_tuple * pathnode->path.rows;
2966 
2967  return pathnode;
2968 }
2969 
2970 /*
2971  * create_groupingsets_path
2972  * Creates a pathnode that represents performing GROUPING SETS aggregation
2973  *
2974  * GroupingSetsPath represents sorted grouping with one or more grouping sets.
2975  * The input path's result must be sorted to match the group clause of the
2976  * first (non-hashed) rollup in 'rollups'.
2977  *
2978  * 'rel' is the parent relation associated with the result
2979  * 'subpath' is the path representing the source of data
2980  * 'target' is the PathTarget to be computed
2981  * 'having_qual' is the HAVING quals if any
2982  * 'rollups' is a list of RollupData nodes
2983  * 'agg_costs' contains cost info about the aggregate functions to be computed
2984  * 'numGroups' is the estimated total number of groups
2985  */
2986 GroupingSetsPath *
2987 create_groupingsets_path(PlannerInfo *root,
2988  RelOptInfo *rel,
2989  Path *subpath,
2990  List *having_qual,
2991  AggStrategy aggstrategy,
2992  List *rollups,
2993  const AggClauseCosts *agg_costs,
2994  double numGroups)
2995 {
2996  GroupingSetsPath *pathnode = makeNode(GroupingSetsPath);
2997  PathTarget *target = rel->reltarget;
2998  ListCell *lc;
2999  bool is_first = true;
3000  bool is_first_sort = true;
3001 
3002  /* The topmost generated Plan node will be an Agg */
3003  pathnode->path.pathtype = T_Agg;
3004  pathnode->path.parent = rel;
3005  pathnode->path.pathtarget = target;
3006  pathnode->path.param_info = subpath->param_info;
3007  pathnode->path.parallel_aware = false;
3008  pathnode->path.parallel_safe = rel->consider_parallel &&
3009  subpath->parallel_safe;
3010  pathnode->path.parallel_workers = subpath->parallel_workers;
3011  pathnode->subpath = subpath;
3012 
3013  /*
3014  * Simplify callers by downgrading AGG_SORTED to AGG_PLAIN, and AGG_MIXED
3015  * to AGG_HASHED, here if possible.
3016  */
3017  if (aggstrategy == AGG_SORTED &&
3018  list_length(rollups) == 1 &&
3019  ((RollupData *) linitial(rollups))->groupClause == NIL)
3020  aggstrategy = AGG_PLAIN;
3021 
3022  if (aggstrategy == AGG_MIXED &&
3023  list_length(rollups) == 1)
3024  aggstrategy = AGG_HASHED;
3025 
3026  /*
3027  * Output will be in sorted order by group_pathkeys if, and only if, there
3028  * is a single rollup operation on a non-empty list of grouping
3029  * expressions.
3030  */
3031  if (aggstrategy == AGG_SORTED && list_length(rollups) == 1)
3032  pathnode->path.pathkeys = root->group_pathkeys;
3033  else
3034  pathnode->path.pathkeys = NIL;
3035 
3036  pathnode->aggstrategy = aggstrategy;
3037  pathnode->rollups = rollups;
3038  pathnode->qual = having_qual;
3039 
3040  Assert(rollups != NIL);
3041  Assert(aggstrategy != AGG_PLAIN || list_length(rollups) == 1);
3042  Assert(aggstrategy != AGG_MIXED || list_length(rollups) > 1);
3043 
3044  foreach(lc, rollups)
3045  {
3046  RollupData *rollup = lfirst(lc);
3047  List *gsets = rollup->gsets;
3048  int numGroupCols = list_length(linitial(gsets));
3049 
3050  /*
3051  * In AGG_SORTED or AGG_PLAIN mode, the first rollup takes the
3052  * (already-sorted) input, and following ones do their own sort.
3053  *
3054  * In AGG_HASHED mode, there is one rollup for each grouping set.
3055  *
3056  * In AGG_MIXED mode, the first rollups are hashed, the first
3057  * non-hashed one takes the (already-sorted) input, and following ones
3058  * do their own sort.
3059  */
3060  if (is_first)
3061  {
3062  cost_agg(&pathnode->path, root,
3063  aggstrategy,
3064  agg_costs,
3065  numGroupCols,
3066  rollup->numGroups,
3067  having_qual,
3068  subpath->startup_cost,
3069  subpath->total_cost,
3070  subpath->rows);
3071  is_first = false;
3072  if (!rollup->is_hashed)
3073  is_first_sort = false;
3074  }
3075  else
3076  {
3077  Path sort_path; /* dummy for result of cost_sort */
3078  Path agg_path; /* dummy for result of cost_agg */
3079 
3080  if (rollup->is_hashed || is_first_sort)
3081  {
3082  /*
3083  * Account for cost of aggregation, but don't charge input
3084  * cost again
3085  */
3086  cost_agg(&agg_path, root,
3087  rollup->is_hashed ? AGG_HASHED : AGG_SORTED,
3088  agg_costs,
3089  numGroupCols,
3090  rollup->numGroups,
3091  having_qual,
3092  0.0, 0.0,
3093  subpath->rows);
3094  if (!rollup->is_hashed)
3095  is_first_sort = false;
3096  }
3097  else
3098  {
3099  /* Account for cost of sort, but don't charge input cost again */
3100  cost_sort(&sort_path, root, NIL,
3101  0.0,
3102  subpath->rows,
3103  subpath->pathtarget->width,
3104  0.0,
3105  work_mem,
3106  -1.0);
3107 
3108  /* Account for cost of aggregation */
3109 
3110  cost_agg(&agg_path, root,
3111  AGG_SORTED,
3112  agg_costs,
3113  numGroupCols,
3114  rollup->numGroups,
3115  having_qual,
3116  sort_path.startup_cost,
3117  sort_path.total_cost,
3118  sort_path.rows);
3119  }
3120 
3121  pathnode->path.total_cost += agg_path.total_cost;
3122  pathnode->path.rows += agg_path.rows;
3123  }
3124  }
3125 
3126  /* add tlist eval cost for each output row */
3127  pathnode->path.startup_cost += target->cost.startup;
3128  pathnode->path.total_cost += target->cost.startup +
3129  target->cost.per_tuple * pathnode->path.rows;
3130 
3131  return pathnode;
3132 }
3133 
3134 /*
3135  * create_minmaxagg_path
3136  * Creates a pathnode that represents computation of MIN/MAX aggregates
3137  *
3138  * 'rel' is the parent relation associated with the result
3139  * 'target' is the PathTarget to be computed
3140  * 'mmaggregates' is a list of MinMaxAggInfo structs
3141  * 'quals' is the HAVING quals if any
3142  */
3143 MinMaxAggPath *
3144 create_minmaxagg_path(PlannerInfo *root,
3145  RelOptInfo *rel,
3146  PathTarget *target,
3147  List *mmaggregates,
3148  List *quals)
3149 {
3150  MinMaxAggPath *pathnode = makeNode(MinMaxAggPath);
3151  Cost initplan_cost;
3152  ListCell *lc;
3153 
3154  /* The topmost generated Plan node will be a Result */
3155  pathnode->path.pathtype = T_Result;
3156  pathnode->path.parent = rel;
3157  pathnode->path.pathtarget = target;
3158  /* For now, assume we are above any joins, so no parameterization */
3159  pathnode->path.param_info = NULL;
3160  pathnode->path.parallel_aware = false;
3161  /* A MinMaxAggPath implies use of subplans, so cannot be parallel-safe */
3162  pathnode->path.parallel_safe = false;
3163  pathnode->path.parallel_workers = 0;
3164  /* Result is one unordered row */
3165  pathnode->path.rows = 1;
3166  pathnode->path.pathkeys = NIL;
3167 
3168  pathnode->mmaggregates = mmaggregates;
3169  pathnode->quals = quals;
3170 
3171  /* Calculate cost of all the initplans ... */
3172  initplan_cost = 0;
3173  foreach(lc, mmaggregates)
3174  {
3175  MinMaxAggInfo *mminfo = (MinMaxAggInfo *) lfirst(lc);
3176 
3177  initplan_cost += mminfo->pathcost;
3178  }
3179 
3180  /* add tlist eval cost for each output row, plus cpu_tuple_cost */
3181  pathnode->path.startup_cost = initplan_cost + target->cost.startup;
3182  pathnode->path.total_cost = initplan_cost + target->cost.startup +
3183  target->cost.per_tuple + cpu_tuple_cost;
3184 
3185  /*
3186  * Add cost of qual, if any --- but we ignore its selectivity, since our
3187  * rowcount estimate should be 1 no matter what the qual is.
3188  */
3189  if (quals)
3190  {
3191  QualCost qual_cost;
3192 
3193  cost_qual_eval(&qual_cost, quals, root);
3194  pathnode->path.startup_cost += qual_cost.startup;
3195  pathnode->path.total_cost += qual_cost.startup + qual_cost.per_tuple;
3196  }
3197 
3198  return pathnode;
3199 }
3200 
3201 /*
3202  * create_windowagg_path
3203  * Creates a pathnode that represents computation of window functions
3204  *
3205  * 'rel' is the parent relation associated with the result
3206  * 'subpath' is the path representing the source of data
3207  * 'target' is the PathTarget to be computed
3208  * 'windowFuncs' is a list of WindowFunc structs
3209  * 'winclause' is a WindowClause that is common to all the WindowFuncs
3210  *
3211  * The input must be sorted according to the WindowClause's PARTITION keys
3212  * plus ORDER BY keys.
3213  */
3214 WindowAggPath *
3215 create_windowagg_path(PlannerInfo *root,
3216  RelOptInfo *rel,
3217  Path *subpath,
3218  PathTarget *target,
3219  List *windowFuncs,
3220  WindowClause *winclause)
3221 {
3222  WindowAggPath *pathnode = makeNode(WindowAggPath);
3223 
3224  pathnode->path.pathtype = T_WindowAgg;
3225  pathnode->path.parent = rel;
3226  pathnode->path.pathtarget = target;
3227  /* For now, assume we are above any joins, so no parameterization */
3228  pathnode->path.param_info = NULL;
3229  pathnode->path.parallel_aware = false;
3230  pathnode->path.parallel_safe = rel->consider_parallel &&
3231  subpath->parallel_safe;
3232  pathnode->path.parallel_workers = subpath->parallel_workers;
3233  /* WindowAgg preserves the input sort order */
3234  pathnode->path.pathkeys = subpath->pathkeys;
3235 
3236  pathnode->subpath = subpath;
3237  pathnode->winclause = winclause;
3238 
3239  /*
3240  * For costing purposes, assume that there are no redundant partitioning
3241  * or ordering columns; it's not worth the trouble to deal with that
3242  * corner case here. So we just pass the unmodified list lengths to
3243  * cost_windowagg.
3244  */
3245  cost_windowagg(&pathnode->path, root,
3246  windowFuncs,
3247  list_length(winclause->partitionClause),
3248  list_length(winclause->orderClause),
3249  subpath->startup_cost,
3250  subpath->total_cost,
3251  subpath->rows);
3252 
3253  /* add tlist eval cost for each output row */
3254  pathnode->path.startup_cost += target->cost.startup;
3255  pathnode->path.total_cost += target->cost.startup +
3256  target->cost.per_tuple * pathnode->path.rows;
3257 
3258  return pathnode;
3259 }
3260 
3261 /*
3262  * create_setop_path
3263  * Creates a pathnode that represents computation of INTERSECT or EXCEPT
3264  *
3265  * 'rel' is the parent relation associated with the result
3266  * 'subpath' is the path representing the source of data
3267  * 'cmd' is the specific semantics (INTERSECT or EXCEPT, with/without ALL)
3268  * 'strategy' is the implementation strategy (sorted or hashed)
3269  * 'distinctList' is a list of SortGroupClause's representing the grouping
3270  * 'flagColIdx' is the column number where the flag column will be, if any
3271  * 'firstFlag' is the flag value for the first input relation when hashing;
3272  * or -1 when sorting
3273  * 'numGroups' is the estimated number of distinct groups
3274  * 'outputRows' is the estimated number of output rows
3275  */
3276 SetOpPath *
3277 create_setop_path(PlannerInfo *root,
3278  RelOptInfo *rel,
3279  Path *subpath,
3280  SetOpCmd cmd,
3281  SetOpStrategy strategy,
3282  List *distinctList,
3283  AttrNumber flagColIdx,
3284  int firstFlag,
3285  double numGroups,
3286  double outputRows)
3287 {
3288  SetOpPath *pathnode = makeNode(SetOpPath);
3289 
3290  pathnode->path.pathtype = T_SetOp;
3291  pathnode->path.parent = rel;
3292  /* SetOp doesn't project, so use source path's pathtarget */
3293  pathnode->path.pathtarget = subpath->pathtarget;
3294  /* For now, assume we are above any joins, so no parameterization */
3295  pathnode->path.param_info = NULL;
3296  pathnode->path.parallel_aware = false;
3297  pathnode->path.parallel_safe = rel->consider_parallel &&
3298  subpath->parallel_safe;
3299  pathnode->path.parallel_workers = subpath->parallel_workers;
3300  /* SetOp preserves the input sort order if in sort mode */
3301  pathnode->path.pathkeys =
3302  (strategy == SETOP_SORTED) ? subpath->pathkeys : NIL;
3303 
3304  pathnode->subpath = subpath;
3305  pathnode->cmd = cmd;
3306  pathnode->strategy = strategy;
3307  pathnode->distinctList = distinctList;
3308  pathnode->flagColIdx = flagColIdx;
3309  pathnode->firstFlag = firstFlag;
3310  pathnode->numGroups = numGroups;
3311 
3312  /*
3313  * Charge one cpu_operator_cost per comparison per input tuple. We assume
3314  * all columns get compared for most of the tuples.
3315  */
3316  pathnode->path.startup_cost = subpath->startup_cost;
3317  pathnode->path.total_cost = subpath->total_cost +
3318  cpu_operator_cost * subpath->rows * list_length(distinctList);
3319  pathnode->path.rows = outputRows;
3320 
3321  return pathnode;
3322 }
3323 
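Illustrative usage (a hedged sketch, not part of pathnode.c): a sort-based INTERSECT might be requested as below; setop_rel, sorted_input, groupList, flag_col, ngroups and nrows are hypothetical values the real caller would already have computed.

/* Hedged sketch only -- sorted strategy, so firstFlag is -1. */
SetOpPath *spath;

spath = create_setop_path(root,
                          setop_rel,            /* hypothetical result rel */
                          sorted_input,         /* flagged, sorted input path */
                          SETOPCMD_INTERSECT,
                          SETOP_SORTED,
                          groupList,            /* SortGroupClauses defining the grouping */
                          flag_col,             /* AttrNumber of the flag column */
                          -1,                   /* firstFlag is unused when sorting */
                          ngroups,              /* estimated distinct groups */
                          nrows);               /* estimated output rows */
add_path(setop_rel, (Path *) spath);
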
3324 /*
3325  * create_recursiveunion_path
3326  * Creates a pathnode that represents a recursive UNION node
3327  *
3328  * 'rel' is the parent relation associated with the result
3329  * 'leftpath' is the source of data for the non-recursive term
3330  * 'rightpath' is the source of data for the recursive term
3331  * 'target' is the PathTarget to be computed
3332  * 'distinctList' is a list of SortGroupClause's representing the grouping
3333  * 'wtParam' is the ID of Param representing work table
3334  * 'numGroups' is the estimated number of groups
3335  *
3336  * For recursive UNION ALL, distinctList is empty and numGroups is zero
3337  */
3338 RecursiveUnionPath *
3339 create_recursiveunion_path(PlannerInfo *root,
3340  RelOptInfo *rel,
3341  Path *leftpath,
3342  Path *rightpath,
3343  PathTarget *target,
3344  List *distinctList,
3345  int wtParam,
3346  double numGroups)
3347 {
3348  RecursiveUnionPath *pathnode = makeNode(RecursiveUnionPath);
3349 
3350  pathnode->path.pathtype = T_RecursiveUnion;
3351  pathnode->path.parent = rel;
3352  pathnode->path.pathtarget = target;
3353  /* For now, assume we are above any joins, so no parameterization */
3354  pathnode->path.param_info = NULL;
3355  pathnode->path.parallel_aware = false;
3356  pathnode->path.parallel_safe = rel->consider_parallel &&
3357  leftpath->parallel_safe && rightpath->parallel_safe;
3358  /* Foolish, but we'll do it like joins for now: */
3359  pathnode->path.parallel_workers = leftpath->parallel_workers;
3360  /* RecursiveUnion result is always unsorted */
3361  pathnode->path.pathkeys = NIL;
3362 
3363  pathnode->leftpath = leftpath;
3364  pathnode->rightpath = rightpath;
3365  pathnode->distinctList = distinctList;
3366  pathnode->wtParam = wtParam;
3367  pathnode->numGroups = numGroups;
3368 
3369  cost_recursive_union(&pathnode->path, leftpath, rightpath);
3370 
3371  return pathnode;
3372 }
3373 
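Illustrative usage (a hedged sketch, not part of pathnode.c): for recursive UNION ALL, where no duplicate elimination is needed, the call might look like this; result_rel, nrpath, rpath, target and wtParam are hypothetical.

/* Hedged sketch only -- UNION ALL, so distinctList is NIL and numGroups is 0. */
RecursiveUnionPath *rupath;

rupath = create_recursiveunion_path(root,
                                    result_rel,  /* hypothetical result rel */
                                    nrpath,      /* non-recursive term's path */
                                    rpath,       /* recursive term's path */
                                    target,      /* PathTarget to compute */
                                    NIL,         /* no grouping columns */
                                    wtParam,     /* work table Param ID */
                                    0);          /* numGroups: none for UNION ALL */
add_path(result_rel, (Path *) rupath);
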
3374 /*
3375  * create_lockrows_path
3376  * Creates a pathnode that represents acquiring row locks
3377  *
3378  * 'rel' is the parent relation associated with the result
3379  * 'subpath' is the path representing the source of data
3380  * 'rowMarks' is a list of PlanRowMark's
3381  * 'epqParam' is the ID of Param for EvalPlanQual re-eval
3382  */
3383 LockRowsPath *
3384 create_lockrows_path(PlannerInfo *root, RelOptInfo *rel,
3385  Path *subpath, List *rowMarks, int epqParam)
3386 {
3387  LockRowsPath *pathnode = makeNode(LockRowsPath);
3388 
3389  pathnode->path.pathtype = T_LockRows;
3390  pathnode->path.parent = rel;
3391  /* LockRows doesn't project, so use source path's pathtarget */
3392  pathnode->path.pathtarget = subpath->pathtarget;
3393  /* For now, assume we are above any joins, so no parameterization */
3394  pathnode->path.param_info = NULL;
3395  pathnode->path.parallel_aware = false;
3396  pathnode->path.parallel_safe = false;
3397  pathnode->path.parallel_workers = 0;
3398  pathnode->path.rows = subpath->rows;
3399 
3400  /*
3401  * The result cannot be assumed sorted, since locking might cause the sort
3402  * key columns to be replaced with new values.
3403  */
3404  pathnode->path.pathkeys = NIL;
3405 
3406  pathnode->subpath = subpath;
3407  pathnode->rowMarks = rowMarks;
3408  pathnode->epqParam = epqParam;
3409 
3410  /*
3411  * We should charge something extra for the costs of row locking and
3412  * possible refetches, but it's hard to say how much. For now, use
3413  * cpu_tuple_cost per row.
3414  */
3415  pathnode->path.startup_cost = subpath->startup_cost;
3416  pathnode->path.total_cost = subpath->total_cost +
3417  cpu_tuple_cost * subpath->rows;
3418 
3419  return pathnode;
3420 }
3421 
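Illustrative usage (a hedged sketch, not part of pathnode.c): wrapping a finished path in a row-locking node for SELECT ... FOR UPDATE might look like this; final_rel, best_path, rowMarks and epqParam are hypothetical stand-ins for values the caller already has.

/* Hedged sketch only -- rowMarks lists the PlanRowMarks to enforce. */
LockRowsPath *lrpath;

lrpath = create_lockrows_path(root,
                              final_rel,  /* hypothetical top relation */
                              best_path,  /* source of the rows to lock */
                              rowMarks,   /* PlanRowMark list */
                              epqParam);  /* caller-assigned EvalPlanQual Param ID */
add_path(final_rel, (Path *) lrpath);
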
3422 /*
3423  * create_modifytable_path
3424  * Creates a pathnode that represents performing INSERT/UPDATE/DELETE mods
3425  *
3426  * 'rel' is the parent relation associated with the result
3427  * 'operation' is the operation type
3428  * 'canSetTag' is true if we set the command tag/es_processed
3429  * 'nominalRelation' is the parent RT index for use of EXPLAIN
3430  * 'rootRelation' is the partitioned table root RT index, or 0 if none
3431  * 'partColsUpdated' is true if any partitioning columns are being updated,
3432  * either from the target relation or a descendent partitioned table.
3433  * 'resultRelations' is an integer list of actual RT indexes of target rel(s)
3434  * 'subpaths' is a list of Path(s) producing source data (one per rel)
3435  * 'subroots' is a list of PlannerInfo structs (one per rel)
3436  * 'withCheckOptionLists' is a list of WCO lists (one per rel)
3437  * 'returningLists' is a list of RETURNING tlists (one per rel)
3438  * 'rowMarks' is a list of PlanRowMarks (non-locking only)
3439  * 'onconflict' is the ON CONFLICT clause, or NULL
3440  * 'epqParam' is the ID of Param for EvalPlanQual re-eval
3441  */
3442 ModifyTablePath *
3443 create_modifytable_path(PlannerInfo *root, RelOptInfo *rel,
3444  CmdType operation, bool canSetTag,
3445  Index nominalRelation, Index rootRelation,
3446  bool partColsUpdated,
3447  List *resultRelations, List *subpaths,
3448  List *subroots,
3449  List *withCheckOptionLists, List *returningLists,
3450  List *rowMarks, OnConflictExpr *onconflict,
3451  int epqParam)
3452 {
3453  ModifyTablePath *pathnode = makeNode(ModifyTablePath);
3454  double total_size;
3455  ListCell *lc;
3456 
3457  Assert(list_length(resultRelations) == list_length(subpaths));
3458  Assert(list_length(resultRelations) == list_length(subroots));
3459  Assert(withCheckOptionLists == NIL ||
3460  list_length(resultRelations) == list_length(withCheckOptionLists));
3461  Assert(returningLists == NIL ||
3462  list_length(resultRelations) == list_length(returningLists));
3463 
3464  pathnode->path.pathtype = T_ModifyTable;
3465  pathnode->path.parent = rel;
3466  /* pathtarget is not interesting, just make it minimally valid */
3467  pathnode->path.pathtarget = rel->reltarget;
3468  /* For now, assume we are above any joins, so no parameterization */
3469  pathnode->path.param_info = NULL;
3470  pathnode->path.parallel_aware = false;
3471  pathnode->path.parallel_safe = false;
3472  pathnode->path.parallel_workers = 0;
3473  pathnode->path.pathkeys = NIL;
3474 
3475  /*
3476  * Compute cost & rowcount as sum of subpath costs & rowcounts.
3477  *
3478  * Currently, we don't charge anything extra for the actual table
3479  * modification work, nor for the WITH CHECK OPTIONS or RETURNING
3480  * expressions if any. It would only be window dressing, since
3481  * ModifyTable is always a top-level node and there is no way for the
3482  * costs to change any higher-level planning choices. But we might want
3483  * to make it look better sometime.
3484  */
3485  pathnode->path.startup_cost = 0;
3486  pathnode->path.total_cost = 0;
3487  pathnode->path.rows = 0;
3488  total_size = 0;
3489  foreach(lc, subpaths)
3490  {
3491  Path *subpath = (Path *) lfirst(lc);
3492 
3493  if (lc == list_head(subpaths)) /* first node? */
3494  pathnode->path.startup_cost = subpath->startup_cost;
3495  pathnode->path.total_cost += subpath->total_cost;
3496  pathnode->path.rows += subpath->rows;
3497  total_size += subpath->pathtarget->width * subpath->rows;
3498  }
3499 
3500  /*
3501  * Set width to the average width of the subpath outputs. XXX this is
3502  * totally wrong: we should report zero if no RETURNING, else an average
3503  * of the RETURNING tlist widths. But it's what happened historically,
3504  * and improving it is a task for another day.
3505  */
3506  if (pathnode->path.rows > 0)
3507  total_size /= pathnode->path.rows;
3508  pathnode->path.pathtarget->width = rint(total_size);
3509 
3510  pathnode->operation = operation;
3511  pathnode->canSetTag = canSetTag;
3512  pathnode->nominalRelation = nominalRelation;
3513  pathnode->rootRelation = rootRelation;
3514  pathnode->partColsUpdated = partColsUpdated;
3515  pathnode->resultRelations = resultRelations;
3516  pathnode->subpaths = subpaths;
3517  pathnode->subroots = subroots;
3518  pathnode->withCheckOptionLists = withCheckOptionLists;
3519  pathnode->returningLists = returningLists;
3520  pathnode->rowMarks = rowMarks;
3521  pathnode->onconflict = onconflict;
3522  pathnode->epqParam = epqParam;
3523 
3524  return pathnode;
3525 }
3526 
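Illustrative usage (a hedged sketch, not part of pathnode.c): the simplest single-target INSERT, with no WITH CHECK OPTIONS, RETURNING, row marks, or ON CONFLICT, might be built like this; final_rel, resultRti, subpath, subroot and epqParam are hypothetical.

/* Hedged sketch only -- one target relation, per-rel lists of length one. */
ModifyTablePath *mtpath;

mtpath = create_modifytable_path(root, final_rel,
                                 CMD_INSERT,
                                 true,                       /* canSetTag */
                                 resultRti,                  /* nominalRelation */
                                 0,                          /* rootRelation: not partitioned */
                                 false,                      /* partColsUpdated */
                                 list_make1_int(resultRti),  /* resultRelations */
                                 list_make1(subpath),        /* subpaths */
                                 list_make1(subroot),        /* subroots */
                                 NIL,                        /* withCheckOptionLists */
                                 NIL,                        /* returningLists */
                                 NIL,                        /* rowMarks */
                                 NULL,                       /* onconflict */
                                 epqParam);
add_path(final_rel, (Path *) mtpath);
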
3527 /*
3528  * create_limit_path
3529  * Creates a pathnode that represents performing LIMIT/OFFSET
3530  *
3531  * In addition to providing the actual OFFSET and LIMIT expressions,
3532  * the caller must provide estimates of their values for costing purposes.
3533  * The estimates are as computed by preprocess_limit(), ie, 0 represents
3534  * the clause not being present, and -1 means it's present but we could
3535  * not estimate its value.
3536  *
3537  * 'rel' is the parent relation associated with the result
3538  * 'subpath' is the path representing the source of data
3539  * 'limitOffset' is the actual OFFSET expression, or NULL
3540  * 'limitCount' is the actual LIMIT expression, or NULL
3541  * 'offset_est' is the estimated value of the OFFSET expression
3542  * 'count_est' is the estimated value of the LIMIT expression
3543  */
3544 LimitPath *
3545 create_limit_path(PlannerInfo *root, RelOptInfo *rel,
3546  Path *subpath,
3547  Node *limitOffset, Node *limitCount,
3548  int64 offset_est, int64 count_est)
3549 {
3550  LimitPath *pathnode = makeNode(LimitPath);
3551 
3552  pathnode->path.pathtype = T_Limit;
3553  pathnode->path.parent = rel;
3554  /* Limit doesn't project, so use source path's pathtarget */
3555  pathnode->path.pathtarget = subpath->pathtarget;
3556  /* For now, assume we are above any joins, so no parameterization */
3557  pathnode->path.param_info = NULL;
3558  pathnode->path.parallel_aware = false;
3559  pathnode->path.parallel_safe = rel->consider_parallel &&
3560  subpath->parallel_safe;
3561  pathnode->path.parallel_workers = subpath->parallel_workers;
3562  pathnode->path.rows = subpath->rows;
3563  pathnode->path.startup_cost = subpath->startup_cost;
3564  pathnode->path.total_cost = subpath->total_cost;
3565  pathnode->path.pathkeys = subpath->pathkeys;
3566  pathnode->subpath = subpath;
3567  pathnode->limitOffset = limitOffset;
3568  pathnode->limitCount = limitCount;
3569 
3570  /*
3571  * Adjust the output rows count and costs according to the offset/limit.
3572  */
3573  adjust_limit_rows_costs(&pathnode->path.rows,
3574  &pathnode->path.startup_cost,
3575  &pathnode->path.total_cost,
3576  offset_est, count_est);
3577 
3578  return pathnode;
3579 }
3580 
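Illustrative usage (a hedged sketch, not part of pathnode.c): a plain LIMIT 10 with no OFFSET, following the preprocess_limit() conventions described above, might be built like this; final_rel, best_path and limitCount (the LIMIT expression node) are hypothetical.

/* Hedged sketch only -- OFFSET absent (estimate 0), LIMIT estimated at 10 rows. */
LimitPath *lpath;

lpath = create_limit_path(root, final_rel,
                          best_path,
                          NULL,        /* limitOffset: clause not present */
                          limitCount,  /* LIMIT expression node */
                          0,           /* offset_est */
                          10);         /* count_est */
add_path(final_rel, (Path *) lpath);
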
3581 /*
3582  * adjust_limit_rows_costs
3583  * Adjust the size and cost estimates for a LimitPath node according to the
3584  * offset/limit.
3585  *
3586  * This is only a cosmetic issue if we are at top level, but if we are
3587  * building a subquery then it's important to report correct info to the outer
3588  * planner.
3589  *
3590  * When the offset or count couldn't be estimated, use 10% of the estimated
3591  * number of rows emitted from the subpath.
3592  *
3593  * XXX we don't bother to add eval costs of the offset/limit expressions
3594  * themselves to the path costs. In theory we should, but in most cases those
3595  * expressions are trivial and it's just not worth the trouble.
3596  */
3597 void
3598 adjust_limit_rows_costs(double *rows, /* in/out parameter */
3599  Cost *startup_cost, /* in/out parameter */
3600  Cost *total_cost, /* in/out parameter */
3601  int64 offset_est,
3602  int64 count_est)
3603 {
3604  double input_rows = *rows;
3605  Cost input_startup_cost = *startup_cost;
3606  Cost input_total_cost = *total_cost;
3607 
3608  if (offset_est != 0)
3609  {
3610  double offset_rows;
3611 
3612  if (offset_est > 0)
3613  offset_rows = (double) offset_est;
3614  else
3615  offset_rows = clamp_row_est(input_rows * 0.10);
3616  if (offset_rows > *rows)
3617  offset_rows = *rows;
3618  if (input_rows > 0)
3619  *startup_cost +=
3620  (input_total_cost - input_startup_cost)
3621  * offset_rows / input_rows;
3622  *rows -= offset_rows;
3623  if (*rows < 1)
3624  *rows = 1;
3625  }
3626 
3627  if (count_est != 0)
3628  {
3629  double count_rows;
3630 
3631  if (count_est > 0)
3632  count_rows = (double) count_est;
3633  else
3634  count_rows = clamp_row_est(input_rows * 0.10);
3635  if (count_rows > *rows)
3636  count_rows = *rows;
3637  if (input_rows > 0)
3638  *total_cost = *startup_cost +
3639  (input_total_cost - input_startup_cost)
3640  * count_rows / input_rows;
3641  *rows = count_rows;
3642  if (*rows < 1)
3643  *rows = 1;
3644  }
3645 }
3646 
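To make the arithmetic concrete, here is a standalone worked example (illustration only, not PostgreSQL code) of the two adjustment steps for a subpath estimated at 1000 rows with startup cost 0.0 and total cost 100.0, under OFFSET 100 and LIMIT 10; it simply replays the formulas above for the easy case where both estimates are positive and smaller than the input row count.

/* Standalone illustration of the adjust_limit_rows_costs formulas. */
#include <stdio.h>

int
main(void)
{
    double  rows = 1000.0;          /* subpath row estimate */
    double  startup_cost = 0.0;     /* subpath startup cost */
    double  total_cost = 100.0;     /* subpath total cost */
    double  input_rows = rows;
    double  input_startup_cost = startup_cost;
    double  input_total_cost = total_cost;
    double  offset_rows = 100.0;    /* OFFSET 100 */
    double  count_rows = 10.0;      /* LIMIT 10 */

    /* OFFSET step: pay for the skipped fraction of the input up front */
    startup_cost += (input_total_cost - input_startup_cost)
        * offset_rows / input_rows;
    rows -= offset_rows;            /* 900 rows would remain */

    /* LIMIT step: total cost covers only the fetched fraction */
    total_cost = startup_cost +
        (input_total_cost - input_startup_cost) * count_rows / input_rows;
    rows = count_rows;              /* 10 rows are emitted */

    /* prints: rows=10 startup=10.0 total=11.0 */
    printf("rows=%.0f startup=%.1f total=%.1f\n",
           rows, startup_cost, total_cost);
    return 0;
}
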
3647 
3648 /*
3649  * reparameterize_path
3650  * Attempt to modify a Path to have greater parameterization
3651  *
3652  * We use this to attempt to bring all child paths of an appendrel to the
3653  * same parameterization level, ensuring that they all enforce the same set
3654  * of join quals (and thus that that parameterization can be attributed to
3655  * an append path built from such paths). Currently, only a few path types
3656  * are supported here, though more could be added at need. We return NULL
3657  * if we can't reparameterize the given path.
3658  *
3659  * Note: we intentionally do not pass created paths to add_path(); it would
3660  * possibly try to delete them on the grounds of being cost-inferior to the
3661  * paths they were made from, and we don't want that. Paths made here are
3662  * not necessarily of general-purpose usefulness, but they can be useful
3663  * as members of an append path.
3664  */
3665 Path *
3666 reparameterize_path(PlannerInfo *root, Path *path,
3667  Relids required_outer,
3668  double loop_count)
3669 {
3670  RelOptInfo *rel = path->parent;
3671 
3672  /* Can only increase, not decrease, path's parameterization */
3673  if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer))
3674  return NULL;
3675  switch (path->pathtype)
3676  {
3677  case T_SeqScan:
3678  return create_seqscan_path(root, rel, required_outer, 0);
3679  case T_SampleScan:
3680  return (Path *) create_samplescan_path(root, rel, required_outer);
3681  case T_IndexScan:
3682  case T_IndexOnlyScan:
3683  {
3684  IndexPath *ipath = (IndexPath *) path;
3685  IndexPath *newpath = makeNode(IndexPath);
3686 
3687  /*
3688  * We can't use create_index_path directly, and would not want
3689  * to because it would re-compute the indexqual conditions
3690  * which is wasted effort. Instead we hack things a bit:
3691  * flat-copy the path node, revise its param_info, and redo
3692  * the cost estimate.
3693  */
3694  memcpy(newpath, ipath, sizeof(IndexPath));
3695  newpath->path.param_info =
3696  get_baserel_parampathinfo(root, rel, required_outer);
3697  cost_index(newpath, root, loop_count, false);
3698  return (Path *) newpath;
3699  }
3700  case T_BitmapHeapScan:
3701  {
3702  BitmapHeapPath *bpath = (BitmapHeapPath *) path;
3703 
3704  return (Path *) create_bitmap_heap_path(root,
3705  rel,
3706  bpath->bitmapqual,
3707  required_outer,
3708  loop_count, 0);
3709  }
3710  case T_SubqueryScan:
3711  {
3712  SubqueryScanPath *spath = (SubqueryScanPath *) path;
3713 
3714  return (Path *) create_subqueryscan_path(root,
3715  rel,
3716  spath->subpath,
3717  spath->path.pathkeys,
3718  required_outer);
3719  }
3720  case T_Result:
3721  /* Supported only for RTE_RESULT scan paths */
3722  if (IsA(path, Path))
3723  return create_resultscan_path(root, rel, required_outer);
3724  break;
3725  case T_Append:
3726  {
3727  AppendPath *apath = (AppendPath *) path;
3728  List *childpaths = NIL;
3729  List *partialpaths = NIL;
3730  int i;
3731  ListCell *lc;
3732 
3733  /* Reparameterize the children */
3734  i = 0;
3735  foreach(lc, apath->subpaths)
3736  {
3737  Path *spath = (Path *) lfirst(lc);
3738 
3739  spath = reparameterize_path(root, spath,
3740  required_outer,
3741  loop_count);
3742  if (spath == NULL)
3743  return NULL;
3744  /* We have to re-split the regular and partial paths */
3745  if (i < apath->first_partial_path)
3746  childpaths = lappend(childpaths, spath);
3747  else
3748  partialpaths = lappend(partialpaths, spath);
3749  i++;
3750  }
3751  return (Path *)
3752  create_append_path(root, rel, childpaths, partialpaths,
3753  apath->path.pathkeys, required_outer,
3754  apath->path.parallel_workers,
3755  apath->path.parallel_aware,
3756  apath->partitioned_rels,
3757  -1);
3758  }
3759  default:
3760  break;
3761  }
3762  return NULL;
3763 }
3764 
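Illustrative usage (a hedged sketch, not part of pathnode.c): an appendrel caller might bring each child's cheapest path up to a common parameterization and give up if any child cannot enforce the required quals; childrels, required_outer and loop_count are hypothetical.

/* Hedged sketch only -- collect reparameterized child paths, or bail out. */
List       *childpaths = NIL;
ListCell   *lc;

foreach(lc, childrels)
{
    RelOptInfo *childrel = (RelOptInfo *) lfirst(lc);
    Path       *cpath;

    cpath = reparameterize_path(root, childrel->cheapest_total_path,
                                required_outer, loop_count);
    if (cpath == NULL)
        return;                 /* some child can't be reparameterized */
    childpaths = lappend(childpaths, cpath);
}
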
3765 /*
3766  * reparameterize_path_by_child
3767  * Given a path parameterized by the parent of the given child relation,
3768  * translate the path to be parameterized by the given child relation.
3769  *
3770  * The function creates a new path of the same type as the given path, but
3771  * parameterized by the given child relation. Most fields from the original
3772  * path can simply be flat-copied, but any expressions must be adjusted to
3773  * refer to the correct varnos, and any paths must be recursively
3774  * reparameterized. Other fields that refer to specific relids also need
3775  * adjustment.
3776  *
3777  * The cost, number of rows, width and parallel path properties depend upon
3778  * path->parent, which does not change during the translation. Hence those
3779  * members are copied as they are.
3780  *
3781  * If the given path can not be reparameterized, the function returns NULL.
3782  */
3783 Path *
3784 reparameterize_path_by_child(PlannerInfo *root, Path *path,
3785  RelOptInfo *child_rel)
3786 {
3787 
3788 #define FLAT_COPY_PATH(newnode, node, nodetype) \
3789  ( (newnode) = makeNode(nodetype), \
3790  memcpy((newnode), (node), sizeof(nodetype)) )
3791 
3792 #define ADJUST_CHILD_ATTRS(node) \
3793  ((node) = \
3794  (List *) adjust_appendrel_attrs_multilevel(root, (Node *) (node), \
3795  child_rel->relids, \
3796  child_rel->top_parent_relids))
3797 
3798 #define REPARAMETERIZE_CHILD_PATH(path) \
3799 do { \
3800  (path) = reparameterize_path_by_child(root, (path), child_rel); \
3801  if ((path) == NULL) \
3802  return NULL; \
3803 } while(0);
3804 
3805 #define REPARAMETERIZE_CHILD_PATH_LIST(pathlist) \
3806 do { \
3807  if ((pathlist) != NIL) \
3808  { \
3809  (pathlist) = reparameterize_pathlist_by_child(root, (pathlist), \
3810  child_rel); \
3811  if ((pathlist) == NIL) \
3812  return NULL; \
3813  } \
3814 } while(0);
3815 
3816  Path *new_path;
3817  ParamPathInfo *new_ppi;
3818  ParamPathInfo *old_ppi;
3819  Relids required_outer;
3820 
3821  /*
3822  * If the path is not parameterized by parent of the given relation, it
3823  * doesn't need reparameterization.
3824  */
3825  if (!path->param_info ||
3826  !bms_overlap(PATH_REQ_OUTER(path), child_rel->top_parent_relids))
3827  return path;
3828 
3829  /* Reparameterize a copy of given path. */
3830  switch (nodeTag(path))
3831  {
3832  case T_Path:
3833  FLAT_COPY_PATH(new_path, path, Path);
3834  break;
3835 
3836  case T_IndexPath:
3837  {
3838  IndexPath *ipath;
3839 
3840  FLAT_COPY_PATH(ipath, path, IndexPath);
3841  ADJUST_CHILD_ATTRS(ipath->indexclauses);
3842  new_path = (Path *) ipath;
3843  }
3844  break;
3845 
3846  case T_BitmapHeapPath:
3847  {
3848  BitmapHeapPath *bhpath;
3849 
3850  FLAT_COPY_PATH(bhpath, path, BitmapHeapPath);
3851  REPARAMETERIZE_CHILD_PATH(bhpath->bitmapqual);
3852  new_path = (Path *) bhpath;
3853  }
3854  break;
3855 
3856  case T_BitmapAndPath:
3857  {
3858  BitmapAndPath *bapath;
3859 
3860  FLAT_COPY_PATH(bapath, path, BitmapAndPath);
3861  REPARAMETERIZE_CHILD_PATH_LIST(bapath->bitmapquals);
3862  new_path = (Path *) bapath;
3863  }
3864  break;
3865 
3866  case T_BitmapOrPath:
3867  {
3868  BitmapOrPath *bopath;
3869 
3870  FLAT_COPY_PATH(bopath, path, BitmapOrPath);
3871  REPARAMETERIZE_CHILD_PATH_LIST(bopath->bitmapquals);
3872  new_path = (Path *) bopath;
3873  }
3874  break;
3875 
3876  case T_TidPath:
3877  {
3878  TidPath *tpath;
3879 
3880  FLAT_COPY_PATH(tpath, path, TidPath);
3881  ADJUST_CHILD_ATTRS(tpath->tidquals);
3882  new_path = (Path *) tpath;
3883  }
3884  break;
3885 
3886  case T_ForeignPath:
3887  {
3888  ForeignPath *fpath;
3889  ReparameterizeForeignPathByChild_function rfpc_func;
3890 
3891  FLAT_COPY_PATH(fpath, path, ForeignPath);
3892  if (fpath->fdw_outerpath)
3893  REPARAMETERIZE_CHILD_PATH(fpath->fdw_outerpath);
3894 
3895  /* Hand over to FDW if needed. */
3896  rfpc_func =
3897  path->parent->fdwroutine->ReparameterizeForeignPathByChild;
3898  if (rfpc_func)
3899  fpath->fdw_private = rfpc_func(root, fpath->fdw_private,
3900  child_rel);
3901  new_path = (Path *) fpath;
3902  }
3903  break;
3904 
3905  case T_CustomPath:
3906  {
3907  CustomPath *cpath;
3908 
3909  FLAT_COPY_PATH(cpath, path, CustomPath);
3910  REPARAMETERIZE_CHILD_PATH_LIST(cpath->custom_paths);
3911  if (cpath->methods &&
3912  cpath->methods->ReparameterizeCustomPathByChild)
3913  cpath->custom_private =
3914  cpath->methods->ReparameterizeCustomPathByChild(root,
3915  cpath->custom_private,
3916  child_rel);
3917  new_path = (Path *) cpath;
3918  }
3919  break;
3920 
3921  case T_NestPath:
3922  {
3923  JoinPath *jpath;
3924 
3925  FLAT_COPY_PATH(jpath, path, NestPath);
3926 
3927  REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
3928  REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
3929  ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
3930  new_path = (Path *) jpath;
3931  }
3932  break;
3933 
3934  case T_MergePath:
3935  {
3936  JoinPath *jpath;
3937  MergePath *mpath;
3938 
3939  FLAT_COPY_PATH(mpath, path, MergePath);
3940 
3941  jpath = (JoinPath *) mpath;
3942  REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
3943  REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
3944  ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
3945  ADJUST_CHILD_ATTRS(mpath->path_mergeclauses);
3946  new_path = (Path *) mpath;
3947  }
3948  break;
3949 
3950  case T_HashPath:
3951  {
3952  JoinPath *jpath;
3953  HashPath *hpath;
3954 
3955  FLAT_COPY_PATH(hpath, path, HashPath);
3956 
3957  jpath = (JoinPath *) hpath;
3958  REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
3959  REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
3960  ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
3961  ADJUST_CHILD_ATTRS(hpath->path_hashclauses);
3962  new_path = (Path *) hpath;
3963  }
3964  break;
3965 
3966  case T_AppendPath:
3967  {
3968  AppendPath *apath;
3969 
3970  FLAT_COPY_PATH(apath, path, AppendPath);
3971  REPARAMETERIZE_CHILD_PATH_LIST(apath->subpaths);
3972  new_path = (Path *) apath;
3973  }
3974  break;
3975 
3976  case T_MergeAppendPath:
3977  {
3978  MergeAppendPath *mapath;
3979 
3980  FLAT_COPY_PATH(mapath, path, MergeAppendPath);
3981  REPARAMETERIZE_CHILD_PATH_LIST(mapath->subpaths);
3982  new_path = (Path *) mapath;
3983  }
3984  break;
3985 
3986  case T_MaterialPath:
3987  {
3988  MaterialPath *mpath;
3989 
3990  FLAT_COPY_PATH(mpath, path, MaterialPath);
3991  REPARAMETERIZE_CHILD_PATH(mpath->subpath);
3992  new_path = (Path *) mpath;
3993  }
3994  break;
3995 
3996  case T_UniquePath:
3997  {
3998  UniquePath *upath;
3999 
4000  FLAT_COPY_PATH(upath, path, UniquePath);
4001  REPARAMETERIZE_CHILD_PATH(upath->subpath);
4002  ADJUST_CHILD_ATTRS(upath->uniq_exprs);
4003  new_path = (Path *) upath;
4004  }
4005  break;
4006 
4007  case T_GatherPath:
4008  {
4009  GatherPath *gpath;
4010 
4011  FLAT_COPY_PATH(gpath, path, GatherPath);
4012  REPARAMETERIZE_CHILD_PATH(gpath->subpath);
4013  new_path = (Path *) gpath;
4014  }
4015  break;
4016 
4017  case T_GatherMergePath:
4018  {
4019  GatherMergePath *gmpath;
4020 
4021  FLAT_COPY_PATH(gmpath, path, GatherMergePath);
4022  REPARAMETERIZE_CHILD_PATH(gmpath->subpath);
4023  new_path = (Path *) gmpath;
4024  }
4025  break;
4026 
4027  default:
4028 
4029  /* We don't know how to reparameterize this path. */
4030  return NULL;
4031  }
4032 
4033  /*
4034  * Adjust the parameterization information, which refers to the topmost
4035  * parent. The topmost parent can be multiple levels away from the given
4036  * child, hence use multi-level expression adjustment routines.
4037  */
4038  old_ppi = new_path->param_info;
4039  required_outer =
4040  adjust_child_relids_multilevel(root, old_ppi->ppi_req_outer,
4041  child_rel->relids,
4042  child_rel->top_parent_relids);
4043 
4044  /* If we already have a PPI for this parameterization, just return it */
4045  new_ppi = find_param_path_info(new_path->parent, required_outer);
4046 
4047  /*
4048  * If not, build a new one and link it to the list of PPIs. For the same
4049  * reason as explained in mark_dummy_rel(), allocate new PPI in the same
4050  * context the given RelOptInfo is in.
4051  */
4052  if (new_ppi == NULL)
4053  {
4054  MemoryContext oldcontext;
4055  RelOptInfo *rel = path->parent;
4056 
4057  oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
4058 
4059  new_ppi = makeNode(ParamPathInfo);
4060  new_ppi->ppi_req_outer = bms_copy(required_outer);
4061  new_ppi->ppi_rows = old_ppi->ppi_rows;
4062  new_ppi->ppi_clauses = old_ppi->ppi_clauses;
4063  ADJUST_CHILD_ATTRS(new_ppi->ppi_clauses);
4064  rel->ppilist = lappend(rel->ppilist, new_ppi);
4065 
4066  MemoryContextSwitchTo(oldcontext);
4067  }
4068  bms_free(required_outer);
4069 
4070  new_path->param_info = new_ppi;
4071 
4072  /*
4073  * Adjust the path target if the parent of the outer relation is
4074  * referenced in the targetlist. This can happen when only the parent of
4075  * outer relation is laterally referenced in this relation.
4076  */
4077  if (bms_overlap(path->parent->lateral_relids,
4078  child_rel->top_parent_relids))
4079  {
4080  new_path->pathtarget = copy_pathtarget(new_path->pathtarget);
4081  ADJUST_CHILD_ATTRS(new_path->pathtarget->exprs);
4082  }
4083 
4084  return new_path;
4085 }
4086 
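Illustrative usage (a hedged sketch, not part of pathnode.c): a child-join builder that finds an inner path parameterized by the outer rel's topmost parent would translate it before use, abandoning the candidate join if translation fails; inner_path and outer_rel are hypothetical.

/* Hedged sketch only -- translate, or give up on this candidate join path. */
Path *translated;

translated = reparameterize_path_by_child(root, inner_path, outer_rel);
if (translated == NULL)
    return;
/* ... build the join path using 'translated' as the inner input ... */
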
4087 /*
4088  * reparameterize_pathlist_by_child
4089  * Helper function to reparameterize a list of paths by given child rel.
4090  */
4091 static List *
4092 reparameterize_pathlist_by_child(PlannerInfo *root,
4093  List *pathlist,
4094  RelOptInfo *child_rel)
4095 {
4096  ListCell *lc;
4097  List *result = NIL;
4098 
4099  foreach(lc, pathlist)
4100  {
4101  Path *path = reparameterize_path_by_child(root, lfirst(lc),
4102  child_rel);
4103 
4104  if (path == NULL)
4105  {
4106  list_free(result);
4107  return NIL;
4108  }
4109 
4110  result = lappend(result, path);
4111  }
4112 
4113  return result;
4114 }