pathnode.c
1 /*-------------------------------------------------------------------------
2  *
3  * pathnode.c
4  * Routines to manipulate pathlists and create path nodes
5  *
6  * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/optimizer/util/pathnode.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 #include "postgres.h"
16 
17 #include <math.h>
18 
19 #include "miscadmin.h"
20 #include "foreign/fdwapi.h"
21 #include "nodes/extensible.h"
22 #include "nodes/nodeFuncs.h"
23 #include "optimizer/appendinfo.h"
24 #include "optimizer/clauses.h"
25 #include "optimizer/cost.h"
26 #include "optimizer/optimizer.h"
27 #include "optimizer/pathnode.h"
28 #include "optimizer/paths.h"
29 #include "optimizer/planmain.h"
30 #include "optimizer/prep.h"
31 #include "optimizer/restrictinfo.h"
32 #include "optimizer/tlist.h"
33 #include "parser/parsetree.h"
34 #include "utils/lsyscache.h"
35 #include "utils/memutils.h"
36 #include "utils/selfuncs.h"
37 
38 
39 typedef enum
40 {
41  COSTS_EQUAL, /* path costs are fuzzily equal */
42  COSTS_BETTER1, /* first path is cheaper than second */
43  COSTS_BETTER2, /* second path is cheaper than first */
44  COSTS_DIFFERENT /* neither path dominates the other on cost */
45 } PathCostComparison;
46 
47 /*
48  * STD_FUZZ_FACTOR is the normal fuzz factor for compare_path_costs_fuzzily.
49  * XXX is it worth making this user-controllable? It provides a tradeoff
50  * between planner runtime and the accuracy of path cost comparisons.
51  */
52 #define STD_FUZZ_FACTOR 1.01
53 
54 static List *translate_sub_tlist(List *tlist, int relid);
55 static int append_total_cost_compare(const ListCell *a, const ListCell *b);
56 static int append_startup_cost_compare(const ListCell *a, const ListCell *b);
57 static List *reparameterize_pathlist_by_child(PlannerInfo *root,
58  List *pathlist,
59  RelOptInfo *child_rel);
60 
61 
62 /*****************************************************************************
63  * MISC. PATH UTILITIES
64  *****************************************************************************/
65 
66 /*
67  * compare_path_costs
68  * Return -1, 0, or +1 according as path1 is cheaper, the same cost,
69  * or more expensive than path2 for the specified criterion.
70  */
71 int
72 compare_path_costs(Path *path1, Path *path2, CostSelector criterion)
73 {
74  if (criterion == STARTUP_COST)
75  {
76  if (path1->startup_cost < path2->startup_cost)
77  return -1;
78  if (path1->startup_cost > path2->startup_cost)
79  return +1;
80 
81  /*
82  * If paths have the same startup cost (not at all unlikely), order
83  * them by total cost.
84  */
85  if (path1->total_cost < path2->total_cost)
86  return -1;
87  if (path1->total_cost > path2->total_cost)
88  return +1;
89  }
90  else
91  {
92  if (path1->total_cost < path2->total_cost)
93  return -1;
94  if (path1->total_cost > path2->total_cost)
95  return +1;
96 
97  /*
98  * If paths have the same total cost, order them by startup cost.
99  */
100  if (path1->startup_cost < path2->startup_cost)
101  return -1;
102  if (path1->startup_cost > path2->startup_cost)
103  return +1;
104  }
105  return 0;
106 }
107 
108 /*
109  * compare_fractional_path_costs
110  * Return -1, 0, or +1 according as path1 is cheaper, the same cost,
111  * or more expensive than path2 for fetching the specified fraction
112  * of the total tuples.
113  *
114  * If fraction is <= 0 or > 1, we interpret it as 1, ie, we select the
115  * path with the cheaper total_cost.
116  */
117 int
118 compare_fractional_path_costs(Path *path1, Path *path2,
119  double fraction)
120 {
121  Cost cost1,
122  cost2;
123 
124  if (fraction <= 0.0 || fraction >= 1.0)
125  return compare_path_costs(path1, path2, TOTAL_COST);
126  cost1 = path1->startup_cost +
127  fraction * (path1->total_cost - path1->startup_cost);
128  cost2 = path2->startup_cost +
129  fraction * (path2->total_cost - path2->startup_cost);
130  if (cost1 < cost2)
131  return -1;
132  if (cost1 > cost2)
133  return +1;
134  return 0;
135 }
136 
137 /*
138  * compare_path_costs_fuzzily
139  * Compare the costs of two paths to see if either can be said to
140  * dominate the other.
141  *
142  * We use fuzzy comparisons so that add_path() can avoid keeping both of
143  * a pair of paths that really have insignificantly different cost.
144  *
145  * The fuzz_factor argument must be 1.0 plus delta, where delta is the
146  * fraction of the smaller cost that is considered to be a significant
147  * difference. For example, fuzz_factor = 1.01 makes the fuzziness limit
148  * be 1% of the smaller cost.
149  *
150  * The two paths are said to have "equal" costs if both startup and total
151  * costs are fuzzily the same. Path1 is said to be better than path2 if
152  * it has fuzzily better startup cost and fuzzily no worse total cost,
153  * or if it has fuzzily better total cost and fuzzily no worse startup cost.
154  * Path2 is better than path1 if the reverse holds. Finally, if one path
155  * is fuzzily better than the other on startup cost and fuzzily worse on
156  * total cost, we just say that their costs are "different", since neither
157  * dominates the other across the whole performance spectrum.
158  *
159  * This function also enforces a policy rule that paths for which the relevant
160  * one of parent->consider_startup and parent->consider_param_startup is false
161  * cannot survive comparisons solely on the grounds of good startup cost, so
162  * we never return COSTS_DIFFERENT when that is true for the total-cost loser.
163  * (But if total costs are fuzzily equal, we compare startup costs anyway,
164  * in hopes of eliminating one path or the other.)
165  */
166 static PathCostComparison
167 compare_path_costs_fuzzily(Path *path1, Path *path2, double fuzz_factor)
168 {
169 #define CONSIDER_PATH_STARTUP_COST(p) \
170  ((p)->param_info == NULL ? (p)->parent->consider_startup : (p)->parent->consider_param_startup)
171 
172  /*
173  * Check total cost first since it's more likely to be different; many
174  * paths have zero startup cost.
175  */
176  if (path1->total_cost > path2->total_cost * fuzz_factor)
177  {
178  /* path1 fuzzily worse on total cost */
179  if (CONSIDER_PATH_STARTUP_COST(path1) &&
180  path2->startup_cost > path1->startup_cost * fuzz_factor)
181  {
182  /* ... but path2 fuzzily worse on startup, so DIFFERENT */
183  return COSTS_DIFFERENT;
184  }
185  /* else path2 dominates */
186  return COSTS_BETTER2;
187  }
188  if (path2->total_cost > path1->total_cost * fuzz_factor)
189  {
190  /* path2 fuzzily worse on total cost */
191  if (CONSIDER_PATH_STARTUP_COST(path2) &&
192  path1->startup_cost > path2->startup_cost * fuzz_factor)
193  {
194  /* ... but path1 fuzzily worse on startup, so DIFFERENT */
195  return COSTS_DIFFERENT;
196  }
197  /* else path1 dominates */
198  return COSTS_BETTER1;
199  }
200  /* fuzzily the same on total cost ... */
201  if (path1->startup_cost > path2->startup_cost * fuzz_factor)
202  {
203  /* ... but path1 fuzzily worse on startup, so path2 wins */
204  return COSTS_BETTER2;
205  }
206  if (path2->startup_cost > path1->startup_cost * fuzz_factor)
207  {
208  /* ... but path2 fuzzily worse on startup, so path1 wins */
209  return COSTS_BETTER1;
210  }
211  /* fuzzily the same on both costs */
212  return COSTS_EQUAL;
213 
214 #undef CONSIDER_PATH_STARTUP_COST
215 }
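/*
 * Worked example (editor's illustration, not part of pathnode.c): with the
 * standard fuzz_factor of 1.01, total costs of 100.0 and 100.5 are fuzzily
 * equal (100.5 <= 100.0 * 1.01), so the comparison falls through to startup
 * cost.  By contrast, comparing a path with (startup 0, total 120) against
 * one with (startup 50, total 100), when startup cost is being considered,
 * yields COSTS_DIFFERENT: each path is fuzzily worse than the other on one
 * dimension, so add_path() will keep both.
 */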
216 
217 /*
218  * set_cheapest
219  * Find the minimum-cost paths from among a relation's paths,
220  * and save them in the rel's cheapest-path fields.
221  *
222  * cheapest_total_path is normally the cheapest-total-cost unparameterized
223  * path; but if there are no unparameterized paths, we assign it to be the
224  * best (cheapest least-parameterized) parameterized path. However, only
225  * unparameterized paths are considered candidates for cheapest_startup_path,
226  * so that will be NULL if there are no unparameterized paths.
227  *
228  * The cheapest_parameterized_paths list collects all parameterized paths
229  * that have survived the add_path() tournament for this relation. (Since
230  * add_path ignores pathkeys for a parameterized path, these will be paths
231  * that have best cost or best row count for their parameterization. We
232  * may also have both a parallel-safe and a non-parallel-safe path for the
233  * same parameterization in some cases, but this should be relatively rare
234  * since, most typically, all paths for the same relation
235  * will be parallel-safe or none of them will.)
236  *
237  * cheapest_parameterized_paths always includes the cheapest-total
238  * unparameterized path, too, if there is one; the users of that list find
239  * it more convenient if that's included.
240  *
241  * This is normally called only after we've finished constructing the path
242  * list for the rel node.
243  */
244 void
245 set_cheapest(RelOptInfo *parent_rel)
246 {
247  Path *cheapest_startup_path;
248  Path *cheapest_total_path;
249  Path *best_param_path;
250  List *parameterized_paths;
251  ListCell *p;
252 
253  Assert(IsA(parent_rel, RelOptInfo));
254 
255  if (parent_rel->pathlist == NIL)
256  elog(ERROR, "could not devise a query plan for the given query");
257 
258  cheapest_startup_path = cheapest_total_path = best_param_path = NULL;
259  parameterized_paths = NIL;
260 
261  foreach(p, parent_rel->pathlist)
262  {
263  Path *path = (Path *) lfirst(p);
264  int cmp;
265 
266  if (path->param_info)
267  {
268  /* Parameterized path, so add it to parameterized_paths */
269  parameterized_paths = lappend(parameterized_paths, path);
270 
271  /*
272  * If we have an unparameterized cheapest-total, we no longer care
273  * about finding the best parameterized path, so move on.
274  */
275  if (cheapest_total_path)
276  continue;
277 
278  /*
279  * Otherwise, track the best parameterized path, which is the one
280  * with least total cost among those of the minimum
281  * parameterization.
282  */
283  if (best_param_path == NULL)
284  best_param_path = path;
285  else
286  {
287  switch (bms_subset_compare(PATH_REQ_OUTER(path),
288  PATH_REQ_OUTER(best_param_path)))
289  {
290  case BMS_EQUAL:
291  /* keep the cheaper one */
292  if (compare_path_costs(path, best_param_path,
293  TOTAL_COST) < 0)
294  best_param_path = path;
295  break;
296  case BMS_SUBSET1:
297  /* new path is less-parameterized */
298  best_param_path = path;
299  break;
300  case BMS_SUBSET2:
301  /* old path is less-parameterized, keep it */
302  break;
303  case BMS_DIFFERENT:
304 
305  /*
306  * This means that neither path has the least possible
307  * parameterization for the rel. We'll sit on the old
308  * path until something better comes along.
309  */
310  break;
311  }
312  }
313  }
314  else
315  {
316  /* Unparameterized path, so consider it for cheapest slots */
317  if (cheapest_total_path == NULL)
318  {
319  cheapest_startup_path = cheapest_total_path = path;
320  continue;
321  }
322 
323  /*
324  * If we find two paths of identical costs, try to keep the
325  * better-sorted one. The paths might have unrelated sort
326  * orderings, in which case we can only guess which might be
327  * better to keep, but if one is superior then we definitely
328  * should keep that one.
329  */
330  cmp = compare_path_costs(cheapest_startup_path, path, STARTUP_COST);
331  if (cmp > 0 ||
332  (cmp == 0 &&
333  compare_pathkeys(cheapest_startup_path->pathkeys,
334  path->pathkeys) == PATHKEYS_BETTER2))
335  cheapest_startup_path = path;
336 
337  cmp = compare_path_costs(cheapest_total_path, path, TOTAL_COST);
338  if (cmp > 0 ||
339  (cmp == 0 &&
340  compare_pathkeys(cheapest_total_path->pathkeys,
341  path->pathkeys) == PATHKEYS_BETTER2))
342  cheapest_total_path = path;
343  }
344  }
345 
346  /* Add cheapest unparameterized path, if any, to parameterized_paths */
347  if (cheapest_total_path)
348  parameterized_paths = lcons(cheapest_total_path, parameterized_paths);
349 
350  /*
351  * If there is no unparameterized path, use the best parameterized path as
352  * cheapest_total_path (but not as cheapest_startup_path).
353  */
354  if (cheapest_total_path == NULL)
355  cheapest_total_path = best_param_path;
356  Assert(cheapest_total_path != NULL);
357 
358  parent_rel->cheapest_startup_path = cheapest_startup_path;
359  parent_rel->cheapest_total_path = cheapest_total_path;
360  parent_rel->cheapest_unique_path = NULL; /* computed only if needed */
361  parent_rel->cheapest_parameterized_paths = parameterized_paths;
362 }
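/*
 * Usage sketch (editor's illustration, not part of pathnode.c): callers such
 * as set_rel_pathlist() in allpaths.c and standard_join_search() in
 * joinrels.c invoke set_cheapest() once a rel's pathlist is complete,
 * roughly as below.  The guard macro is hypothetical; the sketch is never
 * compiled.
 */
#ifdef PATHNODE_USAGE_EXAMPLES
static void
example_finish_rel(RelOptInfo *rel)
{
	/* ... all add_path() calls for this rel have been made by now ... */

	/* Now find the cheapest of the paths for this rel */
	set_cheapest(rel);
}
#endif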
363 
364 /*
365  * add_path
366  * Consider a potential implementation path for the specified parent rel,
367  * and add it to the rel's pathlist if it is worthy of consideration.
368  * A path is worthy if it has a better sort order (better pathkeys) or
369  * cheaper cost (on either dimension), or generates fewer rows, than any
370  * existing path that has the same or superset parameterization rels.
371  * We also consider parallel-safe paths more worthy than others.
372  *
373  * We also remove from the rel's pathlist any old paths that are dominated
374  * by new_path --- that is, new_path is cheaper, at least as well ordered,
375  * generates no more rows, requires no outer rels not required by the old
376  * path, and is no less parallel-safe.
377  *
378  * In most cases, a path with a superset parameterization will generate
379  * fewer rows (since it has more join clauses to apply), so that those two
380  * figures of merit move in opposite directions; this means that a path of
381  * one parameterization can seldom dominate a path of another. But such
382  * cases do arise, so we make the full set of checks anyway.
383  *
384  * There are two policy decisions embedded in this function, along with
385  * its sibling add_path_precheck. First, we treat all parameterized paths
386  * as having NIL pathkeys, so that they cannot win comparisons on the
387  * basis of sort order. This is to reduce the number of parameterized
388  * paths that are kept; see discussion in src/backend/optimizer/README.
389  *
390  * Second, we only consider cheap startup cost to be interesting if
391  * parent_rel->consider_startup is true for an unparameterized path, or
392  * parent_rel->consider_param_startup is true for a parameterized one.
393  * Again, this allows discarding useless paths sooner.
394  *
395  * The pathlist is kept sorted by total_cost, with cheaper paths
396  * at the front. Within this routine, that's simply a speed hack:
397  * doing it that way makes it more likely that we will reject an inferior
398  * path after a few comparisons, rather than many comparisons.
399  * However, add_path_precheck relies on this ordering to exit early
400  * when possible.
401  *
402  * NOTE: discarded Path objects are immediately pfree'd to reduce planner
403  * memory consumption. We dare not try to free the substructure of a Path,
404  * since much of it may be shared with other Paths or the query tree itself;
405  * but just recycling discarded Path nodes is a very useful savings in
406  * a large join tree. We can recycle the List nodes of pathlist, too.
407  *
408  * As noted in optimizer/README, deleting a previously-accepted Path is
409  * safe because we know that Paths of this rel cannot yet be referenced
410  * from any other rel, such as a higher-level join. However, in some cases
411  * it is possible that a Path is referenced by another Path for its own
412  * rel; we must not delete such a Path, even if it is dominated by the new
413  * Path. Currently this occurs only for IndexPath objects, which may be
414  * referenced as children of BitmapHeapPaths as well as being paths in
415  * their own right. Hence, we don't pfree IndexPaths when rejecting them.
416  *
417  * 'parent_rel' is the relation entry to which the path corresponds.
418  * 'new_path' is a potential path for parent_rel.
419  *
420  * Returns nothing, but modifies parent_rel->pathlist.
421  */
422 void
423 add_path(RelOptInfo *parent_rel, Path *new_path)
424 {
425  bool accept_new = true; /* unless we find a superior old path */
426  int insert_at = 0; /* where to insert new item */
427  List *new_path_pathkeys;
428  ListCell *p1;
429 
430  /*
431  * This is a convenient place to check for query cancel --- no part of the
432  * planner goes very long without calling add_path().
433  */
434  CHECK_FOR_INTERRUPTS();
435 
436  /* Pretend parameterized paths have no pathkeys, per comment above */
437  new_path_pathkeys = new_path->param_info ? NIL : new_path->pathkeys;
438 
439  /*
440  * Loop to check proposed new path against old paths. Note it is possible
441  * for more than one old path to be tossed out because new_path dominates
442  * it.
443  */
444  foreach(p1, parent_rel->pathlist)
445  {
446  Path *old_path = (Path *) lfirst(p1);
447  bool remove_old = false; /* unless new proves superior */
448  PathCostComparison costcmp;
449  PathKeysComparison keyscmp;
450  BMS_Comparison outercmp;
451 
452  /*
453  * Do a fuzzy cost comparison with standard fuzziness limit.
454  */
455  costcmp = compare_path_costs_fuzzily(new_path, old_path,
456  STD_FUZZ_FACTOR);
457 
458  /*
459  * If the two paths compare differently for startup and total cost,
460  * then we want to keep both, and we can skip comparing pathkeys and
461  * required_outer rels. If they compare the same, proceed with the
462  * other comparisons. Row count is checked last. (We make the tests
463  * in this order because the cost comparison is most likely to turn
464  * out "different", and the pathkeys comparison next most likely. As
465  * explained above, row count very seldom makes a difference, so even
466  * though it's cheap to compare there's not much point in checking it
467  * earlier.)
468  */
469  if (costcmp != COSTS_DIFFERENT)
470  {
471  /* Similarly check to see if either dominates on pathkeys */
472  List *old_path_pathkeys;
473 
474  old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
475  keyscmp = compare_pathkeys(new_path_pathkeys,
476  old_path_pathkeys);
477  if (keyscmp != PATHKEYS_DIFFERENT)
478  {
479  switch (costcmp)
480  {
481  case COSTS_EQUAL:
482  outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
483  PATH_REQ_OUTER(old_path));
484  if (keyscmp == PATHKEYS_BETTER1)
485  {
486  if ((outercmp == BMS_EQUAL ||
487  outercmp == BMS_SUBSET1) &&
488  new_path->rows <= old_path->rows &&
489  new_path->parallel_safe >= old_path->parallel_safe)
490  remove_old = true; /* new dominates old */
491  }
492  else if (keyscmp == PATHKEYS_BETTER2)
493  {
494  if ((outercmp == BMS_EQUAL ||
495  outercmp == BMS_SUBSET2) &&
496  new_path->rows >= old_path->rows &&
497  new_path->parallel_safe <= old_path->parallel_safe)
498  accept_new = false; /* old dominates new */
499  }
500  else /* keyscmp == PATHKEYS_EQUAL */
501  {
502  if (outercmp == BMS_EQUAL)
503  {
504  /*
505  * Same pathkeys and outer rels, and fuzzily
506  * the same cost, so keep just one; to decide
507  * which, first check parallel-safety, then
508  * rows, then do a fuzzy cost comparison with
509  * very small fuzz limit. (We used to do an
510  * exact cost comparison, but that results in
511  * annoying platform-specific plan variations
512  * due to roundoff in the cost estimates.) If
513  * things are still tied, arbitrarily keep
514  * only the old path. Notice that we will
515  * keep only the old path even if the
516  * less-fuzzy comparison decides the startup
517  * and total costs compare differently.
518  */
519  if (new_path->parallel_safe >
520  old_path->parallel_safe)
521  remove_old = true; /* new dominates old */
522  else if (new_path->parallel_safe <
523  old_path->parallel_safe)
524  accept_new = false; /* old dominates new */
525  else if (new_path->rows < old_path->rows)
526  remove_old = true; /* new dominates old */
527  else if (new_path->rows > old_path->rows)
528  accept_new = false; /* old dominates new */
529  else if (compare_path_costs_fuzzily(new_path,
530  old_path,
531  1.0000000001) == COSTS_BETTER1)
532  remove_old = true; /* new dominates old */
533  else
534  accept_new = false; /* old equals or
535  * dominates new */
536  }
537  else if (outercmp == BMS_SUBSET1 &&
538  new_path->rows <= old_path->rows &&
539  new_path->parallel_safe >= old_path->parallel_safe)
540  remove_old = true; /* new dominates old */
541  else if (outercmp == BMS_SUBSET2 &&
542  new_path->rows >= old_path->rows &&
543  new_path->parallel_safe <= old_path->parallel_safe)
544  accept_new = false; /* old dominates new */
545  /* else different parameterizations, keep both */
546  }
547  break;
548  case COSTS_BETTER1:
549  if (keyscmp != PATHKEYS_BETTER2)
550  {
551  outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
552  PATH_REQ_OUTER(old_path));
553  if ((outercmp == BMS_EQUAL ||
554  outercmp == BMS_SUBSET1) &&
555  new_path->rows <= old_path->rows &&
556  new_path->parallel_safe >= old_path->parallel_safe)
557  remove_old = true; /* new dominates old */
558  }
559  break;
560  case COSTS_BETTER2:
561  if (keyscmp != PATHKEYS_BETTER1)
562  {
563  outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
564  PATH_REQ_OUTER(old_path));
565  if ((outercmp == BMS_EQUAL ||
566  outercmp == BMS_SUBSET2) &&
567  new_path->rows >= old_path->rows &&
568  new_path->parallel_safe <= old_path->parallel_safe)
569  accept_new = false; /* old dominates new */
570  }
571  break;
572  case COSTS_DIFFERENT:
573 
574  /*
575  * can't get here, but keep this case to keep compiler
576  * quiet
577  */
578  break;
579  }
580  }
581  }
582 
583  /*
584  * Remove current element from pathlist if dominated by new.
585  */
586  if (remove_old)
587  {
588  parent_rel->pathlist = foreach_delete_current(parent_rel->pathlist,
589  p1);
590 
591  /*
592  * Delete the data pointed-to by the deleted cell, if possible
593  */
594  if (!IsA(old_path, IndexPath))
595  pfree(old_path);
596  }
597  else
598  {
599  /* new belongs after this old path if it has cost >= old's */
600  if (new_path->total_cost >= old_path->total_cost)
601  insert_at = foreach_current_index(p1) + 1;
602  }
603 
604  /*
605  * If we found an old path that dominates new_path, we can quit
606  * scanning the pathlist; we will not add new_path, and we assume
607  * new_path cannot dominate any other elements of the pathlist.
608  */
609  if (!accept_new)
610  break;
611  }
612 
613  if (accept_new)
614  {
615  /* Accept the new path: insert it at proper place in pathlist */
616  parent_rel->pathlist =
617  list_insert_nth(parent_rel->pathlist, insert_at, new_path);
618  }
619  else
620  {
621  /* Reject and recycle the new path */
622  if (!IsA(new_path, IndexPath))
623  pfree(new_path);
624  }
625 }
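/*
 * Usage sketch (editor's illustration, not part of pathnode.c): a scan-path
 * builder along the lines of set_plain_rel_pathlist() in allpaths.c simply
 * constructs candidate paths and hands them to add_path(), which applies the
 * rules above to keep or discard them.  The guard macro is hypothetical.
 */
#ifdef PATHNODE_USAGE_EXAMPLES
static void
example_consider_seqscan(PlannerInfo *root, RelOptInfo *rel)
{
	/* LATERAL references, if any, force a parameterized scan */
	Relids		required_outer = rel->lateral_relids;

	/* consider a plain (non-parallel) sequential scan */
	add_path(rel, create_seqscan_path(root, rel, required_outer, 0));
}
#endif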
626 
627 /*
628  * add_path_precheck
629  * Check whether a proposed new path could possibly get accepted.
630  * We assume we know the path's pathkeys and parameterization accurately,
631  * and have lower bounds for its costs.
632  *
633  * Note that we do not know the path's rowcount, since getting an estimate for
634  * that is too expensive to do before prechecking. We assume here that paths
635  * of a superset parameterization will generate fewer rows; if that holds,
636  * then paths with different parameterizations cannot dominate each other
637  * and so we can simply ignore existing paths of another parameterization.
638  * (In the infrequent cases where that rule of thumb fails, add_path will
639  * get rid of the inferior path.)
640  *
641  * At the time this is called, we haven't actually built a Path structure,
642  * so the required information has to be passed piecemeal.
643  */
644 bool
645 add_path_precheck(RelOptInfo *parent_rel,
646  Cost startup_cost, Cost total_cost,
647  List *pathkeys, Relids required_outer)
648 {
649  List *new_path_pathkeys;
650  bool consider_startup;
651  ListCell *p1;
652 
653  /* Pretend parameterized paths have no pathkeys, per add_path policy */
654  new_path_pathkeys = required_outer ? NIL : pathkeys;
655 
656  /* Decide whether new path's startup cost is interesting */
657  consider_startup = required_outer ? parent_rel->consider_param_startup : parent_rel->consider_startup;
658 
659  foreach(p1, parent_rel->pathlist)
660  {
661  Path *old_path = (Path *) lfirst(p1);
662  PathKeysComparison keyscmp;
663 
664  /*
665  * We are looking for an old_path with the same parameterization (and
666  * by assumption the same rowcount) that dominates the new path on
667  * pathkeys as well as both cost metrics. If we find one, we can
668  * reject the new path.
669  *
670  * Cost comparisons here should match compare_path_costs_fuzzily.
671  */
672  if (total_cost > old_path->total_cost * STD_FUZZ_FACTOR)
673  {
674  /* new path can win on startup cost only if consider_startup */
675  if (startup_cost > old_path->startup_cost * STD_FUZZ_FACTOR ||
676  !consider_startup)
677  {
678  /* new path loses on cost, so check pathkeys... */
679  List *old_path_pathkeys;
680 
681  old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
682  keyscmp = compare_pathkeys(new_path_pathkeys,
683  old_path_pathkeys);
684  if (keyscmp == PATHKEYS_EQUAL ||
685  keyscmp == PATHKEYS_BETTER2)
686  {
687  /* new path does not win on pathkeys... */
688  if (bms_equal(required_outer, PATH_REQ_OUTER(old_path)))
689  {
690  /* Found an old path that dominates the new one */
691  return false;
692  }
693  }
694  }
695  }
696  else
697  {
698  /*
699  * Since the pathlist is sorted by total_cost, we can stop looking
700  * once we reach a path with a total_cost larger than the new
701  * path's.
702  */
703  break;
704  }
705  }
706 
707  return true;
708 }
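/*
 * Usage sketch (editor's illustration, not part of pathnode.c): join-path
 * builders in joinpath.c, e.g. try_nestloop_path(), compute quick
 * lower-bound costs and consult add_path_precheck() before paying for the
 * full Path construction.  Loosely modeled on that code; the guard macro is
 * hypothetical and the sketch is never compiled.
 */
#ifdef PATHNODE_USAGE_EXAMPLES
static void
example_precheck_nestloop(PlannerInfo *root, RelOptInfo *joinrel,
						  JoinType jointype, JoinPathExtraData *extra,
						  Path *outer_path, Path *inner_path,
						  List *pathkeys, Relids required_outer)
{
	JoinCostWorkspace workspace;

	/* cheap lower-bound cost estimate for the would-be nestloop */
	initial_cost_nestloop(root, &workspace, jointype,
						  outer_path, inner_path, extra);

	/* build the real path only if it could survive the add_path tournament */
	if (add_path_precheck(joinrel,
						  workspace.startup_cost, workspace.total_cost,
						  pathkeys, required_outer))
	{
		/* ... create_nestloop_path() and add_path() would go here ... */
	}
}
#endif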
709 
710 /*
711  * add_partial_path
712  * Like add_path, our goal here is to consider whether a path is worthy
713  * of being kept around, but the considerations here are a bit different.
714  * A partial path is one which can be executed in any number of workers in
715  * parallel such that each worker will generate a subset of the path's
716  * overall result.
717  *
718  * As in add_path, the partial_pathlist is kept sorted with the cheapest
719  * total path in front. This is depended on by multiple places, which
720  * just take the front entry as the cheapest path without searching.
721  *
722  * We don't generate parameterized partial paths for several reasons. Most
723  * importantly, they're not safe to execute, because there's nothing to
724  * make sure that a parallel scan within the parameterized portion of the
725  * plan is running with the same value in every worker at the same time.
726  * Fortunately, it seems unlikely to be worthwhile anyway, because having
727  * each worker scan the entire outer relation and a subset of the inner
728  * relation will generally be a terrible plan. The inner (parameterized)
729  * side of the plan will be small anyway. There could be rare cases where
730  * this wins big - e.g. if join order constraints put a 1-row relation on
731  * the outer side of the topmost join with a parameterized plan on the inner
732  * side - but we'll have to be content not to handle such cases until
733  * somebody builds an executor infrastructure that can cope with them.
734  *
735  * Because we don't consider parameterized paths here, we also don't
736  * need to consider the row counts as a measure of quality: every path will
737  * produce the same number of rows. Neither do we need to consider startup
738  * costs: parallelism is only used for plans that will be run to completion.
739  * Therefore, this routine is much simpler than add_path: it needs to
740  * consider only pathkeys and total cost.
741  *
742  * As with add_path, we pfree paths that are found to be dominated by
743  * another partial path; this requires that there be no other references to
744  * such paths yet. Hence, GatherPaths must not be created for a rel until
745  * we're done creating all partial paths for it. Unlike add_path, we don't
746  * take an exception for IndexPaths as partial index paths won't be
747  * referenced by partial BitmapHeapPaths.
748  */
749 void
750 add_partial_path(RelOptInfo *parent_rel, Path *new_path)
751 {
752  bool accept_new = true; /* unless we find a superior old path */
753  int insert_at = 0; /* where to insert new item */
754  ListCell *p1;
755 
756  /* Check for query cancel. */
757  CHECK_FOR_INTERRUPTS();
758 
759  /* Path to be added must be parallel safe. */
760  Assert(new_path->parallel_safe);
761 
762  /* Relation should be OK for parallelism, too. */
763  Assert(parent_rel->consider_parallel);
764 
765  /*
766  * As in add_path, throw out any paths which are dominated by the new
767  * path, but throw out the new path if some existing path dominates it.
768  */
769  foreach(p1, parent_rel->partial_pathlist)
770  {
771  Path *old_path = (Path *) lfirst(p1);
772  bool remove_old = false; /* unless new proves superior */
773  PathKeysComparison keyscmp;
774 
775  /* Compare pathkeys. */
776  keyscmp = compare_pathkeys(new_path->pathkeys, old_path->pathkeys);
777 
778  /* Unless pathkeys are incompatible, keep just one of the two paths. */
779  if (keyscmp != PATHKEYS_DIFFERENT)
780  {
781  if (new_path->total_cost > old_path->total_cost * STD_FUZZ_FACTOR)
782  {
783  /* New path costs more; keep it only if pathkeys are better. */
784  if (keyscmp != PATHKEYS_BETTER1)
785  accept_new = false;
786  }
787  else if (old_path->total_cost > new_path->total_cost
788  * STD_FUZZ_FACTOR)
789  {
790  /* Old path costs more; keep it only if pathkeys are better. */
791  if (keyscmp != PATHKEYS_BETTER2)
792  remove_old = true;
793  }
794  else if (keyscmp == PATHKEYS_BETTER1)
795  {
796  /* Costs are about the same, new path has better pathkeys. */
797  remove_old = true;
798  }
799  else if (keyscmp == PATHKEYS_BETTER2)
800  {
801  /* Costs are about the same, old path has better pathkeys. */
802  accept_new = false;
803  }
804  else if (old_path->total_cost > new_path->total_cost * 1.0000000001)
805  {
806  /* Pathkeys are the same, and the old path costs more. */
807  remove_old = true;
808  }
809  else
810  {
811  /*
812  * Pathkeys are the same, and new path isn't materially
813  * cheaper.
814  */
815  accept_new = false;
816  }
817  }
818 
819  /*
820  * Remove current element from partial_pathlist if dominated by new.
821  */
822  if (remove_old)
823  {
824  parent_rel->partial_pathlist =
825  foreach_delete_current(parent_rel->partial_pathlist, p1);
826  pfree(old_path);
827  }
828  else
829  {
830  /* new belongs after this old path if it has cost >= old's */
831  if (new_path->total_cost >= old_path->total_cost)
832  insert_at = foreach_current_index(p1) + 1;
833  }
834 
835  /*
836  * If we found an old path that dominates new_path, we can quit
837  * scanning the partial_pathlist; we will not add new_path, and we
838  * assume new_path cannot dominate any later path.
839  */
840  if (!accept_new)
841  break;
842  }
843 
844  if (accept_new)
845  {
846  /* Accept the new path: insert it at proper place */
847  parent_rel->partial_pathlist =
848  list_insert_nth(parent_rel->partial_pathlist, insert_at, new_path);
849  }
850  else
851  {
852  /* Reject and recycle the new path */
853  pfree(new_path);
854  }
855 }
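/*
 * Usage sketch (editor's illustration, not part of pathnode.c): partial
 * paths are generated much like regular ones; e.g.
 * create_plain_partial_paths() in allpaths.c builds a parallel sequential
 * scan and hands it to add_partial_path(), roughly as below.  The guard
 * macro is hypothetical.
 */
#ifdef PATHNODE_USAGE_EXAMPLES
static void
example_consider_partial_seqscan(PlannerInfo *root, RelOptInfo *rel,
								 int parallel_workers)
{
	/* an unparameterized seqscan with parallel_workers > 0 is a partial path */
	if (parallel_workers > 0)
		add_partial_path(rel,
						 create_seqscan_path(root, rel, NULL, parallel_workers));
}
#endif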
856 
857 /*
858  * add_partial_path_precheck
859  * Check whether a proposed new partial path could possibly get accepted.
860  *
861  * Unlike add_path_precheck, we can ignore startup cost and parameterization,
862  * since they don't matter for partial paths (see add_partial_path). But
863  * we do want to make sure we don't add a partial path if there's already
864  * a complete path that dominates it, since in that case the proposed path
865  * is surely a loser.
866  */
867 bool
868 add_partial_path_precheck(RelOptInfo *parent_rel, Cost total_cost,
869  List *pathkeys)
870 {
871  ListCell *p1;
872 
873  /*
874  * Our goal here is twofold. First, we want to find out whether this path
875  * is clearly inferior to some existing partial path. If so, we want to
876  * reject it immediately. Second, we want to find out whether this path
877  * is clearly superior to some existing partial path -- at least, modulo
878  * final cost computations. If so, we definitely want to consider it.
879  *
880  * Unlike add_path(), we always compare pathkeys here. This is because we
881  * expect partial_pathlist to be very short, and getting a definitive
882  * answer at this stage avoids the need to call add_path_precheck.
883  */
884  foreach(p1, parent_rel->partial_pathlist)
885  {
886  Path *old_path = (Path *) lfirst(p1);
887  PathKeysComparison keyscmp;
888 
889  keyscmp = compare_pathkeys(pathkeys, old_path->pathkeys);
890  if (keyscmp != PATHKEYS_DIFFERENT)
891  {
892  if (total_cost > old_path->total_cost * STD_FUZZ_FACTOR &&
893  keyscmp != PATHKEYS_BETTER1)
894  return false;
895  if (old_path->total_cost > total_cost * STD_FUZZ_FACTOR &&
896  keyscmp != PATHKEYS_BETTER2)
897  return true;
898  }
899  }
900 
901  /*
902  * This path is neither clearly inferior to an existing partial path nor
903  * clearly good enough that it might replace one. Compare it to
904  * non-parallel plans. If it loses even before accounting for the cost of
905  * the Gather node, we should definitely reject it.
906  *
907  * Note that we pass the total_cost to add_path_precheck twice. This is
908  * because it's never advantageous to consider the startup cost of a
909  * partial path; the resulting plans, if run in parallel, will be run to
910  * completion.
911  */
912  if (!add_path_precheck(parent_rel, total_cost, total_cost, pathkeys,
913  NULL))
914  return false;
915 
916  return true;
917 }
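/*
 * Usage sketch (editor's illustration, not part of pathnode.c): a partial
 * join-path builder such as try_partial_nestloop_path() in joinpath.c needs
 * to supply only an estimated total cost and the pathkeys, since startup
 * cost and parameterization are irrelevant for partial paths, roughly:
 *
 *		if (!add_partial_path_precheck(joinrel, workspace.total_cost, pathkeys))
 *			return;
 *
 * i.e., give up before building the full partial join path at all.
 */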
918 
919 
920 /*****************************************************************************
921  * PATH NODE CREATION ROUTINES
922  *****************************************************************************/
923 
924 /*
925  * create_seqscan_path
926  * Creates a path corresponding to a sequential scan, returning the
927  * pathnode.
928  */
929 Path *
930 create_seqscan_path(PlannerInfo *root, RelOptInfo *rel,
931  Relids required_outer, int parallel_workers)
932 {
933  Path *pathnode = makeNode(Path);
934 
935  pathnode->pathtype = T_SeqScan;
936  pathnode->parent = rel;
937  pathnode->pathtarget = rel->reltarget;
938  pathnode->param_info = get_baserel_parampathinfo(root, rel,
939  required_outer);
940  pathnode->parallel_aware = parallel_workers > 0 ? true : false;
941  pathnode->parallel_safe = rel->consider_parallel;
942  pathnode->parallel_workers = parallel_workers;
943  pathnode->pathkeys = NIL; /* seqscan has unordered result */
944 
945  cost_seqscan(pathnode, root, rel, pathnode->param_info);
946 
947  return pathnode;
948 }
949 
950 /*
951  * create_samplescan_path
952  * Creates a path node for a sampled table scan.
953  */
954 Path *
955 create_samplescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
956 {
957  Path *pathnode = makeNode(Path);
958 
959  pathnode->pathtype = T_SampleScan;
960  pathnode->parent = rel;
961  pathnode->pathtarget = rel->reltarget;
962  pathnode->param_info = get_baserel_parampathinfo(root, rel,
963  required_outer);
964  pathnode->parallel_aware = false;
965  pathnode->parallel_safe = rel->consider_parallel;
966  pathnode->parallel_workers = 0;
967  pathnode->pathkeys = NIL; /* samplescan has unordered result */
968 
969  cost_samplescan(pathnode, root, rel, pathnode->param_info);
970 
971  return pathnode;
972 }
973 
974 /*
975  * create_index_path
976  * Creates a path node for an index scan.
977  *
978  * 'index' is a usable index.
979  * 'indexclauses' is a list of IndexClause nodes representing clauses
980  * to be enforced as qual conditions in the scan.
981  * 'indexorderbys' is a list of bare expressions (no RestrictInfos)
982  * to be used as index ordering operators in the scan.
983  * 'indexorderbycols' is an integer list of index column numbers (zero based)
984  * the ordering operators can be used with.
985  * 'pathkeys' describes the ordering of the path.
986  * 'indexscandir' is ForwardScanDirection or BackwardScanDirection
987  * for an ordered index, or NoMovementScanDirection for
988  * an unordered index.
989  * 'indexonly' is true if an index-only scan is wanted.
990  * 'required_outer' is the set of outer relids for a parameterized path.
991  * 'loop_count' is the number of repetitions of the indexscan to factor into
992  * estimates of caching behavior.
993  * 'partial_path' is true if constructing a parallel index scan path.
994  *
995  * Returns the new path node.
996  */
997 IndexPath *
998 create_index_path(PlannerInfo *root,
999  IndexOptInfo *index,
1000  List *indexclauses,
1001  List *indexorderbys,
1002  List *indexorderbycols,
1003  List *pathkeys,
1004  ScanDirection indexscandir,
1005  bool indexonly,
1006  Relids required_outer,
1007  double loop_count,
1008  bool partial_path)
1009 {
1010  IndexPath *pathnode = makeNode(IndexPath);
1011  RelOptInfo *rel = index->rel;
1012 
1013  pathnode->path.pathtype = indexonly ? T_IndexOnlyScan : T_IndexScan;
1014  pathnode->path.parent = rel;
1015  pathnode->path.pathtarget = rel->reltarget;
1016  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1017  required_outer);
1018  pathnode->path.parallel_aware = false;
1019  pathnode->path.parallel_safe = rel->consider_parallel;
1020  pathnode->path.parallel_workers = 0;
1021  pathnode->path.pathkeys = pathkeys;
1022 
1023  pathnode->indexinfo = index;
1024  pathnode->indexclauses = indexclauses;
1025  pathnode->indexorderbys = indexorderbys;
1026  pathnode->indexorderbycols = indexorderbycols;
1027  pathnode->indexscandir = indexscandir;
1028 
1029  cost_index(pathnode, root, loop_count, partial_path);
1030 
1031  return pathnode;
1032 }
1033 
1034 /*
1035  * create_bitmap_heap_path
1036  * Creates a path node for a bitmap scan.
1037  *
1038  * 'bitmapqual' is a tree of IndexPath, BitmapAndPath, and BitmapOrPath nodes.
1039  * 'required_outer' is the set of outer relids for a parameterized path.
1040  * 'loop_count' is the number of repetitions of the indexscan to factor into
1041  * estimates of caching behavior.
1042  *
1043  * loop_count should match the value used when creating the component
1044  * IndexPaths.
1045  */
1046 BitmapHeapPath *
1047 create_bitmap_heap_path(PlannerInfo *root,
1048  RelOptInfo *rel,
1049  Path *bitmapqual,
1050  Relids required_outer,
1051  double loop_count,
1052  int parallel_degree)
1053 {
1054  BitmapHeapPath *pathnode = makeNode(BitmapHeapPath);
1055 
1056  pathnode->path.pathtype = T_BitmapHeapScan;
1057  pathnode->path.parent = rel;
1058  pathnode->path.pathtarget = rel->reltarget;
1059  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1060  required_outer);
1061  pathnode->path.parallel_aware = parallel_degree > 0 ? true : false;
1062  pathnode->path.parallel_safe = rel->consider_parallel;
1063  pathnode->path.parallel_workers = parallel_degree;
1064  pathnode->path.pathkeys = NIL; /* always unordered */
1065 
1066  pathnode->bitmapqual = bitmapqual;
1067 
1068  cost_bitmap_heap_scan(&pathnode->path, root, rel,
1069  pathnode->path.param_info,
1070  bitmapqual, loop_count);
1071 
1072  return pathnode;
1073 }
1074 
1075 /*
1076  * create_bitmap_and_path
1077  * Creates a path node representing a BitmapAnd.
1078  */
1079 BitmapAndPath *
1080 create_bitmap_and_path(PlannerInfo *root,
1081  RelOptInfo *rel,
1082  List *bitmapquals)
1083 {
1084  BitmapAndPath *pathnode = makeNode(BitmapAndPath);
1085 
1086  pathnode->path.pathtype = T_BitmapAnd;
1087  pathnode->path.parent = rel;
1088  pathnode->path.pathtarget = rel->reltarget;
1089  pathnode->path.param_info = NULL; /* not used in bitmap trees */
1090 
1091  /*
1092  * Currently, a BitmapHeapPath, BitmapAndPath, or BitmapOrPath will be
1093  * parallel-safe if and only if rel->consider_parallel is set. So, we can
1094  * set the flag for this path based only on the relation-level flag,
1095  * without actually iterating over the list of children.
1096  */
1097  pathnode->path.parallel_aware = false;
1098  pathnode->path.parallel_safe = rel->consider_parallel;
1099  pathnode->path.parallel_workers = 0;
1100 
1101  pathnode->path.pathkeys = NIL; /* always unordered */
1102 
1103  pathnode->bitmapquals = bitmapquals;
1104 
1105  /* this sets bitmapselectivity as well as the regular cost fields: */
1106  cost_bitmap_and_node(pathnode, root);
1107 
1108  return pathnode;
1109 }
1110 
1111 /*
1112  * create_bitmap_or_path
1113  * Creates a path node representing a BitmapOr.
1114  */
1115 BitmapOrPath *
1116 create_bitmap_or_path(PlannerInfo *root,
1117  RelOptInfo *rel,
1118  List *bitmapquals)
1119 {
1120  BitmapOrPath *pathnode = makeNode(BitmapOrPath);
1121 
1122  pathnode->path.pathtype = T_BitmapOr;
1123  pathnode->path.parent = rel;
1124  pathnode->path.pathtarget = rel->reltarget;
1125  pathnode->path.param_info = NULL; /* not used in bitmap trees */
1126 
1127  /*
1128  * Currently, a BitmapHeapPath, BitmapAndPath, or BitmapOrPath will be
1129  * parallel-safe if and only if rel->consider_parallel is set. So, we can
1130  * set the flag for this path based only on the relation-level flag,
1131  * without actually iterating over the list of children.
1132  */
1133  pathnode->path.parallel_aware = false;
1134  pathnode->path.parallel_safe = rel->consider_parallel;
1135  pathnode->path.parallel_workers = 0;
1136 
1137  pathnode->path.pathkeys = NIL; /* always unordered */
1138 
1139  pathnode->bitmapquals = bitmapquals;
1140 
1141  /* this sets bitmapselectivity as well as the regular cost fields: */
1142  cost_bitmap_or_node(pathnode, root);
1143 
1144  return pathnode;
1145 }
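/*
 * Usage sketch (editor's illustration, not part of pathnode.c): indxpath.c
 * combines IndexPaths with BitmapAnd/BitmapOr nodes and wraps the result in
 * a bitmap heap scan, roughly as below; ipath1/ipath2 are assumed to be
 * already-built IndexPaths for the same rel, and the guard macro is
 * hypothetical.
 */
#ifdef PATHNODE_USAGE_EXAMPLES
static void
example_bitmap_or_scan(PlannerInfo *root, RelOptInfo *rel,
					   IndexPath *ipath1, IndexPath *ipath2)
{
	Path	   *bitmapqual;
	BitmapHeapPath *bpath;

	/* OR the two index conditions at the bitmap level */
	bitmapqual = (Path *) create_bitmap_or_path(root, rel,
												list_make2(ipath1, ipath2));

	/* then fetch the heap tuples identified by the combined bitmap */
	bpath = create_bitmap_heap_path(root, rel, bitmapqual,
									rel->lateral_relids, 1.0, 0);

	add_path(rel, (Path *) bpath);
}
#endif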
1146 
1147 /*
1148  * create_tidscan_path
1149  * Creates a path corresponding to a scan by TID, returning the pathnode.
1150  */
1151 TidPath *
1152 create_tidscan_path(PlannerInfo *root, RelOptInfo *rel, List *tidquals,
1153  Relids required_outer)
1154 {
1155  TidPath *pathnode = makeNode(TidPath);
1156 
1157  pathnode->path.pathtype = T_TidScan;
1158  pathnode->path.parent = rel;
1159  pathnode->path.pathtarget = rel->reltarget;
1160  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1161  required_outer);
1162  pathnode->path.parallel_aware = false;
1163  pathnode->path.parallel_safe = rel->consider_parallel;
1164  pathnode->path.parallel_workers = 0;
1165  pathnode->path.pathkeys = NIL; /* always unordered */
1166 
1167  pathnode->tidquals = tidquals;
1168 
1169  cost_tidscan(&pathnode->path, root, rel, tidquals,
1170  pathnode->path.param_info);
1171 
1172  return pathnode;
1173 }
1174 
1175 /*
1176  * create_append_path
1177  * Creates a path corresponding to an Append plan, returning the
1178  * pathnode.
1179  *
1180  * Note that we must handle subpaths = NIL, representing a dummy access path.
1181  * Also, there are callers that pass root = NULL.
1182  */
1183 AppendPath *
1184 create_append_path(PlannerInfo *root,
1185  RelOptInfo *rel,
1186  List *subpaths, List *partial_subpaths,
1187  List *pathkeys, Relids required_outer,
1188  int parallel_workers, bool parallel_aware,
1189  List *partitioned_rels, double rows)
1190 {
1191  AppendPath *pathnode = makeNode(AppendPath);
1192  ListCell *l;
1193 
1194  Assert(!parallel_aware || parallel_workers > 0);
1195 
1196  pathnode->path.pathtype = T_Append;
1197  pathnode->path.parent = rel;
1198  pathnode->path.pathtarget = rel->reltarget;
1199 
1200  /*
1201  * When generating an Append path for a partitioned table, there may be
1202  * parameters that are useful so we can eliminate certain partitions
1203  * during execution. Here we'll go all the way and fully populate the
1204  * parameter info data as we do for normal base relations. However, we
1205  * need only bother doing this for RELOPT_BASEREL rels, as
1206  * RELOPT_OTHER_MEMBER_REL's Append paths are merged into the base rel's
1207  * Append subpaths. It would do no harm to do it for them too, but we skip
1208  * it to avoid wasted effort.
1209  */
1210  if (partitioned_rels != NIL && root && rel->reloptkind == RELOPT_BASEREL)
1211  pathnode->path.param_info = get_baserel_parampathinfo(root,
1212  rel,
1213  required_outer);
1214  else
1215  pathnode->path.param_info = get_appendrel_parampathinfo(rel,
1216  required_outer);
1217 
1218  pathnode->path.parallel_aware = parallel_aware;
1219  pathnode->path.parallel_safe = rel->consider_parallel;
1220  pathnode->path.parallel_workers = parallel_workers;
1221  pathnode->path.pathkeys = pathkeys;
1222  pathnode->partitioned_rels = list_copy(partitioned_rels);
1223 
1224  /*
1225  * For parallel append, non-partial paths are sorted by descending total
1226  * costs. That way, the total time to finish all non-partial paths is
1227  * minimized. Also, the partial paths are sorted by descending startup
1228  * costs. Some paths may require startup work that must be done by a
1229  * single worker; in that case, it's better for workers to pick the
1230  * expensive ones first, whereas the leader should take the
1231  * cheapest-startup plan.
1232  */
1233  if (pathnode->path.parallel_aware)
1234  {
1235  /*
1236  * We mustn't fiddle with the order of subpaths when the Append has
1237  * pathkeys. The order they're listed in is critical to keeping the
1238  * pathkeys valid.
1239  */
1240  Assert(pathkeys == NIL);
1241 
1242  list_sort(subpaths, append_total_cost_compare);
1243  list_sort(partial_subpaths, append_startup_cost_compare);
1244  }
1245  pathnode->first_partial_path = list_length(subpaths);
1246  pathnode->subpaths = list_concat(subpaths, partial_subpaths);
1247 
1248  /*
1249  * Apply query-wide LIMIT if known and path is for sole base relation.
1250  * (Handling this at this low level is a bit klugy.)
1251  */
1252  if (root != NULL && bms_equal(rel->relids, root->all_baserels))
1253  pathnode->limit_tuples = root->limit_tuples;
1254  else
1255  pathnode->limit_tuples = -1.0;
1256 
1257  foreach(l, pathnode->subpaths)
1258  {
1259  Path *subpath = (Path *) lfirst(l);
1260 
1261  pathnode->path.parallel_safe = pathnode->path.parallel_safe &&
1262  subpath->parallel_safe;
1263 
1264  /* All child paths must have same parameterization */
1265  Assert(bms_equal(PATH_REQ_OUTER(subpath), required_outer));
1266  }
1267 
1268  Assert(!parallel_aware || pathnode->path.parallel_safe);
1269 
1270  /*
1271  * If there's exactly one child path, the Append is a no-op and will be
1272  * discarded later (in setrefs.c); therefore, we can inherit the child's
1273  * size and cost, as well as its pathkeys if any (overriding whatever the
1274  * caller might've said). Otherwise, we must do the normal costsize
1275  * calculation.
1276  */
1277  if (list_length(pathnode->subpaths) == 1)
1278  {
1279  Path *child = (Path *) linitial(pathnode->subpaths);
1280 
1281  pathnode->path.rows = child->rows;
1282  pathnode->path.startup_cost = child->startup_cost;
1283  pathnode->path.total_cost = child->total_cost;
1284  pathnode->path.pathkeys = child->pathkeys;
1285  }
1286  else
1287  cost_append(pathnode);
1288 
1289  /* If the caller provided a row estimate, override the computed value. */
1290  if (rows >= 0)
1291  pathnode->path.rows = rows;
1292 
1293  return pathnode;
1294 }
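/*
 * Usage sketch (editor's illustration, not part of pathnode.c): the
 * subpaths = NIL "dummy path" case mentioned above is how a provably empty
 * relation is represented; mark_dummy_rel() in joinrels.c does roughly the
 * following (rows = -1 keeps the computed, zero, row estimate).  The guard
 * macro is hypothetical.
 */
#ifdef PATHNODE_USAGE_EXAMPLES
static void
example_mark_dummy(PlannerInfo *root, RelOptInfo *rel)
{
	/* evict any previously chosen paths and install an empty Append */
	rel->pathlist = NIL;
	rel->partial_pathlist = NIL;

	add_path(rel, (Path *) create_append_path(root, rel, NIL, NIL, NIL,
											  rel->lateral_relids,
											  0, false, NIL, -1));

	set_cheapest(rel);
}
#endif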
1295 
1296 /*
1297  * append_total_cost_compare
1298  * list_sort comparator for sorting append child paths
1299  * by total_cost descending
1300  *
1301  * For equal total costs, we fall back to comparing startup costs; if those
1302  * are equal too, break ties using bms_compare on the paths' relids.
1303  * (This is to avoid getting unpredictable results from list_sort.)
1304  */
1305 static int
1306 append_total_cost_compare(const ListCell *a, const ListCell *b)
1307 {
1308  Path *path1 = (Path *) lfirst(a);
1309  Path *path2 = (Path *) lfirst(b);
1310  int cmp;
1311 
1312  cmp = compare_path_costs(path1, path2, TOTAL_COST);
1313  if (cmp != 0)
1314  return -cmp;
1315  return bms_compare(path1->parent->relids, path2->parent->relids);
1316 }
1317 
1318 /*
1319  * append_startup_cost_compare
1320  * list_sort comparator for sorting append child paths
1321  * by startup_cost descending
1322  *
1323  * For equal startup costs, we fall back to comparing total costs; if those
1324  * are equal too, break ties using bms_compare on the paths' relids.
1325  * (This is to avoid getting unpredictable results from list_sort.)
1326  */
1327 static int
1328 append_startup_cost_compare(const ListCell *a, const ListCell *b)
1329 {
1330  Path *path1 = (Path *) lfirst(a);
1331  Path *path2 = (Path *) lfirst(b);
1332  int cmp;
1333 
1334  cmp = compare_path_costs(path1, path2, STARTUP_COST);
1335  if (cmp != 0)
1336  return -cmp;
1337  return bms_compare(path1->parent->relids, path2->parent->relids);
1338 }
1339 
1340 /*
1341  * create_merge_append_path
1342  * Creates a path corresponding to a MergeAppend plan, returning the
1343  * pathnode.
1344  */
1345 MergeAppendPath *
1346 create_merge_append_path(PlannerInfo *root,
1347  RelOptInfo *rel,
1348  List *subpaths,
1349  List *pathkeys,
1350  Relids required_outer,
1351  List *partitioned_rels)
1352 {
1353  MergeAppendPath *pathnode = makeNode(MergeAppendPath);
1354  Cost input_startup_cost;
1355  Cost input_total_cost;
1356  ListCell *l;
1357 
1358  pathnode->path.pathtype = T_MergeAppend;
1359  pathnode->path.parent = rel;
1360  pathnode->path.pathtarget = rel->reltarget;
1361  pathnode->path.param_info = get_appendrel_parampathinfo(rel,
1362  required_outer);
1363  pathnode->path.parallel_aware = false;
1364  pathnode->path.parallel_safe = rel->consider_parallel;
1365  pathnode->path.parallel_workers = 0;
1366  pathnode->path.pathkeys = pathkeys;
1367  pathnode->partitioned_rels = list_copy(partitioned_rels);
1368  pathnode->subpaths = subpaths;
1369 
1370  /*
1371  * Apply query-wide LIMIT if known and path is for sole base relation.
1372  * (Handling this at this low level is a bit klugy.)
1373  */
1374  if (bms_equal(rel->relids, root->all_baserels))
1375  pathnode->limit_tuples = root->limit_tuples;
1376  else
1377  pathnode->limit_tuples = -1.0;
1378 
1379  /*
1380  * Add up the sizes and costs of the input paths.
1381  */
1382  pathnode->path.rows = 0;
1383  input_startup_cost = 0;
1384  input_total_cost = 0;
1385  foreach(l, subpaths)
1386  {
1387  Path *subpath = (Path *) lfirst(l);
1388 
1389  pathnode->path.rows += subpath->rows;
1390  pathnode->path.parallel_safe = pathnode->path.parallel_safe &&
1391  subpath->parallel_safe;
1392 
1393  if (pathkeys_contained_in(pathkeys, subpath->pathkeys))
1394  {
1395  /* Subpath is adequately ordered, we won't need to sort it */
1396  input_startup_cost += subpath->startup_cost;
1397  input_total_cost += subpath->total_cost;
1398  }
1399  else
1400  {
1401  /* We'll need to insert a Sort node, so include cost for that */
1402  Path sort_path; /* dummy for result of cost_sort */
1403 
1404  cost_sort(&sort_path,
1405  root,
1406  pathkeys,
1407  subpath->total_cost,
1408  subpath->parent->tuples,
1409  subpath->pathtarget->width,
1410  0.0,
1411  work_mem,
1412  pathnode->limit_tuples);
1413  input_startup_cost += sort_path.startup_cost;
1414  input_total_cost += sort_path.total_cost;
1415  }
1416 
1417  /* All child paths must have same parameterization */
1418  Assert(bms_equal(PATH_REQ_OUTER(subpath), required_outer));
1419  }
1420 
1421  /*
1422  * Now we can compute total costs of the MergeAppend. If there's exactly
1423  * one child path, the MergeAppend is a no-op and will be discarded later
1424  * (in setrefs.c); otherwise we do the normal cost calculation.
1425  */
1426  if (list_length(subpaths) == 1)
1427  {
1428  pathnode->path.startup_cost = input_startup_cost;
1429  pathnode->path.total_cost = input_total_cost;
1430  }
1431  else
1432  cost_merge_append(&pathnode->path, root,
1433  pathkeys, list_length(subpaths),
1434  input_startup_cost, input_total_cost,
1435  pathnode->path.rows);
1436 
1437  return pathnode;
1438 }
1439 
1440 /*
1441  * create_group_result_path
1442  * Creates a path representing a Result-and-nothing-else plan.
1443  *
1444  * This is only used for degenerate grouping cases, in which we know we
1445  * need to produce one result row, possibly filtered by a HAVING qual.
1446  */
1447 GroupResultPath *
1448 create_group_result_path(PlannerInfo *root, RelOptInfo *rel,
1449  PathTarget *target, List *havingqual)
1450 {
1451  GroupResultPath *pathnode = makeNode(GroupResultPath);
1452 
1453  pathnode->path.pathtype = T_Result;
1454  pathnode->path.parent = rel;
1455  pathnode->path.pathtarget = target;
1456  pathnode->path.param_info = NULL; /* there are no other rels... */
1457  pathnode->path.parallel_aware = false;
1458  pathnode->path.parallel_safe = rel->consider_parallel;
1459  pathnode->path.parallel_workers = 0;
1460  pathnode->path.pathkeys = NIL;
1461  pathnode->quals = havingqual;
1462 
1463  /*
1464  * We can't quite use cost_resultscan() because the quals we want to
1465  * account for are not baserestrict quals of the rel. Might as well just
1466  * hack it here.
1467  */
1468  pathnode->path.rows = 1;
1469  pathnode->path.startup_cost = target->cost.startup;
1470  pathnode->path.total_cost = target->cost.startup +
1471  cpu_tuple_cost + target->cost.per_tuple;
1472 
1473  /*
1474  * Add cost of qual, if any --- but we ignore its selectivity, since our
1475  * rowcount estimate should be 1 no matter what the qual is.
1476  */
1477  if (havingqual)
1478  {
1479  QualCost qual_cost;
1480 
1481  cost_qual_eval(&qual_cost, havingqual, root);
1482  /* havingqual is evaluated once at startup */
1483  pathnode->path.startup_cost += qual_cost.startup + qual_cost.per_tuple;
1484  pathnode->path.total_cost += qual_cost.startup + qual_cost.per_tuple;
1485  }
1486 
1487  return pathnode;
1488 }
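/*
 * Usage sketch (editor's illustration, not part of pathnode.c): the
 * degenerate-grouping code in planner.c builds roughly
 *
 *		create_group_result_path(root, grouped_rel, grouped_rel->reltarget,
 *								 (List *) parse->havingQual);
 *
 * i.e. a single projected Result row whose emission is gated only by the
 * HAVING qual.
 */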
1489 
1490 /*
1491  * create_material_path
1492  * Creates a path corresponding to a Material plan, returning the
1493  * pathnode.
1494  */
1495 MaterialPath *
1496 create_material_path(RelOptInfo *rel, Path *subpath)
1497 {
1498  MaterialPath *pathnode = makeNode(MaterialPath);
1499 
1500  Assert(subpath->parent == rel);
1501 
1502  pathnode->path.pathtype = T_Material;
1503  pathnode->path.parent = rel;
1504  pathnode->path.pathtarget = rel->reltarget;
1505  pathnode->path.param_info = subpath->param_info;
1506  pathnode->path.parallel_aware = false;
1507  pathnode->path.parallel_safe = rel->consider_parallel &&
1508  subpath->parallel_safe;
1509  pathnode->path.parallel_workers = subpath->parallel_workers;
1510  pathnode->path.pathkeys = subpath->pathkeys;
1511 
1512  pathnode->subpath = subpath;
1513 
1514  cost_material(&pathnode->path,
1515  subpath->startup_cost,
1516  subpath->total_cost,
1517  subpath->rows,
1518  subpath->pathtarget->width);
1519 
1520  return pathnode;
1521 }
1522 
1523 /*
1524  * create_unique_path
1525  * Creates a path representing elimination of distinct rows from the
1526  * input data. Distinct-ness is defined according to the needs of the
1527  * semijoin represented by sjinfo. If it is not possible to identify
1528  * how to make the data unique, NULL is returned.
1529  *
1530  * If used at all, this is likely to be called repeatedly on the same rel;
1531  * and the input subpath should always be the same (the cheapest_total path
1532  * for the rel). So we cache the result.
1533  */
1534 UniquePath *
1535 create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
1536  SpecialJoinInfo *sjinfo)
1537 {
1538  UniquePath *pathnode;
1539  Path sort_path; /* dummy for result of cost_sort */
1540  Path agg_path; /* dummy for result of cost_agg */
1541  MemoryContext oldcontext;
1542  int numCols;
1543 
1544  /* Caller made a mistake if subpath isn't cheapest_total ... */
1545  Assert(subpath == rel->cheapest_total_path);
1546  Assert(subpath->parent == rel);
1547  /* ... or if SpecialJoinInfo is the wrong one */
1548  Assert(sjinfo->jointype == JOIN_SEMI);
1549  Assert(bms_equal(rel->relids, sjinfo->syn_righthand));
1550 
1551  /* If result already cached, return it */
1552  if (rel->cheapest_unique_path)
1553  return (UniquePath *) rel->cheapest_unique_path;
1554 
1555  /* If it's not possible to unique-ify, return NULL */
1556  if (!(sjinfo->semi_can_btree || sjinfo->semi_can_hash))
1557  return NULL;
1558 
1559  /*
1560  * When called during GEQO join planning, we are in a short-lived memory
1561  * context. We must make sure that the path and any subsidiary data
1562  * structures created for a baserel survive the GEQO cycle, else the
1563  * baserel is trashed for future GEQO cycles. On the other hand, when we
1564  * are creating those for a joinrel during GEQO, we don't want them to
1565  * clutter the main planning context. Upshot is that the best solution is
1566  * to explicitly allocate memory in the same context the given RelOptInfo
1567  * is in.
1568  */
1569  oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
1570 
1571  pathnode = makeNode(UniquePath);
1572 
1573  pathnode->path.pathtype = T_Unique;
1574  pathnode->path.parent = rel;
1575  pathnode->path.pathtarget = rel->reltarget;
1576  pathnode->path.param_info = subpath->param_info;
1577  pathnode->path.parallel_aware = false;
1578  pathnode->path.parallel_safe = rel->consider_parallel &&
1579  subpath->parallel_safe;
1580  pathnode->path.parallel_workers = subpath->parallel_workers;
1581 
1582  /*
1583  * Assume the output is unsorted, since we don't necessarily have pathkeys
1584  * to represent it. (This might get overridden below.)
1585  */
1586  pathnode->path.pathkeys = NIL;
1587 
1588  pathnode->subpath = subpath;
1589  pathnode->in_operators = sjinfo->semi_operators;
1590  pathnode->uniq_exprs = sjinfo->semi_rhs_exprs;
1591 
1592  /*
1593  * If the input is a relation and it has a unique index that proves the
1594  * semi_rhs_exprs are unique, then we don't need to do anything. Note
1595  * that relation_has_unique_index_for automatically considers restriction
1596  * clauses for the rel, as well.
1597  */
1598  if (rel->rtekind == RTE_RELATION && sjinfo->semi_can_btree &&
1599  relation_has_unique_index_for(root, rel, NIL,
1600  sjinfo->semi_rhs_exprs,
1601  sjinfo->semi_operators))
1602  {
1603  pathnode->umethod = UNIQUE_PATH_NOOP;
1604  pathnode->path.rows = rel->rows;
1605  pathnode->path.startup_cost = subpath->startup_cost;
1606  pathnode->path.total_cost = subpath->total_cost;
1607  pathnode->path.pathkeys = subpath->pathkeys;
1608 
1609  rel->cheapest_unique_path = (Path *) pathnode;
1610 
1611  MemoryContextSwitchTo(oldcontext);
1612 
1613  return pathnode;
1614  }
1615 
1616  /*
1617  * If the input is a subquery whose output must be unique already, then we
1618  * don't need to do anything. The test for uniqueness has to consider
1619  * exactly which columns we are extracting; for example "SELECT DISTINCT
1620  * x,y" doesn't guarantee that x alone is distinct. So we cannot check for
1621  * this optimization unless semi_rhs_exprs consists only of simple Vars
1622  * referencing subquery outputs. (Possibly we could do something with
1623  * expressions in the subquery outputs, too, but for now keep it simple.)
1624  */
1625  if (rel->rtekind == RTE_SUBQUERY)
1626  {
1627  RangeTblEntry *rte = planner_rt_fetch(rel->relid, root);
1628 
1629  if (query_supports_distinctness(rte->subquery))
1630  {
1631  List *sub_tlist_colnos;
1632 
1633  sub_tlist_colnos = translate_sub_tlist(sjinfo->semi_rhs_exprs,
1634  rel->relid);
1635 
1636  if (sub_tlist_colnos &&
1637  query_is_distinct_for(rte->subquery,
1638  sub_tlist_colnos,
1639  sjinfo->semi_operators))
1640  {
1641  pathnode->umethod = UNIQUE_PATH_NOOP;
1642  pathnode->path.rows = rel->rows;
1643  pathnode->path.startup_cost = subpath->startup_cost;
1644  pathnode->path.total_cost = subpath->total_cost;
1645  pathnode->path.pathkeys = subpath->pathkeys;
1646 
1647  rel->cheapest_unique_path = (Path *) pathnode;
1648 
1649  MemoryContextSwitchTo(oldcontext);
1650 
1651  return pathnode;
1652  }
1653  }
1654  }
1655 
1656  /* Estimate number of output rows */
1657  pathnode->path.rows = estimate_num_groups(root,
1658  sjinfo->semi_rhs_exprs,
1659  rel->rows,
1660  NULL);
1661  numCols = list_length(sjinfo->semi_rhs_exprs);
1662 
1663  if (sjinfo->semi_can_btree)
1664  {
1665  /*
1666  * Estimate cost for sort+unique implementation
1667  */
1668  cost_sort(&sort_path, root, NIL,
1669  subpath->total_cost,
1670  rel->rows,
1671  subpath->pathtarget->width,
1672  0.0,
1673  work_mem,
1674  -1.0);
1675 
1676  /*
1677  * Charge one cpu_operator_cost per comparison per input tuple. We
1678  * assume all columns get compared for most of the tuples. (XXX
1679  * probably this is an overestimate.) This should agree with
1680  * create_upper_unique_path.
1681  */
1682  sort_path.total_cost += cpu_operator_cost * rel->rows * numCols;
1683  }
1684 
1685  if (sjinfo->semi_can_hash)
1686  {
1687  /*
1688  * Estimate the overhead per hashtable entry at 64 bytes (same as in
1689  * planner.c).
1690  */
1691  int hashentrysize = subpath->pathtarget->width + 64;
1692 
1693  if (hashentrysize * pathnode->path.rows > work_mem * 1024L)
1694  {
1695  /*
1696  * We should not try to hash. Hack the SpecialJoinInfo to
1697  * remember this, in case we come through here again.
1698  */
1699  sjinfo->semi_can_hash = false;
1700  }
1701  else
1702  cost_agg(&agg_path, root,
1703  AGG_HASHED, NULL,
1704  numCols, pathnode->path.rows,
1705  NIL,
1706  subpath->startup_cost,
1707  subpath->total_cost,
1708  rel->rows);
1709  }
1710 
1711  if (sjinfo->semi_can_btree && sjinfo->semi_can_hash)
1712  {
1713  if (agg_path.total_cost < sort_path.total_cost)
1714  pathnode->umethod = UNIQUE_PATH_HASH;
1715  else
1716  pathnode->umethod = UNIQUE_PATH_SORT;
1717  }
1718  else if (sjinfo->semi_can_btree)
1719  pathnode->umethod = UNIQUE_PATH_SORT;
1720  else if (sjinfo->semi_can_hash)
1721  pathnode->umethod = UNIQUE_PATH_HASH;
1722  else
1723  {
1724  /* we can get here only if we abandoned hashing above */
1725  MemoryContextSwitchTo(oldcontext);
1726  return NULL;
1727  }
1728 
1729  if (pathnode->umethod == UNIQUE_PATH_HASH)
1730  {
1731  pathnode->path.startup_cost = agg_path.startup_cost;
1732  pathnode->path.total_cost = agg_path.total_cost;
1733  }
1734  else
1735  {
1736  pathnode->path.startup_cost = sort_path.startup_cost;
1737  pathnode->path.total_cost = sort_path.total_cost;
1738  }
1739 
1740  rel->cheapest_unique_path = (Path *) pathnode;
1741 
1742  MemoryContextSwitchTo(oldcontext);
1743 
1744  return pathnode;
1745 }
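/*
 * [Editorial sketch, not part of pathnode.c]  How a caller might use
 * create_unique_path() to unique-ify the right-hand side of a semijoin.
 * The helper name is hypothetical; "root", "rel" and "sjinfo" are assumed
 * to come from the join planner, and the headers included at the top of
 * this file are assumed to provide all declarations.
 */
static Path *
example_unique_semijoin_rhs(PlannerInfo *root, RelOptInfo *rel,
							SpecialJoinInfo *sjinfo)
{
	UniquePath *upath;

	/* Must pass the rel's cheapest_total_path, as the Asserts above require */
	upath = create_unique_path(root, rel, rel->cheapest_total_path, sjinfo);

	/* NULL means the rel cannot be unique-ified by either sorting or hashing */
	if (upath == NULL)
		return NULL;

	return &upath->path;
}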
1746 
1747 /*
1748  * create_gather_merge_path
1749  *
1750  * Creates a path corresponding to a gather merge scan, returning
1751  * the pathnode.
1752  */
1753 GatherMergePath *
1754 create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
1755  PathTarget *target, List *pathkeys,
1756  Relids required_outer, double *rows)
1757 {
1758  GatherMergePath *pathnode = makeNode(GatherMergePath);
1759  Cost input_startup_cost = 0;
1760  Cost input_total_cost = 0;
1761 
1762  Assert(subpath->parallel_safe);
1763  Assert(pathkeys);
1764 
1765  pathnode->path.pathtype = T_GatherMerge;
1766  pathnode->path.parent = rel;
1767  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1768  required_outer);
1769  pathnode->path.parallel_aware = false;
1770 
1771  pathnode->subpath = subpath;
1772  pathnode->num_workers = subpath->parallel_workers;
1773  pathnode->path.pathkeys = pathkeys;
1774  pathnode->path.pathtarget = target ? target : rel->reltarget;
1775  pathnode->path.rows += subpath->rows;
1776 
1777  if (pathkeys_contained_in(pathkeys, subpath->pathkeys))
1778  {
1779  /* Subpath is adequately ordered, we won't need to sort it */
1780  input_startup_cost += subpath->startup_cost;
1781  input_total_cost += subpath->total_cost;
1782  }
1783  else
1784  {
1785  /* We'll need to insert a Sort node, so include cost for that */
1786  Path sort_path; /* dummy for result of cost_sort */
1787 
1788  cost_sort(&sort_path,
1789  root,
1790  pathkeys,
1791  subpath->total_cost,
1792  subpath->rows,
1793  subpath->pathtarget->width,
1794  0.0,
1795  work_mem,
1796  -1);
1797  input_startup_cost += sort_path.startup_cost;
1798  input_total_cost += sort_path.total_cost;
1799  }
1800 
1801  cost_gather_merge(pathnode, root, rel, pathnode->path.param_info,
1802  input_startup_cost, input_total_cost, rows);
1803 
1804  return pathnode;
1805 }
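/*
 * [Editorial sketch, not part of pathnode.c]  Roughly how a caller such as
 * generate_gather_paths() puts a Gather Merge on top of an ordered partial
 * path; the helper name is hypothetical.  Passing NULL for "rows" lets
 * cost_gather_merge() derive the row estimate itself.
 */
static void
example_add_gather_merge(PlannerInfo *root, RelOptInfo *rel, Path *partial_path)
{
	GatherMergePath *gmpath;

	/* Gather Merge is only interesting if the partial path is ordered */
	if (partial_path->pathkeys == NIL)
		return;

	gmpath = create_gather_merge_path(root, rel, partial_path, rel->reltarget,
									  partial_path->pathkeys, NULL, NULL);
	add_path(rel, (Path *) gmpath);
}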
1806 
1807 /*
1808  * translate_sub_tlist - get subquery column numbers represented by tlist
1809  *
1810  * The given targetlist usually contains only Vars referencing the given relid.
1811  * Extract their varattnos (ie, the column numbers of the subquery) and return
1812  * as an integer List.
1813  *
1814  * If any of the tlist items is not a simple Var, we cannot determine whether
1815  * the subquery's uniqueness condition (if any) matches ours, so punt and
1816  * return NIL.
1817  */
1818 static List *
1819 translate_sub_tlist(List *tlist, int relid)
1820 {
1821  List *result = NIL;
1822  ListCell *l;
1823 
1824  foreach(l, tlist)
1825  {
1826  Var *var = (Var *) lfirst(l);
1827 
1828  if (!var || !IsA(var, Var) ||
1829  var->varno != relid)
1830  return NIL; /* punt */
1831 
1832  result = lappend_int(result, var->varattno);
1833  }
1834  return result;
1835 }
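/*
 * [Editorial sketch, not part of pathnode.c]  For a tlist of simple Vars
 * such as (subquery.col3, subquery.col1) referencing relid 2, the result is
 * the integer list (3, 1); any non-Var entry makes it return NIL.  Building
 * Vars this way needs nodes/makefuncs.h and catalog/pg_type.h, which
 * pathnode.c itself does not include, so treat this purely as illustration.
 */
static void
example_translate_sub_tlist(void)
{
	Var		   *v3 = makeVar(2, 3, INT4OID, -1, InvalidOid, 0);
	Var		   *v1 = makeVar(2, 1, INT4OID, -1, InvalidOid, 0);
	List	   *colnos = translate_sub_tlist(list_make2(v3, v1), 2);

	Assert(list_length(colnos) == 2);
	Assert(linitial_int(colnos) == 3);
	Assert(lsecond_int(colnos) == 1);
}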
1836 
1837 /*
1838  * create_gather_path
1839  * Creates a path corresponding to a gather scan, returning the
1840  * pathnode.
1841  *
1842  * 'rows' may optionally be set to override row estimates from other sources.
1843  */
1844 GatherPath *
1845 create_gather_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
1846  PathTarget *target, Relids required_outer, double *rows)
1847 {
1848  GatherPath *pathnode = makeNode(GatherPath);
1849 
1850  Assert(subpath->parallel_safe);
1851 
1852  pathnode->path.pathtype = T_Gather;
1853  pathnode->path.parent = rel;
1854  pathnode->path.pathtarget = target;
1855  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1856  required_outer);
1857  pathnode->path.parallel_aware = false;
1858  pathnode->path.parallel_safe = false;
1859  pathnode->path.parallel_workers = 0;
1860  pathnode->path.pathkeys = NIL; /* Gather has unordered result */
1861 
1862  pathnode->subpath = subpath;
1863  pathnode->num_workers = subpath->parallel_workers;
1864  pathnode->single_copy = false;
1865 
1866  if (pathnode->num_workers == 0)
1867  {
1868  pathnode->path.pathkeys = subpath->pathkeys;
1869  pathnode->num_workers = 1;
1870  pathnode->single_copy = true;
1871  }
1872 
1873  cost_gather(pathnode, root, rel, pathnode->path.param_info, rows);
1874 
1875  return pathnode;
1876 }
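/*
 * [Editorial sketch, not part of pathnode.c]  Typical use: put a Gather on
 * top of the cheapest partial path so the rel can be scanned in parallel.
 * The helper name is hypothetical; passing NULL for "rows" lets cost_gather()
 * derive the estimate from the subpath.
 */
static void
example_add_gather(PlannerInfo *root, RelOptInfo *rel)
{
	Path	   *cheapest_partial_path;

	if (rel->partial_pathlist == NIL)
		return;					/* no partial paths, nothing to gather */

	cheapest_partial_path = (Path *) linitial(rel->partial_pathlist);
	add_path(rel, (Path *)
			 create_gather_path(root, rel, cheapest_partial_path,
								rel->reltarget, NULL, NULL));
}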
1877 
1878 /*
1879  * create_subqueryscan_path
1880  * Creates a path corresponding to a scan of a subquery,
1881  * returning the pathnode.
1882  */
1883 SubqueryScanPath *
1884 create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
1885  List *pathkeys, Relids required_outer)
1886 {
1887  SubqueryScanPath *pathnode = makeNode(SubqueryScanPath);
1888 
1889  pathnode->path.pathtype = T_SubqueryScan;
1890  pathnode->path.parent = rel;
1891  pathnode->path.pathtarget = rel->reltarget;
1892  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1893  required_outer);
1894  pathnode->path.parallel_aware = false;
1895  pathnode->path.parallel_safe = rel->consider_parallel &&
1896  subpath->parallel_safe;
1897  pathnode->path.parallel_workers = subpath->parallel_workers;
1898  pathnode->path.pathkeys = pathkeys;
1899  pathnode->subpath = subpath;
1900 
1901  cost_subqueryscan(pathnode, root, rel, pathnode->path.param_info);
1902 
1903  return pathnode;
1904 }
1905 
1906 /*
1907  * create_functionscan_path
1908  * Creates a path corresponding to a sequential scan of a function,
1909  * returning the pathnode.
1910  */
1911 Path *
1912 create_functionscan_path(PlannerInfo *root, RelOptInfo *rel,
1913  List *pathkeys, Relids required_outer)
1914 {
1915  Path *pathnode = makeNode(Path);
1916 
1917  pathnode->pathtype = T_FunctionScan;
1918  pathnode->parent = rel;
1919  pathnode->pathtarget = rel->reltarget;
1920  pathnode->param_info = get_baserel_parampathinfo(root, rel,
1921  required_outer);
1922  pathnode->parallel_aware = false;
1923  pathnode->parallel_safe = rel->consider_parallel;
1924  pathnode->parallel_workers = 0;
1925  pathnode->pathkeys = pathkeys;
1926 
1927  cost_functionscan(pathnode, root, rel, pathnode->param_info);
1928 
1929  return pathnode;
1930 }
1931 
1932 /*
1933  * create_tablefuncscan_path
1934  * Creates a path corresponding to a sequential scan of a table function,
1935  * returning the pathnode.
1936  */
1937 Path *
1938 create_tablefuncscan_path(PlannerInfo *root, RelOptInfo *rel,
1939  Relids required_outer)
1940 {
1941  Path *pathnode = makeNode(Path);
1942 
1943  pathnode->pathtype = T_TableFuncScan;
1944  pathnode->parent = rel;
1945  pathnode->pathtarget = rel->reltarget;
1946  pathnode->param_info = get_baserel_parampathinfo(root, rel,
1947  required_outer);
1948  pathnode->parallel_aware = false;
1949  pathnode->parallel_safe = rel->consider_parallel;
1950  pathnode->parallel_workers = 0;
1951  pathnode->pathkeys = NIL; /* result is always unordered */
1952 
1953  cost_tablefuncscan(pathnode, root, rel, pathnode->param_info);
1954 
1955  return pathnode;
1956 }
1957 
1958 /*
1959  * create_valuesscan_path
1960  * Creates a path corresponding to a scan of a VALUES list,
1961  * returning the pathnode.
1962  */
1963 Path *
1964 create_valuesscan_path(PlannerInfo *root, RelOptInfo *rel,
1965  Relids required_outer)
1966 {
1967  Path *pathnode = makeNode(Path);
1968 
1969  pathnode->pathtype = T_ValuesScan;
1970  pathnode->parent = rel;
1971  pathnode->pathtarget = rel->reltarget;
1972  pathnode->param_info = get_baserel_parampathinfo(root, rel,
1973  required_outer);
1974  pathnode->parallel_aware = false;
1975  pathnode->parallel_safe = rel->consider_parallel;
1976  pathnode->parallel_workers = 0;
1977  pathnode->pathkeys = NIL; /* result is always unordered */
1978 
1979  cost_valuesscan(pathnode, root, rel, pathnode->param_info);
1980 
1981  return pathnode;
1982 }
1983 
1984 /*
1985  * create_ctescan_path
1986  * Creates a path corresponding to a scan of a non-self-reference CTE,
1987  * returning the pathnode.
1988  */
1989 Path *
1990 create_ctescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
1991 {
1992  Path *pathnode = makeNode(Path);
1993 
1994  pathnode->pathtype = T_CteScan;
1995  pathnode->parent = rel;
1996  pathnode->pathtarget = rel->reltarget;
1997  pathnode->param_info = get_baserel_parampathinfo(root, rel,
1998  required_outer);
1999  pathnode->parallel_aware = false;
2000  pathnode->parallel_safe = rel->consider_parallel;
2001  pathnode->parallel_workers = 0;
2002  pathnode->pathkeys = NIL; /* XXX for now, result is always unordered */
2003 
2004  cost_ctescan(pathnode, root, rel, pathnode->param_info);
2005 
2006  return pathnode;
2007 }
2008 
2009 /*
2010  * create_namedtuplestorescan_path
2011  * Creates a path corresponding to a scan of a named tuplestore, returning
2012  * the pathnode.
2013  */
2014 Path *
2015 create_namedtuplestorescan_path(PlannerInfo *root, RelOptInfo *rel,
2016  Relids required_outer)
2017 {
2018  Path *pathnode = makeNode(Path);
2019 
2020  pathnode->pathtype = T_NamedTuplestoreScan;
2021  pathnode->parent = rel;
2022  pathnode->pathtarget = rel->reltarget;
2023  pathnode->param_info = get_baserel_parampathinfo(root, rel,
2024  required_outer);
2025  pathnode->parallel_aware = false;
2026  pathnode->parallel_safe = rel->consider_parallel;
2027  pathnode->parallel_workers = 0;
2028  pathnode->pathkeys = NIL; /* result is always unordered */
2029 
2030  cost_namedtuplestorescan(pathnode, root, rel, pathnode->param_info);
2031 
2032  return pathnode;
2033 }
2034 
2035 /*
2036  * create_resultscan_path
2037  * Creates a path corresponding to a scan of an RTE_RESULT relation,
2038  * returning the pathnode.
2039  */
2040 Path *
2041 create_resultscan_path(PlannerInfo *root, RelOptInfo *rel,
2042  Relids required_outer)
2043 {
2044  Path *pathnode = makeNode(Path);
2045 
2046  pathnode->pathtype = T_Result;
2047  pathnode->parent = rel;
2048  pathnode->pathtarget = rel->reltarget;
2049  pathnode->param_info = get_baserel_parampathinfo(root, rel,
2050  required_outer);
2051  pathnode->parallel_aware = false;
2052  pathnode->parallel_safe = rel->consider_parallel;
2053  pathnode->parallel_workers = 0;
2054  pathnode->pathkeys = NIL; /* result is always unordered */
2055 
2056  cost_resultscan(pathnode, root, rel, pathnode->param_info);
2057 
2058  return pathnode;
2059 }
2060 
2061 /*
2062  * create_worktablescan_path
2063  * Creates a path corresponding to a scan of a self-reference CTE,
2064  * returning the pathnode.
2065  */
2066 Path *
2067 create_worktablescan_path(PlannerInfo *root, RelOptInfo *rel,
2068  Relids required_outer)
2069 {
2070  Path *pathnode = makeNode(Path);
2071 
2072  pathnode->pathtype = T_WorkTableScan;
2073  pathnode->parent = rel;
2074  pathnode->pathtarget = rel->reltarget;
2075  pathnode->param_info = get_baserel_parampathinfo(root, rel,
2076  required_outer);
2077  pathnode->parallel_aware = false;
2078  pathnode->parallel_safe = rel->consider_parallel;
2079  pathnode->parallel_workers = 0;
2080  pathnode->pathkeys = NIL; /* result is always unordered */
2081 
2082  /* Cost is the same as for a regular CTE scan */
2083  cost_ctescan(pathnode, root, rel, pathnode->param_info);
2084 
2085  return pathnode;
2086 }
2087 
2088 /*
2089  * create_foreignscan_path
2090  * Creates a path corresponding to a scan of a foreign base table,
2091  * returning the pathnode.
2092  *
2093  * This function is never called from core Postgres; rather, it's expected
2094  * to be called by the GetForeignPaths function of a foreign data wrapper.
2095  * We make the FDW supply all fields of the path, since we do not have any way
2096  * to calculate them in core. However, there is a usually-sane default for
2097  * the pathtarget (rel->reltarget), so we let a NULL for "target" select that.
2098  */
2099 ForeignPath *
2100 create_foreignscan_path(PlannerInfo *root, RelOptInfo *rel,
2101  PathTarget *target,
2102  double rows, Cost startup_cost, Cost total_cost,
2103  List *pathkeys,
2104  Relids required_outer,
2105  Path *fdw_outerpath,
2106  List *fdw_private)
2107 {
2108  ForeignPath *pathnode = makeNode(ForeignPath);
2109 
2110  /* Historically some FDWs were confused about when to use this */
2111  Assert(IS_SIMPLE_REL(rel));
2112 
2113  pathnode->path.pathtype = T_ForeignScan;
2114  pathnode->path.parent = rel;
2115  pathnode->path.pathtarget = target ? target : rel->reltarget;
2116  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
2117  required_outer);
2118  pathnode->path.parallel_aware = false;
2119  pathnode->path.parallel_safe = rel->consider_parallel;
2120  pathnode->path.parallel_workers = 0;
2121  pathnode->path.rows = rows;
2122  pathnode->path.startup_cost = startup_cost;
2123  pathnode->path.total_cost = total_cost;
2124  pathnode->path.pathkeys = pathkeys;
2125 
2126  pathnode->fdw_outerpath = fdw_outerpath;
2127  pathnode->fdw_private = fdw_private;
2128 
2129  return pathnode;
2130 }
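/*
 * [Editorial sketch, not part of pathnode.c]  A minimal, hypothetical
 * GetForeignPaths callback of an FDW, showing the intended call pattern;
 * the cost numbers are placeholders that a real FDW would estimate itself.
 */
static void
exampleGetForeignPaths(PlannerInfo *root, RelOptInfo *baserel,
					   Oid foreigntableid)
{
	double		rows = baserel->rows;
	Cost		startup_cost = 10.0;	/* made-up placeholder */
	Cost		total_cost = startup_cost + rows;	/* made-up placeholder */

	(void) foreigntableid;		/* unused in this sketch */

	add_path(baserel, (Path *)
			 create_foreignscan_path(root, baserel,
									 NULL,	/* default pathtarget */
									 rows,
									 startup_cost,
									 total_cost,
									 NIL,	/* no pathkeys */
									 NULL,	/* not parameterized */
									 NULL,	/* no fdw_outerpath */
									 NIL));	/* no fdw_private data */
}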
2131 
2132 /*
2133  * create_foreign_join_path
2134  * Creates a path corresponding to a scan of a foreign join,
2135  * returning the pathnode.
2136  *
2137  * This function is never called from core Postgres; rather, it's expected
2138  * to be called by the GetForeignJoinPaths function of a foreign data wrapper.
2139  * We make the FDW supply all fields of the path, since we do not have any way
2140  * to calculate them in core. However, there is a usually-sane default for
2141  * the pathtarget (rel->reltarget), so we let a NULL for "target" select that.
2142  */
2143 ForeignPath *
2144 create_foreign_join_path(PlannerInfo *root, RelOptInfo *rel,
2145  PathTarget *target,
2146  double rows, Cost startup_cost, Cost total_cost,
2147  List *pathkeys,
2148  Relids required_outer,
2149  Path *fdw_outerpath,
2150  List *fdw_private)
2151 {
2152  ForeignPath *pathnode = makeNode(ForeignPath);
2153 
2154  /*
2155  * We should use get_joinrel_parampathinfo to handle parameterized paths,
2156  * but the API of this function doesn't support it, and existing
2157  * extensions aren't yet trying to build such paths anyway. For the
2158  * moment just throw an error if someone tries it; eventually we should
2159  * revisit this.
2160  */
2161  if (!bms_is_empty(required_outer) || !bms_is_empty(rel->lateral_relids))
2162  elog(ERROR, "parameterized foreign joins are not supported yet");
2163 
2164  pathnode->path.pathtype = T_ForeignScan;
2165  pathnode->path.parent = rel;
2166  pathnode->path.pathtarget = target ? target : rel->reltarget;
2167  pathnode->path.param_info = NULL; /* XXX see above */
2168  pathnode->path.parallel_aware = false;
2169  pathnode->path.parallel_safe = rel->consider_parallel;
2170  pathnode->path.parallel_workers = 0;
2171  pathnode->path.rows = rows;
2172  pathnode->path.startup_cost = startup_cost;
2173  pathnode->path.total_cost = total_cost;
2174  pathnode->path.pathkeys = pathkeys;
2175 
2176  pathnode->fdw_outerpath = fdw_outerpath;
2177  pathnode->fdw_private = fdw_private;
2178 
2179  return pathnode;
2180 }
2181 
2182 /*
2183  * create_foreign_upper_path
2184  * Creates a path corresponding to an upper relation that's computed
2185  * directly by an FDW, returning the pathnode.
2186  *
2187  * This function is never called from core Postgres; rather, it's expected to
2188  * be called by the GetForeignUpperPaths function of a foreign data wrapper.
2189  * We make the FDW supply all fields of the path, since we do not have any way
2190  * to calculate them in core. However, there is a usually-sane default for
2191  * the pathtarget (rel->reltarget), so we let a NULL for "target" select that.
2192  */
2193 ForeignPath *
2194 create_foreign_upper_path(PlannerInfo *root, RelOptInfo *rel,
2195  PathTarget *target,
2196  double rows, Cost startup_cost, Cost total_cost,
2197  List *pathkeys,
2198  Path *fdw_outerpath,
2199  List *fdw_private)
2200 {
2201  ForeignPath *pathnode = makeNode(ForeignPath);
2202 
2203  /*
2204  * Upper relations should never have any lateral references, since joining
2205  * is complete.
2206  */
2207  Assert(bms_is_empty(rel->lateral_relids));
2208 
2209  pathnode->path.pathtype = T_ForeignScan;
2210  pathnode->path.parent = rel;
2211  pathnode->path.pathtarget = target ? target : rel->reltarget;
2212  pathnode->path.param_info = NULL;
2213  pathnode->path.parallel_aware = false;
2214  pathnode->path.parallel_safe = rel->consider_parallel;
2215  pathnode->path.parallel_workers = 0;
2216  pathnode->path.rows = rows;
2217  pathnode->path.startup_cost = startup_cost;
2218  pathnode->path.total_cost = total_cost;
2219  pathnode->path.pathkeys = pathkeys;
2220 
2221  pathnode->fdw_outerpath = fdw_outerpath;
2222  pathnode->fdw_private = fdw_private;
2223 
2224  return pathnode;
2225 }
2226 
2227 /*
2228  * calc_nestloop_required_outer
2229  * Compute the required_outer set for a nestloop join path
2230  *
2231  * Note: result must not share storage with either input
2232  */
2233 Relids
2234 calc_nestloop_required_outer(Relids outerrelids,
2235  Relids outer_paramrels,
2236  Relids innerrelids,
2237  Relids inner_paramrels)
2238 {
2239  Relids required_outer;
2240 
2241  /* inner_path can require rels from outer path, but not vice versa */
2242  Assert(!bms_overlap(outer_paramrels, innerrelids));
2243  /* easy case if inner path is not parameterized */
2244  if (!inner_paramrels)
2245  return bms_copy(outer_paramrels);
2246  /* else, form the union ... */
2247  required_outer = bms_union(outer_paramrels, inner_paramrels);
2248  /* ... and remove any mention of now-satisfied outer rels */
2249  required_outer = bms_del_members(required_outer,
2250  outerrelids);
2251  /* maintain invariant that required_outer is exactly NULL if empty */
2252  if (bms_is_empty(required_outer))
2253  {
2254  bms_free(required_outer);
2255  required_outer = NULL;
2256  }
2257  return required_outer;
2258 }
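/*
 * [Editorial sketch, not part of pathnode.c]  A concrete case: the outer
 * side scans rel 1 unparameterized, the inner side scans rel 3 but is
 * parameterized by rels {1,2}.  Rel 1 is now supplied by the outer side,
 * so the join as a whole still requires only rel 2 from outside.
 */
static void
example_nestloop_required_outer(void)
{
	Relids		outerrelids = bms_make_singleton(1);
	Relids		innerrelids = bms_make_singleton(3);
	Relids		inner_paramrels = bms_add_member(bms_make_singleton(1), 2);
	Relids		required_outer;

	required_outer = calc_nestloop_required_outer(outerrelids,
												  NULL,	/* outer not parameterized */
												  innerrelids,
												  inner_paramrels);
	Assert(bms_equal(required_outer, bms_make_singleton(2)));
}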
2259 
2260 /*
2261  * calc_non_nestloop_required_outer
2262  * Compute the required_outer set for a merge or hash join path
2263  *
2264  * Note: result must not share storage with either input
2265  */
2266 Relids
2267 calc_non_nestloop_required_outer(Path *outer_path, Path *inner_path)
2268 {
2269  Relids outer_paramrels = PATH_REQ_OUTER(outer_path);
2270  Relids inner_paramrels = PATH_REQ_OUTER(inner_path);
2271  Relids required_outer;
2272 
2273  /* neither path can require rels from the other */
2274  Assert(!bms_overlap(outer_paramrels, inner_path->parent->relids));
2275  Assert(!bms_overlap(inner_paramrels, outer_path->parent->relids));
2276  /* form the union ... */
2277  required_outer = bms_union(outer_paramrels, inner_paramrels);
2278  /* we do not need an explicit test for empty; bms_union gets it right */
2279  return required_outer;
2280 }
2281 
2282 /*
2283  * create_nestloop_path
2284  * Creates a pathnode corresponding to a nestloop join between two
2285  * relations.
2286  *
2287  * 'joinrel' is the join relation.
2288  * 'jointype' is the type of join required
2289  * 'workspace' is the result from initial_cost_nestloop
2290  * 'extra' contains various information about the join
2291  * 'outer_path' is the outer path
2292  * 'inner_path' is the inner path
2293  * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
2294  * 'pathkeys' are the path keys of the new join path
2295  * 'required_outer' is the set of required outer rels
2296  *
2297  * Returns the resulting path node.
2298  */
2299 NestPath *
2300 create_nestloop_path(PlannerInfo *root,
2301  RelOptInfo *joinrel,
2302  JoinType jointype,
2303  JoinCostWorkspace *workspace,
2304  JoinPathExtraData *extra,
2305  Path *outer_path,
2306  Path *inner_path,
2307  List *restrict_clauses,
2308  List *pathkeys,
2309  Relids required_outer)
2310 {
2311  NestPath *pathnode = makeNode(NestPath);
2312  Relids inner_req_outer = PATH_REQ_OUTER(inner_path);
2313 
2314  /*
2315  * If the inner path is parameterized by the outer, we must drop any
2316  * restrict_clauses that are due to be moved into the inner path. We have
2317  * to do this now, rather than postpone the work till createplan time,
2318  * because the restrict_clauses list can affect the size and cost
2319  * estimates for this path.
2320  */
2321  if (bms_overlap(inner_req_outer, outer_path->parent->relids))
2322  {
2323  Relids inner_and_outer = bms_union(inner_path->parent->relids,
2324  inner_req_outer);
2325  List *jclauses = NIL;
2326  ListCell *lc;
2327 
2328  foreach(lc, restrict_clauses)
2329  {
2330  RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
2331 
2332  if (!join_clause_is_movable_into(rinfo,
2333  inner_path->parent->relids,
2334  inner_and_outer))
2335  jclauses = lappend(jclauses, rinfo);
2336  }
2337  restrict_clauses = jclauses;
2338  }
2339 
2340  pathnode->path.pathtype = T_NestLoop;
2341  pathnode->path.parent = joinrel;
2342  pathnode->path.pathtarget = joinrel->reltarget;
2343  pathnode->path.param_info =
2344  get_joinrel_parampathinfo(root,
2345  joinrel,
2346  outer_path,
2347  inner_path,
2348  extra->sjinfo,
2349  required_outer,
2350  &restrict_clauses);
2351  pathnode->path.parallel_aware = false;
2352  pathnode->path.parallel_safe = joinrel->consider_parallel &&
2353  outer_path->parallel_safe && inner_path->parallel_safe;
2354  /* This is a foolish way to estimate parallel_workers, but for now... */
2355  pathnode->path.parallel_workers = outer_path->parallel_workers;
2356  pathnode->path.pathkeys = pathkeys;
2357  pathnode->jointype = jointype;
2358  pathnode->inner_unique = extra->inner_unique;
2359  pathnode->outerjoinpath = outer_path;
2360  pathnode->innerjoinpath = inner_path;
2361  pathnode->joinrestrictinfo = restrict_clauses;
2362 
2363  final_cost_nestloop(root, pathnode, workspace, extra);
2364 
2365  return pathnode;
2366 }
2367 
2368 /*
2369  * create_mergejoin_path
2370  * Creates a pathnode corresponding to a mergejoin join between
2371  * two relations
2372  *
2373  * 'joinrel' is the join relation
2374  * 'jointype' is the type of join required
2375  * 'workspace' is the result from initial_cost_mergejoin
2376  * 'extra' contains various information about the join
2377  * 'outer_path' is the outer path
2378  * 'inner_path' is the inner path
2379  * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
2380  * 'pathkeys' are the path keys of the new join path
2381  * 'required_outer' is the set of required outer rels
2382  * 'mergeclauses' are the RestrictInfo nodes to use as merge clauses
2383  * (this should be a subset of the restrict_clauses list)
2384  * 'outersortkeys' are the sort varkeys for the outer relation
2385  * 'innersortkeys' are the sort varkeys for the inner relation
2386  */
2387 MergePath *
2388 create_mergejoin_path(PlannerInfo *root,
2389  RelOptInfo *joinrel,
2390  JoinType jointype,
2391  JoinCostWorkspace *workspace,
2392  JoinPathExtraData *extra,
2393  Path *outer_path,
2394  Path *inner_path,
2395  List *restrict_clauses,
2396  List *pathkeys,
2397  Relids required_outer,
2398  List *mergeclauses,
2399  List *outersortkeys,
2400  List *innersortkeys)
2401 {
2402  MergePath *pathnode = makeNode(MergePath);
2403 
2404  pathnode->jpath.path.pathtype = T_MergeJoin;
2405  pathnode->jpath.path.parent = joinrel;
2406  pathnode->jpath.path.pathtarget = joinrel->reltarget;
2407  pathnode->jpath.path.param_info =
2408  get_joinrel_parampathinfo(root,
2409  joinrel,
2410  outer_path,
2411  inner_path,
2412  extra->sjinfo,
2413  required_outer,
2414  &restrict_clauses);
2415  pathnode->jpath.path.parallel_aware = false;
2416  pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2417  outer_path->parallel_safe && inner_path->parallel_safe;
2418  /* This is a foolish way to estimate parallel_workers, but for now... */
2419  pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2420  pathnode->jpath.path.pathkeys = pathkeys;
2421  pathnode->jpath.jointype = jointype;
2422  pathnode->jpath.inner_unique = extra->inner_unique;
2423  pathnode->jpath.outerjoinpath = outer_path;
2424  pathnode->jpath.innerjoinpath = inner_path;
2425  pathnode->jpath.joinrestrictinfo = restrict_clauses;
2426  pathnode->path_mergeclauses = mergeclauses;
2427  pathnode->outersortkeys = outersortkeys;
2428  pathnode->innersortkeys = innersortkeys;
2429  /* pathnode->skip_mark_restore will be set by final_cost_mergejoin */
2430  /* pathnode->materialize_inner will be set by final_cost_mergejoin */
2431 
2432  final_cost_mergejoin(root, pathnode, workspace, extra);
2433 
2434  return pathnode;
2435 }
2436 
2437 /*
2438  * create_hashjoin_path
2439  * Creates a pathnode corresponding to a hash join between two relations.
2440  *
2441  * 'joinrel' is the join relation
2442  * 'jointype' is the type of join required
2443  * 'workspace' is the result from initial_cost_hashjoin
2444  * 'extra' contains various information about the join
2445  * 'outer_path' is the cheapest outer path
2446  * 'inner_path' is the cheapest inner path
2447  * 'parallel_hash' to select Parallel Hash of inner path (shared hash table)
2448  * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
2449  * 'required_outer' is the set of required outer rels
2450  * 'hashclauses' are the RestrictInfo nodes to use as hash clauses
2451  * (this should be a subset of the restrict_clauses list)
2452  */
2453 HashPath *
2454 create_hashjoin_path(PlannerInfo *root,
2455  RelOptInfo *joinrel,
2456  JoinType jointype,
2457  JoinCostWorkspace *workspace,
2458  JoinPathExtraData *extra,
2459  Path *outer_path,
2460  Path *inner_path,
2461  bool parallel_hash,
2462  List *restrict_clauses,
2463  Relids required_outer,
2464  List *hashclauses)
2465 {
2466  HashPath *pathnode = makeNode(HashPath);
2467 
2468  pathnode->jpath.path.pathtype = T_HashJoin;
2469  pathnode->jpath.path.parent = joinrel;
2470  pathnode->jpath.path.pathtarget = joinrel->reltarget;
2471  pathnode->jpath.path.param_info =
2472  get_joinrel_parampathinfo(root,
2473  joinrel,
2474  outer_path,
2475  inner_path,
2476  extra->sjinfo,
2477  required_outer,
2478  &restrict_clauses);
2479  pathnode->jpath.path.parallel_aware =
2480  joinrel->consider_parallel && parallel_hash;
2481  pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2482  outer_path->parallel_safe && inner_path->parallel_safe;
2483  /* This is a foolish way to estimate parallel_workers, but for now... */
2484  pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2485 
2486  /*
2487  * A hashjoin never has pathkeys, since its output ordering is
2488  * unpredictable due to possible batching. XXX If the inner relation is
2489  * small enough, we could instruct the executor that it must not batch,
2490  * and then we could assume that the output inherits the outer relation's
2491  * ordering, which might save a sort step. However there is considerable
2492  * downside if our estimate of the inner relation size is badly off. For
2493  * the moment we don't risk it. (Note also that if we wanted to take this
2494  * seriously, joinpath.c would have to consider many more paths for the
2495  * outer rel than it does now.)
2496  */
2497  pathnode->jpath.path.pathkeys = NIL;
2498  pathnode->jpath.jointype = jointype;
2499  pathnode->jpath.inner_unique = extra->inner_unique;
2500  pathnode->jpath.outerjoinpath = outer_path;
2501  pathnode->jpath.innerjoinpath = inner_path;
2502  pathnode->jpath.joinrestrictinfo = restrict_clauses;
2503  pathnode->path_hashclauses = hashclauses;
2504  /* final_cost_hashjoin will fill in pathnode->num_batches */
2505 
2506  final_cost_hashjoin(root, pathnode, workspace, extra);
2507 
2508  return pathnode;
2509 }
2510 
2511 /*
2512  * create_projection_path
2513  * Creates a pathnode that represents performing a projection.
2514  *
2515  * 'rel' is the parent relation associated with the result
2516  * 'subpath' is the path representing the source of data
2517  * 'target' is the PathTarget to be computed
2518  */
2519 ProjectionPath *
2520 create_projection_path(PlannerInfo *root,
2521  RelOptInfo *rel,
2522  Path *subpath,
2523  PathTarget *target)
2524 {
2525  ProjectionPath *pathnode = makeNode(ProjectionPath);
2526  PathTarget *oldtarget = subpath->pathtarget;
2527 
2528  pathnode->path.pathtype = T_Result;
2529  pathnode->path.parent = rel;
2530  pathnode->path.pathtarget = target;
2531  /* For now, assume we are above any joins, so no parameterization */
2532  pathnode->path.param_info = NULL;
2533  pathnode->path.parallel_aware = false;
2534  pathnode->path.parallel_safe = rel->consider_parallel &&
2535  subpath->parallel_safe &&
2536  is_parallel_safe(root, (Node *) target->exprs);
2537  pathnode->path.parallel_workers = subpath->parallel_workers;
2538  /* Projection does not change the sort order */
2539  pathnode->path.pathkeys = subpath->pathkeys;
2540 
2541  pathnode->subpath = subpath;
2542 
2543  /*
2544  * We might not need a separate Result node. If the input plan node type
2545  * can project, we can just tell it to project something else. Or, if it
2546  * can't project but the desired target has the same expression list as
2547  * what the input will produce anyway, we can still give it the desired
2548  * tlist (possibly changing its ressortgroupref labels, but nothing else).
2549  * Note: in the latter case, create_projection_plan has to recheck our
2550  * conclusion; see comments therein.
2551  */
2552  if (is_projection_capable_path(subpath) ||
2553  equal(oldtarget->exprs, target->exprs))
2554  {
2555  /* No separate Result node needed */
2556  pathnode->dummypp = true;
2557 
2558  /*
2559  * Set cost of plan as subpath's cost, adjusted for tlist replacement.
2560  */
2561  pathnode->path.rows = subpath->rows;
2562  pathnode->path.startup_cost = subpath->startup_cost +
2563  (target->cost.startup - oldtarget->cost.startup);
2564  pathnode->path.total_cost = subpath->total_cost +
2565  (target->cost.startup - oldtarget->cost.startup) +
2566  (target->cost.per_tuple - oldtarget->cost.per_tuple) * subpath->rows;
2567  }
2568  else
2569  {
2570  /* We really do need the Result node */
2571  pathnode->dummypp = false;
2572 
2573  /*
2574  * The Result node's cost is cpu_tuple_cost per row, plus the cost of
2575  * evaluating the tlist. There is no qual to worry about.
2576  */
2577  pathnode->path.rows = subpath->rows;
2578  pathnode->path.startup_cost = subpath->startup_cost +
2579  target->cost.startup;
2580  pathnode->path.total_cost = subpath->total_cost +
2581  target->cost.startup +
2582  (cpu_tuple_cost + target->cost.per_tuple) * subpath->rows;
2583  }
2584 
2585  return pathnode;
2586 }
2587 
2588 /*
2589  * apply_projection_to_path
2590  * Add a projection step, or just apply the target directly to given path.
2591  *
2592  * This has the same net effect as create_projection_path(), except that if
2593  * a separate Result plan node isn't needed, we just replace the given path's
2594  * pathtarget with the desired one. This must be used only when the caller
2595  * knows that the given path isn't referenced elsewhere and so can be modified
2596  * in-place.
2597  *
2598  * If the input path is a GatherPath or GatherMergePath, we try to push the
2599  * new target down to its input as well; this is a yet more invasive
2600  * modification of the input path, which create_projection_path() can't do.
2601  *
2602  * Note that we mustn't change the source path's parent link; so when it is
2603  * add_path'd to "rel" things will be a bit inconsistent. So far that has
2604  * not caused any trouble.
2605  *
2606  * 'rel' is the parent relation associated with the result
2607  * 'path' is the path representing the source of data
2608  * 'target' is the PathTarget to be computed
2609  */
2610 Path *
2611 apply_projection_to_path(PlannerInfo *root,
2612  RelOptInfo *rel,
2613  Path *path,
2614  PathTarget *target)
2615 {
2616  QualCost oldcost;
2617 
2618  /*
2619  * If given path can't project, we might need a Result node, so make a
2620  * separate ProjectionPath.
2621  */
2622  if (!is_projection_capable_path(path))
2623  return (Path *) create_projection_path(root, rel, path, target);
2624 
2625  /*
2626  * We can just jam the desired tlist into the existing path, being sure to
2627  * update its cost estimates appropriately.
2628  */
2629  oldcost = path->pathtarget->cost;
2630  path->pathtarget = target;
2631 
2632  path->startup_cost += target->cost.startup - oldcost.startup;
2633  path->total_cost += target->cost.startup - oldcost.startup +
2634  (target->cost.per_tuple - oldcost.per_tuple) * path->rows;
2635 
2636  /*
2637  * If the path happens to be a Gather or GatherMerge path, we'd like to
2638  * arrange for the subpath to return the required target list so that
2639  * workers can help project. But if there is something that is not
2640  * parallel-safe in the target expressions, then we can't.
2641  */
2642  if ((IsA(path, GatherPath) ||IsA(path, GatherMergePath)) &&
2643  is_parallel_safe(root, (Node *) target->exprs))
2644  {
2645  /*
2646  * We always use create_projection_path here, even if the subpath is
2647  * projection-capable, so as to avoid modifying the subpath in place.
2648  * It seems unlikely at present that there could be any other
2649  * references to the subpath, but better safe than sorry.
2650  *
2651  * Note that we don't change the parallel path's cost estimates; it
2652  * might be appropriate to do so, to reflect the fact that the bulk of
2653  * the target evaluation will happen in workers.
2654  */
2655  if (IsA(path, GatherPath))
2656  {
2657  GatherPath *gpath = (GatherPath *) path;
2658 
2659  gpath->subpath = (Path *)
2660  create_projection_path(root,
2661  gpath->subpath->parent,
2662  gpath->subpath,
2663  target);
2664  }
2665  else
2666  {
2667  GatherMergePath *gmpath = (GatherMergePath *) path;
2668 
2669  gmpath->subpath = (Path *)
2670  create_projection_path(root,
2671  gmpath->subpath->parent,
2672  gmpath->subpath,
2673  target);
2674  }
2675  }
2676  else if (path->parallel_safe &&
2677  !is_parallel_safe(root, (Node *) target->exprs))
2678  {
2679  /*
2680  * We're inserting a parallel-restricted target list into a path
2681  * currently marked parallel-safe, so we have to mark it as no longer
2682  * safe.
2683  */
2684  path->parallel_safe = false;
2685  }
2686 
2687  return path;
2688 }
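/*
 * [Editorial sketch, not part of pathnode.c]  The usual choice between the
 * two projection helpers: build a separate ProjectionPath when the input
 * path may be referenced elsewhere, or modify it in place with
 * apply_projection_to_path() when the caller owns it outright.  The helper
 * name and the boolean flag are hypothetical.
 */
static Path *
example_project(PlannerInfo *root, RelOptInfo *rel, Path *path,
				PathTarget *target, bool path_is_shared)
{
	if (path_is_shared)
		return (Path *) create_projection_path(root, rel, path, target);

	return apply_projection_to_path(root, rel, path, target);
}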
2689 
2690 /*
2691  * create_set_projection_path
2692  * Creates a pathnode that represents performing a projection that
2693  * includes set-returning functions.
2694  *
2695  * 'rel' is the parent relation associated with the result
2696  * 'subpath' is the path representing the source of data
2697  * 'target' is the PathTarget to be computed
2698  */
2699 ProjectSetPath *
2700 create_set_projection_path(PlannerInfo *root,
2701  RelOptInfo *rel,
2702  Path *subpath,
2703  PathTarget *target)
2704 {
2705  ProjectSetPath *pathnode = makeNode(ProjectSetPath);
2706  double tlist_rows;
2707  ListCell *lc;
2708 
2709  pathnode->path.pathtype = T_ProjectSet;
2710  pathnode->path.parent = rel;
2711  pathnode->path.pathtarget = target;
2712  /* For now, assume we are above any joins, so no parameterization */
2713  pathnode->path.param_info = NULL;
2714  pathnode->path.parallel_aware = false;
2715  pathnode->path.parallel_safe = rel->consider_parallel &&
2716  subpath->parallel_safe &&
2717  is_parallel_safe(root, (Node *) target->exprs);
2718  pathnode->path.parallel_workers = subpath->parallel_workers;
2719  /* Projection does not change the sort order XXX? */
2720  pathnode->path.pathkeys = subpath->pathkeys;
2721 
2722  pathnode->subpath = subpath;
2723 
2724  /*
2725  * Estimate number of rows produced by SRFs for each row of input; if
2726  * there's more than one in this node, use the maximum.
2727  */
2728  tlist_rows = 1;
2729  foreach(lc, target->exprs)
2730  {
2731  Node *node = (Node *) lfirst(lc);
2732  double itemrows;
2733 
2734  itemrows = expression_returns_set_rows(root, node);
2735  if (tlist_rows < itemrows)
2736  tlist_rows = itemrows;
2737  }
2738 
2739  /*
2740  * In addition to the cost of evaluating the tlist, charge cpu_tuple_cost
2741  * per input row, and half of cpu_tuple_cost for each added output row.
2742  * This is slightly bizarre maybe, but it's what 9.6 did; we may revisit
2743  * this estimate later.
2744  */
2745  pathnode->path.rows = subpath->rows * tlist_rows;
2746  pathnode->path.startup_cost = subpath->startup_cost +
2747  target->cost.startup;
2748  pathnode->path.total_cost = subpath->total_cost +
2749  target->cost.startup +
2750  (cpu_tuple_cost + target->cost.per_tuple) * subpath->rows +
2751  (pathnode->path.rows - subpath->rows) * cpu_tuple_cost / 2;
2752 
2753  return pathnode;
2754 }
2755 
2756 /*
2757  * create_sort_path
2758  * Creates a pathnode that represents performing an explicit sort.
2759  *
2760  * 'rel' is the parent relation associated with the result
2761  * 'subpath' is the path representing the source of data
2762  * 'pathkeys' represents the desired sort order
2763  * 'limit_tuples' is the estimated bound on the number of output tuples,
2764  * or -1 if no LIMIT or couldn't estimate
2765  */
2766 SortPath *
2767 create_sort_path(PlannerInfo *root,
2768  RelOptInfo *rel,
2769  Path *subpath,
2770  List *pathkeys,
2771  double limit_tuples)
2772 {
2773  SortPath *pathnode = makeNode(SortPath);
2774 
2775  pathnode->path.pathtype = T_Sort;
2776  pathnode->path.parent = rel;
2777  /* Sort doesn't project, so use source path's pathtarget */
2778  pathnode->path.pathtarget = subpath->pathtarget;
2779  /* For now, assume we are above any joins, so no parameterization */
2780  pathnode->path.param_info = NULL;
2781  pathnode->path.parallel_aware = false;
2782  pathnode->path.parallel_safe = rel->consider_parallel &&
2783  subpath->parallel_safe;
2784  pathnode->path.parallel_workers = subpath->parallel_workers;
2785  pathnode->path.pathkeys = pathkeys;
2786 
2787  pathnode->subpath = subpath;
2788 
2789  cost_sort(&pathnode->path, root, pathkeys,
2790  subpath->total_cost,
2791  subpath->rows,
2792  subpath->pathtarget->width,
2793  0.0, /* XXX comparison_cost shouldn't be 0? */
2794  work_mem, limit_tuples);
2795 
2796  return pathnode;
2797 }
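/*
 * [Editorial sketch, not part of pathnode.c]  Ensuring a path satisfies the
 * query's ORDER BY, in the style of grouping_planner(): add an explicit
 * Sort only if the path is not already suitably ordered.  The helper name
 * and the use of root->sort_pathkeys as the target ordering are assumptions.
 */
static Path *
example_ensure_sorted(PlannerInfo *root, RelOptInfo *rel, Path *path,
					  double limit_tuples)
{
	if (pathkeys_contained_in(root->sort_pathkeys, path->pathkeys))
		return path;			/* already ordered as required */

	return (Path *) create_sort_path(root, rel, path,
									 root->sort_pathkeys, limit_tuples);
}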
2798 
2799 /*
2800  * create_group_path
2801  * Creates a pathnode that represents performing grouping of presorted input
2802  *
2803  * 'rel' is the parent relation associated with the result
2804  * 'subpath' is the path representing the source of data
2805  * (the result's PathTarget is always rel->reltarget)
2806  * 'groupClause' is a list of SortGroupClause's representing the grouping
2807  * 'qual' is the HAVING quals if any
2808  * 'numGroups' is the estimated number of groups
2809  */
2810 GroupPath *
2811 create_group_path(PlannerInfo *root,
2812  RelOptInfo *rel,
2813  Path *subpath,
2814  List *groupClause,
2815  List *qual,
2816  double numGroups)
2817 {
2818  GroupPath *pathnode = makeNode(GroupPath);
2819  PathTarget *target = rel->reltarget;
2820 
2821  pathnode->path.pathtype = T_Group;
2822  pathnode->path.parent = rel;
2823  pathnode->path.pathtarget = target;
2824  /* For now, assume we are above any joins, so no parameterization */
2825  pathnode->path.param_info = NULL;
2826  pathnode->path.parallel_aware = false;
2827  pathnode->path.parallel_safe = rel->consider_parallel &&
2828  subpath->parallel_safe;
2829  pathnode->path.parallel_workers = subpath->parallel_workers;
2830  /* Group doesn't change sort ordering */
2831  pathnode->path.pathkeys = subpath->pathkeys;
2832 
2833  pathnode->subpath = subpath;
2834 
2835  pathnode->groupClause = groupClause;
2836  pathnode->qual = qual;
2837 
2838  cost_group(&pathnode->path, root,
2839  list_length(groupClause),
2840  numGroups,
2841  qual,
2842  subpath->startup_cost, subpath->total_cost,
2843  subpath->rows);
2844 
2845  /* add tlist eval cost for each output row */
2846  pathnode->path.startup_cost += target->cost.startup;
2847  pathnode->path.total_cost += target->cost.startup +
2848  target->cost.per_tuple * pathnode->path.rows;
2849 
2850  return pathnode;
2851 }
2852 
2853 /*
2854  * create_upper_unique_path
2855  * Creates a pathnode that represents performing an explicit Unique step
2856  * on presorted input.
2857  *
2858  * This produces a Unique plan node, but the use-case is so different from
2859  * create_unique_path that it doesn't seem worth trying to merge the two.
2860  *
2861  * 'rel' is the parent relation associated with the result
2862  * 'subpath' is the path representing the source of data
2863  * 'numCols' is the number of grouping columns
2864  * 'numGroups' is the estimated number of groups
2865  *
2866  * The input path must be sorted on the grouping columns, plus possibly
2867  * additional columns; so the first numCols pathkeys are the grouping columns
2868  */
2869 UpperUniquePath *
2870 create_upper_unique_path(PlannerInfo *root,
2871  RelOptInfo *rel,
2872  Path *subpath,
2873  int numCols,
2874  double numGroups)
2875 {
2876  UpperUniquePath *pathnode = makeNode(UpperUniquePath);
2877 
2878  pathnode->path.pathtype = T_Unique;
2879  pathnode->path.parent = rel;
2880  /* Unique doesn't project, so use source path's pathtarget */
2881  pathnode->path.pathtarget = subpath->pathtarget;
2882  /* For now, assume we are above any joins, so no parameterization */
2883  pathnode->path.param_info = NULL;
2884  pathnode->path.parallel_aware = false;
2885  pathnode->path.parallel_safe = rel->consider_parallel &&
2886  subpath->parallel_safe;
2887  pathnode->path.parallel_workers = subpath->parallel_workers;
2888  /* Unique doesn't change the input ordering */
2889  pathnode->path.pathkeys = subpath->pathkeys;
2890 
2891  pathnode->subpath = subpath;
2892  pathnode->numkeys = numCols;
2893 
2894  /*
2895  * Charge one cpu_operator_cost per comparison per input tuple. We assume
2896  * all columns get compared for most of the tuples. (XXX probably this is
2897  * an overestimate.)
2898  */
2899  pathnode->path.startup_cost = subpath->startup_cost;
2900  pathnode->path.total_cost = subpath->total_cost +
2901  cpu_operator_cost * subpath->rows * numCols;
2902  pathnode->path.rows = numGroups;
2903 
2904  return pathnode;
2905 }
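/*
 * [Editorial sketch, not part of pathnode.c]  Sort-based DISTINCT in the
 * style of create_distinct_paths(): sort the input on the distinct
 * pathkeys, then add a Unique step over those leading columns.  The helper
 * name is hypothetical and numDistinctRows is assumed to come from
 * estimate_num_groups().
 */
static Path *
example_sorted_distinct(PlannerInfo *root, RelOptInfo *distinct_rel,
						Path *input_path, double numDistinctRows)
{
	if (!pathkeys_contained_in(root->distinct_pathkeys, input_path->pathkeys))
		input_path = (Path *) create_sort_path(root, distinct_rel, input_path,
											   root->distinct_pathkeys, -1.0);

	return (Path *) create_upper_unique_path(root, distinct_rel, input_path,
											 list_length(root->distinct_pathkeys),
											 numDistinctRows);
}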
2906 
2907 /*
2908  * create_agg_path
2909  * Creates a pathnode that represents performing aggregation/grouping
2910  *
2911  * 'rel' is the parent relation associated with the result
2912  * 'subpath' is the path representing the source of data
2913  * 'target' is the PathTarget to be computed
2914  * 'aggstrategy' is the Agg node's basic implementation strategy
2915  * 'aggsplit' is the Agg node's aggregate-splitting mode
2916  * 'groupClause' is a list of SortGroupClause's representing the grouping
2917  * 'qual' is the HAVING quals if any
2918  * 'aggcosts' contains cost info about the aggregate functions to be computed
2919  * 'numGroups' is the estimated number of groups (1 if not grouping)
2920  */
2921 AggPath *
2922 create_agg_path(PlannerInfo *root,
2923  RelOptInfo *rel,
2924  Path *subpath,
2925  PathTarget *target,
2926  AggStrategy aggstrategy,
2927  AggSplit aggsplit,
2928  List *groupClause,
2929  List *qual,
2930  const AggClauseCosts *aggcosts,
2931  double numGroups)
2932 {
2933  AggPath *pathnode = makeNode(AggPath);
2934 
2935  pathnode->path.pathtype = T_Agg;
2936  pathnode->path.parent = rel;
2937  pathnode->path.pathtarget = target;
2938  /* For now, assume we are above any joins, so no parameterization */
2939  pathnode->path.param_info = NULL;
2940  pathnode->path.parallel_aware = false;
2941  pathnode->path.parallel_safe = rel->consider_parallel &&
2942  subpath->parallel_safe;
2943  pathnode->path.parallel_workers = subpath->parallel_workers;
2944  if (aggstrategy == AGG_SORTED)
2945  pathnode->path.pathkeys = subpath->pathkeys; /* preserves order */
2946  else
2947  pathnode->path.pathkeys = NIL; /* output is unordered */
2948  pathnode->subpath = subpath;
2949 
2950  pathnode->aggstrategy = aggstrategy;
2951  pathnode->aggsplit = aggsplit;
2952  pathnode->numGroups = numGroups;
2953  pathnode->groupClause = groupClause;
2954  pathnode->qual = qual;
2955 
2956  cost_agg(&pathnode->path, root,
2957  aggstrategy, aggcosts,
2958  list_length(groupClause), numGroups,
2959  qual,
2960  subpath->startup_cost, subpath->total_cost,
2961  subpath->rows);
2962 
2963  /* add tlist eval cost for each output row */
2964  pathnode->path.startup_cost += target->cost.startup;
2965  pathnode->path.total_cost += target->cost.startup +
2966  target->cost.per_tuple * pathnode->path.rows;
2967 
2968  return pathnode;
2969 }
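/*
 * [Editorial sketch, not part of pathnode.c]  Building a hashed GROUP BY
 * path, roughly as the planner does for the plain (non-grouping-sets)
 * case; agg_costs and dNumGroups are assumed to have been computed by the
 * caller, and the helper name is hypothetical.
 */
static void
example_add_hashagg(PlannerInfo *root, RelOptInfo *grouped_rel,
					Path *cheapest_path, PathTarget *grouping_target,
					List *havingQual, const AggClauseCosts *agg_costs,
					double dNumGroups)
{
	add_path(grouped_rel, (Path *)
			 create_agg_path(root, grouped_rel, cheapest_path,
							 grouping_target,
							 AGG_HASHED, AGGSPLIT_SIMPLE,
							 root->parse->groupClause, havingQual,
							 agg_costs, dNumGroups));
}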
2970 
2971 /*
2972  * create_groupingsets_path
2973  * Creates a pathnode that represents performing GROUPING SETS aggregation
2974  *
2975  * GroupingSetsPath represents sorted grouping with one or more grouping sets.
2976  * The input path's result must be sorted to match the grouping clause of
2977  * the last RollupData in 'rollups'.
2978  *
2979  * 'rel' is the parent relation associated with the result
2980  * 'subpath' is the path representing the source of data
2981  * 'aggstrategy' is the Agg node's basic implementation strategy
2982  * 'having_qual' is the HAVING quals if any
2983  * 'rollups' is a list of RollupData nodes
2984  * 'agg_costs' contains cost info about the aggregate functions to be computed
2985  * 'numGroups' is the estimated total number of groups
2986  */
2987 GroupingSetsPath *
2988 create_groupingsets_path(PlannerInfo *root,
2989  RelOptInfo *rel,
2990  Path *subpath,
2991  List *having_qual,
2992  AggStrategy aggstrategy,
2993  List *rollups,
2994  const AggClauseCosts *agg_costs,
2995  double numGroups)
2996 {
2997  GroupingSetsPath *pathnode = makeNode(GroupingSetsPath);
2998  PathTarget *target = rel->reltarget;
2999  ListCell *lc;
3000  bool is_first = true;
3001  bool is_first_sort = true;
3002 
3003  /* The topmost generated Plan node will be an Agg */
3004  pathnode->path.pathtype = T_Agg;
3005  pathnode->path.parent = rel;
3006  pathnode->path.pathtarget = target;
3007  pathnode->path.param_info = subpath->param_info;
3008  pathnode->path.parallel_aware = false;
3009  pathnode->path.parallel_safe = rel->consider_parallel &&
3010  subpath->parallel_safe;
3011  pathnode->path.parallel_workers = subpath->parallel_workers;
3012  pathnode->subpath = subpath;
3013 
3014  /*
3015  * Simplify callers by downgrading AGG_SORTED to AGG_PLAIN, and AGG_MIXED
3016  * to AGG_HASHED, here if possible.
3017  */
3018  if (aggstrategy == AGG_SORTED &&
3019  list_length(rollups) == 1 &&
3020  ((RollupData *) linitial(rollups))->groupClause == NIL)
3021  aggstrategy = AGG_PLAIN;
3022 
3023  if (aggstrategy == AGG_MIXED &&
3024  list_length(rollups) == 1)
3025  aggstrategy = AGG_HASHED;
3026 
3027  /*
3028  * Output will be in sorted order by group_pathkeys if, and only if, there
3029  * is a single rollup operation on a non-empty list of grouping
3030  * expressions.
3031  */
3032  if (aggstrategy == AGG_SORTED && list_length(rollups) == 1)
3033  pathnode->path.pathkeys = root->group_pathkeys;
3034  else
3035  pathnode->path.pathkeys = NIL;
3036 
3037  pathnode->aggstrategy = aggstrategy;
3038  pathnode->rollups = rollups;
3039  pathnode->qual = having_qual;
3040 
3041  Assert(rollups != NIL);
3042  Assert(aggstrategy != AGG_PLAIN || list_length(rollups) == 1);
3043  Assert(aggstrategy != AGG_MIXED || list_length(rollups) > 1);
3044 
3045  foreach(lc, rollups)
3046  {
3047  RollupData *rollup = lfirst(lc);
3048  List *gsets = rollup->gsets;
3049  int numGroupCols = list_length(linitial(gsets));
3050 
3051  /*
3052  * In AGG_SORTED or AGG_PLAIN mode, the first rollup takes the
3053  * (already-sorted) input, and following ones do their own sort.
3054  *
3055  * In AGG_HASHED mode, there is one rollup for each grouping set.
3056  *
3057  * In AGG_MIXED mode, the first rollups are hashed, the first
3058  * non-hashed one takes the (already-sorted) input, and following ones
3059  * do their own sort.
3060  */
3061  if (is_first)
3062  {
3063  cost_agg(&pathnode->path, root,
3064  aggstrategy,
3065  agg_costs,
3066  numGroupCols,
3067  rollup->numGroups,
3068  having_qual,
3069  subpath->startup_cost,
3070  subpath->total_cost,
3071  subpath->rows);
3072  is_first = false;
3073  if (!rollup->is_hashed)
3074  is_first_sort = false;
3075  }
3076  else
3077  {
3078  Path sort_path; /* dummy for result of cost_sort */
3079  Path agg_path; /* dummy for result of cost_agg */
3080 
3081  if (rollup->is_hashed || is_first_sort)
3082  {
3083  /*
3084  * Account for cost of aggregation, but don't charge input
3085  * cost again
3086  */
3087  cost_agg(&agg_path, root,
3088  rollup->is_hashed ? AGG_HASHED : AGG_SORTED,
3089  agg_costs,
3090  numGroupCols,
3091  rollup->numGroups,
3092  having_qual,
3093  0.0, 0.0,
3094  subpath->rows);
3095  if (!rollup->is_hashed)
3096  is_first_sort = false;
3097  }
3098  else
3099  {
3100  /* Account for cost of sort, but don't charge input cost again */
3101  cost_sort(&sort_path, root, NIL,
3102  0.0,
3103  subpath->rows,
3104  subpath->pathtarget->width,
3105  0.0,
3106  work_mem,
3107  -1.0);
3108 
3109  /* Account for cost of aggregation */
3110 
3111  cost_agg(&agg_path, root,
3112  AGG_SORTED,
3113  agg_costs,
3114  numGroupCols,
3115  rollup->numGroups,
3116  having_qual,
3117  sort_path.startup_cost,
3118  sort_path.total_cost,
3119  sort_path.rows);
3120  }
3121 
3122  pathnode->path.total_cost += agg_path.total_cost;
3123  pathnode->path.rows += agg_path.rows;
3124  }
3125  }
3126 
3127  /* add tlist eval cost for each output row */
3128  pathnode->path.startup_cost += target->cost.startup;
3129  pathnode->path.total_cost += target->cost.startup +
3130  target->cost.per_tuple * pathnode->path.rows;
3131 
3132  return pathnode;
3133 }
3134 
3135 /*
3136  * create_minmaxagg_path
3137  * Creates a pathnode that represents computation of MIN/MAX aggregates
3138  *
3139  * 'rel' is the parent relation associated with the result
3140  * 'target' is the PathTarget to be computed
3141  * 'mmaggregates' is a list of MinMaxAggInfo structs
3142  * 'quals' is the HAVING quals if any
3143  */
3144 MinMaxAggPath *
3145 create_minmaxagg_path(PlannerInfo *root,
3146  RelOptInfo *rel,
3147  PathTarget *target,
3148  List *mmaggregates,
3149  List *quals)
3150 {
3151  MinMaxAggPath *pathnode = makeNode(MinMaxAggPath);
3152  Cost initplan_cost;
3153  ListCell *lc;
3154 
3155  /* The topmost generated Plan node will be a Result */
3156  pathnode->path.pathtype = T_Result;
3157  pathnode->path.parent = rel;
3158  pathnode->path.pathtarget = target;
3159  /* For now, assume we are above any joins, so no parameterization */
3160  pathnode->path.param_info = NULL;
3161  pathnode->path.parallel_aware = false;
3162  /* A MinMaxAggPath implies use of subplans, so cannot be parallel-safe */
3163  pathnode->path.parallel_safe = false;
3164  pathnode->path.parallel_workers = 0;
3165  /* Result is one unordered row */
3166  pathnode->path.rows = 1;
3167  pathnode->path.pathkeys = NIL;
3168 
3169  pathnode->mmaggregates = mmaggregates;
3170  pathnode->quals = quals;
3171 
3172  /* Calculate cost of all the initplans ... */
3173  initplan_cost = 0;
3174  foreach(lc, mmaggregates)
3175  {
3176  MinMaxAggInfo *mminfo = (MinMaxAggInfo *) lfirst(lc);
3177 
3178  initplan_cost += mminfo->pathcost;
3179  }
3180 
3181  /* add tlist eval cost for each output row, plus cpu_tuple_cost */
3182  pathnode->path.startup_cost = initplan_cost + target->cost.startup;
3183  pathnode->path.total_cost = initplan_cost + target->cost.startup +
3184  target->cost.per_tuple + cpu_tuple_cost;
3185 
3186  /*
3187  * Add cost of qual, if any --- but we ignore its selectivity, since our
3188  * rowcount estimate should be 1 no matter what the qual is.
3189  */
3190  if (quals)
3191  {
3192  QualCost qual_cost;
3193 
3194  cost_qual_eval(&qual_cost, quals, root);
3195  pathnode->path.startup_cost += qual_cost.startup;
3196  pathnode->path.total_cost += qual_cost.startup + qual_cost.per_tuple;
3197  }
3198 
3199  return pathnode;
3200 }
3201 
3202 /*
3203  * create_windowagg_path
3204  * Creates a pathnode that represents computation of window functions
3205  *
3206  * 'rel' is the parent relation associated with the result
3207  * 'subpath' is the path representing the source of data
3208  * 'target' is the PathTarget to be computed
3209  * 'windowFuncs' is a list of WindowFunc structs
3210  * 'winclause' is a WindowClause that is common to all the WindowFuncs
3211  *
3212  * The input must be sorted according to the WindowClause's PARTITION keys
3213  * plus ORDER BY keys.
3214  */
3215 WindowAggPath *
3216 create_windowagg_path(PlannerInfo *root,
3217  RelOptInfo *rel,
3218  Path *subpath,
3219  PathTarget *target,
3220  List *windowFuncs,
3221  WindowClause *winclause)
3222 {
3223  WindowAggPath *pathnode = makeNode(WindowAggPath);
3224 
3225  pathnode->path.pathtype = T_WindowAgg;
3226  pathnode->path.parent = rel;
3227  pathnode->path.pathtarget = target;
3228  /* For now, assume we are above any joins, so no parameterization */
3229  pathnode->path.param_info = NULL;
3230  pathnode->path.parallel_aware = false;
3231  pathnode->path.parallel_safe = rel->consider_parallel &&
3232  subpath->parallel_safe;
3233  pathnode->path.parallel_workers = subpath->parallel_workers;
3234  /* WindowAgg preserves the input sort order */
3235  pathnode->path.pathkeys = subpath->pathkeys;
3236 
3237  pathnode->subpath = subpath;
3238  pathnode->winclause = winclause;
3239 
3240  /*
3241  * For costing purposes, assume that there are no redundant partitioning
3242  * or ordering columns; it's not worth the trouble to deal with that
3243  * corner case here. So we just pass the unmodified list lengths to
3244  * cost_windowagg.
3245  */
3246  cost_windowagg(&pathnode->path, root,
3247  windowFuncs,
3248  list_length(winclause->partitionClause),
3249  list_length(winclause->orderClause),
3250  subpath->startup_cost,
3251  subpath->total_cost,
3252  subpath->rows);
3253 
3254  /* add tlist eval cost for each output row */
3255  pathnode->path.startup_cost += target->cost.startup;
3256  pathnode->path.total_cost += target->cost.startup +
3257  target->cost.per_tuple * pathnode->path.rows;
3258 
3259  return pathnode;
3260 }
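/*
 * [Editorial sketch, not part of pathnode.c]  Applying one window clause:
 * sort on the window's PARTITION BY plus ORDER BY pathkeys if needed, then
 * stack a WindowAgg on top.  The helper name is hypothetical and
 * window_pathkeys is assumed to have been derived by the caller from the
 * WindowClause's partition and ordering keys.
 */
static Path *
example_add_window(PlannerInfo *root, RelOptInfo *window_rel, Path *path,
				   PathTarget *window_target, List *window_pathkeys,
				   List *windowFuncs, WindowClause *wc)
{
	if (!pathkeys_contained_in(window_pathkeys, path->pathkeys))
		path = (Path *) create_sort_path(root, window_rel, path,
										 window_pathkeys, -1.0);

	return (Path *) create_windowagg_path(root, window_rel, path,
										  window_target, windowFuncs, wc);
}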
3261 
3262 /*
3263  * create_setop_path
3264  * Creates a pathnode that represents computation of INTERSECT or EXCEPT
3265  *
3266  * 'rel' is the parent relation associated with the result
3267  * 'subpath' is the path representing the source of data
3268  * 'cmd' is the specific semantics (INTERSECT or EXCEPT, with/without ALL)
3269  * 'strategy' is the implementation strategy (sorted or hashed)
3270  * 'distinctList' is a list of SortGroupClause's representing the grouping
3271  * 'flagColIdx' is the column number where the flag column will be, if any
3272  * 'firstFlag' is the flag value for the first input relation when hashing;
3273  * or -1 when sorting
3274  * 'numGroups' is the estimated number of distinct groups
3275  * 'outputRows' is the estimated number of output rows
3276  */
3277 SetOpPath *
3278 create_setop_path(PlannerInfo *root,
3279  RelOptInfo *rel,
3280  Path *subpath,
3281  SetOpCmd cmd,
3282  SetOpStrategy strategy,
3283  List *distinctList,
3284  AttrNumber flagColIdx,
3285  int firstFlag,
3286  double numGroups,
3287  double outputRows)
3288 {
3289  SetOpPath *pathnode = makeNode(SetOpPath);
3290 
3291  pathnode->path.pathtype = T_SetOp;
3292  pathnode->path.parent = rel;
3293  /* SetOp doesn't project, so use source path's pathtarget */
3294  pathnode->path.pathtarget = subpath->pathtarget;
3295  /* For now, assume we are above any joins, so no parameterization */
3296  pathnode->path.param_info = NULL;
3297  pathnode->path.parallel_aware = false;
3298  pathnode->path.parallel_safe = rel->consider_parallel &&
3299  subpath->parallel_safe;
3300  pathnode->path.parallel_workers = subpath->parallel_workers;
3301  /* SetOp preserves the input sort order if in sort mode */
3302  pathnode->path.pathkeys =
3303  (strategy == SETOP_SORTED) ? subpath->pathkeys : NIL;
3304 
3305  pathnode->subpath = subpath;
3306  pathnode->cmd = cmd;
3307  pathnode->strategy = strategy;
3308  pathnode->distinctList = distinctList;
3309  pathnode->flagColIdx = flagColIdx;
3310  pathnode->firstFlag = firstFlag;
3311  pathnode->numGroups = numGroups;
3312 
3313  /*
3314  * Charge one cpu_operator_cost per comparison per input tuple. We assume
3315  * all columns get compared for most of the tuples.
3316  */
3317  pathnode->path.startup_cost = subpath->startup_cost;
3318  pathnode->path.total_cost = subpath->total_cost +
3319  cpu_operator_cost * subpath->rows * list_length(distinctList);
3320  pathnode->path.rows = outputRows;
3321 
3322  return pathnode;
3323 }
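A worked example of the cost formula above, using assumed numbers: with the default cpu_operator_cost of 0.0025, a three-entry distinctList, and a subpath producing 100000 rows at total_cost 1000, the SetOp path gets

    total_cost = 1000 + 0.0025 * 100000 * 3 = 1750

while startup_cost is simply inherited from the subpath.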
3324 
3325 /*
3326  * create_recursiveunion_path
3327  * Creates a pathnode that represents a recursive UNION node
3328  *
3329  * 'rel' is the parent relation associated with the result
3330  * 'leftpath' is the source of data for the non-recursive term
3331  * 'rightpath' is the source of data for the recursive term
3332  * 'target' is the PathTarget to be computed
3333  * 'distinctList' is a list of SortGroupClause's representing the grouping
3334  * 'wtParam' is the ID of Param representing work table
3335  * 'numGroups' is the estimated number of groups
3336  *
3337  * For recursive UNION ALL, distinctList is empty and numGroups is zero
3338  */
3339 RecursiveUnionPath *
3340 create_recursiveunion_path(PlannerInfo *root,
3341  RelOptInfo *rel,
3342  Path *leftpath,
3343  Path *rightpath,
3344  PathTarget *target,
3345  List *distinctList,
3346  int wtParam,
3347  double numGroups)
3348 {
3349  RecursiveUnionPath *pathnode = makeNode(RecursiveUnionPath);
3350 
3351  pathnode->path.pathtype = T_RecursiveUnion;
3352  pathnode->path.parent = rel;
3353  pathnode->path.pathtarget = target;
3354  /* For now, assume we are above any joins, so no parameterization */
3355  pathnode->path.param_info = NULL;
3356  pathnode->path.parallel_aware = false;
3357  pathnode->path.parallel_safe = rel->consider_parallel &&
3358  leftpath->parallel_safe && rightpath->parallel_safe;
3359  /* Foolish, but we'll do it like joins for now: */
3360  pathnode->path.parallel_workers = leftpath->parallel_workers;
3361  /* RecursiveUnion result is always unsorted */
3362  pathnode->path.pathkeys = NIL;
3363 
3364  pathnode->leftpath = leftpath;
3365  pathnode->rightpath = rightpath;
3366  pathnode->distinctList = distinctList;
3367  pathnode->wtParam = wtParam;
3368  pathnode->numGroups = numGroups;
3369 
3370  cost_recursive_union(&pathnode->path, leftpath, rightpath);
3371 
3372  return pathnode;
3373 }
3374 
3375 /*
3376  * create_lockrows_path
3377  * Creates a pathnode that represents acquiring row locks
3378  *
3379  * 'rel' is the parent relation associated with the result
3380  * 'subpath' is the path representing the source of data
3381  * 'rowMarks' is a list of PlanRowMark's
3382  * 'epqParam' is the ID of Param for EvalPlanQual re-eval
3383  */
3384 LockRowsPath *
3385 create_lockrows_path(PlannerInfo *root, RelOptInfo *rel,
3386  Path *subpath, List *rowMarks, int epqParam)
3387 {
3388  LockRowsPath *pathnode = makeNode(LockRowsPath);
3389 
3390  pathnode->path.pathtype = T_LockRows;
3391  pathnode->path.parent = rel;
3392  /* LockRows doesn't project, so use source path's pathtarget */
3393  pathnode->path.pathtarget = subpath->pathtarget;
3394  /* For now, assume we are above any joins, so no parameterization */
3395  pathnode->path.param_info = NULL;
3396  pathnode->path.parallel_aware = false;
3397  pathnode->path.parallel_safe = false;
3398  pathnode->path.parallel_workers = 0;
3399  pathnode->path.rows = subpath->rows;
3400 
3401  /*
3402  * The result cannot be assumed sorted, since locking might cause the sort
3403  * key columns to be replaced with new values.
3404  */
3405  pathnode->path.pathkeys = NIL;
3406 
3407  pathnode->subpath = subpath;
3408  pathnode->rowMarks = rowMarks;
3409  pathnode->epqParam = epqParam;
3410 
3411  /*
3412  * We should charge something extra for the costs of row locking and
3413  * possible refetches, but it's hard to say how much. For now, use
3414  * cpu_tuple_cost per row.
3415  */
3416  pathnode->path.startup_cost = subpath->startup_cost;
3417  pathnode->path.total_cost = subpath->total_cost +
3418  cpu_tuple_cost * subpath->rows;
3419 
3420  return pathnode;
3421 }
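A worked example of the cpu_tuple_cost surcharge, using assumed numbers: with the default cpu_tuple_cost of 0.01 and a subpath producing 50000 rows at total_cost 800, the LockRows path's total_cost is 800 + 0.01 * 50000 = 1300, while its startup_cost and row count are taken from the subpath unchanged.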
3422 
3423 /*
3424  * create_modifytable_path
3425  * Creates a pathnode that represents performing INSERT/UPDATE/DELETE mods
3426  *
3427  * 'rel' is the parent relation associated with the result
3428  * 'operation' is the operation type
3429  * 'canSetTag' is true if we set the command tag/es_processed
3430  * 'nominalRelation' is the parent RT index for use of EXPLAIN
3431  * 'rootRelation' is the partitioned table root RT index, or 0 if none
3432  * 'partColsUpdated' is true if any partitioning columns are being updated,
3433  * either from the target relation or a descendant partitioned table.
3434  * 'resultRelations' is an integer list of actual RT indexes of target rel(s)
3435  * 'subpaths' is a list of Path(s) producing source data (one per rel)
3436  * 'subroots' is a list of PlannerInfo structs (one per rel)
3437  * 'withCheckOptionLists' is a list of WCO lists (one per rel)
3438  * 'returningLists' is a list of RETURNING tlists (one per rel)
3439  * 'rowMarks' is a list of PlanRowMarks (non-locking only)
3440  * 'onconflict' is the ON CONFLICT clause, or NULL
3441  * 'epqParam' is the ID of Param for EvalPlanQual re-eval
3442  */
3443 ModifyTablePath *
3444 create_modifytable_path(PlannerInfo *root, RelOptInfo *rel,
3445  CmdType operation, bool canSetTag,
3446  Index nominalRelation, Index rootRelation,
3447  bool partColsUpdated,
3448  List *resultRelations, List *subpaths,
3449  List *subroots,
3450  List *withCheckOptionLists, List *returningLists,
3451  List *rowMarks, OnConflictExpr *onconflict,
3452  int epqParam)
3453 {
3454  ModifyTablePath *pathnode = makeNode(ModifyTablePath);
3455  double total_size;
3456  ListCell *lc;
3457 
3458  Assert(list_length(resultRelations) == list_length(subpaths));
3459  Assert(list_length(resultRelations) == list_length(subroots));
3460  Assert(withCheckOptionLists == NIL ||
3461  list_length(resultRelations) == list_length(withCheckOptionLists));
3462  Assert(returningLists == NIL ||
3463  list_length(resultRelations) == list_length(returningLists));
3464 
3465  pathnode->path.pathtype = T_ModifyTable;
3466  pathnode->path.parent = rel;
3467  /* pathtarget is not interesting, just make it minimally valid */
3468  pathnode->path.pathtarget = rel->reltarget;
3469  /* For now, assume we are above any joins, so no parameterization */
3470  pathnode->path.param_info = NULL;
3471  pathnode->path.parallel_aware = false;
3472  pathnode->path.parallel_safe = false;
3473  pathnode->path.parallel_workers = 0;
3474  pathnode->path.pathkeys = NIL;
3475 
3476  /*
3477  * Compute cost & rowcount as sum of subpath costs & rowcounts.
3478  *
3479  * Currently, we don't charge anything extra for the actual table
3480  * modification work, nor for the WITH CHECK OPTIONS or RETURNING
3481  * expressions if any. It would only be window dressing, since
3482  * ModifyTable is always a top-level node and there is no way for the
3483  * costs to change any higher-level planning choices. But we might want
3484  * to make it look better sometime.
3485  */
3486  pathnode->path.startup_cost = 0;
3487  pathnode->path.total_cost = 0;
3488  pathnode->path.rows = 0;
3489  total_size = 0;
3490  foreach(lc, subpaths)
3491  {
3492  Path *subpath = (Path *) lfirst(lc);
3493 
3494  if (lc == list_head(subpaths)) /* first node? */
3495  pathnode->path.startup_cost = subpath->startup_cost;
3496  pathnode->path.total_cost += subpath->total_cost;
3497  pathnode->path.rows += subpath->rows;
3498  total_size += subpath->pathtarget->width * subpath->rows;
3499  }
3500 
3501  /*
3502  * Set width to the average width of the subpath outputs. XXX this is
3503  * totally wrong: we should report zero if no RETURNING, else an average
3504  * of the RETURNING tlist widths. But it's what happened historically,
3505  * and improving it is a task for another day.
3506  */
3507  if (pathnode->path.rows > 0)
3508  total_size /= pathnode->path.rows;
3509  pathnode->path.pathtarget->width = rint(total_size);
3510 
3511  pathnode->operation = operation;
3512  pathnode->canSetTag = canSetTag;
3513  pathnode->nominalRelation = nominalRelation;
3514  pathnode->rootRelation = rootRelation;
3515  pathnode->partColsUpdated = partColsUpdated;
3516  pathnode->resultRelations = resultRelations;
3517  pathnode->subpaths = subpaths;
3518  pathnode->subroots = subroots;
3519  pathnode->withCheckOptionLists = withCheckOptionLists;
3520  pathnode->returningLists = returningLists;
3521  pathnode->rowMarks = rowMarks;
3522  pathnode->onconflict = onconflict;
3523  pathnode->epqParam = epqParam;
3524 
3525  return pathnode;
3526 }
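A worked example of the width averaging above, with assumed numbers: given two subpaths, one returning 100 rows of width 40 and one returning 300 rows of width 80, total_size is 40*100 + 80*300 = 28000, so the reported width is rint(28000 / 400) = 70. As the XXX comment notes, this is only a placeholder estimate, since ModifyTable is always a top-level node and the value cannot influence planning choices.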
3527 
3528 /*
3529  * create_limit_path
3530  * Creates a pathnode that represents performing LIMIT/OFFSET
3531  *
3532  * In addition to providing the actual OFFSET and LIMIT expressions,
3533  * the caller must provide estimates of their values for costing purposes.
3534  * The estimates are as computed by preprocess_limit(), ie, 0 represents
3535  * the clause not being present, and -1 means it's present but we could
3536  * not estimate its value.
3537  *
3538  * 'rel' is the parent relation associated with the result
3539  * 'subpath' is the path representing the source of data
3540  * 'limitOffset' is the actual OFFSET expression, or NULL
3541  * 'limitCount' is the actual LIMIT expression, or NULL
3542  * 'offset_est' is the estimated value of the OFFSET expression
3543  * 'count_est' is the estimated value of the LIMIT expression
3544  */
3545 LimitPath *
3546 create_limit_path(PlannerInfo *root, RelOptInfo *rel,
3547  Path *subpath,
3548  Node *limitOffset, Node *limitCount,
3549  int64 offset_est, int64 count_est)
3550 {
3551  LimitPath *pathnode = makeNode(LimitPath);
3552 
3553  pathnode->path.pathtype = T_Limit;
3554  pathnode->path.parent = rel;
3555  /* Limit doesn't project, so use source path's pathtarget */
3556  pathnode->path.pathtarget = subpath->pathtarget;
3557  /* For now, assume we are above any joins, so no parameterization */
3558  pathnode->path.param_info = NULL;
3559  pathnode->path.parallel_aware = false;
3560  pathnode->path.parallel_safe = rel->consider_parallel &&
3561  subpath->parallel_safe;
3562  pathnode->path.parallel_workers = subpath->parallel_workers;
3563  pathnode->path.rows = subpath->rows;
3564  pathnode->path.startup_cost = subpath->startup_cost;
3565  pathnode->path.total_cost = subpath->total_cost;
3566  pathnode->path.pathkeys = subpath->pathkeys;
3567  pathnode->subpath = subpath;
3568  pathnode->limitOffset = limitOffset;
3569  pathnode->limitCount = limitCount;
3570 
3571  /*
3572  * Adjust the output rows count and costs according to the offset/limit.
3573  */
3574  adjust_limit_rows_costs(&pathnode->path.rows,
3575  &pathnode->path.startup_cost,
3576  &pathnode->path.total_cost,
3577  offset_est, count_est);
3578 
3579  return pathnode;
3580 }
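A minimal caller sketch (not from this file), assuming a Query *parse carrying the LIMIT/OFFSET expressions and offset_est/count_est values previously obtained from preprocess_limit(); final_rel stands for whatever upper relation the LIMIT is being applied to:

    path = (Path *) create_limit_path(root, final_rel, path,
                                      parse->limitOffset, parse->limitCount,
                                      offset_est, count_est);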
3581 
3582 /*
3583  * adjust_limit_rows_costs
3584  * Adjust the size and cost estimates for a LimitPath node according to the
3585  * offset/limit.
3586  *
3587  * This is only a cosmetic issue if we are at top level, but if we are
3588  * building a subquery then it's important to report correct info to the outer
3589  * planner.
3590  *
3591  * When the offset or count couldn't be estimated, use 10% of the estimated
3592  * number of rows emitted from the subpath.
3593  *
3594  * XXX we don't bother to add eval costs of the offset/limit expressions
3595  * themselves to the path costs. In theory we should, but in most cases those
3596  * expressions are trivial and it's just not worth the trouble.
3597  */
3598 void
3599 adjust_limit_rows_costs(double *rows, /* in/out parameter */
3600  Cost *startup_cost, /* in/out parameter */
3601  Cost *total_cost, /* in/out parameter */
3602  int64 offset_est,
3603  int64 count_est)
3604 {
3605  double input_rows = *rows;
3606  Cost input_startup_cost = *startup_cost;
3607  Cost input_total_cost = *total_cost;
3608 
3609  if (offset_est != 0)
3610  {
3611  double offset_rows;
3612 
3613  if (offset_est > 0)
3614  offset_rows = (double) offset_est;
3615  else
3616  offset_rows = clamp_row_est(input_rows * 0.10);
3617  if (offset_rows > *rows)
3618  offset_rows = *rows;
3619  if (input_rows > 0)
3620  *startup_cost +=
3621  (input_total_cost - input_startup_cost)
3622  * offset_rows / input_rows;
3623  *rows -= offset_rows;
3624  if (*rows < 1)
3625  *rows = 1;
3626  }
3627 
3628  if (count_est != 0)
3629  {
3630  double count_rows;
3631 
3632  if (count_est > 0)
3633  count_rows = (double) count_est;
3634  else
3635  count_rows = clamp_row_est(input_rows * 0.10);
3636  if (count_rows > *rows)
3637  count_rows = *rows;
3638  if (input_rows > 0)
3639  *total_cost = *startup_cost +
3640  (input_total_cost - input_startup_cost)
3641  * count_rows / input_rows;
3642  *rows = count_rows;
3643  if (*rows < 1)
3644  *rows = 1;
3645  }
3646 }
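A worked example of the adjustment, with assumed inputs rows = 10000, startup_cost = 0, total_cost = 1000, offset_est = 100 and count_est = 50: the OFFSET step charges 1000 * 100/10000 = 10 to startup_cost and leaves 9900 rows; the LIMIT step then sets total_cost = 10 + 1000 * 50/10000 = 15 and rows = 50. Had either estimate been -1, 10% of the input rows (here 1000) would have been used in its place.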
3647 
3648 
3649 /*
3650  * reparameterize_path
3651  * Attempt to modify a Path to have greater parameterization
3652  *
3653  * We use this to attempt to bring all child paths of an appendrel to the
3654  * same parameterization level, ensuring that they all enforce the same set
3655  * of join quals (and thus that that parameterization can be attributed to
3656  * an append path built from such paths). Currently, only a few path types
3657  * are supported here, though more could be added at need. We return NULL
3658  * if we can't reparameterize the given path.
3659  *
3660  * Note: we intentionally do not pass created paths to add_path(); it would
3661  * possibly try to delete them on the grounds of being cost-inferior to the
3662  * paths they were made from, and we don't want that. Paths made here are
3663  * not necessarily of general-purpose usefulness, but they can be useful
3664  * as members of an append path.
3665  */
3666 Path *
3667 reparameterize_path(PlannerInfo *root, Path *path,
3668  Relids required_outer,
3669  double loop_count)
3670 {
3671  RelOptInfo *rel = path->parent;
3672 
3673  /* Can only increase, not decrease, path's parameterization */
3674  if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer))
3675  return NULL;
3676  switch (path->pathtype)
3677  {
3678  case T_SeqScan:
3679  return create_seqscan_path(root, rel, required_outer, 0);
3680  case T_SampleScan:
3681  return (Path *) create_samplescan_path(root, rel, required_outer);
3682  case T_IndexScan:
3683  case T_IndexOnlyScan:
3684  {
3685  IndexPath *ipath = (IndexPath *) path;
3686  IndexPath *newpath = makeNode(IndexPath);
3687 
3688  /*
3689  * We can't use create_index_path directly, and would not want
3690  * to because it would re-compute the indexqual conditions
3691  * which is wasted effort. Instead we hack things a bit:
3692  * flat-copy the path node, revise its param_info, and redo
3693  * the cost estimate.
3694  */
3695  memcpy(newpath, ipath, sizeof(IndexPath));
3696  newpath->path.param_info =
3697  get_baserel_parampathinfo(root, rel, required_outer);
3698  cost_index(newpath, root, loop_count, false);
3699  return (Path *) newpath;
3700  }
3701  case T_BitmapHeapScan:
3702  {
3703  BitmapHeapPath *bpath = (BitmapHeapPath *) path;
3704 
3705  return (Path *) create_bitmap_heap_path(root,
3706  rel,
3707  bpath->bitmapqual,
3708  required_outer,
3709  loop_count, 0);
3710  }
3711  case T_SubqueryScan:
3712  {
3713  SubqueryScanPath *spath = (SubqueryScanPath *) path;
3714 
3715  return (Path *) create_subqueryscan_path(root,
3716  rel,
3717  spath->subpath,
3718  spath->path.pathkeys,
3719  required_outer);
3720  }
3721  case T_Result:
3722  /* Supported only for RTE_RESULT scan paths */
3723  if (IsA(path, Path))
3724  return create_resultscan_path(root, rel, required_outer);
3725  break;
3726  case T_Append:
3727  {
3728  AppendPath *apath = (AppendPath *) path;
3729  List *childpaths = NIL;
3730  List *partialpaths = NIL;
3731  int i;
3732  ListCell *lc;
3733 
3734  /* Reparameterize the children */
3735  i = 0;
3736  foreach(lc, apath->subpaths)
3737  {
3738  Path *spath = (Path *) lfirst(lc);
3739 
3740  spath = reparameterize_path(root, spath,
3741  required_outer,
3742  loop_count);
3743  if (spath == NULL)
3744  return NULL;
3745  /* We have to re-split the regular and partial paths */
3746  if (i < apath->first_partial_path)
3747  childpaths = lappend(childpaths, spath);
3748  else
3749  partialpaths = lappend(partialpaths, spath);
3750  i++;
3751  }
3752  return (Path *)
3753  create_append_path(root, rel, childpaths, partialpaths,
3754  apath->path.pathkeys, required_outer,
3755  apath->path.parallel_workers,
3756  apath->path.parallel_aware,
3757  apath->partitioned_rels,
3758  -1);
3759  }
3760  default:
3761  break;
3762  }
3763  return NULL;
3764 }
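A minimal usage sketch (assumed context) of the helper above, as it might be applied while collecting appendrel child paths that must all carry the same parameterization; child_path and required_outer are hypothetical locals, and the loop_count of 1.0 is just a plausible default:

    /* Try to bring this child path up to the required parameterization. */
    Path *rpath = reparameterize_path(root, child_path, required_outer, 1.0);

    if (rpath == NULL)
        continue;           /* this child cannot enforce the needed quals */
    /* note: rpath is used directly as an Append member, not add_path()ed */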
3765 
3766 /*
3767  * reparameterize_path_by_child
3768  * Given a path parameterized by the parent of the given child relation,
3769  * translate the path to be parameterized by the given child relation.
3770  *
3771  * The function creates a new path of the same type as the given path, but
3772  * parameterized by the given child relation. Most fields from the original
3773  * path can simply be flat-copied, but any expressions must be adjusted to
3774  * refer to the correct varnos, and any paths must be recursively
3775  * reparameterized. Other fields that refer to specific relids also need
3776  * adjustment.
3777  *
3778  * The cost, number of rows, width and parallel path properties depend upon
3779  * path->parent, which does not change during the translation. Hence those
3780  * members are copied as they are.
3781  *
3782  * If the given path can not be reparameterized, the function returns NULL.
3783  */
3784 Path *
3785 reparameterize_path_by_child(PlannerInfo *root, Path *path,
3786  RelOptInfo *child_rel)
3787 {
3788 
3789 #define FLAT_COPY_PATH(newnode, node, nodetype) \
3790  ( (newnode) = makeNode(nodetype), \
3791  memcpy((newnode), (node), sizeof(nodetype)) )
3792 
3793 #define ADJUST_CHILD_ATTRS(node) \
3794  ((node) = \
3795  (List *) adjust_appendrel_attrs_multilevel(root, (Node *) (node), \
3796  child_rel->relids, \
3797  child_rel->top_parent_relids))
3798 
3799 #define REPARAMETERIZE_CHILD_PATH(path) \
3800 do { \
3801  (path) = reparameterize_path_by_child(root, (path), child_rel); \
3802  if ((path) == NULL) \
3803  return NULL; \
3804 } while(0);
3805 
3806 #define REPARAMETERIZE_CHILD_PATH_LIST(pathlist) \
3807 do { \
3808  if ((pathlist) != NIL) \
3809  { \
3810  (pathlist) = reparameterize_pathlist_by_child(root, (pathlist), \
3811  child_rel); \
3812  if ((pathlist) == NIL) \
3813  return NULL; \
3814  } \
3815 } while(0);
3816 
3817  Path *new_path;
3818  ParamPathInfo *new_ppi;
3819  ParamPathInfo *old_ppi;
3820  Relids required_outer;
3821 
3822  /*
3823  * If the path is not parameterized by parent of the given relation, it
3824  * doesn't need reparameterization.
3825  */
3826  if (!path->param_info ||
3827  !bms_overlap(PATH_REQ_OUTER(path), child_rel->top_parent_relids))
3828  return path;
3829 
3830  /* Reparameterize a copy of given path. */
3831  switch (nodeTag(path))
3832  {
3833  case T_Path:
3834  FLAT_COPY_PATH(new_path, path, Path);
3835  break;
3836 
3837  case T_IndexPath:
3838  {
3839  IndexPath *ipath;
3840 
3841  FLAT_COPY_PATH(ipath, path, IndexPath);
3842  ADJUST_CHILD_ATTRS(ipath->indexclauses);
3843  new_path = (Path *) ipath;
3844  }
3845  break;
3846 
3847  case T_BitmapHeapPath:
3848  {
3849  BitmapHeapPath *bhpath;
3850 
3851  FLAT_COPY_PATH(bhpath, path, BitmapHeapPath);
3852  REPARAMETERIZE_CHILD_PATH(bhpath->bitmapqual);
3853  new_path = (Path *) bhpath;
3854  }
3855  break;
3856 
3857  case T_BitmapAndPath:
3858  {
3859  BitmapAndPath *bapath;
3860 
3861  FLAT_COPY_PATH(bapath, path, BitmapAndPath);
3862  REPARAMETERIZE_CHILD_PATH_LIST(bapath->bitmapquals);
3863  new_path = (Path *) bapath;
3864  }
3865  break;
3866 
3867  case T_BitmapOrPath:
3868  {
3869  BitmapOrPath *bopath;
3870 
3871  FLAT_COPY_PATH(bopath, path, BitmapOrPath);
3872  REPARAMETERIZE_CHILD_PATH_LIST(bopath->bitmapquals);
3873  new_path = (Path *) bopath;
3874  }
3875  break;
3876 
3877  case T_TidPath:
3878  {
3879  TidPath *tpath;
3880 
3881  FLAT_COPY_PATH(tpath, path, TidPath);
3882  ADJUST_CHILD_ATTRS(tpath->tidquals);
3883  new_path = (Path *) tpath;
3884  }
3885  break;
3886 
3887  case T_ForeignPath:
3888  {
3889  ForeignPath *fpath;
3890  ReparameterizeForeignPathByChild_function rfpc_func;
3891 
3892  FLAT_COPY_PATH(fpath, path, ForeignPath);
3893  if (fpath->fdw_outerpath)
3894  REPARAMETERIZE_CHILD_PATH(fpath->fdw_outerpath);
3895 
3896  /* Hand over to FDW if needed. */
3897  rfpc_func =
3898  path->parent->fdwroutine->ReparameterizeForeignPathByChild;
3899  if (rfpc_func)
3900  fpath->fdw_private = rfpc_func(root, fpath->fdw_private,
3901  child_rel);
3902  new_path = (Path *) fpath;
3903  }
3904  break;
3905 
3906  case T_CustomPath:
3907  {
3908  CustomPath *cpath;
3909 
3910  FLAT_COPY_PATH(cpath, path, CustomPath);
3911  REPARAMETERIZE_CHILD_PATH_LIST(cpath->custom_paths);
3912  if (cpath->methods &&
3913  cpath->methods->ReparameterizeCustomPathByChild)
3914  cpath->custom_private =
3915  cpath->methods->ReparameterizeCustomPathByChild(root,
3916  cpath->custom_private,
3917  child_rel);
3918  new_path = (Path *) cpath;
3919  }
3920  break;
3921 
3922  case T_NestPath:
3923  {
3924  JoinPath *jpath;
3925 
3926  FLAT_COPY_PATH(jpath, path, NestPath);
3927 
3928  REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
3929  REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
3930  ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
3931  new_path = (Path *) jpath;
3932  }
3933  break;
3934 
3935  case T_MergePath:
3936  {
3937  JoinPath *jpath;
3938  MergePath *mpath;
3939 
3940  FLAT_COPY_PATH(mpath, path, MergePath);
3941 
3942  jpath = (JoinPath *) mpath;
3943  REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
3944  REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
3945  ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
3946  ADJUST_CHILD_ATTRS(mpath->path_mergeclauses);
3947  new_path = (Path *) mpath;
3948  }
3949  break;
3950 
3951  case T_HashPath:
3952  {
3953  JoinPath *jpath;
3954  HashPath *hpath;
3955 
3956  FLAT_COPY_PATH(hpath, path, HashPath);
3957 
3958  jpath = (JoinPath *) hpath;
3959  REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
3960  REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
3961  ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
3962  ADJUST_CHILD_ATTRS(hpath->path_hashclauses);
3963  new_path = (Path *) hpath;
3964  }
3965  break;
3966 
3967  case T_AppendPath:
3968  {
3969  AppendPath *apath;
3970 
3971  FLAT_COPY_PATH(apath, path, AppendPath);
3972  REPARAMETERIZE_CHILD_PATH_LIST(apath->subpaths);
3973  new_path = (Path *) apath;
3974  }
3975  break;
3976 
3977  case T_MergeAppendPath:
3978  {
3979  MergeAppendPath *mapath;
3980 
3981  FLAT_COPY_PATH(mapath, path, MergeAppendPath);
3982  REPARAMETERIZE_CHILD_PATH_LIST(mapath->subpaths);
3983  new_path = (Path *) mapath;
3984  }
3985  break;
3986 
3987  case T_MaterialPath:
3988  {
3989  MaterialPath *mpath;
3990 
3991  FLAT_COPY_PATH(mpath, path, MaterialPath);
3992  REPARAMETERIZE_CHILD_PATH(mpath->subpath);
3993  new_path = (Path *) mpath;
3994  }
3995  break;
3996 
3997  case T_UniquePath:
3998  {
3999  UniquePath *upath;
4000 
4001  FLAT_COPY_PATH(upath, path, UniquePath);
4002  REPARAMETERIZE_CHILD_PATH(upath->subpath);
4003  ADJUST_CHILD_ATTRS(upath->uniq_exprs);
4004  new_path = (Path *) upath;
4005  }
4006  break;
4007 
4008  case T_GatherPath:
4009  {
4010  GatherPath *gpath;
4011 
4012  FLAT_COPY_PATH(gpath, path, GatherPath);
4013  REPARAMETERIZE_CHILD_PATH(gpath->subpath);
4014  new_path = (Path *) gpath;
4015  }
4016  break;
4017 
4018  case T_GatherMergePath:
4019  {
4020  GatherMergePath *gmpath;
4021 
4022  FLAT_COPY_PATH(gmpath, path, GatherMergePath);
4023  REPARAMETERIZE_CHILD_PATH(gmpath->subpath);
4024  new_path = (Path *) gmpath;
4025  }
4026  break;
4027 
4028  default:
4029 
4030  /* We don't know how to reparameterize this path. */
4031  return NULL;
4032  }
4033 
4034  /*
4035  * Adjust the parameterization information, which refers to the topmost
4036  * parent. The topmost parent can be multiple levels away from the given
4037  * child, hence use multi-level expression adjustment routines.
4038  */
4039  old_ppi = new_path->param_info;
4040  required_outer =
4041  adjust_child_relids_multilevel(root, old_ppi->ppi_req_outer,
4042  child_rel->relids,
4043  child_rel->top_parent_relids);
4044 
4045  /* If we already have a PPI for this parameterization, just return it */
4046  new_ppi = find_param_path_info(new_path->parent, required_outer);
4047 
4048  /*
4049  * If not, build a new one and link it to the list of PPIs. For the same
4050  * reason as explained in mark_dummy_rel(), allocate new PPI in the same
4051  * context the given RelOptInfo is in.
4052  */
4053  if (new_ppi == NULL)
4054  {
4055  MemoryContext oldcontext;
4056  RelOptInfo *rel = path->parent;
4057 
4058  oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
4059 
4060  new_ppi = makeNode(ParamPathInfo);
4061  new_ppi->ppi_req_outer = bms_copy(required_outer);
4062  new_ppi->ppi_rows = old_ppi->ppi_rows;
4063  new_ppi->ppi_clauses = old_ppi->ppi_clauses;
4064  ADJUST_CHILD_ATTRS(new_ppi->ppi_clauses);
4065  rel->ppilist = lappend(rel->ppilist, new_ppi);
4066 
4067  MemoryContextSwitchTo(oldcontext);
4068  }
4069  bms_free(required_outer);
4070 
4071  new_path->param_info = new_ppi;
4072 
4073  /*
4074  * Adjust the path target if the parent of the outer relation is
4075  * referenced in the targetlist. This can happen when only the parent of
4076  * outer relation is laterally referenced in this relation.
4077  */
4078  if (bms_overlap(path->parent->lateral_relids,
4079  child_rel->top_parent_relids))
4080  {
4081  new_path->pathtarget = copy_pathtarget(new_path->pathtarget);
4082  ADJUST_CHILD_ATTRS(new_path->pathtarget->exprs);
4083  }
4084 
4085  return new_path;
4086 }
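A minimal usage sketch (assumed context): partitionwise-join code translates a parent-parameterized path before using it for a child join, along these lines, where parent_path and child_rel are hypothetical placeholders for the path built against the parent rel and the child RelOptInfo of interest:

    /* Translate parent Vars and relids in the path to this child. */
    Path *child_path = reparameterize_path_by_child(root, parent_path, child_rel);

    if (child_path == NULL)
        return;             /* path type not handled; skip this candidate */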
4087 
4088 /*
4089  * reparameterize_pathlist_by_child
4090  * Helper function to reparameterize a list of paths by given child rel.
4091  */
4092 static List *
4093 reparameterize_pathlist_by_child(PlannerInfo *root,
4094  List *pathlist,
4095  RelOptInfo *child_rel)
4096 {
4097  ListCell *lc;
4098  List *result = NIL;
4099 
4100  foreach(lc, pathlist)
4101  {
4102  Path *path = reparameterize_path_by_child(root, lfirst(lc),
4103  child_rel);
4104 
4105  if (path == NULL)
4106  {
4107  list_free(result);
4108  return NIL;
4109  }
4110 
4111  result = lappend(result, path);
4112  }
4113 
4114  return result;
4115 }