pathnode.c
1 /*-------------------------------------------------------------------------
2  *
3  * pathnode.c
4  * Routines to manipulate pathlists and create path nodes
5  *
6  * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/optimizer/util/pathnode.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 #include "postgres.h"
16 
17 #include <math.h>
18 
19 #include "foreign/fdwapi.h"
20 #include "miscadmin.h"
21 #include "nodes/extensible.h"
22 #include "nodes/nodeFuncs.h"
23 #include "optimizer/appendinfo.h"
24 #include "optimizer/clauses.h"
25 #include "optimizer/cost.h"
26 #include "optimizer/optimizer.h"
27 #include "optimizer/pathnode.h"
28 #include "optimizer/paths.h"
29 #include "optimizer/planmain.h"
30 #include "optimizer/prep.h"
31 #include "optimizer/restrictinfo.h"
32 #include "optimizer/tlist.h"
33 #include "parser/parsetree.h"
34 #include "utils/lsyscache.h"
35 #include "utils/memutils.h"
36 #include "utils/selfuncs.h"
37 
38 typedef enum
39 {
40  COSTS_EQUAL, /* path costs are fuzzily equal */
41  COSTS_BETTER1, /* first path is cheaper than second */
42  COSTS_BETTER2, /* second path is cheaper than first */
43  COSTS_DIFFERENT, /* neither path dominates the other on cost */
44 } PathCostComparison;
45 
46 /*
47  * STD_FUZZ_FACTOR is the normal fuzz factor for compare_path_costs_fuzzily.
48  * XXX is it worth making this user-controllable? It provides a tradeoff
49  * between planner runtime and the accuracy of path cost comparisons.
50  */
51 #define STD_FUZZ_FACTOR 1.01
52 
53 static List *translate_sub_tlist(List *tlist, int relid);
54 static int append_total_cost_compare(const ListCell *a, const ListCell *b);
55 static int append_startup_cost_compare(const ListCell *a, const ListCell *b);
56 static List *reparameterize_pathlist_by_child(PlannerInfo *root,
57  List *pathlist,
58  RelOptInfo *child_rel);
59 
60 
61 /*****************************************************************************
62  * MISC. PATH UTILITIES
63  *****************************************************************************/
64 
65 /*
66  * compare_path_costs
67  * Return -1, 0, or +1 according as path1 is cheaper, the same cost,
68  * or more expensive than path2 for the specified criterion.
69  */
70 int
71 compare_path_costs(Path *path1, Path *path2, CostSelector criterion)
72 {
73  if (criterion == STARTUP_COST)
74  {
75  if (path1->startup_cost < path2->startup_cost)
76  return -1;
77  if (path1->startup_cost > path2->startup_cost)
78  return +1;
79 
80  /*
81  * If paths have the same startup cost (not at all unlikely), order
82  * them by total cost.
83  */
84  if (path1->total_cost < path2->total_cost)
85  return -1;
86  if (path1->total_cost > path2->total_cost)
87  return +1;
88  }
89  else
90  {
91  if (path1->total_cost < path2->total_cost)
92  return -1;
93  if (path1->total_cost > path2->total_cost)
94  return +1;
95 
96  /*
97  * If paths have the same total cost, order them by startup cost.
98  */
99  if (path1->startup_cost < path2->startup_cost)
100  return -1;
101  if (path1->startup_cost > path2->startup_cost)
102  return +1;
103  }
104  return 0;
105 }
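
A minimal caller sketch (not part of pathnode.c), assuming the usual backend headers; the helper name cheaper_on_total_cost is hypothetical. It shows the intended reading of the return value: negative means path1 is cheaper.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical helper: return whichever path is cheaper on total cost,
 * preferring path1 on an exact tie (compare_path_costs returns 0). */
static Path *
cheaper_on_total_cost(Path *path1, Path *path2)
{
	if (compare_path_costs(path1, path2, TOTAL_COST) <= 0)
		return path1;
	return path2;
}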
106 
107 /*
108  * compare_fractional_path_costs
109  * Return -1, 0, or +1 according as path1 is cheaper, the same cost,
110  * or more expensive than path2 for fetching the specified fraction
111  * of the total tuples.
112  *
113  * If fraction is <= 0 or > 1, we interpret it as 1, ie, we select the
114  * path with the cheaper total_cost.
115  */
116 int
117 compare_fractional_path_costs(Path *path1, Path *path2,
118  double fraction)
119 {
120  Cost cost1,
121  cost2;
122 
123  if (fraction <= 0.0 || fraction >= 1.0)
124  return compare_path_costs(path1, path2, TOTAL_COST);
125  cost1 = path1->startup_cost +
126  fraction * (path1->total_cost - path1->startup_cost);
127  cost2 = path2->startup_cost +
128  fraction * (path2->total_cost - path2->startup_cost);
129  if (cost1 < cost2)
130  return -1;
131  if (cost1 > cost2)
132  return +1;
133  return 0;
134 }
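
A worked example of the interpolation above: with fraction = 0.1, a path with startup_cost = 0 and total_cost = 1000 is charged 0 + 0.1 * (1000 - 0) = 100, while a path with startup_cost = 90 and total_cost = 200 is charged 90 + 0.1 * (200 - 90) = 101. The first path therefore wins for fetching 10% of the tuples, even though the second has the far lower total cost.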
135 
136 /*
137  * compare_path_costs_fuzzily
138  * Compare the costs of two paths to see if either can be said to
139  * dominate the other.
140  *
141  * We use fuzzy comparisons so that add_path() can avoid keeping both of
142  * a pair of paths that really have insignificantly different cost.
143  *
144  * The fuzz_factor argument must be 1.0 plus delta, where delta is the
145  * fraction of the smaller cost that is considered to be a significant
146  * difference. For example, fuzz_factor = 1.01 makes the fuzziness limit
147  * be 1% of the smaller cost.
148  *
149  * The two paths are said to have "equal" costs if both startup and total
150  * costs are fuzzily the same. Path1 is said to be better than path2 if
151  * it has fuzzily better startup cost and fuzzily no worse total cost,
152  * or if it has fuzzily better total cost and fuzzily no worse startup cost.
153  * Path2 is better than path1 if the reverse holds. Finally, if one path
154  * is fuzzily better than the other on startup cost and fuzzily worse on
155  * total cost, we just say that their costs are "different", since neither
156  * dominates the other across the whole performance spectrum.
157  *
158  * This function also enforces a policy rule that paths for which the relevant
159  * one of parent->consider_startup and parent->consider_param_startup is false
160  * cannot survive comparisons solely on the grounds of good startup cost, so
161  * we never return COSTS_DIFFERENT when that is true for the total-cost loser.
162  * (But if total costs are fuzzily equal, we compare startup costs anyway,
163  * in hopes of eliminating one path or the other.)
164  */
165 static PathCostComparison
166 compare_path_costs_fuzzily(Path *path1, Path *path2, double fuzz_factor)
167 {
168 #define CONSIDER_PATH_STARTUP_COST(p) \
169  ((p)->param_info == NULL ? (p)->parent->consider_startup : (p)->parent->consider_param_startup)
170 
171  /*
172  * Check total cost first since it's more likely to be different; many
173  * paths have zero startup cost.
174  */
175  if (path1->total_cost > path2->total_cost * fuzz_factor)
176  {
177  /* path1 fuzzily worse on total cost */
178  if (CONSIDER_PATH_STARTUP_COST(path1) &&
179  path2->startup_cost > path1->startup_cost * fuzz_factor)
180  {
181  /* ... but path2 fuzzily worse on startup, so DIFFERENT */
182  return COSTS_DIFFERENT;
183  }
184  /* else path2 dominates */
185  return COSTS_BETTER2;
186  }
187  if (path2->total_cost > path1->total_cost * fuzz_factor)
188  {
189  /* path2 fuzzily worse on total cost */
190  if (CONSIDER_PATH_STARTUP_COST(path2) &&
191  path1->startup_cost > path2->startup_cost * fuzz_factor)
192  {
193  /* ... but path1 fuzzily worse on startup, so DIFFERENT */
194  return COSTS_DIFFERENT;
195  }
196  /* else path1 dominates */
197  return COSTS_BETTER1;
198  }
199  /* fuzzily the same on total cost ... */
200  if (path1->startup_cost > path2->startup_cost * fuzz_factor)
201  {
202  /* ... but path1 fuzzily worse on startup, so path2 wins */
203  return COSTS_BETTER2;
204  }
205  if (path2->startup_cost > path1->startup_cost * fuzz_factor)
206  {
207  /* ... but path2 fuzzily worse on startup, so path1 wins */
208  return COSTS_BETTER1;
209  }
210  /* fuzzily the same on both costs */
211  return COSTS_EQUAL;
212 
213 #undef CONSIDER_PATH_STARTUP_COST
214 }
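
A worked example with STD_FUZZ_FACTOR (1.01): total costs of 100.0 and 100.5 are treated as fuzzily equal, since neither exceeds the other by more than 1% (100.5 <= 100.0 * 1.01). The comparison then falls through to the startup costs, and only if those are also within 1% of each other does the function return COSTS_EQUAL.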
215 
216 /*
217  * set_cheapest
218  * Find the minimum-cost paths from among a relation's paths,
219  * and save them in the rel's cheapest-path fields.
220  *
221  * cheapest_total_path is normally the cheapest-total-cost unparameterized
222  * path; but if there are no unparameterized paths, we assign it to be the
223  * best (cheapest least-parameterized) parameterized path. However, only
224  * unparameterized paths are considered candidates for cheapest_startup_path,
225  * so that will be NULL if there are no unparameterized paths.
226  *
227  * The cheapest_parameterized_paths list collects all parameterized paths
228  * that have survived the add_path() tournament for this relation. (Since
229  * add_path ignores pathkeys for a parameterized path, these will be paths
230  * that have best cost or best row count for their parameterization. We
231  * may also have both a parallel-safe and a non-parallel-safe path for the
232  * same parameterization in some cases, but this should be
233  * relatively rare since, most typically, all paths for the same relation
234  * will be parallel-safe or none of them will.)
235  *
236  * cheapest_parameterized_paths always includes the cheapest-total
237  * unparameterized path, too, if there is one; the users of that list find
238  * it more convenient if that's included.
239  *
240  * This is normally called only after we've finished constructing the path
241  * list for the rel node.
242  */
243 void
244 set_cheapest(RelOptInfo *parent_rel)
245 {
246  Path *cheapest_startup_path;
247  Path *cheapest_total_path;
248  Path *best_param_path;
249  List *parameterized_paths;
250  ListCell *p;
251 
252  Assert(IsA(parent_rel, RelOptInfo));
253 
254  if (parent_rel->pathlist == NIL)
255  elog(ERROR, "could not devise a query plan for the given query");
256 
257  cheapest_startup_path = cheapest_total_path = best_param_path = NULL;
258  parameterized_paths = NIL;
259 
260  foreach(p, parent_rel->pathlist)
261  {
262  Path *path = (Path *) lfirst(p);
263  int cmp;
264 
265  if (path->param_info)
266  {
267  /* Parameterized path, so add it to parameterized_paths */
268  parameterized_paths = lappend(parameterized_paths, path);
269 
270  /*
271  * If we have an unparameterized cheapest-total, we no longer care
272  * about finding the best parameterized path, so move on.
273  */
274  if (cheapest_total_path)
275  continue;
276 
277  /*
278  * Otherwise, track the best parameterized path, which is the one
279  * with least total cost among those of the minimum
280  * parameterization.
281  */
282  if (best_param_path == NULL)
283  best_param_path = path;
284  else
285  {
286  switch (bms_subset_compare(PATH_REQ_OUTER(path),
287  PATH_REQ_OUTER(best_param_path)))
288  {
289  case BMS_EQUAL:
290  /* keep the cheaper one */
291  if (compare_path_costs(path, best_param_path,
292  TOTAL_COST) < 0)
293  best_param_path = path;
294  break;
295  case BMS_SUBSET1:
296  /* new path is less-parameterized */
297  best_param_path = path;
298  break;
299  case BMS_SUBSET2:
300  /* old path is less-parameterized, keep it */
301  break;
302  case BMS_DIFFERENT:
303 
304  /*
305  * This means that neither path has the least possible
306  * parameterization for the rel. We'll sit on the old
307  * path until something better comes along.
308  */
309  break;
310  }
311  }
312  }
313  else
314  {
315  /* Unparameterized path, so consider it for cheapest slots */
316  if (cheapest_total_path == NULL)
317  {
318  cheapest_startup_path = cheapest_total_path = path;
319  continue;
320  }
321 
322  /*
323  * If we find two paths of identical costs, try to keep the
324  * better-sorted one. The paths might have unrelated sort
325  * orderings, in which case we can only guess which might be
326  * better to keep, but if one is superior then we definitely
327  * should keep that one.
328  */
329  cmp = compare_path_costs(cheapest_startup_path, path, STARTUP_COST);
330  if (cmp > 0 ||
331  (cmp == 0 &&
332  compare_pathkeys(cheapest_startup_path->pathkeys,
333  path->pathkeys) == PATHKEYS_BETTER2))
334  cheapest_startup_path = path;
335 
336  cmp = compare_path_costs(cheapest_total_path, path, TOTAL_COST);
337  if (cmp > 0 ||
338  (cmp == 0 &&
339  compare_pathkeys(cheapest_total_path->pathkeys,
340  path->pathkeys) == PATHKEYS_BETTER2))
341  cheapest_total_path = path;
342  }
343  }
344 
345  /* Add cheapest unparameterized path, if any, to parameterized_paths */
346  if (cheapest_total_path)
347  parameterized_paths = lcons(cheapest_total_path, parameterized_paths);
348 
349  /*
350  * If there is no unparameterized path, use the best parameterized path as
351  * cheapest_total_path (but not as cheapest_startup_path).
352  */
353  if (cheapest_total_path == NULL)
354  cheapest_total_path = best_param_path;
355  Assert(cheapest_total_path != NULL);
356 
357  parent_rel->cheapest_startup_path = cheapest_startup_path;
358  parent_rel->cheapest_total_path = cheapest_total_path;
359  parent_rel->cheapest_unique_path = NULL; /* computed only if needed */
360  parent_rel->cheapest_parameterized_paths = parameterized_paths;
361 }
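
A caller sketch (not from this file), assuming the standard planner headers; it illustrates the expected calling sequence: populate the pathlist via add_path(), then run set_cheapest() once and read the cheapest-path fields.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical wrap-up step after all candidate paths have been added. */
static void
finish_rel_paths(RelOptInfo *rel)
{
	set_cheapest(rel);

	/* The cheapest-path slots are now filled in and usable. */
	Assert(rel->cheapest_total_path != NULL);
}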
362 
363 /*
364  * add_path
365  * Consider a potential implementation path for the specified parent rel,
366  * and add it to the rel's pathlist if it is worthy of consideration.
367  * A path is worthy if it has a better sort order (better pathkeys) or
368  * cheaper cost (on either dimension), or generates fewer rows, than any
369  * existing path that has the same or superset parameterization rels.
370  * We also consider parallel-safe paths more worthy than others.
371  *
372  * We also remove from the rel's pathlist any old paths that are dominated
373  * by new_path --- that is, new_path is cheaper, at least as well ordered,
374  * generates no more rows, requires no outer rels not required by the old
375  * path, and is no less parallel-safe.
376  *
377  * In most cases, a path with a superset parameterization will generate
378  * fewer rows (since it has more join clauses to apply), so that those two
379  * figures of merit move in opposite directions; this means that a path of
380  * one parameterization can seldom dominate a path of another. But such
381  * cases do arise, so we make the full set of checks anyway.
382  *
383  * There are two policy decisions embedded in this function, along with
384  * its sibling add_path_precheck. First, we treat all parameterized paths
385  * as having NIL pathkeys, so that they cannot win comparisons on the
386  * basis of sort order. This is to reduce the number of parameterized
387  * paths that are kept; see discussion in src/backend/optimizer/README.
388  *
389  * Second, we only consider cheap startup cost to be interesting if
390  * parent_rel->consider_startup is true for an unparameterized path, or
391  * parent_rel->consider_param_startup is true for a parameterized one.
392  * Again, this allows discarding useless paths sooner.
393  *
394  * The pathlist is kept sorted by total_cost, with cheaper paths
395  * at the front. Within this routine, that's simply a speed hack:
396  * doing it that way makes it more likely that we will reject an inferior
397  * path after a few comparisons, rather than many comparisons.
398  * However, add_path_precheck relies on this ordering to exit early
399  * when possible.
400  *
401  * NOTE: discarded Path objects are immediately pfree'd to reduce planner
402  * memory consumption. We dare not try to free the substructure of a Path,
403  * since much of it may be shared with other Paths or the query tree itself;
404  * but just recycling discarded Path nodes is a very useful savings in
405  * a large join tree. We can recycle the List nodes of pathlist, too.
406  *
407  * As noted in optimizer/README, deleting a previously-accepted Path is
408  * safe because we know that Paths of this rel cannot yet be referenced
409  * from any other rel, such as a higher-level join. However, in some cases
410  * it is possible that a Path is referenced by another Path for its own
411  * rel; we must not delete such a Path, even if it is dominated by the new
412  * Path. Currently this occurs only for IndexPath objects, which may be
413  * referenced as children of BitmapHeapPaths as well as being paths in
414  * their own right. Hence, we don't pfree IndexPaths when rejecting them.
415  *
416  * 'parent_rel' is the relation entry to which the path corresponds.
417  * 'new_path' is a potential path for parent_rel.
418  *
419  * Returns nothing, but modifies parent_rel->pathlist.
420  */
421 void
422 add_path(RelOptInfo *parent_rel, Path *new_path)
423 {
424  bool accept_new = true; /* unless we find a superior old path */
425  int insert_at = 0; /* where to insert new item */
426  List *new_path_pathkeys;
427  ListCell *p1;
428 
429  /*
430  * This is a convenient place to check for query cancel --- no part of the
431  * planner goes very long without calling add_path().
432  */
433  CHECK_FOR_INTERRUPTS();
434 
435  /* Pretend parameterized paths have no pathkeys, per comment above */
436  new_path_pathkeys = new_path->param_info ? NIL : new_path->pathkeys;
437 
438  /*
439  * Loop to check proposed new path against old paths. Note it is possible
440  * for more than one old path to be tossed out because new_path dominates
441  * it.
442  */
443  foreach(p1, parent_rel->pathlist)
444  {
445  Path *old_path = (Path *) lfirst(p1);
446  bool remove_old = false; /* unless new proves superior */
447  PathCostComparison costcmp;
448  PathKeysComparison keyscmp;
449  BMS_Comparison outercmp;
450 
451  /*
452  * Do a fuzzy cost comparison with standard fuzziness limit.
453  */
454  costcmp = compare_path_costs_fuzzily(new_path, old_path,
455  STD_FUZZ_FACTOR);
456 
457  /*
458  * If the two paths compare differently for startup and total cost,
459  * then we want to keep both, and we can skip comparing pathkeys and
460  * required_outer rels. If they compare the same, proceed with the
461  * other comparisons. Row count is checked last. (We make the tests
462  * in this order because the cost comparison is most likely to turn
463  * out "different", and the pathkeys comparison next most likely. As
464  * explained above, row count very seldom makes a difference, so even
465  * though it's cheap to compare there's not much point in checking it
466  * earlier.)
467  */
468  if (costcmp != COSTS_DIFFERENT)
469  {
470  /* Similarly check to see if either dominates on pathkeys */
471  List *old_path_pathkeys;
472 
473  old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
474  keyscmp = compare_pathkeys(new_path_pathkeys,
475  old_path_pathkeys);
476  if (keyscmp != PATHKEYS_DIFFERENT)
477  {
478  switch (costcmp)
479  {
480  case COSTS_EQUAL:
481  outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
482  PATH_REQ_OUTER(old_path));
483  if (keyscmp == PATHKEYS_BETTER1)
484  {
485  if ((outercmp == BMS_EQUAL ||
486  outercmp == BMS_SUBSET1) &&
487  new_path->rows <= old_path->rows &&
488  new_path->parallel_safe >= old_path->parallel_safe)
489  remove_old = true; /* new dominates old */
490  }
491  else if (keyscmp == PATHKEYS_BETTER2)
492  {
493  if ((outercmp == BMS_EQUAL ||
494  outercmp == BMS_SUBSET2) &&
495  new_path->rows >= old_path->rows &&
496  new_path->parallel_safe <= old_path->parallel_safe)
497  accept_new = false; /* old dominates new */
498  }
499  else /* keyscmp == PATHKEYS_EQUAL */
500  {
501  if (outercmp == BMS_EQUAL)
502  {
503  /*
504  * Same pathkeys and outer rels, and fuzzily
505  * the same cost, so keep just one; to decide
506  * which, first check parallel-safety, then
507  * rows, then do a fuzzy cost comparison with
508  * very small fuzz limit. (We used to do an
509  * exact cost comparison, but that results in
510  * annoying platform-specific plan variations
511  * due to roundoff in the cost estimates.) If
512  * things are still tied, arbitrarily keep
513  * only the old path. Notice that we will
514  * keep only the old path even if the
515  * less-fuzzy comparison decides the startup
516  * and total costs compare differently.
517  */
518  if (new_path->parallel_safe >
519  old_path->parallel_safe)
520  remove_old = true; /* new dominates old */
521  else if (new_path->parallel_safe <
522  old_path->parallel_safe)
523  accept_new = false; /* old dominates new */
524  else if (new_path->rows < old_path->rows)
525  remove_old = true; /* new dominates old */
526  else if (new_path->rows > old_path->rows)
527  accept_new = false; /* old dominates new */
528  else if (compare_path_costs_fuzzily(new_path,
529  old_path,
530  1.0000000001) == COSTS_BETTER1)
531  remove_old = true; /* new dominates old */
532  else
533  accept_new = false; /* old equals or
534  * dominates new */
535  }
536  else if (outercmp == BMS_SUBSET1 &&
537  new_path->rows <= old_path->rows &&
538  new_path->parallel_safe >= old_path->parallel_safe)
539  remove_old = true; /* new dominates old */
540  else if (outercmp == BMS_SUBSET2 &&
541  new_path->rows >= old_path->rows &&
542  new_path->parallel_safe <= old_path->parallel_safe)
543  accept_new = false; /* old dominates new */
544  /* else different parameterizations, keep both */
545  }
546  break;
547  case COSTS_BETTER1:
548  if (keyscmp != PATHKEYS_BETTER2)
549  {
550  outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
551  PATH_REQ_OUTER(old_path));
552  if ((outercmp == BMS_EQUAL ||
553  outercmp == BMS_SUBSET1) &&
554  new_path->rows <= old_path->rows &&
555  new_path->parallel_safe >= old_path->parallel_safe)
556  remove_old = true; /* new dominates old */
557  }
558  break;
559  case COSTS_BETTER2:
560  if (keyscmp != PATHKEYS_BETTER1)
561  {
562  outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
563  PATH_REQ_OUTER(old_path));
564  if ((outercmp == BMS_EQUAL ||
565  outercmp == BMS_SUBSET2) &&
566  new_path->rows >= old_path->rows &&
567  new_path->parallel_safe <= old_path->parallel_safe)
568  accept_new = false; /* old dominates new */
569  }
570  break;
571  case COSTS_DIFFERENT:
572 
573  /*
574  * can't get here, but keep this case to keep compiler
575  * quiet
576  */
577  break;
578  }
579  }
580  }
581 
582  /*
583  * Remove current element from pathlist if dominated by new.
584  */
585  if (remove_old)
586  {
587  parent_rel->pathlist = foreach_delete_current(parent_rel->pathlist,
588  p1);
589 
590  /*
591  * Delete the data pointed-to by the deleted cell, if possible
592  */
593  if (!IsA(old_path, IndexPath))
594  pfree(old_path);
595  }
596  else
597  {
598  /* new belongs after this old path if it has cost >= old's */
599  if (new_path->total_cost >= old_path->total_cost)
600  insert_at = foreach_current_index(p1) + 1;
601  }
602 
603  /*
604  * If we found an old path that dominates new_path, we can quit
605  * scanning the pathlist; we will not add new_path, and we assume
606  * new_path cannot dominate any other elements of the pathlist.
607  */
608  if (!accept_new)
609  break;
610  }
611 
612  if (accept_new)
613  {
614  /* Accept the new path: insert it at proper place in pathlist */
615  parent_rel->pathlist =
616  list_insert_nth(parent_rel->pathlist, insert_at, new_path);
617  }
618  else
619  {
620  /* Reject and recycle the new path */
621  if (!IsA(new_path, IndexPath))
622  pfree(new_path);
623  }
624 }
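
A minimal caller sketch, assuming the standard planner headers; it mirrors the common pattern of building a candidate path and immediately submitting it to the add_path() tournament (the wrapper name consider_plain_seqscan is hypothetical).

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical example: propose an unparameterized sequential scan; add_path
 * decides whether it survives against the paths already in rel->pathlist. */
static void
consider_plain_seqscan(PlannerInfo *root, RelOptInfo *rel)
{
	add_path(rel, create_seqscan_path(root, rel, NULL, 0));
}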
625 
626 /*
627  * add_path_precheck
628  * Check whether a proposed new path could possibly get accepted.
629  * We assume we know the path's pathkeys and parameterization accurately,
630  * and have lower bounds for its costs.
631  *
632  * Note that we do not know the path's rowcount, since getting an estimate for
633  * that is too expensive to do before prechecking. We assume here that paths
634  * of a superset parameterization will generate fewer rows; if that holds,
635  * then paths with different parameterizations cannot dominate each other
636  * and so we can simply ignore existing paths of another parameterization.
637  * (In the infrequent cases where that rule of thumb fails, add_path will
638  * get rid of the inferior path.)
639  *
640  * At the time this is called, we haven't actually built a Path structure,
641  * so the required information has to be passed piecemeal.
642  */
643 bool
644 add_path_precheck(RelOptInfo *parent_rel,
645  Cost startup_cost, Cost total_cost,
646  List *pathkeys, Relids required_outer)
647 {
648  List *new_path_pathkeys;
649  bool consider_startup;
650  ListCell *p1;
651 
652  /* Pretend parameterized paths have no pathkeys, per add_path policy */
653  new_path_pathkeys = required_outer ? NIL : pathkeys;
654 
655  /* Decide whether new path's startup cost is interesting */
656  consider_startup = required_outer ? parent_rel->consider_param_startup : parent_rel->consider_startup;
657 
658  foreach(p1, parent_rel->pathlist)
659  {
660  Path *old_path = (Path *) lfirst(p1);
661  PathKeysComparison keyscmp;
662 
663  /*
664  * We are looking for an old_path with the same parameterization (and
665  * by assumption the same rowcount) that dominates the new path on
666  * pathkeys as well as both cost metrics. If we find one, we can
667  * reject the new path.
668  *
669  * Cost comparisons here should match compare_path_costs_fuzzily.
670  */
671  if (total_cost > old_path->total_cost * STD_FUZZ_FACTOR)
672  {
673  /* new path can win on startup cost only if consider_startup */
674  if (startup_cost > old_path->startup_cost * STD_FUZZ_FACTOR ||
675  !consider_startup)
676  {
677  /* new path loses on cost, so check pathkeys... */
678  List *old_path_pathkeys;
679 
680  old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
681  keyscmp = compare_pathkeys(new_path_pathkeys,
682  old_path_pathkeys);
683  if (keyscmp == PATHKEYS_EQUAL ||
684  keyscmp == PATHKEYS_BETTER2)
685  {
686  /* new path does not win on pathkeys... */
687  if (bms_equal(required_outer, PATH_REQ_OUTER(old_path)))
688  {
689  /* Found an old path that dominates the new one */
690  return false;
691  }
692  }
693  }
694  }
695  else
696  {
697  /*
698  * Since the pathlist is sorted by total_cost, we can stop looking
699  * once we reach a path with a total_cost larger than the new
700  * path's.
701  */
702  break;
703  }
704  }
705 
706  return true;
707 }
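
A hypothetical caller pattern, sketching how the precheck is meant to be used: compute cheap lower bounds first, and only construct the full Path if it could survive add_path(). The helper build_candidate_path and the *_lb variables are placeholders, not real planner APIs.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Placeholder for whatever path construction the caller wants to guard. */
extern Path *build_candidate_path(PlannerInfo *root, RelOptInfo *rel,
								  Relids required_outer);

static void
maybe_add_candidate(PlannerInfo *root, RelOptInfo *rel,
					Cost startup_lb, Cost total_lb,
					List *pathkeys, Relids required_outer)
{
	/* Skip the expensive construction if the bounds already lose. */
	if (!add_path_precheck(rel, startup_lb, total_lb, pathkeys, required_outer))
		return;

	add_path(rel, build_candidate_path(root, rel, required_outer));
}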
708 
709 /*
710  * add_partial_path
711  * Like add_path, our goal here is to consider whether a path is worthy
712  * of being kept around, but the considerations here are a bit different.
713  * A partial path is one which can be executed in any number of workers in
714  * parallel such that each worker will generate a subset of the path's
715  * overall result.
716  *
717  * As in add_path, the partial_pathlist is kept sorted with the cheapest
718  * total path in front. This is depended on by multiple places, which
719  * just take the front entry as the cheapest path without searching.
720  *
721  * We don't generate parameterized partial paths for several reasons. Most
722  * importantly, they're not safe to execute, because there's nothing to
723  * make sure that a parallel scan within the parameterized portion of the
724  * plan is running with the same value in every worker at the same time.
725  * Fortunately, it seems unlikely to be worthwhile anyway, because having
726  * each worker scan the entire outer relation and a subset of the inner
727  * relation will generally be a terrible plan. The inner (parameterized)
728  * side of the plan will be small anyway. There could be rare cases where
729  * this wins big - e.g. if join order constraints put a 1-row relation on
730  * the outer side of the topmost join with a parameterized plan on the inner
731  * side - but we'll have to be content not to handle such cases until
732  * somebody builds an executor infrastructure that can cope with them.
733  *
734  * Because we don't consider parameterized paths here, we also don't
735  * need to consider the row counts as a measure of quality: every path will
736  * produce the same number of rows. Neither do we need to consider startup
737  * costs: parallelism is only used for plans that will be run to completion.
738  * Therefore, this routine is much simpler than add_path: it needs to
739  * consider only pathkeys and total cost.
740  *
741  * As with add_path, we pfree paths that are found to be dominated by
742  * another partial path; this requires that there be no other references to
743  * such paths yet. Hence, GatherPaths must not be created for a rel until
744  * we're done creating all partial paths for it. Unlike add_path, we don't
745  * take an exception for IndexPaths as partial index paths won't be
746  * referenced by partial BitmapHeapPaths.
747  */
748 void
749 add_partial_path(RelOptInfo *parent_rel, Path *new_path)
750 {
751  bool accept_new = true; /* unless we find a superior old path */
752  int insert_at = 0; /* where to insert new item */
753  ListCell *p1;
754 
755  /* Check for query cancel. */
756  CHECK_FOR_INTERRUPTS();
757 
758  /* Path to be added must be parallel safe. */
759  Assert(new_path->parallel_safe);
760 
761  /* Relation should be OK for parallelism, too. */
762  Assert(parent_rel->consider_parallel);
763 
764  /*
765  * As in add_path, throw out any paths which are dominated by the new
766  * path, but throw out the new path if some existing path dominates it.
767  */
768  foreach(p1, parent_rel->partial_pathlist)
769  {
770  Path *old_path = (Path *) lfirst(p1);
771  bool remove_old = false; /* unless new proves superior */
772  PathKeysComparison keyscmp;
773 
774  /* Compare pathkeys. */
775  keyscmp = compare_pathkeys(new_path->pathkeys, old_path->pathkeys);
776 
777  /* Unless pathkeys are incompatible, keep just one of the two paths. */
778  if (keyscmp != PATHKEYS_DIFFERENT)
779  {
780  if (new_path->total_cost > old_path->total_cost * STD_FUZZ_FACTOR)
781  {
782  /* New path costs more; keep it only if pathkeys are better. */
783  if (keyscmp != PATHKEYS_BETTER1)
784  accept_new = false;
785  }
786  else if (old_path->total_cost > new_path->total_cost
787  * STD_FUZZ_FACTOR)
788  {
789  /* Old path costs more; keep it only if pathkeys are better. */
790  if (keyscmp != PATHKEYS_BETTER2)
791  remove_old = true;
792  }
793  else if (keyscmp == PATHKEYS_BETTER1)
794  {
795  /* Costs are about the same, new path has better pathkeys. */
796  remove_old = true;
797  }
798  else if (keyscmp == PATHKEYS_BETTER2)
799  {
800  /* Costs are about the same, old path has better pathkeys. */
801  accept_new = false;
802  }
803  else if (old_path->total_cost > new_path->total_cost * 1.0000000001)
804  {
805  /* Pathkeys are the same, and the old path costs more. */
806  remove_old = true;
807  }
808  else
809  {
810  /*
811  * Pathkeys are the same, and new path isn't materially
812  * cheaper.
813  */
814  accept_new = false;
815  }
816  }
817 
818  /*
819  * Remove current element from partial_pathlist if dominated by new.
820  */
821  if (remove_old)
822  {
823  parent_rel->partial_pathlist =
824  foreach_delete_current(parent_rel->partial_pathlist, p1);
825  pfree(old_path);
826  }
827  else
828  {
829  /* new belongs after this old path if it has cost >= old's */
830  if (new_path->total_cost >= old_path->total_cost)
831  insert_at = foreach_current_index(p1) + 1;
832  }
833 
834  /*
835  * If we found an old path that dominates new_path, we can quit
836  * scanning the partial_pathlist; we will not add new_path, and we
837  * assume new_path cannot dominate any later path.
838  */
839  if (!accept_new)
840  break;
841  }
842 
843  if (accept_new)
844  {
845  /* Accept the new path: insert it at proper place */
846  parent_rel->partial_pathlist =
847  list_insert_nth(parent_rel->partial_pathlist, insert_at, new_path);
848  }
849  else
850  {
851  /* Reject and recycle the new path */
852  pfree(new_path);
853  }
854 }
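
A caller sketch, assuming the standard planner headers; partial paths are proposed through add_partial_path() rather than add_path() (the wrapper name consider_partial_seqscan is hypothetical).

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical example: propose a parallel sequential scan as a partial path
 * for a relation that has already been judged parallel-safe. */
static void
consider_partial_seqscan(PlannerInfo *root, RelOptInfo *rel,
						 int parallel_workers)
{
	add_partial_path(rel, create_seqscan_path(root, rel, NULL,
											  parallel_workers));
}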
855 
856 /*
857  * add_partial_path_precheck
858  * Check whether a proposed new partial path could possibly get accepted.
859  *
860  * Unlike add_path_precheck, we can ignore startup cost and parameterization,
861  * since they don't matter for partial paths (see add_partial_path). But
862  * we do want to make sure we don't add a partial path if there's already
863  * a complete path that dominates it, since in that case the proposed path
864  * is surely a loser.
865  */
866 bool
867 add_partial_path_precheck(RelOptInfo *parent_rel, Cost total_cost,
868  List *pathkeys)
869 {
870  ListCell *p1;
871 
872  /*
873  * Our goal here is twofold. First, we want to find out whether this path
874  * is clearly inferior to some existing partial path. If so, we want to
875  * reject it immediately. Second, we want to find out whether this path
876  * is clearly superior to some existing partial path -- at least, modulo
877  * final cost computations. If so, we definitely want to consider it.
878  *
879  * Unlike add_path(), we always compare pathkeys here. This is because we
880  * expect partial_pathlist to be very short, and getting a definitive
881  * answer at this stage avoids the need to call add_path_precheck.
882  */
883  foreach(p1, parent_rel->partial_pathlist)
884  {
885  Path *old_path = (Path *) lfirst(p1);
886  PathKeysComparison keyscmp;
887 
888  keyscmp = compare_pathkeys(pathkeys, old_path->pathkeys);
889  if (keyscmp != PATHKEYS_DIFFERENT)
890  {
891  if (total_cost > old_path->total_cost * STD_FUZZ_FACTOR &&
892  keyscmp != PATHKEYS_BETTER1)
893  return false;
894  if (old_path->total_cost > total_cost * STD_FUZZ_FACTOR &&
895  keyscmp != PATHKEYS_BETTER2)
896  return true;
897  }
898  }
899 
900  /*
901  * This path is neither clearly inferior to an existing partial path nor
902  * clearly good enough that it might replace one. Compare it to
903  * non-parallel plans. If it loses even before accounting for the cost of
904  * the Gather node, we should definitely reject it.
905  *
906  * Note that we pass the total_cost to add_path_precheck twice. This is
907  * because it's never advantageous to consider the startup cost of a
908  * partial path; the resulting plans, if run in parallel, will be run to
909  * completion.
910  */
911  if (!add_path_precheck(parent_rel, total_cost, total_cost, pathkeys,
912  NULL))
913  return false;
914 
915  return true;
916 }
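
A hypothetical guard built on the same precheck idea as above; total_lb is an assumed lower bound on the candidate partial path's total cost, and build_partial_candidate is a placeholder, not a real planner function.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Placeholder for whatever partial path the caller would construct. */
extern Path *build_partial_candidate(PlannerInfo *root, RelOptInfo *rel);

static void
maybe_add_partial_candidate(PlannerInfo *root, RelOptInfo *rel,
							Cost total_lb, List *pathkeys)
{
	if (!add_partial_path_precheck(rel, total_lb, pathkeys))
		return;					/* clearly dominated; skip the work */

	add_partial_path(rel, build_partial_candidate(root, rel));
}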
917 
918 
919 /*****************************************************************************
920  * PATH NODE CREATION ROUTINES
921  *****************************************************************************/
922 
923 /*
924  * create_seqscan_path
925  * Creates a path corresponding to a sequential scan, returning the
926  * pathnode.
927  */
928 Path *
929 create_seqscan_path(PlannerInfo *root, RelOptInfo *rel,
930  Relids required_outer, int parallel_workers)
931 {
932  Path *pathnode = makeNode(Path);
933 
934  pathnode->pathtype = T_SeqScan;
935  pathnode->parent = rel;
936  pathnode->pathtarget = rel->reltarget;
937  pathnode->param_info = get_baserel_parampathinfo(root, rel,
938  required_outer);
939  pathnode->parallel_aware = (parallel_workers > 0);
940  pathnode->parallel_safe = rel->consider_parallel;
941  pathnode->parallel_workers = parallel_workers;
942  pathnode->pathkeys = NIL; /* seqscan has unordered result */
943 
944  cost_seqscan(pathnode, root, rel, pathnode->param_info);
945 
946  return pathnode;
947 }
948 
949 /*
950  * create_samplescan_path
951  * Creates a path node for a sampled table scan.
952  */
953 Path *
954 create_samplescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
955 {
956  Path *pathnode = makeNode(Path);
957 
958  pathnode->pathtype = T_SampleScan;
959  pathnode->parent = rel;
960  pathnode->pathtarget = rel->reltarget;
961  pathnode->param_info = get_baserel_parampathinfo(root, rel,
962  required_outer);
963  pathnode->parallel_aware = false;
964  pathnode->parallel_safe = rel->consider_parallel;
965  pathnode->parallel_workers = 0;
966  pathnode->pathkeys = NIL; /* samplescan has unordered result */
967 
968  cost_samplescan(pathnode, root, rel, pathnode->param_info);
969 
970  return pathnode;
971 }
972 
973 /*
974  * create_index_path
975  * Creates a path node for an index scan.
976  *
977  * 'index' is a usable index.
978  * 'indexclauses' is a list of IndexClause nodes representing clauses
979  * to be enforced as qual conditions in the scan.
980  * 'indexorderbys' is a list of bare expressions (no RestrictInfos)
981  * to be used as index ordering operators in the scan.
982  * 'indexorderbycols' is an integer list of index column numbers (zero based)
983  * the ordering operators can be used with.
984  * 'pathkeys' describes the ordering of the path.
985  * 'indexscandir' is either ForwardScanDirection or BackwardScanDirection.
986  * 'indexonly' is true if an index-only scan is wanted.
987  * 'required_outer' is the set of outer relids for a parameterized path.
988  * 'loop_count' is the number of repetitions of the indexscan to factor into
989  * estimates of caching behavior.
990  * 'partial_path' is true if constructing a parallel index scan path.
991  *
992  * Returns the new path node.
993  */
994 IndexPath *
995 create_index_path(PlannerInfo *root,
996  IndexOptInfo *index,
997  List *indexclauses,
998  List *indexorderbys,
999  List *indexorderbycols,
1000  List *pathkeys,
1001  ScanDirection indexscandir,
1002  bool indexonly,
1003  Relids required_outer,
1004  double loop_count,
1005  bool partial_path)
1006 {
1007  IndexPath *pathnode = makeNode(IndexPath);
1008  RelOptInfo *rel = index->rel;
1009 
1010  pathnode->path.pathtype = indexonly ? T_IndexOnlyScan : T_IndexScan;
1011  pathnode->path.parent = rel;
1012  pathnode->path.pathtarget = rel->reltarget;
1013  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1014  required_outer);
1015  pathnode->path.parallel_aware = false;
1016  pathnode->path.parallel_safe = rel->consider_parallel;
1017  pathnode->path.parallel_workers = 0;
1018  pathnode->path.pathkeys = pathkeys;
1019 
1020  pathnode->indexinfo = index;
1021  pathnode->indexclauses = indexclauses;
1022  pathnode->indexorderbys = indexorderbys;
1023  pathnode->indexorderbycols = indexorderbycols;
1024  pathnode->indexscandir = indexscandir;
1025 
1026  cost_index(pathnode, root, loop_count, partial_path);
1027 
1028  return pathnode;
1029 }
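
A caller sketch following the parameter list documented above; it assumes indexclauses and useful_pathkeys were already built by the index-path machinery, and the wrapper name is hypothetical.

#include "postgres.h"
#include "access/sdir.h"
#include "optimizer/pathnode.h"

/* Hypothetical example: turn a usable index plus matched clauses into a
 * forward, unparameterized, plain (not index-only) index-scan path. */
static void
consider_simple_index_path(PlannerInfo *root, RelOptInfo *rel,
						   IndexOptInfo *index, List *indexclauses,
						   List *useful_pathkeys)
{
	IndexPath  *ipath;

	ipath = create_index_path(root, index,
							  indexclauses,
							  NIL,	/* no ORDER BY operators */
							  NIL,	/* ... and no ordering columns */
							  useful_pathkeys,
							  ForwardScanDirection,
							  false,	/* not index-only */
							  NULL,		/* unparameterized */
							  1.0,		/* loop_count */
							  false);	/* not building a partial path */
	add_path(rel, (Path *) ipath);
}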
1030 
1031 /*
1032  * create_bitmap_heap_path
1033  * Creates a path node for a bitmap scan.
1034  *
1035  * 'bitmapqual' is a tree of IndexPath, BitmapAndPath, and BitmapOrPath nodes.
1036  * 'required_outer' is the set of outer relids for a parameterized path.
1037  * 'loop_count' is the number of repetitions of the indexscan to factor into
1038  * estimates of caching behavior.
1039  *
1040  * loop_count should match the value used when creating the component
1041  * IndexPaths.
1042  */
1043 BitmapHeapPath *
1044 create_bitmap_heap_path(PlannerInfo *root,
1045  RelOptInfo *rel,
1046  Path *bitmapqual,
1047  Relids required_outer,
1048  double loop_count,
1049  int parallel_degree)
1050 {
1051  BitmapHeapPath *pathnode = makeNode(BitmapHeapPath);
1052 
1053  pathnode->path.pathtype = T_BitmapHeapScan;
1054  pathnode->path.parent = rel;
1055  pathnode->path.pathtarget = rel->reltarget;
1056  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1057  required_outer);
1058  pathnode->path.parallel_aware = (parallel_degree > 0);
1059  pathnode->path.parallel_safe = rel->consider_parallel;
1060  pathnode->path.parallel_workers = parallel_degree;
1061  pathnode->path.pathkeys = NIL; /* always unordered */
1062 
1063  pathnode->bitmapqual = bitmapqual;
1064 
1065  cost_bitmap_heap_scan(&pathnode->path, root, rel,
1066  pathnode->path.param_info,
1067  bitmapqual, loop_count);
1068 
1069  return pathnode;
1070 }
1071 
1072 /*
1073  * create_bitmap_and_path
1074  * Creates a path node representing a BitmapAnd.
1075  */
1076 BitmapAndPath *
1077 create_bitmap_and_path(PlannerInfo *root,
1078  RelOptInfo *rel,
1079  List *bitmapquals)
1080 {
1081  BitmapAndPath *pathnode = makeNode(BitmapAndPath);
1082  Relids required_outer = NULL;
1083  ListCell *lc;
1084 
1085  pathnode->path.pathtype = T_BitmapAnd;
1086  pathnode->path.parent = rel;
1087  pathnode->path.pathtarget = rel->reltarget;
1088 
1089  /*
1090  * Identify the required outer rels as the union of what the child paths
1091  * depend on. (Alternatively, we could insist that the caller pass this
1092  * in, but it's more convenient and reliable to compute it here.)
1093  */
1094  foreach(lc, bitmapquals)
1095  {
1096  Path *bitmapqual = (Path *) lfirst(lc);
1097 
1098  required_outer = bms_add_members(required_outer,
1099  PATH_REQ_OUTER(bitmapqual));
1100  }
1101  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1102  required_outer);
1103 
1104  /*
1105  * Currently, a BitmapHeapPath, BitmapAndPath, or BitmapOrPath will be
1106  * parallel-safe if and only if rel->consider_parallel is set. So, we can
1107  * set the flag for this path based only on the relation-level flag,
1108  * without actually iterating over the list of children.
1109  */
1110  pathnode->path.parallel_aware = false;
1111  pathnode->path.parallel_safe = rel->consider_parallel;
1112  pathnode->path.parallel_workers = 0;
1113 
1114  pathnode->path.pathkeys = NIL; /* always unordered */
1115 
1116  pathnode->bitmapquals = bitmapquals;
1117 
1118  /* this sets bitmapselectivity as well as the regular cost fields: */
1119  cost_bitmap_and_node(pathnode, root);
1120 
1121  return pathnode;
1122 }
1123 
1124 /*
1125  * create_bitmap_or_path
1126  * Creates a path node representing a BitmapOr.
1127  */
1128 BitmapOrPath *
1129 create_bitmap_or_path(PlannerInfo *root,
1130  RelOptInfo *rel,
1131  List *bitmapquals)
1132 {
1133  BitmapOrPath *pathnode = makeNode(BitmapOrPath);
1134  Relids required_outer = NULL;
1135  ListCell *lc;
1136 
1137  pathnode->path.pathtype = T_BitmapOr;
1138  pathnode->path.parent = rel;
1139  pathnode->path.pathtarget = rel->reltarget;
1140 
1141  /*
1142  * Identify the required outer rels as the union of what the child paths
1143  * depend on. (Alternatively, we could insist that the caller pass this
1144  * in, but it's more convenient and reliable to compute it here.)
1145  */
1146  foreach(lc, bitmapquals)
1147  {
1148  Path *bitmapqual = (Path *) lfirst(lc);
1149 
1150  required_outer = bms_add_members(required_outer,
1151  PATH_REQ_OUTER(bitmapqual));
1152  }
1153  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1154  required_outer);
1155 
1156  /*
1157  * Currently, a BitmapHeapPath, BitmapAndPath, or BitmapOrPath will be
1158  * parallel-safe if and only if rel->consider_parallel is set. So, we can
1159  * set the flag for this path based only on the relation-level flag,
1160  * without actually iterating over the list of children.
1161  */
1162  pathnode->path.parallel_aware = false;
1163  pathnode->path.parallel_safe = rel->consider_parallel;
1164  pathnode->path.parallel_workers = 0;
1165 
1166  pathnode->path.pathkeys = NIL; /* always unordered */
1167 
1168  pathnode->bitmapquals = bitmapquals;
1169 
1170  /* this sets bitmapselectivity as well as the regular cost fields: */
1171  cost_bitmap_or_node(pathnode, root);
1172 
1173  return pathnode;
1174 }
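
A sketch of how the bitmap constructors compose; ipath1 and ipath2 stand for IndexPaths previously built for the same relation, and the wrapper name is hypothetical.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical example: OR two index paths into one bitmap, then scan the
 * heap with the combined bitmap. */
static void
consider_bitmap_or_scan(PlannerInfo *root, RelOptInfo *rel,
						IndexPath *ipath1, IndexPath *ipath2)
{
	BitmapOrPath *orpath;

	orpath = create_bitmap_or_path(root, rel, list_make2(ipath1, ipath2));
	add_path(rel, (Path *) create_bitmap_heap_path(root, rel,
												   (Path *) orpath,
												   NULL,	/* unparameterized */
												   1.0,		/* loop_count */
												   0));		/* no parallelism */
}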
1175 
1176 /*
1177  * create_tidscan_path
1178  * Creates a path corresponding to a scan by TID, returning the pathnode.
1179  */
1180 TidPath *
1181 create_tidscan_path(PlannerInfo *root, RelOptInfo *rel, List *tidquals,
1182  Relids required_outer)
1183 {
1184  TidPath *pathnode = makeNode(TidPath);
1185 
1186  pathnode->path.pathtype = T_TidScan;
1187  pathnode->path.parent = rel;
1188  pathnode->path.pathtarget = rel->reltarget;
1189  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1190  required_outer);
1191  pathnode->path.parallel_aware = false;
1192  pathnode->path.parallel_safe = rel->consider_parallel;
1193  pathnode->path.parallel_workers = 0;
1194  pathnode->path.pathkeys = NIL; /* always unordered */
1195 
1196  pathnode->tidquals = tidquals;
1197 
1198  cost_tidscan(&pathnode->path, root, rel, tidquals,
1199  pathnode->path.param_info);
1200 
1201  return pathnode;
1202 }
1203 
1204 /*
1205  * create_tidrangescan_path
1206  * Creates a path corresponding to a scan by a range of TIDs, returning
1207  * the pathnode.
1208  */
1209 TidRangePath *
1210 create_tidrangescan_path(PlannerInfo *root, RelOptInfo *rel,
1211  List *tidrangequals, Relids required_outer)
1212 {
1213  TidRangePath *pathnode = makeNode(TidRangePath);
1214 
1215  pathnode->path.pathtype = T_TidRangeScan;
1216  pathnode->path.parent = rel;
1217  pathnode->path.pathtarget = rel->reltarget;
1218  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1219  required_outer);
1220  pathnode->path.parallel_aware = false;
1221  pathnode->path.parallel_safe = rel->consider_parallel;
1222  pathnode->path.parallel_workers = 0;
1223  pathnode->path.pathkeys = NIL; /* always unordered */
1224 
1225  pathnode->tidrangequals = tidrangequals;
1226 
1227  cost_tidrangescan(&pathnode->path, root, rel, tidrangequals,
1228  pathnode->path.param_info);
1229 
1230  return pathnode;
1231 }
1232 
1233 /*
1234  * create_append_path
1235  * Creates a path corresponding to an Append plan, returning the
1236  * pathnode.
1237  *
1238  * Note that we must handle subpaths = NIL, representing a dummy access path.
1239  * Also, there are callers that pass root = NULL.
1240  */
1241 AppendPath *
1242 create_append_path(PlannerInfo *root,
1243  RelOptInfo *rel,
1244  List *subpaths, List *partial_subpaths,
1245  List *pathkeys, Relids required_outer,
1246  int parallel_workers, bool parallel_aware,
1247  double rows)
1248 {
1249  AppendPath *pathnode = makeNode(AppendPath);
1250  ListCell *l;
1251 
1252  Assert(!parallel_aware || parallel_workers > 0);
1253 
1254  pathnode->path.pathtype = T_Append;
1255  pathnode->path.parent = rel;
1256  pathnode->path.pathtarget = rel->reltarget;
1257 
1258  /*
1259  * If this is for a baserel (not a join or non-leaf partition), we prefer
1260  * to apply get_baserel_parampathinfo to construct a full ParamPathInfo
1261  * for the path. This supports building a Memoize path atop this path,
1262  * and if this is a partitioned table the info may be useful for run-time
1263  * pruning (cf make_partition_pruneinfo()).
1264  *
1265  * However, if we don't have "root" then that won't work and we fall back
1266  * on the simpler get_appendrel_parampathinfo. There's no point in doing
1267  * the more expensive thing for a dummy path, either.
1268  */
1269  if (rel->reloptkind == RELOPT_BASEREL && root && subpaths != NIL)
1270  pathnode->path.param_info = get_baserel_parampathinfo(root,
1271  rel,
1272  required_outer);
1273  else
1274  pathnode->path.param_info = get_appendrel_parampathinfo(rel,
1275  required_outer);
1276 
1277  pathnode->path.parallel_aware = parallel_aware;
1278  pathnode->path.parallel_safe = rel->consider_parallel;
1279  pathnode->path.parallel_workers = parallel_workers;
1280  pathnode->path.pathkeys = pathkeys;
1281 
1282  /*
1283  * For parallel append, non-partial paths are sorted by descending total
1284  * costs. That way, the total time to finish all non-partial paths is
1285  * minimized. Also, the partial paths are sorted by descending startup
1286  * costs. There may be some paths that require startup work to be done by a
1287  * single worker. In such cases, it's better for workers to choose the
1288  * expensive ones first, whereas the leader should choose the cheapest
1289  * startup plan.
1290  */
1291  if (pathnode->path.parallel_aware)
1292  {
1293  /*
1294  * We mustn't fiddle with the order of subpaths when the Append has
1295  * pathkeys. The order they're listed in is critical to keeping the
1296  * pathkeys valid.
1297  */
1298  Assert(pathkeys == NIL);
1299 
1300  list_sort(subpaths, append_total_cost_compare);
1301  list_sort(partial_subpaths, append_startup_cost_compare);
1302  }
1303  pathnode->first_partial_path = list_length(subpaths);
1304  pathnode->subpaths = list_concat(subpaths, partial_subpaths);
1305 
1306  /*
1307  * Apply query-wide LIMIT if known and path is for sole base relation.
1308  * (Handling this at this low level is a bit klugy.)
1309  */
1310  if (root != NULL && bms_equal(rel->relids, root->all_query_rels))
1311  pathnode->limit_tuples = root->limit_tuples;
1312  else
1313  pathnode->limit_tuples = -1.0;
1314 
1315  foreach(l, pathnode->subpaths)
1316  {
1317  Path *subpath = (Path *) lfirst(l);
1318 
1319  pathnode->path.parallel_safe = pathnode->path.parallel_safe &&
1320  subpath->parallel_safe;
1321 
1322  /* All child paths must have same parameterization */
1323  Assert(bms_equal(PATH_REQ_OUTER(subpath), required_outer));
1324  }
1325 
1326  Assert(!parallel_aware || pathnode->path.parallel_safe);
1327 
1328  /*
1329  * If there's exactly one child path then the output of the Append is
1330  * necessarily ordered the same as the child's, so we can inherit the
1331  * child's pathkeys if any, overriding whatever the caller might've said.
1332  * Furthermore, if the child's parallel awareness matches the Append's,
1333  * then the Append is a no-op and will be discarded later (in setrefs.c).
1334  * Then we can inherit the child's size and cost too, effectively charging
1335  * zero for the Append. Otherwise, we must do the normal costsize
1336  * calculation.
1337  */
1338  if (list_length(pathnode->subpaths) == 1)
1339  {
1340  Path *child = (Path *) linitial(pathnode->subpaths);
1341 
1342  if (child->parallel_aware == parallel_aware)
1343  {
1344  pathnode->path.rows = child->rows;
1345  pathnode->path.startup_cost = child->startup_cost;
1346  pathnode->path.total_cost = child->total_cost;
1347  }
1348  else
1349  cost_append(pathnode);
1350  /* Must do this last, else cost_append complains */
1351  pathnode->path.pathkeys = child->pathkeys;
1352  }
1353  else
1354  cost_append(pathnode);
1355 
1356  /* If the caller provided a row estimate, override the computed value. */
1357  if (rows >= 0)
1358  pathnode->path.rows = rows;
1359 
1360  return pathnode;
1361 }
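
One noteworthy caller pattern: a provably empty ("dummy") relation is represented by an Append with no children, built with root = NULL and subpaths = NIL exactly as the header comment allows (cf. mark_dummy_rel in joinrels.c, which also resets the rel's pathlists and re-runs set_cheapest). A minimal sketch, with a hypothetical helper name:

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical helper: attach a childless Append path, the conventional
 * representation of a dummy relation. */
static void
add_dummy_append_path(RelOptInfo *rel)
{
	add_path(rel, (Path *) create_append_path(NULL, rel,
											  NIL, NIL,		/* no subpaths */
											  NIL, NULL,
											  0, false,
											  -1));
}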
1362 
1363 /*
1364  * append_total_cost_compare
1365  * list_sort comparator for sorting append child paths
1366  * by total_cost descending
1367  *
1368  * For equal total costs, we fall back to comparing startup costs; if those
1369  * are equal too, break ties using bms_compare on the paths' relids.
1370  * (This is to avoid getting unpredictable results from list_sort.)
1371  */
1372 static int
1373 append_total_cost_compare(const ListCell *a, const ListCell *b)
1374 {
1375  Path *path1 = (Path *) lfirst(a);
1376  Path *path2 = (Path *) lfirst(b);
1377  int cmp;
1378 
1379  cmp = compare_path_costs(path1, path2, TOTAL_COST);
1380  if (cmp != 0)
1381  return -cmp;
1382  return bms_compare(path1->parent->relids, path2->parent->relids);
1383 }
1384 
1385 /*
1386  * append_startup_cost_compare
1387  * list_sort comparator for sorting append child paths
1388  * by startup_cost descending
1389  *
1390  * For equal startup costs, we fall back to comparing total costs; if those
1391  * are equal too, break ties using bms_compare on the paths' relids.
1392  * (This is to avoid getting unpredictable results from list_sort.)
1393  */
1394 static int
1395 append_startup_cost_compare(const ListCell *a, const ListCell *b)
1396 {
1397  Path *path1 = (Path *) lfirst(a);
1398  Path *path2 = (Path *) lfirst(b);
1399  int cmp;
1400 
1401  cmp = compare_path_costs(path1, path2, STARTUP_COST);
1402  if (cmp != 0)
1403  return -cmp;
1404  return bms_compare(path1->parent->relids, path2->parent->relids);
1405 }
1406 
1407 /*
1408  * create_merge_append_path
1409  * Creates a path corresponding to a MergeAppend plan, returning the
1410  * pathnode.
1411  */
1412 MergeAppendPath *
1413 create_merge_append_path(PlannerInfo *root,
1414  RelOptInfo *rel,
1415  List *subpaths,
1416  List *pathkeys,
1417  Relids required_outer)
1418 {
1419  MergeAppendPath *pathnode = makeNode(MergeAppendPath);
1420  Cost input_startup_cost;
1421  Cost input_total_cost;
1422  ListCell *l;
1423 
1424  pathnode->path.pathtype = T_MergeAppend;
1425  pathnode->path.parent = rel;
1426  pathnode->path.pathtarget = rel->reltarget;
1427  pathnode->path.param_info = get_appendrel_parampathinfo(rel,
1428  required_outer);
1429  pathnode->path.parallel_aware = false;
1430  pathnode->path.parallel_safe = rel->consider_parallel;
1431  pathnode->path.parallel_workers = 0;
1432  pathnode->path.pathkeys = pathkeys;
1433  pathnode->subpaths = subpaths;
1434 
1435  /*
1436  * Apply query-wide LIMIT if known and path is for sole base relation.
1437  * (Handling this at this low level is a bit klugy.)
1438  */
1439  if (bms_equal(rel->relids, root->all_query_rels))
1440  pathnode->limit_tuples = root->limit_tuples;
1441  else
1442  pathnode->limit_tuples = -1.0;
1443 
1444  /*
1445  * Add up the sizes and costs of the input paths.
1446  */
1447  pathnode->path.rows = 0;
1448  input_startup_cost = 0;
1449  input_total_cost = 0;
1450  foreach(l, subpaths)
1451  {
1452  Path *subpath = (Path *) lfirst(l);
1453 
1454  pathnode->path.rows += subpath->rows;
1455  pathnode->path.parallel_safe = pathnode->path.parallel_safe &&
1456  subpath->parallel_safe;
1457 
1458  if (pathkeys_contained_in(pathkeys, subpath->pathkeys))
1459  {
1460  /* Subpath is adequately ordered, we won't need to sort it */
1461  input_startup_cost += subpath->startup_cost;
1462  input_total_cost += subpath->total_cost;
1463  }
1464  else
1465  {
1466  /* We'll need to insert a Sort node, so include cost for that */
1467  Path sort_path; /* dummy for result of cost_sort */
1468 
1469  cost_sort(&sort_path,
1470  root,
1471  pathkeys,
1472  subpath->total_cost,
1473  subpath->parent->tuples,
1474  subpath->pathtarget->width,
1475  0.0,
1476  work_mem,
1477  pathnode->limit_tuples);
1478  input_startup_cost += sort_path.startup_cost;
1479  input_total_cost += sort_path.total_cost;
1480  }
1481 
1482  /* All child paths must have same parameterization */
1483  Assert(bms_equal(PATH_REQ_OUTER(subpath), required_outer));
1484  }
1485 
1486  /*
1487  * Now we can compute total costs of the MergeAppend. If there's exactly
1488  * one child path and its parallel awareness matches that of the
1489  * MergeAppend, then the MergeAppend is a no-op and will be discarded
1490  * later (in setrefs.c); otherwise we do the normal cost calculation.
1491  */
1492  if (list_length(subpaths) == 1 &&
1493  ((Path *) linitial(subpaths))->parallel_aware ==
1494  pathnode->path.parallel_aware)
1495  {
1496  pathnode->path.startup_cost = input_startup_cost;
1497  pathnode->path.total_cost = input_total_cost;
1498  }
1499  else
1500  cost_merge_append(&pathnode->path, root,
1501  pathkeys, list_length(subpaths),
1502  input_startup_cost, input_total_cost,
1503  pathnode->path.rows);
1504 
1505  return pathnode;
1506 }
1507 
1508 /*
1509  * create_group_result_path
1510  * Creates a path representing a Result-and-nothing-else plan.
1511  *
1512  * This is only used for degenerate grouping cases, in which we know we
1513  * need to produce one result row, possibly filtered by a HAVING qual.
1514  */
1515 GroupResultPath *
1516 create_group_result_path(PlannerInfo *root, RelOptInfo *rel,
1517  PathTarget *target, List *havingqual)
1518 {
1519  GroupResultPath *pathnode = makeNode(GroupResultPath);
1520 
1521  pathnode->path.pathtype = T_Result;
1522  pathnode->path.parent = rel;
1523  pathnode->path.pathtarget = target;
1524  pathnode->path.param_info = NULL; /* there are no other rels... */
1525  pathnode->path.parallel_aware = false;
1526  pathnode->path.parallel_safe = rel->consider_parallel;
1527  pathnode->path.parallel_workers = 0;
1528  pathnode->path.pathkeys = NIL;
1529  pathnode->quals = havingqual;
1530 
1531  /*
1532  * We can't quite use cost_resultscan() because the quals we want to
1533  * account for are not baserestrict quals of the rel. Might as well just
1534  * hack it here.
1535  */
1536  pathnode->path.rows = 1;
1537  pathnode->path.startup_cost = target->cost.startup;
1538  pathnode->path.total_cost = target->cost.startup +
1539  cpu_tuple_cost + target->cost.per_tuple;
1540 
1541  /*
1542  * Add cost of qual, if any --- but we ignore its selectivity, since our
1543  * rowcount estimate should be 1 no matter what the qual is.
1544  */
1545  if (havingqual)
1546  {
1547  QualCost qual_cost;
1548 
1549  cost_qual_eval(&qual_cost, havingqual, root);
1550  /* havingqual is evaluated once at startup */
1551  pathnode->path.startup_cost += qual_cost.startup + qual_cost.per_tuple;
1552  pathnode->path.total_cost += qual_cost.startup + qual_cost.per_tuple;
1553  }
1554 
1555  return pathnode;
1556 }
1557 
1558 /*
1559  * create_material_path
1560  * Creates a path corresponding to a Material plan, returning the
1561  * pathnode.
1562  */
1563 MaterialPath *
1564 create_material_path(RelOptInfo *rel, Path *subpath)
1565 {
1566  MaterialPath *pathnode = makeNode(MaterialPath);
1567 
1568  Assert(subpath->parent == rel);
1569 
1570  pathnode->path.pathtype = T_Material;
1571  pathnode->path.parent = rel;
1572  pathnode->path.pathtarget = rel->reltarget;
1573  pathnode->path.param_info = subpath->param_info;
1574  pathnode->path.parallel_aware = false;
1575  pathnode->path.parallel_safe = rel->consider_parallel &&
1576  subpath->parallel_safe;
1577  pathnode->path.parallel_workers = subpath->parallel_workers;
1578  pathnode->path.pathkeys = subpath->pathkeys;
1579 
1580  pathnode->subpath = subpath;
1581 
1582  cost_material(&pathnode->path,
1583  subpath->startup_cost,
1584  subpath->total_cost,
1585  subpath->rows,
1586  subpath->pathtarget->width);
1587 
1588  return pathnode;
1589 }
1590 
1591 /*
1592  * create_memoize_path
1593  * Creates a path corresponding to a Memoize plan, returning the pathnode.
1594  */
1595 MemoizePath *
1596 create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
1597  List *param_exprs, List *hash_operators,
1598  bool singlerow, bool binary_mode, double calls)
1599 {
1600  MemoizePath *pathnode = makeNode(MemoizePath);
1601 
1602  Assert(subpath->parent == rel);
1603 
1604  pathnode->path.pathtype = T_Memoize;
1605  pathnode->path.parent = rel;
1606  pathnode->path.pathtarget = rel->reltarget;
1607  pathnode->path.param_info = subpath->param_info;
1608  pathnode->path.parallel_aware = false;
1609  pathnode->path.parallel_safe = rel->consider_parallel &&
1610  subpath->parallel_safe;
1611  pathnode->path.parallel_workers = subpath->parallel_workers;
1612  pathnode->path.pathkeys = subpath->pathkeys;
1613 
1614  pathnode->subpath = subpath;
1615  pathnode->hash_operators = hash_operators;
1616  pathnode->param_exprs = param_exprs;
1617  pathnode->singlerow = singlerow;
1618  pathnode->binary_mode = binary_mode;
1619  pathnode->calls = calls;
1620 
1621  /*
1622  * For now we set est_entries to 0. cost_memoize_rescan() does all the
1623  * hard work to determine how many cache entries there are likely to be,
1624  * so it seems best to leave it up to that function to fill this field in.
1625  * If left at 0, the executor will make a guess at a good value.
1626  */
1627  pathnode->est_entries = 0;
1628 
1629  /*
1630  * Add a small additional charge for caching the first entry. All the
1631  * harder calculations for rescans are performed in cost_memoize_rescan().
1632  */
1633  pathnode->path.startup_cost = subpath->startup_cost + cpu_tuple_cost;
1634  pathnode->path.total_cost = subpath->total_cost + cpu_tuple_cost;
1635  pathnode->path.rows = subpath->rows;
1636 
1637  return pathnode;
1638 }
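
/*
 * Editor's note (illustrative, hedged): in a typical caller such as the
 * join-path machinery, 'param_exprs' would be the outer-side expressions
 * that parameterize the inner path (for example, the outer Var compared in
 * an equality join clause), 'hash_operators' the matching equality operator
 * OIDs used to hash and compare cache keys, and 'calls' roughly the number
 * of expected rescans (about the outer path's row count).  'singlerow' is
 * set when each cache key can yield at most one row, and 'binary_mode'
 * requests byte-image key comparison when plain equality is not strict
 * enough for caching purposes.
 */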
1639 
1640 /*
1641  * create_unique_path
1642  * Creates a path representing elimination of distinct rows from the
1643  * input data. Distinct-ness is defined according to the needs of the
1644  * semijoin represented by sjinfo. If it is not possible to identify
1645  * how to make the data unique, NULL is returned.
1646  *
1647  * If used at all, this is likely to be called repeatedly on the same rel;
1648  * and the input subpath should always be the same (the cheapest_total path
1649  * for the rel). So we cache the result.
1650  */
1651 UniquePath *
1652 create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
1653  SpecialJoinInfo *sjinfo)
1654 {
1655  UniquePath *pathnode;
1656  Path sort_path; /* dummy for result of cost_sort */
1657  Path agg_path; /* dummy for result of cost_agg */
1658  MemoryContext oldcontext;
1659  int numCols;
1660 
1661  /* Caller made a mistake if subpath isn't cheapest_total ... */
1662  Assert(subpath == rel->cheapest_total_path);
1663  Assert(subpath->parent == rel);
1664  /* ... or if SpecialJoinInfo is the wrong one */
1665  Assert(sjinfo->jointype == JOIN_SEMI);
1666  Assert(bms_equal(rel->relids, sjinfo->syn_righthand));
1667 
1668  /* If result already cached, return it */
1669  if (rel->cheapest_unique_path)
1670  return (UniquePath *) rel->cheapest_unique_path;
1671 
1672  /* If it's not possible to unique-ify, return NULL */
1673  if (!(sjinfo->semi_can_btree || sjinfo->semi_can_hash))
1674  return NULL;
1675 
1676  /*
1677  * When called during GEQO join planning, we are in a short-lived memory
1678  * context. We must make sure that the path and any subsidiary data
1679  * structures created for a baserel survive the GEQO cycle, else the
1680  * baserel is trashed for future GEQO cycles. On the other hand, when we
1681  * are creating those for a joinrel during GEQO, we don't want them to
1682  * clutter the main planning context. Upshot is that the best solution is
1683  * to explicitly allocate memory in the same context the given RelOptInfo
1684  * is in.
1685  */
1686  oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
1687 
1688  pathnode = makeNode(UniquePath);
1689 
1690  pathnode->path.pathtype = T_Unique;
1691  pathnode->path.parent = rel;
1692  pathnode->path.pathtarget = rel->reltarget;
1693  pathnode->path.param_info = subpath->param_info;
1694  pathnode->path.parallel_aware = false;
1695  pathnode->path.parallel_safe = rel->consider_parallel &&
1696  subpath->parallel_safe;
1697  pathnode->path.parallel_workers = subpath->parallel_workers;
1698 
1699  /*
1700  * Assume the output is unsorted, since we don't necessarily have pathkeys
1701  * to represent it. (This might get overridden below.)
1702  */
1703  pathnode->path.pathkeys = NIL;
1704 
1705  pathnode->subpath = subpath;
1706  pathnode->in_operators = sjinfo->semi_operators;
1707  pathnode->uniq_exprs = sjinfo->semi_rhs_exprs;
1708 
1709  /*
1710  * If the input is a relation and it has a unique index that proves the
1711  * semi_rhs_exprs are unique, then we don't need to do anything. Note
1712  * that relation_has_unique_index_for automatically considers restriction
1713  * clauses for the rel, as well.
1714  */
1715  if (rel->rtekind == RTE_RELATION && sjinfo->semi_can_btree &&
1716  relation_has_unique_index_for(root, rel, NIL,
1717  sjinfo->semi_rhs_exprs,
1718  sjinfo->semi_operators))
1719  {
1720  pathnode->umethod = UNIQUE_PATH_NOOP;
1721  pathnode->path.rows = rel->rows;
1722  pathnode->path.startup_cost = subpath->startup_cost;
1723  pathnode->path.total_cost = subpath->total_cost;
1724  pathnode->path.pathkeys = subpath->pathkeys;
1725 
1726  rel->cheapest_unique_path = (Path *) pathnode;
1727 
1728  MemoryContextSwitchTo(oldcontext);
1729 
1730  return pathnode;
1731  }
1732 
1733  /*
1734  * If the input is a subquery whose output must be unique already, then we
1735  * don't need to do anything. The test for uniqueness has to consider
1736  * exactly which columns we are extracting; for example "SELECT DISTINCT
1737  * x,y" doesn't guarantee that x alone is distinct. So we cannot check for
1738  * this optimization unless semi_rhs_exprs consists only of simple Vars
1739  * referencing subquery outputs. (Possibly we could do something with
1740  * expressions in the subquery outputs, too, but for now keep it simple.)
1741  */
1742  if (rel->rtekind == RTE_SUBQUERY)
1743  {
1744  RangeTblEntry *rte = planner_rt_fetch(rel->relid, root);
1745 
1746  if (query_supports_distinctness(rte->subquery))
1747  {
1748  List *sub_tlist_colnos;
1749 
1750  sub_tlist_colnos = translate_sub_tlist(sjinfo->semi_rhs_exprs,
1751  rel->relid);
1752 
1753  if (sub_tlist_colnos &&
1754  query_is_distinct_for(rte->subquery,
1755  sub_tlist_colnos,
1756  sjinfo->semi_operators))
1757  {
1758  pathnode->umethod = UNIQUE_PATH_NOOP;
1759  pathnode->path.rows = rel->rows;
1760  pathnode->path.startup_cost = subpath->startup_cost;
1761  pathnode->path.total_cost = subpath->total_cost;
1762  pathnode->path.pathkeys = subpath->pathkeys;
1763 
1764  rel->cheapest_unique_path = (Path *) pathnode;
1765 
1766  MemoryContextSwitchTo(oldcontext);
1767 
1768  return pathnode;
1769  }
1770  }
1771  }
1772 
1773  /* Estimate number of output rows */
1774  pathnode->path.rows = estimate_num_groups(root,
1775  sjinfo->semi_rhs_exprs,
1776  rel->rows,
1777  NULL,
1778  NULL);
1779  numCols = list_length(sjinfo->semi_rhs_exprs);
1780 
1781  if (sjinfo->semi_can_btree)
1782  {
1783  /*
1784  * Estimate cost for sort+unique implementation
1785  */
1786  cost_sort(&sort_path, root, NIL,
1787  subpath->total_cost,
1788  rel->rows,
1789  subpath->pathtarget->width,
1790  0.0,
1791  work_mem,
1792  -1.0);
1793 
1794  /*
1795  * Charge one cpu_operator_cost per comparison per input tuple. We
1796  * assume all columns get compared for most of the tuples. (XXX
1797  * probably this is an overestimate.) This should agree with
1798  * create_upper_unique_path.
1799  */
1800  sort_path.total_cost += cpu_operator_cost * rel->rows * numCols;
1801  }
1802 
1803  if (sjinfo->semi_can_hash)
1804  {
1805  /*
1806  * Estimate the overhead per hashtable entry at 64 bytes (same as in
1807  * planner.c).
1808  */
1809  int hashentrysize = subpath->pathtarget->width + 64;
1810 
1811  if (hashentrysize * pathnode->path.rows > get_hash_memory_limit())
1812  {
1813  /*
1814  * We should not try to hash. Hack the SpecialJoinInfo to
1815  * remember this, in case we come through here again.
1816  */
1817  sjinfo->semi_can_hash = false;
1818  }
1819  else
1820  cost_agg(&agg_path, root,
1821  AGG_HASHED, NULL,
1822  numCols, pathnode->path.rows,
1823  NIL,
1824  subpath->startup_cost,
1825  subpath->total_cost,
1826  rel->rows,
1827  subpath->pathtarget->width);
1828  }
1829 
1830  if (sjinfo->semi_can_btree && sjinfo->semi_can_hash)
1831  {
1832  if (agg_path.total_cost < sort_path.total_cost)
1833  pathnode->umethod = UNIQUE_PATH_HASH;
1834  else
1835  pathnode->umethod = UNIQUE_PATH_SORT;
1836  }
1837  else if (sjinfo->semi_can_btree)
1838  pathnode->umethod = UNIQUE_PATH_SORT;
1839  else if (sjinfo->semi_can_hash)
1840  pathnode->umethod = UNIQUE_PATH_HASH;
1841  else
1842  {
1843  /* we can get here only if we abandoned hashing above */
1844  MemoryContextSwitchTo(oldcontext);
1845  return NULL;
1846  }
1847 
1848  if (pathnode->umethod == UNIQUE_PATH_HASH)
1849  {
1850  pathnode->path.startup_cost = agg_path.startup_cost;
1851  pathnode->path.total_cost = agg_path.total_cost;
1852  }
1853  else
1854  {
1855  pathnode->path.startup_cost = sort_path.startup_cost;
1856  pathnode->path.total_cost = sort_path.total_cost;
1857  }
1858 
1859  rel->cheapest_unique_path = (Path *) pathnode;
1860 
1861  MemoryContextSwitchTo(oldcontext);
1862 
1863  return pathnode;
1864 }
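
/*
 * Editor's illustrative sketch (not part of pathnode.c): because
 * create_unique_path() caches its result in rel->cheapest_unique_path and
 * may return NULL when neither sorting nor hashing can unique-ify the
 * semijoin's inner side, callers typically just probe it and fall back to
 * ordinary semijoin handling on NULL.  The helper name is hypothetical.
 */
static bool
example_inner_rel_is_uniquable(PlannerInfo *root, RelOptInfo *rel,
							   SpecialJoinInfo *sjinfo)
{
	UniquePath *upath;

	/* must pass the cheapest_total path; the result is cached on first call */
	upath = create_unique_path(root, rel, rel->cheapest_total_path, sjinfo);

	return upath != NULL;
}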
1865 
1866 /*
1867  * create_gather_merge_path
1868  *
1869  * Creates a path corresponding to a gather merge scan, returning
1870  * the pathnode.
1871  */
1872 GatherMergePath *
1873 create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
1874  PathTarget *target, List *pathkeys,
1875  Relids required_outer, double *rows)
1876 {
1877  GatherMergePath *pathnode = makeNode(GatherMergePath);
1878  Cost input_startup_cost = 0;
1879  Cost input_total_cost = 0;
1880 
1881  Assert(subpath->parallel_safe);
1882  Assert(pathkeys);
1883 
1884  pathnode->path.pathtype = T_GatherMerge;
1885  pathnode->path.parent = rel;
1886  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1887  required_outer);
1888  pathnode->path.parallel_aware = false;
1889 
1890  pathnode->subpath = subpath;
1891  pathnode->num_workers = subpath->parallel_workers;
1892  pathnode->path.pathkeys = pathkeys;
1893  pathnode->path.pathtarget = target ? target : rel->reltarget;
1894  pathnode->path.rows += subpath->rows;
1895 
1896  if (pathkeys_contained_in(pathkeys, subpath->pathkeys))
1897  {
1898  /* Subpath is adequately ordered, we won't need to sort it */
1899  input_startup_cost += subpath->startup_cost;
1900  input_total_cost += subpath->total_cost;
1901  }
1902  else
1903  {
1904  /* We'll need to insert a Sort node, so include cost for that */
1905  Path sort_path; /* dummy for result of cost_sort */
1906 
1907  cost_sort(&sort_path,
1908  root,
1909  pathkeys,
1910  subpath->total_cost,
1911  subpath->rows,
1912  subpath->pathtarget->width,
1913  0.0,
1914  work_mem,
1915  -1);
1916  input_startup_cost += sort_path.startup_cost;
1917  input_total_cost += sort_path.total_cost;
1918  }
1919 
1920  cost_gather_merge(pathnode, root, rel, pathnode->path.param_info,
1921  input_startup_cost, input_total_cost, rows);
1922 
1923  return pathnode;
1924 }
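
/*
 * Editor's illustrative sketch (not part of pathnode.c): a Gather Merge is
 * normally built on top of a parallel-safe partial path whose output is (or
 * will be) sorted by the requested pathkeys, in the style of the planner's
 * generate_gather_paths machinery.  The helper name is hypothetical.
 */
static void
example_add_gather_merge_path(PlannerInfo *root, RelOptInfo *rel,
							  Path *partial_path, List *pathkeys)
{
	GatherMergePath *gmpath;

	/* NULL for 'rows' means: let cost_gather_merge() estimate the rowcount */
	gmpath = create_gather_merge_path(root, rel, partial_path,
									  rel->reltarget, pathkeys,
									  NULL, NULL);
	add_path(rel, (Path *) gmpath);
}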
1925 
1926 /*
1927  * translate_sub_tlist - get subquery column numbers represented by tlist
1928  *
1929  * The given targetlist usually contains only Vars referencing the given relid.
1930  * Extract their varattnos (ie, the column numbers of the subquery) and return
1931  * as an integer List.
1932  *
1933  * If any of the tlist items is not a simple Var, we cannot determine whether
1934  * the subquery's uniqueness condition (if any) matches ours, so punt and
1935  * return NIL.
1936  */
1937 static List *
1938 translate_sub_tlist(List *tlist, int relid)
1939 {
1940  List *result = NIL;
1941  ListCell *l;
1942 
1943  foreach(l, tlist)
1944  {
1945  Var *var = (Var *) lfirst(l);
1946 
1947  if (!var || !IsA(var, Var) ||
1948  var->varno != relid)
1949  return NIL; /* punt */
1950 
1951  result = lappend_int(result, var->varattno);
1952  }
1953  return result;
1954 }
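
/*
 * Editor's worked example for translate_sub_tlist() above (illustrative):
 * given a tlist of two Vars with varno 3 and varattnos 2 and 5, and
 * relid = 3, the result is the integer list (2, 5).  If any tlist item is
 * not a plain Var of that relid -- say an expression such as x + 1 -- the
 * function punts and returns NIL.
 */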
1955 
1956 /*
1957  * create_gather_path
1958  * Creates a path corresponding to a gather scan, returning the
1959  * pathnode.
1960  *
1961  * 'rows' may optionally be set to override row estimates from other sources.
1962  */
1963 GatherPath *
1964 create_gather_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
1965  PathTarget *target, Relids required_outer, double *rows)
1966 {
1967  GatherPath *pathnode = makeNode(GatherPath);
1968 
1969  Assert(subpath->parallel_safe);
1970 
1971  pathnode->path.pathtype = T_Gather;
1972  pathnode->path.parent = rel;
1973  pathnode->path.pathtarget = target;
1974  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1975  required_outer);
1976  pathnode->path.parallel_aware = false;
1977  pathnode->path.parallel_safe = false;
1978  pathnode->path.parallel_workers = 0;
1979  pathnode->path.pathkeys = NIL; /* Gather has unordered result */
1980 
1981  pathnode->subpath = subpath;
1982  pathnode->num_workers = subpath->parallel_workers;
1983  pathnode->single_copy = false;
1984 
1985  if (pathnode->num_workers == 0)
1986  {
1987  pathnode->path.pathkeys = subpath->pathkeys;
1988  pathnode->num_workers = 1;
1989  pathnode->single_copy = true;
1990  }
1991 
1992  cost_gather(pathnode, root, rel, pathnode->path.param_info, rows);
1993 
1994  return pathnode;
1995 }
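
/*
 * Editor's illustrative sketch (not part of pathnode.c): Gather paths are
 * usually built over the cheapest partial path of a relation, much as the
 * planner's generate_gather_paths() does.  The helper name is hypothetical.
 */
static void
example_add_gather_path(PlannerInfo *root, RelOptInfo *rel)
{
	Path	   *cheapest_partial_path;
	GatherPath *gpath;

	if (rel->partial_pathlist == NIL)
		return;					/* nothing to gather */

	cheapest_partial_path = (Path *) linitial(rel->partial_pathlist);
	gpath = create_gather_path(root, rel, cheapest_partial_path,
							   rel->reltarget, NULL, NULL);
	add_path(rel, (Path *) gpath);
}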
1996 
1997 /*
1998  * create_subqueryscan_path
1999  * Creates a path corresponding to a scan of a subquery,
2000  * returning the pathnode.
2001  *
2002  * Caller must pass trivial_pathtarget = true if it believes rel->reltarget to
2003  * be trivial, ie just a fetch of all the subquery output columns in order.
2004  * While we could determine that here, the caller can usually do it more
2005  * efficiently (or at least amortize it over multiple calls).
2006  */
2007 SubqueryScanPath *
2008 create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
2009  bool trivial_pathtarget,
2010  List *pathkeys, Relids required_outer)
2011 {
2012  SubqueryScanPath *pathnode = makeNode(SubqueryScanPath);
2013 
2014  pathnode->path.pathtype = T_SubqueryScan;
2015  pathnode->path.parent = rel;
2016  pathnode->path.pathtarget = rel->reltarget;
2017  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
2018  required_outer);
2019  pathnode->path.parallel_aware = false;
2020  pathnode->path.parallel_safe = rel->consider_parallel &&
2021  subpath->parallel_safe;
2022  pathnode->path.parallel_workers = subpath->parallel_workers;
2023  pathnode->path.pathkeys = pathkeys;
2024  pathnode->subpath = subpath;
2025 
2026  cost_subqueryscan(pathnode, root, rel, pathnode->path.param_info,
2027  trivial_pathtarget);
2028 
2029  return pathnode;
2030 }
2031 
2032 /*
2033  * create_functionscan_path
2034  * Creates a path corresponding to a sequential scan of a function,
2035  * returning the pathnode.
2036  */
2037 Path *
2038 create_functionscan_path(PlannerInfo *root, RelOptInfo *rel,
2039  List *pathkeys, Relids required_outer)
2040 {
2041  Path *pathnode = makeNode(Path);
2042 
2043  pathnode->pathtype = T_FunctionScan;
2044  pathnode->parent = rel;
2045  pathnode->pathtarget = rel->reltarget;
2046  pathnode->param_info = get_baserel_parampathinfo(root, rel,
2047  required_outer);
2048  pathnode->parallel_aware = false;
2049  pathnode->parallel_safe = rel->consider_parallel;
2050  pathnode->parallel_workers = 0;
2051  pathnode->pathkeys = pathkeys;
2052 
2053  cost_functionscan(pathnode, root, rel, pathnode->param_info);
2054 
2055  return pathnode;
2056 }
2057 
2058 /*
2059  * create_tablefuncscan_path
2060  * Creates a path corresponding to a sequential scan of a table function,
2061  * returning the pathnode.
2062  */
2063 Path *
2064 create_tablefuncscan_path(PlannerInfo *root, RelOptInfo *rel,
2065  Relids required_outer)
2066 {
2067  Path *pathnode = makeNode(Path);
2068 
2069  pathnode->pathtype = T_TableFuncScan;
2070  pathnode->parent = rel;
2071  pathnode->pathtarget = rel->reltarget;
2072  pathnode->param_info = get_baserel_parampathinfo(root, rel,
2073  required_outer);
2074  pathnode->parallel_aware = false;
2075  pathnode->parallel_safe = rel->consider_parallel;
2076  pathnode->parallel_workers = 0;
2077  pathnode->pathkeys = NIL; /* result is always unordered */
2078 
2079  cost_tablefuncscan(pathnode, root, rel, pathnode->param_info);
2080 
2081  return pathnode;
2082 }
2083 
2084 /*
2085  * create_valuesscan_path
2086  * Creates a path corresponding to a scan of a VALUES list,
2087  * returning the pathnode.
2088  */
2089 Path *
2090 create_valuesscan_path(PlannerInfo *root, RelOptInfo *rel,
2091  Relids required_outer)
2092 {
2093  Path *pathnode = makeNode(Path);
2094 
2095  pathnode->pathtype = T_ValuesScan;
2096  pathnode->parent = rel;
2097  pathnode->pathtarget = rel->reltarget;
2098  pathnode->param_info = get_baserel_parampathinfo(root, rel,
2099  required_outer);
2100  pathnode->parallel_aware = false;
2101  pathnode->parallel_safe = rel->consider_parallel;
2102  pathnode->parallel_workers = 0;
2103  pathnode->pathkeys = NIL; /* result is always unordered */
2104 
2105  cost_valuesscan(pathnode, root, rel, pathnode->param_info);
2106 
2107  return pathnode;
2108 }
2109 
2110 /*
2111  * create_ctescan_path
2112  * Creates a path corresponding to a scan of a non-self-reference CTE,
2113  * returning the pathnode.
2114  */
2115 Path *
2116 create_ctescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
2117 {
2118  Path *pathnode = makeNode(Path);
2119 
2120  pathnode->pathtype = T_CteScan;
2121  pathnode->parent = rel;
2122  pathnode->pathtarget = rel->reltarget;
2123  pathnode->param_info = get_baserel_parampathinfo(root, rel,
2124  required_outer);
2125  pathnode->parallel_aware = false;
2126  pathnode->parallel_safe = rel->consider_parallel;
2127  pathnode->parallel_workers = 0;
2128  pathnode->pathkeys = NIL; /* XXX for now, result is always unordered */
2129 
2130  cost_ctescan(pathnode, root, rel, pathnode->param_info);
2131 
2132  return pathnode;
2133 }
2134 
2135 /*
2136  * create_namedtuplestorescan_path
2137  * Creates a path corresponding to a scan of a named tuplestore, returning
2138  * the pathnode.
2139  */
2140 Path *
2141 create_namedtuplestorescan_path(PlannerInfo *root, RelOptInfo *rel,
2142  Relids required_outer)
2143 {
2144  Path *pathnode = makeNode(Path);
2145 
2146  pathnode->pathtype = T_NamedTuplestoreScan;
2147  pathnode->parent = rel;
2148  pathnode->pathtarget = rel->reltarget;
2149  pathnode->param_info = get_baserel_parampathinfo(root, rel,
2150  required_outer);
2151  pathnode->parallel_aware = false;
2152  pathnode->parallel_safe = rel->consider_parallel;
2153  pathnode->parallel_workers = 0;
2154  pathnode->pathkeys = NIL; /* result is always unordered */
2155 
2156  cost_namedtuplestorescan(pathnode, root, rel, pathnode->param_info);
2157 
2158  return pathnode;
2159 }
2160 
2161 /*
2162  * create_resultscan_path
2163  * Creates a path corresponding to a scan of an RTE_RESULT relation,
2164  * returning the pathnode.
2165  */
2166 Path *
2167 create_resultscan_path(PlannerInfo *root, RelOptInfo *rel,
2168  Relids required_outer)
2169 {
2170  Path *pathnode = makeNode(Path);
2171 
2172  pathnode->pathtype = T_Result;
2173  pathnode->parent = rel;
2174  pathnode->pathtarget = rel->reltarget;
2175  pathnode->param_info = get_baserel_parampathinfo(root, rel,
2176  required_outer);
2177  pathnode->parallel_aware = false;
2178  pathnode->parallel_safe = rel->consider_parallel;
2179  pathnode->parallel_workers = 0;
2180  pathnode->pathkeys = NIL; /* result is always unordered */
2181 
2182  cost_resultscan(pathnode, root, rel, pathnode->param_info);
2183 
2184  return pathnode;
2185 }
2186 
2187 /*
2188  * create_worktablescan_path
2189  * Creates a path corresponding to a scan of a self-reference CTE,
2190  * returning the pathnode.
2191  */
2192 Path *
2193 create_worktablescan_path(PlannerInfo *root, RelOptInfo *rel,
2194  Relids required_outer)
2195 {
2196  Path *pathnode = makeNode(Path);
2197 
2198  pathnode->pathtype = T_WorkTableScan;
2199  pathnode->parent = rel;
2200  pathnode->pathtarget = rel->reltarget;
2201  pathnode->param_info = get_baserel_parampathinfo(root, rel,
2202  required_outer);
2203  pathnode->parallel_aware = false;
2204  pathnode->parallel_safe = rel->consider_parallel;
2205  pathnode->parallel_workers = 0;
2206  pathnode->pathkeys = NIL; /* result is always unordered */
2207 
2208  /* Cost is the same as for a regular CTE scan */
2209  cost_ctescan(pathnode, root, rel, pathnode->param_info);
2210 
2211  return pathnode;
2212 }
2213 
2214 /*
2215  * create_foreignscan_path
2216  * Creates a path corresponding to a scan of a foreign base table,
2217  * returning the pathnode.
2218  *
2219  * This function is never called from core Postgres; rather, it's expected
2220  * to be called by the GetForeignPaths function of a foreign data wrapper.
2221  * We make the FDW supply all fields of the path, since we do not have any way
2222  * to calculate them in core. However, there is a usually-sane default for
2223  * the pathtarget (rel->reltarget), so we let a NULL for "target" select that.
2224  */
2225 ForeignPath *
2226 create_foreignscan_path(PlannerInfo *root, RelOptInfo *rel,
2227  PathTarget *target,
2228  double rows, Cost startup_cost, Cost total_cost,
2229  List *pathkeys,
2230  Relids required_outer,
2231  Path *fdw_outerpath,
2232  List *fdw_restrictinfo,
2233  List *fdw_private)
2234 {
2235  ForeignPath *pathnode = makeNode(ForeignPath);
2236 
2237  /* Historically some FDWs were confused about when to use this */
2238  Assert(IS_SIMPLE_REL(rel));
2239 
2240  pathnode->path.pathtype = T_ForeignScan;
2241  pathnode->path.parent = rel;
2242  pathnode->path.pathtarget = target ? target : rel->reltarget;
2243  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
2244  required_outer);
2245  pathnode->path.parallel_aware = false;
2246  pathnode->path.parallel_safe = rel->consider_parallel;
2247  pathnode->path.parallel_workers = 0;
2248  pathnode->path.rows = rows;
2249  pathnode->path.startup_cost = startup_cost;
2250  pathnode->path.total_cost = total_cost;
2251  pathnode->path.pathkeys = pathkeys;
2252 
2253  pathnode->fdw_outerpath = fdw_outerpath;
2254  pathnode->fdw_restrictinfo = fdw_restrictinfo;
2255  pathnode->fdw_private = fdw_private;
2256 
2257  return pathnode;
2258 }
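
/*
 * Editor's illustrative sketch (not part of pathnode.c): as the comment
 * above says, this function is meant to be called from an FDW's
 * GetForeignPaths callback.  A minimal callback might look like the
 * hypothetical example below; the cost numbers are placeholders that a real
 * FDW would normally derive from remote statistics.
 */
static void
exampleGetForeignPaths(PlannerInfo *root, RelOptInfo *baserel,
					   Oid foreigntableid)
{
	Cost		startup_cost = 10.0;
	Cost		total_cost = startup_cost + baserel->rows * cpu_tuple_cost;

	add_path(baserel, (Path *)
			 create_foreignscan_path(root, baserel,
									 NULL,	/* default pathtarget */
									 baserel->rows,
									 startup_cost,
									 total_cost,
									 NIL,	/* no pathkeys */
									 baserel->lateral_relids,
									 NULL,	/* no outer path */
									 NIL,	/* no fdw_restrictinfo */
									 NIL)); /* no fdw_private */
}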
2259 
2260 /*
2261  * create_foreign_join_path
2262  * Creates a path corresponding to a scan of a foreign join,
2263  * returning the pathnode.
2264  *
2265  * This function is never called from core Postgres; rather, it's expected
2266  * to be called by the GetForeignJoinPaths function of a foreign data wrapper.
2267  * We make the FDW supply all fields of the path, since we do not have any way
2268  * to calculate them in core. However, there is a usually-sane default for
2269  * the pathtarget (rel->reltarget), so we let a NULL for "target" select that.
2270  */
2271 ForeignPath *
2272 create_foreign_join_path(PlannerInfo *root, RelOptInfo *rel,
2273  PathTarget *target,
2274  double rows, Cost startup_cost, Cost total_cost,
2275  List *pathkeys,
2276  Relids required_outer,
2277  Path *fdw_outerpath,
2278  List *fdw_restrictinfo,
2279  List *fdw_private)
2280 {
2281  ForeignPath *pathnode = makeNode(ForeignPath);
2282 
2283  /*
2284  * We should use get_joinrel_parampathinfo to handle parameterized paths,
2285  * but the API of this function doesn't support it, and existing
2286  * extensions aren't yet trying to build such paths anyway. For the
2287  * moment just throw an error if someone tries it; eventually we should
2288  * revisit this.
2289  */
2290  if (!bms_is_empty(required_outer) || !bms_is_empty(rel->lateral_relids))
2291  elog(ERROR, "parameterized foreign joins are not supported yet");
2292 
2293  pathnode->path.pathtype = T_ForeignScan;
2294  pathnode->path.parent = rel;
2295  pathnode->path.pathtarget = target ? target : rel->reltarget;
2296  pathnode->path.param_info = NULL; /* XXX see above */
2297  pathnode->path.parallel_aware = false;
2298  pathnode->path.parallel_safe = rel->consider_parallel;
2299  pathnode->path.parallel_workers = 0;
2300  pathnode->path.rows = rows;
2301  pathnode->path.startup_cost = startup_cost;
2302  pathnode->path.total_cost = total_cost;
2303  pathnode->path.pathkeys = pathkeys;
2304 
2305  pathnode->fdw_outerpath = fdw_outerpath;
2306  pathnode->fdw_restrictinfo = fdw_restrictinfo;
2307  pathnode->fdw_private = fdw_private;
2308 
2309  return pathnode;
2310 }
2311 
2312 /*
2313  * create_foreign_upper_path
2314  * Creates a path corresponding to an upper relation that's computed
2315  * directly by an FDW, returning the pathnode.
2316  *
2317  * This function is never called from core Postgres; rather, it's expected to
2318  * be called by the GetForeignUpperPaths function of a foreign data wrapper.
2319  * We make the FDW supply all fields of the path, since we do not have any way
2320  * to calculate them in core. However, there is a usually-sane default for
2321  * the pathtarget (rel->reltarget), so we let a NULL for "target" select that.
2322  */
2323 ForeignPath *
2324 create_foreign_upper_path(PlannerInfo *root, RelOptInfo *rel,
2325  PathTarget *target,
2326  double rows, Cost startup_cost, Cost total_cost,
2327  List *pathkeys,
2328  Path *fdw_outerpath,
2329  List *fdw_restrictinfo,
2330  List *fdw_private)
2331 {
2332  ForeignPath *pathnode = makeNode(ForeignPath);
2333 
2334  /*
2335  * Upper relations should never have any lateral references, since joining
2336  * is complete.
2337  */
2338  Assert(bms_is_empty(rel->lateral_relids));
2339 
2340  pathnode->path.pathtype = T_ForeignScan;
2341  pathnode->path.parent = rel;
2342  pathnode->path.pathtarget = target ? target : rel->reltarget;
2343  pathnode->path.param_info = NULL;
2344  pathnode->path.parallel_aware = false;
2345  pathnode->path.parallel_safe = rel->consider_parallel;
2346  pathnode->path.parallel_workers = 0;
2347  pathnode->path.rows = rows;
2348  pathnode->path.startup_cost = startup_cost;
2349  pathnode->path.total_cost = total_cost;
2350  pathnode->path.pathkeys = pathkeys;
2351 
2352  pathnode->fdw_outerpath = fdw_outerpath;
2353  pathnode->fdw_restrictinfo = fdw_restrictinfo;
2354  pathnode->fdw_private = fdw_private;
2355 
2356  return pathnode;
2357 }
2358 
2359 /*
2360  * calc_nestloop_required_outer
2361  * Compute the required_outer set for a nestloop join path
2362  *
2363  * Note: result must not share storage with either input
2364  */
2365 Relids
2366 calc_nestloop_required_outer(Relids outerrelids,
2367  Relids outer_paramrels,
2368  Relids innerrelids,
2369  Relids inner_paramrels)
2370 {
2371  Relids required_outer;
2372 
2373  /* inner_path can require rels from outer path, but not vice versa */
2374  Assert(!bms_overlap(outer_paramrels, innerrelids));
2375  /* easy case if inner path is not parameterized */
2376  if (!inner_paramrels)
2377  return bms_copy(outer_paramrels);
2378  /* else, form the union ... */
2379  required_outer = bms_union(outer_paramrels, inner_paramrels);
2380  /* ... and remove any mention of now-satisfied outer rels */
2381  required_outer = bms_del_members(required_outer,
2382  outerrelids);
2383  return required_outer;
2384 }
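
/*
 * Editor's worked example (illustrative): with outerrelids = {A},
 * outer_paramrels = {B}, innerrelids = {C} and inner_paramrels = {A, D},
 * the union {A, B, D} loses A (now satisfied by the outer side), so the
 * nestloop's required_outer set is {B, D}.
 */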
2385 
2386 /*
2387  * calc_non_nestloop_required_outer
2388  * Compute the required_outer set for a merge or hash join path
2389  *
2390  * Note: result must not share storage with either input
2391  */
2392 Relids
2393 calc_non_nestloop_required_outer(Path *outer_path, Path *inner_path)
2394 {
2395  Relids outer_paramrels = PATH_REQ_OUTER(outer_path);
2396  Relids inner_paramrels = PATH_REQ_OUTER(inner_path);
2397  Relids required_outer;
2398 
2399  /* neither path can require rels from the other */
2400  Assert(!bms_overlap(outer_paramrels, inner_path->parent->relids));
2401  Assert(!bms_overlap(inner_paramrels, outer_path->parent->relids));
2402  /* form the union ... */
2403  required_outer = bms_union(outer_paramrels, inner_paramrels);
2404  /* we do not need an explicit test for empty; bms_union gets it right */
2405  return required_outer;
2406 }
2407 
2408 /*
2409  * create_nestloop_path
2410  * Creates a pathnode corresponding to a nestloop join between two
2411  * relations.
2412  *
2413  * 'joinrel' is the join relation.
2414  * 'jointype' is the type of join required
2415  * 'workspace' is the result from initial_cost_nestloop
2416  * 'extra' contains various information about the join
2417  * 'outer_path' is the outer path
2418  * 'inner_path' is the inner path
2419  * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
2420  * 'pathkeys' are the path keys of the new join path
2421  * 'required_outer' is the set of required outer rels
2422  *
2423  * Returns the resulting path node.
2424  */
2425 NestPath *
2426 create_nestloop_path(PlannerInfo *root,
2427  RelOptInfo *joinrel,
2428  JoinType jointype,
2429  JoinCostWorkspace *workspace,
2430  JoinPathExtraData *extra,
2431  Path *outer_path,
2432  Path *inner_path,
2433  List *restrict_clauses,
2434  List *pathkeys,
2435  Relids required_outer)
2436 {
2437  NestPath *pathnode = makeNode(NestPath);
2438  Relids inner_req_outer = PATH_REQ_OUTER(inner_path);
2439 
2440  /*
2441  * If the inner path is parameterized by the outer, we must drop any
2442  * restrict_clauses that are due to be moved into the inner path. We have
2443  * to do this now, rather than postpone the work till createplan time,
2444  * because the restrict_clauses list can affect the size and cost
2445  * estimates for this path. We detect such clauses by checking for serial
2446  * number match to clauses already enforced in the inner path.
2447  */
2448  if (bms_overlap(inner_req_outer, outer_path->parent->relids))
2449  {
2450  Bitmapset *enforced_serials = get_param_path_clause_serials(inner_path);
2451  List *jclauses = NIL;
2452  ListCell *lc;
2453 
2454  foreach(lc, restrict_clauses)
2455  {
2456  RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
2457 
2458  if (!bms_is_member(rinfo->rinfo_serial, enforced_serials))
2459  jclauses = lappend(jclauses, rinfo);
2460  }
2461  restrict_clauses = jclauses;
2462  }
2463 
2464  pathnode->jpath.path.pathtype = T_NestLoop;
2465  pathnode->jpath.path.parent = joinrel;
2466  pathnode->jpath.path.pathtarget = joinrel->reltarget;
2467  pathnode->jpath.path.param_info =
2468  get_joinrel_parampathinfo(root,
2469  joinrel,
2470  outer_path,
2471  inner_path,
2472  extra->sjinfo,
2473  required_outer,
2474  &restrict_clauses);
2475  pathnode->jpath.path.parallel_aware = false;
2476  pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2477  outer_path->parallel_safe && inner_path->parallel_safe;
2478  /* This is a foolish way to estimate parallel_workers, but for now... */
2479  pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2480  pathnode->jpath.path.pathkeys = pathkeys;
2481  pathnode->jpath.jointype = jointype;
2482  pathnode->jpath.inner_unique = extra->inner_unique;
2483  pathnode->jpath.outerjoinpath = outer_path;
2484  pathnode->jpath.innerjoinpath = inner_path;
2485  pathnode->jpath.joinrestrictinfo = restrict_clauses;
2486 
2487  final_cost_nestloop(root, pathnode, workspace, extra);
2488 
2489  return pathnode;
2490 }
2491 
2492 /*
2493  * create_mergejoin_path
2494  * Creates a pathnode corresponding to a mergejoin join between
2495  * two relations
2496  *
2497  * 'joinrel' is the join relation
2498  * 'jointype' is the type of join required
2499  * 'workspace' is the result from initial_cost_mergejoin
2500  * 'extra' contains various information about the join
2501  * 'outer_path' is the outer path
2502  * 'inner_path' is the inner path
2503  * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
2504  * 'pathkeys' are the path keys of the new join path
2505  * 'required_outer' is the set of required outer rels
2506  * 'mergeclauses' are the RestrictInfo nodes to use as merge clauses
2507  * (this should be a subset of the restrict_clauses list)
2508  * 'outersortkeys' are the sort varkeys for the outer relation
2509  * 'innersortkeys' are the sort varkeys for the inner relation
2510  */
2511 MergePath *
2512 create_mergejoin_path(PlannerInfo *root,
2513  RelOptInfo *joinrel,
2514  JoinType jointype,
2515  JoinCostWorkspace *workspace,
2516  JoinPathExtraData *extra,
2517  Path *outer_path,
2518  Path *inner_path,
2519  List *restrict_clauses,
2520  List *pathkeys,
2521  Relids required_outer,
2522  List *mergeclauses,
2523  List *outersortkeys,
2524  List *innersortkeys)
2525 {
2526  MergePath *pathnode = makeNode(MergePath);
2527 
2528  pathnode->jpath.path.pathtype = T_MergeJoin;
2529  pathnode->jpath.path.parent = joinrel;
2530  pathnode->jpath.path.pathtarget = joinrel->reltarget;
2531  pathnode->jpath.path.param_info =
2532  get_joinrel_parampathinfo(root,
2533  joinrel,
2534  outer_path,
2535  inner_path,
2536  extra->sjinfo,
2537  required_outer,
2538  &restrict_clauses);
2539  pathnode->jpath.path.parallel_aware = false;
2540  pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2541  outer_path->parallel_safe && inner_path->parallel_safe;
2542  /* This is a foolish way to estimate parallel_workers, but for now... */
2543  pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2544  pathnode->jpath.path.pathkeys = pathkeys;
2545  pathnode->jpath.jointype = jointype;
2546  pathnode->jpath.inner_unique = extra->inner_unique;
2547  pathnode->jpath.outerjoinpath = outer_path;
2548  pathnode->jpath.innerjoinpath = inner_path;
2549  pathnode->jpath.joinrestrictinfo = restrict_clauses;
2550  pathnode->path_mergeclauses = mergeclauses;
2551  pathnode->outersortkeys = outersortkeys;
2552  pathnode->innersortkeys = innersortkeys;
2553  /* pathnode->skip_mark_restore will be set by final_cost_mergejoin */
2554  /* pathnode->materialize_inner will be set by final_cost_mergejoin */
2555 
2556  final_cost_mergejoin(root, pathnode, workspace, extra);
2557 
2558  return pathnode;
2559 }
2560 
2561 /*
2562  * create_hashjoin_path
2563  * Creates a pathnode corresponding to a hash join between two relations.
2564  *
2565  * 'joinrel' is the join relation
2566  * 'jointype' is the type of join required
2567  * 'workspace' is the result from initial_cost_hashjoin
2568  * 'extra' contains various information about the join
2569  * 'outer_path' is the cheapest outer path
2570  * 'inner_path' is the cheapest inner path
2571  * 'parallel_hash' to select Parallel Hash of inner path (shared hash table)
2572  * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
2573  * 'required_outer' is the set of required outer rels
2574  * 'hashclauses' are the RestrictInfo nodes to use as hash clauses
2575  * (this should be a subset of the restrict_clauses list)
2576  */
2577 HashPath *
2578 create_hashjoin_path(PlannerInfo *root,
2579  RelOptInfo *joinrel,
2580  JoinType jointype,
2581  JoinCostWorkspace *workspace,
2582  JoinPathExtraData *extra,
2583  Path *outer_path,
2584  Path *inner_path,
2585  bool parallel_hash,
2586  List *restrict_clauses,
2587  Relids required_outer,
2588  List *hashclauses)
2589 {
2590  HashPath *pathnode = makeNode(HashPath);
2591 
2592  pathnode->jpath.path.pathtype = T_HashJoin;
2593  pathnode->jpath.path.parent = joinrel;
2594  pathnode->jpath.path.pathtarget = joinrel->reltarget;
2595  pathnode->jpath.path.param_info =
2596  get_joinrel_parampathinfo(root,
2597  joinrel,
2598  outer_path,
2599  inner_path,
2600  extra->sjinfo,
2601  required_outer,
2602  &restrict_clauses);
2603  pathnode->jpath.path.parallel_aware =
2604  joinrel->consider_parallel && parallel_hash;
2605  pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2606  outer_path->parallel_safe && inner_path->parallel_safe;
2607  /* This is a foolish way to estimate parallel_workers, but for now... */
2608  pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2609 
2610  /*
2611  * A hashjoin never has pathkeys, since its output ordering is
2612  * unpredictable due to possible batching. XXX If the inner relation is
2613  * small enough, we could instruct the executor that it must not batch,
2614  * and then we could assume that the output inherits the outer relation's
2615  * ordering, which might save a sort step. However there is considerable
2616  * downside if our estimate of the inner relation size is badly off. For
2617  * the moment we don't risk it. (Note also that if we wanted to take this
2618  * seriously, joinpath.c would have to consider many more paths for the
2619  * outer rel than it does now.)
2620  */
2621  pathnode->jpath.path.pathkeys = NIL;
2622  pathnode->jpath.jointype = jointype;
2623  pathnode->jpath.inner_unique = extra->inner_unique;
2624  pathnode->jpath.outerjoinpath = outer_path;
2625  pathnode->jpath.innerjoinpath = inner_path;
2626  pathnode->jpath.joinrestrictinfo = restrict_clauses;
2627  pathnode->path_hashclauses = hashclauses;
2628  /* final_cost_hashjoin will fill in pathnode->num_batches */
2629 
2630  final_cost_hashjoin(root, pathnode, workspace, extra);
2631 
2632  return pathnode;
2633 }
2634 
2635 /*
2636  * create_projection_path
2637  * Creates a pathnode that represents performing a projection.
2638  *
2639  * 'rel' is the parent relation associated with the result
2640  * 'subpath' is the path representing the source of data
2641  * 'target' is the PathTarget to be computed
2642  */
2643 ProjectionPath *
2644 create_projection_path(PlannerInfo *root,
2645  RelOptInfo *rel,
2646  Path *subpath,
2647  PathTarget *target)
2648 {
2649  ProjectionPath *pathnode = makeNode(ProjectionPath);
2650  PathTarget *oldtarget;
2651 
2652  /*
2653  * We mustn't put a ProjectionPath directly above another; it's useless
2654  * and will confuse create_projection_plan. Rather than making sure all
2655  * callers handle that, let's implement it here, by stripping off any
2656  * ProjectionPath in what we're given. Given this rule, there won't be
2657  * more than one.
2658  */
2659  if (IsA(subpath, ProjectionPath))
2660  {
2661  ProjectionPath *subpp = (ProjectionPath *) subpath;
2662 
2663  Assert(subpp->path.parent == rel);
2664  subpath = subpp->subpath;
2665  Assert(!IsA(subpath, ProjectionPath));
2666  }
2667 
2668  pathnode->path.pathtype = T_Result;
2669  pathnode->path.parent = rel;
2670  pathnode->path.pathtarget = target;
2671  /* For now, assume we are above any joins, so no parameterization */
2672  pathnode->path.param_info = NULL;
2673  pathnode->path.parallel_aware = false;
2674  pathnode->path.parallel_safe = rel->consider_parallel &&
2675  subpath->parallel_safe &&
2676  is_parallel_safe(root, (Node *) target->exprs);
2677  pathnode->path.parallel_workers = subpath->parallel_workers;
2678  /* Projection does not change the sort order */
2679  pathnode->path.pathkeys = subpath->pathkeys;
2680 
2681  pathnode->subpath = subpath;
2682 
2683  /*
2684  * We might not need a separate Result node. If the input plan node type
2685  * can project, we can just tell it to project something else. Or, if it
2686  * can't project but the desired target has the same expression list as
2687  * what the input will produce anyway, we can still give it the desired
2688  * tlist (possibly changing its ressortgroupref labels, but nothing else).
2689  * Note: in the latter case, create_projection_plan has to recheck our
2690  * conclusion; see comments therein.
2691  */
2692  oldtarget = subpath->pathtarget;
2693  if (is_projection_capable_path(subpath) ||
2694  equal(oldtarget->exprs, target->exprs))
2695  {
2696  /* No separate Result node needed */
2697  pathnode->dummypp = true;
2698 
2699  /*
2700  * Set cost of plan as subpath's cost, adjusted for tlist replacement.
2701  */
2702  pathnode->path.rows = subpath->rows;
2703  pathnode->path.startup_cost = subpath->startup_cost +
2704  (target->cost.startup - oldtarget->cost.startup);
2705  pathnode->path.total_cost = subpath->total_cost +
2706  (target->cost.startup - oldtarget->cost.startup) +
2707  (target->cost.per_tuple - oldtarget->cost.per_tuple) * subpath->rows;
2708  }
2709  else
2710  {
2711  /* We really do need the Result node */
2712  pathnode->dummypp = false;
2713 
2714  /*
2715  * The Result node's cost is cpu_tuple_cost per row, plus the cost of
2716  * evaluating the tlist. There is no qual to worry about.
2717  */
2718  pathnode->path.rows = subpath->rows;
2719  pathnode->path.startup_cost = subpath->startup_cost +
2720  target->cost.startup;
2721  pathnode->path.total_cost = subpath->total_cost +
2722  target->cost.startup +
2723  (cpu_tuple_cost + target->cost.per_tuple) * subpath->rows;
2724  }
2725 
2726  return pathnode;
2727 }
2728 
2729 /*
2730  * apply_projection_to_path
2731  * Add a projection step, or just apply the target directly to given path.
2732  *
2733  * This has the same net effect as create_projection_path(), except that if
2734  * a separate Result plan node isn't needed, we just replace the given path's
2735  * pathtarget with the desired one. This must be used only when the caller
2736  * knows that the given path isn't referenced elsewhere and so can be modified
2737  * in-place.
2738  *
2739  * If the input path is a GatherPath or GatherMergePath, we try to push the
2740  * new target down to its input as well; this is an even more invasive
2741  * modification of the input path, which create_projection_path() can't do.
2742  *
2743  * Note that we mustn't change the source path's parent link; so when it is
2744  * add_path'd to "rel" things will be a bit inconsistent. So far that has
2745  * not caused any trouble.
2746  *
2747  * 'rel' is the parent relation associated with the result
2748  * 'path' is the path representing the source of data
2749  * 'target' is the PathTarget to be computed
2750  */
2751 Path *
2752 apply_projection_to_path(PlannerInfo *root,
2753  RelOptInfo *rel,
2754  Path *path,
2755  PathTarget *target)
2756 {
2757  QualCost oldcost;
2758 
2759  /*
2760  * If given path can't project, we might need a Result node, so make a
2761  * separate ProjectionPath.
2762  */
2763  if (!is_projection_capable_path(path))
2764  return (Path *) create_projection_path(root, rel, path, target);
2765 
2766  /*
2767  * We can just jam the desired tlist into the existing path, being sure to
2768  * update its cost estimates appropriately.
2769  */
2770  oldcost = path->pathtarget->cost;
2771  path->pathtarget = target;
2772 
2773  path->startup_cost += target->cost.startup - oldcost.startup;
2774  path->total_cost += target->cost.startup - oldcost.startup +
2775  (target->cost.per_tuple - oldcost.per_tuple) * path->rows;
2776 
2777  /*
2778  * If the path happens to be a Gather or GatherMerge path, we'd like to
2779  * arrange for the subpath to return the required target list so that
2780  * workers can help project. But if there is something that is not
2781  * parallel-safe in the target expressions, then we can't.
2782  */
2783  if ((IsA(path, GatherPath) || IsA(path, GatherMergePath)) &&
2784  is_parallel_safe(root, (Node *) target->exprs))
2785  {
2786  /*
2787  * We always use create_projection_path here, even if the subpath is
2788  * projection-capable, so as to avoid modifying the subpath in place.
2789  * It seems unlikely at present that there could be any other
2790  * references to the subpath, but better safe than sorry.
2791  *
2792  * Note that we don't change the parallel path's cost estimates; it
2793  * might be appropriate to do so, to reflect the fact that the bulk of
2794  * the target evaluation will happen in workers.
2795  */
2796  if (IsA(path, GatherPath))
2797  {
2798  GatherPath *gpath = (GatherPath *) path;
2799 
2800  gpath->subpath = (Path *)
2801  create_projection_path(root,
2802  gpath->subpath->parent,
2803  gpath->subpath,
2804  target);
2805  }
2806  else
2807  {
2808  GatherMergePath *gmpath = (GatherMergePath *) path;
2809 
2810  gmpath->subpath = (Path *)
2811  create_projection_path(root,
2812  gmpath->subpath->parent,
2813  gmpath->subpath,
2814  target);
2815  }
2816  }
2817  else if (path->parallel_safe &&
2818  !is_parallel_safe(root, (Node *) target->exprs))
2819  {
2820  /*
2821  * We're inserting a parallel-restricted target list into a path
2822  * currently marked parallel-safe, so we have to mark it as no longer
2823  * safe.
2824  */
2825  path->parallel_safe = false;
2826  }
2827 
2828  return path;
2829 }
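
/*
 * Editor's illustrative sketch (not part of pathnode.c): the choice between
 * create_projection_path() and apply_projection_to_path() comes down to
 * ownership of the input path.  A hypothetical helper might look like this,
 * assuming the caller knows whether it is allowed to scribble on 'path'.
 */
static Path *
example_project_target(PlannerInfo *root, RelOptInfo *rel, Path *path,
					   PathTarget *target, bool path_is_private)
{
	if (path_is_private)
	{
		/* safe to modify in place (and push into Gather/GatherMerge) */
		return apply_projection_to_path(root, rel, path, target);
	}

	/* shared path: build a separate ProjectionPath above it */
	return (Path *) create_projection_path(root, rel, path, target);
}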
2830 
2831 /*
2832  * create_set_projection_path
2833  * Creates a pathnode that represents performing a projection that
2834  * includes set-returning functions.
2835  *
2836  * 'rel' is the parent relation associated with the result
2837  * 'subpath' is the path representing the source of data
2838  * 'target' is the PathTarget to be computed
2839  */
2840 ProjectSetPath *
2841 create_set_projection_path(PlannerInfo *root,
2842  RelOptInfo *rel,
2843  Path *subpath,
2844  PathTarget *target)
2845 {
2846  ProjectSetPath *pathnode = makeNode(ProjectSetPath);
2847  double tlist_rows;
2848  ListCell *lc;
2849 
2850  pathnode->path.pathtype = T_ProjectSet;
2851  pathnode->path.parent = rel;
2852  pathnode->path.pathtarget = target;
2853  /* For now, assume we are above any joins, so no parameterization */
2854  pathnode->path.param_info = NULL;
2855  pathnode->path.parallel_aware = false;
2856  pathnode->path.parallel_safe = rel->consider_parallel &&
2857  subpath->parallel_safe &&
2858  is_parallel_safe(root, (Node *) target->exprs);
2859  pathnode->path.parallel_workers = subpath->parallel_workers;
2860  /* Projection does not change the sort order XXX? */
2861  pathnode->path.pathkeys = subpath->pathkeys;
2862 
2863  pathnode->subpath = subpath;
2864 
2865  /*
2866  * Estimate number of rows produced by SRFs for each row of input; if
2867  * there's more than one in this node, use the maximum.
2868  */
2869  tlist_rows = 1;
2870  foreach(lc, target->exprs)
2871  {
2872  Node *node = (Node *) lfirst(lc);
2873  double itemrows;
2874 
2875  itemrows = expression_returns_set_rows(root, node);
2876  if (tlist_rows < itemrows)
2877  tlist_rows = itemrows;
2878  }
2879 
2880  /*
2881  * In addition to the cost of evaluating the tlist, charge cpu_tuple_cost
2882  * per input row, and half of cpu_tuple_cost for each added output row.
2883  * This is somewhat arbitrary, but it's what 9.6 did; we may revisit
2884  * this estimate later.
2885  */
2886  pathnode->path.rows = subpath->rows * tlist_rows;
2887  pathnode->path.startup_cost = subpath->startup_cost +
2888  target->cost.startup;
2889  pathnode->path.total_cost = subpath->total_cost +
2890  target->cost.startup +
2891  (cpu_tuple_cost + target->cost.per_tuple) * subpath->rows +
2892  (pathnode->path.rows - subpath->rows) * cpu_tuple_cost / 2;
2893 
2894  return pathnode;
2895 }
2896 
2897 /*
2898  * create_incremental_sort_path
2899  * Creates a pathnode that represents performing an incremental sort.
2900  *
2901  * 'rel' is the parent relation associated with the result
2902  * 'subpath' is the path representing the source of data
2903  * 'pathkeys' represents the desired sort order
2904  * 'presorted_keys' is the number of keys by which the input path is
2905  * already sorted
2906  * 'limit_tuples' is the estimated bound on the number of output tuples,
2907  * or -1 if no LIMIT or couldn't estimate
2908  */
2909 IncrementalSortPath *
2910 create_incremental_sort_path(PlannerInfo *root,
2911  RelOptInfo *rel,
2912  Path *subpath,
2913  List *pathkeys,
2914  int presorted_keys,
2915  double limit_tuples)
2916 {
2917  IncrementalSortPath *sort = makeNode(IncrementalSortPath);
2918  SortPath *pathnode = &sort->spath;
2919 
2920  pathnode->path.pathtype = T_IncrementalSort;
2921  pathnode->path.parent = rel;
2922  /* Sort doesn't project, so use source path's pathtarget */
2923  pathnode->path.pathtarget = subpath->pathtarget;
2924  /* For now, assume we are above any joins, so no parameterization */
2925  pathnode->path.param_info = NULL;
2926  pathnode->path.parallel_aware = false;
2927  pathnode->path.parallel_safe = rel->consider_parallel &&
2928  subpath->parallel_safe;
2929  pathnode->path.parallel_workers = subpath->parallel_workers;
2930  pathnode->path.pathkeys = pathkeys;
2931 
2932  pathnode->subpath = subpath;
2933 
2934  cost_incremental_sort(&pathnode->path,
2935  root, pathkeys, presorted_keys,
2936  subpath->startup_cost,
2937  subpath->total_cost,
2938  subpath->rows,
2939  subpath->pathtarget->width,
2940  0.0, /* XXX comparison_cost shouldn't be 0? */
2941  work_mem, limit_tuples);
2942 
2943  sort->nPresortedCols = presorted_keys;
2944 
2945  return sort;
2946 }
2947 
2948 /*
2949  * create_sort_path
2950  * Creates a pathnode that represents performing an explicit sort.
2951  *
2952  * 'rel' is the parent relation associated with the result
2953  * 'subpath' is the path representing the source of data
2954  * 'pathkeys' represents the desired sort order
2955  * 'limit_tuples' is the estimated bound on the number of output tuples,
2956  * or -1 if no LIMIT or couldn't estimate
2957  */
2958 SortPath *
2959 create_sort_path(PlannerInfo *root,
2960  RelOptInfo *rel,
2961  Path *subpath,
2962  List *pathkeys,
2963  double limit_tuples)
2964 {
2965  SortPath *pathnode = makeNode(SortPath);
2966 
2967  pathnode->path.pathtype = T_Sort;
2968  pathnode->path.parent = rel;
2969  /* Sort doesn't project, so use source path's pathtarget */
2970  pathnode->path.pathtarget = subpath->pathtarget;
2971  /* For now, assume we are above any joins, so no parameterization */
2972  pathnode->path.param_info = NULL;
2973  pathnode->path.parallel_aware = false;
2974  pathnode->path.parallel_safe = rel->consider_parallel &&
2975  subpath->parallel_safe;
2976  pathnode->path.parallel_workers = subpath->parallel_workers;
2977  pathnode->path.pathkeys = pathkeys;
2978 
2979  pathnode->subpath = subpath;
2980 
2981  cost_sort(&pathnode->path, root, pathkeys,
2982  subpath->total_cost,
2983  subpath->rows,
2984  subpath->pathtarget->width,
2985  0.0, /* XXX comparison_cost shouldn't be 0? */
2986  work_mem, limit_tuples);
2987 
2988  return pathnode;
2989 }
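
/*
 * Editor's illustrative sketch (not part of pathnode.c): callers commonly
 * pick between a full sort and an incremental sort based on how many
 * leading pathkeys the input already satisfies, in the style of the
 * planner's grouping/ordering path generation.  The helper name is
 * hypothetical.
 */
static Path *
example_make_ordered_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
						  List *pathkeys, double limit_tuples)
{
	int			presorted_keys;

	if (pathkeys_count_contained_in(pathkeys, subpath->pathkeys,
									&presorted_keys))
		return subpath;			/* already ordered as required */

	if (presorted_keys > 0)
		return (Path *) create_incremental_sort_path(root, rel, subpath,
													 pathkeys, presorted_keys,
													 limit_tuples);

	return (Path *) create_sort_path(root, rel, subpath, pathkeys,
									 limit_tuples);
}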
2990 
2991 /*
2992  * create_group_path
2993  * Creates a pathnode that represents performing grouping of presorted input
2994  *
2995  * 'rel' is the parent relation associated with the result
2996  * 'subpath' is the path representing the source of data
2997  * 'target' is the PathTarget to be computed
2998  * 'groupClause' is a list of SortGroupClause's representing the grouping
2999  * 'qual' is the HAVING quals if any
3000  * 'numGroups' is the estimated number of groups
3001  */
3002 GroupPath *
3003 create_group_path(PlannerInfo *root,
3004  RelOptInfo *rel,
3005  Path *subpath,
3006  List *groupClause,
3007  List *qual,
3008  double numGroups)
3009 {
3010  GroupPath *pathnode = makeNode(GroupPath);
3011  PathTarget *target = rel->reltarget;
3012 
3013  pathnode->path.pathtype = T_Group;
3014  pathnode->path.parent = rel;
3015  pathnode->path.pathtarget = target;
3016  /* For now, assume we are above any joins, so no parameterization */
3017  pathnode->path.param_info = NULL;
3018  pathnode->path.parallel_aware = false;
3019  pathnode->path.parallel_safe = rel->consider_parallel &&
3020  subpath->parallel_safe;
3021  pathnode->path.parallel_workers = subpath->parallel_workers;
3022  /* Group doesn't change sort ordering */
3023  pathnode->path.pathkeys = subpath->pathkeys;
3024 
3025  pathnode->subpath = subpath;
3026 
3027  pathnode->groupClause = groupClause;
3028  pathnode->qual = qual;
3029 
3030  cost_group(&pathnode->path, root,
3031  list_length(groupClause),
3032  numGroups,
3033  qual,
3034  subpath->startup_cost, subpath->total_cost,
3035  subpath->rows);
3036 
3037  /* add tlist eval cost for each output row */
3038  pathnode->path.startup_cost += target->cost.startup;
3039  pathnode->path.total_cost += target->cost.startup +
3040  target->cost.per_tuple * pathnode->path.rows;
3041 
3042  return pathnode;
3043 }
3044 
3045 /*
3046  * create_upper_unique_path
3047  * Creates a pathnode that represents performing an explicit Unique step
3048  * on presorted input.
3049  *
3050  * This produces a Unique plan node, but the use-case is so different from
3051  * create_unique_path that it doesn't seem worth trying to merge the two.
3052  *
3053  * 'rel' is the parent relation associated with the result
3054  * 'subpath' is the path representing the source of data
3055  * 'numCols' is the number of grouping columns
3056  * 'numGroups' is the estimated number of groups
3057  *
3058  * The input path must be sorted on the grouping columns, plus possibly
3059  * additional columns; so the first numCols pathkeys are the grouping columns
3060  */
3061 UpperUniquePath *
3062 create_upper_unique_path(PlannerInfo *root,
3063  RelOptInfo *rel,
3064  Path *subpath,
3065  int numCols,
3066  double numGroups)
3067 {
3068  UpperUniquePath *pathnode = makeNode(UpperUniquePath);
3069 
3070  pathnode->path.pathtype = T_Unique;
3071  pathnode->path.parent = rel;
3072  /* Unique doesn't project, so use source path's pathtarget */
3073  pathnode->path.pathtarget = subpath->pathtarget;
3074  /* For now, assume we are above any joins, so no parameterization */
3075  pathnode->path.param_info = NULL;
3076  pathnode->path.parallel_aware = false;
3077  pathnode->path.parallel_safe = rel->consider_parallel &&
3078  subpath->parallel_safe;
3079  pathnode->path.parallel_workers = subpath->parallel_workers;
3080  /* Unique doesn't change the input ordering */
3081  pathnode->path.pathkeys = subpath->pathkeys;
3082 
3083  pathnode->subpath = subpath;
3084  pathnode->numkeys = numCols;
3085 
3086  /*
3087  * Charge one cpu_operator_cost per comparison per input tuple. We assume
3088  * all columns get compared for most of the tuples. (XXX probably this is
3089  * an overestimate.)
3090  */
3091  pathnode->path.startup_cost = subpath->startup_cost;
3092  pathnode->path.total_cost = subpath->total_cost +
3093  cpu_operator_cost * subpath->rows * numCols;
3094  pathnode->path.rows = numGroups;
3095 
3096  return pathnode;
3097 }
3098 
3099 /*
3100  * create_agg_path
3101  * Creates a pathnode that represents performing aggregation/grouping
3102  *
3103  * 'rel' is the parent relation associated with the result
3104  * 'subpath' is the path representing the source of data
3105  * 'target' is the PathTarget to be computed
3106  * 'aggstrategy' is the Agg node's basic implementation strategy
3107  * 'aggsplit' is the Agg node's aggregate-splitting mode
3108  * 'groupClause' is a list of SortGroupClause's representing the grouping
3109  * 'qual' is the HAVING quals if any
3110  * 'aggcosts' contains cost info about the aggregate functions to be computed
3111  * 'numGroups' is the estimated number of groups (1 if not grouping)
3112  */
3113 AggPath *
3114 create_agg_path(PlannerInfo *root,
3115  RelOptInfo *rel,
3116  Path *subpath,
3117  PathTarget *target,
3118  AggStrategy aggstrategy,
3119  AggSplit aggsplit,
3120  List *groupClause,
3121  List *qual,
3122  const AggClauseCosts *aggcosts,
3123  double numGroups)
3124 {
3125  AggPath *pathnode = makeNode(AggPath);
3126 
3127  pathnode->path.pathtype = T_Agg;
3128  pathnode->path.parent = rel;
3129  pathnode->path.pathtarget = target;
3130  /* For now, assume we are above any joins, so no parameterization */
3131  pathnode->path.param_info = NULL;
3132  pathnode->path.parallel_aware = false;
3133  pathnode->path.parallel_safe = rel->consider_parallel &&
3134  subpath->parallel_safe;
3135  pathnode->path.parallel_workers = subpath->parallel_workers;
3136 
3137  if (aggstrategy == AGG_SORTED)
3138  {
3139  /*
3140  * Attempt to preserve the order of the subpath. Additional pathkeys
3141  * may have been added in adjust_group_pathkeys_for_groupagg() to
3142  * support ORDER BY / DISTINCT aggregates. Pathkeys added there
3143  * belong to columns within the aggregate function, so we must strip
3144  * these additional pathkeys off as those columns are unavailable
3145  * above the aggregate node.
3146  */
3147  if (list_length(subpath->pathkeys) > root->num_groupby_pathkeys)
3148  pathnode->path.pathkeys = list_copy_head(subpath->pathkeys,
3149  root->num_groupby_pathkeys);
3150  else
3151  pathnode->path.pathkeys = subpath->pathkeys; /* preserves order */
3152  }
3153  else
3154  pathnode->path.pathkeys = NIL; /* output is unordered */
3155 
3156  pathnode->subpath = subpath;
3157 
3158  pathnode->aggstrategy = aggstrategy;
3159  pathnode->aggsplit = aggsplit;
3160  pathnode->numGroups = numGroups;
3161  pathnode->transitionSpace = aggcosts ? aggcosts->transitionSpace : 0;
3162  pathnode->groupClause = groupClause;
3163  pathnode->qual = qual;
3164 
3165  cost_agg(&pathnode->path, root,
3166  aggstrategy, aggcosts,
3167  list_length(groupClause), numGroups,
3168  qual,
3169  subpath->startup_cost, subpath->total_cost,
3170  subpath->rows, subpath->pathtarget->width);
3171 
3172  /* add tlist eval cost for each output row */
3173  pathnode->path.startup_cost += target->cost.startup;
3174  pathnode->path.total_cost += target->cost.startup +
3175  target->cost.per_tuple * pathnode->path.rows;
3176 
3177  return pathnode;
3178 }
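
/*
 * Editor's illustrative sketch (not part of pathnode.c): a hashed,
 * non-split aggregation path over the cheapest input path could be built
 * and offered to add_path() roughly as below.  The helper name is
 * hypothetical; in core this kind of call is made from planner.c.
 */
static void
example_add_hashagg_path(PlannerInfo *root, RelOptInfo *grouped_rel,
						 Path *cheapest_path, PathTarget *target,
						 List *groupClause, List *havingQual,
						 const AggClauseCosts *agg_costs, double dNumGroups)
{
	add_path(grouped_rel, (Path *)
			 create_agg_path(root, grouped_rel, cheapest_path, target,
							 AGG_HASHED, AGGSPLIT_SIMPLE,
							 groupClause, havingQual,
							 agg_costs, dNumGroups));
}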
3179 
3180 /*
3181  * create_groupingsets_path
3182  * Creates a pathnode that represents performing GROUPING SETS aggregation
3183  *
3184  * GroupingSetsPath represents sorted grouping with one or more grouping sets.
3185  * The input path's result must be sorted to match the last entry in
3186  * rollup_groupclauses.
3187  *
3188  * 'rel' is the parent relation associated with the result
3189  * 'subpath' is the path representing the source of data
3190  * 'target' is the PathTarget to be computed
3191  * 'having_qual' is the HAVING quals if any
3192  * 'rollups' is a list of RollupData nodes
3193  * 'agg_costs' contains cost info about the aggregate functions to be computed
3194  */
3195 GroupingSetsPath *
3196 create_groupingsets_path(PlannerInfo *root,
3197  RelOptInfo *rel,
3198  Path *subpath,
3199  List *having_qual,
3200  AggStrategy aggstrategy,
3201  List *rollups,
3202  const AggClauseCosts *agg_costs)
3203 {
3204  GroupingSetsPath *pathnode = makeNode(GroupingSetsPath);
3205  PathTarget *target = rel->reltarget;
3206  ListCell *lc;
3207  bool is_first = true;
3208  bool is_first_sort = true;
3209 
3210  /* The topmost generated Plan node will be an Agg */
3211  pathnode->path.pathtype = T_Agg;
3212  pathnode->path.parent = rel;
3213  pathnode->path.pathtarget = target;
3214  pathnode->path.param_info = subpath->param_info;
3215  pathnode->path.parallel_aware = false;
3216  pathnode->path.parallel_safe = rel->consider_parallel &&
3217  subpath->parallel_safe;
3218  pathnode->path.parallel_workers = subpath->parallel_workers;
3219  pathnode->subpath = subpath;
3220 
3221  /*
3222  * Simplify callers by downgrading AGG_SORTED to AGG_PLAIN, and AGG_MIXED
3223  * to AGG_HASHED, here if possible.
3224  */
3225  if (aggstrategy == AGG_SORTED &&
3226  list_length(rollups) == 1 &&
3227  ((RollupData *) linitial(rollups))->groupClause == NIL)
3228  aggstrategy = AGG_PLAIN;
3229 
3230  if (aggstrategy == AGG_MIXED &&
3231  list_length(rollups) == 1)
3232  aggstrategy = AGG_HASHED;
3233 
3234  /*
3235  * Output will be in sorted order by group_pathkeys if, and only if, there
3236  * is a single rollup operation on a non-empty list of grouping
3237  * expressions.
3238  */
3239  if (aggstrategy == AGG_SORTED && list_length(rollups) == 1)
3240  pathnode->path.pathkeys = root->group_pathkeys;
3241  else
3242  pathnode->path.pathkeys = NIL;
3243 
3244  pathnode->aggstrategy = aggstrategy;
3245  pathnode->rollups = rollups;
3246  pathnode->qual = having_qual;
3247  pathnode->transitionSpace = agg_costs ? agg_costs->transitionSpace : 0;
3248 
3249  Assert(rollups != NIL);
3250  Assert(aggstrategy != AGG_PLAIN || list_length(rollups) == 1);
3251  Assert(aggstrategy != AGG_MIXED || list_length(rollups) > 1);
3252 
3253  foreach(lc, rollups)
3254  {
3255  RollupData *rollup = lfirst(lc);
3256  List *gsets = rollup->gsets;
3257  int numGroupCols = list_length(linitial(gsets));
3258 
3259  /*
3260  * In AGG_SORTED or AGG_PLAIN mode, the first rollup takes the
3261  * (already-sorted) input, and following ones do their own sort.
3262  *
3263  * In AGG_HASHED mode, there is one rollup for each grouping set.
3264  *
3265  * In AGG_MIXED mode, the first rollups are hashed, the first
3266  * non-hashed one takes the (already-sorted) input, and following ones
3267  * do their own sort.
3268  */
3269  if (is_first)
3270  {
3271  cost_agg(&pathnode->path, root,
3272  aggstrategy,
3273  agg_costs,
3274  numGroupCols,
3275  rollup->numGroups,
3276  having_qual,
3277  subpath->startup_cost,
3278  subpath->total_cost,
3279  subpath->rows,
3280  subpath->pathtarget->width);
3281  is_first = false;
3282  if (!rollup->is_hashed)
3283  is_first_sort = false;
3284  }
3285  else
3286  {
3287  Path sort_path; /* dummy for result of cost_sort */
3288  Path agg_path; /* dummy for result of cost_agg */
3289 
3290  if (rollup->is_hashed || is_first_sort)
3291  {
3292  /*
3293  * Account for cost of aggregation, but don't charge input
3294  * cost again
3295  */
3296  cost_agg(&agg_path, root,
3297  rollup->is_hashed ? AGG_HASHED : AGG_SORTED,
3298  agg_costs,
3299  numGroupCols,
3300  rollup->numGroups,
3301  having_qual,
3302  0.0, 0.0,
3303  subpath->rows,
3304  subpath->pathtarget->width);
3305  if (!rollup->is_hashed)
3306  is_first_sort = false;
3307  }
3308  else
3309  {
3310  /* Account for cost of sort, but don't charge input cost again */
3311  cost_sort(&sort_path, root, NIL,
3312  0.0,
3313  subpath->rows,
3314  subpath->pathtarget->width,
3315  0.0,
3316  work_mem,
3317  -1.0);
3318 
3319  /* Account for cost of aggregation */
3320 
3321  cost_agg(&agg_path, root,
3322  AGG_SORTED,
3323  agg_costs,
3324  numGroupCols,
3325  rollup->numGroups,
3326  having_qual,
3327  sort_path.startup_cost,
3328  sort_path.total_cost,
3329  sort_path.rows,
3330  subpath->pathtarget->width);
3331  }
3332 
3333  pathnode->path.total_cost += agg_path.total_cost;
3334  pathnode->path.rows += agg_path.rows;
3335  }
3336  }
3337 
3338  /* add tlist eval cost for each output row */
3339  pathnode->path.startup_cost += target->cost.startup;
3340  pathnode->path.total_cost += target->cost.startup +
3341  target->cost.per_tuple * pathnode->path.rows;
3342 
3343  return pathnode;
3344 }
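/*
 * [Editorial illustration -- not part of pathnode.c]
 * Rough cost sketch for the loop above, assuming AGG_SORTED with three
 * rollups over a presorted 1000-row input: the first rollup is costed by
 * cost_agg() including the input's startup and total cost; each later
 * rollup is costed as cost_sort() over the 1000 input rows plus cost_agg()
 * with zero input cost, and only its total cost and output rows are added
 * to pathnode->path.  Hashed rollups in AGG_MIXED mode skip the extra sort.
 */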
3345 
3346 /*
3347  * create_minmaxagg_path
3348  * Creates a pathnode that represents computation of MIN/MAX aggregates
3349  *
3350  * 'rel' is the parent relation associated with the result
3351  * 'target' is the PathTarget to be computed
3352  * 'mmaggregates' is a list of MinMaxAggInfo structs
3353  * 'quals' is the HAVING quals if any
3354  */
3355 MinMaxAggPath *
 3356 create_minmaxagg_path(PlannerInfo *root,
 3357  RelOptInfo *rel,
3358  PathTarget *target,
3359  List *mmaggregates,
3360  List *quals)
3361 {
3362  MinMaxAggPath *pathnode = makeNode(MinMaxAggPath);
3363  Cost initplan_cost;
3364  ListCell *lc;
3365 
3366  /* The topmost generated Plan node will be a Result */
3367  pathnode->path.pathtype = T_Result;
3368  pathnode->path.parent = rel;
3369  pathnode->path.pathtarget = target;
3370  /* For now, assume we are above any joins, so no parameterization */
3371  pathnode->path.param_info = NULL;
3372  pathnode->path.parallel_aware = false;
3373  pathnode->path.parallel_safe = true; /* might change below */
3374  pathnode->path.parallel_workers = 0;
3375  /* Result is one unordered row */
3376  pathnode->path.rows = 1;
3377  pathnode->path.pathkeys = NIL;
3378 
3379  pathnode->mmaggregates = mmaggregates;
3380  pathnode->quals = quals;
3381 
3382  /* Calculate cost of all the initplans, and check parallel safety */
3383  initplan_cost = 0;
3384  foreach(lc, mmaggregates)
3385  {
3386  MinMaxAggInfo *mminfo = (MinMaxAggInfo *) lfirst(lc);
3387 
3388  initplan_cost += mminfo->pathcost;
3389  if (!mminfo->path->parallel_safe)
3390  pathnode->path.parallel_safe = false;
3391  }
3392 
3393  /* add tlist eval cost for each output row, plus cpu_tuple_cost */
3394  pathnode->path.startup_cost = initplan_cost + target->cost.startup;
3395  pathnode->path.total_cost = initplan_cost + target->cost.startup +
3396  target->cost.per_tuple + cpu_tuple_cost;
3397 
3398  /*
3399  * Add cost of qual, if any --- but we ignore its selectivity, since our
3400  * rowcount estimate should be 1 no matter what the qual is.
3401  */
3402  if (quals)
3403  {
3404  QualCost qual_cost;
3405 
3406  cost_qual_eval(&qual_cost, quals, root);
3407  pathnode->path.startup_cost += qual_cost.startup;
3408  pathnode->path.total_cost += qual_cost.startup + qual_cost.per_tuple;
3409  }
3410 
3411  /*
3412  * If the initplans were all parallel-safe, also check safety of the
3413  * target and quals. (The Result node itself isn't parallelizable, but if
3414  * we are in a subquery then it can be useful for the outer query to know
3415  * that this one is parallel-safe.)
3416  */
3417  if (pathnode->path.parallel_safe)
3418  pathnode->path.parallel_safe =
3419  is_parallel_safe(root, (Node *) target->exprs) &&
3420  is_parallel_safe(root, (Node *) quals);
3421 
3422  return pathnode;
3423 }
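/*
 * [Editorial illustration -- not part of pathnode.c]
 * Cost shape of the path built above: with, say, two MIN/MAX aggregates
 * whose index-probe initplans cost 4.3 each, and ignoring target and qual
 * costs, startup_cost = 8.6 and total_cost = 8.6 + cpu_tuple_cost; the
 * path always reports exactly one output row.
 */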
3424 
3425 /*
3426  * create_windowagg_path
3427  * Creates a pathnode that represents computation of window functions
3428  *
3429  * 'rel' is the parent relation associated with the result
3430  * 'subpath' is the path representing the source of data
3431  * 'target' is the PathTarget to be computed
3432  * 'windowFuncs' is a list of WindowFunc structs
3433  * 'winclause' is a WindowClause that is common to all the WindowFuncs
 3434  * 'qual' is the list of WindowClause run conditions collected from
 3435  * lower-level WindowAggPaths; must always be NIL when topwindow == false
 3436  * 'topwindow' is true only for the top-level WindowAgg; false for all
 3437  * intermediate WindowAggs.
3438  *
3439  * The input must be sorted according to the WindowClause's PARTITION keys
3440  * plus ORDER BY keys.
3441  */
3442 WindowAggPath *
 3443 create_windowagg_path(PlannerInfo *root,
 3444  RelOptInfo *rel,
3445  Path *subpath,
3446  PathTarget *target,
3447  List *windowFuncs,
3448  WindowClause *winclause,
3449  List *qual,
3450  bool topwindow)
3451 {
3452  WindowAggPath *pathnode = makeNode(WindowAggPath);
3453 
3454  /* qual can only be set for the topwindow */
3455  Assert(qual == NIL || topwindow);
3456 
3457  pathnode->path.pathtype = T_WindowAgg;
3458  pathnode->path.parent = rel;
3459  pathnode->path.pathtarget = target;
3460  /* For now, assume we are above any joins, so no parameterization */
3461  pathnode->path.param_info = NULL;
3462  pathnode->path.parallel_aware = false;
3463  pathnode->path.parallel_safe = rel->consider_parallel &&
3464  subpath->parallel_safe;
3465  pathnode->path.parallel_workers = subpath->parallel_workers;
3466  /* WindowAgg preserves the input sort order */
3467  pathnode->path.pathkeys = subpath->pathkeys;
3468 
3469  pathnode->subpath = subpath;
3470  pathnode->winclause = winclause;
3471  pathnode->qual = qual;
3472  pathnode->topwindow = topwindow;
3473 
3474  /*
3475  * For costing purposes, assume that there are no redundant partitioning
3476  * or ordering columns; it's not worth the trouble to deal with that
3477  * corner case here. So we just pass the unmodified list lengths to
3478  * cost_windowagg.
3479  */
3480  cost_windowagg(&pathnode->path, root,
3481  windowFuncs,
3482  winclause,
3483  subpath->startup_cost,
3484  subpath->total_cost,
3485  subpath->rows);
3486 
3487  /* add tlist eval cost for each output row */
3488  pathnode->path.startup_cost += target->cost.startup;
3489  pathnode->path.total_cost += target->cost.startup +
3490  target->cost.per_tuple * pathnode->path.rows;
3491 
3492  return pathnode;
3493 }
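/*
 * [Editorial illustration -- not part of pathnode.c]
 * A minimal sketch of a call to the function above for one window clause
 * "wc".  "window_rel", "sorted_path" (already sorted by wc's PARTITION BY
 * plus ORDER BY keys), "window_target", "wfuncs_for_wc",
 * "runcondition_quals" and "is_topwindow" are assumed from the caller:
 *
 *     WindowAggPath *wpath =
 *         create_windowagg_path(root, window_rel, sorted_path, window_target,
 *                               wfuncs_for_wc, wc,
 *                               is_topwindow ? runcondition_quals : NIL,
 *                               is_topwindow);
 */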
3494 
3495 /*
3496  * create_setop_path
3497  * Creates a pathnode that represents computation of INTERSECT or EXCEPT
3498  *
3499  * 'rel' is the parent relation associated with the result
3500  * 'subpath' is the path representing the source of data
3501  * 'cmd' is the specific semantics (INTERSECT or EXCEPT, with/without ALL)
3502  * 'strategy' is the implementation strategy (sorted or hashed)
3503  * 'distinctList' is a list of SortGroupClause's representing the grouping
3504  * 'flagColIdx' is the column number where the flag column will be, if any
3505  * 'firstFlag' is the flag value for the first input relation when hashing;
3506  * or -1 when sorting
3507  * 'numGroups' is the estimated number of distinct groups
3508  * 'outputRows' is the estimated number of output rows
3509  */
3510 SetOpPath *
 3511 create_setop_path(PlannerInfo *root,
 3512  RelOptInfo *rel,
3513  Path *subpath,
3514  SetOpCmd cmd,
3515  SetOpStrategy strategy,
3516  List *distinctList,
3517  AttrNumber flagColIdx,
3518  int firstFlag,
3519  double numGroups,
3520  double outputRows)
3521 {
3522  SetOpPath *pathnode = makeNode(SetOpPath);
3523 
3524  pathnode->path.pathtype = T_SetOp;
3525  pathnode->path.parent = rel;
3526  /* SetOp doesn't project, so use source path's pathtarget */
3527  pathnode->path.pathtarget = subpath->pathtarget;
3528  /* For now, assume we are above any joins, so no parameterization */
3529  pathnode->path.param_info = NULL;
3530  pathnode->path.parallel_aware = false;
3531  pathnode->path.parallel_safe = rel->consider_parallel &&
3532  subpath->parallel_safe;
3533  pathnode->path.parallel_workers = subpath->parallel_workers;
3534  /* SetOp preserves the input sort order if in sort mode */
3535  pathnode->path.pathkeys =
3536  (strategy == SETOP_SORTED) ? subpath->pathkeys : NIL;
3537 
3538  pathnode->subpath = subpath;
3539  pathnode->cmd = cmd;
3540  pathnode->strategy = strategy;
3541  pathnode->distinctList = distinctList;
3542  pathnode->flagColIdx = flagColIdx;
3543  pathnode->firstFlag = firstFlag;
3544  pathnode->numGroups = numGroups;
3545 
3546  /*
3547  * Charge one cpu_operator_cost per comparison per input tuple. We assume
 3548  * all columns get compared for most of the tuples.
3549  */
3550  pathnode->path.startup_cost = subpath->startup_cost;
3551  pathnode->path.total_cost = subpath->total_cost +
3552  cpu_operator_cost * subpath->rows * list_length(distinctList);
3553  pathnode->path.rows = outputRows;
3554 
3555  return pathnode;
3556 }
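/*
 * [Editorial illustration -- not part of pathnode.c]
 * Worked example of the cost charge above: with subpath->rows = 10000,
 * three columns in distinctList, and the default cpu_operator_cost of
 * 0.0025, the SetOp adds 0.0025 * 10000 * 3 = 75 on top of the subpath's
 * total cost.
 */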
3557 
3558 /*
3559  * create_recursiveunion_path
3560  * Creates a pathnode that represents a recursive UNION node
3561  *
3562  * 'rel' is the parent relation associated with the result
3563  * 'leftpath' is the source of data for the non-recursive term
3564  * 'rightpath' is the source of data for the recursive term
3565  * 'target' is the PathTarget to be computed
3566  * 'distinctList' is a list of SortGroupClause's representing the grouping
3567  * 'wtParam' is the ID of Param representing work table
3568  * 'numGroups' is the estimated number of groups
3569  *
3570  * For recursive UNION ALL, distinctList is empty and numGroups is zero
3571  */
 3572 RecursiveUnionPath *
 3573 create_recursiveunion_path(PlannerInfo *root,
 3574  RelOptInfo *rel,
3575  Path *leftpath,
3576  Path *rightpath,
3577  PathTarget *target,
3578  List *distinctList,
3579  int wtParam,
3580  double numGroups)
3581 {
 3582  RecursiveUnionPath *pathnode = makeNode(RecursiveUnionPath);
 3583 
3584  pathnode->path.pathtype = T_RecursiveUnion;
3585  pathnode->path.parent = rel;
3586  pathnode->path.pathtarget = target;
3587  /* For now, assume we are above any joins, so no parameterization */
3588  pathnode->path.param_info = NULL;
3589  pathnode->path.parallel_aware = false;
3590  pathnode->path.parallel_safe = rel->consider_parallel &&
3591  leftpath->parallel_safe && rightpath->parallel_safe;
3592  /* Foolish, but we'll do it like joins for now: */
3593  pathnode->path.parallel_workers = leftpath->parallel_workers;
3594  /* RecursiveUnion result is always unsorted */
3595  pathnode->path.pathkeys = NIL;
3596 
3597  pathnode->leftpath = leftpath;
3598  pathnode->rightpath = rightpath;
3599  pathnode->distinctList = distinctList;
3600  pathnode->wtParam = wtParam;
3601  pathnode->numGroups = numGroups;
3602 
3603  cost_recursive_union(&pathnode->path, leftpath, rightpath);
3604 
3605  return pathnode;
3606 }
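/*
 * [Editorial illustration -- not part of pathnode.c]
 * A minimal sketch for a recursive UNION ALL, where no duplicate
 * elimination is needed (distinctList = NIL, numGroups = 0).  "result_rel",
 * "nrpath" (non-recursive term) and "rpath" (recursive term) are assumed
 * to come from the caller:
 *
 *     RecursiveUnionPath *rupath =
 *         create_recursiveunion_path(root, result_rel, nrpath, rpath,
 *                                    result_rel->reltarget,
 *                                    NIL, root->wt_param_id, 0);
 */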
3607 
3608 /*
3609  * create_lockrows_path
3610  * Creates a pathnode that represents acquiring row locks
3611  *
3612  * 'rel' is the parent relation associated with the result
3613  * 'subpath' is the path representing the source of data
3614  * 'rowMarks' is a list of PlanRowMark's
3615  * 'epqParam' is the ID of Param for EvalPlanQual re-eval
3616  */
3617 LockRowsPath *
 3618 create_lockrows_path(PlannerInfo *root, RelOptInfo *rel,
 3619  Path *subpath, List *rowMarks, int epqParam)
3620 {
3621  LockRowsPath *pathnode = makeNode(LockRowsPath);
3622 
3623  pathnode->path.pathtype = T_LockRows;
3624  pathnode->path.parent = rel;
3625  /* LockRows doesn't project, so use source path's pathtarget */
3626  pathnode->path.pathtarget = subpath->pathtarget;
3627  /* For now, assume we are above any joins, so no parameterization */
3628  pathnode->path.param_info = NULL;
3629  pathnode->path.parallel_aware = false;
3630  pathnode->path.parallel_safe = false;
3631  pathnode->path.parallel_workers = 0;
3632  pathnode->path.rows = subpath->rows;
3633 
3634  /*
3635  * The result cannot be assumed sorted, since locking might cause the sort
3636  * key columns to be replaced with new values.
3637  */
3638  pathnode->path.pathkeys = NIL;
3639 
3640  pathnode->subpath = subpath;
3641  pathnode->rowMarks = rowMarks;
3642  pathnode->epqParam = epqParam;
3643 
3644  /*
3645  * We should charge something extra for the costs of row locking and
3646  * possible refetches, but it's hard to say how much. For now, use
3647  * cpu_tuple_cost per row.
3648  */
3649  pathnode->path.startup_cost = subpath->startup_cost;
3650  pathnode->path.total_cost = subpath->total_cost +
3651  cpu_tuple_cost * subpath->rows;
3652 
3653  return pathnode;
3654 }
3655 
3656 /*
3657  * create_modifytable_path
3658  * Creates a pathnode that represents performing INSERT/UPDATE/DELETE/MERGE
3659  * mods
3660  *
3661  * 'rel' is the parent relation associated with the result
3662  * 'subpath' is a Path producing source data
3663  * 'operation' is the operation type
3664  * 'canSetTag' is true if we set the command tag/es_processed
3665  * 'nominalRelation' is the parent RT index for use of EXPLAIN
3666  * 'rootRelation' is the partitioned/inherited table root RTI, or 0 if none
3667  * 'partColsUpdated' is true if any partitioning columns are being updated,
 3668  * either from the target relation or a descendant partitioned table.
3669  * 'resultRelations' is an integer list of actual RT indexes of target rel(s)
3670  * 'updateColnosLists' is a list of UPDATE target column number lists
3671  * (one sublist per rel); or NIL if not an UPDATE
3672  * 'withCheckOptionLists' is a list of WCO lists (one per rel)
3673  * 'returningLists' is a list of RETURNING tlists (one per rel)
3674  * 'rowMarks' is a list of PlanRowMarks (non-locking only)
3675  * 'onconflict' is the ON CONFLICT clause, or NULL
3676  * 'epqParam' is the ID of Param for EvalPlanQual re-eval
3677  * 'mergeActionLists' is a list of lists of MERGE actions (one per rel)
3678  */
 3679 ModifyTablePath *
 3680 create_modifytable_path(PlannerInfo *root, RelOptInfo *rel,
 3681  Path *subpath,
3682  CmdType operation, bool canSetTag,
3683  Index nominalRelation, Index rootRelation,
3684  bool partColsUpdated,
3685  List *resultRelations,
3686  List *updateColnosLists,
3687  List *withCheckOptionLists, List *returningLists,
3688  List *rowMarks, OnConflictExpr *onconflict,
3689  List *mergeActionLists, int epqParam)
3690 {
 3691  ModifyTablePath *pathnode = makeNode(ModifyTablePath);
 3692 
3693  Assert(operation == CMD_MERGE ||
3694  (operation == CMD_UPDATE ?
3695  list_length(resultRelations) == list_length(updateColnosLists) :
3696  updateColnosLists == NIL));
3697  Assert(withCheckOptionLists == NIL ||
3698  list_length(resultRelations) == list_length(withCheckOptionLists));
3699  Assert(returningLists == NIL ||
3700  list_length(resultRelations) == list_length(returningLists));
3701 
3702  pathnode->path.pathtype = T_ModifyTable;
3703  pathnode->path.parent = rel;
3704  /* pathtarget is not interesting, just make it minimally valid */
3705  pathnode->path.pathtarget = rel->reltarget;
3706  /* For now, assume we are above any joins, so no parameterization */
3707  pathnode->path.param_info = NULL;
3708  pathnode->path.parallel_aware = false;
3709  pathnode->path.parallel_safe = false;
3710  pathnode->path.parallel_workers = 0;
3711  pathnode->path.pathkeys = NIL;
3712 
3713  /*
3714  * Compute cost & rowcount as subpath cost & rowcount (if RETURNING)
3715  *
3716  * Currently, we don't charge anything extra for the actual table
3717  * modification work, nor for the WITH CHECK OPTIONS or RETURNING
3718  * expressions if any. It would only be window dressing, since
3719  * ModifyTable is always a top-level node and there is no way for the
3720  * costs to change any higher-level planning choices. But we might want
3721  * to make it look better sometime.
3722  */
3723  pathnode->path.startup_cost = subpath->startup_cost;
3724  pathnode->path.total_cost = subpath->total_cost;
3725  if (returningLists != NIL)
3726  {
3727  pathnode->path.rows = subpath->rows;
3728 
3729  /*
3730  * Set width to match the subpath output. XXX this is totally wrong:
3731  * we should return an average of the RETURNING tlist widths. But
3732  * it's what happened historically, and improving it is a task for
3733  * another day. (Again, it's mostly window dressing.)
3734  */
3735  pathnode->path.pathtarget->width = subpath->pathtarget->width;
3736  }
3737  else
3738  {
3739  pathnode->path.rows = 0;
3740  pathnode->path.pathtarget->width = 0;
3741  }
3742 
3743  pathnode->subpath = subpath;
3744  pathnode->operation = operation;
3745  pathnode->canSetTag = canSetTag;
3746  pathnode->nominalRelation = nominalRelation;
3747  pathnode->rootRelation = rootRelation;
3748  pathnode->partColsUpdated = partColsUpdated;
3749  pathnode->resultRelations = resultRelations;
3750  pathnode->updateColnosLists = updateColnosLists;
3751  pathnode->withCheckOptionLists = withCheckOptionLists;
3752  pathnode->returningLists = returningLists;
3753  pathnode->rowMarks = rowMarks;
3754  pathnode->onconflict = onconflict;
3755  pathnode->epqParam = epqParam;
3756  pathnode->mergeActionLists = mergeActionLists;
3757 
3758  return pathnode;
3759 }
3760 
3761 /*
3762  * create_limit_path
3763  * Creates a pathnode that represents performing LIMIT/OFFSET
3764  *
3765  * In addition to providing the actual OFFSET and LIMIT expressions,
3766  * the caller must provide estimates of their values for costing purposes.
3767  * The estimates are as computed by preprocess_limit(), ie, 0 represents
3768  * the clause not being present, and -1 means it's present but we could
3769  * not estimate its value.
3770  *
3771  * 'rel' is the parent relation associated with the result
3772  * 'subpath' is the path representing the source of data
3773  * 'limitOffset' is the actual OFFSET expression, or NULL
3774  * 'limitCount' is the actual LIMIT expression, or NULL
3775  * 'offset_est' is the estimated value of the OFFSET expression
3776  * 'count_est' is the estimated value of the LIMIT expression
3777  */
3778 LimitPath *
 3779 create_limit_path(PlannerInfo *root, RelOptInfo *rel,
 3780  Path *subpath,
3781  Node *limitOffset, Node *limitCount,
3782  LimitOption limitOption,
3783  int64 offset_est, int64 count_est)
3784 {
3785  LimitPath *pathnode = makeNode(LimitPath);
3786 
3787  pathnode->path.pathtype = T_Limit;
3788  pathnode->path.parent = rel;
3789  /* Limit doesn't project, so use source path's pathtarget */
3790  pathnode->path.pathtarget = subpath->pathtarget;
3791  /* For now, assume we are above any joins, so no parameterization */
3792  pathnode->path.param_info = NULL;
3793  pathnode->path.parallel_aware = false;
3794  pathnode->path.parallel_safe = rel->consider_parallel &&
3795  subpath->parallel_safe;
3796  pathnode->path.parallel_workers = subpath->parallel_workers;
3797  pathnode->path.rows = subpath->rows;
3798  pathnode->path.startup_cost = subpath->startup_cost;
3799  pathnode->path.total_cost = subpath->total_cost;
3800  pathnode->path.pathkeys = subpath->pathkeys;
3801  pathnode->subpath = subpath;
3802  pathnode->limitOffset = limitOffset;
3803  pathnode->limitCount = limitCount;
3804  pathnode->limitOption = limitOption;
3805 
3806  /*
3807  * Adjust the output rows count and costs according to the offset/limit.
3808  */
3809  adjust_limit_rows_costs(&pathnode->path.rows,
3810  &pathnode->path.startup_cost,
3811  &pathnode->path.total_cost,
3812  offset_est, count_est);
3813 
3814  return pathnode;
3815 }
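/*
 * [Editorial illustration -- not part of pathnode.c]
 * A minimal sketch of a plain LIMIT 100 on top of "best_path" in
 * "final_rel"; "limitCount" is the LIMIT expression node prepared by the
 * caller, and the estimates follow the preprocess_limit() convention
 * described above (offset_est = 0 means no OFFSET clause):
 *
 *     LimitPath *lpath =
 *         create_limit_path(root, final_rel, best_path,
 *                           NULL, limitCount, LIMIT_OPTION_COUNT,
 *                           0, 100);
 */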
3816 
3817 /*
3818  * adjust_limit_rows_costs
3819  * Adjust the size and cost estimates for a LimitPath node according to the
3820  * offset/limit.
3821  *
3822  * This is only a cosmetic issue if we are at top level, but if we are
3823  * building a subquery then it's important to report correct info to the outer
3824  * planner.
3825  *
3826  * When the offset or count couldn't be estimated, use 10% of the estimated
3827  * number of rows emitted from the subpath.
3828  *
3829  * XXX we don't bother to add eval costs of the offset/limit expressions
3830  * themselves to the path costs. In theory we should, but in most cases those
3831  * expressions are trivial and it's just not worth the trouble.
3832  */
3833 void
3834 adjust_limit_rows_costs(double *rows, /* in/out parameter */
3835  Cost *startup_cost, /* in/out parameter */
3836  Cost *total_cost, /* in/out parameter */
3837  int64 offset_est,
3838  int64 count_est)
3839 {
3840  double input_rows = *rows;
3841  Cost input_startup_cost = *startup_cost;
3842  Cost input_total_cost = *total_cost;
3843 
3844  if (offset_est != 0)
3845  {
3846  double offset_rows;
3847 
3848  if (offset_est > 0)
3849  offset_rows = (double) offset_est;
3850  else
3851  offset_rows = clamp_row_est(input_rows * 0.10);
3852  if (offset_rows > *rows)
3853  offset_rows = *rows;
3854  if (input_rows > 0)
3855  *startup_cost +=
3856  (input_total_cost - input_startup_cost)
3857  * offset_rows / input_rows;
3858  *rows -= offset_rows;
3859  if (*rows < 1)
3860  *rows = 1;
3861  }
3862 
3863  if (count_est != 0)
3864  {
3865  double count_rows;
3866 
3867  if (count_est > 0)
3868  count_rows = (double) count_est;
3869  else
3870  count_rows = clamp_row_est(input_rows * 0.10);
3871  if (count_rows > *rows)
3872  count_rows = *rows;
3873  if (input_rows > 0)
3874  *total_cost = *startup_cost +
3875  (input_total_cost - input_startup_cost)
3876  * count_rows / input_rows;
3877  *rows = count_rows;
3878  if (*rows < 1)
3879  *rows = 1;
3880  }
3881 }
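/*
 * [Editorial illustration -- not part of pathnode.c]
 * Worked example of the arithmetic above: with input rows = 1000,
 * startup_cost = 0 and total_cost = 100, estimates offset_est = 10 and
 * count_est = 20 give
 *     offset step: startup_cost += 100 * 10/1000 = 1.0, rows = 990
 *     count step:  total_cost  = 1.0 + 100 * 20/1000 = 3.0, rows = 20
 */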
3882 
3883 
3884 /*
3885  * reparameterize_path
3886  * Attempt to modify a Path to have greater parameterization
3887  *
3888  * We use this to attempt to bring all child paths of an appendrel to the
3889  * same parameterization level, ensuring that they all enforce the same set
3890  * of join quals (and thus that that parameterization can be attributed to
3891  * an append path built from such paths). Currently, only a few path types
3892  * are supported here, though more could be added at need. We return NULL
3893  * if we can't reparameterize the given path.
3894  *
3895  * Note: we intentionally do not pass created paths to add_path(); it would
3896  * possibly try to delete them on the grounds of being cost-inferior to the
3897  * paths they were made from, and we don't want that. Paths made here are
3898  * not necessarily of general-purpose usefulness, but they can be useful
3899  * as members of an append path.
3900  */
3901 Path *
 3902 reparameterize_path(PlannerInfo *root, Path *path,
 3903  Relids required_outer,
3904  double loop_count)
3905 {
3906  RelOptInfo *rel = path->parent;
3907 
3908  /* Can only increase, not decrease, path's parameterization */
3909  if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer))
3910  return NULL;
3911  switch (path->pathtype)
3912  {
3913  case T_SeqScan:
3914  return create_seqscan_path(root, rel, required_outer, 0);
3915  case T_SampleScan:
3916  return (Path *) create_samplescan_path(root, rel, required_outer);
3917  case T_IndexScan:
3918  case T_IndexOnlyScan:
3919  {
3920  IndexPath *ipath = (IndexPath *) path;
3921  IndexPath *newpath = makeNode(IndexPath);
3922 
3923  /*
3924  * We can't use create_index_path directly, and would not want
3925  * to because it would re-compute the indexqual conditions
3926  * which is wasted effort. Instead we hack things a bit:
3927  * flat-copy the path node, revise its param_info, and redo
3928  * the cost estimate.
3929  */
3930  memcpy(newpath, ipath, sizeof(IndexPath));
3931  newpath->path.param_info =
3932  get_baserel_parampathinfo(root, rel, required_outer);
3933  cost_index(newpath, root, loop_count, false);
3934  return (Path *) newpath;
3935  }
3936  case T_BitmapHeapScan:
3937  {
3938  BitmapHeapPath *bpath = (BitmapHeapPath *) path;
3939 
3940  return (Path *) create_bitmap_heap_path(root,
3941  rel,
3942  bpath->bitmapqual,
3943  required_outer,
3944  loop_count, 0);
3945  }
3946  case T_SubqueryScan:
3947  {
3948  SubqueryScanPath *spath = (SubqueryScanPath *) path;
3949  Path *subpath = spath->subpath;
3950  bool trivial_pathtarget;
3951 
3952  /*
3953  * If existing node has zero extra cost, we must have decided
3954  * its target is trivial. (The converse is not true, because
3955  * it might have a trivial target but quals to enforce; but in
3956  * that case the new node will too, so it doesn't matter
3957  * whether we get the right answer here.)
3958  */
3959  trivial_pathtarget =
3960  (subpath->total_cost == spath->path.total_cost);
3961 
3962  return (Path *) create_subqueryscan_path(root,
3963  rel,
3964  subpath,
3965  trivial_pathtarget,
3966  spath->path.pathkeys,
3967  required_outer);
3968  }
3969  case T_Result:
3970  /* Supported only for RTE_RESULT scan paths */
3971  if (IsA(path, Path))
3972  return create_resultscan_path(root, rel, required_outer);
3973  break;
3974  case T_Append:
3975  {
3976  AppendPath *apath = (AppendPath *) path;
3977  List *childpaths = NIL;
3978  List *partialpaths = NIL;
3979  int i;
3980  ListCell *lc;
3981 
3982  /* Reparameterize the children */
3983  i = 0;
3984  foreach(lc, apath->subpaths)
3985  {
3986  Path *spath = (Path *) lfirst(lc);
3987 
3988  spath = reparameterize_path(root, spath,
3989  required_outer,
3990  loop_count);
3991  if (spath == NULL)
3992  return NULL;
3993  /* We have to re-split the regular and partial paths */
3994  if (i < apath->first_partial_path)
3995  childpaths = lappend(childpaths, spath);
3996  else
3997  partialpaths = lappend(partialpaths, spath);
3998  i++;
3999  }
4000  return (Path *)
4001  create_append_path(root, rel, childpaths, partialpaths,
4002  apath->path.pathkeys, required_outer,
4003  apath->path.parallel_workers,
4004  apath->path.parallel_aware,
4005  -1);
4006  }
4007  case T_Material:
4008  {
4009  MaterialPath *mpath = (MaterialPath *) path;
4010  Path *spath = mpath->subpath;
4011 
4012  spath = reparameterize_path(root, spath,
4013  required_outer,
4014  loop_count);
4015  if (spath == NULL)
4016  return NULL;
4017  return (Path *) create_material_path(rel, spath);
4018  }
4019  case T_Memoize:
4020  {
4021  MemoizePath *mpath = (MemoizePath *) path;
4022  Path *spath = mpath->subpath;
4023 
4024  spath = reparameterize_path(root, spath,
4025  required_outer,
4026  loop_count);
4027  if (spath == NULL)
4028  return NULL;
4029  return (Path *) create_memoize_path(root, rel,
4030  spath,
4031  mpath->param_exprs,
4032  mpath->hash_operators,
4033  mpath->singlerow,
4034  mpath->binary_mode,
4035  mpath->calls);
4036  }
4037  default:
4038  break;
4039  }
4040  return NULL;
4041 }
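/*
 * [Editorial illustration -- not part of pathnode.c]
 * Typical usage pattern for the function above when bringing an appendrel
 * child path up to a common parameterization ("child_path" and
 * "required_outer" assumed to come from the caller):
 *
 *     Path *rpath = reparameterize_path(root, child_path,
 *                                       required_outer, 1.0);
 *
 *     if (rpath == NULL)
 *         ... skip this child; a parameterized append can't be built ...
 */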
4042 
4043 /*
4044  * reparameterize_path_by_child
4045  * Given a path parameterized by the parent of the given child relation,
4046  * translate the path to be parameterized by the given child relation.
4047  *
4048  * The function creates a new path of the same type as the given path, but
4049  * parameterized by the given child relation. Most fields from the original
4050  * path can simply be flat-copied, but any expressions must be adjusted to
4051  * refer to the correct varnos, and any paths must be recursively
4052  * reparameterized. Other fields that refer to specific relids also need
4053  * adjustment.
4054  *
4055  * The cost, number of rows, width and parallel path properties depend upon
4056  * path->parent, which does not change during the translation. Hence those
4057  * members are copied as they are.
4058  *
4059  * Currently, only a few path types are supported here, though more could be
4060  * added at need. We return NULL if we can't reparameterize the given path.
4061  */
4062 Path *
 4063 reparameterize_path_by_child(PlannerInfo *root, Path *path,
 4064  RelOptInfo *child_rel)
4065 {
4066 
4067 #define FLAT_COPY_PATH(newnode, node, nodetype) \
4068  ( (newnode) = makeNode(nodetype), \
4069  memcpy((newnode), (node), sizeof(nodetype)) )
4070 
4071 #define ADJUST_CHILD_ATTRS(node) \
4072  ((node) = \
4073  (List *) adjust_appendrel_attrs_multilevel(root, (Node *) (node), \
4074  child_rel, \
4075  child_rel->top_parent))
4076 
4077 #define REPARAMETERIZE_CHILD_PATH(path) \
4078 do { \
4079  (path) = reparameterize_path_by_child(root, (path), child_rel); \
4080  if ((path) == NULL) \
4081  return NULL; \
4082 } while(0)
4083 
4084 #define REPARAMETERIZE_CHILD_PATH_LIST(pathlist) \
4085 do { \
4086  if ((pathlist) != NIL) \
4087  { \
4088  (pathlist) = reparameterize_pathlist_by_child(root, (pathlist), \
4089  child_rel); \
4090  if ((pathlist) == NIL) \
4091  return NULL; \
4092  } \
4093 } while(0)
4094 
4095  Path *new_path;
4096  ParamPathInfo *new_ppi;
4097  ParamPathInfo *old_ppi;
4098  Relids required_outer;
4099 
4100  /*
 4101  * If the path is not parameterized by the parent of the given relation, it
4102  * doesn't need reparameterization.
4103  */
4104  if (!path->param_info ||
4105  !bms_overlap(PATH_REQ_OUTER(path), child_rel->top_parent_relids))
4106  return path;
4107 
4108  /*
4109  * If possible, reparameterize the given path, making a copy.
4110  *
4111  * This function is currently only applied to the inner side of a nestloop
4112  * join that is being partitioned by the partitionwise-join code. Hence,
4113  * we need only support path types that plausibly arise in that context.
4114  * (In particular, supporting sorted path types would be a waste of code
4115  * and cycles: even if we translated them here, they'd just lose in
4116  * subsequent cost comparisons.) If we do see an unsupported path type,
4117  * that just means we won't be able to generate a partitionwise-join plan
4118  * using that path type.
4119  */
4120  switch (nodeTag(path))
4121  {
4122  case T_Path:
4123  FLAT_COPY_PATH(new_path, path, Path);
4124  break;
4125 
4126  case T_IndexPath:
4127  {
4128  IndexPath *ipath;
4129 
4130  FLAT_COPY_PATH(ipath, path, IndexPath);
 4131  ADJUST_CHILD_ATTRS(ipath->indexclauses);
 4132  new_path = (Path *) ipath;
4133  }
4134  break;
4135 
4136  case T_BitmapHeapPath:
4137  {
4138  BitmapHeapPath *bhpath;
4139 
4140  FLAT_COPY_PATH(bhpath, path, BitmapHeapPath);
 4141  REPARAMETERIZE_CHILD_PATH(bhpath->bitmapqual);
 4142  new_path = (Path *) bhpath;
4143  }
4144  break;
4145 
4146  case T_BitmapAndPath:
4147  {
4148  BitmapAndPath *bapath;
4149 
4150  FLAT_COPY_PATH(bapath, path, BitmapAndPath);
 4151  REPARAMETERIZE_CHILD_PATH_LIST(bapath->bitmapquals);
 4152  new_path = (Path *) bapath;
4153  }
4154  break;
4155 
4156  case T_BitmapOrPath:
4157  {
4158  BitmapOrPath *bopath;
4159 
4160  FLAT_COPY_PATH(bopath, path, BitmapOrPath);
 4161  REPARAMETERIZE_CHILD_PATH_LIST(bopath->bitmapquals);
 4162  new_path = (Path *) bopath;
4163  }
4164  break;
4165 
4166  case T_ForeignPath:
4167  {
 4168  ForeignPath *fpath;
 4169  ReparameterizeForeignPathByChild_function rfpc_func;
 4170 
 4171  FLAT_COPY_PATH(fpath, path, ForeignPath);
 4172  if (fpath->fdw_outerpath)
 4173  REPARAMETERIZE_CHILD_PATH(fpath->fdw_outerpath);
 4174  if (fpath->fdw_restrictinfo)
 4175  ADJUST_CHILD_ATTRS(fpath->fdw_restrictinfo);
4176 
4177  /* Hand over to FDW if needed. */
4178  rfpc_func =
4179  path->parent->fdwroutine->ReparameterizeForeignPathByChild;
4180  if (rfpc_func)
4181  fpath->fdw_private = rfpc_func(root, fpath->fdw_private,
4182  child_rel);
4183  new_path = (Path *) fpath;
4184  }
4185  break;
4186 
4187  case T_CustomPath:
4188  {
4189  CustomPath *cpath;
4190 
 4191  FLAT_COPY_PATH(cpath, path, CustomPath);
 4192  REPARAMETERIZE_CHILD_PATH_LIST(cpath->custom_paths);
 4193  if (cpath->custom_restrictinfo)
 4194  ADJUST_CHILD_ATTRS(cpath->custom_restrictinfo);
 4195  if (cpath->methods &&
 4196  cpath->methods->ReparameterizeCustomPathByChild)
 4197  cpath->custom_private =
 4198  cpath->methods->ReparameterizeCustomPathByChild(root,
 4199  cpath->custom_private,
 4200  child_rel);
4201  new_path = (Path *) cpath;
4202  }
4203  break;
4204 
4205  case T_NestPath:
4206  {
4207  JoinPath *jpath;
4208  NestPath *npath;
4209 
4210  FLAT_COPY_PATH(npath, path, NestPath);
4211 
4212  jpath = (JoinPath *) npath;
 4213  REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
 4214  REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
 4215  ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
 4216  new_path = (Path *) npath;
4217  }
4218  break;
4219 
4220  case T_MergePath:
4221  {
4222  JoinPath *jpath;
4223  MergePath *mpath;
4224 
4225  FLAT_COPY_PATH(mpath, path, MergePath);
4226 
4227  jpath = (JoinPath *) mpath;
 4228  REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
 4229  REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
 4230  ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
 4231  ADJUST_CHILD_ATTRS(mpath->path_mergeclauses);
 4232  new_path = (Path *) mpath;
4233  }
4234  break;
4235 
4236  case T_HashPath:
4237  {
4238  JoinPath *jpath;
4239  HashPath *hpath;
4240 
4241  FLAT_COPY_PATH(hpath, path, HashPath);
4242 
4243  jpath = (JoinPath *) hpath;
 4244  REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
 4245  REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
 4246  ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
 4247  ADJUST_CHILD_ATTRS(hpath->path_hashclauses);
 4248  new_path = (Path *) hpath;
4249  }
4250  break;
4251 
4252  case T_AppendPath:
4253  {
4254  AppendPath *apath;
4255 
4256  FLAT_COPY_PATH(apath, path, AppendPath);
 4257  REPARAMETERIZE_CHILD_PATH_LIST(apath->subpaths);
 4258  new_path = (Path *) apath;
4259  }
4260  break;
4261 
4262  case T_MaterialPath:
4263  {
4264  MaterialPath *mpath;
4265 
4266  FLAT_COPY_PATH(mpath, path, MaterialPath);
 4267  REPARAMETERIZE_CHILD_PATH(mpath->subpath);
 4268  new_path = (Path *) mpath;
4269  }
4270  break;
4271 
4272  case T_MemoizePath:
4273  {
4274  MemoizePath *mpath;
4275 
4276  FLAT_COPY_PATH(mpath, path, MemoizePath);
 4277  REPARAMETERIZE_CHILD_PATH(mpath->subpath);
 4278  ADJUST_CHILD_ATTRS(mpath->param_exprs);
 4279  new_path = (Path *) mpath;
4280  }
4281  break;
4282 
4283  case T_GatherPath:
4284  {
4285  GatherPath *gpath;
4286 
4287  FLAT_COPY_PATH(gpath, path, GatherPath);
 4288  REPARAMETERIZE_CHILD_PATH(gpath->subpath);
 4289  new_path = (Path *) gpath;
4290  }
4291  break;
4292 
4293  default:
4294 
4295  /* We don't know how to reparameterize this path. */
4296  return NULL;
4297  }
4298 
4299  /*
4300  * Adjust the parameterization information, which refers to the topmost
4301  * parent. The topmost parent can be multiple levels away from the given
4302  * child, hence use multi-level expression adjustment routines.
4303  */
4304  old_ppi = new_path->param_info;
4305  required_outer =
 4306  adjust_child_relids_multilevel(root, old_ppi->ppi_req_outer,
 4307  child_rel,
4308  child_rel->top_parent);
4309 
4310  /* If we already have a PPI for this parameterization, just return it */
4311  new_ppi = find_param_path_info(new_path->parent, required_outer);
4312 
4313  /*
4314  * If not, build a new one and link it to the list of PPIs. For the same
4315  * reason as explained in mark_dummy_rel(), allocate new PPI in the same
4316  * context the given RelOptInfo is in.
4317  */
4318  if (new_ppi == NULL)
4319  {
4320  MemoryContext oldcontext;
4321  RelOptInfo *rel = path->parent;
4322 
4323  oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
4324 
4325  new_ppi = makeNode(ParamPathInfo);
4326  new_ppi->ppi_req_outer = bms_copy(required_outer);
4327  new_ppi->ppi_rows = old_ppi->ppi_rows;
4328  new_ppi->ppi_clauses = old_ppi->ppi_clauses;
4329  ADJUST_CHILD_ATTRS(new_ppi->ppi_clauses);
4330  new_ppi->ppi_serials = bms_copy(old_ppi->ppi_serials);
4331  rel->ppilist = lappend(rel->ppilist, new_ppi);
4332 
4333  MemoryContextSwitchTo(oldcontext);
4334  }
4335  bms_free(required_outer);
4336 
4337  new_path->param_info = new_ppi;
4338 
4339  /*
4340  * Adjust the path target if the parent of the outer relation is
4341  * referenced in the targetlist. This can happen when only the parent of
 4342  * the outer relation is laterally referenced in this relation.
4343  */
4344  if (bms_overlap(path->parent->lateral_relids,
4345  child_rel->top_parent_relids))
4346  {
4347  new_path->pathtarget = copy_pathtarget(new_path->pathtarget);
4348  ADJUST_CHILD_ATTRS(new_path->pathtarget->exprs);
4349  }
4350 
4351  return new_path;
4352 }
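/*
 * [Editorial illustration -- not part of pathnode.c]
 * Typical usage pattern for the function above in partitionwise-join
 * planning: a nestloop inner path that is parameterized by the parent of
 * the outer child relation gets translated before the child join is built
 * ("inner_path" and "outer_child_rel" assumed from the caller):
 *
 *     inner_path = reparameterize_path_by_child(root, inner_path,
 *                                               outer_child_rel);
 *     if (inner_path == NULL)
 *         return;        (give up on this partitionwise join)
 */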
4353 
4354 /*
4355  * reparameterize_pathlist_by_child
4356  * Helper function to reparameterize a list of paths by given child rel.
4357  */
4358 static List *
 4359 reparameterize_pathlist_by_child(PlannerInfo *root,
 4360  List *pathlist,
4361  RelOptInfo *child_rel)
4362 {
4363  ListCell *lc;
4364  List *result = NIL;
4365 
4366  foreach(lc, pathlist)
4367  {
4368  Path *path = reparameterize_path_by_child(root, lfirst(lc),
4369  child_rel);
4370 
4371  if (path == NULL)
4372  {
4373  list_free(result);
4374  return NIL;
4375  }
4376 
4377  result = lappend(result, path);
4378  }
4379 
4380  return result;
4381 }
Definition: pathnodes.h:1968
Path * subpath
Definition: pathnodes.h:1961
List * param_exprs
Definition: pathnodes.h:1963
Cardinality limit_tuples
Definition: pathnodes.h:1925
List * outersortkeys
Definition: pathnodes.h:2102
List * innersortkeys
Definition: pathnodes.h:2103
JoinPath jpath
Definition: pathnodes.h:2100
List * path_mergeclauses
Definition: pathnodes.h:2101
List * quals
Definition: pathnodes.h:2278
List * mmaggregates
Definition: pathnodes.h:2277
bool partColsUpdated
Definition: pathnodes.h:2348
List * returningLists
Definition: pathnodes.h:2352
List * resultRelations
Definition: pathnodes.h:2349
List * withCheckOptionLists
Definition: pathnodes.h:2351
List * updateColnosLists
Definition: pathnodes.h:2350
OnConflictExpr * onconflict
Definition: pathnodes.h:2354
CmdType operation
Definition: pathnodes.h:2344
Index rootRelation
Definition: pathnodes.h:2347
Index nominalRelation
Definition: pathnodes.h:2346
List * mergeActionLists
Definition: pathnodes.h:2356
JoinPath jpath
Definition: pathnodes.h:2060
Definition: nodes.h:129
Cardinality ppi_rows
Definition: pathnodes.h:1548
List * ppi_clauses
Definition: pathnodes.h:1549
Bitmapset * ppi_serials
Definition: pathnodes.h:1550
Relids ppi_req_outer
Definition: pathnodes.h:1547
List * exprs
Definition: pathnodes.h:1501
QualCost cost
Definition: pathnodes.h:1507
List * pathkeys
Definition: pathnodes.h:1633
NodeTag pathtype
Definition: pathnodes.h:1594
Cardinality rows
Definition: pathnodes.h:1628
Cost startup_cost
Definition: pathnodes.h:1629
int parallel_workers
Definition: pathnodes.h:1625
Cost total_cost
Definition: pathnodes.h:1630
bool parallel_aware
Definition: pathnodes.h:1621
bool parallel_safe
Definition: pathnodes.h:1623
int num_groupby_pathkeys
Definition: pathnodes.h:392
Relids all_query_rels
Definition: pathnodes.h:266
List * group_pathkeys
Definition: pathnodes.h:385
Cardinality limit_tuples
Definition: pathnodes.h:480
Path * subpath
Definition: pathnodes.h:2154
Path * subpath
Definition: pathnodes.h:2142
Cost per_tuple
Definition: pathnodes.h:48
Cost startup
Definition: pathnodes.h:47
Query * subquery
Definition: parsenodes.h:1073
Cardinality numGroups
Definition: pathnodes.h:2319
bool consider_param_startup
Definition: pathnodes.h:870
List * ppilist
Definition: pathnodes.h:884
Relids relids
Definition: pathnodes.h:856
struct PathTarget * reltarget
Definition: pathnodes.h:878
Index relid
Definition: pathnodes.h:903
bool consider_parallel
Definition: pathnodes.h:872
Relids top_parent_relids
Definition: pathnodes.h:988
Relids lateral_relids
Definition: pathnodes.h:898
List * cheapest_parameterized_paths
Definition: pathnodes.h:889
List * pathlist
Definition: pathnodes.h:883
RelOptKind reloptkind
Definition: pathnodes.h:850
struct Path * cheapest_unique_path
Definition: pathnodes.h:888
struct Path * cheapest_startup_path
Definition: pathnodes.h:886
struct Path * cheapest_total_path
Definition: pathnodes.h:887
bool consider_startup
Definition: pathnodes.h:868
List * partial_pathlist
Definition: pathnodes.h:885
Cardinality rows
Definition: pathnodes.h:862
RTEKind rtekind
Definition: pathnodes.h:907
int rinfo_serial
Definition: pathnodes.h:2598
Cardinality numGroups
Definition: pathnodes.h:2252
List * gsets
Definition: pathnodes.h:2250
bool is_hashed
Definition: pathnodes.h:2254
List * distinctList
Definition: pathnodes.h:2303
Cardinality numGroups
Definition: pathnodes.h:2306
int firstFlag
Definition: pathnodes.h:2305
Path * subpath
Definition: pathnodes.h:2300
SetOpCmd cmd
Definition: pathnodes.h:2301
Path path
Definition: pathnodes.h:2299
SetOpStrategy strategy
Definition: pathnodes.h:2302
AttrNumber flagColIdx
Definition: pathnodes.h:2304
Path path
Definition: pathnodes.h:2167
Path * subpath
Definition: pathnodes.h:2168
List * semi_rhs_exprs
Definition: pathnodes.h:2868
JoinType jointype
Definition: pathnodes.h:2857
Relids syn_righthand
Definition: pathnodes.h:2856
List * semi_operators
Definition: pathnodes.h:2867
List * tidquals
Definition: pathnodes.h:1792
Path path
Definition: pathnodes.h:1791
List * tidrangequals
Definition: pathnodes.h:1804
Path * subpath
Definition: pathnodes.h:1996
List * uniq_exprs
Definition: pathnodes.h:1999
UniquePathMethod umethod
Definition: pathnodes.h:1997
List * in_operators
Definition: pathnodes.h:1998
Definition: primnodes.h:234
AttrNumber varattno
Definition: primnodes.h:246
int varno
Definition: primnodes.h:241
Path * subpath
Definition: pathnodes.h:2287
WindowClause * winclause
Definition: pathnodes.h:2288
Definition: type.h:95
PathTarget * copy_pathtarget(PathTarget *src)
Definition: tlist.c:657