createplan.c
1 /*-------------------------------------------------------------------------
2  *
3  * createplan.c
4  * Routines to create the desired plan for processing a query.
5  * Planning is complete, we just need to convert the selected
6  * Path into a Plan.
7  *
8  * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
9  * Portions Copyright (c) 1994, Regents of the University of California
10  *
11  *
12  * IDENTIFICATION
13  * src/backend/optimizer/plan/createplan.c
14  *
15  *-------------------------------------------------------------------------
16  */
17 #include "postgres.h"
18 
19 #include <math.h>
20 
21 #include "access/sysattr.h"
22 #include "catalog/pg_class.h"
23 #include "foreign/fdwapi.h"
24 #include "miscadmin.h"
25 #include "nodes/extensible.h"
26 #include "nodes/makefuncs.h"
27 #include "nodes/nodeFuncs.h"
28 #include "optimizer/clauses.h"
29 #include "optimizer/cost.h"
30 #include "optimizer/optimizer.h"
31 #include "optimizer/paramassign.h"
32 #include "optimizer/paths.h"
33 #include "optimizer/placeholder.h"
34 #include "optimizer/plancat.h"
35 #include "optimizer/planmain.h"
36 #include "optimizer/prep.h"
37 #include "optimizer/restrictinfo.h"
38 #include "optimizer/subselect.h"
39 #include "optimizer/tlist.h"
40 #include "parser/parse_clause.h"
41 #include "parser/parsetree.h"
42 #include "partitioning/partprune.h"
43 #include "utils/lsyscache.h"
44 
45 
46 /*
47  * Flag bits that can appear in the flags argument of create_plan_recurse().
48  * These can be OR-ed together.
49  *
50  * CP_EXACT_TLIST specifies that the generated plan node must return exactly
51  * the tlist specified by the path's pathtarget (this overrides both
52  * CP_SMALL_TLIST and CP_LABEL_TLIST, if those are set). Otherwise, the
53  * plan node is allowed to return just the Vars and PlaceHolderVars needed
54  * to evaluate the pathtarget.
55  *
56  * CP_SMALL_TLIST specifies that a narrower tlist is preferred. This is
57  * passed down by parent nodes such as Sort and Hash, which will have to
58  * store the returned tuples.
59  *
60  * CP_LABEL_TLIST specifies that the plan node must return columns matching
61  * any sortgrouprefs specified in its pathtarget, with appropriate
62  * ressortgroupref labels. This is passed down by parent nodes such as Sort
63  * and Group, which need these values to be available in their inputs.
64  *
65  * CP_IGNORE_TLIST specifies that the caller plans to replace the targetlist,
66  * and therefore it doesn't matter a bit what target list gets generated.
67  */
68 #define CP_EXACT_TLIST 0x0001 /* Plan must return specified tlist */
69 #define CP_SMALL_TLIST 0x0002 /* Prefer narrower tlists */
70 #define CP_LABEL_TLIST 0x0004 /* tlist must contain sortgrouprefs */
71 #define CP_IGNORE_TLIST 0x0008 /* caller will replace tlist */
72 
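/*
 * Illustrative sketch (not part of the original file): how these flag bits
 * are typically passed and tested.  The caller shown here is hypothetical,
 * but the pattern matches the real callers later in this file, e.g.
 * create_material_plan() ORs CP_SMALL_TLIST into the flags it passes down,
 * and create_scan_plan() tests for exactly CP_IGNORE_TLIST.
 *
 *     subplan = create_plan_recurse(root, best_path->subpath,
 *                                   flags | CP_SMALL_TLIST);
 *
 *     if (flags & CP_EXACT_TLIST)
 *         tlist = build_path_tlist(root, best_path);
 *     else if (flags == CP_IGNORE_TLIST)
 *         tlist = NULL;
 */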
73 
74 static Plan *create_plan_recurse(PlannerInfo *root, Path *best_path,
75  int flags);
76 static Plan *create_scan_plan(PlannerInfo *root, Path *best_path,
77  int flags);
78 static List *build_path_tlist(PlannerInfo *root, Path *path);
79 static bool use_physical_tlist(PlannerInfo *root, Path *path, int flags);
80 static List *get_gating_quals(PlannerInfo *root, List *quals);
81 static Plan *create_gating_plan(PlannerInfo *root, Path *path, Plan *plan,
82  List *gating_quals);
83 static Plan *create_join_plan(PlannerInfo *root, JoinPath *best_path);
84 static bool mark_async_capable_plan(Plan *plan, Path *path);
85 static Plan *create_append_plan(PlannerInfo *root, AppendPath *best_path,
86  int flags);
87 static Plan *create_merge_append_plan(PlannerInfo *root, MergeAppendPath *best_path,
88  int flags);
89 static Result *create_group_result_plan(PlannerInfo *root,
90  GroupResultPath *best_path);
91 static ProjectSet *create_project_set_plan(PlannerInfo *root, ProjectSetPath *best_path);
92 static Material *create_material_plan(PlannerInfo *root, MaterialPath *best_path,
93  int flags);
94 static Memoize *create_memoize_plan(PlannerInfo *root, MemoizePath *best_path,
95  int flags);
96 static Plan *create_unique_plan(PlannerInfo *root, UniquePath *best_path,
97  int flags);
98 static Gather *create_gather_plan(PlannerInfo *root, GatherPath *best_path);
99 static Plan *create_projection_plan(PlannerInfo *root,
100  ProjectionPath *best_path,
101  int flags);
102 static Plan *inject_projection_plan(Plan *subplan, List *tlist, bool parallel_safe);
103 static Sort *create_sort_plan(PlannerInfo *root, SortPath *best_path, int flags);
104 static IncrementalSort *create_incrementalsort_plan(PlannerInfo *root,
105  IncrementalSortPath *best_path, int flags);
106 static Group *create_group_plan(PlannerInfo *root, GroupPath *best_path);
107 static Unique *create_upper_unique_plan(PlannerInfo *root, UpperUniquePath *best_path,
108  int flags);
109 static Agg *create_agg_plan(PlannerInfo *root, AggPath *best_path);
110 static Plan *create_groupingsets_plan(PlannerInfo *root, GroupingSetsPath *best_path);
111 static Result *create_minmaxagg_plan(PlannerInfo *root, MinMaxAggPath *best_path);
112 static WindowAgg *create_windowagg_plan(PlannerInfo *root, WindowAggPath *best_path);
113 static SetOp *create_setop_plan(PlannerInfo *root, SetOpPath *best_path,
114  int flags);
115 static RecursiveUnion *create_recursiveunion_plan(PlannerInfo *root, RecursiveUnionPath *best_path);
116 static LockRows *create_lockrows_plan(PlannerInfo *root, LockRowsPath *best_path,
117  int flags);
118 static ModifyTable *create_modifytable_plan(PlannerInfo *root, ModifyTablePath *best_path);
119 static Limit *create_limit_plan(PlannerInfo *root, LimitPath *best_path,
120  int flags);
121 static SeqScan *create_seqscan_plan(PlannerInfo *root, Path *best_path,
122  List *tlist, List *scan_clauses);
123 static SampleScan *create_samplescan_plan(PlannerInfo *root, Path *best_path,
124  List *tlist, List *scan_clauses);
125 static Scan *create_indexscan_plan(PlannerInfo *root, IndexPath *best_path,
126  List *tlist, List *scan_clauses, bool indexonly);
127 static BitmapHeapScan *create_bitmap_scan_plan(PlannerInfo *root,
128  BitmapHeapPath *best_path,
129  List *tlist, List *scan_clauses);
130 static Plan *create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
131  List **qual, List **indexqual, List **indexECs);
132 static void bitmap_subplan_mark_shared(Plan *plan);
133 static TidScan *create_tidscan_plan(PlannerInfo *root, TidPath *best_path,
134  List *tlist, List *scan_clauses);
135 static TidRangeScan *create_tidrangescan_plan(PlannerInfo *root,
136  TidRangePath *best_path,
137  List *tlist,
138  List *scan_clauses);
139 static SubqueryScan *create_subqueryscan_plan(PlannerInfo *root,
140  SubqueryScanPath *best_path,
141  List *tlist, List *scan_clauses);
142 static FunctionScan *create_functionscan_plan(PlannerInfo *root, Path *best_path,
143  List *tlist, List *scan_clauses);
144 static ValuesScan *create_valuesscan_plan(PlannerInfo *root, Path *best_path,
145  List *tlist, List *scan_clauses);
146 static TableFuncScan *create_tablefuncscan_plan(PlannerInfo *root, Path *best_path,
147  List *tlist, List *scan_clauses);
148 static CteScan *create_ctescan_plan(PlannerInfo *root, Path *best_path,
149  List *tlist, List *scan_clauses);
150 static NamedTuplestoreScan *create_namedtuplestorescan_plan(PlannerInfo *root,
151  Path *best_path, List *tlist, List *scan_clauses);
152 static Result *create_resultscan_plan(PlannerInfo *root, Path *best_path,
153  List *tlist, List *scan_clauses);
154 static WorkTableScan *create_worktablescan_plan(PlannerInfo *root, Path *best_path,
155  List *tlist, List *scan_clauses);
156 static ForeignScan *create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
157  List *tlist, List *scan_clauses);
158 static CustomScan *create_customscan_plan(PlannerInfo *root,
159  CustomPath *best_path,
160  List *tlist, List *scan_clauses);
161 static NestLoop *create_nestloop_plan(PlannerInfo *root, NestPath *best_path);
162 static MergeJoin *create_mergejoin_plan(PlannerInfo *root, MergePath *best_path);
163 static HashJoin *create_hashjoin_plan(PlannerInfo *root, HashPath *best_path);
164 static Node *replace_nestloop_params(PlannerInfo *root, Node *expr);
165 static Node *replace_nestloop_params_mutator(Node *node, PlannerInfo *root);
166 static void fix_indexqual_references(PlannerInfo *root, IndexPath *index_path,
167  List **stripped_indexquals_p,
168  List **fixed_indexquals_p);
169 static List *fix_indexorderby_references(PlannerInfo *root, IndexPath *index_path);
170 static Node *fix_indexqual_clause(PlannerInfo *root,
171  IndexOptInfo *index, int indexcol,
172  Node *clause, List *indexcolnos);
173 static Node *fix_indexqual_operand(Node *node, IndexOptInfo *index, int indexcol);
174 static List *get_switched_clauses(List *clauses, Relids outerrelids);
175 static List *order_qual_clauses(PlannerInfo *root, List *clauses);
176 static void copy_generic_path_info(Plan *dest, Path *src);
177 static void copy_plan_costsize(Plan *dest, Plan *src);
178 static void label_sort_with_costsize(PlannerInfo *root, Sort *plan,
179  double limit_tuples);
180 static SeqScan *make_seqscan(List *qptlist, List *qpqual, Index scanrelid);
181 static SampleScan *make_samplescan(List *qptlist, List *qpqual, Index scanrelid,
182  TableSampleClause *tsc);
183 static IndexScan *make_indexscan(List *qptlist, List *qpqual, Index scanrelid,
184  Oid indexid, List *indexqual, List *indexqualorig,
185  List *indexorderby, List *indexorderbyorig,
186  List *indexorderbyops,
187  ScanDirection indexscandir);
188 static IndexOnlyScan *make_indexonlyscan(List *qptlist, List *qpqual,
189  Index scanrelid, Oid indexid,
190  List *indexqual, List *recheckqual,
191  List *indexorderby,
192  List *indextlist,
193  ScanDirection indexscandir);
194 static BitmapIndexScan *make_bitmap_indexscan(Index scanrelid, Oid indexid,
195  List *indexqual,
196  List *indexqualorig);
197 static BitmapHeapScan *make_bitmap_heapscan(List *qptlist,
198  List *qpqual,
199  Plan *lefttree,
200  List *bitmapqualorig,
201  Index scanrelid);
202 static TidScan *make_tidscan(List *qptlist, List *qpqual, Index scanrelid,
203  List *tidquals);
204 static TidRangeScan *make_tidrangescan(List *qptlist, List *qpqual,
205  Index scanrelid, List *tidrangequals);
206 static SubqueryScan *make_subqueryscan(List *qptlist,
207  List *qpqual,
208  Index scanrelid,
209  Plan *subplan);
210 static FunctionScan *make_functionscan(List *qptlist, List *qpqual,
211  Index scanrelid, List *functions, bool funcordinality);
212 static ValuesScan *make_valuesscan(List *qptlist, List *qpqual,
213  Index scanrelid, List *values_lists);
214 static TableFuncScan *make_tablefuncscan(List *qptlist, List *qpqual,
215  Index scanrelid, TableFunc *tablefunc);
216 static CteScan *make_ctescan(List *qptlist, List *qpqual,
217  Index scanrelid, int ctePlanId, int cteParam);
218 static NamedTuplestoreScan *make_namedtuplestorescan(List *qptlist, List *qpqual,
219  Index scanrelid, char *enrname);
220 static WorkTableScan *make_worktablescan(List *qptlist, List *qpqual,
221  Index scanrelid, int wtParam);
222 static RecursiveUnion *make_recursiveunion(List *tlist,
223  Plan *lefttree,
224  Plan *righttree,
225  int wtParam,
226  List *distinctList,
227  long numGroups);
228 static BitmapAnd *make_bitmap_and(List *bitmapplans);
229 static BitmapOr *make_bitmap_or(List *bitmapplans);
230 static NestLoop *make_nestloop(List *tlist,
231  List *joinclauses, List *otherclauses, List *nestParams,
232  Plan *lefttree, Plan *righttree,
233  JoinType jointype, bool inner_unique);
234 static HashJoin *make_hashjoin(List *tlist,
235  List *joinclauses, List *otherclauses,
236  List *hashclauses,
237  List *hashoperators, List *hashcollations,
238  List *hashkeys,
239  Plan *lefttree, Plan *righttree,
240  JoinType jointype, bool inner_unique);
241 static Hash *make_hash(Plan *lefttree,
242  List *hashkeys,
243  Oid skewTable,
244  AttrNumber skewColumn,
245  bool skewInherit);
246 static MergeJoin *make_mergejoin(List *tlist,
247  List *joinclauses, List *otherclauses,
248  List *mergeclauses,
249  Oid *mergefamilies,
250  Oid *mergecollations,
251  int *mergestrategies,
252  bool *mergenullsfirst,
253  Plan *lefttree, Plan *righttree,
254  JoinType jointype, bool inner_unique,
255  bool skip_mark_restore);
256 static Sort *make_sort(Plan *lefttree, int numCols,
257  AttrNumber *sortColIdx, Oid *sortOperators,
258  Oid *collations, bool *nullsFirst);
259 static IncrementalSort *make_incrementalsort(Plan *lefttree,
260  int numCols, int nPresortedCols,
261  AttrNumber *sortColIdx, Oid *sortOperators,
262  Oid *collations, bool *nullsFirst);
263 static Plan *prepare_sort_from_pathkeys(Plan *lefttree, List *pathkeys,
264  Relids relids,
265  const AttrNumber *reqColIdx,
266  bool adjust_tlist_in_place,
267  int *p_numsortkeys,
268  AttrNumber **p_sortColIdx,
269  Oid **p_sortOperators,
270  Oid **p_collations,
271  bool **p_nullsFirst);
272 static Sort *make_sort_from_pathkeys(Plan *lefttree, List *pathkeys,
273  Relids relids);
274 static IncrementalSort *make_incrementalsort_from_pathkeys(Plan *lefttree,
275  List *pathkeys, Relids relids, int nPresortedCols);
276 static Sort *make_sort_from_groupcols(List *groupcls,
277  AttrNumber *grpColIdx,
278  Plan *lefttree);
279 static Material *make_material(Plan *lefttree);
280 static Memoize *make_memoize(Plan *lefttree, Oid *hashoperators,
281  Oid *collations, List *param_exprs,
282  bool singlerow, bool binary_mode,
283  uint32 est_entries, Bitmapset *keyparamids);
284 static WindowAgg *make_windowagg(List *tlist, Index winref,
285  int partNumCols, AttrNumber *partColIdx, Oid *partOperators, Oid *partCollations,
286  int ordNumCols, AttrNumber *ordColIdx, Oid *ordOperators, Oid *ordCollations,
287  int frameOptions, Node *startOffset, Node *endOffset,
288  Oid startInRangeFunc, Oid endInRangeFunc,
289  Oid inRangeColl, bool inRangeAsc, bool inRangeNullsFirst,
290  List *runCondition, List *qual, bool topWindow,
291  Plan *lefttree);
292 static Group *make_group(List *tlist, List *qual, int numGroupCols,
293  AttrNumber *grpColIdx, Oid *grpOperators, Oid *grpCollations,
294  Plan *lefttree);
295 static Unique *make_unique_from_sortclauses(Plan *lefttree, List *distinctList);
296 static Unique *make_unique_from_pathkeys(Plan *lefttree,
297  List *pathkeys, int numCols);
298 static Gather *make_gather(List *qptlist, List *qpqual,
299  int nworkers, int rescan_param, bool single_copy, Plan *subplan);
300 static SetOp *make_setop(SetOpCmd cmd, SetOpStrategy strategy, Plan *lefttree,
301  List *distinctList, AttrNumber flagColIdx, int firstFlag,
302  long numGroups);
303 static LockRows *make_lockrows(Plan *lefttree, List *rowMarks, int epqParam);
304 static Result *make_result(List *tlist, Node *resconstantqual, Plan *subplan);
305 static ProjectSet *make_project_set(List *tlist, Plan *subplan);
306 static ModifyTable *make_modifytable(PlannerInfo *root, Plan *subplan,
307  CmdType operation, bool canSetTag,
308  Index nominalRelation, Index rootRelation,
309  bool partColsUpdated,
310  List *resultRelations,
311  List *updateColnosLists,
312  List *withCheckOptionLists, List *returningLists,
313  List *rowMarks, OnConflictExpr *onconflict,
314  List *mergeActionLists, int epqParam);
315 static GatherMerge *create_gather_merge_plan(PlannerInfo *root,
316  GatherMergePath *best_path);
317 
318 
319 /*
320  * create_plan
321  * Creates the access plan for a query by recursively processing the
322  * desired tree of pathnodes, starting at the node 'best_path'. For
323  * every pathnode found, we create a corresponding plan node containing
324  * appropriate id, target list, and qualification information.
325  *
326  * The tlists and quals in the plan tree are still in planner format,
327  * ie, Vars still correspond to the parser's numbering. This will be
328  * fixed later by setrefs.c.
329  *
330  * best_path is the best access path
331  *
332  * Returns a Plan tree.
333  */
334 Plan *
335 create_plan(PlannerInfo *root, Path *best_path)
336 {
337  Plan *plan;
338 
339  /* plan_params should not be in use in current query level */
340  Assert(root->plan_params == NIL);
341 
342  /* Initialize this module's workspace in PlannerInfo */
343  root->curOuterRels = NULL;
344  root->curOuterParams = NIL;
345 
346  /* Recursively process the path tree, demanding the correct tlist result */
347  plan = create_plan_recurse(root, best_path, CP_EXACT_TLIST);
348 
349  /*
350  * Make sure the topmost plan node's targetlist exposes the original
351  * column names and other decorative info. Targetlists generated within
352  * the planner don't bother with that stuff, but we must have it on the
353  * top-level tlist seen at execution time. However, ModifyTable plan
354  * nodes don't have a tlist matching the querytree targetlist.
355  */
356  if (!IsA(plan, ModifyTable))
357  apply_tlist_labeling(plan->targetlist, root->processed_tlist);
358 
359  /*
360  * Attach any initPlans created in this query level to the topmost plan
361  * node. (In principle the initplans could go in any plan node at or
362  * above where they're referenced, but there seems no reason to put them
363  * any lower than the topmost node for the query level. Also, see
364  * comments for SS_finalize_plan before you try to change this.)
365  */
366  SS_attach_initplans(root, plan);
367 
368  /* Check we successfully assigned all NestLoopParams to plan nodes */
369  if (root->curOuterParams != NIL)
370  elog(ERROR, "failed to assign all NestLoopParams to plan nodes");
371 
372  /*
373  * Reset plan_params to ensure param IDs used for nestloop params are not
374  * re-used later
375  */
376  root->plan_params = NIL;
377 
378  return plan;
379 }
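/*
 * Usage sketch (illustrative, not part of the original file): one
 * representative call site is the main planner, which hands the cheapest
 * Path for the final upper rel to create_plan().  Roughly, simplified from
 * standard_planner() in planner.c:
 *
 *     final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
 *     best_path = get_cheapest_fractional_path(final_rel, tuple_fraction);
 *     top_plan = create_plan(root, best_path);
 *
 * Subplans for sublinks and CTEs come through the same entry point from
 * subselect.c, once per sub-query level.
 */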
380 
381 /*
382  * create_plan_recurse
383  * Recursive guts of create_plan().
384  */
385 static Plan *
386 create_plan_recurse(PlannerInfo *root, Path *best_path, int flags)
387 {
388  Plan *plan;
389 
390  /* Guard against stack overflow due to overly complex plans */
391  check_stack_depth();
392 
393  switch (best_path->pathtype)
394  {
395  case T_SeqScan:
396  case T_SampleScan:
397  case T_IndexScan:
398  case T_IndexOnlyScan:
399  case T_BitmapHeapScan:
400  case T_TidScan:
401  case T_TidRangeScan:
402  case T_SubqueryScan:
403  case T_FunctionScan:
404  case T_TableFuncScan:
405  case T_ValuesScan:
406  case T_CteScan:
407  case T_WorkTableScan:
408  case T_NamedTuplestoreScan:
409  case T_ForeignScan:
410  case T_CustomScan:
411  plan = create_scan_plan(root, best_path, flags);
412  break;
413  case T_HashJoin:
414  case T_MergeJoin:
415  case T_NestLoop:
416  plan = create_join_plan(root,
417  (JoinPath *) best_path);
418  break;
419  case T_Append:
420  plan = create_append_plan(root,
421  (AppendPath *) best_path,
422  flags);
423  break;
424  case T_MergeAppend:
425  plan = create_merge_append_plan(root,
426  (MergeAppendPath *) best_path,
427  flags);
428  break;
429  case T_Result:
430  if (IsA(best_path, ProjectionPath))
431  {
432  plan = create_projection_plan(root,
433  (ProjectionPath *) best_path,
434  flags);
435  }
436  else if (IsA(best_path, MinMaxAggPath))
437  {
438  plan = (Plan *) create_minmaxagg_plan(root,
439  (MinMaxAggPath *) best_path);
440  }
441  else if (IsA(best_path, GroupResultPath))
442  {
443  plan = (Plan *) create_group_result_plan(root,
444  (GroupResultPath *) best_path);
445  }
446  else
447  {
448  /* Simple RTE_RESULT base relation */
449  Assert(IsA(best_path, Path));
450  plan = create_scan_plan(root, best_path, flags);
451  }
452  break;
453  case T_ProjectSet:
454  plan = (Plan *) create_project_set_plan(root,
455  (ProjectSetPath *) best_path);
456  break;
457  case T_Material:
458  plan = (Plan *) create_material_plan(root,
459  (MaterialPath *) best_path,
460  flags);
461  break;
462  case T_Memoize:
463  plan = (Plan *) create_memoize_plan(root,
464  (MemoizePath *) best_path,
465  flags);
466  break;
467  case T_Unique:
468  if (IsA(best_path, UpperUniquePath))
469  {
470  plan = (Plan *) create_upper_unique_plan(root,
471  (UpperUniquePath *) best_path,
472  flags);
473  }
474  else
475  {
476  Assert(IsA(best_path, UniquePath));
477  plan = create_unique_plan(root,
478  (UniquePath *) best_path,
479  flags);
480  }
481  break;
482  case T_Gather:
483  plan = (Plan *) create_gather_plan(root,
484  (GatherPath *) best_path);
485  break;
486  case T_Sort:
487  plan = (Plan *) create_sort_plan(root,
488  (SortPath *) best_path,
489  flags);
490  break;
491  case T_IncrementalSort:
492  plan = (Plan *) create_incrementalsort_plan(root,
493  (IncrementalSortPath *) best_path,
494  flags);
495  break;
496  case T_Group:
497  plan = (Plan *) create_group_plan(root,
498  (GroupPath *) best_path);
499  break;
500  case T_Agg:
501  if (IsA(best_path, GroupingSetsPath))
502  plan = create_groupingsets_plan(root,
503  (GroupingSetsPath *) best_path);
504  else
505  {
506  Assert(IsA(best_path, AggPath));
507  plan = (Plan *) create_agg_plan(root,
508  (AggPath *) best_path);
509  }
510  break;
511  case T_WindowAgg:
512  plan = (Plan *) create_windowagg_plan(root,
513  (WindowAggPath *) best_path);
514  break;
515  case T_SetOp:
516  plan = (Plan *) create_setop_plan(root,
517  (SetOpPath *) best_path,
518  flags);
519  break;
520  case T_RecursiveUnion:
521  plan = (Plan *) create_recursiveunion_plan(root,
522  (RecursiveUnionPath *) best_path);
523  break;
524  case T_LockRows:
525  plan = (Plan *) create_lockrows_plan(root,
526  (LockRowsPath *) best_path,
527  flags);
528  break;
529  case T_ModifyTable:
530  plan = (Plan *) create_modifytable_plan(root,
531  (ModifyTablePath *) best_path);
532  break;
533  case T_Limit:
534  plan = (Plan *) create_limit_plan(root,
535  (LimitPath *) best_path,
536  flags);
537  break;
538  case T_GatherMerge:
539  plan = (Plan *) create_gather_merge_plan(root,
540  (GatherMergePath *) best_path);
541  break;
542  default:
543  elog(ERROR, "unrecognized node type: %d",
544  (int) best_path->pathtype);
545  plan = NULL; /* keep compiler quiet */
546  break;
547  }
548 
549  return plan;
550 }
551 
552 /*
553  * create_scan_plan
554  * Create a scan plan for the parent relation of 'best_path'.
555  */
556 static Plan *
557 create_scan_plan(PlannerInfo *root, Path *best_path, int flags)
558 {
559  RelOptInfo *rel = best_path->parent;
560  List *scan_clauses;
561  List *gating_clauses;
562  List *tlist;
563  Plan *plan;
564 
565  /*
566  * Extract the relevant restriction clauses from the parent relation. The
567  * executor must apply all these restrictions during the scan, except for
568  * pseudoconstants which we'll take care of below.
569  *
570  * If this is a plain indexscan or index-only scan, we need not consider
571  * restriction clauses that are implied by the index's predicate, so use
572  * indrestrictinfo not baserestrictinfo. Note that we can't do that for
573  * bitmap indexscans, since there's not necessarily a single index
574  * involved; but it doesn't matter since create_bitmap_scan_plan() will be
575  * able to get rid of such clauses anyway via predicate proof.
576  */
577  switch (best_path->pathtype)
578  {
579  case T_IndexScan:
580  case T_IndexOnlyScan:
581  scan_clauses = castNode(IndexPath, best_path)->indexinfo->indrestrictinfo;
582  break;
583  default:
584  scan_clauses = rel->baserestrictinfo;
585  break;
586  }
587 
588  /*
589  * If this is a parameterized scan, we also need to enforce all the join
590  * clauses available from the outer relation(s).
591  *
592  * For paranoia's sake, don't modify the stored baserestrictinfo list.
593  */
594  if (best_path->param_info)
595  scan_clauses = list_concat_copy(scan_clauses,
596  best_path->param_info->ppi_clauses);
597 
598  /*
599  * Detect whether we have any pseudoconstant quals to deal with. Then, if
600  * we'll need a gating Result node, it will be able to project, so there
601  * are no requirements on the child's tlist.
602  */
603  gating_clauses = get_gating_quals(root, scan_clauses);
604  if (gating_clauses)
605  flags = 0;
606 
607  /*
608  * For table scans, rather than using the relation targetlist (which is
609  * only those Vars actually needed by the query), we prefer to generate a
610  * tlist containing all Vars in order. This will allow the executor to
611  * optimize away projection of the table tuples, if possible.
612  *
613  * But if the caller is going to ignore our tlist anyway, then don't
614  * bother generating one at all. We use an exact equality test here, so
615  * that this only applies when CP_IGNORE_TLIST is the only flag set.
616  */
617  if (flags == CP_IGNORE_TLIST)
618  {
619  tlist = NULL;
620  }
621  else if (use_physical_tlist(root, best_path, flags))
622  {
623  if (best_path->pathtype == T_IndexOnlyScan)
624  {
625  /* For index-only scan, the preferred tlist is the index's */
626  tlist = copyObject(((IndexPath *) best_path)->indexinfo->indextlist);
627 
628  /*
629  * Transfer sortgroupref data to the replacement tlist, if
630  * requested (use_physical_tlist checked that this will work).
631  */
632  if (flags & CP_LABEL_TLIST)
633  apply_pathtarget_labeling_to_tlist(tlist, best_path->pathtarget);
634  }
635  else
636  {
637  tlist = build_physical_tlist(root, rel);
638  if (tlist == NIL)
639  {
640  /* Failed because of dropped cols, so use regular method */
641  tlist = build_path_tlist(root, best_path);
642  }
643  else
644  {
645  /* As above, transfer sortgroupref data to replacement tlist */
646  if (flags & CP_LABEL_TLIST)
647  apply_pathtarget_labeling_to_tlist(tlist, best_path->pathtarget);
648  }
649  }
650  }
651  else
652  {
653  tlist = build_path_tlist(root, best_path);
654  }
655 
656  switch (best_path->pathtype)
657  {
658  case T_SeqScan:
659  plan = (Plan *) create_seqscan_plan(root,
660  best_path,
661  tlist,
662  scan_clauses);
663  break;
664 
665  case T_SampleScan:
666  plan = (Plan *) create_samplescan_plan(root,
667  best_path,
668  tlist,
669  scan_clauses);
670  break;
671 
672  case T_IndexScan:
673  plan = (Plan *) create_indexscan_plan(root,
674  (IndexPath *) best_path,
675  tlist,
676  scan_clauses,
677  false);
678  break;
679 
680  case T_IndexOnlyScan:
681  plan = (Plan *) create_indexscan_plan(root,
682  (IndexPath *) best_path,
683  tlist,
684  scan_clauses,
685  true);
686  break;
687 
688  case T_BitmapHeapScan:
689  plan = (Plan *) create_bitmap_scan_plan(root,
690  (BitmapHeapPath *) best_path,
691  tlist,
692  scan_clauses);
693  break;
694 
695  case T_TidScan:
696  plan = (Plan *) create_tidscan_plan(root,
697  (TidPath *) best_path,
698  tlist,
699  scan_clauses);
700  break;
701 
702  case T_TidRangeScan:
703  plan = (Plan *) create_tidrangescan_plan(root,
704  (TidRangePath *) best_path,
705  tlist,
706  scan_clauses);
707  break;
708 
709  case T_SubqueryScan:
710  plan = (Plan *) create_subqueryscan_plan(root,
711  (SubqueryScanPath *) best_path,
712  tlist,
713  scan_clauses);
714  break;
715 
716  case T_FunctionScan:
717  plan = (Plan *) create_functionscan_plan(root,
718  best_path,
719  tlist,
720  scan_clauses);
721  break;
722 
723  case T_TableFuncScan:
724  plan = (Plan *) create_tablefuncscan_plan(root,
725  best_path,
726  tlist,
727  scan_clauses);
728  break;
729 
730  case T_ValuesScan:
731  plan = (Plan *) create_valuesscan_plan(root,
732  best_path,
733  tlist,
734  scan_clauses);
735  break;
736 
737  case T_CteScan:
738  plan = (Plan *) create_ctescan_plan(root,
739  best_path,
740  tlist,
741  scan_clauses);
742  break;
743 
744  case T_NamedTuplestoreScan:
745  plan = (Plan *) create_namedtuplestorescan_plan(root,
746  best_path,
747  tlist,
748  scan_clauses);
749  break;
750 
751  case T_Result:
752  plan = (Plan *) create_resultscan_plan(root,
753  best_path,
754  tlist,
755  scan_clauses);
756  break;
757 
758  case T_WorkTableScan:
759  plan = (Plan *) create_worktablescan_plan(root,
760  best_path,
761  tlist,
762  scan_clauses);
763  break;
764 
765  case T_ForeignScan:
766  plan = (Plan *) create_foreignscan_plan(root,
767  (ForeignPath *) best_path,
768  tlist,
769  scan_clauses);
770  break;
771 
772  case T_CustomScan:
773  plan = (Plan *) create_customscan_plan(root,
774  (CustomPath *) best_path,
775  tlist,
776  scan_clauses);
777  break;
778 
779  default:
780  elog(ERROR, "unrecognized node type: %d",
781  (int) best_path->pathtype);
782  plan = NULL; /* keep compiler quiet */
783  break;
784  }
785 
786  /*
787  * If there are any pseudoconstant clauses attached to this node, insert a
788  * gating Result node that evaluates the pseudoconstants as one-time
789  * quals.
790  */
791  if (gating_clauses)
792  plan = create_gating_plan(root, best_path, plan, gating_clauses);
793 
794  return plan;
795 }
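/*
 * Illustrative example of the gating step above (not part of the original
 * file).  A qual that references no Vars of the current query level and
 * contains no volatile functions is "pseudoconstant", so it is pulled out
 * of the scan quals and evaluated only once; for something like
 *
 *     SELECT * FROM tab WHERE now() > '2023-01-01';
 *
 * the finished plan is shaped roughly as
 *
 *     Result
 *       One-Time Filter: (now() > ...)
 *       ->  Seq Scan on tab
 *
 * where the Result node is the gating plan added by create_gating_plan().
 */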
796 
797 /*
798  * Build a target list (ie, a list of TargetEntry) for the Path's output.
799  *
800  * This is almost just make_tlist_from_pathtarget(), but we also have to
801  * deal with replacing nestloop params.
802  */
803 static List *
804 build_path_tlist(PlannerInfo *root, Path *path)
805 {
806  List *tlist = NIL;
807  Index *sortgrouprefs = path->pathtarget->sortgrouprefs;
808  int resno = 1;
809  ListCell *v;
810 
811  foreach(v, path->pathtarget->exprs)
812  {
813  Node *node = (Node *) lfirst(v);
814  TargetEntry *tle;
815 
816  /*
817  * If it's a parameterized path, there might be lateral references in
818  * the tlist, which need to be replaced with Params. There's no need
819  * to remake the TargetEntry nodes, so apply this to each list item
820  * separately.
821  */
822  if (path->param_info)
823  node = replace_nestloop_params(root, node);
824 
825  tle = makeTargetEntry((Expr *) node,
826  resno,
827  NULL,
828  false);
829  if (sortgrouprefs)
830  tle->ressortgroupref = sortgrouprefs[resno - 1];
831 
832  tlist = lappend(tlist, tle);
833  resno++;
834  }
835  return tlist;
836 }
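/*
 * Illustrative example for the param_info branch above (not part of the
 * original file).  In a LATERAL query such as
 *
 *     SELECT ss.y FROM a, LATERAL (SELECT a.x + 1 AS y) ss;
 *
 * an expression in the parameterized path's tlist can reference the outer
 * column a.x.  replace_nestloop_params() converts that reference into a
 * Param; the enclosing NestLoop lists the Param in its nestParams and
 * fills it from the current outer row before each rescan of the inner side.
 */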
837 
838 /*
839  * use_physical_tlist
840  * Decide whether to use a tlist matching relation structure,
841  * rather than only those Vars actually referenced.
842  */
843 static bool
844 use_physical_tlist(PlannerInfo *root, Path *path, int flags)
845 {
846  RelOptInfo *rel = path->parent;
847  int i;
848  ListCell *lc;
849 
850  /*
851  * Forget it if either exact tlist or small tlist is demanded.
852  */
853  if (flags & (CP_EXACT_TLIST | CP_SMALL_TLIST))
854  return false;
855 
856  /*
857  * We can do this for real relation scans, subquery scans, function scans,
858  * tablefunc scans, values scans, and CTE scans (but not for, eg, joins).
859  */
860  if (rel->rtekind != RTE_RELATION &&
861  rel->rtekind != RTE_SUBQUERY &&
862  rel->rtekind != RTE_FUNCTION &&
863  rel->rtekind != RTE_TABLEFUNC &&
864  rel->rtekind != RTE_VALUES &&
865  rel->rtekind != RTE_CTE)
866  return false;
867 
868  /*
869  * Can't do it with inheritance cases either (mainly because Append
870  * doesn't project; this test may be unnecessary now that
871  * create_append_plan instructs its children to return an exact tlist).
872  */
873  if (rel->reloptkind != RELOPT_BASEREL)
874  return false;
875 
876  /*
877  * Also, don't do it to a CustomPath; the premise that we're extracting
878  * columns from a simple physical tuple is unlikely to hold for those.
879  * (When it does make sense, the custom path creator can set up the path's
880  * pathtarget that way.)
881  */
882  if (IsA(path, CustomPath))
883  return false;
884 
885  /*
886  * If a bitmap scan's tlist is empty, keep it as-is. This may allow the
887  * executor to skip heap page fetches, and in any case, the benefit of
888  * using a physical tlist instead would be minimal.
889  */
890  if (IsA(path, BitmapHeapPath) &&
891  path->pathtarget->exprs == NIL)
892  return false;
893 
894  /*
895  * Can't do it if any system columns or whole-row Vars are requested.
896  * (This could possibly be fixed but would take some fragile assumptions
897  * in setrefs.c, I think.)
898  */
899  for (i = rel->min_attr; i <= 0; i++)
900  {
901  if (!bms_is_empty(rel->attr_needed[i - rel->min_attr]))
902  return false;
903  }
904 
905  /*
906  * Can't do it if the rel is required to emit any placeholder expressions,
907  * either.
908  */
909  foreach(lc, root->placeholder_list)
910  {
911  PlaceHolderInfo *phinfo = (PlaceHolderInfo *) lfirst(lc);
912 
913  if (bms_nonempty_difference(phinfo->ph_needed, rel->relids) &&
914  bms_is_subset(phinfo->ph_eval_at, rel->relids))
915  return false;
916  }
917 
918  /*
919  * For an index-only scan, the "physical tlist" is the index's indextlist.
920  * We can only return that without a projection if all the index's columns
921  * are returnable.
922  */
923  if (path->pathtype == T_IndexOnlyScan)
924  {
925  IndexOptInfo *indexinfo = ((IndexPath *) path)->indexinfo;
926 
927  for (i = 0; i < indexinfo->ncolumns; i++)
928  {
929  if (!indexinfo->canreturn[i])
930  return false;
931  }
932  }
933 
934  /*
935  * Also, can't do it if CP_LABEL_TLIST is specified and path is requested
936  * to emit any sort/group columns that are not simple Vars. (If they are
937  * simple Vars, they should appear in the physical tlist, and
938  * apply_pathtarget_labeling_to_tlist will take care of getting them
939  * labeled again.) We also have to check that no two sort/group columns
940  * are the same Var, else that element of the physical tlist would need
941  * conflicting ressortgroupref labels.
942  */
943  if ((flags & CP_LABEL_TLIST) && path->pathtarget->sortgrouprefs)
944  {
945  Bitmapset *sortgroupatts = NULL;
946 
947  i = 0;
948  foreach(lc, path->pathtarget->exprs)
949  {
950  Expr *expr = (Expr *) lfirst(lc);
951 
952  if (path->pathtarget->sortgrouprefs[i])
953  {
954  if (expr && IsA(expr, Var))
955  {
956  int attno = ((Var *) expr)->varattno;
957 
958  attno -= FirstLowInvalidHeapAttributeNumber;
959  if (bms_is_member(attno, sortgroupatts))
960  return false;
961  sortgroupatts = bms_add_member(sortgroupatts, attno);
962  }
963  else
964  return false;
965  }
966  i++;
967  }
968  }
969 
970  return true;
971 }
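/*
 * Illustrative example (not part of the original file).  For a query like
 *
 *     SELECT t.a FROM t JOIN u ON t.a = u.a;
 *
 * the scan on t may be given a "physical" tlist listing all of t's columns
 * in order, rather than just "a"; the executor can then hand back the
 * stored tuple as-is instead of projecting a new one.  Parents that must
 * store or sort their input (Sort, Hash, Material) defeat this by passing
 * CP_SMALL_TLIST, since narrower rows matter more to them than avoiding a
 * projection step.
 */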
972 
973 /*
974  * get_gating_quals
975  * See if there are pseudoconstant quals in a node's quals list
976  *
977  * If the node's quals list includes any pseudoconstant quals,
978  * return just those quals.
979  */
980 static List *
981 get_gating_quals(PlannerInfo *root, List *quals)
982 {
983  /* No need to look if we know there are no pseudoconstants */
984  if (!root->hasPseudoConstantQuals)
985  return NIL;
986 
987  /* Sort into desirable execution order while still in RestrictInfo form */
988  quals = order_qual_clauses(root, quals);
989 
990  /* Pull out any pseudoconstant quals from the RestrictInfo list */
991  return extract_actual_clauses(quals, true);
992 }
993 
994 /*
995  * create_gating_plan
996  * Deal with pseudoconstant qual clauses
997  *
998  * Add a gating Result node atop the already-built plan.
999  */
1000 static Plan *
1001 create_gating_plan(PlannerInfo *root, Path *path, Plan *plan,
1002  List *gating_quals)
1003 {
1004  Plan *gplan;
1005  Plan *splan;
1006 
1007  Assert(gating_quals);
1008 
1009  /*
1010  * We might have a trivial Result plan already. Stacking one Result atop
1011  * another is silly, so if that applies, just discard the input plan.
1012  * (We're assuming its targetlist is uninteresting; it should be either
1013  * the same as the result of build_path_tlist, or a simplified version.)
1014  */
1015  splan = plan;
1016  if (IsA(plan, Result))
1017  {
1018  Result *rplan = (Result *) plan;
1019 
1020  if (rplan->plan.lefttree == NULL &&
1021  rplan->resconstantqual == NULL)
1022  splan = NULL;
1023  }
1024 
1025  /*
1026  * Since we need a Result node anyway, always return the path's requested
1027  * tlist; that's never a wrong choice, even if the parent node didn't ask
1028  * for CP_EXACT_TLIST.
1029  */
1030  gplan = (Plan *) make_result(build_path_tlist(root, path),
1031  (Node *) gating_quals,
1032  splan);
1033 
1034  /*
1035  * Notice that we don't change cost or size estimates when doing gating.
1036  * The costs of qual eval were already included in the subplan's cost.
1037  * Leaving the size alone amounts to assuming that the gating qual will
1038  * succeed, which is the conservative estimate for planning upper queries.
1039  * We certainly don't want to assume the output size is zero (unless the
1040  * gating qual is actually constant FALSE, and that case is dealt with in
1041  * clausesel.c). Interpolating between the two cases is silly, because it
1042  * doesn't reflect what will really happen at runtime, and besides which
1043  * in most cases we have only a very bad idea of the probability of the
1044  * gating qual being true.
1045  */
1046  copy_plan_costsize(gplan, plan);
1047 
1048  /* Gating quals could be unsafe, so better use the Path's safety flag */
1049  gplan->parallel_safe = path->parallel_safe;
1050 
1051  return gplan;
1052 }
1053 
1054 /*
1055  * create_join_plan
1056  * Create a join plan for 'best_path' and (recursively) plans for its
1057  * inner and outer paths.
1058  */
1059 static Plan *
1060 create_join_plan(PlannerInfo *root, JoinPath *best_path)
1061 {
1062  Plan *plan;
1063  List *gating_clauses;
1064 
1065  switch (best_path->path.pathtype)
1066  {
1067  case T_MergeJoin:
1068  plan = (Plan *) create_mergejoin_plan(root,
1069  (MergePath *) best_path);
1070  break;
1071  case T_HashJoin:
1072  plan = (Plan *) create_hashjoin_plan(root,
1073  (HashPath *) best_path);
1074  break;
1075  case T_NestLoop:
1076  plan = (Plan *) create_nestloop_plan(root,
1077  (NestPath *) best_path);
1078  break;
1079  default:
1080  elog(ERROR, "unrecognized node type: %d",
1081  (int) best_path->path.pathtype);
1082  plan = NULL; /* keep compiler quiet */
1083  break;
1084  }
1085 
1086  /*
1087  * If there are any pseudoconstant clauses attached to this node, insert a
1088  * gating Result node that evaluates the pseudoconstants as one-time
1089  * quals.
1090  */
1091  gating_clauses = get_gating_quals(root, best_path->joinrestrictinfo);
1092  if (gating_clauses)
1093  plan = create_gating_plan(root, (Path *) best_path, plan,
1094  gating_clauses);
1095 
1096 #ifdef NOT_USED
1097 
1098  /*
1099  * * Expensive function pullups may have pulled local predicates * into
1100  * this path node. Put them in the qpqual of the plan node. * JMH,
1101  * 6/15/92
1102  */
1103  if (get_loc_restrictinfo(best_path) != NIL)
1104  set_qpqual((Plan) plan,
1105  list_concat(get_qpqual((Plan) plan),
1106  get_actual_clauses(get_loc_restrictinfo(best_path))));
1107 #endif
1108 
1109  return plan;
1110 }
1111 
1112 /*
1113  * mark_async_capable_plan
1114  * Check whether the Plan node created from a Path node is async-capable,
1115  * and if so, mark the Plan node as such and return true, otherwise
1116  * return false.
1117  */
1118 static bool
1119 mark_async_capable_plan(Plan *plan, Path *path)
1120 {
1121  switch (nodeTag(path))
1122  {
1123  case T_SubqueryScanPath:
1124  {
1125  SubqueryScan *scan_plan = (SubqueryScan *) plan;
1126 
1127  /*
1128  * If the generated plan node includes a gating Result node,
1129  * we can't execute it asynchronously.
1130  */
1131  if (IsA(plan, Result))
1132  return false;
1133 
1134  /*
1135  * If a SubqueryScan node atop of an async-capable plan node
1136  * is deletable, consider it as async-capable.
1137  */
1138  if (trivial_subqueryscan(scan_plan) &&
1139  mark_async_capable_plan(scan_plan->subplan,
1140  ((SubqueryScanPath *) path)->subpath))
1141  break;
1142  return false;
1143  }
1144  case T_ForeignPath:
1145  {
1146  FdwRoutine *fdwroutine = path->parent->fdwroutine;
1147 
1148  /*
1149  * If the generated plan node includes a gating Result node,
1150  * we can't execute it asynchronously.
1151  */
1152  if (IsA(plan, Result))
1153  return false;
1154 
1155  Assert(fdwroutine != NULL);
1156  if (fdwroutine->IsForeignPathAsyncCapable != NULL &&
1157  fdwroutine->IsForeignPathAsyncCapable((ForeignPath *) path))
1158  break;
1159  return false;
1160  }
1161  case T_ProjectionPath:
1162 
1163  /*
1164  * If the generated plan node includes a Result node for the
1165  * projection, we can't execute it asynchronously.
1166  */
1167  if (IsA(plan, Result))
1168  return false;
1169 
1170  /*
1171  * create_projection_plan() would have pulled up the subplan, so
1172  * check the capability using the subpath.
1173  */
1174  if (mark_async_capable_plan(plan,
1175  ((ProjectionPath *) path)->subpath))
1176  return true;
1177  return false;
1178  default:
1179  return false;
1180  }
1181 
1182  plan->async_capable = true;
1183 
1184  return true;
1185 }
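/*
 * Illustrative example (not part of the original file).  With
 * enable_async_append on and an FDW whose IsForeignPathAsyncCapable
 * callback says yes (postgres_fdw does, when its "async_capable" option is
 * set), an Append over foreign partitions can come out as
 *
 *     Append
 *       ->  Async Foreign Scan on p1
 *       ->  Async Foreign Scan on p2
 *
 * Each child is marked by the per-subplan call to mark_async_capable_plan()
 * in create_append_plan() below.
 */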
1186 
1187 /*
1188  * create_append_plan
1189  * Create an Append plan for 'best_path' and (recursively) plans
1190  * for its subpaths.
1191  *
1192  * Returns a Plan node.
1193  */
1194 static Plan *
1195 create_append_plan(PlannerInfo *root, AppendPath *best_path, int flags)
1196 {
1197  Append *plan;
1198  List *tlist = build_path_tlist(root, &best_path->path);
1199  int orig_tlist_length = list_length(tlist);
1200  bool tlist_was_changed = false;
1201  List *pathkeys = best_path->path.pathkeys;
1202  List *subplans = NIL;
1203  ListCell *subpaths;
1204  int nasyncplans = 0;
1205  RelOptInfo *rel = best_path->path.parent;
1206  int nodenumsortkeys = 0;
1207  AttrNumber *nodeSortColIdx = NULL;
1208  Oid *nodeSortOperators = NULL;
1209  Oid *nodeCollations = NULL;
1210  bool *nodeNullsFirst = NULL;
1211  bool consider_async = false;
1212 
1213  /*
1214  * The subpaths list could be empty, if every child was proven empty by
1215  * constraint exclusion. In that case generate a dummy plan that returns
1216  * no rows.
1217  *
1218  * Note that an AppendPath with no members is also generated in certain
1219  * cases where there was no appending construct at all, but we know the
1220  * relation is empty (see set_dummy_rel_pathlist and mark_dummy_rel).
1221  */
1222  if (best_path->subpaths == NIL)
1223  {
1224  /* Generate a Result plan with constant-FALSE gating qual */
1225  Plan *plan;
1226 
1227  plan = (Plan *) make_result(tlist,
1228  (Node *) list_make1(makeBoolConst(false,
1229  false)),
1230  NULL);
1231 
1232  copy_generic_path_info(plan, (Path *) best_path);
1233 
1234  return plan;
1235  }
1236 
1237  /*
1238  * Otherwise build an Append plan. Note that if there's just one child,
1239  * the Append is pretty useless; but we wait till setrefs.c to get rid of
1240  * it. Doing so here doesn't work because the varno of the child scan
1241  * plan won't match the parent-rel Vars it'll be asked to emit.
1242  *
1243  * We don't have the actual creation of the Append node split out into a
1244  * separate make_xxx function. This is because we want to run
1245  * prepare_sort_from_pathkeys on it before we do so on the individual
1246  * child plans, to make cross-checking the sort info easier.
1247  */
1248  plan = makeNode(Append);
1249  plan->plan.targetlist = tlist;
1250  plan->plan.qual = NIL;
1251  plan->plan.lefttree = NULL;
1252  plan->plan.righttree = NULL;
1253  plan->apprelids = rel->relids;
1254 
1255  if (pathkeys != NIL)
1256  {
1257  /*
1258  * Compute sort column info, and adjust the Append's tlist as needed.
1259  * Because we pass adjust_tlist_in_place = true, we may ignore the
1260  * function result; it must be the same plan node. However, we then
1261  * need to detect whether any tlist entries were added.
1262  */
1263  (void) prepare_sort_from_pathkeys((Plan *) plan, pathkeys,
1264  best_path->path.parent->relids,
1265  NULL,
1266  true,
1267  &nodenumsortkeys,
1268  &nodeSortColIdx,
1269  &nodeSortOperators,
1270  &nodeCollations,
1271  &nodeNullsFirst);
1272  tlist_was_changed = (orig_tlist_length != list_length(plan->plan.targetlist));
1273  }
1274 
1275  /* If appropriate, consider async append */
1276  consider_async = (enable_async_append && pathkeys == NIL &&
1277  !best_path->path.parallel_safe &&
1278  list_length(best_path->subpaths) > 1);
1279 
1280  /* Build the plan for each child */
1281  foreach(subpaths, best_path->subpaths)
1282  {
1283  Path *subpath = (Path *) lfirst(subpaths);
1284  Plan *subplan;
1285 
1286  /* Must insist that all children return the same tlist */
1287  subplan = create_plan_recurse(root, subpath, CP_EXACT_TLIST);
1288 
1289  /*
1290  * For ordered Appends, we must insert a Sort node if subplan isn't
1291  * sufficiently ordered.
1292  */
1293  if (pathkeys != NIL)
1294  {
1295  int numsortkeys;
1296  AttrNumber *sortColIdx;
1297  Oid *sortOperators;
1298  Oid *collations;
1299  bool *nullsFirst;
1300 
1301  /*
1302  * Compute sort column info, and adjust subplan's tlist as needed.
1303  * We must apply prepare_sort_from_pathkeys even to subplans that
1304  * don't need an explicit sort, to make sure they are returning
1305  * the same sort key columns the Append expects.
1306  */
1307  subplan = prepare_sort_from_pathkeys(subplan, pathkeys,
1308  subpath->parent->relids,
1309  nodeSortColIdx,
1310  false,
1311  &numsortkeys,
1312  &sortColIdx,
1313  &sortOperators,
1314  &collations,
1315  &nullsFirst);
1316 
1317  /*
1318  * Check that we got the same sort key information. We just
1319  * Assert that the sortops match, since those depend only on the
1320  * pathkeys; but it seems like a good idea to check the sort
1321  * column numbers explicitly, to ensure the tlists match up.
1322  */
1323  Assert(numsortkeys == nodenumsortkeys);
1324  if (memcmp(sortColIdx, nodeSortColIdx,
1325  numsortkeys * sizeof(AttrNumber)) != 0)
1326  elog(ERROR, "Append child's targetlist doesn't match Append");
1327  Assert(memcmp(sortOperators, nodeSortOperators,
1328  numsortkeys * sizeof(Oid)) == 0);
1329  Assert(memcmp(collations, nodeCollations,
1330  numsortkeys * sizeof(Oid)) == 0);
1331  Assert(memcmp(nullsFirst, nodeNullsFirst,
1332  numsortkeys * sizeof(bool)) == 0);
1333 
1334  /* Now, insert a Sort node if subplan isn't sufficiently ordered */
1335  if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
1336  {
1337  Sort *sort = make_sort(subplan, numsortkeys,
1338  sortColIdx, sortOperators,
1339  collations, nullsFirst);
1340 
1341  label_sort_with_costsize(root, sort, best_path->limit_tuples);
1342  subplan = (Plan *) sort;
1343  }
1344  }
1345 
1346  /* If needed, check to see if subplan can be executed asynchronously */
1347  if (consider_async && mark_async_capable_plan(subplan, subpath))
1348  {
1349  Assert(subplan->async_capable);
1350  ++nasyncplans;
1351  }
1352 
1353  subplans = lappend(subplans, subplan);
1354  }
1355 
1356  /* Set below if we find quals that we can use to run-time prune */
1357  plan->part_prune_index = -1;
1358 
1359  /*
1360  * If any quals exist, they may be useful to perform further partition
1361  * pruning during execution. Gather information needed by the executor to
1362  * do partition pruning.
1363  */
1364  if (enable_partition_pruning)
1365  {
1366  List *prunequal;
1367 
1368  prunequal = extract_actual_clauses(rel->baserestrictinfo, false);
1369 
1370  if (best_path->path.param_info)
1371  {
1372  List *prmquals = best_path->path.param_info->ppi_clauses;
1373 
1374  prmquals = extract_actual_clauses(prmquals, false);
1375  prmquals = (List *) replace_nestloop_params(root,
1376  (Node *) prmquals);
1377 
1378  prunequal = list_concat(prunequal, prmquals);
1379  }
1380 
1381  if (prunequal != NIL)
1382  plan->part_prune_index = make_partition_pruneinfo(root, rel,
1383  best_path->subpaths,
1384  prunequal);
1385  }
1386 
1387  plan->appendplans = subplans;
1388  plan->nasyncplans = nasyncplans;
1389  plan->first_partial_plan = best_path->first_partial_path;
1390 
1391  copy_generic_path_info(&plan->plan, (Path *) best_path);
1392 
1393  /*
1394  * If prepare_sort_from_pathkeys added sort columns, but we were told to
1395  * produce either the exact tlist or a narrow tlist, we should get rid of
1396  * the sort columns again. We must inject a projection node to do so.
1397  */
1398  if (tlist_was_changed && (flags & (CP_EXACT_TLIST | CP_SMALL_TLIST)))
1399  {
1400  tlist = list_copy_head(plan->plan.targetlist, orig_tlist_length);
1401  return inject_projection_plan((Plan *) plan, tlist,
1402  plan->plan.parallel_safe);
1403  }
1404  else
1405  return (Plan *) plan;
1406 }
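/*
 * Illustrative example of the run-time pruning information set up above
 * (not part of the original file).  For a generic plan on a partitioned
 * table, e.g.
 *
 *     PREPARE q(int) AS SELECT * FROM parted WHERE pkey = $1;
 *
 * the pruning qual "pkey = $1" cannot be resolved at plan time, so the
 * Append is given a part_prune_index referencing the PartitionPruneInfo
 * built by make_partition_pruneinfo(); the executor then skips unmatched
 * children at startup, which EXPLAIN reports as "Subplans Removed: N".
 */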
1407 
1408 /*
1409  * create_merge_append_plan
1410  * Create a MergeAppend plan for 'best_path' and (recursively) plans
1411  * for its subpaths.
1412  *
1413  * Returns a Plan node.
1414  */
1415 static Plan *
1416 create_merge_append_plan(PlannerInfo *root, MergeAppendPath *best_path,
1417  int flags)
1418 {
1419  MergeAppend *node = makeNode(MergeAppend);
1420  Plan *plan = &node->plan;
1421  List *tlist = build_path_tlist(root, &best_path->path);
1422  int orig_tlist_length = list_length(tlist);
1423  bool tlist_was_changed;
1424  List *pathkeys = best_path->path.pathkeys;
1425  List *subplans = NIL;
1426  ListCell *subpaths;
1427  RelOptInfo *rel = best_path->path.parent;
1428 
1429  /*
1430  * We don't have the actual creation of the MergeAppend node split out
1431  * into a separate make_xxx function. This is because we want to run
1432  * prepare_sort_from_pathkeys on it before we do so on the individual
1433  * child plans, to make cross-checking the sort info easier.
1434  */
1435  copy_generic_path_info(plan, (Path *) best_path);
1436  plan->targetlist = tlist;
1437  plan->qual = NIL;
1438  plan->lefttree = NULL;
1439  plan->righttree = NULL;
1440  node->apprelids = rel->relids;
1441 
1442  /*
1443  * Compute sort column info, and adjust MergeAppend's tlist as needed.
1444  * Because we pass adjust_tlist_in_place = true, we may ignore the
1445  * function result; it must be the same plan node. However, we then need
1446  * to detect whether any tlist entries were added.
1447  */
1448  (void) prepare_sort_from_pathkeys(plan, pathkeys,
1449  best_path->path.parent->relids,
1450  NULL,
1451  true,
1452  &node->numCols,
1453  &node->sortColIdx,
1454  &node->sortOperators,
1455  &node->collations,
1456  &node->nullsFirst);
1457  tlist_was_changed = (orig_tlist_length != list_length(plan->targetlist));
1458 
1459  /*
1460  * Now prepare the child plans. We must apply prepare_sort_from_pathkeys
1461  * even to subplans that don't need an explicit sort, to make sure they
1462  * are returning the same sort key columns the MergeAppend expects.
1463  */
1464  foreach(subpaths, best_path->subpaths)
1465  {
1466  Path *subpath = (Path *) lfirst(subpaths);
1467  Plan *subplan;
1468  int numsortkeys;
1469  AttrNumber *sortColIdx;
1470  Oid *sortOperators;
1471  Oid *collations;
1472  bool *nullsFirst;
1473 
1474  /* Build the child plan */
1475  /* Must insist that all children return the same tlist */
1476  subplan = create_plan_recurse(root, subpath, CP_EXACT_TLIST);
1477 
1478  /* Compute sort column info, and adjust subplan's tlist as needed */
1479  subplan = prepare_sort_from_pathkeys(subplan, pathkeys,
1480  subpath->parent->relids,
1481  node->sortColIdx,
1482  false,
1483  &numsortkeys,
1484  &sortColIdx,
1485  &sortOperators,
1486  &collations,
1487  &nullsFirst);
1488 
1489  /*
1490  * Check that we got the same sort key information. We just Assert
1491  * that the sortops match, since those depend only on the pathkeys;
1492  * but it seems like a good idea to check the sort column numbers
1493  * explicitly, to ensure the tlists really do match up.
1494  */
1495  Assert(numsortkeys == node->numCols);
1496  if (memcmp(sortColIdx, node->sortColIdx,
1497  numsortkeys * sizeof(AttrNumber)) != 0)
1498  elog(ERROR, "MergeAppend child's targetlist doesn't match MergeAppend");
1499  Assert(memcmp(sortOperators, node->sortOperators,
1500  numsortkeys * sizeof(Oid)) == 0);
1501  Assert(memcmp(collations, node->collations,
1502  numsortkeys * sizeof(Oid)) == 0);
1503  Assert(memcmp(nullsFirst, node->nullsFirst,
1504  numsortkeys * sizeof(bool)) == 0);
1505 
1506  /* Now, insert a Sort node if subplan isn't sufficiently ordered */
1507  if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
1508  {
1509  Sort *sort = make_sort(subplan, numsortkeys,
1510  sortColIdx, sortOperators,
1511  collations, nullsFirst);
1512 
1513  label_sort_with_costsize(root, sort, best_path->limit_tuples);
1514  subplan = (Plan *) sort;
1515  }
1516 
1517  subplans = lappend(subplans, subplan);
1518  }
1519 
1520  /* Set below if we find quals that we can use to run-time prune */
1521  node->part_prune_index = -1;
1522 
1523  /*
1524  * If any quals exist, they may be useful to perform further partition
1525  * pruning during execution. Gather information needed by the executor to
1526  * do partition pruning.
1527  */
1528  if (enable_partition_pruning)
1529  {
1530  List *prunequal;
1531 
1532  prunequal = extract_actual_clauses(rel->baserestrictinfo, false);
1533 
1534  /* We don't currently generate any parameterized MergeAppend paths */
1535  Assert(best_path->path.param_info == NULL);
1536 
1537  if (prunequal != NIL)
1538  node->part_prune_index = make_partition_pruneinfo(root, rel,
1539  best_path->subpaths,
1540  prunequal);
1541  }
1542 
1543  node->mergeplans = subplans;
1544 
1545 
1546  /*
1547  * If prepare_sort_from_pathkeys added sort columns, but we were told to
1548  * produce either the exact tlist or a narrow tlist, we should get rid of
1549  * the sort columns again. We must inject a projection node to do so.
1550  */
1551  if (tlist_was_changed && (flags & (CP_EXACT_TLIST | CP_SMALL_TLIST)))
1552  {
1553  tlist = list_copy_head(plan->targetlist, orig_tlist_length);
1554  return inject_projection_plan(plan, tlist, plan->parallel_safe);
1555  }
1556  else
1557  return plan;
1558 }
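/*
 * Illustrative plan shape (not part of the original file).  For an ordered
 * scan of a range-partitioned table, say
 *
 *     SELECT * FROM parted ORDER BY pkey;
 *
 * a MergeAppend merges presorted children, and any child that cannot
 * deliver the required ordering gets the explicit Sort inserted above:
 *
 *     Merge Append
 *       Sort Key: pkey
 *       ->  Index Scan using p1_pkey_idx on p1
 *       ->  Sort
 *             ->  Seq Scan on p2
 *
 * (p1_pkey_idx is a hypothetical index name used only for illustration.)
 */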
1559 
1560 /*
1561  * create_group_result_plan
1562  * Create a Result plan for 'best_path'.
1563  * This is only used for degenerate grouping cases.
1564  *
1565  * Returns a Plan node.
1566  */
1567 static Result *
1568 create_group_result_plan(PlannerInfo *root, GroupResultPath *best_path)
1569 {
1570  Result *plan;
1571  List *tlist;
1572  List *quals;
1573 
1574  tlist = build_path_tlist(root, &best_path->path);
1575 
1576  /* best_path->quals is just bare clauses */
1577  quals = order_qual_clauses(root, best_path->quals);
1578 
1579  plan = make_result(tlist, (Node *) quals, NULL);
1580 
1581  copy_generic_path_info(&plan->plan, (Path *) best_path);
1582 
1583  return plan;
1584 }
1585 
1586 /*
1587  * create_project_set_plan
1588  * Create a ProjectSet plan for 'best_path'.
1589  *
1590  * Returns a Plan node.
1591  */
1592 static ProjectSet *
1593 create_project_set_plan(PlannerInfo *root, ProjectSetPath *best_path)
1594 {
1595  ProjectSet *plan;
1596  Plan *subplan;
1597  List *tlist;
1598 
1599  /* Since we intend to project, we don't need to constrain child tlist */
1600  subplan = create_plan_recurse(root, best_path->subpath, 0);
1601 
1602  tlist = build_path_tlist(root, &best_path->path);
1603 
1604  plan = make_project_set(tlist, subplan);
1605 
1606  copy_generic_path_info(&plan->plan, (Path *) best_path);
1607 
1608  return plan;
1609 }
1610 
1611 /*
1612  * create_material_plan
1613  * Create a Material plan for 'best_path' and (recursively) plans
1614  * for its subpaths.
1615  *
1616  * Returns a Plan node.
1617  */
1618 static Material *
1619 create_material_plan(PlannerInfo *root, MaterialPath *best_path, int flags)
1620 {
1621  Material *plan;
1622  Plan *subplan;
1623 
1624  /*
1625  * We don't want any excess columns in the materialized tuples, so request
1626  * a smaller tlist. Otherwise, since Material doesn't project, tlist
1627  * requirements pass through.
1628  */
1629  subplan = create_plan_recurse(root, best_path->subpath,
1630  flags | CP_SMALL_TLIST);
1631 
1632  plan = make_material(subplan);
1633 
1634  copy_generic_path_info(&plan->plan, (Path *) best_path);
1635 
1636  return plan;
1637 }
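/*
 * Illustrative effect of the CP_SMALL_TLIST request above (not part of the
 * original file).  When the Material sits on the inner side of a merge
 * join, the scan beneath it returns only the columns actually referenced,
 * so the tuplestore holds narrow rows:
 *
 *     Merge Join
 *       ->  Index Scan on s
 *       ->  Materialize
 *             ->  Seq Scan on t   (tlist reduced to the needed Vars)
 */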
1638 
1639 /*
1640  * create_memoize_plan
1641  * Create a Memoize plan for 'best_path' and (recursively) plans for its
1642  * subpaths.
1643  *
1644  * Returns a Plan node.
1645  */
1646 static Memoize *
1647 create_memoize_plan(PlannerInfo *root, MemoizePath *best_path, int flags)
1648 {
1649  Memoize *plan;
1650  Bitmapset *keyparamids;
1651  Plan *subplan;
1652  Oid *operators;
1653  Oid *collations;
1654  List *param_exprs = NIL;
1655  ListCell *lc;
1656  ListCell *lc2;
1657  int nkeys;
1658  int i;
1659 
1660  subplan = create_plan_recurse(root, best_path->subpath,
1661  flags | CP_SMALL_TLIST);
1662 
1663  param_exprs = (List *) replace_nestloop_params(root, (Node *)
1664  best_path->param_exprs);
1665 
1666  nkeys = list_length(param_exprs);
1667  Assert(nkeys > 0);
1668  operators = palloc(nkeys * sizeof(Oid));
1669  collations = palloc(nkeys * sizeof(Oid));
1670 
1671  i = 0;
1672  forboth(lc, param_exprs, lc2, best_path->hash_operators)
1673  {
1674  Expr *param_expr = (Expr *) lfirst(lc);
1675  Oid opno = lfirst_oid(lc2);
1676 
1677  operators[i] = opno;
1678  collations[i] = exprCollation((Node *) param_expr);
1679  i++;
1680  }
1681 
1682  keyparamids = pull_paramids((Expr *) param_exprs);
1683 
1684  plan = make_memoize(subplan, operators, collations, param_exprs,
1685  best_path->singlerow, best_path->binary_mode,
1686  best_path->est_entries, keyparamids);
1687 
1688  copy_generic_path_info(&plan->plan, (Path *) best_path);
1689 
1690  return plan;
1691 }
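/*
 * Illustrative plan shape for the node built above (not part of the
 * original file).  A Memoize typically sits between a nested loop and a
 * parameterized inner scan, caching inner results per distinct parameter
 * value:
 *
 *     Nested Loop
 *       ->  Seq Scan on outer_tab
 *       ->  Memoize
 *             Cache Key: outer_tab.x
 *             ->  Index Scan using inner_idx on inner_tab
 *                   Index Cond: (y = outer_tab.x)
 *
 * Here param_exprs would hold the single expression outer_tab.x, and the
 * operator/collation arrays built above describe how those cache-key
 * values are hashed and compared.  (Table and index names are made up for
 * illustration.)
 */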
1692 
1693 /*
1694  * create_unique_plan
1695  * Create a Unique plan for 'best_path' and (recursively) plans
1696  * for its subpaths.
1697  *
1698  * Returns a Plan node.
1699  */
1700 static Plan *
1701 create_unique_plan(PlannerInfo *root, UniquePath *best_path, int flags)
1702 {
1703  Plan *plan;
1704  Plan *subplan;
1705  List *in_operators;
1706  List *uniq_exprs;
1707  List *newtlist;
1708  int nextresno;
1709  bool newitems;
1710  int numGroupCols;
1711  AttrNumber *groupColIdx;
1712  Oid *groupCollations;
1713  int groupColPos;
1714  ListCell *l;
1715 
1716  /* Unique doesn't project, so tlist requirements pass through */
1717  subplan = create_plan_recurse(root, best_path->subpath, flags);
1718 
1719  /* Done if we don't need to do any actual unique-ifying */
1720  if (best_path->umethod == UNIQUE_PATH_NOOP)
1721  return subplan;
1722 
1723  /*
1724  * As constructed, the subplan has a "flat" tlist containing just the Vars
1725  * needed here and at upper levels. The values we are supposed to
1726  * unique-ify may be expressions in these variables. We have to add any
1727  * such expressions to the subplan's tlist.
1728  *
1729  * The subplan may have a "physical" tlist if it is a simple scan plan. If
1730  * we're going to sort, this should be reduced to the regular tlist, so
1731  * that we don't sort more data than we need to. For hashing, the tlist
1732  * should be left as-is if we don't need to add any expressions; but if we
1733  * do have to add expressions, then a projection step will be needed at
1734  * runtime anyway, so we may as well remove unneeded items. Therefore
1735  * newtlist starts from build_path_tlist() not just a copy of the
1736  * subplan's tlist; and we don't install it into the subplan unless we are
1737  * sorting or stuff has to be added.
1738  */
1739  in_operators = best_path->in_operators;
1740  uniq_exprs = best_path->uniq_exprs;
1741 
1742  /* initialize modified subplan tlist as just the "required" vars */
1743  newtlist = build_path_tlist(root, &best_path->path);
1744  nextresno = list_length(newtlist) + 1;
1745  newitems = false;
1746 
1747  foreach(l, uniq_exprs)
1748  {
1749  Expr *uniqexpr = lfirst(l);
1750  TargetEntry *tle;
1751 
1752  tle = tlist_member(uniqexpr, newtlist);
1753  if (!tle)
1754  {
1755  tle = makeTargetEntry((Expr *) uniqexpr,
1756  nextresno,
1757  NULL,
1758  false);
1759  newtlist = lappend(newtlist, tle);
1760  nextresno++;
1761  newitems = true;
1762  }
1763  }
1764 
1765  /* Use change_plan_targetlist in case we need to insert a Result node */
1766  if (newitems || best_path->umethod == UNIQUE_PATH_SORT)
1767  subplan = change_plan_targetlist(subplan, newtlist,
1768  best_path->path.parallel_safe);
1769 
1770  /*
1771  * Build control information showing which subplan output columns are to
1772  * be examined by the grouping step. Unfortunately we can't merge this
1773  * with the previous loop, since we didn't then know which version of the
1774  * subplan tlist we'd end up using.
1775  */
1776  newtlist = subplan->targetlist;
1777  numGroupCols = list_length(uniq_exprs);
1778  groupColIdx = (AttrNumber *) palloc(numGroupCols * sizeof(AttrNumber));
1779  groupCollations = (Oid *) palloc(numGroupCols * sizeof(Oid));
1780 
1781  groupColPos = 0;
1782  foreach(l, uniq_exprs)
1783  {
1784  Expr *uniqexpr = lfirst(l);
1785  TargetEntry *tle;
1786 
1787  tle = tlist_member(uniqexpr, newtlist);
1788  if (!tle) /* shouldn't happen */
1789  elog(ERROR, "failed to find unique expression in subplan tlist");
1790  groupColIdx[groupColPos] = tle->resno;
1791  groupCollations[groupColPos] = exprCollation((Node *) tle->expr);
1792  groupColPos++;
1793  }
1794 
1795  if (best_path->umethod == UNIQUE_PATH_HASH)
1796  {
1797  Oid *groupOperators;
1798 
1799  /*
1800  * Get the hashable equality operators for the Agg node to use.
1801  * Normally these are the same as the IN clause operators, but if
1802  * those are cross-type operators then the equality operators are the
1803  * ones for the IN clause operators' RHS datatype.
1804  */
1805  groupOperators = (Oid *) palloc(numGroupCols * sizeof(Oid));
1806  groupColPos = 0;
1807  foreach(l, in_operators)
1808  {
1809  Oid in_oper = lfirst_oid(l);
1810  Oid eq_oper;
1811 
1812  if (!get_compatible_hash_operators(in_oper, NULL, &eq_oper))
1813  elog(ERROR, "could not find compatible hash operator for operator %u",
1814  in_oper);
1815  groupOperators[groupColPos++] = eq_oper;
1816  }
1817 
1818  /*
1819  * Since the Agg node is going to project anyway, we can give it the
1820  * minimum output tlist, without any stuff we might have added to the
1821  * subplan tlist.
1822  */
1823  plan = (Plan *) make_agg(build_path_tlist(root, &best_path->path),
1824  NIL,
 1825  AGG_HASHED,
 1826  AGGSPLIT_SIMPLE,
1827  numGroupCols,
1828  groupColIdx,
1829  groupOperators,
1830  groupCollations,
1831  NIL,
1832  NIL,
1833  best_path->path.rows,
1834  0,
1835  subplan);
1836  }
1837  else
1838  {
1839  List *sortList = NIL;
1840  Sort *sort;
1841 
1842  /* Create an ORDER BY list to sort the input compatibly */
1843  groupColPos = 0;
1844  foreach(l, in_operators)
1845  {
1846  Oid in_oper = lfirst_oid(l);
1847  Oid sortop;
1848  Oid eqop;
1849  TargetEntry *tle;
1850  SortGroupClause *sortcl;
1851 
1852  sortop = get_ordering_op_for_equality_op(in_oper, false);
1853  if (!OidIsValid(sortop)) /* shouldn't happen */
1854  elog(ERROR, "could not find ordering operator for equality operator %u",
1855  in_oper);
1856 
1857  /*
1858  * The Unique node will need equality operators. Normally these
1859  * are the same as the IN clause operators, but if those are
1860  * cross-type operators then the equality operators are the ones
1861  * for the IN clause operators' RHS datatype.
1862  */
1863  eqop = get_equality_op_for_ordering_op(sortop, NULL);
1864  if (!OidIsValid(eqop)) /* shouldn't happen */
1865  elog(ERROR, "could not find equality operator for ordering operator %u",
1866  sortop);
1867 
1868  tle = get_tle_by_resno(subplan->targetlist,
1869  groupColIdx[groupColPos]);
1870  Assert(tle != NULL);
1871 
1872  sortcl = makeNode(SortGroupClause);
1873  sortcl->tleSortGroupRef = assignSortGroupRef(tle,
1874  subplan->targetlist);
1875  sortcl->eqop = eqop;
1876  sortcl->sortop = sortop;
1877  sortcl->nulls_first = false;
1878  sortcl->hashable = false; /* no need to make this accurate */
1879  sortList = lappend(sortList, sortcl);
1880  groupColPos++;
1881  }
1882  sort = make_sort_from_sortclauses(sortList, subplan);
1883  label_sort_with_costsize(root, sort, -1.0);
1884  plan = (Plan *) make_unique_from_sortclauses((Plan *) sort, sortList);
1885  }
1886 
1887  /* Copy cost data from Path to Plan */
1888  copy_generic_path_info(plan, &best_path->path);
1889 
1890  return plan;
1891 }
1892 
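An aside, not part of createplan.c: the first foreach loop above uses the usual "append an expression to the tlist only if it is not already there" idiom. A minimal sketch of that pattern as a hypothetical helper, with includes listed for completeness:

#include "postgres.h"

#include "nodes/makefuncs.h"	/* makeTargetEntry() */
#include "nodes/pg_list.h"		/* list_length(), lappend() */
#include "optimizer/tlist.h"	/* tlist_member() */

/*
 * Hypothetical helper (illustration only, not part of createplan.c): make
 * sure 'expr' appears in *tlist, appending a new non-junk TargetEntry if
 * needed, and return the resno at which it can now be found.  This mirrors
 * the tlist_member()/makeTargetEntry() pattern used by create_unique_plan().
 */
static AttrNumber
add_expr_to_tlist(List **tlist, Expr *expr)
{
	TargetEntry *tle = tlist_member(expr, *tlist);

	if (tle == NULL)
	{
		tle = makeTargetEntry(expr,
							  list_length(*tlist) + 1,
							  NULL,
							  false);
		*tlist = lappend(*tlist, tle);
	}
	return tle->resno;
}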
1893 /*
1894  * create_gather_plan
1895  *
1896  * Create a Gather plan for 'best_path' and (recursively) plans
1897  * for its subpaths.
1898  */
 1899 static Gather *
 1900 create_gather_plan(PlannerInfo *root, GatherPath *best_path)
1901 {
1902  Gather *gather_plan;
1903  Plan *subplan;
1904  List *tlist;
1905 
1906  /*
1907  * Push projection down to the child node. That way, the projection work
1908  * is parallelized, and there can be no system columns in the result (they
1909  * can't travel through a tuple queue because it uses MinimalTuple
1910  * representation).
1911  */
1912  subplan = create_plan_recurse(root, best_path->subpath, CP_EXACT_TLIST);
1913 
1914  tlist = build_path_tlist(root, &best_path->path);
1915 
1916  gather_plan = make_gather(tlist,
1917  NIL,
 1918  best_path->num_workers,
 1919  assign_special_exec_param(root),
1920  best_path->single_copy,
1921  subplan);
1922 
1923  copy_generic_path_info(&gather_plan->plan, &best_path->path);
1924 
1925  /* use parallel mode for parallel plans. */
1926  root->glob->parallelModeNeeded = true;
1927 
1928  return gather_plan;
1929 }
1930 
1931 /*
1932  * create_gather_merge_plan
1933  *
1934  * Create a Gather Merge plan for 'best_path' and (recursively)
1935  * plans for its subpaths.
1936  */
 1937 static GatherMerge *
 1938 create_gather_merge_plan(PlannerInfo *root, GatherMergePath *best_path)
1939 {
1940  GatherMerge *gm_plan;
1941  Plan *subplan;
1942  List *pathkeys = best_path->path.pathkeys;
1943  List *tlist = build_path_tlist(root, &best_path->path);
1944 
1945  /* As with Gather, project away columns in the workers. */
1946  subplan = create_plan_recurse(root, best_path->subpath, CP_EXACT_TLIST);
1947 
1948  /* Create a shell for a GatherMerge plan. */
1949  gm_plan = makeNode(GatherMerge);
1950  gm_plan->plan.targetlist = tlist;
1951  gm_plan->num_workers = best_path->num_workers;
1952  copy_generic_path_info(&gm_plan->plan, &best_path->path);
1953 
1954  /* Assign the rescan Param. */
1955  gm_plan->rescan_param = assign_special_exec_param(root);
1956 
1957  /* Gather Merge is pointless with no pathkeys; use Gather instead. */
1958  Assert(pathkeys != NIL);
1959 
1960  /* Compute sort column info, and adjust subplan's tlist as needed */
1961  subplan = prepare_sort_from_pathkeys(subplan, pathkeys,
1962  best_path->subpath->parent->relids,
1963  gm_plan->sortColIdx,
1964  false,
1965  &gm_plan->numCols,
1966  &gm_plan->sortColIdx,
1967  &gm_plan->sortOperators,
1968  &gm_plan->collations,
1969  &gm_plan->nullsFirst);
1970 
1971 
1972  /*
1973  * All gather merge paths should have already guaranteed the necessary
1974  * sort order either by adding an explicit sort node or by using presorted
1975  * input. We can't simply add a sort here on additional pathkeys, because
1976  * we can't guarantee the sort would be safe. For example, expressions may
1977  * be volatile or otherwise parallel unsafe.
1978  */
1979  if (!pathkeys_contained_in(pathkeys, best_path->subpath->pathkeys))
1980  elog(ERROR, "gather merge input not sufficiently sorted");
1981 
1982  /* Now insert the subplan under GatherMerge. */
1983  gm_plan->plan.lefttree = subplan;
1984 
1985  /* use parallel mode for parallel plans. */
1986  root->glob->parallelModeNeeded = true;
1987 
1988  return gm_plan;
1989 }
1990 
1991 /*
1992  * create_projection_plan
1993  *
1994  * Create a plan tree to do a projection step and (recursively) plans
1995  * for its subpaths. We may need a Result node for the projection,
1996  * but sometimes we can just let the subplan do the work.
1997  */
 1998 static Plan *
 1999 create_projection_plan(PlannerInfo *root, ProjectionPath *best_path, int flags)
2000 {
2001  Plan *plan;
2002  Plan *subplan;
2003  List *tlist;
2004  bool needs_result_node = false;
2005 
2006  /*
2007  * Convert our subpath to a Plan and determine whether we need a Result
2008  * node.
2009  *
 2010  * In most cases where we don't need to project, create_projection_path
2011  * will have set dummypp, but not always. First, some createplan.c
2012  * routines change the tlists of their nodes. (An example is that
2013  * create_merge_append_plan might add resjunk sort columns to a
2014  * MergeAppend.) Second, create_projection_path has no way of knowing
2015  * what path node will be placed on top of the projection path and
2016  * therefore can't predict whether it will require an exact tlist. For
2017  * both of these reasons, we have to recheck here.
2018  */
2019  if (use_physical_tlist(root, &best_path->path, flags))
2020  {
2021  /*
2022  * Our caller doesn't really care what tlist we return, so we don't
2023  * actually need to project. However, we may still need to ensure
2024  * proper sortgroupref labels, if the caller cares about those.
2025  */
2026  subplan = create_plan_recurse(root, best_path->subpath, 0);
2027  tlist = subplan->targetlist;
 2028  if (flags & CP_LABEL_TLIST)
 2029  apply_pathtarget_labeling_to_tlist(tlist,
2030  best_path->path.pathtarget);
2031  }
2032  else if (is_projection_capable_path(best_path->subpath))
2033  {
2034  /*
2035  * Our caller requires that we return the exact tlist, but no separate
2036  * result node is needed because the subpath is projection-capable.
2037  * Tell create_plan_recurse that we're going to ignore the tlist it
2038  * produces.
2039  */
2040  subplan = create_plan_recurse(root, best_path->subpath,
2041  CP_IGNORE_TLIST);
2043  tlist = build_path_tlist(root, &best_path->path);
2044  }
2045  else
2046  {
2047  /*
2048  * It looks like we need a result node, unless by good fortune the
2049  * requested tlist is exactly the one the child wants to produce.
2050  */
2051  subplan = create_plan_recurse(root, best_path->subpath, 0);
2052  tlist = build_path_tlist(root, &best_path->path);
2053  needs_result_node = !tlist_same_exprs(tlist, subplan->targetlist);
2054  }
2055 
2056  /*
2057  * If we make a different decision about whether to include a Result node
2058  * than create_projection_path did, we'll have made slightly wrong cost
2059  * estimates; but label the plan with the cost estimates we actually used,
2060  * not "corrected" ones. (XXX this could be cleaned up if we moved more
2061  * of the sortcolumn setup logic into Path creation, but that would add
2062  * expense to creating Paths we might end up not using.)
2063  */
2064  if (!needs_result_node)
2065  {
2066  /* Don't need a separate Result, just assign tlist to subplan */
2067  plan = subplan;
2068  plan->targetlist = tlist;
2069 
2070  /* Label plan with the estimated costs we actually used */
2071  plan->startup_cost = best_path->path.startup_cost;
2072  plan->total_cost = best_path->path.total_cost;
2073  plan->plan_rows = best_path->path.rows;
2074  plan->plan_width = best_path->path.pathtarget->width;
2075  plan->parallel_safe = best_path->path.parallel_safe;
2076  /* ... but don't change subplan's parallel_aware flag */
2077  }
2078  else
2079  {
2080  /* We need a Result node */
2081  plan = (Plan *) make_result(tlist, NULL, subplan);
2082 
2083  copy_generic_path_info(plan, (Path *) best_path);
2084  }
2085 
2086  return plan;
2087 }
2088 
2089 /*
2090  * inject_projection_plan
2091  * Insert a Result node to do a projection step.
2092  *
2093  * This is used in a few places where we decide on-the-fly that we need a
2094  * projection step as part of the tree generated for some Path node.
2095  * We should try to get rid of this in favor of doing it more honestly.
2096  *
2097  * One reason it's ugly is we have to be told the right parallel_safe marking
2098  * to apply (since the tlist might be unsafe even if the child plan is safe).
2099  */
2100 static Plan *
2101 inject_projection_plan(Plan *subplan, List *tlist, bool parallel_safe)
2102 {
2103  Plan *plan;
2104 
2105  plan = (Plan *) make_result(tlist, NULL, subplan);
2106 
2107  /*
2108  * In principle, we should charge tlist eval cost plus cpu_per_tuple per
2109  * row for the Result node. But the former has probably been factored in
2110  * already and the latter was not accounted for during Path construction,
2111  * so being formally correct might just make the EXPLAIN output look less
2112  * consistent not more so. Hence, just copy the subplan's cost.
2113  */
2114  copy_plan_costsize(plan, subplan);
2115  plan->parallel_safe = parallel_safe;
2116 
2117  return plan;
2118 }
2119 
2120 /*
2121  * change_plan_targetlist
2122  * Externally available wrapper for inject_projection_plan.
2123  *
2124  * This is meant for use by FDW plan-generation functions, which might
2125  * want to adjust the tlist computed by some subplan tree. In general,
2126  * a Result node is needed to compute the new tlist, but we can optimize
2127  * some cases.
2128  *
2129  * In most cases, tlist_parallel_safe can just be passed as the parallel_safe
2130  * flag of the FDW's own Path node.
2131  */
2132 Plan *
2133 change_plan_targetlist(Plan *subplan, List *tlist, bool tlist_parallel_safe)
2134 {
2135  /*
2136  * If the top plan node can't do projections and its existing target list
2137  * isn't already what we need, we need to add a Result node to help it
2138  * along.
2139  */
2140  if (!is_projection_capable_plan(subplan) &&
2141  !tlist_same_exprs(tlist, subplan->targetlist))
2142  subplan = inject_projection_plan(subplan, tlist,
2143  subplan->parallel_safe &&
2144  tlist_parallel_safe);
2145  else
2146  {
2147  /* Else we can just replace the plan node's tlist */
2148  subplan->targetlist = tlist;
2149  subplan->parallel_safe &= tlist_parallel_safe;
2150  }
2151  return subplan;
2152 }
2153 
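For illustration only, not part of createplan.c: as the comment above notes, this entry point is aimed at FDW plan generation. A hypothetical FDW-side caller (all names here are invented, and the declaration is assumed to be visible via the planner headers) might use it like this:

#include "postgres.h"

#include "optimizer/planmain.h" /* change_plan_targetlist() */

/*
 * Hypothetical FDW-side snippet (illustration only): let
 * change_plan_targetlist() decide whether a Result node must be interposed
 * to compute the desired output tlist over an already-built local subplan.
 */
static Plan *
my_fdw_adjust_output(Plan *local_subplan, List *desired_tlist,
					 bool tlist_parallel_safe)
{
	return change_plan_targetlist(local_subplan, desired_tlist,
								  tlist_parallel_safe);
}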
2154 /*
2155  * create_sort_plan
2156  *
2157  * Create a Sort plan for 'best_path' and (recursively) plans
2158  * for its subpaths.
2159  */
2160 static Sort *
2161 create_sort_plan(PlannerInfo *root, SortPath *best_path, int flags)
2162 {
2163  Sort *plan;
2164  Plan *subplan;
2165 
2166  /*
2167  * We don't want any excess columns in the sorted tuples, so request a
2168  * smaller tlist. Otherwise, since Sort doesn't project, tlist
2169  * requirements pass through.
2170  */
2171  subplan = create_plan_recurse(root, best_path->subpath,
2172  flags | CP_SMALL_TLIST);
2173 
2174  /*
2175  * make_sort_from_pathkeys indirectly calls find_ec_member_matching_expr,
2176  * which will ignore any child EC members that don't belong to the given
2177  * relids. Thus, if this sort path is based on a child relation, we must
2178  * pass its relids.
2179  */
2180  plan = make_sort_from_pathkeys(subplan, best_path->path.pathkeys,
2181  IS_OTHER_REL(best_path->subpath->parent) ?
2182  best_path->path.parent->relids : NULL);
2183 
2184  copy_generic_path_info(&plan->plan, (Path *) best_path);
2185 
2186  return plan;
2187 }
2188 
2189 /*
2190  * create_incrementalsort_plan
2191  *
2192  * Do the same as create_sort_plan, but create IncrementalSort plan.
2193  */
 2194 static IncrementalSort *
 2195 create_incrementalsort_plan(PlannerInfo *root, IncrementalSortPath *best_path,
2196  int flags)
2197 {
2198  IncrementalSort *plan;
2199  Plan *subplan;
2200 
2201  /* See comments in create_sort_plan() above */
2202  subplan = create_plan_recurse(root, best_path->spath.subpath,
2203  flags | CP_SMALL_TLIST);
2204  plan = make_incrementalsort_from_pathkeys(subplan,
2205  best_path->spath.path.pathkeys,
2206  IS_OTHER_REL(best_path->spath.subpath->parent) ?
2207  best_path->spath.path.parent->relids : NULL,
2208  best_path->nPresortedCols);
2209 
2210  copy_generic_path_info(&plan->sort.plan, (Path *) best_path);
2211 
2212  return plan;
2213 }
2214 
2215 /*
2216  * create_group_plan
2217  *
2218  * Create a Group plan for 'best_path' and (recursively) plans
2219  * for its subpaths.
2220  */
 2221 static Group *
 2222 create_group_plan(PlannerInfo *root, GroupPath *best_path)
2223 {
2224  Group *plan;
2225  Plan *subplan;
2226  List *tlist;
2227  List *quals;
2228 
2229  /*
2230  * Group can project, so no need to be terribly picky about child tlist,
2231  * but we do need grouping columns to be available
2232  */
2233  subplan = create_plan_recurse(root, best_path->subpath, CP_LABEL_TLIST);
2234 
2235  tlist = build_path_tlist(root, &best_path->path);
2236 
2237  quals = order_qual_clauses(root, best_path->qual);
2238 
2239  plan = make_group(tlist,
2240  quals,
 2241  list_length(best_path->groupClause),
 2242  extract_grouping_cols(best_path->groupClause,
2243  subplan->targetlist),
 2244  extract_grouping_ops(best_path->groupClause),
 2245  extract_grouping_collations(best_path->groupClause,
2246  subplan->targetlist),
2247  subplan);
2248 
2249  copy_generic_path_info(&plan->plan, (Path *) best_path);
2250 
2251  return plan;
2252 }
2253 
2254 /*
2255  * create_upper_unique_plan
2256  *
2257  * Create a Unique plan for 'best_path' and (recursively) plans
2258  * for its subpaths.
2259  */
 2260 static Unique *
 2261 create_upper_unique_plan(PlannerInfo *root, UpperUniquePath *best_path, int flags)
2262 {
2263  Unique *plan;
2264  Plan *subplan;
2265 
2266  /*
2267  * Unique doesn't project, so tlist requirements pass through; moreover we
2268  * need grouping columns to be labeled.
2269  */
2270  subplan = create_plan_recurse(root, best_path->subpath,
2271  flags | CP_LABEL_TLIST);
2272 
2273  plan = make_unique_from_pathkeys(subplan,
2274  best_path->path.pathkeys,
2275  best_path->numkeys);
2276 
2277  copy_generic_path_info(&plan->plan, (Path *) best_path);
2278 
2279  return plan;
2280 }
2281 
2282 /*
2283  * create_agg_plan
2284  *
2285  * Create an Agg plan for 'best_path' and (recursively) plans
2286  * for its subpaths.
2287  */
 2288 static Agg *
 2289 create_agg_plan(PlannerInfo *root, AggPath *best_path)
2290 {
2291  Agg *plan;
2292  Plan *subplan;
2293  List *tlist;
2294  List *quals;
2295 
2296  /*
2297  * Agg can project, so no need to be terribly picky about child tlist, but
2298  * we do need grouping columns to be available
2299  */
2300  subplan = create_plan_recurse(root, best_path->subpath, CP_LABEL_TLIST);
2301 
2302  tlist = build_path_tlist(root, &best_path->path);
2303 
2304  quals = order_qual_clauses(root, best_path->qual);
2305 
2306  plan = make_agg(tlist, quals,
2307  best_path->aggstrategy,
2308  best_path->aggsplit,
 2309  list_length(best_path->groupClause),
 2310  extract_grouping_cols(best_path->groupClause,
2311  subplan->targetlist),
 2312  extract_grouping_ops(best_path->groupClause),
 2313  extract_grouping_collations(best_path->groupClause,
2314  subplan->targetlist),
2315  NIL,
2316  NIL,
2317  best_path->numGroups,
2318  best_path->transitionSpace,
2319  subplan);
2320 
2321  copy_generic_path_info(&plan->plan, (Path *) best_path);
2322 
2323  return plan;
2324 }
2325 
2326 /*
2327  * Given a groupclause for a collection of grouping sets, produce the
2328  * corresponding groupColIdx.
2329  *
2330  * root->grouping_map maps the tleSortGroupRef to the actual column position in
2331  * the input tuple. So we get the ref from the entries in the groupclause and
2332  * look them up there.
2333  */
2334 static AttrNumber *
2335 remap_groupColIdx(PlannerInfo *root, List *groupClause)
2336 {
2337  AttrNumber *grouping_map = root->grouping_map;
2338  AttrNumber *new_grpColIdx;
2339  ListCell *lc;
2340  int i;
2341 
2342  Assert(grouping_map);
2343 
2344  new_grpColIdx = palloc0(sizeof(AttrNumber) * list_length(groupClause));
2345 
2346  i = 0;
2347  foreach(lc, groupClause)
2348  {
2349  SortGroupClause *clause = lfirst(lc);
2350 
2351  new_grpColIdx[i++] = grouping_map[clause->tleSortGroupRef];
2352  }
2353 
2354  return new_grpColIdx;
2355 }
2356 
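An aside, not part of createplan.c: root->grouping_map is simply an array indexed by tleSortGroupRef, so a single lookup can be sketched as follows (the helper name is invented for illustration):

#include "postgres.h"

#include "nodes/parsenodes.h"	/* SortGroupClause */
#include "nodes/pathnodes.h"	/* PlannerInfo */

/*
 * Hypothetical helper (illustration only): translate one SortGroupClause of
 * a grouping set into the child-tlist column position recorded in
 * root->grouping_map, exactly as the loop in remap_groupColIdx() does.
 */
static AttrNumber
grouping_col_for_clause(PlannerInfo *root, SortGroupClause *clause)
{
	Assert(root->grouping_map != NULL);
	return root->grouping_map[clause->tleSortGroupRef];
}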
2357 /*
2358  * create_groupingsets_plan
2359  * Create a plan for 'best_path' and (recursively) plans
2360  * for its subpaths.
2361  *
2362  * What we emit is an Agg plan with some vestigial Agg and Sort nodes
2363  * hanging off the side. The top Agg implements the last grouping set
2364  * specified in the GroupingSetsPath, and any additional grouping sets
2365  * each give rise to a subsidiary Agg and Sort node in the top Agg's
2366  * "chain" list. These nodes don't participate in the plan directly,
2367  * but they are a convenient way to represent the required data for
2368  * the extra steps.
2369  *
2370  * Returns a Plan node.
2371  */
 2372 static Plan *
 2373 create_groupingsets_plan(PlannerInfo *root, GroupingSetsPath *best_path)
2374 {
2375  Agg *plan;
2376  Plan *subplan;
2377  List *rollups = best_path->rollups;
2378  AttrNumber *grouping_map;
2379  int maxref;
2380  List *chain;
2381  ListCell *lc;
2382 
2383  /* Shouldn't get here without grouping sets */
2384  Assert(root->parse->groupingSets);
2385  Assert(rollups != NIL);
2386 
2387  /*
2388  * Agg can project, so no need to be terribly picky about child tlist, but
2389  * we do need grouping columns to be available
2390  */
2391  subplan = create_plan_recurse(root, best_path->subpath, CP_LABEL_TLIST);
2392 
2393  /*
2394  * Compute the mapping from tleSortGroupRef to column index in the child's
2395  * tlist. First, identify max SortGroupRef in groupClause, for array
2396  * sizing.
2397  */
2398  maxref = 0;
2399  foreach(lc, root->processed_groupClause)
2400  {
2401  SortGroupClause *gc = (SortGroupClause *) lfirst(lc);
2402 
2403  if (gc->tleSortGroupRef > maxref)
2404  maxref = gc->tleSortGroupRef;
2405  }
2406 
2407  grouping_map = (AttrNumber *) palloc0((maxref + 1) * sizeof(AttrNumber));
2408 
2409  /* Now look up the column numbers in the child's tlist */
2410  foreach(lc, root->processed_groupClause)
2411  {
2412  SortGroupClause *gc = (SortGroupClause *) lfirst(lc);
2413  TargetEntry *tle = get_sortgroupclause_tle(gc, subplan->targetlist);
2414 
2415  grouping_map[gc->tleSortGroupRef] = tle->resno;
2416  }
2417 
2418  /*
2419  * During setrefs.c, we'll need the grouping_map to fix up the cols lists
2420  * in GroupingFunc nodes. Save it for setrefs.c to use.
2421  */
2422  Assert(root->grouping_map == NULL);
2423  root->grouping_map = grouping_map;
2424 
2425  /*
2426  * Generate the side nodes that describe the other sort and group
2427  * operations besides the top one. Note that we don't worry about putting
2428  * accurate cost estimates in the side nodes; only the topmost Agg node's
2429  * costs will be shown by EXPLAIN.
2430  */
2431  chain = NIL;
2432  if (list_length(rollups) > 1)
2433  {
2434  bool is_first_sort = ((RollupData *) linitial(rollups))->is_hashed;
2435 
2436  for_each_from(lc, rollups, 1)
2437  {
2438  RollupData *rollup = lfirst(lc);
2439  AttrNumber *new_grpColIdx;
2440  Plan *sort_plan = NULL;
2441  Plan *agg_plan;
2442  AggStrategy strat;
2443 
2444  new_grpColIdx = remap_groupColIdx(root, rollup->groupClause);
2445 
2446  if (!rollup->is_hashed && !is_first_sort)
2447  {
 2448  sort_plan = (Plan *)
 2449  make_sort_from_groupcols(rollup->groupClause,
2450  new_grpColIdx,
2451  subplan);
2452  }
2453 
2454  if (!rollup->is_hashed)
2455  is_first_sort = false;
2456 
2457  if (rollup->is_hashed)
2458  strat = AGG_HASHED;
2459  else if (linitial(rollup->gsets) == NIL)
2460  strat = AGG_PLAIN;
2461  else
2462  strat = AGG_SORTED;
2463 
2464  agg_plan = (Plan *) make_agg(NIL,
2465  NIL,
 2466  strat,
 2467  AGGSPLIT_SIMPLE,
2468  list_length((List *) linitial(rollup->gsets)),
 2469  new_grpColIdx,
 2470  extract_grouping_ops(rollup->groupClause),
 2471  extract_grouping_collations(rollup->groupClause, subplan->targetlist),
2472  rollup->gsets,
2473  NIL,
2474  rollup->numGroups,
2475  best_path->transitionSpace,
2476  sort_plan);
2477 
2478  /*
2479  * Remove stuff we don't need to avoid bloating debug output.
2480  */
2481  if (sort_plan)
2482  {
2483  sort_plan->targetlist = NIL;
2484  sort_plan->lefttree = NULL;
2485  }
2486 
2487  chain = lappend(chain, agg_plan);
2488  }
2489  }
2490 
2491  /*
2492  * Now make the real Agg node
2493  */
2494  {
2495  RollupData *rollup = linitial(rollups);
2496  AttrNumber *top_grpColIdx;
2497  int numGroupCols;
2498 
2499  top_grpColIdx = remap_groupColIdx(root, rollup->groupClause);
2500 
2501  numGroupCols = list_length((List *) linitial(rollup->gsets));
2502 
2503  plan = make_agg(build_path_tlist(root, &best_path->path),
2504  best_path->qual,
 2505  best_path->aggstrategy,
 2506  AGGSPLIT_SIMPLE,
2507  numGroupCols,
 2508  top_grpColIdx,
 2509  extract_grouping_ops(rollup->groupClause),
 2510  extract_grouping_collations(rollup->groupClause, subplan->targetlist),
2511  rollup->gsets,
2512  chain,
2513  rollup->numGroups,
2514  best_path->transitionSpace,
2515  subplan);
2516 
2517  /* Copy cost data from Path to Plan */
2518  copy_generic_path_info(&plan->plan, &best_path->path);
2519  }
2520 
2521  return (Plan *) plan;
2522 }
2523 
2524 /*
2525  * create_minmaxagg_plan
2526  *
2527  * Create a Result plan for 'best_path' and (recursively) plans
2528  * for its subpaths.
2529  */
 2530 static Result *
 2531 create_minmaxagg_plan(PlannerInfo *root, MinMaxAggPath *best_path)
2532 {
2533  Result *plan;
2534  List *tlist;
2535  ListCell *lc;
2536 
2537  /* Prepare an InitPlan for each aggregate's subquery. */
2538  foreach(lc, best_path->mmaggregates)
2539  {
2540  MinMaxAggInfo *mminfo = (MinMaxAggInfo *) lfirst(lc);
2541  PlannerInfo *subroot = mminfo->subroot;
2542  Query *subparse = subroot->parse;
2543  Plan *plan;
2544 
2545  /*
2546  * Generate the plan for the subquery. We already have a Path, but we
2547  * have to convert it to a Plan and attach a LIMIT node above it.
2548  * Since we are entering a different planner context (subroot),
2549  * recurse to create_plan not create_plan_recurse.
2550  */
2551  plan = create_plan(subroot, mminfo->path);
2552 
2553  plan = (Plan *) make_limit(plan,
2554  subparse->limitOffset,
2555  subparse->limitCount,
2556  subparse->limitOption,
2557  0, NULL, NULL, NULL);
2558 
2559  /* Must apply correct cost/width data to Limit node */
2560  plan->startup_cost = mminfo->path->startup_cost;
2561  plan->total_cost = mminfo->pathcost;
2562  plan->plan_rows = 1;
2563  plan->plan_width = mminfo->path->pathtarget->width;
2564  plan->parallel_aware = false;
2565  plan->parallel_safe = mminfo->path->parallel_safe;
2566 
2567  /* Convert the plan into an InitPlan in the outer query. */
2568  SS_make_initplan_from_plan(root, subroot, plan, mminfo->param);
2569  }
2570 
2571  /* Generate the output plan --- basically just a Result */
2572  tlist = build_path_tlist(root, &best_path->path);
2573 
2574  plan = make_result(tlist, (Node *) best_path->quals, NULL);
2575 
2576  copy_generic_path_info(&plan->plan, (Path *) best_path);
2577 
2578  /*
2579  * During setrefs.c, we'll need to replace references to the Agg nodes
2580  * with InitPlan output params. (We can't just do that locally in the
2581  * MinMaxAgg node, because path nodes above here may have Agg references
2582  * as well.) Save the mmaggregates list to tell setrefs.c to do that.
2583  */
2584  Assert(root->minmax_aggs == NIL);
2585  root->minmax_aggs = best_path->mmaggregates;
2586 
2587  return plan;
2588 }
2589 
2590 /*
2591  * create_windowagg_plan
2592  *
2593  * Create a WindowAgg plan for 'best_path' and (recursively) plans
2594  * for its subpaths.
2595  */
 2596 static WindowAgg *
 2597 create_windowagg_plan(PlannerInfo *root, WindowAggPath *best_path)
2598 {
2599  WindowAgg *plan;
2600  WindowClause *wc = best_path->winclause;
2601  int numPart = list_length(wc->partitionClause);
2602  int numOrder = list_length(wc->orderClause);
2603  Plan *subplan;
2604  List *tlist;
2605  int partNumCols;
2606  AttrNumber *partColIdx;
2607  Oid *partOperators;
2608  Oid *partCollations;
2609  int ordNumCols;
2610  AttrNumber *ordColIdx;
2611  Oid *ordOperators;
2612  Oid *ordCollations;
2613  ListCell *lc;
2614 
2615  /*
2616  * Choice of tlist here is motivated by the fact that WindowAgg will be
2617  * storing the input rows of window frames in a tuplestore; it therefore
2618  * behooves us to request a small tlist to avoid wasting space. We do of
2619  * course need grouping columns to be available.
2620  */
 2621  subplan = create_plan_recurse(root, best_path->subpath,
 2622  CP_LABEL_TLIST | CP_SMALL_TLIST);
2623 
2624  tlist = build_path_tlist(root, &best_path->path);
2625 
2626  /*
2627  * Convert SortGroupClause lists into arrays of attr indexes and equality
2628  * operators, as wanted by executor. (Note: in principle, it's possible
2629  * to drop some of the sort columns, if they were proved redundant by
2630  * pathkey logic. However, it doesn't seem worth going out of our way to
2631  * optimize such cases. In any case, we must *not* remove the ordering
2632  * column for RANGE OFFSET cases, as the executor needs that for in_range
2633  * tests even if it's known to be equal to some partitioning column.)
2634  */
2635  partColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numPart);
2636  partOperators = (Oid *) palloc(sizeof(Oid) * numPart);
2637  partCollations = (Oid *) palloc(sizeof(Oid) * numPart);
2638 
2639  partNumCols = 0;
2640  foreach(lc, wc->partitionClause)
2641  {
2642  SortGroupClause *sgc = (SortGroupClause *) lfirst(lc);
2643  TargetEntry *tle = get_sortgroupclause_tle(sgc, subplan->targetlist);
2644 
2645  Assert(OidIsValid(sgc->eqop));
2646  partColIdx[partNumCols] = tle->resno;
2647  partOperators[partNumCols] = sgc->eqop;
2648  partCollations[partNumCols] = exprCollation((Node *) tle->expr);
2649  partNumCols++;
2650  }
2651 
2652  ordColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numOrder);
2653  ordOperators = (Oid *) palloc(sizeof(Oid) * numOrder);
2654  ordCollations = (Oid *) palloc(sizeof(Oid) * numOrder);
2655 
2656  ordNumCols = 0;
2657  foreach(lc, wc->orderClause)
2658  {
2659  SortGroupClause *sgc = (SortGroupClause *) lfirst(lc);
2660  TargetEntry *tle = get_sortgroupclause_tle(sgc, subplan->targetlist);
2661 
2662  Assert(OidIsValid(sgc->eqop));
2663  ordColIdx[ordNumCols] = tle->resno;
2664  ordOperators[ordNumCols] = sgc->eqop;
2665  ordCollations[ordNumCols] = exprCollation((Node *) tle->expr);
2666  ordNumCols++;
2667  }
2668 
2669  /* And finally we can make the WindowAgg node */
2670  plan = make_windowagg(tlist,
2671  wc->winref,
2672  partNumCols,
2673  partColIdx,
2674  partOperators,
2675  partCollations,
2676  ordNumCols,
2677  ordColIdx,
2678  ordOperators,
2679  ordCollations,
2680  wc->frameOptions,
2681  wc->startOffset,
2682  wc->endOffset,
2683  wc->startInRangeFunc,
2684  wc->endInRangeFunc,
2685  wc->inRangeColl,
2686  wc->inRangeAsc,
2687  wc->inRangeNullsFirst,
2688  wc->runCondition,
2689  best_path->qual,
2690  best_path->topwindow,
2691  subplan);
2692 
2693  copy_generic_path_info(&plan->plan, (Path *) best_path);
2694 
2695  return plan;
2696 }
2697 
2698 /*
2699  * create_setop_plan
2700  *
2701  * Create a SetOp plan for 'best_path' and (recursively) plans
2702  * for its subpaths.
2703  */
2704 static SetOp *
2705 create_setop_plan(PlannerInfo *root, SetOpPath *best_path, int flags)
2706 {
2707  SetOp *plan;
2708  Plan *subplan;
2709  long numGroups;
2710 
2711  /*
2712  * SetOp doesn't project, so tlist requirements pass through; moreover we
2713  * need grouping columns to be labeled.
2714  */
2715  subplan = create_plan_recurse(root, best_path->subpath,
2716  flags | CP_LABEL_TLIST);
2717 
2718  /* Convert numGroups to long int --- but 'ware overflow! */
2719  numGroups = clamp_cardinality_to_long(best_path->numGroups);
2720 
2721  plan = make_setop(best_path->cmd,
2722  best_path->strategy,
2723  subplan,
2724  best_path->distinctList,
2725  best_path->flagColIdx,
2726  best_path->firstFlag,
2727  numGroups);
2728 
2729  copy_generic_path_info(&plan->plan, (Path *) best_path);
2730 
2731  return plan;
2732 }
2733 
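An aside, not part of createplan.c: clamp_cardinality_to_long() is defined elsewhere in the planner. The sketch below only illustrates the kind of overflow-safe clamping the "'ware overflow" comment above alludes to; the real function may differ in its edge-case handling.

#include <limits.h>
#include <math.h>

/*
 * Illustrative only: squeeze a (possibly huge or not-a-number) row-count
 * estimate into a long without overflowing.  Not the planner's actual
 * clamp_cardinality_to_long().
 */
static long
clamp_rows_to_long(double rows)
{
	if (isnan(rows))
		return LONG_MAX;
	if (rows <= 0.0)
		return 0;
	if (rows >= (double) LONG_MAX)
		return LONG_MAX;
	return (long) rows;
}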
2734 /*
2735  * create_recursiveunion_plan
2736  *
2737  * Create a RecursiveUnion plan for 'best_path' and (recursively) plans
2738  * for its subpaths.
2739  */
 2740 static RecursiveUnion *
 2741 create_recursiveunion_plan(PlannerInfo *root, RecursiveUnionPath *best_path)
2742 {
2743  RecursiveUnion *plan;
2744  Plan *leftplan;
2745  Plan *rightplan;
2746  List *tlist;
2747  long numGroups;
2748 
2749  /* Need both children to produce same tlist, so force it */
2750  leftplan = create_plan_recurse(root, best_path->leftpath, CP_EXACT_TLIST);
2751  rightplan = create_plan_recurse(root, best_path->rightpath, CP_EXACT_TLIST);
2752 
2753  tlist = build_path_tlist(root, &best_path->path);
2754 
2755  /* Convert numGroups to long int --- but 'ware overflow! */
2756  numGroups = clamp_cardinality_to_long(best_path->numGroups);
2757 
2758  plan = make_recursive_union(tlist,
2759  leftplan,
2760  rightplan,
2761  best_path->wtParam,
2762  best_path->distinctList,
2763  numGroups);
2764 
2765  copy_generic_path_info(&plan->plan, (Path *) best_path);
2766 
2767  return plan;
2768 }
2769 
2770 /*
2771  * create_lockrows_plan
2772  *
2773  * Create a LockRows plan for 'best_path' and (recursively) plans
2774  * for its subpaths.
2775  */
 2776 static LockRows *
 2777 create_lockrows_plan(PlannerInfo *root, LockRowsPath *best_path,
2778  int flags)
2779 {
2780  LockRows *plan;
2781  Plan *subplan;
2782 
2783  /* LockRows doesn't project, so tlist requirements pass through */
2784  subplan = create_plan_recurse(root, best_path->subpath, flags);
2785 
2786  plan = make_lockrows(subplan, best_path->rowMarks, best_path->epqParam);
2787 
2788  copy_generic_path_info(&plan->plan, (Path *) best_path);
2789 
2790  return plan;
2791 }
2792 
2793 /*
2794  * create_modifytable_plan
2795  * Create a ModifyTable plan for 'best_path'.
2796  *
2797  * Returns a Plan node.
2798  */
 2799 static ModifyTable *
 2800 create_modifytable_plan(PlannerInfo *root, ModifyTablePath *best_path)
2801 {
2802  ModifyTable *plan;
2803  Path *subpath = best_path->subpath;
2804  Plan *subplan;
2805 
2806  /* Subplan must produce exactly the specified tlist */
2807  subplan = create_plan_recurse(root, subpath, CP_EXACT_TLIST);
2808 
 2809  /* Transfer resname/resjunk labeling, too, to keep executor happy */
 2810  apply_tlist_labeling(subplan->targetlist, root->processed_tlist);
2811 
2812  plan = make_modifytable(root,
2813  subplan,
2814  best_path->operation,
2815  best_path->canSetTag,
2816  best_path->nominalRelation,
2817  best_path->rootRelation,
2818  best_path->partColsUpdated,
2819  best_path->resultRelations,
2820  best_path->updateColnosLists,
2821  best_path->withCheckOptionLists,
2822  best_path->returningLists,
2823  best_path->rowMarks,
2824  best_path->onconflict,
2825  best_path->mergeActionLists,
2826  best_path->epqParam);
2827 
2828  copy_generic_path_info(&plan->plan, &best_path->path);
2829 
2830  return plan;
2831 }
2832 
2833 /*
2834  * create_limit_plan
2835  *
2836  * Create a Limit plan for 'best_path' and (recursively) plans
2837  * for its subpaths.
2838  */
2839 static Limit *
2840 create_limit_plan(PlannerInfo *root, LimitPath *best_path, int flags)
2841 {
2842  Limit *plan;
2843  Plan *subplan;
2844  int numUniqkeys = 0;
2845  AttrNumber *uniqColIdx = NULL;
2846  Oid *uniqOperators = NULL;
2847  Oid *uniqCollations = NULL;
2848 
2849  /* Limit doesn't project, so tlist requirements pass through */
2850  subplan = create_plan_recurse(root, best_path->subpath, flags);
2851 
2852  /* Extract information necessary for comparing rows for WITH TIES. */
2853  if (best_path->limitOption == LIMIT_OPTION_WITH_TIES)
2854  {
2855  Query *parse = root->parse;
2856  ListCell *l;
2857 
2858  numUniqkeys = list_length(parse->sortClause);
2859  uniqColIdx = (AttrNumber *) palloc(numUniqkeys * sizeof(AttrNumber));
2860  uniqOperators = (Oid *) palloc(numUniqkeys * sizeof(Oid));
2861  uniqCollations = (Oid *) palloc(numUniqkeys * sizeof(Oid));
2862 
2863  numUniqkeys = 0;
2864  foreach(l, parse->sortClause)
2865  {
2866  SortGroupClause *sortcl = (SortGroupClause *) lfirst(l);
2867  TargetEntry *tle = get_sortgroupclause_tle(sortcl, parse->targetList);
2868 
2869  uniqColIdx[numUniqkeys] = tle->resno;
2870  uniqOperators[numUniqkeys] = sortcl->eqop;
2871  uniqCollations[numUniqkeys] = exprCollation((Node *) tle->expr);
2872  numUniqkeys++;
2873  }
2874  }
2875 
2876  plan = make_limit(subplan,
2877  best_path->limitOffset,
2878  best_path->limitCount,
2879  best_path->limitOption,
2880  numUniqkeys, uniqColIdx, uniqOperators, uniqCollations);
2881 
2882  copy_generic_path_info(&plan->plan, (Path *) best_path);
2883 
2884  return plan;
2885 }
2886 
2887 
2888 /*****************************************************************************
2889  *
2890  * BASE-RELATION SCAN METHODS
2891  *
2892  *****************************************************************************/
2893 
2894 
2895 /*
2896  * create_seqscan_plan
2897  * Returns a seqscan plan for the base relation scanned by 'best_path'
2898  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
2899  */
 2900 static SeqScan *
 2901 create_seqscan_plan(PlannerInfo *root, Path *best_path,
2902  List *tlist, List *scan_clauses)
2903 {
2904  SeqScan *scan_plan;
2905  Index scan_relid = best_path->parent->relid;
2906 
2907  /* it should be a base rel... */
2908  Assert(scan_relid > 0);
2909  Assert(best_path->parent->rtekind == RTE_RELATION);
2910 
2911  /* Sort clauses into best execution order */
2912  scan_clauses = order_qual_clauses(root, scan_clauses);
2913 
2914  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
2915  scan_clauses = extract_actual_clauses(scan_clauses, false);
2916 
2917  /* Replace any outer-relation variables with nestloop params */
2918  if (best_path->param_info)
2919  {
2920  scan_clauses = (List *)
2921  replace_nestloop_params(root, (Node *) scan_clauses);
2922  }
2923 
2924  scan_plan = make_seqscan(tlist,
2925  scan_clauses,
2926  scan_relid);
2927 
2928  copy_generic_path_info(&scan_plan->scan.plan, best_path);
2929 
2930  return scan_plan;
2931 }
2932 
2933 /*
2934  * create_samplescan_plan
2935  * Returns a samplescan plan for the base relation scanned by 'best_path'
2936  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
2937  */
 2938 static SampleScan *
 2939 create_samplescan_plan(PlannerInfo *root, Path *best_path,
2940  List *tlist, List *scan_clauses)
2941 {
2942  SampleScan *scan_plan;
2943  Index scan_relid = best_path->parent->relid;
2944  RangeTblEntry *rte;
2945  TableSampleClause *tsc;
2946 
2947  /* it should be a base rel with a tablesample clause... */
2948  Assert(scan_relid > 0);
2949  rte = planner_rt_fetch(scan_relid, root);
2950  Assert(rte->rtekind == RTE_RELATION);
2951  tsc = rte->tablesample;
2952  Assert(tsc != NULL);
2953 
2954  /* Sort clauses into best execution order */
2955  scan_clauses = order_qual_clauses(root, scan_clauses);
2956 
2957  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
2958  scan_clauses = extract_actual_clauses(scan_clauses, false);
2959 
2960  /* Replace any outer-relation variables with nestloop params */
2961  if (best_path->param_info)
2962  {
2963  scan_clauses = (List *)
2964  replace_nestloop_params(root, (Node *) scan_clauses);
2965  tsc = (TableSampleClause *)
2966  replace_nestloop_params(root, (Node *) tsc);
2967  }
2968 
2969  scan_plan = make_samplescan(tlist,
2970  scan_clauses,
2971  scan_relid,
2972  tsc);
2973 
2974  copy_generic_path_info(&scan_plan->scan.plan, best_path);
2975 
2976  return scan_plan;
2977 }
2978 
2979 /*
2980  * create_indexscan_plan
2981  * Returns an indexscan plan for the base relation scanned by 'best_path'
2982  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
2983  *
2984  * We use this for both plain IndexScans and IndexOnlyScans, because the
2985  * qual preprocessing work is the same for both. Note that the caller tells
2986  * us which to build --- we don't look at best_path->path.pathtype, because
2987  * create_bitmap_subplan needs to be able to override the prior decision.
2988  */
 2989 static Scan *
 2990 create_indexscan_plan(PlannerInfo *root,
2991  IndexPath *best_path,
2992  List *tlist,
2993  List *scan_clauses,
2994  bool indexonly)
2995 {
2996  Scan *scan_plan;
2997  List *indexclauses = best_path->indexclauses;
2998  List *indexorderbys = best_path->indexorderbys;
2999  Index baserelid = best_path->path.parent->relid;
3000  IndexOptInfo *indexinfo = best_path->indexinfo;
3001  Oid indexoid = indexinfo->indexoid;
3002  List *qpqual;
3003  List *stripped_indexquals;
3004  List *fixed_indexquals;
3005  List *fixed_indexorderbys;
3006  List *indexorderbyops = NIL;
3007  ListCell *l;
3008 
3009  /* it should be a base rel... */
3010  Assert(baserelid > 0);
3011  Assert(best_path->path.parent->rtekind == RTE_RELATION);
3012  /* check the scan direction is valid */
3013  Assert(best_path->indexscandir == ForwardScanDirection ||
3014  best_path->indexscandir == BackwardScanDirection);
3015 
3016  /*
3017  * Extract the index qual expressions (stripped of RestrictInfos) from the
3018  * IndexClauses list, and prepare a copy with index Vars substituted for
3019  * table Vars. (This step also does replace_nestloop_params on the
3020  * fixed_indexquals.)
3021  */
3022  fix_indexqual_references(root, best_path,
3023  &stripped_indexquals,
3024  &fixed_indexquals);
3025 
3026  /*
3027  * Likewise fix up index attr references in the ORDER BY expressions.
3028  */
3029  fixed_indexorderbys = fix_indexorderby_references(root, best_path);
3030 
3031  /*
3032  * The qpqual list must contain all restrictions not automatically handled
3033  * by the index, other than pseudoconstant clauses which will be handled
3034  * by a separate gating plan node. All the predicates in the indexquals
3035  * will be checked (either by the index itself, or by nodeIndexscan.c),
3036  * but if there are any "special" operators involved then they must be
3037  * included in qpqual. The upshot is that qpqual must contain
3038  * scan_clauses minus whatever appears in indexquals.
3039  *
3040  * is_redundant_with_indexclauses() detects cases where a scan clause is
3041  * present in the indexclauses list or is generated from the same
3042  * EquivalenceClass as some indexclause, and is therefore redundant with
3043  * it, though not equal. (The latter happens when indxpath.c prefers a
3044  * different derived equality than what generate_join_implied_equalities
3045  * picked for a parameterized scan's ppi_clauses.) Note that it will not
3046  * match to lossy index clauses, which is critical because we have to
3047  * include the original clause in qpqual in that case.
3048  *
3049  * In some situations (particularly with OR'd index conditions) we may
3050  * have scan_clauses that are not equal to, but are logically implied by,
3051  * the index quals; so we also try a predicate_implied_by() check to see
3052  * if we can discard quals that way. (predicate_implied_by assumes its
3053  * first input contains only immutable functions, so we have to check
3054  * that.)
3055  *
3056  * Note: if you change this bit of code you should also look at
3057  * extract_nonindex_conditions() in costsize.c.
3058  */
3059  qpqual = NIL;
3060  foreach(l, scan_clauses)
3061  {
3062  RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
3063 
3064  if (rinfo->pseudoconstant)
3065  continue; /* we may drop pseudoconstants here */
3066  if (is_redundant_with_indexclauses(rinfo, indexclauses))
3067  continue; /* dup or derived from same EquivalenceClass */
3068  if (!contain_mutable_functions((Node *) rinfo->clause) &&
3069  predicate_implied_by(list_make1(rinfo->clause), stripped_indexquals,
3070  false))
3071  continue; /* provably implied by indexquals */
3072  qpqual = lappend(qpqual, rinfo);
3073  }
3074 
3075  /* Sort clauses into best execution order */
3076  qpqual = order_qual_clauses(root, qpqual);
3077 
3078  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3079  qpqual = extract_actual_clauses(qpqual, false);
3080 
3081  /*
3082  * We have to replace any outer-relation variables with nestloop params in
3083  * the indexqualorig, qpqual, and indexorderbyorig expressions. A bit
3084  * annoying to have to do this separately from the processing in
3085  * fix_indexqual_references --- rethink this when generalizing the inner
3086  * indexscan support. But note we can't really do this earlier because
3087  * it'd break the comparisons to predicates above ... (or would it? Those
3088  * wouldn't have outer refs)
3089  */
3090  if (best_path->path.param_info)
3091  {
3092  stripped_indexquals = (List *)
3093  replace_nestloop_params(root, (Node *) stripped_indexquals);
3094  qpqual = (List *)
3095  replace_nestloop_params(root, (Node *) qpqual);
3096  indexorderbys = (List *)
3097  replace_nestloop_params(root, (Node *) indexorderbys);
3098  }
3099 
3100  /*
3101  * If there are ORDER BY expressions, look up the sort operators for their
3102  * result datatypes.
3103  */
3104  if (indexorderbys)
3105  {
3106  ListCell *pathkeyCell,
3107  *exprCell;
3108 
3109  /*
3110  * PathKey contains OID of the btree opfamily we're sorting by, but
3111  * that's not quite enough because we need the expression's datatype
3112  * to look up the sort operator in the operator family.
3113  */
3114  Assert(list_length(best_path->path.pathkeys) == list_length(indexorderbys));
3115  forboth(pathkeyCell, best_path->path.pathkeys, exprCell, indexorderbys)
3116  {
3117  PathKey *pathkey = (PathKey *) lfirst(pathkeyCell);
3118  Node *expr = (Node *) lfirst(exprCell);
3119  Oid exprtype = exprType(expr);
3120  Oid sortop;
3121 
3122  /* Get sort operator from opfamily */
3123  sortop = get_opfamily_member(pathkey->pk_opfamily,
3124  exprtype,
3125  exprtype,
3126  pathkey->pk_strategy);
3127  if (!OidIsValid(sortop))
3128  elog(ERROR, "missing operator %d(%u,%u) in opfamily %u",
3129  pathkey->pk_strategy, exprtype, exprtype, pathkey->pk_opfamily);
3130  indexorderbyops = lappend_oid(indexorderbyops, sortop);
3131  }
3132  }
3133 
3134  /*
3135  * For an index-only scan, we must mark indextlist entries as resjunk if
3136  * they are columns that the index AM can't return; this cues setrefs.c to
3137  * not generate references to those columns.
3138  */
3139  if (indexonly)
3140  {
3141  int i = 0;
3142 
3143  foreach(l, indexinfo->indextlist)
3144  {
3145  TargetEntry *indextle = (TargetEntry *) lfirst(l);
3146 
3147  indextle->resjunk = !indexinfo->canreturn[i];
3148  i++;
3149  }
3150  }
3151 
3152  /* Finally ready to build the plan node */
3153  if (indexonly)
3154  scan_plan = (Scan *) make_indexonlyscan(tlist,
3155  qpqual,
3156  baserelid,
3157  indexoid,
3158  fixed_indexquals,
3159  stripped_indexquals,
3160  fixed_indexorderbys,
3161  indexinfo->indextlist,
3162  best_path->indexscandir);
3163  else
3164  scan_plan = (Scan *) make_indexscan(tlist,
3165  qpqual,
3166  baserelid,
3167  indexoid,
3168  fixed_indexquals,
3169  stripped_indexquals,
3170  fixed_indexorderbys,
3171  indexorderbys,
3172  indexorderbyops,
3173  best_path->indexscandir);
3174 
3175  copy_generic_path_info(&scan_plan->plan, &best_path->path);
3176 
3177  return scan_plan;
3178 }
3179 
3180 /*
3181  * create_bitmap_scan_plan
3182  * Returns a bitmap scan plan for the base relation scanned by 'best_path'
3183  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3184  */
 3185 static BitmapHeapScan *
 3186 create_bitmap_scan_plan(PlannerInfo *root,
3187  BitmapHeapPath *best_path,
3188  List *tlist,
3189  List *scan_clauses)
3190 {
3191  Index baserelid = best_path->path.parent->relid;
3192  Plan *bitmapqualplan;
3193  List *bitmapqualorig;
3194  List *indexquals;
3195  List *indexECs;
3196  List *qpqual;
3197  ListCell *l;
3198  BitmapHeapScan *scan_plan;
3199 
3200  /* it should be a base rel... */
3201  Assert(baserelid > 0);
3202  Assert(best_path->path.parent->rtekind == RTE_RELATION);
3203 
3204  /* Process the bitmapqual tree into a Plan tree and qual lists */
3205  bitmapqualplan = create_bitmap_subplan(root, best_path->bitmapqual,
3206  &bitmapqualorig, &indexquals,
3207  &indexECs);
3208 
3209  if (best_path->path.parallel_aware)
3210  bitmap_subplan_mark_shared(bitmapqualplan);
3211 
3212  /*
3213  * The qpqual list must contain all restrictions not automatically handled
3214  * by the index, other than pseudoconstant clauses which will be handled
3215  * by a separate gating plan node. All the predicates in the indexquals
3216  * will be checked (either by the index itself, or by
3217  * nodeBitmapHeapscan.c), but if there are any "special" operators
3218  * involved then they must be added to qpqual. The upshot is that qpqual
3219  * must contain scan_clauses minus whatever appears in indexquals.
3220  *
3221  * This loop is similar to the comparable code in create_indexscan_plan(),
3222  * but with some differences because it has to compare the scan clauses to
3223  * stripped (no RestrictInfos) indexquals. See comments there for more
3224  * info.
3225  *
3226  * In normal cases simple equal() checks will be enough to spot duplicate
3227  * clauses, so we try that first. We next see if the scan clause is
3228  * redundant with any top-level indexqual by virtue of being generated
3229  * from the same EC. After that, try predicate_implied_by().
3230  *
3231  * Unlike create_indexscan_plan(), the predicate_implied_by() test here is
3232  * useful for getting rid of qpquals that are implied by index predicates,
3233  * because the predicate conditions are included in the "indexquals"
3234  * returned by create_bitmap_subplan(). Bitmap scans have to do it that
3235  * way because predicate conditions need to be rechecked if the scan
3236  * becomes lossy, so they have to be included in bitmapqualorig.
3237  */
3238  qpqual = NIL;
3239  foreach(l, scan_clauses)
3240  {
3241  RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
3242  Node *clause = (Node *) rinfo->clause;
3243 
3244  if (rinfo->pseudoconstant)
3245  continue; /* we may drop pseudoconstants here */
3246  if (list_member(indexquals, clause))
3247  continue; /* simple duplicate */
3248  if (rinfo->parent_ec && list_member_ptr(indexECs, rinfo->parent_ec))
3249  continue; /* derived from same EquivalenceClass */
3250  if (!contain_mutable_functions(clause) &&
3251  predicate_implied_by(list_make1(clause), indexquals, false))
3252  continue; /* provably implied by indexquals */
3253  qpqual = lappend(qpqual, rinfo);
3254  }
3255 
3256  /* Sort clauses into best execution order */
3257  qpqual = order_qual_clauses(root, qpqual);
3258 
3259  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3260  qpqual = extract_actual_clauses(qpqual, false);
3261 
3262  /*
3263  * When dealing with special operators, we will at this point have
3264  * duplicate clauses in qpqual and bitmapqualorig. We may as well drop
3265  * 'em from bitmapqualorig, since there's no point in making the tests
3266  * twice.
3267  */
3268  bitmapqualorig = list_difference_ptr(bitmapqualorig, qpqual);
3269 
3270  /*
3271  * We have to replace any outer-relation variables with nestloop params in
3272  * the qpqual and bitmapqualorig expressions. (This was already done for
3273  * expressions attached to plan nodes in the bitmapqualplan tree.)
3274  */
3275  if (best_path->path.param_info)
3276  {
3277  qpqual = (List *)
3278  replace_nestloop_params(root, (Node *) qpqual);
3279  bitmapqualorig = (List *)
3280  replace_nestloop_params(root, (Node *) bitmapqualorig);
3281  }
3282 
3283  /* Finally ready to build the plan node */
3284  scan_plan = make_bitmap_heapscan(tlist,
3285  qpqual,
3286  bitmapqualplan,
3287  bitmapqualorig,
3288  baserelid);
3289 
3290  copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
3291 
3292  return scan_plan;
3293 }
3294 
3295 /*
3296  * Given a bitmapqual tree, generate the Plan tree that implements it
3297  *
3298  * As byproducts, we also return in *qual and *indexqual the qual lists
3299  * (in implicit-AND form, without RestrictInfos) describing the original index
3300  * conditions and the generated indexqual conditions. (These are the same in
3301  * simple cases, but when special index operators are involved, the former
3302  * list includes the special conditions while the latter includes the actual
3303  * indexable conditions derived from them.) Both lists include partial-index
3304  * predicates, because we have to recheck predicates as well as index
3305  * conditions if the bitmap scan becomes lossy.
3306  *
3307  * In addition, we return a list of EquivalenceClass pointers for all the
3308  * top-level indexquals that were possibly-redundantly derived from ECs.
3309  * This allows removal of scan_clauses that are redundant with such quals.
3310  * (We do not attempt to detect such redundancies for quals that are within
3311  * OR subtrees. This could be done in a less hacky way if we returned the
3312  * indexquals in RestrictInfo form, but that would be slower and still pretty
3313  * messy, since we'd have to build new RestrictInfos in many cases.)
3314  */
 3315 static Plan *
 3316 create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
3317  List **qual, List **indexqual, List **indexECs)
3318 {
3319  Plan *plan;
3320 
3321  if (IsA(bitmapqual, BitmapAndPath))
3322  {
3323  BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
3324  List *subplans = NIL;
3325  List *subquals = NIL;
3326  List *subindexquals = NIL;
3327  List *subindexECs = NIL;
3328  ListCell *l;
3329 
3330  /*
3331  * There may well be redundant quals among the subplans, since a
3332  * top-level WHERE qual might have gotten used to form several
3333  * different index quals. We don't try exceedingly hard to eliminate
3334  * redundancies, but we do eliminate obvious duplicates by using
3335  * list_concat_unique.
3336  */
3337  foreach(l, apath->bitmapquals)
3338  {
3339  Plan *subplan;
3340  List *subqual;
3341  List *subindexqual;
3342  List *subindexEC;
3343 
3344  subplan = create_bitmap_subplan(root, (Path *) lfirst(l),
3345  &subqual, &subindexqual,
3346  &subindexEC);
3347  subplans = lappend(subplans, subplan);
3348  subquals = list_concat_unique(subquals, subqual);
3349  subindexquals = list_concat_unique(subindexquals, subindexqual);
3350  /* Duplicates in indexECs aren't worth getting rid of */
3351  subindexECs = list_concat(subindexECs, subindexEC);
3352  }
3353  plan = (Plan *) make_bitmap_and(subplans);
3354  plan->startup_cost = apath->path.startup_cost;
3355  plan->total_cost = apath->path.total_cost;
3356  plan->plan_rows =
3357  clamp_row_est(apath->bitmapselectivity * apath->path.parent->tuples);
3358  plan->plan_width = 0; /* meaningless */
3359  plan->parallel_aware = false;
3360  plan->parallel_safe = apath->path.parallel_safe;
3361  *qual = subquals;
3362  *indexqual = subindexquals;
3363  *indexECs = subindexECs;
3364  }
3365  else if (IsA(bitmapqual, BitmapOrPath))
3366  {
3367  BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
3368  List *subplans = NIL;
3369  List *subquals = NIL;
3370  List *subindexquals = NIL;
3371  bool const_true_subqual = false;
3372  bool const_true_subindexqual = false;
3373  ListCell *l;
3374 
3375  /*
3376  * Here, we only detect qual-free subplans. A qual-free subplan would
3377  * cause us to generate "... OR true ..." which we may as well reduce
3378  * to just "true". We do not try to eliminate redundant subclauses
3379  * because (a) it's not as likely as in the AND case, and (b) we might
3380  * well be working with hundreds or even thousands of OR conditions,
3381  * perhaps from a long IN list. The performance of list_append_unique
3382  * would be unacceptable.
3383  */
3384  foreach(l, opath->bitmapquals)
3385  {
3386  Plan *subplan;
3387  List *subqual;
3388  List *subindexqual;
3389  List *subindexEC;
3390 
3391  subplan = create_bitmap_subplan(root, (Path *) lfirst(l),
3392  &subqual, &subindexqual,
3393  &subindexEC);
3394  subplans = lappend(subplans, subplan);
3395  if (subqual == NIL)
3396  const_true_subqual = true;
3397  else if (!const_true_subqual)
3398  subquals = lappend(subquals,
3399  make_ands_explicit(subqual));
3400  if (subindexqual == NIL)
3401  const_true_subindexqual = true;
3402  else if (!const_true_subindexqual)
3403  subindexquals = lappend(subindexquals,
3404  make_ands_explicit(subindexqual));
3405  }
3406 
3407  /*
3408  * In the presence of ScalarArrayOpExpr quals, we might have built
3409  * BitmapOrPaths with just one subpath; don't add an OR step.
3410  */
3411  if (list_length(subplans) == 1)
3412  {
3413  plan = (Plan *) linitial(subplans);
3414  }
3415  else
3416  {
3417  plan = (Plan *) make_bitmap_or(subplans);
3418  plan->startup_cost = opath->path.startup_cost;
3419  plan->total_cost = opath->path.total_cost;
3420  plan->plan_rows =
3421  clamp_row_est(opath->bitmapselectivity * opath->path.parent->tuples);
3422  plan->plan_width = 0; /* meaningless */
3423  plan->parallel_aware = false;
3424  plan->parallel_safe = opath->path.parallel_safe;
3425  }
3426 
3427  /*
3428  * If there were constant-TRUE subquals, the OR reduces to constant
3429  * TRUE. Also, avoid generating one-element ORs, which could happen
3430  * due to redundancy elimination or ScalarArrayOpExpr quals.
3431  */
3432  if (const_true_subqual)
3433  *qual = NIL;
3434  else if (list_length(subquals) <= 1)
3435  *qual = subquals;
3436  else
3437  *qual = list_make1(make_orclause(subquals));
3438  if (const_true_subindexqual)
3439  *indexqual = NIL;
3440  else if (list_length(subindexquals) <= 1)
3441  *indexqual = subindexquals;
3442  else
3443  *indexqual = list_make1(make_orclause(subindexquals));
3444  *indexECs = NIL;
3445  }
3446  else if (IsA(bitmapqual, IndexPath))
3447  {
3448  IndexPath *ipath = (IndexPath *) bitmapqual;
3449  IndexScan *iscan;
3450  List *subquals;
3451  List *subindexquals;
3452  List *subindexECs;
3453  ListCell *l;
3454 
3455  /* Use the regular indexscan plan build machinery... */
3456  iscan = castNode(IndexScan,
3457  create_indexscan_plan(root, ipath,
3458  NIL, NIL, false));
3459  /* then convert to a bitmap indexscan */
3460  plan = (Plan *) make_bitmap_indexscan(iscan->scan.scanrelid,
3461  iscan->indexid,
3462  iscan->indexqual,
3463  iscan->indexqualorig);
3464  /* and set its cost/width fields appropriately */
3465  plan->startup_cost = 0.0;
3466  plan->total_cost = ipath->indextotalcost;
3467  plan->plan_rows =
3468  clamp_row_est(ipath->indexselectivity * ipath->path.parent->tuples);
3469  plan->plan_width = 0; /* meaningless */
3470  plan->parallel_aware = false;
3471  plan->parallel_safe = ipath->path.parallel_safe;
3472  /* Extract original index clauses, actual index quals, relevant ECs */
3473  subquals = NIL;
3474  subindexquals = NIL;
3475  subindexECs = NIL;
3476  foreach(l, ipath->indexclauses)
3477  {
3478  IndexClause *iclause = (IndexClause *) lfirst(l);
3479  RestrictInfo *rinfo = iclause->rinfo;
3480 
3481  Assert(!rinfo->pseudoconstant);
3482  subquals = lappend(subquals, rinfo->clause);
3483  subindexquals = list_concat(subindexquals,
3484  get_actual_clauses(iclause->indexquals));
3485  if (rinfo->parent_ec)
3486  subindexECs = lappend(subindexECs, rinfo->parent_ec);
3487  }
3488  /* We can add any index predicate conditions, too */
3489  foreach(l, ipath->indexinfo->indpred)
3490  {
3491  Expr *pred = (Expr *) lfirst(l);
3492 
3493  /*
3494  * We know that the index predicate must have been implied by the
3495  * query condition as a whole, but it may or may not be implied by
3496  * the conditions that got pushed into the bitmapqual. Avoid
3497  * generating redundant conditions.
3498  */
3499  if (!predicate_implied_by(list_make1(pred), subquals, false))
3500  {
3501  subquals = lappend(subquals, pred);
3502  subindexquals = lappend(subindexquals, pred);
3503  }
3504  }
3505  *qual = subquals;
3506  *indexqual = subindexquals;
3507  *indexECs = subindexECs;
3508  }
3509  else
3510  {
3511  elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
3512  plan = NULL; /* keep compiler quiet */
3513  }
3514 
3515  return plan;
3516 }
3517 
3518 /*
3519  * create_tidscan_plan
3520  * Returns a tidscan plan for the base relation scanned by 'best_path'
3521  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3522  */
3523 static TidScan *
3524 create_tidscan_plan(PlannerInfo *root, TidPath *best_path,
3525  List *tlist, List *scan_clauses)
3526 {
3527  TidScan *scan_plan;
3528  Index scan_relid = best_path->path.parent->relid;
3529  List *tidquals = best_path->tidquals;
3530 
3531  /* it should be a base rel... */
3532  Assert(scan_relid > 0);
3533  Assert(best_path->path.parent->rtekind == RTE_RELATION);
3534 
3535  /*
3536  * The qpqual list must contain all restrictions not enforced by the
3537  * tidquals list. Since tidquals has OR semantics, we have to be careful
3538  * about matching it up to scan_clauses. It's convenient to handle the
3539  * single-tidqual case separately from the multiple-tidqual case. In the
3540  * single-tidqual case, we look through the scan_clauses while they are
3541  * still in RestrictInfo form, and drop any that are redundant with the
3542  * tidqual.
3543  *
3544  * In normal cases simple pointer equality checks will be enough to spot
3545  * duplicate RestrictInfos, so we try that first.
3546  *
3547  * Another common case is that a scan_clauses entry is generated from the
3548  * same EquivalenceClass as some tidqual, and is therefore redundant with
3549  * it, though not equal.
3550  *
3551  * Unlike indexpaths, we don't bother with predicate_implied_by(); the
3552  * number of cases where it could win is pretty small.
3553  */
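 /*
  * Illustrative sketch (hypothetical query): for
  *   SELECT * FROM t WHERE ctid = '(0,1)'::tid AND flag
  * the lone tidqual is the ctid equality; that RestrictInfo is skipped
  * below, so only "flag" survives as a qpqual to be rechecked.
  */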
3554  if (list_length(tidquals) == 1)
3555  {
3556  List *qpqual = NIL;
3557  ListCell *l;
3558 
3559  foreach(l, scan_clauses)
3560  {
3561  RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
3562 
3563  if (rinfo->pseudoconstant)
3564  continue; /* we may drop pseudoconstants here */
3565  if (list_member_ptr(tidquals, rinfo))
3566  continue; /* simple duplicate */
3567  if (is_redundant_derived_clause(rinfo, tidquals))
3568  continue; /* derived from same EquivalenceClass */
3569  qpqual = lappend(qpqual, rinfo);
3570  }
3571  scan_clauses = qpqual;
3572  }
3573 
3574  /* Sort clauses into best execution order */
3575  scan_clauses = order_qual_clauses(root, scan_clauses);
3576 
3577  /* Reduce RestrictInfo lists to bare expressions; ignore pseudoconstants */
3578  tidquals = extract_actual_clauses(tidquals, false);
3579  scan_clauses = extract_actual_clauses(scan_clauses, false);
3580 
3581  /*
3582  * If we have multiple tidquals, it's more convenient to remove duplicate
3583  * scan_clauses after stripping the RestrictInfos. In this situation,
3584  * because the tidquals represent OR sub-clauses, they could not have come
3585  * from EquivalenceClasses so we don't have to worry about matching up
3586  * non-identical clauses. On the other hand, because tidpath.c will have
3587  * extracted those sub-clauses from some OR clause and built its own list,
3588  * we will certainly not have pointer equality to any scan clause. So
3589  * convert the tidquals list to an explicit OR clause and see if we can
3590  * match it via equal() to any scan clause.
3591  */
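 /*
  * Example (hypothetical query): for
  *   SELECT * FROM t WHERE ctid = '(0,1)'::tid OR ctid = '(0,2)'::tid
  * tidquals holds the two sub-clauses extracted by tidpath.c, while
  * scan_clauses holds the original OR clause; rebuilding the OR here lets
  * equal() match and remove that scan clause.
  */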
3592  if (list_length(tidquals) > 1)
3593  scan_clauses = list_difference(scan_clauses,
3594  list_make1(make_orclause(tidquals)));
3595 
3596  /* Replace any outer-relation variables with nestloop params */
3597  if (best_path->path.param_info)
3598  {
3599  tidquals = (List *)
3600  replace_nestloop_params(root, (Node *) tidquals);
3601  scan_clauses = (List *)
3602  replace_nestloop_params(root, (Node *) scan_clauses);
3603  }
3604 
3605  scan_plan = make_tidscan(tlist,
3606  scan_clauses,
3607  scan_relid,
3608  tidquals);
3609 
3610  copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
3611 
3612  return scan_plan;
3613 }
3614 
3615 /*
3616  * create_tidrangescan_plan
3617  * Returns a tidrangescan plan for the base relation scanned by 'best_path'
3618  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3619  */
3620 static TidRangeScan *
3621 create_tidrangescan_plan(PlannerInfo *root, TidRangePath *best_path,
3622  List *tlist, List *scan_clauses)
3623 {
3624  TidRangeScan *scan_plan;
3625  Index scan_relid = best_path->path.parent->relid;
3626  List *tidrangequals = best_path->tidrangequals;
3627 
3628  /* it should be a base rel... */
3629  Assert(scan_relid > 0);
3630  Assert(best_path->path.parent->rtekind == RTE_RELATION);
3631 
3632  /*
3633  * The qpqual list must contain all restrictions not enforced by the
3634  * tidrangequals list. tidrangequals has AND semantics, so we can simply
3635  * remove any qual that appears in it.
3636  */
3637  {
3638  List *qpqual = NIL;
3639  ListCell *l;
3640 
3641  foreach(l, scan_clauses)
3642  {
3643  RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
3644 
3645  if (rinfo->pseudoconstant)
3646  continue; /* we may drop pseudoconstants here */
3647  if (list_member_ptr(tidrangequals, rinfo))
3648  continue; /* simple duplicate */
3649  qpqual = lappend(qpqual, rinfo);
3650  }
3651  scan_clauses = qpqual;
3652  }
3653 
3654  /* Sort clauses into best execution order */
3655  scan_clauses = order_qual_clauses(root, scan_clauses);
3656 
3657  /* Reduce RestrictInfo lists to bare expressions; ignore pseudoconstants */
3658  tidrangequals = extract_actual_clauses(tidrangequals, false);
3659  scan_clauses = extract_actual_clauses(scan_clauses, false);
3660 
3661  /* Replace any outer-relation variables with nestloop params */
3662  if (best_path->path.param_info)
3663  {
3664  tidrangequals = (List *)
3665  replace_nestloop_params(root, (Node *) tidrangequals);
3666  scan_clauses = (List *)
3667  replace_nestloop_params(root, (Node *) scan_clauses);
3668  }
3669 
3670  scan_plan = make_tidrangescan(tlist,
3671  scan_clauses,
3672  scan_relid,
3673  tidrangequals);
3674 
3675  copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
3676 
3677  return scan_plan;
3678 }
3679 
3680 /*
3681  * create_subqueryscan_plan
3682  * Returns a subqueryscan plan for the base relation scanned by 'best_path'
3683  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3684  */
3685 static SubqueryScan *
3686 create_subqueryscan_plan(PlannerInfo *root, SubqueryScanPath *best_path,
3687  List *tlist, List *scan_clauses)
3688 {
3689  SubqueryScan *scan_plan;
3690  RelOptInfo *rel = best_path->path.parent;
3691  Index scan_relid = rel->relid;
3692  Plan *subplan;
3693 
3694  /* it should be a subquery base rel... */
3695  Assert(scan_relid > 0);
3696  Assert(rel->rtekind == RTE_SUBQUERY);
3697 
3698  /*
3699  * Recursively create Plan from Path for subquery. Since we are entering
3700  * a different planner context (subroot), recurse to create_plan not
3701  * create_plan_recurse.
3702  */
3703  subplan = create_plan(rel->subroot, best_path->subpath);
3704 
3705  /* Sort clauses into best execution order */
3706  scan_clauses = order_qual_clauses(root, scan_clauses);
3707 
3708  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3709  scan_clauses = extract_actual_clauses(scan_clauses, false);
3710 
3711  /* Replace any outer-relation variables with nestloop params */
3712  if (best_path->path.param_info)
3713  {
3714  scan_clauses = (List *)
3715  replace_nestloop_params(root, (Node *) scan_clauses);
3716  process_subquery_nestloop_params(root,
3717  rel->subplan_params);
3718  }
3719 
3720  scan_plan = make_subqueryscan(tlist,
3721  scan_clauses,
3722  scan_relid,
3723  subplan);
3724 
3725  copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
3726 
3727  return scan_plan;
3728 }
3729 
3730 /*
3731  * create_functionscan_plan
3732  * Returns a functionscan plan for the base relation scanned by 'best_path'
3733  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3734  */
3735 static FunctionScan *
3736 create_functionscan_plan(PlannerInfo *root, Path *best_path,
3737  List *tlist, List *scan_clauses)
3738 {
3739  FunctionScan *scan_plan;
3740  Index scan_relid = best_path->parent->relid;
3741  RangeTblEntry *rte;
3742  List *functions;
3743 
3744  /* it should be a function base rel... */
3745  Assert(scan_relid > 0);
3746  rte = planner_rt_fetch(scan_relid, root);
3747  Assert(rte->rtekind == RTE_FUNCTION);
3748  functions = rte->functions;
3749 
3750  /* Sort clauses into best execution order */
3751  scan_clauses = order_qual_clauses(root, scan_clauses);
3752 
3753  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3754  scan_clauses = extract_actual_clauses(scan_clauses, false);
3755 
3756  /* Replace any outer-relation variables with nestloop params */
3757  if (best_path->param_info)
3758  {
3759  scan_clauses = (List *)
3760  replace_nestloop_params(root, (Node *) scan_clauses);
3761  /* The function expressions could contain nestloop params, too */
3762  functions = (List *) replace_nestloop_params(root, (Node *) functions);
3763  }
3764 
3765  scan_plan = make_functionscan(tlist, scan_clauses, scan_relid,
3766  functions, rte->funcordinality);
3767 
3768  copy_generic_path_info(&scan_plan->scan.plan, best_path);
3769 
3770  return scan_plan;
3771 }
3772 
3773 /*
3774  * create_tablefuncscan_plan
3775  * Returns a tablefuncscan plan for the base relation scanned by 'best_path'
3776  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3777  */
3778 static TableFuncScan *
3779 create_tablefuncscan_plan(PlannerInfo *root, Path *best_path,
3780  List *tlist, List *scan_clauses)
3781 {
3782  TableFuncScan *scan_plan;
3783  Index scan_relid = best_path->parent->relid;
3784  RangeTblEntry *rte;
3785  TableFunc *tablefunc;
3786 
3787  /* it should be a function base rel... */
3788  Assert(scan_relid > 0);
3789  rte = planner_rt_fetch(scan_relid, root);
3790  Assert(rte->rtekind == RTE_TABLEFUNC);
3791  tablefunc = rte->tablefunc;
3792 
3793  /* Sort clauses into best execution order */
3794  scan_clauses = order_qual_clauses(root, scan_clauses);
3795 
3796  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3797  scan_clauses = extract_actual_clauses(scan_clauses, false);
3798 
3799  /* Replace any outer-relation variables with nestloop params */
3800  if (best_path->param_info)
3801  {
3802  scan_clauses = (List *)
3803  replace_nestloop_params(root, (Node *) scan_clauses);
3804  /* The function expressions could contain nestloop params, too */
3805  tablefunc = (TableFunc *) replace_nestloop_params(root, (Node *) tablefunc);
3806  }
3807 
3808  scan_plan = make_tablefuncscan(tlist, scan_clauses, scan_relid,
3809  tablefunc);
3810 
3811  copy_generic_path_info(&scan_plan->scan.plan, best_path);
3812 
3813  return scan_plan;
3814 }
3815 
3816 /*
3817  * create_valuesscan_plan
3818  * Returns a valuesscan plan for the base relation scanned by 'best_path'
3819  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3820  */
3821 static ValuesScan *
3822 create_valuesscan_plan(PlannerInfo *root, Path *best_path,
3823  List *tlist, List *scan_clauses)
3824 {
3825  ValuesScan *scan_plan;
3826  Index scan_relid = best_path->parent->relid;
3827  RangeTblEntry *rte;
3828  List *values_lists;
3829 
3830  /* it should be a values base rel... */
3831  Assert(scan_relid > 0);
3832  rte = planner_rt_fetch(scan_relid, root);
3833  Assert(rte->rtekind == RTE_VALUES);
3834  values_lists = rte->values_lists;
3835 
3836  /* Sort clauses into best execution order */
3837  scan_clauses = order_qual_clauses(root, scan_clauses);
3838 
3839  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3840  scan_clauses = extract_actual_clauses(scan_clauses, false);
3841 
3842  /* Replace any outer-relation variables with nestloop params */
3843  if (best_path->param_info)
3844  {
3845  scan_clauses = (List *)
3846  replace_nestloop_params(root, (Node *) scan_clauses);
3847  /* The values lists could contain nestloop params, too */
3848  values_lists = (List *)
3849  replace_nestloop_params(root, (Node *) values_lists);
3850  }
3851 
3852  scan_plan = make_valuesscan(tlist, scan_clauses, scan_relid,
3853  values_lists);
3854 
3855  copy_generic_path_info(&scan_plan->scan.plan, best_path);
3856 
3857  return scan_plan;
3858 }
3859 
3860 /*
3861  * create_ctescan_plan
3862  * Returns a ctescan plan for the base relation scanned by 'best_path'
3863  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3864  */
3865 static CteScan *
3866 create_ctescan_plan(PlannerInfo *root, Path *best_path,
3867  List *tlist, List *scan_clauses)
3868 {
3869  CteScan *scan_plan;
3870  Index scan_relid = best_path->parent->relid;
3871  RangeTblEntry *rte;
3872  SubPlan *ctesplan = NULL;
3873  int plan_id;
3874  int cte_param_id;
3875  PlannerInfo *cteroot;
3876  Index levelsup;
3877  int ndx;
3878  ListCell *lc;
3879 
3880  Assert(scan_relid > 0);
3881  rte = planner_rt_fetch(scan_relid, root);
3882  Assert(rte->rtekind == RTE_CTE);
3883  Assert(!rte->self_reference);
3884 
3885  /*
3886  * Find the referenced CTE, and locate the SubPlan previously made for it.
3887  */
3888  levelsup = rte->ctelevelsup;
3889  cteroot = root;
3890  while (levelsup-- > 0)
3891  {
3892  cteroot = cteroot->parent_root;
3893  if (!cteroot) /* shouldn't happen */
3894  elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
3895  }
3896 
3897  /*
3898  * Note: cte_plan_ids can be shorter than cteList, if we are still working
3899  * on planning the CTEs (ie, this is a side-reference from another CTE).
3900  * So we mustn't use forboth here.
3901  */
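 /*
  * Illustrative sketch (hypothetical query):
  *   WITH a AS (...), b AS (SELECT * FROM a) SELECT * FROM b
  * While the subplan for "b" is being built, this code runs for its scan
  * of "a"; cte_plan_ids then has an entry for "a" but not yet for "b",
  * which is why forboth over cteList would be unsafe.
  */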
3902  ndx = 0;
3903  foreach(lc, cteroot->parse->cteList)
3904  {
3905  CommonTableExpr *cte = (CommonTableExpr *) lfirst(lc);
3906 
3907  if (strcmp(cte->ctename, rte->ctename) == 0)
3908  break;
3909  ndx++;
3910  }
3911  if (lc == NULL) /* shouldn't happen */
3912  elog(ERROR, "could not find CTE \"%s\"", rte->ctename);
3913  if (ndx >= list_length(cteroot->cte_plan_ids))
3914  elog(ERROR, "could not find plan for CTE \"%s\"", rte->ctename);
3915  plan_id = list_nth_int(cteroot->cte_plan_ids, ndx);
3916  if (plan_id <= 0)
3917  elog(ERROR, "no plan was made for CTE \"%s\"", rte->ctename);
3918  foreach(lc, cteroot->init_plans)
3919  {
3920  ctesplan = (SubPlan *) lfirst(lc);
3921  if (ctesplan->plan_id == plan_id)
3922  break;
3923  }
3924  if (lc == NULL) /* shouldn't happen */
3925  elog(ERROR, "could not find plan for CTE \"%s\"", rte->ctename);
3926 
3927  /*
3928  * We need the CTE param ID, which is the sole member of the SubPlan's
3929  * setParam list.
3930  */
3931  cte_param_id = linitial_int(ctesplan->setParam);
3932 
3933  /* Sort clauses into best execution order */
3934  scan_clauses = order_qual_clauses(root, scan_clauses);
3935 
3936  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3937  scan_clauses = extract_actual_clauses(scan_clauses, false);
3938 
3939  /* Replace any outer-relation variables with nestloop params */
3940  if (best_path->param_info)
3941  {
3942  scan_clauses = (List *)
3943  replace_nestloop_params(root, (Node *) scan_clauses);
3944  }
3945 
3946  scan_plan = make_ctescan(tlist, scan_clauses, scan_relid,
3947  plan_id, cte_param_id);
3948 
3949  copy_generic_path_info(&scan_plan->scan.plan, best_path);
3950 
3951  return scan_plan;
3952 }
3953 
3954 /*
3955  * create_namedtuplestorescan_plan
3956  * Returns a tuplestorescan plan for the base relation scanned by
3957  * 'best_path' with restriction clauses 'scan_clauses' and targetlist
3958  * 'tlist'.
3959  */
3960 static NamedTuplestoreScan *
3961 create_namedtuplestorescan_plan(PlannerInfo *root, Path *best_path,
3962  List *tlist, List *scan_clauses)
3963 {
3964  NamedTuplestoreScan *scan_plan;
3965  Index scan_relid = best_path->parent->relid;
3966  RangeTblEntry *rte;
3967 
3968  Assert(scan_relid > 0);
3969  rte = planner_rt_fetch(scan_relid, root);
3970  Assert(rte->rtekind == RTE_NAMEDTUPLESTORE);
3971 
3972  /* Sort clauses into best execution order */
3973  scan_clauses = order_qual_clauses(root, scan_clauses);
3974 
3975  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3976  scan_clauses = extract_actual_clauses(scan_clauses, false);
3977 
3978  /* Replace any outer-relation variables with nestloop params */
3979  if (best_path->param_info)
3980  {
3981  scan_clauses = (List *)
3982  replace_nestloop_params(root, (Node *) scan_clauses);
3983  }
3984 
3985  scan_plan = make_namedtuplestorescan(tlist, scan_clauses, scan_relid,
3986  rte->enrname);
3987 
3988  copy_generic_path_info(&scan_plan->scan.plan, best_path);
3989 
3990  return scan_plan;
3991 }
3992 
3993 /*
3994  * create_resultscan_plan
3995  * Returns a Result plan for the RTE_RESULT base relation scanned by
3996  * 'best_path' with restriction clauses 'scan_clauses' and targetlist
3997  * 'tlist'.
3998  */
3999 static Result *
4000 create_resultscan_plan(PlannerInfo *root, Path *best_path,
4001  List *tlist, List *scan_clauses)
4002 {
4003  Result *scan_plan;
4004  Index scan_relid = best_path->parent->relid;
4005  RangeTblEntry *rte PG_USED_FOR_ASSERTS_ONLY;
4006 
4007  Assert(scan_relid > 0);
4008  rte = planner_rt_fetch(scan_relid, root);
4009  Assert(rte->rtekind == RTE_RESULT);
4010 
4011  /* Sort clauses into best execution order */
4012  scan_clauses = order_qual_clauses(root, scan_clauses);
4013 
4014  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
4015  scan_clauses = extract_actual_clauses(scan_clauses, false);
4016 
4017  /* Replace any outer-relation variables with nestloop params */
4018  if (best_path->param_info)
4019  {
4020  scan_clauses = (List *)
4021  replace_nestloop_params(root, (Node *) scan_clauses);
4022  }
4023 
4024  scan_plan = make_result(tlist, (Node *) scan_clauses, NULL);
4025 
4026  copy_generic_path_info(&scan_plan->plan, best_path);
4027 
4028  return scan_plan;
4029 }
4030 
4031 /*
4032  * create_worktablescan_plan
4033  * Returns a worktablescan plan for the base relation scanned by 'best_path'
4034  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
4035  */
4036 static WorkTableScan *
4037 create_worktablescan_plan(PlannerInfo *root, Path *best_path,
4038  List *tlist, List *scan_clauses)
4039 {
4040  WorkTableScan *scan_plan;
4041  Index scan_relid = best_path->parent->relid;
4042  RangeTblEntry *rte;
4043  Index levelsup;
4044  PlannerInfo *cteroot;
4045 
4046  Assert(scan_relid > 0);
4047  rte = planner_rt_fetch(scan_relid, root);
4048  Assert(rte->rtekind == RTE_CTE);
4049  Assert(rte->self_reference);
4050 
4051  /*
4052  * We need to find the worktable param ID, which is in the plan level
4053  * that's processing the recursive UNION, which is one level *below* where
4054  * the CTE comes from.
4055  */
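 /*
  * For illustration (hypothetical query):
  *   WITH RECURSIVE t(n) AS (SELECT 1 UNION ALL SELECT n+1 FROM t WHERE n < 10)
  * The self-reference "FROM t" in the recursive term is scanned here; its
  * worktable param belongs to the subroot planning the recursive UNION,
  * hence the extra levelsup-- below.
  */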
4056  levelsup = rte->ctelevelsup;
4057  if (levelsup == 0) /* shouldn't happen */
4058  elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
4059  levelsup--;
4060  cteroot = root;
4061  while (levelsup-- > 0)
4062  {
4063  cteroot = cteroot->parent_root;
4064  if (!cteroot) /* shouldn't happen */
4065  elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
4066  }
4067  if (cteroot->wt_param_id < 0) /* shouldn't happen */
4068  elog(ERROR, "could not find param ID for CTE \"%s\"", rte->ctename);
4069 
4070  /* Sort clauses into best execution order */
4071  scan_clauses = order_qual_clauses(root, scan_clauses);
4072 
4073  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
4074  scan_clauses = extract_actual_clauses(scan_clauses, false);
4075 
4076  /* Replace any outer-relation variables with nestloop params */
4077  if (best_path->param_info)
4078  {
4079  scan_clauses = (List *)
4080  replace_nestloop_params(root, (Node *) scan_clauses);
4081  }
4082 
4083  scan_plan = make_worktablescan(tlist, scan_clauses, scan_relid,
4084  cteroot->wt_param_id);
4085 
4086  copy_generic_path_info(&scan_plan->scan.plan, best_path);
4087 
4088  return scan_plan;
4089 }
4090 
4091 /*
4092  * create_foreignscan_plan
4093  * Returns a foreignscan plan for the relation scanned by 'best_path'
4094  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
4095  */
4096 static ForeignScan *
4097 create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
4098  List *tlist, List *scan_clauses)
4099 {
4100  ForeignScan *scan_plan;
4101  RelOptInfo *rel = best_path->path.parent;
4102  Index scan_relid = rel->relid;
4103  Oid rel_oid = InvalidOid;
4104  Plan *outer_plan = NULL;
4105 
4106  Assert(rel->fdwroutine != NULL);
4107 
4108  /* transform the child path if any */
4109  if (best_path->fdw_outerpath)
4110  outer_plan = create_plan_recurse(root, best_path->fdw_outerpath,
4111  CP_EXACT_TLIST);
4112 
4113  /*
4114  * If we're scanning a base relation, fetch its OID. (Irrelevant if
4115  * scanning a join relation.)
4116  */
4117  if (scan_relid > 0)
4118  {
4119  RangeTblEntry *rte;
4120 
4121  Assert(rel->rtekind == RTE_RELATION);
4122  rte = planner_rt_fetch(scan_relid, root);
4123  Assert(rte->rtekind == RTE_RELATION);
4124  rel_oid = rte->relid;
4125  }
4126 
4127  /*
4128  * Sort clauses into best execution order. We do this first since the FDW
4129  * might have more info than we do and wish to adjust the ordering.
4130  */
4131  scan_clauses = order_qual_clauses(root, scan_clauses);
4132 
4133  /*
4134  * Let the FDW perform its processing on the restriction clauses and
4135  * generate the plan node. Note that the FDW might remove restriction
4136  * clauses that it intends to execute remotely, or even add more (if it
4137  * has selected some join clauses for remote use but also wants them
4138  * rechecked locally).
4139  */
4140  scan_plan = rel->fdwroutine->GetForeignPlan(root, rel, rel_oid,
4141  best_path,
4142  tlist, scan_clauses,
4143  outer_plan);
4144 
4145  /* Copy cost data from Path to Plan; no need to make FDW do this */
4146  copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
4147 
4148  /* Copy user OID to access as; likewise no need to make FDW do this */
4149  scan_plan->checkAsUser = rel->userid;
4150 
4151  /* Copy foreign server OID; likewise, no need to make FDW do this */
4152  scan_plan->fs_server = rel->serverid;
4153 
4154  /*
4155  * Likewise, copy the relids that are represented by this foreign scan. An
4156  * upper rel doesn't have relids set, but it covers all the relations
4157  * participating in the underlying scan/join, so use root->all_query_rels.
4158  */
4159  if (rel->reloptkind == RELOPT_UPPER_REL)
4160  scan_plan->fs_relids = root->all_query_rels;
4161  else
4162  scan_plan->fs_relids = best_path->path.parent->relids;
4163 
4164  /*
4165  * Join relid sets include relevant outer joins, but FDWs may need to know
4166  * which are the included base rels. That's a bit tedious to get without
4167  * access to the plan-time data structures, so compute it here.
4168  */
4169  scan_plan->fs_base_relids = bms_difference(scan_plan->fs_relids,
4170  root->outer_join_rels);
4171 
4172  /*
4173  * If this is a foreign join, and to make it valid to push down we had to
4174  * assume that the current user is the same as some user explicitly named
4175  * in the query, mark the finished plan as depending on the current user.
4176  */
4177  if (rel->useridiscurrent)
4178  root->glob->dependsOnRole = true;
4179 
4180  /*
4181  * Replace any outer-relation variables with nestloop params in the qual,
4182  * fdw_exprs and fdw_recheck_quals expressions. We do this last so that
4183  * the FDW doesn't have to be involved. (Note that parts of fdw_exprs or
4184  * fdw_recheck_quals could have come from join clauses, so doing this
4185  * beforehand on the scan_clauses wouldn't work.) We assume
4186  * fdw_scan_tlist contains no such variables.
4187  */
4188  if (best_path->path.param_info)
4189  {
4190  scan_plan->scan.plan.qual = (List *)
4191  replace_nestloop_params(root, (Node *) scan_plan->scan.plan.qual);
4192  scan_plan->fdw_exprs = (List *)
4193  replace_nestloop_params(root, (Node *) scan_plan->fdw_exprs);
4194  scan_plan->fdw_recheck_quals = (List *)
4195  replace_nestloop_params(root,
4196  (Node *) scan_plan->fdw_recheck_quals);
4197  }
4198 
4199  /*
4200  * If rel is a base relation, detect whether any system columns are
4201  * requested from the rel. (If rel is a join relation, rel->relid will be
4202  * 0, but there can be no Var with relid 0 in the rel's targetlist or the
4203  * restriction clauses, so we skip this in that case. Note that any such
4204  * columns in base relations that were joined are assumed to be contained
4205  * in fdw_scan_tlist.) This is a bit of a kluge and might go away
4206  * someday, so we intentionally leave it out of the API presented to FDWs.
4207  */
4208  scan_plan->fsSystemCol = false;
4209  if (scan_relid > 0)
4210  {
4211  Bitmapset *attrs_used = NULL;
4212  ListCell *lc;
4213  int i;
4214 
4215  /*
4216  * First, examine all the attributes needed for joins or final output.
4217  * Note: we must look at rel's targetlist, not the attr_needed data,
4218  * because attr_needed isn't computed for inheritance child rels.
4219  */
4220  pull_varattnos((Node *) rel->reltarget->exprs, scan_relid, &attrs_used);
4221 
4222  /* Add all the attributes used by restriction clauses. */
4223  foreach(lc, rel->baserestrictinfo)
4224  {
4225  RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
4226 
4227  pull_varattnos((Node *) rinfo->clause, scan_relid, &attrs_used);
4228  }
4229 
4230  /* Now, are any system columns requested from rel? */
4231  for (i = FirstLowInvalidHeapAttributeNumber + 1; i < 0; i++)
4232  {
4233  if (bms_is_member(i - FirstLowInvalidHeapAttributeNumber, attrs_used))
4234  {
4235  scan_plan->fsSystemCol = true;
4236  break;
4237  }
4238  }
4239 
4240  bms_free(attrs_used);
4241  }
4242 
4243  return scan_plan;
4244 }
4245 
4246 /*
4247  * create_customscan_plan
4248  *
4249  * Transform a CustomPath into a Plan.
4250  */
4251 static CustomScan *
4252 create_customscan_plan(PlannerInfo *root, CustomPath *best_path,
4253  List *tlist, List *scan_clauses)
4254 {
4255  CustomScan *cplan;
4256  RelOptInfo *rel = best_path->path.parent;
4257  List *custom_plans = NIL;
4258  ListCell *lc;
4259 
4260  /* Recursively transform child paths. */
4261  foreach(lc, best_path->custom_paths)
4262  {
4263  Plan *plan = create_plan_recurse(root, (Path *) lfirst(lc),
4264  CP_EXACT_TLIST);
4265 
4266  custom_plans = lappend(custom_plans, plan);
4267  }
4268 
4269  /*
4270  * Sort clauses into the best execution order, although the custom-scan
4271  * provider can reorder them again.
4272  */
4273  scan_clauses = order_qual_clauses(root, scan_clauses);
4274 
4275  /*
4276  * Invoke custom plan provider to create the Plan node represented by the
4277  * CustomPath.
4278  */
4279  cplan = castNode(CustomScan,
4280  best_path->methods->PlanCustomPath(root,
4281  rel,
4282  best_path,
4283  tlist,
4284  scan_clauses,
4285  custom_plans));
4286 
4287  /*
4288  * Copy cost data from Path to Plan; no need to make custom-plan providers
4289  * do this
4290  */
4291  copy_generic_path_info(&cplan->scan.plan, &best_path->path);
4292 
4293  /* Likewise, copy the relids that are represented by this custom scan */
4294  cplan->custom_relids = best_path->path.parent->relids;
4295 
4296  /*
4297  * Replace any outer-relation variables with nestloop params in the qual
4298  * and custom_exprs expressions. We do this last so that the custom-plan
4299  * provider doesn't have to be involved. (Note that parts of custom_exprs
4300  * could have come from join clauses, so doing this beforehand on the
4301  * scan_clauses wouldn't work.) We assume custom_scan_tlist contains no
4302  * such variables.
4303  */
4304  if (best_path->path.param_info)
4305  {
4306  cplan->scan.plan.qual = (List *)
4307  replace_nestloop_params(root, (Node *) cplan->scan.plan.qual);
4308  cplan->custom_exprs = (List *)
4309  replace_nestloop_params(root, (Node *) cplan->custom_exprs);
4310  }
4311 
4312  return cplan;
4313 }
4314 
4315 
4316 /*****************************************************************************
4317  *
4318  * JOIN METHODS
4319  *
4320  *****************************************************************************/
4321 
4322 static NestLoop *
4323 create_nestloop_plan(PlannerInfo *root,
4324  NestPath *best_path)
4325 {
4326  NestLoop *join_plan;
4327  Plan *outer_plan;
4328  Plan *inner_plan;
4329  List *tlist = build_path_tlist(root, &best_path->jpath.path);
4330  List *joinrestrictclauses = best_path->jpath.joinrestrictinfo;
4331  List *joinclauses;
4332  List *otherclauses;
4333  Relids outerrelids;
4334  List *nestParams;
4335  Relids saveOuterRels = root->curOuterRels;
4336 
4337  /* NestLoop can project, so no need to be picky about child tlists */
4338  outer_plan = create_plan_recurse(root, best_path->jpath.outerjoinpath, 0);
4339 
4340  /* For a nestloop, include outer relids in curOuterRels for inner side */
4341  root->curOuterRels = bms_union(root->curOuterRels,
4342  best_path->jpath.outerjoinpath->parent->relids);
4343 
4344  inner_plan = create_plan_recurse(root, best_path->jpath.innerjoinpath, 0);
4345 
4346  /* Restore curOuterRels */
4347  bms_free(root->curOuterRels);
4348  root->curOuterRels = saveOuterRels;
4349 
4350  /* Sort join qual clauses into best execution order */
4351  joinrestrictclauses = order_qual_clauses(root, joinrestrictclauses);
4352 
4353  /* Get the join qual clauses (in plain expression form) */
4354  /* Any pseudoconstant clauses are ignored here */
4355  if (IS_OUTER_JOIN(best_path->jpath.jointype))
4356  {
4357  extract_actual_join_clauses(joinrestrictclauses,
4358  best_path->jpath.path.parent->relids,
4359  &joinclauses, &otherclauses);
4360  }
4361  else
4362  {
4363  /* We can treat all clauses alike for an inner join */
4364  joinclauses = extract_actual_clauses(joinrestrictclauses, false);
4365  otherclauses = NIL;
4366  }
4367 
4368  /* Replace any outer-relation variables with nestloop params */
4369  if (best_path->jpath.path.param_info)
4370  {
4371  joinclauses = (List *)
4372  replace_nestloop_params(root, (Node *) joinclauses);
4373  otherclauses = (List *)
4374  replace_nestloop_params(root, (Node *) otherclauses);
4375  }
4376 
4377  /*
4378  * Identify any nestloop parameters that should be supplied by this join
4379  * node, and remove them from root->curOuterParams.
4380  */
4381  outerrelids = best_path->jpath.outerjoinpath->parent->relids;
4382  nestParams = identify_current_nestloop_params(root, outerrelids);
4383 
4384  join_plan = make_nestloop(tlist,
4385  joinclauses,
4386  otherclauses,
4387  nestParams,
4388  outer_plan,
4389  inner_plan,
4390  best_path->jpath.jointype,
4391  best_path->jpath.inner_unique);
4392 
4393  copy_generic_path_info(&join_plan->join.plan, &best_path->jpath.path);
4394 
4395  return join_plan;
4396 }
4397 
4398 static MergeJoin *
4399 create_mergejoin_plan(PlannerInfo *root,
4400  MergePath *best_path)
4401 {
4402  MergeJoin *join_plan;
4403  Plan *outer_plan;
4404  Plan *inner_plan;
4405  List *tlist = build_path_tlist(root, &best_path->jpath.path);
4406  List *joinclauses;
4407  List *otherclauses;
4408  List *mergeclauses;
4409  List *outerpathkeys;
4410  List *innerpathkeys;
4411  int nClauses;
4412  Oid *mergefamilies;
4413  Oid *mergecollations;
4414  int *mergestrategies;
4415  bool *mergenullsfirst;
4416  PathKey *opathkey;
4417  EquivalenceClass *opeclass;
4418  int i;
4419  ListCell *lc;
4420  ListCell *lop;
4421  ListCell *lip;
4422  Path *outer_path = best_path->jpath.outerjoinpath;
4423  Path *inner_path = best_path->jpath.innerjoinpath;
4424 
4425  /*
4426  * MergeJoin can project, so we don't have to demand exact tlists from the
4427  * inputs. However, if we're intending to sort an input's result, it's
4428  * best to request a small tlist so we aren't sorting more data than
4429  * necessary.
4430  */
4431  outer_plan = create_plan_recurse(root, best_path->jpath.outerjoinpath,
4432  (best_path->outersortkeys != NIL) ? CP_SMALL_TLIST : 0);
4433 
4434  inner_plan = create_plan_recurse(root, best_path->jpath.innerjoinpath,
4435  (best_path->innersortkeys != NIL) ? CP_SMALL_TLIST : 0);
4436 
4437  /* Sort join qual clauses into best execution order */
4438  /* NB: do NOT reorder the mergeclauses */
4439  joinclauses = order_qual_clauses(root, best_path->jpath.joinrestrictinfo);
4440 
4441  /* Get the join qual clauses (in plain expression form) */
4442  /* Any pseudoconstant clauses are ignored here */
4443  if (IS_OUTER_JOIN(best_path->jpath.jointype))
4444  {
4445  extract_actual_join_clauses(joinclauses,
4446  best_path->jpath.path.parent->relids,
4447  &joinclauses, &otherclauses);
4448  }
4449  else
4450  {
4451  /* We can treat all clauses alike for an inner join */
4452  joinclauses = extract_actual_clauses(joinclauses, false);
4453  otherclauses = NIL;
4454  }
4455 
4456  /*
4457  * Remove the mergeclauses from the list of join qual clauses, leaving the
4458  * list of quals that must be checked as qpquals.
4459  */
4460  mergeclauses = get_actual_clauses(best_path->path_mergeclauses);
4461  joinclauses = list_difference(joinclauses, mergeclauses);
4462 
4463  /*
4464  * Replace any outer-relation variables with nestloop params. There
4465  * should not be any in the mergeclauses.
4466  */
4467  if (best_path->jpath.path.param_info)
4468  {
4469  joinclauses = (List *)
4470  replace_nestloop_params(root, (Node *) joinclauses);
4471  otherclauses = (List *)
4472  replace_nestloop_params(root, (Node *) otherclauses);
4473  }
4474 
4475  /*
4476  * Rearrange mergeclauses, if needed, so that the outer variable is always
4477  * on the left; mark the mergeclause restrictinfos with correct
4478  * outer_is_left status.
4479  */
4480  mergeclauses = get_switched_clauses(best_path->path_mergeclauses,
4481  best_path->jpath.outerjoinpath->parent->relids);
4482 
4483  /*
4484  * Create explicit sort nodes for the outer and inner paths if necessary.
4485  */
4486  if (best_path->outersortkeys)
4487  {
4488  Relids outer_relids = outer_path->parent->relids;
4489  Sort *sort = make_sort_from_pathkeys(outer_plan,
4490  best_path->outersortkeys,
4491  outer_relids);
4492 
4493  label_sort_with_costsize(root, sort, -1.0);
4494  outer_plan = (Plan *) sort;
4495  outerpathkeys = best_path->outersortkeys;
4496  }
4497  else
4498  outerpathkeys = best_path->jpath.outerjoinpath->pathkeys;
4499 
4500  if (best_path->innersortkeys)
4501  {
4502  Relids inner_relids = inner_path->parent->relids;
4503  Sort *sort = make_sort_from_pathkeys(inner_plan,
4504  best_path->innersortkeys,
4505  inner_relids);
4506 
4507  label_sort_with_costsize(root, sort, -1.0);
4508  inner_plan = (Plan *) sort;
4509  innerpathkeys = best_path->innersortkeys;
4510  }
4511  else
4512  innerpathkeys = best_path->jpath.innerjoinpath->pathkeys;
4513 
4514  /*
4515  * If specified, add a materialize node to shield the inner plan from the
4516  * need to handle mark/restore.
4517  */
4518  if (best_path->materialize_inner)
4519  {
4520  Plan *matplan = (Plan *) make_material(inner_plan);
4521 
4522  /*
4523  * We assume the materialize will not spill to disk, and therefore
4524  * charge just cpu_operator_cost per tuple. (Keep this estimate in
4525  * sync with final_cost_mergejoin.)
4526  */
4527  copy_plan_costsize(matplan, inner_plan);
4528  matplan->total_cost += cpu_operator_cost * matplan->plan_rows;
4529 
4530  inner_plan = matplan;
4531  }
4532 
4533  /*
4534  * Compute the opfamily/collation/strategy/nullsfirst arrays needed by the
4535  * executor. The information is in the pathkeys for the two inputs, but
4536  * we need to be careful about the possibility of mergeclauses sharing a
4537  * pathkey, as well as the possibility that the inner pathkeys are not in
4538  * an order matching the mergeclauses.
4539  */
4540  nClauses = list_length(mergeclauses);
4541  Assert(nClauses == list_length(best_path->path_mergeclauses));
4542  mergefamilies = (Oid *) palloc(nClauses * sizeof(Oid));
4543  mergecollations = (Oid *) palloc(nClauses * sizeof(Oid));
4544  mergestrategies = (int *) palloc(nClauses * sizeof(int));
4545  mergenullsfirst = (bool *) palloc(nClauses * sizeof(bool));
4546 
4547  opathkey = NULL;
4548  opeclass = NULL;
4549  lop = list_head(outerpathkeys);
4550  lip = list_head(innerpathkeys);
4551  i = 0;
4552  foreach(lc, best_path->path_mergeclauses)
4553  {
4554  RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
4555  EquivalenceClass *oeclass;
4556  EquivalenceClass *ieclass;
4557  PathKey *ipathkey = NULL;
4558  EquivalenceClass *ipeclass = NULL;
4559  bool first_inner_match = false;
4560 
4561  /* fetch outer/inner eclass from mergeclause */
4562  if (rinfo->outer_is_left)
4563  {
4564  oeclass = rinfo->left_ec;
4565  ieclass = rinfo->right_ec;
4566  }
4567  else
4568  {
4569  oeclass = rinfo->right_ec;
4570  ieclass = rinfo->left_ec;
4571  }
4572  Assert(oeclass != NULL);
4573  Assert(ieclass != NULL);
4574 
4575  /*
4576  * We must identify the pathkey elements associated with this clause
4577  * by matching the eclasses (which should give a unique match, since
4578  * the pathkey lists should be canonical). In typical cases the merge
4579  * clauses are one-to-one with the pathkeys, but when dealing with
4580  * partially redundant query conditions, things are more complicated.
4581  *
4582  * lop and lip reference the first as-yet-unmatched pathkey elements.
4583  * If they're NULL then all pathkey elements have been matched.
4584  *
4585  * The ordering of the outer pathkeys should match the mergeclauses,
4586  * by construction (see find_mergeclauses_for_outer_pathkeys()). There
4587  * could be more than one mergeclause for the same outer pathkey, but
4588  * no pathkey may be entirely skipped over.
4589  */
4590  if (oeclass != opeclass) /* multiple matches are not interesting */
4591  {
4592  /* doesn't match the current opathkey, so must match the next */
4593  if (lop == NULL)
4594  elog(ERROR, "outer pathkeys do not match mergeclauses");
4595  opathkey = (PathKey *) lfirst(lop);
4596  opeclass = opathkey->pk_eclass;
4597  lop = lnext(outerpathkeys, lop);
4598  if (oeclass != opeclass)
4599  elog(ERROR, "outer pathkeys do not match mergeclauses");
4600  }
4601 
4602  /*
4603  * The inner pathkeys likewise should not have skipped-over keys, but
4604  * it's possible for a mergeclause to reference some earlier inner
4605  * pathkey if we had redundant pathkeys. For example we might have
4606  * mergeclauses like "o.a = i.x AND o.b = i.y AND o.c = i.x". The
4607  * implied inner ordering is then "ORDER BY x, y, x", but the pathkey
4608  * mechanism drops the second sort by x as redundant, and this code
4609  * must cope.
4610  *
4611  * It's also possible for the implied inner-rel ordering to be like
4612  * "ORDER BY x, y, x DESC". We still drop the second instance of x as
4613  * redundant; but this means that the sort ordering of a redundant
4614  * inner pathkey should not be considered significant. So we must
4615  * detect whether this is the first clause matching an inner pathkey.
4616  */
4617  if (lip)
4618  {
4619  ipathkey = (PathKey *) lfirst(lip);
4620  ipeclass = ipathkey->pk_eclass;
4621  if (ieclass == ipeclass)
4622  {
4623  /* successful first match to this inner pathkey */
4624  lip = lnext(innerpathkeys, lip);
4625  first_inner_match = true;
4626  }
4627  }
4628  if (!first_inner_match)
4629  {
4630  /* redundant clause ... must match something before lip */
4631  ListCell *l2;
4632 
4633  foreach(l2, innerpathkeys)
4634  {
4635  if (l2 == lip)
4636  break;
4637  ipathkey = (PathKey *) lfirst(l2);
4638  ipeclass = ipathkey->pk_eclass;
4639  if (ieclass == ipeclass)
4640  break;
4641  }
4642  if (ieclass != ipeclass)
4643  elog(ERROR, "inner pathkeys do not match mergeclauses");
4644  }
4645 
4646  /*
4647  * The pathkeys should always match each other as to opfamily and
4648  * collation (which affect equality), but if we're considering a
4649  * redundant inner pathkey, its sort ordering might not match. In
4650  * such cases we may ignore the inner pathkey's sort ordering and use
4651  * the outer's. (In effect, we're lying to the executor about the
4652  * sort direction of this inner column, but it does not matter since
4653  * the run-time row comparisons would only reach this column when
4654  * there's equality for the earlier column containing the same eclass.
4655  * There could be only one value in this column for the range of inner
4656  * rows having a given value in the earlier column, so it does not
4657  * matter which way we imagine this column to be ordered.) But a
4658  * non-redundant inner pathkey had better match outer's ordering too.
4659  */
4660  if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
4661  opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation)
4662  elog(ERROR, "left and right pathkeys do not match in mergejoin");
4663  if (first_inner_match &&
4664  (opathkey->pk_strategy != ipathkey->pk_strategy ||
4665  opathkey->pk_nulls_first != ipathkey->pk_nulls_first))
4666  elog(ERROR, "left and right pathkeys do not match in mergejoin");
4667 
4668  /* OK, save info for executor */
4669  mergefamilies[i] = opathkey->pk_opfamily;
4670  mergecollations[i] = opathkey->pk_eclass->ec_collation;
4671  mergestrategies[i] = opathkey->pk_strategy;
4672  mergenullsfirst[i] = opathkey->pk_nulls_first;
4673  i++;
4674  }
4675 
4676  /*
4677  * Note: it is not an error if we have additional pathkey elements (i.e.,
4678  * lop or lip isn't NULL here). The input paths might be better-sorted
4679  * than we need for the current mergejoin.
4680  */
4681 
4682  /*
4683  * Now we can build the mergejoin node.
4684  */
4685  join_plan = make_mergejoin(tlist,
4686  joinclauses,
4687  otherclauses,
4688  mergeclauses,
4689  mergefamilies,
4690  mergecollations,
4691  mergestrategies,
4692  mergenullsfirst,
4693  outer_plan,
4694  inner_plan,
4695  best_path->jpath.jointype,
4696  best_path->jpath.inner_unique,
4697  best_path->skip_mark_restore);
4698 
4699  /* Costs of sort and material steps are included in path cost already */
4700  copy_generic_path_info(&join_plan->join.plan, &best_path->jpath.path);
4701 
4702  return join_plan;
4703 }
4704 
4705 static HashJoin *
4706 create_hashjoin_plan(PlannerInfo *root,
4707  HashPath *best_path)
4708 {
4709  HashJoin *join_plan;
4710  Hash *hash_plan;
4711  Plan *outer_plan;
4712  Plan *inner_plan;
4713  List *tlist = build_path_tlist(root, &best_path->jpath.path);
4714  List *joinclauses;
4715  List *otherclauses;
4716  List *hashclauses;
4717  List *hashoperators = NIL;
4718  List *hashcollations = NIL;
4719  List *inner_hashkeys = NIL;
4720  List *outer_hashkeys = NIL;
4721  Oid skewTable = InvalidOid;
4722  AttrNumber skewColumn = InvalidAttrNumber;
4723  bool skewInherit = false;
4724  ListCell *lc;
4725 
4726  /*
4727  * HashJoin can project, so we don't have to demand exact tlists from the
4728  * inputs. However, it's best to request a small tlist from the inner
4729  * side, so that we aren't storing more data than necessary. Likewise, if
4730  * we anticipate batching, request a small tlist from the outer side so
4731  * that we don't put extra data in the outer batch files.
4732  */
4733  outer_plan = create_plan_recurse(root, best_path->jpath.outerjoinpath,
4734  (best_path->num_batches > 1) ? CP_SMALL_TLIST : 0);
4735 
4736  inner_plan = create_plan_recurse(root, best_path->jpath.innerjoinpath,
4737  CP_SMALL_TLIST);
4738 
4739  /* Sort join qual clauses into best execution order */
4740  joinclauses = order_qual_clauses(root, best_path->jpath.joinrestrictinfo);
4741  /* There's no point in sorting the hash clauses ... */
4742 
4743  /* Get the join qual clauses (in plain expression form) */
4744  /* Any pseudoconstant clauses are ignored here */
4745  if (IS_OUTER_JOIN(best_path->jpath.jointype))
4746  {
4747  extract_actual_join_clauses(joinclauses,
4748  best_path->jpath.path.parent->relids,
4749  &joinclauses, &otherclauses);
4750  }
4751  else
4752  {
4753  /* We can treat all clauses alike for an inner join */
4754  joinclauses = extract_actual_clauses(joinclauses, false);
4755  otherclauses = NIL;
4756  }
4757 
4758  /*
4759  * Remove the hashclauses from the list of join qual clauses, leaving the
4760  * list of quals that must be checked as qpquals.
4761  */
4762  hashclauses = get_actual_clauses(best_path->path_hashclauses);
4763  joinclauses = list_difference(joinclauses, hashclauses);
4764 
4765  /*
4766  * Replace any outer-relation variables with nestloop params. There
4767  * should not be any in the hashclauses.
4768  */
4769  if (best_path->jpath.path.param_info)
4770  {
4771  joinclauses = (List *)
4772  replace_nestloop_params(root, (Node *) joinclauses);
4773  otherclauses = (List *)
4774  replace_nestloop_params(root, (Node *) otherclauses);
4775  }
4776 
4777  /*
4778  * Rearrange hashclauses, if needed, so that the outer variable is always
4779  * on the left.
4780  */
4781  hashclauses = get_switched_clauses(best_path->path_hashclauses,
4782  best_path->jpath.outerjoinpath->parent->relids);
4783 
4784  /*
4785  * If there is a single join clause and we can identify the outer variable
4786  * as a simple column reference, supply its identity for possible use in
4787  * skew optimization. (Note: in principle we could do skew optimization
4788  * with multiple join clauses, but we'd have to be able to determine the
4789  * most common combinations of outer values, which we don't currently have
4790  * enough stats for.)
4791  */
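 /*
  * Worked example (hypothetical tables): for
  *   SELECT ... FROM big JOIN small ON big.x = small.x
  * with "big" on the outer side, skewTable/skewColumn identify big.x so
  * the executor can reserve skew buckets for its most common values.
  */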
4792  if (list_length(hashclauses) == 1)
4793  {
4794  OpExpr *clause = (OpExpr *) linitial(hashclauses);
4795  Node *node;
4796 
4797  Assert(is_opclause(clause));
4798  node = (Node *) linitial(clause->args);
4799  if (IsA(node, RelabelType))
4800  node = (Node *) ((RelabelType *) node)->arg;
4801  if (IsA(node, Var))
4802  {
4803  Var *var = (Var *) node;
4804  RangeTblEntry *rte;
4805 
4806  rte = root->simple_rte_array[var->varno];
4807  if (rte->rtekind == RTE_RELATION)
4808  {
4809  skewTable = rte->relid;
4810  skewColumn = var->varattno;
4811  skewInherit = rte->inh;
4812  }
4813  }
4814  }
4815 
4816  /*
4817  * Collect hash related information. The hashed expressions are
4818  * deconstructed into outer/inner expressions, so they can be computed
4819  * separately (inner expressions are used to build the hashtable via Hash,
4820  * outer expressions to perform lookups of tuples from HashJoin's outer
4821  * plan in the hashtable). Also collect operator information necessary to
4822  * build the hashtable.
4823  */
4824  foreach(lc, hashclauses)
4825  {
4826  OpExpr *hclause = lfirst_node(OpExpr, lc);
4827 
4828  hashoperators = lappend_oid(hashoperators, hclause->opno);
4829  hashcollations = lappend_oid(hashcollations, hclause->inputcollid);
4830  outer_hashkeys = lappend(outer_hashkeys, linitial(hclause->args));
4831  inner_hashkeys = lappend(inner_hashkeys, lsecond(hclause->args));
4832  }
4833 
4834  /*
4835  * Build the hash node and hash join node.
4836  */
4837  hash_plan = make_hash(inner_plan,
4838  inner_hashkeys,
4839  skewTable,
4840  skewColumn,
4841  skewInherit);
4842 
4843  /*
4844  * Set Hash node's startup & total costs equal to total cost of input
4845  * plan; this only affects EXPLAIN display, not decisions.
4846  */
4847  copy_plan_costsize(&hash_plan->plan, inner_plan);
4848  hash_plan->plan.startup_cost = hash_plan->plan.total_cost;
4849 
4850  /*
4851  * If parallel-aware, the executor will also need an estimate of the total
4852  * number of rows expected from all participants so that it can size the
4853  * shared hash table.
4854  */
4855  if (best_path->jpath.path.parallel_aware)
4856  {
4857  hash_plan->plan.parallel_aware = true;
4858  hash_plan->rows_total = best_path->inner_rows_total;
4859  }
4860 
4861  join_plan = make_hashjoin(tlist,
4862  joinclauses,
4863  otherclauses,
4864  hashclauses,
4865  hashoperators,
4866  hashcollations,
4867  outer_hashkeys,
4868  outer_plan,
4869  (Plan *) hash_plan,
4870  best_path->jpath.jointype,
4871  best_path->jpath.inner_unique);
4872 
4873  copy_generic_path_info(&join_plan->join.plan, &best_path->jpath.path);
4874 
4875  return join_plan;
4876 }
4877 
4878 
4879 /*****************************************************************************
4880  *
4881  * SUPPORTING ROUTINES
4882  *
4883  *****************************************************************************/
4884 
4885 /*
4886  * replace_nestloop_params
4887  * Replace outer-relation Vars and PlaceHolderVars in the given expression
4888  * with nestloop Params
4889  *
4890  * All Vars and PlaceHolderVars belonging to the relation(s) identified by
4891  * root->curOuterRels are replaced by Params, and entries are added to
4892  * root->curOuterParams if not already present.
4893  */
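 /*
  * Illustrative sketch (hypothetical plan shape): in a nestloop whose inner
  * side is a parameterized index scan with qual "inner.x = outer.y", the Var
  * "outer.y" is rewritten here into a PARAM_EXEC Param that the NestLoop
  * node refills from its current outer tuple on each rescan.
  */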
4894 static Node *
4895 replace_nestloop_params(PlannerInfo *root, Node *expr)
4896 {
4897  /* No setup needed for tree walk, so away we go */
4898  return replace_nestloop_params_mutator(expr, root);
4899 }
4900 
4901 static Node *
4902 replace_nestloop_params_mutator(Node *node, PlannerInfo *root)
4903 {
4904  if (node == NULL)
4905  return NULL;
4906  if (IsA(node, Var))
4907  {
4908  Var *var = (Var *) node;
4909 
4910  /* Upper-level Vars should be long gone at this point */
4911  Assert(var->varlevelsup == 0);
4912  /* If not to be replaced, we can just return the Var unmodified */
4913  if (IS_SPECIAL_VARNO(var->varno) ||
4914  !bms_is_member(var->varno, root->curOuterRels))
4915  return node;
4916  /* Replace the Var with a nestloop Param */
4917  return (Node *) replace_nestloop_param_var(root, var);
4918  }
4919  if (IsA(node, PlaceHolderVar))
4920  {
4921  PlaceHolderVar *phv = (PlaceHolderVar *) node;
4922 
4923  /* Upper-level PlaceHolderVars should be long gone at this point */
4924  Assert(phv->phlevelsup == 0);
4925 
4926  /* Check whether we need to replace the PHV */
4927  if (!bms_is_subset(find_placeholder_info(root, phv)->ph_eval_at,
4928  root->curOuterRels))
4929  {
4930  /*
4931  * We can't replace the whole PHV, but we might still need to
4932  * replace Vars or PHVs within its expression, in case it ends up
4933  * actually getting evaluated here. (It might get evaluated in
4934  * this plan node, or some child node; in the latter case we don't
4935  * really need to process the expression here, but we haven't got
4936  * enough info to tell if that's the case.) Flat-copy the PHV
4937  * node and then recurse on its expression.
4938  *
4939  * Note that after doing this, we might have different
4940  * representations of the contents of the same PHV in different
4941  * parts of the plan tree. This is OK because equal() will just
4942  * match on phid/phlevelsup, so setrefs.c will still recognize an
4943  * upper-level reference to a lower-level copy of the same PHV.
4944  */
4945  PlaceHolderVar *newphv = makeNode(PlaceHolderVar);
4946 
4947  memcpy(newphv, phv, sizeof(PlaceHolderVar));
4948  newphv->phexpr = (Expr *)
4949  replace_nestloop_params_mutator((Node *) phv->phexpr,
4950  root);
4951  return (Node *) newphv;
4952  }
4953  /* Replace the PlaceHolderVar with a nestloop Param */
4954  return (Node *) replace_nestloop_param_placeholdervar(root, phv);
4955  }
4956  return expression_tree_mutator(node,
4957  replace_nestloop_params_mutator,
4958  (void *) root);
4959 }
4960 
4961 /*
4962  * fix_indexqual_references
4963  * Adjust indexqual clauses to the form the executor's indexqual
4964  * machinery needs.
4965  *
4966  * We have three tasks here:
4967  * * Select the actual qual clauses out of the input IndexClause list,
4968  * and remove RestrictInfo nodes from the qual clauses.
4969  * * Replace any outer-relation Var or PHV nodes with nestloop Params.
4970  * (XXX eventually, that responsibility should go elsewhere?)
4971  * * Index keys must be represented by Var nodes with varattno set to the
4972  * index's attribute number, not the attribute number in the original rel.
4973  *
4974  * *stripped_indexquals_p receives a list of the actual qual clauses.
4975  *
4976  * *fixed_indexquals_p receives a list of the adjusted quals. This is a copy
4977  * that shares no substructure with the original; this is needed in case there
4978  * are subplans in it (we need two separate copies of the subplan tree, or
4979  * things will go awry).
4980  */
4981 static void
4982 fix_indexqual_references(PlannerInfo *root, IndexPath *index_path,
4983  List **stripped_indexquals_p, List **fixed_indexquals_p)
4984 {
4985  IndexOptInfo *index = index_path->indexinfo;
4986  List *stripped_indexquals;
4987  List *fixed_indexquals;
4988  ListCell *lc;
4989 
4990  stripped_indexquals = fixed_indexquals = NIL;
4991 
4992  foreach(lc, index_path->indexclauses)
4993  {
4994  IndexClause *iclause = lfirst_node(IndexClause, lc);
4995  int indexcol = iclause->indexcol;
4996  ListCell *lc2;
4997 
4998  foreach(lc2, iclause->indexquals)
4999  {
5000  RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc2);
5001  Node *clause = (Node *) rinfo->clause;
5002 
5003  stripped_indexquals = lappend(stripped_indexquals, clause);
5004  clause = fix_indexqual_clause(root, index, indexcol,
5005  clause, iclause->indexcols);
5006  fixed_indexquals = lappend(fixed_indexquals, clause);
5007  }
5008  }
5009 
5010  *stripped_indexquals_p = stripped_indexquals;
5011  *fixed_indexquals_p = fixed_indexquals;
5012 }
5013 
5014 /*
5015  * fix_indexorderby_references
5016  * Adjust indexorderby clauses to the form the executor's index
5017  * machinery needs.
5018  *
5019  * This is a simplified version of fix_indexqual_references. The input is
5020  * bare clauses and a separate indexcol list, instead of IndexClauses.
5021  */
5022 static List *
5023 fix_indexorderby_references(PlannerInfo *root, IndexPath *index_path)
5024 {
5025  IndexOptInfo *index = index_path->indexinfo;
5026  List *fixed_indexorderbys;
5027  ListCell *lcc,
5028  *lci;
5029 
5030  fixed_indexorderbys = NIL;
5031 
5032  forboth(lcc, index_path->indexorderbys, lci, index_path->indexorderbycols)
5033  {
5034  Node *clause = (Node *) lfirst(lcc);
5035  int indexcol = lfirst_int(lci);
5036 
5037  clause = fix_indexqual_clause(root, index, indexcol, clause, NIL);
5038  fixed_indexorderbys = lappend(fixed_indexorderbys, clause);
5039  }
5040 
5041  return fixed_indexorderbys;
5042 }
5043 
5044 /*
5045  * fix_indexqual_clause
5046  * Convert a single indexqual clause to the form needed by the executor.
5047  *
5048  * We replace nestloop params here, and replace the index key variables
5049  * or expressions by index Var nodes.
5050  */
5051 static Node *
5052 fix_indexqual_clause(PlannerInfo *root, IndexOptInfo *index, int indexcol,
5053  Node *clause, List *indexcolnos)
5054 {
5055  /*
5056  * Replace any outer-relation variables with nestloop params.
5057  *
5058  * This also makes a copy of the clause, so it's safe to modify it
5059  * in-place below.
5060  */
5061  clause = replace_nestloop_params(root, clause);
5062 
5063  if (IsA(clause, OpExpr))
5064  {
5065  OpExpr *op = (OpExpr *) clause;
5066 
5067  /* Replace the indexkey expression with an index Var. */
5068  linitial(op->args) = fix_indexqual_operand(linitial(op->args),
5069  index,
5070  indexcol);
5071  }
5072  else if (IsA(clause, RowCompareExpr))
5073  {
5074  RowCompareExpr *rc = (RowCompareExpr *) clause;
5075  ListCell *lca,
5076  *lcai;
5077 
5078  /* Replace the indexkey expressions with index Vars. */
5079  Assert(list_length(rc->largs) == list_length(indexcolnos));
5080  forboth(lca, rc->largs, lcai, indexcolnos)
5081  {
5082  lfirst(lca) = fix_indexqual_operand(lfirst(lca),
5083  index,
5084  lfirst_int(lcai));
5085  }
5086  }
5087  else if (IsA(clause, ScalarArrayOpExpr))
5088  {
5089  ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
5090 
5091  /* Replace the indexkey expression with an index Var. */
5092  linitial(saop->args) = fix_indexqual_operand(linitial(saop->args),
5093  index,
5094  indexcol);
5095  }
5096  else if (IsA(clause, NullTest))
5097  {
5098  NullTest *nt = (NullTest *) clause;
5099 
5100  /* Replace the indexkey expression with an index Var. */
5101  nt->arg = (Expr *) fix_indexqual_operand((Node *) nt->arg,
5102  index,
5103  indexcol);
5104  }
5105  else
5106  elog(ERROR, "unsupported indexqual type: %d",
5107  (int) nodeTag(clause));
5108 
5109  return clause;
5110 }
5111 
5112 /*
5113  * fix_indexqual_operand
5114  * Convert an indexqual expression to a Var referencing the index column.
5115  *
5116  * We represent index keys by Var nodes having varno == INDEX_VAR and varattno
5117  * equal to the index's attribute number (index column position).
5118  *
5119  * Most of the code here is just for sanity cross-checking that the given
5120  * expression actually matches the index column it's claimed to.
5121  */
5122 static Node *
5123 fix_indexqual_operand(Node *node, IndexOptInfo *index, int indexcol)
5124 {
5125  Var *result;
5126  int pos;
5127  ListCell *indexpr_item;
5128 
5129  /*
5130  * Remove any binary-compatible relabeling of the indexkey
5131  */
5132  if (IsA(node, RelabelType))
5133  node = (Node *) ((RelabelType *) node)->arg;
5134 
5135  Assert(indexcol >= 0 && indexcol < index->ncolumns);
5136 
5137  if (index->indexkeys[indexcol] != 0)
5138  {
5139  /* It's a simple index column */
5140  if (IsA(node, Var) &&
5141  ((Var *) node)->varno == index->rel->relid &&
5142  ((Var *) node)->varattno == index->indexkeys[indexcol])
5143  {
5144  result = (Var *) copyObject(node);
5145  result->varno = INDEX_VAR;
5146  result->varattno = indexcol + 1;
5147  return (Node *) result;
5148  }
5149  else
5150  elog(ERROR, "index key does not match expected index column");
5151  }
5152 
5153  /* It's an index expression, so find and cross-check the expression */
5154  indexpr_item = list_head(index->indexprs);
5155  for (pos = 0; pos < index->ncolumns; pos++)
5156  {
5157  if (index->indexkeys[pos] == 0)
5158  {
5159  if (indexpr_item == NULL)
5160  elog(ERROR, "too few entries in indexprs list");
5161  if (pos == indexcol)
5162  {
5163  Node *indexkey;
5164 
5165  indexkey = (Node *) lfirst(indexpr_item);
5166  if (indexkey && IsA(indexkey, RelabelType))
5167  indexkey = (Node *) ((RelabelType *) indexkey)->arg;
5168  if (equal(node, indexkey))
5169  {
5170  result = makeVar(INDEX_VAR, indexcol + 1,
5171  exprType(lfirst(indexpr_item)), -1,
5172  exprCollation(lfirst(indexpr_item)),
5173  0);
5174  return (Node *) result;
5175  }
5176  else
5177  elog(ERROR, "index key does not match expected index column");
5178  }
5179  indexpr_item = lnext(index->indexprs, indexpr_item);
5180  }
5181  }
5182 
5183  /* Oops... */
5184  elog(ERROR, "index key does not match expected index column");
5185  return NULL; /* keep compiler quiet */
5186 }
5187 
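/*
 * Illustrative sketch only (not part of createplan.c): a self-contained
 * model of the remapping that fix_indexqual_operand performs.  A reference
 * to a table column is rewritten as a reference to the index column that
 * stores it: the varno becomes the special INDEX_VAR marker and the
 * varattno becomes the 1-based position of the column within the index.
 * The toy types and the TOY_INDEX_VAR value are assumptions made for the
 * sketch; the real code works on Var nodes and IndexOptInfo.
 */
#include <stdio.h>

#define TOY_INDEX_VAR (-3)      /* stand-in for an index-column varno */

typedef struct ToyVar
{
    int         varno;          /* range-table index of the base relation */
    int         varattno;       /* column number within that relation */
} ToyVar;

typedef struct ToyIndex
{
    int         relid;          /* base relation the index is on */
    int         ncolumns;
    int         indexkeys[4];   /* table attno per index column, 0 = expr */
} ToyIndex;

/* Rewrite 'var' as a reference to index column 'indexcol', or fail. */
static int
toy_fix_operand(ToyVar *var, const ToyIndex *index, int indexcol)
{
    if (indexcol < 0 || indexcol >= index->ncolumns)
        return -1;
    if (var->varno == index->relid &&
        var->varattno == index->indexkeys[indexcol])
    {
        var->varno = TOY_INDEX_VAR;
        var->varattno = indexcol + 1;   /* index columns are 1-based */
        return 0;
    }
    return -1;                  /* key does not match the claimed column */
}

int
main(void)
{
    ToyIndex    idx = {.relid = 1, .ncolumns = 2, .indexkeys = {4, 2}};
    ToyVar      v = {.varno = 1, .varattno = 2};    /* table column 2 */

    if (toy_fix_operand(&v, &idx, 1) == 0)
        printf("varno=%d varattno=%d\n", v.varno, v.varattno);
    return 0;
}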
5188 /*
5189  * get_switched_clauses
5190  * Given a list of merge or hash joinclauses (as RestrictInfo nodes),
5191  * extract the bare clauses, and rearrange the elements within the
5192  * clauses, if needed, so the outer join variable is on the left and
5193  * the inner is on the right. The original clause data structure is not
5194  * touched; a modified list is returned. We do, however, set the transient
5195  * outer_is_left field in each RestrictInfo to show which side was which.
5196  */
5197 static List *
5198 get_switched_clauses(List *clauses, Relids outerrelids)
5199 {
5200  List *t_list = NIL;
5201  ListCell *l;
5202 
5203  foreach(l, clauses)
5204  {
5205  RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(l);
5206  OpExpr *clause = (OpExpr *) restrictinfo->clause;
5207 
5208  Assert(is_opclause(clause));
5209  if (bms_is_subset(restrictinfo->right_relids, outerrelids))
5210  {
5211  /*
5212  * Duplicate just enough of the structure to allow commuting the
5213  * clause without changing the original list. Could use
5214  * copyObject, but a complete deep copy is overkill.
5215  */
5216  OpExpr *temp = makeNode(OpExpr);
5217 
5218  temp->opno = clause->opno;
5219  temp->opfuncid = InvalidOid;
5220  temp->opresulttype = clause->opresulttype;
5221  temp->opretset = clause->opretset;
5222  temp->opcollid = clause->opcollid;
5223  temp->inputcollid = clause->inputcollid;
5224  temp->args = list_copy(clause->args);
5225  temp->location = clause->location;
5226  /* Commute it --- note this modifies the temp node in-place. */
5227  CommuteOpExpr(temp);
5228  t_list = lappend(t_list, temp);
5229  restrictinfo->outer_is_left = false;
5230  }
5231  else
5232  {
5233  Assert(bms_is_subset(restrictinfo->left_relids, outerrelids));
5234  t_list = lappend(t_list, clause);
5235  restrictinfo->outer_is_left = true;
5236  }
5237  }
5238  return t_list;
5239 }
5240 
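/*
 * Illustrative sketch only (not part of createplan.c): a self-contained
 * model of the commuting step used by get_switched_clauses.  When the
 * outer relation's variable sits on the right-hand side of "lhs op rhs",
 * a shallow copy of the clause is made, its arguments are swapped, and
 * the operator is replaced by its commutator (for "=" the commutator is
 * "=" itself; for "<" it is ">").  The toy types, the outer_is_right
 * flag, and the caller-supplied commutator are assumptions made for the
 * sketch; the real code tests relid sets and calls CommuteOpExpr.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct ToyOpClause
{
    const char *opname;
    const char *args[2];        /* args[0] = left input, args[1] = right */
} ToyOpClause;

/*
 * Return a copy of 'clause', commuted if the outer-side variable is on
 * the right; the original clause is never modified.
 */
static ToyOpClause
toy_switch_clause(ToyOpClause clause, bool outer_is_right,
                  const char *commutator)
{
    if (outer_is_right)
    {
        ToyOpClause temp = clause;      /* shallow copy only */
        const char *swap = temp.args[0];

        temp.args[0] = temp.args[1];
        temp.args[1] = swap;
        temp.opname = commutator;
        return temp;
    }
    return clause;
}

int
main(void)
{
    ToyOpClause c = {"<", {"inner.y", "outer.x"}};  /* outer var on right */
    ToyOpClause fixed = toy_switch_clause(c, true, ">");

    printf("%s %s %s\n", fixed.args[0], fixed.opname, fixed.args[1]);
    return 0;
}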
5241 /*
5242  * order_qual_clauses
5243  * Given a list of qual clauses that will all be evaluated at the same
5244  * plan node, sort the list into the order we want to check the quals
5245  * in at runtime.
5246  *
5247  * When security barrier quals are used in the query, we may have quals with
5248  * different security levels in the list. Quals of lower security_level
5249  * must go before quals of higher security_level, except that we can grant
5250  * exceptions to move up quals that are leakproof. When security level
5251  * doesn't force the decision, we prefer to order clauses by estimated
5252  * execution cost, cheapest first.
5253  *
5254  * Ideally the order should be driven by a combination of execution cost and
5255  * selectivity, but it's not immediately clear how to account for both,
5256  * and given the uncertainty of the estimates the reliability of the decisions
5257  * would be doubtful anyway. So we just order by security level then
5258  * estimated per-tuple cost, being careful not to change the order when
5259  * (as is often the case) the estimates are identical.
5260  *
5261  * Although this will work on either bare clauses or RestrictInfos, it's
5262  * much faster to apply it to RestrictInfos, since it can re-use cost
5263  * information that is cached in RestrictInfos. XXX in the bare-clause
5264  * case, we are also not able to apply security considerations. That is
5265  * all right for the moment, because the bare-clause case doesn't occur
5266  * anywhere that barrier quals could be present, but it would be better to
5267  * get rid of it.
5268  *
5269  * Note: some callers pass lists that contain entries that will later be
5270  * removed; this is the easiest way to let this routine see RestrictInfos
5271  * instead of bare clauses. This is another reason why trying to consider
5272  * selectivity in the ordering would likely do the wrong thing.
5273  */
5274 static List *
5275 order_qual_clauses(PlannerInfo *root, List *clauses)
5276 {
5277  typedef struct
5278  {
5279  Node *clause;
5280  Cost cost;
5281  Index security_level;
5282  } QualItem;
5283  int nitems = list_length(clauses);
5284  QualItem *items;
5285  ListCell *lc;
5286  int i;
5287  List *result;
5288 
5289  /* No need to work hard for 0 or 1 clause */
5290  if (nitems <= 1)
5291  return clauses;
5292 
5293  /*
5294  * Collect the items and costs into an array. This is to avoid repeated
5295  * cost_qual_eval work if the inputs aren't RestrictInfos.
5296  */
5297  items = (QualItem *) palloc(nitems * sizeof(QualItem));
5298  i = 0;
5299  foreach(lc, clauses)
5300  {
5301  Node *clause = (Node *) lfirst(lc);
5302  QualCost qcost;
5303 
5304  cost_qual_eval_node(&qcost, clause, root);
5305  items[i].clause = clause;
5306  items[i].cost = qcost.per_tuple;
5307  if (IsA(clause, RestrictInfo))
5308  {
5309  RestrictInfo *rinfo = (RestrictInfo *) clause;
5310 
5311  /*
5312  * If a clause is leakproof, it doesn't have to be constrained by
5313  * its nominal security level. If it's also reasonably cheap
5314  * (here defined as 10X cpu_operator_cost), pretend it has
5315  * security_level 0, which will allow it to go in front of
5316  * more-expensive quals of lower security levels. Of course, that
5317  * will also force it to go in front of cheaper quals of its own
5318  * security level, which is not so great, but we can alleviate
5319  * that risk by applying the cost limit cutoff.
5320  */
5321  if (rinfo->leakproof && items[i].cost < 10 * cpu_operator_cost)
5322  items[i].security_level = 0;
5323  else
5324  items[i].security_level = rinfo->security_level;
5325  }
5326  else
5327  items[i].security_level = 0;
5328  i++;
5329  }
5330 
5331  /*
5332  * Sort. We don't use qsort() because it's not guaranteed stable for
5333  * equal keys. The expected number of entries is small enough that a
5334  * simple insertion sort should be good enough.
5335  */
5336  for (i = 1; i < nitems; i++)
5337  {
5338  QualItem newitem = items[i];
5339  int j;
5340 
5341  /* insert newitem into the already-sorted subarray */
5342  for (j = i; j > 0; j--)
5343  {
5344  QualItem *olditem = &items[j - 1];
5345 
5346  if (newitem.security_level > olditem->security_level ||
5347  (newitem.security_level == olditem->security_level &&
5348  newitem.cost >= olditem->cost))
5349  break;
5350  items[j] = *olditem;
5351  }
5352  items[j] = newitem;
5353  }
5354 
5355  /* Convert back to a list */
5356  result = NIL;
5357  for (i = 0; i < nitems; i++)
5358  result = lappend(result, items[i].clause);
5359 
5360  return result;
5361 }
5362 
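/*
 * Illustrative sketch only (not part of createplan.c): a self-contained
 * demonstration of the ordering rule used by order_qual_clauses.  Items
 * are ordered by security level first and estimated per-tuple cost second,
 * using an insertion sort so that items with equal keys keep their original
 * relative order (plain qsort() gives no such guarantee).  The toy type,
 * names, and sample costs below are assumptions made for the sketch.
 */
#include <stdio.h>

typedef struct ToyQual
{
    const char *name;
    int         security_level;
    double      cost;
} ToyQual;

static void
toy_order_quals(ToyQual *items, int nitems)
{
    for (int i = 1; i < nitems; i++)
    {
        ToyQual     newitem = items[i];
        int         j;

        /* shift earlier items right until newitem's slot is found */
        for (j = i; j > 0; j--)
        {
            ToyQual    *olditem = &items[j - 1];

            if (newitem.security_level > olditem->security_level ||
                (newitem.security_level == olditem->security_level &&
                 newitem.cost >= olditem->cost))
                break;          /* equal keys stay behind earlier items */
            items[j] = *olditem;
        }
        items[j] = newitem;
    }
}

int
main(void)
{
    ToyQual     quals[] = {
        {"leaky_but_cheap", 1, 0.0025},
        {"barrier_qual_a", 0, 0.0100},
        {"barrier_qual_b", 0, 0.0100},  /* ties with _a, must stay after it */
    };
    int         n = (int) (sizeof(quals) / sizeof(quals[0]));

    toy_order_quals(quals, n);
    for (int i = 0; i < n; i++)
        printf("%s\n", quals[i].name);
    return 0;
}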
5363 /*
5364  * Copy cost and size info from a Path node to the Plan node created from it.
5365  * The executor usually won't use this info, but it's needed by EXPLAIN.
5366  * Also copy the parallel-related flags, which the executor *will* use.
5367  */
5368 static void
5369 copy_generic_path_info(Plan *dest, Path *src)
5370 {
5371  dest->startup_cost = src->startup_cost;
5372  dest->total_cost = src->total_cost;
5373  dest->plan_rows = src->rows;
5374  dest->plan_width = src->pathtarget->width;
5375  dest->parallel_aware = src->parallel_aware;
5376  dest->parallel_safe = src->parallel_safe;
5377 }
5378 
5379 /*
5380  * Copy cost and size info from a lower plan node to an inserted node.
5381  * (Most callers alter the info after copying it.)
5382  */
5383 static void
5384 copy_plan_costsize(Plan *dest, Plan *src)
5385 {
5386  dest->startup_cost = src->startup_cost;
5387  dest->total_cost = src->total_cost;
5388  dest->plan_rows = src->plan_rows;
5389  dest->plan_width = src->plan_width;
5390  /* Assume the inserted node is not parallel-aware. */
5391  dest->parallel_aware = false;
5392  /* Assume the inserted node is parallel-safe, if child plan is. */
5393  dest->parallel_safe = src->parallel_safe;
5394 }
5395 
5396 /*
5397  * Some places in this file build Sort nodes that don't have a directly
5398  * corresponding Path node. The cost of the sort is, or should have been,
5399  * included in the cost of the Path node we're working from, but since it's
5400  * not split out, we have to re-figure it using cost_sort(). This is just
5401  * to label the Sort node nicely for EXPLAIN.
5402  *
5403  * limit_tuples is as for cost_sort (in particular, pass -1 if no limit)
5404  */
5405 static void
5406 label_sort_with_costsize(PlannerInfo *root, Sort *plan, double limit_tuples)
5407 {
5408  Plan *lefttree = plan->plan.lefttree;
5409  Path sort_path; /* dummy for result of cost_sort */
5410 
5411  /*
5412  * This function shouldn't have to deal with IncrementalSort plans because
5413  * they are only created from corresponding Path nodes.
5414  */
5415  Assert(IsA(plan, Sort));
5416 
5417  cost_sort(&sort_path, root, NIL,
5418  lefttree->total_cost,
5419  lefttree->plan_rows,
5420  lefttree->plan_width,
5421  0.0,
5422  work_mem,
5423  limit_tuples);
5424  plan->plan.startup_cost = sort_path.startup_cost;
5425  plan->plan.total_cost = sort_path.total_cost;
5426  plan->plan.plan_rows = lefttree->plan_rows;
5427  plan->plan.plan_width = lefttree->plan_width;
5428  plan->plan.parallel_aware = false;
5429  plan->plan.parallel_safe = lefttree->parallel_safe;
5430 }
5431 
5432 /*
5433  * bitmap_subplan_mark_shared
5434  * Set isshared flag in bitmap subplan so that it will be created in
5435  * shared memory.
5436  */
5437 static void
5438 bitmap_subplan_mark_shared(Plan *plan)
5439 {
5440  if (IsA(plan, BitmapAnd))
5441  bitmap_subplan_mark_shared(linitial(((BitmapAnd *) plan)->bitmapplans));
5442  else if (IsA(plan, BitmapOr))
5443  {
5444  ((BitmapOr *) plan)->isshared = true;
5445  bitmap_subplan_mark_shared(linitial(((BitmapOr *) plan)->bitmapplans));
5446  }
5447  else if (IsA(plan, BitmapIndexScan))
5448  ((BitmapIndexScan *) plan)->isshared = true;
5449  else
5450  elog(ERROR, "unrecognized node type: %d", nodeTag(plan));
5451 }
5452 
5453 /*****************************************************************************
5454  *
5455  * PLAN NODE BUILDING ROUTINES
5456  *
5457  * In general, these functions are not passed the original Path and therefore
5458  * leave it to the caller to fill in the cost/width fields from the Path,
5459  * typically by calling copy_generic_path_info(). This convention is
5460  * somewhat historical, but it does support a few places above where we build
5461  * a plan node without having an exactly corresponding Path node. Under no
5462  * circumstances should one of these functions do its own cost calculations,
5463  * as that would be redundant with calculations done while building Paths.
5464  *
5465  *****************************************************************************/
5466 
5467 static SeqScan *
5468 make_seqscan(List *qptlist,
5469  List *qpqual,
5470  Index scanrelid)
5471 {
5472  SeqScan *node = makeNode(SeqScan);
5473  Plan *plan = &node->scan.plan;
5474 
5475  plan->targetlist = qptlist;
5476  plan->qual = qpqual;
5477  plan->lefttree = NULL;
5478  plan->righttree = NULL;
5479  node->scan.scanrelid = scanrelid;
5480 
5481  return node;
5482 }
5483 
5484 static SampleScan *
5485 make_samplescan(List *qptlist,
5486  List *qpqual,
5487  Index scanrelid,
5488  TableSampleClause *tsc)
5489 {
5490  SampleScan *node = makeNode(SampleScan);
5491  Plan *plan = &node->scan.plan;
5492 
5493  plan->targetlist = qptlist;
5494  plan->qual = qpqual;
5495  plan->lefttree = NULL;
5496  plan->righttree = NULL;
5497  node->scan.scanrelid = scanrelid;
5498  node->tablesample = tsc;
5499 
5500  return node;
5501 }
5502 
5503 static IndexScan *
5504 make_indexscan(List *qptlist,
5505  List *qpqual,
5506  Index scanrelid,
5507  Oid indexid,
5508  List *indexqual,
5509  List *indexqualorig,
5510  List *indexorderby,
5511  List *indexorderbyorig,
5512  List *indexorderbyops,
5513  ScanDirection indexscandir)
5514 {
5515  IndexScan *node = makeNode(IndexScan);
5516  Plan *plan = &node->scan.plan;
5517 
5518  plan->targetlist = qptlist;
5519  plan->qual = qpqual;
5520  plan->lefttree = NULL;
5521  plan->righttree = NULL;
5522  node->scan.scanrelid = scanrelid;
5523  node->indexid = indexid;
5524  node->indexqual = indexqual;
5525  node->indexqualorig = indexqualorig;
5526  node->indexorderby = indexorderby;
5527  node->indexorderbyorig = indexorderbyorig;
5528  node->indexorderbyops = indexorderbyops;
5529  node->indexorderdir = indexscandir;
5530 
5531  return node;
5532 }
5533 
5534 static IndexOnlyScan *
5535 make_indexonlyscan(List *qptlist,
5536  List *qpqual,
5537  Index scanrelid,
5538  Oid indexid,
5539  List *indexqual,
5540  List *recheckqual,
5541  List *indexorderby,
5542  List *indextlist,
5543  ScanDirection indexscandir)
5544 {
5545  IndexOnlyScan *node = makeNode(IndexOnlyScan);
5546  Plan *plan = &node->scan.plan;
5547 
5548  plan->targetlist = qptlist;
5549  plan->qual = qpqual;
5550  plan->lefttree = NULL;
5551  plan->righttree = NULL;
5552  node->scan.scanrelid = scanrelid;
5553  node->indexid = indexid;
5554  node->indexqual = indexqual;
5555  node->recheckqual = recheckqual;
5556  node->indexorderby = indexorderby;
5557  node->indextlist = indextlist;
5558  node->indexorderdir = indexscandir;
5559 
5560  return node;
5561 }
5562 
5563 static BitmapIndexScan *
5564 make_bitmap_indexscan(Index scanrelid,
5565  Oid indexid,
5566  List *indexqual,
5567  List *indexqualorig)
5568 {
5569  BitmapIndexScan *node = makeNode(BitmapIndexScan);
5570  Plan *plan = &node->scan.plan;
5571 
5572  plan->targetlist = NIL; /* not used */
5573  plan->qual = NIL; /* not used */
5574  plan->lefttree = NULL;
5575  plan->righttree = NULL;
5576  node->scan.scanrelid = scanrelid;
5577  node->indexid = indexid;
5578  node->indexqual = indexqual;
5579  node->indexqualorig = indexqualorig;
5580 
5581  return node;
5582 }
5583 
5584 static BitmapHeapScan *
5585 make_bitmap_heapscan(List *qptlist,
5586  List *qpqual,
5587  Plan *lefttree,
5588  List *bitmapqualorig,
5589  Index scanrelid)
5590 {
5591  BitmapHeapScan *node = makeNode(BitmapHeapScan);
5592  Plan *plan = &node->scan.plan;
5593 
5594  plan->targetlist = qptlist;
5595  plan->qual = qpqual;
5596  plan->lefttree = lefttree;
5597  plan->righttree = NULL;
5598  node->scan.scanrelid = scanrelid;
5599  node->bitmapqualorig = bitmapqualorig;
5600 
5601  return node;
5602 }
5603 
5604 static TidScan *
5605 make_tidscan(List *qptlist,
5606  List *qpqual,
5607  Index scanrelid,
5608  List *tidquals)
5609 {
5610  TidScan *node = makeNode(TidScan);
5611  Plan *plan = &node->scan.plan;
5612 
5613  plan->targetlist = qptlist;
5614  plan->qual = qpqual;
5615  plan->lefttree = NULL;
5616  plan->righttree = NULL;
5617  node->scan.scanrelid = scanrelid;
5618  node->tidquals = tidquals;
5619 
5620  return node;
5621 }
5622 
5623 static TidRangeScan *
5624 make_tidrangescan(List *qptlist,
5625  List *qpqual,
5626  Index scanrelid,
5627  List *tidrangequals)
5628 {
5629  TidRangeScan *node = makeNode(TidRangeScan);
5630  Plan *plan = &node->scan.plan;
5631 
5632  plan->targetlist = qptlist;
5633  plan->qual = qpqual;
5634  plan->lefttree = NULL;
5635  plan->righttree = NULL;
5636  node->scan.scanrelid = scanrelid;
5637  node->tidrangequals = tidrangequals;
5638 
5639  return node;
5640 }
5641 
5642 static SubqueryScan *
5643 make_subqueryscan(List *qptlist,
5644  List *qpqual,
5645  Index scanrelid,
5646  Plan *subplan)
5647 {
5648  SubqueryScan *node = makeNode(SubqueryScan);
5649  Plan *plan = &node->scan.plan;
5650 
5651  plan->targetlist = qptlist;
5652  plan->qual = qpqual;
5653  plan->lefttree = NULL;
5654  plan->righttree = NULL;
5655  node->scan.scanrelid = scanrelid;
5656  node->subplan = subplan;
5657  node->scanstatus = SUBQUERY_SCAN_UNKNOWN;
5658 
5659  return node;
5660 }
5661 
5662 static FunctionScan *
5663 make_functionscan(List *qptlist,
5664  List *qpqual,
5665  Index scanrelid,
5666  List *functions,
5667  bool funcordinality)
5668 {
5669  FunctionScan *node = makeNode(FunctionScan);
5670  Plan *plan = &node->scan.plan;
5671 
5672  plan->targetlist = qptlist;
5673  plan->qual = qpqual;
5674  plan->lefttree = NULL;
5675  plan->righttree = NULL;
5676  node->scan.scanrelid = scanrelid;
5677  node->functions = functions;
5678  node->funcordinality = funcordinality;
5679 
5680  return node;
5681 }
5682 
5683 static TableFuncScan *
5684 make_tablefuncscan(List *qptlist,
5685  List *qpqual,
5686  Index scanrelid,
5687  TableFunc *tablefunc)
5688 {
5689  TableFuncScan *node = makeNode(TableFuncScan);
5690  Plan *plan = &node->scan.plan;
5691 
5692  plan->targetlist = qptlist;
5693  plan->qual = qpqual;
5694  plan->lefttree = NULL;
5695  plan->righttree = NULL;
5696  node->scan.scanrelid = scanrelid;