PostgreSQL Source Code  git master
createplan.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * createplan.c
4  * Routines to create the desired plan for processing a query.
5  * Planning is complete, we just need to convert the selected
6  * Path into a Plan.
7  *
8  * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
9  * Portions Copyright (c) 1994, Regents of the University of California
10  *
11  *
12  * IDENTIFICATION
13  * src/backend/optimizer/plan/createplan.c
14  *
15  *-------------------------------------------------------------------------
16  */
17 #include "postgres.h"
18 
19 #include <math.h>
20 
21 #include "access/sysattr.h"
22 #include "catalog/pg_class.h"
23 #include "foreign/fdwapi.h"
24 #include "miscadmin.h"
25 #include "nodes/extensible.h"
26 #include "nodes/makefuncs.h"
27 #include "nodes/nodeFuncs.h"
28 #include "optimizer/clauses.h"
29 #include "optimizer/cost.h"
30 #include "optimizer/optimizer.h"
31 #include "optimizer/paramassign.h"
32 #include "optimizer/paths.h"
33 #include "optimizer/placeholder.h"
34 #include "optimizer/plancat.h"
35 #include "optimizer/planmain.h"
36 #include "optimizer/prep.h"
37 #include "optimizer/restrictinfo.h"
38 #include "optimizer/subselect.h"
39 #include "optimizer/tlist.h"
40 #include "parser/parse_clause.h"
41 #include "parser/parsetree.h"
42 #include "partitioning/partprune.h"
43 #include "utils/lsyscache.h"
44 
45 
46 /*
47  * Flag bits that can appear in the flags argument of create_plan_recurse().
48  * These can be OR-ed together.
49  *
50  * CP_EXACT_TLIST specifies that the generated plan node must return exactly
51  * the tlist specified by the path's pathtarget (this overrides both
52  * CP_SMALL_TLIST and CP_LABEL_TLIST, if those are set). Otherwise, the
53  * plan node is allowed to return just the Vars and PlaceHolderVars needed
54  * to evaluate the pathtarget.
55  *
56  * CP_SMALL_TLIST specifies that a narrower tlist is preferred. This is
57  * passed down by parent nodes such as Sort and Hash, which will have to
58  * store the returned tuples.
59  *
60  * CP_LABEL_TLIST specifies that the plan node must return columns matching
61  * any sortgrouprefs specified in its pathtarget, with appropriate
62  * ressortgroupref labels. This is passed down by parent nodes such as Sort
63  * and Group, which need these values to be available in their inputs.
64  *
65  * CP_IGNORE_TLIST specifies that the caller plans to replace the targetlist,
66  * and therefore it doesn't matter a bit what target list gets generated.
67  */
68 #define CP_EXACT_TLIST 0x0001 /* Plan must return specified tlist */
69 #define CP_SMALL_TLIST 0x0002 /* Prefer narrower tlists */
70 #define CP_LABEL_TLIST 0x0004 /* tlist must contain sortgrouprefs */
71 #define CP_IGNORE_TLIST 0x0008 /* caller will replace tlist */
72 
73 
74 static Plan *create_plan_recurse(PlannerInfo *root, Path *best_path,
75  int flags);
76 static Plan *create_scan_plan(PlannerInfo *root, Path *best_path,
77  int flags);
78 static List *build_path_tlist(PlannerInfo *root, Path *path);
79 static bool use_physical_tlist(PlannerInfo *root, Path *path, int flags);
80 static List *get_gating_quals(PlannerInfo *root, List *quals);
81 static Plan *create_gating_plan(PlannerInfo *root, Path *path, Plan *plan,
82  List *gating_quals);
83 static Plan *create_join_plan(PlannerInfo *root, JoinPath *best_path);
84 static bool mark_async_capable_plan(Plan *plan, Path *path);
85 static Plan *create_append_plan(PlannerInfo *root, AppendPath *best_path,
86  int flags);
88  int flags);
90  GroupResultPath *best_path);
92 static Material *create_material_plan(PlannerInfo *root, MaterialPath *best_path,
93  int flags);
94 static Memoize *create_memoize_plan(PlannerInfo *root, MemoizePath *best_path,
95  int flags);
96 static Plan *create_unique_plan(PlannerInfo *root, UniquePath *best_path,
97  int flags);
98 static Gather *create_gather_plan(PlannerInfo *root, GatherPath *best_path);
100  ProjectionPath *best_path,
101  int flags);
102 static Plan *inject_projection_plan(Plan *subplan, List *tlist, bool parallel_safe);
103 static Sort *create_sort_plan(PlannerInfo *root, SortPath *best_path, int flags);
105  IncrementalSortPath *best_path, int flags);
106 static Group *create_group_plan(PlannerInfo *root, GroupPath *best_path);
108  int flags);
109 static Agg *create_agg_plan(PlannerInfo *root, AggPath *best_path);
110 static Plan *create_groupingsets_plan(PlannerInfo *root, GroupingSetsPath *best_path);
111 static Result *create_minmaxagg_plan(PlannerInfo *root, MinMaxAggPath *best_path);
112 static WindowAgg *create_windowagg_plan(PlannerInfo *root, WindowAggPath *best_path);
113 static SetOp *create_setop_plan(PlannerInfo *root, SetOpPath *best_path,
114  int flags);
116 static LockRows *create_lockrows_plan(PlannerInfo *root, LockRowsPath *best_path,
117  int flags);
119 static Limit *create_limit_plan(PlannerInfo *root, LimitPath *best_path,
120  int flags);
121 static SeqScan *create_seqscan_plan(PlannerInfo *root, Path *best_path,
122  List *tlist, List *scan_clauses);
123 static SampleScan *create_samplescan_plan(PlannerInfo *root, Path *best_path,
124  List *tlist, List *scan_clauses);
125 static Scan *create_indexscan_plan(PlannerInfo *root, IndexPath *best_path,
126  List *tlist, List *scan_clauses, bool indexonly);
128  BitmapHeapPath *best_path,
129  List *tlist, List *scan_clauses);
130 static Plan *create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
131  List **qual, List **indexqual, List **indexECs);
132 static void bitmap_subplan_mark_shared(Plan *plan);
133 static TidScan *create_tidscan_plan(PlannerInfo *root, TidPath *best_path,
134  List *tlist, List *scan_clauses);
136  TidRangePath *best_path,
137  List *tlist,
138  List *scan_clauses);
140  SubqueryScanPath *best_path,
141  List *tlist, List *scan_clauses);
142 static FunctionScan *create_functionscan_plan(PlannerInfo *root, Path *best_path,
143  List *tlist, List *scan_clauses);
144 static ValuesScan *create_valuesscan_plan(PlannerInfo *root, Path *best_path,
145  List *tlist, List *scan_clauses);
146 static TableFuncScan *create_tablefuncscan_plan(PlannerInfo *root, Path *best_path,
147  List *tlist, List *scan_clauses);
148 static CteScan *create_ctescan_plan(PlannerInfo *root, Path *best_path,
149  List *tlist, List *scan_clauses);
151  Path *best_path, List *tlist, List *scan_clauses);
152 static Result *create_resultscan_plan(PlannerInfo *root, Path *best_path,
153  List *tlist, List *scan_clauses);
154 static WorkTableScan *create_worktablescan_plan(PlannerInfo *root, Path *best_path,
155  List *tlist, List *scan_clauses);
157  List *tlist, List *scan_clauses);
159  CustomPath *best_path,
160  List *tlist, List *scan_clauses);
161 static NestLoop *create_nestloop_plan(PlannerInfo *root, NestPath *best_path);
162 static MergeJoin *create_mergejoin_plan(PlannerInfo *root, MergePath *best_path);
163 static HashJoin *create_hashjoin_plan(PlannerInfo *root, HashPath *best_path);
164 static Node *replace_nestloop_params(PlannerInfo *root, Node *expr);
166 static void fix_indexqual_references(PlannerInfo *root, IndexPath *index_path,
167  List **stripped_indexquals_p,
168  List **fixed_indexquals_p);
169 static List *fix_indexorderby_references(PlannerInfo *root, IndexPath *index_path);
171  IndexOptInfo *index, int indexcol,
172  Node *clause, List *indexcolnos);
173 static Node *fix_indexqual_operand(Node *node, IndexOptInfo *index, int indexcol);
174 static List *get_switched_clauses(List *clauses, Relids outerrelids);
175 static List *order_qual_clauses(PlannerInfo *root, List *clauses);
176 static void copy_generic_path_info(Plan *dest, Path *src);
177 static void copy_plan_costsize(Plan *dest, Plan *src);
178 static void label_sort_with_costsize(PlannerInfo *root, Sort *plan,
179  double limit_tuples);
180 static SeqScan *make_seqscan(List *qptlist, List *qpqual, Index scanrelid);
181 static SampleScan *make_samplescan(List *qptlist, List *qpqual, Index scanrelid,
182  TableSampleClause *tsc);
183 static IndexScan *make_indexscan(List *qptlist, List *qpqual, Index scanrelid,
184  Oid indexid, List *indexqual, List *indexqualorig,
185  List *indexorderby, List *indexorderbyorig,
186  List *indexorderbyops,
187  ScanDirection indexscandir);
188 static IndexOnlyScan *make_indexonlyscan(List *qptlist, List *qpqual,
189  Index scanrelid, Oid indexid,
190  List *indexqual, List *recheckqual,
191  List *indexorderby,
192  List *indextlist,
193  ScanDirection indexscandir);
194 static BitmapIndexScan *make_bitmap_indexscan(Index scanrelid, Oid indexid,
195  List *indexqual,
196  List *indexqualorig);
197 static BitmapHeapScan *make_bitmap_heapscan(List *qptlist,
198  List *qpqual,
199  Plan *lefttree,
200  List *bitmapqualorig,
201  Index scanrelid);
202 static TidScan *make_tidscan(List *qptlist, List *qpqual, Index scanrelid,
203  List *tidquals);
204 static TidRangeScan *make_tidrangescan(List *qptlist, List *qpqual,
205  Index scanrelid, List *tidrangequals);
206 static SubqueryScan *make_subqueryscan(List *qptlist,
207  List *qpqual,
208  Index scanrelid,
209  Plan *subplan);
210 static FunctionScan *make_functionscan(List *qptlist, List *qpqual,
211  Index scanrelid, List *functions, bool funcordinality);
212 static ValuesScan *make_valuesscan(List *qptlist, List *qpqual,
213  Index scanrelid, List *values_lists);
214 static TableFuncScan *make_tablefuncscan(List *qptlist, List *qpqual,
215  Index scanrelid, TableFunc *tablefunc);
216 static CteScan *make_ctescan(List *qptlist, List *qpqual,
217  Index scanrelid, int ctePlanId, int cteParam);
218 static NamedTuplestoreScan *make_namedtuplestorescan(List *qptlist, List *qpqual,
219  Index scanrelid, char *enrname);
220 static WorkTableScan *make_worktablescan(List *qptlist, List *qpqual,
221  Index scanrelid, int wtParam);
223  Plan *lefttree,
224  Plan *righttree,
225  int wtParam,
226  List *distinctList,
227  long numGroups);
228 static BitmapAnd *make_bitmap_and(List *bitmapplans);
229 static BitmapOr *make_bitmap_or(List *bitmapplans);
230 static NestLoop *make_nestloop(List *tlist,
231  List *joinclauses, List *otherclauses, List *nestParams,
232  Plan *lefttree, Plan *righttree,
233  JoinType jointype, bool inner_unique);
234 static HashJoin *make_hashjoin(List *tlist,
235  List *joinclauses, List *otherclauses,
236  List *hashclauses,
237  List *hashoperators, List *hashcollations,
238  List *hashkeys,
239  Plan *lefttree, Plan *righttree,
240  JoinType jointype, bool inner_unique);
241 static Hash *make_hash(Plan *lefttree,
242  List *hashkeys,
243  Oid skewTable,
244  AttrNumber skewColumn,
245  bool skewInherit);
246 static MergeJoin *make_mergejoin(List *tlist,
247  List *joinclauses, List *otherclauses,
248  List *mergeclauses,
249  Oid *mergefamilies,
250  Oid *mergecollations,
251  int *mergestrategies,
252  bool *mergenullsfirst,
253  Plan *lefttree, Plan *righttree,
254  JoinType jointype, bool inner_unique,
255  bool skip_mark_restore);
256 static Sort *make_sort(Plan *lefttree, int numCols,
257  AttrNumber *sortColIdx, Oid *sortOperators,
258  Oid *collations, bool *nullsFirst);
259 static IncrementalSort *make_incrementalsort(Plan *lefttree,
260  int numCols, int nPresortedCols,
261  AttrNumber *sortColIdx, Oid *sortOperators,
262  Oid *collations, bool *nullsFirst);
263 static Plan *prepare_sort_from_pathkeys(Plan *lefttree, List *pathkeys,
264  Relids relids,
265  const AttrNumber *reqColIdx,
266  bool adjust_tlist_in_place,
267  int *p_numsortkeys,
268  AttrNumber **p_sortColIdx,
269  Oid **p_sortOperators,
270  Oid **p_collations,
271  bool **p_nullsFirst);
272 static Sort *make_sort_from_pathkeys(Plan *lefttree, List *pathkeys,
273  Relids relids);
275  List *pathkeys, Relids relids, int nPresortedCols);
276 static Sort *make_sort_from_groupcols(List *groupcls,
277  AttrNumber *grpColIdx,
278  Plan *lefttree);
279 static Material *make_material(Plan *lefttree);
280 static Memoize *make_memoize(Plan *lefttree, Oid *hashoperators,
281  Oid *collations, List *param_exprs,
282  bool singlerow, bool binary_mode,
283  uint32 est_entries, Bitmapset *keyparamids);
284 static WindowAgg *make_windowagg(List *tlist, Index winref,
285  int partNumCols, AttrNumber *partColIdx, Oid *partOperators, Oid *partCollations,
286  int ordNumCols, AttrNumber *ordColIdx, Oid *ordOperators, Oid *ordCollations,
287  int frameOptions, Node *startOffset, Node *endOffset,
288  Oid startInRangeFunc, Oid endInRangeFunc,
289  Oid inRangeColl, bool inRangeAsc, bool inRangeNullsFirst,
290  List *runCondition, List *qual, bool topWindow,
291  Plan *lefttree);
292 static Group *make_group(List *tlist, List *qual, int numGroupCols,
293  AttrNumber *grpColIdx, Oid *grpOperators, Oid *grpCollations,
294  Plan *lefttree);
295 static Unique *make_unique_from_sortclauses(Plan *lefttree, List *distinctList);
296 static Unique *make_unique_from_pathkeys(Plan *lefttree,
297  List *pathkeys, int numCols);
298 static Gather *make_gather(List *qptlist, List *qpqual,
299  int nworkers, int rescan_param, bool single_copy, Plan *subplan);
300 static SetOp *make_setop(SetOpCmd cmd, SetOpStrategy strategy, Plan *lefttree,
301  List *distinctList, AttrNumber flagColIdx, int firstFlag,
302  long numGroups);
303 static LockRows *make_lockrows(Plan *lefttree, List *rowMarks, int epqParam);
304 static Result *make_result(List *tlist, Node *resconstantqual, Plan *subplan);
305 static ProjectSet *make_project_set(List *tlist, Plan *subplan);
306 static ModifyTable *make_modifytable(PlannerInfo *root, Plan *subplan,
307  CmdType operation, bool canSetTag,
308  Index nominalRelation, Index rootRelation,
309  bool partColsUpdated,
310  List *resultRelations,
311  List *updateColnosLists,
312  List *withCheckOptionLists, List *returningLists,
313  List *rowMarks, OnConflictExpr *onconflict,
314  List *mergeActionLists, int epqParam);
316  GatherMergePath *best_path);
317 
318 
319 /*
320  * create_plan
321  * Creates the access plan for a query by recursively processing the
322  * desired tree of pathnodes, starting at the node 'best_path'. For
323  * every pathnode found, we create a corresponding plan node containing
324  * appropriate id, target list, and qualification information.
325  *
326  * The tlists and quals in the plan tree are still in planner format,
327  * ie, Vars still correspond to the parser's numbering. This will be
328  * fixed later by setrefs.c.
329  *
330  * best_path is the best access path
331  *
332  * Returns a Plan tree.
333  */
334 Plan *
335 create_plan(PlannerInfo *root, Path *best_path)
336 {
337  Plan *plan;
338 
339  /* plan_params should not be in use in current query level */
340  Assert(root->plan_params == NIL);
341 
342  /* Initialize this module's workspace in PlannerInfo */
343  root->curOuterRels = NULL;
344  root->curOuterParams = NIL;
345 
346  /* Recursively process the path tree, demanding the correct tlist result */
347  plan = create_plan_recurse(root, best_path, CP_EXACT_TLIST);
348 
349  /*
350  * Make sure the topmost plan node's targetlist exposes the original
351  * column names and other decorative info. Targetlists generated within
352  * the planner don't bother with that stuff, but we must have it on the
353  * top-level tlist seen at execution time. However, ModifyTable plan
354  * nodes don't have a tlist matching the querytree targetlist.
355  */
356  if (!IsA(plan, ModifyTable))
357  apply_tlist_labeling(plan->targetlist, root->processed_tlist);
358 
359  /*
360  * Attach any initPlans created in this query level to the topmost plan
361  * node. (In principle the initplans could go in any plan node at or
362  * above where they're referenced, but there seems no reason to put them
363  * any lower than the topmost node for the query level. Also, see
364  * comments for SS_finalize_plan before you try to change this.)
365  */
366  SS_attach_initplans(root, plan);
367 
368  /* Check we successfully assigned all NestLoopParams to plan nodes */
369  if (root->curOuterParams != NIL)
370  elog(ERROR, "failed to assign all NestLoopParams to plan nodes");
371 
372  /*
373  * Reset plan_params to ensure param IDs used for nestloop params are not
374  * re-used later
375  */
376  root->plan_params = NIL;
377 
378  return plan;
379 }
380 
381 /*
382  * create_plan_recurse
383  * Recursive guts of create_plan().
384  */
385 static Plan *
386 create_plan_recurse(PlannerInfo *root, Path *best_path, int flags)
387 {
388  Plan *plan;
389 
390  /* Guard against stack overflow due to overly complex plans */
392 
393  switch (best_path->pathtype)
394  {
395  case T_SeqScan:
396  case T_SampleScan:
397  case T_IndexScan:
398  case T_IndexOnlyScan:
399  case T_BitmapHeapScan:
400  case T_TidScan:
401  case T_TidRangeScan:
402  case T_SubqueryScan:
403  case T_FunctionScan:
404  case T_TableFuncScan:
405  case T_ValuesScan:
406  case T_CteScan:
407  case T_WorkTableScan:
408  case T_NamedTuplestoreScan:
409  case T_ForeignScan:
410  case T_CustomScan:
411  plan = create_scan_plan(root, best_path, flags);
412  break;
413  case T_HashJoin:
414  case T_MergeJoin:
415  case T_NestLoop:
416  plan = create_join_plan(root,
417  (JoinPath *) best_path);
418  break;
419  case T_Append:
420  plan = create_append_plan(root,
421  (AppendPath *) best_path,
422  flags);
423  break;
424  case T_MergeAppend:
426  (MergeAppendPath *) best_path,
427  flags);
428  break;
429  case T_Result:
430  if (IsA(best_path, ProjectionPath))
431  {
433  (ProjectionPath *) best_path,
434  flags);
435  }
436  else if (IsA(best_path, MinMaxAggPath))
437  {
438  plan = (Plan *) create_minmaxagg_plan(root,
439  (MinMaxAggPath *) best_path);
440  }
441  else if (IsA(best_path, GroupResultPath))
442  {
444  (GroupResultPath *) best_path);
445  }
446  else
447  {
448  /* Simple RTE_RESULT base relation */
449  Assert(IsA(best_path, Path));
450  plan = create_scan_plan(root, best_path, flags);
451  }
452  break;
453  case T_ProjectSet:
454  plan = (Plan *) create_project_set_plan(root,
455  (ProjectSetPath *) best_path);
456  break;
457  case T_Material:
458  plan = (Plan *) create_material_plan(root,
459  (MaterialPath *) best_path,
460  flags);
461  break;
462  case T_Memoize:
463  plan = (Plan *) create_memoize_plan(root,
464  (MemoizePath *) best_path,
465  flags);
466  break;
467  case T_Unique:
468  if (IsA(best_path, UpperUniquePath))
469  {
471  (UpperUniquePath *) best_path,
472  flags);
473  }
474  else
475  {
476  Assert(IsA(best_path, UniquePath));
477  plan = create_unique_plan(root,
478  (UniquePath *) best_path,
479  flags);
480  }
481  break;
482  case T_Gather:
483  plan = (Plan *) create_gather_plan(root,
484  (GatherPath *) best_path);
485  break;
486  case T_Sort:
487  plan = (Plan *) create_sort_plan(root,
488  (SortPath *) best_path,
489  flags);
490  break;
491  case T_IncrementalSort:
493  (IncrementalSortPath *) best_path,
494  flags);
495  break;
496  case T_Group:
497  plan = (Plan *) create_group_plan(root,
498  (GroupPath *) best_path);
499  break;
500  case T_Agg:
501  if (IsA(best_path, GroupingSetsPath))
503  (GroupingSetsPath *) best_path);
504  else
505  {
506  Assert(IsA(best_path, AggPath));
507  plan = (Plan *) create_agg_plan(root,
508  (AggPath *) best_path);
509  }
510  break;
511  case T_WindowAgg:
512  plan = (Plan *) create_windowagg_plan(root,
513  (WindowAggPath *) best_path);
514  break;
515  case T_SetOp:
516  plan = (Plan *) create_setop_plan(root,
517  (SetOpPath *) best_path,
518  flags);
519  break;
520  case T_RecursiveUnion:
522  (RecursiveUnionPath *) best_path);
523  break;
524  case T_LockRows:
525  plan = (Plan *) create_lockrows_plan(root,
526  (LockRowsPath *) best_path,
527  flags);
528  break;
529  case T_ModifyTable:
530  plan = (Plan *) create_modifytable_plan(root,
531  (ModifyTablePath *) best_path);
532  break;
533  case T_Limit:
534  plan = (Plan *) create_limit_plan(root,
535  (LimitPath *) best_path,
536  flags);
537  break;
538  case T_GatherMerge:
540  (GatherMergePath *) best_path);
541  break;
542  default:
543  elog(ERROR, "unrecognized node type: %d",
544  (int) best_path->pathtype);
545  plan = NULL; /* keep compiler quiet */
546  break;
547  }
548 
549  return plan;
550 }
551 
552 /*
553  * create_scan_plan
554  * Create a scan plan for the parent relation of 'best_path'.
555  */
556 static Plan *
557 create_scan_plan(PlannerInfo *root, Path *best_path, int flags)
558 {
559  RelOptInfo *rel = best_path->parent;
560  List *scan_clauses;
561  List *gating_clauses;
562  List *tlist;
563  Plan *plan;
564 
565  /*
566  * Extract the relevant restriction clauses from the parent relation. The
567  * executor must apply all these restrictions during the scan, except for
568  * pseudoconstants which we'll take care of below.
569  *
570  * If this is a plain indexscan or index-only scan, we need not consider
571  * restriction clauses that are implied by the index's predicate, so use
572  * indrestrictinfo not baserestrictinfo. Note that we can't do that for
573  * bitmap indexscans, since there's not necessarily a single index
574  * involved; but it doesn't matter since create_bitmap_scan_plan() will be
575  * able to get rid of such clauses anyway via predicate proof.
576  */
577  switch (best_path->pathtype)
578  {
579  case T_IndexScan:
580  case T_IndexOnlyScan:
581  scan_clauses = castNode(IndexPath, best_path)->indexinfo->indrestrictinfo;
582  break;
583  default:
584  scan_clauses = rel->baserestrictinfo;
585  break;
586  }
587 
588  /*
589  * If this is a parameterized scan, we also need to enforce all the join
590  * clauses available from the outer relation(s).
591  *
592  * For paranoia's sake, don't modify the stored baserestrictinfo list.
593  */
594  if (best_path->param_info)
595  scan_clauses = list_concat_copy(scan_clauses,
596  best_path->param_info->ppi_clauses);
597 
598  /*
599  * Detect whether we have any pseudoconstant quals to deal with. Then, if
600  * we'll need a gating Result node, it will be able to project, so there
601  * are no requirements on the child's tlist.
602  *
603  * If this replaces a join, it must be a foreign scan or a custom scan,
604  * and the FDW or the custom scan provider would have stored in the best
605  * path the list of RestrictInfo nodes to apply to the join; check against
606  * that list in that case.
607  */
608  if (IS_JOIN_REL(rel))
609  {
610  List *join_clauses;
611 
612  Assert(best_path->pathtype == T_ForeignScan ||
613  best_path->pathtype == T_CustomScan);
614  if (best_path->pathtype == T_ForeignScan)
615  join_clauses = ((ForeignPath *) best_path)->fdw_restrictinfo;
616  else
617  join_clauses = ((CustomPath *) best_path)->custom_restrictinfo;
618 
619  gating_clauses = get_gating_quals(root, join_clauses);
620  }
621  else
622  gating_clauses = get_gating_quals(root, scan_clauses);
623  if (gating_clauses)
624  flags = 0;
625 
626  /*
627  * For table scans, rather than using the relation targetlist (which is
628  * only those Vars actually needed by the query), we prefer to generate a
629  * tlist containing all Vars in order. This will allow the executor to
630  * optimize away projection of the table tuples, if possible.
631  *
632  * But if the caller is going to ignore our tlist anyway, then don't
633  * bother generating one at all. We use an exact equality test here, so
634  * that this only applies when CP_IGNORE_TLIST is the only flag set.
635  */
636  if (flags == CP_IGNORE_TLIST)
637  {
638  tlist = NULL;
639  }
640  else if (use_physical_tlist(root, best_path, flags))
641  {
642  if (best_path->pathtype == T_IndexOnlyScan)
643  {
644  /* For index-only scan, the preferred tlist is the index's */
645  tlist = copyObject(((IndexPath *) best_path)->indexinfo->indextlist);
646 
647  /*
648  * Transfer sortgroupref data to the replacement tlist, if
649  * requested (use_physical_tlist checked that this will work).
650  */
651  if (flags & CP_LABEL_TLIST)
652  apply_pathtarget_labeling_to_tlist(tlist, best_path->pathtarget);
653  }
654  else
655  {
656  tlist = build_physical_tlist(root, rel);
657  if (tlist == NIL)
658  {
659  /* Failed because of dropped cols, so use regular method */
660  tlist = build_path_tlist(root, best_path);
661  }
662  else
663  {
664  /* As above, transfer sortgroupref data to replacement tlist */
665  if (flags & CP_LABEL_TLIST)
666  apply_pathtarget_labeling_to_tlist(tlist, best_path->pathtarget);
667  }
668  }
669  }
670  else
671  {
672  tlist = build_path_tlist(root, best_path);
673  }
674 
675  switch (best_path->pathtype)
676  {
677  case T_SeqScan:
678  plan = (Plan *) create_seqscan_plan(root,
679  best_path,
680  tlist,
681  scan_clauses);
682  break;
683 
684  case T_SampleScan:
685  plan = (Plan *) create_samplescan_plan(root,
686  best_path,
687  tlist,
688  scan_clauses);
689  break;
690 
691  case T_IndexScan:
692  plan = (Plan *) create_indexscan_plan(root,
693  (IndexPath *) best_path,
694  tlist,
695  scan_clauses,
696  false);
697  break;
698 
699  case T_IndexOnlyScan:
700  plan = (Plan *) create_indexscan_plan(root,
701  (IndexPath *) best_path,
702  tlist,
703  scan_clauses,
704  true);
705  break;
706 
707  case T_BitmapHeapScan:
708  plan = (Plan *) create_bitmap_scan_plan(root,
709  (BitmapHeapPath *) best_path,
710  tlist,
711  scan_clauses);
712  break;
713 
714  case T_TidScan:
715  plan = (Plan *) create_tidscan_plan(root,
716  (TidPath *) best_path,
717  tlist,
718  scan_clauses);
719  break;
720 
721  case T_TidRangeScan:
723  (TidRangePath *) best_path,
724  tlist,
725  scan_clauses);
726  break;
727 
728  case T_SubqueryScan:
730  (SubqueryScanPath *) best_path,
731  tlist,
732  scan_clauses);
733  break;
734 
735  case T_FunctionScan:
737  best_path,
738  tlist,
739  scan_clauses);
740  break;
741 
742  case T_TableFuncScan:
744  best_path,
745  tlist,
746  scan_clauses);
747  break;
748 
749  case T_ValuesScan:
750  plan = (Plan *) create_valuesscan_plan(root,
751  best_path,
752  tlist,
753  scan_clauses);
754  break;
755 
756  case T_CteScan:
757  plan = (Plan *) create_ctescan_plan(root,
758  best_path,
759  tlist,
760  scan_clauses);
761  break;
762 
763  case T_NamedTuplestoreScan:
765  best_path,
766  tlist,
767  scan_clauses);
768  break;
769 
770  case T_Result:
771  plan = (Plan *) create_resultscan_plan(root,
772  best_path,
773  tlist,
774  scan_clauses);
775  break;
776 
777  case T_WorkTableScan:
779  best_path,
780  tlist,
781  scan_clauses);
782  break;
783 
784  case T_ForeignScan:
785  plan = (Plan *) create_foreignscan_plan(root,
786  (ForeignPath *) best_path,
787  tlist,
788  scan_clauses);
789  break;
790 
791  case T_CustomScan:
792  plan = (Plan *) create_customscan_plan(root,
793  (CustomPath *) best_path,
794  tlist,
795  scan_clauses);
796  break;
797 
798  default:
799  elog(ERROR, "unrecognized node type: %d",
800  (int) best_path->pathtype);
801  plan = NULL; /* keep compiler quiet */
802  break;
803  }
804 
805  /*
806  * If there are any pseudoconstant clauses attached to this node, insert a
807  * gating Result node that evaluates the pseudoconstants as one-time
808  * quals.
809  */
810  if (gating_clauses)
811  plan = create_gating_plan(root, best_path, plan, gating_clauses);
812 
813  return plan;
814 }
815 
816 /*
817  * Build a target list (ie, a list of TargetEntry) for the Path's output.
818  *
819  * This is almost just make_tlist_from_pathtarget(), but we also have to
820  * deal with replacing nestloop params.
821  */
822 static List *
824 {
825  List *tlist = NIL;
826  Index *sortgrouprefs = path->pathtarget->sortgrouprefs;
827  int resno = 1;
828  ListCell *v;
829 
830  foreach(v, path->pathtarget->exprs)
831  {
832  Node *node = (Node *) lfirst(v);
833  TargetEntry *tle;
834 
835  /*
836  * If it's a parameterized path, there might be lateral references in
837  * the tlist, which need to be replaced with Params. There's no need
838  * to remake the TargetEntry nodes, so apply this to each list item
839  * separately.
840  */
841  if (path->param_info)
842  node = replace_nestloop_params(root, node);
843 
844  tle = makeTargetEntry((Expr *) node,
845  resno,
846  NULL,
847  false);
848  if (sortgrouprefs)
849  tle->ressortgroupref = sortgrouprefs[resno - 1];
850 
851  tlist = lappend(tlist, tle);
852  resno++;
853  }
854  return tlist;
855 }
856 
857 /*
858  * use_physical_tlist
859  * Decide whether to use a tlist matching relation structure,
860  * rather than only those Vars actually referenced.
861  */
862 static bool
863 use_physical_tlist(PlannerInfo *root, Path *path, int flags)
864 {
865  RelOptInfo *rel = path->parent;
866  int i;
867  ListCell *lc;
868 
869  /*
870  * Forget it if either exact tlist or small tlist is demanded.
871  */
872  if (flags & (CP_EXACT_TLIST | CP_SMALL_TLIST))
873  return false;
874 
875  /*
876  * We can do this for real relation scans, subquery scans, function scans,
877  * tablefunc scans, values scans, and CTE scans (but not for, eg, joins).
878  */
879  if (rel->rtekind != RTE_RELATION &&
880  rel->rtekind != RTE_SUBQUERY &&
881  rel->rtekind != RTE_FUNCTION &&
882  rel->rtekind != RTE_TABLEFUNC &&
883  rel->rtekind != RTE_VALUES &&
884  rel->rtekind != RTE_CTE)
885  return false;
886 
887  /*
888  * Can't do it with inheritance cases either (mainly because Append
889  * doesn't project; this test may be unnecessary now that
890  * create_append_plan instructs its children to return an exact tlist).
891  */
892  if (rel->reloptkind != RELOPT_BASEREL)
893  return false;
894 
895  /*
896  * Also, don't do it to a CustomPath; the premise that we're extracting
897  * columns from a simple physical tuple is unlikely to hold for those.
898  * (When it does make sense, the custom path creator can set up the path's
899  * pathtarget that way.)
900  */
901  if (IsA(path, CustomPath))
902  return false;
903 
904  /*
905  * If a bitmap scan's tlist is empty, keep it as-is. This may allow the
906  * executor to skip heap page fetches, and in any case, the benefit of
907  * using a physical tlist instead would be minimal.
908  */
909  if (IsA(path, BitmapHeapPath) &&
910  path->pathtarget->exprs == NIL)
911  return false;
912 
913  /*
914  * Can't do it if any system columns or whole-row Vars are requested.
915  * (This could possibly be fixed but would take some fragile assumptions
916  * in setrefs.c, I think.)
917  */
918  for (i = rel->min_attr; i <= 0; i++)
919  {
920  if (!bms_is_empty(rel->attr_needed[i - rel->min_attr]))
921  return false;
922  }
923 
924  /*
925  * Can't do it if the rel is required to emit any placeholder expressions,
926  * either.
927  */
928  foreach(lc, root->placeholder_list)
929  {
930  PlaceHolderInfo *phinfo = (PlaceHolderInfo *) lfirst(lc);
931 
932  if (bms_nonempty_difference(phinfo->ph_needed, rel->relids) &&
933  bms_is_subset(phinfo->ph_eval_at, rel->relids))
934  return false;
935  }
936 
937  /*
938  * For an index-only scan, the "physical tlist" is the index's indextlist.
939  * We can only return that without a projection if all the index's columns
940  * are returnable.
941  */
942  if (path->pathtype == T_IndexOnlyScan)
943  {
944  IndexOptInfo *indexinfo = ((IndexPath *) path)->indexinfo;
945 
946  for (i = 0; i < indexinfo->ncolumns; i++)
947  {
948  if (!indexinfo->canreturn[i])
949  return false;
950  }
951  }
952 
953  /*
954  * Also, can't do it if CP_LABEL_TLIST is specified and path is requested
955  * to emit any sort/group columns that are not simple Vars. (If they are
956  * simple Vars, they should appear in the physical tlist, and
957  * apply_pathtarget_labeling_to_tlist will take care of getting them
958  * labeled again.) We also have to check that no two sort/group columns
959  * are the same Var, else that element of the physical tlist would need
960  * conflicting ressortgroupref labels.
961  */
962  if ((flags & CP_LABEL_TLIST) && path->pathtarget->sortgrouprefs)
963  {
964  Bitmapset *sortgroupatts = NULL;
965 
966  i = 0;
967  foreach(lc, path->pathtarget->exprs)
968  {
969  Expr *expr = (Expr *) lfirst(lc);
970 
971  if (path->pathtarget->sortgrouprefs[i])
972  {
973  if (expr && IsA(expr, Var))
974  {
975  int attno = ((Var *) expr)->varattno;
976 
978  if (bms_is_member(attno, sortgroupatts))
979  return false;
980  sortgroupatts = bms_add_member(sortgroupatts, attno);
981  }
982  else
983  return false;
984  }
985  i++;
986  }
987  }
988 
989  return true;
990 }
991 
992 /*
993  * get_gating_quals
994  * See if there are pseudoconstant quals in a node's quals list
995  *
996  * If the node's quals list includes any pseudoconstant quals,
997  * return just those quals.
998  */
999 static List *
1001 {
1002  /* No need to look if we know there are no pseudoconstants */
1003  if (!root->hasPseudoConstantQuals)
1004  return NIL;
1005 
1006  /* Sort into desirable execution order while still in RestrictInfo form */
1007  quals = order_qual_clauses(root, quals);
1008 
1009  /* Pull out any pseudoconstant quals from the RestrictInfo list */
1010  return extract_actual_clauses(quals, true);
1011 }
1012 
1013 /*
1014  * create_gating_plan
1015  * Deal with pseudoconstant qual clauses
1016  *
1017  * Add a gating Result node atop the already-built plan.
1018  */
1019 static Plan *
1021  List *gating_quals)
1022 {
1023  Plan *gplan;
1024  Plan *splan;
1025 
1026  Assert(gating_quals);
1027 
1028  /*
1029  * We might have a trivial Result plan already. Stacking one Result atop
1030  * another is silly, so if that applies, just discard the input plan.
1031  * (We're assuming its targetlist is uninteresting; it should be either
1032  * the same as the result of build_path_tlist, or a simplified version.)
1033  */
1034  splan = plan;
1035  if (IsA(plan, Result))
1036  {
1037  Result *rplan = (Result *) plan;
1038 
1039  if (rplan->plan.lefttree == NULL &&
1040  rplan->resconstantqual == NULL)
1041  splan = NULL;
1042  }
1043 
1044  /*
1045  * Since we need a Result node anyway, always return the path's requested
1046  * tlist; that's never a wrong choice, even if the parent node didn't ask
1047  * for CP_EXACT_TLIST.
1048  */
1049  gplan = (Plan *) make_result(build_path_tlist(root, path),
1050  (Node *) gating_quals,
1051  splan);
1052 
1053  /*
1054  * Notice that we don't change cost or size estimates when doing gating.
1055  * The costs of qual eval were already included in the subplan's cost.
1056  * Leaving the size alone amounts to assuming that the gating qual will
1057  * succeed, which is the conservative estimate for planning upper queries.
1058  * We certainly don't want to assume the output size is zero (unless the
1059  * gating qual is actually constant FALSE, and that case is dealt with in
1060  * clausesel.c). Interpolating between the two cases is silly, because it
1061  * doesn't reflect what will really happen at runtime, and besides which
1062  * in most cases we have only a very bad idea of the probability of the
1063  * gating qual being true.
1064  */
1065  copy_plan_costsize(gplan, plan);
1066 
1067  /* Gating quals could be unsafe, so better use the Path's safety flag */
1068  gplan->parallel_safe = path->parallel_safe;
1069 
1070  return gplan;
1071 }
1072 
1073 /*
1074  * create_join_plan
1075  * Create a join plan for 'best_path' and (recursively) plans for its
1076  * inner and outer paths.
1077  */
1078 static Plan *
1080 {
1081  Plan *plan;
1082  List *gating_clauses;
1083 
1084  switch (best_path->path.pathtype)
1085  {
1086  case T_MergeJoin:
1087  plan = (Plan *) create_mergejoin_plan(root,
1088  (MergePath *) best_path);
1089  break;
1090  case T_HashJoin:
1091  plan = (Plan *) create_hashjoin_plan(root,
1092  (HashPath *) best_path);
1093  break;
1094  case T_NestLoop:
1095  plan = (Plan *) create_nestloop_plan(root,
1096  (NestPath *) best_path);
1097  break;
1098  default:
1099  elog(ERROR, "unrecognized node type: %d",
1100  (int) best_path->path.pathtype);
1101  plan = NULL; /* keep compiler quiet */
1102  break;
1103  }
1104 
1105  /*
1106  * If there are any pseudoconstant clauses attached to this node, insert a
1107  * gating Result node that evaluates the pseudoconstants as one-time
1108  * quals.
1109  */
1110  gating_clauses = get_gating_quals(root, best_path->joinrestrictinfo);
1111  if (gating_clauses)
1112  plan = create_gating_plan(root, (Path *) best_path, plan,
1113  gating_clauses);
1114 
1115 #ifdef NOT_USED
1116 
1117  /*
1118  * * Expensive function pullups may have pulled local predicates * into
1119  * this path node. Put them in the qpqual of the plan node. * JMH,
1120  * 6/15/92
1121  */
1122  if (get_loc_restrictinfo(best_path) != NIL)
1123  set_qpqual((Plan) plan,
1124  list_concat(get_qpqual((Plan) plan),
1125  get_actual_clauses(get_loc_restrictinfo(best_path))));
1126 #endif
1127 
1128  return plan;
1129 }
1130 
1131 /*
1132  * mark_async_capable_plan
1133  * Check whether the Plan node created from a Path node is async-capable,
1134  * and if so, mark the Plan node as such and return true, otherwise
1135  * return false.
1136  */
1137 static bool
1139 {
1140  switch (nodeTag(path))
1141  {
1142  case T_SubqueryScanPath:
1143  {
1144  SubqueryScan *scan_plan = (SubqueryScan *) plan;
1145 
1146  /*
1147  * If the generated plan node includes a gating Result node,
1148  * we can't execute it asynchronously.
1149  */
1150  if (IsA(plan, Result))
1151  return false;
1152 
1153  /*
1154  * If a SubqueryScan node atop of an async-capable plan node
1155  * is deletable, consider it as async-capable.
1156  */
1157  if (trivial_subqueryscan(scan_plan) &&
1158  mark_async_capable_plan(scan_plan->subplan,
1159  ((SubqueryScanPath *) path)->subpath))
1160  break;
1161  return false;
1162  }
1163  case T_ForeignPath:
1164  {
1165  FdwRoutine *fdwroutine = path->parent->fdwroutine;
1166 
1167  /*
1168  * If the generated plan node includes a gating Result node,
1169  * we can't execute it asynchronously.
1170  */
1171  if (IsA(plan, Result))
1172  return false;
1173 
1174  Assert(fdwroutine != NULL);
1175  if (fdwroutine->IsForeignPathAsyncCapable != NULL &&
1176  fdwroutine->IsForeignPathAsyncCapable((ForeignPath *) path))
1177  break;
1178  return false;
1179  }
1180  case T_ProjectionPath:
1181 
1182  /*
1183  * If the generated plan node includes a Result node for the
1184  * projection, we can't execute it asynchronously.
1185  */
1186  if (IsA(plan, Result))
1187  return false;
1188 
1189  /*
1190  * create_projection_plan() would have pulled up the subplan, so
1191  * check the capability using the subpath.
1192  */
1194  ((ProjectionPath *) path)->subpath))
1195  return true;
1196  return false;
1197  default:
1198  return false;
1199  }
1200 
1201  plan->async_capable = true;
1202 
1203  return true;
1204 }
1205 
1206 /*
1207  * create_append_plan
1208  * Create an Append plan for 'best_path' and (recursively) plans
1209  * for its subpaths.
1210  *
1211  * Returns a Plan node.
1212  */
1213 static Plan *
1214 create_append_plan(PlannerInfo *root, AppendPath *best_path, int flags)
1215 {
1216  Append *plan;
1217  List *tlist = build_path_tlist(root, &best_path->path);
1218  int orig_tlist_length = list_length(tlist);
1219  bool tlist_was_changed = false;
1220  List *pathkeys = best_path->path.pathkeys;
1221  List *subplans = NIL;
1222  ListCell *subpaths;
1223  int nasyncplans = 0;
1224  RelOptInfo *rel = best_path->path.parent;
1225  PartitionPruneInfo *partpruneinfo = NULL;
1226  int nodenumsortkeys = 0;
1227  AttrNumber *nodeSortColIdx = NULL;
1228  Oid *nodeSortOperators = NULL;
1229  Oid *nodeCollations = NULL;
1230  bool *nodeNullsFirst = NULL;
1231  bool consider_async = false;
1232 
1233  /*
1234  * The subpaths list could be empty, if every child was proven empty by
1235  * constraint exclusion. In that case generate a dummy plan that returns
1236  * no rows.
1237  *
1238  * Note that an AppendPath with no members is also generated in certain
1239  * cases where there was no appending construct at all, but we know the
1240  * relation is empty (see set_dummy_rel_pathlist and mark_dummy_rel).
1241  */
1242  if (best_path->subpaths == NIL)
1243  {
1244  /* Generate a Result plan with constant-FALSE gating qual */
1245  Plan *plan;
1246 
1247  plan = (Plan *) make_result(tlist,
1248  (Node *) list_make1(makeBoolConst(false,
1249  false)),
1250  NULL);
1251 
1252  copy_generic_path_info(plan, (Path *) best_path);
1253 
1254  return plan;
1255  }
1256 
1257  /*
1258  * Otherwise build an Append plan. Note that if there's just one child,
1259  * the Append is pretty useless; but we wait till setrefs.c to get rid of
1260  * it. Doing so here doesn't work because the varno of the child scan
1261  * plan won't match the parent-rel Vars it'll be asked to emit.
1262  *
1263  * We don't have the actual creation of the Append node split out into a
1264  * separate make_xxx function. This is because we want to run
1265  * prepare_sort_from_pathkeys on it before we do so on the individual
1266  * child plans, to make cross-checking the sort info easier.
1267  */
1268  plan = makeNode(Append);
1269  plan->plan.targetlist = tlist;
1270  plan->plan.qual = NIL;
1271  plan->plan.lefttree = NULL;
1272  plan->plan.righttree = NULL;
1273  plan->apprelids = rel->relids;
1274 
1275  if (pathkeys != NIL)
1276  {
1277  /*
1278  * Compute sort column info, and adjust the Append's tlist as needed.
1279  * Because we pass adjust_tlist_in_place = true, we may ignore the
1280  * function result; it must be the same plan node. However, we then
1281  * need to detect whether any tlist entries were added.
1282  */
1283  (void) prepare_sort_from_pathkeys((Plan *) plan, pathkeys,
1284  best_path->path.parent->relids,
1285  NULL,
1286  true,
1287  &nodenumsortkeys,
1288  &nodeSortColIdx,
1289  &nodeSortOperators,
1290  &nodeCollations,
1291  &nodeNullsFirst);
1292  tlist_was_changed = (orig_tlist_length != list_length(plan->plan.targetlist));
1293  }
1294 
1295  /* If appropriate, consider async append */
1296  consider_async = (enable_async_append && pathkeys == NIL &&
1297  !best_path->path.parallel_safe &&
1298  list_length(best_path->subpaths) > 1);
1299 
1300  /* Build the plan for each child */
1301  foreach(subpaths, best_path->subpaths)
1302  {
1303  Path *subpath = (Path *) lfirst(subpaths);
1304  Plan *subplan;
1305 
1306  /* Must insist that all children return the same tlist */
1307  subplan = create_plan_recurse(root, subpath, CP_EXACT_TLIST);
1308 
1309  /*
1310  * For ordered Appends, we must insert a Sort node if subplan isn't
1311  * sufficiently ordered.
1312  */
1313  if (pathkeys != NIL)
1314  {
1315  int numsortkeys;
1316  AttrNumber *sortColIdx;
1317  Oid *sortOperators;
1318  Oid *collations;
1319  bool *nullsFirst;
1320 
1321  /*
1322  * Compute sort column info, and adjust subplan's tlist as needed.
1323  * We must apply prepare_sort_from_pathkeys even to subplans that
1324  * don't need an explicit sort, to make sure they are returning
1325  * the same sort key columns the Append expects.
1326  */
1327  subplan = prepare_sort_from_pathkeys(subplan, pathkeys,
1328  subpath->parent->relids,
1329  nodeSortColIdx,
1330  false,
1331  &numsortkeys,
1332  &sortColIdx,
1333  &sortOperators,
1334  &collations,
1335  &nullsFirst);
1336 
1337  /*
1338  * Check that we got the same sort key information. We just
1339  * Assert that the sortops match, since those depend only on the
1340  * pathkeys; but it seems like a good idea to check the sort
1341  * column numbers explicitly, to ensure the tlists match up.
1342  */
1343  Assert(numsortkeys == nodenumsortkeys);
1344  if (memcmp(sortColIdx, nodeSortColIdx,
1345  numsortkeys * sizeof(AttrNumber)) != 0)
1346  elog(ERROR, "Append child's targetlist doesn't match Append");
1347  Assert(memcmp(sortOperators, nodeSortOperators,
1348  numsortkeys * sizeof(Oid)) == 0);
1349  Assert(memcmp(collations, nodeCollations,
1350  numsortkeys * sizeof(Oid)) == 0);
1351  Assert(memcmp(nullsFirst, nodeNullsFirst,
1352  numsortkeys * sizeof(bool)) == 0);
1353 
1354  /* Now, insert a Sort node if subplan isn't sufficiently ordered */
1355  if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
1356  {
1357  Sort *sort = make_sort(subplan, numsortkeys,
1358  sortColIdx, sortOperators,
1359  collations, nullsFirst);
1360 
1361  label_sort_with_costsize(root, sort, best_path->limit_tuples);
1362  subplan = (Plan *) sort;
1363  }
1364  }
1365 
1366  /* If needed, check to see if subplan can be executed asynchronously */
1367  if (consider_async && mark_async_capable_plan(subplan, subpath))
1368  {
1369  Assert(subplan->async_capable);
1370  ++nasyncplans;
1371  }
1372 
1373  subplans = lappend(subplans, subplan);
1374  }
1375 
1376  /*
1377  * If any quals exist, they may be useful to perform further partition
1378  * pruning during execution. Gather information needed by the executor to
1379  * do partition pruning.
1380  */
1382  {
1383  List *prunequal;
1384 
1385  prunequal = extract_actual_clauses(rel->baserestrictinfo, false);
1386 
1387  if (best_path->path.param_info)
1388  {
1389  List *prmquals = best_path->path.param_info->ppi_clauses;
1390 
1391  prmquals = extract_actual_clauses(prmquals, false);
1392  prmquals = (List *) replace_nestloop_params(root,
1393  (Node *) prmquals);
1394 
1395  prunequal = list_concat(prunequal, prmquals);
1396  }
1397 
1398  if (prunequal != NIL)
1399  partpruneinfo =
1400  make_partition_pruneinfo(root, rel,
1401  best_path->subpaths,
1402  prunequal);
1403  }
1404 
1405  plan->appendplans = subplans;
1406  plan->nasyncplans = nasyncplans;
1407  plan->first_partial_plan = best_path->first_partial_path;
1408  plan->part_prune_info = partpruneinfo;
1409 
1410  copy_generic_path_info(&plan->plan, (Path *) best_path);
1411 
1412  /*
1413  * If prepare_sort_from_pathkeys added sort columns, but we were told to
1414  * produce either the exact tlist or a narrow tlist, we should get rid of
1415  * the sort columns again. We must inject a projection node to do so.
1416  */
1417  if (tlist_was_changed && (flags & (CP_EXACT_TLIST | CP_SMALL_TLIST)))
1418  {
1419  tlist = list_copy_head(plan->plan.targetlist, orig_tlist_length);
1420  return inject_projection_plan((Plan *) plan, tlist,
1421  plan->plan.parallel_safe);
1422  }
1423  else
1424  return (Plan *) plan;
1425 }
1426 
1427 /*
1428  * create_merge_append_plan
1429  * Create a MergeAppend plan for 'best_path' and (recursively) plans
1430  * for its subpaths.
1431  *
1432  * Returns a Plan node.
1433  */
1434 static Plan *
1436  int flags)
1437 {
1438  MergeAppend *node = makeNode(MergeAppend);
1439  Plan *plan = &node->plan;
1440  List *tlist = build_path_tlist(root, &best_path->path);
1441  int orig_tlist_length = list_length(tlist);
1442  bool tlist_was_changed;
1443  List *pathkeys = best_path->path.pathkeys;
1444  List *subplans = NIL;
1445  ListCell *subpaths;
1446  RelOptInfo *rel = best_path->path.parent;
1447  PartitionPruneInfo *partpruneinfo = NULL;
1448 
1449  /*
1450  * We don't have the actual creation of the MergeAppend node split out
1451  * into a separate make_xxx function. This is because we want to run
1452  * prepare_sort_from_pathkeys on it before we do so on the individual
1453  * child plans, to make cross-checking the sort info easier.
1454  */
1455  copy_generic_path_info(plan, (Path *) best_path);
1456  plan->targetlist = tlist;
1457  plan->qual = NIL;
1458  plan->lefttree = NULL;
1459  plan->righttree = NULL;
1460  node->apprelids = rel->relids;
1461 
1462  /*
1463  * Compute sort column info, and adjust MergeAppend's tlist as needed.
1464  * Because we pass adjust_tlist_in_place = true, we may ignore the
1465  * function result; it must be the same plan node. However, we then need
1466  * to detect whether any tlist entries were added.
1467  */
1468  (void) prepare_sort_from_pathkeys(plan, pathkeys,
1469  best_path->path.parent->relids,
1470  NULL,
1471  true,
1472  &node->numCols,
1473  &node->sortColIdx,
1474  &node->sortOperators,
1475  &node->collations,
1476  &node->nullsFirst);
1477  tlist_was_changed = (orig_tlist_length != list_length(plan->targetlist));
1478 
1479  /*
1480  * Now prepare the child plans. We must apply prepare_sort_from_pathkeys
1481  * even to subplans that don't need an explicit sort, to make sure they
1482  * are returning the same sort key columns the MergeAppend expects.
1483  */
1484  foreach(subpaths, best_path->subpaths)
1485  {
1486  Path *subpath = (Path *) lfirst(subpaths);
1487  Plan *subplan;
1488  int numsortkeys;
1489  AttrNumber *sortColIdx;
1490  Oid *sortOperators;
1491  Oid *collations;
1492  bool *nullsFirst;
1493 
1494  /* Build the child plan */
1495  /* Must insist that all children return the same tlist */
1496  subplan = create_plan_recurse(root, subpath, CP_EXACT_TLIST);
1497 
1498  /* Compute sort column info, and adjust subplan's tlist as needed */
1499  subplan = prepare_sort_from_pathkeys(subplan, pathkeys,
1500  subpath->parent->relids,
1501  node->sortColIdx,
1502  false,
1503  &numsortkeys,
1504  &sortColIdx,
1505  &sortOperators,
1506  &collations,
1507  &nullsFirst);
1508 
1509  /*
1510  * Check that we got the same sort key information. We just Assert
1511  * that the sortops match, since those depend only on the pathkeys;
1512  * but it seems like a good idea to check the sort column numbers
1513  * explicitly, to ensure the tlists really do match up.
1514  */
1515  Assert(numsortkeys == node->numCols);
1516  if (memcmp(sortColIdx, node->sortColIdx,
1517  numsortkeys * sizeof(AttrNumber)) != 0)
1518  elog(ERROR, "MergeAppend child's targetlist doesn't match MergeAppend");
1519  Assert(memcmp(sortOperators, node->sortOperators,
1520  numsortkeys * sizeof(Oid)) == 0);
1521  Assert(memcmp(collations, node->collations,
1522  numsortkeys * sizeof(Oid)) == 0);
1523  Assert(memcmp(nullsFirst, node->nullsFirst,
1524  numsortkeys * sizeof(bool)) == 0);
1525 
1526  /* Now, insert a Sort node if subplan isn't sufficiently ordered */
1527  if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
1528  {
1529  Sort *sort = make_sort(subplan, numsortkeys,
1530  sortColIdx, sortOperators,
1531  collations, nullsFirst);
1532 
1533  label_sort_with_costsize(root, sort, best_path->limit_tuples);
1534  subplan = (Plan *) sort;
1535  }
1536 
1537  subplans = lappend(subplans, subplan);
1538  }
1539 
1540  /*
1541  * If any quals exist, they may be useful to perform further partition
1542  * pruning during execution. Gather information needed by the executor to
1543  * do partition pruning.
1544  */
1546  {
1547  List *prunequal;
1548 
1549  prunequal = extract_actual_clauses(rel->baserestrictinfo, false);
1550 
1551  /* We don't currently generate any parameterized MergeAppend paths */
1552  Assert(best_path->path.param_info == NULL);
1553 
1554  if (prunequal != NIL)
1555  partpruneinfo = make_partition_pruneinfo(root, rel,
1556  best_path->subpaths,
1557  prunequal);
1558  }
1559 
1560  node->mergeplans = subplans;
1561  node->part_prune_info = partpruneinfo;
1562 
1563  /*
1564  * If prepare_sort_from_pathkeys added sort columns, but we were told to
1565  * produce either the exact tlist or a narrow tlist, we should get rid of
1566  * the sort columns again. We must inject a projection node to do so.
1567  */
1568  if (tlist_was_changed && (flags & (CP_EXACT_TLIST | CP_SMALL_TLIST)))
1569  {
1570  tlist = list_copy_head(plan->targetlist, orig_tlist_length);
1571  return inject_projection_plan(plan, tlist, plan->parallel_safe);
1572  }
1573  else
1574  return plan;
1575 }
1576 
1577 /*
1578  * create_group_result_plan
1579  * Create a Result plan for 'best_path'.
1580  * This is only used for degenerate grouping cases.
1581  *
1582  * Returns a Plan node.
1583  */
1584 static Result *
1586 {
1587  Result *plan;
1588  List *tlist;
1589  List *quals;
1590 
1591  tlist = build_path_tlist(root, &best_path->path);
1592 
1593  /* best_path->quals is just bare clauses */
1594  quals = order_qual_clauses(root, best_path->quals);
1595 
1596  plan = make_result(tlist, (Node *) quals, NULL);
1597 
1598  copy_generic_path_info(&plan->plan, (Path *) best_path);
1599 
1600  return plan;
1601 }
1602 
1603 /*
1604  * create_project_set_plan
1605  * Create a ProjectSet plan for 'best_path'.
1606  *
1607  * Returns a Plan node.
1608  */
1609 static ProjectSet *
1611 {
1612  ProjectSet *plan;
1613  Plan *subplan;
1614  List *tlist;
1615 
1616  /* Since we intend to project, we don't need to constrain child tlist */
1617  subplan = create_plan_recurse(root, best_path->subpath, 0);
1618 
1619  tlist = build_path_tlist(root, &best_path->path);
1620 
1621  plan = make_project_set(tlist, subplan);
1622 
1623  copy_generic_path_info(&plan->plan, (Path *) best_path);
1624 
1625  return plan;
1626 }
1627 
1628 /*
1629  * create_material_plan
1630  * Create a Material plan for 'best_path' and (recursively) plans
1631  * for its subpaths.
1632  *
1633  * Returns a Plan node.
1634  */
1635 static Material *
1636 create_material_plan(PlannerInfo *root, MaterialPath *best_path, int flags)
1637 {
1638  Material *plan;
1639  Plan *subplan;
1640 
1641  /*
1642  * We don't want any excess columns in the materialized tuples, so request
1643  * a smaller tlist. Otherwise, since Material doesn't project, tlist
1644  * requirements pass through.
1645  */
1646  subplan = create_plan_recurse(root, best_path->subpath,
1647  flags | CP_SMALL_TLIST);
1648 
1649  plan = make_material(subplan);
1650 
1651  copy_generic_path_info(&plan->plan, (Path *) best_path);
1652 
1653  return plan;
1654 }
1655 
1656 /*
1657  * create_memoize_plan
1658  * Create a Memoize plan for 'best_path' and (recursively) plans for its
1659  * subpaths.
1660  *
1661  * Returns a Plan node.
1662  */
1663 static Memoize *
1664 create_memoize_plan(PlannerInfo *root, MemoizePath *best_path, int flags)
1665 {
1666  Memoize *plan;
1667  Bitmapset *keyparamids;
1668  Plan *subplan;
1669  Oid *operators;
1670  Oid *collations;
1671  List *param_exprs = NIL;
1672  ListCell *lc;
1673  ListCell *lc2;
1674  int nkeys;
1675  int i;
1676 
1677  subplan = create_plan_recurse(root, best_path->subpath,
1678  flags | CP_SMALL_TLIST);
1679 
1680  param_exprs = (List *) replace_nestloop_params(root, (Node *)
1681  best_path->param_exprs);
1682 
1683  nkeys = list_length(param_exprs);
1684  Assert(nkeys > 0);
1685  operators = palloc(nkeys * sizeof(Oid));
1686  collations = palloc(nkeys * sizeof(Oid));
1687 
1688  i = 0;
1689  forboth(lc, param_exprs, lc2, best_path->hash_operators)
1690  {
1691  Expr *param_expr = (Expr *) lfirst(lc);
1692  Oid opno = lfirst_oid(lc2);
1693 
1694  operators[i] = opno;
1695  collations[i] = exprCollation((Node *) param_expr);
1696  i++;
1697  }
1698 
1699  keyparamids = pull_paramids((Expr *) param_exprs);
1700 
1701  plan = make_memoize(subplan, operators, collations, param_exprs,
1702  best_path->singlerow, best_path->binary_mode,
1703  best_path->est_entries, keyparamids);
1704 
1705  copy_generic_path_info(&plan->plan, (Path *) best_path);
1706 
1707  return plan;
1708 }
1709 
1710 /*
1711  * create_unique_plan
1712  * Create a Unique plan for 'best_path' and (recursively) plans
1713  * for its subpaths.
1714  *
1715  * Returns a Plan node.
1716  */
1717 static Plan *
1718 create_unique_plan(PlannerInfo *root, UniquePath *best_path, int flags)
1719 {
1720  Plan *plan;
1721  Plan *subplan;
1722  List *in_operators;
1723  List *uniq_exprs;
1724  List *newtlist;
1725  int nextresno;
1726  bool newitems;
1727  int numGroupCols;
1728  AttrNumber *groupColIdx;
1729  Oid *groupCollations;
1730  int groupColPos;
1731  ListCell *l;
1732 
1733  /* Unique doesn't project, so tlist requirements pass through */
1734  subplan = create_plan_recurse(root, best_path->subpath, flags);
1735 
1736  /* Done if we don't need to do any actual unique-ifying */
1737  if (best_path->umethod == UNIQUE_PATH_NOOP)
1738  return subplan;
1739 
1740  /*
1741  * As constructed, the subplan has a "flat" tlist containing just the Vars
1742  * needed here and at upper levels. The values we are supposed to
1743  * unique-ify may be expressions in these variables. We have to add any
1744  * such expressions to the subplan's tlist.
1745  *
1746  * The subplan may have a "physical" tlist if it is a simple scan plan. If
1747  * we're going to sort, this should be reduced to the regular tlist, so
1748  * that we don't sort more data than we need to. For hashing, the tlist
1749  * should be left as-is if we don't need to add any expressions; but if we
1750  * do have to add expressions, then a projection step will be needed at
1751  * runtime anyway, so we may as well remove unneeded items. Therefore
1752  * newtlist starts from build_path_tlist() not just a copy of the
1753  * subplan's tlist; and we don't install it into the subplan unless we are
1754  * sorting or stuff has to be added.
1755  */
1756  in_operators = best_path->in_operators;
1757  uniq_exprs = best_path->uniq_exprs;
1758 
1759  /* initialize modified subplan tlist as just the "required" vars */
1760  newtlist = build_path_tlist(root, &best_path->path);
1761  nextresno = list_length(newtlist) + 1;
1762  newitems = false;
1763 
1764  foreach(l, uniq_exprs)
1765  {
1766  Expr *uniqexpr = lfirst(l);
1767  TargetEntry *tle;
1768 
1769  tle = tlist_member(uniqexpr, newtlist);
1770  if (!tle)
1771  {
1772  tle = makeTargetEntry((Expr *) uniqexpr,
1773  nextresno,
1774  NULL,
1775  false);
1776  newtlist = lappend(newtlist, tle);
1777  nextresno++;
1778  newitems = true;
1779  }
1780  }
1781 
1782  /* Use change_plan_targetlist in case we need to insert a Result node */
1783  if (newitems || best_path->umethod == UNIQUE_PATH_SORT)
1784  subplan = change_plan_targetlist(subplan, newtlist,
1785  best_path->path.parallel_safe);
1786 
1787  /*
1788  * Build control information showing which subplan output columns are to
1789  * be examined by the grouping step. Unfortunately we can't merge this
1790  * with the previous loop, since we didn't then know which version of the
1791  * subplan tlist we'd end up using.
1792  */
1793  newtlist = subplan->targetlist;
1794  numGroupCols = list_length(uniq_exprs);
1795  groupColIdx = (AttrNumber *) palloc(numGroupCols * sizeof(AttrNumber));
1796  groupCollations = (Oid *) palloc(numGroupCols * sizeof(Oid));
1797 
1798  groupColPos = 0;
1799  foreach(l, uniq_exprs)
1800  {
1801  Expr *uniqexpr = lfirst(l);
1802  TargetEntry *tle;
1803 
1804  tle = tlist_member(uniqexpr, newtlist);
1805  if (!tle) /* shouldn't happen */
1806  elog(ERROR, "failed to find unique expression in subplan tlist");
1807  groupColIdx[groupColPos] = tle->resno;
1808  groupCollations[groupColPos] = exprCollation((Node *) tle->expr);
1809  groupColPos++;
1810  }
1811 
1812  if (best_path->umethod == UNIQUE_PATH_HASH)
1813  {
1814  Oid *groupOperators;
1815 
1816  /*
1817  * Get the hashable equality operators for the Agg node to use.
1818  * Normally these are the same as the IN clause operators, but if
1819  * those are cross-type operators then the equality operators are the
1820  * ones for the IN clause operators' RHS datatype.
1821  */
1822  groupOperators = (Oid *) palloc(numGroupCols * sizeof(Oid));
1823  groupColPos = 0;
1824  foreach(l, in_operators)
1825  {
1826  Oid in_oper = lfirst_oid(l);
1827  Oid eq_oper;
1828 
1829  if (!get_compatible_hash_operators(in_oper, NULL, &eq_oper))
1830  elog(ERROR, "could not find compatible hash operator for operator %u",
1831  in_oper);
1832  groupOperators[groupColPos++] = eq_oper;
1833  }
1834 
1835  /*
1836  * Since the Agg node is going to project anyway, we can give it the
1837  * minimum output tlist, without any stuff we might have added to the
1838  * subplan tlist.
1839  */
1840  plan = (Plan *) make_agg(build_path_tlist(root, &best_path->path),
1841  NIL,
1842  AGG_HASHED,
1844  numGroupCols,
1845  groupColIdx,
1846  groupOperators,
1847  groupCollations,
1848  NIL,
1849  NIL,
1850  best_path->path.rows,
1851  0,
1852  subplan);
1853  }
1854  else
1855  {
1856  List *sortList = NIL;
1857  Sort *sort;
1858 
1859  /* Create an ORDER BY list to sort the input compatibly */
1860  groupColPos = 0;
1861  foreach(l, in_operators)
1862  {
1863  Oid in_oper = lfirst_oid(l);
1864  Oid sortop;
1865  Oid eqop;
1866  TargetEntry *tle;
1867  SortGroupClause *sortcl;
1868 
1869  sortop = get_ordering_op_for_equality_op(in_oper, false);
1870  if (!OidIsValid(sortop)) /* shouldn't happen */
1871  elog(ERROR, "could not find ordering operator for equality operator %u",
1872  in_oper);
1873 
1874  /*
1875  * The Unique node will need equality operators. Normally these
1876  * are the same as the IN clause operators, but if those are
1877  * cross-type operators then the equality operators are the ones
1878  * for the IN clause operators' RHS datatype.
1879  */
1880  eqop = get_equality_op_for_ordering_op(sortop, NULL);
1881  if (!OidIsValid(eqop)) /* shouldn't happen */
1882  elog(ERROR, "could not find equality operator for ordering operator %u",
1883  sortop);
1884 
1885  tle = get_tle_by_resno(subplan->targetlist,
1886  groupColIdx[groupColPos]);
1887  Assert(tle != NULL);
1888 
1889  sortcl = makeNode(SortGroupClause);
1890  sortcl->tleSortGroupRef = assignSortGroupRef(tle,
1891  subplan->targetlist);
1892  sortcl->eqop = eqop;
1893  sortcl->sortop = sortop;
1894  sortcl->nulls_first = false;
1895  sortcl->hashable = false; /* no need to make this accurate */
1896  sortList = lappend(sortList, sortcl);
1897  groupColPos++;
1898  }
1899  sort = make_sort_from_sortclauses(sortList, subplan);
1900  label_sort_with_costsize(root, sort, -1.0);
1901  plan = (Plan *) make_unique_from_sortclauses((Plan *) sort, sortList);
1902  }
1903 
1904  /* Copy cost data from Path to Plan */
1905  copy_generic_path_info(plan, &best_path->path);
1906 
1907  return plan;
1908 }
1909 
1910 /*
1911  * create_gather_plan
1912  *
1913  * Create a Gather plan for 'best_path' and (recursively) plans
1914  * for its subpaths.
1915  */
1916 static Gather *
1918 {
1919  Gather *gather_plan;
1920  Plan *subplan;
1921  List *tlist;
1922 
1923  /*
1924  * Push projection down to the child node. That way, the projection work
1925  * is parallelized, and there can be no system columns in the result (they
1926  * can't travel through a tuple queue because it uses MinimalTuple
1927  * representation).
1928  */
1929  subplan = create_plan_recurse(root, best_path->subpath, CP_EXACT_TLIST);
1930 
1931  tlist = build_path_tlist(root, &best_path->path);
1932 
1933  gather_plan = make_gather(tlist,
1934  NIL,
1935  best_path->num_workers,
1937  best_path->single_copy,
1938  subplan);
1939 
1940  copy_generic_path_info(&gather_plan->plan, &best_path->path);
1941 
1942  /* use parallel mode for parallel plans. */
1943  root->glob->parallelModeNeeded = true;
1944 
1945  return gather_plan;
1946 }
1947 
1948 /*
1949  * create_gather_merge_plan
1950  *
1951  * Create a Gather Merge plan for 'best_path' and (recursively)
1952  * plans for its subpaths.
1953  */
1954 static GatherMerge *
1956 {
1957  GatherMerge *gm_plan;
1958  Plan *subplan;
1959  List *pathkeys = best_path->path.pathkeys;
1960  List *tlist = build_path_tlist(root, &best_path->path);
1961 
1962  /* As with Gather, project away columns in the workers. */
1963  subplan = create_plan_recurse(root, best_path->subpath, CP_EXACT_TLIST);
1964 
1965  /* Create a shell for a GatherMerge plan. */
1966  gm_plan = makeNode(GatherMerge);
1967  gm_plan->plan.targetlist = tlist;
1968  gm_plan->num_workers = best_path->num_workers;
1969  copy_generic_path_info(&gm_plan->plan, &best_path->path);
1970 
1971  /* Assign the rescan Param. */
1972  gm_plan->rescan_param = assign_special_exec_param(root);
1973 
1974  /* Gather Merge is pointless with no pathkeys; use Gather instead. */
1975  Assert(pathkeys != NIL);
1976 
1977  /* Compute sort column info, and adjust subplan's tlist as needed */
1978  subplan = prepare_sort_from_pathkeys(subplan, pathkeys,
1979  best_path->subpath->parent->relids,
1980  gm_plan->sortColIdx,
1981  false,
1982  &gm_plan->numCols,
1983  &gm_plan->sortColIdx,
1984  &gm_plan->sortOperators,
1985  &gm_plan->collations,
1986  &gm_plan->nullsFirst);
1987 
1988 
1989  /*
1990  * All gather merge paths should have already guaranteed the necessary
1991  * sort order either by adding an explicit sort node or by using presorted
1992  * input. We can't simply add a sort here on additional pathkeys, because
1993  * we can't guarantee the sort would be safe. For example, expressions may
1994  * be volatile or otherwise parallel unsafe.
1995  */
1996  if (!pathkeys_contained_in(pathkeys, best_path->subpath->pathkeys))
1997  elog(ERROR, "gather merge input not sufficiently sorted");
1998 
1999  /* Now insert the subplan under GatherMerge. */
2000  gm_plan->plan.lefttree = subplan;
2001 
2002  /* use parallel mode for parallel plans. */
2003  root->glob->parallelModeNeeded = true;
2004 
2005  return gm_plan;
2006 }
2007 
2008 /*
2009  * create_projection_plan
2010  *
2011  * Create a plan tree to do a projection step and (recursively) plans
2012  * for its subpaths. We may need a Result node for the projection,
2013  * but sometimes we can just let the subplan do the work.
2014  */
2015 static Plan *
2017 {
2018  Plan *plan;
2019  Plan *subplan;
2020  List *tlist;
2021  bool needs_result_node = false;
2022 
2023  /*
2024  * Convert our subpath to a Plan and determine whether we need a Result
2025  * node.
2026  *
2027  * In most cases where we don't need to project, creation_projection_path
2028  * will have set dummypp, but not always. First, some createplan.c
2029  * routines change the tlists of their nodes. (An example is that
2030  * create_merge_append_plan might add resjunk sort columns to a
2031  * MergeAppend.) Second, create_projection_path has no way of knowing
2032  * what path node will be placed on top of the projection path and
2033  * therefore can't predict whether it will require an exact tlist. For
2034  * both of these reasons, we have to recheck here.
2035  */
2036  if (use_physical_tlist(root, &best_path->path, flags))
2037  {
2038  /*
2039  * Our caller doesn't really care what tlist we return, so we don't
2040  * actually need to project. However, we may still need to ensure
2041  * proper sortgroupref labels, if the caller cares about those.
2042  */
2043  subplan = create_plan_recurse(root, best_path->subpath, 0);
2044  tlist = subplan->targetlist;
2045  if (flags & CP_LABEL_TLIST)
2047  best_path->path.pathtarget);
2048  }
2049  else if (is_projection_capable_path(best_path->subpath))
2050  {
2051  /*
2052  * Our caller requires that we return the exact tlist, but no separate
2053  * result node is needed because the subpath is projection-capable.
2054  * Tell create_plan_recurse that we're going to ignore the tlist it
2055  * produces.
2056  */
2057  subplan = create_plan_recurse(root, best_path->subpath,
2058  CP_IGNORE_TLIST);
2060  tlist = build_path_tlist(root, &best_path->path);
2061  }
2062  else
2063  {
2064  /*
2065  * It looks like we need a result node, unless by good fortune the
2066  * requested tlist is exactly the one the child wants to produce.
2067  */
2068  subplan = create_plan_recurse(root, best_path->subpath, 0);
2069  tlist = build_path_tlist(root, &best_path->path);
2070  needs_result_node = !tlist_same_exprs(tlist, subplan->targetlist);
2071  }
2072 
2073  /*
2074  * If we make a different decision about whether to include a Result node
2075  * than create_projection_path did, we'll have made slightly wrong cost
2076  * estimates; but label the plan with the cost estimates we actually used,
2077  * not "corrected" ones. (XXX this could be cleaned up if we moved more
2078  * of the sortcolumn setup logic into Path creation, but that would add
2079  * expense to creating Paths we might end up not using.)
2080  */
2081  if (!needs_result_node)
2082  {
2083  /* Don't need a separate Result, just assign tlist to subplan */
2084  plan = subplan;
2085  plan->targetlist = tlist;
2086 
2087  /* Label plan with the estimated costs we actually used */
2088  plan->startup_cost = best_path->path.startup_cost;
2089  plan->total_cost = best_path->path.total_cost;
2090  plan->plan_rows = best_path->path.rows;
2091  plan->plan_width = best_path->path.pathtarget->width;
2092  plan->parallel_safe = best_path->path.parallel_safe;
2093  /* ... but don't change subplan's parallel_aware flag */
2094  }
2095  else
2096  {
2097  /* We need a Result node */
2098  plan = (Plan *) make_result(tlist, NULL, subplan);
2099 
2100  copy_generic_path_info(plan, (Path *) best_path);
2101  }
2102 
2103  return plan;
2104 }
2105 
2106 /*
2107  * inject_projection_plan
2108  * Insert a Result node to do a projection step.
2109  *
2110  * This is used in a few places where we decide on-the-fly that we need a
2111  * projection step as part of the tree generated for some Path node.
2112  * We should try to get rid of this in favor of doing it more honestly.
2113  *
2114  * One reason it's ugly is we have to be told the right parallel_safe marking
2115  * to apply (since the tlist might be unsafe even if the child plan is safe).
2116  */
2117 static Plan *
2118 inject_projection_plan(Plan *subplan, List *tlist, bool parallel_safe)
2119 {
2120  Plan *plan;
2121 
2122  plan = (Plan *) make_result(tlist, NULL, subplan);
2123 
2124  /*
2125  * In principle, we should charge tlist eval cost plus cpu_per_tuple per
2126  * row for the Result node. But the former has probably been factored in
2127  * already and the latter was not accounted for during Path construction,
2128  * so being formally correct might just make the EXPLAIN output look less
2129  * consistent not more so. Hence, just copy the subplan's cost.
2130  */
2131  copy_plan_costsize(plan, subplan);
2132  plan->parallel_safe = parallel_safe;
2133 
2134  return plan;
2135 }
2136 
2137 /*
2138  * change_plan_targetlist
2139  * Externally available wrapper for inject_projection_plan.
2140  *
2141  * This is meant for use by FDW plan-generation functions, which might
2142  * want to adjust the tlist computed by some subplan tree. In general,
2143  * a Result node is needed to compute the new tlist, but we can optimize
2144  * some cases.
2145  *
2146  * In most cases, tlist_parallel_safe can just be passed as the parallel_safe
2147  * flag of the FDW's own Path node.
2148  */
2149 Plan *
2150 change_plan_targetlist(Plan *subplan, List *tlist, bool tlist_parallel_safe)
2151 {
2152  /*
2153  * If the top plan node can't do projections and its existing target list
2154  * isn't already what we need, we need to add a Result node to help it
2155  * along.
2156  */
2157  if (!is_projection_capable_plan(subplan) &&
2158  !tlist_same_exprs(tlist, subplan->targetlist))
2159  subplan = inject_projection_plan(subplan, tlist,
2160  subplan->parallel_safe &&
2161  tlist_parallel_safe);
2162  else
2163  {
2164  /* Else we can just replace the plan node's tlist */
2165  subplan->targetlist = tlist;
2166  subplan->parallel_safe &= tlist_parallel_safe;
2167  }
2168  return subplan;
2169 }
2170 
2171 /*
2172  * create_sort_plan
2173  *
2174  * Create a Sort plan for 'best_path' and (recursively) plans
2175  * for its subpaths.
2176  */
2177 static Sort *
2178 create_sort_plan(PlannerInfo *root, SortPath *best_path, int flags)
2179 {
2180  Sort *plan;
2181  Plan *subplan;
2182 
2183  /*
2184  * We don't want any excess columns in the sorted tuples, so request a
2185  * smaller tlist. Otherwise, since Sort doesn't project, tlist
2186  * requirements pass through.
2187  */
2188  subplan = create_plan_recurse(root, best_path->subpath,
2189  flags | CP_SMALL_TLIST);
2190 
2191  /*
2192  * make_sort_from_pathkeys indirectly calls find_ec_member_matching_expr,
2193  * which will ignore any child EC members that don't belong to the given
2194  * relids. Thus, if this sort path is based on a child relation, we must
2195  * pass its relids.
2196  */
2197  plan = make_sort_from_pathkeys(subplan, best_path->path.pathkeys,
2198  IS_OTHER_REL(best_path->subpath->parent) ?
2199  best_path->path.parent->relids : NULL);
2200 
2201  copy_generic_path_info(&plan->plan, (Path *) best_path);
2202 
2203  return plan;
2204 }
2205 
2206 /*
2207  * create_incrementalsort_plan
2208  *
2209  * Do the same as create_sort_plan, but create IncrementalSort plan.
2210  */
2211 static IncrementalSort *
2213  int flags)
2214 {
2216  Plan *subplan;
2217 
2218  /* See comments in create_sort_plan() above */
2219  subplan = create_plan_recurse(root, best_path->spath.subpath,
2220  flags | CP_SMALL_TLIST);
2222  best_path->spath.path.pathkeys,
2223  IS_OTHER_REL(best_path->spath.subpath->parent) ?
2224  best_path->spath.path.parent->relids : NULL,
2225  best_path->nPresortedCols);
2226 
2227  copy_generic_path_info(&plan->sort.plan, (Path *) best_path);
2228 
2229  return plan;
2230 }
2231 
2232 /*
2233  * create_group_plan
2234  *
2235  * Create a Group plan for 'best_path' and (recursively) plans
2236  * for its subpaths.
2237  */
2238 static Group *
2240 {
2241  Group *plan;
2242  Plan *subplan;
2243  List *tlist;
2244  List *quals;
2245 
2246  /*
2247  * Group can project, so no need to be terribly picky about child tlist,
2248  * but we do need grouping columns to be available
2249  */
2250  subplan = create_plan_recurse(root, best_path->subpath, CP_LABEL_TLIST);
2251 
2252  tlist = build_path_tlist(root, &best_path->path);
2253 
2254  quals = order_qual_clauses(root, best_path->qual);
2255 
2256  plan = make_group(tlist,
2257  quals,
2258  list_length(best_path->groupClause),
2260  subplan->targetlist),
2261  extract_grouping_ops(best_path->groupClause),
2263  subplan->targetlist),
2264  subplan);
2265 
2266  copy_generic_path_info(&plan->plan, (Path *) best_path);
2267 
2268  return plan;
2269 }
2270 
2271 /*
2272  * create_upper_unique_plan
2273  *
2274  * Create a Unique plan for 'best_path' and (recursively) plans
2275  * for its subpaths.
2276  */
2277 static Unique *
2279 {
2280  Unique *plan;
2281  Plan *subplan;
2282 
2283  /*
2284  * Unique doesn't project, so tlist requirements pass through; moreover we
2285  * need grouping columns to be labeled.
2286  */
2287  subplan = create_plan_recurse(root, best_path->subpath,
2288  flags | CP_LABEL_TLIST);
2289 
2290  plan = make_unique_from_pathkeys(subplan,
2291  best_path->path.pathkeys,
2292  best_path->numkeys);
2293 
2294  copy_generic_path_info(&plan->plan, (Path *) best_path);
2295 
2296  return plan;
2297 }
2298 
2299 /*
2300  * create_agg_plan
2301  *
2302  * Create an Agg plan for 'best_path' and (recursively) plans
2303  * for its subpaths.
2304  */
2305 static Agg *
2307 {
2308  Agg *plan;
2309  Plan *subplan;
2310  List *tlist;
2311  List *quals;
2312 
2313  /*
2314  * Agg can project, so no need to be terribly picky about child tlist, but
2315  * we do need grouping columns to be available
2316  */
2317  subplan = create_plan_recurse(root, best_path->subpath, CP_LABEL_TLIST);
2318 
2319  tlist = build_path_tlist(root, &best_path->path);
2320 
2321  quals = order_qual_clauses(root, best_path->qual);
2322 
2323  plan = make_agg(tlist, quals,
2324  best_path->aggstrategy,
2325  best_path->aggsplit,
2326  list_length(best_path->groupClause),
2328  subplan->targetlist),
2329  extract_grouping_ops(best_path->groupClause),
2331  subplan->targetlist),
2332  NIL,
2333  NIL,
2334  best_path->numGroups,
2335  best_path->transitionSpace,
2336  subplan);
2337 
2338  copy_generic_path_info(&plan->plan, (Path *) best_path);
2339 
2340  return plan;
2341 }
2342 
2343 /*
2344  * Given a groupclause for a collection of grouping sets, produce the
2345  * corresponding groupColIdx.
2346  *
2347  * root->grouping_map maps the tleSortGroupRef to the actual column position in
2348  * the input tuple. So we get the ref from the entries in the groupclause and
2349  * look them up there.
2350  */
2351 static AttrNumber *
2352 remap_groupColIdx(PlannerInfo *root, List *groupClause)
2353 {
2354  AttrNumber *grouping_map = root->grouping_map;
2355  AttrNumber *new_grpColIdx;
2356  ListCell *lc;
2357  int i;
2358 
2359  Assert(grouping_map);
2360 
2361  new_grpColIdx = palloc0(sizeof(AttrNumber) * list_length(groupClause));
2362 
2363  i = 0;
2364  foreach(lc, groupClause)
2365  {
2366  SortGroupClause *clause = lfirst(lc);
2367 
2368  new_grpColIdx[i++] = grouping_map[clause->tleSortGroupRef];
2369  }
2370 
2371  return new_grpColIdx;
2372 }
2373 
2374 /*
2375  * create_groupingsets_plan
2376  * Create a plan for 'best_path' and (recursively) plans
2377  * for its subpaths.
2378  *
2379  * What we emit is an Agg plan with some vestigial Agg and Sort nodes
2380  * hanging off the side. The top Agg implements the last grouping set
2381  * specified in the GroupingSetsPath, and any additional grouping sets
2382  * each give rise to a subsidiary Agg and Sort node in the top Agg's
2383  * "chain" list. These nodes don't participate in the plan directly,
2384  * but they are a convenient way to represent the required data for
2385  * the extra steps.
2386  *
2387  * Returns a Plan node.
2388  */
2389 static Plan *
2391 {
2392  Agg *plan;
2393  Plan *subplan;
2394  List *rollups = best_path->rollups;
2395  AttrNumber *grouping_map;
2396  int maxref;
2397  List *chain;
2398  ListCell *lc;
2399 
2400  /* Shouldn't get here without grouping sets */
2401  Assert(root->parse->groupingSets);
2402  Assert(rollups != NIL);
2403 
2404  /*
2405  * Agg can project, so no need to be terribly picky about child tlist, but
2406  * we do need grouping columns to be available
2407  */
2408  subplan = create_plan_recurse(root, best_path->subpath, CP_LABEL_TLIST);
2409 
2410  /*
2411  * Compute the mapping from tleSortGroupRef to column index in the child's
2412  * tlist. First, identify max SortGroupRef in groupClause, for array
2413  * sizing.
2414  */
2415  maxref = 0;
2416  foreach(lc, root->processed_groupClause)
2417  {
2418  SortGroupClause *gc = (SortGroupClause *) lfirst(lc);
2419 
2420  if (gc->tleSortGroupRef > maxref)
2421  maxref = gc->tleSortGroupRef;
2422  }
2423 
2424  grouping_map = (AttrNumber *) palloc0((maxref + 1) * sizeof(AttrNumber));
2425 
2426  /* Now look up the column numbers in the child's tlist */
2427  foreach(lc, root->processed_groupClause)
2428  {
2429  SortGroupClause *gc = (SortGroupClause *) lfirst(lc);
2430  TargetEntry *tle = get_sortgroupclause_tle(gc, subplan->targetlist);
2431 
2432  grouping_map[gc->tleSortGroupRef] = tle->resno;
2433  }
2434 
2435  /*
2436  * During setrefs.c, we'll need the grouping_map to fix up the cols lists
2437  * in GroupingFunc nodes. Save it for setrefs.c to use.
2438  */
2439  Assert(root->grouping_map == NULL);
2440  root->grouping_map = grouping_map;
2441 
2442  /*
2443  * Generate the side nodes that describe the other sort and group
2444  * operations besides the top one. Note that we don't worry about putting
2445  * accurate cost estimates in the side nodes; only the topmost Agg node's
2446  * costs will be shown by EXPLAIN.
2447  */
2448  chain = NIL;
2449  if (list_length(rollups) > 1)
2450  {
2451  bool is_first_sort = ((RollupData *) linitial(rollups))->is_hashed;
2452 
2453  for_each_from(lc, rollups, 1)
2454  {
2455  RollupData *rollup = lfirst(lc);
2456  AttrNumber *new_grpColIdx;
2457  Plan *sort_plan = NULL;
2458  Plan *agg_plan;
2459  AggStrategy strat;
2460 
2461  new_grpColIdx = remap_groupColIdx(root, rollup->groupClause);
2462 
2463  if (!rollup->is_hashed && !is_first_sort)
2464  {
2465  sort_plan = (Plan *)
2467  new_grpColIdx,
2468  subplan);
2469  }
2470 
2471  if (!rollup->is_hashed)
2472  is_first_sort = false;
2473 
2474  if (rollup->is_hashed)
2475  strat = AGG_HASHED;
2476  else if (linitial(rollup->gsets) == NIL)
2477  strat = AGG_PLAIN;
2478  else
2479  strat = AGG_SORTED;
2480 
2481  agg_plan = (Plan *) make_agg(NIL,
2482  NIL,
2483  strat,
2485  list_length((List *) linitial(rollup->gsets)),
2486  new_grpColIdx,
2489  rollup->gsets,
2490  NIL,
2491  rollup->numGroups,
2492  best_path->transitionSpace,
2493  sort_plan);
2494 
2495  /*
2496  * Remove stuff we don't need to avoid bloating debug output.
2497  */
2498  if (sort_plan)
2499  {
2500  sort_plan->targetlist = NIL;
2501  sort_plan->lefttree = NULL;
2502  }
2503 
2504  chain = lappend(chain, agg_plan);
2505  }
2506  }
2507 
2508  /*
2509  * Now make the real Agg node
2510  */
2511  {
2512  RollupData *rollup = linitial(rollups);
2513  AttrNumber *top_grpColIdx;
2514  int numGroupCols;
2515 
2516  top_grpColIdx = remap_groupColIdx(root, rollup->groupClause);
2517 
2518  numGroupCols = list_length((List *) linitial(rollup->gsets));
2519 
2520  plan = make_agg(build_path_tlist(root, &best_path->path),
2521  best_path->qual,
2522  best_path->aggstrategy,
2524  numGroupCols,
2525  top_grpColIdx,
2528  rollup->gsets,
2529  chain,
2530  rollup->numGroups,
2531  best_path->transitionSpace,
2532  subplan);
2533 
2534  /* Copy cost data from Path to Plan */
2535  copy_generic_path_info(&plan->plan, &best_path->path);
2536  }
2537 
2538  return (Plan *) plan;
2539 }
2540 
2541 /*
2542  * create_minmaxagg_plan
2543  *
2544  * Create a Result plan for 'best_path' and (recursively) plans
2545  * for its subpaths.
2546  */
2547 static Result *
2549 {
2550  Result *plan;
2551  List *tlist;
2552  ListCell *lc;
2553 
2554  /* Prepare an InitPlan for each aggregate's subquery. */
2555  foreach(lc, best_path->mmaggregates)
2556  {
2557  MinMaxAggInfo *mminfo = (MinMaxAggInfo *) lfirst(lc);
2558  PlannerInfo *subroot = mminfo->subroot;
2559  Query *subparse = subroot->parse;
2560  Plan *plan;
2561 
2562  /*
2563  * Generate the plan for the subquery. We already have a Path, but we
2564  * have to convert it to a Plan and attach a LIMIT node above it.
2565  * Since we are entering a different planner context (subroot),
2566  * recurse to create_plan not create_plan_recurse.
2567  */
2568  plan = create_plan(subroot, mminfo->path);
2569 
2570  plan = (Plan *) make_limit(plan,
2571  subparse->limitOffset,
2572  subparse->limitCount,
2573  subparse->limitOption,
2574  0, NULL, NULL, NULL);
2575 
2576  /* Must apply correct cost/width data to Limit node */
2577  plan->startup_cost = mminfo->path->startup_cost;
2578  plan->total_cost = mminfo->pathcost;
2579  plan->plan_rows = 1;
2580  plan->plan_width = mminfo->path->pathtarget->width;
2581  plan->parallel_aware = false;
2582  plan->parallel_safe = mminfo->path->parallel_safe;
2583 
2584  /* Convert the plan into an InitPlan in the outer query. */
2585  SS_make_initplan_from_plan(root, subroot, plan, mminfo->param);
2586  }
2587 
2588  /* Generate the output plan --- basically just a Result */
2589  tlist = build_path_tlist(root, &best_path->path);
2590 
2591  plan = make_result(tlist, (Node *) best_path->quals, NULL);
2592 
2593  copy_generic_path_info(&plan->plan, (Path *) best_path);
2594 
2595  /*
2596  * During setrefs.c, we'll need to replace references to the Agg nodes
2597  * with InitPlan output params. (We can't just do that locally in the
2598  * MinMaxAgg node, because path nodes above here may have Agg references
2599  * as well.) Save the mmaggregates list to tell setrefs.c to do that.
2600  */
2601  Assert(root->minmax_aggs == NIL);
2602  root->minmax_aggs = best_path->mmaggregates;
2603 
2604  return plan;
2605 }
2606 
2607 /*
2608  * create_windowagg_plan
2609  *
2610  * Create a WindowAgg plan for 'best_path' and (recursively) plans
2611  * for its subpaths.
2612  */
2613 static WindowAgg *
2615 {
2616  WindowAgg *plan;
2617  WindowClause *wc = best_path->winclause;
2618  int numPart = list_length(wc->partitionClause);
2619  int numOrder = list_length(wc->orderClause);
2620  Plan *subplan;
2621  List *tlist;
2622  int partNumCols;
2623  AttrNumber *partColIdx;
2624  Oid *partOperators;
2625  Oid *partCollations;
2626  int ordNumCols;
2627  AttrNumber *ordColIdx;
2628  Oid *ordOperators;
2629  Oid *ordCollations;
2630  ListCell *lc;
2631 
2632  /*
2633  * Choice of tlist here is motivated by the fact that WindowAgg will be
2634  * storing the input rows of window frames in a tuplestore; it therefore
2635  * behooves us to request a small tlist to avoid wasting space. We do of
2636  * course need grouping columns to be available.
2637  */
2638  subplan = create_plan_recurse(root, best_path->subpath,
2640 
2641  tlist = build_path_tlist(root, &best_path->path);
2642 
2643  /*
2644  * Convert SortGroupClause lists into arrays of attr indexes and equality
2645  * operators, as wanted by executor.
2646  */
2647  partColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numPart);
2648  partOperators = (Oid *) palloc(sizeof(Oid) * numPart);
2649  partCollations = (Oid *) palloc(sizeof(Oid) * numPart);
2650 
2651  partNumCols = 0;
2652  foreach(lc, wc->partitionClause)
2653  {
2654  SortGroupClause *sgc = (SortGroupClause *) lfirst(lc);
2655  TargetEntry *tle = get_sortgroupclause_tle(sgc, subplan->targetlist);
2656 
2657  Assert(OidIsValid(sgc->eqop));
2658  partColIdx[partNumCols] = tle->resno;
2659  partOperators[partNumCols] = sgc->eqop;
2660  partCollations[partNumCols] = exprCollation((Node *) tle->expr);
2661  partNumCols++;
2662  }
2663 
2664  ordColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numOrder);
2665  ordOperators = (Oid *) palloc(sizeof(Oid) * numOrder);
2666  ordCollations = (Oid *) palloc(sizeof(Oid) * numOrder);
2667 
2668  ordNumCols = 0;
2669  foreach(lc, wc->orderClause)
2670  {
2671  SortGroupClause *sgc = (SortGroupClause *) lfirst(lc);
2672  TargetEntry *tle = get_sortgroupclause_tle(sgc, subplan->targetlist);
2673 
2674  Assert(OidIsValid(sgc->eqop));
2675  ordColIdx[ordNumCols] = tle->resno;
2676  ordOperators[ordNumCols] = sgc->eqop;
2677  ordCollations[ordNumCols] = exprCollation((Node *) tle->expr);
2678  ordNumCols++;
2679  }
2680 
2681  /* And finally we can make the WindowAgg node */
2682  plan = make_windowagg(tlist,
2683  wc->winref,
2684  partNumCols,
2685  partColIdx,
2686  partOperators,
2687  partCollations,
2688  ordNumCols,
2689  ordColIdx,
2690  ordOperators,
2691  ordCollations,
2692  wc->frameOptions,
2693  wc->startOffset,
2694  wc->endOffset,
2695  wc->startInRangeFunc,
2696  wc->endInRangeFunc,
2697  wc->inRangeColl,
2698  wc->inRangeAsc,
2699  wc->inRangeNullsFirst,
2700  wc->runCondition,
2701  best_path->qual,
2702  best_path->topwindow,
2703  subplan);
2704 
2705  copy_generic_path_info(&plan->plan, (Path *) best_path);
2706 
2707  return plan;
2708 }
2709 
2710 /*
2711  * create_setop_plan
2712  *
2713  * Create a SetOp plan for 'best_path' and (recursively) plans
2714  * for its subpaths.
2715  */
2716 static SetOp *
2717 create_setop_plan(PlannerInfo *root, SetOpPath *best_path, int flags)
2718 {
2719  SetOp *plan;
2720  Plan *subplan;
2721  long numGroups;
2722 
2723  /*
2724  * SetOp doesn't project, so tlist requirements pass through; moreover we
2725  * need grouping columns to be labeled.
2726  */
2727  subplan = create_plan_recurse(root, best_path->subpath,
2728  flags | CP_LABEL_TLIST);
2729 
2730  /* Convert numGroups to long int --- but 'ware overflow! */
2731  numGroups = clamp_cardinality_to_long(best_path->numGroups);
2732 
2733  plan = make_setop(best_path->cmd,
2734  best_path->strategy,
2735  subplan,
2736  best_path->distinctList,
2737  best_path->flagColIdx,
2738  best_path->firstFlag,
2739  numGroups);
2740 
2741  copy_generic_path_info(&plan->plan, (Path *) best_path);
2742 
2743  return plan;
2744 }
2745 
2746 /*
2747  * create_recursiveunion_plan
2748  *
2749  * Create a RecursiveUnion plan for 'best_path' and (recursively) plans
2750  * for its subpaths.
2751  */
2752 static RecursiveUnion *
2754 {
2756  Plan *leftplan;
2757  Plan *rightplan;
2758  List *tlist;
2759  long numGroups;
2760 
2761  /* Need both children to produce same tlist, so force it */
2762  leftplan = create_plan_recurse(root, best_path->leftpath, CP_EXACT_TLIST);
2763  rightplan = create_plan_recurse(root, best_path->rightpath, CP_EXACT_TLIST);
2764 
2765  tlist = build_path_tlist(root, &best_path->path);
2766 
2767  /* Convert numGroups to long int --- but 'ware overflow! */
2768  numGroups = clamp_cardinality_to_long(best_path->numGroups);
2769 
2770  plan = make_recursive_union(tlist,
2771  leftplan,
2772  rightplan,
2773  best_path->wtParam,
2774  best_path->distinctList,
2775  numGroups);
2776 
2777  copy_generic_path_info(&plan->plan, (Path *) best_path);
2778 
2779  return plan;
2780 }
2781 
2782 /*
2783  * create_lockrows_plan
2784  *
2785  * Create a LockRows plan for 'best_path' and (recursively) plans
2786  * for its subpaths.
2787  */
2788 static LockRows *
2790  int flags)
2791 {
2792  LockRows *plan;
2793  Plan *subplan;
2794 
2795  /* LockRows doesn't project, so tlist requirements pass through */
2796  subplan = create_plan_recurse(root, best_path->subpath, flags);
2797 
2798  plan = make_lockrows(subplan, best_path->rowMarks, best_path->epqParam);
2799 
2800  copy_generic_path_info(&plan->plan, (Path *) best_path);
2801 
2802  return plan;
2803 }
2804 
2805 /*
2806  * create_modifytable_plan
2807  * Create a ModifyTable plan for 'best_path'.
2808  *
2809  * Returns a Plan node.
2810  */
2811 static ModifyTable *
2813 {
2814  ModifyTable *plan;
2815  Path *subpath = best_path->subpath;
2816  Plan *subplan;
2817 
2818  /* Subplan must produce exactly the specified tlist */
2819  subplan = create_plan_recurse(root, subpath, CP_EXACT_TLIST);
2820 
2821  /* Transfer resname/resjunk labeling, too, to keep executor happy */
2823 
2824  plan = make_modifytable(root,
2825  subplan,
2826  best_path->operation,
2827  best_path->canSetTag,
2828  best_path->nominalRelation,
2829  best_path->rootRelation,
2830  best_path->partColsUpdated,
2831  best_path->resultRelations,
2832  best_path->updateColnosLists,
2833  best_path->withCheckOptionLists,
2834  best_path->returningLists,
2835  best_path->rowMarks,
2836  best_path->onconflict,
2837  best_path->mergeActionLists,
2838  best_path->epqParam);
2839 
2840  copy_generic_path_info(&plan->plan, &best_path->path);
2841 
2842  return plan;
2843 }
2844 
2845 /*
2846  * create_limit_plan
2847  *
2848  * Create a Limit plan for 'best_path' and (recursively) plans
2849  * for its subpaths.
2850  */
2851 static Limit *
2852 create_limit_plan(PlannerInfo *root, LimitPath *best_path, int flags)
2853 {
2854  Limit *plan;
2855  Plan *subplan;
2856  int numUniqkeys = 0;
2857  AttrNumber *uniqColIdx = NULL;
2858  Oid *uniqOperators = NULL;
2859  Oid *uniqCollations = NULL;
2860 
2861  /* Limit doesn't project, so tlist requirements pass through */
2862  subplan = create_plan_recurse(root, best_path->subpath, flags);
2863 
2864  /* Extract information necessary for comparing rows for WITH TIES. */
2865  if (best_path->limitOption == LIMIT_OPTION_WITH_TIES)
2866  {
2867  Query *parse = root->parse;
2868  ListCell *l;
2869 
2870  numUniqkeys = list_length(parse->sortClause);
2871  uniqColIdx = (AttrNumber *) palloc(numUniqkeys * sizeof(AttrNumber));
2872  uniqOperators = (Oid *) palloc(numUniqkeys * sizeof(Oid));
2873  uniqCollations = (Oid *) palloc(numUniqkeys * sizeof(Oid));
2874 
2875  numUniqkeys = 0;
2876  foreach(l, parse->sortClause)
2877  {
2878  SortGroupClause *sortcl = (SortGroupClause *) lfirst(l);
2879  TargetEntry *tle = get_sortgroupclause_tle(sortcl, parse->targetList);
2880 
2881  uniqColIdx[numUniqkeys] = tle->resno;
2882  uniqOperators[numUniqkeys] = sortcl->eqop;
2883  uniqCollations[numUniqkeys] = exprCollation((Node *) tle->expr);
2884  numUniqkeys++;
2885  }
2886  }
2887 
2888  plan = make_limit(subplan,
2889  best_path->limitOffset,
2890  best_path->limitCount,
2891  best_path->limitOption,
2892  numUniqkeys, uniqColIdx, uniqOperators, uniqCollations);
2893 
2894  copy_generic_path_info(&plan->plan, (Path *) best_path);
2895 
2896  return plan;
2897 }
2898 
2899 
2900 /*****************************************************************************
2901  *
2902  * BASE-RELATION SCAN METHODS
2903  *
2904  *****************************************************************************/
2905 
2906 
2907 /*
2908  * create_seqscan_plan
2909  * Returns a seqscan plan for the base relation scanned by 'best_path'
2910  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
2911  */
2912 static SeqScan *
2914  List *tlist, List *scan_clauses)
2915 {
2916  SeqScan *scan_plan;
2917  Index scan_relid = best_path->parent->relid;
2918 
2919  /* it should be a base rel... */
2920  Assert(scan_relid > 0);
2921  Assert(best_path->parent->rtekind == RTE_RELATION);
2922 
2923  /* Sort clauses into best execution order */
2924  scan_clauses = order_qual_clauses(root, scan_clauses);
2925 
2926  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
2927  scan_clauses = extract_actual_clauses(scan_clauses, false);
2928 
2929  /* Replace any outer-relation variables with nestloop params */
2930  if (best_path->param_info)
2931  {
2932  scan_clauses = (List *)
2933  replace_nestloop_params(root, (Node *) scan_clauses);
2934  }
2935 
2936  scan_plan = make_seqscan(tlist,
2937  scan_clauses,
2938  scan_relid);
2939 
2940  copy_generic_path_info(&scan_plan->scan.plan, best_path);
2941 
2942  return scan_plan;
2943 }
2944 
2945 /*
2946  * create_samplescan_plan
2947  * Returns a samplescan plan for the base relation scanned by 'best_path'
2948  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
2949  */
2950 static SampleScan *
2952  List *tlist, List *scan_clauses)
2953 {
2954  SampleScan *scan_plan;
2955  Index scan_relid = best_path->parent->relid;
2956  RangeTblEntry *rte;
2957  TableSampleClause *tsc;
2958 
2959  /* it should be a base rel with a tablesample clause... */
2960  Assert(scan_relid > 0);
2961  rte = planner_rt_fetch(scan_relid, root);
2962  Assert(rte->rtekind == RTE_RELATION);
2963  tsc = rte->tablesample;
2964  Assert(tsc != NULL);
2965 
2966  /* Sort clauses into best execution order */
2967  scan_clauses = order_qual_clauses(root, scan_clauses);
2968 
2969  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
2970  scan_clauses = extract_actual_clauses(scan_clauses, false);
2971 
2972  /* Replace any outer-relation variables with nestloop params */
2973  if (best_path->param_info)
2974  {
2975  scan_clauses = (List *)
2976  replace_nestloop_params(root, (Node *) scan_clauses);
2977  tsc = (TableSampleClause *)
2978  replace_nestloop_params(root, (Node *) tsc);
2979  }
2980 
2981  scan_plan = make_samplescan(tlist,
2982  scan_clauses,
2983  scan_relid,
2984  tsc);
2985 
2986  copy_generic_path_info(&scan_plan->scan.plan, best_path);
2987 
2988  return scan_plan;
2989 }
2990 
2991 /*
2992  * create_indexscan_plan
2993  * Returns an indexscan plan for the base relation scanned by 'best_path'
2994  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
2995  *
2996  * We use this for both plain IndexScans and IndexOnlyScans, because the
2997  * qual preprocessing work is the same for both. Note that the caller tells
2998  * us which to build --- we don't look at best_path->path.pathtype, because
2999  * create_bitmap_subplan needs to be able to override the prior decision.
3000  */
3001 static Scan *
3003  IndexPath *best_path,
3004  List *tlist,
3005  List *scan_clauses,
3006  bool indexonly)
3007 {
3008  Scan *scan_plan;
3009  List *indexclauses = best_path->indexclauses;
3010  List *indexorderbys = best_path->indexorderbys;
3011  Index baserelid = best_path->path.parent->relid;
3012  IndexOptInfo *indexinfo = best_path->indexinfo;
3013  Oid indexoid = indexinfo->indexoid;
3014  List *qpqual;
3015  List *stripped_indexquals;
3016  List *fixed_indexquals;
3017  List *fixed_indexorderbys;
3018  List *indexorderbyops = NIL;
3019  ListCell *l;
3020 
3021  /* it should be a base rel... */
3022  Assert(baserelid > 0);
3023  Assert(best_path->path.parent->rtekind == RTE_RELATION);
3024  /* check the scan direction is valid */
3025  Assert(best_path->indexscandir == ForwardScanDirection ||
3026  best_path->indexscandir == BackwardScanDirection);
3027 
3028  /*
3029  * Extract the index qual expressions (stripped of RestrictInfos) from the
3030  * IndexClauses list, and prepare a copy with index Vars substituted for
3031  * table Vars. (This step also does replace_nestloop_params on the
3032  * fixed_indexquals.)
3033  */
3034  fix_indexqual_references(root, best_path,
3035  &stripped_indexquals,
3036  &fixed_indexquals);
3037 
3038  /*
3039  * Likewise fix up index attr references in the ORDER BY expressions.
3040  */
3041  fixed_indexorderbys = fix_indexorderby_references(root, best_path);
3042 
3043  /*
3044  * The qpqual list must contain all restrictions not automatically handled
3045  * by the index, other than pseudoconstant clauses which will be handled
3046  * by a separate gating plan node. All the predicates in the indexquals
3047  * will be checked (either by the index itself, or by nodeIndexscan.c),
3048  * but if there are any "special" operators involved then they must be
3049  * included in qpqual. The upshot is that qpqual must contain
3050  * scan_clauses minus whatever appears in indexquals.
3051  *
3052  * is_redundant_with_indexclauses() detects cases where a scan clause is
3053  * present in the indexclauses list or is generated from the same
3054  * EquivalenceClass as some indexclause, and is therefore redundant with
3055  * it, though not equal. (The latter happens when indxpath.c prefers a
3056  * different derived equality than what generate_join_implied_equalities
3057  * picked for a parameterized scan's ppi_clauses.) Note that it will not
3058  * match to lossy index clauses, which is critical because we have to
3059  * include the original clause in qpqual in that case.
3060  *
3061  * In some situations (particularly with OR'd index conditions) we may
3062  * have scan_clauses that are not equal to, but are logically implied by,
3063  * the index quals; so we also try a predicate_implied_by() check to see
3064  * if we can discard quals that way. (predicate_implied_by assumes its
3065  * first input contains only immutable functions, so we have to check
3066  * that.)
3067  *
3068  * Note: if you change this bit of code you should also look at
3069  * extract_nonindex_conditions() in costsize.c.
3070  */
3071  qpqual = NIL;
3072  foreach(l, scan_clauses)
3073  {
3074  RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
3075 
3076  if (rinfo->pseudoconstant)
3077  continue; /* we may drop pseudoconstants here */
3078  if (is_redundant_with_indexclauses(rinfo, indexclauses))
3079  continue; /* dup or derived from same EquivalenceClass */
3080  if (!contain_mutable_functions((Node *) rinfo->clause) &&
3081  predicate_implied_by(list_make1(rinfo->clause), stripped_indexquals,
3082  false))
3083  continue; /* provably implied by indexquals */
3084  qpqual = lappend(qpqual, rinfo);
3085  }
3086 
3087  /* Sort clauses into best execution order */
3088  qpqual = order_qual_clauses(root, qpqual);
3089 
3090  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3091  qpqual = extract_actual_clauses(qpqual, false);
3092 
3093  /*
3094  * We have to replace any outer-relation variables with nestloop params in
3095  * the indexqualorig, qpqual, and indexorderbyorig expressions. A bit
3096  * annoying to have to do this separately from the processing in
3097  * fix_indexqual_references --- rethink this when generalizing the inner
3098  * indexscan support. But note we can't really do this earlier because
3099  * it'd break the comparisons to predicates above ... (or would it? Those
3100  * wouldn't have outer refs)
3101  */
3102  if (best_path->path.param_info)
3103  {
3104  stripped_indexquals = (List *)
3105  replace_nestloop_params(root, (Node *) stripped_indexquals);
3106  qpqual = (List *)
3107  replace_nestloop_params(root, (Node *) qpqual);
3108  indexorderbys = (List *)
3109  replace_nestloop_params(root, (Node *) indexorderbys);
3110  }
3111 
3112  /*
3113  * If there are ORDER BY expressions, look up the sort operators for their
3114  * result datatypes.
3115  */
3116  if (indexorderbys)
3117  {
3118  ListCell *pathkeyCell,
3119  *exprCell;
3120 
3121  /*
3122  * PathKey contains OID of the btree opfamily we're sorting by, but
3123  * that's not quite enough because we need the expression's datatype
3124  * to look up the sort operator in the operator family.
3125  */
3126  Assert(list_length(best_path->path.pathkeys) == list_length(indexorderbys));
3127  forboth(pathkeyCell, best_path->path.pathkeys, exprCell, indexorderbys)
3128  {
3129  PathKey *pathkey = (PathKey *) lfirst(pathkeyCell);
3130  Node *expr = (Node *) lfirst(exprCell);
3131  Oid exprtype = exprType(expr);
3132  Oid sortop;
3133 
3134  /* Get sort operator from opfamily */
3135  sortop = get_opfamily_member(pathkey->pk_opfamily,
3136  exprtype,
3137  exprtype,
3138  pathkey->pk_strategy);
3139  if (!OidIsValid(sortop))
3140  elog(ERROR, "missing operator %d(%u,%u) in opfamily %u",
3141  pathkey->pk_strategy, exprtype, exprtype, pathkey->pk_opfamily);
3142  indexorderbyops = lappend_oid(indexorderbyops, sortop);
3143  }
3144  }
3145 
3146  /*
3147  * For an index-only scan, we must mark indextlist entries as resjunk if
3148  * they are columns that the index AM can't return; this cues setrefs.c to
3149  * not generate references to those columns.
3150  */
3151  if (indexonly)
3152  {
3153  int i = 0;
3154 
3155  foreach(l, indexinfo->indextlist)
3156  {
3157  TargetEntry *indextle = (TargetEntry *) lfirst(l);
3158 
3159  indextle->resjunk = !indexinfo->canreturn[i];
3160  i++;
3161  }
3162  }
3163 
3164  /* Finally ready to build the plan node */
3165  if (indexonly)
3166  scan_plan = (Scan *) make_indexonlyscan(tlist,
3167  qpqual,
3168  baserelid,
3169  indexoid,
3170  fixed_indexquals,
3171  stripped_indexquals,
3172  fixed_indexorderbys,
3173  indexinfo->indextlist,
3174  best_path->indexscandir);
3175  else
3176  scan_plan = (Scan *) make_indexscan(tlist,
3177  qpqual,
3178  baserelid,
3179  indexoid,
3180  fixed_indexquals,
3181  stripped_indexquals,
3182  fixed_indexorderbys,
3183  indexorderbys,
3184  indexorderbyops,
3185  best_path->indexscandir);
3186 
3187  copy_generic_path_info(&scan_plan->plan, &best_path->path);
3188 
3189  return scan_plan;
3190 }
3191 
3192 /*
3193  * create_bitmap_scan_plan
3194  * Returns a bitmap scan plan for the base relation scanned by 'best_path'
3195  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3196  */
3197 static BitmapHeapScan *
3199  BitmapHeapPath *best_path,
3200  List *tlist,
3201  List *scan_clauses)
3202 {
3203  Index baserelid = best_path->path.parent->relid;
3204  Plan *bitmapqualplan;
3205  List *bitmapqualorig;
3206  List *indexquals;
3207  List *indexECs;
3208  List *qpqual;
3209  ListCell *l;
3210  BitmapHeapScan *scan_plan;
3211 
3212  /* it should be a base rel... */
3213  Assert(baserelid > 0);
3214  Assert(best_path->path.parent->rtekind == RTE_RELATION);
3215 
3216  /* Process the bitmapqual tree into a Plan tree and qual lists */
3217  bitmapqualplan = create_bitmap_subplan(root, best_path->bitmapqual,
3218  &bitmapqualorig, &indexquals,
3219  &indexECs);
3220 
3221  if (best_path->path.parallel_aware)
3222  bitmap_subplan_mark_shared(bitmapqualplan);
3223 
3224  /*
3225  * The qpqual list must contain all restrictions not automatically handled
3226  * by the index, other than pseudoconstant clauses which will be handled
3227  * by a separate gating plan node. All the predicates in the indexquals
3228  * will be checked (either by the index itself, or by
3229  * nodeBitmapHeapscan.c), but if there are any "special" operators
3230  * involved then they must be added to qpqual. The upshot is that qpqual
3231  * must contain scan_clauses minus whatever appears in indexquals.
3232  *
3233  * This loop is similar to the comparable code in create_indexscan_plan(),
3234  * but with some differences because it has to compare the scan clauses to
3235  * stripped (no RestrictInfos) indexquals. See comments there for more
3236  * info.
3237  *
3238  * In normal cases simple equal() checks will be enough to spot duplicate
3239  * clauses, so we try that first. We next see if the scan clause is
3240  * redundant with any top-level indexqual by virtue of being generated
3241  * from the same EC. After that, try predicate_implied_by().
3242  *
3243  * Unlike create_indexscan_plan(), the predicate_implied_by() test here is
3244  * useful for getting rid of qpquals that are implied by index predicates,
3245  * because the predicate conditions are included in the "indexquals"
3246  * returned by create_bitmap_subplan(). Bitmap scans have to do it that
3247  * way because predicate conditions need to be rechecked if the scan
3248  * becomes lossy, so they have to be included in bitmapqualorig.
3249  */
3250  qpqual = NIL;
3251  foreach(l, scan_clauses)
3252  {
3253  RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
3254  Node *clause = (Node *) rinfo->clause;
3255 
3256  if (rinfo->pseudoconstant)
3257  continue; /* we may drop pseudoconstants here */
3258  if (list_member(indexquals, clause))
3259  continue; /* simple duplicate */
3260  if (rinfo->parent_ec && list_member_ptr(indexECs, rinfo->parent_ec))
3261  continue; /* derived from same EquivalenceClass */
3262  if (!contain_mutable_functions(clause) &&
3263  predicate_implied_by(list_make1(clause), indexquals, false))
3264  continue; /* provably implied by indexquals */
3265  qpqual = lappend(qpqual, rinfo);
3266  }
3267 
3268  /* Sort clauses into best execution order */
3269  qpqual = order_qual_clauses(root, qpqual);
3270 
3271  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3272  qpqual = extract_actual_clauses(qpqual, false);
3273 
3274  /*
3275  * When dealing with special operators, we will at this point have
3276  * duplicate clauses in qpqual and bitmapqualorig. We may as well drop
3277  * 'em from bitmapqualorig, since there's no point in making the tests
3278  * twice.
3279  */
3280  bitmapqualorig = list_difference_ptr(bitmapqualorig, qpqual);
3281 
3282  /*
3283  * We have to replace any outer-relation variables with nestloop params in
3284  * the qpqual and bitmapqualorig expressions. (This was already done for
3285  * expressions attached to plan nodes in the bitmapqualplan tree.)
3286  */
3287  if (best_path->path.param_info)
3288  {
3289  qpqual = (List *)
3290  replace_nestloop_params(root, (Node *) qpqual);
3291  bitmapqualorig = (List *)
3292  replace_nestloop_params(root, (Node *) bitmapqualorig);
3293  }
3294 
3295  /* Finally ready to build the plan node */
3296  scan_plan = make_bitmap_heapscan(tlist,
3297  qpqual,
3298  bitmapqualplan,
3299  bitmapqualorig,
3300  baserelid);
3301 
3302  copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
3303 
3304  return scan_plan;
3305 }
3306 
3307 /*
3308  * Given a bitmapqual tree, generate the Plan tree that implements it
3309  *
3310  * As byproducts, we also return in *qual and *indexqual the qual lists
3311  * (in implicit-AND form, without RestrictInfos) describing the original index
3312  * conditions and the generated indexqual conditions. (These are the same in
3313  * simple cases, but when special index operators are involved, the former
3314  * list includes the special conditions while the latter includes the actual
3315  * indexable conditions derived from them.) Both lists include partial-index
3316  * predicates, because we have to recheck predicates as well as index
3317  * conditions if the bitmap scan becomes lossy.
3318  *
3319  * In addition, we return a list of EquivalenceClass pointers for all the
3320  * top-level indexquals that were possibly-redundantly derived from ECs.
3321  * This allows removal of scan_clauses that are redundant with such quals.
3322  * (We do not attempt to detect such redundancies for quals that are within
3323  * OR subtrees. This could be done in a less hacky way if we returned the
3324  * indexquals in RestrictInfo form, but that would be slower and still pretty
3325  * messy, since we'd have to build new RestrictInfos in many cases.)
3326  */
3327 static Plan *
3329  List **qual, List **indexqual, List **indexECs)
3330 {
3331  Plan *plan;
3332 
3333  if (IsA(bitmapqual, BitmapAndPath))
3334  {
3335  BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
3336  List *subplans = NIL;
3337  List *subquals = NIL;
3338  List *subindexquals = NIL;
3339  List *subindexECs = NIL;
3340  ListCell *l;
3341 
3342  /*
3343  * There may well be redundant quals among the subplans, since a
3344  * top-level WHERE qual might have gotten used to form several
3345  * different index quals. We don't try exceedingly hard to eliminate
3346  * redundancies, but we do eliminate obvious duplicates by using
3347  * list_concat_unique.
3348  */
3349  foreach(l, apath->bitmapquals)
3350  {
3351  Plan *subplan;
3352  List *subqual;
3353  List *subindexqual;
3354  List *subindexEC;
3355 
3356  subplan = create_bitmap_subplan(root, (Path *) lfirst(l),
3357  &subqual, &subindexqual,
3358  &subindexEC);
3359  subplans = lappend(subplans, subplan);
3360  subquals = list_concat_unique(subquals, subqual);
3361  subindexquals = list_concat_unique(subindexquals, subindexqual);
3362  /* Duplicates in indexECs aren't worth getting rid of */
3363  subindexECs = list_concat(subindexECs, subindexEC);
3364  }
3365  plan = (Plan *) make_bitmap_and(subplans);
3366  plan->startup_cost = apath->path.startup_cost;
3367  plan->total_cost = apath->path.total_cost;
3368  plan->plan_rows =
3369  clamp_row_est(apath->bitmapselectivity * apath->path.parent->tuples);
3370  plan->plan_width = 0; /* meaningless */
3371  plan->parallel_aware = false;
3372  plan->parallel_safe = apath->path.parallel_safe;
3373  *qual = subquals;
3374  *indexqual = subindexquals;
3375  *indexECs = subindexECs;
3376  }
3377  else if (IsA(bitmapqual, BitmapOrPath))
3378  {
3379  BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
3380  List *subplans = NIL;
3381  List *subquals = NIL;
3382  List *subindexquals = NIL;
3383  bool const_true_subqual = false;
3384  bool const_true_subindexqual = false;
3385  ListCell *l;
3386 
3387  /*
3388  * Here, we only detect qual-free subplans. A qual-free subplan would
3389  * cause us to generate "... OR true ..." which we may as well reduce
3390  * to just "true". We do not try to eliminate redundant subclauses
3391  * because (a) it's not as likely as in the AND case, and (b) we might
3392  * well be working with hundreds or even thousands of OR conditions,
3393  * perhaps from a long IN list. The performance of list_append_unique
3394  * would be unacceptable.
3395  */
3396  foreach(l, opath->bitmapquals)
3397  {
3398  Plan *subplan;
3399  List *subqual;
3400  List *subindexqual;
3401  List *subindexEC;
3402 
3403  subplan = create_bitmap_subplan(root, (Path *) lfirst(l),
3404  &subqual, &subindexqual,
3405  &subindexEC);
3406  subplans = lappend(subplans, subplan);
3407  if (subqual == NIL)
3408  const_true_subqual = true;
3409  else if (!const_true_subqual)
3410  subquals = lappend(subquals,
3411  make_ands_explicit(subqual));
3412  if (subindexqual == NIL)
3413  const_true_subindexqual = true;
3414  else if (!const_true_subindexqual)
3415  subindexquals = lappend(subindexquals,
3416  make_ands_explicit(subindexqual));
3417  }
3418 
3419  /*
3420  * In the presence of ScalarArrayOpExpr quals, we might have built
3421  * BitmapOrPaths with just one subpath; don't add an OR step.
3422  */
3423  if (list_length(subplans) == 1)
3424  {
3425  plan = (Plan *) linitial(subplans);
3426  }
3427  else
3428  {
3429  plan = (Plan *) make_bitmap_or(subplans);
3430  plan->startup_cost = opath->path.startup_cost;
3431  plan->total_cost = opath->path.total_cost;
3432  plan->plan_rows =
3433  clamp_row_est(opath->bitmapselectivity * opath->path.parent->tuples);
3434  plan->plan_width = 0; /* meaningless */
3435  plan->parallel_aware = false;
3436  plan->parallel_safe = opath->path.parallel_safe;
3437  }
3438 
3439  /*
3440  * If there were constant-TRUE subquals, the OR reduces to constant
3441  * TRUE. Also, avoid generating one-element ORs, which could happen
3442  * due to redundancy elimination or ScalarArrayOpExpr quals.
3443  */
3444  if (const_true_subqual)
3445  *qual = NIL;
3446  else if (list_length(subquals) <= 1)
3447  *qual = subquals;
3448  else
3449  *qual = list_make1(make_orclause(subquals));
3450  if (const_true_subindexqual)
3451  *indexqual = NIL;
3452  else if (list_length(subindexquals) <= 1)
3453  *indexqual = subindexquals;
3454  else
3455  *indexqual = list_make1(make_orclause(subindexquals));
3456  *indexECs = NIL;
3457  }
3458  else if (IsA(bitmapqual, IndexPath))
3459  {
3460  IndexPath *ipath = (IndexPath *) bitmapqual;
3461  IndexScan *iscan;
3462  List *subquals;
3463  List *subindexquals;
3464  List *subindexECs;
3465  ListCell *l;
3466 
3467  /* Use the regular indexscan plan build machinery... */
3468  iscan = castNode(IndexScan,
3469  create_indexscan_plan(root, ipath,
3470  NIL, NIL, false));
3471  /* then convert to a bitmap indexscan */
3473  iscan->indexid,
3474  iscan->indexqual,
3475  iscan->indexqualorig);
3476  /* and set its cost/width fields appropriately */
3477  plan->startup_cost = 0.0;
3478  plan->total_cost = ipath->indextotalcost;
3479  plan->plan_rows =
3480  clamp_row_est(ipath->indexselectivity * ipath->path.parent->tuples);
3481  plan->plan_width = 0; /* meaningless */
3482  plan->parallel_aware = false;
3483  plan->parallel_safe = ipath->path.parallel_safe;
3484  /* Extract original index clauses, actual index quals, relevant ECs */
3485  subquals = NIL;
3486  subindexquals = NIL;
3487  subindexECs = NIL;
3488  foreach(l, ipath->indexclauses)
3489  {
3490  IndexClause *iclause = (IndexClause *) lfirst(l);
3491  RestrictInfo *rinfo = iclause->rinfo;
3492 
3493  Assert(!rinfo->pseudoconstant);
3494  subquals = lappend(subquals, rinfo->clause);
3495  subindexquals = list_concat(subindexquals,
3496  get_actual_clauses(iclause->indexquals));
3497  if (rinfo->parent_ec)
3498  subindexECs = lappend(subindexECs, rinfo->parent_ec);
3499  }
3500  /* We can add any index predicate conditions, too */
3501  foreach(l, ipath->indexinfo->indpred)
3502  {
3503  Expr *pred = (Expr *) lfirst(l);
3504 
3505  /*
3506  * We know that the index predicate must have been implied by the
3507  * query condition as a whole, but it may or may not be implied by
3508  * the conditions that got pushed into the bitmapqual. Avoid
3509  * generating redundant conditions.
3510  */
3511  if (!predicate_implied_by(list_make1(pred), subquals, false))
3512  {
3513  subquals = lappend(subquals, pred);
3514  subindexquals = lappend(subindexquals, pred);
3515  }
3516  }
3517  *qual = subquals;
3518  *indexqual = subindexquals;
3519  *indexECs = subindexECs;
3520  }
3521  else
3522  {
3523  elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
3524  plan = NULL; /* keep compiler quiet */
3525  }
3526 
3527  return plan;
3528 }
3529 
3530 /*
3531  * create_tidscan_plan
3532  * Returns a tidscan plan for the base relation scanned by 'best_path'
3533  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3534  */
3535 static TidScan *
3537  List *tlist, List *scan_clauses)
3538 {
3539  TidScan *scan_plan;
3540  Index scan_relid = best_path->path.parent->relid;
3541  List *tidquals = best_path->tidquals;
3542 
3543  /* it should be a base rel... */
3544  Assert(scan_relid > 0);
3545  Assert(best_path->path.parent->rtekind == RTE_RELATION);
3546 
3547  /*
3548  * The qpqual list must contain all restrictions not enforced by the
3549  * tidquals list. Since tidquals has OR semantics, we have to be careful
3550  * about matching it up to scan_clauses. It's convenient to handle the
3551  * single-tidqual case separately from the multiple-tidqual case. In the
3552  * single-tidqual case, we look through the scan_clauses while they are
3553  * still in RestrictInfo form, and drop any that are redundant with the
3554  * tidqual.
3555  *
3556  * In normal cases simple pointer equality checks will be enough to spot
3557  * duplicate RestrictInfos, so we try that first.
3558  *
3559  * Another common case is that a scan_clauses entry is generated from the
3560  * same EquivalenceClass as some tidqual, and is therefore redundant with
3561  * it, though not equal.
3562  *
3563  * Unlike indexpaths, we don't bother with predicate_implied_by(); the
3564  * number of cases where it could win are pretty small.
3565  */
3566  if (list_length(tidquals) == 1)
3567  {
3568  List *qpqual = NIL;
3569  ListCell *l;
3570 
3571  foreach(l, scan_clauses)
3572  {
3573  RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
3574 
3575  if (rinfo->pseudoconstant)
3576  continue; /* we may drop pseudoconstants here */
3577  if (list_member_ptr(tidquals, rinfo))
3578  continue; /* simple duplicate */
3579  if (is_redundant_derived_clause(rinfo, tidquals))
3580  continue; /* derived from same EquivalenceClass */
3581  qpqual = lappend(qpqual, rinfo);
3582  }
3583  scan_clauses = qpqual;
3584  }
3585 
3586  /* Sort clauses into best execution order */
3587  scan_clauses = order_qual_clauses(root, scan_clauses);
3588 
3589  /* Reduce RestrictInfo lists to bare expressions; ignore pseudoconstants */
3590  tidquals = extract_actual_clauses(tidquals, false);
3591  scan_clauses = extract_actual_clauses(scan_clauses, false);
3592 
3593  /*
3594  * If we have multiple tidquals, it's more convenient to remove duplicate
3595  * scan_clauses after stripping the RestrictInfos. In this situation,
3596  * because the tidquals represent OR sub-clauses, they could not have come
3597  * from EquivalenceClasses so we don't have to worry about matching up
3598  * non-identical clauses. On the other hand, because tidpath.c will have
3599  * extracted those sub-clauses from some OR clause and built its own list,
3600  * we will certainly not have pointer equality to any scan clause. So
3601  * convert the tidquals list to an explicit OR clause and see if we can
3602  * match it via equal() to any scan clause.
3603  */
3604  if (list_length(tidquals) > 1)
3605  scan_clauses = list_difference(scan_clauses,
3606  list_make1(make_orclause(tidquals)));
3607 
3608  /* Replace any outer-relation variables with nestloop params */
3609  if (best_path->path.param_info)
3610  {
3611  tidquals = (List *)
3612  replace_nestloop_params(root, (Node *) tidquals);
3613  scan_clauses = (List *)
3614  replace_nestloop_params(root, (Node *) scan_clauses);
3615  }
3616 
3617  scan_plan = make_tidscan(tlist,
3618  scan_clauses,
3619  scan_relid,
3620  tidquals);
3621 
3622  copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
3623 
3624  return scan_plan;
3625 }
3626 
3627 /*
3628  * create_tidrangescan_plan
3629  * Returns a tidrangescan plan for the base relation scanned by 'best_path'
3630  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3631  */
3632 static TidRangeScan *
3634  List *tlist, List *scan_clauses)
3635 {
3636  TidRangeScan *scan_plan;
3637  Index scan_relid = best_path->path.parent->relid;
3638  List *tidrangequals = best_path->tidrangequals;
3639 
3640  /* it should be a base rel... */
3641  Assert(scan_relid > 0);
3642  Assert(best_path->path.parent->rtekind == RTE_RELATION);
3643 
3644  /*
3645  * The qpqual list must contain all restrictions not enforced by the
3646  * tidrangequals list. tidrangequals has AND semantics, so we can simply
3647  * remove any qual that appears in it.
3648  */
3649  {
3650  List *qpqual = NIL;
3651  ListCell *l;
3652 
3653  foreach(l, scan_clauses)
3654  {
3655  RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
3656 
3657  if (rinfo->pseudoconstant)
3658  continue; /* we may drop pseudoconstants here */
3659  if (list_member_ptr(tidrangequals, rinfo))
3660  continue; /* simple duplicate */
3661  qpqual = lappend(qpqual, rinfo);
3662  }
3663  scan_clauses = qpqual;
3664  }
3665 
3666  /* Sort clauses into best execution order */
3667  scan_clauses = order_qual_clauses(root, scan_clauses);
3668 
3669  /* Reduce RestrictInfo lists to bare expressions; ignore pseudoconstants */
3670  tidrangequals = extract_actual_clauses(tidrangequals, false);
3671  scan_clauses = extract_actual_clauses(scan_clauses, false);
3672 
3673  /* Replace any outer-relation variables with nestloop params */
3674  if (best_path->path.param_info)
3675  {
3676  tidrangequals = (List *)
3677  replace_nestloop_params(root, (Node *) tidrangequals);
3678  scan_clauses = (List *)
3679  replace_nestloop_params(root, (Node *) scan_clauses);
3680  }
3681 
3682  scan_plan = make_tidrangescan(tlist,
3683  scan_clauses,
3684  scan_relid,
3685  tidrangequals);
3686 
3687  copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
3688 
3689  return scan_plan;
3690 }
3691 
3692 /*
3693  * create_subqueryscan_plan
3694  * Returns a subqueryscan plan for the base relation scanned by 'best_path'
3695  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3696  */
3697 static SubqueryScan *
3699  List *tlist, List *scan_clauses)
3700 {
3701  SubqueryScan *scan_plan;
3702  RelOptInfo *rel = best_path->path.parent;
3703  Index scan_relid = rel->relid;
3704  Plan *subplan;
3705 
3706  /* it should be a subquery base rel... */
3707  Assert(scan_relid > 0);
3708  Assert(rel->rtekind == RTE_SUBQUERY);
3709 
3710  /*
3711  * Recursively create Plan from Path for subquery. Since we are entering
3712  * a different planner context (subroot), recurse to create_plan not
3713  * create_plan_recurse.
3714  */
3715  subplan = create_plan(rel->subroot, best_path->subpath);
3716 
3717  /* Sort clauses into best execution order */
3718  scan_clauses = order_qual_clauses(root, scan_clauses);
3719 
3720  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3721  scan_clauses = extract_actual_clauses(scan_clauses, false);
3722 
3723  /* Replace any outer-relation variables with nestloop params */
3724  if (best_path->path.param_info)
3725  {
3726  scan_clauses = (List *)
3727  replace_nestloop_params(root, (Node *) scan_clauses);
3729  rel->subplan_params);
3730  }
3731 
3732  scan_plan = make_subqueryscan(tlist,
3733  scan_clauses,
3734  scan_relid,
3735  subplan);
3736 
3737  copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
3738 
3739  return scan_plan;
3740 }
3741 
3742 /*
3743  * create_functionscan_plan
3744  * Returns a functionscan plan for the base relation scanned by 'best_path'
3745  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3746  */
3747 static FunctionScan *
3749  List *tlist, List *scan_clauses)
3750 {
3751  FunctionScan *scan_plan;
3752  Index scan_relid = best_path->parent->relid;
3753  RangeTblEntry *rte;
3754  List *functions;
3755 
3756  /* it should be a function base rel... */
3757  Assert(scan_relid > 0);
3758  rte = planner_rt_fetch(scan_relid, root);
3759  Assert(rte->rtekind == RTE_FUNCTION);
3760  functions = rte->functions;
3761 
3762  /* Sort clauses into best execution order */
3763  scan_clauses = order_qual_clauses(root, scan_clauses);
3764 
3765  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3766  scan_clauses = extract_actual_clauses(scan_clauses, false);
3767 
3768  /* Replace any outer-relation variables with nestloop params */
3769  if (best_path->param_info)
3770  {
3771  scan_clauses = (List *)
3772  replace_nestloop_params(root, (Node *) scan_clauses);
3773  /* The function expressions could contain nestloop params, too */
3775  }
3776 
3777  scan_plan = make_functionscan(tlist, scan_clauses, scan_relid,
3778  functions, rte->funcordinality);
3779 
3780  copy_generic_path_info(&scan_plan->scan.plan, best_path);
3781 
3782  return scan_plan;
3783 }
3784 
3785 /*
3786  * create_tablefuncscan_plan
3787  * Returns a tablefuncscan plan for the base relation scanned by 'best_path'
3788  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3789  */
3790 static TableFuncScan *
3792  List *tlist, List *scan_clauses)
3793 {
3794  TableFuncScan *scan_plan;
3795  Index scan_relid = best_path->parent->relid;
3796  RangeTblEntry *rte;
3797  TableFunc *tablefunc;
3798 
3799  /* it should be a function base rel... */
3800  Assert(scan_relid > 0);
3801  rte = planner_rt_fetch(scan_relid, root);
3802  Assert(rte->rtekind == RTE_TABLEFUNC);
3803  tablefunc = rte->tablefunc;
3804 
3805  /* Sort clauses into best execution order */
3806  scan_clauses = order_qual_clauses(root, scan_clauses);
3807 
3808  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3809  scan_clauses = extract_actual_clauses(scan_clauses, false);
3810 
3811  /* Replace any outer-relation variables with nestloop params */
3812  if (best_path->param_info)
3813  {
3814  scan_clauses = (List *)
3815  replace_nestloop_params(root, (Node *) scan_clauses);
3816  /* The function expressions could contain nestloop params, too */
3817  tablefunc = (TableFunc *) replace_nestloop_params(root, (Node *) tablefunc);
3818  }
3819 
3820  scan_plan = make_tablefuncscan(tlist, scan_clauses, scan_relid,
3821  tablefunc);
3822 
3823  copy_generic_path_info(&scan_plan->scan.plan, best_path);
3824 
3825  return scan_plan;
3826 }
3827 
3828 /*
3829  * create_valuesscan_plan
3830  * Returns a valuesscan plan for the base relation scanned by 'best_path'
3831  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3832  */
3833 static ValuesScan *
3835  List *tlist, List *scan_clauses)
3836 {
3837  ValuesScan *scan_plan;
3838  Index scan_relid = best_path->parent->relid;
3839  RangeTblEntry *rte;
3840  List *values_lists;
3841 
3842  /* it should be a values base rel... */
3843  Assert(scan_relid > 0);
3844  rte = planner_rt_fetch(scan_relid, root);
3845  Assert(rte->rtekind == RTE_VALUES);
3846  values_lists = rte->values_lists;
3847 
3848  /* Sort clauses into best execution order */
3849  scan_clauses = order_qual_clauses(root, scan_clauses);
3850 
3851  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3852  scan_clauses = extract_actual_clauses(scan_clauses, false);
3853 
3854  /* Replace any outer-relation variables with nestloop params */
3855  if (best_path->param_info)
3856  {
3857  scan_clauses = (List *)
3858  replace_nestloop_params(root, (Node *) scan_clauses);
3859  /* The values lists could contain nestloop params, too */
3860  values_lists = (List *)
3861  replace_nestloop_params(root, (Node *) values_lists);
3862  }
3863 
3864  scan_plan = make_valuesscan(tlist, scan_clauses, scan_relid,
3865  values_lists);
3866 
3867  copy_generic_path_info(&scan_plan->scan.plan, best_path);
3868 
3869  return scan_plan;
3870 }
3871 
3872 /*
3873  * create_ctescan_plan
3874  * Returns a ctescan plan for the base relation scanned by 'best_path'
3875  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3876  */
3877 static CteScan *
3879  List *tlist, List *scan_clauses)
3880 {
3881  CteScan *scan_plan;
3882  Index scan_relid = best_path->parent->relid;
3883  RangeTblEntry *rte;
3884  SubPlan *ctesplan = NULL;
3885  int plan_id;
3886  int cte_param_id;
3887  PlannerInfo *cteroot;
3888  Index levelsup;
3889  int ndx;
3890  ListCell *lc;
3891 
3892  Assert(scan_relid > 0);
3893  rte = planner_rt_fetch(scan_relid, root);
3894  Assert(rte->rtekind == RTE_CTE);
3895  Assert(!rte->self_reference);
3896 
3897  /*
3898  * Find the referenced CTE, and locate the SubPlan previously made for it.
3899  */
3900  levelsup = rte->ctelevelsup;
3901  cteroot = root;
3902  while (levelsup-- > 0)
3903  {
3904  cteroot = cteroot->parent_root;
3905  if (!cteroot) /* shouldn't happen */
3906  elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
3907  }
3908 
3909  /*
3910  * Note: cte_plan_ids can be shorter than cteList, if we are still working
3911  * on planning the CTEs (ie, this is a side-reference from another CTE).
3912  * So we mustn't use forboth here.
3913  */
3914  ndx = 0;
3915  foreach(lc, cteroot->parse->cteList)
3916  {
3917  CommonTableExpr *cte = (CommonTableExpr *) lfirst(lc);
3918 
3919  if (strcmp(cte->ctename, rte->ctename) == 0)
3920  break;
3921  ndx++;
3922  }
3923  if (lc == NULL) /* shouldn't happen */
3924  elog(ERROR, "could not find CTE \"%s\"", rte->ctename);
3925  if (ndx >= list_length(cteroot->cte_plan_ids))
3926  elog(ERROR, "could not find plan for CTE \"%s\"", rte->ctename);
3927  plan_id = list_nth_int(cteroot->cte_plan_ids, ndx);
3928  if (plan_id <= 0)
3929  elog(ERROR, "no plan was made for CTE \"%s\"", rte->ctename);
3930  foreach(lc, cteroot->init_plans)
3931  {
3932  ctesplan = (SubPlan *) lfirst(lc);
3933  if (ctesplan->plan_id == plan_id)
3934  break;
3935  }
3936  if (lc == NULL) /* shouldn't happen */
3937  elog(ERROR, "could not find plan for CTE \"%s\"", rte->ctename);
3938 
3939  /*
3940  * We need the CTE param ID, which is the sole member of the SubPlan's
3941  * setParam list.
3942  */
3943  cte_param_id = linitial_int(ctesplan->setParam);
3944 
3945  /* Sort clauses into best execution order */
3946  scan_clauses = order_qual_clauses(root, scan_clauses);
3947 
3948  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3949  scan_clauses = extract_actual_clauses(scan_clauses, false);
3950 
3951  /* Replace any outer-relation variables with nestloop params */
3952  if (best_path->param_info)
3953  {
3954  scan_clauses = (List *)
3955  replace_nestloop_params(root, (Node *) scan_clauses);
3956  }
3957 
3958  scan_plan = make_ctescan(tlist, scan_clauses, scan_relid,
3959  plan_id, cte_param_id);
3960 
3961  copy_generic_path_info(&scan_plan->scan.plan, best_path);
3962 
3963  return scan_plan;
3964 }
3965 
3966 /*
3967  * create_namedtuplestorescan_plan
3968  * Returns a tuplestorescan plan for the base relation scanned by
3969  * 'best_path' with restriction clauses 'scan_clauses' and targetlist
3970  * 'tlist'.
3971  */
3972 static NamedTuplestoreScan *
3974  List *tlist, List *scan_clauses)
3975 {
3976  NamedTuplestoreScan *scan_plan;
3977  Index scan_relid = best_path->parent->relid;
3978  RangeTblEntry *rte;
3979 
3980  Assert(scan_relid > 0);
3981  rte = planner_rt_fetch(scan_relid, root);
3983 
3984  /* Sort clauses into best execution order */
3985  scan_clauses = order_qual_clauses(root, scan_clauses);
3986 
3987  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3988  scan_clauses = extract_actual_clauses(scan_clauses, false);
3989 
3990  /* Replace any outer-relation variables with nestloop params */
3991  if (best_path->param_info)
3992  {
3993  scan_clauses = (List *)
3994  replace_nestloop_params(root, (Node *) scan_clauses);
3995  }
3996 
3997  scan_plan = make_namedtuplestorescan(tlist, scan_clauses, scan_relid,
3998  rte->enrname);
3999 
4000  copy_generic_path_info(&scan_plan->scan.plan, best_path);
4001 
4002  return scan_plan;
4003 }
4004 
4005 /*
4006  * create_resultscan_plan
4007  * Returns a Result plan for the RTE_RESULT base relation scanned by
4008  * 'best_path' with restriction clauses 'scan_clauses' and targetlist
4009  * 'tlist'.
4010  */
4011 static Result *
4013  List *tlist, List *scan_clauses)
4014 {
4015  Result *scan_plan;
4016  Index scan_relid = best_path->parent->relid;
4018 
4019  Assert(scan_relid > 0);
4020  rte = planner_rt_fetch(scan_relid, root);
4021  Assert(rte->rtekind == RTE_RESULT);
4022 
4023  /* Sort clauses into best execution order */
4024  scan_clauses = order_qual_clauses(root, scan_clauses);
4025 
4026  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
4027  scan_clauses = extract_actual_clauses(scan_clauses, false);
4028 
4029  /* Replace any outer-relation variables with nestloop params */
4030  if (best_path->param_info)
4031  {
4032  scan_clauses = (List *)
4033  replace_nestloop_params(root, (Node *) scan_clauses);
4034  }
4035 
4036  scan_plan = make_result(tlist, (Node *) scan_clauses, NULL);
4037 
4038  copy_generic_path_info(&scan_plan->plan, best_path);
4039 
4040  return scan_plan;
4041 }
4042 
4043 /*
4044  * create_worktablescan_plan
4045  * Returns a worktablescan plan for the base relation scanned by 'best_path'
4046  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
4047  */
4048 static WorkTableScan *
4050  List *tlist, List *scan_clauses)
4051 {
4052  WorkTableScan *scan_plan;
4053  Index scan_relid = best_path->parent->relid;
4054  RangeTblEntry *rte;
4055  Index levelsup;
4056  PlannerInfo *cteroot;
4057 
4058  Assert(scan_relid > 0);
4059  rte = planner_rt_fetch(scan_relid, root);
4060  Assert(rte->rtekind == RTE_CTE);
4061  Assert(rte->self_reference);
4062 
4063  /*
4064  * We need to find the worktable param ID, which is in the plan level
4065  * that's processing the recursive UNION, which is one level *below* where
4066  * the CTE comes from.
4067  */
4068  levelsup = rte->ctelevelsup;
4069  if (levelsup == 0) /* shouldn't happen */
4070  elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
4071  levelsup--;
4072  cteroot = root;
4073  while (levelsup-- > 0)
4074  {
4075  cteroot = cteroot->parent_root;
4076  if (!cteroot) /* shouldn't happen */
4077  elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
4078  }
4079  if (cteroot->wt_param_id < 0) /* shouldn't happen */
4080  elog(ERROR, "could not find param ID for CTE \"%s\"", rte->ctename);
4081 
4082  /* Sort clauses into best execution order */
4083  scan_clauses = order_qual_clauses(root, scan_clauses);
4084 
4085  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
4086  scan_clauses = extract_actual_clauses(scan_clauses, false);
4087 
4088  /* Replace any outer-relation variables with nestloop params */
4089  if (best_path->param_info)
4090  {
4091  scan_clauses = (List *)
4092  replace_nestloop_params(root, (Node *) scan_clauses);
4093  }
4094 
4095  scan_plan = make_worktablescan(tlist, scan_clauses, scan_relid,
4096  cteroot->wt_param_id);
4097 
4098  copy_generic_path_info(&scan_plan->scan.plan, best_path);
4099 
4100  return scan_plan;
4101 }
4102 
4103 /*
4104  * create_foreignscan_plan
4105  * Returns a foreignscan plan for the relation scanned by 'best_path'
4106  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
4107  */
4108 static ForeignScan *
4110  List *tlist, List *scan_clauses)
4111 {
4112  ForeignScan *scan_plan;
4113  RelOptInfo *rel = best_path->path.parent;
4114  Index scan_relid = rel->relid;
4115  Oid rel_oid = InvalidOid;
4116  Plan *outer_plan = NULL;
4117 
4118  Assert(rel->fdwroutine != NULL);
4119 
4120  /* transform the child path if any */
4121  if (best_path->fdw_outerpath)
4122  outer_plan = create_plan_recurse(root, best_path->fdw_outerpath,
4123  CP_EXACT_TLIST);
4124 
4125  /*
4126  * If we're scanning a base relation, fetch its OID. (Irrelevant if
4127  * scanning a join relation.)
4128  */
4129  if (scan_relid > 0)
4130  {
4131  RangeTblEntry *rte;
4132 
4133  Assert(rel->rtekind == RTE_RELATION);
4134  rte = planner_rt_fetch(scan_relid, root);
4135  Assert(rte->rtekind == RTE_RELATION);
4136  rel_oid = rte->relid;
4137  }
4138 
4139  /*
4140  * Sort clauses into best execution order. We do this first since the FDW
4141  * might have more info than we do and wish to adjust the ordering.
4142  */
4143  scan_clauses = order_qual_clauses(root, scan_clauses);
4144 
4145  /*
4146  * Let the FDW perform its processing on the restriction clauses and
4147  * generate the plan node. Note that the FDW might remove restriction
4148  * clauses that it intends to execute remotely, or even add more (if it
4149  * has selected some join clauses for remote use but also wants them
4150  * rechecked locally).
4151  */
4152  scan_plan = rel->fdwroutine->GetForeignPlan(root, rel, rel_oid,
4153  best_path,
4154  tlist, scan_clauses,
4155  outer_plan);
4156 
4157  /* Copy cost data from Path to Plan; no need to make FDW do this */
4158  copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
4159 
4160  /* Copy user OID to access as; likewise no need to make FDW do this */
4161  scan_plan->checkAsUser = rel->userid;
4162 
4163  /* Copy foreign server OID; likewise, no need to make FDW do this */
4164  scan_plan->fs_server = rel->serverid;
4165 
4166  /*
4167  * Likewise, copy the relids that are represented by this foreign scan. An
4168  * upper rel doesn't have relids set, but it covers all the relations
4169  * participating in the underlying scan/join, so use root->all_query_rels.
4170  */
4171  if (rel->reloptkind == RELOPT_UPPER_REL)
4172  scan_plan->fs_relids = root->all_query_rels;
4173  else
4174  scan_plan->fs_relids = best_path->path.parent->relids;
4175 
4176  /*
4177  * Join relid sets include relevant outer joins, but FDWs may need to know
4178  * which are the included base rels. That's a bit tedious to get without
4179  * access to the plan-time data structures, so compute it here.
4180  */
4181  scan_plan->fs_base_relids = bms_difference(scan_plan->fs_relids,
4182  root->outer_join_rels);
4183 
4184  /*
4185  * If this is a foreign join, and to make it valid to push down we had to
4186  * assume that the current user is the same as some user explicitly named
4187  * in the query, mark the finished plan as depending on the current user.
4188  */
4189  if (rel->useridiscurrent)
4190  root->glob->dependsOnRole = true;
4191 
4192  /*
4193  * Replace any outer-relation variables with nestloop params in the qual,
4194  * fdw_exprs and fdw_recheck_quals expressions. We do this last so that
4195  * the FDW doesn't have to be involved. (Note that parts of fdw_exprs or
4196  * fdw_recheck_quals could have come from join clauses, so doing this
4197  * beforehand on the scan_clauses wouldn't work.) We assume
4198  * fdw_scan_tlist contains no such variables.
4199  */
4200  if (best_path->path.param_info)
4201  {
4202  scan_plan->scan.plan.qual = (List *)
4203  replace_nestloop_params(root, (Node *) scan_plan->scan.plan.qual);
4204  scan_plan->fdw_exprs = (List *)
4205  replace_nestloop_params(root, (Node *) scan_plan->fdw_exprs);
4206  scan_plan->fdw_recheck_quals = (List *)
4208  (Node *) scan_plan->fdw_recheck_quals);
4209  }
4210 
4211  /*
4212  * If rel is a base relation, detect whether any system columns are
4213  * requested from the rel. (If rel is a join relation, rel->relid will be
4214  * 0, but there can be no Var with relid 0 in the rel's targetlist or the
4215  * restriction clauses, so we skip this in that case. Note that any such
4216  * columns in base relations that were joined are assumed to be contained
4217  * in fdw_scan_tlist.) This is a bit of a kluge and might go away
4218  * someday, so we intentionally leave it out of the API presented to FDWs.
4219  */
4220  scan_plan->fsSystemCol = false;
4221  if (scan_relid > 0)
4222  {
4223  Bitmapset *attrs_used = NULL;
4224  ListCell *lc;
4225  int i;
4226 
4227  /*
4228  * First, examine all the attributes needed for joins or final output.
4229  * Note: we must look at rel's targetlist, not the attr_needed data,
4230  * because attr_needed isn't computed for inheritance child rels.
4231  */
4232  pull_varattnos((Node *) rel->reltarget->exprs, scan_relid, &attrs_used);
4233 
4234  /* Add all the attributes used by restriction clauses. */
4235  foreach(lc, rel->baserestrictinfo)
4236  {
4237  RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
4238 
4239  pull_varattnos((Node *) rinfo->clause, scan_relid, &attrs_used);
4240  }
4241 
4242  /* Now, are any system columns requested from rel? */
4243  for (i = FirstLowInvalidHeapAttributeNumber + 1; i < 0; i++)
4244  {
4246  {
4247  scan_plan->fsSystemCol = true;
4248  break;
4249  }
4250  }
4251 
4252  bms_free(attrs_used);
4253  }
4254 
4255  return scan_plan;
4256 }
4257 
4258 /*
4259  * create_customscan_plan
4260  *
4261  * Transform a CustomPath into a Plan.
4262  */
4263 static CustomScan *
4265  List *tlist, List *scan_clauses)
4266 {
4267  CustomScan *cplan;
4268  RelOptInfo *rel = best_path->path.parent;
4269  List *custom_plans = NIL;
4270  ListCell *lc;
4271 
4272  /* Recursively transform child paths. */
4273  foreach(lc, best_path->custom_paths)
4274  {
4275  Plan *plan = create_plan_recurse(root, (Path *) lfirst(lc),
4276  CP_EXACT_TLIST);
4277 
4278  custom_plans = lappend(custom_plans, plan);
4279  }
4280 
4281  /*
4282  * Sort clauses into the best execution order, although custom-scan
4283  * provider can reorder them again.
4284  */
4285  scan_clauses = order_qual_clauses(root, scan_clauses);
4286 
4287  /*
4288  * Invoke custom plan provider to create the Plan node represented by the
4289  * CustomPath.
4290  */
4291  cplan = castNode(CustomScan,
4292  best_path->methods->PlanCustomPath(root,
4293  rel,
4294  best_path,
4295  tlist,
4296  scan_clauses,
4297  custom_plans));
4298 
4299  /*
4300  * Copy cost data from Path to Plan; no need to make custom-plan providers
4301  * do this
4302  */
4303  copy_generic_path_info(&cplan->scan.plan, &best_path->path);
4304 
4305  /* Likewise, copy the relids that are represented by this custom scan */
4306  cplan->custom_relids = best_path->path.parent->relids;
4307 
4308  /*
4309  * Replace any outer-relation variables with nestloop params in the qual
4310  * and custom_exprs expressions. We do this last so that the custom-plan
4311  * provider doesn't have to be involved. (Note that parts of custom_exprs
4312  * could have come from join clauses, so doing this beforehand on the
4313  * scan_clauses wouldn't work.) We assume custom_scan_tlist contains no
4314  * such variables.
4315  */
4316  if (best_path->path.param_info)
4317  {
4318  cplan->scan.plan.qual = (List *)
4319  replace_nestloop_params(root, (Node *) cplan->scan.plan.qual);
4320  cplan->custom_exprs = (List *)
4321  replace_nestloop_params(root, (Node *) cplan->custom_exprs);
4322  }
4323 
4324  return cplan;
4325 }
4326 
4327 
4328 /*****************************************************************************
4329  *
4330  * JOIN METHODS
4331  *
4332  *****************************************************************************/
4333 
4334 static NestLoop *
4336  NestPath *best_path)
4337 {
4338  NestLoop *join_plan;
4339  Plan *outer_plan;
4340  Plan *inner_plan;
4341  List *tlist = build_path_tlist(root, &best_path->jpath.path);
4342  List *joinrestrictclauses = best_path->jpath.joinrestrictinfo;
4343  List *joinclauses;
4344  List *otherclauses;
4345  Relids outerrelids;
4346  List *nestParams;
4347  Relids saveOuterRels = root->curOuterRels;
4348 
4349  /* NestLoop can project, so no need to be picky about child tlists */
4350  outer_plan = create_plan_recurse(root, best_path->jpath.outerjoinpath, 0);
4351 
4352  /* For a nestloop, include outer relids in curOuterRels for inner side */
4353  root->curOuterRels = bms_union(root->curOuterRels,
4354  best_path->jpath.outerjoinpath->parent->relids);
4355 
4356  inner_plan = create_plan_recurse(root, best_path->jpath.innerjoinpath, 0);
4357 
4358  /* Restore curOuterRels */
4359  bms_free(root->curOuterRels);
4360  root->curOuterRels = saveOuterRels;
4361 
4362  /* Sort join qual clauses into best execution order */
4363  joinrestrictclauses = order_qual_clauses(root, joinrestrictclauses);
4364 
4365  /* Get the join qual clauses (in plain expression form) */
4366  /* Any pseudoconstant clauses are ignored here */
4367  if (IS_OUTER_JOIN(best_path->jpath.jointype))
4368  {
4369  extract_actual_join_clauses(joinrestrictclauses,
4370  best_path->jpath.path.parent->relids,
4371  &joinclauses, &otherclauses);
4372  }
4373  else
4374  {
4375  /* We can treat all clauses alike for an inner join */
4376  joinclauses = extract_actual_clauses(joinrestrictclauses, false);
4377  otherclauses = NIL;
4378  }
4379 
4380  /* Replace any outer-relation variables with nestloop params */
4381  if (best_path->jpath.path.param_info)
4382  {
4383  joinclauses = (List *)
4384  replace_nestloop_params(root, (Node *) joinclauses);
4385  otherclauses = (List *)
4386  replace_nestloop_params(root, (Node *) otherclauses);
4387  }
4388 
4389  /*
4390  * Identify any nestloop parameters that should be supplied by this join
4391  * node, and remove them from root->curOuterParams.
4392  */
4393  outerrelids = best_path->jpath.outerjoinpath->parent->relids;
4394  nestParams = identify_current_nestloop_params(root, outerrelids);
4395 
4396  join_plan = make_nestloop(tlist,
4397  joinclauses,
4398  otherclauses,
4399  nestParams,
4400  outer_plan,
4401  inner_plan,
4402  best_path->jpath.jointype,
4403  best_path->jpath.inner_unique);
4404 
4405  copy_generic_path_info(&join_plan->join.plan, &best_path->jpath.path);
4406 
4407  return join_plan;
4408 }
4409 
4410 static MergeJoin *
4412  MergePath *best_path)
4413 {
4414  MergeJoin *join_plan;
4415  Plan *outer_plan;
4416  Plan *inner_plan;
4417  List *tlist = build_path_tlist(root, &best_path->jpath.path);
4418  List *joinclauses;
4419  List *otherclauses;
4420  List *mergeclauses;
4421  List *outerpathkeys;
4422  List *innerpathkeys;
4423  int nClauses;
4424  Oid *mergefamilies;
4425  Oid *mergecollations;
4426  int *mergestrategies;
4427  bool *mergenullsfirst;
4428  PathKey *opathkey;
4429  EquivalenceClass *opeclass;
4430  int i;
4431  ListCell *lc;
4432  ListCell *lop;
4433  ListCell *lip;
4434  Path *outer_path = best_path->jpath.outerjoinpath;
4435  Path *inner_path = best_path->jpath.innerjoinpath;
4436 
4437  /*
4438  * MergeJoin can project, so we don't have to demand exact tlists from the
4439  * inputs. However, if we're intending to sort an input's result, it's
4440  * best to request a small tlist so we aren't sorting more data than
4441  * necessary.
4442  */
4443  outer_plan = create_plan_recurse(root, best_path->jpath.outerjoinpath,
4444  (best_path->outersortkeys != NIL) ? CP_SMALL_TLIST : 0);
4445 
4446  inner_plan = create_plan_recurse(root, best_path->jpath.innerjoinpath,
4447  (best_path->innersortkeys != NIL) ? CP_SMALL_TLIST : 0);
4448 
4449  /* Sort join qual clauses into best execution order */
4450  /* NB: do NOT reorder the mergeclauses */
4451  joinclauses = order_qual_clauses(root, best_path->jpath.joinrestrictinfo);
4452 
4453  /* Get the join qual clauses (in plain expression form) */
4454  /* Any pseudoconstant clauses are ignored here */
4455  if (IS_OUTER_JOIN(best_path->jpath.jointype))
4456  {
4457  extract_actual_join_clauses(joinclauses,
4458  best_path->jpath.path.parent->relids,
4459  &joinclauses, &otherclauses);
4460  }
4461  else
4462  {
4463  /* We can treat all clauses alike for an inner join */
4464  joinclauses = extract_actual_clauses(joinclauses, false);
4465  otherclauses = NIL;
4466  }
4467 
4468  /*
4469  * Remove the mergeclauses from the list of join qual clauses, leaving the
4470  * list of quals that must be checked as qpquals.
4471  */
4472  mergeclauses = get_actual_clauses(best_path->path_mergeclauses);
4473  joinclauses = list_difference(joinclauses, mergeclauses);
4474 
4475  /*
4476  * Replace any outer-relation variables with nestloop params. There
4477  * should not be any in the mergeclauses.
4478  */
4479  if (best_path->jpath.path.param_info)
4480  {
4481  joinclauses = (List *)
4482  replace_nestloop_params(root, (Node *) joinclauses);
4483  otherclauses = (List *)
4484  replace_nestloop_params(root, (Node *) otherclauses);
4485  }
4486 
4487  /*
4488  * Rearrange mergeclauses, if needed, so that the outer variable is always
4489  * on the left; mark the mergeclause restrictinfos with correct
4490  * outer_is_left status.
4491  */
4492  mergeclauses = get_switched_clauses(best_path->path_mergeclauses,
4493  best_path->jpath.outerjoinpath->parent->relids);
4494 
4495  /*
4496  * Create explicit sort nodes for the outer and inner paths if necessary.
4497  */
4498  if (best_path->outersortkeys)
4499  {
4500  Relids outer_relids = outer_path->parent->relids;
4501  Sort *sort = make_sort_from_pathkeys(outer_plan,
4502  best_path->outersortkeys,
4503  outer_relids);
4504 
4505  label_sort_with_costsize(root, sort, -1.0);
4506  outer_plan = (Plan *) sort;
4507  outerpathkeys = best_path->outersortkeys;
4508  }
4509  else
4510  outerpathkeys = best_path->jpath.outerjoinpath->pathkeys;
4511 
4512  if (best_path->innersortkeys)
4513  {
4514  Relids inner_relids = inner_path->parent->relids;
4515  Sort *sort = make_sort_from_pathkeys(inner_plan,
4516  best_path->innersortkeys,
4517  inner_relids);
4518 
4519  label_sort_with_costsize(root, sort, -1.0);
4520  inner_plan = (Plan *) sort;
4521  innerpathkeys = best_path->innersortkeys;
4522  }
4523  else
4524  innerpathkeys = best_path->jpath.innerjoinpath->pathkeys;
4525 
4526  /*
4527  * If specified, add a materialize node to shield the inner plan from the
4528  * need to handle mark/restore.
4529  */
4530  if (best_path->materialize_inner)
4531  {
4532  Plan *matplan = (Plan *) make_material(inner_plan);
4533 
4534  /*
4535  * We assume the materialize will not spill to disk, and therefore
4536  * charge just cpu_operator_cost per tuple. (Keep this estimate in
4537  * sync with final_cost_mergejoin.)
4538  */
4539  copy_plan_costsize(matplan, inner_plan);
4540  matplan->total_cost += cpu_operator_cost * matplan->plan_rows;
4541 
4542  inner_plan = matplan;
4543  }
4544 
4545  /*
4546  * Compute the opfamily/collation/strategy/nullsfirst arrays needed by the
4547  * executor. The information is in the pathkeys for the two inputs, but
4548  * we need to be careful about the possibility of mergeclauses sharing a
4549  * pathkey, as well as the possibility that the inner pathkeys are not in
4550  * an order matching the mergeclauses.
4551  */
4552  nClauses = list_length(mergeclauses);
4553  Assert(nClauses == list_length(best_path->path_mergeclauses));
4554  mergefamilies = (Oid *) palloc(nClauses * sizeof(Oid));
4555  mergecollations = (Oid *) palloc(nClauses * sizeof(Oid));
4556  mergestrategies = (int *) palloc(nClauses * sizeof(int));
4557  mergenullsfirst = (bool *) palloc(nClauses * sizeof(bool));
4558 
4559  opathkey = NULL;
4560  opeclass = NULL;
4561  lop = list_head(outerpathkeys);
4562  lip = list_head(innerpathkeys);
4563  i = 0;
4564  foreach(lc, best_path->path_mergeclauses)
4565  {
4566  RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
4567  EquivalenceClass *oeclass;
4568  EquivalenceClass *ieclass;
4569  PathKey *ipathkey = NULL;
4570  EquivalenceClass *ipeclass = NULL;
4571  bool first_inner_match = false;
4572 
4573  /* fetch outer/inner eclass from mergeclause */
4574  if (rinfo->outer_is_left)
4575  {
4576  oeclass = rinfo->left_ec;
4577  ieclass = rinfo->right_ec;
4578  }
4579  else
4580  {
4581  oeclass = rinfo->right_ec;
4582  ieclass = rinfo->left_ec;
4583  }
4584  Assert(oeclass != NULL);
4585  Assert(ieclass != NULL);
4586 
4587  /*
4588  * We must identify the pathkey elements associated with this clause
4589  * by matching the eclasses (which should give a unique match, since
4590  * the pathkey lists should be canonical). In typical cases the merge
4591  * clauses are one-to-one with the pathkeys, but when dealing with
4592  * partially redundant query conditions, things are more complicated.
4593  *
4594  * lop and lip reference the first as-yet-unmatched pathkey elements.
4595  * If they're NULL then all pathkey elements have been matched.
4596  *
4597  * The ordering of the outer pathkeys should match the mergeclauses,
4598  * by construction (see find_mergeclauses_for_outer_pathkeys()). There
4599  * could be more than one mergeclause for the same outer pathkey, but
4600  * no pathkey may be entirely skipped over.
4601  */
4602  if (oeclass != opeclass) /* multiple matches are not interesting */
4603  {
4604  /* doesn't match the current opathkey, so must match the next */
4605  if (lop == NULL)
4606  elog(ERROR, "outer pathkeys do not match mergeclauses");
4607  opathkey = (PathKey *) lfirst(lop);
4608  opeclass = opathkey->pk_eclass;
4609  lop = lnext(outerpathkeys, lop);
4610  if (oeclass != opeclass)
4611  elog(ERROR, "outer pathkeys do not match mergeclauses");
4612  }
4613 
4614  /*
4615  * The inner pathkeys likewise should not have skipped-over keys, but
4616  * it's possible for a mergeclause to reference some earlier inner
4617  * pathkey if we had redundant pathkeys. For example we might have
4618  * mergeclauses like "o.a = i.x AND o.b = i.y AND o.c = i.x". The
4619  * implied inner ordering is then "ORDER BY x, y, x", but the pathkey
4620  * mechanism drops the second sort by x as redundant, and this code
4621  * must cope.
4622  *
4623  * It's also possible for the implied inner-rel ordering to be like
4624  * "ORDER BY x, y, x DESC". We still drop the second instance of x as
4625  * redundant; but this means that the sort ordering of a redundant
4626  * inner pathkey should not be considered significant. So we must
4627  * detect whether this is the first clause matching an inner pathkey.
4628  */
4629  if (lip)
4630  {
4631  ipathkey = (PathKey *) lfirst(lip);
4632  ipeclass = ipathkey->pk_eclass;
4633  if (ieclass == ipeclass)
4634  {
4635  /* successful first match to this inner pathkey */
4636  lip = lnext(innerpathkeys, lip);
4637  first_inner_match = true;
4638  }
4639  }
4640  if (!first_inner_match)
4641  {
4642  /* redundant clause ... must match something before lip */
4643  ListCell *l2;
4644 
4645  foreach(l2, innerpathkeys)
4646  {
4647  if (l2 == lip)
4648  break;
4649  ipathkey = (PathKey *) lfirst(l2);
4650  ipeclass = ipathkey->pk_eclass;
4651  if (ieclass == ipeclass)
4652  break;
4653  }
4654  if (ieclass != ipeclass)
4655  elog(ERROR, "inner pathkeys do not match mergeclauses");
4656  }
4657 
4658  /*
4659  * The pathkeys should always match each other as to opfamily and
4660  * collation (which affect equality), but if we're considering a
4661  * redundant inner pathkey, its sort ordering might not match. In
4662  * such cases we may ignore the inner pathkey's sort ordering and use
4663  * the outer's. (In effect, we're lying to the executor about the
4664  * sort direction of this inner column, but it does not matter since
4665  * the run-time row comparisons would only reach this column when
4666  * there's equality for the earlier column containing the same eclass.
4667  * There could be only one value in this column for the range of inner
4668  * rows having a given value in the earlier column, so it does not
4669  * matter which way we imagine this column to be ordered.) But a
4670  * non-redundant inner pathkey had better match outer's ordering too.
4671  */
4672  if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
4673  opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation)
4674  elog(ERROR, "left and right pathkeys do not match in mergejoin");
4675  if (first_inner_match &&
4676  (opathkey->pk_strategy != ipathkey->pk_strategy ||
4677  opathkey->pk_nulls_first != ipathkey->pk_nulls_first))
4678  elog(ERROR, "left and right pathkeys do not match in mergejoin");
4679 
4680  /* OK, save info for executor */
4681  mergefamilies[i] = opathkey->pk_opfamily;
4682  mergecollations[i] = opathkey->pk_eclass->ec_collation;
4683  mergestrategies[i] = opathkey->pk_strategy;
4684  mergenullsfirst[i] = opathkey->pk_nulls_first;
4685  i++;
4686  }
4687 
4688  /*
4689  * Note: it is not an error if we have additional pathkey elements (i.e.,
4690  * lop or lip isn't NULL here). The input paths might be better-sorted
4691  * than we need for the current mergejoin.
4692  */
4693 
4694  /*
4695  * Now we can build the mergejoin node.
4696  */
4697  join_plan = make_mergejoin(tlist,
4698  joinclauses,
4699  otherclauses,
4700  mergeclauses,
4701  mergefamilies,
4702  mergecollations,
4703  mergestrategies,
4704  mergenullsfirst,
4705  outer_plan,
4706  inner_plan,
4707  best_path->jpath.jointype,
4708  best_path->jpath.inner_unique,
4709  best_path->skip_mark_restore);
4710 
4711  /* Costs of sort and material steps are included in path cost already */
4712  copy_generic_path_info(&join_plan->join.plan, &best_path->jpath.path);
4713 
4714  return join_plan;
4715 }
4716 
4717 static HashJoin *
4719  HashPath *best_path)
4720 {
4721  HashJoin *join_plan;
4722  Hash *hash_plan;
4723  Plan *outer_plan;
4724  Plan *inner_plan;
4725  List *tlist = build_path_tlist(root, &best_path->jpath.path);
4726  List *joinclauses;
4727  List *otherclauses;
4728  List *hashclauses;
4729  List *hashoperators = NIL;
4730  List *hashcollations = NIL;
4731  List *inner_hashkeys = NIL;
4732  List *outer_hashkeys = NIL;
4733  Oid skewTable = InvalidOid;
4734  AttrNumber skewColumn = InvalidAttrNumber;
4735  bool skewInherit = false;
4736  ListCell *lc;
4737 
4738  /*
4739  * HashJoin can project, so we don't have to demand exact tlists from the
4740  * inputs. However, it's best to request a small tlist from the inner
4741  * side, so that we aren't storing more data than necessary. Likewise, if
4742  * we anticipate batching, request a small tlist from the outer side so
4743  * that we don't put extra data in the outer batch files.
4744  */
4745  outer_plan = create_plan_recurse(root, best_path->jpath.outerjoinpath,
4746  (best_path->num_batches > 1) ? CP_SMALL_TLIST : 0);
4747 
4748  inner_plan = create_plan_recurse(root, best_path->jpath.innerjoinpath,
4749  CP_SMALL_TLIST);
4750 
4751  /* Sort join qual clauses into best execution order */
4752  joinclauses = order_qual_clauses(root, best_path->jpath.joinrestrictinfo);
4753  /* There's no point in sorting the hash clauses ... */
4754 
4755  /* Get the join qual clauses (in plain expression form) */
4756  /* Any pseudoconstant clauses are ignored here */
4757  if (IS_OUTER_JOIN(best_path->jpath.jointype))
4758  {
4759  extract_actual_join_clauses(joinclauses,
4760  best_path->jpath.path.parent->relids,
4761  &joinclauses, &otherclauses);
4762  }
4763  else
4764  {
4765  /* We can treat all clauses alike for an inner join */
4766  joinclauses = extract_actual_clauses(joinclauses, false);
4767  otherclauses = NIL;
4768  }
4769 
4770  /*
4771  * Remove the hashclauses from the list of join qual clauses, leaving the
4772  * list of quals that must be checked as qpquals.
4773  */
4774  hashclauses = get_actual_clauses(best_path->path_hashclauses);
4775  joinclauses = list_difference(joinclauses, hashclauses);
4776 
4777  /*
4778  * Replace any outer-relation variables with nestloop params. There
4779  * should not be any in the hashclauses.
4780  */
4781  if (best_path->jpath.path.param_info)
4782  {
4783  joinclauses = (List *)
4784  replace_nestloop_params(root, (Node *) joinclauses);
4785  otherclauses = (List *)
4786  replace_nestloop_params(root, (Node *) otherclauses);
4787  }
4788 
4789  /*
4790  * Rearrange hashclauses, if needed, so that the outer variable is always
4791  * on the left.
4792  */
4793  hashclauses = get_switched_clauses(best_path->path_hashclauses,
4794  best_path->jpath.outerjoinpath->parent->relids);
4795 
4796  /*
4797  * If there is a single join clause and we can identify the outer variable
4798  * as a simple column reference, supply its identity for possible use in
4799  * skew optimization. (Note: in principle we could do skew optimization
4800  * with multiple join clauses, but we'd have to be able to determine the
4801  * most common combinations of outer values, which we don't currently have
4802  * enough stats for.)
4803  */
4804  if (list_length(hashclauses) == 1)
4805  {
4806  OpExpr *clause = (OpExpr *) linitial(hashclauses);
4807  Node *node;
4808 
4809  Assert(is_opclause(clause));
4810  node = (Node *) linitial(clause->args);
4811  if (IsA(node, RelabelType))
4812  node = (Node *) ((RelabelType *) node)->arg;
4813  if (IsA(node, Var))
4814  {
4815  Var *var = (Var *) node;
4816  RangeTblEntry *rte;
4817 
4818  rte = root->simple_rte_array[var->varno];
4819  if (rte->rtekind == RTE_RELATION)
4820  {
4821  skewTable = rte->relid;
4822  skewColumn = var->varattno;
4823  skewInherit = rte->inh;
4824  }
4825  }
4826  }
4827 
4828  /*
4829  * Collect hash related information. The hashed expressions are
4830  * deconstructed into outer/inner expressions, so they can be computed
4831  * separately (inner expressions are used to build the hashtable via Hash,
4832  * outer expressions to perform lookups of tuples from HashJoin's outer
4833  * plan in the hashtable). Also collect operator information necessary to
4834  * build the hashtable.
4835  */
4836  foreach(lc, hashclauses)
4837  {
4838  OpExpr *hclause = lfirst_node(OpExpr, lc);
4839 
4840  hashoperators = lappend_oid(hashoperators, hclause->opno);
4841  hashcollations = lappend_oid(hashcollations, hclause->inputcollid);
4842  outer_hashkeys = lappend(outer_hashkeys, linitial(hclause->args));
4843  inner_hashkeys = lappend(inner_hashkeys, lsecond(hclause->args));
4844  }
4845 
4846  /*
4847  * Build the hash node and hash join node.
4848  */
4849  hash_plan = make_hash(inner_plan,
4850  inner_hashkeys,
4851  skewTable,
4852  skewColumn,
4853  skewInherit);
4854 
4855  /*
4856  * Set Hash node's startup & total costs equal to total cost of input
4857  * plan; this only affects EXPLAIN display not decisions.
4858  */
4859  copy_plan_costsize(&hash_plan->plan, inner_plan);
4860  hash_plan->plan.startup_cost = hash_plan->plan.total_cost;
4861 
4862  /*
4863  * If parallel-aware, the executor will also need an estimate of the total
4864  * number of rows expected from all participants so that it can size the
4865  * shared hash table.
4866  */
4867  if (best_path->jpath.path.parallel_aware)
4868  {
4869  hash_plan->plan.parallel_aware = true;
4870  hash_plan->rows_total = best_path->inner_rows_total;
4871  }
4872 
4873  join_plan = make_hashjoin(tlist,
4874  joinclauses,
4875  otherclauses,
4876  hashclauses,
4877  hashoperators,
4878  hashcollations,
4879  outer_hashkeys,
4880  outer_plan,
4881  (Plan *) hash_plan,
4882  best_path->jpath.jointype,
4883  best_path->jpath.inner_unique);
4884 
4885  copy_generic_path_info(&join_plan->join.plan, &best_path->jpath.path);
4886 
4887  return join_plan;
4888 }
4889 
4890 
4891 /*****************************************************************************
4892  *
4893  * SUPPORTING ROUTINES
4894  *
4895  *****************************************************************************/
4896 
4897 /*
4898  * replace_nestloop_params
4899  * Replace outer-relation Vars and PlaceHolderVars in the given expression
4900  * with nestloop Params
4901  *
4902  * All Vars and PlaceHolderVars belonging to the relation(s) identified by
4903  * root->curOuterRels are replaced by Params, and entries are added to
4904  * root->curOuterParams if not already present.
4905  */
4906 static Node *
4908 {
4909  /* No setup needed for tree walk, so away we go */
4910  return replace_nestloop_params_mutator(expr, root);
4911 }
4912 
4913 static Node *
4915 {
4916  if (node == NULL)
4917  return NULL;
4918  if (IsA(node, Var))
4919  {
4920  Var *var = (Var *) node;
4921 
4922  /* Upper-level Vars should be long gone at this point */
4923  Assert(var->varlevelsup == 0);
4924  /* If not to be replaced, we can just return the Var unmodified */
4925  if (IS_SPECIAL_VARNO(var->varno) ||
4926  !bms_is_member(var->varno, root->curOuterRels))
4927  return node;
4928  /* Replace the Var with a nestloop Param */
4929  return (Node *) replace_nestloop_param_var(root, var);
4930  }
4931  if (IsA(node, PlaceHolderVar))
4932  {
4933  PlaceHolderVar *phv = (PlaceHolderVar *) node;
4934 
4935  /* Upper-level PlaceHolderVars should be long gone at this point */
4936  Assert(phv->phlevelsup == 0);
4937 
4938  /* Check whether we need to replace the PHV */
4939  if (!bms_is_subset(find_placeholder_info(root, phv)->ph_eval_at,
4940  root->curOuterRels))
4941  {
4942  /*
4943  * We can't replace the whole PHV, but we might still need to
4944  * replace Vars or PHVs within its expression, in case it ends up
4945  * actually getting evaluated here. (It might get evaluated in
4946  * this plan node, or some child node; in the latter case we don't
4947  * really need to process the expression here, but we haven't got
4948  * enough info to tell if that's the case.) Flat-copy the PHV
4949  * node and then recurse on its expression.
4950  *
4951  * Note that after doing this, we might have different
4952  * representations of the contents of the same PHV in different
4953  * parts of the plan tree. This is OK because equal() will just
4954  * match on phid/phlevelsup, so setrefs.c will still recognize an
4955  * upper-level reference to a lower-level copy of the same PHV.
4956  */
4958 
4959  memcpy(newphv, phv, sizeof(PlaceHolderVar));
4960  newphv->phexpr = (Expr *)
4961  replace_nestloop_params_mutator((Node *) phv->phexpr,
4962  root);
4963  return (Node *) newphv;
4964  }
4965  /* Replace the PlaceHolderVar with a nestloop Param */
4966  return (Node *) replace_nestloop_param_placeholdervar(root, phv);
4967  }
4968  return expression_tree_mutator(node,
4970  (void *) root);
4971 }
4972 
4973 /*
4974  * fix_indexqual_references
4975  * Adjust indexqual clauses to the form the executor's indexqual
4976  * machinery needs.
4977  *
4978  * We have three tasks here:
4979  * * Select the actual qual clauses out of the input IndexClause list,
4980  * and remove RestrictInfo nodes from the qual clauses.
4981  * * Replace any outer-relation Var or PHV nodes with nestloop Params.
4982  * (XXX eventually, that responsibility should go elsewhere?)
4983  * * Index keys must be represented by Var nodes with varattno set to the
4984  * index's attribute number, not the attribute number in the original rel.
4985  *
4986  * *stripped_indexquals_p receives a list of the actual qual clauses.
4987  *
4988  * *fixed_indexquals_p receives a list of the adjusted quals. This is a copy
4989  * that shares no substructure with the original; this is needed in case there
4990  * are subplans in it (we need two separate copies of the subplan tree, or
4991  * things will go awry).
4992  */
4993 static void
4995  List **stripped_indexquals_p, List **fixed_indexquals_p)
4996 {
4997  IndexOptInfo *index = index_path->indexinfo;
4998  List *stripped_indexquals;
4999  List *fixed_indexquals;
5000  ListCell *lc;
5001 
5002  stripped_indexquals = fixed_indexquals = NIL;
5003 
5004  foreach(lc, index_path->indexclauses)
5005  {
5006  IndexClause *iclause = lfirst_node(IndexClause, lc);
5007  int indexcol = iclause->indexcol;
5008  ListCell *lc2;
5009 
5010  foreach(lc2, iclause->indexquals)
5011  {
5012  RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc2);
5013  Node *clause = (Node *) rinfo->clause;
5014 
5015  stripped_indexquals = lappend(stripped_indexquals, clause);
5016  clause = fix_indexqual_clause(root, index, indexcol,
5017  clause, iclause->indexcols);
5018  fixed_indexquals = lappend(fixed_indexquals, clause);
5019  }
5020  }
5021 
5022  *stripped_indexquals_p = stripped_indexquals;
5023  *fixed_indexquals_p = fixed_indexquals;
5024 }
5025 
5026 /*
5027  * fix_indexorderby_references
5028  * Adjust indexorderby clauses to the form the executor's index
5029  * machinery needs.
5030  *
5031  * This is a simplified version of fix_indexqual_references. The input is
5032  * bare clauses and a separate indexcol list, instead of IndexClauses.
5033  */
5034 static List *
5036 {
5037  IndexOptInfo *index = index_path->indexinfo;
5038  List *fixed_indexorderbys;
5039  ListCell *lcc,
5040  *lci;
5041 
5042  fixed_indexorderbys = NIL;
5043 
5044  forboth(lcc, index_path->indexorderbys, lci, index_path->indexorderbycols)
5045  {
5046  Node *clause = (Node *) lfirst(lcc);
5047  int indexcol = lfirst_int(lci);
5048 
5049  clause = fix_indexqual_clause(root, index, indexcol, clause, NIL);
5050  fixed_indexorderbys = lappend(fixed_indexorderbys, clause);
5051  }
5052 
5053  return fixed_indexorderbys;
5054 }
5055 
5056 /*
5057  * fix_indexqual_clause
5058  * Convert a single indexqual clause to the form needed by the executor.
5059  *
5060  * We replace nestloop params here, and replace the index key variables
5061  * or expressions by index Var nodes.
5062  */
5063 static Node *
5065  Node *clause, List *indexcolnos)
5066 {
5067  /*
5068  * Replace any outer-relation variables with nestloop params.
5069  *
5070  * This also makes a copy of the clause, so it's safe to modify it
5071  * in-place below.
5072  */
5073  clause = replace_nestloop_params(root, clause);
5074 
5075  if (IsA(clause, OpExpr))
5076  {
5077  OpExpr *op = (OpExpr *) clause;
5078 
5079  /* Replace the indexkey expression with an index Var. */
5081  index,
5082  indexcol);
5083  }
5084  else if (IsA(clause, RowCompareExpr))
5085  {
5086  RowCompareExpr *rc = (RowCompareExpr *) clause;
5087  ListCell *lca,
5088  *lcai;
5089 
5090  /* Replace the indexkey expressions with index Vars. */
5091  Assert(list_length(rc->largs) == list_length(indexcolnos));
5092  forboth(lca, rc->largs, lcai, indexcolnos)
5093  {
5095  index,
5096  lfirst_int(lcai));
5097  }
5098  }
5099  else if (IsA(clause, ScalarArrayOpExpr))
5100  {
5101  ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
5102 
5103  /* Replace the indexkey expression with an index Var. */
5105  index,
5106  indexcol);
5107  }
5108  else if (IsA(clause, NullTest))
5109  {
5110  NullTest *nt = (NullTest *) clause;
5111 
5112  /* Replace the indexkey expression with an index Var. */
5113  nt->arg = (Expr *) fix_indexqual_operand((Node *) nt->arg,
5114  index,
5115  indexcol);
5116  }
5117  else
5118  elog(ERROR, "unsupported indexqual type: %d",
5119  (int) nodeTag(clause));
5120 
5121  return clause;
5122 }
5123 
5124 /*
5125  * fix_indexqual_operand
5126  * Convert an indexqual expression to a Var referencing the index column.
5127  *
5128  * We represent index keys by Var nodes having varno == INDEX_VAR and varattno
5129  * equal to the index's attribute number (index column position).
5130  *
5131  * Most of the code here is just for sanity cross-checking that the given
5132  * expression actually matches the index column it's claimed to.
5133  */
5134 static Node *
5136 {
5137  Var *result;
5138  int pos;
5139  ListCell *indexpr_item;
5140 
5141  /*
5142  * Remove any binary-compatible relabeling of the indexkey
5143  */
5144  if (IsA(node, RelabelType))
5145  node = (Node *) ((RelabelType *) node)->arg;
5146 
5147  Assert(indexcol >= 0 && indexcol < index->ncolumns);
5148 
5149  if (index->indexkeys[indexcol] != 0)
5150  {
5151  /* It's a simple index column */
5152  if (IsA(node, Var) &&
5153  ((Var *) node)->varno == index->rel->relid &&
5154  ((Var *) node)->varattno == index->indexkeys[indexcol])
5155  {
5156  result = (Var *) copyObject(node);
5157  result->varno = INDEX_VAR;
5158  result->varattno = indexcol + 1;
5159  return (Node *) result;
5160  }
5161  else
5162  elog(ERROR, "index key does not match expected index column");
5163  }
5164 
5165  /* It's an index expression, so find and cross-check the expression */
5166  indexpr_item = list_head(index->indexprs);
5167  for (pos = 0; pos < index->ncolumns; pos++)
5168  {
5169  if (index->indexkeys[pos] == 0)
5170  {
5171  if (indexpr_item == NULL)
5172  elog(ERROR, "too few entries in indexprs list");
5173  if (pos == indexcol)
5174  {
5175  Node *indexkey;
5176 
5177  indexkey = (Node *) lfirst(indexpr_item);
5178  if (indexkey && IsA(indexkey, RelabelType))
5179  indexkey = (Node *) ((RelabelType *) indexkey)->arg;
5180  if (equal(node, indexkey))
5181  {
5182  result = makeVar(INDEX_VAR, indexcol + 1,
5183  exprType(lfirst(indexpr_item)), -1,
5184  exprCollation(lfirst(indexpr_item)),
5185  0);
5186  return (Node *) result;
5187  }
5188  else
5189  elog(ERROR, "index key does not match expected index column");
5190  }
5191  indexpr_item = lnext(index->indexprs, indexpr_item);
5192  }
5193  }
5194 
5195  /* Oops... */
5196  elog(ERROR, "index key does not match expected index column");
5197  return NULL; /* keep compiler quiet */
5198 }
5199 
5200 /*
5201  * get_switched_clauses
5202  * Given a list of merge or hash joinclauses (as RestrictInfo nodes),
5203  * extract the bare clauses, and rearrange the elements within the
5204  * clauses, if needed, so the outer join variable is on the left and
5205  * the inner is on the right. The original clause data structure is not
5206  * touched; a modified list is returned. We do, however, set the transient
5207  * outer_is_left field in each RestrictInfo to show which side was which.
5208  */
5209 static List *
5210 get_switched_clauses(List *clauses, Relids outerrelids)
5211 {
5212  List *t_list = NIL;
5213  ListCell *l;
5214 
5215  foreach(l, clauses)
5216  {
5217  RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(l);
5218  OpExpr *clause = (OpExpr *) restrictinfo->clause;
5219 
5220  Assert(is_opclause(clause));
5221  if (bms_is_subset(restrictinfo->right_relids, outerrelids))
5222  {
5223  /*
5224  * Duplicate just enough of the structure to allow commuting the
5225  * clause without changing the original list. Could use
5226  * copyObject, but a complete deep copy is overkill.
5227  */
5228  OpExpr *temp = makeNode(OpExpr);
5229 
5230  temp->opno = clause->opno;
5231  temp->opfuncid = InvalidOid;
5232  temp->opresulttype = clause->opresulttype;
5233  temp->opretset = clause->opretset;
5234  temp->opcollid = clause->opcollid;
5235  temp->inputcollid = clause->inputcollid;
5236  temp->args = list_copy(clause->args);
5237  temp->location = clause->location;
5238  /* Commute it --- note this modifies the temp node in-place. */
5239  CommuteOpExpr(temp);
5240  t_list = lappend(t_list, temp);
5241  restrictinfo->outer_is_left = false;
5242  }
5243  else
5244  {
5245  Assert(bms_is_subset(restrictinfo->left_relids, outerrelids));
5246  t_list = lappend(t_list, clause);
5247  restrictinfo->outer_is_left = true;
5248  }
5249  }
5250  return t_list;
5251 }
5252 
5253 /*
5254  * order_qual_clauses
5255  * Given a list of qual clauses that will all be evaluated at the same
5256  * plan node, sort the list into the order we want to check the quals
5257  * in at runtime.
5258  *
5259  * When security barrier quals are used in the query, we may have quals with
5260  * different security levels in the list. Quals of lower security_level
5261  * must go before quals of higher security_level, except that we can grant
5262  * exceptions to move up quals that are leakproof. When security level
5263  * doesn't force the decision, we prefer to order clauses by estimated
5264  * execution cost, cheapest first.
5265  *
5266  * Ideally the order should be driven by a combination of execution cost and
5267  * selectivity, but it's not immediately clear how to account for both,
5268  * and given the uncertainty of the estimates the reliability of the decisions
5269  * would be doubtful anyway. So we just order by security level then
5270  * estimated per-tuple cost, being careful not to change the order when
5271  * (as is often the case) the estimates are identical.
5272  *
5273  * Although this will work on either bare clauses or RestrictInfos, it's
5274  * much faster to apply it to RestrictInfos, since it can re-use cost
5275  * information that is cached in RestrictInfos. XXX in the bare-clause
5276  * case, we are also not able to apply security considerations. That is
5277  * all right for the moment, because the bare-clause case doesn't occur
5278  * anywhere that barrier quals could be present, but it would be better to
5279  * get rid of it.
5280  *
5281  * Note: some callers pass lists that contain entries that will later be
5282  * removed; this is the easiest way to let this routine see RestrictInfos
5283  * instead of bare clauses. This is another reason why trying to consider
5284  * selectivity in the ordering would likely do the wrong thing.
5285  */
5286 static List *
5288 {
5289  typedef struct
5290  {
5291  Node *clause;
5292  Cost cost;
5293  Index security_level;
5294  } QualItem;
5295  int nitems = list_length(clauses);
5296  QualItem *items;
5297  ListCell *lc;
5298  int i;
5299  List *result;
5300 
5301  /* No need to work hard for 0 or 1 clause */
5302  if (nitems <= 1)
5303  return clauses;
5304 
5305  /*
5306  * Collect the items and costs into an array. This is to avoid repeated
5307  * cost_qual_eval work if the inputs aren't RestrictInfos.
5308  */
5309  items = (QualItem *) palloc(nitems * sizeof(QualItem));
5310  i = 0;
5311  foreach(lc, clauses)
5312  {
5313  Node *clause = (Node *) lfirst(lc);
5314  QualCost qcost;
5315 
5316  cost_qual_eval_node(&qcost, clause, root);
5317  items[i].clause = clause;
5318  items[i].cost = qcost.per_tuple;
5319  if (IsA(clause, RestrictInfo))
5320  {
5321  RestrictInfo *rinfo = (RestrictInfo *) clause;
5322 
5323  /*
5324  * If a clause is leakproof, it doesn't have to be constrained by
5325  * its nominal security level. If it's also reasonably cheap
5326  * (here defined as 10X cpu_operator_cost), pretend it has
5327  * security_level 0, which will allow it to go in front of
5328  * more-expensive quals of lower security levels. Of course, that
5329  * will also force it to go in front of cheaper quals of its own
5330  * security level, which is not so great, but we can alleviate
5331  * that risk by applying the cost limit cutoff.
5332  */
5333  if (rinfo->leakproof && items[i].cost < 10 * cpu_operator_cost)
5334  items[i].security_level = 0;
5335  else
5336  items[i].security_level = rinfo->security_level;
5337  }
5338  else
5339  items[i].security_level = 0;
5340  i++;
5341  }
5342 
5343  /*
5344  * Sort. We don't use qsort() because it's not guaranteed stable for
5345  * equal keys. The expected number of entries is small enough that a
5346  * simple insertion sort should be good enough.
5347  */
5348  for (i = 1; i < nitems; i++)
5349  {
5350  QualItem newitem = items[i];
5351  int j;
5352 
5353  /* insert newitem into the already-sorted subarray */
5354  for (j = i; j > 0; j--)
5355  {
5356  QualItem *olditem = &items[j - 1];
5357 
5358  if (newitem.security_level > olditem->security_level ||
5359  (newitem.security_level == olditem->security_level &&
5360  newitem.cost >= olditem->cost))
5361  break;
5362  items[j] = *olditem;
5363  }
5364  items[j] = newitem;
5365  }
5366 
5367  /* Convert back to a list */
5368  result = NIL;
5369  for (i = 0; i < nitems; i++)
5370  result = lappend(result, items[i].clause);
5371 
5372  return result;
5373 }
5374 
5375 /*
5376  * Copy cost and size info from a Path node to the Plan node created from it.
5377  * The executor usually won't use this info, but it's needed by EXPLAIN.
5378  * Also copy the parallel-related flags, which the executor *will* use.
5379  */
5380 static void
5382 {
5383  dest->startup_cost = src->startup_cost;
5384  dest->total_cost = src->total_cost;
5385  dest->plan_rows = src->rows;
5386  dest->plan_width = src->pathtarget->width;
5387  dest->parallel_aware = src->parallel_aware;
5388  dest->parallel_safe = src->parallel_safe;
5389 }
5390 
5391 /*
5392  * Copy cost and size info from a lower plan node to an inserted node.
5393  * (Most callers alter the info after copying it.)
5394  */
5395 static void
5397 {
5398  dest->startup_cost = src->startup_cost;
5399  dest->total_cost = src->total_cost;
5400  dest->plan_rows = src->plan_rows;
5401  dest->plan_width = src->plan_width;
5402  /* Assume the inserted node is not parallel-aware. */
5403  dest->parallel_aware = false;
5404  /* Assume the inserted node is parallel-safe, if child plan is. */
5405  dest->parallel_safe = src->parallel_safe;
5406 }
5407 
5408 /*
5409  * Some places in this file build Sort nodes that don't have a directly
5410  * corresponding Path node. The cost of the sort is, or should have been,
5411  * included in the cost of the Path node we're working from, but since it's
5412  * not split out, we have to re-figure it using cost_sort(). This is just
5413  * to label the Sort node nicely for EXPLAIN.
5414  *
5415  * limit_tuples is as for cost_sort (in particular, pass -1 if no limit)
5416  */
5417 static void
5418 label_sort_with_costsize(PlannerInfo *root, Sort *plan, double limit_tuples)
5419 {
5420  Plan *lefttree = plan->plan.lefttree;
5421  Path sort_path; /* dummy for result of cost_sort */
5422 
5423  /*
5424  * This function shouldn't have to deal with IncrementalSort plans because
5425  * they are only created from corresponding Path nodes.
5426  */
5427  Assert(IsA(plan, Sort));
5428 
5429  cost_sort(&sort_path, root, NIL,
5430  lefttree->total_cost,
5431  lefttree->plan_rows,
5432  lefttree->plan_width,
5433  0.0,
5434  work_mem,
5435  limit_tuples);
5436  plan->plan.startup_cost = sort_path.startup_cost;
5437  plan->plan.total_cost = sort_path.total_cost;
5438  plan->plan.plan_rows = lefttree->plan_rows;
5439  plan->plan.plan_width = lefttree->plan_width;
5440  plan->plan.parallel_aware = false;
5441  plan->plan.parallel_safe = lefttree->parallel_safe;
5442 }
5443 
5444 /*
5445  * bitmap_subplan_mark_shared
5446  * Set isshared flag in bitmap subplan so that it will be created in
5447  * shared memory.
5448  */
5449 static void
5451 {
5452  if (IsA(plan, BitmapAnd))
5453  bitmap_subplan_mark_shared(linitial(((BitmapAnd *) plan)->bitmapplans));
5454  else if (IsA(plan, BitmapOr))
5455  {
5456  ((BitmapOr *) plan)->isshared = true;
5457  bitmap_subplan_mark_shared(linitial(((BitmapOr *) plan)->bitmapplans));
5458  }
5459  else if (IsA(plan, BitmapIndexScan))
5460  ((BitmapIndexScan *) plan)->isshared = true;
5461  else
5462  elog(ERROR, "unrecognized node type: %d", nodeTag(plan));
5463 }
5464 
5465 /*****************************************************************************
5466  *
5467  * PLAN NODE BUILDING ROUTINES
5468  *
5469  * In general, these functions are not passed the original Path and therefore
5470  * leave it to the caller to fill in the cost/width fields from the Path,
5471  * typically by calling copy_generic_path_info(). This convention is
5472  * somewhat historical, but it does support a few places above where we build
5473  * a plan node without having an exactly corresponding Path node. Under no
5474  * circumstances should one of these functions do its own cost calculations,
5475  * as that would be redundant with calculations done while building Paths.
5476  *
5477  *****************************************************************************/
5478 
5479 static SeqScan *
5481  List *qpqual,
5482  Index scanrelid)
5483 {
5484  SeqScan *node = makeNode(SeqScan);
5485  Plan *plan = &node->scan.plan;
5486 
5487  plan->targetlist = qptlist;
5488  plan->qual = qpqual;
5489  plan->lefttree = NULL;
5490  plan->righttree = NULL;
5491  node->scan.scanrelid = scanrelid;
5492 
5493  return node;
5494 }
5495 
5496 static SampleScan *
5498  List *qpqual,
5499  Index scanrelid,
5500  TableSampleClause *tsc)
5501 {
5502  SampleScan *node = makeNode(SampleScan);
5503  Plan *plan = &node->scan.plan;
5504 
5505  plan->targetlist = qptlist;
5506  plan->qual = qpqual;
5507  plan->lefttree = NULL;
5508  plan->righttree = NULL;
5509  node->scan.scanrelid = scanrelid;
5510  node->tablesample = tsc;
5511 
5512  return node;
5513 }
5514 
5515 static IndexScan *
5517  List *qpqual,
5518  Index scanrelid,
5519  Oid indexid,
5520  List *indexqual,
5521  List *indexqualorig,
5522  List *indexorderby,
5523  List *indexorderbyorig,
5524  List *indexorderbyops,
5525  ScanDirection indexscandir)
5526 {
5527  IndexScan *node = makeNode(IndexScan);
5528  Plan *plan = &node->scan.plan;
5529 
5530  plan->targetlist = qptlist;
5531  plan->qual = qpqual;
5532  plan->lefttree = NULL;
5533  plan->righttree = NULL;
5534  node->scan.scanrelid = scanrelid;
5535  node->indexid = indexid;
5536  node->indexqual = indexqual;
5537  node->indexqualorig = indexqualorig;
5538  node->indexorderby = indexorderby;
5539  node->indexorderbyorig = indexorderbyorig;
5540  node->indexorderbyops = indexorderbyops;
5541  node->indexorderdir = indexscandir;
5542 
5543  return node;
5544 }
5545 
5546 static IndexOnlyScan *
5548  List *qpqual,
5549  Index scanrelid,
5550  Oid indexid,
5551  List *indexqual,
5552  List *recheckqual,
5553  List *indexorderby,
5554  List *indextlist,
5555  ScanDirection indexscandir)
5556 {
5558  Plan *plan = &node->scan.plan;
5559 
5560  plan->targetlist = qptlist;
5561  plan->qual = qpqual;
5562  plan->lefttree = NULL;
5563  plan->righttree = NULL;
5564  node->scan.scanrelid = scanrelid;
5565  node->indexid = indexid;
5566  node->indexqual = indexqual;
5567  node->recheckqual = recheckqual;
5568  node->indexorderby = indexorderby;
5569  node->indextlist = indextlist;
5570  node->indexorderdir = indexscandir;
5571 
5572  return node;
5573 }
5574 
5575 static BitmapIndexScan *
5577  Oid indexid,
5578  List *indexqual,
5579  List *indexqualorig)
5580 {
5582  Plan *plan = &node->scan.plan;
5583 
5584  plan->targetlist = NIL; /* not used */
5585  plan->qual = NIL; /* not used */
5586  plan->lefttree = NULL;
5587  plan->righttree = NULL;
5588  node->scan.scanrelid = scanrelid;
5589  node->indexid = indexid;
5590  node->indexqual = indexqual;
5591  node->indexqualorig = indexqualorig;
5592 
5593  return node;
5594 }
5595 
5596 static BitmapHeapScan *
5598  List *qpqual,
5599  Plan *lefttree,
5600  List *bitmapqualorig,
5601  Index scanrelid)
5602 {
5604  Plan *plan = &node->scan.plan;
5605 
5606  plan->targetlist = qptlist;
5607  plan->qual = qpqual;
5608  plan->lefttree = lefttree;
5609  plan->righttree = NULL;
5610  node->scan.scanrelid = scanrelid;
5611  node->bitmapqualorig = bitmapqualorig;
5612 
5613  return node;
5614 }
5615 
5616 static TidScan *
5618  List *qpqual,