PostgreSQL Source Code (git master)
createplan.c
1 /*-------------------------------------------------------------------------
2  *
3  * createplan.c
4  * Routines to create the desired plan for processing a query.
5  * Planning is complete, we just need to convert the selected
6  * Path into a Plan.
7  *
8  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
9  * Portions Copyright (c) 1994, Regents of the University of California
10  *
11  *
12  * IDENTIFICATION
13  * src/backend/optimizer/plan/createplan.c
14  *
15  *-------------------------------------------------------------------------
16  */
17 #include "postgres.h"
18 
19 #include <limits.h>
20 #include <math.h>
21 
22 #include "access/sysattr.h"
23 #include "catalog/pg_class.h"
24 #include "foreign/fdwapi.h"
25 #include "miscadmin.h"
26 #include "nodes/extensible.h"
27 #include "nodes/makefuncs.h"
28 #include "nodes/nodeFuncs.h"
29 #include "optimizer/clauses.h"
30 #include "optimizer/cost.h"
31 #include "optimizer/optimizer.h"
32 #include "optimizer/paramassign.h"
33 #include "optimizer/paths.h"
34 #include "optimizer/placeholder.h"
35 #include "optimizer/plancat.h"
36 #include "optimizer/planmain.h"
37 #include "optimizer/prep.h"
38 #include "optimizer/restrictinfo.h"
39 #include "optimizer/subselect.h"
40 #include "optimizer/tlist.h"
41 #include "parser/parse_clause.h"
42 #include "parser/parsetree.h"
43 #include "partitioning/partprune.h"
44 #include "utils/lsyscache.h"
45 
46 
47 /*
48  * Flag bits that can appear in the flags argument of create_plan_recurse().
49  * These can be OR-ed together.
50  *
51  * CP_EXACT_TLIST specifies that the generated plan node must return exactly
52  * the tlist specified by the path's pathtarget (this overrides both
53  * CP_SMALL_TLIST and CP_LABEL_TLIST, if those are set). Otherwise, the
54  * plan node is allowed to return just the Vars and PlaceHolderVars needed
55  * to evaluate the pathtarget.
56  *
57  * CP_SMALL_TLIST specifies that a narrower tlist is preferred. This is
58  * passed down by parent nodes such as Sort and Hash, which will have to
59  * store the returned tuples.
60  *
61  * CP_LABEL_TLIST specifies that the plan node must return columns matching
62  * any sortgrouprefs specified in its pathtarget, with appropriate
63  * ressortgroupref labels. This is passed down by parent nodes such as Sort
64  * and Group, which need these values to be available in their inputs.
65  *
66  * CP_IGNORE_TLIST specifies that the caller plans to replace the targetlist,
67  * and therefore it doesn't matter a bit what target list gets generated.
68  */
69 #define CP_EXACT_TLIST 0x0001 /* Plan must return specified tlist */
70 #define CP_SMALL_TLIST 0x0002 /* Prefer narrower tlists */
71 #define CP_LABEL_TLIST 0x0004 /* tlist must contain sortgrouprefs */
72 #define CP_IGNORE_TLIST 0x0008 /* caller will replace tlist */
73 
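The flag bits above are OR-ed together by callers of create_plan_recurse() and tested individually by child-plan creators, except that CP_IGNORE_TLIST is honored only when it is the sole flag (create_scan_plan() uses an exact equality test for it). The following standalone sketch is illustrative only and is not part of createplan.c: the macro values mirror the definitions above, but the helper function and its messages are invented for the example.

#include <stdio.h>

#define CP_EXACT_TLIST	0x0001	/* same values as the defines above */
#define CP_SMALL_TLIST	0x0002
#define CP_LABEL_TLIST	0x0004
#define CP_IGNORE_TLIST	0x0008

/* Hypothetical helper: report what a flags word asks a child plan to do. */
static void
describe_tlist_flags(int flags)
{
	if (flags == CP_IGNORE_TLIST)		/* exact equality: the only bit set */
		printf("caller will replace the tlist; don't bother building one\n");
	else if (flags & CP_EXACT_TLIST)	/* overrides SMALL and LABEL */
		printf("return exactly the pathtarget tlist\n");
	else
	{
		if (flags & CP_SMALL_TLIST)
			printf("prefer a narrower tlist\n");
		if (flags & CP_LABEL_TLIST)
			printf("label sort/group columns with ressortgroupref\n");
	}
}

int
main(void)
{
	describe_tlist_flags(CP_EXACT_TLIST);
	describe_tlist_flags(CP_SMALL_TLIST | CP_LABEL_TLIST);	/* bits OR-ed together */
	describe_tlist_flags(CP_IGNORE_TLIST);
	return 0;
}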
74 
75 static Plan *create_plan_recurse(PlannerInfo *root, Path *best_path,
76  int flags);
77 static Plan *create_scan_plan(PlannerInfo *root, Path *best_path,
78  int flags);
79 static List *build_path_tlist(PlannerInfo *root, Path *path);
80 static bool use_physical_tlist(PlannerInfo *root, Path *path, int flags);
81 static List *get_gating_quals(PlannerInfo *root, List *quals);
82 static Plan *create_gating_plan(PlannerInfo *root, Path *path, Plan *plan,
83  List *gating_quals);
84 static Plan *create_join_plan(PlannerInfo *root, JoinPath *best_path);
85 static bool is_async_capable_path(Path *path);
86 static Plan *create_append_plan(PlannerInfo *root, AppendPath *best_path,
87  int flags);
88 static Plan *create_merge_append_plan(PlannerInfo *root, MergeAppendPath *best_path,
89  int flags);
90 static Result *create_group_result_plan(PlannerInfo *root,
91  GroupResultPath *best_path);
92 static ProjectSet *create_project_set_plan(PlannerInfo *root, ProjectSetPath *best_path);
93 static Material *create_material_plan(PlannerInfo *root, MaterialPath *best_path,
94  int flags);
95 static Memoize *create_memoize_plan(PlannerInfo *root, MemoizePath *best_path,
96  int flags);
97 static Plan *create_unique_plan(PlannerInfo *root, UniquePath *best_path,
98  int flags);
99 static Gather *create_gather_plan(PlannerInfo *root, GatherPath *best_path);
100 static Plan *create_projection_plan(PlannerInfo *root,
101  ProjectionPath *best_path,
102  int flags);
103 static Plan *inject_projection_plan(Plan *subplan, List *tlist, bool parallel_safe);
104 static Sort *create_sort_plan(PlannerInfo *root, SortPath *best_path, int flags);
105 static IncrementalSort *create_incrementalsort_plan(PlannerInfo *root,
106  IncrementalSortPath *best_path, int flags);
107 static Group *create_group_plan(PlannerInfo *root, GroupPath *best_path);
108 static Unique *create_upper_unique_plan(PlannerInfo *root, UpperUniquePath *best_path,
109  int flags);
110 static Agg *create_agg_plan(PlannerInfo *root, AggPath *best_path);
111 static Plan *create_groupingsets_plan(PlannerInfo *root, GroupingSetsPath *best_path);
112 static Result *create_minmaxagg_plan(PlannerInfo *root, MinMaxAggPath *best_path);
113 static WindowAgg *create_windowagg_plan(PlannerInfo *root, WindowAggPath *best_path);
114 static SetOp *create_setop_plan(PlannerInfo *root, SetOpPath *best_path,
115  int flags);
116 static RecursiveUnion *create_recursiveunion_plan(PlannerInfo *root, RecursiveUnionPath *best_path);
117 static LockRows *create_lockrows_plan(PlannerInfo *root, LockRowsPath *best_path,
118  int flags);
119 static ModifyTable *create_modifytable_plan(PlannerInfo *root, ModifyTablePath *best_path);
120 static Limit *create_limit_plan(PlannerInfo *root, LimitPath *best_path,
121  int flags);
122 static SeqScan *create_seqscan_plan(PlannerInfo *root, Path *best_path,
123  List *tlist, List *scan_clauses);
124 static SampleScan *create_samplescan_plan(PlannerInfo *root, Path *best_path,
125  List *tlist, List *scan_clauses);
126 static Scan *create_indexscan_plan(PlannerInfo *root, IndexPath *best_path,
127  List *tlist, List *scan_clauses, bool indexonly);
128 static BitmapHeapScan *create_bitmap_scan_plan(PlannerInfo *root,
129  BitmapHeapPath *best_path,
130  List *tlist, List *scan_clauses);
131 static Plan *create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
132  List **qual, List **indexqual, List **indexECs);
133 static void bitmap_subplan_mark_shared(Plan *plan);
134 static TidScan *create_tidscan_plan(PlannerInfo *root, TidPath *best_path,
135  List *tlist, List *scan_clauses);
136 static TidRangeScan *create_tidrangescan_plan(PlannerInfo *root,
137  TidRangePath *best_path,
138  List *tlist,
139  List *scan_clauses);
140 static SubqueryScan *create_subqueryscan_plan(PlannerInfo *root,
141  SubqueryScanPath *best_path,
142  List *tlist, List *scan_clauses);
143 static FunctionScan *create_functionscan_plan(PlannerInfo *root, Path *best_path,
144  List *tlist, List *scan_clauses);
145 static ValuesScan *create_valuesscan_plan(PlannerInfo *root, Path *best_path,
146  List *tlist, List *scan_clauses);
147 static TableFuncScan *create_tablefuncscan_plan(PlannerInfo *root, Path *best_path,
148  List *tlist, List *scan_clauses);
149 static CteScan *create_ctescan_plan(PlannerInfo *root, Path *best_path,
150  List *tlist, List *scan_clauses);
151 static NamedTuplestoreScan *create_namedtuplestorescan_plan(PlannerInfo *root,
152  Path *best_path, List *tlist, List *scan_clauses);
153 static Result *create_resultscan_plan(PlannerInfo *root, Path *best_path,
154  List *tlist, List *scan_clauses);
155 static WorkTableScan *create_worktablescan_plan(PlannerInfo *root, Path *best_path,
156  List *tlist, List *scan_clauses);
157 static ForeignScan *create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
158  List *tlist, List *scan_clauses);
159 static CustomScan *create_customscan_plan(PlannerInfo *root,
160  CustomPath *best_path,
161  List *tlist, List *scan_clauses);
162 static NestLoop *create_nestloop_plan(PlannerInfo *root, NestPath *best_path);
163 static MergeJoin *create_mergejoin_plan(PlannerInfo *root, MergePath *best_path);
164 static HashJoin *create_hashjoin_plan(PlannerInfo *root, HashPath *best_path);
165 static Node *replace_nestloop_params(PlannerInfo *root, Node *expr);
166 static Node *replace_nestloop_params_mutator(Node *node, PlannerInfo *root);
167 static void fix_indexqual_references(PlannerInfo *root, IndexPath *index_path,
168  List **stripped_indexquals_p,
169  List **fixed_indexquals_p);
170 static List *fix_indexorderby_references(PlannerInfo *root, IndexPath *index_path);
171 static Node *fix_indexqual_clause(PlannerInfo *root,
172  IndexOptInfo *index, int indexcol,
173  Node *clause, List *indexcolnos);
174 static Node *fix_indexqual_operand(Node *node, IndexOptInfo *index, int indexcol);
175 static List *get_switched_clauses(List *clauses, Relids outerrelids);
176 static List *order_qual_clauses(PlannerInfo *root, List *clauses);
177 static void copy_generic_path_info(Plan *dest, Path *src);
178 static void copy_plan_costsize(Plan *dest, Plan *src);
179 static void label_sort_with_costsize(PlannerInfo *root, Sort *plan,
180  double limit_tuples);
181 static SeqScan *make_seqscan(List *qptlist, List *qpqual, Index scanrelid);
182 static SampleScan *make_samplescan(List *qptlist, List *qpqual, Index scanrelid,
183  TableSampleClause *tsc);
184 static IndexScan *make_indexscan(List *qptlist, List *qpqual, Index scanrelid,
185  Oid indexid, List *indexqual, List *indexqualorig,
186  List *indexorderby, List *indexorderbyorig,
187  List *indexorderbyops,
188  ScanDirection indexscandir);
189 static IndexOnlyScan *make_indexonlyscan(List *qptlist, List *qpqual,
190  Index scanrelid, Oid indexid,
191  List *indexqual, List *indexorderby,
192  List *indextlist,
193  ScanDirection indexscandir);
194 static BitmapIndexScan *make_bitmap_indexscan(Index scanrelid, Oid indexid,
195  List *indexqual,
196  List *indexqualorig);
197 static BitmapHeapScan *make_bitmap_heapscan(List *qptlist,
198  List *qpqual,
199  Plan *lefttree,
200  List *bitmapqualorig,
201  Index scanrelid);
202 static TidScan *make_tidscan(List *qptlist, List *qpqual, Index scanrelid,
203  List *tidquals);
204 static TidRangeScan *make_tidrangescan(List *qptlist, List *qpqual,
205  Index scanrelid, List *tidrangequals);
206 static SubqueryScan *make_subqueryscan(List *qptlist,
207  List *qpqual,
208  Index scanrelid,
209  Plan *subplan);
210 static FunctionScan *make_functionscan(List *qptlist, List *qpqual,
211  Index scanrelid, List *functions, bool funcordinality);
212 static ValuesScan *make_valuesscan(List *qptlist, List *qpqual,
213  Index scanrelid, List *values_lists);
214 static TableFuncScan *make_tablefuncscan(List *qptlist, List *qpqual,
215  Index scanrelid, TableFunc *tablefunc);
216 static CteScan *make_ctescan(List *qptlist, List *qpqual,
217  Index scanrelid, int ctePlanId, int cteParam);
218 static NamedTuplestoreScan *make_namedtuplestorescan(List *qptlist, List *qpqual,
219  Index scanrelid, char *enrname);
220 static WorkTableScan *make_worktablescan(List *qptlist, List *qpqual,
221  Index scanrelid, int wtParam);
222 static RecursiveUnion *make_recursive_union(List *tlist,
223  Plan *lefttree,
224  Plan *righttree,
225  int wtParam,
226  List *distinctList,
227  long numGroups);
228 static BitmapAnd *make_bitmap_and(List *bitmapplans);
229 static BitmapOr *make_bitmap_or(List *bitmapplans);
230 static NestLoop *make_nestloop(List *tlist,
231  List *joinclauses, List *otherclauses, List *nestParams,
232  Plan *lefttree, Plan *righttree,
233  JoinType jointype, bool inner_unique);
234 static HashJoin *make_hashjoin(List *tlist,
235  List *joinclauses, List *otherclauses,
236  List *hashclauses,
237  List *hashoperators, List *hashcollations,
238  List *hashkeys,
239  Plan *lefttree, Plan *righttree,
240  JoinType jointype, bool inner_unique);
241 static Hash *make_hash(Plan *lefttree,
242  List *hashkeys,
243  Oid skewTable,
244  AttrNumber skewColumn,
245  bool skewInherit);
246 static MergeJoin *make_mergejoin(List *tlist,
247  List *joinclauses, List *otherclauses,
248  List *mergeclauses,
249  Oid *mergefamilies,
250  Oid *mergecollations,
251  int *mergestrategies,
252  bool *mergenullsfirst,
253  Plan *lefttree, Plan *righttree,
254  JoinType jointype, bool inner_unique,
255  bool skip_mark_restore);
256 static Sort *make_sort(Plan *lefttree, int numCols,
257  AttrNumber *sortColIdx, Oid *sortOperators,
258  Oid *collations, bool *nullsFirst);
259 static IncrementalSort *make_incrementalsort(Plan *lefttree,
260  int numCols, int nPresortedCols,
261  AttrNumber *sortColIdx, Oid *sortOperators,
262  Oid *collations, bool *nullsFirst);
263 static Plan *prepare_sort_from_pathkeys(Plan *lefttree, List *pathkeys,
264  Relids relids,
265  const AttrNumber *reqColIdx,
266  bool adjust_tlist_in_place,
267  int *p_numsortkeys,
268  AttrNumber **p_sortColIdx,
269  Oid **p_sortOperators,
270  Oid **p_collations,
271  bool **p_nullsFirst);
272 static Sort *make_sort_from_pathkeys(Plan *lefttree, List *pathkeys,
273  Relids relids);
274 static IncrementalSort *make_incrementalsort_from_pathkeys(Plan *lefttree,
275  List *pathkeys, Relids relids, int nPresortedCols);
276 static Sort *make_sort_from_groupcols(List *groupcls,
277  AttrNumber *grpColIdx,
278  Plan *lefttree);
279 static Material *make_material(Plan *lefttree);
280 static Memoize *make_memoize(Plan *lefttree, Oid *hashoperators,
281  Oid *collations, List *param_exprs,
282  bool singlerow, bool binary_mode,
283  uint32 est_entries, Bitmapset *keyparamids);
284 static WindowAgg *make_windowagg(List *tlist, Index winref,
285  int partNumCols, AttrNumber *partColIdx, Oid *partOperators, Oid *partCollations,
286  int ordNumCols, AttrNumber *ordColIdx, Oid *ordOperators, Oid *ordCollations,
287  int frameOptions, Node *startOffset, Node *endOffset,
288  Oid startInRangeFunc, Oid endInRangeFunc,
289  Oid inRangeColl, bool inRangeAsc, bool inRangeNullsFirst,
290  Plan *lefttree);
291 static Group *make_group(List *tlist, List *qual, int numGroupCols,
292  AttrNumber *grpColIdx, Oid *grpOperators, Oid *grpCollations,
293  Plan *lefttree);
294 static Unique *make_unique_from_sortclauses(Plan *lefttree, List *distinctList);
295 static Unique *make_unique_from_pathkeys(Plan *lefttree,
296  List *pathkeys, int numCols);
297 static Gather *make_gather(List *qptlist, List *qpqual,
298  int nworkers, int rescan_param, bool single_copy, Plan *subplan);
299 static SetOp *make_setop(SetOpCmd cmd, SetOpStrategy strategy, Plan *lefttree,
300  List *distinctList, AttrNumber flagColIdx, int firstFlag,
301  long numGroups);
302 static LockRows *make_lockrows(Plan *lefttree, List *rowMarks, int epqParam);
303 static Result *make_result(List *tlist, Node *resconstantqual, Plan *subplan);
304 static ProjectSet *make_project_set(List *tlist, Plan *subplan);
305 static ModifyTable *make_modifytable(PlannerInfo *root, Plan *subplan,
306  CmdType operation, bool canSetTag,
307  Index nominalRelation, Index rootRelation,
308  bool partColsUpdated,
309  List *resultRelations,
310  List *updateColnosLists,
311  List *withCheckOptionLists, List *returningLists,
312  List *rowMarks, OnConflictExpr *onconflict, int epqParam);
313 static GatherMerge *create_gather_merge_plan(PlannerInfo *root,
314  GatherMergePath *best_path);
315 
316 
317 /*
318  * create_plan
319  * Creates the access plan for a query by recursively processing the
320  * desired tree of pathnodes, starting at the node 'best_path'. For
321  * every pathnode found, we create a corresponding plan node containing
322  * appropriate id, target list, and qualification information.
323  *
324  * The tlists and quals in the plan tree are still in planner format,
325  * ie, Vars still correspond to the parser's numbering. This will be
326  * fixed later by setrefs.c.
327  *
328  * best_path is the best access path
329  *
330  * Returns a Plan tree.
331  */
332 Plan *
333 create_plan(PlannerInfo *root, Path *best_path)
334 {
335  Plan *plan;
336 
337  /* plan_params should not be in use in current query level */
338  Assert(root->plan_params == NIL);
339 
340  /* Initialize this module's workspace in PlannerInfo */
341  root->curOuterRels = NULL;
342  root->curOuterParams = NIL;
343 
344  /* Recursively process the path tree, demanding the correct tlist result */
345  plan = create_plan_recurse(root, best_path, CP_EXACT_TLIST);
346 
347  /*
348  * Make sure the topmost plan node's targetlist exposes the original
349  * column names and other decorative info. Targetlists generated within
350  * the planner don't bother with that stuff, but we must have it on the
351  * top-level tlist seen at execution time. However, ModifyTable plan
352  * nodes don't have a tlist matching the querytree targetlist.
353  */
354  if (!IsA(plan, ModifyTable))
355  apply_tlist_labeling(plan->targetlist, root->processed_tlist);
356 
357  /*
358  * Attach any initPlans created in this query level to the topmost plan
359  * node. (In principle the initplans could go in any plan node at or
360  * above where they're referenced, but there seems no reason to put them
361  * any lower than the topmost node for the query level. Also, see
362  * comments for SS_finalize_plan before you try to change this.)
363  */
364  SS_attach_initplans(root, plan);
365 
366  /* Check we successfully assigned all NestLoopParams to plan nodes */
367  if (root->curOuterParams != NIL)
368  elog(ERROR, "failed to assign all NestLoopParams to plan nodes");
369 
370  /*
371  * Reset plan_params to ensure param IDs used for nestloop params are not
372  * re-used later
373  */
374  root->plan_params = NIL;
375 
376  return plan;
377 }
378 
379 /*
380  * create_plan_recurse
381  * Recursive guts of create_plan().
382  */
383 static Plan *
384 create_plan_recurse(PlannerInfo *root, Path *best_path, int flags)
385 {
386  Plan *plan;
387 
388  /* Guard against stack overflow due to overly complex plans */
389  check_stack_depth();
390 
391  switch (best_path->pathtype)
392  {
393  case T_SeqScan:
394  case T_SampleScan:
395  case T_IndexScan:
396  case T_IndexOnlyScan:
397  case T_BitmapHeapScan:
398  case T_TidScan:
399  case T_TidRangeScan:
400  case T_SubqueryScan:
401  case T_FunctionScan:
402  case T_TableFuncScan:
403  case T_ValuesScan:
404  case T_CteScan:
405  case T_WorkTableScan:
406  case T_NamedTuplestoreScan:
407  case T_ForeignScan:
408  case T_CustomScan:
409  plan = create_scan_plan(root, best_path, flags);
410  break;
411  case T_HashJoin:
412  case T_MergeJoin:
413  case T_NestLoop:
414  plan = create_join_plan(root,
415  (JoinPath *) best_path);
416  break;
417  case T_Append:
418  plan = create_append_plan(root,
419  (AppendPath *) best_path,
420  flags);
421  break;
422  case T_MergeAppend:
423  plan = create_merge_append_plan(root,
424  (MergeAppendPath *) best_path,
425  flags);
426  break;
427  case T_Result:
428  if (IsA(best_path, ProjectionPath))
429  {
430  plan = create_projection_plan(root,
431  (ProjectionPath *) best_path,
432  flags);
433  }
434  else if (IsA(best_path, MinMaxAggPath))
435  {
436  plan = (Plan *) create_minmaxagg_plan(root,
437  (MinMaxAggPath *) best_path);
438  }
439  else if (IsA(best_path, GroupResultPath))
440  {
441  plan = (Plan *) create_group_result_plan(root,
442  (GroupResultPath *) best_path);
443  }
444  else
445  {
446  /* Simple RTE_RESULT base relation */
447  Assert(IsA(best_path, Path));
448  plan = create_scan_plan(root, best_path, flags);
449  }
450  break;
451  case T_ProjectSet:
452  plan = (Plan *) create_project_set_plan(root,
453  (ProjectSetPath *) best_path);
454  break;
455  case T_Material:
456  plan = (Plan *) create_material_plan(root,
457  (MaterialPath *) best_path,
458  flags);
459  break;
460  case T_Memoize:
461  plan = (Plan *) create_memoize_plan(root,
462  (MemoizePath *) best_path,
463  flags);
464  break;
465  case T_Unique:
466  if (IsA(best_path, UpperUniquePath))
467  {
468  plan = (Plan *) create_upper_unique_plan(root,
469  (UpperUniquePath *) best_path,
470  flags);
471  }
472  else
473  {
474  Assert(IsA(best_path, UniquePath));
475  plan = create_unique_plan(root,
476  (UniquePath *) best_path,
477  flags);
478  }
479  break;
480  case T_Gather:
481  plan = (Plan *) create_gather_plan(root,
482  (GatherPath *) best_path);
483  break;
484  case T_Sort:
485  plan = (Plan *) create_sort_plan(root,
486  (SortPath *) best_path,
487  flags);
488  break;
489  case T_IncrementalSort:
490  plan = (Plan *) create_incrementalsort_plan(root,
491  (IncrementalSortPath *) best_path,
492  flags);
493  break;
494  case T_Group:
495  plan = (Plan *) create_group_plan(root,
496  (GroupPath *) best_path);
497  break;
498  case T_Agg:
499  if (IsA(best_path, GroupingSetsPath))
500  plan = create_groupingsets_plan(root,
501  (GroupingSetsPath *) best_path);
502  else
503  {
504  Assert(IsA(best_path, AggPath));
505  plan = (Plan *) create_agg_plan(root,
506  (AggPath *) best_path);
507  }
508  break;
509  case T_WindowAgg:
510  plan = (Plan *) create_windowagg_plan(root,
511  (WindowAggPath *) best_path);
512  break;
513  case T_SetOp:
514  plan = (Plan *) create_setop_plan(root,
515  (SetOpPath *) best_path,
516  flags);
517  break;
518  case T_RecursiveUnion:
519  plan = (Plan *) create_recursiveunion_plan(root,
520  (RecursiveUnionPath *) best_path);
521  break;
522  case T_LockRows:
523  plan = (Plan *) create_lockrows_plan(root,
524  (LockRowsPath *) best_path,
525  flags);
526  break;
527  case T_ModifyTable:
528  plan = (Plan *) create_modifytable_plan(root,
529  (ModifyTablePath *) best_path);
530  break;
531  case T_Limit:
532  plan = (Plan *) create_limit_plan(root,
533  (LimitPath *) best_path,
534  flags);
535  break;
536  case T_GatherMerge:
537  plan = (Plan *) create_gather_merge_plan(root,
538  (GatherMergePath *) best_path);
539  break;
540  default:
541  elog(ERROR, "unrecognized node type: %d",
542  (int) best_path->pathtype);
543  plan = NULL; /* keep compiler quiet */
544  break;
545  }
546 
547  return plan;
548 }
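create_plan_recurse() above dispatches on the Path node's tag and casts to the concrete path type before calling the specialized creator, falling back to elog(ERROR) for unrecognized tags. Below is a minimal standalone sketch of that tag-dispatch pattern; it is not PostgreSQL code, and all of the toy types and names are invented for illustration.

#include <stdio.h>

typedef enum ToyTag { TOY_SEQSCAN, TOY_SORT } ToyTag;

typedef struct ToyPath { ToyTag tag; double rows; } ToyPath;
/* "Subtype" embeds the base struct first, so a pointer cast is valid. */
typedef struct ToySortPath { ToyPath path; int numkeys; } ToySortPath;

/* Dispatch on the tag, then downcast to the concrete struct. */
static void
toy_create_plan(ToyPath *p)
{
	switch (p->tag)
	{
		case TOY_SEQSCAN:
			printf("build a sequential-scan plan for %.0f rows\n", p->rows);
			break;
		case TOY_SORT:
			printf("build a sort plan with %d sort keys\n",
				   ((ToySortPath *) p)->numkeys);
			break;
		default:
			fprintf(stderr, "unrecognized toy tag: %d\n", (int) p->tag);
			break;
	}
}

int
main(void)
{
	ToyPath seq = {TOY_SEQSCAN, 1000.0};
	ToySortPath sort = {{TOY_SORT, 1000.0}, 2};

	toy_create_plan(&seq);
	toy_create_plan(&sort.path);
	return 0;
}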
549 
550 /*
551  * create_scan_plan
552  * Create a scan plan for the parent relation of 'best_path'.
553  */
554 static Plan *
555 create_scan_plan(PlannerInfo *root, Path *best_path, int flags)
556 {
557  RelOptInfo *rel = best_path->parent;
558  List *scan_clauses;
559  List *gating_clauses;
560  List *tlist;
561  Plan *plan;
562 
563  /*
564  * Extract the relevant restriction clauses from the parent relation. The
565  * executor must apply all these restrictions during the scan, except for
566  * pseudoconstants which we'll take care of below.
567  *
568  * If this is a plain indexscan or index-only scan, we need not consider
569  * restriction clauses that are implied by the index's predicate, so use
570  * indrestrictinfo not baserestrictinfo. Note that we can't do that for
571  * bitmap indexscans, since there's not necessarily a single index
572  * involved; but it doesn't matter since create_bitmap_scan_plan() will be
573  * able to get rid of such clauses anyway via predicate proof.
574  */
575  switch (best_path->pathtype)
576  {
577  case T_IndexScan:
578  case T_IndexOnlyScan:
579  scan_clauses = castNode(IndexPath, best_path)->indexinfo->indrestrictinfo;
580  break;
581  default:
582  scan_clauses = rel->baserestrictinfo;
583  break;
584  }
585 
586  /*
587  * If this is a parameterized scan, we also need to enforce all the join
588  * clauses available from the outer relation(s).
589  *
590  * For paranoia's sake, don't modify the stored baserestrictinfo list.
591  */
592  if (best_path->param_info)
593  scan_clauses = list_concat_copy(scan_clauses,
594  best_path->param_info->ppi_clauses);
595 
596  /*
597  * Detect whether we have any pseudoconstant quals to deal with. Then, if
598  * we'll need a gating Result node, it will be able to project, so there
599  * are no requirements on the child's tlist.
600  */
601  gating_clauses = get_gating_quals(root, scan_clauses);
602  if (gating_clauses)
603  flags = 0;
604 
605  /*
606  * For table scans, rather than using the relation targetlist (which is
607  * only those Vars actually needed by the query), we prefer to generate a
608  * tlist containing all Vars in order. This will allow the executor to
609  * optimize away projection of the table tuples, if possible.
610  *
611  * But if the caller is going to ignore our tlist anyway, then don't
612  * bother generating one at all. We use an exact equality test here, so
613  * that this only applies when CP_IGNORE_TLIST is the only flag set.
614  */
615  if (flags == CP_IGNORE_TLIST)
616  {
617  tlist = NULL;
618  }
619  else if (use_physical_tlist(root, best_path, flags))
620  {
621  if (best_path->pathtype == T_IndexOnlyScan)
622  {
623  /* For index-only scan, the preferred tlist is the index's */
624  tlist = copyObject(((IndexPath *) best_path)->indexinfo->indextlist);
625 
626  /*
627  * Transfer sortgroupref data to the replacement tlist, if
628  * requested (use_physical_tlist checked that this will work).
629  */
630  if (flags & CP_LABEL_TLIST)
631  apply_pathtarget_labeling_to_tlist(tlist, best_path->pathtarget);
632  }
633  else
634  {
635  tlist = build_physical_tlist(root, rel);
636  if (tlist == NIL)
637  {
638  /* Failed because of dropped cols, so use regular method */
639  tlist = build_path_tlist(root, best_path);
640  }
641  else
642  {
643  /* As above, transfer sortgroupref data to replacement tlist */
644  if (flags & CP_LABEL_TLIST)
645  apply_pathtarget_labeling_to_tlist(tlist, best_path->pathtarget);
646  }
647  }
648  }
649  else
650  {
651  tlist = build_path_tlist(root, best_path);
652  }
653 
654  switch (best_path->pathtype)
655  {
656  case T_SeqScan:
657  plan = (Plan *) create_seqscan_plan(root,
658  best_path,
659  tlist,
660  scan_clauses);
661  break;
662 
663  case T_SampleScan:
664  plan = (Plan *) create_samplescan_plan(root,
665  best_path,
666  tlist,
667  scan_clauses);
668  break;
669 
670  case T_IndexScan:
671  plan = (Plan *) create_indexscan_plan(root,
672  (IndexPath *) best_path,
673  tlist,
674  scan_clauses,
675  false);
676  break;
677 
678  case T_IndexOnlyScan:
679  plan = (Plan *) create_indexscan_plan(root,
680  (IndexPath *) best_path,
681  tlist,
682  scan_clauses,
683  true);
684  break;
685 
686  case T_BitmapHeapScan:
687  plan = (Plan *) create_bitmap_scan_plan(root,
688  (BitmapHeapPath *) best_path,
689  tlist,
690  scan_clauses);
691  break;
692 
693  case T_TidScan:
694  plan = (Plan *) create_tidscan_plan(root,
695  (TidPath *) best_path,
696  tlist,
697  scan_clauses);
698  break;
699 
700  case T_TidRangeScan:
701  plan = (Plan *) create_tidrangescan_plan(root,
702  (TidRangePath *) best_path,
703  tlist,
704  scan_clauses);
705  break;
706 
707  case T_SubqueryScan:
708  plan = (Plan *) create_subqueryscan_plan(root,
709  (SubqueryScanPath *) best_path,
710  tlist,
711  scan_clauses);
712  break;
713 
714  case T_FunctionScan:
715  plan = (Plan *) create_functionscan_plan(root,
716  best_path,
717  tlist,
718  scan_clauses);
719  break;
720 
721  case T_TableFuncScan:
722  plan = (Plan *) create_tablefuncscan_plan(root,
723  best_path,
724  tlist,
725  scan_clauses);
726  break;
727 
728  case T_ValuesScan:
729  plan = (Plan *) create_valuesscan_plan(root,
730  best_path,
731  tlist,
732  scan_clauses);
733  break;
734 
735  case T_CteScan:
736  plan = (Plan *) create_ctescan_plan(root,
737  best_path,
738  tlist,
739  scan_clauses);
740  break;
741 
742  case T_NamedTuplestoreScan:
743  plan = (Plan *) create_namedtuplestorescan_plan(root,
744  best_path,
745  tlist,
746  scan_clauses);
747  break;
748 
749  case T_Result:
750  plan = (Plan *) create_resultscan_plan(root,
751  best_path,
752  tlist,
753  scan_clauses);
754  break;
755 
756  case T_WorkTableScan:
757  plan = (Plan *) create_worktablescan_plan(root,
758  best_path,
759  tlist,
760  scan_clauses);
761  break;
762 
763  case T_ForeignScan:
764  plan = (Plan *) create_foreignscan_plan(root,
765  (ForeignPath *) best_path,
766  tlist,
767  scan_clauses);
768  break;
769 
770  case T_CustomScan:
771  plan = (Plan *) create_customscan_plan(root,
772  (CustomPath *) best_path,
773  tlist,
774  scan_clauses);
775  break;
776 
777  default:
778  elog(ERROR, "unrecognized node type: %d",
779  (int) best_path->pathtype);
780  plan = NULL; /* keep compiler quiet */
781  break;
782  }
783 
784  /*
785  * If there are any pseudoconstant clauses attached to this node, insert a
786  * gating Result node that evaluates the pseudoconstants as one-time
787  * quals.
788  */
789  if (gating_clauses)
790  plan = create_gating_plan(root, best_path, plan, gating_clauses);
791 
792  return plan;
793 }
794 
795 /*
796  * Build a target list (ie, a list of TargetEntry) for the Path's output.
797  *
798  * This is almost just make_tlist_from_pathtarget(), but we also have to
799  * deal with replacing nestloop params.
800  */
801 static List *
802 build_path_tlist(PlannerInfo *root, Path *path)
803 {
804  List *tlist = NIL;
805  Index *sortgrouprefs = path->pathtarget->sortgrouprefs;
806  int resno = 1;
807  ListCell *v;
808 
809  foreach(v, path->pathtarget->exprs)
810  {
811  Node *node = (Node *) lfirst(v);
812  TargetEntry *tle;
813 
814  /*
815  * If it's a parameterized path, there might be lateral references in
816  * the tlist, which need to be replaced with Params. There's no need
817  * to remake the TargetEntry nodes, so apply this to each list item
818  * separately.
819  */
820  if (path->param_info)
821  node = replace_nestloop_params(root, node);
822 
823  tle = makeTargetEntry((Expr *) node,
824  resno,
825  NULL,
826  false);
827  if (sortgrouprefs)
828  tle->ressortgroupref = sortgrouprefs[resno - 1];
829 
830  tlist = lappend(tlist, tle);
831  resno++;
832  }
833  return tlist;
834 }
835 
836 /*
837  * use_physical_tlist
838  * Decide whether to use a tlist matching relation structure,
839  * rather than only those Vars actually referenced.
840  */
841 static bool
842 use_physical_tlist(PlannerInfo *root, Path *path, int flags)
843 {
844  RelOptInfo *rel = path->parent;
845  int i;
846  ListCell *lc;
847 
848  /*
849  * Forget it if either exact tlist or small tlist is demanded.
850  */
851  if (flags & (CP_EXACT_TLIST | CP_SMALL_TLIST))
852  return false;
853 
854  /*
855  * We can do this for real relation scans, subquery scans, function scans,
856  * tablefunc scans, values scans, and CTE scans (but not for, eg, joins).
857  */
858  if (rel->rtekind != RTE_RELATION &&
859  rel->rtekind != RTE_SUBQUERY &&
860  rel->rtekind != RTE_FUNCTION &&
861  rel->rtekind != RTE_TABLEFUNC &&
862  rel->rtekind != RTE_VALUES &&
863  rel->rtekind != RTE_CTE)
864  return false;
865 
866  /*
867  * Can't do it with inheritance cases either (mainly because Append
868  * doesn't project; this test may be unnecessary now that
869  * create_append_plan instructs its children to return an exact tlist).
870  */
871  if (rel->reloptkind != RELOPT_BASEREL)
872  return false;
873 
874  /*
875  * Also, don't do it to a CustomPath; the premise that we're extracting
876  * columns from a simple physical tuple is unlikely to hold for those.
877  * (When it does make sense, the custom path creator can set up the path's
878  * pathtarget that way.)
879  */
880  if (IsA(path, CustomPath))
881  return false;
882 
883  /*
884  * If a bitmap scan's tlist is empty, keep it as-is. This may allow the
885  * executor to skip heap page fetches, and in any case, the benefit of
886  * using a physical tlist instead would be minimal.
887  */
888  if (IsA(path, BitmapHeapPath) &&
889  path->pathtarget->exprs == NIL)
890  return false;
891 
892  /*
893  * Can't do it if any system columns or whole-row Vars are requested.
894  * (This could possibly be fixed but would take some fragile assumptions
895  * in setrefs.c, I think.)
896  */
897  for (i = rel->min_attr; i <= 0; i++)
898  {
899  if (!bms_is_empty(rel->attr_needed[i - rel->min_attr]))
900  return false;
901  }
902 
903  /*
904  * Can't do it if the rel is required to emit any placeholder expressions,
905  * either.
906  */
907  foreach(lc, root->placeholder_list)
908  {
909  PlaceHolderInfo *phinfo = (PlaceHolderInfo *) lfirst(lc);
910 
911  if (bms_nonempty_difference(phinfo->ph_needed, rel->relids) &&
912  bms_is_subset(phinfo->ph_eval_at, rel->relids))
913  return false;
914  }
915 
916  /*
917  * Also, can't do it if CP_LABEL_TLIST is specified and path is requested
918  * to emit any sort/group columns that are not simple Vars. (If they are
919  * simple Vars, they should appear in the physical tlist, and
920  * apply_pathtarget_labeling_to_tlist will take care of getting them
921  * labeled again.) We also have to check that no two sort/group columns
922  * are the same Var, else that element of the physical tlist would need
923  * conflicting ressortgroupref labels.
924  */
925  if ((flags & CP_LABEL_TLIST) && path->pathtarget->sortgrouprefs)
926  {
927  Bitmapset *sortgroupatts = NULL;
928 
929  i = 0;
930  foreach(lc, path->pathtarget->exprs)
931  {
932  Expr *expr = (Expr *) lfirst(lc);
933 
934  if (path->pathtarget->sortgrouprefs[i])
935  {
936  if (expr && IsA(expr, Var))
937  {
938  int attno = ((Var *) expr)->varattno;
939 
939 
940  attno -= FirstLowInvalidHeapAttributeNumber;
941  if (bms_is_member(attno, sortgroupatts))
942  return false;
943  sortgroupatts = bms_add_member(sortgroupatts, attno);
944  }
945  else
946  return false;
947  }
948  i++;
949  }
950  }
951 
952  return true;
953 }
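The final check in use_physical_tlist() above rejects the physical tlist when two sort/group columns reference the same Var, because one tlist entry can carry only one ressortgroupref label. Below is a minimal standalone sketch of that duplicate test, using a plain 64-bit mask in place of a Bitmapset; it is illustrative only, and the made-up attribute numbers are assumed to fit in the mask.

#include <stdbool.h>
#include <stdio.h>

/*
 * Return true only if no attribute number repeats, mirroring the
 * sortgroupatts logic above.  Assumes 0 <= attno < 64 for the toy mask.
 */
static bool
attnos_all_distinct(const int *attnos, int n)
{
	unsigned long long seen = 0;

	for (int i = 0; i < n; i++)
	{
		unsigned long long bit = 1ULL << attnos[i];

		if (seen & bit)
			return false;		/* same column used by two sort/group refs */
		seen |= bit;
	}
	return true;
}

int
main(void)
{
	int distinct[] = {1, 3, 4};
	int repeated[] = {2, 5, 2};

	printf("%s\n", attnos_all_distinct(distinct, 3) ? "ok" : "conflict");
	printf("%s\n", attnos_all_distinct(repeated, 3) ? "ok" : "conflict");
	return 0;
}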
954 
955 /*
956  * get_gating_quals
957  * See if there are pseudoconstant quals in a node's quals list
958  *
959  * If the node's quals list includes any pseudoconstant quals,
960  * return just those quals.
961  */
962 static List *
963 get_gating_quals(PlannerInfo *root, List *quals)
964 {
965  /* No need to look if we know there are no pseudoconstants */
966  if (!root->hasPseudoConstantQuals)
967  return NIL;
968 
969  /* Sort into desirable execution order while still in RestrictInfo form */
970  quals = order_qual_clauses(root, quals);
971 
972  /* Pull out any pseudoconstant quals from the RestrictInfo list */
973  return extract_actual_clauses(quals, true);
974 }
975 
976 /*
977  * create_gating_plan
978  * Deal with pseudoconstant qual clauses
979  *
980  * Add a gating Result node atop the already-built plan.
981  */
982 static Plan *
983 create_gating_plan(PlannerInfo *root, Path *path, Plan *plan,
984  List *gating_quals)
985 {
986  Plan *gplan;
987  Plan *splan;
988 
989  Assert(gating_quals);
990 
991  /*
992  * We might have a trivial Result plan already. Stacking one Result atop
993  * another is silly, so if that applies, just discard the input plan.
994  * (We're assuming its targetlist is uninteresting; it should be either
995  * the same as the result of build_path_tlist, or a simplified version.)
996  */
997  splan = plan;
998  if (IsA(plan, Result))
999  {
1000  Result *rplan = (Result *) plan;
1001 
1002  if (rplan->plan.lefttree == NULL &&
1003  rplan->resconstantqual == NULL)
1004  splan = NULL;
1005  }
1006 
1007  /*
1008  * Since we need a Result node anyway, always return the path's requested
1009  * tlist; that's never a wrong choice, even if the parent node didn't ask
1010  * for CP_EXACT_TLIST.
1011  */
1012  gplan = (Plan *) make_result(build_path_tlist(root, path),
1013  (Node *) gating_quals,
1014  splan);
1015 
1016  /*
1017  * Notice that we don't change cost or size estimates when doing gating.
1018  * The costs of qual eval were already included in the subplan's cost.
1019  * Leaving the size alone amounts to assuming that the gating qual will
1020  * succeed, which is the conservative estimate for planning upper queries.
1021  * We certainly don't want to assume the output size is zero (unless the
1022  * gating qual is actually constant FALSE, and that case is dealt with in
1023  * clausesel.c). Interpolating between the two cases is silly, because it
1024  * doesn't reflect what will really happen at runtime, and besides which
1025  * in most cases we have only a very bad idea of the probability of the
1026  * gating qual being true.
1027  */
1028  copy_plan_costsize(gplan, plan);
1029 
1030  /* Gating quals could be unsafe, so better use the Path's safety flag */
1031  gplan->parallel_safe = path->parallel_safe;
1032 
1033  return gplan;
1034 }
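create_gating_plan() above wraps the scan or join in a Result node whose pseudoconstant quals are evaluated once per (re)scan rather than once per row; if the gate fails, the child is never pulled from at all. The following standalone toy sketch shows that one-time-qual behavior; it is not PostgreSQL code, and the row source and gate here are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy version of a gating Result: the pseudoconstant qual is checked once,
 * and the child rows are produced only if it held.
 */
static void
gated_scan(bool gate_qual, const int *child_rows, int nrows)
{
	if (!gate_qual)				/* one-time qual failed: emit nothing */
		return;
	for (int i = 0; i < nrows; i++)
		printf("row %d\n", child_rows[i]);
}

int
main(void)
{
	int rows[] = {10, 20, 30};

	gated_scan(true, rows, 3);	/* child rows pass through */
	gated_scan(false, rows, 3);	/* child is never scanned */
	return 0;
}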
1035 
1036 /*
1037  * create_join_plan
1038  * Create a join plan for 'best_path' and (recursively) plans for its
1039  * inner and outer paths.
1040  */
1041 static Plan *
1042 create_join_plan(PlannerInfo *root, JoinPath *best_path)
1043 {
1044  Plan *plan;
1045  List *gating_clauses;
1046 
1047  switch (best_path->path.pathtype)
1048  {
1049  case T_MergeJoin:
1050  plan = (Plan *) create_mergejoin_plan(root,
1051  (MergePath *) best_path);
1052  break;
1053  case T_HashJoin:
1054  plan = (Plan *) create_hashjoin_plan(root,
1055  (HashPath *) best_path);
1056  break;
1057  case T_NestLoop:
1058  plan = (Plan *) create_nestloop_plan(root,
1059  (NestPath *) best_path);
1060  break;
1061  default:
1062  elog(ERROR, "unrecognized node type: %d",
1063  (int) best_path->path.pathtype);
1064  plan = NULL; /* keep compiler quiet */
1065  break;
1066  }
1067 
1068  /*
1069  * If there are any pseudoconstant clauses attached to this node, insert a
1070  * gating Result node that evaluates the pseudoconstants as one-time
1071  * quals.
1072  */
1073  gating_clauses = get_gating_quals(root, best_path->joinrestrictinfo);
1074  if (gating_clauses)
1075  plan = create_gating_plan(root, (Path *) best_path, plan,
1076  gating_clauses);
1077 
1078 #ifdef NOT_USED
1079 
1080  /*
1081  * * Expensive function pullups may have pulled local predicates * into
1082  * this path node. Put them in the qpqual of the plan node. * JMH,
1083  * 6/15/92
1084  */
1085  if (get_loc_restrictinfo(best_path) != NIL)
1086  set_qpqual((Plan) plan,
1087  list_concat(get_qpqual((Plan) plan),
1088  get_actual_clauses(get_loc_restrictinfo(best_path))));
1089 #endif
1090 
1091  return plan;
1092 }
1093 
1094 /*
1095  * is_async_capable_path
1096  * Check whether a given Path node is async-capable.
1097  */
1098 static bool
1099 is_async_capable_path(Path *path)
1100 {
1101  switch (nodeTag(path))
1102  {
1103  case T_ForeignPath:
1104  {
1105  FdwRoutine *fdwroutine = path->parent->fdwroutine;
1106 
1107  Assert(fdwroutine != NULL);
1108  if (fdwroutine->IsForeignPathAsyncCapable != NULL &&
1109  fdwroutine->IsForeignPathAsyncCapable((ForeignPath *) path))
1110  return true;
1111  }
1112  break;
1113  default:
1114  break;
1115  }
1116  return false;
1117 }
1118 
1119 /*
1120  * create_append_plan
1121  * Create an Append plan for 'best_path' and (recursively) plans
1122  * for its subpaths.
1123  *
1124  * Returns a Plan node.
1125  */
1126 static Plan *
1127 create_append_plan(PlannerInfo *root, AppendPath *best_path, int flags)
1128 {
1129  Append *plan;
1130  List *tlist = build_path_tlist(root, &best_path->path);
1131  int orig_tlist_length = list_length(tlist);
1132  bool tlist_was_changed = false;
1133  List *pathkeys = best_path->path.pathkeys;
1134  List *subplans = NIL;
1135  ListCell *subpaths;
1136  int nasyncplans = 0;
1137  RelOptInfo *rel = best_path->path.parent;
1138  PartitionPruneInfo *partpruneinfo = NULL;
1139  int nodenumsortkeys = 0;
1140  AttrNumber *nodeSortColIdx = NULL;
1141  Oid *nodeSortOperators = NULL;
1142  Oid *nodeCollations = NULL;
1143  bool *nodeNullsFirst = NULL;
1144  bool consider_async = false;
1145 
1146  /*
1147  * The subpaths list could be empty, if every child was proven empty by
1148  * constraint exclusion. In that case generate a dummy plan that returns
1149  * no rows.
1150  *
1151  * Note that an AppendPath with no members is also generated in certain
1152  * cases where there was no appending construct at all, but we know the
1153  * relation is empty (see set_dummy_rel_pathlist and mark_dummy_rel).
1154  */
1155  if (best_path->subpaths == NIL)
1156  {
1157  /* Generate a Result plan with constant-FALSE gating qual */
1158  Plan *plan;
1159 
1160  plan = (Plan *) make_result(tlist,
1161  (Node *) list_make1(makeBoolConst(false,
1162  false)),
1163  NULL);
1164 
1165  copy_generic_path_info(plan, (Path *) best_path);
1166 
1167  return plan;
1168  }
1169 
1170  /*
1171  * Otherwise build an Append plan. Note that if there's just one child,
1172  * the Append is pretty useless; but we wait till setrefs.c to get rid of
1173  * it. Doing so here doesn't work because the varno of the child scan
1174  * plan won't match the parent-rel Vars it'll be asked to emit.
1175  *
1176  * We don't have the actual creation of the Append node split out into a
1177  * separate make_xxx function. This is because we want to run
1178  * prepare_sort_from_pathkeys on it before we do so on the individual
1179  * child plans, to make cross-checking the sort info easier.
1180  */
1181  plan = makeNode(Append);
1182  plan->plan.targetlist = tlist;
1183  plan->plan.qual = NIL;
1184  plan->plan.lefttree = NULL;
1185  plan->plan.righttree = NULL;
1186  plan->apprelids = rel->relids;
1187 
1188  if (pathkeys != NIL)
1189  {
1190  /*
1191  * Compute sort column info, and adjust the Append's tlist as needed.
1192  * Because we pass adjust_tlist_in_place = true, we may ignore the
1193  * function result; it must be the same plan node. However, we then
1194  * need to detect whether any tlist entries were added.
1195  */
1196  (void) prepare_sort_from_pathkeys((Plan *) plan, pathkeys,
1197  best_path->path.parent->relids,
1198  NULL,
1199  true,
1200  &nodenumsortkeys,
1201  &nodeSortColIdx,
1202  &nodeSortOperators,
1203  &nodeCollations,
1204  &nodeNullsFirst);
1205  tlist_was_changed = (orig_tlist_length != list_length(plan->plan.targetlist));
1206  }
1207 
1208  /* If appropriate, consider async append */
1209  consider_async = (enable_async_append && pathkeys == NIL &&
1210  !best_path->path.parallel_safe &&
1211  list_length(best_path->subpaths) > 1);
1212 
1213  /* Build the plan for each child */
1214  foreach(subpaths, best_path->subpaths)
1215  {
1216  Path *subpath = (Path *) lfirst(subpaths);
1217  Plan *subplan;
1218 
1219  /* Must insist that all children return the same tlist */
1220  subplan = create_plan_recurse(root, subpath, CP_EXACT_TLIST);
1221 
1222  /*
1223  * For ordered Appends, we must insert a Sort node if subplan isn't
1224  * sufficiently ordered.
1225  */
1226  if (pathkeys != NIL)
1227  {
1228  int numsortkeys;
1229  AttrNumber *sortColIdx;
1230  Oid *sortOperators;
1231  Oid *collations;
1232  bool *nullsFirst;
1233 
1234  /*
1235  * Compute sort column info, and adjust subplan's tlist as needed.
1236  * We must apply prepare_sort_from_pathkeys even to subplans that
1237  * don't need an explicit sort, to make sure they are returning
1238  * the same sort key columns the Append expects.
1239  */
1240  subplan = prepare_sort_from_pathkeys(subplan, pathkeys,
1241  subpath->parent->relids,
1242  nodeSortColIdx,
1243  false,
1244  &numsortkeys,
1245  &sortColIdx,
1246  &sortOperators,
1247  &collations,
1248  &nullsFirst);
1249 
1250  /*
1251  * Check that we got the same sort key information. We just
1252  * Assert that the sortops match, since those depend only on the
1253  * pathkeys; but it seems like a good idea to check the sort
1254  * column numbers explicitly, to ensure the tlists match up.
1255  */
1256  Assert(numsortkeys == nodenumsortkeys);
1257  if (memcmp(sortColIdx, nodeSortColIdx,
1258  numsortkeys * sizeof(AttrNumber)) != 0)
1259  elog(ERROR, "Append child's targetlist doesn't match Append");
1260  Assert(memcmp(sortOperators, nodeSortOperators,
1261  numsortkeys * sizeof(Oid)) == 0);
1262  Assert(memcmp(collations, nodeCollations,
1263  numsortkeys * sizeof(Oid)) == 0);
1264  Assert(memcmp(nullsFirst, nodeNullsFirst,
1265  numsortkeys * sizeof(bool)) == 0);
1266 
1267  /* Now, insert a Sort node if subplan isn't sufficiently ordered */
1268  if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
1269  {
1270  Sort *sort = make_sort(subplan, numsortkeys,
1271  sortColIdx, sortOperators,
1272  collations, nullsFirst);
1273 
1274  label_sort_with_costsize(root, sort, best_path->limit_tuples);
1275  subplan = (Plan *) sort;
1276  }
1277  }
1278 
1279  subplans = lappend(subplans, subplan);
1280 
1281  /* Check to see if subplan can be executed asynchronously */
1282  if (consider_async && is_async_capable_path(subpath))
1283  {
1284  subplan->async_capable = true;
1285  ++nasyncplans;
1286  }
1287  }
1288 
1289  /*
1290  * If any quals exist, they may be useful to perform further partition
1291  * pruning during execution. Gather information needed by the executor to
1292  * do partition pruning.
1293  */
1294  if (enable_partition_pruning)
1295  {
1296  List *prunequal;
1297 
1298  prunequal = extract_actual_clauses(rel->baserestrictinfo, false);
1299 
1300  if (best_path->path.param_info)
1301  {
1302  List *prmquals = best_path->path.param_info->ppi_clauses;
1303 
1304  prmquals = extract_actual_clauses(prmquals, false);
1305  prmquals = (List *) replace_nestloop_params(root,
1306  (Node *) prmquals);
1307 
1308  prunequal = list_concat(prunequal, prmquals);
1309  }
1310 
1311  if (prunequal != NIL)
1312  partpruneinfo =
1313  make_partition_pruneinfo(root, rel,
1314  best_path->subpaths,
1315  prunequal);
1316  }
1317 
1318  plan->appendplans = subplans;
1319  plan->nasyncplans = nasyncplans;
1320  plan->first_partial_plan = best_path->first_partial_path;
1321  plan->part_prune_info = partpruneinfo;
1322 
1323  copy_generic_path_info(&plan->plan, (Path *) best_path);
1324 
1325  /*
1326  * If prepare_sort_from_pathkeys added sort columns, but we were told to
1327  * produce either the exact tlist or a narrow tlist, we should get rid of
1328  * the sort columns again. We must inject a projection node to do so.
1329  */
1330  if (tlist_was_changed && (flags & (CP_EXACT_TLIST | CP_SMALL_TLIST)))
1331  {
1332  tlist = list_truncate(list_copy(plan->plan.targetlist),
1333  orig_tlist_length);
1334  return inject_projection_plan((Plan *) plan, tlist,
1335  plan->plan.parallel_safe);
1336  }
1337  else
1338  return (Plan *) plan;
1339 }
1340 
1341 /*
1342  * create_merge_append_plan
1343  * Create a MergeAppend plan for 'best_path' and (recursively) plans
1344  * for its subpaths.
1345  *
1346  * Returns a Plan node.
1347  */
1348 static Plan *
1349 create_merge_append_plan(PlannerInfo *root, MergeAppendPath *best_path,
1350  int flags)
1351 {
1352  MergeAppend *node = makeNode(MergeAppend);
1353  Plan *plan = &node->plan;
1354  List *tlist = build_path_tlist(root, &best_path->path);
1355  int orig_tlist_length = list_length(tlist);
1356  bool tlist_was_changed;
1357  List *pathkeys = best_path->path.pathkeys;
1358  List *subplans = NIL;
1359  ListCell *subpaths;
1360  RelOptInfo *rel = best_path->path.parent;
1361  PartitionPruneInfo *partpruneinfo = NULL;
1362 
1363  /*
1364  * We don't have the actual creation of the MergeAppend node split out
1365  * into a separate make_xxx function. This is because we want to run
1366  * prepare_sort_from_pathkeys on it before we do so on the individual
1367  * child plans, to make cross-checking the sort info easier.
1368  */
1369  copy_generic_path_info(plan, (Path *) best_path);
1370  plan->targetlist = tlist;
1371  plan->qual = NIL;
1372  plan->lefttree = NULL;
1373  plan->righttree = NULL;
1374  node->apprelids = rel->relids;
1375 
1376  /*
1377  * Compute sort column info, and adjust MergeAppend's tlist as needed.
1378  * Because we pass adjust_tlist_in_place = true, we may ignore the
1379  * function result; it must be the same plan node. However, we then need
1380  * to detect whether any tlist entries were added.
1381  */
1382  (void) prepare_sort_from_pathkeys(plan, pathkeys,
1383  best_path->path.parent->relids,
1384  NULL,
1385  true,
1386  &node->numCols,
1387  &node->sortColIdx,
1388  &node->sortOperators,
1389  &node->collations,
1390  &node->nullsFirst);
1391  tlist_was_changed = (orig_tlist_length != list_length(plan->targetlist));
1392 
1393  /*
1394  * Now prepare the child plans. We must apply prepare_sort_from_pathkeys
1395  * even to subplans that don't need an explicit sort, to make sure they
1396  * are returning the same sort key columns the MergeAppend expects.
1397  */
1398  foreach(subpaths, best_path->subpaths)
1399  {
1400  Path *subpath = (Path *) lfirst(subpaths);
1401  Plan *subplan;
1402  int numsortkeys;
1403  AttrNumber *sortColIdx;
1404  Oid *sortOperators;
1405  Oid *collations;
1406  bool *nullsFirst;
1407 
1408  /* Build the child plan */
1409  /* Must insist that all children return the same tlist */
1410  subplan = create_plan_recurse(root, subpath, CP_EXACT_TLIST);
1411 
1412  /* Compute sort column info, and adjust subplan's tlist as needed */
1413  subplan = prepare_sort_from_pathkeys(subplan, pathkeys,
1414  subpath->parent->relids,
1415  node->sortColIdx,
1416  false,
1417  &numsortkeys,
1418  &sortColIdx,
1419  &sortOperators,
1420  &collations,
1421  &nullsFirst);
1422 
1423  /*
1424  * Check that we got the same sort key information. We just Assert
1425  * that the sortops match, since those depend only on the pathkeys;
1426  * but it seems like a good idea to check the sort column numbers
1427  * explicitly, to ensure the tlists really do match up.
1428  */
1429  Assert(numsortkeys == node->numCols);
1430  if (memcmp(sortColIdx, node->sortColIdx,
1431  numsortkeys * sizeof(AttrNumber)) != 0)
1432  elog(ERROR, "MergeAppend child's targetlist doesn't match MergeAppend");
1433  Assert(memcmp(sortOperators, node->sortOperators,
1434  numsortkeys * sizeof(Oid)) == 0);
1435  Assert(memcmp(collations, node->collations,
1436  numsortkeys * sizeof(Oid)) == 0);
1437  Assert(memcmp(nullsFirst, node->nullsFirst,
1438  numsortkeys * sizeof(bool)) == 0);
1439 
1440  /* Now, insert a Sort node if subplan isn't sufficiently ordered */
1441  if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
1442  {
1443  Sort *sort = make_sort(subplan, numsortkeys,
1444  sortColIdx, sortOperators,
1445  collations, nullsFirst);
1446 
1447  label_sort_with_costsize(root, sort, best_path->limit_tuples);
1448  subplan = (Plan *) sort;
1449  }
1450 
1451  subplans = lappend(subplans, subplan);
1452  }
1453 
1454  /*
1455  * If any quals exist, they may be useful to perform further partition
1456  * pruning during execution. Gather information needed by the executor to
1457  * do partition pruning.
1458  */
1459  if (enable_partition_pruning)
1460  {
1461  List *prunequal;
1462 
1463  prunequal = extract_actual_clauses(rel->baserestrictinfo, false);
1464 
1465  if (best_path->path.param_info)
1466  {
1467  List *prmquals = best_path->path.param_info->ppi_clauses;
1468 
1469  prmquals = extract_actual_clauses(prmquals, false);
1470  prmquals = (List *) replace_nestloop_params(root,
1471  (Node *) prmquals);
1472 
1473  prunequal = list_concat(prunequal, prmquals);
1474  }
1475 
1476  if (prunequal != NIL)
1477  partpruneinfo = make_partition_pruneinfo(root, rel,
1478  best_path->subpaths,
1479  prunequal);
1480  }
1481 
1482  node->mergeplans = subplans;
1483  node->part_prune_info = partpruneinfo;
1484 
1485  /*
1486  * If prepare_sort_from_pathkeys added sort columns, but we were told to
1487  * produce either the exact tlist or a narrow tlist, we should get rid of
1488  * the sort columns again. We must inject a projection node to do so.
1489  */
1490  if (tlist_was_changed && (flags & (CP_EXACT_TLIST | CP_SMALL_TLIST)))
1491  {
1492  tlist = list_truncate(list_copy(plan->targetlist), orig_tlist_length);
1493  return inject_projection_plan(plan, tlist, plan->parallel_safe);
1494  }
1495  else
1496  return plan;
1497 }
1498 
1499 /*
1500  * create_group_result_plan
1501  * Create a Result plan for 'best_path'.
1502  * This is only used for degenerate grouping cases.
1503  *
1504  * Returns a Plan node.
1505  */
1506 static Result *
1507 create_group_result_plan(PlannerInfo *root, GroupResultPath *best_path)
1508 {
1509  Result *plan;
1510  List *tlist;
1511  List *quals;
1512 
1513  tlist = build_path_tlist(root, &best_path->path);
1514 
1515  /* best_path->quals is just bare clauses */
1516  quals = order_qual_clauses(root, best_path->quals);
1517 
1518  plan = make_result(tlist, (Node *) quals, NULL);
1519 
1520  copy_generic_path_info(&plan->plan, (Path *) best_path);
1521 
1522  return plan;
1523 }
1524 
1525 /*
1526  * create_project_set_plan
1527  * Create a ProjectSet plan for 'best_path'.
1528  *
1529  * Returns a Plan node.
1530  */
1531 static ProjectSet *
1532 create_project_set_plan(PlannerInfo *root, ProjectSetPath *best_path)
1533 {
1534  ProjectSet *plan;
1535  Plan *subplan;
1536  List *tlist;
1537 
1538  /* Since we intend to project, we don't need to constrain child tlist */
1539  subplan = create_plan_recurse(root, best_path->subpath, 0);
1540 
1541  tlist = build_path_tlist(root, &best_path->path);
1542 
1543  plan = make_project_set(tlist, subplan);
1544 
1545  copy_generic_path_info(&plan->plan, (Path *) best_path);
1546 
1547  return plan;
1548 }
1549 
1550 /*
1551  * create_material_plan
1552  * Create a Material plan for 'best_path' and (recursively) plans
1553  * for its subpaths.
1554  *
1555  * Returns a Plan node.
1556  */
1557 static Material *
1558 create_material_plan(PlannerInfo *root, MaterialPath *best_path, int flags)
1559 {
1560  Material *plan;
1561  Plan *subplan;
1562 
1563  /*
1564  * We don't want any excess columns in the materialized tuples, so request
1565  * a smaller tlist. Otherwise, since Material doesn't project, tlist
1566  * requirements pass through.
1567  */
1568  subplan = create_plan_recurse(root, best_path->subpath,
1569  flags | CP_SMALL_TLIST);
1570 
1571  plan = make_material(subplan);
1572 
1573  copy_generic_path_info(&plan->plan, (Path *) best_path);
1574 
1575  return plan;
1576 }
1577 
1578 /*
1579  * create_memoize_plan
1580  * Create a Memoize plan for 'best_path' and (recursively) plans for its
1581  * subpaths.
1582  *
1583  * Returns a Plan node.
1584  */
1585 static Memoize *
1586 create_memoize_plan(PlannerInfo *root, MemoizePath *best_path, int flags)
1587 {
1588  Memoize *plan;
1589  Bitmapset *keyparamids;
1590  Plan *subplan;
1591  Oid *operators;
1592  Oid *collations;
1593  List *param_exprs = NIL;
1594  ListCell *lc;
1595  ListCell *lc2;
1596  int nkeys;
1597  int i;
1598 
1599  subplan = create_plan_recurse(root, best_path->subpath,
1600  flags | CP_SMALL_TLIST);
1601 
1602  param_exprs = (List *) replace_nestloop_params(root, (Node *)
1603  best_path->param_exprs);
1604 
1605  nkeys = list_length(param_exprs);
1606  Assert(nkeys > 0);
1607  operators = palloc(nkeys * sizeof(Oid));
1608  collations = palloc(nkeys * sizeof(Oid));
1609 
1610  i = 0;
1611  forboth(lc, param_exprs, lc2, best_path->hash_operators)
1612  {
1613  Expr *param_expr = (Expr *) lfirst(lc);
1614  Oid opno = lfirst_oid(lc2);
1615 
1616  operators[i] = opno;
1617  collations[i] = exprCollation((Node *) param_expr);
1618  i++;
1619  }
1620 
1621  keyparamids = pull_paramids((Expr *) param_exprs);
1622 
1623  plan = make_memoize(subplan, operators, collations, param_exprs,
1624  best_path->singlerow, best_path->binary_mode,
1625  best_path->est_entries, keyparamids);
1626 
1627  copy_generic_path_info(&plan->plan, (Path *) best_path);
1628 
1629  return plan;
1630 }
1631 
1632 /*
1633  * create_unique_plan
1634  * Create a Unique plan for 'best_path' and (recursively) plans
1635  * for its subpaths.
1636  *
1637  * Returns a Plan node.
1638  */
1639 static Plan *
1640 create_unique_plan(PlannerInfo *root, UniquePath *best_path, int flags)
1641 {
1642  Plan *plan;
1643  Plan *subplan;
1644  List *in_operators;
1645  List *uniq_exprs;
1646  List *newtlist;
1647  int nextresno;
1648  bool newitems;
1649  int numGroupCols;
1650  AttrNumber *groupColIdx;
1651  Oid *groupCollations;
1652  int groupColPos;
1653  ListCell *l;
1654 
1655  /* Unique doesn't project, so tlist requirements pass through */
1656  subplan = create_plan_recurse(root, best_path->subpath, flags);
1657 
1658  /* Done if we don't need to do any actual unique-ifying */
1659  if (best_path->umethod == UNIQUE_PATH_NOOP)
1660  return subplan;
1661 
1662  /*
1663  * As constructed, the subplan has a "flat" tlist containing just the Vars
1664  * needed here and at upper levels. The values we are supposed to
1665  * unique-ify may be expressions in these variables. We have to add any
1666  * such expressions to the subplan's tlist.
1667  *
1668  * The subplan may have a "physical" tlist if it is a simple scan plan. If
1669  * we're going to sort, this should be reduced to the regular tlist, so
1670  * that we don't sort more data than we need to. For hashing, the tlist
1671  * should be left as-is if we don't need to add any expressions; but if we
1672  * do have to add expressions, then a projection step will be needed at
1673  * runtime anyway, so we may as well remove unneeded items. Therefore
1674  * newtlist starts from build_path_tlist() not just a copy of the
1675  * subplan's tlist; and we don't install it into the subplan unless we are
1676  * sorting or stuff has to be added.
1677  */
1678  in_operators = best_path->in_operators;
1679  uniq_exprs = best_path->uniq_exprs;
1680 
1681  /* initialize modified subplan tlist as just the "required" vars */
1682  newtlist = build_path_tlist(root, &best_path->path);
1683  nextresno = list_length(newtlist) + 1;
1684  newitems = false;
1685 
1686  foreach(l, uniq_exprs)
1687  {
1688  Expr *uniqexpr = lfirst(l);
1689  TargetEntry *tle;
1690 
1691  tle = tlist_member(uniqexpr, newtlist);
1692  if (!tle)
1693  {
1694  tle = makeTargetEntry((Expr *) uniqexpr,
1695  nextresno,
1696  NULL,
1697  false);
1698  newtlist = lappend(newtlist, tle);
1699  nextresno++;
1700  newitems = true;
1701  }
1702  }
1703 
1704  /* Use change_plan_targetlist in case we need to insert a Result node */
1705  if (newitems || best_path->umethod == UNIQUE_PATH_SORT)
1706  subplan = change_plan_targetlist(subplan, newtlist,
1707  best_path->path.parallel_safe);
1708 
1709  /*
1710  * Build control information showing which subplan output columns are to
1711  * be examined by the grouping step. Unfortunately we can't merge this
1712  * with the previous loop, since we didn't then know which version of the
1713  * subplan tlist we'd end up using.
1714  */
1715  newtlist = subplan->targetlist;
1716  numGroupCols = list_length(uniq_exprs);
1717  groupColIdx = (AttrNumber *) palloc(numGroupCols * sizeof(AttrNumber));
1718  groupCollations = (Oid *) palloc(numGroupCols * sizeof(Oid));
1719 
1720  groupColPos = 0;
1721  foreach(l, uniq_exprs)
1722  {
1723  Expr *uniqexpr = lfirst(l);
1724  TargetEntry *tle;
1725 
1726  tle = tlist_member(uniqexpr, newtlist);
1727  if (!tle) /* shouldn't happen */
1728  elog(ERROR, "failed to find unique expression in subplan tlist");
1729  groupColIdx[groupColPos] = tle->resno;
1730  groupCollations[groupColPos] = exprCollation((Node *) tle->expr);
1731  groupColPos++;
1732  }
1733 
1734  if (best_path->umethod == UNIQUE_PATH_HASH)
1735  {
1736  Oid *groupOperators;
1737 
1738  /*
1739  * Get the hashable equality operators for the Agg node to use.
1740  * Normally these are the same as the IN clause operators, but if
1741  * those are cross-type operators then the equality operators are the
1742  * ones for the IN clause operators' RHS datatype.
1743  */
1744  groupOperators = (Oid *) palloc(numGroupCols * sizeof(Oid));
1745  groupColPos = 0;
1746  foreach(l, in_operators)
1747  {
1748  Oid in_oper = lfirst_oid(l);
1749  Oid eq_oper;
1750 
1751  if (!get_compatible_hash_operators(in_oper, NULL, &eq_oper))
1752  elog(ERROR, "could not find compatible hash operator for operator %u",
1753  in_oper);
1754  groupOperators[groupColPos++] = eq_oper;
1755  }
1756 
1757  /*
1758  * Since the Agg node is going to project anyway, we can give it the
1759  * minimum output tlist, without any stuff we might have added to the
1760  * subplan tlist.
1761  */
1762  plan = (Plan *) make_agg(build_path_tlist(root, &best_path->path),
1763  NIL,
1764  AGG_HASHED,
1765  AGGSPLIT_SIMPLE,
1766  numGroupCols,
1767  groupColIdx,
1768  groupOperators,
1769  groupCollations,
1770  NIL,
1771  NIL,
1772  best_path->path.rows,
1773  0,
1774  subplan);
1775  }
1776  else
1777  {
1778  List *sortList = NIL;
1779  Sort *sort;
1780 
1781  /* Create an ORDER BY list to sort the input compatibly */
1782  groupColPos = 0;
1783  foreach(l, in_operators)
1784  {
1785  Oid in_oper = lfirst_oid(l);
1786  Oid sortop;
1787  Oid eqop;
1788  TargetEntry *tle;
1789  SortGroupClause *sortcl;
1790 
1791  sortop = get_ordering_op_for_equality_op(in_oper, false);
1792  if (!OidIsValid(sortop)) /* shouldn't happen */
1793  elog(ERROR, "could not find ordering operator for equality operator %u",
1794  in_oper);
1795 
1796  /*
1797  * The Unique node will need equality operators. Normally these
1798  * are the same as the IN clause operators, but if those are
1799  * cross-type operators then the equality operators are the ones
1800  * for the IN clause operators' RHS datatype.
1801  */
1802  eqop = get_equality_op_for_ordering_op(sortop, NULL);
1803  if (!OidIsValid(eqop)) /* shouldn't happen */
1804  elog(ERROR, "could not find equality operator for ordering operator %u",
1805  sortop);
1806 
1807  tle = get_tle_by_resno(subplan->targetlist,
1808  groupColIdx[groupColPos]);
1809  Assert(tle != NULL);
1810 
1811  sortcl = makeNode(SortGroupClause);
1812  sortcl->tleSortGroupRef = assignSortGroupRef(tle,
1813  subplan->targetlist);
1814  sortcl->eqop = eqop;
1815  sortcl->sortop = sortop;
1816  sortcl->nulls_first = false;
1817  sortcl->hashable = false; /* no need to make this accurate */
1818  sortList = lappend(sortList, sortcl);
1819  groupColPos++;
1820  }
1821  sort = make_sort_from_sortclauses(sortList, subplan);
1822  label_sort_with_costsize(root, sort, -1.0);
1823  plan = (Plan *) make_unique_from_sortclauses((Plan *) sort, sortList);
1824  }
1825 
1826  /* Copy cost data from Path to Plan */
1827  copy_generic_path_info(plan, &best_path->path);
1828 
1829  return plan;
1830 }
1831 
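/*
 * Editor's illustrative sketch (not part of createplan.c): the hashed branch
 * of create_unique_plan() resolves each IN-clause operator to a hashable
 * equality operator before building the Agg node.  In isolation, assuming a
 * hypothetical in_operator OID, that lookup is just:
 */
#ifdef CREATEPLAN_EXAMPLES			/* hypothetical guard, examples only */
static Oid
example_lookup_hash_eq_operator(Oid in_operator)
{
	Oid			eq_oper;

	if (!get_compatible_hash_operators(in_operator, NULL, &eq_oper))
		elog(ERROR, "could not find compatible hash operator for operator %u",
			 in_operator);
	/* for cross-type operators this is the RHS datatype's equality operator */
	return eq_oper;
}
#endif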
1832 /*
1833  * create_gather_plan
1834  *
1835  * Create a Gather plan for 'best_path' and (recursively) plans
1836  * for its subpaths.
1837  */
1838 static Gather *
1839 create_gather_plan(PlannerInfo *root, GatherPath *best_path)
1840 {
1841  Gather *gather_plan;
1842  Plan *subplan;
1843  List *tlist;
1844 
1845  /*
1846  * Push projection down to the child node. That way, the projection work
1847  * is parallelized, and there can be no system columns in the result (they
1848  * can't travel through a tuple queue because it uses MinimalTuple
1849  * representation).
1850  */
1851  subplan = create_plan_recurse(root, best_path->subpath, CP_EXACT_TLIST);
1852 
1853  tlist = build_path_tlist(root, &best_path->path);
1854 
1855  gather_plan = make_gather(tlist,
1856  NIL,
1857  best_path->num_workers,
1858  assign_special_exec_param(root),
1859  best_path->single_copy,
1860  subplan);
1861 
1862  copy_generic_path_info(&gather_plan->plan, &best_path->path);
1863 
1864  /* use parallel mode for parallel plans. */
1865  root->glob->parallelModeNeeded = true;
1866 
1867  return gather_plan;
1868 }
1869 
1870 /*
1871  * create_gather_merge_plan
1872  *
1873  * Create a Gather Merge plan for 'best_path' and (recursively)
1874  * plans for its subpaths.
1875  */
1876 static GatherMerge *
1877 create_gather_merge_plan(PlannerInfo *root, GatherMergePath *best_path)
1878 {
1879  GatherMerge *gm_plan;
1880  Plan *subplan;
1881  List *pathkeys = best_path->path.pathkeys;
1882  List *tlist = build_path_tlist(root, &best_path->path);
1883 
1884  /* As with Gather, project away columns in the workers. */
1885  subplan = create_plan_recurse(root, best_path->subpath, CP_EXACT_TLIST);
1886 
1887  /* Create a shell for a GatherMerge plan. */
1888  gm_plan = makeNode(GatherMerge);
1889  gm_plan->plan.targetlist = tlist;
1890  gm_plan->num_workers = best_path->num_workers;
1891  copy_generic_path_info(&gm_plan->plan, &best_path->path);
1892 
1893  /* Assign the rescan Param. */
1894  gm_plan->rescan_param = assign_special_exec_param(root);
1895 
1896  /* Gather Merge is pointless with no pathkeys; use Gather instead. */
1897  Assert(pathkeys != NIL);
1898 
1899  /* Compute sort column info, and adjust subplan's tlist as needed */
1900  subplan = prepare_sort_from_pathkeys(subplan, pathkeys,
1901  best_path->subpath->parent->relids,
1902  gm_plan->sortColIdx,
1903  false,
1904  &gm_plan->numCols,
1905  &gm_plan->sortColIdx,
1906  &gm_plan->sortOperators,
1907  &gm_plan->collations,
1908  &gm_plan->nullsFirst);
1909 
1910 
1911  /*
1912  * All gather merge paths should have already guaranteed the necessary
1913  * sort order either by adding an explicit sort node or by using presorted
1914  * input. We can't simply add a sort here on additional pathkeys, because
1915  * we can't guarantee the sort would be safe. For example, expressions may
1916  * be volatile or otherwise parallel unsafe.
1917  */
1918  if (!pathkeys_contained_in(pathkeys, best_path->subpath->pathkeys))
1919  elog(ERROR, "gather merge input not sufficiently sorted");
1920 
1921  /* Now insert the subplan under GatherMerge. */
1922  gm_plan->plan.lefttree = subplan;
1923 
1924  /* use parallel mode for parallel plans. */
1925  root->glob->parallelModeNeeded = true;
1926 
1927  return gm_plan;
1928 }
1929 
1930 /*
1931  * create_projection_plan
1932  *
1933  * Create a plan tree to do a projection step and (recursively) plans
1934  * for its subpaths. We may need a Result node for the projection,
1935  * but sometimes we can just let the subplan do the work.
1936  */
1937 static Plan *
1938 create_projection_plan(PlannerInfo *root, ProjectionPath *best_path, int flags)
1939 {
1940  Plan *plan;
1941  Plan *subplan;
1942  List *tlist;
1943  bool needs_result_node = false;
1944 
1945  /*
1946  * Convert our subpath to a Plan and determine whether we need a Result
1947  * node.
1948  *
1949  * In most cases where we don't need to project, create_projection_path
1950  * will have set dummypp, but not always. First, some createplan.c
1951  * routines change the tlists of their nodes. (An example is that
1952  * create_merge_append_plan might add resjunk sort columns to a
1953  * MergeAppend.) Second, create_projection_path has no way of knowing
1954  * what path node will be placed on top of the projection path and
1955  * therefore can't predict whether it will require an exact tlist. For
1956  * both of these reasons, we have to recheck here.
1957  */
1958  if (use_physical_tlist(root, &best_path->path, flags))
1959  {
1960  /*
1961  * Our caller doesn't really care what tlist we return, so we don't
1962  * actually need to project. However, we may still need to ensure
1963  * proper sortgroupref labels, if the caller cares about those.
1964  */
1965  subplan = create_plan_recurse(root, best_path->subpath, 0);
1966  tlist = subplan->targetlist;
1967  if (flags & CP_LABEL_TLIST)
1968  apply_pathtarget_labeling_to_tlist(tlist,
1969  best_path->path.pathtarget);
1970  }
1971  else if (is_projection_capable_path(best_path->subpath))
1972  {
1973  /*
1974  * Our caller requires that we return the exact tlist, but no separate
1975  * result node is needed because the subpath is projection-capable.
1976  * Tell create_plan_recurse that we're going to ignore the tlist it
1977  * produces.
1978  */
1979  subplan = create_plan_recurse(root, best_path->subpath,
1980  CP_IGNORE_TLIST);
1981  Assert(is_projection_capable_plan(subplan));
1982  tlist = build_path_tlist(root, &best_path->path);
1983  }
1984  else
1985  {
1986  /*
1987  * It looks like we need a result node, unless by good fortune the
1988  * requested tlist is exactly the one the child wants to produce.
1989  */
1990  subplan = create_plan_recurse(root, best_path->subpath, 0);
1991  tlist = build_path_tlist(root, &best_path->path);
1992  needs_result_node = !tlist_same_exprs(tlist, subplan->targetlist);
1993  }
1994 
1995  /*
1996  * If we make a different decision about whether to include a Result node
1997  * than create_projection_path did, we'll have made slightly wrong cost
1998  * estimates; but label the plan with the cost estimates we actually used,
1999  * not "corrected" ones. (XXX this could be cleaned up if we moved more
2000  * of the sortcolumn setup logic into Path creation, but that would add
2001  * expense to creating Paths we might end up not using.)
2002  */
2003  if (!needs_result_node)
2004  {
2005  /* Don't need a separate Result, just assign tlist to subplan */
2006  plan = subplan;
2007  plan->targetlist = tlist;
2008 
2009  /* Label plan with the estimated costs we actually used */
2010  plan->startup_cost = best_path->path.startup_cost;
2011  plan->total_cost = best_path->path.total_cost;
2012  plan->plan_rows = best_path->path.rows;
2013  plan->plan_width = best_path->path.pathtarget->width;
2014  plan->parallel_safe = best_path->path.parallel_safe;
2015  /* ... but don't change subplan's parallel_aware flag */
2016  }
2017  else
2018  {
2019  /* We need a Result node */
2020  plan = (Plan *) make_result(tlist, NULL, subplan);
2021 
2022  copy_generic_path_info(plan, (Path *) best_path);
2023  }
2024 
2025  return plan;
2026 }
2027 
2028 /*
2029  * inject_projection_plan
2030  * Insert a Result node to do a projection step.
2031  *
2032  * This is used in a few places where we decide on-the-fly that we need a
2033  * projection step as part of the tree generated for some Path node.
2034  * We should try to get rid of this in favor of doing it more honestly.
2035  *
2036  * One reason it's ugly is we have to be told the right parallel_safe marking
2037  * to apply (since the tlist might be unsafe even if the child plan is safe).
2038  */
2039 static Plan *
2040 inject_projection_plan(Plan *subplan, List *tlist, bool parallel_safe)
2041 {
2042  Plan *plan;
2043 
2044  plan = (Plan *) make_result(tlist, NULL, subplan);
2045 
2046  /*
2047  * In principle, we should charge tlist eval cost plus cpu_per_tuple per
2048  * row for the Result node. But the former has probably been factored in
2049  * already and the latter was not accounted for during Path construction,
2050  * so being formally correct might just make the EXPLAIN output look less
2051  * consistent not more so. Hence, just copy the subplan's cost.
2052  */
2053  copy_plan_costsize(plan, subplan);
2054  plan->parallel_safe = parallel_safe;
2055 
2056  return plan;
2057 }
2058 
2059 /*
2060  * change_plan_targetlist
2061  * Externally available wrapper for inject_projection_plan.
2062  *
2063  * This is meant for use by FDW plan-generation functions, which might
2064  * want to adjust the tlist computed by some subplan tree. In general,
2065  * a Result node is needed to compute the new tlist, but we can optimize
2066  * some cases.
2067  *
2068  * In most cases, tlist_parallel_safe can just be passed as the parallel_safe
2069  * flag of the FDW's own Path node.
2070  */
2071 Plan *
2072 change_plan_targetlist(Plan *subplan, List *tlist, bool tlist_parallel_safe)
2073 {
2074  /*
2075  * If the top plan node can't do projections and its existing target list
2076  * isn't already what we need, we need to add a Result node to help it
2077  * along.
2078  */
2079  if (!is_projection_capable_plan(subplan) &&
2080  !tlist_same_exprs(tlist, subplan->targetlist))
2081  subplan = inject_projection_plan(subplan, tlist,
2082  subplan->parallel_safe &&
2083  tlist_parallel_safe);
2084  else
2085  {
2086  /* Else we can just replace the plan node's tlist */
2087  subplan->targetlist = tlist;
2088  subplan->parallel_safe &= tlist_parallel_safe;
2089  }
2090  return subplan;
2091 }
2092 
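/*
 * Editor's illustrative sketch (not part of createplan.c): an FDW's
 * plan-generation code might use change_plan_targetlist() as the comment
 * above suggests.  The wrapper name and calling context are hypothetical;
 * only the change_plan_targetlist() call itself comes from this file.
 */
#ifdef CREATEPLAN_EXAMPLES			/* hypothetical guard, examples only */
static Plan *
example_fdw_adjust_tlist(Plan *subplan, List *custom_tlist,
						 bool tlist_parallel_safe)
{
	/* a Result node is inserted only if the subplan cannot project */
	return change_plan_targetlist(subplan, custom_tlist, tlist_parallel_safe);
}
#endif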
2093 /*
2094  * create_sort_plan
2095  *
2096  * Create a Sort plan for 'best_path' and (recursively) plans
2097  * for its subpaths.
2098  */
2099 static Sort *
2100 create_sort_plan(PlannerInfo *root, SortPath *best_path, int flags)
2101 {
2102  Sort *plan;
2103  Plan *subplan;
2104 
2105  /*
2106  * We don't want any excess columns in the sorted tuples, so request a
2107  * smaller tlist. Otherwise, since Sort doesn't project, tlist
2108  * requirements pass through.
2109  */
2110  subplan = create_plan_recurse(root, best_path->subpath,
2111  flags | CP_SMALL_TLIST);
2112 
2113  /*
2114  * make_sort_from_pathkeys indirectly calls find_ec_member_matching_expr,
2115  * which will ignore any child EC members that don't belong to the given
2116  * relids. Thus, if this sort path is based on a child relation, we must
2117  * pass its relids.
2118  */
2119  plan = make_sort_from_pathkeys(subplan, best_path->path.pathkeys,
2120  IS_OTHER_REL(best_path->subpath->parent) ?
2121  best_path->path.parent->relids : NULL);
2122 
2123  copy_generic_path_info(&plan->plan, (Path *) best_path);
2124 
2125  return plan;
2126 }
2127 
2128 /*
2129  * create_incrementalsort_plan
2130  *
2131  * Do the same as create_sort_plan, but create IncrementalSort plan.
2132  */
2133 static IncrementalSort *
2134 create_incrementalsort_plan(PlannerInfo *root, IncrementalSortPath *best_path,
2135  int flags)
2136 {
2137  IncrementalSort *plan;
2138  Plan *subplan;
2139 
2140  /* See comments in create_sort_plan() above */
2141  subplan = create_plan_recurse(root, best_path->spath.subpath,
2142  flags | CP_SMALL_TLIST);
2143  plan = make_incrementalsort_from_pathkeys(subplan,
2144  best_path->spath.path.pathkeys,
2145  IS_OTHER_REL(best_path->spath.subpath->parent) ?
2146  best_path->spath.path.parent->relids : NULL,
2147  best_path->nPresortedCols);
2148 
2149  copy_generic_path_info(&plan->sort.plan, (Path *) best_path);
2150 
2151  return plan;
2152 }
2153 
2154 /*
2155  * create_group_plan
2156  *
2157  * Create a Group plan for 'best_path' and (recursively) plans
2158  * for its subpaths.
2159  */
2160 static Group *
2161 create_group_plan(PlannerInfo *root, GroupPath *best_path)
2162 {
2163  Group *plan;
2164  Plan *subplan;
2165  List *tlist;
2166  List *quals;
2167 
2168  /*
2169  * Group can project, so no need to be terribly picky about child tlist,
2170  * but we do need grouping columns to be available
2171  */
2172  subplan = create_plan_recurse(root, best_path->subpath, CP_LABEL_TLIST);
2173 
2174  tlist = build_path_tlist(root, &best_path->path);
2175 
2176  quals = order_qual_clauses(root, best_path->qual);
2177 
2178  plan = make_group(tlist,
2179  quals,
2180  list_length(best_path->groupClause),
2181  extract_grouping_cols(best_path->groupClause,
2182  subplan->targetlist),
2183  extract_grouping_ops(best_path->groupClause),
2184  extract_grouping_collations(best_path->groupClause,
2185  subplan->targetlist),
2186  subplan);
2187 
2188  copy_generic_path_info(&plan->plan, (Path *) best_path);
2189 
2190  return plan;
2191 }
2192 
2193 /*
2194  * create_upper_unique_plan
2195  *
2196  * Create a Unique plan for 'best_path' and (recursively) plans
2197  * for its subpaths.
2198  */
2199 static Unique *
2200 create_upper_unique_plan(PlannerInfo *root, UpperUniquePath *best_path, int flags)
2201 {
2202  Unique *plan;
2203  Plan *subplan;
2204 
2205  /*
2206  * Unique doesn't project, so tlist requirements pass through; moreover we
2207  * need grouping columns to be labeled.
2208  */
2209  subplan = create_plan_recurse(root, best_path->subpath,
2210  flags | CP_LABEL_TLIST);
2211 
2212  plan = make_unique_from_pathkeys(subplan,
2213  best_path->path.pathkeys,
2214  best_path->numkeys);
2215 
2216  copy_generic_path_info(&plan->plan, (Path *) best_path);
2217 
2218  return plan;
2219 }
2220 
2221 /*
2222  * create_agg_plan
2223  *
2224  * Create an Agg plan for 'best_path' and (recursively) plans
2225  * for its subpaths.
2226  */
2227 static Agg *
2228 create_agg_plan(PlannerInfo *root, AggPath *best_path)
2229 {
2230  Agg *plan;
2231  Plan *subplan;
2232  List *tlist;
2233  List *quals;
2234 
2235  /*
2236  * Agg can project, so no need to be terribly picky about child tlist, but
2237  * we do need grouping columns to be available
2238  */
2239  subplan = create_plan_recurse(root, best_path->subpath, CP_LABEL_TLIST);
2240 
2241  tlist = build_path_tlist(root, &best_path->path);
2242 
2243  quals = order_qual_clauses(root, best_path->qual);
2244 
2245  plan = make_agg(tlist, quals,
2246  best_path->aggstrategy,
2247  best_path->aggsplit,
2248  list_length(best_path->groupClause),
2249  extract_grouping_cols(best_path->groupClause,
2250  subplan->targetlist),
2251  extract_grouping_ops(best_path->groupClause),
2252  extract_grouping_collations(best_path->groupClause,
2253  subplan->targetlist),
2254  NIL,
2255  NIL,
2256  best_path->numGroups,
2257  best_path->transitionSpace,
2258  subplan);
2259 
2260  copy_generic_path_info(&plan->plan, (Path *) best_path);
2261 
2262  return plan;
2263 }
2264 
2265 /*
2266  * Given a groupclause for a collection of grouping sets, produce the
2267  * corresponding groupColIdx.
2268  *
2269  * root->grouping_map maps the tleSortGroupRef to the actual column position in
2270  * the input tuple. So we get the ref from the entries in the groupclause and
2271  * look them up there.
2272  */
2273 static AttrNumber *
2274 remap_groupColIdx(PlannerInfo *root, List *groupClause)
2275 {
2276  AttrNumber *grouping_map = root->grouping_map;
2277  AttrNumber *new_grpColIdx;
2278  ListCell *lc;
2279  int i;
2280 
2281  Assert(grouping_map);
2282 
2283  new_grpColIdx = palloc0(sizeof(AttrNumber) * list_length(groupClause));
2284 
2285  i = 0;
2286  foreach(lc, groupClause)
2287  {
2288  SortGroupClause *clause = lfirst(lc);
2289 
2290  new_grpColIdx[i++] = grouping_map[clause->tleSortGroupRef];
2291  }
2292 
2293  return new_grpColIdx;
2294 }
2295 
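/*
 * Editor's illustrative sketch (not part of createplan.c): with a
 * hypothetical grouping_map where sortgroupref 1 lives at column 3 of the
 * child tlist and sortgroupref 2 at column 1, remapping a grouping set that
 * lists refs (2, 1) is just two array lookups.
 */
#ifdef CREATEPLAN_EXAMPLES			/* hypothetical guard, examples only */
static void
example_remap_groupColIdx(void)
{
	AttrNumber	grouping_map[] = {0, 3, 1};	/* index 0 unused */
	AttrNumber	grpColIdx[2];

	grpColIdx[0] = grouping_map[2];	/* ref 2 -> column 1 */
	grpColIdx[1] = grouping_map[1];	/* ref 1 -> column 3 */
	(void) grpColIdx;
}
#endif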
2296 /*
2297  * create_groupingsets_plan
2298  * Create a plan for 'best_path' and (recursively) plans
2299  * for its subpaths.
2300  *
2301  * What we emit is an Agg plan with some vestigial Agg and Sort nodes
2302  * hanging off the side. The top Agg implements the last grouping set
2303  * specified in the GroupingSetsPath, and any additional grouping sets
2304  * each give rise to a subsidiary Agg and Sort node in the top Agg's
2305  * "chain" list. These nodes don't participate in the plan directly,
2306  * but they are a convenient way to represent the required data for
2307  * the extra steps.
2308  *
2309  * Returns a Plan node.
2310  */
2311 static Plan *
2312 create_groupingsets_plan(PlannerInfo *root, GroupingSetsPath *best_path)
2313 {
2314  Agg *plan;
2315  Plan *subplan;
2316  List *rollups = best_path->rollups;
2317  AttrNumber *grouping_map;
2318  int maxref;
2319  List *chain;
2320  ListCell *lc;
2321 
2322  /* Shouldn't get here without grouping sets */
2323  Assert(root->parse->groupingSets);
2324  Assert(rollups != NIL);
2325 
2326  /*
2327  * Agg can project, so no need to be terribly picky about child tlist, but
2328  * we do need grouping columns to be available
2329  */
2330  subplan = create_plan_recurse(root, best_path->subpath, CP_LABEL_TLIST);
2331 
2332  /*
2333  * Compute the mapping from tleSortGroupRef to column index in the child's
2334  * tlist. First, identify max SortGroupRef in groupClause, for array
2335  * sizing.
2336  */
2337  maxref = 0;
2338  foreach(lc, root->parse->groupClause)
2339  {
2340  SortGroupClause *gc = (SortGroupClause *) lfirst(lc);
2341 
2342  if (gc->tleSortGroupRef > maxref)
2343  maxref = gc->tleSortGroupRef;
2344  }
2345 
2346  grouping_map = (AttrNumber *) palloc0((maxref + 1) * sizeof(AttrNumber));
2347 
2348  /* Now look up the column numbers in the child's tlist */
2349  foreach(lc, root->parse->groupClause)
2350  {
2351  SortGroupClause *gc = (SortGroupClause *) lfirst(lc);
2352  TargetEntry *tle = get_sortgroupclause_tle(gc, subplan->targetlist);
2353 
2354  grouping_map[gc->tleSortGroupRef] = tle->resno;
2355  }
2356 
2357  /*
2358  * During setrefs.c, we'll need the grouping_map to fix up the cols lists
2359  * in GroupingFunc nodes. Save it for setrefs.c to use.
2360  */
2361  Assert(root->grouping_map == NULL);
2362  root->grouping_map = grouping_map;
2363 
2364  /*
2365  * Generate the side nodes that describe the other sort and group
2366  * operations besides the top one. Note that we don't worry about putting
2367  * accurate cost estimates in the side nodes; only the topmost Agg node's
2368  * costs will be shown by EXPLAIN.
2369  */
2370  chain = NIL;
2371  if (list_length(rollups) > 1)
2372  {
2373  bool is_first_sort = ((RollupData *) linitial(rollups))->is_hashed;
2374 
2375  for_each_from(lc, rollups, 1)
2376  {
2377  RollupData *rollup = lfirst(lc);
2378  AttrNumber *new_grpColIdx;
2379  Plan *sort_plan = NULL;
2380  Plan *agg_plan;
2381  AggStrategy strat;
2382 
2383  new_grpColIdx = remap_groupColIdx(root, rollup->groupClause);
2384 
2385  if (!rollup->is_hashed && !is_first_sort)
2386  {
2387  sort_plan = (Plan *)
2388  make_sort_from_groupcols(rollup->groupClause,
2389  new_grpColIdx,
2390  subplan);
2391  }
2392 
2393  if (!rollup->is_hashed)
2394  is_first_sort = false;
2395 
2396  if (rollup->is_hashed)
2397  strat = AGG_HASHED;
2398  else if (list_length(linitial(rollup->gsets)) == 0)
2399  strat = AGG_PLAIN;
2400  else
2401  strat = AGG_SORTED;
2402 
2403  agg_plan = (Plan *) make_agg(NIL,
2404  NIL,
2405  strat,
2406  AGGSPLIT_SIMPLE,
2407  list_length((List *) linitial(rollup->gsets)),
2408  new_grpColIdx,
2409  extract_grouping_ops(rollup->groupClause),
2410  extract_grouping_collations(rollup->groupClause, subplan->targetlist),
2411  rollup->gsets,
2412  NIL,
2413  rollup->numGroups,
2414  best_path->transitionSpace,
2415  sort_plan);
2416 
2417  /*
2418  * Remove stuff we don't need to avoid bloating debug output.
2419  */
2420  if (sort_plan)
2421  {
2422  sort_plan->targetlist = NIL;
2423  sort_plan->lefttree = NULL;
2424  }
2425 
2426  chain = lappend(chain, agg_plan);
2427  }
2428  }
2429 
2430  /*
2431  * Now make the real Agg node
2432  */
2433  {
2434  RollupData *rollup = linitial(rollups);
2435  AttrNumber *top_grpColIdx;
2436  int numGroupCols;
2437 
2438  top_grpColIdx = remap_groupColIdx(root, rollup->groupClause);
2439 
2440  numGroupCols = list_length((List *) linitial(rollup->gsets));
2441 
2442  plan = make_agg(build_path_tlist(root, &best_path->path),
2443  best_path->qual,
2444  best_path->aggstrategy,
2445  AGGSPLIT_SIMPLE,
2446  numGroupCols,
2447  top_grpColIdx,
2448  extract_grouping_ops(rollup->groupClause),
2449  extract_grouping_collations(rollup->groupClause, subplan->targetlist),
2450  rollup->gsets,
2451  chain,
2452  rollup->numGroups,
2453  best_path->transitionSpace,
2454  subplan);
2455 
2456  /* Copy cost data from Path to Plan */
2457  copy_generic_path_info(&plan->plan, &best_path->path);
2458  }
2459 
2460  return (Plan *) plan;
2461 }
2462 
2463 /*
2464  * create_minmaxagg_plan
2465  *
2466  * Create a Result plan for 'best_path' and (recursively) plans
2467  * for its subpaths.
2468  */
2469 static Result *
2470 create_minmaxagg_plan(PlannerInfo *root, MinMaxAggPath *best_path)
2471 {
2472  Result *plan;
2473  List *tlist;
2474  ListCell *lc;
2475 
2476  /* Prepare an InitPlan for each aggregate's subquery. */
2477  foreach(lc, best_path->mmaggregates)
2478  {
2479  MinMaxAggInfo *mminfo = (MinMaxAggInfo *) lfirst(lc);
2480  PlannerInfo *subroot = mminfo->subroot;
2481  Query *subparse = subroot->parse;
2482  Plan *plan;
2483 
2484  /*
2485  * Generate the plan for the subquery. We already have a Path, but we
2486  * have to convert it to a Plan and attach a LIMIT node above it.
2487  * Since we are entering a different planner context (subroot),
2488  * recurse to create_plan not create_plan_recurse.
2489  */
2490  plan = create_plan(subroot, mminfo->path);
2491 
2492  plan = (Plan *) make_limit(plan,
2493  subparse->limitOffset,
2494  subparse->limitCount,
2495  subparse->limitOption,
2496  0, NULL, NULL, NULL);
2497 
2498  /* Must apply correct cost/width data to Limit node */
2499  plan->startup_cost = mminfo->path->startup_cost;
2500  plan->total_cost = mminfo->pathcost;
2501  plan->plan_rows = 1;
2502  plan->plan_width = mminfo->path->pathtarget->width;
2503  plan->parallel_aware = false;
2504  plan->parallel_safe = mminfo->path->parallel_safe;
2505 
2506  /* Convert the plan into an InitPlan in the outer query. */
2507  SS_make_initplan_from_plan(root, subroot, plan, mminfo->param);
2508  }
2509 
2510  /* Generate the output plan --- basically just a Result */
2511  tlist = build_path_tlist(root, &best_path->path);
2512 
2513  plan = make_result(tlist, (Node *) best_path->quals, NULL);
2514 
2515  copy_generic_path_info(&plan->plan, (Path *) best_path);
2516 
2517  /*
2518  * During setrefs.c, we'll need to replace references to the Agg nodes
2519  * with InitPlan output params. (We can't just do that locally in the
2520  * MinMaxAgg node, because path nodes above here may have Agg references
2521  * as well.) Save the mmaggregates list to tell setrefs.c to do that.
2522  */
2523  Assert(root->minmax_aggs == NIL);
2524  root->minmax_aggs = best_path->mmaggregates;
2525 
2526  return plan;
2527 }
2528 
2529 /*
2530  * create_windowagg_plan
2531  *
2532  * Create a WindowAgg plan for 'best_path' and (recursively) plans
2533  * for its subpaths.
2534  */
2535 static WindowAgg *
2536 create_windowagg_plan(PlannerInfo *root, WindowAggPath *best_path)
2537 {
2538  WindowAgg *plan;
2539  WindowClause *wc = best_path->winclause;
2540  int numPart = list_length(wc->partitionClause);
2541  int numOrder = list_length(wc->orderClause);
2542  Plan *subplan;
2543  List *tlist;
2544  int partNumCols;
2545  AttrNumber *partColIdx;
2546  Oid *partOperators;
2547  Oid *partCollations;
2548  int ordNumCols;
2549  AttrNumber *ordColIdx;
2550  Oid *ordOperators;
2551  Oid *ordCollations;
2552  ListCell *lc;
2553 
2554  /*
2555  * Choice of tlist here is motivated by the fact that WindowAgg will be
2556  * storing the input rows of window frames in a tuplestore; it therefore
2557  * behooves us to request a small tlist to avoid wasting space. We do of
2558  * course need grouping columns to be available.
2559  */
2560  subplan = create_plan_recurse(root, best_path->subpath,
2561  CP_LABEL_TLIST | CP_SMALL_TLIST);
2562 
2563  tlist = build_path_tlist(root, &best_path->path);
2564 
2565  /*
2566  * Convert SortGroupClause lists into arrays of attr indexes and equality
2567  * operators, as wanted by executor. (Note: in principle, it's possible
2568  * to drop some of the sort columns, if they were proved redundant by
2569  * pathkey logic. However, it doesn't seem worth going out of our way to
2570  * optimize such cases. In any case, we must *not* remove the ordering
2571  * column for RANGE OFFSET cases, as the executor needs that for in_range
2572  * tests even if it's known to be equal to some partitioning column.)
2573  */
2574  partColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numPart);
2575  partOperators = (Oid *) palloc(sizeof(Oid) * numPart);
2576  partCollations = (Oid *) palloc(sizeof(Oid) * numPart);
2577 
2578  partNumCols = 0;
2579  foreach(lc, wc->partitionClause)
2580  {
2581  SortGroupClause *sgc = (SortGroupClause *) lfirst(lc);
2582  TargetEntry *tle = get_sortgroupclause_tle(sgc, subplan->targetlist);
2583 
2584  Assert(OidIsValid(sgc->eqop));
2585  partColIdx[partNumCols] = tle->resno;
2586  partOperators[partNumCols] = sgc->eqop;
2587  partCollations[partNumCols] = exprCollation((Node *) tle->expr);
2588  partNumCols++;
2589  }
2590 
2591  ordColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numOrder);
2592  ordOperators = (Oid *) palloc(sizeof(Oid) * numOrder);
2593  ordCollations = (Oid *) palloc(sizeof(Oid) * numOrder);
2594 
2595  ordNumCols = 0;
2596  foreach(lc, wc->orderClause)
2597  {
2598  SortGroupClause *sgc = (SortGroupClause *) lfirst(lc);
2599  TargetEntry *tle = get_sortgroupclause_tle(sgc, subplan->targetlist);
2600 
2601  Assert(OidIsValid(sgc->eqop));
2602  ordColIdx[ordNumCols] = tle->resno;
2603  ordOperators[ordNumCols] = sgc->eqop;
2604  ordCollations[ordNumCols] = exprCollation((Node *) tle->expr);
2605  ordNumCols++;
2606  }
2607 
2608  /* And finally we can make the WindowAgg node */
2609  plan = make_windowagg(tlist,
2610  wc->winref,
2611  partNumCols,
2612  partColIdx,
2613  partOperators,
2614  partCollations,
2615  ordNumCols,
2616  ordColIdx,
2617  ordOperators,
2618  ordCollations,
2619  wc->frameOptions,
2620  wc->startOffset,
2621  wc->endOffset,
2622  wc->startInRangeFunc,
2623  wc->endInRangeFunc,
2624  wc->inRangeColl,
2625  wc->inRangeAsc,
2626  wc->inRangeNullsFirst,
2627  subplan);
2628 
2629  copy_generic_path_info(&plan->plan, (Path *) best_path);
2630 
2631  return plan;
2632 }
2633 
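/*
 * Editor's illustrative sketch (not part of createplan.c): the partition and
 * order clause loops in create_windowagg_plan() follow the same pattern ---
 * resolve each SortGroupClause to its tlist entry, then record attno,
 * equality operator, and collation in parallel arrays.  A hypothetical
 * shared helper would look like this; the arrays must be pre-sized by the
 * caller.
 */
#ifdef CREATEPLAN_EXAMPLES			/* hypothetical guard, examples only */
static int
example_fill_sortgroup_arrays(List *clauses, List *targetlist,
							  AttrNumber *colIdx, Oid *operators,
							  Oid *collations)
{
	int			n = 0;
	ListCell   *lc;

	foreach(lc, clauses)
	{
		SortGroupClause *sgc = (SortGroupClause *) lfirst(lc);
		TargetEntry *tle = get_sortgroupclause_tle(sgc, targetlist);

		colIdx[n] = tle->resno;
		operators[n] = sgc->eqop;
		collations[n] = exprCollation((Node *) tle->expr);
		n++;
	}
	return n;
}
#endif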
2634 /*
2635  * create_setop_plan
2636  *
2637  * Create a SetOp plan for 'best_path' and (recursively) plans
2638  * for its subpaths.
2639  */
2640 static SetOp *
2641 create_setop_plan(PlannerInfo *root, SetOpPath *best_path, int flags)
2642 {
2643  SetOp *plan;
2644  Plan *subplan;
2645  long numGroups;
2646 
2647  /*
2648  * SetOp doesn't project, so tlist requirements pass through; moreover we
2649  * need grouping columns to be labeled.
2650  */
2651  subplan = create_plan_recurse(root, best_path->subpath,
2652  flags | CP_LABEL_TLIST);
2653 
2654  /* Convert numGroups to long int --- but 'ware overflow! */
2655  numGroups = (long) Min(best_path->numGroups, (double) LONG_MAX);
2656 
2657  plan = make_setop(best_path->cmd,
2658  best_path->strategy,
2659  subplan,
2660  best_path->distinctList,
2661  best_path->flagColIdx,
2662  best_path->firstFlag,
2663  numGroups);
2664 
2665  copy_generic_path_info(&plan->plan, (Path *) best_path);
2666 
2667  return plan;
2668 }
2669 
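/*
 * Editor's illustrative sketch (not part of createplan.c): the numGroups
 * conversion used in create_setop_plan() and create_recursiveunion_plan(),
 * shown in isolation.  Min() is the backend macro; the estimate argument is
 * hypothetical.
 */
#ifdef CREATEPLAN_EXAMPLES			/* hypothetical guard, examples only */
static long
example_clamp_group_estimate(double numGroups_estimate)
{
	/* clamp so that casting the double estimate cannot overflow a long */
	return (long) Min(numGroups_estimate, (double) LONG_MAX);
}
#endif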
2670 /*
2671  * create_recursiveunion_plan
2672  *
2673  * Create a RecursiveUnion plan for 'best_path' and (recursively) plans
2674  * for its subpaths.
2675  */
2676 static RecursiveUnion *
2677 create_recursiveunion_plan(PlannerInfo *root, RecursiveUnionPath *best_path)
2678 {
2679  RecursiveUnion *plan;
2680  Plan *leftplan;
2681  Plan *rightplan;
2682  List *tlist;
2683  long numGroups;
2684 
2685  /* Need both children to produce same tlist, so force it */
2686  leftplan = create_plan_recurse(root, best_path->leftpath, CP_EXACT_TLIST);
2687  rightplan = create_plan_recurse(root, best_path->rightpath, CP_EXACT_TLIST);
2688 
2689  tlist = build_path_tlist(root, &best_path->path);
2690 
2691  /* Convert numGroups to long int --- but 'ware overflow! */
2692  numGroups = (long) Min(best_path->numGroups, (double) LONG_MAX);
2693 
2694  plan = make_recursive_union(tlist,
2695  leftplan,
2696  rightplan,
2697  best_path->wtParam,
2698  best_path->distinctList,
2699  numGroups);
2700 
2701  copy_generic_path_info(&plan->plan, (Path *) best_path);
2702 
2703  return plan;
2704 }
2705 
2706 /*
2707  * create_lockrows_plan
2708  *
2709  * Create a LockRows plan for 'best_path' and (recursively) plans
2710  * for its subpaths.
2711  */
2712 static LockRows *
2713 create_lockrows_plan(PlannerInfo *root, LockRowsPath *best_path,
2714  int flags)
2715 {
2716  LockRows *plan;
2717  Plan *subplan;
2718 
2719  /* LockRows doesn't project, so tlist requirements pass through */
2720  subplan = create_plan_recurse(root, best_path->subpath, flags);
2721 
2722  plan = make_lockrows(subplan, best_path->rowMarks, best_path->epqParam);
2723 
2724  copy_generic_path_info(&plan->plan, (Path *) best_path);
2725 
2726  return plan;
2727 }
2728 
2729 /*
2730  * create_modifytable_plan
2731  * Create a ModifyTable plan for 'best_path'.
2732  *
2733  * Returns a Plan node.
2734  */
2735 static ModifyTable *
2736 create_modifytable_plan(PlannerInfo *root, ModifyTablePath *best_path)
2737 {
2738  ModifyTable *plan;
2739  Path *subpath = best_path->subpath;
2740  Plan *subplan;
2741 
2742  /* Subplan must produce exactly the specified tlist */
2743  subplan = create_plan_recurse(root, subpath, CP_EXACT_TLIST);
2744 
2745  /* Transfer resname/resjunk labeling, too, to keep executor happy */
2746  apply_tlist_labeling(subplan->targetlist, root->processed_tlist);
2747 
2748  plan = make_modifytable(root,
2749  subplan,
2750  best_path->operation,
2751  best_path->canSetTag,
2752  best_path->nominalRelation,
2753  best_path->rootRelation,
2754  best_path->partColsUpdated,
2755  best_path->resultRelations,
2756  best_path->updateColnosLists,
2757  best_path->withCheckOptionLists,
2758  best_path->returningLists,
2759  best_path->rowMarks,
2760  best_path->onconflict,
2761  best_path->epqParam);
2762 
2763  copy_generic_path_info(&plan->plan, &best_path->path);
2764 
2765  return plan;
2766 }
2767 
2768 /*
2769  * create_limit_plan
2770  *
2771  * Create a Limit plan for 'best_path' and (recursively) plans
2772  * for its subpaths.
2773  */
2774 static Limit *
2775 create_limit_plan(PlannerInfo *root, LimitPath *best_path, int flags)
2776 {
2777  Limit *plan;
2778  Plan *subplan;
2779  int numUniqkeys = 0;
2780  AttrNumber *uniqColIdx = NULL;
2781  Oid *uniqOperators = NULL;
2782  Oid *uniqCollations = NULL;
2783 
2784  /* Limit doesn't project, so tlist requirements pass through */
2785  subplan = create_plan_recurse(root, best_path->subpath, flags);
2786 
2787  /* Extract information necessary for comparing rows for WITH TIES. */
2788  if (best_path->limitOption == LIMIT_OPTION_WITH_TIES)
2789  {
2790  Query *parse = root->parse;
2791  ListCell *l;
2792 
2793  numUniqkeys = list_length(parse->sortClause);
2794  uniqColIdx = (AttrNumber *) palloc(numUniqkeys * sizeof(AttrNumber));
2795  uniqOperators = (Oid *) palloc(numUniqkeys * sizeof(Oid));
2796  uniqCollations = (Oid *) palloc(numUniqkeys * sizeof(Oid));
2797 
2798  numUniqkeys = 0;
2799  foreach(l, parse->sortClause)
2800  {
2801  SortGroupClause *sortcl = (SortGroupClause *) lfirst(l);
2802  TargetEntry *tle = get_sortgroupclause_tle(sortcl, parse->targetList);
2803 
2804  uniqColIdx[numUniqkeys] = tle->resno;
2805  uniqOperators[numUniqkeys] = sortcl->eqop;
2806  uniqCollations[numUniqkeys] = exprCollation((Node *) tle->expr);
2807  numUniqkeys++;
2808  }
2809  }
2810 
2811  plan = make_limit(subplan,
2812  best_path->limitOffset,
2813  best_path->limitCount,
2814  best_path->limitOption,
2815  numUniqkeys, uniqColIdx, uniqOperators, uniqCollations);
2816 
2817  copy_generic_path_info(&plan->plan, (Path *) best_path);
2818 
2819  return plan;
2820 }
2821 
2822 
2823 /*****************************************************************************
2824  *
2825  * BASE-RELATION SCAN METHODS
2826  *
2827  *****************************************************************************/
2828 
2829 
2830 /*
2831  * create_seqscan_plan
2832  * Returns a seqscan plan for the base relation scanned by 'best_path'
2833  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
2834  */
2835 static SeqScan *
2836 create_seqscan_plan(PlannerInfo *root, Path *best_path,
2837  List *tlist, List *scan_clauses)
2838 {
2839  SeqScan *scan_plan;
2840  Index scan_relid = best_path->parent->relid;
2841 
2842  /* it should be a base rel... */
2843  Assert(scan_relid > 0);
2844  Assert(best_path->parent->rtekind == RTE_RELATION);
2845 
2846  /* Sort clauses into best execution order */
2847  scan_clauses = order_qual_clauses(root, scan_clauses);
2848 
2849  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
2850  scan_clauses = extract_actual_clauses(scan_clauses, false);
2851 
2852  /* Replace any outer-relation variables with nestloop params */
2853  if (best_path->param_info)
2854  {
2855  scan_clauses = (List *)
2856  replace_nestloop_params(root, (Node *) scan_clauses);
2857  }
2858 
2859  scan_plan = make_seqscan(tlist,
2860  scan_clauses,
2861  scan_relid);
2862 
2863  copy_generic_path_info(&scan_plan->scan.plan, best_path);
2864 
2865  return scan_plan;
2866 }
2867 
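/*
 * Editor's illustrative sketch (not part of createplan.c): the scan-plan
 * builders in this file run their restriction clauses through the same
 * preamble --- order for execution, strip RestrictInfos, and substitute
 * nestloop params when the path is parameterized.  A hypothetical shared
 * helper capturing that sequence:
 */
#ifdef CREATEPLAN_EXAMPLES			/* hypothetical guard, examples only */
static List *
example_prepare_scan_quals(PlannerInfo *root, Path *best_path,
						   List *scan_clauses)
{
	scan_clauses = order_qual_clauses(root, scan_clauses);
	scan_clauses = extract_actual_clauses(scan_clauses, false);
	if (best_path->param_info)
		scan_clauses = (List *)
			replace_nestloop_params(root, (Node *) scan_clauses);
	return scan_clauses;
}
#endif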
2868 /*
2869  * create_samplescan_plan
2870  * Returns a samplescan plan for the base relation scanned by 'best_path'
2871  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
2872  */
2873 static SampleScan *
2874 create_samplescan_plan(PlannerInfo *root, Path *best_path,
2875  List *tlist, List *scan_clauses)
2876 {
2877  SampleScan *scan_plan;
2878  Index scan_relid = best_path->parent->relid;
2879  RangeTblEntry *rte;
2880  TableSampleClause *tsc;
2881 
2882  /* it should be a base rel with a tablesample clause... */
2883  Assert(scan_relid > 0);
2884  rte = planner_rt_fetch(scan_relid, root);
2885  Assert(rte->rtekind == RTE_RELATION);
2886  tsc = rte->tablesample;
2887  Assert(tsc != NULL);
2888 
2889  /* Sort clauses into best execution order */
2890  scan_clauses = order_qual_clauses(root, scan_clauses);
2891 
2892  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
2893  scan_clauses = extract_actual_clauses(scan_clauses, false);
2894 
2895  /* Replace any outer-relation variables with nestloop params */
2896  if (best_path->param_info)
2897  {
2898  scan_clauses = (List *)
2899  replace_nestloop_params(root, (Node *) scan_clauses);
2900  tsc = (TableSampleClause *)
2901  replace_nestloop_params(root, (Node *) tsc);
2902  }
2903 
2904  scan_plan = make_samplescan(tlist,
2905  scan_clauses,
2906  scan_relid,
2907  tsc);
2908 
2909  copy_generic_path_info(&scan_plan->scan.plan, best_path);
2910 
2911  return scan_plan;
2912 }
2913 
2914 /*
2915  * create_indexscan_plan
2916  * Returns an indexscan plan for the base relation scanned by 'best_path'
2917  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
2918  *
2919  * We use this for both plain IndexScans and IndexOnlyScans, because the
2920  * qual preprocessing work is the same for both. Note that the caller tells
2921  * us which to build --- we don't look at best_path->path.pathtype, because
2922  * create_bitmap_subplan needs to be able to override the prior decision.
2923  */
2924 static Scan *
2925 create_indexscan_plan(PlannerInfo *root,
2926  IndexPath *best_path,
2927  List *tlist,
2928  List *scan_clauses,
2929  bool indexonly)
2930 {
2931  Scan *scan_plan;
2932  List *indexclauses = best_path->indexclauses;
2933  List *indexorderbys = best_path->indexorderbys;
2934  Index baserelid = best_path->path.parent->relid;
2935  Oid indexoid = best_path->indexinfo->indexoid;
2936  List *qpqual;
2937  List *stripped_indexquals;
2938  List *fixed_indexquals;
2939  List *fixed_indexorderbys;
2940  List *indexorderbyops = NIL;
2941  ListCell *l;
2942 
2943  /* it should be a base rel... */
2944  Assert(baserelid > 0);
2945  Assert(best_path->path.parent->rtekind == RTE_RELATION);
2946 
2947  /*
2948  * Extract the index qual expressions (stripped of RestrictInfos) from the
2949  * IndexClauses list, and prepare a copy with index Vars substituted for
2950  * table Vars. (This step also does replace_nestloop_params on the
2951  * fixed_indexquals.)
2952  */
2953  fix_indexqual_references(root, best_path,
2954  &stripped_indexquals,
2955  &fixed_indexquals);
2956 
2957  /*
2958  * Likewise fix up index attr references in the ORDER BY expressions.
2959  */
2960  fixed_indexorderbys = fix_indexorderby_references(root, best_path);
2961 
2962  /*
2963  * The qpqual list must contain all restrictions not automatically handled
2964  * by the index, other than pseudoconstant clauses which will be handled
2965  * by a separate gating plan node. All the predicates in the indexquals
2966  * will be checked (either by the index itself, or by nodeIndexscan.c),
2967  * but if there are any "special" operators involved then they must be
2968  * included in qpqual. The upshot is that qpqual must contain
2969  * scan_clauses minus whatever appears in indexquals.
2970  *
2971  * is_redundant_with_indexclauses() detects cases where a scan clause is
2972  * present in the indexclauses list or is generated from the same
2973  * EquivalenceClass as some indexclause, and is therefore redundant with
2974  * it, though not equal. (The latter happens when indxpath.c prefers a
2975  * different derived equality than what generate_join_implied_equalities
2976  * picked for a parameterized scan's ppi_clauses.) Note that it will not
2977  * match to lossy index clauses, which is critical because we have to
2978  * include the original clause in qpqual in that case.
2979  *
2980  * In some situations (particularly with OR'd index conditions) we may
2981  * have scan_clauses that are not equal to, but are logically implied by,
2982  * the index quals; so we also try a predicate_implied_by() check to see
2983  * if we can discard quals that way. (predicate_implied_by assumes its
2984  * first input contains only immutable functions, so we have to check
2985  * that.)
2986  *
2987  * Note: if you change this bit of code you should also look at
2988  * extract_nonindex_conditions() in costsize.c.
2989  */
2990  qpqual = NIL;
2991  foreach(l, scan_clauses)
2992  {
2993  RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
2994 
2995  if (rinfo->pseudoconstant)
2996  continue; /* we may drop pseudoconstants here */
2997  if (is_redundant_with_indexclauses(rinfo, indexclauses))
2998  continue; /* dup or derived from same EquivalenceClass */
2999  if (!contain_mutable_functions((Node *) rinfo->clause) &&
3000  predicate_implied_by(list_make1(rinfo->clause), stripped_indexquals,
3001  false))
3002  continue; /* provably implied by indexquals */
3003  qpqual = lappend(qpqual, rinfo);
3004  }
3005 
3006  /* Sort clauses into best execution order */
3007  qpqual = order_qual_clauses(root, qpqual);
3008 
3009  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3010  qpqual = extract_actual_clauses(qpqual, false);
3011 
3012  /*
3013  * We have to replace any outer-relation variables with nestloop params in
3014  * the indexqualorig, qpqual, and indexorderbyorig expressions. A bit
3015  * annoying to have to do this separately from the processing in
3016  * fix_indexqual_references --- rethink this when generalizing the inner
3017  * indexscan support. But note we can't really do this earlier because
3018  * it'd break the comparisons to predicates above ... (or would it? Those
3019  * wouldn't have outer refs)
3020  */
3021  if (best_path->path.param_info)
3022  {
3023  stripped_indexquals = (List *)
3024  replace_nestloop_params(root, (Node *) stripped_indexquals);
3025  qpqual = (List *)
3026  replace_nestloop_params(root, (Node *) qpqual);
3027  indexorderbys = (List *)
3028  replace_nestloop_params(root, (Node *) indexorderbys);
3029  }
3030 
3031  /*
3032  * If there are ORDER BY expressions, look up the sort operators for their
3033  * result datatypes.
3034  */
3035  if (indexorderbys)
3036  {
3037  ListCell *pathkeyCell,
3038  *exprCell;
3039 
3040  /*
3041  * PathKey contains OID of the btree opfamily we're sorting by, but
3042  * that's not quite enough because we need the expression's datatype
3043  * to look up the sort operator in the operator family.
3044  */
3045  Assert(list_length(best_path->path.pathkeys) == list_length(indexorderbys));
3046  forboth(pathkeyCell, best_path->path.pathkeys, exprCell, indexorderbys)
3047  {
3048  PathKey *pathkey = (PathKey *) lfirst(pathkeyCell);
3049  Node *expr = (Node *) lfirst(exprCell);
3050  Oid exprtype = exprType(expr);
3051  Oid sortop;
3052 
3053  /* Get sort operator from opfamily */
3054  sortop = get_opfamily_member(pathkey->pk_opfamily,
3055  exprtype,
3056  exprtype,
3057  pathkey->pk_strategy);
3058  if (!OidIsValid(sortop))
3059  elog(ERROR, "missing operator %d(%u,%u) in opfamily %u",
3060  pathkey->pk_strategy, exprtype, exprtype, pathkey->pk_opfamily);
3061  indexorderbyops = lappend_oid(indexorderbyops, sortop);
3062  }
3063  }
3064 
3065  /* Finally ready to build the plan node */
3066  if (indexonly)
3067  scan_plan = (Scan *) make_indexonlyscan(tlist,
3068  qpqual,
3069  baserelid,
3070  indexoid,
3071  fixed_indexquals,
3072  fixed_indexorderbys,
3073  best_path->indexinfo->indextlist,
3074  best_path->indexscandir);
3075  else
3076  scan_plan = (Scan *) make_indexscan(tlist,
3077  qpqual,
3078  baserelid,
3079  indexoid,
3080  fixed_indexquals,
3081  stripped_indexquals,
3082  fixed_indexorderbys,
3083  indexorderbys,
3084  indexorderbyops,
3085  best_path->indexscandir);
3086 
3087  copy_generic_path_info(&scan_plan->plan, &best_path->path);
3088 
3089  return scan_plan;
3090 }
3091 
3092 /*
3093  * create_bitmap_scan_plan
3094  * Returns a bitmap scan plan for the base relation scanned by 'best_path'
3095  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3096  */
3097 static BitmapHeapScan *
3098 create_bitmap_scan_plan(PlannerInfo *root,
3099  BitmapHeapPath *best_path,
3100  List *tlist,
3101  List *scan_clauses)
3102 {
3103  Index baserelid = best_path->path.parent->relid;
3104  Plan *bitmapqualplan;
3105  List *bitmapqualorig;
3106  List *indexquals;
3107  List *indexECs;
3108  List *qpqual;
3109  ListCell *l;
3110  BitmapHeapScan *scan_plan;
3111 
3112  /* it should be a base rel... */
3113  Assert(baserelid > 0);
3114  Assert(best_path->path.parent->rtekind == RTE_RELATION);
3115 
3116  /* Process the bitmapqual tree into a Plan tree and qual lists */
3117  bitmapqualplan = create_bitmap_subplan(root, best_path->bitmapqual,
3118  &bitmapqualorig, &indexquals,
3119  &indexECs);
3120 
3121  if (best_path->path.parallel_aware)
3122  bitmap_subplan_mark_shared(bitmapqualplan);
3123 
3124  /*
3125  * The qpqual list must contain all restrictions not automatically handled
3126  * by the index, other than pseudoconstant clauses which will be handled
3127  * by a separate gating plan node. All the predicates in the indexquals
3128  * will be checked (either by the index itself, or by
3129  * nodeBitmapHeapscan.c), but if there are any "special" operators
3130  * involved then they must be added to qpqual. The upshot is that qpqual
3131  * must contain scan_clauses minus whatever appears in indexquals.
3132  *
3133  * This loop is similar to the comparable code in create_indexscan_plan(),
3134  * but with some differences because it has to compare the scan clauses to
3135  * stripped (no RestrictInfos) indexquals. See comments there for more
3136  * info.
3137  *
3138  * In normal cases simple equal() checks will be enough to spot duplicate
3139  * clauses, so we try that first. We next see if the scan clause is
3140  * redundant with any top-level indexqual by virtue of being generated
3141  * from the same EC. After that, try predicate_implied_by().
3142  *
3143  * Unlike create_indexscan_plan(), the predicate_implied_by() test here is
3144  * useful for getting rid of qpquals that are implied by index predicates,
3145  * because the predicate conditions are included in the "indexquals"
3146  * returned by create_bitmap_subplan(). Bitmap scans have to do it that
3147  * way because predicate conditions need to be rechecked if the scan
3148  * becomes lossy, so they have to be included in bitmapqualorig.
3149  */
3150  qpqual = NIL;
3151  foreach(l, scan_clauses)
3152  {
3153  RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
3154  Node *clause = (Node *) rinfo->clause;
3155 
3156  if (rinfo->pseudoconstant)
3157  continue; /* we may drop pseudoconstants here */
3158  if (list_member(indexquals, clause))
3159  continue; /* simple duplicate */
3160  if (rinfo->parent_ec && list_member_ptr(indexECs, rinfo->parent_ec))
3161  continue; /* derived from same EquivalenceClass */
3162  if (!contain_mutable_functions(clause) &&
3163  predicate_implied_by(list_make1(clause), indexquals, false))
3164  continue; /* provably implied by indexquals */
3165  qpqual = lappend(qpqual, rinfo);
3166  }
3167 
3168  /* Sort clauses into best execution order */
3169  qpqual = order_qual_clauses(root, qpqual);
3170 
3171  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3172  qpqual = extract_actual_clauses(qpqual, false);
3173 
3174  /*
3175  * When dealing with special operators, we will at this point have
3176  * duplicate clauses in qpqual and bitmapqualorig. We may as well drop
3177  * 'em from bitmapqualorig, since there's no point in making the tests
3178  * twice.
3179  */
3180  bitmapqualorig = list_difference_ptr(bitmapqualorig, qpqual);
3181 
3182  /*
3183  * We have to replace any outer-relation variables with nestloop params in
3184  * the qpqual and bitmapqualorig expressions. (This was already done for
3185  * expressions attached to plan nodes in the bitmapqualplan tree.)
3186  */
3187  if (best_path->path.param_info)
3188  {
3189  qpqual = (List *)
3190  replace_nestloop_params(root, (Node *) qpqual);
3191  bitmapqualorig = (List *)
3192  replace_nestloop_params(root, (Node *) bitmapqualorig);
3193  }
3194 
3195  /* Finally ready to build the plan node */
3196  scan_plan = make_bitmap_heapscan(tlist,
3197  qpqual,
3198  bitmapqualplan,
3199  bitmapqualorig,
3200  baserelid);
3201 
3202  copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
3203 
3204  return scan_plan;
3205 }
3206 
3207 /*
3208  * Given a bitmapqual tree, generate the Plan tree that implements it
3209  *
3210  * As byproducts, we also return in *qual and *indexqual the qual lists
3211  * (in implicit-AND form, without RestrictInfos) describing the original index
3212  * conditions and the generated indexqual conditions. (These are the same in
3213  * simple cases, but when special index operators are involved, the former
3214  * list includes the special conditions while the latter includes the actual
3215  * indexable conditions derived from them.) Both lists include partial-index
3216  * predicates, because we have to recheck predicates as well as index
3217  * conditions if the bitmap scan becomes lossy.
3218  *
3219  * In addition, we return a list of EquivalenceClass pointers for all the
3220  * top-level indexquals that were possibly-redundantly derived from ECs.
3221  * This allows removal of scan_clauses that are redundant with such quals.
3222  * (We do not attempt to detect such redundancies for quals that are within
3223  * OR subtrees. This could be done in a less hacky way if we returned the
3224  * indexquals in RestrictInfo form, but that would be slower and still pretty
3225  * messy, since we'd have to build new RestrictInfos in many cases.)
3226  */
3227 static Plan *
3228 create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
3229  List **qual, List **indexqual, List **indexECs)
3230 {
3231  Plan *plan;
3232 
3233  if (IsA(bitmapqual, BitmapAndPath))
3234  {
3235  BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
3236  List *subplans = NIL;
3237  List *subquals = NIL;
3238  List *subindexquals = NIL;
3239  List *subindexECs = NIL;
3240  ListCell *l;
3241 
3242  /*
3243  * There may well be redundant quals among the subplans, since a
3244  * top-level WHERE qual might have gotten used to form several
3245  * different index quals. We don't try exceedingly hard to eliminate
3246  * redundancies, but we do eliminate obvious duplicates by using
3247  * list_concat_unique.
3248  */
3249  foreach(l, apath->bitmapquals)
3250  {
3251  Plan *subplan;
3252  List *subqual;
3253  List *subindexqual;
3254  List *subindexEC;
3255 
3256  subplan = create_bitmap_subplan(root, (Path *) lfirst(l),
3257  &subqual, &subindexqual,
3258  &subindexEC);
3259  subplans = lappend(subplans, subplan);
3260  subquals = list_concat_unique(subquals, subqual);
3261  subindexquals = list_concat_unique(subindexquals, subindexqual);
3262  /* Duplicates in indexECs aren't worth getting rid of */
3263  subindexECs = list_concat(subindexECs, subindexEC);
3264  }
3265  plan = (Plan *) make_bitmap_and(subplans);
3266  plan->startup_cost = apath->path.startup_cost;
3267  plan->total_cost = apath->path.total_cost;
3268  plan->plan_rows =
3269  clamp_row_est(apath->bitmapselectivity * apath->path.parent->tuples);
3270  plan->plan_width = 0; /* meaningless */
3271  plan->parallel_aware = false;
3272  plan->parallel_safe = apath->path.parallel_safe;
3273  *qual = subquals;
3274  *indexqual = subindexquals;
3275  *indexECs = subindexECs;
3276  }
3277  else if (IsA(bitmapqual, BitmapOrPath))
3278  {
3279  BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
3280  List *subplans = NIL;
3281  List *subquals = NIL;
3282  List *subindexquals = NIL;
3283  bool const_true_subqual = false;
3284  bool const_true_subindexqual = false;
3285  ListCell *l;
3286 
3287  /*
3288  * Here, we only detect qual-free subplans. A qual-free subplan would
3289  * cause us to generate "... OR true ..." which we may as well reduce
3290  * to just "true". We do not try to eliminate redundant subclauses
3291  * because (a) it's not as likely as in the AND case, and (b) we might
3292  * well be working with hundreds or even thousands of OR conditions,
3293  * perhaps from a long IN list. The performance of list_append_unique
3294  * would be unacceptable.
3295  */
3296  foreach(l, opath->bitmapquals)
3297  {
3298  Plan *subplan;
3299  List *subqual;
3300  List *subindexqual;
3301  List *subindexEC;
3302 
3303  subplan = create_bitmap_subplan(root, (Path *) lfirst(l),
3304  &subqual, &subindexqual,
3305  &subindexEC);
3306  subplans = lappend(subplans, subplan);
3307  if (subqual == NIL)
3308  const_true_subqual = true;
3309  else if (!const_true_subqual)
3310  subquals = lappend(subquals,
3311  make_ands_explicit(subqual));
3312  if (subindexqual == NIL)
3313  const_true_subindexqual = true;
3314  else if (!const_true_subindexqual)
3315  subindexquals = lappend(subindexquals,
3316  make_ands_explicit(subindexqual));
3317  }
3318 
3319  /*
3320  * In the presence of ScalarArrayOpExpr quals, we might have built
3321  * BitmapOrPaths with just one subpath; don't add an OR step.
3322  */
3323  if (list_length(subplans) == 1)
3324  {
3325  plan = (Plan *) linitial(subplans);
3326  }
3327  else
3328  {
3329  plan = (Plan *) make_bitmap_or(subplans);
3330  plan->startup_cost = opath->path.startup_cost;
3331  plan->total_cost = opath->path.total_cost;
3332  plan->plan_rows =
3333  clamp_row_est(opath->bitmapselectivity * opath->path.parent->tuples);
3334  plan->plan_width = 0; /* meaningless */
3335  plan->parallel_aware = false;
3336  plan->parallel_safe = opath->path.parallel_safe;
3337  }
3338 
3339  /*
3340  * If there were constant-TRUE subquals, the OR reduces to constant
3341  * TRUE. Also, avoid generating one-element ORs, which could happen
3342  * due to redundancy elimination or ScalarArrayOpExpr quals.
3343  */
3344  if (const_true_subqual)
3345  *qual = NIL;
3346  else if (list_length(subquals) <= 1)
3347  *qual = subquals;
3348  else
3349  *qual = list_make1(make_orclause(subquals));
3350  if (const_true_subindexqual)
3351  *indexqual = NIL;
3352  else if (list_length(subindexquals) <= 1)
3353  *indexqual = subindexquals;
3354  else
3355  *indexqual = list_make1(make_orclause(subindexquals));
3356  *indexECs = NIL;
3357  }
3358  else if (IsA(bitmapqual, IndexPath))
3359  {
3360  IndexPath *ipath = (IndexPath *) bitmapqual;
3361  IndexScan *iscan;
3362  List *subquals;
3363  List *subindexquals;
3364  List *subindexECs;
3365  ListCell *l;
3366 
3367  /* Use the regular indexscan plan build machinery... */
3368  iscan = castNode(IndexScan,
3369  create_indexscan_plan(root, ipath,
3370  NIL, NIL, false));
3371  /* then convert to a bitmap indexscan */
3372  plan = (Plan *) make_bitmap_indexscan(iscan->scan.scanrelid,
3373  iscan->indexid,
3374  iscan->indexqual,
3375  iscan->indexqualorig);
3376  /* and set its cost/width fields appropriately */
3377  plan->startup_cost = 0.0;
3378  plan->total_cost = ipath->indextotalcost;
3379  plan->plan_rows =
3380  clamp_row_est(ipath->indexselectivity * ipath->path.parent->tuples);
3381  plan->plan_width = 0; /* meaningless */
3382  plan->parallel_aware = false;
3383  plan->parallel_safe = ipath->path.parallel_safe;
3384  /* Extract original index clauses, actual index quals, relevant ECs */
3385  subquals = NIL;
3386  subindexquals = NIL;
3387  subindexECs = NIL;
3388  foreach(l, ipath->indexclauses)
3389  {
3390  IndexClause *iclause = (IndexClause *) lfirst(l);
3391  RestrictInfo *rinfo = iclause->rinfo;
3392 
3393  Assert(!rinfo->pseudoconstant);
3394  subquals = lappend(subquals, rinfo->clause);
3395  subindexquals = list_concat(subindexquals,
3396  get_actual_clauses(iclause->indexquals));
3397  if (rinfo->parent_ec)
3398  subindexECs = lappend(subindexECs, rinfo->parent_ec);
3399  }
3400  /* We can add any index predicate conditions, too */
3401  foreach(l, ipath->indexinfo->indpred)
3402  {
3403  Expr *pred = (Expr *) lfirst(l);
3404 
3405  /*
3406  * We know that the index predicate must have been implied by the
3407  * query condition as a whole, but it may or may not be implied by
3408  * the conditions that got pushed into the bitmapqual. Avoid
3409  * generating redundant conditions.
3410  */
3411  if (!predicate_implied_by(list_make1(pred), subquals, false))
3412  {
3413  subquals = lappend(subquals, pred);
3414  subindexquals = lappend(subindexquals, pred);
3415  }
3416  }
3417  *qual = subquals;
3418  *indexqual = subindexquals;
3419  *indexECs = subindexECs;
3420  }
3421  else
3422  {
3423  elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
3424  plan = NULL; /* keep compiler quiet */
3425  }
3426 
3427  return plan;
3428 }
3429 
3430 /*
3431  * create_tidscan_plan
3432  * Returns a tidscan plan for the base relation scanned by 'best_path'
3433  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3434  */
3435 static TidScan *
3436 create_tidscan_plan(PlannerInfo *root, TidPath *best_path,
3437  List *tlist, List *scan_clauses)
3438 {
3439  TidScan *scan_plan;
3440  Index scan_relid = best_path->path.parent->relid;
3441  List *tidquals = best_path->tidquals;
3442 
3443  /* it should be a base rel... */
3444  Assert(scan_relid > 0);
3445  Assert(best_path->path.parent->rtekind == RTE_RELATION);
3446 
3447  /*
3448  * The qpqual list must contain all restrictions not enforced by the
3449  * tidquals list. Since tidquals has OR semantics, we have to be careful
3450  * about matching it up to scan_clauses. It's convenient to handle the
3451  * single-tidqual case separately from the multiple-tidqual case. In the
3452  * single-tidqual case, we look through the scan_clauses while they are
3453  * still in RestrictInfo form, and drop any that are redundant with the
3454  * tidqual.
3455  *
3456  * In normal cases simple pointer equality checks will be enough to spot
3457  * duplicate RestrictInfos, so we try that first.
3458  *
3459  * Another common case is that a scan_clauses entry is generated from the
3460  * same EquivalenceClass as some tidqual, and is therefore redundant with
3461  * it, though not equal.
3462  *
3463  * Unlike indexpaths, we don't bother with predicate_implied_by(); the
3464  * number of cases where it could win is pretty small.
3465  */
3466  if (list_length(tidquals) == 1)
3467  {
3468  List *qpqual = NIL;
3469  ListCell *l;
3470 
3471  foreach(l, scan_clauses)
3472  {
3473  RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
3474 
3475  if (rinfo->pseudoconstant)
3476  continue; /* we may drop pseudoconstants here */
3477  if (list_member_ptr(tidquals, rinfo))
3478  continue; /* simple duplicate */
3479  if (is_redundant_derived_clause(rinfo, tidquals))
3480  continue; /* derived from same EquivalenceClass */
3481  qpqual = lappend(qpqual, rinfo);
3482  }
3483  scan_clauses = qpqual;
3484  }
3485 
3486  /* Sort clauses into best execution order */
3487  scan_clauses = order_qual_clauses(root, scan_clauses);
3488 
3489  /* Reduce RestrictInfo lists to bare expressions; ignore pseudoconstants */
3490  tidquals = extract_actual_clauses(tidquals, false);
3491  scan_clauses = extract_actual_clauses(scan_clauses, false);
3492 
3493  /*
3494  * If we have multiple tidquals, it's more convenient to remove duplicate
3495  * scan_clauses after stripping the RestrictInfos. In this situation,
3496  * because the tidquals represent OR sub-clauses, they could not have come
3497  * from EquivalenceClasses so we don't have to worry about matching up
3498  * non-identical clauses. On the other hand, because tidpath.c will have
3499  * extracted those sub-clauses from some OR clause and built its own list,
3500  * we will certainly not have pointer equality to any scan clause. So
3501  * convert the tidquals list to an explicit OR clause and see if we can
3502  * match it via equal() to any scan clause.
3503  */
3504  if (list_length(tidquals) > 1)
3505  scan_clauses = list_difference(scan_clauses,
3506  list_make1(make_orclause(tidquals)));
3507 
3508  /* Replace any outer-relation variables with nestloop params */
3509  if (best_path->path.param_info)
3510  {
3511  tidquals = (List *)
3512  replace_nestloop_params(root, (Node *) tidquals);
3513  scan_clauses = (List *)
3514  replace_nestloop_params(root, (Node *) scan_clauses);
3515  }
3516 
3517  scan_plan = make_tidscan(tlist,
3518  scan_clauses,
3519  scan_relid,
3520  tidquals);
3521 
3522  copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
3523 
3524  return scan_plan;
3525 }
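/*
 * Rough worked example (illustrative only): with a lone tidqual such as
 * "ctid = '(0,1)'::tid", the duplicate scan clause is dropped while still in
 * RestrictInfo form (pointer equality, or EquivalenceClass redundancy).
 * With several tidquals, say from "ctid = '(0,1)' OR ctid = '(0,2)'",
 * tidpath.c rebuilt the sub-clauses itself, so the redundant scan clause can
 * only be recognized by reassembling the OR and removing it with the
 * equal()-based list_difference(), as done above.
 */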
3526 
3527 /*
3528  * create_tidrangescan_plan
3529  * Returns a tidrangescan plan for the base relation scanned by 'best_path'
3530  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3531  */
3532 static TidRangeScan *
3533 create_tidrangescan_plan(PlannerInfo *root, TidRangePath *best_path,
3534  List *tlist, List *scan_clauses)
3535 {
3536  TidRangeScan *scan_plan;
3537  Index scan_relid = best_path->path.parent->relid;
3538  List *tidrangequals = best_path->tidrangequals;
3539 
3540  /* it should be a base rel... */
3541  Assert(scan_relid > 0);
3542  Assert(best_path->path.parent->rtekind == RTE_RELATION);
3543 
3544  /*
3545  * The qpqual list must contain all restrictions not enforced by the
3546  * tidrangequals list. tidrangequals has AND semantics, so we can simply
3547  * remove any qual that appears in it.
3548  */
3549  {
3550  List *qpqual = NIL;
3551  ListCell *l;
3552 
3553  foreach(l, scan_clauses)
3554  {
3555  RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
3556 
3557  if (rinfo->pseudoconstant)
3558  continue; /* we may drop pseudoconstants here */
3559  if (list_member_ptr(tidrangequals, rinfo))
3560  continue; /* simple duplicate */
3561  qpqual = lappend(qpqual, rinfo);
3562  }
3563  scan_clauses = qpqual;
3564  }
3565 
3566  /* Sort clauses into best execution order */
3567  scan_clauses = order_qual_clauses(root, scan_clauses);
3568 
3569  /* Reduce RestrictInfo lists to bare expressions; ignore pseudoconstants */
3570  tidrangequals = extract_actual_clauses(tidrangequals, false);
3571  scan_clauses = extract_actual_clauses(scan_clauses, false);
3572 
3573  /* Replace any outer-relation variables with nestloop params */
3574  if (best_path->path.param_info)
3575  {
3576  tidrangequals = (List *)
3577  replace_nestloop_params(root, (Node *) tidrangequals);
3578  scan_clauses = (List *)
3579  replace_nestloop_params(root, (Node *) scan_clauses);
3580  }
3581 
3582  scan_plan = make_tidrangescan(tlist,
3583  scan_clauses,
3584  scan_relid,
3585  tidrangequals);
3586 
3587  copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
3588 
3589  return scan_plan;
3590 }
3591 
3592 /*
3593  * create_subqueryscan_plan
3594  * Returns a subqueryscan plan for the base relation scanned by 'best_path'
3595  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3596  */
3597 static SubqueryScan *
3598 create_subqueryscan_plan(PlannerInfo *root, SubqueryScanPath *best_path,
3599  List *tlist, List *scan_clauses)
3600 {
3601  SubqueryScan *scan_plan;
3602  RelOptInfo *rel = best_path->path.parent;
3603  Index scan_relid = rel->relid;
3604  Plan *subplan;
3605 
3606  /* it should be a subquery base rel... */
3607  Assert(scan_relid > 0);
3608  Assert(rel->rtekind == RTE_SUBQUERY);
3609 
3610  /*
3611  * Recursively create Plan from Path for subquery. Since we are entering
3612  * a different planner context (subroot), recurse to create_plan not
3613  * create_plan_recurse.
3614  */
3615  subplan = create_plan(rel->subroot, best_path->subpath);
3616 
3617  /* Sort clauses into best execution order */
3618  scan_clauses = order_qual_clauses(root, scan_clauses);
3619 
3620  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3621  scan_clauses = extract_actual_clauses(scan_clauses, false);
3622 
3623  /* Replace any outer-relation variables with nestloop params */
3624  if (best_path->path.param_info)
3625  {
3626  scan_clauses = (List *)
3627  replace_nestloop_params(root, (Node *) scan_clauses);
3628  process_subquery_nestloop_params(root,
3629  rel->subplan_params);
3630  }
3631 
3632  scan_plan = make_subqueryscan(tlist,
3633  scan_clauses,
3634  scan_relid,
3635  subplan);
3636 
3637  copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
3638 
3639  return scan_plan;
3640 }
3641 
3642 /*
3643  * create_functionscan_plan
3644  * Returns a functionscan plan for the base relation scanned by 'best_path'
3645  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3646  */
3647 static FunctionScan *
3648 create_functionscan_plan(PlannerInfo *root, Path *best_path,
3649  List *tlist, List *scan_clauses)
3650 {
3651  FunctionScan *scan_plan;
3652  Index scan_relid = best_path->parent->relid;
3653  RangeTblEntry *rte;
3654  List *functions;
3655 
3656  /* it should be a function base rel... */
3657  Assert(scan_relid > 0);
3658  rte = planner_rt_fetch(scan_relid, root);
3659  Assert(rte->rtekind == RTE_FUNCTION);
3660  functions = rte->functions;
3661 
3662  /* Sort clauses into best execution order */
3663  scan_clauses = order_qual_clauses(root, scan_clauses);
3664 
3665  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3666  scan_clauses = extract_actual_clauses(scan_clauses, false);
3667 
3668  /* Replace any outer-relation variables with nestloop params */
3669  if (best_path->param_info)
3670  {
3671  scan_clauses = (List *)
3672  replace_nestloop_params(root, (Node *) scan_clauses);
3673  /* The function expressions could contain nestloop params, too */
3674  functions = (List *) replace_nestloop_params(root, (Node *) functions);
3675  }
3676 
3677  scan_plan = make_functionscan(tlist, scan_clauses, scan_relid,
3678  functions, rte->funcordinality);
3679 
3680  copy_generic_path_info(&scan_plan->scan.plan, best_path);
3681 
3682  return scan_plan;
3683 }
3684 
3685 /*
3686  * create_tablefuncscan_plan
3687  * Returns a tablefuncscan plan for the base relation scanned by 'best_path'
3688  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3689  */
3690 static TableFuncScan *
3691 create_tablefuncscan_plan(PlannerInfo *root, Path *best_path,
3692  List *tlist, List *scan_clauses)
3693 {
3694  TableFuncScan *scan_plan;
3695  Index scan_relid = best_path->parent->relid;
3696  RangeTblEntry *rte;
3697  TableFunc *tablefunc;
3698 
3699  /* it should be a function base rel... */
3700  Assert(scan_relid > 0);
3701  rte = planner_rt_fetch(scan_relid, root);
3702  Assert(rte->rtekind == RTE_TABLEFUNC);
3703  tablefunc = rte->tablefunc;
3704 
3705  /* Sort clauses into best execution order */
3706  scan_clauses = order_qual_clauses(root, scan_clauses);
3707 
3708  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3709  scan_clauses = extract_actual_clauses(scan_clauses, false);
3710 
3711  /* Replace any outer-relation variables with nestloop params */
3712  if (best_path->param_info)
3713  {
3714  scan_clauses = (List *)
3715  replace_nestloop_params(root, (Node *) scan_clauses);
3716  /* The function expressions could contain nestloop params, too */
3717  tablefunc = (TableFunc *) replace_nestloop_params(root, (Node *) tablefunc);
3718  }
3719 
3720  scan_plan = make_tablefuncscan(tlist, scan_clauses, scan_relid,
3721  tablefunc);
3722 
3723  copy_generic_path_info(&scan_plan->scan.plan, best_path);
3724 
3725  return scan_plan;
3726 }
3727 
3728 /*
3729  * create_valuesscan_plan
3730  * Returns a valuesscan plan for the base relation scanned by 'best_path'
3731  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3732  */
3733 static ValuesScan *
3734 create_valuesscan_plan(PlannerInfo *root, Path *best_path,
3735  List *tlist, List *scan_clauses)
3736 {
3737  ValuesScan *scan_plan;
3738  Index scan_relid = best_path->parent->relid;
3739  RangeTblEntry *rte;
3740  List *values_lists;
3741 
3742  /* it should be a values base rel... */
3743  Assert(scan_relid > 0);
3744  rte = planner_rt_fetch(scan_relid, root);
3745  Assert(rte->rtekind == RTE_VALUES);
3746  values_lists = rte->values_lists;
3747 
3748  /* Sort clauses into best execution order */
3749  scan_clauses = order_qual_clauses(root, scan_clauses);
3750 
3751  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3752  scan_clauses = extract_actual_clauses(scan_clauses, false);
3753 
3754  /* Replace any outer-relation variables with nestloop params */
3755  if (best_path->param_info)
3756  {
3757  scan_clauses = (List *)
3758  replace_nestloop_params(root, (Node *) scan_clauses);
3759  /* The values lists could contain nestloop params, too */
3760  values_lists = (List *)
3761  replace_nestloop_params(root, (Node *) values_lists);
3762  }
3763 
3764  scan_plan = make_valuesscan(tlist, scan_clauses, scan_relid,
3765  values_lists);
3766 
3767  copy_generic_path_info(&scan_plan->scan.plan, best_path);
3768 
3769  return scan_plan;
3770 }
3771 
3772 /*
3773  * create_ctescan_plan
3774  * Returns a ctescan plan for the base relation scanned by 'best_path'
3775  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3776  */
3777 static CteScan *
3778 create_ctescan_plan(PlannerInfo *root, Path *best_path,
3779  List *tlist, List *scan_clauses)
3780 {
3781  CteScan *scan_plan;
3782  Index scan_relid = best_path->parent->relid;
3783  RangeTblEntry *rte;
3784  SubPlan *ctesplan = NULL;
3785  int plan_id;
3786  int cte_param_id;
3787  PlannerInfo *cteroot;
3788  Index levelsup;
3789  int ndx;
3790  ListCell *lc;
3791 
3792  Assert(scan_relid > 0);
3793  rte = planner_rt_fetch(scan_relid, root);
3794  Assert(rte->rtekind == RTE_CTE);
3795  Assert(!rte->self_reference);
3796 
3797  /*
3798  * Find the referenced CTE, and locate the SubPlan previously made for it.
3799  */
3800  levelsup = rte->ctelevelsup;
3801  cteroot = root;
3802  while (levelsup-- > 0)
3803  {
3804  cteroot = cteroot->parent_root;
3805  if (!cteroot) /* shouldn't happen */
3806  elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
3807  }
3808 
3809  /*
3810  * Note: cte_plan_ids can be shorter than cteList, if we are still working
3811  * on planning the CTEs (ie, this is a side-reference from another CTE).
3812  * So we mustn't use forboth here.
3813  */
3814  ndx = 0;
3815  foreach(lc, cteroot->parse->cteList)
3816  {
3817  CommonTableExpr *cte = (CommonTableExpr *) lfirst(lc);
3818 
3819  if (strcmp(cte->ctename, rte->ctename) == 0)
3820  break;
3821  ndx++;
3822  }
3823  if (lc == NULL) /* shouldn't happen */
3824  elog(ERROR, "could not find CTE \"%s\"", rte->ctename);
3825  if (ndx >= list_length(cteroot->cte_plan_ids))
3826  elog(ERROR, "could not find plan for CTE \"%s\"", rte->ctename);
3827  plan_id = list_nth_int(cteroot->cte_plan_ids, ndx);
3828  Assert(plan_id > 0);
3829  foreach(lc, cteroot->init_plans)
3830  {
3831  ctesplan = (SubPlan *) lfirst(lc);
3832  if (ctesplan->plan_id == plan_id)
3833  break;
3834  }
3835  if (lc == NULL) /* shouldn't happen */
3836  elog(ERROR, "could not find plan for CTE \"%s\"", rte->ctename);
3837 
3838  /*
3839  * We need the CTE param ID, which is the sole member of the SubPlan's
3840  * setParam list.
3841  */
3842  cte_param_id = linitial_int(ctesplan->setParam);
3843 
3844  /* Sort clauses into best execution order */
3845  scan_clauses = order_qual_clauses(root, scan_clauses);
3846 
3847  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3848  scan_clauses = extract_actual_clauses(scan_clauses, false);
3849 
3850  /* Replace any outer-relation variables with nestloop params */
3851  if (best_path->param_info)
3852  {
3853  scan_clauses = (List *)
3854  replace_nestloop_params(root, (Node *) scan_clauses);
3855  }
3856 
3857  scan_plan = make_ctescan(tlist, scan_clauses, scan_relid,
3858  plan_id, cte_param_id);
3859 
3860  copy_generic_path_info(&scan_plan->scan.plan, best_path);
3861 
3862  return scan_plan;
3863 }
3864 
3865 /*
3866  * create_namedtuplestorescan_plan
3867  * Returns a tuplestorescan plan for the base relation scanned by
3868  * 'best_path' with restriction clauses 'scan_clauses' and targetlist
3869  * 'tlist'.
3870  */
3871 static NamedTuplestoreScan *
3872 create_namedtuplestorescan_plan(PlannerInfo *root, Path *best_path,
3873  List *tlist, List *scan_clauses)
3874 {
3875  NamedTuplestoreScan *scan_plan;
3876  Index scan_relid = best_path->parent->relid;
3877  RangeTblEntry *rte;
3878 
3879  Assert(scan_relid > 0);
3880  rte = planner_rt_fetch(scan_relid, root);
3881  Assert(rte->rtekind == RTE_NAMEDTUPLESTORE);
3882 
3883  /* Sort clauses into best execution order */
3884  scan_clauses = order_qual_clauses(root, scan_clauses);
3885 
3886  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3887  scan_clauses = extract_actual_clauses(scan_clauses, false);
3888 
3889  /* Replace any outer-relation variables with nestloop params */
3890  if (best_path->param_info)
3891  {
3892  scan_clauses = (List *)
3893  replace_nestloop_params(root, (Node *) scan_clauses);
3894  }
3895 
3896  scan_plan = make_namedtuplestorescan(tlist, scan_clauses, scan_relid,
3897  rte->enrname);
3898 
3899  copy_generic_path_info(&scan_plan->scan.plan, best_path);
3900 
3901  return scan_plan;
3902 }
3903 
3904 /*
3905  * create_resultscan_plan
3906  * Returns a Result plan for the RTE_RESULT base relation scanned by
3907  * 'best_path' with restriction clauses 'scan_clauses' and targetlist
3908  * 'tlist'.
3909  */
3910 static Result *
3911 create_resultscan_plan(PlannerInfo *root, Path *best_path,
3912  List *tlist, List *scan_clauses)
3913 {
3914  Result *scan_plan;
3915  Index scan_relid = best_path->parent->relid;
3916  RangeTblEntry *rte PG_USED_FOR_ASSERTS_ONLY;
3917 
3918  Assert(scan_relid > 0);
3919  rte = planner_rt_fetch(scan_relid, root);
3920  Assert(rte->rtekind == RTE_RESULT);
3921 
3922  /* Sort clauses into best execution order */
3923  scan_clauses = order_qual_clauses(root, scan_clauses);
3924 
3925  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3926  scan_clauses = extract_actual_clauses(scan_clauses, false);
3927 
3928  /* Replace any outer-relation variables with nestloop params */
3929  if (best_path->param_info)
3930  {
3931  scan_clauses = (List *)
3932  replace_nestloop_params(root, (Node *) scan_clauses);
3933  }
3934 
3935  scan_plan = make_result(tlist, (Node *) scan_clauses, NULL);
3936 
3937  copy_generic_path_info(&scan_plan->plan, best_path);
3938 
3939  return scan_plan;
3940 }
3941 
3942 /*
3943  * create_worktablescan_plan
3944  * Returns a worktablescan plan for the base relation scanned by 'best_path'
3945  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3946  */
3947 static WorkTableScan *
3948 create_worktablescan_plan(PlannerInfo *root, Path *best_path,
3949  List *tlist, List *scan_clauses)
3950 {
3951  WorkTableScan *scan_plan;
3952  Index scan_relid = best_path->parent->relid;
3953  RangeTblEntry *rte;
3954  Index levelsup;
3955  PlannerInfo *cteroot;
3956 
3957  Assert(scan_relid > 0);
3958  rte = planner_rt_fetch(scan_relid, root);
3959  Assert(rte->rtekind == RTE_CTE);
3960  Assert(rte->self_reference);
3961 
3962  /*
3963  * We need to find the worktable param ID, which is in the plan level
3964  * that's processing the recursive UNION, which is one level *below* where
3965  * the CTE comes from.
3966  */
3967  levelsup = rte->ctelevelsup;
3968  if (levelsup == 0) /* shouldn't happen */
3969  elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
3970  levelsup--;
3971  cteroot = root;
3972  while (levelsup-- > 0)
3973  {
3974  cteroot = cteroot->parent_root;
3975  if (!cteroot) /* shouldn't happen */
3976  elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
3977  }
3978  if (cteroot->wt_param_id < 0) /* shouldn't happen */
3979  elog(ERROR, "could not find param ID for CTE \"%s\"", rte->ctename);
3980 
3981  /* Sort clauses into best execution order */
3982  scan_clauses = order_qual_clauses(root, scan_clauses);
3983 
3984  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3985  scan_clauses = extract_actual_clauses(scan_clauses, false);
3986 
3987  /* Replace any outer-relation variables with nestloop params */
3988  if (best_path->param_info)
3989  {
3990  scan_clauses = (List *)
3991  replace_nestloop_params(root, (Node *) scan_clauses);
3992  }
3993 
3994  scan_plan = make_worktablescan(tlist, scan_clauses, scan_relid,
3995  cteroot->wt_param_id);
3996 
3997  copy_generic_path_info(&scan_plan->scan.plan, best_path);
3998 
3999  return scan_plan;
4000 }
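/*
 * Illustrative note (not from the original file): in "WITH RECURSIVE t AS
 * (... UNION ALL SELECT ... FROM t) SELECT ...", the self-reference to t
 * carries a ctelevelsup pointing at the query level that owns the CTE, while
 * the recursive union is planned one level below that; hence levelsup is
 * decremented before walking up the parent_root chain to fetch wt_param_id.
 */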
4001 
4002 /*
4003  * create_foreignscan_plan
4004  * Returns a foreignscan plan for the relation scanned by 'best_path'
4005  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
4006  */
4007 static ForeignScan *
4008 create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
4009  List *tlist, List *scan_clauses)
4010 {
4011  ForeignScan *scan_plan;
4012  RelOptInfo *rel = best_path->path.parent;
4013  Index scan_relid = rel->relid;
4014  Oid rel_oid = InvalidOid;
4015  Plan *outer_plan = NULL;
4016 
4017  Assert(rel->fdwroutine != NULL);
4018 
4019  /* transform the child path if any */
4020  if (best_path->fdw_outerpath)
4021  outer_plan = create_plan_recurse(root, best_path->fdw_outerpath,
4022  CP_EXACT_TLIST);
4023 
4024  /*
4025  * If we're scanning a base relation, fetch its OID. (Irrelevant if
4026  * scanning a join relation.)
4027  */
4028  if (scan_relid > 0)
4029  {
4030  RangeTblEntry *rte;
4031 
4032  Assert(rel->rtekind == RTE_RELATION);
4033  rte = planner_rt_fetch(scan_relid, root);
4034  Assert(rte->rtekind == RTE_RELATION);
4035  rel_oid = rte->relid;
4036  }
4037 
4038  /*
4039  * Sort clauses into best execution order. We do this first since the FDW
4040  * might have more info than we do and wish to adjust the ordering.
4041  */
4042  scan_clauses = order_qual_clauses(root, scan_clauses);
4043 
4044  /*
4045  * Let the FDW perform its processing on the restriction clauses and
4046  * generate the plan node. Note that the FDW might remove restriction
4047  * clauses that it intends to execute remotely, or even add more (if it
4048  * has selected some join clauses for remote use but also wants them
4049  * rechecked locally).
4050  */
4051  scan_plan = rel->fdwroutine->GetForeignPlan(root, rel, rel_oid,
4052  best_path,
4053  tlist, scan_clauses,
4054  outer_plan);
4055 
4056  /* Copy cost data from Path to Plan; no need to make FDW do this */
4057  copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
4058 
4059  /* Copy foreign server OID; likewise, no need to make FDW do this */
4060  scan_plan->fs_server = rel->serverid;
4061 
4062  /*
4063  * Likewise, copy the relids that are represented by this foreign scan. An
4064  * upper rel doesn't have relids set, but it covers all the base relations
4065  * participating in the underlying scan, so use root's all_baserels.
4066  */
4067  if (rel->reloptkind == RELOPT_UPPER_REL)
4068  scan_plan->fs_relids = root->all_baserels;
4069  else
4070  scan_plan->fs_relids = best_path->path.parent->relids;
4071 
4072  /*
4073  * If this is a foreign join, and to make it valid to push down we had to
4074  * assume that the current user is the same as some user explicitly named
4075  * in the query, mark the finished plan as depending on the current user.
4076  */
4077  if (rel->useridiscurrent)
4078  root->glob->dependsOnRole = true;
4079 
4080  /*
4081  * Replace any outer-relation variables with nestloop params in the qual,
4082  * fdw_exprs and fdw_recheck_quals expressions. We do this last so that
4083  * the FDW doesn't have to be involved. (Note that parts of fdw_exprs or
4084  * fdw_recheck_quals could have come from join clauses, so doing this
4085  * beforehand on the scan_clauses wouldn't work.) We assume
4086  * fdw_scan_tlist contains no such variables.
4087  */
4088  if (best_path->path.param_info)
4089  {
4090  scan_plan->scan.plan.qual = (List *)
4091  replace_nestloop_params(root, (Node *) scan_plan->scan.plan.qual);
4092  scan_plan->fdw_exprs = (List *)
4093  replace_nestloop_params(root, (Node *) scan_plan->fdw_exprs);
4094  scan_plan->fdw_recheck_quals = (List *)
4095  replace_nestloop_params(root,
4096  (Node *) scan_plan->fdw_recheck_quals);
4097  }
4098 
4099  /*
4100  * If rel is a base relation, detect whether any system columns are
4101  * requested from the rel. (If rel is a join relation, rel->relid will be
4102  * 0, but there can be no Var with relid 0 in the rel's targetlist or the
4103  * restriction clauses, so we skip this in that case. Note that any such
4104  * columns in base relations that were joined are assumed to be contained
4105  * in fdw_scan_tlist.) This is a bit of a kluge and might go away
4106  * someday, so we intentionally leave it out of the API presented to FDWs.
4107  */
4108  scan_plan->fsSystemCol = false;
4109  if (scan_relid > 0)
4110  {
4111  Bitmapset *attrs_used = NULL;
4112  ListCell *lc;
4113  int i;
4114 
4115  /*
4116  * First, examine all the attributes needed for joins or final output.
4117  * Note: we must look at rel's targetlist, not the attr_needed data,
4118  * because attr_needed isn't computed for inheritance child rels.
4119  */
4120  pull_varattnos((Node *) rel->reltarget->exprs, scan_relid, &attrs_used);
4121 
4122  /* Add all the attributes used by restriction clauses. */
4123  foreach(lc, rel->baserestrictinfo)
4124  {
4125  RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
4126 
4127  pull_varattnos((Node *) rinfo->clause, scan_relid, &attrs_used);
4128  }
4129 
4130  /* Now, are any system columns requested from rel? */
4131  for (i = FirstLowInvalidHeapAttributeNumber + 1; i < 0; i++)
4132  {
4132  {
4133  if (bms_is_member(i - FirstLowInvalidHeapAttributeNumber, attrs_used))
4134  {
4135  scan_plan->fsSystemCol = true;
4136  break;
4137  }
4138  }
4139 
4140  bms_free(attrs_used);
4141  }
4142 
4143  return scan_plan;
4144 }
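/*
 * Illustrative note on the system-column loop above: pull_varattnos() stores
 * attribute numbers offset by FirstLowInvalidHeapAttributeNumber, so a
 * system column such as ctid (attno < 0) is detected with
 *     bms_is_member(i - FirstLowInvalidHeapAttributeNumber, attrs_used)
 * while iterating i over the negative attribute numbers only.
 */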
4145 
4146 /*
4147  * create_customscan_plan
4148  *
4149  * Transform a CustomPath into a Plan.
4150  */
4151 static CustomScan *
4152 create_customscan_plan(PlannerInfo *root, CustomPath *best_path,
4153  List *tlist, List *scan_clauses)
4154 {
4155  CustomScan *cplan;
4156  RelOptInfo *rel = best_path->path.parent;
4157  List *custom_plans = NIL;
4158  ListCell *lc;
4159 
4160  /* Recursively transform child paths. */
4161  foreach(lc, best_path->custom_paths)
4162  {
4163  Plan *plan = create_plan_recurse(root, (Path *) lfirst(lc),
4164  CP_EXACT_TLIST);
4165 
4166  custom_plans = lappend(custom_plans, plan);
4167  }
4168 
4169  /*
4170  * Sort clauses into the best execution order, although the custom-scan
4171  * provider can reorder them again.
4172  */
4173  scan_clauses = order_qual_clauses(root, scan_clauses);
4174 
4175  /*
4176  * Invoke custom plan provider to create the Plan node represented by the
4177  * CustomPath.
4178  */
4179  cplan = castNode(CustomScan,
4180  best_path->methods->PlanCustomPath(root,
4181  rel,
4182  best_path,
4183  tlist,
4184  scan_clauses,
4185  custom_plans));
4186 
4187  /*
4188  * Copy cost data from Path to Plan; no need to make custom-plan providers
4189  * do this
4190  */
4191  copy_generic_path_info(&cplan->scan.plan, &best_path->path);
4192 
4193  /* Likewise, copy the relids that are represented by this custom scan */
4194  cplan->custom_relids = best_path->path.parent->relids;
4195 
4196  /*
4197  * Replace any outer-relation variables with nestloop params in the qual
4198  * and custom_exprs expressions. We do this last so that the custom-plan
4199  * provider doesn't have to be involved. (Note that parts of custom_exprs
4200  * could have come from join clauses, so doing this beforehand on the
4201  * scan_clauses wouldn't work.) We assume custom_scan_tlist contains no
4202  * such variables.
4203  */
4204  if (best_path->path.param_info)
4205  {
4206  cplan->scan.plan.qual = (List *)
4207  replace_nestloop_params(root, (Node *) cplan->scan.plan.qual);
4208  cplan->custom_exprs = (List *)
4209  replace_nestloop_params(root, (Node *) cplan->custom_exprs);
4210  }
4211 
4212  return cplan;
4213 }
4214 
4215 
4216 /*****************************************************************************
4217  *
4218  * JOIN METHODS
4219  *
4220  *****************************************************************************/
4221 
4222 static NestLoop *
4223 create_nestloop_plan(PlannerInfo *root,
4224  NestPath *best_path)
4225 {
4226  NestLoop *join_plan;
4227  Plan *outer_plan;
4228  Plan *inner_plan;
4229  List *tlist = build_path_tlist(root, &best_path->jpath.path);
4230  List *joinrestrictclauses = best_path->jpath.joinrestrictinfo;
4231  List *joinclauses;
4232  List *otherclauses;
4233  Relids outerrelids;
4234  List *nestParams;
4235  Relids saveOuterRels = root->curOuterRels;
4236 
4237  /* NestLoop can project, so no need to be picky about child tlists */
4238  outer_plan = create_plan_recurse(root, best_path->jpath.outerjoinpath, 0);
4239 
4240  /* For a nestloop, include outer relids in curOuterRels for inner side */
4241  root->curOuterRels = bms_union(root->curOuterRels,
4242  best_path->jpath.outerjoinpath->parent->relids);
4243 
4244  inner_plan = create_plan_recurse(root, best_path->jpath.innerjoinpath, 0);
4245 
4246  /* Restore curOuterRels */
4247  bms_free(root->curOuterRels);
4248  root->curOuterRels = saveOuterRels;
4249 
4250  /* Sort join qual clauses into best execution order */
4251  joinrestrictclauses = order_qual_clauses(root, joinrestrictclauses);
4252 
4253  /* Get the join qual clauses (in plain expression form) */
4254  /* Any pseudoconstant clauses are ignored here */
4255  if (IS_OUTER_JOIN(best_path->jpath.jointype))
4256  {
4257  extract_actual_join_clauses(joinrestrictclauses,
4258  best_path->jpath.path.parent->relids,
4259  &joinclauses, &otherclauses);
4260  }
4261  else
4262  {
4263  /* We can treat all clauses alike for an inner join */
4264  joinclauses = extract_actual_clauses(joinrestrictclauses, false);
4265  otherclauses = NIL;
4266  }
4267 
4268  /* Replace any outer-relation variables with nestloop params */
4269  if (best_path->jpath.path.param_info)
4270  {
4271  joinclauses = (List *)
4272  replace_nestloop_params(root, (Node *) joinclauses);
4273  otherclauses = (List *)
4274  replace_nestloop_params(root, (Node *) otherclauses);
4275  }
4276 
4277  /*
4278  * Identify any nestloop parameters that should be supplied by this join
4279  * node, and remove them from root->curOuterParams.
4280  */
4281  outerrelids = best_path->jpath.outerjoinpath->parent->relids;
4282  nestParams = identify_current_nestloop_params(root, outerrelids);
4283 
4284  join_plan = make_nestloop(tlist,
4285  joinclauses,
4286  otherclauses,
4287  nestParams,
4288  outer_plan,
4289  inner_plan,
4290  best_path->jpath.jointype,
4291  best_path->jpath.inner_unique);
4292 
4293  copy_generic_path_info(&join_plan->join.plan, &best_path->jpath.path);
4294 
4295  return join_plan;
4296 }
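/*
 * Rough sketch of the parameter flow (illustrative only): while the inner
 * subplan is built, Vars belonging to the outer side (tracked in
 * root->curOuterRels) are replaced by nestloop Params, e.g. an inner qual
 * "inner.y = outer.x" effectively becomes "inner.y = $1"; the Params this
 * join can supply are then pulled from root->curOuterParams by
 * identify_current_nestloop_params() and attached as nestParams, so the
 * executor can refresh them on every rescan of the inner plan.
 */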
4297 
4298 static MergeJoin *
4299 create_mergejoin_plan(PlannerInfo *root,
4300  MergePath *best_path)
4301 {
4302  MergeJoin *join_plan;
4303  Plan *outer_plan;
4304  Plan *inner_plan;
4305  List *tlist = build_path_tlist(root, &best_path->jpath.path);
4306  List *joinclauses;
4307  List *otherclauses;
4308  List *mergeclauses;
4309  List *outerpathkeys;
4310  List *innerpathkeys;
4311  int nClauses;
4312  Oid *mergefamilies;
4313  Oid *mergecollations;
4314  int *mergestrategies;
4315  bool *mergenullsfirst;
4316  PathKey *opathkey;
4317  EquivalenceClass *opeclass;
4318  int i;
4319  ListCell *lc;
4320  ListCell *lop;
4321  ListCell *lip;
4322  Path *outer_path = best_path->jpath.outerjoinpath;
4323  Path *inner_path = best_path->jpath.innerjoinpath;
4324 
4325  /*
4326  * MergeJoin can project, so we don't have to demand exact tlists from the
4327  * inputs. However, if we're intending to sort an input's result, it's
4328  * best to request a small tlist so we aren't sorting more data than
4329  * necessary.
4330  */
4331  outer_plan = create_plan_recurse(root, best_path->jpath.outerjoinpath,
4332  (best_path->outersortkeys != NIL) ? CP_SMALL_TLIST : 0);
4333 
4334  inner_plan = create_plan_recurse(root, best_path->jpath.innerjoinpath,
4335  (best_path->innersortkeys != NIL) ? CP_SMALL_TLIST : 0);
4336 
4337  /* Sort join qual clauses into best execution order */
4338  /* NB: do NOT reorder the mergeclauses */
4339  joinclauses = order_qual_clauses(root, best_path->jpath.joinrestrictinfo);
4340 
4341  /* Get the join qual clauses (in plain expression form) */
4342  /* Any pseudoconstant clauses are ignored here */
4343  if (IS_OUTER_JOIN(best_path->jpath.jointype))
4344  {
4345  extract_actual_join_clauses(joinclauses,
4346  best_path->jpath.path.parent->relids,
4347  &joinclauses, &otherclauses);
4348  }
4349  else
4350  {
4351  /* We can treat all clauses alike for an inner join */
4352  joinclauses = extract_actual_clauses(joinclauses, false);
4353  otherclauses = NIL;
4354  }
4355 
4356  /*
4357  * Remove the mergeclauses from the list of join qual clauses, leaving the
4358  * list of quals that must be checked as qpquals.
4359  */
4360  mergeclauses = get_actual_clauses(best_path->path_mergeclauses);
4361  joinclauses = list_difference(joinclauses, mergeclauses);
4362 
4363  /*
4364  * Replace any outer-relation variables with nestloop params. There
4365  * should not be any in the mergeclauses.
4366  */
4367  if (best_path->jpath.path.param_info)
4368  {
4369  joinclauses = (List *)
4370  replace_nestloop_params(root, (Node *) joinclauses);
4371  otherclauses = (List *)
4372  replace_nestloop_params(root, (Node *) otherclauses);
4373  }
4374 
4375  /*
4376  * Rearrange mergeclauses, if needed, so that the outer variable is always
4377  * on the left; mark the mergeclause restrictinfos with correct
4378  * outer_is_left status.
4379  */
4380  mergeclauses = get_switched_clauses(best_path->path_mergeclauses,
4381  best_path->jpath.outerjoinpath->parent->relids);
4382 
4383  /*
4384  * Create explicit sort nodes for the outer and inner paths if necessary.
4385  */
4386  if (best_path->outersortkeys)
4387  {
4388  Relids outer_relids = outer_path->parent->relids;
4389  Sort *sort = make_sort_from_pathkeys(outer_plan,
4390  best_path->outersortkeys,
4391  outer_relids);
4392 
4393  label_sort_with_costsize(root, sort, -1.0);
4394  outer_plan = (Plan *) sort;
4395  outerpathkeys = best_path->outersortkeys;
4396  }
4397  else
4398  outerpathkeys = best_path->jpath.outerjoinpath->pathkeys;
4399 
4400  if (best_path->innersortkeys)
4401  {
4402  Relids inner_relids = inner_path->parent->relids;
4403  Sort *sort = make_sort_from_pathkeys(inner_plan,
4404  best_path->innersortkeys,
4405  inner_relids);
4406 
4407  label_sort_with_costsize(root, sort, -1.0);
4408  inner_plan = (Plan *) sort;
4409  innerpathkeys = best_path->innersortkeys;
4410  }
4411  else
4412  innerpathkeys = best_path->jpath.innerjoinpath->pathkeys;
4413 
4414  /*
4415  * If specified, add a materialize node to shield the inner plan from the
4416  * need to handle mark/restore.
4417  */
4418  if (best_path->materialize_inner)
4419  {
4420  Plan *matplan = (Plan *) make_material(inner_plan);
4421 
4422  /*
4423  * We assume the materialize will not spill to disk, and therefore
4424  * charge just cpu_operator_cost per tuple. (Keep this estimate in
4425  * sync with final_cost_mergejoin.)
4426  */
4427  copy_plan_costsize(matplan, inner_plan);
4428  matplan->total_cost += cpu_operator_cost * matplan->plan_rows;
4429 
4430  inner_plan = matplan;
4431  }
4432 
4433  /*
4434  * Compute the opfamily/collation/strategy/nullsfirst arrays needed by the
4435  * executor. The information is in the pathkeys for the two inputs, but
4436  * we need to be careful about the possibility of mergeclauses sharing a
4437  * pathkey, as well as the possibility that the inner pathkeys are not in
4438  * an order matching the mergeclauses.
4439  */
4440  nClauses = list_length(mergeclauses);
4441  Assert(nClauses == list_length(best_path->path_mergeclauses));
4442  mergefamilies = (Oid *) palloc(nClauses * sizeof(Oid));
4443  mergecollations = (Oid *) palloc(nClauses * sizeof(Oid));
4444  mergestrategies = (int *) palloc(nClauses * sizeof(int));
4445  mergenullsfirst = (bool *) palloc(nClauses * sizeof(bool));
4446 
4447  opathkey = NULL;
4448  opeclass = NULL;
4449  lop = list_head(outerpathkeys);
4450  lip = list_head(innerpathkeys);
4451  i = 0;
4452  foreach(lc, best_path->path_mergeclauses)
4453  {
4454  RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
4455  EquivalenceClass *oeclass;
4456  EquivalenceClass *ieclass;
4457  PathKey *ipathkey = NULL;
4458  EquivalenceClass *ipeclass = NULL;
4459  bool first_inner_match = false;
4460 
4461  /* fetch outer/inner eclass from mergeclause */
4462  if (rinfo->outer_is_left)
4463  {
4464  oeclass = rinfo->left_ec;
4465  ieclass = rinfo->right_ec;
4466  }
4467  else
4468  {
4469  oeclass = rinfo->right_ec;
4470  ieclass = rinfo->left_ec;
4471  }
4472  Assert(oeclass != NULL);
4473  Assert(ieclass != NULL);
4474 
4475  /*
4476  * We must identify the pathkey elements associated with this clause
4477  * by matching the eclasses (which should give a unique match, since
4478  * the pathkey lists should be canonical). In typical cases the merge
4479  * clauses are one-to-one with the pathkeys, but when dealing with
4480  * partially redundant query conditions, things are more complicated.
4481  *
4482  * lop and lip reference the first as-yet-unmatched pathkey elements.
4483  * If they're NULL then all pathkey elements have been matched.
4484  *
4485  * The ordering of the outer pathkeys should match the mergeclauses,
4486  * by construction (see find_mergeclauses_for_outer_pathkeys()). There
4487  * could be more than one mergeclause for the same outer pathkey, but
4488  * no pathkey may be entirely skipped over.
4489  */
4490  if (oeclass != opeclass) /* multiple matches are not interesting */
4491  {
4492  /* doesn't match the current opathkey, so must match the next */
4493  if (lop == NULL)
4494  elog(ERROR, "outer pathkeys do not match mergeclauses");
4495  opathkey = (PathKey *) lfirst(lop);
4496  opeclass = opathkey->pk_eclass;
4497  lop = lnext(outerpathkeys, lop);
4498  if (oeclass != opeclass)
4499  elog(ERROR, "outer pathkeys do not match mergeclauses");
4500  }
4501 
4502  /*
4503  * The inner pathkeys likewise should not have skipped-over keys, but
4504  * it's possible for a mergeclause to reference some earlier inner
4505  * pathkey if we had redundant pathkeys. For example we might have
4506  * mergeclauses like "o.a = i.x AND o.b = i.y AND o.c = i.x". The
4507  * implied inner ordering is then "ORDER BY x, y, x", but the pathkey
4508  * mechanism drops the second sort by x as redundant, and this code
4509  * must cope.
4510  *
4511  * It's also possible for the implied inner-rel ordering to be like
4512  * "ORDER BY x, y, x DESC". We still drop the second instance of x as
4513  * redundant; but this means that the sort ordering of a redundant
4514  * inner pathkey should not be considered significant. So we must
4515  * detect whether this is the first clause matching an inner pathkey.
4516  */
4517  if (lip)
4518  {
4519  ipathkey = (PathKey *) lfirst(lip);
4520  ipeclass = ipathkey->pk_eclass;
4521  if (ieclass == ipeclass)
4522  {
4523  /* successful first match to this inner pathkey */
4524  lip = lnext(innerpathkeys, lip);
4525  first_inner_match = true;
4526  }
4527  }
4528  if (!first_inner_match)
4529  {
4530  /* redundant clause ... must match something before lip */
4531  ListCell *l2;
4532 
4533  foreach(l2, innerpathkeys)
4534  {
4535  if (l2 == lip)
4536  break;
4537  ipathkey = (PathKey *) lfirst(l2);
4538  ipeclass = ipathkey->pk_eclass;
4539  if (ieclass == ipeclass)
4540  break;
4541  }
4542  if (ieclass != ipeclass)
4543  elog(ERROR, "inner pathkeys do not match mergeclauses");
4544  }
4545 
4546  /*
4547  * The pathkeys should always match each other as to opfamily and
4548  * collation (which affect equality), but if we're considering a
4549  * redundant inner pathkey, its sort ordering might not match. In
4550  * such cases we may ignore the inner pathkey's sort ordering and use
4551  * the outer's. (In effect, we're lying to the executor about the
4552  * sort direction of this inner column, but it does not matter since
4553  * the run-time row comparisons would only reach this column when
4554  * there's equality for the earlier column containing the same eclass.
4555  * There could be only one value in this column for the range of inner
4556  * rows having a given value in the earlier column, so it does not
4557  * matter which way we imagine this column to be ordered.) But a
4558  * non-redundant inner pathkey had better match outer's ordering too.
4559  */
4560  if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
4561  opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation)
4562  elog(ERROR, "left and right pathkeys do not match in mergejoin");
4563  if (first_inner_match &&
4564  (opathkey->pk_strategy != ipathkey->pk_strategy ||
4565  opathkey->pk_nulls_first != ipathkey->pk_nulls_first))
4566  elog(ERROR, "left and right pathkeys do not match in mergejoin");
4567 
4568  /* OK, save info for executor */
4569  mergefamilies[i] = opathkey->pk_opfamily;
4570  mergecollations[i] = opathkey->pk_eclass->ec_collation;
4571  mergestrategies[i] = opathkey->pk_strategy;
4572  mergenullsfirst[i] = opathkey->pk_nulls_first;
4573  i++;
4574  }
4575 
4576  /*
4577  * Note: it is not an error if we have additional pathkey elements (i.e.,
4578  * lop or lip isn't NULL here). The input paths might be better-sorted
4579  * than we need for the current mergejoin.
4580  */
4581 
4582  /*
4583  * Now we can build the mergejoin node.
4584  */
4585  join_plan = make_mergejoin(tlist,
4586  joinclauses,
4587  otherclauses,
4588  mergeclauses,
4589  mergefamilies,
4590  mergecollations,
4591  mergestrategies,
4592  mergenullsfirst,
4593  outer_plan,
4594  inner_plan,
4595  best_path->jpath.jointype,
4596  best_path->jpath.inner_unique,
4597  best_path->skip_mark_restore);
4598 
4599  /* Costs of sort and material steps are included in path cost already */
4600  copy_generic_path_info(&join_plan->join.plan, &best_path->jpath.path);
4601 
4602  return join_plan;
4603 }
4604 
4605 static HashJoin *
4606 create_hashjoin_plan(PlannerInfo *root,
4607  HashPath *best_path)
4608 {
4609  HashJoin *join_plan;
4610  Hash *hash_plan;
4611  Plan *outer_plan;
4612  Plan *inner_plan;
4613  List *tlist = build_path_tlist(root, &best_path->jpath.path);
4614  List *joinclauses;
4615  List *otherclauses;
4616  List *hashclauses;
4617  List *hashoperators = NIL;
4618  List *hashcollations = NIL;
4619  List *inner_hashkeys = NIL;
4620  List *outer_hashkeys = NIL;
4621  Oid skewTable = InvalidOid;
4622  AttrNumber skewColumn = InvalidAttrNumber;
4623  bool skewInherit = false;
4624  ListCell *lc;
4625 
4626  /*
4627  * HashJoin can project, so we don't have to demand exact tlists from the
4628  * inputs. However, it's best to request a small tlist from the inner
4629  * side, so that we aren't storing more data than necessary. Likewise, if
4630  * we anticipate batching, request a small tlist from the outer side so
4631  * that we don't put extra data in the outer batch files.
4632  */
4633  outer_plan = create_plan_recurse(root, best_path->jpath.outerjoinpath,
4634  (best_path->num_batches > 1) ? CP_SMALL_TLIST : 0);
4635 
4636  inner_plan = create_plan_recurse(root, best_path->jpath.innerjoinpath,
4637  CP_SMALL_TLIST);
4638 
4639  /* Sort join qual clauses into best execution order */
4640  joinclauses = order_qual_clauses(root, best_path->jpath.joinrestrictinfo);
4641  /* There's no point in sorting the hash clauses ... */
4642 
4643  /* Get the join qual clauses (in plain expression form) */
4644  /* Any pseudoconstant clauses are ignored here */
4645  if (IS_OUTER_JOIN(best_path->jpath.jointype))
4646  {
4647  extract_actual_join_clauses(joinclauses,
4648  best_path->jpath.path.parent->relids,
4649  &joinclauses, &otherclauses);
4650  }
4651  else
4652  {
4653  /* We can treat all clauses alike for an inner join */
4654  joinclauses = extract_actual_clauses(joinclauses, false);
4655  otherclauses = NIL;
4656  }
4657 
4658  /*
4659  * Remove the hashclauses from the list of join qual clauses, leaving the
4660  * list of quals that must be checked as qpquals.
4661  */
4662  hashclauses = get_actual_clauses(best_path->path_hashclauses);
4663  joinclauses = list_difference(joinclauses, hashclauses);
4664 
4665  /*
4666  * Replace any outer-relation variables with nestloop params. There
4667  * should not be any in the hashclauses.
4668  */
4669  if (best_path->jpath.path.param_info)
4670  {
4671  joinclauses = (List *)
4672  replace_nestloop_params(root, (Node *) joinclauses);
4673  otherclauses = (List *)
4674  replace_nestloop_params(root, (Node *) otherclauses);
4675  }
4676 
4677  /*
4678  * Rearrange hashclauses, if needed, so that the outer variable is always
4679  * on the left.
4680  */
4681  hashclauses = get_switched_clauses(best_path->path_hashclauses,
4682  best_path->jpath.outerjoinpath->parent->relids);
4683 
4684  /*
4685  * If there is a single join clause and we can identify the outer variable
4686  * as a simple column reference, supply its identity for possible use in
4687  * skew optimization. (Note: in principle we could do skew optimization
4688  * with multiple join clauses, but we'd have to be able to determine the
4689  * most common combinations of outer values, which we don't currently have
4690  * enough stats for.)
4691  */
4692  if (list_length(hashclauses) == 1)
4693  {
4694  OpExpr *clause = (OpExpr *) linitial(hashclauses);
4695  Node *node;
4696 
4697  Assert(is_opclause(clause));
4698  node = (Node *) linitial(clause->args);
4699  if (IsA(node, RelabelType))
4700  node = (Node *) ((RelabelType *) node)->arg;
4701  if (IsA(node, Var))
4702  {
4703  Var *var = (Var *) node;
4704  RangeTblEntry *rte;
4705 
4706  rte = root->simple_rte_array[var->varno];
4707  if (rte->rtekind == RTE_RELATION)
4708  {
4709  skewTable = rte->relid;
4710  skewColumn = var->varattno;
4711  skewInherit = rte->inh;
4712  }
4713  }
4714  }
4715 
4716  /*
4717  * Collect hash related information. The hashed expressions are
4718  * deconstructed into outer/inner expressions, so they can be computed
4719  * separately (inner expressions are used to build the hashtable via Hash,
4720  * outer expressions to perform lookups of tuples from HashJoin's outer
4721  * plan in the hashtable). Also collect operator information necessary to
4722  * build the hashtable.
4723  */
4724  foreach(lc, hashclauses)
4725  {
4726  OpExpr *hclause = lfirst_node(OpExpr, lc);
4727 
4728  hashoperators = lappend_oid(hashoperators, hclause->opno);
4729  hashcollations = lappend_oid(hashcollations, hclause->inputcollid);
4730  outer_hashkeys = lappend(outer_hashkeys, linitial(hclause->args));
4731  inner_hashkeys = lappend(inner_hashkeys, lsecond(hclause->args));
4732  }
4733 
4734  /*
4735  * Build the hash node and hash join node.
4736  */
4737  hash_plan = make_hash(inner_plan,
4738  inner_hashkeys,
4739  skewTable,
4740  skewColumn,
4741  skewInherit);
4742 
4743  /*
4744  * Set Hash node's startup & total costs equal to total cost of input
4745  * plan; this only affects EXPLAIN display, not decisions.
4746  */
4747  copy_plan_costsize(&hash_plan->plan, inner_plan);
4748  hash_plan->plan.startup_cost = hash_plan->plan.total_cost;
4749 
4750  /*
4751  * If parallel-aware, the executor will also need an estimate of the total
4752  * number of rows expected from all participants so that it can size the
4753  * shared hash table.
4754  */
4755  if (best_path->jpath.path.parallel_aware)
4756  {
4757  hash_plan->plan.parallel_aware = true;
4758  hash_plan->rows_total = best_path->inner_rows_total;
4759  }
4760 
4761  join_plan = make_hashjoin(tlist,
4762  joinclauses,
4763  otherclauses,
4764  hashclauses,
4765  hashoperators,
4766  hashcollations,
4767  outer_hashkeys,
4768  outer_plan,
4769  (Plan *) hash_plan,
4770  best_path->jpath.jointype,
4771  best_path->jpath.inner_unique);
4772 
4773  copy_generic_path_info(&join_plan->join.plan, &best_path->jpath.path);
4774 
4775  return join_plan;
4776 }
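/*
 * Rough worked example (illustrative only): for a hash clause
 * "outer.x = inner.y" (the outer variable having been forced to the left by
 * get_switched_clauses), the loop above collects opno/inputcollid and splits
 * the arguments so that outer_hashkeys = (outer.x), used for probing, and
 * inner_hashkeys = (inner.y), hashed into the table by the Hash node.
 */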
4777 
4778 
4779 /*****************************************************************************
4780  *
4781  * SUPPORTING ROUTINES
4782  *
4783  *****************************************************************************/
4784 
4785 /*
4786  * replace_nestloop_params
4787  * Replace outer-relation Vars and PlaceHolderVars in the given expression
4788  * with nestloop Params
4789  *
4790  * All Vars and PlaceHolderVars belonging to the relation(s) identified by
4791  * root->curOuterRels are replaced by Params, and entries are added to
4792  * root->curOuterParams if not already present.
4793  */
4794 static Node *
4795 replace_nestloop_params(PlannerInfo *root, Node *expr)
4796 {
4797  /* No setup needed for tree walk, so away we go */
4798  return replace_nestloop_params_mutator(expr, root);
4799 }
4800 
4801 static Node *
4802 replace_nestloop_params_mutator(Node *node, PlannerInfo *root)
4803 {
4804  if (node == NULL)
4805  return NULL;
4806  if (IsA(node, Var))
4807  {
4808  Var *var = (Var *) node;
4809 
4810  /* Upper-level Vars should be long gone at this point */
4811  Assert(var->varlevelsup == 0);
4812  /* If not to be replaced, we can just return the Var unmodified */
4813  if (IS_SPECIAL_VARNO(var->varno) ||
4814  !bms_is_member(var->varno, root->curOuterRels))
4815  return node;
4816  /* Replace the Var with a nestloop Param */
4817  return (Node *) replace_nestloop_param_var(root, var);
4818  }
4819  if (IsA(node, PlaceHolderVar))
4820  {
4821  PlaceHolderVar *phv = (PlaceHolderVar *) node;
4822 
4823  /* Upper-level PlaceHolderVars should be long gone at this point */
4824  Assert(phv->phlevelsup == 0);
4825 
4826  /*
4827  * Check whether we need to replace the PHV. We use bms_overlap as a
4828  * cheap/quick test to see if the PHV might be evaluated in the outer
4829  * rels, and then grab its PlaceHolderInfo to tell for sure.
4830  */
4831  if (!bms_overlap(phv->phrels, root->curOuterRels) ||
4832  !bms_is_subset(find_placeholder_info(root, phv, false)->ph_eval_at,
4833  root->curOuterRels))
4834  {
4835  /*
4836  * We can't replace the whole PHV, but we might still need to
4837  * replace Vars or PHVs within its expression, in case it ends up
4838  * actually getting evaluated here. (It might get evaluated in
4839  * this plan node, or some child node; in the latter case we don't
4840  * really need to process the expression here, but we haven't got
4841  * enough info to tell if that's the case.) Flat-copy the PHV
4842  * node and then recurse on its expression.
4843  *
4844  * Note that after doing this, we might have different
4845  * representations of the contents of the same PHV in different
4846  * parts of the plan tree. This is OK because equal() will just
4847  * match on phid/phlevelsup, so setrefs.c will still recognize an
4848  * upper-level reference to a lower-level copy of the same PHV.
4849  */
4850  PlaceHolderVar *newphv = makeNode(PlaceHolderVar);
4851 
4852  memcpy(newphv, phv, sizeof(PlaceHolderVar));
4853  newphv->phexpr = (Expr *)
4854  replace_nestloop_params_mutator((Node *) phv->phexpr,
4855  root);
4856  return (Node *) newphv;
4857  }
4858  /* Replace the PlaceHolderVar with a nestloop Param */
4859  return (Node *) replace_nestloop_param_placeholdervar(root, phv);
4860  }
4861  return expression_tree_mutator(node,
4862  replace_nestloop_params_mutator,
4863  (void *) root);
4864 }
4865 
4866 /*
4867  * fix_indexqual_references
4868  * Adjust indexqual clauses to the form the executor's indexqual
4869  * machinery needs.
4870  *
4871  * We have three tasks here:
4872  * * Select the actual qual clauses out of the input IndexClause list,
4873  * and remove RestrictInfo nodes from the qual clauses.
4874  * * Replace any outer-relation Var or PHV nodes with nestloop Params.
4875  * (XXX eventually, that responsibility should go elsewhere?)
4876  * * Index keys must be represented by Var nodes with varattno set to the
4877  * index's attribute number, not the attribute number in the original rel.
4878  *
4879  * *stripped_indexquals_p receives a list of the actual qual clauses.
4880  *
4881  * *fixed_indexquals_p receives a list of the adjusted quals. This is a copy
4882  * that shares no substructure with the original; this is needed in case there
4883  * are subplans in it (we need two separate copies of the subplan tree, or
4884  * things will go awry).
4885  */
4886 static void
4887 fix_indexqual_references(PlannerInfo *root, IndexPath *index_path,
4888  List **stripped_indexquals_p, List **fixed_indexquals_p)
4889 {
4890  IndexOptInfo *index = index_path->indexinfo;
4891  List *stripped_indexquals;
4892  List *fixed_indexquals;
4893  ListCell *lc;
4894 
4895  stripped_indexquals = fixed_indexquals = NIL;
4896 
4897  foreach(lc, index_path->indexclauses)
4898  {
4899  IndexClause *iclause = lfirst_node(IndexClause, lc);
4900  int indexcol = iclause->indexcol;
4901  ListCell *lc2;
4902 
4903  foreach(lc2, iclause->indexquals)
4904  {
4905  RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc2);
4906  Node *clause = (Node *) rinfo->clause;
4907 
4908  stripped_indexquals = lappend(stripped_indexquals, clause);
4909  clause = fix_indexqual_clause(root, index, indexcol,
4910  clause, iclause->indexcols);
4911  fixed_indexquals = lappend(fixed_indexquals, clause);
4912  }
4913  }
4914 
4915  *stripped_indexquals_p = stripped_indexquals;
4916  *fixed_indexquals_p = fixed_indexquals;
4917 }
4918 
4919 /*
4920  * fix_indexorderby_references
4921  * Adjust indexorderby clauses to the form the executor's index
4922  * machinery needs.
4923  *
4924  * This is a simplified version of fix_indexqual_references. The input is
4925  * bare clauses and a separate indexcol list, instead of IndexClauses.
4926  */
4927 static List *
4928 fix_indexorderby_references(PlannerInfo *root, IndexPath *index_path)
4929 {
4930  IndexOptInfo *index = index_path->indexinfo;
4931  List *fixed_indexorderbys;
4932  ListCell *lcc,
4933  *lci;
4934 
4935  fixed_indexorderbys = NIL;
4936 
4937  forboth(lcc, index_path->indexorderbys, lci, index_path->indexorderbycols)
4938  {
4939  Node *clause = (Node *) lfirst(lcc);
4940  int indexcol = lfirst_int(lci);
4941 
4942  clause = fix_indexqual_clause(root, index, indexcol, clause, NIL);
4943  fixed_indexorderbys = lappend(fixed_indexorderbys, clause);
4944  }
4945 
4946  return fixed_indexorderbys;
4947 }
4948 
4949 /*
4950  * fix_indexqual_clause
4951  * Convert a single indexqual clause to the form needed by the executor.
4952  *
4953  * We replace nestloop params here, and replace the index key variables
4954  * or expressions by index Var nodes.
4955  */
4956 static Node *
4957 fix_indexqual_clause(PlannerInfo *root, IndexOptInfo *index, int indexcol,
4958  Node *clause, List *indexcolnos)
4959 {
4960  /*
4961  * Replace any outer-relation variables with nestloop params.
4962  *
4963  * This also makes a copy of the clause, so it's safe to modify it
4964  * in-place below.
4965  */
4966  clause = replace_nestloop_params(root, clause);
4967 
4968  if (IsA(clause, OpExpr))
4969  {
4970  OpExpr *op = (OpExpr *) clause;
4971 
4972  /* Replace the indexkey expression with an index Var. */
4973  linitial(op->args) = fix_indexqual_operand(linitial(op->args),
4974  index,
4975  indexcol);
4976  }
4977  else if (IsA(clause, RowCompareExpr))
4978  {
4979  RowCompareExpr *rc = (RowCompareExpr *) clause;
4980  ListCell *lca,
4981  *lcai;
4982 
4983  /* Replace the indexkey expressions with index Vars. */
4984  Assert(list_length(rc->largs) == list_length(indexcolnos));
4985  forboth(lca, rc->largs, lcai, indexcolnos)
4986  {
4987  lfirst(lca) = fix_indexqual_operand(lfirst(lca),
4988  index,
4989  lfirst_int(lcai));
4990  }
4991  }
4992  else if (IsA(clause, ScalarArrayOpExpr))
4993  {
4994  ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
4995 
4996  /* Replace the indexkey expression with an index Var. */
4997  linitial(saop->args) = fix_indexqual_operand(linitial(saop->args),
4998  index,
4999  indexcol);
5000  }
5001  else if (IsA(clause, NullTest))
5002  {
5003  NullTest *nt = (NullTest *) clause;
5004 
5005  /* Replace the indexkey expression with an index Var. */
5006  nt->arg = (Expr *) fix_indexqual_operand((Node *) nt->arg,
5007  index,
5008  indexcol);
5009  }
5010  else
5011  elog(ERROR, "unsupported indexqual type: %d",
5012  (int) nodeTag(clause));
5013 
5014  return clause;
5015 }
5016 
5017 /*
5018  * fix_indexqual_operand
5019  * Convert an indexqual expression to a Var referencing the index column.
5020  *
5021  * We represent index keys by Var nodes having varno == INDEX_VAR and varattno
5022  * equal to the index's attribute number (index column position).
5023  *
5024  * Most of the code here is just for sanity cross-checking that the given
5025  * expression actually matches the index column it's claimed to.
5026  */
5027 static Node *
5028 fix_indexqual_operand(Node *node, IndexOptInfo *index, int indexcol)
5029 {
5030  Var *result;
5031  int pos;
5032  ListCell *indexpr_item;
5033 
5034  /*
5035  * Remove any binary-compatible relabeling of the indexkey
5036  */
5037  if (IsA(node, RelabelType))
5038  node = (Node *) ((RelabelType *) node)->arg;
5039 
5040  Assert(indexcol >= 0 && indexcol < index->ncolumns);
5041 
5042  if (index->indexkeys[indexcol] != 0)
5043  {
5044  /* It's a simple index column */
5045  if (IsA(node, Var) &&
5046  ((Var *) node)->varno == index->rel->relid &&
5047  ((Var *) node)->varattno == index->indexkeys[indexcol])
5048  {
5049  result = (Var *) copyObject(node);
5050  result->varno = INDEX_VAR;
5051  result->varattno = indexcol + 1;
5052  return (Node *) result;
5053  }
5054  else
5055  elog(ERROR, "index key does not match expected index column");
5056  }
5057 
5058  /* It's an index expression, so find and cross-check the expression */
5059  indexpr_item = list_head(index->indexprs);
5060  for (pos = 0; pos < index->ncolumns; pos++)
5061  {
5062  if (index->indexkeys[pos] == 0)
5063  {
5064  if (indexpr_item == NULL)
5065  elog(ERROR, "too few entries in indexprs list");
5066  if (pos == indexcol)
5067  {
5068  Node *indexkey;
5069 
5070  indexkey = (Node *) lfirst(indexpr_item);
5071  if (indexkey && IsA(indexkey, RelabelType))
5072  indexkey = (Node *) ((RelabelType *) indexkey)->arg;
5073  if (equal(node, indexkey))
5074  {
5075  result = makeVar(INDEX_VAR, indexcol + 1,
5076  exprType(lfirst(indexpr_item)), -1,
5077  exprCollation(lfirst(indexpr_item)),
5078  0);
5079  return (Node *) result;
5080  }
5081  else
5082  elog(ERROR, "index key does not match expected index column");
5083  }
5084  indexpr_item = lnext(index->indexprs, indexpr_item);
5085  }
5086  }
5087 
5088  /* Oops... */
5089  elog(ERROR, "index key does not match expected index column");
5090  return NULL; /* keep compiler quiet */
5091 }
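/*
 * Rough worked example (illustrative only): for an index on (a, b) of some
 * relation, an indexqual referencing column b for indexcol 1 is rewritten as
 * a Var with varno = INDEX_VAR and varattno = 2, i.e. positions within the
 * index rather than the underlying table; expression index columns are
 * instead matched against indexprs using equal().
 */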
5092 
5093 /*
5094  * get_switched_clauses
5095  * Given a list of merge or hash joinclauses (as RestrictInfo nodes),
5096  * extract the bare clauses, and rearrange the elements within the
5097  * clauses, if needed, so the outer join variable is on the left and
5098  * the inner is on the right. The original clause data structure is not
5099  * touched; a modified list is returned. We do, however, set the transient
5100  * outer_is_left field in each RestrictInfo to show which side was which.
5101  */
5102 static List *
5103 get_switched_clauses(List *clauses, Relids outerrelids)
5104 {
5105  List *t_list = NIL;
5106  ListCell *l;
5107 
5108  foreach(l, clauses)
5109  {
5110  RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(l);
5111  OpExpr *clause = (OpExpr *) restrictinfo->clause;
5112 
5113  Assert(is_opclause(clause));
5114  if (bms_is_subset(restrictinfo->right_relids, outerrelids))
5115  {
5116  /*
5117  * Duplicate just enough of the structure to allow commuting the
5118  * clause without changing the original list. Could use
5119  * copyObject, but a complete deep copy is overkill.
5120  */
5121  OpExpr *temp = makeNode(OpExpr);
5122 
5123  temp->opno = clause->opno;
5124  temp->opfuncid = InvalidOid;
5125  temp->opresulttype = clause->opresulttype;
5126  temp->opretset = clause->opretset;
5127  temp->opcollid = clause->opcollid;
5128  temp->inputcollid = clause->inputcollid;
5129  temp->args = list_copy(clause->args);
5130  temp->location = clause->location;
5131  /* Commute it --- note this modifies the temp node in-place. */
5132  CommuteOpExpr(temp);
5133  t_list = lappend(t_list, temp);
5134  restrictinfo->outer_is_left = false;
5135  }
5136  else
5137  {
5138  Assert(bms_is_subset(restrictinfo->left_relids, outerrelids));
5139  t_list = lappend(t_list, clause);
5140  restrictinfo->outer_is_left = true;
5141  }
5142  }
5143  return t_list;
5144 }
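/*
 * [Editorial illustration, not part of createplan.c] A concrete reading of
 * the loop above: if the outer relation is "a" and a joinclause arrives as
 * "b.y = a.x" (outer Var on the right), then bms_is_subset(right_relids,
 * outerrelids) is true, so a commuted shallow copy equivalent to "a.x = b.y"
 * is emitted and outer_is_left is set to false; a clause already written as
 * "a.x = b.y" is passed through unchanged with outer_is_left = true.  Either
 * way, every emitted clause has the outer side on the left, which is what
 * the merge- and hash-join plan builders expect.
 */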
5145 
5146 /*
5147  * order_qual_clauses
5148  * Given a list of qual clauses that will all be evaluated at the same
5149  * plan node, sort the list into the order we want to check the quals
5150  * in at runtime.
5151  *
5152  * When security barrier quals are used in the query, we may have quals with
5153  * different security levels in the list. Quals of lower security_level
5154  * must go before quals of higher security_level, except that we can grant
5155  * exceptions to move up quals that are leakproof. When security level
5156  * doesn't force the decision, we prefer to order clauses by estimated
5157  * execution cost, cheapest first.
5158  *
5159  * Ideally the order should be driven by a combination of execution cost and
5160  * selectivity, but it's not immediately clear how to account for both,
5161  * and given the uncertainty of the estimates the reliability of the decisions
5162  * would be doubtful anyway. So we just order by security level then
5163  * estimated per-tuple cost, being careful not to change the order when
5164  * (as is often the case) the estimates are identical.
5165  *
5166  * Although this will work on either bare clauses or RestrictInfos, it's
5167  * much faster to apply it to RestrictInfos, since it can re-use cost
5168  * information that is cached in RestrictInfos. XXX in the bare-clause
5169  * case, we are also not able to apply security considerations. That is
5170  * all right for the moment, because the bare-clause case doesn't occur
5171  * anywhere that barrier quals could be present, but it would be better to
5172  * get rid of it.
5173  *
5174  * Note: some callers pass lists that contain entries that will later be
5175  * removed; this is the easiest way to let this routine see RestrictInfos
5176  * instead of bare clauses. This is another reason why trying to consider
5177  * selectivity in the ordering would likely do the wrong thing.
5178  */
5179 static List *
5180 order_qual_clauses(PlannerInfo *root, List *clauses)
5181 {
5182  typedef struct
5183  {
5184  Node *clause;
5185  Cost cost;
5186  Index security_level;
5187  } QualItem;
5188  int nitems = list_length(clauses);
5189  QualItem *items;
5190  ListCell *lc;
5191  int i;
5192  List *result;
5193 
5194  /* No need to work hard for 0 or 1 clause */
5195  if (nitems <= 1)
5196  return clauses;
5197 
5198  /*
5199  * Collect the items and costs into an array. This is to avoid repeated
5200  * cost_qual_eval work if the inputs aren't RestrictInfos.
5201  */
5202  items = (QualItem *) palloc(nitems * sizeof(QualItem));
5203  i = 0;
5204  foreach(lc, clauses)
5205  {
5206  Node *clause = (Node *) lfirst(lc);
5207  QualCost qcost;
5208 
5209  cost_qual_eval_node(&qcost, clause, root);
5210  items[i].clause = clause;
5211  items[i].cost = qcost.per_tuple;
5212  if (IsA(clause, RestrictInfo))
5213  {
5214  RestrictInfo *rinfo = (RestrictInfo *) clause;
5215 
5216  /*
5217  * If a clause is leakproof, it doesn't have to be constrained by
5218  * its nominal security level. If it's also reasonably cheap
5219  * (here defined as 10X cpu_operator_cost), pretend it has
5220  * security_level 0, which will allow it to go in front of
5221  * more-expensive quals of lower security levels. Of course, that
5222  * will also force it to go in front of cheaper quals of its own
5223  * security level, which is not so great, but we can alleviate
5224  * that risk by applying the cost limit cutoff.
5225  */
5226  if (rinfo->leakproof && items[i].cost < 10 * cpu_operator_cost)
5227  items[i].security_level = 0;
5228  else
5229  items[i].security_level = rinfo->security_level;
5230  }
5231  else
5232  items[i].security_level = 0;
5233  i++;
5234  }
5235 
5236  /*
5237  * Sort. We don't use qsort() because it's not guaranteed stable for
5238  * equal keys. The expected number of entries is small enough that a
5239  * simple insertion sort should be good enough.
5240  */
5241  for (i = 1; i < nitems; i++)
5242  {
5243  QualItem newitem = items[i];
5244  int j;
5245 
5246  /* insert newitem into the already-sorted subarray */
5247  for (j = i; j > 0; j--)
5248  {
5249  QualItem *olditem = &items[j - 1];
5250 
5251  if (newitem.security_level > olditem->security_level ||
5252  (newitem.security_level == olditem->security_level &&
5253  newitem.cost >= olditem->cost))
5254  break;
5255  items[j] = *olditem;
5256  }
5257  items[j] = newitem;
5258  }
5259 
5260  /* Convert back to a list */
5261  result = NIL;
5262  for (i = 0; i < nitems; i++)
5263  result = lappend(result, items[i].clause);
5264 
5265  return result;
5266 }
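/*
 * [Editorial illustration, not part of createplan.c] The ordering rule above,
 * shown as a small self-contained program: items sort by security_level, then
 * by cost, and ties keep their original order, which is the stability the
 * comment above insists on.  All names here are invented for the example.
 */
#ifdef CREATEPLAN_EDITORIAL_EXAMPLE
#include <stdio.h>

typedef struct
{
	const char *name;
	unsigned	security_level;
	double		cost;
} DemoQual;

static void
demo_order_quals(DemoQual *items, int nitems)
{
	for (int i = 1; i < nitems; i++)
	{
		DemoQual	newitem = items[i];
		int			j;

		/* Insert newitem into the already-sorted prefix items[0..i-1]. */
		for (j = i; j > 0; j--)
		{
			DemoQual   *olditem = &items[j - 1];

			/* Stop once newitem no longer sorts strictly earlier. */
			if (newitem.security_level > olditem->security_level ||
				(newitem.security_level == olditem->security_level &&
				 newitem.cost >= olditem->cost))
				break;
			items[j] = *olditem;
		}
		items[j] = newitem;
	}
}

int
main(void)
{
	DemoQual	quals[] = {
		{"leaky_expensive", 1, 50.0},
		{"leakproof_cheap", 0, 0.5},
		{"leaky_cheap_a", 1, 1.0},
		{"leaky_cheap_b", 1, 1.0},	/* ties with _a and must stay after it */
	};
	int			n = (int) (sizeof(quals) / sizeof(quals[0]));

	demo_order_quals(quals, n);
	for (int i = 0; i < n; i++)
		printf("%s\n", quals[i].name);
	/* prints: leakproof_cheap, leaky_cheap_a, leaky_cheap_b, leaky_expensive */
	return 0;
}
#endif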
5267 
5268 /*
5269  * Copy cost and size info from a Path node to the Plan node created from it.
5270  * The executor usually won't use this info, but it's needed by EXPLAIN.
5271  * Also copy the parallel-related flags, which the executor *will* use.
5272  */
5273 static void
5274 copy_generic_path_info(Plan *dest, Path *src)
5275 {
5276  dest->startup_cost = src->startup_cost;
5277  dest->total_cost = src->total_cost;
5278  dest->plan_rows = src->rows;
5279  dest->plan_width = src->pathtarget->width;
5280  dest->parallel_aware = src->parallel_aware;
5281  dest->parallel_safe = src->parallel_safe;
5282 }
5283 
5284 /*
5285  * Copy cost and size info from a lower plan node to an inserted node.
5286  * (Most callers alter the info after copying it.)
5287  */
5288 static void
5289 copy_plan_costsize(Plan *dest, Plan *src)
5290 {
5291  dest->startup_cost = src->startup_cost;
5292  dest->total_cost = src->total_cost;
5293  dest->plan_rows = src->plan_rows;
5294  dest->plan_width = src->plan_width;
5295  /* Assume the inserted node is not parallel-aware. */
5296  dest->parallel_aware = false;
5297  /* Assume the inserted node is parallel-safe, if child plan is. */
5298  dest->parallel_safe = src->parallel_safe;
5299 }
5300 
5301 /*
5302  * Some places in this file build Sort nodes that don't have a directly
5303  * corresponding Path node. The cost of the sort is, or should have been,
5304  * included in the cost of the Path node we're working from, but since it's
5305  * not split out, we have to re-figure it using cost_sort(). This is just
5306  * to label the Sort node nicely for EXPLAIN.
5307  *
5308  * limit_tuples is as for cost_sort (in particular, pass -1 if no limit)
5309  */
5310 static void
5311 label_sort_with_costsize(PlannerInfo *root, Sort *plan, double limit_tuples)
5312 {
5313  Plan *lefttree = plan->plan.lefttree;
5314  Path sort_path; /* dummy for result of cost_sort */
5315 
5316  /*
5317  * This function shouldn't have to deal with IncrementalSort plans because
5318  * they are only created from corresponding Path nodes.
5319  */
5320  Assert(IsA(plan, Sort));
5321 
5322  cost_sort(&sort_path, root, NIL,
5323  lefttree->total_cost,
5324  lefttree->plan_rows,
5325  lefttree->plan_width,
5326  0.0,
5327  work_mem,
5328  limit_tuples);
5329  plan->plan.startup_cost = sort_path.startup_cost;
5330  plan->plan.total_cost = sort_path.total_cost;
5331  plan->plan.plan_rows = lefttree->plan_rows;
5332  plan->plan.plan_width = lefttree->plan_width;
5333  plan->plan.parallel_aware = false;
5334  plan->plan.parallel_safe = lefttree->parallel_safe;
5335 }
5336 
5337 /*
5338  * bitmap_subplan_mark_shared
5339  * Set isshared flag in bitmap subplan so that it will be created in
5340  * shared memory.
5341  */
5342 static void
5343 bitmap_subplan_mark_shared(Plan *plan)
5344 {
5345  if (IsA(plan, BitmapAnd))
5346  bitmap_subplan_mark_shared(linitial(((BitmapAnd *) plan)->bitmapplans));
5347  else if (IsA(plan, BitmapOr))
5348  {
5349  ((BitmapOr *) plan)->isshared = true;
5350  bitmap_subplan_mark_shared(linitial(((BitmapOr *) plan)->bitmapplans));
5351  }
5352  else if (IsA(plan, BitmapIndexScan))
5353  ((BitmapIndexScan *) plan)->isshared = true;
5354  else
5355  elog(ERROR, "unrecognized node type: %d", nodeTag(plan));
5356 }
5357 
5358 /*****************************************************************************
5359  *
5360  * PLAN NODE BUILDING ROUTINES
5361  *
5362  * In general, these functions are not passed the original Path and therefore
5363  * leave it to the caller to fill in the cost/width fields from the Path,
5364  * typically by calling copy_generic_path_info(). This convention is
5365  * somewhat historical, but it does support a few places above where we build
5366  * a plan node without having an exactly corresponding Path node. Under no
5367  * circumstances should one of these functions do its own cost calculations,
5368  * as that would be redundant with calculations done while building Paths.
5369  *
5370  *****************************************************************************/
5371 
5372 static SeqScan *
5373 make_seqscan(List *qptlist,
5374  List *qpqual,
5375  Index scanrelid)
5376 {
5377  SeqScan *node = makeNode(SeqScan);
5378  Plan *plan = &node->scan.plan;
5379 
5380  plan->targetlist = qptlist;
5381  plan->qual = qpqual;
5382  plan->lefttree = NULL;
5383  plan->righttree = NULL;
5384  node->scan.scanrelid = scanrelid;
5385 
5386  return node;
5387 }
5388 
5389 static SampleScan *
5390 make_samplescan(List *qptlist,
5391  List *qpqual,
5392  Index scanrelid,
5393  TableSampleClause *tsc)
5394 {
5395  SampleScan *node = makeNode(SampleScan);
5396  Plan *plan = &node->scan.plan;
5397 
5398  plan->targetlist = qptlist;
5399  plan->qual = qpqual;
5400  plan->lefttree = NULL;
5401  plan->righttree = NULL;
5402  node->scan.scanrelid = scanrelid;
5403  node->tablesample = tsc;
5404 
5405  return node;
5406 }
5407 
5408 static IndexScan *
5409 make_indexscan(List *qptlist,
5410  List *qpqual,
5411  Index scanrelid,
5412  Oid indexid,
5413  List *indexqual,
5414  List *indexqualorig,
5415  List *indexorderby,
5416  List *indexorderbyorig,
5417  List *indexorderbyops,
5418  ScanDirection indexscandir)
5419 {
5420  IndexScan *node = makeNode(IndexScan);
5421  Plan *plan = &node->scan.plan;
5422 
5423  plan->targetlist = qptlist;
5424  plan->qual = qpqual;
5425  plan->lefttree = NULL;
5426  plan->righttree = NULL;
5427  node->scan.scanrelid = scanrelid;
5428  node->indexid = indexid;
5429  node->indexqual = indexqual;
5430  node->indexqualorig = indexqualorig;
5431  node->indexorderby = indexorderby;
5432  node->indexorderbyorig = indexorderbyorig;
5433  node->indexorderbyops = indexorderbyops;
5434  node->indexorderdir = indexscandir;
5435 
5436  return node;
5437 }
5438 
5439 static IndexOnlyScan *
5440 make_indexonlyscan(List *qptlist,
5441  List *qpqual,
5442  Index scanrelid,
5443  Oid indexid,
5444  List *indexqual,
5445  List *indexorderby,
5446  List *indextlist,
5447  ScanDirection indexscandir)
5448 {
5449  IndexOnlyScan *node = makeNode(IndexOnlyScan);
5450  Plan *plan = &node->scan.plan;
5451 
5452  plan->targetlist = qptlist;
5453  plan->qual = qpqual;
5454  plan->lefttree = NULL;
5455  plan->righttree = NULL;
5456  node->scan.scanrelid = scanrelid;
5457  node->indexid = indexid;
5458  node->indexqual = indexqual;
5459  node->indexorderby = indexorderby;
5460  node->indextlist = indextlist;
5461  node->indexorderdir = indexscandir;
5462 
5463  return node;
5464 }
5465 
5466 static BitmapIndexScan *
5467 make_bitmap_indexscan(Index scanrelid,
5468  Oid indexid,
5469  List *indexqual,
5470  List *indexqualorig)
5471 {
5472  BitmapIndexScan *node = makeNode(BitmapIndexScan);
5473  Plan *plan = &node->scan.plan;
5474 
5475  plan->targetlist = NIL; /* not used */
5476  plan->qual = NIL; /* not used */
5477  plan->lefttree = NULL;
5478  plan->righttree = NULL;
5479  node->scan.scanrelid = scanrelid;
5480  node->indexid = indexid;
5481  node->indexqual = indexqual;
5482  node->indexqualorig = indexqualorig;
5483 
5484  return node;
5485 }
5486 
5487 static BitmapHeapScan *
5488 make_bitmap_heapscan(List *qptlist,
5489  List *qpqual,
5490  Plan *lefttree,
5491  List *bitmapqualorig,
5492  Index scanrelid)
5493 {
5494  BitmapHeapScan *node = makeNode(BitmapHeapScan);
5495  Plan *plan = &node->scan.plan;
5496 
5497  plan->targetlist = qptlist;
5498  plan->qual = qpqual;
5499  plan->lefttree = lefttree;
5500  plan->righttree = NULL;
5501  node->scan.scanrelid = scanrelid;
5502  node->bitmapqualorig = bitmapqualorig;
5503 
5504  return node;
5505 }
5506 
5507 static TidScan *
5508 make_tidscan(List *qptlist,
5509  List *qpqual,
5510  Index scanrelid,
5511  List *tidquals)
5512 {
5513  TidScan *node = makeNode(TidScan);
5514  Plan *plan = &node->scan.plan;
5515 
5516  plan->targetlist = qptlist;
5517  plan->qual = qpqual;
5518  plan->lefttree = NULL;
5519  plan->righttree = NULL;
5520  node->scan.scanrelid = scanrelid;
5521  node->tidquals = tidquals;
5522 
5523  return node;
5524 }
5525 
5526 static TidRangeScan *
5527 make_tidrangescan(List *qptlist,
5528  List *qpqual,
5529  Index scanrelid,
5530  List *tidrangequals)
5531 {
5532  TidRangeScan *node = makeNode(TidRangeScan);
5533  Plan *plan = &node->scan.plan;
5534 
5535  plan->targetlist = qptlist;
5536  plan->qual = qpqual;
5537  plan->lefttree = NULL;
5538  plan->righttree = NULL;
5539  node->scan.scanrelid = scanrelid;
5540  node->tidrangequals = tidrangequals;
5541 
5542  return node;
5543 }
5544 
5545 static SubqueryScan *