PostgreSQL Source Code  git master
createplan.c
1 /*-------------------------------------------------------------------------
2  *
3  * createplan.c
4  * Routines to create the desired plan for processing a query.
5  * Planning is complete, we just need to convert the selected
6  * Path into a Plan.
7  *
8  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
9  * Portions Copyright (c) 1994, Regents of the University of California
10  *
11  *
12  * IDENTIFICATION
13  * src/backend/optimizer/plan/createplan.c
14  *
15  *-------------------------------------------------------------------------
16  */
17 #include "postgres.h"
18 
19 #include <limits.h>
20 #include <math.h>
21 
22 #include "access/sysattr.h"
23 #include "catalog/pg_class.h"
24 #include "foreign/fdwapi.h"
25 #include "miscadmin.h"
26 #include "nodes/extensible.h"
27 #include "nodes/makefuncs.h"
28 #include "nodes/nodeFuncs.h"
29 #include "optimizer/clauses.h"
30 #include "optimizer/cost.h"
31 #include "optimizer/optimizer.h"
32 #include "optimizer/paramassign.h"
33 #include "optimizer/paths.h"
34 #include "optimizer/placeholder.h"
35 #include "optimizer/plancat.h"
36 #include "optimizer/planmain.h"
37 #include "optimizer/prep.h"
38 #include "optimizer/restrictinfo.h"
39 #include "optimizer/subselect.h"
40 #include "optimizer/tlist.h"
41 #include "parser/parse_clause.h"
42 #include "parser/parsetree.h"
43 #include "partitioning/partprune.h"
44 #include "utils/lsyscache.h"
45 
46 
47 /*
48  * Flag bits that can appear in the flags argument of create_plan_recurse().
49  * These can be OR-ed together.
50  *
51  * CP_EXACT_TLIST specifies that the generated plan node must return exactly
52  * the tlist specified by the path's pathtarget (this overrides both
53  * CP_SMALL_TLIST and CP_LABEL_TLIST, if those are set). Otherwise, the
54  * plan node is allowed to return just the Vars and PlaceHolderVars needed
55  * to evaluate the pathtarget.
56  *
57  * CP_SMALL_TLIST specifies that a narrower tlist is preferred. This is
58  * passed down by parent nodes such as Sort and Hash, which will have to
59  * store the returned tuples.
60  *
61  * CP_LABEL_TLIST specifies that the plan node must return columns matching
62  * any sortgrouprefs specified in its pathtarget, with appropriate
63  * ressortgroupref labels. This is passed down by parent nodes such as Sort
64  * and Group, which need these values to be available in their inputs.
65  *
66  * CP_IGNORE_TLIST specifies that the caller plans to replace the targetlist,
67  * and therefore it doesn't matter a bit what target list gets generated.
68  */
69 #define CP_EXACT_TLIST 0x0001 /* Plan must return specified tlist */
70 #define CP_SMALL_TLIST 0x0002 /* Prefer narrower tlists */
71 #define CP_LABEL_TLIST 0x0004 /* tlist must contain sortgrouprefs */
72 #define CP_IGNORE_TLIST 0x0008 /* caller will replace tlist */
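/*
 * As a concrete illustration (see e.g. create_material_plan() later in this
 * file), a non-projecting node that must store its input typically requests
 * a narrow tlist while passing its caller's other requirements through
 * unchanged:
 *
 *		subplan = create_plan_recurse(root, best_path->subpath,
 *									  flags | CP_SMALL_TLIST);
 */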
73 
74 
75 static Plan *create_plan_recurse(PlannerInfo *root, Path *best_path,
76  int flags);
77 static Plan *create_scan_plan(PlannerInfo *root, Path *best_path,
78  int flags);
79 static List *build_path_tlist(PlannerInfo *root, Path *path);
80 static bool use_physical_tlist(PlannerInfo *root, Path *path, int flags);
81 static List *get_gating_quals(PlannerInfo *root, List *quals);
82 static Plan *create_gating_plan(PlannerInfo *root, Path *path, Plan *plan,
83  List *gating_quals);
84 static Plan *create_join_plan(PlannerInfo *root, JoinPath *best_path);
85 static bool is_async_capable_path(Path *path);
86 static Plan *create_append_plan(PlannerInfo *root, AppendPath *best_path,
87  int flags);
88 static Plan *create_merge_append_plan(PlannerInfo *root, MergeAppendPath *best_path,
89  int flags);
90 static Result *create_group_result_plan(PlannerInfo *root,
91  GroupResultPath *best_path);
92 static ProjectSet *create_project_set_plan(PlannerInfo *root, ProjectSetPath *best_path);
93 static Material *create_material_plan(PlannerInfo *root, MaterialPath *best_path,
94  int flags);
95 static ResultCache *create_resultcache_plan(PlannerInfo *root,
96  ResultCachePath *best_path,
97  int flags);
98 static Plan *create_unique_plan(PlannerInfo *root, UniquePath *best_path,
99  int flags);
100 static Gather *create_gather_plan(PlannerInfo *root, GatherPath *best_path);
101 static Plan *create_projection_plan(PlannerInfo *root,
102  ProjectionPath *best_path,
103  int flags);
104 static Plan *inject_projection_plan(Plan *subplan, List *tlist, bool parallel_safe);
105 static Sort *create_sort_plan(PlannerInfo *root, SortPath *best_path, int flags);
106 static IncrementalSort *create_incrementalsort_plan(PlannerInfo *root,
107  IncrementalSortPath *best_path, int flags);
108 static Group *create_group_plan(PlannerInfo *root, GroupPath *best_path);
109 static Unique *create_upper_unique_plan(PlannerInfo *root, UpperUniquePath *best_path,
110  int flags);
111 static Agg *create_agg_plan(PlannerInfo *root, AggPath *best_path);
112 static Plan *create_groupingsets_plan(PlannerInfo *root, GroupingSetsPath *best_path);
113 static Result *create_minmaxagg_plan(PlannerInfo *root, MinMaxAggPath *best_path);
114 static WindowAgg *create_windowagg_plan(PlannerInfo *root, WindowAggPath *best_path);
115 static SetOp *create_setop_plan(PlannerInfo *root, SetOpPath *best_path,
116  int flags);
117 static RecursiveUnion *create_recursiveunion_plan(PlannerInfo *root, RecursiveUnionPath *best_path);
118 static LockRows *create_lockrows_plan(PlannerInfo *root, LockRowsPath *best_path,
119  int flags);
120 static ModifyTable *create_modifytable_plan(PlannerInfo *root, ModifyTablePath *best_path);
121 static Limit *create_limit_plan(PlannerInfo *root, LimitPath *best_path,
122  int flags);
123 static SeqScan *create_seqscan_plan(PlannerInfo *root, Path *best_path,
124  List *tlist, List *scan_clauses);
125 static SampleScan *create_samplescan_plan(PlannerInfo *root, Path *best_path,
126  List *tlist, List *scan_clauses);
127 static Scan *create_indexscan_plan(PlannerInfo *root, IndexPath *best_path,
128  List *tlist, List *scan_clauses, bool indexonly);
129 static BitmapHeapScan *create_bitmap_scan_plan(PlannerInfo *root,
130  BitmapHeapPath *best_path,
131  List *tlist, List *scan_clauses);
132 static Plan *create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
133  List **qual, List **indexqual, List **indexECs);
134 static void bitmap_subplan_mark_shared(Plan *plan);
135 static TidScan *create_tidscan_plan(PlannerInfo *root, TidPath *best_path,
136  List *tlist, List *scan_clauses);
137 static TidRangeScan *create_tidrangescan_plan(PlannerInfo *root,
138  TidRangePath *best_path,
139  List *tlist,
140  List *scan_clauses);
141 static SubqueryScan *create_subqueryscan_plan(PlannerInfo *root,
142  SubqueryScanPath *best_path,
143  List *tlist, List *scan_clauses);
144 static FunctionScan *create_functionscan_plan(PlannerInfo *root, Path *best_path,
145  List *tlist, List *scan_clauses);
146 static ValuesScan *create_valuesscan_plan(PlannerInfo *root, Path *best_path,
147  List *tlist, List *scan_clauses);
148 static TableFuncScan *create_tablefuncscan_plan(PlannerInfo *root, Path *best_path,
149  List *tlist, List *scan_clauses);
150 static CteScan *create_ctescan_plan(PlannerInfo *root, Path *best_path,
151  List *tlist, List *scan_clauses);
152 static NamedTuplestoreScan *create_namedtuplestorescan_plan(PlannerInfo *root,
153  Path *best_path, List *tlist, List *scan_clauses);
154 static Result *create_resultscan_plan(PlannerInfo *root, Path *best_path,
155  List *tlist, List *scan_clauses);
156 static WorkTableScan *create_worktablescan_plan(PlannerInfo *root, Path *best_path,
157  List *tlist, List *scan_clauses);
158 static ForeignScan *create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
159  List *tlist, List *scan_clauses);
160 static CustomScan *create_customscan_plan(PlannerInfo *root,
161  CustomPath *best_path,
162  List *tlist, List *scan_clauses);
163 static NestLoop *create_nestloop_plan(PlannerInfo *root, NestPath *best_path);
164 static MergeJoin *create_mergejoin_plan(PlannerInfo *root, MergePath *best_path);
165 static HashJoin *create_hashjoin_plan(PlannerInfo *root, HashPath *best_path);
166 static Node *replace_nestloop_params(PlannerInfo *root, Node *expr);
167 static Node *replace_nestloop_params_mutator(Node *node, PlannerInfo *root);
168 static void fix_indexqual_references(PlannerInfo *root, IndexPath *index_path,
169  List **stripped_indexquals_p,
170  List **fixed_indexquals_p);
171 static List *fix_indexorderby_references(PlannerInfo *root, IndexPath *index_path);
172 static Node *fix_indexqual_clause(PlannerInfo *root,
173  IndexOptInfo *index, int indexcol,
174  Node *clause, List *indexcolnos);
175 static Node *fix_indexqual_operand(Node *node, IndexOptInfo *index, int indexcol);
176 static List *get_switched_clauses(List *clauses, Relids outerrelids);
177 static List *order_qual_clauses(PlannerInfo *root, List *clauses);
178 static void copy_generic_path_info(Plan *dest, Path *src);
179 static void copy_plan_costsize(Plan *dest, Plan *src);
180 static void label_sort_with_costsize(PlannerInfo *root, Sort *plan,
181  double limit_tuples);
182 static SeqScan *make_seqscan(List *qptlist, List *qpqual, Index scanrelid);
183 static SampleScan *make_samplescan(List *qptlist, List *qpqual, Index scanrelid,
184  TableSampleClause *tsc);
185 static IndexScan *make_indexscan(List *qptlist, List *qpqual, Index scanrelid,
186  Oid indexid, List *indexqual, List *indexqualorig,
187  List *indexorderby, List *indexorderbyorig,
188  List *indexorderbyops,
189  ScanDirection indexscandir);
190 static IndexOnlyScan *make_indexonlyscan(List *qptlist, List *qpqual,
191  Index scanrelid, Oid indexid,
192  List *indexqual, List *indexorderby,
193  List *indextlist,
194  ScanDirection indexscandir);
195 static BitmapIndexScan *make_bitmap_indexscan(Index scanrelid, Oid indexid,
196  List *indexqual,
197  List *indexqualorig);
198 static BitmapHeapScan *make_bitmap_heapscan(List *qptlist,
199  List *qpqual,
200  Plan *lefttree,
201  List *bitmapqualorig,
202  Index scanrelid);
203 static TidScan *make_tidscan(List *qptlist, List *qpqual, Index scanrelid,
204  List *tidquals);
205 static TidRangeScan *make_tidrangescan(List *qptlist, List *qpqual,
206  Index scanrelid, List *tidrangequals);
207 static SubqueryScan *make_subqueryscan(List *qptlist,
208  List *qpqual,
209  Index scanrelid,
210  Plan *subplan);
211 static FunctionScan *make_functionscan(List *qptlist, List *qpqual,
212  Index scanrelid, List *functions, bool funcordinality);
213 static ValuesScan *make_valuesscan(List *qptlist, List *qpqual,
214  Index scanrelid, List *values_lists);
215 static TableFuncScan *make_tablefuncscan(List *qptlist, List *qpqual,
216  Index scanrelid, TableFunc *tablefunc);
217 static CteScan *make_ctescan(List *qptlist, List *qpqual,
218  Index scanrelid, int ctePlanId, int cteParam);
219 static NamedTuplestoreScan *make_namedtuplestorescan(List *qptlist, List *qpqual,
220  Index scanrelid, char *enrname);
221 static WorkTableScan *make_worktablescan(List *qptlist, List *qpqual,
222  Index scanrelid, int wtParam);
223 static RecursiveUnion *make_recursive_union(List *tlist,
224  Plan *lefttree,
225  Plan *righttree,
226  int wtParam,
227  List *distinctList,
228  long numGroups);
229 static BitmapAnd *make_bitmap_and(List *bitmapplans);
230 static BitmapOr *make_bitmap_or(List *bitmapplans);
231 static NestLoop *make_nestloop(List *tlist,
232  List *joinclauses, List *otherclauses, List *nestParams,
233  Plan *lefttree, Plan *righttree,
234  JoinType jointype, bool inner_unique);
235 static HashJoin *make_hashjoin(List *tlist,
236  List *joinclauses, List *otherclauses,
237  List *hashclauses,
238  List *hashoperators, List *hashcollations,
239  List *hashkeys,
240  Plan *lefttree, Plan *righttree,
241  JoinType jointype, bool inner_unique);
242 static Hash *make_hash(Plan *lefttree,
243  List *hashkeys,
244  Oid skewTable,
245  AttrNumber skewColumn,
246  bool skewInherit);
247 static MergeJoin *make_mergejoin(List *tlist,
248  List *joinclauses, List *otherclauses,
249  List *mergeclauses,
250  Oid *mergefamilies,
251  Oid *mergecollations,
252  int *mergestrategies,
253  bool *mergenullsfirst,
254  Plan *lefttree, Plan *righttree,
255  JoinType jointype, bool inner_unique,
256  bool skip_mark_restore);
257 static Sort *make_sort(Plan *lefttree, int numCols,
258  AttrNumber *sortColIdx, Oid *sortOperators,
259  Oid *collations, bool *nullsFirst);
260 static IncrementalSort *make_incrementalsort(Plan *lefttree,
261  int numCols, int nPresortedCols,
262  AttrNumber *sortColIdx, Oid *sortOperators,
263  Oid *collations, bool *nullsFirst);
264 static Plan *prepare_sort_from_pathkeys(Plan *lefttree, List *pathkeys,
265  Relids relids,
266  const AttrNumber *reqColIdx,
267  bool adjust_tlist_in_place,
268  int *p_numsortkeys,
269  AttrNumber **p_sortColIdx,
270  Oid **p_sortOperators,
271  Oid **p_collations,
272  bool **p_nullsFirst);
273 static Sort *make_sort_from_pathkeys(Plan *lefttree, List *pathkeys,
274  Relids relids);
275 static IncrementalSort *make_incrementalsort_from_pathkeys(Plan *lefttree,
276  List *pathkeys, Relids relids, int nPresortedCols);
277 static Sort *make_sort_from_groupcols(List *groupcls,
278  AttrNumber *grpColIdx,
279  Plan *lefttree);
280 static Material *make_material(Plan *lefttree);
281 static ResultCache *make_resultcache(Plan *lefttree, Oid *hashoperators,
282  Oid *collations,
283  List *param_exprs,
284  bool singlerow,
285  uint32 est_entries);
286 static WindowAgg *make_windowagg(List *tlist, Index winref,
287  int partNumCols, AttrNumber *partColIdx, Oid *partOperators, Oid *partCollations,
288  int ordNumCols, AttrNumber *ordColIdx, Oid *ordOperators, Oid *ordCollations,
289  int frameOptions, Node *startOffset, Node *endOffset,
290  Oid startInRangeFunc, Oid endInRangeFunc,
291  Oid inRangeColl, bool inRangeAsc, bool inRangeNullsFirst,
292  Plan *lefttree);
293 static Group *make_group(List *tlist, List *qual, int numGroupCols,
294  AttrNumber *grpColIdx, Oid *grpOperators, Oid *grpCollations,
295  Plan *lefttree);
296 static Unique *make_unique_from_sortclauses(Plan *lefttree, List *distinctList);
297 static Unique *make_unique_from_pathkeys(Plan *lefttree,
298  List *pathkeys, int numCols);
299 static Gather *make_gather(List *qptlist, List *qpqual,
300  int nworkers, int rescan_param, bool single_copy, Plan *subplan);
301 static SetOp *make_setop(SetOpCmd cmd, SetOpStrategy strategy, Plan *lefttree,
302  List *distinctList, AttrNumber flagColIdx, int firstFlag,
303  long numGroups);
304 static LockRows *make_lockrows(Plan *lefttree, List *rowMarks, int epqParam);
305 static Result *make_result(List *tlist, Node *resconstantqual, Plan *subplan);
306 static ProjectSet *make_project_set(List *tlist, Plan *subplan);
307 static ModifyTable *make_modifytable(PlannerInfo *root, Plan *subplan,
308  CmdType operation, bool canSetTag,
309  Index nominalRelation, Index rootRelation,
310  bool partColsUpdated,
311  List *resultRelations,
312  List *updateColnosLists,
313  List *withCheckOptionLists, List *returningLists,
314  List *rowMarks, OnConflictExpr *onconflict, int epqParam);
315 static GatherMerge *create_gather_merge_plan(PlannerInfo *root,
316  GatherMergePath *best_path);
317 
318 
319 /*
320  * create_plan
321  * Creates the access plan for a query by recursively processing the
322  * desired tree of pathnodes, starting at the node 'best_path'. For
323  * every pathnode found, we create a corresponding plan node containing
324  * appropriate id, target list, and qualification information.
325  *
326  * The tlists and quals in the plan tree are still in planner format,
327  * ie, Vars still correspond to the parser's numbering. This will be
328  * fixed later by setrefs.c.
329  *
330  * best_path is the best access path
331  *
332  * Returns a Plan tree.
333  */
334 Plan *
335 create_plan(PlannerInfo *root, Path *best_path)
336 {
337  Plan *plan;
338 
339  /* plan_params should not be in use in current query level */
340  Assert(root->plan_params == NIL);
341 
342  /* Initialize this module's workspace in PlannerInfo */
343  root->curOuterRels = NULL;
344  root->curOuterParams = NIL;
345 
346  /* Recursively process the path tree, demanding the correct tlist result */
347  plan = create_plan_recurse(root, best_path, CP_EXACT_TLIST);
348 
349  /*
350  * Make sure the topmost plan node's targetlist exposes the original
351  * column names and other decorative info. Targetlists generated within
352  * the planner don't bother with that stuff, but we must have it on the
353  * top-level tlist seen at execution time. However, ModifyTable plan
354  * nodes don't have a tlist matching the querytree targetlist.
355  */
356  if (!IsA(plan, ModifyTable))
357  apply_tlist_labeling(plan->targetlist, root->processed_tlist);
358 
359  /*
360  * Attach any initPlans created in this query level to the topmost plan
361  * node. (In principle the initplans could go in any plan node at or
362  * above where they're referenced, but there seems no reason to put them
363  * any lower than the topmost node for the query level. Also, see
364  * comments for SS_finalize_plan before you try to change this.)
365  */
366  SS_attach_initplans(root, plan);
367 
368  /* Check we successfully assigned all NestLoopParams to plan nodes */
369  if (root->curOuterParams != NIL)
370  elog(ERROR, "failed to assign all NestLoopParams to plan nodes");
371 
372  /*
373  * Reset plan_params to ensure param IDs used for nestloop params are not
374  * re-used later
375  */
376  root->plan_params = NIL;
377 
378  return plan;
379 }
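/*
 * A sketch of typical use (not the exact call site, which lives in
 * planner.c): the planner fetches the cheapest path for the final upper
 * relation and converts it here, e.g.
 *
 *		best_path = get_cheapest_fractional_path(final_rel, tuple_fraction);
 *		plan = create_plan(root, best_path);
 */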
380 
381 /*
382  * create_plan_recurse
383  * Recursive guts of create_plan().
384  */
385 static Plan *
386 create_plan_recurse(PlannerInfo *root, Path *best_path, int flags)
387 {
388  Plan *plan;
389 
390  /* Guard against stack overflow due to overly complex plans */
391  check_stack_depth();
392 
393  switch (best_path->pathtype)
394  {
395  case T_SeqScan:
396  case T_SampleScan:
397  case T_IndexScan:
398  case T_IndexOnlyScan:
399  case T_BitmapHeapScan:
400  case T_TidScan:
401  case T_TidRangeScan:
402  case T_SubqueryScan:
403  case T_FunctionScan:
404  case T_TableFuncScan:
405  case T_ValuesScan:
406  case T_CteScan:
407  case T_WorkTableScan:
408  case T_NamedTuplestoreScan:
409  case T_ForeignScan:
410  case T_CustomScan:
411  plan = create_scan_plan(root, best_path, flags);
412  break;
413  case T_HashJoin:
414  case T_MergeJoin:
415  case T_NestLoop:
416  plan = create_join_plan(root,
417  (JoinPath *) best_path);
418  break;
419  case T_Append:
420  plan = create_append_plan(root,
421  (AppendPath *) best_path,
422  flags);
423  break;
424  case T_MergeAppend:
425  plan = create_merge_append_plan(root,
426  (MergeAppendPath *) best_path,
427  flags);
428  break;
429  case T_Result:
430  if (IsA(best_path, ProjectionPath))
431  {
432  plan = create_projection_plan(root,
433  (ProjectionPath *) best_path,
434  flags);
435  }
436  else if (IsA(best_path, MinMaxAggPath))
437  {
438  plan = (Plan *) create_minmaxagg_plan(root,
439  (MinMaxAggPath *) best_path);
440  }
441  else if (IsA(best_path, GroupResultPath))
442  {
443  plan = (Plan *) create_group_result_plan(root,
444  (GroupResultPath *) best_path);
445  }
446  else
447  {
448  /* Simple RTE_RESULT base relation */
449  Assert(IsA(best_path, Path));
450  plan = create_scan_plan(root, best_path, flags);
451  }
452  break;
453  case T_ProjectSet:
454  plan = (Plan *) create_project_set_plan(root,
455  (ProjectSetPath *) best_path);
456  break;
457  case T_Material:
458  plan = (Plan *) create_material_plan(root,
459  (MaterialPath *) best_path,
460  flags);
461  break;
462  case T_ResultCache:
463  plan = (Plan *) create_resultcache_plan(root,
464  (ResultCachePath *) best_path,
465  flags);
466  break;
467  case T_Unique:
468  if (IsA(best_path, UpperUniquePath))
469  {
470  plan = (Plan *) create_upper_unique_plan(root,
471  (UpperUniquePath *) best_path,
472  flags);
473  }
474  else
475  {
476  Assert(IsA(best_path, UniquePath));
477  plan = create_unique_plan(root,
478  (UniquePath *) best_path,
479  flags);
480  }
481  break;
482  case T_Gather:
483  plan = (Plan *) create_gather_plan(root,
484  (GatherPath *) best_path);
485  break;
486  case T_Sort:
487  plan = (Plan *) create_sort_plan(root,
488  (SortPath *) best_path,
489  flags);
490  break;
491  case T_IncrementalSort:
492  plan = (Plan *) create_incrementalsort_plan(root,
493  (IncrementalSortPath *) best_path,
494  flags);
495  break;
496  case T_Group:
497  plan = (Plan *) create_group_plan(root,
498  (GroupPath *) best_path);
499  break;
500  case T_Agg:
501  if (IsA(best_path, GroupingSetsPath))
502  plan = create_groupingsets_plan(root,
503  (GroupingSetsPath *) best_path);
504  else
505  {
506  Assert(IsA(best_path, AggPath));
507  plan = (Plan *) create_agg_plan(root,
508  (AggPath *) best_path);
509  }
510  break;
511  case T_WindowAgg:
512  plan = (Plan *) create_windowagg_plan(root,
513  (WindowAggPath *) best_path);
514  break;
515  case T_SetOp:
516  plan = (Plan *) create_setop_plan(root,
517  (SetOpPath *) best_path,
518  flags);
519  break;
520  case T_RecursiveUnion:
521  plan = (Plan *) create_recursiveunion_plan(root,
522  (RecursiveUnionPath *) best_path);
523  break;
524  case T_LockRows:
525  plan = (Plan *) create_lockrows_plan(root,
526  (LockRowsPath *) best_path,
527  flags);
528  break;
529  case T_ModifyTable:
530  plan = (Plan *) create_modifytable_plan(root,
531  (ModifyTablePath *) best_path);
532  break;
533  case T_Limit:
534  plan = (Plan *) create_limit_plan(root,
535  (LimitPath *) best_path,
536  flags);
537  break;
538  case T_GatherMerge:
539  plan = (Plan *) create_gather_merge_plan(root,
540  (GatherMergePath *) best_path);
541  break;
542  default:
543  elog(ERROR, "unrecognized node type: %d",
544  (int) best_path->pathtype);
545  plan = NULL; /* keep compiler quiet */
546  break;
547  }
548 
549  return plan;
550 }
551 
552 /*
553  * create_scan_plan
554  * Create a scan plan for the parent relation of 'best_path'.
555  */
556 static Plan *
557 create_scan_plan(PlannerInfo *root, Path *best_path, int flags)
558 {
559  RelOptInfo *rel = best_path->parent;
560  List *scan_clauses;
561  List *gating_clauses;
562  List *tlist;
563  Plan *plan;
564 
565  /*
566  * Extract the relevant restriction clauses from the parent relation. The
567  * executor must apply all these restrictions during the scan, except for
568  * pseudoconstants which we'll take care of below.
569  *
570  * If this is a plain indexscan or index-only scan, we need not consider
571  * restriction clauses that are implied by the index's predicate, so use
572  * indrestrictinfo not baserestrictinfo. Note that we can't do that for
573  * bitmap indexscans, since there's not necessarily a single index
574  * involved; but it doesn't matter since create_bitmap_scan_plan() will be
575  * able to get rid of such clauses anyway via predicate proof.
576  */
577  switch (best_path->pathtype)
578  {
579  case T_IndexScan:
580  case T_IndexOnlyScan:
581  scan_clauses = castNode(IndexPath, best_path)->indexinfo->indrestrictinfo;
582  break;
583  default:
584  scan_clauses = rel->baserestrictinfo;
585  break;
586  }
587 
588  /*
589  * If this is a parameterized scan, we also need to enforce all the join
590  * clauses available from the outer relation(s).
591  *
592  * For paranoia's sake, don't modify the stored baserestrictinfo list.
593  */
594  if (best_path->param_info)
595  scan_clauses = list_concat_copy(scan_clauses,
596  best_path->param_info->ppi_clauses);
597 
598  /*
599  * Detect whether we have any pseudoconstant quals to deal with. Then, if
600  * we'll need a gating Result node, it will be able to project, so there
601  * are no requirements on the child's tlist.
602  */
603  gating_clauses = get_gating_quals(root, scan_clauses);
604  if (gating_clauses)
605  flags = 0;
606 
607  /*
608  * For table scans, rather than using the relation targetlist (which is
609  * only those Vars actually needed by the query), we prefer to generate a
610  * tlist containing all Vars in order. This will allow the executor to
611  * optimize away projection of the table tuples, if possible.
612  *
613  * But if the caller is going to ignore our tlist anyway, then don't
614  * bother generating one at all. We use an exact equality test here, so
615  * that this only applies when CP_IGNORE_TLIST is the only flag set.
616  */
617  if (flags == CP_IGNORE_TLIST)
618  {
619  tlist = NULL;
620  }
621  else if (use_physical_tlist(root, best_path, flags))
622  {
623  if (best_path->pathtype == T_IndexOnlyScan)
624  {
625  /* For index-only scan, the preferred tlist is the index's */
626  tlist = copyObject(((IndexPath *) best_path)->indexinfo->indextlist);
627 
628  /*
629  * Transfer sortgroupref data to the replacement tlist, if
630  * requested (use_physical_tlist checked that this will work).
631  */
632  if (flags & CP_LABEL_TLIST)
633  apply_pathtarget_labeling_to_tlist(tlist, best_path->pathtarget);
634  }
635  else
636  {
637  tlist = build_physical_tlist(root, rel);
638  if (tlist == NIL)
639  {
640  /* Failed because of dropped cols, so use regular method */
641  tlist = build_path_tlist(root, best_path);
642  }
643  else
644  {
645  /* As above, transfer sortgroupref data to replacement tlist */
646  if (flags & CP_LABEL_TLIST)
647  apply_pathtarget_labeling_to_tlist(tlist, best_path->pathtarget);
648  }
649  }
650  }
651  else
652  {
653  tlist = build_path_tlist(root, best_path);
654  }
655 
656  switch (best_path->pathtype)
657  {
658  case T_SeqScan:
659  plan = (Plan *) create_seqscan_plan(root,
660  best_path,
661  tlist,
662  scan_clauses);
663  break;
664 
665  case T_SampleScan:
666  plan = (Plan *) create_samplescan_plan(root,
667  best_path,
668  tlist,
669  scan_clauses);
670  break;
671 
672  case T_IndexScan:
673  plan = (Plan *) create_indexscan_plan(root,
674  (IndexPath *) best_path,
675  tlist,
676  scan_clauses,
677  false);
678  break;
679 
680  case T_IndexOnlyScan:
681  plan = (Plan *) create_indexscan_plan(root,
682  (IndexPath *) best_path,
683  tlist,
684  scan_clauses,
685  true);
686  break;
687 
688  case T_BitmapHeapScan:
689  plan = (Plan *) create_bitmap_scan_plan(root,
690  (BitmapHeapPath *) best_path,
691  tlist,
692  scan_clauses);
693  break;
694 
695  case T_TidScan:
696  plan = (Plan *) create_tidscan_plan(root,
697  (TidPath *) best_path,
698  tlist,
699  scan_clauses);
700  break;
701 
702  case T_TidRangeScan:
703  plan = (Plan *) create_tidrangescan_plan(root,
704  (TidRangePath *) best_path,
705  tlist,
706  scan_clauses);
707  break;
708 
709  case T_SubqueryScan:
710  plan = (Plan *) create_subqueryscan_plan(root,
711  (SubqueryScanPath *) best_path,
712  tlist,
713  scan_clauses);
714  break;
715 
716  case T_FunctionScan:
717  plan = (Plan *) create_functionscan_plan(root,
718  best_path,
719  tlist,
720  scan_clauses);
721  break;
722 
723  case T_TableFuncScan:
724  plan = (Plan *) create_tablefuncscan_plan(root,
725  best_path,
726  tlist,
727  scan_clauses);
728  break;
729 
730  case T_ValuesScan:
731  plan = (Plan *) create_valuesscan_plan(root,
732  best_path,
733  tlist,
734  scan_clauses);
735  break;
736 
737  case T_CteScan:
738  plan = (Plan *) create_ctescan_plan(root,
739  best_path,
740  tlist,
741  scan_clauses);
742  break;
743 
744  case T_NamedTuplestoreScan:
745  plan = (Plan *) create_namedtuplestorescan_plan(root,
746  best_path,
747  tlist,
748  scan_clauses);
749  break;
750 
751  case T_Result:
752  plan = (Plan *) create_resultscan_plan(root,
753  best_path,
754  tlist,
755  scan_clauses);
756  break;
757 
758  case T_WorkTableScan:
759  plan = (Plan *) create_worktablescan_plan(root,
760  best_path,
761  tlist,
762  scan_clauses);
763  break;
764 
765  case T_ForeignScan:
766  plan = (Plan *) create_foreignscan_plan(root,
767  (ForeignPath *) best_path,
768  tlist,
769  scan_clauses);
770  break;
771 
772  case T_CustomScan:
773  plan = (Plan *) create_customscan_plan(root,
774  (CustomPath *) best_path,
775  tlist,
776  scan_clauses);
777  break;
778 
779  default:
780  elog(ERROR, "unrecognized node type: %d",
781  (int) best_path->pathtype);
782  plan = NULL; /* keep compiler quiet */
783  break;
784  }
785 
786  /*
787  * If there are any pseudoconstant clauses attached to this node, insert a
788  * gating Result node that evaluates the pseudoconstants as one-time
789  * quals.
790  */
791  if (gating_clauses)
792  plan = create_gating_plan(root, best_path, plan, gating_clauses);
793 
794  return plan;
795 }
796 
797 /*
798  * Build a target list (ie, a list of TargetEntry) for the Path's output.
799  *
800  * This is almost just make_tlist_from_pathtarget(), but we also have to
801  * deal with replacing nestloop params.
802  */
803 static List *
804 build_path_tlist(PlannerInfo *root, Path *path)
805 {
806  List *tlist = NIL;
807  Index *sortgrouprefs = path->pathtarget->sortgrouprefs;
808  int resno = 1;
809  ListCell *v;
810 
811  foreach(v, path->pathtarget->exprs)
812  {
813  Node *node = (Node *) lfirst(v);
814  TargetEntry *tle;
815 
816  /*
817  * If it's a parameterized path, there might be lateral references in
818  * the tlist, which need to be replaced with Params. There's no need
819  * to remake the TargetEntry nodes, so apply this to each list item
820  * separately.
821  */
822  if (path->param_info)
823  node = replace_nestloop_params(root, node);
824 
825  tle = makeTargetEntry((Expr *) node,
826  resno,
827  NULL,
828  false);
829  if (sortgrouprefs)
830  tle->ressortgroupref = sortgrouprefs[resno - 1];
831 
832  tlist = lappend(tlist, tle);
833  resno++;
834  }
835  return tlist;
836 }
837 
838 /*
839  * use_physical_tlist
840  * Decide whether to use a tlist matching relation structure,
841  * rather than only those Vars actually referenced.
842  */
843 static bool
844 use_physical_tlist(PlannerInfo *root, Path *path, int flags)
845 {
846  RelOptInfo *rel = path->parent;
847  int i;
848  ListCell *lc;
849 
850  /*
851  * Forget it if either exact tlist or small tlist is demanded.
852  */
853  if (flags & (CP_EXACT_TLIST | CP_SMALL_TLIST))
854  return false;
855 
856  /*
857  * We can do this for real relation scans, subquery scans, function scans,
858  * tablefunc scans, values scans, and CTE scans (but not for, eg, joins).
859  */
860  if (rel->rtekind != RTE_RELATION &&
861  rel->rtekind != RTE_SUBQUERY &&
862  rel->rtekind != RTE_FUNCTION &&
863  rel->rtekind != RTE_TABLEFUNC &&
864  rel->rtekind != RTE_VALUES &&
865  rel->rtekind != RTE_CTE)
866  return false;
867 
868  /*
869  * Can't do it with inheritance cases either (mainly because Append
870  * doesn't project; this test may be unnecessary now that
871  * create_append_plan instructs its children to return an exact tlist).
872  */
873  if (rel->reloptkind != RELOPT_BASEREL)
874  return false;
875 
876  /*
877  * Also, don't do it to a CustomPath; the premise that we're extracting
878  * columns from a simple physical tuple is unlikely to hold for those.
879  * (When it does make sense, the custom path creator can set up the path's
880  * pathtarget that way.)
881  */
882  if (IsA(path, CustomPath))
883  return false;
884 
885  /*
886  * If a bitmap scan's tlist is empty, keep it as-is. This may allow the
887  * executor to skip heap page fetches, and in any case, the benefit of
888  * using a physical tlist instead would be minimal.
889  */
890  if (IsA(path, BitmapHeapPath) &&
891  path->pathtarget->exprs == NIL)
892  return false;
893 
894  /*
895  * Can't do it if any system columns or whole-row Vars are requested.
896  * (This could possibly be fixed but would take some fragile assumptions
897  * in setrefs.c, I think.)
898  */
899  for (i = rel->min_attr; i <= 0; i++)
900  {
901  if (!bms_is_empty(rel->attr_needed[i - rel->min_attr]))
902  return false;
903  }
904 
905  /*
906  * Can't do it if the rel is required to emit any placeholder expressions,
907  * either.
908  */
909  foreach(lc, root->placeholder_list)
910  {
911  PlaceHolderInfo *phinfo = (PlaceHolderInfo *) lfirst(lc);
912 
913  if (bms_nonempty_difference(phinfo->ph_needed, rel->relids) &&
914  bms_is_subset(phinfo->ph_eval_at, rel->relids))
915  return false;
916  }
917 
918  /*
919  * Also, can't do it if CP_LABEL_TLIST is specified and path is requested
920  * to emit any sort/group columns that are not simple Vars. (If they are
921  * simple Vars, they should appear in the physical tlist, and
922  * apply_pathtarget_labeling_to_tlist will take care of getting them
923  * labeled again.) We also have to check that no two sort/group columns
924  * are the same Var, else that element of the physical tlist would need
925  * conflicting ressortgroupref labels.
926  */
927  if ((flags & CP_LABEL_TLIST) && path->pathtarget->sortgrouprefs)
928  {
929  Bitmapset *sortgroupatts = NULL;
930 
931  i = 0;
932  foreach(lc, path->pathtarget->exprs)
933  {
934  Expr *expr = (Expr *) lfirst(lc);
935 
936  if (path->pathtarget->sortgrouprefs[i])
937  {
938  if (expr && IsA(expr, Var))
939  {
940  int attno = ((Var *) expr)->varattno;
941 
942  attno -= FirstLowInvalidHeapAttributeNumber;
943  if (bms_is_member(attno, sortgroupatts))
944  return false;
945  sortgroupatts = bms_add_member(sortgroupatts, attno);
946  }
947  else
948  return false;
949  }
950  i++;
951  }
952  }
953 
954  return true;
955 }
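/*
 * For example (illustrative only): given "SELECT a FROM wide_tab WHERE b > 0",
 * the relation's pathtarget needs only columns a and b, but when
 * use_physical_tlist() succeeds the scan node is given a tlist listing all of
 * wide_tab's columns in attribute order, letting the executor hand back the
 * stored tuples directly instead of projecting a new tuple for every row.
 */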
956 
957 /*
958  * get_gating_quals
959  * See if there are pseudoconstant quals in a node's quals list
960  *
961  * If the node's quals list includes any pseudoconstant quals,
962  * return just those quals.
963  */
964 static List *
965 get_gating_quals(PlannerInfo *root, List *quals)
966 {
967  /* No need to look if we know there are no pseudoconstants */
968  if (!root->hasPseudoConstantQuals)
969  return NIL;
970 
971  /* Sort into desirable execution order while still in RestrictInfo form */
972  quals = order_qual_clauses(root, quals);
973 
974  /* Pull out any pseudoconstant quals from the RestrictInfo list */
975  return extract_actual_clauses(quals, true);
976 }
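/*
 * Pseudoconstant quals are clauses containing no Vars of the current query
 * level and no volatile functions, so they can be evaluated once per scan
 * rather than once per row; for instance (illustrative only)
 * "WHERE now() > '2021-01-01'", or a clause referencing only an outer-level
 * Param.
 */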
977 
978 /*
979  * create_gating_plan
980  * Deal with pseudoconstant qual clauses
981  *
982  * Add a gating Result node atop the already-built plan.
983  */
984 static Plan *
985 create_gating_plan(PlannerInfo *root, Path *path, Plan *plan,
986  List *gating_quals)
987 {
988  Plan *gplan;
989  Plan *splan;
990 
991  Assert(gating_quals);
992 
993  /*
994  * We might have a trivial Result plan already. Stacking one Result atop
995  * another is silly, so if that applies, just discard the input plan.
996  * (We're assuming its targetlist is uninteresting; it should be either
997  * the same as the result of build_path_tlist, or a simplified version.)
998  */
999  splan = plan;
1000  if (IsA(plan, Result))
1001  {
1002  Result *rplan = (Result *) plan;
1003 
1004  if (rplan->plan.lefttree == NULL &&
1005  rplan->resconstantqual == NULL)
1006  splan = NULL;
1007  }
1008 
1009  /*
1010  * Since we need a Result node anyway, always return the path's requested
1011  * tlist; that's never a wrong choice, even if the parent node didn't ask
1012  * for CP_EXACT_TLIST.
1013  */
1014  gplan = (Plan *) make_result(build_path_tlist(root, path),
1015  (Node *) gating_quals,
1016  splan);
1017 
1018  /*
1019  * Notice that we don't change cost or size estimates when doing gating.
1020  * The costs of qual eval were already included in the subplan's cost.
1021  * Leaving the size alone amounts to assuming that the gating qual will
1022  * succeed, which is the conservative estimate for planning upper queries.
1023  * We certainly don't want to assume the output size is zero (unless the
1024  * gating qual is actually constant FALSE, and that case is dealt with in
1025  * clausesel.c). Interpolating between the two cases is silly, because it
1026  * doesn't reflect what will really happen at runtime, and besides which
1027  * in most cases we have only a very bad idea of the probability of the
1028  * gating qual being true.
1029  */
1030  copy_plan_costsize(gplan, plan);
1031 
1032  /* Gating quals could be unsafe, so better use the Path's safety flag */
1033  gplan->parallel_safe = path->parallel_safe;
1034 
1035  return gplan;
1036 }
1037 
1038 /*
1039  * create_join_plan
1040  * Create a join plan for 'best_path' and (recursively) plans for its
1041  * inner and outer paths.
1042  */
1043 static Plan *
1044 create_join_plan(PlannerInfo *root, JoinPath *best_path)
1045 {
1046  Plan *plan;
1047  List *gating_clauses;
1048 
1049  switch (best_path->path.pathtype)
1050  {
1051  case T_MergeJoin:
1052  plan = (Plan *) create_mergejoin_plan(root,
1053  (MergePath *) best_path);
1054  break;
1055  case T_HashJoin:
1056  plan = (Plan *) create_hashjoin_plan(root,
1057  (HashPath *) best_path);
1058  break;
1059  case T_NestLoop:
1060  plan = (Plan *) create_nestloop_plan(root,
1061  (NestPath *) best_path);
1062  break;
1063  default:
1064  elog(ERROR, "unrecognized node type: %d",
1065  (int) best_path->path.pathtype);
1066  plan = NULL; /* keep compiler quiet */
1067  break;
1068  }
1069 
1070  /*
1071  * If there are any pseudoconstant clauses attached to this node, insert a
1072  * gating Result node that evaluates the pseudoconstants as one-time
1073  * quals.
1074  */
1075  gating_clauses = get_gating_quals(root, best_path->joinrestrictinfo);
1076  if (gating_clauses)
1077  plan = create_gating_plan(root, (Path *) best_path, plan,
1078  gating_clauses);
1079 
1080 #ifdef NOT_USED
1081 
1082  /*
1083  * * Expensive function pullups may have pulled local predicates * into
1084  * this path node. Put them in the qpqual of the plan node. * JMH,
1085  * 6/15/92
1086  */
1087  if (get_loc_restrictinfo(best_path) != NIL)
1088  set_qpqual((Plan) plan,
1089  list_concat(get_qpqual((Plan) plan),
1090  get_actual_clauses(get_loc_restrictinfo(best_path))));
1091 #endif
1092 
1093  return plan;
1094 }
1095 
1096 /*
1097  * is_async_capable_path
1098  * Check whether a given Path node is async-capable.
1099  */
1100 static bool
1101 is_async_capable_path(Path *path)
1102 {
1103  switch (nodeTag(path))
1104  {
1105  case T_ForeignPath:
1106  {
1107  FdwRoutine *fdwroutine = path->parent->fdwroutine;
1108 
1109  Assert(fdwroutine != NULL);
1110  if (fdwroutine->IsForeignPathAsyncCapable != NULL &&
1111  fdwroutine->IsForeignPathAsyncCapable((ForeignPath *) path))
1112  return true;
1113  }
1114  break;
1115  default:
1116  break;
1117  }
1118  return false;
1119 }
1120 
1121 /*
1122  * create_append_plan
1123  * Create an Append plan for 'best_path' and (recursively) plans
1124  * for its subpaths.
1125  *
1126  * Returns a Plan node.
1127  */
1128 static Plan *
1129 create_append_plan(PlannerInfo *root, AppendPath *best_path, int flags)
1130 {
1131  Append *plan;
1132  List *tlist = build_path_tlist(root, &best_path->path);
1133  int orig_tlist_length = list_length(tlist);
1134  bool tlist_was_changed = false;
1135  List *pathkeys = best_path->path.pathkeys;
1136  List *subplans = NIL;
1137  ListCell *subpaths;
1138  int nasyncplans = 0;
1139  RelOptInfo *rel = best_path->path.parent;
1140  PartitionPruneInfo *partpruneinfo = NULL;
1141  int nodenumsortkeys = 0;
1142  AttrNumber *nodeSortColIdx = NULL;
1143  Oid *nodeSortOperators = NULL;
1144  Oid *nodeCollations = NULL;
1145  bool *nodeNullsFirst = NULL;
1146  bool consider_async = false;
1147 
1148  /*
1149  * The subpaths list could be empty, if every child was proven empty by
1150  * constraint exclusion. In that case generate a dummy plan that returns
1151  * no rows.
1152  *
1153  * Note that an AppendPath with no members is also generated in certain
1154  * cases where there was no appending construct at all, but we know the
1155  * relation is empty (see set_dummy_rel_pathlist and mark_dummy_rel).
1156  */
1157  if (best_path->subpaths == NIL)
1158  {
1159  /* Generate a Result plan with constant-FALSE gating qual */
1160  Plan *plan;
1161 
1162  plan = (Plan *) make_result(tlist,
1163  (Node *) list_make1(makeBoolConst(false,
1164  false)),
1165  NULL);
1166 
1167  copy_generic_path_info(plan, (Path *) best_path);
1168 
1169  return plan;
1170  }
1171 
1172  /*
1173  * Otherwise build an Append plan. Note that if there's just one child,
1174  * the Append is pretty useless; but we wait till setrefs.c to get rid of
1175  * it. Doing so here doesn't work because the varno of the child scan
1176  * plan won't match the parent-rel Vars it'll be asked to emit.
1177  *
1178  * We don't have the actual creation of the Append node split out into a
1179  * separate make_xxx function. This is because we want to run
1180  * prepare_sort_from_pathkeys on it before we do so on the individual
1181  * child plans, to make cross-checking the sort info easier.
1182  */
1183  plan = makeNode(Append);
1184  plan->plan.targetlist = tlist;
1185  plan->plan.qual = NIL;
1186  plan->plan.lefttree = NULL;
1187  plan->plan.righttree = NULL;
1188  plan->apprelids = rel->relids;
1189 
1190  if (pathkeys != NIL)
1191  {
1192  /*
1193  * Compute sort column info, and adjust the Append's tlist as needed.
1194  * Because we pass adjust_tlist_in_place = true, we may ignore the
1195  * function result; it must be the same plan node. However, we then
1196  * need to detect whether any tlist entries were added.
1197  */
1198  (void) prepare_sort_from_pathkeys((Plan *) plan, pathkeys,
1199  best_path->path.parent->relids,
1200  NULL,
1201  true,
1202  &nodenumsortkeys,
1203  &nodeSortColIdx,
1204  &nodeSortOperators,
1205  &nodeCollations,
1206  &nodeNullsFirst);
1207  tlist_was_changed = (orig_tlist_length != list_length(plan->plan.targetlist));
1208  }
1209 
1210  /* If appropriate, consider async append */
1211  consider_async = (enable_async_append && pathkeys == NIL &&
1212  !best_path->path.parallel_safe &&
1213  list_length(best_path->subpaths) > 1);
1214 
1215  /* Build the plan for each child */
1216  foreach(subpaths, best_path->subpaths)
1217  {
1218  Path *subpath = (Path *) lfirst(subpaths);
1219  Plan *subplan;
1220 
1221  /* Must insist that all children return the same tlist */
1222  subplan = create_plan_recurse(root, subpath, CP_EXACT_TLIST);
1223 
1224  /*
1225  * For ordered Appends, we must insert a Sort node if subplan isn't
1226  * sufficiently ordered.
1227  */
1228  if (pathkeys != NIL)
1229  {
1230  int numsortkeys;
1231  AttrNumber *sortColIdx;
1232  Oid *sortOperators;
1233  Oid *collations;
1234  bool *nullsFirst;
1235 
1236  /*
1237  * Compute sort column info, and adjust subplan's tlist as needed.
1238  * We must apply prepare_sort_from_pathkeys even to subplans that
1239  * don't need an explicit sort, to make sure they are returning
1240  * the same sort key columns the Append expects.
1241  */
1242  subplan = prepare_sort_from_pathkeys(subplan, pathkeys,
1243  subpath->parent->relids,
1244  nodeSortColIdx,
1245  false,
1246  &numsortkeys,
1247  &sortColIdx,
1248  &sortOperators,
1249  &collations,
1250  &nullsFirst);
1251 
1252  /*
1253  * Check that we got the same sort key information. We just
1254  * Assert that the sortops match, since those depend only on the
1255  * pathkeys; but it seems like a good idea to check the sort
1256  * column numbers explicitly, to ensure the tlists match up.
1257  */
1258  Assert(numsortkeys == nodenumsortkeys);
1259  if (memcmp(sortColIdx, nodeSortColIdx,
1260  numsortkeys * sizeof(AttrNumber)) != 0)
1261  elog(ERROR, "Append child's targetlist doesn't match Append");
1262  Assert(memcmp(sortOperators, nodeSortOperators,
1263  numsortkeys * sizeof(Oid)) == 0);
1264  Assert(memcmp(collations, nodeCollations,
1265  numsortkeys * sizeof(Oid)) == 0);
1266  Assert(memcmp(nullsFirst, nodeNullsFirst,
1267  numsortkeys * sizeof(bool)) == 0);
1268 
1269  /* Now, insert a Sort node if subplan isn't sufficiently ordered */
1270  if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
1271  {
1272  Sort *sort = make_sort(subplan, numsortkeys,
1273  sortColIdx, sortOperators,
1274  collations, nullsFirst);
1275 
1276  label_sort_with_costsize(root, sort, best_path->limit_tuples);
1277  subplan = (Plan *) sort;
1278  }
1279  }
1280 
1281  subplans = lappend(subplans, subplan);
1282 
1283  /* Check to see if subplan can be executed asynchronously */
1284  if (consider_async && is_async_capable_path(subpath))
1285  {
1286  subplan->async_capable = true;
1287  ++nasyncplans;
1288  }
1289  }
1290 
1291  /*
1292  * If any quals exist, they may be useful to perform further partition
1293  * pruning during execution. Gather information needed by the executor to
1294  * do partition pruning.
1295  */
1296  if (enable_partition_pruning)
1297  {
1298  List *prunequal;
1299 
1300  prunequal = extract_actual_clauses(rel->baserestrictinfo, false);
1301 
1302  if (best_path->path.param_info)
1303  {
1304  List *prmquals = best_path->path.param_info->ppi_clauses;
1305 
1306  prmquals = extract_actual_clauses(prmquals, false);
1307  prmquals = (List *) replace_nestloop_params(root,
1308  (Node *) prmquals);
1309 
1310  prunequal = list_concat(prunequal, prmquals);
1311  }
1312 
1313  if (prunequal != NIL)
1314  partpruneinfo =
1315  make_partition_pruneinfo(root, rel,
1316  best_path->subpaths,
1317  prunequal);
1318  }
1319 
1320  plan->appendplans = subplans;
1321  plan->nasyncplans = nasyncplans;
1322  plan->first_partial_plan = best_path->first_partial_path;
1323  plan->part_prune_info = partpruneinfo;
1324 
1325  copy_generic_path_info(&plan->plan, (Path *) best_path);
1326 
1327  /*
1328  * If prepare_sort_from_pathkeys added sort columns, but we were told to
1329  * produce either the exact tlist or a narrow tlist, we should get rid of
1330  * the sort columns again. We must inject a projection node to do so.
1331  */
1332  if (tlist_was_changed && (flags & (CP_EXACT_TLIST | CP_SMALL_TLIST)))
1333  {
1334  tlist = list_truncate(list_copy(plan->plan.targetlist),
1335  orig_tlist_length);
1336  return inject_projection_plan((Plan *) plan, tlist,
1337  plan->plan.parallel_safe);
1338  }
1339  else
1340  return (Plan *) plan;
1341 }
1342 
1343 /*
1344  * create_merge_append_plan
1345  * Create a MergeAppend plan for 'best_path' and (recursively) plans
1346  * for its subpaths.
1347  *
1348  * Returns a Plan node.
1349  */
1350 static Plan *
1351 create_merge_append_plan(PlannerInfo *root, MergeAppendPath *best_path,
1352  int flags)
1353 {
1354  MergeAppend *node = makeNode(MergeAppend);
1355  Plan *plan = &node->plan;
1356  List *tlist = build_path_tlist(root, &best_path->path);
1357  int orig_tlist_length = list_length(tlist);
1358  bool tlist_was_changed;
1359  List *pathkeys = best_path->path.pathkeys;
1360  List *subplans = NIL;
1361  ListCell *subpaths;
1362  RelOptInfo *rel = best_path->path.parent;
1363  PartitionPruneInfo *partpruneinfo = NULL;
1364 
1365  /*
1366  * We don't have the actual creation of the MergeAppend node split out
1367  * into a separate make_xxx function. This is because we want to run
1368  * prepare_sort_from_pathkeys on it before we do so on the individual
1369  * child plans, to make cross-checking the sort info easier.
1370  */
1371  copy_generic_path_info(plan, (Path *) best_path);
1372  plan->targetlist = tlist;
1373  plan->qual = NIL;
1374  plan->lefttree = NULL;
1375  plan->righttree = NULL;
1376  node->apprelids = rel->relids;
1377 
1378  /*
1379  * Compute sort column info, and adjust MergeAppend's tlist as needed.
1380  * Because we pass adjust_tlist_in_place = true, we may ignore the
1381  * function result; it must be the same plan node. However, we then need
1382  * to detect whether any tlist entries were added.
1383  */
1384  (void) prepare_sort_from_pathkeys(plan, pathkeys,
1385  best_path->path.parent->relids,
1386  NULL,
1387  true,
1388  &node->numCols,
1389  &node->sortColIdx,
1390  &node->sortOperators,
1391  &node->collations,
1392  &node->nullsFirst);
1393  tlist_was_changed = (orig_tlist_length != list_length(plan->targetlist));
1394 
1395  /*
1396  * Now prepare the child plans. We must apply prepare_sort_from_pathkeys
1397  * even to subplans that don't need an explicit sort, to make sure they
1398  * are returning the same sort key columns the MergeAppend expects.
1399  */
1400  foreach(subpaths, best_path->subpaths)
1401  {
1402  Path *subpath = (Path *) lfirst(subpaths);
1403  Plan *subplan;
1404  int numsortkeys;
1405  AttrNumber *sortColIdx;
1406  Oid *sortOperators;
1407  Oid *collations;
1408  bool *nullsFirst;
1409 
1410  /* Build the child plan */
1411  /* Must insist that all children return the same tlist */
1412  subplan = create_plan_recurse(root, subpath, CP_EXACT_TLIST);
1413 
1414  /* Compute sort column info, and adjust subplan's tlist as needed */
1415  subplan = prepare_sort_from_pathkeys(subplan, pathkeys,
1416  subpath->parent->relids,
1417  node->sortColIdx,
1418  false,
1419  &numsortkeys,
1420  &sortColIdx,
1421  &sortOperators,
1422  &collations,
1423  &nullsFirst);
1424 
1425  /*
1426  * Check that we got the same sort key information. We just Assert
1427  * that the sortops match, since those depend only on the pathkeys;
1428  * but it seems like a good idea to check the sort column numbers
1429  * explicitly, to ensure the tlists really do match up.
1430  */
1431  Assert(numsortkeys == node->numCols);
1432  if (memcmp(sortColIdx, node->sortColIdx,
1433  numsortkeys * sizeof(AttrNumber)) != 0)
1434  elog(ERROR, "MergeAppend child's targetlist doesn't match MergeAppend");
1435  Assert(memcmp(sortOperators, node->sortOperators,
1436  numsortkeys * sizeof(Oid)) == 0);
1437  Assert(memcmp(collations, node->collations,
1438  numsortkeys * sizeof(Oid)) == 0);
1439  Assert(memcmp(nullsFirst, node->nullsFirst,
1440  numsortkeys * sizeof(bool)) == 0);
1441 
1442  /* Now, insert a Sort node if subplan isn't sufficiently ordered */
1443  if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
1444  {
1445  Sort *sort = make_sort(subplan, numsortkeys,
1446  sortColIdx, sortOperators,
1447  collations, nullsFirst);
1448 
1449  label_sort_with_costsize(root, sort, best_path->limit_tuples);
1450  subplan = (Plan *) sort;
1451  }
1452 
1453  subplans = lappend(subplans, subplan);
1454  }
1455 
1456  /*
1457  * If any quals exist, they may be useful to perform further partition
1458  * pruning during execution. Gather information needed by the executor to
1459  * do partition pruning.
1460  */
1461  if (enable_partition_pruning)
1462  {
1463  List *prunequal;
1464 
1465  prunequal = extract_actual_clauses(rel->baserestrictinfo, false);
1466 
1467  if (best_path->path.param_info)
1468  {
1469  List *prmquals = best_path->path.param_info->ppi_clauses;
1470 
1471  prmquals = extract_actual_clauses(prmquals, false);
1472  prmquals = (List *) replace_nestloop_params(root,
1473  (Node *) prmquals);
1474 
1475  prunequal = list_concat(prunequal, prmquals);
1476  }
1477 
1478  if (prunequal != NIL)
1479  partpruneinfo = make_partition_pruneinfo(root, rel,
1480  best_path->subpaths,
1481  prunequal);
1482  }
1483 
1484  node->mergeplans = subplans;
1485  node->part_prune_info = partpruneinfo;
1486 
1487  /*
1488  * If prepare_sort_from_pathkeys added sort columns, but we were told to
1489  * produce either the exact tlist or a narrow tlist, we should get rid of
1490  * the sort columns again. We must inject a projection node to do so.
1491  */
1492  if (tlist_was_changed && (flags & (CP_EXACT_TLIST | CP_SMALL_TLIST)))
1493  {
1494  tlist = list_truncate(list_copy(plan->targetlist), orig_tlist_length);
1495  return inject_projection_plan(plan, tlist, plan->parallel_safe);
1496  }
1497  else
1498  return plan;
1499 }
1500 
1501 /*
1502  * create_group_result_plan
1503  * Create a Result plan for 'best_path'.
1504  * This is only used for degenerate grouping cases.
1505  *
1506  * Returns a Plan node.
1507  */
1508 static Result *
1509 create_group_result_plan(PlannerInfo *root, GroupResultPath *best_path)
1510 {
1511  Result *plan;
1512  List *tlist;
1513  List *quals;
1514 
1515  tlist = build_path_tlist(root, &best_path->path);
1516 
1517  /* best_path->quals is just bare clauses */
1518  quals = order_qual_clauses(root, best_path->quals);
1519 
1520  plan = make_result(tlist, (Node *) quals, NULL);
1521 
1522  copy_generic_path_info(&plan->plan, (Path *) best_path);
1523 
1524  return plan;
1525 }
1526 
1527 /*
1528  * create_project_set_plan
1529  * Create a ProjectSet plan for 'best_path'.
1530  *
1531  * Returns a Plan node.
1532  */
1533 static ProjectSet *
1534 create_project_set_plan(PlannerInfo *root, ProjectSetPath *best_path)
1535 {
1536  ProjectSet *plan;
1537  Plan *subplan;
1538  List *tlist;
1539 
1540  /* Since we intend to project, we don't need to constrain child tlist */
1541  subplan = create_plan_recurse(root, best_path->subpath, 0);
1542 
1543  tlist = build_path_tlist(root, &best_path->path);
1544 
1545  plan = make_project_set(tlist, subplan);
1546 
1547  copy_generic_path_info(&plan->plan, (Path *) best_path);
1548 
1549  return plan;
1550 }
1551 
1552 /*
1553  * create_material_plan
1554  * Create a Material plan for 'best_path' and (recursively) plans
1555  * for its subpaths.
1556  *
1557  * Returns a Plan node.
1558  */
1559 static Material *
1560 create_material_plan(PlannerInfo *root, MaterialPath *best_path, int flags)
1561 {
1562  Material *plan;
1563  Plan *subplan;
1564 
1565  /*
1566  * We don't want any excess columns in the materialized tuples, so request
1567  * a smaller tlist. Otherwise, since Material doesn't project, tlist
1568  * requirements pass through.
1569  */
1570  subplan = create_plan_recurse(root, best_path->subpath,
1571  flags | CP_SMALL_TLIST);
1572 
1573  plan = make_material(subplan);
1574 
1575  copy_generic_path_info(&plan->plan, (Path *) best_path);
1576 
1577  return plan;
1578 }
1579 
1580 /*
1581  * create_resultcache_plan
1582  * Create a ResultCache plan for 'best_path' and (recursively) plans
1583  * for its subpaths.
1584  *
1585  * Returns a Plan node.
1586  */
1587 static ResultCache *
1588 create_resultcache_plan(PlannerInfo *root, ResultCachePath *best_path, int flags)
1589 {
1590  ResultCache *plan;
1591  Plan *subplan;
1592  Oid *operators;
1593  Oid *collations;
1594  List *param_exprs = NIL;
1595  ListCell *lc;
1596  ListCell *lc2;
1597  int nkeys;
1598  int i;
1599 
1600  subplan = create_plan_recurse(root, best_path->subpath,
1601  flags | CP_SMALL_TLIST);
1602 
1603  param_exprs = (List *) replace_nestloop_params(root, (Node *)
1604  best_path->param_exprs);
1605 
1606  nkeys = list_length(param_exprs);
1607  Assert(nkeys > 0);
1608  operators = palloc(nkeys * sizeof(Oid));
1609  collations = palloc(nkeys * sizeof(Oid));
1610 
1611  i = 0;
1612  forboth(lc, param_exprs, lc2, best_path->hash_operators)
1613  {
1614  Expr *param_expr = (Expr *) lfirst(lc);
1615  Oid opno = lfirst_oid(lc2);
1616 
1617  operators[i] = opno;
1618  collations[i] = exprCollation((Node *) param_expr);
1619  i++;
1620  }
1621 
1622  plan = make_resultcache(subplan, operators, collations, param_exprs,
1623  best_path->singlerow, best_path->est_entries);
1624 
1625  copy_generic_path_info(&plan->plan, (Path *) best_path);
1626 
1627  return plan;
1628 }
1629 
1630 /*
1631  * create_unique_plan
1632  * Create a Unique plan for 'best_path' and (recursively) plans
1633  * for its subpaths.
1634  *
1635  * Returns a Plan node.
1636  */
1637 static Plan *
1638 create_unique_plan(PlannerInfo *root, UniquePath *best_path, int flags)
1639 {
1640  Plan *plan;
1641  Plan *subplan;
1642  List *in_operators;
1643  List *uniq_exprs;
1644  List *newtlist;
1645  int nextresno;
1646  bool newitems;
1647  int numGroupCols;
1648  AttrNumber *groupColIdx;
1649  Oid *groupCollations;
1650  int groupColPos;
1651  ListCell *l;
1652 
1653  /* Unique doesn't project, so tlist requirements pass through */
1654  subplan = create_plan_recurse(root, best_path->subpath, flags);
1655 
1656  /* Done if we don't need to do any actual unique-ifying */
1657  if (best_path->umethod == UNIQUE_PATH_NOOP)
1658  return subplan;
1659 
1660  /*
1661  * As constructed, the subplan has a "flat" tlist containing just the Vars
1662  * needed here and at upper levels. The values we are supposed to
1663  * unique-ify may be expressions in these variables. We have to add any
1664  * such expressions to the subplan's tlist.
1665  *
1666  * The subplan may have a "physical" tlist if it is a simple scan plan. If
1667  * we're going to sort, this should be reduced to the regular tlist, so
1668  * that we don't sort more data than we need to. For hashing, the tlist
1669  * should be left as-is if we don't need to add any expressions; but if we
1670  * do have to add expressions, then a projection step will be needed at
1671  * runtime anyway, so we may as well remove unneeded items. Therefore
1672  * newtlist starts from build_path_tlist() not just a copy of the
1673  * subplan's tlist; and we don't install it into the subplan unless we are
1674  * sorting or stuff has to be added.
1675  */
1676  in_operators = best_path->in_operators;
1677  uniq_exprs = best_path->uniq_exprs;
1678 
1679  /* initialize modified subplan tlist as just the "required" vars */
1680  newtlist = build_path_tlist(root, &best_path->path);
1681  nextresno = list_length(newtlist) + 1;
1682  newitems = false;
1683 
1684  foreach(l, uniq_exprs)
1685  {
1686  Expr *uniqexpr = lfirst(l);
1687  TargetEntry *tle;
1688 
1689  tle = tlist_member(uniqexpr, newtlist);
1690  if (!tle)
1691  {
1692  tle = makeTargetEntry((Expr *) uniqexpr,
1693  nextresno,
1694  NULL,
1695  false);
1696  newtlist = lappend(newtlist, tle);
1697  nextresno++;
1698  newitems = true;
1699  }
1700  }
1701 
1702  /* Use change_plan_targetlist in case we need to insert a Result node */
1703  if (newitems || best_path->umethod == UNIQUE_PATH_SORT)
1704  subplan = change_plan_targetlist(subplan, newtlist,
1705  best_path->path.parallel_safe);
1706 
1707  /*
1708  * Build control information showing which subplan output columns are to
1709  * be examined by the grouping step. Unfortunately we can't merge this
1710  * with the previous loop, since we didn't then know which version of the
1711  * subplan tlist we'd end up using.
1712  */
1713  newtlist = subplan->targetlist;
1714  numGroupCols = list_length(uniq_exprs);
1715  groupColIdx = (AttrNumber *) palloc(numGroupCols * sizeof(AttrNumber));
1716  groupCollations = (Oid *) palloc(numGroupCols * sizeof(Oid));
1717 
1718  groupColPos = 0;
1719  foreach(l, uniq_exprs)
1720  {
1721  Expr *uniqexpr = lfirst(l);
1722  TargetEntry *tle;
1723 
1724  tle = tlist_member(uniqexpr, newtlist);
1725  if (!tle) /* shouldn't happen */
1726  elog(ERROR, "failed to find unique expression in subplan tlist");
1727  groupColIdx[groupColPos] = tle->resno;
1728  groupCollations[groupColPos] = exprCollation((Node *) tle->expr);
1729  groupColPos++;
1730  }
1731 
1732  if (best_path->umethod == UNIQUE_PATH_HASH)
1733  {
1734  Oid *groupOperators;
1735 
1736  /*
1737  * Get the hashable equality operators for the Agg node to use.
1738  * Normally these are the same as the IN clause operators, but if
1739  * those are cross-type operators then the equality operators are the
1740  * ones for the IN clause operators' RHS datatype.
1741  */
1742  groupOperators = (Oid *) palloc(numGroupCols * sizeof(Oid));
1743  groupColPos = 0;
1744  foreach(l, in_operators)
1745  {
1746  Oid in_oper = lfirst_oid(l);
1747  Oid eq_oper;
1748 
1749  if (!get_compatible_hash_operators(in_oper, NULL, &eq_oper))
1750  elog(ERROR, "could not find compatible hash operator for operator %u",
1751  in_oper);
1752  groupOperators[groupColPos++] = eq_oper;
1753  }
1754 
1755  /*
1756  * Since the Agg node is going to project anyway, we can give it the
1757  * minimum output tlist, without any stuff we might have added to the
1758  * subplan tlist.
1759  */
1760  plan = (Plan *) make_agg(build_path_tlist(root, &best_path->path),
1761  NIL,
1762  AGG_HASHED,
 1763  AGGSPLIT_SIMPLE,
1764  numGroupCols,
1765  groupColIdx,
1766  groupOperators,
1767  groupCollations,
1768  NIL,
1769  NIL,
1770  best_path->path.rows,
1771  0,
1772  subplan);
1773  }
1774  else
1775  {
1776  List *sortList = NIL;
1777  Sort *sort;
1778 
1779  /* Create an ORDER BY list to sort the input compatibly */
1780  groupColPos = 0;
1781  foreach(l, in_operators)
1782  {
1783  Oid in_oper = lfirst_oid(l);
1784  Oid sortop;
1785  Oid eqop;
1786  TargetEntry *tle;
1787  SortGroupClause *sortcl;
1788 
1789  sortop = get_ordering_op_for_equality_op(in_oper, false);
1790  if (!OidIsValid(sortop)) /* shouldn't happen */
1791  elog(ERROR, "could not find ordering operator for equality operator %u",
1792  in_oper);
1793 
1794  /*
1795  * The Unique node will need equality operators. Normally these
1796  * are the same as the IN clause operators, but if those are
1797  * cross-type operators then the equality operators are the ones
1798  * for the IN clause operators' RHS datatype.
1799  */
1800  eqop = get_equality_op_for_ordering_op(sortop, NULL);
1801  if (!OidIsValid(eqop)) /* shouldn't happen */
1802  elog(ERROR, "could not find equality operator for ordering operator %u",
1803  sortop);
1804 
1805  tle = get_tle_by_resno(subplan->targetlist,
1806  groupColIdx[groupColPos]);
1807  Assert(tle != NULL);
1808 
1809  sortcl = makeNode(SortGroupClause);
1810  sortcl->tleSortGroupRef = assignSortGroupRef(tle,
1811  subplan->targetlist);
1812  sortcl->eqop = eqop;
1813  sortcl->sortop = sortop;
1814  sortcl->nulls_first = false;
1815  sortcl->hashable = false; /* no need to make this accurate */
1816  sortList = lappend(sortList, sortcl);
1817  groupColPos++;
1818  }
1819  sort = make_sort_from_sortclauses(sortList, subplan);
1820  label_sort_with_costsize(root, sort, -1.0);
1821  plan = (Plan *) make_unique_from_sortclauses((Plan *) sort, sortList);
1822  }
1823 
1824  /* Copy cost data from Path to Plan */
1825  copy_generic_path_info(plan, &best_path->path);
1826 
1827  return plan;
1828 }
1829 
1830 /*
1831  * create_gather_plan
1832  *
1833  * Create a Gather plan for 'best_path' and (recursively) plans
1834  * for its subpaths.
1835  */
1836 static Gather *
 1837 create_gather_plan(PlannerInfo *root, GatherPath *best_path)
1838 {
1839  Gather *gather_plan;
1840  Plan *subplan;
1841  List *tlist;
1842 
1843  /*
1844  * Push projection down to the child node. That way, the projection work
1845  * is parallelized, and there can be no system columns in the result (they
1846  * can't travel through a tuple queue because it uses MinimalTuple
1847  * representation).
1848  */
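 /*
  * For instance (illustrative): if the query's target list computes an
  * expensive expression, evaluating it in the child plan means each worker
  * does that work for its own rows, instead of the leader doing all of it
  * above the Gather.
  */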
1849  subplan = create_plan_recurse(root, best_path->subpath, CP_EXACT_TLIST);
1850 
1851  tlist = build_path_tlist(root, &best_path->path);
1852 
1853  gather_plan = make_gather(tlist,
1854  NIL,
1855  best_path->num_workers,
 1856  assign_special_exec_param(root),
1857  best_path->single_copy,
1858  subplan);
1859 
1860  copy_generic_path_info(&gather_plan->plan, &best_path->path);
1861 
1862  /* use parallel mode for parallel plans. */
1863  root->glob->parallelModeNeeded = true;
1864 
1865  return gather_plan;
1866 }
1867 
1868 /*
1869  * create_gather_merge_plan
1870  *
1871  * Create a Gather Merge plan for 'best_path' and (recursively)
1872  * plans for its subpaths.
1873  */
1874 static GatherMerge *
 1875 create_gather_merge_plan(PlannerInfo *root, GatherMergePath *best_path)
1876 {
1877  GatherMerge *gm_plan;
1878  Plan *subplan;
1879  List *pathkeys = best_path->path.pathkeys;
1880  List *tlist = build_path_tlist(root, &best_path->path);
1881 
1882  /* As with Gather, project away columns in the workers. */
1883  subplan = create_plan_recurse(root, best_path->subpath, CP_EXACT_TLIST);
1884 
1885  /* Create a shell for a GatherMerge plan. */
1886  gm_plan = makeNode(GatherMerge);
1887  gm_plan->plan.targetlist = tlist;
1888  gm_plan->num_workers = best_path->num_workers;
1889  copy_generic_path_info(&gm_plan->plan, &best_path->path);
1890 
1891  /* Assign the rescan Param. */
1892  gm_plan->rescan_param = assign_special_exec_param(root);
1893 
1894  /* Gather Merge is pointless with no pathkeys; use Gather instead. */
1895  Assert(pathkeys != NIL);
1896 
1897  /* Compute sort column info, and adjust subplan's tlist as needed */
1898  subplan = prepare_sort_from_pathkeys(subplan, pathkeys,
1899  best_path->subpath->parent->relids,
1900  gm_plan->sortColIdx,
1901  false,
1902  &gm_plan->numCols,
1903  &gm_plan->sortColIdx,
1904  &gm_plan->sortOperators,
1905  &gm_plan->collations,
1906  &gm_plan->nullsFirst);
1907 
1908 
1909  /*
1910  * All gather merge paths should have already guaranteed the necessary
1911  * sort order either by adding an explicit sort node or by using presorted
1912  * input. We can't simply add a sort here on additional pathkeys, because
1913  * we can't guarantee the sort would be safe. For example, expressions may
1914  * be volatile or otherwise parallel unsafe.
1915  */
1916  if (!pathkeys_contained_in(pathkeys, best_path->subpath->pathkeys))
1917  elog(ERROR, "gather merge input not sufficiently sorted");
1918 
1919  /* Now insert the subplan under GatherMerge. */
1920  gm_plan->plan.lefttree = subplan;
1921 
1922  /* use parallel mode for parallel plans. */
1923  root->glob->parallelModeNeeded = true;
1924 
1925  return gm_plan;
1926 }
1927 
1928 /*
1929  * create_projection_plan
1930  *
1931  * Create a plan tree to do a projection step and (recursively) plans
1932  * for its subpaths. We may need a Result node for the projection,
1933  * but sometimes we can just let the subplan do the work.
1934  */
1935 static Plan *
 1936 create_projection_plan(PlannerInfo *root, ProjectionPath *best_path, int flags)
1937 {
1938  Plan *plan;
1939  Plan *subplan;
1940  List *tlist;
1941  bool needs_result_node = false;
1942 
1943  /*
1944  * Convert our subpath to a Plan and determine whether we need a Result
1945  * node.
1946  *
 1947  * In most cases where we don't need to project, create_projection_path
1948  * will have set dummypp, but not always. First, some createplan.c
1949  * routines change the tlists of their nodes. (An example is that
1950  * create_merge_append_plan might add resjunk sort columns to a
1951  * MergeAppend.) Second, create_projection_path has no way of knowing
1952  * what path node will be placed on top of the projection path and
1953  * therefore can't predict whether it will require an exact tlist. For
1954  * both of these reasons, we have to recheck here.
1955  */
1956  if (use_physical_tlist(root, &best_path->path, flags))
1957  {
1958  /*
1959  * Our caller doesn't really care what tlist we return, so we don't
1960  * actually need to project. However, we may still need to ensure
1961  * proper sortgroupref labels, if the caller cares about those.
1962  */
1963  subplan = create_plan_recurse(root, best_path->subpath, 0);
1964  tlist = subplan->targetlist;
1965  if (flags & CP_LABEL_TLIST)
 1966  apply_pathtarget_labeling_to_tlist(tlist,
1967  best_path->path.pathtarget);
1968  }
1969  else if (is_projection_capable_path(best_path->subpath))
1970  {
1971  /*
1972  * Our caller requires that we return the exact tlist, but no separate
1973  * result node is needed because the subpath is projection-capable.
1974  * Tell create_plan_recurse that we're going to ignore the tlist it
1975  * produces.
1976  */
1977  subplan = create_plan_recurse(root, best_path->subpath,
1978  CP_IGNORE_TLIST);
 1979  Assert(is_projection_capable_plan(subplan));
1980  tlist = build_path_tlist(root, &best_path->path);
1981  }
1982  else
1983  {
1984  /*
1985  * It looks like we need a result node, unless by good fortune the
1986  * requested tlist is exactly the one the child wants to produce.
1987  */
1988  subplan = create_plan_recurse(root, best_path->subpath, 0);
1989  tlist = build_path_tlist(root, &best_path->path);
1990  needs_result_node = !tlist_same_exprs(tlist, subplan->targetlist);
1991  }
1992 
1993  /*
1994  * If we make a different decision about whether to include a Result node
1995  * than create_projection_path did, we'll have made slightly wrong cost
1996  * estimates; but label the plan with the cost estimates we actually used,
1997  * not "corrected" ones. (XXX this could be cleaned up if we moved more
1998  * of the sortcolumn setup logic into Path creation, but that would add
1999  * expense to creating Paths we might end up not using.)
2000  */
2001  if (!needs_result_node)
2002  {
2003  /* Don't need a separate Result, just assign tlist to subplan */
2004  plan = subplan;
2005  plan->targetlist = tlist;
2006 
2007  /* Label plan with the estimated costs we actually used */
2008  plan->startup_cost = best_path->path.startup_cost;
2009  plan->total_cost = best_path->path.total_cost;
2010  plan->plan_rows = best_path->path.rows;
2011  plan->plan_width = best_path->path.pathtarget->width;
2012  plan->parallel_safe = best_path->path.parallel_safe;
2013  /* ... but don't change subplan's parallel_aware flag */
2014  }
2015  else
2016  {
2017  /* We need a Result node */
2018  plan = (Plan *) make_result(tlist, NULL, subplan);
2019 
2020  copy_generic_path_info(plan, (Path *) best_path);
2021  }
2022 
2023  return plan;
2024 }
2025 
2026 /*
2027  * inject_projection_plan
2028  * Insert a Result node to do a projection step.
2029  *
2030  * This is used in a few places where we decide on-the-fly that we need a
2031  * projection step as part of the tree generated for some Path node.
2032  * We should try to get rid of this in favor of doing it more honestly.
2033  *
2034  * One reason it's ugly is we have to be told the right parallel_safe marking
2035  * to apply (since the tlist might be unsafe even if the child plan is safe).
2036  */
2037 static Plan *
2038 inject_projection_plan(Plan *subplan, List *tlist, bool parallel_safe)
2039 {
2040  Plan *plan;
2041 
2042  plan = (Plan *) make_result(tlist, NULL, subplan);
2043 
2044  /*
2045  * In principle, we should charge tlist eval cost plus cpu_per_tuple per
2046  * row for the Result node. But the former has probably been factored in
2047  * already and the latter was not accounted for during Path construction,
2048  * so being formally correct might just make the EXPLAIN output look less
2049  * consistent not more so. Hence, just copy the subplan's cost.
2050  */
2051  copy_plan_costsize(plan, subplan);
2052  plan->parallel_safe = parallel_safe;
2053 
2054  return plan;
2055 }
2056 
2057 /*
2058  * change_plan_targetlist
2059  * Externally available wrapper for inject_projection_plan.
2060  *
2061  * This is meant for use by FDW plan-generation functions, which might
2062  * want to adjust the tlist computed by some subplan tree. In general,
2063  * a Result node is needed to compute the new tlist, but we can optimize
2064  * some cases.
2065  *
2066  * In most cases, tlist_parallel_safe can just be passed as the parallel_safe
2067  * flag of the FDW's own Path node.
2068  */
2069 Plan *
2070 change_plan_targetlist(Plan *subplan, List *tlist, bool tlist_parallel_safe)
2071 {
2072  /*
2073  * If the top plan node can't do projections and its existing target list
2074  * isn't already what we need, we need to add a Result node to help it
2075  * along.
2076  */
2077  if (!is_projection_capable_plan(subplan) &&
2078  !tlist_same_exprs(tlist, subplan->targetlist))
2079  subplan = inject_projection_plan(subplan, tlist,
2080  subplan->parallel_safe &&
2081  tlist_parallel_safe);
2082  else
2083  {
2084  /* Else we can just replace the plan node's tlist */
2085  subplan->targetlist = tlist;
2086  subplan->parallel_safe &= tlist_parallel_safe;
2087  }
2088  return subplan;
2089 }
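 /*
  * Illustrative use (hypothetical FDW code, not part of this file): after
  * building a local subplan, an FDW that wants it to emit a different
  * target list might do something like
  *
  *     subplan = change_plan_targetlist(subplan, desired_tlist,
  *                                      best_path->path.parallel_safe);
  *
  * where desired_tlist is whatever tlist the FDW needs.  A Result node is
  * injected only if the subplan cannot project and the tlists differ.
  */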
2090 
2091 /*
2092  * create_sort_plan
2093  *
2094  * Create a Sort plan for 'best_path' and (recursively) plans
2095  * for its subpaths.
2096  */
2097 static Sort *
2098 create_sort_plan(PlannerInfo *root, SortPath *best_path, int flags)
2099 {
2100  Sort *plan;
2101  Plan *subplan;
2102 
2103  /*
2104  * We don't want any excess columns in the sorted tuples, so request a
2105  * smaller tlist. Otherwise, since Sort doesn't project, tlist
2106  * requirements pass through.
2107  */
2108  subplan = create_plan_recurse(root, best_path->subpath,
2109  flags | CP_SMALL_TLIST);
2110 
2111  /*
2112  * make_sort_from_pathkeys indirectly calls find_ec_member_matching_expr,
2113  * which will ignore any child EC members that don't belong to the given
2114  * relids. Thus, if this sort path is based on a child relation, we must
2115  * pass its relids.
2116  */
2117  plan = make_sort_from_pathkeys(subplan, best_path->path.pathkeys,
2118  IS_OTHER_REL(best_path->subpath->parent) ?
2119  best_path->path.parent->relids : NULL);
2120 
2121  copy_generic_path_info(&plan->plan, (Path *) best_path);
2122 
2123  return plan;
2124 }
2125 
2126 /*
2127  * create_incrementalsort_plan
2128  *
2129  * Do the same as create_sort_plan, but create IncrementalSort plan.
2130  */
2131 static IncrementalSort *
 2132 create_incrementalsort_plan(PlannerInfo *root, IncrementalSortPath *best_path,
2133  int flags)
2134 {
2135  IncrementalSort *plan;
2136  Plan *subplan;
2137 
2138  /* See comments in create_sort_plan() above */
2139  subplan = create_plan_recurse(root, best_path->spath.subpath,
2140  flags | CP_SMALL_TLIST);
2141  plan = make_incrementalsort_from_pathkeys(subplan,
2142  best_path->spath.path.pathkeys,
2143  IS_OTHER_REL(best_path->spath.subpath->parent) ?
2144  best_path->spath.path.parent->relids : NULL,
2145  best_path->nPresortedCols);
2146 
2147  copy_generic_path_info(&plan->sort.plan, (Path *) best_path);
2148 
2149  return plan;
2150 }
2151 
2152 /*
2153  * create_group_plan
2154  *
2155  * Create a Group plan for 'best_path' and (recursively) plans
2156  * for its subpaths.
2157  */
2158 static Group *
 2159 create_group_plan(PlannerInfo *root, GroupPath *best_path)
2160 {
2161  Group *plan;
2162  Plan *subplan;
2163  List *tlist;
2164  List *quals;
2165 
2166  /*
2167  * Group can project, so no need to be terribly picky about child tlist,
2168  * but we do need grouping columns to be available
2169  */
2170  subplan = create_plan_recurse(root, best_path->subpath, CP_LABEL_TLIST);
2171 
2172  tlist = build_path_tlist(root, &best_path->path);
2173 
2174  quals = order_qual_clauses(root, best_path->qual);
2175 
2176  plan = make_group(tlist,
2177  quals,
2178  list_length(best_path->groupClause),
 2179  extract_grouping_cols(best_path->groupClause,
2180  subplan->targetlist),
2181  extract_grouping_ops(best_path->groupClause),
 2182  extract_grouping_collations(best_path->groupClause,
2183  subplan->targetlist),
2184  subplan);
2185 
2186  copy_generic_path_info(&plan->plan, (Path *) best_path);
2187 
2188  return plan;
2189 }
2190 
2191 /*
2192  * create_upper_unique_plan
2193  *
2194  * Create a Unique plan for 'best_path' and (recursively) plans
2195  * for its subpaths.
2196  */
2197 static Unique *
 2198 create_upper_unique_plan(PlannerInfo *root, UpperUniquePath *best_path, int flags)
2199 {
2200  Unique *plan;
2201  Plan *subplan;
2202 
2203  /*
2204  * Unique doesn't project, so tlist requirements pass through; moreover we
2205  * need grouping columns to be labeled.
2206  */
2207  subplan = create_plan_recurse(root, best_path->subpath,
2208  flags | CP_LABEL_TLIST);
2209 
2210  plan = make_unique_from_pathkeys(subplan,
2211  best_path->path.pathkeys,
2212  best_path->numkeys);
2213 
2214  copy_generic_path_info(&plan->plan, (Path *) best_path);
2215 
2216  return plan;
2217 }
2218 
2219 /*
2220  * create_agg_plan
2221  *
2222  * Create an Agg plan for 'best_path' and (recursively) plans
2223  * for its subpaths.
2224  */
2225 static Agg *
 2226 create_agg_plan(PlannerInfo *root, AggPath *best_path)
2227 {
2228  Agg *plan;
2229  Plan *subplan;
2230  List *tlist;
2231  List *quals;
2232 
2233  /*
2234  * Agg can project, so no need to be terribly picky about child tlist, but
2235  * we do need grouping columns to be available
2236  */
2237  subplan = create_plan_recurse(root, best_path->subpath, CP_LABEL_TLIST);
2238 
2239  tlist = build_path_tlist(root, &best_path->path);
2240 
2241  quals = order_qual_clauses(root, best_path->qual);
2242 
2243  plan = make_agg(tlist, quals,
2244  best_path->aggstrategy,
2245  best_path->aggsplit,
2246  list_length(best_path->groupClause),
 2247  extract_grouping_cols(best_path->groupClause,
2248  subplan->targetlist),
2249  extract_grouping_ops(best_path->groupClause),
 2250  extract_grouping_collations(best_path->groupClause,
2251  subplan->targetlist),
2252  NIL,
2253  NIL,
2254  best_path->numGroups,
2255  best_path->transitionSpace,
2256  subplan);
2257 
2258  copy_generic_path_info(&plan->plan, (Path *) best_path);
2259 
2260  return plan;
2261 }
2262 
2263 /*
2264  * Given a groupclause for a collection of grouping sets, produce the
2265  * corresponding groupColIdx.
2266  *
2267  * root->grouping_map maps the tleSortGroupRef to the actual column position in
2268  * the input tuple. So we get the ref from the entries in the groupclause and
2269  * look them up there.
2270  */
2271 static AttrNumber *
2272 remap_groupColIdx(PlannerInfo *root, List *groupClause)
2273 {
2274  AttrNumber *grouping_map = root->grouping_map;
2275  AttrNumber *new_grpColIdx;
2276  ListCell *lc;
2277  int i;
2278 
2279  Assert(grouping_map);
2280 
2281  new_grpColIdx = palloc0(sizeof(AttrNumber) * list_length(groupClause));
2282 
2283  i = 0;
2284  foreach(lc, groupClause)
2285  {
2286  SortGroupClause *clause = lfirst(lc);
2287 
2288  new_grpColIdx[i++] = grouping_map[clause->tleSortGroupRef];
2289  }
2290 
2291  return new_grpColIdx;
2292 }
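 /*
  * Worked example (illustrative): if the child tlist has the grouping
  * column with tleSortGroupRef 1 at resno 4 and the one with ref 2 at
  * resno 2, then grouping_map[1] = 4 and grouping_map[2] = 2, and a
  * groupClause listing refs (2, 1) maps to groupColIdx = {2, 4}.
  */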
2293 
2294 /*
2295  * create_groupingsets_plan
2296  * Create a plan for 'best_path' and (recursively) plans
2297  * for its subpaths.
2298  *
2299  * What we emit is an Agg plan with some vestigial Agg and Sort nodes
2300  * hanging off the side. The top Agg implements the last grouping set
2301  * specified in the GroupingSetsPath, and any additional grouping sets
2302  * each give rise to a subsidiary Agg and Sort node in the top Agg's
2303  * "chain" list. These nodes don't participate in the plan directly,
2304  * but they are a convenient way to represent the required data for
2305  * the extra steps.
2306  *
2307  * Returns a Plan node.
2308  */
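 /*
  * Illustrative example: GROUP BY ROLLUP (a, b) involves the grouping sets
  * (a, b), (a) and (); one of them is handled by the top Agg node while the
  * others are represented by the chained Agg (and Sort) nodes, all reading
  * the same subplan output.
  */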
2309 static Plan *
 2310 create_groupingsets_plan(PlannerInfo *root, GroupingSetsPath *best_path)
2311 {
2312  Agg *plan;
2313  Plan *subplan;
2314  List *rollups = best_path->rollups;
2315  AttrNumber *grouping_map;
2316  int maxref;
2317  List *chain;
2318  ListCell *lc;
2319 
2320  /* Shouldn't get here without grouping sets */
2321  Assert(root->parse->groupingSets);
2322  Assert(rollups != NIL);
2323 
2324  /*
2325  * Agg can project, so no need to be terribly picky about child tlist, but
2326  * we do need grouping columns to be available
2327  */
2328  subplan = create_plan_recurse(root, best_path->subpath, CP_LABEL_TLIST);
2329 
2330  /*
2331  * Compute the mapping from tleSortGroupRef to column index in the child's
2332  * tlist. First, identify max SortGroupRef in groupClause, for array
2333  * sizing.
2334  */
2335  maxref = 0;
2336  foreach(lc, root->parse->groupClause)
2337  {
2338  SortGroupClause *gc = (SortGroupClause *) lfirst(lc);
2339 
2340  if (gc->tleSortGroupRef > maxref)
2341  maxref = gc->tleSortGroupRef;
2342  }
2343 
2344  grouping_map = (AttrNumber *) palloc0((maxref + 1) * sizeof(AttrNumber));
2345 
2346  /* Now look up the column numbers in the child's tlist */
2347  foreach(lc, root->parse->groupClause)
2348  {
2349  SortGroupClause *gc = (SortGroupClause *) lfirst(lc);
2350  TargetEntry *tle = get_sortgroupclause_tle(gc, subplan->targetlist);
2351 
2352  grouping_map[gc->tleSortGroupRef] = tle->resno;
2353  }
2354 
2355  /*
2356  * During setrefs.c, we'll need the grouping_map to fix up the cols lists
2357  * in GroupingFunc nodes. Save it for setrefs.c to use.
2358  */
2359  Assert(root->grouping_map == NULL);
2360  root->grouping_map = grouping_map;
2361 
2362  /*
2363  * Generate the side nodes that describe the other sort and group
2364  * operations besides the top one. Note that we don't worry about putting
2365  * accurate cost estimates in the side nodes; only the topmost Agg node's
2366  * costs will be shown by EXPLAIN.
2367  */
2368  chain = NIL;
2369  if (list_length(rollups) > 1)
2370  {
2371  bool is_first_sort = ((RollupData *) linitial(rollups))->is_hashed;
2372 
2373  for_each_from(lc, rollups, 1)
2374  {
2375  RollupData *rollup = lfirst(lc);
2376  AttrNumber *new_grpColIdx;
2377  Plan *sort_plan = NULL;
2378  Plan *agg_plan;
2379  AggStrategy strat;
2380 
2381  new_grpColIdx = remap_groupColIdx(root, rollup->groupClause);
2382 
2383  if (!rollup->is_hashed && !is_first_sort)
2384  {
2385  sort_plan = (Plan *)
 2386  make_sort_from_groupcols(rollup->groupClause,
2387  new_grpColIdx,
2388  subplan);
2389  }
2390 
2391  if (!rollup->is_hashed)
2392  is_first_sort = false;
2393 
2394  if (rollup->is_hashed)
2395  strat = AGG_HASHED;
2396  else if (list_length(linitial(rollup->gsets)) == 0)
2397  strat = AGG_PLAIN;
2398  else
2399  strat = AGG_SORTED;
2400 
2401  agg_plan = (Plan *) make_agg(NIL,
2402  NIL,
2403  strat,
 2404  AGGSPLIT_SIMPLE,
2405  list_length((List *) linitial(rollup->gsets)),
2406  new_grpColIdx,
 2407  extract_grouping_ops(rollup->groupClause),
 2408  extract_grouping_collations(rollup->groupClause, subplan->targetlist),
2409  rollup->gsets,
2410  NIL,
2411  rollup->numGroups,
2412  best_path->transitionSpace,
2413  sort_plan);
2414 
2415  /*
2416  * Remove stuff we don't need to avoid bloating debug output.
2417  */
2418  if (sort_plan)
2419  {
2420  sort_plan->targetlist = NIL;
2421  sort_plan->lefttree = NULL;
2422  }
2423 
2424  chain = lappend(chain, agg_plan);
2425  }
2426  }
2427 
2428  /*
2429  * Now make the real Agg node
2430  */
2431  {
2432  RollupData *rollup = linitial(rollups);
2433  AttrNumber *top_grpColIdx;
2434  int numGroupCols;
2435 
2436  top_grpColIdx = remap_groupColIdx(root, rollup->groupClause);
2437 
2438  numGroupCols = list_length((List *) linitial(rollup->gsets));
2439 
2440  plan = make_agg(build_path_tlist(root, &best_path->path),
2441  best_path->qual,
2442  best_path->aggstrategy,
 2443  AGGSPLIT_SIMPLE,
2444  numGroupCols,
2445  top_grpColIdx,
 2446  extract_grouping_ops(rollup->groupClause),
 2447  extract_grouping_collations(rollup->groupClause, subplan->targetlist),
2448  rollup->gsets,
2449  chain,
2450  rollup->numGroups,
2451  best_path->transitionSpace,
2452  subplan);
2453 
2454  /* Copy cost data from Path to Plan */
2455  copy_generic_path_info(&plan->plan, &best_path->path);
2456  }
2457 
2458  return (Plan *) plan;
2459 }
2460 
2461 /*
2462  * create_minmaxagg_plan
2463  *
2464  * Create a Result plan for 'best_path' and (recursively) plans
2465  * for its subpaths.
2466  */
2467 static Result *
 2468 create_minmaxagg_plan(PlannerInfo *root, MinMaxAggPath *best_path)
2469 {
2470  Result *plan;
2471  List *tlist;
2472  ListCell *lc;
2473 
2474  /* Prepare an InitPlan for each aggregate's subquery. */
2475  foreach(lc, best_path->mmaggregates)
2476  {
2477  MinMaxAggInfo *mminfo = (MinMaxAggInfo *) lfirst(lc);
2478  PlannerInfo *subroot = mminfo->subroot;
2479  Query *subparse = subroot->parse;
2480  Plan *plan;
2481 
2482  /*
2483  * Generate the plan for the subquery. We already have a Path, but we
2484  * have to convert it to a Plan and attach a LIMIT node above it.
2485  * Since we are entering a different planner context (subroot),
2486  * recurse to create_plan not create_plan_recurse.
2487  */
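 /*
  * (Illustratively, for SELECT MIN(x) FROM t the mminfo->path is roughly
  * equivalent to SELECT x FROM t WHERE x IS NOT NULL ORDER BY x LIMIT 1;
  * the LIMIT node added below caps it at a single row.)
  */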
2488  plan = create_plan(subroot, mminfo->path);
2489 
2490  plan = (Plan *) make_limit(plan,
2491  subparse->limitOffset,
2492  subparse->limitCount,
2493  subparse->limitOption,
2494  0, NULL, NULL, NULL);
2495 
2496  /* Must apply correct cost/width data to Limit node */
2497  plan->startup_cost = mminfo->path->startup_cost;
2498  plan->total_cost = mminfo->pathcost;
2499  plan->plan_rows = 1;
2500  plan->plan_width = mminfo->path->pathtarget->width;
2501  plan->parallel_aware = false;
2502  plan->parallel_safe = mminfo->path->parallel_safe;
2503 
2504  /* Convert the plan into an InitPlan in the outer query. */
2505  SS_make_initplan_from_plan(root, subroot, plan, mminfo->param);
2506  }
2507 
2508  /* Generate the output plan --- basically just a Result */
2509  tlist = build_path_tlist(root, &best_path->path);
2510 
2511  plan = make_result(tlist, (Node *) best_path->quals, NULL);
2512 
2513  copy_generic_path_info(&plan->plan, (Path *) best_path);
2514 
2515  /*
2516  * During setrefs.c, we'll need to replace references to the Agg nodes
2517  * with InitPlan output params. (We can't just do that locally in the
2518  * MinMaxAgg node, because path nodes above here may have Agg references
2519  * as well.) Save the mmaggregates list to tell setrefs.c to do that.
2520  */
2521  Assert(root->minmax_aggs == NIL);
2522  root->minmax_aggs = best_path->mmaggregates;
2523 
2524  return plan;
2525 }
2526 
2527 /*
2528  * create_windowagg_plan
2529  *
2530  * Create a WindowAgg plan for 'best_path' and (recursively) plans
2531  * for its subpaths.
2532  */
2533 static WindowAgg *
 2534 create_windowagg_plan(PlannerInfo *root, WindowAggPath *best_path)
2535 {
2536  WindowAgg *plan;
2537  WindowClause *wc = best_path->winclause;
2538  int numPart = list_length(wc->partitionClause);
2539  int numOrder = list_length(wc->orderClause);
2540  Plan *subplan;
2541  List *tlist;
2542  int partNumCols;
2543  AttrNumber *partColIdx;
2544  Oid *partOperators;
2545  Oid *partCollations;
2546  int ordNumCols;
2547  AttrNumber *ordColIdx;
2548  Oid *ordOperators;
2549  Oid *ordCollations;
2550  ListCell *lc;
2551 
2552  /*
2553  * Choice of tlist here is motivated by the fact that WindowAgg will be
2554  * storing the input rows of window frames in a tuplestore; it therefore
2555  * behooves us to request a small tlist to avoid wasting space. We do of
2556  * course need grouping columns to be available.
2557  */
2558  subplan = create_plan_recurse(root, best_path->subpath,
 2559  CP_LABEL_TLIST | CP_SMALL_TLIST);
2560 
2561  tlist = build_path_tlist(root, &best_path->path);
2562 
2563  /*
2564  * Convert SortGroupClause lists into arrays of attr indexes and equality
2565  * operators, as wanted by executor. (Note: in principle, it's possible
2566  * to drop some of the sort columns, if they were proved redundant by
2567  * pathkey logic. However, it doesn't seem worth going out of our way to
2568  * optimize such cases. In any case, we must *not* remove the ordering
2569  * column for RANGE OFFSET cases, as the executor needs that for in_range
2570  * tests even if it's known to be equal to some partitioning column.)
2571  */
2572  partColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numPart);
2573  partOperators = (Oid *) palloc(sizeof(Oid) * numPart);
2574  partCollations = (Oid *) palloc(sizeof(Oid) * numPart);
2575 
2576  partNumCols = 0;
2577  foreach(lc, wc->partitionClause)
2578  {
2579  SortGroupClause *sgc = (SortGroupClause *) lfirst(lc);
2580  TargetEntry *tle = get_sortgroupclause_tle(sgc, subplan->targetlist);
2581 
2582  Assert(OidIsValid(sgc->eqop));
2583  partColIdx[partNumCols] = tle->resno;
2584  partOperators[partNumCols] = sgc->eqop;
2585  partCollations[partNumCols] = exprCollation((Node *) tle->expr);
2586  partNumCols++;
2587  }
2588 
2589  ordColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numOrder);
2590  ordOperators = (Oid *) palloc(sizeof(Oid) * numOrder);
2591  ordCollations = (Oid *) palloc(sizeof(Oid) * numOrder);
2592 
2593  ordNumCols = 0;
2594  foreach(lc, wc->orderClause)
2595  {
2596  SortGroupClause *sgc = (SortGroupClause *) lfirst(lc);
2597  TargetEntry *tle = get_sortgroupclause_tle(sgc, subplan->targetlist);
2598 
2599  Assert(OidIsValid(sgc->eqop));
2600  ordColIdx[ordNumCols] = tle->resno;
2601  ordOperators[ordNumCols] = sgc->eqop;
2602  ordCollations[ordNumCols] = exprCollation((Node *) tle->expr);
2603  ordNumCols++;
2604  }
2605 
2606  /* And finally we can make the WindowAgg node */
2607  plan = make_windowagg(tlist,
2608  wc->winref,
2609  partNumCols,
2610  partColIdx,
2611  partOperators,
2612  partCollations,
2613  ordNumCols,
2614  ordColIdx,
2615  ordOperators,
2616  ordCollations,
2617  wc->frameOptions,
2618  wc->startOffset,
2619  wc->endOffset,
2620  wc->startInRangeFunc,
2621  wc->endInRangeFunc,
2622  wc->inRangeColl,
2623  wc->inRangeAsc,
2624  wc->inRangeNullsFirst,
2625  subplan);
2626 
2627  copy_generic_path_info(&plan->plan, (Path *) best_path);
2628 
2629  return plan;
2630 }
2631 
2632 /*
2633  * create_setop_plan
2634  *
2635  * Create a SetOp plan for 'best_path' and (recursively) plans
2636  * for its subpaths.
2637  */
2638 static SetOp *
2639 create_setop_plan(PlannerInfo *root, SetOpPath *best_path, int flags)
2640 {
2641  SetOp *plan;
2642  Plan *subplan;
2643  long numGroups;
2644 
2645  /*
2646  * SetOp doesn't project, so tlist requirements pass through; moreover we
2647  * need grouping columns to be labeled.
2648  */
2649  subplan = create_plan_recurse(root, best_path->subpath,
2650  flags | CP_LABEL_TLIST);
2651 
2652  /* Convert numGroups to long int --- but 'ware overflow! */
2653  numGroups = (long) Min(best_path->numGroups, (double) LONG_MAX);
2654 
2655  plan = make_setop(best_path->cmd,
2656  best_path->strategy,
2657  subplan,
2658  best_path->distinctList,
2659  best_path->flagColIdx,
2660  best_path->firstFlag,
2661  numGroups);
2662 
2663  copy_generic_path_info(&plan->plan, (Path *) best_path);
2664 
2665  return plan;
2666 }
2667 
2668 /*
2669  * create_recursiveunion_plan
2670  *
2671  * Create a RecursiveUnion plan for 'best_path' and (recursively) plans
2672  * for its subpaths.
2673  */
2674 static RecursiveUnion *
 2675 create_recursiveunion_plan(PlannerInfo *root, RecursiveUnionPath *best_path)
2676 {
2677  RecursiveUnion *plan;
2678  Plan *leftplan;
2679  Plan *rightplan;
2680  List *tlist;
2681  long numGroups;
2682 
2683  /* Need both children to produce same tlist, so force it */
2684  leftplan = create_plan_recurse(root, best_path->leftpath, CP_EXACT_TLIST);
2685  rightplan = create_plan_recurse(root, best_path->rightpath, CP_EXACT_TLIST);
2686 
2687  tlist = build_path_tlist(root, &best_path->path);
2688 
2689  /* Convert numGroups to long int --- but 'ware overflow! */
2690  numGroups = (long) Min(best_path->numGroups, (double) LONG_MAX);
2691 
2692  plan = make_recursive_union(tlist,
2693  leftplan,
2694  rightplan,
2695  best_path->wtParam,
2696  best_path->distinctList,
2697  numGroups);
2698 
2699  copy_generic_path_info(&plan->plan, (Path *) best_path);
2700 
2701  return plan;
2702 }
2703 
2704 /*
2705  * create_lockrows_plan
2706  *
2707  * Create a LockRows plan for 'best_path' and (recursively) plans
2708  * for its subpaths.
2709  */
2710 static LockRows *
 2711 create_lockrows_plan(PlannerInfo *root, LockRowsPath *best_path,
2712  int flags)
2713 {
2714  LockRows *plan;
2715  Plan *subplan;
2716 
2717  /* LockRows doesn't project, so tlist requirements pass through */
2718  subplan = create_plan_recurse(root, best_path->subpath, flags);
2719 
2720  plan = make_lockrows(subplan, best_path->rowMarks, best_path->epqParam);
2721 
2722  copy_generic_path_info(&plan->plan, (Path *) best_path);
2723 
2724  return plan;
2725 }
2726 
2727 /*
2728  * create_modifytable_plan
2729  * Create a ModifyTable plan for 'best_path'.
2730  *
2731  * Returns a Plan node.
2732  */
2733 static ModifyTable *
 2734 create_modifytable_plan(PlannerInfo *root, ModifyTablePath *best_path)
2735 {
2736  ModifyTable *plan;
2737  Path *subpath = best_path->subpath;
2738  Plan *subplan;
2739 
2740  /* Subplan must produce exactly the specified tlist */
2741  subplan = create_plan_recurse(root, subpath, CP_EXACT_TLIST);
2742 
2743  /* Transfer resname/resjunk labeling, too, to keep executor happy */
 2744  apply_tlist_labeling(subplan->targetlist, root->processed_tlist);
2745 
2746  plan = make_modifytable(root,
2747  subplan,
2748  best_path->operation,
2749  best_path->canSetTag,
2750  best_path->nominalRelation,
2751  best_path->rootRelation,
2752  best_path->partColsUpdated,
2753  best_path->resultRelations,
2754  best_path->updateColnosLists,
2755  best_path->withCheckOptionLists,
2756  best_path->returningLists,
2757  best_path->rowMarks,
2758  best_path->onconflict,
2759  best_path->epqParam);
2760 
2761  copy_generic_path_info(&plan->plan, &best_path->path);
2762 
2763  return plan;
2764 }
2765 
2766 /*
2767  * create_limit_plan
2768  *
2769  * Create a Limit plan for 'best_path' and (recursively) plans
2770  * for its subpaths.
2771  */
2772 static Limit *
2773 create_limit_plan(PlannerInfo *root, LimitPath *best_path, int flags)
2774 {
2775  Limit *plan;
2776  Plan *subplan;
2777  int numUniqkeys = 0;
2778  AttrNumber *uniqColIdx = NULL;
2779  Oid *uniqOperators = NULL;
2780  Oid *uniqCollations = NULL;
2781 
2782  /* Limit doesn't project, so tlist requirements pass through */
2783  subplan = create_plan_recurse(root, best_path->subpath, flags);
2784 
2785  /* Extract information necessary for comparing rows for WITH TIES. */
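 /*
  * For example (illustrative): SELECT ... ORDER BY score DESC
  * FETCH FIRST 3 ROWS WITH TIES must keep emitting rows as long as they
  * compare equal on the ORDER BY columns, so the Limit node needs those
  * columns' positions, equality operators and collations.
  */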
2786  if (best_path->limitOption == LIMIT_OPTION_WITH_TIES)
2787  {
2788  Query *parse = root->parse;
2789  ListCell *l;
2790 
2791  numUniqkeys = list_length(parse->sortClause);
2792  uniqColIdx = (AttrNumber *) palloc(numUniqkeys * sizeof(AttrNumber));
2793  uniqOperators = (Oid *) palloc(numUniqkeys * sizeof(Oid));
2794  uniqCollations = (Oid *) palloc(numUniqkeys * sizeof(Oid));
2795 
2796  numUniqkeys = 0;
2797  foreach(l, parse->sortClause)
2798  {
2799  SortGroupClause *sortcl = (SortGroupClause *) lfirst(l);
2800  TargetEntry *tle = get_sortgroupclause_tle(sortcl, parse->targetList);
2801 
2802  uniqColIdx[numUniqkeys] = tle->resno;
2803  uniqOperators[numUniqkeys] = sortcl->eqop;
2804  uniqCollations[numUniqkeys] = exprCollation((Node *) tle->expr);
2805  numUniqkeys++;
2806  }
2807  }
2808 
2809  plan = make_limit(subplan,
2810  best_path->limitOffset,
2811  best_path->limitCount,
2812  best_path->limitOption,
2813  numUniqkeys, uniqColIdx, uniqOperators, uniqCollations);
2814 
2815  copy_generic_path_info(&plan->plan, (Path *) best_path);
2816 
2817  return plan;
2818 }
2819 
2820 
2821 /*****************************************************************************
2822  *
2823  * BASE-RELATION SCAN METHODS
2824  *
2825  *****************************************************************************/
2826 
2827 
2828 /*
2829  * create_seqscan_plan
2830  * Returns a seqscan plan for the base relation scanned by 'best_path'
2831  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
2832  */
2833 static SeqScan *
 2834 create_seqscan_plan(PlannerInfo *root, Path *best_path,
2835  List *tlist, List *scan_clauses)
2836 {
2837  SeqScan *scan_plan;
2838  Index scan_relid = best_path->parent->relid;
2839 
2840  /* it should be a base rel... */
2841  Assert(scan_relid > 0);
2842  Assert(best_path->parent->rtekind == RTE_RELATION);
2843 
2844  /* Sort clauses into best execution order */
2845  scan_clauses = order_qual_clauses(root, scan_clauses);
2846 
2847  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
2848  scan_clauses = extract_actual_clauses(scan_clauses, false);
2849 
2850  /* Replace any outer-relation variables with nestloop params */
2851  if (best_path->param_info)
2852  {
2853  scan_clauses = (List *)
2854  replace_nestloop_params(root, (Node *) scan_clauses);
2855  }
2856 
2857  scan_plan = make_seqscan(tlist,
2858  scan_clauses,
2859  scan_relid);
2860 
2861  copy_generic_path_info(&scan_plan->plan, best_path);
2862 
2863  return scan_plan;
2864 }
2865 
2866 /*
2867  * create_samplescan_plan
2868  * Returns a samplescan plan for the base relation scanned by 'best_path'
2869  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
2870  */
2871 static SampleScan *
 2872 create_samplescan_plan(PlannerInfo *root, Path *best_path,
2873  List *tlist, List *scan_clauses)
2874 {
2875  SampleScan *scan_plan;
2876  Index scan_relid = best_path->parent->relid;
2877  RangeTblEntry *rte;
2878  TableSampleClause *tsc;
2879 
2880  /* it should be a base rel with a tablesample clause... */
2881  Assert(scan_relid > 0);
2882  rte = planner_rt_fetch(scan_relid, root);
2883  Assert(rte->rtekind == RTE_RELATION);
2884  tsc = rte->tablesample;
2885  Assert(tsc != NULL);
2886 
2887  /* Sort clauses into best execution order */
2888  scan_clauses = order_qual_clauses(root, scan_clauses);
2889 
2890  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
2891  scan_clauses = extract_actual_clauses(scan_clauses, false);
2892 
2893  /* Replace any outer-relation variables with nestloop params */
2894  if (best_path->param_info)
2895  {
2896  scan_clauses = (List *)
2897  replace_nestloop_params(root, (Node *) scan_clauses);
2898  tsc = (TableSampleClause *)
2899  replace_nestloop_params(root, (Node *) tsc);
2900  }
2901 
2902  scan_plan = make_samplescan(tlist,
2903  scan_clauses,
2904  scan_relid,
2905  tsc);
2906 
2907  copy_generic_path_info(&scan_plan->scan.plan, best_path);
2908 
2909  return scan_plan;
2910 }
2911 
2912 /*
2913  * create_indexscan_plan
2914  * Returns an indexscan plan for the base relation scanned by 'best_path'
2915  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
2916  *
2917  * We use this for both plain IndexScans and IndexOnlyScans, because the
2918  * qual preprocessing work is the same for both. Note that the caller tells
2919  * us which to build --- we don't look at best_path->path.pathtype, because
2920  * create_bitmap_subplan needs to be able to override the prior decision.
2921  */
2922 static Scan *
 2923 create_indexscan_plan(PlannerInfo *root,
2924  IndexPath *best_path,
2925  List *tlist,
2926  List *scan_clauses,
2927  bool indexonly)
2928 {
2929  Scan *scan_plan;
2930  List *indexclauses = best_path->indexclauses;
2931  List *indexorderbys = best_path->indexorderbys;
2932  Index baserelid = best_path->path.parent->relid;
2933  Oid indexoid = best_path->indexinfo->indexoid;
2934  List *qpqual;
2935  List *stripped_indexquals;
2936  List *fixed_indexquals;
2937  List *fixed_indexorderbys;
2938  List *indexorderbyops = NIL;
2939  ListCell *l;
2940 
2941  /* it should be a base rel... */
2942  Assert(baserelid > 0);
2943  Assert(best_path->path.parent->rtekind == RTE_RELATION);
2944 
2945  /*
2946  * Extract the index qual expressions (stripped of RestrictInfos) from the
2947  * IndexClauses list, and prepare a copy with index Vars substituted for
2948  * table Vars. (This step also does replace_nestloop_params on the
2949  * fixed_indexquals.)
2950  */
2951  fix_indexqual_references(root, best_path,
2952  &stripped_indexquals,
2953  &fixed_indexquals);
2954 
2955  /*
2956  * Likewise fix up index attr references in the ORDER BY expressions.
2957  */
2958  fixed_indexorderbys = fix_indexorderby_references(root, best_path);
2959 
2960  /*
2961  * The qpqual list must contain all restrictions not automatically handled
2962  * by the index, other than pseudoconstant clauses which will be handled
2963  * by a separate gating plan node. All the predicates in the indexquals
2964  * will be checked (either by the index itself, or by nodeIndexscan.c),
2965  * but if there are any "special" operators involved then they must be
2966  * included in qpqual. The upshot is that qpqual must contain
2967  * scan_clauses minus whatever appears in indexquals.
2968  *
2969  * is_redundant_with_indexclauses() detects cases where a scan clause is
2970  * present in the indexclauses list or is generated from the same
2971  * EquivalenceClass as some indexclause, and is therefore redundant with
2972  * it, though not equal. (The latter happens when indxpath.c prefers a
2973  * different derived equality than what generate_join_implied_equalities
2974  * picked for a parameterized scan's ppi_clauses.) Note that it will not
2975  * match to lossy index clauses, which is critical because we have to
2976  * include the original clause in qpqual in that case.
2977  *
2978  * In some situations (particularly with OR'd index conditions) we may
2979  * have scan_clauses that are not equal to, but are logically implied by,
2980  * the index quals; so we also try a predicate_implied_by() check to see
2981  * if we can discard quals that way. (predicate_implied_by assumes its
2982  * first input contains only immutable functions, so we have to check
2983  * that.)
2984  *
2985  * Note: if you change this bit of code you should also look at
2986  * extract_nonindex_conditions() in costsize.c.
2987  */
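 /*
  * For example (illustrative): with an index on t(a) and
  * WHERE a > 10 AND b = 5, "a > 10" is enforced by the index while "b = 5"
  * stays in qpqual and is re-evaluated against each fetched heap tuple.
  */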
2988  qpqual = NIL;
2989  foreach(l, scan_clauses)
2990  {
2991  RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
2992 
2993  if (rinfo->pseudoconstant)
2994  continue; /* we may drop pseudoconstants here */
2995  if (is_redundant_with_indexclauses(rinfo, indexclauses))
2996  continue; /* dup or derived from same EquivalenceClass */
2997  if (!contain_mutable_functions((Node *) rinfo->clause) &&
2998  predicate_implied_by(list_make1(rinfo->clause), stripped_indexquals,
2999  false))
3000  continue; /* provably implied by indexquals */
3001  qpqual = lappend(qpqual, rinfo);
3002  }
3003 
3004  /* Sort clauses into best execution order */
3005  qpqual = order_qual_clauses(root, qpqual);
3006 
3007  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3008  qpqual = extract_actual_clauses(qpqual, false);
3009 
3010  /*
3011  * We have to replace any outer-relation variables with nestloop params in
3012  * the indexqualorig, qpqual, and indexorderbyorig expressions. A bit
3013  * annoying to have to do this separately from the processing in
3014  * fix_indexqual_references --- rethink this when generalizing the inner
3015  * indexscan support. But note we can't really do this earlier because
3016  * it'd break the comparisons to predicates above ... (or would it? Those
3017  * wouldn't have outer refs)
3018  */
3019  if (best_path->path.param_info)
3020  {
3021  stripped_indexquals = (List *)
3022  replace_nestloop_params(root, (Node *) stripped_indexquals);
3023  qpqual = (List *)
3024  replace_nestloop_params(root, (Node *) qpqual);
3025  indexorderbys = (List *)
3026  replace_nestloop_params(root, (Node *) indexorderbys);
3027  }
3028 
3029  /*
3030  * If there are ORDER BY expressions, look up the sort operators for their
3031  * result datatypes.
3032  */
3033  if (indexorderbys)
3034  {
3035  ListCell *pathkeyCell,
3036  *exprCell;
3037 
3038  /*
3039  * PathKey contains OID of the btree opfamily we're sorting by, but
3040  * that's not quite enough because we need the expression's datatype
3041  * to look up the sort operator in the operator family.
3042  */
3043  Assert(list_length(best_path->path.pathkeys) == list_length(indexorderbys));
3044  forboth(pathkeyCell, best_path->path.pathkeys, exprCell, indexorderbys)
3045  {
3046  PathKey *pathkey = (PathKey *) lfirst(pathkeyCell);
3047  Node *expr = (Node *) lfirst(exprCell);
3048  Oid exprtype = exprType(expr);
3049  Oid sortop;
3050 
3051  /* Get sort operator from opfamily */
3052  sortop = get_opfamily_member(pathkey->pk_opfamily,
3053  exprtype,
3054  exprtype,
3055  pathkey->pk_strategy);
3056  if (!OidIsValid(sortop))
3057  elog(ERROR, "missing operator %d(%u,%u) in opfamily %u",
3058  pathkey->pk_strategy, exprtype, exprtype, pathkey->pk_opfamily);
3059  indexorderbyops = lappend_oid(indexorderbyops, sortop);
3060  }
3061  }
3062 
3063  /* Finally ready to build the plan node */
3064  if (indexonly)
3065  scan_plan = (Scan *) make_indexonlyscan(tlist,
3066  qpqual,
3067  baserelid,
3068  indexoid,
3069  fixed_indexquals,
3070  fixed_indexorderbys,
3071  best_path->indexinfo->indextlist,
3072  best_path->indexscandir);
3073  else
3074  scan_plan = (Scan *) make_indexscan(tlist,
3075  qpqual,
3076  baserelid,
3077  indexoid,
3078  fixed_indexquals,
3079  stripped_indexquals,
3080  fixed_indexorderbys,
3081  indexorderbys,
3082  indexorderbyops,
3083  best_path->indexscandir);
3084 
3085  copy_generic_path_info(&scan_plan->plan, &best_path->path);
3086 
3087  return scan_plan;
3088 }
3089 
3090 /*
3091  * create_bitmap_scan_plan
3092  * Returns a bitmap scan plan for the base relation scanned by 'best_path'
3093  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3094  */
3095 static BitmapHeapScan *
 3096 create_bitmap_scan_plan(PlannerInfo *root,
3097  BitmapHeapPath *best_path,
3098  List *tlist,
3099  List *scan_clauses)
3100 {
3101  Index baserelid = best_path->path.parent->relid;
3102  Plan *bitmapqualplan;
3103  List *bitmapqualorig;
3104  List *indexquals;
3105  List *indexECs;
3106  List *qpqual;
3107  ListCell *l;
3108  BitmapHeapScan *scan_plan;
3109 
3110  /* it should be a base rel... */
3111  Assert(baserelid > 0);
3112  Assert(best_path->path.parent->rtekind == RTE_RELATION);
3113 
3114  /* Process the bitmapqual tree into a Plan tree and qual lists */
3115  bitmapqualplan = create_bitmap_subplan(root, best_path->bitmapqual,
3116  &bitmapqualorig, &indexquals,
3117  &indexECs);
3118 
3119  if (best_path->path.parallel_aware)
3120  bitmap_subplan_mark_shared(bitmapqualplan);
3121 
3122  /*
3123  * The qpqual list must contain all restrictions not automatically handled
3124  * by the index, other than pseudoconstant clauses which will be handled
3125  * by a separate gating plan node. All the predicates in the indexquals
3126  * will be checked (either by the index itself, or by
3127  * nodeBitmapHeapscan.c), but if there are any "special" operators
3128  * involved then they must be added to qpqual. The upshot is that qpqual
3129  * must contain scan_clauses minus whatever appears in indexquals.
3130  *
3131  * This loop is similar to the comparable code in create_indexscan_plan(),
3132  * but with some differences because it has to compare the scan clauses to
3133  * stripped (no RestrictInfos) indexquals. See comments there for more
3134  * info.
3135  *
3136  * In normal cases simple equal() checks will be enough to spot duplicate
3137  * clauses, so we try that first. We next see if the scan clause is
3138  * redundant with any top-level indexqual by virtue of being generated
3139  * from the same EC. After that, try predicate_implied_by().
3140  *
3141  * Unlike create_indexscan_plan(), the predicate_implied_by() test here is
3142  * useful for getting rid of qpquals that are implied by index predicates,
3143  * because the predicate conditions are included in the "indexquals"
3144  * returned by create_bitmap_subplan(). Bitmap scans have to do it that
3145  * way because predicate conditions need to be rechecked if the scan
3146  * becomes lossy, so they have to be included in bitmapqualorig.
3147  */
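 /*
  * For example (illustrative): with a partial index "ON t(a) WHERE b = 1",
  * a scan clause "b = 1" is implied by the index predicate (which is part
  * of the indexquals returned above), so it can be dropped from qpqual; it
  * is still rechecked via bitmapqualorig if the bitmap becomes lossy.
  */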
3148  qpqual = NIL;
3149  foreach(l, scan_clauses)
3150  {
3151  RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
3152  Node *clause = (Node *) rinfo->clause;
3153 
3154  if (rinfo->pseudoconstant)
3155  continue; /* we may drop pseudoconstants here */
3156  if (list_member(indexquals, clause))
3157  continue; /* simple duplicate */
3158  if (rinfo->parent_ec && list_member_ptr(indexECs, rinfo->parent_ec))
3159  continue; /* derived from same EquivalenceClass */
3160  if (!contain_mutable_functions(clause) &&
3161  predicate_implied_by(list_make1(clause), indexquals, false))
3162  continue; /* provably implied by indexquals */
3163  qpqual = lappend(qpqual, rinfo);
3164  }
3165 
3166  /* Sort clauses into best execution order */
3167  qpqual = order_qual_clauses(root, qpqual);
3168 
3169  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3170  qpqual = extract_actual_clauses(qpqual, false);
3171 
3172  /*
3173  * When dealing with special operators, we will at this point have
3174  * duplicate clauses in qpqual and bitmapqualorig. We may as well drop
3175  * 'em from bitmapqualorig, since there's no point in making the tests
3176  * twice.
3177  */
3178  bitmapqualorig = list_difference_ptr(bitmapqualorig, qpqual);
3179 
3180  /*
3181  * We have to replace any outer-relation variables with nestloop params in
3182  * the qpqual and bitmapqualorig expressions. (This was already done for
3183  * expressions attached to plan nodes in the bitmapqualplan tree.)
3184  */
3185  if (best_path->path.param_info)
3186  {
3187  qpqual = (List *)
3188  replace_nestloop_params(root, (Node *) qpqual);
3189  bitmapqualorig = (List *)
3190  replace_nestloop_params(root, (Node *) bitmapqualorig);
3191  }
3192 
3193  /* Finally ready to build the plan node */
3194  scan_plan = make_bitmap_heapscan(tlist,
3195  qpqual,
3196  bitmapqualplan,
3197  bitmapqualorig,
3198  baserelid);
3199 
3200  copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
3201 
3202  return scan_plan;
3203 }
3204 
3205 /*
3206  * Given a bitmapqual tree, generate the Plan tree that implements it
3207  *
3208  * As byproducts, we also return in *qual and *indexqual the qual lists
3209  * (in implicit-AND form, without RestrictInfos) describing the original index
3210  * conditions and the generated indexqual conditions. (These are the same in
3211  * simple cases, but when special index operators are involved, the former
3212  * list includes the special conditions while the latter includes the actual
3213  * indexable conditions derived from them.) Both lists include partial-index
3214  * predicates, because we have to recheck predicates as well as index
3215  * conditions if the bitmap scan becomes lossy.
3216  *
3217  * In addition, we return a list of EquivalenceClass pointers for all the
3218  * top-level indexquals that were possibly-redundantly derived from ECs.
3219  * This allows removal of scan_clauses that are redundant with such quals.
3220  * (We do not attempt to detect such redundancies for quals that are within
3221  * OR subtrees. This could be done in a less hacky way if we returned the
3222  * indexquals in RestrictInfo form, but that would be slower and still pretty
3223  * messy, since we'd have to build new RestrictInfos in many cases.)
3224  */
3225 static Plan *
 3226 create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
3227  List **qual, List **indexqual, List **indexECs)
3228 {
3229  Plan *plan;
3230 
3231  if (IsA(bitmapqual, BitmapAndPath))
3232  {
3233  BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
3234  List *subplans = NIL;
3235  List *subquals = NIL;
3236  List *subindexquals = NIL;
3237  List *subindexECs = NIL;
3238  ListCell *l;
3239 
3240  /*
3241  * There may well be redundant quals among the subplans, since a
3242  * top-level WHERE qual might have gotten used to form several
3243  * different index quals. We don't try exceedingly hard to eliminate
3244  * redundancies, but we do eliminate obvious duplicates by using
3245  * list_concat_unique.
3246  */
3247  foreach(l, apath->bitmapquals)
3248  {
3249  Plan *subplan;
3250  List *subqual;
3251  List *subindexqual;
3252  List *subindexEC;
3253 
3254  subplan = create_bitmap_subplan(root, (Path *) lfirst(l),
3255  &subqual, &subindexqual,
3256  &subindexEC);
3257  subplans = lappend(subplans, subplan);
3258  subquals = list_concat_unique(subquals, subqual);
3259  subindexquals = list_concat_unique(subindexquals, subindexqual);
3260  /* Duplicates in indexECs aren't worth getting rid of */
3261  subindexECs = list_concat(subindexECs, subindexEC);
3262  }
3263  plan = (Plan *) make_bitmap_and(subplans);
3264  plan->startup_cost = apath->path.startup_cost;
3265  plan->total_cost = apath->path.total_cost;
3266  plan->plan_rows =
3267  clamp_row_est(apath->bitmapselectivity * apath->path.parent->tuples);
3268  plan->plan_width = 0; /* meaningless */
3269  plan->parallel_aware = false;
3270  plan->parallel_safe = apath->path.parallel_safe;
3271  *qual = subquals;
3272  *indexqual = subindexquals;
3273  *indexECs = subindexECs;
3274  }
3275  else if (IsA(bitmapqual, BitmapOrPath))
3276  {
3277  BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
3278  List *subplans = NIL;
3279  List *subquals = NIL;
3280  List *subindexquals = NIL;
3281  bool const_true_subqual = false;
3282  bool const_true_subindexqual = false;
3283  ListCell *l;
3284 
3285  /*
3286  * Here, we only detect qual-free subplans. A qual-free subplan would
3287  * cause us to generate "... OR true ..." which we may as well reduce
3288  * to just "true". We do not try to eliminate redundant subclauses
3289  * because (a) it's not as likely as in the AND case, and (b) we might
3290  * well be working with hundreds or even thousands of OR conditions,
3291  * perhaps from a long IN list. The performance of list_append_unique
3292  * would be unacceptable.
3293  */
3294  foreach(l, opath->bitmapquals)
3295  {
3296  Plan *subplan;
3297  List *subqual;
3298  List *subindexqual;
3299  List *subindexEC;
3300 
3301  subplan = create_bitmap_subplan(root, (Path *) lfirst(l),
3302  &subqual, &subindexqual,
3303  &subindexEC);
3304  subplans = lappend(subplans, subplan);
3305  if (subqual == NIL)
3306  const_true_subqual = true;
3307  else if (!const_true_subqual)
3308  subquals = lappend(subquals,
3309  make_ands_explicit(subqual));
3310  if (subindexqual == NIL)
3311  const_true_subindexqual = true;
3312  else if (!const_true_subindexqual)
3313  subindexquals = lappend(subindexquals,
3314  make_ands_explicit(subindexqual));
3315  }
3316 
3317  /*
3318  * In the presence of ScalarArrayOpExpr quals, we might have built
3319  * BitmapOrPaths with just one subpath; don't add an OR step.
3320  */
3321  if (list_length(subplans) == 1)
3322  {
3323  plan = (Plan *) linitial(subplans);
3324  }
3325  else
3326  {
3327  plan = (Plan *) make_bitmap_or(subplans);
3328  plan->startup_cost = opath->path.startup_cost;
3329  plan->total_cost = opath->path.total_cost;
3330  plan->plan_rows =
3331  clamp_row_est(opath->bitmapselectivity * opath->path.parent->tuples);
3332  plan->plan_width = 0; /* meaningless */
3333  plan->parallel_aware = false;
3334  plan->parallel_safe = opath->path.parallel_safe;
3335  }
3336 
3337  /*
3338  * If there were constant-TRUE subquals, the OR reduces to constant
3339  * TRUE. Also, avoid generating one-element ORs, which could happen
3340  * due to redundancy elimination or ScalarArrayOpExpr quals.
3341  */
3342  if (const_true_subqual)
3343  *qual = NIL;
3344  else if (list_length(subquals) <= 1)
3345  *qual = subquals;
3346  else
3347  *qual = list_make1(make_orclause(subquals));
3348  if (const_true_subindexqual)
3349  *indexqual = NIL;
3350  else if (list_length(subindexquals) <= 1)
3351  *indexqual = subindexquals;
3352  else
3353  *indexqual = list_make1(make_orclause(subindexquals));
3354  *indexECs = NIL;
3355  }
3356  else if (IsA(bitmapqual, IndexPath))
3357  {
3358  IndexPath *ipath = (IndexPath *) bitmapqual;
3359  IndexScan *iscan;
3360  List *subquals;
3361  List *subindexquals;
3362  List *subindexECs;
3363  ListCell *l;
3364 
3365  /* Use the regular indexscan plan build machinery... */
3366  iscan = castNode(IndexScan,
3367  create_indexscan_plan(root, ipath,
3368  NIL, NIL, false));
3369  /* then convert to a bitmap indexscan */
3370  plan = (Plan *) make_bitmap_indexscan(iscan->scan.scanrelid,
3371  iscan->indexid,
3372  iscan->indexqual,
3373  iscan->indexqualorig);
3374  /* and set its cost/width fields appropriately */
3375  plan->startup_cost = 0.0;
3376  plan->total_cost = ipath->indextotalcost;
3377  plan->plan_rows =
3378  clamp_row_est(ipath->indexselectivity * ipath->path.parent->tuples);
3379  plan->plan_width = 0; /* meaningless */
3380  plan->parallel_aware = false;
3381  plan->parallel_safe = ipath->path.parallel_safe;
3382  /* Extract original index clauses, actual index quals, relevant ECs */
3383  subquals = NIL;
3384  subindexquals = NIL;
3385  subindexECs = NIL;
3386  foreach(l, ipath->indexclauses)
3387  {
3388  IndexClause *iclause = (IndexClause *) lfirst(l);
3389  RestrictInfo *rinfo = iclause->rinfo;
3390 
3391  Assert(!rinfo->pseudoconstant);
3392  subquals = lappend(subquals, rinfo->clause);
3393  subindexquals = list_concat(subindexquals,
3394  get_actual_clauses(iclause->indexquals));
3395  if (rinfo->parent_ec)
3396  subindexECs = lappend(subindexECs, rinfo->parent_ec);
3397  }
3398  /* We can add any index predicate conditions, too */
3399  foreach(l, ipath->indexinfo->indpred)
3400  {
3401  Expr *pred = (Expr *) lfirst(l);
3402 
3403  /*
3404  * We know that the index predicate must have been implied by the
3405  * query condition as a whole, but it may or may not be implied by
3406  * the conditions that got pushed into the bitmapqual. Avoid
3407  * generating redundant conditions.
3408  */
3409  if (!predicate_implied_by(list_make1(pred), subquals, false))
3410  {
3411  subquals = lappend(subquals, pred);
3412  subindexquals = lappend(subindexquals, pred);
3413  }
3414  }
3415  *qual = subquals;
3416  *indexqual = subindexquals;
3417  *indexECs = subindexECs;
3418  }
3419  else
3420  {
3421  elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
3422  plan = NULL; /* keep compiler quiet */
3423  }
3424 
3425  return plan;
3426 }
3427 
3428 /*
3429  * create_tidscan_plan
3430  * Returns a tidscan plan for the base relation scanned by 'best_path'
3431  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3432  */
3433 static TidScan *
 3434 create_tidscan_plan(PlannerInfo *root, TidPath *best_path,
3435  List *tlist, List *scan_clauses)
3436 {
3437  TidScan *scan_plan;
3438  Index scan_relid = best_path->path.parent->relid;
3439  List *tidquals = best_path->tidquals;
3440 
3441  /* it should be a base rel... */
3442  Assert(scan_relid > 0);
3443  Assert(best_path->path.parent->rtekind == RTE_RELATION);
3444 
3445  /*
3446  * The qpqual list must contain all restrictions not enforced by the
3447  * tidquals list. Since tidquals has OR semantics, we have to be careful
3448  * about matching it up to scan_clauses. It's convenient to handle the
3449  * single-tidqual case separately from the multiple-tidqual case. In the
3450  * single-tidqual case, we look through the scan_clauses while they are
3451  * still in RestrictInfo form, and drop any that are redundant with the
3452  * tidqual.
3453  *
3454  * In normal cases simple pointer equality checks will be enough to spot
3455  * duplicate RestrictInfos, so we try that first.
3456  *
3457  * Another common case is that a scan_clauses entry is generated from the
3458  * same EquivalenceClass as some tidqual, and is therefore redundant with
3459  * it, though not equal.
3460  *
3461  * Unlike indexpaths, we don't bother with predicate_implied_by(); the
3462  * number of cases where it could win is pretty small.
3463  */
3464  if (list_length(tidquals) == 1)
3465  {
3466  List *qpqual = NIL;
3467  ListCell *l;
3468 
3469  foreach(l, scan_clauses)
3470  {
3471  RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
3472 
3473  if (rinfo->pseudoconstant)
3474  continue; /* we may drop pseudoconstants here */
3475  if (list_member_ptr(tidquals, rinfo))
3476  continue; /* simple duplicate */
3477  if (is_redundant_derived_clause(rinfo, tidquals))
3478  continue; /* derived from same EquivalenceClass */
3479  qpqual = lappend(qpqual, rinfo);
3480  }
3481  scan_clauses = qpqual;
3482  }
3483 
3484  /* Sort clauses into best execution order */
3485  scan_clauses = order_qual_clauses(root, scan_clauses);
3486 
3487  /* Reduce RestrictInfo lists to bare expressions; ignore pseudoconstants */
3488  tidquals = extract_actual_clauses(tidquals, false);
3489  scan_clauses = extract_actual_clauses(scan_clauses, false);
3490 
3491  /*
3492  * If we have multiple tidquals, it's more convenient to remove duplicate
3493  * scan_clauses after stripping the RestrictInfos. In this situation,
3494  * because the tidquals represent OR sub-clauses, they could not have come
3495  * from EquivalenceClasses so we don't have to worry about matching up
3496  * non-identical clauses. On the other hand, because tidpath.c will have
3497  * extracted those sub-clauses from some OR clause and built its own list,
3498  * we will certainly not have pointer equality to any scan clause. So
3499  * convert the tidquals list to an explicit OR clause and see if we can
3500  * match it via equal() to any scan clause.
3501  */
3502  if (list_length(tidquals) > 1)
3503  scan_clauses = list_difference(scan_clauses,
3504  list_make1(make_orclause(tidquals)));
3505 
3506  /* Replace any outer-relation variables with nestloop params */
3507  if (best_path->path.param_info)
3508  {
3509  tidquals = (List *)
3510  replace_nestloop_params(root, (Node *) tidquals);
3511  scan_clauses = (List *)
3512  replace_nestloop_params(root, (Node *) scan_clauses);
3513  }
3514 
3515  scan_plan = make_tidscan(tlist,
3516  scan_clauses,
3517  scan_relid,
3518  tidquals);
3519 
3520  copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
3521 
3522  return scan_plan;
3523 }
3524 
3525 /*
3526  * create_tidrangescan_plan
3527  * Returns a tidrangescan plan for the base relation scanned by 'best_path'
3528  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3529  */
3530 static TidRangeScan *
3531 create_tidrangescan_plan(PlannerInfo *root, TidRangePath *best_path,
3532  List *tlist, List *scan_clauses)
3533 {
3534  TidRangeScan *scan_plan;
3535  Index scan_relid = best_path->path.parent->relid;
3536  List *tidrangequals = best_path->tidrangequals;
3537 
3538  /* it should be a base rel... */
3539  Assert(scan_relid > 0);
3540  Assert(best_path->path.parent->rtekind == RTE_RELATION);
3541 
3542  /*
3543  * The qpqual list must contain all restrictions not enforced by the
3544  * tidrangequals list. tidrangequals has AND semantics, so we can simply
3545  * remove any qual that appears in it.
3546  */
3547  {
3548  List *qpqual = NIL;
3549  ListCell *l;
3550 
3551  foreach(l, scan_clauses)
3552  {
3553  RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
3554 
3555  if (rinfo->pseudoconstant)
3556  continue; /* we may drop pseudoconstants here */
3557  if (list_member_ptr(tidrangequals, rinfo))
3558  continue; /* simple duplicate */
3559  qpqual = lappend(qpqual, rinfo);
3560  }
3561  scan_clauses = qpqual;
3562  }
3563 
3564  /* Sort clauses into best execution order */
3565  scan_clauses = order_qual_clauses(root, scan_clauses);
3566 
3567  /* Reduce RestrictInfo lists to bare expressions; ignore pseudoconstants */
3568  tidrangequals = extract_actual_clauses(tidrangequals, false);
3569  scan_clauses = extract_actual_clauses(scan_clauses, false);
3570 
3571  /* Replace any outer-relation variables with nestloop params */
3572  if (best_path->path.param_info)
3573  {
3574  tidrangequals = (List *)
3575  replace_nestloop_params(root, (Node *) tidrangequals);
3576  scan_clauses = (List *)
3577  replace_nestloop_params(root, (Node *) scan_clauses);
3578  }
3579 
3580  scan_plan = make_tidrangescan(tlist,
3581  scan_clauses,
3582  scan_relid,
3583  tidrangequals);
3584 
3585  copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
3586 
3587  return scan_plan;
3588 }
3589 
3590 /*
3591  * create_subqueryscan_plan
3592  * Returns a subqueryscan plan for the base relation scanned by 'best_path'
3593  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3594  */
3595 static SubqueryScan *
3596 create_subqueryscan_plan(PlannerInfo *root, SubqueryScanPath *best_path,
3597  List *tlist, List *scan_clauses)
3598 {
3599  SubqueryScan *scan_plan;
3600  RelOptInfo *rel = best_path->path.parent;
3601  Index scan_relid = rel->relid;
3602  Plan *subplan;
3603 
3604  /* it should be a subquery base rel... */
3605  Assert(scan_relid > 0);
3606  Assert(rel->rtekind == RTE_SUBQUERY);
3607 
3608  /*
3609  * Recursively create Plan from Path for subquery. Since we are entering
3610  * a different planner context (subroot), recurse to create_plan not
3611  * create_plan_recurse.
3612  */
3613  subplan = create_plan(rel->subroot, best_path->subpath);
3614 
3615  /* Sort clauses into best execution order */
3616  scan_clauses = order_qual_clauses(root, scan_clauses);
3617 
3618  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3619  scan_clauses = extract_actual_clauses(scan_clauses, false);
3620 
3621  /* Replace any outer-relation variables with nestloop params */
3622  if (best_path->path.param_info)
3623  {
3624  scan_clauses = (List *)
3625  replace_nestloop_params(root, (Node *) scan_clauses);
3626  process_subquery_nestloop_params(root,
3627  rel->subplan_params);
3628  }
3629 
3630  scan_plan = make_subqueryscan(tlist,
3631  scan_clauses,
3632  scan_relid,
3633  subplan);
3634 
3635  copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
3636 
3637  return scan_plan;
3638 }
3639 
3640 /*
3641  * create_functionscan_plan
3642  * Returns a functionscan plan for the base relation scanned by 'best_path'
3643  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3644  */
3645 static FunctionScan *
3646 create_functionscan_plan(PlannerInfo *root, Path *best_path,
3647  List *tlist, List *scan_clauses)
3648 {
3649  FunctionScan *scan_plan;
3650  Index scan_relid = best_path->parent->relid;
3651  RangeTblEntry *rte;
3652  List *functions;
3653 
3654  /* it should be a function base rel... */
3655  Assert(scan_relid > 0);
3656  rte = planner_rt_fetch(scan_relid, root);
3657  Assert(rte->rtekind == RTE_FUNCTION);
3658  functions = rte->functions;
3659 
3660  /* Sort clauses into best execution order */
3661  scan_clauses = order_qual_clauses(root, scan_clauses);
3662 
3663  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3664  scan_clauses = extract_actual_clauses(scan_clauses, false);
3665 
3666  /* Replace any outer-relation variables with nestloop params */
3667  if (best_path->param_info)
3668  {
3669  scan_clauses = (List *)
3670  replace_nestloop_params(root, (Node *) scan_clauses);
3671  /* The function expressions could contain nestloop params, too */
3672  functions = (List *) replace_nestloop_params(root, (Node *) functions);
3673  }
3674 
3675  scan_plan = make_functionscan(tlist, scan_clauses, scan_relid,
3676  functions, rte->funcordinality);
3677 
3678  copy_generic_path_info(&scan_plan->scan.plan, best_path);
3679 
3680  return scan_plan;
3681 }
3682 
3683 /*
3684  * create_tablefuncscan_plan
3685  * Returns a tablefuncscan plan for the base relation scanned by 'best_path'
3686  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3687  */
3688 static TableFuncScan *
3689 create_tablefuncscan_plan(PlannerInfo *root, Path *best_path,
3690  List *tlist, List *scan_clauses)
3691 {
3692  TableFuncScan *scan_plan;
3693  Index scan_relid = best_path->parent->relid;
3694  RangeTblEntry *rte;
3695  TableFunc *tablefunc;
3696 
3697  /* it should be a function base rel... */
3698  Assert(scan_relid > 0);
3699  rte = planner_rt_fetch(scan_relid, root);
3700  Assert(rte->rtekind == RTE_TABLEFUNC);
3701  tablefunc = rte->tablefunc;
3702 
3703  /* Sort clauses into best execution order */
3704  scan_clauses = order_qual_clauses(root, scan_clauses);
3705 
3706  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3707  scan_clauses = extract_actual_clauses(scan_clauses, false);
3708 
3709  /* Replace any outer-relation variables with nestloop params */
3710  if (best_path->param_info)
3711  {
3712  scan_clauses = (List *)
3713  replace_nestloop_params(root, (Node *) scan_clauses);
3714  /* The function expressions could contain nestloop params, too */
3715  tablefunc = (TableFunc *) replace_nestloop_params(root, (Node *) tablefunc);
3716  }
3717 
3718  scan_plan = make_tablefuncscan(tlist, scan_clauses, scan_relid,
3719  tablefunc);
3720 
3721  copy_generic_path_info(&scan_plan->scan.plan, best_path);
3722 
3723  return scan_plan;
3724 }
3725 
3726 /*
3727  * create_valuesscan_plan
3728  * Returns a valuesscan plan for the base relation scanned by 'best_path'
3729  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3730  */
3731 static ValuesScan *
3732 create_valuesscan_plan(PlannerInfo *root, Path *best_path,
3733  List *tlist, List *scan_clauses)
3734 {
3735  ValuesScan *scan_plan;
3736  Index scan_relid = best_path->parent->relid;
3737  RangeTblEntry *rte;
3738  List *values_lists;
3739 
3740  /* it should be a values base rel... */
3741  Assert(scan_relid > 0);
3742  rte = planner_rt_fetch(scan_relid, root);
3743  Assert(rte->rtekind == RTE_VALUES);
3744  values_lists = rte->values_lists;
3745 
3746  /* Sort clauses into best execution order */
3747  scan_clauses = order_qual_clauses(root, scan_clauses);
3748 
3749  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3750  scan_clauses = extract_actual_clauses(scan_clauses, false);
3751 
3752  /* Replace any outer-relation variables with nestloop params */
3753  if (best_path->param_info)
3754  {
3755  scan_clauses = (List *)
3756  replace_nestloop_params(root, (Node *) scan_clauses);
3757  /* The values lists could contain nestloop params, too */
3758  values_lists = (List *)
3759  replace_nestloop_params(root, (Node *) values_lists);
3760  }
3761 
3762  scan_plan = make_valuesscan(tlist, scan_clauses, scan_relid,
3763  values_lists);
3764 
3765  copy_generic_path_info(&scan_plan->scan.plan, best_path);
3766 
3767  return scan_plan;
3768 }
3769 
3770 /*
3771  * create_ctescan_plan
3772  * Returns a ctescan plan for the base relation scanned by 'best_path'
3773  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3774  */
3775 static CteScan *
3776 create_ctescan_plan(PlannerInfo *root, Path *best_path,
3777  List *tlist, List *scan_clauses)
3778 {
3779  CteScan *scan_plan;
3780  Index scan_relid = best_path->parent->relid;
3781  RangeTblEntry *rte;
3782  SubPlan *ctesplan = NULL;
3783  int plan_id;
3784  int cte_param_id;
3785  PlannerInfo *cteroot;
3786  Index levelsup;
3787  int ndx;
3788  ListCell *lc;
3789 
3790  Assert(scan_relid > 0);
3791  rte = planner_rt_fetch(scan_relid, root);
3792  Assert(rte->rtekind == RTE_CTE);
3793  Assert(!rte->self_reference);
3794 
3795  /*
3796  * Find the referenced CTE, and locate the SubPlan previously made for it.
3797  */
3798  levelsup = rte->ctelevelsup;
3799  cteroot = root;
3800  while (levelsup-- > 0)
3801  {
3802  cteroot = cteroot->parent_root;
3803  if (!cteroot) /* shouldn't happen */
3804  elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
3805  }
3806 
3807  /*
3808  * Note: cte_plan_ids can be shorter than cteList, if we are still working
3809  * on planning the CTEs (ie, this is a side-reference from another CTE).
3810  * So we mustn't use forboth here.
3811  */
3812  ndx = 0;
3813  foreach(lc, cteroot->parse->cteList)
3814  {
3815  CommonTableExpr *cte = (CommonTableExpr *) lfirst(lc);
3816 
3817  if (strcmp(cte->ctename, rte->ctename) == 0)
3818  break;
3819  ndx++;
3820  }
3821  if (lc == NULL) /* shouldn't happen */
3822  elog(ERROR, "could not find CTE \"%s\"", rte->ctename);
3823  if (ndx >= list_length(cteroot->cte_plan_ids))
3824  elog(ERROR, "could not find plan for CTE \"%s\"", rte->ctename);
3825  plan_id = list_nth_int(cteroot->cte_plan_ids, ndx);
3826  Assert(plan_id > 0);
3827  foreach(lc, cteroot->init_plans)
3828  {
3829  ctesplan = (SubPlan *) lfirst(lc);
3830  if (ctesplan->plan_id == plan_id)
3831  break;
3832  }
3833  if (lc == NULL) /* shouldn't happen */
3834  elog(ERROR, "could not find plan for CTE \"%s\"", rte->ctename);
3835 
3836  /*
3837  * We need the CTE param ID, which is the sole member of the SubPlan's
3838  * setParam list.
3839  */
3840  cte_param_id = linitial_int(ctesplan->setParam);
3841 
3842  /* Sort clauses into best execution order */
3843  scan_clauses = order_qual_clauses(root, scan_clauses);
3844 
3845  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3846  scan_clauses = extract_actual_clauses(scan_clauses, false);
3847 
3848  /* Replace any outer-relation variables with nestloop params */
3849  if (best_path->param_info)
3850  {
3851  scan_clauses = (List *)
3852  replace_nestloop_params(root, (Node *) scan_clauses);
3853  }
3854 
3855  scan_plan = make_ctescan(tlist, scan_clauses, scan_relid,
3856  plan_id, cte_param_id);
3857 
3858  copy_generic_path_info(&scan_plan->scan.plan, best_path);
3859 
3860  return scan_plan;
3861 }
3862 
3863 /*
3864  * create_namedtuplestorescan_plan
3865  * Returns a tuplestorescan plan for the base relation scanned by
3866  * 'best_path' with restriction clauses 'scan_clauses' and targetlist
3867  * 'tlist'.
3868  */
3869 static NamedTuplestoreScan *
3870 create_namedtuplestorescan_plan(PlannerInfo *root, Path *best_path,
3871  List *tlist, List *scan_clauses)
3872 {
3873  NamedTuplestoreScan *scan_plan;
3874  Index scan_relid = best_path->parent->relid;
3875  RangeTblEntry *rte;
3876 
3877  Assert(scan_relid > 0);
3878  rte = planner_rt_fetch(scan_relid, root);
3879  Assert(rte->rtekind == RTE_NAMEDTUPLESTORE);
3880 
3881  /* Sort clauses into best execution order */
3882  scan_clauses = order_qual_clauses(root, scan_clauses);
3883 
3884  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3885  scan_clauses = extract_actual_clauses(scan_clauses, false);
3886 
3887  /* Replace any outer-relation variables with nestloop params */
3888  if (best_path->param_info)
3889  {
3890  scan_clauses = (List *)
3891  replace_nestloop_params(root, (Node *) scan_clauses);
3892  }
3893 
3894  scan_plan = make_namedtuplestorescan(tlist, scan_clauses, scan_relid,
3895  rte->enrname);
3896 
3897  copy_generic_path_info(&scan_plan->scan.plan, best_path);
3898 
3899  return scan_plan;
3900 }
3901 
3902 /*
3903  * create_resultscan_plan
3904  * Returns a Result plan for the RTE_RESULT base relation scanned by
3905  * 'best_path' with restriction clauses 'scan_clauses' and targetlist
3906  * 'tlist'.
3907  */
3908 static Result *
3909 create_resultscan_plan(PlannerInfo *root, Path *best_path,
3910  List *tlist, List *scan_clauses)
3911 {
3912  Result *scan_plan;
3913  Index scan_relid = best_path->parent->relid;
3914  RangeTblEntry *rte PG_USED_FOR_ASSERTS_ONLY;
3915 
3916  Assert(scan_relid > 0);
3917  rte = planner_rt_fetch(scan_relid, root);
3918  Assert(rte->rtekind == RTE_RESULT);
3919 
3920  /* Sort clauses into best execution order */
3921  scan_clauses = order_qual_clauses(root, scan_clauses);
3922 
3923  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3924  scan_clauses = extract_actual_clauses(scan_clauses, false);
3925 
3926  /* Replace any outer-relation variables with nestloop params */
3927  if (best_path->param_info)
3928  {
3929  scan_clauses = (List *)
3930  replace_nestloop_params(root, (Node *) scan_clauses);
3931  }
3932 
3933  scan_plan = make_result(tlist, (Node *) scan_clauses, NULL);
3934 
3935  copy_generic_path_info(&scan_plan->plan, best_path);
3936 
3937  return scan_plan;
3938 }
3939 
3940 /*
3941  * create_worktablescan_plan
3942  * Returns a worktablescan plan for the base relation scanned by 'best_path'
3943  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3944  */
3945 static WorkTableScan *
3946 create_worktablescan_plan(PlannerInfo *root, Path *best_path,
3947  List *tlist, List *scan_clauses)
3948 {
3949  WorkTableScan *scan_plan;
3950  Index scan_relid = best_path->parent->relid;
3951  RangeTblEntry *rte;
3952  Index levelsup;
3953  PlannerInfo *cteroot;
3954 
3955  Assert(scan_relid > 0);
3956  rte = planner_rt_fetch(scan_relid, root);
3957  Assert(rte->rtekind == RTE_CTE);
3958  Assert(rte->self_reference);
3959 
3960  /*
3961  * We need to find the worktable param ID, which is in the plan level
3962  * that's processing the recursive UNION, which is one level *below* where
3963  * the CTE comes from.
3964  */
3965  levelsup = rte->ctelevelsup;
3966  if (levelsup == 0) /* shouldn't happen */
3967  elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
3968  levelsup--;
3969  cteroot = root;
3970  while (levelsup-- > 0)
3971  {
3972  cteroot = cteroot->parent_root;
3973  if (!cteroot) /* shouldn't happen */
3974  elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
3975  }
3976  if (cteroot->wt_param_id < 0) /* shouldn't happen */
3977  elog(ERROR, "could not find param ID for CTE \"%s\"", rte->ctename);
3978 
3979  /* Sort clauses into best execution order */
3980  scan_clauses = order_qual_clauses(root, scan_clauses);
3981 
3982  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3983  scan_clauses = extract_actual_clauses(scan_clauses, false);
3984 
3985  /* Replace any outer-relation variables with nestloop params */
3986  if (best_path->param_info)
3987  {
3988  scan_clauses = (List *)
3989  replace_nestloop_params(root, (Node *) scan_clauses);
3990  }
3991 
3992  scan_plan = make_worktablescan(tlist, scan_clauses, scan_relid,
3993  cteroot->wt_param_id);
3994 
3995  copy_generic_path_info(&scan_plan->scan.plan, best_path);
3996 
3997  return scan_plan;
3998 }
3999 
4000 /*
4001  * create_foreignscan_plan
4002  * Returns a foreignscan plan for the relation scanned by 'best_path'
4003  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
4004  */
4005 static ForeignScan *
4006 create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
4007  List *tlist, List *scan_clauses)
4008 {
4009  ForeignScan *scan_plan;
4010  RelOptInfo *rel = best_path->path.parent;
4011  Index scan_relid = rel->relid;
4012  Oid rel_oid = InvalidOid;
4013  Plan *outer_plan = NULL;
4014 
4015  Assert(rel->fdwroutine != NULL);
4016 
4017  /* transform the child path if any */
4018  if (best_path->fdw_outerpath)
4019  outer_plan = create_plan_recurse(root, best_path->fdw_outerpath,
4020  CP_EXACT_TLIST);
4021 
4022  /*
4023  * If we're scanning a base relation, fetch its OID. (Irrelevant if
4024  * scanning a join relation.)
4025  */
4026  if (scan_relid > 0)
4027  {
4028  RangeTblEntry *rte;
4029 
4030  Assert(rel->rtekind == RTE_RELATION);
4031  rte = planner_rt_fetch(scan_relid, root);
4032  Assert(rte->rtekind == RTE_RELATION);
4033  rel_oid = rte->relid;
4034  }
4035 
4036  /*
4037  * Sort clauses into best execution order. We do this first since the FDW
4038  * might have more info than we do and wish to adjust the ordering.
4039  */
4040  scan_clauses = order_qual_clauses(root, scan_clauses);
4041 
4042  /*
4043  * Let the FDW perform its processing on the restriction clauses and
4044  * generate the plan node. Note that the FDW might remove restriction
4045  * clauses that it intends to execute remotely, or even add more (if it
4046  * has selected some join clauses for remote use but also wants them
4047  * rechecked locally).
4048  */
4049  scan_plan = rel->fdwroutine->GetForeignPlan(root, rel, rel_oid,
4050  best_path,
4051  tlist, scan_clauses,
4052  outer_plan);
4053 
4054  /* Copy cost data from Path to Plan; no need to make FDW do this */
4055  copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
4056 
4057  /* Copy foreign server OID; likewise, no need to make FDW do this */
4058  scan_plan->fs_server = rel->serverid;
4059 
4060  /*
4061  * Likewise, copy the relids that are represented by this foreign scan. An
4062  * upper rel doesn't have relids set, but it covers all the base relations
4063  * participating in the underlying scan, so use root's all_baserels.
4064  */
4065  if (rel->reloptkind == RELOPT_UPPER_REL)
4066  scan_plan->fs_relids = root->all_baserels;
4067  else
4068  scan_plan->fs_relids = best_path->path.parent->relids;
4069 
4070  /*
4071  * If this is a foreign join, and to make it valid to push down we had to
4072  * assume that the current user is the same as some user explicitly named
4073  * in the query, mark the finished plan as depending on the current user.
4074  */
4075  if (rel->useridiscurrent)
4076  root->glob->dependsOnRole = true;
4077 
4078  /*
4079  * Replace any outer-relation variables with nestloop params in the qual,
4080  * fdw_exprs and fdw_recheck_quals expressions. We do this last so that
4081  * the FDW doesn't have to be involved. (Note that parts of fdw_exprs or
4082  * fdw_recheck_quals could have come from join clauses, so doing this
4083  * beforehand on the scan_clauses wouldn't work.) We assume
4084  * fdw_scan_tlist contains no such variables.
4085  */
4086  if (best_path->path.param_info)
4087  {
4088  scan_plan->scan.plan.qual = (List *)
4089  replace_nestloop_params(root, (Node *) scan_plan->scan.plan.qual);
4090  scan_plan->fdw_exprs = (List *)
4091  replace_nestloop_params(root, (Node *) scan_plan->fdw_exprs);
4092  scan_plan->fdw_recheck_quals = (List *)
4093  replace_nestloop_params(root,
4094  (Node *) scan_plan->fdw_recheck_quals);
4095  }
4096 
4097  /*
4098  * If rel is a base relation, detect whether any system columns are
4099  * requested from the rel. (If rel is a join relation, rel->relid will be
4100  * 0, but there can be no Var with relid 0 in the rel's targetlist or the
4101  * restriction clauses, so we skip this in that case. Note that any such
4102  * columns in base relations that were joined are assumed to be contained
4103  * in fdw_scan_tlist.) This is a bit of a kluge and might go away
4104  * someday, so we intentionally leave it out of the API presented to FDWs.
4105  */
4106  scan_plan->fsSystemCol = false;
4107  if (scan_relid > 0)
4108  {
4109  Bitmapset *attrs_used = NULL;
4110  ListCell *lc;
4111  int i;
4112 
4113  /*
4114  * First, examine all the attributes needed for joins or final output.
4115  * Note: we must look at rel's targetlist, not the attr_needed data,
4116  * because attr_needed isn't computed for inheritance child rels.
4117  */
4118  pull_varattnos((Node *) rel->reltarget->exprs, scan_relid, &attrs_used);
4119 
4120  /* Add all the attributes used by restriction clauses. */
4121  foreach(lc, rel->baserestrictinfo)
4122  {
4123  RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
4124 
4125  pull_varattnos((Node *) rinfo->clause, scan_relid, &attrs_used);
4126  }
4127 
4128  /* Now, are any system columns requested from rel? */
4129  for (i = FirstLowInvalidHeapAttributeNumber + 1; i < 0; i++)
4130  {
4131  if (bms_is_member(i - FirstLowInvalidHeapAttributeNumber, attrs_used))
4132  {
4133  scan_plan->fsSystemCol = true;
4134  break;
4135  }
4136  }
4137 
4138  bms_free(attrs_used);
4139  }
4140 
4141  return scan_plan;
4142 }
4143 
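The FDW hand-off above is easiest to see from the provider's side. Below is a minimal, hypothetical GetForeignPlan callback (the "demo" names are invented and not taken from any shipped FDW, and the usual headers foreign/fdwapi.h, optimizer/planmain.h, and optimizer/restrictinfo.h are assumed): it keeps every restriction clause as a locally checked qual and ships nothing to the remote side, relying on create_foreignscan_plan to copy costs, fs_server, and fs_relids afterwards.

static ForeignScan *
demoGetForeignPlan(PlannerInfo *root,
                   RelOptInfo *baserel,
                   Oid foreigntableid,
                   ForeignPath *best_path,
                   List *tlist,
                   List *scan_clauses,
                   Plan *outer_plan)
{
    /* Strip RestrictInfos; every clause will be rechecked locally */
    scan_clauses = extract_actual_clauses(scan_clauses, false);

    /* No remote expressions, private state, or recheck quals in this sketch */
    return make_foreignscan(tlist,
                            scan_clauses,   /* qpqual: evaluated locally */
                            baserel->relid,
                            NIL,            /* fdw_exprs */
                            NIL,            /* fdw_private */
                            NIL,            /* fdw_scan_tlist */
                            NIL,            /* fdw_recheck_quals */
                            outer_plan);
}

A real FDW would instead decide per clause whether to execute it remotely, and would record whatever it needs at execution time in fdw_private.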
4144 /*
4145  * create_customscan_plan
4146  *
4147  * Transform a CustomPath into a Plan.
4148  */
4149 static CustomScan *
4150 create_customscan_plan(PlannerInfo *root, CustomPath *best_path,
4151  List *tlist, List *scan_clauses)
4152 {
4153  CustomScan *cplan;
4154  RelOptInfo *rel = best_path->path.parent;
4155  List *custom_plans = NIL;
4156  ListCell *lc;
4157 
4158  /* Recursively transform child paths. */
4159  foreach(lc, best_path->custom_paths)
4160  {
4161  Plan *plan = create_plan_recurse(root, (Path *) lfirst(lc),
4162  CP_EXACT_TLIST);
4163 
4164  custom_plans = lappend(custom_plans, plan);
4165  }
4166 
4167  /*
4168  * Sort clauses into the best execution order, although the custom-scan
4169  * provider can reorder them again.
4170  */
4171  scan_clauses = order_qual_clauses(root, scan_clauses);
4172 
4173  /*
4174  * Invoke custom plan provider to create the Plan node represented by the
4175  * CustomPath.
4176  */
4177  cplan = castNode(CustomScan,
4178  best_path->methods->PlanCustomPath(root,
4179  rel,
4180  best_path,
4181  tlist,
4182  scan_clauses,
4183  custom_plans));
4184 
4185  /*
4186  * Copy cost data from Path to Plan; no need to make custom-plan providers
4187  * do this
4188  */
4189  copy_generic_path_info(&cplan->scan.plan, &best_path->path);
4190 
4191  /* Likewise, copy the relids that are represented by this custom scan */
4192  cplan->custom_relids = best_path->path.parent->relids;
4193 
4194  /*
4195  * Replace any outer-relation variables with nestloop params in the qual
4196  * and custom_exprs expressions. We do this last so that the custom-plan
4197  * provider doesn't have to be involved. (Note that parts of custom_exprs
4198  * could have come from join clauses, so doing this beforehand on the
4199  * scan_clauses wouldn't work.) We assume custom_scan_tlist contains no
4200  * such variables.
4201  */
4202  if (best_path->path.param_info)
4203  {
4204  cplan->scan.plan.qual = (List *)
4205  replace_nestloop_params(root, (Node *) cplan->scan.plan.qual);
4206  cplan->custom_exprs = (List *)
4207  replace_nestloop_params(root, (Node *) cplan->custom_exprs);
4208  }
4209 
4210  return cplan;
4211 }
4212 
4213 
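The custom-scan hand-off can likewise be illustrated from the provider side. This is a hypothetical PlanCustomPath sketch (demoPlanCustomPath and demo_scan_methods are invented names): the provider fills in the basic CustomScan fields, while create_customscan_plan later copies the cost data, sets custom_relids, and substitutes nestloop params in the qual and custom_exprs.

extern CustomScanMethods demo_scan_methods;  /* provider's exec methods (hypothetical) */

static Plan *
demoPlanCustomPath(PlannerInfo *root,
                   RelOptInfo *rel,
                   CustomPath *best_path,
                   List *tlist,
                   List *clauses,
                   List *custom_plans)
{
    CustomScan *cscan = makeNode(CustomScan);

    /* Basic Plan fields; clauses arrive pre-sorted by order_qual_clauses */
    cscan->scan.plan.targetlist = tlist;
    cscan->scan.plan.qual = extract_actual_clauses(clauses, false);
    cscan->scan.scanrelid = rel->relid;   /* 0 when scanning a join rel */
    cscan->flags = best_path->flags;
    cscan->custom_plans = custom_plans;
    cscan->methods = &demo_scan_methods;

    return (Plan *) cscan;
}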
4214 /*****************************************************************************
4215  *
4216  * JOIN METHODS
4217  *
4218  *****************************************************************************/
4219 
4220 static NestLoop *
4221 create_nestloop_plan(PlannerInfo *root,
4222  NestPath *best_path)
4223 {
4224  NestLoop *join_plan;
4225  Plan *outer_plan;
4226  Plan *inner_plan;
4227  List *tlist = build_path_tlist(root, &best_path->path);
4228  List *joinrestrictclauses = best_path->joinrestrictinfo;
4229  List *joinclauses;
4230  List *otherclauses;
4231  Relids outerrelids;
4232  List *nestParams;
4233  Relids saveOuterRels = root->curOuterRels;
4234 
4235  /* NestLoop can project, so no need to be picky about child tlists */
4236  outer_plan = create_plan_recurse(root, best_path->outerjoinpath, 0);
4237 
4238  /* For a nestloop, include outer relids in curOuterRels for inner side */
4239  root->curOuterRels = bms_union(root->curOuterRels,
4240  best_path->outerjoinpath->parent->relids);
4241 
4242  inner_plan = create_plan_recurse(root, best_path->innerjoinpath, 0);
4243 
4244  /* Restore curOuterRels */
4245  bms_free(root->curOuterRels);
4246  root->curOuterRels = saveOuterRels;
4247 
4248  /* Sort join qual clauses into best execution order */
4249  joinrestrictclauses = order_qual_clauses(root, joinrestrictclauses);
4250 
4251  /* Get the join qual clauses (in plain expression form) */
4252  /* Any pseudoconstant clauses are ignored here */
4253  if (IS_OUTER_JOIN(best_path->jointype))
4254  {
4255  extract_actual_join_clauses(joinrestrictclauses,
4256  best_path->path.parent->relids,
4257  &joinclauses, &otherclauses);
4258  }
4259  else
4260  {
4261  /* We can treat all clauses alike for an inner join */
4262  joinclauses = extract_actual_clauses(joinrestrictclauses, false);
4263  otherclauses = NIL;
4264  }
4265 
4266  /* Replace any outer-relation variables with nestloop params */
4267  if (best_path->path.param_info)
4268  {
4269  joinclauses = (List *)
4270  replace_nestloop_params(root, (Node *) joinclauses);
4271  otherclauses = (List *)
4272  replace_nestloop_params(root, (Node *) otherclauses);
4273  }
4274 
4275  /*
4276  * Identify any nestloop parameters that should be supplied by this join
4277  * node, and remove them from root->curOuterParams.
4278  */
4279  outerrelids = best_path->outerjoinpath->parent->relids;
4280  nestParams = identify_current_nestloop_params(root, outerrelids);
4281 
4282  join_plan = make_nestloop(tlist,
4283  joinclauses,
4284  otherclauses,
4285  nestParams,
4286  outer_plan,
4287  inner_plan,
4288  best_path->jointype,
4289  best_path->inner_unique);
4290 
4291  copy_generic_path_info(&join_plan->join.plan, &best_path->path);
4292 
4293  return join_plan;
4294 }
4295 
4296 static MergeJoin *
4297 create_mergejoin_plan(PlannerInfo *root,
4298  MergePath *best_path)
4299 {
4300  MergeJoin *join_plan;
4301  Plan *outer_plan;
4302  Plan *inner_plan;
4303  List *tlist = build_path_tlist(root, &best_path->jpath.path);
4304  List *joinclauses;
4305  List *otherclauses;
4306  List *mergeclauses;
4307  List *outerpathkeys;
4308  List *innerpathkeys;
4309  int nClauses;
4310  Oid *mergefamilies;
4311  Oid *mergecollations;
4312  int *mergestrategies;
4313  bool *mergenullsfirst;
4314  PathKey *opathkey;
4315  EquivalenceClass *opeclass;
4316  int i;
4317  ListCell *lc;
4318  ListCell *lop;
4319  ListCell *lip;
4320  Path *outer_path = best_path->jpath.outerjoinpath;
4321  Path *inner_path = best_path->jpath.innerjoinpath;
4322 
4323  /*
4324  * MergeJoin can project, so we don't have to demand exact tlists from the
4325  * inputs. However, if we're intending to sort an input's result, it's
4326  * best to request a small tlist so we aren't sorting more data than
4327  * necessary.
4328  */
4329  outer_plan = create_plan_recurse(root, best_path->jpath.outerjoinpath,
4330  (best_path->outersortkeys != NIL) ? CP_SMALL_TLIST : 0);
4331 
4332  inner_plan = create_plan_recurse(root, best_path->jpath.innerjoinpath,
4333  (best_path->innersortkeys != NIL) ? CP_SMALL_TLIST : 0);
4334 
4335  /* Sort join qual clauses into best execution order */
4336  /* NB: do NOT reorder the mergeclauses */
4337  joinclauses = order_qual_clauses(root, best_path->jpath.joinrestrictinfo);
4338 
4339  /* Get the join qual clauses (in plain expression form) */
4340  /* Any pseudoconstant clauses are ignored here */
4341  if (IS_OUTER_JOIN(best_path->jpath.jointype))
4342  {
4343  extract_actual_join_clauses(joinclauses,
4344  best_path->jpath.path.parent->relids,
4345  &joinclauses, &otherclauses);
4346  }
4347  else
4348  {
4349  /* We can treat all clauses alike for an inner join */
4350  joinclauses = extract_actual_clauses(joinclauses, false);
4351  otherclauses = NIL;
4352  }
4353 
4354  /*
4355  * Remove the mergeclauses from the list of join qual clauses, leaving the
4356  * list of quals that must be checked as qpquals.
4357  */
4358  mergeclauses = get_actual_clauses(best_path->path_mergeclauses);
4359  joinclauses = list_difference(joinclauses, mergeclauses);
4360 
4361  /*
4362  * Replace any outer-relation variables with nestloop params. There
4363  * should not be any in the mergeclauses.
4364  */
4365  if (best_path->jpath.path.param_info)
4366  {
4367  joinclauses = (List *)
4368  replace_nestloop_params(root, (Node *) joinclauses);
4369  otherclauses = (List *)
4370  replace_nestloop_params(root, (Node *) otherclauses);
4371  }
4372 
4373  /*
4374  * Rearrange mergeclauses, if needed, so that the outer variable is always
4375  * on the left; mark the mergeclause restrictinfos with correct
4376  * outer_is_left status.
4377  */
4378  mergeclauses = get_switched_clauses(best_path->path_mergeclauses,
4379  best_path->jpath.outerjoinpath->parent->relids);
4380 
4381  /*
4382  * Create explicit sort nodes for the outer and inner paths if necessary.
4383  */
4384  if (best_path->outersortkeys)
4385  {
4386  Relids outer_relids = outer_path->parent->relids;
4387  Sort *sort = make_sort_from_pathkeys(outer_plan,
4388  best_path->outersortkeys,
4389  outer_relids);
4390 
4391  label_sort_with_costsize(root, sort, -1.0);
4392  outer_plan = (Plan *) sort;
4393  outerpathkeys = best_path->outersortkeys;
4394  }
4395  else
4396  outerpathkeys = best_path->jpath.outerjoinpath->pathkeys;
4397 
4398  if (best_path->innersortkeys)
4399  {
4400  Relids inner_relids = inner_path->parent->relids;
4401  Sort *sort = make_sort_from_pathkeys(inner_plan,
4402  best_path->innersortkeys,
4403  inner_relids);
4404 
4405  label_sort_with_costsize(root, sort, -1.0);
4406  inner_plan = (Plan *) sort;
4407  innerpathkeys = best_path->innersortkeys;
4408  }
4409  else
4410  innerpathkeys = best_path->jpath.innerjoinpath->pathkeys;
4411 
4412  /*
4413  * If specified, add a materialize node to shield the inner plan from the
4414  * need to handle mark/restore.
4415  */
4416  if (best_path->materialize_inner)
4417  {
4418  Plan *matplan = (Plan *) make_material(inner_plan);
4419 
4420  /*
4421  * We assume the materialize will not spill to disk, and therefore
4422  * charge just cpu_operator_cost per tuple. (Keep this estimate in
4423  * sync with final_cost_mergejoin.)
4424  */
4425  copy_plan_costsize(matplan, inner_plan);
4426  matplan->total_cost += cpu_operator_cost * matplan->plan_rows;
4427 
4428  inner_plan = matplan;
4429  }
4430 
4431  /*
4432  * Compute the opfamily/collation/strategy/nullsfirst arrays needed by the
4433  * executor. The information is in the pathkeys for the two inputs, but
4434  * we need to be careful about the possibility of mergeclauses sharing a
4435  * pathkey, as well as the possibility that the inner pathkeys are not in
4436  * an order matching the mergeclauses.
4437  */
4438  nClauses = list_length(mergeclauses);
4439  Assert(nClauses == list_length(best_path->path_mergeclauses));
4440  mergefamilies = (Oid *) palloc(nClauses * sizeof(Oid));
4441  mergecollations = (Oid *) palloc(nClauses * sizeof(Oid));
4442  mergestrategies = (int *) palloc(nClauses * sizeof(int));
4443  mergenullsfirst = (bool *) palloc(nClauses * sizeof(bool));
4444 
4445  opathkey = NULL;
4446  opeclass = NULL;
4447  lop = list_head(outerpathkeys);
4448  lip = list_head(innerpathkeys);
4449  i = 0;
4450  foreach(lc, best_path->path_mergeclauses)
4451  {
4452  RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
4453  EquivalenceClass *oeclass;
4454  EquivalenceClass *ieclass;
4455  PathKey *ipathkey = NULL;
4456  EquivalenceClass *ipeclass = NULL;
4457  bool first_inner_match = false;
4458 
4459  /* fetch outer/inner eclass from mergeclause */
4460  if (rinfo->outer_is_left)
4461  {
4462  oeclass = rinfo->left_ec;
4463  ieclass = rinfo->right_ec;
4464  }
4465  else
4466  {
4467  oeclass = rinfo->right_ec;
4468  ieclass = rinfo->left_ec;
4469  }
4470  Assert(oeclass != NULL);
4471  Assert(ieclass != NULL);
4472 
4473  /*
4474  * We must identify the pathkey elements associated with this clause
4475  * by matching the eclasses (which should give a unique match, since
4476  * the pathkey lists should be canonical). In typical cases the merge
4477  * clauses are one-to-one with the pathkeys, but when dealing with
4478  * partially redundant query conditions, things are more complicated.
4479  *
4480  * lop and lip reference the first as-yet-unmatched pathkey elements.
4481  * If they're NULL then all pathkey elements have been matched.
4482  *
4483  * The ordering of the outer pathkeys should match the mergeclauses,
4484  * by construction (see find_mergeclauses_for_outer_pathkeys()). There
4485  * could be more than one mergeclause for the same outer pathkey, but
4486  * no pathkey may be entirely skipped over.
4487  */
4488  if (oeclass != opeclass) /* multiple matches are not interesting */
4489  {
4490  /* doesn't match the current opathkey, so must match the next */
4491  if (lop == NULL)
4492  elog(ERROR, "outer pathkeys do not match mergeclauses");
4493  opathkey = (PathKey *) lfirst(lop);
4494  opeclass = opathkey->pk_eclass;
4495  lop = lnext(outerpathkeys, lop);
4496  if (oeclass != opeclass)
4497  elog(ERROR, "outer pathkeys do not match mergeclauses");
4498  }
4499 
4500  /*
4501  * The inner pathkeys likewise should not have skipped-over keys, but
4502  * it's possible for a mergeclause to reference some earlier inner
4503  * pathkey if we had redundant pathkeys. For example we might have
4504  * mergeclauses like "o.a = i.x AND o.b = i.y AND o.c = i.x". The
4505  * implied inner ordering is then "ORDER BY x, y, x", but the pathkey
4506  * mechanism drops the second sort by x as redundant, and this code
4507  * must cope.
4508  *
4509  * It's also possible for the implied inner-rel ordering to be like
4510  * "ORDER BY x, y, x DESC". We still drop the second instance of x as
4511  * redundant; but this means that the sort ordering of a redundant
4512  * inner pathkey should not be considered significant. So we must
4513  * detect whether this is the first clause matching an inner pathkey.
4514  */
4515  if (lip)
4516  {
4517  ipathkey = (PathKey *) lfirst(lip);
4518  ipeclass = ipathkey->pk_eclass;
4519  if (ieclass == ipeclass)
4520  {
4521  /* successful first match to this inner pathkey */
4522  lip = lnext(innerpathkeys, lip);
4523  first_inner_match = true;
4524  }
4525  }
4526  if (!first_inner_match)
4527  {
4528  /* redundant clause ... must match something before lip */
4529  ListCell *l2;
4530 
4531  foreach(l2, innerpathkeys)
4532  {
4533  if (l2 == lip)
4534  break;
4535  ipathkey = (PathKey *) lfirst(l2);
4536  ipeclass = ipathkey->pk_eclass;
4537  if (ieclass == ipeclass)
4538  break;
4539  }
4540  if (ieclass != ipeclass)
4541  elog(ERROR, "inner pathkeys do not match mergeclauses");
4542  }
4543 
4544  /*
4545  * The pathkeys should always match each other as to opfamily and
4546  * collation (which affect equality), but if we're considering a
4547  * redundant inner pathkey, its sort ordering might not match. In
4548  * such cases we may ignore the inner pathkey's sort ordering and use
4549  * the outer's. (In effect, we're lying to the executor about the
4550  * sort direction of this inner column, but it does not matter since
4551  * the run-time row comparisons would only reach this column when
4552  * there's equality for the earlier column containing the same eclass.
4553  * There could be only one value in this column for the range of inner
4554  * rows having a given value in the earlier column, so it does not
4555  * matter which way we imagine this column to be ordered.) But a
4556  * non-redundant inner pathkey had better match outer's ordering too.
4557  */
4558  if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
4559  opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation)
4560  elog(ERROR, "left and right pathkeys do not match in mergejoin");
4561  if (first_inner_match &&
4562  (opathkey->pk_strategy != ipathkey->pk_strategy ||
4563  opathkey->pk_nulls_first != ipathkey->pk_nulls_first))
4564  elog(ERROR, "left and right pathkeys do not match in mergejoin");
4565 
4566  /* OK, save info for executor */
4567  mergefamilies[i] = opathkey->pk_opfamily;
4568  mergecollations[i] = opathkey->pk_eclass->ec_collation;
4569  mergestrategies[i] = opathkey->pk_strategy;
4570  mergenullsfirst[i] = opathkey->pk_nulls_first;
4571  i++;
4572  }
4573 
4574  /*
4575  * Note: it is not an error if we have additional pathkey elements (i.e.,
4576  * lop or lip isn't NULL here). The input paths might be better-sorted
4577  * than we need for the current mergejoin.
4578  */
4579 
4580  /*
4581  * Now we can build the mergejoin node.
4582  */
4583  join_plan = make_mergejoin(tlist,
4584  joinclauses,
4585  otherclauses,
4586  mergeclauses,
4587  mergefamilies,
4588  mergecollations,
4589  mergestrategies,
4590  mergenullsfirst,
4591  outer_plan,
4592  inner_plan,
4593  best_path->jpath.jointype,
4594  best_path->jpath.inner_unique,
4595  best_path->skip_mark_restore);
4596 
4597  /* Costs of sort and material steps are included in path cost already */
4598  copy_generic_path_info(&join_plan->join.plan, &best_path->jpath.path);
4599 
4600  return join_plan;
4601 }
4602 
4603 static HashJoin *
4604 create_hashjoin_plan(PlannerInfo *root,
4605  HashPath *best_path)
4606 {
4607  HashJoin *join_plan;
4608  Hash *hash_plan;
4609  Plan *outer_plan;
4610  Plan *inner_plan;
4611  List *tlist = build_path_tlist(root, &best_path->jpath.path);
4612  List *joinclauses;
4613  List *otherclauses;
4614  List *hashclauses;
4615  List *hashoperators = NIL;
4616  List *hashcollations = NIL;
4617  List *inner_hashkeys = NIL;
4618  List *outer_hashkeys = NIL;
4619  Oid skewTable = InvalidOid;
4620  AttrNumber skewColumn = InvalidAttrNumber;
4621  bool skewInherit = false;
4622  ListCell *lc;
4623 
4624  /*
4625  * HashJoin can project, so we don't have to demand exact tlists from the
4626  * inputs. However, it's best to request a small tlist from the inner
4627  * side, so that we aren't storing more data than necessary. Likewise, if
4628  * we anticipate batching, request a small tlist from the outer side so
4629  * that we don't put extra data in the outer batch files.
4630  */
4631  outer_plan = create_plan_recurse(root, best_path->jpath.outerjoinpath,
4632  (best_path->num_batches > 1) ? CP_SMALL_TLIST : 0);
4633 
4634  inner_plan = create_plan_recurse(root, best_path->jpath.innerjoinpath,
4635  CP_SMALL_TLIST);
4636 
4637  /* Sort join qual clauses into best execution order */
4638  joinclauses = order_qual_clauses(root, best_path->jpath.joinrestrictinfo);
4639  /* There's no point in sorting the hash clauses ... */
4640 
4641  /* Get the join qual clauses (in plain expression form) */
4642  /* Any pseudoconstant clauses are ignored here */
4643  if (IS_OUTER_JOIN(best_path->jpath.jointype))
4644  {
4645  extract_actual_join_clauses(joinclauses,
4646  best_path->jpath.path.parent->relids,
4647  &joinclauses, &otherclauses);
4648  }
4649  else
4650  {
4651  /* We can treat all clauses alike for an inner join */
4652  joinclauses = extract_actual_clauses(joinclauses, false);
4653  otherclauses = NIL;
4654  }
4655 
4656  /*
4657  * Remove the hashclauses from the list of join qual clauses, leaving the
4658  * list of quals that must be checked as qpquals.
4659  */
4660  hashclauses = get_actual_clauses(best_path->path_hashclauses);
4661  joinclauses = list_difference(joinclauses, hashclauses);
4662 
4663  /*
4664  * Replace any outer-relation variables with nestloop params. There
4665  * should not be any in the hashclauses.
4666  */
4667  if (best_path->jpath.path.param_info)
4668  {
4669  joinclauses = (List *)
4670  replace_nestloop_params(root, (Node *) joinclauses);
4671  otherclauses = (List *)
4672  replace_nestloop_params(root, (Node *) otherclauses);
4673  }
4674 
4675  /*
4676  * Rearrange hashclauses, if needed, so that the outer variable is always
4677  * on the left.
4678  */
4679  hashclauses = get_switched_clauses(best_path->path_hashclauses,
4680  best_path->jpath.outerjoinpath->parent->relids);
4681 
4682  /*
4683  * If there is a single join clause and we can identify the outer variable
4684  * as a simple column reference, supply its identity for possible use in
4685  * skew optimization. (Note: in principle we could do skew optimization
4686  * with multiple join clauses, but we'd have to be able to determine the
4687  * most common combinations of outer values, which we don't currently have
4688  * enough stats for.)
4689  */
4690  if (list_length(hashclauses) == 1)
4691  {
4692  OpExpr *clause = (OpExpr *) linitial(hashclauses);
4693  Node *node;
4694 
4695  Assert(is_opclause(clause));
4696  node = (Node *) linitial(clause->args);
4697  if (IsA(node, RelabelType))
4698  node = (Node *) ((RelabelType *) node)->arg;
4699  if (IsA(node, Var))
4700  {
4701  Var *var = (Var *) node;
4702  RangeTblEntry *rte;
4703 
4704  rte = root->simple_rte_array[var->varno];
4705  if (rte->rtekind == RTE_RELATION)
4706  {
4707  skewTable = rte->relid;
4708  skewColumn = var->varattno;
4709  skewInherit = rte->inh;
4710  }
4711  }
4712  }
4713 
4714  /*
4715  * Collect hash related information. The hashed expressions are
4716  * deconstructed into outer/inner expressions, so they can be computed
4717  * separately (inner expressions are used to build the hashtable via Hash,
4718  * outer expressions to perform lookups of tuples from HashJoin's outer
4719  * plan in the hashtable). Also collect operator information necessary to
4720  * build the hashtable.
4721  */
4722  foreach(lc, hashclauses)
4723  {
4724  OpExpr *hclause = lfirst_node(OpExpr, lc);
4725 
4726  hashoperators = lappend_oid(hashoperators, hclause->opno);
4727  hashcollations = lappend_oid(hashcollations, hclause->inputcollid);
4728  outer_hashkeys = lappend(outer_hashkeys, linitial(hclause->args));
4729  inner_hashkeys = lappend(inner_hashkeys, lsecond(hclause->args));
4730  }
4731 
4732  /*
4733  * Build the hash node and hash join node.
4734  */
4735  hash_plan = make_hash(inner_plan,
4736  inner_hashkeys,
4737  skewTable,
4738  skewColumn,
4739  skewInherit);
4740 
4741  /*
4742  * Set Hash node's startup & total costs equal to total cost of input
4743  * plan; this only affects EXPLAIN display not decisions.
4744  */
4745  copy_plan_costsize(&hash_plan->plan, inner_plan);
4746  hash_plan->plan.startup_cost = hash_plan->plan.total_cost;
4747 
4748  /*
4749  * If parallel-aware, the executor will also need an estimate of the total
4750  * number of rows expected from all participants so that it can size the
4751  * shared hash table.
4752  */
4753  if (best_path->jpath.path.parallel_aware)
4754  {
4755  hash_plan->plan.parallel_aware = true;
4756  hash_plan->rows_total = best_path->inner_rows_total;
4757  }
4758 
4759  join_plan = make_hashjoin(tlist,
4760  joinclauses,
4761  otherclauses,
4762  hashclauses,
4763  hashoperators,
4764  hashcollations,
4765  outer_hashkeys,
4766  outer_plan,
4767  (Plan *) hash_plan,
4768  best_path->jpath.jointype,
4769  best_path->jpath.inner_unique);
4770 
4771  copy_generic_path_info(&join_plan->join.plan, &best_path->jpath.path);
4772 
4773  return join_plan;
4774 }
4775 
4776 
4777 /*****************************************************************************
4778  *
4779  * SUPPORTING ROUTINES
4780  *
4781  *****************************************************************************/
4782 
4783 /*
4784  * replace_nestloop_params
4785  * Replace outer-relation Vars and PlaceHolderVars in the given expression
4786  * with nestloop Params
4787  *
4788  * All Vars and PlaceHolderVars belonging to the relation(s) identified by
4789  * root->curOuterRels are replaced by Params, and entries are added to
4790  * root->curOuterParams if not already present.
4791  */
4792 static Node *
4793 replace_nestloop_params(PlannerInfo *root, Node *expr)
4794 {
4795  /* No setup needed for tree walk, so away we go */
4796  return replace_nestloop_params_mutator(expr, root);
4797 }
4798 
4799 static Node *
4800 replace_nestloop_params_mutator(Node *node, PlannerInfo *root)
4801 {
4802  if (node == NULL)
4803  return NULL;
4804  if (IsA(node, Var))
4805  {
4806  Var *var = (Var *) node;
4807 
4808  /* Upper-level Vars should be long gone at this point */
4809  Assert(var->varlevelsup == 0);
4810  /* If not to be replaced, we can just return the Var unmodified */
4811  if (!bms_is_member(var->varno, root->curOuterRels))
4812  return node;
4813  /* Replace the Var with a nestloop Param */
4814  return (Node *) replace_nestloop_param_var(root, var);
4815  }
4816  if (IsA(node, PlaceHolderVar))
4817  {
4818  PlaceHolderVar *phv = (PlaceHolderVar *) node;
4819 
4820  /* Upper-level PlaceHolderVars should be long gone at this point */
4821  Assert(phv->phlevelsup == 0);
4822 
4823  /*
4824  * Check whether we need to replace the PHV. We use bms_overlap as a
4825  * cheap/quick test to see if the PHV might be evaluated in the outer
4826  * rels, and then grab its PlaceHolderInfo to tell for sure.
4827  */
4828  if (!bms_overlap(phv->phrels, root->curOuterRels) ||
4829  !bms_is_subset(find_placeholder_info(root, phv, false)->ph_eval_at,
4830  root->curOuterRels))
4831  {
4832  /*
4833  * We can't replace the whole PHV, but we might still need to
4834  * replace Vars or PHVs within its expression, in case it ends up
4835  * actually getting evaluated here. (It might get evaluated in
4836  * this plan node, or some child node; in the latter case we don't
4837  * really need to process the expression here, but we haven't got
4838  * enough info to tell if that's the case.) Flat-copy the PHV
4839  * node and then recurse on its expression.
4840  *
4841  * Note that after doing this, we might have different
4842  * representations of the contents of the same PHV in different
4843  * parts of the plan tree. This is OK because equal() will just
4844  * match on phid/phlevelsup, so setrefs.c will still recognize an
4845  * upper-level reference to a lower-level copy of the same PHV.
4846  */
4847  PlaceHolderVar *newphv = makeNode(PlaceHolderVar);
4848 
4849  memcpy(newphv, phv, sizeof(PlaceHolderVar));
4850  newphv->phexpr = (Expr *)
4851  replace_nestloop_params_mutator((Node *) phv->phexpr,
4852  root);
4853  return (Node *) newphv;
4854  }
4855  /* Replace the PlaceHolderVar with a nestloop Param */
4856  return (Node *) replace_nestloop_param_placeholdervar(root, phv);
4857  }
4858  return expression_tree_mutator(node,
4859  replace_nestloop_params_mutator,
4860  (void *) root);
4861 }
4862 
4863 /*
4864  * fix_indexqual_references
4865  * Adjust indexqual clauses to the form the executor's indexqual
4866  * machinery needs.
4867  *
4868  * We have three tasks here:
4869  * * Select the actual qual clauses out of the input IndexClause list,
4870  * and remove RestrictInfo nodes from the qual clauses.
4871  * * Replace any outer-relation Var or PHV nodes with nestloop Params.
4872  * (XXX eventually, that responsibility should go elsewhere?)
4873  * * Index keys must be represented by Var nodes with varattno set to the
4874  * index's attribute number, not the attribute number in the original rel.
4875  *
4876  * *stripped_indexquals_p receives a list of the actual qual clauses.
4877  *
4878  * *fixed_indexquals_p receives a list of the adjusted quals. This is a copy
4879  * that shares no substructure with the original; this is needed in case there
4880  * are subplans in it (we need two separate copies of the subplan tree, or
4881  * things will go awry).
4882  */
4883 static void
4884 fix_indexqual_references(PlannerInfo *root, IndexPath *index_path,
4885  List **stripped_indexquals_p, List **fixed_indexquals_p)
4886 {
4887  IndexOptInfo *index = index_path->indexinfo;
4888  List *stripped_indexquals;
4889  List *fixed_indexquals;
4890  ListCell *lc;
4891 
4892  stripped_indexquals = fixed_indexquals = NIL;
4893 
4894  foreach(lc, index_path->indexclauses)
4895  {
4896  IndexClause *iclause = lfirst_node(IndexClause, lc);
4897  int indexcol = iclause->indexcol;
4898  ListCell *lc2;
4899 
4900  foreach(lc2, iclause->indexquals)
4901  {
4902  RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc2);
4903  Node *clause = (Node *) rinfo->clause;
4904 
4905  stripped_indexquals = lappend(stripped_indexquals, clause);
4906  clause = fix_indexqual_clause(root, index, indexcol,
4907  clause, iclause->indexcols);
4908  fixed_indexquals = lappend(fixed_indexquals, clause);
4909  }
4910  }
4911 
4912  *stripped_indexquals_p = stripped_indexquals;
4913  *fixed_indexquals_p = fixed_indexquals;
4914 }
4915 
4916 /*
4917  * fix_indexorderby_references
4918  * Adjust indexorderby clauses to the form the executor's index
4919  * machinery needs.
4920  *
4921  * This is a simplified version of fix_indexqual_references. The input is
4922  * bare clauses and a separate indexcol list, instead of IndexClauses.
4923  */
4924 static List *
4925 fix_indexorderby_references(PlannerInfo *root, IndexPath *index_path)
4926 {
4927  IndexOptInfo *index = index_path->indexinfo;
4928  List *fixed_indexorderbys;
4929  ListCell *lcc,
4930  *lci;
4931 
4932  fixed_indexorderbys = NIL;
4933 
4934  forboth(lcc, index_path->indexorderbys, lci, index_path->indexorderbycols)
4935  {
4936  Node *clause = (Node *) lfirst(lcc);
4937  int indexcol = lfirst_int(lci);
4938 
4939  clause = fix_indexqual_clause(root, index, indexcol, clause, NIL);
4940  fixed_indexorderbys = lappend(fixed_indexorderbys, clause);
4941  }
4942 
4943  return fixed_indexorderbys;
4944 }
4945 
4946 /*
4947  * fix_indexqual_clause
4948  * Convert a single indexqual clause to the form needed by the executor.
4949  *
4950  * We replace nestloop params here, and replace the index key variables
4951  * or expressions by index Var nodes.
4952  */
4953 static Node *
4954 fix_indexqual_clause(PlannerInfo *root, IndexOptInfo *index, int indexcol,
4955  Node *clause, List *indexcolnos)
4956 {
4957  /*
4958  * Replace any outer-relation variables with nestloop params.
4959  *
4960  * This also makes a copy of the clause, so it's safe to modify it
4961  * in-place below.
4962  */
4963  clause = replace_nestloop_params(root, clause);
4964 
4965  if (IsA(clause, OpExpr))
4966  {
4967  OpExpr *op = (OpExpr *) clause;
4968 
4969  /* Replace the indexkey expression with an index Var. */
4970  linitial(op->args) = fix_indexqual_operand(linitial(op->args),
4971  index,
4972  indexcol);
4973  }
4974  else if (IsA(clause, RowCompareExpr))
4975  {
4976  RowCompareExpr *rc = (RowCompareExpr *) clause;
4977  ListCell *lca,
4978  *lcai;
4979 
4980  /* Replace the indexkey expressions with index Vars. */
4981  Assert(list_length(rc->largs) == list_length(indexcolnos));
4982  forboth(lca, rc->largs, lcai, indexcolnos)
4983  {
4984  lfirst(lca) = fix_indexqual_operand(lfirst(lca),
4985  index,
4986  lfirst_int(lcai));
4987  }
4988  }
4989  else if (IsA(clause, ScalarArrayOpExpr))
4990  {
4991  ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
4992 
4993  /* Replace the indexkey expression with an index Var. */
4994  linitial(saop->args) = fix_indexqual_operand(linitial(saop->args),
4995  index,
4996  indexcol);
4997  }
4998  else if (IsA(clause, NullTest))
4999  {
5000  NullTest *nt = (NullTest *) clause;
5001 
5002  /* Replace the indexkey expression with an index Var. */
5003  nt->arg = (Expr *) fix_indexqual_operand((Node *) nt->arg,
5004  index,
5005  indexcol);
5006  }
5007  else
5008  elog(ERROR, "unsupported indexqual type: %d",
5009  (int) nodeTag(clause));
5010 
5011  return clause;
5012 }
5013 
5014 /*
5015  * fix_indexqual_operand
5016  * Convert an indexqual expression to a Var referencing the index column.
5017  *
5018  * We represent index keys by Var nodes having varno == INDEX_VAR and varattno
5019  * equal to the index's attribute number (index column position).
5020  *
5021  * Most of the code here is just for sanity cross-checking that the given
5022  * expression actually matches the index column it's claimed to.
5023  */
5024 static Node *
5025 fix_indexqual_operand(Node *node, IndexOptInfo *index, int indexcol)
5026 {
5027  Var *result;
5028  int pos;
5029  ListCell *indexpr_item;
5030 
5031  /*
5032  * Remove any binary-compatible relabeling of the indexkey
5033  */
5034  if (IsA(node, RelabelType))
5035  node = (Node *) ((RelabelType *) node)->arg;
5036 
5037  Assert(indexcol >= 0 && indexcol < index->ncolumns);
5038 
5039  if (index->indexkeys[indexcol] != 0)
5040  {
5041  /* It's a simple index column */
5042  if (IsA(node, Var) &&
5043  ((Var *) node)->varno == index->rel->relid &&
5044  ((Var *) node)->varattno == index->indexkeys[indexcol])
5045  {
5046  result = (Var *) copyObject(node);
5047  result->varno = INDEX_VAR;
5048  result->varattno = indexcol + 1;
5049  return (Node *) result;
5050  }
5051  else
5052  elog(ERROR, "index key does not match expected index column");
5053  }
5054 
5055  /* It's an index expression, so find and cross-check the expression */
5056  indexpr_item = list_head(index->indexprs);
5057  for (pos = 0; pos < index->ncolumns; pos++)
5058  {
5059  if (index->indexkeys[pos] == 0)
5060  {
5061  if (indexpr_item == NULL)
5062  elog(ERROR, "too few entries in indexprs list");
5063  if (pos == indexcol)
5064  {
5065  Node *indexkey;
5066 
5067  indexkey = (Node *) lfirst(indexpr_item);
5068  if (indexkey && IsA(indexkey, RelabelType))
5069  indexkey = (Node *) ((RelabelType *) indexkey)->arg;
5070  if (equal(node, indexkey))
5071  {
5072  result = makeVar(INDEX_VAR, indexcol + 1,
5073  exprType(lfirst(indexpr_item)), -1,
5074  exprCollation(lfirst(indexpr_item)),
5075  0);
5076  return (Node *) result;
5077  }
5078  else
5079  elog(ERROR, "index key does not match expected index column");
5080  }
5081  indexpr_item = lnext(index->indexprs, indexpr_item);
5082  }
5083  }
5084 
5085  /* Oops... */
5086  elog(ERROR, "index key does not match expected index column");
5087  return NULL; /* keep compiler quiet */
5088 }
5089 
5090 /*
5091  * get_switched_clauses
5092  * Given a list of merge or hash joinclauses (as RestrictInfo nodes),
5093  * extract the bare clauses, and rearrange the elements within the
5094  * clauses, if needed, so the outer join variable is on the left and
5095  * the inner is on the right. The original clause data structure is not
5096  * touched; a modified list is returned. We do, however, set the transient
5097  * outer_is_left field in each RestrictInfo to show which side was which.
5098  */
5099 static List *
5100 get_switched_clauses(List *clauses, Relids outerrelids)
5101 {
5102  List *t_list = NIL;
5103  ListCell *l;
5104 
5105  foreach(l, clauses)
5106  {
5107  RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(l);
5108  OpExpr *clause = (OpExpr *) restrictinfo->clause;
5109 
5110  Assert(is_opclause(clause));
5111  if (bms_is_subset(restrictinfo->right_relids, outerrelids))
5112  {
5113  /*
5114  * Duplicate just enough of the structure to allow commuting the
5115  * clause without changing the original list. Could use
5116  * copyObject, but a complete deep copy is overkill.
5117  */
5118  OpExpr *temp = makeNode(OpExpr);
5119 
5120  temp->opno = clause->opno;
5121  temp->opfuncid = InvalidOid;
5122  temp->opresulttype = clause->opresulttype;
5123  temp->opretset = clause->opretset;
5124  temp->opcollid = clause->opcollid;
5125  temp->inputcollid = clause->inputcollid;
5126  temp->args = list_copy(clause->args);
5127  temp->location = clause->location;
5128  /* Commute it --- note this modifies the temp node in-place. */
5129  CommuteOpExpr(temp);
5130  t_list = lappend(t_list, temp);
5131  restrictinfo->outer_is_left = false;
5132  }
5133  else
5134  {
5135  Assert(bms_is_subset(restrictinfo->left_relids, outerrelids));
5136  t_list = lappend(t_list, clause);
5137  restrictinfo->outer_is_left = true;
5138  }
5139  }
5140  return t_list;
5141 }
5142 
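/*
 * Illustrative note (editorial sketch, not part of createplan.c): given the
 * join clause "inner_rel.y = outer_rel.x", the right-hand side's relids are
 * a subset of outerrelids, so the loop above emits a commuted copy
 * "outer_rel.x = inner_rel.y" (outer variable on the left, as the executor
 * expects) and sets outer_is_left = false on the RestrictInfo to record that
 * the outer side was on the right in the original clause.  A clause that is
 * already written "outer_rel.x = inner_rel.y" is passed through unchanged
 * with outer_is_left = true.
 */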
5143 /*
5144  * order_qual_clauses
5145  * Given a list of qual clauses that will all be evaluated at the same
5146  * plan node, sort the list into the order we want to check the quals
5147  * in at runtime.
5148  *
5149  * When security barrier quals are used in the query, we may have quals with
5150  * different security levels in the list. Quals of lower security_level
5151  * must go before quals of higher security_level, except that we can grant
5152  * exceptions to move up quals that are leakproof. When security level
5153  * doesn't force the decision, we prefer to order clauses by estimated
5154  * execution cost, cheapest first.
5155  *
5156  * Ideally the order should be driven by a combination of execution cost and
5157  * selectivity, but it's not immediately clear how to account for both,
5158  * and given the uncertainty of the estimates the reliability of the decisions
5159  * would be doubtful anyway. So we just order by security level then
5160  * estimated per-tuple cost, being careful not to change the order when
5161  * (as is often the case) the estimates are identical.
5162  *
5163  * Although this will work on either bare clauses or RestrictInfos, it's
5164  * much faster to apply it to RestrictInfos, since it can re-use cost
5165  * information that is cached in RestrictInfos. XXX in the bare-clause
5166  * case, we are also not able to apply security considerations. That is
5167  * all right for the moment, because the bare-clause case doesn't occur
5168  * anywhere that barrier quals could be present, but it would be better to
5169  * get rid of it.
5170  *
5171  * Note: some callers pass lists that contain entries that will later be
5172  * removed; this is the easiest way to let this routine see RestrictInfos
5173  * instead of bare clauses. This is another reason why trying to consider
5174  * selectivity in the ordering would likely do the wrong thing.
5175  */
5176 static List *
5177 order_qual_clauses(PlannerInfo *root, List *clauses)
5178 {
5179  typedef struct
5180  {
5181  Node *clause;
5182  Cost cost;
5183  Index security_level;
5184  } QualItem;
5185  int nitems = list_length(clauses);
5186  QualItem *items;
5187  ListCell *lc;
5188  int i;
5189  List *result;
5190 
5191  /* No need to work hard for 0 or 1 clause */
5192  if (nitems <= 1)
5193  return clauses;
5194 
5195  /*
5196  * Collect the items and costs into an array. This is to avoid repeated
5197  * cost_qual_eval work if the inputs aren't RestrictInfos.
5198  */
5199  items = (QualItem *) palloc(nitems * sizeof(QualItem));
5200  i = 0;
5201  foreach(lc, clauses)
5202  {
5203  Node *clause = (Node *) lfirst(lc);
5204  QualCost qcost;
5205 
5206  cost_qual_eval_node(&qcost, clause, root);
5207  items[i].clause = clause;
5208  items[i].cost = qcost.per_tuple;
5209  if (IsA(clause, RestrictInfo))
5210  {
5211  RestrictInfo *rinfo = (RestrictInfo *) clause;
5212 
5213  /*
5214  * If a clause is leakproof, it doesn't have to be constrained by
5215  * its nominal security level. If it's also reasonably cheap
5216  * (here defined as 10X cpu_operator_cost), pretend it has
5217  * security_level 0, which will allow it to go in front of
5218  * more-expensive quals of lower security levels. Of course, that
5219  * will also force it to go in front of cheaper quals of its own
5220  * security level, which is not so great, but we can alleviate
5221  * that risk by applying the cost limit cutoff.
5222  */
5223  if (rinfo->leakproof && items[i].cost < 10 * cpu_operator_cost)
5224  items[i].security_level = 0;
5225  else
5226  items[i].security_level = rinfo->security_level;
5227  }
5228  else
5229  items[i].security_level = 0;
5230  i++;
5231  }
5232 
5233  /*
5234  * Sort. We don't use qsort() because it's not guaranteed stable for
5235  * equal keys. The expected number of entries is small enough that a
5236  * simple insertion sort should be good enough.
5237  */
5238  for (i = 1; i < nitems; i++)
5239  {
5240  QualItem newitem = items[i];
5241  int j;
5242 
5243  /* insert newitem into the already-sorted subarray */
5244  for (j = i; j > 0; j--)
5245  {
5246  QualItem *olditem = &items[j - 1];
5247 
5248  if (newitem.security_level > olditem->security_level ||
5249  (newitem.security_level == olditem->security_level &&
5250  newitem.cost >= olditem->cost))
5251  break;
5252  items[j] = *olditem;
5253  }
5254  items[j] = newitem;
5255  }
5256 
5257  /* Convert back to a list */
5258  result = NIL;
5259  for (i = 0; i < nitems; i++)
5260  result = lappend(result, items[i].clause);
5261 
5262  return result;
5263 }
5264 
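/*
 * Illustrative note (editorial sketch, not part of createplan.c): suppose
 * three quals arrive in this order:
 *     A: security_level 1, per-tuple cost 0.01
 *     B: security_level 0, per-tuple cost 5.0
 *     C: security_level 1, leakproof, per-tuple cost 0.0025
 * With the default cpu_operator_cost of 0.0025, C falls under the
 * 10 * cpu_operator_cost cutoff and is treated as security_level 0.  The
 * stable sort by (security_level, cost) then yields C, B, A: the cheap
 * leakproof qual runs first, the expensive level-0 qual next, and the
 * ordinary level-1 qual last.
 */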
5265 /*
5266  * Copy cost and size info from a Path node to the Plan node created from it.
5267  * The executor usually won't use this info, but it's needed by EXPLAIN.
5268  * Also copy the parallel-related flags, which the executor *will* use.
5269  */
5270 static void
5271 copy_generic_path_info(Plan *dest, Path *src)
5272 {
5273  dest->startup_cost = src->startup_cost;
5274  dest->total_cost = src->total_cost;
5275  dest->plan_rows = src->rows;
5276  dest->plan_width = src->pathtarget->width;
5277  dest->parallel_aware = src->parallel_aware;
5278  dest->parallel_safe = src->parallel_safe;
5279 }
5280 
5281 /*
5282  * Copy cost and size info from a lower plan node to an inserted node.
5283  * (Most callers alter the info after copying it.)
5284  */
5285 static void
5286 copy_plan_costsize(Plan *dest, Plan *src)
5287 {
5288  dest->startup_cost = src->startup_cost;
5289  dest->total_cost = src->total_cost;
5290  dest->plan_rows = src->plan_rows;
5291  dest->plan_width = src->plan_width;
5292  /* Assume the inserted node is not parallel-aware. */
5293  dest->parallel_aware = false;
5294  /* Assume the inserted node is parallel-safe, if child plan is. */
5295  dest->parallel_safe = src->parallel_safe;
5296 }
5297 
5298 /*
5299  * Some places in this file build Sort nodes that don't have a directly
5300  * corresponding Path node. The cost of the sort is, or should have been,
5301  * included in the cost of the Path node we're working from, but since it's
5302  * not split out, we have to re-figure it using cost_sort(). This is just
5303  * to label the Sort node nicely for EXPLAIN.
5304  *
5305  * limit_tuples is as for cost_sort (in particular, pass -1 if no limit)
5306  */
5307 static void
5308 label_sort_with_costsize(PlannerInfo *root, Sort *plan, double limit_tuples)
5309 {
5310  Plan *lefttree = plan->plan.lefttree;
5311  Path sort_path; /* dummy for result of cost_sort */
5312 
5313  /*
5314  * This function shouldn't have to deal with IncrementalSort plans because
5315  * they are only created from corresponding Path nodes.
5316  */
5317  Assert(IsA(plan, Sort));
5318 
5319  cost_sort(&sort_path, root, NIL,
5320  lefttree->total_cost,
5321  lefttree->plan_rows,
5322  lefttree->plan_width,
5323  0.0,
5324  work_mem,
5325  limit_tuples);
5326  plan->plan.startup_cost = sort_path.startup_cost;
5327  plan->plan.total_cost = sort_path.total_cost;
5328  plan->plan.plan_rows = lefttree->plan_rows;
5329  plan->plan.plan_width = lefttree->plan_width;
5330  plan->plan.parallel_aware = false;
5331  plan->plan.parallel_safe = lefttree->parallel_safe;
5332 }
5333 
5334 /*
5335  * bitmap_subplan_mark_shared
5336  * Set isshared flag in bitmap subplan so that it will be created in
5337  * shared memory.
5338  */
5339 static void
5340 bitmap_subplan_mark_shared(Plan *plan)
5341 {
5342  if (IsA(plan, BitmapAnd))
5343  bitmap_subplan_mark_shared(linitial(((BitmapAnd *) plan)->bitmapplans));
5344  else if (IsA(plan, BitmapOr))
5345  {
5346  ((BitmapOr *) plan)->isshared = true;
5347  bitmap_subplan_mark_shared(linitial(((BitmapOr *) plan)->bitmapplans));
5348  }
5349  else if (IsA(plan, BitmapIndexScan))
5350  ((BitmapIndexScan *) plan)->isshared = true;
5351  else
5352  elog(ERROR, "unrecognized node type: %d", nodeTag(plan));
5353 }
5354 
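/*
 * Illustrative note (editorial sketch, not part of createplan.c): for a
 * subplan tree BitmapOr(BitmapIndexScan A, BitmapIndexScan B), the code
 * above marks the BitmapOr itself and then recurses into the first child
 * only, so A is marked isshared while B is not.  Presumably only the first
 * input's bitmap, into which the other inputs are unioned, needs to be
 * allocated in shared memory.
 */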
5355 /*****************************************************************************
5356  *
5357  * PLAN NODE BUILDING ROUTINES
5358  *
5359  * In general, these functions are not passed the original Path and therefore
5360  * leave it to the caller to fill in the cost/width fields from the Path,
5361  * typically by calling copy_generic_path_info(). This convention is
5362  * somewhat historical, but it does support a few places above where we build
5363  * a plan node without having an exactly corresponding Path node. Under no
5364  * circumstances should one of these functions do its own cost calculations,
5365  * as that would be redundant with calculations done while building Paths.
5366  *
5367  *****************************************************************************/
5368 
5369 static SeqScan *
5370 make_seqscan(List *qptlist,
5371  List *qpqual,
5372  Index scanrelid)
5373 {
5374  SeqScan *node = makeNode(SeqScan);
5375  Plan *plan = &node->plan;
5376 
5377  plan->targetlist = qptlist;
5378  plan->qual = qpqual;
5379  plan->lefttree = NULL;
5380  plan->righttree = NULL;
5381  node->scanrelid = scanrelid;
5382 
5383  return node;
5384 }
5385 
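/*
 * Illustrative sketch (editorial, not part of createplan.c): the typical
 * calling pattern for these constructors, as in create_seqscan_plan earlier
 * in this file, is roughly
 *
 *     scan_plan = make_seqscan(tlist, scan_clauses, scan_relid);
 *     copy_generic_path_info(&scan_plan->plan, best_path);
 *
 * i.e. the constructor fills in only the node-specific fields, and the
 * caller copies cost and row estimates from the chosen Path, per the
 * convention described in the block comment above.
 */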
5386 static SampleScan *
5387 make_samplescan(List *qptlist,
5388  List *qpqual,
5389  Index scanrelid,
5390  TableSampleClause *tsc)
5391 {
5392  SampleScan *node = makeNode(SampleScan);
5393  Plan *plan = &node->scan.plan;
5394 
5395  plan->targetlist = qptlist;
5396  plan->qual = qpqual;
5397  plan->lefttree = NULL;
5398  plan->righttree = NULL;
5399  node->scan.scanrelid = scanrelid;
5400  node->tablesample = tsc;
5401 
5402  return node;
5403 }
5404 
5405 static IndexScan *
5406 make_indexscan(List *qptlist,
5407  List *qpqual,
5408  Index scanrelid,
5409  Oid indexid,
5410  List *indexqual,
5411  List *indexqualorig,
5412  List *indexorderby,
5413  List *indexorderbyorig,
5414  List *indexorderbyops,
5415  ScanDirection indexscandir)
5416 {
5417  IndexScan *node = makeNode(IndexScan);
5418  Plan *plan = &node->scan.plan;
5419 
5420  plan->targetlist = qptlist;
5421  plan->qual = qpqual;
5422  plan->lefttree = NULL;
5423  plan->righttree = NULL;
5424  node->scan.scanrelid = scanrelid;
5425  node->indexid = indexid;
5426  node->indexqual = indexqual;
5427  node->indexqualorig = indexqualorig;
5428  node->indexorderby = indexorderby;
5429  node->indexorderbyorig = indexorderbyorig;
5430  node->indexorderbyops = indexorderbyops;
5431  node->indexorderdir = indexscandir;
5432 
5433  return node;
5434 }
5435 
5436 static IndexOnlyScan *
5437 make_indexonlyscan(List *qptlist,
5438  List *qpqual,
5439  Index scanrelid,
5440  Oid indexid,
5441  List *indexqual,
5442  List *indexorderby,
5443  List *indextlist,
5444  ScanDirection indexscandir)
5445 {
5446  IndexOnlyScan *node = makeNode(IndexOnlyScan);
5447  Plan *plan = &node->scan.plan;
5448 
5449  plan->targetlist = qptlist;
5450  plan->qual = qpqual;
5451  plan->lefttree = NULL;
5452  plan->righttree = NULL;
5453  node->scan.scanrelid = scanrelid;
5454  node->indexid = indexid;
5455  node->indexqual = indexqual;
5456  node->indexorderby = indexorderby;
5457  node->indextlist = indextlist;
5458  node->indexorderdir = indexscandir;
5459 
5460  return node;
5461 }
5462 
5463 static BitmapIndexScan *
5464 make_bitmap_indexscan(Index scanrelid,
5465  Oid indexid,
5466  List *indexqual,
5467  List *indexqualorig)
5468 {
5469  BitmapIndexScan *node = makeNode(BitmapIndexScan);
5470  Plan *plan = &node->scan.plan;
5471 
5472  plan->targetlist = NIL; /* not used */
5473  plan->qual = NIL; /* not used */
5474  plan->lefttree = NULL;
5475  plan->righttree = NULL;
5476  node->scan.scanrelid = scanrelid;
5477  node->indexid = indexid;
5478  node->indexqual = indexqual;
5479  node->indexqualorig = indexqualorig;
5480 
5481  return node;
5482 }
5483 
5484 static BitmapHeapScan *
5485 make_bitmap_heapscan(List *qptlist,
5486  List *qpqual,
5487  Plan *lefttree,
5488  List *bitmapqualorig,
5489  Index scanrelid)
5490 {
5491  BitmapHeapScan *node = makeNode(BitmapHeapScan);
5492  Plan *plan = &node->scan.plan;
5493 
5494  plan->targetlist = qptlist;
5495  plan->qual = qpqual;
5496  plan->lefttree = lefttree;
5497  plan->righttree = NULL;
5498  node->scan.scanrelid = scanrelid;
5499  node->bitmapqualorig = bitmapqualorig;
5500 
5501  return node;
5502 }
5503 
5504 static TidScan *
5505 make_tidscan(List *qptlist,
5506  List *qpqual,
5507  Index scanrelid,
5508  List *tidquals)
5509 {
5510  TidScan *node = makeNode(TidScan);
5511  Plan *plan = &node->scan.plan;
5512 
5513  plan->targetlist = qptlist;
5514  plan->qual = qpqual;
5515  plan->lefttree = NULL;
5516  plan->righttree = NULL;
5517  node->scan.scanrelid = scanrelid;
5518  node->tidquals = tidquals;
5519 
5520  return node;
5521 }
5522 
5523 static TidRangeScan *
5524 make_tidrangescan(List *qptlist,
5525  List *qpqual,
5526  Index scanrelid,
5527  List *tidrangequals)
5528 {
5529  TidRangeScan *node = makeNode(TidRangeScan);
5530  Plan *plan = &node->scan.plan;
5531 
5532  plan->targetlist = qptlist;
5533  plan->qual = qpqual;
5534  plan->lefttree = NULL;
5535  plan->righttree = NULL;
5536  node->scan.scanrelid = scanrelid;
5537  node->tidrangequals = tidrangequals;
5538 
5539  return node;
5540 }
5541 
5542 static SubqueryScan *
5543 make_subqueryscan(List *qptlist,
5544  List *qpqual,
5545  Index scanrelid,
5546  Plan *subplan)
5547 {
5548  SubqueryScan *node = makeNode(SubqueryScan);
5549  Plan *plan = &node->scan.plan;
5550 
5551  plan->targetlist = qptlist;
5552  plan->qual = qpqual;
5553  plan->lefttree = NULL;
5554  plan->righttree = NULL;
5555  node->scan.scanrelid = scanrelid;
5556  node->subplan = subplan;
5557 
5558  return node;
5559 }
5560 
5561 static FunctionScan *
5562 make_functionscan(List *qptlist,
5563  List *qpqual,
5564  Index scanrelid,
5565  List *functions,
5566  bool funcordinality)
5567 {
5568  FunctionScan *node = makeNode(FunctionScan);
5569  Plan *plan = &node->scan.plan;
5570 
5571  plan->targetlist = qptlist;
5572  plan->qual = qpqual;
5573  plan->lefttree = NULL;
5574  plan->righttree = NULL;
5575  node->scan.scanrelid = scanrelid;
5576  node->functions = functions;
5577  node->funcordinality = funcordinality;
5578 
5579  return node;
5580 }
5581 
5582 static TableFuncScan *
5583 make_tablefuncscan(List *qptlist,
5584  List *qpqual,
5585  Index scanrelid,
5586  TableFunc *tablefunc)
5587 {
5588  TableFuncScan *node = makeNode(TableFuncScan);
5589  Plan *plan = &node->scan.plan;
5590 
5591  plan->targetlist = qptlist;
5592  plan->qual = qpqual;
5593  plan->lefttree = NULL;
5594  plan->righttree = NULL;
5595  node->scan.scanrelid = scanrelid;
5596  node->tablefunc = tablefunc;
5597 
5598  return node;
5599 }
5600 
5601 static ValuesScan *
5602 make_valuesscan(List *qptlist,
5603  List *qpqual,
5604  Index scanrelid,
5605  List *values_lists)
5606 {
5607  ValuesScan *node = makeNode(ValuesScan);
5608  Plan *plan = &node->scan.plan;
5609 
5610  plan->targetlist = qptlist;
5611  plan->qual = qpqual;
5612  plan->lefttree = NULL;
5613  plan->righttree = NULL;
5614  node->scan.scanrelid = scanrelid;
5615  node->values_lists = values_lists;
5616 
5617  return node;
5618 }
5619 
5620 static CteScan *