createplan.c
1 /*-------------------------------------------------------------------------
2  *
3  * createplan.c
4  * Routines to create the desired plan for processing a query.
5  * Planning is complete, we just need to convert the selected
6  * Path into a Plan.
7  *
8  * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
9  * Portions Copyright (c) 1994, Regents of the University of California
10  *
11  *
12  * IDENTIFICATION
13  * src/backend/optimizer/plan/createplan.c
14  *
15  *-------------------------------------------------------------------------
16  */
17 #include "postgres.h"
18 
19 #include <math.h>
20 
21 #include "access/sysattr.h"
22 #include "catalog/pg_class.h"
23 #include "foreign/fdwapi.h"
24 #include "miscadmin.h"
25 #include "nodes/extensible.h"
26 #include "nodes/makefuncs.h"
27 #include "nodes/nodeFuncs.h"
28 #include "optimizer/clauses.h"
29 #include "optimizer/cost.h"
30 #include "optimizer/optimizer.h"
31 #include "optimizer/paramassign.h"
32 #include "optimizer/pathnode.h"
33 #include "optimizer/paths.h"
34 #include "optimizer/placeholder.h"
35 #include "optimizer/plancat.h"
36 #include "optimizer/planmain.h"
37 #include "optimizer/prep.h"
38 #include "optimizer/restrictinfo.h"
39 #include "optimizer/subselect.h"
40 #include "optimizer/tlist.h"
41 #include "parser/parse_clause.h"
42 #include "parser/parsetree.h"
43 #include "partitioning/partprune.h"
44 #include "tcop/tcopprot.h"
45 #include "utils/lsyscache.h"
46 
47 
48 /*
49  * Flag bits that can appear in the flags argument of create_plan_recurse().
50  * These can be OR-ed together.
51  *
52  * CP_EXACT_TLIST specifies that the generated plan node must return exactly
53  * the tlist specified by the path's pathtarget (this overrides both
54  * CP_SMALL_TLIST and CP_LABEL_TLIST, if those are set). Otherwise, the
55  * plan node is allowed to return just the Vars and PlaceHolderVars needed
56  * to evaluate the pathtarget.
57  *
58  * CP_SMALL_TLIST specifies that a narrower tlist is preferred. This is
59  * passed down by parent nodes such as Sort and Hash, which will have to
60  * store the returned tuples.
61  *
62  * CP_LABEL_TLIST specifies that the plan node must return columns matching
63  * any sortgrouprefs specified in its pathtarget, with appropriate
64  * ressortgroupref labels. This is passed down by parent nodes such as Sort
65  * and Group, which need these values to be available in their inputs.
66  *
67  * CP_IGNORE_TLIST specifies that the caller plans to replace the targetlist,
68  * and therefore it doesn't matter a bit what target list gets generated.
69  */
70 #define CP_EXACT_TLIST 0x0001 /* Plan must return specified tlist */
71 #define CP_SMALL_TLIST 0x0002 /* Prefer narrower tlists */
72 #define CP_LABEL_TLIST 0x0004 /* tlist must contain sortgrouprefs */
73 #define CP_IGNORE_TLIST 0x0008 /* caller will replace tlist */
74 
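/*
 * Illustration (a minimal sketch, drawn from callers later in this file):
 * these bits are OR-ed into the "flags" argument of create_plan_recurse().
 * A node that must store its input, such as Material, asks its child for a
 * narrow tlist roughly like
 *
 *		subplan = create_plan_recurse(root, best_path->subpath,
 *									  flags | CP_SMALL_TLIST);
 *
 * as create_material_plan() does below.
 */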
75 
76 static Plan *create_plan_recurse(PlannerInfo *root, Path *best_path,
77  int flags);
78 static Plan *create_scan_plan(PlannerInfo *root, Path *best_path,
79  int flags);
80 static List *build_path_tlist(PlannerInfo *root, Path *path);
81 static bool use_physical_tlist(PlannerInfo *root, Path *path, int flags);
82 static List *get_gating_quals(PlannerInfo *root, List *quals);
83 static Plan *create_gating_plan(PlannerInfo *root, Path *path, Plan *plan,
84  List *gating_quals);
85 static Plan *create_join_plan(PlannerInfo *root, JoinPath *best_path);
86 static bool mark_async_capable_plan(Plan *plan, Path *path);
87 static Plan *create_append_plan(PlannerInfo *root, AppendPath *best_path,
88  int flags);
89 static Plan *create_merge_append_plan(PlannerInfo *root, MergeAppendPath *best_path,
90  int flags);
91 static Result *create_group_result_plan(PlannerInfo *root,
92  GroupResultPath *best_path);
93 static ProjectSet *create_project_set_plan(PlannerInfo *root, ProjectSetPath *best_path);
94 static Material *create_material_plan(PlannerInfo *root, MaterialPath *best_path,
95  int flags);
96 static Memoize *create_memoize_plan(PlannerInfo *root, MemoizePath *best_path,
97  int flags);
98 static Plan *create_unique_plan(PlannerInfo *root, UniquePath *best_path,
99  int flags);
100 static Gather *create_gather_plan(PlannerInfo *root, GatherPath *best_path);
101 static Plan *create_projection_plan(PlannerInfo *root,
102  ProjectionPath *best_path,
103  int flags);
104 static Plan *inject_projection_plan(Plan *subplan, List *tlist, bool parallel_safe);
105 static Sort *create_sort_plan(PlannerInfo *root, SortPath *best_path, int flags);
106 static IncrementalSort *create_incrementalsort_plan(PlannerInfo *root,
107  IncrementalSortPath *best_path, int flags);
108 static Group *create_group_plan(PlannerInfo *root, GroupPath *best_path);
109 static Unique *create_upper_unique_plan(PlannerInfo *root, UpperUniquePath *best_path,
110  int flags);
111 static Agg *create_agg_plan(PlannerInfo *root, AggPath *best_path);
112 static Plan *create_groupingsets_plan(PlannerInfo *root, GroupingSetsPath *best_path);
113 static Result *create_minmaxagg_plan(PlannerInfo *root, MinMaxAggPath *best_path);
114 static WindowAgg *create_windowagg_plan(PlannerInfo *root, WindowAggPath *best_path);
115 static SetOp *create_setop_plan(PlannerInfo *root, SetOpPath *best_path,
116  int flags);
117 static RecursiveUnion *create_recursiveunion_plan(PlannerInfo *root, RecursiveUnionPath *best_path);
118 static LockRows *create_lockrows_plan(PlannerInfo *root, LockRowsPath *best_path,
119  int flags);
120 static ModifyTable *create_modifytable_plan(PlannerInfo *root, ModifyTablePath *best_path);
121 static Limit *create_limit_plan(PlannerInfo *root, LimitPath *best_path,
122  int flags);
123 static SeqScan *create_seqscan_plan(PlannerInfo *root, Path *best_path,
124  List *tlist, List *scan_clauses);
125 static SampleScan *create_samplescan_plan(PlannerInfo *root, Path *best_path,
126  List *tlist, List *scan_clauses);
127 static Scan *create_indexscan_plan(PlannerInfo *root, IndexPath *best_path,
128  List *tlist, List *scan_clauses, bool indexonly);
129 static BitmapHeapScan *create_bitmap_scan_plan(PlannerInfo *root,
130  BitmapHeapPath *best_path,
131  List *tlist, List *scan_clauses);
132 static Plan *create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
133  List **qual, List **indexqual, List **indexECs);
134 static void bitmap_subplan_mark_shared(Plan *plan);
135 static TidScan *create_tidscan_plan(PlannerInfo *root, TidPath *best_path,
136  List *tlist, List *scan_clauses);
137 static TidRangeScan *create_tidrangescan_plan(PlannerInfo *root,
138  TidRangePath *best_path,
139  List *tlist,
140  List *scan_clauses);
141 static SubqueryScan *create_subqueryscan_plan(PlannerInfo *root,
142  SubqueryScanPath *best_path,
143  List *tlist, List *scan_clauses);
144 static FunctionScan *create_functionscan_plan(PlannerInfo *root, Path *best_path,
145  List *tlist, List *scan_clauses);
146 static ValuesScan *create_valuesscan_plan(PlannerInfo *root, Path *best_path,
147  List *tlist, List *scan_clauses);
148 static TableFuncScan *create_tablefuncscan_plan(PlannerInfo *root, Path *best_path,
149  List *tlist, List *scan_clauses);
150 static CteScan *create_ctescan_plan(PlannerInfo *root, Path *best_path,
151  List *tlist, List *scan_clauses);
152 static NamedTuplestoreScan *create_namedtuplestorescan_plan(PlannerInfo *root,
153  Path *best_path, List *tlist, List *scan_clauses);
154 static Result *create_resultscan_plan(PlannerInfo *root, Path *best_path,
155  List *tlist, List *scan_clauses);
156 static WorkTableScan *create_worktablescan_plan(PlannerInfo *root, Path *best_path,
157  List *tlist, List *scan_clauses);
158 static ForeignScan *create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
159  List *tlist, List *scan_clauses);
160 static CustomScan *create_customscan_plan(PlannerInfo *root,
161  CustomPath *best_path,
162  List *tlist, List *scan_clauses);
163 static NestLoop *create_nestloop_plan(PlannerInfo *root, NestPath *best_path);
164 static MergeJoin *create_mergejoin_plan(PlannerInfo *root, MergePath *best_path);
165 static HashJoin *create_hashjoin_plan(PlannerInfo *root, HashPath *best_path);
166 static Node *replace_nestloop_params(PlannerInfo *root, Node *expr);
167 static Node *replace_nestloop_params_mutator(Node *node, PlannerInfo *root);
168 static void fix_indexqual_references(PlannerInfo *root, IndexPath *index_path,
169  List **stripped_indexquals_p,
170  List **fixed_indexquals_p);
171 static List *fix_indexorderby_references(PlannerInfo *root, IndexPath *index_path);
172 static Node *fix_indexqual_clause(PlannerInfo *root,
173  IndexOptInfo *index, int indexcol,
174  Node *clause, List *indexcolnos);
175 static Node *fix_indexqual_operand(Node *node, IndexOptInfo *index, int indexcol);
176 static List *get_switched_clauses(List *clauses, Relids outerrelids);
177 static List *order_qual_clauses(PlannerInfo *root, List *clauses);
178 static void copy_generic_path_info(Plan *dest, Path *src);
179 static void copy_plan_costsize(Plan *dest, Plan *src);
180 static void label_sort_with_costsize(PlannerInfo *root, Sort *plan,
181  double limit_tuples);
182 static void label_incrementalsort_with_costsize(PlannerInfo *root, IncrementalSort *plan,
183  List *pathkeys, double limit_tuples);
184 static SeqScan *make_seqscan(List *qptlist, List *qpqual, Index scanrelid);
185 static SampleScan *make_samplescan(List *qptlist, List *qpqual, Index scanrelid,
186  TableSampleClause *tsc);
187 static IndexScan *make_indexscan(List *qptlist, List *qpqual, Index scanrelid,
188  Oid indexid, List *indexqual, List *indexqualorig,
189  List *indexorderby, List *indexorderbyorig,
190  List *indexorderbyops,
191  ScanDirection indexscandir);
192 static IndexOnlyScan *make_indexonlyscan(List *qptlist, List *qpqual,
193  Index scanrelid, Oid indexid,
194  List *indexqual, List *recheckqual,
195  List *indexorderby,
196  List *indextlist,
197  ScanDirection indexscandir);
198 static BitmapIndexScan *make_bitmap_indexscan(Index scanrelid, Oid indexid,
199  List *indexqual,
200  List *indexqualorig);
201 static BitmapHeapScan *make_bitmap_heapscan(List *qptlist,
202  List *qpqual,
203  Plan *lefttree,
204  List *bitmapqualorig,
205  Index scanrelid);
206 static TidScan *make_tidscan(List *qptlist, List *qpqual, Index scanrelid,
207  List *tidquals);
208 static TidRangeScan *make_tidrangescan(List *qptlist, List *qpqual,
209  Index scanrelid, List *tidrangequals);
210 static SubqueryScan *make_subqueryscan(List *qptlist,
211  List *qpqual,
212  Index scanrelid,
213  Plan *subplan);
214 static FunctionScan *make_functionscan(List *qptlist, List *qpqual,
215  Index scanrelid, List *functions, bool funcordinality);
216 static ValuesScan *make_valuesscan(List *qptlist, List *qpqual,
217  Index scanrelid, List *values_lists);
218 static TableFuncScan *make_tablefuncscan(List *qptlist, List *qpqual,
219  Index scanrelid, TableFunc *tablefunc);
220 static CteScan *make_ctescan(List *qptlist, List *qpqual,
221  Index scanrelid, int ctePlanId, int cteParam);
222 static NamedTuplestoreScan *make_namedtuplestorescan(List *qptlist, List *qpqual,
223  Index scanrelid, char *enrname);
224 static WorkTableScan *make_worktablescan(List *qptlist, List *qpqual,
225  Index scanrelid, int wtParam);
226 static RecursiveUnion *make_recursive_union(List *tlist,
227  Plan *lefttree,
228  Plan *righttree,
229  int wtParam,
230  List *distinctList,
231  long numGroups);
232 static BitmapAnd *make_bitmap_and(List *bitmapplans);
233 static BitmapOr *make_bitmap_or(List *bitmapplans);
234 static NestLoop *make_nestloop(List *tlist,
235  List *joinclauses, List *otherclauses, List *nestParams,
236  Plan *lefttree, Plan *righttree,
237  JoinType jointype, bool inner_unique);
238 static HashJoin *make_hashjoin(List *tlist,
239  List *joinclauses, List *otherclauses,
240  List *hashclauses,
241  List *hashoperators, List *hashcollations,
242  List *hashkeys,
243  Plan *lefttree, Plan *righttree,
244  JoinType jointype, bool inner_unique);
245 static Hash *make_hash(Plan *lefttree,
246  List *hashkeys,
247  Oid skewTable,
248  AttrNumber skewColumn,
249  bool skewInherit);
250 static MergeJoin *make_mergejoin(List *tlist,
251  List *joinclauses, List *otherclauses,
252  List *mergeclauses,
253  Oid *mergefamilies,
254  Oid *mergecollations,
255  int *mergestrategies,
256  bool *mergenullsfirst,
257  Plan *lefttree, Plan *righttree,
258  JoinType jointype, bool inner_unique,
259  bool skip_mark_restore);
260 static Sort *make_sort(Plan *lefttree, int numCols,
261  AttrNumber *sortColIdx, Oid *sortOperators,
262  Oid *collations, bool *nullsFirst);
263 static IncrementalSort *make_incrementalsort(Plan *lefttree,
264  int numCols, int nPresortedCols,
265  AttrNumber *sortColIdx, Oid *sortOperators,
266  Oid *collations, bool *nullsFirst);
267 static Plan *prepare_sort_from_pathkeys(Plan *lefttree, List *pathkeys,
268  Relids relids,
269  const AttrNumber *reqColIdx,
270  bool adjust_tlist_in_place,
271  int *p_numsortkeys,
272  AttrNumber **p_sortColIdx,
273  Oid **p_sortOperators,
274  Oid **p_collations,
275  bool **p_nullsFirst);
276 static Sort *make_sort_from_pathkeys(Plan *lefttree, List *pathkeys,
277  Relids relids);
278 static IncrementalSort *make_incrementalsort_from_pathkeys(Plan *lefttree,
279  List *pathkeys, Relids relids, int nPresortedCols);
280 static Sort *make_sort_from_groupcols(List *groupcls,
281  AttrNumber *grpColIdx,
282  Plan *lefttree);
283 static Material *make_material(Plan *lefttree);
284 static Memoize *make_memoize(Plan *lefttree, Oid *hashoperators,
285  Oid *collations, List *param_exprs,
286  bool singlerow, bool binary_mode,
287  uint32 est_entries, Bitmapset *keyparamids);
288 static WindowAgg *make_windowagg(List *tlist, Index winref,
289  int partNumCols, AttrNumber *partColIdx, Oid *partOperators, Oid *partCollations,
290  int ordNumCols, AttrNumber *ordColIdx, Oid *ordOperators, Oid *ordCollations,
291  int frameOptions, Node *startOffset, Node *endOffset,
292  Oid startInRangeFunc, Oid endInRangeFunc,
293  Oid inRangeColl, bool inRangeAsc, bool inRangeNullsFirst,
294  List *runCondition, List *qual, bool topWindow,
295  Plan *lefttree);
296 static Group *make_group(List *tlist, List *qual, int numGroupCols,
297  AttrNumber *grpColIdx, Oid *grpOperators, Oid *grpCollations,
298  Plan *lefttree);
299 static Unique *make_unique_from_sortclauses(Plan *lefttree, List *distinctList);
300 static Unique *make_unique_from_pathkeys(Plan *lefttree,
301  List *pathkeys, int numCols);
302 static Gather *make_gather(List *qptlist, List *qpqual,
303  int nworkers, int rescan_param, bool single_copy, Plan *subplan);
304 static SetOp *make_setop(SetOpCmd cmd, SetOpStrategy strategy, Plan *lefttree,
305  List *distinctList, AttrNumber flagColIdx, int firstFlag,
306  long numGroups);
307 static LockRows *make_lockrows(Plan *lefttree, List *rowMarks, int epqParam);
308 static Result *make_result(List *tlist, Node *resconstantqual, Plan *subplan);
309 static ProjectSet *make_project_set(List *tlist, Plan *subplan);
310 static ModifyTable *make_modifytable(PlannerInfo *root, Plan *subplan,
311  CmdType operation, bool canSetTag,
312  Index nominalRelation, Index rootRelation,
313  bool partColsUpdated,
314  List *resultRelations,
315  List *updateColnosLists,
316  List *withCheckOptionLists, List *returningLists,
317  List *rowMarks, OnConflictExpr *onconflict,
318  List *mergeActionLists, List *mergeJoinConditions,
319  int epqParam);
320 static GatherMerge *create_gather_merge_plan(PlannerInfo *root,
321  GatherMergePath *best_path);
322 
323 
324 /*
325  * create_plan
326  * Creates the access plan for a query by recursively processing the
327  * desired tree of pathnodes, starting at the node 'best_path'. For
328  * every pathnode found, we create a corresponding plan node containing
329  * appropriate id, target list, and qualification information.
330  *
331  * The tlists and quals in the plan tree are still in planner format,
332  * ie, Vars still correspond to the parser's numbering. This will be
333  * fixed later by setrefs.c.
334  *
335  * best_path is the best access path
336  *
337  * Returns a Plan tree.
338  */
339 Plan *
340 create_plan(PlannerInfo *root, Path *best_path)
341 {
342  Plan *plan;
343 
344  /* plan_params should not be in use in current query level */
345  Assert(root->plan_params == NIL);
346 
347  /* Initialize this module's workspace in PlannerInfo */
348  root->curOuterRels = NULL;
349  root->curOuterParams = NIL;
350 
351  /* Recursively process the path tree, demanding the correct tlist result */
352  plan = create_plan_recurse(root, best_path, CP_EXACT_TLIST);
353 
354  /*
355  * Make sure the topmost plan node's targetlist exposes the original
356  * column names and other decorative info. Targetlists generated within
357  * the planner don't bother with that stuff, but we must have it on the
358  * top-level tlist seen at execution time. However, ModifyTable plan
359  * nodes don't have a tlist matching the querytree targetlist.
360  */
361  if (!IsA(plan, ModifyTable))
362  apply_tlist_labeling(plan->targetlist, root->processed_tlist);
363 
364  /*
365  * Attach any initPlans created in this query level to the topmost plan
366  * node. (In principle the initplans could go in any plan node at or
367  * above where they're referenced, but there seems no reason to put them
368  * any lower than the topmost node for the query level. Also, see
369  * comments for SS_finalize_plan before you try to change this.)
370  */
371  SS_attach_initplans(root, plan);
372 
373  /* Check we successfully assigned all NestLoopParams to plan nodes */
374  if (root->curOuterParams != NIL)
375  elog(ERROR, "failed to assign all NestLoopParams to plan nodes");
376 
377  /*
378  * Reset plan_params to ensure param IDs used for nestloop params are not
379  * re-used later
380  */
381  root->plan_params = NIL;
382 
383  return plan;
384 }
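/*
 * Usage sketch (not part of this file): create_plan() is invoked once per
 * query level after path selection finishes; a caller such as
 * standard_planner() in planner.c does roughly
 *
 *		best_path = get_cheapest_fractional_path(final_rel, tuple_fraction);
 *		top_plan = create_plan(root, best_path);
 *
 * see planner.c for the real call site.
 */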
385 
386 /*
387  * create_plan_recurse
388  * Recursive guts of create_plan().
389  */
390 static Plan *
391 create_plan_recurse(PlannerInfo *root, Path *best_path, int flags)
392 {
393  Plan *plan;
394 
395  /* Guard against stack overflow due to overly complex plans */
396  check_stack_depth();
397 
398  switch (best_path->pathtype)
399  {
400  case T_SeqScan:
401  case T_SampleScan:
402  case T_IndexScan:
403  case T_IndexOnlyScan:
404  case T_BitmapHeapScan:
405  case T_TidScan:
406  case T_TidRangeScan:
407  case T_SubqueryScan:
408  case T_FunctionScan:
409  case T_TableFuncScan:
410  case T_ValuesScan:
411  case T_CteScan:
412  case T_WorkTableScan:
413  case T_NamedTuplestoreScan:
414  case T_ForeignScan:
415  case T_CustomScan:
416  plan = create_scan_plan(root, best_path, flags);
417  break;
418  case T_HashJoin:
419  case T_MergeJoin:
420  case T_NestLoop:
421  plan = create_join_plan(root,
422  (JoinPath *) best_path);
423  break;
424  case T_Append:
425  plan = create_append_plan(root,
426  (AppendPath *) best_path,
427  flags);
428  break;
429  case T_MergeAppend:
430  plan = create_merge_append_plan(root,
431  (MergeAppendPath *) best_path,
432  flags);
433  break;
434  case T_Result:
435  if (IsA(best_path, ProjectionPath))
436  {
437  plan = create_projection_plan(root,
438  (ProjectionPath *) best_path,
439  flags);
440  }
441  else if (IsA(best_path, MinMaxAggPath))
442  {
443  plan = (Plan *) create_minmaxagg_plan(root,
444  (MinMaxAggPath *) best_path);
445  }
446  else if (IsA(best_path, GroupResultPath))
447  {
448  plan = (Plan *) create_group_result_plan(root,
449  (GroupResultPath *) best_path);
450  }
451  else
452  {
453  /* Simple RTE_RESULT base relation */
454  Assert(IsA(best_path, Path));
455  plan = create_scan_plan(root, best_path, flags);
456  }
457  break;
458  case T_ProjectSet:
459  plan = (Plan *) create_project_set_plan(root,
460  (ProjectSetPath *) best_path);
461  break;
462  case T_Material:
463  plan = (Plan *) create_material_plan(root,
464  (MaterialPath *) best_path,
465  flags);
466  break;
467  case T_Memoize:
468  plan = (Plan *) create_memoize_plan(root,
469  (MemoizePath *) best_path,
470  flags);
471  break;
472  case T_Unique:
473  if (IsA(best_path, UpperUniquePath))
474  {
475  plan = (Plan *) create_upper_unique_plan(root,
476  (UpperUniquePath *) best_path,
477  flags);
478  }
479  else
480  {
481  Assert(IsA(best_path, UniquePath));
482  plan = create_unique_plan(root,
483  (UniquePath *) best_path,
484  flags);
485  }
486  break;
487  case T_Gather:
488  plan = (Plan *) create_gather_plan(root,
489  (GatherPath *) best_path);
490  break;
491  case T_Sort:
492  plan = (Plan *) create_sort_plan(root,
493  (SortPath *) best_path,
494  flags);
495  break;
496  case T_IncrementalSort:
497  plan = (Plan *) create_incrementalsort_plan(root,
498  (IncrementalSortPath *) best_path,
499  flags);
500  break;
501  case T_Group:
502  plan = (Plan *) create_group_plan(root,
503  (GroupPath *) best_path);
504  break;
505  case T_Agg:
506  if (IsA(best_path, GroupingSetsPath))
507  plan = create_groupingsets_plan(root,
508  (GroupingSetsPath *) best_path);
509  else
510  {
511  Assert(IsA(best_path, AggPath));
512  plan = (Plan *) create_agg_plan(root,
513  (AggPath *) best_path);
514  }
515  break;
516  case T_WindowAgg:
517  plan = (Plan *) create_windowagg_plan(root,
518  (WindowAggPath *) best_path);
519  break;
520  case T_SetOp:
521  plan = (Plan *) create_setop_plan(root,
522  (SetOpPath *) best_path,
523  flags);
524  break;
525  case T_RecursiveUnion:
526  plan = (Plan *) create_recursiveunion_plan(root,
527  (RecursiveUnionPath *) best_path);
528  break;
529  case T_LockRows:
530  plan = (Plan *) create_lockrows_plan(root,
531  (LockRowsPath *) best_path,
532  flags);
533  break;
534  case T_ModifyTable:
535  plan = (Plan *) create_modifytable_plan(root,
536  (ModifyTablePath *) best_path);
537  break;
538  case T_Limit:
539  plan = (Plan *) create_limit_plan(root,
540  (LimitPath *) best_path,
541  flags);
542  break;
543  case T_GatherMerge:
544  plan = (Plan *) create_gather_merge_plan(root,
545  (GatherMergePath *) best_path);
546  break;
547  default:
548  elog(ERROR, "unrecognized node type: %d",
549  (int) best_path->pathtype);
550  plan = NULL; /* keep compiler quiet */
551  break;
552  }
553 
554  return plan;
555 }
556 
557 /*
558  * create_scan_plan
559  * Create a scan plan for the parent relation of 'best_path'.
560  */
561 static Plan *
562 create_scan_plan(PlannerInfo *root, Path *best_path, int flags)
563 {
564  RelOptInfo *rel = best_path->parent;
565  List *scan_clauses;
566  List *gating_clauses;
567  List *tlist;
568  Plan *plan;
569 
570  /*
571  * Extract the relevant restriction clauses from the parent relation. The
572  * executor must apply all these restrictions during the scan, except for
573  * pseudoconstants which we'll take care of below.
574  *
575  * If this is a plain indexscan or index-only scan, we need not consider
576  * restriction clauses that are implied by the index's predicate, so use
577  * indrestrictinfo not baserestrictinfo. Note that we can't do that for
578  * bitmap indexscans, since there's not necessarily a single index
579  * involved; but it doesn't matter since create_bitmap_scan_plan() will be
580  * able to get rid of such clauses anyway via predicate proof.
581  */
582  switch (best_path->pathtype)
583  {
584  case T_IndexScan:
585  case T_IndexOnlyScan:
586  scan_clauses = castNode(IndexPath, best_path)->indexinfo->indrestrictinfo;
587  break;
588  default:
589  scan_clauses = rel->baserestrictinfo;
590  break;
591  }
592 
593  /*
594  * If this is a parameterized scan, we also need to enforce all the join
595  * clauses available from the outer relation(s).
596  *
597  * For paranoia's sake, don't modify the stored baserestrictinfo list.
598  */
599  if (best_path->param_info)
600  scan_clauses = list_concat_copy(scan_clauses,
601  best_path->param_info->ppi_clauses);
602 
603  /*
604  * Detect whether we have any pseudoconstant quals to deal with. Then, if
605  * we'll need a gating Result node, it will be able to project, so there
606  * are no requirements on the child's tlist.
607  *
608  * If this replaces a join, it must be a foreign scan or a custom scan,
609  * and the FDW or the custom scan provider would have stored in the best
610  * path the list of RestrictInfo nodes to apply to the join; check against
611  * that list in that case.
612  */
613  if (IS_JOIN_REL(rel))
614  {
615  List *join_clauses;
616 
617  Assert(best_path->pathtype == T_ForeignScan ||
618  best_path->pathtype == T_CustomScan);
619  if (best_path->pathtype == T_ForeignScan)
620  join_clauses = ((ForeignPath *) best_path)->fdw_restrictinfo;
621  else
622  join_clauses = ((CustomPath *) best_path)->custom_restrictinfo;
623 
624  gating_clauses = get_gating_quals(root, join_clauses);
625  }
626  else
627  gating_clauses = get_gating_quals(root, scan_clauses);
628  if (gating_clauses)
629  flags = 0;
630 
631  /*
632  * For table scans, rather than using the relation targetlist (which is
633  * only those Vars actually needed by the query), we prefer to generate a
634  * tlist containing all Vars in order. This will allow the executor to
635  * optimize away projection of the table tuples, if possible.
636  *
637  * But if the caller is going to ignore our tlist anyway, then don't
638  * bother generating one at all. We use an exact equality test here, so
639  * that this only applies when CP_IGNORE_TLIST is the only flag set.
640  */
641  if (flags == CP_IGNORE_TLIST)
642  {
643  tlist = NULL;
644  }
645  else if (use_physical_tlist(root, best_path, flags))
646  {
647  if (best_path->pathtype == T_IndexOnlyScan)
648  {
649  /* For index-only scan, the preferred tlist is the index's */
650  tlist = copyObject(((IndexPath *) best_path)->indexinfo->indextlist);
651 
652  /*
653  * Transfer sortgroupref data to the replacement tlist, if
654  * requested (use_physical_tlist checked that this will work).
655  */
656  if (flags & CP_LABEL_TLIST)
657  apply_pathtarget_labeling_to_tlist(tlist, best_path->pathtarget);
658  }
659  else
660  {
661  tlist = build_physical_tlist(root, rel);
662  if (tlist == NIL)
663  {
664  /* Failed because of dropped cols, so use regular method */
665  tlist = build_path_tlist(root, best_path);
666  }
667  else
668  {
669  /* As above, transfer sortgroupref data to replacement tlist */
670  if (flags & CP_LABEL_TLIST)
671  apply_pathtarget_labeling_to_tlist(tlist, best_path->pathtarget);
672  }
673  }
674  }
675  else
676  {
677  tlist = build_path_tlist(root, best_path);
678  }
679 
680  switch (best_path->pathtype)
681  {
682  case T_SeqScan:
683  plan = (Plan *) create_seqscan_plan(root,
684  best_path,
685  tlist,
686  scan_clauses);
687  break;
688 
689  case T_SampleScan:
690  plan = (Plan *) create_samplescan_plan(root,
691  best_path,
692  tlist,
693  scan_clauses);
694  break;
695 
696  case T_IndexScan:
697  plan = (Plan *) create_indexscan_plan(root,
698  (IndexPath *) best_path,
699  tlist,
700  scan_clauses,
701  false);
702  break;
703 
704  case T_IndexOnlyScan:
705  plan = (Plan *) create_indexscan_plan(root,
706  (IndexPath *) best_path,
707  tlist,
708  scan_clauses,
709  true);
710  break;
711 
712  case T_BitmapHeapScan:
713  plan = (Plan *) create_bitmap_scan_plan(root,
714  (BitmapHeapPath *) best_path,
715  tlist,
716  scan_clauses);
717  break;
718 
719  case T_TidScan:
720  plan = (Plan *) create_tidscan_plan(root,
721  (TidPath *) best_path,
722  tlist,
723  scan_clauses);
724  break;
725 
726  case T_TidRangeScan:
727  plan = (Plan *) create_tidrangescan_plan(root,
728  (TidRangePath *) best_path,
729  tlist,
730  scan_clauses);
731  break;
732 
733  case T_SubqueryScan:
734  plan = (Plan *) create_subqueryscan_plan(root,
735  (SubqueryScanPath *) best_path,
736  tlist,
737  scan_clauses);
738  break;
739 
740  case T_FunctionScan:
741  plan = (Plan *) create_functionscan_plan(root,
742  best_path,
743  tlist,
744  scan_clauses);
745  break;
746 
747  case T_TableFuncScan:
748  plan = (Plan *) create_tablefuncscan_plan(root,
749  best_path,
750  tlist,
751  scan_clauses);
752  break;
753 
754  case T_ValuesScan:
755  plan = (Plan *) create_valuesscan_plan(root,
756  best_path,
757  tlist,
758  scan_clauses);
759  break;
760 
761  case T_CteScan:
762  plan = (Plan *) create_ctescan_plan(root,
763  best_path,
764  tlist,
765  scan_clauses);
766  break;
767 
768  case T_NamedTuplestoreScan:
769  plan = (Plan *) create_namedtuplestorescan_plan(root,
770  best_path,
771  tlist,
772  scan_clauses);
773  break;
774 
775  case T_Result:
776  plan = (Plan *) create_resultscan_plan(root,
777  best_path,
778  tlist,
779  scan_clauses);
780  break;
781 
782  case T_WorkTableScan:
783  plan = (Plan *) create_worktablescan_plan(root,
784  best_path,
785  tlist,
786  scan_clauses);
787  break;
788 
789  case T_ForeignScan:
790  plan = (Plan *) create_foreignscan_plan(root,
791  (ForeignPath *) best_path,
792  tlist,
793  scan_clauses);
794  break;
795 
796  case T_CustomScan:
797  plan = (Plan *) create_customscan_plan(root,
798  (CustomPath *) best_path,
799  tlist,
800  scan_clauses);
801  break;
802 
803  default:
804  elog(ERROR, "unrecognized node type: %d",
805  (int) best_path->pathtype);
806  plan = NULL; /* keep compiler quiet */
807  break;
808  }
809 
810  /*
811  * If there are any pseudoconstant clauses attached to this node, insert a
812  * gating Result node that evaluates the pseudoconstants as one-time
813  * quals.
814  */
815  if (gating_clauses)
816  plan = create_gating_plan(root, best_path, plan, gating_clauses);
817 
818  return plan;
819 }
820 
821 /*
822  * Build a target list (ie, a list of TargetEntry) for the Path's output.
823  *
824  * This is almost just make_tlist_from_pathtarget(), but we also have to
825  * deal with replacing nestloop params.
826  */
827 static List *
828 build_path_tlist(PlannerInfo *root, Path *path)
829 {
830  List *tlist = NIL;
831  Index *sortgrouprefs = path->pathtarget->sortgrouprefs;
832  int resno = 1;
833  ListCell *v;
834 
835  foreach(v, path->pathtarget->exprs)
836  {
837  Node *node = (Node *) lfirst(v);
838  TargetEntry *tle;
839 
840  /*
841  * If it's a parameterized path, there might be lateral references in
842  * the tlist, which need to be replaced with Params. There's no need
843  * to remake the TargetEntry nodes, so apply this to each list item
844  * separately.
845  */
846  if (path->param_info)
847  node = replace_nestloop_params(root, node);
848 
849  tle = makeTargetEntry((Expr *) node,
850  resno,
851  NULL,
852  false);
853  if (sortgrouprefs)
854  tle->ressortgroupref = sortgrouprefs[resno - 1];
855 
856  tlist = lappend(tlist, tle);
857  resno++;
858  }
859  return tlist;
860 }
861 
862 /*
863  * use_physical_tlist
864  * Decide whether to use a tlist matching relation structure,
865  * rather than only those Vars actually referenced.
866  */
867 static bool
868 use_physical_tlist(PlannerInfo *root, Path *path, int flags)
869 {
870  RelOptInfo *rel = path->parent;
871  int i;
872  ListCell *lc;
873 
874  /*
875  * Forget it if either exact tlist or small tlist is demanded.
876  */
877  if (flags & (CP_EXACT_TLIST | CP_SMALL_TLIST))
878  return false;
879 
880  /*
881  * We can do this for real relation scans, subquery scans, function scans,
882  * tablefunc scans, values scans, and CTE scans (but not for, eg, joins).
883  */
884  if (rel->rtekind != RTE_RELATION &&
885  rel->rtekind != RTE_SUBQUERY &&
886  rel->rtekind != RTE_FUNCTION &&
887  rel->rtekind != RTE_TABLEFUNC &&
888  rel->rtekind != RTE_VALUES &&
889  rel->rtekind != RTE_CTE)
890  return false;
891 
892  /*
893  * Can't do it with inheritance cases either (mainly because Append
894  * doesn't project; this test may be unnecessary now that
895  * create_append_plan instructs its children to return an exact tlist).
896  */
897  if (rel->reloptkind != RELOPT_BASEREL)
898  return false;
899 
900  /*
901  * Also, don't do it to a CustomPath; the premise that we're extracting
902  * columns from a simple physical tuple is unlikely to hold for those.
903  * (When it does make sense, the custom path creator can set up the path's
904  * pathtarget that way.)
905  */
906  if (IsA(path, CustomPath))
907  return false;
908 
909  /*
910  * If a bitmap scan's tlist is empty, keep it as-is. This may allow the
911  * executor to skip heap page fetches, and in any case, the benefit of
912  * using a physical tlist instead would be minimal.
913  */
914  if (IsA(path, BitmapHeapPath) &&
915  path->pathtarget->exprs == NIL)
916  return false;
917 
918  /*
919  * Can't do it if any system columns or whole-row Vars are requested.
920  * (This could possibly be fixed but would take some fragile assumptions
921  * in setrefs.c, I think.)
922  */
923  for (i = rel->min_attr; i <= 0; i++)
924  {
925  if (!bms_is_empty(rel->attr_needed[i - rel->min_attr]))
926  return false;
927  }
928 
929  /*
930  * Can't do it if the rel is required to emit any placeholder expressions,
931  * either.
932  */
933  foreach(lc, root->placeholder_list)
934  {
935  PlaceHolderInfo *phinfo = (PlaceHolderInfo *) lfirst(lc);
936 
937  if (bms_nonempty_difference(phinfo->ph_needed, rel->relids) &&
938  bms_is_subset(phinfo->ph_eval_at, rel->relids))
939  return false;
940  }
941 
942  /*
943  * For an index-only scan, the "physical tlist" is the index's indextlist.
944  * We can only return that without a projection if all the index's columns
945  * are returnable.
946  */
947  if (path->pathtype == T_IndexOnlyScan)
948  {
949  IndexOptInfo *indexinfo = ((IndexPath *) path)->indexinfo;
950 
951  for (i = 0; i < indexinfo->ncolumns; i++)
952  {
953  if (!indexinfo->canreturn[i])
954  return false;
955  }
956  }
957 
958  /*
959  * Also, can't do it if CP_LABEL_TLIST is specified and path is requested
960  * to emit any sort/group columns that are not simple Vars. (If they are
961  * simple Vars, they should appear in the physical tlist, and
962  * apply_pathtarget_labeling_to_tlist will take care of getting them
963  * labeled again.) We also have to check that no two sort/group columns
964  * are the same Var, else that element of the physical tlist would need
965  * conflicting ressortgroupref labels.
966  */
967  if ((flags & CP_LABEL_TLIST) && path->pathtarget->sortgrouprefs)
968  {
969  Bitmapset *sortgroupatts = NULL;
970 
971  i = 0;
972  foreach(lc, path->pathtarget->exprs)
973  {
974  Expr *expr = (Expr *) lfirst(lc);
975 
976  if (path->pathtarget->sortgrouprefs[i])
977  {
978  if (expr && IsA(expr, Var))
979  {
980  int attno = ((Var *) expr)->varattno;
981 
982  attno -= FirstLowInvalidHeapAttributeNumber;
983  if (bms_is_member(attno, sortgroupatts))
984  return false;
985  sortgroupatts = bms_add_member(sortgroupatts, attno);
986  }
987  else
988  return false;
989  }
990  i++;
991  }
992  }
993 
994  return true;
995 }
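/*
 * Illustration (a sketch, not from the original file) of the trade-off
 * decided above: in
 *
 *		SELECT t.a FROM t JOIN u USING (id);
 *
 * a seqscan of "t" may return its whole physical tuple unchanged, letting
 * the executor skip the projection step entirely, whereas a Sort or Hash
 * above the scan would instead pass CP_SMALL_TLIST so that only "a" and
 * "id" need to be stored.
 */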
996 
997 /*
998  * get_gating_quals
999  * See if there are pseudoconstant quals in a node's quals list
1000  *
1001  * If the node's quals list includes any pseudoconstant quals,
1002  * return just those quals.
1003  */
1004 static List *
1005 get_gating_quals(PlannerInfo *root, List *quals)
1006 {
1007  /* No need to look if we know there are no pseudoconstants */
1008  if (!root->hasPseudoConstantQuals)
1009  return NIL;
1010 
1011  /* Sort into desirable execution order while still in RestrictInfo form */
1012  quals = order_qual_clauses(root, quals);
1013 
1014  /* Pull out any pseudoconstant quals from the RestrictInfo list */
1015  return extract_actual_clauses(quals, true);
1016 }
1017 
1018 /*
1019  * create_gating_plan
1020  * Deal with pseudoconstant qual clauses
1021  *
1022  * Add a gating Result node atop the already-built plan.
1023  */
1024 static Plan *
1025 create_gating_plan(PlannerInfo *root, Path *path, Plan *plan,
1026  List *gating_quals)
1027 {
1028  Plan *gplan;
1029  Plan *splan;
1030 
1031  Assert(gating_quals);
1032 
1033  /*
1034  * We might have a trivial Result plan already. Stacking one Result atop
1035  * another is silly, so if that applies, just discard the input plan.
1036  * (We're assuming its targetlist is uninteresting; it should be either
1037  * the same as the result of build_path_tlist, or a simplified version.)
1038  */
1039  splan = plan;
1040  if (IsA(plan, Result))
1041  {
1042  Result *rplan = (Result *) plan;
1043 
1044  if (rplan->plan.lefttree == NULL &&
1045  rplan->resconstantqual == NULL)
1046  splan = NULL;
1047  }
1048 
1049  /*
1050  * Since we need a Result node anyway, always return the path's requested
1051  * tlist; that's never a wrong choice, even if the parent node didn't ask
1052  * for CP_EXACT_TLIST.
1053  */
1054  gplan = (Plan *) make_result(build_path_tlist(root, path),
1055  (Node *) gating_quals,
1056  splan);
1057 
1058  /*
1059  * Notice that we don't change cost or size estimates when doing gating.
1060  * The costs of qual eval were already included in the subplan's cost.
1061  * Leaving the size alone amounts to assuming that the gating qual will
1062  * succeed, which is the conservative estimate for planning upper queries.
1063  * We certainly don't want to assume the output size is zero (unless the
1064  * gating qual is actually constant FALSE, and that case is dealt with in
1065  * clausesel.c). Interpolating between the two cases is silly, because it
1066  * doesn't reflect what will really happen at runtime, and besides which
1067  * in most cases we have only a very bad idea of the probability of the
1068  * gating qual being true.
1069  */
1070  copy_plan_costsize(gplan, plan);
1071 
1072  /* Gating quals could be unsafe, so better use the Path's safety flag */
1073  gplan->parallel_safe = path->parallel_safe;
1074 
1075  return gplan;
1076 }
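/*
 * Example (editorial sketch): a "pseudoconstant" qual contains no Vars of
 * the current query level and no volatile functions, e.g. an outer
 * reference such as
 *
 *		... WHERE outer_tab.flag AND inner_tab.x = 1
 *
 * seen from inside a lateral subquery, where outer_tab.flag becomes a
 * Param.  Such a qual need only be checked once per (re)scan, so it becomes
 * the one-time qual of the gating Result built here instead of being
 * re-evaluated for every row.
 */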
1077 
1078 /*
1079  * create_join_plan
1080  * Create a join plan for 'best_path' and (recursively) plans for its
1081  * inner and outer paths.
1082  */
1083 static Plan *
1084 create_join_plan(PlannerInfo *root, JoinPath *best_path)
1085 {
1086  Plan *plan;
1087  List *gating_clauses;
1088 
1089  switch (best_path->path.pathtype)
1090  {
1091  case T_MergeJoin:
1092  plan = (Plan *) create_mergejoin_plan(root,
1093  (MergePath *) best_path);
1094  break;
1095  case T_HashJoin:
1096  plan = (Plan *) create_hashjoin_plan(root,
1097  (HashPath *) best_path);
1098  break;
1099  case T_NestLoop:
1100  plan = (Plan *) create_nestloop_plan(root,
1101  (NestPath *) best_path);
1102  break;
1103  default:
1104  elog(ERROR, "unrecognized node type: %d",
1105  (int) best_path->path.pathtype);
1106  plan = NULL; /* keep compiler quiet */
1107  break;
1108  }
1109 
1110  /*
1111  * If there are any pseudoconstant clauses attached to this node, insert a
1112  * gating Result node that evaluates the pseudoconstants as one-time
1113  * quals.
1114  */
1115  gating_clauses = get_gating_quals(root, best_path->joinrestrictinfo);
1116  if (gating_clauses)
1117  plan = create_gating_plan(root, (Path *) best_path, plan,
1118  gating_clauses);
1119 
1120 #ifdef NOT_USED
1121 
1122  /*
1123  * * Expensive function pullups may have pulled local predicates * into
1124  * this path node. Put them in the qpqual of the plan node. * JMH,
1125  * 6/15/92
1126  */
1127  if (get_loc_restrictinfo(best_path) != NIL)
1128  set_qpqual((Plan) plan,
1129  list_concat(get_qpqual((Plan) plan),
1130  get_actual_clauses(get_loc_restrictinfo(best_path))));
1131 #endif
1132 
1133  return plan;
1134 }
1135 
1136 /*
1137  * mark_async_capable_plan
1138  * Check whether the Plan node created from a Path node is async-capable,
1139  * and if so, mark the Plan node as such and return true, otherwise
1140  * return false.
1141  */
1142 static bool
1143 mark_async_capable_plan(Plan *plan, Path *path)
1144 {
1145  switch (nodeTag(path))
1146  {
1147  case T_SubqueryScanPath:
1148  {
1149  SubqueryScan *scan_plan = (SubqueryScan *) plan;
1150 
1151  /*
1152  * If the generated plan node includes a gating Result node,
1153  * we can't execute it asynchronously.
1154  */
1155  if (IsA(plan, Result))
1156  return false;
1157 
1158  /*
1159  * If a SubqueryScan node atop of an async-capable plan node
1160  * is deletable, consider it as async-capable.
1161  */
1162  if (trivial_subqueryscan(scan_plan) &&
1163  mark_async_capable_plan(scan_plan->subplan,
1164  ((SubqueryScanPath *) path)->subpath))
1165  break;
1166  return false;
1167  }
1168  case T_ForeignPath:
1169  {
1170  FdwRoutine *fdwroutine = path->parent->fdwroutine;
1171 
1172  /*
1173  * If the generated plan node includes a gating Result node,
1174  * we can't execute it asynchronously.
1175  */
1176  if (IsA(plan, Result))
1177  return false;
1178 
1179  Assert(fdwroutine != NULL);
1180  if (fdwroutine->IsForeignPathAsyncCapable != NULL &&
1181  fdwroutine->IsForeignPathAsyncCapable((ForeignPath *) path))
1182  break;
1183  return false;
1184  }
1185  case T_ProjectionPath:
1186 
1187  /*
1188  * If the generated plan node includes a Result node for the
1189  * projection, we can't execute it asynchronously.
1190  */
1191  if (IsA(plan, Result))
1192  return false;
1193 
1194  /*
1195  * create_projection_plan() would have pulled up the subplan, so
1196  * check the capability using the subpath.
1197  */
1198  if (mark_async_capable_plan(plan,
1199  ((ProjectionPath *) path)->subpath))
1200  return true;
1201  return false;
1202  default:
1203  return false;
1204  }
1205 
1206  plan->async_capable = true;
1207 
1208  return true;
1209 }
1210 
1211 /*
1212  * create_append_plan
1213  * Create an Append plan for 'best_path' and (recursively) plans
1214  * for its subpaths.
1215  *
1216  * Returns a Plan node.
1217  */
1218 static Plan *
1219 create_append_plan(PlannerInfo *root, AppendPath *best_path, int flags)
1220 {
1221  Append *plan;
1222  List *tlist = build_path_tlist(root, &best_path->path);
1223  int orig_tlist_length = list_length(tlist);
1224  bool tlist_was_changed = false;
1225  List *pathkeys = best_path->path.pathkeys;
1226  List *subplans = NIL;
1227  ListCell *subpaths;
1228  int nasyncplans = 0;
1229  RelOptInfo *rel = best_path->path.parent;
1230  PartitionPruneInfo *partpruneinfo = NULL;
1231  int nodenumsortkeys = 0;
1232  AttrNumber *nodeSortColIdx = NULL;
1233  Oid *nodeSortOperators = NULL;
1234  Oid *nodeCollations = NULL;
1235  bool *nodeNullsFirst = NULL;
1236  bool consider_async = false;
1237 
1238  /*
1239  * The subpaths list could be empty, if every child was proven empty by
1240  * constraint exclusion. In that case generate a dummy plan that returns
1241  * no rows.
1242  *
1243  * Note that an AppendPath with no members is also generated in certain
1244  * cases where there was no appending construct at all, but we know the
1245  * relation is empty (see set_dummy_rel_pathlist and mark_dummy_rel).
1246  */
1247  if (best_path->subpaths == NIL)
1248  {
1249  /* Generate a Result plan with constant-FALSE gating qual */
1250  Plan *plan;
1251 
1252  plan = (Plan *) make_result(tlist,
1253  (Node *) list_make1(makeBoolConst(false,
1254  false)),
1255  NULL);
1256 
1257  copy_generic_path_info(plan, (Path *) best_path);
1258 
1259  return plan;
1260  }
1261 
1262  /*
1263  * Otherwise build an Append plan. Note that if there's just one child,
1264  * the Append is pretty useless; but we wait till setrefs.c to get rid of
1265  * it. Doing so here doesn't work because the varno of the child scan
1266  * plan won't match the parent-rel Vars it'll be asked to emit.
1267  *
1268  * We don't have the actual creation of the Append node split out into a
1269  * separate make_xxx function. This is because we want to run
1270  * prepare_sort_from_pathkeys on it before we do so on the individual
1271  * child plans, to make cross-checking the sort info easier.
1272  */
1273  plan = makeNode(Append);
1274  plan->plan.targetlist = tlist;
1275  plan->plan.qual = NIL;
1276  plan->plan.lefttree = NULL;
1277  plan->plan.righttree = NULL;
1278  plan->apprelids = rel->relids;
1279 
1280  if (pathkeys != NIL)
1281  {
1282  /*
1283  * Compute sort column info, and adjust the Append's tlist as needed.
1284  * Because we pass adjust_tlist_in_place = true, we may ignore the
1285  * function result; it must be the same plan node. However, we then
1286  * need to detect whether any tlist entries were added.
1287  */
1288  (void) prepare_sort_from_pathkeys((Plan *) plan, pathkeys,
1289  best_path->path.parent->relids,
1290  NULL,
1291  true,
1292  &nodenumsortkeys,
1293  &nodeSortColIdx,
1294  &nodeSortOperators,
1295  &nodeCollations,
1296  &nodeNullsFirst);
1297  tlist_was_changed = (orig_tlist_length != list_length(plan->plan.targetlist));
1298  }
1299 
1300  /* If appropriate, consider async append */
1301  consider_async = (enable_async_append && pathkeys == NIL &&
1302  !best_path->path.parallel_safe &&
1303  list_length(best_path->subpaths) > 1);
1304 
1305  /* Build the plan for each child */
1306  foreach(subpaths, best_path->subpaths)
1307  {
1308  Path *subpath = (Path *) lfirst(subpaths);
1309  Plan *subplan;
1310 
1311  /* Must insist that all children return the same tlist */
1312  subplan = create_plan_recurse(root, subpath, CP_EXACT_TLIST);
1313 
1314  /*
1315  * For ordered Appends, we must insert a Sort node if subplan isn't
1316  * sufficiently ordered.
1317  */
1318  if (pathkeys != NIL)
1319  {
1320  int numsortkeys;
1321  AttrNumber *sortColIdx;
1322  Oid *sortOperators;
1323  Oid *collations;
1324  bool *nullsFirst;
1325 
1326  /*
1327  * Compute sort column info, and adjust subplan's tlist as needed.
1328  * We must apply prepare_sort_from_pathkeys even to subplans that
1329  * don't need an explicit sort, to make sure they are returning
1330  * the same sort key columns the Append expects.
1331  */
1332  subplan = prepare_sort_from_pathkeys(subplan, pathkeys,
1333  subpath->parent->relids,
1334  nodeSortColIdx,
1335  false,
1336  &numsortkeys,
1337  &sortColIdx,
1338  &sortOperators,
1339  &collations,
1340  &nullsFirst);
1341 
1342  /*
1343  * Check that we got the same sort key information. We just
1344  * Assert that the sortops match, since those depend only on the
1345  * pathkeys; but it seems like a good idea to check the sort
1346  * column numbers explicitly, to ensure the tlists match up.
1347  */
1348  Assert(numsortkeys == nodenumsortkeys);
1349  if (memcmp(sortColIdx, nodeSortColIdx,
1350  numsortkeys * sizeof(AttrNumber)) != 0)
1351  elog(ERROR, "Append child's targetlist doesn't match Append");
1352  Assert(memcmp(sortOperators, nodeSortOperators,
1353  numsortkeys * sizeof(Oid)) == 0);
1354  Assert(memcmp(collations, nodeCollations,
1355  numsortkeys * sizeof(Oid)) == 0);
1356  Assert(memcmp(nullsFirst, nodeNullsFirst,
1357  numsortkeys * sizeof(bool)) == 0);
1358 
1359  /* Now, insert a Sort node if subplan isn't sufficiently ordered */
1360  if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
1361  {
1362  Sort *sort = make_sort(subplan, numsortkeys,
1363  sortColIdx, sortOperators,
1364  collations, nullsFirst);
1365 
1366  label_sort_with_costsize(root, sort, best_path->limit_tuples);
1367  subplan = (Plan *) sort;
1368  }
1369  }
1370 
1371  /* If needed, check to see if subplan can be executed asynchronously */
1372  if (consider_async && mark_async_capable_plan(subplan, subpath))
1373  {
1374  Assert(subplan->async_capable);
1375  ++nasyncplans;
1376  }
1377 
1378  subplans = lappend(subplans, subplan);
1379  }
1380 
1381  /*
1382  * If any quals exist, they may be useful to perform further partition
1383  * pruning during execution. Gather information needed by the executor to
1384  * do partition pruning.
1385  */
1386  if (enable_partition_pruning)
1387  {
1388  List *prunequal;
1389 
1390  prunequal = extract_actual_clauses(rel->baserestrictinfo, false);
1391 
1392  if (best_path->path.param_info)
1393  {
1394  List *prmquals = best_path->path.param_info->ppi_clauses;
1395 
1396  prmquals = extract_actual_clauses(prmquals, false);
1397  prmquals = (List *) replace_nestloop_params(root,
1398  (Node *) prmquals);
1399 
1400  prunequal = list_concat(prunequal, prmquals);
1401  }
1402 
1403  if (prunequal != NIL)
1404  partpruneinfo =
1405  make_partition_pruneinfo(root, rel,
1406  best_path->subpaths,
1407  prunequal);
1408  }
1409 
1410  plan->appendplans = subplans;
1411  plan->nasyncplans = nasyncplans;
1412  plan->first_partial_plan = best_path->first_partial_path;
1413  plan->part_prune_info = partpruneinfo;
1414 
1415  copy_generic_path_info(&plan->plan, (Path *) best_path);
1416 
1417  /*
1418  * If prepare_sort_from_pathkeys added sort columns, but we were told to
1419  * produce either the exact tlist or a narrow tlist, we should get rid of
1420  * the sort columns again. We must inject a projection node to do so.
1421  */
1422  if (tlist_was_changed && (flags & (CP_EXACT_TLIST | CP_SMALL_TLIST)))
1423  {
1424  tlist = list_copy_head(plan->plan.targetlist, orig_tlist_length);
1425  return inject_projection_plan((Plan *) plan, tlist,
1426  plan->plan.parallel_safe);
1427  }
1428  else
1429  return (Plan *) plan;
1430 }
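/*
 * Note (editorial): consider_async above is gated on the
 * enable_async_append GUC, so "SET enable_async_append = off" prevents
 * Append subplans (for example postgres_fdw foreign scans) from being
 * marked async-capable by mark_async_capable_plan().
 */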
1431 
1432 /*
1433  * create_merge_append_plan
1434  * Create a MergeAppend plan for 'best_path' and (recursively) plans
1435  * for its subpaths.
1436  *
1437  * Returns a Plan node.
1438  */
1439 static Plan *
1440 create_merge_append_plan(PlannerInfo *root, MergeAppendPath *best_path,
1441  int flags)
1442 {
1443  MergeAppend *node = makeNode(MergeAppend);
1444  Plan *plan = &node->plan;
1445  List *tlist = build_path_tlist(root, &best_path->path);
1446  int orig_tlist_length = list_length(tlist);
1447  bool tlist_was_changed;
1448  List *pathkeys = best_path->path.pathkeys;
1449  List *subplans = NIL;
1450  ListCell *subpaths;
1451  RelOptInfo *rel = best_path->path.parent;
1452  PartitionPruneInfo *partpruneinfo = NULL;
1453 
1454  /*
1455  * We don't have the actual creation of the MergeAppend node split out
1456  * into a separate make_xxx function. This is because we want to run
1457  * prepare_sort_from_pathkeys on it before we do so on the individual
1458  * child plans, to make cross-checking the sort info easier.
1459  */
1460  copy_generic_path_info(plan, (Path *) best_path);
1461  plan->targetlist = tlist;
1462  plan->qual = NIL;
1463  plan->lefttree = NULL;
1464  plan->righttree = NULL;
1465  node->apprelids = rel->relids;
1466 
1467  /*
1468  * Compute sort column info, and adjust MergeAppend's tlist as needed.
1469  * Because we pass adjust_tlist_in_place = true, we may ignore the
1470  * function result; it must be the same plan node. However, we then need
1471  * to detect whether any tlist entries were added.
1472  */
1473  (void) prepare_sort_from_pathkeys(plan, pathkeys,
1474  best_path->path.parent->relids,
1475  NULL,
1476  true,
1477  &node->numCols,
1478  &node->sortColIdx,
1479  &node->sortOperators,
1480  &node->collations,
1481  &node->nullsFirst);
1482  tlist_was_changed = (orig_tlist_length != list_length(plan->targetlist));
1483 
1484  /*
1485  * Now prepare the child plans. We must apply prepare_sort_from_pathkeys
1486  * even to subplans that don't need an explicit sort, to make sure they
1487  * are returning the same sort key columns the MergeAppend expects.
1488  */
1489  foreach(subpaths, best_path->subpaths)
1490  {
1491  Path *subpath = (Path *) lfirst(subpaths);
1492  Plan *subplan;
1493  int numsortkeys;
1494  AttrNumber *sortColIdx;
1495  Oid *sortOperators;
1496  Oid *collations;
1497  bool *nullsFirst;
1498 
1499  /* Build the child plan */
1500  /* Must insist that all children return the same tlist */
1501  subplan = create_plan_recurse(root, subpath, CP_EXACT_TLIST);
1502 
1503  /* Compute sort column info, and adjust subplan's tlist as needed */
1504  subplan = prepare_sort_from_pathkeys(subplan, pathkeys,
1505  subpath->parent->relids,
1506  node->sortColIdx,
1507  false,
1508  &numsortkeys,
1509  &sortColIdx,
1510  &sortOperators,
1511  &collations,
1512  &nullsFirst);
1513 
1514  /*
1515  * Check that we got the same sort key information. We just Assert
1516  * that the sortops match, since those depend only on the pathkeys;
1517  * but it seems like a good idea to check the sort column numbers
1518  * explicitly, to ensure the tlists really do match up.
1519  */
1520  Assert(numsortkeys == node->numCols);
1521  if (memcmp(sortColIdx, node->sortColIdx,
1522  numsortkeys * sizeof(AttrNumber)) != 0)
1523  elog(ERROR, "MergeAppend child's targetlist doesn't match MergeAppend");
1524  Assert(memcmp(sortOperators, node->sortOperators,
1525  numsortkeys * sizeof(Oid)) == 0);
1526  Assert(memcmp(collations, node->collations,
1527  numsortkeys * sizeof(Oid)) == 0);
1528  Assert(memcmp(nullsFirst, node->nullsFirst,
1529  numsortkeys * sizeof(bool)) == 0);
1530 
1531  /* Now, insert a Sort node if subplan isn't sufficiently ordered */
1532  if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
1533  {
1534  Sort *sort = make_sort(subplan, numsortkeys,
1535  sortColIdx, sortOperators,
1536  collations, nullsFirst);
1537 
1538  label_sort_with_costsize(root, sort, best_path->limit_tuples);
1539  subplan = (Plan *) sort;
1540  }
1541 
1542  subplans = lappend(subplans, subplan);
1543  }
1544 
1545  /*
1546  * If any quals exist, they may be useful to perform further partition
1547  * pruning during execution. Gather information needed by the executor to
1548  * do partition pruning.
1549  */
1550  if (enable_partition_pruning)
1551  {
1552  List *prunequal;
1553 
1554  prunequal = extract_actual_clauses(rel->baserestrictinfo, false);
1555 
1556  /* We don't currently generate any parameterized MergeAppend paths */
1557  Assert(best_path->path.param_info == NULL);
1558 
1559  if (prunequal != NIL)
1560  partpruneinfo = make_partition_pruneinfo(root, rel,
1561  best_path->subpaths,
1562  prunequal);
1563  }
1564 
1565  node->mergeplans = subplans;
1566  node->part_prune_info = partpruneinfo;
1567 
1568  /*
1569  * If prepare_sort_from_pathkeys added sort columns, but we were told to
1570  * produce either the exact tlist or a narrow tlist, we should get rid of
1571  * the sort columns again. We must inject a projection node to do so.
1572  */
1573  if (tlist_was_changed && (flags & (CP_EXACT_TLIST | CP_SMALL_TLIST)))
1574  {
1575  tlist = list_copy_head(plan->targetlist, orig_tlist_length);
1576  return inject_projection_plan(plan, tlist, plan->parallel_safe);
1577  }
1578  else
1579  return plan;
1580 }
1581 
1582 /*
1583  * create_group_result_plan
1584  * Create a Result plan for 'best_path'.
1585  * This is only used for degenerate grouping cases.
1586  *
1587  * Returns a Plan node.
1588  */
1589 static Result *
1590 create_group_result_plan(PlannerInfo *root, GroupResultPath *best_path)
1591 {
1592  Result *plan;
1593  List *tlist;
1594  List *quals;
1595 
1596  tlist = build_path_tlist(root, &best_path->path);
1597 
1598  /* best_path->quals is just bare clauses */
1599  quals = order_qual_clauses(root, best_path->quals);
1600 
1601  plan = make_result(tlist, (Node *) quals, NULL);
1602 
1603  copy_generic_path_info(&plan->plan, (Path *) best_path);
1604 
1605  return plan;
1606 }
1607 
1608 /*
1609  * create_project_set_plan
1610  * Create a ProjectSet plan for 'best_path'.
1611  *
1612  * Returns a Plan node.
1613  */
1614 static ProjectSet *
1615 create_project_set_plan(PlannerInfo *root, ProjectSetPath *best_path)
1616 {
1617  ProjectSet *plan;
1618  Plan *subplan;
1619  List *tlist;
1620 
1621  /* Since we intend to project, we don't need to constrain child tlist */
1622  subplan = create_plan_recurse(root, best_path->subpath, 0);
1623 
1624  tlist = build_path_tlist(root, &best_path->path);
1625 
1626  plan = make_project_set(tlist, subplan);
1627 
1628  copy_generic_path_info(&plan->plan, (Path *) best_path);
1629 
1630  return plan;
1631 }
1632 
1633 /*
1634  * create_material_plan
1635  * Create a Material plan for 'best_path' and (recursively) plans
1636  * for its subpaths.
1637  *
1638  * Returns a Plan node.
1639  */
1640 static Material *
1641 create_material_plan(PlannerInfo *root, MaterialPath *best_path, int flags)
1642 {
1643  Material *plan;
1644  Plan *subplan;
1645 
1646  /*
1647  * We don't want any excess columns in the materialized tuples, so request
1648  * a smaller tlist. Otherwise, since Material doesn't project, tlist
1649  * requirements pass through.
1650  */
1651  subplan = create_plan_recurse(root, best_path->subpath,
1652  flags | CP_SMALL_TLIST);
1653 
1654  plan = make_material(subplan);
1655 
1656  copy_generic_path_info(&plan->plan, (Path *) best_path);
1657 
1658  return plan;
1659 }
1660 
1661 /*
1662  * create_memoize_plan
1663  * Create a Memoize plan for 'best_path' and (recursively) plans for its
1664  * subpaths.
1665  *
1666  * Returns a Plan node.
1667  */
1668 static Memoize *
1669 create_memoize_plan(PlannerInfo *root, MemoizePath *best_path, int flags)
1670 {
1671  Memoize *plan;
1672  Bitmapset *keyparamids;
1673  Plan *subplan;
1674  Oid *operators;
1675  Oid *collations;
1676  List *param_exprs = NIL;
1677  ListCell *lc;
1678  ListCell *lc2;
1679  int nkeys;
1680  int i;
1681 
1682  subplan = create_plan_recurse(root, best_path->subpath,
1683  flags | CP_SMALL_TLIST);
1684 
1685  param_exprs = (List *) replace_nestloop_params(root, (Node *)
1686  best_path->param_exprs);
1687 
1688  nkeys = list_length(param_exprs);
1689  Assert(nkeys > 0);
1690  operators = palloc(nkeys * sizeof(Oid));
1691  collations = palloc(nkeys * sizeof(Oid));
1692 
1693  i = 0;
1694  forboth(lc, param_exprs, lc2, best_path->hash_operators)
1695  {
1696  Expr *param_expr = (Expr *) lfirst(lc);
1697  Oid opno = lfirst_oid(lc2);
1698 
1699  operators[i] = opno;
1700  collations[i] = exprCollation((Node *) param_expr);
1701  i++;
1702  }
1703 
1704  keyparamids = pull_paramids((Expr *) param_exprs);
1705 
1706  plan = make_memoize(subplan, operators, collations, param_exprs,
1707  best_path->singlerow, best_path->binary_mode,
1708  best_path->est_entries, keyparamids);
1709 
1710  copy_generic_path_info(&plan->plan, (Path *) best_path);
1711 
1712  return plan;
1713 }
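/*
 * Note (editorial): a Memoize node built here sits on the inner side of a
 * parameterized nested loop and caches that side's results keyed by
 * param_exprs, so repeated outer rows carrying the same key values can
 * skip rescanning the inner plan entirely.
 */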
1714 
1715 /*
1716  * create_unique_plan
1717  * Create a Unique plan for 'best_path' and (recursively) plans
1718  * for its subpaths.
1719  *
1720  * Returns a Plan node.
1721  */
1722 static Plan *
1723 create_unique_plan(PlannerInfo *root, UniquePath *best_path, int flags)
1724 {
1725  Plan *plan;
1726  Plan *subplan;
1727  List *in_operators;
1728  List *uniq_exprs;
1729  List *newtlist;
1730  int nextresno;
1731  bool newitems;
1732  int numGroupCols;
1733  AttrNumber *groupColIdx;
1734  Oid *groupCollations;
1735  int groupColPos;
1736  ListCell *l;
1737 
1738  /* Unique doesn't project, so tlist requirements pass through */
1739  subplan = create_plan_recurse(root, best_path->subpath, flags);
1740 
1741  /* Done if we don't need to do any actual unique-ifying */
1742  if (best_path->umethod == UNIQUE_PATH_NOOP)
1743  return subplan;
1744 
1745  /*
1746  * As constructed, the subplan has a "flat" tlist containing just the Vars
1747  * needed here and at upper levels. The values we are supposed to
1748  * unique-ify may be expressions in these variables. We have to add any
1749  * such expressions to the subplan's tlist.
1750  *
1751  * The subplan may have a "physical" tlist if it is a simple scan plan. If
1752  * we're going to sort, this should be reduced to the regular tlist, so
1753  * that we don't sort more data than we need to. For hashing, the tlist
1754  * should be left as-is if we don't need to add any expressions; but if we
1755  * do have to add expressions, then a projection step will be needed at
1756  * runtime anyway, so we may as well remove unneeded items. Therefore
1757  * newtlist starts from build_path_tlist() not just a copy of the
1758  * subplan's tlist; and we don't install it into the subplan unless we are
1759  * sorting or stuff has to be added.
1760  */
1761  in_operators = best_path->in_operators;
1762  uniq_exprs = best_path->uniq_exprs;
1763 
1764  /* initialize modified subplan tlist as just the "required" vars */
1765  newtlist = build_path_tlist(root, &best_path->path);
1766  nextresno = list_length(newtlist) + 1;
1767  newitems = false;
1768 
1769  foreach(l, uniq_exprs)
1770  {
1771  Expr *uniqexpr = lfirst(l);
1772  TargetEntry *tle;
1773 
1774  tle = tlist_member(uniqexpr, newtlist);
1775  if (!tle)
1776  {
1777  tle = makeTargetEntry((Expr *) uniqexpr,
1778  nextresno,
1779  NULL,
1780  false);
1781  newtlist = lappend(newtlist, tle);
1782  nextresno++;
1783  newitems = true;
1784  }
1785  }
1786 
1787  /* Use change_plan_targetlist in case we need to insert a Result node */
1788  if (newitems || best_path->umethod == UNIQUE_PATH_SORT)
1789  subplan = change_plan_targetlist(subplan, newtlist,
1790  best_path->path.parallel_safe);
1791 
1792  /*
1793  * Build control information showing which subplan output columns are to
1794  * be examined by the grouping step. Unfortunately we can't merge this
1795  * with the previous loop, since we didn't then know which version of the
1796  * subplan tlist we'd end up using.
1797  */
1798  newtlist = subplan->targetlist;
1799  numGroupCols = list_length(uniq_exprs);
1800  groupColIdx = (AttrNumber *) palloc(numGroupCols * sizeof(AttrNumber));
1801  groupCollations = (Oid *) palloc(numGroupCols * sizeof(Oid));
1802 
1803  groupColPos = 0;
1804  foreach(l, uniq_exprs)
1805  {
1806  Expr *uniqexpr = lfirst(l);
1807  TargetEntry *tle;
1808 
1809  tle = tlist_member(uniqexpr, newtlist);
1810  if (!tle) /* shouldn't happen */
1811  elog(ERROR, "failed to find unique expression in subplan tlist");
1812  groupColIdx[groupColPos] = tle->resno;
1813  groupCollations[groupColPos] = exprCollation((Node *) tle->expr);
1814  groupColPos++;
1815  }
1816 
1817  if (best_path->umethod == UNIQUE_PATH_HASH)
1818  {
1819  Oid *groupOperators;
1820 
1821  /*
1822  * Get the hashable equality operators for the Agg node to use.
1823  * Normally these are the same as the IN clause operators, but if
1824  * those are cross-type operators then the equality operators are the
1825  * ones for the IN clause operators' RHS datatype.
1826  */
1827  groupOperators = (Oid *) palloc(numGroupCols * sizeof(Oid));
1828  groupColPos = 0;
1829  foreach(l, in_operators)
1830  {
1831  Oid in_oper = lfirst_oid(l);
1832  Oid eq_oper;
1833 
1834  if (!get_compatible_hash_operators(in_oper, NULL, &eq_oper))
1835  elog(ERROR, "could not find compatible hash operator for operator %u",
1836  in_oper);
1837  groupOperators[groupColPos++] = eq_oper;
1838  }
1839 
1840  /*
1841  * Since the Agg node is going to project anyway, we can give it the
1842  * minimum output tlist, without any stuff we might have added to the
1843  * subplan tlist.
1844  */
1845  plan = (Plan *) make_agg(build_path_tlist(root, &best_path->path),
1846  NIL,
1847  AGG_HASHED,
1848  AGGSPLIT_SIMPLE,
1849  numGroupCols,
1850  groupColIdx,
1851  groupOperators,
1852  groupCollations,
1853  NIL,
1854  NIL,
1855  best_path->path.rows,
1856  0,
1857  subplan);
1858  }
1859  else
1860  {
1861  List *sortList = NIL;
1862  Sort *sort;
1863 
1864  /* Create an ORDER BY list to sort the input compatibly */
1865  groupColPos = 0;
1866  foreach(l, in_operators)
1867  {
1868  Oid in_oper = lfirst_oid(l);
1869  Oid sortop;
1870  Oid eqop;
1871  TargetEntry *tle;
1872  SortGroupClause *sortcl;
1873 
1874  sortop = get_ordering_op_for_equality_op(in_oper, false);
1875  if (!OidIsValid(sortop)) /* shouldn't happen */
1876  elog(ERROR, "could not find ordering operator for equality operator %u",
1877  in_oper);
1878 
1879  /*
1880  * The Unique node will need equality operators. Normally these
1881  * are the same as the IN clause operators, but if those are
1882  * cross-type operators then the equality operators are the ones
1883  * for the IN clause operators' RHS datatype.
1884  */
1885  eqop = get_equality_op_for_ordering_op(sortop, NULL);
1886  if (!OidIsValid(eqop)) /* shouldn't happen */
1887  elog(ERROR, "could not find equality operator for ordering operator %u",
1888  sortop);
1889 
1890  tle = get_tle_by_resno(subplan->targetlist,
1891  groupColIdx[groupColPos]);
1892  Assert(tle != NULL);
1893 
1894  sortcl = makeNode(SortGroupClause);
1895  sortcl->tleSortGroupRef = assignSortGroupRef(tle,
1896  subplan->targetlist);
1897  sortcl->eqop = eqop;
1898  sortcl->sortop = sortop;
1899  sortcl->nulls_first = false;
1900  sortcl->hashable = false; /* no need to make this accurate */
1901  sortList = lappend(sortList, sortcl);
1902  groupColPos++;
1903  }
1904  sort = make_sort_from_sortclauses(sortList, subplan);
1905  label_sort_with_costsize(root, sort, -1.0);
1906  plan = (Plan *) make_unique_from_sortclauses((Plan *) sort, sortList);
1907  }
1908 
1909  /* Copy cost data from Path to Plan */
1910  copy_generic_path_info(plan, &best_path->path);
1911 
1912  return plan;
1913 }
1914 
1915 /*
1916  * create_gather_plan
1917  *
1918  * Create a Gather plan for 'best_path' and (recursively) plans
1919  * for its subpaths.
1920  */
1921 static Gather *
1922 create_gather_plan(PlannerInfo *root, GatherPath *best_path)
1923 {
1924  Gather *gather_plan;
1925  Plan *subplan;
1926  List *tlist;
1927 
1928  /*
1929  * Push projection down to the child node. That way, the projection work
1930  * is parallelized, and there can be no system columns in the result (they
1931  * can't travel through a tuple queue because it uses MinimalTuple
1932  * representation).
1933  */
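 /*
  * As a rough illustration: for a plan shaped like
  *
  *     Gather
  *       ->  Parallel Seq Scan on t
  *
  * requesting CP_EXACT_TLIST here makes each worker evaluate the pathtarget
  * (say, an expression such as a + b) beneath the tuple queue, so the
  * leader receives already-projected rows.
  */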
1934  subplan = create_plan_recurse(root, best_path->subpath, CP_EXACT_TLIST);
1935 
1936  tlist = build_path_tlist(root, &best_path->path);
1937 
1938  gather_plan = make_gather(tlist,
1939  NIL,
1940  best_path->num_workers,
1941  assign_special_exec_param(root),
1942  best_path->single_copy,
1943  subplan);
1944 
1945  copy_generic_path_info(&gather_plan->plan, &best_path->path);
1946 
1947  /* use parallel mode for parallel plans. */
1948  root->glob->parallelModeNeeded = true;
1949 
1950  return gather_plan;
1951 }
1952 
1953 /*
1954  * create_gather_merge_plan
1955  *
1956  * Create a Gather Merge plan for 'best_path' and (recursively)
1957  * plans for its subpaths.
1958  */
1959 static GatherMerge *
1960 create_gather_merge_plan(PlannerInfo *root, GatherMergePath *best_path)
1961 {
1962  GatherMerge *gm_plan;
1963  Plan *subplan;
1964  List *pathkeys = best_path->path.pathkeys;
1965  List *tlist = build_path_tlist(root, &best_path->path);
1966 
1967  /* As with Gather, project away columns in the workers. */
1968  subplan = create_plan_recurse(root, best_path->subpath, CP_EXACT_TLIST);
1969 
1970  /* Create a shell for a GatherMerge plan. */
1971  gm_plan = makeNode(GatherMerge);
1972  gm_plan->plan.targetlist = tlist;
1973  gm_plan->num_workers = best_path->num_workers;
1974  copy_generic_path_info(&gm_plan->plan, &best_path->path);
1975 
1976  /* Assign the rescan Param. */
1977  gm_plan->rescan_param = assign_special_exec_param(root);
1978 
1979  /* Gather Merge is pointless with no pathkeys; use Gather instead. */
1980  Assert(pathkeys != NIL);
1981 
1982  /* Compute sort column info, and adjust subplan's tlist as needed */
1983  subplan = prepare_sort_from_pathkeys(subplan, pathkeys,
1984  best_path->subpath->parent->relids,
1985  gm_plan->sortColIdx,
1986  false,
1987  &gm_plan->numCols,
1988  &gm_plan->sortColIdx,
1989  &gm_plan->sortOperators,
1990  &gm_plan->collations,
1991  &gm_plan->nullsFirst);
1992 
1993  /*
1994  * All gather merge paths should have already guaranteed the necessary
1995  * sort order. See create_gather_merge_path.
1996  */
1997  Assert(pathkeys_contained_in(pathkeys, best_path->subpath->pathkeys));
1998 
1999  /* Now insert the subplan under GatherMerge. */
2000  gm_plan->plan.lefttree = subplan;
2001 
2002  /* use parallel mode for parallel plans. */
2003  root->glob->parallelModeNeeded = true;
2004 
2005  return gm_plan;
2006 }
2007 
2008 /*
2009  * create_projection_plan
2010  *
2011  * Create a plan tree to do a projection step and (recursively) plans
2012  * for its subpaths. We may need a Result node for the projection,
2013  * but sometimes we can just let the subplan do the work.
2014  */
2015 static Plan *
2016 create_projection_plan(PlannerInfo *root, ProjectionPath *best_path, int flags)
2017 {
2018  Plan *plan;
2019  Plan *subplan;
2020  List *tlist;
2021  bool needs_result_node = false;
2022 
2023  /*
2024  * Convert our subpath to a Plan and determine whether we need a Result
2025  * node.
2026  *
2027  * In most cases where we don't need to project, create_projection_path
2028  * will have set dummypp, but not always. First, some createplan.c
2029  * routines change the tlists of their nodes. (An example is that
2030  * create_merge_append_plan might add resjunk sort columns to a
2031  * MergeAppend.) Second, create_projection_path has no way of knowing
2032  * what path node will be placed on top of the projection path and
2033  * therefore can't predict whether it will require an exact tlist. For
2034  * both of these reasons, we have to recheck here.
2035  */
2036  if (use_physical_tlist(root, &best_path->path, flags))
2037  {
2038  /*
2039  * Our caller doesn't really care what tlist we return, so we don't
2040  * actually need to project. However, we may still need to ensure
2041  * proper sortgroupref labels, if the caller cares about those.
2042  */
2043  subplan = create_plan_recurse(root, best_path->subpath, 0);
2044  tlist = subplan->targetlist;
2045  if (flags & CP_LABEL_TLIST)
2046  apply_pathtarget_labeling_to_tlist(tlist,
2047  best_path->path.pathtarget);
2048  }
2049  else if (is_projection_capable_path(best_path->subpath))
2050  {
2051  /*
2052  * Our caller requires that we return the exact tlist, but no separate
2053  * result node is needed because the subpath is projection-capable.
2054  * Tell create_plan_recurse that we're going to ignore the tlist it
2055  * produces.
2056  */
2057  subplan = create_plan_recurse(root, best_path->subpath,
2058  CP_IGNORE_TLIST);
2059  Assert(is_projection_capable_plan(subplan));
2060  tlist = build_path_tlist(root, &best_path->path);
2061  }
2062  else
2063  {
2064  /*
2065  * It looks like we need a result node, unless by good fortune the
2066  * requested tlist is exactly the one the child wants to produce.
2067  */
2068  subplan = create_plan_recurse(root, best_path->subpath, 0);
2069  tlist = build_path_tlist(root, &best_path->path);
2070  needs_result_node = !tlist_same_exprs(tlist, subplan->targetlist);
2071  }
2072 
2073  /*
2074  * If we make a different decision about whether to include a Result node
2075  * than create_projection_path did, we'll have made slightly wrong cost
2076  * estimates; but label the plan with the cost estimates we actually used,
2077  * not "corrected" ones. (XXX this could be cleaned up if we moved more
2078  * of the sortcolumn setup logic into Path creation, but that would add
2079  * expense to creating Paths we might end up not using.)
2080  */
2081  if (!needs_result_node)
2082  {
2083  /* Don't need a separate Result, just assign tlist to subplan */
2084  plan = subplan;
2085  plan->targetlist = tlist;
2086 
2087  /* Label plan with the estimated costs we actually used */
2088  plan->startup_cost = best_path->path.startup_cost;
2089  plan->total_cost = best_path->path.total_cost;
2090  plan->plan_rows = best_path->path.rows;
2091  plan->plan_width = best_path->path.pathtarget->width;
2092  plan->parallel_safe = best_path->path.parallel_safe;
2093  /* ... but don't change subplan's parallel_aware flag */
2094  }
2095  else
2096  {
2097  /* We need a Result node */
2098  plan = (Plan *) make_result(tlist, NULL, subplan);
2099 
2100  copy_generic_path_info(plan, (Path *) best_path);
2101  }
2102 
2103  return plan;
2104 }
2105 
2106 /*
2107  * inject_projection_plan
2108  * Insert a Result node to do a projection step.
2109  *
2110  * This is used in a few places where we decide on-the-fly that we need a
2111  * projection step as part of the tree generated for some Path node.
2112  * We should try to get rid of this in favor of doing it more honestly.
2113  *
2114  * One reason it's ugly is we have to be told the right parallel_safe marking
2115  * to apply (since the tlist might be unsafe even if the child plan is safe).
2116  */
2117 static Plan *
2118 inject_projection_plan(Plan *subplan, List *tlist, bool parallel_safe)
2119 {
2120  Plan *plan;
2121 
2122  plan = (Plan *) make_result(tlist, NULL, subplan);
2123 
2124  /*
2125  * In principle, we should charge tlist eval cost plus cpu_per_tuple per
2126  * row for the Result node. But the former has probably been factored in
2127  * already and the latter was not accounted for during Path construction,
2128  * so being formally correct might just make the EXPLAIN output look less
2129  * consistent not more so. Hence, just copy the subplan's cost.
2130  */
2131  copy_plan_costsize(plan, subplan);
2132  plan->parallel_safe = parallel_safe;
2133 
2134  return plan;
2135 }
2136 
2137 /*
2138  * change_plan_targetlist
2139  * Externally available wrapper for inject_projection_plan.
2140  *
2141  * This is meant for use by FDW plan-generation functions, which might
2142  * want to adjust the tlist computed by some subplan tree. In general,
2143  * a Result node is needed to compute the new tlist, but we can optimize
2144  * some cases.
2145  *
2146  * In most cases, tlist_parallel_safe can just be passed as the parallel_safe
2147  * flag of the FDW's own Path node.
2148  */
2149 Plan *
2150 change_plan_targetlist(Plan *subplan, List *tlist, bool tlist_parallel_safe)
2151 {
2152  /*
2153  * If the top plan node can't do projections and its existing target list
2154  * isn't already what we need, we need to add a Result node to help it
2155  * along.
2156  */
2157  if (!is_projection_capable_plan(subplan) &&
2158  !tlist_same_exprs(tlist, subplan->targetlist))
2159  subplan = inject_projection_plan(subplan, tlist,
2160  subplan->parallel_safe &&
2161  tlist_parallel_safe);
2162  else
2163  {
2164  /* Else we can just replace the plan node's tlist */
2165  subplan->targetlist = tlist;
2166  subplan->parallel_safe &= tlist_parallel_safe;
2167  }
2168  return subplan;
2169 }
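/*
 * A minimal usage sketch (hypothetical caller, not part of this file): an
 * FDW plan-generation function holding a finished subplan could do
 *
 *     subplan = change_plan_targetlist(subplan, fdw_tlist,
 *                                      best_path->path.parallel_safe);
 *
 * where fdw_tlist is the adjusted target list it wants to emit, relying on
 * this function to interpose a Result node only when the subplan cannot
 * project the new tlist itself.
 */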
2170 
2171 /*
2172  * create_sort_plan
2173  *
2174  * Create a Sort plan for 'best_path' and (recursively) plans
2175  * for its subpaths.
2176  */
2177 static Sort *
2178 create_sort_plan(PlannerInfo *root, SortPath *best_path, int flags)
2179 {
2180  Sort *plan;
2181  Plan *subplan;
2182 
2183  /*
2184  * We don't want any excess columns in the sorted tuples, so request a
2185  * smaller tlist. Otherwise, since Sort doesn't project, tlist
2186  * requirements pass through.
2187  */
2188  subplan = create_plan_recurse(root, best_path->subpath,
2189  flags | CP_SMALL_TLIST);
2190 
2191  /*
2192  * make_sort_from_pathkeys indirectly calls find_ec_member_matching_expr,
2193  * which will ignore any child EC members that don't belong to the given
2194  * relids. Thus, if this sort path is based on a child relation, we must
2195  * pass its relids.
2196  */
2197  plan = make_sort_from_pathkeys(subplan, best_path->path.pathkeys,
2198  IS_OTHER_REL(best_path->subpath->parent) ?
2199  best_path->path.parent->relids : NULL);
2200 
2201  copy_generic_path_info(&plan->plan, (Path *) best_path);
2202 
2203  return plan;
2204 }
2205 
2206 /*
2207  * create_incrementalsort_plan
2208  *
2209  * Do the same as create_sort_plan, but create IncrementalSort plan.
2210  */
2211 static IncrementalSort *
2212 create_incrementalsort_plan(PlannerInfo *root, IncrementalSortPath *best_path,
2213  int flags)
2214 {
2215  IncrementalSort *plan;
2216  Plan *subplan;
2217 
2218  /* See comments in create_sort_plan() above */
2219  subplan = create_plan_recurse(root, best_path->spath.subpath,
2220  flags | CP_SMALL_TLIST);
2221  plan = make_incrementalsort_from_pathkeys(subplan,
2222  best_path->spath.path.pathkeys,
2223  IS_OTHER_REL(best_path->spath.subpath->parent) ?
2224  best_path->spath.path.parent->relids : NULL,
2225  best_path->nPresortedCols);
2226 
2227  copy_generic_path_info(&plan->sort.plan, (Path *) best_path);
2228 
2229  return plan;
2230 }
2231 
2232 /*
2233  * create_group_plan
2234  *
2235  * Create a Group plan for 'best_path' and (recursively) plans
2236  * for its subpaths.
2237  */
2238 static Group *
2239 create_group_plan(PlannerInfo *root, GroupPath *best_path)
2240 {
2241  Group *plan;
2242  Plan *subplan;
2243  List *tlist;
2244  List *quals;
2245 
2246  /*
2247  * Group can project, so no need to be terribly picky about child tlist,
2248  * but we do need grouping columns to be available
2249  */
2250  subplan = create_plan_recurse(root, best_path->subpath, CP_LABEL_TLIST);
2251 
2252  tlist = build_path_tlist(root, &best_path->path);
2253 
2254  quals = order_qual_clauses(root, best_path->qual);
2255 
2256  plan = make_group(tlist,
2257  quals,
2258  list_length(best_path->groupClause),
2259  extract_grouping_cols(best_path->groupClause,
2260  subplan->targetlist),
2261  extract_grouping_ops(best_path->groupClause),
2262  extract_grouping_collations(best_path->groupClause,
2263  subplan->targetlist),
2264  subplan);
2265 
2266  copy_generic_path_info(&plan->plan, (Path *) best_path);
2267 
2268  return plan;
2269 }
2270 
2271 /*
2272  * create_upper_unique_plan
2273  *
2274  * Create a Unique plan for 'best_path' and (recursively) plans
2275  * for its subpaths.
2276  */
2277 static Unique *
2278 create_upper_unique_plan(PlannerInfo *root, UpperUniquePath *best_path, int flags)
2279 {
2280  Unique *plan;
2281  Plan *subplan;
2282 
2283  /*
2284  * Unique doesn't project, so tlist requirements pass through; moreover we
2285  * need grouping columns to be labeled.
2286  */
2287  subplan = create_plan_recurse(root, best_path->subpath,
2288  flags | CP_LABEL_TLIST);
2289 
2290  plan = make_unique_from_pathkeys(subplan,
2291  best_path->path.pathkeys,
2292  best_path->numkeys);
2293 
2294  copy_generic_path_info(&plan->plan, (Path *) best_path);
2295 
2296  return plan;
2297 }
2298 
2299 /*
2300  * create_agg_plan
2301  *
2302  * Create an Agg plan for 'best_path' and (recursively) plans
2303  * for its subpaths.
2304  */
2305 static Agg *
2306 create_agg_plan(PlannerInfo *root, AggPath *best_path)
2307 {
2308  Agg *plan;
2309  Plan *subplan;
2310  List *tlist;
2311  List *quals;
2312 
2313  /*
2314  * Agg can project, so no need to be terribly picky about child tlist, but
2315  * we do need grouping columns to be available
2316  */
2317  subplan = create_plan_recurse(root, best_path->subpath, CP_LABEL_TLIST);
2318 
2319  tlist = build_path_tlist(root, &best_path->path);
2320 
2321  quals = order_qual_clauses(root, best_path->qual);
2322 
2323  plan = make_agg(tlist, quals,
2324  best_path->aggstrategy,
2325  best_path->aggsplit,
2326  list_length(best_path->groupClause),
2327  extract_grouping_cols(best_path->groupClause,
2328  subplan->targetlist),
2329  extract_grouping_ops(best_path->groupClause),
2330  extract_grouping_collations(best_path->groupClause,
2331  subplan->targetlist),
2332  NIL,
2333  NIL,
2334  best_path->numGroups,
2335  best_path->transitionSpace,
2336  subplan);
2337 
2338  copy_generic_path_info(&plan->plan, (Path *) best_path);
2339 
2340  return plan;
2341 }
2342 
2343 /*
2344  * Given a groupclause for a collection of grouping sets, produce the
2345  * corresponding groupColIdx.
2346  *
2347  * root->grouping_map maps the tleSortGroupRef to the actual column position in
2348  * the input tuple. So we get the ref from the entries in the groupclause and
2349  * look them up there.
2350  */
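/*
 * Worked example (with made-up numbers): if column "a" carries
 * tleSortGroupRef 1 and sits at resno 2 of the child tlist, while "b"
 * carries ref 2 at resno 5, then grouping_map[1] = 2 and grouping_map[2] = 5,
 * so a groupClause of (a, b) is remapped to the column index array {2, 5}.
 */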
2351 static AttrNumber *
2352 remap_groupColIdx(PlannerInfo *root, List *groupClause)
2353 {
2354  AttrNumber *grouping_map = root->grouping_map;
2355  AttrNumber *new_grpColIdx;
2356  ListCell *lc;
2357  int i;
2358 
2359  Assert(grouping_map);
2360 
2361  new_grpColIdx = palloc0(sizeof(AttrNumber) * list_length(groupClause));
2362 
2363  i = 0;
2364  foreach(lc, groupClause)
2365  {
2366  SortGroupClause *clause = lfirst(lc);
2367 
2368  new_grpColIdx[i++] = grouping_map[clause->tleSortGroupRef];
2369  }
2370 
2371  return new_grpColIdx;
2372 }
2373 
2374 /*
2375  * create_groupingsets_plan
2376  * Create a plan for 'best_path' and (recursively) plans
2377  * for its subpaths.
2378  *
2379  * What we emit is an Agg plan with some vestigial Agg and Sort nodes
2380  * hanging off the side. The top Agg implements the last grouping set
2381  * specified in the GroupingSetsPath, and any additional grouping sets
2382  * each give rise to a subsidiary Agg and Sort node in the top Agg's
2383  * "chain" list. These nodes don't participate in the plan directly,
2384  * but they are a convenient way to represent the required data for
2385  * the extra steps.
2386  *
2387  * Returns a Plan node.
2388  */
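/*
 * For example (illustrative): a query whose grouping sets cannot be covered
 * by a single rollup, such as GROUP BY CUBE(a, b), is typically planned as
 * one visible Agg node for one rollup, with each remaining rollup
 * represented by a dummy Agg (and, for sorted strategies, Sort) node in the
 * top Agg's "chain" list that exists only to carry the metadata described
 * above.
 */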
2389 static Plan *
2390 create_groupingsets_plan(PlannerInfo *root, GroupingSetsPath *best_path)
2391 {
2392  Agg *plan;
2393  Plan *subplan;
2394  List *rollups = best_path->rollups;
2395  AttrNumber *grouping_map;
2396  int maxref;
2397  List *chain;
2398  ListCell *lc;
2399 
2400  /* Shouldn't get here without grouping sets */
2401  Assert(root->parse->groupingSets);
2402  Assert(rollups != NIL);
2403 
2404  /*
2405  * Agg can project, so no need to be terribly picky about child tlist, but
2406  * we do need grouping columns to be available
2407  */
2408  subplan = create_plan_recurse(root, best_path->subpath, CP_LABEL_TLIST);
2409 
2410  /*
2411  * Compute the mapping from tleSortGroupRef to column index in the child's
2412  * tlist. First, identify max SortGroupRef in groupClause, for array
2413  * sizing.
2414  */
2415  maxref = 0;
2416  foreach(lc, root->processed_groupClause)
2417  {
2418  SortGroupClause *gc = (SortGroupClause *) lfirst(lc);
2419 
2420  if (gc->tleSortGroupRef > maxref)
2421  maxref = gc->tleSortGroupRef;
2422  }
2423 
2424  grouping_map = (AttrNumber *) palloc0((maxref + 1) * sizeof(AttrNumber));
2425 
2426  /* Now look up the column numbers in the child's tlist */
2427  foreach(lc, root->processed_groupClause)
2428  {
2429  SortGroupClause *gc = (SortGroupClause *) lfirst(lc);
2430  TargetEntry *tle = get_sortgroupclause_tle(gc, subplan->targetlist);
2431 
2432  grouping_map[gc->tleSortGroupRef] = tle->resno;
2433  }
2434 
2435  /*
2436  * During setrefs.c, we'll need the grouping_map to fix up the cols lists
2437  * in GroupingFunc nodes. Save it for setrefs.c to use.
2438  */
2439  Assert(root->grouping_map == NULL);
2440  root->grouping_map = grouping_map;
2441 
2442  /*
2443  * Generate the side nodes that describe the other sort and group
2444  * operations besides the top one. Note that we don't worry about putting
2445  * accurate cost estimates in the side nodes; only the topmost Agg node's
2446  * costs will be shown by EXPLAIN.
2447  */
2448  chain = NIL;
2449  if (list_length(rollups) > 1)
2450  {
2451  bool is_first_sort = ((RollupData *) linitial(rollups))->is_hashed;
2452 
2453  for_each_from(lc, rollups, 1)
2454  {
2455  RollupData *rollup = lfirst(lc);
2456  AttrNumber *new_grpColIdx;
2457  Plan *sort_plan = NULL;
2458  Plan *agg_plan;
2459  AggStrategy strat;
2460 
2461  new_grpColIdx = remap_groupColIdx(root, rollup->groupClause);
2462 
2463  if (!rollup->is_hashed && !is_first_sort)
2464  {
2465  sort_plan = (Plan *)
2466  make_sort_from_groupcols(rollup->groupClause,
2467  new_grpColIdx,
2468  subplan);
2469  }
2470 
2471  if (!rollup->is_hashed)
2472  is_first_sort = false;
2473 
2474  if (rollup->is_hashed)
2475  strat = AGG_HASHED;
2476  else if (linitial(rollup->gsets) == NIL)
2477  strat = AGG_PLAIN;
2478  else
2479  strat = AGG_SORTED;
2480 
2481  agg_plan = (Plan *) make_agg(NIL,
2482  NIL,
2483  strat,
2484  AGGSPLIT_SIMPLE,
2485  list_length((List *) linitial(rollup->gsets)),
2486  new_grpColIdx,
2487  extract_grouping_ops(rollup->groupClause),
2488  extract_grouping_collations(rollup->groupClause, subplan->targetlist),
2489  rollup->gsets,
2490  NIL,
2491  rollup->numGroups,
2492  best_path->transitionSpace,
2493  sort_plan);
2494 
2495  /*
2496  * Remove stuff we don't need to avoid bloating debug output.
2497  */
2498  if (sort_plan)
2499  {
2500  sort_plan->targetlist = NIL;
2501  sort_plan->lefttree = NULL;
2502  }
2503 
2504  chain = lappend(chain, agg_plan);
2505  }
2506  }
2507 
2508  /*
2509  * Now make the real Agg node
2510  */
2511  {
2512  RollupData *rollup = linitial(rollups);
2513  AttrNumber *top_grpColIdx;
2514  int numGroupCols;
2515 
2516  top_grpColIdx = remap_groupColIdx(root, rollup->groupClause);
2517 
2518  numGroupCols = list_length((List *) linitial(rollup->gsets));
2519 
2520  plan = make_agg(build_path_tlist(root, &best_path->path),
2521  best_path->qual,
2522  best_path->aggstrategy,
2523  AGGSPLIT_SIMPLE,
2524  numGroupCols,
2525  top_grpColIdx,
2526  extract_grouping_ops(rollup->groupClause),
2527  extract_grouping_collations(rollup->groupClause, subplan->targetlist),
2528  rollup->gsets,
2529  chain,
2530  rollup->numGroups,
2531  best_path->transitionSpace,
2532  subplan);
2533 
2534  /* Copy cost data from Path to Plan */
2535  copy_generic_path_info(&plan->plan, &best_path->path);
2536  }
2537 
2538  return (Plan *) plan;
2539 }
2540 
2541 /*
2542  * create_minmaxagg_plan
2543  *
2544  * Create a Result plan for 'best_path' and (recursively) plans
2545  * for its subpaths.
2546  */
2547 static Result *
2548 create_minmaxagg_plan(PlannerInfo *root, MinMaxAggPath *best_path)
2549 {
2550  Result *plan;
2551  List *tlist;
2552  ListCell *lc;
2553 
2554  /* Prepare an InitPlan for each aggregate's subquery. */
2555  foreach(lc, best_path->mmaggregates)
2556  {
2557  MinMaxAggInfo *mminfo = (MinMaxAggInfo *) lfirst(lc);
2558  PlannerInfo *subroot = mminfo->subroot;
2559  Query *subparse = subroot->parse;
2560  Plan *plan;
2561 
2562  /*
2563  * Generate the plan for the subquery. We already have a Path, but we
2564  * have to convert it to a Plan and attach a LIMIT node above it.
2565  * Since we are entering a different planner context (subroot),
2566  * recurse to create_plan not create_plan_recurse.
2567  */
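 /*
  * For instance (illustrative): for "SELECT min(x) FROM t", mminfo->path
  * is roughly a scan yielding non-null x values in ascending order, and
  * the Limit node added below caps it at one row; the InitPlan's output
  * Param then stands in for the MIN() result in the outer Result node.
  */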
2568  plan = create_plan(subroot, mminfo->path);
2569 
2570  plan = (Plan *) make_limit(plan,
2571  subparse->limitOffset,
2572  subparse->limitCount,
2573  subparse->limitOption,
2574  0, NULL, NULL, NULL);
2575 
2576  /* Must apply correct cost/width data to Limit node */
2577  plan->disabled_nodes = mminfo->path->disabled_nodes;
2578  plan->startup_cost = mminfo->path->startup_cost;
2579  plan->total_cost = mminfo->pathcost;
2580  plan->plan_rows = 1;
2581  plan->plan_width = mminfo->path->pathtarget->width;
2582  plan->parallel_aware = false;
2583  plan->parallel_safe = mminfo->path->parallel_safe;
2584 
2585  /* Convert the plan into an InitPlan in the outer query. */
2586  SS_make_initplan_from_plan(root, subroot, plan, mminfo->param);
2587  }
2588 
2589  /* Generate the output plan --- basically just a Result */
2590  tlist = build_path_tlist(root, &best_path->path);
2591 
2592  plan = make_result(tlist, (Node *) best_path->quals, NULL);
2593 
2594  copy_generic_path_info(&plan->plan, (Path *) best_path);
2595 
2596  /*
2597  * During setrefs.c, we'll need to replace references to the Agg nodes
2598  * with InitPlan output params. (We can't just do that locally in the
2599  * MinMaxAgg node, because path nodes above here may have Agg references
2600  * as well.) Save the mmaggregates list to tell setrefs.c to do that.
2601  */
2602  Assert(root->minmax_aggs == NIL);
2603  root->minmax_aggs = best_path->mmaggregates;
2604 
2605  return plan;
2606 }
2607 
2608 /*
2609  * create_windowagg_plan
2610  *
2611  * Create a WindowAgg plan for 'best_path' and (recursively) plans
2612  * for its subpaths.
2613  */
2614 static WindowAgg *
2615 create_windowagg_plan(PlannerInfo *root, WindowAggPath *best_path)
2616 {
2617  WindowAgg *plan;
2618  WindowClause *wc = best_path->winclause;
2619  int numPart = list_length(wc->partitionClause);
2620  int numOrder = list_length(wc->orderClause);
2621  Plan *subplan;
2622  List *tlist;
2623  int partNumCols;
2624  AttrNumber *partColIdx;
2625  Oid *partOperators;
2626  Oid *partCollations;
2627  int ordNumCols;
2628  AttrNumber *ordColIdx;
2629  Oid *ordOperators;
2630  Oid *ordCollations;
2631  ListCell *lc;
2632 
2633  /*
2634  * Choice of tlist here is motivated by the fact that WindowAgg will be
2635  * storing the input rows of window frames in a tuplestore; it therefore
2636  * behooves us to request a small tlist to avoid wasting space. We do of
2637  * course need grouping columns to be available.
2638  */
2639  subplan = create_plan_recurse(root, best_path->subpath,
2640  CP_LABEL_TLIST | CP_SMALL_TLIST);
2641 
2642  tlist = build_path_tlist(root, &best_path->path);
2643 
2644  /*
2645  * Convert SortGroupClause lists into arrays of attr indexes and equality
2646  * operators, as wanted by executor.
2647  */
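 /*
  * E.g. (illustrative): for OVER (PARTITION BY dept ORDER BY salary), the
  * loops below record dept's resno, equality operator, and collation in
  * the part* arrays and salary's in the ord* arrays, which is the
  * representation the executor's WindowAgg node expects.
  */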
2648  partColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numPart);
2649  partOperators = (Oid *) palloc(sizeof(Oid) * numPart);
2650  partCollations = (Oid *) palloc(sizeof(Oid) * numPart);
2651 
2652  partNumCols = 0;
2653  foreach(lc, wc->partitionClause)
2654  {
2655  SortGroupClause *sgc = (SortGroupClause *) lfirst(lc);
2656  TargetEntry *tle = get_sortgroupclause_tle(sgc, subplan->targetlist);
2657 
2658  Assert(OidIsValid(sgc->eqop));
2659  partColIdx[partNumCols] = tle->resno;
2660  partOperators[partNumCols] = sgc->eqop;
2661  partCollations[partNumCols] = exprCollation((Node *) tle->expr);
2662  partNumCols++;
2663  }
2664 
2665  ordColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numOrder);
2666  ordOperators = (Oid *) palloc(sizeof(Oid) * numOrder);
2667  ordCollations = (Oid *) palloc(sizeof(Oid) * numOrder);
2668 
2669  ordNumCols = 0;
2670  foreach(lc, wc->orderClause)
2671  {
2672  SortGroupClause *sgc = (SortGroupClause *) lfirst(lc);
2673  TargetEntry *tle = get_sortgroupclause_tle(sgc, subplan->targetlist);
2674 
2675  Assert(OidIsValid(sgc->eqop));
2676  ordColIdx[ordNumCols] = tle->resno;
2677  ordOperators[ordNumCols] = sgc->eqop;
2678  ordCollations[ordNumCols] = exprCollation((Node *) tle->expr);
2679  ordNumCols++;
2680  }
2681 
2682  /* And finally we can make the WindowAgg node */
2683  plan = make_windowagg(tlist,
2684  wc->winref,
2685  partNumCols,
2686  partColIdx,
2687  partOperators,
2688  partCollations,
2689  ordNumCols,
2690  ordColIdx,
2691  ordOperators,
2692  ordCollations,
2693  wc->frameOptions,
2694  wc->startOffset,
2695  wc->endOffset,
2696  wc->startInRangeFunc,
2697  wc->endInRangeFunc,
2698  wc->inRangeColl,
2699  wc->inRangeAsc,
2700  wc->inRangeNullsFirst,
2701  best_path->runCondition,
2702  best_path->qual,
2703  best_path->topwindow,
2704  subplan);
2705 
2706  copy_generic_path_info(&plan->plan, (Path *) best_path);
2707 
2708  return plan;
2709 }
2710 
2711 /*
2712  * create_setop_plan
2713  *
2714  * Create a SetOp plan for 'best_path' and (recursively) plans
2715  * for its subpaths.
2716  */
2717 static SetOp *
2718 create_setop_plan(PlannerInfo *root, SetOpPath *best_path, int flags)
2719 {
2720  SetOp *plan;
2721  Plan *subplan;
2722  long numGroups;
2723 
2724  /*
2725  * SetOp doesn't project, so tlist requirements pass through; moreover we
2726  * need grouping columns to be labeled.
2727  */
2728  subplan = create_plan_recurse(root, best_path->subpath,
2729  flags | CP_LABEL_TLIST);
2730 
2731  /* Convert numGroups to long int --- but 'ware overflow! */
2732  numGroups = clamp_cardinality_to_long(best_path->numGroups);
2733 
2734  plan = make_setop(best_path->cmd,
2735  best_path->strategy,
2736  subplan,
2737  best_path->distinctList,
2738  best_path->flagColIdx,
2739  best_path->firstFlag,
2740  numGroups);
2741 
2742  copy_generic_path_info(&plan->plan, (Path *) best_path);
2743 
2744  return plan;
2745 }
2746 
2747 /*
2748  * create_recursiveunion_plan
2749  *
2750  * Create a RecursiveUnion plan for 'best_path' and (recursively) plans
2751  * for its subpaths.
2752  */
2753 static RecursiveUnion *
2754 create_recursiveunion_plan(PlannerInfo *root, RecursiveUnionPath *best_path)
2755 {
2756  RecursiveUnion *plan;
2757  Plan *leftplan;
2758  Plan *rightplan;
2759  List *tlist;
2760  long numGroups;
2761 
2762  /* Need both children to produce same tlist, so force it */
2763  leftplan = create_plan_recurse(root, best_path->leftpath, CP_EXACT_TLIST);
2764  rightplan = create_plan_recurse(root, best_path->rightpath, CP_EXACT_TLIST);
2765 
2766  tlist = build_path_tlist(root, &best_path->path);
2767 
2768  /* Convert numGroups to long int --- but 'ware overflow! */
2769  numGroups = clamp_cardinality_to_long(best_path->numGroups);
2770 
2771  plan = make_recursive_union(tlist,
2772  leftplan,
2773  rightplan,
2774  best_path->wtParam,
2775  best_path->distinctList,
2776  numGroups);
2777 
2778  copy_generic_path_info(&plan->plan, (Path *) best_path);
2779 
2780  return plan;
2781 }
2782 
2783 /*
2784  * create_lockrows_plan
2785  *
2786  * Create a LockRows plan for 'best_path' and (recursively) plans
2787  * for its subpaths.
2788  */
2789 static LockRows *
2790 create_lockrows_plan(PlannerInfo *root, LockRowsPath *best_path,
2791  int flags)
2792 {
2793  LockRows *plan;
2794  Plan *subplan;
2795 
2796  /* LockRows doesn't project, so tlist requirements pass through */
2797  subplan = create_plan_recurse(root, best_path->subpath, flags);
2798 
2799  plan = make_lockrows(subplan, best_path->rowMarks, best_path->epqParam);
2800 
2801  copy_generic_path_info(&plan->plan, (Path *) best_path);
2802 
2803  return plan;
2804 }
2805 
2806 /*
2807  * create_modifytable_plan
2808  * Create a ModifyTable plan for 'best_path'.
2809  *
2810  * Returns a Plan node.
2811  */
2812 static ModifyTable *
2813 create_modifytable_plan(PlannerInfo *root, ModifyTablePath *best_path)
2814 {
2815  ModifyTable *plan;
2816  Path *subpath = best_path->subpath;
2817  Plan *subplan;
2818 
2819  /* Subplan must produce exactly the specified tlist */
2820  subplan = create_plan_recurse(root, subpath, CP_EXACT_TLIST);
2821 
2822  /* Transfer resname/resjunk labeling, too, to keep executor happy */
2823  apply_tlist_labeling(subplan->targetlist, root->processed_tlist);
2824 
2825  plan = make_modifytable(root,
2826  subplan,
2827  best_path->operation,
2828  best_path->canSetTag,
2829  best_path->nominalRelation,
2830  best_path->rootRelation,
2831  best_path->partColsUpdated,
2832  best_path->resultRelations,
2833  best_path->updateColnosLists,
2834  best_path->withCheckOptionLists,
2835  best_path->returningLists,
2836  best_path->rowMarks,
2837  best_path->onconflict,
2838  best_path->mergeActionLists,
2839  best_path->mergeJoinConditions,
2840  best_path->epqParam);
2841 
2842  copy_generic_path_info(&plan->plan, &best_path->path);
2843 
2844  return plan;
2845 }
2846 
2847 /*
2848  * create_limit_plan
2849  *
2850  * Create a Limit plan for 'best_path' and (recursively) plans
2851  * for its subpaths.
2852  */
2853 static Limit *
2854 create_limit_plan(PlannerInfo *root, LimitPath *best_path, int flags)
2855 {
2856  Limit *plan;
2857  Plan *subplan;
2858  int numUniqkeys = 0;
2859  AttrNumber *uniqColIdx = NULL;
2860  Oid *uniqOperators = NULL;
2861  Oid *uniqCollations = NULL;
2862 
2863  /* Limit doesn't project, so tlist requirements pass through */
2864  subplan = create_plan_recurse(root, best_path->subpath, flags);
2865 
2866  /* Extract information necessary for comparing rows for WITH TIES. */
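 /*
  * Illustration (not from this file): for "... ORDER BY score DESC FETCH
  * FIRST 3 ROWS WITH TIES", the sortClause entry for score supplies the
  * column, equality operator, and collation gathered below, which the
  * executor uses to keep emitting rows whose score equals the third row's.
  */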
2867  if (best_path->limitOption == LIMIT_OPTION_WITH_TIES)
2868  {
2869  Query *parse = root->parse;
2870  ListCell *l;
2871 
2872  numUniqkeys = list_length(parse->sortClause);
2873  uniqColIdx = (AttrNumber *) palloc(numUniqkeys * sizeof(AttrNumber));
2874  uniqOperators = (Oid *) palloc(numUniqkeys * sizeof(Oid));
2875  uniqCollations = (Oid *) palloc(numUniqkeys * sizeof(Oid));
2876 
2877  numUniqkeys = 0;
2878  foreach(l, parse->sortClause)
2879  {
2880  SortGroupClause *sortcl = (SortGroupClause *) lfirst(l);
2881  TargetEntry *tle = get_sortgroupclause_tle(sortcl, parse->targetList);
2882 
2883  uniqColIdx[numUniqkeys] = tle->resno;
2884  uniqOperators[numUniqkeys] = sortcl->eqop;
2885  uniqCollations[numUniqkeys] = exprCollation((Node *) tle->expr);
2886  numUniqkeys++;
2887  }
2888  }
2889 
2890  plan = make_limit(subplan,
2891  best_path->limitOffset,
2892  best_path->limitCount,
2893  best_path->limitOption,
2894  numUniqkeys, uniqColIdx, uniqOperators, uniqCollations);
2895 
2896  copy_generic_path_info(&plan->plan, (Path *) best_path);
2897 
2898  return plan;
2899 }
2900 
2901 
2902 /*****************************************************************************
2903  *
2904  * BASE-RELATION SCAN METHODS
2905  *
2906  *****************************************************************************/
2907 
2908 
2909 /*
2910  * create_seqscan_plan
2911  * Returns a seqscan plan for the base relation scanned by 'best_path'
2912  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
2913  */
2914 static SeqScan *
2915 create_seqscan_plan(PlannerInfo *root, Path *best_path,
2916  List *tlist, List *scan_clauses)
2917 {
2918  SeqScan *scan_plan;
2919  Index scan_relid = best_path->parent->relid;
2920 
2921  /* it should be a base rel... */
2922  Assert(scan_relid > 0);
2923  Assert(best_path->parent->rtekind == RTE_RELATION);
2924 
2925  /* Sort clauses into best execution order */
2926  scan_clauses = order_qual_clauses(root, scan_clauses);
2927 
2928  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
2929  scan_clauses = extract_actual_clauses(scan_clauses, false);
2930 
2931  /* Replace any outer-relation variables with nestloop params */
2932  if (best_path->param_info)
2933  {
2934  scan_clauses = (List *)
2935  replace_nestloop_params(root, (Node *) scan_clauses);
2936  }
2937 
2938  scan_plan = make_seqscan(tlist,
2939  scan_clauses,
2940  scan_relid);
2941 
2942  copy_generic_path_info(&scan_plan->scan.plan, best_path);
2943 
2944  return scan_plan;
2945 }
2946 
2947 /*
2948  * create_samplescan_plan
2949  * Returns a samplescan plan for the base relation scanned by 'best_path'
2950  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
2951  */
2952 static SampleScan *
2953 create_samplescan_plan(PlannerInfo *root, Path *best_path,
2954  List *tlist, List *scan_clauses)
2955 {
2956  SampleScan *scan_plan;
2957  Index scan_relid = best_path->parent->relid;
2958  RangeTblEntry *rte;
2959  TableSampleClause *tsc;
2960 
2961  /* it should be a base rel with a tablesample clause... */
2962  Assert(scan_relid > 0);
2963  rte = planner_rt_fetch(scan_relid, root);
2964  Assert(rte->rtekind == RTE_RELATION);
2965  tsc = rte->tablesample;
2966  Assert(tsc != NULL);
2967 
2968  /* Sort clauses into best execution order */
2969  scan_clauses = order_qual_clauses(root, scan_clauses);
2970 
2971  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
2972  scan_clauses = extract_actual_clauses(scan_clauses, false);
2973 
2974  /* Replace any outer-relation variables with nestloop params */
2975  if (best_path->param_info)
2976  {
2977  scan_clauses = (List *)
2978  replace_nestloop_params(root, (Node *) scan_clauses);
2979  tsc = (TableSampleClause *)
2980  replace_nestloop_params(root, (Node *) tsc);
2981  }
2982 
2983  scan_plan = make_samplescan(tlist,
2984  scan_clauses,
2985  scan_relid,
2986  tsc);
2987 
2988  copy_generic_path_info(&scan_plan->scan.plan, best_path);
2989 
2990  return scan_plan;
2991 }
2992 
2993 /*
2994  * create_indexscan_plan
2995  * Returns an indexscan plan for the base relation scanned by 'best_path'
2996  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
2997  *
2998  * We use this for both plain IndexScans and IndexOnlyScans, because the
2999  * qual preprocessing work is the same for both. Note that the caller tells
3000  * us which to build --- we don't look at best_path->path.pathtype, because
3001  * create_bitmap_subplan needs to be able to override the prior decision.
3002  */
3003 static Scan *
3004 create_indexscan_plan(PlannerInfo *root,
3005  IndexPath *best_path,
3006  List *tlist,
3007  List *scan_clauses,
3008  bool indexonly)
3009 {
3010  Scan *scan_plan;
3011  List *indexclauses = best_path->indexclauses;
3012  List *indexorderbys = best_path->indexorderbys;
3013  Index baserelid = best_path->path.parent->relid;
3014  IndexOptInfo *indexinfo = best_path->indexinfo;
3015  Oid indexoid = indexinfo->indexoid;
3016  List *qpqual;
3017  List *stripped_indexquals;
3018  List *fixed_indexquals;
3019  List *fixed_indexorderbys;
3020  List *indexorderbyops = NIL;
3021  ListCell *l;
3022 
3023  /* it should be a base rel... */
3024  Assert(baserelid > 0);
3025  Assert(best_path->path.parent->rtekind == RTE_RELATION);
3026  /* check the scan direction is valid */
3027  Assert(best_path->indexscandir == ForwardScanDirection ||
3028  best_path->indexscandir == BackwardScanDirection);
3029 
3030  /*
3031  * Extract the index qual expressions (stripped of RestrictInfos) from the
3032  * IndexClauses list, and prepare a copy with index Vars substituted for
3033  * table Vars. (This step also does replace_nestloop_params on the
3034  * fixed_indexquals.)
3035  */
3036  fix_indexqual_references(root, best_path,
3037  &stripped_indexquals,
3038  &fixed_indexquals);
3039 
3040  /*
3041  * Likewise fix up index attr references in the ORDER BY expressions.
3042  */
3043  fixed_indexorderbys = fix_indexorderby_references(root, best_path);
3044 
3045  /*
3046  * The qpqual list must contain all restrictions not automatically handled
3047  * by the index, other than pseudoconstant clauses which will be handled
3048  * by a separate gating plan node. All the predicates in the indexquals
3049  * will be checked (either by the index itself, or by nodeIndexscan.c),
3050  * but if there are any "special" operators involved then they must be
3051  * included in qpqual. The upshot is that qpqual must contain
3052  * scan_clauses minus whatever appears in indexquals.
3053  *
3054  * is_redundant_with_indexclauses() detects cases where a scan clause is
3055  * present in the indexclauses list or is generated from the same
3056  * EquivalenceClass as some indexclause, and is therefore redundant with
3057  * it, though not equal. (The latter happens when indxpath.c prefers a
3058  * different derived equality than what generate_join_implied_equalities
3059  * picked for a parameterized scan's ppi_clauses.) Note that it will not
3060  * match to lossy index clauses, which is critical because we have to
3061  * include the original clause in qpqual in that case.
3062  *
3063  * In some situations (particularly with OR'd index conditions) we may
3064  * have scan_clauses that are not equal to, but are logically implied by,
3065  * the index quals; so we also try a predicate_implied_by() check to see
3066  * if we can discard quals that way. (predicate_implied_by assumes its
3067  * first input contains only immutable functions, so we have to check
3068  * that.)
3069  *
3070  * Note: if you change this bit of code you should also look at
3071  * extract_nonindex_conditions() in costsize.c.
3072  */
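 /*
  * Concrete illustration: with an index on (x) and a WHERE clause
  * "x = 42 AND y > 10", the x = 42 clause becomes an indexqual and is
  * skipped by the loop below, while y > 10 survives into qpqual and is
  * rechecked against each heap tuple the index scan returns.
  */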
3073  qpqual = NIL;
3074  foreach(l, scan_clauses)
3075  {
3076  RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
3077 
3078  if (rinfo->pseudoconstant)
3079  continue; /* we may drop pseudoconstants here */
3080  if (is_redundant_with_indexclauses(rinfo, indexclauses))
3081  continue; /* dup or derived from same EquivalenceClass */
3082  if (!contain_mutable_functions((Node *) rinfo->clause) &&
3083  predicate_implied_by(list_make1(rinfo->clause), stripped_indexquals,
3084  false))
3085  continue; /* provably implied by indexquals */
3086  qpqual = lappend(qpqual, rinfo);
3087  }
3088 
3089  /* Sort clauses into best execution order */
3090  qpqual = order_qual_clauses(root, qpqual);
3091 
3092  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3093  qpqual = extract_actual_clauses(qpqual, false);
3094 
3095  /*
3096  * We have to replace any outer-relation variables with nestloop params in
3097  * the indexqualorig, qpqual, and indexorderbyorig expressions. A bit
3098  * annoying to have to do this separately from the processing in
3099  * fix_indexqual_references --- rethink this when generalizing the inner
3100  * indexscan support. But note we can't really do this earlier because
3101  * it'd break the comparisons to predicates above ... (or would it? Those
3102  * wouldn't have outer refs)
3103  */
3104  if (best_path->path.param_info)
3105  {
3106  stripped_indexquals = (List *)
3107  replace_nestloop_params(root, (Node *) stripped_indexquals);
3108  qpqual = (List *)
3109  replace_nestloop_params(root, (Node *) qpqual);
3110  indexorderbys = (List *)
3111  replace_nestloop_params(root, (Node *) indexorderbys);
3112  }
3113 
3114  /*
3115  * If there are ORDER BY expressions, look up the sort operators for their
3116  * result datatypes.
3117  */
3118  if (indexorderbys)
3119  {
3120  ListCell *pathkeyCell,
3121  *exprCell;
3122 
3123  /*
3124  * PathKey contains OID of the btree opfamily we're sorting by, but
3125  * that's not quite enough because we need the expression's datatype
3126  * to look up the sort operator in the operator family.
3127  */
3128  Assert(list_length(best_path->path.pathkeys) == list_length(indexorderbys));
3129  forboth(pathkeyCell, best_path->path.pathkeys, exprCell, indexorderbys)
3130  {
3131  PathKey *pathkey = (PathKey *) lfirst(pathkeyCell);
3132  Node *expr = (Node *) lfirst(exprCell);
3133  Oid exprtype = exprType(expr);
3134  Oid sortop;
3135 
3136  /* Get sort operator from opfamily */
3137  sortop = get_opfamily_member(pathkey->pk_opfamily,
3138  exprtype,
3139  exprtype,
3140  pathkey->pk_strategy);
3141  if (!OidIsValid(sortop))
3142  elog(ERROR, "missing operator %d(%u,%u) in opfamily %u",
3143  pathkey->pk_strategy, exprtype, exprtype, pathkey->pk_opfamily);
3144  indexorderbyops = lappend_oid(indexorderbyops, sortop);
3145  }
3146  }
3147 
3148  /*
3149  * For an index-only scan, we must mark indextlist entries as resjunk if
3150  * they are columns that the index AM can't return; this cues setrefs.c to
3151  * not generate references to those columns.
3152  */
3153  if (indexonly)
3154  {
3155  int i = 0;
3156 
3157  foreach(l, indexinfo->indextlist)
3158  {
3159  TargetEntry *indextle = (TargetEntry *) lfirst(l);
3160 
3161  indextle->resjunk = !indexinfo->canreturn[i];
3162  i++;
3163  }
3164  }
3165 
3166  /* Finally ready to build the plan node */
3167  if (indexonly)
3168  scan_plan = (Scan *) make_indexonlyscan(tlist,
3169  qpqual,
3170  baserelid,
3171  indexoid,
3172  fixed_indexquals,
3173  stripped_indexquals,
3174  fixed_indexorderbys,
3175  indexinfo->indextlist,
3176  best_path->indexscandir);
3177  else
3178  scan_plan = (Scan *) make_indexscan(tlist,
3179  qpqual,
3180  baserelid,
3181  indexoid,
3182  fixed_indexquals,
3183  stripped_indexquals,
3184  fixed_indexorderbys,
3185  indexorderbys,
3186  indexorderbyops,
3187  best_path->indexscandir);
3188 
3189  copy_generic_path_info(&scan_plan->plan, &best_path->path);
3190 
3191  return scan_plan;
3192 }
3193 
3194 /*
3195  * create_bitmap_scan_plan
3196  * Returns a bitmap scan plan for the base relation scanned by 'best_path'
3197  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3198  */
3199 static BitmapHeapScan *
3200 create_bitmap_scan_plan(PlannerInfo *root,
3201  BitmapHeapPath *best_path,
3202  List *tlist,
3203  List *scan_clauses)
3204 {
3205  Index baserelid = best_path->path.parent->relid;
3206  Plan *bitmapqualplan;
3207  List *bitmapqualorig;
3208  List *indexquals;
3209  List *indexECs;
3210  List *qpqual;
3211  ListCell *l;
3212  BitmapHeapScan *scan_plan;
3213 
3214  /* it should be a base rel... */
3215  Assert(baserelid > 0);
3216  Assert(best_path->path.parent->rtekind == RTE_RELATION);
3217 
3218  /* Process the bitmapqual tree into a Plan tree and qual lists */
3219  bitmapqualplan = create_bitmap_subplan(root, best_path->bitmapqual,
3220  &bitmapqualorig, &indexquals,
3221  &indexECs);
3222 
3223  if (best_path->path.parallel_aware)
3224  bitmap_subplan_mark_shared(bitmapqualplan);
3225 
3226  /*
3227  * The qpqual list must contain all restrictions not automatically handled
3228  * by the index, other than pseudoconstant clauses which will be handled
3229  * by a separate gating plan node. All the predicates in the indexquals
3230  * will be checked (either by the index itself, or by
3231  * nodeBitmapHeapscan.c), but if there are any "special" operators
3232  * involved then they must be added to qpqual. The upshot is that qpqual
3233  * must contain scan_clauses minus whatever appears in indexquals.
3234  *
3235  * This loop is similar to the comparable code in create_indexscan_plan(),
3236  * but with some differences because it has to compare the scan clauses to
3237  * stripped (no RestrictInfos) indexquals. See comments there for more
3238  * info.
3239  *
3240  * In normal cases simple equal() checks will be enough to spot duplicate
3241  * clauses, so we try that first. We next see if the scan clause is
3242  * redundant with any top-level indexqual by virtue of being generated
3243  * from the same EC. After that, try predicate_implied_by().
3244  *
3245  * Unlike create_indexscan_plan(), the predicate_implied_by() test here is
3246  * useful for getting rid of qpquals that are implied by index predicates,
3247  * because the predicate conditions are included in the "indexquals"
3248  * returned by create_bitmap_subplan(). Bitmap scans have to do it that
3249  * way because predicate conditions need to be rechecked if the scan
3250  * becomes lossy, so they have to be included in bitmapqualorig.
3251  */
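 /*
  * As an illustrative case: for "WHERE x = 1 OR x = 2" implemented by a
  * BitmapOr over an index on x, the OR condition is part of the indexquals
  * returned by create_bitmap_subplan(), so the loop below should recognize
  * the original clause as redundant (via the equal() or
  * predicate_implied_by() tests) and keep it out of qpqual, while it still
  * appears in bitmapqualorig for recheck if the bitmap becomes lossy.
  */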
3252  qpqual = NIL;
3253  foreach(l, scan_clauses)
3254  {
3255  RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
3256  Node *clause = (Node *) rinfo->clause;
3257 
3258  if (rinfo->pseudoconstant)
3259  continue; /* we may drop pseudoconstants here */
3260  if (list_member(indexquals, clause))
3261  continue; /* simple duplicate */
3262  if (rinfo->parent_ec && list_member_ptr(indexECs, rinfo->parent_ec))
3263  continue; /* derived from same EquivalenceClass */
3264  if (!contain_mutable_functions(clause) &&
3265  predicate_implied_by(list_make1(clause), indexquals, false))
3266  continue; /* provably implied by indexquals */
3267  qpqual = lappend(qpqual, rinfo);
3268  }
3269 
3270  /* Sort clauses into best execution order */
3271  qpqual = order_qual_clauses(root, qpqual);
3272 
3273  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3274  qpqual = extract_actual_clauses(qpqual, false);
3275 
3276  /*
3277  * When dealing with special operators, we will at this point have
3278  * duplicate clauses in qpqual and bitmapqualorig. We may as well drop
3279  * 'em from bitmapqualorig, since there's no point in making the tests
3280  * twice.
3281  */
3282  bitmapqualorig = list_difference_ptr(bitmapqualorig, qpqual);
3283 
3284  /*
3285  * We have to replace any outer-relation variables with nestloop params in
3286  * the qpqual and bitmapqualorig expressions. (This was already done for
3287  * expressions attached to plan nodes in the bitmapqualplan tree.)
3288  */
3289  if (best_path->path.param_info)
3290  {
3291  qpqual = (List *)
3292  replace_nestloop_params(root, (Node *) qpqual);
3293  bitmapqualorig = (List *)
3294  replace_nestloop_params(root, (Node *) bitmapqualorig);
3295  }
3296 
3297  /* Finally ready to build the plan node */
3298  scan_plan = make_bitmap_heapscan(tlist,
3299  qpqual,
3300  bitmapqualplan,
3301  bitmapqualorig,
3302  baserelid);
3303 
3304  copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
3305 
3306  return scan_plan;
3307 }
3308 
3309 /*
3310  * Given a bitmapqual tree, generate the Plan tree that implements it
3311  *
3312  * As byproducts, we also return in *qual and *indexqual the qual lists
3313  * (in implicit-AND form, without RestrictInfos) describing the original index
3314  * conditions and the generated indexqual conditions. (These are the same in
3315  * simple cases, but when special index operators are involved, the former
3316  * list includes the special conditions while the latter includes the actual
3317  * indexable conditions derived from them.) Both lists include partial-index
3318  * predicates, because we have to recheck predicates as well as index
3319  * conditions if the bitmap scan becomes lossy.
3320  *
3321  * In addition, we return a list of EquivalenceClass pointers for all the
3322  * top-level indexquals that were possibly-redundantly derived from ECs.
3323  * This allows removal of scan_clauses that are redundant with such quals.
3324  * (We do not attempt to detect such redundancies for quals that are within
3325  * OR subtrees. This could be done in a less hacky way if we returned the
3326  * indexquals in RestrictInfo form, but that would be slower and still pretty
3327  * messy, since we'd have to build new RestrictInfos in many cases.)
3328  */
3329 static Plan *
3330 create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
3331  List **qual, List **indexqual, List **indexECs)
3332 {
3333  Plan *plan;
3334 
3335  if (IsA(bitmapqual, BitmapAndPath))
3336  {
3337  BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
3338  List *subplans = NIL;
3339  List *subquals = NIL;
3340  List *subindexquals = NIL;
3341  List *subindexECs = NIL;
3342  ListCell *l;
3343 
3344  /*
3345  * There may well be redundant quals among the subplans, since a
3346  * top-level WHERE qual might have gotten used to form several
3347  * different index quals. We don't try exceedingly hard to eliminate
3348  * redundancies, but we do eliminate obvious duplicates by using
3349  * list_concat_unique.
3350  */
3351  foreach(l, apath->bitmapquals)
3352  {
3353  Plan *subplan;
3354  List *subqual;
3355  List *subindexqual;
3356  List *subindexEC;
3357 
3358  subplan = create_bitmap_subplan(root, (Path *) lfirst(l),
3359  &subqual, &subindexqual,
3360  &subindexEC);
3361  subplans = lappend(subplans, subplan);
3362  subquals = list_concat_unique(subquals, subqual);
3363  subindexquals = list_concat_unique(subindexquals, subindexqual);
3364  /* Duplicates in indexECs aren't worth getting rid of */
3365  subindexECs = list_concat(subindexECs, subindexEC);
3366  }
3367  plan = (Plan *) make_bitmap_and(subplans);
3368  plan->startup_cost = apath->path.startup_cost;
3369  plan->total_cost = apath->path.total_cost;
3370  plan->plan_rows =
3371  clamp_row_est(apath->bitmapselectivity * apath->path.parent->tuples);
3372  plan->plan_width = 0; /* meaningless */
3373  plan->parallel_aware = false;
3374  plan->parallel_safe = apath->path.parallel_safe;
3375  *qual = subquals;
3376  *indexqual = subindexquals;
3377  *indexECs = subindexECs;
3378  }
3379  else if (IsA(bitmapqual, BitmapOrPath))
3380  {
3381  BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
3382  List *subplans = NIL;
3383  List *subquals = NIL;
3384  List *subindexquals = NIL;
3385  bool const_true_subqual = false;
3386  bool const_true_subindexqual = false;
3387  ListCell *l;
3388 
3389  /*
3390  * Here, we only detect qual-free subplans. A qual-free subplan would
3391  * cause us to generate "... OR true ..." which we may as well reduce
3392  * to just "true". We do not try to eliminate redundant subclauses
3393  * because (a) it's not as likely as in the AND case, and (b) we might
3394  * well be working with hundreds or even thousands of OR conditions,
3395  * perhaps from a long IN list. The performance of list_append_unique
3396  * would be unacceptable.
3397  */
3398  foreach(l, opath->bitmapquals)
3399  {
3400  Plan *subplan;
3401  List *subqual;
3402  List *subindexqual;
3403  List *subindexEC;
3404 
3405  subplan = create_bitmap_subplan(root, (Path *) lfirst(l),
3406  &subqual, &subindexqual,
3407  &subindexEC);
3408  subplans = lappend(subplans, subplan);
3409  if (subqual == NIL)
3410  const_true_subqual = true;
3411  else if (!const_true_subqual)
3412  subquals = lappend(subquals,
3413  make_ands_explicit(subqual));
3414  if (subindexqual == NIL)
3415  const_true_subindexqual = true;
3416  else if (!const_true_subindexqual)
3417  subindexquals = lappend(subindexquals,
3418  make_ands_explicit(subindexqual));
3419  }
3420 
3421  /*
3422  * In the presence of ScalarArrayOpExpr quals, we might have built
3423  * BitmapOrPaths with just one subpath; don't add an OR step.
3424  */
3425  if (list_length(subplans) == 1)
3426  {
3427  plan = (Plan *) linitial(subplans);
3428  }
3429  else
3430  {
3431  plan = (Plan *) make_bitmap_or(subplans);
3432  plan->startup_cost = opath->path.startup_cost;
3433  plan->total_cost = opath->path.total_cost;
3434  plan->plan_rows =
3435  clamp_row_est(opath->bitmapselectivity * opath->path.parent->tuples);
3436  plan->plan_width = 0; /* meaningless */
3437  plan->parallel_aware = false;
3438  plan->parallel_safe = opath->path.parallel_safe;
3439  }
3440 
3441  /*
3442  * If there were constant-TRUE subquals, the OR reduces to constant
3443  * TRUE. Also, avoid generating one-element ORs, which could happen
3444  * due to redundancy elimination or ScalarArrayOpExpr quals.
3445  */
3446  if (const_true_subqual)
3447  *qual = NIL;
3448  else if (list_length(subquals) <= 1)
3449  *qual = subquals;
3450  else
3451  *qual = list_make1(make_orclause(subquals));
3452  if (const_true_subindexqual)
3453  *indexqual = NIL;
3454  else if (list_length(subindexquals) <= 1)
3455  *indexqual = subindexquals;
3456  else
3457  *indexqual = list_make1(make_orclause(subindexquals));
3458  *indexECs = NIL;
3459  }
3460  else if (IsA(bitmapqual, IndexPath))
3461  {
3462  IndexPath *ipath = (IndexPath *) bitmapqual;
3463  IndexScan *iscan;
3464  List *subquals;
3465  List *subindexquals;
3466  List *subindexECs;
3467  ListCell *l;
3468 
3469  /* Use the regular indexscan plan build machinery... */
3470  iscan = castNode(IndexScan,
3471  create_indexscan_plan(root, ipath,
3472  NIL, NIL, false));
3473  /* then convert to a bitmap indexscan */
3474  plan = (Plan *) make_bitmap_indexscan(iscan->scan.scanrelid,
3475  iscan->indexid,
3476  iscan->indexqual,
3477  iscan->indexqualorig);
3478  /* and set its cost/width fields appropriately */
3479  plan->startup_cost = 0.0;
3480  plan->total_cost = ipath->indextotalcost;
3481  plan->plan_rows =
3482  clamp_row_est(ipath->indexselectivity * ipath->path.parent->tuples);
3483  plan->plan_width = 0; /* meaningless */
3484  plan->parallel_aware = false;
3485  plan->parallel_safe = ipath->path.parallel_safe;
3486  /* Extract original index clauses, actual index quals, relevant ECs */
3487  subquals = NIL;
3488  subindexquals = NIL;
3489  subindexECs = NIL;
3490  foreach(l, ipath->indexclauses)
3491  {
3492  IndexClause *iclause = (IndexClause *) lfirst(l);
3493  RestrictInfo *rinfo = iclause->rinfo;
3494 
3495  Assert(!rinfo->pseudoconstant);
3496  subquals = lappend(subquals, rinfo->clause);
3497  subindexquals = list_concat(subindexquals,
3498  get_actual_clauses(iclause->indexquals));
3499  if (rinfo->parent_ec)
3500  subindexECs = lappend(subindexECs, rinfo->parent_ec);
3501  }
3502  /* We can add any index predicate conditions, too */
3503  foreach(l, ipath->indexinfo->indpred)
3504  {
3505  Expr *pred = (Expr *) lfirst(l);
3506 
3507  /*
3508  * We know that the index predicate must have been implied by the
3509  * query condition as a whole, but it may or may not be implied by
3510  * the conditions that got pushed into the bitmapqual. Avoid
3511  * generating redundant conditions.
3512  */
3513  if (!predicate_implied_by(list_make1(pred), subquals, false))
3514  {
3515  subquals = lappend(subquals, pred);
3516  subindexquals = lappend(subindexquals, pred);
3517  }
3518  }
3519  *qual = subquals;
3520  *indexqual = subindexquals;
3521  *indexECs = subindexECs;
3522  }
3523  else
3524  {
3525  elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
3526  plan = NULL; /* keep compiler quiet */
3527  }
3528 
3529  return plan;
3530 }
3531 
3532 /*
3533  * create_tidscan_plan
3534  * Returns a tidscan plan for the base relation scanned by 'best_path'
3535  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3536  */
3537 static TidScan *
3538 create_tidscan_plan(PlannerInfo *root, TidPath *best_path,
3539  List *tlist, List *scan_clauses)
3540 {
3541  TidScan *scan_plan;
3542  Index scan_relid = best_path->path.parent->relid;
3543  List *tidquals = best_path->tidquals;
3544 
3545  /* it should be a base rel... */
3546  Assert(scan_relid > 0);
3547  Assert(best_path->path.parent->rtekind == RTE_RELATION);
3548 
3549  /*
3550  * The qpqual list must contain all restrictions not enforced by the
3551  * tidquals list. Since tidquals has OR semantics, we have to be careful
3552  * about matching it up to scan_clauses. It's convenient to handle the
3553  * single-tidqual case separately from the multiple-tidqual case. In the
3554  * single-tidqual case, we look through the scan_clauses while they are
3555  * still in RestrictInfo form, and drop any that are redundant with the
3556  * tidqual.
3557  *
3558  * In normal cases simple pointer equality checks will be enough to spot
3559  * duplicate RestrictInfos, so we try that first.
3560  *
3561  * Another common case is that a scan_clauses entry is generated from the
3562  * same EquivalenceClass as some tidqual, and is therefore redundant with
3563  * it, though not equal.
3564  *
3565  * Unlike indexpaths, we don't bother with predicate_implied_by(); the
3566  * number of cases where it could win are pretty small.
3567  */
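 /*
  * For example (illustrative): "WHERE ctid = '(0,1)' OR ctid = '(0,2)'"
  * produces two tidquals with OR semantics, which is the multiple-tidqual
  * case handled further below; a lone "WHERE ctid = '(0,1)'" is the
  * single-tidqual case, where duplicates can be dropped right here while
  * the clauses are still RestrictInfos.
  */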
3568  if (list_length(tidquals) == 1)
3569  {
3570  List *qpqual = NIL;
3571  ListCell *l;
3572 
3573  foreach(l, scan_clauses)
3574  {
3575  RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
3576 
3577  if (rinfo->pseudoconstant)
3578  continue; /* we may drop pseudoconstants here */
3579  if (list_member_ptr(tidquals, rinfo))
3580  continue; /* simple duplicate */
3581  if (is_redundant_derived_clause(rinfo, tidquals))
3582  continue; /* derived from same EquivalenceClass */
3583  qpqual = lappend(qpqual, rinfo);
3584  }
3585  scan_clauses = qpqual;
3586  }
3587 
3588  /* Sort clauses into best execution order */
3589  scan_clauses = order_qual_clauses(root, scan_clauses);
3590 
3591  /* Reduce RestrictInfo lists to bare expressions; ignore pseudoconstants */
3592  tidquals = extract_actual_clauses(tidquals, false);
3593  scan_clauses = extract_actual_clauses(scan_clauses, false);
3594 
3595  /*
3596  * If we have multiple tidquals, it's more convenient to remove duplicate
3597  * scan_clauses after stripping the RestrictInfos. In this situation,
3598  * because the tidquals represent OR sub-clauses, they could not have come
3599  * from EquivalenceClasses so we don't have to worry about matching up
3600  * non-identical clauses. On the other hand, because tidpath.c will have
3601  * extracted those sub-clauses from some OR clause and built its own list,
3602  * we will certainly not have pointer equality to any scan clause. So
3603  * convert the tidquals list to an explicit OR clause and see if we can
3604  * match it via equal() to any scan clause.
3605  */
3606  if (list_length(tidquals) > 1)
3607  scan_clauses = list_difference(scan_clauses,
3608  list_make1(make_orclause(tidquals)));
3609 
3610  /* Replace any outer-relation variables with nestloop params */
3611  if (best_path->path.param_info)
3612  {
3613  tidquals = (List *)
3614  replace_nestloop_params(root, (Node *) tidquals);
3615  scan_clauses = (List *)
3616  replace_nestloop_params(root, (Node *) scan_clauses);
3617  }
3618 
3619  scan_plan = make_tidscan(tlist,
3620  scan_clauses,
3621  scan_relid,
3622  tidquals);
3623 
3624  copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
3625 
3626  return scan_plan;
3627 }
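/*
 * For illustration (table name is hypothetical): a query such as
 *
 *     SELECT * FROM t WHERE ctid = '(0,1)' OR ctid = '(0,2)';
 *
 * would typically be planned as a TID Scan whose tidquals list holds the two
 * CTID equality clauses that tidpath.c extracted from the OR.  That is the
 * multiple-tidqual case handled above: the reconstructed OR clause is matched
 * via equal() and dropped from scan_clauses so it isn't rechecked redundantly.
 * A single "ctid = ..." or "ctid = ANY(...)" condition instead goes through
 * the single-tidqual branch, which prunes duplicates while the clauses are
 * still in RestrictInfo form.
 */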
3628 
3629 /*
3630  * create_tidrangescan_plan
3631  * Returns a tidrangescan plan for the base relation scanned by 'best_path'
3632  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3633  */
3634 static TidRangeScan *
3635 create_tidrangescan_plan(PlannerInfo *root, TidRangePath *best_path,
3636  List *tlist, List *scan_clauses)
3637 {
3638  TidRangeScan *scan_plan;
3639  Index scan_relid = best_path->path.parent->relid;
3640  List *tidrangequals = best_path->tidrangequals;
3641 
3642  /* it should be a base rel... */
3643  Assert(scan_relid > 0);
3644  Assert(best_path->path.parent->rtekind == RTE_RELATION);
3645 
3646  /*
3647  * The qpqual list must contain all restrictions not enforced by the
3648  * tidrangequals list. tidrangequals has AND semantics, so we can simply
3649  * remove any qual that appears in it.
3650  */
3651  {
3652  List *qpqual = NIL;
3653  ListCell *l;
3654 
3655  foreach(l, scan_clauses)
3656  {
3657  RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
3658 
3659  if (rinfo->pseudoconstant)
3660  continue; /* we may drop pseudoconstants here */
3661  if (list_member_ptr(tidrangequals, rinfo))
3662  continue; /* simple duplicate */
3663  qpqual = lappend(qpqual, rinfo);
3664  }
3665  scan_clauses = qpqual;
3666  }
3667 
3668  /* Sort clauses into best execution order */
3669  scan_clauses = order_qual_clauses(root, scan_clauses);
3670 
3671  /* Reduce RestrictInfo lists to bare expressions; ignore pseudoconstants */
3672  tidrangequals = extract_actual_clauses(tidrangequals, false);
3673  scan_clauses = extract_actual_clauses(scan_clauses, false);
3674 
3675  /* Replace any outer-relation variables with nestloop params */
3676  if (best_path->path.param_info)
3677  {
3678  tidrangequals = (List *)
3679  replace_nestloop_params(root, (Node *) tidrangequals);
3680  scan_clauses = (List *)
3681  replace_nestloop_params(root, (Node *) scan_clauses);
3682  }
3683 
3684  scan_plan = make_tidrangescan(tlist,
3685  scan_clauses,
3686  scan_relid,
3687  tidrangequals);
3688 
3689  copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
3690 
3691  return scan_plan;
3692 }
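/*
 * For illustration (table name is hypothetical): a range restriction on CTID,
 * for example
 *
 *     SELECT * FROM t WHERE ctid >= '(10,0)' AND ctid < '(20,0)';
 *
 * would normally produce a TID Range Scan with both comparisons in
 * tidrangequals.  Because those quals have AND semantics and are fully
 * enforced by the scan itself, the loop above usually leaves qpqual empty.
 */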
3693 
3694 /*
3695  * create_subqueryscan_plan
3696  * Returns a subqueryscan plan for the base relation scanned by 'best_path'
3697  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3698  */
3699 static SubqueryScan *
3700 create_subqueryscan_plan(PlannerInfo *root, SubqueryScanPath *best_path,
3701  List *tlist, List *scan_clauses)
3702 {
3703  SubqueryScan *scan_plan;
3704  RelOptInfo *rel = best_path->path.parent;
3705  Index scan_relid = rel->relid;
3706  Plan *subplan;
3707 
3708  /* it should be a subquery base rel... */
3709  Assert(scan_relid > 0);
3710  Assert(rel->rtekind == RTE_SUBQUERY);
3711 
3712  /*
3713  * Recursively create Plan from Path for subquery. Since we are entering
3714  * a different planner context (subroot), recurse to create_plan not
3715  * create_plan_recurse.
3716  */
3717  subplan = create_plan(rel->subroot, best_path->subpath);
3718 
3719  /* Sort clauses into best execution order */
3720  scan_clauses = order_qual_clauses(root, scan_clauses);
3721 
3722  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3723  scan_clauses = extract_actual_clauses(scan_clauses, false);
3724 
3725  /*
3726  * Replace any outer-relation variables with nestloop params.
3727  *
3728  * We must provide nestloop params for both lateral references of the
3729  * subquery and outer vars in the scan_clauses. It's better to assign the
3730  * former first, because that code path requires specific param IDs, while
3731  * replace_nestloop_params can adapt to the IDs assigned by
3732  * process_subquery_nestloop_params. This avoids possibly duplicating
3733  * nestloop params when the same Var is needed for both reasons.
3734  */
3735  if (best_path->path.param_info)
3736  {
3737  process_subquery_nestloop_params(root,
3738  rel->subplan_params);
3739  scan_clauses = (List *)
3740  replace_nestloop_params(root, (Node *) scan_clauses);
3741  }
3742 
3743  scan_plan = make_subqueryscan(tlist,
3744  scan_clauses,
3745  scan_relid,
3746  subplan);
3747 
3748  copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
3749 
3750  return scan_plan;
3751 }
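/*
 * For illustration (table and column names are hypothetical): a LATERAL
 * subquery that cannot be pulled up, e.g.
 *
 *     SELECT *
 *     FROM a, LATERAL (SELECT * FROM b WHERE b.x = a.x OFFSET 0) AS s;
 *
 * would typically be planned as a nestloop with a Subquery Scan on its inner
 * side.  The reference to a.x inside the subquery is the kind of lateral
 * reference that process_subquery_nestloop_params turns into a nestloop param
 * before replace_nestloop_params deals with any outer Vars in the scan
 * clauses, per the ordering note above.
 */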
3752 
3753 /*
3754  * create_functionscan_plan
3755  * Returns a functionscan plan for the base relation scanned by 'best_path'
3756  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3757  */
3758 static FunctionScan *
3759 create_functionscan_plan(PlannerInfo *root, Path *best_path,
3760  List *tlist, List *scan_clauses)
3761 {
3762  FunctionScan *scan_plan;
3763  Index scan_relid = best_path->parent->relid;
3764  RangeTblEntry *rte;
3765  List *functions;
3766 
3767  /* it should be a function base rel... */
3768  Assert(scan_relid > 0);
3769  rte = planner_rt_fetch(scan_relid, root);
3770  Assert(rte->rtekind == RTE_FUNCTION);
3771  functions = rte->functions;
3772 
3773  /* Sort clauses into best execution order */
3774  scan_clauses = order_qual_clauses(root, scan_clauses);
3775 
3776  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3777  scan_clauses = extract_actual_clauses(scan_clauses, false);
3778 
3779  /* Replace any outer-relation variables with nestloop params */
3780  if (best_path->param_info)
3781  {
3782  scan_clauses = (List *)
3783  replace_nestloop_params(root, (Node *) scan_clauses);
3784  /* The function expressions could contain nestloop params, too */
3785  functions = (List *) replace_nestloop_params(root, (Node *) functions);
3786  }
3787 
3788  scan_plan = make_functionscan(tlist, scan_clauses, scan_relid,
3789  functions, rte->funcordinality);
3790 
3791  copy_generic_path_info(&scan_plan->scan.plan, best_path);
3792 
3793  return scan_plan;
3794 }
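/*
 * For illustration: a set-returning function in FROM, e.g.
 *
 *     SELECT * FROM generate_series(1, 3) WITH ORDINALITY AS g(val, n);
 *
 * is planned as a Function Scan; WITH ORDINALITY is what sets
 * rte->funcordinality above.  If the function's arguments reference a
 * laterally-joined relation (say, generate_series(1, a.n)), those references
 * are the nestloop params handled in the param_info branch.
 */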
3795 
3796 /*
3797  * create_tablefuncscan_plan
3798  * Returns a tablefuncscan plan for the base relation scanned by 'best_path'
3799  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3800  */
3801 static TableFuncScan *
3802 create_tablefuncscan_plan(PlannerInfo *root, Path *best_path,
3803  List *tlist, List *scan_clauses)
3804 {
3805  TableFuncScan *scan_plan;
3806  Index scan_relid = best_path->parent->relid;
3807  RangeTblEntry *rte;
3808  TableFunc *tablefunc;
3809 
3810  /* it should be a function base rel... */
3811  Assert(scan_relid > 0);
3812  rte = planner_rt_fetch(scan_relid, root);
3813  Assert(rte->rtekind == RTE_TABLEFUNC);
3814  tablefunc = rte->tablefunc;
3815 
3816  /* Sort clauses into best execution order */
3817  scan_clauses = order_qual_clauses(root, scan_clauses);
3818 
3819  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3820  scan_clauses = extract_actual_clauses(scan_clauses, false);
3821 
3822  /* Replace any outer-relation variables with nestloop params */
3823  if (best_path->param_info)
3824  {
3825  scan_clauses = (List *)
3826  replace_nestloop_params(root, (Node *) scan_clauses);
3827  /* The function expressions could contain nestloop params, too */
3828  tablefunc = (TableFunc *) replace_nestloop_params(root, (Node *) tablefunc);
3829  }
3830 
3831  scan_plan = make_tablefuncscan(tlist, scan_clauses, scan_relid,
3832  tablefunc);
3833 
3834  copy_generic_path_info(&scan_plan->scan.plan, best_path);
3835 
3836  return scan_plan;
3837 }
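/*
 * For illustration (names are hypothetical): table functions here are
 * constructs such as XMLTABLE, e.g.
 *
 *     SELECT x.* FROM xmldata,
 *          XMLTABLE('/rows/row' PASSING data COLUMNS id int) AS x;
 *
 * which is planned as a Table Function Scan.  The TableFunc expression can
 * reference the laterally-joined rel (xmldata.data above), which is why it
 * too may need nestloop-param substitution.
 */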
3838 
3839 /*
3840  * create_valuesscan_plan
3841  * Returns a valuesscan plan for the base relation scanned by 'best_path'
3842  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3843  */
3844 static ValuesScan *
3845 create_valuesscan_plan(PlannerInfo *root, Path *best_path,
3846  List *tlist, List *scan_clauses)
3847 {
3848  ValuesScan *scan_plan;
3849  Index scan_relid = best_path->parent->relid;
3850  RangeTblEntry *rte;
3851  List *values_lists;
3852 
3853  /* it should be a values base rel... */
3854  Assert(scan_relid > 0);
3855  rte = planner_rt_fetch(scan_relid, root);
3856  Assert(rte->rtekind == RTE_VALUES);
3857  values_lists = rte->values_lists;
3858 
3859  /* Sort clauses into best execution order */
3860  scan_clauses = order_qual_clauses(root, scan_clauses);
3861 
3862  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3863  scan_clauses = extract_actual_clauses(scan_clauses, false);
3864 
3865  /* Replace any outer-relation variables with nestloop params */
3866  if (best_path->param_info)
3867  {
3868  scan_clauses = (List *)
3869  replace_nestloop_params(root, (Node *) scan_clauses);
3870  /* The values lists could contain nestloop params, too */
3871  values_lists = (List *)
3872  replace_nestloop_params(root, (Node *) values_lists);
3873  }
3874 
3875  scan_plan = make_valuesscan(tlist, scan_clauses, scan_relid,
3876  values_lists);
3877 
3878  copy_generic_path_info(&scan_plan->scan.plan, best_path);
3879 
3880  return scan_plan;
3881 }
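/*
 * For illustration: an inline VALUES list, e.g.
 *
 *     SELECT * FROM (VALUES (1, 'one'), (2, 'two')) AS v(num, name);
 *
 * is planned as a Values Scan.  When such a list appears on the inner side of
 * a nestloop and its expressions reference the outer rel (a LATERAL usage),
 * the values_lists replacement above substitutes nestloop params for those
 * outer Vars.
 */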
3882 
3883 /*
3884  * create_ctescan_plan
3885  * Returns a ctescan plan for the base relation scanned by 'best_path'
3886  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
3887  */
3888 static CteScan *
3889 create_ctescan_plan(PlannerInfo *root, Path *best_path,
3890  List *tlist, List *scan_clauses)
3891 {
3892  CteScan *scan_plan;
3893  Index scan_relid = best_path->parent->relid;
3894  RangeTblEntry *rte;
3895  SubPlan *ctesplan = NULL;
3896  int plan_id;
3897  int cte_param_id;
3898  PlannerInfo *cteroot;
3899  Index levelsup;
3900  int ndx;
3901  ListCell *lc;
3902 
3903  Assert(scan_relid > 0);
3904  rte = planner_rt_fetch(scan_relid, root);
3905  Assert(rte->rtekind == RTE_CTE);
3906  Assert(!rte->self_reference);
3907 
3908  /*
3909  * Find the referenced CTE, and locate the SubPlan previously made for it.
3910  */
3911  levelsup = rte->ctelevelsup;
3912  cteroot = root;
3913  while (levelsup-- > 0)
3914  {
3915  cteroot = cteroot->parent_root;
3916  if (!cteroot) /* shouldn't happen */
3917  elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
3918  }
3919 
3920  /*
3921  * Note: cte_plan_ids can be shorter than cteList, if we are still working
3922  * on planning the CTEs (ie, this is a side-reference from another CTE).
3923  * So we mustn't use forboth here.
3924  */
3925  ndx = 0;
3926  foreach(lc, cteroot->parse->cteList)
3927  {
3928  CommonTableExpr *cte = (CommonTableExpr *) lfirst(lc);
3929 
3930  if (strcmp(cte->ctename, rte->ctename) == 0)
3931  break;
3932  ndx++;
3933  }
3934  if (lc == NULL) /* shouldn't happen */
3935  elog(ERROR, "could not find CTE \"%s\"", rte->ctename);
3936  if (ndx >= list_length(cteroot->cte_plan_ids))
3937  elog(ERROR, "could not find plan for CTE \"%s\"", rte->ctename);
3938  plan_id = list_nth_int(cteroot->cte_plan_ids, ndx);
3939  if (plan_id <= 0)
3940  elog(ERROR, "no plan was made for CTE \"%s\"", rte->ctename);
3941  foreach(lc, cteroot->init_plans)
3942  {
3943  ctesplan = (SubPlan *) lfirst(lc);
3944  if (ctesplan->plan_id == plan_id)
3945  break;
3946  }
3947  if (lc == NULL) /* shouldn't happen */
3948  elog(ERROR, "could not find plan for CTE \"%s\"", rte->ctename);
3949 
3950  /*
3951  * We need the CTE param ID, which is the sole member of the SubPlan's
3952  * setParam list.
3953  */
3954  cte_param_id = linitial_int(ctesplan->setParam);
3955 
3956  /* Sort clauses into best execution order */
3957  scan_clauses = order_qual_clauses(root, scan_clauses);
3958 
3959  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3960  scan_clauses = extract_actual_clauses(scan_clauses, false);
3961 
3962  /* Replace any outer-relation variables with nestloop params */
3963  if (best_path->param_info)
3964  {
3965  scan_clauses = (List *)
3966  replace_nestloop_params(root, (Node *) scan_clauses);
3967  }
3968 
3969  scan_plan = make_ctescan(tlist, scan_clauses, scan_relid,
3970  plan_id, cte_param_id);
3971 
3972  copy_generic_path_info(&scan_plan->scan.plan, best_path);
3973 
3974  return scan_plan;
3975 }
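/*
 * For illustration (names are hypothetical): a materialized CTE referenced
 * more than once, e.g.
 *
 *     WITH w AS MATERIALIZED (SELECT * FROM big_table WHERE flag)
 *     SELECT * FROM w JOIN w AS w2 USING (id);
 *
 * is typically planned with the CTE as an InitPlan (the SubPlan located
 * above) plus a separate CTE Scan node for each reference, all reading the
 * shared tuplestore identified by plan_id and cte_param_id.
 */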
3976 
3977 /*
3978  * create_namedtuplestorescan_plan
3979  * Returns a tuplestorescan plan for the base relation scanned by
3980  * 'best_path' with restriction clauses 'scan_clauses' and targetlist
3981  * 'tlist'.
3982  */
3983 static NamedTuplestoreScan *
3984 create_namedtuplestorescan_plan(PlannerInfo *root, Path *best_path,
3985  List *tlist, List *scan_clauses)
3986 {
3987  NamedTuplestoreScan *scan_plan;
3988  Index scan_relid = best_path->parent->relid;
3989  RangeTblEntry *rte;
3990 
3991  Assert(scan_relid > 0);
3992  rte = planner_rt_fetch(scan_relid, root);
3993  Assert(rte->rtekind == RTE_NAMEDTUPLESTORE);
3994 
3995  /* Sort clauses into best execution order */
3996  scan_clauses = order_qual_clauses(root, scan_clauses);
3997 
3998  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
3999  scan_clauses = extract_actual_clauses(scan_clauses, false);
4000 
4001  /* Replace any outer-relation variables with nestloop params */
4002  if (best_path->param_info)
4003  {
4004  scan_clauses = (List *)
4005  replace_nestloop_params(root, (Node *) scan_clauses);
4006  }
4007 
4008  scan_plan = make_namedtuplestorescan(tlist, scan_clauses, scan_relid,
4009  rte->enrname);
4010 
4011  copy_generic_path_info(&scan_plan->scan.plan, best_path);
4012 
4013  return scan_plan;
4014 }
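/*
 * For illustration: named tuplestores most commonly come from AFTER trigger
 * transition tables, declared with something like
 *
 *     REFERENCING OLD TABLE AS oldtab NEW TABLE AS newtab
 *
 * in CREATE TRIGGER.  A query in the trigger body that reads newtab is
 * planned as a Named Tuplestore Scan on the ephemeral named relation whose
 * name is carried in rte->enrname.
 */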
4015 
4016 /*
4017  * create_resultscan_plan
4018  * Returns a Result plan for the RTE_RESULT base relation scanned by
4019  * 'best_path' with restriction clauses 'scan_clauses' and targetlist
4020  * 'tlist'.
4021  */
4022 static Result *
4023 create_resultscan_plan(PlannerInfo *root, Path *best_path,
4024  List *tlist, List *scan_clauses)
4025 {
4026  Result *scan_plan;
4027  Index scan_relid = best_path->parent->relid;
4028  RangeTblEntry *rte PG_USED_FOR_ASSERTS_ONLY;
4029 
4030  Assert(scan_relid > 0);
4031  rte = planner_rt_fetch(scan_relid, root);
4032  Assert(rte->rtekind == RTE_RESULT);
4033 
4034  /* Sort clauses into best execution order */
4035  scan_clauses = order_qual_clauses(root, scan_clauses);
4036 
4037  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
4038  scan_clauses = extract_actual_clauses(scan_clauses, false);
4039 
4040  /* Replace any outer-relation variables with nestloop params */
4041  if (best_path->param_info)
4042  {
4043  scan_clauses = (List *)
4044  replace_nestloop_params(root, (Node *) scan_clauses);
4045  }
4046 
4047  scan_plan = make_result(tlist, (Node *) scan_clauses, NULL);
4048 
4049  copy_generic_path_info(&scan_plan->plan, best_path);
4050 
4051  return scan_plan;
4052 }
4053 
4054 /*
4055  * create_worktablescan_plan
4056  * Returns a worktablescan plan for the base relation scanned by 'best_path'
4057  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
4058  */
4059 static WorkTableScan *
4060 create_worktablescan_plan(PlannerInfo *root, Path *best_path,
4061  List *tlist, List *scan_clauses)
4062 {
4063  WorkTableScan *scan_plan;
4064  Index scan_relid = best_path->parent->relid;
4065  RangeTblEntry *rte;
4066  Index levelsup;
4067  PlannerInfo *cteroot;
4068 
4069  Assert(scan_relid > 0);
4070  rte = planner_rt_fetch(scan_relid, root);
4071  Assert(rte->rtekind == RTE_CTE);
4072  Assert(rte->self_reference);
4073 
4074  /*
4075  * We need to find the worktable param ID, which is in the plan level
4076  * that's processing the recursive UNION, which is one level *below* where
4077  * the CTE comes from.
4078  */
4079  levelsup = rte->ctelevelsup;
4080  if (levelsup == 0) /* shouldn't happen */
4081  elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
4082  levelsup--;
4083  cteroot = root;
4084  while (levelsup-- > 0)
4085  {
4086  cteroot = cteroot->parent_root;
4087  if (!cteroot) /* shouldn't happen */
4088  elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
4089  }
4090  if (cteroot->wt_param_id < 0) /* shouldn't happen */
4091  elog(ERROR, "could not find param ID for CTE \"%s\"", rte->ctename);
4092 
4093  /* Sort clauses into best execution order */
4094  scan_clauses = order_qual_clauses(root, scan_clauses);
4095 
4096  /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
4097  scan_clauses = extract_actual_clauses(scan_clauses, false);
4098 
4099  /* Replace any outer-relation variables with nestloop params */
4100  if (best_path->param_info)
4101  {
4102  scan_clauses = (List *)
4103  replace_nestloop_params(root, (Node *) scan_clauses);
4104  }
4105 
4106  scan_plan = make_worktablescan(tlist, scan_clauses, scan_relid,
4107  cteroot->wt_param_id);
4108 
4109  copy_generic_path_info(&scan_plan->scan.plan, best_path);
4110 
4111  return scan_plan;
4112 }
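/*
 * For illustration: in a recursive CTE such as
 *
 *     WITH RECURSIVE t(n) AS (
 *         VALUES (1)
 *       UNION ALL
 *         SELECT n + 1 FROM t WHERE n < 100
 *     )
 *     SELECT * FROM t;
 *
 * the self-reference to t inside the recursive term is planned as a WorkTable
 * Scan.  The wt_param_id fetched above belongs to the plan level running the
 * RecursiveUnion, one query level below the CTE itself.
 */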
4113 
4114 /*
4115  * create_foreignscan_plan
4116  * Returns a foreignscan plan for the relation scanned by 'best_path'
4117  * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
4118  */
4119 static ForeignScan *
4120 create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
4121  List *tlist, List *scan_clauses)
4122 {
4123  ForeignScan *scan_plan;
4124  RelOptInfo *rel = best_path->path.parent;
4125  Index scan_relid = rel->relid;
4126  Oid rel_oid = InvalidOid;
4127  Plan *outer_plan = NULL;
4128 
4129  Assert(rel->fdwroutine != NULL);
4130 
4131  /* transform the child path if any */
4132  if (best_path->fdw_outerpath)
4133  outer_plan = create_plan_recurse(root, best_path->fdw_outerpath,
4134  CP_EXACT_TLIST);
4135 
4136  /*
4137  * If we're scanning a base relation, fetch its OID. (Irrelevant if
4138  * scanning a join relation.)
4139  */
4140  if (scan_relid > 0)
4141  {
4142  RangeTblEntry *rte;
4143 
4144  Assert(rel->rtekind == RTE_RELATION);
4145  rte = planner_rt_fetch(scan_relid, root);
4146  Assert(rte->rtekind == RTE_RELATION);
4147  rel_oid = rte->relid;
4148  }
4149 
4150  /*
4151  * Sort clauses into best execution order. We do this first since the FDW
4152  * might have more info than we do and wish to adjust the ordering.
4153  */
4154  scan_clauses = order_qual_clauses(root, scan_clauses);
4155 
4156  /*
4157  * Let the FDW perform its processing on the restriction clauses and
4158  * generate the plan node. Note that the FDW might remove restriction
4159  * clauses that it intends to execute remotely, or even add more (if it
4160  * has selected some join clauses for remote use but also wants them
4161  * rechecked locally).
4162  */
4163  scan_plan = rel->fdwroutine->GetForeignPlan(root, rel, rel_oid,
4164  best_path,
4165  tlist, scan_clauses,
4166  outer_plan);
4167 
4168  /* Copy cost data from Path to Plan; no need to make FDW do this */
4169  copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
4170 
4171  /* Copy user OID to access as; likewise no need to make FDW do this */
4172  scan_plan->checkAsUser = rel->userid;
4173 
4174  /* Copy foreign server OID; likewise, no need to make FDW do this */
4175  scan_plan->fs_server = rel->serverid;
4176 
4177  /*
4178  * Likewise, copy the relids that are represented by this foreign scan. An
4179  * upper rel doesn't have relids set, but it covers all the relations
4180  * participating in the underlying scan/join, so use root->all_query_rels.
4181  */
4182  if (rel->reloptkind == RELOPT_UPPER_REL)
4183  scan_plan->fs_relids = root->all_query_rels;
4184  else
4185  scan_plan->fs_relids = best_path->path.parent->relids;
4186 
4187  /*
4188  * Join relid sets include relevant outer joins, but FDWs may need to know
4189  * which are the included base rels. That's a bit tedious to get without
4190  * access to the plan-time data structures, so compute it here.
4191  */
4192  scan_plan->fs_base_relids = bms_difference(scan_plan->fs_relids,
4193  root->outer_join_rels);
4194 
4195  /*
4196  * If this is a foreign join, and to make it valid to push down we had to
4197  * assume that the current user is the same as some user explicitly named
4198  * in the query, mark the finished plan as depending on the current user.
4199  */
4200  if (rel->useridiscurrent)
4201  root->glob->dependsOnRole = true;
4202 
4203  /*
4204  * Replace any outer-relation variables with nestloop params in the qual,
4205  * fdw_exprs and fdw_recheck_quals expressions. We do this last so that
4206  * the FDW doesn't have to be involved. (Note that parts of fdw_exprs or
4207  * fdw_recheck_quals could have come from join clauses, so doing this
4208  * beforehand on the scan_clauses wouldn't work.) We assume
4209  * fdw_scan_tlist contains no such variables.
4210  */
4211  if (best_path->path.param_info)
4212  {
4213  scan_plan->scan.plan.qual = (List *)
4214  replace_nestloop_params(root, (Node *) scan_plan->scan.plan.qual);
4215  scan_plan->fdw_exprs = (List *)
4216  replace_nestloop_params(root, (Node *) scan_plan->fdw_exprs);
4217  scan_plan->fdw_recheck_quals = (List *)
4218  replace_nestloop_params(root,
4219  (Node *) scan_plan->fdw_recheck_quals);
4220  }
4221 
4222  /*
4223  * If rel is a base relation, detect whether any system columns are
4224  * requested from the rel. (If rel is a join relation, rel->relid will be
4225  * 0, but there can be no Var with relid 0 in the rel's targetlist or the
4226  * restriction clauses, so we skip this in that case. Note that any such
4227  * columns in base relations that were joined are assumed to be contained
4228  * in fdw_scan_tlist.) This is a bit of a kluge and might go away
4229  * someday, so we intentionally leave it out of the API presented to FDWs.
4230  */
4231  scan_plan->fsSystemCol = false;
4232  if (scan_relid > 0)
4233  {
4234  Bitmapset *attrs_used = NULL;
4235  ListCell *lc;
4236  int i;
4237 
4238  /*
4239  * First, examine all the attributes needed for joins or final output.
4240  * Note: we must look at rel's targetlist, not the attr_needed data,
4241  * because attr_needed isn't computed for inheritance child rels.
4242  */
4243  pull_varattnos((Node *) rel->reltarget->exprs, scan_relid, &attrs_used);
4244 
4245  /* Add all the attributes used by restriction clauses. */
4246  foreach(lc, rel->baserestrictinfo)
4247  {
4248  RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
4249 
4250  pull_varattnos((Node *) rinfo->clause, scan_relid, &attrs_used);
4251  }
4252 
4253  /* Now, are any system columns requested from rel? */
4254  for (i = FirstLowInvalidHeapAttributeNumber + 1; i < 0; i++)
4255  {
4256  if (bms_is_member(i - FirstLowInvalidHeapAttributeNumber, attrs_used))
4257  {
4258  scan_plan->fsSystemCol = true;
4259  break;
4260  }
4261  }
4262 
4263  bms_free(attrs_used);
4264  }
4265 
4266  return scan_plan;
4267 }
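/*
 * For illustration: the fsSystemCol bookkeeping above is what lets a query
 * such as
 *
 *     SELECT ctid, * FROM some_foreign_table;
 *
 * work for FDWs that expose system columns.  Any negative attribute number
 * found in the targetlist or restriction clauses marks the plan, so the
 * executor knows system columns may be demanded (for instance, it fills in
 * tableoid on returned tuples).  Whether ctid is actually meaningful depends
 * on the FDW; postgres_fdw supports it, for example.
 */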
4268 
4269 /*
4270  * create_customscan_plan
4271  *
4272  * Transform a CustomPath into a Plan.
4273  */
4274 static CustomScan *
4275 create_customscan_plan(PlannerInfo *root, CustomPath *best_path,
4276  List *tlist, List *scan_clauses)
4277 {
4278  CustomScan *cplan;
4279  RelOptInfo *rel = best_path->path.parent;
4280  List *custom_plans = NIL;
4281  ListCell *lc;
4282 
4283  /* Recursively transform child paths. */
4284  foreach(lc, best_path->custom_paths)
4285  {
4286  Plan *plan = create_plan_recurse(root, (Path *) lfirst(lc),
4287  CP_EXACT_TLIST);
4288 
4289  custom_plans = lappend(custom_plans, plan);
4290  }
4291 
4292  /*
4293  * Sort clauses into the best execution order, although the custom-scan
4294  * provider can reorder them again.
4295  */
4296  scan_clauses = order_qual_clauses(root, scan_clauses);
4297 
4298  /*
4299  * Invoke custom plan provider to create the Plan node represented by the
4300  * CustomPath.
4301  */
4302  cplan = castNode(CustomScan,
4303  best_path->methods->PlanCustomPath(root,
4304  rel,
4305  best_path,
4306  tlist,
4307  scan_clauses,
4308  custom_plans));
4309 
4310  /*
4311  * Copy cost data from Path to Plan; no need to make custom-plan providers
4312  * do this
4313  */
4314  copy_generic_path_info(&cplan->scan.plan, &best_path->path);
4315 
4316  /* Likewise, copy the relids that are represented by this custom scan */
4317  cplan->custom_relids = best_path->path.parent->relids;
4318 
4319  /*
4320  * Replace any outer-relation variables with nestloop params in the qual
4321  * and custom_exprs expressions. We do this last so that the custom-plan
4322  * provider doesn't have to be involved. (Note that parts of custom_exprs
4323  * could have come from join clauses, so doing this beforehand on the
4324  * scan_clauses wouldn't work.) We assume custom_scan_tlist contains no
4325  * such variables.
4326  */
4327  if (best_path->path.param_info)
4328  {
4329  cplan->scan.plan.qual = (List *)
4330  replace_nestloop_params(root, (Node *) cplan->scan.plan.qual);
4331  cplan->custom_exprs = (List *)
4332  replace_nestloop_params(root, (Node *) cplan->custom_exprs);
4333  }
4334 
4335  return cplan;
4336 }
4337 
4338 
4339 /*****************************************************************************
4340  *
4341  * JOIN METHODS
4342  *
4343  *****************************************************************************/
4344 
4345 static NestLoop *
4346 create_nestloop_plan(PlannerInfo *root,
4347  NestPath *best_path)
4348 {
4349  NestLoop *join_plan;
4350  Plan *outer_plan;
4351  Plan *inner_plan;
4352  List *tlist = build_path_tlist(root, &best_path->jpath.path);
4353  List *joinrestrictclauses = best_path->jpath.joinrestrictinfo;
4354  List *joinclauses;
4355  List *otherclauses;
4356  Relids outerrelids;
4357  List *nestParams;
4358  Relids saveOuterRels = root->curOuterRels;
4359 
4360  /*
4361  * If the inner path is parameterized by the topmost parent of the outer
4362  * rel rather than the outer rel itself, fix that. (Nothing happens here
4363  * if it is not so parameterized.)
4364  */
4365  best_path->jpath.innerjoinpath =
4366  reparameterize_path_by_child(root,
4367  best_path->jpath.innerjoinpath,
4368  best_path->jpath.outerjoinpath->parent);
4369 
4370  /*
4371  * Failure here probably means that reparameterize_path_by_child() is not
4372  * in sync with path_is_reparameterizable_by_child().
4373  */
4374  Assert(best_path->jpath.innerjoinpath != NULL);
4375 
4376  /* NestLoop can project, so no need to be picky about child tlists */
4377  outer_plan = create_plan_recurse(root, best_path->jpath.outerjoinpath, 0);
4378 
4379  /* For a nestloop, include outer relids in curOuterRels for inner side */
4380  root->curOuterRels = bms_union(root->curOuterRels,
4381  best_path->jpath.outerjoinpath->parent->relids);
4382 
4383  inner_plan = create_plan_recurse(root, best_path->jpath.innerjoinpath, 0);
4384 
4385  /* Restore curOuterRels */
4386  bms_free(root->curOuterRels);
4387  root->curOuterRels = saveOuterRels;
4388 
4389  /* Sort join qual clauses into best execution order */
4390  joinrestrictclauses = order_qual_clauses(root, joinrestrictclauses);
4391 
4392  /* Get the join qual clauses (in plain expression form) */
4393  /* Any pseudoconstant clauses are ignored here */
4394  if (IS_OUTER_JOIN(best_path->jpath.jointype))
4395  {
4396  extract_actual_join_clauses(joinrestrictclauses,
4397  best_path->jpath.path.parent->relids,
4398  &joinclauses, &otherclauses);
4399  }
4400  else
4401  {
4402  /* We can treat all clauses alike for an inner join */
4403  joinclauses = extract_actual_clauses(joinrestrictclauses, false);
4404  otherclauses = NIL;
4405  }
4406 
4407  /* Replace any outer-relation variables with nestloop params */
4408  if (best_path->jpath.path.param_info)
4409  {
4410  joinclauses = (List *)
4411  replace_nestloop_params(root, (Node *) joinclauses);
4412  otherclauses = (List *)
4413  replace_nestloop_params(root, (Node *) otherclauses);
4414  }
4415 
4416  /*
4417  * Identify any nestloop parameters that should be supplied by this join
4418  * node, and remove them from root->curOuterParams.
4419  */
4420  outerrelids = best_path->jpath.outerjoinpath->parent->relids;
4421  nestParams = identify_current_nestloop_params(root, outerrelids);
4422 
4423  join_plan = make_nestloop(tlist,
4424  joinclauses,
4425  otherclauses,
4426  nestParams,
4427  outer_plan,
4428  inner_plan,
4429  best_path->jpath.jointype,
4430  best_path->jpath.inner_unique);
4431 
4432  copy_generic_path_info(&join_plan->join.plan, &best_path->jpath.path);
4433 
4434  return join_plan;
4435 }
4436 
4437 static MergeJoin *
4438 create_mergejoin_plan(PlannerInfo *root,
4439  MergePath *best_path)
4440 {
4441  MergeJoin *join_plan;
4442  Plan *outer_plan;
4443  Plan *inner_plan;
4444  List *tlist = build_path_tlist(root, &best_path->jpath.path);
4445  List *joinclauses;
4446  List *otherclauses;
4447  List *mergeclauses;
4448  List *outerpathkeys;
4449  List *innerpathkeys;
4450  int nClauses;
4451  Oid *mergefamilies;
4452  Oid *mergecollations;
4453  int *mergestrategies;
4454  bool *mergenullsfirst;
4455  PathKey *opathkey;
4456  EquivalenceClass *opeclass;
4457  int i;
4458  ListCell *lc;
4459  ListCell *lop;
4460  ListCell *lip;
4461  Path *outer_path = best_path->jpath.outerjoinpath;
4462  Path *inner_path = best_path->jpath.innerjoinpath;
4463 
4464  /*
4465  * MergeJoin can project, so we don't have to demand exact tlists from the
4466  * inputs. However, if we're intending to sort an input's result, it's
4467  * best to request a small tlist so we aren't sorting more data than
4468  * necessary.
4469  */
4470  outer_plan = create_plan_recurse(root, best_path->jpath.outerjoinpath,
4471  (best_path->outersortkeys != NIL) ? CP_SMALL_TLIST : 0);
4472 
4473  inner_plan = create_plan_recurse(root, best_path->jpath.innerjoinpath,
4474  (best_path->innersortkeys != NIL) ? CP_SMALL_TLIST : 0);
4475 
4476  /* Sort join qual clauses into best execution order */
4477  /* NB: do NOT reorder the mergeclauses */
4478  joinclauses = order_qual_clauses(root, best_path->jpath.joinrestrictinfo);
4479 
4480  /* Get the join qual clauses (in plain expression form) */
4481  /* Any pseudoconstant clauses are ignored here */
4482  if (IS_OUTER_JOIN(best_path->jpath.jointype))
4483  {
4484  extract_actual_join_clauses(joinclauses,
4485  best_path->jpath.path.parent->relids,
4486  &joinclauses, &otherclauses);
4487  }
4488  else
4489  {
4490  /* We can treat all clauses alike for an inner join */
4491  joinclauses = extract_actual_clauses(joinclauses, false);
4492  otherclauses = NIL;
4493  }
4494 
4495  /*
4496  * Remove the mergeclauses from the list of join qual clauses, leaving the
4497  * list of quals that must be checked as qpquals.
4498  */
4499  mergeclauses = get_actual_clauses(best_path->path_mergeclauses);
4500  joinclauses = list_difference(joinclauses, mergeclauses);
4501 
4502  /*
4503  * Replace any outer-relation variables with nestloop params. There
4504  * should not be any in the mergeclauses.
4505  */
4506  if (best_path->jpath.path.param_info)
4507  {
4508  joinclauses = (List *)
4509  replace_nestloop_params(root, (Node *) joinclauses);
4510  otherclauses = (List *)
4511  replace_nestloop_params(root, (Node *) otherclauses);
4512  }
4513 
4514  /*
4515  * Rearrange mergeclauses, if needed, so that the outer variable is always
4516  * on the left; mark the mergeclause restrictinfos with correct
4517  * outer_is_left status.
4518  */
4519  mergeclauses = get_switched_clauses(best_path->path_mergeclauses,
4520  best_path->jpath.outerjoinpath->parent->relids);
4521 
4522  /*
4523  * Create explicit sort nodes for the outer and inner paths if necessary.
4524  */
4525  if (best_path->outersortkeys)
4526  {
4527  Relids outer_relids = outer_path->parent->relids;
4528  Plan *sort_plan;
4529  bool use_incremental_sort = false;
4530  int presorted_keys;
4531 
4532  /*
4533  * We choose to use incremental sort if it is enabled and there are
4534  * presorted keys; otherwise we use full sort.
4535  */
4536  if (enable_incremental_sort)
4537  {
4538  bool is_sorted PG_USED_FOR_ASSERTS_ONLY;
4539 
4540  is_sorted = pathkeys_count_contained_in(best_path->outersortkeys,
4541  outer_path->pathkeys,
4542  &presorted_keys);
4543  Assert(!is_sorted);
4544 
4545  if (presorted_keys > 0)
4546  use_incremental_sort = true;
4547  }
4548 
4549  if (!use_incremental_sort)
4550  {
4551  sort_plan = (Plan *)
4552  make_sort_from_pathkeys(outer_plan,
4553  best_path->outersortkeys,
4554  outer_relids);
4555 
4556  label_sort_with_costsize(root, (Sort *) sort_plan, -1.0);
4557  }
4558  else
4559  {
4560  sort_plan = (Plan *)
4561  make_incrementalsort_from_pathkeys(outer_plan,
4562  best_path->outersortkeys,
4563  outer_relids,
4564  presorted_keys);
4565 
4566  label_incrementalsort_with_costsize(root,
4567  (IncrementalSort *) sort_plan,
4568  best_path->outersortkeys,
4569  -1.0);
4570  }
4571 
4572  outer_plan = sort_plan;
4573  outerpathkeys = best_path->outersortkeys;
4574  }
4575  else
4576  outerpathkeys = best_path->jpath.outerjoinpath->pathkeys;
4577 
4578  if (best_path->innersortkeys)
4579  {
4580  /*
4581  * We do not consider incremental sort for the inner path, because
4582  * incremental sort does not support mark/restore.
4583  */
4584 
4585  Relids inner_relids = inner_path->parent->relids;
4586  Sort *sort = make_sort_from_pathkeys(inner_plan,
4587  best_path->innersortkeys,
4588  inner_relids);
4589 
4590  label_sort_with_costsize(root, sort, -1.0);
4591  inner_plan = (Plan *) sort;
4592  innerpathkeys = best_path->innersortkeys;
4593  }
4594  else
4595  innerpathkeys = best_path->jpath.innerjoinpath->pathkeys;
4596 
4597  /*
4598  * If specified, add a materialize node to shield the inner plan from the
4599  * need to handle mark/restore.
4600  */
4601  if (best_path->materialize_inner)
4602  {
4603  Plan *matplan = (Plan *) make_material(inner_plan);
4604 
4605  /*
4606  * We assume the materialize will not spill to disk, and therefore
4607  * charge just cpu_operator_cost per tuple. (Keep this estimate in
4608  * sync with final_cost_mergejoin.)
4609  */
4610  copy_plan_costsize(matplan, inner_plan);
4611  matplan->total_cost += cpu_operator_cost * matplan->plan_rows;
4612 
4613  inner_plan = matplan;
4614  }
4615 
4616  /*
4617  * Compute the opfamily/collation/strategy/nullsfirst arrays needed by the
4618  * executor. The information is in the pathkeys for the two inputs, but
4619  * we need to be careful about the possibility of mergeclauses sharing a
4620  * pathkey, as well as the possibility that the inner pathkeys are not in
4621  * an order matching the mergeclauses.
4622  */
4623  nClauses = list_length(mergeclauses);
4624  Assert(nClauses == list_length(best_path->path_mergeclauses));
4625  mergefamilies = (Oid *) palloc(nClauses * sizeof(Oid));
4626  mergecollations = (Oid *) palloc(nClauses * sizeof(Oid));
4627  mergestrategies = (int *) palloc(nClauses * sizeof(int));
4628  mergenullsfirst = (bool *) palloc(nClauses * sizeof(bool));
4629 
4630  opathkey = NULL;
4631  opeclass = NULL;
4632  lop = list_head(outerpathkeys);
4633  lip = list_head(innerpathkeys);
4634  i = 0;
4635  foreach(lc, best_path->path_mergeclauses)
4636  {
4637  RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
4638  EquivalenceClass *oeclass;
4639  EquivalenceClass *ieclass;
4640  PathKey *ipathkey = NULL;
4641  EquivalenceClass *ipeclass = NULL;
4642  bool first_inner_match = false;
4643 
4644  /* fetch outer/inner eclass from mergeclause */
4645  if (rinfo->outer_is_left)
4646  {
4647  oeclass = rinfo->left_ec;
4648  ieclass = rinfo->right_ec;
4649  }
4650  else
4651  {
4652  oeclass = rinfo->right_ec;
4653  ieclass = rinfo->left_ec;
4654  }
4655  Assert(oeclass != NULL);
4656  Assert(ieclass != NULL);
4657 
4658  /*
4659  * We must identify the pathkey elements associated with this clause
4660  * by matching the eclasses (which should give a unique match, since
4661  * the pathkey lists should be canonical). In typical cases the merge
4662  * clauses are one-to-one with the pathkeys, but when dealing with
4663  * partially redundant query conditions, things are more complicated.
4664  *
4665  * lop and lip reference the first as-yet-unmatched pathkey elements.
4666  * If they're NULL then all pathkey elements have been matched.
4667  *
4668  * The ordering of the outer pathkeys should match the mergeclauses,
4669  * by construction (see find_mergeclauses_for_outer_pathkeys()). There
4670  * could be more than one mergeclause for the same outer pathkey, but
4671  * no pathkey may be entirely skipped over.
4672  */
4673  if (oeclass != opeclass) /* multiple matches are not interesting */
4674  {
4675  /* doesn't match the current opathkey, so must match the next */
4676  if (lop == NULL)
4677  elog(ERROR, "outer pathkeys do not match mergeclauses");
4678  opathkey = (PathKey *) lfirst(lop);
4679  opeclass = opathkey->pk_eclass;
4680  lop = lnext(outerpathkeys, lop);
4681  if (oeclass != opeclass)
4682  elog(ERROR, "outer pathkeys do not match mergeclauses");
4683  }
4684 
4685  /*
4686  * The inner pathkeys likewise should not have skipped-over keys, but
4687  * it's possible for a mergeclause to reference some earlier inner
4688  * pathkey if we had redundant pathkeys. For example we might have
4689  * mergeclauses like "o.a = i.x AND o.b = i.y AND o.c = i.x". The
4690  * implied inner ordering is then "ORDER BY x, y, x", but the pathkey
4691  * mechanism drops the second sort by x as redundant, and this code
4692  * must cope.
4693  *
4694  * It's also possible for the implied inner-rel ordering to be like
4695  * "ORDER BY x, y, x DESC". We still drop the second instance of x as
4696  * redundant; but this means that the sort ordering of a redundant
4697  * inner pathkey should not be considered significant. So we must
4698  * detect whether this is the first clause matching an inner pathkey.
4699  */
4700  if (lip)
4701  {
4702  ipathkey = (PathKey *) lfirst(lip);
4703  ipeclass = ipathkey->pk_eclass;
4704  if (ieclass == ipeclass)
4705  {
4706  /* successful first match to this inner pathkey */
4707  lip = lnext(innerpathkeys, lip);
4708  first_inner_match = true;
4709  }
4710  }
4711  if (!first_inner_match)
4712  {
4713  /* redundant clause ... must match something before lip */
4714  ListCell *l2;
4715 
4716  foreach(l2, innerpathkeys)
4717  {
4718  if (l2 == lip)
4719  break;
4720  ipathkey = (PathKey *) lfirst(l2);
4721  ipeclass = ipathkey->pk_eclass;
4722  if (ieclass == ipeclass)
4723  break;
4724  }
4725  if (ieclass != ipeclass)
4726  elog(ERROR, "inner pathkeys do not match mergeclauses");
4727  }
4728 
4729  /*
4730  * The pathkeys should always match each other as to opfamily and
4731  * collation (which affect equality), but if we're considering a
4732  * redundant inner pathkey, its sort ordering might not match. In
4733  * such cases we may ignore the inner pathkey's sort ordering and use
4734  * the outer's. (In effect, we're lying to the executor about the
4735  * sort direction of this inner column, but it does not matter since
4736  * the run-time row comparisons would only reach this column when
4737  * there's equality for the earlier column containing the same eclass.
4738  * There could be only one value in this column for the range of inner
4739  * rows having a given value in the earlier column, so it does not
4740  * matter which way we imagine this column to be ordered.) But a
4741  * non-redundant inner pathkey had better match outer's ordering too.
4742  */
4743  if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
4744  opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation)
4745  elog(ERROR, "left and right pathkeys do not match in mergejoin");
4746  if (first_inner_match &&
4747  (opathkey->pk_strategy != ipathkey->pk_strategy ||
4748  opathkey->pk_nulls_first != ipathkey->pk_nulls_first))
4749  elog(ERROR, "left and right pathkeys do not match in mergejoin");
4750 
4751  /* OK, save info for executor */
4752  mergefamilies[i] = opathkey->pk_opfamily;
4753  mergecollations[i] = opathkey->pk_eclass->ec_collation;
4754  mergestrategies[i] = opathkey->pk_strategy;
4755  mergenullsfirst[i] = opathkey->pk_nulls_first;
4756  i++;
4757  }
4758 
4759  /*
4760  * Note: it is not an error if we have additional pathkey elements (i.e.,
4761  * lop or lip isn't NULL here). The input paths might be better-sorted
4762  * than we need for the current mergejoin.
4763  */
4764 
4765  /*
4766  * Now we can build the mergejoin node.
4767  */
4768  join_plan = make_mergejoin(tlist,
4769  joinclauses,
4770  otherclauses,
4771  mergeclauses,
4772  mergefamilies,
4773  mergecollations,
4774  mergestrategies,
4775  mergenullsfirst,
4776  outer_plan,
4777  inner_plan,
4778  best_path->jpath.jointype,
4779  best_path->jpath.inner_unique,
4780  best_path->skip_mark_restore);
4781 
4782  /* Costs of sort and material steps are included in path cost already */
4783  copy_generic_path_info(&join_plan->join.plan, &best_path->jpath.path);
4784 
4785  return join_plan;
4786 }
4787 
4788 static HashJoin *
4789 create_hashjoin_plan(PlannerInfo *root,
4790  HashPath *best_path)
4791 {
4792  HashJoin *join_plan;
4793  Hash *hash_plan;
4794  Plan *outer_plan;
4795  Plan *inner_plan;
4796  List *tlist = build_path_tlist(root, &best_path->jpath.path);
4797  List *joinclauses;
4798  List *otherclauses;
4799  List *hashclauses;
4800  List *hashoperators = NIL;
4801  List *hashcollations = NIL;
4802  List *inner_hashkeys = NIL;
4803  List *outer_hashkeys = NIL;
4804  Oid skewTable = InvalidOid;
4805  AttrNumber skewColumn = InvalidAttrNumber;
4806  bool skewInherit = false;
4807  ListCell *lc;
4808 
4809  /*
4810  * HashJoin can project, so we don't have to demand exact tlists from the
4811  * inputs. However, it's best to request a small tlist from the inner
4812  * side, so that we aren't storing more data than necessary. Likewise, if
4813  * we anticipate batching, request a small tlist from the outer side so
4814  * that we don't put extra data in the outer batch files.
4815  */
4816  outer_plan = create_plan_recurse(root, best_path->jpath.outerjoinpath,
4817  (best_path->num_batches > 1) ? CP_SMALL_TLIST : 0);
4818 
4819  inner_plan = create_plan_recurse(root, best_path->jpath.innerjoinpath,
4820  CP_SMALL_TLIST);
4821 
4822  /* Sort join qual clauses into best execution order */
4823  joinclauses = order_qual_clauses(root, best_path->jpath.joinrestrictinfo);
4824  /* There's no point in sorting the hash clauses ... */
4825 
4826  /* Get the join qual clauses (in plain expression form) */
4827  /* Any pseudoconstant clauses are ignored here */
4828  if (IS_OUTER_JOIN(best_path->jpath.jointype))
4829  {
4830  extract_actual_join_clauses(joinclauses,
4831  best_path->jpath.path.parent->relids,
4832  &joinclauses, &otherclauses);
4833  }
4834  else
4835  {
4836  /* We can treat all clauses alike for an inner join */
4837  joinclauses = extract_actual_clauses(joinclauses, false);
4838  otherclauses = NIL;
4839  }
4840 
4841  /*
4842  * Remove the hashclauses from the list of join qual clauses, leaving the
4843  * list of quals that must be checked as qpquals.
4844  */
4845  hashclauses = get_actual_clauses(best_path->path_hashclauses);
4846  joinclauses = list_difference(joinclauses, hashclauses);
4847 
4848  /*
4849  * Replace any outer-relation variables with nestloop params. There
4850  * should not be any in the hashclauses.
4851  */
4852  if (best_path->jpath.path.param_info)
4853  {
4854  joinclauses = (List *)
4855  replace_nestloop_params(root, (Node *) joinclauses);
4856  otherclauses = (List *)
4857  replace_nestloop_params(root, (Node *) otherclauses);
4858  }
4859 
4860  /*
4861  * Rearrange hashclauses, if needed, so that the outer variable is always
4862  * on the left.
4863  */
4864  hashclauses = get_switched_clauses(best_path->path_hashclauses,
4865  best_path->jpath.outerjoinpath->parent->relids);
4866 
4867  /*
4868  * If there is a single join clause and we can identify the outer variable
4869  * as a simple column reference, supply its identity for possible use in
4870  * skew optimization. (Note: in principle we could do skew optimization
4871  * with multiple join clauses, but we'd have to be able to determine the
4872  * most common combinations of outer values, which we don't currently have
4873  * enough stats for.)
4874  */
4875  if (list_length(hashclauses) == 1)
4876  {
4877  OpExpr *clause = (OpExpr *) linitial(hashclauses);
4878  Node *node;
4879 
4880  Assert(is_opclause(clause));
4881  node = (Node *) linitial(clause->args);
4882  if (IsA(node, RelabelType))
4883  node = (Node *) ((RelabelType *) node)->arg;
4884  if (IsA(node, Var))
4885  {
4886  Var *var = (Var *) node;
4887  RangeTblEntry *rte;
4888 
4889  rte = root->simple_rte_array[var->varno];
4890  if (rte->rtekind == RTE_RELATION)
4891  {
4892  skewTable = rte->relid;
4893  skewColumn = var->varattno;
4894  skewInherit = rte->inh;
4895  }
4896  }
4897  }
4898 
4899  /*
4900  * Collect hash related information. The hashed expressions are
4901  * deconstructed into outer/inner expressions, so they can be computed
4902  * separately (inner expressions are used to build the hashtable via Hash,
4903  * outer expressions to perform lookups of tuples from HashJoin's outer
4904  * plan in the hashtable). Also collect operator information necessary to
4905  * build the hashtable.
4906  */
4907  foreach(lc, hashclauses)
4908  {
4909  OpExpr *hclause = lfirst_node(OpExpr, lc);
4910 
4911  hashoperators = lappend_oid(hashoperators, hclause->opno);
4912  hashcollations = lappend_oid(hashcollations, hclause->inputcollid);
4913  outer_hashkeys = lappend(outer_hashkeys, linitial(hclause->args));
4914  inner_hashkeys = lappend(inner_hashkeys, lsecond(hclause->args));
4915  }
4916 
4917  /*
4918  * Build the hash node and hash join node.
4919  */
4920  hash_plan = make_hash(inner_plan,
4921  inner_hashkeys,
4922  skewTable,
4923  skewColumn,
4924  skewInherit);
4925 
4926  /*
4927  * Set Hash node's startup & total costs equal to total cost of input
4928  * plan; this only affects EXPLAIN display, not decisions.
4929  */
4930  copy_plan_costsize(&hash_plan->plan, inner_plan);
4931  hash_plan->plan.startup_cost = hash_plan->plan.total_cost;
4932 
4933  /*
4934  * If parallel-aware, the executor will also need an estimate of the total
4935  * number of rows expected from all participants so that it can size the
4936  * shared hash table.
4937  */
4938  if (best_path->jpath.path.parallel_aware)
4939  {
4940  hash_plan->plan.parallel_aware = true;
4941  hash_plan->rows_total = best_path->inner_rows_total;
4942  }
4943 
4944  join_plan = make_hashjoin(tlist,
4945  joinclauses,
4946  otherclauses,
4947  hashclauses,
4948  hashoperators,
4949  hashcollations,
4950  outer_hashkeys,
4951  outer_plan,
4952  (Plan *) hash_plan,
4953  best_path->jpath.jointype,
4954  best_path->jpath.inner_unique);
4955 
4956  copy_generic_path_info(&join_plan->join.plan, &best_path->jpath.path);
4957 
4958  return join_plan;
4959 }
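/*
 * For illustration of the skew setup above (table and column names are
 * hypothetical): given a single hashclause such as
 * orders.customer_id = customers.id with customers as the inner rel, the
 * outer variable orders.customer_id is a plain column reference, so skewTable
 * and skewColumn get filled in.  The executor can then consult that column's
 * most-common-value statistics and keep inner tuples hashing to those values
 * in memory during a multi-batch join, so the most frequently matched outer
 * rows need not be written to batch files.
 */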
4960 
4961 
4962 /*****************************************************************************
4963  *
4964  * SUPPORTING ROUTINES
4965  *
4966  *****************************************************************************/
4967 
4968 /*
4969  * replace_nestloop_params
4970  * Replace outer-relation Vars and PlaceHolderVars in the given expression
4971  * with nestloop Params
4972  *
4973  * All Vars and PlaceHolderVars belonging to the relation(s) identified by
4974  * root->curOuterRels are replaced by Params, and entries are added to
4975  * root->curOuterParams if not already present.
4976  */
4977 static Node *
4978 replace_nestloop_params(PlannerInfo *root, Node *expr)
4979 {
4980  /* No setup needed for tree walk, so away we go */
4981  return replace_nestloop_params_mutator(expr, root);
4982 }
4983 
4984 static Node *
4985 replace_nestloop_params_mutator(Node *node, PlannerInfo *root)
4986 {
4987  if (node == NULL)
4988  return NULL;
4989  if (IsA(node, Var))
4990  {
4991  Var *var = (Var *) node;
4992 
4993  /* Upper-level Vars should be long gone at this point */
4994  Assert(var->varlevelsup == 0);
4995  /* If not to be replaced, we can just return the Var unmodified */
4996  if (IS_SPECIAL_VARNO(var->varno) ||
4997  !bms_is_member(var->varno, root->curOuterRels))
4998  return node;
4999  /* Replace the Var with a nestloop Param */
5000  return (Node *) replace_nestloop_param_var(root, var);
5001  }
5002  if (IsA(node, PlaceHolderVar))
5003  {
5004  PlaceHolderVar *phv = (PlaceHolderVar *) node;
5005 
5006  /* Upper-level PlaceHolderVars should be long gone at this point */
5007  Assert(phv->phlevelsup == 0);
5008 
5009  /* Check whether we need to replace the PHV */
5010  if (!bms_is_subset(find_placeholder_info(root, phv)->ph_eval_at,
5011  root->curOuterRels))
5012  {
5013  /*
5014  * We can't replace the whole PHV, but we might still need to
5015  * replace Vars or PHVs within its expression, in case it ends up
5016  * actually getting evaluated here. (It might get evaluated in
5017  * this plan node, or some child node; in the latter case we don't
5018  * really need to process the expression here, but we haven't got
5019  * enough info to tell if that's the case.) Flat-copy the PHV
5020  * node and then recurse on its expression.
5021  *
5022  * Note that after doing this, we might have different
5023  * representations of the contents of the same PHV in different
5024  * parts of the plan tree. This is OK because equal() will just
5025  * match on phid/phlevelsup, so setrefs.c will still recognize an
5026  * upper-level reference to a lower-level copy of the same PHV.
5027  */
5028  PlaceHolderVar *newphv = makeNode(PlaceHolderVar);
5029 
5030  memcpy(newphv, phv, sizeof(PlaceHolderVar));
5031  newphv->phexpr = (Expr *)
5032  replace_nestloop_params_mutator((Node *) phv->phexpr,
5033  root);
5034  return (Node *) newphv;
5035  }
5036  /* Replace the PlaceHolderVar with a nestloop Param */
5037  return (Node *) replace_nestloop_param_placeholdervar(root, phv);
5038  }
5039  return expression_tree_mutator(node,
5040  replace_nestloop_params_mutator,
5041  (void *) root);
5042 }
5043 
5044 /*
5045  * fix_indexqual_references
5046  * Adjust indexqual clauses to the form the executor's indexqual
5047  * machinery needs.
5048  *
5049  * We have three tasks here:
5050  * * Select the actual qual clauses out of the input IndexClause list,
5051  * and remove RestrictInfo nodes from the qual clauses.
5052  * * Replace any outer-relation Var or PHV nodes with nestloop Params.
5053  * (XXX eventually, that responsibility should go elsewhere?)
5054  * * Index keys must be represented by Var nodes with varattno set to the
5055  * index's attribute number, not the attribute number in the original rel.
5056  *
5057  * *stripped_indexquals_p receives a list of the actual qual clauses.
5058  *
5059  * *fixed_indexquals_p receives a list of the adjusted quals. This is a copy
5060  * that shares no substructure with the original; this is needed in case there
5061  * are subplans in it (we need two separate copies of the subplan tree, or
5062  * things will go awry).
5063  */
5064 static void
5065 fix_indexqual_references(PlannerInfo *root, IndexPath *index_path,
5066  List **stripped_indexquals_p, List **fixed_indexquals_p)
5067 {
5068  IndexOptInfo *index = index_path->indexinfo;
5069  List *stripped_indexquals;
5070  List *fixed_indexquals;
5071  ListCell *lc;
5072 
5073  stripped_indexquals = fixed_indexquals = NIL;
5074 
5075  foreach(lc, index_path->indexclauses)
5076  {
5077  IndexClause *iclause = lfirst_node(IndexClause, lc);
5078  int indexcol = iclause->indexcol;
5079  ListCell *lc2;
5080 
5081  foreach(lc2, iclause->indexquals)
5082  {
5083  RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc2);
5084  Node *clause = (Node *) rinfo->clause;
5085 
5086  stripped_indexquals = lappend(stripped_indexquals, clause);
5087  clause = fix_indexqual_clause(root, index, indexcol,
5088  clause, iclause->indexcols);
5089  fixed_indexquals = lappend(fixed_indexquals, clause);
5090  }
5091  }
5092 
5093  *stripped_indexquals_p = stripped_indexquals;
5094  *fixed_indexquals_p = fixed_indexquals;
5095 }
5096 
5097 /*
5098  * fix_indexorderby_references
5099  * Adjust indexorderby clauses to the form the executor's index
5100  * machinery needs.
5101  *
5102  * This is a simplified version of fix_indexqual_references. The input is
5103  * bare clauses and a separate indexcol list, instead of IndexClauses.
5104  */
5105 static List *
5106 fix_indexorderby_references(PlannerInfo *root, IndexPath *index_path)
5107 {
5108  IndexOptInfo *index = index_path->indexinfo;
5109  List *fixed_indexorderbys;
5110  ListCell *lcc,
5111  *lci;
5112 
5113  fixed_indexorderbys = NIL;
5114 
5115  forboth(lcc, index_path->indexorderbys, lci, index_path->indexorderbycols)
5116  {
5117  Node *clause = (Node *) lfirst(lcc);
5118  int indexcol = lfirst_int(lci);
5119 
5120  clause = fix_indexqual_clause(root, index, indexcol, clause, NIL);
5121  fixed_indexorderbys = lappend(fixed_indexorderbys, clause);
5122  }
5123 
5124  return fixed_indexorderbys;
5125 }
5126 
5127 /*
5128  * fix_indexqual_clause
5129  * Convert a single indexqual clause to the form needed by the executor.
5130  *
5131  * We replace nestloop params here, and replace the index key variables
5132  * or expressions by index Var nodes.
5133  */
5134 static Node *
5135 fix_indexqual_clause(PlannerInfo *root, IndexOptInfo *index, int indexcol,
5136  Node *clause, List *indexcolnos)
5137 {
5138  /*
5139  * Replace any outer-relation variables with nestloop params.
5140  *
5141  * This also makes a copy of the clause, so it's safe to modify it
5142  * in-place below.
5143  */
5144  clause = replace_nestloop_params(root, clause);
5145 
5146  if (IsA(clause, OpExpr))
5147  {
5148  OpExpr *op = (OpExpr *) clause;
5149 
5150  /* Replace the indexkey expression with an index Var. */
5151  linitial(op->args) = fix_indexqual_operand(linitial(op->args),
5152  index,
5153  indexcol);
5154  }
5155  else if (IsA(clause, RowCompareExpr))
5156  {
5157  RowCompareExpr *rc = (RowCompareExpr *) clause;
5158  ListCell *lca,
5159  *lcai;
5160 
5161  /* Replace the indexkey expressions with index Vars. */
5162  Assert(list_length(rc->largs) == list_length(indexcolnos));
5163  forboth(lca, rc->largs, lcai, indexcolnos)
5164  {
5165  lfirst(lca) = fix_indexqual_operand(lfirst(lca),
5166  index,
5167  lfirst_int(lcai));
5168  }
5169  }
5170  else if (IsA(clause, ScalarArrayOpExpr))
5171  {
5172  ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
5173 
5174  /* Replace the indexkey expression with an index Var. */
5175  linitial(saop->args) = fix_indexqual_operand(linitial(saop->args),
5176  index,
5177  indexcol);
5178  }
5179  else if (IsA(clause, NullTest))
5180  {
5181  NullTest *nt = (NullTest *) clause;
5182 
5183  /* Replace the indexkey expression with an index Var. */
5184  nt->arg = (Expr *) fix_indexqual_operand((Node *) nt->arg,
5185  index,
5186  indexcol);
5187  }
5188  else
5189  elog(ERROR, "unsupported indexqual type: %d",
5190  (int) nodeTag(clause));
5191 
5192  return clause;
5193 }
5194 
5195 /*
5196  * fix_indexqual_operand
5197  * Convert an indexqual expression to a Var referencing the index column.
5198  *
5199  * We represent index keys by Var nodes having varno == INDEX_VAR and varattno
5200  * equal to the index's attribute number (index column position).
5201  *
5202  * Most of the code here is just for sanity cross-checking that the given
5203  * expression actually matches the index column it's claimed to.
5204  */
5205 static Node *
5206 fix_indexqual_operand(Node *node, IndexOptInfo *index, int indexcol)
5207 {
5208  Var *result;
5209  int pos;
5210  ListCell *indexpr_item;
5211 
5212  /*
5213  * Remove any binary-compatible relabeling of the indexkey
5214  */
5215  if (IsA(node, RelabelType))
5216  node = (Node *) ((RelabelType *) node)->arg;
5217 
5218  Assert(indexcol >= 0 && indexcol < index->ncolumns);
5219 
5220  if (index->indexkeys[indexcol] != 0)
5221  {
5222  /* It's a simple index column */
5223  if (IsA(node, Var) &&
5224  ((Var *) node)->varno == index->rel->relid &&
5225  ((Var *) node)->varattno == index->indexkeys[indexcol])
5226  {
5227  result = (Var *) copyObject(node);
5228  result->varno = INDEX_VAR;
5229  result->varattno = indexcol + 1;
5230  return (Node *) result;
5231  }
5232  else
5233  elog(ERROR, "index key does not match expected index column");
5234  }
5235 
5236  /* It's an index expression, so find and cross-check the expression */
5237  indexpr_item = list_head(index->indexprs);
5238  for (pos = 0; pos < index->ncolumns; pos++)
5239  {
5240  if (index->indexkeys[pos] == 0)
5241  {
5242  if (indexpr_item == NULL)
5243  elog(ERROR, "too few entries in indexprs list");
5244  if (pos == indexcol)
5245  {
5246  Node *indexkey;
5247 
5248  indexkey = (Node *) lfirst(indexpr_item);
5249  if (indexkey && IsA(indexkey, RelabelType))
5250  indexkey = (Node *) ((RelabelType *) indexkey)->arg;
5251  if (equal(node, indexkey))
5252  {
5253  result = makeVar(INDEX_VAR, indexcol + 1,
5254  exprType(lfirst(indexpr_item)), -1,
5255  exprCollation(lfirst(indexpr_item)),
5256  0);
5257  return (Node *) result;
5258  }
5259  else
5260  elog(ERROR, "index key does not match expected index column");
5261  }
5262  indexpr_item = lnext(index->indexprs, indexpr_item);
5263  }
5264  }
5265 
5266  /* Oops... */
5267  elog(ERROR, "index key does not match expected index column");
5268  return NULL; /* keep compiler quiet */
5269 }
5270 
5271 /*
5272  * get_switched_clauses
5273  * Given a list of merge or hash joinclauses (as RestrictInfo nodes),
5274  * extract the bare clauses, and rearrange the elements within the
5275  * clauses, if needed, so the outer join variable is on the left and
5276  * the inner is on the right. The original clause data structure is not
5277  * touched; a modified list is returned. We do, however, set the transient
5278  * outer_is_left field in each RestrictInfo to show which side was which.
5279  */
5280 static List *
5281 get_switched_clauses(List *clauses, Relids outerrelids)
5282 {
5283  List *t_list = NIL;
5284  ListCell *l;
5285 
5286  foreach(l, clauses)
5287  {
5288  RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(l);
5289  OpExpr *clause = (OpExpr *) restrictinfo->clause;
5290 
5291  Assert(is_opclause(clause));
5292  if (bms_is_subset(restrictinfo->right_relids, outerrelids))
5293  {
5294  /*
5295  * Duplicate just enough of the structure to allow commuting the
5296  * clause without changing the original list. Could use
5297  * copyObject, but a complete deep copy is overkill.
5298  */
5299  OpExpr *temp = makeNode(OpExpr);
5300 
5301  temp->opno = clause->opno;
5302  temp->opfuncid = InvalidOid;
5303  temp->opresulttype = clause->opresulttype;
5304  temp->opretset = clause->opretset;
5305  temp->opcollid = clause->opcollid;
5306  temp->inputcollid = clause->inputcollid;
5307  temp->args = list_copy(clause->args);
5308  temp->location = clause->location;
5309  /* Commute it --- note this modifies the temp node in-place. */
5310  CommuteOpExpr(temp);
5311  t_list = lappend(t_list, temp);
5312  restrictinfo->outer_is_left = false;
5313  }
5314  else
5315  {
5316  Assert(bms_is_subset(restrictinfo->left_relids, outerrelids));
5317  t_list = lappend(t_list, clause);
5318  restrictinfo->outer_is_left = true;
5319  }
5320  }
5321  return t_list;
5322 }
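
As a hypothetical example (the relation and column names are invented): for a join whose outer relation is a, the clause b.y = a.x has its right-hand side's relids within outerrelids, so the returned list receives a shallow-copied, commuted clause a.x = b.y and the RestrictInfo's outer_is_left is set to false; a clause already written as a.x = b.y is appended unmodified, with outer_is_left set to true.
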
5323 
5324 /*
5325  * order_qual_clauses
5326  * Given a list of qual clauses that will all be evaluated at the same
5327  * plan node, sort the list into the order we want to check the quals
5328  * in at runtime.
5329  *
5330  * When security barrier quals are used in the query, we may have quals with
5331  * different security levels in the list. Quals of lower security_level
5332  * must go before quals of higher security_level, except that we can grant
5333  * exceptions to move up quals that are leakproof. When security level
5334  * doesn't force the decision, we prefer to order clauses by estimated
5335  * execution cost, cheapest first.
5336  *
5337  * Ideally the order should be driven by a combination of execution cost and
5338  * selectivity, but it's not immediately clear how to account for both,
5339  * and given the uncertainty of the estimates the reliability of the decisions
5340  * would be doubtful anyway. So we just order by security level then
5341  * estimated per-tuple cost, being careful not to change the order when
5342  * (as is often the case) the estimates are identical.
5343  *
5344  * Although this will work on either bare clauses or RestrictInfos, it's
5345  * much faster to apply it to RestrictInfos, since it can re-use cost
5346  * information that is cached in RestrictInfos. XXX in the bare-clause
5347  * case, we are also not able to apply security considerations. That is
5348  * all right for the moment, because the bare-clause case doesn't occur
5349  * anywhere that barrier quals could be present, but it would be better to
5350  * get rid of it.
5351  *
5352  * Note: some callers pass lists that contain entries that will later be
5353  * removed; this is the easiest way to let this routine see RestrictInfos
5354  * instead of bare clauses. This is another reason why trying to consider
5355  * selectivity in the ordering would likely do the wrong thing.
5356  */
5357 static List *
5358 order_qual_clauses(PlannerInfo *root, List *clauses)
5359 {
5360  typedef struct
5361  {
5362  Node *clause;
5363  Cost cost;
5364  Index security_level;
5365  } QualItem;
5366  int nitems = list_length(clauses);
5367  QualItem *items;
5368  ListCell *lc;
5369  int i;
5370  List *result;
5371 
5372  /* No need to work hard for 0 or 1 clause */
5373  if (nitems <= 1)
5374  return clauses;
5375 
5376  /*
5377  * Collect the items and costs into an array. This is to avoid repeated
5378  * cost_qual_eval work if the inputs aren't RestrictInfos.
5379  */
5380  items = (QualItem *) palloc(nitems * sizeof(QualItem));
5381  i = 0;
5382  foreach(lc, clauses)
5383  {
5384  Node *clause = (Node *) lfirst(lc);
5385  QualCost qcost;
5386 
5387  cost_qual_eval_node(&qcost, clause, root);
5388  items[i].clause = clause;
5389  items[i].cost = qcost.per_tuple;
5390  if (IsA(clause, RestrictInfo))
5391  {
5392  RestrictInfo *rinfo = (RestrictInfo *) clause;
5393 
5394  /*
5395  * If a clause is leakproof, it doesn't have to be constrained by
5396  * its nominal security level. If it's also reasonably cheap
5397  * (here defined as 10X cpu_operator_cost), pretend it has
5398  * security_level 0, which will allow it to go in front of
5399  * more-expensive quals of lower security levels. Of course, that
5400  * will also force it to go in front of cheaper quals of its own
5401  * security level, which is not so great, but we can alleviate
5402  * that risk by applying the cost limit cutoff.
5403  */
5404  if (rinfo->leakproof && items[i].cost < 10 * cpu_operator_cost)
5405  items[i].security_level = 0;
5406  else
5407  items[i].security_level = rinfo->security_level;
5408  }
5409  else
5410  items[i].security_level = 0;
5411  i++;
5412  }
5413 
5414  /*
5415  * Sort. We don't use qsort() because it's not guaranteed stable for
5416  * equal keys. The expected number of entries is small enough that a
5417  * simple insertion sort should be good enough.
5418  */
5419  for (i = 1; i < nitems; i++)
5420  {
5421  QualItem newitem = items[i];
5422  int j;
5423 
5424  /* insert newitem into the already-sorted subarray */
5425  for (j = i; j > 0; j--)
5426  {
5427  QualItem *olditem = &items[j - 1];
5428 
5429  if (newitem.security_level > olditem->security_level ||
5430  (newitem.security_level == olditem->security_level &&
5431  newitem.cost >= olditem->cost))
5432  break;
5433  items[j] = *olditem;
5434  }
5435  items[j] = newitem;
5436  }
5437 
5438  /* Convert back to a list */
5439  result = NIL;
5440  for (i = 0; i < nitems; i++)
5441  result = lappend(result, items[i].clause);
5442 
5443  return result;
5444 }
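
The ordering rule above can be illustrated with a self-contained sketch (plain C, independent of the backend; the qual names, costs, and security levels are invented for demonstration): sort ascending by security_level, then ascending by per-tuple cost, and keep the original order on exact ties, which is why a stable insertion sort is used rather than qsort().

/*
 * Standalone sketch, not part of createplan.c: demonstrates the stable
 * (security_level, cost) ordering applied by order_qual_clauses().
 */
#include <stdio.h>

typedef struct
{
	const char *clause;			/* stand-in for the qual expression */
	double		cost;			/* estimated per-tuple cost */
	unsigned	security_level; /* 0 = may be evaluated first */
} DemoQual;

int
main(void)
{
	DemoQual	items[] = {
		{"expensive_leaky()", 50.0, 1},
		{"cheap_leaky()", 0.01, 1},
		{"leakproof_cheap()", 0.0025, 0},	/* leakproof qual demoted to level 0 */
		{"another_cheap()", 0.01, 1},
	};
	int			nitems = sizeof(items) / sizeof(items[0]);

	/* insertion sort, stable for equal (security_level, cost) keys */
	for (int i = 1; i < nitems; i++)
	{
		DemoQual	newitem = items[i];
		int			j;

		for (j = i; j > 0; j--)
		{
			DemoQual   *olditem = &items[j - 1];

			if (newitem.security_level > olditem->security_level ||
				(newitem.security_level == olditem->security_level &&
				 newitem.cost >= olditem->cost))
				break;
			items[j] = *olditem;
		}
		items[j] = newitem;
	}

	/* prints leakproof_cheap first, then cheap_leaky and another_cheap
	 * (tied keys, original order preserved), then expensive_leaky */
	for (int i = 0; i < nitems; i++)
		printf("%u\t%g\t%s\n", items[i].security_level,
			   items[i].cost, items[i].clause);
	return 0;
}
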
5445 
5446 /*
5447  * Copy cost and size info from a Path node to the Plan node created from it.
5448  * The executor usually won't use this info, but it's needed by EXPLAIN.
5449  * Also copy the parallel-related flags, which the executor *will* use.
5450  */
5451 static void
5452 copy_generic_path_info(Plan *dest, Path *src)
5453 {
5454  dest->disabled_nodes = src->disabled_nodes;
5455  dest->startup_cost = src->startup_cost;
5456  dest->total_cost = src->total_cost;
5457  dest->plan_rows = src->rows;
5458  dest->plan_width = src->pathtarget->width;
5459  dest->parallel_aware = src->parallel_aware;
5460  dest->parallel_safe = src->parallel_safe;
5461 }
5462 
5463 /*
5464  * Copy cost and size info from a lower plan node to an inserted node.
5465  * (Most callers alter the info after copying it.)
5466  */
5467 static void
5468 copy_plan_costsize(Plan *dest, Plan *src)
5469 {
5470  dest->disabled_nodes = src->disabled_nodes;
5471  dest->startup_cost = src->startup_cost;
5472  dest->total_cost = src->total_cost;
5473  dest->plan_rows = src->plan_rows;
5474  dest->plan_width = src->plan_width;
5475  /* Assume the inserted node is not parallel-aware. */
5476  dest->parallel_aware = false;
5477  /* Assume the inserted node is parallel-safe, if child plan is. */
5478  dest->parallel_safe = src->parallel_safe;
5479 }
5480 
5481 /*
5482  * Some places in this file build Sort nodes that don't have a directly
5483  * corresponding Path node. The cost of the sort is, or should have been,
5484  * included in the cost of the Path node we're working from, but since it's
5485  * not split out, we have to re-figure it using cost_sort(). This is just
5486  * to label the Sort node nicely for EXPLAIN.
5487  *
5488  * limit_tuples is as for cost_sort (in particular, pass -1 if no limit)
5489  */
5490 static void