PostgreSQL Source Code  git master
pathnode.h File Reference
#include "nodes/bitmapset.h"
#include "nodes/pathnodes.h"
Include dependency graph for pathnode.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Functions

int compare_path_costs (Path *path1, Path *path2, CostSelector criterion)
 
int compare_fractional_path_costs (Path *path1, Path *path2, double fraction)
 
void set_cheapest (RelOptInfo *parent_rel)
 
void add_path (RelOptInfo *parent_rel, Path *new_path)
 
bool add_path_precheck (RelOptInfo *parent_rel, Cost startup_cost, Cost total_cost, List *pathkeys, Relids required_outer)
 
void add_partial_path (RelOptInfo *parent_rel, Path *new_path)
 
bool add_partial_path_precheck (RelOptInfo *parent_rel, Cost total_cost, List *pathkeys)
 
Path * create_seqscan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer, int parallel_workers)
 
Path * create_samplescan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
 
IndexPath * create_index_path (PlannerInfo *root, IndexOptInfo *index, List *indexclauses, List *indexorderbys, List *indexorderbycols, List *pathkeys, ScanDirection indexscandir, bool indexonly, Relids required_outer, double loop_count, bool partial_path)
 
BitmapHeapPath * create_bitmap_heap_path (PlannerInfo *root, RelOptInfo *rel, Path *bitmapqual, Relids required_outer, double loop_count, int parallel_degree)
 
BitmapAndPath * create_bitmap_and_path (PlannerInfo *root, RelOptInfo *rel, List *bitmapquals)
 
BitmapOrPath * create_bitmap_or_path (PlannerInfo *root, RelOptInfo *rel, List *bitmapquals)
 
TidPath * create_tidscan_path (PlannerInfo *root, RelOptInfo *rel, List *tidquals, Relids required_outer)
 
TidRangePath * create_tidrangescan_path (PlannerInfo *root, RelOptInfo *rel, List *tidrangequals, Relids required_outer)
 
AppendPath * create_append_path (PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *partial_subpaths, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows)
 
MergeAppendPath * create_merge_append_path (PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *pathkeys, Relids required_outer)
 
GroupResultPath * create_group_result_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, List *havingqual)
 
MaterialPath * create_material_path (RelOptInfo *rel, Path *subpath)
 
MemoizePath * create_memoize_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *param_exprs, List *hash_operators, bool singlerow, bool binary_mode, double calls)
 
UniquePath * create_unique_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, SpecialJoinInfo *sjinfo)
 
GatherPath * create_gather_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, Relids required_outer, double *rows)
 
GatherMergePath * create_gather_merge_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *pathkeys, Relids required_outer, double *rows)
 
SubqueryScanPath * create_subqueryscan_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, bool trivial_pathtarget, List *pathkeys, Relids required_outer)
 
Path * create_functionscan_path (PlannerInfo *root, RelOptInfo *rel, List *pathkeys, Relids required_outer)
 
Path * create_valuesscan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
 
Path * create_tablefuncscan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
 
Path * create_ctescan_path (PlannerInfo *root, RelOptInfo *rel, List *pathkeys, Relids required_outer)
 
Path * create_namedtuplestorescan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
 
Path * create_resultscan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
 
Path * create_worktablescan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
 
ForeignPath * create_foreignscan_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, double rows, Cost startup_cost, Cost total_cost, List *pathkeys, Relids required_outer, Path *fdw_outerpath, List *fdw_restrictinfo, List *fdw_private)
 
ForeignPath * create_foreign_join_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, double rows, Cost startup_cost, Cost total_cost, List *pathkeys, Relids required_outer, Path *fdw_outerpath, List *fdw_restrictinfo, List *fdw_private)
 
ForeignPath * create_foreign_upper_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, double rows, Cost startup_cost, Cost total_cost, List *pathkeys, Path *fdw_outerpath, List *fdw_restrictinfo, List *fdw_private)
 
Relids calc_nestloop_required_outer (Relids outerrelids, Relids outer_paramrels, Relids innerrelids, Relids inner_paramrels)
 
Relids calc_non_nestloop_required_outer (Path *outer_path, Path *inner_path)
 
NestPath * create_nestloop_path (PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype, JoinCostWorkspace *workspace, JoinPathExtraData *extra, Path *outer_path, Path *inner_path, List *restrict_clauses, List *pathkeys, Relids required_outer)
 
MergePath * create_mergejoin_path (PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype, JoinCostWorkspace *workspace, JoinPathExtraData *extra, Path *outer_path, Path *inner_path, List *restrict_clauses, List *pathkeys, Relids required_outer, List *mergeclauses, List *outersortkeys, List *innersortkeys)
 
HashPath * create_hashjoin_path (PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype, JoinCostWorkspace *workspace, JoinPathExtraData *extra, Path *outer_path, Path *inner_path, bool parallel_hash, List *restrict_clauses, Relids required_outer, List *hashclauses)
 
ProjectionPath * create_projection_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
 
Path * apply_projection_to_path (PlannerInfo *root, RelOptInfo *rel, Path *path, PathTarget *target)
 
ProjectSetPath * create_set_projection_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
 
SortPath * create_sort_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, double limit_tuples)
 
IncrementalSortPath * create_incremental_sort_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, int presorted_keys, double limit_tuples)
 
GroupPath * create_group_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *groupClause, List *qual, double numGroups)
 
UpperUniquePath * create_upper_unique_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, int numCols, double numGroups)
 
AggPath * create_agg_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, AggStrategy aggstrategy, AggSplit aggsplit, List *groupClause, List *qual, const AggClauseCosts *aggcosts, double numGroups)
 
GroupingSetsPath * create_groupingsets_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *having_qual, AggStrategy aggstrategy, List *rollups, const AggClauseCosts *agg_costs)
 
MinMaxAggPath * create_minmaxagg_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, List *mmaggregates, List *quals)
 
WindowAggPath * create_windowagg_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *windowFuncs, WindowClause *winclause, List *qual, bool topwindow)
 
SetOpPath * create_setop_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, SetOpCmd cmd, SetOpStrategy strategy, List *distinctList, AttrNumber flagColIdx, int firstFlag, double numGroups, double outputRows)
 
RecursiveUnionPath * create_recursiveunion_path (PlannerInfo *root, RelOptInfo *rel, Path *leftpath, Path *rightpath, PathTarget *target, List *distinctList, int wtParam, double numGroups)
 
LockRowsPath * create_lockrows_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *rowMarks, int epqParam)
 
ModifyTablePath * create_modifytable_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, CmdType operation, bool canSetTag, Index nominalRelation, Index rootRelation, bool partColsUpdated, List *resultRelations, List *updateColnosLists, List *withCheckOptionLists, List *returningLists, List *rowMarks, OnConflictExpr *onconflict, List *mergeActionLists, List *mergeJoinConditions, int epqParam)
 
LimitPath * create_limit_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, Node *limitOffset, Node *limitCount, LimitOption limitOption, int64 offset_est, int64 count_est)
 
void adjust_limit_rows_costs (double *rows, Cost *startup_cost, Cost *total_cost, int64 offset_est, int64 count_est)
 
Path * reparameterize_path (PlannerInfo *root, Path *path, Relids required_outer, double loop_count)
 
Path * reparameterize_path_by_child (PlannerInfo *root, Path *path, RelOptInfo *child_rel)
 
bool path_is_reparameterizable_by_child (Path *path, RelOptInfo *child_rel)
 
void setup_simple_rel_arrays (PlannerInfo *root)
 
void expand_planner_arrays (PlannerInfo *root, int add_size)
 
RelOptInfo * build_simple_rel (PlannerInfo *root, int relid, RelOptInfo *parent)
 
RelOptInfo * find_base_rel (PlannerInfo *root, int relid)
 
RelOptInfo * find_base_rel_noerr (PlannerInfo *root, int relid)
 
RelOptInfo * find_base_rel_ignore_join (PlannerInfo *root, int relid)
 
RelOptInfo * find_join_rel (PlannerInfo *root, Relids relids)
 
RelOptInfo * build_join_rel (PlannerInfo *root, Relids joinrelids, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo, List *pushed_down_joins, List **restrictlist_ptr)
 
Relids min_join_parameterization (PlannerInfo *root, Relids joinrelids, RelOptInfo *outer_rel, RelOptInfo *inner_rel)
 
RelOptInfo * fetch_upper_rel (PlannerInfo *root, UpperRelationKind kind, Relids relids)
 
Relids find_childrel_parents (PlannerInfo *root, RelOptInfo *rel)
 
ParamPathInfo * get_baserel_parampathinfo (PlannerInfo *root, RelOptInfo *baserel, Relids required_outer)
 
ParamPathInfo * get_joinrel_parampathinfo (PlannerInfo *root, RelOptInfo *joinrel, Path *outer_path, Path *inner_path, SpecialJoinInfo *sjinfo, Relids required_outer, List **restrict_clauses)
 
ParamPathInfo * get_appendrel_parampathinfo (RelOptInfo *appendrel, Relids required_outer)
 
ParamPathInfo * find_param_path_info (RelOptInfo *rel, Relids required_outer)
 
Bitmapset * get_param_path_clause_serials (Path *path)
 
RelOptInfo * build_child_join_rel (PlannerInfo *root, RelOptInfo *outer_rel, RelOptInfo *inner_rel, RelOptInfo *parent_joinrel, List *restrictlist, SpecialJoinInfo *sjinfo)
 

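
The constructors in the list above only build Path nodes; they take effect only once a path is handed to add_path() or add_partial_path(), and set_cheapest() has been run on the owning RelOptInfo after all candidates are in. The following is a minimal, hypothetical sketch of that pattern; the helper name and the decision to offer only a sequential scan are assumptions for illustration, not part of pathnode.h.

#include "postgres.h"

#include "optimizer/pathnode.h"

/*
 * Hypothetical helper: offer a plain sequential-scan path for "rel" and
 * then settle on the cheapest candidates.  Real callers also consider
 * index, parameterized, and partial paths before calling set_cheapest().
 */
static void
add_basic_paths(PlannerInfo *root, RelOptInfo *rel)
{
	/* Unparameterized, non-parallel sequential scan (0 workers). */
	add_path(rel, create_seqscan_path(root, rel, NULL, 0));

	/* Once every candidate has been offered, fill the cheapest_* fields. */
	set_cheapest(rel);
}
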
Function Documentation

◆ add_partial_path()

void add_partial_path ( RelOptInfo * parent_rel,
Path * new_path 
)

Definition at line 747 of file pathnode.c.

748 {
749  bool accept_new = true; /* unless we find a superior old path */
750  int insert_at = 0; /* where to insert new item */
751  ListCell *p1;
752 
753  /* Check for query cancel. */
754  CHECK_FOR_INTERRUPTS();
755 
756  /* Path to be added must be parallel safe. */
757  Assert(new_path->parallel_safe);
758 
759  /* Relation should be OK for parallelism, too. */
760  Assert(parent_rel->consider_parallel);
761 
762  /*
763  * As in add_path, throw out any paths which are dominated by the new
764  * path, but throw out the new path if some existing path dominates it.
765  */
766  foreach(p1, parent_rel->partial_pathlist)
767  {
768  Path *old_path = (Path *) lfirst(p1);
769  bool remove_old = false; /* unless new proves superior */
770  PathKeysComparison keyscmp;
771 
772  /* Compare pathkeys. */
773  keyscmp = compare_pathkeys(new_path->pathkeys, old_path->pathkeys);
774 
775  /* Unless pathkeys are incompatible, keep just one of the two paths. */
776  if (keyscmp != PATHKEYS_DIFFERENT)
777  {
778  if (new_path->total_cost > old_path->total_cost * STD_FUZZ_FACTOR)
779  {
780  /* New path costs more; keep it only if pathkeys are better. */
781  if (keyscmp != PATHKEYS_BETTER1)
782  accept_new = false;
783  }
784  else if (old_path->total_cost > new_path->total_cost
785  * STD_FUZZ_FACTOR)
786  {
787  /* Old path costs more; keep it only if pathkeys are better. */
788  if (keyscmp != PATHKEYS_BETTER2)
789  remove_old = true;
790  }
791  else if (keyscmp == PATHKEYS_BETTER1)
792  {
793  /* Costs are about the same, new path has better pathkeys. */
794  remove_old = true;
795  }
796  else if (keyscmp == PATHKEYS_BETTER2)
797  {
798  /* Costs are about the same, old path has better pathkeys. */
799  accept_new = false;
800  }
801  else if (old_path->total_cost > new_path->total_cost * 1.0000000001)
802  {
803  /* Pathkeys are the same, and the old path costs more. */
804  remove_old = true;
805  }
806  else
807  {
808  /*
809  * Pathkeys are the same, and new path isn't materially
810  * cheaper.
811  */
812  accept_new = false;
813  }
814  }
815 
816  /*
817  * Remove current element from partial_pathlist if dominated by new.
818  */
819  if (remove_old)
820  {
821  parent_rel->partial_pathlist =
822  foreach_delete_current(parent_rel->partial_pathlist, p1);
823  pfree(old_path);
824  }
825  else
826  {
827  /* new belongs after this old path if it has cost >= old's */
828  if (new_path->total_cost >= old_path->total_cost)
829  insert_at = foreach_current_index(p1) + 1;
830  }
831 
832  /*
833  * If we found an old path that dominates new_path, we can quit
834  * scanning the partial_pathlist; we will not add new_path, and we
835  * assume new_path cannot dominate any later path.
836  */
837  if (!accept_new)
838  break;
839  }
840 
841  if (accept_new)
842  {
843  /* Accept the new path: insert it at proper place */
844  parent_rel->partial_pathlist =
845  list_insert_nth(parent_rel->partial_pathlist, insert_at, new_path);
846  }
847  else
848  {
849  /* Reject and recycle the new path */
850  pfree(new_path);
851  }
852 }
#define Assert(condition)
Definition: c.h:858
List * list_insert_nth(List *list, int pos, void *datum)
Definition: list.c:439
void pfree(void *pointer)
Definition: mcxt.c:1520
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:122
PathKeysComparison compare_pathkeys(List *keys1, List *keys2)
Definition: pathkeys.c:302
#define STD_FUZZ_FACTOR
Definition: pathnode.c:47
PathKeysComparison
Definition: paths.h:204
@ PATHKEYS_BETTER2
Definition: paths.h:207
@ PATHKEYS_BETTER1
Definition: paths.h:206
@ PATHKEYS_DIFFERENT
Definition: paths.h:208
#define lfirst(lc)
Definition: pg_list.h:172
#define foreach_current_index(var_or_cell)
Definition: pg_list.h:403
#define foreach_delete_current(lst, var_or_cell)
Definition: pg_list.h:391
List * pathkeys
Definition: pathnodes.h:1654
Cost total_cost
Definition: pathnodes.h:1651
bool parallel_safe
Definition: pathnodes.h:1644
bool consider_parallel
Definition: pathnodes.h:877
List * partial_pathlist
Definition: pathnodes.h:890

References Assert, CHECK_FOR_INTERRUPTS, compare_pathkeys(), RelOptInfo::consider_parallel, foreach_current_index, foreach_delete_current, lfirst, list_insert_nth(), Path::parallel_safe, RelOptInfo::partial_pathlist, Path::pathkeys, PATHKEYS_BETTER1, PATHKEYS_BETTER2, PATHKEYS_DIFFERENT, pfree(), STD_FUZZ_FACTOR, and Path::total_cost.

Referenced by add_paths_to_append_rel(), build_index_paths(), build_setop_child_paths(), create_partial_bitmap_paths(), create_partial_distinct_paths(), create_partial_grouping_paths(), create_plain_partial_paths(), grouping_planner(), set_subquery_pathlist(), try_partial_hashjoin_path(), try_partial_mergejoin_path(), and try_partial_nestloop_path().
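
As a usage illustration, here is a minimal, hypothetical sketch of adding a partial path, in the spirit of create_plain_partial_paths(); the hard-coded worker count is an assumption made for brevity (the real code derives it from the relation's size).

#include "postgres.h"

#include "optimizer/pathnode.h"

/*
 * Hypothetical sketch: attach a partial (per-worker) sequential scan.
 * add_partial_path() asserts that the path is parallel-safe and that the
 * relation has consider_parallel set, so bail out early otherwise.
 */
static void
add_partial_seqscan(PlannerInfo *root, RelOptInfo *rel)
{
	int			parallel_workers = 2;	/* assumed, not computed here */

	if (!rel->consider_parallel)
		return;

	add_partial_path(rel,
					 create_seqscan_path(root, rel, NULL, parallel_workers));
}
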

◆ add_partial_path_precheck()

bool add_partial_path_precheck ( RelOptInfo * parent_rel,
Cost  total_cost,
List * pathkeys 
)

Definition at line 865 of file pathnode.c.

867 {
868  ListCell *p1;
869 
870  /*
871  * Our goal here is twofold. First, we want to find out whether this path
872  * is clearly inferior to some existing partial path. If so, we want to
873  * reject it immediately. Second, we want to find out whether this path
874  * is clearly superior to some existing partial path -- at least, modulo
875  * final cost computations. If so, we definitely want to consider it.
876  *
877  * Unlike add_path(), we always compare pathkeys here. This is because we
878  * expect partial_pathlist to be very short, and getting a definitive
879  * answer at this stage avoids the need to call add_path_precheck.
880  */
881  foreach(p1, parent_rel->partial_pathlist)
882  {
883  Path *old_path = (Path *) lfirst(p1);
884  PathKeysComparison keyscmp;
885 
886  keyscmp = compare_pathkeys(pathkeys, old_path->pathkeys);
887  if (keyscmp != PATHKEYS_DIFFERENT)
888  {
889  if (total_cost > old_path->total_cost * STD_FUZZ_FACTOR &&
890  keyscmp != PATHKEYS_BETTER1)
891  return false;
892  if (old_path->total_cost > total_cost * STD_FUZZ_FACTOR &&
893  keyscmp != PATHKEYS_BETTER2)
894  return true;
895  }
896  }
897 
898  /*
899  * This path is neither clearly inferior to an existing partial path nor
900  * clearly good enough that it might replace one. Compare it to
901  * non-parallel plans. If it loses even before accounting for the cost of
902  * the Gather node, we should definitely reject it.
903  *
904  * Note that we pass the total_cost to add_path_precheck twice. This is
905  * because it's never advantageous to consider the startup cost of a
906  * partial path; the resulting plans, if run in parallel, will be run to
907  * completion.
908  */
909  if (!add_path_precheck(parent_rel, total_cost, total_cost, pathkeys,
910  NULL))
911  return false;
912 
913  return true;
914 }
bool add_path_precheck(RelOptInfo *parent_rel, Cost startup_cost, Cost total_cost, List *pathkeys, Relids required_outer)
Definition: pathnode.c:642

References add_path_precheck(), compare_pathkeys(), lfirst, RelOptInfo::partial_pathlist, Path::pathkeys, PATHKEYS_BETTER1, PATHKEYS_BETTER2, PATHKEYS_DIFFERENT, STD_FUZZ_FACTOR, and Path::total_cost.

Referenced by try_partial_hashjoin_path(), try_partial_mergejoin_path(), and try_partial_nestloop_path().
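
The intended call pattern, sketched below with assumed inputs: a preliminary total-cost estimate (for example from a JoinCostWorkspace) and the pathkeys the candidate would deliver are prechecked before the comparatively expensive Path node is built. The wrapper name is hypothetical.

#include "postgres.h"

#include "optimizer/pathnode.h"

/*
 * Hypothetical sketch: decide whether a candidate partial path is worth
 * constructing at all.  Startup cost is deliberately ignored, since a
 * partial path is always run to completion under a Gather.
 */
static bool
partial_path_worth_building(RelOptInfo *joinrel, Cost est_total_cost,
							List *pathkeys)
{
	return add_partial_path_precheck(joinrel, est_total_cost, pathkeys);
}
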

◆ add_path()

void add_path ( RelOptInfo * parent_rel,
Path * new_path 
)

Definition at line 420 of file pathnode.c.

421 {
422  bool accept_new = true; /* unless we find a superior old path */
423  int insert_at = 0; /* where to insert new item */
424  List *new_path_pathkeys;
425  ListCell *p1;
426 
427  /*
428  * This is a convenient place to check for query cancel --- no part of the
429  * planner goes very long without calling add_path().
430  */
431  CHECK_FOR_INTERRUPTS();
432 
433  /* Pretend parameterized paths have no pathkeys, per comment above */
434  new_path_pathkeys = new_path->param_info ? NIL : new_path->pathkeys;
435 
436  /*
437  * Loop to check proposed new path against old paths. Note it is possible
438  * for more than one old path to be tossed out because new_path dominates
439  * it.
440  */
441  foreach(p1, parent_rel->pathlist)
442  {
443  Path *old_path = (Path *) lfirst(p1);
444  bool remove_old = false; /* unless new proves superior */
445  PathCostComparison costcmp;
446  PathKeysComparison keyscmp;
447  BMS_Comparison outercmp;
448 
449  /*
450  * Do a fuzzy cost comparison with standard fuzziness limit.
451  */
452  costcmp = compare_path_costs_fuzzily(new_path, old_path,
453  STD_FUZZ_FACTOR);
454 
455  /*
456  * If the two paths compare differently for startup and total cost,
457  * then we want to keep both, and we can skip comparing pathkeys and
458  * required_outer rels. If they compare the same, proceed with the
459  * other comparisons. Row count is checked last. (We make the tests
460  * in this order because the cost comparison is most likely to turn
461  * out "different", and the pathkeys comparison next most likely. As
462  * explained above, row count very seldom makes a difference, so even
463  * though it's cheap to compare there's not much point in checking it
464  * earlier.)
465  */
466  if (costcmp != COSTS_DIFFERENT)
467  {
468  /* Similarly check to see if either dominates on pathkeys */
469  List *old_path_pathkeys;
470 
471  old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
472  keyscmp = compare_pathkeys(new_path_pathkeys,
473  old_path_pathkeys);
474  if (keyscmp != PATHKEYS_DIFFERENT)
475  {
476  switch (costcmp)
477  {
478  case COSTS_EQUAL:
479  outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
480  PATH_REQ_OUTER(old_path));
481  if (keyscmp == PATHKEYS_BETTER1)
482  {
483  if ((outercmp == BMS_EQUAL ||
484  outercmp == BMS_SUBSET1) &&
485  new_path->rows <= old_path->rows &&
486  new_path->parallel_safe >= old_path->parallel_safe)
487  remove_old = true; /* new dominates old */
488  }
489  else if (keyscmp == PATHKEYS_BETTER2)
490  {
491  if ((outercmp == BMS_EQUAL ||
492  outercmp == BMS_SUBSET2) &&
493  new_path->rows >= old_path->rows &&
494  new_path->parallel_safe <= old_path->parallel_safe)
495  accept_new = false; /* old dominates new */
496  }
497  else /* keyscmp == PATHKEYS_EQUAL */
498  {
499  if (outercmp == BMS_EQUAL)
500  {
501  /*
502  * Same pathkeys and outer rels, and fuzzily
503  * the same cost, so keep just one; to decide
504  * which, first check parallel-safety, then
505  * rows, then do a fuzzy cost comparison with
506  * very small fuzz limit. (We used to do an
507  * exact cost comparison, but that results in
508  * annoying platform-specific plan variations
509  * due to roundoff in the cost estimates.) If
510  * things are still tied, arbitrarily keep
511  * only the old path. Notice that we will
512  * keep only the old path even if the
513  * less-fuzzy comparison decides the startup
514  * and total costs compare differently.
515  */
516  if (new_path->parallel_safe >
517  old_path->parallel_safe)
518  remove_old = true; /* new dominates old */
519  else if (new_path->parallel_safe <
520  old_path->parallel_safe)
521  accept_new = false; /* old dominates new */
522  else if (new_path->rows < old_path->rows)
523  remove_old = true; /* new dominates old */
524  else if (new_path->rows > old_path->rows)
525  accept_new = false; /* old dominates new */
526  else if (compare_path_costs_fuzzily(new_path,
527  old_path,
528  1.0000000001) == COSTS_BETTER1)
529  remove_old = true; /* new dominates old */
530  else
531  accept_new = false; /* old equals or
532  * dominates new */
533  }
534  else if (outercmp == BMS_SUBSET1 &&
535  new_path->rows <= old_path->rows &&
536  new_path->parallel_safe >= old_path->parallel_safe)
537  remove_old = true; /* new dominates old */
538  else if (outercmp == BMS_SUBSET2 &&
539  new_path->rows >= old_path->rows &&
540  new_path->parallel_safe <= old_path->parallel_safe)
541  accept_new = false; /* old dominates new */
542  /* else different parameterizations, keep both */
543  }
544  break;
545  case COSTS_BETTER1:
546  if (keyscmp != PATHKEYS_BETTER2)
547  {
548  outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
549  PATH_REQ_OUTER(old_path));
550  if ((outercmp == BMS_EQUAL ||
551  outercmp == BMS_SUBSET1) &&
552  new_path->rows <= old_path->rows &&
553  new_path->parallel_safe >= old_path->parallel_safe)
554  remove_old = true; /* new dominates old */
555  }
556  break;
557  case COSTS_BETTER2:
558  if (keyscmp != PATHKEYS_BETTER1)
559  {
560  outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
561  PATH_REQ_OUTER(old_path));
562  if ((outercmp == BMS_EQUAL ||
563  outercmp == BMS_SUBSET2) &&
564  new_path->rows >= old_path->rows &&
565  new_path->parallel_safe <= old_path->parallel_safe)
566  accept_new = false; /* old dominates new */
567  }
568  break;
569  case COSTS_DIFFERENT:
570 
571  /*
572  * can't get here, but keep this case to keep compiler
573  * quiet
574  */
575  break;
576  }
577  }
578  }
579 
580  /*
581  * Remove current element from pathlist if dominated by new.
582  */
583  if (remove_old)
584  {
585  parent_rel->pathlist = foreach_delete_current(parent_rel->pathlist,
586  p1);
587 
588  /*
589  * Delete the data pointed-to by the deleted cell, if possible
590  */
591  if (!IsA(old_path, IndexPath))
592  pfree(old_path);
593  }
594  else
595  {
596  /* new belongs after this old path if it has cost >= old's */
597  if (new_path->total_cost >= old_path->total_cost)
598  insert_at = foreach_current_index(p1) + 1;
599  }
600 
601  /*
602  * If we found an old path that dominates new_path, we can quit
603  * scanning the pathlist; we will not add new_path, and we assume
604  * new_path cannot dominate any other elements of the pathlist.
605  */
606  if (!accept_new)
607  break;
608  }
609 
610  if (accept_new)
611  {
612  /* Accept the new path: insert it at proper place in pathlist */
613  parent_rel->pathlist =
614  list_insert_nth(parent_rel->pathlist, insert_at, new_path);
615  }
616  else
617  {
618  /* Reject and recycle the new path */
619  if (!IsA(new_path, IndexPath))
620  pfree(new_path);
621  }
622 }
BMS_Comparison bms_subset_compare(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:445
BMS_Comparison
Definition: bitmapset.h:61
@ BMS_SUBSET1
Definition: bitmapset.h:63
@ BMS_EQUAL
Definition: bitmapset.h:62
@ BMS_SUBSET2
Definition: bitmapset.h:64
#define IsA(nodeptr, _type_)
Definition: nodes.h:158
static PathCostComparison compare_path_costs_fuzzily(Path *path1, Path *path2, double fuzz_factor)
Definition: pathnode.c:164
PathCostComparison
Definition: pathnode.c:35
@ COSTS_EQUAL
Definition: pathnode.c:36
@ COSTS_BETTER1
Definition: pathnode.c:37
@ COSTS_BETTER2
Definition: pathnode.c:38
@ COSTS_DIFFERENT
Definition: pathnode.c:39
#define PATH_REQ_OUTER(path)
Definition: pathnodes.h:1658
#define NIL
Definition: pg_list.h:68
Definition: pg_list.h:54
Cardinality rows
Definition: pathnodes.h:1649
List * pathlist
Definition: pathnodes.h:888

References BMS_EQUAL, BMS_SUBSET1, BMS_SUBSET2, bms_subset_compare(), CHECK_FOR_INTERRUPTS, compare_path_costs_fuzzily(), compare_pathkeys(), COSTS_BETTER1, COSTS_BETTER2, COSTS_DIFFERENT, COSTS_EQUAL, foreach_current_index, foreach_delete_current, IsA, lfirst, list_insert_nth(), NIL, Path::parallel_safe, PATH_REQ_OUTER, Path::pathkeys, PATHKEYS_BETTER1, PATHKEYS_BETTER2, PATHKEYS_DIFFERENT, RelOptInfo::pathlist, pfree(), Path::rows, STD_FUZZ_FACTOR, and Path::total_cost.

Referenced by add_foreign_final_paths(), add_foreign_grouping_paths(), add_foreign_ordered_paths(), add_paths_to_append_rel(), add_paths_to_grouping_rel(), add_paths_with_pathkeys_for_rel(), build_setop_child_paths(), BuildParameterizedTidPaths(), consider_groupingsets_paths(), create_degenerate_grouping_paths(), create_final_distinct_paths(), create_index_paths(), create_one_window_path(), create_ordered_paths(), create_partial_grouping_paths(), create_tidscan_paths(), fileGetForeignPaths(), gather_grouping_paths(), generate_gather_paths(), generate_nonunion_paths(), generate_orderedappend_paths(), generate_recursion_path(), generate_union_paths(), generate_useful_gather_paths(), get_index_paths(), grouping_planner(), mark_dummy_rel(), postgresGetForeignJoinPaths(), postgresGetForeignPaths(), preprocess_minmax_aggregates(), query_planner(), set_cte_pathlist(), set_dummy_rel_pathlist(), set_function_pathlist(), set_namedtuplestore_pathlist(), set_plain_rel_pathlist(), set_result_pathlist(), set_subquery_pathlist(), set_tablefunc_pathlist(), set_tablesample_rel_pathlist(), set_values_pathlist(), set_worktable_pathlist(), try_hashjoin_path(), try_mergejoin_path(), and try_nestloop_path().
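
A minimal usage sketch follows, mirroring the way the planner offers a sequential-scan path for a base relation. One point worth calling out: add_path() takes ownership of new_path and may pfree() it (or an older path it displaces), so the pointer must not be dereferenced after the call. The helper name is hypothetical.

#include "postgres.h"

#include "optimizer/pathnode.h"

/*
 * Hypothetical sketch: offer a sequential-scan path, parameterized by the
 * relation's lateral references if it has any (required_outer may be NULL).
 * The Path pointer is handed straight to add_path(), which may free it.
 */
static void
offer_seqscan_path(PlannerInfo *root, RelOptInfo *rel)
{
	Relids		required_outer = rel->lateral_relids;	/* often NULL */

	add_path(rel, create_seqscan_path(root, rel, required_outer, 0));
}
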

◆ add_path_precheck()

bool add_path_precheck ( RelOptInfo * parent_rel,
Cost  startup_cost,
Cost  total_cost,
List * pathkeys,
Relids  required_outer 
)

Definition at line 642 of file pathnode.c.

645 {
646  List *new_path_pathkeys;
647  bool consider_startup;
648  ListCell *p1;
649 
650  /* Pretend parameterized paths have no pathkeys, per add_path policy */
651  new_path_pathkeys = required_outer ? NIL : pathkeys;
652 
653  /* Decide whether new path's startup cost is interesting */
654  consider_startup = required_outer ? parent_rel->consider_param_startup : parent_rel->consider_startup;
655 
656  foreach(p1, parent_rel->pathlist)
657  {
658  Path *old_path = (Path *) lfirst(p1);
659  PathKeysComparison keyscmp;
660 
661  /*
662  * We are looking for an old_path with the same parameterization (and
663  * by assumption the same rowcount) that dominates the new path on
664  * pathkeys as well as both cost metrics. If we find one, we can
665  * reject the new path.
666  *
667  * Cost comparisons here should match compare_path_costs_fuzzily.
668  */
669  if (total_cost > old_path->total_cost * STD_FUZZ_FACTOR)
670  {
671  /* new path can win on startup cost only if consider_startup */
672  if (startup_cost > old_path->startup_cost * STD_FUZZ_FACTOR ||
673  !consider_startup)
674  {
675  /* new path loses on cost, so check pathkeys... */
676  List *old_path_pathkeys;
677 
678  old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
679  keyscmp = compare_pathkeys(new_path_pathkeys,
680  old_path_pathkeys);
681  if (keyscmp == PATHKEYS_EQUAL ||
682  keyscmp == PATHKEYS_BETTER2)
683  {
684  /* new path does not win on pathkeys... */
685  if (bms_equal(required_outer, PATH_REQ_OUTER(old_path)))
686  {
687  /* Found an old path that dominates the new one */
688  return false;
689  }
690  }
691  }
692  }
693  else
694  {
695  /*
696  * Since the pathlist is sorted by total_cost, we can stop looking
697  * once we reach a path with a total_cost larger than the new
698  * path's.
699  */
700  break;
701  }
702  }
703 
704  return true;
705 }
bool bms_equal(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:142
@ PATHKEYS_EQUAL
Definition: paths.h:205
Cost startup_cost
Definition: pathnodes.h:1650
bool consider_param_startup
Definition: pathnodes.h:875
bool consider_startup
Definition: pathnodes.h:873

References bms_equal(), compare_pathkeys(), RelOptInfo::consider_param_startup, RelOptInfo::consider_startup, lfirst, NIL, PATH_REQ_OUTER, Path::pathkeys, PATHKEYS_BETTER2, PATHKEYS_EQUAL, RelOptInfo::pathlist, Path::startup_cost, STD_FUZZ_FACTOR, and Path::total_cost.

Referenced by add_partial_path_precheck(), try_hashjoin_path(), try_mergejoin_path(), and try_nestloop_path().
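
The typical call pattern, sketched below with assumed inputs: preliminary startup and total costs (as produced by an initial_cost_* step in the join code) are prechecked against the relation's existing pathlist before the full Path node and its parameterization info are built. The wrapper is hypothetical.

#include "postgres.h"

#include "optimizer/pathnode.h"

/*
 * Hypothetical sketch: cheap rejection test for a candidate join path.
 * If an already-kept path of the same parameterization dominates these
 * preliminary costs and pathkeys, building the real path is skipped.
 */
static bool
join_path_looks_useful(RelOptInfo *joinrel,
					   Cost est_startup_cost, Cost est_total_cost,
					   List *pathkeys, Relids required_outer)
{
	return add_path_precheck(joinrel, est_startup_cost, est_total_cost,
							 pathkeys, required_outer);
}
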

◆ adjust_limit_rows_costs()

void adjust_limit_rows_costs ( double *  rows,
Cost * startup_cost,
Cost * total_cost,
int64  offset_est,
int64  count_est 
)

Definition at line 3878 of file pathnode.c.

3883 {
3884  double input_rows = *rows;
3885  Cost input_startup_cost = *startup_cost;
3886  Cost input_total_cost = *total_cost;
3887 
3888  if (offset_est != 0)
3889  {
3890  double offset_rows;
3891 
3892  if (offset_est > 0)
3893  offset_rows = (double) offset_est;
3894  else
3895  offset_rows = clamp_row_est(input_rows * 0.10);
3896  if (offset_rows > *rows)
3897  offset_rows = *rows;
3898  if (input_rows > 0)
3899  *startup_cost +=
3900  (input_total_cost - input_startup_cost)
3901  * offset_rows / input_rows;
3902  *rows -= offset_rows;
3903  if (*rows < 1)
3904  *rows = 1;
3905  }
3906 
3907  if (count_est != 0)
3908  {
3909  double count_rows;
3910 
3911  if (count_est > 0)
3912  count_rows = (double) count_est;
3913  else
3914  count_rows = clamp_row_est(input_rows * 0.10);
3915  if (count_rows > *rows)
3916  count_rows = *rows;
3917  if (input_rows > 0)
3918  *total_cost = *startup_cost +
3919  (input_total_cost - input_startup_cost)
3920  * count_rows / input_rows;
3921  *rows = count_rows;
3922  if (*rows < 1)
3923  *rows = 1;
3924  }
3925 }
double clamp_row_est(double nrows)
Definition: costsize.c:202
double Cost
Definition: nodes.h:251

References clamp_row_est().

Referenced by create_limit_path(), and estimate_path_cost_size().
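
A worked example may help; the numbers below are assumed inputs, and the expected outputs follow directly from the proportional formulas above.

#include "postgres.h"

#include "optimizer/pathnode.h"

/*
 * Hypothetical worked example: a subpath estimated at 10000 rows, startup
 * cost 0 and total cost 1000, under OFFSET 100 LIMIT 10.
 */
static void
limit_cost_example(void)
{
	double		rows = 10000;
	Cost		startup_cost = 0;
	Cost		total_cost = 1000;

	adjust_limit_rows_costs(&rows, &startup_cost, &total_cost,
							100 /* offset_est */ , 10 /* count_est */ );

	/*
	 * Afterwards: rows == 10 (the LIMIT), startup_cost == 10 (the share of
	 * the run cost spent skipping the first 100 of 10000 rows), and
	 * total_cost == 11 (startup plus the cost of producing 10 more rows).
	 */
}
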

◆ apply_projection_to_path()

Path* apply_projection_to_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * path,
PathTarget * target 
)

Definition at line 2793 of file pathnode.c.

2797 {
2798  QualCost oldcost;
2799 
2800  /*
2801  * If given path can't project, we might need a Result node, so make a
2802  * separate ProjectionPath.
2803  */
2804  if (!is_projection_capable_path(path))
2805  return (Path *) create_projection_path(root, rel, path, target);
2806 
2807  /*
2808  * We can just jam the desired tlist into the existing path, being sure to
2809  * update its cost estimates appropriately.
2810  */
2811  oldcost = path->pathtarget->cost;
2812  path->pathtarget = target;
2813 
2814  path->startup_cost += target->cost.startup - oldcost.startup;
2815  path->total_cost += target->cost.startup - oldcost.startup +
2816  (target->cost.per_tuple - oldcost.per_tuple) * path->rows;
2817 
2818  /*
2819  * If the path happens to be a Gather or GatherMerge path, we'd like to
2820  * arrange for the subpath to return the required target list so that
2821  * workers can help project. But if there is something that is not
2822  * parallel-safe in the target expressions, then we can't.
2823  */
2824  if ((IsA(path, GatherPath) || IsA(path, GatherMergePath)) &&
2825  is_parallel_safe(root, (Node *) target->exprs))
2826  {
2827  /*
2828  * We always use create_projection_path here, even if the subpath is
2829  * projection-capable, so as to avoid modifying the subpath in place.
2830  * It seems unlikely at present that there could be any other
2831  * references to the subpath, but better safe than sorry.
2832  *
2833  * Note that we don't change the parallel path's cost estimates; it
2834  * might be appropriate to do so, to reflect the fact that the bulk of
2835  * the target evaluation will happen in workers.
2836  */
2837  if (IsA(path, GatherPath))
2838  {
2839  GatherPath *gpath = (GatherPath *) path;
2840 
2841  gpath->subpath = (Path *)
2842  create_projection_path(root,
2843  gpath->subpath->parent,
2844  gpath->subpath,
2845  target);
2846  }
2847  else
2848  {
2849  GatherMergePath *gmpath = (GatherMergePath *) path;
2850 
2851  gmpath->subpath = (Path *)
2852  create_projection_path(root,
2853  gmpath->subpath->parent,
2854  gmpath->subpath,
2855  target);
2856  }
2857  }
2858  else if (path->parallel_safe &&
2859  !is_parallel_safe(root, (Node *) target->exprs))
2860  {
2861  /*
2862  * We're inserting a parallel-restricted target list into a path
2863  * currently marked parallel-safe, so we have to mark it as no longer
2864  * safe.
2865  */
2866  path->parallel_safe = false;
2867  }
2868 
2869  return path;
2870 }
bool is_parallel_safe(PlannerInfo *root, Node *node)
Definition: clauses.c:753
bool is_projection_capable_path(Path *path)
Definition: createplan.c:7207
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:77
ProjectionPath * create_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2685
tree ctl root
Definition: radixtree.h:1884
Path * subpath
Definition: pathnodes.h:2031
Definition: nodes.h:129
List * exprs
Definition: pathnodes.h:1522
QualCost cost
Definition: pathnodes.h:1528
Cost per_tuple
Definition: pathnodes.h:48
Cost startup
Definition: pathnodes.h:47

References PathTarget::cost, create_projection_path(), PathTarget::exprs, if(), is_parallel_safe(), is_projection_capable_path(), IsA, Path::parallel_safe, QualCost::per_tuple, root, Path::rows, QualCost::startup, Path::startup_cost, GatherPath::subpath, GatherMergePath::subpath, and Path::total_cost.

Referenced by adjust_paths_for_srfs(), build_minmax_path(), create_ordered_paths(), and recurse_set_operations().
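
A short, hypothetical sketch of the calling convention: the return value must be used in place of the path that was passed in, since the function either wraps the path in a new ProjectionPath or updates the given Path in place.

#include "postgres.h"

#include "optimizer/pathnode.h"

/*
 * Hypothetical sketch: impose a new output target on a relation's cheapest
 * total path.  Always continue with the returned pointer; the input path
 * may have been wrapped in a ProjectionPath rather than modified.
 */
static Path *
project_cheapest_path(PlannerInfo *root, RelOptInfo *rel, PathTarget *target)
{
	Path	   *path = rel->cheapest_total_path;

	return apply_projection_to_path(root, rel, path, target);
}
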

◆ build_child_join_rel()

RelOptInfo* build_child_join_rel ( PlannerInfo * root,
RelOptInfo * outer_rel,
RelOptInfo * inner_rel,
RelOptInfo * parent_joinrel,
List * restrictlist,
SpecialJoinInfo * sjinfo 
)

Definition at line 881 of file relnode.c.

884 {
885  RelOptInfo *joinrel = makeNode(RelOptInfo);
886  AppendRelInfo **appinfos;
887  int nappinfos;
888 
889  /* Only joins between "other" relations land here. */
890  Assert(IS_OTHER_REL(outer_rel) && IS_OTHER_REL(inner_rel));
891 
892  /* The parent joinrel should have consider_partitionwise_join set. */
893  Assert(parent_joinrel->consider_partitionwise_join);
894 
895  /*
896  * Find the AppendRelInfo structures for the child baserels. We'll need
897  * these for computing the child join's relid set, and later for mapping
898  * Vars to the child rel.
899  */
900  appinfos = find_appinfos_by_relids(root,
901  bms_union(outer_rel->relids,
902  inner_rel->relids),
903  &nappinfos);
904 
905  joinrel->reloptkind = RELOPT_OTHER_JOINREL;
906  joinrel->relids = adjust_child_relids(parent_joinrel->relids,
907  nappinfos, appinfos);
908  joinrel->rows = 0;
909  /* cheap startup cost is interesting iff not all tuples to be retrieved */
910  joinrel->consider_startup = (root->tuple_fraction > 0);
911  joinrel->consider_param_startup = false;
912  joinrel->consider_parallel = false;
913  joinrel->reltarget = create_empty_pathtarget();
914  joinrel->pathlist = NIL;
915  joinrel->ppilist = NIL;
916  joinrel->partial_pathlist = NIL;
917  joinrel->cheapest_startup_path = NULL;
918  joinrel->cheapest_total_path = NULL;
919  joinrel->cheapest_unique_path = NULL;
920  joinrel->cheapest_parameterized_paths = NIL;
921  joinrel->direct_lateral_relids = NULL;
922  joinrel->lateral_relids = NULL;
923  joinrel->relid = 0; /* indicates not a baserel */
924  joinrel->rtekind = RTE_JOIN;
925  joinrel->min_attr = 0;
926  joinrel->max_attr = 0;
927  joinrel->attr_needed = NULL;
928  joinrel->attr_widths = NULL;
929  joinrel->notnullattnums = NULL;
930  joinrel->nulling_relids = NULL;
931  joinrel->lateral_vars = NIL;
932  joinrel->lateral_referencers = NULL;
933  joinrel->indexlist = NIL;
934  joinrel->pages = 0;
935  joinrel->tuples = 0;
936  joinrel->allvisfrac = 0;
937  joinrel->eclass_indexes = NULL;
938  joinrel->subroot = NULL;
939  joinrel->subplan_params = NIL;
940  joinrel->amflags = 0;
941  joinrel->serverid = InvalidOid;
942  joinrel->userid = InvalidOid;
943  joinrel->useridiscurrent = false;
944  joinrel->fdwroutine = NULL;
945  joinrel->fdw_private = NULL;
946  joinrel->baserestrictinfo = NIL;
947  joinrel->baserestrictcost.startup = 0;
948  joinrel->baserestrictcost.per_tuple = 0;
949  joinrel->joininfo = NIL;
950  joinrel->has_eclass_joins = false;
951  joinrel->consider_partitionwise_join = false; /* might get changed later */
952  joinrel->parent = parent_joinrel;
953  joinrel->top_parent = parent_joinrel->top_parent ? parent_joinrel->top_parent : parent_joinrel;
954  joinrel->top_parent_relids = joinrel->top_parent->relids;
955  joinrel->part_scheme = NULL;
956  joinrel->nparts = -1;
957  joinrel->boundinfo = NULL;
958  joinrel->partbounds_merged = false;
959  joinrel->partition_qual = NIL;
960  joinrel->part_rels = NULL;
961  joinrel->live_parts = NULL;
962  joinrel->all_partrels = NULL;
963  joinrel->partexprs = NULL;
964  joinrel->nullable_partexprs = NULL;
965 
966  /* Compute information relevant to foreign relations. */
967  set_foreign_rel_properties(joinrel, outer_rel, inner_rel);
968 
969  /* Set up reltarget struct */
970  build_child_join_reltarget(root, parent_joinrel, joinrel,
971  nappinfos, appinfos);
972 
973  /* Construct joininfo list. */
974  joinrel->joininfo = (List *) adjust_appendrel_attrs(root,
975  (Node *) parent_joinrel->joininfo,
976  nappinfos,
977  appinfos);
978 
979  /*
980  * Lateral relids referred in child join will be same as that referred in
981  * the parent relation.
982  */
983  joinrel->direct_lateral_relids = (Relids) bms_copy(parent_joinrel->direct_lateral_relids);
984  joinrel->lateral_relids = (Relids) bms_copy(parent_joinrel->lateral_relids);
985 
986  /*
987  * If the parent joinrel has pending equivalence classes, so does the
988  * child.
989  */
990  joinrel->has_eclass_joins = parent_joinrel->has_eclass_joins;
991 
992  /* Is the join between partitions itself partitioned? */
993  build_joinrel_partition_info(root, joinrel, outer_rel, inner_rel, sjinfo,
994  restrictlist);
995 
996  /* Child joinrel is parallel safe if parent is parallel safe. */
997  joinrel->consider_parallel = parent_joinrel->consider_parallel;
998 
999  /* Set estimates of the child-joinrel's size. */
1000  set_joinrel_size_estimates(root, joinrel, outer_rel, inner_rel,
1001  sjinfo, restrictlist);
1002 
1003  /* We build the join only once. */
1004  Assert(!find_join_rel(root, joinrel->relids));
1005 
1006  /* Add the relation to the PlannerInfo. */
1007  add_join_rel(root, joinrel);
1008 
1009  /*
1010  * We might need EquivalenceClass members corresponding to the child join,
1011  * so that we can represent sort pathkeys for it. As with children of
1012  * baserels, we shouldn't need this unless there are relevant eclass joins
1013  * (implying that a merge join might be possible) or pathkeys to sort by.
1014  */
1015  if (joinrel->has_eclass_joins || has_useful_pathkeys(root, parent_joinrel))
1016  add_child_join_rel_equivalences(root,
1017  nappinfos, appinfos,
1018  parent_joinrel, joinrel);
1019 
1020  pfree(appinfos);
1021 
1022  return joinrel;
1023 }
AppendRelInfo ** find_appinfos_by_relids(PlannerInfo *root, Relids relids, int *nappinfos)
Definition: appendinfo.c:733
Node * adjust_appendrel_attrs(PlannerInfo *root, Node *node, int nappinfos, AppendRelInfo **appinfos)
Definition: appendinfo.c:196
Relids adjust_child_relids(Relids relids, int nappinfos, AppendRelInfo **appinfos)
Definition: appendinfo.c:554
Bitmapset * bms_union(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:251
Bitmapset * bms_copy(const Bitmapset *a)
Definition: bitmapset.c:122
void set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo, List *restrictlist)
Definition: costsize.c:5318
void add_child_join_rel_equivalences(PlannerInfo *root, int nappinfos, AppendRelInfo **appinfos, RelOptInfo *parent_joinrel, RelOptInfo *child_joinrel)
Definition: equivclass.c:2769
#define makeNode(_type_)
Definition: nodes.h:155
@ RTE_JOIN
Definition: parsenodes.h:1030
bool has_useful_pathkeys(PlannerInfo *root, RelOptInfo *rel)
Definition: pathkeys.c:2261
Bitmapset * Relids
Definition: pathnodes.h:30
@ RELOPT_OTHER_JOINREL
Definition: pathnodes.h:820
#define IS_OTHER_REL(rel)
Definition: pathnodes.h:844
#define InvalidOid
Definition: postgres_ext.h:36
static void build_joinrel_partition_info(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo, List *restrictlist)
Definition: relnode.c:2017
RelOptInfo * find_join_rel(PlannerInfo *root, Relids relids)
Definition: relnode.c:527
static void build_child_join_reltarget(PlannerInfo *root, RelOptInfo *parentrel, RelOptInfo *childrel, int nappinfos, AppendRelInfo **appinfos)
Definition: relnode.c:2425
static void set_foreign_rel_properties(RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel)
Definition: relnode.c:589
static void add_join_rel(PlannerInfo *root, RelOptInfo *joinrel)
Definition: relnode.c:627
List * baserestrictinfo
Definition: pathnodes.h:975
List * subplan_params
Definition: pathnodes.h:944
List * ppilist
Definition: pathnodes.h:889
bool useridiscurrent
Definition: pathnodes.h:958
uint32 amflags
Definition: pathnodes.h:948
List * joininfo
Definition: pathnodes.h:981
Bitmapset * notnullattnums
Definition: pathnodes.h:926
List * partition_qual
Definition: pathnodes.h:1017
Relids relids
Definition: pathnodes.h:861
struct PathTarget * reltarget
Definition: pathnodes.h:883
Index relid
Definition: pathnodes.h:908
List * lateral_vars
Definition: pathnodes.h:930
Cardinality tuples
Definition: pathnodes.h:939
Relids top_parent_relids
Definition: pathnodes.h:999
bool partbounds_merged
Definition: pathnodes.h:1015
BlockNumber pages
Definition: pathnodes.h:938
Relids lateral_relids
Definition: pathnodes.h:903
List * cheapest_parameterized_paths
Definition: pathnodes.h:894
RelOptKind reloptkind
Definition: pathnodes.h:855
List * indexlist
Definition: pathnodes.h:934
struct Path * cheapest_unique_path
Definition: pathnodes.h:893
Relids lateral_referencers
Definition: pathnodes.h:932
struct Path * cheapest_startup_path
Definition: pathnodes.h:891
QualCost baserestrictcost
Definition: pathnodes.h:977
struct Path * cheapest_total_path
Definition: pathnodes.h:892
Oid userid
Definition: pathnodes.h:956
Bitmapset * eclass_indexes
Definition: pathnodes.h:942
Relids all_partrels
Definition: pathnodes.h:1031
Relids direct_lateral_relids
Definition: pathnodes.h:901
bool has_eclass_joins
Definition: pathnodes.h:983
Oid serverid
Definition: pathnodes.h:954
Bitmapset * live_parts
Definition: pathnodes.h:1029
bool consider_partitionwise_join
Definition: pathnodes.h:989
PlannerInfo * subroot
Definition: pathnodes.h:943
AttrNumber max_attr
Definition: pathnodes.h:916
Relids nulling_relids
Definition: pathnodes.h:928
double allvisfrac
Definition: pathnodes.h:940
Cardinality rows
Definition: pathnodes.h:867
AttrNumber min_attr
Definition: pathnodes.h:914
RTEKind rtekind
Definition: pathnodes.h:912
PathTarget * create_empty_pathtarget(void)
Definition: tlist.c:681

References add_child_join_rel_equivalences(), add_join_rel(), adjust_appendrel_attrs(), adjust_child_relids(), RelOptInfo::all_partrels, RelOptInfo::allvisfrac, RelOptInfo::amflags, Assert, RelOptInfo::baserestrictcost, RelOptInfo::baserestrictinfo, bms_copy(), bms_union(), build_child_join_reltarget(), build_joinrel_partition_info(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RelOptInfo::cheapest_unique_path, RelOptInfo::consider_parallel, RelOptInfo::consider_param_startup, RelOptInfo::consider_partitionwise_join, RelOptInfo::consider_startup, create_empty_pathtarget(), RelOptInfo::direct_lateral_relids, RelOptInfo::eclass_indexes, find_appinfos_by_relids(), find_join_rel(), RelOptInfo::has_eclass_joins, has_useful_pathkeys(), RelOptInfo::indexlist, InvalidOid, IS_OTHER_REL, RelOptInfo::joininfo, RelOptInfo::lateral_referencers, RelOptInfo::lateral_relids, RelOptInfo::lateral_vars, RelOptInfo::live_parts, makeNode, RelOptInfo::max_attr, RelOptInfo::min_attr, NIL, RelOptInfo::notnullattnums, RelOptInfo::nparts, RelOptInfo::nulling_relids, RelOptInfo::pages, RelOptInfo::partbounds_merged, RelOptInfo::partial_pathlist, RelOptInfo::partition_qual, RelOptInfo::pathlist, QualCost::per_tuple, pfree(), RelOptInfo::ppilist, RelOptInfo::relid, RelOptInfo::relids, RELOPT_OTHER_JOINREL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, RelOptInfo::rows, RTE_JOIN, RelOptInfo::rtekind, RelOptInfo::serverid, set_foreign_rel_properties(), set_joinrel_size_estimates(), QualCost::startup, RelOptInfo::subplan_params, RelOptInfo::subroot, RelOptInfo::top_parent_relids, RelOptInfo::tuples, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by try_partitionwise_join().
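
For orientation, a hedged sketch of the call as it sits inside the partitionwise-join loop; the child_* arguments are assumed to have been translated from the parent join already (try_partitionwise_join() derives them before calling this function).

#include "postgres.h"

#include "optimizer/pathnode.h"

/*
 * Hypothetical sketch: build the joinrel for one matching pair of child
 * partitions.  Both children must be "other" rels and the parent joinrel
 * must have consider_partitionwise_join set, per the Asserts above; the
 * restrictlist and SpecialJoinInfo must already be in child terms.
 */
static RelOptInfo *
make_one_child_joinrel(PlannerInfo *root,
					   RelOptInfo *child_outer, RelOptInfo *child_inner,
					   RelOptInfo *parent_joinrel,
					   List *child_restrictlist,
					   SpecialJoinInfo *child_sjinfo)
{
	return build_child_join_rel(root, child_outer, child_inner,
								parent_joinrel, child_restrictlist,
								child_sjinfo);
}
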

◆ build_join_rel()

RelOptInfo* build_join_rel ( PlannerInfo * root,
Relids  joinrelids,
RelOptInfo * outer_rel,
RelOptInfo * inner_rel,
SpecialJoinInfo * sjinfo,
List * pushed_down_joins,
List **  restrictlist_ptr 
)

Definition at line 665 of file relnode.c.

672 {
673  RelOptInfo *joinrel;
674  List *restrictlist;
675 
676  /* This function should be used only for join between parents. */
677  Assert(!IS_OTHER_REL(outer_rel) && !IS_OTHER_REL(inner_rel));
678 
679  /*
680  * See if we already have a joinrel for this set of base rels.
681  */
682  joinrel = find_join_rel(root, joinrelids);
683 
684  if (joinrel)
685  {
686  /*
687  * Yes, so we only need to figure the restrictlist for this particular
688  * pair of component relations.
689  */
690  if (restrictlist_ptr)
691  *restrictlist_ptr = build_joinrel_restrictlist(root,
692  joinrel,
693  outer_rel,
694  inner_rel,
695  sjinfo);
696  return joinrel;
697  }
698 
699  /*
700  * Nope, so make one.
701  */
702  joinrel = makeNode(RelOptInfo);
703  joinrel->reloptkind = RELOPT_JOINREL;
704  joinrel->relids = bms_copy(joinrelids);
705  joinrel->rows = 0;
706  /* cheap startup cost is interesting iff not all tuples to be retrieved */
707  joinrel->consider_startup = (root->tuple_fraction > 0);
708  joinrel->consider_param_startup = false;
709  joinrel->consider_parallel = false;
710  joinrel->reltarget = create_empty_pathtarget();
711  joinrel->pathlist = NIL;
712  joinrel->ppilist = NIL;
713  joinrel->partial_pathlist = NIL;
714  joinrel->cheapest_startup_path = NULL;
715  joinrel->cheapest_total_path = NULL;
716  joinrel->cheapest_unique_path = NULL;
717  joinrel->cheapest_parameterized_paths = NIL;
718  /* init direct_lateral_relids from children; we'll finish it up below */
719  joinrel->direct_lateral_relids =
720  bms_union(outer_rel->direct_lateral_relids,
721  inner_rel->direct_lateral_relids);
722  joinrel->lateral_relids = min_join_parameterization(root, joinrelids,
723  outer_rel, inner_rel);
724  joinrel->relid = 0; /* indicates not a baserel */
725  joinrel->rtekind = RTE_JOIN;
726  joinrel->min_attr = 0;
727  joinrel->max_attr = 0;
728  joinrel->attr_needed = NULL;
729  joinrel->attr_widths = NULL;
730  joinrel->notnullattnums = NULL;
731  joinrel->nulling_relids = NULL;
732  joinrel->lateral_vars = NIL;
733  joinrel->lateral_referencers = NULL;
734  joinrel->indexlist = NIL;
735  joinrel->statlist = NIL;
736  joinrel->pages = 0;
737  joinrel->tuples = 0;
738  joinrel->allvisfrac = 0;
739  joinrel->eclass_indexes = NULL;
740  joinrel->subroot = NULL;
741  joinrel->subplan_params = NIL;
742  joinrel->rel_parallel_workers = -1;
743  joinrel->amflags = 0;
744  joinrel->serverid = InvalidOid;
745  joinrel->userid = InvalidOid;
746  joinrel->useridiscurrent = false;
747  joinrel->fdwroutine = NULL;
748  joinrel->fdw_private = NULL;
749  joinrel->unique_for_rels = NIL;
750  joinrel->non_unique_for_rels = NIL;
751  joinrel->baserestrictinfo = NIL;
752  joinrel->baserestrictcost.startup = 0;
753  joinrel->baserestrictcost.per_tuple = 0;
754  joinrel->baserestrict_min_security = UINT_MAX;
755  joinrel->joininfo = NIL;
756  joinrel->has_eclass_joins = false;
757  joinrel->consider_partitionwise_join = false; /* might get changed later */
758  joinrel->parent = NULL;
759  joinrel->top_parent = NULL;
760  joinrel->top_parent_relids = NULL;
761  joinrel->part_scheme = NULL;
762  joinrel->nparts = -1;
763  joinrel->boundinfo = NULL;
764  joinrel->partbounds_merged = false;
765  joinrel->partition_qual = NIL;
766  joinrel->part_rels = NULL;
767  joinrel->live_parts = NULL;
768  joinrel->all_partrels = NULL;
769  joinrel->partexprs = NULL;
770  joinrel->nullable_partexprs = NULL;
771 
772  /* Compute information relevant to the foreign relations. */
773  set_foreign_rel_properties(joinrel, outer_rel, inner_rel);
774 
775  /*
776  * Fill the joinrel's tlist with just the Vars and PHVs that need to be
777  * output from this join (ie, are needed for higher joinclauses or final
778  * output).
779  *
780  * NOTE: the tlist order for a join rel will depend on which pair of outer
781  * and inner rels we first try to build it from. But the contents should
782  * be the same regardless.
783  */
784  build_joinrel_tlist(root, joinrel, outer_rel, sjinfo, pushed_down_joins,
785  (sjinfo->jointype == JOIN_FULL));
786  build_joinrel_tlist(root, joinrel, inner_rel, sjinfo, pushed_down_joins,
787  (sjinfo->jointype != JOIN_INNER));
788  add_placeholders_to_joinrel(root, joinrel, outer_rel, inner_rel, sjinfo);
789 
790  /*
791  * add_placeholders_to_joinrel also took care of adding the ph_lateral
792  * sets of any PlaceHolderVars computed here to direct_lateral_relids, so
793  * now we can finish computing that. This is much like the computation of
794  * the transitively-closed lateral_relids in min_join_parameterization,
795  * except that here we *do* have to consider the added PHVs.
796  */
797  joinrel->direct_lateral_relids =
798  bms_del_members(joinrel->direct_lateral_relids, joinrel->relids);
799 
800  /*
801  * Construct restrict and join clause lists for the new joinrel. (The
802  * caller might or might not need the restrictlist, but I need it anyway
803  * for set_joinrel_size_estimates().)
804  */
805  restrictlist = build_joinrel_restrictlist(root, joinrel,
806  outer_rel, inner_rel,
807  sjinfo);
808  if (restrictlist_ptr)
809  *restrictlist_ptr = restrictlist;
810  build_joinrel_joinlist(joinrel, outer_rel, inner_rel);
811 
812  /*
813  * This is also the right place to check whether the joinrel has any
814  * pending EquivalenceClass joins.
815  */
816  joinrel->has_eclass_joins = has_relevant_eclass_joinclause(root, joinrel);
817 
818  /* Store the partition information. */
819  build_joinrel_partition_info(root, joinrel, outer_rel, inner_rel, sjinfo,
820  restrictlist);
821 
822  /*
823  * Set estimates of the joinrel's size.
824  */
825  set_joinrel_size_estimates(root, joinrel, outer_rel, inner_rel,
826  sjinfo, restrictlist);
827 
828  /*
829  * Set the consider_parallel flag if this joinrel could potentially be
830  * scanned within a parallel worker. If this flag is false for either
831  * inner_rel or outer_rel, then it must be false for the joinrel also.
832  * Even if both are true, there might be parallel-restricted expressions
833  * in the targetlist or quals.
834  *
835  * Note that if there are more than two rels in this relation, they could
836  * be divided between inner_rel and outer_rel in any arbitrary way. We
837  * assume this doesn't matter, because we should hit all the same baserels
838  * and joinclauses while building up to this joinrel no matter which we
839  * take; therefore, we should make the same decision here however we get
840  * here.
841  */
842  if (inner_rel->consider_parallel && outer_rel->consider_parallel &&
843  is_parallel_safe(root, (Node *) restrictlist) &&
844  is_parallel_safe(root, (Node *) joinrel->reltarget->exprs))
845  joinrel->consider_parallel = true;
846 
847  /* Add the joinrel to the PlannerInfo. */
848  add_join_rel(root, joinrel);
849 
850  /*
851  * Also, if dynamic-programming join search is active, add the new joinrel
852  * to the appropriate sublist. Note: you might think the Assert on number
853  * of members should be for equality, but some of the level 1 rels might
854  * have been joinrels already, so we can only assert <=.
855  */
856  if (root->join_rel_level)
857  {
858  Assert(root->join_cur_level > 0);
859  Assert(root->join_cur_level <= bms_num_members(joinrel->relids));
860  root->join_rel_level[root->join_cur_level] =
861  lappend(root->join_rel_level[root->join_cur_level], joinrel);
862  }
863 
864  return joinrel;
865 }
int bms_num_members(const Bitmapset *a)
Definition: bitmapset.c:751
Bitmapset * bms_del_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:1161
bool has_relevant_eclass_joinclause(PlannerInfo *root, RelOptInfo *rel1)
Definition: equivclass.c:3179
List * lappend(List *list, void *datum)
Definition: list.c:339
@ JOIN_FULL
Definition: nodes.h:295
@ JOIN_INNER
Definition: nodes.h:293
@ RELOPT_JOINREL
Definition: pathnodes.h:818
void add_placeholders_to_joinrel(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo)
Definition: placeholder.c:373
static void build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *input_rel, SpecialJoinInfo *sjinfo, List *pushed_down_joins, bool can_null)
Definition: relnode.c:1112
Relids min_join_parameterization(PlannerInfo *root, Relids joinrelids, RelOptInfo *outer_rel, RelOptInfo *inner_rel)
Definition: relnode.c:1034
static List * build_joinrel_restrictlist(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo)
Definition: relnode.c:1297
static void build_joinrel_joinlist(RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel)
Definition: relnode.c:1334
List * statlist
Definition: pathnodes.h:936
List * unique_for_rels
Definition: pathnodes.h:967
List * non_unique_for_rels
Definition: pathnodes.h:969
int rel_parallel_workers
Definition: pathnodes.h:946
Index baserestrict_min_security
Definition: pathnodes.h:979
JoinType jointype
Definition: pathnodes.h:2886

References add_join_rel(), add_placeholders_to_joinrel(), RelOptInfo::all_partrels, RelOptInfo::allvisfrac, RelOptInfo::amflags, Assert, RelOptInfo::baserestrict_min_security, RelOptInfo::baserestrictcost, RelOptInfo::baserestrictinfo, bms_copy(), bms_del_members(), bms_num_members(), bms_union(), build_joinrel_joinlist(), build_joinrel_partition_info(), build_joinrel_restrictlist(), build_joinrel_tlist(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RelOptInfo::cheapest_unique_path, RelOptInfo::consider_parallel, RelOptInfo::consider_param_startup, RelOptInfo::consider_partitionwise_join, RelOptInfo::consider_startup, create_empty_pathtarget(), RelOptInfo::direct_lateral_relids, RelOptInfo::eclass_indexes, PathTarget::exprs, find_join_rel(), RelOptInfo::has_eclass_joins, has_relevant_eclass_joinclause(), RelOptInfo::indexlist, InvalidOid, IS_OTHER_REL, is_parallel_safe(), JOIN_FULL, JOIN_INNER, RelOptInfo::joininfo, SpecialJoinInfo::jointype, lappend(), RelOptInfo::lateral_referencers, RelOptInfo::lateral_relids, RelOptInfo::lateral_vars, RelOptInfo::live_parts, makeNode, RelOptInfo::max_attr, RelOptInfo::min_attr, min_join_parameterization(), NIL, RelOptInfo::non_unique_for_rels, RelOptInfo::notnullattnums, RelOptInfo::nparts, RelOptInfo::nulling_relids, RelOptInfo::pages, RelOptInfo::partbounds_merged, RelOptInfo::partial_pathlist, RelOptInfo::partition_qual, RelOptInfo::pathlist, QualCost::per_tuple, RelOptInfo::ppilist, RelOptInfo::rel_parallel_workers, RelOptInfo::relid, RelOptInfo::relids, RELOPT_JOINREL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, RelOptInfo::rows, RTE_JOIN, RelOptInfo::rtekind, RelOptInfo::serverid, set_foreign_rel_properties(), set_joinrel_size_estimates(), QualCost::startup, RelOptInfo::statlist, RelOptInfo::subplan_params, RelOptInfo::subroot, RelOptInfo::top_parent_relids, RelOptInfo::tuples, RelOptInfo::unique_for_rels, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by make_join_rel().

◆ build_simple_rel()

RelOptInfo* build_simple_rel ( PlannerInfo *root,
int  relid,
RelOptInfo *parent 
)

Definition at line 192 of file relnode.c.

193 {
194  RelOptInfo *rel;
195  RangeTblEntry *rte;
196 
197  /* Rel should not exist already */
198  Assert(relid > 0 && relid < root->simple_rel_array_size);
199  if (root->simple_rel_array[relid] != NULL)
200  elog(ERROR, "rel %d already exists", relid);
201 
202  /* Fetch RTE for relation */
203  rte = root->simple_rte_array[relid];
204  Assert(rte != NULL);
205 
206  rel = makeNode(RelOptInfo);
207  rel->reloptkind = parent ? RELOPT_OTHER_MEMBER_REL : RELOPT_BASEREL;
208  rel->relids = bms_make_singleton(relid);
209  rel->rows = 0;
210  /* cheap startup cost is interesting iff not all tuples to be retrieved */
211  rel->consider_startup = (root->tuple_fraction > 0);
212  rel->consider_param_startup = false; /* might get changed later */
213  rel->consider_parallel = false; /* might get changed later */
214  rel->reltarget = create_empty_pathtarget();
215  rel->pathlist = NIL;
216  rel->ppilist = NIL;
217  rel->partial_pathlist = NIL;
218  rel->cheapest_startup_path = NULL;
219  rel->cheapest_total_path = NULL;
220  rel->cheapest_unique_path = NULL;
221  rel->cheapest_parameterized_paths = NIL;
222  rel->relid = relid;
223  rel->rtekind = rte->rtekind;
224  /* min_attr, max_attr, attr_needed, attr_widths are set below */
225  rel->notnullattnums = NULL;
226  rel->lateral_vars = NIL;
227  rel->indexlist = NIL;
228  rel->statlist = NIL;
229  rel->pages = 0;
230  rel->tuples = 0;
231  rel->allvisfrac = 0;
232  rel->eclass_indexes = NULL;
233  rel->subroot = NULL;
234  rel->subplan_params = NIL;
235  rel->rel_parallel_workers = -1; /* set up in get_relation_info */
236  rel->amflags = 0;
237  rel->serverid = InvalidOid;
238  if (rte->rtekind == RTE_RELATION)
239  {
240  Assert(parent == NULL ||
241  parent->rtekind == RTE_RELATION ||
242  parent->rtekind == RTE_SUBQUERY);
243 
244  /*
245  * For any RELATION rte, we need a userid with which to check
246  * permission access. Baserels simply use their own
247  * RTEPermissionInfo's checkAsUser.
248  *
249  * For otherrels normally there's no RTEPermissionInfo, so we use the
250  * parent's, which normally has one. The exceptional case is that the
251  * parent is a subquery, in which case the otherrel will have its own.
252  */
253  if (rel->reloptkind == RELOPT_BASEREL ||
254  (rel->reloptkind == RELOPT_OTHER_MEMBER_REL &&
255  parent->rtekind == RTE_SUBQUERY))
256  {
257  RTEPermissionInfo *perminfo;
258 
259  perminfo = getRTEPermissionInfo(root->parse->rteperminfos, rte);
260  rel->userid = perminfo->checkAsUser;
261  }
262  else
263  rel->userid = parent->userid;
264  }
265  else
266  rel->userid = InvalidOid;
267  rel->useridiscurrent = false;
268  rel->fdwroutine = NULL;
269  rel->fdw_private = NULL;
270  rel->unique_for_rels = NIL;
271  rel->non_unique_for_rels = NIL;
272  rel->baserestrictinfo = NIL;
273  rel->baserestrictcost.startup = 0;
274  rel->baserestrictcost.per_tuple = 0;
275  rel->baserestrict_min_security = UINT_MAX;
276  rel->joininfo = NIL;
277  rel->has_eclass_joins = false;
278  rel->consider_partitionwise_join = false; /* might get changed later */
279  rel->part_scheme = NULL;
280  rel->nparts = -1;
281  rel->boundinfo = NULL;
282  rel->partbounds_merged = false;
283  rel->partition_qual = NIL;
284  rel->part_rels = NULL;
285  rel->live_parts = NULL;
286  rel->all_partrels = NULL;
287  rel->partexprs = NULL;
288  rel->nullable_partexprs = NULL;
289 
290  /*
291  * Pass assorted information down the inheritance hierarchy.
292  */
293  if (parent)
294  {
295  /* We keep back-links to immediate parent and topmost parent. */
296  rel->parent = parent;
297  rel->top_parent = parent->top_parent ? parent->top_parent : parent;
298  rel->top_parent_relids = rel->top_parent->relids;
299 
300  /*
301  * A child rel is below the same outer joins as its parent. (We
302  * presume this info was already calculated for the parent.)
303  */
304  rel->nulling_relids = parent->nulling_relids;
305 
306  /*
307  * Also propagate lateral-reference information from appendrel parent
308  * rels to their child rels. We intentionally give each child rel the
309  * same minimum parameterization, even though it's quite possible that
310  * some don't reference all the lateral rels. This is because any
311  * append path for the parent will have to have the same
312  * parameterization for every child anyway, and there's no value in
313  * forcing extra reparameterize_path() calls. Similarly, a lateral
314  * reference to the parent prevents use of otherwise-movable join rels
315  * for each child.
316  *
317  * It's possible for child rels to have their own children, in which
318  * case the topmost parent's lateral info propagates all the way down.
319  */
320  rel->direct_lateral_relids = parent->direct_lateral_relids;
321  rel->lateral_relids = parent->lateral_relids;
322  rel->lateral_referencers = parent->lateral_referencers;
323  }
324  else
325  {
326  rel->parent = NULL;
327  rel->top_parent = NULL;
328  rel->top_parent_relids = NULL;
329  rel->nulling_relids = NULL;
330  rel->direct_lateral_relids = NULL;
331  rel->lateral_relids = NULL;
332  rel->lateral_referencers = NULL;
333  }
334 
335  /* Check type of rtable entry */
336  switch (rte->rtekind)
337  {
338  case RTE_RELATION:
339  /* Table --- retrieve statistics from the system catalogs */
340  get_relation_info(root, rte->relid, rte->inh, rel);
341  break;
342  case RTE_SUBQUERY:
343  case RTE_FUNCTION:
344  case RTE_TABLEFUNC:
345  case RTE_VALUES:
346  case RTE_CTE:
347  case RTE_NAMEDTUPLESTORE:
348 
349  /*
350  * Subquery, function, tablefunc, values list, CTE, or ENR --- set
351  * up attr range and arrays
352  *
353  * Note: 0 is included in range to support whole-row Vars
354  */
355  rel->min_attr = 0;
356  rel->max_attr = list_length(rte->eref->colnames);
357  rel->attr_needed = (Relids *)
358  palloc0((rel->max_attr - rel->min_attr + 1) * sizeof(Relids));
359  rel->attr_widths = (int32 *)
360  palloc0((rel->max_attr - rel->min_attr + 1) * sizeof(int32));
361  break;
362  case RTE_RESULT:
363  /* RTE_RESULT has no columns, nor could it have whole-row Var */
364  rel->min_attr = 0;
365  rel->max_attr = -1;
366  rel->attr_needed = NULL;
367  rel->attr_widths = NULL;
368  break;
369  default:
370  elog(ERROR, "unrecognized RTE kind: %d",
371  (int) rte->rtekind);
372  break;
373  }
374 
375  /*
376  * We must apply the partially filled in RelOptInfo before calling
377  * apply_child_basequals due to some transformations within that function
378  * which require the RelOptInfo to be available in the simple_rel_array.
379  */
380  root->simple_rel_array[relid] = rel;
381 
382  /*
383  * Apply the parent's quals to the child, with appropriate substitution of
384  * variables. If the resulting clause is constant-FALSE or NULL after
385  * applying transformations, apply_child_basequals returns false to
386  * indicate that scanning this relation won't yield any rows. In this
387  * case, we mark the child as dummy right away. (We must do this
388  * immediately so that pruning works correctly when recursing in
389  * expand_partitioned_rtentry.)
390  */
391  if (parent)
392  {
393  AppendRelInfo *appinfo = root->append_rel_array[relid];
394 
395  Assert(appinfo != NULL);
396  if (!apply_child_basequals(root, parent, rel, rte, appinfo))
397  {
398  /*
399  * Restriction clause reduced to constant FALSE or NULL. Mark as
400  * dummy so we won't scan this relation.
401  */
402  mark_dummy_rel(rel);
403  }
404  }
405 
406  return rel;
407 }
Bitmapset * bms_make_singleton(int x)
Definition: bitmapset.c:216
signed int int32
Definition: c.h:494
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:224
bool apply_child_basequals(PlannerInfo *root, RelOptInfo *parentrel, RelOptInfo *childrel, RangeTblEntry *childRTE, AppendRelInfo *appinfo)
Definition: inherit.c:834
void mark_dummy_rel(RelOptInfo *rel)
Definition: joinrels.c:1381
void * palloc0(Size size)
Definition: mcxt.c:1346
RTEPermissionInfo * getRTEPermissionInfo(List *rteperminfos, RangeTblEntry *rte)
@ RTE_CTE
Definition: parsenodes.h:1034
@ RTE_NAMEDTUPLESTORE
Definition: parsenodes.h:1035
@ RTE_VALUES
Definition: parsenodes.h:1033
@ RTE_SUBQUERY
Definition: parsenodes.h:1029
@ RTE_RESULT
Definition: parsenodes.h:1036
@ RTE_FUNCTION
Definition: parsenodes.h:1031
@ RTE_TABLEFUNC
Definition: parsenodes.h:1032
@ RTE_RELATION
Definition: parsenodes.h:1028
@ RELOPT_BASEREL
Definition: pathnodes.h:817
@ RELOPT_OTHER_MEMBER_REL
Definition: pathnodes.h:819
static int list_length(const List *l)
Definition: pg_list.h:152
void get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent, RelOptInfo *rel)
Definition: plancat.c:115
RTEKind rtekind
Definition: parsenodes.h:1057

References RelOptInfo::all_partrels, RelOptInfo::allvisfrac, RelOptInfo::amflags, apply_child_basequals(), Assert, RelOptInfo::baserestrict_min_security, RelOptInfo::baserestrictcost, RelOptInfo::baserestrictinfo, bms_make_singleton(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RelOptInfo::cheapest_unique_path, RTEPermissionInfo::checkAsUser, RelOptInfo::consider_parallel, RelOptInfo::consider_param_startup, RelOptInfo::consider_partitionwise_join, RelOptInfo::consider_startup, create_empty_pathtarget(), RelOptInfo::direct_lateral_relids, RelOptInfo::eclass_indexes, elog, ERROR, get_relation_info(), getRTEPermissionInfo(), RelOptInfo::has_eclass_joins, RelOptInfo::indexlist, RangeTblEntry::inh, InvalidOid, RelOptInfo::joininfo, RelOptInfo::lateral_referencers, RelOptInfo::lateral_relids, RelOptInfo::lateral_vars, list_length(), RelOptInfo::live_parts, makeNode, mark_dummy_rel(), RelOptInfo::max_attr, RelOptInfo::min_attr, NIL, RelOptInfo::non_unique_for_rels, RelOptInfo::notnullattnums, RelOptInfo::nparts, RelOptInfo::nulling_relids, RelOptInfo::pages, palloc0(), RelOptInfo::partbounds_merged, RelOptInfo::partial_pathlist, RelOptInfo::partition_qual, RelOptInfo::pathlist, QualCost::per_tuple, RelOptInfo::ppilist, RelOptInfo::rel_parallel_workers, RangeTblEntry::relid, RelOptInfo::relid, RelOptInfo::relids, RELOPT_BASEREL, RELOPT_OTHER_MEMBER_REL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, RelOptInfo::rows, RTE_CTE, RTE_FUNCTION, RTE_NAMEDTUPLESTORE, RTE_RELATION, RTE_RESULT, RTE_SUBQUERY, RTE_TABLEFUNC, RTE_VALUES, RangeTblEntry::rtekind, RelOptInfo::rtekind, RelOptInfo::serverid, QualCost::startup, RelOptInfo::statlist, RelOptInfo::subplan_params, RelOptInfo::subroot, RelOptInfo::top_parent_relids, RelOptInfo::tuples, RelOptInfo::unique_for_rels, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by add_base_rels_to_query(), expand_appendrel_subquery(), expand_inherited_rtentry(), expand_partitioned_rtentry(), plan_cluster_use_sort(), plan_create_index_workers(), query_planner(), and recurse_set_operations().
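
For orientation, a condensed sketch (not the verbatim initsplan.c code) of how a caller such as add_base_rels_to_query() creates a plain baserel while walking the join tree; jtnode is assumed to be the current jointree node:

 if (IsA(jtnode, RangeTblRef))
 {
     int varno = ((RangeTblRef *) jtnode)->rtindex;

     /* NULL parent means a plain baserel, not an appendrel child */
     (void) build_simple_rel(root, varno, NULL);
 }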

◆ calc_nestloop_required_outer()

Relids calc_nestloop_required_outer ( Relids  outerrelids,
Relids  outer_paramrels,
Relids  innerrelids,
Relids  inner_paramrels 
)

Definition at line 2378 of file pathnode.c.

2382 {
2383  Relids required_outer;
2384 
2385  /* inner_path can require rels from outer path, but not vice versa */
2386  Assert(!bms_overlap(outer_paramrels, innerrelids));
2387  /* easy case if inner path is not parameterized */
2388  if (!inner_paramrels)
2389  return bms_copy(outer_paramrels);
2390  /* else, form the union ... */
2391  required_outer = bms_union(outer_paramrels, inner_paramrels);
2392  /* ... and remove any mention of now-satisfied outer rels */
2393  required_outer = bms_del_members(required_outer,
2394  outerrelids);
2395  return required_outer;
2396 }
bool bms_overlap(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:582

References Assert, bms_copy(), bms_del_members(), bms_overlap(), and bms_union().

Referenced by try_nestloop_path().
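
Illustrative example (relids chosen arbitrarily): if the outer path's parameterization is {3}, the inner path's is {1,3}, and the outer side delivers rel {1}, then the union {1,3} has its now-satisfied member 1 deleted, so the nestloop as a whole needs only rel {3} supplied from outside.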

◆ calc_non_nestloop_required_outer()

Relids calc_non_nestloop_required_outer ( Path *outer_path,
Path *inner_path 
)

Definition at line 2405 of file pathnode.c.

2406 {
2407  Relids outer_paramrels = PATH_REQ_OUTER(outer_path);
2408  Relids inner_paramrels = PATH_REQ_OUTER(inner_path);
2409  Relids innerrelids PG_USED_FOR_ASSERTS_ONLY;
2410  Relids outerrelids PG_USED_FOR_ASSERTS_ONLY;
2411  Relids required_outer;
2412 
2413  /*
2414  * Any parameterization of the input paths refers to topmost parents of
2415  * the relevant relations, because reparameterize_path_by_child() hasn't
2416  * been called yet. So we must consider topmost parents of the relations
2417  * being joined, too, while checking for disallowed parameterization
2418  * cases.
2419  */
2420  if (inner_path->parent->top_parent_relids)
2421  innerrelids = inner_path->parent->top_parent_relids;
2422  else
2423  innerrelids = inner_path->parent->relids;
2424 
2425  if (outer_path->parent->top_parent_relids)
2426  outerrelids = outer_path->parent->top_parent_relids;
2427  else
2428  outerrelids = outer_path->parent->relids;
2429 
2430  /* neither path can require rels from the other */
2431  Assert(!bms_overlap(outer_paramrels, innerrelids));
2432  Assert(!bms_overlap(inner_paramrels, outerrelids));
2433  /* form the union ... */
2434  required_outer = bms_union(outer_paramrels, inner_paramrels);
2435  /* we do not need an explicit test for empty; bms_union gets it right */
2436  return required_outer;
2437 }
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:182

References Assert, bms_overlap(), bms_union(), PATH_REQ_OUTER, and PG_USED_FOR_ASSERTS_ONLY.

Referenced by try_hashjoin_path(), and try_mergejoin_path().

◆ compare_fractional_path_costs()

int compare_fractional_path_costs ( Path *path1,
Path *path2,
double  fraction 
)

Definition at line 115 of file pathnode.c.

117 {
118  Cost cost1,
119  cost2;
120 
121  if (fraction <= 0.0 || fraction >= 1.0)
122  return compare_path_costs(path1, path2, TOTAL_COST);
123  cost1 = path1->startup_cost +
124  fraction * (path1->total_cost - path1->startup_cost);
125  cost2 = path2->startup_cost +
126  fraction * (path2->total_cost - path2->startup_cost);
127  if (cost1 < cost2)
128  return -1;
129  if (cost1 > cost2)
130  return +1;
131  return 0;
132 }
int compare_path_costs(Path *path1, Path *path2, CostSelector criterion)
Definition: pathnode.c:69
@ TOTAL_COST
Definition: pathnodes.h:38

References compare_path_costs(), Path::startup_cost, TOTAL_COST, and Path::total_cost.

Referenced by choose_hashed_setop(), get_cheapest_fractional_path(), and get_cheapest_fractional_path_for_pathkeys().
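
Worked example with illustrative numbers and fraction = 0.1: a path with startup cost 0 and total cost 1000 is charged 0 + 0.1 * (1000 - 0) = 100, while one with startup cost 50 and total cost 500 is charged 50 + 0.1 * (500 - 50) = 95, so the second compares as cheaper for fetching the first tenth of the output even though its startup cost is higher.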

◆ compare_path_costs()

int compare_path_costs ( Path *path1,
Path *path2,
CostSelector  criterion 
)

Definition at line 69 of file pathnode.c.

70 {
71  if (criterion == STARTUP_COST)
72  {
73  if (path1->startup_cost < path2->startup_cost)
74  return -1;
75  if (path1->startup_cost > path2->startup_cost)
76  return +1;
77 
78  /*
79  * If paths have the same startup cost (not at all unlikely), order
80  * them by total cost.
81  */
82  if (path1->total_cost < path2->total_cost)
83  return -1;
84  if (path1->total_cost > path2->total_cost)
85  return +1;
86  }
87  else
88  {
89  if (path1->total_cost < path2->total_cost)
90  return -1;
91  if (path1->total_cost > path2->total_cost)
92  return +1;
93 
94  /*
95  * If paths have the same total cost, order them by startup cost.
96  */
97  if (path1->startup_cost < path2->startup_cost)
98  return -1;
99  if (path1->startup_cost > path2->startup_cost)
100  return +1;
101  }
102  return 0;
103 }
@ STARTUP_COST
Definition: pathnodes.h:38

References STARTUP_COST, Path::startup_cost, and Path::total_cost.

Referenced by append_startup_cost_compare(), append_total_cost_compare(), compare_fractional_path_costs(), generate_mergejoin_paths(), get_cheapest_parameterized_child_path(), get_cheapest_path_for_pathkeys(), and set_cheapest().

◆ create_agg_path()

AggPath* create_agg_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
PathTarget *target,
AggStrategy  aggstrategy,
AggSplit  aggsplit,
List *groupClause,
List *qual,
const AggClauseCosts *aggcosts,
double  numGroups 
)

Definition at line 3155 of file pathnode.c.

3165 {
3166  AggPath *pathnode = makeNode(AggPath);
3167 
3168  pathnode->path.pathtype = T_Agg;
3169  pathnode->path.parent = rel;
3170  pathnode->path.pathtarget = target;
3171  /* For now, assume we are above any joins, so no parameterization */
3172  pathnode->path.param_info = NULL;
3173  pathnode->path.parallel_aware = false;
3174  pathnode->path.parallel_safe = rel->consider_parallel &&
3175  subpath->parallel_safe;
3176  pathnode->path.parallel_workers = subpath->parallel_workers;
3177 
3178  if (aggstrategy == AGG_SORTED)
3179  {
3180  /*
3181  * Attempt to preserve the order of the subpath. Additional pathkeys
3182  * may have been added in adjust_group_pathkeys_for_groupagg() to
3183  * support ORDER BY / DISTINCT aggregates. Pathkeys added there
3184  * belong to columns within the aggregate function, so we must strip
3185  * these additional pathkeys off as those columns are unavailable
3186  * above the aggregate node.
3187  */
3188  if (list_length(subpath->pathkeys) > root->num_groupby_pathkeys)
3189  pathnode->path.pathkeys = list_copy_head(subpath->pathkeys,
3190  root->num_groupby_pathkeys);
3191  else
3192  pathnode->path.pathkeys = subpath->pathkeys; /* preserves order */
3193  }
3194  else
3195  pathnode->path.pathkeys = NIL; /* output is unordered */
3196 
3197  pathnode->subpath = subpath;
3198 
3199  pathnode->aggstrategy = aggstrategy;
3200  pathnode->aggsplit = aggsplit;
3201  pathnode->numGroups = numGroups;
3202  pathnode->transitionSpace = aggcosts ? aggcosts->transitionSpace : 0;
3203  pathnode->groupClause = groupClause;
3204  pathnode->qual = qual;
3205 
3206  cost_agg(&pathnode->path, root,
3207  aggstrategy, aggcosts,
3208  list_length(groupClause), numGroups,
3209  qual,
3210  subpath->startup_cost, subpath->total_cost,
3211  subpath->rows, subpath->pathtarget->width);
3212 
3213  /* add tlist eval cost for each output row */
3214  pathnode->path.startup_cost += target->cost.startup;
3215  pathnode->path.total_cost += target->cost.startup +
3216  target->cost.per_tuple * pathnode->path.rows;
3217 
3218  return pathnode;
3219 }
void cost_agg(Path *path, PlannerInfo *root, AggStrategy aggstrategy, const AggClauseCosts *aggcosts, int numGroupCols, double numGroups, List *quals, Cost input_startup_cost, Cost input_total_cost, double input_tuples, double input_width)
Definition: costsize.c:2650
List * list_copy_head(const List *oldlist, int len)
Definition: list.c:1593
Datum subpath(PG_FUNCTION_ARGS)
Definition: ltree_op.c:310
@ AGG_SORTED
Definition: nodes.h:354
Size transitionSpace
Definition: pathnodes.h:62
Path * subpath
Definition: pathnodes.h:2243
Cardinality numGroups
Definition: pathnodes.h:2246
AggSplit aggsplit
Definition: pathnodes.h:2245
List * groupClause
Definition: pathnodes.h:2248
uint64 transitionSpace
Definition: pathnodes.h:2247
AggStrategy aggstrategy
Definition: pathnodes.h:2244
Path path
Definition: pathnodes.h:2242
List * qual
Definition: pathnodes.h:2249
NodeTag pathtype
Definition: pathnodes.h:1615
int parallel_workers
Definition: pathnodes.h:1646
bool parallel_aware
Definition: pathnodes.h:1642

References AGG_SORTED, AggPath::aggsplit, AggPath::aggstrategy, RelOptInfo::consider_parallel, PathTarget::cost, cost_agg(), AggPath::groupClause, list_copy_head(), list_length(), makeNode, NIL, AggPath::numGroups, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, AggPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, AggPath::qual, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), AggPath::subpath, Path::total_cost, AggClauseCosts::transitionSpace, and AggPath::transitionSpace.

Referenced by add_paths_to_grouping_rel(), create_final_distinct_paths(), create_partial_distinct_paths(), create_partial_grouping_paths(), and generate_union_paths().
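
A hedged sketch of a typical call, roughly what add_paths_to_grouping_rel() does when building a hashed aggregate over an existing input path; the local names (grouped_rel, input_path, groupClause, havingQual, agg_costs, dNumGroups) are assumptions for illustration, not the exact source:

 /* Sketch only: offer a hashed-aggregate path to the grouped relation. */
 AggPath *agg = create_agg_path(root, grouped_rel, input_path,
                                grouped_rel->reltarget,
                                AGG_HASHED, AGGSPLIT_SIMPLE,
                                groupClause, havingQual,
                                agg_costs, dNumGroups);

 add_path(grouped_rel, (Path *) agg);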

◆ create_append_path()

AppendPath* create_append_path ( PlannerInfo *root,
RelOptInfo *rel,
List *subpaths,
List *partial_subpaths,
List *pathkeys,
Relids  required_outer,
int  parallel_workers,
bool  parallel_aware,
double  rows 
)

Definition at line 1244 of file pathnode.c.

1250 {
1251  AppendPath *pathnode = makeNode(AppendPath);
1252  ListCell *l;
1253 
1254  Assert(!parallel_aware || parallel_workers > 0);
1255 
1256  pathnode->path.pathtype = T_Append;
1257  pathnode->path.parent = rel;
1258  pathnode->path.pathtarget = rel->reltarget;
1259 
1260  /*
1261  * If this is for a baserel (not a join or non-leaf partition), we prefer
1262  * to apply get_baserel_parampathinfo to construct a full ParamPathInfo
1263  * for the path. This supports building a Memoize path atop this path,
1264  * and if this is a partitioned table the info may be useful for run-time
1265  * pruning (cf make_partition_pruneinfo()).
1266  *
1267  * However, if we don't have "root" then that won't work and we fall back
1268  * on the simpler get_appendrel_parampathinfo. There's no point in doing
1269  * the more expensive thing for a dummy path, either.
1270  */
1271  if (rel->reloptkind == RELOPT_BASEREL && root && subpaths != NIL)
1272  pathnode->path.param_info = get_baserel_parampathinfo(root,
1273  rel,
1274  required_outer);
1275  else
1276  pathnode->path.param_info = get_appendrel_parampathinfo(rel,
1277  required_outer);
1278 
1279  pathnode->path.parallel_aware = parallel_aware;
1280  pathnode->path.parallel_safe = rel->consider_parallel;
1281  pathnode->path.parallel_workers = parallel_workers;
1282  pathnode->path.pathkeys = pathkeys;
1283 
1284  /*
1285  * For parallel append, non-partial paths are sorted by descending total
1286  * costs. That way, the total time to finish all non-partial paths is
1287  * minimized. Also, the partial paths are sorted by descending startup
1288  * costs. There may be some paths that require to do startup work by a
1289  * single worker. In such case, it's better for workers to choose the
1290  * expensive ones first, whereas the leader should choose the cheapest
1291  * startup plan.
1292  */
1293  if (pathnode->path.parallel_aware)
1294  {
1295  /*
1296  * We mustn't fiddle with the order of subpaths when the Append has
1297  * pathkeys. The order they're listed in is critical to keeping the
1298  * pathkeys valid.
1299  */
1300  Assert(pathkeys == NIL);
1301 
1302  list_sort(subpaths, append_total_cost_compare);
1303  list_sort(partial_subpaths, append_startup_cost_compare);
1304  }
1305  pathnode->first_partial_path = list_length(subpaths);
1306  pathnode->subpaths = list_concat(subpaths, partial_subpaths);
1307 
1308  /*
1309  * Apply query-wide LIMIT if known and path is for sole base relation.
1310  * (Handling this at this low level is a bit klugy.)
1311  */
1312  if (root != NULL && bms_equal(rel->relids, root->all_query_rels))
1313  pathnode->limit_tuples = root->limit_tuples;
1314  else
1315  pathnode->limit_tuples = -1.0;
1316 
1317  foreach(l, pathnode->subpaths)
1318  {
1319  Path *subpath = (Path *) lfirst(l);
1320 
1321  pathnode->path.parallel_safe = pathnode->path.parallel_safe &&
1322  subpath->parallel_safe;
1323 
1324  /* All child paths must have same parameterization */
1325  Assert(bms_equal(PATH_REQ_OUTER(subpath), required_outer));
1326  }
1327 
1328  Assert(!parallel_aware || pathnode->path.parallel_safe);
1329 
1330  /*
1331  * If there's exactly one child path then the output of the Append is
1332  * necessarily ordered the same as the child's, so we can inherit the
1333  * child's pathkeys if any, overriding whatever the caller might've said.
1334  * Furthermore, if the child's parallel awareness matches the Append's,
1335  * then the Append is a no-op and will be discarded later (in setrefs.c).
1336  * Then we can inherit the child's size and cost too, effectively charging
1337  * zero for the Append. Otherwise, we must do the normal costsize
1338  * calculation.
1339  */
1340  if (list_length(pathnode->subpaths) == 1)
1341  {
1342  Path *child = (Path *) linitial(pathnode->subpaths);
1343 
1344  if (child->parallel_aware == parallel_aware)
1345  {
1346  pathnode->path.rows = child->rows;
1347  pathnode->path.startup_cost = child->startup_cost;
1348  pathnode->path.total_cost = child->total_cost;
1349  }
1350  else
1351  cost_append(pathnode);
1352  /* Must do this last, else cost_append complains */
1353  pathnode->path.pathkeys = child->pathkeys;
1354  }
1355  else
1356  cost_append(pathnode);
1357 
1358  /* If the caller provided a row estimate, override the computed value. */
1359  if (rows >= 0)
1360  pathnode->path.rows = rows;
1361 
1362  return pathnode;
1363 }
void cost_append(AppendPath *apath)
Definition: costsize.c:2231
void list_sort(List *list, list_sort_comparator cmp)
Definition: list.c:1674
List * list_concat(List *list1, const List *list2)
Definition: list.c:561
static int append_startup_cost_compare(const ListCell *a, const ListCell *b)
Definition: pathnode.c:1397
static int append_total_cost_compare(const ListCell *a, const ListCell *b)
Definition: pathnode.c:1375
#define linitial(l)
Definition: pg_list.h:178
ParamPathInfo * get_baserel_parampathinfo(PlannerInfo *root, RelOptInfo *baserel, Relids required_outer)
Definition: relnode.c:1557
ParamPathInfo * get_appendrel_parampathinfo(RelOptInfo *appendrel, Relids required_outer)
Definition: relnode.c:1868
int first_partial_path
Definition: pathnodes.h:1923
Cardinality limit_tuples
Definition: pathnodes.h:1924
List * subpaths
Definition: pathnodes.h:1921

References append_startup_cost_compare(), append_total_cost_compare(), Assert, bms_equal(), RelOptInfo::consider_parallel, cost_append(), AppendPath::first_partial_path, get_appendrel_parampathinfo(), get_baserel_parampathinfo(), lfirst, AppendPath::limit_tuples, linitial, list_concat(), list_length(), list_sort(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, AppendPath::path, PATH_REQ_OUTER, Path::pathkeys, Path::pathtype, RelOptInfo::relids, RELOPT_BASEREL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, Path::rows, Path::startup_cost, subpath(), AppendPath::subpaths, and Path::total_cost.

Referenced by add_paths_to_append_rel(), create_degenerate_grouping_paths(), generate_nonunion_paths(), generate_orderedappend_paths(), generate_union_paths(), mark_dummy_rel(), reparameterize_path(), and set_dummy_rel_pathlist().
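
A hedged sketch of how a caller along the lines of add_paths_to_append_rel() might assemble a plain, non-parallel Append from each surviving child's cheapest total path; live_childrels is an assumed list of non-dummy child RelOptInfos, and rows = -1 lets cost_append() compute the estimate:

 /* Sketch only, not the exact allpaths.c code. */
 List     *subpaths = NIL;
 ListCell *lc;

 foreach(lc, live_childrels)
 {
     RelOptInfo *childrel = (RelOptInfo *) lfirst(lc);

     subpaths = lappend(subpaths, childrel->cheapest_total_path);
 }

 add_path(rel, (Path *)
          create_append_path(root, rel, subpaths, NIL /* partial_subpaths */,
                             NIL /* pathkeys */, NULL /* required_outer */,
                             0 /* parallel_workers */, false /* parallel_aware */,
                             -1 /* rows: let cost_append() estimate */));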

◆ create_bitmap_and_path()

BitmapAndPath* create_bitmap_and_path ( PlannerInfo *root,
RelOptInfo *rel,
List *bitmapquals 
)

Definition at line 1075 of file pathnode.c.

1078 {
1079  BitmapAndPath *pathnode = makeNode(BitmapAndPath);
1080  Relids required_outer = NULL;
1081  ListCell *lc;
1082 
1083  pathnode->path.pathtype = T_BitmapAnd;
1084  pathnode->path.parent = rel;
1085  pathnode->path.pathtarget = rel->reltarget;
1086 
1087  /*
1088  * Identify the required outer rels as the union of what the child paths
1089  * depend on. (Alternatively, we could insist that the caller pass this
1090  * in, but it's more convenient and reliable to compute it here.)
1091  */
1092  foreach(lc, bitmapquals)
1093  {
1094  Path *bitmapqual = (Path *) lfirst(lc);
1095 
1096  required_outer = bms_add_members(required_outer,
1097  PATH_REQ_OUTER(bitmapqual));
1098  }
1099  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1100  required_outer);
1101 
1102  /*
1103  * Currently, a BitmapHeapPath, BitmapAndPath, or BitmapOrPath will be
1104  * parallel-safe if and only if rel->consider_parallel is set. So, we can
1105  * set the flag for this path based only on the relation-level flag,
1106  * without actually iterating over the list of children.
1107  */
1108  pathnode->path.parallel_aware = false;
1109  pathnode->path.parallel_safe = rel->consider_parallel;
1110  pathnode->path.parallel_workers = 0;
1111 
1112  pathnode->path.pathkeys = NIL; /* always unordered */
1113 
1114  pathnode->bitmapquals = bitmapquals;
1115 
1116  /* this sets bitmapselectivity as well as the regular cost fields: */
1117  cost_bitmap_and_node(pathnode, root);
1118 
1119  return pathnode;
1120 }
Bitmapset * bms_add_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:917
void cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
Definition: costsize.c:1157
List * bitmapquals
Definition: pathnodes.h:1786

References BitmapAndPath::bitmapquals, bms_add_members(), RelOptInfo::consider_parallel, cost_bitmap_and_node(), get_baserel_parampathinfo(), lfirst, makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, BitmapAndPath::path, PATH_REQ_OUTER, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by bitmap_and_cost_est(), and choose_bitmap_and().

◆ create_bitmap_heap_path()

BitmapHeapPath* create_bitmap_heap_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *bitmapqual,
Relids  required_outer,
double  loop_count,
int  parallel_degree 
)

Definition at line 1042 of file pathnode.c.

1048 {
1049  BitmapHeapPath *pathnode = makeNode(BitmapHeapPath);
1050 
1051  pathnode->path.pathtype = T_BitmapHeapScan;
1052  pathnode->path.parent = rel;
1053  pathnode->path.pathtarget = rel->reltarget;
1054  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1055  required_outer);
1056  pathnode->path.parallel_aware = (parallel_degree > 0);
1057  pathnode->path.parallel_safe = rel->consider_parallel;
1058  pathnode->path.parallel_workers = parallel_degree;
1059  pathnode->path.pathkeys = NIL; /* always unordered */
1060 
1061  pathnode->bitmapqual = bitmapqual;
1062 
1063  cost_bitmap_heap_scan(&pathnode->path, root, rel,
1064  pathnode->path.param_info,
1065  bitmapqual, loop_count);
1066 
1067  return pathnode;
1068 }
void cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info, Path *bitmapqual, double loop_count)
Definition: costsize.c:1013
Path * bitmapqual
Definition: pathnodes.h:1774

References BitmapHeapPath::bitmapqual, RelOptInfo::consider_parallel, cost_bitmap_heap_scan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, BitmapHeapPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by create_index_paths(), create_partial_bitmap_paths(), and reparameterize_path().
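
A hedged sketch of how these bitmap constructors combine: AND two index bitmap quals, then scan the heap with the combined bitmap, roughly what choose_bitmap_and() and create_index_paths() arrange; ipath1 and ipath2 are assumed IndexPaths already built for rel:

 /* Sketch only, not the exact indxpath.c code. */
 List          *quals = list_make2(ipath1, ipath2);
 BitmapAndPath *andpath = create_bitmap_and_path(root, rel, quals);

 add_path(rel, (Path *)
          create_bitmap_heap_path(root, rel, (Path *) andpath,
                                  rel->lateral_relids,
                                  1.0 /* loop_count */,
                                  0 /* parallel_degree */));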

◆ create_bitmap_or_path()

BitmapOrPath* create_bitmap_or_path ( PlannerInfo *root,
RelOptInfo *rel,
List *bitmapquals 
)

Definition at line 1127 of file pathnode.c.

1130 {
1131  BitmapOrPath *pathnode = makeNode(BitmapOrPath);
1132  Relids required_outer = NULL;
1133  ListCell *lc;
1134 
1135  pathnode->path.pathtype = T_BitmapOr;
1136  pathnode->path.parent = rel;
1137  pathnode->path.pathtarget = rel->reltarget;
1138 
1139  /*
1140  * Identify the required outer rels as the union of what the child paths
1141  * depend on. (Alternatively, we could insist that the caller pass this
1142  * in, but it's more convenient and reliable to compute it here.)
1143  */
1144  foreach(lc, bitmapquals)
1145  {
1146  Path *bitmapqual = (Path *) lfirst(lc);
1147 
1148  required_outer = bms_add_members(required_outer,
1149  PATH_REQ_OUTER(bitmapqual));
1150  }
1151  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1152  required_outer);
1153 
1154  /*
1155  * Currently, a BitmapHeapPath, BitmapAndPath, or BitmapOrPath will be
1156  * parallel-safe if and only if rel->consider_parallel is set. So, we can
1157  * set the flag for this path based only on the relation-level flag,
1158  * without actually iterating over the list of children.
1159  */
1160  pathnode->path.parallel_aware = false;
1161  pathnode->path.parallel_safe = rel->consider_parallel;
1162  pathnode->path.parallel_workers = 0;
1163 
1164  pathnode->path.pathkeys = NIL; /* always unordered */
1165 
1166  pathnode->bitmapquals = bitmapquals;
1167 
1168  /* this sets bitmapselectivity as well as the regular cost fields: */
1169  cost_bitmap_or_node(pathnode, root);
1170 
1171  return pathnode;
1172 }
void cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
Definition: costsize.c:1201
List * bitmapquals
Definition: pathnodes.h:1799

References BitmapOrPath::bitmapquals, bms_add_members(), RelOptInfo::consider_parallel, cost_bitmap_or_node(), get_baserel_parampathinfo(), lfirst, makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, BitmapOrPath::path, PATH_REQ_OUTER, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by generate_bitmap_or_paths().

◆ create_ctescan_path()

Path* create_ctescan_path ( PlannerInfo *root,
RelOptInfo *rel,
List *pathkeys,
Relids  required_outer 
)

Definition at line 2124 of file pathnode.c.

2126 {
2127  Path *pathnode = makeNode(Path);
2128 
2129  pathnode->pathtype = T_CteScan;
2130  pathnode->parent = rel;
2131  pathnode->pathtarget = rel->reltarget;
2132  pathnode->param_info = get_baserel_parampathinfo(root, rel,
2133  required_outer);
2134  pathnode->parallel_aware = false;
2135  pathnode->parallel_safe = rel->consider_parallel;
2136  pathnode->parallel_workers = 0;
2137  pathnode->pathkeys = pathkeys;
2138 
2139  cost_ctescan(pathnode, root, rel, pathnode->param_info);
2140 
2141  return pathnode;
2142 }
void cost_ctescan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1698

References RelOptInfo::consider_parallel, cost_ctescan(), get_baserel_parampathinfo(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_cte_pathlist().

◆ create_foreign_join_path()

ForeignPath* create_foreign_join_path ( PlannerInfo *root,
RelOptInfo *rel,
PathTarget *target,
double  rows,
Cost  startup_cost,
Cost  total_cost,
List *pathkeys,
Relids  required_outer,
Path *fdw_outerpath,
List *fdw_restrictinfo,
List *fdw_private 
)

Definition at line 2281 of file pathnode.c.

2289 {
2290  ForeignPath *pathnode = makeNode(ForeignPath);
2291 
2292  /*
2293  * We should use get_joinrel_parampathinfo to handle parameterized paths,
2294  * but the API of this function doesn't support it, and existing
2295  * extensions aren't yet trying to build such paths anyway. For the
2296  * moment just throw an error if someone tries it; eventually we should
2297  * revisit this.
2298  */
2299  if (!bms_is_empty(required_outer) || !bms_is_empty(rel->lateral_relids))
2300  elog(ERROR, "parameterized foreign joins are not supported yet");
2301 
2302  pathnode->path.pathtype = T_ForeignScan;
2303  pathnode->path.parent = rel;
2304  pathnode->path.pathtarget = target ? target : rel->reltarget;
2305  pathnode->path.param_info = NULL; /* XXX see above */
2306  pathnode->path.parallel_aware = false;
2307  pathnode->path.parallel_safe = rel->consider_parallel;
2308  pathnode->path.parallel_workers = 0;
2309  pathnode->path.rows = rows;
2310  pathnode->path.startup_cost = startup_cost;
2311  pathnode->path.total_cost = total_cost;
2312  pathnode->path.pathkeys = pathkeys;
2313 
2314  pathnode->fdw_outerpath = fdw_outerpath;
2315  pathnode->fdw_restrictinfo = fdw_restrictinfo;
2316  pathnode->fdw_private = fdw_private;
2317 
2318  return pathnode;
2319 }
#define bms_is_empty(a)
Definition: bitmapset.h:118
Path * fdw_outerpath
Definition: pathnodes.h:1859
List * fdw_restrictinfo
Definition: pathnodes.h:1860
List * fdw_private
Definition: pathnodes.h:1861

References bms_is_empty, RelOptInfo::consider_parallel, elog, ERROR, ForeignPath::fdw_outerpath, ForeignPath::fdw_private, ForeignPath::fdw_restrictinfo, RelOptInfo::lateral_relids, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ForeignPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, Path::rows, Path::startup_cost, and Path::total_cost.

Referenced by add_paths_with_pathkeys_for_rel(), and postgresGetForeignJoinPaths().

◆ create_foreign_upper_path()

ForeignPath* create_foreign_upper_path ( PlannerInfo *root,
RelOptInfo *rel,
PathTarget *target,
double  rows,
Cost  startup_cost,
Cost  total_cost,
List *pathkeys,
Path *fdw_outerpath,
List *fdw_restrictinfo,
List *fdw_private 
)

Definition at line 2333 of file pathnode.c.

2340 {
2341  ForeignPath *pathnode = makeNode(ForeignPath);
2342 
2343  /*
2344  * Upper relations should never have any lateral references, since joining
2345  * is complete.
2346  */
2347  Assert(bms_is_empty(rel->lateral_relids));
2348 
2349  pathnode->path.pathtype = T_ForeignScan;
2350  pathnode->path.parent = rel;
2351  pathnode->path.pathtarget = target ? target : rel->reltarget;
2352  pathnode->path.param_info = NULL;
2353  pathnode->path.parallel_aware = false;
2354  pathnode->path.parallel_safe = rel->consider_parallel;
2355  pathnode->path.parallel_workers = 0;
2356  pathnode->path.rows = rows;
2357  pathnode->path.startup_cost = startup_cost;
2358  pathnode->path.total_cost = total_cost;
2359  pathnode->path.pathkeys = pathkeys;
2360 
2361  pathnode->fdw_outerpath = fdw_outerpath;
2362  pathnode->fdw_restrictinfo = fdw_restrictinfo;
2363  pathnode->fdw_private = fdw_private;
2364 
2365  return pathnode;
2366 }

References Assert, bms_is_empty, RelOptInfo::consider_parallel, ForeignPath::fdw_outerpath, ForeignPath::fdw_private, ForeignPath::fdw_restrictinfo, RelOptInfo::lateral_relids, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ForeignPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, Path::rows, Path::startup_cost, and Path::total_cost.

Referenced by add_foreign_final_paths(), add_foreign_grouping_paths(), and add_foreign_ordered_paths().

◆ create_foreignscan_path()

ForeignPath* create_foreignscan_path ( PlannerInfo *root,
RelOptInfo *rel,
PathTarget *target,
double  rows,
Cost  startup_cost,
Cost  total_cost,
List *pathkeys,
Relids  required_outer,
Path *fdw_outerpath,
List *fdw_restrictinfo,
List *fdw_private 
)

Definition at line 2235 of file pathnode.c.

2243 {
2244  ForeignPath *pathnode = makeNode(ForeignPath);
2245 
2246  /* Historically some FDWs were confused about when to use this */
2247  Assert(IS_SIMPLE_REL(rel));
2248 
2249  pathnode->path.pathtype = T_ForeignScan;
2250  pathnode->path.parent = rel;
2251  pathnode->path.pathtarget = target ? target : rel->reltarget;
2252  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
2253  required_outer);
2254  pathnode->path.parallel_aware = false;
2255  pathnode->path.parallel_safe = rel->consider_parallel;
2256  pathnode->path.parallel_workers = 0;
2257  pathnode->path.rows = rows;
2258  pathnode->path.startup_cost = startup_cost;
2259  pathnode->path.total_cost = total_cost;
2260  pathnode->path.pathkeys = pathkeys;
2261 
2262  pathnode->fdw_outerpath = fdw_outerpath;
2263  pathnode->fdw_restrictinfo = fdw_restrictinfo;
2264  pathnode->fdw_private = fdw_private;
2265 
2266  return pathnode;
2267 }
#define IS_SIMPLE_REL(rel)
Definition: pathnodes.h:829

References Assert, RelOptInfo::consider_parallel, ForeignPath::fdw_outerpath, ForeignPath::fdw_private, ForeignPath::fdw_restrictinfo, get_baserel_parampathinfo(), IS_SIMPLE_REL, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ForeignPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, root, Path::rows, Path::startup_cost, and Path::total_cost.

Referenced by add_paths_with_pathkeys_for_rel(), fileGetForeignPaths(), and postgresGetForeignPaths().
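
A hedged sketch of the classic use from a foreign data wrapper's GetForeignPaths callback; the callback name and the cost figures are placeholders, not a real extension:

 /* Sketch only: offer one basic, unparameterized, unordered foreign path. */
 static void
 myGetForeignPaths(PlannerInfo *root, RelOptInfo *baserel, Oid foreigntableid)
 {
     Cost startup_cost = 10.0;   /* placeholder estimate */
     Cost total_cost = startup_cost + baserel->rows * cpu_tuple_cost;

     add_path(baserel, (Path *)
              create_foreignscan_path(root, baserel,
                                      NULL,        /* default pathtarget */
                                      baserel->rows,
                                      startup_cost,
                                      total_cost,
                                      NIL,         /* no pathkeys */
                                      NULL,        /* no required_outer */
                                      NULL,        /* no fdw_outerpath */
                                      NIL,         /* no fdw_restrictinfo */
                                      NIL));       /* no fdw_private */
 }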

◆ create_functionscan_path()

Path* create_functionscan_path ( PlannerInfo *root,
RelOptInfo *rel,
List *pathkeys,
Relids  required_outer 
)

Definition at line 2046 of file pathnode.c.

2048 {
2049  Path *pathnode = makeNode(Path);
2050 
2051  pathnode->pathtype = T_FunctionScan;
2052  pathnode->parent = rel;
2053  pathnode->pathtarget = rel->reltarget;
2054  pathnode->param_info = get_baserel_parampathinfo(root, rel,
2055  required_outer);
2056  pathnode->parallel_aware = false;
2057  pathnode->parallel_safe = rel->consider_parallel;
2058  pathnode->parallel_workers = 0;
2059  pathnode->pathkeys = pathkeys;
2060 
2061  cost_functionscan(pathnode, root, rel, pathnode->param_info);
2062 
2063  return pathnode;
2064 }
void cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1531

References RelOptInfo::consider_parallel, cost_functionscan(), get_baserel_parampathinfo(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_function_pathlist().

◆ create_gather_merge_path()

GatherMergePath* create_gather_merge_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
PathTarget *target,
List *pathkeys,
Relids  required_outer,
double *  rows 
)

Definition at line 1881 of file pathnode.c.

1884 {
1885  GatherMergePath *pathnode = makeNode(GatherMergePath);
1886  Cost input_startup_cost = 0;
1887  Cost input_total_cost = 0;
1888 
1889  Assert(subpath->parallel_safe);
1890  Assert(pathkeys);
1891 
1892  pathnode->path.pathtype = T_GatherMerge;
1893  pathnode->path.parent = rel;
1894  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1895  required_outer);
1896  pathnode->path.parallel_aware = false;
1897 
1898  pathnode->subpath = subpath;
1899  pathnode->num_workers = subpath->parallel_workers;
1900  pathnode->path.pathkeys = pathkeys;
1901  pathnode->path.pathtarget = target ? target : rel->reltarget;
1902  pathnode->path.rows += subpath->rows;
1903 
1904  if (pathkeys_contained_in(pathkeys, subpath->pathkeys))
1905  {
1906  /* Subpath is adequately ordered, we won't need to sort it */
1907  input_startup_cost += subpath->startup_cost;
1908  input_total_cost += subpath->total_cost;
1909  }
1910  else
1911  {
1912  /* We'll need to insert a Sort node, so include cost for that */
1913  Path sort_path; /* dummy for result of cost_sort */
1914 
1915  cost_sort(&sort_path,
1916  root,
1917  pathkeys,
1918  subpath->total_cost,
1919  subpath->rows,
1920  subpath->pathtarget->width,
1921  0.0,
1922  work_mem,
1923  -1);
1924  input_startup_cost += sort_path.startup_cost;
1925  input_total_cost += sort_path.total_cost;
1926  }
1927 
1928  cost_gather_merge(pathnode, root, rel, pathnode->path.param_info,
1929  input_startup_cost, input_total_cost, rows);
1930 
1931  return pathnode;
1932 }
void cost_gather_merge(GatherMergePath *path, PlannerInfo *root, RelOptInfo *rel, ParamPathInfo *param_info, Cost input_startup_cost, Cost input_total_cost, double *rows)
Definition: costsize.c:474
void cost_sort(Path *path, PlannerInfo *root, List *pathkeys, Cost input_cost, double tuples, int width, Cost comparison_cost, int sort_mem, double limit_tuples)
Definition: costsize.c:2124
int work_mem
Definition: globals.c:128
bool pathkeys_contained_in(List *keys1, List *keys2)
Definition: pathkeys.c:341

References Assert, cost_gather_merge(), cost_sort(), get_baserel_parampathinfo(), makeNode, GatherMergePath::num_workers, Path::parallel_aware, GatherMergePath::path, Path::pathkeys, pathkeys_contained_in(), Path::pathtype, RelOptInfo::reltarget, root, Path::rows, Path::startup_cost, subpath(), GatherMergePath::subpath, Path::total_cost, and work_mem.

Referenced by create_ordered_paths(), gather_grouping_paths(), generate_gather_paths(), and generate_useful_gather_paths().
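
A hedged sketch of a typical call, along the lines of what generate_useful_gather_paths() or gather_grouping_paths() do with an already-sorted partial path; sorted_partial is an assumed partial path whose pathkeys match root->sort_pathkeys, and total_groups is only a rough row estimate:

 /* Sketch only, not the exact planner code. */
 double total_groups = sorted_partial->rows * sorted_partial->parallel_workers;

 add_path(rel, (Path *)
          create_gather_merge_path(root, rel, sorted_partial,
                                   rel->reltarget, root->sort_pathkeys,
                                   NULL /* required_outer */, &total_groups));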

◆ create_gather_path()

GatherPath* create_gather_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
PathTarget *target,
Relids  required_outer,
double *  rows 
)

Definition at line 1972 of file pathnode.c.

1974 {
1975  GatherPath *pathnode = makeNode(GatherPath);
1976 
1977  Assert(subpath->parallel_safe);
1978 
1979  pathnode->path.pathtype = T_Gather;
1980  pathnode->path.parent = rel;
1981  pathnode->path.pathtarget = target;
1982  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1983  required_outer);
1984  pathnode->path.parallel_aware = false;
1985  pathnode->path.parallel_safe = false;
1986  pathnode->path.parallel_workers = 0;
1987  pathnode->path.pathkeys = NIL; /* Gather has unordered result */
1988 
1989  pathnode->subpath = subpath;
1990  pathnode->num_workers = subpath->parallel_workers;
1991  pathnode->single_copy = false;
1992 
1993  if (pathnode->num_workers == 0)
1994  {
1995  pathnode->path.pathkeys = subpath->pathkeys;
1996  pathnode->num_workers = 1;
1997  pathnode->single_copy = true;
1998  }
1999 
2000  cost_gather(pathnode, root, rel, pathnode->path.param_info, rows);
2001 
2002  return pathnode;
2003 }
void cost_gather(GatherPath *path, PlannerInfo *root, RelOptInfo *rel, ParamPathInfo *param_info, double *rows)
Definition: costsize.c:436
bool single_copy
Definition: pathnodes.h:2032
int num_workers
Definition: pathnodes.h:2033

References Assert, cost_gather(), get_baserel_parampathinfo(), makeNode, NIL, GatherPath::num_workers, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, GatherPath::path, Path::pathkeys, Path::pathtype, root, GatherPath::single_copy, subpath(), and GatherPath::subpath.

Referenced by generate_gather_paths(), and generate_union_paths().
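
A hedged sketch of the common pattern, roughly what generate_gather_paths() does with the cheapest partial path; the row estimate here is only illustrative, since cost_gather() refines it:

 /* Sketch only, not the exact allpaths.c code. */
 Path   *cheapest_partial = (Path *) linitial(rel->partial_pathlist);
 double  rows = cheapest_partial->rows * cheapest_partial->parallel_workers;

 add_path(rel, (Path *)
          create_gather_path(root, rel, cheapest_partial,
                             rel->reltarget, NULL /* required_outer */, &rows));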

◆ create_group_path()

GroupPath* create_group_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
List *groupClause,
List *qual,
double  numGroups 
)

Definition at line 3044 of file pathnode.c.

3050 {
3051  GroupPath *pathnode = makeNode(GroupPath);
3052  PathTarget *target = rel->reltarget;
3053 
3054  pathnode->path.pathtype = T_Group;
3055  pathnode->path.parent = rel;
3056  pathnode->path.pathtarget = target;
3057  /* For now, assume we are above any joins, so no parameterization */
3058  pathnode->path.param_info = NULL;
3059  pathnode->path.parallel_aware = false;
3060  pathnode->path.parallel_safe = rel->consider_parallel &&
3061  subpath->parallel_safe;
3062  pathnode->path.parallel_workers = subpath->parallel_workers;
3063  /* Group doesn't change sort ordering */
3064  pathnode->path.pathkeys = subpath->pathkeys;
3065 
3066  pathnode->subpath = subpath;
3067 
3068  pathnode->groupClause = groupClause;
3069  pathnode->qual = qual;
3070 
3071  cost_group(&pathnode->path, root,
3072  list_length(groupClause),
3073  numGroups,
3074  qual,
3075  subpath->startup_cost, subpath->total_cost,
3076  subpath->rows);
3077 
3078  /* add tlist eval cost for each output row */
3079  pathnode->path.startup_cost += target->cost.startup;
3080  pathnode->path.total_cost += target->cost.startup +
3081  target->cost.per_tuple * pathnode->path.rows;
3082 
3083  return pathnode;
3084 }
void cost_group(Path *path, PlannerInfo *root, int numGroupCols, double numGroups, List *quals, Cost input_startup_cost, Cost input_total_cost, double input_tuples)
Definition: costsize.c:3163
List * qual
Definition: pathnodes.h:2217
List * groupClause
Definition: pathnodes.h:2216
Path * subpath
Definition: pathnodes.h:2215
Path path
Definition: pathnodes.h:2214

References RelOptInfo::consider_parallel, PathTarget::cost, cost_group(), GroupPath::groupClause, list_length(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, GroupPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, GroupPath::qual, RelOptInfo::reltarget, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), GroupPath::subpath, and Path::total_cost.

Referenced by add_paths_to_grouping_rel(), and create_partial_grouping_paths().

◆ create_group_result_path()

GroupResultPath* create_group_result_path ( PlannerInfo *root,
RelOptInfo *rel,
PathTarget *target,
List *havingqual 
)

Definition at line 1518 of file pathnode.c.

1520 {
1521  GroupResultPath *pathnode = makeNode(GroupResultPath);
1522 
1523  pathnode->path.pathtype = T_Result;
1524  pathnode->path.parent = rel;
1525  pathnode->path.pathtarget = target;
1526  pathnode->path.param_info = NULL; /* there are no other rels... */
1527  pathnode->path.parallel_aware = false;
1528  pathnode->path.parallel_safe = rel->consider_parallel;
1529  pathnode->path.parallel_workers = 0;
1530  pathnode->path.pathkeys = NIL;
1531  pathnode->quals = havingqual;
1532 
1533  /*
1534  * We can't quite use cost_resultscan() because the quals we want to
1535  * account for are not baserestrict quals of the rel. Might as well just
1536  * hack it here.
1537  */
1538  pathnode->path.rows = 1;
1539  pathnode->path.startup_cost = target->cost.startup;
1540  pathnode->path.total_cost = target->cost.startup +
1541  cpu_tuple_cost + target->cost.per_tuple;
1542 
1543  /*
1544  * Add cost of qual, if any --- but we ignore its selectivity, since our
1545  * rowcount estimate should be 1 no matter what the qual is.
1546  */
1547  if (havingqual)
1548  {
1549  QualCost qual_cost;
1550 
1551  cost_qual_eval(&qual_cost, havingqual, root);
1552  /* havingqual is evaluated once at startup */
1553  pathnode->path.startup_cost += qual_cost.startup + qual_cost.per_tuple;
1554  pathnode->path.total_cost += qual_cost.startup + qual_cost.per_tuple;
1555  }
1556 
1557  return pathnode;
1558 }
double cpu_tuple_cost
Definition: costsize.c:121
void cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
Definition: costsize.c:4640

References RelOptInfo::consider_parallel, PathTarget::cost, cost_qual_eval(), cpu_tuple_cost, makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, GroupResultPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, GroupResultPath::quals, root, Path::rows, QualCost::startup, Path::startup_cost, and Path::total_cost.

Referenced by create_degenerate_grouping_paths(), and query_planner().

◆ create_groupingsets_path()

GroupingSetsPath* create_groupingsets_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
List *having_qual,
AggStrategy  aggstrategy,
List *rollups,
const AggClauseCosts *agg_costs 
)

Definition at line 3237 of file pathnode.c.

3244 {
3245  GroupingSetsPath *pathnode = makeNode(GroupingSetsPath);
3246  PathTarget *target = rel->reltarget;
3247  ListCell *lc;
3248  bool is_first = true;
3249  bool is_first_sort = true;
3250 
3251  /* The topmost generated Plan node will be an Agg */
3252  pathnode->path.pathtype = T_Agg;
3253  pathnode->path.parent = rel;
3254  pathnode->path.pathtarget = target;
3255  pathnode->path.param_info = subpath->param_info;
3256  pathnode->path.parallel_aware = false;
3257  pathnode->path.parallel_safe = rel->consider_parallel &&
3258  subpath->parallel_safe;
3259  pathnode->path.parallel_workers = subpath->parallel_workers;
3260  pathnode->subpath = subpath;
3261 
3262  /*
3263  * Simplify callers by downgrading AGG_SORTED to AGG_PLAIN, and AGG_MIXED
3264  * to AGG_HASHED, here if possible.
3265  */
3266  if (aggstrategy == AGG_SORTED &&
3267  list_length(rollups) == 1 &&
3268  ((RollupData *) linitial(rollups))->groupClause == NIL)
3269  aggstrategy = AGG_PLAIN;
3270 
3271  if (aggstrategy == AGG_MIXED &&
3272  list_length(rollups) == 1)
3273  aggstrategy = AGG_HASHED;
3274 
3275  /*
3276  * Output will be in sorted order by group_pathkeys if, and only if, there
3277  * is a single rollup operation on a non-empty list of grouping
3278  * expressions.
3279  */
3280  if (aggstrategy == AGG_SORTED && list_length(rollups) == 1)
3281  pathnode->path.pathkeys = root->group_pathkeys;
3282  else
3283  pathnode->path.pathkeys = NIL;
3284 
3285  pathnode->aggstrategy = aggstrategy;
3286  pathnode->rollups = rollups;
3287  pathnode->qual = having_qual;
3288  pathnode->transitionSpace = agg_costs ? agg_costs->transitionSpace : 0;
3289 
3290  Assert(rollups != NIL);
3291  Assert(aggstrategy != AGG_PLAIN || list_length(rollups) == 1);
3292  Assert(aggstrategy != AGG_MIXED || list_length(rollups) > 1);
3293 
3294  foreach(lc, rollups)
3295  {
3296  RollupData *rollup = lfirst(lc);
3297  List *gsets = rollup->gsets;
3298  int numGroupCols = list_length(linitial(gsets));
3299 
3300  /*
3301  * In AGG_SORTED or AGG_PLAIN mode, the first rollup takes the
3302  * (already-sorted) input, and following ones do their own sort.
3303  *
3304  * In AGG_HASHED mode, there is one rollup for each grouping set.
3305  *
3306  * In AGG_MIXED mode, the first rollups are hashed, the first
3307  * non-hashed one takes the (already-sorted) input, and following ones
3308  * do their own sort.
3309  */
3310  if (is_first)
3311  {
3312  cost_agg(&pathnode->path, root,
3313  aggstrategy,
3314  agg_costs,
3315  numGroupCols,
3316  rollup->numGroups,
3317  having_qual,
3318  subpath->startup_cost,
3319  subpath->total_cost,
3320  subpath->rows,
3321  subpath->pathtarget->width);
3322  is_first = false;
3323  if (!rollup->is_hashed)
3324  is_first_sort = false;
3325  }
3326  else
3327  {
3328  Path sort_path; /* dummy for result of cost_sort */
3329  Path agg_path; /* dummy for result of cost_agg */
3330 
3331  if (rollup->is_hashed || is_first_sort)
3332  {
3333  /*
3334  * Account for cost of aggregation, but don't charge input
3335  * cost again
3336  */
3337  cost_agg(&agg_path, root,
3338  rollup->is_hashed ? AGG_HASHED : AGG_SORTED,
3339  agg_costs,
3340  numGroupCols,
3341  rollup->numGroups,
3342  having_qual,
3343  0.0, 0.0,
3344  subpath->rows,
3345  subpath->pathtarget->width);
3346  if (!rollup->is_hashed)
3347  is_first_sort = false;
3348  }
3349  else
3350  {
3351  /* Account for cost of sort, but don't charge input cost again */
3352  cost_sort(&sort_path, root, NIL,
3353  0.0,
3354  subpath->rows,
3355  subpath->pathtarget->width,
3356  0.0,
3357  work_mem,
3358  -1.0);
3359 
3360  /* Account for cost of aggregation */
3361 
3362  cost_agg(&agg_path, root,
3363  AGG_SORTED,
3364  agg_costs,
3365  numGroupCols,
3366  rollup->numGroups,
3367  having_qual,
3368  sort_path.startup_cost,
3369  sort_path.total_cost,
3370  sort_path.rows,
3371  subpath->pathtarget->width);
3372  }
3373 
3374  pathnode->path.total_cost += agg_path.total_cost;
3375  pathnode->path.rows += agg_path.rows;
3376  }
3377  }
3378 
3379  /* add tlist eval cost for each output row */
3380  pathnode->path.startup_cost += target->cost.startup;
3381  pathnode->path.total_cost += target->cost.startup +
3382  target->cost.per_tuple * pathnode->path.rows;
3383 
3384  return pathnode;
3385 }
@ AGG_HASHED
Definition: nodes.h:355
@ AGG_MIXED
Definition: nodes.h:356
@ AGG_PLAIN
Definition: nodes.h:353
uint64 transitionSpace
Definition: pathnodes.h:2289
AggStrategy aggstrategy
Definition: pathnodes.h:2286
Cardinality numGroups
Definition: pathnodes.h:2273
List * gsets
Definition: pathnodes.h:2271
bool is_hashed
Definition: pathnodes.h:2275

References AGG_HASHED, AGG_MIXED, AGG_PLAIN, AGG_SORTED, GroupingSetsPath::aggstrategy, Assert, RelOptInfo::consider_parallel, PathTarget::cost, cost_agg(), cost_sort(), RollupData::gsets, RollupData::is_hashed, lfirst, linitial, list_length(), makeNode, NIL, RollupData::numGroups, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, GroupingSetsPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, GroupingSetsPath::qual, RelOptInfo::reltarget, GroupingSetsPath::rollups, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), GroupingSetsPath::subpath, Path::total_cost, AggClauseCosts::transitionSpace, GroupingSetsPath::transitionSpace, and work_mem.

Referenced by consider_groupingsets_paths().

◆ create_hashjoin_path()

HashPath* create_hashjoin_path ( PlannerInfo *root,
RelOptInfo *joinrel,
JoinType  jointype,
JoinCostWorkspace *workspace,
JoinPathExtraData *extra,
Path *outer_path,
Path *inner_path,
bool  parallel_hash,
List *restrict_clauses,
Relids  required_outer,
List *hashclauses 
)

Definition at line 2619 of file pathnode.c.

2630 {
2631  HashPath *pathnode = makeNode(HashPath);
2632 
2633  pathnode->jpath.path.pathtype = T_HashJoin;
2634  pathnode->jpath.path.parent = joinrel;
2635  pathnode->jpath.path.pathtarget = joinrel->reltarget;
2636  pathnode->jpath.path.param_info =
2637  get_joinrel_parampathinfo(root,
2638  joinrel,
2639  outer_path,
2640  inner_path,
2641  extra->sjinfo,
2642  required_outer,
2643  &restrict_clauses);
2644  pathnode->jpath.path.parallel_aware =
2645  joinrel->consider_parallel && parallel_hash;
2646  pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2647  outer_path->parallel_safe && inner_path->parallel_safe;
2648  /* This is a foolish way to estimate parallel_workers, but for now... */
2649  pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2650 
2651  /*
2652  * A hashjoin never has pathkeys, since its output ordering is
2653  * unpredictable due to possible batching. XXX If the inner relation is
2654  * small enough, we could instruct the executor that it must not batch,
2655  * and then we could assume that the output inherits the outer relation's
2656  * ordering, which might save a sort step. However there is considerable
2657  * downside if our estimate of the inner relation size is badly off. For
2658  * the moment we don't risk it. (Note also that if we wanted to take this
2659  * seriously, joinpath.c would have to consider many more paths for the
2660  * outer rel than it does now.)
2661  */
2662  pathnode->jpath.path.pathkeys = NIL;
2663  pathnode->jpath.jointype = jointype;
2664  pathnode->jpath.inner_unique = extra->inner_unique;
2665  pathnode->jpath.outerjoinpath = outer_path;
2666  pathnode->jpath.innerjoinpath = inner_path;
2667  pathnode->jpath.joinrestrictinfo = restrict_clauses;
2668  pathnode->path_hashclauses = hashclauses;
2669  /* final_cost_hashjoin will fill in pathnode->num_batches */
2670 
2671  final_cost_hashjoin(root, pathnode, workspace, extra);
2672 
2673  return pathnode;
2674 }

References RelOptInfo::consider_parallel, final_cost_hashjoin(), get_joinrel_parampathinfo(), JoinPath::inner_unique, JoinPathExtraData::inner_unique, JoinPath::innerjoinpath, JoinPath::joinrestrictinfo, JoinPath::jointype, HashPath::jpath, makeNode, NIL, JoinPath::outerjoinpath, Path::parallel_safe, Path::parallel_workers, HashPath::path_hashclauses, RelOptInfo::reltarget, root, and JoinPathExtraData::sjinfo.

Referenced by try_hashjoin_path(), and try_partial_hashjoin_path().
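
To illustrate how this constructor is typically driven, here is a minimal, hypothetical caller written in the style of try_hashjoin_path(); the wrapper name and parameter list are invented for the sketch, and it assumes the caller has already run initial_cost_hashjoin() to fill the JoinCostWorkspace and has computed required_outer:

static void
try_hashjoin_sketch(PlannerInfo *root,
                    RelOptInfo *joinrel,
                    JoinType jointype,
                    JoinCostWorkspace *workspace,
                    JoinPathExtraData *extra,
                    Path *outer_path,
                    Path *inner_path,
                    List *restrict_clauses,
                    List *hashclauses,
                    Relids required_outer)
{
    HashPath   *pathnode;

    /* Build the HashPath; final_cost_hashjoin() runs inside the call. */
    pathnode = create_hashjoin_path(root, joinrel, jointype,
                                    workspace, extra,
                                    outer_path, inner_path,
                                    false,      /* parallel_hash */
                                    restrict_clauses,
                                    required_outer,
                                    hashclauses);

    /* Let it compete against the joinrel's other candidate paths. */
    add_path(joinrel, (Path *) pathnode);
}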

◆ create_incremental_sort_path()

IncrementalSortPath* create_incremental_sort_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Path *  subpath,
List *  pathkeys,
int  presorted_keys,
double  limit_tuples 
)

Definition at line 2951 of file pathnode.c.

2957 {
2958  IncrementalSortPath *sort = makeNode(IncrementalSortPath);
2959  SortPath *pathnode = &sort->spath;
2960 
2961  pathnode->path.pathtype = T_IncrementalSort;
2962  pathnode->path.parent = rel;
2963  /* Sort doesn't project, so use source path's pathtarget */
2964  pathnode->path.pathtarget = subpath->pathtarget;
2965  /* For now, assume we are above any joins, so no parameterization */
2966  pathnode->path.param_info = NULL;
2967  pathnode->path.parallel_aware = false;
2968  pathnode->path.parallel_safe = rel->consider_parallel &&
2969  subpath->parallel_safe;
2970  pathnode->path.parallel_workers = subpath->parallel_workers;
2971  pathnode->path.pathkeys = pathkeys;
2972 
2973  pathnode->subpath = subpath;
2974 
2975  cost_incremental_sort(&pathnode->path,
2976  root, pathkeys, presorted_keys,
2977  subpath->startup_cost,
2978  subpath->total_cost,
2979  subpath->rows,
2980  subpath->pathtarget->width,
2981  0.0, /* XXX comparison_cost shouldn't be 0? */
2982  work_mem, limit_tuples);
2983 
2984  sort->nPresortedCols = presorted_keys;
2985 
2986  return sort;
2987 }

References RelOptInfo::consider_parallel, cost_incremental_sort(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, SortPath::path, Path::pathkeys, Path::pathtype, root, sort(), subpath(), SortPath::subpath, and work_mem.

Referenced by build_setop_child_paths(), create_final_distinct_paths(), create_one_window_path(), create_ordered_paths(), create_partial_distinct_paths(), gather_grouping_paths(), generate_useful_gather_paths(), and make_ordered_path().
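
As an illustration, here is a hedged sketch, modeled loosely on make_ordered_path(), of how a caller chooses between create_incremental_sort_path() and create_sort_path() depending on how many leading pathkeys the input already satisfies. The wrapper name is invented; pathkeys_count_contained_in() and the enable_incremental_sort GUC are existing planner facilities:

static Path *
make_ordered_path_sketch(PlannerInfo *root, RelOptInfo *rel,
                         Path *input_path, List *pathkeys,
                         double limit_tuples)
{
    int         presorted_keys;
    bool        is_sorted;

    is_sorted = pathkeys_count_contained_in(pathkeys, input_path->pathkeys,
                                            &presorted_keys);
    if (is_sorted)
        return input_path;      /* already fully ordered */

    if (presorted_keys > 0 && enable_incremental_sort)
        return (Path *) create_incremental_sort_path(root, rel, input_path,
                                                     pathkeys, presorted_keys,
                                                     limit_tuples);

    /* no usable prefix ordering: sort from scratch */
    return (Path *) create_sort_path(root, rel, input_path,
                                     pathkeys, limit_tuples);
}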

◆ create_index_path()

IndexPath* create_index_path ( PlannerInfo *  root,
IndexOptInfo *  index,
List *  indexclauses,
List *  indexorderbys,
List *  indexorderbycols,
List *  pathkeys,
ScanDirection  indexscandir,
bool  indexonly,
Relids  required_outer,
double  loop_count,
bool  partial_path 
)

Definition at line 993 of file pathnode.c.

1004 {
1005  IndexPath *pathnode = makeNode(IndexPath);
1006  RelOptInfo *rel = index->rel;
1007 
1008  pathnode->path.pathtype = indexonly ? T_IndexOnlyScan : T_IndexScan;
1009  pathnode->path.parent = rel;
1010  pathnode->path.pathtarget = rel->reltarget;
1011  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1012  required_outer);
1013  pathnode->path.parallel_aware = false;
1014  pathnode->path.parallel_safe = rel->consider_parallel;
1015  pathnode->path.parallel_workers = 0;
1016  pathnode->path.pathkeys = pathkeys;
1017 
1018  pathnode->indexinfo = index;
1019  pathnode->indexclauses = indexclauses;
1020  pathnode->indexorderbys = indexorderbys;
1021  pathnode->indexorderbycols = indexorderbycols;
1022  pathnode->indexscandir = indexscandir;
1023 
1024  cost_index(pathnode, root, loop_count, partial_path);
1025 
1026  return pathnode;
1027 }

References RelOptInfo::consider_parallel, cost_index(), get_baserel_parampathinfo(), IndexPath::indexclauses, IndexPath::indexinfo, IndexPath::indexorderbycols, IndexPath::indexorderbys, IndexPath::indexscandir, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, IndexPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by build_index_paths(), and plan_cluster_use_sort().
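
For illustration, a minimal hypothetical call that builds a plain forward index scan with no ORDER BY operators, no parameterization and no claimed output ordering; in build_index_paths() the index clauses and any useful pathkeys would of course be derived from the query:

static IndexPath *
plain_index_path_sketch(PlannerInfo *root, IndexOptInfo *index,
                        List *indexclauses)
{
    return create_index_path(root, index,
                             indexclauses,
                             NIL,                  /* indexorderbys */
                             NIL,                  /* indexorderbycols */
                             NIL,                  /* pathkeys: unordered */
                             ForwardScanDirection,
                             false,                /* indexonly */
                             NULL,                 /* required_outer */
                             1.0,                  /* loop_count */
                             false);               /* partial_path */
}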

◆ create_limit_path()

LimitPath* create_limit_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Path *  subpath,
Node *  limitOffset,
Node *  limitCount,
LimitOption  limitOption,
int64  offset_est,
int64  count_est 
)

Definition at line 3823 of file pathnode.c.

3828 {
3829  LimitPath *pathnode = makeNode(LimitPath);
3830 
3831  pathnode->path.pathtype = T_Limit;
3832  pathnode->path.parent = rel;
3833  /* Limit doesn't project, so use source path's pathtarget */
3834  pathnode->path.pathtarget = subpath->pathtarget;
3835  /* For now, assume we are above any joins, so no parameterization */
3836  pathnode->path.param_info = NULL;
3837  pathnode->path.parallel_aware = false;
3838  pathnode->path.parallel_safe = rel->consider_parallel &&
3839  subpath->parallel_safe;
3840  pathnode->path.parallel_workers = subpath->parallel_workers;
3841  pathnode->path.rows = subpath->rows;
3842  pathnode->path.startup_cost = subpath->startup_cost;
3843  pathnode->path.total_cost = subpath->total_cost;
3844  pathnode->path.pathkeys = subpath->pathkeys;
3845  pathnode->subpath = subpath;
3846  pathnode->limitOffset = limitOffset;
3847  pathnode->limitCount = limitCount;
3848  pathnode->limitOption = limitOption;
3849 
3850  /*
3851  * Adjust the output rows count and costs according to the offset/limit.
3852  */
3853  adjust_limit_rows_costs(&pathnode->path.rows,
3854  &pathnode->path.startup_cost,
3855  &pathnode->path.total_cost,
3856  offset_est, count_est);
3857 
3858  return pathnode;
3859 }

References adjust_limit_rows_costs(), RelOptInfo::consider_parallel, LimitPath::limitCount, LimitPath::limitOffset, LimitPath::limitOption, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, LimitPath::path, Path::pathkeys, Path::pathtype, Path::rows, Path::startup_cost, subpath(), LimitPath::subpath, and Path::total_cost.

Referenced by create_final_distinct_paths(), create_partial_distinct_paths(), and grouping_planner().
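
A hedged sketch of the final LIMIT/OFFSET step, in the spirit of grouping_planner(): the helper name is invented, and offset_est/count_est are assumed to have been produced by preprocess_limit():

static Path *
apply_limit_sketch(PlannerInfo *root, RelOptInfo *final_rel,
                   Path *best_path, Query *parse,
                   int64 offset_est, int64 count_est)
{
    /* wrap the chosen path; row count and costs are adjusted inside */
    return (Path *) create_limit_path(root, final_rel, best_path,
                                      parse->limitOffset,
                                      parse->limitCount,
                                      parse->limitOption,
                                      offset_est, count_est);
}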

◆ create_lockrows_path()

LockRowsPath* create_lockrows_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Path *  subpath,
List *  rowMarks,
int  epqParam 
)

Definition at line 3659 of file pathnode.c.

3661 {
3662  LockRowsPath *pathnode = makeNode(LockRowsPath);
3663 
3664  pathnode->path.pathtype = T_LockRows;
3665  pathnode->path.parent = rel;
3666  /* LockRows doesn't project, so use source path's pathtarget */
3667  pathnode->path.pathtarget = subpath->pathtarget;
3668  /* For now, assume we are above any joins, so no parameterization */
3669  pathnode->path.param_info = NULL;
3670  pathnode->path.parallel_aware = false;
3671  pathnode->path.parallel_safe = false;
3672  pathnode->path.parallel_workers = 0;
3673  pathnode->path.rows = subpath->rows;
3674 
3675  /*
3676  * The result cannot be assumed sorted, since locking might cause the sort
3677  * key columns to be replaced with new values.
3678  */
3679  pathnode->path.pathkeys = NIL;
3680 
3681  pathnode->subpath = subpath;
3682  pathnode->rowMarks = rowMarks;
3683  pathnode->epqParam = epqParam;
3684 
3685  /*
3686  * We should charge something extra for the costs of row locking and
3687  * possible refetches, but it's hard to say how much. For now, use
3688  * cpu_tuple_cost per row.
3689  */
3690  pathnode->path.startup_cost = subpath->startup_cost;
3691  pathnode->path.total_cost = subpath->total_cost +
3692  cpu_tuple_cost * subpath->rows;
3693 
3694  return pathnode;
3695 }

References cpu_tuple_cost, LockRowsPath::epqParam, makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, LockRowsPath::path, Path::pathkeys, Path::pathtype, LockRowsPath::rowMarks, Path::rows, Path::startup_cost, subpath(), LockRowsPath::subpath, and Path::total_cost.

Referenced by grouping_planner().

◆ create_material_path()

MaterialPath* create_material_path ( RelOptInfo *  rel,
Path *  subpath 
)

Definition at line 1566 of file pathnode.c.

1567 {
1568  MaterialPath *pathnode = makeNode(MaterialPath);
1569 
1570  Assert(subpath->parent == rel);
1571 
1572  pathnode->path.pathtype = T_Material;
1573  pathnode->path.parent = rel;
1574  pathnode->path.pathtarget = rel->reltarget;
1575  pathnode->path.param_info = subpath->param_info;
1576  pathnode->path.parallel_aware = false;
1577  pathnode->path.parallel_safe = rel->consider_parallel &&
1578  subpath->parallel_safe;
1579  pathnode->path.parallel_workers = subpath->parallel_workers;
1580  pathnode->path.pathkeys = subpath->pathkeys;
1581 
1582  pathnode->subpath = subpath;
1583 
1584  cost_material(&pathnode->path,
1585  subpath->startup_cost,
1586  subpath->total_cost,
1587  subpath->rows,
1588  subpath->pathtarget->width);
1589 
1590  return pathnode;
1591 }

References Assert, RelOptInfo::consider_parallel, cost_material(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, MaterialPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, subpath(), and MaterialPath::subpath.

Referenced by match_unsorted_outer(), reparameterize_path(), and set_tablesample_rel_pathlist().
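
A minimal sketch of the common use: materialize a relation's cheapest total path so that repeated rescans (for example as the inner side of a nestloop) read from a tuplestore instead of re-executing the subplan. The wrapper is hypothetical; cheapest_total_path is the real RelOptInfo field:

static Path *
materialize_inner_sketch(RelOptInfo *innerrel)
{
    Path   *inner_cheapest = innerrel->cheapest_total_path;

    /* Note: no PlannerInfo argument; costing uses cost_material() only. */
    return (Path *) create_material_path(innerrel, inner_cheapest);
}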

◆ create_memoize_path()

MemoizePath* create_memoize_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Path *  subpath,
List *  param_exprs,
List *  hash_operators,
bool  singlerow,
bool  binary_mode,
double  calls 
)

Definition at line 1598 of file pathnode.c.

1601 {
1602  MemoizePath *pathnode = makeNode(MemoizePath);
1603 
1604  Assert(subpath->parent == rel);
1605 
1606  pathnode->path.pathtype = T_Memoize;
1607  pathnode->path.parent = rel;
1608  pathnode->path.pathtarget = rel->reltarget;
1609  pathnode->path.param_info = subpath->param_info;
1610  pathnode->path.parallel_aware = false;
1611  pathnode->path.parallel_safe = rel->consider_parallel &&
1612  subpath->parallel_safe;
1613  pathnode->path.parallel_workers = subpath->parallel_workers;
1614  pathnode->path.pathkeys = subpath->pathkeys;
1615 
1616  pathnode->subpath = subpath;
1617  pathnode->hash_operators = hash_operators;
1618  pathnode->param_exprs = param_exprs;
1619  pathnode->singlerow = singlerow;
1620  pathnode->binary_mode = binary_mode;
1621  pathnode->calls = calls;
1622 
1623  /*
1624  * For now we set est_entries to 0. cost_memoize_rescan() does all the
1625  * hard work to determine how many cache entries there are likely to be,
1626  * so it seems best to leave it up to that function to fill this field in.
1627  * If left at 0, the executor will make a guess at a good value.
1628  */
1629  pathnode->est_entries = 0;
1630 
1631  /*
1632  * Add a small additional charge for caching the first entry. All the
1633  * harder calculations for rescans are performed in cost_memoize_rescan().
1634  */
1635  pathnode->path.startup_cost = subpath->startup_cost + cpu_tuple_cost;
1636  pathnode->path.total_cost = subpath->total_cost + cpu_tuple_cost;
1637  pathnode->path.rows = subpath->rows;
1638 
1639  return pathnode;
1640 }

References Assert, MemoizePath::binary_mode, MemoizePath::calls, RelOptInfo::consider_parallel, cpu_tuple_cost, MemoizePath::est_entries, MemoizePath::hash_operators, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, MemoizePath::param_exprs, MemoizePath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, Path::rows, MemoizePath::singlerow, Path::startup_cost, subpath(), MemoizePath::subpath, and Path::total_cost.

Referenced by get_memoize_path(), and reparameterize_path().
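
A hedged sketch of how the inner side of a parameterized nestloop might be wrapped in a Memoize node, loosely following get_memoize_path(); param_exprs and hash_operators are assumed to have been extracted from the join clauses already, and est_calls is the expected number of rescans (roughly the outer path's row count):

static Path *
memoize_inner_sketch(PlannerInfo *root, RelOptInfo *innerrel,
                     Path *inner_path, List *param_exprs,
                     List *hash_operators, bool inner_is_unique,
                     double est_calls)
{
    return (Path *) create_memoize_path(root, innerrel, inner_path,
                                        param_exprs, hash_operators,
                                        inner_is_unique,   /* singlerow */
                                        false,             /* binary_mode */
                                        est_calls);        /* calls */
}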

◆ create_merge_append_path()

MergeAppendPath* create_merge_append_path ( PlannerInfo *  root,
RelOptInfo *  rel,
List *  subpaths,
List *  pathkeys,
Relids  required_outer 
)

Definition at line 1415 of file pathnode.c.

1420 {
1421  MergeAppendPath *pathnode = makeNode(MergeAppendPath);
1422  Cost input_startup_cost;
1423  Cost input_total_cost;
1424  ListCell *l;
1425 
1426  pathnode->path.pathtype = T_MergeAppend;
1427  pathnode->path.parent = rel;
1428  pathnode->path.pathtarget = rel->reltarget;
1429  pathnode->path.param_info = get_appendrel_parampathinfo(rel,
1430  required_outer);
1431  pathnode->path.parallel_aware = false;
1432  pathnode->path.parallel_safe = rel->consider_parallel;
1433  pathnode->path.parallel_workers = 0;
1434  pathnode->path.pathkeys = pathkeys;
1435  pathnode->subpaths = subpaths;
1436 
1437  /*
1438  * Apply query-wide LIMIT if known and path is for sole base relation.
1439  * (Handling this at this low level is a bit klugy.)
1440  */
1441  if (bms_equal(rel->relids, root->all_query_rels))
1442  pathnode->limit_tuples = root->limit_tuples;
1443  else
1444  pathnode->limit_tuples = -1.0;
1445 
1446  /*
1447  * Add up the sizes and costs of the input paths.
1448  */
1449  pathnode->path.rows = 0;
1450  input_startup_cost = 0;
1451  input_total_cost = 0;
1452  foreach(l, subpaths)
1453  {
1454  Path *subpath = (Path *) lfirst(l);
1455 
1456  pathnode->path.rows += subpath->rows;
1457  pathnode->path.parallel_safe = pathnode->path.parallel_safe &&
1458  subpath->parallel_safe;
1459 
1460  if (pathkeys_contained_in(pathkeys, subpath->pathkeys))
1461  {
1462  /* Subpath is adequately ordered, we won't need to sort it */
1463  input_startup_cost += subpath->startup_cost;
1464  input_total_cost += subpath->total_cost;
1465  }
1466  else
1467  {
1468  /* We'll need to insert a Sort node, so include cost for that */
1469  Path sort_path; /* dummy for result of cost_sort */
1470 
1471  cost_sort(&sort_path,
1472  root,
1473  pathkeys,
1474  subpath->total_cost,
1475  subpath->rows,
1476  subpath->pathtarget->width,
1477  0.0,
1478  work_mem,
1479  pathnode->limit_tuples);
1480  input_startup_cost += sort_path.startup_cost;
1481  input_total_cost += sort_path.total_cost;
1482  }
1483 
1484  /* All child paths must have same parameterization */
1485  Assert(bms_equal(PATH_REQ_OUTER(subpath), required_outer));
1486  }
1487 
1488  /*
1489  * Now we can compute total costs of the MergeAppend. If there's exactly
1490  * one child path and its parallel awareness matches that of the
1491  * MergeAppend, then the MergeAppend is a no-op and will be discarded
1492  * later (in setrefs.c); otherwise we do the normal cost calculation.
1493  */
1494  if (list_length(subpaths) == 1 &&
1495  ((Path *) linitial(subpaths))->parallel_aware ==
1496  pathnode->path.parallel_aware)
1497  {
1498  pathnode->path.startup_cost = input_startup_cost;
1499  pathnode->path.total_cost = input_total_cost;
1500  }
1501  else
1502  cost_merge_append(&pathnode->path, root,
1503  pathkeys, list_length(subpaths),
1504  input_startup_cost, input_total_cost,
1505  pathnode->path.rows);
1506 
1507  return pathnode;
1508 }

References Assert, bms_equal(), RelOptInfo::consider_parallel, cost_merge_append(), cost_sort(), get_appendrel_parampathinfo(), lfirst, MergeAppendPath::limit_tuples, linitial, list_length(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, MergeAppendPath::path, PATH_REQ_OUTER, Path::pathkeys, pathkeys_contained_in(), Path::pathtype, RelOptInfo::relids, RelOptInfo::reltarget, root, Path::rows, Path::startup_cost, subpath(), MergeAppendPath::subpaths, Path::total_cost, and work_mem.

Referenced by generate_orderedappend_paths(), and generate_union_paths().
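
A minimal hypothetical use, in the style of generate_orderedappend_paths(): combine per-child paths that the caller has already chosen (and sorted where necessary) into one ordered path for a partitioned or inheritance parent:

static void
ordered_append_sketch(PlannerInfo *root, RelOptInfo *parent_rel,
                      List *child_paths, List *pathkeys)
{
    MergeAppendPath *mapath;

    mapath = create_merge_append_path(root, parent_rel,
                                      child_paths, pathkeys,
                                      NULL);    /* required_outer */
    add_path(parent_rel, (Path *) mapath);
}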

◆ create_mergejoin_path()

MergePath* create_mergejoin_path ( PlannerInfo *  root,
RelOptInfo *  joinrel,
JoinType  jointype,
JoinCostWorkspace *  workspace,
JoinPathExtraData *  extra,
Path *  outer_path,
Path *  inner_path,
List *  restrict_clauses,
List *  pathkeys,
Relids  required_outer,
List *  mergeclauses,
List *  outersortkeys,
List *  innersortkeys 
)

Definition at line 2553 of file pathnode.c.

2566 {
2567  MergePath *pathnode = makeNode(MergePath);
2568 
2569  pathnode->jpath.path.pathtype = T_MergeJoin;
2570  pathnode->jpath.path.parent = joinrel;
2571  pathnode->jpath.path.pathtarget = joinrel->reltarget;
2572  pathnode->jpath.path.param_info =
2573  get_joinrel_parampathinfo(root,
2574  joinrel,
2575  outer_path,
2576  inner_path,
2577  extra->sjinfo,
2578  required_outer,
2579  &restrict_clauses);
2580  pathnode->jpath.path.parallel_aware = false;
2581  pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2582  outer_path->parallel_safe && inner_path->parallel_safe;
2583  /* This is a foolish way to estimate parallel_workers, but for now... */
2584  pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2585  pathnode->jpath.path.pathkeys = pathkeys;
2586  pathnode->jpath.jointype = jointype;
2587  pathnode->jpath.inner_unique = extra->inner_unique;
2588  pathnode->jpath.outerjoinpath = outer_path;
2589  pathnode->jpath.innerjoinpath = inner_path;
2590  pathnode->jpath.joinrestrictinfo = restrict_clauses;
2591  pathnode->path_mergeclauses = mergeclauses;
2592  pathnode->outersortkeys = outersortkeys;
2593  pathnode->innersortkeys = innersortkeys;
2594  /* pathnode->skip_mark_restore will be set by final_cost_mergejoin */
2595  /* pathnode->materialize_inner will be set by final_cost_mergejoin */
2596 
2597  final_cost_mergejoin(root, pathnode, workspace, extra);
2598 
2599  return pathnode;
2600 }

References RelOptInfo::consider_parallel, final_cost_mergejoin(), get_joinrel_parampathinfo(), JoinPath::inner_unique, JoinPathExtraData::inner_unique, JoinPath::innerjoinpath, MergePath::innersortkeys, JoinPath::joinrestrictinfo, JoinPath::jointype, MergePath::jpath, makeNode, JoinPath::outerjoinpath, MergePath::outersortkeys, Path::parallel_safe, Path::parallel_workers, MergePath::path_mergeclauses, RelOptInfo::reltarget, root, and JoinPathExtraData::sjinfo.

Referenced by try_mergejoin_path(), and try_partial_mergejoin_path().

◆ create_minmaxagg_path()

MinMaxAggPath* create_minmaxagg_path ( PlannerInfo *  root,
RelOptInfo *  rel,
PathTarget *  target,
List *  mmaggregates,
List *  quals 
)

Definition at line 3397 of file pathnode.c.

3402 {
3403  MinMaxAggPath *pathnode = makeNode(MinMaxAggPath);
3404  Cost initplan_cost;
3405  ListCell *lc;
3406 
3407  /* The topmost generated Plan node will be a Result */
3408  pathnode->path.pathtype = T_Result;
3409  pathnode->path.parent = rel;
3410  pathnode->path.pathtarget = target;
3411  /* For now, assume we are above any joins, so no parameterization */
3412  pathnode->path.param_info = NULL;
3413  pathnode->path.parallel_aware = false;
3414  pathnode->path.parallel_safe = true; /* might change below */
3415  pathnode->path.parallel_workers = 0;
3416  /* Result is one unordered row */
3417  pathnode->path.rows = 1;
3418  pathnode->path.pathkeys = NIL;
3419 
3420  pathnode->mmaggregates = mmaggregates;
3421  pathnode->quals = quals;
3422 
3423  /* Calculate cost of all the initplans, and check parallel safety */
3424  initplan_cost = 0;
3425  foreach(lc, mmaggregates)
3426  {
3427  MinMaxAggInfo *mminfo = (MinMaxAggInfo *) lfirst(lc);
3428 
3429  initplan_cost += mminfo->pathcost;
3430  if (!mminfo->path->parallel_safe)
3431  pathnode->path.parallel_safe = false;
3432  }
3433 
3434  /* add tlist eval cost for each output row, plus cpu_tuple_cost */
3435  pathnode->path.startup_cost = initplan_cost + target->cost.startup;
3436  pathnode->path.total_cost = initplan_cost + target->cost.startup +
3437  target->cost.per_tuple + cpu_tuple_cost;
3438 
3439  /*
3440  * Add cost of qual, if any --- but we ignore its selectivity, since our
3441  * rowcount estimate should be 1 no matter what the qual is.
3442  */
3443  if (quals)
3444  {
3445  QualCost qual_cost;
3446 
3447  cost_qual_eval(&qual_cost, quals, root);
3448  pathnode->path.startup_cost += qual_cost.startup;
3449  pathnode->path.total_cost += qual_cost.startup + qual_cost.per_tuple;
3450  }
3451 
3452  /*
3453  * If the initplans were all parallel-safe, also check safety of the
3454  * target and quals. (The Result node itself isn't parallelizable, but if
3455  * we are in a subquery then it can be useful for the outer query to know
3456  * that this one is parallel-safe.)
3457  */
3458  if (pathnode->path.parallel_safe)
3459  pathnode->path.parallel_safe =
3460  is_parallel_safe(root, (Node *) target->exprs) &&
3461  is_parallel_safe(root, (Node *) quals);
3462 
3463  return pathnode;
3464 }

References PathTarget::cost, cost_qual_eval(), cpu_tuple_cost, PathTarget::exprs, is_parallel_safe(), lfirst, makeNode, MinMaxAggPath::mmaggregates, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, MinMaxAggPath::path, MinMaxAggInfo::path, MinMaxAggInfo::pathcost, Path::pathkeys, Path::pathtype, QualCost::per_tuple, MinMaxAggPath::quals, root, Path::rows, QualCost::startup, Path::startup_cost, and Path::total_cost.

Referenced by preprocess_minmax_aggregates().

◆ create_modifytable_path()

ModifyTablePath* create_modifytable_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Path *  subpath,
CmdType  operation,
bool  canSetTag,
Index  nominalRelation,
Index  rootRelation,
bool  partColsUpdated,
List *  resultRelations,
List *  updateColnosLists,
List *  withCheckOptionLists,
List *  returningLists,
List *  rowMarks,
OnConflictExpr *  onconflict,
List *  mergeActionLists,
List *  mergeJoinConditions,
int  epqParam 
)

Definition at line 3722 of file pathnode.c.

3733 {
3734  ModifyTablePath *pathnode = makeNode(ModifyTablePath);
3735 
3736  Assert(operation == CMD_MERGE ||
3737  (operation == CMD_UPDATE ?
3738  list_length(resultRelations) == list_length(updateColnosLists) :
3739  updateColnosLists == NIL));
3740  Assert(withCheckOptionLists == NIL ||
3741  list_length(resultRelations) == list_length(withCheckOptionLists));
3742  Assert(returningLists == NIL ||
3743  list_length(resultRelations) == list_length(returningLists));
3744 
3745  pathnode->path.pathtype = T_ModifyTable;
3746  pathnode->path.parent = rel;
3747  /* pathtarget is not interesting, just make it minimally valid */
3748  pathnode->path.pathtarget = rel->reltarget;
3749  /* For now, assume we are above any joins, so no parameterization */
3750  pathnode->path.param_info = NULL;
3751  pathnode->path.parallel_aware = false;
3752  pathnode->path.parallel_safe = false;
3753  pathnode->path.parallel_workers = 0;
3754  pathnode->path.pathkeys = NIL;
3755 
3756  /*
3757  * Compute cost & rowcount as subpath cost & rowcount (if RETURNING)
3758  *
3759  * Currently, we don't charge anything extra for the actual table
3760  * modification work, nor for the WITH CHECK OPTIONS or RETURNING
3761  * expressions if any. It would only be window dressing, since
3762  * ModifyTable is always a top-level node and there is no way for the
3763  * costs to change any higher-level planning choices. But we might want
3764  * to make it look better sometime.
3765  */
3766  pathnode->path.startup_cost = subpath->startup_cost;
3767  pathnode->path.total_cost = subpath->total_cost;
3768  if (returningLists != NIL)
3769  {
3770  pathnode->path.rows = subpath->rows;
3771 
3772  /*
3773  * Set width to match the subpath output. XXX this is totally wrong:
3774  * we should return an average of the RETURNING tlist widths. But
3775  * it's what happened historically, and improving it is a task for
3776  * another day. (Again, it's mostly window dressing.)
3777  */
3778  pathnode->path.pathtarget->width = subpath->pathtarget->width;
3779  }
3780  else
3781  {
3782  pathnode->path.rows = 0;
3783  pathnode->path.pathtarget->width = 0;
3784  }
3785 
3786  pathnode->subpath = subpath;
3787  pathnode->operation = operation;
3788  pathnode->canSetTag = canSetTag;
3789  pathnode->nominalRelation = nominalRelation;
3790  pathnode->rootRelation = rootRelation;
3791  pathnode->partColsUpdated = partColsUpdated;
3792  pathnode->resultRelations = resultRelations;
3793  pathnode->updateColnosLists = updateColnosLists;
3794  pathnode->withCheckOptionLists = withCheckOptionLists;
3795  pathnode->returningLists = returningLists;
3796  pathnode->rowMarks = rowMarks;
3797  pathnode->onconflict = onconflict;
3798  pathnode->epqParam = epqParam;
3799  pathnode->mergeActionLists = mergeActionLists;
3800  pathnode->mergeJoinConditions = mergeJoinConditions;
3801 
3802  return pathnode;
3803 }

References Assert, ModifyTablePath::canSetTag, CMD_MERGE, CMD_UPDATE, ModifyTablePath::epqParam, list_length(), makeNode, ModifyTablePath::mergeActionLists, ModifyTablePath::mergeJoinConditions, NIL, ModifyTablePath::nominalRelation, ModifyTablePath::onconflict, ModifyTablePath::operation, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ModifyTablePath::partColsUpdated, ModifyTablePath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, ModifyTablePath::resultRelations, ModifyTablePath::returningLists, ModifyTablePath::rootRelation, ModifyTablePath::rowMarks, Path::rows, Path::startup_cost, subpath(), ModifyTablePath::subpath, Path::total_cost, ModifyTablePath::updateColnosLists, and ModifyTablePath::withCheckOptionLists.

Referenced by grouping_planner().
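
As a usage illustration, a hedged sketch of the call for a plain single-table INSERT with no RETURNING, WITH CHECK OPTIONS, ON CONFLICT, row marks or MERGE actions, following the argument order shown above; the helper and its parameters (including the already-assigned epqParam) are hypothetical:

static ModifyTablePath *
simple_insert_path_sketch(PlannerInfo *root, RelOptInfo *final_rel,
                          Path *subpath, Index result_rti, int epqParam)
{
    return create_modifytable_path(root, final_rel, subpath,
                                   CMD_INSERT,
                                   true,                /* canSetTag */
                                   result_rti,          /* nominalRelation */
                                   0,                   /* rootRelation */
                                   false,               /* partColsUpdated */
                                   list_make1_int(result_rti),
                                   NIL,                 /* updateColnosLists */
                                   NIL,                 /* withCheckOptionLists */
                                   NIL,                 /* returningLists */
                                   NIL,                 /* rowMarks */
                                   NULL,                /* onconflict */
                                   NIL,                 /* mergeActionLists */
                                   NIL,                 /* mergeJoinConditions */
                                   epqParam);
}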

◆ create_namedtuplestorescan_path()

Path* create_namedtuplestorescan_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Relids  required_outer 
)

Definition at line 2150 of file pathnode.c.

2152 {
2153  Path *pathnode = makeNode(Path);
2154 
2155  pathnode->pathtype = T_NamedTuplestoreScan;
2156  pathnode->parent = rel;
2157  pathnode->pathtarget = rel->reltarget;
2158  pathnode->param_info = get_baserel_parampathinfo(root, rel,
2159  required_outer);
2160  pathnode->parallel_aware = false;
2161  pathnode->parallel_safe = rel->consider_parallel;
2162  pathnode->parallel_workers = 0;
2163  pathnode->pathkeys = NIL; /* result is always unordered */
2164 
2165  cost_namedtuplestorescan(pathnode, root, rel, pathnode->param_info);
2166 
2167  return pathnode;
2168 }

References RelOptInfo::consider_parallel, cost_namedtuplestorescan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_namedtuplestore_pathlist().

◆ create_nestloop_path()

NestPath* create_nestloop_path ( PlannerInfo *  root,
RelOptInfo *  joinrel,
JoinType  jointype,
JoinCostWorkspace *  workspace,
JoinPathExtraData *  extra,
Path *  outer_path,
Path *  inner_path,
List *  restrict_clauses,
List *  pathkeys,
Relids  required_outer 
)

Definition at line 2457 of file pathnode.c.

2467 {
2468  NestPath *pathnode = makeNode(NestPath);
2469  Relids inner_req_outer = PATH_REQ_OUTER(inner_path);
2470  Relids outerrelids;
2471 
2472  /*
2473  * Paths are parameterized by top-level parents, so run parameterization
2474  * tests on the parent relids.
2475  */
2476  if (outer_path->parent->top_parent_relids)
2477  outerrelids = outer_path->parent->top_parent_relids;
2478  else
2479  outerrelids = outer_path->parent->relids;
2480 
2481  /*
2482  * If the inner path is parameterized by the outer, we must drop any
2483  * restrict_clauses that are due to be moved into the inner path. We have
2484  * to do this now, rather than postpone the work till createplan time,
2485  * because the restrict_clauses list can affect the size and cost
2486  * estimates for this path. We detect such clauses by checking for serial
2487  * number match to clauses already enforced in the inner path.
2488  */
2489  if (bms_overlap(inner_req_outer, outerrelids))
2490  {
2491  Bitmapset *enforced_serials = get_param_path_clause_serials(inner_path);
2492  List *jclauses = NIL;
2493  ListCell *lc;
2494 
2495  foreach(lc, restrict_clauses)
2496  {
2497  RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
2498 
2499  if (!bms_is_member(rinfo->rinfo_serial, enforced_serials))
2500  jclauses = lappend(jclauses, rinfo);
2501  }
2502  restrict_clauses = jclauses;
2503  }
2504 
2505  pathnode->jpath.path.pathtype = T_NestLoop;
2506  pathnode->jpath.path.parent = joinrel;
2507  pathnode->jpath.path.pathtarget = joinrel->reltarget;
2508  pathnode->jpath.path.param_info =
2509  get_joinrel_parampathinfo(root,
2510  joinrel,
2511  outer_path,
2512  inner_path,
2513  extra->sjinfo,
2514  required_outer,
2515  &restrict_clauses);
2516  pathnode->jpath.path.parallel_aware = false;
2517  pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2518  outer_path->parallel_safe && inner_path->parallel_safe;
2519  /* This is a foolish way to estimate parallel_workers, but for now... */
2520  pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2521  pathnode->jpath.path.pathkeys = pathkeys;
2522  pathnode->jpath.jointype = jointype;
2523  pathnode->jpath.inner_unique = extra->inner_unique;
2524  pathnode->jpath.outerjoinpath = outer_path;
2525  pathnode->jpath.innerjoinpath = inner_path;
2526  pathnode->jpath.joinrestrictinfo = restrict_clauses;
2527 
2528  final_cost_nestloop(root, pathnode, workspace, extra);
2529 
2530  return pathnode;
2531 }

References bms_is_member(), bms_overlap(), RelOptInfo::consider_parallel, final_cost_nestloop(), get_joinrel_parampathinfo(), get_param_path_clause_serials(), JoinPath::inner_unique, JoinPathExtraData::inner_unique, JoinPath::innerjoinpath, JoinPath::joinrestrictinfo, JoinPath::jointype, NestPath::jpath, lappend(), lfirst, makeNode, NIL, JoinPath::outerjoinpath, Path::parallel_safe, Path::parallel_workers, PATH_REQ_OUTER, RelOptInfo::reltarget, RestrictInfo::rinfo_serial, root, and JoinPathExtraData::sjinfo.

Referenced by try_nestloop_path(), and try_partial_nestloop_path().

◆ create_projection_path()

ProjectionPath* create_projection_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Path *  subpath,
PathTarget *  target 
)

Definition at line 2685 of file pathnode.c.

2689 {
2690  ProjectionPath *pathnode = makeNode(ProjectionPath);
2691  PathTarget *oldtarget;
2692 
2693  /*
2694  * We mustn't put a ProjectionPath directly above another; it's useless
2695  * and will confuse create_projection_plan. Rather than making sure all
2696  * callers handle that, let's implement it here, by stripping off any
2697  * ProjectionPath in what we're given. Given this rule, there won't be
2698  * more than one.
2699  */
2700  if (IsA(subpath, ProjectionPath))
2701  {
2702  ProjectionPath *subpp = (ProjectionPath *) subpath;
2703 
2704  Assert(subpp->path.parent == rel);
2705  subpath = subpp->subpath;
2706  Assert(!IsA(subpath, ProjectionPath));
2707 }
2708 
2709  pathnode->path.pathtype = T_Result;
2710  pathnode->path.parent = rel;
2711  pathnode->path.pathtarget = target;
2712  /* For now, assume we are above any joins, so no parameterization */
2713  pathnode->path.param_info = NULL;
2714  pathnode->path.parallel_aware = false;
2715  pathnode->path.parallel_safe = rel->consider_parallel &&
2716  subpath->parallel_safe &&
2717  is_parallel_safe(root, (Node *) target->exprs);
2718  pathnode->path.parallel_workers = subpath->parallel_workers;
2719  /* Projection does not change the sort order */
2720  pathnode->path.pathkeys = subpath->pathkeys;
2721 
2722  pathnode->subpath = subpath;
2723 
2724  /*
2725  * We might not need a separate Result node. If the input plan node type
2726  * can project, we can just tell it to project something else. Or, if it
2727  * can't project but the desired target has the same expression list as
2728  * what the input will produce anyway, we can still give it the desired
2729  * tlist (possibly changing its ressortgroupref labels, but nothing else).
2730  * Note: in the latter case, create_projection_plan has to recheck our
2731  * conclusion; see comments therein.
2732  */
2733  oldtarget = subpath->pathtarget;
2734  if (is_projection_capable_path(subpath) ||
2735  equal(oldtarget->exprs, target->exprs))
2736  {
2737  /* No separate Result node needed */
2738  pathnode->dummypp = true;
2739 
2740  /*
2741  * Set cost of plan as subpath's cost, adjusted for tlist replacement.
2742  */
2743  pathnode->path.rows = subpath->rows;
2744  pathnode->path.startup_cost = subpath->startup_cost +
2745  (target->cost.startup - oldtarget->cost.startup);
2746  pathnode->path.total_cost = subpath->total_cost +
2747  (target->cost.startup - oldtarget->cost.startup) +
2748  (target->cost.per_tuple - oldtarget->cost.per_tuple) * subpath->rows;
2749  }
2750  else
2751  {
2752  /* We really do need the Result node */
2753  pathnode->dummypp = false;
2754 
2755  /*
2756  * The Result node's cost is cpu_tuple_cost per row, plus the cost of
2757  * evaluating the tlist. There is no qual to worry about.
2758  */
2759  pathnode->path.rows = subpath->rows;
2760  pathnode->path.startup_cost = subpath->startup_cost +
2761  target->cost.startup;
2762  pathnode->path.total_cost = subpath->total_cost +
2763  target->cost.startup +
2764  (cpu_tuple_cost + target->cost.per_tuple) * subpath->rows;
2765  }
2766 
2767  return pathnode;
2768 }

References Assert, RelOptInfo::consider_parallel, PathTarget::cost, cpu_tuple_cost, ProjectionPath::dummypp, equal(), PathTarget::exprs, is_parallel_safe(), is_projection_capable_path(), IsA, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ProjectionPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), ProjectionPath::subpath, and Path::total_cost.

Referenced by add_paths_with_pathkeys_for_rel(), adjust_paths_for_srfs(), apply_projection_to_path(), apply_scanjoin_target_to_paths(), and recurse_set_operations().
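
A minimal hypothetical use: re-target a path to a new PathTarget. Whether the result ends up needing a real Result node or is marked as a "dummy" projection (dummypp) is decided inside, per the logic above:

static Path *
retarget_path_sketch(PlannerInfo *root, RelOptInfo *rel,
                     Path *input_path, PathTarget *new_target)
{
    /*
     * If input_path can project on its own (e.g. a scan or join node), the
     * ProjectionPath is marked dummypp and no separate Result is emitted at
     * plan creation time; otherwise a Result node is charged for.
     */
    return (Path *) create_projection_path(root, rel, input_path, new_target);
}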

◆ create_recursiveunion_path()

RecursiveUnionPath* create_recursiveunion_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Path *  leftpath,
Path *  rightpath,
PathTarget *  target,
List *  distinctList,
int  wtParam,
double  numGroups 
)

Definition at line 3614 of file pathnode.c.

3622 {
3623  RecursiveUnionPath *pathnode = makeNode(RecursiveUnionPath);
3624 
3625  pathnode->path.pathtype = T_RecursiveUnion;
3626  pathnode->path.parent = rel;
3627  pathnode->path.pathtarget = target;
3628  /* For now, assume we are above any joins, so no parameterization */
3629  pathnode->path.param_info = NULL;
3630  pathnode->path.parallel_aware = false;
3631  pathnode->path.parallel_safe = rel->consider_parallel &&
3632  leftpath->parallel_safe && rightpath->parallel_safe;
3633  /* Foolish, but we'll do it like joins for now: */
3634  pathnode->path.parallel_workers = leftpath->parallel_workers;
3635  /* RecursiveUnion result is always unsorted */
3636  pathnode->path.pathkeys = NIL;
3637 
3638  pathnode->leftpath = leftpath;
3639  pathnode->rightpath = rightpath;
3640  pathnode->distinctList = distinctList;
3641  pathnode->wtParam = wtParam;
3642  pathnode->numGroups = numGroups;
3643 
3644  cost_recursive_union(&pathnode->path, leftpath, rightpath);
3645 
3646  return pathnode;
3647 }

References RelOptInfo::consider_parallel, cost_recursive_union(), RecursiveUnionPath::distinctList, RecursiveUnionPath::leftpath, makeNode, NIL, RecursiveUnionPath::numGroups, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, RecursiveUnionPath::path, Path::pathkeys, Path::pathtype, RecursiveUnionPath::rightpath, and RecursiveUnionPath::wtParam.

Referenced by generate_recursion_path().

◆ create_resultscan_path()

Path* create_resultscan_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Relids  required_outer 
)

Definition at line 2176 of file pathnode.c.

2178 {
2179  Path *pathnode = makeNode(Path);
2180 
2181  pathnode->pathtype = T_Result;
2182  pathnode->parent = rel;
2183  pathnode->pathtarget = rel->reltarget;
2184  pathnode->param_info = get_baserel_parampathinfo(root, rel,
2185  required_outer);
2186  pathnode->parallel_aware = false;
2187  pathnode->parallel_safe = rel->consider_parallel;
2188  pathnode->parallel_workers = 0;
2189  pathnode->pathkeys = NIL; /* result is always unordered */
2190 
2191  cost_resultscan(pathnode, root, rel, pathnode->param_info);
2192 
2193  return pathnode;
2194 }

References RelOptInfo::consider_parallel, cost_resultscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by reparameterize_path(), and set_result_pathlist().

◆ create_samplescan_path()

Path* create_samplescan_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Relids  required_outer 
)

Definition at line 952 of file pathnode.c.

953 {
954  Path *pathnode = makeNode(Path);
955 
956  pathnode->pathtype = T_SampleScan;
957  pathnode->parent = rel;
958  pathnode->pathtarget = rel->reltarget;
959  pathnode->param_info = get_baserel_parampathinfo(root, rel,
960  required_outer);
961  pathnode->parallel_aware = false;
962  pathnode->parallel_safe = rel->consider_parallel;
963  pathnode->parallel_workers = 0;
964  pathnode->pathkeys = NIL; /* samplescan has unordered result */
965 
966  cost_samplescan(pathnode, root, rel, pathnode->param_info);
967 
968  return pathnode;
969 }

References RelOptInfo::consider_parallel, cost_samplescan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by reparameterize_path(), and set_tablesample_rel_pathlist().

◆ create_seqscan_path()

Path* create_seqscan_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Relids  required_outer,
int  parallel_workers 
)

Definition at line 927 of file pathnode.c.

929 {
930  Path *pathnode = makeNode(Path);
931 
932  pathnode->pathtype = T_SeqScan;
933  pathnode->parent = rel;
934  pathnode->pathtarget = rel->reltarget;
935  pathnode->param_info = get_baserel_parampathinfo(root, rel,
936  required_outer);
937  pathnode->parallel_aware = (parallel_workers > 0);
938  pathnode->parallel_safe = rel->consider_parallel;
939  pathnode->parallel_workers = parallel_workers;
940  pathnode->pathkeys = NIL; /* seqscan has unordered result */
941 
942  cost_seqscan(pathnode, root, rel, pathnode->param_info);
943 
944  return pathnode;
945 }

References RelOptInfo::consider_parallel, cost_seqscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by create_plain_partial_paths(), plan_cluster_use_sort(), reparameterize_path(), and set_plain_rel_pathlist().
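
To show both the serial and the partial form, here is a hedged sketch patterned on set_plain_rel_pathlist() and create_plain_partial_paths(); compute_parallel_worker() and the max_parallel_workers_per_gather GUC are existing planner facilities, while the wrapper itself is invented:

static void
seqscan_paths_sketch(PlannerInfo *root, RelOptInfo *rel)
{
    Relids      required_outer = rel->lateral_relids;
    int         parallel_workers;

    /* ordinary (non-parallel) sequential scan */
    add_path(rel, create_seqscan_path(root, rel, required_outer, 0));

    /* a partial seqscan, for use below a Gather, if parallelism is allowed */
    if (rel->consider_parallel && required_outer == NULL)
    {
        parallel_workers = compute_parallel_worker(rel, rel->pages, -1,
                                                   max_parallel_workers_per_gather);
        if (parallel_workers > 0)
            add_partial_path(rel, create_seqscan_path(root, rel, NULL,
                                                      parallel_workers));
    }
}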

◆ create_set_projection_path()

ProjectSetPath* create_set_projection_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Path *  subpath,
PathTarget *  target 
)

Definition at line 2882 of file pathnode.c.

2886 {
2887  ProjectSetPath *pathnode = makeNode(ProjectSetPath);
2888  double tlist_rows;
2889  ListCell *lc;
2890 
2891  pathnode->path.pathtype = T_ProjectSet;
2892  pathnode->path.parent = rel;
2893  pathnode->path.pathtarget = target;
2894  /* For now, assume we are above any joins, so no parameterization */
2895  pathnode->path.param_info = NULL;
2896  pathnode->path.parallel_aware = false;
2897  pathnode->path.parallel_safe = rel->consider_parallel &&
2898  subpath->parallel_safe &&
2899  is_parallel_safe(root, (Node *) target->exprs);
2900  pathnode->path.parallel_workers = subpath->parallel_workers;
2901  /* Projection does not change the sort order XXX? */
2902  pathnode->path.pathkeys = subpath->pathkeys;
2903 
2904  pathnode->subpath = subpath;
2905 
2906  /*
2907  * Estimate number of rows produced by SRFs for each row of input; if
2908  * there's more than one in this node, use the maximum.
2909  */
2910  tlist_rows = 1;
2911  foreach(lc, target->exprs)
2912  {
2913  Node *node = (Node *) lfirst(lc);
2914  double itemrows;
2915 
2916  itemrows = expression_returns_set_rows(root, node);
2917  if (tlist_rows < itemrows)
2918  tlist_rows = itemrows;
2919  }
2920 
2921  /*
2922  * In addition to the cost of evaluating the tlist, charge cpu_tuple_cost
2923  * per input row, and half of cpu_tuple_cost for each added output row.
2924  * This is slightly bizarre maybe, but it's what 9.6 did; we may revisit
2925  * this estimate later.
2926  */
2927  pathnode->path.rows = subpath->rows * tlist_rows;
2928  pathnode->path.startup_cost = subpath->startup_cost +
2929  target->cost.startup;
2930  pathnode->path.total_cost = subpath->total_cost +
2931  target->cost.startup +
2932  (cpu_tuple_cost + target->cost.per_tuple) * subpath->rows +
2933  (pathnode->path.rows - subpath->rows) * cpu_tuple_cost / 2;
2934 
2935  return pathnode;
2936 }

References RelOptInfo::consider_parallel, PathTarget::cost, cpu_tuple_cost, expression_returns_set_rows(), PathTarget::exprs, is_parallel_safe(), lfirst, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ProjectSetPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), ProjectSetPath::subpath, and Path::total_cost.

Referenced by adjust_paths_for_srfs().
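
A minimal hypothetical use, following adjust_paths_for_srfs(): once the planner has split the target list so that set-returning functions sit in their own PathTarget, that target is applied with a ProjectSet node. For example, if srf_target contains generate_series(1, 10), tlist_rows is 10, so a 100-row input is estimated to produce 1000 output rows:

static Path *
project_srfs_sketch(PlannerInfo *root, RelOptInfo *rel,
                    Path *input_path, PathTarget *srf_target)
{
    /* expands SRFs in srf_target; row and cost estimates are set inside */
    return (Path *) create_set_projection_path(root, rel, input_path,
                                               srf_target);
}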

◆ create_setop_path()

SetOpPath* create_setop_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Path *  subpath,
SetOpCmd  cmd,
SetOpStrategy  strategy,
List *  distinctList,
AttrNumber  flagColIdx,
int  firstFlag,
double  numGroups,
double  outputRows 
)

Definition at line 3552 of file pathnode.c.

3562 {
3563  SetOpPath *pathnode = makeNode(SetOpPath);
3564 
3565  pathnode->path.pathtype = T_SetOp;
3566  pathnode->path.parent = rel;
3567  /* SetOp doesn't project, so use source path's pathtarget */
3568  pathnode->path.pathtarget = subpath->pathtarget;
3569  /* For now, assume we are above any joins, so no parameterization */
3570  pathnode->path.param_info = NULL;
3571  pathnode->path.parallel_aware = false;
3572  pathnode->path.parallel_safe = rel->consider_parallel &&
3573  subpath->parallel_safe;
3574  pathnode->path.parallel_workers = subpath->parallel_workers;
3575  /* SetOp preserves the input sort order if in sort mode */
3576  pathnode->path.pathkeys =
3577  (strategy == SETOP_SORTED) ? subpath->pathkeys : NIL;
3578 
3579  pathnode->subpath = subpath;
3580  pathnode->cmd = cmd;
3581  pathnode->strategy = strategy;
3582  pathnode->distinctList = distinctList;
3583  pathnode->flagColIdx = flagColIdx;
3584  pathnode->firstFlag = firstFlag;
3585  pathnode->numGroups = numGroups;
3586 
3587  /*
3588  * Charge one cpu_operator_cost per comparison per input tuple. We assume
3589  * all columns get compared at most of the tuples.
3590  */
3591  pathnode->path.startup_cost = subpath->startup_cost;
3592  pathnode->path.total_cost = subpath->total_cost +
3593  cpu_operator_cost * subpath->rows * list_length(distinctList);
3594  pathnode->path.rows = outputRows;
3595 
3596  return pathnode;
3597 }

References SetOpPath::cmd, RelOptInfo::consider_parallel, cpu_operator_cost, SetOpPath::distinctList, SetOpPath::firstFlag, SetOpPath::flagColIdx, list_length(), makeNode, NIL, SetOpPath::numGroups, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, SetOpPath::path, Path::pathkeys, Path::pathtype, Path::rows, SETOP_SORTED, Path::startup_cost, SetOpPath::strategy, subpath(), SetOpPath::subpath, and Path::total_cost.

Referenced by generate_nonunion_paths().

◆ create_sort_path()

SortPath* create_sort_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Path *  subpath,
List *  pathkeys,
double  limit_tuples 
)

Definition at line 3000 of file pathnode.c.

3005 {
3006  SortPath *pathnode = makeNode(SortPath);
3007 
3008  pathnode->path.pathtype = T_Sort;
3009  pathnode->path.parent = rel;
3010  /* Sort doesn't project, so use source path's pathtarget */
3011  pathnode->path.pathtarget = subpath->pathtarget;
3012  /* For now, assume we are above any joins, so no parameterization */
3013  pathnode->path.param_info = NULL;
3014  pathnode->path.parallel_aware = false;
3015  pathnode->path.parallel_safe = rel->consider_parallel &&
3016  subpath->parallel_safe;
3017  pathnode->path.parallel_workers = subpath->parallel_workers;
3018  pathnode->path.pathkeys = pathkeys;
3019 
3020  pathnode->subpath = subpath;
3021 
3022  cost_sort(&pathnode->path, root, pathkeys,
3023  subpath->total_cost,
3024  subpath->rows,
3025  subpath->pathtarget->width,
3026  0.0, /* XXX comparison_cost shouldn't be 0? */
3027  work_mem, limit_tuples);
3028 
3029  return pathnode;
3030 }

References RelOptInfo::consider_parallel, cost_sort(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, SortPath::path, Path::pathkeys, Path::pathtype, root, subpath(), SortPath::subpath, and work_mem.

Referenced by add_paths_with_pathkeys_for_rel(), build_setop_child_paths(), create_final_distinct_paths(), create_one_window_path(), create_ordered_paths(), create_partial_distinct_paths(), gather_grouping_paths(), generate_nonunion_paths(), generate_union_paths(), generate_useful_gather_paths(), and make_ordered_path().

◆ create_subqueryscan_path()

SubqueryScanPath* create_subqueryscan_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Path *  subpath,
bool  trivial_pathtarget,
List *  pathkeys,
Relids  required_outer 
)

Definition at line 2016 of file pathnode.c.

2019 {
2020  SubqueryScanPath *pathnode = makeNode(SubqueryScanPath);
2021 
2022  pathnode->path.pathtype = T_SubqueryScan;
2023  pathnode->path.parent = rel;
2024  pathnode->path.pathtarget = rel->reltarget;
2025  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
2026  required_outer);
2027  pathnode->path.parallel_aware = false;
2028  pathnode->path.parallel_safe = rel->consider_parallel &&
2029  subpath->parallel_safe;
2030  pathnode->path.parallel_workers = subpath->parallel_workers;
2031  pathnode->path.pathkeys = pathkeys;
2032  pathnode->subpath = subpath;
2033 
2034  cost_subqueryscan(pathnode, root, rel, pathnode->path.param_info,
2035  trivial_pathtarget);
2036 
2037  return pathnode;
2038 }

References RelOptInfo::consider_parallel, cost_subqueryscan(), get_baserel_parampathinfo(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, SubqueryScanPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, root, subpath(), and SubqueryScanPath::subpath.

Referenced by build_setop_child_paths(), reparameterize_path(), and set_subquery_pathlist().

◆ create_tablefuncscan_path()

Path* create_tablefuncscan_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Relids  required_outer 
)

Definition at line 2072 of file pathnode.c.

2074 {
2075  Path *pathnode = makeNode(Path);
2076 
2077  pathnode->pathtype = T_TableFuncScan;
2078  pathnode->parent = rel;
2079  pathnode->pathtarget = rel->reltarget;
2080  pathnode->param_info = get_baserel_parampathinfo(root, rel,
2081  required_outer);
2082  pathnode->parallel_aware = false;
2083  pathnode->parallel_safe = rel->consider_parallel;
2084  pathnode->parallel_workers = 0;
2085  pathnode->pathkeys = NIL; /* result is always unordered */
2086 
2087  cost_tablefuncscan(pathnode, root, rel, pathnode->param_info);
2088 
2089  return pathnode;
2090 }

References RelOptInfo::consider_parallel, cost_tablefuncscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_tablefunc_pathlist().

◆ create_tidrangescan_path()

TidRangePath* create_tidrangescan_path ( PlannerInfo *  root,
RelOptInfo *  rel,
List *  tidrangequals,
Relids  required_outer 
)

Definition at line 1208 of file pathnode.c.

1210 {
1211  TidRangePath *pathnode = makeNode(TidRangePath);
1212 
1213  pathnode->path.pathtype = T_TidRangeScan;
1214  pathnode->path.parent = rel;
1215  pathnode->path.pathtarget = rel->reltarget;
1216  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1217  required_outer);
1218  pathnode->path.parallel_aware = false;
1219  pathnode->path.parallel_safe = rel->consider_parallel;
1220  pathnode->path.parallel_workers = 0;
1221  pathnode->path.pathkeys = NIL; /* always unordered */
1222 
1223  pathnode->tidrangequals = tidrangequals;
1224 
1225  cost_tidrangescan(&pathnode->path, root, rel, tidrangequals,
1226  pathnode->path.param_info);
1227 
1228  return pathnode;
1229 }

References RelOptInfo::consider_parallel, cost_tidrangescan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, TidRangePath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, root, and TidRangePath::tidrangequals.

Referenced by create_tidscan_paths().

◆ create_tidscan_path()

TidPath* create_tidscan_path ( PlannerInfo *  root,
RelOptInfo *  rel,
List *  tidquals,
Relids  required_outer 
)

Definition at line 1179 of file pathnode.c.

1181 {
1182  TidPath *pathnode = makeNode(TidPath);
1183 
1184  pathnode->path.pathtype = T_TidScan;
1185  pathnode->path.parent = rel;
1186  pathnode->path.pathtarget = rel->reltarget;
1187  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1188  required_outer);
1189  pathnode->path.parallel_aware = false;
1190  pathnode->path.parallel_safe = rel->consider_parallel;
1191  pathnode->path.parallel_workers = 0;
1192  pathnode->path.pathkeys = NIL; /* always unordered */
1193 
1194  pathnode->tidquals = tidquals;
1195 
1196  cost_tidscan(&pathnode->path, root, rel, tidquals,
1197  pathnode->path.param_info);
1198 
1199  return pathnode;
1200 }

References RelOptInfo::consider_parallel, cost_tidscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, TidPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, root, and TidPath::tidquals.

Referenced by BuildParameterizedTidPaths(), and create_tidscan_paths().
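
A minimal hypothetical use for a query such as WHERE ctid = '(0,1)': tidquals is the list of RestrictInfos on ctid that create_tidscan_paths() would have collected, and the lateral_relids parameterization mirrors what that caller passes:

static void
tidscan_sketch(PlannerInfo *root, RelOptInfo *rel, List *tidquals)
{
    /* the TID quals make the scan cheap; costing happens in cost_tidscan() */
    add_path(rel, (Path *) create_tidscan_path(root, rel, tidquals,
                                               rel->lateral_relids));
}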

◆ create_unique_path()

UniquePath* create_unique_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Path *  subpath,
SpecialJoinInfo *  sjinfo 
)

Definition at line 1654 of file pathnode.c.

1656 {
1657  UniquePath *pathnode;
1658  Path sort_path; /* dummy for result of cost_sort */
1659  Path agg_path; /* dummy for result of cost_agg */
1660  MemoryContext oldcontext;
1661  int numCols;
1662 
1663  /* Caller made a mistake if subpath isn't cheapest_total ... */
1664  Assert(subpath == rel->cheapest_total_path);
1665  Assert(subpath->parent == rel);
1666  /* ... or if SpecialJoinInfo is the wrong one */
1667  Assert(sjinfo->jointype == JOIN_SEMI);
1668  Assert(bms_equal(rel->relids, sjinfo->syn_righthand));
1669 
1670  /* If result already cached, return it */
1671  if (rel->cheapest_unique_path)
1672  return (UniquePath *) rel->cheapest_unique_path;
1673 
1674  /* If it's not possible to unique-ify, return NULL */
1675  if (!(sjinfo->semi_can_btree || sjinfo->semi_can_hash))
1676  return NULL;
1677 
1678  /*
1679  * When called during GEQO join planning, we are in a short-lived memory
1680  * context. We must make sure that the path and any subsidiary data
1681  * structures created for a baserel survive the GEQO cycle, else the
1682  * baserel is trashed for future GEQO cycles. On the other hand, when we
1683  * are creating those for a joinrel during GEQO, we don't want them to
1684  * clutter the main planning context. Upshot is that the best solution is
1685  * to explicitly allocate memory in the same context the given RelOptInfo
1686  * is in.
1687  */
1688  oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
1689 
1690  pathnode = makeNode(UniquePath);
1691 
1692  pathnode->path.pathtype = T_Unique;
1693  pathnode->path.parent = rel;
1694  pathnode->path.pathtarget = rel->reltarget;
1695  pathnode->path.param_info = subpath->param_info;
1696  pathnode->path.parallel_aware = false;
1697  pathnode->path.parallel_safe = rel->consider_parallel &&
1698  subpath->parallel_safe;
1699  pathnode->path.parallel_workers = subpath->parallel_workers;
1700 
1701  /*
1702  * Assume the output is unsorted, since we don't necessarily have pathkeys
1703  * to represent it. (This might get overridden below.)
1704  */
1705  pathnode->path.pathkeys = NIL;
1706 
1707  pathnode->subpath = subpath;
1708 
1709  /*
1710  * Under GEQO and when planning child joins, the sjinfo might be
1711  * short-lived, so we'd better make copies of data structures we extract
1712  * from it.
1713  */
1714  pathnode->in_operators = copyObject(sjinfo->semi_operators);
1715  pathnode->uniq_exprs = copyObject(sjinfo->semi_rhs_exprs);
1716 
1717  /*
1718  * If the input is a relation and it has a unique index that proves the
1719  * semi_rhs_exprs are unique, then we don't need to do anything. Note
1720  * that relation_has_unique_index_for automatically considers restriction
1721  * clauses for the rel, as well.
1722  */
1723  if (rel->rtekind == RTE_RELATION && sjinfo->semi_can_btree &&
1724  relation_has_unique_index_for(root, rel, NIL,
1725  sjinfo->semi_rhs_exprs,
1726  sjinfo->semi_operators))
1727  {
1728  pathnode->umethod = UNIQUE_PATH_NOOP;
1729  pathnode->path.rows = rel->rows;
1730  pathnode->path.startup_cost = subpath->startup_cost;
1731  pathnode->path.total_cost = subpath->total_cost;
1732  pathnode->path.pathkeys = subpath->pathkeys;
1733 
1734  rel->cheapest_unique_path = (Path *) pathnode;
1735 
1736  MemoryContextSwitchTo(oldcontext);
1737 
1738  return pathnode;
1739  }
1740 
1741  /*
1742  * If the input is a subquery whose output must be unique already, then we
1743  * don't need to do anything. The test for uniqueness has to consider
1744  * exactly which columns we are extracting; for example "SELECT DISTINCT
1745  * x,y" doesn't guarantee that x alone is distinct. So we cannot check for
1746  * this optimization unless semi_rhs_exprs consists only of simple Vars
1747  * referencing subquery outputs. (Possibly we could do something with
1748  * expressions in the subquery outputs, too, but for now keep it simple.)
1749  */
1750  if (rel->rtekind == RTE_SUBQUERY)
1751  {
1752  RangeTblEntry *rte = planner_rt_fetch(rel->relid, root);
1753 
1754  if (query_supports_distinctness(rte->subquery))
1755  {
1756  List *sub_tlist_colnos;
1757 
1758  sub_tlist_colnos = translate_sub_tlist(sjinfo->semi_rhs_exprs,
1759  rel->relid);
1760 
1761  if (sub_tlist_colnos &&
1762  query_is_distinct_for(rte->subquery,
1763  sub_tlist_colnos,
1764  sjinfo->semi_operators))
1765  {
1766  pathnode->umethod = UNIQUE_PATH_NOOP;
1767  pathnode->path.rows = rel->rows;
1768  pathnode->path.startup_cost = subpath->startup_cost;
1769  pathnode->path.total_cost = subpath->total_cost;
1770  pathnode->path.pathkeys = subpath->pathkeys;
1771 
1772  rel->cheapest_unique_path = (Path *) pathnode;
1773 
1774  MemoryContextSwitchTo(oldcontext);
1775 
1776  return pathnode;
1777  }
1778  }
1779  }
1780 
1781  /* Estimate number of output rows */
1782  pathnode->path.rows = estimate_num_groups(root,
1783  sjinfo->semi_rhs_exprs,
1784  rel->rows,
1785  NULL,
1786  NULL);
1787  numCols = list_length(sjinfo->semi_rhs_exprs);
1788 
1789  if (sjinfo->semi_can_btree)
1790  {
1791  /*
1792  * Estimate cost for sort+unique implementation
1793  */
1794  cost_sort(&sort_path, root, NIL,
1795  subpath->total_cost,
1796  rel->rows,
1797  subpath->pathtarget->width,
1798  0.0,
1799  work_mem,
1800  -1.0);
1801 
1802  /*
1803  * Charge one cpu_operator_cost per comparison per input tuple. We
1804  * assume all columns get compared at most of the tuples. (XXX
1805  * probably this is an overestimate.) This should agree with
1806  * create_upper_unique_path.
1807  */
1808  sort_path.total_cost += cpu_operator_cost * rel->rows * numCols;
1809  }
1810 
1811  if (sjinfo->semi_can_hash)
1812  {
1813  /*
1814  * Estimate the overhead per hashtable entry at 64 bytes (same as in
1815  * planner.c).
1816  */
1817  int hashentrysize = subpath->pathtarget->width + 64;
1818 
1819  if (hashentrysize * pathnode->path.rows > get_hash_memory_limit())
1820  {
1821  /*
1822  * We should not try to hash. Hack the SpecialJoinInfo to
1823  * remember this, in case we come through here again.
1824  */
1825  sjinfo->semi_can_hash = false;
1826  }
1827  else
1828  cost_agg(&agg_path, root,
1829  AGG_HASHED, NULL,
1830  numCols, pathnode->path.rows,
1831  NIL,
1832  subpath->startup_cost,
1833  subpath->total_cost,
1834  rel->rows,
1835  subpath->pathtarget->width);
1836  }
1837 
1838  if (sjinfo->semi_can_btree && sjinfo->semi_can_hash)
1839  {
1840  if (agg_path.total_cost < sort_path.total_cost)
1841  pathnode->umethod = UNIQUE_PATH_HASH;
1842  else
1843  pathnode->umethod = UNIQUE_PATH_SORT;
1844  }
1845  else if (sjinfo->semi_can_btree)
1846  pathnode->umethod = UNIQUE_PATH_SORT;
1847  else if (sjinfo->semi_can_hash)
1848  pathnode->umethod = UNIQUE_PATH_HASH;
1849  else
1850  {
1851  /* we can get here only if we abandoned hashing above */
1852  MemoryContextSwitchTo(oldcontext);
1853  return NULL;
1854  }
1855 
1856  if (pathnode->umethod == UNIQUE_PATH_HASH)
1857  {
1858  pathnode->path.startup_cost = agg_path.startup_cost;
1859  pathnode->path.total_cost = agg_path.total_cost;
1860  }
1861  else
1862  {
1863  pathnode->path.startup_cost = sort_path.startup_cost;
1864  pathnode->path.total_cost = sort_path.total_cost;
1865  }
1866 
1867  rel->cheapest_unique_path = (Path *) pathnode;
1868 
1869  MemoryContextSwitchTo(oldcontext);
1870 
1871  return pathnode;
1872 }
bool query_is_distinct_for(Query *query, List *colnos, List *opids)
bool query_supports_distinctness(Query *query)
Definition: analyzejoins.c:998
bool relation_has_unique_index_for(PlannerInfo *root, RelOptInfo *rel, List *restrictlist, List *exprlist, List *oprlist)
Definition: indxpath.c:3440
MemoryContext GetMemoryChunkContext(void *pointer)
Definition: mcxt.c:707
size_t get_hash_memory_limit(void)
Definition: nodeHash.c:3595
#define copyObject(obj)
Definition: nodes.h:224
@ JOIN_SEMI
Definition: nodes.h:307
static List * translate_sub_tlist(List *tlist, int relid)
Definition: pathnode.c:1946
@ UNIQUE_PATH_SORT
Definition: pathnodes.h:2011
@ UNIQUE_PATH_NOOP
Definition: pathnodes.h:2009
@ UNIQUE_PATH_HASH
Definition: pathnodes.h:2010
#define planner_rt_fetch(rti, root)
Definition: pathnodes.h:560
MemoryContextSwitchTo(old_ctx)
double estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, List **pgset, EstimationInfo *estinfo)
Definition: selfuncs.c:3416
Query * subquery
Definition: parsenodes.h:1114
List * semi_rhs_exprs
Definition: pathnodes.h:2897
Relids syn_righthand
Definition: pathnodes.h:2885
List * semi_operators
Definition: pathnodes.h:2896
Path * subpath
Definition: pathnodes.h:2017
List * uniq_exprs
Definition: pathnodes.h:2020
UniquePathMethod umethod
Definition: pathnodes.h:2018
List * in_operators
Definition: pathnodes.h:2019

References AGG_HASHED, Assert, bms_equal(), RelOptInfo::cheapest_total_path, RelOptInfo::cheapest_unique_path, RelOptInfo::consider_parallel, copyObject, cost_agg(), cost_sort(), cpu_operator_cost, estimate_num_groups(), get_hash_memory_limit(), GetMemoryChunkContext(), UniquePath::in_operators, JOIN_SEMI, SpecialJoinInfo::jointype, list_length(), makeNode, MemoryContextSwitchTo(), NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, UniquePath::path, Path::pathkeys, Path::pathtype, planner_rt_fetch, query_is_distinct_for(), query_supports_distinctness(), relation_has_unique_index_for(), RelOptInfo::relid, RelOptInfo::relids, RelOptInfo::reltarget, root, RelOptInfo::rows, Path::rows, RTE_RELATION, RTE_SUBQUERY, RelOptInfo::rtekind, SpecialJoinInfo::semi_can_btree, SpecialJoinInfo::semi_can_hash, SpecialJoinInfo::semi_operators, SpecialJoinInfo::semi_rhs_exprs, Path::startup_cost, subpath(), UniquePath::subpath, RangeTblEntry::subquery, SpecialJoinInfo::syn_righthand, Path::total_cost, translate_sub_tlist(), UniquePath::umethod, UniquePath::uniq_exprs, UNIQUE_PATH_HASH, UNIQUE_PATH_NOOP, UNIQUE_PATH_SORT, and work_mem.

Referenced by consider_parallel_nestloop(), hash_inner_and_outer(), join_is_legal(), match_unsorted_outer(), populate_joinrel_with_paths(), and sort_inner_and_outer().
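
For orientation, a hedged sketch of the call pattern used by the join-planning callers listed above: pass the rel's cheapest_total_path (as the Assert requires) and treat a NULL result as "this semijoin's inner side cannot be unique-ified".

/* Hypothetical sketch of a join-planning caller.  "root", "innerrel" and a
 * JOIN_SEMI "sjinfo" are assumed to be supplied by surrounding code. */
#include "postgres.h"
#include "optimizer/pathnode.h"

static bool
try_unique_inner(PlannerInfo *root, RelOptInfo *innerrel,
                 SpecialJoinInfo *sjinfo)
{
    UniquePath *upath;

    upath = create_unique_path(root, innerrel,
                               innerrel->cheapest_total_path, sjinfo);
    if (upath == NULL)
        return false;           /* neither btree nor hash unique-ification */

    /* the caller would now build JOIN_UNIQUE_INNER joins from &upath->path */
    return true;
}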

◆ create_upper_unique_path()

UpperUniquePath* create_upper_unique_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
int  numCols,
double  numGroups 
)

Definition at line 3103 of file pathnode.c.

3108 {
3109  UpperUniquePath *pathnode = makeNode(UpperUniquePath);
3110 
3111  pathnode->path.pathtype = T_Unique;
3112  pathnode->path.parent = rel;
3113  /* Unique doesn't project, so use source path's pathtarget */
3114  pathnode->path.pathtarget = subpath->pathtarget;
3115  /* For now, assume we are above any joins, so no parameterization */
3116  pathnode->path.param_info = NULL;
3117  pathnode->path.parallel_aware = false;
3118  pathnode->path.parallel_safe = rel->consider_parallel &&
3119  subpath->parallel_safe;
3120  pathnode->path.parallel_workers = subpath->parallel_workers;
3121  /* Unique doesn't change the input ordering */
3122  pathnode->path.pathkeys = subpath->pathkeys;
3123 
3124  pathnode->subpath = subpath;
3125  pathnode->numkeys = numCols;
3126 
3127  /*
3128  * Charge one cpu_operator_cost per comparison per input tuple. We assume
3129  * all columns get compared at most of the tuples. (XXX probably this is
3130  * an overestimate.)
3131  */
3132  pathnode->path.startup_cost = subpath->startup_cost;
3133  pathnode->path.total_cost = subpath->total_cost +
3134  cpu_operator_cost * subpath->rows * numCols;
3135  pathnode->path.rows = numGroups;
3136 
3137  return pathnode;
3138 }

References RelOptInfo::consider_parallel, cpu_operator_cost, makeNode, UpperUniquePath::numkeys, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, UpperUniquePath::path, Path::pathkeys, Path::pathtype, Path::rows, Path::startup_cost, subpath(), UpperUniquePath::subpath, and Path::total_cost.

Referenced by create_final_distinct_paths(), create_partial_distinct_paths(), and generate_union_paths().
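
The cost arithmetic above is easy to verify by hand; a standalone sketch with invented input figures (only cpu_operator_cost matches the default GUC value) reproduces it.

/* Standalone sketch of the Unique costing shown above; inputs are made up. */
#include <stdio.h>

int
main(void)
{
    double      cpu_operator_cost = 0.0025;     /* PostgreSQL default */
    double      sub_startup = 10.0;             /* invented subpath costs */
    double      sub_total = 250.0;
    double      sub_rows = 10000.0;
    int         numCols = 2;

    double      startup_cost = sub_startup;
    double      total_cost = sub_total + cpu_operator_cost * sub_rows * numCols;

    printf("startup=%.2f total=%.2f\n", startup_cost, total_cost);   /* 10.00, 300.00 */
    return 0;
}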

◆ create_valuesscan_path()

Path* create_valuesscan_path ( PlannerInfo *root,
RelOptInfo *rel,
Relids  required_outer 
)

Definition at line 2098 of file pathnode.c.

2100 {
2101  Path *pathnode = makeNode(Path);
2102 
2103  pathnode->pathtype = T_ValuesScan;
2104  pathnode->parent = rel;
2105  pathnode->pathtarget = rel->reltarget;
2106  pathnode->param_info = get_baserel_parampathinfo(root, rel,
2107  required_outer);
2108  pathnode->parallel_aware = false;
2109  pathnode->parallel_safe = rel->consider_parallel;
2110  pathnode->parallel_workers = 0;
2111  pathnode->pathkeys = NIL; /* result is always unordered */
2112 
2113  cost_valuesscan(pathnode, root, rel, pathnode->param_info);
2114 
2115  return pathnode;
2116 }
void cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1648

References RelOptInfo::consider_parallel, cost_valuesscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_values_pathlist().

◆ create_windowagg_path()

WindowAggPath* create_windowagg_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
PathTarget *target,
List *windowFuncs,
WindowClause *winclause,
List *qual,
bool  topwindow 
)

Definition at line 3484 of file pathnode.c.

3492 {
3493  WindowAggPath *pathnode = makeNode(WindowAggPath);
3494 
3495  /* qual can only be set for the topwindow */
3496  Assert(qual == NIL || topwindow);
3497 
3498  pathnode->path.pathtype = T_WindowAgg;
3499  pathnode->path.parent = rel;
3500  pathnode->path.pathtarget = target;
3501  /* For now, assume we are above any joins, so no parameterization */
3502  pathnode->path.param_info = NULL;
3503  pathnode->path.parallel_aware = false;
3504  pathnode->path.parallel_safe = rel->consider_parallel &&
3505  subpath->parallel_safe;
3506  pathnode->path.parallel_workers = subpath->parallel_workers;
3507  /* WindowAgg preserves the input sort order */
3508  pathnode->path.pathkeys = subpath->pathkeys;
3509 
3510  pathnode->subpath = subpath;
3511  pathnode->winclause = winclause;
3512  pathnode->qual = qual;
3513  pathnode->topwindow = topwindow;
3514 
3515  /*
3516  * For costing purposes, assume that there are no redundant partitioning
3517  * or ordering columns; it's not worth the trouble to deal with that
3518  * corner case here. So we just pass the unmodified list lengths to
3519  * cost_windowagg.
3520  */
3521  cost_windowagg(&pathnode->path, root,
3522  windowFuncs,
3523  winclause,
3524  subpath->startup_cost,
3525  subpath->total_cost,
3526  subpath->rows);
3527 
3528  /* add tlist eval cost for each output row */
3529  pathnode->path.startup_cost += target->cost.startup;
3530  pathnode->path.total_cost += target->cost.startup +
3531  target->cost.per_tuple * pathnode->path.rows;
3532 
3533  return pathnode;
3534 }
void cost_windowagg(Path *path, PlannerInfo *root, List *windowFuncs, WindowClause *winclause, Cost input_startup_cost, Cost input_total_cost, double input_tuples)
Definition: costsize.c:3068
Path * subpath
Definition: pathnodes.h:2308
WindowClause * winclause
Definition: pathnodes.h:2309

References Assert, RelOptInfo::consider_parallel, PathTarget::cost, cost_windowagg(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, WindowAggPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, WindowAggPath::qual, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), WindowAggPath::subpath, WindowAggPath::topwindow, Path::total_cost, and WindowAggPath::winclause.

Referenced by create_one_window_path().
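
The final cost assembly (the cost_windowagg() result plus the tlist-eval surcharge added at the end) can likewise be checked with a standalone arithmetic sketch; all figures below are invented.

/* Standalone sketch of the tlist-eval surcharge applied above. */
#include <stdio.h>

int
main(void)
{
    double      path_startup = 15.0;    /* pretend cost_windowagg() results */
    double      path_total = 480.0;
    double      path_rows = 1000.0;
    double      tlist_startup = 2.0;    /* pretend target->cost */
    double      tlist_per_tuple = 0.01;

    path_startup += tlist_startup;
    path_total += tlist_startup + tlist_per_tuple * path_rows;

    printf("startup=%.2f total=%.2f\n", path_startup, path_total);   /* 17.00, 492.00 */
    return 0;
}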

◆ create_worktablescan_path()

Path* create_worktablescan_path ( PlannerInfo *root,
RelOptInfo *rel,
Relids  required_outer 
)

Definition at line 2202 of file pathnode.c.

2204 {
2205  Path *pathnode = makeNode(Path);
2206 
2207  pathnode->pathtype = T_WorkTableScan;
2208  pathnode->parent = rel;
2209  pathnode->pathtarget = rel->reltarget;
2210  pathnode->param_info = get_baserel_parampathinfo(root, rel,
2211  required_outer);
2212  pathnode->parallel_aware = false;
2213  pathnode->parallel_safe = rel->consider_parallel;
2214  pathnode->parallel_workers = 0;
2215  pathnode->pathkeys = NIL; /* result is always unordered */
2216 
2217  /* Cost is the same as for a regular CTE scan */
2218  cost_ctescan(pathnode, root, rel, pathnode->param_info);
2219 
2220  return pathnode;
2221 }

References RelOptInfo::consider_parallel, cost_ctescan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_worktable_pathlist().

◆ expand_planner_arrays()

void expand_planner_arrays ( PlannerInfo *root,
int  add_size 
)

Definition at line 163 of file relnode.c.

164 {
165  int new_size;
166 
167  Assert(add_size > 0);
168 
169  new_size = root->simple_rel_array_size + add_size;
170 
171  root->simple_rel_array =
172  repalloc0_array(root->simple_rel_array, RelOptInfo *, root->simple_rel_array_size, new_size);
173 
174  root->simple_rte_array =
175  repalloc0_array(root->simple_rte_array, RangeTblEntry *, root->simple_rel_array_size, new_size);
176 
177  if (root->append_rel_array)
178  root->append_rel_array =
179  repalloc0_array(root->append_rel_array, AppendRelInfo *, root->simple_rel_array_size, new_size);
180  else
181  root->append_rel_array =
182  palloc0_array(AppendRelInfo *, new_size);
183 
184  root->simple_rel_array_size = new_size;
185 }
#define palloc0_array(type, count)
Definition: fe_memutils.h:65
#define repalloc0_array(pointer, type, oldcount, count)
Definition: palloc.h:109
Size add_size(Size s1, Size s2)
Definition: shmem.c:493

References add_size(), Assert, palloc0_array, repalloc0_array, and root.

Referenced by expand_inherited_rtentry(), and expand_partitioned_rtentry().
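
A hedged sketch of the call pattern: when inheritance or partition expansion discovers extra child RTEs, it widens the planner arrays once before installing the new RT indexes (the new slots come back zero-initialized).

/* Hypothetical sketch: reserve "num_children" extra RT-index slots. */
#include "postgres.h"
#include "optimizer/pathnode.h"

static void
reserve_child_slots(PlannerInfo *root, int num_children)
{
    if (num_children > 0)
        expand_planner_arrays(root, num_children);
    /* root->simple_rel_array_size has now grown by num_children */
}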

◆ fetch_upper_rel()

RelOptInfo* fetch_upper_rel ( PlannerInfo *root,
UpperRelationKind  kind,
Relids  relids 
)

Definition at line 1470 of file relnode.c.

1471 {
1472  RelOptInfo *upperrel;
1473  ListCell *lc;
1474 
1475  /*
1476  * For the moment, our indexing data structure is just a List for each
1477  * relation kind. If we ever get so many of one kind that this stops
1478  * working well, we can improve it. No code outside this function should
1479  * assume anything about how to find a particular upperrel.
1480  */
1481 
1482  /* If we already made this upperrel for the query, return it */
1483  foreach(lc, root->upper_rels[kind])
1484  {
1485  upperrel = (RelOptInfo *) lfirst(lc);
1486 
1487  if (bms_equal(upperrel->relids, relids))
1488  return upperrel;
1489  }
1490 
1491  upperrel = makeNode(RelOptInfo);
1492  upperrel->reloptkind = RELOPT_UPPER_REL;
1493  upperrel->relids = bms_copy(relids);
1494 
1495  /* cheap startup cost is interesting iff not all tuples to be retrieved */
1496  upperrel->consider_startup = (root->tuple_fraction > 0);
1497  upperrel->consider_param_startup = false;
1498  upperrel->consider_parallel = false; /* might get changed later */
1499  upperrel->reltarget = create_empty_pathtarget();
1500  upperrel->pathlist = NIL;
1501  upperrel->cheapest_startup_path = NULL;
1502  upperrel->cheapest_total_path = NULL;
1503  upperrel->cheapest_unique_path = NULL;
1504  upperrel->cheapest_parameterized_paths = NIL;
1505 
1506  root->upper_rels[kind] = lappend(root->upper_rels[kind], upperrel);
1507 
1508  return upperrel;
1509 }
@ RELOPT_UPPER_REL
Definition: pathnodes.h:821

References bms_copy(), bms_equal(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RelOptInfo::cheapest_unique_path, RelOptInfo::consider_parallel, RelOptInfo::consider_param_startup, RelOptInfo::consider_startup, create_empty_pathtarget(), lappend(), lfirst, makeNode, NIL, RelOptInfo::pathlist, RelOptInfo::relids, RELOPT_UPPER_REL, RelOptInfo::reloptkind, RelOptInfo::reltarget, and root.

Referenced by add_rtes_to_flat_rtable(), build_setop_child_paths(), create_distinct_paths(), create_ordered_paths(), create_partial_distinct_paths(), create_partial_grouping_paths(), create_window_paths(), generate_nonunion_paths(), generate_recursion_path(), generate_union_paths(), grouping_planner(), make_grouping_rel(), make_subplan(), preprocess_minmax_aggregates(), set_subquery_pathlist(), set_subquery_size_estimates(), SS_process_ctes(), standard_planner(), and subquery_planner().
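
A hedged sketch of a typical lookup: passing NULL relids fetches the degenerate all-query upper rel of the given kind, and repeated calls return the same cached RelOptInfo.

/* Hypothetical sketch: fetch (creating if needed) the final upper rel. */
#include "postgres.h"
#include "optimizer/pathnode.h"

static RelOptInfo *
get_final_rel(PlannerInfo *root)
{
    RelOptInfo *final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);

    /* a second call finds the entry made by the first one */
    Assert(fetch_upper_rel(root, UPPERREL_FINAL, NULL) == final_rel);

    return final_rel;
}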

◆ find_base_rel()

◆ find_base_rel_ignore_join()

RelOptInfo* find_base_rel_ignore_join ( PlannerInfo *root,
int  relid 
)

Definition at line 454 of file relnode.c.

455 {
456  /* use an unsigned comparison to prevent negative array element access */
457  if ((uint32) relid < (uint32) root->simple_rel_array_size)
458  {
459  RelOptInfo *rel;
460  RangeTblEntry *rte;
461 
462  rel = root->simple_rel_array[relid];
463  if (rel)
464  return rel;
465 
466  /*
467  * We could just return NULL here, but for debugging purposes it seems
468  * best to actually verify that the relid is an outer join and not
469  * something weird.
470  */
471  rte = root->simple_rte_array[relid];
472  if (rte && rte->rtekind == RTE_JOIN && rte->jointype != JOIN_INNER)
473  return NULL;
474  }
475 
476  elog(ERROR, "no relation entry for relid %d", relid);
477 
478  return NULL; /* keep compiler quiet */
479 }
JoinType jointype
Definition: parsenodes.h:1161

References elog, ERROR, JOIN_INNER, RangeTblEntry::jointype, root, RTE_JOIN, and RangeTblEntry::rtekind.

Referenced by add_join_clause_to_rels(), create_lateral_join_info(), find_appinfos_by_relids(), and remove_join_clause_from_rels().

◆ find_base_rel_noerr()

RelOptInfo* find_base_rel_noerr ( PlannerInfo *root,
int  relid 
)

Definition at line 436 of file relnode.c.

437 {
438  /* use an unsigned comparison to prevent negative array element access */
439  if ((uint32) relid < (uint32) root->simple_rel_array_size)
440  return root->simple_rel_array[relid];
441  return NULL;
442 }

References root.

Referenced by examine_simple_variable().
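
A hedged usage sketch: the _noerr variant is for callers that may probe a relid with no RelOptInfo built yet and prefer a NULL result to an error.

/* Hypothetical sketch: probe for a baserel without risking elog(ERROR). */
#include "postgres.h"
#include "optimizer/pathnode.h"

static bool
baserel_exists(PlannerInfo *root, int relid)
{
    RelOptInfo *rel = find_base_rel_noerr(root, relid);

    /* NULL means the array slot is empty or relid is out of range */
    return rel != NULL;
}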

◆ find_childrel_parents()

Relids find_childrel_parents ( PlannerInfo *root,
RelOptInfo *rel 
)

Definition at line 1521 of file relnode.c.

1522 {
1523  Relids result = NULL;
1524 
1525  Assert(rel->reloptkind == RELOPT_OTHER_MEMBER_REL);
1526  Assert(rel->relid > 0 && rel->relid < root->simple_rel_array_size);
1527 
1528  do
1529  {
1530  AppendRelInfo *appinfo = root->append_rel_array[rel->relid];
1531  Index prelid = appinfo->parent_relid;
1532 
1533  result = bms_add_member(result, prelid);
1534 
1535  /* traverse up to the parent rel, loop if it's also a child rel */
1536  rel = find_base_rel(root, prelid);
1537  } while (rel->reloptkind == RELOPT_OTHER_MEMBER_REL);
1538 
1539  Assert(rel->reloptkind == RELOPT_BASEREL);
1540 
1541  return result;
1542 }
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:815
unsigned int Index
Definition: c.h:614
RelOptInfo * find_base_rel(PlannerInfo *root, int relid)
Definition: relnode.c:414
Index parent_relid
Definition: pathnodes.h:2957

References Assert, bms_add_member(), find_base_rel(), AppendRelInfo::parent_relid, RelOptInfo::relid, RELOPT_BASEREL, RELOPT_OTHER_MEMBER_REL, RelOptInfo::reloptkind, and root.

Referenced by check_index_predicates(), and generate_implied_equalities_for_column().

◆ find_join_rel()

RelOptInfo* find_join_rel ( PlannerInfo *root,
Relids  relids 
)

Definition at line 527 of file relnode.c.

528 {
529  /*
530  * Switch to using hash lookup when list grows "too long". The threshold
531  * is arbitrary and is known only here.
532  */
533  if (!root->join_rel_hash && list_length(root->join_rel_list) > 32)
534  build_join_rel_hash(root);
535 
536  /*
537  * Use either hashtable lookup or linear search, as appropriate.
538  *
539  * Note: the seemingly redundant hashkey variable is used to avoid taking
540  * the address of relids; unless the compiler is exceedingly smart, doing
541  * so would force relids out of a register and thus probably slow down the
542  * list-search case.
543  */
544  if (root->join_rel_hash)
545  {
546  Relids hashkey = relids;
547  JoinHashEntry *hentry;
548 
549  hentry = (JoinHashEntry *) hash_search(root->join_rel_hash,
550  &hashkey,
551  HASH_FIND,
552  NULL);
553  if (hentry)
554  return hentry->join_rel;
555  }
556  else
557  {
558  ListCell *l;
559 
560  foreach(l, root->join_rel_list)
561  {
562  RelOptInfo *rel = (RelOptInfo *) lfirst(l);
563 
564  if (bms_equal(rel->relids, relids))
565  return rel;
566  }
567  }
568 
569  return NULL;
570 }
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:955
@ HASH_FIND
Definition: hsearch.h:113
static void build_join_rel_hash(PlannerInfo *root)
Definition: relnode.c:486
RelOptInfo * join_rel
Definition: relnode.c:41

References bms_equal(), build_join_rel_hash(), HASH_FIND, hash_search(), JoinHashEntry::join_rel, lfirst, list_length(), RelOptInfo::relids, and root.

Referenced by build_child_join_rel(), build_join_rel(), examine_variable(), find_join_input_rel(), get_matching_part_pairs(), and postgresPlanDirectModify().
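
A hedged sketch of the lookup pattern used by the callers above: compute the relid set of interest and ask whether a joinrel for exactly that set has been built yet (NULL if not).

/* Hypothetical sketch: look up, but do not create, the joinrel for a pair. */
#include "postgres.h"
#include "nodes/bitmapset.h"
#include "optimizer/pathnode.h"

static RelOptInfo *
lookup_joinrel_for_pair(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2)
{
    Relids      joinrelids = bms_union(rel1->relids, rel2->relids);
    RelOptInfo *joinrel = find_join_rel(root, joinrelids);

    bms_free(joinrelids);
    return joinrel;
}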

◆ find_param_path_info()

ParamPathInfo* find_param_path_info ( RelOptInfo *rel,
Relids  required_outer 
)

Definition at line 1901 of file relnode.c.

1902 {
1903  ListCell *lc;
1904 
1905  foreach(lc, rel->ppilist)
1906  {
1907  ParamPathInfo *ppi = (ParamPathInfo *) lfirst(lc);
1908 
1909  if (bms_equal(ppi->ppi_req_outer, required_outer))
1910  return ppi;
1911  }
1912 
1913  return NULL;
1914 }
Relids ppi_req_outer
Definition: pathnodes.h:1568

References bms_equal(), lfirst, ParamPathInfo::ppi_req_outer, and RelOptInfo::ppilist.

Referenced by get_appendrel_parampathinfo(), get_baserel_parampathinfo(), get_joinrel_parampathinfo(), and reparameterize_path_by_child().

◆ get_appendrel_parampathinfo()

ParamPathInfo* get_appendrel_parampathinfo ( RelOptInfo *appendrel,
Relids  required_outer 
)

Definition at line 1868 of file relnode.c.

1869 {
1870  ParamPathInfo *ppi;
1871 
1872  /* If rel has LATERAL refs, every path for it should account for them */
1873  Assert(bms_is_subset(appendrel->lateral_relids, required_outer));
1874 
1875  /* Unparameterized paths have no ParamPathInfo */
1876  if (bms_is_empty(required_outer))
1877  return NULL;
1878 
1879  Assert(!bms_overlap(appendrel->relids, required_outer));
1880 
1881  /* If we already have a PPI for this parameterization, just return it */
1882  if ((ppi = find_param_path_info(appendrel, required_outer)))
1883  return ppi;
1884 
1885  /* Else build the ParamPathInfo */
1886  ppi = makeNode(ParamPathInfo);
1887  ppi->ppi_req_outer = required_outer;
1888  ppi->ppi_rows = 0;
1889  ppi->ppi_clauses = NIL;
1890  ppi->ppi_serials = NULL;
1891  appendrel->ppilist = lappend(appendrel->ppilist, ppi);
1892 
1893  return ppi;
1894 }
bool bms_is_subset(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:412
ParamPathInfo * find_param_path_info(RelOptInfo *rel, Relids required_outer)
Definition: relnode.c:1901
Cardinality ppi_rows
Definition: pathnodes.h:1569
List * ppi_clauses
Definition: pathnodes.h:1570
Bitmapset * ppi_serials
Definition: pathnodes.h:1571

References Assert, bms_is_empty, bms_is_subset(), bms_overlap(), find_param_path_info(), lappend(), RelOptInfo::lateral_relids, makeNode, NIL, ParamPathInfo::ppi_clauses, ParamPathInfo::ppi_req_outer, ParamPathInfo::ppi_rows, ParamPathInfo::ppi_serials, RelOptInfo::ppilist, and RelOptInfo::relids.

Referenced by create_append_path(), and create_merge_append_path().

◆ get_baserel_parampathinfo()

ParamPathInfo* get_baserel_parampathinfo ( PlannerInfo *root,
RelOptInfo *baserel,
Relids  required_outer 
)

Definition at line 1557 of file relnode.c.

1559 {
1560  ParamPathInfo *ppi;
1561  Relids joinrelids;
1562  List *pclauses;
1563  List *eqclauses;
1564  Bitmapset *pserials;
1565  double rows;
1566  ListCell *lc;
1567 
1568  /* If rel has LATERAL refs, every path for it should account for them */
1569  Assert(bms_is_subset(baserel->lateral_relids, required_outer));
1570 
1571  /* Unparameterized paths have no ParamPathInfo */
1572  if (bms_is_empty(required_outer))
1573  return NULL;
1574 
1575  Assert(!bms_overlap(baserel->relids, required_outer));
1576 
1577  /* If we already have a PPI for this parameterization, just return it */
1578  if ((ppi = find_param_path_info(baserel, required_outer)))
1579  return ppi;
1580 
1581  /*
1582  * Identify all joinclauses that are movable to this base rel given this
1583  * parameterization.
1584  */
1585  joinrelids = bms_union(baserel->relids, required_outer);
1586  pclauses = NIL;
1587  foreach(lc, baserel->joininfo)
1588  {
1589  RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1590 
1591  if (join_clause_is_movable_into(rinfo,
1592  baserel->relids,
1593  joinrelids))
1594  pclauses = lappend(pclauses, rinfo);
1595  }
1596 
1597  /*
1598  * Add in joinclauses generated by EquivalenceClasses, too. (These
1599  * necessarily satisfy join_clause_is_movable_into; but in assert-enabled
1600  * builds, let's verify that.)
1601  */
1602  eqclauses = generate_join_implied_equalities(root,
1603  joinrelids,
1604  required_outer,
1605  baserel,
1606  NULL);
1607 #ifdef USE_ASSERT_CHECKING
1608  foreach(lc, eqclauses)
1609  {
1610  RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1611 
1612  Assert(join_clause_is_movable_into(rinfo,
1613  baserel->relids,
1614  joinrelids));
1615  }
1616 #endif
1617  pclauses = list_concat(pclauses, eqclauses);
1618 
1619  /* Compute set of serial numbers of the enforced clauses */
1620  pserials = NULL;
1621  foreach(lc, pclauses)
1622  {
1623  RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1624 
1625  pserials = bms_add_member(pserials, rinfo->rinfo_serial);
1626  }
1627 
1628  /* Estimate the number of rows returned by the parameterized scan */
1629  rows = get_parameterized_baserel_size(root, baserel, pclauses);
1630 
1631  /* And now we can build the ParamPathInfo */
1632  ppi = makeNode(ParamPathInfo);
1633  ppi->ppi_req_outer = required_outer;
1634  ppi->ppi_rows = rows;
1635  ppi->ppi_clauses = pclauses;
1636  ppi->ppi_serials = pserials;
1637  baserel->ppilist = lappend(baserel->ppilist, ppi);
1638 
1639  return ppi;
1640 }
double get_parameterized_baserel_size(PlannerInfo *root, RelOptInfo *rel, List *param_clauses)
Definition: costsize.c:5269
List * generate_join_implied_equalities(PlannerInfo *root, Relids join_relids, Relids outer_relids, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo)
Definition: equivclass.c:1392
bool join_clause_is_movable_into(RestrictInfo *rinfo, Relids currentrelids, Relids current_and_outer)
Definition: restrictinfo.c:670

References Assert, bms_add_member(), bms_is_empty, bms_is_subset(), bms_overlap(), bms_union(), find_param_path_info(), generate_join_implied_equalities(), get_parameterized_baserel_size(), join_clause_is_movable_into(), RelOptInfo::joininfo, lappend(), RelOptInfo::lateral_relids, lfirst, list_concat(), makeNode, NIL, ParamPathInfo::ppi_clauses, ParamPathInfo::ppi_req_outer, ParamPathInfo::ppi_rows, ParamPathInfo::ppi_serials, RelOptInfo::ppilist, RelOptInfo::relids, RestrictInfo::rinfo_serial, and root.

Referenced by create_append_path(), create_bitmap_and_path(), create_bitmap_heap_path(), create_bitmap_or_path(), create_ctescan_path(), create_foreignscan_path(), create_functionscan_path(), create_gather_merge_path(), create_gather_path(), create_index_path(), create_namedtuplestorescan_path(), create_resultscan_path(), create_samplescan_path(), create_seqscan_path(), create_subqueryscan_path(), create_tablefuncscan_path(), create_tidrangescan_path(), create_tidscan_path(), create_valuesscan_path(), create_worktablescan_path(), postgresGetForeignPaths(), and reparameterize_path().
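
The scan-path constructors above all follow the same pattern; a hedged sketch for a hypothetical scan path shows the two things a caller usually takes from the result: the ParamPathInfo itself and its ppi_rows estimate.

/* Hypothetical sketch: attach parameterization info to a new scan path. */
#include "postgres.h"
#include "optimizer/pathnode.h"

static void
fill_param_info(PlannerInfo *root, RelOptInfo *rel, Path *pathnode,
                Relids required_outer)
{
    pathnode->param_info = get_baserel_parampathinfo(root, rel, required_outer);

    if (pathnode->param_info)
        pathnode->rows = pathnode->param_info->ppi_rows;    /* parameterized */
    else
        pathnode->rows = rel->rows;                         /* whole-rel estimate */
}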

◆ get_joinrel_parampathinfo()

ParamPathInfo* get_joinrel_parampathinfo ( PlannerInfo *root,
RelOptInfo *joinrel,
Path *outer_path,
Path *inner_path,
SpecialJoinInfo *sjinfo,
Relids  required_outer,
List **  restrict_clauses 
)

Definition at line 1671 of file relnode.c.

1677 {
1678  ParamPathInfo *ppi;
1679  Relids join_and_req;
1680  Relids outer_and_req;
1681  Relids inner_and_req;
1682  List *pclauses;
1683  List *eclauses;
1684  List *dropped_ecs;
1685  double rows;
1686  ListCell *lc;
1687 
1688  /* If rel has LATERAL refs, every path for it should account for them */
1689  Assert(bms_is_subset(joinrel->lateral_relids, required_outer));
1690 
1691  /* Unparameterized paths have no ParamPathInfo or extra join clauses */
1692  if (bms_is_empty(required_outer))
1693  return NULL;
1694 
1695  Assert(!bms_overlap(joinrel->relids, required_outer));
1696 
1697  /*
1698  * Identify all joinclauses that are movable to this join rel given this
1699  * parameterization. These are the clauses that are movable into this
1700  * join, but not movable into either input path. Treat an unparameterized
1701  * input path as not accepting parameterized clauses (because it won't,
1702  * per the shortcut exit above), even though the joinclause movement rules
1703  * might allow the same clauses to be moved into a parameterized path for
1704  * that rel.
1705  */
1706  join_and_req = bms_union(joinrel->relids, required_outer);
1707  if (outer_path->param_info)
1708  outer_and_req = bms_union(outer_path->parent->relids,
1709  PATH_REQ_OUTER(outer_path));
1710  else
1711  outer_and_req = NULL; /* outer path does not accept parameters */
1712  if (inner_path->param_info)
1713  inner_and_req = bms_union(inner_path->parent->relids,
1714  PATH_REQ_OUTER(inner_path));
1715  else
1716  inner_and_req = NULL; /* inner path does not accept parameters */
1717 
1718  pclauses = NIL;
1719  foreach(lc, joinrel->joininfo)
1720  {
1721  RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1722 
1723  if (join_clause_is_movable_into(rinfo,
1724  joinrel->relids,
1725  join_and_req) &&
1726  !join_clause_is_movable_into(rinfo,
1727  outer_path->parent->relids,
1728  outer_and_req) &&
1729  !join_clause_is_movable_into(rinfo,
1730  inner_path->parent->relids,
1731  inner_and_req))
1732  pclauses = lappend(pclauses, rinfo);
1733  }
1734 
1735  /* Consider joinclauses generated by EquivalenceClasses, too */
1736  eclauses = generate_join_implied_equalities(root,
1737  join_and_req,
1738  required_outer,
1739  joinrel,
1740  NULL);
1741  /* We only want ones that aren't movable to lower levels */
1742  dropped_ecs = NIL;
1743  foreach(lc, eclauses)
1744  {
1745  RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1746 
1747  Assert(join_clause_is_movable_into(rinfo,
1748  joinrel->relids,
1749  join_and_req));
1750  if (join_clause_is_movable_into(rinfo,
1751  outer_path->parent->relids,
1752  outer_and_req))
1753  continue; /* drop if movable into LHS */
1754  if (join_clause_is_movable_into(rinfo,
1755  inner_path->parent->relids,
1756  inner_and_req))
1757  {
1758  /* drop if movable into RHS, but remember EC for use below */
1759  Assert(rinfo->left_ec == rinfo->right_ec);
1760  dropped_ecs = lappend(dropped_ecs, rinfo->left_ec);
1761  continue;
1762  }
1763  pclauses = lappend(pclauses, rinfo);
1764  }
1765 
1766  /*
1767  * EquivalenceClasses are harder to deal with than we could wish, because
1768  * of the fact that a given EC can generate different clauses depending on
1769  * context. Suppose we have an EC {X.X, Y.Y, Z.Z} where X and Y are the
1770  * LHS and RHS of the current join and Z is in required_outer, and further
1771  * suppose that the inner_path is parameterized by both X and Z. The code
1772  * above will have produced either Z.Z = X.X or Z.Z = Y.Y from that EC,
1773  * and in the latter case will have discarded it as being movable into the
1774  * RHS. However, the EC machinery might have produced either Y.Y = X.X or
1775  * Y.Y = Z.Z as the EC enforcement clause within the inner_path; it will
1776  * not have produced both, and we can't readily tell from here which one
1777  * it did pick. If we add no clause to this join, we'll end up with
1778  * insufficient enforcement of the EC; either Z.Z or X.X will fail to be
1779  * constrained to be equal to the other members of the EC. (When we come
1780  * to join Z to this X/Y path, we will certainly drop whichever EC clause
1781  * is generated at that join, so this omission won't get fixed later.)
1782  *
1783  * To handle this, for each EC we discarded such a clause from, try to
1784  * generate a clause connecting the required_outer rels to the join's LHS
1785  * ("Z.Z = X.X" in the terms of the above example). If successful, and if
1786  * the clause can't be moved to the LHS, add it to the current join's
1787  * restriction clauses. (If an EC cannot generate such a clause then it
1788  * has nothing that needs to be enforced here, while if the clause can be
1789  * moved into the LHS then it should have been enforced within that path.)
1790  *
1791  * Note that we don't need similar processing for ECs whose clause was
1792  * considered to be movable into the LHS, because the LHS can't refer to
1793  * the RHS so there is no comparable ambiguity about what it might
1794  * actually be enforcing internally.
1795  */
1796  if (dropped_ecs)
1797  {
1798  Relids real_outer_and_req;
1799 
1800  real_outer_and_req = bms_union(outer_path->parent->relids,
1801  required_outer);
1802  eclauses =
1803  generate_join_implied_equalities_for_ecs(root,
1804  dropped_ecs,
1805  real_outer_and_req,
1806  required_outer,
1807  outer_path->parent);
1808  foreach(lc, eclauses)
1809  {
1810  RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1811 
1812  Assert(join_clause_is_movable_into(rinfo,
1813  outer_path->parent->relids,
1814  real_outer_and_req));
1815  if (!join_clause_is_movable_into(rinfo,
1816  outer_path->parent->relids,
1817  outer_and_req))
1818  pclauses = lappend(pclauses, rinfo);
1819  }
1820  }
1821 
1822  /*
1823  * Now, attach the identified moved-down clauses to the caller's
1824  * restrict_clauses list. By using list_concat in this order, we leave
1825  * the original list structure of restrict_clauses undamaged.
1826  */
1827  *restrict_clauses = list_concat(pclauses, *restrict_clauses);
1828 
1829  /* If we already have a PPI for this parameterization, just return it */
1830  if ((ppi = find_param_path_info(joinrel, required_outer)))
1831  return ppi;
1832 
1833  /* Estimate the number of rows returned by the parameterized join */
1834  rows = get_parameterized_joinrel_size(root, joinrel,
1835  outer_path,
1836  inner_path,
1837  sjinfo,
1838  *restrict_clauses);
1839 
1840  /*
1841  * And now we can build the ParamPathInfo. No point in saving the
1842  * input-pair-dependent clause list, though.
1843  *
1844  * Note: in GEQO mode, we'll be called in a temporary memory context, but
1845  * the joinrel structure is there too, so no problem.
1846  */
1847  ppi = makeNode(ParamPathInfo);
1848  ppi->ppi_req_outer = required_outer;
1849  ppi->ppi_rows = rows;
1850  ppi->ppi_clauses = NIL;
1851  ppi->ppi_serials = NULL;
1852  joinrel->ppilist = lappend(joinrel->ppilist, ppi);
1853 
1854  return ppi;
1855 }
double get_parameterized_joinrel_size(PlannerInfo *root, RelOptInfo *rel, Path *outer_path, Path *inner_path, SpecialJoinInfo *sjinfo, List *restrict_clauses)
Definition: costsize.c:5350
List * generate_join_implied_equalities_for_ecs(PlannerInfo *root, List *eclasses, Relids join_relids, Relids outer_relids, RelOptInfo *inner_rel)
Definition: equivclass.c:1492

References Assert, bms_is_empty, bms_is_subset(), bms_overlap(), bms_union(), find_param_path_info(), generate_join_implied_equalities(), generate_join_implied_equalities_for_ecs(), get_parameterized_joinrel_size(), join_clause_is_movable_into(), RelOptInfo::joininfo, lappend(), RelOptInfo::lateral_relids, lfirst, list_concat(), makeNode, NIL, PATH_REQ_OUTER, ParamPathInfo::ppi_clauses, ParamPathInfo::ppi_req_outer, ParamPathInfo::ppi_rows, ParamPathInfo::ppi_serials, RelOptInfo::ppilist, RelOptInfo::relids, and root.

Referenced by create_hashjoin_path(), create_mergejoin_path(), and create_nestloop_path().
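
A hedged sketch of the call as it appears in the join-path constructors listed above; the important detail is that restrict_clauses is passed by address because the function may prepend moved-down clauses to it.

/* Hypothetical sketch mirroring the join-path constructors. */
#include "postgres.h"
#include "optimizer/pathnode.h"

static ParamPathInfo *
param_info_for_join(PlannerInfo *root, RelOptInfo *joinrel,
                    Path *outer_path, Path *inner_path,
                    SpecialJoinInfo *sjinfo, Relids required_outer,
                    List **restrict_clauses)
{
    /* NULL comes back for an unparameterized join (empty required_outer) */
    return get_joinrel_parampathinfo(root, joinrel,
                                     outer_path, inner_path,
                                     sjinfo, required_outer,
                                     restrict_clauses);
}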

◆ get_param_path_clause_serials()

Bitmapset* get_param_path_clause_serials ( Path *path )

Definition at line 1922 of file relnode.c.

1923 {
1924  if (path->param_info == NULL)
1925  return NULL; /* not parameterized */
1926  if (IsA(path, NestPath) ||
1927  IsA(path, MergePath) ||
1928  IsA(path, HashPath))
1929  {
1930  /*
1931  * For a join path, combine clauses enforced within either input path
1932  * with those enforced as joinrestrictinfo in this path. Note that
1933  * joinrestrictinfo may include some non-pushed-down clauses, but for
1934  * current purposes it's okay if we include those in the result. (To
1935  * be more careful, we could check for clause_relids overlapping the
1936  * path parameterization, but it's not worth the cycles for now.)
1937  */
1938  JoinPath *jpath = (JoinPath *) path;
1939  Bitmapset *pserials;
1940  ListCell *lc;
1941 
1942  pserials = NULL;
1943  pserials = bms_add_members(pserials,
1944  get_param_path_clause_serials(jpath->outerjoinpath));
1945  pserials = bms_add_members(pserials,
1946  get_param_path_clause_serials(jpath->innerjoinpath));
1947  foreach(lc, jpath->joinrestrictinfo)
1948  {
1949  RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1950 
1951  pserials = bms_add_member(pserials, rinfo->rinfo_serial);
1952  }
1953  return pserials;
1954  }
1955  else if (IsA(path, AppendPath))
1956  {
1957  /*
1958  * For an appendrel, take the intersection of the sets of clauses
1959  * enforced in each input path.
1960  */
1961  AppendPath *apath = (AppendPath *) path;
1962  Bitmapset *pserials;
1963  ListCell *lc;
1964 
1965  pserials = NULL;
1966  foreach(lc, apath->subpaths)
1967  {
1968  Path *subpath = (Path *) lfirst(lc);
1969  Bitmapset *subserials;
1970 
1971  subserials = get_param_path_clause_serials(subpath);
1972  if (lc == list_head(apath->subpaths))
1973  pserials = bms_copy(subserials);
1974  else
1975  pserials = bms_int_members(pserials, subserials);
1976  }
1977  return pserials;
1978  }
1979  else if (IsA(path, MergeAppendPath))
1980  {
1981  /* Same as AppendPath case */
1982  MergeAppendPath *apath = (MergeAppendPath *) path;
1983  Bitmapset *pserials;
1984  ListCell *lc;
1985 
1986  pserials = NULL;
1987  foreach(lc, apath->subpaths)
1988  {
1989  Path *subpath = (Path *) lfirst(lc);
1990  Bitmapset *subserials;
1991 
1992  subserials = get_param_path_clause_serials(subpath);
1993  if (lc == list_head(apath->subpaths))
1994  pserials = bms_copy(subserials);
1995  else
1996  pserials = bms_int_members(pserials, subserials);
1997  }
1998  return pserials;
1999  }
2000  else
2001  {
2002  /*
2003  * Otherwise, it's a baserel path and we can use the
2004  * previously-computed set of serial numbers.
2005  */
2006  return path->param_info->ppi_serials;
2007  }
2008 }
Bitmapset * bms_int_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:1109
static ListCell * list_head(const List *l)
Definition: pg_list.h:128

References bms_add_member(), bms_add_members(), bms_copy(), bms_int_members(), JoinPath::innerjoinpath, IsA, JoinPath::joinrestrictinfo, lfirst, list_head(), JoinPath::outerjoinpath, RestrictInfo::rinfo_serial, subpath(), AppendPath::subpaths, and MergeAppendPath::subpaths.

Referenced by create_nestloop_path().

◆ min_join_parameterization()

Relids min_join_parameterization ( PlannerInfo *root,
Relids  joinrelids,
RelOptInfo *outer_rel,
RelOptInfo *inner_rel 
)

Definition at line 1034 of file relnode.c.

1038 {
1039  Relids result;
1040 
1041  /*
1042  * Basically we just need the union of the inputs' lateral_relids, less
1043  * whatever is already in the join.
1044  *
1045  * It's not immediately obvious that this is a valid way to compute the
1046  * result, because it might seem that we're ignoring possible lateral refs
1047  * of PlaceHolderVars that are due to be computed at the join but not in
1048  * either input. However, because create_lateral_join_info() already
1049  * charged all such PHV refs to each member baserel of the join, they'll
1050  * be accounted for already in the inputs' lateral_relids. Likewise, we
1051  * do not need to worry about doing transitive closure here, because that
1052  * was already accounted for in the original baserel lateral_relids.
1053  */
1054  result = bms_union(outer_rel->lateral_relids, inner_rel->lateral_relids);
1055  result = bms_del_members(result, joinrelids);
1056  return result;
1057 }

References bms_del_members(), bms_union(), and RelOptInfo::lateral_relids.

Referenced by build_join_rel(), and join_is_legal().

◆ path_is_reparameterizable_by_child()

bool path_is_reparameterizable_by_child ( Path *path,
RelOptInfo *child_rel 
)

Definition at line 4408 of file pathnode.c.

4409 {
4410 #define REJECT_IF_PATH_NOT_REPARAMETERIZABLE(path) \
4411 do { \
4412  if (!path_is_reparameterizable_by_child(path, child_rel)) \
4413  return false; \
4414 } while(0)
4415 
4416 #define REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(pathlist) \
4417 do { \
4418  if (!pathlist_is_reparameterizable_by_child(pathlist, child_rel)) \
4419  return false; \
4420 } while(0)
4421 
4422  /*
4423  * If the path is not parameterized by the parent of the given relation,
4424  * it doesn't need reparameterization.
4425  */
4426  if (!path->param_info ||
4427  !bms_overlap(PATH_REQ_OUTER(path), child_rel->top_parent_relids))
4428  return true;
4429 
4430  /*
4431  * Check that the path type is one that reparameterize_path_by_child() can
4432  * handle, and recursively check subpaths.
4433  */
4434  switch (nodeTag(path))
4435  {
4436  case T_Path:
4437  case T_IndexPath:
4438  break;
4439 
4440  case T_BitmapHeapPath:
4441  {
4442  BitmapHeapPath *bhpath = (BitmapHeapPath *) path;
4443 
4444  REJECT_IF_PATH_NOT_REPARAMETERIZABLE(bhpath->bitmapqual);
4445  }
4446  break;
4447 
4448  case T_BitmapAndPath:
4449  {
4450  BitmapAndPath *bapath = (BitmapAndPath *) path;
4451 
4452  REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(bapath->bitmapquals);
4453  }
4454  break;
4455 
4456  case T_BitmapOrPath:
4457  {
4458  BitmapOrPath *bopath = (BitmapOrPath *) path;
4459 
4460  REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(bopath->bitmapquals);
4461  }
4462  break;
4463 
4464  case T_ForeignPath:
4465  {
4466  ForeignPath *fpath = (ForeignPath *) path;
4467 
4468  if (fpath->fdw_outerpath)
4469  REJECT_IF_PATH_NOT_REPARAMETERIZABLE(fpath->fdw_outerpath);
4470  }
4471  break;
4472 
4473  case T_CustomPath:
4474  {
4475  CustomPath *cpath = (CustomPath *) path;
4476 
4477  REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(cpath->custom_paths);
4478  }
4479  break;
4480 
4481  case T_NestPath:
4482  case T_MergePath:
4483  case T_HashPath:
4484  {
4485  JoinPath *jpath = (JoinPath *) path;
4486 
4487  REJECT_IF_PATH_NOT_REPARAMETERIZABLE(jpath->outerjoinpath);
4488  REJECT_IF_PATH_NOT_REPARAMETERIZABLE(jpath->innerjoinpath);
4489  }
4490  break;
4491 
4492  case T_AppendPath:
4493  {
4494  AppendPath *apath = (AppendPath *) path;
4495 
4496  REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(apath->subpaths);
4497  }
4498  break;
4499 
4500  case T_MaterialPath:
4501  {
4502  MaterialPath *mpath = (MaterialPath *) path;
4503 
4504  REJECT_IF_PATH_NOT_REPARAMETERIZABLE(mpath->subpath);
4505  }
4506  break;
4507 
4508  case T_MemoizePath:
4509  {
4510  MemoizePath *mpath = (MemoizePath *) path;
4511 
4512  REJECT_IF_PATH_NOT_REPARAMETERIZABLE(mpath->subpath);
4513  }
4514  break;
4515 
4516  case T_GatherPath:
4517  {
4518  GatherPath *gpath = (GatherPath *) path;
4519 
4520  REJECT_IF_PATH_NOT_REPARAMETERIZABLE(gpath->subpath);
4521  }
4522  break;
4523 
4524  default:
4525  /* We don't know how to reparameterize this path. */
4526  return false;
4527  }
4528 
4529  return true;
4530 }
#define nodeTag(nodeptr)
Definition: nodes.h:133
#define REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(pathlist)
#define REJECT_IF_PATH_NOT_REPARAMETERIZABLE(path)
List * custom_paths
Definition: pathnodes.h:1897

References BitmapHeapPath::bitmapqual, BitmapAndPath::bitmapquals, BitmapOrPath::bitmapquals, bms_overlap(), CustomPath::custom_paths, ForeignPath::fdw_outerpath, JoinPath::innerjoinpath, nodeTag, JoinPath::outerjoinpath, PATH_REQ_OUTER, REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE, REJECT_IF_PATH_NOT_REPARAMETERIZABLE, MaterialPath::subpath, MemoizePath::subpath, GatherPath::subpath, AppendPath::subpaths, and RelOptInfo::top_parent_relids.

Referenced by pathlist_is_reparameterizable_by_child(), try_nestloop_path(), and try_partial_nestloop_path().
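
A hedged sketch of how the nestloop callers above use the check: an inner path that is parameterized by the outer child's top parent is acceptable only if it can later be translated to the child.

/* Hypothetical sketch: early feasibility test for a child nestloop. */
#include "postgres.h"
#include "nodes/bitmapset.h"
#include "optimizer/pathnode.h"

static bool
child_nestloop_feasible(Path *outer_path, Path *inner_path)
{
    RelOptInfo *outerrel = outer_path->parent;

    if (inner_path->param_info &&
        bms_overlap(PATH_REQ_OUTER(inner_path), outerrel->top_parent_relids) &&
        !path_is_reparameterizable_by_child(inner_path, outerrel))
        return false;           /* could not be translated at createplan time */

    return true;
}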

◆ reparameterize_path()

Path* reparameterize_path ( PlannerInfo *root,
Path *path,
Relids  required_outer,
double  loop_count 
)

Definition at line 3946 of file pathnode.c.

3949 {
3950  RelOptInfo *rel = path->parent;
3951 
3952  /* Can only increase, not decrease, path's parameterization */
3953  if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer))
3954  return NULL;
3955  switch (path->pathtype)
3956  {
3957  case T_SeqScan:
3958  return create_seqscan_path(root, rel, required_outer, 0);
3959  case T_SampleScan:
3960  return (Path *) create_samplescan_path(root, rel, required_outer);
3961  case T_IndexScan:
3962  case T_IndexOnlyScan:
3963  {
3964  IndexPath *ipath = (IndexPath *) path;
3965  IndexPath *newpath = makeNode(IndexPath);
3966 
3967  /*
3968  * We can't use create_index_path directly, and would not want
3969  * to because it would re-compute the indexqual conditions
3970  * which is wasted effort. Instead we hack things a bit:
3971  * flat-copy the path node, revise its param_info, and redo
3972  * the cost estimate.
3973  */
3974  memcpy(newpath, ipath, sizeof(IndexPath));
3975  newpath->path.param_info =
3976  get_baserel_parampathinfo(root, rel, required_outer);
3977  cost_index(newpath, root, loop_count, false);
3978  return (Path *) newpath;
3979  }
3980  case T_BitmapHeapScan:
3981  {
3982  BitmapHeapPath *bpath = (BitmapHeapPath *) path;
3983 
3984  return (Path *) create_bitmap_heap_path(root,
3985  rel,
3986  bpath->bitmapqual,
3987  required_outer,
3988  loop_count, 0);
3989  }
3990  case T_SubqueryScan:
3991  {
3992  SubqueryScanPath *spath = (SubqueryScanPath *) path;
3993  Path *subpath = spath->subpath;
3994  bool trivial_pathtarget;
3995 
3996  /*
3997  * If existing node has zero extra cost, we must have decided
3998  * its target is trivial. (The converse is not true, because
3999  * it might have a trivial target but quals to enforce; but in
4000  * that case the new node will too, so it doesn't matter
4001  * whether we get the right answer here.)
4002  */
4003  trivial_pathtarget =
4004  (subpath->total_cost == spath->path.total_cost);
4005 
4006  return (Path *) create_subqueryscan_path(root,
4007  rel,
4008  subpath,
4009  trivial_pathtarget,
4010  spath->path.pathkeys,
4011  required_outer);
4012  }
4013  case T_Result:
4014  /* Supported only for RTE_RESULT scan paths */
4015  if (IsA(path, Path))
4016  return create_resultscan_path(root, rel, required_outer);
4017  break;
4018  case T_Append:
4019  {
4020  AppendPath *apath = (AppendPath *) path;
4021  List *childpaths = NIL;
4022  List *partialpaths = NIL;
4023  int i;
4024  ListCell *lc;
4025 
4026  /* Reparameterize the children */
4027  i = 0;
4028  foreach(lc, apath->subpaths)
4029  {
4030  Path *spath = (Path *) lfirst(lc);
4031 
4032  spath = reparameterize_path(root, spath,
4033  required_outer,
4034  loop_count);
4035  if (spath == NULL)
4036  return NULL;
4037  /* We have to re-split the regular and partial paths */
4038  if (i < apath->first_partial_path)
4039  childpaths = lappend(childpaths, spath);
4040  else
4041  partialpaths = lappend(partialpaths, spath);
4042  i++;
4043  }
4044  return (Path *)
4045  create_append_path(root, rel, childpaths, partialpaths,
4046  apath->path.pathkeys, required_outer,
4047  apath->path.parallel_workers,
4048  apath->path.parallel_aware,
4049  -1);
4050  }
4051  case T_Material:
4052  {
4053  MaterialPath *mpath = (MaterialPath *) path;
4054  Path *spath = mpath->subpath;
4055 
4056  spath = reparameterize_path(root, spath,
4057  required_outer,
4058  loop_count);
4059  if (spath == NULL)
4060  return NULL;
4061  return (Path *) create_material_path(rel, spath);
4062  }
4063  case T_Memoize:
4064  {
4065  MemoizePath *mpath = (MemoizePath *) path;
4066  Path *spath = mpath->subpath;
4067 
4068  spath = reparameterize_path(root, spath,
4069  required_outer,
4070  loop_count);
4071  if (spath == NULL)
4072  return NULL;
4073  return (Path *) create_memoize_path(root, rel,
4074  spath,
4075  mpath->param_exprs,
4076  mpath->hash_operators,
4077  mpath->singlerow,
4078  mpath->binary_mode,
4079  mpath->calls);
4080  }
4081  default:
4082  break;
4083  }
4084  return NULL;
4085 }
int i
Definition: isn.c:73
AppendPath * create_append_path(PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *partial_subpaths, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows)
Definition: pathnode.c:1244
Path * create_resultscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
Definition: pathnode.c:2176
Path * create_samplescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
Definition: pathnode.c:952
MemoizePath * create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *param_exprs, List *hash_operators, bool singlerow, bool binary_mode, double calls)
Definition: pathnode.c:1598
Path * reparameterize_path(PlannerInfo *root, Path *path, Relids required_outer, double loop_count)
Definition: pathnode.c:3946
SubqueryScanPath * create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, bool trivial_pathtarget, List *pathkeys, Relids required_outer)
Definition: pathnode.c:2016
MaterialPath * create_material_path(RelOptInfo *rel, Path *subpath)
Definition: pathnode.c:1566
Path * create_seqscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer, int parallel_workers)
Definition: pathnode.c:927
BitmapHeapPath * create_bitmap_heap_path(PlannerInfo *root, RelOptInfo *rel, Path *bitmapqual, Relids required_outer, double loop_count, int parallel_degree)
Definition: pathnode.c:1042

References MemoizePath::binary_mode, BitmapHeapPath::bitmapqual, bms_is_subset(), MemoizePath::calls, cost_index(), create_append_path(), create_bitmap_heap_path(), create_material_path(), create_memoize_path(), create_resultscan_path(), create_samplescan_path(), create_seqscan_path(), create_subqueryscan_path(), get_baserel_parampathinfo(), MemoizePath::hash_operators, i, IsA, lappend(), lfirst, makeNode, NIL, Path::parallel_aware, Path::parallel_workers, MemoizePath::param_exprs, IndexPath::path, SubqueryScanPath::path, AppendPath::path, PATH_REQ_OUTER, Path::pathkeys, Path::pathtype, root, MemoizePath::singlerow, subpath(), SubqueryScanPath::subpath, MaterialPath::subpath, MemoizePath::subpath, AppendPath::subpaths, and Path::total_cost.

Referenced by get_cheapest_parameterized_child_path().
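
A hedged sketch of the pattern in get_cheapest_parameterized_child_path(): when no existing child path has exactly the wanted parameterization, try to widen one, accepting that NULL means this path type (or one of its subpaths) cannot be reparameterized.

/* Hypothetical sketch: widen a path's parameterization to required_outer. */
#include "postgres.h"
#include "optimizer/pathnode.h"

static Path *
widen_parameterization(PlannerInfo *root, Path *path,
                       Relids required_outer, double loop_count)
{
    /* only additions to the required outer rels are possible */
    return reparameterize_path(root, path, required_outer, loop_count);
}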

◆ reparameterize_path_by_child()

Path* reparameterize_path_by_child ( PlannerInfo *root,
Path *path,
RelOptInfo *child_rel 
)

Definition at line 4112 of file pathnode.c.

4114 {
4115  Path *new_path;
4116  ParamPathInfo *new_ppi;
4117  ParamPathInfo *old_ppi;
4118  Relids required_outer;
4119 
4120 #define ADJUST_CHILD_ATTRS(node) \
4121  ((node) = (void *) adjust_appendrel_attrs_multilevel(root, \
4122  (Node *) (node), \
4123  child_rel, \
4124  child_rel->top_parent))
4125 
4126 #define REPARAMETERIZE_CHILD_PATH(path) \
4127 do { \
4128  (path) = reparameterize_path_by_child(root, (path), child_rel); \
4129  if ((path) == NULL) \
4130  return NULL; \
4131 } while(0)
4132 
4133 #define REPARAMETERIZE_CHILD_PATH_LIST(pathlist) \
4134 do { \
4135  if ((pathlist) != NIL) \
4136  { \
4137  (pathlist) = reparameterize_pathlist_by_child(root, (pathlist), \
4138  child_rel); \
4139  if ((pathlist) == NIL) \
4140  return NULL; \
4141  } \
4142 } while(0)
4143 
4144  /*
4145  * If the path is not parameterized by the parent of the given relation,
4146  * it doesn't need reparameterization.
4147  */
4148  if (!path->param_info ||
4149  !bms_overlap(PATH_REQ_OUTER(path), child_rel->top_parent_relids))
4150  return path;
4151 
4152  /*
4153  * If possible, reparameterize the given path.
4154  *
4155  * This function is currently only applied to the inner side of a nestloop
4156  * join that is being partitioned by the partitionwise-join code. Hence,
4157  * we need only support path types that plausibly arise in that context.
4158  * (In particular, supporting sorted path types would be a waste of code
4159  * and cycles: even if we translated them here, they'd just lose in
4160  * subsequent cost comparisons.) If we do see an unsupported path type,
4161  * that just means we won't be able to generate a partitionwise-join plan
4162  * using that path type.
4163  */
4164  switch (nodeTag(path))
4165  {
4166  case T_Path:
4167  new_path = path;
4168  ADJUST_CHILD_ATTRS(new_path->parent->baserestrictinfo);
4169  if (path->pathtype == T_SampleScan)
4170  {
4171  Index scan_relid = path->parent->relid;
4172  RangeTblEntry *rte;
4173 
4174  /* it should be a base rel with a tablesample clause... */
4175  Assert(scan_relid > 0);
4176  rte = planner_rt_fetch(scan_relid, root);
4177  Assert(rte->rtekind == RTE_RELATION);
4178  Assert(rte->tablesample != NULL);
4179 
4180  ADJUST_CHILD_ATTRS(rte->tablesample);
4181  }
4182  break;
4183 
4184  case T_IndexPath:
4185  {
4186  IndexPath *ipath = (IndexPath *) path;
4187 
4188  ADJUST_CHILD_ATTRS(ipath->indexinfo->indrestrictinfo);
4189  ADJUST_CHILD_ATTRS(ipath->indexclauses);
4190  new_path = (Path *) ipath;
4191  }
4192  break;
4193 
4194  case T_BitmapHeapPath:
4195  {
4196  BitmapHeapPath *bhpath = (BitmapHeapPath *) path;
4197 
4198  ADJUST_CHILD_ATTRS(bhpath->path.parent->baserestrictinfo);
4199  REPARAMETERIZE_CHILD_PATH(bhpath->bitmapqual);
4200  new_path = (Path *) bhpath;
4201  }
4202  break;
4203 
4204  case T_BitmapAndPath:
4205  {
4206  BitmapAndPath *bapath = (BitmapAndPath *) path;
4207 
4208  REPARAMETERIZE_CHILD_PATH_LIST(bapath->bitmapquals);
4209  new_path = (Path *) bapath;
4210  }
4211  break;
4212 
4213  case T_BitmapOrPath:
4214  {
4215  BitmapOrPath *bopath = (BitmapOrPath *) path;
4216 
4217  REPARAMETERIZE_CHILD_PATH_LIST(bopath->bitmapquals);
4218  new_path = (Path *) bopath;
4219  }
4220  break;
4221 
4222  case T_ForeignPath:
4223  {
4224  ForeignPath *fpath = (ForeignPath *) path;
4225  ReparameterizeForeignPathByChild_function rfpc_func;
4226 
4227  ADJUST_CHILD_ATTRS(fpath->path.parent->baserestrictinfo);
4228  if (fpath->fdw_outerpath)
4229  REPARAMETERIZE_CHILD_PATH(fpath->fdw_outerpath);
4230  if (fpath->fdw_restrictinfo)
4231  ADJUST_CHILD_ATTRS(fpath->fdw_restrictinfo);
4232 
4233  /* Hand over to FDW if needed. */
4234  rfpc_func =
4235  path->parent->fdwroutine->ReparameterizeForeignPathByChild;
4236  if (rfpc_func)
4237  fpath->fdw_private = rfpc_func(root, fpath->fdw_private,
4238  child_rel);
4239  new_path = (Path *) fpath;
4240  }
4241  break;
4242 
4243  case T_CustomPath:
4244  {
4245  CustomPath *cpath = (CustomPath *) path;
4246 
4247  ADJUST_CHILD_ATTRS(cpath->path.parent->baserestrictinfo);
4248  REPARAMETERIZE_CHILD_PATH_LIST(cpath->custom_paths);
4249  if (cpath->custom_restrictinfo)
4250  ADJUST_CHILD_ATTRS(cpath->custom_restrictinfo);
4251  if (cpath->methods &&
4252  cpath->methods->ReparameterizeCustomPathByChild)
4253  cpath->custom_private =
4254  cpath->methods->ReparameterizeCustomPathByChild(root,
4255  cpath->custom_private,
4256  child_rel);
4257  new_path = (Path *) cpath;
4258  }
4259  break;
4260 
4261  case T_NestPath:
4262  {
4263  NestPath *npath = (NestPath *) path;
4264  JoinPath *jpath = (JoinPath *) npath;
4265 
4266  REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
4267  REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
4268  ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
4269  new_path = (Path *) npath;
4270  }
4271  break;
4272 
4273  case T_MergePath:
4274  {
4275  MergePath *mpath = (MergePath *) path;
4276  JoinPath *jpath = (JoinPath *) mpath;
4277 
4278  REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
4279  REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
4280  ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
4281  ADJUST_CHILD_ATTRS(mpath->path_mergeclauses);
4282  new_path = (Path *) mpath;
4283  }
4284  break;
4285 
4286  case T_HashPath:
4287  {
4288  HashPath *hpath = (HashPath *) path;
4289  JoinPath *jpath = (JoinPath *) hpath;
4290 
4291  REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
4292  REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
4293  ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
4294  ADJUST_CHILD_ATTRS(hpath->path_hashclauses);
4295  new_path = (Path *) hpath;
4296  }
4297  break;
4298 
4299  case T_AppendPath:
4300  {
4301  AppendPath *apath = (AppendPath *) path;
4302 
4303  REPARAMETERIZE_CHILD_PATH_LIST(apath->subpaths);
4304  new_path = (Path *) apath;
4305  }
4306  break;
4307 
4308  case T_MaterialPath:
4309  {
4310  MaterialPath *mpath = (MaterialPath *) path;
4311 
4312  REPARAMETERIZE_CHILD_PATH(mpath->subpath);
4313  new_path = (Path *) mpath;
4314  }
4315  break;
4316 
4317  case T_MemoizePath:
4318  {
4319  MemoizePath *mpath = (MemoizePath *) path;
4320 
4321  REPARAMETERIZE_CHILD_PATH(mpath->subpath);
4322  ADJUST_CHILD_ATTRS(mpath->param_exprs);
4323  new_path = (Path *) mpath;
4324  }
4325  break;
4326 
4327  case T_GatherPath:
4328  {
4329  GatherPath *gpath = (GatherPath *) path;
4330 
4331  REPARAMETERIZE_CHILD_PATH(gpath->subpath);
4332  new_path = (Path *) gpath;
4333  }
4334  break;
4335 
4336  default:
4337  /* We don't know how to reparameterize this path. */
4338  return NULL;
4339  }
4340 
4341  /*
4342  * Adjust the parameterization information, which refers to the topmost
4343  * parent. The topmost parent can be multiple levels away from the given
4344  * child, hence use multi-level expression adjustment routines.
4345  */
4346  old_ppi = new_path->param_info;
4347  required_outer =
4348  adjust_child_relids_multilevel(root, old_ppi->ppi_req_outer,
4349  child_rel,
4350  child_rel->top_parent);
4351 
4352  /* If we already have a PPI for this parameterization, just return it */
4353  new_ppi = find_param_path_info(new_path->parent, required_outer);
4354 
4355  /*
4356  * If not, build a new one and link it to the list of PPIs. For the same
4357  * reason as explained in mark_dummy_rel(), allocate new PPI in the same
4358  * context the given RelOptInfo is in.
4359  */
4360  if (new_ppi == NULL)
4361  {
4362  MemoryContext oldcontext;
4363  RelOptInfo *rel = path->parent;
4364 
4365  oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
4366 
4367  new_ppi = makeNode(ParamPathInfo);
4368  new_ppi->ppi_req_outer = bms_copy(required_outer);
4369  new_ppi->ppi_rows = old_ppi->ppi_rows;
4370  new_ppi->ppi_clauses = old_ppi->ppi_clauses;
4371  ADJUST_CHILD_ATTRS(new_ppi->ppi_clauses);
4372  new_ppi->ppi_serials = bms_copy(old_ppi->ppi_serials);
4373  rel->ppilist = lappend(rel->ppilist, new_ppi);
4374 
4375  MemoryContextSwitchTo(oldcontext);
4376  }
4377  bms_free(required_outer);
4378 
4379  new_path->param_info = new_ppi;
4380 
4381  /*
4382  * Adjust the path target if the parent of the outer relation is
4383  * referenced in the targetlist. This can happen when only the parent of
4384  * outer relation is laterally referenced in this relation.
4385  */
4386  if (bms_overlap(path->parent->lateral_relids,
4387  child_rel->top_parent_relids))
4388  {
4389  new_path->pathtarget = copy_pathtarget(new_path->pathtarget);
4390  ADJUST_CHILD_ATTRS(new_path->pathtarget->exprs);
4391  }
4392 
4393  return new_path;
4394 }
Relids adjust_child_relids_multilevel(PlannerInfo *root, Relids relids, RelOptInfo *childrel, RelOptInfo *parentrel)
Definition: appendinfo.c:588
void bms_free(Bitmapset *a)
Definition: bitmapset.c:239
List *(* ReparameterizeForeignPathByChild_function)(PlannerInfo *root, List *fdw_private, RelOptInfo *child_rel)
Definition: fdwapi.h:182
#define REPARAMETERIZE_CHILD_PATH_LIST(pathlist)
#define REPARAMETERIZE_CHILD_PATH(path)
#define ADJUST_CHILD_ATTRS(node)
struct List *(* ReparameterizeCustomPathByChild)(PlannerInfo *root, List *custom_private, RelOptInfo *child_rel)
Definition: extensible.h:103
const struct CustomPathMethods * methods
Definition: pathnodes.h:1900
List * custom_private
Definition: pathnodes.h:1899
List * custom_restrictinfo
Definition: pathnodes.h:1898
List * indrestrictinfo
Definition: pathnodes.h:1171
struct TableSampleClause * tablesample
Definition: parsenodes.h:1108
PathTarget * copy_pathtarget(PathTarget *src)
Definition: tlist.c:657

References ADJUST_CHILD_ATTRS, adjust_child_relids_multilevel(), Assert, BitmapHeapPath::bitmapqual, BitmapAndPath::bitmapquals, BitmapOrPath::bitmapquals, bms_copy(), bms_free(), bms_overlap(), copy_pathtarget(), CustomPath::custom_paths, CustomPath::custom_private, CustomPath::custom_restrictinfo, ForeignPath::fdw_outerpath, ForeignPath::fdw_private, ForeignPath::fdw_restrictinfo, find_param_path_info(), GetMemoryChunkContext(), IndexPath::indexclauses, IndexPath::indexinfo, IndexOptInfo::indrestrictinfo, JoinPath::innerjoinpath, JoinPath::joinrestrictinfo, lappend(), makeNode, MemoryContextSwitchTo(), CustomPath::methods, nodeTag, JoinPath::outerjoinpath, MemoizePath::param_exprs, BitmapHeapPath::path, ForeignPath::path, CustomPath::path, HashPath::path_hashclauses, MergePath::path_mergeclauses, PATH_REQ_OUTER, Path::pathtype, planner_rt_fetch, ParamPathInfo::ppi_clauses, ParamPathInfo::ppi_req_outer, ParamPathInfo::ppi_rows, ParamPathInfo::ppi_serials, RelOptInfo::ppilist, REPARAMETERIZE_CHILD_PATH, REPARAMETERIZE_CHILD_PATH_LIST, CustomPathMethods::ReparameterizeCustomPathByChild, root, RTE_RELATION, RangeTblEntry::rtekind, MaterialPath::subpath, MemoizePath::subpath, GatherPath::subpath, AppendPath::subpaths, RangeTblEntry::tablesample, and RelOptInfo::top_parent_relids.

Referenced by create_nestloop_plan(), and reparameterize_pathlist_by_child().
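
The T_ForeignPath arm above hands fdw_private over to the FDW's ReparameterizeForeignPathByChild callback (the typedef is shown above). The following is a minimal sketch of such a callback, not core code: my_fdw_reparameterize_by_child is a hypothetical name, and it assumes the FDW keeps expression trees in fdw_private that can be translated with adjust_appendrel_attrs_multilevel() from optimizer/appendinfo.h, the expression-level counterpart of adjust_child_relids_multilevel() used later in this function.

#include "postgres.h"

#include "nodes/nodes.h"
#include "optimizer/appendinfo.h"

/*
 * Hypothetical FDW callback: return a copy of fdw_private with any
 * parent-relation Vars rewritten to reference child_rel, much as the
 * ADJUST_CHILD_ATTRS macro does for the core Path fields above.
 */
static List *
my_fdw_reparameterize_by_child(PlannerInfo *root, List *fdw_private,
                               RelOptInfo *child_rel)
{
    /* Never scribble on the parent path's private list. */
    List *priv = copyObject(fdw_private);

    /* Translate expressions from the topmost parent down to child_rel. */
    priv = (List *) adjust_appendrel_attrs_multilevel(root, (Node *) priv,
                                                      child_rel,
                                                      child_rel->top_parent);
    return priv;
}

An FDW would install such a callback in its FdwRoutine, e.g. routine->ReparameterizeForeignPathByChild = my_fdw_reparameterize_by_child; leaving the field NULL simply means fdw_private is reused unchanged, as the code above shows.
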

◆ set_cheapest()

void set_cheapest ( RelOptInfo * parent_rel )

Definition at line 242 of file pathnode.c.

243 {
244  Path *cheapest_startup_path;
245  Path *cheapest_total_path;
246  Path *best_param_path;
247  List *parameterized_paths;
248  ListCell *p;
249 
250  Assert(IsA(parent_rel, RelOptInfo));
251 
252  if (parent_rel->pathlist == NIL)
253  elog(ERROR, "could not devise a query plan for the given query");
254 
255  cheapest_startup_path = cheapest_total_path = best_param_path = NULL;
256  parameterized_paths = NIL;
257 
258  foreach(p, parent_rel->pathlist)
259  {
260  Path *path = (Path *) lfirst(p);
261  int cmp;
262 
263  if (path->param_info)
264  {
265  /* Parameterized path, so add it to parameterized_paths */
266  parameterized_paths = lappend(parameterized_paths, path);
267 
268  /*
269  * If we have an unparameterized cheapest-total, we no longer care
270  * about finding the best parameterized path, so move on.
271  */
272  if (cheapest_total_path)
273  continue;
274 
275  /*
276  * Otherwise, track the best parameterized path, which is the one
277  * with least total cost among those of the minimum
278  * parameterization.
279  */
280  if (best_param_path == NULL)
281  best_param_path = path;
282  else
283  {
284  switch (bms_subset_compare(PATH_REQ_OUTER(path),
285  PATH_REQ_OUTER(best_param_path)))
286  {
287  case BMS_EQUAL:
288  /* keep the cheaper one */
289  if (compare_path_costs(path, best_param_path,
290  TOTAL_COST) < 0)
291  best_param_path = path;
292  break;
293  case BMS_SUBSET1:
294  /* new path is less-parameterized */
295  best_param_path = path;
296  break;
297  case BMS_SUBSET2:
298  /* old path is less-parameterized, keep it */
299  break;
300  case BMS_DIFFERENT:
301 
302  /*
303  * This means that neither path has the least possible
304  * parameterization for the rel. We'll sit on the old
305  * path until something better comes along.
306  */
307  break;
308  }
309  }
310  }
311  else
312  {
313  /* Unparameterized path, so consider it for cheapest slots */
314  if (cheapest_total_path == NULL)
315  {
316  cheapest_startup_path = cheapest_total_path = path;
317  continue;
318  }
319 
320  /*
321  * If we find two paths of identical costs, try to keep the
322  * better-sorted one. The paths might have unrelated sort
323  * orderings, in which case we can only guess which might be
324  * better to keep, but if one is superior then we definitely
325  * should keep that one.
326  */
327  cmp = compare_path_costs(cheapest_startup_path, path, STARTUP_COST);
328  if (cmp > 0 ||
329  (cmp == 0 &&
330  compare_pathkeys(cheapest_startup_path->pathkeys,
331  path->pathkeys) == PATHKEYS_BETTER2))
332  cheapest_startup_path = path;
333 
334  cmp = compare_path_costs(cheapest_total_path, path, TOTAL_COST);
335  if (cmp > 0 ||
336  (cmp == 0 &&
337  compare_pathkeys(cheapest_total_path->pathkeys,
338  path->pathkeys) == PATHKEYS_BETTER2))
339  cheapest_total_path = path;
340  }
341  }
342 
343  /* Add cheapest unparameterized path, if any, to parameterized_paths */
344  if (cheapest_total_path)
345  parameterized_paths = lcons(cheapest_total_path, parameterized_paths);
346 
347  /*
348  * If there is no unparameterized path, use the best parameterized path as
349  * cheapest_total_path (but not as cheapest_startup_path).
350  */
351  if (cheapest_total_path == NULL)
352  cheapest_total_path = best_param_path;
353  Assert(cheapest_total_path != NULL);
354 
355  parent_rel->cheapest_startup_path = cheapest_startup_path;
356  parent_rel->cheapest_total_path = cheapest_total_path;
357  parent_rel->cheapest_unique_path = NULL; /* computed only if needed */
358  parent_rel->cheapest_parameterized_paths = parameterized_paths;
359 }
@ BMS_DIFFERENT
Definition: bitmapset.h:65
List * lcons(void *datum, List *list)
Definition: list.c:495

References Assert, BMS_DIFFERENT, BMS_EQUAL, BMS_SUBSET1, BMS_SUBSET2, bms_subset_compare(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RelOptInfo::cheapest_unique_path, cmp(), compare_path_costs(), compare_pathkeys(), elog, ERROR, IsA, lappend(), lcons(), lfirst, NIL, PATH_REQ_OUTER, Path::pathkeys, PATHKEYS_BETTER2, RelOptInfo::pathlist, STARTUP_COST, and TOTAL_COST.

Referenced by apply_scanjoin_target_to_paths(), create_distinct_paths(), create_grouping_paths(), create_ordinary_grouping_paths(), create_partial_distinct_paths(), create_partitionwise_grouping_paths(), create_window_paths(), generate_partitionwise_join_paths(), mark_dummy_rel(), merge_clump(), postprocess_setop_rel(), query_planner(), set_dummy_rel_pathlist(), set_rel_pathlist(), standard_join_search(), and subquery_planner().
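
As the callers above suggest, set_cheapest() is always the final step after a relation's pathlist has been populated via add_path(). A minimal sketch of that pattern, using only functions declared in this header (build_trivial_pathlist is a hypothetical helper, not core code):

#include "postgres.h"

#include "optimizer/pathnode.h"

/*
 * Hypothetical helper: give a base relation a single unparameterized
 * sequential-scan path, then let set_cheapest() fill in the cheapest_*
 * fields that the rest of the planner examines.
 */
static void
build_trivial_pathlist(PlannerInfo *root, RelOptInfo *rel)
{
    /* Add one candidate path; add_path() may discard dominated paths. */
    add_path(rel, create_seqscan_path(root, rel, NULL, 0));

    /* Pick cheapest-startup, cheapest-total and parameterized paths. */
    set_cheapest(rel);

    /* set_cheapest() errors out on an empty pathlist, so this holds. */
    Assert(rel->cheapest_total_path != NULL);
}
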

◆ setup_simple_rel_arrays()

void setup_simple_rel_arrays ( PlannerInfo * root )

Definition at line 94 of file relnode.c.

95 {
96  int size;
97  Index rti;
98  ListCell *lc;
99 
100  /* Arrays are accessed using RT indexes (1..N) */
101  size = list_length(root->parse->rtable) + 1;
102  root->simple_rel_array_size = size;
103 
104  /*
105  * simple_rel_array is initialized to all NULLs, since no RelOptInfos
106  * exist yet. It'll be filled by later calls to build_simple_rel().
107  */
108  root->simple_rel_array = (RelOptInfo **)
109  palloc0(size * sizeof(RelOptInfo *));
110 
111  /* simple_rte_array is an array equivalent of the rtable list */
112  root->simple_rte_array = (RangeTblEntry **)
113  palloc0(size * sizeof(RangeTblEntry *));
114  rti = 1;
115  foreach(lc, root->parse->rtable)
116  {
117  RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
118 
119  root->simple_rte_array[rti++] = rte;
120  }
121 
122  /* append_rel_array is not needed if there are no AppendRelInfos */
123  if (root->append_rel_list == NIL)
124  {
125  root->append_rel_array = NULL;
126  return;
127  }
128 
129  root->append_rel_array = (AppendRelInfo **)
130  palloc0(size * sizeof(AppendRelInfo *));
131 
132  /*
133  * append_rel_array is filled with any already-existing AppendRelInfos,
134  * which currently could only come from UNION ALL flattening. We might
135  * add more later during inheritance expansion, but it's the
136  * responsibility of the expansion code to update the array properly.
137  */
138  foreach(lc, root->append_rel_list)
139  {
140  AppendRelInfo *appinfo = lfirst_node(AppendRelInfo, lc);
141  int child_relid = appinfo->child_relid;
142 
143  /* Sanity check */
144  Assert(child_relid < size);
145 
146  if (root->append_rel_array[child_relid])
147  elog(ERROR, "child relation already exists");
148 
149  root->append_rel_array[child_relid] = appinfo;
150  }
151 }
#define lfirst_node(type, lc)
Definition: pg_list.h:176
Index child_relid
Definition: pathnodes.h:2958

References Assert, AppendRelInfo::child_relid, elog, ERROR, lfirst, lfirst_node, list_length(), NIL, palloc0(), root, and size.

Referenced by plan_cluster_use_sort(), plan_create_index_workers(), plan_set_operations(), and query_planner().
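
Because the arrays are indexed by RT index (1 .. simple_rel_array_size - 1), code that runs after this setup can scan them directly. A small sketch follows; dump_simple_rel_arrays is a hypothetical debugging helper, not part of core.

#include "postgres.h"

#include "nodes/pathnodes.h"

/*
 * Hypothetical helper: report, for each RT index, whether a RelOptInfo
 * has been built yet.  simple_rte_array[rti] is always populated by
 * setup_simple_rel_arrays(); simple_rel_array[rti] stays NULL until
 * build_simple_rel() is called for that index.
 */
static void
dump_simple_rel_arrays(PlannerInfo *root)
{
    for (int rti = 1; rti < root->simple_rel_array_size; rti++)
    {
        RangeTblEntry *rte = root->simple_rte_array[rti];
        RelOptInfo *rel = root->simple_rel_array[rti];

        elog(DEBUG1, "rti %d: rtekind %d, RelOptInfo %s",
             rti, (int) rte->rtekind, rel ? "built" : "not built");
    }
}
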