PostgreSQL Source Code  git master
pathnode.h File Reference
#include "nodes/bitmapset.h"
#include "nodes/pathnodes.h"
Include dependency graph for pathnode.h:
This graph shows which files directly or indirectly include this file:


Functions

int compare_path_costs (Path *path1, Path *path2, CostSelector criterion)
 
int compare_fractional_path_costs (Path *path1, Path *path2, double fraction)
 
void set_cheapest (RelOptInfo *parent_rel)
 
void add_path (RelOptInfo *parent_rel, Path *new_path)
 
bool add_path_precheck (RelOptInfo *parent_rel, Cost startup_cost, Cost total_cost, List *pathkeys, Relids required_outer)
 
void add_partial_path (RelOptInfo *parent_rel, Path *new_path)
 
bool add_partial_path_precheck (RelOptInfo *parent_rel, Cost total_cost, List *pathkeys)
 
Path * create_seqscan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer, int parallel_workers)

Path * create_samplescan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)

IndexPath * create_index_path (PlannerInfo *root, IndexOptInfo *index, List *indexclauses, List *indexorderbys, List *indexorderbycols, List *pathkeys, ScanDirection indexscandir, bool indexonly, Relids required_outer, double loop_count, bool partial_path)

BitmapHeapPath * create_bitmap_heap_path (PlannerInfo *root, RelOptInfo *rel, Path *bitmapqual, Relids required_outer, double loop_count, int parallel_degree)

BitmapAndPath * create_bitmap_and_path (PlannerInfo *root, RelOptInfo *rel, List *bitmapquals)

BitmapOrPath * create_bitmap_or_path (PlannerInfo *root, RelOptInfo *rel, List *bitmapquals)

TidPath * create_tidscan_path (PlannerInfo *root, RelOptInfo *rel, List *tidquals, Relids required_outer)

TidRangePath * create_tidrangescan_path (PlannerInfo *root, RelOptInfo *rel, List *tidrangequals, Relids required_outer)

AppendPath * create_append_path (PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *partial_subpaths, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows)

MergeAppendPath * create_merge_append_path (PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *pathkeys, Relids required_outer)

GroupResultPath * create_group_result_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, List *havingqual)

MaterialPath * create_material_path (RelOptInfo *rel, Path *subpath)

MemoizePath * create_memoize_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *param_exprs, List *hash_operators, bool singlerow, bool binary_mode, double calls)

UniquePath * create_unique_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, SpecialJoinInfo *sjinfo)

GatherPath * create_gather_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, Relids required_outer, double *rows)

GatherMergePath * create_gather_merge_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *pathkeys, Relids required_outer, double *rows)

SubqueryScanPath * create_subqueryscan_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, bool trivial_pathtarget, List *pathkeys, Relids required_outer)

Path * create_functionscan_path (PlannerInfo *root, RelOptInfo *rel, List *pathkeys, Relids required_outer)

Path * create_valuesscan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)

Path * create_tablefuncscan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)

Path * create_ctescan_path (PlannerInfo *root, RelOptInfo *rel, List *pathkeys, Relids required_outer)

Path * create_namedtuplestorescan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)

Path * create_resultscan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)

Path * create_worktablescan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)

ForeignPath * create_foreignscan_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, double rows, Cost startup_cost, Cost total_cost, List *pathkeys, Relids required_outer, Path *fdw_outerpath, List *fdw_restrictinfo, List *fdw_private)

ForeignPath * create_foreign_join_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, double rows, Cost startup_cost, Cost total_cost, List *pathkeys, Relids required_outer, Path *fdw_outerpath, List *fdw_restrictinfo, List *fdw_private)

ForeignPath * create_foreign_upper_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, double rows, Cost startup_cost, Cost total_cost, List *pathkeys, Path *fdw_outerpath, List *fdw_restrictinfo, List *fdw_private)
 
Relids calc_nestloop_required_outer (Relids outerrelids, Relids outer_paramrels, Relids innerrelids, Relids inner_paramrels)
 
Relids calc_non_nestloop_required_outer (Path *outer_path, Path *inner_path)
 
NestPath * create_nestloop_path (PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype, JoinCostWorkspace *workspace, JoinPathExtraData *extra, Path *outer_path, Path *inner_path, List *restrict_clauses, List *pathkeys, Relids required_outer)

MergePath * create_mergejoin_path (PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype, JoinCostWorkspace *workspace, JoinPathExtraData *extra, Path *outer_path, Path *inner_path, List *restrict_clauses, List *pathkeys, Relids required_outer, List *mergeclauses, List *outersortkeys, List *innersortkeys)

HashPath * create_hashjoin_path (PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype, JoinCostWorkspace *workspace, JoinPathExtraData *extra, Path *outer_path, Path *inner_path, bool parallel_hash, List *restrict_clauses, Relids required_outer, List *hashclauses)

ProjectionPath * create_projection_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)

Path * apply_projection_to_path (PlannerInfo *root, RelOptInfo *rel, Path *path, PathTarget *target)

ProjectSetPath * create_set_projection_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)

SortPath * create_sort_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, double limit_tuples)

IncrementalSortPath * create_incremental_sort_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, int presorted_keys, double limit_tuples)

GroupPath * create_group_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *groupClause, List *qual, double numGroups)

UpperUniquePath * create_upper_unique_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, int numCols, double numGroups)

AggPath * create_agg_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, AggStrategy aggstrategy, AggSplit aggsplit, List *groupClause, List *qual, const AggClauseCosts *aggcosts, double numGroups)

GroupingSetsPath * create_groupingsets_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *having_qual, AggStrategy aggstrategy, List *rollups, const AggClauseCosts *agg_costs)

MinMaxAggPath * create_minmaxagg_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, List *mmaggregates, List *quals)

WindowAggPath * create_windowagg_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *windowFuncs, List *runCondition, WindowClause *winclause, List *qual, bool topwindow)

SetOpPath * create_setop_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, SetOpCmd cmd, SetOpStrategy strategy, List *distinctList, AttrNumber flagColIdx, int firstFlag, double numGroups, double outputRows)

RecursiveUnionPath * create_recursiveunion_path (PlannerInfo *root, RelOptInfo *rel, Path *leftpath, Path *rightpath, PathTarget *target, List *distinctList, int wtParam, double numGroups)

LockRowsPath * create_lockrows_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *rowMarks, int epqParam)

ModifyTablePath * create_modifytable_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, CmdType operation, bool canSetTag, Index nominalRelation, Index rootRelation, bool partColsUpdated, List *resultRelations, List *updateColnosLists, List *withCheckOptionLists, List *returningLists, List *rowMarks, OnConflictExpr *onconflict, List *mergeActionLists, List *mergeJoinConditions, int epqParam)

LimitPath * create_limit_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, Node *limitOffset, Node *limitCount, LimitOption limitOption, int64 offset_est, int64 count_est)
 
void adjust_limit_rows_costs (double *rows, Cost *startup_cost, Cost *total_cost, int64 offset_est, int64 count_est)
 
Path * reparameterize_path (PlannerInfo *root, Path *path, Relids required_outer, double loop_count)

Path * reparameterize_path_by_child (PlannerInfo *root, Path *path, RelOptInfo *child_rel)
 
bool path_is_reparameterizable_by_child (Path *path, RelOptInfo *child_rel)
 
void setup_simple_rel_arrays (PlannerInfo *root)
 
void expand_planner_arrays (PlannerInfo *root, int add_size)
 
RelOptInfo * build_simple_rel (PlannerInfo *root, int relid, RelOptInfo *parent)

RelOptInfo * find_base_rel (PlannerInfo *root, int relid)

RelOptInfo * find_base_rel_noerr (PlannerInfo *root, int relid)

RelOptInfo * find_base_rel_ignore_join (PlannerInfo *root, int relid)

RelOptInfo * find_join_rel (PlannerInfo *root, Relids relids)

RelOptInfo * build_join_rel (PlannerInfo *root, Relids joinrelids, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo, List *pushed_down_joins, List **restrictlist_ptr)
 
Relids min_join_parameterization (PlannerInfo *root, Relids joinrelids, RelOptInfo *outer_rel, RelOptInfo *inner_rel)
 
RelOptInfo * fetch_upper_rel (PlannerInfo *root, UpperRelationKind kind, Relids relids)
 
Relids find_childrel_parents (PlannerInfo *root, RelOptInfo *rel)
 
ParamPathInfo * get_baserel_parampathinfo (PlannerInfo *root, RelOptInfo *baserel, Relids required_outer)

ParamPathInfo * get_joinrel_parampathinfo (PlannerInfo *root, RelOptInfo *joinrel, Path *outer_path, Path *inner_path, SpecialJoinInfo *sjinfo, Relids required_outer, List **restrict_clauses)

ParamPathInfo * get_appendrel_parampathinfo (RelOptInfo *appendrel, Relids required_outer)

ParamPathInfo * find_param_path_info (RelOptInfo *rel, Relids required_outer)

Bitmapset * get_param_path_clause_serials (Path *path)

RelOptInfo * build_child_join_rel (PlannerInfo *root, RelOptInfo *outer_rel, RelOptInfo *inner_rel, RelOptInfo *parent_joinrel, List *restrictlist, SpecialJoinInfo *sjinfo)
 

Function Documentation

◆ add_partial_path()

void add_partial_path(RelOptInfo *parent_rel, Path *new_path)

Definition at line 747 of file pathnode.c.

748 {
749  bool accept_new = true; /* unless we find a superior old path */
750  int insert_at = 0; /* where to insert new item */
751  ListCell *p1;
752 
753  /* Check for query cancel. */
754  CHECK_FOR_INTERRUPTS();
755 
756  /* Path to be added must be parallel safe. */
757  Assert(new_path->parallel_safe);
758 
759  /* Relation should be OK for parallelism, too. */
760  Assert(parent_rel->consider_parallel);
761 
762  /*
763  * As in add_path, throw out any paths which are dominated by the new
764  * path, but throw out the new path if some existing path dominates it.
765  */
766  foreach(p1, parent_rel->partial_pathlist)
767  {
768  Path *old_path = (Path *) lfirst(p1);
769  bool remove_old = false; /* unless new proves superior */
770  PathKeysComparison keyscmp;
771 
772  /* Compare pathkeys. */
773  keyscmp = compare_pathkeys(new_path->pathkeys, old_path->pathkeys);
774 
775  /* Unless pathkeys are incompatible, keep just one of the two paths. */
776  if (keyscmp != PATHKEYS_DIFFERENT)
777  {
778  if (new_path->total_cost > old_path->total_cost * STD_FUZZ_FACTOR)
779  {
780  /* New path costs more; keep it only if pathkeys are better. */
781  if (keyscmp != PATHKEYS_BETTER1)
782  accept_new = false;
783  }
784  else if (old_path->total_cost > new_path->total_cost
785  * STD_FUZZ_FACTOR)
786  {
787  /* Old path costs more; keep it only if pathkeys are better. */
788  if (keyscmp != PATHKEYS_BETTER2)
789  remove_old = true;
790  }
791  else if (keyscmp == PATHKEYS_BETTER1)
792  {
793  /* Costs are about the same, new path has better pathkeys. */
794  remove_old = true;
795  }
796  else if (keyscmp == PATHKEYS_BETTER2)
797  {
798  /* Costs are about the same, old path has better pathkeys. */
799  accept_new = false;
800  }
801  else if (old_path->total_cost > new_path->total_cost * 1.0000000001)
802  {
803  /* Pathkeys are the same, and the old path costs more. */
804  remove_old = true;
805  }
806  else
807  {
808  /*
809  * Pathkeys are the same, and new path isn't materially
810  * cheaper.
811  */
812  accept_new = false;
813  }
814  }
815 
816  /*
817  * Remove current element from partial_pathlist if dominated by new.
818  */
819  if (remove_old)
820  {
821  parent_rel->partial_pathlist =
822  foreach_delete_current(parent_rel->partial_pathlist, p1);
823  pfree(old_path);
824  }
825  else
826  {
827  /* new belongs after this old path if it has cost >= old's */
828  if (new_path->total_cost >= old_path->total_cost)
829  insert_at = foreach_current_index(p1) + 1;
830  }
831 
832  /*
833  * If we found an old path that dominates new_path, we can quit
834  * scanning the partial_pathlist; we will not add new_path, and we
835  * assume new_path cannot dominate any later path.
836  */
837  if (!accept_new)
838  break;
839  }
840 
841  if (accept_new)
842  {
843  /* Accept the new path: insert it at proper place */
844  parent_rel->partial_pathlist =
845  list_insert_nth(parent_rel->partial_pathlist, insert_at, new_path);
846  }
847  else
848  {
849  /* Reject and recycle the new path */
850  pfree(new_path);
851  }
852 }
#define Assert(condition)
Definition: c.h:858
List * list_insert_nth(List *list, int pos, void *datum)
Definition: list.c:439
void pfree(void *pointer)
Definition: mcxt.c:1521
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:122
PathKeysComparison compare_pathkeys(List *keys1, List *keys2)
Definition: pathkeys.c:302
#define STD_FUZZ_FACTOR
Definition: pathnode.c:47
PathKeysComparison
Definition: paths.h:201
@ PATHKEYS_BETTER2
Definition: paths.h:204
@ PATHKEYS_BETTER1
Definition: paths.h:203
@ PATHKEYS_DIFFERENT
Definition: paths.h:205
#define lfirst(lc)
Definition: pg_list.h:172
#define foreach_current_index(var_or_cell)
Definition: pg_list.h:403
#define foreach_delete_current(lst, var_or_cell)
Definition: pg_list.h:391
List * pathkeys
Definition: pathnodes.h:1665
Cost total_cost
Definition: pathnodes.h:1662
bool parallel_safe
Definition: pathnodes.h:1655
bool consider_parallel
Definition: pathnodes.h:881
List * partial_pathlist
Definition: pathnodes.h:894

References Assert, CHECK_FOR_INTERRUPTS, compare_pathkeys(), RelOptInfo::consider_parallel, foreach_current_index, foreach_delete_current, lfirst, list_insert_nth(), Path::parallel_safe, RelOptInfo::partial_pathlist, Path::pathkeys, PATHKEYS_BETTER1, PATHKEYS_BETTER2, PATHKEYS_DIFFERENT, pfree(), STD_FUZZ_FACTOR, and Path::total_cost.

Referenced by add_paths_to_append_rel(), build_index_paths(), build_setop_child_paths(), create_partial_bitmap_paths(), create_partial_distinct_paths(), create_partial_grouping_paths(), create_plain_partial_paths(), grouping_planner(), set_subquery_pathlist(), try_partial_hashjoin_path(), try_partial_mergejoin_path(), and try_partial_nestloop_path().
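A minimal usage sketch (not from pathnode.h itself), assuming root and rel come from the surrounding planner code and the relation has already been judged safe for parallelism; the worker count is a placeholder value, whereas the real planner derives it from the relation's size (compare create_plain_partial_paths()):

    int     parallel_workers = 2;   /* placeholder; normally computed from rel size */

    if (rel->consider_parallel)
        add_partial_path(rel, create_seqscan_path(root, rel, NULL,
                                                  parallel_workers));
    /* The Path now belongs to rel->partial_pathlist, or has already been
     * pfree'd if an existing partial path dominated it; do not reuse the
     * pointer after this call. */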

◆ add_partial_path_precheck()

bool add_partial_path_precheck(RelOptInfo *parent_rel, Cost total_cost, List *pathkeys)

Definition at line 865 of file pathnode.c.

867 {
868  ListCell *p1;
869 
870  /*
871  * Our goal here is twofold. First, we want to find out whether this path
872  * is clearly inferior to some existing partial path. If so, we want to
873  * reject it immediately. Second, we want to find out whether this path
874  * is clearly superior to some existing partial path -- at least, modulo
875  * final cost computations. If so, we definitely want to consider it.
876  *
877  * Unlike add_path(), we always compare pathkeys here. This is because we
878  * expect partial_pathlist to be very short, and getting a definitive
879  * answer at this stage avoids the need to call add_path_precheck.
880  */
881  foreach(p1, parent_rel->partial_pathlist)
882  {
883  Path *old_path = (Path *) lfirst(p1);
884  PathKeysComparison keyscmp;
885 
886  keyscmp = compare_pathkeys(pathkeys, old_path->pathkeys);
887  if (keyscmp != PATHKEYS_DIFFERENT)
888  {
889  if (total_cost > old_path->total_cost * STD_FUZZ_FACTOR &&
890  keyscmp != PATHKEYS_BETTER1)
891  return false;
892  if (old_path->total_cost > total_cost * STD_FUZZ_FACTOR &&
893  keyscmp != PATHKEYS_BETTER2)
894  return true;
895  }
896  }
897 
898  /*
899  * This path is neither clearly inferior to an existing partial path nor
900  * clearly good enough that it might replace one. Compare it to
901  * non-parallel plans. If it loses even before accounting for the cost of
902  * the Gather node, we should definitely reject it.
903  *
904  * Note that we pass the total_cost to add_path_precheck twice. This is
905  * because it's never advantageous to consider the startup cost of a
906  * partial path; the resulting plans, if run in parallel, will be run to
907  * completion.
908  */
909  if (!add_path_precheck(parent_rel, total_cost, total_cost, pathkeys,
910  NULL))
911  return false;
912 
913  return true;
914 }
bool add_path_precheck(RelOptInfo *parent_rel, Cost startup_cost, Cost total_cost, List *pathkeys, Relids required_outer)
Definition: pathnode.c:642

References add_path_precheck(), compare_pathkeys(), lfirst, RelOptInfo::partial_pathlist, Path::pathkeys, PATHKEYS_BETTER1, PATHKEYS_BETTER2, PATHKEYS_DIFFERENT, STD_FUZZ_FACTOR, and Path::total_cost.

Referenced by try_partial_hashjoin_path(), try_partial_mergejoin_path(), and try_partial_nestloop_path().
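A usage sketch, assuming joinrel, pathkeys, and workspace (a JoinCostWorkspace filled in by one of the initial_cost_* functions) come from the surrounding join-planning code, as in try_partial_nestloop_path():

    /* Only the estimated total cost matters: partial paths always run to
     * completion under a Gather, so startup cost is never interesting. */
    if (!add_partial_path_precheck(joinrel, workspace.total_cost, pathkeys))
        return;             /* clearly dominated; don't build the path at all */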

◆ add_path()

void add_path(RelOptInfo *parent_rel, Path *new_path)

Definition at line 420 of file pathnode.c.

421 {
422  bool accept_new = true; /* unless we find a superior old path */
423  int insert_at = 0; /* where to insert new item */
424  List *new_path_pathkeys;
425  ListCell *p1;
426 
427  /*
428  * This is a convenient place to check for query cancel --- no part of the
429  * planner goes very long without calling add_path().
430  */
431  CHECK_FOR_INTERRUPTS();
432 
433  /* Pretend parameterized paths have no pathkeys, per comment above */
434  new_path_pathkeys = new_path->param_info ? NIL : new_path->pathkeys;
435 
436  /*
437  * Loop to check proposed new path against old paths. Note it is possible
438  * for more than one old path to be tossed out because new_path dominates
439  * it.
440  */
441  foreach(p1, parent_rel->pathlist)
442  {
443  Path *old_path = (Path *) lfirst(p1);
444  bool remove_old = false; /* unless new proves superior */
445  PathCostComparison costcmp;
446  PathKeysComparison keyscmp;
447  BMS_Comparison outercmp;
448 
449  /*
450  * Do a fuzzy cost comparison with standard fuzziness limit.
451  */
452  costcmp = compare_path_costs_fuzzily(new_path, old_path,
453  STD_FUZZ_FACTOR);
454 
455  /*
456  * If the two paths compare differently for startup and total cost,
457  * then we want to keep both, and we can skip comparing pathkeys and
458  * required_outer rels. If they compare the same, proceed with the
459  * other comparisons. Row count is checked last. (We make the tests
460  * in this order because the cost comparison is most likely to turn
461  * out "different", and the pathkeys comparison next most likely. As
462  * explained above, row count very seldom makes a difference, so even
463  * though it's cheap to compare there's not much point in checking it
464  * earlier.)
465  */
466  if (costcmp != COSTS_DIFFERENT)
467  {
468  /* Similarly check to see if either dominates on pathkeys */
469  List *old_path_pathkeys;
470 
471  old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
472  keyscmp = compare_pathkeys(new_path_pathkeys,
473  old_path_pathkeys);
474  if (keyscmp != PATHKEYS_DIFFERENT)
475  {
476  switch (costcmp)
477  {
478  case COSTS_EQUAL:
479  outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
480  PATH_REQ_OUTER(old_path));
481  if (keyscmp == PATHKEYS_BETTER1)
482  {
483  if ((outercmp == BMS_EQUAL ||
484  outercmp == BMS_SUBSET1) &&
485  new_path->rows <= old_path->rows &&
486  new_path->parallel_safe >= old_path->parallel_safe)
487  remove_old = true; /* new dominates old */
488  }
489  else if (keyscmp == PATHKEYS_BETTER2)
490  {
491  if ((outercmp == BMS_EQUAL ||
492  outercmp == BMS_SUBSET2) &&
493  new_path->rows >= old_path->rows &&
494  new_path->parallel_safe <= old_path->parallel_safe)
495  accept_new = false; /* old dominates new */
496  }
497  else /* keyscmp == PATHKEYS_EQUAL */
498  {
499  if (outercmp == BMS_EQUAL)
500  {
501  /*
502  * Same pathkeys and outer rels, and fuzzily
503  * the same cost, so keep just one; to decide
504  * which, first check parallel-safety, then
505  * rows, then do a fuzzy cost comparison with
506  * very small fuzz limit. (We used to do an
507  * exact cost comparison, but that results in
508  * annoying platform-specific plan variations
509  * due to roundoff in the cost estimates.) If
510  * things are still tied, arbitrarily keep
511  * only the old path. Notice that we will
512  * keep only the old path even if the
513  * less-fuzzy comparison decides the startup
514  * and total costs compare differently.
515  */
516  if (new_path->parallel_safe >
517  old_path->parallel_safe)
518  remove_old = true; /* new dominates old */
519  else if (new_path->parallel_safe <
520  old_path->parallel_safe)
521  accept_new = false; /* old dominates new */
522  else if (new_path->rows < old_path->rows)
523  remove_old = true; /* new dominates old */
524  else if (new_path->rows > old_path->rows)
525  accept_new = false; /* old dominates new */
526  else if (compare_path_costs_fuzzily(new_path,
527  old_path,
528  1.0000000001) == COSTS_BETTER1)
529  remove_old = true; /* new dominates old */
530  else
531  accept_new = false; /* old equals or
532  * dominates new */
533  }
534  else if (outercmp == BMS_SUBSET1 &&
535  new_path->rows <= old_path->rows &&
536  new_path->parallel_safe >= old_path->parallel_safe)
537  remove_old = true; /* new dominates old */
538  else if (outercmp == BMS_SUBSET2 &&
539  new_path->rows >= old_path->rows &&
540  new_path->parallel_safe <= old_path->parallel_safe)
541  accept_new = false; /* old dominates new */
542  /* else different parameterizations, keep both */
543  }
544  break;
545  case COSTS_BETTER1:
546  if (keyscmp != PATHKEYS_BETTER2)
547  {
548  outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
549  PATH_REQ_OUTER(old_path));
550  if ((outercmp == BMS_EQUAL ||
551  outercmp == BMS_SUBSET1) &&
552  new_path->rows <= old_path->rows &&
553  new_path->parallel_safe >= old_path->parallel_safe)
554  remove_old = true; /* new dominates old */
555  }
556  break;
557  case COSTS_BETTER2:
558  if (keyscmp != PATHKEYS_BETTER1)
559  {
560  outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
561  PATH_REQ_OUTER(old_path));
562  if ((outercmp == BMS_EQUAL ||
563  outercmp == BMS_SUBSET2) &&
564  new_path->rows >= old_path->rows &&
565  new_path->parallel_safe <= old_path->parallel_safe)
566  accept_new = false; /* old dominates new */
567  }
568  break;
569  case COSTS_DIFFERENT:
570 
571  /*
572  * can't get here, but keep this case to keep compiler
573  * quiet
574  */
575  break;
576  }
577  }
578  }
579 
580  /*
581  * Remove current element from pathlist if dominated by new.
582  */
583  if (remove_old)
584  {
585  parent_rel->pathlist = foreach_delete_current(parent_rel->pathlist,
586  p1);
587 
588  /*
589  * Delete the data pointed-to by the deleted cell, if possible
590  */
591  if (!IsA(old_path, IndexPath))
592  pfree(old_path);
593  }
594  else
595  {
596  /* new belongs after this old path if it has cost >= old's */
597  if (new_path->total_cost >= old_path->total_cost)
598  insert_at = foreach_current_index(p1) + 1;
599  }
600 
601  /*
602  * If we found an old path that dominates new_path, we can quit
603  * scanning the pathlist; we will not add new_path, and we assume
604  * new_path cannot dominate any other elements of the pathlist.
605  */
606  if (!accept_new)
607  break;
608  }
609 
610  if (accept_new)
611  {
612  /* Accept the new path: insert it at proper place in pathlist */
613  parent_rel->pathlist =
614  list_insert_nth(parent_rel->pathlist, insert_at, new_path);
615  }
616  else
617  {
618  /* Reject and recycle the new path */
619  if (!IsA(new_path, IndexPath))
620  pfree(new_path);
621  }
622 }
BMS_Comparison bms_subset_compare(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:445
BMS_Comparison
Definition: bitmapset.h:61
@ BMS_SUBSET1
Definition: bitmapset.h:63
@ BMS_EQUAL
Definition: bitmapset.h:62
@ BMS_SUBSET2
Definition: bitmapset.h:64
#define IsA(nodeptr, _type_)
Definition: nodes.h:158
static PathCostComparison compare_path_costs_fuzzily(Path *path1, Path *path2, double fuzz_factor)
Definition: pathnode.c:164
PathCostComparison
Definition: pathnode.c:35
@ COSTS_EQUAL
Definition: pathnode.c:36
@ COSTS_BETTER1
Definition: pathnode.c:37
@ COSTS_BETTER2
Definition: pathnode.c:38
@ COSTS_DIFFERENT
Definition: pathnode.c:39
#define PATH_REQ_OUTER(path)
Definition: pathnodes.h:1669
#define NIL
Definition: pg_list.h:68
Definition: pg_list.h:54
Cardinality rows
Definition: pathnodes.h:1660
List * pathlist
Definition: pathnodes.h:892

References BMS_EQUAL, BMS_SUBSET1, BMS_SUBSET2, bms_subset_compare(), CHECK_FOR_INTERRUPTS, compare_path_costs_fuzzily(), compare_pathkeys(), COSTS_BETTER1, COSTS_BETTER2, COSTS_DIFFERENT, COSTS_EQUAL, foreach_current_index, foreach_delete_current, IsA, lfirst, list_insert_nth(), NIL, Path::parallel_safe, PATH_REQ_OUTER, Path::pathkeys, PATHKEYS_BETTER1, PATHKEYS_BETTER2, PATHKEYS_DIFFERENT, RelOptInfo::pathlist, pfree(), Path::rows, STD_FUZZ_FACTOR, and Path::total_cost.

Referenced by add_foreign_final_paths(), add_foreign_grouping_paths(), add_foreign_ordered_paths(), add_paths_to_append_rel(), add_paths_to_grouping_rel(), add_paths_with_pathkeys_for_rel(), build_setop_child_paths(), BuildParameterizedTidPaths(), consider_groupingsets_paths(), create_degenerate_grouping_paths(), create_final_distinct_paths(), create_index_paths(), create_one_window_path(), create_ordered_paths(), create_partial_grouping_paths(), create_tidscan_paths(), fileGetForeignPaths(), gather_grouping_paths(), generate_gather_paths(), generate_nonunion_paths(), generate_orderedappend_paths(), generate_recursion_path(), generate_union_paths(), generate_useful_gather_paths(), get_index_paths(), grouping_planner(), mark_dummy_rel(), postgresGetForeignJoinPaths(), postgresGetForeignPaths(), preprocess_minmax_aggregates(), query_planner(), set_cte_pathlist(), set_dummy_rel_pathlist(), set_function_pathlist(), set_namedtuplestore_pathlist(), set_plain_rel_pathlist(), set_result_pathlist(), set_subquery_pathlist(), set_tablefunc_pathlist(), set_tablesample_rel_pathlist(), set_values_pathlist(), set_worktable_pathlist(), try_hashjoin_path(), try_mergejoin_path(), and try_nestloop_path().
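A minimal usage sketch, assuming root and rel come from the surrounding planner code (compare set_plain_rel_pathlist()). The key contract, visible in the listing above, is that add_path() takes ownership of new_path and may pfree() it immediately if it is dominated:

    Path   *seqpath = create_seqscan_path(root, rel, NULL, 0);

    add_path(rel, seqpath);
    /* seqpath may already have been pfree'd if an existing path dominated
     * it; do not dereference it after this point. */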

◆ add_path_precheck()

bool add_path_precheck(RelOptInfo *parent_rel, Cost startup_cost, Cost total_cost, List *pathkeys, Relids required_outer)

Definition at line 642 of file pathnode.c.

645 {
646  List *new_path_pathkeys;
647  bool consider_startup;
648  ListCell *p1;
649 
650  /* Pretend parameterized paths have no pathkeys, per add_path policy */
651  new_path_pathkeys = required_outer ? NIL : pathkeys;
652 
653  /* Decide whether new path's startup cost is interesting */
654  consider_startup = required_outer ? parent_rel->consider_param_startup : parent_rel->consider_startup;
655 
656  foreach(p1, parent_rel->pathlist)
657  {
658  Path *old_path = (Path *) lfirst(p1);
659  PathKeysComparison keyscmp;
660 
661  /*
662  * We are looking for an old_path with the same parameterization (and
663  * by assumption the same rowcount) that dominates the new path on
664  * pathkeys as well as both cost metrics. If we find one, we can
665  * reject the new path.
666  *
667  * Cost comparisons here should match compare_path_costs_fuzzily.
668  */
669  if (total_cost > old_path->total_cost * STD_FUZZ_FACTOR)
670  {
671  /* new path can win on startup cost only if consider_startup */
672  if (startup_cost > old_path->startup_cost * STD_FUZZ_FACTOR ||
673  !consider_startup)
674  {
675  /* new path loses on cost, so check pathkeys... */
676  List *old_path_pathkeys;
677 
678  old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
679  keyscmp = compare_pathkeys(new_path_pathkeys,
680  old_path_pathkeys);
681  if (keyscmp == PATHKEYS_EQUAL ||
682  keyscmp == PATHKEYS_BETTER2)
683  {
684  /* new path does not win on pathkeys... */
685  if (bms_equal(required_outer, PATH_REQ_OUTER(old_path)))
686  {
687  /* Found an old path that dominates the new one */
688  return false;
689  }
690  }
691  }
692  }
693  else
694  {
695  /*
696  * Since the pathlist is sorted by total_cost, we can stop looking
697  * once we reach a path with a total_cost larger than the new
698  * path's.
699  */
700  break;
701  }
702  }
703 
704  return true;
705 }
bool bms_equal(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:142
@ PATHKEYS_EQUAL
Definition: paths.h:202
Cost startup_cost
Definition: pathnodes.h:1661
bool consider_param_startup
Definition: pathnodes.h:879
bool consider_startup
Definition: pathnodes.h:877

References bms_equal(), compare_pathkeys(), RelOptInfo::consider_param_startup, RelOptInfo::consider_startup, lfirst, NIL, PATH_REQ_OUTER, Path::pathkeys, PATHKEYS_BETTER2, PATHKEYS_EQUAL, RelOptInfo::pathlist, Path::startup_cost, STD_FUZZ_FACTOR, and Path::total_cost.

Referenced by add_partial_path_precheck(), try_hashjoin_path(), try_mergejoin_path(), and try_nestloop_path().
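A sketch of the usual precheck pattern for a nested-loop join, assuming outer_path, inner_path, jointype, extra, restrict_clauses, and pathkeys come from the surrounding join-planning code; this follows try_nestloop_path() but omits its additional legality checks:

    JoinCostWorkspace workspace;
    Relids      required_outer;

    required_outer = calc_nestloop_required_outer(outer_path->parent->relids,
                                                  PATH_REQ_OUTER(outer_path),
                                                  inner_path->parent->relids,
                                                  PATH_REQ_OUTER(inner_path));

    /* Cheap preliminary costing before building the full path node. */
    initial_cost_nestloop(root, &workspace, jointype,
                          outer_path, inner_path, extra);

    if (add_path_precheck(joinrel, workspace.startup_cost,
                          workspace.total_cost, pathkeys, required_outer))
        add_path(joinrel, (Path *)
                 create_nestloop_path(root, joinrel, jointype,
                                      &workspace, extra,
                                      outer_path, inner_path,
                                      restrict_clauses, pathkeys,
                                      required_outer));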

◆ adjust_limit_rows_costs()

void adjust_limit_rows_costs(double *rows, Cost *startup_cost, Cost *total_cost, int64 offset_est, int64 count_est)

Definition at line 3869 of file pathnode.c.

3874 {
3875  double input_rows = *rows;
3876  Cost input_startup_cost = *startup_cost;
3877  Cost input_total_cost = *total_cost;
3878 
3879  if (offset_est != 0)
3880  {
3881  double offset_rows;
3882 
3883  if (offset_est > 0)
3884  offset_rows = (double) offset_est;
3885  else
3886  offset_rows = clamp_row_est(input_rows * 0.10);
3887  if (offset_rows > *rows)
3888  offset_rows = *rows;
3889  if (input_rows > 0)
3890  *startup_cost +=
3891  (input_total_cost - input_startup_cost)
3892  * offset_rows / input_rows;
3893  *rows -= offset_rows;
3894  if (*rows < 1)
3895  *rows = 1;
3896  }
3897 
3898  if (count_est != 0)
3899  {
3900  double count_rows;
3901 
3902  if (count_est > 0)
3903  count_rows = (double) count_est;
3904  else
3905  count_rows = clamp_row_est(input_rows * 0.10);
3906  if (count_rows > *rows)
3907  count_rows = *rows;
3908  if (input_rows > 0)
3909  *total_cost = *startup_cost +
3910  (input_total_cost - input_startup_cost)
3911  * count_rows / input_rows;
3912  *rows = count_rows;
3913  if (*rows < 1)
3914  *rows = 1;
3915  }
3916 }
double clamp_row_est(double nrows)
Definition: costsize.c:202
double Cost
Definition: nodes.h:251

References clamp_row_est().

Referenced by create_limit_path(), and estimate_path_cost_size().
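A worked example with made-up numbers, showing how the row count and costs are scaled:

    /* Subpath estimated at 1000 rows, startup cost 0.0, total cost 100.0;
     * OFFSET 100 LIMIT 10, with both values known at plan time. */
    double  rows = 1000.0;
    Cost    startup_cost = 0.0;
    Cost    total_cost = 100.0;

    adjust_limit_rows_costs(&rows, &startup_cost, &total_cost, 100, 10);

    /* Now rows = 10, startup_cost = 10.0 (the fraction of the run cost
     * spent skipping the first 100 of 1000 rows), and total_cost = 11.0
     * (startup plus the cost of producing the next 10 rows). */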

◆ apply_projection_to_path()

Path * apply_projection_to_path(PlannerInfo *root, RelOptInfo *rel, Path *path, PathTarget *target)

Definition at line 2781 of file pathnode.c.

2785 {
2786  QualCost oldcost;
2787 
2788  /*
2789  * If given path can't project, we might need a Result node, so make a
2790  * separate ProjectionPath.
2791  */
2792  if (!is_projection_capable_path(path))
2793  return (Path *) create_projection_path(root, rel, path, target);
2794 
2795  /*
2796  * We can just jam the desired tlist into the existing path, being sure to
2797  * update its cost estimates appropriately.
2798  */
2799  oldcost = path->pathtarget->cost;
2800  path->pathtarget = target;
2801 
2802  path->startup_cost += target->cost.startup - oldcost.startup;
2803  path->total_cost += target->cost.startup - oldcost.startup +
2804  (target->cost.per_tuple - oldcost.per_tuple) * path->rows;
2805 
2806  /*
2807  * If the path happens to be a Gather or GatherMerge path, we'd like to
2808  * arrange for the subpath to return the required target list so that
2809  * workers can help project. But if there is something that is not
2810  * parallel-safe in the target expressions, then we can't.
2811  */
2812  if ((IsA(path, GatherPath) || IsA(path, GatherMergePath)) &&
2813  is_parallel_safe(root, (Node *) target->exprs))
2814  {
2815  /*
2816  * We always use create_projection_path here, even if the subpath is
2817  * projection-capable, so as to avoid modifying the subpath in place.
2818  * It seems unlikely at present that there could be any other
2819  * references to the subpath, but better safe than sorry.
2820  *
2821  * Note that we don't change the parallel path's cost estimates; it
2822  * might be appropriate to do so, to reflect the fact that the bulk of
2823  * the target evaluation will happen in workers.
2824  */
2825  if (IsA(path, GatherPath))
2826  {
2827  GatherPath *gpath = (GatherPath *) path;
2828 
2829  gpath->subpath = (Path *)
2830  create_projection_path(root,
2831  gpath->subpath->parent,
2832  gpath->subpath,
2833  target);
2834  }
2835  else
2836  {
2837  GatherMergePath *gmpath = (GatherMergePath *) path;
2838 
2839  gmpath->subpath = (Path *)
2840  create_projection_path(root,
2841  gmpath->subpath->parent,
2842  gmpath->subpath,
2843  target);
2844  }
2845  }
2846  else if (path->parallel_safe &&
2847  !is_parallel_safe(root, (Node *) target->exprs))
2848  {
2849  /*
2850  * We're inserting a parallel-restricted target list into a path
2851  * currently marked parallel-safe, so we have to mark it as no longer
2852  * safe.
2853  */
2854  path->parallel_safe = false;
2855  }
2856 
2857  return path;
2858 }
bool is_parallel_safe(PlannerInfo *root, Node *node)
Definition: clauses.c:753
bool is_projection_capable_path(Path *path)
Definition: createplan.c:7202
ProjectionPath * create_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2673
Path * subpath
Definition: pathnodes.h:2042
Definition: nodes.h:129
List * exprs
Definition: pathnodes.h:1533
QualCost cost
Definition: pathnodes.h:1539
Cost per_tuple
Definition: pathnodes.h:48
Cost startup
Definition: pathnodes.h:47

References PathTarget::cost, create_projection_path(), PathTarget::exprs, is_parallel_safe(), is_projection_capable_path(), IsA, Path::parallel_safe, QualCost::per_tuple, root, Path::rows, QualCost::startup, Path::startup_cost, GatherPath::subpath, GatherMergePath::subpath, and Path::total_cost.

Referenced by adjust_paths_for_srfs(), build_minmax_path(), create_ordered_paths(), and recurse_set_operations().
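A usage sketch, assuming root, rel, path, and a previously built final_target are available from the surrounding code. Unlike create_projection_path(), this function may modify the given path in place when the path type can project for itself, and only falls back to a separate ProjectionPath when it cannot, so it should only be applied to a path the caller is free to mutate:

    Path   *projected = apply_projection_to_path(root, rel, path, final_target);

    /* "projected" is either the same Path, updated in place, or a new
     * ProjectionPath wrapped around it. */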

◆ build_child_join_rel()

RelOptInfo * build_child_join_rel(PlannerInfo *root, RelOptInfo *outer_rel, RelOptInfo *inner_rel, RelOptInfo *parent_joinrel, List *restrictlist, SpecialJoinInfo *sjinfo)

Definition at line 881 of file relnode.c.

884 {
885  RelOptInfo *joinrel = makeNode(RelOptInfo);
886  AppendRelInfo **appinfos;
887  int nappinfos;
888 
889  /* Only joins between "other" relations land here. */
890  Assert(IS_OTHER_REL(outer_rel) && IS_OTHER_REL(inner_rel));
891 
892  /* The parent joinrel should have consider_partitionwise_join set. */
893  Assert(parent_joinrel->consider_partitionwise_join);
894 
895  /*
896  * Find the AppendRelInfo structures for the child baserels. We'll need
897  * these for computing the child join's relid set, and later for mapping
898  * Vars to the child rel.
899  */
900  appinfos = find_appinfos_by_relids(root,
901  bms_union(outer_rel->relids,
902  inner_rel->relids),
903  &nappinfos);
904 
905  joinrel->reloptkind = RELOPT_OTHER_JOINREL;
906  joinrel->relids = adjust_child_relids(parent_joinrel->relids,
907  nappinfos, appinfos);
908  joinrel->rows = 0;
909  /* cheap startup cost is interesting iff not all tuples to be retrieved */
910  joinrel->consider_startup = (root->tuple_fraction > 0);
911  joinrel->consider_param_startup = false;
912  joinrel->consider_parallel = false;
913  joinrel->reltarget = create_empty_pathtarget();
914  joinrel->pathlist = NIL;
915  joinrel->ppilist = NIL;
916  joinrel->partial_pathlist = NIL;
917  joinrel->cheapest_startup_path = NULL;
918  joinrel->cheapest_total_path = NULL;
919  joinrel->cheapest_unique_path = NULL;
920  joinrel->cheapest_parameterized_paths = NIL;
921  joinrel->direct_lateral_relids = NULL;
922  joinrel->lateral_relids = NULL;
923  joinrel->relid = 0; /* indicates not a baserel */
924  joinrel->rtekind = RTE_JOIN;
925  joinrel->min_attr = 0;
926  joinrel->max_attr = 0;
927  joinrel->attr_needed = NULL;
928  joinrel->attr_widths = NULL;
929  joinrel->notnullattnums = NULL;
930  joinrel->nulling_relids = NULL;
931  joinrel->lateral_vars = NIL;
932  joinrel->lateral_referencers = NULL;
933  joinrel->indexlist = NIL;
934  joinrel->pages = 0;
935  joinrel->tuples = 0;
936  joinrel->allvisfrac = 0;
937  joinrel->eclass_indexes = NULL;
938  joinrel->subroot = NULL;
939  joinrel->subplan_params = NIL;
940  joinrel->amflags = 0;
941  joinrel->serverid = InvalidOid;
942  joinrel->userid = InvalidOid;
943  joinrel->useridiscurrent = false;
944  joinrel->fdwroutine = NULL;
945  joinrel->fdw_private = NULL;
946  joinrel->baserestrictinfo = NIL;
947  joinrel->baserestrictcost.startup = 0;
948  joinrel->baserestrictcost.per_tuple = 0;
949  joinrel->joininfo = NIL;
950  joinrel->has_eclass_joins = false;
951  joinrel->consider_partitionwise_join = false; /* might get changed later */
952  joinrel->parent = parent_joinrel;
953  joinrel->top_parent = parent_joinrel->top_parent ? parent_joinrel->top_parent : parent_joinrel;
954  joinrel->top_parent_relids = joinrel->top_parent->relids;
955  joinrel->part_scheme = NULL;
956  joinrel->nparts = -1;
957  joinrel->boundinfo = NULL;
958  joinrel->partbounds_merged = false;
959  joinrel->partition_qual = NIL;
960  joinrel->part_rels = NULL;
961  joinrel->live_parts = NULL;
962  joinrel->all_partrels = NULL;
963  joinrel->partexprs = NULL;
964  joinrel->nullable_partexprs = NULL;
965 
966  /* Compute information relevant to foreign relations. */
967  set_foreign_rel_properties(joinrel, outer_rel, inner_rel);
968 
969  /* Set up reltarget struct */
970  build_child_join_reltarget(root, parent_joinrel, joinrel,
971  nappinfos, appinfos);
972 
973  /* Construct joininfo list. */
974  joinrel->joininfo = (List *) adjust_appendrel_attrs(root,
975  (Node *) parent_joinrel->joininfo,
976  nappinfos,
977  appinfos);
978 
979  /*
980  * Lateral relids referred in child join will be same as that referred in
981  * the parent relation.
982  */
983  joinrel->direct_lateral_relids = (Relids) bms_copy(parent_joinrel->direct_lateral_relids);
984  joinrel->lateral_relids = (Relids) bms_copy(parent_joinrel->lateral_relids);
985 
986  /*
987  * If the parent joinrel has pending equivalence classes, so does the
988  * child.
989  */
990  joinrel->has_eclass_joins = parent_joinrel->has_eclass_joins;
991 
992  /* Is the join between partitions itself partitioned? */
993  build_joinrel_partition_info(root, joinrel, outer_rel, inner_rel, sjinfo,
994  restrictlist);
995 
996  /* Child joinrel is parallel safe if parent is parallel safe. */
997  joinrel->consider_parallel = parent_joinrel->consider_parallel;
998 
999  /* Set estimates of the child-joinrel's size. */
1000  set_joinrel_size_estimates(root, joinrel, outer_rel, inner_rel,
1001  sjinfo, restrictlist);
1002 
1003  /* We build the join only once. */
1004  Assert(!find_join_rel(root, joinrel->relids));
1005 
1006  /* Add the relation to the PlannerInfo. */
1007  add_join_rel(root, joinrel);
1008 
1009  /*
1010  * We might need EquivalenceClass members corresponding to the child join,
1011  * so that we can represent sort pathkeys for it. As with children of
1012  * baserels, we shouldn't need this unless there are relevant eclass joins
1013  * (implying that a merge join might be possible) or pathkeys to sort by.
1014  */
1015  if (joinrel->has_eclass_joins || has_useful_pathkeys(root, parent_joinrel))
1016  add_child_join_rel_equivalences(root,
1017  nappinfos, appinfos,
1018  parent_joinrel, joinrel);
1019 
1020  pfree(appinfos);
1021 
1022  return joinrel;
1023 }
AppendRelInfo ** find_appinfos_by_relids(PlannerInfo *root, Relids relids, int *nappinfos)
Definition: appendinfo.c:737
Node * adjust_appendrel_attrs(PlannerInfo *root, Node *node, int nappinfos, AppendRelInfo **appinfos)
Definition: appendinfo.c:200
Relids adjust_child_relids(Relids relids, int nappinfos, AppendRelInfo **appinfos)
Definition: appendinfo.c:558
Bitmapset * bms_union(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:251
Bitmapset * bms_copy(const Bitmapset *a)
Definition: bitmapset.c:122
void set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo, List *restrictlist)
Definition: costsize.c:5292
void add_child_join_rel_equivalences(PlannerInfo *root, int nappinfos, AppendRelInfo **appinfos, RelOptInfo *parent_joinrel, RelOptInfo *child_joinrel)
Definition: equivclass.c:2758
#define makeNode(_type_)
Definition: nodes.h:155
@ RTE_JOIN
Definition: parsenodes.h:1030
bool has_useful_pathkeys(PlannerInfo *root, RelOptInfo *rel)
Definition: pathkeys.c:2261
Bitmapset * Relids
Definition: pathnodes.h:30
@ RELOPT_OTHER_JOINREL
Definition: pathnodes.h:824
#define IS_OTHER_REL(rel)
Definition: pathnodes.h:848
#define InvalidOid
Definition: postgres_ext.h:36
static void build_joinrel_partition_info(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo, List *restrictlist)
Definition: relnode.c:2017
RelOptInfo * find_join_rel(PlannerInfo *root, Relids relids)
Definition: relnode.c:527
static void build_child_join_reltarget(PlannerInfo *root, RelOptInfo *parentrel, RelOptInfo *childrel, int nappinfos, AppendRelInfo **appinfos)
Definition: relnode.c:2425
static void set_foreign_rel_properties(RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel)
Definition: relnode.c:589
static void add_join_rel(PlannerInfo *root, RelOptInfo *joinrel)
Definition: relnode.c:627
List * baserestrictinfo
Definition: pathnodes.h:979
List * subplan_params
Definition: pathnodes.h:948
List * ppilist
Definition: pathnodes.h:893
bool useridiscurrent
Definition: pathnodes.h:962
uint32 amflags
Definition: pathnodes.h:952
List * joininfo
Definition: pathnodes.h:985
Bitmapset * notnullattnums
Definition: pathnodes.h:930
List * partition_qual
Definition: pathnodes.h:1021
Relids relids
Definition: pathnodes.h:865
struct PathTarget * reltarget
Definition: pathnodes.h:887
Index relid
Definition: pathnodes.h:912
List * lateral_vars
Definition: pathnodes.h:934
Cardinality tuples
Definition: pathnodes.h:943
Relids top_parent_relids
Definition: pathnodes.h:1003
bool partbounds_merged
Definition: pathnodes.h:1019
BlockNumber pages
Definition: pathnodes.h:942
Relids lateral_relids
Definition: pathnodes.h:907
List * cheapest_parameterized_paths
Definition: pathnodes.h:898
RelOptKind reloptkind
Definition: pathnodes.h:859
List * indexlist
Definition: pathnodes.h:938
struct Path * cheapest_unique_path
Definition: pathnodes.h:897
Relids lateral_referencers
Definition: pathnodes.h:936
struct Path * cheapest_startup_path
Definition: pathnodes.h:895
QualCost baserestrictcost
Definition: pathnodes.h:981
struct Path * cheapest_total_path
Definition: pathnodes.h:896
Oid userid
Definition: pathnodes.h:960
Bitmapset * eclass_indexes
Definition: pathnodes.h:946
Relids all_partrels
Definition: pathnodes.h:1035
Relids direct_lateral_relids
Definition: pathnodes.h:905
bool has_eclass_joins
Definition: pathnodes.h:987
Oid serverid
Definition: pathnodes.h:958
Bitmapset * live_parts
Definition: pathnodes.h:1033
bool consider_partitionwise_join
Definition: pathnodes.h:993
PlannerInfo * subroot
Definition: pathnodes.h:947
AttrNumber max_attr
Definition: pathnodes.h:920
Relids nulling_relids
Definition: pathnodes.h:932
double allvisfrac
Definition: pathnodes.h:944
Cardinality rows
Definition: pathnodes.h:871
AttrNumber min_attr
Definition: pathnodes.h:918
RTEKind rtekind
Definition: pathnodes.h:916
PathTarget * create_empty_pathtarget(void)
Definition: tlist.c:681

References add_child_join_rel_equivalences(), add_join_rel(), adjust_appendrel_attrs(), adjust_child_relids(), RelOptInfo::all_partrels, RelOptInfo::allvisfrac, RelOptInfo::amflags, Assert, RelOptInfo::baserestrictcost, RelOptInfo::baserestrictinfo, bms_copy(), bms_union(), build_child_join_reltarget(), build_joinrel_partition_info(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RelOptInfo::cheapest_unique_path, RelOptInfo::consider_parallel, RelOptInfo::consider_param_startup, RelOptInfo::consider_partitionwise_join, RelOptInfo::consider_startup, create_empty_pathtarget(), RelOptInfo::direct_lateral_relids, RelOptInfo::eclass_indexes, find_appinfos_by_relids(), find_join_rel(), RelOptInfo::has_eclass_joins, has_useful_pathkeys(), RelOptInfo::indexlist, InvalidOid, IS_OTHER_REL, RelOptInfo::joininfo, RelOptInfo::lateral_referencers, RelOptInfo::lateral_relids, RelOptInfo::lateral_vars, RelOptInfo::live_parts, makeNode, RelOptInfo::max_attr, RelOptInfo::min_attr, NIL, RelOptInfo::notnullattnums, RelOptInfo::nparts, RelOptInfo::nulling_relids, RelOptInfo::pages, RelOptInfo::partbounds_merged, RelOptInfo::partial_pathlist, RelOptInfo::partition_qual, RelOptInfo::pathlist, QualCost::per_tuple, pfree(), RelOptInfo::ppilist, RelOptInfo::relid, RelOptInfo::relids, RELOPT_OTHER_JOINREL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, RelOptInfo::rows, RTE_JOIN, RelOptInfo::rtekind, RelOptInfo::serverid, set_foreign_rel_properties(), set_joinrel_size_estimates(), QualCost::startup, RelOptInfo::subplan_params, RelOptInfo::subroot, RelOptInfo::top_parent_relids, RelOptInfo::tuples, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by try_partitionwise_join().

◆ build_join_rel()

RelOptInfo * build_join_rel(PlannerInfo *root, Relids joinrelids, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo, List *pushed_down_joins, List **restrictlist_ptr)

Definition at line 665 of file relnode.c.

672 {
673  RelOptInfo *joinrel;
674  List *restrictlist;
675 
676  /* This function should be used only for join between parents. */
677  Assert(!IS_OTHER_REL(outer_rel) && !IS_OTHER_REL(inner_rel));
678 
679  /*
680  * See if we already have a joinrel for this set of base rels.
681  */
682  joinrel = find_join_rel(root, joinrelids);
683 
684  if (joinrel)
685  {
686  /*
687  * Yes, so we only need to figure the restrictlist for this particular
688  * pair of component relations.
689  */
690  if (restrictlist_ptr)
691  *restrictlist_ptr = build_joinrel_restrictlist(root,
692  joinrel,
693  outer_rel,
694  inner_rel,
695  sjinfo);
696  return joinrel;
697  }
698 
699  /*
700  * Nope, so make one.
701  */
702  joinrel = makeNode(RelOptInfo);
703  joinrel->reloptkind = RELOPT_JOINREL;
704  joinrel->relids = bms_copy(joinrelids);
705  joinrel->rows = 0;
706  /* cheap startup cost is interesting iff not all tuples to be retrieved */
707  joinrel->consider_startup = (root->tuple_fraction > 0);
708  joinrel->consider_param_startup = false;
709  joinrel->consider_parallel = false;
710  joinrel->reltarget = create_empty_pathtarget();
711  joinrel->pathlist = NIL;
712  joinrel->ppilist = NIL;
713  joinrel->partial_pathlist = NIL;
714  joinrel->cheapest_startup_path = NULL;
715  joinrel->cheapest_total_path = NULL;
716  joinrel->cheapest_unique_path = NULL;
717  joinrel->cheapest_parameterized_paths = NIL;
718  /* init direct_lateral_relids from children; we'll finish it up below */
719  joinrel->direct_lateral_relids =
720  bms_union(outer_rel->direct_lateral_relids,
721  inner_rel->direct_lateral_relids);
722  joinrel->lateral_relids = min_join_parameterization(root, joinrelids,
723  outer_rel, inner_rel);
724  joinrel->relid = 0; /* indicates not a baserel */
725  joinrel->rtekind = RTE_JOIN;
726  joinrel->min_attr = 0;
727  joinrel->max_attr = 0;
728  joinrel->attr_needed = NULL;
729  joinrel->attr_widths = NULL;
730  joinrel->notnullattnums = NULL;
731  joinrel->nulling_relids = NULL;
732  joinrel->lateral_vars = NIL;
733  joinrel->lateral_referencers = NULL;
734  joinrel->indexlist = NIL;
735  joinrel->statlist = NIL;
736  joinrel->pages = 0;
737  joinrel->tuples = 0;
738  joinrel->allvisfrac = 0;
739  joinrel->eclass_indexes = NULL;
740  joinrel->subroot = NULL;
741  joinrel->subplan_params = NIL;
742  joinrel->rel_parallel_workers = -1;
743  joinrel->amflags = 0;
744  joinrel->serverid = InvalidOid;
745  joinrel->userid = InvalidOid;
746  joinrel->useridiscurrent = false;
747  joinrel->fdwroutine = NULL;
748  joinrel->fdw_private = NULL;
749  joinrel->unique_for_rels = NIL;
750  joinrel->non_unique_for_rels = NIL;
751  joinrel->baserestrictinfo = NIL;
752  joinrel->baserestrictcost.startup = 0;
753  joinrel->baserestrictcost.per_tuple = 0;
754  joinrel->baserestrict_min_security = UINT_MAX;
755  joinrel->joininfo = NIL;
756  joinrel->has_eclass_joins = false;
757  joinrel->consider_partitionwise_join = false; /* might get changed later */
758  joinrel->parent = NULL;
759  joinrel->top_parent = NULL;
760  joinrel->top_parent_relids = NULL;
761  joinrel->part_scheme = NULL;
762  joinrel->nparts = -1;
763  joinrel->boundinfo = NULL;
764  joinrel->partbounds_merged = false;
765  joinrel->partition_qual = NIL;
766  joinrel->part_rels = NULL;
767  joinrel->live_parts = NULL;
768  joinrel->all_partrels = NULL;
769  joinrel->partexprs = NULL;
770  joinrel->nullable_partexprs = NULL;
771 
772  /* Compute information relevant to the foreign relations. */
773  set_foreign_rel_properties(joinrel, outer_rel, inner_rel);
774 
775  /*
776  * Fill the joinrel's tlist with just the Vars and PHVs that need to be
777  * output from this join (ie, are needed for higher joinclauses or final
778  * output).
779  *
780  * NOTE: the tlist order for a join rel will depend on which pair of outer
781  * and inner rels we first try to build it from. But the contents should
782  * be the same regardless.
783  */
784  build_joinrel_tlist(root, joinrel, outer_rel, sjinfo, pushed_down_joins,
785  (sjinfo->jointype == JOIN_FULL));
786  build_joinrel_tlist(root, joinrel, inner_rel, sjinfo, pushed_down_joins,
787  (sjinfo->jointype != JOIN_INNER));
788  add_placeholders_to_joinrel(root, joinrel, outer_rel, inner_rel, sjinfo);
789 
790  /*
791  * add_placeholders_to_joinrel also took care of adding the ph_lateral
792  * sets of any PlaceHolderVars computed here to direct_lateral_relids, so
793  * now we can finish computing that. This is much like the computation of
794  * the transitively-closed lateral_relids in min_join_parameterization,
795  * except that here we *do* have to consider the added PHVs.
796  */
797  joinrel->direct_lateral_relids =
798  bms_del_members(joinrel->direct_lateral_relids, joinrel->relids);
799 
800  /*
801  * Construct restrict and join clause lists for the new joinrel. (The
802  * caller might or might not need the restrictlist, but I need it anyway
803  * for set_joinrel_size_estimates().)
804  */
805  restrictlist = build_joinrel_restrictlist(root, joinrel,
806  outer_rel, inner_rel,
807  sjinfo);
808  if (restrictlist_ptr)
809  *restrictlist_ptr = restrictlist;
810  build_joinrel_joinlist(joinrel, outer_rel, inner_rel);
811 
812  /*
813  * This is also the right place to check whether the joinrel has any
814  * pending EquivalenceClass joins.
815  */
816  joinrel->has_eclass_joins = has_relevant_eclass_joinclause(root, joinrel);
817 
818  /* Store the partition information. */
819  build_joinrel_partition_info(root, joinrel, outer_rel, inner_rel, sjinfo,
820  restrictlist);
821 
822  /*
823  * Set estimates of the joinrel's size.
824  */
825  set_joinrel_size_estimates(root, joinrel, outer_rel, inner_rel,
826  sjinfo, restrictlist);
827 
828  /*
829  * Set the consider_parallel flag if this joinrel could potentially be
830  * scanned within a parallel worker. If this flag is false for either
831  * inner_rel or outer_rel, then it must be false for the joinrel also.
832  * Even if both are true, there might be parallel-restricted expressions
833  * in the targetlist or quals.
834  *
835  * Note that if there are more than two rels in this relation, they could
836  * be divided between inner_rel and outer_rel in any arbitrary way. We
837  * assume this doesn't matter, because we should hit all the same baserels
838  * and joinclauses while building up to this joinrel no matter which we
839  * take; therefore, we should make the same decision here however we get
840  * here.
841  */
842  if (inner_rel->consider_parallel && outer_rel->consider_parallel &&
843  is_parallel_safe(root, (Node *) restrictlist) &&
844  is_parallel_safe(root, (Node *) joinrel->reltarget->exprs))
845  joinrel->consider_parallel = true;
846 
847  /* Add the joinrel to the PlannerInfo. */
848  add_join_rel(root, joinrel);
849 
850  /*
851  * Also, if dynamic-programming join search is active, add the new joinrel
852  * to the appropriate sublist. Note: you might think the Assert on number
853  * of members should be for equality, but some of the level 1 rels might
854  * have been joinrels already, so we can only assert <=.
855  */
856  if (root->join_rel_level)
857  {
858  Assert(root->join_cur_level > 0);
859  Assert(root->join_cur_level <= bms_num_members(joinrel->relids));
860  root->join_rel_level[root->join_cur_level] =
861  lappend(root->join_rel_level[root->join_cur_level], joinrel);
862  }
863 
864  return joinrel;
865 }
int bms_num_members(const Bitmapset *a)
Definition: bitmapset.c:751
Bitmapset * bms_del_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:1161
bool has_relevant_eclass_joinclause(PlannerInfo *root, RelOptInfo *rel1)
Definition: equivclass.c:3168
List * lappend(List *list, void *datum)
Definition: list.c:339
@ JOIN_FULL
Definition: nodes.h:295
@ JOIN_INNER
Definition: nodes.h:293
@ RELOPT_JOINREL
Definition: pathnodes.h:822
void add_placeholders_to_joinrel(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo)
Definition: placeholder.c:373
static void build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *input_rel, SpecialJoinInfo *sjinfo, List *pushed_down_joins, bool can_null)
Definition: relnode.c:1112
Relids min_join_parameterization(PlannerInfo *root, Relids joinrelids, RelOptInfo *outer_rel, RelOptInfo *inner_rel)
Definition: relnode.c:1034
static List * build_joinrel_restrictlist(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo)
Definition: relnode.c:1297
static void build_joinrel_joinlist(RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel)
Definition: relnode.c:1334
List * statlist
Definition: pathnodes.h:940
List * unique_for_rels
Definition: pathnodes.h:971
List * non_unique_for_rels
Definition: pathnodes.h:973
int rel_parallel_workers
Definition: pathnodes.h:950
Index baserestrict_min_security
Definition: pathnodes.h:983
JoinType jointype
Definition: pathnodes.h:2898

References add_join_rel(), add_placeholders_to_joinrel(), RelOptInfo::all_partrels, RelOptInfo::allvisfrac, RelOptInfo::amflags, Assert, RelOptInfo::baserestrict_min_security, RelOptInfo::baserestrictcost, RelOptInfo::baserestrictinfo, bms_copy(), bms_del_members(), bms_num_members(), bms_union(), build_joinrel_joinlist(), build_joinrel_partition_info(), build_joinrel_restrictlist(), build_joinrel_tlist(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RelOptInfo::cheapest_unique_path, RelOptInfo::consider_parallel, RelOptInfo::consider_param_startup, RelOptInfo::consider_partitionwise_join, RelOptInfo::consider_startup, create_empty_pathtarget(), RelOptInfo::direct_lateral_relids, RelOptInfo::eclass_indexes, PathTarget::exprs, find_join_rel(), RelOptInfo::has_eclass_joins, has_relevant_eclass_joinclause(), RelOptInfo::indexlist, InvalidOid, IS_OTHER_REL, is_parallel_safe(), JOIN_FULL, JOIN_INNER, RelOptInfo::joininfo, SpecialJoinInfo::jointype, lappend(), RelOptInfo::lateral_referencers, RelOptInfo::lateral_relids, RelOptInfo::lateral_vars, RelOptInfo::live_parts, makeNode, RelOptInfo::max_attr, RelOptInfo::min_attr, min_join_parameterization(), NIL, RelOptInfo::non_unique_for_rels, RelOptInfo::notnullattnums, RelOptInfo::nparts, RelOptInfo::nulling_relids, RelOptInfo::pages, RelOptInfo::partbounds_merged, RelOptInfo::partial_pathlist, RelOptInfo::partition_qual, RelOptInfo::pathlist, QualCost::per_tuple, RelOptInfo::ppilist, RelOptInfo::rel_parallel_workers, RelOptInfo::relid, RelOptInfo::relids, RELOPT_JOINREL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, RelOptInfo::rows, RTE_JOIN, RelOptInfo::rtekind, RelOptInfo::serverid, set_foreign_rel_properties(), set_joinrel_size_estimates(), QualCost::startup, RelOptInfo::statlist, RelOptInfo::subplan_params, RelOptInfo::subroot, RelOptInfo::top_parent_relids, RelOptInfo::tuples, RelOptInfo::unique_for_rels, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by make_join_rel().

◆ build_simple_rel()

RelOptInfo* build_simple_rel ( PlannerInfo *  root,
int  relid,
RelOptInfo *  parent 
)

Definition at line 192 of file relnode.c.

193 {
194  RelOptInfo *rel;
195  RangeTblEntry *rte;
196 
197  /* Rel should not exist already */
198  Assert(relid > 0 && relid < root->simple_rel_array_size);
199  if (root->simple_rel_array[relid] != NULL)
200  elog(ERROR, "rel %d already exists", relid);
201 
202  /* Fetch RTE for relation */
203  rte = root->simple_rte_array[relid];
204  Assert(rte != NULL);
205 
206  rel = makeNode(RelOptInfo);
207  rel->reloptkind = parent ? RELOPT_OTHER_MEMBER_REL : RELOPT_BASEREL;
208  rel->relids = bms_make_singleton(relid);
209  rel->rows = 0;
210  /* cheap startup cost is interesting iff not all tuples to be retrieved */
211  rel->consider_startup = (root->tuple_fraction > 0);
212  rel->consider_param_startup = false; /* might get changed later */
213  rel->consider_parallel = false; /* might get changed later */
214  rel->reltarget = create_empty_pathtarget();
215  rel->pathlist = NIL;
216  rel->ppilist = NIL;
217  rel->partial_pathlist = NIL;
218  rel->cheapest_startup_path = NULL;
219  rel->cheapest_total_path = NULL;
220  rel->cheapest_unique_path = NULL;
221  rel->cheapest_parameterized_paths = NIL;
222  rel->relid = relid;
223  rel->rtekind = rte->rtekind;
224  /* min_attr, max_attr, attr_needed, attr_widths are set below */
225  rel->notnullattnums = NULL;
226  rel->lateral_vars = NIL;
227  rel->indexlist = NIL;
228  rel->statlist = NIL;
229  rel->pages = 0;
230  rel->tuples = 0;
231  rel->allvisfrac = 0;
232  rel->eclass_indexes = NULL;
233  rel->subroot = NULL;
234  rel->subplan_params = NIL;
235  rel->rel_parallel_workers = -1; /* set up in get_relation_info */
236  rel->amflags = 0;
237  rel->serverid = InvalidOid;
238  if (rte->rtekind == RTE_RELATION)
239  {
240  Assert(parent == NULL ||
241  parent->rtekind == RTE_RELATION ||
242  parent->rtekind == RTE_SUBQUERY);
243 
244  /*
245  * For any RELATION rte, we need a userid with which to check
246  * permission access. Baserels simply use their own
247  * RTEPermissionInfo's checkAsUser.
248  *
249  * For otherrels normally there's no RTEPermissionInfo, so we use the
250  * parent's, which normally has one. The exceptional case is that the
251  * parent is a subquery, in which case the otherrel will have its own.
252  */
253  if (rel->reloptkind == RELOPT_BASEREL ||
254  (rel->reloptkind == RELOPT_OTHER_MEMBER_REL &&
255  parent->rtekind == RTE_SUBQUERY))
256  {
257  RTEPermissionInfo *perminfo;
258 
259  perminfo = getRTEPermissionInfo(root->parse->rteperminfos, rte);
260  rel->userid = perminfo->checkAsUser;
261  }
262  else
263  rel->userid = parent->userid;
264  }
265  else
266  rel->userid = InvalidOid;
267  rel->useridiscurrent = false;
268  rel->fdwroutine = NULL;
269  rel->fdw_private = NULL;
270  rel->unique_for_rels = NIL;
271  rel->non_unique_for_rels = NIL;
272  rel->baserestrictinfo = NIL;
273  rel->baserestrictcost.startup = 0;
274  rel->baserestrictcost.per_tuple = 0;
275  rel->baserestrict_min_security = UINT_MAX;
276  rel->joininfo = NIL;
277  rel->has_eclass_joins = false;
278  rel->consider_partitionwise_join = false; /* might get changed later */
279  rel->part_scheme = NULL;
280  rel->nparts = -1;
281  rel->boundinfo = NULL;
282  rel->partbounds_merged = false;
283  rel->partition_qual = NIL;
284  rel->part_rels = NULL;
285  rel->live_parts = NULL;
286  rel->all_partrels = NULL;
287  rel->partexprs = NULL;
288  rel->nullable_partexprs = NULL;
289 
290  /*
291  * Pass assorted information down the inheritance hierarchy.
292  */
293  if (parent)
294  {
295  /* We keep back-links to immediate parent and topmost parent. */
296  rel->parent = parent;
297  rel->top_parent = parent->top_parent ? parent->top_parent : parent;
298  rel->top_parent_relids = rel->top_parent->relids;
299 
300  /*
301  * A child rel is below the same outer joins as its parent. (We
302  * presume this info was already calculated for the parent.)
303  */
304  rel->nulling_relids = parent->nulling_relids;
305 
306  /*
307  * Also propagate lateral-reference information from appendrel parent
308  * rels to their child rels. We intentionally give each child rel the
309  * same minimum parameterization, even though it's quite possible that
310  * some don't reference all the lateral rels. This is because any
311  * append path for the parent will have to have the same
312  * parameterization for every child anyway, and there's no value in
313  * forcing extra reparameterize_path() calls. Similarly, a lateral
314  * reference to the parent prevents use of otherwise-movable join rels
315  * for each child.
316  *
317  * It's possible for child rels to have their own children, in which
318  * case the topmost parent's lateral info propagates all the way down.
319  */
320  rel->direct_lateral_relids = parent->direct_lateral_relids;
321  rel->lateral_relids = parent->lateral_relids;
322  rel->lateral_referencers = parent->lateral_referencers;
323  }
324  else
325  {
326  rel->parent = NULL;
327  rel->top_parent = NULL;
328  rel->top_parent_relids = NULL;
329  rel->nulling_relids = NULL;
330  rel->direct_lateral_relids = NULL;
331  rel->lateral_relids = NULL;
332  rel->lateral_referencers = NULL;
333  }
334 
335  /* Check type of rtable entry */
336  switch (rte->rtekind)
337  {
338  case RTE_RELATION:
339  /* Table --- retrieve statistics from the system catalogs */
340  get_relation_info(root, rte->relid, rte->inh, rel);
341  break;
342  case RTE_SUBQUERY:
343  case RTE_FUNCTION:
344  case RTE_TABLEFUNC:
345  case RTE_VALUES:
346  case RTE_CTE:
347  case RTE_NAMEDTUPLESTORE:
348 
349  /*
350  * Subquery, function, tablefunc, values list, CTE, or ENR --- set
351  * up attr range and arrays
352  *
353  * Note: 0 is included in range to support whole-row Vars
354  */
355  rel->min_attr = 0;
356  rel->max_attr = list_length(rte->eref->colnames);
357  rel->attr_needed = (Relids *)
358  palloc0((rel->max_attr - rel->min_attr + 1) * sizeof(Relids));
359  rel->attr_widths = (int32 *)
360  palloc0((rel->max_attr - rel->min_attr + 1) * sizeof(int32));
361  break;
362  case RTE_RESULT:
363  /* RTE_RESULT has no columns, nor could it have whole-row Var */
364  rel->min_attr = 0;
365  rel->max_attr = -1;
366  rel->attr_needed = NULL;
367  rel->attr_widths = NULL;
368  break;
369  default:
370  elog(ERROR, "unrecognized RTE kind: %d",
371  (int) rte->rtekind);
372  break;
373  }
374 
375  /*
376  * We must apply the partially filled in RelOptInfo before calling
377  * apply_child_basequals due to some transformations within that function
378  * which require the RelOptInfo to be available in the simple_rel_array.
379  */
380  root->simple_rel_array[relid] = rel;
381 
382  /*
383  * Apply the parent's quals to the child, with appropriate substitution of
384  * variables. If the resulting clause is constant-FALSE or NULL after
385  * applying transformations, apply_child_basequals returns false to
386  * indicate that scanning this relation won't yield any rows. In this
387  * case, we mark the child as dummy right away. (We must do this
388  * immediately so that pruning works correctly when recursing in
389  * expand_partitioned_rtentry.)
390  */
391  if (parent)
392  {
393  AppendRelInfo *appinfo = root->append_rel_array[relid];
394 
395  Assert(appinfo != NULL);
396  if (!apply_child_basequals(root, parent, rel, rte, appinfo))
397  {
398  /*
399  * Restriction clause reduced to constant FALSE or NULL. Mark as
400  * dummy so we won't scan this relation.
401  */
402  mark_dummy_rel(rel);
403  }
404  }
405 
406  return rel;
407 }
Bitmapset * bms_make_singleton(int x)
Definition: bitmapset.c:216
signed int int32
Definition: c.h:494
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:224
bool apply_child_basequals(PlannerInfo *root, RelOptInfo *parentrel, RelOptInfo *childrel, RangeTblEntry *childRTE, AppendRelInfo *appinfo)
Definition: inherit.c:834
void mark_dummy_rel(RelOptInfo *rel)
Definition: joinrels.c:1384
void * palloc0(Size size)
Definition: mcxt.c:1347
RTEPermissionInfo * getRTEPermissionInfo(List *rteperminfos, RangeTblEntry *rte)
@ RTE_CTE
Definition: parsenodes.h:1034
@ RTE_NAMEDTUPLESTORE
Definition: parsenodes.h:1035
@ RTE_VALUES
Definition: parsenodes.h:1033
@ RTE_SUBQUERY
Definition: parsenodes.h:1029
@ RTE_RESULT
Definition: parsenodes.h:1036
@ RTE_FUNCTION
Definition: parsenodes.h:1031
@ RTE_TABLEFUNC
Definition: parsenodes.h:1032
@ RTE_RELATION
Definition: parsenodes.h:1028
@ RELOPT_BASEREL
Definition: pathnodes.h:821
@ RELOPT_OTHER_MEMBER_REL
Definition: pathnodes.h:823
static int list_length(const List *l)
Definition: pg_list.h:152
void get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent, RelOptInfo *rel)
Definition: plancat.c:115
RTEKind rtekind
Definition: parsenodes.h:1057

References RelOptInfo::all_partrels, RelOptInfo::allvisfrac, RelOptInfo::amflags, apply_child_basequals(), Assert, RelOptInfo::baserestrict_min_security, RelOptInfo::baserestrictcost, RelOptInfo::baserestrictinfo, bms_make_singleton(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RelOptInfo::cheapest_unique_path, RTEPermissionInfo::checkAsUser, RelOptInfo::consider_parallel, RelOptInfo::consider_param_startup, RelOptInfo::consider_partitionwise_join, RelOptInfo::consider_startup, create_empty_pathtarget(), RelOptInfo::direct_lateral_relids, RelOptInfo::eclass_indexes, elog, ERROR, get_relation_info(), getRTEPermissionInfo(), RelOptInfo::has_eclass_joins, RelOptInfo::indexlist, RangeTblEntry::inh, InvalidOid, RelOptInfo::joininfo, RelOptInfo::lateral_referencers, RelOptInfo::lateral_relids, RelOptInfo::lateral_vars, list_length(), RelOptInfo::live_parts, makeNode, mark_dummy_rel(), RelOptInfo::max_attr, RelOptInfo::min_attr, NIL, RelOptInfo::non_unique_for_rels, RelOptInfo::notnullattnums, RelOptInfo::nparts, RelOptInfo::nulling_relids, RelOptInfo::pages, palloc0(), RelOptInfo::partbounds_merged, RelOptInfo::partial_pathlist, RelOptInfo::partition_qual, RelOptInfo::pathlist, QualCost::per_tuple, RelOptInfo::ppilist, RelOptInfo::rel_parallel_workers, RangeTblEntry::relid, RelOptInfo::relid, RelOptInfo::relids, RELOPT_BASEREL, RELOPT_OTHER_MEMBER_REL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, RelOptInfo::rows, RTE_CTE, RTE_FUNCTION, RTE_NAMEDTUPLESTORE, RTE_RELATION, RTE_RESULT, RTE_SUBQUERY, RTE_TABLEFUNC, RTE_VALUES, RangeTblEntry::rtekind, RelOptInfo::rtekind, RelOptInfo::serverid, QualCost::startup, RelOptInfo::statlist, RelOptInfo::subplan_params, RelOptInfo::subroot, RelOptInfo::top_parent_relids, RelOptInfo::tuples, RelOptInfo::unique_for_rels, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by add_base_rels_to_query(), expand_appendrel_subquery(), expand_inherited_rtentry(), expand_partitioned_rtentry(), plan_cluster_use_sort(), plan_create_index_workers(), query_planner(), and recurse_set_operations().
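
A hedged usage sketch (variable names are illustrative, not quotes from the callers above): planner startup builds each baserel with no parent, while inheritance and partition expansion build each child rel under its append-rel parent so lateral and parameterization info propagates down:

/* plain baserel; varno is the relation's range-table index */
RelOptInfo *rel = build_simple_rel(root, varno, NULL);

/* child rel built under an appendrel parent during inheritance expansion */
RelOptInfo *childrel = build_simple_rel(root, childRTindex, rel);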

◆ calc_nestloop_required_outer()

Relids calc_nestloop_required_outer ( Relids  outerrelids,
Relids  outer_paramrels,
Relids  innerrelids,
Relids  inner_paramrels 
)

Definition at line 2366 of file pathnode.c.

2370 {
2371  Relids required_outer;
2372 
2373  /* inner_path can require rels from outer path, but not vice versa */
2374  Assert(!bms_overlap(outer_paramrels, innerrelids));
2375  /* easy case if inner path is not parameterized */
2376  if (!inner_paramrels)
2377  return bms_copy(outer_paramrels);
2378  /* else, form the union ... */
2379  required_outer = bms_union(outer_paramrels, inner_paramrels);
2380  /* ... and remove any mention of now-satisfied outer rels */
2381  required_outer = bms_del_members(required_outer,
2382  outerrelids);
2383  return required_outer;
2384 }
bool bms_overlap(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:582

References Assert, bms_copy(), bms_del_members(), bms_overlap(), and bms_union().

Referenced by try_nestloop_path().
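
As a hedged illustration of the calling convention (names are assumptions, not taken from joinpath.c): the caller passes the outer side's relids plus both sides' parameterizations, and gets back the union of the parameterizations with the outer side's own rels removed:

Relids outerrelids = outer_path->parent->relids;
Relids innerrelids = inner_path->parent->relids;
Relids required_outer;

required_outer = calc_nestloop_required_outer(outerrelids,
                                              PATH_REQ_OUTER(outer_path),
                                              innerrelids,
                                              PATH_REQ_OUTER(inner_path));
/* an empty result means the nestloop path is unparameterized */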

◆ calc_non_nestloop_required_outer()

Relids calc_non_nestloop_required_outer ( Path *  outer_path,
Path *  inner_path 
)

Definition at line 2393 of file pathnode.c.

2394 {
2395  Relids outer_paramrels = PATH_REQ_OUTER(outer_path);
2396  Relids inner_paramrels = PATH_REQ_OUTER(inner_path);
2397  Relids innerrelids PG_USED_FOR_ASSERTS_ONLY;
2398  Relids outerrelids PG_USED_FOR_ASSERTS_ONLY;
2399  Relids required_outer;
2400 
2401  /*
2402  * Any parameterization of the input paths refers to topmost parents of
2403  * the relevant relations, because reparameterize_path_by_child() hasn't
2404  * been called yet. So we must consider topmost parents of the relations
2405  * being joined, too, while checking for disallowed parameterization
2406  * cases.
2407  */
2408  if (inner_path->parent->top_parent_relids)
2409  innerrelids = inner_path->parent->top_parent_relids;
2410  else
2411  innerrelids = inner_path->parent->relids;
2412 
2413  if (outer_path->parent->top_parent_relids)
2414  outerrelids = outer_path->parent->top_parent_relids;
2415  else
2416  outerrelids = outer_path->parent->relids;
2417 
2418  /* neither path can require rels from the other */
2419  Assert(!bms_overlap(outer_paramrels, innerrelids));
2420  Assert(!bms_overlap(inner_paramrels, outerrelids));
2421  /* form the union ... */
2422  required_outer = bms_union(outer_paramrels, inner_paramrels);
2423  /* we do not need an explicit test for empty; bms_union gets it right */
2424  return required_outer;
2425 }
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:182

References Assert, bms_overlap(), bms_union(), PATH_REQ_OUTER, and PG_USED_FOR_ASSERTS_ONLY.

Referenced by try_hashjoin_path(), and try_mergejoin_path().

◆ compare_fractional_path_costs()

int compare_fractional_path_costs ( Path *  path1,
Path *  path2,
double  fraction 
)

Definition at line 115 of file pathnode.c.

117 {
118  Cost cost1,
119  cost2;
120 
121  if (fraction <= 0.0 || fraction >= 1.0)
122  return compare_path_costs(path1, path2, TOTAL_COST);
123  cost1 = path1->startup_cost +
124  fraction * (path1->total_cost - path1->startup_cost);
125  cost2 = path2->startup_cost +
126  fraction * (path2->total_cost - path2->startup_cost);
127  if (cost1 < cost2)
128  return -1;
129  if (cost1 > cost2)
130  return +1;
131  return 0;
132 }
int compare_path_costs(Path *path1, Path *path2, CostSelector criterion)
Definition: pathnode.c:69
@ TOTAL_COST
Definition: pathnodes.h:38

References compare_path_costs(), Path::startup_cost, TOTAL_COST, and Path::total_cost.

Referenced by choose_hashed_setop(), get_cheapest_fractional_path(), and get_cheapest_fractional_path_for_pathkeys().
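
Worked example (numbers invented for illustration): with fraction = 0.25, a path with startup_cost = 0 and total_cost = 100 is charged 0 + 0.25 * (100 - 0) = 25, while a path with startup_cost = 50 and total_cost = 60 is charged 50 + 0.25 * (60 - 50) = 52.5. The first path therefore compares as cheaper for retrieving the first quarter of the rows, even though its total cost is higher.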

◆ compare_path_costs()

int compare_path_costs ( Path *  path1,
Path *  path2,
CostSelector  criterion 
)

Definition at line 69 of file pathnode.c.

70 {
71  if (criterion == STARTUP_COST)
72  {
73  if (path1->startup_cost < path2->startup_cost)
74  return -1;
75  if (path1->startup_cost > path2->startup_cost)
76  return +1;
77 
78  /*
79  * If paths have the same startup cost (not at all unlikely), order
80  * them by total cost.
81  */
82  if (path1->total_cost < path2->total_cost)
83  return -1;
84  if (path1->total_cost > path2->total_cost)
85  return +1;
86  }
87  else
88  {
89  if (path1->total_cost < path2->total_cost)
90  return -1;
91  if (path1->total_cost > path2->total_cost)
92  return +1;
93 
94  /*
95  * If paths have the same total cost, order them by startup cost.
96  */
97  if (path1->startup_cost < path2->startup_cost)
98  return -1;
99  if (path1->startup_cost > path2->startup_cost)
100  return +1;
101  }
102  return 0;
103 }
@ STARTUP_COST
Definition: pathnodes.h:38

References STARTUP_COST, Path::startup_cost, and Path::total_cost.

Referenced by append_startup_cost_compare(), append_total_cost_compare(), compare_fractional_path_costs(), generate_mergejoin_paths(), get_cheapest_parameterized_child_path(), get_cheapest_path_for_pathkeys(), and set_cheapest().

◆ create_agg_path()

AggPath* create_agg_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Path *  subpath,
PathTarget *  target,
AggStrategy  aggstrategy,
AggSplit  aggsplit,
List *  groupClause,
List *  qual,
const AggClauseCosts *  aggcosts,
double  numGroups 
)

Definition at line 3143 of file pathnode.c.

3153 {
3154  AggPath *pathnode = makeNode(AggPath);
3155 
3156  pathnode->path.pathtype = T_Agg;
3157  pathnode->path.parent = rel;
3158  pathnode->path.pathtarget = target;
3159  /* For now, assume we are above any joins, so no parameterization */
3160  pathnode->path.param_info = NULL;
3161  pathnode->path.parallel_aware = false;
3162  pathnode->path.parallel_safe = rel->consider_parallel &&
3163  subpath->parallel_safe;
3164  pathnode->path.parallel_workers = subpath->parallel_workers;
3165 
3166  if (aggstrategy == AGG_SORTED)
3167  {
3168  /*
3169  * Attempt to preserve the order of the subpath. Additional pathkeys
3170  * may have been added in adjust_group_pathkeys_for_groupagg() to
3171  * support ORDER BY / DISTINCT aggregates. Pathkeys added there
3172  * belong to columns within the aggregate function, so we must strip
3173  * these additional pathkeys off as those columns are unavailable
3174  * above the aggregate node.
3175  */
3176  if (list_length(subpath->pathkeys) > root->num_groupby_pathkeys)
3177  pathnode->path.pathkeys = list_copy_head(subpath->pathkeys,
3178  root->num_groupby_pathkeys);
3179  else
3180  pathnode->path.pathkeys = subpath->pathkeys; /* preserves order */
3181  }
3182  else
3183  pathnode->path.pathkeys = NIL; /* output is unordered */
3184 
3185  pathnode->subpath = subpath;
3186 
3187  pathnode->aggstrategy = aggstrategy;
3188  pathnode->aggsplit = aggsplit;
3189  pathnode->numGroups = numGroups;
3190  pathnode->transitionSpace = aggcosts ? aggcosts->transitionSpace : 0;
3191  pathnode->groupClause = groupClause;
3192  pathnode->qual = qual;
3193 
3194  cost_agg(&pathnode->path, root,
3195  aggstrategy, aggcosts,
3196  list_length(groupClause), numGroups,
3197  qual,
3198  subpath->startup_cost, subpath->total_cost,
3199  subpath->rows, subpath->pathtarget->width);
3200 
3201  /* add tlist eval cost for each output row */
3202  pathnode->path.startup_cost += target->cost.startup;
3203  pathnode->path.total_cost += target->cost.startup +
3204  target->cost.per_tuple * pathnode->path.rows;
3205 
3206  return pathnode;
3207 }
void cost_agg(Path *path, PlannerInfo *root, AggStrategy aggstrategy, const AggClauseCosts *aggcosts, int numGroupCols, double numGroups, List *quals, Cost input_startup_cost, Cost input_total_cost, double input_tuples, double input_width)
Definition: costsize.c:2629
List * list_copy_head(const List *oldlist, int len)
Definition: list.c:1593
Datum subpath(PG_FUNCTION_ARGS)
Definition: ltree_op.c:310
@ AGG_SORTED
Definition: nodes.h:355
Size transitionSpace
Definition: pathnodes.h:62
Path * subpath
Definition: pathnodes.h:2254
Cardinality numGroups
Definition: pathnodes.h:2257
AggSplit aggsplit
Definition: pathnodes.h:2256
List * groupClause
Definition: pathnodes.h:2259
uint64 transitionSpace
Definition: pathnodes.h:2258
AggStrategy aggstrategy
Definition: pathnodes.h:2255
Path path
Definition: pathnodes.h:2253
List * qual
Definition: pathnodes.h:2260
NodeTag pathtype
Definition: pathnodes.h:1626
int parallel_workers
Definition: pathnodes.h:1657
bool parallel_aware
Definition: pathnodes.h:1653

References AGG_SORTED, AggPath::aggsplit, AggPath::aggstrategy, RelOptInfo::consider_parallel, PathTarget::cost, cost_agg(), AggPath::groupClause, list_copy_head(), list_length(), makeNode, NIL, AggPath::numGroups, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, AggPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, AggPath::qual, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), AggPath::subpath, Path::total_cost, AggClauseCosts::transitionSpace, and AggPath::transitionSpace.

Referenced by add_paths_to_grouping_rel(), create_final_distinct_paths(), create_partial_distinct_paths(), create_partial_grouping_paths(), and generate_union_paths().
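
A hedged sketch of a typical call (not a quote from planner.c; grouped_rel, cheapest_path, agg_costs, and dNumGroups are assumed to be available in the caller): build a hashed-aggregate path over the cheapest input and submit it with add_path():

AggPath *agg_path;

agg_path = create_agg_path(root,
                           grouped_rel,
                           cheapest_path,
                           grouped_rel->reltarget,
                           AGG_HASHED,
                           AGGSPLIT_SIMPLE,
                           root->parse->groupClause,
                           (List *) root->parse->havingQual,
                           agg_costs,
                           dNumGroups);
add_path(grouped_rel, (Path *) agg_path);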

◆ create_append_path()

AppendPath* create_append_path ( PlannerInfo *  root,
RelOptInfo *  rel,
List *  subpaths,
List *  partial_subpaths,
List *  pathkeys,
Relids  required_outer,
int  parallel_workers,
bool  parallel_aware,
double  rows 
)

Definition at line 1244 of file pathnode.c.

1250 {
1251  AppendPath *pathnode = makeNode(AppendPath);
1252  ListCell *l;
1253 
1254  Assert(!parallel_aware || parallel_workers > 0);
1255 
1256  pathnode->path.pathtype = T_Append;
1257  pathnode->path.parent = rel;
1258  pathnode->path.pathtarget = rel->reltarget;
1259 
1260  /*
1261  * If this is for a baserel (not a join or non-leaf partition), we prefer
1262  * to apply get_baserel_parampathinfo to construct a full ParamPathInfo
1263  * for the path. This supports building a Memoize path atop this path,
1264  * and if this is a partitioned table the info may be useful for run-time
1265  * pruning (cf make_partition_pruneinfo()).
1266  *
1267  * However, if we don't have "root" then that won't work and we fall back
1268  * on the simpler get_appendrel_parampathinfo. There's no point in doing
1269  * the more expensive thing for a dummy path, either.
1270  */
1271  if (rel->reloptkind == RELOPT_BASEREL && root && subpaths != NIL)
1272  pathnode->path.param_info = get_baserel_parampathinfo(root,
1273  rel,
1274  required_outer);
1275  else
1276  pathnode->path.param_info = get_appendrel_parampathinfo(rel,
1277  required_outer);
1278 
1279  pathnode->path.parallel_aware = parallel_aware;
1280  pathnode->path.parallel_safe = rel->consider_parallel;
1281  pathnode->path.parallel_workers = parallel_workers;
1282  pathnode->path.pathkeys = pathkeys;
1283 
1284  /*
1285  * For parallel append, non-partial paths are sorted by descending total
1286  * costs. That way, the total time to finish all non-partial paths is
1287  * minimized. Also, the partial paths are sorted by descending startup
1288  * costs. There may be some paths that require to do startup work by a
1289  * single worker. In such case, it's better for workers to choose the
1290  * expensive ones first, whereas the leader should choose the cheapest
1291  * startup plan.
1292  */
1293  if (pathnode->path.parallel_aware)
1294  {
1295  /*
1296  * We mustn't fiddle with the order of subpaths when the Append has
1297  * pathkeys. The order they're listed in is critical to keeping the
1298  * pathkeys valid.
1299  */
1300  Assert(pathkeys == NIL);
1301 
1302  list_sort(subpaths, append_total_cost_compare);
1303  list_sort(partial_subpaths, append_startup_cost_compare);
1304  }
1305  pathnode->first_partial_path = list_length(subpaths);
1306  pathnode->subpaths = list_concat(subpaths, partial_subpaths);
1307 
1308  /*
1309  * Apply query-wide LIMIT if known and path is for sole base relation.
1310  * (Handling this at this low level is a bit klugy.)
1311  */
1312  if (root != NULL && bms_equal(rel->relids, root->all_query_rels))
1313  pathnode->limit_tuples = root->limit_tuples;
1314  else
1315  pathnode->limit_tuples = -1.0;
1316 
1317  foreach(l, pathnode->subpaths)
1318  {
1319  Path *subpath = (Path *) lfirst(l);
1320 
1321  pathnode->path.parallel_safe = pathnode->path.parallel_safe &&
1322  subpath->parallel_safe;
1323 
1324  /* All child paths must have same parameterization */
1325  Assert(bms_equal(PATH_REQ_OUTER(subpath), required_outer));
1326  }
1327 
1328  Assert(!parallel_aware || pathnode->path.parallel_safe);
1329 
1330  /*
1331  * If there's exactly one child path then the output of the Append is
1332  * necessarily ordered the same as the child's, so we can inherit the
1333  * child's pathkeys if any, overriding whatever the caller might've said.
1334  * Furthermore, if the child's parallel awareness matches the Append's,
1335  * then the Append is a no-op and will be discarded later (in setrefs.c).
1336  * Then we can inherit the child's size and cost too, effectively charging
1337  * zero for the Append. Otherwise, we must do the normal costsize
1338  * calculation.
1339  */
1340  if (list_length(pathnode->subpaths) == 1)
1341  {
1342  Path *child = (Path *) linitial(pathnode->subpaths);
1343 
1344  if (child->parallel_aware == parallel_aware)
1345  {
1346  pathnode->path.rows = child->rows;
1347  pathnode->path.startup_cost = child->startup_cost;
1348  pathnode->path.total_cost = child->total_cost;
1349  }
1350  else
1351  cost_append(pathnode);
1352  /* Must do this last, else cost_append complains */
1353  pathnode->path.pathkeys = child->pathkeys;
1354  }
1355  else
1356  cost_append(pathnode);
1357 
1358  /* If the caller provided a row estimate, override the computed value. */
1359  if (rows >= 0)
1360  pathnode->path.rows = rows;
1361 
1362  return pathnode;
1363 }
void cost_append(AppendPath *apath)
Definition: costsize.c:2210
void list_sort(List *list, list_sort_comparator cmp)
Definition: list.c:1674
List * list_concat(List *list1, const List *list2)
Definition: list.c:561
static int append_startup_cost_compare(const ListCell *a, const ListCell *b)
Definition: pathnode.c:1397
static int append_total_cost_compare(const ListCell *a, const ListCell *b)
Definition: pathnode.c:1375
#define linitial(l)
Definition: pg_list.h:178
ParamPathInfo * get_baserel_parampathinfo(PlannerInfo *root, RelOptInfo *baserel, Relids required_outer)
Definition: relnode.c:1557
ParamPathInfo * get_appendrel_parampathinfo(RelOptInfo *appendrel, Relids required_outer)
Definition: relnode.c:1868
int first_partial_path
Definition: pathnodes.h:1934
Cardinality limit_tuples
Definition: pathnodes.h:1935
List * subpaths
Definition: pathnodes.h:1932

References append_startup_cost_compare(), append_total_cost_compare(), Assert, bms_equal(), RelOptInfo::consider_parallel, cost_append(), AppendPath::first_partial_path, get_appendrel_parampathinfo(), get_baserel_parampathinfo(), lfirst, AppendPath::limit_tuples, linitial, list_concat(), list_length(), list_sort(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, AppendPath::path, PATH_REQ_OUTER, Path::pathkeys, Path::pathtype, RelOptInfo::relids, RELOPT_BASEREL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, Path::rows, Path::startup_cost, subpath(), AppendPath::subpaths, and Path::total_cost.

Referenced by add_paths_to_append_rel(), create_degenerate_grouping_paths(), generate_nonunion_paths(), generate_orderedappend_paths(), generate_union_paths(), mark_dummy_rel(), reparameterize_path(), and set_dummy_rel_pathlist().
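
One caller pattern worth noting (hedged sketch, modeled on mark_dummy_rel() and set_dummy_rel_pathlist() from the list above): a relation proven to return no rows is given a childless Append as its only path, passing NIL subpath lists and -1 for the row estimate:

rel->pathlist = NIL;
rel->partial_pathlist = NIL;
add_path(rel, (Path *) create_append_path(root, rel, NIL, NIL,
                                          NIL, NULL, 0, false, -1));
set_cheapest(rel);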

◆ create_bitmap_and_path()

BitmapAndPath* create_bitmap_and_path ( PlannerInfo *  root,
RelOptInfo *  rel,
List *  bitmapquals 
)

Definition at line 1075 of file pathnode.c.

1078 {
1079  BitmapAndPath *pathnode = makeNode(BitmapAndPath);
1080  Relids required_outer = NULL;
1081  ListCell *lc;
1082 
1083  pathnode->path.pathtype = T_BitmapAnd;
1084  pathnode->path.parent = rel;
1085  pathnode->path.pathtarget = rel->reltarget;
1086 
1087  /*
1088  * Identify the required outer rels as the union of what the child paths
1089  * depend on. (Alternatively, we could insist that the caller pass this
1090  * in, but it's more convenient and reliable to compute it here.)
1091  */
1092  foreach(lc, bitmapquals)
1093  {
1094  Path *bitmapqual = (Path *) lfirst(lc);
1095 
1096  required_outer = bms_add_members(required_outer,
1097  PATH_REQ_OUTER(bitmapqual));
1098  }
1099  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1100  required_outer);
1101 
1102  /*
1103  * Currently, a BitmapHeapPath, BitmapAndPath, or BitmapOrPath will be
1104  * parallel-safe if and only if rel->consider_parallel is set. So, we can
1105  * set the flag for this path based only on the relation-level flag,
1106  * without actually iterating over the list of children.
1107  */
1108  pathnode->path.parallel_aware = false;
1109  pathnode->path.parallel_safe = rel->consider_parallel;
1110  pathnode->path.parallel_workers = 0;
1111 
1112  pathnode->path.pathkeys = NIL; /* always unordered */
1113 
1114  pathnode->bitmapquals = bitmapquals;
1115 
1116  /* this sets bitmapselectivity as well as the regular cost fields: */
1117  cost_bitmap_and_node(pathnode, root);
1118 
1119  return pathnode;
1120 }
Bitmapset * bms_add_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:917
void cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
Definition: costsize.c:1157
List * bitmapquals
Definition: pathnodes.h:1797

References BitmapAndPath::bitmapquals, bms_add_members(), RelOptInfo::consider_parallel, cost_bitmap_and_node(), get_baserel_parampathinfo(), lfirst, makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, BitmapAndPath::path, PATH_REQ_OUTER, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by bitmap_and_cost_est(), and choose_bitmap_and().

◆ create_bitmap_heap_path()

BitmapHeapPath* create_bitmap_heap_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Path *  bitmapqual,
Relids  required_outer,
double  loop_count,
int  parallel_degree 
)

Definition at line 1042 of file pathnode.c.

1048 {
1049  BitmapHeapPath *pathnode = makeNode(BitmapHeapPath);
1050 
1051  pathnode->path.pathtype = T_BitmapHeapScan;
1052  pathnode->path.parent = rel;
1053  pathnode->path.pathtarget = rel->reltarget;
1054  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1055  required_outer);
1056  pathnode->path.parallel_aware = (parallel_degree > 0);
1057  pathnode->path.parallel_safe = rel->consider_parallel;
1058  pathnode->path.parallel_workers = parallel_degree;
1059  pathnode->path.pathkeys = NIL; /* always unordered */
1060 
1061  pathnode->bitmapqual = bitmapqual;
1062 
1063  cost_bitmap_heap_scan(&pathnode->path, root, rel,
1064  pathnode->path.param_info,
1065  bitmapqual, loop_count);
1066 
1067  return pathnode;
1068 }
void cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info, Path *bitmapqual, double loop_count)
Definition: costsize.c:1013
Path * bitmapqual
Definition: pathnodes.h:1785

References BitmapHeapPath::bitmapqual, RelOptInfo::consider_parallel, cost_bitmap_heap_scan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, BitmapHeapPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by create_index_paths(), create_partial_bitmap_paths(), and reparameterize_path().

◆ create_bitmap_or_path()

BitmapOrPath* create_bitmap_or_path ( PlannerInfo *  root,
RelOptInfo *  rel,
List *  bitmapquals 
)

Definition at line 1127 of file pathnode.c.

1130 {
1131  BitmapOrPath *pathnode = makeNode(BitmapOrPath);
1132  Relids required_outer = NULL;
1133  ListCell *lc;
1134 
1135  pathnode->path.pathtype = T_BitmapOr;
1136  pathnode->path.parent = rel;
1137  pathnode->path.pathtarget = rel->reltarget;
1138 
1139  /*
1140  * Identify the required outer rels as the union of what the child paths
1141  * depend on. (Alternatively, we could insist that the caller pass this
1142  * in, but it's more convenient and reliable to compute it here.)
1143  */
1144  foreach(lc, bitmapquals)
1145  {
1146  Path *bitmapqual = (Path *) lfirst(lc);
1147 
1148  required_outer = bms_add_members(required_outer,
1149  PATH_REQ_OUTER(bitmapqual));
1150  }
1151  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1152  required_outer);
1153 
1154  /*
1155  * Currently, a BitmapHeapPath, BitmapAndPath, or BitmapOrPath will be
1156  * parallel-safe if and only if rel->consider_parallel is set. So, we can
1157  * set the flag for this path based only on the relation-level flag,
1158  * without actually iterating over the list of children.
1159  */
1160  pathnode->path.parallel_aware = false;
1161  pathnode->path.parallel_safe = rel->consider_parallel;
1162  pathnode->path.parallel_workers = 0;
1163 
1164  pathnode->path.pathkeys = NIL; /* always unordered */
1165 
1166  pathnode->bitmapquals = bitmapquals;
1167 
1168  /* this sets bitmapselectivity as well as the regular cost fields: */
1169  cost_bitmap_or_node(pathnode, root);
1170 
1171  return pathnode;
1172 }
void cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
Definition: costsize.c:1201
List * bitmapquals
Definition: pathnodes.h:1810

References BitmapOrPath::bitmapquals, bms_add_members(), RelOptInfo::consider_parallel, cost_bitmap_or_node(), get_baserel_parampathinfo(), lfirst, makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, BitmapOrPath::path, PATH_REQ_OUTER, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by generate_bitmap_or_paths().

◆ create_ctescan_path()

Path* create_ctescan_path ( PlannerInfo *  root,
RelOptInfo *  rel,
List *  pathkeys,
Relids  required_outer 
)

Definition at line 2112 of file pathnode.c.

2114 {
2115  Path *pathnode = makeNode(Path);
2116 
2117  pathnode->pathtype = T_CteScan;
2118  pathnode->parent = rel;
2119  pathnode->pathtarget = rel->reltarget;
2120  pathnode->param_info = get_baserel_parampathinfo(root, rel,
2121  required_outer);
2122  pathnode->parallel_aware = false;
2123  pathnode->parallel_safe = rel->consider_parallel;
2124  pathnode->parallel_workers = 0;
2125  pathnode->pathkeys = pathkeys;
2126 
2127  cost_ctescan(pathnode, root, rel, pathnode->param_info);
2128 
2129  return pathnode;
2130 }
void cost_ctescan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1677

References RelOptInfo::consider_parallel, cost_ctescan(), get_baserel_parampathinfo(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_cte_pathlist().

◆ create_foreign_join_path()

ForeignPath* create_foreign_join_path ( PlannerInfo *  root,
RelOptInfo *  rel,
PathTarget *  target,
double  rows,
Cost  startup_cost,
Cost  total_cost,
List *  pathkeys,
Relids  required_outer,
Path *  fdw_outerpath,
List *  fdw_restrictinfo,
List *  fdw_private 
)

Definition at line 2269 of file pathnode.c.

2277 {
2278  ForeignPath *pathnode = makeNode(ForeignPath);
2279 
2280  /*
2281  * We should use get_joinrel_parampathinfo to handle parameterized paths,
2282  * but the API of this function doesn't support it, and existing
2283  * extensions aren't yet trying to build such paths anyway. For the
2284  * moment just throw an error if someone tries it; eventually we should
2285  * revisit this.
2286  */
2287  if (!bms_is_empty(required_outer) || !bms_is_empty(rel->lateral_relids))
2288  elog(ERROR, "parameterized foreign joins are not supported yet");
2289 
2290  pathnode->path.pathtype = T_ForeignScan;
2291  pathnode->path.parent = rel;
2292  pathnode->path.pathtarget = target ? target : rel->reltarget;
2293  pathnode->path.param_info = NULL; /* XXX see above */
2294  pathnode->path.parallel_aware = false;
2295  pathnode->path.parallel_safe = rel->consider_parallel;
2296  pathnode->path.parallel_workers = 0;
2297  pathnode->path.rows = rows;
2298  pathnode->path.startup_cost = startup_cost;
2299  pathnode->path.total_cost = total_cost;
2300  pathnode->path.pathkeys = pathkeys;
2301 
2302  pathnode->fdw_outerpath = fdw_outerpath;
2303  pathnode->fdw_restrictinfo = fdw_restrictinfo;
2304  pathnode->fdw_private = fdw_private;
2305 
2306  return pathnode;
2307 }
#define bms_is_empty(a)
Definition: bitmapset.h:118
Path * fdw_outerpath
Definition: pathnodes.h:1870
List * fdw_restrictinfo
Definition: pathnodes.h:1871
List * fdw_private
Definition: pathnodes.h:1872

References bms_is_empty, RelOptInfo::consider_parallel, elog, ERROR, ForeignPath::fdw_outerpath, ForeignPath::fdw_private, ForeignPath::fdw_restrictinfo, RelOptInfo::lateral_relids, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ForeignPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, Path::rows, Path::startup_cost, and Path::total_cost.

Referenced by add_paths_with_pathkeys_for_rel(), and postgresGetForeignJoinPaths().

◆ create_foreign_upper_path()

ForeignPath* create_foreign_upper_path ( PlannerInfo *  root,
RelOptInfo *  rel,
PathTarget *  target,
double  rows,
Cost  startup_cost,
Cost  total_cost,
List *  pathkeys,
Path *  fdw_outerpath,
List *  fdw_restrictinfo,
List *  fdw_private 
)

Definition at line 2321 of file pathnode.c.

2328 {
2329  ForeignPath *pathnode = makeNode(ForeignPath);
2330 
2331  /*
2332  * Upper relations should never have any lateral references, since joining
2333  * is complete.
2334  */
2335  Assert(bms_is_empty(rel->lateral_relids));
2336 
2337  pathnode->path.pathtype = T_ForeignScan;
2338  pathnode->path.parent = rel;
2339  pathnode->path.pathtarget = target ? target : rel->reltarget;
2340  pathnode->path.param_info = NULL;
2341  pathnode->path.parallel_aware = false;
2342  pathnode->path.parallel_safe = rel->consider_parallel;
2343  pathnode->path.parallel_workers = 0;
2344  pathnode->path.rows = rows;
2345  pathnode->path.startup_cost = startup_cost;
2346  pathnode->path.total_cost = total_cost;
2347  pathnode->path.pathkeys = pathkeys;
2348 
2349  pathnode->fdw_outerpath = fdw_outerpath;
2350  pathnode->fdw_restrictinfo = fdw_restrictinfo;
2351  pathnode->fdw_private = fdw_private;
2352 
2353  return pathnode;
2354 }

References Assert, bms_is_empty, RelOptInfo::consider_parallel, ForeignPath::fdw_outerpath, ForeignPath::fdw_private, ForeignPath::fdw_restrictinfo, RelOptInfo::lateral_relids, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ForeignPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, Path::rows, Path::startup_cost, and Path::total_cost.

Referenced by add_foreign_final_paths(), add_foreign_grouping_paths(), and add_foreign_ordered_paths().

◆ create_foreignscan_path()

ForeignPath* create_foreignscan_path ( PlannerInfo *  root,
RelOptInfo *  rel,
PathTarget *  target,
double  rows,
Cost  startup_cost,
Cost  total_cost,
List *  pathkeys,
Relids  required_outer,
Path *  fdw_outerpath,
List *  fdw_restrictinfo,
List *  fdw_private 
)

Definition at line 2223 of file pathnode.c.

2231 {
2232  ForeignPath *pathnode = makeNode(ForeignPath);
2233 
2234  /* Historically some FDWs were confused about when to use this */
2235  Assert(IS_SIMPLE_REL(rel));
2236 
2237  pathnode->path.pathtype = T_ForeignScan;
2238  pathnode->path.parent = rel;
2239  pathnode->path.pathtarget = target ? target : rel->reltarget;
2240  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
2241  required_outer);
2242  pathnode->path.parallel_aware = false;
2243  pathnode->path.parallel_safe = rel->consider_parallel;
2244  pathnode->path.parallel_workers = 0;
2245  pathnode->path.rows = rows;
2246  pathnode->path.startup_cost = startup_cost;
2247  pathnode->path.total_cost = total_cost;
2248  pathnode->path.pathkeys = pathkeys;
2249 
2250  pathnode->fdw_outerpath = fdw_outerpath;
2251  pathnode->fdw_restrictinfo = fdw_restrictinfo;
2252  pathnode->fdw_private = fdw_private;
2253 
2254  return pathnode;
2255 }
#define IS_SIMPLE_REL(rel)
Definition: pathnodes.h:833

References Assert, RelOptInfo::consider_parallel, ForeignPath::fdw_outerpath, ForeignPath::fdw_private, ForeignPath::fdw_restrictinfo, get_baserel_parampathinfo(), IS_SIMPLE_REL, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ForeignPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, root, Path::rows, Path::startup_cost, and Path::total_cost.

Referenced by add_paths_with_pathkeys_for_rel(), fileGetForeignPaths(), and postgresGetForeignPaths().
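
A hedged sketch of the usual FDW pattern (the callback name and cost figures are illustrative; compare the callers listed above): a GetForeignPaths callback builds one of these paths and hands it to add_path():

static void
exampleGetForeignPaths(PlannerInfo *root, RelOptInfo *baserel, Oid foreigntableid)
{
    Cost        startup_cost = 10.0;    /* illustrative estimate */
    Cost        total_cost = startup_cost + baserel->rows * cpu_tuple_cost;
    ForeignPath *path;

    path = create_foreignscan_path(root, baserel,
                                   NULL,                    /* default pathtarget */
                                   baserel->rows,
                                   startup_cost,
                                   total_cost,
                                   NIL,                     /* no pathkeys */
                                   baserel->lateral_relids, /* required_outer */
                                   NULL,                    /* no fdw_outerpath */
                                   NIL,                     /* no fdw_restrictinfo */
                                   NIL);                    /* no fdw_private */
    add_path(baserel, (Path *) path);
}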

◆ create_functionscan_path()

Path* create_functionscan_path ( PlannerInfo *  root,
RelOptInfo *  rel,
List *  pathkeys,
Relids  required_outer 
)

Definition at line 2034 of file pathnode.c.

2036 {
2037  Path *pathnode = makeNode(Path);
2038 
2039  pathnode->pathtype = T_FunctionScan;
2040  pathnode->parent = rel;
2041  pathnode->pathtarget = rel->reltarget;
2042  pathnode->param_info = get_baserel_parampathinfo(root, rel,
2043  required_outer);
2044  pathnode->parallel_aware = false;
2045  pathnode->parallel_safe = rel->consider_parallel;
2046  pathnode->parallel_workers = 0;
2047  pathnode->pathkeys = pathkeys;
2048 
2049  cost_functionscan(pathnode, root, rel, pathnode->param_info);
2050 
2051  return pathnode;
2052 }
void cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1510

References RelOptInfo::consider_parallel, cost_functionscan(), get_baserel_parampathinfo(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_function_pathlist().

◆ create_gather_merge_path()

GatherMergePath* create_gather_merge_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Path *  subpath,
PathTarget *  target,
List *  pathkeys,
Relids  required_outer,
double *  rows 
)

Definition at line 1881 of file pathnode.c.

1884 {
1885  GatherMergePath *pathnode = makeNode(GatherMergePath);
1886  Cost input_startup_cost = 0;
1887  Cost input_total_cost = 0;
1888 
1889  Assert(subpath->parallel_safe);
1890  Assert(pathkeys);
1891 
1892  /*
1893  * The subpath should guarantee that it is adequately ordered either by
1894  * adding an explicit sort node or by using presorted input. We cannot
1895  * add an explicit Sort node for the subpath in createplan.c on additional
1896  * pathkeys, because we can't guarantee the sort would be safe. For
1897  * example, expressions may be volatile or otherwise parallel unsafe.
1898  */
1899  if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
1900  elog(ERROR, "gather merge input not sufficiently sorted");
1901 
1902  pathnode->path.pathtype = T_GatherMerge;
1903  pathnode->path.parent = rel;
1904  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1905  required_outer);
1906  pathnode->path.parallel_aware = false;
1907 
1908  pathnode->subpath = subpath;
1909  pathnode->num_workers = subpath->parallel_workers;
1910  pathnode->path.pathkeys = pathkeys;
1911  pathnode->path.pathtarget = target ? target : rel->reltarget;
1912 
1913  input_startup_cost += subpath->startup_cost;
1914  input_total_cost += subpath->total_cost;
1915 
1916  cost_gather_merge(pathnode, root, rel, pathnode->path.param_info,
1917  input_startup_cost, input_total_cost, rows);
1918 
1919  return pathnode;
1920 }
void cost_gather_merge(GatherMergePath *path, PlannerInfo *root, RelOptInfo *rel, ParamPathInfo *param_info, Cost input_startup_cost, Cost input_total_cost, double *rows)
Definition: costsize.c:474
bool pathkeys_contained_in(List *keys1, List *keys2)
Definition: pathkeys.c:341

References Assert, cost_gather_merge(), elog, ERROR, get_baserel_parampathinfo(), makeNode, GatherMergePath::num_workers, Path::parallel_aware, GatherMergePath::path, Path::pathkeys, pathkeys_contained_in(), Path::pathtype, RelOptInfo::reltarget, root, subpath(), and GatherMergePath::subpath.

Referenced by create_ordered_paths(), gather_grouping_paths(), generate_gather_paths(), and generate_useful_gather_paths().

◆ create_gather_path()

GatherPath* create_gather_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Path *  subpath,
PathTarget *  target,
Relids  required_outer,
double *  rows 
)

Definition at line 1960 of file pathnode.c.

1962 {
1963  GatherPath *pathnode = makeNode(GatherPath);
1964 
1965  Assert(subpath->parallel_safe);
1966 
1967  pathnode->path.pathtype = T_Gather;
1968  pathnode->path.parent = rel;
1969  pathnode->path.pathtarget = target;
1970  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1971  required_outer);
1972  pathnode->path.parallel_aware = false;
1973  pathnode->path.parallel_safe = false;
1974  pathnode->path.parallel_workers = 0;
1975  pathnode->path.pathkeys = NIL; /* Gather has unordered result */
1976 
1977  pathnode->subpath = subpath;
1978  pathnode->num_workers = subpath->parallel_workers;
1979  pathnode->single_copy = false;
1980 
1981  if (pathnode->num_workers == 0)
1982  {
1983  pathnode->path.pathkeys = subpath->pathkeys;
1984  pathnode->num_workers = 1;
1985  pathnode->single_copy = true;
1986  }
1987 
1988  cost_gather(pathnode, root, rel, pathnode->path.param_info, rows);
1989 
1990  return pathnode;
1991 }
void cost_gather(GatherPath *path, PlannerInfo *root, RelOptInfo *rel, ParamPathInfo *param_info, double *rows)
Definition: costsize.c:436
bool single_copy
Definition: pathnodes.h:2043
int num_workers
Definition: pathnodes.h:2044

References Assert, cost_gather(), get_baserel_parampathinfo(), makeNode, NIL, GatherPath::num_workers, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, GatherPath::path, Path::pathkeys, Path::pathtype, root, GatherPath::single_copy, subpath(), and GatherPath::subpath.

Referenced by generate_gather_paths(), and generate_union_paths().
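
Hedged sketch of the common call pattern (roughly what generate_gather_paths() does; variable names are illustrative): place a Gather atop the cheapest partial path, estimating output rows as per-worker rows times the worker count:

Path       *cheapest_partial_path = (Path *) linitial(rel->partial_pathlist);
double      rows;
GatherPath *gpath;

rows = cheapest_partial_path->rows * cheapest_partial_path->parallel_workers;
gpath = create_gather_path(root, rel, cheapest_partial_path,
                           rel->reltarget, NULL, &rows);
add_path(rel, (Path *) gpath);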

◆ create_group_path()

GroupPath* create_group_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Path *  subpath,
List *  groupClause,
List *  qual,
double  numGroups 
)

Definition at line 3032 of file pathnode.c.

3038 {
3039  GroupPath *pathnode = makeNode(GroupPath);
3040  PathTarget *target = rel->reltarget;
3041 
3042  pathnode->path.pathtype = T_Group;
3043  pathnode->path.parent = rel;
3044  pathnode->path.pathtarget = target;
3045  /* For now, assume we are above any joins, so no parameterization */
3046  pathnode->path.param_info = NULL;
3047  pathnode->path.parallel_aware = false;
3048  pathnode->path.parallel_safe = rel->consider_parallel &&
3049  subpath->parallel_safe;
3050  pathnode->path.parallel_workers = subpath->parallel_workers;
3051  /* Group doesn't change sort ordering */
3052  pathnode->path.pathkeys = subpath->pathkeys;
3053 
3054  pathnode->subpath = subpath;
3055 
3056  pathnode->groupClause = groupClause;
3057  pathnode->qual = qual;
3058 
3059  cost_group(&pathnode->path, root,
3060  list_length(groupClause),
3061  numGroups,
3062  qual,
3063  subpath->startup_cost, subpath->total_cost,
3064  subpath->rows);
3065 
3066  /* add tlist eval cost for each output row */
3067  pathnode->path.startup_cost += target->cost.startup;
3068  pathnode->path.total_cost += target->cost.startup +
3069  target->cost.per_tuple * pathnode->path.rows;
3070 
3071  return pathnode;
3072 }
void cost_group(Path *path, PlannerInfo *root, int numGroupCols, double numGroups, List *quals, Cost input_startup_cost, Cost input_total_cost, double input_tuples)
Definition: costsize.c:3142
List * qual
Definition: pathnodes.h:2228
List * groupClause
Definition: pathnodes.h:2227
Path * subpath
Definition: pathnodes.h:2226
Path path
Definition: pathnodes.h:2225

References RelOptInfo::consider_parallel, PathTarget::cost, cost_group(), GroupPath::groupClause, list_length(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, GroupPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, GroupPath::qual, RelOptInfo::reltarget, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), GroupPath::subpath, and Path::total_cost.

Referenced by add_paths_to_grouping_rel(), and create_partial_grouping_paths().

◆ create_group_result_path()

GroupResultPath* create_group_result_path ( PlannerInfo *  root,
RelOptInfo *  rel,
PathTarget *  target,
List *  havingqual 
)

Definition at line 1518 of file pathnode.c.

1520 {
1521  GroupResultPath *pathnode = makeNode(GroupResultPath);
1522 
1523  pathnode->path.pathtype = T_Result;
1524  pathnode->path.parent = rel;
1525  pathnode->path.pathtarget = target;
1526  pathnode->path.param_info = NULL; /* there are no other rels... */
1527  pathnode->path.parallel_aware = false;
1528  pathnode->path.parallel_safe = rel->consider_parallel;
1529  pathnode->path.parallel_workers = 0;
1530  pathnode->path.pathkeys = NIL;
1531  pathnode->quals = havingqual;
1532 
1533  /*
1534  * We can't quite use cost_resultscan() because the quals we want to
1535  * account for are not baserestrict quals of the rel. Might as well just
1536  * hack it here.
1537  */
1538  pathnode->path.rows = 1;
1539  pathnode->path.startup_cost = target->cost.startup;
1540  pathnode->path.total_cost = target->cost.startup +
1541  cpu_tuple_cost + target->cost.per_tuple;
1542 
1543  /*
1544  * Add cost of qual, if any --- but we ignore its selectivity, since our
1545  * rowcount estimate should be 1 no matter what the qual is.
1546  */
1547  if (havingqual)
1548  {
1549  QualCost qual_cost;
1550 
1551  cost_qual_eval(&qual_cost, havingqual, root);
1552  /* havingqual is evaluated once at startup */
1553  pathnode->path.startup_cost += qual_cost.startup + qual_cost.per_tuple;
1554  pathnode->path.total_cost += qual_cost.startup + qual_cost.per_tuple;
1555  }
1556 
1557  return pathnode;
1558 }
double cpu_tuple_cost
Definition: costsize.c:121
void cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
Definition: costsize.c:4619

References RelOptInfo::consider_parallel, PathTarget::cost, cost_qual_eval(), cpu_tuple_cost, makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, GroupResultPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, GroupResultPath::quals, root, Path::rows, QualCost::startup, Path::startup_cost, and Path::total_cost.

Referenced by create_degenerate_grouping_paths(), and query_planner().

◆ create_groupingsets_path()

GroupingSetsPath* create_groupingsets_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Path *  subpath,
List *  having_qual,
AggStrategy  aggstrategy,
List *  rollups,
const AggClauseCosts *  agg_costs 
)

Definition at line 3225 of file pathnode.c.

3232 {
3233  GroupingSetsPath *pathnode = makeNode(GroupingSetsPath);
3234  PathTarget *target = rel->reltarget;
3235  ListCell *lc;
3236  bool is_first = true;
3237  bool is_first_sort = true;
3238 
3239  /* The topmost generated Plan node will be an Agg */
3240  pathnode->path.pathtype = T_Agg;
3241  pathnode->path.parent = rel;
3242  pathnode->path.pathtarget = target;
3243  pathnode->path.param_info = subpath->param_info;
3244  pathnode->path.parallel_aware = false;
3245  pathnode->path.parallel_safe = rel->consider_parallel &&
3246  subpath->parallel_safe;
3247  pathnode->path.parallel_workers = subpath->parallel_workers;
3248  pathnode->subpath = subpath;
3249 
3250  /*
3251  * Simplify callers by downgrading AGG_SORTED to AGG_PLAIN, and AGG_MIXED
3252  * to AGG_HASHED, here if possible.
3253  */
3254  if (aggstrategy == AGG_SORTED &&
3255  list_length(rollups) == 1 &&
3256  ((RollupData *) linitial(rollups))->groupClause == NIL)
3257  aggstrategy = AGG_PLAIN;
3258 
3259  if (aggstrategy == AGG_MIXED &&
3260  list_length(rollups) == 1)
3261  aggstrategy = AGG_HASHED;
3262 
3263  /*
3264  * Output will be in sorted order by group_pathkeys if, and only if, there
3265  * is a single rollup operation on a non-empty list of grouping
3266  * expressions.
3267  */
3268  if (aggstrategy == AGG_SORTED && list_length(rollups) == 1)
3269  pathnode->path.pathkeys = root->group_pathkeys;
3270  else
3271  pathnode->path.pathkeys = NIL;
3272 
3273  pathnode->aggstrategy = aggstrategy;
3274  pathnode->rollups = rollups;
3275  pathnode->qual = having_qual;
3276  pathnode->transitionSpace = agg_costs ? agg_costs->transitionSpace : 0;
3277 
3278  Assert(rollups != NIL);
3279  Assert(aggstrategy != AGG_PLAIN || list_length(rollups) == 1);
3280  Assert(aggstrategy != AGG_MIXED || list_length(rollups) > 1);
3281 
3282  foreach(lc, rollups)
3283  {
3284  RollupData *rollup = lfirst(lc);
3285  List *gsets = rollup->gsets;
3286  int numGroupCols = list_length(linitial(gsets));
3287 
3288  /*
3289  * In AGG_SORTED or AGG_PLAIN mode, the first rollup takes the
3290  * (already-sorted) input, and following ones do their own sort.
3291  *
3292  * In AGG_HASHED mode, there is one rollup for each grouping set.
3293  *
3294  * In AGG_MIXED mode, the first rollups are hashed, the first
3295  * non-hashed one takes the (already-sorted) input, and following ones
3296  * do their own sort.
3297  */
3298  if (is_first)
3299  {
3300  cost_agg(&pathnode->path, root,
3301  aggstrategy,
3302  agg_costs,
3303  numGroupCols,
3304  rollup->numGroups,
3305  having_qual,
3306  subpath->startup_cost,
3307  subpath->total_cost,
3308  subpath->rows,
3309  subpath->pathtarget->width);
3310  is_first = false;
3311  if (!rollup->is_hashed)
3312  is_first_sort = false;
3313  }
3314  else
3315  {
3316  Path sort_path; /* dummy for result of cost_sort */
3317  Path agg_path; /* dummy for result of cost_agg */
3318 
3319  if (rollup->is_hashed || is_first_sort)
3320  {
3321  /*
3322  * Account for cost of aggregation, but don't charge input
3323  * cost again
3324  */
3325  cost_agg(&agg_path, root,
3326  rollup->is_hashed ? AGG_HASHED : AGG_SORTED,
3327  agg_costs,
3328  numGroupCols,
3329  rollup->numGroups,
3330  having_qual,
3331  0.0, 0.0,
3332  subpath->rows,
3333  subpath->pathtarget->width);
3334  if (!rollup->is_hashed)
3335  is_first_sort = false;
3336  }
3337  else
3338  {
3339  /* Account for cost of sort, but don't charge input cost again */
3340  cost_sort(&sort_path, root, NIL,
3341  0.0,
3342  subpath->rows,
3343  subpath->pathtarget->width,
3344  0.0,
3345  work_mem,
3346  -1.0);
3347 
3348  /* Account for cost of aggregation */
3349 
3350  cost_agg(&agg_path, root,
3351  AGG_SORTED,
3352  agg_costs,
3353  numGroupCols,
3354  rollup->numGroups,
3355  having_qual,
3356  sort_path.startup_cost,
3357  sort_path.total_cost,
3358  sort_path.rows,
3359  subpath->pathtarget->width);
3360  }
3361 
3362  pathnode->path.total_cost += agg_path.total_cost;
3363  pathnode->path.rows += agg_path.rows;
3364  }
3365  }
3366 
3367  /* add tlist eval cost for each output row */
3368  pathnode->path.startup_cost += target->cost.startup;
3369  pathnode->path.total_cost += target->cost.startup +
3370  target->cost.per_tuple * pathnode->path.rows;
3371 
3372  return pathnode;
3373 }
void cost_sort(Path *path, PlannerInfo *root, List *pathkeys, Cost input_cost, double tuples, int width, Cost comparison_cost, int sort_mem, double limit_tuples)
Definition: costsize.c:2103
int work_mem
Definition: globals.c:129
@ AGG_HASHED
Definition: nodes.h:356
@ AGG_MIXED
Definition: nodes.h:357
@ AGG_PLAIN
Definition: nodes.h:354
uint64 transitionSpace
Definition: pathnodes.h:2300
AggStrategy aggstrategy
Definition: pathnodes.h:2297
Cardinality numGroups
Definition: pathnodes.h:2284
List * gsets
Definition: pathnodes.h:2282
bool is_hashed
Definition: pathnodes.h:2286

References AGG_HASHED, AGG_MIXED, AGG_PLAIN, AGG_SORTED, GroupingSetsPath::aggstrategy, Assert, RelOptInfo::consider_parallel, PathTarget::cost, cost_agg(), cost_sort(), RollupData::gsets, RollupData::is_hashed, lfirst, linitial, list_length(), makeNode, NIL, RollupData::numGroups, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, GroupingSetsPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, GroupingSetsPath::qual, RelOptInfo::reltarget, GroupingSetsPath::rollups, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), GroupingSetsPath::subpath, Path::total_cost, AggClauseCosts::transitionSpace, GroupingSetsPath::transitionSpace, and work_mem.

Referenced by consider_groupingsets_paths().

◆ create_hashjoin_path()

HashPath* create_hashjoin_path ( PlannerInfo *  root,
RelOptInfo *  joinrel,
JoinType  jointype,
JoinCostWorkspace *  workspace,
JoinPathExtraData *  extra,
Path *  outer_path,
Path *  inner_path,
bool  parallel_hash,
List *  restrict_clauses,
Relids  required_outer,
List *  hashclauses 
)

Definition at line 2607 of file pathnode.c.

2618 {
2619  HashPath *pathnode = makeNode(HashPath);
2620 
2621  pathnode->jpath.path.pathtype = T_HashJoin;
2622  pathnode->jpath.path.parent = joinrel;
2623  pathnode->jpath.path.pathtarget = joinrel->reltarget;
2624  pathnode->jpath.path.param_info =
2625  get_joinrel_parampathinfo(root,
2626  joinrel,
2627  outer_path,
2628  inner_path,
2629  extra->sjinfo,
2630  required_outer,
2631  &restrict_clauses);
2632  pathnode->jpath.path.parallel_aware =
2633  joinrel->consider_parallel && parallel_hash;
2634  pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2635  outer_path->parallel_safe && inner_path->parallel_safe;
2636  /* This is a foolish way to estimate parallel_workers, but for now... */
2637  pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2638 
2639  /*
2640  * A hashjoin never has pathkeys, since its output ordering is
2641  * unpredictable due to possible batching. XXX If the inner relation is
2642  * small enough, we could instruct the executor that it must not batch,
2643  * and then we could assume that the output inherits the outer relation's
2644  * ordering, which might save a sort step. However there is considerable
2645  * downside if our estimate of the inner relation size is badly off. For
2646  * the moment we don't risk it. (Note also that if we wanted to take this
2647  * seriously, joinpath.c would have to consider many more paths for the
2648  * outer rel than it does now.)
2649  */
2650  pathnode->jpath.path.pathkeys = NIL;
2651  pathnode->jpath.jointype = jointype;
2652  pathnode->jpath.inner_unique = extra->inner_unique;
2653  pathnode->jpath.outerjoinpath = outer_path;
2654  pathnode->jpath.innerjoinpath = inner_path;
2655  pathnode->jpath.joinrestrictinfo = restrict_clauses;
2656  pathnode->path_hashclauses = hashclauses;
2657  /* final_cost_hashjoin will fill in pathnode->num_batches */
2658 
2659  final_cost_hashjoin(root, pathnode, workspace, extra);
2660 
2661  return pathnode;
2662 }
void final_cost_hashjoin(PlannerInfo *root, HashPath *path, JoinCostWorkspace *workspace, JoinPathExtraData *extra)
Definition: costsize.c:4160
ParamPathInfo * get_joinrel_parampathinfo(PlannerInfo *root, RelOptInfo *joinrel, Path *outer_path, Path *inner_path, SpecialJoinInfo *sjinfo, Relids required_outer, List **restrict_clauses)
Definition: relnode.c:1671
List * path_hashclauses
Definition: pathnodes.h:2152
JoinPath jpath
Definition: pathnodes.h:2151
SpecialJoinInfo * sjinfo
Definition: pathnodes.h:3233
Path * outerjoinpath
Definition: pathnodes.h:2074
Path * innerjoinpath
Definition: pathnodes.h:2075
JoinType jointype
Definition: pathnodes.h:2069
bool inner_unique
Definition: pathnodes.h:2071
List * joinrestrictinfo
Definition: pathnodes.h:2077

References RelOptInfo::consider_parallel, final_cost_hashjoin(), get_joinrel_parampathinfo(), JoinPath::inner_unique, JoinPathExtraData::inner_unique, JoinPath::innerjoinpath, JoinPath::joinrestrictinfo, JoinPath::jointype, HashPath::jpath, makeNode, NIL, JoinPath::outerjoinpath, Path::parallel_safe, Path::parallel_workers, HashPath::path_hashclauses, RelOptInfo::reltarget, root, and JoinPathExtraData::sjinfo.

Referenced by try_hashjoin_path(), and try_partial_hashjoin_path().
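
The following is an illustrative sketch (not part of pathnode.c) of the two-phase costing convention this function participates in, loosely modeled on try_hashjoin_path() in joinpath.c: the caller first fills a JoinCostWorkspace with initial_cost_hashjoin(), may use add_path_precheck() to discard hopeless candidates cheaply, and only then builds the full path, inside which final_cost_hashjoin() runs. The function name and the assumption that all arguments are already prepared are hypothetical.

    #include "postgres.h"
    #include "optimizer/cost.h"
    #include "optimizer/pathnode.h"

    /* Sketch only: assumes the caller has prepared the arguments the same
     * way joinpath.c would (join clauses, extra data, candidate inputs). */
    static void
    consider_hashjoin_sketch(PlannerInfo *root, RelOptInfo *joinrel,
                             JoinType jointype, JoinPathExtraData *extra,
                             Path *outer_path, Path *inner_path,
                             List *restrict_clauses, List *hashclauses,
                             Relids required_outer)
    {
        JoinCostWorkspace workspace;

        /* Phase 1: cheap preliminary estimate */
        initial_cost_hashjoin(root, &workspace, jointype, hashclauses,
                              outer_path, inner_path, extra,
                              false /* parallel_hash */);

        /* Skip building the path if it cannot beat what is already there */
        if (!add_path_precheck(joinrel,
                               workspace.startup_cost, workspace.total_cost,
                               NIL /* hashjoin output is unordered */,
                               required_outer))
            return;

        /* Phase 2: build the path; final_cost_hashjoin() is called inside */
        add_path(joinrel, (Path *)
                 create_hashjoin_path(root, joinrel, jointype,
                                      &workspace, extra,
                                      outer_path, inner_path,
                                      false /* parallel_hash */,
                                      restrict_clauses, required_outer,
                                      hashclauses));
    }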

◆ create_incremental_sort_path()

IncrementalSortPath* create_incremental_sort_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
List *pathkeys,
int  presorted_keys,
double  limit_tuples 
)

Definition at line 2939 of file pathnode.c.

2945 {
2946  IncrementalSortPath *sort = makeNode(IncrementalSortPath);
2947  SortPath *pathnode = &sort->spath;
2948 
2949  pathnode->path.pathtype = T_IncrementalSort;
2950  pathnode->path.parent = rel;
2951  /* Sort doesn't project, so use source path's pathtarget */
2952  pathnode->path.pathtarget = subpath->pathtarget;
2953  /* For now, assume we are above any joins, so no parameterization */
2954  pathnode->path.param_info = NULL;
2955  pathnode->path.parallel_aware = false;
2956  pathnode->path.parallel_safe = rel->consider_parallel &&
2957  subpath->parallel_safe;
2958  pathnode->path.parallel_workers = subpath->parallel_workers;
2959  pathnode->path.pathkeys = pathkeys;
2960 
2961  pathnode->subpath = subpath;
2962 
2963  cost_incremental_sort(&pathnode->path,
2964  root, pathkeys, presorted_keys,
2965  subpath->startup_cost,
2966  subpath->total_cost,
2967  subpath->rows,
2968  subpath->pathtarget->width,
2969  0.0, /* XXX comparison_cost shouldn't be 0? */
2970  work_mem, limit_tuples);
2971 
2972  sort->nPresortedCols = presorted_keys;
2973 
2974  return sort;
2975 }
void cost_incremental_sort(Path *path, PlannerInfo *root, List *pathkeys, int presorted_keys, Cost input_startup_cost, Cost input_total_cost, double input_tuples, int width, Cost comparison_cost, int sort_mem, double limit_tuples)
Definition: costsize.c:1965
Path path
Definition: pathnodes.h:2199
Path * subpath
Definition: pathnodes.h:2200

References RelOptInfo::consider_parallel, cost_incremental_sort(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, SortPath::path, Path::pathkeys, Path::pathtype, root, sort(), subpath(), SortPath::subpath, and work_mem.

Referenced by build_setop_child_paths(), create_final_distinct_paths(), create_one_window_path(), create_ordered_paths(), create_partial_distinct_paths(), gather_grouping_paths(), generate_useful_gather_paths(), and make_ordered_path().
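
A small usage sketch (illustrative only, not part of pathnode.c), following the pattern of make_ordered_path() in planner.c: count how many leading pathkeys the input already satisfies and choose an incremental sort when some, but not all, of them are presorted. The helper name is hypothetical.

    #include "postgres.h"
    #include "optimizer/pathnode.h"
    #include "optimizer/paths.h"

    /* Sketch: return a path ordered by 'desired_pathkeys', preferring an
     * incremental sort when the input is already partially ordered. */
    static Path *
    make_sorted_sketch(PlannerInfo *root, RelOptInfo *rel, Path *input_path,
                       List *desired_pathkeys, double limit_tuples)
    {
        int         presorted_keys;

        if (pathkeys_count_contained_in(desired_pathkeys,
                                        input_path->pathkeys,
                                        &presorted_keys))
            return input_path;      /* already fully ordered */

        /* real callers also consult the enable_incremental_sort GUC here */
        if (presorted_keys > 0)
            return (Path *) create_incremental_sort_path(root, rel, input_path,
                                                         desired_pathkeys,
                                                         presorted_keys,
                                                         limit_tuples);

        return (Path *) create_sort_path(root, rel, input_path,
                                         desired_pathkeys, limit_tuples);
    }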

◆ create_index_path()

IndexPath* create_index_path ( PlannerInfo *root,
IndexOptInfo *index,
List *indexclauses,
List *indexorderbys,
List *indexorderbycols,
List *pathkeys,
ScanDirection  indexscandir,
bool  indexonly,
Relids  required_outer,
double  loop_count,
bool  partial_path 
)

Definition at line 993 of file pathnode.c.

1004 {
1005  IndexPath *pathnode = makeNode(IndexPath);
1006  RelOptInfo *rel = index->rel;
1007 
1008  pathnode->path.pathtype = indexonly ? T_IndexOnlyScan : T_IndexScan;
1009  pathnode->path.parent = rel;
1010  pathnode->path.pathtarget = rel->reltarget;
1011  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1012  required_outer);
1013  pathnode->path.parallel_aware = false;
1014  pathnode->path.parallel_safe = rel->consider_parallel;
1015  pathnode->path.parallel_workers = 0;
1016  pathnode->path.pathkeys = pathkeys;
1017 
1018  pathnode->indexinfo = index;
1019  pathnode->indexclauses = indexclauses;
1020  pathnode->indexorderbys = indexorderbys;
1021  pathnode->indexorderbycols = indexorderbycols;
1022  pathnode->indexscandir = indexscandir;
1023 
1024  cost_index(pathnode, root, loop_count, partial_path);
1025 
1026  return pathnode;
1027 }
void cost_index(IndexPath *path, PlannerInfo *root, double loop_count, bool partial_path)
Definition: costsize.c:549
List * indexclauses
Definition: pathnodes.h:1711
ScanDirection indexscandir
Definition: pathnodes.h:1714
Path path
Definition: pathnodes.h:1709
List * indexorderbycols
Definition: pathnodes.h:1713
List * indexorderbys
Definition: pathnodes.h:1712
IndexOptInfo * indexinfo
Definition: pathnodes.h:1710

References RelOptInfo::consider_parallel, cost_index(), get_baserel_parampathinfo(), IndexPath::indexclauses, IndexPath::indexinfo, IndexPath::indexorderbycols, IndexPath::indexorderbys, IndexPath::indexscandir, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, IndexPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by build_index_paths(), and plan_cluster_use_sort().
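
An illustrative sketch (not part of pathnode.c) of the simplest possible call, roughly the way plan_cluster_use_sort() costs a full-index scan: no index quals, no ordering claims, no parameterization. The wrapper name is hypothetical.

    #include "postgres.h"
    #include "access/sdir.h"
    #include "optimizer/pathnode.h"

    /* Sketch: cost an unqualified, unparameterized forward scan of one
     * index belonging to indexInfo->rel. */
    static IndexPath *
    full_index_scan_sketch(PlannerInfo *root, IndexOptInfo *indexInfo)
    {
        return create_index_path(root, indexInfo,
                                 NIL,       /* indexclauses */
                                 NIL,       /* indexorderbys */
                                 NIL,       /* indexorderbycols */
                                 NIL,       /* pathkeys */
                                 ForwardScanDirection,
                                 false,     /* indexonly */
                                 NULL,      /* required_outer */
                                 1.0,       /* loop_count */
                                 false);    /* partial_path */
    }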

◆ create_limit_path()

LimitPath* create_limit_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
Node *limitOffset,
Node *limitCount,
LimitOption  limitOption,
int64  offset_est,
int64  count_est 
)

Definition at line 3814 of file pathnode.c.

3819 {
3820  LimitPath *pathnode = makeNode(LimitPath);
3821 
3822  pathnode->path.pathtype = T_Limit;
3823  pathnode->path.parent = rel;
3824  /* Limit doesn't project, so use source path's pathtarget */
3825  pathnode->path.pathtarget = subpath->pathtarget;
3826  /* For now, assume we are above any joins, so no parameterization */
3827  pathnode->path.param_info = NULL;
3828  pathnode->path.parallel_aware = false;
3829  pathnode->path.parallel_safe = rel->consider_parallel &&
3830  subpath->parallel_safe;
3831  pathnode->path.parallel_workers = subpath->parallel_workers;
3832  pathnode->path.rows = subpath->rows;
3833  pathnode->path.startup_cost = subpath->startup_cost;
3834  pathnode->path.total_cost = subpath->total_cost;
3835  pathnode->path.pathkeys = subpath->pathkeys;
3836  pathnode->subpath = subpath;
3837  pathnode->limitOffset = limitOffset;
3838  pathnode->limitCount = limitCount;
3839  pathnode->limitOption = limitOption;
3840 
3841  /*
3842  * Adjust the output rows count and costs according to the offset/limit.
3843  */
3844  adjust_limit_rows_costs(&pathnode->path.rows,
3845  &pathnode->path.startup_cost,
3846  &pathnode->path.total_cost,
3847  offset_est, count_est);
3848 
3849  return pathnode;
3850 }
void adjust_limit_rows_costs(double *rows, Cost *startup_cost, Cost *total_cost, int64 offset_est, int64 count_est)
Definition: pathnode.c:3869
Path path
Definition: pathnodes.h:2400
Path * subpath
Definition: pathnodes.h:2401
LimitOption limitOption
Definition: pathnodes.h:2404
Node * limitOffset
Definition: pathnodes.h:2402
Node * limitCount
Definition: pathnodes.h:2403

References adjust_limit_rows_costs(), RelOptInfo::consider_parallel, LimitPath::limitCount, LimitPath::limitOffset, LimitPath::limitOption, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, LimitPath::path, Path::pathkeys, Path::pathtype, Path::rows, Path::startup_cost, subpath(), LimitPath::subpath, and Path::total_cost.

Referenced by create_final_distinct_paths(), create_partial_distinct_paths(), and grouping_planner().
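
An illustrative sketch (not part of pathnode.c) of how grouping_planner() stacks this node on top of the otherwise-final path, passing the parse tree's LIMIT/OFFSET expressions plus the integer estimates computed earlier. The helper name and the assumption that offset_est/count_est are already available are hypothetical.

    #include "postgres.h"
    #include "optimizer/pathnode.h"

    /* Sketch: put a Limit node above an already-ordered path when the
     * query has LIMIT or OFFSET. */
    static Path *
    add_limit_sketch(PlannerInfo *root, RelOptInfo *final_rel, Path *best_path,
                     int64 offset_est, int64 count_est)
    {
        Query      *parse = root->parse;

        if (parse->limitCount != NULL || parse->limitOffset != NULL)
            best_path = (Path *) create_limit_path(root, final_rel, best_path,
                                                   parse->limitOffset,
                                                   parse->limitCount,
                                                   parse->limitOption,
                                                   offset_est, count_est);
        return best_path;
    }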

◆ create_lockrows_path()

LockRowsPath* create_lockrows_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
List *rowMarks,
int  epqParam 
)

Definition at line 3650 of file pathnode.c.

3652 {
3653  LockRowsPath *pathnode = makeNode(LockRowsPath);
3654 
3655  pathnode->path.pathtype = T_LockRows;
3656  pathnode->path.parent = rel;
3657  /* LockRows doesn't project, so use source path's pathtarget */
3658  pathnode->path.pathtarget = subpath->pathtarget;
3659  /* For now, assume we are above any joins, so no parameterization */
3660  pathnode->path.param_info = NULL;
3661  pathnode->path.parallel_aware = false;
3662  pathnode->path.parallel_safe = false;
3663  pathnode->path.parallel_workers = 0;
3664  pathnode->path.rows = subpath->rows;
3665 
3666  /*
3667  * The result cannot be assumed sorted, since locking might cause the sort
3668  * key columns to be replaced with new values.
3669  */
3670  pathnode->path.pathkeys = NIL;
3671 
3672  pathnode->subpath = subpath;
3673  pathnode->rowMarks = rowMarks;
3674  pathnode->epqParam = epqParam;
3675 
3676  /*
3677  * We should charge something extra for the costs of row locking and
3678  * possible refetches, but it's hard to say how much. For now, use
3679  * cpu_tuple_cost per row.
3680  */
3681  pathnode->path.startup_cost = subpath->startup_cost;
3682  pathnode->path.total_cost = subpath->total_cost +
3683  cpu_tuple_cost * subpath->rows;
3684 
3685  return pathnode;
3686 }
Path * subpath
Definition: pathnodes.h:2361
List * rowMarks
Definition: pathnodes.h:2362

References cpu_tuple_cost, LockRowsPath::epqParam, makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, LockRowsPath::path, Path::pathkeys, Path::pathtype, LockRowsPath::rowMarks, Path::rows, Path::startup_cost, subpath(), LockRowsPath::subpath, and Path::total_cost.

Referenced by grouping_planner().

◆ create_material_path()

MaterialPath* create_material_path ( RelOptInfo *rel,
Path *subpath 
)

Definition at line 1566 of file pathnode.c.

1567 {
1568  MaterialPath *pathnode = makeNode(MaterialPath);
1569 
1570  Assert(subpath->parent == rel);
1571 
1572  pathnode->path.pathtype = T_Material;
1573  pathnode->path.parent = rel;
1574  pathnode->path.pathtarget = rel->reltarget;
1575  pathnode->path.param_info = subpath->param_info;
1576  pathnode->path.parallel_aware = false;
1577  pathnode->path.parallel_safe = rel->consider_parallel &&
1578  subpath->parallel_safe;
1579  pathnode->path.parallel_workers = subpath->parallel_workers;
1580  pathnode->path.pathkeys = subpath->pathkeys;
1581 
1582  pathnode->subpath = subpath;
1583 
1584  cost_material(&pathnode->path,
1585  subpath->startup_cost,
1586  subpath->total_cost,
1587  subpath->rows,
1588  subpath->pathtarget->width);
1589 
1590  return pathnode;
1591 }
void cost_material(Path *path, Cost input_startup_cost, Cost input_total_cost, double tuples, int width)
Definition: costsize.c:2432
Path * subpath
Definition: pathnodes.h:1982

References Assert, RelOptInfo::consider_parallel, cost_material(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, MaterialPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, subpath(), and MaterialPath::subpath.

Referenced by consider_parallel_nestloop(), match_unsorted_outer(), reparameterize_path(), and set_tablesample_rel_pathlist().
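
An illustrative sketch (not part of pathnode.c) of the most common use, mirroring match_unsorted_outer(): shield the inner side of a nestloop behind a Material node when the inner plan does not already materialize its output. The helper name is hypothetical.

    #include "postgres.h"
    #include "executor/executor.h"
    #include "optimizer/cost.h"
    #include "optimizer/pathnode.h"

    /* Sketch: optionally materialize a nestloop inner path so that rescans
     * become cheap. */
    static Path *
    maybe_materialize_inner(RelOptInfo *innerrel, Path *inner_cheapest_total)
    {
        if (enable_material &&
            !ExecMaterializesOutput(inner_cheapest_total->pathtype))
            return (Path *) create_material_path(innerrel, inner_cheapest_total);
        return inner_cheapest_total;
    }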

◆ create_memoize_path()

MemoizePath* create_memoize_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
List *param_exprs,
List *hash_operators,
bool  singlerow,
bool  binary_mode,
double  calls 
)

Definition at line 1598 of file pathnode.c.

1601 {
1602  MemoizePath *pathnode = makeNode(MemoizePath);
1603 
1604  Assert(subpath->parent == rel);
1605 
1606  pathnode->path.pathtype = T_Memoize;
1607  pathnode->path.parent = rel;
1608  pathnode->path.pathtarget = rel->reltarget;
1609  pathnode->path.param_info = subpath->param_info;
1610  pathnode->path.parallel_aware = false;
1611  pathnode->path.parallel_safe = rel->consider_parallel &&
1612  subpath->parallel_safe;
1613  pathnode->path.parallel_workers = subpath->parallel_workers;
1614  pathnode->path.pathkeys = subpath->pathkeys;
1615 
1616  pathnode->subpath = subpath;
1617  pathnode->hash_operators = hash_operators;
1618  pathnode->param_exprs = param_exprs;
1619  pathnode->singlerow = singlerow;
1620  pathnode->binary_mode = binary_mode;
1621  pathnode->calls = clamp_row_est(calls);
1622 
1623  /*
1624  * For now we set est_entries to 0. cost_memoize_rescan() does all the
1625  * hard work to determine how many cache entries there are likely to be,
1626  * so it seems best to leave it up to that function to fill this field in.
1627  * If left at 0, the executor will make a guess at a good value.
1628  */
1629  pathnode->est_entries = 0;
1630 
1631  /*
1632  * Add a small additional charge for caching the first entry. All the
1633  * harder calculations for rescans are performed in cost_memoize_rescan().
1634  */
1635  pathnode->path.startup_cost = subpath->startup_cost + cpu_tuple_cost;
1636  pathnode->path.total_cost = subpath->total_cost + cpu_tuple_cost;
1637  pathnode->path.rows = subpath->rows;
1638 
1639  return pathnode;
1640 }
bool singlerow
Definition: pathnodes.h:1996
List * hash_operators
Definition: pathnodes.h:1994
uint32 est_entries
Definition: pathnodes.h:2001
bool binary_mode
Definition: pathnodes.h:1998
Cardinality calls
Definition: pathnodes.h:2000
Path * subpath
Definition: pathnodes.h:1993
List * param_exprs
Definition: pathnodes.h:1995

References Assert, MemoizePath::binary_mode, MemoizePath::calls, clamp_row_est(), RelOptInfo::consider_parallel, cpu_tuple_cost, MemoizePath::est_entries, MemoizePath::hash_operators, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, MemoizePath::param_exprs, MemoizePath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, Path::rows, MemoizePath::singlerow, Path::startup_cost, subpath(), MemoizePath::subpath, and Path::total_cost.

Referenced by get_memoize_path(), and reparameterize_path().
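
An illustrative sketch (not part of pathnode.c), loosely following get_memoize_path() in joinpath.c: the cache keys (param_exprs) and their hash equality operators come from the join clauses that parameterize the inner path, and the expected number of calls is the outer path's row estimate. Here those lists are assumed to have been built already; the helper name is hypothetical.

    #include "postgres.h"
    #include "optimizer/pathnode.h"

    /* Sketch: wrap the inner side of a parameterized nestloop in a Memoize
     * node keyed on the prepared parameter expressions. */
    static Path *
    memoize_inner_sketch(PlannerInfo *root, RelOptInfo *innerrel,
                         Path *inner_path, Path *outer_path,
                         List *param_exprs, List *hash_operators,
                         bool single_row_per_key)
    {
        return (Path *) create_memoize_path(root, innerrel, inner_path,
                                            param_exprs, hash_operators,
                                            single_row_per_key,
                                            false,             /* binary_mode */
                                            outer_path->rows); /* calls */
    }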

◆ create_merge_append_path()

MergeAppendPath* create_merge_append_path ( PlannerInfo *root,
RelOptInfo *rel,
List *subpaths,
List *pathkeys,
Relids  required_outer 
)

Definition at line 1415 of file pathnode.c.

1420 {
1421  MergeAppendPath *pathnode = makeNode(MergeAppendPath);
1422  Cost input_startup_cost;
1423  Cost input_total_cost;
1424  ListCell *l;
1425 
1426  pathnode->path.pathtype = T_MergeAppend;
1427  pathnode->path.parent = rel;
1428  pathnode->path.pathtarget = rel->reltarget;
1429  pathnode->path.param_info = get_appendrel_parampathinfo(rel,
1430  required_outer);
1431  pathnode->path.parallel_aware = false;
1432  pathnode->path.parallel_safe = rel->consider_parallel;
1433  pathnode->path.parallel_workers = 0;
1434  pathnode->path.pathkeys = pathkeys;
1435  pathnode->subpaths = subpaths;
1436 
1437  /*
1438  * Apply query-wide LIMIT if known and path is for sole base relation.
1439  * (Handling this at this low level is a bit klugy.)
1440  */
1441  if (bms_equal(rel->relids, root->all_query_rels))
1442  pathnode->limit_tuples = root->limit_tuples;
1443  else
1444  pathnode->limit_tuples = -1.0;
1445 
1446  /*
1447  * Add up the sizes and costs of the input paths.
1448  */
1449  pathnode->path.rows = 0;
1450  input_startup_cost = 0;
1451  input_total_cost = 0;
1452  foreach(l, subpaths)
1453  {
1454  Path *subpath = (Path *) lfirst(l);
1455 
1456  pathnode->path.rows += subpath->rows;
1457  pathnode->path.parallel_safe = pathnode->path.parallel_safe &&
1458  subpath->parallel_safe;
1459 
1460  if (pathkeys_contained_in(pathkeys, subpath->pathkeys))
1461  {
1462  /* Subpath is adequately ordered, we won't need to sort it */
1463  input_startup_cost += subpath->startup_cost;
1464  input_total_cost += subpath->total_cost;
1465  }
1466  else
1467  {
1468  /* We'll need to insert a Sort node, so include cost for that */
1469  Path sort_path; /* dummy for result of cost_sort */
1470 
1471  cost_sort(&sort_path,
1472  root,
1473  pathkeys,
1474  subpath->total_cost,
1475  subpath->rows,
1476  subpath->pathtarget->width,
1477  0.0,
1478  work_mem,
1479  pathnode->limit_tuples);
1480  input_startup_cost += sort_path.startup_cost;
1481  input_total_cost += sort_path.total_cost;
1482  }
1483 
1484  /* All child paths must have same parameterization */
1485  Assert(bms_equal(PATH_REQ_OUTER(subpath), required_outer));
1486  }
1487 
1488  /*
1489  * Now we can compute total costs of the MergeAppend. If there's exactly
1490  * one child path and its parallel awareness matches that of the
1491  * MergeAppend, then the MergeAppend is a no-op and will be discarded
1492  * later (in setrefs.c); otherwise we do the normal cost calculation.
1493  */
1494  if (list_length(subpaths) == 1 &&
1495  ((Path *) linitial(subpaths))->parallel_aware ==
1496  pathnode->path.parallel_aware)
1497  {
1498  pathnode->path.startup_cost = input_startup_cost;
1499  pathnode->path.total_cost = input_total_cost;
1500  }
1501  else
1502  cost_merge_append(&pathnode->path, root,
1503  pathkeys, list_length(subpaths),
1504  input_startup_cost, input_total_cost,
1505  pathnode->path.rows);
1506 
1507  return pathnode;
1508 }
void cost_merge_append(Path *path, PlannerInfo *root, List *pathkeys, int n_streams, Cost input_startup_cost, Cost input_total_cost, double tuples)
Definition: costsize.c:2383
Cardinality limit_tuples
Definition: pathnodes.h:1957

References Assert, bms_equal(), RelOptInfo::consider_parallel, cost_merge_append(), cost_sort(), get_appendrel_parampathinfo(), lfirst, MergeAppendPath::limit_tuples, linitial, list_length(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, MergeAppendPath::path, PATH_REQ_OUTER, Path::pathkeys, pathkeys_contained_in(), Path::pathtype, RelOptInfo::relids, RelOptInfo::reltarget, root, Path::rows, Path::startup_cost, subpath(), MergeAppendPath::subpaths, Path::total_cost, and work_mem.

Referenced by generate_orderedappend_paths(), and generate_union_paths().

◆ create_mergejoin_path()

MergePath* create_mergejoin_path ( PlannerInfo *root,
RelOptInfo *joinrel,
JoinType  jointype,
JoinCostWorkspace *workspace,
JoinPathExtraData *extra,
Path *outer_path,
Path *inner_path,
List *restrict_clauses,
List *pathkeys,
Relids  required_outer,
List *mergeclauses,
List *outersortkeys,
List *innersortkeys 
)

Definition at line 2541 of file pathnode.c.

2554 {
2555  MergePath *pathnode = makeNode(MergePath);
2556 
2557  pathnode->jpath.path.pathtype = T_MergeJoin;
2558  pathnode->jpath.path.parent = joinrel;
2559  pathnode->jpath.path.pathtarget = joinrel->reltarget;
2560  pathnode->jpath.path.param_info =
2561  get_joinrel_parampathinfo(root,
2562  joinrel,
2563  outer_path,
2564  inner_path,
2565  extra->sjinfo,
2566  required_outer,
2567  &restrict_clauses);
2568  pathnode->jpath.path.parallel_aware = false;
2569  pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2570  outer_path->parallel_safe && inner_path->parallel_safe;
2571  /* This is a foolish way to estimate parallel_workers, but for now... */
2572  pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2573  pathnode->jpath.path.pathkeys = pathkeys;
2574  pathnode->jpath.jointype = jointype;
2575  pathnode->jpath.inner_unique = extra->inner_unique;
2576  pathnode->jpath.outerjoinpath = outer_path;
2577  pathnode->jpath.innerjoinpath = inner_path;
2578  pathnode->jpath.joinrestrictinfo = restrict_clauses;
2579  pathnode->path_mergeclauses = mergeclauses;
2580  pathnode->outersortkeys = outersortkeys;
2581  pathnode->innersortkeys = innersortkeys;
2582  /* pathnode->skip_mark_restore will be set by final_cost_mergejoin */
2583  /* pathnode->materialize_inner will be set by final_cost_mergejoin */
2584 
2585  final_cost_mergejoin(root, pathnode, workspace, extra);
2586 
2587  return pathnode;
2588 }
void final_cost_mergejoin(PlannerInfo *root, MergePath *path, JoinCostWorkspace *workspace, JoinPathExtraData *extra)
Definition: costsize.c:3724
List * outersortkeys
Definition: pathnodes.h:2134
List * innersortkeys
Definition: pathnodes.h:2135
JoinPath jpath
Definition: pathnodes.h:2132
List * path_mergeclauses
Definition: pathnodes.h:2133

References RelOptInfo::consider_parallel, final_cost_mergejoin(), get_joinrel_parampathinfo(), JoinPath::inner_unique, JoinPathExtraData::inner_unique, JoinPath::innerjoinpath, MergePath::innersortkeys, JoinPath::joinrestrictinfo, JoinPath::jointype, MergePath::jpath, makeNode, JoinPath::outerjoinpath, MergePath::outersortkeys, Path::parallel_safe, Path::parallel_workers, MergePath::path_mergeclauses, RelOptInfo::reltarget, root, and JoinPathExtraData::sjinfo.

Referenced by try_mergejoin_path(), and try_partial_mergejoin_path().

◆ create_minmaxagg_path()

MinMaxAggPath* create_minmaxagg_path ( PlannerInfo *root,
RelOptInfo *rel,
PathTarget *target,
List *mmaggregates,
List *quals 
)

Definition at line 3385 of file pathnode.c.

3390 {
3391  MinMaxAggPath *pathnode = makeNode(MinMaxAggPath);
3392  Cost initplan_cost;
3393  ListCell *lc;
3394 
3395  /* The topmost generated Plan node will be a Result */
3396  pathnode->path.pathtype = T_Result;
3397  pathnode->path.parent = rel;
3398  pathnode->path.pathtarget = target;
3399  /* For now, assume we are above any joins, so no parameterization */
3400  pathnode->path.param_info = NULL;
3401  pathnode->path.parallel_aware = false;
3402  pathnode->path.parallel_safe = true; /* might change below */
3403  pathnode->path.parallel_workers = 0;
3404  /* Result is one unordered row */
3405  pathnode->path.rows = 1;
3406  pathnode->path.pathkeys = NIL;
3407 
3408  pathnode->mmaggregates = mmaggregates;
3409  pathnode->quals = quals;
3410 
3411  /* Calculate cost of all the initplans, and check parallel safety */
3412  initplan_cost = 0;
3413  foreach(lc, mmaggregates)
3414  {
3415  MinMaxAggInfo *mminfo = (MinMaxAggInfo *) lfirst(lc);
3416 
3417  initplan_cost += mminfo->pathcost;
3418  if (!mminfo->path->parallel_safe)
3419  pathnode->path.parallel_safe = false;
3420  }
3421 
3422  /* add tlist eval cost for each output row, plus cpu_tuple_cost */
3423  pathnode->path.startup_cost = initplan_cost + target->cost.startup;
3424  pathnode->path.total_cost = initplan_cost + target->cost.startup +
3425  target->cost.per_tuple + cpu_tuple_cost;
3426 
3427  /*
3428  * Add cost of qual, if any --- but we ignore its selectivity, since our
3429  * rowcount estimate should be 1 no matter what the qual is.
3430  */
3431  if (quals)
3432  {
3433  QualCost qual_cost;
3434 
3435  cost_qual_eval(&qual_cost, quals, root);
3436  pathnode->path.startup_cost += qual_cost.startup;
3437  pathnode->path.total_cost += qual_cost.startup + qual_cost.per_tuple;
3438  }
3439 
3440  /*
3441  * If the initplans were all parallel-safe, also check safety of the
3442  * target and quals. (The Result node itself isn't parallelizable, but if
3443  * we are in a subquery then it can be useful for the outer query to know
3444  * that this one is parallel-safe.)
3445  */
3446  if (pathnode->path.parallel_safe)
3447  pathnode->path.parallel_safe =
3448  is_parallel_safe(root, (Node *) target->exprs) &&
3449  is_parallel_safe(root, (Node *) quals);
3450 
3451  return pathnode;
3452 }
List * quals
Definition: pathnodes.h:2310
List * mmaggregates
Definition: pathnodes.h:2309

References PathTarget::cost, cost_qual_eval(), cpu_tuple_cost, PathTarget::exprs, is_parallel_safe(), lfirst, makeNode, MinMaxAggPath::mmaggregates, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, MinMaxAggPath::path, MinMaxAggInfo::path, MinMaxAggInfo::pathcost, Path::pathkeys, Path::pathtype, QualCost::per_tuple, MinMaxAggPath::quals, root, Path::rows, QualCost::startup, Path::startup_cost, and Path::total_cost.

Referenced by preprocess_minmax_aggregates().

◆ create_modifytable_path()

ModifyTablePath* create_modifytable_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
CmdType  operation,
bool  canSetTag,
Index  nominalRelation,
Index  rootRelation,
bool  partColsUpdated,
List *resultRelations,
List *updateColnosLists,
List *withCheckOptionLists,
List *returningLists,
List *rowMarks,
OnConflictExpr *onconflict,
List *mergeActionLists,
List *mergeJoinConditions,
int  epqParam 
)

Definition at line 3713 of file pathnode.c.

3724 {
3725  ModifyTablePath *pathnode = makeNode(ModifyTablePath);
3726 
3727  Assert(operation == CMD_MERGE ||
3728  (operation == CMD_UPDATE ?
3729  list_length(resultRelations) == list_length(updateColnosLists) :
3730  updateColnosLists == NIL));
3731  Assert(withCheckOptionLists == NIL ||
3732  list_length(resultRelations) == list_length(withCheckOptionLists));
3733  Assert(returningLists == NIL ||
3734  list_length(resultRelations) == list_length(returningLists));
3735 
3736  pathnode->path.pathtype = T_ModifyTable;
3737  pathnode->path.parent = rel;
3738  /* pathtarget is not interesting, just make it minimally valid */
3739  pathnode->path.pathtarget = rel->reltarget;
3740  /* For now, assume we are above any joins, so no parameterization */
3741  pathnode->path.param_info = NULL;
3742  pathnode->path.parallel_aware = false;
3743  pathnode->path.parallel_safe = false;
3744  pathnode->path.parallel_workers = 0;
3745  pathnode->path.pathkeys = NIL;
3746 
3747  /*
3748  * Compute cost & rowcount as subpath cost & rowcount (if RETURNING)
3749  *
3750  * Currently, we don't charge anything extra for the actual table
3751  * modification work, nor for the WITH CHECK OPTIONS or RETURNING
3752  * expressions if any. It would only be window dressing, since
3753  * ModifyTable is always a top-level node and there is no way for the
3754  * costs to change any higher-level planning choices. But we might want
3755  * to make it look better sometime.
3756  */
3757  pathnode->path.startup_cost = subpath->startup_cost;
3758  pathnode->path.total_cost = subpath->total_cost;
3759  if (returningLists != NIL)
3760  {
3761  pathnode->path.rows = subpath->rows;
3762 
3763  /*
3764  * Set width to match the subpath output. XXX this is totally wrong:
3765  * we should return an average of the RETURNING tlist widths. But
3766  * it's what happened historically, and improving it is a task for
3767  * another day. (Again, it's mostly window dressing.)
3768  */
3769  pathnode->path.pathtarget->width = subpath->pathtarget->width;
3770  }
3771  else
3772  {
3773  pathnode->path.rows = 0;
3774  pathnode->path.pathtarget->width = 0;
3775  }
3776 
3777  pathnode->subpath = subpath;
3778  pathnode->operation = operation;
3779  pathnode->canSetTag = canSetTag;
3780  pathnode->nominalRelation = nominalRelation;
3781  pathnode->rootRelation = rootRelation;
3782  pathnode->partColsUpdated = partColsUpdated;
3783  pathnode->resultRelations = resultRelations;
3784  pathnode->updateColnosLists = updateColnosLists;
3785  pathnode->withCheckOptionLists = withCheckOptionLists;
3786  pathnode->returningLists = returningLists;
3787  pathnode->rowMarks = rowMarks;
3788  pathnode->onconflict = onconflict;
3789  pathnode->epqParam = epqParam;
3790  pathnode->mergeActionLists = mergeActionLists;
3791  pathnode->mergeJoinConditions = mergeJoinConditions;
3792 
3793  return pathnode;
3794 }
@ CMD_MERGE
Definition: nodes.h:269
@ CMD_UPDATE
Definition: nodes.h:266
bool partColsUpdated
Definition: pathnodes.h:2381
List * returningLists
Definition: pathnodes.h:2385
List * resultRelations
Definition: pathnodes.h:2382
List * withCheckOptionLists
Definition: pathnodes.h:2384
List * mergeJoinConditions
Definition: pathnodes.h:2391
List * updateColnosLists
Definition: pathnodes.h:2383
OnConflictExpr * onconflict
Definition: pathnodes.h:2387
CmdType operation
Definition: pathnodes.h:2377
Index rootRelation
Definition: pathnodes.h:2380
Index nominalRelation
Definition: pathnodes.h:2379
List * mergeActionLists
Definition: pathnodes.h:2389

References Assert, ModifyTablePath::canSetTag, CMD_MERGE, CMD_UPDATE, ModifyTablePath::epqParam, list_length(), makeNode, ModifyTablePath::mergeActionLists, ModifyTablePath::mergeJoinConditions, NIL, ModifyTablePath::nominalRelation, ModifyTablePath::onconflict, ModifyTablePath::operation, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ModifyTablePath::partColsUpdated, ModifyTablePath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, ModifyTablePath::resultRelations, ModifyTablePath::returningLists, ModifyTablePath::rootRelation, ModifyTablePath::rowMarks, Path::rows, Path::startup_cost, subpath(), ModifyTablePath::subpath, Path::total_cost, ModifyTablePath::updateColnosLists, and ModifyTablePath::withCheckOptionLists.

Referenced by grouping_planner().

◆ create_namedtuplestorescan_path()

Path* create_namedtuplestorescan_path ( PlannerInfo *root,
RelOptInfo *rel,
Relids  required_outer 
)

Definition at line 2138 of file pathnode.c.

2140 {
2141  Path *pathnode = makeNode(Path);
2142 
2143  pathnode->pathtype = T_NamedTuplestoreScan;
2144  pathnode->parent = rel;
2145  pathnode->pathtarget = rel->reltarget;
2146  pathnode->param_info = get_baserel_parampathinfo(root, rel,
2147  required_outer);
2148  pathnode->parallel_aware = false;
2149  pathnode->parallel_safe = rel->consider_parallel;
2150  pathnode->parallel_workers = 0;
2151  pathnode->pathkeys = NIL; /* result is always unordered */
2152 
2153  cost_namedtuplestorescan(pathnode, root, rel, pathnode->param_info);
2154 
2155  return pathnode;
2156 }
void cost_namedtuplestorescan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1718

References RelOptInfo::consider_parallel, cost_namedtuplestorescan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_namedtuplestore_pathlist().

◆ create_nestloop_path()

NestPath* create_nestloop_path ( PlannerInfo *root,
RelOptInfo *joinrel,
JoinType  jointype,
JoinCostWorkspace *workspace,
JoinPathExtraData *extra,
Path *outer_path,
Path *inner_path,
List *restrict_clauses,
List *pathkeys,
Relids  required_outer 
)

Definition at line 2445 of file pathnode.c.

2455 {
2456  NestPath *pathnode = makeNode(NestPath);
2457  Relids inner_req_outer = PATH_REQ_OUTER(inner_path);
2458  Relids outerrelids;
2459 
2460  /*
2461  * Paths are parameterized by top-level parents, so run parameterization
2462  * tests on the parent relids.
2463  */
2464  if (outer_path->parent->top_parent_relids)
2465  outerrelids = outer_path->parent->top_parent_relids;
2466  else
2467  outerrelids = outer_path->parent->relids;
2468 
2469  /*
2470  * If the inner path is parameterized by the outer, we must drop any
2471  * restrict_clauses that are due to be moved into the inner path. We have
2472  * to do this now, rather than postpone the work till createplan time,
2473  * because the restrict_clauses list can affect the size and cost
2474  * estimates for this path. We detect such clauses by checking for serial
2475  * number match to clauses already enforced in the inner path.
2476  */
2477  if (bms_overlap(inner_req_outer, outerrelids))
2478  {
2479  Bitmapset *enforced_serials = get_param_path_clause_serials(inner_path);
2480  List *jclauses = NIL;
2481  ListCell *lc;
2482 
2483  foreach(lc, restrict_clauses)
2484  {
2485  RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
2486 
2487  if (!bms_is_member(rinfo->rinfo_serial, enforced_serials))
2488  jclauses = lappend(jclauses, rinfo);
2489  }
2490  restrict_clauses = jclauses;
2491  }
2492 
2493  pathnode->jpath.path.pathtype = T_NestLoop;
2494  pathnode->jpath.path.parent = joinrel;
2495  pathnode->jpath.path.pathtarget = joinrel->reltarget;
2496  pathnode->jpath.path.param_info =
2497  get_joinrel_parampathinfo(root,
2498  joinrel,
2499  outer_path,
2500  inner_path,
2501  extra->sjinfo,
2502  required_outer,
2503  &restrict_clauses);
2504  pathnode->jpath.path.parallel_aware = false;
2505  pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2506  outer_path->parallel_safe && inner_path->parallel_safe;
2507  /* This is a foolish way to estimate parallel_workers, but for now... */
2508  pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2509  pathnode->jpath.path.pathkeys = pathkeys;
2510  pathnode->jpath.jointype = jointype;
2511  pathnode->jpath.inner_unique = extra->inner_unique;
2512  pathnode->jpath.outerjoinpath = outer_path;
2513  pathnode->jpath.innerjoinpath = inner_path;
2514  pathnode->jpath.joinrestrictinfo = restrict_clauses;
2515 
2516  final_cost_nestloop(root, pathnode, workspace, extra);
2517 
2518  return pathnode;
2519 }
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:510
void final_cost_nestloop(PlannerInfo *root, NestPath *path, JoinCostWorkspace *workspace, JoinPathExtraData *extra)
Definition: costsize.c:3287
Bitmapset * get_param_path_clause_serials(Path *path)
Definition: relnode.c:1922
JoinPath jpath
Definition: pathnodes.h:2092
int rinfo_serial
Definition: pathnodes.h:2636

References bms_is_member(), bms_overlap(), RelOptInfo::consider_parallel, final_cost_nestloop(), get_joinrel_parampathinfo(), get_param_path_clause_serials(), JoinPath::inner_unique, JoinPathExtraData::inner_unique, JoinPath::innerjoinpath, JoinPath::joinrestrictinfo, JoinPath::jointype, NestPath::jpath, lappend(), lfirst, makeNode, NIL, JoinPath::outerjoinpath, Path::parallel_safe, Path::parallel_workers, PATH_REQ_OUTER, RelOptInfo::reltarget, RestrictInfo::rinfo_serial, root, and JoinPathExtraData::sjinfo.

Referenced by try_nestloop_path(), and try_partial_nestloop_path().

◆ create_projection_path()

ProjectionPath* create_projection_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
PathTarget *target 
)

Definition at line 2673 of file pathnode.c.

2677 {
2678  ProjectionPath *pathnode = makeNode(ProjectionPath);
2679  PathTarget *oldtarget;
2680 
2681  /*
2682  * We mustn't put a ProjectionPath directly above another; it's useless
2683  * and will confuse create_projection_plan. Rather than making sure all
2684  * callers handle that, let's implement it here, by stripping off any
2685  * ProjectionPath in what we're given. Given this rule, there won't be
2686  * more than one.
2687  */
2688  if (IsA(subpath, ProjectionPath))
2689  {
2690  ProjectionPath *subpp = (ProjectionPath *) subpath;
2691 
2692  Assert(subpp->path.parent == rel);
2693  subpath = subpp->subpath;
2694  Assert(subpath->parent == rel);
2695  }
2696 
2697  pathnode->path.pathtype = T_Result;
2698  pathnode->path.parent = rel;
2699  pathnode->path.pathtarget = target;
2700  /* For now, assume we are above any joins, so no parameterization */
2701  pathnode->path.param_info = NULL;
2702  pathnode->path.parallel_aware = false;
2703  pathnode->path.parallel_safe = rel->consider_parallel &&
2704  subpath->parallel_safe &&
2705  is_parallel_safe(root, (Node *) target->exprs);
2706  pathnode->path.parallel_workers = subpath->parallel_workers;
2707  /* Projection does not change the sort order */
2708  pathnode->path.pathkeys = subpath->pathkeys;
2709 
2710  pathnode->subpath = subpath;
2711 
2712  /*
2713  * We might not need a separate Result node. If the input plan node type
2714  * can project, we can just tell it to project something else. Or, if it
2715  * can't project but the desired target has the same expression list as
2716  * what the input will produce anyway, we can still give it the desired
2717  * tlist (possibly changing its ressortgroupref labels, but nothing else).
2718  * Note: in the latter case, create_projection_plan has to recheck our
2719  * conclusion; see comments therein.
2720  */
2721  oldtarget = subpath->pathtarget;
2722  if (is_projection_capable_path(subpath) &&
2723  equal(oldtarget->exprs, target->exprs))
2724  {
2725  /* No separate Result node needed */
2726  pathnode->dummypp = true;
2727 
2728  /*
2729  * Set cost of plan as subpath's cost, adjusted for tlist replacement.
2730  */
2731  pathnode->path.rows = subpath->rows;
2732  pathnode->path.startup_cost = subpath->startup_cost +
2733  (target->cost.startup - oldtarget->cost.startup);
2734  pathnode->path.total_cost = subpath->total_cost +
2735  (target->cost.startup - oldtarget->cost.startup) +
2736  (target->cost.per_tuple - oldtarget->cost.per_tuple) * subpath->rows;
2737  }
2738  else
2739  {
2740  /* We really do need the Result node */
2741  pathnode->dummypp = false;
2742 
2743  /*
2744  * The Result node's cost is cpu_tuple_cost per row, plus the cost of
2745  * evaluating the tlist. There is no qual to worry about.
2746  */
2747  pathnode->path.rows = subpath->rows;
2748  pathnode->path.startup_cost = subpath->startup_cost +
2749  target->cost.startup;
2750  pathnode->path.total_cost = subpath->total_cost +
2751  target->cost.startup +
2752  (cpu_tuple_cost + target->cost.per_tuple) * subpath->rows;
2753  }
2754 
2755  return pathnode;
2756 }
bool equal(const void *a, const void *b)
Definition: equalfuncs.c:223
Path * subpath
Definition: pathnodes.h:2174

References Assert, RelOptInfo::consider_parallel, PathTarget::cost, cpu_tuple_cost, ProjectionPath::dummypp, equal(), PathTarget::exprs, is_parallel_safe(), is_projection_capable_path(), IsA, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ProjectionPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), ProjectionPath::subpath, and Path::total_cost.

Referenced by add_paths_with_pathkeys_for_rel(), adjust_paths_for_srfs(), apply_projection_to_path(), apply_scanjoin_target_to_paths(), and recurse_set_operations().
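
An illustrative sketch (not part of pathnode.c): give an existing path a new PathTarget, e.g. the final scan/join target, in the spirit of apply_scanjoin_target_to_paths(). create_projection_path() itself decides whether a separate Result node is really needed (the dummypp case) or the child can simply project the new target list. The wrapper name is hypothetical.

    #include "postgres.h"
    #include "optimizer/pathnode.h"

    /* Sketch: re-target a relation's cheapest total path. */
    static Path *
    retarget_cheapest_sketch(PlannerInfo *root, RelOptInfo *rel,
                             PathTarget *new_target)
    {
        return (Path *) create_projection_path(root, rel,
                                               rel->cheapest_total_path,
                                               new_target);
    }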

◆ create_recursiveunion_path()

RecursiveUnionPath* create_recursiveunion_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *leftpath,
Path *rightpath,
PathTarget *target,
List *distinctList,
int  wtParam,
double  numGroups 
)

Definition at line 3605 of file pathnode.c.

3613 {
3614  RecursiveUnionPath *pathnode = makeNode(RecursiveUnionPath);
3615 
3616  pathnode->path.pathtype = T_RecursiveUnion;
3617  pathnode->path.parent = rel;
3618  pathnode->path.pathtarget = target;
3619  /* For now, assume we are above any joins, so no parameterization */
3620  pathnode->path.param_info = NULL;
3621  pathnode->path.parallel_aware = false;
3622  pathnode->path.parallel_safe = rel->consider_parallel &&
3623  leftpath->parallel_safe && rightpath->parallel_safe;
3624  /* Foolish, but we'll do it like joins for now: */
3625  pathnode->path.parallel_workers = leftpath->parallel_workers;
3626  /* RecursiveUnion result is always unsorted */
3627  pathnode->path.pathkeys = NIL;
3628 
3629  pathnode->leftpath = leftpath;
3630  pathnode->rightpath = rightpath;
3631  pathnode->distinctList = distinctList;
3632  pathnode->wtParam = wtParam;
3633  pathnode->numGroups = numGroups;
3634 
3635  cost_recursive_union(&pathnode->path, leftpath, rightpath);
3636 
3637  return pathnode;
3638 }
void cost_recursive_union(Path *runion, Path *nrterm, Path *rterm)
Definition: costsize.c:1792
Cardinality numGroups
Definition: pathnodes.h:2352

References RelOptInfo::consider_parallel, cost_recursive_union(), RecursiveUnionPath::distinctList, RecursiveUnionPath::leftpath, makeNode, NIL, RecursiveUnionPath::numGroups, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, RecursiveUnionPath::path, Path::pathkeys, Path::pathtype, RecursiveUnionPath::rightpath, and RecursiveUnionPath::wtParam.

Referenced by generate_recursion_path().

◆ create_resultscan_path()

Path* create_resultscan_path ( PlannerInfo *root,
RelOptInfo *rel,
Relids  required_outer 
)

Definition at line 2164 of file pathnode.c.

2166 {
2167  Path *pathnode = makeNode(Path);
2168 
2169  pathnode->pathtype = T_Result;
2170  pathnode->parent = rel;
2171  pathnode->pathtarget = rel->reltarget;
2172  pathnode->param_info = get_baserel_parampathinfo(root, rel,
2173  required_outer);
2174  pathnode->parallel_aware = false;
2175  pathnode->parallel_safe = rel->consider_parallel;
2176  pathnode->parallel_workers = 0;
2177  pathnode->pathkeys = NIL; /* result is always unordered */
2178 
2179  cost_resultscan(pathnode, root, rel, pathnode->param_info);
2180 
2181  return pathnode;
2182 }
void cost_resultscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1755

References RelOptInfo::consider_parallel, cost_resultscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by reparameterize_path(), and set_result_pathlist().

◆ create_samplescan_path()

Path* create_samplescan_path ( PlannerInfo *root,
RelOptInfo *rel,
Relids  required_outer 
)

Definition at line 952 of file pathnode.c.

953 {
954  Path *pathnode = makeNode(Path);
955 
956  pathnode->pathtype = T_SampleScan;
957  pathnode->parent = rel;
958  pathnode->pathtarget = rel->reltarget;
959  pathnode->param_info = get_baserel_parampathinfo(root, rel,
960  required_outer);
961  pathnode->parallel_aware = false;
962  pathnode->parallel_safe = rel->consider_parallel;
963  pathnode->parallel_workers = 0;
964  pathnode->pathkeys = NIL; /* samplescan has unordered result */
965 
966  cost_samplescan(pathnode, root, rel, pathnode->param_info);
967 
968  return pathnode;
969 }
void cost_samplescan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:361

References RelOptInfo::consider_parallel, cost_samplescan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by reparameterize_path(), and set_tablesample_rel_pathlist().

◆ create_seqscan_path()

Path* create_seqscan_path ( PlannerInfo *root,
RelOptInfo *rel,
Relids  required_outer,
int  parallel_workers 
)

Definition at line 927 of file pathnode.c.

929 {
930  Path *pathnode = makeNode(Path);
931 
932  pathnode->pathtype = T_SeqScan;
933  pathnode->parent = rel;
934  pathnode->pathtarget = rel->reltarget;
935  pathnode->param_info = get_baserel_parampathinfo(root, rel,
936  required_outer);
937  pathnode->parallel_aware = (parallel_workers > 0);
938  pathnode->parallel_safe = rel->consider_parallel;
939  pathnode->parallel_workers = parallel_workers;
940  pathnode->pathkeys = NIL; /* seqscan has unordered result */
941 
942  cost_seqscan(pathnode, root, rel, pathnode->param_info);
943 
944  return pathnode;
945 }
void cost_seqscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:284

References RelOptInfo::consider_parallel, cost_seqscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by create_plain_partial_paths(), plan_cluster_use_sort(), reparameterize_path(), and set_plain_rel_pathlist().
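
An illustrative sketch (not part of pathnode.c) of calling this from an extension's set_rel_pathlist_hook, mirroring what set_plain_rel_pathlist() does for ordinary tables: the relation's lateral_relids become the required outer rels, and parallel_workers is 0 for a non-partial path. The callback name is hypothetical; a real extension would usually add some other path type here.

    #include "postgres.h"
    #include "optimizer/pathnode.h"
    #include "optimizer/paths.h"

    /* Sketch of a set_rel_pathlist_hook callback adding a plain seqscan. */
    static void
    my_set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
                        Index rti, RangeTblEntry *rte)
    {
        if (rel->reloptkind == RELOPT_BASEREL && rte->rtekind == RTE_RELATION)
            add_path(rel, create_seqscan_path(root, rel,
                                              rel->lateral_relids,
                                              0 /* parallel_workers */));
    }

Such a callback would be installed by assigning it to set_rel_pathlist_hook from the extension's _PG_init().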

◆ create_set_projection_path()

ProjectSetPath* create_set_projection_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
PathTarget *target 
)

Definition at line 2870 of file pathnode.c.

2874 {
2875  ProjectSetPath *pathnode = makeNode(ProjectSetPath);
2876  double tlist_rows;
2877  ListCell *lc;
2878 
2879  pathnode->path.pathtype = T_ProjectSet;
2880  pathnode->path.parent = rel;
2881  pathnode->path.pathtarget = target;
2882  /* For now, assume we are above any joins, so no parameterization */
2883  pathnode->path.param_info = NULL;
2884  pathnode->path.parallel_aware = false;
2885  pathnode->path.parallel_safe = rel->consider_parallel &&
2886  subpath->parallel_safe &&
2887  is_parallel_safe(root, (Node *) target->exprs);
2888  pathnode->path.parallel_workers = subpath->parallel_workers;
2889  /* Projection does not change the sort order XXX? */
2890  pathnode->path.pathkeys = subpath->pathkeys;
2891 
2892  pathnode->subpath = subpath;
2893 
2894  /*
2895  * Estimate number of rows produced by SRFs for each row of input; if
2896  * there's more than one in this node, use the maximum.
2897  */
2898  tlist_rows = 1;
2899  foreach(lc, target->exprs)
2900  {
2901  Node *node = (Node *) lfirst(lc);
2902  double itemrows;
2903 
2904  itemrows = expression_returns_set_rows(root, node);
2905  if (tlist_rows < itemrows)
2906  tlist_rows = itemrows;
2907  }
2908 
2909  /*
2910  * In addition to the cost of evaluating the tlist, charge cpu_tuple_cost
2911  * per input row, and half of cpu_tuple_cost for each added output row.
2912  * This is slightly bizarre maybe, but it's what 9.6 did; we may revisit
2913  * this estimate later.
2914  */
2915  pathnode->path.rows = subpath->rows * tlist_rows;
2916  pathnode->path.startup_cost = subpath->startup_cost +
2917  target->cost.startup;
2918  pathnode->path.total_cost = subpath->total_cost +
2919  target->cost.startup +
2920  (cpu_tuple_cost + target->cost.per_tuple) * subpath->rows +
2921  (pathnode->path.rows - subpath->rows) * cpu_tuple_cost / 2;
2922 
2923  return pathnode;
2924 }
double expression_returns_set_rows(PlannerInfo *root, Node *clause)
Definition: clauses.c:289
Path * subpath
Definition: pathnodes.h:2186

References RelOptInfo::consider_parallel, PathTarget::cost, cpu_tuple_cost, expression_returns_set_rows(), PathTarget::exprs, is_parallel_safe(), lfirst, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ProjectSetPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), ProjectSetPath::subpath, and Path::total_cost.

Referenced by adjust_paths_for_srfs().
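
An illustrative sketch (not part of pathnode.c), in the spirit of adjust_paths_for_srfs(): once the planner has split the target list into SRF-free and SRF-bearing levels, a level whose PathTarget contains set-returning functions must be applied with a ProjectSet path rather than a plain projection. The helper name and the precomputed contains_srfs flag are hypothetical.

    #include "postgres.h"
    #include "optimizer/pathnode.h"

    /* Sketch: apply one target-list level on top of 'subpath'. */
    static Path *
    apply_target_level_sketch(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
                              PathTarget *level_target, bool contains_srfs)
    {
        if (contains_srfs)
            return (Path *) create_set_projection_path(root, rel, subpath,
                                                       level_target);
        return (Path *) create_projection_path(root, rel, subpath, level_target);
    }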

◆ create_setop_path()

SetOpPath* create_setop_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
SetOpCmd  cmd,
SetOpStrategy  strategy,
List *distinctList,
AttrNumber  flagColIdx,
int  firstFlag,
double  numGroups,
double  outputRows 
)

Definition at line 3543 of file pathnode.c.

3553 {
3554  SetOpPath *pathnode = makeNode(SetOpPath);
3555 
3556  pathnode->path.pathtype = T_SetOp;
3557  pathnode->path.parent = rel;
3558  /* SetOp doesn't project, so use source path's pathtarget */
3559  pathnode->path.pathtarget = subpath->pathtarget;
3560  /* For now, assume we are above any joins, so no parameterization */
3561  pathnode->path.param_info = NULL;
3562  pathnode->path.parallel_aware = false;
3563  pathnode->path.parallel_safe = rel->consider_parallel &&
3564  subpath->parallel_safe;
3565  pathnode->path.parallel_workers = subpath->parallel_workers;
3566  /* SetOp preserves the input sort order if in sort mode */
3567  pathnode->path.pathkeys =
3568  (strategy == SETOP_SORTED) ? subpath->pathkeys : NIL;
3569 
3570  pathnode->subpath = subpath;
3571  pathnode->cmd = cmd;
3572  pathnode->strategy = strategy;
3573  pathnode->distinctList = distinctList;
3574  pathnode->flagColIdx = flagColIdx;
3575  pathnode->firstFlag = firstFlag;
3576  pathnode->numGroups = numGroups;
3577 
3578  /*
3579  * Charge one cpu_operator_cost per comparison per input tuple. We assume
3580  * all columns get compared at most of the tuples.
3581  */
3582  pathnode->path.startup_cost = subpath->startup_cost;
3583  pathnode->path.total_cost = subpath->total_cost +
3584  cpu_operator_cost * subpath->rows * list_length(distinctList);
3585  pathnode->path.rows = outputRows;
3586 
3587  return pathnode;
3588 }
double cpu_operator_cost
Definition: costsize.c:123
@ SETOP_SORTED
Definition: nodes.h:406
List * distinctList
Definition: pathnodes.h:2336
Cardinality numGroups
Definition: pathnodes.h:2339
int firstFlag
Definition: pathnodes.h:2338
Path * subpath
Definition: pathnodes.h:2333
SetOpCmd cmd
Definition: pathnodes.h:2334
Path path
Definition: pathnodes.h:2332
SetOpStrategy strategy
Definition: pathnodes.h:2335
AttrNumber flagColIdx
Definition: pathnodes.h:2337

References SetOpPath::cmd, RelOptInfo::consider_parallel, cpu_operator_cost, SetOpPath::distinctList, SetOpPath::firstFlag, SetOpPath::flagColIdx, list_length(), makeNode, NIL, SetOpPath::numGroups, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, SetOpPath::path, Path::pathkeys, Path::pathtype, Path::rows, SETOP_SORTED, Path::startup_cost, SetOpPath::strategy, subpath(), SetOpPath::subpath, and Path::total_cost.

Referenced by generate_nonunion_paths().

◆ create_sort_path()

SortPath* create_sort_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
List *pathkeys,
double  limit_tuples 
)

Definition at line 2988 of file pathnode.c.

2993 {
2994  SortPath *pathnode = makeNode(SortPath);
2995 
2996  pathnode->path.pathtype = T_Sort;
2997  pathnode->path.parent = rel;
2998  /* Sort doesn't project, so use source path's pathtarget */
2999  pathnode->path.pathtarget = subpath->pathtarget;
3000  /* For now, assume we are above any joins, so no parameterization */
3001  pathnode->path.param_info = NULL;
3002  pathnode->path.parallel_aware = false;
3003  pathnode->path.parallel_safe = rel->consider_parallel &&
3004  subpath->parallel_safe;
3005  pathnode->path.parallel_workers = subpath->parallel_workers;
3006  pathnode->path.pathkeys = pathkeys;
3007 
3008  pathnode->subpath = subpath;
3009 
3010  cost_sort(&pathnode->path, root, pathkeys,
3011  subpath->total_cost,
3012  subpath->rows,
3013  subpath->pathtarget->width,
3014  0.0, /* XXX comparison_cost shouldn't be 0? */
3015  work_mem, limit_tuples);
3016 
3017  return pathnode;
3018 }

References RelOptInfo::consider_parallel, cost_sort(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, SortPath::path, Path::pathkeys, Path::pathtype, root, subpath(), SortPath::subpath, and work_mem.

Referenced by add_paths_with_pathkeys_for_rel(), build_setop_child_paths(), create_final_distinct_paths(), create_one_window_path(), create_ordered_paths(), create_partial_distinct_paths(), gather_grouping_paths(), generate_nonunion_paths(), generate_union_paths(), generate_useful_gather_paths(), and make_ordered_path().
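
An illustrative sketch (not part of pathnode.c), similar to what create_ordered_paths() does for the query's ORDER BY: add an explicit Sort only when the input ordering does not already satisfy root->sort_pathkeys. Passing a positive limit_tuples lets cost_sort() assume a bounded (top-N) sort; -1 means no limit. The helper name is hypothetical.

    #include "postgres.h"
    #include "optimizer/pathnode.h"
    #include "optimizer/paths.h"

    /* Sketch: ensure 'input_path' is ordered by root->sort_pathkeys. */
    static Path *
    ensure_sorted_sketch(PlannerInfo *root, RelOptInfo *rel, Path *input_path,
                         double limit_tuples)
    {
        if (pathkeys_contained_in(root->sort_pathkeys, input_path->pathkeys))
            return input_path;      /* already suitably ordered */

        return (Path *) create_sort_path(root, rel, input_path,
                                         root->sort_pathkeys, limit_tuples);
    }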

◆ create_subqueryscan_path()

SubqueryScanPath* create_subqueryscan_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
bool  trivial_pathtarget,
List *pathkeys,
Relids  required_outer 
)

Definition at line 2004 of file pathnode.c.

2007 {
2008  SubqueryScanPath *pathnode = makeNode(SubqueryScanPath);
2009 
2010  pathnode->path.pathtype = T_SubqueryScan;
2011  pathnode->path.parent = rel;
2012  pathnode->path.pathtarget = rel->reltarget;
2013  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
2014  required_outer);
2015  pathnode->path.parallel_aware = false;
2016  pathnode->path.parallel_safe = rel->consider_parallel &&
2017  subpath->parallel_safe;
2018  pathnode->path.parallel_workers = subpath->parallel_workers;
2019  pathnode->path.pathkeys = pathkeys;
2020  pathnode->subpath = subpath;
2021 
2022  cost_subqueryscan(pathnode, root, rel, pathnode->path.param_info,
2023  trivial_pathtarget);
2024 
2025  return pathnode;
2026 }
void cost_subqueryscan(SubqueryScanPath *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info, bool trivial_pathtarget)
Definition: costsize.c:1430

References RelOptInfo::consider_parallel, cost_subqueryscan(), get_baserel_parampathinfo(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, SubqueryScanPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, root, subpath(), and SubqueryScanPath::subpath.

Referenced by build_setop_child_paths(), reparameterize_path(), and set_subquery_pathlist().

◆ create_tablefuncscan_path()

Path* create_tablefuncscan_path ( PlannerInfo *root,
RelOptInfo *rel,
Relids  required_outer 
)

Definition at line 2060 of file pathnode.c.

2062 {
2063  Path *pathnode = makeNode(Path);
2064 
2065  pathnode->pathtype = T_TableFuncScan;
2066  pathnode->parent = rel;
2067  pathnode->pathtarget = rel->reltarget;
2068  pathnode->param_info = get_baserel_parampathinfo(root, rel,
2069  required_outer);
2070  pathnode->parallel_aware = false;
2071  pathnode->parallel_safe = rel->consider_parallel;
2072  pathnode->parallel_workers = 0;
2073  pathnode->pathkeys = NIL; /* result is always unordered */
2074 
2075  cost_tablefuncscan(pathnode, root, rel, pathnode->param_info);
2076 
2077  return pathnode;
2078 }
void cost_tablefuncscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1571

References RelOptInfo::consider_parallel, cost_tablefuncscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_tablefunc_pathlist().

◆ create_tidrangescan_path()

TidRangePath* create_tidrangescan_path ( PlannerInfo *root,
RelOptInfo *rel,
List *tidrangequals,
Relids  required_outer 
)

Definition at line 1208 of file pathnode.c.

1210 {
1211  TidRangePath *pathnode = makeNode(TidRangePath);
1212 
1213  pathnode->path.pathtype = T_TidRangeScan;
1214  pathnode->path.parent = rel;
1215  pathnode->path.pathtarget = rel->reltarget;
1216  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1217  required_outer);
1218  pathnode->path.parallel_aware = false;
1219  pathnode->path.parallel_safe = rel->consider_parallel;
1220  pathnode->path.parallel_workers = 0;
1221  pathnode->path.pathkeys = NIL; /* always unordered */
1222 
1223  pathnode->tidrangequals = tidrangequals;
1224 
1225  cost_tidrangescan(&pathnode->path, root, rel, tidrangequals,
1226  pathnode->path.param_info);
1227 
1228  return pathnode;
1229 }
void cost_tidrangescan(Path *path, PlannerInfo *root, RelOptInfo *baserel, List *tidrangequals, ParamPathInfo *param_info)
Definition: costsize.c:1339
List * tidrangequals
Definition: pathnodes.h:1836

References RelOptInfo::consider_parallel, cost_tidrangescan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, TidRangePath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, root, and TidRangePath::tidrangequals.

Referenced by create_tidscan_paths().

◆ create_tidscan_path()

TidPath* create_tidscan_path ( PlannerInfo *root,
RelOptInfo *rel,
List *tidquals,
Relids  required_outer 
)

Definition at line 1179 of file pathnode.c.

1181 {
1182  TidPath *pathnode = makeNode(TidPath);
1183 
1184  pathnode->path.pathtype = T_TidScan;
1185  pathnode->path.parent = rel;
1186  pathnode->path.pathtarget = rel->reltarget;
1187  pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1188  required_outer);
1189  pathnode->path.parallel_aware = false;
1190  pathnode->path.parallel_safe = rel->consider_parallel;
1191  pathnode->path.parallel_workers = 0;
1192  pathnode->path.pathkeys = NIL; /* always unordered */
1193 
1194  pathnode->tidquals = tidquals;
1195 
1196  cost_tidscan(&pathnode->path, root, rel, tidquals,
1197  pathnode->path.param_info);
1198 
1199  return pathnode;
1200 }
void cost_tidscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, List *tidquals, ParamPathInfo *param_info)
Definition: costsize.c:1249
List * tidquals
Definition: pathnodes.h:1824
Path path
Definition: pathnodes.h:1823

References RelOptInfo::consider_parallel, cost_tidscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, TidPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, root, and TidPath::tidquals.

Referenced by BuildParameterizedTidPaths(), and create_tidscan_paths().

◆ create_unique_path()

UniquePath* create_unique_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
SpecialJoinInfo *sjinfo 
)

Definition at line 1654 of file pathnode.c.

1656 {
1657  UniquePath *pathnode;
1658  Path sort_path; /* dummy for result of cost_sort */
1659  Path agg_path; /* dummy for result of cost_agg */
1660  MemoryContext oldcontext;
1661  int numCols;
1662 
1663  /* Caller made a mistake if subpath isn't cheapest_total ... */
1664  Assert(subpath == rel->cheapest_total_path);
1665  Assert(subpath->parent == rel);
1666  /* ... or if SpecialJoinInfo is the wrong one */
1667  Assert(sjinfo->jointype == JOIN_SEMI);
1668  Assert(bms_equal(rel->relids, sjinfo->syn_righthand));
1669 
1670  /* If result already cached, return it */
1671  if (rel->cheapest_unique_path)
1672  return (UniquePath *) rel->cheapest_unique_path;
1673 
1674  /* If it's not possible to unique-ify, return NULL */
1675  if (!(sjinfo->semi_can_btree || sjinfo->semi_can_hash))
1676  return NULL;
1677 
1678  /*
1679  * When called during GEQO join planning, we are in a short-lived memory
1680  * context. We must make sure that the path and any subsidiary data
1681  * structures created for a baserel survive the GEQO cycle, else the
1682  * baserel is trashed for future GEQO cycles. On the other hand, when we
1683  * are creating those for a joinrel during GEQO, we don't want them to
1684  * clutter the main planning context. Upshot is that the best solution is
1685  * to explicitly allocate memory in the same context the given RelOptInfo
1686  * is in.
1687  */
1688  oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
1689 
1690  pathnode = makeNode(UniquePath);
1691 
1692  pathnode->path.pathtype = T_Unique;
1693  pathnode->path.parent = rel;
1694  pathnode->path.pathtarget = rel->reltarget;
1695  pathnode->path.param_info = subpath->param_info;
1696  pathnode->path.parallel_aware = false;
1697  pathnode->path.parallel_safe = rel->consider_parallel &&
1698  subpath->parallel_safe;
1699  pathnode->path.parallel_workers = subpath->parallel_workers;
1700 
1701  /*
1702  * Assume the output is unsorted, since we don't necessarily have pathkeys
1703  * to represent it. (This might get overridden below.)
1704  */
1705  pathnode->path.pathkeys = NIL;
1706 
1707  pathnode->subpath = subpath;
1708 
1709  /*
1710  * Under GEQO and when planning child joins, the sjinfo might be
1711  * short-lived, so we'd better make copies of data structures we extract
1712  * from it.
1713  */
1714  pathnode->in_operators = copyObject(sjinfo->semi_operators);
1715  pathnode->uniq_exprs = copyObject(sjinfo->semi_rhs_exprs);
1716 
1717  /*
1718  * If the input is a relation and it has a unique index that proves the
1719  * semi_rhs_exprs are unique, then we don't need to do anything. Note
1720  * that relation_has_unique_index_for automatically considers restriction
1721  * clauses for the rel, as well.
1722  */
1723  if (rel->rtekind == RTE_RELATION && sjinfo->semi_can_btree &&
1724  relation_has_unique_index_for(root, rel, NIL,
1725  sjinfo->semi_rhs_exprs,
1726  sjinfo->semi_operators))
1727  {
1728  pathnode->umethod = UNIQUE_PATH_NOOP;
1729  pathnode->path.rows = rel->rows;
1730  pathnode->path.startup_cost = subpath->startup_cost;
1731  pathnode->path.total_cost = subpath->total_cost;
1732  pathnode->path.pathkeys = subpath->pathkeys;
1733 
1734  rel->cheapest_unique_path = (Path *) pathnode;
1735 
1736  MemoryContextSwitchTo(oldcontext);
1737 
1738  return pathnode;
1739  }
1740 
1741  /*
1742  * If the input is a subquery whose output must be unique already, then we
1743  * don't need to do anything. The test for uniqueness has to consider
1744  * exactly which columns we are extracting; for example "SELECT DISTINCT
1745  * x,y" doesn't guarantee that x alone is distinct. So we cannot check for
1746  * this optimization unless semi_rhs_exprs consists only of simple Vars
1747  * referencing subquery outputs. (Possibly we could do something with
1748  * expressions in the subquery outputs, too, but for now keep it simple.)
1749  */
1750  if (rel->rtekind == RTE_SUBQUERY)
1751  {
1752  RangeTblEntry *rte = planner_rt_fetch(rel->relid, root);
1753 
1754  if (query_supports_distinctness(rte->subquery))
1755  {
1756  List *sub_tlist_colnos;
1757 
1758  sub_tlist_colnos = translate_sub_tlist(sjinfo->semi_rhs_exprs,
1759  rel->relid);
1760 
1761  if (sub_tlist_colnos &&
1762  query_is_distinct_for(rte->subquery,
1763  sub_tlist_colnos,
1764  sjinfo->semi_operators))
1765  {
1766  pathnode->umethod = UNIQUE_PATH_NOOP;
1767  pathnode->path.rows = rel->rows;
1768  pathnode->path.startup_cost = subpath->startup_cost;
1769  pathnode->path.total_cost = subpath->total_cost;
1770  pathnode->path.pathkeys = subpath->pathkeys;
1771 
1772  rel->cheapest_unique_path = (Path *) pathnode;
1773 
1774  MemoryContextSwitchTo(oldcontext);
1775 
1776  return pathnode;
1777  }
1778  }
1779  }
1780 
1781  /* Estimate number of output rows */
1782  pathnode->path.rows = estimate_num_groups(root,
1783  sjinfo->semi_rhs_exprs,
1784  rel->rows,
1785  NULL,
1786  NULL);
1787  numCols = list_length(sjinfo->semi_rhs_exprs);
1788 
1789  if (sjinfo->semi_can_btree)
1790  {
1791  /*
1792  * Estimate cost for sort+unique implementation
1793  */
1794  cost_sort(&sort_path, root, NIL,
1795  subpath->total_cost,
1796  rel->rows,
1797  subpath->pathtarget->width,
1798  0.0,
1799  work_mem,
1800  -1.0);
1801 
1802  /*
1803  * Charge one cpu_operator_cost per comparison per input tuple. We
1804  * assume all columns get compared at most of the tuples. (XXX
1805  * probably this is an overestimate.) This should agree with
1806  * create_upper_unique_path.
1807  */
1808  sort_path.total_cost += cpu_operator_cost * rel->rows * numCols;
1809  }
1810 
1811  if (sjinfo->semi_can_hash)
1812  {
1813  /*
1814  * Estimate the overhead per hashtable entry at 64 bytes (same as in
1815  * planner.c).
1816  */
1817  int hashentrysize = subpath->pathtarget->width + 64;
1818 
1819  if (hashentrysize * pathnode->path.rows > get_hash_memory_limit())
1820  {
1821  /*
1822  * We should not try to hash. Hack the SpecialJoinInfo to
1823  * remember this, in case we come through here again.
1824  */
1825  sjinfo->semi_can_hash = false;
1826  }
1827  else
1828  cost_agg(&agg_path, root,
1829  AGG_HASHED, NULL,
1830  numCols, pathnode->path.rows,
1831  NIL,
1832  subpath->startup_cost,
1833  subpath->total_cost,
1834  rel->rows,
1835  subpath->pathtarget->width);
1836  }
1837 
1838  if (sjinfo->semi_can_btree && sjinfo->semi_can_hash)
1839  {
1840  if (agg_path.total_cost < sort_path.total_cost)
1841  pathnode->umethod = UNIQUE_PATH_HASH;
1842  else
1843  pathnode->umethod = UNIQUE_PATH_SORT;
1844  }
1845  else if (sjinfo->semi_can_btree)
1846  pathnode->umethod = UNIQUE_PATH_SORT;
1847  else if (sjinfo->semi_can_hash)
1848  pathnode->umethod = UNIQUE_PATH_HASH;
1849  else
1850  {
1851  /* we can get here only if we abandoned hashing above */
1852  MemoryContextSwitchTo(oldcontext);
1853  return NULL;
1854  }
1855 
1856  if (pathnode->umethod == UNIQUE_PATH_HASH)
1857  {
1858  pathnode->path.startup_cost = agg_path.startup_cost;
1859  pathnode->path.total_cost = agg_path.total_cost;
1860  }
1861  else
1862  {
1863  pathnode->path.startup_cost = sort_path.startup_cost;
1864  pathnode->path.total_cost = sort_path.total_cost;
1865  }
1866 
1867  rel->cheapest_unique_path = (Path *) pathnode;
1868 
1869  MemoryContextSwitchTo(oldcontext);
1870 
1871  return pathnode;
1872 }
bool query_is_distinct_for(Query *query, List *colnos, List *opids)
Definition: analyzejoins.c:995
bool query_supports_distinctness(Query *query)
Definition: analyzejoins.c:958
bool relation_has_unique_index_for(PlannerInfo *root, RelOptInfo *rel, List *restrictlist, List *exprlist, List *oprlist)
Definition: indxpath.c:3440
MemoryContext GetMemoryChunkContext(void *pointer)
Definition: mcxt.c:707
size_t get_hash_memory_limit(void)
Definition: nodeHash.c:3595
#define copyObject(obj)
Definition: nodes.h:224
@ JOIN_SEMI
Definition: nodes.h:307
static List * translate_sub_tlist(List *tlist, int relid)
Definition: pathnode.c:1934
@ UNIQUE_PATH_SORT
Definition: pathnodes.h:2022
@ UNIQUE_PATH_NOOP
Definition: pathnodes.h:2020
@ UNIQUE_PATH_HASH
Definition: pathnodes.h:2021
#define planner_rt_fetch(rti, root)
Definition: pathnodes.h:564
MemoryContextSwitchTo(old_ctx)
double estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, List **pgset, EstimationInfo *estinfo)
Definition: selfuncs.c:3416
Query * subquery
Definition: parsenodes.h:1114
List * semi_rhs_exprs
Definition: pathnodes.h:2909
Relids syn_righthand
Definition: pathnodes.h:2897
List * semi_operators
Definition: pathnodes.h:2908
Path * subpath
Definition: pathnodes.h:2028
List * uniq_exprs
Definition: pathnodes.h:2031
UniquePathMethod umethod
Definition: pathnodes.h:2029
List * in_operators
Definition: pathnodes.h:2030

References AGG_HASHED, Assert, bms_equal(), RelOptInfo::cheapest_total_path, RelOptInfo::cheapest_unique_path, RelOptInfo::consider_parallel, copyObject, cost_agg(), cost_sort(), cpu_operator_cost, estimate_num_groups(), get_hash_memory_limit(), GetMemoryChunkContext(), UniquePath::in_operators, JOIN_SEMI, SpecialJoinInfo::jointype, list_length(), makeNode, MemoryContextSwitchTo(), NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, UniquePath::path, Path::pathkeys, Path::pathtype, planner_rt_fetch, query_is_distinct_for(), query_supports_distinctness(), relation_has_unique_index_for(), RelOptInfo::relid, RelOptInfo::relids, RelOptInfo::reltarget, root, RelOptInfo::rows, Path::rows, RTE_RELATION, RTE_SUBQUERY, RelOptInfo::rtekind, SpecialJoinInfo::semi_can_btree, SpecialJoinInfo::semi_can_hash, SpecialJoinInfo::semi_operators, SpecialJoinInfo::semi_rhs_exprs, Path::startup_cost, subpath(), UniquePath::subpath, RangeTblEntry::subquery, SpecialJoinInfo::syn_righthand, Path::total_cost, translate_sub_tlist(), UniquePath::umethod, UniquePath::uniq_exprs, UNIQUE_PATH_HASH, UNIQUE_PATH_NOOP, UNIQUE_PATH_SORT, and work_mem.

Referenced by consider_parallel_nestloop(), hash_inner_and_outer(), join_is_legal(), match_unsorted_outer(), populate_joinrel_with_paths(), and sort_inner_and_outer().
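A hedged sketch of the caller pattern seen in the join-path builders listed above: try to unique-ify the inner side of a JOIN_SEMI and fall back when neither sorting nor hashing is possible. The inner rel, its cheapest_total_path, and the SpecialJoinInfo are assumed to come from the surrounding join-planning code; the helper itself is hypothetical.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical helper; not part of PostgreSQL. */
static Path *
unique_ified_inner_sketch(PlannerInfo *root, RelOptInfo *innerrel,
                          SpecialJoinInfo *sjinfo)
{
    UniquePath *upath;

    /* Must pass the rel's cheapest_total_path, per the Asserts above */
    upath = create_unique_path(root, innerrel,
                               innerrel->cheapest_total_path, sjinfo);
    if (upath == NULL)
        return NULL;            /* can't sort or hash; keep the plain semijoin */

    return (Path *) upath;      /* also cached in rel->cheapest_unique_path */
}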

◆ create_upper_unique_path()

UpperUniquePath* create_upper_unique_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
int  numCols,
double  numGroups 
)

Definition at line 3091 of file pathnode.c.

3096 {
3097  UpperUniquePath *pathnode = makeNode(UpperUniquePath);
3098 
3099  pathnode->path.pathtype = T_Unique;
3100  pathnode->path.parent = rel;
3101  /* Unique doesn't project, so use source path's pathtarget */
3102  pathnode->path.pathtarget = subpath->pathtarget;
3103  /* For now, assume we are above any joins, so no parameterization */
3104  pathnode->path.param_info = NULL;
3105  pathnode->path.parallel_aware = false;
3106  pathnode->path.parallel_safe = rel->consider_parallel &&
3107  subpath->parallel_safe;
3108  pathnode->path.parallel_workers = subpath->parallel_workers;
3109  /* Unique doesn't change the input ordering */
3110  pathnode->path.pathkeys = subpath->pathkeys;
3111 
3112  pathnode->subpath = subpath;
3113  pathnode->numkeys = numCols;
3114 
3115  /*
3116  * Charge one cpu_operator_cost per comparison per input tuple. We assume
3117  * all columns get compared at most of the tuples. (XXX probably this is
3118  * an overestimate.)
3119  */
3120  pathnode->path.startup_cost = subpath->startup_cost;
3121  pathnode->path.total_cost = subpath->total_cost +
3122  cpu_operator_cost * subpath->rows * numCols;
3123  pathnode->path.rows = numGroups;
3124 
3125  return pathnode;
3126 }

References RelOptInfo::consider_parallel, cpu_operator_cost, makeNode, UpperUniquePath::numkeys, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, UpperUniquePath::path, Path::pathkeys, Path::pathtype, Path::rows, Path::startup_cost, subpath(), UpperUniquePath::subpath, and Path::total_cost.

Referenced by create_final_distinct_paths(), create_partial_distinct_paths(), and generate_union_paths().
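For illustration only, a sketch of the create_final_distinct_paths()-style call: place a Unique node on top of a suitably presorted path. numDistinctCols and numGroups are assumed to have been computed by the caller (for example via estimate_num_groups()); the helper is hypothetical.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical helper; not part of PostgreSQL. */
static void
add_sorted_distinct_sketch(PlannerInfo *root, RelOptInfo *distinct_rel,
                           Path *presorted_path, int numDistinctCols,
                           double numGroups)
{
    /* Unique preserves the input ordering and costs one comparison
     * per column per input row, as described above. */
    add_path(distinct_rel, (Path *)
             create_upper_unique_path(root, distinct_rel, presorted_path,
                                      numDistinctCols, numGroups));
}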

◆ create_valuesscan_path()

Path* create_valuesscan_path ( PlannerInfo *root,
RelOptInfo *rel,
Relids  required_outer 
)

Definition at line 2086 of file pathnode.c.

2088 {
2089  Path *pathnode = makeNode(Path);
2090 
2091  pathnode->pathtype = T_ValuesScan;
2092  pathnode->parent = rel;
2093  pathnode->pathtarget = rel->reltarget;
2094  pathnode->param_info = get_baserel_parampathinfo(root, rel,
2095  required_outer);
2096  pathnode->parallel_aware = false;
2097  pathnode->parallel_safe = rel->consider_parallel;
2098  pathnode->parallel_workers = 0;
2099  pathnode->pathkeys = NIL; /* result is always unordered */
2100 
2101  cost_valuesscan(pathnode, root, rel, pathnode->param_info);
2102 
2103  return pathnode;
2104 }
void cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1627

References RelOptInfo::consider_parallel, cost_valuesscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_values_pathlist().

◆ create_windowagg_path()

WindowAggPath* create_windowagg_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
PathTarget *target,
List *windowFuncs,
List *runCondition,
WindowClause *winclause,
List *qual,
bool  topwindow 
)

Definition at line 3473 of file pathnode.c.

3482 {
3483  WindowAggPath *pathnode = makeNode(WindowAggPath);
3484 
3485  /* qual can only be set for the topwindow */
3486  Assert(qual == NIL || topwindow);
3487 
3488  pathnode->path.pathtype = T_WindowAgg;
3489  pathnode->path.parent = rel;
3490  pathnode->path.pathtarget = target;
3491  /* For now, assume we are above any joins, so no parameterization */
3492  pathnode->path.param_info = NULL;
3493  pathnode->path.parallel_aware = false;
3494  pathnode->path.parallel_safe = rel->consider_parallel &&
3495  subpath->parallel_safe;
3496  pathnode->path.parallel_workers = subpath->parallel_workers;
3497  /* WindowAgg preserves the input sort order */
3498  pathnode->path.pathkeys = subpath->pathkeys;
3499 
3500  pathnode->subpath = subpath;
3501  pathnode->winclause = winclause;
3502  pathnode->qual = qual;
3503  pathnode->runCondition = runCondition;
3504  pathnode->topwindow = topwindow;
3505 
3506  /*
3507  * For costing purposes, assume that there are no redundant partitioning
3508  * or ordering columns; it's not worth the trouble to deal with that
3509  * corner case here. So we just pass the unmodified list lengths to
3510  * cost_windowagg.
3511  */
3512  cost_windowagg(&pathnode->path, root,
3513  windowFuncs,
3514  winclause,
3515  subpath->startup_cost,
3516  subpath->total_cost,
3517  subpath->rows);
3518 
3519  /* add tlist eval cost for each output row */
3520  pathnode->path.startup_cost += target->cost.startup;
3521  pathnode->path.total_cost += target->cost.startup +
3522  target->cost.per_tuple * pathnode->path.rows;
3523 
3524  return pathnode;
3525 }
void cost_windowagg(Path *path, PlannerInfo *root, List *windowFuncs, WindowClause *winclause, Cost input_startup_cost, Cost input_total_cost, double input_tuples)
Definition: costsize.c:3047
List * runCondition
Definition: pathnodes.h:2322
Path * subpath
Definition: pathnodes.h:2319
WindowClause * winclause
Definition: pathnodes.h:2320

References Assert, RelOptInfo::consider_parallel, PathTarget::cost, cost_windowagg(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, WindowAggPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, WindowAggPath::qual, root, Path::rows, WindowAggPath::runCondition, QualCost::startup, Path::startup_cost, subpath(), WindowAggPath::subpath, WindowAggPath::topwindow, Path::total_cost, and WindowAggPath::winclause.

Referenced by create_one_window_path().
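A rough sketch of a create_one_window_path()-style call that wraps an already-sorted input in one WindowAgg level. All arguments are assumed to be prepared by the caller, and qual must stay NIL unless this is the topmost window (per the Assert above); the helper is hypothetical.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical helper; not part of PostgreSQL. */
static Path *
add_window_level_sketch(PlannerInfo *root, RelOptInfo *window_rel,
                        Path *sorted_path, PathTarget *window_target,
                        List *windowFuncs, WindowClause *wc)
{
    /* no run condition, no qual, and not necessarily the top window */
    return (Path *) create_windowagg_path(root, window_rel, sorted_path,
                                          window_target, windowFuncs,
                                          NIL, wc, NIL, false);
}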

◆ create_worktablescan_path()

Path* create_worktablescan_path ( PlannerInfo *root,
RelOptInfo *rel,
Relids  required_outer 
)

Definition at line 2190 of file pathnode.c.

2192 {
2193  Path *pathnode = makeNode(Path);
2194 
2195  pathnode->pathtype = T_WorkTableScan;
2196  pathnode->parent = rel;
2197  pathnode->pathtarget = rel->reltarget;
2198  pathnode->param_info = get_baserel_parampathinfo(root, rel,
2199  required_outer);
2200  pathnode->parallel_aware = false;
2201  pathnode->parallel_safe = rel->consider_parallel;
2202  pathnode->parallel_workers = 0;
2203  pathnode->pathkeys = NIL; /* result is always unordered */
2204 
2205  /* Cost is the same as for a regular CTE scan */
2206  cost_ctescan(pathnode, root, rel, pathnode->param_info);
2207 
2208  return pathnode;
2209 }

References RelOptInfo::consider_parallel, cost_ctescan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_worktable_pathlist().

◆ expand_planner_arrays()

void expand_planner_arrays ( PlannerInfo *root,
int  add_size 
)

Definition at line 163 of file relnode.c.

164 {
165  int new_size;
166 
167  Assert(add_size > 0);
168 
169  new_size = root->simple_rel_array_size + add_size;
170 
171  root->simple_rel_array =
172  repalloc0_array(root->simple_rel_array, RelOptInfo *, root->simple_rel_array_size, new_size);
173 
174  root->simple_rte_array =
175  repalloc0_array(root->simple_rte_array, RangeTblEntry *, root->simple_rel_array_size, new_size);
176 
177  if (root->append_rel_array)
178  root->append_rel_array =
179  repalloc0_array(root->append_rel_array, AppendRelInfo *, root->simple_rel_array_size, new_size);
180  else
181  root->append_rel_array =
182  palloc0_array(AppendRelInfo *, new_size);
183 
184  root->simple_rel_array_size = new_size;
185 }
#define palloc0_array(type, count)
Definition: fe_memutils.h:65
#define repalloc0_array(pointer, type, oldcount, count)
Definition: palloc.h:109
Size add_size(Size s1, Size s2)
Definition: shmem.c:493

References add_size(), Assert, palloc0_array, repalloc0_array, and root.

Referenced by expand_inherited_rtentry(), and expand_partitioned_rtentry().
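A minimal sketch of the expand_partitioned_rtentry()-style usage: reserve planner array slots before handing out new range-table indexes. num_new_children is an assumed, caller-computed count, and the helper is hypothetical.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical helper; not part of PostgreSQL. */
static void
reserve_child_slots_sketch(PlannerInfo *root, int num_new_children)
{
    if (num_new_children > 0)
        expand_planner_arrays(root, num_new_children);

    /* simple_rel_array, simple_rte_array and append_rel_array now have
     * num_new_children additional zero-filled slots */
}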

◆ fetch_upper_rel()

RelOptInfo* fetch_upper_rel ( PlannerInfo *root,
UpperRelationKind  kind,
Relids  relids 
)

Definition at line 1470 of file relnode.c.

1471 {
1472  RelOptInfo *upperrel;
1473  ListCell *lc;
1474 
1475  /*
1476  * For the moment, our indexing data structure is just a List for each
1477  * relation kind. If we ever get so many of one kind that this stops
1478  * working well, we can improve it. No code outside this function should
1479  * assume anything about how to find a particular upperrel.
1480  */
1481 
1482  /* If we already made this upperrel for the query, return it */
1483  foreach(lc, root->upper_rels[kind])
1484  {
1485  upperrel = (RelOptInfo *) lfirst(lc);
1486 
1487  if (bms_equal(upperrel->relids, relids))
1488  return upperrel;
1489  }
1490 
1491  upperrel = makeNode(RelOptInfo);
1492  upperrel->reloptkind = RELOPT_UPPER_REL;
1493  upperrel->relids = bms_copy(relids);
1494 
1495  /* cheap startup cost is interesting iff not all tuples to be retrieved */
1496  upperrel->consider_startup = (root->tuple_fraction > 0);
1497  upperrel->consider_param_startup = false;
1498  upperrel->consider_parallel = false; /* might get changed later */
1499  upperrel->reltarget = create_empty_pathtarget();
1500  upperrel->pathlist = NIL;
1501  upperrel->cheapest_startup_path = NULL;
1502  upperrel->cheapest_total_path = NULL;
1503  upperrel->cheapest_unique_path = NULL;
1504  upperrel->cheapest_parameterized_paths = NIL;
1505 
1506  root->upper_rels[kind] = lappend(root->upper_rels[kind], upperrel);
1507 
1508  return upperrel;
1509 }
@ RELOPT_UPPER_REL
Definition: pathnodes.h:825

References bms_copy(), bms_equal(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RelOptInfo::cheapest_unique_path, RelOptInfo::consider_parallel, RelOptInfo::consider_param_startup, RelOptInfo::consider_startup, create_empty_pathtarget(), lappend(), lfirst, makeNode, NIL, RelOptInfo::pathlist, RelOptInfo::relids, RELOPT_UPPER_REL, RelOptInfo::reloptkind, RelOptInfo::reltarget, and root.

Referenced by add_rtes_to_flat_rtable(), build_setop_child_paths(), create_distinct_paths(), create_ordered_paths(), create_partial_distinct_paths(), create_partial_grouping_paths(), create_window_paths(), generate_nonunion_paths(), generate_recursion_path(), generate_union_paths(), grouping_planner(), make_grouping_rel(), make_subplan(), preprocess_minmax_aggregates(), set_subquery_pathlist(), set_subquery_size_estimates(), SS_process_ctes(), standard_planner(), and subquery_planner().
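As a simple illustration, the grouping_planner()-style lookup of the final upper rel; passing NULL relids follows the convention of "the whole query level". The helper name is hypothetical.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical helper; not part of PostgreSQL. */
static RelOptInfo *
final_rel_sketch(PlannerInfo *root)
{
    /* builds the RelOptInfo on first call, returns the cached one after */
    return fetch_upper_rel(root, UPPERREL_FINAL, NULL);
}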

◆ find_base_rel()

◆ find_base_rel_ignore_join()

RelOptInfo* find_base_rel_ignore_join ( PlannerInfo *root,
int  relid 
)

Definition at line 454 of file relnode.c.

455 {
456  /* use an unsigned comparison to prevent negative array element access */
457  if ((uint32) relid < (uint32) root->simple_rel_array_size)
458  {
459  RelOptInfo *rel;
460  RangeTblEntry *rte;
461 
462  rel = root->simple_rel_array[relid];
463  if (rel)
464  return rel;
465 
466  /*
467  * We could just return NULL here, but for debugging purposes it seems
468  * best to actually verify that the relid is an outer join and not
469  * something weird.
470  */
471  rte = root->simple_rte_array[relid];
472  if (rte && rte->rtekind == RTE_JOIN && rte->jointype != JOIN_INNER)
473  return NULL;
474  }
475 
476  elog(ERROR, "no relation entry for relid %d", relid);
477 
478  return NULL; /* keep compiler quiet */
479 }
JoinType jointype
Definition: parsenodes.h:1161

References elog, ERROR, JOIN_INNER, RangeTblEntry::jointype, root, RTE_JOIN, and RangeTblEntry::rtekind.

Referenced by add_join_clause_to_rels(), create_lateral_join_info(), find_appinfos_by_relids(), and remove_join_clause_from_rels().

◆ find_base_rel_noerr()

RelOptInfo* find_base_rel_noerr ( PlannerInfo *root,
int  relid 
)

Definition at line 436 of file relnode.c.

437 {
438  /* use an unsigned comparison to prevent negative array element access */
439  if ((uint32) relid < (uint32) root->simple_rel_array_size)
440  return root->simple_rel_array[relid];
441  return NULL;
442 }

References root.

Referenced by examine_simple_variable().

◆ find_childrel_parents()

Relids find_childrel_parents ( PlannerInfo *root,
RelOptInfo *rel 
)

Definition at line 1521 of file relnode.c.

1522 {
1523  Relids result = NULL;
1524 
1525  Assert(rel->reloptkind == RELOPT_OTHER_MEMBER_REL);
1526  Assert(rel->relid > 0 && rel->relid < root->simple_rel_array_size);
1527 
1528  do
1529  {
1530  AppendRelInfo *appinfo = root->append_rel_array[rel->relid];
1531  Index prelid = appinfo->parent_relid;
1532 
1533  result = bms_add_member(result, prelid);
1534 
1535  /* traverse up to the parent rel, loop if it's also a child rel */
1536  rel = find_base_rel(root, prelid);
1537  } while (rel->reloptkind == RELOPT_OTHER_MEMBER_REL);
1538 
1539  Assert(rel->reloptkind == RELOPT_BASEREL);
1540 
1541  return result;
1542 }
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:815
unsigned int Index
Definition: c.h:614
RelOptInfo * find_base_rel(PlannerInfo *root, int relid)
Definition: relnode.c:414
Index parent_relid
Definition: pathnodes.h:2969

References Assert, bms_add_member(), find_base_rel(), AppendRelInfo::parent_relid, RelOptInfo::relid, RELOPT_BASEREL, RELOPT_OTHER_MEMBER_REL, RelOptInfo::reloptkind, and root.

Referenced by check_index_predicates(), and generate_implied_equalities_for_column().
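A small sketch of the kind of check a check_index_predicates()-style caller might perform with the result; referenced_relids is an assumed input set and the helper is hypothetical.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical helper; not part of PostgreSQL. */
static bool
some_ancestor_referenced_sketch(PlannerInfo *root, RelOptInfo *childrel,
                                Relids referenced_relids)
{
    Relids      parents = find_childrel_parents(root, childrel);

    /* true if any ancestor appendrel parent appears in the given set */
    return bms_overlap(parents, referenced_relids);
}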

◆ find_join_rel()

RelOptInfo* find_join_rel ( PlannerInfo *root,
Relids  relids 
)

Definition at line 527 of file relnode.c.

528 {
529  /*
530  * Switch to using hash lookup when list grows "too long". The threshold
531  * is arbitrary and is known only here.
532  */
533  if (!root->join_rel_hash && list_length(root->join_rel_list) > 32)
534  build_join_rel_hash(root);
535 
536  /*
537  * Use either hashtable lookup or linear search, as appropriate.
538  *
539  * Note: the seemingly redundant hashkey variable is used to avoid taking
540  * the address of relids; unless the compiler is exceedingly smart, doing
541  * so would force relids out of a register and thus probably slow down the
542  * list-search case.
543  */
544  if (root->join_rel_hash)
545  {
546  Relids hashkey = relids;
547  JoinHashEntry *hentry;
548 
549  hentry = (JoinHashEntry *) hash_search(root->join_rel_hash,
550  &hashkey,
551  HASH_FIND,
552  NULL);
553  if (hentry)
554  return hentry->join_rel;
555  }
556  else
557  {
558  ListCell *l;
559 
560  foreach(l, root->join_rel_list)
561  {
562  RelOptInfo *rel = (RelOptInfo *) lfirst(l);
563 
564  if (bms_equal(rel->relids, relids))
565  return rel;
566  }
567  }
568 
569  return NULL;
570 }
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:955
@ HASH_FIND
Definition: hsearch.h:113
static void build_join_rel_hash(PlannerInfo *root)
Definition: relnode.c:486
RelOptInfo * join_rel
Definition: relnode.c:41

References bms_equal(), build_join_rel_hash(), HASH_FIND, hash_search(), JoinHashEntry::join_rel, lfirst, list_length(), RelOptInfo::relids, and root.

Referenced by build_child_join_rel(), build_join_rel(), examine_variable(), find_join_input_rel(), get_matching_part_pairs(), and postgresPlanDirectModify().
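A hedged sketch of the lookup-before-build pattern used by build_join_rel(): probe for an existing joinrel and only construct a new one when the lookup misses. The helper is hypothetical.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical helper; not part of PostgreSQL. */
static RelOptInfo *
probe_joinrel_sketch(PlannerInfo *root, Relids joinrelids)
{
    RelOptInfo *joinrel = find_join_rel(root, joinrelids);

    if (joinrel != NULL)
        return joinrel;         /* already built for this relid set */

    /* a real caller would now construct and register the joinrel */
    return NULL;
}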

◆ find_param_path_info()

ParamPathInfo* find_param_path_info ( RelOptInfo *rel,
Relids  required_outer 
)

Definition at line 1901 of file relnode.c.

1902 {
1903  ListCell *lc;
1904 
1905  foreach(lc, rel->ppilist)
1906  {
1907  ParamPathInfo *ppi = (ParamPathInfo *) lfirst(lc);
1908 
1909  if (bms_equal(ppi->ppi_req_outer, required_outer))
1910  return ppi;
1911  }
1912 
1913  return NULL;
1914 }
Relids ppi_req_outer
Definition: pathnodes.h:1579

References bms_equal(), lfirst, ParamPathInfo::ppi_req_outer, and RelOptInfo::ppilist.

Referenced by get_appendrel_parampathinfo(), get_baserel_parampathinfo(), get_joinrel_parampathinfo(), and reparameterize_path_by_child().

◆ get_appendrel_parampathinfo()

ParamPathInfo* get_appendrel_parampathinfo ( RelOptInfo *appendrel,
Relids  required_outer 
)

Definition at line 1868 of file relnode.c.

1869 {
1870  ParamPathInfo *ppi;
1871 
1872  /* If rel has LATERAL refs, every path for it should account for them */
1873  Assert(bms_is_subset(appendrel->lateral_relids, required_outer));
1874 
1875  /* Unparameterized paths have no ParamPathInfo */
1876  if (bms_is_empty(required_outer))
1877  return NULL;
1878 
1879  Assert(!bms_overlap(appendrel->relids, required_outer));
1880 
1881  /* If we already have a PPI for this parameterization, just return it */
1882  if ((ppi = find_param_path_info(appendrel, required_outer)))
1883  return ppi;
1884 
1885  /* Else build the ParamPathInfo */
1886  ppi = makeNode(ParamPathInfo);
1887  ppi->ppi_req_outer = required_outer;
1888  ppi->ppi_rows = 0;
1889  ppi->ppi_clauses = NIL;
1890  ppi->ppi_serials = NULL;
1891  appendrel->ppilist = lappend(appendrel->ppilist, ppi);
1892 
1893  return ppi;
1894 }
bool bms_is_subset(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:412
ParamPathInfo * find_param_path_info(RelOptInfo *rel, Relids required_outer)
Definition: relnode.c:1901
Cardinality ppi_rows
Definition: pathnodes.h:1580
List * ppi_clauses
Definition: pathnodes.h:1581
Bitmapset * ppi_serials
Definition: pathnodes.h:1582

References Assert, bms_is_empty, bms_is_subset(), bms_overlap(), find_param_path_info(), lappend(), RelOptInfo::lateral_relids, makeNode, NIL, ParamPathInfo::ppi_clauses, ParamPathInfo::ppi_req_outer, ParamPathInfo::ppi_rows, ParamPathInfo::ppi_serials, RelOptInfo::ppilist, and RelOptInfo::relids.

Referenced by create_append_path(), and create_merge_append_path().

◆ get_baserel_parampathinfo()

ParamPathInfo* get_baserel_parampathinfo ( PlannerInfo *root,
RelOptInfo *baserel,
Relids  required_outer 
)

Definition at line 1557 of file relnode.c.

1559 {
1560  ParamPathInfo *ppi;
1561  Relids joinrelids;
1562  List *pclauses;
1563  List *eqclauses;
1564  Bitmapset *pserials;
1565  double rows;
1566  ListCell *lc;
1567 
1568  /* If rel has LATERAL refs, every path for it should account for them */
1569  Assert(bms_is_subset(baserel->lateral_relids, required_outer));
1570 
1571  /* Unparameterized paths have no ParamPathInfo */
1572  if (bms_is_empty(required_outer))
1573  return NULL;
1574 
1575  Assert(!bms_overlap(baserel->relids, required_outer));
1576 
1577  /* If we already have a PPI for this parameterization, just return it */
1578  if ((ppi = find_param_path_info(baserel, required_outer)))
1579  return ppi;
1580 
1581  /*
1582  * Identify all joinclauses that are movable to this base rel given this
1583  * parameterization.
1584  */
1585  joinrelids = bms_union(baserel->relids, required_outer);
1586  pclauses = NIL;
1587  foreach(lc, baserel->joininfo)
1588  {
1589  RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1590 
1591  if (join_clause_is_movable_into(rinfo,
1592  baserel->relids,
1593  joinrelids))
1594  pclauses = lappend(pclauses, rinfo);
1595  }
1596 
1597  /*
1598  * Add in joinclauses generated by EquivalenceClasses, too. (These
1599  * necessarily satisfy join_clause_is_movable_into; but in assert-enabled
1600  * builds, let's verify that.)
1601  */
1602  eqclauses = generate_join_implied_equalities(root,
1603  joinrelids,
1604  required_outer,
1605  baserel,
1606  NULL);
1607 #ifdef USE_ASSERT_CHECKING
1608  foreach(lc, eqclauses)
1609  {
1610  RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1611 
1612  Assert(join_clause_is_movable_into(rinfo,
1613  baserel->relids,
1614  joinrelids));
1615  }
1616 #endif
1617  pclauses = list_concat(pclauses, eqclauses);
1618 
1619  /* Compute set of serial numbers of the enforced clauses */
1620  pserials = NULL;
1621  foreach(lc, pclauses)
1622  {
1623  RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1624 
1625  pserials = bms_add_member(pserials, rinfo->rinfo_serial);
1626  }
1627 
1628  /* Estimate the number of rows returned by the parameterized scan */
1629  rows = get_parameterized_baserel_size(root, baserel, pclauses);
1630 
1631  /* And now we can build the ParamPathInfo */
1632  ppi = makeNode(ParamPathInfo);
1633  ppi->ppi_req_outer = required_outer;
1634  ppi->ppi_rows = rows;
1635  ppi->ppi_clauses = pclauses;
1636  ppi->ppi_serials = pserials;
1637  baserel->ppilist = lappend(baserel->ppilist, ppi);
1638 
1639  return ppi;
1640 }
double get_parameterized_baserel_size(PlannerInfo *root, RelOptInfo *rel, List *param_clauses)
Definition: costsize.c:5243
List * generate_join_implied_equalities(PlannerInfo *root, Relids join_relids, Relids outer_relids, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo)
Definition: equivclass.c:1381
bool join_clause_is_movable_into(RestrictInfo *rinfo, Relids currentrelids, Relids current_and_outer)
Definition: restrictinfo.c:670

References Assert, bms_add_member(), bms_is_empty, bms_is_subset(), bms_overlap(), bms_union(), find_param_path_info(), generate_join_implied_equalities(), get_parameterized_baserel_size(), join_clause_is_movable_into(), RelOptInfo::joininfo, lappend(), RelOptInfo::lateral_relids, lfirst, list_concat(), makeNode, NIL, ParamPathInfo::ppi_clauses, ParamPathInfo::ppi_req_outer, ParamPathInfo::ppi_rows, ParamPathInfo::ppi_serials, RelOptInfo::ppilist, RelOptInfo::relids, RestrictInfo::rinfo_serial, and root.

Referenced by create_append_path(), create_bitmap_and_path(), create_bitmap_heap_path(), create_bitmap_or_path(), create_ctescan_path(), create_foreignscan_path(), create_functionscan_path(), create_gather_merge_path(), create_gather_path(), create_index_path(), create_namedtuplestorescan_path(), create_resultscan_path(), create_samplescan_path(), create_seqscan_path(), create_subqueryscan_path(), create_tablefuncscan_path(), create_tidrangescan_path(), create_tidscan_path(), create_valuesscan_path(), create_worktablescan_path(), postgresGetForeignPaths(), and reparameterize_path().
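The common constructor pattern, shown as a sketch (it mirrors what the create_*scan_path() functions above do): attach the ParamPathInfo matching the requested parameterization to a freshly built scan path. The helper is hypothetical.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical helper; not part of PostgreSQL. */
static void
attach_scan_param_info_sketch(PlannerInfo *root, RelOptInfo *rel,
                              Path *scan_path, Relids required_outer)
{
    /* returns NULL when required_outer is empty, i.e. unparameterized */
    scan_path->param_info = get_baserel_parampathinfo(root, rel,
                                                      required_outer);
}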

◆ get_joinrel_parampathinfo()

ParamPathInfo* get_joinrel_parampathinfo ( PlannerInfo *root,
RelOptInfo *joinrel,
Path *outer_path,
Path *inner_path,
SpecialJoinInfo *sjinfo,
Relids  required_outer,
List **  restrict_clauses 
)

Definition at line 1671 of file relnode.c.

1677 {
1678  ParamPathInfo *ppi;
1679  Relids join_and_req;
1680  Relids outer_and_req;
1681  Relids inner_and_req;
1682  List *pclauses;
1683  List *eclauses;
1684  List *dropped_ecs;
1685  double rows;
1686  ListCell *lc;
1687 
1688  /* If rel has LATERAL refs, every path for it should account for them */
1689  Assert(bms_is_subset(joinrel->lateral_relids, required_outer));
1690 
1691  /* Unparameterized paths have no ParamPathInfo or extra join clauses */
1692  if (bms_is_empty(required_outer))
1693  return NULL;
1694 
1695  Assert(!bms_overlap(joinrel->relids, required_outer));
1696 
1697  /*
1698  * Identify all joinclauses that are movable to this join rel given this
1699  * parameterization. These are the clauses that are movable into this
1700  * join, but not movable into either input path. Treat an unparameterized
1701  * input path as not accepting parameterized clauses (because it won't,
1702  * per the shortcut exit above), even though the joinclause movement rules
1703  * might allow the same clauses to be moved into a parameterized path for
1704  * that rel.
1705  */
1706  join_and_req = bms_union(joinrel->relids, required_outer);
1707  if (outer_path->param_info)
1708  outer_and_req = bms_union(outer_path->parent->relids,
1709  PATH_REQ_OUTER(outer_path));
1710  else
1711  outer_and_req = NULL; /* outer path does not accept parameters */
1712  if (inner_path->param_info)
1713  inner_and_req = bms_union(inner_path->parent->relids,
1714  PATH_REQ_OUTER(inner_path));
1715  else
1716  inner_and_req = NULL; /* inner path does not accept parameters */
1717 
1718  pclauses = NIL;
1719  foreach(lc, joinrel->joininfo)
1720  {
1721  RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1722 
1723  if (join_clause_is_movable_into(rinfo,
1724  joinrel->relids,
1725  join_and_req) &&
1726  !join_clause_is_movable_into(rinfo,
1727  outer_path->parent->relids,
1728  outer_and_req) &&
1729  !join_clause_is_movable_into(rinfo,
1730  inner_path->parent->relids,
1731  inner_and_req))
1732  pclauses = lappend(pclauses, rinfo);
1733  }
1734 
1735  /* Consider joinclauses generated by EquivalenceClasses, too */
1736  eclauses = generate_join_implied_equalities(root,
1737  join_and_req,
1738  required_outer,
1739  joinrel,
1740  NULL);
1741  /* We only want ones that aren't movable to lower levels */
1742  dropped_ecs = NIL;
1743  foreach(lc, eclauses)
1744  {
1745  RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1746 
1747  Assert(join_clause_is_movable_into(rinfo,
1748  joinrel->relids,
1749  join_and_req));
1750  if (join_clause_is_movable_into(rinfo,
1751  outer_path->parent->relids,
1752  outer_and_req))
1753  continue; /* drop if movable into LHS */
1754  if (join_clause_is_movable_into(rinfo,
1755  inner_path->parent->relids,
1756  inner_and_req))
1757  {
1758  /* drop if movable into RHS, but remember EC for use below */
1759  Assert(rinfo->left_ec == rinfo->right_ec);
1760  dropped_ecs = lappend(dropped_ecs, rinfo->left_ec);
1761  continue;
1762  }
1763  pclauses = lappend(pclauses, rinfo);
1764  }
1765 
1766  /*
1767  * EquivalenceClasses are harder to deal with than we could wish, because
1768  * of the fact that a given EC can generate different clauses depending on
1769  * context. Suppose we have an EC {X.X, Y.Y, Z.Z} where X and Y are the
1770  * LHS and RHS of the current join and Z is in required_outer, and further
1771  * suppose that the inner_path is parameterized by both X and Z. The code
1772  * above will have produced either Z.Z = X.X or Z.Z = Y.Y from that EC,
1773  * and in the latter case will have discarded it as being movable into the
1774  * RHS. However, the EC machinery might have produced either Y.Y = X.X or
1775  * Y.Y = Z.Z as the EC enforcement clause within the inner_path; it will
1776  * not have produced both, and we can't readily tell from here which one
1777  * it did pick. If we add no clause to this join, we'll end up with
1778  * insufficient enforcement of the EC; either Z.Z or X.X will fail to be
1779  * constrained to be equal to the other members of the EC. (When we come
1780  * to join Z to this X/Y path, we will certainly drop whichever EC clause
1781  * is generated at that join, so this omission won't get fixed later.)
1782  *
1783  * To handle this, for each EC we discarded such a clause from, try to
1784  * generate a clause connecting the required_outer rels to the join's LHS
1785  * ("Z.Z = X.X" in the terms of the above example). If successful, and if
1786  * the clause can't be moved to the LHS, add it to the current join's
1787  * restriction clauses. (If an EC cannot generate such a clause then it
1788  * has nothing that needs to be enforced here, while if the clause can be
1789  * moved into the LHS then it should have been enforced within that path.)
1790  *
1791  * Note that we don't need similar processing for ECs whose clause was
1792  * considered to be movable into the LHS, because the LHS can't refer to
1793  * the RHS so there is no comparable ambiguity about what it might
1794  * actually be enforcing internally.
1795  */
1796  if (dropped_ecs)
1797  {
1798  Relids real_outer_and_req;
1799 
1800  real_outer_and_req = bms_union(outer_path->parent->relids,
1801  required_outer);
1802  eclauses =
1803  generate_join_implied_equalities_for_ecs(root,
1804  dropped_ecs,
1805  real_outer_and_req,
1806  required_outer,
1807  outer_path->parent);
1808  foreach(lc, eclauses)
1809  {
1810  RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1811 
1812  Assert(join_clause_is_movable_into(rinfo,
1813  outer_path->parent->relids,
1814  real_outer_and_req));
1815  if (!join_clause_is_movable_into(rinfo,
1816  outer_path->parent->relids,
1817  outer_and_req))
1818  pclauses = lappend(pclauses, rinfo);
1819  }
1820  }
1821 
1822  /*
1823  * Now, attach the identified moved-down clauses to the caller's
1824  * restrict_clauses list. By using list_concat in this order, we leave
1825  * the original list structure of restrict_clauses undamaged.
1826  */
1827  *restrict_clauses = list_concat(pclauses, *restrict_clauses);
1828 
1829  /* If we already have a PPI for this parameterization, just return it */
1830  if ((ppi = find_param_path_info(joinrel, required_outer)))
1831  return ppi;
1832 
1833  /* Estimate the number of rows returned by the parameterized join */
1834  rows = get_parameterized_joinrel_size(root, joinrel,
1835  outer_path,
1836  inner_path,
1837  sjinfo,
1838  *restrict_clauses);
1839 
1840  /*
1841  * And now we can build the ParamPathInfo. No point in saving the
1842  * input-pair-dependent clause list, though.
1843  *
1844  * Note: in GEQO mode, we'll be called in a temporary memory context, but
1845  * the joinrel structure is there too, so no problem.
1846  */
1847  ppi = makeNode(ParamPathInfo);
1848  ppi->ppi_req_outer = required_outer;
1849  ppi->ppi_rows = rows;
1850  ppi->ppi_clauses = NIL;
1851  ppi->ppi_serials = NULL;
1852  joinrel->ppilist = lappend(joinrel->ppilist, ppi);
1853 
1854  return ppi;
1855 }
double get_parameterized_joinrel_size(PlannerInfo *root, RelOptInfo *rel, Path *outer_path, Path *inner_path, SpecialJoinInfo *sjinfo, List *restrict_clauses)
Definition: costsize.c:5324
List * generate_join_implied_equalities_for_ecs(PlannerInfo *root, List *eclasses, Relids join_relids, Relids outer_relids, RelOptInfo *inner_rel)
Definition: equivclass.c:1481

References Assert, bms_is_empty, bms_is_subset(), bms_overlap(), bms_union(), find_param_path_info(), generate_join_implied_equalities(), generate_join_implied_equalities_for_ecs(), get_parameterized_joinrel_size(), join_clause_is_movable_into(), RelOptInfo::joininfo, lappend(), RelOptInfo::lateral_relids, lfirst, list_concat(), makeNode, NIL, PATH_REQ_OUTER, ParamPathInfo::ppi_clauses, ParamPathInfo::ppi_req_outer, ParamPathInfo::ppi_rows, ParamPathInfo::ppi_serials, RelOptInfo::ppilist, RelOptInfo::relids, and root.

Referenced by create_hashjoin_path(), create_mergejoin_path(), and create_nestloop_path().

◆ get_param_path_clause_serials()

Bitmapset* get_param_path_clause_serials ( Path *path )

Definition at line 1922 of file relnode.c.

1923 {
1924  if (path->param_info == NULL)
1925  return NULL; /* not parameterized */
1926  if (IsA(path, NestPath) ||
1927  IsA(path, MergePath) ||
1928  IsA(path, HashPath))
1929  {
1930  /*
1931  * For a join path, combine clauses enforced within either input path
1932  * with those enforced as joinrestrictinfo in this path. Note that
1933  * joinrestrictinfo may include some non-pushed-down clauses, but for
1934  * current purposes it's okay if we include those in the result. (To
1935  * be more careful, we could check for clause_relids overlapping the
1936  * path parameterization, but it's not worth the cycles for now.)
1937  */
1938  JoinPath *jpath = (JoinPath *) path;
1939  Bitmapset *pserials;
1940  ListCell *lc;
1941 
1942  pserials = NULL;
1943  pserials = bms_add_members(pserials,
1944  get_param_path_clause_serials(jpath->outerjoinpath));
1945  pserials = bms_add_members(pserials,
1946  get_param_path_clause_serials(jpath->innerjoinpath));
1947  foreach(lc, jpath->joinrestrictinfo)
1948  {
1949  RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1950 
1951  pserials = bms_add_member(pserials, rinfo->rinfo_serial);
1952  }
1953  return pserials;
1954  }
1955  else if (IsA(path, AppendPath))
1956  {
1957  /*
1958  * For an appendrel, take the intersection of the sets of clauses
1959  * enforced in each input path.
1960  */
1961  AppendPath *apath = (AppendPath *) path;
1962  Bitmapset *pserials;
1963  ListCell *lc;
1964 
1965  pserials = NULL;
1966  foreach(lc, apath->subpaths)
1967  {
1968  Path *subpath = (Path *) lfirst(lc);
1969  Bitmapset *subserials;
1970 
1971  subserials = get_param_path_clause_serials(subpath);
1972  if (lc == list_head(apath->subpaths))
1973  pserials = bms_copy(subserials);
1974  else
1975  pserials = bms_int_members(pserials, subserials);
1976  }
1977  return pserials;
1978  }
1979  else if (IsA(path, MergeAppendPath))
1980  {
1981  /* Same as AppendPath case */
1982  MergeAppendPath *apath = (MergeAppendPath *) path;
1983  Bitmapset *pserials;
1984  ListCell *lc;
1985 
1986  pserials = NULL;
1987  foreach(lc, apath->subpaths)
1988  {
1989  Path *subpath = (Path *) lfirst(lc);
1990  Bitmapset *subserials;
1991 
1992  subserials = get_param_path_clause_serials(subpath);
1993  if (lc == list_head(apath->subpaths))
1994  pserials = bms_copy(subserials);
1995  else
1996  pserials = bms_int_members(pserials, subserials);
1997  }
1998  return pserials;
1999  }
2000  else
2001  {
2002  /*
2003  * Otherwise, it's a baserel path and we can use the
2004  * previously-computed set of serial numbers.
2005  */
2006  return path->param_info->ppi_serials;
2007  }
2008 }
Bitmapset * bms_int_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:1109
static ListCell * list_head(const List *l)
Definition: pg_list.h:128

References bms_add_member(), bms_add_members(), bms_copy(), bms_int_members(), JoinPath::innerjoinpath, IsA, JoinPath::joinrestrictinfo, lfirst, list_head(), JoinPath::outerjoinpath, RestrictInfo::rinfo_serial, subpath(), AppendPath::subpaths, and MergeAppendPath::subpaths.

Referenced by create_nestloop_path().

◆ min_join_parameterization()

Relids min_join_parameterization ( PlannerInfo *root,
Relids  joinrelids,
RelOptInfo *outer_rel,
RelOptInfo *inner_rel 
)

Definition at line 1034 of file relnode.c.

1038 {
1039  Relids result;
1040 
1041  /*
1042  * Basically we just need the union of the inputs' lateral_relids, less
1043  * whatever is already in the join.
1044  *
1045  * It's not immediately obvious that this is a valid way to compute the
1046  * result, because it might seem that we're ignoring possible lateral refs
1047  * of PlaceHolderVars that are due to be computed at the join but not in
1048  * either input. However, because create_lateral_join_info() already
1049  * charged all such PHV refs to each member baserel of the join, they'll
1050  * be accounted for already in the inputs' lateral_relids. Likewise, we
1051  * do not need to worry about doing transitive closure here, because that
1052  * was already accounted for in the original baserel lateral_relids.
1053  */
1054  result = bms_union(outer_rel->lateral_relids, inner_rel->lateral_relids);
1055  result = bms_del_members(result, joinrelids);
1056  return result;
1057 }

References bms_del_members(), bms_union(), and RelOptInfo::lateral_relids.

Referenced by build_join_rel(), and join_is_legal().

◆ path_is_reparameterizable_by_child()

bool path_is_reparameterizable_by_child ( Path *path,
RelOptInfo *child_rel 
)

Definition at line 4399 of file pathnode.c.

4400 {
4401 #define REJECT_IF_PATH_NOT_REPARAMETERIZABLE(path) \
4402 do { \
4403  if (!path_is_reparameterizable_by_child(path, child_rel)) \
4404  return false; \
4405 } while(0)
4406 
4407 #define REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(pathlist) \
4408 do { \
4409  if (!pathlist_is_reparameterizable_by_child(pathlist, child_rel)) \
4410  return false; \
4411 } while(0)
4412 
4413  /*
4414  * If the path is not parameterized by the parent of the given relation,
4415  * it doesn't need reparameterization.
4416  */
4417  if (!path->param_info ||
4418  !bms_overlap(PATH_REQ_OUTER(path), child_rel->top_parent_relids))
4419  return true;
4420 
4421  /*
4422  * Check that the path type is one that reparameterize_path_by_child() can
4423  * handle, and recursively check subpaths.
4424  */
4425  switch (nodeTag(path))
4426  {
4427  case T_Path:
4428  case T_IndexPath:
4429  break;
4430 
4431  case T_BitmapHeapPath:
4432  {
4433  BitmapHeapPath *bhpath = (BitmapHeapPath *) path;
4434 
4435  REJECT_IF_PATH_NOT_REPARAMETERIZABLE(bhpath->bitmapqual);
4436  }
4437  break;
4438 
4439  case T_BitmapAndPath:
4440  {
4441  BitmapAndPath *bapath = (BitmapAndPath *) path;
4442 
4443  REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(bapath->bitmapquals);
4444  }
4445  break;
4446 
4447  case T_BitmapOrPath:
4448  {
4449  BitmapOrPath *bopath = (BitmapOrPath *) path;
4450 
4451  REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(bopath->bitmapquals);
4452  }
4453  break;
4454 
4455  case T_ForeignPath:
4456  {
4457  ForeignPath *fpath = (ForeignPath *) path;
4458 
4459  if (fpath->fdw_outerpath)
4460  REJECT_IF_PATH_NOT_REPARAMETERIZABLE(fpath->fdw_outerpath);
4461  }
4462  break;
4463 
4464  case T_CustomPath:
4465  {
4466  CustomPath *cpath = (CustomPath *) path;
4467 
4468  REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(cpath->custom_paths);
4469  }
4470  break;
4471 
4472  case T_NestPath:
4473  case T_MergePath:
4474  case T_HashPath:
4475  {
4476  JoinPath *jpath = (JoinPath *) path;
4477 
4478  REJECT_IF_PATH_NOT_REPARAMETERIZABLE(jpath->outerjoinpath);
4479  REJECT_IF_PATH_NOT_REPARAMETERIZABLE(jpath->innerjoinpath);
4480  }
4481  break;
4482 
4483  case T_AppendPath:
4484  {
4485  AppendPath *apath = (AppendPath *) path;
4486 
4487  REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(apath->subpaths);
4488  }
4489  break;
4490 
4491  case T_MaterialPath:
4492  {
4493  MaterialPath *mpath = (MaterialPath *) path;
4494 
4495  REJECT_IF_PATH_NOT_REPARAMETERIZABLE(mpath->subpath);
4496  }
4497  break;
4498 
4499  case T_MemoizePath:
4500  {
4501  MemoizePath *mpath = (MemoizePath *) path;
4502 
4503  REJECT_IF_PATH_NOT_REPARAMETERIZABLE(mpath->subpath);
4504  }
4505  break;
4506 
4507  case T_GatherPath:
4508  {
4509  GatherPath *gpath = (GatherPath *) path;
4510 
4511  REJECT_IF_PATH_NOT_REPARAMETERIZABLE(gpath->subpath);
4512  }
4513  break;
4514 
4515  default:
4516  /* We don't know how to reparameterize this path. */
4517  return false;
4518  }
4519 
4520  return true;
4521 }
#define nodeTag(nodeptr)
Definition: nodes.h:133
#define REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(pathlist)
#define REJECT_IF_PATH_NOT_REPARAMETERIZABLE(path)
List * custom_paths
Definition: pathnodes.h:1908

References BitmapHeapPath::bitmapqual, BitmapAndPath::bitmapquals, BitmapOrPath::bitmapquals, bms_overlap(), CustomPath::custom_paths, ForeignPath::fdw_outerpath, JoinPath::innerjoinpath, nodeTag, JoinPath::outerjoinpath, PATH_REQ_OUTER, REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE, REJECT_IF_PATH_NOT_REPARAMETERIZABLE, MaterialPath::subpath, MemoizePath::subpath, GatherPath::subpath, AppendPath::subpaths, and RelOptInfo::top_parent_relids.

Referenced by pathlist_is_reparameterizable_by_child(), try_nestloop_path(), and try_partial_nestloop_path().

◆ reparameterize_path()

Path* reparameterize_path ( PlannerInfo *root,
Path *path,
Relids  required_outer,
double  loop_count 
)

Definition at line 3937 of file pathnode.c.

3940 {
3941  RelOptInfo *rel = path->parent;
3942 
3943  /* Can only increase, not decrease, path's parameterization */
3944  if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer))
3945  return NULL;
3946  switch (path->pathtype)
3947  {
3948  case T_SeqScan:
3949  return create_seqscan_path(root, rel, required_outer, 0);
3950  case T_SampleScan:
3951  return (Path *) create_samplescan_path(root, rel, required_outer);
3952  case T_IndexScan:
3953  case T_IndexOnlyScan:
3954  {
3955  IndexPath *ipath = (IndexPath *) path;
3956  IndexPath *newpath = makeNode(IndexPath);
3957 
3958  /*
3959  * We can't use create_index_path directly, and would not want
3960  * to because it would re-compute the indexqual conditions
3961  * which is wasted effort. Instead we hack things a bit:
3962  * flat-copy the path node, revise its param_info, and redo
3963  * the cost estimate.
3964  */
3965  memcpy(newpath, ipath, sizeof(IndexPath));
3966  newpath->path.param_info =
3967  get_baserel_parampathinfo(root, rel, required_outer);
3968  cost_index(newpath, root, loop_count, false);
3969  return (Path *) newpath;
3970  }
3971  case T_BitmapHeapScan:
3972  {
3973  BitmapHeapPath *bpath = (BitmapHeapPath *) path;
3974 
3975  return (Path *) create_bitmap_heap_path(root,
3976  rel,
3977  bpath->bitmapqual,
3978  required_outer,
3979  loop_count, 0);
3980  }
3981  case T_SubqueryScan:
3982  {
3983  SubqueryScanPath *spath = (SubqueryScanPath *) path;
3984  Path *subpath = spath->subpath;
3985  bool trivial_pathtarget;
3986 
3987  /*
3988  * If existing node has zero extra cost, we must have decided
3989  * its target is trivial. (The converse is not true, because
3990  * it might have a trivial target but quals to enforce; but in
3991  * that case the new node will too, so it doesn't matter
3992  * whether we get the right answer here.)
3993  */
3994  trivial_pathtarget =
3995  (subpath->total_cost == spath->path.total_cost);
3996 
3997  return (Path *) create_subqueryscan_path(root,
3998  rel,
3999  subpath,
4000  trivial_pathtarget,
4001  spath->path.pathkeys,
4002  required_outer);
4003  }
4004  case T_Result:
4005  /* Supported only for RTE_RESULT scan paths */
4006  if (IsA(path, Path))
4007  return create_resultscan_path(root, rel, required_outer);
4008  break;
4009  case T_Append:
4010  {
4011  AppendPath *apath = (AppendPath *) path;
4012  List *childpaths = NIL;
4013  List *partialpaths = NIL;
4014  int i;
4015  ListCell *lc;
4016 
4017  /* Reparameterize the children */
4018  i = 0;
4019  foreach(lc, apath->subpaths)
4020  {
4021  Path *spath = (Path *) lfirst(lc);
4022 
4023  spath = reparameterize_path(root, spath,
4024  required_outer,
4025  loop_count);
4026  if (spath == NULL)
4027  return NULL;
4028  /* We have to re-split the regular and partial paths */
4029  if (i < apath->first_partial_path)
4030  childpaths = lappend(childpaths, spath);
4031  else
4032  partialpaths = lappend(partialpaths, spath);
4033  i++;
4034  }
4035  return (Path *)
4036  create_append_path(root, rel, childpaths, partialpaths,
4037  apath->path.pathkeys, required_outer,
4038  apath->path.parallel_workers,
4039  apath->path.parallel_aware,
4040  -1);
4041  }
4042  case T_Material:
4043  {
4044  MaterialPath *mpath = (MaterialPath *) path;
4045  Path *spath = mpath->subpath;
4046 
4047  spath = reparameterize_path(root, spath,
4048  required_outer,
4049  loop_count);
4050  if (spath == NULL)
4051  return NULL;
4052  return (Path *) create_material_path(rel, spath);
4053  }
4054  case T_Memoize:
4055  {
4056  MemoizePath *mpath = (MemoizePath *) path;
4057  Path *spath = mpath->subpath;
4058 
4059  spath = reparameterize_path(root, spath,
4060  required_outer,
4061  loop_count);
4062  if (spath == NULL)
4063  return NULL;
4064  return (Path *) create_memoize_path(root, rel,
4065  spath,
4066  mpath->param_exprs,
4067  mpath->hash_operators,
4068  mpath->singlerow,
4069  mpath->binary_mode,
4070  mpath->calls);
4071  }
4072  default:
4073  break;
4074  }
4075  return NULL;
4076 }
int i
Definition: isn.c:73
AppendPath * create_append_path(PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *partial_subpaths, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows)
Definition: pathnode.c:1244
Path * create_resultscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
Definition: pathnode.c:2164
Path * create_samplescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
Definition: pathnode.c:952
MemoizePath * create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *param_exprs, List *hash_operators, bool singlerow, bool binary_mode, double calls)
Definition: pathnode.c:1598
Path * reparameterize_path(PlannerInfo *root, Path *path, Relids required_outer, double loop_count)
Definition: pathnode.c:3937
SubqueryScanPath * create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, bool trivial_pathtarget, List *pathkeys, Relids required_outer)
Definition: pathnode.c:2004
MaterialPath * create_material_path(RelOptInfo *rel, Path *subpath)
Definition: pathnode.c:1566
Path * create_seqscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer, int parallel_workers)
Definition: pathnode.c:927
BitmapHeapPath * create_bitmap_heap_path(PlannerInfo *root, RelOptInfo *rel, Path *bitmapqual, Relids required_outer, double loop_count, int parallel_degree)
Definition: pathnode.c:1042

References MemoizePath::binary_mode, BitmapHeapPath::bitmapqual, bms_is_subset(), MemoizePath::calls, cost_index(), create_append_path(), create_bitmap_heap_path(), create_material_path(), create_memoize_path(), create_resultscan_path(), create_samplescan_path(), create_seqscan_path(), create_subqueryscan_path(), get_baserel_parampathinfo(), MemoizePath::hash_operators, i, IsA, lappend(), lfirst, makeNode, NIL, Path::parallel_aware, Path::parallel_workers, MemoizePath::param_exprs, IndexPath::path, SubqueryScanPath::path, AppendPath::path, PATH_REQ_OUTER, Path::pathkeys, Path::pathtype, root, MemoizePath::singlerow, subpath(), SubqueryScanPath::subpath, MaterialPath::subpath, MemoizePath::subpath, AppendPath::subpaths, and Path::total_cost.

Referenced by get_cheapest_parameterized_child_path().
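A short sketch in the spirit of get_cheapest_parameterized_child_path(): widen a child path's parameterization and cope with the NULL result for unsupported path types or narrowing requests. The helper is hypothetical.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical helper; not part of PostgreSQL. */
static Path *
widen_parameterization_sketch(PlannerInfo *root, Path *path,
                              Relids required_outer, double loop_count)
{
    Path       *newpath = reparameterize_path(root, path, required_outer,
                                              loop_count);

    /* NULL means this path type, or a narrowing request, can't be handled */
    return newpath;
}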

◆ reparameterize_path_by_child()

Path* reparameterize_path_by_child ( PlannerInfo *root,
Path *path,
RelOptInfo *child_rel 
)

Definition at line 4103 of file pathnode.c.

4105 {
4106  Path *new_path;
4107  ParamPathInfo *new_ppi;
4108  ParamPathInfo *old_ppi;
4109  Relids required_outer;
4110 
4111 #define ADJUST_CHILD_ATTRS(node) \
4112  ((node) = (void *) adjust_appendrel_attrs_multilevel(root, \
4113  (Node *) (node), \
4114  child_rel, \
4115  child_rel->top_parent))
4116 
4117 #define REPARAMETERIZE_CHILD_PATH(path) \
4118 do { \
4119  (path) = reparameterize_path_by_child(root, (path), child_rel); \
4120  if ((path) == NULL) \
4121  return NULL; \
4122 } while(0)
4123 
4124 #define REPARAMETERIZE_CHILD_PATH_LIST(pathlist) \
4125 do { \
4126  if ((pathlist) != NIL) \
4127  { \
4128  (pathlist) = reparameterize_pathlist_by_child(root, (pathlist), \
4129  child_rel); \
4130  if ((pathlist) == NIL) \
4131  return NULL; \
4132  } \
4133 } while(0)
4134 
4135  /*
4136  * If the path is not parameterized by the parent of the given relation,
4137  * it doesn't need reparameterization.
4138  */
4139  if (!path->param_info ||
4140  !bms_overlap(PATH_REQ_OUTER(path), child_rel->top_parent_relids))
4141  return path;
4142 
4143  /*
4144  * If possible, reparameterize the given path.
4145  *
4146  * This function is currently only applied to the inner side of a nestloop
4147  * join that is being partitioned by the partitionwise-join code. Hence,
4148  * we need only support path types that plausibly arise in that context.
4149  * (In particular, supporting sorted path types would be a waste of code
4150  * and cycles: even if we translated them here, they'd just lose in
4151  * subsequent cost comparisons.) If we do see an unsupported path type,
4152  * that just means we won't be able to generate a partitionwise-join plan
4153  * using that path type.
4154  */
4155  switch (nodeTag(path))
4156  {
4157  case T_Path:
4158  new_path = path;
4159  ADJUST_CHILD_ATTRS(new_path->parent->baserestrictinfo);
4160  if (path->pathtype == T_SampleScan)
4161  {
4162  Index scan_relid = path->parent->relid;
4163  RangeTblEntry *rte;
4164 
4165  /* it should be a base rel with a tablesample clause... */
4166  Assert(scan_relid > 0);
4167  rte = planner_rt_fetch(scan_relid, root);
4168  Assert(rte->rtekind == RTE_RELATION);
4169  Assert(rte->tablesample != NULL);
4170 
4171  ADJUST_CHILD_ATTRS(rte->tablesample);
4172  }
4173  break;
4174 
4175  case T_IndexPath:
4176  {
4177  IndexPath *ipath = (IndexPath *) path;
4178 
4179  ADJUST_CHILD_ATTRS(ipath->indexinfo->indrestrictinfo);
4180  ADJUST_CHILD_ATTRS(ipath->indexclauses);
4181  new_path = (Path *) ipath;
4182  }
4183  break;
4184 
4185  case T_BitmapHeapPath:
4186  {
4187  BitmapHeapPath *bhpath = (BitmapHeapPath *) path;
4188 
4189  ADJUST_CHILD_ATTRS(bhpath->path.parent->baserestrictinfo);
4190  REPARAMETERIZE_CHILD_PATH(bhpath->bitmapqual);
4191  new_path = (Path *) bhpath;
4192  }
4193  break;
4194 
4195  case T_BitmapAndPath:
4196  {
4197  BitmapAndPath *bapath = (BitmapAndPath *) path;
4198 
4199  REPARAMETERIZE_CHILD_PATH_LIST(bapath->bitmapquals);
4200  new_path = (Path *) bapath;
4201  }
4202  break;
4203 
4204  case T_BitmapOrPath:
4205  {
4206  BitmapOrPath *bopath = (BitmapOrPath *) path;
4207 
4208  REPARAMETERIZE_CHILD_PATH_LIST(bopath->bitmapquals);
4209  new_path = (Path *) bopath;
4210  }
4211  break;
4212 
4213  case T_ForeignPath:
4214  {
4215  ForeignPath *fpath = (ForeignPath *) path;
4216  ReparameterizeForeignPathByChild_function rfpc_func;
4217 
4218  ADJUST_CHILD_ATTRS(fpath->path.parent->baserestrictinfo);
4219  if (fpath->fdw_outerpath)
4220  REPARAMETERIZE_CHILD_PATH(fpath->fdw_outerpath);
4221  if (fpath->fdw_restrictinfo)
4222  ADJUST_CHILD_ATTRS(fpath->fdw_restrictinfo);
4223 
4224  /* Hand over to FDW if needed. */
4225  rfpc_func =
4226  path->parent->fdwroutine->ReparameterizeForeignPathByChild;
4227  if (rfpc_func)
4228  fpath->fdw_private = rfpc_func(root, fpath->fdw_private,
4229  child_rel);
4230  new_path = (Path *) fpath;
4231  }
4232  break;
4233 
4234  case T_CustomPath:
4235  {
4236  CustomPath *cpath = (CustomPath *) path;
4237 
4238  ADJUST_CHILD_ATTRS(cpath->path.parent->baserestrictinfo);
4239  REPARAMETERIZE_CHILD_PATH_LIST(cpath->custom_paths);
4240  if (cpath->custom_restrictinfo)
4241  ADJUST_CHILD_ATTRS(cpath->custom_restrictinfo);
4242  if (cpath->methods &&
4243  cpath->methods->ReparameterizeCustomPathByChild)
4244  cpath->custom_private =
4245  cpath->methods->ReparameterizeCustomPathByChild(root,
4246  cpath->custom_private,
4247  child_rel);
4248  new_path = (Path *) cpath;
4249  }
4250  break;
4251 
4252  case T_NestPath:
4253  {
4254  NestPath *npath = (NestPath *) path;
4255  JoinPath *jpath = (JoinPath *) npath;
4256 
4257  REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
4258  REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
4259  ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
4260  new_path = (Path *) npath;
4261  }
4262  break;
4263 
4264  case T_MergePath:
4265  {
4266  MergePath *mpath = (MergePath *) path;
4267  JoinPath *jpath = (JoinPath *) mpath;
4268 
4269  REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
4270  REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
4271  ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
4272  ADJUST_CHILD_ATTRS(mpath->path_mergeclauses);
4273  new_path = (Path *) mpath;
4274  }
4275  break;
4276 
4277  case T_HashPath:
4278  {
4279  HashPath *hpath = (HashPath *) path;
4280  JoinPath *jpath = (JoinPath *) hpath;
4281 
4282  REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
4283  REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
4284  ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
4285  ADJUST_CHILD_ATTRS(hpath->path_hashclauses);
4286  new_path = (Path *) hpath;
4287  }
4288  break;
4289 
4290  case T_AppendPath:
4291  {
4292  AppendPath *apath = (AppendPath *) path;
4293 
4294  REPARAMETERIZE_CHILD_PATH_LIST(apath->subpaths);
4295  new_path = (Path *) apath;
4296  }
4297  break;
4298 
4299  case T_MaterialPath:
4300  {
4301  MaterialPath *mpath = (MaterialPath *) path;
4302 
4303  REPARAMETERIZE_CHILD_PATH(mpath->subpath);
4304  new_path = (Path *) mpath;
4305  }
4306  break;
4307 
4308  case T_MemoizePath:
4309  {
4310  MemoizePath *mpath = (MemoizePath *) path;
4311 
4312  REPARAMETERIZE_CHILD_PATH(mpath->subpath);
4313  ADJUST_CHILD_ATTRS(mpath->param_exprs);
4314  new_path = (Path *) mpath;
4315  }
4316  break;
4317 
4318  case T_GatherPath:
4319  {
4320  GatherPath *gpath = (GatherPath *) path;
4321 
4322  REPARAMETERIZE_CHILD_PATH(gpath->subpath);
4323  new_path = (Path *) gpath;
4324  }
4325  break;
4326 
4327  default:
4328  /* We don't know how to reparameterize this path. */
4329  return NULL;
4330  }
4331 
4332  /*
4333  * Adjust the parameterization information, which refers to the topmost
4334  * parent. The topmost parent can be multiple levels away from the given
4335  * child, hence use multi-level expression adjustment routines.
4336  */
4337  old_ppi = new_path->param_info;
4338  required_outer =
4339  adjust_child_relids_multilevel(root, old_ppi->ppi_req_outer,
4340  child_rel,
4341  child_rel->top_parent);
4342 
4343  /* If we already have a PPI for this parameterization, just return it */
4344  new_ppi = find_param_path_info(new_path->parent, required_outer);
4345 
4346  /*
4347  * If not, build a new one and link it to the list of PPIs. For the same
4348  * reason as explained in mark_dummy_rel(), allocate new PPI in the same
4349  * context the given RelOptInfo is in.
4350  */
4351  if (new_ppi == NULL)
4352  {
4353  MemoryContext oldcontext;
4354  RelOptInfo *rel = path->parent;
4355 
4356  oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
4357 
4358  new_ppi = makeNode(ParamPathInfo);
4359  new_ppi->ppi_req_outer = bms_copy(required_outer);
4360  new_ppi->ppi_rows = old_ppi->ppi_rows;
4361  new_ppi->ppi_clauses = old_ppi->ppi_clauses;
4362  ADJUST_CHILD_ATTRS(new_ppi->ppi_clauses);
4363  new_ppi->ppi_serials = bms_copy(old_ppi->ppi_serials);
4364  rel->ppilist = lappend(rel->ppilist, new_ppi);
4365 
4366  MemoryContextSwitchTo(oldcontext);
4367  }
4368  bms_free(required_outer);
4369 
4370  new_path->param_info = new_ppi;
4371 
4372  /*
4373  * Adjust the path target if the parent of the outer relation is
4374  * referenced in the targetlist. This can happen when only the parent of
4375  * outer relation is laterally referenced in this relation.
4376  */
4377  if (bms_overlap(path->parent->lateral_relids,
4378  child_rel->top_parent_relids))
4379  {
4380  new_path->pathtarget = copy_pathtarget(new_path->pathtarget);
4381  ADJUST_CHILD_ATTRS(new_path->pathtarget->exprs);
4382  }
4383 
4384  return new_path;
4385 }

References ADJUST_CHILD_ATTRS, adjust_child_relids_multilevel(), Assert, BitmapHeapPath::bitmapqual, BitmapAndPath::bitmapquals, BitmapOrPath::bitmapquals, bms_copy(), bms_free(), bms_overlap(), copy_pathtarget(), CustomPath::custom_paths, CustomPath::custom_private, CustomPath::custom_restrictinfo, ForeignPath::fdw_outerpath, ForeignPath::fdw_private, ForeignPath::fdw_restrictinfo, find_param_path_info(), GetMemoryChunkContext(), IndexPath::indexclauses, IndexPath::indexinfo, IndexOptInfo::indrestrictinfo, JoinPath::innerjoinpath, JoinPath::joinrestrictinfo, lappend(), makeNode, MemoryContextSwitchTo(), CustomPath::methods, nodeTag, JoinPath::outerjoinpath, MemoizePath::param_exprs, BitmapHeapPath::path, ForeignPath::path, CustomPath::path, HashPath::path_hashclauses, MergePath::path_mergeclauses, PATH_REQ_OUTER, Path::pathtype, planner_rt_fetch, ParamPathInfo::ppi_clauses, ParamPathInfo::ppi_req_outer, ParamPathInfo::ppi_rows, ParamPathInfo::ppi_serials, RelOptInfo::ppilist, REPARAMETERIZE_CHILD_PATH, REPARAMETERIZE_CHILD_PATH_LIST, CustomPathMethods::ReparameterizeCustomPathByChild, root, RTE_RELATION, RangeTblEntry::rtekind, MaterialPath::subpath, MemoizePath::subpath, GatherPath::subpath, AppendPath::subpaths, RangeTblEntry::tablesample, and RelOptInfo::top_parent_relids.

Referenced by create_nestloop_plan(), and reparameterize_pathlist_by_child().
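
The usual calling pattern, as in reparameterize_pathlist_by_child() above, is to translate a whole list of candidate paths and abandon the partitionwise strategy if any element cannot be handled. The following is a minimal sketch of that pattern; the helper name reparameterize_pathlist_sketch and its exact cleanup behavior are illustrative, not the actual implementation.

#include "postgres.h"
#include "optimizer/pathnode.h"

static List *
reparameterize_pathlist_sketch(PlannerInfo *root, List *pathlist,
                               RelOptInfo *child_rel)
{
    List       *result = NIL;
    ListCell   *lc;

    foreach(lc, pathlist)
    {
        Path       *path = reparameterize_path_by_child(root,
                                                        (Path *) lfirst(lc),
                                                        child_rel);

        /* NULL means this path type cannot be reparameterized; give up. */
        if (path == NULL)
        {
            list_free(result);
            return NIL;
        }
        result = lappend(result, path);
    }
    return result;
}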

◆ set_cheapest()

void set_cheapest ( RelOptInfo * parent_rel )

Definition at line 242 of file pathnode.c.

243 {
244  Path *cheapest_startup_path;
245  Path *cheapest_total_path;
246  Path *best_param_path;
247  List *parameterized_paths;
248  ListCell *p;
249 
250  Assert(IsA(parent_rel, RelOptInfo));
251 
252  if (parent_rel->pathlist == NIL)
253  elog(ERROR, "could not devise a query plan for the given query");
254 
255  cheapest_startup_path = cheapest_total_path = best_param_path = NULL;
256  parameterized_paths = NIL;
257 
258  foreach(p, parent_rel->pathlist)
259  {
260  Path *path = (Path *) lfirst(p);
261  int cmp;
262 
263  if (path->param_info)
264  {
265  /* Parameterized path, so add it to parameterized_paths */
266  parameterized_paths = lappend(parameterized_paths, path);
267 
268  /*
269  * If we have an unparameterized cheapest-total, we no longer care
270  * about finding the best parameterized path, so move on.
271  */
272  if (cheapest_total_path)
273  continue;
274 
275  /*
276  * Otherwise, track the best parameterized path, which is the one
277  * with least total cost among those of the minimum
278  * parameterization.
279  */
280  if (best_param_path == NULL)
281  best_param_path = path;
282  else
283  {
284  switch (bms_subset_compare(PATH_REQ_OUTER(path),
285  PATH_REQ_OUTER(best_param_path)))
286  {
287  case BMS_EQUAL:
288  /* keep the cheaper one */
289  if (compare_path_costs(path, best_param_path,
290  TOTAL_COST) < 0)
291  best_param_path = path;
292  break;
293  case BMS_SUBSET1:
294  /* new path is less-parameterized */
295  best_param_path = path;
296  break;
297  case BMS_SUBSET2:
298  /* old path is less-parameterized, keep it */
299  break;
300  case BMS_DIFFERENT:
301 
302  /*
303  * This means that neither path has the least possible
304  * parameterization for the rel. We'll sit on the old
305  * path until something better comes along.
306  */
307  break;
308  }
309  }
310  }
311  else
312  {
313  /* Unparameterized path, so consider it for cheapest slots */
314  if (cheapest_total_path == NULL)
315  {
316  cheapest_startup_path = cheapest_total_path = path;
317  continue;
318  }
319 
320  /*
321  * If we find two paths of identical costs, try to keep the
322  * better-sorted one. The paths might have unrelated sort
323  * orderings, in which case we can only guess which might be
324  * better to keep, but if one is superior then we definitely
325  * should keep that one.
326  */
327  cmp = compare_path_costs(cheapest_startup_path, path, STARTUP_COST);
328  if (cmp > 0 ||
329  (cmp == 0 &&
330  compare_pathkeys(cheapest_startup_path->pathkeys,
331  path->pathkeys) == PATHKEYS_BETTER2))
332  cheapest_startup_path = path;
333 
334  cmp = compare_path_costs(cheapest_total_path, path, TOTAL_COST);
335  if (cmp > 0 ||
336  (cmp == 0 &&
337  compare_pathkeys(cheapest_total_path->pathkeys,
338  path->pathkeys) == PATHKEYS_BETTER2))
339  cheapest_total_path = path;
340  }
341  }
342 
343  /* Add cheapest unparameterized path, if any, to parameterized_paths */
344  if (cheapest_total_path)
345  parameterized_paths = lcons(cheapest_total_path, parameterized_paths);
346 
347  /*
348  * If there is no unparameterized path, use the best parameterized path as
349  * cheapest_total_path (but not as cheapest_startup_path).
350  */
351  if (cheapest_total_path == NULL)
352  cheapest_total_path = best_param_path;
353  Assert(cheapest_total_path != NULL);
354 
355  parent_rel->cheapest_startup_path = cheapest_startup_path;
356  parent_rel->cheapest_total_path = cheapest_total_path;
357  parent_rel->cheapest_unique_path = NULL; /* computed only if needed */
358  parent_rel->cheapest_parameterized_paths = parameterized_paths;
359 }

References Assert, BMS_DIFFERENT, BMS_EQUAL, BMS_SUBSET1, BMS_SUBSET2, bms_subset_compare(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RelOptInfo::cheapest_unique_path, cmp(), compare_path_costs(), compare_pathkeys(), elog, ERROR, IsA, lappend(), lcons(), lfirst, NIL, PATH_REQ_OUTER, Path::pathkeys, PATHKEYS_BETTER2, RelOptInfo::pathlist, STARTUP_COST, and TOTAL_COST.

Referenced by apply_scanjoin_target_to_paths(), create_distinct_paths(), create_grouping_paths(), create_ordinary_grouping_paths(), create_partial_distinct_paths(), create_partitionwise_grouping_paths(), create_window_paths(), generate_partitionwise_join_paths(), mark_dummy_rel(), merge_clump(), postprocess_setop_rel(), query_planner(), set_dummy_rel_pathlist(), set_rel_pathlist(), standard_join_search(), and subquery_planner().
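
As the callers above suggest, set_cheapest() runs once per relation after all candidate paths have been collected with add_path(); later planning stages then read the cheapest_* fields directly. A minimal sketch of that sequence, assuming a hypothetical helper name and using only the trivial unparameterized sequential-scan path:

#include "postgres.h"
#include "optimizer/pathnode.h"

static Path *
cheapest_seqscan_sketch(PlannerInfo *root, RelOptInfo *rel)
{
    /* Add one unparameterized, non-parallel candidate path. */
    add_path(rel, create_seqscan_path(root, rel, NULL, 0));

    /* Elect the cheapest startup/total/parameterized paths for this rel. */
    set_cheapest(rel);

    /* Non-NULL here, since set_cheapest() errors out on an empty pathlist. */
    return rel->cheapest_total_path;
}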

◆ setup_simple_rel_arrays()

void setup_simple_rel_arrays ( PlannerInfo * root )

Definition at line 94 of file relnode.c.

95 {
96  int size;
97  Index rti;
98  ListCell *lc;
99 
100  /* Arrays are accessed using RT indexes (1..N) */
101  size = list_length(root->parse->rtable) + 1;
102  root->simple_rel_array_size = size;
103 
104  /*
105  * simple_rel_array is initialized to all NULLs, since no RelOptInfos
106  * exist yet. It'll be filled by later calls to build_simple_rel().
107  */
108  root->simple_rel_array = (RelOptInfo **)
109  palloc0(size * sizeof(RelOptInfo *));
110 
111  /* simple_rte_array is an array equivalent of the rtable list */
112  root->simple_rte_array = (RangeTblEntry **)
113  palloc0(size * sizeof(RangeTblEntry *));
114  rti = 1;
115  foreach(lc, root->parse->rtable)
116  {
117  RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
118 
119  root->simple_rte_array[rti++] = rte;
120  }
121 
122  /* append_rel_array is not needed if there are no AppendRelInfos */
123  if (root->append_rel_list == NIL)
124  {
125  root->append_rel_array = NULL;
126  return;
127  }
128 
129  root->append_rel_array = (AppendRelInfo **)
130  palloc0(size * sizeof(AppendRelInfo *));
131 
132  /*
133  * append_rel_array is filled with any already-existing AppendRelInfos,
134  * which currently could only come from UNION ALL flattening. We might
135  * add more later during inheritance expansion, but it's the
136  * responsibility of the expansion code to update the array properly.
137  */
138  foreach(lc, root->append_rel_list)
139  {
140  AppendRelInfo *appinfo = lfirst_node(AppendRelInfo, lc);
141  int child_relid = appinfo->child_relid;
142 
143  /* Sanity check */
144  Assert(child_relid < size);
145 
146  if (root->append_rel_array[child_relid])
147  elog(ERROR, "child relation already exists");
148 
149  root->append_rel_array[child_relid] = appinfo;
150  }
151 }

References Assert, AppendRelInfo::child_relid, elog, ERROR, lfirst, lfirst_node, list_length(), NIL, palloc0(), root, and size.

Referenced by plan_cluster_use_sort(), plan_create_index_workers(), plan_set_operations(), and query_planner().
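
The arrays built here are consumed by indexing with a range-table index in the range 1..N (entry 0 is unused). A minimal sketch of such a lookup, equivalent in spirit to the planner_rt_fetch() macro; the helper name fetch_rte_sketch is illustrative only:

#include "postgres.h"
#include "nodes/pathnodes.h"

static RangeTblEntry *
fetch_rte_sketch(PlannerInfo *root, Index rti)
{
    /* RT indexes are 1-based; the arrays are sized list_length(rtable)+1. */
    Assert(rti > 0 && rti < root->simple_rel_array_size);

    return root->simple_rte_array[rti];
}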