PostgreSQL Source Code git master
pathnode.h File Reference
#include "nodes/bitmapset.h"
#include "nodes/pathnodes.h"
Include dependency graph for pathnode.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Functions

int compare_path_costs (Path *path1, Path *path2, CostSelector criterion)
 
int compare_fractional_path_costs (Path *path1, Path *path2, double fraction)
 
void set_cheapest (RelOptInfo *parent_rel)
 
void add_path (RelOptInfo *parent_rel, Path *new_path)
 
bool add_path_precheck (RelOptInfo *parent_rel, int disabled_nodes, Cost startup_cost, Cost total_cost, List *pathkeys, Relids required_outer)
 
void add_partial_path (RelOptInfo *parent_rel, Path *new_path)
 
bool add_partial_path_precheck (RelOptInfo *parent_rel, int disabled_nodes, Cost total_cost, List *pathkeys)
 
Path * create_seqscan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer, int parallel_workers)

Path * create_samplescan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)

IndexPath * create_index_path (PlannerInfo *root, IndexOptInfo *index, List *indexclauses, List *indexorderbys, List *indexorderbycols, List *pathkeys, ScanDirection indexscandir, bool indexonly, Relids required_outer, double loop_count, bool partial_path)

BitmapHeapPath * create_bitmap_heap_path (PlannerInfo *root, RelOptInfo *rel, Path *bitmapqual, Relids required_outer, double loop_count, int parallel_degree)

BitmapAndPath * create_bitmap_and_path (PlannerInfo *root, RelOptInfo *rel, List *bitmapquals)

BitmapOrPath * create_bitmap_or_path (PlannerInfo *root, RelOptInfo *rel, List *bitmapquals)

TidPath * create_tidscan_path (PlannerInfo *root, RelOptInfo *rel, List *tidquals, Relids required_outer)

TidRangePath * create_tidrangescan_path (PlannerInfo *root, RelOptInfo *rel, List *tidrangequals, Relids required_outer)

AppendPath * create_append_path (PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *partial_subpaths, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows)

MergeAppendPath * create_merge_append_path (PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *pathkeys, Relids required_outer)

GroupResultPath * create_group_result_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, List *havingqual)

MaterialPath * create_material_path (RelOptInfo *rel, Path *subpath)

MemoizePath * create_memoize_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *param_exprs, List *hash_operators, bool singlerow, bool binary_mode, double calls)

UniquePath * create_unique_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, SpecialJoinInfo *sjinfo)

GatherPath * create_gather_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, Relids required_outer, double *rows)

GatherMergePath * create_gather_merge_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *pathkeys, Relids required_outer, double *rows)

SubqueryScanPath * create_subqueryscan_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, bool trivial_pathtarget, List *pathkeys, Relids required_outer)

Path * create_functionscan_path (PlannerInfo *root, RelOptInfo *rel, List *pathkeys, Relids required_outer)

Path * create_valuesscan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)

Path * create_tablefuncscan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)

Path * create_ctescan_path (PlannerInfo *root, RelOptInfo *rel, List *pathkeys, Relids required_outer)

Path * create_namedtuplestorescan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)

Path * create_resultscan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)

Path * create_worktablescan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)

ForeignPath * create_foreignscan_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, double rows, int disabled_nodes, Cost startup_cost, Cost total_cost, List *pathkeys, Relids required_outer, Path *fdw_outerpath, List *fdw_restrictinfo, List *fdw_private)

ForeignPath * create_foreign_join_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, double rows, int disabled_nodes, Cost startup_cost, Cost total_cost, List *pathkeys, Relids required_outer, Path *fdw_outerpath, List *fdw_restrictinfo, List *fdw_private)

ForeignPath * create_foreign_upper_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, double rows, int disabled_nodes, Cost startup_cost, Cost total_cost, List *pathkeys, Path *fdw_outerpath, List *fdw_restrictinfo, List *fdw_private)
 
Relids calc_nestloop_required_outer (Relids outerrelids, Relids outer_paramrels, Relids innerrelids, Relids inner_paramrels)
 
Relids calc_non_nestloop_required_outer (Path *outer_path, Path *inner_path)
 
NestPath * create_nestloop_path (PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype, JoinCostWorkspace *workspace, JoinPathExtraData *extra, Path *outer_path, Path *inner_path, List *restrict_clauses, List *pathkeys, Relids required_outer)

MergePath * create_mergejoin_path (PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype, JoinCostWorkspace *workspace, JoinPathExtraData *extra, Path *outer_path, Path *inner_path, List *restrict_clauses, List *pathkeys, Relids required_outer, List *mergeclauses, List *outersortkeys, List *innersortkeys)

HashPath * create_hashjoin_path (PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype, JoinCostWorkspace *workspace, JoinPathExtraData *extra, Path *outer_path, Path *inner_path, bool parallel_hash, List *restrict_clauses, Relids required_outer, List *hashclauses)

ProjectionPath * create_projection_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)

Path * apply_projection_to_path (PlannerInfo *root, RelOptInfo *rel, Path *path, PathTarget *target)

ProjectSetPath * create_set_projection_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)

SortPath * create_sort_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, double limit_tuples)

IncrementalSortPath * create_incremental_sort_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, int presorted_keys, double limit_tuples)

GroupPath * create_group_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *groupClause, List *qual, double numGroups)

UpperUniquePath * create_upper_unique_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, int numCols, double numGroups)

AggPath * create_agg_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, AggStrategy aggstrategy, AggSplit aggsplit, List *groupClause, List *qual, const AggClauseCosts *aggcosts, double numGroups)

GroupingSetsPath * create_groupingsets_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *having_qual, AggStrategy aggstrategy, List *rollups, const AggClauseCosts *agg_costs)

MinMaxAggPath * create_minmaxagg_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, List *mmaggregates, List *quals)

WindowAggPath * create_windowagg_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *windowFuncs, List *runCondition, WindowClause *winclause, List *qual, bool topwindow)

SetOpPath * create_setop_path (PlannerInfo *root, RelOptInfo *rel, Path *leftpath, Path *rightpath, SetOpCmd cmd, SetOpStrategy strategy, List *groupList, double numGroups, double outputRows)

RecursiveUnionPath * create_recursiveunion_path (PlannerInfo *root, RelOptInfo *rel, Path *leftpath, Path *rightpath, PathTarget *target, List *distinctList, int wtParam, double numGroups)

LockRowsPath * create_lockrows_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *rowMarks, int epqParam)

ModifyTablePath * create_modifytable_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, CmdType operation, bool canSetTag, Index nominalRelation, Index rootRelation, bool partColsUpdated, List *resultRelations, List *updateColnosLists, List *withCheckOptionLists, List *returningLists, List *rowMarks, OnConflictExpr *onconflict, List *mergeActionLists, List *mergeJoinConditions, int epqParam)

LimitPath * create_limit_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, Node *limitOffset, Node *limitCount, LimitOption limitOption, int64 offset_est, int64 count_est)
 
void adjust_limit_rows_costs (double *rows, Cost *startup_cost, Cost *total_cost, int64 offset_est, int64 count_est)
 
Path * reparameterize_path (PlannerInfo *root, Path *path, Relids required_outer, double loop_count)

Path * reparameterize_path_by_child (PlannerInfo *root, Path *path, RelOptInfo *child_rel)
 
bool path_is_reparameterizable_by_child (Path *path, RelOptInfo *child_rel)
 
void setup_simple_rel_arrays (PlannerInfo *root)
 
void expand_planner_arrays (PlannerInfo *root, int add_size)
 
RelOptInfo * build_simple_rel (PlannerInfo *root, int relid, RelOptInfo *parent)

RelOptInfo * find_base_rel (PlannerInfo *root, int relid)

RelOptInfo * find_base_rel_noerr (PlannerInfo *root, int relid)

RelOptInfo * find_base_rel_ignore_join (PlannerInfo *root, int relid)

RelOptInfo * find_join_rel (PlannerInfo *root, Relids relids)

RelOptInfo * build_join_rel (PlannerInfo *root, Relids joinrelids, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo, List *pushed_down_joins, List **restrictlist_ptr)
 
Relids min_join_parameterization (PlannerInfo *root, Relids joinrelids, RelOptInfo *outer_rel, RelOptInfo *inner_rel)
 
RelOptInfo * fetch_upper_rel (PlannerInfo *root, UpperRelationKind kind, Relids relids)
 
Relids find_childrel_parents (PlannerInfo *root, RelOptInfo *rel)
 
ParamPathInfo * get_baserel_parampathinfo (PlannerInfo *root, RelOptInfo *baserel, Relids required_outer)

ParamPathInfo * get_joinrel_parampathinfo (PlannerInfo *root, RelOptInfo *joinrel, Path *outer_path, Path *inner_path, SpecialJoinInfo *sjinfo, Relids required_outer, List **restrict_clauses)

ParamPathInfo * get_appendrel_parampathinfo (RelOptInfo *appendrel, Relids required_outer)

ParamPathInfo * find_param_path_info (RelOptInfo *rel, Relids required_outer)

Bitmapset * get_param_path_clause_serials (Path *path)

RelOptInfo * build_child_join_rel (PlannerInfo *root, RelOptInfo *outer_rel, RelOptInfo *inner_rel, RelOptInfo *parent_joinrel, List *restrictlist, SpecialJoinInfo *sjinfo, int nappinfos, AppendRelInfo **appinfos)
 
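
The functions above are normally used together: the create_*_path() constructors produce candidate Path nodes, add_path() or add_partial_path() filters them into the owning RelOptInfo, and set_cheapest() then fixes the cheapest_* fields consulted by later planning stages. The following minimal sketch (not part of pathnode.h; the helper name and the choice of a plain sequential scan are illustrative) shows that life cycle for a base relation.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Illustrative helper: populate a base rel's pathlist and pick the winners. */
static void
build_paths_for_rel(PlannerInfo *root, RelOptInfo *rel)
{
    /* propose a sequential scan; add_path() discards it if dominated */
    add_path(rel, create_seqscan_path(root, rel, NULL, 0));

    /* ... other candidates (index paths, TID scans, ...) would go here ... */

    /* fix cheapest_startup_path, cheapest_total_path, etc. */
    set_cheapest(rel);
}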

Function Documentation

◆ add_partial_path()

void add_partial_path ( RelOptInfo * parent_rel,
Path * new_path 
)

Definition at line 795 of file pathnode.c.

796{
797 bool accept_new = true; /* unless we find a superior old path */
798 int insert_at = 0; /* where to insert new item */
799 ListCell *p1;
800
801 /* Check for query cancel. */
802 CHECK_FOR_INTERRUPTS();
803
804 /* Path to be added must be parallel safe. */
805 Assert(new_path->parallel_safe);
806
807 /* Relation should be OK for parallelism, too. */
808 Assert(parent_rel->consider_parallel);
809
810 /*
811 * As in add_path, throw out any paths which are dominated by the new
812 * path, but throw out the new path if some existing path dominates it.
813 */
814 foreach(p1, parent_rel->partial_pathlist)
815 {
816 Path *old_path = (Path *) lfirst(p1);
817 bool remove_old = false; /* unless new proves superior */
818 PathKeysComparison keyscmp;
819
820 /* Compare pathkeys. */
821 keyscmp = compare_pathkeys(new_path->pathkeys, old_path->pathkeys);
822
823 /* Unless pathkeys are incompatible, keep just one of the two paths. */
824 if (keyscmp != PATHKEYS_DIFFERENT)
825 {
826 if (unlikely(new_path->disabled_nodes != old_path->disabled_nodes))
827 {
828 if (new_path->disabled_nodes > old_path->disabled_nodes)
829 accept_new = false;
830 else
831 remove_old = true;
832 }
833 else if (new_path->total_cost > old_path->total_cost
834 * STD_FUZZ_FACTOR)
835 {
836 /* New path costs more; keep it only if pathkeys are better. */
837 if (keyscmp != PATHKEYS_BETTER1)
838 accept_new = false;
839 }
840 else if (old_path->total_cost > new_path->total_cost
841 * STD_FUZZ_FACTOR)
842 {
843 /* Old path costs more; keep it only if pathkeys are better. */
844 if (keyscmp != PATHKEYS_BETTER2)
845 remove_old = true;
846 }
847 else if (keyscmp == PATHKEYS_BETTER1)
848 {
849 /* Costs are about the same, new path has better pathkeys. */
850 remove_old = true;
851 }
852 else if (keyscmp == PATHKEYS_BETTER2)
853 {
854 /* Costs are about the same, old path has better pathkeys. */
855 accept_new = false;
856 }
857 else if (old_path->total_cost > new_path->total_cost * 1.0000000001)
858 {
859 /* Pathkeys are the same, and the old path costs more. */
860 remove_old = true;
861 }
862 else
863 {
864 /*
865 * Pathkeys are the same, and new path isn't materially
866 * cheaper.
867 */
868 accept_new = false;
869 }
870 }
871
872 /*
873 * Remove current element from partial_pathlist if dominated by new.
874 */
875 if (remove_old)
876 {
877 parent_rel->partial_pathlist =
878 foreach_delete_current(parent_rel->partial_pathlist, p1);
879 pfree(old_path);
880 }
881 else
882 {
883 /* new belongs after this old path if it has cost >= old's */
884 if (new_path->total_cost >= old_path->total_cost)
885 insert_at = foreach_current_index(p1) + 1;
886 }
887
888 /*
889 * If we found an old path that dominates new_path, we can quit
890 * scanning the partial_pathlist; we will not add new_path, and we
891 * assume new_path cannot dominate any later path.
892 */
893 if (!accept_new)
894 break;
895 }
896
897 if (accept_new)
898 {
899 /* Accept the new path: insert it at proper place */
900 parent_rel->partial_pathlist =
901 list_insert_nth(parent_rel->partial_pathlist, insert_at, new_path);
902 }
903 else
904 {
905 /* Reject and recycle the new path */
906 pfree(new_path);
907 }
908}
#define Assert(condition)
Definition: c.h:812
#define unlikely(x)
Definition: c.h:330
List * list_insert_nth(List *list, int pos, void *datum)
Definition: list.c:439
void pfree(void *pointer)
Definition: mcxt.c:1521
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:122
PathKeysComparison compare_pathkeys(List *keys1, List *keys2)
Definition: pathkeys.c:304
#define STD_FUZZ_FACTOR
Definition: pathnode.c:47
PathKeysComparison
Definition: paths.h:203
@ PATHKEYS_BETTER2
Definition: paths.h:206
@ PATHKEYS_BETTER1
Definition: paths.h:205
@ PATHKEYS_DIFFERENT
Definition: paths.h:207
#define lfirst(lc)
Definition: pg_list.h:172
#define foreach_current_index(var_or_cell)
Definition: pg_list.h:403
#define foreach_delete_current(lst, var_or_cell)
Definition: pg_list.h:391
List * pathkeys
Definition: pathnodes.h:1677
int disabled_nodes
Definition: pathnodes.h:1672
Cost total_cost
Definition: pathnodes.h:1674
bool parallel_safe
Definition: pathnodes.h:1666
bool consider_parallel
Definition: pathnodes.h:887
List * partial_pathlist
Definition: pathnodes.h:900

References Assert, CHECK_FOR_INTERRUPTS, compare_pathkeys(), RelOptInfo::consider_parallel, Path::disabled_nodes, foreach_current_index, foreach_delete_current, lfirst, list_insert_nth(), Path::parallel_safe, RelOptInfo::partial_pathlist, Path::pathkeys, PATHKEYS_BETTER1, PATHKEYS_BETTER2, PATHKEYS_DIFFERENT, pfree(), STD_FUZZ_FACTOR, Path::total_cost, and unlikely.

Referenced by add_paths_to_append_rel(), build_index_paths(), build_setop_child_paths(), create_partial_bitmap_paths(), create_partial_distinct_paths(), create_partial_grouping_paths(), create_plain_partial_paths(), grouping_planner(), set_subquery_pathlist(), try_partial_hashjoin_path(), try_partial_mergejoin_path(), and try_partial_nestloop_path().
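
As a usage illustration (a minimal sketch, not taken from pathnode.c: the hook function name and the fixed worker count are assumptions), a partial path is normally generated only when the relation allows parallelism, mirroring create_plain_partial_paths():

#include "postgres.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"

/* Hypothetical set_rel_pathlist_hook fragment proposing a partial seqscan. */
static void
my_rel_pathlist_hook(PlannerInfo *root, RelOptInfo *rel,
                     Index rti, RangeTblEntry *rte)
{
    /* add_partial_path() asserts parallel safety, so honor consider_parallel */
    if (rel->consider_parallel)
    {
        int parallel_workers = 2;   /* illustrative; normally computed */

        add_partial_path(rel, create_seqscan_path(root, rel, NULL,
                                                  parallel_workers));
    }
}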

◆ add_partial_path_precheck()

bool add_partial_path_precheck ( RelOptInfo * parent_rel,
int  disabled_nodes,
Cost  total_cost,
List * pathkeys 
)

Definition at line 921 of file pathnode.c.

923{
924 ListCell *p1;
925
926 /*
927 * Our goal here is twofold. First, we want to find out whether this path
928 * is clearly inferior to some existing partial path. If so, we want to
929 * reject it immediately. Second, we want to find out whether this path
930 * is clearly superior to some existing partial path -- at least, modulo
931 * final cost computations. If so, we definitely want to consider it.
932 *
933 * Unlike add_path(), we always compare pathkeys here. This is because we
934 * expect partial_pathlist to be very short, and getting a definitive
935 * answer at this stage avoids the need to call add_path_precheck.
936 */
937 foreach(p1, parent_rel->partial_pathlist)
938 {
939 Path *old_path = (Path *) lfirst(p1);
940 PathKeysComparison keyscmp;
941
942 keyscmp = compare_pathkeys(pathkeys, old_path->pathkeys);
943 if (keyscmp != PATHKEYS_DIFFERENT)
944 {
945 if (total_cost > old_path->total_cost * STD_FUZZ_FACTOR &&
946 keyscmp != PATHKEYS_BETTER1)
947 return false;
948 if (old_path->total_cost > total_cost * STD_FUZZ_FACTOR &&
949 keyscmp != PATHKEYS_BETTER2)
950 return true;
951 }
952 }
953
954 /*
955 * This path is neither clearly inferior to an existing partial path nor
956 * clearly good enough that it might replace one. Compare it to
957 * non-parallel plans. If it loses even before accounting for the cost of
958 * the Gather node, we should definitely reject it.
959 *
960 * Note that we pass the total_cost to add_path_precheck twice. This is
961 * because it's never advantageous to consider the startup cost of a
962 * partial path; the resulting plans, if run in parallel, will be run to
963 * completion.
964 */
965 if (!add_path_precheck(parent_rel, disabled_nodes, total_cost, total_cost,
966 pathkeys, NULL))
967 return false;
968
969 return true;
970}
bool add_path_precheck(RelOptInfo *parent_rel, int disabled_nodes, Cost startup_cost, Cost total_cost, List *pathkeys, Relids required_outer)
Definition: pathnode.c:688

References add_path_precheck(), compare_pathkeys(), lfirst, RelOptInfo::partial_pathlist, Path::pathkeys, PATHKEYS_BETTER1, PATHKEYS_BETTER2, PATHKEYS_DIFFERENT, STD_FUZZ_FACTOR, and Path::total_cost.

Referenced by try_partial_hashjoin_path(), try_partial_mergejoin_path(), and try_partial_nestloop_path().
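
The callers listed above all follow the same pattern: estimate the join cheaply, ask add_partial_path_precheck() whether that estimate could survive, and only then build the full path node. A hedged sketch of that pattern, modeled on try_partial_nestloop_path(); the helper name, the use of extra->restrictlist, and the NULL required_outer are assumptions for illustration:

#include "postgres.h"
#include "optimizer/cost.h"
#include "optimizer/pathnode.h"

/* Illustrative helper: precheck before paying for create_nestloop_path(). */
static void
consider_partial_nestloop(PlannerInfo *root, RelOptInfo *joinrel,
                          JoinType jointype, JoinPathExtraData *extra,
                          Path *outer_path, Path *inner_path, List *pathkeys)
{
    JoinCostWorkspace workspace;

    initial_cost_nestloop(root, &workspace, jointype,
                          outer_path, inner_path, extra);

    if (!add_partial_path_precheck(joinrel, workspace.disabled_nodes,
                                   workspace.total_cost, pathkeys))
        return;                     /* clearly dominated; skip the build */

    add_partial_path(joinrel, (Path *)
                     create_nestloop_path(root, joinrel, jointype,
                                          &workspace, extra,
                                          outer_path, inner_path,
                                          extra->restrictlist,
                                          pathkeys, NULL));
}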

◆ add_path()

void add_path ( RelOptInfo * parent_rel,
Path * new_path 
)

Definition at line 461 of file pathnode.c.

462{
463 bool accept_new = true; /* unless we find a superior old path */
464 int insert_at = 0; /* where to insert new item */
465 List *new_path_pathkeys;
466 ListCell *p1;
467
468 /*
469 * This is a convenient place to check for query cancel --- no part of the
470 * planner goes very long without calling add_path().
471 */
472 CHECK_FOR_INTERRUPTS();
473
474 /* Pretend parameterized paths have no pathkeys, per comment above */
475 new_path_pathkeys = new_path->param_info ? NIL : new_path->pathkeys;
476
477 /*
478 * Loop to check proposed new path against old paths. Note it is possible
479 * for more than one old path to be tossed out because new_path dominates
480 * it.
481 */
482 foreach(p1, parent_rel->pathlist)
483 {
484 Path *old_path = (Path *) lfirst(p1);
485 bool remove_old = false; /* unless new proves superior */
486 PathCostComparison costcmp;
487 PathKeysComparison keyscmp;
488 BMS_Comparison outercmp;
489
490 /*
491 * Do a fuzzy cost comparison with standard fuzziness limit.
492 */
493 costcmp = compare_path_costs_fuzzily(new_path, old_path,
494 STD_FUZZ_FACTOR);
495
496 /*
497 * If the two paths compare differently for startup and total cost,
498 * then we want to keep both, and we can skip comparing pathkeys and
499 * required_outer rels. If they compare the same, proceed with the
500 * other comparisons. Row count is checked last. (We make the tests
501 * in this order because the cost comparison is most likely to turn
502 * out "different", and the pathkeys comparison next most likely. As
503 * explained above, row count very seldom makes a difference, so even
504 * though it's cheap to compare there's not much point in checking it
505 * earlier.)
506 */
507 if (costcmp != COSTS_DIFFERENT)
508 {
509 /* Similarly check to see if either dominates on pathkeys */
510 List *old_path_pathkeys;
511
512 old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
513 keyscmp = compare_pathkeys(new_path_pathkeys,
514 old_path_pathkeys);
515 if (keyscmp != PATHKEYS_DIFFERENT)
516 {
517 switch (costcmp)
518 {
519 case COSTS_EQUAL:
520 outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
521 PATH_REQ_OUTER(old_path));
522 if (keyscmp == PATHKEYS_BETTER1)
523 {
524 if ((outercmp == BMS_EQUAL ||
525 outercmp == BMS_SUBSET1) &&
526 new_path->rows <= old_path->rows &&
527 new_path->parallel_safe >= old_path->parallel_safe)
528 remove_old = true; /* new dominates old */
529 }
530 else if (keyscmp == PATHKEYS_BETTER2)
531 {
532 if ((outercmp == BMS_EQUAL ||
533 outercmp == BMS_SUBSET2) &&
534 new_path->rows >= old_path->rows &&
535 new_path->parallel_safe <= old_path->parallel_safe)
536 accept_new = false; /* old dominates new */
537 }
538 else /* keyscmp == PATHKEYS_EQUAL */
539 {
540 if (outercmp == BMS_EQUAL)
541 {
542 /*
543 * Same pathkeys and outer rels, and fuzzily
544 * the same cost, so keep just one; to decide
545 * which, first check parallel-safety, then
546 * rows, then do a fuzzy cost comparison with
547 * very small fuzz limit. (We used to do an
548 * exact cost comparison, but that results in
549 * annoying platform-specific plan variations
550 * due to roundoff in the cost estimates.) If
551 * things are still tied, arbitrarily keep
552 * only the old path. Notice that we will
553 * keep only the old path even if the
554 * less-fuzzy comparison decides the startup
555 * and total costs compare differently.
556 */
557 if (new_path->parallel_safe >
558 old_path->parallel_safe)
559 remove_old = true; /* new dominates old */
560 else if (new_path->parallel_safe <
561 old_path->parallel_safe)
562 accept_new = false; /* old dominates new */
563 else if (new_path->rows < old_path->rows)
564 remove_old = true; /* new dominates old */
565 else if (new_path->rows > old_path->rows)
566 accept_new = false; /* old dominates new */
567 else if (compare_path_costs_fuzzily(new_path,
568 old_path,
569 1.0000000001) == COSTS_BETTER1)
570 remove_old = true; /* new dominates old */
571 else
572 accept_new = false; /* old equals or
573 * dominates new */
574 }
575 else if (outercmp == BMS_SUBSET1 &&
576 new_path->rows <= old_path->rows &&
577 new_path->parallel_safe >= old_path->parallel_safe)
578 remove_old = true; /* new dominates old */
579 else if (outercmp == BMS_SUBSET2 &&
580 new_path->rows >= old_path->rows &&
581 new_path->parallel_safe <= old_path->parallel_safe)
582 accept_new = false; /* old dominates new */
583 /* else different parameterizations, keep both */
584 }
585 break;
586 case COSTS_BETTER1:
587 if (keyscmp != PATHKEYS_BETTER2)
588 {
589 outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
590 PATH_REQ_OUTER(old_path));
591 if ((outercmp == BMS_EQUAL ||
592 outercmp == BMS_SUBSET1) &&
593 new_path->rows <= old_path->rows &&
594 new_path->parallel_safe >= old_path->parallel_safe)
595 remove_old = true; /* new dominates old */
596 }
597 break;
598 case COSTS_BETTER2:
599 if (keyscmp != PATHKEYS_BETTER1)
600 {
601 outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
602 PATH_REQ_OUTER(old_path));
603 if ((outercmp == BMS_EQUAL ||
604 outercmp == BMS_SUBSET2) &&
605 new_path->rows >= old_path->rows &&
606 new_path->parallel_safe <= old_path->parallel_safe)
607 accept_new = false; /* old dominates new */
608 }
609 break;
610 case COSTS_DIFFERENT:
611
612 /*
613 * can't get here, but keep this case to keep compiler
614 * quiet
615 */
616 break;
617 }
618 }
619 }
620
621 /*
622 * Remove current element from pathlist if dominated by new.
623 */
624 if (remove_old)
625 {
626 parent_rel->pathlist = foreach_delete_current(parent_rel->pathlist,
627 p1);
628
629 /*
630 * Delete the data pointed-to by the deleted cell, if possible
631 */
632 if (!IsA(old_path, IndexPath))
633 pfree(old_path);
634 }
635 else
636 {
637 /*
638 * new belongs after this old path if it has more disabled nodes
639 * or if it has the same number of nodes but a greater total cost
640 */
641 if (new_path->disabled_nodes > old_path->disabled_nodes ||
642 (new_path->disabled_nodes == old_path->disabled_nodes &&
643 new_path->total_cost >= old_path->total_cost))
644 insert_at = foreach_current_index(p1) + 1;
645 }
646
647 /*
648 * If we found an old path that dominates new_path, we can quit
649 * scanning the pathlist; we will not add new_path, and we assume
650 * new_path cannot dominate any other elements of the pathlist.
651 */
652 if (!accept_new)
653 break;
654 }
655
656 if (accept_new)
657 {
658 /* Accept the new path: insert it at proper place in pathlist */
659 parent_rel->pathlist =
660 list_insert_nth(parent_rel->pathlist, insert_at, new_path);
661 }
662 else
663 {
664 /* Reject and recycle the new path */
665 if (!IsA(new_path, IndexPath))
666 pfree(new_path);
667 }
668}
BMS_Comparison bms_subset_compare(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:445
BMS_Comparison
Definition: bitmapset.h:61
@ BMS_SUBSET1
Definition: bitmapset.h:63
@ BMS_EQUAL
Definition: bitmapset.h:62
@ BMS_SUBSET2
Definition: bitmapset.h:64
#define IsA(nodeptr, _type_)
Definition: nodes.h:158
static PathCostComparison compare_path_costs_fuzzily(Path *path1, Path *path2, double fuzz_factor)
Definition: pathnode.c:182
PathCostComparison
Definition: pathnode.c:35
@ COSTS_EQUAL
Definition: pathnode.c:36
@ COSTS_BETTER1
Definition: pathnode.c:37
@ COSTS_BETTER2
Definition: pathnode.c:38
@ COSTS_DIFFERENT
Definition: pathnode.c:39
#define PATH_REQ_OUTER(path)
Definition: pathnodes.h:1681
#define NIL
Definition: pg_list.h:68
Definition: pg_list.h:54
Cardinality rows
Definition: pathnodes.h:1671
List * pathlist
Definition: pathnodes.h:898

References BMS_EQUAL, BMS_SUBSET1, BMS_SUBSET2, bms_subset_compare(), CHECK_FOR_INTERRUPTS, compare_path_costs_fuzzily(), compare_pathkeys(), COSTS_BETTER1, COSTS_BETTER2, COSTS_DIFFERENT, COSTS_EQUAL, Path::disabled_nodes, foreach_current_index, foreach_delete_current, IsA, lfirst, list_insert_nth(), NIL, Path::parallel_safe, PATH_REQ_OUTER, Path::pathkeys, PATHKEYS_BETTER1, PATHKEYS_BETTER2, PATHKEYS_DIFFERENT, RelOptInfo::pathlist, pfree(), Path::rows, STD_FUZZ_FACTOR, and Path::total_cost.

Referenced by add_foreign_final_paths(), add_foreign_grouping_paths(), add_foreign_ordered_paths(), add_paths_to_append_rel(), add_paths_to_grouping_rel(), add_paths_with_pathkeys_for_rel(), build_setop_child_paths(), BuildParameterizedTidPaths(), consider_groupingsets_paths(), create_degenerate_grouping_paths(), create_final_distinct_paths(), create_index_paths(), create_one_window_path(), create_ordered_paths(), create_partial_grouping_paths(), create_tidscan_paths(), fileGetForeignPaths(), gather_grouping_paths(), generate_gather_paths(), generate_nonunion_paths(), generate_orderedappend_paths(), generate_recursion_path(), generate_union_paths(), generate_useful_gather_paths(), get_index_paths(), grouping_planner(), mark_dummy_rel(), postgresGetForeignJoinPaths(), postgresGetForeignPaths(), preprocess_minmax_aggregates(), query_planner(), set_cte_pathlist(), set_dummy_rel_pathlist(), set_function_pathlist(), set_namedtuplestore_pathlist(), set_plain_rel_pathlist(), set_result_pathlist(), set_subquery_pathlist(), set_tablefunc_pathlist(), set_tablesample_rel_pathlist(), set_values_pathlist(), set_worktable_pathlist(), try_hashjoin_path(), try_mergejoin_path(), and try_nestloop_path().
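
For an extension-level illustration, a classic caller is an FDW's GetForeignPaths callback (compare fileGetForeignPaths() and postgresGetForeignPaths() above). The sketch below is hypothetical: the cost numbers are placeholders and the callback is not registered anywhere. Note that add_path() takes ownership of the submitted path and may pfree() it immediately if an existing path dominates it.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical GetForeignPaths callback handing one ForeignPath to add_path(). */
static void
example_GetForeignPaths(PlannerInfo *root, RelOptInfo *baserel,
                        Oid foreigntableid)
{
    Cost startup_cost = 10;                     /* placeholder estimates */
    Cost total_cost = startup_cost + baserel->rows;

    add_path(baserel, (Path *)
             create_foreignscan_path(root, baserel,
                                     NULL,          /* default pathtarget */
                                     baserel->rows,
                                     0,             /* disabled_nodes */
                                     startup_cost,
                                     total_cost,
                                     NIL,           /* no pathkeys */
                                     baserel->lateral_relids,
                                     NULL,          /* no outer plan */
                                     NIL,           /* no fdw_restrictinfo */
                                     NIL));         /* no fdw_private */
}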

◆ add_path_precheck()

bool add_path_precheck ( RelOptInfo * parent_rel,
int  disabled_nodes,
Cost  startup_cost,
Cost  total_cost,
List * pathkeys,
Relids  required_outer 
)

Definition at line 688 of file pathnode.c.

691{
692 List *new_path_pathkeys;
693 bool consider_startup;
694 ListCell *p1;
695
696 /* Pretend parameterized paths have no pathkeys, per add_path policy */
697 new_path_pathkeys = required_outer ? NIL : pathkeys;
698
699 /* Decide whether new path's startup cost is interesting */
700 consider_startup = required_outer ? parent_rel->consider_param_startup : parent_rel->consider_startup;
701
702 foreach(p1, parent_rel->pathlist)
703 {
704 Path *old_path = (Path *) lfirst(p1);
705 PathKeysComparison keyscmp;
706
707 /*
708 * Since the pathlist is sorted by disabled_nodes and then by
709 * total_cost, we can stop looking once we reach a path with more
710 * disabled nodes, or the same number of disabled nodes plus a
711 * total_cost larger than the new path's.
712 */
713 if (unlikely(old_path->disabled_nodes != disabled_nodes))
714 {
715 if (disabled_nodes < old_path->disabled_nodes)
716 break;
717 }
718 else if (total_cost <= old_path->total_cost * STD_FUZZ_FACTOR)
719 break;
720
721 /*
722 * We are looking for an old_path with the same parameterization (and
723 * by assumption the same rowcount) that dominates the new path on
724 * pathkeys as well as both cost metrics. If we find one, we can
725 * reject the new path.
726 *
727 * Cost comparisons here should match compare_path_costs_fuzzily.
728 */
729 /* new path can win on startup cost only if consider_startup */
730 if (startup_cost > old_path->startup_cost * STD_FUZZ_FACTOR ||
731 !consider_startup)
732 {
733 /* new path loses on cost, so check pathkeys... */
734 List *old_path_pathkeys;
735
736 old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
737 keyscmp = compare_pathkeys(new_path_pathkeys,
738 old_path_pathkeys);
739 if (keyscmp == PATHKEYS_EQUAL ||
740 keyscmp == PATHKEYS_BETTER2)
741 {
742 /* new path does not win on pathkeys... */
743 if (bms_equal(required_outer, PATH_REQ_OUTER(old_path)))
744 {
745 /* Found an old path that dominates the new one */
746 return false;
747 }
748 }
749 }
750 }
751
752 return true;
753}
bool bms_equal(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:142
@ PATHKEYS_EQUAL
Definition: paths.h:204
Cost startup_cost
Definition: pathnodes.h:1673
bool consider_param_startup
Definition: pathnodes.h:885
bool consider_startup
Definition: pathnodes.h:883

References bms_equal(), compare_pathkeys(), RelOptInfo::consider_param_startup, RelOptInfo::consider_startup, Path::disabled_nodes, lfirst, NIL, PATH_REQ_OUTER, Path::pathkeys, PATHKEYS_BETTER2, PATHKEYS_EQUAL, RelOptInfo::pathlist, Path::startup_cost, STD_FUZZ_FACTOR, and unlikely.

Referenced by add_partial_path_precheck(), try_hashjoin_path(), try_mergejoin_path(), and try_nestloop_path().
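
A hedged sketch of how a caller can use this precheck to avoid constructing a path node at all; the helper name and the assumption of an unsorted result (NIL pathkeys) are illustrative, not part of this API:

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Illustrative helper: is a prospective path worth building at all? */
static bool
worth_building_path(RelOptInfo *rel, int est_disabled_nodes,
                    Cost est_startup, Cost est_total, Relids required_outer)
{
    /* the prospective path would deliver no useful sort order */
    return add_path_precheck(rel, est_disabled_nodes,
                             est_startup, est_total,
                             NIL, required_outer);
}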

◆ adjust_limit_rows_costs()

void adjust_limit_rows_costs ( double *  rows,
Cost * startup_cost,
Cost * total_cost,
int64  offset_est,
int64  count_est 
)

Definition at line 4035 of file pathnode.c.

4040{
4041 double input_rows = *rows;
4042 Cost input_startup_cost = *startup_cost;
4043 Cost input_total_cost = *total_cost;
4044
4045 if (offset_est != 0)
4046 {
4047 double offset_rows;
4048
4049 if (offset_est > 0)
4050 offset_rows = (double) offset_est;
4051 else
4052 offset_rows = clamp_row_est(input_rows * 0.10);
4053 if (offset_rows > *rows)
4054 offset_rows = *rows;
4055 if (input_rows > 0)
4056 *startup_cost +=
4057 (input_total_cost - input_startup_cost)
4058 * offset_rows / input_rows;
4059 *rows -= offset_rows;
4060 if (*rows < 1)
4061 *rows = 1;
4062 }
4063
4064 if (count_est != 0)
4065 {
4066 double count_rows;
4067
4068 if (count_est > 0)
4069 count_rows = (double) count_est;
4070 else
4071 count_rows = clamp_row_est(input_rows * 0.10);
4072 if (count_rows > *rows)
4073 count_rows = *rows;
4074 if (input_rows > 0)
4075 *total_cost = *startup_cost +
4076 (input_total_cost - input_startup_cost)
4077 * count_rows / input_rows;
4078 *rows = count_rows;
4079 if (*rows < 1)
4080 *rows = 1;
4081 }
4082}
double clamp_row_est(double nrows)
Definition: costsize.c:213
double Cost
Definition: nodes.h:251

References clamp_row_est().

Referenced by create_limit_path(), and estimate_path_cost_size().
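
A worked example may help: with a subpath estimated at 1000 rows, startup cost 0 and total cost 100, an OFFSET 100 LIMIT 10 query charges the fraction of run cost needed to skip 100 rows to the startup cost and keeps only the fraction needed to emit 10 rows in the total cost. The numbers below are purely illustrative.

#include "postgres.h"
#include "optimizer/pathnode.h"

static void
limit_cost_example(void)
{
    double rows = 1000;
    Cost   startup_cost = 0;
    Cost   total_cost = 100;

    adjust_limit_rows_costs(&rows, &startup_cost, &total_cost,
                            100,    /* offset_est */
                            10);    /* count_est */

    /*
     * Now rows == 10, startup_cost == 10 (100/1000 of the run cost is spent
     * skipping the offset), and total_cost == 11 (startup plus 10/1000 of
     * the run cost to produce the limited rows).
     */
}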

◆ apply_projection_to_path()

Path * apply_projection_to_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * path,
PathTarget * target 
)

Definition at line 2873 of file pathnode.c.

2877{
2878 QualCost oldcost;
2879
2880 /*
2881 * If given path can't project, we might need a Result node, so make a
2882 * separate ProjectionPath.
2883 */
2884 if (!is_projection_capable_path(path))
2885 return (Path *) create_projection_path(root, rel, path, target);
2886
2887 /*
2888 * We can just jam the desired tlist into the existing path, being sure to
2889 * update its cost estimates appropriately.
2890 */
2891 oldcost = path->pathtarget->cost;
2892 path->pathtarget = target;
2893
2894 path->startup_cost += target->cost.startup - oldcost.startup;
2895 path->total_cost += target->cost.startup - oldcost.startup +
2896 (target->cost.per_tuple - oldcost.per_tuple) * path->rows;
2897
2898 /*
2899 * If the path happens to be a Gather or GatherMerge path, we'd like to
2900 * arrange for the subpath to return the required target list so that
2901 * workers can help project. But if there is something that is not
2902 * parallel-safe in the target expressions, then we can't.
2903 */
2904 if ((IsA(path, GatherPath) || IsA(path, GatherMergePath)) &&
2905 is_parallel_safe(root, (Node *) target->exprs))
2906 {
2907 /*
2908 * We always use create_projection_path here, even if the subpath is
2909 * projection-capable, so as to avoid modifying the subpath in place.
2910 * It seems unlikely at present that there could be any other
2911 * references to the subpath, but better safe than sorry.
2912 *
2913 * Note that we don't change the parallel path's cost estimates; it
2914 * might be appropriate to do so, to reflect the fact that the bulk of
2915 * the target evaluation will happen in workers.
2916 */
2917 if (IsA(path, GatherPath))
2918 {
2919 GatherPath *gpath = (GatherPath *) path;
2920
2921 gpath->subpath = (Path *)
2922 create_projection_path(root,
2923 gpath->subpath->parent,
2924 gpath->subpath,
2925 target);
2926 }
2927 else
2928 {
2929 GatherMergePath *gmpath = (GatherMergePath *) path;
2930
2931 gmpath->subpath = (Path *)
2932 create_projection_path(root,
2933 gmpath->subpath->parent,
2934 gmpath->subpath,
2935 target);
2936 }
2937 }
2938 else if (path->parallel_safe &&
2939 !is_parallel_safe(root, (Node *) target->exprs))
2940 {
2941 /*
2942 * We're inserting a parallel-restricted target list into a path
2943 * currently marked parallel-safe, so we have to mark it as no longer
2944 * safe.
2945 */
2946 path->parallel_safe = false;
2947 }
2948
2949 return path;
2950}
bool is_parallel_safe(PlannerInfo *root, Node *node)
Definition: clauses.c:752
bool is_projection_capable_path(Path *path)
Definition: createplan.c:7303
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:76
ProjectionPath * create_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2763
tree ctl root
Definition: radixtree.h:1857
Path * subpath
Definition: pathnodes.h:2054
Definition: nodes.h:129
List * exprs
Definition: pathnodes.h:1544
QualCost cost
Definition: pathnodes.h:1550
Cost per_tuple
Definition: pathnodes.h:48
Cost startup
Definition: pathnodes.h:47

References PathTarget::cost, create_projection_path(), PathTarget::exprs, if(), is_parallel_safe(), is_projection_capable_path(), IsA, Path::parallel_safe, QualCost::per_tuple, root, Path::rows, QualCost::startup, Path::startup_cost, GatherPath::subpath, GatherMergePath::subpath, and Path::total_cost.

Referenced by adjust_paths_for_srfs(), build_minmax_path(), create_ordered_paths(), and recurse_set_operations().
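
A minimal usage sketch (the helper name is illustrative): callers hand in a path plus the desired PathTarget and must use the returned pointer, since a path that cannot project for itself is wrapped in a new ProjectionPath rather than modified in place.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Illustrative helper: put a precomputed target onto a rel's cheapest path. */
static Path *
project_cheapest_path(PlannerInfo *root, RelOptInfo *rel, PathTarget *target)
{
    Path *path = rel->cheapest_total_path;

    /* updates the path in place when it can project, else wraps it */
    return apply_projection_to_path(root, rel, path, target);
}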

◆ build_child_join_rel()

RelOptInfo * build_child_join_rel ( PlannerInfo * root,
RelOptInfo * outer_rel,
RelOptInfo * inner_rel,
RelOptInfo * parent_joinrel,
List * restrictlist,
SpecialJoinInfo * sjinfo,
int  nappinfos,
AppendRelInfo **  appinfos 
)

Definition at line 882 of file relnode.c.

886{
887 RelOptInfo *joinrel = makeNode(RelOptInfo);
888
889 /* Only joins between "other" relations land here. */
890 Assert(IS_OTHER_REL(outer_rel) && IS_OTHER_REL(inner_rel));
891
892 /* The parent joinrel should have consider_partitionwise_join set. */
893 Assert(parent_joinrel->consider_partitionwise_join);
894
895 joinrel->reloptkind = RELOPT_OTHER_JOINREL;
896 joinrel->relids = adjust_child_relids(parent_joinrel->relids,
897 nappinfos, appinfos);
898 joinrel->rows = 0;
899 /* cheap startup cost is interesting iff not all tuples to be retrieved */
900 joinrel->consider_startup = (root->tuple_fraction > 0);
901 joinrel->consider_param_startup = false;
902 joinrel->consider_parallel = false;
903 joinrel->reltarget = create_empty_pathtarget();
904 joinrel->pathlist = NIL;
905 joinrel->ppilist = NIL;
906 joinrel->partial_pathlist = NIL;
907 joinrel->cheapest_startup_path = NULL;
908 joinrel->cheapest_total_path = NULL;
909 joinrel->cheapest_unique_path = NULL;
910 joinrel->cheapest_parameterized_paths = NIL;
911 joinrel->direct_lateral_relids = NULL;
912 joinrel->lateral_relids = NULL;
913 joinrel->relid = 0; /* indicates not a baserel */
914 joinrel->rtekind = RTE_JOIN;
915 joinrel->min_attr = 0;
916 joinrel->max_attr = 0;
917 joinrel->attr_needed = NULL;
918 joinrel->attr_widths = NULL;
919 joinrel->notnullattnums = NULL;
920 joinrel->nulling_relids = NULL;
921 joinrel->lateral_vars = NIL;
922 joinrel->lateral_referencers = NULL;
923 joinrel->indexlist = NIL;
924 joinrel->pages = 0;
925 joinrel->tuples = 0;
926 joinrel->allvisfrac = 0;
927 joinrel->eclass_indexes = NULL;
928 joinrel->subroot = NULL;
929 joinrel->subplan_params = NIL;
930 joinrel->amflags = 0;
931 joinrel->serverid = InvalidOid;
932 joinrel->userid = InvalidOid;
933 joinrel->useridiscurrent = false;
934 joinrel->fdwroutine = NULL;
935 joinrel->fdw_private = NULL;
936 joinrel->baserestrictinfo = NIL;
937 joinrel->baserestrictcost.startup = 0;
938 joinrel->baserestrictcost.per_tuple = 0;
939 joinrel->joininfo = NIL;
940 joinrel->has_eclass_joins = false;
941 joinrel->consider_partitionwise_join = false; /* might get changed later */
942 joinrel->parent = parent_joinrel;
943 joinrel->top_parent = parent_joinrel->top_parent ? parent_joinrel->top_parent : parent_joinrel;
944 joinrel->top_parent_relids = joinrel->top_parent->relids;
945 joinrel->part_scheme = NULL;
946 joinrel->nparts = -1;
947 joinrel->boundinfo = NULL;
948 joinrel->partbounds_merged = false;
949 joinrel->partition_qual = NIL;
950 joinrel->part_rels = NULL;
951 joinrel->live_parts = NULL;
952 joinrel->all_partrels = NULL;
953 joinrel->partexprs = NULL;
954 joinrel->nullable_partexprs = NULL;
955
956 /* Compute information relevant to foreign relations. */
957 set_foreign_rel_properties(joinrel, outer_rel, inner_rel);
958
959 /* Set up reltarget struct */
960 build_child_join_reltarget(root, parent_joinrel, joinrel,
961 nappinfos, appinfos);
962
963 /* Construct joininfo list. */
964 joinrel->joininfo = (List *) adjust_appendrel_attrs(root,
965 (Node *) parent_joinrel->joininfo,
966 nappinfos,
967 appinfos);
968
969 /*
970 * Lateral relids referred in child join will be same as that referred in
971 * the parent relation.
972 */
973 joinrel->direct_lateral_relids = (Relids) bms_copy(parent_joinrel->direct_lateral_relids);
974 joinrel->lateral_relids = (Relids) bms_copy(parent_joinrel->lateral_relids);
975
976 /*
977 * If the parent joinrel has pending equivalence classes, so does the
978 * child.
979 */
980 joinrel->has_eclass_joins = parent_joinrel->has_eclass_joins;
981
982 /* Is the join between partitions itself partitioned? */
983 build_joinrel_partition_info(root, joinrel, outer_rel, inner_rel, sjinfo,
984 restrictlist);
985
986 /* Child joinrel is parallel safe if parent is parallel safe. */
987 joinrel->consider_parallel = parent_joinrel->consider_parallel;
988
989 /* Set estimates of the child-joinrel's size. */
990 set_joinrel_size_estimates(root, joinrel, outer_rel, inner_rel,
991 sjinfo, restrictlist);
992
993 /* We build the join only once. */
994 Assert(!find_join_rel(root, joinrel->relids));
995
996 /* Add the relation to the PlannerInfo. */
997 add_join_rel(root, joinrel);
998
999 /*
1000 * We might need EquivalenceClass members corresponding to the child join,
1001 * so that we can represent sort pathkeys for it. As with children of
1002 * baserels, we shouldn't need this unless there are relevant eclass joins
1003 * (implying that a merge join might be possible) or pathkeys to sort by.
1004 */
1005 if (joinrel->has_eclass_joins || has_useful_pathkeys(root, parent_joinrel))
1006 add_child_join_rel_equivalences(root,
1007 nappinfos, appinfos,
1008 parent_joinrel, joinrel);
1009
1010 return joinrel;
1011}
Node * adjust_appendrel_attrs(PlannerInfo *root, Node *node, int nappinfos, AppendRelInfo **appinfos)
Definition: appendinfo.c:200
Relids adjust_child_relids(Relids relids, int nappinfos, AppendRelInfo **appinfos)
Definition: appendinfo.c:557
Bitmapset * bms_copy(const Bitmapset *a)
Definition: bitmapset.c:122
void set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo, List *restrictlist)
Definition: costsize.c:5404
void add_child_join_rel_equivalences(PlannerInfo *root, int nappinfos, AppendRelInfo **appinfos, RelOptInfo *parent_joinrel, RelOptInfo *child_joinrel)
Definition: equivclass.c:2812
#define makeNode(_type_)
Definition: nodes.h:155
@ RTE_JOIN
Definition: parsenodes.h:1019
bool has_useful_pathkeys(PlannerInfo *root, RelOptInfo *rel)
Definition: pathkeys.c:2315
Bitmapset * Relids
Definition: pathnodes.h:30
@ RELOPT_OTHER_JOINREL
Definition: pathnodes.h:830
#define IS_OTHER_REL(rel)
Definition: pathnodes.h:854
#define InvalidOid
Definition: postgres_ext.h:36
static void build_joinrel_partition_info(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo, List *restrictlist)
Definition: relnode.c:1991
RelOptInfo * find_join_rel(PlannerInfo *root, Relids relids)
Definition: relnode.c:527
static void build_child_join_reltarget(PlannerInfo *root, RelOptInfo *parentrel, RelOptInfo *childrel, int nappinfos, AppendRelInfo **appinfos)
Definition: relnode.c:2504
static void set_foreign_rel_properties(RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel)
Definition: relnode.c:589
static void add_join_rel(PlannerInfo *root, RelOptInfo *joinrel)
Definition: relnode.c:627
List * baserestrictinfo
Definition: pathnodes.h:985
List * subplan_params
Definition: pathnodes.h:954
List * ppilist
Definition: pathnodes.h:899
bool useridiscurrent
Definition: pathnodes.h:968
uint32 amflags
Definition: pathnodes.h:958
List * joininfo
Definition: pathnodes.h:991
Bitmapset * notnullattnums
Definition: pathnodes.h:936
List * partition_qual
Definition: pathnodes.h:1027
Relids relids
Definition: pathnodes.h:871
struct PathTarget * reltarget
Definition: pathnodes.h:893
Index relid
Definition: pathnodes.h:918
List * lateral_vars
Definition: pathnodes.h:940
Cardinality tuples
Definition: pathnodes.h:949
Relids top_parent_relids
Definition: pathnodes.h:1009
bool partbounds_merged
Definition: pathnodes.h:1025
BlockNumber pages
Definition: pathnodes.h:948
Relids lateral_relids
Definition: pathnodes.h:913
List * cheapest_parameterized_paths
Definition: pathnodes.h:904
RelOptKind reloptkind
Definition: pathnodes.h:865
List * indexlist
Definition: pathnodes.h:944
struct Path * cheapest_unique_path
Definition: pathnodes.h:903
Relids lateral_referencers
Definition: pathnodes.h:942
struct Path * cheapest_startup_path
Definition: pathnodes.h:901
QualCost baserestrictcost
Definition: pathnodes.h:987
struct Path * cheapest_total_path
Definition: pathnodes.h:902
Oid userid
Definition: pathnodes.h:966
Bitmapset * eclass_indexes
Definition: pathnodes.h:952
Relids all_partrels
Definition: pathnodes.h:1041
Relids direct_lateral_relids
Definition: pathnodes.h:911
bool has_eclass_joins
Definition: pathnodes.h:993
Oid serverid
Definition: pathnodes.h:964
Bitmapset * live_parts
Definition: pathnodes.h:1039
bool consider_partitionwise_join
Definition: pathnodes.h:999
PlannerInfo * subroot
Definition: pathnodes.h:953
AttrNumber max_attr
Definition: pathnodes.h:926
Relids nulling_relids
Definition: pathnodes.h:938
double allvisfrac
Definition: pathnodes.h:950
Cardinality rows
Definition: pathnodes.h:877
AttrNumber min_attr
Definition: pathnodes.h:924
RTEKind rtekind
Definition: pathnodes.h:922
PathTarget * create_empty_pathtarget(void)
Definition: tlist.c:681

References add_child_join_rel_equivalences(), add_join_rel(), adjust_appendrel_attrs(), adjust_child_relids(), RelOptInfo::all_partrels, RelOptInfo::allvisfrac, RelOptInfo::amflags, Assert, RelOptInfo::baserestrictcost, RelOptInfo::baserestrictinfo, bms_copy(), build_child_join_reltarget(), build_joinrel_partition_info(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RelOptInfo::cheapest_unique_path, RelOptInfo::consider_parallel, RelOptInfo::consider_param_startup, RelOptInfo::consider_partitionwise_join, RelOptInfo::consider_startup, create_empty_pathtarget(), RelOptInfo::direct_lateral_relids, RelOptInfo::eclass_indexes, find_join_rel(), RelOptInfo::has_eclass_joins, has_useful_pathkeys(), RelOptInfo::indexlist, InvalidOid, IS_OTHER_REL, RelOptInfo::joininfo, RelOptInfo::lateral_referencers, RelOptInfo::lateral_relids, RelOptInfo::lateral_vars, RelOptInfo::live_parts, makeNode, RelOptInfo::max_attr, RelOptInfo::min_attr, NIL, RelOptInfo::notnullattnums, RelOptInfo::nparts, RelOptInfo::nulling_relids, RelOptInfo::pages, RelOptInfo::partbounds_merged, RelOptInfo::partial_pathlist, RelOptInfo::partition_qual, RelOptInfo::pathlist, QualCost::per_tuple, RelOptInfo::ppilist, RelOptInfo::relid, RelOptInfo::relids, RELOPT_OTHER_JOINREL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, RelOptInfo::rows, RTE_JOIN, RelOptInfo::rtekind, RelOptInfo::serverid, set_foreign_rel_properties(), set_joinrel_size_estimates(), QualCost::startup, RelOptInfo::subplan_params, RelOptInfo::subroot, RelOptInfo::top_parent_relids, RelOptInfo::tuples, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by try_partitionwise_join().
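
A hedged sketch of the call pattern in try_partitionwise_join(), the only caller: the AppendRelInfos covering both child sides are looked up so that parent-level structures can be translated to the child join. The wrapper function and its argument list are illustrative; child_sjinfo and child_restrictlist are assumed to have already been translated by the caller.

#include "postgres.h"
#include "optimizer/appendinfo.h"
#include "optimizer/pathnode.h"

/* Illustrative wrapper around build_child_join_rel(). */
static RelOptInfo *
make_child_joinrel(PlannerInfo *root,
                   RelOptInfo *child_rel1, RelOptInfo *child_rel2,
                   RelOptInfo *parent_joinrel,
                   SpecialJoinInfo *child_sjinfo, List *child_restrictlist)
{
    Relids          child_joinrelids;
    int             nappinfos;
    AppendRelInfo **appinfos;
    RelOptInfo     *child_joinrel;

    child_joinrelids = bms_union(child_rel1->relids, child_rel2->relids);
    appinfos = find_appinfos_by_relids(root, child_joinrelids, &nappinfos);

    child_joinrel = build_child_join_rel(root, child_rel1, child_rel2,
                                         parent_joinrel, child_restrictlist,
                                         child_sjinfo, nappinfos, appinfos);

    pfree(appinfos);
    return child_joinrel;
}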

◆ build_join_rel()

RelOptInfo * build_join_rel ( PlannerInfo * root,
Relids  joinrelids,
RelOptInfo * outer_rel,
RelOptInfo * inner_rel,
SpecialJoinInfo * sjinfo,
List * pushed_down_joins,
List **  restrictlist_ptr 
)

Definition at line 665 of file relnode.c.

672{
673 RelOptInfo *joinrel;
674 List *restrictlist;
675
676 /* This function should be used only for join between parents. */
677 Assert(!IS_OTHER_REL(outer_rel) && !IS_OTHER_REL(inner_rel));
678
679 /*
680 * See if we already have a joinrel for this set of base rels.
681 */
682 joinrel = find_join_rel(root, joinrelids);
683
684 if (joinrel)
685 {
686 /*
687 * Yes, so we only need to figure the restrictlist for this particular
688 * pair of component relations.
689 */
690 if (restrictlist_ptr)
691 *restrictlist_ptr = build_joinrel_restrictlist(root,
692 joinrel,
693 outer_rel,
694 inner_rel,
695 sjinfo);
696 return joinrel;
697 }
698
699 /*
700 * Nope, so make one.
701 */
702 joinrel = makeNode(RelOptInfo);
703 joinrel->reloptkind = RELOPT_JOINREL;
704 joinrel->relids = bms_copy(joinrelids);
705 joinrel->rows = 0;
706 /* cheap startup cost is interesting iff not all tuples to be retrieved */
707 joinrel->consider_startup = (root->tuple_fraction > 0);
708 joinrel->consider_param_startup = false;
709 joinrel->consider_parallel = false;
710 joinrel->reltarget = create_empty_pathtarget();
711 joinrel->pathlist = NIL;
712 joinrel->ppilist = NIL;
713 joinrel->partial_pathlist = NIL;
714 joinrel->cheapest_startup_path = NULL;
715 joinrel->cheapest_total_path = NULL;
716 joinrel->cheapest_unique_path = NULL;
717 joinrel->cheapest_parameterized_paths = NIL;
718 /* init direct_lateral_relids from children; we'll finish it up below */
719 joinrel->direct_lateral_relids =
720 bms_union(outer_rel->direct_lateral_relids,
721 inner_rel->direct_lateral_relids);
722 joinrel->lateral_relids = min_join_parameterization(root, joinrelids,
723 outer_rel, inner_rel);
724 joinrel->relid = 0; /* indicates not a baserel */
725 joinrel->rtekind = RTE_JOIN;
726 joinrel->min_attr = 0;
727 joinrel->max_attr = 0;
728 joinrel->attr_needed = NULL;
729 joinrel->attr_widths = NULL;
730 joinrel->notnullattnums = NULL;
731 joinrel->nulling_relids = NULL;
732 joinrel->lateral_vars = NIL;
733 joinrel->lateral_referencers = NULL;
734 joinrel->indexlist = NIL;
735 joinrel->statlist = NIL;
736 joinrel->pages = 0;
737 joinrel->tuples = 0;
738 joinrel->allvisfrac = 0;
739 joinrel->eclass_indexes = NULL;
740 joinrel->subroot = NULL;
741 joinrel->subplan_params = NIL;
742 joinrel->rel_parallel_workers = -1;
743 joinrel->amflags = 0;
744 joinrel->serverid = InvalidOid;
745 joinrel->userid = InvalidOid;
746 joinrel->useridiscurrent = false;
747 joinrel->fdwroutine = NULL;
748 joinrel->fdw_private = NULL;
749 joinrel->unique_for_rels = NIL;
750 joinrel->non_unique_for_rels = NIL;
751 joinrel->baserestrictinfo = NIL;
752 joinrel->baserestrictcost.startup = 0;
753 joinrel->baserestrictcost.per_tuple = 0;
754 joinrel->baserestrict_min_security = UINT_MAX;
755 joinrel->joininfo = NIL;
756 joinrel->has_eclass_joins = false;
757 joinrel->consider_partitionwise_join = false; /* might get changed later */
758 joinrel->parent = NULL;
759 joinrel->top_parent = NULL;
760 joinrel->top_parent_relids = NULL;
761 joinrel->part_scheme = NULL;
762 joinrel->nparts = -1;
763 joinrel->boundinfo = NULL;
764 joinrel->partbounds_merged = false;
765 joinrel->partition_qual = NIL;
766 joinrel->part_rels = NULL;
767 joinrel->live_parts = NULL;
768 joinrel->all_partrels = NULL;
769 joinrel->partexprs = NULL;
770 joinrel->nullable_partexprs = NULL;
771
772 /* Compute information relevant to the foreign relations. */
773 set_foreign_rel_properties(joinrel, outer_rel, inner_rel);
774
775 /*
776 * Fill the joinrel's tlist with just the Vars and PHVs that need to be
777 * output from this join (ie, are needed for higher joinclauses or final
778 * output).
779 *
780 * NOTE: the tlist order for a join rel will depend on which pair of outer
781 * and inner rels we first try to build it from. But the contents should
782 * be the same regardless.
783 */
784 build_joinrel_tlist(root, joinrel, outer_rel, sjinfo, pushed_down_joins,
785 (sjinfo->jointype == JOIN_FULL));
786 build_joinrel_tlist(root, joinrel, inner_rel, sjinfo, pushed_down_joins,
787 (sjinfo->jointype != JOIN_INNER));
788 add_placeholders_to_joinrel(root, joinrel, outer_rel, inner_rel, sjinfo);
789
790 /*
791 * add_placeholders_to_joinrel also took care of adding the ph_lateral
792 * sets of any PlaceHolderVars computed here to direct_lateral_relids, so
793 * now we can finish computing that. This is much like the computation of
794 * the transitively-closed lateral_relids in min_join_parameterization,
795 * except that here we *do* have to consider the added PHVs.
796 */
797 joinrel->direct_lateral_relids =
798 bms_del_members(joinrel->direct_lateral_relids, joinrel->relids);
799
800 /*
801 * Construct restrict and join clause lists for the new joinrel. (The
802 * caller might or might not need the restrictlist, but I need it anyway
803 * for set_joinrel_size_estimates().)
804 */
805 restrictlist = build_joinrel_restrictlist(root, joinrel,
806 outer_rel, inner_rel,
807 sjinfo);
808 if (restrictlist_ptr)
809 *restrictlist_ptr = restrictlist;
810 build_joinrel_joinlist(joinrel, outer_rel, inner_rel);
811
812 /*
813 * This is also the right place to check whether the joinrel has any
814 * pending EquivalenceClass joins.
815 */
816 joinrel->has_eclass_joins = has_relevant_eclass_joinclause(root, joinrel);
817
818 /* Store the partition information. */
819 build_joinrel_partition_info(root, joinrel, outer_rel, inner_rel, sjinfo,
820 restrictlist);
821
822 /*
823 * Set estimates of the joinrel's size.
824 */
825 set_joinrel_size_estimates(root, joinrel, outer_rel, inner_rel,
826 sjinfo, restrictlist);
827
828 /*
829 * Set the consider_parallel flag if this joinrel could potentially be
830 * scanned within a parallel worker. If this flag is false for either
831 * inner_rel or outer_rel, then it must be false for the joinrel also.
832 * Even if both are true, there might be parallel-restricted expressions
833 * in the targetlist or quals.
834 *
835 * Note that if there are more than two rels in this relation, they could
836 * be divided between inner_rel and outer_rel in any arbitrary way. We
837 * assume this doesn't matter, because we should hit all the same baserels
838 * and joinclauses while building up to this joinrel no matter which we
839 * take; therefore, we should make the same decision here however we get
840 * here.
841 */
842 if (inner_rel->consider_parallel && outer_rel->consider_parallel &&
843 is_parallel_safe(root, (Node *) restrictlist) &&
844 is_parallel_safe(root, (Node *) joinrel->reltarget->exprs))
845 joinrel->consider_parallel = true;
846
847 /* Add the joinrel to the PlannerInfo. */
848 add_join_rel(root, joinrel);
849
850 /*
851 * Also, if dynamic-programming join search is active, add the new joinrel
852 * to the appropriate sublist. Note: you might think the Assert on number
853 * of members should be for equality, but some of the level 1 rels might
854 * have been joinrels already, so we can only assert <=.
855 */
856 if (root->join_rel_level)
857 {
858 Assert(root->join_cur_level > 0);
859 Assert(root->join_cur_level <= bms_num_members(joinrel->relids));
860 root->join_rel_level[root->join_cur_level] =
861 lappend(root->join_rel_level[root->join_cur_level], joinrel);
862 }
863
864 return joinrel;
865}
Bitmapset * bms_del_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:1161
int bms_num_members(const Bitmapset *a)
Definition: bitmapset.c:751
Bitmapset * bms_union(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:251
bool has_relevant_eclass_joinclause(PlannerInfo *root, RelOptInfo *rel1)
Definition: equivclass.c:3222
List * lappend(List *list, void *datum)
Definition: list.c:339
@ JOIN_FULL
Definition: nodes.h:295
@ JOIN_INNER
Definition: nodes.h:293
@ RELOPT_JOINREL
Definition: pathnodes.h:828
void add_placeholders_to_joinrel(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo)
Definition: placeholder.c:400
static void build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *input_rel, SpecialJoinInfo *sjinfo, List *pushed_down_joins, bool can_null)
Definition: relnode.c:1100
Relids min_join_parameterization(PlannerInfo *root, Relids joinrelids, RelOptInfo *outer_rel, RelOptInfo *inner_rel)
Definition: relnode.c:1022
static List * build_joinrel_restrictlist(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo)
Definition: relnode.c:1285
static void build_joinrel_joinlist(RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel)
Definition: relnode.c:1322
List * statlist
Definition: pathnodes.h:946
List * unique_for_rels
Definition: pathnodes.h:977
List * non_unique_for_rels
Definition: pathnodes.h:979
int rel_parallel_workers
Definition: pathnodes.h:956
Index baserestrict_min_security
Definition: pathnodes.h:989
JoinType jointype
Definition: pathnodes.h:2909

References add_join_rel(), add_placeholders_to_joinrel(), RelOptInfo::all_partrels, RelOptInfo::allvisfrac, RelOptInfo::amflags, Assert, RelOptInfo::baserestrict_min_security, RelOptInfo::baserestrictcost, RelOptInfo::baserestrictinfo, bms_copy(), bms_del_members(), bms_num_members(), bms_union(), build_joinrel_joinlist(), build_joinrel_partition_info(), build_joinrel_restrictlist(), build_joinrel_tlist(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RelOptInfo::cheapest_unique_path, RelOptInfo::consider_parallel, RelOptInfo::consider_param_startup, RelOptInfo::consider_partitionwise_join, RelOptInfo::consider_startup, create_empty_pathtarget(), RelOptInfo::direct_lateral_relids, RelOptInfo::eclass_indexes, PathTarget::exprs, find_join_rel(), RelOptInfo::has_eclass_joins, has_relevant_eclass_joinclause(), RelOptInfo::indexlist, InvalidOid, IS_OTHER_REL, is_parallel_safe(), JOIN_FULL, JOIN_INNER, RelOptInfo::joininfo, SpecialJoinInfo::jointype, lappend(), RelOptInfo::lateral_referencers, RelOptInfo::lateral_relids, RelOptInfo::lateral_vars, RelOptInfo::live_parts, makeNode, RelOptInfo::max_attr, RelOptInfo::min_attr, min_join_parameterization(), NIL, RelOptInfo::non_unique_for_rels, RelOptInfo::notnullattnums, RelOptInfo::nparts, RelOptInfo::nulling_relids, RelOptInfo::pages, RelOptInfo::partbounds_merged, RelOptInfo::partial_pathlist, RelOptInfo::partition_qual, RelOptInfo::pathlist, QualCost::per_tuple, RelOptInfo::ppilist, RelOptInfo::rel_parallel_workers, RelOptInfo::relid, RelOptInfo::relids, RELOPT_JOINREL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, RelOptInfo::rows, RTE_JOIN, RelOptInfo::rtekind, RelOptInfo::serverid, set_foreign_rel_properties(), set_joinrel_size_estimates(), QualCost::startup, RelOptInfo::statlist, RelOptInfo::subplan_params, RelOptInfo::subroot, RelOptInfo::top_parent_relids, RelOptInfo::tuples, RelOptInfo::unique_for_rels, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by make_join_rel().
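
As a minimal sketch of the make_join_rel() calling convention (the wrapper name is illustrative, and passing NIL for pushed_down_joins is an assumption that no outer joins are being pushed down at this level): the function either finds the existing joinrel for the given relid set or builds a new one, and hands back the join-level restriction list through the output parameter.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Illustrative wrapper: fetch or build the joinrel for rel1 joined to rel2. */
static RelOptInfo *
get_joinrel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
            Relids joinrelids, SpecialJoinInfo *sjinfo, List **restrictlist_p)
{
    return build_join_rel(root, joinrelids, rel1, rel2,
                          sjinfo, NIL,          /* pushed_down_joins */
                          restrictlist_p);
}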

◆ build_simple_rel()

RelOptInfo * build_simple_rel ( PlannerInfo * root,
int  relid,
RelOptInfo * parent 
)

Definition at line 192 of file relnode.c.

193{
194 RelOptInfo *rel;
195 RangeTblEntry *rte;
196
197 /* Rel should not exist already */
198 Assert(relid > 0 && relid < root->simple_rel_array_size);
199 if (root->simple_rel_array[relid] != NULL)
200 elog(ERROR, "rel %d already exists", relid);
201
202 /* Fetch RTE for relation */
203 rte = root->simple_rte_array[relid];
204 Assert(rte != NULL);
205
206 rel = makeNode(RelOptInfo);
207 rel->reloptkind = parent ? RELOPT_OTHER_MEMBER_REL : RELOPT_BASEREL;
208 rel->relids = bms_make_singleton(relid);
209 rel->rows = 0;
210 /* cheap startup cost is interesting iff not all tuples to be retrieved */
211 rel->consider_startup = (root->tuple_fraction > 0);
212 rel->consider_param_startup = false; /* might get changed later */
213 rel->consider_parallel = false; /* might get changed later */
214 rel->reltarget = create_empty_pathtarget();
215 rel->pathlist = NIL;
216 rel->ppilist = NIL;
217 rel->partial_pathlist = NIL;
218 rel->cheapest_startup_path = NULL;
219 rel->cheapest_total_path = NULL;
220 rel->cheapest_unique_path = NULL;
221 rel->cheapest_parameterized_paths = NIL;
222 rel->relid = relid;
223 rel->rtekind = rte->rtekind;
224 /* min_attr, max_attr, attr_needed, attr_widths are set below */
225 rel->notnullattnums = NULL;
226 rel->lateral_vars = NIL;
227 rel->indexlist = NIL;
228 rel->statlist = NIL;
229 rel->pages = 0;
230 rel->tuples = 0;
231 rel->allvisfrac = 0;
232 rel->eclass_indexes = NULL;
233 rel->subroot = NULL;
234 rel->subplan_params = NIL;
235 rel->rel_parallel_workers = -1; /* set up in get_relation_info */
236 rel->amflags = 0;
237 rel->serverid = InvalidOid;
238 if (rte->rtekind == RTE_RELATION)
239 {
240 Assert(parent == NULL ||
241 parent->rtekind == RTE_RELATION ||
242 parent->rtekind == RTE_SUBQUERY);
243
244 /*
245 * For any RELATION rte, we need a userid with which to check
246 * permission access. Baserels simply use their own
247 * RTEPermissionInfo's checkAsUser.
248 *
249 * For otherrels normally there's no RTEPermissionInfo, so we use the
250 * parent's, which normally has one. The exceptional case is that the
251 * parent is a subquery, in which case the otherrel will have its own.
252 */
253 if (rel->reloptkind == RELOPT_BASEREL ||
254 (rel->reloptkind == RELOPT_OTHER_MEMBER_REL &&
255 parent->rtekind == RTE_SUBQUERY))
256 {
257 RTEPermissionInfo *perminfo;
258
259 perminfo = getRTEPermissionInfo(root->parse->rteperminfos, rte);
260 rel->userid = perminfo->checkAsUser;
261 }
262 else
263 rel->userid = parent->userid;
264 }
265 else
266 rel->userid = InvalidOid;
267 rel->useridiscurrent = false;
268 rel->fdwroutine = NULL;
269 rel->fdw_private = NULL;
270 rel->unique_for_rels = NIL;
271 rel->non_unique_for_rels = NIL;
272 rel->baserestrictinfo = NIL;
273 rel->baserestrictcost.startup = 0;
274 rel->baserestrictcost.per_tuple = 0;
275 rel->baserestrict_min_security = UINT_MAX;
276 rel->joininfo = NIL;
277 rel->has_eclass_joins = false;
278 rel->consider_partitionwise_join = false; /* might get changed later */
279 rel->part_scheme = NULL;
280 rel->nparts = -1;
281 rel->boundinfo = NULL;
282 rel->partbounds_merged = false;
283 rel->partition_qual = NIL;
284 rel->part_rels = NULL;
285 rel->live_parts = NULL;
286 rel->all_partrels = NULL;
287 rel->partexprs = NULL;
288 rel->nullable_partexprs = NULL;
289
290 /*
291 * Pass assorted information down the inheritance hierarchy.
292 */
293 if (parent)
294 {
295 /* We keep back-links to immediate parent and topmost parent. */
296 rel->parent = parent;
297 rel->top_parent = parent->top_parent ? parent->top_parent : parent;
298 rel->top_parent_relids = rel->top_parent->relids;
299
300 /*
301 * A child rel is below the same outer joins as its parent. (We
302 * presume this info was already calculated for the parent.)
303 */
304 rel->nulling_relids = parent->nulling_relids;
305
306 /*
307 * Also propagate lateral-reference information from appendrel parent
308 * rels to their child rels. We intentionally give each child rel the
309 * same minimum parameterization, even though it's quite possible that
310 * some don't reference all the lateral rels. This is because any
311 * append path for the parent will have to have the same
312 * parameterization for every child anyway, and there's no value in
313 * forcing extra reparameterize_path() calls. Similarly, a lateral
314 * reference to the parent prevents use of otherwise-movable join rels
315 * for each child.
316 *
317 * It's possible for child rels to have their own children, in which
318 * case the topmost parent's lateral info propagates all the way down.
319 */
320 rel->direct_lateral_relids = parent->direct_lateral_relids;
321 rel->lateral_relids = parent->lateral_relids;
322 rel->lateral_referencers = parent->lateral_referencers;
323 }
324 else
325 {
326 rel->parent = NULL;
327 rel->top_parent = NULL;
328 rel->top_parent_relids = NULL;
329 rel->nulling_relids = NULL;
330 rel->direct_lateral_relids = NULL;
331 rel->lateral_relids = NULL;
332 rel->lateral_referencers = NULL;
333 }
334
335 /* Check type of rtable entry */
336 switch (rte->rtekind)
337 {
338 case RTE_RELATION:
339 /* Table --- retrieve statistics from the system catalogs */
340 get_relation_info(root, rte->relid, rte->inh, rel);
341 break;
342 case RTE_SUBQUERY:
343 case RTE_FUNCTION:
344 case RTE_TABLEFUNC:
345 case RTE_VALUES:
346 case RTE_CTE:
347 case RTE_NAMEDTUPLESTORE:
348
349 /*
350 * Subquery, function, tablefunc, values list, CTE, or ENR --- set
351 * up attr range and arrays
352 *
353 * Note: 0 is included in range to support whole-row Vars
354 */
355 rel->min_attr = 0;
356 rel->max_attr = list_length(rte->eref->colnames);
357 rel->attr_needed = (Relids *)
358 palloc0((rel->max_attr - rel->min_attr + 1) * sizeof(Relids));
359 rel->attr_widths = (int32 *)
360 palloc0((rel->max_attr - rel->min_attr + 1) * sizeof(int32));
361 break;
362 case RTE_RESULT:
363 /* RTE_RESULT has no columns, nor could it have whole-row Var */
364 rel->min_attr = 0;
365 rel->max_attr = -1;
366 rel->attr_needed = NULL;
367 rel->attr_widths = NULL;
368 break;
369 default:
370 elog(ERROR, "unrecognized RTE kind: %d",
371 (int) rte->rtekind);
372 break;
373 }
374
375 /*
376 * We must apply the partially filled in RelOptInfo before calling
377 * apply_child_basequals due to some transformations within that function
378 * which require the RelOptInfo to be available in the simple_rel_array.
379 */
380 root->simple_rel_array[relid] = rel;
381
382 /*
383 * Apply the parent's quals to the child, with appropriate substitution of
384 * variables. If the resulting clause is constant-FALSE or NULL after
385 * applying transformations, apply_child_basequals returns false to
386 * indicate that scanning this relation won't yield any rows. In this
387 * case, we mark the child as dummy right away. (We must do this
388 * immediately so that pruning works correctly when recursing in
389 * expand_partitioned_rtentry.)
390 */
391 if (parent)
392 {
393 AppendRelInfo *appinfo = root->append_rel_array[relid];
394
395 Assert(appinfo != NULL);
396 if (!apply_child_basequals(root, parent, rel, rte, appinfo))
397 {
398 /*
399 * Restriction clause reduced to constant FALSE or NULL. Mark as
400 * dummy so we won't scan this relation.
401 */
402 mark_dummy_rel(rel);
403 }
404 }
405
406 return rel;
407}
Bitmapset * bms_make_singleton(int x)
Definition: bitmapset.c:216
int32_t int32
Definition: c.h:481
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:225
bool apply_child_basequals(PlannerInfo *root, RelOptInfo *parentrel, RelOptInfo *childrel, RangeTblEntry *childRTE, AppendRelInfo *appinfo)
Definition: inherit.c:843
void mark_dummy_rel(RelOptInfo *rel)
Definition: joinrels.c:1384
void * palloc0(Size size)
Definition: mcxt.c:1347
RTEPermissionInfo * getRTEPermissionInfo(List *rteperminfos, RangeTblEntry *rte)
@ RTE_CTE
Definition: parsenodes.h:1023
@ RTE_NAMEDTUPLESTORE
Definition: parsenodes.h:1024
@ RTE_VALUES
Definition: parsenodes.h:1022
@ RTE_SUBQUERY
Definition: parsenodes.h:1018
@ RTE_RESULT
Definition: parsenodes.h:1025
@ RTE_FUNCTION
Definition: parsenodes.h:1020
@ RTE_TABLEFUNC
Definition: parsenodes.h:1021
@ RTE_RELATION
Definition: parsenodes.h:1017
@ RELOPT_BASEREL
Definition: pathnodes.h:827
@ RELOPT_OTHER_MEMBER_REL
Definition: pathnodes.h:829
static int list_length(const List *l)
Definition: pg_list.h:152
void get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent, RelOptInfo *rel)
Definition: plancat.c:116
RTEKind rtekind
Definition: parsenodes.h:1047

References RelOptInfo::all_partrels, RelOptInfo::allvisfrac, RelOptInfo::amflags, apply_child_basequals(), Assert, RelOptInfo::baserestrict_min_security, RelOptInfo::baserestrictcost, RelOptInfo::baserestrictinfo, bms_make_singleton(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RelOptInfo::cheapest_unique_path, RTEPermissionInfo::checkAsUser, RelOptInfo::consider_parallel, RelOptInfo::consider_param_startup, RelOptInfo::consider_partitionwise_join, RelOptInfo::consider_startup, create_empty_pathtarget(), RelOptInfo::direct_lateral_relids, RelOptInfo::eclass_indexes, elog, ERROR, get_relation_info(), getRTEPermissionInfo(), RelOptInfo::has_eclass_joins, RelOptInfo::indexlist, RangeTblEntry::inh, InvalidOid, RelOptInfo::joininfo, RelOptInfo::lateral_referencers, RelOptInfo::lateral_relids, RelOptInfo::lateral_vars, list_length(), RelOptInfo::live_parts, makeNode, mark_dummy_rel(), RelOptInfo::max_attr, RelOptInfo::min_attr, NIL, RelOptInfo::non_unique_for_rels, RelOptInfo::notnullattnums, RelOptInfo::nparts, RelOptInfo::nulling_relids, RelOptInfo::pages, palloc0(), RelOptInfo::partbounds_merged, RelOptInfo::partial_pathlist, RelOptInfo::partition_qual, RelOptInfo::pathlist, QualCost::per_tuple, RelOptInfo::ppilist, RelOptInfo::rel_parallel_workers, RangeTblEntry::relid, RelOptInfo::relid, RelOptInfo::relids, RELOPT_BASEREL, RELOPT_OTHER_MEMBER_REL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, RelOptInfo::rows, RTE_CTE, RTE_FUNCTION, RTE_NAMEDTUPLESTORE, RTE_RELATION, RTE_RESULT, RTE_SUBQUERY, RTE_TABLEFUNC, RTE_VALUES, RangeTblEntry::rtekind, RelOptInfo::rtekind, RelOptInfo::serverid, QualCost::startup, RelOptInfo::statlist, RelOptInfo::subplan_params, RelOptInfo::subroot, RelOptInfo::top_parent_relids, RelOptInfo::tuples, RelOptInfo::unique_for_rels, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by add_base_rels_to_query(), expand_appendrel_subquery(), expand_inherited_rtentry(), expand_partitioned_rtentry(), plan_cluster_use_sort(), plan_create_index_workers(), query_planner(), and recurse_set_operations().

◆ calc_nestloop_required_outer()

Relids calc_nestloop_required_outer ( Relids  outerrelids,
Relids  outer_paramrels,
Relids  innerrelids,
Relids  inner_paramrels 
)

Definition at line 2456 of file pathnode.c.

2460{
2461 Relids required_outer;
2462
2463 /* inner_path can require rels from outer path, but not vice versa */
2464 Assert(!bms_overlap(outer_paramrels, innerrelids));
2465 /* easy case if inner path is not parameterized */
2466 if (!inner_paramrels)
2467 return bms_copy(outer_paramrels);
2468 /* else, form the union ... */
2469 required_outer = bms_union(outer_paramrels, inner_paramrels);
2470 /* ... and remove any mention of now-satisfied outer rels */
2471 required_outer = bms_del_members(required_outer,
2472 outerrelids);
2473 return required_outer;
2474}
bool bms_overlap(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:582

References Assert, bms_copy(), bms_del_members(), bms_overlap(), and bms_union().

Referenced by try_nestloop_path().
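
The following is an illustrative sketch (not taken from the PostgreSQL sources) of the set arithmetic above, using made-up relid numbers: the inner path's dependence on the outer relation is satisfied by the join and disappears, while its dependence on an unrelated rel survives.

#include "postgres.h"
#include "nodes/bitmapset.h"
#include "optimizer/pathnode.h"

/* Hypothetical relids: outer rel = 1, inner rel = 2, an unrelated rel = 5. */
static Relids
example_nestloop_required_outer(void)
{
    Relids outerrelids = bms_make_singleton(1);
    Relids outer_paramrels = bms_make_singleton(5);     /* outer path needs rel 5 */
    Relids innerrelids = bms_make_singleton(2);
    Relids inner_paramrels = bms_add_member(bms_make_singleton(5), 1); /* inner needs {1,5} */

    /*
     * bms_union() gives {1,5}; bms_del_members() then drops rel 1 because
     * the outer side supplies it, so the nestloop as a whole requires only
     * rel 5 from above.
     */
    return calc_nestloop_required_outer(outerrelids, outer_paramrels,
                                        innerrelids, inner_paramrels);
}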

◆ calc_non_nestloop_required_outer()

Relids calc_non_nestloop_required_outer ( Path *  outer_path,
Path *  inner_path 
)

Definition at line 2483 of file pathnode.c.

2484{
2485 Relids outer_paramrels = PATH_REQ_OUTER(outer_path);
2486 Relids inner_paramrels = PATH_REQ_OUTER(inner_path);
2487 Relids innerrelids PG_USED_FOR_ASSERTS_ONLY;
2488 Relids outerrelids PG_USED_FOR_ASSERTS_ONLY;
2489 Relids required_outer;
2490
2491 /*
2492 * Any parameterization of the input paths refers to topmost parents of
2493 * the relevant relations, because reparameterize_path_by_child() hasn't
2494 * been called yet. So we must consider topmost parents of the relations
2495 * being joined, too, while checking for disallowed parameterization
2496 * cases.
2497 */
2498 if (inner_path->parent->top_parent_relids)
2499 innerrelids = inner_path->parent->top_parent_relids;
2500 else
2501 innerrelids = inner_path->parent->relids;
2502
2503 if (outer_path->parent->top_parent_relids)
2504 outerrelids = outer_path->parent->top_parent_relids;
2505 else
2506 outerrelids = outer_path->parent->relids;
2507
2508 /* neither path can require rels from the other */
2509 Assert(!bms_overlap(outer_paramrels, innerrelids));
2510 Assert(!bms_overlap(inner_paramrels, outerrelids));
2511 /* form the union ... */
2512 required_outer = bms_union(outer_paramrels, inner_paramrels);
2513 /* we do not need an explicit test for empty; bms_union gets it right */
2514 return required_outer;
2515}
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:201

References Assert, bms_overlap(), bms_union(), PATH_REQ_OUTER, and PG_USED_FOR_ASSERTS_ONLY.

Referenced by try_hashjoin_path(), and try_mergejoin_path().

◆ compare_fractional_path_costs()

int compare_fractional_path_costs ( Path *  path1,
Path *  path2,
double  fraction 
)

Definition at line 124 of file pathnode.c.

126{
127 Cost cost1,
128 cost2;
129
130 /* Number of disabled nodes, if different, trumps all else. */
131 if (unlikely(path1->disabled_nodes != path2->disabled_nodes))
132 {
133 if (path1->disabled_nodes < path2->disabled_nodes)
134 return -1;
135 else
136 return +1;
137 }
138
139 if (fraction <= 0.0 || fraction >= 1.0)
140 return compare_path_costs(path1, path2, TOTAL_COST);
141 cost1 = path1->startup_cost +
142 fraction * (path1->total_cost - path1->startup_cost);
143 cost2 = path2->startup_cost +
144 fraction * (path2->total_cost - path2->startup_cost);
145 if (cost1 < cost2)
146 return -1;
147 if (cost1 > cost2)
148 return +1;
149 return 0;
150}
int compare_path_costs(Path *path1, Path *path2, CostSelector criterion)
Definition: pathnode.c:69
@ TOTAL_COST
Definition: pathnodes.h:38

References compare_path_costs(), Path::disabled_nodes, Path::startup_cost, TOTAL_COST, Path::total_cost, and unlikely.

Referenced by get_cheapest_fractional_path(), and get_cheapest_fractional_path_for_pathkeys().
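
A small worked example of the interpolation rule above, with invented cost numbers:

/* Assumed costs, purely illustrative. */
Cost    startup1 = 10.0, total1 = 110.0;    /* path1 */
Cost    startup2 = 40.0, total2 = 90.0;     /* path2 */
double  fraction = 0.25;                    /* we expect to fetch 25% of the rows */

Cost    cost1 = startup1 + fraction * (total1 - startup1);  /* 10 + 0.25 * 100 = 35.0 */
Cost    cost2 = startup2 + fraction * (total2 - startup2);  /* 40 + 0.25 * 50  = 52.5 */

/*
 * path1 wins at fraction 0.25 (35.0 < 52.5) even though path2 has the lower
 * total cost; with fraction <= 0.0 or >= 1.0 the function instead falls back
 * to compare_path_costs() on TOTAL_COST, where path2 would win.
 */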

◆ compare_path_costs()

int compare_path_costs ( Path *  path1,
Path *  path2,
CostSelector  criterion 
)

Definition at line 69 of file pathnode.c.

70{
71 /* Number of disabled nodes, if different, trumps all else. */
72 if (unlikely(path1->disabled_nodes != path2->disabled_nodes))
73 {
74 if (path1->disabled_nodes < path2->disabled_nodes)
75 return -1;
76 else
77 return +1;
78 }
79
80 if (criterion == STARTUP_COST)
81 {
82 if (path1->startup_cost < path2->startup_cost)
83 return -1;
84 if (path1->startup_cost > path2->startup_cost)
85 return +1;
86
87 /*
88 * If paths have the same startup cost (not at all unlikely), order
89 * them by total cost.
90 */
91 if (path1->total_cost < path2->total_cost)
92 return -1;
93 if (path1->total_cost > path2->total_cost)
94 return +1;
95 }
96 else
97 {
98 if (path1->total_cost < path2->total_cost)
99 return -1;
100 if (path1->total_cost > path2->total_cost)
101 return +1;
102
103 /*
104 * If paths have the same total cost, order them by startup cost.
105 */
106 if (path1->startup_cost < path2->startup_cost)
107 return -1;
108 if (path1->startup_cost > path2->startup_cost)
109 return +1;
110 }
111 return 0;
112}
@ STARTUP_COST
Definition: pathnodes.h:38

References Path::disabled_nodes, STARTUP_COST, Path::startup_cost, Path::total_cost, and unlikely.

Referenced by append_startup_cost_compare(), append_total_cost_compare(), compare_fractional_path_costs(), generate_mergejoin_paths(), get_cheapest_parameterized_child_path(), get_cheapest_path_for_pathkeys(), and set_cheapest().
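
As a usage illustration, the simplified sketch below picks the member of a rel's pathlist with the lowest startup cost. It is a stripped-down variant of what set_cheapest() does (ignoring parameterization and the rest of that function's bookkeeping), not the actual implementation.

#include "postgres.h"
#include "nodes/pathnodes.h"
#include "optimizer/pathnode.h"

static Path *
cheapest_startup_member(RelOptInfo *rel)
{
    Path     *best = NULL;
    ListCell *lc;

    foreach(lc, rel->pathlist)
    {
        Path *path = (Path *) lfirst(lc);

        if (best == NULL ||
            compare_path_costs(path, best, STARTUP_COST) < 0)
            best = path;
    }
    return best;
}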

◆ create_agg_path()

AggPath * create_agg_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Path *  subpath,
PathTarget *  target,
AggStrategy  aggstrategy,
AggSplit  aggsplit,
List *  groupClause,
List *  qual,
const AggClauseCosts *  aggcosts,
double  numGroups 
)

Definition at line 3240 of file pathnode.c.

3250{
3251 AggPath *pathnode = makeNode(AggPath);
3252
3253 pathnode->path.pathtype = T_Agg;
3254 pathnode->path.parent = rel;
3255 pathnode->path.pathtarget = target;
3256 /* For now, assume we are above any joins, so no parameterization */
3257 pathnode->path.param_info = NULL;
3258 pathnode->path.parallel_aware = false;
3259 pathnode->path.parallel_safe = rel->consider_parallel &&
3260 subpath->parallel_safe;
3261 pathnode->path.parallel_workers = subpath->parallel_workers;
3262
3263 if (aggstrategy == AGG_SORTED)
3264 {
3265 /*
3266 * Attempt to preserve the order of the subpath. Additional pathkeys
3267 * may have been added in adjust_group_pathkeys_for_groupagg() to
3268 * support ORDER BY / DISTINCT aggregates. Pathkeys added there
3269 * belong to columns within the aggregate function, so we must strip
3270 * these additional pathkeys off as those columns are unavailable
3271 * above the aggregate node.
3272 */
3273 if (list_length(subpath->pathkeys) > root->num_groupby_pathkeys)
3274 pathnode->path.pathkeys = list_copy_head(subpath->pathkeys,
3275 root->num_groupby_pathkeys);
3276 else
3277 pathnode->path.pathkeys = subpath->pathkeys; /* preserves order */
3278 }
3279 else
3280 pathnode->path.pathkeys = NIL; /* output is unordered */
3281
3282 pathnode->subpath = subpath;
3283
3284 pathnode->aggstrategy = aggstrategy;
3285 pathnode->aggsplit = aggsplit;
3286 pathnode->numGroups = numGroups;
3287 pathnode->transitionSpace = aggcosts ? aggcosts->transitionSpace : 0;
3288 pathnode->groupClause = groupClause;
3289 pathnode->qual = qual;
3290
3291 cost_agg(&pathnode->path, root,
3292 aggstrategy, aggcosts,
3293 list_length(groupClause), numGroups,
3294 qual,
3295 subpath->disabled_nodes,
3296 subpath->startup_cost, subpath->total_cost,
3297 subpath->rows, subpath->pathtarget->width);
3298
3299 /* add tlist eval cost for each output row */
3300 pathnode->path.startup_cost += target->cost.startup;
3301 pathnode->path.total_cost += target->cost.startup +
3302 target->cost.per_tuple * pathnode->path.rows;
3303
3304 return pathnode;
3305}
void cost_agg(Path *path, PlannerInfo *root, AggStrategy aggstrategy, const AggClauseCosts *aggcosts, int numGroupCols, double numGroups, List *quals, int disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double input_tuples, double input_width)
Definition: costsize.c:2682
List * list_copy_head(const List *oldlist, int len)
Definition: list.c:1593
Datum subpath(PG_FUNCTION_ARGS)
Definition: ltree_op.c:308
@ AGG_SORTED
Definition: nodes.h:355
Size transitionSpace
Definition: pathnodes.h:62
Path * subpath
Definition: pathnodes.h:2266
Cardinality numGroups
Definition: pathnodes.h:2269
AggSplit aggsplit
Definition: pathnodes.h:2268
List * groupClause
Definition: pathnodes.h:2271
uint64 transitionSpace
Definition: pathnodes.h:2270
AggStrategy aggstrategy
Definition: pathnodes.h:2267
Path path
Definition: pathnodes.h:2265
List * qual
Definition: pathnodes.h:2272
NodeTag pathtype
Definition: pathnodes.h:1637
int parallel_workers
Definition: pathnodes.h:1668
bool parallel_aware
Definition: pathnodes.h:1664

References AGG_SORTED, AggPath::aggsplit, AggPath::aggstrategy, RelOptInfo::consider_parallel, PathTarget::cost, cost_agg(), AggPath::groupClause, list_copy_head(), list_length(), makeNode, NIL, AggPath::numGroups, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, AggPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, AggPath::qual, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), AggPath::subpath, Path::total_cost, AggClauseCosts::transitionSpace, and AggPath::transitionSpace.

Referenced by add_paths_to_grouping_rel(), create_final_distinct_paths(), create_partial_distinct_paths(), create_partial_grouping_paths(), and generate_union_paths().
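
The call pattern in add_paths_to_grouping_rel() is roughly the sketch below; grouped_rel, cheapest_path, target, group_clauses, havingQual, agg_costs and dNumGroups are assumed to have been prepared by the caller, as planner.c does.

/* Sketch only: add a hashed aggregate on top of the cheapest input path. */
add_path(grouped_rel, (Path *)
         create_agg_path(root,
                         grouped_rel,
                         cheapest_path,
                         target,
                         AGG_HASHED,
                         AGGSPLIT_SIMPLE,
                         group_clauses,
                         havingQual,
                         agg_costs,
                         dNumGroups));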

◆ create_append_path()

AppendPath * create_append_path ( PlannerInfo *  root,
RelOptInfo *  rel,
List *  subpaths,
List *  partial_subpaths,
List *  pathkeys,
Relids  required_outer,
int  parallel_workers,
bool  parallel_aware,
double  rows 
)

Definition at line 1300 of file pathnode.c.

1306{
1307 AppendPath *pathnode = makeNode(AppendPath);
1308 ListCell *l;
1309
1310 Assert(!parallel_aware || parallel_workers > 0);
1311
1312 pathnode->path.pathtype = T_Append;
1313 pathnode->path.parent = rel;
1314 pathnode->path.pathtarget = rel->reltarget;
1315
1316 /*
1317 * If this is for a baserel (not a join or non-leaf partition), we prefer
1318 * to apply get_baserel_parampathinfo to construct a full ParamPathInfo
1319 * for the path. This supports building a Memoize path atop this path,
1320 * and if this is a partitioned table the info may be useful for run-time
1321 * pruning (cf make_partition_pruneinfo()).
1322 *
1323 * However, if we don't have "root" then that won't work and we fall back
1324 * on the simpler get_appendrel_parampathinfo. There's no point in doing
1325 * the more expensive thing for a dummy path, either.
1326 */
1327 if (rel->reloptkind == RELOPT_BASEREL && root && subpaths != NIL)
1328 pathnode->path.param_info = get_baserel_parampathinfo(root,
1329 rel,
1330 required_outer);
1331 else
1332 pathnode->path.param_info = get_appendrel_parampathinfo(rel,
1333 required_outer);
1334
1335 pathnode->path.parallel_aware = parallel_aware;
1336 pathnode->path.parallel_safe = rel->consider_parallel;
1337 pathnode->path.parallel_workers = parallel_workers;
1338 pathnode->path.pathkeys = pathkeys;
1339
1340 /*
1341 * For parallel append, non-partial paths are sorted by descending total
1342 * costs. That way, the total time to finish all non-partial paths is
1343 * minimized. Also, the partial paths are sorted by descending startup
1344 * costs. There may be some paths that require to do startup work by a
1345 * single worker. In such case, it's better for workers to choose the
1346 * expensive ones first, whereas the leader should choose the cheapest
1347 * startup plan.
1348 */
1349 if (pathnode->path.parallel_aware)
1350 {
1351 /*
1352 * We mustn't fiddle with the order of subpaths when the Append has
1353 * pathkeys. The order they're listed in is critical to keeping the
1354 * pathkeys valid.
1355 */
1356 Assert(pathkeys == NIL);
1357
1358 list_sort(subpaths, append_total_cost_compare);
1359 list_sort(partial_subpaths, append_startup_cost_compare);
1360 }
1361 pathnode->first_partial_path = list_length(subpaths);
1362 pathnode->subpaths = list_concat(subpaths, partial_subpaths);
1363
1364 /*
1365 * Apply query-wide LIMIT if known and path is for sole base relation.
1366 * (Handling this at this low level is a bit klugy.)
1367 */
1368 if (root != NULL && bms_equal(rel->relids, root->all_query_rels))
1369 pathnode->limit_tuples = root->limit_tuples;
1370 else
1371 pathnode->limit_tuples = -1.0;
1372
1373 foreach(l, pathnode->subpaths)
1374 {
1375 Path *subpath = (Path *) lfirst(l);
1376
1377 pathnode->path.parallel_safe = pathnode->path.parallel_safe &&
1378 subpath->parallel_safe;
1379
1380 /* All child paths must have same parameterization */
1381 Assert(bms_equal(PATH_REQ_OUTER(subpath), required_outer));
1382 }
1383
1384 Assert(!parallel_aware || pathnode->path.parallel_safe);
1385
1386 /*
1387 * If there's exactly one child path then the output of the Append is
1388 * necessarily ordered the same as the child's, so we can inherit the
1389 * child's pathkeys if any, overriding whatever the caller might've said.
1390 * Furthermore, if the child's parallel awareness matches the Append's,
1391 * then the Append is a no-op and will be discarded later (in setrefs.c).
1392 * Then we can inherit the child's size and cost too, effectively charging
1393 * zero for the Append. Otherwise, we must do the normal costsize
1394 * calculation.
1395 */
1396 if (list_length(pathnode->subpaths) == 1)
1397 {
1398 Path *child = (Path *) linitial(pathnode->subpaths);
1399
1400 if (child->parallel_aware == parallel_aware)
1401 {
1402 pathnode->path.rows = child->rows;
1403 pathnode->path.startup_cost = child->startup_cost;
1404 pathnode->path.total_cost = child->total_cost;
1405 }
1406 else
1407 cost_append(pathnode);
1408 /* Must do this last, else cost_append complains */
1409 pathnode->path.pathkeys = child->pathkeys;
1410 }
1411 else
1412 cost_append(pathnode);
1413
1414 /* If the caller provided a row estimate, override the computed value. */
1415 if (rows >= 0)
1416 pathnode->path.rows = rows;
1417
1418 return pathnode;
1419}
void cost_append(AppendPath *apath)
Definition: costsize.c:2250
void list_sort(List *list, list_sort_comparator cmp)
Definition: list.c:1674
List * list_concat(List *list1, const List *list2)
Definition: list.c:561
static int append_startup_cost_compare(const ListCell *a, const ListCell *b)
Definition: pathnode.c:1453
static int append_total_cost_compare(const ListCell *a, const ListCell *b)
Definition: pathnode.c:1431
#define linitial(l)
Definition: pg_list.h:178
ParamPathInfo * get_appendrel_parampathinfo(RelOptInfo *appendrel, Relids required_outer)
Definition: relnode.c:1856
ParamPathInfo * get_baserel_parampathinfo(PlannerInfo *root, RelOptInfo *baserel, Relids required_outer)
Definition: relnode.c:1545
int first_partial_path
Definition: pathnodes.h:1946
Cardinality limit_tuples
Definition: pathnodes.h:1947
List * subpaths
Definition: pathnodes.h:1944

References append_startup_cost_compare(), append_total_cost_compare(), Assert, bms_equal(), RelOptInfo::consider_parallel, cost_append(), AppendPath::first_partial_path, get_appendrel_parampathinfo(), get_baserel_parampathinfo(), lfirst, AppendPath::limit_tuples, linitial, list_concat(), list_length(), list_sort(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, AppendPath::path, PATH_REQ_OUTER, Path::pathkeys, Path::pathtype, RelOptInfo::relids, RELOPT_BASEREL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, Path::rows, Path::startup_cost, subpath(), AppendPath::subpaths, and Path::total_cost.

Referenced by add_paths_to_append_rel(), create_degenerate_grouping_paths(), generate_orderedappend_paths(), generate_union_paths(), mark_dummy_rel(), reparameterize_path(), and set_dummy_rel_pathlist().
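
A simplified sketch of how add_paths_to_append_rel() uses this function: collect each live child's cheapest total path and wrap them in a plain, non-parallel Append. Partial paths, parameterization and ordered appends are omitted; live_childrels is an assumed list of child RelOptInfos.

List     *subpaths = NIL;
ListCell *lc;

foreach(lc, live_childrels)
{
    RelOptInfo *childrel = (RelOptInfo *) lfirst(lc);

    subpaths = lappend(subpaths, childrel->cheapest_total_path);
}

add_path(rel, (Path *)
         create_append_path(root, rel,
                            subpaths,
                            NIL,      /* no partial subpaths */
                            NIL,      /* no pathkeys: output is unordered */
                            NULL,     /* not parameterized */
                            0,        /* parallel_workers */
                            false,    /* not parallel-aware */
                            -1));     /* let cost_append() compute the rows */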

◆ create_bitmap_and_path()

BitmapAndPath * create_bitmap_and_path ( PlannerInfo *  root,
RelOptInfo *  rel,
List *  bitmapquals 
)

Definition at line 1131 of file pathnode.c.

1134{
1135 BitmapAndPath *pathnode = makeNode(BitmapAndPath);
1136 Relids required_outer = NULL;
1137 ListCell *lc;
1138
1139 pathnode->path.pathtype = T_BitmapAnd;
1140 pathnode->path.parent = rel;
1141 pathnode->path.pathtarget = rel->reltarget;
1142
1143 /*
1144 * Identify the required outer rels as the union of what the child paths
1145 * depend on. (Alternatively, we could insist that the caller pass this
1146 * in, but it's more convenient and reliable to compute it here.)
1147 */
1148 foreach(lc, bitmapquals)
1149 {
1150 Path *bitmapqual = (Path *) lfirst(lc);
1151
1152 required_outer = bms_add_members(required_outer,
1153 PATH_REQ_OUTER(bitmapqual));
1154 }
1155 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1156 required_outer);
1157
1158 /*
1159 * Currently, a BitmapHeapPath, BitmapAndPath, or BitmapOrPath will be
1160 * parallel-safe if and only if rel->consider_parallel is set. So, we can
1161 * set the flag for this path based only on the relation-level flag,
1162 * without actually iterating over the list of children.
1163 */
1164 pathnode->path.parallel_aware = false;
1165 pathnode->path.parallel_safe = rel->consider_parallel;
1166 pathnode->path.parallel_workers = 0;
1167
1168 pathnode->path.pathkeys = NIL; /* always unordered */
1169
1170 pathnode->bitmapquals = bitmapquals;
1171
1172 /* this sets bitmapselectivity as well as the regular cost fields: */
1173 cost_bitmap_and_node(pathnode, root);
1174
1175 return pathnode;
1176}
Bitmapset * bms_add_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:917
void cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
Definition: costsize.c:1165
List * bitmapquals
Definition: pathnodes.h:1809

References BitmapAndPath::bitmapquals, bms_add_members(), RelOptInfo::consider_parallel, cost_bitmap_and_node(), get_baserel_parampathinfo(), lfirst, makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, BitmapAndPath::path, PATH_REQ_OUTER, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by bitmap_and_cost_est(), and choose_bitmap_and().

◆ create_bitmap_heap_path()

BitmapHeapPath * create_bitmap_heap_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Path *  bitmapqual,
Relids  required_outer,
double  loop_count,
int  parallel_degree 
)

Definition at line 1098 of file pathnode.c.

1104{
1105 BitmapHeapPath *pathnode = makeNode(BitmapHeapPath);
1106
1107 pathnode->path.pathtype = T_BitmapHeapScan;
1108 pathnode->path.parent = rel;
1109 pathnode->path.pathtarget = rel->reltarget;
1110 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1111 required_outer);
1112 pathnode->path.parallel_aware = (parallel_degree > 0);
1113 pathnode->path.parallel_safe = rel->consider_parallel;
1114 pathnode->path.parallel_workers = parallel_degree;
1115 pathnode->path.pathkeys = NIL; /* always unordered */
1116
1117 pathnode->bitmapqual = bitmapqual;
1118
1119 cost_bitmap_heap_scan(&pathnode->path, root, rel,
1120 pathnode->path.param_info,
1121 bitmapqual, loop_count);
1122
1123 return pathnode;
1124}
void cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info, Path *bitmapqual, double loop_count)
Definition: costsize.c:1023
Path * bitmapqual
Definition: pathnodes.h:1797

References BitmapHeapPath::bitmapqual, RelOptInfo::consider_parallel, cost_bitmap_heap_scan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, BitmapHeapPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by create_index_paths(), create_partial_bitmap_paths(), and reparameterize_path().
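
The usual pattern (compare create_index_paths() and choose_bitmap_and()) is to build one or more bitmap index paths, optionally combine them, and then put the heap fetch on top. A hedged sketch, assuming ipath_a and ipath_b are IndexPath pointers built earlier:

BitmapAndPath  *bandpath;
BitmapHeapPath *bhpath;

/* AND the two index bitmaps together... */
bandpath = create_bitmap_and_path(root, rel, list_make2(ipath_a, ipath_b));

/* ...and fetch the matching heap tuples. */
bhpath = create_bitmap_heap_path(root, rel,
                                 (Path *) bandpath,
                                 rel->lateral_relids,  /* required_outer */
                                 1.0,                  /* loop_count */
                                 0);                   /* parallel_degree */
add_path(rel, (Path *) bhpath);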

◆ create_bitmap_or_path()

BitmapOrPath * create_bitmap_or_path ( PlannerInfo *  root,
RelOptInfo *  rel,
List *  bitmapquals 
)

Definition at line 1183 of file pathnode.c.

1186{
1187 BitmapOrPath *pathnode = makeNode(BitmapOrPath);
1188 Relids required_outer = NULL;
1189 ListCell *lc;
1190
1191 pathnode->path.pathtype = T_BitmapOr;
1192 pathnode->path.parent = rel;
1193 pathnode->path.pathtarget = rel->reltarget;
1194
1195 /*
1196 * Identify the required outer rels as the union of what the child paths
1197 * depend on. (Alternatively, we could insist that the caller pass this
1198 * in, but it's more convenient and reliable to compute it here.)
1199 */
1200 foreach(lc, bitmapquals)
1201 {
1202 Path *bitmapqual = (Path *) lfirst(lc);
1203
1204 required_outer = bms_add_members(required_outer,
1205 PATH_REQ_OUTER(bitmapqual));
1206 }
1207 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1208 required_outer);
1209
1210 /*
1211 * Currently, a BitmapHeapPath, BitmapAndPath, or BitmapOrPath will be
1212 * parallel-safe if and only if rel->consider_parallel is set. So, we can
1213 * set the flag for this path based only on the relation-level flag,
1214 * without actually iterating over the list of children.
1215 */
1216 pathnode->path.parallel_aware = false;
1217 pathnode->path.parallel_safe = rel->consider_parallel;
1218 pathnode->path.parallel_workers = 0;
1219
1220 pathnode->path.pathkeys = NIL; /* always unordered */
1221
1222 pathnode->bitmapquals = bitmapquals;
1223
1224 /* this sets bitmapselectivity as well as the regular cost fields: */
1225 cost_bitmap_or_node(pathnode, root);
1226
1227 return pathnode;
1228}
void cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
Definition: costsize.c:1210
List * bitmapquals
Definition: pathnodes.h:1822

References BitmapOrPath::bitmapquals, bms_add_members(), RelOptInfo::consider_parallel, cost_bitmap_or_node(), get_baserel_parampathinfo(), lfirst, makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, BitmapOrPath::path, PATH_REQ_OUTER, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by generate_bitmap_or_paths().

◆ create_ctescan_path()

Path * create_ctescan_path ( PlannerInfo *  root,
RelOptInfo *  rel,
List *  pathkeys,
Relids  required_outer 
)

Definition at line 2196 of file pathnode.c.

2198{
2199 Path *pathnode = makeNode(Path);
2200
2201 pathnode->pathtype = T_CteScan;
2202 pathnode->parent = rel;
2203 pathnode->pathtarget = rel->reltarget;
2204 pathnode->param_info = get_baserel_parampathinfo(root, rel,
2205 required_outer);
2206 pathnode->parallel_aware = false;
2207 pathnode->parallel_safe = rel->consider_parallel;
2208 pathnode->parallel_workers = 0;
2209 pathnode->pathkeys = pathkeys;
2210
2211 cost_ctescan(pathnode, root, rel, pathnode->param_info);
2212
2213 return pathnode;
2214}
void cost_ctescan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1708

References RelOptInfo::consider_parallel, cost_ctescan(), get_baserel_parampathinfo(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_cte_pathlist().
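
A minimal sketch of the kind of call set_cte_pathlist() makes for a CTE reference, assuming no ordering information is available for the CTE's output:

add_path(rel, create_ctescan_path(root, rel,
                                  NIL,                   /* no known pathkeys */
                                  rel->lateral_relids)); /* required_outer */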

◆ create_foreign_join_path()

ForeignPath * create_foreign_join_path ( PlannerInfo *  root,
RelOptInfo *  rel,
PathTarget *  target,
double  rows,
int  disabled_nodes,
Cost  startup_cost,
Cost  total_cost,
List *  pathkeys,
Relids  required_outer,
Path *  fdw_outerpath,
List *  fdw_restrictinfo,
List *  fdw_private 
)

Definition at line 2355 of file pathnode.c.

2364{
2365 ForeignPath *pathnode = makeNode(ForeignPath);
2366
2367 /*
2368 * We should use get_joinrel_parampathinfo to handle parameterized paths,
2369 * but the API of this function doesn't support it, and existing
2370 * extensions aren't yet trying to build such paths anyway. For the
2371 * moment just throw an error if someone tries it; eventually we should
2372 * revisit this.
2373 */
2374 if (!bms_is_empty(required_outer) || !bms_is_empty(rel->lateral_relids))
2375 elog(ERROR, "parameterized foreign joins are not supported yet");
2376
2377 pathnode->path.pathtype = T_ForeignScan;
2378 pathnode->path.parent = rel;
2379 pathnode->path.pathtarget = target ? target : rel->reltarget;
2380 pathnode->path.param_info = NULL; /* XXX see above */
2381 pathnode->path.parallel_aware = false;
2382 pathnode->path.parallel_safe = rel->consider_parallel;
2383 pathnode->path.parallel_workers = 0;
2384 pathnode->path.rows = rows;
2385 pathnode->path.disabled_nodes = disabled_nodes;
2386 pathnode->path.startup_cost = startup_cost;
2387 pathnode->path.total_cost = total_cost;
2388 pathnode->path.pathkeys = pathkeys;
2389
2390 pathnode->fdw_outerpath = fdw_outerpath;
2391 pathnode->fdw_restrictinfo = fdw_restrictinfo;
2392 pathnode->fdw_private = fdw_private;
2393
2394 return pathnode;
2395}
#define bms_is_empty(a)
Definition: bitmapset.h:118
Path * fdw_outerpath
Definition: pathnodes.h:1882
List * fdw_restrictinfo
Definition: pathnodes.h:1883
List * fdw_private
Definition: pathnodes.h:1884

References bms_is_empty, RelOptInfo::consider_parallel, Path::disabled_nodes, elog, ERROR, ForeignPath::fdw_outerpath, ForeignPath::fdw_private, ForeignPath::fdw_restrictinfo, RelOptInfo::lateral_relids, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ForeignPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, Path::rows, Path::startup_cost, and Path::total_cost.

Referenced by add_paths_with_pathkeys_for_rel(), and postgresGetForeignJoinPaths().

◆ create_foreign_upper_path()

ForeignPath * create_foreign_upper_path ( PlannerInfo *  root,
RelOptInfo *  rel,
PathTarget *  target,
double  rows,
int  disabled_nodes,
Cost  startup_cost,
Cost  total_cost,
List *  pathkeys,
Path *  fdw_outerpath,
List *  fdw_restrictinfo,
List *  fdw_private 
)

Definition at line 2409 of file pathnode.c.

2417{
2418 ForeignPath *pathnode = makeNode(ForeignPath);
2419
2420 /*
2421 * Upper relations should never have any lateral references, since joining
2422 * is complete.
2423 */
2425 Assert(bms_is_empty(rel->lateral_relids));
2426 pathnode->path.pathtype = T_ForeignScan;
2427 pathnode->path.parent = rel;
2428 pathnode->path.pathtarget = target ? target : rel->reltarget;
2429 pathnode->path.param_info = NULL;
2430 pathnode->path.parallel_aware = false;
2431 pathnode->path.parallel_safe = rel->consider_parallel;
2432 pathnode->path.parallel_workers = 0;
2433 pathnode->path.rows = rows;
2434 pathnode->path.disabled_nodes = disabled_nodes;
2435 pathnode->path.startup_cost = startup_cost;
2436 pathnode->path.total_cost = total_cost;
2437 pathnode->path.pathkeys = pathkeys;
2438
2439 pathnode->fdw_outerpath = fdw_outerpath;
2440 pathnode->fdw_restrictinfo = fdw_restrictinfo;
2441 pathnode->fdw_private = fdw_private;
2442
2443 return pathnode;
2444}

References Assert, bms_is_empty, RelOptInfo::consider_parallel, Path::disabled_nodes, ForeignPath::fdw_outerpath, ForeignPath::fdw_private, ForeignPath::fdw_restrictinfo, RelOptInfo::lateral_relids, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ForeignPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, Path::rows, Path::startup_cost, and Path::total_cost.

Referenced by add_foreign_final_paths(), add_foreign_grouping_paths(), and add_foreign_ordered_paths().

◆ create_foreignscan_path()

ForeignPath * create_foreignscan_path ( PlannerInfo *  root,
RelOptInfo *  rel,
PathTarget *  target,
double  rows,
int  disabled_nodes,
Cost  startup_cost,
Cost  total_cost,
List *  pathkeys,
Relids  required_outer,
Path *  fdw_outerpath,
List *  fdw_restrictinfo,
List *  fdw_private 
)

Definition at line 2307 of file pathnode.c.

2316{
2317 ForeignPath *pathnode = makeNode(ForeignPath);
2318
2319 /* Historically some FDWs were confused about when to use this */
2320 Assert(IS_SIMPLE_REL(rel));
2321
2322 pathnode->path.pathtype = T_ForeignScan;
2323 pathnode->path.parent = rel;
2324 pathnode->path.pathtarget = target ? target : rel->reltarget;
2325 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
2326 required_outer);
2327 pathnode->path.parallel_aware = false;
2328 pathnode->path.parallel_safe = rel->consider_parallel;
2329 pathnode->path.parallel_workers = 0;
2330 pathnode->path.rows = rows;
2331 pathnode->path.disabled_nodes = disabled_nodes;
2332 pathnode->path.startup_cost = startup_cost;
2333 pathnode->path.total_cost = total_cost;
2334 pathnode->path.pathkeys = pathkeys;
2335
2336 pathnode->fdw_outerpath = fdw_outerpath;
2337 pathnode->fdw_restrictinfo = fdw_restrictinfo;
2338 pathnode->fdw_private = fdw_private;
2339
2340 return pathnode;
2341}
#define IS_SIMPLE_REL(rel)
Definition: pathnodes.h:839

References Assert, RelOptInfo::consider_parallel, Path::disabled_nodes, ForeignPath::fdw_outerpath, ForeignPath::fdw_private, ForeignPath::fdw_restrictinfo, get_baserel_parampathinfo(), IS_SIMPLE_REL, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ForeignPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, root, Path::rows, Path::startup_cost, and Path::total_cost.

Referenced by add_paths_with_pathkeys_for_rel(), fileGetForeignPaths(), and postgresGetForeignPaths().
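
A minimal GetForeignPaths callback for a hypothetical FDW might look like the sketch below; the cost figures are invented, and a real FDW would derive them from its own estimates, as fileGetForeignPaths() and postgresGetForeignPaths() do.

#include "postgres.h"
#include "optimizer/pathnode.h"

static void
myGetForeignPaths(PlannerInfo *root, RelOptInfo *baserel, Oid foreigntableid)
{
    Cost startup_cost = 10.0;                        /* assumed */
    Cost total_cost = startup_cost + baserel->rows;  /* assumed */

    add_path(baserel, (Path *)
             create_foreignscan_path(root, baserel,
                                     NULL,            /* default pathtarget */
                                     baserel->rows,
                                     0,               /* disabled_nodes */
                                     startup_cost,
                                     total_cost,
                                     NIL,             /* no pathkeys */
                                     baserel->lateral_relids,
                                     NULL,            /* no fdw_outerpath */
                                     NIL,             /* no fdw_restrictinfo */
                                     NIL));           /* no fdw_private */
}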

◆ create_functionscan_path()

Path * create_functionscan_path ( PlannerInfo *  root,
RelOptInfo *  rel,
List *  pathkeys,
Relids  required_outer 
)

Definition at line 2118 of file pathnode.c.

2120{
2121 Path *pathnode = makeNode(Path);
2122
2123 pathnode->pathtype = T_FunctionScan;
2124 pathnode->parent = rel;
2125 pathnode->pathtarget = rel->reltarget;
2126 pathnode->param_info = get_baserel_parampathinfo(root, rel,
2127 required_outer);
2128 pathnode->parallel_aware = false;
2129 pathnode->parallel_safe = rel->consider_parallel;
2130 pathnode->parallel_workers = 0;
2131 pathnode->pathkeys = pathkeys;
2132
2133 cost_functionscan(pathnode, root, rel, pathnode->param_info);
2134
2135 return pathnode;
2136}
void cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1538

References RelOptInfo::consider_parallel, cost_functionscan(), get_baserel_parampathinfo(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_function_pathlist().

◆ create_gather_merge_path()

GatherMergePath * create_gather_merge_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Path *  subpath,
PathTarget *  target,
List *  pathkeys,
Relids  required_outer,
double *  rows 
)

Definition at line 1962 of file pathnode.c.

1965{
1966 GatherMergePath *pathnode = makeNode(GatherMergePath);
1967 int input_disabled_nodes = 0;
1968 Cost input_startup_cost = 0;
1969 Cost input_total_cost = 0;
1970
1971 Assert(subpath->parallel_safe);
1972 Assert(pathkeys);
1973
1974 /*
1975 * The subpath should guarantee that it is adequately ordered either by
1976 * adding an explicit sort node or by using presorted input. We cannot
1977 * add an explicit Sort node for the subpath in createplan.c on additional
1978 * pathkeys, because we can't guarantee the sort would be safe. For
1979 * example, expressions may be volatile or otherwise parallel unsafe.
1980 */
1981 if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
1982 elog(ERROR, "gather merge input not sufficiently sorted");
1983
1984 pathnode->path.pathtype = T_GatherMerge;
1985 pathnode->path.parent = rel;
1986 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1987 required_outer);
1988 pathnode->path.parallel_aware = false;
1989
1990 pathnode->subpath = subpath;
1991 pathnode->num_workers = subpath->parallel_workers;
1992 pathnode->path.pathkeys = pathkeys;
1993 pathnode->path.pathtarget = target ? target : rel->reltarget;
1994
1995 input_disabled_nodes += subpath->disabled_nodes;
1996 input_startup_cost += subpath->startup_cost;
1997 input_total_cost += subpath->total_cost;
1998
1999 cost_gather_merge(pathnode, root, rel, pathnode->path.param_info,
2000 input_disabled_nodes, input_startup_cost,
2001 input_total_cost, rows);
2002
2003 return pathnode;
2004}
void cost_gather_merge(GatherMergePath *path, PlannerInfo *root, RelOptInfo *rel, ParamPathInfo *param_info, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double *rows)
Definition: costsize.c:485
bool pathkeys_contained_in(List *keys1, List *keys2)
Definition: pathkeys.c:343

References Assert, cost_gather_merge(), elog, ERROR, get_baserel_parampathinfo(), makeNode, GatherMergePath::num_workers, Path::parallel_aware, GatherMergePath::path, Path::pathkeys, pathkeys_contained_in(), Path::pathtype, RelOptInfo::reltarget, root, subpath(), and GatherMergePath::subpath.

Referenced by create_ordered_paths(), gather_grouping_paths(), generate_gather_paths(), and generate_useful_gather_paths().
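
A sketch of the pattern used in generate_useful_gather_paths(): when a partial path is already sorted on useful_pathkeys, a Gather Merge collects the workers' output without destroying that order. subpath and useful_pathkeys are assumed to be supplied by the caller, and the row estimate shown is a crude worker-count scaling rather than the real code's estimation.

GatherMergePath *gmpath;
double           rows = subpath->rows * subpath->parallel_workers;

gmpath = create_gather_merge_path(root, rel,
                                  subpath,
                                  rel->reltarget,
                                  useful_pathkeys,
                                  NULL,            /* required_outer */
                                  &rows);
add_path(rel, (Path *) gmpath);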

◆ create_gather_path()

GatherPath * create_gather_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Path *  subpath,
PathTarget *  target,
Relids  required_outer,
double *  rows 
)

Definition at line 2044 of file pathnode.c.

2046{
2047 GatherPath *pathnode = makeNode(GatherPath);
2048
2049 Assert(subpath->parallel_safe);
2050
2051 pathnode->path.pathtype = T_Gather;
2052 pathnode->path.parent = rel;
2053 pathnode->path.pathtarget = target;
2054 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
2055 required_outer);
2056 pathnode->path.parallel_aware = false;
2057 pathnode->path.parallel_safe = false;
2058 pathnode->path.parallel_workers = 0;
2059 pathnode->path.pathkeys = NIL; /* Gather has unordered result */
2060
2061 pathnode->subpath = subpath;
2062 pathnode->num_workers = subpath->parallel_workers;
2063 pathnode->single_copy = false;
2064
2065 if (pathnode->num_workers == 0)
2066 {
2067 pathnode->path.pathkeys = subpath->pathkeys;
2068 pathnode->num_workers = 1;
2069 pathnode->single_copy = true;
2070 }
2071
2072 cost_gather(pathnode, root, rel, pathnode->path.param_info, rows);
2073
2074 return pathnode;
2075}
void cost_gather(GatherPath *path, PlannerInfo *root, RelOptInfo *rel, ParamPathInfo *param_info, double *rows)
Definition: costsize.c:446
bool single_copy
Definition: pathnodes.h:2055
int num_workers
Definition: pathnodes.h:2056

References Assert, cost_gather(), get_baserel_parampathinfo(), makeNode, NIL, GatherPath::num_workers, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, GatherPath::path, Path::pathkeys, Path::pathtype, root, GatherPath::single_copy, subpath(), and GatherPath::subpath.

Referenced by generate_gather_paths(), and generate_union_paths().
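
The core of generate_gather_paths() is roughly the sketch below: take the cheapest partial path and put a Gather on top so the rest of the plan can consume the rel serially. The row estimate shown is a crude worker-count scaling, standing in for the real code's estimation.

if (rel->partial_pathlist != NIL)
{
    Path   *cheapest_partial_path = (Path *) linitial(rel->partial_pathlist);
    double  rows;
    Path   *simple_gather_path;

    rows = cheapest_partial_path->rows * cheapest_partial_path->parallel_workers;
    simple_gather_path = (Path *)
        create_gather_path(root, rel, cheapest_partial_path,
                           rel->reltarget,
                           NULL,        /* required_outer */
                           &rows);
    add_path(rel, simple_gather_path);
}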

◆ create_group_path()

GroupPath * create_group_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Path *  subpath,
List *  groupClause,
List *  qual,
double  numGroups 
)

Definition at line 3127 of file pathnode.c.

3133{
3134 GroupPath *pathnode = makeNode(GroupPath);
3135 PathTarget *target = rel->reltarget;
3136
3137 pathnode->path.pathtype = T_Group;
3138 pathnode->path.parent = rel;
3139 pathnode->path.pathtarget = target;
3140 /* For now, assume we are above any joins, so no parameterization */
3141 pathnode->path.param_info = NULL;
3142 pathnode->path.parallel_aware = false;
3143 pathnode->path.parallel_safe = rel->consider_parallel &&
3144 subpath->parallel_safe;
3145 pathnode->path.parallel_workers = subpath->parallel_workers;
3146 /* Group doesn't change sort ordering */
3147 pathnode->path.pathkeys = subpath->pathkeys;
3148
3149 pathnode->subpath = subpath;
3150
3151 pathnode->groupClause = groupClause;
3152 pathnode->qual = qual;
3153
3154 cost_group(&pathnode->path, root,
3155 list_length(groupClause),
3156 numGroups,
3157 qual,
3158 subpath->disabled_nodes,
3159 subpath->startup_cost, subpath->total_cost,
3160 subpath->rows);
3161
3162 /* add tlist eval cost for each output row */
3163 pathnode->path.startup_cost += target->cost.startup;
3164 pathnode->path.total_cost += target->cost.startup +
3165 target->cost.per_tuple * pathnode->path.rows;
3166
3167 return pathnode;
3168}
void cost_group(Path *path, PlannerInfo *root, int numGroupCols, double numGroups, List *quals, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double input_tuples)
Definition: costsize.c:3196
List * qual
Definition: pathnodes.h:2240
List * groupClause
Definition: pathnodes.h:2239
Path * subpath
Definition: pathnodes.h:2238
Path path
Definition: pathnodes.h:2237

References RelOptInfo::consider_parallel, PathTarget::cost, cost_group(), GroupPath::groupClause, list_length(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, GroupPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, GroupPath::qual, RelOptInfo::reltarget, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), GroupPath::subpath, and Path::total_cost.

Referenced by add_paths_to_grouping_rel(), and create_partial_grouping_paths().

◆ create_group_result_path()

GroupResultPath * create_group_result_path ( PlannerInfo *  root,
RelOptInfo *  rel,
PathTarget *  target,
List *  havingqual 
)

Definition at line 1586 of file pathnode.c.

1588{
1589 GroupResultPath *pathnode = makeNode(GroupResultPath);
1590
1591 pathnode->path.pathtype = T_Result;
1592 pathnode->path.parent = rel;
1593 pathnode->path.pathtarget = target;
1594 pathnode->path.param_info = NULL; /* there are no other rels... */
1595 pathnode->path.parallel_aware = false;
1596 pathnode->path.parallel_safe = rel->consider_parallel;
1597 pathnode->path.parallel_workers = 0;
1598 pathnode->path.pathkeys = NIL;
1599 pathnode->quals = havingqual;
1600
1601 /*
1602 * We can't quite use cost_resultscan() because the quals we want to
1603 * account for are not baserestrict quals of the rel. Might as well just
1604 * hack it here.
1605 */
1606 pathnode->path.rows = 1;
1607 pathnode->path.startup_cost = target->cost.startup;
1608 pathnode->path.total_cost = target->cost.startup +
1609 cpu_tuple_cost + target->cost.per_tuple;
1610
1611 /*
1612 * Add cost of qual, if any --- but we ignore its selectivity, since our
1613 * rowcount estimate should be 1 no matter what the qual is.
1614 */
1615 if (havingqual)
1616 {
1617 QualCost qual_cost;
1618
1619 cost_qual_eval(&qual_cost, havingqual, root);
1620 /* havingqual is evaluated once at startup */
1621 pathnode->path.startup_cost += qual_cost.startup + qual_cost.per_tuple;
1622 pathnode->path.total_cost += qual_cost.startup + qual_cost.per_tuple;
1623 }
1624
1625 return pathnode;
1626}
double cpu_tuple_cost
Definition: costsize.c:132
void cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
Definition: costsize.c:4732

References RelOptInfo::consider_parallel, PathTarget::cost, cost_qual_eval(), cpu_tuple_cost, makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, GroupResultPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, GroupResultPath::quals, root, Path::rows, QualCost::startup, Path::startup_cost, and Path::total_cost.

Referenced by create_degenerate_grouping_paths(), and query_planner().

◆ create_groupingsets_path()

GroupingSetsPath * create_groupingsets_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Path *  subpath,
List *  having_qual,
AggStrategy  aggstrategy,
List *  rollups,
const AggClauseCosts *  agg_costs 
)

Definition at line 3323 of file pathnode.c.

3330{
3331 GroupingSetsPath *pathnode = makeNode(GroupingSetsPath);
3332 PathTarget *target = rel->reltarget;
3333 ListCell *lc;
3334 bool is_first = true;
3335 bool is_first_sort = true;
3336
3337 /* The topmost generated Plan node will be an Agg */
3338 pathnode->path.pathtype = T_Agg;
3339 pathnode->path.parent = rel;
3340 pathnode->path.pathtarget = target;
3341 pathnode->path.param_info = subpath->param_info;
3342 pathnode->path.parallel_aware = false;
3343 pathnode->path.parallel_safe = rel->consider_parallel &&
3344 subpath->parallel_safe;
3345 pathnode->path.parallel_workers = subpath->parallel_workers;
3346 pathnode->subpath = subpath;
3347
3348 /*
3349 * Simplify callers by downgrading AGG_SORTED to AGG_PLAIN, and AGG_MIXED
3350 * to AGG_HASHED, here if possible.
3351 */
3352 if (aggstrategy == AGG_SORTED &&
3353 list_length(rollups) == 1 &&
3354 ((RollupData *) linitial(rollups))->groupClause == NIL)
3355 aggstrategy = AGG_PLAIN;
3356
3357 if (aggstrategy == AGG_MIXED &&
3358 list_length(rollups) == 1)
3359 aggstrategy = AGG_HASHED;
3360
3361 /*
3362 * Output will be in sorted order by group_pathkeys if, and only if, there
3363 * is a single rollup operation on a non-empty list of grouping
3364 * expressions.
3365 */
3366 if (aggstrategy == AGG_SORTED && list_length(rollups) == 1)
3367 pathnode->path.pathkeys = root->group_pathkeys;
3368 else
3369 pathnode->path.pathkeys = NIL;
3370
3371 pathnode->aggstrategy = aggstrategy;
3372 pathnode->rollups = rollups;
3373 pathnode->qual = having_qual;
3374 pathnode->transitionSpace = agg_costs ? agg_costs->transitionSpace : 0;
3375
3376 Assert(rollups != NIL);
3377 Assert(aggstrategy != AGG_PLAIN || list_length(rollups) == 1);
3378 Assert(aggstrategy != AGG_MIXED || list_length(rollups) > 1);
3379
3380 foreach(lc, rollups)
3381 {
3382 RollupData *rollup = lfirst(lc);
3383 List *gsets = rollup->gsets;
3384 int numGroupCols = list_length(linitial(gsets));
3385
3386 /*
3387 * In AGG_SORTED or AGG_PLAIN mode, the first rollup takes the
3388 * (already-sorted) input, and following ones do their own sort.
3389 *
3390 * In AGG_HASHED mode, there is one rollup for each grouping set.
3391 *
3392 * In AGG_MIXED mode, the first rollups are hashed, the first
3393 * non-hashed one takes the (already-sorted) input, and following ones
3394 * do their own sort.
3395 */
3396 if (is_first)
3397 {
3398 cost_agg(&pathnode->path, root,
3399 aggstrategy,
3400 agg_costs,
3401 numGroupCols,
3402 rollup->numGroups,
3403 having_qual,
3404 subpath->disabled_nodes,
3405 subpath->startup_cost,
3406 subpath->total_cost,
3407 subpath->rows,
3408 subpath->pathtarget->width);
3409 is_first = false;
3410 if (!rollup->is_hashed)
3411 is_first_sort = false;
3412 }
3413 else
3414 {
3415 Path sort_path; /* dummy for result of cost_sort */
3416 Path agg_path; /* dummy for result of cost_agg */
3417
3418 if (rollup->is_hashed || is_first_sort)
3419 {
3420 /*
3421 * Account for cost of aggregation, but don't charge input
3422 * cost again
3423 */
3424 cost_agg(&agg_path, root,
3425 rollup->is_hashed ? AGG_HASHED : AGG_SORTED,
3426 agg_costs,
3427 numGroupCols,
3428 rollup->numGroups,
3429 having_qual,
3430 0, 0.0, 0.0,
3431 subpath->rows,
3432 subpath->pathtarget->width);
3433 if (!rollup->is_hashed)
3434 is_first_sort = false;
3435 }
3436 else
3437 {
3438 /* Account for cost of sort, but don't charge input cost again */
3439 cost_sort(&sort_path, root, NIL, 0,
3440 0.0,
3441 subpath->rows,
3442 subpath->pathtarget->width,
3443 0.0,
3444 work_mem,
3445 -1.0);
3446
3447 /* Account for cost of aggregation */
3448
3449 cost_agg(&agg_path, root,
3450 AGG_SORTED,
3451 agg_costs,
3452 numGroupCols,
3453 rollup->numGroups,
3454 having_qual,
3455 sort_path.disabled_nodes,
3456 sort_path.startup_cost,
3457 sort_path.total_cost,
3458 sort_path.rows,
3459 subpath->pathtarget->width);
3460 }
3461
3462 pathnode->path.disabled_nodes += agg_path.disabled_nodes;
3463 pathnode->path.total_cost += agg_path.total_cost;
3464 pathnode->path.rows += agg_path.rows;
3465 }
3466 }
3467
3468 /* add tlist eval cost for each output row */
3469 pathnode->path.startup_cost += target->cost.startup;
3470 pathnode->path.total_cost += target->cost.startup +
3471 target->cost.per_tuple * pathnode->path.rows;
3472
3473 return pathnode;
3474}
void cost_sort(Path *path, PlannerInfo *root, List *pathkeys, int input_disabled_nodes, Cost input_cost, double tuples, int width, Cost comparison_cost, int sort_mem, double limit_tuples)
Definition: costsize.c:2144
int work_mem
Definition: globals.c:130
@ AGG_HASHED
Definition: nodes.h:356
@ AGG_MIXED
Definition: nodes.h:357
@ AGG_PLAIN
Definition: nodes.h:354
uint64 transitionSpace
Definition: pathnodes.h:2312
AggStrategy aggstrategy
Definition: pathnodes.h:2309
Cardinality numGroups
Definition: pathnodes.h:2296
List * gsets
Definition: pathnodes.h:2294
bool is_hashed
Definition: pathnodes.h:2298

References AGG_HASHED, AGG_MIXED, AGG_PLAIN, AGG_SORTED, GroupingSetsPath::aggstrategy, Assert, RelOptInfo::consider_parallel, PathTarget::cost, cost_agg(), cost_sort(), Path::disabled_nodes, RollupData::gsets, RollupData::is_hashed, lfirst, linitial, list_length(), makeNode, NIL, RollupData::numGroups, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, GroupingSetsPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, GroupingSetsPath::qual, RelOptInfo::reltarget, GroupingSetsPath::rollups, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), GroupingSetsPath::subpath, Path::total_cost, AggClauseCosts::transitionSpace, GroupingSetsPath::transitionSpace, and work_mem.

Referenced by consider_groupingsets_paths().

◆ create_hashjoin_path()

HashPath * create_hashjoin_path ( PlannerInfo *  root,
RelOptInfo *  joinrel,
JoinType  jointype,
JoinCostWorkspace *  workspace,
JoinPathExtraData *  extra,
Path *  outer_path,
Path *  inner_path,
bool  parallel_hash,
List *  restrict_clauses,
Relids  required_outer,
List *  hashclauses 
)

Definition at line 2697 of file pathnode.c.

2708{
2709 HashPath *pathnode = makeNode(HashPath);
2710
2711 pathnode->jpath.path.pathtype = T_HashJoin;
2712 pathnode->jpath.path.parent = joinrel;
2713 pathnode->jpath.path.pathtarget = joinrel->reltarget;
2714 pathnode->jpath.path.param_info =
2715 get_joinrel_parampathinfo(root,
2716 joinrel,
2717 outer_path,
2718 inner_path,
2719 extra->sjinfo,
2720 required_outer,
2721 &restrict_clauses);
2722 pathnode->jpath.path.parallel_aware =
2723 joinrel->consider_parallel && parallel_hash;
2724 pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2725 outer_path->parallel_safe && inner_path->parallel_safe;
2726 /* This is a foolish way to estimate parallel_workers, but for now... */
2727 pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2728
2729 /*
2730 * A hashjoin never has pathkeys, since its output ordering is
2731 * unpredictable due to possible batching. XXX If the inner relation is
2732 * small enough, we could instruct the executor that it must not batch,
2733 * and then we could assume that the output inherits the outer relation's
2734 * ordering, which might save a sort step. However there is considerable
2735 * downside if our estimate of the inner relation size is badly off. For
2736 * the moment we don't risk it. (Note also that if we wanted to take this
2737 * seriously, joinpath.c would have to consider many more paths for the
2738 * outer rel than it does now.)
2739 */
2740 pathnode->jpath.path.pathkeys = NIL;
2741 pathnode->jpath.jointype = jointype;
2742 pathnode->jpath.inner_unique = extra->inner_unique;
2743 pathnode->jpath.outerjoinpath = outer_path;
2744 pathnode->jpath.innerjoinpath = inner_path;
2745 pathnode->jpath.joinrestrictinfo = restrict_clauses;
2746 pathnode->path_hashclauses = hashclauses;
2747 /* final_cost_hashjoin will fill in pathnode->num_batches */
2748
2749 final_cost_hashjoin(root, pathnode, workspace, extra);
2750
2751 return pathnode;
2752}
void final_cost_hashjoin(PlannerInfo *root, HashPath *path, JoinCostWorkspace *workspace, JoinPathExtraData *extra)
Definition: costsize.c:4275
ParamPathInfo * get_joinrel_parampathinfo(PlannerInfo *root, RelOptInfo *joinrel, Path *outer_path, Path *inner_path, SpecialJoinInfo *sjinfo, Relids required_outer, List **restrict_clauses)
Definition: relnode.c:1659
List * path_hashclauses
Definition: pathnodes.h:2164
JoinPath jpath
Definition: pathnodes.h:2163
SpecialJoinInfo * sjinfo
Definition: pathnodes.h:3244
Path * outerjoinpath
Definition: pathnodes.h:2086
Path * innerjoinpath
Definition: pathnodes.h:2087
JoinType jointype
Definition: pathnodes.h:2081
bool inner_unique
Definition: pathnodes.h:2083
List * joinrestrictinfo
Definition: pathnodes.h:2089

References RelOptInfo::consider_parallel, final_cost_hashjoin(), get_joinrel_parampathinfo(), JoinPath::inner_unique, JoinPathExtraData::inner_unique, JoinPath::innerjoinpath, JoinPath::joinrestrictinfo, JoinPath::jointype, HashPath::jpath, makeNode, NIL, JoinPath::outerjoinpath, Path::parallel_safe, Path::parallel_workers, HashPath::path_hashclauses, RelOptInfo::reltarget, root, and JoinPathExtraData::sjinfo.

Referenced by try_hashjoin_path(), and try_partial_hashjoin_path().
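
The example below is a minimal caller sketch, not code from pathnode.c: it assumes, as try_hashjoin_path() does, that joinpath.c has already filled in the JoinCostWorkspace (via initial_cost_hashjoin()) and the JoinPathExtraData before the HashPath is built and offered to the joinrel. The helper name add_hashjoin_candidate is hypothetical.

#include "postgres.h"

#include "optimizer/pathnode.h"

/*
 * Hypothetical helper: build a non-parallel hash join path and offer it to
 * the joinrel.  "workspace" and "extra" are assumed to have been set up by
 * the caller, as joinpath.c does before calling create_hashjoin_path().
 */
static void
add_hashjoin_candidate(PlannerInfo *root, RelOptInfo *joinrel,
                       JoinType jointype, JoinCostWorkspace *workspace,
                       JoinPathExtraData *extra,
                       Path *outer_path, Path *inner_path,
                       List *restrict_clauses, List *hashclauses,
                       Relids required_outer)
{
    HashPath   *hpath;

    hpath = create_hashjoin_path(root, joinrel, jointype,
                                 workspace, extra,
                                 outer_path, inner_path,
                                 false,      /* parallel_hash */
                                 restrict_clauses, required_outer,
                                 hashclauses);

    /* add_path() discards the path again if it is dominated */
    add_path(joinrel, (Path *) hpath);
}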

◆ create_incremental_sort_path()

IncrementalSortPath * create_incremental_sort_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * subpath,
List * pathkeys,
int  presorted_keys,
double  limit_tuples 
)

Definition at line 3032 of file pathnode.c.

3038{
3039 IncrementalSortPath *sort = makeNode(IncrementalSortPath);
3040 SortPath *pathnode = &sort->spath;
3041
3042 pathnode->path.pathtype = T_IncrementalSort;
3043 pathnode->path.parent = rel;
3044 /* Sort doesn't project, so use source path's pathtarget */
3045 pathnode->path.pathtarget = subpath->pathtarget;
3046 /* For now, assume we are above any joins, so no parameterization */
3047 pathnode->path.param_info = NULL;
3048 pathnode->path.parallel_aware = false;
3049 pathnode->path.parallel_safe = rel->consider_parallel &&
3050 subpath->parallel_safe;
3051 pathnode->path.parallel_workers = subpath->parallel_workers;
3052 pathnode->path.pathkeys = pathkeys;
3053
3054 pathnode->subpath = subpath;
3055
3056 cost_incremental_sort(&pathnode->path,
3057 root, pathkeys, presorted_keys,
3058 subpath->disabled_nodes,
3059 subpath->startup_cost,
3060 subpath->total_cost,
3061 subpath->rows,
3062 subpath->pathtarget->width,
3063 0.0, /* XXX comparison_cost shouldn't be 0? */
3064 work_mem, limit_tuples);
3065
3066 sort->nPresortedCols = presorted_keys;
3067
3068 return sort;
3069}
Datum sort(PG_FUNCTION_ARGS)
Definition: _int_op.c:195
void cost_incremental_sort(Path *path, PlannerInfo *root, List *pathkeys, int presorted_keys, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double input_tuples, int width, Cost comparison_cost, int sort_mem, double limit_tuples)
Definition: costsize.c:2000
Path path
Definition: pathnodes.h:2211
Path * subpath
Definition: pathnodes.h:2212

References RelOptInfo::consider_parallel, cost_incremental_sort(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, SortPath::path, Path::pathkeys, Path::pathtype, root, sort(), subpath(), SortPath::subpath, and work_mem.

Referenced by build_setop_child_paths(), create_one_window_path(), create_ordered_paths(), gather_grouping_paths(), generate_useful_gather_paths(), and make_ordered_path().
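
A minimal sketch of the usual call pattern (loosely modeled on make_ordered_path(); the helper name and arguments are illustrative): when the input already satisfies some leading pathkeys, an incremental sort over just the remaining columns is created, otherwise a full sort. pathkeys_count_contained_in() from optimizer/paths.h is assumed for the prefix test.

#include "postgres.h"

#include "optimizer/pathnode.h"
#include "optimizer/paths.h"

/* Hypothetical helper: return "input" ordered by desired_pathkeys. */
static Path *
sort_input_if_needed(PlannerInfo *root, RelOptInfo *rel, Path *input,
                     List *desired_pathkeys, double limit_tuples)
{
    int         n_presorted;

    /* Nothing to do if the input is already suitably ordered */
    if (pathkeys_count_contained_in(desired_pathkeys, input->pathkeys,
                                    &n_presorted))
        return input;

    if (n_presorted > 0)
    {
        /* Some leading keys are presorted; sort only the remainder */
        return (Path *) create_incremental_sort_path(root, rel, input,
                                                     desired_pathkeys,
                                                     n_presorted,
                                                     limit_tuples);
    }

    /* Otherwise a full sort of the input is required */
    return (Path *) create_sort_path(root, rel, input,
                                     desired_pathkeys, limit_tuples);
}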

◆ create_index_path()

IndexPath * create_index_path ( PlannerInfo * root,
IndexOptInfo * index,
List * indexclauses,
List * indexorderbys,
List * indexorderbycols,
List * pathkeys,
ScanDirection  indexscandir,
bool  indexonly,
Relids  required_outer,
double  loop_count,
bool  partial_path 
)

Definition at line 1049 of file pathnode.c.

1060{
1061 IndexPath *pathnode = makeNode(IndexPath);
1062 RelOptInfo *rel = index->rel;
1063
1064 pathnode->path.pathtype = indexonly ? T_IndexOnlyScan : T_IndexScan;
1065 pathnode->path.parent = rel;
1066 pathnode->path.pathtarget = rel->reltarget;
1067 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1068 required_outer);
1069 pathnode->path.parallel_aware = false;
1070 pathnode->path.parallel_safe = rel->consider_parallel;
1071 pathnode->path.parallel_workers = 0;
1072 pathnode->path.pathkeys = pathkeys;
1073
1074 pathnode->indexinfo = index;
1075 pathnode->indexclauses = indexclauses;
1076 pathnode->indexorderbys = indexorderbys;
1077 pathnode->indexorderbycols = indexorderbycols;
1078 pathnode->indexscandir = indexscandir;
1079
1080 cost_index(pathnode, root, loop_count, partial_path);
1081
1082 return pathnode;
1083}
void cost_index(IndexPath *path, PlannerInfo *root, double loop_count, bool partial_path)
Definition: costsize.c:560
List * indexclauses
Definition: pathnodes.h:1723
ScanDirection indexscandir
Definition: pathnodes.h:1726
Path path
Definition: pathnodes.h:1721
List * indexorderbycols
Definition: pathnodes.h:1725
List * indexorderbys
Definition: pathnodes.h:1724
IndexOptInfo * indexinfo
Definition: pathnodes.h:1722
Definition: type.h:96

References RelOptInfo::consider_parallel, cost_index(), get_baserel_parampathinfo(), IndexPath::indexclauses, IndexPath::indexinfo, IndexPath::indexorderbycols, IndexPath::indexorderbys, IndexPath::indexscandir, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, IndexPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by build_index_paths(), and plan_cluster_use_sort().

◆ create_limit_path()

LimitPath * create_limit_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * subpath,
Node * limitOffset,
Node * limitCount,
LimitOption  limitOption,
int64  offset_est,
int64  count_est 
)

Definition at line 3979 of file pathnode.c.

3984{
3985 LimitPath *pathnode = makeNode(LimitPath);
3986
3987 pathnode->path.pathtype = T_Limit;
3988 pathnode->path.parent = rel;
3989 /* Limit doesn't project, so use source path's pathtarget */
3990 pathnode->path.pathtarget = subpath->pathtarget;
3991 /* For now, assume we are above any joins, so no parameterization */
3992 pathnode->path.param_info = NULL;
3993 pathnode->path.parallel_aware = false;
3994 pathnode->path.parallel_safe = rel->consider_parallel &&
3995 subpath->parallel_safe;
3996 pathnode->path.parallel_workers = subpath->parallel_workers;
3997 pathnode->path.rows = subpath->rows;
3998 pathnode->path.disabled_nodes = subpath->disabled_nodes;
3999 pathnode->path.startup_cost = subpath->startup_cost;
4000 pathnode->path.total_cost = subpath->total_cost;
4001 pathnode->path.pathkeys = subpath->pathkeys;
4002 pathnode->subpath = subpath;
4003 pathnode->limitOffset = limitOffset;
4004 pathnode->limitCount = limitCount;
4005 pathnode->limitOption = limitOption;
4006
4007 /*
4008 * Adjust the output rows count and costs according to the offset/limit.
4009 */
4010 adjust_limit_rows_costs(&pathnode->path.rows,
4011 &pathnode->path.startup_cost,
4012 &pathnode->path.total_cost,
4013 offset_est, count_est);
4014
4015 return pathnode;
4016}
void adjust_limit_rows_costs(double *rows, Cost *startup_cost, Cost *total_cost, int64 offset_est, int64 count_est)
Definition: pathnode.c:4035
Path path
Definition: pathnodes.h:2411
Path * subpath
Definition: pathnodes.h:2412
LimitOption limitOption
Definition: pathnodes.h:2415
Node * limitOffset
Definition: pathnodes.h:2413
Node * limitCount
Definition: pathnodes.h:2414

References adjust_limit_rows_costs(), RelOptInfo::consider_parallel, Path::disabled_nodes, LimitPath::limitCount, LimitPath::limitOffset, LimitPath::limitOption, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, LimitPath::path, Path::pathkeys, Path::pathtype, Path::rows, Path::startup_cost, subpath(), LimitPath::subpath, and Path::total_cost.

Referenced by create_final_distinct_paths(), create_partial_distinct_paths(), and grouping_planner().
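
As a usage sketch (hypothetical helper, not the actual grouping_planner() code): the query-level LIMIT/OFFSET expressions from the parse tree are passed through unchanged, while offset_est/count_est carry the planner's integer estimates (as produced by preprocess_limit()), which adjust_limit_rows_costs() uses to scale the subpath's row count and costs.

#include "postgres.h"

#include "optimizer/pathnode.h"

/* Hypothetical helper: wrap the chosen path in a Limit node. */
static Path *
apply_query_limit(PlannerInfo *root, RelOptInfo *final_rel,
                  Path *best_path, int64 offset_est, int64 count_est)
{
    Query      *parse = root->parse;

    return (Path *) create_limit_path(root, final_rel, best_path,
                                      parse->limitOffset,
                                      parse->limitCount,
                                      parse->limitOption,
                                      offset_est, count_est);
}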

◆ create_lockrows_path()

LockRowsPath * create_lockrows_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * subpath,
List * rowMarks,
int  epqParam 
)

Definition at line 3813 of file pathnode.c.

3815{
3816 LockRowsPath *pathnode = makeNode(LockRowsPath);
3817
3818 pathnode->path.pathtype = T_LockRows;
3819 pathnode->path.parent = rel;
3820 /* LockRows doesn't project, so use source path's pathtarget */
3821 pathnode->path.pathtarget = subpath->pathtarget;
3822 /* For now, assume we are above any joins, so no parameterization */
3823 pathnode->path.param_info = NULL;
3824 pathnode->path.parallel_aware = false;
3825 pathnode->path.parallel_safe = false;
3826 pathnode->path.parallel_workers = 0;
3827 pathnode->path.rows = subpath->rows;
3828
3829 /*
3830 * The result cannot be assumed sorted, since locking might cause the sort
3831 * key columns to be replaced with new values.
3832 */
3833 pathnode->path.pathkeys = NIL;
3834
3835 pathnode->subpath = subpath;
3836 pathnode->rowMarks = rowMarks;
3837 pathnode->epqParam = epqParam;
3838
3839 /*
3840 * We should charge something extra for the costs of row locking and
3841 * possible refetches, but it's hard to say how much. For now, use
3842 * cpu_tuple_cost per row.
3843 */
3844 pathnode->path.disabled_nodes = subpath->disabled_nodes;
3845 pathnode->path.startup_cost = subpath->startup_cost;
3846 pathnode->path.total_cost = subpath->total_cost +
3847 cpu_tuple_cost * subpath->rows;
3848
3849 return pathnode;
3850}
Path * subpath
Definition: pathnodes.h:2372
List * rowMarks
Definition: pathnodes.h:2373

References cpu_tuple_cost, Path::disabled_nodes, LockRowsPath::epqParam, makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, LockRowsPath::path, Path::pathkeys, Path::pathtype, LockRowsPath::rowMarks, Path::rows, Path::startup_cost, subpath(), LockRowsPath::subpath, and Path::total_cost.

Referenced by grouping_planner().

◆ create_material_path()

MaterialPath * create_material_path ( RelOptInfo * rel,
Path * subpath 
)

Definition at line 1634 of file pathnode.c.

1635{
1636 MaterialPath *pathnode = makeNode(MaterialPath);
1637
1638 Assert(subpath->parent == rel);
1639
1640 pathnode->path.pathtype = T_Material;
1641 pathnode->path.parent = rel;
1642 pathnode->path.pathtarget = rel->reltarget;
1643 pathnode->path.param_info = subpath->param_info;
1644 pathnode->path.parallel_aware = false;
1645 pathnode->path.parallel_safe = rel->consider_parallel &&
1646 subpath->parallel_safe;
1647 pathnode->path.parallel_workers = subpath->parallel_workers;
1648 pathnode->path.pathkeys = subpath->pathkeys;
1649
1650 pathnode->subpath = subpath;
1651
1652 cost_material(&pathnode->path,
1653 subpath->disabled_nodes,
1654 subpath->startup_cost,
1655 subpath->total_cost,
1656 subpath->rows,
1657 subpath->pathtarget->width);
1658
1659 return pathnode;
1660}
void cost_material(Path *path, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double tuples, int width)
Definition: costsize.c:2483
Path * subpath
Definition: pathnodes.h:1994

References Assert, RelOptInfo::consider_parallel, cost_material(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, MaterialPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, subpath(), and MaterialPath::subpath.

Referenced by consider_parallel_nestloop(), match_unsorted_outer(), reparameterize_path(), and set_tablesample_rel_pathlist().
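
A small illustrative sketch (hypothetical helper; the real decision in match_unsorted_outer() also consults enable_material and whether the inner plan already materializes its output): when the inner side of a nestloop is expensive to rescan, wrapping it in a Material node can pay off.

#include "postgres.h"

#include "optimizer/pathnode.h"

/* Hypothetical helper: optionally shield a nestloop inner path behind a
 * Material node so repeated rescans read the tuplestore instead. */
static Path *
maybe_materialize_inner(RelOptInfo *innerrel, Path *inner_path,
                        bool inner_cheap_to_rescan)
{
    if (inner_cheap_to_rescan)
        return inner_path;

    /* cost_material() prices the extra copy; the caller still compares the
     * materialized and unmaterialized alternatives on cost. */
    return (Path *) create_material_path(innerrel, inner_path);
}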

◆ create_memoize_path()

MemoizePath * create_memoize_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * subpath,
List * param_exprs,
List * hash_operators,
bool  singlerow,
bool  binary_mode,
double  calls 
)

Definition at line 1667 of file pathnode.c.

1670{
1671 MemoizePath *pathnode = makeNode(MemoizePath);
1672
1673 Assert(subpath->parent == rel);
1674
1675 pathnode->path.pathtype = T_Memoize;
1676 pathnode->path.parent = rel;
1677 pathnode->path.pathtarget = rel->reltarget;
1678 pathnode->path.param_info = subpath->param_info;
1679 pathnode->path.parallel_aware = false;
1680 pathnode->path.parallel_safe = rel->consider_parallel &&
1681 subpath->parallel_safe;
1682 pathnode->path.parallel_workers = subpath->parallel_workers;
1683 pathnode->path.pathkeys = subpath->pathkeys;
1684
1685 pathnode->subpath = subpath;
1686 pathnode->hash_operators = hash_operators;
1687 pathnode->param_exprs = param_exprs;
1688 pathnode->singlerow = singlerow;
1689 pathnode->binary_mode = binary_mode;
1690 pathnode->calls = clamp_row_est(calls);
1691
1692 /*
1693 * For now we set est_entries to 0. cost_memoize_rescan() does all the
1694 * hard work to determine how many cache entries there are likely to be,
1695 * so it seems best to leave it up to that function to fill this field in.
1696 * If left at 0, the executor will make a guess at a good value.
1697 */
1698 pathnode->est_entries = 0;
1699
1700 /* we should not generate this path type when enable_memoize=false */
1701 Assert(enable_memoize);
1702 pathnode->path.disabled_nodes = subpath->disabled_nodes;
1703
1704 /*
1705 * Add a small additional charge for caching the first entry. All the
1706 * harder calculations for rescans are performed in cost_memoize_rescan().
1707 */
1708 pathnode->path.startup_cost = subpath->startup_cost + cpu_tuple_cost;
1709 pathnode->path.total_cost = subpath->total_cost + cpu_tuple_cost;
1710 pathnode->path.rows = subpath->rows;
1711
1712 return pathnode;
1713}
bool enable_memoize
Definition: costsize.c:155
bool singlerow
Definition: pathnodes.h:2008
List * hash_operators
Definition: pathnodes.h:2006
uint32 est_entries
Definition: pathnodes.h:2013
bool binary_mode
Definition: pathnodes.h:2010
Cardinality calls
Definition: pathnodes.h:2012
Path * subpath
Definition: pathnodes.h:2005
List * param_exprs
Definition: pathnodes.h:2007

References Assert, MemoizePath::binary_mode, MemoizePath::calls, clamp_row_est(), RelOptInfo::consider_parallel, cpu_tuple_cost, Path::disabled_nodes, enable_memoize, MemoizePath::est_entries, MemoizePath::hash_operators, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, MemoizePath::param_exprs, MemoizePath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, Path::rows, MemoizePath::singlerow, Path::startup_cost, subpath(), MemoizePath::subpath, and Path::total_cost.

Referenced by get_memoize_path(), and reparameterize_path().
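
Sketch of a typical call (hypothetical helper, in the spirit of get_memoize_path()): the inner side of a parameterized nested loop is wrapped in a Memoize node, with the cache keyed on param_exprs/hash_operators extracted from the join clauses by the caller, and calls estimated as one probe per outer row.

#include "postgres.h"

#include "optimizer/pathnode.h"

/* Hypothetical helper: cache the inner side of a parameterized nestloop. */
static Path *
memoize_inner(PlannerInfo *root, RelOptInfo *innerrel,
              Path *inner_path, Path *outer_path,
              List *param_exprs, List *hash_operators,
              bool single_row_per_key)
{
    /* Expect one cache probe per outer-side row */
    double      est_calls = outer_path->rows;

    return (Path *) create_memoize_path(root, innerrel, inner_path,
                                        param_exprs, hash_operators,
                                        single_row_per_key,
                                        false,  /* binary_mode */
                                        est_calls);
}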

◆ create_merge_append_path()

MergeAppendPath * create_merge_append_path ( PlannerInfo * root,
RelOptInfo * rel,
List * subpaths,
List * pathkeys,
Relids  required_outer 
)

Definition at line 1471 of file pathnode.c.

1476{
1477 MergeAppendPath *pathnode = makeNode(MergeAppendPath);
1478 int input_disabled_nodes;
1479 Cost input_startup_cost;
1480 Cost input_total_cost;
1481 ListCell *l;
1482
1483 /*
1484 * We don't currently support parameterized MergeAppend paths, as
1485 * explained in the comments for generate_orderedappend_paths.
1486 */
1487 Assert(bms_is_empty(rel->lateral_relids) && bms_is_empty(required_outer));
1488
1489 pathnode->path.pathtype = T_MergeAppend;
1490 pathnode->path.parent = rel;
1491 pathnode->path.pathtarget = rel->reltarget;
1492 pathnode->path.param_info = NULL;
1493 pathnode->path.parallel_aware = false;
1494 pathnode->path.parallel_safe = rel->consider_parallel;
1495 pathnode->path.parallel_workers = 0;
1496 pathnode->path.pathkeys = pathkeys;
1497 pathnode->subpaths = subpaths;
1498
1499 /*
1500 * Apply query-wide LIMIT if known and path is for sole base relation.
1501 * (Handling this at this low level is a bit klugy.)
1502 */
1503 if (bms_equal(rel->relids, root->all_query_rels))
1504 pathnode->limit_tuples = root->limit_tuples;
1505 else
1506 pathnode->limit_tuples = -1.0;
1507
1508 /*
1509 * Add up the sizes and costs of the input paths.
1510 */
1511 pathnode->path.rows = 0;
1512 input_disabled_nodes = 0;
1513 input_startup_cost = 0;
1514 input_total_cost = 0;
1515 foreach(l, subpaths)
1516 {
1517 Path *subpath = (Path *) lfirst(l);
1518
1519 /* All child paths should be unparameterized */
1520 Assert(bms_is_empty(PATH_REQ_OUTER(subpath)));
1521
1522 pathnode->path.rows += subpath->rows;
1523 pathnode->path.parallel_safe = pathnode->path.parallel_safe &&
1524 subpath->parallel_safe;
1525
1526 if (pathkeys_contained_in(pathkeys, subpath->pathkeys))
1527 {
1528 /* Subpath is adequately ordered, we won't need to sort it */
1529 input_disabled_nodes += subpath->disabled_nodes;
1530 input_startup_cost += subpath->startup_cost;
1531 input_total_cost += subpath->total_cost;
1532 }
1533 else
1534 {
1535 /* We'll need to insert a Sort node, so include cost for that */
1536 Path sort_path; /* dummy for result of cost_sort */
1537
1538 cost_sort(&sort_path,
1539 root,
1540 pathkeys,
1541 subpath->disabled_nodes,
1542 subpath->total_cost,
1543 subpath->rows,
1544 subpath->pathtarget->width,
1545 0.0,
1546 work_mem,
1547 pathnode->limit_tuples);
1548 input_disabled_nodes += sort_path.disabled_nodes;
1549 input_startup_cost += sort_path.startup_cost;
1550 input_total_cost += sort_path.total_cost;
1551 }
1552 }
1553
1554 /*
1555 * Now we can compute total costs of the MergeAppend. If there's exactly
1556 * one child path and its parallel awareness matches that of the
1557 * MergeAppend, then the MergeAppend is a no-op and will be discarded
1558 * later (in setrefs.c); otherwise we do the normal cost calculation.
1559 */
1560 if (list_length(subpaths) == 1 &&
1561 ((Path *) linitial(subpaths))->parallel_aware ==
1562 pathnode->path.parallel_aware)
1563 {
1564 pathnode->path.disabled_nodes = input_disabled_nodes;
1565 pathnode->path.startup_cost = input_startup_cost;
1566 pathnode->path.total_cost = input_total_cost;
1567 }
1568 else
1569 cost_merge_append(&pathnode->path, root,
1570 pathkeys, list_length(subpaths),
1571 input_disabled_nodes,
1572 input_startup_cost, input_total_cost,
1573 pathnode->path.rows);
1574
1575 return pathnode;
1576}
void cost_merge_append(Path *path, PlannerInfo *root, List *pathkeys, int n_streams, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double tuples)
Definition: costsize.c:2432
Cardinality limit_tuples
Definition: pathnodes.h:1969

References Assert, bms_equal(), bms_is_empty, RelOptInfo::consider_parallel, cost_merge_append(), cost_sort(), Path::disabled_nodes, RelOptInfo::lateral_relids, lfirst, MergeAppendPath::limit_tuples, linitial, list_length(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, MergeAppendPath::path, PATH_REQ_OUTER, Path::pathkeys, pathkeys_contained_in(), Path::pathtype, RelOptInfo::relids, RelOptInfo::reltarget, root, Path::rows, Path::startup_cost, subpath(), MergeAppendPath::subpaths, Path::total_cost, and work_mem.

Referenced by generate_orderedappend_paths(), and generate_union_paths().
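
A reduced caller sketch (hypothetical; generate_orderedappend_paths() additionally matches each child against the pathkeys and considers startup-cheapest children): one subpath per child is passed in, and, as the code above shows, a Sort is silently charged for any child that is not already ordered; required_outer must be NULL because parameterized MergeAppend paths are not supported.

#include "postgres.h"

#include "nodes/pg_list.h"
#include "optimizer/pathnode.h"

/* Hypothetical helper: MergeAppend over each child's cheapest total path. */
static void
add_ordered_append(PlannerInfo *root, RelOptInfo *parent_rel,
                   List *child_rels, List *pathkeys)
{
    List       *subpaths = NIL;
    ListCell   *lc;

    foreach(lc, child_rels)
    {
        RelOptInfo *childrel = (RelOptInfo *) lfirst(lc);

        subpaths = lappend(subpaths, childrel->cheapest_total_path);
    }

    add_path(parent_rel,
             (Path *) create_merge_append_path(root, parent_rel, subpaths,
                                               pathkeys, NULL));
}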

◆ create_mergejoin_path()

MergePath * create_mergejoin_path ( PlannerInfo * root,
RelOptInfo * joinrel,
JoinType  jointype,
JoinCostWorkspace * workspace,
JoinPathExtraData * extra,
Path * outer_path,
Path * inner_path,
List * restrict_clauses,
List * pathkeys,
Relids  required_outer,
List * mergeclauses,
List * outersortkeys,
List * innersortkeys 
)

Definition at line 2631 of file pathnode.c.

2644{
2645 MergePath *pathnode = makeNode(MergePath);
2646
2647 pathnode->jpath.path.pathtype = T_MergeJoin;
2648 pathnode->jpath.path.parent = joinrel;
2649 pathnode->jpath.path.pathtarget = joinrel->reltarget;
2650 pathnode->jpath.path.param_info =
2651 get_joinrel_parampathinfo(root,
2652 joinrel,
2653 outer_path,
2654 inner_path,
2655 extra->sjinfo,
2656 required_outer,
2657 &restrict_clauses);
2658 pathnode->jpath.path.parallel_aware = false;
2659 pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2660 outer_path->parallel_safe && inner_path->parallel_safe;
2661 /* This is a foolish way to estimate parallel_workers, but for now... */
2662 pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2663 pathnode->jpath.path.pathkeys = pathkeys;
2664 pathnode->jpath.jointype = jointype;
2665 pathnode->jpath.inner_unique = extra->inner_unique;
2666 pathnode->jpath.outerjoinpath = outer_path;
2667 pathnode->jpath.innerjoinpath = inner_path;
2668 pathnode->jpath.joinrestrictinfo = restrict_clauses;
2669 pathnode->path_mergeclauses = mergeclauses;
2670 pathnode->outersortkeys = outersortkeys;
2671 pathnode->innersortkeys = innersortkeys;
2672 /* pathnode->skip_mark_restore will be set by final_cost_mergejoin */
2673 /* pathnode->materialize_inner will be set by final_cost_mergejoin */
2674
2675 final_cost_mergejoin(root, pathnode, workspace, extra);
2676
2677 return pathnode;
2678}
void final_cost_mergejoin(PlannerInfo *root, MergePath *path, JoinCostWorkspace *workspace, JoinPathExtraData *extra)
Definition: costsize.c:3837
List * outersortkeys
Definition: pathnodes.h:2146
List * innersortkeys
Definition: pathnodes.h:2147
JoinPath jpath
Definition: pathnodes.h:2144
List * path_mergeclauses
Definition: pathnodes.h:2145

References RelOptInfo::consider_parallel, final_cost_mergejoin(), get_joinrel_parampathinfo(), JoinPath::inner_unique, JoinPathExtraData::inner_unique, JoinPath::innerjoinpath, MergePath::innersortkeys, JoinPath::joinrestrictinfo, JoinPath::jointype, MergePath::jpath, makeNode, JoinPath::outerjoinpath, MergePath::outersortkeys, Path::parallel_safe, Path::parallel_workers, MergePath::path_mergeclauses, RelOptInfo::reltarget, root, and JoinPathExtraData::sjinfo.

Referenced by try_mergejoin_path(), and try_partial_mergejoin_path().

◆ create_minmaxagg_path()

MinMaxAggPath * create_minmaxagg_path ( PlannerInfo * root,
RelOptInfo * rel,
PathTarget * target,
List * mmaggregates,
List * quals 
)

Definition at line 3486 of file pathnode.c.

3491{
3492 MinMaxAggPath *pathnode = makeNode(MinMaxAggPath);
3493 Cost initplan_cost;
3494 int initplan_disabled_nodes = 0;
3495 ListCell *lc;
3496
3497 /* The topmost generated Plan node will be a Result */
3498 pathnode->path.pathtype = T_Result;
3499 pathnode->path.parent = rel;
3500 pathnode->path.pathtarget = target;
3501 /* For now, assume we are above any joins, so no parameterization */
3502 pathnode->path.param_info = NULL;
3503 pathnode->path.parallel_aware = false;
3504 pathnode->path.parallel_safe = true; /* might change below */
3505 pathnode->path.parallel_workers = 0;
3506 /* Result is one unordered row */
3507 pathnode->path.rows = 1;
3508 pathnode->path.pathkeys = NIL;
3509
3510 pathnode->mmaggregates = mmaggregates;
3511 pathnode->quals = quals;
3512
3513 /* Calculate cost of all the initplans, and check parallel safety */
3514 initplan_cost = 0;
3515 foreach(lc, mmaggregates)
3516 {
3517 MinMaxAggInfo *mminfo = (MinMaxAggInfo *) lfirst(lc);
3518
3519 initplan_disabled_nodes += mminfo->path->disabled_nodes;
3520 initplan_cost += mminfo->pathcost;
3521 if (!mminfo->path->parallel_safe)
3522 pathnode->path.parallel_safe = false;
3523 }
3524
3525 /* add tlist eval cost for each output row, plus cpu_tuple_cost */
3526 pathnode->path.disabled_nodes = initplan_disabled_nodes;
3527 pathnode->path.startup_cost = initplan_cost + target->cost.startup;
3528 pathnode->path.total_cost = initplan_cost + target->cost.startup +
3529 target->cost.per_tuple + cpu_tuple_cost;
3530
3531 /*
3532 * Add cost of qual, if any --- but we ignore its selectivity, since our
3533 * rowcount estimate should be 1 no matter what the qual is.
3534 */
3535 if (quals)
3536 {
3537 QualCost qual_cost;
3538
3539 cost_qual_eval(&qual_cost, quals, root);
3540 pathnode->path.startup_cost += qual_cost.startup;
3541 pathnode->path.total_cost += qual_cost.startup + qual_cost.per_tuple;
3542 }
3543
3544 /*
3545 * If the initplans were all parallel-safe, also check safety of the
3546 * target and quals. (The Result node itself isn't parallelizable, but if
3547 * we are in a subquery then it can be useful for the outer query to know
3548 * that this one is parallel-safe.)
3549 */
3550 if (pathnode->path.parallel_safe)
3551 pathnode->path.parallel_safe =
3552 is_parallel_safe(root, (Node *) target->exprs) &&
3553 is_parallel_safe(root, (Node *) quals);
3554
3555 return pathnode;
3556}
List * quals
Definition: pathnodes.h:2322
List * mmaggregates
Definition: pathnodes.h:2321

References PathTarget::cost, cost_qual_eval(), cpu_tuple_cost, Path::disabled_nodes, PathTarget::exprs, is_parallel_safe(), lfirst, makeNode, MinMaxAggPath::mmaggregates, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, MinMaxAggPath::path, MinMaxAggInfo::path, MinMaxAggInfo::pathcost, Path::pathkeys, Path::pathtype, QualCost::per_tuple, MinMaxAggPath::quals, root, Path::rows, QualCost::startup, Path::startup_cost, and Path::total_cost.

Referenced by preprocess_minmax_aggregates().

◆ create_modifytable_path()

ModifyTablePath * create_modifytable_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * subpath,
CmdType  operation,
bool  canSetTag,
Index  nominalRelation,
Index  rootRelation,
bool  partColsUpdated,
List * resultRelations,
List * updateColnosLists,
List * withCheckOptionLists,
List * returningLists,
List * rowMarks,
OnConflictExpr * onconflict,
List * mergeActionLists,
List * mergeJoinConditions,
int  epqParam 
)

Definition at line 3877 of file pathnode.c.

3888{
3889 ModifyTablePath *pathnode = makeNode(ModifyTablePath);
3890
3891 Assert(operation == CMD_MERGE ||
3892 (operation == CMD_UPDATE ?
3893 list_length(resultRelations) == list_length(updateColnosLists) :
3894 updateColnosLists == NIL));
3895 Assert(withCheckOptionLists == NIL ||
3896 list_length(resultRelations) == list_length(withCheckOptionLists));
3897 Assert(returningLists == NIL ||
3898 list_length(resultRelations) == list_length(returningLists));
3899
3900 pathnode->path.pathtype = T_ModifyTable;
3901 pathnode->path.parent = rel;
3902 /* pathtarget is not interesting, just make it minimally valid */
3903 pathnode->path.pathtarget = rel->reltarget;
3904 /* For now, assume we are above any joins, so no parameterization */
3905 pathnode->path.param_info = NULL;
3906 pathnode->path.parallel_aware = false;
3907 pathnode->path.parallel_safe = false;
3908 pathnode->path.parallel_workers = 0;
3909 pathnode->path.pathkeys = NIL;
3910
3911 /*
3912 * Compute cost & rowcount as subpath cost & rowcount (if RETURNING)
3913 *
3914 * Currently, we don't charge anything extra for the actual table
3915 * modification work, nor for the WITH CHECK OPTIONS or RETURNING
3916 * expressions if any. It would only be window dressing, since
3917 * ModifyTable is always a top-level node and there is no way for the
3918 * costs to change any higher-level planning choices. But we might want
3919 * to make it look better sometime.
3920 */
3921 pathnode->path.disabled_nodes = subpath->disabled_nodes;
3922 pathnode->path.startup_cost = subpath->startup_cost;
3923 pathnode->path.total_cost = subpath->total_cost;
3924 if (returningLists != NIL)
3925 {
3926 pathnode->path.rows = subpath->rows;
3927
3928 /*
3929 * Set width to match the subpath output. XXX this is totally wrong:
3930 * we should return an average of the RETURNING tlist widths. But
3931 * it's what happened historically, and improving it is a task for
3932 * another day. (Again, it's mostly window dressing.)
3933 */
3934 pathnode->path.pathtarget->width = subpath->pathtarget->width;
3935 }
3936 else
3937 {
3938 pathnode->path.rows = 0;
3939 pathnode->path.pathtarget->width = 0;
3940 }
3941
3942 pathnode->subpath = subpath;
3943 pathnode->operation = operation;
3944 pathnode->canSetTag = canSetTag;
3945 pathnode->nominalRelation = nominalRelation;
3946 pathnode->rootRelation = rootRelation;
3947 pathnode->partColsUpdated = partColsUpdated;
3948 pathnode->resultRelations = resultRelations;
3949 pathnode->updateColnosLists = updateColnosLists;
3950 pathnode->withCheckOptionLists = withCheckOptionLists;
3951 pathnode->returningLists = returningLists;
3952 pathnode->rowMarks = rowMarks;
3953 pathnode->onconflict = onconflict;
3954 pathnode->epqParam = epqParam;
3955 pathnode->mergeActionLists = mergeActionLists;
3956 pathnode->mergeJoinConditions = mergeJoinConditions;
3957
3958 return pathnode;
3959}
@ CMD_MERGE
Definition: nodes.h:269
@ CMD_UPDATE
Definition: nodes.h:266
bool partColsUpdated
Definition: pathnodes.h:2392
List * returningLists
Definition: pathnodes.h:2396
List * resultRelations
Definition: pathnodes.h:2393
List * withCheckOptionLists
Definition: pathnodes.h:2395
List * mergeJoinConditions
Definition: pathnodes.h:2402
List * updateColnosLists
Definition: pathnodes.h:2394
OnConflictExpr * onconflict
Definition: pathnodes.h:2398
CmdType operation
Definition: pathnodes.h:2388
Index rootRelation
Definition: pathnodes.h:2391
Index nominalRelation
Definition: pathnodes.h:2390
List * mergeActionLists
Definition: pathnodes.h:2400

References Assert, ModifyTablePath::canSetTag, CMD_MERGE, CMD_UPDATE, Path::disabled_nodes, ModifyTablePath::epqParam, list_length(), makeNode, ModifyTablePath::mergeActionLists, ModifyTablePath::mergeJoinConditions, NIL, ModifyTablePath::nominalRelation, ModifyTablePath::onconflict, ModifyTablePath::operation, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ModifyTablePath::partColsUpdated, ModifyTablePath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, ModifyTablePath::resultRelations, ModifyTablePath::returningLists, ModifyTablePath::rootRelation, ModifyTablePath::rowMarks, Path::rows, Path::startup_cost, subpath(), ModifyTablePath::subpath, Path::total_cost, ModifyTablePath::updateColnosLists, and ModifyTablePath::withCheckOptionLists.

Referenced by grouping_planner().

◆ create_namedtuplestorescan_path()

Path * create_namedtuplestorescan_path ( PlannerInfo * root,
RelOptInfo * rel,
Relids  required_outer 
)

Definition at line 2222 of file pathnode.c.

2224{
2225 Path *pathnode = makeNode(Path);
2226
2227 pathnode->pathtype = T_NamedTuplestoreScan;
2228 pathnode->parent = rel;
2229 pathnode->pathtarget = rel->reltarget;
2230 pathnode->param_info = get_baserel_parampathinfo(root, rel,
2231 required_outer);
2232 pathnode->parallel_aware = false;
2233 pathnode->parallel_safe = rel->consider_parallel;
2234 pathnode->parallel_workers = 0;
2235 pathnode->pathkeys = NIL; /* result is always unordered */
2236
2237 cost_namedtuplestorescan(pathnode, root, rel, pathnode->param_info);
2238
2239 return pathnode;
2240}
void cost_namedtuplestorescan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1750

References RelOptInfo::consider_parallel, cost_namedtuplestorescan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_namedtuplestore_pathlist().

◆ create_nestloop_path()

NestPath * create_nestloop_path ( PlannerInfo * root,
RelOptInfo * joinrel,
JoinType  jointype,
JoinCostWorkspace * workspace,
JoinPathExtraData * extra,
Path * outer_path,
Path * inner_path,
List * restrict_clauses,
List * pathkeys,
Relids  required_outer 
)

Definition at line 2535 of file pathnode.c.

2545{
2546 NestPath *pathnode = makeNode(NestPath);
2547 Relids inner_req_outer = PATH_REQ_OUTER(inner_path);
2548 Relids outerrelids;
2549
2550 /*
2551 * Paths are parameterized by top-level parents, so run parameterization
2552 * tests on the parent relids.
2553 */
2554 if (outer_path->parent->top_parent_relids)
2555 outerrelids = outer_path->parent->top_parent_relids;
2556 else
2557 outerrelids = outer_path->parent->relids;
2558
2559 /*
2560 * If the inner path is parameterized by the outer, we must drop any
2561 * restrict_clauses that are due to be moved into the inner path. We have
2562 * to do this now, rather than postpone the work till createplan time,
2563 * because the restrict_clauses list can affect the size and cost
2564 * estimates for this path. We detect such clauses by checking for serial
2565 * number match to clauses already enforced in the inner path.
2566 */
2567 if (bms_overlap(inner_req_outer, outerrelids))
2568 {
2569 Bitmapset *enforced_serials = get_param_path_clause_serials(inner_path);
2570 List *jclauses = NIL;
2571 ListCell *lc;
2572
2573 foreach(lc, restrict_clauses)
2574 {
2575 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
2576
2577 if (!bms_is_member(rinfo->rinfo_serial, enforced_serials))
2578 jclauses = lappend(jclauses, rinfo);
2579 }
2580 restrict_clauses = jclauses;
2581 }
2582
2583 pathnode->jpath.path.pathtype = T_NestLoop;
2584 pathnode->jpath.path.parent = joinrel;
2585 pathnode->jpath.path.pathtarget = joinrel->reltarget;
2586 pathnode->jpath.path.param_info =
2587 get_joinrel_parampathinfo(root,
2588 joinrel,
2589 outer_path,
2590 inner_path,
2591 extra->sjinfo,
2592 required_outer,
2593 &restrict_clauses);
2594 pathnode->jpath.path.parallel_aware = false;
2595 pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2596 outer_path->parallel_safe && inner_path->parallel_safe;
2597 /* This is a foolish way to estimate parallel_workers, but for now... */
2598 pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2599 pathnode->jpath.path.pathkeys = pathkeys;
2600 pathnode->jpath.jointype = jointype;
2601 pathnode->jpath.inner_unique = extra->inner_unique;
2602 pathnode->jpath.outerjoinpath = outer_path;
2603 pathnode->jpath.innerjoinpath = inner_path;
2604 pathnode->jpath.joinrestrictinfo = restrict_clauses;
2605
2606 final_cost_nestloop(root, pathnode, workspace, extra);
2607
2608 return pathnode;
2609}
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:510
void final_cost_nestloop(PlannerInfo *root, NestPath *path, JoinCostWorkspace *workspace, JoinPathExtraData *extra)
Definition: costsize.c:3350
Bitmapset * get_param_path_clause_serials(Path *path)
Definition: relnode.c:1910
JoinPath jpath
Definition: pathnodes.h:2104
int rinfo_serial
Definition: pathnodes.h:2647

References bms_is_member(), bms_overlap(), RelOptInfo::consider_parallel, final_cost_nestloop(), get_joinrel_parampathinfo(), get_param_path_clause_serials(), JoinPath::inner_unique, JoinPathExtraData::inner_unique, JoinPath::innerjoinpath, JoinPath::joinrestrictinfo, JoinPath::jointype, NestPath::jpath, lappend(), lfirst, makeNode, NIL, JoinPath::outerjoinpath, Path::parallel_safe, Path::parallel_workers, PATH_REQ_OUTER, RelOptInfo::reltarget, RestrictInfo::rinfo_serial, root, and JoinPathExtraData::sjinfo.

Referenced by try_nestloop_path(), and try_partial_nestloop_path().

◆ create_projection_path()

ProjectionPath * create_projection_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * subpath,
PathTarget * target 
)

Definition at line 2763 of file pathnode.c.

2767{
2768 ProjectionPath *pathnode = makeNode(ProjectionPath);
2769 PathTarget *oldtarget;
2770
2771 /*
2772 * We mustn't put a ProjectionPath directly above another; it's useless
2773 * and will confuse create_projection_plan. Rather than making sure all
2774 * callers handle that, let's implement it here, by stripping off any
2775 * ProjectionPath in what we're given. Given this rule, there won't be
2776 * more than one.
2777 */
2778 if (IsA(subpath, ProjectionPath))
2779 {
2780 ProjectionPath *subpp = (ProjectionPath *) subpath;
2781
2782 Assert(subpp->path.parent == rel);
2783 subpath = subpp->subpath;
2784 Assert(!IsA(subpath, ProjectionPath));
2785 }
2786
2787 pathnode->path.pathtype = T_Result;
2788 pathnode->path.parent = rel;
2789 pathnode->path.pathtarget = target;
2790 /* For now, assume we are above any joins, so no parameterization */
2791 pathnode->path.param_info = NULL;
2792 pathnode->path.parallel_aware = false;
2793 pathnode->path.parallel_safe = rel->consider_parallel &&
2794 subpath->parallel_safe &&
2795 is_parallel_safe(root, (Node *) target->exprs);
2796 pathnode->path.parallel_workers = subpath->parallel_workers;
2797 /* Projection does not change the sort order */
2798 pathnode->path.pathkeys = subpath->pathkeys;
2799
2800 pathnode->subpath = subpath;
2801
2802 /*
2803 * We might not need a separate Result node. If the input plan node type
2804 * can project, we can just tell it to project something else. Or, if it
2805 * can't project but the desired target has the same expression list as
2806 * what the input will produce anyway, we can still give it the desired
2807 * tlist (possibly changing its ressortgroupref labels, but nothing else).
2808 * Note: in the latter case, create_projection_plan has to recheck our
2809 * conclusion; see comments therein.
2810 */
2811 oldtarget = subpath->pathtarget;
2812 if (is_projection_capable_path(subpath) ||
2813 equal(oldtarget->exprs, target->exprs))
2814 {
2815 /* No separate Result node needed */
2816 pathnode->dummypp = true;
2817
2818 /*
2819 * Set cost of plan as subpath's cost, adjusted for tlist replacement.
2820 */
2821 pathnode->path.rows = subpath->rows;
2822 pathnode->path.disabled_nodes = subpath->disabled_nodes;
2823 pathnode->path.startup_cost = subpath->startup_cost +
2824 (target->cost.startup - oldtarget->cost.startup);
2825 pathnode->path.total_cost = subpath->total_cost +
2826 (target->cost.startup - oldtarget->cost.startup) +
2827 (target->cost.per_tuple - oldtarget->cost.per_tuple) * subpath->rows;
2828 }
2829 else
2830 {
2831 /* We really do need the Result node */
2832 pathnode->dummypp = false;
2833
2834 /*
2835 * The Result node's cost is cpu_tuple_cost per row, plus the cost of
2836 * evaluating the tlist. There is no qual to worry about.
2837 */
2838 pathnode->path.rows = subpath->rows;
2839 pathnode->path.disabled_nodes = subpath->disabled_nodes;
2840 pathnode->path.startup_cost = subpath->startup_cost +
2841 target->cost.startup;
2842 pathnode->path.total_cost = subpath->total_cost +
2843 target->cost.startup +
2844 (cpu_tuple_cost + target->cost.per_tuple) * subpath->rows;
2845 }
2846
2847 return pathnode;
2848}
bool equal(const void *a, const void *b)
Definition: equalfuncs.c:223
Path * subpath
Definition: pathnodes.h:2186

References Assert, RelOptInfo::consider_parallel, PathTarget::cost, cpu_tuple_cost, Path::disabled_nodes, ProjectionPath::dummypp, equal(), PathTarget::exprs, is_parallel_safe(), is_projection_capable_path(), IsA, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ProjectionPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), ProjectionPath::subpath, and Path::total_cost.

Referenced by add_paths_with_pathkeys_for_rel(), adjust_paths_for_srfs(), apply_projection_to_path(), apply_scanjoin_target_to_paths(), and recurse_set_operations().
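
Minimal usage sketch (hypothetical helper): the caller simply stacks the new target on top of the chosen path; whether that becomes a real Result node or a cost-free relabeling is recorded in dummypp, as computed above, and later rechecked by create_projection_plan().

#include "postgres.h"

#include "optimizer/pathnode.h"

/* Hypothetical helper: evaluate final_target on top of an existing path. */
static Path *
project_to_target(PlannerInfo *root, RelOptInfo *rel,
                  Path *input, PathTarget *final_target)
{
    ProjectionPath *ppath;

    ppath = create_projection_path(root, rel, input, final_target);

    /* ppath->dummypp == true means no separate Result node is expected */
    return (Path *) ppath;
}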

◆ create_recursiveunion_path()

RecursiveUnionPath * create_recursiveunion_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * leftpath,
Path * rightpath,
PathTarget * target,
List * distinctList,
int  wtParam,
double  numGroups 
)

Definition at line 3768 of file pathnode.c.

3776{
3777 RecursiveUnionPath *pathnode = makeNode(RecursiveUnionPath);
3778
3779 pathnode->path.pathtype = T_RecursiveUnion;
3780 pathnode->path.parent = rel;
3781 pathnode->path.pathtarget = target;
3782 /* For now, assume we are above any joins, so no parameterization */
3783 pathnode->path.param_info = NULL;
3784 pathnode->path.parallel_aware = false;
3785 pathnode->path.parallel_safe = rel->consider_parallel &&
3786 leftpath->parallel_safe && rightpath->parallel_safe;
3787 /* Foolish, but we'll do it like joins for now: */
3788 pathnode->path.parallel_workers = leftpath->parallel_workers;
3789 /* RecursiveUnion result is always unsorted */
3790 pathnode->path.pathkeys = NIL;
3791
3792 pathnode->leftpath = leftpath;
3793 pathnode->rightpath = rightpath;
3794 pathnode->distinctList = distinctList;
3795 pathnode->wtParam = wtParam;
3796 pathnode->numGroups = numGroups;
3797
3798 cost_recursive_union(&pathnode->path, leftpath, rightpath);
3799
3800 return pathnode;
3801}
void cost_recursive_union(Path *runion, Path *nrterm, Path *rterm)
Definition: costsize.c:1826
Cardinality numGroups
Definition: pathnodes.h:2363

References RelOptInfo::consider_parallel, cost_recursive_union(), RecursiveUnionPath::distinctList, RecursiveUnionPath::leftpath, makeNode, NIL, RecursiveUnionPath::numGroups, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, RecursiveUnionPath::path, Path::pathkeys, Path::pathtype, RecursiveUnionPath::rightpath, and RecursiveUnionPath::wtParam.

Referenced by generate_recursion_path().

◆ create_resultscan_path()

Path * create_resultscan_path ( PlannerInfo * root,
RelOptInfo * rel,
Relids  required_outer 
)

Definition at line 2248 of file pathnode.c.

2250{
2251 Path *pathnode = makeNode(Path);
2252
2253 pathnode->pathtype = T_Result;
2254 pathnode->parent = rel;
2255 pathnode->pathtarget = rel->reltarget;
2256 pathnode->param_info = get_baserel_parampathinfo(root, rel,
2257 required_outer);
2258 pathnode->parallel_aware = false;
2259 pathnode->parallel_safe = rel->consider_parallel;
2260 pathnode->parallel_workers = 0;
2261 pathnode->pathkeys = NIL; /* result is always unordered */
2262
2263 cost_resultscan(pathnode, root, rel, pathnode->param_info);
2264
2265 return pathnode;
2266}
void cost_resultscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1788

References RelOptInfo::consider_parallel, cost_resultscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by reparameterize_path(), and set_result_pathlist().

◆ create_samplescan_path()

Path * create_samplescan_path ( PlannerInfo * root,
RelOptInfo * rel,
Relids  required_outer 
)

Definition at line 1008 of file pathnode.c.

1009{
1010 Path *pathnode = makeNode(Path);
1011
1012 pathnode->pathtype = T_SampleScan;
1013 pathnode->parent = rel;
1014 pathnode->pathtarget = rel->reltarget;
1015 pathnode->param_info = get_baserel_parampathinfo(root, rel,
1016 required_outer);
1017 pathnode->parallel_aware = false;
1018 pathnode->parallel_safe = rel->consider_parallel;
1019 pathnode->parallel_workers = 0;
1020 pathnode->pathkeys = NIL; /* samplescan has unordered result */
1021
1022 cost_samplescan(pathnode, root, rel, pathnode->param_info);
1023
1024 return pathnode;
1025}
void cost_samplescan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:370

References RelOptInfo::consider_parallel, cost_samplescan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by reparameterize_path(), and set_tablesample_rel_pathlist().

◆ create_seqscan_path()

Path * create_seqscan_path ( PlannerInfo * root,
RelOptInfo * rel,
Relids  required_outer,
int  parallel_workers 
)

Definition at line 983 of file pathnode.c.

985{
986 Path *pathnode = makeNode(Path);
987
988 pathnode->pathtype = T_SeqScan;
989 pathnode->parent = rel;
990 pathnode->pathtarget = rel->reltarget;
991 pathnode->param_info = get_baserel_parampathinfo(root, rel,
992 required_outer);
993 pathnode->parallel_aware = (parallel_workers > 0);
994 pathnode->parallel_safe = rel->consider_parallel;
995 pathnode->parallel_workers = parallel_workers;
996 pathnode->pathkeys = NIL; /* seqscan has unordered result */
997
998 cost_seqscan(pathnode, root, rel, pathnode->param_info);
999
1000 return pathnode;
1001}
void cost_seqscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:295

References RelOptInfo::consider_parallel, cost_seqscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by create_plain_partial_paths(), plan_cluster_use_sort(), reparameterize_path(), and set_plain_rel_pathlist().
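
As a usage sketch from the extension side (hypothetical hook function; core code reaches the same calls through set_plain_rel_pathlist() and create_plain_partial_paths()): a set_rel_pathlist_hook can add both a serial and a partial sequential-scan path. The fixed two workers below are purely illustrative, since the core code sizes parallel_workers from the relation.

#include "postgres.h"

#include "nodes/parsenodes.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"

/* Hypothetical set_rel_pathlist_hook: add serial and partial seqscans. */
static void
my_rel_pathlist_hook(PlannerInfo *root, RelOptInfo *rel,
                     Index rti, RangeTblEntry *rte)
{
    /* Unparameterized, non-parallel sequential scan */
    add_path(rel, create_seqscan_path(root, rel, NULL, 0));

    /* A partial seqscan for Gather to build on, if parallelism is allowed;
     * two workers here is an arbitrary illustrative choice. */
    if (rel->consider_parallel)
        add_partial_path(rel, create_seqscan_path(root, rel, NULL, 2));
}

/* In _PG_init():  set_rel_pathlist_hook = my_rel_pathlist_hook;
 * (after saving any previous hook value, as extensions normally do) */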

◆ create_set_projection_path()

ProjectSetPath * create_set_projection_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * subpath,
PathTarget * target 
)

Definition at line 2962 of file pathnode.c.

2966{
2967 ProjectSetPath *pathnode = makeNode(ProjectSetPath);
2968 double tlist_rows;
2969 ListCell *lc;
2970
2971 pathnode->path.pathtype = T_ProjectSet;
2972 pathnode->path.parent = rel;
2973 pathnode->path.pathtarget = target;
2974 /* For now, assume we are above any joins, so no parameterization */
2975 pathnode->path.param_info = NULL;
2976 pathnode->path.parallel_aware = false;
2977 pathnode->path.parallel_safe = rel->consider_parallel &&
2978 subpath->parallel_safe &&
2979 is_parallel_safe(root, (Node *) target->exprs);
2980 pathnode->path.parallel_workers = subpath->parallel_workers;
2981 /* Projection does not change the sort order XXX? */
2982 pathnode->path.pathkeys = subpath->pathkeys;
2983
2984 pathnode->subpath = subpath;
2985
2986 /*
2987 * Estimate number of rows produced by SRFs for each row of input; if
2988 * there's more than one in this node, use the maximum.
2989 */
2990 tlist_rows = 1;
2991 foreach(lc, target->exprs)
2992 {
2993 Node *node = (Node *) lfirst(lc);
2994 double itemrows;
2995
2996 itemrows = expression_returns_set_rows(root, node);
2997 if (tlist_rows < itemrows)
2998 tlist_rows = itemrows;
2999 }
3000
3001 /*
3002 * In addition to the cost of evaluating the tlist, charge cpu_tuple_cost
3003 * per input row, and half of cpu_tuple_cost for each added output row.
3004 * This is slightly bizarre maybe, but it's what 9.6 did; we may revisit
3005 * this estimate later.
3006 */
3007 pathnode->path.disabled_nodes = subpath->disabled_nodes;
3008 pathnode->path.rows = subpath->rows * tlist_rows;
3009 pathnode->path.startup_cost = subpath->startup_cost +
3010 target->cost.startup;
3011 pathnode->path.total_cost = subpath->total_cost +
3012 target->cost.startup +
3013 (cpu_tuple_cost + target->cost.per_tuple) * subpath->rows +
3014 (pathnode->path.rows - subpath->rows) * cpu_tuple_cost / 2;
3015
3016 return pathnode;
3017}
double expression_returns_set_rows(PlannerInfo *root, Node *clause)
Definition: clauses.c:288
Path * subpath
Definition: pathnodes.h:2198

References RelOptInfo::consider_parallel, PathTarget::cost, cpu_tuple_cost, Path::disabled_nodes, expression_returns_set_rows(), PathTarget::exprs, is_parallel_safe(), lfirst, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ProjectSetPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), ProjectSetPath::subpath, and Path::total_cost.

Referenced by adjust_paths_for_srfs().
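
Sketch of the choice a caller makes (hypothetical helper, in the spirit of adjust_paths_for_srfs()): targets that contain set-returning functions go through create_set_projection_path(), whose row estimate is subpath->rows multiplied by the largest per-row SRF output, while SRF-free targets use an ordinary projection.

#include "postgres.h"

#include "optimizer/pathnode.h"

/* Hypothetical helper: project a target that may contain SRFs. */
static Path *
project_maybe_srf(PlannerInfo *root, RelOptInfo *rel, Path *input,
                  PathTarget *target, bool target_contains_srfs)
{
    if (target_contains_srfs)
        return (Path *) create_set_projection_path(root, rel, input, target);

    return (Path *) create_projection_path(root, rel, input, target);
}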

◆ create_setop_path()

SetOpPath * create_setop_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * leftpath,
Path * rightpath,
SetOpCmd  cmd,
SetOpStrategy  strategy,
List * groupList,
double  numGroups,
double  outputRows 
)

Definition at line 3650 of file pathnode.c.

3659{
3660 SetOpPath *pathnode = makeNode(SetOpPath);
3661
3662 pathnode->path.pathtype = T_SetOp;
3663 pathnode->path.parent = rel;
3664 pathnode->path.pathtarget = rel->reltarget;
3665 /* For now, assume we are above any joins, so no parameterization */
3666 pathnode->path.param_info = NULL;
3667 pathnode->path.parallel_aware = false;
3668 pathnode->path.parallel_safe = rel->consider_parallel &&
3669 leftpath->parallel_safe && rightpath->parallel_safe;
3670 pathnode->path.parallel_workers =
3671 leftpath->parallel_workers + rightpath->parallel_workers;
3672 /* SetOp preserves the input sort order if in sort mode */
3673 pathnode->path.pathkeys =
3674 (strategy == SETOP_SORTED) ? leftpath->pathkeys : NIL;
3675
3676 pathnode->leftpath = leftpath;
3677 pathnode->rightpath = rightpath;
3678 pathnode->cmd = cmd;
3679 pathnode->strategy = strategy;
3680 pathnode->groupList = groupList;
3681 pathnode->numGroups = numGroups;
3682
3683 /*
3684 * Compute cost estimates. As things stand, we end up with the same total
3685 * cost in this node for sort and hash methods, but different startup
3686 * costs. This could be refined perhaps, but it'll do for now.
3687 */
3688 pathnode->path.disabled_nodes =
3689 leftpath->disabled_nodes + rightpath->disabled_nodes;
3690 if (strategy == SETOP_SORTED)
3691 {
3692 /*
3693 * In sorted mode, we can emit output incrementally. Charge one
3694 * cpu_operator_cost per comparison per input tuple. Like cost_group,
3695 * we assume all columns get compared at most of the tuples.
3696 */
3697 pathnode->path.startup_cost =
3698 leftpath->startup_cost + rightpath->startup_cost;
3699 pathnode->path.total_cost =
3700 leftpath->total_cost + rightpath->total_cost +
3701 cpu_operator_cost * (leftpath->rows + rightpath->rows) * list_length(groupList);
3702
3703 /*
3704 * Also charge a small amount per extracted tuple. Like cost_sort,
3705 * charge only operator cost not cpu_tuple_cost, since SetOp does no
3706 * qual-checking or projection.
3707 */
3708 pathnode->path.total_cost += cpu_operator_cost * outputRows;
3709 }
3710 else
3711 {
3712 Size hashentrysize;
3713
3714 /*
3715 * In hashed mode, we must read all the input before we can emit
3716 * anything. Also charge comparison costs to represent the cost of
3717 * hash table lookups.
3718 */
3719 pathnode->path.startup_cost =
3720 leftpath->total_cost + rightpath->total_cost +
3721 cpu_operator_cost * (leftpath->rows + rightpath->rows) * list_length(groupList);
3722 pathnode->path.total_cost = pathnode->path.startup_cost;
3723
3724 /*
3725 * Also charge a small amount per extracted tuple. Like cost_sort,
3726 * charge only operator cost not cpu_tuple_cost, since SetOp does no
3727 * qual-checking or projection.
3728 */
3729 pathnode->path.total_cost += cpu_operator_cost * outputRows;
3730
3731 /*
3732 * Mark the path as disabled if enable_hashagg is off. While this
3733 * isn't exactly a HashAgg node, it seems close enough to justify
3734 * letting that switch control it.
3735 */
3736 if (!enable_hashagg)
3737 pathnode->path.disabled_nodes++;
3738
3739 /*
3740 * Also disable if it doesn't look like the hashtable will fit into
3741 * hash_mem.
3742 */
3743 hashentrysize = MAXALIGN(leftpath->pathtarget->width) +
3744 MAXALIGN(SizeofMinimalTupleHeader);
3745 if (hashentrysize * numGroups > get_hash_memory_limit())
3746 pathnode->path.disabled_nodes++;
3747 }
3748 pathnode->path.rows = outputRows;
3749
3750 return pathnode;
3751}
#define MAXALIGN(LEN)
Definition: c.h:765
size_t Size
Definition: c.h:559
double cpu_operator_cost
Definition: costsize.c:134
bool enable_hashagg
Definition: costsize.c:152
#define SizeofMinimalTupleHeader
Definition: htup_details.h:647
size_t get_hash_memory_limit(void)
Definition: nodeHash.c:3487
@ SETOP_SORTED
Definition: nodes.h:406
Path * rightpath
Definition: pathnodes.h:2346
Cardinality numGroups
Definition: pathnodes.h:2350
Path * leftpath
Definition: pathnodes.h:2345
SetOpCmd cmd
Definition: pathnodes.h:2347
Path path
Definition: pathnodes.h:2344
SetOpStrategy strategy
Definition: pathnodes.h:2348
List * groupList
Definition: pathnodes.h:2349

References SetOpPath::cmd, RelOptInfo::consider_parallel, cpu_operator_cost, Path::disabled_nodes, enable_hashagg, get_hash_memory_limit(), SetOpPath::groupList, if(), SetOpPath::leftpath, list_length(), makeNode, MAXALIGN, NIL, SetOpPath::numGroups, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, SetOpPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, SetOpPath::rightpath, Path::rows, SETOP_SORTED, SizeofMinimalTupleHeader, Path::startup_cost, SetOpPath::strategy, and Path::total_cost.

Referenced by generate_nonunion_paths().
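
A reduced caller sketch (hypothetical helper; generate_nonunion_paths() also builds the sorted variant and derives the estimates itself): numGroups would normally come from estimate_num_groups() over the grouping columns and outputRows from the set operation's semantics, so caller-supplied values are used here only to show the call shape.

#include "postgres.h"

#include "optimizer/pathnode.h"

/* Hypothetical helper: add a hashed SetOp implementing INTERSECT. */
static void
add_hashed_intersect(PlannerInfo *root, RelOptInfo *result_rel,
                     Path *lpath, Path *rpath, List *groupList,
                     double numGroups, double outputRows)
{
    SetOpPath  *spath;

    spath = create_setop_path(root, result_rel, lpath, rpath,
                              SETOPCMD_INTERSECT, SETOP_HASHED,
                              groupList, numGroups, outputRows);

    add_path(result_rel, (Path *) spath);
}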

◆ create_sort_path()

SortPath * create_sort_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * subpath,
List * pathkeys,
double  limit_tuples 
)

Definition at line 3082 of file pathnode.c.

3087{
3088 SortPath *pathnode = makeNode(SortPath);
3089
3090 pathnode->path.pathtype = T_Sort;
3091 pathnode->path.parent = rel;
3092 /* Sort doesn't project, so use source path's pathtarget */
3093 pathnode->path.pathtarget = subpath->pathtarget;
3094 /* For now, assume we are above any joins, so no parameterization */
3095 pathnode->path.param_info = NULL;
3096 pathnode->path.parallel_aware = false;
3097 pathnode->path.parallel_safe = rel->consider_parallel &&
3098 subpath->parallel_safe;
3099 pathnode->path.parallel_workers = subpath->parallel_workers;
3100 pathnode->path.pathkeys = pathkeys;
3101
3102 pathnode->subpath = subpath;
3103
3104 cost_sort(&pathnode->path, root, pathkeys,
3105 subpath->disabled_nodes,
3106 subpath->total_cost,
3107 subpath->rows,
3108 subpath->pathtarget->width,
3109 0.0, /* XXX comparison_cost shouldn't be 0? */
3110 work_mem, limit_tuples);
3111
3112 return pathnode;
3113}

References RelOptInfo::consider_parallel, cost_sort(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, SortPath::path, Path::pathkeys, Path::pathtype, root, subpath(), SortPath::subpath, and work_mem.

Referenced by add_paths_with_pathkeys_for_rel(), build_setop_child_paths(), create_one_window_path(), create_ordered_paths(), gather_grouping_paths(), generate_nonunion_paths(), generate_union_paths(), generate_useful_gather_paths(), and make_ordered_path().

◆ create_subqueryscan_path()

SubqueryScanPath * create_subqueryscan_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * subpath,
bool  trivial_pathtarget,
List * pathkeys,
Relids  required_outer 
)

Definition at line 2088 of file pathnode.c.

2091{
2092 SubqueryScanPath *pathnode = makeNode(SubqueryScanPath);
2093
2094 pathnode->path.pathtype = T_SubqueryScan;
2095 pathnode->path.parent = rel;
2096 pathnode->path.pathtarget = rel->reltarget;
2097 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
2098 required_outer);
2099 pathnode->path.parallel_aware = false;
2100 pathnode->path.parallel_safe = rel->consider_parallel &&
2101 subpath->parallel_safe;
2102 pathnode->path.parallel_workers = subpath->parallel_workers;
2103 pathnode->path.pathkeys = pathkeys;
2104 pathnode->subpath = subpath;
2105
2106 cost_subqueryscan(pathnode, root, rel, pathnode->path.param_info,
2107 trivial_pathtarget);
2108
2109 return pathnode;
2110}
void cost_subqueryscan(SubqueryScanPath *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info, bool trivial_pathtarget)
Definition: costsize.c:1457

References RelOptInfo::consider_parallel, cost_subqueryscan(), get_baserel_parampathinfo(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, SubqueryScanPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, root, subpath(), and SubqueryScanPath::subpath.

Referenced by build_setop_child_paths(), reparameterize_path(), and set_subquery_pathlist().

◆ create_tablefuncscan_path()

Path * create_tablefuncscan_path ( PlannerInfo * root,
RelOptInfo * rel,
Relids  required_outer 
)

Definition at line 2144 of file pathnode.c.

2146{
2147 Path *pathnode = makeNode(Path);
2148
2149 pathnode->pathtype = T_TableFuncScan;
2150 pathnode->parent = rel;
2151 pathnode->pathtarget = rel->reltarget;
2152 pathnode->param_info = get_baserel_parampathinfo(root, rel,
2153 required_outer);
2154 pathnode->parallel_aware = false;
2155 pathnode->parallel_safe = rel->consider_parallel;
2156 pathnode->parallel_workers = 0;
2157 pathnode->pathkeys = NIL; /* result is always unordered */
2158
2159 cost_tablefuncscan(pathnode, root, rel, pathnode->param_info);
2160
2161 return pathnode;
2162}
void cost_tablefuncscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1600

References RelOptInfo::consider_parallel, cost_tablefuncscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_tablefunc_pathlist().

◆ create_tidrangescan_path()

TidRangePath * create_tidrangescan_path ( PlannerInfo * root,
RelOptInfo * rel,
List * tidrangequals,
Relids  required_outer 
)

Definition at line 1264 of file pathnode.c.

1266{
1267 TidRangePath *pathnode = makeNode(TidRangePath);
1268
1269 pathnode->path.pathtype = T_TidRangeScan;
1270 pathnode->path.parent = rel;
1271 pathnode->path.pathtarget = rel->reltarget;
1272 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1273 required_outer);
1274 pathnode->path.parallel_aware = false;
1275 pathnode->path.parallel_safe = rel->consider_parallel;
1276 pathnode->path.parallel_workers = 0;
1277 pathnode->path.pathkeys = NIL; /* always unordered */
1278
1279 pathnode->tidrangequals = tidrangequals;
1280
1281 cost_tidrangescan(&pathnode->path, root, rel, tidrangequals,
1282 pathnode->path.param_info);
1283
1284 return pathnode;
1285}
void cost_tidrangescan(Path *path, PlannerInfo *root, RelOptInfo *baserel, List *tidrangequals, ParamPathInfo *param_info)
Definition: costsize.c:1363
List * tidrangequals
Definition: pathnodes.h:1848

References RelOptInfo::consider_parallel, cost_tidrangescan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, TidRangePath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, root, and TidRangePath::tidrangequals.

Referenced by create_tidscan_paths().

◆ create_tidscan_path()

TidPath * create_tidscan_path ( PlannerInfo * root,
RelOptInfo * rel,
List * tidquals,
Relids  required_outer 
)

Definition at line 1235 of file pathnode.c.

1237{
1238 TidPath *pathnode = makeNode(TidPath);
1239
1240 pathnode->path.pathtype = T_TidScan;
1241 pathnode->path.parent = rel;
1242 pathnode->path.pathtarget = rel->reltarget;
1243 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1244 required_outer);
1245 pathnode->path.parallel_aware = false;
1246 pathnode->path.parallel_safe = rel->consider_parallel;
1247 pathnode->path.parallel_workers = 0;
1248 pathnode->path.pathkeys = NIL; /* always unordered */
1249
1250 pathnode->tidquals = tidquals;
1251
1252 cost_tidscan(&pathnode->path, root, rel, tidquals,
1253 pathnode->path.param_info);
1254
1255 return pathnode;
1256}
void cost_tidscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, List *tidquals, ParamPathInfo *param_info)
Definition: costsize.c:1258
List * tidquals
Definition: pathnodes.h:1836
Path path
Definition: pathnodes.h:1835

References RelOptInfo::consider_parallel, cost_tidscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, TidPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, root, and TidPath::tidquals.

Referenced by BuildParameterizedTidPaths(), and create_tidscan_paths().

◆ create_unique_path()

UniquePath * create_unique_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * subpath,
SpecialJoinInfo * sjinfo 
)

Definition at line 1727 of file pathnode.c.

1729{
1730 UniquePath *pathnode;
1731 Path sort_path; /* dummy for result of cost_sort */
1732 Path agg_path; /* dummy for result of cost_agg */
1733 MemoryContext oldcontext;
1734 int numCols;
1735
1736 /* Caller made a mistake if subpath isn't cheapest_total ... */
1737 Assert(subpath == rel->cheapest_total_path);
1738 Assert(subpath->parent == rel);
1739 /* ... or if SpecialJoinInfo is the wrong one */
1740 Assert(sjinfo->jointype == JOIN_SEMI);
1741 Assert(bms_equal(rel->relids, sjinfo->syn_righthand));
1742
1743 /* If result already cached, return it */
1744 if (rel->cheapest_unique_path)
1745 return (UniquePath *) rel->cheapest_unique_path;
1746
1747 /* If it's not possible to unique-ify, return NULL */
1748 if (!(sjinfo->semi_can_btree || sjinfo->semi_can_hash))
1749 return NULL;
1750
1751 /*
1752 * When called during GEQO join planning, we are in a short-lived memory
1753 * context. We must make sure that the path and any subsidiary data
1754 * structures created for a baserel survive the GEQO cycle, else the
1755 * baserel is trashed for future GEQO cycles. On the other hand, when we
1756 * are creating those for a joinrel during GEQO, we don't want them to
1757 * clutter the main planning context. Upshot is that the best solution is
1758 * to explicitly allocate memory in the same context the given RelOptInfo
1759 * is in.
1760 */
1761 oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
1762
1763 pathnode = makeNode(UniquePath);
1764
1765 pathnode->path.pathtype = T_Unique;
1766 pathnode->path.parent = rel;
1767 pathnode->path.pathtarget = rel->reltarget;
1768 pathnode->path.param_info = subpath->param_info;
1769 pathnode->path.parallel_aware = false;
1770 pathnode->path.parallel_safe = rel->consider_parallel &&
1771 subpath->parallel_safe;
1772 pathnode->path.parallel_workers = subpath->parallel_workers;
1773
1774 /*
1775 * Assume the output is unsorted, since we don't necessarily have pathkeys
1776 * to represent it. (This might get overridden below.)
1777 */
1778 pathnode->path.pathkeys = NIL;
1779
1780 pathnode->subpath = subpath;
1781
1782 /*
1783 * Under GEQO and when planning child joins, the sjinfo might be
1784 * short-lived, so we'd better make copies of data structures we extract
1785 * from it.
1786 */
1787 pathnode->in_operators = copyObject(sjinfo->semi_operators);
1788 pathnode->uniq_exprs = copyObject(sjinfo->semi_rhs_exprs);
1789
1790 /*
1791 * If the input is a relation and it has a unique index that proves the
1792 * semi_rhs_exprs are unique, then we don't need to do anything. Note
1793 * that relation_has_unique_index_for automatically considers restriction
1794 * clauses for the rel, as well.
1795 */
1796 if (rel->rtekind == RTE_RELATION && sjinfo->semi_can_btree &&
1797 relation_has_unique_index_for(root, rel, NIL,
1798 sjinfo->semi_rhs_exprs,
1799 sjinfo->semi_operators))
1800 {
1801 pathnode->umethod = UNIQUE_PATH_NOOP;
1802 pathnode->path.rows = rel->rows;
1803 pathnode->path.disabled_nodes = subpath->disabled_nodes;
1804 pathnode->path.startup_cost = subpath->startup_cost;
1805 pathnode->path.total_cost = subpath->total_cost;
1806 pathnode->path.pathkeys = subpath->pathkeys;
1807
1808 rel->cheapest_unique_path = (Path *) pathnode;
1809
1810 MemoryContextSwitchTo(oldcontext);
1811
1812 return pathnode;
1813 }
1814
1815 /*
1816 * If the input is a subquery whose output must be unique already, then we
1817 * don't need to do anything. The test for uniqueness has to consider
1818 * exactly which columns we are extracting; for example "SELECT DISTINCT
1819 * x,y" doesn't guarantee that x alone is distinct. So we cannot check for
1820 * this optimization unless semi_rhs_exprs consists only of simple Vars
1821 * referencing subquery outputs. (Possibly we could do something with
1822 * expressions in the subquery outputs, too, but for now keep it simple.)
1823 */
1824 if (rel->rtekind == RTE_SUBQUERY)
1825 {
1826 RangeTblEntry *rte = planner_rt_fetch(rel->relid, root);
1827
1828 if (query_supports_distinctness(rte->subquery))
1829 {
1830 List *sub_tlist_colnos;
1831
1832 sub_tlist_colnos = translate_sub_tlist(sjinfo->semi_rhs_exprs,
1833 rel->relid);
1834
1835 if (sub_tlist_colnos &&
1836 query_is_distinct_for(rte->subquery,
1837 sub_tlist_colnos,
1838 sjinfo->semi_operators))
1839 {
1840 pathnode->umethod = UNIQUE_PATH_NOOP;
1841 pathnode->path.rows = rel->rows;
1842 pathnode->path.disabled_nodes = subpath->disabled_nodes;
1843 pathnode->path.startup_cost = subpath->startup_cost;
1844 pathnode->path.total_cost = subpath->total_cost;
1845 pathnode->path.pathkeys = subpath->pathkeys;
1846
1847 rel->cheapest_unique_path = (Path *) pathnode;
1848
1849 MemoryContextSwitchTo(oldcontext);
1850
1851 return pathnode;
1852 }
1853 }
1854 }
1855
1856 /* Estimate number of output rows */
1857 pathnode->path.rows = estimate_num_groups(root,
1858 sjinfo->semi_rhs_exprs,
1859 rel->rows,
1860 NULL,
1861 NULL);
1862 numCols = list_length(sjinfo->semi_rhs_exprs);
1863
1864 if (sjinfo->semi_can_btree)
1865 {
1866 /*
1867 * Estimate cost for sort+unique implementation
1868 */
1869 cost_sort(&sort_path, root, NIL,
1870 subpath->disabled_nodes,
1871 subpath->total_cost,
1872 rel->rows,
1873 subpath->pathtarget->width,
1874 0.0,
1875 work_mem,
1876 -1.0);
1877
1878 /*
1879 * Charge one cpu_operator_cost per comparison per input tuple. We
1880 * assume all columns get compared at most of the tuples. (XXX
1881 * probably this is an overestimate.) This should agree with
1882 * create_upper_unique_path.
1883 */
1884 sort_path.total_cost += cpu_operator_cost * rel->rows * numCols;
1885 }
1886
1887 if (sjinfo->semi_can_hash)
1888 {
1889 /*
1890 * Estimate the overhead per hashtable entry at 64 bytes (same as in
1891 * planner.c).
1892 */
1893 int hashentrysize = subpath->pathtarget->width + 64;
1894
1895 if (hashentrysize * pathnode->path.rows > get_hash_memory_limit())
1896 {
1897 /*
1898 * We should not try to hash. Hack the SpecialJoinInfo to
1899 * remember this, in case we come through here again.
1900 */
1901 sjinfo->semi_can_hash = false;
1902 }
1903 else
1904 cost_agg(&agg_path, root,
1905 AGG_HASHED, NULL,
1906 numCols, pathnode->path.rows,
1907 NIL,
1908 subpath->disabled_nodes,
1909 subpath->startup_cost,
1910 subpath->total_cost,
1911 rel->rows,
1912 subpath->pathtarget->width);
1913 }
1914
1915 if (sjinfo->semi_can_btree && sjinfo->semi_can_hash)
1916 {
1917 if (agg_path.disabled_nodes < sort_path.disabled_nodes ||
1918 (agg_path.disabled_nodes == sort_path.disabled_nodes &&
1919 agg_path.total_cost < sort_path.total_cost))
1920 pathnode->umethod = UNIQUE_PATH_HASH;
1921 else
1922 pathnode->umethod = UNIQUE_PATH_SORT;
1923 }
1924 else if (sjinfo->semi_can_btree)
1925 pathnode->umethod = UNIQUE_PATH_SORT;
1926 else if (sjinfo->semi_can_hash)
1927 pathnode->umethod = UNIQUE_PATH_HASH;
1928 else
1929 {
1930 /* we can get here only if we abandoned hashing above */
1931 MemoryContextSwitchTo(oldcontext);
1932 return NULL;
1933 }
1934
1935 if (pathnode->umethod == UNIQUE_PATH_HASH)
1936 {
1937 pathnode->path.disabled_nodes = agg_path.disabled_nodes;
1938 pathnode->path.startup_cost = agg_path.startup_cost;
1939 pathnode->path.total_cost = agg_path.total_cost;
1940 }
1941 else
1942 {
1943 pathnode->path.disabled_nodes = sort_path.disabled_nodes;
1944 pathnode->path.startup_cost = sort_path.startup_cost;
1945 pathnode->path.total_cost = sort_path.total_cost;
1946 }
1947
1948 rel->cheapest_unique_path = (Path *) pathnode;
1949
1950 MemoryContextSwitchTo(oldcontext);
1951
1952 return pathnode;
1953}
bool query_is_distinct_for(Query *query, List *colnos, List *opids)
Definition: analyzejoins.c:983
bool query_supports_distinctness(Query *query)
Definition: analyzejoins.c:946
bool relation_has_unique_index_for(PlannerInfo *root, RelOptInfo *rel, List *restrictlist, List *exprlist, List *oprlist)
Definition: indxpath.c:4149
MemoryContext GetMemoryChunkContext(void *pointer)
Definition: mcxt.c:707
#define copyObject(obj)
Definition: nodes.h:224
@ JOIN_SEMI
Definition: nodes.h:307
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:124
static List * translate_sub_tlist(List *tlist, int relid)
Definition: pathnode.c:2018
@ UNIQUE_PATH_SORT
Definition: pathnodes.h:2034
@ UNIQUE_PATH_NOOP
Definition: pathnodes.h:2032
@ UNIQUE_PATH_HASH
Definition: pathnodes.h:2033
#define planner_rt_fetch(rti, root)
Definition: pathnodes.h:570
double estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, List **pgset, EstimationInfo *estinfo)
Definition: selfuncs.c:3430
Query * subquery
Definition: parsenodes.h:1104
List * semi_rhs_exprs
Definition: pathnodes.h:2920
Relids syn_righthand
Definition: pathnodes.h:2908
List * semi_operators
Definition: pathnodes.h:2919
Path * subpath
Definition: pathnodes.h:2040
List * uniq_exprs
Definition: pathnodes.h:2043
UniquePathMethod umethod
Definition: pathnodes.h:2041
List * in_operators
Definition: pathnodes.h:2042

References AGG_HASHED, Assert, bms_equal(), RelOptInfo::cheapest_total_path, RelOptInfo::cheapest_unique_path, RelOptInfo::consider_parallel, copyObject, cost_agg(), cost_sort(), cpu_operator_cost, Path::disabled_nodes, estimate_num_groups(), get_hash_memory_limit(), GetMemoryChunkContext(), UniquePath::in_operators, JOIN_SEMI, SpecialJoinInfo::jointype, list_length(), makeNode, MemoryContextSwitchTo(), NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, UniquePath::path, Path::pathkeys, Path::pathtype, planner_rt_fetch, query_is_distinct_for(), query_supports_distinctness(), relation_has_unique_index_for(), RelOptInfo::relid, RelOptInfo::relids, RelOptInfo::reltarget, root, RelOptInfo::rows, Path::rows, RTE_RELATION, RTE_SUBQUERY, RelOptInfo::rtekind, SpecialJoinInfo::semi_can_btree, SpecialJoinInfo::semi_can_hash, SpecialJoinInfo::semi_operators, SpecialJoinInfo::semi_rhs_exprs, Path::startup_cost, subpath(), UniquePath::subpath, RangeTblEntry::subquery, SpecialJoinInfo::syn_righthand, Path::total_cost, translate_sub_tlist(), UniquePath::umethod, UniquePath::uniq_exprs, UNIQUE_PATH_HASH, UNIQUE_PATH_NOOP, UNIQUE_PATH_SORT, and work_mem.

Referenced by consider_parallel_nestloop(), hash_inner_and_outer(), join_is_legal(), match_unsorted_outer(), populate_joinrel_with_paths(), and sort_inner_and_outer().
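As a hedged usage sketch (helper name hypothetical, not from this file): join planning that wants to treat a semijoin as a plain join against a de-duplicated right-hand side passes that rel's cheapest_total_path, per the Asserts above, and must be prepared for a NULL result when unique-ification is not possible.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical sketch: unique-ify the RHS of a semijoin, or give up. */
static Path *
unique_ified_inner(PlannerInfo *root, RelOptInfo *innerrel,
                   SpecialJoinInfo *sjinfo)
{
    UniquePath *upath;

    /* Must pass cheapest_total_path; the result is cached in the rel. */
    upath = create_unique_path(root, innerrel,
                               innerrel->cheapest_total_path, sjinfo);

    return (Path *) upath;      /* NULL if it can't be unique-ified */
}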

◆ create_upper_unique_path()

UpperUniquePath * create_upper_unique_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, int numCols, double numGroups)

Definition at line 3187 of file pathnode.c.

3192{
3193 UpperUniquePath *pathnode = makeNode(UpperUniquePath);
3194
3195 pathnode->path.pathtype = T_Unique;
3196 pathnode->path.parent = rel;
3197 /* Unique doesn't project, so use source path's pathtarget */
3198 pathnode->path.pathtarget = subpath->pathtarget;
3199 /* For now, assume we are above any joins, so no parameterization */
3200 pathnode->path.param_info = NULL;
3201 pathnode->path.parallel_aware = false;
3202 pathnode->path.parallel_safe = rel->consider_parallel &&
3203 subpath->parallel_safe;
3204 pathnode->path.parallel_workers = subpath->parallel_workers;
3205 /* Unique doesn't change the input ordering */
3206 pathnode->path.pathkeys = subpath->pathkeys;
3207
3208 pathnode->subpath = subpath;
3209 pathnode->numkeys = numCols;
3210
3211 /*
3212 * Charge one cpu_operator_cost per comparison per input tuple. We assume
3213 * all columns get compared at most of the tuples. (XXX probably this is
3214 * an overestimate.)
3215 */
3216 pathnode->path.disabled_nodes = subpath->disabled_nodes;
3217 pathnode->path.startup_cost = subpath->startup_cost;
3218 pathnode->path.total_cost = subpath->total_cost +
3219 cpu_operator_cost * subpath->rows * numCols;
3220 pathnode->path.rows = numGroups;
3221
3222 return pathnode;
3223}

References RelOptInfo::consider_parallel, cpu_operator_cost, Path::disabled_nodes, makeNode, UpperUniquePath::numkeys, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, UpperUniquePath::path, Path::pathkeys, Path::pathtype, Path::rows, Path::startup_cost, subpath(), UpperUniquePath::subpath, and Path::total_cost.

Referenced by create_final_distinct_paths(), create_partial_distinct_paths(), and generate_union_paths().
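A minimal sketch of the caller-side pattern (function name hypothetical): given an input path already sorted by the DISTINCT columns, stack a Unique node on top; numDistinctRows would normally come from estimate_num_groups().

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical sketch: DISTINCT over a presorted input path. */
static Path *
distinct_on_sorted_input(PlannerInfo *root, RelOptInfo *distinct_rel,
                         Path *sorted_input, int numDistinctCols,
                         double numDistinctRows)
{
    return (Path *) create_upper_unique_path(root, distinct_rel,
                                             sorted_input,
                                             numDistinctCols,
                                             numDistinctRows);
}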

◆ create_valuesscan_path()

Path * create_valuesscan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)

Definition at line 2170 of file pathnode.c.

2172{
2173 Path *pathnode = makeNode(Path);
2174
2175 pathnode->pathtype = T_ValuesScan;
2176 pathnode->parent = rel;
2177 pathnode->pathtarget = rel->reltarget;
2178 pathnode->param_info = get_baserel_parampathinfo(root, rel,
2179 required_outer);
2180 pathnode->parallel_aware = false;
2181 pathnode->parallel_safe = rel->consider_parallel;
2182 pathnode->parallel_workers = 0;
2183 pathnode->pathkeys = NIL; /* result is always unordered */
2184
2185 cost_valuesscan(pathnode, root, rel, pathnode->param_info);
2186
2187 return pathnode;
2188}
void cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1657

References RelOptInfo::consider_parallel, cost_valuesscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_values_pathlist().

◆ create_windowagg_path()

WindowAggPath * create_windowagg_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *windowFuncs, List *runCondition, WindowClause *winclause, List *qual, bool topwindow)

Definition at line 3577 of file pathnode.c.

3586{
3587 WindowAggPath *pathnode = makeNode(WindowAggPath);
3588
3589 /* qual can only be set for the topwindow */
3590 Assert(qual == NIL || topwindow);
3591
3592 pathnode->path.pathtype = T_WindowAgg;
3593 pathnode->path.parent = rel;
3594 pathnode->path.pathtarget = target;
3595 /* For now, assume we are above any joins, so no parameterization */
3596 pathnode->path.param_info = NULL;
3597 pathnode->path.parallel_aware = false;
3598 pathnode->path.parallel_safe = rel->consider_parallel &&
3599 subpath->parallel_safe;
3600 pathnode->path.parallel_workers = subpath->parallel_workers;
3601 /* WindowAgg preserves the input sort order */
3602 pathnode->path.pathkeys = subpath->pathkeys;
3603
3604 pathnode->subpath = subpath;
3605 pathnode->winclause = winclause;
3606 pathnode->qual = qual;
3607 pathnode->runCondition = runCondition;
3608 pathnode->topwindow = topwindow;
3609
3610 /*
3611 * For costing purposes, assume that there are no redundant partitioning
3612 * or ordering columns; it's not worth the trouble to deal with that
3613 * corner case here. So we just pass the unmodified list lengths to
3614 * cost_windowagg.
3615 */
3616 cost_windowagg(&pathnode->path, root,
3617 windowFuncs,
3618 winclause,
3619 subpath->disabled_nodes,
3620 subpath->startup_cost,
3621 subpath->total_cost,
3622 subpath->rows);
3623
3624 /* add tlist eval cost for each output row */
3625 pathnode->path.startup_cost += target->cost.startup;
3626 pathnode->path.total_cost += target->cost.startup +
3627 target->cost.per_tuple * pathnode->path.rows;
3628
3629 return pathnode;
3630}
void cost_windowagg(Path *path, PlannerInfo *root, List *windowFuncs, WindowClause *winclause, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double input_tuples)
Definition: costsize.c:3099
List * runCondition
Definition: pathnodes.h:2334
Path * subpath
Definition: pathnodes.h:2331
WindowClause * winclause
Definition: pathnodes.h:2332

References Assert, RelOptInfo::consider_parallel, PathTarget::cost, cost_windowagg(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, WindowAggPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, WindowAggPath::qual, root, Path::rows, WindowAggPath::runCondition, QualCost::startup, Path::startup_cost, subpath(), WindowAggPath::subpath, WindowAggPath::topwindow, Path::total_cost, and WindowAggPath::winclause.

Referenced by create_one_window_path().
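A hedged sketch of the create_one_window_path() calling pattern (helper name hypothetical): the input path is assumed to be already sorted to match the window clause's PARTITION BY/ORDER BY, and qual is passed only for the topmost WindowAgg.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical sketch: one WindowAgg over an appropriately sorted input. */
static Path *
one_window(PlannerInfo *root, RelOptInfo *window_rel, Path *sorted_subpath,
           PathTarget *output_target, List *wfuncs, WindowClause *wc,
           bool topwindow)
{
    return (Path *) create_windowagg_path(root, window_rel, sorted_subpath,
                                          output_target, wfuncs,
                                          NIL,   /* runCondition */
                                          wc,
                                          NIL,   /* qual: top window only */
                                          topwindow);
}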

◆ create_worktablescan_path()

Path * create_worktablescan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)

Definition at line 2274 of file pathnode.c.

2276{
2277 Path *pathnode = makeNode(Path);
2278
2279 pathnode->pathtype = T_WorkTableScan;
2280 pathnode->parent = rel;
2281 pathnode->pathtarget = rel->reltarget;
2282 pathnode->param_info = get_baserel_parampathinfo(root, rel,
2283 required_outer);
2284 pathnode->parallel_aware = false;
2285 pathnode->parallel_safe = rel->consider_parallel;
2286 pathnode->parallel_workers = 0;
2287 pathnode->pathkeys = NIL; /* result is always unordered */
2288
2289 /* Cost is the same as for a regular CTE scan */
2290 cost_ctescan(pathnode, root, rel, pathnode->param_info);
2291
2292 return pathnode;
2293}

References RelOptInfo::consider_parallel, cost_ctescan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_worktable_pathlist().

◆ expand_planner_arrays()

void expand_planner_arrays (PlannerInfo *root, int add_size)

Definition at line 163 of file relnode.c.

164{
165 int new_size;
166
167 Assert(add_size > 0);
168
169 new_size = root->simple_rel_array_size + add_size;
170
171 root->simple_rel_array =
172 repalloc0_array(root->simple_rel_array, RelOptInfo *, root->simple_rel_array_size, new_size);
173
174 root->simple_rte_array =
175 repalloc0_array(root->simple_rte_array, RangeTblEntry *, root->simple_rel_array_size, new_size);
176
177 if (root->append_rel_array)
178 root->append_rel_array =
179 repalloc0_array(root->append_rel_array, AppendRelInfo *, root->simple_rel_array_size, new_size);
180 else
181 root->append_rel_array =
182 palloc0_array(AppendRelInfo *, new_size);
183
184 root->simple_rel_array_size = new_size;
185}
#define palloc0_array(type, count)
Definition: fe_memutils.h:77
#define repalloc0_array(pointer, type, oldcount, count)
Definition: palloc.h:109

References Assert, palloc0_array, repalloc0_array, and root. (The add_size here is the int parameter, not the add_size() function from shmem.c.)

Referenced by expand_inherited_rtentry(), and expand_partitioned_rtentry().
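A minimal sketch of the calling pattern (helper name hypothetical): inheritance expansion grows the planner arrays once for all the child RTEs it is about to add, before assigning them relids.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical sketch: reserve array slots for nchildren new child rels. */
static void
reserve_child_slots(PlannerInfo *root, int nchildren)
{
    if (nchildren > 0)
        expand_planner_arrays(root, nchildren);
    /* The new relids occupy the freshly zeroed slots at the array's end. */
}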

◆ fetch_upper_rel()

RelOptInfo * fetch_upper_rel (PlannerInfo *root, UpperRelationKind kind, Relids relids)

Definition at line 1458 of file relnode.c.

1459{
1460 RelOptInfo *upperrel;
1461 ListCell *lc;
1462
1463 /*
1464 * For the moment, our indexing data structure is just a List for each
1465 * relation kind. If we ever get so many of one kind that this stops
1466 * working well, we can improve it. No code outside this function should
1467 * assume anything about how to find a particular upperrel.
1468 */
1469
1470 /* If we already made this upperrel for the query, return it */
1471 foreach(lc, root->upper_rels[kind])
1472 {
1473 upperrel = (RelOptInfo *) lfirst(lc);
1474
1475 if (bms_equal(upperrel->relids, relids))
1476 return upperrel;
1477 }
1478
1479 upperrel = makeNode(RelOptInfo);
1480 upperrel->reloptkind = RELOPT_UPPER_REL;
1481 upperrel->relids = bms_copy(relids);
1482
1483 /* cheap startup cost is interesting iff not all tuples to be retrieved */
1484 upperrel->consider_startup = (root->tuple_fraction > 0);
1485 upperrel->consider_param_startup = false;
1486 upperrel->consider_parallel = false; /* might get changed later */
1487 upperrel->reltarget = create_empty_pathtarget();
1488 upperrel->pathlist = NIL;
1489 upperrel->cheapest_startup_path = NULL;
1490 upperrel->cheapest_total_path = NULL;
1491 upperrel->cheapest_unique_path = NULL;
1492 upperrel->cheapest_parameterized_paths = NIL;
1493
1494 root->upper_rels[kind] = lappend(root->upper_rels[kind], upperrel);
1495
1496 return upperrel;
1497}
@ RELOPT_UPPER_REL
Definition: pathnodes.h:831

References bms_copy(), bms_equal(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RelOptInfo::cheapest_unique_path, RelOptInfo::consider_parallel, RelOptInfo::consider_param_startup, RelOptInfo::consider_startup, create_empty_pathtarget(), lappend(), lfirst, makeNode, NIL, RelOptInfo::pathlist, RelOptInfo::relids, RELOPT_UPPER_REL, RelOptInfo::reloptkind, RelOptInfo::reltarget, and root.

Referenced by add_rtes_to_flat_rtable(), build_setop_child_paths(), create_distinct_paths(), create_ordered_paths(), create_partial_distinct_paths(), create_partial_grouping_paths(), create_window_paths(), generate_nonunion_paths(), generate_recursion_path(), generate_union_paths(), grouping_planner(), make_grouping_rel(), make_subplan(), preprocess_minmax_aggregates(), set_subquery_pathlist(), set_subquery_size_estimates(), SS_process_ctes(), standard_planner(), and subquery_planner().
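A hedged sketch of typical use (helper name hypothetical): upper planning stages fetch their RelOptInfo on demand, creating it on first use, and then attach paths to it.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical sketch: attach a finished path to the final upper rel. */
static void
add_final_path(PlannerInfo *root, Path *path)
{
    RelOptInfo *final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);

    add_path(final_rel, path);
}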

◆ find_base_rel()

◆ find_base_rel_ignore_join()

RelOptInfo * find_base_rel_ignore_join (PlannerInfo *root, int relid)

Definition at line 454 of file relnode.c.

455{
456 /* use an unsigned comparison to prevent negative array element access */
457 if ((uint32) relid < (uint32) root->simple_rel_array_size)
458 {
459 RelOptInfo *rel;
460 RangeTblEntry *rte;
461
462 rel = root->simple_rel_array[relid];
463 if (rel)
464 return rel;
465
466 /*
467 * We could just return NULL here, but for debugging purposes it seems
468 * best to actually verify that the relid is an outer join and not
469 * something weird.
470 */
471 rte = root->simple_rte_array[relid];
472 if (rte && rte->rtekind == RTE_JOIN && rte->jointype != JOIN_INNER)
473 return NULL;
474 }
475
476 elog(ERROR, "no relation entry for relid %d", relid);
477
478 return NULL; /* keep compiler quiet */
479}
JoinType jointype
Definition: parsenodes.h:1151

References elog, ERROR, JOIN_INNER, RangeTblEntry::jointype, root, RTE_JOIN, and RangeTblEntry::rtekind.

Referenced by add_join_clause_to_rels(), create_lateral_join_info(), find_appinfos_by_relids(), and remove_join_clause_from_rels().

◆ find_base_rel_noerr()

RelOptInfo * find_base_rel_noerr (PlannerInfo *root, int relid)

Definition at line 436 of file relnode.c.

437{
438 /* use an unsigned comparison to prevent negative array element access */
439 if ((uint32) relid < (uint32) root->simple_rel_array_size)
440 return root->simple_rel_array[relid];
441 return NULL;
442}

References root.

Referenced by examine_simple_variable().
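For contrast, a small hedged sketch (helper name hypothetical): find_base_rel() raises an error for a missing entry, find_base_rel_noerr() simply returns NULL, and find_base_rel_ignore_join() tolerates only outer-join relids.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical sketch: does this relid have a baserel RelOptInfo yet? */
static bool
relid_has_baserel(PlannerInfo *root, int relid)
{
    return find_base_rel_noerr(root, relid) != NULL;
}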

◆ find_childrel_parents()

Relids find_childrel_parents (PlannerInfo *root, RelOptInfo *rel)

Definition at line 1509 of file relnode.c.

1510{
1511 Relids result = NULL;
1512
1512
1513 Assert(rel->reloptkind == RELOPT_OTHER_MEMBER_REL);
1514 Assert(rel->relid > 0 && rel->relid < root->simple_rel_array_size);
1515
1516 do
1517 {
1518 AppendRelInfo *appinfo = root->append_rel_array[rel->relid];
1519 Index prelid = appinfo->parent_relid;
1520
1521 result = bms_add_member(result, prelid);
1522
1523 /* traverse up to the parent rel, loop if it's also a child rel */
1524 rel = find_base_rel(root, prelid);
1525 } while (rel->reloptkind == RELOPT_OTHER_MEMBER_REL);
1526
1527 Assert(rel->reloptkind == RELOPT_BASEREL);
1528
1529 return result;
1530}
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:815
unsigned int Index
Definition: c.h:568
RelOptInfo * find_base_rel(PlannerInfo *root, int relid)
Definition: relnode.c:414
Index parent_relid
Definition: pathnodes.h:2980

References Assert, bms_add_member(), find_base_rel(), AppendRelInfo::parent_relid, RelOptInfo::relid, RELOPT_BASEREL, RELOPT_OTHER_MEMBER_REL, RelOptInfo::reloptkind, and root.

Referenced by check_index_predicates(), and generate_implied_equalities_for_column().

◆ find_join_rel()

RelOptInfo * find_join_rel (PlannerInfo *root, Relids relids)

Definition at line 527 of file relnode.c.

528{
529 /*
530 * Switch to using hash lookup when list grows "too long". The threshold
531 * is arbitrary and is known only here.
532 */
533 if (!root->join_rel_hash && list_length(root->join_rel_list) > 32)
534 build_join_rel_hash(root);
535
536 /*
537 * Use either hashtable lookup or linear search, as appropriate.
538 *
539 * Note: the seemingly redundant hashkey variable is used to avoid taking
540 * the address of relids; unless the compiler is exceedingly smart, doing
541 * so would force relids out of a register and thus probably slow down the
542 * list-search case.
543 */
544 if (root->join_rel_hash)
545 {
546 Relids hashkey = relids;
547 JoinHashEntry *hentry;
548
549 hentry = (JoinHashEntry *) hash_search(root->join_rel_hash,
550 &hashkey,
551 HASH_FIND,
552 NULL);
553 if (hentry)
554 return hentry->join_rel;
555 }
556 else
557 {
558 ListCell *l;
559
560 foreach(l, root->join_rel_list)
561 {
562 RelOptInfo *rel = (RelOptInfo *) lfirst(l);
563
564 if (bms_equal(rel->relids, relids))
565 return rel;
566 }
567 }
568
569 return NULL;
570}
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:955
@ HASH_FIND
Definition: hsearch.h:113
static void build_join_rel_hash(PlannerInfo *root)
Definition: relnode.c:486
RelOptInfo * join_rel
Definition: relnode.c:41

References bms_equal(), build_join_rel_hash(), HASH_FIND, hash_search(), JoinHashEntry::join_rel, lfirst, list_length(), RelOptInfo::relids, and root.

Referenced by build_child_join_rel(), build_join_rel(), examine_variable(), find_join_input_rel(), get_matching_part_pairs(), and postgresPlanDirectModify().
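A minimal usage sketch (helper name hypothetical): callers look join rels up by the set of base relids they cover, so a probe is just a bitmapset union plus a lookup.

#include "postgres.h"
#include "nodes/bitmapset.h"
#include "optimizer/pathnode.h"

/* Hypothetical sketch: has the join of rel1 and rel2 been built already? */
static bool
joinrel_exists(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2)
{
    Relids      joinrelids = bms_union(rel1->relids, rel2->relids);
    bool        found = (find_join_rel(root, joinrelids) != NULL);

    bms_free(joinrelids);
    return found;
}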

◆ find_param_path_info()

ParamPathInfo * find_param_path_info (RelOptInfo *rel, Relids required_outer)

Definition at line 1889 of file relnode.c.

1890{
1891 ListCell *lc;
1892
1893 foreach(lc, rel->ppilist)
1894 {
1895 ParamPathInfo *ppi = (ParamPathInfo *) lfirst(lc);
1896
1897 if (bms_equal(ppi->ppi_req_outer, required_outer))
1898 return ppi;
1899 }
1900
1901 return NULL;
1902}
Relids ppi_req_outer
Definition: pathnodes.h:1590

References bms_equal(), lfirst, ParamPathInfo::ppi_req_outer, and RelOptInfo::ppilist.

Referenced by get_appendrel_parampathinfo(), get_baserel_parampathinfo(), get_joinrel_parampathinfo(), and reparameterize_path_by_child().

◆ get_appendrel_parampathinfo()

ParamPathInfo * get_appendrel_parampathinfo (RelOptInfo *appendrel, Relids required_outer)

Definition at line 1856 of file relnode.c.

1857{
1858 ParamPathInfo *ppi;
1859
1860 /* If rel has LATERAL refs, every path for it should account for them */
1861 Assert(bms_is_subset(appendrel->lateral_relids, required_outer));
1862
1863 /* Unparameterized paths have no ParamPathInfo */
1864 if (bms_is_empty(required_outer))
1865 return NULL;
1866
1867 Assert(!bms_overlap(appendrel->relids, required_outer));
1868
1869 /* If we already have a PPI for this parameterization, just return it */
1870 if ((ppi = find_param_path_info(appendrel, required_outer)))
1871 return ppi;
1872
1873 /* Else build the ParamPathInfo */
1874 ppi = makeNode(ParamPathInfo);
1875 ppi->ppi_req_outer = required_outer;
1876 ppi->ppi_rows = 0;
1877 ppi->ppi_clauses = NIL;
1878 ppi->ppi_serials = NULL;
1879 appendrel->ppilist = lappend(appendrel->ppilist, ppi);
1880
1881 return ppi;
1882}
bool bms_is_subset(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:412
ParamPathInfo * find_param_path_info(RelOptInfo *rel, Relids required_outer)
Definition: relnode.c:1889
Cardinality ppi_rows
Definition: pathnodes.h:1591
List * ppi_clauses
Definition: pathnodes.h:1592
Bitmapset * ppi_serials
Definition: pathnodes.h:1593

References Assert, bms_is_empty, bms_is_subset(), bms_overlap(), find_param_path_info(), lappend(), RelOptInfo::lateral_relids, makeNode, NIL, ParamPathInfo::ppi_clauses, ParamPathInfo::ppi_req_outer, ParamPathInfo::ppi_rows, ParamPathInfo::ppi_serials, RelOptInfo::ppilist, and RelOptInfo::relids.

Referenced by create_append_path().

◆ get_baserel_parampathinfo()

ParamPathInfo * get_baserel_parampathinfo (PlannerInfo *root, RelOptInfo *baserel, Relids required_outer)

Definition at line 1545 of file relnode.c.

1547{
1548 ParamPathInfo *ppi;
1549 Relids joinrelids;
1550 List *pclauses;
1551 List *eqclauses;
1552 Bitmapset *pserials;
1553 double rows;
1554 ListCell *lc;
1555
1556 /* If rel has LATERAL refs, every path for it should account for them */
1557 Assert(bms_is_subset(baserel->lateral_relids, required_outer));
1558
1559 /* Unparameterized paths have no ParamPathInfo */
1560 if (bms_is_empty(required_outer))
1561 return NULL;
1562
1563 Assert(!bms_overlap(baserel->relids, required_outer));
1564
1565 /* If we already have a PPI for this parameterization, just return it */
1566 if ((ppi = find_param_path_info(baserel, required_outer)))
1567 return ppi;
1568
1569 /*
1570 * Identify all joinclauses that are movable to this base rel given this
1571 * parameterization.
1572 */
1573 joinrelids = bms_union(baserel->relids, required_outer);
1574 pclauses = NIL;
1575 foreach(lc, baserel->joininfo)
1576 {
1577 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1578
1579 if (join_clause_is_movable_into(rinfo,
1580 baserel->relids,
1581 joinrelids))
1582 pclauses = lappend(pclauses, rinfo);
1583 }
1584
1585 /*
1586 * Add in joinclauses generated by EquivalenceClasses, too. (These
1587 * necessarily satisfy join_clause_is_movable_into; but in assert-enabled
1588 * builds, let's verify that.)
1589 */
1590 eqclauses = generate_join_implied_equalities(root,
1591 joinrelids,
1592 required_outer,
1593 baserel,
1594 NULL);
1595#ifdef USE_ASSERT_CHECKING
1596 foreach(lc, eqclauses)
1597 {
1598 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1599
1600 Assert(join_clause_is_movable_into(rinfo,
1601 baserel->relids,
1602 joinrelids));
1603 }
1604#endif
1605 pclauses = list_concat(pclauses, eqclauses);
1606
1607 /* Compute set of serial numbers of the enforced clauses */
1608 pserials = NULL;
1609 foreach(lc, pclauses)
1610 {
1611 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1612
1613 pserials = bms_add_member(pserials, rinfo->rinfo_serial);
1614 }
1615
1616 /* Estimate the number of rows returned by the parameterized scan */
1617 rows = get_parameterized_baserel_size(root, baserel, pclauses);
1618
1619 /* And now we can build the ParamPathInfo */
1620 ppi = makeNode(ParamPathInfo);
1621 ppi->ppi_req_outer = required_outer;
1622 ppi->ppi_rows = rows;
1623 ppi->ppi_clauses = pclauses;
1624 ppi->ppi_serials = pserials;
1625 baserel->ppilist = lappend(baserel->ppilist, ppi);
1626
1627 return ppi;
1628}
double get_parameterized_baserel_size(PlannerInfo *root, RelOptInfo *rel, List *param_clauses)
Definition: costsize.c:5355
List * generate_join_implied_equalities(PlannerInfo *root, Relids join_relids, Relids outer_relids, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo)
Definition: equivclass.c:1385
bool join_clause_is_movable_into(RestrictInfo *rinfo, Relids currentrelids, Relids current_and_outer)
Definition: restrictinfo.c:661

References Assert, bms_add_member(), bms_is_empty, bms_is_subset(), bms_overlap(), bms_union(), find_param_path_info(), generate_join_implied_equalities(), get_parameterized_baserel_size(), join_clause_is_movable_into(), RelOptInfo::joininfo, lappend(), RelOptInfo::lateral_relids, lfirst, list_concat(), makeNode, NIL, ParamPathInfo::ppi_clauses, ParamPathInfo::ppi_req_outer, ParamPathInfo::ppi_rows, ParamPathInfo::ppi_serials, RelOptInfo::ppilist, RelOptInfo::relids, RestrictInfo::rinfo_serial, and root.

Referenced by create_append_path(), create_bitmap_and_path(), create_bitmap_heap_path(), create_bitmap_or_path(), create_ctescan_path(), create_foreignscan_path(), create_functionscan_path(), create_gather_merge_path(), create_gather_path(), create_index_path(), create_namedtuplestorescan_path(), create_resultscan_path(), create_samplescan_path(), create_seqscan_path(), create_subqueryscan_path(), create_tablefuncscan_path(), create_tidrangescan_path(), create_tidscan_path(), create_valuesscan_path(), create_worktablescan_path(), postgresGetForeignPaths(), and reparameterize_path().
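A hedged sketch of the pattern shared by the create_*scan_path() constructors above (helper name hypothetical): the ParamPathInfo records the parameterization and its row estimate, and is NULL for an unparameterized path.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical sketch: parameterize a freshly built base-rel scan path. */
static void
parameterize_scan(PlannerInfo *root, RelOptInfo *rel, Path *scan,
                  Relids required_outer)
{
    scan->param_info = get_baserel_parampathinfo(root, rel, required_outer);
    /* When param_info is set, costing uses ppi_rows instead of rel->rows. */
}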

◆ get_joinrel_parampathinfo()

ParamPathInfo * get_joinrel_parampathinfo (PlannerInfo *root, RelOptInfo *joinrel, Path *outer_path, Path *inner_path, SpecialJoinInfo *sjinfo, Relids required_outer, List **restrict_clauses)

Definition at line 1659 of file relnode.c.

1665{
1666 ParamPathInfo *ppi;
1667 Relids join_and_req;
1668 Relids outer_and_req;
1669 Relids inner_and_req;
1670 List *pclauses;
1671 List *eclauses;
1672 List *dropped_ecs;
1673 double rows;
1674 ListCell *lc;
1675
1676 /* If rel has LATERAL refs, every path for it should account for them */
1677 Assert(bms_is_subset(joinrel->lateral_relids, required_outer));
1678
1679 /* Unparameterized paths have no ParamPathInfo or extra join clauses */
1680 if (bms_is_empty(required_outer))
1681 return NULL;
1682
1683 Assert(!bms_overlap(joinrel->relids, required_outer));
1684
1685 /*
1686 * Identify all joinclauses that are movable to this join rel given this
1687 * parameterization. These are the clauses that are movable into this
1688 * join, but not movable into either input path. Treat an unparameterized
1689 * input path as not accepting parameterized clauses (because it won't,
1690 * per the shortcut exit above), even though the joinclause movement rules
1691 * might allow the same clauses to be moved into a parameterized path for
1692 * that rel.
1693 */
1694 join_and_req = bms_union(joinrel->relids, required_outer);
1695 if (outer_path->param_info)
1696 outer_and_req = bms_union(outer_path->parent->relids,
1697 PATH_REQ_OUTER(outer_path));
1698 else
1699 outer_and_req = NULL; /* outer path does not accept parameters */
1700 if (inner_path->param_info)
1701 inner_and_req = bms_union(inner_path->parent->relids,
1702 PATH_REQ_OUTER(inner_path));
1703 else
1704 inner_and_req = NULL; /* inner path does not accept parameters */
1705
1706 pclauses = NIL;
1707 foreach(lc, joinrel->joininfo)
1708 {
1709 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1710
1711 if (join_clause_is_movable_into(rinfo,
1712 joinrel->relids,
1713 join_and_req) &&
1714 !join_clause_is_movable_into(rinfo,
1715 outer_path->parent->relids,
1716 outer_and_req) &&
1717 !join_clause_is_movable_into(rinfo,
1718 inner_path->parent->relids,
1719 inner_and_req))
1720 pclauses = lappend(pclauses, rinfo);
1721 }
1722
1723 /* Consider joinclauses generated by EquivalenceClasses, too */
1724 eclauses = generate_join_implied_equalities(root,
1725 join_and_req,
1726 required_outer,
1727 joinrel,
1728 NULL);
1729 /* We only want ones that aren't movable to lower levels */
1730 dropped_ecs = NIL;
1731 foreach(lc, eclauses)
1732 {
1733 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1734
1735 Assert(join_clause_is_movable_into(rinfo,
1736 joinrel->relids,
1737 join_and_req));
1738 if (join_clause_is_movable_into(rinfo,
1739 outer_path->parent->relids,
1740 outer_and_req))
1741 continue; /* drop if movable into LHS */
1742 if (join_clause_is_movable_into(rinfo,
1743 inner_path->parent->relids,
1744 inner_and_req))
1745 {
1746 /* drop if movable into RHS, but remember EC for use below */
1747 Assert(rinfo->left_ec == rinfo->right_ec);
1748 dropped_ecs = lappend(dropped_ecs, rinfo->left_ec);
1749 continue;
1750 }
1751 pclauses = lappend(pclauses, rinfo);
1752 }
1753
1754 /*
1755 * EquivalenceClasses are harder to deal with than we could wish, because
1756 * of the fact that a given EC can generate different clauses depending on
1757 * context. Suppose we have an EC {X.X, Y.Y, Z.Z} where X and Y are the
1758 * LHS and RHS of the current join and Z is in required_outer, and further
1759 * suppose that the inner_path is parameterized by both X and Z. The code
1760 * above will have produced either Z.Z = X.X or Z.Z = Y.Y from that EC,
1761 * and in the latter case will have discarded it as being movable into the
1762 * RHS. However, the EC machinery might have produced either Y.Y = X.X or
1763 * Y.Y = Z.Z as the EC enforcement clause within the inner_path; it will
1764 * not have produced both, and we can't readily tell from here which one
1765 * it did pick. If we add no clause to this join, we'll end up with
1766 * insufficient enforcement of the EC; either Z.Z or X.X will fail to be
1767 * constrained to be equal to the other members of the EC. (When we come
1768 * to join Z to this X/Y path, we will certainly drop whichever EC clause
1769 * is generated at that join, so this omission won't get fixed later.)
1770 *
1771 * To handle this, for each EC we discarded such a clause from, try to
1772 * generate a clause connecting the required_outer rels to the join's LHS
1773 * ("Z.Z = X.X" in the terms of the above example). If successful, and if
1774 * the clause can't be moved to the LHS, add it to the current join's
1775 * restriction clauses. (If an EC cannot generate such a clause then it
1776 * has nothing that needs to be enforced here, while if the clause can be
1777 * moved into the LHS then it should have been enforced within that path.)
1778 *
1779 * Note that we don't need similar processing for ECs whose clause was
1780 * considered to be movable into the LHS, because the LHS can't refer to
1781 * the RHS so there is no comparable ambiguity about what it might
1782 * actually be enforcing internally.
1783 */
1784 if (dropped_ecs)
1785 {
1786 Relids real_outer_and_req;
1787
1788 real_outer_and_req = bms_union(outer_path->parent->relids,
1789 required_outer);
1790 eclauses =
1791 generate_join_implied_equalities_for_ecs(root,
1792 dropped_ecs,
1793 real_outer_and_req,
1794 required_outer,
1795 outer_path->parent);
1796 foreach(lc, eclauses)
1797 {
1798 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1799
1800 Assert(join_clause_is_movable_into(rinfo,
1801 outer_path->parent->relids,
1802 real_outer_and_req));
1803 if (!join_clause_is_movable_into(rinfo,
1804 outer_path->parent->relids,
1805 outer_and_req))
1806 pclauses = lappend(pclauses, rinfo);
1807 }
1808 }
1809
1810 /*
1811 * Now, attach the identified moved-down clauses to the caller's
1812 * restrict_clauses list. By using list_concat in this order, we leave
1813 * the original list structure of restrict_clauses undamaged.
1814 */
1815 *restrict_clauses = list_concat(pclauses, *restrict_clauses);
1816
1817 /* If we already have a PPI for this parameterization, just return it */
1818 if ((ppi = find_param_path_info(joinrel, required_outer)))
1819 return ppi;
1820
1821 /* Estimate the number of rows returned by the parameterized join */
1822 rows = get_parameterized_joinrel_size(root, joinrel,
1823 outer_path,
1824 inner_path,
1825 sjinfo,
1826 *restrict_clauses);
1827
1828 /*
1829 * And now we can build the ParamPathInfo. No point in saving the
1830 * input-pair-dependent clause list, though.
1831 *
1832 * Note: in GEQO mode, we'll be called in a temporary memory context, but
1833 * the joinrel structure is there too, so no problem.
1834 */
1835 ppi = makeNode(ParamPathInfo);
1836 ppi->ppi_req_outer = required_outer;
1837 ppi->ppi_rows = rows;
1838 ppi->ppi_clauses = NIL;
1839 ppi->ppi_serials = NULL;
1840 joinrel->ppilist = lappend(joinrel->ppilist, ppi);
1841
1842 return ppi;
1843}
double get_parameterized_joinrel_size(PlannerInfo *root, RelOptInfo *rel, Path *outer_path, Path *inner_path, SpecialJoinInfo *sjinfo, List *restrict_clauses)
Definition: costsize.c:5436
List * generate_join_implied_equalities_for_ecs(PlannerInfo *root, List *eclasses, Relids join_relids, Relids outer_relids, RelOptInfo *inner_rel)
Definition: equivclass.c:1485

References Assert, bms_is_empty, bms_is_subset(), bms_overlap(), bms_union(), find_param_path_info(), generate_join_implied_equalities(), generate_join_implied_equalities_for_ecs(), get_parameterized_joinrel_size(), join_clause_is_movable_into(), RelOptInfo::joininfo, lappend(), RelOptInfo::lateral_relids, lfirst, list_concat(), makeNode, NIL, PATH_REQ_OUTER, ParamPathInfo::ppi_clauses, ParamPathInfo::ppi_req_outer, ParamPathInfo::ppi_rows, ParamPathInfo::ppi_serials, RelOptInfo::ppilist, RelOptInfo::relids, and root.

Referenced by create_hashjoin_path(), create_mergejoin_path(), and create_nestloop_path().
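A hedged sketch of the create_nestloop_path()-style call (helper name hypothetical). Note the side effect: movable clauses may be prepended to *restrict_clauses, so the caller must use the updated list for its joinrestrictinfo.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical sketch: fill in param_info and joinrestrictinfo together. */
static void
parameterize_join(PlannerInfo *root, RelOptInfo *joinrel, JoinPath *jpath,
                  Path *outer_path, Path *inner_path,
                  SpecialJoinInfo *sjinfo, Relids required_outer,
                  List **restrict_clauses)
{
    jpath->path.param_info =
        get_joinrel_parampathinfo(root, joinrel,
                                  outer_path, inner_path,
                                  sjinfo, required_outer,
                                  restrict_clauses);
    jpath->joinrestrictinfo = *restrict_clauses;
}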

◆ get_param_path_clause_serials()

Bitmapset * get_param_path_clause_serials (Path *path)

Definition at line 1910 of file relnode.c.

1911{
1912 if (path->param_info == NULL)
1913 return NULL; /* not parameterized */
1914
1915 /*
1916 * We don't currently support parameterized MergeAppend paths, as
1917 * explained in the comments for generate_orderedappend_paths.
1918 */
1919 Assert(!IsA(path, MergeAppendPath));
1920
1921 if (IsA(path, NestPath) ||
1922 IsA(path, MergePath) ||
1923 IsA(path, HashPath))
1924 {
1925 /*
1926 * For a join path, combine clauses enforced within either input path
1927 * with those enforced as joinrestrictinfo in this path. Note that
1928 * joinrestrictinfo may include some non-pushed-down clauses, but for
1929 * current purposes it's okay if we include those in the result. (To
1930 * be more careful, we could check for clause_relids overlapping the
1931 * path parameterization, but it's not worth the cycles for now.)
1932 */
1933 JoinPath *jpath = (JoinPath *) path;
1934 Bitmapset *pserials;
1935 ListCell *lc;
1936
1937 pserials = NULL;
1938 pserials = bms_add_members(pserials,
1939 get_param_path_clause_serials(jpath->outerjoinpath));
1940 pserials = bms_add_members(pserials,
1941 get_param_path_clause_serials(jpath->innerjoinpath));
1942 foreach(lc, jpath->joinrestrictinfo)
1943 {
1944 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1945
1946 pserials = bms_add_member(pserials, rinfo->rinfo_serial);
1947 }
1948 return pserials;
1949 }
1950 else if (IsA(path, AppendPath))
1951 {
1952 /*
1953 * For an appendrel, take the intersection of the sets of clauses
1954 * enforced in each input path.
1955 */
1956 AppendPath *apath = (AppendPath *) path;
1957 Bitmapset *pserials;
1958 ListCell *lc;
1959
1960 pserials = NULL;
1961 foreach(lc, apath->subpaths)
1962 {
1963 Path *subpath = (Path *) lfirst(lc);
1964 Bitmapset *subserials;
1965
1966 subserials = get_param_path_clause_serials(subpath);
1967 if (lc == list_head(apath->subpaths))
1968 pserials = bms_copy(subserials);
1969 else
1970 pserials = bms_int_members(pserials, subserials);
1971 }
1972 return pserials;
1973 }
1974 else
1975 {
1976 /*
1977 * Otherwise, it's a baserel path and we can use the
1978 * previously-computed set of serial numbers.
1979 */
1980 return path->param_info->ppi_serials;
1981 }
1982}
Bitmapset * bms_int_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:1109
static ListCell * list_head(const List *l)
Definition: pg_list.h:128

References Assert, bms_add_member(), bms_add_members(), bms_copy(), bms_int_members(), get_param_path_clause_serials(), JoinPath::innerjoinpath, IsA, JoinPath::joinrestrictinfo, lfirst, list_head(), JoinPath::outerjoinpath, RestrictInfo::rinfo_serial, subpath(), and AppendPath::subpaths.

Referenced by create_nestloop_path(), and get_param_path_clause_serials().

◆ min_join_parameterization()

Relids min_join_parameterization (PlannerInfo *root, Relids joinrelids, RelOptInfo *outer_rel, RelOptInfo *inner_rel)

Definition at line 1022 of file relnode.c.

1026{
1027 Relids result;
1028
1029 /*
1030 * Basically we just need the union of the inputs' lateral_relids, less
1031 * whatever is already in the join.
1032 *
1033 * It's not immediately obvious that this is a valid way to compute the
1034 * result, because it might seem that we're ignoring possible lateral refs
1035 * of PlaceHolderVars that are due to be computed at the join but not in
1036 * either input. However, because create_lateral_join_info() already
1037 * charged all such PHV refs to each member baserel of the join, they'll
1038 * be accounted for already in the inputs' lateral_relids. Likewise, we
1039 * do not need to worry about doing transitive closure here, because that
1040 * was already accounted for in the original baserel lateral_relids.
1041 */
1042 result = bms_union(outer_rel->lateral_relids, inner_rel->lateral_relids);
1043 result = bms_del_members(result, joinrelids);
1044 return result;
1045}

References bms_del_members(), bms_union(), and RelOptInfo::lateral_relids.

Referenced by build_join_rel(), and join_is_legal().

◆ path_is_reparameterizable_by_child()

bool path_is_reparameterizable_by_child (Path *path, RelOptInfo *child_rel)

Definition at line 4565 of file pathnode.c.

4566{
4567#define REJECT_IF_PATH_NOT_REPARAMETERIZABLE(path) \
4568do { \
4569 if (!path_is_reparameterizable_by_child(path, child_rel)) \
4570 return false; \
4571} while(0)
4572
4573#define REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(pathlist) \
4574do { \
4575 if (!pathlist_is_reparameterizable_by_child(pathlist, child_rel)) \
4576 return false; \
4577} while(0)
4578
4579 /*
4580 * If the path is not parameterized by the parent of the given relation,
4581 * it doesn't need reparameterization.
4582 */
4583 if (!path->param_info ||
4584 !bms_overlap(PATH_REQ_OUTER(path), child_rel->top_parent_relids))
4585 return true;
4586
4587 /*
4588 * Check that the path type is one that reparameterize_path_by_child() can
4589 * handle, and recursively check subpaths.
4590 */
4591 switch (nodeTag(path))
4592 {
4593 case T_Path:
4594 case T_IndexPath:
4595 break;
4596
4597 case T_BitmapHeapPath:
4598 {
4599 BitmapHeapPath *bhpath = (BitmapHeapPath *) path;
4600
4601 REJECT_IF_PATH_NOT_REPARAMETERIZABLE(bhpath->bitmapqual);
4602 }
4603 break;
4604
4605 case T_BitmapAndPath:
4606 {
4607 BitmapAndPath *bapath = (BitmapAndPath *) path;
4608
4609 REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(bapath->bitmapquals);
4610 }
4611 break;
4612
4613 case T_BitmapOrPath:
4614 {
4615 BitmapOrPath *bopath = (BitmapOrPath *) path;
4616
4617 REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(bopath->bitmapquals);
4618 }
4619 break;
4620
4621 case T_ForeignPath:
4622 {
4623 ForeignPath *fpath = (ForeignPath *) path;
4624
4625 if (fpath->fdw_outerpath)
4626 REJECT_IF_PATH_NOT_REPARAMETERIZABLE(fpath->fdw_outerpath);
4627 }
4628 break;
4629
4630 case T_CustomPath:
4631 {
4632 CustomPath *cpath = (CustomPath *) path;
4633
4634 REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(cpath->custom_paths);
4635 }
4636 break;
4637
4638 case T_NestPath:
4639 case T_MergePath:
4640 case T_HashPath:
4641 {
4642 JoinPath *jpath = (JoinPath *) path;
4643
4644 REJECT_IF_PATH_NOT_REPARAMETERIZABLE(jpath->outerjoinpath);
4645 REJECT_IF_PATH_NOT_REPARAMETERIZABLE(jpath->innerjoinpath);
4646 }
4647 break;
4648
4649 case T_AppendPath:
4650 {
4651 AppendPath *apath = (AppendPath *) path;
4652
4653 REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(apath->subpaths);
4654 }
4655 break;
4656
4657 case T_MaterialPath:
4658 {
4659 MaterialPath *mpath = (MaterialPath *) path;
4660
4661 REJECT_IF_PATH_NOT_REPARAMETERIZABLE(mpath->subpath);
4662 }
4663 break;
4664
4665 case T_MemoizePath:
4666 {
4667 MemoizePath *mpath = (MemoizePath *) path;
4668
4669 REJECT_IF_PATH_NOT_REPARAMETERIZABLE(mpath->subpath);
4670 }
4671 break;
4672
4673 case T_GatherPath:
4674 {
4675 GatherPath *gpath = (GatherPath *) path;
4676
4677 REJECT_IF_PATH_NOT_REPARAMETERIZABLE(gpath->subpath);
4678 }
4679 break;
4680
4681 default:
4682 /* We don't know how to reparameterize this path. */
4683 return false;
4684 }
4685
4686 return true;
4687}
#define nodeTag(nodeptr)
Definition: nodes.h:133
#define REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(pathlist)
#define REJECT_IF_PATH_NOT_REPARAMETERIZABLE(path)
List * custom_paths
Definition: pathnodes.h:1920

References BitmapHeapPath::bitmapqual, BitmapAndPath::bitmapquals, BitmapOrPath::bitmapquals, bms_overlap(), CustomPath::custom_paths, ForeignPath::fdw_outerpath, JoinPath::innerjoinpath, nodeTag, JoinPath::outerjoinpath, PATH_REQ_OUTER, REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE, REJECT_IF_PATH_NOT_REPARAMETERIZABLE, MaterialPath::subpath, MemoizePath::subpath, GatherPath::subpath, AppendPath::subpaths, and RelOptInfo::top_parent_relids.

Referenced by pathlist_is_reparameterizable_by_child(), try_nestloop_path(), and try_partial_nestloop_path().
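A hedged sketch of the precheck done by try_nestloop_path() for child joins (helper name hypothetical): before spending any costing effort, reject inner paths that reparameterize_path_by_child() would be unable to translate later.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical sketch: cheap precheck before building a child nestloop. */
static bool
inner_usable_for_child_join(Path *inner_path, RelOptInfo *outer_child_rel)
{
    return path_is_reparameterizable_by_child(inner_path, outer_child_rel);
}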

◆ reparameterize_path()

Path * reparameterize_path (PlannerInfo *root, Path *path, Relids required_outer, double loop_count)

Definition at line 4103 of file pathnode.c.

4106{
4107 RelOptInfo *rel = path->parent;
4108
4109 /* Can only increase, not decrease, path's parameterization */
4110 if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer))
4111 return NULL;
4112 switch (path->pathtype)
4113 {
4114 case T_SeqScan:
4115 return create_seqscan_path(root, rel, required_outer, 0);
4116 case T_SampleScan:
4117 return (Path *) create_samplescan_path(root, rel, required_outer);
4118 case T_IndexScan:
4119 case T_IndexOnlyScan:
4120 {
4121 IndexPath *ipath = (IndexPath *) path;
4122 IndexPath *newpath = makeNode(IndexPath);
4123
4124 /*
4125 * We can't use create_index_path directly, and would not want
4126 * to because it would re-compute the indexqual conditions
4127 * which is wasted effort. Instead we hack things a bit:
4128 * flat-copy the path node, revise its param_info, and redo
4129 * the cost estimate.
4130 */
4131 memcpy(newpath, ipath, sizeof(IndexPath));
4132 newpath->path.param_info =
4133 get_baserel_parampathinfo(root, rel, required_outer);
4134 cost_index(newpath, root, loop_count, false);
4135 return (Path *) newpath;
4136 }
4137 case T_BitmapHeapScan:
4138 {
4139 BitmapHeapPath *bpath = (BitmapHeapPath *) path;
4140
4141 return (Path *) create_bitmap_heap_path(root,
4142 rel,
4143 bpath->bitmapqual,
4144 required_outer,
4145 loop_count, 0);
4146 }
4147 case T_SubqueryScan:
4148 {
4149 SubqueryScanPath *spath = (SubqueryScanPath *) path;
4150 Path *subpath = spath->subpath;
4151 bool trivial_pathtarget;
4152
4153 /*
4154 * If existing node has zero extra cost, we must have decided
4155 * its target is trivial. (The converse is not true, because
4156 * it might have a trivial target but quals to enforce; but in
4157 * that case the new node will too, so it doesn't matter
4158 * whether we get the right answer here.)
4159 */
4160 trivial_pathtarget =
4161 (subpath->total_cost == spath->path.total_cost);
4162
4162
4163 return (Path *) create_subqueryscan_path(root,
4164 rel,
4165 subpath,
4166 trivial_pathtarget,
4167 spath->path.pathkeys,
4168 required_outer);
4169 }
4170 case T_Result:
4171 /* Supported only for RTE_RESULT scan paths */
4172 if (IsA(path, Path))
4173 return create_resultscan_path(root, rel, required_outer);
4174 break;
4175 case T_Append:
4176 {
4177 AppendPath *apath = (AppendPath *) path;
4178 List *childpaths = NIL;
4179 List *partialpaths = NIL;
4180 int i;
4181 ListCell *lc;
4182
4183 /* Reparameterize the children */
4184 i = 0;
4185 foreach(lc, apath->subpaths)
4186 {
4187 Path *spath = (Path *) lfirst(lc);
4188
4189 spath = reparameterize_path(root, spath,
4190 required_outer,
4191 loop_count);
4192 if (spath == NULL)
4193 return NULL;
4194 /* We have to re-split the regular and partial paths */
4195 if (i < apath->first_partial_path)
4196 childpaths = lappend(childpaths, spath);
4197 else
4198 partialpaths = lappend(partialpaths, spath);
4199 i++;
4200 }
4201 return (Path *)
4202 create_append_path(root, rel, childpaths, partialpaths,
4203 apath->path.pathkeys, required_outer,
4204 apath->path.parallel_workers,
4205 apath->path.parallel_aware,
4206 -1);
4207 }
4208 case T_Material:
4209 {
4210 MaterialPath *mpath = (MaterialPath *) path;
4211 Path *spath = mpath->subpath;
4212
4213 spath = reparameterize_path(root, spath,
4214 required_outer,
4215 loop_count);
4216 if (spath == NULL)
4217 return NULL;
4218 return (Path *) create_material_path(rel, spath);
4219 }
4220 case T_Memoize:
4221 {
4222 MemoizePath *mpath = (MemoizePath *) path;
4223 Path *spath = mpath->subpath;
4224
4225 spath = reparameterize_path(root, spath,
4226 required_outer,
4227 loop_count);
4228 if (spath == NULL)
4229 return NULL;
4230 return (Path *) create_memoize_path(root, rel,
4231 spath,
4232 mpath->param_exprs,
4233 mpath->hash_operators,
4234 mpath->singlerow,
4235 mpath->binary_mode,
4236 mpath->calls);
4237 }
4238 default:
4239 break;
4240 }
4241 return NULL;
4242}
int i
Definition: isn.c:72
MemoizePath * create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *param_exprs, List *hash_operators, bool singlerow, bool binary_mode, double calls)
Definition: pathnode.c:1667
Path * create_seqscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer, int parallel_workers)
Definition: pathnode.c:983
AppendPath * create_append_path(PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *partial_subpaths, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows)
Definition: pathnode.c:1300
SubqueryScanPath * create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, bool trivial_pathtarget, List *pathkeys, Relids required_outer)
Definition: pathnode.c:2088
BitmapHeapPath * create_bitmap_heap_path(PlannerInfo *root, RelOptInfo *rel, Path *bitmapqual, Relids required_outer, double loop_count, int parallel_degree)
Definition: pathnode.c:1098
Path * create_samplescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
Definition: pathnode.c:1008
MaterialPath * create_material_path(RelOptInfo *rel, Path *subpath)
Definition: pathnode.c:1634
Path * create_resultscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
Definition: pathnode.c:2248
Path * reparameterize_path(PlannerInfo *root, Path *path, Relids required_outer, double loop_count)
Definition: pathnode.c:4103

References MemoizePath::binary_mode, BitmapHeapPath::bitmapqual, bms_is_subset(), MemoizePath::calls, cost_index(), create_append_path(), create_bitmap_heap_path(), create_material_path(), create_memoize_path(), create_resultscan_path(), create_samplescan_path(), create_seqscan_path(), create_subqueryscan_path(), get_baserel_parampathinfo(), MemoizePath::hash_operators, i, IsA, lappend(), lfirst, makeNode, NIL, Path::parallel_aware, Path::parallel_workers, MemoizePath::param_exprs, IndexPath::path, SubqueryScanPath::path, AppendPath::path, PATH_REQ_OUTER, Path::pathkeys, Path::pathtype, reparameterize_path(), root, MemoizePath::singlerow, subpath(), SubqueryScanPath::subpath, MaterialPath::subpath, MemoizePath::subpath, AppendPath::subpaths, and Path::total_cost.

Referenced by get_cheapest_parameterized_child_path(), and reparameterize_path().
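A hedged sketch of the get_cheapest_parameterized_child_path() fallback (helper name hypothetical): when no child path carries exactly the wanted parameterization, try widening an existing one; a NULL result means this path type cannot be reparameterized.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical sketch: widen a child path's parameterization if possible. */
static Path *
widen_parameterization(PlannerInfo *root, Path *child_path,
                       Relids required_outer)
{
    /* loop_count of 1.0 assumes a single expected execution (assumption) */
    return reparameterize_path(root, child_path, required_outer, 1.0);
}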

◆ reparameterize_path_by_child()

Path * reparameterize_path_by_child (PlannerInfo *root, Path *path, RelOptInfo *child_rel)

Definition at line 4269 of file pathnode.c.

4271{
4272 Path *new_path;
4273 ParamPathInfo *new_ppi;
4274 ParamPathInfo *old_ppi;
4275 Relids required_outer;
4276
4277#define ADJUST_CHILD_ATTRS(node) \
4278 ((node) = (void *) adjust_appendrel_attrs_multilevel(root, \
4279 (Node *) (node), \
4280 child_rel, \
4281 child_rel->top_parent))
4282
4283#define REPARAMETERIZE_CHILD_PATH(path) \
4284do { \
4285 (path) = reparameterize_path_by_child(root, (path), child_rel); \
4286 if ((path) == NULL) \
4287 return NULL; \
4288} while(0)
4289
4290#define REPARAMETERIZE_CHILD_PATH_LIST(pathlist) \
4291do { \
4292 if ((pathlist) != NIL) \
4293 { \
4294 (pathlist) = reparameterize_pathlist_by_child(root, (pathlist), \
4295 child_rel); \
4296 if ((pathlist) == NIL) \
4297 return NULL; \
4298 } \
4299} while(0)
4300
4301 /*
4302 * If the path is not parameterized by the parent of the given relation,
4303 * it doesn't need reparameterization.
4304 */
4305 if (!path->param_info ||
4306 !bms_overlap(PATH_REQ_OUTER(path), child_rel->top_parent_relids))
4307 return path;
4308
4309 /*
4310 * If possible, reparameterize the given path.
4311 *
4312 * This function is currently only applied to the inner side of a nestloop
4313 * join that is being partitioned by the partitionwise-join code. Hence,
4314 * we need only support path types that plausibly arise in that context.
4315 * (In particular, supporting sorted path types would be a waste of code
4316 * and cycles: even if we translated them here, they'd just lose in
4317 * subsequent cost comparisons.) If we do see an unsupported path type,
4318 * that just means we won't be able to generate a partitionwise-join plan
4319 * using that path type.
4320 */
4321 switch (nodeTag(path))
4322 {
4323 case T_Path:
4324 new_path = path;
4325 ADJUST_CHILD_ATTRS(new_path->parent->baserestrictinfo);
4326 if (path->pathtype == T_SampleScan)
4327 {
4328 Index scan_relid = path->parent->relid;
4329 RangeTblEntry *rte;
4330
4331 /* it should be a base rel with a tablesample clause... */
4332 Assert(scan_relid > 0);
4333 rte = planner_rt_fetch(scan_relid, root);
4334 Assert(rte->rtekind == RTE_RELATION);
4335 Assert(rte->tablesample != NULL);
4336
4337 ADJUST_CHILD_ATTRS(rte->tablesample);
4338 }
4339 break;
4340
4341 case T_IndexPath:
4342 {
4343 IndexPath *ipath = (IndexPath *) path;
4344
4345 ADJUST_CHILD_ATTRS(ipath->indexinfo->indrestrictinfo);
4346 ADJUST_CHILD_ATTRS(ipath->indexclauses);
4347 new_path = (Path *) ipath;
4348 }
4349 break;
4350
4351 case T_BitmapHeapPath:
4352 {
4353 BitmapHeapPath *bhpath = (BitmapHeapPath *) path;
4354
4355 ADJUST_CHILD_ATTRS(bhpath->path.parent->baserestrictinfo);
4356 REPARAMETERIZE_CHILD_PATH(bhpath->bitmapqual);
4357 new_path = (Path *) bhpath;
4358 }
4359 break;
4360
4361 case T_BitmapAndPath:
4362 {
4363 BitmapAndPath *bapath = (BitmapAndPath *) path;
4364
4365 REPARAMETERIZE_CHILD_PATH_LIST(bapath->bitmapquals);
4366 new_path = (Path *) bapath;
4367 }
4368 break;
4369
4370 case T_BitmapOrPath:
4371 {
4372 BitmapOrPath *bopath = (BitmapOrPath *) path;
4373
4374 REPARAMETERIZE_CHILD_PATH_LIST(bopath->bitmapquals);
4375 new_path = (Path *) bopath;
4376 }
4377 break;
4378
4379 case T_ForeignPath:
4380 {
4381 ForeignPath *fpath = (ForeignPath *) path;
4382 ReparameterizeForeignPathByChild_function rfpc_func;
4383
4384 ADJUST_CHILD_ATTRS(fpath->path.parent->baserestrictinfo);
4385 if (fpath->fdw_outerpath)
4386 REPARAMETERIZE_CHILD_PATH(fpath->fdw_outerpath);
4387 if (fpath->fdw_restrictinfo)
4388 ADJUST_CHILD_ATTRS(fpath->fdw_restrictinfo);
4389
4390 /* Hand over to FDW if needed. */
4391 rfpc_func =
4392 path->parent->fdwroutine->ReparameterizeForeignPathByChild;
4393 if (rfpc_func)
4394 fpath->fdw_private = rfpc_func(root, fpath->fdw_private,
4395 child_rel);
4396 new_path = (Path *) fpath;
4397 }
4398 break;
4399
4400 case T_CustomPath:
4401 {
4402 CustomPath *cpath = (CustomPath *) path;
4403
4404 ADJUST_CHILD_ATTRS(cpath->path.parent->baserestrictinfo);
4405 REPARAMETERIZE_CHILD_PATH_LIST(cpath->custom_paths);
4406 if (cpath->custom_restrictinfo)
4407 ADJUST_CHILD_ATTRS(cpath->custom_restrictinfo);
4408 if (cpath->methods &&
4409 cpath->methods->ReparameterizeCustomPathByChild)
4410 cpath->custom_private =
4411 cpath->methods->ReparameterizeCustomPathByChild(root,
4412 cpath->custom_private,
4413 child_rel);
4414 new_path = (Path *) cpath;
4415 }
4416 break;
4417
4418 case T_NestPath:
4419 {
4420 NestPath *npath = (NestPath *) path;
4421 JoinPath *jpath = (JoinPath *) npath;
4422
4423 REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
4424 REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
4425 ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
4426 new_path = (Path *) npath;
4427 }
4428 break;
4429
4430 case T_MergePath:
4431 {
4432 MergePath *mpath = (MergePath *) path;
4433 JoinPath *jpath = (JoinPath *) mpath;
4434
4435 REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
4436 REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
4437 ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
4438 ADJUST_CHILD_ATTRS(mpath->path_mergeclauses);
4439 new_path = (Path *) mpath;
4440 }
4441 break;
4442
4443 case T_HashPath:
4444 {
4445 HashPath *hpath = (HashPath *) path;
4446 JoinPath *jpath = (JoinPath *) hpath;
4447
4448 REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
4449 REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
4450 ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
4451 ADJUST_CHILD_ATTRS(hpath->path_hashclauses);
4452 new_path = (Path *) hpath;
4453 }
4454 break;
4455
4456 case T_AppendPath:
4457 {
4458 AppendPath *apath = (AppendPath *) path;
4459
4460 REPARAMETERIZE_CHILD_PATH_LIST(apath->subpaths);
4461 new_path = (Path *) apath;
4462 }
4463 break;
4464
4465 case T_MaterialPath:
4466 {
4467 MaterialPath *mpath = (MaterialPath *) path;
4468
4469 REPARAMETERIZE_CHILD_PATH(mpath->subpath);
4470 new_path = (Path *) mpath;
4471 }
4472 break;
4473
4474 case T_MemoizePath:
4475 {
4476 MemoizePath *mpath = (MemoizePath *) path;
4477
4478 REPARAMETERIZE_CHILD_PATH(mpath->subpath);
4479 ADJUST_CHILD_ATTRS(mpath->param_exprs);
4480 new_path = (Path *) mpath;
4481 }
4482 break;
4483
4484 case T_GatherPath:
4485 {
4486 GatherPath *gpath = (GatherPath *) path;
4487
4488 REPARAMETERIZE_CHILD_PATH(gpath->subpath);
4489 new_path = (Path *) gpath;
4490 }
4491 break;
4492
4493 default:
4494 /* We don't know how to reparameterize this path. */
4495 return NULL;
4496 }
4497
4498 /*
4499 * Adjust the parameterization information, which refers to the topmost
4500 * parent. The topmost parent can be multiple levels away from the given
4501 * child, hence use multi-level expression adjustment routines.
4502 */
4503 old_ppi = new_path->param_info;
4504 required_outer =
4505 adjust_child_relids_multilevel(root, old_ppi->ppi_req_outer,
4506 child_rel,
4507 child_rel->top_parent);
4508
4509 /* If we already have a PPI for this parameterization, just return it */
4510 new_ppi = find_param_path_info(new_path->parent, required_outer);
4511
4512 /*
4513 * If not, build a new one and link it to the list of PPIs. For the same
4514 * reason as explained in mark_dummy_rel(), allocate new PPI in the same
4515 * context the given RelOptInfo is in.
4516 */
4517 if (new_ppi == NULL)
4518 {
4519 MemoryContext oldcontext;
4520 RelOptInfo *rel = path->parent;
4521
4522 oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
4523
4524 new_ppi = makeNode(ParamPathInfo);
4525 new_ppi->ppi_req_outer = bms_copy(required_outer);
4526 new_ppi->ppi_rows = old_ppi->ppi_rows;
4527 new_ppi->ppi_clauses = old_ppi->ppi_clauses;
4528 ADJUST_CHILD_ATTRS(new_ppi->ppi_clauses);
4529 new_ppi->ppi_serials = bms_copy(old_ppi->ppi_serials);
4530 rel->ppilist = lappend(rel->ppilist, new_ppi);
4531
4532 MemoryContextSwitchTo(oldcontext);
4533 }
4534 bms_free(required_outer);
4535
4536 new_path->param_info = new_ppi;
4537
4538 /*
4539 * Adjust the path target if the parent of the outer relation is
4540 * referenced in the targetlist. This can happen when only the parent of
4541 * outer relation is laterally referenced in this relation.
4542 */
4543 if (bms_overlap(path->parent->lateral_relids,
4544 child_rel->top_parent_relids))
4545 {
4546 new_path->pathtarget = copy_pathtarget(new_path->pathtarget);
4547 ADJUST_CHILD_ATTRS(new_path->pathtarget->exprs);
4548 }
4549
4550 return new_path;
4551}
Relids adjust_child_relids_multilevel(PlannerInfo *root, Relids relids, RelOptInfo *childrel, RelOptInfo *parentrel)
Definition: appendinfo.c:591
void bms_free(Bitmapset *a)
Definition: bitmapset.c:239
List *(* ReparameterizeForeignPathByChild_function)(PlannerInfo *root, List *fdw_private, RelOptInfo *child_rel)
Definition: fdwapi.h:182
#define REPARAMETERIZE_CHILD_PATH_LIST(pathlist)
#define REPARAMETERIZE_CHILD_PATH(path)
#define ADJUST_CHILD_ATTRS(node)
struct List *(* ReparameterizeCustomPathByChild)(PlannerInfo *root, List *custom_private, RelOptInfo *child_rel)
Definition: extensible.h:103
const struct CustomPathMethods * methods
Definition: pathnodes.h:1923
List * custom_private
Definition: pathnodes.h:1922
List * custom_restrictinfo
Definition: pathnodes.h:1921
List * indrestrictinfo
Definition: pathnodes.h:1184
struct TableSampleClause * tablesample
Definition: parsenodes.h:1098
PathTarget * copy_pathtarget(PathTarget *src)
Definition: tlist.c:657

References ADJUST_CHILD_ATTRS, adjust_child_relids_multilevel(), Assert, BitmapHeapPath::bitmapqual, BitmapAndPath::bitmapquals, BitmapOrPath::bitmapquals, bms_copy(), bms_free(), bms_overlap(), copy_pathtarget(), CustomPath::custom_paths, CustomPath::custom_private, CustomPath::custom_restrictinfo, ForeignPath::fdw_outerpath, ForeignPath::fdw_private, ForeignPath::fdw_restrictinfo, find_param_path_info(), GetMemoryChunkContext(), IndexPath::indexclauses, IndexPath::indexinfo, IndexOptInfo::indrestrictinfo, JoinPath::innerjoinpath, JoinPath::joinrestrictinfo, lappend(), makeNode, MemoryContextSwitchTo(), CustomPath::methods, nodeTag, JoinPath::outerjoinpath, MemoizePath::param_exprs, BitmapHeapPath::path, ForeignPath::path, CustomPath::path, HashPath::path_hashclauses, MergePath::path_mergeclauses, PATH_REQ_OUTER, Path::pathtype, planner_rt_fetch, ParamPathInfo::ppi_clauses, ParamPathInfo::ppi_req_outer, ParamPathInfo::ppi_rows, ParamPathInfo::ppi_serials, RelOptInfo::ppilist, REPARAMETERIZE_CHILD_PATH, REPARAMETERIZE_CHILD_PATH_LIST, CustomPathMethods::ReparameterizeCustomPathByChild, root, RTE_RELATION, RangeTblEntry::rtekind, MaterialPath::subpath, MemoizePath::subpath, GatherPath::subpath, AppendPath::subpaths, RangeTblEntry::tablesample, and RelOptInfo::top_parent_relids.

Referenced by create_nestloop_plan(), and reparameterize_pathlist_by_child().
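Because unsupported path types make this function return NULL, a caller that translates an entire pathlist must be prepared to abandon the attempt as soon as one path is rejected. The sketch below is illustrative only: the helper name reparameterize_pathlist_sketch is invented, and it only mirrors the general shape of reparameterize_pathlist_by_child() rather than reproducing it.

/*
 * Illustrative sketch: translate each path in 'pathlist' so it is
 * parameterized by 'child_rel' instead of its top parent.  A partial
 * translation is useless to the caller, so give up and return NIL the
 * moment any path cannot be reparameterized.
 */
static List *
reparameterize_pathlist_sketch(PlannerInfo *root,
                               List *pathlist,
                               RelOptInfo *child_rel)
{
    List       *result = NIL;
    ListCell   *lc;

    foreach(lc, pathlist)
    {
        Path       *path = reparameterize_path_by_child(root,
                                                        (Path *) lfirst(lc),
                                                        child_rel);

        if (path == NULL)
        {
            /* unsupported path type; discard what we built so far */
            list_free(result);
            return NIL;
        }
        result = lappend(result, path);
    }
    return result;
}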

◆ set_cheapest()

void set_cheapest ( RelOptInfo * parent_rel )

Definition at line 269 of file pathnode.c.

270{
271 Path *cheapest_startup_path;
272 Path *cheapest_total_path;
273 Path *best_param_path;
274 List *parameterized_paths;
275 ListCell *p;
276
277 Assert(IsA(parent_rel, RelOptInfo));
278
279 if (parent_rel->pathlist == NIL)
280 elog(ERROR, "could not devise a query plan for the given query");
281
282 cheapest_startup_path = cheapest_total_path = best_param_path = NULL;
283 parameterized_paths = NIL;
284
285 foreach(p, parent_rel->pathlist)
286 {
287 Path *path = (Path *) lfirst(p);
288 int cmp;
289
290 if (path->param_info)
291 {
292 /* Parameterized path, so add it to parameterized_paths */
293 parameterized_paths = lappend(parameterized_paths, path);
294
295 /*
296 * If we have an unparameterized cheapest-total, we no longer care
297 * about finding the best parameterized path, so move on.
298 */
299 if (cheapest_total_path)
300 continue;
301
302 /*
303 * Otherwise, track the best parameterized path, which is the one
304 * with least total cost among those of the minimum
305 * parameterization.
306 */
307 if (best_param_path == NULL)
308 best_param_path = path;
309 else
310 {
311 switch (bms_subset_compare(PATH_REQ_OUTER(path),
312 PATH_REQ_OUTER(best_param_path)))
313 {
314 case BMS_EQUAL:
315 /* keep the cheaper one */
316 if (compare_path_costs(path, best_param_path,
317 TOTAL_COST) < 0)
318 best_param_path = path;
319 break;
320 case BMS_SUBSET1:
321 /* new path is less-parameterized */
322 best_param_path = path;
323 break;
324 case BMS_SUBSET2:
325 /* old path is less-parameterized, keep it */
326 break;
327 case BMS_DIFFERENT:
328
329 /*
330 * This means that neither path has the least possible
331 * parameterization for the rel. We'll sit on the old
332 * path until something better comes along.
333 */
334 break;
335 }
336 }
337 }
338 else
339 {
340 /* Unparameterized path, so consider it for cheapest slots */
341 if (cheapest_total_path == NULL)
342 {
343 cheapest_startup_path = cheapest_total_path = path;
344 continue;
345 }
346
347 /*
348 * If we find two paths of identical costs, try to keep the
349 * better-sorted one. The paths might have unrelated sort
350 * orderings, in which case we can only guess which might be
351 * better to keep, but if one is superior then we definitely
352 * should keep that one.
353 */
354 cmp = compare_path_costs(cheapest_startup_path, path, STARTUP_COST);
355 if (cmp > 0 ||
356 (cmp == 0 &&
357 compare_pathkeys(cheapest_startup_path->pathkeys,
358 path->pathkeys) == PATHKEYS_BETTER2))
359 cheapest_startup_path = path;
360
361 cmp = compare_path_costs(cheapest_total_path, path, TOTAL_COST);
362 if (cmp > 0 ||
363 (cmp == 0 &&
364 compare_pathkeys(cheapest_total_path->pathkeys,
365 path->pathkeys) == PATHKEYS_BETTER2))
366 cheapest_total_path = path;
367 }
368 }
369
370 /* Add cheapest unparameterized path, if any, to parameterized_paths */
371 if (cheapest_total_path)
372 parameterized_paths = lcons(cheapest_total_path, parameterized_paths);
373
374 /*
375 * If there is no unparameterized path, use the best parameterized path as
376 * cheapest_total_path (but not as cheapest_startup_path).
377 */
378 if (cheapest_total_path == NULL)
379 cheapest_total_path = best_param_path;
380 Assert(cheapest_total_path != NULL);
381
382 parent_rel->cheapest_startup_path = cheapest_startup_path;
383 parent_rel->cheapest_total_path = cheapest_total_path;
384 parent_rel->cheapest_unique_path = NULL; /* computed only if needed */
385 parent_rel->cheapest_parameterized_paths = parameterized_paths;
386}
@ BMS_DIFFERENT
Definition: bitmapset.h:65
List * lcons(void *datum, List *list)
Definition: list.c:495
static int cmp(const chr *x, const chr *y, size_t len)
Definition: regc_locale.c:743

References Assert, BMS_DIFFERENT, BMS_EQUAL, BMS_SUBSET1, BMS_SUBSET2, bms_subset_compare(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RelOptInfo::cheapest_unique_path, cmp(), compare_path_costs(), compare_pathkeys(), elog, ERROR, IsA, lappend(), lcons(), lfirst, NIL, PATH_REQ_OUTER, Path::pathkeys, PATHKEYS_BETTER2, RelOptInfo::pathlist, STARTUP_COST, and TOTAL_COST.

Referenced by apply_scanjoin_target_to_paths(), create_distinct_paths(), create_grouping_paths(), create_ordinary_grouping_paths(), create_partial_distinct_paths(), create_partitionwise_grouping_paths(), create_window_paths(), generate_partitionwise_join_paths(), mark_dummy_rel(), merge_clump(), postprocess_setop_rel(), query_planner(), set_dummy_rel_pathlist(), set_rel_pathlist(), standard_join_search(), and subquery_planner().
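As the long "Referenced by" list suggests, the calling convention is simple: generate candidate paths with add_path() and friends, then call set_cheapest() once per RelOptInfo before consulting any of the cheapest_* fields. The sketch below condenses that pattern; the helper name build_and_rank_paths is invented, and real callers such as set_rel_pathlist() generate many more candidate paths first.

/*
 * Illustrative sketch of the usual calling pattern: populate a rel's
 * pathlist, then let set_cheapest() fill in the cheapest_* fields.
 */
static void
build_and_rank_paths(PlannerInfo *root, RelOptInfo *rel)
{
    /* Add at least one path; an empty pathlist makes set_cheapest() error out */
    add_path(rel, create_seqscan_path(root, rel, NULL, 0));

    /* Establish cheapest_startup_path, cheapest_total_path, etc. */
    set_cheapest(rel);

    /* Only now is it safe to consult the results. */
    Assert(rel->cheapest_total_path != NULL);
}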

◆ setup_simple_rel_arrays()

void setup_simple_rel_arrays ( PlannerInfo * root )

Definition at line 94 of file relnode.c.

95{
96 int size;
97 Index rti;
98 ListCell *lc;
99
100 /* Arrays are accessed using RT indexes (1..N) */
101 size = list_length(root->parse->rtable) + 1;
102 root->simple_rel_array_size = size;
103
104 /*
105 * simple_rel_array is initialized to all NULLs, since no RelOptInfos
106 * exist yet. It'll be filled by later calls to build_simple_rel().
107 */
108 root->simple_rel_array = (RelOptInfo **)
109 palloc0(size * sizeof(RelOptInfo *));
110
111 /* simple_rte_array is an array equivalent of the rtable list */
112 root->simple_rte_array = (RangeTblEntry **)
113 palloc0(size * sizeof(RangeTblEntry *));
114 rti = 1;
115 foreach(lc, root->parse->rtable)
116 {
117 RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
118
119 root->simple_rte_array[rti++] = rte;
120 }
121
122 /* append_rel_array is not needed if there are no AppendRelInfos */
123 if (root->append_rel_list == NIL)
124 {
125 root->append_rel_array = NULL;
126 return;
127 }
128
129 root->append_rel_array = (AppendRelInfo **)
130 palloc0(size * sizeof(AppendRelInfo *));
131
132 /*
133 * append_rel_array is filled with any already-existing AppendRelInfos,
134 * which currently could only come from UNION ALL flattening. We might
135 * add more later during inheritance expansion, but it's the
136 * responsibility of the expansion code to update the array properly.
137 */
138 foreach(lc, root->append_rel_list)
139 {
140 AppendRelInfo *appinfo = lfirst_node(AppendRelInfo, lc);
141 int child_relid = appinfo->child_relid;
142
143 /* Sanity check */
144 Assert(child_relid < size);
145
146 if (root->append_rel_array[child_relid])
147 elog(ERROR, "child relation already exists");
148
149 root->append_rel_array[child_relid] = appinfo;
150 }
151}
#define lfirst_node(type, lc)
Definition: pg_list.h:176
static pg_noinline void Size size
Definition: slab.c:607
Index child_relid
Definition: pathnodes.h:2981

References Assert, AppendRelInfo::child_relid, elog, ERROR, lfirst, lfirst_node, list_length(), NIL, palloc0(), root, and size.

Referenced by plan_cluster_use_sort(), plan_create_index_workers(), plan_set_operations(), and query_planner().
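Once these arrays are set up, range-table lookups by RT index become O(1) array fetches; planner_rt_fetch() is essentially a wrapper around simple_rte_array. The loop below is a hedged sketch of how planner code typically consumes the arrays; the function name walk_simple_rels and its body are invented for illustration.

/*
 * Illustrative loop over the planner's per-RTE arrays.  Index 0 is unused;
 * valid RT indexes run from 1 to simple_rel_array_size - 1.  Entries in
 * simple_rel_array stay NULL until build_simple_rel() creates them.
 */
static void
walk_simple_rels(PlannerInfo *root)
{
    Index       rti;

    for (rti = 1; rti < root->simple_rel_array_size; rti++)
    {
        RangeTblEntry *rte = root->simple_rte_array[rti];
        RelOptInfo *rel = root->simple_rel_array[rti];

        if (rel == NULL)
            continue;           /* no RelOptInfo built for this RT index */

        /* ... inspect rte->rtekind, rel->pathlist, etc. ... */
        (void) rte;
    }
}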