PostgreSQL Source Code git master
pathnode.h File Reference
#include "nodes/bitmapset.h"
#include "nodes/pathnodes.h"
Include dependency graph for pathnode.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Functions

int compare_path_costs (Path *path1, Path *path2, CostSelector criterion)
 
int compare_fractional_path_costs (Path *path1, Path *path2, double fraction)
 
void set_cheapest (RelOptInfo *parent_rel)
 
void add_path (RelOptInfo *parent_rel, Path *new_path)
 
bool add_path_precheck (RelOptInfo *parent_rel, int disabled_nodes, Cost startup_cost, Cost total_cost, List *pathkeys, Relids required_outer)
 
void add_partial_path (RelOptInfo *parent_rel, Path *new_path)
 
bool add_partial_path_precheck (RelOptInfo *parent_rel, int disabled_nodes, Cost total_cost, List *pathkeys)
 
Path * create_seqscan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer, int parallel_workers)

Path * create_samplescan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)

IndexPath * create_index_path (PlannerInfo *root, IndexOptInfo *index, List *indexclauses, List *indexorderbys, List *indexorderbycols, List *pathkeys, ScanDirection indexscandir, bool indexonly, Relids required_outer, double loop_count, bool partial_path)

BitmapHeapPath * create_bitmap_heap_path (PlannerInfo *root, RelOptInfo *rel, Path *bitmapqual, Relids required_outer, double loop_count, int parallel_degree)

BitmapAndPath * create_bitmap_and_path (PlannerInfo *root, RelOptInfo *rel, List *bitmapquals)

BitmapOrPath * create_bitmap_or_path (PlannerInfo *root, RelOptInfo *rel, List *bitmapquals)

TidPath * create_tidscan_path (PlannerInfo *root, RelOptInfo *rel, List *tidquals, Relids required_outer)

TidRangePath * create_tidrangescan_path (PlannerInfo *root, RelOptInfo *rel, List *tidrangequals, Relids required_outer, int parallel_workers)

AppendPath * create_append_path (PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *partial_subpaths, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows)

MergeAppendPath * create_merge_append_path (PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *pathkeys, Relids required_outer)

GroupResultPath * create_group_result_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, List *havingqual)

MaterialPath * create_material_path (RelOptInfo *rel, Path *subpath)

MemoizePath * create_memoize_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *param_exprs, List *hash_operators, bool singlerow, bool binary_mode, Cardinality est_calls)

GatherPath * create_gather_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, Relids required_outer, double *rows)

GatherMergePath * create_gather_merge_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *pathkeys, Relids required_outer, double *rows)

SubqueryScanPath * create_subqueryscan_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, bool trivial_pathtarget, List *pathkeys, Relids required_outer)

Path * create_functionscan_path (PlannerInfo *root, RelOptInfo *rel, List *pathkeys, Relids required_outer)

Path * create_valuesscan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)

Path * create_tablefuncscan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)

Path * create_ctescan_path (PlannerInfo *root, RelOptInfo *rel, List *pathkeys, Relids required_outer)

Path * create_namedtuplestorescan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)

Path * create_resultscan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)

Path * create_worktablescan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)

ForeignPath * create_foreignscan_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, double rows, int disabled_nodes, Cost startup_cost, Cost total_cost, List *pathkeys, Relids required_outer, Path *fdw_outerpath, List *fdw_restrictinfo, List *fdw_private)

ForeignPath * create_foreign_join_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, double rows, int disabled_nodes, Cost startup_cost, Cost total_cost, List *pathkeys, Relids required_outer, Path *fdw_outerpath, List *fdw_restrictinfo, List *fdw_private)

ForeignPath * create_foreign_upper_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, double rows, int disabled_nodes, Cost startup_cost, Cost total_cost, List *pathkeys, Path *fdw_outerpath, List *fdw_restrictinfo, List *fdw_private)

Relids calc_nestloop_required_outer (Relids outerrelids, Relids outer_paramrels, Relids innerrelids, Relids inner_paramrels)

Relids calc_non_nestloop_required_outer (Path *outer_path, Path *inner_path)

NestPath * create_nestloop_path (PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype, JoinCostWorkspace *workspace, JoinPathExtraData *extra, Path *outer_path, Path *inner_path, List *restrict_clauses, List *pathkeys, Relids required_outer)

MergePath * create_mergejoin_path (PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype, JoinCostWorkspace *workspace, JoinPathExtraData *extra, Path *outer_path, Path *inner_path, List *restrict_clauses, List *pathkeys, Relids required_outer, List *mergeclauses, List *outersortkeys, List *innersortkeys, int outer_presorted_keys)

HashPath * create_hashjoin_path (PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype, JoinCostWorkspace *workspace, JoinPathExtraData *extra, Path *outer_path, Path *inner_path, bool parallel_hash, List *restrict_clauses, Relids required_outer, List *hashclauses)

ProjectionPath * create_projection_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)

Path * apply_projection_to_path (PlannerInfo *root, RelOptInfo *rel, Path *path, PathTarget *target)

ProjectSetPath * create_set_projection_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)

SortPath * create_sort_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, double limit_tuples)

IncrementalSortPath * create_incremental_sort_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, int presorted_keys, double limit_tuples)

GroupPath * create_group_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *groupClause, List *qual, double numGroups)

UniquePath * create_unique_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, int numCols, double numGroups)

AggPath * create_agg_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, AggStrategy aggstrategy, AggSplit aggsplit, List *groupClause, List *qual, const AggClauseCosts *aggcosts, double numGroups)

GroupingSetsPath * create_groupingsets_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *having_qual, AggStrategy aggstrategy, List *rollups, const AggClauseCosts *agg_costs)

MinMaxAggPath * create_minmaxagg_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, List *mmaggregates, List *quals)

WindowAggPath * create_windowagg_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *windowFuncs, List *runCondition, WindowClause *winclause, List *qual, bool topwindow)

SetOpPath * create_setop_path (PlannerInfo *root, RelOptInfo *rel, Path *leftpath, Path *rightpath, SetOpCmd cmd, SetOpStrategy strategy, List *groupList, double numGroups, double outputRows)

RecursiveUnionPath * create_recursiveunion_path (PlannerInfo *root, RelOptInfo *rel, Path *leftpath, Path *rightpath, PathTarget *target, List *distinctList, int wtParam, double numGroups)

LockRowsPath * create_lockrows_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *rowMarks, int epqParam)

ModifyTablePath * create_modifytable_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, CmdType operation, bool canSetTag, Index nominalRelation, Index rootRelation, List *resultRelations, List *updateColnosLists, List *withCheckOptionLists, List *returningLists, List *rowMarks, OnConflictExpr *onconflict, List *mergeActionLists, List *mergeJoinConditions, int epqParam)

LimitPath * create_limit_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, Node *limitOffset, Node *limitCount, LimitOption limitOption, int64 offset_est, int64 count_est)

void adjust_limit_rows_costs (double *rows, Cost *startup_cost, Cost *total_cost, int64 offset_est, int64 count_est)

Path * reparameterize_path (PlannerInfo *root, Path *path, Relids required_outer, double loop_count)

Path * reparameterize_path_by_child (PlannerInfo *root, Path *path, RelOptInfo *child_rel)

bool path_is_reparameterizable_by_child (Path *path, RelOptInfo *child_rel)

void setup_simple_rel_arrays (PlannerInfo *root)

void expand_planner_arrays (PlannerInfo *root, int add_size)

RelOptInfo * build_simple_rel (PlannerInfo *root, int relid, RelOptInfo *parent)

RelOptInfo * build_simple_grouped_rel (PlannerInfo *root, RelOptInfo *rel)

RelOptInfo * build_grouped_rel (PlannerInfo *root, RelOptInfo *rel)

RelOptInfo * find_base_rel (PlannerInfo *root, int relid)

RelOptInfo * find_base_rel_noerr (PlannerInfo *root, int relid)

RelOptInfo * find_base_rel_ignore_join (PlannerInfo *root, int relid)

RelOptInfo * find_join_rel (PlannerInfo *root, Relids relids)

RelOptInfo * build_join_rel (PlannerInfo *root, Relids joinrelids, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo, List *pushed_down_joins, List **restrictlist_ptr)

Relids min_join_parameterization (PlannerInfo *root, Relids joinrelids, RelOptInfo *outer_rel, RelOptInfo *inner_rel)

RelOptInfo * fetch_upper_rel (PlannerInfo *root, UpperRelationKind kind, Relids relids)

Relids find_childrel_parents (PlannerInfo *root, RelOptInfo *rel)

ParamPathInfo * get_baserel_parampathinfo (PlannerInfo *root, RelOptInfo *baserel, Relids required_outer)

ParamPathInfo * get_joinrel_parampathinfo (PlannerInfo *root, RelOptInfo *joinrel, Path *outer_path, Path *inner_path, SpecialJoinInfo *sjinfo, Relids required_outer, List **restrict_clauses)

ParamPathInfo * get_appendrel_parampathinfo (RelOptInfo *appendrel, Relids required_outer)

ParamPathInfo * find_param_path_info (RelOptInfo *rel, Relids required_outer)

Bitmapset * get_param_path_clause_serials (Path *path)

RelOptInfo * build_child_join_rel (PlannerInfo *root, RelOptInfo *outer_rel, RelOptInfo *inner_rel, RelOptInfo *parent_joinrel, List *restrictlist, SpecialJoinInfo *sjinfo, int nappinfos, AppendRelInfo **appinfos)

RelAggInfo * create_rel_agg_info (PlannerInfo *root, RelOptInfo *rel, bool calculate_grouped_rows)
 

Function Documentation

◆ add_partial_path()

void add_partial_path ( RelOptInfo * parent_rel,
Path * new_path 
)

Definition at line 795 of file pathnode.c.

796{
797 bool accept_new = true; /* unless we find a superior old path */
798 int insert_at = 0; /* where to insert new item */
799 ListCell *p1;
800
801 /* Check for query cancel. */
 802 CHECK_FOR_INTERRUPTS();
 803
804 /* Path to be added must be parallel safe. */
805 Assert(new_path->parallel_safe);
806
807 /* Relation should be OK for parallelism, too. */
808 Assert(parent_rel->consider_parallel);
809
810 /*
811 * As in add_path, throw out any paths which are dominated by the new
812 * path, but throw out the new path if some existing path dominates it.
813 */
814 foreach(p1, parent_rel->partial_pathlist)
815 {
816 Path *old_path = (Path *) lfirst(p1);
817 bool remove_old = false; /* unless new proves superior */
818 PathKeysComparison keyscmp;
819
820 /* Compare pathkeys. */
821 keyscmp = compare_pathkeys(new_path->pathkeys, old_path->pathkeys);
822
823 /* Unless pathkeys are incompatible, keep just one of the two paths. */
824 if (keyscmp != PATHKEYS_DIFFERENT)
825 {
826 if (unlikely(new_path->disabled_nodes != old_path->disabled_nodes))
827 {
828 if (new_path->disabled_nodes > old_path->disabled_nodes)
829 accept_new = false;
830 else
831 remove_old = true;
832 }
833 else if (new_path->total_cost > old_path->total_cost
 834 * STD_FUZZ_FACTOR)
 835 {
836 /* New path costs more; keep it only if pathkeys are better. */
837 if (keyscmp != PATHKEYS_BETTER1)
838 accept_new = false;
839 }
840 else if (old_path->total_cost > new_path->total_cost
 841 * STD_FUZZ_FACTOR)
 842 {
843 /* Old path costs more; keep it only if pathkeys are better. */
844 if (keyscmp != PATHKEYS_BETTER2)
845 remove_old = true;
846 }
847 else if (keyscmp == PATHKEYS_BETTER1)
848 {
849 /* Costs are about the same, new path has better pathkeys. */
850 remove_old = true;
851 }
852 else if (keyscmp == PATHKEYS_BETTER2)
853 {
854 /* Costs are about the same, old path has better pathkeys. */
855 accept_new = false;
856 }
857 else if (old_path->total_cost > new_path->total_cost * 1.0000000001)
858 {
859 /* Pathkeys are the same, and the old path costs more. */
860 remove_old = true;
861 }
862 else
863 {
864 /*
865 * Pathkeys are the same, and new path isn't materially
866 * cheaper.
867 */
868 accept_new = false;
869 }
870 }
871
872 /*
873 * Remove current element from partial_pathlist if dominated by new.
874 */
875 if (remove_old)
876 {
877 parent_rel->partial_pathlist =
 878 foreach_delete_current(parent_rel->partial_pathlist, p1);
 879 pfree(old_path);
880 }
881 else
882 {
883 /* new belongs after this old path if it has cost >= old's */
884 if (new_path->total_cost >= old_path->total_cost)
885 insert_at = foreach_current_index(p1) + 1;
886 }
887
888 /*
889 * If we found an old path that dominates new_path, we can quit
890 * scanning the partial_pathlist; we will not add new_path, and we
891 * assume new_path cannot dominate any later path.
892 */
893 if (!accept_new)
894 break;
895 }
896
897 if (accept_new)
898 {
899 /* Accept the new path: insert it at proper place */
900 parent_rel->partial_pathlist =
901 list_insert_nth(parent_rel->partial_pathlist, insert_at, new_path);
902 }
903 else
904 {
905 /* Reject and recycle the new path */
906 pfree(new_path);
907 }
908}
#define unlikely(x)
Definition: c.h:407
Assert(PointerIsAligned(start, uint64))
List * list_insert_nth(List *list, int pos, void *datum)
Definition: list.c:439
void pfree(void *pointer)
Definition: mcxt.c:1594
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:123
PathKeysComparison compare_pathkeys(List *keys1, List *keys2)
Definition: pathkeys.c:304
#define STD_FUZZ_FACTOR
Definition: pathnode.c:49
PathKeysComparison
Definition: paths.h:211
@ PATHKEYS_BETTER2
Definition: paths.h:214
@ PATHKEYS_BETTER1
Definition: paths.h:213
@ PATHKEYS_DIFFERENT
Definition: paths.h:215
#define lfirst(lc)
Definition: pg_list.h:172
#define foreach_current_index(var_or_cell)
Definition: pg_list.h:403
#define foreach_delete_current(lst, var_or_cell)
Definition: pg_list.h:391
List * pathkeys
Definition: pathnodes.h:1913
int disabled_nodes
Definition: pathnodes.h:1908
Cost total_cost
Definition: pathnodes.h:1910
bool parallel_safe
Definition: pathnodes.h:1902
bool consider_parallel
Definition: pathnodes.h:943
List * partial_pathlist
Definition: pathnodes.h:956

References Assert(), CHECK_FOR_INTERRUPTS, compare_pathkeys(), RelOptInfo::consider_parallel, Path::disabled_nodes, foreach_current_index, foreach_delete_current, lfirst, list_insert_nth(), Path::parallel_safe, RelOptInfo::partial_pathlist, Path::pathkeys, PATHKEYS_BETTER1, PATHKEYS_BETTER2, PATHKEYS_DIFFERENT, pfree(), STD_FUZZ_FACTOR, Path::total_cost, and unlikely.

Referenced by add_paths_to_append_rel(), build_index_paths(), build_setop_child_paths(), create_partial_bitmap_paths(), create_partial_distinct_paths(), create_partial_grouping_paths(), create_partial_unique_paths(), create_plain_partial_paths(), create_tidscan_paths(), generate_grouped_paths(), grouping_planner(), set_subquery_pathlist(), try_partial_hashjoin_path(), try_partial_mergejoin_path(), and try_partial_nestloop_path().
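
The following is a minimal usage sketch (a hypothetical helper, modeled on what create_plain_partial_paths() does for plain tables): the relation must already be marked consider_parallel, and the submitted path must be parallel-safe, or the Asserts above will fire.

    #include "optimizer/pathnode.h"

    /* Hypothetical helper: offer a parallel-aware sequential scan as a partial path. */
    static void
    add_partial_seqscan_path(PlannerInfo *root, RelOptInfo *rel, int parallel_workers)
    {
        if (rel->consider_parallel && parallel_workers > 0)
        {
            /* Unparameterized partial path; add_partial_path() keeps or frees it. */
            Path *path = create_seqscan_path(root, rel, NULL, parallel_workers);

            add_partial_path(rel, path);
        }
    }

Ownership of the Path passes to add_partial_path(); it may be pfree'd immediately if an existing partial path dominates it, so the caller must not use the pointer afterwards.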

◆ add_partial_path_precheck()

bool add_partial_path_precheck ( RelOptInfo * parent_rel,
int  disabled_nodes,
Cost  total_cost,
List * pathkeys 
)

Definition at line 921 of file pathnode.c.

923{
924 ListCell *p1;
925
926 /*
927 * Our goal here is twofold. First, we want to find out whether this path
928 * is clearly inferior to some existing partial path. If so, we want to
929 * reject it immediately. Second, we want to find out whether this path
930 * is clearly superior to some existing partial path -- at least, modulo
931 * final cost computations. If so, we definitely want to consider it.
932 *
933 * Unlike add_path(), we always compare pathkeys here. This is because we
934 * expect partial_pathlist to be very short, and getting a definitive
935 * answer at this stage avoids the need to call add_path_precheck.
936 */
937 foreach(p1, parent_rel->partial_pathlist)
938 {
939 Path *old_path = (Path *) lfirst(p1);
940 PathKeysComparison keyscmp;
941
942 keyscmp = compare_pathkeys(pathkeys, old_path->pathkeys);
943 if (keyscmp != PATHKEYS_DIFFERENT)
944 {
945 if (total_cost > old_path->total_cost * STD_FUZZ_FACTOR &&
946 keyscmp != PATHKEYS_BETTER1)
947 return false;
948 if (old_path->total_cost > total_cost * STD_FUZZ_FACTOR &&
949 keyscmp != PATHKEYS_BETTER2)
950 return true;
951 }
952 }
953
954 /*
955 * This path is neither clearly inferior to an existing partial path nor
956 * clearly good enough that it might replace one. Compare it to
957 * non-parallel plans. If it loses even before accounting for the cost of
958 * the Gather node, we should definitely reject it.
959 *
960 * Note that we pass the total_cost to add_path_precheck twice. This is
961 * because it's never advantageous to consider the startup cost of a
962 * partial path; the resulting plans, if run in parallel, will be run to
963 * completion.
964 */
965 if (!add_path_precheck(parent_rel, disabled_nodes, total_cost, total_cost,
966 pathkeys, NULL))
967 return false;
968
969 return true;
970}
bool add_path_precheck(RelOptInfo *parent_rel, int disabled_nodes, Cost startup_cost, Cost total_cost, List *pathkeys, Relids required_outer)
Definition: pathnode.c:688

References add_path_precheck(), compare_pathkeys(), lfirst, RelOptInfo::partial_pathlist, Path::pathkeys, PATHKEYS_BETTER1, PATHKEYS_BETTER2, PATHKEYS_DIFFERENT, STD_FUZZ_FACTOR, and Path::total_cost.

Referenced by try_partial_hashjoin_path(), try_partial_mergejoin_path(), and try_partial_nestloop_path().
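
The callers listed above all use the same cheap-rejection pattern; a hedged sketch (the workspace, extra, outer_path, inner_path, restrict_clauses and pathkeys variables are assumed to be set up by the surrounding join-path code, as in try_partial_nestloop_path()):

    if (!add_partial_path_precheck(joinrel, workspace.disabled_nodes,
                                   workspace.total_cost, pathkeys))
        return;                 /* clearly dominated: don't build the path */

    add_partial_path(joinrel, (Path *)
                     create_nestloop_path(root, joinrel, jointype,
                                          &workspace, extra,
                                          outer_path, inner_path,
                                          restrict_clauses, pathkeys,
                                          NULL));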

◆ add_path()

void add_path ( RelOptInfo * parent_rel,
Path * new_path 
)

Definition at line 461 of file pathnode.c.

462{
463 bool accept_new = true; /* unless we find a superior old path */
464 int insert_at = 0; /* where to insert new item */
465 List *new_path_pathkeys;
466 ListCell *p1;
467
468 /*
469 * This is a convenient place to check for query cancel --- no part of the
470 * planner goes very long without calling add_path().
471 */
 472 CHECK_FOR_INTERRUPTS();
 473
474 /* Pretend parameterized paths have no pathkeys, per comment above */
475 new_path_pathkeys = new_path->param_info ? NIL : new_path->pathkeys;
476
477 /*
478 * Loop to check proposed new path against old paths. Note it is possible
479 * for more than one old path to be tossed out because new_path dominates
480 * it.
481 */
482 foreach(p1, parent_rel->pathlist)
483 {
484 Path *old_path = (Path *) lfirst(p1);
485 bool remove_old = false; /* unless new proves superior */
486 PathCostComparison costcmp;
487 PathKeysComparison keyscmp;
488 BMS_Comparison outercmp;
489
490 /*
491 * Do a fuzzy cost comparison with standard fuzziness limit.
492 */
493 costcmp = compare_path_costs_fuzzily(new_path, old_path,
 494 STD_FUZZ_FACTOR);
 495
496 /*
497 * If the two paths compare differently for startup and total cost,
498 * then we want to keep both, and we can skip comparing pathkeys and
499 * required_outer rels. If they compare the same, proceed with the
500 * other comparisons. Row count is checked last. (We make the tests
501 * in this order because the cost comparison is most likely to turn
502 * out "different", and the pathkeys comparison next most likely. As
503 * explained above, row count very seldom makes a difference, so even
504 * though it's cheap to compare there's not much point in checking it
505 * earlier.)
506 */
507 if (costcmp != COSTS_DIFFERENT)
508 {
509 /* Similarly check to see if either dominates on pathkeys */
510 List *old_path_pathkeys;
511
512 old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
513 keyscmp = compare_pathkeys(new_path_pathkeys,
514 old_path_pathkeys);
515 if (keyscmp != PATHKEYS_DIFFERENT)
516 {
517 switch (costcmp)
518 {
519 case COSTS_EQUAL:
520 outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
521 PATH_REQ_OUTER(old_path));
522 if (keyscmp == PATHKEYS_BETTER1)
523 {
524 if ((outercmp == BMS_EQUAL ||
525 outercmp == BMS_SUBSET1) &&
526 new_path->rows <= old_path->rows &&
527 new_path->parallel_safe >= old_path->parallel_safe)
528 remove_old = true; /* new dominates old */
529 }
530 else if (keyscmp == PATHKEYS_BETTER2)
531 {
532 if ((outercmp == BMS_EQUAL ||
533 outercmp == BMS_SUBSET2) &&
534 new_path->rows >= old_path->rows &&
535 new_path->parallel_safe <= old_path->parallel_safe)
536 accept_new = false; /* old dominates new */
537 }
538 else /* keyscmp == PATHKEYS_EQUAL */
539 {
540 if (outercmp == BMS_EQUAL)
541 {
542 /*
543 * Same pathkeys and outer rels, and fuzzily
544 * the same cost, so keep just one; to decide
545 * which, first check parallel-safety, then
546 * rows, then do a fuzzy cost comparison with
547 * very small fuzz limit. (We used to do an
548 * exact cost comparison, but that results in
549 * annoying platform-specific plan variations
550 * due to roundoff in the cost estimates.) If
551 * things are still tied, arbitrarily keep
552 * only the old path. Notice that we will
553 * keep only the old path even if the
554 * less-fuzzy comparison decides the startup
555 * and total costs compare differently.
556 */
557 if (new_path->parallel_safe >
558 old_path->parallel_safe)
559 remove_old = true; /* new dominates old */
560 else if (new_path->parallel_safe <
561 old_path->parallel_safe)
562 accept_new = false; /* old dominates new */
563 else if (new_path->rows < old_path->rows)
564 remove_old = true; /* new dominates old */
565 else if (new_path->rows > old_path->rows)
566 accept_new = false; /* old dominates new */
567 else if (compare_path_costs_fuzzily(new_path,
568 old_path,
569 1.0000000001) == COSTS_BETTER1)
570 remove_old = true; /* new dominates old */
571 else
572 accept_new = false; /* old equals or
573 * dominates new */
574 }
575 else if (outercmp == BMS_SUBSET1 &&
576 new_path->rows <= old_path->rows &&
577 new_path->parallel_safe >= old_path->parallel_safe)
578 remove_old = true; /* new dominates old */
579 else if (outercmp == BMS_SUBSET2 &&
580 new_path->rows >= old_path->rows &&
581 new_path->parallel_safe <= old_path->parallel_safe)
582 accept_new = false; /* old dominates new */
583 /* else different parameterizations, keep both */
584 }
585 break;
586 case COSTS_BETTER1:
587 if (keyscmp != PATHKEYS_BETTER2)
588 {
589 outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
590 PATH_REQ_OUTER(old_path));
591 if ((outercmp == BMS_EQUAL ||
592 outercmp == BMS_SUBSET1) &&
593 new_path->rows <= old_path->rows &&
594 new_path->parallel_safe >= old_path->parallel_safe)
595 remove_old = true; /* new dominates old */
596 }
597 break;
598 case COSTS_BETTER2:
599 if (keyscmp != PATHKEYS_BETTER1)
600 {
601 outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
602 PATH_REQ_OUTER(old_path));
603 if ((outercmp == BMS_EQUAL ||
604 outercmp == BMS_SUBSET2) &&
605 new_path->rows >= old_path->rows &&
606 new_path->parallel_safe <= old_path->parallel_safe)
607 accept_new = false; /* old dominates new */
608 }
609 break;
610 case COSTS_DIFFERENT:
611
612 /*
613 * can't get here, but keep this case to keep compiler
614 * quiet
615 */
616 break;
617 }
618 }
619 }
620
621 /*
622 * Remove current element from pathlist if dominated by new.
623 */
624 if (remove_old)
625 {
626 parent_rel->pathlist = foreach_delete_current(parent_rel->pathlist,
627 p1);
628
629 /*
630 * Delete the data pointed-to by the deleted cell, if possible
631 */
632 if (!IsA(old_path, IndexPath))
633 pfree(old_path);
634 }
635 else
636 {
637 /*
638 * new belongs after this old path if it has more disabled nodes
639 * or if it has the same number of nodes but a greater total cost
640 */
641 if (new_path->disabled_nodes > old_path->disabled_nodes ||
642 (new_path->disabled_nodes == old_path->disabled_nodes &&
643 new_path->total_cost >= old_path->total_cost))
644 insert_at = foreach_current_index(p1) + 1;
645 }
646
647 /*
648 * If we found an old path that dominates new_path, we can quit
649 * scanning the pathlist; we will not add new_path, and we assume
650 * new_path cannot dominate any other elements of the pathlist.
651 */
652 if (!accept_new)
653 break;
654 }
655
656 if (accept_new)
657 {
658 /* Accept the new path: insert it at proper place in pathlist */
659 parent_rel->pathlist =
660 list_insert_nth(parent_rel->pathlist, insert_at, new_path);
661 }
662 else
663 {
664 /* Reject and recycle the new path */
665 if (!IsA(new_path, IndexPath))
666 pfree(new_path);
667 }
668}
BMS_Comparison bms_subset_compare(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:445
BMS_Comparison
Definition: bitmapset.h:61
@ BMS_SUBSET1
Definition: bitmapset.h:63
@ BMS_EQUAL
Definition: bitmapset.h:62
@ BMS_SUBSET2
Definition: bitmapset.h:64
#define IsA(nodeptr, _type_)
Definition: nodes.h:164
static PathCostComparison compare_path_costs_fuzzily(Path *path1, Path *path2, double fuzz_factor)
Definition: pathnode.c:183
PathCostComparison
Definition: pathnode.c:37
@ COSTS_EQUAL
Definition: pathnode.c:38
@ COSTS_BETTER1
Definition: pathnode.c:39
@ COSTS_BETTER2
Definition: pathnode.c:40
@ COSTS_DIFFERENT
Definition: pathnode.c:41
#define PATH_REQ_OUTER(path)
Definition: pathnodes.h:1917
#define NIL
Definition: pg_list.h:68
Definition: pg_list.h:54
Cardinality rows
Definition: pathnodes.h:1907
List * pathlist
Definition: pathnodes.h:954

References BMS_EQUAL, BMS_SUBSET1, BMS_SUBSET2, bms_subset_compare(), CHECK_FOR_INTERRUPTS, compare_path_costs_fuzzily(), compare_pathkeys(), COSTS_BETTER1, COSTS_BETTER2, COSTS_DIFFERENT, COSTS_EQUAL, Path::disabled_nodes, foreach_current_index, foreach_delete_current, IsA, lfirst, list_insert_nth(), NIL, Path::parallel_safe, PATH_REQ_OUTER, Path::pathkeys, PATHKEYS_BETTER1, PATHKEYS_BETTER2, PATHKEYS_DIFFERENT, RelOptInfo::pathlist, pfree(), Path::rows, STD_FUZZ_FACTOR, and Path::total_cost.

Referenced by add_foreign_final_paths(), add_foreign_grouping_paths(), add_foreign_ordered_paths(), add_paths_to_append_rel(), add_paths_to_grouping_rel(), add_paths_with_pathkeys_for_rel(), build_setop_child_paths(), BuildParameterizedTidPaths(), consider_groupingsets_paths(), create_degenerate_grouping_paths(), create_final_distinct_paths(), create_final_unique_paths(), create_index_paths(), create_one_window_path(), create_ordered_paths(), create_partial_grouping_paths(), create_tidscan_paths(), fileGetForeignPaths(), gather_grouping_paths(), generate_gather_paths(), generate_grouped_paths(), generate_nonunion_paths(), generate_orderedappend_paths(), generate_recursion_path(), generate_union_paths(), generate_useful_gather_paths(), get_index_paths(), grouping_planner(), mark_dummy_rel(), postgresGetForeignJoinPaths(), postgresGetForeignPaths(), preprocess_minmax_aggregates(), query_planner(), set_cte_pathlist(), set_dummy_rel_pathlist(), set_function_pathlist(), set_namedtuplestore_pathlist(), set_plain_rel_pathlist(), set_result_pathlist(), set_subquery_pathlist(), set_tablefunc_pathlist(), set_tablesample_rel_pathlist(), set_values_pathlist(), set_worktable_pathlist(), try_hashjoin_path(), try_mergejoin_path(), and try_nestloop_path().
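
As a concrete example of the usual calling convention (a sketch of what set_plain_rel_pathlist() does for an ordinary table): build a path with one of the create_*_path() constructors and hand it straight to add_path(), which takes ownership of it.

    /* Consider a sequential scan, parameterized only by lateral references. */
    Relids      required_outer = rel->lateral_relids;

    add_path(rel, create_seqscan_path(root, rel, required_outer, 0));

Because add_path() may pfree() the submitted path (or an older path it now dominates), callers must not hold onto path pointers after the call; the surviving paths live in parent_rel->pathlist.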

◆ add_path_precheck()

bool add_path_precheck ( RelOptInfo * parent_rel,
int  disabled_nodes,
Cost  startup_cost,
Cost  total_cost,
List * pathkeys,
Relids  required_outer 
)

Definition at line 688 of file pathnode.c.

691{
692 List *new_path_pathkeys;
693 bool consider_startup;
694 ListCell *p1;
695
696 /* Pretend parameterized paths have no pathkeys, per add_path policy */
697 new_path_pathkeys = required_outer ? NIL : pathkeys;
698
699 /* Decide whether new path's startup cost is interesting */
700 consider_startup = required_outer ? parent_rel->consider_param_startup : parent_rel->consider_startup;
701
702 foreach(p1, parent_rel->pathlist)
703 {
704 Path *old_path = (Path *) lfirst(p1);
705 PathKeysComparison keyscmp;
706
707 /*
708 * Since the pathlist is sorted by disabled_nodes and then by
709 * total_cost, we can stop looking once we reach a path with more
710 * disabled nodes, or the same number of disabled nodes plus a
711 * total_cost larger than the new path's.
712 */
713 if (unlikely(old_path->disabled_nodes != disabled_nodes))
714 {
715 if (disabled_nodes < old_path->disabled_nodes)
716 break;
717 }
718 else if (total_cost <= old_path->total_cost * STD_FUZZ_FACTOR)
719 break;
720
721 /*
722 * We are looking for an old_path with the same parameterization (and
723 * by assumption the same rowcount) that dominates the new path on
724 * pathkeys as well as both cost metrics. If we find one, we can
725 * reject the new path.
726 *
727 * Cost comparisons here should match compare_path_costs_fuzzily.
728 */
729 /* new path can win on startup cost only if consider_startup */
730 if (startup_cost > old_path->startup_cost * STD_FUZZ_FACTOR ||
731 !consider_startup)
732 {
733 /* new path loses on cost, so check pathkeys... */
734 List *old_path_pathkeys;
735
736 old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
737 keyscmp = compare_pathkeys(new_path_pathkeys,
738 old_path_pathkeys);
739 if (keyscmp == PATHKEYS_EQUAL ||
740 keyscmp == PATHKEYS_BETTER2)
741 {
742 /* new path does not win on pathkeys... */
743 if (bms_equal(required_outer, PATH_REQ_OUTER(old_path)))
744 {
745 /* Found an old path that dominates the new one */
746 return false;
747 }
748 }
749 }
750 }
751
752 return true;
753}
bool bms_equal(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:142
@ PATHKEYS_EQUAL
Definition: paths.h:212
Cost startup_cost
Definition: pathnodes.h:1909
bool consider_param_startup
Definition: pathnodes.h:941
bool consider_startup
Definition: pathnodes.h:939

References bms_equal(), compare_pathkeys(), RelOptInfo::consider_param_startup, RelOptInfo::consider_startup, Path::disabled_nodes, lfirst, NIL, PATH_REQ_OUTER, Path::pathkeys, PATHKEYS_BETTER2, PATHKEYS_EQUAL, RelOptInfo::pathlist, Path::startup_cost, STD_FUZZ_FACTOR, and unlikely.

Referenced by add_partial_path_precheck(), try_hashjoin_path(), try_mergejoin_path(), and try_nestloop_path().
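
A hedged sketch of how the join-path builders listed above use it: a cheap initial_cost_nestloop() estimate fills in the JoinCostWorkspace, the precheck filters out hopeless candidates, and only survivors get a fully costed NestPath (all variables are assumed from the caller's context, as in try_nestloop_path()):

    initial_cost_nestloop(root, &workspace, jointype,
                          outer_path, inner_path, extra);

    if (add_path_precheck(joinrel, workspace.disabled_nodes,
                          workspace.startup_cost, workspace.total_cost,
                          pathkeys, required_outer))
        add_path(joinrel, (Path *)
                 create_nestloop_path(root, joinrel, jointype,
                                      &workspace, extra,
                                      outer_path, inner_path,
                                      restrict_clauses, pathkeys,
                                      required_outer));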

◆ adjust_limit_rows_costs()

void adjust_limit_rows_costs ( double *  rows,
Cost * startup_cost,
Cost * total_cost,
int64  offset_est,
int64  count_est 
)

Definition at line 3786 of file pathnode.c.

3791{
3792 double input_rows = *rows;
3793 Cost input_startup_cost = *startup_cost;
3794 Cost input_total_cost = *total_cost;
3795
3796 if (offset_est != 0)
3797 {
3798 double offset_rows;
3799
3800 if (offset_est > 0)
3801 offset_rows = (double) offset_est;
3802 else
3803 offset_rows = clamp_row_est(input_rows * 0.10);
3804 if (offset_rows > *rows)
3805 offset_rows = *rows;
3806 if (input_rows > 0)
3807 *startup_cost +=
3808 (input_total_cost - input_startup_cost)
3809 * offset_rows / input_rows;
3810 *rows -= offset_rows;
3811 if (*rows < 1)
3812 *rows = 1;
3813 }
3814
3815 if (count_est != 0)
3816 {
3817 double count_rows;
3818
3819 if (count_est > 0)
3820 count_rows = (double) count_est;
3821 else
3822 count_rows = clamp_row_est(input_rows * 0.10);
3823 if (count_rows > *rows)
3824 count_rows = *rows;
3825 if (input_rows > 0)
3826 *total_cost = *startup_cost +
3827 (input_total_cost - input_startup_cost)
3828 * count_rows / input_rows;
3829 *rows = count_rows;
3830 if (*rows < 1)
3831 *rows = 1;
3832 }
3833}
double clamp_row_est(double nrows)
Definition: costsize.c:213
double Cost
Definition: nodes.h:261

References clamp_row_est().

Referenced by create_limit_path(), and estimate_path_cost_size().
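
A hypothetical worked example: starting from *rows = 1000, *startup_cost = 0 and *total_cost = 100 with offset_est = 100 and count_est = 10, the OFFSET step charges 100/1000 of the run cost to startup (*startup_cost becomes 10) and leaves 900 rows; the LIMIT step then sets *total_cost = 10 + 100 * (10/1000) = 11 and *rows = 10.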

◆ apply_projection_to_path()

Path * apply_projection_to_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * path,
PathTarget * target 
)

Definition at line 2634 of file pathnode.c.

2638{
2639 QualCost oldcost;
2640
2641 /*
2642 * If given path can't project, we might need a Result node, so make a
2643 * separate ProjectionPath.
2644 */
2645 if (!is_projection_capable_path(path))
2646 return (Path *) create_projection_path(root, rel, path, target);
2647
2648 /*
2649 * We can just jam the desired tlist into the existing path, being sure to
2650 * update its cost estimates appropriately.
2651 */
2652 oldcost = path->pathtarget->cost;
2653 path->pathtarget = target;
2654
2655 path->startup_cost += target->cost.startup - oldcost.startup;
2656 path->total_cost += target->cost.startup - oldcost.startup +
2657 (target->cost.per_tuple - oldcost.per_tuple) * path->rows;
2658
2659 /*
2660 * If the path happens to be a Gather or GatherMerge path, we'd like to
2661 * arrange for the subpath to return the required target list so that
2662 * workers can help project. But if there is something that is not
2663 * parallel-safe in the target expressions, then we can't.
2664 */
2665 if ((IsA(path, GatherPath) || IsA(path, GatherMergePath)) &&
2666 is_parallel_safe(root, (Node *) target->exprs))
2667 {
2668 /*
2669 * We always use create_projection_path here, even if the subpath is
2670 * projection-capable, so as to avoid modifying the subpath in place.
2671 * It seems unlikely at present that there could be any other
2672 * references to the subpath, but better safe than sorry.
2673 *
2674 * Note that we don't change the parallel path's cost estimates; it
2675 * might be appropriate to do so, to reflect the fact that the bulk of
2676 * the target evaluation will happen in workers.
2677 */
2678 if (IsA(path, GatherPath))
2679 {
2680 GatherPath *gpath = (GatherPath *) path;
2681
2682 gpath->subpath = (Path *)
2683 create_projection_path(root,
2684 gpath->subpath->parent,
2685 gpath->subpath,
2686 target);
2687 }
2688 else
2689 {
2690 GatherMergePath *gmpath = (GatherMergePath *) path;
2691
2692 gmpath->subpath = (Path *)
2693 create_projection_path(root,
2694 gmpath->subpath->parent,
2695 gmpath->subpath,
2696 target);
2697 }
2698 }
2699 else if (path->parallel_safe &&
2700 !is_parallel_safe(root, (Node *) target->exprs))
2701 {
2702 /*
2703 * We're inserting a parallel-restricted target list into a path
2704 * currently marked parallel-safe, so we have to mark it as no longer
2705 * safe.
2706 */
2707 path->parallel_safe = false;
2708 }
2709
2710 return path;
2711}
bool is_parallel_safe(PlannerInfo *root, Node *node)
Definition: clauses.c:765
bool is_projection_capable_path(Path *path)
Definition: createplan.c:7217
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:81
ProjectionPath * create_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2525
tree ctl root
Definition: radixtree.h:1857
Path * subpath
Definition: pathnodes.h:2264
Definition: nodes.h:135
List * exprs
Definition: pathnodes.h:1780
QualCost cost
Definition: pathnodes.h:1786
Cost per_tuple
Definition: pathnodes.h:48
Cost startup
Definition: pathnodes.h:47

References PathTarget::cost, create_projection_path(), PathTarget::exprs, if(), is_parallel_safe(), is_projection_capable_path(), IsA, Path::parallel_safe, QualCost::per_tuple, root, Path::rows, QualCost::startup, Path::startup_cost, GatherPath::subpath, GatherMergePath::subpath, and Path::total_cost.

Referenced by adjust_paths_for_srfs(), build_minmax_path(), create_ordered_paths(), and recurse_set_operations().
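
A short usage sketch (newtarget and extra_expr are hypothetical; copy_pathtarget() and add_column_to_pathtarget() come from optimizer/tlist.h): because the function may wrap the input in a new ProjectionPath instead of modifying it, the return value must replace the caller's reference.

    PathTarget *newtarget = copy_pathtarget(path->pathtarget);

    add_column_to_pathtarget(newtarget, (Expr *) extra_expr, 0);

    /* May return the same path with an updated target, or a new ProjectionPath. */
    path = apply_projection_to_path(root, rel, path, newtarget);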

◆ build_child_join_rel()

RelOptInfo * build_child_join_rel ( PlannerInfo * root,
RelOptInfo * outer_rel,
RelOptInfo * inner_rel,
RelOptInfo * parent_joinrel,
List * restrictlist,
SpecialJoinInfo * sjinfo,
int  nappinfos,
AppendRelInfo **  appinfos 
)

Definition at line 1001 of file relnode.c.

1005{
1006 RelOptInfo *joinrel = makeNode(RelOptInfo);
1007
1008 /* Only joins between "other" relations land here. */
1009 Assert(IS_OTHER_REL(outer_rel) && IS_OTHER_REL(inner_rel));
1010
1011 /* The parent joinrel should have consider_partitionwise_join set. */
1012 Assert(parent_joinrel->consider_partitionwise_join);
1013
1015 joinrel->relids = adjust_child_relids(parent_joinrel->relids,
1016 nappinfos, appinfos);
1017 joinrel->rows = 0;
1018 /* cheap startup cost is interesting iff not all tuples to be retrieved */
1019 joinrel->consider_startup = (root->tuple_fraction > 0);
1020 joinrel->consider_param_startup = false;
1021 joinrel->consider_parallel = false;
1022 joinrel->reltarget = create_empty_pathtarget();
1023 joinrel->pathlist = NIL;
1024 joinrel->ppilist = NIL;
1025 joinrel->partial_pathlist = NIL;
1026 joinrel->cheapest_startup_path = NULL;
1027 joinrel->cheapest_total_path = NULL;
1028 joinrel->cheapest_parameterized_paths = NIL;
1029 joinrel->direct_lateral_relids = NULL;
1030 joinrel->lateral_relids = NULL;
1031 joinrel->relid = 0; /* indicates not a baserel */
1032 joinrel->rtekind = RTE_JOIN;
1033 joinrel->min_attr = 0;
1034 joinrel->max_attr = 0;
1035 joinrel->attr_needed = NULL;
1036 joinrel->attr_widths = NULL;
1037 joinrel->notnullattnums = NULL;
1038 joinrel->nulling_relids = NULL;
1039 joinrel->lateral_vars = NIL;
1040 joinrel->lateral_referencers = NULL;
1041 joinrel->indexlist = NIL;
1042 joinrel->pages = 0;
1043 joinrel->tuples = 0;
1044 joinrel->allvisfrac = 0;
1045 joinrel->eclass_indexes = NULL;
1046 joinrel->subroot = NULL;
1047 joinrel->subplan_params = NIL;
1048 joinrel->amflags = 0;
1049 joinrel->serverid = InvalidOid;
1050 joinrel->userid = InvalidOid;
1051 joinrel->useridiscurrent = false;
1052 joinrel->fdwroutine = NULL;
1053 joinrel->fdw_private = NULL;
1054 joinrel->unique_rel = NULL;
1055 joinrel->unique_pathkeys = NIL;
1056 joinrel->unique_groupclause = NIL;
1057 joinrel->baserestrictinfo = NIL;
1058 joinrel->baserestrictcost.startup = 0;
1059 joinrel->baserestrictcost.per_tuple = 0;
1060 joinrel->joininfo = NIL;
1061 joinrel->has_eclass_joins = false;
1062 joinrel->consider_partitionwise_join = false; /* might get changed later */
1063 joinrel->agg_info = NULL;
1064 joinrel->grouped_rel = NULL;
1065 joinrel->parent = parent_joinrel;
1066 joinrel->top_parent = parent_joinrel->top_parent ? parent_joinrel->top_parent : parent_joinrel;
1067 joinrel->top_parent_relids = joinrel->top_parent->relids;
1068 joinrel->part_scheme = NULL;
1069 joinrel->nparts = -1;
1070 joinrel->boundinfo = NULL;
1071 joinrel->partbounds_merged = false;
1072 joinrel->partition_qual = NIL;
1073 joinrel->part_rels = NULL;
1074 joinrel->live_parts = NULL;
1075 joinrel->all_partrels = NULL;
1076 joinrel->partexprs = NULL;
1077 joinrel->nullable_partexprs = NULL;
1078
1079 /* Compute information relevant to foreign relations. */
1080 set_foreign_rel_properties(joinrel, outer_rel, inner_rel);
1081
1082 /* Set up reltarget struct */
1083 build_child_join_reltarget(root, parent_joinrel, joinrel,
1084 nappinfos, appinfos);
1085
1086 /* Construct joininfo list. */
1087 joinrel->joininfo = (List *) adjust_appendrel_attrs(root,
1088 (Node *) parent_joinrel->joininfo,
1089 nappinfos,
1090 appinfos);
1091
1092 /*
1093 * Lateral relids referred in child join will be same as that referred in
1094 * the parent relation.
1095 */
1096 joinrel->direct_lateral_relids = (Relids) bms_copy(parent_joinrel->direct_lateral_relids);
1097 joinrel->lateral_relids = (Relids) bms_copy(parent_joinrel->lateral_relids);
1098
1099 /*
1100 * If the parent joinrel has pending equivalence classes, so does the
1101 * child.
1102 */
1103 joinrel->has_eclass_joins = parent_joinrel->has_eclass_joins;
1104
1105 /* Is the join between partitions itself partitioned? */
1106 build_joinrel_partition_info(root, joinrel, outer_rel, inner_rel, sjinfo,
1107 restrictlist);
1108
1109 /* Child joinrel is parallel safe if parent is parallel safe. */
1110 joinrel->consider_parallel = parent_joinrel->consider_parallel;
1111
1112 /* Set estimates of the child-joinrel's size. */
1113 set_joinrel_size_estimates(root, joinrel, outer_rel, inner_rel,
1114 sjinfo, restrictlist);
1115
1116 /* We build the join only once. */
1117 Assert(!find_join_rel(root, joinrel->relids));
1118
1119 /* Add the relation to the PlannerInfo. */
1120 add_join_rel(root, joinrel);
1121
1122 /*
1123 * We might need EquivalenceClass members corresponding to the child join,
1124 * so that we can represent sort pathkeys for it. As with children of
1125 * baserels, we shouldn't need this unless there are relevant eclass joins
1126 * (implying that a merge join might be possible) or pathkeys to sort by.
1127 */
1128 if (joinrel->has_eclass_joins || has_useful_pathkeys(root, parent_joinrel))
1129 add_child_join_rel_equivalences(root,
1130 nappinfos, appinfos,
1131 parent_joinrel, joinrel);
1132
1133 return joinrel;
1134}
Node * adjust_appendrel_attrs(PlannerInfo *root, Node *node, int nappinfos, AppendRelInfo **appinfos)
Definition: appendinfo.c:200
Relids adjust_child_relids(Relids relids, int nappinfos, AppendRelInfo **appinfos)
Definition: appendinfo.c:625
Bitmapset * bms_copy(const Bitmapset *a)
Definition: bitmapset.c:122
void set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo, List *restrictlist)
Definition: costsize.c:5453
void add_child_join_rel_equivalences(PlannerInfo *root, int nappinfos, AppendRelInfo **appinfos, RelOptInfo *parent_joinrel, RelOptInfo *child_joinrel)
Definition: equivclass.c:2941
#define makeNode(_type_)
Definition: nodes.h:161
@ RTE_JOIN
Definition: parsenodes.h:1045
bool has_useful_pathkeys(PlannerInfo *root, RelOptInfo *rel)
Definition: pathkeys.c:2291
Bitmapset * Relids
Definition: pathnodes.h:30
@ RELOPT_OTHER_JOINREL
Definition: pathnodes.h:886
#define IS_OTHER_REL(rel)
Definition: pathnodes.h:910
#define InvalidOid
Definition: postgres_ext.h:37
static void build_joinrel_partition_info(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo, List *restrictlist)
Definition: relnode.c:2113
RelOptInfo * find_join_rel(PlannerInfo *root, Relids relids)
Definition: relnode.c:642
static void build_child_join_reltarget(PlannerInfo *root, RelOptInfo *parentrel, RelOptInfo *childrel, int nappinfos, AppendRelInfo **appinfos)
Definition: relnode.c:2626
static void set_foreign_rel_properties(RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel)
Definition: relnode.c:704
static void add_join_rel(PlannerInfo *root, RelOptInfo *joinrel)
Definition: relnode.c:742
List * baserestrictinfo
Definition: pathnodes.h:1046
List * subplan_params
Definition: pathnodes.h:1005
List * ppilist
Definition: pathnodes.h:955
bool useridiscurrent
Definition: pathnodes.h:1019
uint32 amflags
Definition: pathnodes.h:1009
List * joininfo
Definition: pathnodes.h:1052
Bitmapset * notnullattnums
Definition: pathnodes.h:987
List * partition_qual
Definition: pathnodes.h:1096
Relids relids
Definition: pathnodes.h:927
struct PathTarget * reltarget
Definition: pathnodes.h:949
Index relid
Definition: pathnodes.h:973
List * lateral_vars
Definition: pathnodes.h:991
struct RelAggInfo * agg_info
Definition: pathnodes.h:1066
List * unique_pathkeys
Definition: pathnodes.h:1038
Cardinality tuples
Definition: pathnodes.h:1000
Relids top_parent_relids
Definition: pathnodes.h:1078
bool partbounds_merged
Definition: pathnodes.h:1094
BlockNumber pages
Definition: pathnodes.h:999
Relids lateral_relids
Definition: pathnodes.h:968
List * cheapest_parameterized_paths
Definition: pathnodes.h:959
RelOptKind reloptkind
Definition: pathnodes.h:921
List * indexlist
Definition: pathnodes.h:995
Relids lateral_referencers
Definition: pathnodes.h:993
struct Path * cheapest_startup_path
Definition: pathnodes.h:957
QualCost baserestrictcost
Definition: pathnodes.h:1048
struct Path * cheapest_total_path
Definition: pathnodes.h:958
List * unique_groupclause
Definition: pathnodes.h:1040
struct RelOptInfo * grouped_rel
Definition: pathnodes.h:1068
Bitmapset * eclass_indexes
Definition: pathnodes.h:1003
Relids all_partrels
Definition: pathnodes.h:1110
Relids direct_lateral_relids
Definition: pathnodes.h:966
bool has_eclass_joins
Definition: pathnodes.h:1054
Oid serverid
Definition: pathnodes.h:1015
Bitmapset * live_parts
Definition: pathnodes.h:1108
bool consider_partitionwise_join
Definition: pathnodes.h:1060
PlannerInfo * subroot
Definition: pathnodes.h:1004
AttrNumber max_attr
Definition: pathnodes.h:981
Relids nulling_relids
Definition: pathnodes.h:989
double allvisfrac
Definition: pathnodes.h:1001
struct RelOptInfo * unique_rel
Definition: pathnodes.h:1036
Cardinality rows
Definition: pathnodes.h:933
AttrNumber min_attr
Definition: pathnodes.h:979
RTEKind rtekind
Definition: pathnodes.h:977
PathTarget * create_empty_pathtarget(void)
Definition: tlist.c:681

References add_child_join_rel_equivalences(), add_join_rel(), adjust_appendrel_attrs(), adjust_child_relids(), RelOptInfo::agg_info, RelOptInfo::all_partrels, RelOptInfo::allvisfrac, RelOptInfo::amflags, Assert(), RelOptInfo::baserestrictcost, RelOptInfo::baserestrictinfo, bms_copy(), build_child_join_reltarget(), build_joinrel_partition_info(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RelOptInfo::consider_parallel, RelOptInfo::consider_param_startup, RelOptInfo::consider_partitionwise_join, RelOptInfo::consider_startup, create_empty_pathtarget(), RelOptInfo::direct_lateral_relids, RelOptInfo::eclass_indexes, find_join_rel(), RelOptInfo::grouped_rel, RelOptInfo::has_eclass_joins, has_useful_pathkeys(), RelOptInfo::indexlist, InvalidOid, IS_OTHER_REL, RelOptInfo::joininfo, RelOptInfo::lateral_referencers, RelOptInfo::lateral_relids, RelOptInfo::lateral_vars, RelOptInfo::live_parts, makeNode, RelOptInfo::max_attr, RelOptInfo::min_attr, NIL, RelOptInfo::notnullattnums, RelOptInfo::nparts, RelOptInfo::nulling_relids, RelOptInfo::pages, RelOptInfo::partbounds_merged, RelOptInfo::partial_pathlist, RelOptInfo::partition_qual, RelOptInfo::pathlist, QualCost::per_tuple, RelOptInfo::ppilist, RelOptInfo::relid, RelOptInfo::relids, RELOPT_OTHER_JOINREL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, RelOptInfo::rows, RTE_JOIN, RelOptInfo::rtekind, RelOptInfo::serverid, set_foreign_rel_properties(), set_joinrel_size_estimates(), QualCost::startup, RelOptInfo::subplan_params, RelOptInfo::subroot, RelOptInfo::top_parent_relids, RelOptInfo::tuples, RelOptInfo::unique_groupclause, RelOptInfo::unique_pathkeys, RelOptInfo::unique_rel, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by try_partitionwise_join().
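
A sketch of the single documented call site, try_partitionwise_join(), which builds one child joinrel per matched pair of partitions (the child_* variables, nappinfos and appinfos are assumed from that loop):

    child_joinrel = build_child_join_rel(root, child_rel1, child_rel2,
                                         joinrel, child_restrictlist,
                                         child_sjinfo, nappinfos, appinfos);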

◆ build_grouped_rel()

RelOptInfo * build_grouped_rel ( PlannerInfo * root,
RelOptInfo * rel 
)

Definition at line 484 of file relnode.c.

485{
486 RelOptInfo *grouped_rel;
487
488 grouped_rel = makeNode(RelOptInfo);
489 memcpy(grouped_rel, rel, sizeof(RelOptInfo));
490
491 /*
492 * clear path info
493 */
494 grouped_rel->pathlist = NIL;
495 grouped_rel->ppilist = NIL;
496 grouped_rel->partial_pathlist = NIL;
497 grouped_rel->cheapest_startup_path = NULL;
498 grouped_rel->cheapest_total_path = NULL;
499 grouped_rel->cheapest_parameterized_paths = NIL;
500
501 /*
502 * clear partition info
503 */
504 grouped_rel->part_scheme = NULL;
505 grouped_rel->nparts = -1;
506 grouped_rel->boundinfo = NULL;
507 grouped_rel->partbounds_merged = false;
508 grouped_rel->partition_qual = NIL;
509 grouped_rel->part_rels = NULL;
510 grouped_rel->live_parts = NULL;
511 grouped_rel->all_partrels = NULL;
512 grouped_rel->partexprs = NULL;
513 grouped_rel->nullable_partexprs = NULL;
514 grouped_rel->consider_partitionwise_join = false;
515
516 /*
517 * clear size estimates
518 */
519 grouped_rel->rows = 0;
520
521 return grouped_rel;
522}

References RelOptInfo::all_partrels, RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RelOptInfo::consider_partitionwise_join, RelOptInfo::live_parts, makeNode, NIL, RelOptInfo::nparts, RelOptInfo::partbounds_merged, RelOptInfo::partial_pathlist, RelOptInfo::partition_qual, RelOptInfo::pathlist, RelOptInfo::ppilist, and RelOptInfo::rows.

Referenced by build_simple_grouped_rel(), and make_grouped_join_rel().

◆ build_join_rel()

RelOptInfo * build_join_rel ( PlannerInfo * root,
Relids  joinrelids,
RelOptInfo * outer_rel,
RelOptInfo * inner_rel,
SpecialJoinInfo * sjinfo,
List * pushed_down_joins,
List **  restrictlist_ptr 
)

Definition at line 780 of file relnode.c.

787{
788 RelOptInfo *joinrel;
789 List *restrictlist;
790
791 /* This function should be used only for join between parents. */
792 Assert(!IS_OTHER_REL(outer_rel) && !IS_OTHER_REL(inner_rel));
793
794 /*
795 * See if we already have a joinrel for this set of base rels.
796 */
797 joinrel = find_join_rel(root, joinrelids);
798
799 if (joinrel)
800 {
801 /*
802 * Yes, so we only need to figure the restrictlist for this particular
803 * pair of component relations.
804 */
805 if (restrictlist_ptr)
806 *restrictlist_ptr = build_joinrel_restrictlist(root,
807 joinrel,
808 outer_rel,
809 inner_rel,
810 sjinfo);
811 return joinrel;
812 }
813
814 /*
815 * Nope, so make one.
816 */
817 joinrel = makeNode(RelOptInfo);
818 joinrel->reloptkind = RELOPT_JOINREL;
819 joinrel->relids = bms_copy(joinrelids);
820 joinrel->rows = 0;
821 /* cheap startup cost is interesting iff not all tuples to be retrieved */
822 joinrel->consider_startup = (root->tuple_fraction > 0);
823 joinrel->consider_param_startup = false;
824 joinrel->consider_parallel = false;
 825 joinrel->reltarget = create_empty_pathtarget();
 826 joinrel->pathlist = NIL;
827 joinrel->ppilist = NIL;
828 joinrel->partial_pathlist = NIL;
829 joinrel->cheapest_startup_path = NULL;
830 joinrel->cheapest_total_path = NULL;
 831 joinrel->cheapest_parameterized_paths = NIL;
 832 /* init direct_lateral_relids from children; we'll finish it up below */
 833 joinrel->direct_lateral_relids =
 834 bms_union(outer_rel->direct_lateral_relids,
 835 inner_rel->direct_lateral_relids);
 836 joinrel->lateral_relids = min_join_parameterization(root, joinrel->relids,
 837 outer_rel, inner_rel);
838 joinrel->relid = 0; /* indicates not a baserel */
839 joinrel->rtekind = RTE_JOIN;
840 joinrel->min_attr = 0;
841 joinrel->max_attr = 0;
842 joinrel->attr_needed = NULL;
843 joinrel->attr_widths = NULL;
844 joinrel->notnullattnums = NULL;
845 joinrel->nulling_relids = NULL;
846 joinrel->lateral_vars = NIL;
847 joinrel->lateral_referencers = NULL;
848 joinrel->indexlist = NIL;
849 joinrel->statlist = NIL;
850 joinrel->pages = 0;
851 joinrel->tuples = 0;
852 joinrel->allvisfrac = 0;
853 joinrel->eclass_indexes = NULL;
854 joinrel->subroot = NULL;
855 joinrel->subplan_params = NIL;
856 joinrel->rel_parallel_workers = -1;
857 joinrel->amflags = 0;
858 joinrel->serverid = InvalidOid;
859 joinrel->userid = InvalidOid;
860 joinrel->useridiscurrent = false;
861 joinrel->fdwroutine = NULL;
862 joinrel->fdw_private = NULL;
863 joinrel->unique_for_rels = NIL;
864 joinrel->non_unique_for_rels = NIL;
865 joinrel->unique_rel = NULL;
866 joinrel->unique_pathkeys = NIL;
867 joinrel->unique_groupclause = NIL;
868 joinrel->baserestrictinfo = NIL;
869 joinrel->baserestrictcost.startup = 0;
870 joinrel->baserestrictcost.per_tuple = 0;
871 joinrel->baserestrict_min_security = UINT_MAX;
872 joinrel->joininfo = NIL;
873 joinrel->has_eclass_joins = false;
874 joinrel->consider_partitionwise_join = false; /* might get changed later */
875 joinrel->agg_info = NULL;
876 joinrel->grouped_rel = NULL;
877 joinrel->parent = NULL;
878 joinrel->top_parent = NULL;
879 joinrel->top_parent_relids = NULL;
880 joinrel->part_scheme = NULL;
881 joinrel->nparts = -1;
882 joinrel->boundinfo = NULL;
883 joinrel->partbounds_merged = false;
884 joinrel->partition_qual = NIL;
885 joinrel->part_rels = NULL;
886 joinrel->live_parts = NULL;
887 joinrel->all_partrels = NULL;
888 joinrel->partexprs = NULL;
889 joinrel->nullable_partexprs = NULL;
890
891 /* Compute information relevant to the foreign relations. */
892 set_foreign_rel_properties(joinrel, outer_rel, inner_rel);
893
894 /*
895 * Fill the joinrel's tlist with just the Vars and PHVs that need to be
896 * output from this join (ie, are needed for higher joinclauses or final
897 * output).
898 *
899 * NOTE: the tlist order for a join rel will depend on which pair of outer
900 * and inner rels we first try to build it from. But the contents should
901 * be the same regardless.
902 */
903 build_joinrel_tlist(root, joinrel, outer_rel, sjinfo, pushed_down_joins,
904 (sjinfo->jointype == JOIN_FULL));
905 build_joinrel_tlist(root, joinrel, inner_rel, sjinfo, pushed_down_joins,
906 (sjinfo->jointype != JOIN_INNER));
907 add_placeholders_to_joinrel(root, joinrel, outer_rel, inner_rel, sjinfo);
908
909 /*
910 * add_placeholders_to_joinrel also took care of adding the ph_lateral
911 * sets of any PlaceHolderVars computed here to direct_lateral_relids, so
912 * now we can finish computing that. This is much like the computation of
913 * the transitively-closed lateral_relids in min_join_parameterization,
914 * except that here we *do* have to consider the added PHVs.
915 */
916 joinrel->direct_lateral_relids =
917 bms_del_members(joinrel->direct_lateral_relids, joinrel->relids);
918
919 /*
920 * Construct restrict and join clause lists for the new joinrel. (The
921 * caller might or might not need the restrictlist, but I need it anyway
922 * for set_joinrel_size_estimates().)
923 */
924 restrictlist = build_joinrel_restrictlist(root, joinrel,
925 outer_rel, inner_rel,
926 sjinfo);
927 if (restrictlist_ptr)
928 *restrictlist_ptr = restrictlist;
929 build_joinrel_joinlist(joinrel, outer_rel, inner_rel);
930
931 /*
932 * This is also the right place to check whether the joinrel has any
933 * pending EquivalenceClass joins.
934 */
 935 joinrel->has_eclass_joins = has_relevant_eclass_joinclause(root, joinrel);
 936
937 /* Store the partition information. */
938 build_joinrel_partition_info(root, joinrel, outer_rel, inner_rel, sjinfo,
939 restrictlist);
940
941 /*
942 * Set estimates of the joinrel's size.
943 */
944 set_joinrel_size_estimates(root, joinrel, outer_rel, inner_rel,
945 sjinfo, restrictlist);
946
947 /*
948 * Set the consider_parallel flag if this joinrel could potentially be
949 * scanned within a parallel worker. If this flag is false for either
950 * inner_rel or outer_rel, then it must be false for the joinrel also.
951 * Even if both are true, there might be parallel-restricted expressions
952 * in the targetlist or quals.
953 *
954 * Note that if there are more than two rels in this relation, they could
955 * be divided between inner_rel and outer_rel in any arbitrary way. We
956 * assume this doesn't matter, because we should hit all the same baserels
957 * and joinclauses while building up to this joinrel no matter which we
958 * take; therefore, we should make the same decision here however we get
959 * here.
960 */
961 if (inner_rel->consider_parallel && outer_rel->consider_parallel &&
962 is_parallel_safe(root, (Node *) restrictlist) &&
963 is_parallel_safe(root, (Node *) joinrel->reltarget->exprs))
964 joinrel->consider_parallel = true;
965
966 /* Add the joinrel to the PlannerInfo. */
967 add_join_rel(root, joinrel);
968
969 /*
970 * Also, if dynamic-programming join search is active, add the new joinrel
971 * to the appropriate sublist. Note: you might think the Assert on number
972 * of members should be for equality, but some of the level 1 rels might
973 * have been joinrels already, so we can only assert <=.
974 */
975 if (root->join_rel_level)
976 {
977 Assert(root->join_cur_level > 0);
978 Assert(root->join_cur_level <= bms_num_members(joinrel->relids));
979 root->join_rel_level[root->join_cur_level] =
980 lappend(root->join_rel_level[root->join_cur_level], joinrel);
981 }
982
983 return joinrel;
984}
Bitmapset * bms_del_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:1160
int bms_num_members(const Bitmapset *a)
Definition: bitmapset.c:750
Bitmapset * bms_union(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:251
bool has_relevant_eclass_joinclause(PlannerInfo *root, RelOptInfo *rel1)
Definition: equivclass.c:3446
List * lappend(List *list, void *datum)
Definition: list.c:339
@ JOIN_FULL
Definition: nodes.h:305
@ JOIN_INNER
Definition: nodes.h:303
@ RELOPT_JOINREL
Definition: pathnodes.h:884
void add_placeholders_to_joinrel(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo)
Definition: placeholder.c:400
static void build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *input_rel, SpecialJoinInfo *sjinfo, List *pushed_down_joins, bool can_null)
Definition: relnode.c:1223
Relids min_join_parameterization(PlannerInfo *root, Relids joinrelids, RelOptInfo *outer_rel, RelOptInfo *inner_rel)
Definition: relnode.c:1145
static List * build_joinrel_restrictlist(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo)
Definition: relnode.c:1408
static void build_joinrel_joinlist(RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel)
Definition: relnode.c:1445
List * statlist
Definition: pathnodes.h:997
List * unique_for_rels
Definition: pathnodes.h:1028
List * non_unique_for_rels
Definition: pathnodes.h:1030
int rel_parallel_workers
Definition: pathnodes.h:1007
Index baserestrict_min_security
Definition: pathnodes.h:1050
JoinType jointype
Definition: pathnodes.h:3121

References add_join_rel(), add_placeholders_to_joinrel(), RelOptInfo::agg_info, RelOptInfo::all_partrels, RelOptInfo::allvisfrac, RelOptInfo::amflags, Assert(), RelOptInfo::baserestrict_min_security, RelOptInfo::baserestrictcost, RelOptInfo::baserestrictinfo, bms_copy(), bms_del_members(), bms_num_members(), bms_union(), build_joinrel_joinlist(), build_joinrel_partition_info(), build_joinrel_restrictlist(), build_joinrel_tlist(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RelOptInfo::consider_parallel, RelOptInfo::consider_param_startup, RelOptInfo::consider_partitionwise_join, RelOptInfo::consider_startup, create_empty_pathtarget(), RelOptInfo::direct_lateral_relids, RelOptInfo::eclass_indexes, PathTarget::exprs, find_join_rel(), RelOptInfo::grouped_rel, RelOptInfo::has_eclass_joins, has_relevant_eclass_joinclause(), RelOptInfo::indexlist, InvalidOid, IS_OTHER_REL, is_parallel_safe(), JOIN_FULL, JOIN_INNER, RelOptInfo::joininfo, SpecialJoinInfo::jointype, lappend(), RelOptInfo::lateral_referencers, RelOptInfo::lateral_relids, RelOptInfo::lateral_vars, RelOptInfo::live_parts, makeNode, RelOptInfo::max_attr, RelOptInfo::min_attr, min_join_parameterization(), NIL, RelOptInfo::non_unique_for_rels, RelOptInfo::notnullattnums, RelOptInfo::nparts, RelOptInfo::nulling_relids, RelOptInfo::pages, RelOptInfo::partbounds_merged, RelOptInfo::partial_pathlist, RelOptInfo::partition_qual, RelOptInfo::pathlist, QualCost::per_tuple, RelOptInfo::ppilist, RelOptInfo::rel_parallel_workers, RelOptInfo::relid, RelOptInfo::relids, RELOPT_JOINREL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, RelOptInfo::rows, RTE_JOIN, RelOptInfo::rtekind, RelOptInfo::serverid, set_foreign_rel_properties(), set_joinrel_size_estimates(), QualCost::startup, RelOptInfo::statlist, RelOptInfo::subplan_params, RelOptInfo::subroot, RelOptInfo::top_parent_relids, RelOptInfo::tuples, RelOptInfo::unique_for_rels, RelOptInfo::unique_groupclause, RelOptInfo::unique_pathkeys, RelOptInfo::unique_rel, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by make_join_rel().

◆ build_simple_grouped_rel()

RelOptInfo * build_simple_grouped_rel ( PlannerInfo *root,
RelOptInfo *rel 
)

Definition at line 433 of file relnode.c.

434{
435 RelOptInfo *grouped_rel;
436 RelAggInfo *agg_info;
437
438 /*
439 * We should have available aggregate expressions and grouping
440 * expressions, otherwise we cannot reach here.
441 */
442 Assert(root->agg_clause_list != NIL);
443 Assert(root->group_expr_list != NIL);
444
445 /* nothing to do for dummy rel */
446 if (IS_DUMMY_REL(rel))
447 return NULL;
448
449 /*
450 * Prepare the information needed to create grouped paths for this simple
451 * relation.
452 */
453 agg_info = create_rel_agg_info(root, rel, true);
454 if (agg_info == NULL)
455 return NULL;
456
457 /*
458 * If grouped paths for the given simple relation are not considered
459 * useful, skip building the grouped relation.
460 */
461 if (!agg_info->agg_useful)
462 return NULL;
463
464 /* Track the set of relids at which partial aggregation is applied */
465 agg_info->apply_agg_at = bms_copy(rel->relids);
466
467 /* build the grouped relation */
468 grouped_rel = build_grouped_rel(root, rel);
469 grouped_rel->reltarget = agg_info->target;
470 grouped_rel->rows = agg_info->grouped_rows;
471 grouped_rel->agg_info = agg_info;
472
473 rel->grouped_rel = grouped_rel;
474
475 return grouped_rel;
476}
#define IS_DUMMY_REL(r)
Definition: pathnodes.h:2194
RelOptInfo * build_grouped_rel(PlannerInfo *root, RelOptInfo *rel)
Definition: relnode.c:484
RelAggInfo * create_rel_agg_info(PlannerInfo *root, RelOptInfo *rel, bool calculate_grouped_rows)
Definition: relnode.c:2655
Relids apply_agg_at
Definition: pathnodes.h:1206
bool agg_useful
Definition: pathnodes.h:1212
Cardinality grouped_rows
Definition: pathnodes.h:1209
struct PathTarget * target
Definition: pathnodes.h:1195

References RelOptInfo::agg_info, RelAggInfo::agg_useful, RelAggInfo::apply_agg_at, Assert(), bms_copy(), build_grouped_rel(), create_rel_agg_info(), RelOptInfo::grouped_rel, RelAggInfo::grouped_rows, IS_DUMMY_REL, NIL, RelOptInfo::relids, RelOptInfo::reltarget, root, RelOptInfo::rows, and RelAggInfo::target.

Referenced by setup_simple_grouped_rels().
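
As a usage illustration only (a hypothetical caller; the real one is setup_simple_grouped_rels(), and the loop variables here are assumptions), the constructor is typically invoked once per plain base relation, and a NULL result simply means no grouped relation is worth keeping for that rel:

    /* Hypothetical sketch: try to build a grouped counterpart per baserel. */
    for (int relid = 1; relid < root->simple_rel_array_size; relid++)
    {
        RelOptInfo *rel = root->simple_rel_array[relid];

        if (rel == NULL || rel->reloptkind != RELOPT_BASEREL)
            continue;           /* empty slot, or not a plain baserel */

        /* Returns NULL for dummy rels or when grouping this rel is not useful;
         * on success the result is also stored in rel->grouped_rel. */
        (void) build_simple_grouped_rel(root, rel);
    }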

◆ build_simple_rel()

RelOptInfo * build_simple_rel ( PlannerInfo *root,
int  relid,
RelOptInfo *parent 
)

Definition at line 206 of file relnode.c.

207{
208 RelOptInfo *rel;
209 RangeTblEntry *rte;
210
211 /* Rel should not exist already */
212 Assert(relid > 0 && relid < root->simple_rel_array_size);
213 if (root->simple_rel_array[relid] != NULL)
214 elog(ERROR, "rel %d already exists", relid);
215
216 /* Fetch RTE for relation */
217 rte = root->simple_rte_array[relid];
218 Assert(rte != NULL);
219
220 rel = makeNode(RelOptInfo);
221 rel->reloptkind = parent ? RELOPT_OTHER_MEMBER_REL : RELOPT_BASEREL;
222 rel->relids = bms_make_singleton(relid);
223 rel->rows = 0;
224 /* cheap startup cost is interesting iff not all tuples to be retrieved */
225 rel->consider_startup = (root->tuple_fraction > 0);
226 rel->consider_param_startup = false; /* might get changed later */
227 rel->consider_parallel = false; /* might get changed later */
228 rel->reltarget = create_empty_pathtarget();
229 rel->pathlist = NIL;
230 rel->ppilist = NIL;
231 rel->partial_pathlist = NIL;
232 rel->cheapest_startup_path = NULL;
233 rel->cheapest_total_path = NULL;
234 rel->cheapest_parameterized_paths = NIL;
235 rel->relid = relid;
236 rel->rtekind = rte->rtekind;
237 /* min_attr, max_attr, attr_needed, attr_widths are set below */
238 rel->notnullattnums = NULL;
239 rel->lateral_vars = NIL;
240 rel->indexlist = NIL;
241 rel->statlist = NIL;
242 rel->pages = 0;
243 rel->tuples = 0;
244 rel->allvisfrac = 0;
245 rel->eclass_indexes = NULL;
246 rel->subroot = NULL;
247 rel->subplan_params = NIL;
248 rel->rel_parallel_workers = -1; /* set up in get_relation_info */
249 rel->amflags = 0;
250 rel->serverid = InvalidOid;
251 if (rte->rtekind == RTE_RELATION)
252 {
253 Assert(parent == NULL ||
254 parent->rtekind == RTE_RELATION ||
255 parent->rtekind == RTE_SUBQUERY);
256
257 /*
258 * For any RELATION rte, we need a userid with which to check
259 * permission access. Baserels simply use their own
260 * RTEPermissionInfo's checkAsUser.
261 *
262 * For otherrels normally there's no RTEPermissionInfo, so we use the
263 * parent's, which normally has one. The exceptional case is that the
264 * parent is a subquery, in which case the otherrel will have its own.
265 */
266 if (rel->reloptkind == RELOPT_BASEREL ||
267 (rel->reloptkind == RELOPT_OTHER_MEMBER_REL &&
268 parent->rtekind == RTE_SUBQUERY))
269 {
270 RTEPermissionInfo *perminfo;
271
272 perminfo = getRTEPermissionInfo(root->parse->rteperminfos, rte);
273 rel->userid = perminfo->checkAsUser;
274 }
275 else
276 rel->userid = parent->userid;
277 }
278 else
279 rel->userid = InvalidOid;
280 rel->useridiscurrent = false;
281 rel->fdwroutine = NULL;
282 rel->fdw_private = NULL;
283 rel->unique_for_rels = NIL;
284 rel->non_unique_for_rels = NIL;
285 rel->unique_rel = NULL;
286 rel->unique_pathkeys = NIL;
287 rel->unique_groupclause = NIL;
288 rel->baserestrictinfo = NIL;
289 rel->baserestrictcost.startup = 0;
290 rel->baserestrictcost.per_tuple = 0;
291 rel->baserestrict_min_security = UINT_MAX;
292 rel->joininfo = NIL;
293 rel->has_eclass_joins = false;
294 rel->consider_partitionwise_join = false; /* might get changed later */
295 rel->agg_info = NULL;
296 rel->grouped_rel = NULL;
297 rel->part_scheme = NULL;
298 rel->nparts = -1;
299 rel->boundinfo = NULL;
300 rel->partbounds_merged = false;
301 rel->partition_qual = NIL;
302 rel->part_rels = NULL;
303 rel->live_parts = NULL;
304 rel->all_partrels = NULL;
305 rel->partexprs = NULL;
306 rel->nullable_partexprs = NULL;
307
308 /*
309 * Pass assorted information down the inheritance hierarchy.
310 */
311 if (parent)
312 {
313 /* We keep back-links to immediate parent and topmost parent. */
314 rel->parent = parent;
315 rel->top_parent = parent->top_parent ? parent->top_parent : parent;
316 rel->top_parent_relids = rel->top_parent->relids;
317
318 /*
319 * A child rel is below the same outer joins as its parent. (We
320 * presume this info was already calculated for the parent.)
321 */
322 rel->nulling_relids = parent->nulling_relids;
323
324 /*
325 * Also propagate lateral-reference information from appendrel parent
326 * rels to their child rels. We intentionally give each child rel the
327 * same minimum parameterization, even though it's quite possible that
328 * some don't reference all the lateral rels. This is because any
329 * append path for the parent will have to have the same
330 * parameterization for every child anyway, and there's no value in
331 * forcing extra reparameterize_path() calls. Similarly, a lateral
332 * reference to the parent prevents use of otherwise-movable join rels
333 * for each child.
334 *
335 * It's possible for child rels to have their own children, in which
336 * case the topmost parent's lateral info propagates all the way down.
337 */
338 rel->direct_lateral_relids = parent->direct_lateral_relids;
339 rel->lateral_relids = parent->lateral_relids;
340 rel->lateral_referencers = parent->lateral_referencers;
341 }
342 else
343 {
344 rel->parent = NULL;
345 rel->top_parent = NULL;
346 rel->top_parent_relids = NULL;
347 rel->nulling_relids = NULL;
348 rel->direct_lateral_relids = NULL;
349 rel->lateral_relids = NULL;
350 rel->lateral_referencers = NULL;
351 }
352
353 /* Check type of rtable entry */
354 switch (rte->rtekind)
355 {
356 case RTE_RELATION:
357 /* Table --- retrieve statistics from the system catalogs */
358 get_relation_info(root, rte->relid, rte->inh, rel);
359 break;
360 case RTE_SUBQUERY:
361 case RTE_FUNCTION:
362 case RTE_TABLEFUNC:
363 case RTE_VALUES:
364 case RTE_CTE:
365 case RTE_NAMEDTUPLESTORE:
366
367 /*
368 * Subquery, function, tablefunc, values list, CTE, or ENR --- set
369 * up attr range and arrays
370 *
371 * Note: 0 is included in range to support whole-row Vars
372 */
373 rel->min_attr = 0;
374 rel->max_attr = list_length(rte->eref->colnames);
375 rel->attr_needed = (Relids *)
376 palloc0((rel->max_attr - rel->min_attr + 1) * sizeof(Relids));
377 rel->attr_widths = (int32 *)
378 palloc0((rel->max_attr - rel->min_attr + 1) * sizeof(int32));
379 break;
380 case RTE_RESULT:
381 /* RTE_RESULT has no columns, nor could it have whole-row Var */
382 rel->min_attr = 0;
383 rel->max_attr = -1;
384 rel->attr_needed = NULL;
385 rel->attr_widths = NULL;
386 break;
387 default:
388 elog(ERROR, "unrecognized RTE kind: %d",
389 (int) rte->rtekind);
390 break;
391 }
392
393 /*
394 * We must apply the partially filled in RelOptInfo before calling
395 * apply_child_basequals due to some transformations within that function
396 * which require the RelOptInfo to be available in the simple_rel_array.
397 */
398 root->simple_rel_array[relid] = rel;
399
400 /*
401 * Apply the parent's quals to the child, with appropriate substitution of
402 * variables. If the resulting clause is constant-FALSE or NULL after
403 * applying transformations, apply_child_basequals returns false to
404 * indicate that scanning this relation won't yield any rows. In this
405 * case, we mark the child as dummy right away. (We must do this
406 * immediately so that pruning works correctly when recursing in
407 * expand_partitioned_rtentry.)
408 */
409 if (parent)
410 {
411 AppendRelInfo *appinfo = root->append_rel_array[relid];
412
413 Assert(appinfo != NULL);
414 if (!apply_child_basequals(root, parent, rel, rte, appinfo))
415 {
416 /*
417 * Restriction clause reduced to constant FALSE or NULL. Mark as
418 * dummy so we won't scan this relation.
419 */
420 mark_dummy_rel(rel);
421 }
422 }
423
424 return rel;
425}
Bitmapset * bms_make_singleton(int x)
Definition: bitmapset.c:216
int32_t int32
Definition: c.h:537
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:226
bool apply_child_basequals(PlannerInfo *root, RelOptInfo *parentrel, RelOptInfo *childrel, RangeTblEntry *childRTE, AppendRelInfo *appinfo)
Definition: inherit.c:838
void mark_dummy_rel(RelOptInfo *rel)
Definition: joinrels.c:1513
void * palloc0(Size size)
Definition: mcxt.c:1395
RTEPermissionInfo * getRTEPermissionInfo(List *rteperminfos, RangeTblEntry *rte)
@ RTE_CTE
Definition: parsenodes.h:1049
@ RTE_NAMEDTUPLESTORE
Definition: parsenodes.h:1050
@ RTE_VALUES
Definition: parsenodes.h:1048
@ RTE_SUBQUERY
Definition: parsenodes.h:1044
@ RTE_RESULT
Definition: parsenodes.h:1051
@ RTE_FUNCTION
Definition: parsenodes.h:1046
@ RTE_TABLEFUNC
Definition: parsenodes.h:1047
@ RTE_RELATION
Definition: parsenodes.h:1043
@ RELOPT_BASEREL
Definition: pathnodes.h:883
@ RELOPT_OTHER_MEMBER_REL
Definition: pathnodes.h:885
static int list_length(const List *l)
Definition: pg_list.h:152
void get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent, RelOptInfo *rel)
Definition: plancat.c:124
RTEKind rtekind
Definition: parsenodes.h:1078

References RelOptInfo::agg_info, RelOptInfo::all_partrels, RelOptInfo::allvisfrac, RelOptInfo::amflags, apply_child_basequals(), Assert(), RelOptInfo::baserestrict_min_security, RelOptInfo::baserestrictcost, RelOptInfo::baserestrictinfo, bms_make_singleton(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RTEPermissionInfo::checkAsUser, RelOptInfo::consider_parallel, RelOptInfo::consider_param_startup, RelOptInfo::consider_partitionwise_join, RelOptInfo::consider_startup, create_empty_pathtarget(), RelOptInfo::direct_lateral_relids, RelOptInfo::eclass_indexes, elog, ERROR, get_relation_info(), getRTEPermissionInfo(), RelOptInfo::grouped_rel, RelOptInfo::has_eclass_joins, RelOptInfo::indexlist, RangeTblEntry::inh, InvalidOid, RelOptInfo::joininfo, RelOptInfo::lateral_referencers, RelOptInfo::lateral_relids, RelOptInfo::lateral_vars, list_length(), RelOptInfo::live_parts, makeNode, mark_dummy_rel(), RelOptInfo::max_attr, RelOptInfo::min_attr, NIL, RelOptInfo::non_unique_for_rels, RelOptInfo::notnullattnums, RelOptInfo::nparts, RelOptInfo::nulling_relids, RelOptInfo::pages, palloc0(), RelOptInfo::partbounds_merged, RelOptInfo::partial_pathlist, RelOptInfo::partition_qual, RelOptInfo::pathlist, QualCost::per_tuple, RelOptInfo::ppilist, RelOptInfo::rel_parallel_workers, RelOptInfo::relid, RelOptInfo::relids, RELOPT_BASEREL, RELOPT_OTHER_MEMBER_REL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, RelOptInfo::rows, RTE_CTE, RTE_FUNCTION, RTE_NAMEDTUPLESTORE, RTE_RELATION, RTE_RESULT, RTE_SUBQUERY, RTE_TABLEFUNC, RTE_VALUES, RangeTblEntry::rtekind, RelOptInfo::rtekind, RelOptInfo::serverid, QualCost::startup, RelOptInfo::statlist, RelOptInfo::subplan_params, RelOptInfo::subroot, RelOptInfo::top_parent_relids, RelOptInfo::tuples, RelOptInfo::unique_for_rels, RelOptInfo::unique_groupclause, RelOptInfo::unique_pathkeys, RelOptInfo::unique_rel, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by add_base_rels_to_query(), expand_appendrel_subquery(), expand_inherited_rtentry(), expand_partitioned_rtentry(), plan_cluster_use_sort(), plan_create_index_workers(), query_planner(), and recurse_set_operations().
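
For illustration, the callers listed above follow two simple patterns (a hedged sketch; varno, childRTindex, and parentrel are assumed variables from the caller's context): plain baserels are built with parent = NULL, while appendrel children pass their parent so that lateral-reference and outer-join information propagates down:

    /* A plain baserel for range-table index varno */
    RelOptInfo *baserel = build_simple_rel(root, varno, NULL);

    /* An "other member" rel for an appendrel child; inherits from parentrel */
    RelOptInfo *childrel = build_simple_rel(root, childRTindex, parentrel);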

◆ calc_nestloop_required_outer()

Relids calc_nestloop_required_outer ( Relids  outerrelids,
Relids  outer_paramrels,
Relids  innerrelids,
Relids  inner_paramrels 
)

Definition at line 2215 of file pathnode.c.

2219{
2220 Relids required_outer;
2221
2222 /* inner_path can require rels from outer path, but not vice versa */
2223 Assert(!bms_overlap(outer_paramrels, innerrelids));
2224 /* easy case if inner path is not parameterized */
2225 if (!inner_paramrels)
2226 return bms_copy(outer_paramrels);
2227 /* else, form the union ... */
2228 required_outer = bms_union(outer_paramrels, inner_paramrels);
2229 /* ... and remove any mention of now-satisfied outer rels */
2230 required_outer = bms_del_members(required_outer,
2231 outerrelids);
2232 return required_outer;
2233}
bool bms_overlap(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:581

References Assert(), bms_copy(), bms_del_members(), bms_overlap(), and bms_union().

Referenced by try_nestloop_path().
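
A small, purely hypothetical example of the bitmapset arithmetic above: if the outer path is parameterized by rel 3 and the inner path by rels {1, 3}, while the outer side of the join supplies rel 1, the union {1, 3} minus the outer relids {1} leaves {3} as the nestloop's required outer rels:

    /* Hypothetical relid sets, for illustration only */
    Relids outerrelids = bms_make_singleton(1);
    Relids outer_paramrels = bms_make_singleton(3);
    Relids innerrelids = bms_make_singleton(2);
    Relids inner_paramrels = bms_add_member(bms_make_singleton(1), 3);

    /* Yields {3}: rel 1 is satisfied by the outer side of the join */
    Relids required_outer = calc_nestloop_required_outer(outerrelids,
                                                         outer_paramrels,
                                                         innerrelids,
                                                         inner_paramrels);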

◆ calc_non_nestloop_required_outer()

Relids calc_non_nestloop_required_outer ( Path *outer_path,
Path *inner_path 
)

Definition at line 2242 of file pathnode.c.

2243{
2244 Relids outer_paramrels = PATH_REQ_OUTER(outer_path);
2245 Relids inner_paramrels = PATH_REQ_OUTER(inner_path);
2246 Relids innerrelids PG_USED_FOR_ASSERTS_ONLY;
2247 Relids outerrelids PG_USED_FOR_ASSERTS_ONLY;
2248 Relids required_outer;
2249
2250 /*
2251 * Any parameterization of the input paths refers to topmost parents of
2252 * the relevant relations, because reparameterize_path_by_child() hasn't
2253 * been called yet. So we must consider topmost parents of the relations
2254 * being joined, too, while checking for disallowed parameterization
2255 * cases.
2256 */
2257 if (inner_path->parent->top_parent_relids)
2258 innerrelids = inner_path->parent->top_parent_relids;
2259 else
2260 innerrelids = inner_path->parent->relids;
2261
2262 if (outer_path->parent->top_parent_relids)
2263 outerrelids = outer_path->parent->top_parent_relids;
2264 else
2265 outerrelids = outer_path->parent->relids;
2266
2267 /* neither path can require rels from the other */
2268 Assert(!bms_overlap(outer_paramrels, innerrelids));
2269 Assert(!bms_overlap(inner_paramrels, outerrelids));
2270 /* form the union ... */
2271 required_outer = bms_union(outer_paramrels, inner_paramrels);
2272 /* we do not need an explicit test for empty; bms_union gets it right */
2273 return required_outer;
2274}
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:228

References Assert(), bms_overlap(), bms_union(), PATH_REQ_OUTER, and PG_USED_FOR_ASSERTS_ONLY.

Referenced by try_hashjoin_path(), and try_mergejoin_path().

◆ compare_fractional_path_costs()

int compare_fractional_path_costs ( Path *path1,
Path *path2,
double  fraction 
)

Definition at line 125 of file pathnode.c.

127{
128 Cost cost1,
129 cost2;
130
131 /* Number of disabled nodes, if different, trumps all else. */
132 if (unlikely(path1->disabled_nodes != path2->disabled_nodes))
133 {
134 if (path1->disabled_nodes < path2->disabled_nodes)
135 return -1;
136 else
137 return +1;
138 }
139
140 if (fraction <= 0.0 || fraction >= 1.0)
141 return compare_path_costs(path1, path2, TOTAL_COST);
142 cost1 = path1->startup_cost +
143 fraction * (path1->total_cost - path1->startup_cost);
144 cost2 = path2->startup_cost +
145 fraction * (path2->total_cost - path2->startup_cost);
146 if (cost1 < cost2)
147 return -1;
148 if (cost1 > cost2)
149 return +1;
150 return 0;
151}
int compare_path_costs(Path *path1, Path *path2, CostSelector criterion)
Definition: pathnode.c:70
@ TOTAL_COST
Definition: pathnodes.h:38

References compare_path_costs(), Path::disabled_nodes, Path::startup_cost, TOTAL_COST, Path::total_cost, and unlikely.

Referenced by get_cheapest_fractional_path(), and get_cheapest_fractional_path_for_pathkeys().
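
A worked example of the interpolation above, with made-up numbers, for the case where only about 10% of the result is needed (fraction = 0.1):

    /*
     * Path A: startup 0,  total 1000  ->  0  + 0.1 * (1000 - 0)  = 100
     * Path B: startup 90, total 200   ->  90 + 0.1 * (200 - 90)  = 101
     *
     * compare_fractional_path_costs(path_a, path_b, 0.1) returns -1, so
     * path A is preferred for fetching 10% of the rows, even though path B
     * would win a comparison on total cost alone.
     */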

◆ compare_path_costs()

int compare_path_costs ( Path *path1,
Path *path2,
CostSelector  criterion 
)

Definition at line 70 of file pathnode.c.

71{
72 /* Number of disabled nodes, if different, trumps all else. */
73 if (unlikely(path1->disabled_nodes != path2->disabled_nodes))
74 {
75 if (path1->disabled_nodes < path2->disabled_nodes)
76 return -1;
77 else
78 return +1;
79 }
80
81 if (criterion == STARTUP_COST)
82 {
83 if (path1->startup_cost < path2->startup_cost)
84 return -1;
85 if (path1->startup_cost > path2->startup_cost)
86 return +1;
87
88 /*
89 * If paths have the same startup cost (not at all unlikely), order
90 * them by total cost.
91 */
92 if (path1->total_cost < path2->total_cost)
93 return -1;
94 if (path1->total_cost > path2->total_cost)
95 return +1;
96 }
97 else
98 {
99 if (path1->total_cost < path2->total_cost)
100 return -1;
101 if (path1->total_cost > path2->total_cost)
102 return +1;
103
104 /*
105 * If paths have the same total cost, order them by startup cost.
106 */
107 if (path1->startup_cost < path2->startup_cost)
108 return -1;
109 if (path1->startup_cost > path2->startup_cost)
110 return +1;
111 }
112 return 0;
113}
@ STARTUP_COST
Definition: pathnodes.h:38

References Path::disabled_nodes, STARTUP_COST, Path::startup_cost, Path::total_cost, and unlikely.

Referenced by append_startup_cost_compare(), append_total_cost_compare(), compare_fractional_path_costs(), generate_mergejoin_paths(), get_cheapest_parameterized_child_path(), get_cheapest_path_for_pathkeys(), and set_cheapest().

◆ create_agg_path()

AggPath * create_agg_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
PathTarget *target,
AggStrategy  aggstrategy,
AggSplit  aggsplit,
List *groupClause,
List *qual,
const AggClauseCosts *aggcosts,
double  numGroups 
)

Definition at line 2995 of file pathnode.c.

3005{
3006 AggPath *pathnode = makeNode(AggPath);
3007
3008 pathnode->path.pathtype = T_Agg;
3009 pathnode->path.parent = rel;
3010 pathnode->path.pathtarget = target;
3011 pathnode->path.param_info = subpath->param_info;
3012 pathnode->path.parallel_aware = false;
3013 pathnode->path.parallel_safe = rel->consider_parallel &&
3014 subpath->parallel_safe;
3015 pathnode->path.parallel_workers = subpath->parallel_workers;
3016
3017 if (aggstrategy == AGG_SORTED)
3018 {
3019 /*
3020 * Attempt to preserve the order of the subpath. Additional pathkeys
3021 * may have been added in adjust_group_pathkeys_for_groupagg() to
3022 * support ORDER BY / DISTINCT aggregates. Pathkeys added there
3023 * belong to columns within the aggregate function, so we must strip
3024 * these additional pathkeys off as those columns are unavailable
3025 * above the aggregate node.
3026 */
3027 if (list_length(subpath->pathkeys) > root->num_groupby_pathkeys)
3028 pathnode->path.pathkeys = list_copy_head(subpath->pathkeys,
3029 root->num_groupby_pathkeys);
3030 else
3031 pathnode->path.pathkeys = subpath->pathkeys; /* preserves order */
3032 }
3033 else
3034 pathnode->path.pathkeys = NIL; /* output is unordered */
3035
3036 pathnode->subpath = subpath;
3037
3038 pathnode->aggstrategy = aggstrategy;
3039 pathnode->aggsplit = aggsplit;
3040 pathnode->numGroups = numGroups;
3041 pathnode->transitionSpace = aggcosts ? aggcosts->transitionSpace : 0;
3042 pathnode->groupClause = groupClause;
3043 pathnode->qual = qual;
3044
3045 cost_agg(&pathnode->path, root,
3046 aggstrategy, aggcosts,
3047 list_length(groupClause), numGroups,
3048 qual,
3049 subpath->disabled_nodes,
3050 subpath->startup_cost, subpath->total_cost,
3051 subpath->rows, subpath->pathtarget->width);
3052
3053 /* add tlist eval cost for each output row */
3054 pathnode->path.startup_cost += target->cost.startup;
3055 pathnode->path.total_cost += target->cost.startup +
3056 target->cost.per_tuple * pathnode->path.rows;
3057
3058 return pathnode;
3059}
void cost_agg(Path *path, PlannerInfo *root, AggStrategy aggstrategy, const AggClauseCosts *aggcosts, int numGroupCols, double numGroups, List *quals, int disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double input_tuples, double input_width)
Definition: costsize.c:2704
List * list_copy_head(const List *oldlist, int len)
Definition: list.c:1593
Datum subpath(PG_FUNCTION_ARGS)
Definition: ltree_op.c:311
@ AGG_SORTED
Definition: nodes.h:365
Size transitionSpace
Definition: pathnodes.h:62
Path * subpath
Definition: pathnodes.h:2484
Cardinality numGroups
Definition: pathnodes.h:2487
AggSplit aggsplit
Definition: pathnodes.h:2486
List * groupClause
Definition: pathnodes.h:2489
uint64 transitionSpace
Definition: pathnodes.h:2488
AggStrategy aggstrategy
Definition: pathnodes.h:2485
Path path
Definition: pathnodes.h:2483
List * qual
Definition: pathnodes.h:2490
NodeTag pathtype
Definition: pathnodes.h:1873
int parallel_workers
Definition: pathnodes.h:1904
bool parallel_aware
Definition: pathnodes.h:1900

References AGG_SORTED, AggPath::aggsplit, AggPath::aggstrategy, RelOptInfo::consider_parallel, PathTarget::cost, cost_agg(), AggPath::groupClause, list_copy_head(), list_length(), makeNode, NIL, AggPath::numGroups, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, AggPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, AggPath::qual, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), AggPath::subpath, Path::total_cost, AggClauseCosts::transitionSpace, and AggPath::transitionSpace.

Referenced by add_paths_to_grouping_rel(), create_final_distinct_paths(), create_final_unique_paths(), create_partial_distinct_paths(), create_partial_grouping_paths(), create_partial_unique_paths(), generate_grouped_paths(), and generate_union_paths().
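
For illustration, a caller along the lines of add_paths_to_grouping_rel() might build a hashed aggregation path over the cheapest input path roughly as sketched below (grouped_rel, input_rel, groupClause, havingQual, agg_costs, and dNumGroups are assumed to be available from the caller's context; AGGSPLIT_SIMPLE is ordinary, non-split aggregation):

    Path *cheapest_path = input_rel->cheapest_total_path;

    add_path(grouped_rel, (Path *)
             create_agg_path(root,
                             grouped_rel,
                             cheapest_path,
                             grouped_rel->reltarget,
                             AGG_HASHED,
                             AGGSPLIT_SIMPLE,
                             groupClause,   /* assumed list of SortGroupClauses */
                             havingQual,    /* assumed HAVING quals, or NIL */
                             agg_costs,
                             dNumGroups));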

◆ create_append_path()

AppendPath * create_append_path ( PlannerInfo *root,
RelOptInfo *rel,
List *subpaths,
List *partial_subpaths,
List *pathkeys,
Relids  required_outer,
int  parallel_workers,
bool  parallel_aware,
double  rows 
)

Definition at line 1301 of file pathnode.c.

1307{
1308 AppendPath *pathnode = makeNode(AppendPath);
1309 ListCell *l;
1310
1311 Assert(!parallel_aware || parallel_workers > 0);
1312
1313 pathnode->path.pathtype = T_Append;
1314 pathnode->path.parent = rel;
1315 pathnode->path.pathtarget = rel->reltarget;
1316
1317 /*
1318 * If this is for a baserel (not a join or non-leaf partition), we prefer
1319 * to apply get_baserel_parampathinfo to construct a full ParamPathInfo
1320 * for the path. This supports building a Memoize path atop this path,
1321 * and if this is a partitioned table the info may be useful for run-time
1322 * pruning (cf make_partition_pruneinfo()).
1323 *
1324 * However, if we don't have "root" then that won't work and we fall back
1325 * on the simpler get_appendrel_parampathinfo. There's no point in doing
1326 * the more expensive thing for a dummy path, either.
1327 */
1328 if (rel->reloptkind == RELOPT_BASEREL && root && subpaths != NIL)
1329 pathnode->path.param_info = get_baserel_parampathinfo(root,
1330 rel,
1331 required_outer);
1332 else
1333 pathnode->path.param_info = get_appendrel_parampathinfo(rel,
1334 required_outer);
1335
1336 pathnode->path.parallel_aware = parallel_aware;
1337 pathnode->path.parallel_safe = rel->consider_parallel;
1338 pathnode->path.parallel_workers = parallel_workers;
1339 pathnode->path.pathkeys = pathkeys;
1340
1341 /*
1342 * For parallel append, non-partial paths are sorted by descending total
1343 * costs. That way, the total time to finish all non-partial paths is
1344 * minimized. Also, the partial paths are sorted by descending startup
1345 * costs. There may be some paths that require to do startup work by a
1346 * single worker. In such case, it's better for workers to choose the
1347 * expensive ones first, whereas the leader should choose the cheapest
1348 * startup plan.
1349 */
1350 if (pathnode->path.parallel_aware)
1351 {
1352 /*
1353 * We mustn't fiddle with the order of subpaths when the Append has
1354 * pathkeys. The order they're listed in is critical to keeping the
1355 * pathkeys valid.
1356 */
1357 Assert(pathkeys == NIL);
1358
1359 list_sort(subpaths, append_total_cost_compare);
1360 list_sort(partial_subpaths, append_startup_cost_compare);
1361 }
1362 pathnode->first_partial_path = list_length(subpaths);
1363 pathnode->subpaths = list_concat(subpaths, partial_subpaths);
1364
1365 /*
1366 * Apply query-wide LIMIT if known and path is for sole base relation.
1367 * (Handling this at this low level is a bit klugy.)
1368 */
1369 if (root != NULL && bms_equal(rel->relids, root->all_query_rels))
1370 pathnode->limit_tuples = root->limit_tuples;
1371 else
1372 pathnode->limit_tuples = -1.0;
1373
1374 foreach(l, pathnode->subpaths)
1375 {
1376 Path *subpath = (Path *) lfirst(l);
1377
1378 pathnode->path.parallel_safe = pathnode->path.parallel_safe &&
1379 subpath->parallel_safe;
1380
1381 /* All child paths must have same parameterization */
1382 Assert(bms_equal(PATH_REQ_OUTER(subpath), required_outer));
1383 }
1384
1385 Assert(!parallel_aware || pathnode->path.parallel_safe);
1386
1387 /*
1388 * If there's exactly one child path then the output of the Append is
1389 * necessarily ordered the same as the child's, so we can inherit the
1390 * child's pathkeys if any, overriding whatever the caller might've said.
1391 * Furthermore, if the child's parallel awareness matches the Append's,
1392 * then the Append is a no-op and will be discarded later (in setrefs.c).
1393 * Then we can inherit the child's size and cost too, effectively charging
1394 * zero for the Append. Otherwise, we must do the normal costsize
1395 * calculation.
1396 */
1397 if (list_length(pathnode->subpaths) == 1)
1398 {
1399 Path *child = (Path *) linitial(pathnode->subpaths);
1400
1401 if (child->parallel_aware == parallel_aware)
1402 {
1403 pathnode->path.rows = child->rows;
1404 pathnode->path.startup_cost = child->startup_cost;
1405 pathnode->path.total_cost = child->total_cost;
1406 }
1407 else
1408 cost_append(pathnode, root);
1409 /* Must do this last, else cost_append complains */
1410 pathnode->path.pathkeys = child->pathkeys;
1411 }
1412 else
1413 cost_append(pathnode, root);
1414
1415 /* If the caller provided a row estimate, override the computed value. */
1416 if (rows >= 0)
1417 pathnode->path.rows = rows;
1418
1419 return pathnode;
1420}
void cost_append(AppendPath *apath, PlannerInfo *root)
Definition: costsize.c:2240
void list_sort(List *list, list_sort_comparator cmp)
Definition: list.c:1674
List * list_concat(List *list1, const List *list2)
Definition: list.c:561
static int append_startup_cost_compare(const ListCell *a, const ListCell *b)
Definition: pathnode.c:1454
static int append_total_cost_compare(const ListCell *a, const ListCell *b)
Definition: pathnode.c:1432
#define linitial(l)
Definition: pg_list.h:178
ParamPathInfo * get_appendrel_parampathinfo(RelOptInfo *appendrel, Relids required_outer)
Definition: relnode.c:1978
ParamPathInfo * get_baserel_parampathinfo(PlannerInfo *root, RelOptInfo *baserel, Relids required_outer)
Definition: relnode.c:1667
int first_partial_path
Definition: pathnodes.h:2182
Cardinality limit_tuples
Definition: pathnodes.h:2183
List * subpaths
Definition: pathnodes.h:2180

References append_startup_cost_compare(), append_total_cost_compare(), Assert(), bms_equal(), RelOptInfo::consider_parallel, cost_append(), AppendPath::first_partial_path, get_appendrel_parampathinfo(), get_baserel_parampathinfo(), lfirst, AppendPath::limit_tuples, linitial, list_concat(), list_length(), list_sort(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, AppendPath::path, PATH_REQ_OUTER, Path::pathkeys, Path::pathtype, RelOptInfo::relids, RELOPT_BASEREL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, Path::rows, Path::startup_cost, subpath(), AppendPath::subpaths, and Path::total_cost.

Referenced by add_paths_to_append_rel(), create_degenerate_grouping_paths(), generate_nonunion_paths(), generate_orderedappend_paths(), generate_union_paths(), mark_dummy_rel(), reparameterize_path(), and set_dummy_rel_pathlist().
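
A hedged sketch of the usual call pattern (childpaths is an assumed list of already-built child Paths): passing rows = -1 lets cost_append() compute the row estimate, while a non-negative value overrides it, as the code above shows.

    /* Non-parallel Append over one cheapest path per live child rel */
    add_path(rel, (Path *)
             create_append_path(root, rel,
                                childpaths,  /* assumed list of child Paths */
                                NIL,         /* no partial subpaths */
                                NIL,         /* no pathkeys */
                                NULL,        /* required_outer */
                                0,           /* parallel_workers */
                                false,       /* parallel_aware */
                                -1));        /* let cost_append() estimate rows */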

◆ create_bitmap_and_path()

BitmapAndPath * create_bitmap_and_path ( PlannerInfo *root,
RelOptInfo *rel,
List *bitmapquals 
)

Definition at line 1131 of file pathnode.c.

1134{
1135 BitmapAndPath *pathnode = makeNode(BitmapAndPath);
1136 Relids required_outer = NULL;
1137 ListCell *lc;
1138
1139 pathnode->path.pathtype = T_BitmapAnd;
1140 pathnode->path.parent = rel;
1141 pathnode->path.pathtarget = rel->reltarget;
1142
1143 /*
1144 * Identify the required outer rels as the union of what the child paths
1145 * depend on. (Alternatively, we could insist that the caller pass this
1146 * in, but it's more convenient and reliable to compute it here.)
1147 */
1148 foreach(lc, bitmapquals)
1149 {
1150 Path *bitmapqual = (Path *) lfirst(lc);
1151
1152 required_outer = bms_add_members(required_outer,
1153 PATH_REQ_OUTER(bitmapqual));
1154 }
1155 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1156 required_outer);
1157
1158 /*
1159 * Currently, a BitmapHeapPath, BitmapAndPath, or BitmapOrPath will be
1160 * parallel-safe if and only if rel->consider_parallel is set. So, we can
1161 * set the flag for this path based only on the relation-level flag,
1162 * without actually iterating over the list of children.
1163 */
1164 pathnode->path.parallel_aware = false;
1165 pathnode->path.parallel_safe = rel->consider_parallel;
1166 pathnode->path.parallel_workers = 0;
1167
1168 pathnode->path.pathkeys = NIL; /* always unordered */
1169
1170 pathnode->bitmapquals = bitmapquals;
1171
1172 /* this sets bitmapselectivity as well as the regular cost fields: */
1173 cost_bitmap_and_node(pathnode, root);
1174
1175 return pathnode;
1176}
Bitmapset * bms_add_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:916
void cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
Definition: costsize.c:1139
List * bitmapquals
Definition: pathnodes.h:2045

References BitmapAndPath::bitmapquals, bms_add_members(), RelOptInfo::consider_parallel, cost_bitmap_and_node(), get_baserel_parampathinfo(), lfirst, makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, BitmapAndPath::path, PATH_REQ_OUTER, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by bitmap_and_cost_est(), and choose_bitmap_and().

◆ create_bitmap_heap_path()

BitmapHeapPath * create_bitmap_heap_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *bitmapqual,
Relids  required_outer,
double  loop_count,
int  parallel_degree 
)

Definition at line 1098 of file pathnode.c.

1104{
1105 BitmapHeapPath *pathnode = makeNode(BitmapHeapPath);
1106
1107 pathnode->path.pathtype = T_BitmapHeapScan;
1108 pathnode->path.parent = rel;
1109 pathnode->path.pathtarget = rel->reltarget;
1110 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1111 required_outer);
1112 pathnode->path.parallel_aware = (parallel_degree > 0);
1113 pathnode->path.parallel_safe = rel->consider_parallel;
1114 pathnode->path.parallel_workers = parallel_degree;
1115 pathnode->path.pathkeys = NIL; /* always unordered */
1116
1117 pathnode->bitmapqual = bitmapqual;
1118
1119 cost_bitmap_heap_scan(&pathnode->path, root, rel,
1120 pathnode->path.param_info,
1121 bitmapqual, loop_count);
1122
1123 return pathnode;
1124}
void cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info, Path *bitmapqual, double loop_count)
Definition: costsize.c:997
Path * bitmapqual
Definition: pathnodes.h:2033

References BitmapHeapPath::bitmapqual, RelOptInfo::consider_parallel, cost_bitmap_heap_scan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, BitmapHeapPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by create_index_paths(), create_partial_bitmap_paths(), and reparameterize_path().
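
As a usage sketch (ipath1 and ipath2 are assumed IndexPaths built for the same relation; the required_outer, loop_count, and parallel_degree values shown give a plain, unparameterized, non-parallel scan), an ANDed pair of bitmap index scans is wrapped in a heap-fetch path roughly like this:

    List *bitmapquals = list_make2(ipath1, ipath2);   /* assumed IndexPaths */
    BitmapAndPath *andpath = create_bitmap_and_path(root, rel, bitmapquals);

    add_path(rel, (Path *)
             create_bitmap_heap_path(root, rel,
                                     (Path *) andpath,
                                     NULL,   /* required_outer */
                                     1.0,    /* loop_count */
                                     0));    /* parallel_degree */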

◆ create_bitmap_or_path()

BitmapOrPath * create_bitmap_or_path ( PlannerInfo *root,
RelOptInfo *rel,
List *bitmapquals 
)

Definition at line 1183 of file pathnode.c.

1186{
1187 BitmapOrPath *pathnode = makeNode(BitmapOrPath);
1188 Relids required_outer = NULL;
1189 ListCell *lc;
1190
1191 pathnode->path.pathtype = T_BitmapOr;
1192 pathnode->path.parent = rel;
1193 pathnode->path.pathtarget = rel->reltarget;
1194
1195 /*
1196 * Identify the required outer rels as the union of what the child paths
1197 * depend on. (Alternatively, we could insist that the caller pass this
1198 * in, but it's more convenient and reliable to compute it here.)
1199 */
1200 foreach(lc, bitmapquals)
1201 {
1202 Path *bitmapqual = (Path *) lfirst(lc);
1203
1204 required_outer = bms_add_members(required_outer,
1205 PATH_REQ_OUTER(bitmapqual));
1206 }
1207 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1208 required_outer);
1209
1210 /*
1211 * Currently, a BitmapHeapPath, BitmapAndPath, or BitmapOrPath will be
1212 * parallel-safe if and only if rel->consider_parallel is set. So, we can
1213 * set the flag for this path based only on the relation-level flag,
1214 * without actually iterating over the list of children.
1215 */
1216 pathnode->path.parallel_aware = false;
1217 pathnode->path.parallel_safe = rel->consider_parallel;
1218 pathnode->path.parallel_workers = 0;
1219
1220 pathnode->path.pathkeys = NIL; /* always unordered */
1221
1222 pathnode->bitmapquals = bitmapquals;
1223
1224 /* this sets bitmapselectivity as well as the regular cost fields: */
1225 cost_bitmap_or_node(pathnode, root);
1226
1227 return pathnode;
1228}
void cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
Definition: costsize.c:1184
List * bitmapquals
Definition: pathnodes.h:2058

References BitmapOrPath::bitmapquals, bms_add_members(), RelOptInfo::consider_parallel, cost_bitmap_or_node(), get_baserel_parampathinfo(), lfirst, makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, BitmapOrPath::path, PATH_REQ_OUTER, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by generate_bitmap_or_paths().

◆ create_ctescan_path()

Path * create_ctescan_path ( PlannerInfo *root,
RelOptInfo *rel,
List *pathkeys,
Relids  required_outer 
)

Definition at line 1955 of file pathnode.c.

1957{
1958 Path *pathnode = makeNode(Path);
1959
1960 pathnode->pathtype = T_CteScan;
1961 pathnode->parent = rel;
1962 pathnode->pathtarget = rel->reltarget;
1963 pathnode->param_info = get_baserel_parampathinfo(root, rel,
1964 required_outer);
1965 pathnode->parallel_aware = false;
1966 pathnode->parallel_safe = rel->consider_parallel;
1967 pathnode->parallel_workers = 0;
1968 pathnode->pathkeys = pathkeys;
1969
1970 cost_ctescan(pathnode, root, rel, pathnode->param_info);
1971
1972 return pathnode;
1973}
void cost_ctescan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1698

References RelOptInfo::consider_parallel, cost_ctescan(), get_baserel_parampathinfo(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_cte_pathlist().

◆ create_foreign_join_path()

ForeignPath * create_foreign_join_path ( PlannerInfo *root,
RelOptInfo *rel,
PathTarget *target,
double  rows,
int  disabled_nodes,
Cost  startup_cost,
Cost  total_cost,
List *pathkeys,
Relids  required_outer,
Path *fdw_outerpath,
List *fdw_restrictinfo,
List *fdw_private 
)

Definition at line 2114 of file pathnode.c.

2123{
2124 ForeignPath *pathnode = makeNode(ForeignPath);
2125
2126 /*
2127 * We should use get_joinrel_parampathinfo to handle parameterized paths,
2128 * but the API of this function doesn't support it, and existing
2129 * extensions aren't yet trying to build such paths anyway. For the
2130 * moment just throw an error if someone tries it; eventually we should
2131 * revisit this.
2132 */
2133 if (!bms_is_empty(required_outer) || !bms_is_empty(rel->lateral_relids))
2134 elog(ERROR, "parameterized foreign joins are not supported yet");
2135
2136 pathnode->path.pathtype = T_ForeignScan;
2137 pathnode->path.parent = rel;
2138 pathnode->path.pathtarget = target ? target : rel->reltarget;
2139 pathnode->path.param_info = NULL; /* XXX see above */
2140 pathnode->path.parallel_aware = false;
2141 pathnode->path.parallel_safe = rel->consider_parallel;
2142 pathnode->path.parallel_workers = 0;
2143 pathnode->path.rows = rows;
2144 pathnode->path.disabled_nodes = disabled_nodes;
2145 pathnode->path.startup_cost = startup_cost;
2146 pathnode->path.total_cost = total_cost;
2147 pathnode->path.pathkeys = pathkeys;
2148
2149 pathnode->fdw_outerpath = fdw_outerpath;
2150 pathnode->fdw_restrictinfo = fdw_restrictinfo;
2151 pathnode->fdw_private = fdw_private;
2152
2153 return pathnode;
2154}
#define bms_is_empty(a)
Definition: bitmapset.h:118
Path * fdw_outerpath
Definition: pathnodes.h:2118
List * fdw_restrictinfo
Definition: pathnodes.h:2119
List * fdw_private
Definition: pathnodes.h:2120

References bms_is_empty, RelOptInfo::consider_parallel, Path::disabled_nodes, elog, ERROR, ForeignPath::fdw_outerpath, ForeignPath::fdw_private, ForeignPath::fdw_restrictinfo, RelOptInfo::lateral_relids, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ForeignPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, Path::rows, Path::startup_cost, and Path::total_cost.

Referenced by add_paths_with_pathkeys_for_rel(), and postgresGetForeignJoinPaths().

◆ create_foreign_upper_path()

ForeignPath * create_foreign_upper_path ( PlannerInfo *root,
RelOptInfo *rel,
PathTarget *target,
double  rows,
int  disabled_nodes,
Cost  startup_cost,
Cost  total_cost,
List *pathkeys,
Path *fdw_outerpath,
List *fdw_restrictinfo,
List *fdw_private 
)

Definition at line 2168 of file pathnode.c.

2176{
2177 ForeignPath *pathnode = makeNode(ForeignPath);
2178
2179 /*
2180 * Upper relations should never have any lateral references, since joining
2181 * is complete.
2182 */
2183 Assert(bms_is_empty(rel->lateral_relids));
2184
2185 pathnode->path.pathtype = T_ForeignScan;
2186 pathnode->path.parent = rel;
2187 pathnode->path.pathtarget = target ? target : rel->reltarget;
2188 pathnode->path.param_info = NULL;
2189 pathnode->path.parallel_aware = false;
2190 pathnode->path.parallel_safe = rel->consider_parallel;
2191 pathnode->path.parallel_workers = 0;
2192 pathnode->path.rows = rows;
2193 pathnode->path.disabled_nodes = disabled_nodes;
2194 pathnode->path.startup_cost = startup_cost;
2195 pathnode->path.total_cost = total_cost;
2196 pathnode->path.pathkeys = pathkeys;
2197
2198 pathnode->fdw_outerpath = fdw_outerpath;
2199 pathnode->fdw_restrictinfo = fdw_restrictinfo;
2200 pathnode->fdw_private = fdw_private;
2201
2202 return pathnode;
2203}

References Assert(), bms_is_empty, RelOptInfo::consider_parallel, Path::disabled_nodes, ForeignPath::fdw_outerpath, ForeignPath::fdw_private, ForeignPath::fdw_restrictinfo, RelOptInfo::lateral_relids, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ForeignPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, Path::rows, Path::startup_cost, and Path::total_cost.

Referenced by add_foreign_final_paths(), add_foreign_grouping_paths(), and add_foreign_ordered_paths().

◆ create_foreignscan_path()

ForeignPath * create_foreignscan_path ( PlannerInfo *root,
RelOptInfo *rel,
PathTarget *target,
double  rows,
int  disabled_nodes,
Cost  startup_cost,
Cost  total_cost,
List *pathkeys,
Relids  required_outer,
Path *fdw_outerpath,
List *fdw_restrictinfo,
List *fdw_private 
)

Definition at line 2066 of file pathnode.c.

2075{
2076 ForeignPath *pathnode = makeNode(ForeignPath);
2077
2078 /* Historically some FDWs were confused about when to use this */
2079 Assert(IS_SIMPLE_REL(rel));
2080
2081 pathnode->path.pathtype = T_ForeignScan;
2082 pathnode->path.parent = rel;
2083 pathnode->path.pathtarget = target ? target : rel->reltarget;
2084 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
2085 required_outer);
2086 pathnode->path.parallel_aware = false;
2087 pathnode->path.parallel_safe = rel->consider_parallel;
2088 pathnode->path.parallel_workers = 0;
2089 pathnode->path.rows = rows;
2090 pathnode->path.disabled_nodes = disabled_nodes;
2091 pathnode->path.startup_cost = startup_cost;
2092 pathnode->path.total_cost = total_cost;
2093 pathnode->path.pathkeys = pathkeys;
2094
2095 pathnode->fdw_outerpath = fdw_outerpath;
2096 pathnode->fdw_restrictinfo = fdw_restrictinfo;
2097 pathnode->fdw_private = fdw_private;
2098
2099 return pathnode;
2100}
#define IS_SIMPLE_REL(rel)
Definition: pathnodes.h:895

References Assert(), RelOptInfo::consider_parallel, Path::disabled_nodes, ForeignPath::fdw_outerpath, ForeignPath::fdw_private, ForeignPath::fdw_restrictinfo, get_baserel_parampathinfo(), IS_SIMPLE_REL, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ForeignPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, root, Path::rows, Path::startup_cost, and Path::total_cost.

Referenced by add_paths_with_pathkeys_for_rel(), fileGetForeignPaths(), and postgresGetForeignPaths().
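
As a sketch only (a minimal, hypothetical GetForeignPaths callback, not any particular FDW's implementation, with purely illustrative cost numbers), an FDW estimates costs itself, passes them to this constructor, and registers the result with add_path():

    static void
    myGetForeignPaths(PlannerInfo *root, RelOptInfo *baserel, Oid foreigntableid)
    {
        Cost    startup_cost = 10.0;    /* hypothetical estimate */
        Cost    total_cost = startup_cost + baserel->rows * cpu_tuple_cost;

        add_path(baserel, (Path *)
                 create_foreignscan_path(root, baserel,
                                         NULL,          /* default pathtarget */
                                         baserel->rows,
                                         0,             /* disabled_nodes */
                                         startup_cost,
                                         total_cost,
                                         NIL,           /* no pathkeys */
                                         NULL,          /* no required_outer */
                                         NULL,          /* no fdw_outerpath */
                                         NIL,           /* no fdw_restrictinfo */
                                         NIL));         /* no fdw_private */
    }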

◆ create_functionscan_path()

Path * create_functionscan_path ( PlannerInfo *root,
RelOptInfo *rel,
List *pathkeys,
Relids  required_outer 
)

Definition at line 1877 of file pathnode.c.

1879{
1880 Path *pathnode = makeNode(Path);
1881
1882 pathnode->pathtype = T_FunctionScan;
1883 pathnode->parent = rel;
1884 pathnode->pathtarget = rel->reltarget;
1885 pathnode->param_info = get_baserel_parampathinfo(root, rel,
1886 required_outer);
1887 pathnode->parallel_aware = false;
1888 pathnode->parallel_safe = rel->consider_parallel;
1889 pathnode->parallel_workers = 0;
1890 pathnode->pathkeys = pathkeys;
1891
1892 cost_functionscan(pathnode, root, rel, pathnode->param_info);
1893
1894 return pathnode;
1895}
void cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1528

References RelOptInfo::consider_parallel, cost_functionscan(), get_baserel_parampathinfo(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_function_pathlist().

◆ create_gather_merge_path()

GatherMergePath * create_gather_merge_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
PathTarget *target,
List *pathkeys,
Relids  required_outer,
double *rows 
)

Definition at line 1751 of file pathnode.c.

1754{
1755 GatherMergePath *pathnode = makeNode(GatherMergePath);
1756 int input_disabled_nodes = 0;
1757 Cost input_startup_cost = 0;
1758 Cost input_total_cost = 0;
1759
1760 Assert(subpath->parallel_safe);
1761 Assert(pathkeys);
1762
1763 /*
1764 * The subpath should guarantee that it is adequately ordered either by
1765 * adding an explicit sort node or by using presorted input. We cannot
1766 * add an explicit Sort node for the subpath in createplan.c on additional
1767 * pathkeys, because we can't guarantee the sort would be safe. For
1768 * example, expressions may be volatile or otherwise parallel unsafe.
1769 */
1770 if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
1771 elog(ERROR, "gather merge input not sufficiently sorted");
1772
1773 pathnode->path.pathtype = T_GatherMerge;
1774 pathnode->path.parent = rel;
1775 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1776 required_outer);
1777 pathnode->path.parallel_aware = false;
1778
1779 pathnode->subpath = subpath;
1780 pathnode->num_workers = subpath->parallel_workers;
1781 pathnode->path.pathkeys = pathkeys;
1782 pathnode->path.pathtarget = target ? target : rel->reltarget;
1783
1784 input_disabled_nodes += subpath->disabled_nodes;
1785 input_startup_cost += subpath->startup_cost;
1786 input_total_cost += subpath->total_cost;
1787
1788 cost_gather_merge(pathnode, root, rel, pathnode->path.param_info,
1789 input_disabled_nodes, input_startup_cost,
1790 input_total_cost, rows);
1791
1792 return pathnode;
1793}
void cost_gather_merge(GatherMergePath *path, PlannerInfo *root, RelOptInfo *rel, ParamPathInfo *param_info, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double *rows)
Definition: costsize.c:459
bool pathkeys_contained_in(List *keys1, List *keys2)
Definition: pathkeys.c:343

References Assert(), cost_gather_merge(), elog, ERROR, get_baserel_parampathinfo(), makeNode, GatherMergePath::num_workers, Path::parallel_aware, GatherMergePath::path, Path::pathkeys, pathkeys_contained_in(), Path::pathtype, RelOptInfo::reltarget, root, subpath(), and GatherMergePath::subpath.

Referenced by create_ordered_paths(), gather_grouping_paths(), generate_gather_paths(), and generate_useful_gather_paths().

◆ create_gather_path()

GatherPath * create_gather_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
PathTarget *target,
Relids  required_outer,
double *rows 
)

Definition at line 1803 of file pathnode.c.

1805{
1806 GatherPath *pathnode = makeNode(GatherPath);
1807
1808 Assert(subpath->parallel_safe);
1809
1810 pathnode->path.pathtype = T_Gather;
1811 pathnode->path.parent = rel;
1812 pathnode->path.pathtarget = target;
1813 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1814 required_outer);
1815 pathnode->path.parallel_aware = false;
1816 pathnode->path.parallel_safe = false;
1817 pathnode->path.parallel_workers = 0;
1818 pathnode->path.pathkeys = NIL; /* Gather has unordered result */
1819
1820 pathnode->subpath = subpath;
1821 pathnode->num_workers = subpath->parallel_workers;
1822 pathnode->single_copy = false;
1823
1824 if (pathnode->num_workers == 0)
1825 {
1826 pathnode->path.pathkeys = subpath->pathkeys;
1827 pathnode->num_workers = 1;
1828 pathnode->single_copy = true;
1829 }
1830
1831 cost_gather(pathnode, root, rel, pathnode->path.param_info, rows);
1832
1833 return pathnode;
1834}
void cost_gather(GatherPath *path, PlannerInfo *root, RelOptInfo *rel, ParamPathInfo *param_info, double *rows)
Definition: costsize.c:420
bool single_copy
Definition: pathnodes.h:2265
int num_workers
Definition: pathnodes.h:2266

References Assert(), cost_gather(), get_baserel_parampathinfo(), makeNode, NIL, GatherPath::num_workers, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, GatherPath::path, Path::pathkeys, Path::pathtype, root, GatherPath::single_copy, subpath(), and GatherPath::subpath.

Referenced by generate_gather_paths(), and generate_union_paths().
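
A hedged sketch of the pattern used by generate_gather_paths(): wrap the cheapest partial path in a Gather, passing rows = NULL so that cost_gather() derives the row estimate itself:

    if (rel->partial_pathlist != NIL)
    {
        Path *cheapest_partial_path = (Path *) linitial(rel->partial_pathlist);

        add_path(rel, (Path *)
                 create_gather_path(root, rel,
                                    cheapest_partial_path,
                                    rel->reltarget,
                                    NULL,     /* required_outer */
                                    NULL));   /* let cost_gather() estimate rows */
    }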

◆ create_group_path()

GroupPath * create_group_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
List *groupClause,
List *qual,
double  numGroups 
)

Definition at line 2886 of file pathnode.c.

2892{
2893 GroupPath *pathnode = makeNode(GroupPath);
2894 PathTarget *target = rel->reltarget;
2895
2896 pathnode->path.pathtype = T_Group;
2897 pathnode->path.parent = rel;
2898 pathnode->path.pathtarget = target;
2899 /* For now, assume we are above any joins, so no parameterization */
2900 pathnode->path.param_info = NULL;
2901 pathnode->path.parallel_aware = false;
2902 pathnode->path.parallel_safe = rel->consider_parallel &&
2903 subpath->parallel_safe;
2904 pathnode->path.parallel_workers = subpath->parallel_workers;
2905 /* Group doesn't change sort ordering */
2906 pathnode->path.pathkeys = subpath->pathkeys;
2907
2908 pathnode->subpath = subpath;
2909
2910 pathnode->groupClause = groupClause;
2911 pathnode->qual = qual;
2912
2913 cost_group(&pathnode->path, root,
2914 list_length(groupClause),
2915 numGroups,
2916 qual,
2917 subpath->disabled_nodes,
2918 subpath->startup_cost, subpath->total_cost,
2919 subpath->rows);
2920
2921 /* add tlist eval cost for each output row */
2922 pathnode->path.startup_cost += target->cost.startup;
2923 pathnode->path.total_cost += target->cost.startup +
2924 target->cost.per_tuple * pathnode->path.rows;
2925
2926 return pathnode;
2927}
void cost_group(Path *path, PlannerInfo *root, int numGroupCols, double numGroups, List *quals, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double input_tuples)
Definition: costsize.c:3217
List * qual
Definition: pathnodes.h:2458
List * groupClause
Definition: pathnodes.h:2457
Path * subpath
Definition: pathnodes.h:2456
Path path
Definition: pathnodes.h:2455

References RelOptInfo::consider_parallel, PathTarget::cost, cost_group(), GroupPath::groupClause, list_length(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, GroupPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, GroupPath::qual, RelOptInfo::reltarget, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), GroupPath::subpath, and Path::total_cost.

Referenced by add_paths_to_grouping_rel(), and create_partial_grouping_paths().

◆ create_group_result_path()

GroupResultPath * create_group_result_path ( PlannerInfo *root,
RelOptInfo *rel,
PathTarget *target,
List *havingqual 
)

Definition at line 1610 of file pathnode.c.

1612{
1613 GroupResultPath *pathnode = makeNode(GroupResultPath);
1614
1615 pathnode->path.pathtype = T_Result;
1616 pathnode->path.parent = rel;
1617 pathnode->path.pathtarget = target;
1618 pathnode->path.param_info = NULL; /* there are no other rels... */
1619 pathnode->path.parallel_aware = false;
1620 pathnode->path.parallel_safe = rel->consider_parallel;
1621 pathnode->path.parallel_workers = 0;
1622 pathnode->path.pathkeys = NIL;
1623 pathnode->quals = havingqual;
1624
1625 /*
1626 * We can't quite use cost_resultscan() because the quals we want to
1627 * account for are not baserestrict quals of the rel. Might as well just
1628 * hack it here.
1629 */
1630 pathnode->path.rows = 1;
1631 pathnode->path.startup_cost = target->cost.startup;
1632 pathnode->path.total_cost = target->cost.startup +
1633 cpu_tuple_cost + target->cost.per_tuple;
1634
1635 /*
1636 * Add cost of qual, if any --- but we ignore its selectivity, since our
1637 * rowcount estimate should be 1 no matter what the qual is.
1638 */
1639 if (havingqual)
1640 {
1641 QualCost qual_cost;
1642
1643 cost_qual_eval(&qual_cost, havingqual, root);
1644 /* havingqual is evaluated once at startup */
1645 pathnode->path.startup_cost += qual_cost.startup + qual_cost.per_tuple;
1646 pathnode->path.total_cost += qual_cost.startup + qual_cost.per_tuple;
1647 }
1648
1649 return pathnode;
1650}
double cpu_tuple_cost
Definition: costsize.c:132
void cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
Definition: costsize.c:4781

References RelOptInfo::consider_parallel, PathTarget::cost, cost_qual_eval(), cpu_tuple_cost, makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, GroupResultPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, GroupResultPath::quals, root, Path::rows, QualCost::startup, Path::startup_cost, and Path::total_cost.

Referenced by create_degenerate_grouping_paths(), and query_planner().

◆ create_groupingsets_path()

GroupingSetsPath * create_groupingsets_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
List *having_qual,
AggStrategy  aggstrategy,
List *rollups,
const AggClauseCosts *agg_costs 
)

Definition at line 3077 of file pathnode.c.

3084{
3085 GroupingSetsPath *pathnode = makeNode(GroupingSetsPath);
3086 PathTarget *target = rel->reltarget;
3087 ListCell *lc;
3088 bool is_first = true;
3089 bool is_first_sort = true;
3090
3091 /* The topmost generated Plan node will be an Agg */
3092 pathnode->path.pathtype = T_Agg;
3093 pathnode->path.parent = rel;
3094 pathnode->path.pathtarget = target;
3095 pathnode->path.param_info = subpath->param_info;
3096 pathnode->path.parallel_aware = false;
3097 pathnode->path.parallel_safe = rel->consider_parallel &&
3098 subpath->parallel_safe;
3099 pathnode->path.parallel_workers = subpath->parallel_workers;
3100 pathnode->subpath = subpath;
3101
3102 /*
3103 * Simplify callers by downgrading AGG_SORTED to AGG_PLAIN, and AGG_MIXED
3104 * to AGG_HASHED, here if possible.
3105 */
3106 if (aggstrategy == AGG_SORTED &&
3107 list_length(rollups) == 1 &&
3108 ((RollupData *) linitial(rollups))->groupClause == NIL)
3109 aggstrategy = AGG_PLAIN;
3110
3111 if (aggstrategy == AGG_MIXED &&
3112 list_length(rollups) == 1)
3113 aggstrategy = AGG_HASHED;
3114
3115 /*
3116 * Output will be in sorted order by group_pathkeys if, and only if, there
3117 * is a single rollup operation on a non-empty list of grouping
3118 * expressions.
3119 */
3120 if (aggstrategy == AGG_SORTED && list_length(rollups) == 1)
3121 pathnode->path.pathkeys = root->group_pathkeys;
3122 else
3123 pathnode->path.pathkeys = NIL;
3124
3125 pathnode->aggstrategy = aggstrategy;
3126 pathnode->rollups = rollups;
3127 pathnode->qual = having_qual;
3128 pathnode->transitionSpace = agg_costs ? agg_costs->transitionSpace : 0;
3129
3130 Assert(rollups != NIL);
3131 Assert(aggstrategy != AGG_PLAIN || list_length(rollups) == 1);
3132 Assert(aggstrategy != AGG_MIXED || list_length(rollups) > 1);
3133
3134 foreach(lc, rollups)
3135 {
3136 RollupData *rollup = lfirst(lc);
3137 List *gsets = rollup->gsets;
3138 int numGroupCols = list_length(linitial(gsets));
3139
3140 /*
3141 * In AGG_SORTED or AGG_PLAIN mode, the first rollup takes the
3142 * (already-sorted) input, and following ones do their own sort.
3143 *
3144 * In AGG_HASHED mode, there is one rollup for each grouping set.
3145 *
3146 * In AGG_MIXED mode, the first rollups are hashed, the first
3147 * non-hashed one takes the (already-sorted) input, and following ones
3148 * do their own sort.
3149 */
3150 if (is_first)
3151 {
3152 cost_agg(&pathnode->path, root,
3153 aggstrategy,
3154 agg_costs,
3155 numGroupCols,
3156 rollup->numGroups,
3157 having_qual,
3158 subpath->disabled_nodes,
3159 subpath->startup_cost,
3160 subpath->total_cost,
3161 subpath->rows,
3162 subpath->pathtarget->width);
3163 is_first = false;
3164 if (!rollup->is_hashed)
3165 is_first_sort = false;
3166 }
3167 else
3168 {
3169 Path sort_path; /* dummy for result of cost_sort */
3170 Path agg_path; /* dummy for result of cost_agg */
3171
3172 if (rollup->is_hashed || is_first_sort)
3173 {
3174 /*
3175 * Account for cost of aggregation, but don't charge input
3176 * cost again
3177 */
3178 cost_agg(&agg_path, root,
3179 rollup->is_hashed ? AGG_HASHED : AGG_SORTED,
3180 agg_costs,
3181 numGroupCols,
3182 rollup->numGroups,
3183 having_qual,
3184 0, 0.0, 0.0,
3185 subpath->rows,
3186 subpath->pathtarget->width);
3187 if (!rollup->is_hashed)
3188 is_first_sort = false;
3189 }
3190 else
3191 {
3192 /* Account for cost of sort, but don't charge input cost again */
3193 cost_sort(&sort_path, root, NIL, 0,
3194 0.0,
3195 subpath->rows,
3196 subpath->pathtarget->width,
3197 0.0,
3198 work_mem,
3199 -1.0);
3200
3201 /* Account for cost of aggregation */
3202
3203 cost_agg(&agg_path, root,
3204 AGG_SORTED,
3205 agg_costs,
3206 numGroupCols,
3207 rollup->numGroups,
3208 having_qual,
3209 sort_path.disabled_nodes,
3210 sort_path.startup_cost,
3211 sort_path.total_cost,
3212 sort_path.rows,
3213 subpath->pathtarget->width);
3214 }
3215
3216 pathnode->path.disabled_nodes += agg_path.disabled_nodes;
3217 pathnode->path.total_cost += agg_path.total_cost;
3218 pathnode->path.rows += agg_path.rows;
3219 }
3220 }
3221
3222 /* add tlist eval cost for each output row */
3223 pathnode->path.startup_cost += target->cost.startup;
3224 pathnode->path.total_cost += target->cost.startup +
3225 target->cost.per_tuple * pathnode->path.rows;
3226
3227 return pathnode;
3228}
void cost_sort(Path *path, PlannerInfo *root, List *pathkeys, int input_disabled_nodes, Cost input_cost, double tuples, int width, Cost comparison_cost, int sort_mem, double limit_tuples)
Definition: costsize.c:2134
int work_mem
Definition: globals.c:131
@ AGG_HASHED
Definition: nodes.h:366
@ AGG_MIXED
Definition: nodes.h:367
@ AGG_PLAIN
Definition: nodes.h:364
uint64 transitionSpace
Definition: pathnodes.h:2530
AggStrategy aggstrategy
Definition: pathnodes.h:2527
Cardinality numGroups
Definition: pathnodes.h:2514
List * gsets
Definition: pathnodes.h:2512
bool is_hashed
Definition: pathnodes.h:2516

References AGG_HASHED, AGG_MIXED, AGG_PLAIN, AGG_SORTED, GroupingSetsPath::aggstrategy, Assert(), RelOptInfo::consider_parallel, PathTarget::cost, cost_agg(), cost_sort(), Path::disabled_nodes, RollupData::gsets, RollupData::is_hashed, lfirst, linitial, list_length(), makeNode, NIL, RollupData::numGroups, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, GroupingSetsPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, GroupingSetsPath::qual, RelOptInfo::reltarget, GroupingSetsPath::rollups, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), GroupingSetsPath::subpath, Path::total_cost, AggClauseCosts::transitionSpace, GroupingSetsPath::transitionSpace, and work_mem.

Referenced by consider_groupingsets_paths().

◆ create_hashjoin_path()

HashPath * create_hashjoin_path ( PlannerInfo root,
RelOptInfo joinrel,
JoinType  jointype,
JoinCostWorkspace workspace,
JoinPathExtraData extra,
Path outer_path,
Path inner_path,
bool  parallel_hash,
List restrict_clauses,
Relids  required_outer,
List hashclauses 
)

Definition at line 2459 of file pathnode.c.

2470{
2471 HashPath *pathnode = makeNode(HashPath);
2472
2473 pathnode->jpath.path.pathtype = T_HashJoin;
2474 pathnode->jpath.path.parent = joinrel;
2475 pathnode->jpath.path.pathtarget = joinrel->reltarget;
2476 pathnode->jpath.path.param_info =
2477 get_joinrel_parampathinfo(root,
2478 joinrel,
2479 outer_path,
2480 inner_path,
2481 extra->sjinfo,
2482 required_outer,
2483 &restrict_clauses);
2484 pathnode->jpath.path.parallel_aware =
2485 joinrel->consider_parallel && parallel_hash;
2486 pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2487 outer_path->parallel_safe && inner_path->parallel_safe;
2488 /* This is a foolish way to estimate parallel_workers, but for now... */
2489 pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2490
2491 /*
2492 * A hashjoin never has pathkeys, since its output ordering is
2493 * unpredictable due to possible batching. XXX If the inner relation is
2494 * small enough, we could instruct the executor that it must not batch,
2495 * and then we could assume that the output inherits the outer relation's
2496 * ordering, which might save a sort step. However there is considerable
2497 * downside if our estimate of the inner relation size is badly off. For
2498 * the moment we don't risk it. (Note also that if we wanted to take this
2499 * seriously, joinpath.c would have to consider many more paths for the
2500 * outer rel than it does now.)
2501 */
2502 pathnode->jpath.path.pathkeys = NIL;
2503 pathnode->jpath.jointype = jointype;
2504 pathnode->jpath.inner_unique = extra->inner_unique;
2505 pathnode->jpath.outerjoinpath = outer_path;
2506 pathnode->jpath.innerjoinpath = inner_path;
2507 pathnode->jpath.joinrestrictinfo = restrict_clauses;
2508 pathnode->path_hashclauses = hashclauses;
2509 /* final_cost_hashjoin will fill in pathnode->num_batches */
2510
2511 final_cost_hashjoin(root, pathnode, workspace, extra);
2512
2513 return pathnode;
2514}
void final_cost_hashjoin(PlannerInfo *root, HashPath *path, JoinCostWorkspace *workspace, JoinPathExtraData *extra)
Definition: costsize.c:4299
ParamPathInfo * get_joinrel_parampathinfo(PlannerInfo *root, RelOptInfo *joinrel, Path *outer_path, Path *inner_path, SpecialJoinInfo *sjinfo, Relids required_outer, List **restrict_clauses)
Definition: relnode.c:1781
List * path_hashclauses
Definition: pathnodes.h:2382
JoinPath jpath
Definition: pathnodes.h:2381
SpecialJoinInfo * sjinfo
Definition: pathnodes.h:3499
Path * outerjoinpath
Definition: pathnodes.h:2296
Path * innerjoinpath
Definition: pathnodes.h:2297
JoinType jointype
Definition: pathnodes.h:2291
bool inner_unique
Definition: pathnodes.h:2293
List * joinrestrictinfo
Definition: pathnodes.h:2299

References RelOptInfo::consider_parallel, final_cost_hashjoin(), get_joinrel_parampathinfo(), JoinPath::inner_unique, JoinPathExtraData::inner_unique, JoinPath::innerjoinpath, JoinPath::joinrestrictinfo, JoinPath::jointype, HashPath::jpath, makeNode, NIL, JoinPath::outerjoinpath, Path::parallel_safe, Path::parallel_workers, HashPath::path_hashclauses, RelOptInfo::reltarget, root, and JoinPathExtraData::sjinfo.

Referenced by try_hashjoin_path(), and try_partial_hashjoin_path().
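
A hedged caller-side sketch (modeled loosely on try_hashjoin_path(); jointype, extra, outer_path, inner_path, restrict_clauses, required_outer and hashclauses are assumed to have been prepared by the join-search code): the JoinCostWorkspace is first populated by initial_cost_hashjoin(), and the finished path is handed to add_path() for the usual cost competition.

    JoinCostWorkspace workspace;

    /* Preliminary costing; final_cost_hashjoin() refines this later. */
    initial_cost_hashjoin(root, &workspace, jointype, hashclauses,
                          outer_path, inner_path, extra, false);

    add_path(joinrel, (Path *)
             create_hashjoin_path(root, joinrel, jointype,
                                  &workspace, extra,
                                  outer_path, inner_path,
                                  false,            /* parallel_hash */
                                  restrict_clauses,
                                  required_outer,
                                  hashclauses));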

◆ create_incremental_sort_path()

IncrementalSortPath * create_incremental_sort_path ( PlannerInfo root,
RelOptInfo rel,
Path subpath,
List pathkeys,
int  presorted_keys,
double  limit_tuples 
)

Definition at line 2793 of file pathnode.c.

2799{
2800 IncrementalSortPath *sort = makeNode(IncrementalSortPath);
2801 SortPath *pathnode = &sort->spath;
2802
2803 pathnode->path.pathtype = T_IncrementalSort;
2804 pathnode->path.parent = rel;
2805 /* Sort doesn't project, so use source path's pathtarget */
2806 pathnode->path.pathtarget = subpath->pathtarget;
2807 pathnode->path.param_info = subpath->param_info;
2808 pathnode->path.parallel_aware = false;
2809 pathnode->path.parallel_safe = rel->consider_parallel &&
2810 subpath->parallel_safe;
2811 pathnode->path.parallel_workers = subpath->parallel_workers;
2812 pathnode->path.pathkeys = pathkeys;
2813
2814 pathnode->subpath = subpath;
2815
2816 cost_incremental_sort(&pathnode->path,
2817 root, pathkeys, presorted_keys,
2818 subpath->disabled_nodes,
2819 subpath->startup_cost,
2820 subpath->total_cost,
2821 subpath->rows,
2822 subpath->pathtarget->width,
2823 0.0, /* XXX comparison_cost shouldn't be 0? */
2824 work_mem, limit_tuples);
2825
2826 sort->nPresortedCols = presorted_keys;
2827
2828 return sort;
2829}
Datum sort(PG_FUNCTION_ARGS)
Definition: _int_op.c:198
void cost_incremental_sort(Path *path, PlannerInfo *root, List *pathkeys, int presorted_keys, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double input_tuples, int width, Cost comparison_cost, int sort_mem, double limit_tuples)
Definition: costsize.c:1990
Path path
Definition: pathnodes.h:2429
Path * subpath
Definition: pathnodes.h:2430

References RelOptInfo::consider_parallel, cost_incremental_sort(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, SortPath::path, Path::pathkeys, Path::pathtype, root, sort(), subpath(), SortPath::subpath, and work_mem.

Referenced by build_setop_child_paths(), create_final_unique_paths(), create_one_window_path(), create_ordered_paths(), create_partial_unique_paths(), gather_grouping_paths(), generate_grouped_paths(), generate_useful_gather_paths(), and make_ordered_path().
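
A hedged sketch of the usual call pattern (essentially what the MergeAppend costing further below also does): if the input is already partially ordered, prefer an incremental sort when enabled; otherwise fall back to create_sort_path(). Here required_pathkeys is assumed to be the ordering the caller needs, and -1.0 means no LIMIT is known.

    int     presorted_keys;

    if (!pathkeys_count_contained_in(required_pathkeys, subpath->pathkeys,
                                     &presorted_keys))
    {
        if (enable_incremental_sort && presorted_keys > 0)
            subpath = (Path *) create_incremental_sort_path(root, rel, subpath,
                                                            required_pathkeys,
                                                            presorted_keys,
                                                            -1.0);
        else
            subpath = (Path *) create_sort_path(root, rel, subpath,
                                                required_pathkeys, -1.0);
    }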

◆ create_index_path()

IndexPath * create_index_path ( PlannerInfo root,
IndexOptInfo index,
List indexclauses,
List indexorderbys,
List indexorderbycols,
List pathkeys,
ScanDirection  indexscandir,
bool  indexonly,
Relids  required_outer,
double  loop_count,
bool  partial_path 
)

Definition at line 1049 of file pathnode.c.

1060{
1061 IndexPath *pathnode = makeNode(IndexPath);
1062 RelOptInfo *rel = index->rel;
1063
1064 pathnode->path.pathtype = indexonly ? T_IndexOnlyScan : T_IndexScan;
1065 pathnode->path.parent = rel;
1066 pathnode->path.pathtarget = rel->reltarget;
1067 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1068 required_outer);
1069 pathnode->path.parallel_aware = false;
1070 pathnode->path.parallel_safe = rel->consider_parallel;
1071 pathnode->path.parallel_workers = 0;
1072 pathnode->path.pathkeys = pathkeys;
1073
1074 pathnode->indexinfo = index;
1075 pathnode->indexclauses = indexclauses;
1076 pathnode->indexorderbys = indexorderbys;
1077 pathnode->indexorderbycols = indexorderbycols;
1078 pathnode->indexscandir = indexscandir;
1079
1080 cost_index(pathnode, root, loop_count, partial_path);
1081
1082 return pathnode;
1083}
void cost_index(IndexPath *path, PlannerInfo *root, double loop_count, bool partial_path)
Definition: costsize.c:534
List * indexclauses
Definition: pathnodes.h:1959
ScanDirection indexscandir
Definition: pathnodes.h:1962
Path path
Definition: pathnodes.h:1957
List * indexorderbycols
Definition: pathnodes.h:1961
List * indexorderbys
Definition: pathnodes.h:1960
IndexOptInfo * indexinfo
Definition: pathnodes.h:1958
Definition: type.h:96

References RelOptInfo::consider_parallel, cost_index(), get_baserel_parampathinfo(), IndexPath::indexclauses, IndexPath::indexinfo, IndexPath::indexorderbycols, IndexPath::indexorderbys, IndexPath::indexscandir, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, IndexPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by build_index_paths(), and plan_cluster_use_sort().
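
An illustrative call only, assuming the caller (normally build_index_paths()) has already matched the index's clauses and derived any useful pathkeys; this builds a plain forward index scan with no ORDER BY operator clauses.

    IndexPath  *ipath;

    ipath = create_index_path(root, index,
                              indexclauses,
                              NIL, NIL,             /* no ORDER BY operator clauses */
                              useful_pathkeys,
                              ForwardScanDirection,
                              false,                /* indexonly */
                              required_outer,
                              loop_count,
                              false);               /* partial_path */
    add_path(rel, (Path *) ipath);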

◆ create_limit_path()

LimitPath * create_limit_path ( PlannerInfo root,
RelOptInfo rel,
Path subpath,
Node limitOffset,
Node limitCount,
LimitOption  limitOption,
int64  offset_est,
int64  count_est 
)

Definition at line 3730 of file pathnode.c.

3735{
3736 LimitPath *pathnode = makeNode(LimitPath);
3737
3738 pathnode->path.pathtype = T_Limit;
3739 pathnode->path.parent = rel;
3740 /* Limit doesn't project, so use source path's pathtarget */
3741 pathnode->path.pathtarget = subpath->pathtarget;
3742 /* For now, assume we are above any joins, so no parameterization */
3743 pathnode->path.param_info = NULL;
3744 pathnode->path.parallel_aware = false;
3745 pathnode->path.parallel_safe = rel->consider_parallel &&
3746 subpath->parallel_safe;
3747 pathnode->path.parallel_workers = subpath->parallel_workers;
3748 pathnode->path.rows = subpath->rows;
3749 pathnode->path.disabled_nodes = subpath->disabled_nodes;
3750 pathnode->path.startup_cost = subpath->startup_cost;
3751 pathnode->path.total_cost = subpath->total_cost;
3752 pathnode->path.pathkeys = subpath->pathkeys;
3753 pathnode->subpath = subpath;
3754 pathnode->limitOffset = limitOffset;
3755 pathnode->limitCount = limitCount;
3756 pathnode->limitOption = limitOption;
3757
3758 /*
3759 * Adjust the output rows count and costs according to the offset/limit.
3760 */
3761 adjust_limit_rows_costs(&pathnode->path.rows,
3762 &pathnode->path.startup_cost,
3763 &pathnode->path.total_cost,
3764 offset_est, count_est);
3765
3766 return pathnode;
3767}
void adjust_limit_rows_costs(double *rows, Cost *startup_cost, Cost *total_cost, int64 offset_est, int64 count_est)
Definition: pathnode.c:3786
Path path
Definition: pathnodes.h:2628
Path * subpath
Definition: pathnodes.h:2629
LimitOption limitOption
Definition: pathnodes.h:2632
Node * limitOffset
Definition: pathnodes.h:2630
Node * limitCount
Definition: pathnodes.h:2631

References adjust_limit_rows_costs(), RelOptInfo::consider_parallel, Path::disabled_nodes, LimitPath::limitCount, LimitPath::limitOffset, LimitPath::limitOption, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, LimitPath::path, Path::pathkeys, Path::pathtype, Path::rows, Path::startup_cost, subpath(), LimitPath::subpath, and Path::total_cost.

Referenced by create_final_distinct_paths(), create_partial_distinct_paths(), and grouping_planner().
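
A hedged sketch of stacking a LIMIT on top of an already-chosen path. The limitCount node would normally come from the parse tree, and LIMIT_OPTION_COUNT is the ordinary (non-WITH TIES) mode; final_rel and best_path are assumed caller-provided.

    LimitPath  *lpath;

    lpath = create_limit_path(root, final_rel, best_path,
                              NULL,                 /* no OFFSET expression */
                              limitCount,           /* count expression (Node *) */
                              LIMIT_OPTION_COUNT,
                              0,                    /* offset_est */
                              10);                  /* count_est, if known */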

◆ create_lockrows_path()

LockRowsPath * create_lockrows_path ( PlannerInfo root,
RelOptInfo rel,
Path subpath,
List rowMarks,
int  epqParam 
)

Definition at line 3568 of file pathnode.c.

3570{
3571 LockRowsPath *pathnode = makeNode(LockRowsPath);
3572
3573 pathnode->path.pathtype = T_LockRows;
3574 pathnode->path.parent = rel;
3575 /* LockRows doesn't project, so use source path's pathtarget */
3576 pathnode->path.pathtarget = subpath->pathtarget;
3577 /* For now, assume we are above any joins, so no parameterization */
3578 pathnode->path.param_info = NULL;
3579 pathnode->path.parallel_aware = false;
3580 pathnode->path.parallel_safe = false;
3581 pathnode->path.parallel_workers = 0;
3582 pathnode->path.rows = subpath->rows;
3583
3584 /*
3585 * The result cannot be assumed sorted, since locking might cause the sort
3586 * key columns to be replaced with new values.
3587 */
3588 pathnode->path.pathkeys = NIL;
3589
3590 pathnode->subpath = subpath;
3591 pathnode->rowMarks = rowMarks;
3592 pathnode->epqParam = epqParam;
3593
3594 /*
3595 * We should charge something extra for the costs of row locking and
3596 * possible refetches, but it's hard to say how much. For now, use
3597 * cpu_tuple_cost per row.
3598 */
3599 pathnode->path.disabled_nodes = subpath->disabled_nodes;
3600 pathnode->path.startup_cost = subpath->startup_cost;
3601 pathnode->path.total_cost = subpath->total_cost +
3602 cpu_tuple_cost * subpath->rows;
3603
3604 return pathnode;
3605}
Path * subpath
Definition: pathnodes.h:2590
List * rowMarks
Definition: pathnodes.h:2591

References cpu_tuple_cost, Path::disabled_nodes, LockRowsPath::epqParam, makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, LockRowsPath::path, Path::pathkeys, Path::pathtype, LockRowsPath::rowMarks, Path::rows, Path::startup_cost, subpath(), LockRowsPath::subpath, and Path::total_cost.

Referenced by grouping_planner().

◆ create_material_path()

MaterialPath * create_material_path ( RelOptInfo rel,
Path subpath 
)

Definition at line 1658 of file pathnode.c.

1659{
1660 MaterialPath *pathnode = makeNode(MaterialPath);
1661
1662 Assert(subpath->parent == rel);
1663
1664 pathnode->path.pathtype = T_Material;
1665 pathnode->path.parent = rel;
1666 pathnode->path.pathtarget = rel->reltarget;
1667 pathnode->path.param_info = subpath->param_info;
1668 pathnode->path.parallel_aware = false;
1669 pathnode->path.parallel_safe = rel->consider_parallel &&
1670 subpath->parallel_safe;
1671 pathnode->path.parallel_workers = subpath->parallel_workers;
1672 pathnode->path.pathkeys = subpath->pathkeys;
1673
1674 pathnode->subpath = subpath;
1675
1676 cost_material(&pathnode->path,
1677 subpath->disabled_nodes,
1678 subpath->startup_cost,
1679 subpath->total_cost,
1680 subpath->rows,
1681 subpath->pathtarget->width);
1682
1683 return pathnode;
1684}
void cost_material(Path *path, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double tuples, int width)
Definition: costsize.c:2499
Path * subpath
Definition: pathnodes.h:2230

References Assert(), RelOptInfo::consider_parallel, cost_material(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, MaterialPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, subpath(), and MaterialPath::subpath.

Referenced by consider_parallel_nestloop(), match_unsorted_outer(), reparameterize_path(), and set_tablesample_rel_pathlist().
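
A minimal sketch, assuming innerrel's pathlist has already been built and set_cheapest() run: materialize the cheapest total path before it is rescanned repeatedly as a nestloop inner path, much as match_unsorted_outer() does.

    Path       *inner = innerrel->cheapest_total_path;
    Path       *matpath;

    /* Material caches the inner rel's output so rescans avoid recomputation */
    matpath = (Path *) create_material_path(innerrel, inner);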

◆ create_memoize_path()

MemoizePath * create_memoize_path ( PlannerInfo root,
RelOptInfo rel,
Path subpath,
List param_exprs,
List hash_operators,
bool  singlerow,
bool  binary_mode,
Cardinality  est_calls 
)

Definition at line 1691 of file pathnode.c.

1694{
1695 MemoizePath *pathnode = makeNode(MemoizePath);
1696
1697 Assert(subpath->parent == rel);
1698
1699 pathnode->path.pathtype = T_Memoize;
1700 pathnode->path.parent = rel;
1701 pathnode->path.pathtarget = rel->reltarget;
1702 pathnode->path.param_info = subpath->param_info;
1703 pathnode->path.parallel_aware = false;
1704 pathnode->path.parallel_safe = rel->consider_parallel &&
1705 subpath->parallel_safe;
1706 pathnode->path.parallel_workers = subpath->parallel_workers;
1707 pathnode->path.pathkeys = subpath->pathkeys;
1708
1709 pathnode->subpath = subpath;
1710 pathnode->hash_operators = hash_operators;
1711 pathnode->param_exprs = param_exprs;
1712 pathnode->singlerow = singlerow;
1713 pathnode->binary_mode = binary_mode;
1714
1715 /*
1716 * For now we set est_entries to 0. cost_memoize_rescan() does all the
1717 * hard work to determine how many cache entries there are likely to be,
1718 * so it seems best to leave it up to that function to fill this field in.
1719 * If left at 0, the executor will make a guess at a good value.
1720 */
1721 pathnode->est_entries = 0;
1722
1723 pathnode->est_calls = clamp_row_est(est_calls);
1724
1725 /* These will also be set later in cost_memoize_rescan() */
1726 pathnode->est_unique_keys = 0.0;
1727 pathnode->est_hit_ratio = 0.0;
1728
1729 /* we should not generate this path type when enable_memoize=false */
1730 Assert(enable_memoize);
1731 pathnode->path.disabled_nodes = subpath->disabled_nodes;
1732
1733 /*
1734 * Add a small additional charge for caching the first entry. All the
1735 * harder calculations for rescans are performed in cost_memoize_rescan().
1736 */
1737 pathnode->path.startup_cost = subpath->startup_cost + cpu_tuple_cost;
1738 pathnode->path.total_cost = subpath->total_cost + cpu_tuple_cost;
1739 pathnode->path.rows = subpath->rows;
1740
1741 return pathnode;
1742}
bool enable_memoize
Definition: costsize.c:155
Cardinality est_calls
Definition: pathnodes.h:2251
bool singlerow
Definition: pathnodes.h:2244
List * hash_operators
Definition: pathnodes.h:2242
uint32 est_entries
Definition: pathnodes.h:2248
bool binary_mode
Definition: pathnodes.h:2246
double est_hit_ratio
Definition: pathnodes.h:2253
Cardinality est_unique_keys
Definition: pathnodes.h:2252
Path * subpath
Definition: pathnodes.h:2241
List * param_exprs
Definition: pathnodes.h:2243

References Assert(), MemoizePath::binary_mode, clamp_row_est(), RelOptInfo::consider_parallel, cpu_tuple_cost, Path::disabled_nodes, enable_memoize, MemoizePath::est_calls, MemoizePath::est_entries, MemoizePath::est_hit_ratio, MemoizePath::est_unique_keys, MemoizePath::hash_operators, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, MemoizePath::param_exprs, MemoizePath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, Path::rows, MemoizePath::singlerow, Path::startup_cost, subpath(), MemoizePath::subpath, and Path::total_cost.

Referenced by get_memoize_path(), and reparameterize_path().
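
Illustrative only (the real work of deriving the cache keys lives in get_memoize_path()): wrap a parameterized inner path in a Memoize node keyed on the parameter expressions, with the outer path's row estimate serving as the expected number of calls.

    MemoizePath *mpath;

    mpath = create_memoize_path(root, innerrel, inner_path,
                                param_exprs,        /* cache key expressions */
                                hash_operators,     /* matching hash equality ops */
                                false,              /* singlerow */
                                false,              /* binary_mode */
                                outer_path->rows);  /* est_calls */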

◆ create_merge_append_path()

MergeAppendPath * create_merge_append_path ( PlannerInfo root,
RelOptInfo rel,
List subpaths,
List pathkeys,
Relids  required_outer 
)

Definition at line 1472 of file pathnode.c.

1477{
1478 MergeAppendPath *pathnode = makeNode(MergeAppendPath);
1479 int input_disabled_nodes;
1480 Cost input_startup_cost;
1481 Cost input_total_cost;
1482 ListCell *l;
1483
1484 /*
1485 * We don't currently support parameterized MergeAppend paths, as
1486 * explained in the comments for generate_orderedappend_paths.
1487 */
1488 Assert(bms_is_empty(rel->lateral_relids) && bms_is_empty(required_outer));
1489
1490 pathnode->path.pathtype = T_MergeAppend;
1491 pathnode->path.parent = rel;
1492 pathnode->path.pathtarget = rel->reltarget;
1493 pathnode->path.param_info = NULL;
1494 pathnode->path.parallel_aware = false;
1495 pathnode->path.parallel_safe = rel->consider_parallel;
1496 pathnode->path.parallel_workers = 0;
1497 pathnode->path.pathkeys = pathkeys;
1498 pathnode->subpaths = subpaths;
1499
1500 /*
1501 * Apply query-wide LIMIT if known and path is for sole base relation.
1502 * (Handling this at this low level is a bit klugy.)
1503 */
1504 if (bms_equal(rel->relids, root->all_query_rels))
1505 pathnode->limit_tuples = root->limit_tuples;
1506 else
1507 pathnode->limit_tuples = -1.0;
1508
1509 /*
1510 * Add up the sizes and costs of the input paths.
1511 */
1512 pathnode->path.rows = 0;
1513 input_disabled_nodes = 0;
1514 input_startup_cost = 0;
1515 input_total_cost = 0;
1516 foreach(l, subpaths)
1517 {
1518 Path *subpath = (Path *) lfirst(l);
1519 int presorted_keys;
1520 Path sort_path; /* dummy for result of
1521 * cost_sort/cost_incremental_sort */
1522
1523 /* All child paths should be unparameterized */
1524 Assert(bms_is_empty(PATH_REQ_OUTER(subpath)));
1525
1526 pathnode->path.rows += subpath->rows;
1527 pathnode->path.parallel_safe = pathnode->path.parallel_safe &&
1528 subpath->parallel_safe;
1529
1530 if (!pathkeys_count_contained_in(pathkeys, subpath->pathkeys,
1531 &presorted_keys))
1532 {
1533 /*
1534 * We'll need to insert a Sort node, so include costs for that. We
1535 * choose to use incremental sort if it is enabled and there are
1536 * presorted keys; otherwise we use full sort.
1537 *
1538 * We can use the parent's LIMIT if any, since we certainly won't
1539 * pull more than that many tuples from any child.
1540 */
1541 if (enable_incremental_sort && presorted_keys > 0)
1542 {
1543 cost_incremental_sort(&sort_path,
1544 root,
1545 pathkeys,
1546 presorted_keys,
1547 subpath->disabled_nodes,
1548 subpath->startup_cost,
1549 subpath->total_cost,
1550 subpath->rows,
1551 subpath->pathtarget->width,
1552 0.0,
1553 work_mem,
1554 pathnode->limit_tuples);
1555 }
1556 else
1557 {
1558 cost_sort(&sort_path,
1559 root,
1560 pathkeys,
1561 subpath->disabled_nodes,
1562 subpath->total_cost,
1563 subpath->rows,
1564 subpath->pathtarget->width,
1565 0.0,
1566 work_mem,
1567 pathnode->limit_tuples);
1568 }
1569
1570 subpath = &sort_path;
1571 }
1572
1573 input_disabled_nodes += subpath->disabled_nodes;
1574 input_startup_cost += subpath->startup_cost;
1575 input_total_cost += subpath->total_cost;
1576 }
1577
1578 /*
1579 * Now we can compute total costs of the MergeAppend. If there's exactly
1580 * one child path and its parallel awareness matches that of the
1581 * MergeAppend, then the MergeAppend is a no-op and will be discarded
1582 * later (in setrefs.c); otherwise we do the normal cost calculation.
1583 */
1584 if (list_length(subpaths) == 1 &&
1585 ((Path *) linitial(subpaths))->parallel_aware ==
1586 pathnode->path.parallel_aware)
1587 {
1588 pathnode->path.disabled_nodes = input_disabled_nodes;
1589 pathnode->path.startup_cost = input_startup_cost;
1590 pathnode->path.total_cost = input_total_cost;
1591 }
1592 else
1593 cost_merge_append(&pathnode->path, root,
1594 pathkeys, list_length(subpaths),
1595 input_disabled_nodes,
1596 input_startup_cost, input_total_cost,
1597 pathnode->path.rows);
1598
1599 return pathnode;
1600}
void cost_merge_append(Path *path, PlannerInfo *root, List *pathkeys, int n_streams, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double tuples)
Definition: costsize.c:2448
bool enable_incremental_sort
Definition: costsize.c:151
bool pathkeys_count_contained_in(List *keys1, List *keys2, int *n_common)
Definition: pathkeys.c:558
Cardinality limit_tuples
Definition: pathnodes.h:2205

References Assert(), bms_equal(), bms_is_empty, RelOptInfo::consider_parallel, cost_incremental_sort(), cost_merge_append(), cost_sort(), Path::disabled_nodes, enable_incremental_sort, RelOptInfo::lateral_relids, lfirst, MergeAppendPath::limit_tuples, linitial, list_length(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, MergeAppendPath::path, PATH_REQ_OUTER, Path::pathkeys, pathkeys_count_contained_in(), Path::pathtype, RelOptInfo::relids, RelOptInfo::reltarget, root, Path::rows, Path::startup_cost, subpath(), MergeAppendPath::subpaths, Path::total_cost, and work_mem.

Referenced by generate_orderedappend_paths(), and generate_union_paths().
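
A hedged sketch in the spirit of generate_orderedappend_paths(): pick one suitably-ordered path per child relation (a NULL result, meaning the child has no such path and would need an explicit sort, is glossed over here) and combine them under a MergeAppend.

    List       *child_paths = NIL;
    ListCell   *lc;

    foreach(lc, live_childrels)
    {
        RelOptInfo *childrel = (RelOptInfo *) lfirst(lc);
        Path       *cpath;

        cpath = get_cheapest_path_for_pathkeys(childrel->pathlist, pathkeys,
                                               NULL, TOTAL_COST, false);
        child_paths = lappend(child_paths, cpath);
    }

    add_path(rel, (Path *)
             create_merge_append_path(root, rel, child_paths, pathkeys, NULL));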

◆ create_mergejoin_path()

MergePath * create_mergejoin_path ( PlannerInfo root,
RelOptInfo joinrel,
JoinType  jointype,
JoinCostWorkspace workspace,
JoinPathExtraData extra,
Path outer_path,
Path inner_path,
List restrict_clauses,
List pathkeys,
Relids  required_outer,
List mergeclauses,
List outersortkeys,
List innersortkeys,
int  outer_presorted_keys 
)

Definition at line 2391 of file pathnode.c.

2405{
2406 MergePath *pathnode = makeNode(MergePath);
2407
2408 pathnode->jpath.path.pathtype = T_MergeJoin;
2409 pathnode->jpath.path.parent = joinrel;
2410 pathnode->jpath.path.pathtarget = joinrel->reltarget;
2411 pathnode->jpath.path.param_info =
2412 get_joinrel_parampathinfo(root,
2413 joinrel,
2414 outer_path,
2415 inner_path,
2416 extra->sjinfo,
2417 required_outer,
2418 &restrict_clauses);
2419 pathnode->jpath.path.parallel_aware = false;
2420 pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2421 outer_path->parallel_safe && inner_path->parallel_safe;
2422 /* This is a foolish way to estimate parallel_workers, but for now... */
2423 pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2424 pathnode->jpath.path.pathkeys = pathkeys;
2425 pathnode->jpath.jointype = jointype;
2426 pathnode->jpath.inner_unique = extra->inner_unique;
2427 pathnode->jpath.outerjoinpath = outer_path;
2428 pathnode->jpath.innerjoinpath = inner_path;
2429 pathnode->jpath.joinrestrictinfo = restrict_clauses;
2430 pathnode->path_mergeclauses = mergeclauses;
2431 pathnode->outersortkeys = outersortkeys;
2432 pathnode->innersortkeys = innersortkeys;
2433 pathnode->outer_presorted_keys = outer_presorted_keys;
2434 /* pathnode->skip_mark_restore will be set by final_cost_mergejoin */
2435 /* pathnode->materialize_inner will be set by final_cost_mergejoin */
2436
2437 final_cost_mergejoin(root, pathnode, workspace, extra);
2438
2439 return pathnode;
2440}
void final_cost_mergejoin(PlannerInfo *root, MergePath *path, JoinCostWorkspace *workspace, JoinPathExtraData *extra)
Definition: costsize.c:3859
List * outersortkeys
Definition: pathnodes.h:2362
List * innersortkeys
Definition: pathnodes.h:2363
JoinPath jpath
Definition: pathnodes.h:2360
int outer_presorted_keys
Definition: pathnodes.h:2364
List * path_mergeclauses
Definition: pathnodes.h:2361

References RelOptInfo::consider_parallel, final_cost_mergejoin(), get_joinrel_parampathinfo(), JoinPath::inner_unique, JoinPathExtraData::inner_unique, JoinPath::innerjoinpath, MergePath::innersortkeys, JoinPath::joinrestrictinfo, JoinPath::jointype, MergePath::jpath, makeNode, MergePath::outer_presorted_keys, JoinPath::outerjoinpath, MergePath::outersortkeys, Path::parallel_safe, Path::parallel_workers, MergePath::path_mergeclauses, RelOptInfo::reltarget, root, and JoinPathExtraData::sjinfo.

Referenced by try_mergejoin_path(), and try_partial_mergejoin_path().

◆ create_minmaxagg_path()

MinMaxAggPath * create_minmaxagg_path ( PlannerInfo root,
RelOptInfo rel,
PathTarget target,
List mmaggregates,
List quals 
)

Definition at line 3240 of file pathnode.c.

3245{
3246 MinMaxAggPath *pathnode = makeNode(MinMaxAggPath);
3247 Cost initplan_cost;
3248 int initplan_disabled_nodes = 0;
3249 ListCell *lc;
3250
3251 /* The topmost generated Plan node will be a Result */
3252 pathnode->path.pathtype = T_Result;
3253 pathnode->path.parent = rel;
3254 pathnode->path.pathtarget = target;
3255 /* For now, assume we are above any joins, so no parameterization */
3256 pathnode->path.param_info = NULL;
3257 pathnode->path.parallel_aware = false;
3258 pathnode->path.parallel_safe = true; /* might change below */
3259 pathnode->path.parallel_workers = 0;
3260 /* Result is one unordered row */
3261 pathnode->path.rows = 1;
3262 pathnode->path.pathkeys = NIL;
3263
3264 pathnode->mmaggregates = mmaggregates;
3265 pathnode->quals = quals;
3266
3267 /* Calculate cost of all the initplans, and check parallel safety */
3268 initplan_cost = 0;
3269 foreach(lc, mmaggregates)
3270 {
3271 MinMaxAggInfo *mminfo = (MinMaxAggInfo *) lfirst(lc);
3272
3273 initplan_disabled_nodes += mminfo->path->disabled_nodes;
3274 initplan_cost += mminfo->pathcost;
3275 if (!mminfo->path->parallel_safe)
3276 pathnode->path.parallel_safe = false;
3277 }
3278
3279 /* add tlist eval cost for each output row, plus cpu_tuple_cost */
3280 pathnode->path.disabled_nodes = initplan_disabled_nodes;
3281 pathnode->path.startup_cost = initplan_cost + target->cost.startup;
3282 pathnode->path.total_cost = initplan_cost + target->cost.startup +
3283 target->cost.per_tuple + cpu_tuple_cost;
3284
3285 /*
3286 * Add cost of qual, if any --- but we ignore its selectivity, since our
3287 * rowcount estimate should be 1 no matter what the qual is.
3288 */
3289 if (quals)
3290 {
3291 QualCost qual_cost;
3292
3293 cost_qual_eval(&qual_cost, quals, root);
3294 pathnode->path.startup_cost += qual_cost.startup;
3295 pathnode->path.total_cost += qual_cost.startup + qual_cost.per_tuple;
3296 }
3297
3298 /*
3299 * If the initplans were all parallel-safe, also check safety of the
3300 * target and quals. (The Result node itself isn't parallelizable, but if
3301 * we are in a subquery then it can be useful for the outer query to know
3302 * that this one is parallel-safe.)
3303 */
3304 if (pathnode->path.parallel_safe)
3305 pathnode->path.parallel_safe =
3306 is_parallel_safe(root, (Node *) target->exprs) &&
3307 is_parallel_safe(root, (Node *) quals);
3308
3309 return pathnode;
3310}
List * quals
Definition: pathnodes.h:2540
List * mmaggregates
Definition: pathnodes.h:2539

References PathTarget::cost, cost_qual_eval(), cpu_tuple_cost, Path::disabled_nodes, PathTarget::exprs, is_parallel_safe(), lfirst, makeNode, MinMaxAggPath::mmaggregates, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, MinMaxAggPath::path, MinMaxAggInfo::path, MinMaxAggInfo::pathcost, Path::pathkeys, Path::pathtype, QualCost::per_tuple, MinMaxAggPath::quals, root, Path::rows, QualCost::startup, Path::startup_cost, and Path::total_cost.

Referenced by preprocess_minmax_aggregates().

◆ create_modifytable_path()

ModifyTablePath * create_modifytable_path ( PlannerInfo root,
RelOptInfo rel,
Path subpath,
CmdType  operation,
bool  canSetTag,
Index  nominalRelation,
Index  rootRelation,
List resultRelations,
List updateColnosLists,
List withCheckOptionLists,
List returningLists,
List rowMarks,
OnConflictExpr onconflict,
List mergeActionLists,
List mergeJoinConditions,
int  epqParam 
)

Definition at line 3630 of file pathnode.c.

3640{
3641 ModifyTablePath *pathnode = makeNode(ModifyTablePath);
3642
3643 Assert(operation == CMD_MERGE ||
3644 (operation == CMD_UPDATE ?
3645 list_length(resultRelations) == list_length(updateColnosLists) :
3646 updateColnosLists == NIL));
3647 Assert(withCheckOptionLists == NIL ||
3648 list_length(resultRelations) == list_length(withCheckOptionLists));
3649 Assert(returningLists == NIL ||
3650 list_length(resultRelations) == list_length(returningLists));
3651
3652 pathnode->path.pathtype = T_ModifyTable;
3653 pathnode->path.parent = rel;
3654 /* pathtarget is not interesting, just make it minimally valid */
3655 pathnode->path.pathtarget = rel->reltarget;
3656 /* For now, assume we are above any joins, so no parameterization */
3657 pathnode->path.param_info = NULL;
3658 pathnode->path.parallel_aware = false;
3659 pathnode->path.parallel_safe = false;
3660 pathnode->path.parallel_workers = 0;
3661 pathnode->path.pathkeys = NIL;
3662
3663 /*
3664 * Compute cost & rowcount as subpath cost & rowcount (if RETURNING)
3665 *
3666 * Currently, we don't charge anything extra for the actual table
3667 * modification work, nor for the WITH CHECK OPTIONS or RETURNING
3668 * expressions if any. It would only be window dressing, since
3669 * ModifyTable is always a top-level node and there is no way for the
3670 * costs to change any higher-level planning choices. But we might want
3671 * to make it look better sometime.
3672 */
3673 pathnode->path.disabled_nodes = subpath->disabled_nodes;
3674 pathnode->path.startup_cost = subpath->startup_cost;
3675 pathnode->path.total_cost = subpath->total_cost;
3676 if (returningLists != NIL)
3677 {
3678 pathnode->path.rows = subpath->rows;
3679
3680 /*
3681 * Set width to match the subpath output. XXX this is totally wrong:
3682 * we should return an average of the RETURNING tlist widths. But
3683 * it's what happened historically, and improving it is a task for
3684 * another day. (Again, it's mostly window dressing.)
3685 */
3686 pathnode->path.pathtarget->width = subpath->pathtarget->width;
3687 }
3688 else
3689 {
3690 pathnode->path.rows = 0;
3691 pathnode->path.pathtarget->width = 0;
3692 }
3693
3694 pathnode->subpath = subpath;
3695 pathnode->operation = operation;
3696 pathnode->canSetTag = canSetTag;
3697 pathnode->nominalRelation = nominalRelation;
3698 pathnode->rootRelation = rootRelation;
3699 pathnode->resultRelations = resultRelations;
3700 pathnode->updateColnosLists = updateColnosLists;
3701 pathnode->withCheckOptionLists = withCheckOptionLists;
3702 pathnode->returningLists = returningLists;
3703 pathnode->rowMarks = rowMarks;
3704 pathnode->onconflict = onconflict;
3705 pathnode->epqParam = epqParam;
3706 pathnode->mergeActionLists = mergeActionLists;
3707 pathnode->mergeJoinConditions = mergeJoinConditions;
3708
3709 return pathnode;
3710}
@ CMD_MERGE
Definition: nodes.h:279
@ CMD_UPDATE
Definition: nodes.h:276
List * returningLists
Definition: pathnodes.h:2613
List * resultRelations
Definition: pathnodes.h:2610
List * withCheckOptionLists
Definition: pathnodes.h:2612
List * mergeJoinConditions
Definition: pathnodes.h:2619
List * updateColnosLists
Definition: pathnodes.h:2611
OnConflictExpr * onconflict
Definition: pathnodes.h:2615
CmdType operation
Definition: pathnodes.h:2606
Index rootRelation
Definition: pathnodes.h:2609
Index nominalRelation
Definition: pathnodes.h:2608
List * mergeActionLists
Definition: pathnodes.h:2617

References Assert(), ModifyTablePath::canSetTag, CMD_MERGE, CMD_UPDATE, Path::disabled_nodes, ModifyTablePath::epqParam, list_length(), makeNode, ModifyTablePath::mergeActionLists, ModifyTablePath::mergeJoinConditions, NIL, ModifyTablePath::nominalRelation, ModifyTablePath::onconflict, ModifyTablePath::operation, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ModifyTablePath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, ModifyTablePath::resultRelations, ModifyTablePath::returningLists, ModifyTablePath::rootRelation, ModifyTablePath::rowMarks, Path::rows, Path::startup_cost, subpath(), ModifyTablePath::subpath, Path::total_cost, ModifyTablePath::updateColnosLists, and ModifyTablePath::withCheckOptionLists.

Referenced by grouping_planner().

◆ create_namedtuplestorescan_path()

Path * create_namedtuplestorescan_path ( PlannerInfo root,
RelOptInfo rel,
Relids  required_outer 
)

Definition at line 1981 of file pathnode.c.

1983{
1984 Path *pathnode = makeNode(Path);
1985
1986 pathnode->pathtype = T_NamedTuplestoreScan;
1987 pathnode->parent = rel;
1988 pathnode->pathtarget = rel->reltarget;
1989 pathnode->param_info = get_baserel_parampathinfo(root, rel,
1990 required_outer);
1991 pathnode->parallel_aware = false;
1992 pathnode->parallel_safe = rel->consider_parallel;
1993 pathnode->parallel_workers = 0;
1994 pathnode->pathkeys = NIL; /* result is always unordered */
1995
1996 cost_namedtuplestorescan(pathnode, root, rel, pathnode->param_info);
1997
1998 return pathnode;
1999}
void cost_namedtuplestorescan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1740

References RelOptInfo::consider_parallel, cost_namedtuplestorescan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_namedtuplestore_pathlist().

◆ create_nestloop_path()

NestPath * create_nestloop_path ( PlannerInfo root,
RelOptInfo joinrel,
JoinType  jointype,
JoinCostWorkspace workspace,
JoinPathExtraData extra,
Path outer_path,
Path inner_path,
List restrict_clauses,
List pathkeys,
Relids  required_outer 
)

Definition at line 2294 of file pathnode.c.

2304{
2305 NestPath *pathnode = makeNode(NestPath);
2306 Relids inner_req_outer = PATH_REQ_OUTER(inner_path);
2307 Relids outerrelids;
2308
2309 /*
2310 * Paths are parameterized by top-level parents, so run parameterization
2311 * tests on the parent relids.
2312 */
2313 if (outer_path->parent->top_parent_relids)
2314 outerrelids = outer_path->parent->top_parent_relids;
2315 else
2316 outerrelids = outer_path->parent->relids;
2317
2318 /*
2319 * If the inner path is parameterized by the outer, we must drop any
2320 * restrict_clauses that are due to be moved into the inner path. We have
2321 * to do this now, rather than postpone the work till createplan time,
2322 * because the restrict_clauses list can affect the size and cost
2323 * estimates for this path. We detect such clauses by checking for serial
2324 * number match to clauses already enforced in the inner path.
2325 */
2326 if (bms_overlap(inner_req_outer, outerrelids))
2327 {
2328 Bitmapset *enforced_serials = get_param_path_clause_serials(inner_path);
2329 List *jclauses = NIL;
2330 ListCell *lc;
2331
2332 foreach(lc, restrict_clauses)
2333 {
2334 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
2335
2336 if (!bms_is_member(rinfo->rinfo_serial, enforced_serials))
2337 jclauses = lappend(jclauses, rinfo);
2338 }
2339 restrict_clauses = jclauses;
2340 }
2341
2342 pathnode->jpath.path.pathtype = T_NestLoop;
2343 pathnode->jpath.path.parent = joinrel;
2344 pathnode->jpath.path.pathtarget = joinrel->reltarget;
2345 pathnode->jpath.path.param_info =
2346 get_joinrel_parampathinfo(root,
2347 joinrel,
2348 outer_path,
2349 inner_path,
2350 extra->sjinfo,
2351 required_outer,
2352 &restrict_clauses);
2353 pathnode->jpath.path.parallel_aware = false;
2354 pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2355 outer_path->parallel_safe && inner_path->parallel_safe;
2356 /* This is a foolish way to estimate parallel_workers, but for now... */
2357 pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2358 pathnode->jpath.path.pathkeys = pathkeys;
2359 pathnode->jpath.jointype = jointype;
2360 pathnode->jpath.inner_unique = extra->inner_unique;
2361 pathnode->jpath.outerjoinpath = outer_path;
2362 pathnode->jpath.innerjoinpath = inner_path;
2363 pathnode->jpath.joinrestrictinfo = restrict_clauses;
2364
2365 final_cost_nestloop(root, pathnode, workspace, extra);
2366
2367 return pathnode;
2368}
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:510
void final_cost_nestloop(PlannerInfo *root, NestPath *path, JoinCostWorkspace *workspace, JoinPathExtraData *extra)
Definition: costsize.c:3371
Bitmapset * get_param_path_clause_serials(Path *path)
Definition: relnode.c:2032
JoinPath jpath
Definition: pathnodes.h:2314
int rinfo_serial
Definition: pathnodes.h:2864

References bms_is_member(), bms_overlap(), RelOptInfo::consider_parallel, final_cost_nestloop(), get_joinrel_parampathinfo(), get_param_path_clause_serials(), JoinPath::inner_unique, JoinPathExtraData::inner_unique, JoinPath::innerjoinpath, JoinPath::joinrestrictinfo, JoinPath::jointype, NestPath::jpath, lappend(), lfirst, makeNode, NIL, JoinPath::outerjoinpath, Path::parallel_safe, Path::parallel_workers, PATH_REQ_OUTER, RelOptInfo::reltarget, RestrictInfo::rinfo_serial, root, and JoinPathExtraData::sjinfo.

Referenced by try_nestloop_path(), and try_partial_nestloop_path().
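
A caller-side sketch modeled on try_nestloop_path(): initial_cost_nestloop() fills the workspace that final_cost_nestloop() later refines, and the finished path then competes via add_path(). The surrounding variables are assumed to come from the join-search machinery.

    JoinCostWorkspace workspace;

    initial_cost_nestloop(root, &workspace, jointype,
                          outer_path, inner_path, extra);

    add_path(joinrel, (Path *)
             create_nestloop_path(root, joinrel, jointype,
                                  &workspace, extra,
                                  outer_path, inner_path,
                                  restrict_clauses,
                                  pathkeys,
                                  required_outer));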

◆ create_projection_path()

ProjectionPath * create_projection_path ( PlannerInfo root,
RelOptInfo rel,
Path subpath,
PathTarget target 
)

Definition at line 2525 of file pathnode.c.

2529{
2530 ProjectionPath *pathnode = makeNode(ProjectionPath);
2531 PathTarget *oldtarget;
2532
2533 /*
2534 * We mustn't put a ProjectionPath directly above another; it's useless
2535 * and will confuse create_projection_plan. Rather than making sure all
2536 * callers handle that, let's implement it here, by stripping off any
2537 * ProjectionPath in what we're given. Given this rule, there won't be
2538 * more than one.
2539 */
2540 if (IsA(subpath, ProjectionPath))
2541 {
2542 ProjectionPath *subpp = (ProjectionPath *) subpath;
2543
2544 Assert(subpp->path.parent == rel);
2545 subpath = subpp->subpath;
2546 Assert(!IsA(subpath, ProjectionPath));
2547 }
2548
2549 pathnode->path.pathtype = T_Result;
2550 pathnode->path.parent = rel;
2551 pathnode->path.pathtarget = target;
2552 pathnode->path.param_info = subpath->param_info;
2553 pathnode->path.parallel_aware = false;
2554 pathnode->path.parallel_safe = rel->consider_parallel &&
2555 subpath->parallel_safe &&
2556 is_parallel_safe(root, (Node *) target->exprs);
2557 pathnode->path.parallel_workers = subpath->parallel_workers;
2558 /* Projection does not change the sort order */
2559 pathnode->path.pathkeys = subpath->pathkeys;
2560
2561 pathnode->subpath = subpath;
2562
2563 /*
2564 * We might not need a separate Result node. If the input plan node type
2565 * can project, we can just tell it to project something else. Or, if it
2566 * can't project but the desired target has the same expression list as
2567 * what the input will produce anyway, we can still give it the desired
2568 * tlist (possibly changing its ressortgroupref labels, but nothing else).
2569 * Note: in the latter case, create_projection_plan has to recheck our
2570 * conclusion; see comments therein.
2571 */
2572 oldtarget = subpath->pathtarget;
2573 if (is_projection_capable_path(subpath) ||
2574 equal(oldtarget->exprs, target->exprs))
2575 {
2576 /* No separate Result node needed */
2577 pathnode->dummypp = true;
2578
2579 /*
2580 * Set cost of plan as subpath's cost, adjusted for tlist replacement.
2581 */
2582 pathnode->path.rows = subpath->rows;
2583 pathnode->path.disabled_nodes = subpath->disabled_nodes;
2584 pathnode->path.startup_cost = subpath->startup_cost +
2585 (target->cost.startup - oldtarget->cost.startup);
2586 pathnode->path.total_cost = subpath->total_cost +
2587 (target->cost.startup - oldtarget->cost.startup) +
2588 (target->cost.per_tuple - oldtarget->cost.per_tuple) * subpath->rows;
2589 }
2590 else
2591 {
2592 /* We really do need the Result node */
2593 pathnode->dummypp = false;
2594
2595 /*
2596 * The Result node's cost is cpu_tuple_cost per row, plus the cost of
2597 * evaluating the tlist. There is no qual to worry about.
2598 */
2599 pathnode->path.rows = subpath->rows;
2600 pathnode->path.disabled_nodes = subpath->disabled_nodes;
2601 pathnode->path.startup_cost = subpath->startup_cost +
2602 target->cost.startup;
2603 pathnode->path.total_cost = subpath->total_cost +
2604 target->cost.startup +
2605 (cpu_tuple_cost + target->cost.per_tuple) * subpath->rows;
2606 }
2607
2608 return pathnode;
2609}
bool equal(const void *a, const void *b)
Definition: equalfuncs.c:223
Path * subpath
Definition: pathnodes.h:2404

References Assert(), RelOptInfo::consider_parallel, PathTarget::cost, cpu_tuple_cost, Path::disabled_nodes, ProjectionPath::dummypp, equal(), PathTarget::exprs, is_parallel_safe(), is_projection_capable_path(), IsA, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ProjectionPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), ProjectionPath::subpath, and Path::total_cost.

Referenced by add_paths_with_pathkeys_for_rel(), adjust_paths_for_srfs(), apply_projection_to_path(), apply_scanjoin_target_to_paths(), create_final_unique_paths(), create_partial_grouping_paths(), create_partial_unique_paths(), generate_grouped_paths(), and recurse_set_operations().
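
A minimal sketch: impose a new output target on an existing path. Whether a real Result node is ultimately emitted is decided here (dummypp) and re-checked by create_projection_plan(); newtarget is assumed to be a PathTarget built by the caller.

    Path       *projected;

    projected = (Path *) create_projection_path(root, rel, subpath, newtarget);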

◆ create_recursiveunion_path()

RecursiveUnionPath * create_recursiveunion_path ( PlannerInfo root,
RelOptInfo rel,
Path leftpath,
Path rightpath,
PathTarget target,
List distinctList,
int  wtParam,
double  numGroups 
)

Definition at line 3523 of file pathnode.c.

3531{
3532 RecursiveUnionPath *pathnode = makeNode(RecursiveUnionPath);
3533
3534 pathnode->path.pathtype = T_RecursiveUnion;
3535 pathnode->path.parent = rel;
3536 pathnode->path.pathtarget = target;
3537 /* For now, assume we are above any joins, so no parameterization */
3538 pathnode->path.param_info = NULL;
3539 pathnode->path.parallel_aware = false;
3540 pathnode->path.parallel_safe = rel->consider_parallel &&
3541 leftpath->parallel_safe && rightpath->parallel_safe;
3542 /* Foolish, but we'll do it like joins for now: */
3543 pathnode->path.parallel_workers = leftpath->parallel_workers;
3544 /* RecursiveUnion result is always unsorted */
3545 pathnode->path.pathkeys = NIL;
3546
3547 pathnode->leftpath = leftpath;
3548 pathnode->rightpath = rightpath;
3549 pathnode->distinctList = distinctList;
3550 pathnode->wtParam = wtParam;
3551 pathnode->numGroups = numGroups;
3552
3553 cost_recursive_union(&pathnode->path, leftpath, rightpath);
3554
3555 return pathnode;
3556}
void cost_recursive_union(Path *runion, Path *nrterm, Path *rterm)
Definition: costsize.c:1816
Cardinality numGroups
Definition: pathnodes.h:2581

References RelOptInfo::consider_parallel, cost_recursive_union(), RecursiveUnionPath::distinctList, RecursiveUnionPath::leftpath, makeNode, NIL, RecursiveUnionPath::numGroups, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, RecursiveUnionPath::path, Path::pathkeys, Path::pathtype, RecursiveUnionPath::rightpath, and RecursiveUnionPath::wtParam.

Referenced by generate_recursion_path().

◆ create_rel_agg_info()

RelAggInfo * create_rel_agg_info ( PlannerInfo root,
RelOptInfo rel,
bool  calculate_grouped_rows 
)

Definition at line 2655 of file relnode.c.

2657{
2658 ListCell *lc;
2659 RelAggInfo *result;
2660 PathTarget *agg_input;
2661 PathTarget *target;
2662 List *group_clauses = NIL;
2663 List *group_exprs = NIL;
2664
2665 /*
2666 * The lists of aggregate expressions and grouping expressions should have
2667 * been constructed.
2668 */
2669 Assert(root->agg_clause_list != NIL);
2670 Assert(root->group_expr_list != NIL);
2671
2672 /*
2673 * If this is a child rel, the grouped rel for its parent rel must have
2674 * been created if it can. So we can just use parent's RelAggInfo if
2675 * there is one, with appropriate variable substitutions.
2676 */
2677 if (IS_OTHER_REL(rel))
2678 {
2679 RelOptInfo *grouped_rel;
2680 RelAggInfo *agg_info;
2681
2682 grouped_rel = rel->top_parent->grouped_rel;
2683 if (grouped_rel == NULL)
2684 return NULL;
2685
2686 Assert(IS_GROUPED_REL(grouped_rel));
2687
2688 /* Must do multi-level transformation */
2689 agg_info = (RelAggInfo *)
2690 adjust_appendrel_attrs_multilevel(root,
2691 (Node *) grouped_rel->agg_info,
2692 rel,
2693 rel->top_parent);
2694
2695 agg_info->apply_agg_at = NULL; /* caller will change this later */
2696
2697 if (calculate_grouped_rows)
2698 {
2699 agg_info->grouped_rows =
2700 estimate_num_groups(root, agg_info->group_exprs,
2701 rel->rows, NULL, NULL);
2702
2703 /*
2704 * The grouped paths for the given relation are considered useful
2705 * iff the average group size is no less than
2706 * min_eager_agg_group_size.
2707 */
2708 agg_info->agg_useful =
2709 (rel->rows / agg_info->grouped_rows) >= min_eager_agg_group_size;
2710 }
2711
2712 return agg_info;
2713 }
2714
2715 /* Check if it's possible to produce grouped paths for this relation. */
2716 if (!eager_aggregation_possible_for_relation(root, rel))
2717 return NULL;
2718
2719 /*
2720 * Create targets for the grouped paths and for the input paths of the
2721 * grouped paths.
2722 */
2723 target = create_empty_pathtarget();
2724 agg_input = create_empty_pathtarget();
2725
2726 /* ... and initialize these targets */
2727 if (!init_grouping_targets(root, rel, target, agg_input,
2728 &group_clauses, &group_exprs))
2729 return NULL;
2730
2731 /*
2732 * Eager aggregation is not applicable if there are no available grouping
2733 * expressions.
2734 */
2735 if (group_clauses == NIL)
2736 return NULL;
2737
2738 /* Add aggregates to the grouping target */
2739 foreach(lc, root->agg_clause_list)
2740 {
2741 AggClauseInfo *ac_info = lfirst_node(AggClauseInfo, lc);
2742 Aggref *aggref;
2743
2744 Assert(IsA(ac_info->aggref, Aggref));
2745
2746 aggref = (Aggref *) copyObject(ac_info->aggref);
2747 mark_partial_aggref(aggref, AGGSPLIT_INITIAL_SERIAL);
2748
2749 add_column_to_pathtarget(target, (Expr *) aggref, 0);
2750 }
2751
2752 /* Set the estimated eval cost and output width for both targets */
2753 set_pathtarget_cost_width(root, target);
2754 set_pathtarget_cost_width(root, agg_input);
2755
2756 /* build the RelAggInfo result */
2757 result = makeNode(RelAggInfo);
2758 result->target = target;
2759 result->agg_input = agg_input;
2760 result->group_clauses = group_clauses;
2761 result->group_exprs = group_exprs;
2762 result->apply_agg_at = NULL; /* caller will change this later */
2763
2764 if (calculate_grouped_rows)
2765 {
2766 result->grouped_rows = estimate_num_groups(root, result->group_exprs,
2767 rel->rows, NULL, NULL);
2768
2769 /*
2770 * The grouped paths for the given relation are considered useful iff
2771 * the average group size is no less than min_eager_agg_group_size.
2772 */
2773 result->agg_useful =
2774 (rel->rows / result->grouped_rows) >= min_eager_agg_group_size;
2775 }
2776
2777 return result;
2778}
double min_eager_agg_group_size
Definition: allpaths.c:84
Node * adjust_appendrel_attrs_multilevel(PlannerInfo *root, Node *node, RelOptInfo *childrel, RelOptInfo *parentrel)
Definition: appendinfo.c:592
PathTarget * set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
Definition: costsize.c:6392
#define copyObject(obj)
Definition: nodes.h:232
@ AGGSPLIT_INITIAL_SERIAL
Definition: nodes.h:389
#define IS_GROUPED_REL(rel)
Definition: pathnodes.h:1161
#define lfirst_node(type, lc)
Definition: pg_list.h:176
void mark_partial_aggref(Aggref *agg, AggSplit aggsplit)
Definition: planner.c:5762
static bool init_grouping_targets(PlannerInfo *root, RelOptInfo *rel, PathTarget *target, PathTarget *agg_input, List **group_clauses, List **group_exprs)
Definition: relnode.c:2865
static bool eager_aggregation_possible_for_relation(PlannerInfo *root, RelOptInfo *rel)
Definition: relnode.c:2785
double estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, List **pgset, EstimationInfo *estinfo)
Definition: selfuncs.c:3768
Aggref * aggref
Definition: pathnodes.h:3373
List * group_exprs
Definition: pathnodes.h:1203
List * group_clauses
Definition: pathnodes.h:1201
struct PathTarget * agg_input
Definition: pathnodes.h:1198
void add_column_to_pathtarget(PathTarget *target, Expr *expr, Index sortgroupref)
Definition: tlist.c:695

References add_column_to_pathtarget(), adjust_appendrel_attrs_multilevel(), RelOptInfo::agg_info, RelAggInfo::agg_input, RelAggInfo::agg_useful, AggClauseInfo::aggref, AGGSPLIT_INITIAL_SERIAL, RelAggInfo::apply_agg_at, Assert(), copyObject, create_empty_pathtarget(), eager_aggregation_possible_for_relation(), estimate_num_groups(), RelAggInfo::group_clauses, RelAggInfo::group_exprs, RelOptInfo::grouped_rel, RelAggInfo::grouped_rows, init_grouping_targets(), IS_GROUPED_REL, IS_OTHER_REL, IsA, lfirst_node, makeNode, mark_partial_aggref(), min_eager_agg_group_size, NIL, root, RelOptInfo::rows, set_pathtarget_cost_width(), and RelAggInfo::target.

Referenced by build_simple_grouped_rel(), and make_grouped_join_rel().

◆ create_resultscan_path()

Path * create_resultscan_path ( PlannerInfo root,
RelOptInfo rel,
Relids  required_outer 
)

Definition at line 2007 of file pathnode.c.

2009{
2010 Path *pathnode = makeNode(Path);
2011
2012 pathnode->pathtype = T_Result;
2013 pathnode->parent = rel;
2014 pathnode->pathtarget = rel->reltarget;
2015 pathnode->param_info = get_baserel_parampathinfo(root, rel,
2016 required_outer);
2017 pathnode->parallel_aware = false;
2018 pathnode->parallel_safe = rel->consider_parallel;
2019 pathnode->parallel_workers = 0;
2020 pathnode->pathkeys = NIL; /* result is always unordered */
2021
2022 cost_resultscan(pathnode, root, rel, pathnode->param_info);
2023
2024 return pathnode;
2025}
void cost_resultscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1778

References RelOptInfo::consider_parallel, cost_resultscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by reparameterize_path(), and set_result_pathlist().

◆ create_samplescan_path()

Path * create_samplescan_path ( PlannerInfo root,
RelOptInfo rel,
Relids  required_outer 
)

Definition at line 1008 of file pathnode.c.

1009{
1010 Path *pathnode = makeNode(Path);
1011
1012 pathnode->pathtype = T_SampleScan;
1013 pathnode->parent = rel;
1014 pathnode->pathtarget = rel->reltarget;
1015 pathnode->param_info = get_baserel_parampathinfo(root, rel,
1016 required_outer);
1017 pathnode->parallel_aware = false;
1018 pathnode->parallel_safe = rel->consider_parallel;
1019 pathnode->parallel_workers = 0;
1020 pathnode->pathkeys = NIL; /* samplescan has unordered result */
1021
1022 cost_samplescan(pathnode, root, rel, pathnode->param_info);
1023
1024 return pathnode;
1025}
void cost_samplescan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:344

References RelOptInfo::consider_parallel, cost_samplescan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by reparameterize_path(), and set_tablesample_rel_pathlist().

◆ create_seqscan_path()

Path * create_seqscan_path ( PlannerInfo root,
RelOptInfo rel,
Relids  required_outer,
int  parallel_workers 
)

Definition at line 983 of file pathnode.c.

985{
986 Path *pathnode = makeNode(Path);
987
988 pathnode->pathtype = T_SeqScan;
989 pathnode->parent = rel;
990 pathnode->pathtarget = rel->reltarget;
991 pathnode->param_info = get_baserel_parampathinfo(root, rel,
992 required_outer);
993 pathnode->parallel_aware = (parallel_workers > 0);
994 pathnode->parallel_safe = rel->consider_parallel;
995 pathnode->parallel_workers = parallel_workers;
996 pathnode->pathkeys = NIL; /* seqscan has unordered result */
997
998 cost_seqscan(pathnode, root, rel, pathnode->param_info);
999
1000 return pathnode;
1001}
void cost_seqscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:269

References RelOptInfo::consider_parallel, cost_seqscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by create_plain_partial_paths(), plan_cluster_use_sort(), reparameterize_path(), and set_plain_rel_pathlist().
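
A minimal sketch of the typical call (compare set_plain_rel_pathlist()): an unparameterized, non-parallel sequential scan for a base relation, parameterized only by any required lateral references, added to the rel's pathlist via add_path().

    Relids      required_outer = rel->lateral_relids;   /* may be NULL */

    add_path(rel, create_seqscan_path(root, rel, required_outer, 0));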

◆ create_set_projection_path()

ProjectSetPath * create_set_projection_path ( PlannerInfo root,
RelOptInfo rel,
Path subpath,
PathTarget target 
)

Definition at line 2723 of file pathnode.c.

2727{
2728 ProjectSetPath *pathnode = makeNode(ProjectSetPath);
2729 double tlist_rows;
2730 ListCell *lc;
2731
2732 pathnode->path.pathtype = T_ProjectSet;
2733 pathnode->path.parent = rel;
2734 pathnode->path.pathtarget = target;
2735 /* For now, assume we are above any joins, so no parameterization */
2736 pathnode->path.param_info = NULL;
2737 pathnode->path.parallel_aware = false;
2738 pathnode->path.parallel_safe = rel->consider_parallel &&
2739 subpath->parallel_safe &&
2740 is_parallel_safe(root, (Node *) target->exprs);
2741 pathnode->path.parallel_workers = subpath->parallel_workers;
2742 /* Projection does not change the sort order XXX? */
2743 pathnode->path.pathkeys = subpath->pathkeys;
2744
2745 pathnode->subpath = subpath;
2746
2747 /*
2748 * Estimate number of rows produced by SRFs for each row of input; if
2749 * there's more than one in this node, use the maximum.
2750 */
2751 tlist_rows = 1;
2752 foreach(lc, target->exprs)
2753 {
2754 Node *node = (Node *) lfirst(lc);
2755 double itemrows;
2756
2757 itemrows = expression_returns_set_rows(root, node);
2758 if (tlist_rows < itemrows)
2759 tlist_rows = itemrows;
2760 }
2761
2762 /*
2763 * In addition to the cost of evaluating the tlist, charge cpu_tuple_cost
2764 * per input row, and half of cpu_tuple_cost for each added output row.
2765 * This is slightly bizarre maybe, but it's what 9.6 did; we may revisit
2766 * this estimate later.
2767 */
2768 pathnode->path.disabled_nodes = subpath->disabled_nodes;
2769 pathnode->path.rows = subpath->rows * tlist_rows;
2770 pathnode->path.startup_cost = subpath->startup_cost +
2771 target->cost.startup;
2772 pathnode->path.total_cost = subpath->total_cost +
2773 target->cost.startup +
2774 (cpu_tuple_cost + target->cost.per_tuple) * subpath->rows +
2775 (pathnode->path.rows - subpath->rows) * cpu_tuple_cost / 2;
2776
2777 return pathnode;
2778}
double expression_returns_set_rows(PlannerInfo *root, Node *clause)
Definition: clauses.c:301
Path * subpath
Definition: pathnodes.h:2416

References RelOptInfo::consider_parallel, PathTarget::cost, cpu_tuple_cost, Path::disabled_nodes, expression_returns_set_rows(), PathTarget::exprs, is_parallel_safe(), lfirst, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ProjectSetPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), ProjectSetPath::subpath, and Path::total_cost.

Referenced by adjust_paths_for_srfs().

◆ create_setop_path()

SetOpPath * create_setop_path ( PlannerInfo root,
RelOptInfo rel,
Path leftpath,
Path rightpath,
SetOpCmd  cmd,
SetOpStrategy  strategy,
List groupList,
double  numGroups,
double  outputRows 
)

Definition at line 3404 of file pathnode.c.

3413{
3414 SetOpPath *pathnode = makeNode(SetOpPath);
3415
3416 pathnode->path.pathtype = T_SetOp;
3417 pathnode->path.parent = rel;
3418 pathnode->path.pathtarget = rel->reltarget;
3419 /* For now, assume we are above any joins, so no parameterization */
3420 pathnode->path.param_info = NULL;
3421 pathnode->path.parallel_aware = false;
3422 pathnode->path.parallel_safe = rel->consider_parallel &&
3423 leftpath->parallel_safe && rightpath->parallel_safe;
3424 pathnode->path.parallel_workers =
3425 leftpath->parallel_workers + rightpath->parallel_workers;
3426 /* SetOp preserves the input sort order if in sort mode */
3427 pathnode->path.pathkeys =
3428 (strategy == SETOP_SORTED) ? leftpath->pathkeys : NIL;
3429
3430 pathnode->leftpath = leftpath;
3431 pathnode->rightpath = rightpath;
3432 pathnode->cmd = cmd;
3433 pathnode->strategy = strategy;
3434 pathnode->groupList = groupList;
3435 pathnode->numGroups = numGroups;
3436
3437 /*
3438 * Compute cost estimates. As things stand, we end up with the same total
3439 * cost in this node for sort and hash methods, but different startup
3440 * costs. This could be refined perhaps, but it'll do for now.
3441 */
3442 pathnode->path.disabled_nodes =
3443 leftpath->disabled_nodes + rightpath->disabled_nodes;
3444 if (strategy == SETOP_SORTED)
3445 {
3446 /*
3447 * In sorted mode, we can emit output incrementally. Charge one
3448 * cpu_operator_cost per comparison per input tuple. Like cost_group,
3449 * we assume all columns get compared at most of the tuples.
3450 */
3451 pathnode->path.startup_cost =
3452 leftpath->startup_cost + rightpath->startup_cost;
3453 pathnode->path.total_cost =
3454 leftpath->total_cost + rightpath->total_cost +
3455 cpu_operator_cost * (leftpath->rows + rightpath->rows) * list_length(groupList);
3456
3457 /*
3458 * Also charge a small amount per extracted tuple. Like cost_sort,
3459 * charge only operator cost not cpu_tuple_cost, since SetOp does no
3460 * qual-checking or projection.
3461 */
3462 pathnode->path.total_cost += cpu_operator_cost * outputRows;
3463 }
3464 else
3465 {
3466 Size hashtablesize;
3467
3468 /*
3469 * In hashed mode, we must read all the input before we can emit
3470 * anything. Also charge comparison costs to represent the cost of
3471 * hash table lookups.
3472 */
3473 pathnode->path.startup_cost =
3474 leftpath->total_cost + rightpath->total_cost +
3475 cpu_operator_cost * (leftpath->rows + rightpath->rows) * list_length(groupList);
3476 pathnode->path.total_cost = pathnode->path.startup_cost;
3477
3478 /*
3479 * Also charge a small amount per extracted tuple. Like cost_sort,
3480 * charge only operator cost not cpu_tuple_cost, since SetOp does no
3481 * qual-checking or projection.
3482 */
3483 pathnode->path.total_cost += cpu_operator_cost * outputRows;
3484
3485 /*
3486 * Mark the path as disabled if enable_hashagg is off. While this
3487 * isn't exactly a HashAgg node, it seems close enough to justify
3488 * letting that switch control it.
3489 */
3490 if (!enable_hashagg)
3491 pathnode->path.disabled_nodes++;
3492
3493 /*
3494 * Also disable if it doesn't look like the hashtable will fit into
3495 * hash_mem. (Note: reject on equality, to ensure that an estimate of
3496 * SIZE_MAX disables hashing regardless of the hash_mem limit.)
3497 */
3498 hashtablesize = EstimateSetOpHashTableSpace(numGroups,
3499 leftpath->pathtarget->width);
3500 if (hashtablesize >= get_hash_memory_limit())
3501 pathnode->path.disabled_nodes++;
3502 }
3503 pathnode->path.rows = outputRows;
3504
3505 return pathnode;
3506}
size_t Size
Definition: c.h:613
double cpu_operator_cost
Definition: costsize.c:134
bool enable_hashagg
Definition: costsize.c:152
size_t get_hash_memory_limit(void)
Definition: nodeHash.c:3621
Size EstimateSetOpHashTableSpace(double nentries, Size tupleWidth)
Definition: nodeSetOp.c:115
@ SETOP_SORTED
Definition: nodes.h:416
Path * rightpath
Definition: pathnodes.h:2564
Cardinality numGroups
Definition: pathnodes.h:2568
Path * leftpath
Definition: pathnodes.h:2563
SetOpCmd cmd
Definition: pathnodes.h:2565
Path path
Definition: pathnodes.h:2562
SetOpStrategy strategy
Definition: pathnodes.h:2566
List * groupList
Definition: pathnodes.h:2567

References SetOpPath::cmd, RelOptInfo::consider_parallel, cpu_operator_cost, Path::disabled_nodes, enable_hashagg, EstimateSetOpHashTableSpace(), get_hash_memory_limit(), SetOpPath::groupList, SetOpPath::leftpath, list_length(), makeNode, NIL, SetOpPath::numGroups, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, SetOpPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, SetOpPath::rightpath, Path::rows, SETOP_SORTED, Path::startup_cost, SetOpPath::strategy, and Path::total_cost.

Referenced by generate_nonunion_paths().
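
The two branches above differ only in when the comparison work is charged: sorted mode spreads it across the run (cheap startup), while hashed mode pays all of it before the first row can be emitted. The following standalone sketch, using made-up input figures rather than real planner data, reproduces the arithmetic shown in the listing and prints both variants; note that the total costs come out identical and only the startup costs differ.

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical child-path figures, not taken from any real plan */
        double cpu_operator_cost = 0.0025;
        double left_startup = 1.0, left_total = 100.0, left_rows = 1000.0;
        double right_startup = 2.0, right_total = 120.0, right_rows = 1200.0;
        int    ngroupcols = 2;          /* stands in for list_length(groupList) */
        double output_rows = 800.0;

        double compare = cpu_operator_cost * (left_rows + right_rows) * ngroupcols;

        /* SETOP_SORTED: output can begin as soon as both inputs start */
        double sorted_startup = left_startup + right_startup;
        double sorted_total = left_total + right_total + compare
                            + cpu_operator_cost * output_rows;

        /* hashed: must read both inputs completely before emitting anything */
        double hashed_startup = left_total + right_total + compare;
        double hashed_total = hashed_startup + cpu_operator_cost * output_rows;

        printf("sorted: startup=%.2f total=%.2f\n", sorted_startup, sorted_total);
        printf("hashed: startup=%.2f total=%.2f\n", hashed_startup, hashed_total);
        return 0;
    }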

◆ create_sort_path()

SortPath * create_sort_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * subpath,
List * pathkeys,
double  limit_tuples 
)

Definition at line 2842 of file pathnode.c.

2847{
2848 SortPath *pathnode = makeNode(SortPath);
2849
2850 pathnode->path.pathtype = T_Sort;
2851 pathnode->path.parent = rel;
2852 /* Sort doesn't project, so use source path's pathtarget */
2853 pathnode->path.pathtarget = subpath->pathtarget;
2854 pathnode->path.param_info = subpath->param_info;
2855 pathnode->path.parallel_aware = false;
2856 pathnode->path.parallel_safe = rel->consider_parallel &&
2857 subpath->parallel_safe;
2858 pathnode->path.parallel_workers = subpath->parallel_workers;
2859 pathnode->path.pathkeys = pathkeys;
2860
2861 pathnode->subpath = subpath;
2862
2863 cost_sort(&pathnode->path, root, pathkeys,
2864 subpath->disabled_nodes,
2865 subpath->total_cost,
2866 subpath->rows,
2867 subpath->pathtarget->width,
2868 0.0, /* XXX comparison_cost shouldn't be 0? */
2869 work_mem, limit_tuples);
2870
2871 return pathnode;
2872}

References RelOptInfo::consider_parallel, cost_sort(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, SortPath::path, Path::pathkeys, Path::pathtype, root, subpath(), SortPath::subpath, and work_mem.

Referenced by add_paths_with_pathkeys_for_rel(), build_setop_child_paths(), create_final_unique_paths(), create_one_window_path(), create_ordered_paths(), create_partial_unique_paths(), gather_grouping_paths(), generate_grouped_paths(), generate_nonunion_paths(), generate_union_paths(), generate_useful_gather_paths(), and make_ordered_path().

◆ create_subqueryscan_path()

SubqueryScanPath * create_subqueryscan_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * subpath,
bool  trivial_pathtarget,
List * pathkeys,
Relids  required_outer 
)

Definition at line 1847 of file pathnode.c.

1850{
1851 SubqueryScanPath *pathnode = makeNode(SubqueryScanPath);
1852
1853 pathnode->path.pathtype = T_SubqueryScan;
1854 pathnode->path.parent = rel;
1855 pathnode->path.pathtarget = rel->reltarget;
1856 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1857 required_outer);
1858 pathnode->path.parallel_aware = false;
1859 pathnode->path.parallel_safe = rel->consider_parallel &&
1860 subpath->parallel_safe;
1861 pathnode->path.parallel_workers = subpath->parallel_workers;
1862 pathnode->path.pathkeys = pathkeys;
1863 pathnode->subpath = subpath;
1864
1865 cost_subqueryscan(pathnode, root, rel, pathnode->path.param_info,
1866 trivial_pathtarget);
1867
1868 return pathnode;
1869}
void cost_subqueryscan(SubqueryScanPath *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info, bool trivial_pathtarget)
Definition: costsize.c:1447

References RelOptInfo::consider_parallel, cost_subqueryscan(), get_baserel_parampathinfo(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, SubqueryScanPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, root, subpath(), and SubqueryScanPath::subpath.

Referenced by build_setop_child_paths(), reparameterize_path(), and set_subquery_pathlist().

◆ create_tablefuncscan_path()

Path * create_tablefuncscan_path ( PlannerInfo * root,
RelOptInfo * rel,
Relids  required_outer 
)

Definition at line 1903 of file pathnode.c.

1905{
1906 Path *pathnode = makeNode(Path);
1907
1908 pathnode->pathtype = T_TableFuncScan;
1909 pathnode->parent = rel;
1910 pathnode->pathtarget = rel->reltarget;
1911 pathnode->param_info = get_baserel_parampathinfo(root, rel,
1912 required_outer);
1913 pathnode->parallel_aware = false;
1914 pathnode->parallel_safe = rel->consider_parallel;
1915 pathnode->parallel_workers = 0;
1916 pathnode->pathkeys = NIL; /* result is always unordered */
1917
1918 cost_tablefuncscan(pathnode, root, rel, pathnode->param_info);
1919
1920 return pathnode;
1921}
void cost_tablefuncscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1590

References RelOptInfo::consider_parallel, cost_tablefuncscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_tablefunc_pathlist().

◆ create_tidrangescan_path()

TidRangePath * create_tidrangescan_path ( PlannerInfo * root,
RelOptInfo * rel,
List * tidrangequals,
Relids  required_outer,
int  parallel_workers 
)

Definition at line 1264 of file pathnode.c.

1267{
1268 TidRangePath *pathnode = makeNode(TidRangePath);
1269
1270 pathnode->path.pathtype = T_TidRangeScan;
1271 pathnode->path.parent = rel;
1272 pathnode->path.pathtarget = rel->reltarget;
1273 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1274 required_outer);
1275 pathnode->path.parallel_aware = (parallel_workers > 0);
1276 pathnode->path.parallel_safe = rel->consider_parallel;
1277 pathnode->path.parallel_workers = parallel_workers;
1278 pathnode->path.pathkeys = NIL; /* always unordered */
1279
1280 pathnode->tidrangequals = tidrangequals;
1281
1282 cost_tidrangescan(&pathnode->path, root, rel, tidrangequals,
1283 pathnode->path.param_info);
1284
1285 return pathnode;
1286}
void cost_tidrangescan(Path *path, PlannerInfo *root, RelOptInfo *baserel, List *tidrangequals, ParamPathInfo *param_info)
Definition: costsize.c:1337
List * tidrangequals
Definition: pathnodes.h:2084

References RelOptInfo::consider_parallel, cost_tidrangescan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, TidRangePath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, root, and TidRangePath::tidrangequals.

Referenced by create_tidscan_paths().

◆ create_tidscan_path()

TidPath * create_tidscan_path ( PlannerInfo * root,
RelOptInfo * rel,
List * tidquals,
Relids  required_outer 
)

Definition at line 1235 of file pathnode.c.

1237{
1238 TidPath *pathnode = makeNode(TidPath);
1239
1240 pathnode->path.pathtype = T_TidScan;
1241 pathnode->path.parent = rel;
1242 pathnode->path.pathtarget = rel->reltarget;
1243 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1244 required_outer);
1245 pathnode->path.parallel_aware = false;
1246 pathnode->path.parallel_safe = rel->consider_parallel;
1247 pathnode->path.parallel_workers = 0;
1248 pathnode->path.pathkeys = NIL; /* always unordered */
1249
1250 pathnode->tidquals = tidquals;
1251
1252 cost_tidscan(&pathnode->path, root, rel, tidquals,
1253 pathnode->path.param_info);
1254
1255 return pathnode;
1256}
void cost_tidscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, List *tidquals, ParamPathInfo *param_info)
Definition: costsize.c:1232
List * tidquals
Definition: pathnodes.h:2072
Path path
Definition: pathnodes.h:2071

References RelOptInfo::consider_parallel, cost_tidscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, TidPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, root, and TidPath::tidquals.

Referenced by BuildParameterizedTidPaths(), and create_tidscan_paths().

◆ create_unique_path()

UniquePath * create_unique_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * subpath,
int  numCols,
double  numGroups 
)

Definition at line 2943 of file pathnode.c.

2948{
2949 UniquePath *pathnode = makeNode(UniquePath);
2950
2951 pathnode->path.pathtype = T_Unique;
2952 pathnode->path.parent = rel;
2953 /* Unique doesn't project, so use source path's pathtarget */
2954 pathnode->path.pathtarget = subpath->pathtarget;
2955 pathnode->path.param_info = subpath->param_info;
2956 pathnode->path.parallel_aware = false;
2957 pathnode->path.parallel_safe = rel->consider_parallel &&
2958 subpath->parallel_safe;
2959 pathnode->path.parallel_workers = subpath->parallel_workers;
2960 /* Unique doesn't change the input ordering */
2961 pathnode->path.pathkeys = subpath->pathkeys;
2962
2963 pathnode->subpath = subpath;
2964 pathnode->numkeys = numCols;
2965
2966 /*
2967 * Charge one cpu_operator_cost per comparison per input tuple. We assume
2968 * all columns get compared at most of the tuples. (XXX probably this is
2969 * an overestimate.)
2970 */
2971 pathnode->path.disabled_nodes = subpath->disabled_nodes;
2972 pathnode->path.startup_cost = subpath->startup_cost;
2973 pathnode->path.total_cost = subpath->total_cost +
2974 cpu_operator_cost * subpath->rows * numCols;
2975 pathnode->path.rows = numGroups;
2976
2977 return pathnode;
2978}
Path * subpath
Definition: pathnodes.h:2470

References RelOptInfo::consider_parallel, cpu_operator_cost, Path::disabled_nodes, makeNode, UniquePath::numkeys, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, UniquePath::path, Path::pathkeys, Path::pathtype, Path::rows, Path::startup_cost, subpath(), UniquePath::subpath, and Path::total_cost.

Referenced by create_final_distinct_paths(), create_final_unique_paths(), create_partial_distinct_paths(), create_partial_unique_paths(), and generate_union_paths().
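
The comparison charge above is easy to check by hand: one cpu_operator_cost per key column per input row, with the output row count taken straight from the caller's numGroups estimate. A standalone sketch with made-up numbers, not planner code:

    #include <stdio.h>

    int main(void)
    {
        double cpu_operator_cost = 0.0025;   /* default value of the GUC */
        double sub_startup = 5.0, sub_total = 250.0, sub_rows = 10000.0;
        int    numCols = 3;
        double numGroups = 400.0;

        double startup = sub_startup;        /* startup cost passes through */
        double total = sub_total + cpu_operator_cost * sub_rows * numCols;

        printf("Unique: startup=%.2f total=%.2f rows=%.0f\n",
               startup, total, numGroups);
        return 0;
    }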

◆ create_valuesscan_path()

Path * create_valuesscan_path ( PlannerInfo * root,
RelOptInfo * rel,
Relids  required_outer 
)

Definition at line 1929 of file pathnode.c.

1931{
1932 Path *pathnode = makeNode(Path);
1933
1934 pathnode->pathtype = T_ValuesScan;
1935 pathnode->parent = rel;
1936 pathnode->pathtarget = rel->reltarget;
1937 pathnode->param_info = get_baserel_parampathinfo(root, rel,
1938 required_outer);
1939 pathnode->parallel_aware = false;
1940 pathnode->parallel_safe = rel->consider_parallel;
1941 pathnode->parallel_workers = 0;
1942 pathnode->pathkeys = NIL; /* result is always unordered */
1943
1944 cost_valuesscan(pathnode, root, rel, pathnode->param_info);
1945
1946 return pathnode;
1947}
void cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1647

References RelOptInfo::consider_parallel, cost_valuesscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_values_pathlist().

◆ create_windowagg_path()

WindowAggPath * create_windowagg_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * subpath,
PathTarget * target,
List * windowFuncs,
List * runCondition,
WindowClause * winclause,
List * qual,
bool  topwindow 
)

Definition at line 3331 of file pathnode.c.

3340{
3341 WindowAggPath *pathnode = makeNode(WindowAggPath);
3342
3343 /* qual can only be set for the topwindow */
3344 Assert(qual == NIL || topwindow);
3345
3346 pathnode->path.pathtype = T_WindowAgg;
3347 pathnode->path.parent = rel;
3348 pathnode->path.pathtarget = target;
3349 /* For now, assume we are above any joins, so no parameterization */
3350 pathnode->path.param_info = NULL;
3351 pathnode->path.parallel_aware = false;
3352 pathnode->path.parallel_safe = rel->consider_parallel &&
3353 subpath->parallel_safe;
3354 pathnode->path.parallel_workers = subpath->parallel_workers;
3355 /* WindowAgg preserves the input sort order */
3356 pathnode->path.pathkeys = subpath->pathkeys;
3357
3358 pathnode->subpath = subpath;
3359 pathnode->winclause = winclause;
3360 pathnode->qual = qual;
3361 pathnode->runCondition = runCondition;
3362 pathnode->topwindow = topwindow;
3363
3364 /*
3365 * For costing purposes, assume that there are no redundant partitioning
3366 * or ordering columns; it's not worth the trouble to deal with that
3367 * corner case here. So we just pass the unmodified list lengths to
3368 * cost_windowagg.
3369 */
3370 cost_windowagg(&pathnode->path, root,
3371 windowFuncs,
3372 winclause,
3373 subpath->disabled_nodes,
3374 subpath->startup_cost,
3375 subpath->total_cost,
3376 subpath->rows);
3377
3378 /* add tlist eval cost for each output row */
3379 pathnode->path.startup_cost += target->cost.startup;
3380 pathnode->path.total_cost += target->cost.startup +
3381 target->cost.per_tuple * pathnode->path.rows;
3382
3383 return pathnode;
3384}
void cost_windowagg(Path *path, PlannerInfo *root, List *windowFuncs, WindowClause *winclause, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double input_tuples)
Definition: costsize.c:3120
List * runCondition
Definition: pathnodes.h:2552
Path * subpath
Definition: pathnodes.h:2549
WindowClause * winclause
Definition: pathnodes.h:2550

References Assert(), RelOptInfo::consider_parallel, PathTarget::cost, cost_windowagg(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, WindowAggPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, WindowAggPath::qual, root, Path::rows, WindowAggPath::runCondition, QualCost::startup, Path::startup_cost, subpath(), WindowAggPath::subpath, WindowAggPath::topwindow, Path::total_cost, and WindowAggPath::winclause.

Referenced by create_one_window_path().

◆ create_worktablescan_path()

Path * create_worktablescan_path ( PlannerInfo * root,
RelOptInfo * rel,
Relids  required_outer 
)

Definition at line 2033 of file pathnode.c.

2035{
2036 Path *pathnode = makeNode(Path);
2037
2038 pathnode->pathtype = T_WorkTableScan;
2039 pathnode->parent = rel;
2040 pathnode->pathtarget = rel->reltarget;
2041 pathnode->param_info = get_baserel_parampathinfo(root, rel,
2042 required_outer);
2043 pathnode->parallel_aware = false;
2044 pathnode->parallel_safe = rel->consider_parallel;
2045 pathnode->parallel_workers = 0;
2046 pathnode->pathkeys = NIL; /* result is always unordered */
2047
2048 /* Cost is the same as for a regular CTE scan */
2049 cost_ctescan(pathnode, root, rel, pathnode->param_info);
2050
2051 return pathnode;
2052}

References RelOptInfo::consider_parallel, cost_ctescan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_worktable_pathlist().

◆ expand_planner_arrays()

void expand_planner_arrays ( PlannerInfo * root,
int  add_size 
)

Definition at line 177 of file relnode.c.

178{
179 int new_size;
180
181 Assert(add_size > 0);
182
183 new_size = root->simple_rel_array_size + add_size;
184
185 root->simple_rel_array =
186 repalloc0_array(root->simple_rel_array, RelOptInfo *, root->simple_rel_array_size, new_size);
187
188 root->simple_rte_array =
189 repalloc0_array(root->simple_rte_array, RangeTblEntry *, root->simple_rel_array_size, new_size);
190
191 if (root->append_rel_array)
192 root->append_rel_array =
193 repalloc0_array(root->append_rel_array, AppendRelInfo *, root->simple_rel_array_size, new_size);
194 else
195 root->append_rel_array =
196 palloc0_array(AppendRelInfo *, new_size);
197
198 root->simple_rel_array_size = new_size;
199}
#define palloc0_array(type, count)
Definition: fe_memutils.h:77
#define repalloc0_array(pointer, type, oldcount, count)
Definition: palloc.h:109
Size add_size(Size s1, Size s2)
Definition: shmem.c:495

References add_size(), Assert(), palloc0_array, repalloc0_array, and root.

Referenced by expand_inherited_rtentry(), and expand_partitioned_rtentry().
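
The function simply grows three parallel planner arrays and zero-fills the newly added slots. A minimal standalone analogue of that repalloc0_array pattern, using plain realloc/memset instead of PostgreSQL memory contexts, might look like this:

    #include <stdlib.h>
    #include <string.h>

    /* Grow an array of pointers from old_size to new_size elements,
     * zero-filling the added tail -- the behaviour repalloc0_array
     * provides inside the planner. */
    static void **
    grow_ptr_array(void **arr, int old_size, int new_size)
    {
        arr = realloc(arr, sizeof(void *) * new_size);
        if (arr == NULL)
            abort();            /* out of memory */
        memset(arr + old_size, 0, sizeof(void *) * (new_size - old_size));
        return arr;
    }

    int main(void)
    {
        void **simple_rel_array = calloc(4, sizeof(void *));
        int    simple_rel_array_size = 4;
        int    add = 3;

        simple_rel_array = grow_ptr_array(simple_rel_array,
                                          simple_rel_array_size,
                                          simple_rel_array_size + add);
        simple_rel_array_size += add;

        free(simple_rel_array);
        return 0;
    }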

◆ fetch_upper_rel()

RelOptInfo * fetch_upper_rel ( PlannerInfo * root,
UpperRelationKind  kind,
Relids  relids 
)

Definition at line 1581 of file relnode.c.

1582{
1583 RelOptInfo *upperrel;
1584 ListCell *lc;
1585
1586 /*
1587 * For the moment, our indexing data structure is just a List for each
1588 * relation kind. If we ever get so many of one kind that this stops
1589 * working well, we can improve it. No code outside this function should
1590 * assume anything about how to find a particular upperrel.
1591 */
1592
1593 /* If we already made this upperrel for the query, return it */
1594 foreach(lc, root->upper_rels[kind])
1595 {
1596 upperrel = (RelOptInfo *) lfirst(lc);
1597
1598 if (bms_equal(upperrel->relids, relids))
1599 return upperrel;
1600 }
1601
1602 upperrel = makeNode(RelOptInfo);
1603 upperrel->reloptkind = RELOPT_UPPER_REL;
1604 upperrel->relids = bms_copy(relids);
1605
1606 /* cheap startup cost is interesting iff not all tuples to be retrieved */
1607 upperrel->consider_startup = (root->tuple_fraction > 0);
1608 upperrel->consider_param_startup = false;
1609 upperrel->consider_parallel = false; /* might get changed later */
1610 upperrel->reltarget = create_empty_pathtarget();
1611 upperrel->pathlist = NIL;
1612 upperrel->cheapest_startup_path = NULL;
1613 upperrel->cheapest_total_path = NULL;
1614 upperrel->cheapest_parameterized_paths = NIL;
1615
1616 root->upper_rels[kind] = lappend(root->upper_rels[kind], upperrel);
1617
1618 return upperrel;
1619}
@ RELOPT_UPPER_REL
Definition: pathnodes.h:887

References bms_copy(), bms_equal(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RelOptInfo::consider_parallel, RelOptInfo::consider_param_startup, RelOptInfo::consider_startup, create_empty_pathtarget(), lappend(), lfirst, makeNode, NIL, RelOptInfo::pathlist, RelOptInfo::relids, RELOPT_UPPER_REL, RelOptInfo::reloptkind, RelOptInfo::reltarget, and root.

Referenced by add_rtes_to_flat_rtable(), build_setop_child_paths(), create_distinct_paths(), create_ordered_paths(), create_partial_distinct_paths(), create_partial_grouping_paths(), create_window_paths(), generate_nonunion_paths(), generate_recursion_path(), generate_union_paths(), grouping_planner(), make_grouping_rel(), make_subplan(), preprocess_minmax_aggregates(), set_subquery_pathlist(), set_subquery_size_estimates(), SS_process_ctes(), standard_planner(), and subquery_planner().
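
The lookup is a straightforward find-or-create over a per-kind list. The standalone sketch below mimics that shape with a toy struct and an integer key standing in for the Relids set; it illustrates the pattern only and has nothing to do with real RelOptInfo handling.

    #include <stdlib.h>

    /* toy stand-in: key plays the role of the Relids set */
    typedef struct UpperRel
    {
        int              key;
        struct UpperRel *next;
    } UpperRel;

    static UpperRel *upper_rels[4];     /* one list per "kind" */

    /* Return the existing entry for (kind, key), or create and link a new one. */
    static UpperRel *
    fetch_upper(int kind, int key)
    {
        UpperRel *rel;

        for (rel = upper_rels[kind]; rel != NULL; rel = rel->next)
            if (rel->key == key)
                return rel;

        rel = calloc(1, sizeof(UpperRel));
        rel->key = key;
        rel->next = upper_rels[kind];
        upper_rels[kind] = rel;
        return rel;
    }

    int main(void)
    {
        UpperRel *a = fetch_upper(0, 42);
        UpperRel *b = fetch_upper(0, 42);   /* second call finds the same object */
        return (a == b) ? 0 : 1;
    }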

◆ find_base_rel()

◆ find_base_rel_ignore_join()

RelOptInfo * find_base_rel_ignore_join ( PlannerInfo * root,
int  relid 
)

Definition at line 569 of file relnode.c.

570{
571 /* use an unsigned comparison to prevent negative array element access */
572 if ((uint32) relid < (uint32) root->simple_rel_array_size)
573 {
574 RelOptInfo *rel;
575 RangeTblEntry *rte;
576
577 rel = root->simple_rel_array[relid];
578 if (rel)
579 return rel;
580
581 /*
582 * We could just return NULL here, but for debugging purposes it seems
583 * best to actually verify that the relid is an outer join and not
584 * something weird.
585 */
586 rte = root->simple_rte_array[relid];
587 if (rte && rte->rtekind == RTE_JOIN && rte->jointype != JOIN_INNER)
588 return NULL;
589 }
590
591 elog(ERROR, "no relation entry for relid %d", relid);
592
593 return NULL; /* keep compiler quiet */
594}
JoinType jointype
Definition: parsenodes.h:1182

References elog, ERROR, JOIN_INNER, RangeTblEntry::jointype, root, RTE_JOIN, and RangeTblEntry::rtekind.

Referenced by add_join_clause_to_rels(), create_lateral_join_info(), eager_aggregation_possible_for_relation(), find_appinfos_by_relids(), and remove_join_clause_from_rels().
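
The (uint32) casts in the range check do double duty: a negative relid wraps around to a huge unsigned value, so the single "< array_size" comparison rejects both negative and too-large indexes. A tiny standalone demonstration of just that trick:

    #include <stdint.h>
    #include <stdio.h>

    /* One unsigned comparison rejects negative and out-of-range indexes alike. */
    static int
    index_ok(int relid, int array_size)
    {
        return (uint32_t) relid < (uint32_t) array_size;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               index_ok(3, 8),     /* 1: in range              */
               index_ok(-1, 8),    /* 0: wraps to a huge value */
               index_ok(8, 8));    /* 0: one past the end      */
        return 0;
    }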

◆ find_base_rel_noerr()

RelOptInfo * find_base_rel_noerr ( PlannerInfo * root,
int  relid 
)

Definition at line 551 of file relnode.c.

552{
553 /* use an unsigned comparison to prevent negative array element access */
554 if ((uint32) relid < (uint32) root->simple_rel_array_size)
555 return root->simple_rel_array[relid];
556 return NULL;
557}

References root.

Referenced by all_rows_selectable().

◆ find_childrel_parents()

Relids find_childrel_parents ( PlannerInfo * root,
RelOptInfo * rel 
)

Definition at line 1631 of file relnode.c.

1632{
1633 Relids result = NULL;
1634
1635 Assert(rel->reloptkind == RELOPT_OTHER_MEMBER_REL);
1636 Assert(rel->relid > 0 && rel->relid < root->simple_rel_array_size);
1637
1638 do
1639 {
1640 AppendRelInfo *appinfo = root->append_rel_array[rel->relid];
1641 Index prelid = appinfo->parent_relid;
1642
1643 result = bms_add_member(result, prelid);
1644
1645 /* traverse up to the parent rel, loop if it's also a child rel */
1646 rel = find_base_rel(root, prelid);
1647 } while (rel->reloptkind == RELOPT_OTHER_MEMBER_REL);
1648
1649 Assert(rel->reloptkind == RELOPT_BASEREL);
1650
1651 return result;
1652}
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:814
unsigned int Index
Definition: c.h:622
RelOptInfo * find_base_rel(PlannerInfo *root, int relid)
Definition: relnode.c:529
Index parent_relid
Definition: pathnodes.h:3192

References Assert(), bms_add_member(), find_base_rel(), AppendRelInfo::parent_relid, RelOptInfo::relid, RELOPT_BASEREL, RELOPT_OTHER_MEMBER_REL, RelOptInfo::reloptkind, and root.

Referenced by check_index_predicates(), and generate_implied_equalities_for_column().
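
Structurally this is a climb up parent links, collecting each parent's relid into a set until a rel that is not itself a child is reached. A standalone sketch with a plain parent[] array and an integer bitmask standing in for AppendRelInfo and Bitmapset:

    #include <stdio.h>

    #define IS_CHILD(relid)  (parent[relid] != 0)   /* toy "is an other-member rel" test */

    /* parent[i] == 0 means rel i is a top-level baserel */
    static int parent[] = {0, 0, 1, 2, 2};

    /* Collect all ancestors of child_relid into a bitmask. */
    static unsigned
    collect_parents(int child_relid)
    {
        unsigned result = 0;
        int      relid = child_relid;

        do
        {
            int prelid = parent[relid];

            result |= 1u << prelid;
            relid = prelid;             /* climb; loop if the parent is also a child */
        } while (IS_CHILD(relid));

        return result;
    }

    int main(void)
    {
        printf("parents of rel 4: 0x%x\n", collect_parents(4));  /* rels 2 and 1 -> 0x6 */
        return 0;
    }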

◆ find_join_rel()

RelOptInfo * find_join_rel ( PlannerInfo * root,
Relids  relids 
)

Definition at line 642 of file relnode.c.

643{
644 /*
645 * Switch to using hash lookup when list grows "too long". The threshold
646 * is arbitrary and is known only here.
647 */
648 if (!root->join_rel_hash && list_length(root->join_rel_list) > 32)
649 build_join_rel_hash(root);
650
651 /*
652 * Use either hashtable lookup or linear search, as appropriate.
653 *
654 * Note: the seemingly redundant hashkey variable is used to avoid taking
655 * the address of relids; unless the compiler is exceedingly smart, doing
656 * so would force relids out of a register and thus probably slow down the
657 * list-search case.
658 */
659 if (root->join_rel_hash)
660 {
661 Relids hashkey = relids;
662 JoinHashEntry *hentry;
663
664 hentry = (JoinHashEntry *) hash_search(root->join_rel_hash,
665 &hashkey,
666 HASH_FIND,
667 NULL);
668 if (hentry)
669 return hentry->join_rel;
670 }
671 else
672 {
673 ListCell *l;
674
675 foreach(l, root->join_rel_list)
676 {
677 RelOptInfo *rel = (RelOptInfo *) lfirst(l);
678
679 if (bms_equal(rel->relids, relids))
680 return rel;
681 }
682 }
683
684 return NULL;
685}
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:952
@ HASH_FIND
Definition: hsearch.h:113
static void build_join_rel_hash(PlannerInfo *root)
Definition: relnode.c:601
RelOptInfo * join_rel
Definition: relnode.c:47

References bms_equal(), build_join_rel_hash(), HASH_FIND, hash_search(), JoinHashEntry::join_rel, lfirst, list_length(), RelOptInfo::relids, and root.

Referenced by build_child_join_rel(), build_join_rel(), examine_variable(), find_join_input_rel(), get_matching_part_pairs(), and postgresPlanDirectModify().
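
The notable part is the adaptive lookup: a linear list scan until the list passes a fixed threshold, after which a hash structure is built once and used thereafter. The standalone sketch below shows the same shape with a direct-index array standing in for dynahash; unlike the real code it does not handle entries added after the index is built.

    #include <stdlib.h>

    #define HASH_THRESHOLD 32      /* same idea as find_join_rel's "> 32" cutoff */
    #define MAX_KEY        1024

    typedef struct Rel { int key; struct Rel *next; } Rel;

    static Rel  *rel_list;          /* linear-search structure */
    static int   rel_count;
    static Rel **rel_index;         /* lazily built lookup structure */

    static void
    build_index(void)
    {
        rel_index = calloc(MAX_KEY, sizeof(Rel *));
        for (Rel *r = rel_list; r != NULL; r = r->next)
            rel_index[r->key] = r;
    }

    /* Find a rel by key, switching to the index once the list is "too long". */
    static Rel *
    find_rel(int key)
    {
        if (rel_index == NULL && rel_count > HASH_THRESHOLD)
            build_index();

        if (rel_index != NULL)
            return rel_index[key];

        for (Rel *r = rel_list; r != NULL; r = r->next)
            if (r->key == key)
                return r;
        return NULL;
    }

    int main(void)
    {
        for (int i = 0; i < 100; i++)
        {
            Rel *r = calloc(1, sizeof(Rel));
            r->key = i;
            r->next = rel_list;
            rel_list = r;
            rel_count++;
        }
        return (find_rel(57) != NULL && find_rel(999) == NULL) ? 0 : 1;
    }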

◆ find_param_path_info()

ParamPathInfo * find_param_path_info ( RelOptInfo * rel,
Relids  required_outer 
)

Definition at line 2011 of file relnode.c.

2012{
2013 ListCell *lc;
2014
2015 foreach(lc, rel->ppilist)
2016 {
2017 ParamPathInfo *ppi = (ParamPathInfo *) lfirst(lc);
2018
2019 if (bms_equal(ppi->ppi_req_outer, required_outer))
2020 return ppi;
2021 }
2022
2023 return NULL;
2024}
Relids ppi_req_outer
Definition: pathnodes.h:1826

References bms_equal(), lfirst, ParamPathInfo::ppi_req_outer, and RelOptInfo::ppilist.

Referenced by get_appendrel_parampathinfo(), get_baserel_parampathinfo(), get_joinrel_parampathinfo(), and reparameterize_path_by_child().

◆ get_appendrel_parampathinfo()

ParamPathInfo * get_appendrel_parampathinfo ( RelOptInfo * appendrel,
Relids  required_outer 
)

Definition at line 1978 of file relnode.c.

1979{
1980 ParamPathInfo *ppi;
1981
1982 /* If rel has LATERAL refs, every path for it should account for them */
1983 Assert(bms_is_subset(appendrel->lateral_relids, required_outer));
1984
1985 /* Unparameterized paths have no ParamPathInfo */
1986 if (bms_is_empty(required_outer))
1987 return NULL;
1988
1989 Assert(!bms_overlap(appendrel->relids, required_outer));
1990
1991 /* If we already have a PPI for this parameterization, just return it */
1992 if ((ppi = find_param_path_info(appendrel, required_outer)))
1993 return ppi;
1994
1995 /* Else build the ParamPathInfo */
1996 ppi = makeNode(ParamPathInfo);
1997 ppi->ppi_req_outer = required_outer;
1998 ppi->ppi_rows = 0;
1999 ppi->ppi_clauses = NIL;
2000 ppi->ppi_serials = NULL;
2001 appendrel->ppilist = lappend(appendrel->ppilist, ppi);
2002
2003 return ppi;
2004}
bool bms_is_subset(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:412
ParamPathInfo * find_param_path_info(RelOptInfo *rel, Relids required_outer)
Definition: relnode.c:2011
Cardinality ppi_rows
Definition: pathnodes.h:1827
List * ppi_clauses
Definition: pathnodes.h:1828
Bitmapset * ppi_serials
Definition: pathnodes.h:1829

References Assert(), bms_is_empty, bms_is_subset(), bms_overlap(), find_param_path_info(), lappend(), RelOptInfo::lateral_relids, makeNode, NIL, ParamPathInfo::ppi_clauses, ParamPathInfo::ppi_req_outer, ParamPathInfo::ppi_rows, ParamPathInfo::ppi_serials, RelOptInfo::ppilist, and RelOptInfo::relids.

Referenced by create_append_path().

◆ get_baserel_parampathinfo()

ParamPathInfo * get_baserel_parampathinfo ( PlannerInfo * root,
RelOptInfo * baserel,
Relids  required_outer 
)

Definition at line 1667 of file relnode.c.

1669{
1670 ParamPathInfo *ppi;
1671 Relids joinrelids;
1672 List *pclauses;
1673 List *eqclauses;
1674 Bitmapset *pserials;
1675 double rows;
1676 ListCell *lc;
1677
1678 /* If rel has LATERAL refs, every path for it should account for them */
1679 Assert(bms_is_subset(baserel->lateral_relids, required_outer));
1680
1681 /* Unparameterized paths have no ParamPathInfo */
1682 if (bms_is_empty(required_outer))
1683 return NULL;
1684
1685 Assert(!bms_overlap(baserel->relids, required_outer));
1686
1687 /* If we already have a PPI for this parameterization, just return it */
1688 if ((ppi = find_param_path_info(baserel, required_outer)))
1689 return ppi;
1690
1691 /*
1692 * Identify all joinclauses that are movable to this base rel given this
1693 * parameterization.
1694 */
1695 joinrelids = bms_union(baserel->relids, required_outer);
1696 pclauses = NIL;
1697 foreach(lc, baserel->joininfo)
1698 {
1699 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1700
1701 if (join_clause_is_movable_into(rinfo,
1702 baserel->relids,
1703 joinrelids))
1704 pclauses = lappend(pclauses, rinfo);
1705 }
1706
1707 /*
1708 * Add in joinclauses generated by EquivalenceClasses, too. (These
1709 * necessarily satisfy join_clause_is_movable_into; but in assert-enabled
1710 * builds, let's verify that.)
1711 */
1712 eqclauses = generate_join_implied_equalities(root,
1713 joinrelids,
1714 required_outer,
1715 baserel,
1716 NULL);
1717#ifdef USE_ASSERT_CHECKING
1718 foreach(lc, eqclauses)
1719 {
1720 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1721
1722 Assert(join_clause_is_movable_into(rinfo,
1723 baserel->relids,
1724 joinrelids));
1725 }
1726#endif
1727 pclauses = list_concat(pclauses, eqclauses);
1728
1729 /* Compute set of serial numbers of the enforced clauses */
1730 pserials = NULL;
1731 foreach(lc, pclauses)
1732 {
1733 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1734
1735 pserials = bms_add_member(pserials, rinfo->rinfo_serial);
1736 }
1737
1738 /* Estimate the number of rows returned by the parameterized scan */
1739 rows = get_parameterized_baserel_size(root, baserel, pclauses);
1740
1741 /* And now we can build the ParamPathInfo */
1742 ppi = makeNode(ParamPathInfo);
1743 ppi->ppi_req_outer = required_outer;
1744 ppi->ppi_rows = rows;
1745 ppi->ppi_clauses = pclauses;
1746 ppi->ppi_serials = pserials;
1747 baserel->ppilist = lappend(baserel->ppilist, ppi);
1748
1749 return ppi;
1750}
double get_parameterized_baserel_size(PlannerInfo *root, RelOptInfo *rel, List *param_clauses)
Definition: costsize.c:5404
List * generate_join_implied_equalities(PlannerInfo *root, Relids join_relids, Relids outer_relids, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo)
Definition: equivclass.c:1550
bool join_clause_is_movable_into(RestrictInfo *rinfo, Relids currentrelids, Relids current_and_outer)
Definition: restrictinfo.c:661

References Assert(), bms_add_member(), bms_is_empty, bms_is_subset(), bms_overlap(), bms_union(), find_param_path_info(), generate_join_implied_equalities(), get_parameterized_baserel_size(), join_clause_is_movable_into(), RelOptInfo::joininfo, lappend(), RelOptInfo::lateral_relids, lfirst, list_concat(), makeNode, NIL, ParamPathInfo::ppi_clauses, ParamPathInfo::ppi_req_outer, ParamPathInfo::ppi_rows, ParamPathInfo::ppi_serials, RelOptInfo::ppilist, RelOptInfo::relids, RestrictInfo::rinfo_serial, and root.

Referenced by create_append_path(), create_bitmap_and_path(), create_bitmap_heap_path(), create_bitmap_or_path(), create_ctescan_path(), create_foreignscan_path(), create_functionscan_path(), create_gather_merge_path(), create_gather_path(), create_index_path(), create_namedtuplestorescan_path(), create_resultscan_path(), create_samplescan_path(), create_seqscan_path(), create_subqueryscan_path(), create_tablefuncscan_path(), create_tidrangescan_path(), create_tidscan_path(), create_valuesscan_path(), create_worktablescan_path(), postgresGetForeignPaths(), and reparameterize_path().
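
Conceptually, a join clause can be enforced at the parameterized scan when every relation it mentions is either the scanned rel itself or part of required_outer, and the clause is not evaluable on the scanned rel alone. The sketch below imitates that filtering step, and the ppi_serials bookkeeping, with integer bitmasks standing in for Relids; it deliberately ignores the outer-join and placeholder subtleties that the real join_clause_is_movable_into handles.

    #include <stdio.h>

    typedef unsigned Relids;        /* toy bitmask stand-in */

    typedef struct Clause
    {
        int    serial;
        Relids required_relids;     /* rels the clause references */
    } Clause;

    /* Subset test: a is contained in b. */
    static int
    is_subset(Relids a, Relids b)
    {
        return (a & ~b) == 0;
    }

    int main(void)
    {
        Relids baserel = 1u << 1;                  /* scanning rel 1 */
        Relids required_outer = 1u << 3;           /* parameterized by rel 3 */
        Relids joinrelids = baserel | required_outer;

        Clause joininfo[] = {
            {10, (1u << 1) | (1u << 3)},           /* rel1.x = rel3.y: enforceable here */
            {11, (1u << 1) | (1u << 2)},           /* also mentions rel 2: not enforceable */
        };
        unsigned pserials = 0;                     /* stand-in for ppi_serials */

        for (int i = 0; i < 2; i++)
        {
            if (is_subset(joininfo[i].required_relids, joinrelids) &&
                !is_subset(joininfo[i].required_relids, baserel))
                pserials |= 1u << joininfo[i].serial;
        }

        printf("enforced clause serials bitmap: 0x%x\n", pserials);  /* only serial 10 */
        return 0;
    }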

◆ get_joinrel_parampathinfo()

ParamPathInfo * get_joinrel_parampathinfo ( PlannerInfo * root,
RelOptInfo * joinrel,
Path * outer_path,
Path * inner_path,
SpecialJoinInfo * sjinfo,
Relids  required_outer,
List **  restrict_clauses 
)

Definition at line 1781 of file relnode.c.

1787{
1788 ParamPathInfo *ppi;
1789 Relids join_and_req;
1790 Relids outer_and_req;
1791 Relids inner_and_req;
1792 List *pclauses;
1793 List *eclauses;
1794 List *dropped_ecs;
1795 double rows;
1796 ListCell *lc;
1797
1798 /* If rel has LATERAL refs, every path for it should account for them */
1799 Assert(bms_is_subset(joinrel->lateral_relids, required_outer));
1800
1801 /* Unparameterized paths have no ParamPathInfo or extra join clauses */
1802 if (bms_is_empty(required_outer))
1803 return NULL;
1804
1805 Assert(!bms_overlap(joinrel->relids, required_outer));
1806
1807 /*
1808 * Identify all joinclauses that are movable to this join rel given this
1809 * parameterization. These are the clauses that are movable into this
1810 * join, but not movable into either input path. Treat an unparameterized
1811 * input path as not accepting parameterized clauses (because it won't,
1812 * per the shortcut exit above), even though the joinclause movement rules
1813 * might allow the same clauses to be moved into a parameterized path for
1814 * that rel.
1815 */
1816 join_and_req = bms_union(joinrel->relids, required_outer);
1817 if (outer_path->param_info)
1818 outer_and_req = bms_union(outer_path->parent->relids,
1819 PATH_REQ_OUTER(outer_path));
1820 else
1821 outer_and_req = NULL; /* outer path does not accept parameters */
1822 if (inner_path->param_info)
1823 inner_and_req = bms_union(inner_path->parent->relids,
1824 PATH_REQ_OUTER(inner_path));
1825 else
1826 inner_and_req = NULL; /* inner path does not accept parameters */
1827
1828 pclauses = NIL;
1829 foreach(lc, joinrel->joininfo)
1830 {
1831 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1832
1833 if (join_clause_is_movable_into(rinfo,
1834 joinrel->relids,
1835 join_and_req) &&
1836 !join_clause_is_movable_into(rinfo,
1837 outer_path->parent->relids,
1838 outer_and_req) &&
1839 !join_clause_is_movable_into(rinfo,
1840 inner_path->parent->relids,
1841 inner_and_req))
1842 pclauses = lappend(pclauses, rinfo);
1843 }
1844
1845 /* Consider joinclauses generated by EquivalenceClasses, too */
1846 eclauses = generate_join_implied_equalities(root,
1847 join_and_req,
1848 required_outer,
1849 joinrel,
1850 NULL);
1851 /* We only want ones that aren't movable to lower levels */
1852 dropped_ecs = NIL;
1853 foreach(lc, eclauses)
1854 {
1855 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1856
1857 Assert(join_clause_is_movable_into(rinfo,
1858 joinrel->relids,
1859 join_and_req));
1860 if (join_clause_is_movable_into(rinfo,
1861 outer_path->parent->relids,
1862 outer_and_req))
1863 continue; /* drop if movable into LHS */
1864 if (join_clause_is_movable_into(rinfo,
1865 inner_path->parent->relids,
1866 inner_and_req))
1867 {
1868 /* drop if movable into RHS, but remember EC for use below */
1869 Assert(rinfo->left_ec == rinfo->right_ec);
1870 dropped_ecs = lappend(dropped_ecs, rinfo->left_ec);
1871 continue;
1872 }
1873 pclauses = lappend(pclauses, rinfo);
1874 }
1875
1876 /*
1877 * EquivalenceClasses are harder to deal with than we could wish, because
1878 * of the fact that a given EC can generate different clauses depending on
1879 * context. Suppose we have an EC {X.X, Y.Y, Z.Z} where X and Y are the
1880 * LHS and RHS of the current join and Z is in required_outer, and further
1881 * suppose that the inner_path is parameterized by both X and Z. The code
1882 * above will have produced either Z.Z = X.X or Z.Z = Y.Y from that EC,
1883 * and in the latter case will have discarded it as being movable into the
1884 * RHS. However, the EC machinery might have produced either Y.Y = X.X or
1885 * Y.Y = Z.Z as the EC enforcement clause within the inner_path; it will
1886 * not have produced both, and we can't readily tell from here which one
1887 * it did pick. If we add no clause to this join, we'll end up with
1888 * insufficient enforcement of the EC; either Z.Z or X.X will fail to be
1889 * constrained to be equal to the other members of the EC. (When we come
1890 * to join Z to this X/Y path, we will certainly drop whichever EC clause
1891 * is generated at that join, so this omission won't get fixed later.)
1892 *
1893 * To handle this, for each EC we discarded such a clause from, try to
1894 * generate a clause connecting the required_outer rels to the join's LHS
1895 * ("Z.Z = X.X" in the terms of the above example). If successful, and if
1896 * the clause can't be moved to the LHS, add it to the current join's
1897 * restriction clauses. (If an EC cannot generate such a clause then it
1898 * has nothing that needs to be enforced here, while if the clause can be
1899 * moved into the LHS then it should have been enforced within that path.)
1900 *
1901 * Note that we don't need similar processing for ECs whose clause was
1902 * considered to be movable into the LHS, because the LHS can't refer to
1903 * the RHS so there is no comparable ambiguity about what it might
1904 * actually be enforcing internally.
1905 */
1906 if (dropped_ecs)
1907 {
1908 Relids real_outer_and_req;
1909
1910 real_outer_and_req = bms_union(outer_path->parent->relids,
1911 required_outer);
1912 eclauses =
1913 generate_join_implied_equalities_for_ecs(root,
1914 dropped_ecs,
1915 real_outer_and_req,
1916 required_outer,
1917 outer_path->parent);
1918 foreach(lc, eclauses)
1919 {
1920 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1921
1922 Assert(join_clause_is_movable_into(rinfo,
1923 outer_path->parent->relids,
1924 real_outer_and_req));
1925 if (!join_clause_is_movable_into(rinfo,
1926 outer_path->parent->relids,
1927 outer_and_req))
1928 pclauses = lappend(pclauses, rinfo);
1929 }
1930 }
1931
1932 /*
1933 * Now, attach the identified moved-down clauses to the caller's
1934 * restrict_clauses list. By using list_concat in this order, we leave
1935 * the original list structure of restrict_clauses undamaged.
1936 */
1937 *restrict_clauses = list_concat(pclauses, *restrict_clauses);
1938
1939 /* If we already have a PPI for this parameterization, just return it */
1940 if ((ppi = find_param_path_info(joinrel, required_outer)))
1941 return ppi;
1942
1943 /* Estimate the number of rows returned by the parameterized join */
1944 rows = get_parameterized_joinrel_size(root, joinrel,
1945 outer_path,
1946 inner_path,
1947 sjinfo,
1948 *restrict_clauses);
1949
1950 /*
1951 * And now we can build the ParamPathInfo. No point in saving the
1952 * input-pair-dependent clause list, though.
1953 *
1954 * Note: in GEQO mode, we'll be called in a temporary memory context, but
1955 * the joinrel structure is there too, so no problem.
1956 */
1957 ppi = makeNode(ParamPathInfo);
1958 ppi->ppi_req_outer = required_outer;
1959 ppi->ppi_rows = rows;
1960 ppi->ppi_clauses = NIL;
1961 ppi->ppi_serials = NULL;
1962 joinrel->ppilist = lappend(joinrel->ppilist, ppi);
1963
1964 return ppi;
1965}
double get_parameterized_joinrel_size(PlannerInfo *root, RelOptInfo *rel, Path *outer_path, Path *inner_path, SpecialJoinInfo *sjinfo, List *restrict_clauses)
Definition: costsize.c:5485
List * generate_join_implied_equalities_for_ecs(PlannerInfo *root, List *eclasses, Relids join_relids, Relids outer_relids, RelOptInfo *inner_rel)
Definition: equivclass.c:1650

References Assert(), bms_is_empty, bms_is_subset(), bms_overlap(), bms_union(), find_param_path_info(), generate_join_implied_equalities(), generate_join_implied_equalities_for_ecs(), get_parameterized_joinrel_size(), join_clause_is_movable_into(), RelOptInfo::joininfo, lappend(), RelOptInfo::lateral_relids, lfirst, list_concat(), makeNode, NIL, PATH_REQ_OUTER, ParamPathInfo::ppi_clauses, ParamPathInfo::ppi_req_outer, ParamPathInfo::ppi_rows, ParamPathInfo::ppi_serials, RelOptInfo::ppilist, RelOptInfo::relids, and root.

Referenced by create_hashjoin_path(), create_mergejoin_path(), and create_nestloop_path().

◆ get_param_path_clause_serials()

Bitmapset * get_param_path_clause_serials ( Path * path )

Definition at line 2032 of file relnode.c.

2033{
2034 if (path->param_info == NULL)
2035 return NULL; /* not parameterized */
2036
2037 /*
2038 * We don't currently support parameterized MergeAppend paths, as
2039 * explained in the comments for generate_orderedappend_paths.
2040 */
2041 Assert(!IsA(path, MergeAppendPath));
2042
2043 if (IsA(path, NestPath) ||
2044 IsA(path, MergePath) ||
2045 IsA(path, HashPath))
2046 {
2047 /*
2048 * For a join path, combine clauses enforced within either input path
2049 * with those enforced as joinrestrictinfo in this path. Note that
2050 * joinrestrictinfo may include some non-pushed-down clauses, but for
2051 * current purposes it's okay if we include those in the result. (To
2052 * be more careful, we could check for clause_relids overlapping the
2053 * path parameterization, but it's not worth the cycles for now.)
2054 */
2055 JoinPath *jpath = (JoinPath *) path;
2056 Bitmapset *pserials;
2057 ListCell *lc;
2058
2059 pserials = NULL;
2060 pserials = bms_add_members(pserials,
2061 get_param_path_clause_serials(jpath->outerjoinpath));
2062 pserials = bms_add_members(pserials,
2063 get_param_path_clause_serials(jpath->innerjoinpath));
2064 foreach(lc, jpath->joinrestrictinfo)
2065 {
2066 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
2067
2068 pserials = bms_add_member(pserials, rinfo->rinfo_serial);
2069 }
2070 return pserials;
2071 }
2072 else if (IsA(path, AppendPath))
2073 {
2074 /*
2075 * For an appendrel, take the intersection of the sets of clauses
2076 * enforced in each input path.
2077 */
2078 AppendPath *apath = (AppendPath *) path;
2079 Bitmapset *pserials;
2080 ListCell *lc;
2081
2082 pserials = NULL;
2083 foreach(lc, apath->subpaths)
2084 {
2085 Path *subpath = (Path *) lfirst(lc);
2086 Bitmapset *subserials;
2087
2088 subserials = get_param_path_clause_serials(subpath);
2089 if (lc == list_head(apath->subpaths))
2090 pserials = bms_copy(subserials);
2091 else
2092 pserials = bms_int_members(pserials, subserials);
2093 }
2094 return pserials;
2095 }
2096 else
2097 {
2098 /*
2099 * Otherwise, it's a baserel path and we can use the
2100 * previously-computed set of serial numbers.
2101 */
2102 return path->param_info->ppi_serials;
2103 }
2104}
Bitmapset * bms_int_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:1108
static ListCell * list_head(const List *l)
Definition: pg_list.h:128

References Assert(), bms_add_member(), bms_add_members(), bms_copy(), bms_int_members(), get_param_path_clause_serials(), JoinPath::innerjoinpath, IsA, JoinPath::joinrestrictinfo, lfirst, list_head(), JoinPath::outerjoinpath, RestrictInfo::rinfo_serial, subpath(), and AppendPath::subpaths.

Referenced by create_nestloop_path(), and get_param_path_clause_serials().
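
For the Append case the rule is that a clause counts as enforced by the appendrel only if every child path enforces it, i.e. the intersection of the children's serial sets. With integer bitmasks standing in for Bitmapsets:

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical per-child enforced-clause serial sets */
        unsigned child_serials[] = {0x0F, 0x0D, 0x1D};
        int      nchildren = 3;
        unsigned pserials = 0;
        int      i;

        for (i = 0; i < nchildren; i++)
        {
            if (i == 0)
                pserials = child_serials[i];    /* first child seeds the set */
            else
                pserials &= child_serials[i];   /* then intersect */
        }

        printf("serials enforced by every child: 0x%x\n", pserials);  /* 0x0D */
        return 0;
    }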

◆ min_join_parameterization()

Relids min_join_parameterization ( PlannerInfo * root,
Relids  joinrelids,
RelOptInfo * outer_rel,
RelOptInfo * inner_rel 
)

Definition at line 1145 of file relnode.c.

1149{
1150 Relids result;
1151
1152 /*
1153 * Basically we just need the union of the inputs' lateral_relids, less
1154 * whatever is already in the join.
1155 *
1156 * It's not immediately obvious that this is a valid way to compute the
1157 * result, because it might seem that we're ignoring possible lateral refs
1158 * of PlaceHolderVars that are due to be computed at the join but not in
1159 * either input. However, because create_lateral_join_info() already
1160 * charged all such PHV refs to each member baserel of the join, they'll
1161 * be accounted for already in the inputs' lateral_relids. Likewise, we
1162 * do not need to worry about doing transitive closure here, because that
1163 * was already accounted for in the original baserel lateral_relids.
1164 */
1165 result = bms_union(outer_rel->lateral_relids, inner_rel->lateral_relids);
1166 result = bms_del_members(result, joinrelids);
1167 return result;
1168}

References bms_del_members(), bms_union(), and RelOptInfo::lateral_relids.

Referenced by build_join_rel(), and join_is_legal().
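
The computation is literally "union the inputs' lateral_relids, then subtract whatever is already inside the join". With integer bitmasks standing in for Relids:

    #include <stdio.h>

    int main(void)
    {
        /* toy Relids bitmasks */
        unsigned outer_lateral = (1u << 4);               /* outer side needs rel 4 */
        unsigned inner_lateral = (1u << 2) | (1u << 5);   /* inner side needs rels 2, 5 */
        unsigned joinrelids    = (1u << 1) | (1u << 2) | (1u << 3);

        unsigned result = (outer_lateral | inner_lateral) & ~joinrelids;

        printf("minimum parameterization: 0x%x\n", result);   /* rels 4 and 5 */
        return 0;
    }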

◆ path_is_reparameterizable_by_child()

bool path_is_reparameterizable_by_child ( Path * path,
RelOptInfo * child_rel 
)

Definition at line 4316 of file pathnode.c.

4317{
4318#define REJECT_IF_PATH_NOT_REPARAMETERIZABLE(path) \
4319do { \
4320 if (!path_is_reparameterizable_by_child(path, child_rel)) \
4321 return false; \
4322} while(0)
4323
4324#define REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(pathlist) \
4325do { \
4326 if (!pathlist_is_reparameterizable_by_child(pathlist, child_rel)) \
4327 return false; \
4328} while(0)
4329
4330 /*
4331 * If the path is not parameterized by the parent of the given relation,
4332 * it doesn't need reparameterization.
4333 */
4334 if (!path->param_info ||
4335 !bms_overlap(PATH_REQ_OUTER(path), child_rel->top_parent_relids))
4336 return true;
4337
4338 /*
4339 * Check that the path type is one that reparameterize_path_by_child() can
4340 * handle, and recursively check subpaths.
4341 */
4342 switch (nodeTag(path))
4343 {
4344 case T_Path:
4345 case T_IndexPath:
4346 break;
4347
4348 case T_BitmapHeapPath:
4349 {
4350 BitmapHeapPath *bhpath = (BitmapHeapPath *) path;
4351
4352 REJECT_IF_PATH_NOT_REPARAMETERIZABLE(bhpath->bitmapqual);
4353 }
4354 break;
4355
4356 case T_BitmapAndPath:
4357 {
4358 BitmapAndPath *bapath = (BitmapAndPath *) path;
4359
4360 REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(bapath->bitmapquals);
4361 }
4362 break;
4363
4364 case T_BitmapOrPath:
4365 {
4366 BitmapOrPath *bopath = (BitmapOrPath *) path;
4367
4368 REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(bopath->bitmapquals);
4369 }
4370 break;
4371
4372 case T_ForeignPath:
4373 {
4374 ForeignPath *fpath = (ForeignPath *) path;
4375
4376 if (fpath->fdw_outerpath)
4377 REJECT_IF_PATH_NOT_REPARAMETERIZABLE(fpath->fdw_outerpath);
4378 }
4379 break;
4380
4381 case T_CustomPath:
4382 {
4383 CustomPath *cpath = (CustomPath *) path;
4384
4385 REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(cpath->custom_paths);
4386 }
4387 break;
4388
4389 case T_NestPath:
4390 case T_MergePath:
4391 case T_HashPath:
4392 {
4393 JoinPath *jpath = (JoinPath *) path;
4394
4395 REJECT_IF_PATH_NOT_REPARAMETERIZABLE(jpath->outerjoinpath);
4396 REJECT_IF_PATH_NOT_REPARAMETERIZABLE(jpath->innerjoinpath);
4397 }
4398 break;
4399
4400 case T_AppendPath:
4401 {
4402 AppendPath *apath = (AppendPath *) path;
4403
4404 REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(apath->subpaths);
4405 }
4406 break;
4407
4408 case T_MaterialPath:
4409 {
4410 MaterialPath *mpath = (MaterialPath *) path;
4411
4412 REJECT_IF_PATH_NOT_REPARAMETERIZABLE(mpath->subpath);
4413 }
4414 break;
4415
4416 case T_MemoizePath:
4417 {
4418 MemoizePath *mpath = (MemoizePath *) path;
4419
4420 REJECT_IF_PATH_NOT_REPARAMETERIZABLE(mpath->subpath);
4421 }
4422 break;
4423
4424 case T_GatherPath:
4425 {
4426 GatherPath *gpath = (GatherPath *) path;
4427
4428 REJECT_IF_PATH_NOT_REPARAMETERIZABLE(gpath->subpath);
4429 }
4430 break;
4431
4432 default:
4433 /* We don't know how to reparameterize this path. */
4434 return false;
4435 }
4436
4437 return true;
4438}
#define nodeTag(nodeptr)
Definition: nodes.h:139
#define REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(pathlist)
#define REJECT_IF_PATH_NOT_REPARAMETERIZABLE(path)
List * custom_paths
Definition: pathnodes.h:2156

References BitmapHeapPath::bitmapqual, BitmapAndPath::bitmapquals, BitmapOrPath::bitmapquals, bms_overlap(), CustomPath::custom_paths, ForeignPath::fdw_outerpath, JoinPath::innerjoinpath, nodeTag, JoinPath::outerjoinpath, PATH_REQ_OUTER, REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE, REJECT_IF_PATH_NOT_REPARAMETERIZABLE, MaterialPath::subpath, MemoizePath::subpath, GatherPath::subpath, AppendPath::subpaths, and RelOptInfo::top_parent_relids.

Referenced by pathlist_is_reparameterizable_by_child(), try_nestloop_path(), and try_partial_nestloop_path().
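
The overall shape is a recursive whitelist: node types the translation code knows about recurse into their sub-paths, and anything unrecognized is rejected. A compact standalone sketch of that shape, with a toy node tree in place of real Path nodes:

    #include <stdbool.h>
    #include <stddef.h>

    typedef enum { N_SCAN, N_JOIN, N_MATERIAL, N_UNKNOWN } NodeKind;

    typedef struct Node
    {
        NodeKind     kind;
        struct Node *left;          /* sub-path(s), NULL if none */
        struct Node *right;
    } Node;

    /* Accept only node kinds we know how to translate, recursing into children. */
    static bool
    reparameterizable(const Node *n)
    {
        if (n == NULL)
            return true;

        switch (n->kind)
        {
            case N_SCAN:
                return true;
            case N_JOIN:
                return reparameterizable(n->left) && reparameterizable(n->right);
            case N_MATERIAL:
                return reparameterizable(n->left);
            default:
                return false;   /* unknown kind: give up, as the real code does */
        }
    }

    int main(void)
    {
        Node scan = {N_SCAN, NULL, NULL};
        Node weird = {N_UNKNOWN, NULL, NULL};
        Node join = {N_JOIN, &scan, &weird};

        return reparameterizable(&join) ? 1 : 0;    /* rejected because of 'weird' */
    }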

◆ reparameterize_path()

Path * reparameterize_path ( PlannerInfo * root,
Path * path,
Relids  required_outer,
double  loop_count 
)

Definition at line 3854 of file pathnode.c.

3857{
3858 RelOptInfo *rel = path->parent;
3859
3860 /* Can only increase, not decrease, path's parameterization */
3861 if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer))
3862 return NULL;
3863 switch (path->pathtype)
3864 {
3865 case T_SeqScan:
3866 return create_seqscan_path(root, rel, required_outer, 0);
3867 case T_SampleScan:
3868 return create_samplescan_path(root, rel, required_outer);
3869 case T_IndexScan:
3870 case T_IndexOnlyScan:
3871 {
3872 IndexPath *ipath = (IndexPath *) path;
3873 IndexPath *newpath = makeNode(IndexPath);
3874
3875 /*
3876 * We can't use create_index_path directly, and would not want
3877 * to because it would re-compute the indexqual conditions
3878 * which is wasted effort. Instead we hack things a bit:
3879 * flat-copy the path node, revise its param_info, and redo
3880 * the cost estimate.
3881 */
3882 memcpy(newpath, ipath, sizeof(IndexPath));
3883 newpath->path.param_info =
3884 get_baserel_parampathinfo(root, rel, required_outer);
3885 cost_index(newpath, root, loop_count, false);
3886 return (Path *) newpath;
3887 }
3888 case T_BitmapHeapScan:
3889 {
3890 BitmapHeapPath *bpath = (BitmapHeapPath *) path;
3891
3892 return (Path *) create_bitmap_heap_path(root,
3893 rel,
3894 bpath->bitmapqual,
3895 required_outer,
3896 loop_count, 0);
3897 }
3898 case T_SubqueryScan:
3899 {
3900 SubqueryScanPath *spath = (SubqueryScanPath *) path;
3901 Path *subpath = spath->subpath;
3902 bool trivial_pathtarget;
3903
3904 /*
3905 * If existing node has zero extra cost, we must have decided
3906 * its target is trivial. (The converse is not true, because
3907 * it might have a trivial target but quals to enforce; but in
3908 * that case the new node will too, so it doesn't matter
3909 * whether we get the right answer here.)
3910 */
3911 trivial_pathtarget =
3912 (subpath->total_cost == spath->path.total_cost);
3913
3914 return (Path *) create_subqueryscan_path(root,
3915 rel,
3916 subpath,
3917 trivial_pathtarget,
3918 spath->path.pathkeys,
3919 required_outer);
3920 }
3921 case T_Result:
3922 /* Supported only for RTE_RESULT scan paths */
3923 if (IsA(path, Path))
3924 return create_resultscan_path(root, rel, required_outer);
3925 break;
3926 case T_Append:
3927 {
3928 AppendPath *apath = (AppendPath *) path;
3929 List *childpaths = NIL;
3930 List *partialpaths = NIL;
3931 int i;
3932 ListCell *lc;
3933
3934 /* Reparameterize the children */
3935 i = 0;
3936 foreach(lc, apath->subpaths)
3937 {
3938 Path *spath = (Path *) lfirst(lc);
3939
3940 spath = reparameterize_path(root, spath,
3941 required_outer,
3942 loop_count);
3943 if (spath == NULL)
3944 return NULL;
3945 /* We have to re-split the regular and partial paths */
3946 if (i < apath->first_partial_path)
3947 childpaths = lappend(childpaths, spath);
3948 else
3949 partialpaths = lappend(partialpaths, spath);
3950 i++;
3951 }
3952 return (Path *)
3953 create_append_path(root, rel, childpaths, partialpaths,
3954 apath->path.pathkeys, required_outer,
3955 apath->path.parallel_workers,
3956 apath->path.parallel_aware,
3957 -1);
3958 }
3959 case T_Material:
3960 {
3961 MaterialPath *mpath = (MaterialPath *) path;
3962 Path *spath = mpath->subpath;
3963
3964 spath = reparameterize_path(root, spath,
3965 required_outer,
3966 loop_count);
3967 if (spath == NULL)
3968 return NULL;
3969 return (Path *) create_material_path(rel, spath);
3970 }
3971 case T_Memoize:
3972 {
3973 MemoizePath *mpath = (MemoizePath *) path;
3974 Path *spath = mpath->subpath;
3975
3976 spath = reparameterize_path(root, spath,
3977 required_outer,
3978 loop_count);
3979 if (spath == NULL)
3980 return NULL;
3981 return (Path *) create_memoize_path(root, rel,
3982 spath,
3983 mpath->param_exprs,
3984 mpath->hash_operators,
3985 mpath->singlerow,
3986 mpath->binary_mode,
3987 mpath->est_calls);
3988 }
3989 default:
3990 break;
3991 }
3992 return NULL;
3993}
int i
Definition: isn.c:77
MemoizePath * create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *param_exprs, List *hash_operators, bool singlerow, bool binary_mode, Cardinality est_calls)
Definition: pathnode.c:1691
Path * create_seqscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer, int parallel_workers)
Definition: pathnode.c:983
AppendPath * create_append_path(PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *partial_subpaths, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows)
Definition: pathnode.c:1301
SubqueryScanPath * create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, bool trivial_pathtarget, List *pathkeys, Relids required_outer)
Definition: pathnode.c:1847
BitmapHeapPath * create_bitmap_heap_path(PlannerInfo *root, RelOptInfo *rel, Path *bitmapqual, Relids required_outer, double loop_count, int parallel_degree)
Definition: pathnode.c:1098
Path * create_samplescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
Definition: pathnode.c:1008
MaterialPath * create_material_path(RelOptInfo *rel, Path *subpath)
Definition: pathnode.c:1658
Path * create_resultscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
Definition: pathnode.c:2007
Path * reparameterize_path(PlannerInfo *root, Path *path, Relids required_outer, double loop_count)
Definition: pathnode.c:3854

References MemoizePath::binary_mode, BitmapHeapPath::bitmapqual, bms_is_subset(), cost_index(), create_append_path(), create_bitmap_heap_path(), create_material_path(), create_memoize_path(), create_resultscan_path(), create_samplescan_path(), create_seqscan_path(), create_subqueryscan_path(), MemoizePath::est_calls, get_baserel_parampathinfo(), MemoizePath::hash_operators, i, IsA, lappend(), lfirst, makeNode, NIL, Path::parallel_aware, Path::parallel_workers, MemoizePath::param_exprs, IndexPath::path, SubqueryScanPath::path, AppendPath::path, PATH_REQ_OUTER, Path::pathkeys, Path::pathtype, reparameterize_path(), root, MemoizePath::singlerow, subpath(), SubqueryScanPath::subpath, MaterialPath::subpath, MemoizePath::subpath, AppendPath::subpaths, and Path::total_cost.

Referenced by get_cheapest_parameterized_child_path(), and reparameterize_path().

◆ reparameterize_path_by_child()

Path * reparameterize_path_by_child ( PlannerInfo * root,
Path * path,
RelOptInfo * child_rel 
)

Definition at line 4020 of file pathnode.c.

4022{
4023 Path *new_path;
4024 ParamPathInfo *new_ppi;
4025 ParamPathInfo *old_ppi;
4026 Relids required_outer;
4027
4028#define ADJUST_CHILD_ATTRS(node) \
4029 ((node) = (void *) adjust_appendrel_attrs_multilevel(root, \
4030 (Node *) (node), \
4031 child_rel, \
4032 child_rel->top_parent))
4033
4034#define REPARAMETERIZE_CHILD_PATH(path) \
4035do { \
4036 (path) = reparameterize_path_by_child(root, (path), child_rel); \
4037 if ((path) == NULL) \
4038 return NULL; \
4039} while(0)
4040
4041#define REPARAMETERIZE_CHILD_PATH_LIST(pathlist) \
4042do { \
4043 if ((pathlist) != NIL) \
4044 { \
4045 (pathlist) = reparameterize_pathlist_by_child(root, (pathlist), \
4046 child_rel); \
4047 if ((pathlist) == NIL) \
4048 return NULL; \
4049 } \
4050} while(0)
4051
4052 /*
4053 * If the path is not parameterized by the parent of the given relation,
4054 * it doesn't need reparameterization.
4055 */
4056 if (!path->param_info ||
4057 !bms_overlap(PATH_REQ_OUTER(path), child_rel->top_parent_relids))
4058 return path;
4059
4060 /*
4061 * If possible, reparameterize the given path.
4062 *
4063 * This function is currently only applied to the inner side of a nestloop
4064 * join that is being partitioned by the partitionwise-join code. Hence,
4065 * we need only support path types that plausibly arise in that context.
4066 * (In particular, supporting sorted path types would be a waste of code
4067 * and cycles: even if we translated them here, they'd just lose in
4068 * subsequent cost comparisons.) If we do see an unsupported path type,
4069 * that just means we won't be able to generate a partitionwise-join plan
4070 * using that path type.
4071 */
4072 switch (nodeTag(path))
4073 {
4074 case T_Path:
4075 new_path = path;
4076 ADJUST_CHILD_ATTRS(new_path->parent->baserestrictinfo);
4077 if (path->pathtype == T_SampleScan)
4078 {
4079 Index scan_relid = path->parent->relid;
4080 RangeTblEntry *rte;
4081
4082 /* it should be a base rel with a tablesample clause... */
4083 Assert(scan_relid > 0);
4084 rte = planner_rt_fetch(scan_relid, root);
4085 Assert(rte->rtekind == RTE_RELATION);
4086 Assert(rte->tablesample != NULL);
4087
4088 ADJUST_CHILD_ATTRS(rte->tablesample);
4089 }
4090 break;
4091
4092 case T_IndexPath:
4093 {
4094 IndexPath *ipath = (IndexPath *) path;
4095
4096 ADJUST_CHILD_ATTRS(ipath->indexinfo->indrestrictinfo);
4097 ADJUST_CHILD_ATTRS(ipath->indexclauses);
4098 new_path = (Path *) ipath;
4099 }
4100 break;
4101
4102 case T_BitmapHeapPath:
4103 {
4104 BitmapHeapPath *bhpath = (BitmapHeapPath *) path;
4105
4106 ADJUST_CHILD_ATTRS(bhpath->path.parent->baserestrictinfo);
4107 REPARAMETERIZE_CHILD_PATH(bhpath->bitmapqual);
4108 new_path = (Path *) bhpath;
4109 }
4110 break;
4111
4112 case T_BitmapAndPath:
4113 {
4114 BitmapAndPath *bapath = (BitmapAndPath *) path;
4115
4116 REPARAMETERIZE_CHILD_PATH_LIST(bapath->bitmapquals);
4117 new_path = (Path *) bapath;
4118 }
4119 break;
4120
4121 case T_BitmapOrPath:
4122 {
4123 BitmapOrPath *bopath = (BitmapOrPath *) path;
4124
4125 REPARAMETERIZE_CHILD_PATH_LIST(bopath->bitmapquals);
4126 new_path = (Path *) bopath;
4127 }
4128 break;
4129
4130 case T_ForeignPath:
4131 {
4132 ForeignPath *fpath = (ForeignPath *) path;
4133 ReparameterizeForeignPathByChild_function rfpc_func;
4134
4135 ADJUST_CHILD_ATTRS(fpath->path.parent->baserestrictinfo);
4136 if (fpath->fdw_outerpath)
4137 REPARAMETERIZE_CHILD_PATH(fpath->fdw_outerpath);
4138 if (fpath->fdw_restrictinfo)
4139 ADJUST_CHILD_ATTRS(fpath->fdw_restrictinfo);
4140
4141 /* Hand over to FDW if needed. */
4142 rfpc_func =
4143 path->parent->fdwroutine->ReparameterizeForeignPathByChild;
4144 if (rfpc_func)
4145 fpath->fdw_private = rfpc_func(root, fpath->fdw_private,
4146 child_rel);
4147 new_path = (Path *) fpath;
4148 }
4149 break;
4150
4151 case T_CustomPath:
4152 {
4153 CustomPath *cpath = (CustomPath *) path;
4154
4155 ADJUST_CHILD_ATTRS(cpath->path.parent->baserestrictinfo);
4156 REPARAMETERIZE_CHILD_PATH_LIST(cpath->custom_paths);
4157 if (cpath->custom_restrictinfo)
4158 ADJUST_CHILD_ATTRS(cpath->custom_restrictinfo);
4159 if (cpath->methods &&
4160 cpath->methods->ReparameterizeCustomPathByChild)
4161 cpath->custom_private =
4162 cpath->methods->ReparameterizeCustomPathByChild(root,
4163 cpath->custom_private,
4164 child_rel);
4165 new_path = (Path *) cpath;
4166 }
4167 break;
4168
4169 case T_NestPath:
4170 {
4171 NestPath *npath = (NestPath *) path;
4172 JoinPath *jpath = (JoinPath *) npath;
4173
4174 REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
4175 REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
4176 ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
4177 new_path = (Path *) npath;
4178 }
4179 break;
4180
4181 case T_MergePath:
4182 {
4183 MergePath *mpath = (MergePath *) path;
4184 JoinPath *jpath = (JoinPath *) mpath;
4185
4186 REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
4187 REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
4188 ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
4189 ADJUST_CHILD_ATTRS(mpath->path_mergeclauses);
4190 new_path = (Path *) mpath;
4191 }
4192 break;
4193
4194 case T_HashPath:
4195 {
4196 HashPath *hpath = (HashPath *) path;
4197 JoinPath *jpath = (JoinPath *) hpath;
4198
4199 REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
4200 REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
4201 ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
4202 ADJUST_CHILD_ATTRS(hpath->path_hashclauses);
4203 new_path = (Path *) hpath;
4204 }
4205 break;
4206
4207 case T_AppendPath:
4208 {
4209 AppendPath *apath = (AppendPath *) path;
4210
4211 REPARAMETERIZE_CHILD_PATH_LIST(apath->subpaths);
4212 new_path = (Path *) apath;
4213 }
4214 break;
4215
4216 case T_MaterialPath:
4217 {
4218 MaterialPath *mpath = (MaterialPath *) path;
4219
4220 REPARAMETERIZE_CHILD_PATH(mpath->subpath);
4221 new_path = (Path *) mpath;
4222 }
4223 break;
4224
4225 case T_MemoizePath:
4226 {
4227 MemoizePath *mpath = (MemoizePath *) path;
4228
4229 REPARAMETERIZE_CHILD_PATH(mpath->subpath);
4230 ADJUST_CHILD_ATTRS(mpath->param_exprs);
4231 new_path = (Path *) mpath;
4232 }
4233 break;
4234
4235 case T_GatherPath:
4236 {
4237 GatherPath *gpath = (GatherPath *) path;
4238
4239 REPARAMETERIZE_CHILD_PATH(gpath->subpath);
4240 new_path = (Path *) gpath;
4241 }
4242 break;
4243
4244 default:
4245 /* We don't know how to reparameterize this path. */
4246 return NULL;
4247 }
4248
4249 /*
4250 * Adjust the parameterization information, which refers to the topmost
4251 * parent. The topmost parent can be multiple levels away from the given
4252 * child, hence use multi-level expression adjustment routines.
4253 */
4254 old_ppi = new_path->param_info;
4255 required_outer =
4256 adjust_child_relids_multilevel(root, old_ppi->ppi_req_outer,
4257 child_rel,
4258 child_rel->top_parent);
4259
4260 /* If we already have a PPI for this parameterization, just return it */
4261 new_ppi = find_param_path_info(new_path->parent, required_outer);
4262
4263 /*
4264 * If not, build a new one and link it to the list of PPIs. For the same
4265 * reason as explained in mark_dummy_rel(), allocate new PPI in the same
4266 * context the given RelOptInfo is in.
4267 */
4268 if (new_ppi == NULL)
4269 {
4270 MemoryContext oldcontext;
4271 RelOptInfo *rel = path->parent;
4272
4273 oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
4274
4275 new_ppi = makeNode(ParamPathInfo);
4276 new_ppi->ppi_req_outer = bms_copy(required_outer);
4277 new_ppi->ppi_rows = old_ppi->ppi_rows;
4278 new_ppi->ppi_clauses = old_ppi->ppi_clauses;
4279 ADJUST_CHILD_ATTRS(new_ppi->ppi_clauses);
4280 new_ppi->ppi_serials = bms_copy(old_ppi->ppi_serials);
4281 rel->ppilist = lappend(rel->ppilist, new_ppi);
4282
4283 MemoryContextSwitchTo(oldcontext);
4284 }
4285 bms_free(required_outer);
4286
4287 new_path->param_info = new_ppi;
4288
4289 /*
4290 * Adjust the path target if the parent of the outer relation is
4291 * referenced in the targetlist. This can happen when only the parent of
4292 * outer relation is laterally referenced in this relation.
4293 */
4294 if (bms_overlap(path->parent->lateral_relids,
4295 child_rel->top_parent_relids))
4296 {
4297 new_path->pathtarget = copy_pathtarget(new_path->pathtarget);
4298 ADJUST_CHILD_ATTRS(new_path->pathtarget->exprs);
4299 }
4300
4301 return new_path;
4302}
Relids adjust_child_relids_multilevel(PlannerInfo *root, Relids relids, RelOptInfo *childrel, RelOptInfo *parentrel)
Definition: appendinfo.c:659
void bms_free(Bitmapset *a)
Definition: bitmapset.c:239
List *(* ReparameterizeForeignPathByChild_function)(PlannerInfo *root, List *fdw_private, RelOptInfo *child_rel)
Definition: fdwapi.h:182
MemoryContext GetMemoryChunkContext(void *pointer)
Definition: mcxt.c:753
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:124
#define REPARAMETERIZE_CHILD_PATH_LIST(pathlist)
#define REPARAMETERIZE_CHILD_PATH(path)
#define ADJUST_CHILD_ATTRS(node)
#define planner_rt_fetch(rti, root)
Definition: pathnodes.h:610
struct List *(* ReparameterizeCustomPathByChild)(PlannerInfo *root, List *custom_private, RelOptInfo *child_rel)
Definition: extensible.h:103
const struct CustomPathMethods * methods
Definition: pathnodes.h:2159
List * custom_private
Definition: pathnodes.h:2158
List * custom_restrictinfo
Definition: pathnodes.h:2157
List * indrestrictinfo
Definition: pathnodes.h:1321
struct TableSampleClause * tablesample
Definition: parsenodes.h:1129
PathTarget * copy_pathtarget(PathTarget *src)
Definition: tlist.c:657

References ADJUST_CHILD_ATTRS, adjust_child_relids_multilevel(), Assert(), BitmapHeapPath::bitmapqual, BitmapAndPath::bitmapquals, BitmapOrPath::bitmapquals, bms_copy(), bms_free(), bms_overlap(), copy_pathtarget(), CustomPath::custom_paths, CustomPath::custom_private, CustomPath::custom_restrictinfo, ForeignPath::fdw_outerpath, ForeignPath::fdw_private, ForeignPath::fdw_restrictinfo, find_param_path_info(), GetMemoryChunkContext(), IndexPath::indexclauses, IndexPath::indexinfo, IndexOptInfo::indrestrictinfo, JoinPath::innerjoinpath, JoinPath::joinrestrictinfo, lappend(), makeNode, MemoryContextSwitchTo(), CustomPath::methods, nodeTag, JoinPath::outerjoinpath, MemoizePath::param_exprs, BitmapHeapPath::path, ForeignPath::path, CustomPath::path, HashPath::path_hashclauses, MergePath::path_mergeclauses, PATH_REQ_OUTER, Path::pathtype, planner_rt_fetch, ParamPathInfo::ppi_clauses, ParamPathInfo::ppi_req_outer, ParamPathInfo::ppi_rows, ParamPathInfo::ppi_serials, RelOptInfo::ppilist, REPARAMETERIZE_CHILD_PATH, REPARAMETERIZE_CHILD_PATH_LIST, CustomPathMethods::ReparameterizeCustomPathByChild, root, RTE_RELATION, RangeTblEntry::rtekind, MaterialPath::subpath, MemoizePath::subpath, GatherPath::subpath, AppendPath::subpaths, RangeTblEntry::tablesample, and RelOptInfo::top_parent_relids.

Referenced by create_nestloop_plan(), and reparameterize_pathlist_by_child().
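
For extension-managed paths, the T_ForeignPath and T_CustomPath cases above hand translation of provider-private data back to the provider. Below is a minimal sketch of such a callback; my_reparameterize_custom_path_by_child is hypothetical (not part of PostgreSQL), and it assumes custom_private holds only expression trees that reference the top parent relation.

#include "postgres.h"
#include "nodes/pathnodes.h"
#include "optimizer/appendinfo.h"

/* Hypothetical ReparameterizeCustomPathByChild callback (sketch only). */
static List *
my_reparameterize_custom_path_by_child(PlannerInfo *root,
                                       List *custom_private,
                                       RelOptInfo *child_rel)
{
    /*
     * Translate parent-relation Vars in the stored expressions to the given
     * child, walking as many inheritance levels as needed -- the same
     * adjustment ADJUST_CHILD_ATTRS applies to the built-in path fields.
     */
    return (List *) adjust_appendrel_attrs_multilevel(root,
                                                      (Node *) custom_private,
                                                      child_rel,
                                                      child_rel->top_parent);
}

A provider that stores non-expression data in custom_private would instead copy and translate only the parts that mention parent relids.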

◆ set_cheapest()

void set_cheapest ( RelOptInfo * parent_rel )

Definition at line 270 of file pathnode.c.

271{
272 Path *cheapest_startup_path;
273 Path *cheapest_total_path;
274 Path *best_param_path;
275 List *parameterized_paths;
276 ListCell *p;
277
278 Assert(IsA(parent_rel, RelOptInfo));
279
280 if (parent_rel->pathlist == NIL)
281 elog(ERROR, "could not devise a query plan for the given query");
282
283 cheapest_startup_path = cheapest_total_path = best_param_path = NULL;
284 parameterized_paths = NIL;
285
286 foreach(p, parent_rel->pathlist)
287 {
288 Path *path = (Path *) lfirst(p);
289 int cmp;
290
291 if (path->param_info)
292 {
293 /* Parameterized path, so add it to parameterized_paths */
294 parameterized_paths = lappend(parameterized_paths, path);
295
296 /*
297 * If we have an unparameterized cheapest-total, we no longer care
298 * about finding the best parameterized path, so move on.
299 */
300 if (cheapest_total_path)
301 continue;
302
303 /*
304 * Otherwise, track the best parameterized path, which is the one
305 * with least total cost among those of the minimum
306 * parameterization.
307 */
308 if (best_param_path == NULL)
309 best_param_path = path;
310 else
311 {
312 switch (bms_subset_compare(PATH_REQ_OUTER(path),
313 PATH_REQ_OUTER(best_param_path)))
314 {
315 case BMS_EQUAL:
316 /* keep the cheaper one */
317 if (compare_path_costs(path, best_param_path,
318 TOTAL_COST) < 0)
319 best_param_path = path;
320 break;
321 case BMS_SUBSET1:
322 /* new path is less-parameterized */
323 best_param_path = path;
324 break;
325 case BMS_SUBSET2:
326 /* old path is less-parameterized, keep it */
327 break;
328 case BMS_DIFFERENT:
329
330 /*
331 * This means that neither path has the least possible
332 * parameterization for the rel. We'll sit on the old
333 * path until something better comes along.
334 */
335 break;
336 }
337 }
338 }
339 else
340 {
341 /* Unparameterized path, so consider it for cheapest slots */
342 if (cheapest_total_path == NULL)
343 {
344 cheapest_startup_path = cheapest_total_path = path;
345 continue;
346 }
347
348 /*
349 * If we find two paths of identical costs, try to keep the
350 * better-sorted one. The paths might have unrelated sort
351 * orderings, in which case we can only guess which might be
352 * better to keep, but if one is superior then we definitely
353 * should keep that one.
354 */
355 cmp = compare_path_costs(cheapest_startup_path, path, STARTUP_COST);
356 if (cmp > 0 ||
357 (cmp == 0 &&
358 compare_pathkeys(cheapest_startup_path->pathkeys,
359 path->pathkeys) == PATHKEYS_BETTER2))
360 cheapest_startup_path = path;
361
362 cmp = compare_path_costs(cheapest_total_path, path, TOTAL_COST);
363 if (cmp > 0 ||
364 (cmp == 0 &&
365 compare_pathkeys(cheapest_total_path->pathkeys,
366 path->pathkeys) == PATHKEYS_BETTER2))
367 cheapest_total_path = path;
368 }
369 }
370
371 /* Add cheapest unparameterized path, if any, to parameterized_paths */
372 if (cheapest_total_path)
373 parameterized_paths = lcons(cheapest_total_path, parameterized_paths);
374
375 /*
376 * If there is no unparameterized path, use the best parameterized path as
377 * cheapest_total_path (but not as cheapest_startup_path).
378 */
379 if (cheapest_total_path == NULL)
380 cheapest_total_path = best_param_path;
381 Assert(cheapest_total_path != NULL);
382
383 parent_rel->cheapest_startup_path = cheapest_startup_path;
384 parent_rel->cheapest_total_path = cheapest_total_path;
385 parent_rel->cheapest_parameterized_paths = parameterized_paths;
386}
@ BMS_DIFFERENT
Definition: bitmapset.h:65
List * lcons(void *datum, List *list)
Definition: list.c:495

References Assert(), BMS_DIFFERENT, BMS_EQUAL, BMS_SUBSET1, BMS_SUBSET2, bms_subset_compare(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, cmp(), compare_path_costs(), compare_pathkeys(), elog, ERROR, IsA, lappend(), lcons(), lfirst, NIL, PATH_REQ_OUTER, Path::pathkeys, PATHKEYS_BETTER2, RelOptInfo::pathlist, STARTUP_COST, and TOTAL_COST.

Referenced by apply_scanjoin_target_to_paths(), create_distinct_paths(), create_grouping_paths(), create_ordinary_grouping_paths(), create_partial_distinct_paths(), create_partial_unique_paths(), create_partitionwise_grouping_paths(), create_unique_paths(), create_window_paths(), generate_partitionwise_join_paths(), mark_dummy_rel(), merge_clump(), postprocess_setop_rel(), query_planner(), set_dummy_rel_pathlist(), set_grouped_rel_pathlist(), set_rel_pathlist(), standard_join_search(), and subquery_planner().
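
A short usage sketch (build_paths_for_rel is a hypothetical caller, not taken from the sources): candidate paths are first collected with add_path(), and set_cheapest() is then called once per relation so the cheapest_* fields are valid before higher planning levels consume the rel.

#include "postgres.h"
#include "nodes/pathnodes.h"
#include "optimizer/pathnode.h"

/* Hypothetical caller illustrating the add_path()/set_cheapest() protocol. */
static void
build_paths_for_rel(PlannerInfo *root, RelOptInfo *rel)
{
    /* one unparameterized candidate: a plain, non-parallel sequential scan */
    add_path(rel, create_seqscan_path(root, rel, NULL, 0));

    /* ... index, bitmap, or parameterized paths would be added here ... */

    /*
     * Fill cheapest_startup_path, cheapest_total_path, and
     * cheapest_parameterized_paths from rel->pathlist.
     */
    set_cheapest(rel);
}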

◆ setup_simple_rel_arrays()

void setup_simple_rel_arrays ( PlannerInfo * root )

Definition at line 108 of file relnode.c.

109{
110 int size;
111 Index rti;
112 ListCell *lc;
113
114 /* Arrays are accessed using RT indexes (1..N) */
115 size = list_length(root->parse->rtable) + 1;
116 root->simple_rel_array_size = size;
117
118 /*
119 * simple_rel_array is initialized to all NULLs, since no RelOptInfos
120 * exist yet. It'll be filled by later calls to build_simple_rel().
121 */
122 root->simple_rel_array = (RelOptInfo **)
123 palloc0(size * sizeof(RelOptInfo *));
124
125 /* simple_rte_array is an array equivalent of the rtable list */
126 root->simple_rte_array = (RangeTblEntry **)
127 palloc0(size * sizeof(RangeTblEntry *));
128 rti = 1;
129 foreach(lc, root->parse->rtable)
130 {
131 RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
132
133 root->simple_rte_array[rti++] = rte;
134 }
135
136 /* append_rel_array is not needed if there are no AppendRelInfos */
137 if (root->append_rel_list == NIL)
138 {
139 root->append_rel_array = NULL;
140 return;
141 }
142
143 root->append_rel_array = (AppendRelInfo **)
144 palloc0(size * sizeof(AppendRelInfo *));
145
146 /*
147 * append_rel_array is filled with any already-existing AppendRelInfos,
148 * which currently could only come from UNION ALL flattening. We might
149 * add more later during inheritance expansion, but it's the
150 * responsibility of the expansion code to update the array properly.
151 */
152 foreach(lc, root->append_rel_list)
153 {
154 AppendRelInfo *appinfo = lfirst_node(AppendRelInfo, lc);
155 int child_relid = appinfo->child_relid;
156
157 /* Sanity check */
158 Assert(child_relid < size);
159
160 if (root->append_rel_array[child_relid])
161 elog(ERROR, "child relation already exists");
162
163 root->append_rel_array[child_relid] = appinfo;
164 }
165}
Index child_relid
Definition: pathnodes.h:3193

References Assert(), AppendRelInfo::child_relid, elog, ERROR, lfirst, lfirst_node, list_length(), NIL, palloc0(), and root.

Referenced by plan_cluster_use_sort(), plan_create_index_workers(), plan_set_operations(), and query_planner().
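
A brief sketch of how the arrays built here are consumed (fetch_rte_by_index is hypothetical; in the planner itself the planner_rt_fetch() macro plays this role): once setup_simple_rel_arrays() has run, range-table entries can be looked up by RT index without walking root->parse->rtable.

#include "postgres.h"
#include "nodes/pathnodes.h"

/* Hypothetical helper: direct RT-index lookup after setup_simple_rel_arrays(). */
static RangeTblEntry *
fetch_rte_by_index(PlannerInfo *root, Index rti)
{
    /* valid RT indexes run from 1 to simple_rel_array_size - 1 */
    Assert(rti > 0 && rti < root->simple_rel_array_size);

    /* simple_rel_array[rti] stays NULL until build_simple_rel() fills it */
    return root->simple_rte_array[rti];
}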