PostgreSQL Source Code git master
planner.c File Reference
#include "postgres.h"
#include <limits.h>
#include <math.h>
#include "access/genam.h"
#include "access/parallel.h"
#include "access/sysattr.h"
#include "access/table.h"
#include "catalog/pg_aggregate.h"
#include "catalog/pg_inherits.h"
#include "catalog/pg_proc.h"
#include "catalog/pg_type.h"
#include "executor/executor.h"
#include "foreign/fdwapi.h"
#include "jit/jit.h"
#include "lib/bipartite_match.h"
#include "lib/knapsack.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "nodes/supportnodes.h"
#include "optimizer/appendinfo.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/optimizer.h"
#include "optimizer/paramassign.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/plancat.h"
#include "optimizer/planmain.h"
#include "optimizer/planner.h"
#include "optimizer/prep.h"
#include "optimizer/subselect.h"
#include "optimizer/tlist.h"
#include "parser/analyze.h"
#include "parser/parse_agg.h"
#include "parser/parse_clause.h"
#include "parser/parse_relation.h"
#include "parser/parsetree.h"
#include "partitioning/partdesc.h"
#include "rewrite/rewriteManip.h"
#include "utils/backend_status.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
#include "utils/selfuncs.h"
Go to the source code of this file.

Data Structures

struct  grouping_sets_data
 
struct  WindowClauseSortData
 
struct  standard_qp_extra
 

Macros

#define EXPRKIND_QUAL   0
 
#define EXPRKIND_TARGET   1
 
#define EXPRKIND_RTFUNC   2
 
#define EXPRKIND_RTFUNC_LATERAL   3
 
#define EXPRKIND_VALUES   4
 
#define EXPRKIND_VALUES_LATERAL   5
 
#define EXPRKIND_LIMIT   6
 
#define EXPRKIND_APPINFO   7
 
#define EXPRKIND_PHV   8
 
#define EXPRKIND_TABLESAMPLE   9
 
#define EXPRKIND_ARBITER_ELEM   10
 
#define EXPRKIND_TABLEFUNC   11
 
#define EXPRKIND_TABLEFUNC_LATERAL   12
 
#define EXPRKIND_GROUPEXPR   13
 

Functions

static Node * preprocess_expression (PlannerInfo *root, Node *expr, int kind)
 
static void preprocess_qual_conditions (PlannerInfo *root, Node *jtnode)
 
static void grouping_planner (PlannerInfo *root, double tuple_fraction, SetOperationStmt *setops)
 
static grouping_sets_data * preprocess_grouping_sets (PlannerInfo *root)
 
static List * remap_to_groupclause_idx (List *groupClause, List *gsets, int *tleref_to_colnum_map)
 
static void preprocess_rowmarks (PlannerInfo *root)
 
static double preprocess_limit (PlannerInfo *root, double tuple_fraction, int64 *offset_est, int64 *count_est)
 
static List * preprocess_groupclause (PlannerInfo *root, List *force)
 
static List * extract_rollup_sets (List *groupingSets)
 
static List * reorder_grouping_sets (List *groupingSets, List *sortclause)
 
static void standard_qp_callback (PlannerInfo *root, void *extra)
 
static double get_number_of_groups (PlannerInfo *root, double path_rows, grouping_sets_data *gd, List *target_list)
 
static RelOptInfo * create_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, grouping_sets_data *gd)
 
static bool is_degenerate_grouping (PlannerInfo *root)
 
static void create_degenerate_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel)
 
static RelOptInfo * make_grouping_rel (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, Node *havingQual)
 
static void create_ordinary_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, GroupPathExtraData *extra, RelOptInfo **partially_grouped_rel_p)
 
static void consider_groupingsets_paths (PlannerInfo *root, RelOptInfo *grouped_rel, Path *path, bool is_sorted, bool can_hash, grouping_sets_data *gd, const AggClauseCosts *agg_costs, double dNumGroups)
 
static RelOptInfo * create_window_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *input_target, PathTarget *output_target, bool output_target_parallel_safe, WindowFuncLists *wflists, List *activeWindows)
 
static void create_one_window_path (PlannerInfo *root, RelOptInfo *window_rel, Path *path, PathTarget *input_target, PathTarget *output_target, WindowFuncLists *wflists, List *activeWindows)
 
static RelOptInfo * create_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target)
 
static void create_partial_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *final_distinct_rel, PathTarget *target)
 
static RelOptInfo * create_final_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *distinct_rel)
 
static List * get_useful_pathkeys_for_distinct (PlannerInfo *root, List *needed_pathkeys, List *path_pathkeys)
 
static RelOptInfo * create_ordered_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, double limit_tuples)
 
static PathTarget * make_group_input_target (PlannerInfo *root, PathTarget *final_target)
 
static PathTarget * make_partial_grouping_target (PlannerInfo *root, PathTarget *grouping_target, Node *havingQual)
 
static List * postprocess_setop_tlist (List *new_tlist, List *orig_tlist)
 
static void optimize_window_clauses (PlannerInfo *root, WindowFuncLists *wflists)
 
static List * select_active_windows (PlannerInfo *root, WindowFuncLists *wflists)
 
static void name_active_windows (List *activeWindows)
 
static PathTarget * make_window_input_target (PlannerInfo *root, PathTarget *final_target, List *activeWindows)
 
static List * make_pathkeys_for_window (PlannerInfo *root, WindowClause *wc, List *tlist)
 
static PathTarget * make_sort_input_target (PlannerInfo *root, PathTarget *final_target, bool *have_postponed_srfs)
 
static void adjust_paths_for_srfs (PlannerInfo *root, RelOptInfo *rel, List *targets, List *targets_contain_srfs)
 
static void add_paths_to_grouping_rel (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, double dNumGroups, GroupPathExtraData *extra)
 
static RelOptInfo * create_partial_grouping_paths (PlannerInfo *root, RelOptInfo *grouped_rel, RelOptInfo *input_rel, grouping_sets_data *gd, GroupPathExtraData *extra, bool force_rel_creation)
 
static Path * make_ordered_path (PlannerInfo *root, RelOptInfo *rel, Path *path, Path *cheapest_path, List *pathkeys, double limit_tuples)
 
static void gather_grouping_paths (PlannerInfo *root, RelOptInfo *rel)
 
static bool can_partial_agg (PlannerInfo *root)
 
static void apply_scanjoin_target_to_paths (PlannerInfo *root, RelOptInfo *rel, List *scanjoin_targets, List *scanjoin_targets_contain_srfs, bool scanjoin_target_parallel_safe, bool tlist_same_exprs)
 
static void create_partitionwise_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, PartitionwiseAggregateType patype, GroupPathExtraData *extra)
 
static bool group_by_has_partkey (RelOptInfo *input_rel, List *targetList, List *groupClause)
 
static int common_prefix_cmp (const void *a, const void *b)
 
static List * generate_setop_child_grouplist (SetOperationStmt *op, List *targetlist)
 
PlannedStmt * planner (Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams)
 
PlannedStmt * standard_planner (Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams)
 
PlannerInfo * subquery_planner (PlannerGlobal *glob, Query *parse, PlannerInfo *parent_root, bool hasRecursion, double tuple_fraction, SetOperationStmt *setops)
 
Expr * preprocess_phv_expression (PlannerInfo *root, Expr *expr)
 
RowMarkType select_rowmark_type (RangeTblEntry *rte, LockClauseStrength strength)
 
bool limit_needed (Query *parse)
 
static bool has_volatile_pathkey (List *keys)
 
static void adjust_group_pathkeys_for_groupagg (PlannerInfo *root)
 
void mark_partial_aggref (Aggref *agg, AggSplit aggsplit)
 
Path * get_cheapest_fractional_path (RelOptInfo *rel, double tuple_fraction)
 
Expr * expression_planner (Expr *expr)
 
Expr * expression_planner_with_deps (Expr *expr, List **relationOids, List **invalItems)
 
bool plan_cluster_use_sort (Oid tableOid, Oid indexOid)
 
int plan_create_index_workers (Oid tableOid, Oid indexOid)
 

Variables

double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION
 
int debug_parallel_query = DEBUG_PARALLEL_OFF
 
bool parallel_leader_participation = true
 
bool enable_distinct_reordering = true
 
planner_hook_type planner_hook = NULL
 
create_upper_paths_hook_type create_upper_paths_hook = NULL
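
planner_hook is the usual top-level extension point: a loadable module can wrap or replace standard_planner() through it. The following is a minimal, hedged sketch of the conventional module boilerplate (my_planner and the module itself are illustrative, not part of planner.c; the hook type and the planner()/standard_planner() signatures are as documented on this page):

#include "postgres.h"
#include "fmgr.h"
#include "optimizer/planner.h"

PG_MODULE_MAGIC;

static planner_hook_type prev_planner_hook = NULL;

/* Wrapper with the same signature as planner()/standard_planner() */
static PlannedStmt *
my_planner(Query *parse, const char *query_string,
           int cursorOptions, ParamListInfo boundParams)
{
    /* ... inspect or adjust the Query here ... */

    /* Chain to any previous hook, else fall back to the standard planner */
    if (prev_planner_hook)
        return prev_planner_hook(parse, query_string, cursorOptions,
                                 boundParams);
    return standard_planner(parse, query_string, cursorOptions, boundParams);
}

void
_PG_init(void)
{
    prev_planner_hook = planner_hook;
    planner_hook = my_planner;
}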
 

Macro Definition Documentation

◆ EXPRKIND_APPINFO

#define EXPRKIND_APPINFO   7

Definition at line 87 of file planner.c.

◆ EXPRKIND_ARBITER_ELEM

#define EXPRKIND_ARBITER_ELEM   10

Definition at line 90 of file planner.c.

◆ EXPRKIND_GROUPEXPR

#define EXPRKIND_GROUPEXPR   13

Definition at line 93 of file planner.c.

◆ EXPRKIND_LIMIT

#define EXPRKIND_LIMIT   6

Definition at line 86 of file planner.c.

◆ EXPRKIND_PHV

#define EXPRKIND_PHV   8

Definition at line 88 of file planner.c.

◆ EXPRKIND_QUAL

#define EXPRKIND_QUAL   0

Definition at line 80 of file planner.c.

◆ EXPRKIND_RTFUNC

#define EXPRKIND_RTFUNC   2

Definition at line 82 of file planner.c.

◆ EXPRKIND_RTFUNC_LATERAL

#define EXPRKIND_RTFUNC_LATERAL   3

Definition at line 83 of file planner.c.

◆ EXPRKIND_TABLEFUNC

#define EXPRKIND_TABLEFUNC   11

Definition at line 91 of file planner.c.

◆ EXPRKIND_TABLEFUNC_LATERAL

#define EXPRKIND_TABLEFUNC_LATERAL   12

Definition at line 92 of file planner.c.

◆ EXPRKIND_TABLESAMPLE

#define EXPRKIND_TABLESAMPLE   9

Definition at line 89 of file planner.c.

◆ EXPRKIND_TARGET

#define EXPRKIND_TARGET   1

Definition at line 81 of file planner.c.

◆ EXPRKIND_VALUES

#define EXPRKIND_VALUES   4

Definition at line 84 of file planner.c.

◆ EXPRKIND_VALUES_LATERAL

#define EXPRKIND_VALUES_LATERAL   5

Definition at line 85 of file planner.c.
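
These codes are passed as the kind argument of preprocess_expression() so it can adapt its processing to the context an expression came from. As a hedged illustration, subquery_planner() applies them in this calling pattern (an excerpt, not the complete set of call sites):

/* Each expression tree in the Query is preprocessed with a code
 * identifying where it appears, for example: */
parse->limitOffset = preprocess_expression(root, parse->limitOffset,
                                           EXPRKIND_LIMIT);
parse->limitCount = preprocess_expression(root, parse->limitCount,
                                          EXPRKIND_LIMIT);
parse->havingQual = preprocess_expression(root, parse->havingQual,
                                          EXPRKIND_QUAL);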

Function Documentation

◆ add_paths_to_grouping_rel()

static void add_paths_to_grouping_rel ( PlannerInfo * root,
RelOptInfo * input_rel,
RelOptInfo * grouped_rel,
RelOptInfo * partially_grouped_rel,
const AggClauseCosts * agg_costs,
grouping_sets_data * gd,
double  dNumGroups,
GroupPathExtraData * extra 
)
static

Definition at line 6980 of file planner.c.

6986{
6987 Query *parse = root->parse;
6988 Path *cheapest_path = input_rel->cheapest_total_path;
6989 ListCell *lc;
6990 bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
6991 bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
6992 List *havingQual = (List *) extra->havingQual;
6993 AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
6994
6995 if (can_sort)
6996 {
6997 /*
6998 * Use any available suitably-sorted path as input, and also consider
6999 * sorting the cheapest-total path and incremental sort on any paths
7000 * with presorted keys.
7001 */
7002 foreach(lc, input_rel->pathlist)
7003 {
7004 ListCell *lc2;
7005 Path *path = (Path *) lfirst(lc);
7006 Path *path_save = path;
7007 List *pathkey_orderings = NIL;
7008
7009 /* generate alternative group orderings that might be useful */
7010 pathkey_orderings = get_useful_group_keys_orderings(root, path);
7011
7012 Assert(list_length(pathkey_orderings) > 0);
7013
7014 foreach(lc2, pathkey_orderings)
7015 {
7016 GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7017
7018 /* restore the path (we replace it in the loop) */
7019 path = path_save;
7020
7021 path = make_ordered_path(root,
7022 grouped_rel,
7023 path,
7024 cheapest_path,
7025 info->pathkeys,
7026 -1.0);
7027 if (path == NULL)
7028 continue;
7029
7030 /* Now decide what to stick atop it */
7031 if (parse->groupingSets)
7032 {
7033 consider_groupingsets_paths(root, grouped_rel,
7034 path, true, can_hash,
7035 gd, agg_costs, dNumGroups);
7036 }
7037 else if (parse->hasAggs)
7038 {
7039 /*
7040 * We have aggregation, possibly with plain GROUP BY. Make
7041 * an AggPath.
7042 */
7043 add_path(grouped_rel, (Path *)
7044 create_agg_path(root,
7045 grouped_rel,
7046 path,
7047 grouped_rel->reltarget,
7048 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7049 AGGSPLIT_SIMPLE,
7050 info->clauses,
7051 havingQual,
7052 agg_costs,
7053 dNumGroups));
7054 }
7055 else if (parse->groupClause)
7056 {
7057 /*
7058 * We have GROUP BY without aggregation or grouping sets.
7059 * Make a GroupPath.
7060 */
7061 add_path(grouped_rel, (Path *)
7062 create_group_path(root,
7063 grouped_rel,
7064 path,
7065 info->clauses,
7066 havingQual,
7067 dNumGroups));
7068 }
7069 else
7070 {
7071 /* Other cases should have been handled above */
7072 Assert(false);
7073 }
7074 }
7075 }
7076
7077 /*
7078 * Instead of operating directly on the input relation, we can
7079 * consider finalizing a partially aggregated path.
7080 */
7081 if (partially_grouped_rel != NULL)
7082 {
7083 foreach(lc, partially_grouped_rel->pathlist)
7084 {
7085 ListCell *lc2;
7086 Path *path = (Path *) lfirst(lc);
7087 Path *path_save = path;
7088 List *pathkey_orderings = NIL;
7089
7090 /* generate alternative group orderings that might be useful */
7091 pathkey_orderings = get_useful_group_keys_orderings(root, path);
7092
7093 Assert(list_length(pathkey_orderings) > 0);
7094
7095 /* process all potentially interesting grouping reorderings */
7096 foreach(lc2, pathkey_orderings)
7097 {
7098 GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7099
7100 /* restore the path (we replace it in the loop) */
7101 path = path_save;
7102
7103 path = make_ordered_path(root,
7104 grouped_rel,
7105 path,
7106 partially_grouped_rel->cheapest_total_path,
7107 info->pathkeys,
7108 -1.0);
7109
7110 if (path == NULL)
7111 continue;
7112
7113 if (parse->hasAggs)
7114 add_path(grouped_rel, (Path *)
7115 create_agg_path(root,
7116 grouped_rel,
7117 path,
7118 grouped_rel->reltarget,
7119 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7120 AGGSPLIT_FINAL_DESERIAL,
7121 info->clauses,
7122 havingQual,
7123 agg_final_costs,
7124 dNumGroups));
7125 else
7126 add_path(grouped_rel, (Path *)
7127 create_group_path(root,
7128 grouped_rel,
7129 path,
7130 info->clauses,
7131 havingQual,
7132 dNumGroups));
7133
7134 }
7135 }
7136 }
7137 }
7138
7139 if (can_hash)
7140 {
7141 if (parse->groupingSets)
7142 {
7143 /*
7144 * Try for a hash-only groupingsets path over unsorted input.
7145 */
7146 consider_groupingsets_paths(root, grouped_rel,
7147 cheapest_path, false, true,
7148 gd, agg_costs, dNumGroups);
7149 }
7150 else
7151 {
7152 /*
7153 * Generate a HashAgg Path. We just need an Agg over the
7154 * cheapest-total input path, since input order won't matter.
7155 */
7156 add_path(grouped_rel, (Path *)
7157 create_agg_path(root, grouped_rel,
7158 cheapest_path,
7159 grouped_rel->reltarget,
7160 AGG_HASHED,
7161 AGGSPLIT_SIMPLE,
7162 root->processed_groupClause,
7163 havingQual,
7164 agg_costs,
7165 dNumGroups));
7166 }
7167
7168 /*
7169 * Generate a Finalize HashAgg Path atop of the cheapest partially
7170 * grouped path, assuming there is one
7171 */
7172 if (partially_grouped_rel && partially_grouped_rel->pathlist)
7173 {
7174 Path *path = partially_grouped_rel->cheapest_total_path;
7175
7176 add_path(grouped_rel, (Path *)
7177 create_agg_path(root,
7178 grouped_rel,
7179 path,
7180 grouped_rel->reltarget,
7181 AGG_HASHED,
7182 AGGSPLIT_FINAL_DESERIAL,
7183 root->processed_groupClause,
7184 havingQual,
7185 agg_final_costs,
7186 dNumGroups));
7187 }
7188 }
7189
7190 /*
7191 * When partitionwise aggregate is used, we might have fully aggregated
7192 * paths in the partial pathlist, because add_paths_to_append_rel() will
7193 * consider a path for grouped_rel consisting of a Parallel Append of
7194 * non-partial paths from each child.
7195 */
7196 if (grouped_rel->partial_pathlist != NIL)
7197 gather_grouping_paths(root, grouped_rel);
7198}

References add_path(), GroupPathExtraData::agg_final_costs, AGG_HASHED, AGG_PLAIN, AGG_SORTED, AGGSPLIT_FINAL_DESERIAL, AGGSPLIT_SIMPLE, Assert(), RelOptInfo::cheapest_total_path, GroupByOrdering::clauses, consider_groupingsets_paths(), create_agg_path(), create_group_path(), GroupPathExtraData::flags, gather_grouping_paths(), get_useful_group_keys_orderings(), GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, GroupPathExtraData::havingQual, if(), lfirst, list_length(), make_ordered_path(), NIL, parse(), RelOptInfo::partial_pathlist, GroupByOrdering::pathkeys, RelOptInfo::pathlist, RelOptInfo::reltarget, and root.

Referenced by create_ordinary_grouping_paths().

◆ adjust_group_pathkeys_for_groupagg()

static void adjust_group_pathkeys_for_groupagg ( PlannerInfo * root)
static

Definition at line 3173 of file planner.c.

3174{
3175 List *grouppathkeys = root->group_pathkeys;
3176 List *bestpathkeys;
3177 Bitmapset *bestaggs;
3178 Bitmapset *unprocessed_aggs;
3179 ListCell *lc;
3180 int i;
3181
3182 /* Shouldn't be here if there are grouping sets */
3183 Assert(root->parse->groupingSets == NIL);
3184 /* Shouldn't be here unless there are some ordered aggregates */
3185 Assert(root->numOrderedAggs > 0);
3186
3187 /* Do nothing if disabled */
3188 if (!enable_presorted_aggregate)
3189 return;
3190
3191 /*
3192 * Make a first pass over all AggInfos to collect a Bitmapset containing
3193 * the indexes of all AggInfos to be processed below.
3194 */
3195 unprocessed_aggs = NULL;
3196 foreach(lc, root->agginfos)
3197 {
3198 AggInfo *agginfo = lfirst_node(AggInfo, lc);
3199 Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3200
3201 if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
3202 continue;
3203
3204 /* only add aggregates with a DISTINCT or ORDER BY */
3205 if (aggref->aggdistinct != NIL || aggref->aggorder != NIL)
3206 unprocessed_aggs = bms_add_member(unprocessed_aggs,
3207 foreach_current_index(lc));
3208 }
3209
3210 /*
3211 * Now process all the unprocessed_aggs to find the best set of pathkeys
3212 * for the given set of aggregates.
3213 *
3214 * On the first outer loop here 'bestaggs' will be empty. We'll populate
3215 * this during the first loop using the pathkeys for the very first
3216 * AggInfo then taking any stronger pathkeys from any other AggInfos with
3217 * a more strict set of compatible pathkeys. Once the outer loop is
3218 * complete, we mark off all the aggregates with compatible pathkeys then
3219 * remove those from the unprocessed_aggs and repeat the process to try to
3220 * find another set of pathkeys that are suitable for a larger number of
3221 * aggregates. The outer loop will stop when there are not enough
3222 * unprocessed aggregates for it to be possible to find a set of pathkeys
3223 * to suit a larger number of aggregates.
3224 */
3225 bestpathkeys = NIL;
3226 bestaggs = NULL;
3227 while (bms_num_members(unprocessed_aggs) > bms_num_members(bestaggs))
3228 {
3229 Bitmapset *aggindexes = NULL;
3230 List *currpathkeys = NIL;
3231
3232 i = -1;
3233 while ((i = bms_next_member(unprocessed_aggs, i)) >= 0)
3234 {
3235 AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3236 Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3237 List *sortlist;
3238 List *pathkeys;
3239
3240 if (aggref->aggdistinct != NIL)
3241 sortlist = aggref->aggdistinct;
3242 else
3243 sortlist = aggref->aggorder;
3244
3245 pathkeys = make_pathkeys_for_sortclauses(root, sortlist,
3246 aggref->args);
3247
3248 /*
3249 * Ignore Aggrefs which have volatile functions in their ORDER BY
3250 * or DISTINCT clause.
3251 */
3252 if (has_volatile_pathkey(pathkeys))
3253 {
3254 unprocessed_aggs = bms_del_member(unprocessed_aggs, i);
3255 continue;
3256 }
3257
3258 /*
3259 * When not set yet, take the pathkeys from the first unprocessed
3260 * aggregate.
3261 */
3262 if (currpathkeys == NIL)
3263 {
3264 currpathkeys = pathkeys;
3265
3266 /* include the GROUP BY pathkeys, if they exist */
3267 if (grouppathkeys != NIL)
3268 currpathkeys = append_pathkeys(list_copy(grouppathkeys),
3269 currpathkeys);
3270
3271 /* record that we found pathkeys for this aggregate */
3272 aggindexes = bms_add_member(aggindexes, i);
3273 }
3274 else
3275 {
3276 /* now look for a stronger set of matching pathkeys */
3277
3278 /* include the GROUP BY pathkeys, if they exist */
3279 if (grouppathkeys != NIL)
3280 pathkeys = append_pathkeys(list_copy(grouppathkeys),
3281 pathkeys);
3282
3283 /* are 'pathkeys' compatible or better than 'currpathkeys'? */
3284 switch (compare_pathkeys(currpathkeys, pathkeys))
3285 {
3286 case PATHKEYS_BETTER2:
3287 /* 'pathkeys' are stronger, use these ones instead */
3288 currpathkeys = pathkeys;
3289 /* FALLTHROUGH */
3290
3291 case PATHKEYS_BETTER1:
3292 /* 'pathkeys' are less strict */
3293 /* FALLTHROUGH */
3294
3295 case PATHKEYS_EQUAL:
3296 /* mark this aggregate as covered by 'currpathkeys' */
3297 aggindexes = bms_add_member(aggindexes, i);
3298 break;
3299
3300 case PATHKEYS_DIFFERENT:
3301 break;
3302 }
3303 }
3304 }
3305
3306 /* remove the aggregates that we've just processed */
3307 unprocessed_aggs = bms_del_members(unprocessed_aggs, aggindexes);
3308
3309 /*
3310 * If this pass included more aggregates than the previous best then
3311 * use these ones as the best set.
3312 */
3313 if (bms_num_members(aggindexes) > bms_num_members(bestaggs))
3314 {
3315 bestaggs = aggindexes;
3316 bestpathkeys = currpathkeys;
3317 }
3318 }
3319
3320 /*
3321 * If we found any ordered aggregates, update root->group_pathkeys to add
3322 * the best set of aggregate pathkeys. Note that bestpathkeys includes
3323 * the original GROUP BY pathkeys already.
3324 */
3325 if (bestpathkeys != NIL)
3326 root->group_pathkeys = bestpathkeys;
3327
3328 /*
3329 * Now that we've found the best set of aggregates we can set the
3330 * presorted flag to indicate to the executor that it needn't bother
3331 * performing a sort for these Aggrefs. We're able to do this now as
3332 * there's no chance of a Hash Aggregate plan as create_grouping_paths
3333 * will not mark the GROUP BY as GROUPING_CAN_USE_HASH due to the presence
3334 * of ordered aggregates.
3335 */
3336 i = -1;
3337 while ((i = bms_next_member(bestaggs, i)) >= 0)
3338 {
3339 AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3340
3341 foreach(lc, agginfo->aggrefs)
3342 {
3343 Aggref *aggref = lfirst_node(Aggref, lc);
3344
3345 aggref->aggpresorted = true;
3346 }
3347 }
3348}

References Aggref::aggdistinct, Aggref::aggorder, AggInfo::aggrefs, append_pathkeys(), Aggref::args, Assert(), bms_add_member(), bms_del_member(), bms_del_members(), bms_next_member(), bms_num_members(), compare_pathkeys(), enable_presorted_aggregate, foreach_current_index, has_volatile_pathkey(), i, lfirst_node, linitial_node, list_copy(), list_nth_node, make_pathkeys_for_sortclauses(), NIL, PATHKEYS_BETTER1, PATHKEYS_BETTER2, PATHKEYS_DIFFERENT, PATHKEYS_EQUAL, and root.

Referenced by standard_qp_callback().
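
The switch on compare_pathkeys() above is the core of the search for one "strongest" ordering that satisfies as many ordered aggregates as possible. A hedged standalone sketch of that test (stronger_pathkeys is a hypothetical helper written for illustration; compare_pathkeys() and the PATHKEYS_* results come from optimizer/paths.h):

/* Return the stronger of two orderings, or NIL if they conflict. */
static List *
stronger_pathkeys(List *keys1, List *keys2)
{
    switch (compare_pathkeys(keys1, keys2))
    {
        case PATHKEYS_EQUAL:    /* identical orderings */
        case PATHKEYS_BETTER1:  /* keys2 is a prefix of keys1 */
            return keys1;
        case PATHKEYS_BETTER2:  /* keys1 is a prefix of keys2 */
            return keys2;
        case PATHKEYS_DIFFERENT:
        default:
            return NIL;         /* neither ordering subsumes the other */
    }
}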

◆ adjust_paths_for_srfs()

static void adjust_paths_for_srfs ( PlannerInfo * root,
RelOptInfo * rel,
List * targets,
List * targets_contain_srfs 
)
static

Definition at line 6529 of file planner.c.

6531{
6532 ListCell *lc;
6533
6534 Assert(list_length(targets) == list_length(targets_contain_srfs));
6535 Assert(!linitial_int(targets_contain_srfs));
6536
6537 /* If no SRFs appear at this plan level, nothing to do */
6538 if (list_length(targets) == 1)
6539 return;
6540
6541 /*
6542 * Stack SRF-evaluation nodes atop each path for the rel.
6543 *
6544 * In principle we should re-run set_cheapest() here to identify the
6545 * cheapest path, but it seems unlikely that adding the same tlist eval
6546 * costs to all the paths would change that, so we don't bother. Instead,
6547 * just assume that the cheapest-startup and cheapest-total paths remain
6548 * so. (There should be no parameterized paths anymore, so we needn't
6549 * worry about updating cheapest_parameterized_paths.)
6550 */
6551 foreach(lc, rel->pathlist)
6552 {
6553 Path *subpath = (Path *) lfirst(lc);
6554 Path *newpath = subpath;
6555 ListCell *lc1,
6556 *lc2;
6557
6558 Assert(subpath->param_info == NULL);
6559 forboth(lc1, targets, lc2, targets_contain_srfs)
6560 {
6561 PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6562 bool contains_srfs = (bool) lfirst_int(lc2);
6563
6564 /* If this level doesn't contain SRFs, do regular projection */
6565 if (contains_srfs)
6566 newpath = (Path *) create_set_projection_path(root,
6567 rel,
6568 newpath,
6569 thistarget);
6570 else
6571 newpath = (Path *) apply_projection_to_path(root,
6572 rel,
6573 newpath,
6574 thistarget);
6575 }
6576 lfirst(lc) = newpath;
6577 if (subpath == rel->cheapest_startup_path)
6578 rel->cheapest_startup_path = newpath;
6579 if (subpath == rel->cheapest_total_path)
6580 rel->cheapest_total_path = newpath;
6581 }
6582
6583 /* Likewise for partial paths, if any */
6584 foreach(lc, rel->partial_pathlist)
6585 {
6586 Path *subpath = (Path *) lfirst(lc);
6587 Path *newpath = subpath;
6588 ListCell *lc1,
6589 *lc2;
6590
6591 Assert(subpath->param_info == NULL);
6592 forboth(lc1, targets, lc2, targets_contain_srfs)
6593 {
6594 PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6595 bool contains_srfs = (bool) lfirst_int(lc2);
6596
6597 /* If this level doesn't contain SRFs, do regular projection */
6598 if (contains_srfs)
6599 newpath = (Path *) create_set_projection_path(root,
6600 rel,
6601 newpath,
6602 thistarget);
6603 else
6604 {
6605 /* avoid apply_projection_to_path, in case of multiple refs */
6606 newpath = (Path *) create_projection_path(root,
6607 rel,
6608 newpath,
6609 thistarget);
6610 }
6611 }
6612 lfirst(lc) = newpath;
6613 }
6614}

References apply_projection_to_path(), Assert(), RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, create_projection_path(), create_set_projection_path(), forboth, lfirst, lfirst_int, lfirst_node, linitial_int, list_length(), RelOptInfo::partial_pathlist, RelOptInfo::pathlist, root, and subpath().

Referenced by apply_scanjoin_target_to_paths(), and grouping_planner().

◆ apply_scanjoin_target_to_paths()

static void apply_scanjoin_target_to_paths ( PlannerInfo * root,
RelOptInfo * rel,
List * scanjoin_targets,
List * scanjoin_targets_contain_srfs,
bool  scanjoin_target_parallel_safe,
bool  tlist_same_exprs 
)
static

Definition at line 7695 of file planner.c.

7701{
7702 bool rel_is_partitioned = IS_PARTITIONED_REL(rel);
7703 PathTarget *scanjoin_target;
7704 ListCell *lc;
7705
7706 /* This recurses, so be paranoid. */
7707 check_stack_depth();
7708
7709 /*
7710 * If the rel is partitioned, we want to drop its existing paths and
7711 * generate new ones. This function would still be correct if we kept the
7712 * existing paths: we'd modify them to generate the correct target above
7713 * the partitioning Append, and then they'd compete on cost with paths
7714 * generating the target below the Append. However, in our current cost
7715 * model the latter way is always the same or cheaper cost, so modifying
7716 * the existing paths would just be useless work. Moreover, when the cost
7717 * is the same, varying roundoff errors might sometimes allow an existing
7718 * path to be picked, resulting in undesirable cross-platform plan
7719 * variations. So we drop old paths and thereby force the work to be done
7720 * below the Append, except in the case of a non-parallel-safe target.
7721 *
7722 * Some care is needed, because we have to allow
7723 * generate_useful_gather_paths to see the old partial paths in the next
7724 * stanza. Hence, zap the main pathlist here, then allow
7725 * generate_useful_gather_paths to add path(s) to the main list, and
7726 * finally zap the partial pathlist.
7727 */
7728 if (rel_is_partitioned)
7729 rel->pathlist = NIL;
7730
7731 /*
7732 * If the scan/join target is not parallel-safe, partial paths cannot
7733 * generate it.
7734 */
7735 if (!scanjoin_target_parallel_safe)
7736 {
7737 /*
7738 * Since we can't generate the final scan/join target in parallel
7739 * workers, this is our last opportunity to use any partial paths that
7740 * exist; so build Gather path(s) that use them and emit whatever the
7741 * current reltarget is. We don't do this in the case where the
7742 * target is parallel-safe, since we will be able to generate superior
7743 * paths by doing it after the final scan/join target has been
7744 * applied.
7745 */
7746 generate_useful_gather_paths(root, rel, false);
7747
7748 /* Can't use parallel query above this level. */
7749 rel->partial_pathlist = NIL;
7750 rel->consider_parallel = false;
7751 }
7752
7753 /* Finish dropping old paths for a partitioned rel, per comment above */
7754 if (rel_is_partitioned)
7755 rel->partial_pathlist = NIL;
7756
7757 /* Extract SRF-free scan/join target. */
7758 scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
7759
7760 /*
7761 * Apply the SRF-free scan/join target to each existing path.
7762 *
7763 * If the tlist exprs are the same, we can just inject the sortgroupref
7764 * information into the existing pathtargets. Otherwise, replace each
7765 * path with a projection path that generates the SRF-free scan/join
7766 * target. This can't change the ordering of paths within rel->pathlist,
7767 * so we just modify the list in place.
7768 */
7769 foreach(lc, rel->pathlist)
7770 {
7771 Path *subpath = (Path *) lfirst(lc);
7772
7773 /* Shouldn't have any parameterized paths anymore */
7774 Assert(subpath->param_info == NULL);
7775
7776 if (tlist_same_exprs)
7777 subpath->pathtarget->sortgrouprefs =
7778 scanjoin_target->sortgrouprefs;
7779 else
7780 {
7781 Path *newpath;
7782
7783 newpath = (Path *) create_projection_path(root, rel, subpath,
7784 scanjoin_target);
7785 lfirst(lc) = newpath;
7786 }
7787 }
7788
7789 /* Likewise adjust the targets for any partial paths. */
7790 foreach(lc, rel->partial_pathlist)
7791 {
7792 Path *subpath = (Path *) lfirst(lc);
7793
7794 /* Shouldn't have any parameterized paths anymore */
7795 Assert(subpath->param_info == NULL);
7796
7797 if (tlist_same_exprs)
7798 subpath->pathtarget->sortgrouprefs =
7799 scanjoin_target->sortgrouprefs;
7800 else
7801 {
7802 Path *newpath;
7803
7804 newpath = (Path *) create_projection_path(root, rel, subpath,
7805 scanjoin_target);
7806 lfirst(lc) = newpath;
7807 }
7808 }
7809
7810 /*
7811 * Now, if final scan/join target contains SRFs, insert ProjectSetPath(s)
7812 * atop each existing path. (Note that this function doesn't look at the
7813 * cheapest-path fields, which is a good thing because they're bogus right
7814 * now.)
7815 */
7816 if (root->parse->hasTargetSRFs)
7817 adjust_paths_for_srfs(root, rel,
7818 scanjoin_targets,
7819 scanjoin_targets_contain_srfs);
7820
7821 /*
7822 * Update the rel's target to be the final (with SRFs) scan/join target.
7823 * This now matches the actual output of all the paths, and we might get
7824 * confused in createplan.c if they don't agree. We must do this now so
7825 * that any append paths made in the next part will use the correct
7826 * pathtarget (cf. create_append_path).
7827 *
7828 * Note that this is also necessary if GetForeignUpperPaths() gets called
7829 * on the final scan/join relation or on any of its children, since the
7830 * FDW might look at the rel's target to create ForeignPaths.
7831 */
7832 rel->reltarget = llast_node(PathTarget, scanjoin_targets);
7833
7834 /*
7835 * If the relation is partitioned, recursively apply the scan/join target
7836 * to all partitions, and generate brand-new Append paths in which the
7837 * scan/join target is computed below the Append rather than above it.
7838 * Since Append is not projection-capable, that might save a separate
7839 * Result node, and it also is important for partitionwise aggregate.
7840 */
7841 if (rel_is_partitioned)
7842 {
7843 List *live_children = NIL;
7844 int i;
7845
7846 /* Adjust each partition. */
7847 i = -1;
7848 while ((i = bms_next_member(rel->live_parts, i)) >= 0)
7849 {
7850 RelOptInfo *child_rel = rel->part_rels[i];
7851 AppendRelInfo **appinfos;
7852 int nappinfos;
7853 List *child_scanjoin_targets = NIL;
7854
7855 Assert(child_rel != NULL);
7856
7857 /* Dummy children can be ignored. */
7858 if (IS_DUMMY_REL(child_rel))
7859 continue;
7860
7861 /* Translate scan/join targets for this child. */
7862 appinfos = find_appinfos_by_relids(root, child_rel->relids,
7863 &nappinfos);
7864 foreach(lc, scanjoin_targets)
7865 {
7866 PathTarget *target = lfirst_node(PathTarget, lc);
7867
7868 target = copy_pathtarget(target);
7869 target->exprs = (List *)
7870 adjust_appendrel_attrs(root,
7871 (Node *) target->exprs,
7872 nappinfos, appinfos);
7873 child_scanjoin_targets = lappend(child_scanjoin_targets,
7874 target);
7875 }
7876 pfree(appinfos);
7877
7878 /* Recursion does the real work. */
7879 apply_scanjoin_target_to_paths(root, child_rel,
7880 child_scanjoin_targets,
7881 scanjoin_targets_contain_srfs,
7882 scanjoin_target_parallel_safe,
7883 tlist_same_exprs);
7884
7885 /* Save non-dummy children for Append paths. */
7886 if (!IS_DUMMY_REL(child_rel))
7887 live_children = lappend(live_children, child_rel);
7888 }
7889
7890 /* Build new paths for this relation by appending child paths. */
7891 add_paths_to_append_rel(root, rel, live_children);
7892 }
7893
7894 /*
7895 * Consider generating Gather or Gather Merge paths. We must only do this
7896 * if the relation is parallel safe, and we don't do it for child rels to
7897 * avoid creating multiple Gather nodes within the same plan. We must do
7898 * this after all paths have been generated and before set_cheapest, since
7899 * one of the generated paths may turn out to be the cheapest one.
7900 */
7901 if (rel->consider_parallel && !IS_OTHER_REL(rel))
7902 generate_useful_gather_paths(root, rel, false);
7903
7904 /*
7905 * Reassess which paths are the cheapest, now that we've potentially added
7906 * new Gather (or Gather Merge) and/or Append (or MergeAppend) paths to
7907 * this relation.
7908 */
7909 set_cheapest(rel);
7910}

References add_paths_to_append_rel(), adjust_appendrel_attrs(), adjust_paths_for_srfs(), apply_scanjoin_target_to_paths(), Assert(), bms_next_member(), check_stack_depth(), RelOptInfo::consider_parallel, copy_pathtarget(), create_projection_path(), PathTarget::exprs, find_appinfos_by_relids(), generate_useful_gather_paths(), i, IS_DUMMY_REL, IS_OTHER_REL, IS_PARTITIONED_REL, lappend(), lfirst, lfirst_node, linitial_node, RelOptInfo::live_parts, llast_node, NIL, RelOptInfo::partial_pathlist, RelOptInfo::pathlist, pfree(), RelOptInfo::relids, RelOptInfo::reltarget, root, set_cheapest(), subpath(), and tlist_same_exprs().

Referenced by apply_scanjoin_target_to_paths(), and grouping_planner().

◆ can_partial_agg()

static bool can_partial_agg ( PlannerInfo * root)
static

Definition at line 7653 of file planner.c.

7654{
7655 Query *parse = root->parse;
7656
7657 if (!parse->hasAggs && parse->groupClause == NIL)
7658 {
7659 /*
7660 * We don't know how to do parallel aggregation unless we have either
7661 * some aggregates or a grouping clause.
7662 */
7663 return false;
7664 }
7665 else if (parse->groupingSets)
7666 {
7667 /* We don't know how to do grouping sets in parallel. */
7668 return false;
7669 }
7670 else if (root->hasNonPartialAggs || root->hasNonSerialAggs)
7671 {
7672 /* Insufficient support for partial mode. */
7673 return false;
7674 }
7675
7676 /* Everything looks good. */
7677 return true;
7678}

References NIL, parse(), and root.

Referenced by create_grouping_paths().

◆ common_prefix_cmp()

static int common_prefix_cmp ( const void *  a,
const void *  b 
)
static

Definition at line 5990 of file planner.c.

5991{
5992 const WindowClauseSortData *wcsa = a;
5993 const WindowClauseSortData *wcsb = b;
5994 ListCell *item_a;
5995 ListCell *item_b;
5996
5997 forboth(item_a, wcsa->uniqueOrder, item_b, wcsb->uniqueOrder)
5998 {
5999 SortGroupClause *sca = lfirst_node(SortGroupClause, item_a);
6000 SortGroupClause *scb = lfirst_node(SortGroupClause, item_b);
6001
6002 if (sca->tleSortGroupRef > scb->tleSortGroupRef)
6003 return -1;
6004 else if (sca->tleSortGroupRef < scb->tleSortGroupRef)
6005 return 1;
6006 else if (sca->sortop > scb->sortop)
6007 return -1;
6008 else if (sca->sortop < scb->sortop)
6009 return 1;
6010 else if (sca->nulls_first && !scb->nulls_first)
6011 return -1;
6012 else if (!sca->nulls_first && scb->nulls_first)
6013 return 1;
6014 /* no need to compare eqop, since it is fully determined by sortop */
6015 }
6016
6017 if (list_length(wcsa->uniqueOrder) > list_length(wcsb->uniqueOrder))
6018 return -1;
6019 else if (list_length(wcsa->uniqueOrder) < list_length(wcsb->uniqueOrder))
6020 return 1;
6021
6022 return 0;
6023}

References a, b, forboth, lfirst_node, list_length(), SortGroupClause::nulls_first, SortGroupClause::sortop, SortGroupClause::tleSortGroupRef, and WindowClauseSortData::uniqueOrder.

Referenced by select_active_windows().
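
Because the comparator has the standard const void * signature, the caller can hand it straight to qsort(). A hedged sketch of how select_active_windows() uses it (the array and count names here are illustrative):

/* Sort candidate window clauses so that clauses sharing the longest
 * common sort prefix end up adjacent; sort keys compare descending. */
qsort(actives, nActive, sizeof(WindowClauseSortData), common_prefix_cmp);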

◆ consider_groupingsets_paths()

static void consider_groupingsets_paths ( PlannerInfo * root,
RelOptInfo * grouped_rel,
Path * path,
bool  is_sorted,
bool  can_hash,
grouping_sets_data * gd,
const AggClauseCosts * agg_costs,
double  dNumGroups 
)
static

Definition at line 4072 of file planner.c.

4080{
4081 Query *parse = root->parse;
4082 Size hash_mem_limit = get_hash_memory_limit();
4083
4084 /*
4085 * If we're not being offered sorted input, then only consider plans that
4086 * can be done entirely by hashing.
4087 *
4088 * We can hash everything if it looks like it'll fit in hash_mem. But if
4089 * the input is actually sorted despite not being advertised as such, we
4090 * prefer to make use of that in order to use less memory.
4091 *
4092 * If none of the grouping sets are sortable, then ignore the hash_mem
4093 * limit and generate a path anyway, since otherwise we'll just fail.
4094 */
4095 if (!is_sorted)
4096 {
4097 List *new_rollups = NIL;
4098 RollupData *unhashed_rollup = NULL;
4099 List *sets_data;
4100 List *empty_sets_data = NIL;
4101 List *empty_sets = NIL;
4102 ListCell *lc;
4103 ListCell *l_start = list_head(gd->rollups);
4104 AggStrategy strat = AGG_HASHED;
4105 double hashsize;
4106 double exclude_groups = 0.0;
4107
4108 Assert(can_hash);
4109
4110 /*
4111 * If the input is coincidentally sorted usefully (which can happen
4112 * even if is_sorted is false, since that only means that our caller
4113 * has set up the sorting for us), then save some hashtable space by
4114 * making use of that. But we need to watch out for degenerate cases:
4115 *
4116 * 1) If there are any empty grouping sets, then group_pathkeys might
4117 * be NIL if all non-empty grouping sets are unsortable. In this case,
4118 * there will be a rollup containing only empty groups, and the
4119 * pathkeys_contained_in test is vacuously true; this is ok.
4120 *
4121 * XXX: the above relies on the fact that group_pathkeys is generated
4122 * from the first rollup. If we add the ability to consider multiple
4123 * sort orders for grouping input, this assumption might fail.
4124 *
4125 * 2) If there are no empty sets and only unsortable sets, then the
4126 * rollups list will be empty (and thus l_start == NULL), and
4127 * group_pathkeys will be NIL; we must ensure that the vacuously-true
4128 * pathkeys_contained_in test doesn't cause us to crash.
4129 */
4130 if (l_start != NULL &&
4131 pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
4132 {
4133 unhashed_rollup = lfirst_node(RollupData, l_start);
4134 exclude_groups = unhashed_rollup->numGroups;
4135 l_start = lnext(gd->rollups, l_start);
4136 }
4137
4138 hashsize = estimate_hashagg_tablesize(root,
4139 path,
4140 agg_costs,
4141 dNumGroups - exclude_groups);
4142
4143 /*
4144 * gd->rollups is empty if we have only unsortable columns to work
4145 * with. Override hash_mem in that case; otherwise, we'll rely on the
4146 * sorted-input case to generate usable mixed paths.
4147 */
4148 if (hashsize > hash_mem_limit && gd->rollups)
4149 return; /* nope, won't fit */
4150
4151 /*
4152 * We need to burst the existing rollups list into individual grouping
4153 * sets and recompute a groupClause for each set.
4154 */
4155 sets_data = list_copy(gd->unsortable_sets);
4156
4157 for_each_cell(lc, gd->rollups, l_start)
4158 {
4159 RollupData *rollup = lfirst_node(RollupData, lc);
4160
4161 /*
4162 * If we find an unhashable rollup that's not been skipped by the
4163 * "actually sorted" check above, we can't cope; we'd need sorted
4164 * input (with a different sort order) but we can't get that here.
4165 * So bail out; we'll get a valid path from the is_sorted case
4166 * instead.
4167 *
4168 * The mere presence of empty grouping sets doesn't make a rollup
4169 * unhashable (see preprocess_grouping_sets), we handle those
4170 * specially below.
4171 */
4172 if (!rollup->hashable)
4173 return;
4174
4175 sets_data = list_concat(sets_data, rollup->gsets_data);
4176 }
4177 foreach(lc, sets_data)
4178 {
4179 GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4180 List *gset = gs->set;
4181 RollupData *rollup;
4182
4183 if (gset == NIL)
4184 {
4185 /* Empty grouping sets can't be hashed. */
4186 empty_sets_data = lappend(empty_sets_data, gs);
4187 empty_sets = lappend(empty_sets, NIL);
4188 }
4189 else
4190 {
4191 rollup = makeNode(RollupData);
4192
4193 rollup->groupClause = preprocess_groupclause(root, gset);
4194 rollup->gsets_data = list_make1(gs);
4195 rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4196 rollup->gsets_data,
4197 gd->tleref_to_colnum_map);
4198 rollup->numGroups = gs->numGroups;
4199 rollup->hashable = true;
4200 rollup->is_hashed = true;
4201 new_rollups = lappend(new_rollups, rollup);
4202 }
4203 }
4204
4205 /*
4206 * If we didn't find anything nonempty to hash, then bail. We'll
4207 * generate a path from the is_sorted case.
4208 */
4209 if (new_rollups == NIL)
4210 return;
4211
4212 /*
4213 * If there were empty grouping sets they should have been in the
4214 * first rollup.
4215 */
4216 Assert(!unhashed_rollup || !empty_sets);
4217
4218 if (unhashed_rollup)
4219 {
4220 new_rollups = lappend(new_rollups, unhashed_rollup);
4221 strat = AGG_MIXED;
4222 }
4223 else if (empty_sets)
4224 {
4225 RollupData *rollup = makeNode(RollupData);
4226
4227 rollup->groupClause = NIL;
4228 rollup->gsets_data = empty_sets_data;
4229 rollup->gsets = empty_sets;
4230 rollup->numGroups = list_length(empty_sets);
4231 rollup->hashable = false;
4232 rollup->is_hashed = false;
4233 new_rollups = lappend(new_rollups, rollup);
4234 strat = AGG_MIXED;
4235 }
4236
4237 add_path(grouped_rel, (Path *)
4238 create_groupingsets_path(root,
4239 grouped_rel,
4240 path,
4241 (List *) parse->havingQual,
4242 strat,
4243 new_rollups,
4244 agg_costs));
4245 return;
4246 }
4247
4248 /*
4249 * If we have sorted input but nothing we can do with it, bail.
4250 */
4251 if (gd->rollups == NIL)
4252 return;
4253
4254 /*
4255 * Given sorted input, we try and make two paths: one sorted and one mixed
4256 * sort/hash. (We need to try both because hashagg might be disabled, or
4257 * some columns might not be sortable.)
4258 *
4259 * can_hash is passed in as false if some obstacle elsewhere (such as
4260 * ordered aggs) means that we shouldn't consider hashing at all.
4261 */
4262 if (can_hash && gd->any_hashable)
4263 {
4264 List *rollups = NIL;
4265 List *hash_sets = list_copy(gd->unsortable_sets);
4266 double availspace = hash_mem_limit;
4267 ListCell *lc;
4268
4269 /*
4270 * Account first for space needed for groups we can't sort at all.
4271 */
4272 availspace -= estimate_hashagg_tablesize(root,
4273 path,
4274 agg_costs,
4275 gd->dNumHashGroups);
4276
4277 if (availspace > 0 && list_length(gd->rollups) > 1)
4278 {
4279 double scale;
4280 int num_rollups = list_length(gd->rollups);
4281 int k_capacity;
4282 int *k_weights = palloc(num_rollups * sizeof(int));
4283 Bitmapset *hash_items = NULL;
4284 int i;
4285
4286 /*
4287 * We treat this as a knapsack problem: the knapsack capacity
4288 * represents hash_mem, the item weights are the estimated memory
4289 * usage of the hashtables needed to implement a single rollup,
4290 * and we really ought to use the cost saving as the item value;
4291 * however, currently the costs assigned to sort nodes don't
4292 * reflect the comparison costs well, and so we treat all items as
4293 * of equal value (each rollup we hash instead saves us one sort).
4294 *
4295 * To use the discrete knapsack, we need to scale the values to a
4296 * reasonably small bounded range. We choose to allow a 5% error
4297 * margin; we have no more than 4096 rollups in the worst possible
4298 * case, which with a 5% error margin will require a bit over 42MB
4299 * of workspace. (Anyone wanting to plan queries that complex had
4300 * better have the memory for it. In more reasonable cases, with
4301 * no more than a couple of dozen rollups, the memory usage will
4302 * be negligible.)
4303 *
4304 * k_capacity is naturally bounded, but we clamp the values for
4305 * scale and weight (below) to avoid overflows or underflows (or
4306 * uselessly trying to use a scale factor less than 1 byte).
4307 */
4308 scale = Max(availspace / (20.0 * num_rollups), 1.0);
4309 k_capacity = (int) floor(availspace / scale);
4310
4311 /*
4312 * We leave the first rollup out of consideration since it's the
4313 * one that matches the input sort order. We assign indexes "i"
4314 * to only those entries considered for hashing; the second loop,
4315 * below, must use the same condition.
4316 */
4317 i = 0;
4318 for_each_from(lc, gd->rollups, 1)
4319 {
4320 RollupData *rollup = lfirst_node(RollupData, lc);
4321
4322 if (rollup->hashable)
4323 {
4324 double sz = estimate_hashagg_tablesize(root,
4325 path,
4326 agg_costs,
4327 rollup->numGroups);
4328
4329 /*
4330 * If sz is enormous, but hash_mem (and hence scale) is
4331 * small, avoid integer overflow here.
4332 */
4333 k_weights[i] = (int) Min(floor(sz / scale),
4334 k_capacity + 1.0);
4335 ++i;
4336 }
4337 }
4338
4339 /*
4340 * Apply knapsack algorithm; compute the set of items which
4341 * maximizes the value stored (in this case the number of sorts
4342 * saved) while keeping the total size (approximately) within
4343 * capacity.
4344 */
4345 if (i > 0)
4346 hash_items = DiscreteKnapsack(k_capacity, i, k_weights, NULL);
4347
4348 if (!bms_is_empty(hash_items))
4349 {
4350 rollups = list_make1(linitial(gd->rollups));
4351
4352 i = 0;
4353 for_each_from(lc, gd->rollups, 1)
4354 {
4355 RollupData *rollup = lfirst_node(RollupData, lc);
4356
4357 if (rollup->hashable)
4358 {
4359 if (bms_is_member(i, hash_items))
4360 hash_sets = list_concat(hash_sets,
4361 rollup->gsets_data);
4362 else
4363 rollups = lappend(rollups, rollup);
4364 ++i;
4365 }
4366 else
4367 rollups = lappend(rollups, rollup);
4368 }
4369 }
4370 }
4371
4372 if (!rollups && hash_sets)
4373 rollups = list_copy(gd->rollups);
4374
4375 foreach(lc, hash_sets)
4376 {
4377 GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4378 RollupData *rollup = makeNode(RollupData);
4379
4380 Assert(gs->set != NIL);
4381
4382 rollup->groupClause = preprocess_groupclause(root, gs->set);
4383 rollup->gsets_data = list_make1(gs);
4384 rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4385 rollup->gsets_data,
4386 gd->tleref_to_colnum_map);
4387 rollup->numGroups = gs->numGroups;
4388 rollup->hashable = true;
4389 rollup->is_hashed = true;
4390 rollups = lcons(rollup, rollups);
4391 }
4392
4393 if (rollups)
4394 {
4395 add_path(grouped_rel, (Path *)
4396 create_groupingsets_path(root,
4397 grouped_rel,
4398 path,
4399 (List *) parse->havingQual,
4400 AGG_MIXED,
4401 rollups,
4402 agg_costs));
4403 }
4404 }
4405
4406 /*
4407 * Now try the simple sorted case.
4408 */
4409 if (!gd->unsortable_sets)
4410 add_path(grouped_rel, (Path *)
4411 create_groupingsets_path(root,
4412 grouped_rel,
4413 path,
4414 (List *) parse->havingQual,
4415 AGG_SORTED,
4416 gd->rollups,
4417 agg_costs));
4418}

References add_path(), AGG_HASHED, AGG_MIXED, AGG_SORTED, grouping_sets_data::any_hashable, Assert(), bms_is_empty, bms_is_member(), create_groupingsets_path(), DiscreteKnapsack(), grouping_sets_data::dNumHashGroups, estimate_hashagg_tablesize(), for_each_cell, for_each_from, get_hash_memory_limit(), RollupData::groupClause, RollupData::gsets, RollupData::gsets_data, RollupData::hashable, i, RollupData::is_hashed, lappend(), lcons(), lfirst_node, linitial, list_concat(), list_copy(), list_head(), list_length(), list_make1, lnext(), makeNode, Max, Min, NIL, GroupingSetData::numGroups, RollupData::numGroups, palloc(), parse(), Path::pathkeys, pathkeys_contained_in(), preprocess_groupclause(), remap_to_groupclause_idx(), grouping_sets_data::rollups, root, scale, GroupingSetData::set, grouping_sets_data::tleref_to_colnum_map, and grouping_sets_data::unsortable_sets.

Referenced by add_paths_to_grouping_rel().
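
The knapsack step in the hashed/mixed branch is worth reading in isolation: the capacity and weights are expressed in units of scale bytes, and passing NULL for the item values makes every rollup equally valuable, so the algorithm simply maximizes the number of sorts saved. A short recap of the call as it appears above:

/* k_capacity = floor(available hash memory / scale); each weight was
 * clamped to k_capacity + 1 so an oversized rollup can never be chosen */
hash_items = DiscreteKnapsack(k_capacity, i, k_weights, NULL);

/* hash_items now holds the indexes (counting hashable rollups only) of
 * the rollups to convert from sorted to hashed processing */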

◆ create_degenerate_grouping_paths()

static void create_degenerate_grouping_paths ( PlannerInfo * root,
RelOptInfo * input_rel,
RelOptInfo * grouped_rel 
)
static

Definition at line 3868 of file planner.c.

3870{
3871 Query *parse = root->parse;
3872 int nrows;
3873 Path *path;
3874
3875 nrows = list_length(parse->groupingSets);
3876 if (nrows > 1)
3877 {
3878 /*
3879 * Doesn't seem worthwhile writing code to cons up a generate_series
3880 * or a values scan to emit multiple rows. Instead just make N clones
3881 * and append them. (With a volatile HAVING clause, this means you
3882 * might get between 0 and N output rows. Offhand I think that's
3883 * desired.)
3884 */
3885 List *paths = NIL;
3886
3887 while (--nrows >= 0)
3888 {
3889 path = (Path *)
3890 create_group_result_path(root, grouped_rel,
3891 grouped_rel->reltarget,
3892 (List *) parse->havingQual);
3893 paths = lappend(paths, path);
3894 }
3895 path = (Path *)
3896 create_append_path(root,
3897 grouped_rel,
3898 paths,
3899 NIL,
3900 NIL,
3901 NULL,
3902 0,
3903 false,
3904 -1);
3905 }
3906 else
3907 {
3908 /* No grouping sets, or just one, so one output row */
3909 path = (Path *)
3910 create_group_result_path(root, grouped_rel,
3911 grouped_rel->reltarget,
3912 (List *) parse->havingQual);
3913 }
3914
3915 add_path(grouped_rel, path);
3916}
AppendPath * create_append_path(PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *partial_subpaths, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows)
Definition: pathnode.c:1300
GroupResultPath * create_group_result_path(PlannerInfo *root, RelOptInfo *rel, PathTarget *target, List *havingqual)
Definition: pathnode.c:1586

References add_path(), create_append_path(), create_group_result_path(), lappend(), list_length(), NIL, parse(), RelOptInfo::reltarget, and root.

Referenced by create_grouping_paths().
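
To see the degenerate case from SQL: is_degenerate_grouping() is true when the query has grouping sets (or a HAVING clause) but no aggregates and no grouping columns. A minimal sketch (plan shape hedged; recent releases typically show an Append of two Result nodes, matching the N-clones logic above):

    EXPLAIN (COSTS OFF)
    SELECT 1 GROUP BY GROUPING SETS ((), ());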

◆ create_distinct_paths()

static RelOptInfo * create_distinct_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget target 
)
static

Definition at line 4691 of file planner.c.

4693{
4694 RelOptInfo *distinct_rel;
4695
4696 /* For now, do all work in the (DISTINCT, NULL) upperrel */
4697 distinct_rel = fetch_upper_rel(root, UPPERREL_DISTINCT, NULL);
4698
4699 /*
4700 * We don't compute anything at this level, so distinct_rel will be
4701 * parallel-safe if the input rel is parallel-safe. In particular, if
4702 * there is a DISTINCT ON (...) clause, any path for the input_rel will
4703 * output those expressions, and will not be parallel-safe unless those
4704 * expressions are parallel-safe.
4705 */
4706 distinct_rel->consider_parallel = input_rel->consider_parallel;
4707
4708 /*
4709 * If the input rel belongs to a single FDW, so does the distinct_rel.
4710 */
4711 distinct_rel->serverid = input_rel->serverid;
4712 distinct_rel->userid = input_rel->userid;
4713 distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4714 distinct_rel->fdwroutine = input_rel->fdwroutine;
4715
4716 /* build distinct paths based on input_rel's pathlist */
4717 create_final_distinct_paths(root, input_rel, distinct_rel);
4718
4719 /* now build distinct paths based on input_rel's partial_pathlist */
4720 create_partial_distinct_paths(root, input_rel, distinct_rel, target);
4721
4722 /* Give a helpful error if we failed to create any paths */
4723 if (distinct_rel->pathlist == NIL)
4724 ereport(ERROR,
4725 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4726 errmsg("could not implement DISTINCT"),
4727 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4728
4729 /*
4730 * If there is an FDW that's responsible for all baserels of the query,
4731 * let it consider adding ForeignPaths.
4732 */
4733 if (distinct_rel->fdwroutine &&
4734 distinct_rel->fdwroutine->GetForeignUpperPaths)
4735 distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4736 UPPERREL_DISTINCT,
4737 input_rel,
4738 distinct_rel,
4739 NULL);
4740
4741 /* Let extensions possibly add some more paths */
4742 if (create_upper_paths_hook)
4743 (*create_upper_paths_hook) (root, UPPERREL_DISTINCT, input_rel,
4744 distinct_rel, NULL);
4745
4746 /* Now choose the best path(s) */
4747 set_cheapest(distinct_rel);
4748
4749 return distinct_rel;
4750}
int errdetail(const char *fmt,...)
Definition: elog.c:1204
int errcode(int sqlerrcode)
Definition: elog.c:854
int errmsg(const char *fmt,...)
Definition: elog.c:1071
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
@ UPPERREL_DISTINCT
Definition: pathnodes.h:77
static RelOptInfo * create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *distinct_rel)
Definition: planner.c:4944
static void create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *final_distinct_rel, PathTarget *target)
Definition: planner.c:4761
create_upper_paths_hook_type create_upper_paths_hook
Definition: planner.c:76
RelOptInfo * fetch_upper_rel(PlannerInfo *root, UpperRelationKind kind, Relids relids)
Definition: relnode.c:1458
bool useridiscurrent
Definition: pathnodes.h:995
Oid userid
Definition: pathnodes.h:993
Oid serverid
Definition: pathnodes.h:991

References RelOptInfo::consider_parallel, create_final_distinct_paths(), create_partial_distinct_paths(), create_upper_paths_hook, ereport, errcode(), errdetail(), errmsg(), ERROR, fetch_upper_rel(), NIL, RelOptInfo::pathlist, root, RelOptInfo::serverid, set_cheapest(), UPPERREL_DISTINCT, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by grouping_planner().
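
The alternatives assembled here are easy to observe from SQL. A minimal sketch, assuming a scratch table t (names and data are illustrative; exact plan shapes depend on statistics, settings, and server version):

    CREATE TABLE t (a int, b int);
    INSERT INTO t SELECT i % 100, i FROM generate_series(1, 10000) i;
    ANALYZE t;

    EXPLAIN (COSTS OFF) SELECT DISTINCT a FROM t;  -- typically HashAggregate
    SET enable_hashagg = off;
    EXPLAIN (COSTS OFF) SELECT DISTINCT a FROM t;  -- typically Sort + Unique
    RESET enable_hashagg;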

◆ create_final_distinct_paths()

static RelOptInfo * create_final_distinct_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo distinct_rel 
)
static

Definition at line 4944 of file planner.c.

4946{
4947 Query *parse = root->parse;
4948 Path *cheapest_input_path = input_rel->cheapest_total_path;
4949 double numDistinctRows;
4950 bool allow_hash;
4951
4952 /* Estimate number of distinct rows there will be */
4953 if (parse->groupClause || parse->groupingSets || parse->hasAggs ||
4954 root->hasHavingQual)
4955 {
4956 /*
4957 * If there was grouping or aggregation, use the number of input rows
4958 * as the estimated number of DISTINCT rows (ie, assume the input is
4959 * already mostly unique).
4960 */
4961 numDistinctRows = cheapest_input_path->rows;
4962 }
4963 else
4964 {
4965 /*
4966 * Otherwise, the UNIQUE filter has effects comparable to GROUP BY.
4967 */
4968 List *distinctExprs;
4969
4970 distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
4971 parse->targetList);
4972 numDistinctRows = estimate_num_groups(root, distinctExprs,
4973 cheapest_input_path->rows,
4974 NULL, NULL);
4975 }
4976
4977 /*
4978 * Consider sort-based implementations of DISTINCT, if possible.
4979 */
4980 if (grouping_is_sortable(root->processed_distinctClause))
4981 {
4982 /*
4983 * Firstly, if we have any adequately-presorted paths, just stick a
4984 * Unique node on those. We also consider doing an explicit sort of
4985 * the cheapest input path and Unique'ing that. If any paths have
4986 * presorted keys then we'll create an incremental sort atop of those
4987 * before adding a unique node on the top. We'll also attempt to
4988 * reorder the required pathkeys to match the input path's pathkeys as
4989 * much as possible, in hopes of avoiding a possible need to re-sort.
4990 *
4991 * When we have DISTINCT ON, we must sort by the more rigorous of
4992 * DISTINCT and ORDER BY, else it won't have the desired behavior.
4993 * Also, if we do have to do an explicit sort, we might as well use
4994 * the more rigorous ordering to avoid a second sort later. (Note
4995 * that the parser will have ensured that one clause is a prefix of
4996 * the other.)
4997 */
4998 List *needed_pathkeys;
4999 ListCell *lc;
5000 double limittuples = root->distinct_pathkeys == NIL ? 1.0 : -1.0;
5001
5002 if (parse->hasDistinctOn &&
5003 list_length(root->distinct_pathkeys) <
5004 list_length(root->sort_pathkeys))
5005 needed_pathkeys = root->sort_pathkeys;
5006 else
5007 needed_pathkeys = root->distinct_pathkeys;
5008
5009 foreach(lc, input_rel->pathlist)
5010 {
5011 Path *input_path = (Path *) lfirst(lc);
5012 Path *sorted_path;
5013 List *useful_pathkeys_list = NIL;
5014
5015 useful_pathkeys_list =
5016 get_useful_pathkeys_for_distinct(root,
5017 needed_pathkeys,
5018 input_path->pathkeys);
5019 Assert(list_length(useful_pathkeys_list) > 0);
5020
5021 foreach_node(List, useful_pathkeys, useful_pathkeys_list)
5022 {
5023 sorted_path = make_ordered_path(root,
5024 distinct_rel,
5025 input_path,
5026 cheapest_input_path,
5027 useful_pathkeys,
5028 limittuples);
5029
5030 if (sorted_path == NULL)
5031 continue;
5032
5033 /*
5034 * distinct_pathkeys may have become empty if all of the
5035 * pathkeys were determined to be redundant. If all of the
5036 * pathkeys are redundant then each DISTINCT target must only
5037 * allow a single value, therefore all resulting tuples must
5038 * be identical (or at least indistinguishable by an equality
5039 * check). We can uniquify these tuples simply by just taking
5040 * the first tuple. All we do here is add a path to do "LIMIT
5041 * 1" atop of 'sorted_path'. When doing a DISTINCT ON we may
5042 * still have a non-NIL sort_pathkeys list, so we must still
5043 * only do this with paths which are correctly sorted by
5044 * sort_pathkeys.
5045 */
5046 if (root->distinct_pathkeys == NIL)
5047 {
5048 Node *limitCount;
5049
5050 limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
5051 sizeof(int64),
5052 Int64GetDatum(1), false,
5053 FLOAT8PASSBYVAL);
5054
5055 /*
5056 * If the query already has a LIMIT clause, then we could
5057 * end up with a duplicate LimitPath in the final plan.
5058 * That does not seem worth troubling over too much.
5059 */
5060 add_path(distinct_rel, (Path *)
5061 create_limit_path(root, distinct_rel, sorted_path,
5062 NULL, limitCount,
5063 LIMIT_OPTION_COUNT, 0, 1));
5064 }
5065 else
5066 {
5067 add_path(distinct_rel, (Path *)
5068 create_upper_unique_path(root, distinct_rel,
5069 sorted_path,
5070 list_length(root->distinct_pathkeys),
5071 numDistinctRows));
5072 }
5073 }
5074 }
5075 }
5076
5077 /*
5078 * Consider hash-based implementations of DISTINCT, if possible.
5079 *
5080 * If we were not able to make any other types of path, we *must* hash or
5081 * die trying. If we do have other choices, there are two things that
5082 * should prevent selection of hashing: if the query uses DISTINCT ON
5083 * (because it won't really have the expected behavior if we hash), or if
5084 * enable_hashagg is off.
5085 *
5086 * Note: grouping_is_hashable() is much more expensive to check than the
5087 * other gating conditions, so we want to do it last.
5088 */
5089 if (distinct_rel->pathlist == NIL)
5090 allow_hash = true; /* we have no alternatives */
5091 else if (parse->hasDistinctOn || !enable_hashagg)
5092 allow_hash = false; /* policy-based decision not to hash */
5093 else
5094 allow_hash = true; /* default */
5095
5096 if (allow_hash && grouping_is_hashable(root->processed_distinctClause))
5097 {
5098 /* Generate hashed aggregate path --- no sort needed */
5099 add_path(distinct_rel, (Path *)
5100 create_agg_path(root,
5101 distinct_rel,
5102 cheapest_input_path,
5103 cheapest_input_path->pathtarget,
5104 AGG_HASHED,
5105 AGGSPLIT_SIMPLE,
5106 root->processed_distinctClause,
5107 NIL,
5108 NULL,
5109 numDistinctRows));
5110 }
5111
5112 return distinct_rel;
5113}
int64_t int64
Definition: c.h:499
#define FLOAT8PASSBYVAL
Definition: c.h:606
bool enable_hashagg
Definition: costsize.c:152
Datum Int64GetDatum(int64 X)
Definition: fmgr.c:1807
Const * makeConst(Oid consttype, int32 consttypmod, Oid constcollid, int constlen, Datum constvalue, bool constisnull, bool constbyval)
Definition: makefuncs.c:350
@ LIMIT_OPTION_COUNT
Definition: nodes.h:437
LimitPath * create_limit_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, Node *limitOffset, Node *limitCount, LimitOption limitOption, int64 offset_est, int64 count_est)
Definition: pathnode.c:3979
UpperUniquePath * create_upper_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, int numCols, double numGroups)
Definition: pathnode.c:3187
#define foreach_node(type, var, lst)
Definition: pg_list.h:496
static List * get_useful_pathkeys_for_distinct(PlannerInfo *root, List *needed_pathkeys, List *path_pathkeys)
Definition: planner.c:5124
#define InvalidOid
Definition: postgres_ext.h:35
double estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, List **pgset, EstimationInfo *estinfo)
Definition: selfuncs.c:3446
Cardinality rows
Definition: pathnodes.h:1710
bool grouping_is_sortable(List *groupClause)
Definition: tlist.c:540
List * get_sortgrouplist_exprs(List *sgClauses, List *targetList)
Definition: tlist.c:392
bool grouping_is_hashable(List *groupClause)
Definition: tlist.c:560

References add_path(), AGG_HASHED, AGGSPLIT_SIMPLE, Assert(), RelOptInfo::cheapest_total_path, create_agg_path(), create_limit_path(), create_upper_unique_path(), enable_hashagg, estimate_num_groups(), FLOAT8PASSBYVAL, foreach_node, get_sortgrouplist_exprs(), get_useful_pathkeys_for_distinct(), grouping_is_hashable(), grouping_is_sortable(), Int64GetDatum(), InvalidOid, lfirst, LIMIT_OPTION_COUNT, list_length(), make_ordered_path(), makeConst(), NIL, parse(), Path::pathkeys, RelOptInfo::pathlist, root, and Path::rows.

Referenced by create_distinct_paths(), and create_partial_distinct_paths().
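
One notable special case above is the redundant-pathkeys shortcut. A minimal sketch, assuming the scratch table t from the previous example (this shortcut appears in PostgreSQL 16 and later; plan shape hedged):

    -- Every DISTINCT expression is pinned to a constant by the qual, so all
    -- distinct pathkeys are redundant and a "LIMIT 1" replaces the Unique:
    EXPLAIN (COSTS OFF) SELECT DISTINCT a FROM t WHERE a = 42;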

◆ create_grouping_paths()

static RelOptInfo * create_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget target,
bool  target_parallel_safe,
grouping_sets_data gd 
)
static

Definition at line 3681 of file planner.c.

3686{
3687 Query *parse = root->parse;
3688 RelOptInfo *grouped_rel;
3689 RelOptInfo *partially_grouped_rel;
3690 AggClauseCosts agg_costs;
3691
3692 MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
3693 get_agg_clause_costs(root, AGGSPLIT_SIMPLE, &agg_costs);
3694
3695 /*
3696 * Create grouping relation to hold fully aggregated grouping and/or
3697 * aggregation paths.
3698 */
3699 grouped_rel = make_grouping_rel(root, input_rel, target,
3700 target_parallel_safe, parse->havingQual);
3701
3702 /*
3703 * Create either paths for a degenerate grouping or paths for ordinary
3704 * grouping, as appropriate.
3705 */
3706 if (is_degenerate_grouping(root))
3707 create_degenerate_grouping_paths(root, input_rel, grouped_rel);
3708 else
3709 {
3710 int flags = 0;
3711 GroupPathExtraData extra;
3712
3713 /*
3714 * Determine whether it's possible to perform sort-based
3715 * implementations of grouping. (Note that if processed_groupClause
3716 * is empty, grouping_is_sortable() is trivially true, and all the
3717 * pathkeys_contained_in() tests will succeed too, so that we'll
3718 * consider every surviving input path.)
3719 *
3720 * If we have grouping sets, we might be able to sort some but not all
3721 * of them; in this case, we need can_sort to be true as long as we
3722 * must consider any sorted-input plan.
3723 */
3724 if ((gd && gd->rollups != NIL)
3725 || grouping_is_sortable(root->processed_groupClause))
3726 flags |= GROUPING_CAN_USE_SORT;
3727
3728 /*
3729 * Determine whether we should consider hash-based implementations of
3730 * grouping.
3731 *
3732 * Hashed aggregation only applies if we're grouping. If we have
3733 * grouping sets, some groups might be hashable but others not; in
3734 * this case we set can_hash true as long as there is nothing globally
3735 * preventing us from hashing (and we should therefore consider plans
3736 * with hashes).
3737 *
3738 * Executor doesn't support hashed aggregation with DISTINCT or ORDER
3739 * BY aggregates. (Doing so would imply storing *all* the input
3740 * values in the hash table, and/or running many sorts in parallel,
3741 * either of which seems like a certain loser.) We similarly don't
3742 * support ordered-set aggregates in hashed aggregation, but that case
3743 * is also included in the numOrderedAggs count.
3744 *
3745 * Note: grouping_is_hashable() is much more expensive to check than
3746 * the other gating conditions, so we want to do it last.
3747 */
3748 if ((parse->groupClause != NIL &&
3749 root->numOrderedAggs == 0 &&
3750 (gd ? gd->any_hashable : grouping_is_hashable(root->processed_groupClause))))
3751 flags |= GROUPING_CAN_USE_HASH;
3752
3753 /*
3754 * Determine whether partial aggregation is possible.
3755 */
3756 if (can_partial_agg(root))
3757 flags |= GROUPING_CAN_PARTIAL_AGG;
3758
3759 extra.flags = flags;
3760 extra.target_parallel_safe = target_parallel_safe;
3761 extra.havingQual = parse->havingQual;
3762 extra.targetList = parse->targetList;
3763 extra.partial_costs_set = false;
3764
3765 /*
3766 * Determine whether partitionwise aggregation is in theory possible.
3767 * It can be disabled by the user, and for now, we don't try to
3768 * support grouping sets. create_ordinary_grouping_paths() will check
3769 * additional conditions, such as whether input_rel is partitioned.
3770 */
3771 if (enable_partitionwise_aggregate && !parse->groupingSets)
3772 extra.patype = PARTITIONWISE_AGGREGATE_FULL;
3773 else
3774 extra.patype = PARTITIONWISE_AGGREGATE_NONE;
3775
3776 create_ordinary_grouping_paths(root, input_rel, grouped_rel,
3777 &agg_costs, gd, &extra,
3778 &partially_grouped_rel);
3779 }
3780
3781 set_cheapest(grouped_rel);
3782 return grouped_rel;
3783}
#define MemSet(start, val, len)
Definition: c.h:991
bool enable_partitionwise_aggregate
Definition: costsize.c:160
@ PARTITIONWISE_AGGREGATE_FULL
Definition: pathnodes.h:3321
@ PARTITIONWISE_AGGREGATE_NONE
Definition: pathnodes.h:3320
#define GROUPING_CAN_PARTIAL_AGG
Definition: pathnodes.h:3305
static void create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel)
Definition: planner.c:3868
static bool is_degenerate_grouping(PlannerInfo *root)
Definition: planner.c:3847
static void create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, GroupPathExtraData *extra, RelOptInfo **partially_grouped_rel_p)
Definition: planner.c:3932
static bool can_partial_agg(PlannerInfo *root)
Definition: planner.c:7653
static RelOptInfo * make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, Node *havingQual)
Definition: planner.c:3794
void get_agg_clause_costs(PlannerInfo *root, AggSplit aggsplit, AggClauseCosts *costs)
Definition: prepagg.c:559
PartitionwiseAggregateType patype
Definition: pathnodes.h:3350

References AGGSPLIT_SIMPLE, grouping_sets_data::any_hashable, can_partial_agg(), create_degenerate_grouping_paths(), create_ordinary_grouping_paths(), enable_partitionwise_aggregate, GroupPathExtraData::flags, get_agg_clause_costs(), GROUPING_CAN_PARTIAL_AGG, GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, grouping_is_hashable(), grouping_is_sortable(), GroupPathExtraData::havingQual, is_degenerate_grouping(), make_grouping_rel(), MemSet, NIL, parse(), GroupPathExtraData::partial_costs_set, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, GroupPathExtraData::patype, grouping_sets_data::rollups, root, set_cheapest(), GroupPathExtraData::target_parallel_safe, and GroupPathExtraData::targetList.

Referenced by grouping_planner().
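
The GROUPING_CAN_USE_HASH gating on root->numOrderedAggs can be observed directly. A minimal sketch, assuming the scratch table t from earlier (plan shapes hedged as usual):

    -- A plain GROUP BY is hashable:
    EXPLAIN (COSTS OFF) SELECT a, count(*) FROM t GROUP BY a;

    -- An ORDER BY aggregate makes numOrderedAggs nonzero, so hashing is
    -- never considered and a sort-based GroupAggregate is used instead:
    EXPLAIN (COSTS OFF) SELECT a, array_agg(b ORDER BY b) FROM t GROUP BY a;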

◆ create_one_window_path()

static void create_one_window_path ( PlannerInfo root,
RelOptInfo window_rel,
Path path,
PathTarget input_target,
PathTarget output_target,
WindowFuncLists wflists,
List activeWindows 
)
static

Definition at line 4521 of file planner.c.

4528{
4529 PathTarget *window_target;
4530 ListCell *l;
4531 List *topqual = NIL;
4532
4533 /*
4534 * Since each window clause could require a different sort order, we stack
4535 * up a WindowAgg node for each clause, with sort steps between them as
4536 * needed. (We assume that select_active_windows chose a good order for
4537 * executing the clauses in.)
4538 *
4539 * input_target should contain all Vars and Aggs needed for the result.
4540 * (In some cases we wouldn't need to propagate all of these all the way
4541 * to the top, since they might only be needed as inputs to WindowFuncs.
4542 * It's probably not worth trying to optimize that though.) It must also
4543 * contain all window partitioning and sorting expressions, to ensure
4544 * they're computed only once at the bottom of the stack (that's critical
4545 * for volatile functions). As we climb up the stack, we'll add outputs
4546 * for the WindowFuncs computed at each level.
4547 */
4548 window_target = input_target;
4549
4550 foreach(l, activeWindows)
4551 {
4552 WindowClause *wc = lfirst_node(WindowClause, l);
4553 List *window_pathkeys;
4554 List *runcondition = NIL;
4555 int presorted_keys;
4556 bool is_sorted;
4557 bool topwindow;
4558 ListCell *lc2;
4559
4560 window_pathkeys = make_pathkeys_for_window(root,
4561 wc,
4562 root->processed_tlist);
4563
4564 is_sorted = pathkeys_count_contained_in(window_pathkeys,
4565 path->pathkeys,
4566 &presorted_keys);
4567
4568 /* Sort if necessary */
4569 if (!is_sorted)
4570 {
4571 /*
4572 * No presorted keys or incremental sort disabled, just perform a
4573 * complete sort.
4574 */
4575 if (presorted_keys == 0 || !enable_incremental_sort)
4576 path = (Path *) create_sort_path(root, window_rel,
4577 path,
4578 window_pathkeys,
4579 -1.0);
4580 else
4581 {
4582 /*
4583 * Since we have presorted keys and incremental sort is
4584 * enabled, just use incremental sort.
4585 */
4586 path = (Path *) create_incremental_sort_path(root,
4587 window_rel,
4588 path,
4589 window_pathkeys,
4590 presorted_keys,
4591 -1.0);
4592 }
4593 }
4594
4595 if (lnext(activeWindows, l))
4596 {
4597 /*
4598 * Add the current WindowFuncs to the output target for this
4599 * intermediate WindowAggPath. We must copy window_target to
4600 * avoid changing the previous path's target.
4601 *
4602 * Note: a WindowFunc adds nothing to the target's eval costs; but
4603 * we do need to account for the increase in tlist width.
4604 */
4605 int64 tuple_width = window_target->width;
4606
4607 window_target = copy_pathtarget(window_target);
4608 foreach(lc2, wflists->windowFuncs[wc->winref])
4609 {
4610 WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4611
4612 add_column_to_pathtarget(window_target, (Expr *) wfunc, 0);
4613 tuple_width += get_typavgwidth(wfunc->wintype, -1);
4614 }
4615 window_target->width = clamp_width_est(tuple_width);
4616 }
4617 else
4618 {
4619 /* Install the goal target in the topmost WindowAgg */
4620 window_target = output_target;
4621 }
4622
4623 /* mark the final item in the list as the top-level window */
4624 topwindow = foreach_current_index(l) == list_length(activeWindows) - 1;
4625
4626 /*
4627 * Collect the WindowFuncRunConditions from each WindowFunc and
4628 * convert them into OpExprs
4629 */
4630 foreach(lc2, wflists->windowFuncs[wc->winref])
4631 {
4632 ListCell *lc3;
4633 WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4634
4635 foreach(lc3, wfunc->runCondition)
4636 {
4637 WindowFuncRunCondition *wfuncrc =
4638 lfirst_node(WindowFuncRunCondition, lc3);
4639 Expr *opexpr;
4640 Expr *leftop;
4641 Expr *rightop;
4642
4643 if (wfuncrc->wfunc_left)
4644 {
4645 leftop = (Expr *) copyObject(wfunc);
4646 rightop = copyObject(wfuncrc->arg);
4647 }
4648 else
4649 {
4650 leftop = copyObject(wfuncrc->arg);
4651 rightop = (Expr *) copyObject(wfunc);
4652 }
4653
4654 opexpr = make_opclause(wfuncrc->opno,
4655 BOOLOID,
4656 false,
4657 leftop,
4658 rightop,
4659 InvalidOid,
4660 wfuncrc->inputcollid);
4661
4662 runcondition = lappend(runcondition, opexpr);
4663
4664 if (!topwindow)
4665 topqual = lappend(topqual, opexpr);
4666 }
4667 }
4668
4669 path = (Path *)
4670 create_windowagg_path(root, window_rel, path, window_target,
4671 wflists->windowFuncs[wc->winref],
4672 runcondition, wc,
4673 topwindow ? topqual : NIL, topwindow);
4674 }
4675
4676 add_path(window_rel, path);
4677}
int32 clamp_width_est(int64 tuple_width)
Definition: costsize.c:242
bool enable_incremental_sort
Definition: costsize.c:151
int32 get_typavgwidth(Oid typid, int32 typmod)
Definition: lsyscache.c:2718
Expr * make_opclause(Oid opno, Oid opresulttype, bool opretset, Expr *leftop, Expr *rightop, Oid opcollid, Oid inputcollid)
Definition: makefuncs.c:701
#define copyObject(obj)
Definition: nodes.h:230
bool pathkeys_count_contained_in(List *keys1, List *keys2, int *n_common)
Definition: pathkeys.c:558
WindowAggPath * create_windowagg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *windowFuncs, List *runCondition, WindowClause *winclause, List *qual, bool topwindow)
Definition: pathnode.c:3577
IncrementalSortPath * create_incremental_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, int presorted_keys, double limit_tuples)
Definition: pathnode.c:3032
SortPath * create_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, double limit_tuples)
Definition: pathnode.c:3082
static List * make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc, List *tlist)
Definition: planner.c:6179
List ** windowFuncs
Definition: clauses.h:23
void add_column_to_pathtarget(PathTarget *target, Expr *expr, Index sortgroupref)
Definition: tlist.c:695

References add_column_to_pathtarget(), add_path(), WindowFuncRunCondition::arg, clamp_width_est(), copy_pathtarget(), copyObject, create_incremental_sort_path(), create_sort_path(), create_windowagg_path(), enable_incremental_sort, foreach_current_index, get_typavgwidth(), InvalidOid, lappend(), lfirst_node, list_length(), lnext(), make_opclause(), make_pathkeys_for_window(), NIL, WindowFuncRunCondition::opno, Path::pathkeys, pathkeys_count_contained_in(), root, WindowFuncRunCondition::wfunc_left, PathTarget::width, WindowFuncLists::windowFuncs, and WindowClause::winref.

Referenced by create_window_paths().
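
Both behaviors implemented here, the per-clause WindowAgg stack and the run-condition push-down, can be seen from SQL. A minimal sketch, assuming the scratch table t from earlier (run conditions require PostgreSQL 15 or later; plan shapes hedged):

    -- Two windows with incompatible orderings: one WindowAgg per clause,
    -- with a Sort (or Incremental Sort) between them:
    EXPLAIN (COSTS OFF)
    SELECT sum(b) OVER (ORDER BY a), sum(a) OVER (ORDER BY b) FROM t;

    -- A qual on a monotonic window function becomes a WindowAgg
    -- "Run Condition" (the WindowFuncRunCondition handling above):
    EXPLAIN (COSTS OFF)
    SELECT * FROM (SELECT a, row_number() OVER (ORDER BY a) AS rn FROM t) s
    WHERE s.rn <= 10;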

◆ create_ordered_paths()

static RelOptInfo * create_ordered_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget target,
bool  target_parallel_safe,
double  limit_tuples 
)
static

Definition at line 5209 of file planner.c.

5214{
5215 Path *cheapest_input_path = input_rel->cheapest_total_path;
5216 RelOptInfo *ordered_rel;
5217 ListCell *lc;
5218
5219 /* For now, do all work in the (ORDERED, NULL) upperrel */
5220 ordered_rel = fetch_upper_rel(root, UPPERREL_ORDERED, NULL);
5221
5222 /*
5223 * If the input relation is not parallel-safe, then the ordered relation
5224 * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
5225 * target list is parallel-safe.
5226 */
5227 if (input_rel->consider_parallel && target_parallel_safe)
5228 ordered_rel->consider_parallel = true;
5229
5230 /*
5231 * If the input rel belongs to a single FDW, so does the ordered_rel.
5232 */
5233 ordered_rel->serverid = input_rel->serverid;
5234 ordered_rel->userid = input_rel->userid;
5235 ordered_rel->useridiscurrent = input_rel->useridiscurrent;
5236 ordered_rel->fdwroutine = input_rel->fdwroutine;
5237
5238 foreach(lc, input_rel->pathlist)
5239 {
5240 Path *input_path = (Path *) lfirst(lc);
5241 Path *sorted_path;
5242 bool is_sorted;
5243 int presorted_keys;
5244
5245 is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5246 input_path->pathkeys, &presorted_keys);
5247
5248 if (is_sorted)
5249 sorted_path = input_path;
5250 else
5251 {
5252 /*
5253 * Try at least sorting the cheapest path and also try
5254 * incrementally sorting any path which is partially sorted
5255 * already (no need to deal with paths which have presorted keys
5256 * when incremental sort is disabled unless it's the cheapest
5257 * input path).
5258 */
5259 if (input_path != cheapest_input_path &&
5260 (presorted_keys == 0 || !enable_incremental_sort))
5261 continue;
5262
5263 /*
5264 * We've no need to consider both a sort and incremental sort.
5265 * We'll just do a sort if there are no presorted keys and an
5266 * incremental sort when there are presorted keys.
5267 */
5268 if (presorted_keys == 0 || !enable_incremental_sort)
5269 sorted_path = (Path *) create_sort_path(root,
5270 ordered_rel,
5271 input_path,
5272 root->sort_pathkeys,
5273 limit_tuples);
5274 else
5275 sorted_path = (Path *) create_incremental_sort_path(root,
5276 ordered_rel,
5277 input_path,
5278 root->sort_pathkeys,
5279 presorted_keys,
5280 limit_tuples);
5281 }
5282
5283 /*
5284 * If the pathtarget of the result path has different expressions from
5285 * the target to be applied, a projection step is needed.
5286 */
5287 if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5288 sorted_path = apply_projection_to_path(root, ordered_rel,
5289 sorted_path, target);
5290
5291 add_path(ordered_rel, sorted_path);
5292 }
5293
5294 /*
5295 * generate_gather_paths() will have already generated a simple Gather
5296 * path for the best parallel path, if any, and the loop above will have
5297 * considered sorting it. Similarly, generate_gather_paths() will also
5298 * have generated order-preserving Gather Merge plans which can be used
5299 * without sorting if they happen to match the sort_pathkeys, and the loop
5300 * above will have handled those as well. However, there's one more
5301 * possibility: it may make sense to sort the cheapest partial path or
5302 * incrementally sort any partial path that is partially sorted according
5303 * to the required output order and then use Gather Merge.
5304 */
5305 if (ordered_rel->consider_parallel && root->sort_pathkeys != NIL &&
5306 input_rel->partial_pathlist != NIL)
5307 {
5308 Path *cheapest_partial_path;
5309
5310 cheapest_partial_path = linitial(input_rel->partial_pathlist);
5311
5312 foreach(lc, input_rel->partial_pathlist)
5313 {
5314 Path *input_path = (Path *) lfirst(lc);
5315 Path *sorted_path;
5316 bool is_sorted;
5317 int presorted_keys;
5318 double total_groups;
5319
5320 is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5321 input_path->pathkeys,
5322 &presorted_keys);
5323
5324 if (is_sorted)
5325 continue;
5326
5327 /*
5328 * Try at least sorting the cheapest path and also try
5329 * incrementally sorting any path which is partially sorted
5330 * already (no need to deal with paths which have presorted keys
5331 * when incremental sort is disabled unless it's the cheapest
5332 * partial path).
5333 */
5334 if (input_path != cheapest_partial_path &&
5335 (presorted_keys == 0 || !enable_incremental_sort))
5336 continue;
5337
5338 /*
5339 * We've no need to consider both a sort and incremental sort.
5340 * We'll just do a sort if there are no presorted keys and an
5341 * incremental sort when there are presorted keys.
5342 */
5343 if (presorted_keys == 0 || !enable_incremental_sort)
5344 sorted_path = (Path *) create_sort_path(root,
5345 ordered_rel,
5346 input_path,
5347 root->sort_pathkeys,
5348 limit_tuples);
5349 else
5350 sorted_path = (Path *) create_incremental_sort_path(root,
5351 ordered_rel,
5352 input_path,
5353 root->sort_pathkeys,
5354 presorted_keys,
5355 limit_tuples);
5356 total_groups = compute_gather_rows(sorted_path);
5357 sorted_path = (Path *)
5358 create_gather_merge_path(root, ordered_rel,
5359 sorted_path,
5360 sorted_path->pathtarget,
5361 root->sort_pathkeys, NULL,
5362 &total_groups);
5363
5364 /*
5365 * If the pathtarget of the result path has different expressions
5366 * from the target to be applied, a projection step is needed.
5367 */
5368 if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5369 sorted_path = apply_projection_to_path(root, ordered_rel,
5370 sorted_path, target);
5371
5372 add_path(ordered_rel, sorted_path);
5373 }
5374 }
5375
5376 /*
5377 * If there is an FDW that's responsible for all baserels of the query,
5378 * let it consider adding ForeignPaths.
5379 */
5380 if (ordered_rel->fdwroutine &&
5381 ordered_rel->fdwroutine->GetForeignUpperPaths)
5382 ordered_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_ORDERED,
5383 input_rel, ordered_rel,
5384 NULL);
5385
5386 /* Let extensions possibly add some more paths */
5387 if (create_upper_paths_hook)
5388 (*create_upper_paths_hook) (root, UPPERREL_ORDERED,
5389 input_rel, ordered_rel, NULL);
5390
5391 /*
5392 * No need to bother with set_cheapest here; grouping_planner does not
5393 * need us to do it.
5394 */
5395 Assert(ordered_rel->pathlist != NIL);
5396
5397 return ordered_rel;
5398}
double compute_gather_rows(Path *path)
Definition: costsize.c:6610
bool equal(const void *a, const void *b)
Definition: equalfuncs.c:223
GatherMergePath * create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *pathkeys, Relids required_outer, double *rows)
Definition: pathnode.c:1962
@ UPPERREL_ORDERED
Definition: pathnodes.h:78

References add_path(), apply_projection_to_path(), Assert(), RelOptInfo::cheapest_total_path, compute_gather_rows(), RelOptInfo::consider_parallel, create_gather_merge_path(), create_incremental_sort_path(), create_sort_path(), create_upper_paths_hook, enable_incremental_sort, equal(), PathTarget::exprs, fetch_upper_rel(), lfirst, linitial, NIL, RelOptInfo::partial_pathlist, Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::pathlist, root, RelOptInfo::serverid, UPPERREL_ORDERED, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by grouping_planner().
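
The sort-versus-incremental-sort choice made here becomes visible once some presorted keys exist. A minimal sketch, assuming the scratch table t from earlier (hedged: the planner may still prefer a Seq Scan plus full Sort when that costs less):

    CREATE INDEX t_a_idx ON t (a);
    -- With enable_incremental_sort on (the default), ORDER BY a, b can use
    -- an Incremental Sort above an index scan that already delivers a:
    EXPLAIN (COSTS OFF) SELECT * FROM t ORDER BY a, b;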

◆ create_ordinary_grouping_paths()

static void create_ordinary_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo grouped_rel,
const AggClauseCosts agg_costs,
grouping_sets_data gd,
GroupPathExtraData extra,
RelOptInfo **  partially_grouped_rel_p 
)
static

Definition at line 3932 of file planner.c.

3938{
3939 Path *cheapest_path = input_rel->cheapest_total_path;
3940 RelOptInfo *partially_grouped_rel = NULL;
3941 double dNumGroups;
3942 PartitionwiseAggregateType patype = PARTITIONWISE_AGGREGATE_NONE;
3943
3944 /*
3945 * If this is the topmost grouping relation or if the parent relation is
3946 * doing some form of partitionwise aggregation, then we may be able to do
3947 * it at this level also. However, if the input relation is not
3948 * partitioned, partitionwise aggregate is impossible.
3949 */
3950 if (extra->patype != PARTITIONWISE_AGGREGATE_NONE &&
3951 IS_PARTITIONED_REL(input_rel))
3952 {
3953 /*
3954 * If this is the topmost relation or if the parent relation is doing
3955 * full partitionwise aggregation, then we can do full partitionwise
3956 * aggregation provided that the GROUP BY clause contains all of the
3957 * partitioning columns at this level and the collation used by GROUP
3958 * BY matches the partitioning collation. Otherwise, we can do at
3959 * most partial partitionwise aggregation. But if partial aggregation
3960 * is not supported in general then we can't use it for partitionwise
3961 * aggregation either.
3962 *
3963 * Check parse->groupClause not processed_groupClause, because it's
3964 * okay if some of the partitioning columns were proved redundant.
3965 */
3966 if (extra->patype == PARTITIONWISE_AGGREGATE_FULL &&
3967 group_by_has_partkey(input_rel, extra->targetList,
3968 root->parse->groupClause))
3969 patype = PARTITIONWISE_AGGREGATE_FULL;
3970 else if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
3971 patype = PARTITIONWISE_AGGREGATE_PARTIAL;
3972 else
3973 patype = PARTITIONWISE_AGGREGATE_NONE;
3974 }
3975
3976 /*
3977 * Before generating paths for grouped_rel, we first generate any possible
3978 * partially grouped paths; that way, later code can easily consider both
3979 * parallel and non-parallel approaches to grouping.
3980 */
3981 if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
3982 {
3983 bool force_rel_creation;
3984
3985 /*
3986 * If we're doing partitionwise aggregation at this level, force
3987 * creation of a partially_grouped_rel so we can add partitionwise
3988 * paths to it.
3989 */
3990 force_rel_creation = (patype == PARTITIONWISE_AGGREGATE_PARTIAL);
3991
3992 partially_grouped_rel =
3993 create_partial_grouping_paths(root,
3994 grouped_rel,
3995 input_rel,
3996 gd,
3997 extra,
3998 force_rel_creation);
3999 }
4000
4001 /* Set out parameter. */
4002 *partially_grouped_rel_p = partially_grouped_rel;
4003
4004 /* Apply partitionwise aggregation technique, if possible. */
4005 if (patype != PARTITIONWISE_AGGREGATE_NONE)
4006 create_partitionwise_grouping_paths(root, input_rel, grouped_rel,
4007 partially_grouped_rel, agg_costs,
4008 gd, patype, extra);
4009
4010 /* If we are doing partial aggregation only, return. */
4011 if (extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
4012 {
4013 Assert(partially_grouped_rel);
4014
4015 if (partially_grouped_rel->pathlist)
4016 set_cheapest(partially_grouped_rel);
4017
4018 return;
4019 }
4020
4021 /* Gather any partially grouped partial paths. */
4022 if (partially_grouped_rel && partially_grouped_rel->partial_pathlist)
4023 {
4024 gather_grouping_paths(root, partially_grouped_rel);
4025 set_cheapest(partially_grouped_rel);
4026 }
4027
4028 /*
4029 * Estimate number of groups.
4030 */
4031 dNumGroups = get_number_of_groups(root,
4032 cheapest_path->rows,
4033 gd,
4034 extra->targetList);
4035
4036 /* Build final grouping paths */
4037 add_paths_to_grouping_rel(root, input_rel, grouped_rel,
4038 partially_grouped_rel, agg_costs, gd,
4039 dNumGroups, extra);
4040
4041 /* Give a helpful error if we failed to find any implementation */
4042 if (grouped_rel->pathlist == NIL)
4043 ereport(ERROR,
4044 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4045 errmsg("could not implement GROUP BY"),
4046 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4047
4048 /*
4049 * If there is an FDW that's responsible for all baserels of the query,
4050 * let it consider adding ForeignPaths.
4051 */
4052 if (grouped_rel->fdwroutine &&
4053 grouped_rel->fdwroutine->GetForeignUpperPaths)
4054 grouped_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_GROUP_AGG,
4055 input_rel, grouped_rel,
4056 extra);
4057
4058 /* Let extensions possibly add some more paths */
4059 if (create_upper_paths_hook)
4060 (*create_upper_paths_hook) (root, UPPERREL_GROUP_AGG,
4061 input_rel, grouped_rel,
4062 extra);
4063}
PartitionwiseAggregateType
Definition: pathnodes.h:3319
@ PARTITIONWISE_AGGREGATE_PARTIAL
Definition: pathnodes.h:3322
@ UPPERREL_GROUP_AGG
Definition: pathnodes.h:74
static RelOptInfo * create_partial_grouping_paths(PlannerInfo *root, RelOptInfo *grouped_rel, RelOptInfo *input_rel, grouping_sets_data *gd, GroupPathExtraData *extra, bool force_rel_creation)
Definition: planner.c:7217
static void add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, double dNumGroups, GroupPathExtraData *extra)
Definition: planner.c:6980
static double get_number_of_groups(PlannerInfo *root, double path_rows, grouping_sets_data *gd, List *target_list)
Definition: planner.c:3559
static void create_partitionwise_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, PartitionwiseAggregateType patype, GroupPathExtraData *extra)
Definition: planner.c:7930
static bool group_by_has_partkey(RelOptInfo *input_rel, List *targetList, List *groupClause)
Definition: planner.c:8074

References add_paths_to_grouping_rel(), Assert(), RelOptInfo::cheapest_total_path, create_partial_grouping_paths(), create_partitionwise_grouping_paths(), create_upper_paths_hook, ereport, errcode(), errdetail(), errmsg(), ERROR, GroupPathExtraData::flags, gather_grouping_paths(), get_number_of_groups(), group_by_has_partkey(), GROUPING_CAN_PARTIAL_AGG, IS_PARTITIONED_REL, NIL, RelOptInfo::partial_pathlist, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, PARTITIONWISE_AGGREGATE_PARTIAL, RelOptInfo::pathlist, GroupPathExtraData::patype, root, Path::rows, set_cheapest(), GroupPathExtraData::targetList, and UPPERREL_GROUP_AGG.

Referenced by create_grouping_paths(), and create_partitionwise_grouping_paths().
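
The full-partitionwise branch taken when group_by_has_partkey() succeeds can be demonstrated as follows; the setup is illustrative only, and plan shapes vary with version and statistics:

    SET enable_partitionwise_aggregate = on;
    CREATE TABLE pt (k int, v int) PARTITION BY HASH (k);
    CREATE TABLE pt0 PARTITION OF pt FOR VALUES WITH (MODULUS 2, REMAINDER 0);
    CREATE TABLE pt1 PARTITION OF pt FOR VALUES WITH (MODULUS 2, REMAINDER 1);
    INSERT INTO pt SELECT i, i % 50 FROM generate_series(1, 10000) i;
    ANALYZE pt;

    -- GROUP BY contains the partition key, so each partition is aggregated
    -- independently and the per-partition results are appended:
    EXPLAIN (COSTS OFF) SELECT k, count(*) FROM pt GROUP BY k;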

◆ create_partial_distinct_paths()

static void create_partial_distinct_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo final_distinct_rel,
PathTarget target 
)
static

Definition at line 4761 of file planner.c.

4764{
4765 RelOptInfo *partial_distinct_rel;
4766 Query *parse;
4767 List *distinctExprs;
4768 double numDistinctRows;
4769 Path *cheapest_partial_path;
4770 ListCell *lc;
4771
4772 /* nothing to do when there are no partial paths in the input rel */
4773 if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
4774 return;
4775
4776 parse = root->parse;
4777
4778 /* can't do parallel DISTINCT ON */
4779 if (parse->hasDistinctOn)
4780 return;
4781
4782 partial_distinct_rel = fetch_upper_rel(root, UPPERREL_PARTIAL_DISTINCT,
4783 NULL);
4784 partial_distinct_rel->reltarget = target;
4785 partial_distinct_rel->consider_parallel = input_rel->consider_parallel;
4786
4787 /*
4788 * If input_rel belongs to a single FDW, so does the partial_distinct_rel.
4789 */
4790 partial_distinct_rel->serverid = input_rel->serverid;
4791 partial_distinct_rel->userid = input_rel->userid;
4792 partial_distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4793 partial_distinct_rel->fdwroutine = input_rel->fdwroutine;
4794
4795 cheapest_partial_path = linitial(input_rel->partial_pathlist);
4796
4797 distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
4798 parse->targetList);
4799
4800 /* estimate how many distinct rows we'll get from each worker */
4801 numDistinctRows = estimate_num_groups(root, distinctExprs,
4802 cheapest_partial_path->rows,
4803 NULL, NULL);
4804
4805 /*
4806 * Try sorting the cheapest path and incrementally sorting any paths with
4807 * presorted keys and put unique paths atop of those. We'll also
4808 * attempt to reorder the required pathkeys to match the input path's
4809 * pathkeys as much as possible, in hopes of avoiding a possible need to
4810 * re-sort.
4811 */
4812 if (grouping_is_sortable(root->processed_distinctClause))
4813 {
4814 foreach(lc, input_rel->partial_pathlist)
4815 {
4816 Path *input_path = (Path *) lfirst(lc);
4817 Path *sorted_path;
4818 List *useful_pathkeys_list = NIL;
4819
4820 useful_pathkeys_list =
4822 root->distinct_pathkeys,
4823 input_path->pathkeys);
4824 Assert(list_length(useful_pathkeys_list) > 0);
4825
4826 foreach_node(List, useful_pathkeys, useful_pathkeys_list)
4827 {
4828 sorted_path = make_ordered_path(root,
4829 partial_distinct_rel,
4830 input_path,
4831 cheapest_partial_path,
4832 useful_pathkeys,
4833 -1.0);
4834
4835 if (sorted_path == NULL)
4836 continue;
4837
4838 /*
4839 * An empty distinct_pathkeys means all tuples have the same
4840 * value for the DISTINCT clause. See
4841 * create_final_distinct_paths()
4842 */
4843 if (root->distinct_pathkeys == NIL)
4844 {
4845 Node *limitCount;
4846
4847 limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
4848 sizeof(int64),
4849 Int64GetDatum(1), false,
4850 FLOAT8PASSBYVAL);
4851
4852 /*
4853 * Apply a LimitPath onto the partial path to restrict the
4854 * tuples from each worker to 1.
4855 * create_final_distinct_paths will need to apply an
4856 * additional LimitPath to restrict this to a single row
4857 * after the Gather node. If the query already has a
4858 * LIMIT clause, then we could end up with three Limit
4859 * nodes in the final plan. Consolidating the top two of
4860 * these could be done, but does not seem worth troubling
4861 * over.
4862 */
4863 add_partial_path(partial_distinct_rel, (Path *)
4864 create_limit_path(root, partial_distinct_rel,
4865 sorted_path,
4866 NULL,
4867 limitCount,
4868 LIMIT_OPTION_COUNT,
4869 0, 1));
4870 }
4871 else
4872 {
4873 add_partial_path(partial_distinct_rel, (Path *)
4874 create_upper_unique_path(root, partial_distinct_rel,
4875 sorted_path,
4876 list_length(root->distinct_pathkeys),
4877 numDistinctRows));
4878 }
4879 }
4880 }
4881 }
4882
4883 /*
4884 * Now try hash aggregate paths, if enabled and hashing is possible. Since
4885 * we're not on the hook to ensure we do our best to create at least one
4886 * path here, we treat enable_hashagg as a hard off-switch rather than the
4887 * slightly softer variant in create_final_distinct_paths.
4888 */
4889 if (enable_hashagg && grouping_is_hashable(root->processed_distinctClause))
4890 {
4891 add_partial_path(partial_distinct_rel, (Path *)
4892 create_agg_path(root,
4893 partial_distinct_rel,
4894 cheapest_partial_path,
4895 cheapest_partial_path->pathtarget,
4896 AGG_HASHED,
4897 AGGSPLIT_SIMPLE,
4898 root->processed_distinctClause,
4899 NIL,
4900 NULL,
4901 numDistinctRows));
4902 }
4903
4904 /*
4905 * If there is an FDW that's responsible for all baserels of the query,
4906 * let it consider adding ForeignPaths.
4907 */
4908 if (partial_distinct_rel->fdwroutine &&
4909 partial_distinct_rel->fdwroutine->GetForeignUpperPaths)
4910 partial_distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4911 UPPERREL_PARTIAL_DISTINCT,
4912 input_rel,
4913 partial_distinct_rel,
4914 NULL);
4915
4916 /* Let extensions possibly add some more partial paths */
4917 if (create_upper_paths_hook)
4918 (*create_upper_paths_hook) (root, UPPERREL_PARTIAL_DISTINCT,
4919 input_rel, partial_distinct_rel, NULL);
4920
4921 if (partial_distinct_rel->partial_pathlist != NIL)
4922 {
4923 generate_useful_gather_paths(root, partial_distinct_rel, true);
4924 set_cheapest(partial_distinct_rel);
4925
4926 /*
4927 * Finally, create paths to distinctify the final result. This step
4928 * is needed to remove any duplicates due to combining rows from
4929 * parallel workers.
4930 */
4931 create_final_distinct_paths(root, partial_distinct_rel,
4932 final_distinct_rel);
4933 }
4934}
void add_partial_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:795
@ UPPERREL_PARTIAL_DISTINCT
Definition: pathnodes.h:76

References add_partial_path(), AGG_HASHED, AGGSPLIT_SIMPLE, Assert(), RelOptInfo::consider_parallel, create_agg_path(), create_final_distinct_paths(), create_limit_path(), create_upper_paths_hook, create_upper_unique_path(), enable_hashagg, estimate_num_groups(), fetch_upper_rel(), FLOAT8PASSBYVAL, foreach_node, generate_useful_gather_paths(), get_sortgrouplist_exprs(), get_useful_pathkeys_for_distinct(), grouping_is_hashable(), grouping_is_sortable(), Int64GetDatum(), InvalidOid, lfirst, LIMIT_OPTION_COUNT, linitial, list_length(), make_ordered_path(), makeConst(), NIL, parse(), RelOptInfo::partial_pathlist, Path::pathkeys, RelOptInfo::reltarget, root, Path::rows, RelOptInfo::serverid, set_cheapest(), UPPERREL_PARTIAL_DISTINCT, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by create_distinct_paths().
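
A parallel DISTINCT built here (PostgreSQL 15 and later) splits the work into a per-worker partial distinct, a Gather, and a final distinct over the combined stream. A minimal sketch, assuming the scratch table t from earlier; the cost settings below merely coax a parallel plan out of a small table and are not recommendations:

    SET parallel_setup_cost = 0;
    SET parallel_tuple_cost = 0;
    SET min_parallel_table_scan_size = 0;
    EXPLAIN (COSTS OFF) SELECT DISTINCT a FROM t;
    -- DISTINCT ON never takes this path, per the early return above.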

◆ create_partial_grouping_paths()

static RelOptInfo * create_partial_grouping_paths ( PlannerInfo root,
RelOptInfo grouped_rel,
RelOptInfo input_rel,
grouping_sets_data gd,
GroupPathExtraData extra,
bool  force_rel_creation 
)
static

Definition at line 7217 of file planner.c.

7223{
7224 Query *parse = root->parse;
7225 RelOptInfo *partially_grouped_rel;
7226 AggClauseCosts *agg_partial_costs = &extra->agg_partial_costs;
7227 AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7228 Path *cheapest_partial_path = NULL;
7229 Path *cheapest_total_path = NULL;
7230 double dNumPartialGroups = 0;
7231 double dNumPartialPartialGroups = 0;
7232 ListCell *lc;
7233 bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7234 bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7235
7236 /*
7237 * Consider whether we should generate partially aggregated non-partial
7238 * paths. We can only do this if we have a non-partial path, and only if
7239 * the parent of the input rel is performing partial partitionwise
7240 * aggregation. (Note that extra->patype is the type of partitionwise
7241 * aggregation being used at the parent level, not this level.)
7242 */
7243 if (input_rel->pathlist != NIL &&
7244 extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
7245 cheapest_total_path = input_rel->cheapest_total_path;
7246
7247 /*
7248 * If parallelism is possible for grouped_rel, then we should consider
7249 * generating partially-grouped partial paths. However, if the input rel
7250 * has no partial paths, then we can't.
7251 */
7252 if (grouped_rel->consider_parallel && input_rel->partial_pathlist != NIL)
7253 cheapest_partial_path = linitial(input_rel->partial_pathlist);
7254
7255 /*
7256 * If we can't partially aggregate partial paths, and we can't partially
7257 * aggregate non-partial paths, then don't bother creating the new
7258 * RelOptInfo at all, unless the caller specified force_rel_creation.
7259 */
7260 if (cheapest_total_path == NULL &&
7261 cheapest_partial_path == NULL &&
7262 !force_rel_creation)
7263 return NULL;
7264
7265 /*
7266 * Build a new upper relation to represent the result of partially
7267 * aggregating the rows from the input relation.
7268 */
7269 partially_grouped_rel = fetch_upper_rel(root,
7270 UPPERREL_PARTIAL_GROUP_AGG,
7271 grouped_rel->relids);
7272 partially_grouped_rel->consider_parallel =
7273 grouped_rel->consider_parallel;
7274 partially_grouped_rel->reloptkind = grouped_rel->reloptkind;
7275 partially_grouped_rel->serverid = grouped_rel->serverid;
7276 partially_grouped_rel->userid = grouped_rel->userid;
7277 partially_grouped_rel->useridiscurrent = grouped_rel->useridiscurrent;
7278 partially_grouped_rel->fdwroutine = grouped_rel->fdwroutine;
7279
7280 /*
7281 * Build target list for partial aggregate paths. These paths cannot just
7282 * emit the same tlist as regular aggregate paths, because (1) we must
7283 * include Vars and Aggrefs needed in HAVING, which might not appear in
7284 * the result tlist, and (2) the Aggrefs must be set in partial mode.
7285 */
7286 partially_grouped_rel->reltarget =
7287 make_partial_grouping_target(root, grouped_rel->reltarget,
7288 extra->havingQual);
7289
7290 if (!extra->partial_costs_set)
7291 {
7292 /*
7293 * Collect statistics about aggregates for estimating costs of
7294 * performing aggregation in parallel.
7295 */
7296 MemSet(agg_partial_costs, 0, sizeof(AggClauseCosts));
7297 MemSet(agg_final_costs, 0, sizeof(AggClauseCosts));
7298 if (parse->hasAggs)
7299 {
7300 /* partial phase */
7301 get_agg_clause_costs(root, AGGSPLIT_INITIAL_SERIAL,
7302 agg_partial_costs);
7303
7304 /* final phase */
7305 get_agg_clause_costs(root, AGGSPLIT_FINAL_DESERIAL,
7306 agg_final_costs);
7307 }
7308
7309 extra->partial_costs_set = true;
7310 }
7311
7312 /* Estimate number of partial groups. */
7313 if (cheapest_total_path != NULL)
7314 dNumPartialGroups =
7315 get_number_of_groups(root,
7316 cheapest_total_path->rows,
7317 gd,
7318 extra->targetList);
7319 if (cheapest_partial_path != NULL)
7320 dNumPartialPartialGroups =
7321 get_number_of_groups(root,
7322 cheapest_partial_path->rows,
7323 gd,
7324 extra->targetList);
7325
7326 if (can_sort && cheapest_total_path != NULL)
7327 {
7328 /* This should have been checked previously */
7329 Assert(parse->hasAggs || parse->groupClause);
7330
7331 /*
7332 * Use any available suitably-sorted path as input, and also consider
7333 * sorting the cheapest partial path.
7334 */
7335 foreach(lc, input_rel->pathlist)
7336 {
7337 ListCell *lc2;
7338 Path *path = (Path *) lfirst(lc);
7339 Path *path_save = path;
7340 List *pathkey_orderings = NIL;
7341
7342 /* generate alternative group orderings that might be useful */
7343 pathkey_orderings = get_useful_group_keys_orderings(root, path);
7344
7345 Assert(list_length(pathkey_orderings) > 0);
7346
7347 /* process all potentially interesting grouping reorderings */
7348 foreach(lc2, pathkey_orderings)
7349 {
7350 GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7351
7352 /* restore the path (we replace it in the loop) */
7353 path = path_save;
7354
7355 path = make_ordered_path(root,
7356 partially_grouped_rel,
7357 path,
7358 cheapest_total_path,
7359 info->pathkeys,
7360 -1.0);
7361
7362 if (path == NULL)
7363 continue;
7364
7365 if (parse->hasAggs)
7366 add_path(partially_grouped_rel, (Path *)
7367 create_agg_path(root,
7368 partially_grouped_rel,
7369 path,
7370 partially_grouped_rel->reltarget,
7371 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7372 AGGSPLIT_INITIAL_SERIAL,
7373 info->clauses,
7374 NIL,
7375 agg_partial_costs,
7376 dNumPartialGroups));
7377 else
7378 add_path(partially_grouped_rel, (Path *)
7379 create_group_path(root,
7380 partially_grouped_rel,
7381 path,
7382 info->clauses,
7383 NIL,
7384 dNumPartialGroups));
7385 }
7386 }
7387 }
7388
7389 if (can_sort && cheapest_partial_path != NULL)
7390 {
7391 /* Similar to above logic, but for partial paths. */
7392 foreach(lc, input_rel->partial_pathlist)
7393 {
7394 ListCell *lc2;
7395 Path *path = (Path *) lfirst(lc);
7396 Path *path_save = path;
7397 List *pathkey_orderings = NIL;
7398
7399 /* generate alternative group orderings that might be useful */
7400 pathkey_orderings = get_useful_group_keys_orderings(root, path);
7401
7402 Assert(list_length(pathkey_orderings) > 0);
7403
7404 /* process all potentially interesting grouping reorderings */
7405 foreach(lc2, pathkey_orderings)
7406 {
7407 GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7408
7409
7410 /* restore the path (we replace it in the loop) */
7411 path = path_save;
7412
7413 path = make_ordered_path(root,
7414 partially_grouped_rel,
7415 path,
7416 cheapest_partial_path,
7417 info->pathkeys,
7418 -1.0);
7419
7420 if (path == NULL)
7421 continue;
7422
7423 if (parse->hasAggs)
7424 add_partial_path(partially_grouped_rel, (Path *)
7425 create_agg_path(root,
7426 partially_grouped_rel,
7427 path,
7428 partially_grouped_rel->reltarget,
7429 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7430 AGGSPLIT_INITIAL_SERIAL,
7431 info->clauses,
7432 NIL,
7433 agg_partial_costs,
7434 dNumPartialPartialGroups));
7435 else
7436 add_partial_path(partially_grouped_rel, (Path *)
7437 create_group_path(root,
7438 partially_grouped_rel,
7439 path,
7440 info->clauses,
7441 NIL,
7442 dNumPartialPartialGroups));
7443 }
7444 }
7445 }
7446
7447 /*
7448 * Add a partially-grouped HashAgg Path where possible
7449 */
7450 if (can_hash && cheapest_total_path != NULL)
7451 {
7452 /* Checked above */
7453 Assert(parse->hasAggs || parse->groupClause);
7454
7455 add_path(partially_grouped_rel, (Path *)
7456 create_agg_path(root,
7457 partially_grouped_rel,
7458 cheapest_total_path,
7459 partially_grouped_rel->reltarget,
7460 AGG_HASHED,
7461 AGGSPLIT_INITIAL_SERIAL,
7462 root->processed_groupClause,
7463 NIL,
7464 agg_partial_costs,
7465 dNumPartialGroups));
7466 }
7467
7468 /*
7469 * Now add a partially-grouped HashAgg partial Path where possible
7470 */
7471 if (can_hash && cheapest_partial_path != NULL)
7472 {
7473 add_partial_path(partially_grouped_rel, (Path *)
7474 create_agg_path(root,
7475 partially_grouped_rel,
7476 cheapest_partial_path,
7477 partially_grouped_rel->reltarget,
7478 AGG_HASHED,
7479 AGGSPLIT_INITIAL_SERIAL,
7480 root->processed_groupClause,
7481 NIL,
7482 agg_partial_costs,
7483 dNumPartialPartialGroups));
7484 }
7485
7486 /*
7487 * If there is an FDW that's responsible for all baserels of the query,
7488 * let it consider adding partially grouped ForeignPaths.
7489 */
7490 if (partially_grouped_rel->fdwroutine &&
7491 partially_grouped_rel->fdwroutine->GetForeignUpperPaths)
7492 {
7493 FdwRoutine *fdwroutine = partially_grouped_rel->fdwroutine;
7494
7495 fdwroutine->GetForeignUpperPaths(root,
7496 UPPERREL_PARTIAL_GROUP_AGG,
7497 input_rel, partially_grouped_rel,
7498 extra);
7499 }
7500
7501 return partially_grouped_rel;
7502}
@ AGGSPLIT_INITIAL_SERIAL
Definition: nodes.h:385
@ UPPERREL_PARTIAL_GROUP_AGG
Definition: pathnodes.h:72
static PathTarget * make_partial_grouping_target(PlannerInfo *root, PathTarget *grouping_target, Node *havingQual)
Definition: planner.c:5541
GetForeignUpperPaths_function GetForeignUpperPaths
Definition: fdwapi.h:226
AggClauseCosts agg_partial_costs
Definition: pathnodes.h:3343
RelOptKind reloptkind
Definition: pathnodes.h:892

References add_partial_path(), add_path(), GroupPathExtraData::agg_final_costs, AGG_HASHED, GroupPathExtraData::agg_partial_costs, AGG_PLAIN, AGG_SORTED, AGGSPLIT_FINAL_DESERIAL, AGGSPLIT_INITIAL_SERIAL, Assert(), RelOptInfo::cheapest_total_path, GroupByOrdering::clauses, RelOptInfo::consider_parallel, create_agg_path(), create_group_path(), fetch_upper_rel(), GroupPathExtraData::flags, get_agg_clause_costs(), get_number_of_groups(), get_useful_group_keys_orderings(), FdwRoutine::GetForeignUpperPaths, GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, GroupPathExtraData::havingQual, lfirst, linitial, list_length(), make_ordered_path(), make_partial_grouping_target(), MemSet, NIL, parse(), GroupPathExtraData::partial_costs_set, RelOptInfo::partial_pathlist, PARTITIONWISE_AGGREGATE_PARTIAL, GroupByOrdering::pathkeys, RelOptInfo::pathlist, GroupPathExtraData::patype, RelOptInfo::relids, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, Path::rows, RelOptInfo::serverid, GroupPathExtraData::targetList, UPPERREL_PARTIAL_GROUP_AGG, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by create_ordinary_grouping_paths().
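
The partial/final split built here is what EXPLAIN labels as "Partial" and "Finalize" aggregate nodes. A minimal sketch, assuming the scratch table t and the parallel-friendly cost settings from the previous example (plan shape hedged):

    -- Partial aggregation (AGGSPLIT_INITIAL_SERIAL) runs below the Gather;
    -- the Finalize step (AGGSPLIT_FINAL_DESERIAL) combines the per-worker
    -- transition states above it:
    EXPLAIN (COSTS OFF) SELECT a, count(*) FROM t GROUP BY a;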

◆ create_partitionwise_grouping_paths()

static void create_partitionwise_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo grouped_rel,
RelOptInfo partially_grouped_rel,
const AggClauseCosts agg_costs,
grouping_sets_data gd,
PartitionwiseAggregateType  patype,
GroupPathExtraData extra 
)
static

Definition at line 7930 of file planner.c.

7938{
7939 List *grouped_live_children = NIL;
7940 List *partially_grouped_live_children = NIL;
7941 PathTarget *target = grouped_rel->reltarget;
7942 bool partial_grouping_valid = true;
7943 int i;
7944
7945 Assert(patype != PARTITIONWISE_AGGREGATE_NONE);
7946 Assert(patype != PARTITIONWISE_AGGREGATE_PARTIAL ||
7947 partially_grouped_rel != NULL);
7948
7949 /* Add paths for partitionwise aggregation/grouping. */
7950 i = -1;
7951 while ((i = bms_next_member(input_rel->live_parts, i)) >= 0)
7952 {
7953 RelOptInfo *child_input_rel = input_rel->part_rels[i];
7954 PathTarget *child_target;
7955 AppendRelInfo **appinfos;
7956 int nappinfos;
7957 GroupPathExtraData child_extra;
7958 RelOptInfo *child_grouped_rel;
7959 RelOptInfo *child_partially_grouped_rel;
7960
7961 Assert(child_input_rel != NULL);
7962
7963 /* Dummy children can be ignored. */
7964 if (IS_DUMMY_REL(child_input_rel))
7965 continue;
7966
7967 child_target = copy_pathtarget(target);
7968
7969 /*
7970 * Copy the given "extra" structure as is and then override the
7971 * members specific to this child.
7972 */
7973 memcpy(&child_extra, extra, sizeof(child_extra));
7974
7975 appinfos = find_appinfos_by_relids(root, child_input_rel->relids,
7976 &nappinfos);
7977
7978 child_target->exprs = (List *)
7979 adjust_appendrel_attrs(root,
7980 (Node *) target->exprs,
7981 nappinfos, appinfos);
7982
7983 /* Translate havingQual and targetList. */
7984 child_extra.havingQual = (Node *)
7985 adjust_appendrel_attrs(root,
7986 extra->havingQual,
7987 nappinfos, appinfos);
7988 child_extra.targetList = (List *)
7989 adjust_appendrel_attrs(root,
7990 (Node *) extra->targetList,
7991 nappinfos, appinfos);
7992
7993 /*
7994 * extra->patype was the value computed for our parent rel; patype is
7995 * the value for this relation. For the child, our value is its
7996 * parent rel's value.
7997 */
7998 child_extra.patype = patype;
7999
8000 /*
8001 * Create grouping relation to hold fully aggregated grouping and/or
8002 * aggregation paths for the child.
8003 */
8004 child_grouped_rel = make_grouping_rel(root, child_input_rel,
8005 child_target,
8006 extra->target_parallel_safe,
8007 child_extra.havingQual);
8008
8009 /* Create grouping paths for this child relation. */
8010 create_ordinary_grouping_paths(root, child_input_rel,
8011 child_grouped_rel,
8012 agg_costs, gd, &child_extra,
8013 &child_partially_grouped_rel);
8014
8015 if (child_partially_grouped_rel)
8016 {
8017 partially_grouped_live_children =
8018 lappend(partially_grouped_live_children,
8019 child_partially_grouped_rel);
8020 }
8021 else
8022 partial_grouping_valid = false;
8023
8024 if (patype == PARTITIONWISE_AGGREGATE_FULL)
8025 {
8026 set_cheapest(child_grouped_rel);
8027 grouped_live_children = lappend(grouped_live_children,
8028 child_grouped_rel);
8029 }
8030
8031 pfree(appinfos);
8032 }
8033
8034 /*
8035 * Try to create append paths for partially grouped children. For full
8036 * partitionwise aggregation, we might have paths in the partial_pathlist
8037 * if parallel aggregation is possible. For partial partitionwise
8038 * aggregation, we may have paths in both pathlist and partial_pathlist.
8039 *
8040 * NB: We must have a partially grouped path for every child in order to
8041 * generate a partially grouped path for this relation.
8042 */
8043 if (partially_grouped_rel && partial_grouping_valid)
8044 {
8045 Assert(partially_grouped_live_children != NIL);
8046
8047 add_paths_to_append_rel(root, partially_grouped_rel,
8048 partially_grouped_live_children);
8049
8050 /*
8051 * We need to call set_cheapest, since the finalization step will use the
8052 * cheapest path from the rel.
8053 */
8054 if (partially_grouped_rel->pathlist)
8055 set_cheapest(partially_grouped_rel);
8056 }
8057
8058 /* If possible, create append paths for fully grouped children. */
8059 if (patype == PARTITIONWISE_AGGREGATE_FULL)
8060 {
8061 Assert(grouped_live_children != NIL);
8062
8063 add_paths_to_append_rel(root, grouped_rel, grouped_live_children);
8064 }
8065}

References add_paths_to_append_rel(), adjust_appendrel_attrs(), Assert(), bms_next_member(), copy_pathtarget(), create_ordinary_grouping_paths(), PathTarget::exprs, find_appinfos_by_relids(), GroupPathExtraData::havingQual, i, IS_DUMMY_REL, lappend(), RelOptInfo::live_parts, make_grouping_rel(), NIL, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, PARTITIONWISE_AGGREGATE_PARTIAL, RelOptInfo::pathlist, GroupPathExtraData::patype, pfree(), RelOptInfo::relids, RelOptInfo::reltarget, root, set_cheapest(), GroupPathExtraData::target_parallel_safe, and GroupPathExtraData::targetList.

Referenced by create_ordinary_grouping_paths().
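
For illustration, here is the parent-to-child translation idiom the loop above relies on, shown in isolation. This is an editor's sketch rather than code from planner.c; child_rel and parent_expr are hypothetical stand-ins for a child RelOptInfo and any expression tree phrased in terms of the parent's Vars.

    AppendRelInfo **appinfos;
    int         nappinfos;
    Node       *child_expr;

    /* Collect the AppendRelInfos that map the parent's attnos to this child. */
    appinfos = find_appinfos_by_relids(root, child_rel->relids, &nappinfos);

    /* Rewrite parent Vars and other parent references into child terms. */
    child_expr = adjust_appendrel_attrs(root, parent_expr, nappinfos, appinfos);

    pfree(appinfos);

The same pair of calls translates the grouping target, havingQual, and targetList above before each child's grouping paths are built.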

◆ create_window_paths()

static RelOptInfo * create_window_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget input_target,
PathTarget output_target,
bool  output_target_parallel_safe,
WindowFuncLists wflists,
List activeWindows 
)
static

Definition at line 4434 of file planner.c.

4441{
4442 RelOptInfo *window_rel;
4443 ListCell *lc;
4444
4445 /* For now, do all work in the (WINDOW, NULL) upperrel */
4446 window_rel = fetch_upper_rel(root, UPPERREL_WINDOW, NULL);
4447
4448 /*
4449 * If the input relation is not parallel-safe, then the window relation
4450 * can't be parallel-safe, either. Otherwise, we need to examine the
4451 * target list and active windows for non-parallel-safe constructs.
4452 */
4453 if (input_rel->consider_parallel && output_target_parallel_safe &&
4454 is_parallel_safe(root, (Node *) activeWindows))
4455 window_rel->consider_parallel = true;
4456
4457 /*
4458 * If the input rel belongs to a single FDW, so does the window rel.
4459 */
4460 window_rel->serverid = input_rel->serverid;
4461 window_rel->userid = input_rel->userid;
4462 window_rel->useridiscurrent = input_rel->useridiscurrent;
4463 window_rel->fdwroutine = input_rel->fdwroutine;
4464
4465 /*
4466 * Consider computing window functions starting from the existing
4467 * cheapest-total path (which will likely require a sort) as well as any
4468 * existing paths that satisfy or partially satisfy root->window_pathkeys.
4469 */
4470 foreach(lc, input_rel->pathlist)
4471 {
4472 Path *path = (Path *) lfirst(lc);
4473 int presorted_keys;
4474
4475 if (path == input_rel->cheapest_total_path ||
4476 pathkeys_count_contained_in(root->window_pathkeys, path->pathkeys,
4477 &presorted_keys) ||
4478 presorted_keys > 0)
4479 create_one_window_path(root,
4480 window_rel,
4481 path,
4482 input_target,
4483 output_target,
4484 wflists,
4485 activeWindows);
4486 }
4487
4488 /*
4489 * If there is an FDW that's responsible for all baserels of the query,
4490 * let it consider adding ForeignPaths.
4491 */
4492 if (window_rel->fdwroutine &&
4493 window_rel->fdwroutine->GetForeignUpperPaths)
4494 window_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_WINDOW,
4495 input_rel, window_rel,
4496 NULL);
4497
4498 /* Let extensions possibly add some more paths */
4499 if (create_upper_paths_hook)
4500 (*create_upper_paths_hook) (root, UPPERREL_WINDOW,
4501 input_rel, window_rel, NULL);
4502
4503 /* Now choose the best path(s) */
4504 set_cheapest(window_rel);
4505
4506 return window_rel;
4507}
bool is_parallel_safe(PlannerInfo *root, Node *node)
Definition: clauses.c:754
@ UPPERREL_WINDOW
Definition: pathnodes.h:75
static void create_one_window_path(PlannerInfo *root, RelOptInfo *window_rel, Path *path, PathTarget *input_target, PathTarget *output_target, WindowFuncLists *wflists, List *activeWindows)
Definition: planner.c:4521

References RelOptInfo::cheapest_total_path, RelOptInfo::consider_parallel, create_one_window_path(), create_upper_paths_hook, fetch_upper_rel(), is_parallel_safe(), lfirst, Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::pathlist, root, RelOptInfo::serverid, set_cheapest(), UPPERREL_WINDOW, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by grouping_planner().
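
The foreach loop above admits an input path if any of three tests passes. An editor's sketch of that filter as a hypothetical helper (the function name is invented for exposition; the calls are real planner APIs):

    #include "postgres.h"
    #include "optimizer/paths.h"

    static bool
    useful_window_input_path(PlannerInfo *root, RelOptInfo *input_rel, Path *path)
    {
        int     presorted_keys;

        /* The cheapest-total path is always considered; a sort can go on top. */
        if (path == input_rel->cheapest_total_path)
            return true;

        /* Fully sorted on window_pathkeys: the window functions need no sort. */
        if (pathkeys_count_contained_in(root->window_pathkeys, path->pathkeys,
                                        &presorted_keys))
            return true;

        /* Partially sorted: an incremental sort may beat sorting from scratch. */
        return presorted_keys > 0;
    }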

◆ expression_planner()

Expr * expression_planner ( Expr expr)

Definition at line 6645 of file planner.c.

6646{
6647 Node *result;
6648
6649 /*
6650 * Convert named-argument function calls, insert default arguments and
6651 * simplify constant subexprs
6652 */
6653 result = eval_const_expressions(NULL, (Node *) expr);
6654
6655 /* Fill in opfuncid values if missing */
6656 fix_opfuncids(result);
6657
6658 return (Expr *) result;
6659}
Node * eval_const_expressions(PlannerInfo *root, Node *node)
Definition: clauses.c:2256
void fix_opfuncids(Node *node)
Definition: nodeFuncs.c:1841

References eval_const_expressions(), and fix_opfuncids().

Referenced by ATExecAddColumn(), ATExecSetExpression(), ATPrepAlterColumnType(), BeginCopyFrom(), ComputePartitionAttrs(), contain_mutable_functions_after_planning(), contain_volatile_functions_after_planning(), ExecPrepareCheck(), ExecPrepareExpr(), ExecPrepareQual(), load_domaintype_info(), set_baserel_partition_constraint(), slot_fill_defaults(), and transformPartitionBoundValue().
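
Typical usage mirrors ExecPrepareExpr(): simplify the expression once, then build an ExprState for repeated evaluation. A minimal sketch (editor's example; raw_expr is a caller-supplied expression tree, and a suitable memory context is assumed to be current):

    Expr       *simplified = expression_planner(raw_expr);
    ExprState  *exprstate = ExecInitExpr(simplified, NULL);   /* NULL: standalone expr */

    /* ... later, evaluate with ExecEvalExpr(exprstate, econtext, &isnull) ... */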

◆ expression_planner_with_deps()

Expr * expression_planner_with_deps ( Expr expr,
List **  relationOids,
List **  invalItems 
)

Definition at line 6672 of file planner.c.

6675{
6676 Node *result;
6677 PlannerGlobal glob;
6678 PlannerInfo root;
6679
6680 /* Make up dummy planner state so we can use setrefs machinery */
6681 MemSet(&glob, 0, sizeof(glob));
6682 glob.type = T_PlannerGlobal;
6683 glob.relationOids = NIL;
6684 glob.invalItems = NIL;
6685
6686 MemSet(&root, 0, sizeof(root));
6687 root.type = T_PlannerInfo;
6688 root.glob = &glob;
6689
6690 /*
6691 * Convert named-argument function calls, insert default arguments and
6692 * simplify constant subexprs. Collect identities of inlined functions
6693 * and elided domains, too.
6694 */
6695 result = eval_const_expressions(&root, (Node *) expr);
6696
6697 /* Fill in opfuncid values if missing */
6698 fix_opfuncids(result);
6699
6700 /*
6701 * Now walk the finished expression to find anything else we ought to
6702 * record as an expression dependency.
6703 */
6704 (void) extract_query_dependencies_walker(result, &root);
6705
6706 *relationOids = glob.relationOids;
6707 *invalItems = glob.invalItems;
6708
6709 return (Expr *) result;
6710}
bool extract_query_dependencies_walker(Node *node, PlannerInfo *context)
Definition: setrefs.c:3669
List * invalItems
Definition: pathnodes.h:154
List * relationOids
Definition: pathnodes.h:151

References eval_const_expressions(), extract_query_dependencies_walker(), fix_opfuncids(), PlannerGlobal::invalItems, MemSet, NIL, PlannerGlobal::relationOids, and root.

Referenced by GetCachedExpression().
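
An editor's sketch of the calling pattern (cf. GetCachedExpression()): the caller retains the returned lists so its cached result can be invalidated when any referenced relation or function changes.

    List   *relationOids;
    List   *invalItems;
    Expr   *planned;

    planned = expression_planner_with_deps(raw_expr, &relationOids, &invalItems);
    /* cache 'planned'; consult relationOids/invalItems on invalidation events */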

◆ extract_rollup_sets()

static List * extract_rollup_sets ( List groupingSets)
static

Definition at line 2868 of file planner.c.

2869{
2870 int num_sets_raw = list_length(groupingSets);
2871 int num_empty = 0;
2872 int num_sets = 0; /* distinct sets */
2873 int num_chains = 0;
2874 List *result = NIL;
2875 List **results;
2876 List **orig_sets;
2877 Bitmapset **set_masks;
2878 int *chains;
2879 short **adjacency;
2880 short *adjacency_buf;
2881 BipartiteMatchState *state;
2882 int i;
2883 int j;
2884 int j_size;
2885 ListCell *lc1 = list_head(groupingSets);
2886 ListCell *lc;
2887
2888 /*
2889 * Start by stripping out empty sets. The algorithm doesn't require this,
2890 * but the planner currently needs all empty sets to be returned in the
2891 * first list, so we strip them here and add them back after.
2892 */
2893 while (lc1 && lfirst(lc1) == NIL)
2894 {
2895 ++num_empty;
2896 lc1 = lnext(groupingSets, lc1);
2897 }
2898
2899 /* bail out now if it turns out that all we had were empty sets. */
2900 if (!lc1)
2901 return list_make1(groupingSets);
2902
2903 /*----------
2904 * We don't strictly need to remove duplicate sets here, but if we don't,
2905 * they tend to become scattered through the result, which is a bit
2906 * confusing (and irritating if we ever decide to optimize them out).
2907 * So we remove them here and add them back after.
2908 *
2909 * For each non-duplicate set, we fill in the following:
2910 *
2911 * orig_sets[i] = list of the original set lists
2912 * set_masks[i] = bitmapset for testing inclusion
2913 * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
2914 *
2915 * chains[i] will be the result group this set is assigned to.
2916 *
2917 * We index all of these from 1 rather than 0 because it is convenient
2918 * to leave 0 free for the NIL node in the graph algorithm.
2919 *----------
2920 */
2921 orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
2922 set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
2923 adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
2924 adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
2925
2926 j_size = 0;
2927 j = 0;
2928 i = 1;
2929
2930 for_each_cell(lc, groupingSets, lc1)
2931 {
2932 List *candidate = (List *) lfirst(lc);
2933 Bitmapset *candidate_set = NULL;
2934 ListCell *lc2;
2935 int dup_of = 0;
2936
2937 foreach(lc2, candidate)
2938 {
2939 candidate_set = bms_add_member(candidate_set, lfirst_int(lc2));
2940 }
2941
2942 /* we can only be a dup if we're the same length as a previous set */
2943 if (j_size == list_length(candidate))
2944 {
2945 int k;
2946
2947 for (k = j; k < i; ++k)
2948 {
2949 if (bms_equal(set_masks[k], candidate_set))
2950 {
2951 dup_of = k;
2952 break;
2953 }
2954 }
2955 }
2956 else if (j_size < list_length(candidate))
2957 {
2958 j_size = list_length(candidate);
2959 j = i;
2960 }
2961
2962 if (dup_of > 0)
2963 {
2964 orig_sets[dup_of] = lappend(orig_sets[dup_of], candidate);
2965 bms_free(candidate_set);
2966 }
2967 else
2968 {
2969 int k;
2970 int n_adj = 0;
2971
2972 orig_sets[i] = list_make1(candidate);
2973 set_masks[i] = candidate_set;
2974
2975 /* fill in adjacency list; no need to compare equal-size sets */
2976
2977 for (k = j - 1; k > 0; --k)
2978 {
2979 if (bms_is_subset(set_masks[k], candidate_set))
2980 adjacency_buf[++n_adj] = k;
2981 }
2982
2983 if (n_adj > 0)
2984 {
2985 adjacency_buf[0] = n_adj;
2986 adjacency[i] = palloc((n_adj + 1) * sizeof(short));
2987 memcpy(adjacency[i], adjacency_buf, (n_adj + 1) * sizeof(short));
2988 }
2989 else
2990 adjacency[i] = NULL;
2991
2992 ++i;
2993 }
2994 }
2995
2996 num_sets = i - 1;
2997
2998 /*
2999 * Apply the graph matching algorithm to do the work.
3000 */
3001 state = BipartiteMatch(num_sets, num_sets, adjacency);
3002
3003 /*
3004 * Now, the state->pair* fields have the info we need to assign sets to
3005 * chains. Two sets (u,v) belong to the same chain if pair_uv[u] = v or
3006 * pair_vu[v] = u (both will be true, but we check both so that we can do
3007 * it in one pass)
3008 */
3009 chains = palloc0((num_sets + 1) * sizeof(int));
3010
3011 for (i = 1; i <= num_sets; ++i)
3012 {
3013 int u = state->pair_vu[i];
3014 int v = state->pair_uv[i];
3015
3016 if (u > 0 && u < i)
3017 chains[i] = chains[u];
3018 else if (v > 0 && v < i)
3019 chains[i] = chains[v];
3020 else
3021 chains[i] = ++num_chains;
3022 }
3023
3024 /* build result lists. */
3025 results = palloc0((num_chains + 1) * sizeof(List *));
3026
3027 for (i = 1; i <= num_sets; ++i)
3028 {
3029 int c = chains[i];
3030
3031 Assert(c > 0);
3032
3033 results[c] = list_concat(results[c], orig_sets[i]);
3034 }
3035
3036 /* push any empty sets back on the first list. */
3037 while (num_empty-- > 0)
3038 results[1] = lcons(NIL, results[1]);
3039
3040 /* make result list */
3041 for (i = 1; i <= num_chains; ++i)
3042 result = lappend(result, results[i]);
3043
3044 /*
3045 * Free all the things.
3046 *
3047 * (This is over-fussy for small sets but for large sets we could have
3048 * tied up a nontrivial amount of memory.)
3049 */
3050 BipartiteMatchFree(state);
3051 pfree(results);
3052 pfree(chains);
3053 for (i = 1; i <= num_sets; ++i)
3054 if (adjacency[i])
3055 pfree(adjacency[i]);
3056 pfree(adjacency);
3057 pfree(adjacency_buf);
3058 pfree(orig_sets);
3059 for (i = 1; i <= num_sets; ++i)
3060 bms_free(set_masks[i]);
3061 pfree(set_masks);
3062
3063 return result;
3064}
BipartiteMatchState * BipartiteMatch(int u_size, int v_size, short **adjacency)
void BipartiteMatchFree(BipartiteMatchState *state)
bool bms_equal(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:142
bool bms_is_subset(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:412
void bms_free(Bitmapset *a)
Definition: bitmapset.c:239
int j
Definition: isn.c:78
void * palloc0(Size size)
Definition: mcxt.c:1347
char * c
Definition: regguts.h:323

References Assert(), BipartiteMatch(), BipartiteMatchFree(), bms_add_member(), bms_equal(), bms_free(), bms_is_subset(), for_each_cell, i, j, lappend(), lcons(), lfirst, lfirst_int, list_concat(), list_head(), list_length(), list_make1, lnext(), NIL, palloc(), palloc0(), and pfree().

Referenced by preprocess_grouping_sets().
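
A worked example (editor's illustration, not from the source): given GROUPING SETS ((a,b), (a), (c), ()), the empty set is held aside, the matching chains (a) under (a,b) because (a) is a subset of (a,b), and (c) cannot chain with anything:

    input:   (a,b), (a), (c), ()
    strip:   (a,b), (a), (c)          -- empty set re-added to the first chain later
    chains:  {(a,b), (a)}  and  {(c)}
    result:  [ ((), (a), (a,b)), ((c)) ]

Each returned sublist is a rollup computable with a single sort: ordering by a, b satisfies both (a) and (a,b), so the two chains cost two sorts instead of three.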

◆ gather_grouping_paths()

static void gather_grouping_paths ( PlannerInfo root,
RelOptInfo rel 
)
static

Definition at line 7570 of file planner.c.

7571{
7572 ListCell *lc;
7573 Path *cheapest_partial_path;
7574 List *groupby_pathkeys;
7575
7576 /*
7577 * This occurs after any partial aggregation has taken place, so trim off
7578 * any pathkeys added for ORDER BY / DISTINCT aggregates.
7579 */
7580 if (list_length(root->group_pathkeys) > root->num_groupby_pathkeys)
7581 groupby_pathkeys = list_copy_head(root->group_pathkeys,
7582 root->num_groupby_pathkeys);
7583 else
7584 groupby_pathkeys = root->group_pathkeys;
7585
7586 /* Try Gather for unordered paths and Gather Merge for ordered ones. */
7587 generate_useful_gather_paths(root, rel, true);
7588
7589 cheapest_partial_path = linitial(rel->partial_pathlist);
7590
7591 /* XXX Shouldn't this also consider the group-key-reordering? */
7592 foreach(lc, rel->partial_pathlist)
7593 {
7594 Path *path = (Path *) lfirst(lc);
7595 bool is_sorted;
7596 int presorted_keys;
7597 double total_groups;
7598
7599 is_sorted = pathkeys_count_contained_in(groupby_pathkeys,
7600 path->pathkeys,
7601 &presorted_keys);
7602
7603 if (is_sorted)
7604 continue;
7605
7606 /*
7607 * Try at least sorting the cheapest path and also try incrementally
7608 * sorting any path which is partially sorted already (no need to deal
7609 * with paths which have presorted keys when incremental sort is
7610 * disabled unless it's the cheapest input path).
7611 */
7612 if (path != cheapest_partial_path &&
7613 (presorted_keys == 0 || !enable_incremental_sort))
7614 continue;
7615
7616 /*
7617 * We've no need to consider both a sort and incremental sort. We'll
7618 * just do a sort if there are no presorted keys and an incremental
7619 * sort when there are presorted keys.
7620 */
7621 if (presorted_keys == 0 || !enable_incremental_sort)
7622 path = (Path *) create_sort_path(root, rel, path,
7623 groupby_pathkeys,
7624 -1.0);
7625 else
7626 path = (Path *) create_incremental_sort_path(root,
7627 rel,
7628 path,
7629 groupby_pathkeys,
7630 presorted_keys,
7631 -1.0);
7632 total_groups = compute_gather_rows(path);
7633 path = (Path *)
7634 create_gather_merge_path(root,
7635 rel,
7636 path,
7637 rel->reltarget,
7638 groupby_pathkeys,
7639 NULL,
7640 &total_groups);
7641
7642 add_path(rel, path);
7643 }
7644}
List * list_copy_head(const List *oldlist, int len)
Definition: list.c:1593

References add_path(), compute_gather_rows(), create_gather_merge_path(), create_incremental_sort_path(), create_sort_path(), enable_incremental_sort, generate_useful_gather_paths(), lfirst, linitial, list_copy_head(), list_length(), RelOptInfo::partial_pathlist, Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::reltarget, and root.

Referenced by add_paths_to_grouping_rel(), and create_ordinary_grouping_paths().
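
Schematically, the plan shape this function works toward looks like this (editor's illustration; generate_useful_gather_paths() covers the plain Gather cases, while the loop above builds the explicitly sorted Gather Merge variants):

    Finalize GroupAggregate
      ->  Gather Merge                        (ordered on the GROUP BY pathkeys)
            ->  Sort  or  Incremental Sort    (full sort when no keys are presorted)
                  ->  Partial Aggregate       (one of rel's partial paths)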

◆ generate_setop_child_grouplist()

static List * generate_setop_child_grouplist ( SetOperationStmt op,
List targetlist 
)
static

Definition at line 8161 of file planner.c.

8162{
8163 List *grouplist = copyObject(op->groupClauses);
8164 ListCell *lg;
8165 ListCell *lt;
8166 ListCell *ct;
8167
8168 lg = list_head(grouplist);
8169 ct = list_head(op->colTypes);
8170 foreach(lt, targetlist)
8171 {
8172 TargetEntry *tle = (TargetEntry *) lfirst(lt);
8173 SortGroupClause *sgc;
8174 Oid coltype;
8175
8176 /* resjunk columns could have sortgrouprefs. Leave these alone */
8177 if (tle->resjunk)
8178 continue;
8179
8180 /*
8181 * We expect every non-resjunk target to have a SortGroupClause and
8182 * colTypes.
8183 */
8184 Assert(lg != NULL);
8185 Assert(ct != NULL);
8186 sgc = (SortGroupClause *) lfirst(lg);
8187 coltype = lfirst_oid(ct);
8188
8189 /* reject if target type isn't the same as the setop target type */
8190 if (coltype != exprType((Node *) tle->expr))
8191 return NIL;
8192
8193 lg = lnext(grouplist, lg);
8194 ct = lnext(op->colTypes, ct);
8195
8196 /* assign a tleSortGroupRef, or reuse the existing one */
8197 sgc->tleSortGroupRef = assignSortGroupRef(tle, targetlist);
8198 }
8199
8200 Assert(lg == NULL);
8201 Assert(ct == NULL);
8202
8203 return grouplist;
8204}
Oid exprType(const Node *expr)
Definition: nodeFuncs.c:42
Index assignSortGroupRef(TargetEntry *tle, List *tlist)
#define lfirst_oid(lc)
Definition: pg_list.h:174
unsigned int Oid
Definition: postgres_ext.h:30
Expr * expr
Definition: primnodes.h:2219

References Assert(), assignSortGroupRef(), copyObject, TargetEntry::expr, exprType(), lfirst, lfirst_oid, list_head(), lnext(), NIL, and SortGroupClause::tleSortGroupRef.

Referenced by standard_qp_callback().
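
Editor's example: for SELECT a, b FROM t1 UNION SELECT a, b FROM t2, each child gets a copy of the setop's group clauses with tleSortGroupRefs assigned against the child's own target list, so standard_qp_callback() can ask each child to produce output already grouped or sorted on (a, b) for the parent set operation. If any non-resjunk output column's type differs from the corresponding setop colType, the function returns NIL and no such grouping is requested for that child.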

◆ get_cheapest_fractional_path()

Path * get_cheapest_fractional_path ( RelOptInfo rel,
double  tuple_fraction 
)

Definition at line 6483 of file planner.c.

6484{
6485 Path *best_path = rel->cheapest_total_path;
6486 ListCell *l;
6487
6488 /* If all tuples will be retrieved, just return the cheapest-total path */
6489 if (tuple_fraction <= 0.0)
6490 return best_path;
6491
6492 /* Convert absolute # of tuples to a fraction; no need to clamp to 0..1 */
6493 if (tuple_fraction >= 1.0 && best_path->rows > 0)
6494 tuple_fraction /= best_path->rows;
6495
6496 foreach(l, rel->pathlist)
6497 {
6498 Path *path = (Path *) lfirst(l);
6499
6500 if (path->param_info)
6501 continue;
6502
6503 if (path == rel->cheapest_total_path ||
6504 compare_fractional_path_costs(best_path, path, tuple_fraction) <= 0)
6505 continue;
6506
6507 best_path = path;
6508 }
6509
6510 return best_path;
6511}
int compare_fractional_path_costs(Path *path1, Path *path2, double fraction)
Definition: pathnode.c:124

References RelOptInfo::cheapest_total_path, compare_fractional_path_costs(), lfirst, RelOptInfo::pathlist, and Path::rows.

Referenced by add_paths_to_append_rel(), make_subplan(), and standard_planner().
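
Editor's worked example: for a LIMIT 10 query whose cheapest-total path is expected to return 1000 rows, tuple_fraction arrives as the absolute count 10.0 and is converted to 10/1000 = 0.01. compare_fractional_path_costs() then scores each path as its startup cost plus 1% of its run cost, so a fast-start ordered index scan can beat a sequential scan plus sort that is cheaper in total cost.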

◆ get_number_of_groups()

static double get_number_of_groups ( PlannerInfo root,
double  path_rows,
grouping_sets_data gd,
List target_list 
)
static

Definition at line 3559 of file planner.c.

3563{
3564 Query *parse = root->parse;
3565 double dNumGroups;
3566
3567 if (parse->groupClause)
3568 {
3569 List *groupExprs;
3570
3571 if (parse->groupingSets)
3572 {
3573 /* Add up the estimates for each grouping set */
3574 ListCell *lc;
3575
3576 Assert(gd); /* keep Coverity happy */
3577
3578 dNumGroups = 0;
3579
3580 foreach(lc, gd->rollups)
3581 {
3582 RollupData *rollup = lfirst_node(RollupData, lc);
3583 ListCell *lc2;
3584 ListCell *lc3;
3585
3586 groupExprs = get_sortgrouplist_exprs(rollup->groupClause,
3587 target_list);
3588
3589 rollup->numGroups = 0.0;
3590
3591 forboth(lc2, rollup->gsets, lc3, rollup->gsets_data)
3592 {
3593 List *gset = (List *) lfirst(lc2);
3594 GroupingSetData *gs = lfirst_node(GroupingSetData, lc3);
3595 double numGroups = estimate_num_groups(root,
3596 groupExprs,
3597 path_rows,
3598 &gset,
3599 NULL);
3600
3601 gs->numGroups = numGroups;
3602 rollup->numGroups += numGroups;
3603 }
3604
3605 dNumGroups += rollup->numGroups;
3606 }
3607
3608 if (gd->hash_sets_idx)
3609 {
3610 ListCell *lc2;
3611
3612 gd->dNumHashGroups = 0;
3613
3614 groupExprs = get_sortgrouplist_exprs(parse->groupClause,
3615 target_list);
3616
3617 forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets)
3618 {
3619 List *gset = (List *) lfirst(lc);
3620 GroupingSetData *gs = lfirst_node(GroupingSetData, lc2);
3621 double numGroups = estimate_num_groups(root,
3622 groupExprs,
3623 path_rows,
3624 &gset,
3625 NULL);
3626
3627 gs->numGroups = numGroups;
3628 gd->dNumHashGroups += numGroups;
3629 }
3630
3631 dNumGroups += gd->dNumHashGroups;
3632 }
3633 }
3634 else
3635 {
3636 /* Plain GROUP BY -- estimate based on optimized groupClause */
3637 groupExprs = get_sortgrouplist_exprs(root->processed_groupClause,
3638 target_list);
3639
3640 dNumGroups = estimate_num_groups(root, groupExprs, path_rows,
3641 NULL, NULL);
3642 }
3643 }
3644 else if (parse->groupingSets)
3645 {
3646 /* Empty grouping sets ... one result row for each one */
3647 dNumGroups = list_length(parse->groupingSets);
3648 }
3649 else if (parse->hasAggs || root->hasHavingQual)
3650 {
3651 /* Plain aggregation, one result row */
3652 dNumGroups = 1;
3653 }
3654 else
3655 {
3656 /* Not grouping */
3657 dNumGroups = 1;
3658 }
3659
3660 return dNumGroups;
3661}
List * hash_sets_idx
Definition: planner.c:101

References Assert(), grouping_sets_data::dNumHashGroups, estimate_num_groups(), forboth, get_sortgrouplist_exprs(), RollupData::groupClause, RollupData::gsets, RollupData::gsets_data, grouping_sets_data::hash_sets_idx, lfirst, lfirst_node, list_length(), GroupingSetData::numGroups, RollupData::numGroups, parse(), grouping_sets_data::rollups, root, and grouping_sets_data::unsortable_sets.

Referenced by create_ordinary_grouping_paths(), and create_partial_grouping_paths().
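
Editor's worked example: for GROUP BY GROUPING SETS ((a, b), (a), ()) planned as a single rollup, if estimate_num_groups() returns 1000 for (a, b) and 100 for (a), and the empty set contributes exactly one group, the rollup's numGroups is 1101 and dNumGroups = 1101. Any unsortable sets routed to hashing would add their estimates through gd->dNumHashGroups.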

◆ get_useful_pathkeys_for_distinct()

static List * get_useful_pathkeys_for_distinct ( PlannerInfo root,
List needed_pathkeys,
List path_pathkeys 
)
static

Definition at line 5124 of file planner.c.

5126{
5127 List *useful_pathkeys_list = NIL;
5128 List *useful_pathkeys = NIL;
5129
5130 /* always include the given 'needed_pathkeys' */
5131 useful_pathkeys_list = lappend(useful_pathkeys_list,
5132 needed_pathkeys);
5133
5134 if (!enable_distinct_reordering)
5135 return useful_pathkeys_list;
5136
5137 /*
5138 * Scan the given 'path_pathkeys' and construct a list of PathKey nodes
5139 * that match 'needed_pathkeys', but only up to the longest matching
5140 * prefix.
5141 *
5142 * When we have DISTINCT ON, we must ensure that the resulting pathkey
5143 * list matches initial distinctClause pathkeys; otherwise, it won't have
5144 * the desired behavior.
5145 */
5146 foreach_node(PathKey, pathkey, path_pathkeys)
5147 {
5148 /*
5149 * The PathKey nodes are canonical, so they can be checked for
5150 * equality by simple pointer comparison.
5151 */
5152 if (!list_member_ptr(needed_pathkeys, pathkey))
5153 break;
5154 if (root->parse->hasDistinctOn &&
5155 !list_member_ptr(root->distinct_pathkeys, pathkey))
5156 break;
5157
5158 useful_pathkeys = lappend(useful_pathkeys, pathkey);
5159 }
5160
5161 /* If no match at all, no point in reordering needed_pathkeys */
5162 if (useful_pathkeys == NIL)
5163 return useful_pathkeys_list;
5164
5165 /*
5166 * If not full match, the resulting pathkey list is not useful without
5167 * incremental sort.
5168 */
5169 if (list_length(useful_pathkeys) < list_length(needed_pathkeys) &&
5170 !enable_incremental_sort)
5171 return useful_pathkeys_list;
5172
5173 /* Append the remaining PathKey nodes in needed_pathkeys */
5174 useful_pathkeys = list_concat_unique_ptr(useful_pathkeys,
5175 needed_pathkeys);
5176
5177 /*
5178 * If the resulting pathkey list is the same as the 'needed_pathkeys',
5179 * just drop it.
5180 */
5181 if (compare_pathkeys(needed_pathkeys,
5182 useful_pathkeys) == PATHKEYS_EQUAL)
5183 return useful_pathkeys_list;
5184
5185 useful_pathkeys_list = lappend(useful_pathkeys_list,
5186 useful_pathkeys);
5187
5188 return useful_pathkeys_list;
5189}
List * list_concat_unique_ptr(List *list1, const List *list2)
Definition: list.c:1427
bool list_member_ptr(const List *list, const void *datum)
Definition: list.c:682
bool enable_distinct_reordering
Definition: planner.c:70

References compare_pathkeys(), enable_distinct_reordering, enable_incremental_sort, foreach_node, lappend(), list_concat_unique_ptr(), list_length(), list_member_ptr(), NIL, PATHKEYS_EQUAL, and root.

Referenced by create_final_distinct_paths(), and create_partial_distinct_paths().
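
Editor's worked example: suppose DISTINCT needs pathkeys (a, b, c) and some input path is already sorted on (b, a, d). The scan keeps b and a, both members of needed_pathkeys, and stops at d, leaving the prefix (b, a). With incremental sort enabled, the missing key c is appended to give (b, a, c): a reordering that still satisfies DISTINCT while reusing the path's existing sort order. The function then returns [(a, b, c), (b, a, c)].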

◆ group_by_has_partkey()

static bool group_by_has_partkey ( RelOptInfo input_rel,
List targetList,
List groupClause 
)
static

Definition at line 8074 of file planner.c.

8077{
8078 List *groupexprs = get_sortgrouplist_exprs(groupClause, targetList);
8079 int cnt = 0;
8080 int partnatts;
8081
8082 /* Input relation should be partitioned. */
8083 Assert(input_rel->part_scheme);
8084
8085 /* Rule out early, if there are no partition keys present. */
8086 if (!input_rel->partexprs)
8087 return false;
8088
8089 partnatts = input_rel->part_scheme->partnatts;
8090
8091 for (cnt = 0; cnt < partnatts; cnt++)
8092 {
8093 List *partexprs = input_rel->partexprs[cnt];
8094 ListCell *lc;
8095 bool found = false;
8096
8097 foreach(lc, partexprs)
8098 {
8099 ListCell *lg;
8100 Expr *partexpr = lfirst(lc);
8101 Oid partcoll = input_rel->part_scheme->partcollation[cnt];
8102
8103 foreach(lg, groupexprs)
8104 {
8105 Expr *groupexpr = lfirst(lg);
8106 Oid groupcoll = exprCollation((Node *) groupexpr);
8107
8108 /*
8109 * Note: we can assume there is at most one RelabelType node;
8110 * eval_const_expressions() will have simplified if more than
8111 * one.
8112 */
8113 if (IsA(groupexpr, RelabelType))
8114 groupexpr = ((RelabelType *) groupexpr)->arg;
8115
8116 if (equal(groupexpr, partexpr))
8117 {
8118 /*
8119 * Reject a match if the grouping collation does not match
8120 * the partitioning collation.
8121 */
8122 if (OidIsValid(partcoll) && OidIsValid(groupcoll) &&
8123 partcoll != groupcoll)
8124 return false;
8125
8126 found = true;
8127 break;
8128 }
8129 }
8130
8131 if (found)
8132 break;
8133 }
8134
8135 /*
8136 * If none of the partition key expressions matches any of the
8137 * GROUP BY expressions, return false.
8138 */
8139 if (!found)
8140 return false;
8141 }
8142
8143 return true;
8144}
#define OidIsValid(objectId)
Definition: c.h:746
Oid exprCollation(const Node *expr)
Definition: nodeFuncs.c:821
#define IsA(nodeptr, _type_)
Definition: nodes.h:164

References Assert(), equal(), exprCollation(), get_sortgrouplist_exprs(), IsA, lfirst, and OidIsValid.

Referenced by create_ordinary_grouping_paths().
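
Editor's example: for a table partitioned by RANGE (a), GROUP BY a, b covers the partition key, so no group can span partitions and full partitionwise aggregation is safe; GROUP BY b alone returns false. The collation check matters for text keys: if the column is partitioned under one collation but grouped under another, values that are equal for grouping purposes could still be routed to different partitions, so such a match is rejected.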

◆ grouping_planner()

static void grouping_planner ( PlannerInfo root,
double  tuple_fraction,
SetOperationStmt setops 
)
static

Definition at line 1381 of file planner.c.

1383{
1384 Query *parse = root->parse;
1385 int64 offset_est = 0;
1386 int64 count_est = 0;
1387 double limit_tuples = -1.0;
1388 bool have_postponed_srfs = false;
1389 PathTarget *final_target;
1390 List *final_targets;
1391 List *final_targets_contain_srfs;
1392 bool final_target_parallel_safe;
1393 RelOptInfo *current_rel;
1394 RelOptInfo *final_rel;
1395 FinalPathExtraData extra;
1396 ListCell *lc;
1397
1398 /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
1399 if (parse->limitCount || parse->limitOffset)
1400 {
1401 tuple_fraction = preprocess_limit(root, tuple_fraction,
1402 &offset_est, &count_est);
1403
1404 /*
1405 * If we have a known LIMIT, and don't have an unknown OFFSET, we can
1406 * estimate the effects of using a bounded sort.
1407 */
1408 if (count_est > 0 && offset_est >= 0)
1409 limit_tuples = (double) count_est + (double) offset_est;
1410 }
1411
1412 /* Make tuple_fraction accessible to lower-level routines */
1413 root->tuple_fraction = tuple_fraction;
1414
1415 if (parse->setOperations)
1416 {
1417 /*
1418 * Construct Paths for set operations. The results will not need any
1419 * work except perhaps a top-level sort and/or LIMIT. Note that any
1420 * special work for recursive unions is the responsibility of
1421 * plan_set_operations.
1422 */
1423 current_rel = plan_set_operations(root);
1424
1425 /*
1426 * We should not need to call preprocess_targetlist, since we must be
1427 * in a SELECT query node. Instead, use the processed_tlist returned
1428 * by plan_set_operations (since this tells whether it returned any
1429 * resjunk columns!), and transfer any sort key information from the
1430 * original tlist.
1431 */
1432 Assert(parse->commandType == CMD_SELECT);
1433
1434 /* for safety, copy processed_tlist instead of modifying in-place */
1435 root->processed_tlist =
1436 postprocess_setop_tlist(copyObject(root->processed_tlist),
1437 parse->targetList);
1438
1439 /* Also extract the PathTarget form of the setop result tlist */
1440 final_target = current_rel->cheapest_total_path->pathtarget;
1441
1442 /* And check whether it's parallel safe */
1443 final_target_parallel_safe =
1444 is_parallel_safe(root, (Node *) final_target->exprs);
1445
1446 /* The setop result tlist couldn't contain any SRFs */
1447 Assert(!parse->hasTargetSRFs);
1448 final_targets = final_targets_contain_srfs = NIL;
1449
1450 /*
1451 * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have
1452 * checked already, but let's make sure).
1453 */
1454 if (parse->rowMarks)
1455 ereport(ERROR,
1456 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1457 /*------
1458 translator: %s is a SQL row locking clause such as FOR UPDATE */
1459 errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
1460 LCS_asString(linitial_node(RowMarkClause,
1461 parse->rowMarks)->strength))));
1462
1463 /*
1464 * Calculate pathkeys that represent result ordering requirements
1465 */
1466 Assert(parse->distinctClause == NIL);
1467 root->sort_pathkeys = make_pathkeys_for_sortclauses(root,
1468 parse->sortClause,
1469 root->processed_tlist);
1470 }
1471 else
1472 {
1473 /* No set operations, do regular planning */
1474 PathTarget *sort_input_target;
1475 List *sort_input_targets;
1476 List *sort_input_targets_contain_srfs;
1477 bool sort_input_target_parallel_safe;
1478 PathTarget *grouping_target;
1479 List *grouping_targets;
1480 List *grouping_targets_contain_srfs;
1481 bool grouping_target_parallel_safe;
1482 PathTarget *scanjoin_target;
1483 List *scanjoin_targets;
1484 List *scanjoin_targets_contain_srfs;
1485 bool scanjoin_target_parallel_safe;
1486 bool scanjoin_target_same_exprs;
1487 bool have_grouping;
1488 WindowFuncLists *wflists = NULL;
1489 List *activeWindows = NIL;
1490 grouping_sets_data *gset_data = NULL;
1491 standard_qp_extra qp_extra;
1492
1493 /* A recursive query should always have setOperations */
1494 Assert(!root->hasRecursion);
1495
1496 /* Preprocess grouping sets and GROUP BY clause, if any */
1497 if (parse->groupingSets)
1498 {
1499 gset_data = preprocess_grouping_sets(root);
1500 }
1501 else if (parse->groupClause)
1502 {
1503 /* Preprocess regular GROUP BY clause, if any */
1504 root->processed_groupClause = preprocess_groupclause(root, NIL);
1505 }
1506
1507 /*
1508 * Preprocess targetlist. Note that much of the remaining planning
1509 * work will be done with the PathTarget representation of tlists, but
1510 * we must also maintain the full representation of the final tlist so
1511 * that we can transfer its decoration (resnames etc) to the topmost
1512 * tlist of the finished Plan. This is kept in processed_tlist.
1513 */
1514 preprocess_targetlist(root);
1515
1516 /*
1517 * Mark all the aggregates with resolved aggtranstypes, and detect
1518 * aggregates that are duplicates or can share transition state. We
1519 * must do this before slicing and dicing the tlist into various
1520 * pathtargets, else some copies of the Aggref nodes might escape
1521 * being marked.
1522 */
1523 if (parse->hasAggs)
1524 {
1525 preprocess_aggrefs(root, (Node *) root->processed_tlist);
1526 preprocess_aggrefs(root, (Node *) parse->havingQual);
1527 }
1528
1529 /*
1530 * Locate any window functions in the tlist. (We don't need to look
1531 * anywhere else, since expressions used in ORDER BY will be in there
1532 * too.) Note that they could all have been eliminated by constant
1533 * folding, in which case we don't need to do any more work.
1534 */
1535 if (parse->hasWindowFuncs)
1536 {
1537 wflists = find_window_functions((Node *) root->processed_tlist,
1538 list_length(parse->windowClause));
1539 if (wflists->numWindowFuncs > 0)
1540 {
1541 /*
1542 * See if any modifications can be made to each WindowClause
1543 * to allow the executor to execute the WindowFuncs more
1544 * quickly.
1545 */
1546 optimize_window_clauses(root, wflists);
1547
1548 /* Extract the list of windows actually in use. */
1549 activeWindows = select_active_windows(root, wflists);
1550
1551 /* Make sure they all have names, for EXPLAIN's use. */
1552 name_active_windows(activeWindows);
1553 }
1554 else
1555 parse->hasWindowFuncs = false;
1556 }
1557
1558 /*
1559 * Preprocess MIN/MAX aggregates, if any. Note: be careful about
1560 * adding logic between here and the query_planner() call. Anything
1561 * that is needed in MIN/MAX-optimizable cases will have to be
1562 * duplicated in planagg.c.
1563 */
1564 if (parse->hasAggs)
1565 preprocess_minmax_aggregates(root);
1566
1567 /*
1568 * Figure out whether there's a hard limit on the number of rows that
1569 * query_planner's result subplan needs to return. Even if we know a
1570 * hard limit overall, it doesn't apply if the query has any
1571 * grouping/aggregation operations, or SRFs in the tlist.
1572 */
1573 if (parse->groupClause ||
1574 parse->groupingSets ||
1575 parse->distinctClause ||
1576 parse->hasAggs ||
1577 parse->hasWindowFuncs ||
1578 parse->hasTargetSRFs ||
1579 root->hasHavingQual)
1580 root->limit_tuples = -1.0;
1581 else
1582 root->limit_tuples = limit_tuples;
1583
1584 /* Set up data needed by standard_qp_callback */
1585 qp_extra.activeWindows = activeWindows;
1586 qp_extra.gset_data = gset_data;
1587
1588 /*
1589 * If we're a subquery for a set operation, store the SetOperationStmt
1590 * in qp_extra.
1591 */
1592 qp_extra.setop = setops;
1593
1594 /*
1595 * Generate the best unsorted and presorted paths for the scan/join
1596 * portion of this Query, ie the processing represented by the
1597 * FROM/WHERE clauses. (Note there may not be any presorted paths.)
1598 * We also generate (in standard_qp_callback) pathkey representations
1599 * of the query's sort clause, distinct clause, etc.
1600 */
1601 current_rel = query_planner(root, standard_qp_callback, &qp_extra);
1602
1603 /*
1604 * Convert the query's result tlist into PathTarget format.
1605 *
1606 * Note: this cannot be done before query_planner() has performed
1607 * appendrel expansion, because that might add resjunk entries to
1608 * root->processed_tlist. Waiting till afterwards is also helpful
1609 * because the target width estimates can use per-Var width numbers
1610 * that were obtained within query_planner().
1611 */
1612 final_target = create_pathtarget(root, root->processed_tlist);
1613 final_target_parallel_safe =
1614 is_parallel_safe(root, (Node *) final_target->exprs);
1615
1616 /*
1617 * If ORDER BY was given, consider whether we should use a post-sort
1618 * projection, and compute the adjusted target for preceding steps if
1619 * so.
1620 */
1621 if (parse->sortClause)
1622 {
1623 sort_input_target = make_sort_input_target(root,
1624 final_target,
1625 &have_postponed_srfs);
1626 sort_input_target_parallel_safe =
1627 is_parallel_safe(root, (Node *) sort_input_target->exprs);
1628 }
1629 else
1630 {
1631 sort_input_target = final_target;
1632 sort_input_target_parallel_safe = final_target_parallel_safe;
1633 }
1634
1635 /*
1636 * If we have window functions to deal with, the output from any
1637 * grouping step needs to be what the window functions want;
1638 * otherwise, it should be sort_input_target.
1639 */
1640 if (activeWindows)
1641 {
1642 grouping_target = make_window_input_target(root,
1643 final_target,
1644 activeWindows);
1645 grouping_target_parallel_safe =
1646 is_parallel_safe(root, (Node *) grouping_target->exprs);
1647 }
1648 else
1649 {
1650 grouping_target = sort_input_target;
1651 grouping_target_parallel_safe = sort_input_target_parallel_safe;
1652 }
1653
1654 /*
1655 * If we have grouping or aggregation to do, the topmost scan/join
1656 * plan node must emit what the grouping step wants; otherwise, it
1657 * should emit grouping_target.
1658 */
1659 have_grouping = (parse->groupClause || parse->groupingSets ||
1660 parse->hasAggs || root->hasHavingQual);
1661 if (have_grouping)
1662 {
1663 scanjoin_target = make_group_input_target(root, final_target);
1664 scanjoin_target_parallel_safe =
1665 is_parallel_safe(root, (Node *) scanjoin_target->exprs);
1666 }
1667 else
1668 {
1669 scanjoin_target = grouping_target;
1670 scanjoin_target_parallel_safe = grouping_target_parallel_safe;
1671 }
1672
1673 /*
1674 * If there are any SRFs in the targetlist, we must separate each of
1675 * these PathTargets into SRF-computing and SRF-free targets. Replace
1676 * each of the named targets with a SRF-free version, and remember the
1677 * list of additional projection steps we need to add afterwards.
1678 */
1679 if (parse->hasTargetSRFs)
1680 {
1681 /* final_target doesn't recompute any SRFs in sort_input_target */
1682 split_pathtarget_at_srfs(root, final_target, sort_input_target,
1683 &final_targets,
1684 &final_targets_contain_srfs);
1685 final_target = linitial_node(PathTarget, final_targets);
1686 Assert(!linitial_int(final_targets_contain_srfs));
1687 /* likewise for sort_input_target vs. grouping_target */
1688 split_pathtarget_at_srfs(root, sort_input_target, grouping_target,
1689 &sort_input_targets,
1690 &sort_input_targets_contain_srfs);
1691 sort_input_target = linitial_node(PathTarget, sort_input_targets);
1692 Assert(!linitial_int(sort_input_targets_contain_srfs));
1693 /* likewise for grouping_target vs. scanjoin_target */
1694 split_pathtarget_at_srfs(root, grouping_target, scanjoin_target,
1695 &grouping_targets,
1696 &grouping_targets_contain_srfs);
1697 grouping_target = linitial_node(PathTarget, grouping_targets);
1698 Assert(!linitial_int(grouping_targets_contain_srfs));
1699 /* scanjoin_target will not have any SRFs precomputed for it */
1700 split_pathtarget_at_srfs(root, scanjoin_target, NULL,
1701 &scanjoin_targets,
1702 &scanjoin_targets_contain_srfs);
1703 scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
1704 Assert(!linitial_int(scanjoin_targets_contain_srfs));
1705 }
1706 else
1707 {
1708 /* initialize lists; for most of these, dummy values are OK */
1709 final_targets = final_targets_contain_srfs = NIL;
1710 sort_input_targets = sort_input_targets_contain_srfs = NIL;
1711 grouping_targets = grouping_targets_contain_srfs = NIL;
1712 scanjoin_targets = list_make1(scanjoin_target);
1713 scanjoin_targets_contain_srfs = NIL;
1714 }
1715
1716 /* Apply scan/join target. */
1717 scanjoin_target_same_exprs = list_length(scanjoin_targets) == 1
1718 && equal(scanjoin_target->exprs, current_rel->reltarget->exprs);
1719 apply_scanjoin_target_to_paths(root, current_rel, scanjoin_targets,
1720 scanjoin_targets_contain_srfs,
1721 scanjoin_target_parallel_safe,
1722 scanjoin_target_same_exprs);
1723
1724 /*
1725 * Save the various upper-rel PathTargets we just computed into
1726 * root->upper_targets[]. The core code doesn't use this, but it
1727 * provides a convenient place for extensions to get at the info. For
1728 * consistency, we save all the intermediate targets, even though some
1729 * of the corresponding upperrels might not be needed for this query.
1730 */
1731 root->upper_targets[UPPERREL_FINAL] = final_target;
1732 root->upper_targets[UPPERREL_ORDERED] = final_target;
1733 root->upper_targets[UPPERREL_DISTINCT] = sort_input_target;
1734 root->upper_targets[UPPERREL_PARTIAL_DISTINCT] = sort_input_target;
1735 root->upper_targets[UPPERREL_WINDOW] = sort_input_target;
1736 root->upper_targets[UPPERREL_GROUP_AGG] = grouping_target;
1737
1738 /*
1739 * If we have grouping and/or aggregation, consider ways to implement
1740 * that. We build a new upperrel representing the output of this
1741 * phase.
1742 */
1743 if (have_grouping)
1744 {
1745 current_rel = create_grouping_paths(root,
1746 current_rel,
1747 grouping_target,
1748 grouping_target_parallel_safe,
1749 gset_data);
1750 /* Fix things up if grouping_target contains SRFs */
1751 if (parse->hasTargetSRFs)
1752 adjust_paths_for_srfs(root, current_rel,
1753 grouping_targets,
1754 grouping_targets_contain_srfs);
1755 }
1756
1757 /*
1758 * If we have window functions, consider ways to implement those. We
1759 * build a new upperrel representing the output of this phase.
1760 */
1761 if (activeWindows)
1762 {
1763 current_rel = create_window_paths(root,
1764 current_rel,
1765 grouping_target,
1766 sort_input_target,
1767 sort_input_target_parallel_safe,
1768 wflists,
1769 activeWindows);
1770 /* Fix things up if sort_input_target contains SRFs */
1771 if (parse->hasTargetSRFs)
1772 adjust_paths_for_srfs(root, current_rel,
1773 sort_input_targets,
1774 sort_input_targets_contain_srfs);
1775 }
1776
1777 /*
1778 * If there is a DISTINCT clause, consider ways to implement that. We
1779 * build a new upperrel representing the output of this phase.
1780 */
1781 if (parse->distinctClause)
1782 {
1783 current_rel = create_distinct_paths(root,
1784 current_rel,
1785 sort_input_target);
1786 }
1787 } /* end of if (setOperations) */
1788
1789 /*
1790 * If ORDER BY was given, consider ways to implement that, and generate a
1791 * new upperrel containing only paths that emit the correct ordering and
1792 * project the correct final_target. We can apply the original
1793 * limit_tuples limit in sort costing here, but only if there are no
1794 * postponed SRFs.
1795 */
1796 if (parse->sortClause)
1797 {
1798 current_rel = create_ordered_paths(root,
1799 current_rel,
1800 final_target,
1801 final_target_parallel_safe,
1802 have_postponed_srfs ? -1.0 :
1803 limit_tuples);
1804 /* Fix things up if final_target contains SRFs */
1805 if (parse->hasTargetSRFs)
1806 adjust_paths_for_srfs(root, current_rel,
1807 final_targets,
1808 final_targets_contain_srfs);
1809 }
1810
1811 /*
1812 * Now we are prepared to build the final-output upperrel.
1813 */
1814 final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1815
1816 /*
1817 * If the input rel is marked consider_parallel and there's nothing that's
1818 * not parallel-safe in the LIMIT clause, then the final_rel can be marked
1819 * consider_parallel as well. Note that if the query has rowMarks or is
1820 * not a SELECT, consider_parallel will be false for every relation in the
1821 * query.
1822 */
1823 if (current_rel->consider_parallel &&
1824 is_parallel_safe(root, parse->limitOffset) &&
1825 is_parallel_safe(root, parse->limitCount))
1826 final_rel->consider_parallel = true;
1827
1828 /*
1829 * If the current_rel belongs to a single FDW, so does the final_rel.
1830 */
1831 final_rel->serverid = current_rel->serverid;
1832 final_rel->userid = current_rel->userid;
1833 final_rel->useridiscurrent = current_rel->useridiscurrent;
1834 final_rel->fdwroutine = current_rel->fdwroutine;
1835
1836 /*
1837 * Generate paths for the final_rel. Insert all surviving paths, with
1838 * LockRows, Limit, and/or ModifyTable steps added if needed.
1839 */
1840 foreach(lc, current_rel->pathlist)
1841 {
1842 Path *path = (Path *) lfirst(lc);
1843
1844 /*
1845 * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
1846 * (Note: we intentionally test parse->rowMarks not root->rowMarks
1847 * here. If there are only non-locking rowmarks, they should be
1848 * handled by the ModifyTable node instead. However, root->rowMarks
1849 * is what goes into the LockRows node.)
1850 */
1851 if (parse->rowMarks)
1852 {
1853 path = (Path *) create_lockrows_path(root, final_rel, path,
1854 root->rowMarks,
1855 assign_special_exec_param(root));
1856 }
1857
1858 /*
1859 * If there is a LIMIT/OFFSET clause, add the LIMIT node.
1860 */
1861 if (limit_needed(parse))
1862 {
1863 path = (Path *) create_limit_path(root, final_rel, path,
1864 parse->limitOffset,
1865 parse->limitCount,
1866 parse->limitOption,
1867 offset_est, count_est);
1868 }
1869
1870 /*
1871 * If this is an INSERT/UPDATE/DELETE/MERGE, add the ModifyTable node.
1872 */
1873 if (parse->commandType != CMD_SELECT)
1874 {
1875 Index rootRelation;
1876 List *resultRelations = NIL;
1877 List *updateColnosLists = NIL;
1878 List *withCheckOptionLists = NIL;
1879 List *returningLists = NIL;
1880 List *mergeActionLists = NIL;
1881 List *mergeJoinConditions = NIL;
1882 List *rowMarks;
1883
1884 if (bms_membership(root->all_result_relids) == BMS_MULTIPLE)
1885 {
1886 /* Inherited UPDATE/DELETE/MERGE */
1887 RelOptInfo *top_result_rel = find_base_rel(root,
1888 parse->resultRelation);
1889 int resultRelation = -1;
1890
1891 /* Pass the root result rel forward to the executor. */
1892 rootRelation = parse->resultRelation;
1893
1894 /* Add only leaf children to ModifyTable. */
1895 while ((resultRelation = bms_next_member(root->leaf_result_relids,
1896 resultRelation)) >= 0)
1897 {
1898 RelOptInfo *this_result_rel = find_base_rel(root,
1899 resultRelation);
1900
1901 /*
1902 * Also exclude any leaf rels that have turned dummy since
1903 * being added to the list, for example, by being excluded
1904 * by constraint exclusion.
1905 */
1906 if (IS_DUMMY_REL(this_result_rel))
1907 continue;
1908
1909 /* Build per-target-rel lists needed by ModifyTable */
1910 resultRelations = lappend_int(resultRelations,
1911 resultRelation);
1912 if (parse->commandType == CMD_UPDATE)
1913 {
1914 List *update_colnos = root->update_colnos;
1915
1916 if (this_result_rel != top_result_rel)
1917 update_colnos =
1918 adjust_inherited_attnums_multilevel(root,
1919 update_colnos,
1920 this_result_rel->relid,
1921 top_result_rel->relid);
1922 updateColnosLists = lappend(updateColnosLists,
1923 update_colnos);
1924 }
1925 if (parse->withCheckOptions)
1926 {
1927 List *withCheckOptions = parse->withCheckOptions;
1928
1929 if (this_result_rel != top_result_rel)
1930 withCheckOptions = (List *)
1931 adjust_appendrel_attrs_multilevel(root,
1932 (Node *) withCheckOptions,
1933 this_result_rel,
1934 top_result_rel);
1935 withCheckOptionLists = lappend(withCheckOptionLists,
1936 withCheckOptions);
1937 }
1938 if (parse->returningList)
1939 {
1940 List *returningList = parse->returningList;
1941
1942 if (this_result_rel != top_result_rel)
1943 returningList = (List *)
1944 adjust_appendrel_attrs_multilevel(root,
1945 (Node *) returningList,
1946 this_result_rel,
1947 top_result_rel);
1948 returningLists = lappend(returningLists,
1949 returningList);
1950 }
1951 if (parse->mergeActionList)
1952 {
1953 ListCell *l;
1954 List *mergeActionList = NIL;
1955
1956 /*
1957 * Copy MergeActions and translate stuff that
1958 * references attribute numbers.
1959 */
1960 foreach(l, parse->mergeActionList)
1961 {
1962 MergeAction *action = lfirst_node(MergeAction, l),
1963 *leaf_action = copyObject(action);
1964
1965 leaf_action->qual =
1966 adjust_appendrel_attrs_multilevel(root,
1967 (Node *) action->qual,
1968 this_result_rel,
1969 top_result_rel);
1970 leaf_action->targetList = (List *)
1971 adjust_appendrel_attrs_multilevel(root,
1972 (Node *) action->targetList,
1973 this_result_rel,
1974 top_result_rel);
1975 if (leaf_action->commandType == CMD_UPDATE)
1976 leaf_action->updateColnos =
1977 adjust_inherited_attnums_multilevel(root,
1978 action->updateColnos,
1979 this_result_rel->relid,
1980 top_result_rel->relid);
1981 mergeActionList = lappend(mergeActionList,
1982 leaf_action);
1983 }
1984
1985 mergeActionLists = lappend(mergeActionLists,
1986 mergeActionList);
1987 }
1988 if (parse->commandType == CMD_MERGE)
1989 {
1990 Node *mergeJoinCondition = parse->mergeJoinCondition;
1991
1992 if (this_result_rel != top_result_rel)
1993 mergeJoinCondition =
1994 adjust_appendrel_attrs_multilevel(root,
1995 mergeJoinCondition,
1996 this_result_rel,
1997 top_result_rel);
1998 mergeJoinConditions = lappend(mergeJoinConditions,
1999 mergeJoinCondition);
2000 }
2001 }
2002
2003 if (resultRelations == NIL)
2004 {
2005 /*
2006 * We managed to exclude every child rel, so generate a
2007 * dummy one-relation plan using info for the top target
2008 * rel (even though that may not be a leaf target).
2009 * Although it's clear that no data will be updated or
2010 * deleted, we still need to have a ModifyTable node so
2011 * that any statement triggers will be executed. (This
2012 * could be cleaner if we fixed nodeModifyTable.c to allow
2013 * zero target relations, but that probably wouldn't be a
2014 * net win.)
2015 */
2016 resultRelations = list_make1_int(parse->resultRelation);
2017 if (parse->commandType == CMD_UPDATE)
2018 updateColnosLists = list_make1(root->update_colnos);
2019 if (parse->withCheckOptions)
2020 withCheckOptionLists = list_make1(parse->withCheckOptions);
2021 if (parse->returningList)
2022 returningLists = list_make1(parse->returningList);
2023 if (parse->mergeActionList)
2024 mergeActionLists = list_make1(parse->mergeActionList);
2025 if (parse->commandType == CMD_MERGE)
2026 mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2027 }
2028 }
2029 else
2030 {
2031 /* Single-relation INSERT/UPDATE/DELETE/MERGE. */
2032 rootRelation = 0; /* there's no separate root rel */
2033 resultRelations = list_make1_int(parse->resultRelation);
2034 if (parse->commandType == CMD_UPDATE)
2035 updateColnosLists = list_make1(root->update_colnos);
2036 if (parse->withCheckOptions)
2037 withCheckOptionLists = list_make1(parse->withCheckOptions);
2038 if (parse->returningList)
2039 returningLists = list_make1(parse->returningList);
2040 if (parse->mergeActionList)
2041 mergeActionLists = list_make1(parse->mergeActionList);
2042 if (parse->commandType == CMD_MERGE)
2043 mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2044 }
2045
2046 /*
2047 * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
2048 * will have dealt with fetching non-locked marked rows, else we
2049 * need to have ModifyTable do that.
2050 */
2051 if (parse->rowMarks)
2052 rowMarks = NIL;
2053 else
2054 rowMarks = root->rowMarks;
2055
2056 path = (Path *)
2057 create_modifytable_path(root, final_rel,
2058 path,
2059 parse->commandType,
2060 parse->canSetTag,
2061 parse->resultRelation,
2062 rootRelation,
2063 root->partColsUpdated,
2064 resultRelations,
2065 updateColnosLists,
2066 withCheckOptionLists,
2067 returningLists,
2068 rowMarks,
2069 parse->onConflict,
2070 mergeActionLists,
2071 mergeJoinConditions,
2072 assign_special_exec_param(root));
2073 }
2074
2075 /* And shove it into final_rel */
2076 add_path(final_rel, path);
2077 }
2078
2079 /*
2080 * Generate partial paths for final_rel, too, if outer query levels might
2081 * be able to make use of them.
2082 */
2083 if (final_rel->consider_parallel && root->query_level > 1 &&
2084 !limit_needed(parse))
2085 {
2086 Assert(!parse->rowMarks && parse->commandType == CMD_SELECT);
2087 foreach(lc, current_rel->partial_pathlist)
2088 {
2089 Path *partial_path = (Path *) lfirst(lc);
2090
2091 add_partial_path(final_rel, partial_path);
2092 }
2093 }
2094
2095 extra.limit_needed = limit_needed(parse);
2096 extra.limit_tuples = limit_tuples;
2097 extra.count_est = count_est;
2098 extra.offset_est = offset_est;
2099
2100 /*
2101 * If there is an FDW that's responsible for all baserels of the query,
2102 * let it consider adding ForeignPaths.
2103 */
2104 if (final_rel->fdwroutine &&
2105 final_rel->fdwroutine->GetForeignUpperPaths)
2106 final_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_FINAL,
2107 current_rel, final_rel,
2108 &extra);
2109
2110 /* Let extensions possibly add some more paths */
2111 if (create_upper_paths_hook)
2112 (*create_upper_paths_hook) (root, UPPERREL_FINAL,
2113 current_rel, final_rel, &extra);
2114
2115 /* Note: currently, we leave it to callers to do set_cheapest() */
2116}
List * adjust_inherited_attnums_multilevel(PlannerInfo *root, List *attnums, Index child_relid, Index top_parent_relid)
Definition: appendinfo.c:682
Node * adjust_appendrel_attrs_multilevel(PlannerInfo *root, Node *node, RelOptInfo *childrel, RelOptInfo *parentrel)
Definition: appendinfo.c:541
BMS_Membership bms_membership(const Bitmapset *a)
Definition: bitmapset.c:781
@ BMS_MULTIPLE
Definition: bitmapset.h:73
unsigned int Index
Definition: c.h:585
WindowFuncLists * find_window_functions(Node *clause, Index maxWinRef)
Definition: clauses.c:229
List * lappend_int(List *list, int datum)
Definition: list.c:357
@ CMD_MERGE
Definition: nodes.h:275
@ CMD_UPDATE
Definition: nodes.h:272
@ CMD_SELECT
Definition: nodes.h:271
int assign_special_exec_param(PlannerInfo *root)
Definition: paramassign.c:711
const char * LCS_asString(LockClauseStrength strength)
Definition: analyze.c:3446
LockRowsPath * create_lockrows_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *rowMarks, int epqParam)
Definition: pathnode.c:3813
ModifyTablePath * create_modifytable_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, CmdType operation, bool canSetTag, Index nominalRelation, Index rootRelation, bool partColsUpdated, List *resultRelations, List *updateColnosLists, List *withCheckOptionLists, List *returningLists, List *rowMarks, OnConflictExpr *onconflict, List *mergeActionLists, List *mergeJoinConditions, int epqParam)
Definition: pathnode.c:3877
@ UPPERREL_FINAL
Definition: pathnodes.h:79
#define list_make1_int(x1)
Definition: pg_list.h:227
void preprocess_minmax_aggregates(PlannerInfo *root)
Definition: planagg.c:73
RelOptInfo * query_planner(PlannerInfo *root, query_pathkeys_callback qp_callback, void *qp_extra)
Definition: planmain.c:54
static List * postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
Definition: planner.c:5679
static double preprocess_limit(PlannerInfo *root, double tuple_fraction, int64 *offset_est, int64 *count_est)
Definition: planner.c:2521
static PathTarget * make_window_input_target(PlannerInfo *root, PathTarget *final_target, List *activeWindows)
Definition: planner.c:6059
static RelOptInfo * create_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target)
Definition: planner.c:4691
static void optimize_window_clauses(PlannerInfo *root, WindowFuncLists *wflists)
Definition: planner.c:5716
static void name_active_windows(List *activeWindows)
Definition: planner.c:5939
static PathTarget * make_sort_input_target(PlannerInfo *root, PathTarget *final_target, bool *have_postponed_srfs)
Definition: planner.c:6307
static grouping_sets_data * preprocess_grouping_sets(PlannerInfo *root)
Definition: planner.c:2125
static PathTarget * make_group_input_target(PlannerInfo *root, PathTarget *final_target)
Definition: planner.c:5429
static List * select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
Definition: planner.c:5856
bool limit_needed(Query *parse)
Definition: planner.c:2706
static RelOptInfo * create_ordered_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, double limit_tuples)
Definition: planner.c:5209
static RelOptInfo * create_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, grouping_sets_data *gd)
Definition: planner.c:3681
static void standard_qp_callback(PlannerInfo *root, void *extra)
Definition: planner.c:3354
static RelOptInfo * create_window_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *input_target, PathTarget *output_target, bool output_target_parallel_safe, WindowFuncLists *wflists, List *activeWindows)
Definition: planner.c:4434
void preprocess_aggrefs(PlannerInfo *root, Node *clause)
Definition: prepagg.c:110
void preprocess_targetlist(PlannerInfo *root)
Definition: preptlist.c:63
RelOptInfo * plan_set_operations(PlannerInfo *root)
Definition: prepunion.c:93
RelOptInfo * find_base_rel(PlannerInfo *root, int relid)
Definition: relnode.c:414
Cardinality limit_tuples
Definition: pathnodes.h:3366
Index relid
Definition: pathnodes.h:945
int numWindowFuncs
Definition: clauses.h:21
List * activeWindows
Definition: planner.c:124
grouping_sets_data * gset_data
Definition: planner.c:125
SetOperationStmt * setop
Definition: planner.c:126
void split_pathtarget_at_srfs(PlannerInfo *root, PathTarget *target, PathTarget *input_target, List **targets, List **targets_contain_srfs)
Definition: tlist.c:881
#define create_pathtarget(root, tlist)
Definition: tlist.h:53

References generate_unaccent_rules::action, standard_qp_extra::activeWindows, add_partial_path(), add_path(), adjust_appendrel_attrs_multilevel(), adjust_inherited_attnums_multilevel(), adjust_paths_for_srfs(), apply_scanjoin_target_to_paths(), Assert(), assign_special_exec_param(), bms_membership(), BMS_MULTIPLE, bms_next_member(), RelOptInfo::cheapest_total_path, CMD_MERGE, CMD_SELECT, CMD_UPDATE, RelOptInfo::consider_parallel, copyObject, FinalPathExtraData::count_est, create_distinct_paths(), create_grouping_paths(), create_limit_path(), create_lockrows_path(), create_modifytable_path(), create_ordered_paths(), create_pathtarget, create_upper_paths_hook, create_window_paths(), equal(), ereport, errcode(), errmsg(), ERROR, PathTarget::exprs, fetch_upper_rel(), find_base_rel(), find_window_functions(), standard_qp_extra::gset_data, IS_DUMMY_REL, is_parallel_safe(), lappend(), lappend_int(), LCS_asString(), lfirst, limit_needed(), FinalPathExtraData::limit_needed, FinalPathExtraData::limit_tuples, linitial_int, linitial_node, list_length(), list_make1, list_make1_int, make_group_input_target(), make_pathkeys_for_sortclauses(), make_sort_input_target(), make_window_input_target(), name_active_windows(), NIL, WindowFuncLists::numWindowFuncs, FinalPathExtraData::offset_est, optimize_window_clauses(), parse(), RelOptInfo::partial_pathlist, RelOptInfo::pathlist, plan_set_operations(), postprocess_setop_tlist(), preprocess_aggrefs(), preprocess_groupclause(), preprocess_grouping_sets(), preprocess_limit(), preprocess_minmax_aggregates(), preprocess_targetlist(), query_planner(), RelOptInfo::relid, RelOptInfo::reltarget, root, select_active_windows(), RelOptInfo::serverid, standard_qp_extra::setop, split_pathtarget_at_srfs(), standard_qp_callback(), UPPERREL_DISTINCT, UPPERREL_FINAL, UPPERREL_GROUP_AGG, UPPERREL_ORDERED, UPPERREL_PARTIAL_DISTINCT, UPPERREL_WINDOW, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by subquery_planner().

◆ has_volatile_pathkey()

static bool has_volatile_pathkey ( List *keys )
static

Definition at line 3128 of file planner.c.

3129{
3130 ListCell *lc;
3131
3132 foreach(lc, keys)
3133 {
3134 PathKey *pathkey = lfirst_node(PathKey, lc);
3135
3136 if (pathkey->pk_eclass->ec_has_volatile)
3137 return true;
3138 }
3139
3140 return false;
3141}

References lfirst_node.

Referenced by adjust_group_pathkeys_for_groupagg().

◆ is_degenerate_grouping()

static bool is_degenerate_grouping ( PlannerInfo *root )
static

Definition at line 3847 of file planner.c.

3848{
3849 Query *parse = root->parse;
3850
3851 return (root->hasHavingQual || parse->groupingSets) &&
3852 !parse->hasAggs && parse->groupClause == NIL;
3853}

References NIL, parse(), and root.

Referenced by create_grouping_paths().

◆ limit_needed()

bool limit_needed ( Query *parse )

Definition at line 2706 of file planner.c.

2707{
2708 Node *node;
2709
2710 node = parse->limitCount;
2711 if (node)
2712 {
2713 if (IsA(node, Const))
2714 {
2715 /* NULL indicates LIMIT ALL, ie, no limit */
2716 if (!((Const *) node)->constisnull)
2717 return true; /* LIMIT with a constant value */
2718 }
2719 else
2720 return true; /* non-constant LIMIT */
2721 }
2722
2723 node = parse->limitOffset;
2724 if (node)
2725 {
2726 if (IsA(node, Const))
2727 {
2728 /* Treat NULL as no offset; the executor would too */
2729 if (!((Const *) node)->constisnull)
2730 {
2731 int64 offset = DatumGetInt64(((Const *) node)->constvalue);
2732
2733 if (offset != 0)
2734 return true; /* OFFSET with a nonzero value */
2735 }
2736 }
2737 else
2738 return true; /* non-constant OFFSET */
2739 }
2740
2741 return false; /* don't need a Limit plan node */
2742}
static int64 DatumGetInt64(Datum X)
Definition: postgres.h:390

References DatumGetInt64(), IsA, and parse().

Referenced by grouping_planner(), and set_rel_consider_parallel().
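
As a usage illustration (not verbatim source): callers test limit_needed() before building a LimitPath, so that LIMIT ALL and OFFSET 0 add no executor overhead. The locals and estimate variables below are placeholders.

    if (limit_needed(parse))
    {
        /* Wrap the chosen path in a Limit node only when it changes the result. */
        path = (Path *) create_limit_path(root, final_rel, path,
                                          parse->limitOffset,
                                          parse->limitCount,
                                          parse->limitOption,
                                          offset_est, count_est);
    }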

◆ make_group_input_target()

static PathTarget * make_group_input_target ( PlannerInfo *root,
PathTarget *final_target 
)
static

Definition at line 5429 of file planner.c.

5430{
5431 Query *parse = root->parse;
5432 PathTarget *input_target;
5433 List *non_group_cols;
5434 List *non_group_vars;
5435 int i;
5436 ListCell *lc;
5437
5438 /*
5439 * We must build a target containing all grouping columns, plus any other
5440 * Vars mentioned in the query's targetlist and HAVING qual.
5441 */
5442 input_target = create_empty_pathtarget();
5443 non_group_cols = NIL;
5444
5445 i = 0;
5446 foreach(lc, final_target->exprs)
5447 {
5448 Expr *expr = (Expr *) lfirst(lc);
5449 Index sgref = get_pathtarget_sortgroupref(final_target, i);
5450
5451 if (sgref && root->processed_groupClause &&
5452 get_sortgroupref_clause_noerr(sgref,
5453 root->processed_groupClause) != NULL)
5454 {
5455 /*
5456 * It's a grouping column, so add it to the input target as-is.
5457 *
5458 * Note that the target is logically below the grouping step. So
5459 * with grouping sets we need to remove the RT index of the
5460 * grouping step if there is any from the target expression.
5461 */
5462 if (parse->hasGroupRTE && parse->groupingSets != NIL)
5463 {
5464 Assert(root->group_rtindex > 0);
5465 expr = (Expr *)
5466 remove_nulling_relids((Node *) expr,
5467 bms_make_singleton(root->group_rtindex),
5468 NULL);
5469 }
5470 add_column_to_pathtarget(input_target, expr, sgref);
5471 }
5472 else
5473 {
5474 /*
5475 * Non-grouping column, so just remember the expression for later
5476 * call to pull_var_clause.
5477 */
5478 non_group_cols = lappend(non_group_cols, expr);
5479 }
5480
5481 i++;
5482 }
5483
5484 /*
5485 * If there's a HAVING clause, we'll need the Vars it uses, too.
5486 */
5487 if (parse->havingQual)
5488 non_group_cols = lappend(non_group_cols, parse->havingQual);
5489
5490 /*
5491 * Pull out all the Vars mentioned in non-group cols (plus HAVING), and
5492 * add them to the input target if not already present. (A Var used
5493 * directly as a GROUP BY item will be present already.) Note this
5494 * includes Vars used in resjunk items, so we are covering the needs of
5495 * ORDER BY and window specifications. Vars used within Aggrefs and
5496 * WindowFuncs will be pulled out here, too.
5497 *
5498 * Note that the target is logically below the grouping step. So with
5499 * grouping sets we need to remove the RT index of the grouping step if
5500 * there is any from the non-group Vars.
5501 */
5502 non_group_vars = pull_var_clause((Node *) non_group_cols,
5503 PVC_RECURSE_AGGREGATES |
5504 PVC_RECURSE_WINDOWFUNCS |
5505 PVC_INCLUDE_PLACEHOLDERS);
5506 if (parse->hasGroupRTE && parse->groupingSets != NIL)
5507 {
5508 Assert(root->group_rtindex > 0);
5509 non_group_vars = (List *)
5510 remove_nulling_relids((Node *) non_group_vars,
5511 bms_make_singleton(root->group_rtindex),
5512 NULL);
5513 }
5514 add_new_columns_to_pathtarget(input_target, non_group_vars);
5515
5516 /* clean up cruft */
5517 list_free(non_group_vars);
5518 list_free(non_group_cols);
5519
5520 /* XXX this causes some redundant cost calculation ... */
5521 return set_pathtarget_cost_width(root, input_target);
5522}
Bitmapset * bms_make_singleton(int x)
Definition: bitmapset.c:216
PathTarget * set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
Definition: costsize.c:6352
void list_free(List *list)
Definition: list.c:1546
#define PVC_RECURSE_AGGREGATES
Definition: optimizer.h:193
#define PVC_RECURSE_WINDOWFUNCS
Definition: optimizer.h:195
#define PVC_INCLUDE_PLACEHOLDERS
Definition: optimizer.h:196
#define get_pathtarget_sortgroupref(target, colno)
Definition: pathnodes.h:1599
Node * remove_nulling_relids(Node *node, const Bitmapset *removable_relids, const Bitmapset *except_relids)
SortGroupClause * get_sortgroupref_clause_noerr(Index sortref, List *clauses)
Definition: tlist.c:443
void add_new_columns_to_pathtarget(PathTarget *target, List *exprs)
Definition: tlist.c:752
PathTarget * create_empty_pathtarget(void)
Definition: tlist.c:681
List * pull_var_clause(Node *node, int flags)
Definition: var.c:653

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), Assert(), bms_make_singleton(), create_empty_pathtarget(), PathTarget::exprs, get_pathtarget_sortgroupref, get_sortgroupref_clause_noerr(), i, lappend(), lfirst, list_free(), NIL, parse(), pull_var_clause(), PVC_INCLUDE_PLACEHOLDERS, PVC_RECURSE_AGGREGATES, PVC_RECURSE_WINDOWFUNCS, remove_nulling_relids(), root, and set_pathtarget_cost_width().

Referenced by grouping_planner().

◆ make_grouping_rel()

static RelOptInfo * make_grouping_rel ( PlannerInfo *root,
RelOptInfo *input_rel,
PathTarget *target,
bool  target_parallel_safe,
Node *havingQual 
)
static

Definition at line 3794 of file planner.c.

3797{
3798 RelOptInfo *grouped_rel;
3799
3800 if (IS_OTHER_REL(input_rel))
3801 {
3802 grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG,
3803 input_rel->relids);
3804 grouped_rel->reloptkind = RELOPT_OTHER_UPPER_REL;
3805 }
3806 else
3807 {
3808 /*
3809 * By tradition, the relids set for the main grouping relation is
3810 * NULL. (This could be changed, but might require adjustments
3811 * elsewhere.)
3812 */
3813 grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, NULL);
3814 }
3815
3816 /* Set target. */
3817 grouped_rel->reltarget = target;
3818
3819 /*
3820 * If the input relation is not parallel-safe, then the grouped relation
3821 * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
3822 * target list and HAVING quals are parallel-safe.
3823 */
3824 if (input_rel->consider_parallel && target_parallel_safe &&
3825 is_parallel_safe(root, (Node *) havingQual))
3826 grouped_rel->consider_parallel = true;
3827
3828 /*
3829 * If the input rel belongs to a single FDW, so does the grouped rel.
3830 */
3831 grouped_rel->serverid = input_rel->serverid;
3832 grouped_rel->userid = input_rel->userid;
3833 grouped_rel->useridiscurrent = input_rel->useridiscurrent;
3834 grouped_rel->fdwroutine = input_rel->fdwroutine;
3835
3836 return grouped_rel;
3837}
@ RELOPT_OTHER_UPPER_REL
Definition: pathnodes.h:859

References RelOptInfo::consider_parallel, fetch_upper_rel(), IS_OTHER_REL, is_parallel_safe(), RelOptInfo::relids, RELOPT_OTHER_UPPER_REL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, RelOptInfo::serverid, UPPERREL_GROUP_AGG, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by create_grouping_paths(), and create_partitionwise_grouping_paths().

◆ make_ordered_path()

static Path * make_ordered_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *path,
Path *cheapest_path,
List *pathkeys,
double  limit_tuples 
)
static

Definition at line 7511 of file planner.c.

7513{
7514 bool is_sorted;
7515 int presorted_keys;
7516
7517 is_sorted = pathkeys_count_contained_in(pathkeys,
7518 path->pathkeys,
7519 &presorted_keys);
7520
7521 if (!is_sorted)
7522 {
7523 /*
7524 * Try at least sorting the cheapest path and also try incrementally
7525 * sorting any path which is partially sorted already (no need to deal
7526 * with paths which have presorted keys when incremental sort is
7527 * disabled unless it's the cheapest input path).
7528 */
7529 if (path != cheapest_path &&
7530 (presorted_keys == 0 || !enable_incremental_sort))
7531 return NULL;
7532
7533 /*
7534 * We've no need to consider both a sort and incremental sort. We'll
7535 * just do a sort if there are no presorted keys and an incremental
7536 * sort when there are presorted keys.
7537 */
7538 if (presorted_keys == 0 || !enable_incremental_sort)
7539 path = (Path *) create_sort_path(root,
7540 rel,
7541 path,
7542 pathkeys,
7543 limit_tuples);
7544 else
7545 path = (Path *) create_incremental_sort_path(root,
7546 rel,
7547 path,
7548 pathkeys,
7549 presorted_keys,
7550 limit_tuples);
7551 }
7552
7553 return path;
7554}

References create_incremental_sort_path(), create_sort_path(), enable_incremental_sort, Path::pathkeys, pathkeys_count_contained_in(), and root.

Referenced by add_paths_to_grouping_rel(), create_final_distinct_paths(), create_partial_distinct_paths(), and create_partial_grouping_paths().
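
A sketch of the call pattern shared by the callers above (variable names are illustrative, not verbatim source): each candidate input path is passed through make_ordered_path(), and a NULL result means the path should simply be skipped.

    foreach(lc, input_rel->pathlist)
    {
        Path *path = (Path *) lfirst(lc);

        /* Returns the path itself, a (possibly incremental) sort on top of
         * it, or NULL if sorting this path is not worthwhile. */
        path = make_ordered_path(root, grouped_rel, path,
                                 input_rel->cheapest_total_path,
                                 root->group_pathkeys, -1.0);
        if (path == NULL)
            continue;

        /* ... add a sorted-aggregation path built on 'path' ... */
    }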

◆ make_partial_grouping_target()

static PathTarget * make_partial_grouping_target ( PlannerInfo *root,
PathTarget *grouping_target,
Node *havingQual 
)
static

Definition at line 5541 of file planner.c.

5544{
5545 PathTarget *partial_target;
5546 List *non_group_cols;
5547 List *non_group_exprs;
5548 int i;
5549 ListCell *lc;
5550
5551 partial_target = create_empty_pathtarget();
5552 non_group_cols = NIL;
5553
5554 i = 0;
5555 foreach(lc, grouping_target->exprs)
5556 {
5557 Expr *expr = (Expr *) lfirst(lc);
5558 Index sgref = get_pathtarget_sortgroupref(grouping_target, i);
5559
5560 if (sgref && root->processed_groupClause &&
5561 get_sortgroupref_clause_noerr(sgref,
5562 root->processed_groupClause) != NULL)
5563 {
5564 /*
5565 * It's a grouping column, so add it to the partial_target as-is.
5566 * (This allows the upper agg step to repeat the grouping calcs.)
5567 */
5568 add_column_to_pathtarget(partial_target, expr, sgref);
5569 }
5570 else
5571 {
5572 /*
5573 * Non-grouping column, so just remember the expression for later
5574 * call to pull_var_clause.
5575 */
5576 non_group_cols = lappend(non_group_cols, expr);
5577 }
5578
5579 i++;
5580 }
5581
5582 /*
5583 * If there's a HAVING clause, we'll need the Vars/Aggrefs it uses, too.
5584 */
5585 if (havingQual)
5586 non_group_cols = lappend(non_group_cols, havingQual);
5587
5588 /*
5589 * Pull out all the Vars, PlaceHolderVars, and Aggrefs mentioned in
5590 * non-group cols (plus HAVING), and add them to the partial_target if not
5591 * already present. (An expression used directly as a GROUP BY item will
5592 * be present already.) Note this includes Vars used in resjunk items, so
5593 * we are covering the needs of ORDER BY and window specifications.
5594 */
5595 non_group_exprs = pull_var_clause((Node *) non_group_cols,
5596 PVC_INCLUDE_AGGREGATES |
5597 PVC_RECURSE_WINDOWFUNCS |
5598 PVC_INCLUDE_PLACEHOLDERS);
5599
5600 add_new_columns_to_pathtarget(partial_target, non_group_exprs);
5601
5602 /*
5603 * Adjust Aggrefs to put them in partial mode. At this point all Aggrefs
5604 * are at the top level of the target list, so we can just scan the list
5605 * rather than recursing through the expression trees.
5606 */
5607 foreach(lc, partial_target->exprs)
5608 {
5609 Aggref *aggref = (Aggref *) lfirst(lc);
5610
5611 if (IsA(aggref, Aggref))
5612 {
5613 Aggref *newaggref;
5614
5615 /*
5616 * We shouldn't need to copy the substructure of the Aggref node,
5617 * but flat-copy the node itself to avoid damaging other trees.
5618 */
5619 newaggref = makeNode(Aggref);
5620 memcpy(newaggref, aggref, sizeof(Aggref));
5621
5622 /* For now, assume serialization is required */
5623 mark_partial_aggref(newaggref, AGGSPLIT_INITIAL_SERIAL);
5624
5625 lfirst(lc) = newaggref;
5626 }
5627 }
5628
5629 /* clean up cruft */
5630 list_free(non_group_exprs);
5631 list_free(non_group_cols);
5632
5633 /* XXX this causes some redundant cost calculation ... */
5634 return set_pathtarget_cost_width(root, partial_target);
5635}
#define PVC_INCLUDE_AGGREGATES
Definition: optimizer.h:192
void mark_partial_aggref(Aggref *agg, AggSplit aggsplit)
Definition: planner.c:5644

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), AGGSPLIT_INITIAL_SERIAL, create_empty_pathtarget(), PathTarget::exprs, get_pathtarget_sortgroupref, get_sortgroupref_clause_noerr(), i, IsA, lappend(), lfirst, list_free(), makeNode, mark_partial_aggref(), NIL, pull_var_clause(), PVC_INCLUDE_AGGREGATES, PVC_INCLUDE_PLACEHOLDERS, PVC_RECURSE_WINDOWFUNCS, root, and set_pathtarget_cost_width().

Referenced by create_partial_grouping_paths().

◆ make_pathkeys_for_window()

static List * make_pathkeys_for_window ( PlannerInfo *root,
WindowClause *wc,
List *tlist 
)
static

Definition at line 6179 of file planner.c.

6181{
6182 List *window_pathkeys = NIL;
6183
6184 /* Throw error if can't sort */
6185 if (!grouping_is_sortable(wc->partitionClause))
6186 ereport(ERROR,
6187 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6188 errmsg("could not implement window PARTITION BY"),
6189 errdetail("Window partitioning columns must be of sortable datatypes.")));
6190 if (!grouping_is_sortable(wc->orderClause))
6191 ereport(ERROR,
6192 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6193 errmsg("could not implement window ORDER BY"),
6194 errdetail("Window ordering columns must be of sortable datatypes.")));
6195
6196 /*
6197 * First fetch the pathkeys for the PARTITION BY clause. We can safely
6198 * remove any clauses from the wc->partitionClause for redundant pathkeys.
6199 */
6200 if (wc->partitionClause != NIL)
6201 {
6202 bool sortable;
6203
6204 window_pathkeys = make_pathkeys_for_sortclauses_extended(root,
6205 &wc->partitionClause,
6206 tlist,
6207 true,
6208 false,
6209 &sortable,
6210 false);
6211
6212 Assert(sortable);
6213 }
6214
6215 /*
6216 * In principle, we could also consider removing redundant ORDER BY items
6217 * too as doing so does not alter the result of peer row checks done by
6218 * the executor. However, we must *not* remove the ordering column for
6219 * RANGE OFFSET cases, as the executor needs that for in_range tests even
6220 * if it's known to be equal to some partitioning column.
6221 */
6222 if (wc->orderClause != NIL)
6223 {
6224 List *orderby_pathkeys;
6225
6226 orderby_pathkeys = make_pathkeys_for_sortclauses(root,
6227 wc->orderClause,
6228 tlist);
6229
6230 /* Okay, make the combined pathkeys */
6231 if (window_pathkeys != NIL)
6232 window_pathkeys = append_pathkeys(window_pathkeys, orderby_pathkeys);
6233 else
6234 window_pathkeys = orderby_pathkeys;
6235 }
6236
6237 return window_pathkeys;
6238}
List * make_pathkeys_for_sortclauses_extended(PlannerInfo *root, List **sortclauses, List *tlist, bool remove_redundant, bool remove_group_rtindex, bool *sortable, bool set_ec_sortref)
Definition: pathkeys.c:1380
List * partitionClause
Definition: parsenodes.h:1557
List * orderClause
Definition: parsenodes.h:1559

References append_pathkeys(), Assert(), ereport, errcode(), errdetail(), errmsg(), ERROR, grouping_is_sortable(), make_pathkeys_for_sortclauses(), make_pathkeys_for_sortclauses_extended(), NIL, WindowClause::orderClause, WindowClause::partitionClause, and root.

Referenced by create_one_window_path(), and standard_qp_callback().

◆ make_sort_input_target()

static PathTarget * make_sort_input_target ( PlannerInfo *root,
PathTarget *final_target,
bool *  have_postponed_srfs 
)
static

Definition at line 6307 of file planner.c.

6310{
6311 Query *parse = root->parse;
6312 PathTarget *input_target;
6313 int ncols;
6314 bool *col_is_srf;
6315 bool *postpone_col;
6316 bool have_srf;
6317 bool have_volatile;
6318 bool have_expensive;
6319 bool have_srf_sortcols;
6320 bool postpone_srfs;
6321 List *postponable_cols;
6322 List *postponable_vars;
6323 int i;
6324 ListCell *lc;
6325
6326 /* Shouldn't get here unless query has ORDER BY */
6327 Assert(parse->sortClause);
6328
6329 *have_postponed_srfs = false; /* default result */
6330
6331 /* Inspect tlist and collect per-column information */
6332 ncols = list_length(final_target->exprs);
6333 col_is_srf = (bool *) palloc0(ncols * sizeof(bool));
6334 postpone_col = (bool *) palloc0(ncols * sizeof(bool));
6335 have_srf = have_volatile = have_expensive = have_srf_sortcols = false;
6336
6337 i = 0;
6338 foreach(lc, final_target->exprs)
6339 {
6340 Expr *expr = (Expr *) lfirst(lc);
6341
6342 /*
6343 * If the column has a sortgroupref, assume it has to be evaluated
6344 * before sorting. Generally such columns would be ORDER BY, GROUP
6345 * BY, etc targets. One exception is columns that were removed from
6346 * GROUP BY by remove_useless_groupby_columns() ... but those would
6347 * only be Vars anyway. There don't seem to be any cases where it
6348 * would be worth the trouble to double-check.
6349 */
6350 if (get_pathtarget_sortgroupref(final_target, i) == 0)
6351 {
6352 /*
6353 * Check for SRF or volatile functions. Check the SRF case first
6354 * because we must know whether we have any postponed SRFs.
6355 */
6356 if (parse->hasTargetSRFs &&
6357 expression_returns_set((Node *) expr))
6358 {
6359 /* We'll decide below whether these are postponable */
6360 col_is_srf[i] = true;
6361 have_srf = true;
6362 }
6363 else if (contain_volatile_functions((Node *) expr))
6364 {
6365 /* Unconditionally postpone */
6366 postpone_col[i] = true;
6367 have_volatile = true;
6368 }
6369 else
6370 {
6371 /*
6372 * Else check the cost. XXX it's annoying to have to do this
6373 * when set_pathtarget_cost_width() just did it. Refactor to
6374 * allow sharing the work?
6375 */
6376 QualCost cost;
6377
6378 cost_qual_eval_node(&cost, (Node *) expr, root);
6379
6380 /*
6381 * We arbitrarily define "expensive" as "more than 10X
6382 * cpu_operator_cost". Note this will take in any PL function
6383 * with default cost.
6384 */
6385 if (cost.per_tuple > 10 * cpu_operator_cost)
6386 {
6387 postpone_col[i] = true;
6388 have_expensive = true;
6389 }
6390 }
6391 }
6392 else
6393 {
6394 /* For sortgroupref cols, just check if any contain SRFs */
6395 if (!have_srf_sortcols &&
6396 parse->hasTargetSRFs &&
6397 expression_returns_set((Node *) expr))
6398 have_srf_sortcols = true;
6399 }
6400
6401 i++;
6402 }
6403
6404 /*
6405 * We can postpone SRFs if we have some but none are in sortgroupref cols.
6406 */
6407 postpone_srfs = (have_srf && !have_srf_sortcols);
6408
6409 /*
6410 * If we don't need a post-sort projection, just return final_target.
6411 */
6412 if (!(postpone_srfs || have_volatile ||
6413 (have_expensive &&
6414 (parse->limitCount || root->tuple_fraction > 0))))
6415 return final_target;
6416
6417 /*
6418 * Report whether the post-sort projection will contain set-returning
6419 * functions. This is important because it affects whether the Sort can
6420 * rely on the query's LIMIT (if any) to bound the number of rows it needs
6421 * to return.
6422 */
6423 *have_postponed_srfs = postpone_srfs;
6424
6425 /*
6426 * Construct the sort-input target, taking all non-postponable columns and
6427 * then adding Vars, PlaceHolderVars, Aggrefs, and WindowFuncs found in
6428 * the postponable ones.
6429 */
6430 input_target = create_empty_pathtarget();
6431 postponable_cols = NIL;
6432
6433 i = 0;
6434 foreach(lc, final_target->exprs)
6435 {
6436 Expr *expr = (Expr *) lfirst(lc);
6437
6438 if (postpone_col[i] || (postpone_srfs && col_is_srf[i]))
6439 postponable_cols = lappend(postponable_cols, expr);
6440 else
6441 add_column_to_pathtarget(input_target, expr,
6442 get_pathtarget_sortgroupref(final_target, i));
6443
6444 i++;
6445 }
6446
6447 /*
6448 * Pull out all the Vars, Aggrefs, and WindowFuncs mentioned in
6449 * postponable columns, and add them to the sort-input target if not
6450 * already present. (Some might be there already.) We mustn't
6451 * deconstruct Aggrefs or WindowFuncs here, since the projection node
6452 * would be unable to recompute them.
6453 */
6454 postponable_vars = pull_var_clause((Node *) postponable_cols,
6455 PVC_INCLUDE_AGGREGATES |
6456 PVC_INCLUDE_WINDOWFUNCS |
6457 PVC_INCLUDE_PLACEHOLDERS);
6458 add_new_columns_to_pathtarget(input_target, postponable_vars);
6459
6460 /* clean up cruft */
6461 list_free(postponable_vars);
6462 list_free(postponable_cols);
6463
6464 /* XXX this represents even more redundant cost calculation ... */
6465 return set_pathtarget_cost_width(root, input_target);
6466}
bool contain_volatile_functions(Node *clause)
Definition: clauses.c:539
double cpu_operator_cost
Definition: costsize.c:134
void cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
Definition: costsize.c:4767
bool expression_returns_set(Node *clause)
Definition: nodeFuncs.c:763
#define PVC_INCLUDE_WINDOWFUNCS
Definition: optimizer.h:194
Cost per_tuple
Definition: pathnodes.h:48

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), Assert(), contain_volatile_functions(), cost_qual_eval_node(), cpu_operator_cost, create_empty_pathtarget(), expression_returns_set(), PathTarget::exprs, get_pathtarget_sortgroupref, i, lappend(), lfirst, list_free(), list_length(), NIL, palloc0(), parse(), QualCost::per_tuple, pull_var_clause(), PVC_INCLUDE_AGGREGATES, PVC_INCLUDE_PLACEHOLDERS, PVC_INCLUDE_WINDOWFUNCS, root, and set_pathtarget_cost_width().

Referenced by grouping_planner().

◆ make_window_input_target()

static PathTarget * make_window_input_target ( PlannerInfo *root,
PathTarget *final_target,
List *activeWindows 
)
static

Definition at line 6059 of file planner.c.

6062{
6063 PathTarget *input_target;
6064 Bitmapset *sgrefs;
6065 List *flattenable_cols;
6066 List *flattenable_vars;
6067 int i;
6068 ListCell *lc;
6069
6070 Assert(root->parse->hasWindowFuncs);
6071
6072 /*
6073 * Collect the sortgroupref numbers of window PARTITION/ORDER BY clauses
6074 * into a bitmapset for convenient reference below.
6075 */
6076 sgrefs = NULL;
6077 foreach(lc, activeWindows)
6078 {
6079 WindowClause *wc = lfirst_node(WindowClause, lc);
6080 ListCell *lc2;
6081
6082 foreach(lc2, wc->partitionClause)
6083 {
6084 SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
6085
6086 sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6087 }
6088 foreach(lc2, wc->orderClause)
6089 {
6090 SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
6091
6092 sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6093 }
6094 }
6095
6096 /* Add in sortgroupref numbers of GROUP BY clauses, too */
6097 foreach(lc, root->processed_groupClause)
6098 {
6099 SortGroupClause *grpcl = lfirst_node(SortGroupClause, lc);
6100
6101 sgrefs = bms_add_member(sgrefs, grpcl->tleSortGroupRef);
6102 }
6103
6104 /*
6105 * Construct a target containing all the non-flattenable targetlist items,
6106 * and save aside the others for a moment.
6107 */
6108 input_target = create_empty_pathtarget();
6109 flattenable_cols = NIL;
6110
6111 i = 0;
6112 foreach(lc, final_target->exprs)
6113 {
6114 Expr *expr = (Expr *) lfirst(lc);
6115 Index sgref = get_pathtarget_sortgroupref(final_target, i);
6116
6117 /*
6118 * Don't want to deconstruct window clauses or GROUP BY items. (Note
6119 * that such items can't contain window functions, so it's okay to
6120 * compute them below the WindowAgg nodes.)
6121 */
6122 if (sgref != 0 && bms_is_member(sgref, sgrefs))
6123 {
6124 /*
6125 * Don't want to deconstruct this value, so add it to the input
6126 * target as-is.
6127 */
6128 add_column_to_pathtarget(input_target, expr, sgref);
6129 }
6130 else
6131 {
6132 /*
6133 * Column is to be flattened, so just remember the expression for
6134 * later call to pull_var_clause.
6135 */
6136 flattenable_cols = lappend(flattenable_cols, expr);
6137 }
6138
6139 i++;
6140 }
6141
6142 /*
6143 * Pull out all the Vars and Aggrefs mentioned in flattenable columns, and
6144 * add them to the input target if not already present. (Some might be
6145 * there already because they're used directly as window/group clauses.)
6146 *
6147 * Note: it's essential to use PVC_INCLUDE_AGGREGATES here, so that any
6148 * Aggrefs are placed in the Agg node's tlist and not left to be computed
6149 * at higher levels. On the other hand, we should recurse into
6150 * WindowFuncs to make sure their input expressions are available.
6151 */
6152 flattenable_vars = pull_var_clause((Node *) flattenable_cols,
6153 PVC_INCLUDE_AGGREGATES |
6154 PVC_RECURSE_WINDOWFUNCS |
6155 PVC_INCLUDE_PLACEHOLDERS);
6156 add_new_columns_to_pathtarget(input_target, flattenable_vars);
6157
6158 /* clean up cruft */
6159 list_free(flattenable_vars);
6160 list_free(flattenable_cols);
6161
6162 /* XXX this causes some redundant cost calculation ... */
6163 return set_pathtarget_cost_width(root, input_target);
6164}

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), Assert(), bms_add_member(), bms_is_member(), create_empty_pathtarget(), PathTarget::exprs, get_pathtarget_sortgroupref, i, lappend(), lfirst, lfirst_node, list_free(), NIL, WindowClause::orderClause, WindowClause::partitionClause, pull_var_clause(), PVC_INCLUDE_AGGREGATES, PVC_INCLUDE_PLACEHOLDERS, PVC_RECURSE_WINDOWFUNCS, root, set_pathtarget_cost_width(), and SortGroupClause::tleSortGroupRef.

Referenced by grouping_planner().

◆ mark_partial_aggref()

void mark_partial_aggref ( Aggref *agg,
AggSplit  aggsplit 
)

Definition at line 5644 of file planner.c.

5645{
5646 /* aggtranstype should be computed by this point */
5647 Assert(OidIsValid(agg->aggtranstype));
5648 /* ... but aggsplit should still be as the parser left it */
5649 Assert(agg->aggsplit == AGGSPLIT_SIMPLE);
5650
5651 /* Mark the Aggref with the intended partial-aggregation mode */
5652 agg->aggsplit = aggsplit;
5653
5654 /*
5655 * Adjust result type if needed. Normally, a partial aggregate returns
5656 * the aggregate's transition type; but if that's INTERNAL and we're
5657 * serializing, it returns BYTEA instead.
5658 */
5659 if (DO_AGGSPLIT_SKIPFINAL(aggsplit))
5660 {
5661 if (agg->aggtranstype == INTERNALOID && DO_AGGSPLIT_SERIALIZE(aggsplit))
5662 agg->aggtype = BYTEAOID;
5663 else
5664 agg->aggtype = agg->aggtranstype;
5665 }
5666}
#define DO_AGGSPLIT_SKIPFINAL(as)
Definition: nodes.h:392
#define DO_AGGSPLIT_SERIALIZE(as)
Definition: nodes.h:393

References AGGSPLIT_SIMPLE, Assert(), DO_AGGSPLIT_SERIALIZE, DO_AGGSPLIT_SKIPFINAL, and OidIsValid.

Referenced by convert_combining_aggrefs(), and make_partial_grouping_target().
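
The expected calling convention, mirroring make_partial_grouping_target() above: flat-copy the parser's Aggref first, since the original node may be shared with other expression trees, then mark only the copy.

    Aggref *newaggref = makeNode(Aggref);

    /* Flat copy; substructure can be shared because only the node's own
     * fields (aggsplit, aggtype) are changed by mark_partial_aggref. */
    memcpy(newaggref, aggref, sizeof(Aggref));
    mark_partial_aggref(newaggref, AGGSPLIT_INITIAL_SERIAL);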

◆ name_active_windows()

static void name_active_windows ( List *activeWindows )
static

Definition at line 5939 of file planner.c.

5940{
5941 int next_n = 1;
5942 char newname[16];
5943 ListCell *lc;
5944
5945 foreach(lc, activeWindows)
5946 {
5947 WindowClause *wc = lfirst_node(WindowClause, lc);
5948
5949 /* Nothing to do if it has a name already. */
5950 if (wc->name)
5951 continue;
5952
5953 /* Select a name not currently present in the list. */
5954 for (;;)
5955 {
5956 ListCell *lc2;
5957
5958 snprintf(newname, sizeof(newname), "w%d", next_n++);
5959 foreach(lc2, activeWindows)
5960 {
5961 WindowClause *wc2 = lfirst_node(WindowClause, lc2);
5962
5963 if (wc2->name && strcmp(wc2->name, newname) == 0)
5964 break; /* matched */
5965 }
5966 if (lc2 == NULL)
5967 break; /* reached the end with no match */
5968 }
5969 wc->name = pstrdup(newname);
5970 }
5971}
char * pstrdup(const char *in)
Definition: mcxt.c:1699
#define snprintf
Definition: port.h:239

References lfirst_node, pstrdup(), and snprintf.

Referenced by grouping_planner().

◆ optimize_window_clauses()

static void optimize_window_clauses ( PlannerInfo *root,
WindowFuncLists *wflists 
)
static

Definition at line 5716 of file planner.c.

5717{
5718 List *windowClause = root->parse->windowClause;
5719 ListCell *lc;
5720
5721 foreach(lc, windowClause)
5722 {
5723 WindowClause *wc = lfirst_node(WindowClause, lc);
5724 ListCell *lc2;
5725 int optimizedFrameOptions = 0;
5726
5727 Assert(wc->winref <= wflists->maxWinRef);
5728
5729 /* skip any WindowClauses that have no WindowFuncs */
5730 if (wflists->windowFuncs[wc->winref] == NIL)
5731 continue;
5732
5733 foreach(lc2, wflists->windowFuncs[wc->winref])
5734 {
5735 SupportRequestOptimizeWindowClause req;
5736 SupportRequestOptimizeWindowClause *res;
5737 WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
5738 Oid prosupport;
5739
5740 prosupport = get_func_support(wfunc->winfnoid);
5741
5742 /* Check if there's a support function for 'wfunc' */
5743 if (!OidIsValid(prosupport))
5744 break; /* can't optimize this WindowClause */
5745
5746 req.type = T_SupportRequestOptimizeWindowClause;
5747 req.window_clause = wc;
5748 req.window_func = wfunc;
5749 req.frameOptions = wc->frameOptions;
5750
5751 /* call the support function */
5752 res = (SupportRequestOptimizeWindowClause *)
5753 DatumGetPointer(OidFunctionCall1(prosupport,
5754 PointerGetDatum(&req)));
5755
5756 /*
5757 * Skip to next WindowClause if the support function does not
5758 * support this request type.
5759 */
5760 if (res == NULL)
5761 break;
5762
5763 /*
5764 * Save these frameOptions for the first WindowFunc for this
5765 * WindowClause.
5766 */
5767 if (foreach_current_index(lc2) == 0)
5768 optimizedFrameOptions = res->frameOptions;
5769
5770 /*
5771 * On subsequent WindowFuncs, if the frameOptions are not the same
5772 * then we're unable to optimize the frameOptions for this
5773 * WindowClause.
5774 */
5775 else if (optimizedFrameOptions != res->frameOptions)
5776 break; /* skip to the next WindowClause, if any */
5777 }
5778
5779 /* adjust the frameOptions if all WindowFunc's agree that it's ok */
5780 if (lc2 == NULL && wc->frameOptions != optimizedFrameOptions)
5781 {
5782 ListCell *lc3;
5783
5784 /* apply the new frame options */
5785 wc->frameOptions = optimizedFrameOptions;
5786
5787 /*
5788 * We now check to see if changing the frameOptions has caused
5789 * this WindowClause to be a duplicate of some other WindowClause.
5790 * This can only happen if we have multiple WindowClauses, so
5791 * don't bother if there's only 1.
5792 */
5793 if (list_length(windowClause) == 1)
5794 continue;
5795
5796 /*
5797 * Do the duplicate check and reuse the existing WindowClause if
5798 * we find a duplicate.
5799 */
5800 foreach(lc3, windowClause)
5801 {
5802 WindowClause *existing_wc = lfirst_node(WindowClause, lc3);
5803
5804 /* skip over the WindowClause we're currently editing */
5805 if (existing_wc == wc)
5806 continue;
5807
5808 /*
5809 * Perform the same duplicate check that is done in
5810 * transformWindowFuncCall.
5811 */
5812 if (equal(wc->partitionClause, existing_wc->partitionClause) &&
5813 equal(wc->orderClause, existing_wc->orderClause) &&
5814 wc->frameOptions == existing_wc->frameOptions &&
5815 equal(wc->startOffset, existing_wc->startOffset) &&
5816 equal(wc->endOffset, existing_wc->endOffset))
5817 {
5818 ListCell *lc4;
5819
5820 /*
5821 * Now move each WindowFunc in 'wc' into 'existing_wc'.
5822 * This required adjusting each WindowFunc's winref and
5823 * moving the WindowFuncs in 'wc' to the list of
5824 * WindowFuncs in 'existing_wc'.
5825 */
5826 foreach(lc4, wflists->windowFuncs[wc->winref])
5827 {
5828 WindowFunc *wfunc = lfirst_node(WindowFunc, lc4);
5829
5830 wfunc->winref = existing_wc->winref;
5831 }
5832
5833 /* move list items */
5834 wflists->windowFuncs[existing_wc->winref] = list_concat(wflists->windowFuncs[existing_wc->winref],
5835 wflists->windowFuncs[wc->winref]);
5836 wflists->windowFuncs[wc->winref] = NIL;
5837
5838 /*
5839 * transformWindowFuncCall() should have made sure there
5840 * are no other duplicates, so we needn't bother looking
5841 * any further.
5842 */
5843 break;
5844 }
5845 }
5846 }
5847 }
5848}
#define OidFunctionCall1(functionId, arg1)
Definition: fmgr.h:720
RegProcedure get_func_support(Oid funcid)
Definition: lsyscache.c:1998
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:327
static Pointer DatumGetPointer(Datum X)
Definition: postgres.h:317
struct WindowClause * window_clause
Definition: supportnodes.h:339
Node * startOffset
Definition: parsenodes.h:1561
Node * endOffset
Definition: parsenodes.h:1562
Index maxWinRef
Definition: clauses.h:22
Index winref
Definition: primnodes.h:598
Oid winfnoid
Definition: primnodes.h:584

References Assert(), DatumGetPointer(), WindowClause::endOffset, equal(), foreach_current_index, WindowClause::frameOptions, SupportRequestOptimizeWindowClause::frameOptions, get_func_support(), if(), lfirst_node, list_concat(), list_length(), WindowFuncLists::maxWinRef, NIL, OidFunctionCall1, OidIsValid, WindowClause::orderClause, WindowClause::partitionClause, PointerGetDatum(), root, WindowClause::startOffset, SupportRequestOptimizeWindowClause::type, SupportRequestOptimizeWindowClause::window_clause, SupportRequestOptimizeWindowClause::window_func, WindowFuncLists::windowFuncs, WindowFunc::winfnoid, WindowClause::winref, and WindowFunc::winref.

Referenced by grouping_planner().
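
For context, this optimization only fires for window functions whose support function answers the T_SupportRequestOptimizeWindowClause request. Below is a minimal sketch of such a support function, modeled on the in-core row_number() support; the function name is hypothetical.

    #include "postgres.h"
    #include "fmgr.h"
    #include "nodes/parsenodes.h"
    #include "nodes/supportnodes.h"

    PG_FUNCTION_INFO_V1(sketch_window_support);

    Datum
    sketch_window_support(PG_FUNCTION_ARGS)
    {
        Node *rawreq = (Node *) PG_GETARG_POINTER(0);

        if (IsA(rawreq, SupportRequestOptimizeWindowClause))
        {
            SupportRequestOptimizeWindowClause *req =
                (SupportRequestOptimizeWindowClause *) rawreq;

            /* Result does not depend on the frame, so request the cheapest
             * frame options the executor can maintain. */
            req->frameOptions = (FRAMEOPTION_NONDEFAULT |
                                 FRAMEOPTION_ROWS |
                                 FRAMEOPTION_START_UNBOUNDED_PRECEDING |
                                 FRAMEOPTION_END_CURRENT_ROW);
            PG_RETURN_POINTER(req);
        }

        PG_RETURN_POINTER(NULL);    /* request type not handled */
    }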

◆ plan_cluster_use_sort()

bool plan_cluster_use_sort ( Oid  tableOid,
Oid  indexOid 
)

Definition at line 6725 of file planner.c.

6726{
6727 PlannerInfo *root;
6728 Query *query;
6729 PlannerGlobal *glob;
6730 RangeTblEntry *rte;
6731 RelOptInfo *rel;
6732 IndexOptInfo *indexInfo;
6733 QualCost indexExprCost;
6734 Cost comparisonCost;
6735 Path *seqScanPath;
6736 Path seqScanAndSortPath;
6737 IndexPath *indexScanPath;
6738 ListCell *lc;
6739
6740 /* We can short-circuit the cost comparison if indexscans are disabled */
6741 if (!enable_indexscan)
6742 return true; /* use sort */
6743
6744 /* Set up mostly-dummy planner state */
6745 query = makeNode(Query);
6746 query->commandType = CMD_SELECT;
6747
6748 glob = makeNode(PlannerGlobal);
6749
6750 root = makeNode(PlannerInfo);
6751 root->parse = query;
6752 root->glob = glob;
6753 root->query_level = 1;
6754 root->planner_cxt = CurrentMemoryContext;
6755 root->wt_param_id = -1;
6756 root->join_domains = list_make1(makeNode(JoinDomain));
6757
6758 /* Build a minimal RTE for the rel */
6759 rte = makeNode(RangeTblEntry);
6760 rte->rtekind = RTE_RELATION;
6761 rte->relid = tableOid;
6762 rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
6763 rte->rellockmode = AccessShareLock;
6764 rte->lateral = false;
6765 rte->inh = false;
6766 rte->inFromCl = true;
6767 query->rtable = list_make1(rte);
6768 addRTEPermissionInfo(&query->rteperminfos, rte);
6769
6770 /* Set up RTE/RelOptInfo arrays */
6771 setup_simple_rel_arrays(root);
6772
6773 /* Build RelOptInfo */
6774 rel = build_simple_rel(root, 1, NULL);
6775
6776 /* Locate IndexOptInfo for the target index */
6777 indexInfo = NULL;
6778 foreach(lc, rel->indexlist)
6779 {
6780 indexInfo = lfirst_node(IndexOptInfo, lc);
6781 if (indexInfo->indexoid == indexOid)
6782 break;
6783 }
6784
6785 /*
6786 * It's possible that get_relation_info did not generate an IndexOptInfo
6787 * for the desired index; this could happen if it's not yet reached its
6788 * indcheckxmin usability horizon, or if it's a system index and we're
6789 * ignoring system indexes. In such cases we should tell CLUSTER to not
6790 * trust the index contents but use seqscan-and-sort.
6791 */
6792 if (lc == NULL) /* not in the list? */
6793 return true; /* use sort */
6794
6795 /*
6796 * Rather than doing all the pushups that would be needed to use
6797 * set_baserel_size_estimates, just do a quick hack for rows and width.
6798 */
6799 rel->rows = rel->tuples;
6800 rel->reltarget->width = get_relation_data_width(tableOid, NULL);
6801
6802 root->total_table_pages = rel->pages;
6803
6804 /*
6805 * Determine eval cost of the index expressions, if any. We need to
6806 * charge twice that amount for each tuple comparison that happens during
6807 * the sort, since tuplesort.c will have to re-evaluate the index
6808 * expressions each time. (XXX that's pretty inefficient...)
6809 */
6810 cost_qual_eval(&indexExprCost, indexInfo->indexprs, root);
6811 comparisonCost = 2.0 * (indexExprCost.startup + indexExprCost.per_tuple);
6812
6813 /* Estimate the cost of seq scan + sort */
6814 seqScanPath = create_seqscan_path(root, rel, NULL, 0);
6815 cost_sort(&seqScanAndSortPath, root, NIL,
6816 seqScanPath->disabled_nodes,
6817 seqScanPath->total_cost, rel->tuples, rel->reltarget->width,
6818 comparisonCost, maintenance_work_mem, -1.0);
6819
6820 /* Estimate the cost of index scan */
6821 indexScanPath = create_index_path(root, indexInfo,
6822 NIL, NIL, NIL, NIL,
6823 ForwardScanDirection, false,
6824 NULL, 1.0, false);
6825
6826 return (seqScanAndSortPath.total_cost < indexScanPath->path.total_cost);
6827}
void cost_sort(Path *path, PlannerInfo *root, List *pathkeys, int input_disabled_nodes, Cost input_cost, double tuples, int width, Cost comparison_cost, int sort_mem, double limit_tuples)
Definition: costsize.c:2144
void cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
Definition: costsize.c:4741
bool enable_indexscan
Definition: costsize.c:146
int maintenance_work_mem
Definition: globals.c:133
#define AccessShareLock
Definition: lockdefs.h:36
MemoryContext CurrentMemoryContext
Definition: mcxt.c:143
double Cost
Definition: nodes.h:257
RTEPermissionInfo * addRTEPermissionInfo(List **rteperminfos, RangeTblEntry *rte)
@ RTE_RELATION
Definition: parsenodes.h:1026
IndexPath * create_index_path(PlannerInfo *root, IndexOptInfo *index, List *indexclauses, List *indexorderbys, List *indexorderbycols, List *pathkeys, ScanDirection indexscandir, bool indexonly, Relids required_outer, double loop_count, bool partial_path)
Definition: pathnode.c:1049
Path * create_seqscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer, int parallel_workers)
Definition: pathnode.c:983
int32 get_relation_data_width(Oid relid, int32 *attr_widths)
Definition: plancat.c:1236
void setup_simple_rel_arrays(PlannerInfo *root)
Definition: relnode.c:94
RelOptInfo * build_simple_rel(PlannerInfo *root, int relid, RelOptInfo *parent)
Definition: relnode.c:192
@ ForwardScanDirection
Definition: sdir.h:28
Path path
Definition: pathnodes.h:1760
int disabled_nodes
Definition: pathnodes.h:1711
Cost total_cost
Definition: pathnodes.h:1713
Cost startup
Definition: pathnodes.h:47
List * rtable
Definition: parsenodes.h:170
CmdType commandType
Definition: parsenodes.h:121
RTEKind rtekind
Definition: parsenodes.h:1061
Cardinality tuples
Definition: pathnodes.h:976
BlockNumber pages
Definition: pathnodes.h:975
List * indexlist
Definition: pathnodes.h:971
Cardinality rows
Definition: pathnodes.h:904

References AccessShareLock, addRTEPermissionInfo(), build_simple_rel(), CMD_SELECT, Query::commandType, cost_qual_eval(), cost_sort(), create_index_path(), create_seqscan_path(), CurrentMemoryContext, Path::disabled_nodes, enable_indexscan, ForwardScanDirection, get_relation_data_width(), RelOptInfo::indexlist, IndexOptInfo::indexoid, RangeTblEntry::inh, lfirst_node, list_make1, maintenance_work_mem, makeNode, NIL, RelOptInfo::pages, IndexPath::path, QualCost::per_tuple, RelOptInfo::reltarget, root, RelOptInfo::rows, Query::rtable, RTE_RELATION, RangeTblEntry::rtekind, setup_simple_rel_arrays(), QualCost::startup, Path::total_cost, RelOptInfo::tuples, and PathTarget::width.

Referenced by copy_table_data().
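
A hedged sketch of the caller's decision in CLUSTER's copy_table_data() (locals are illustrative): the boolean simply selects between the two table-rewrite strategies.

    if (plan_cluster_use_sort(tableOid, indexOid))
    {
        /* Seqscan the old heap and sort the tuples by the index keys. */
    }
    else
    {
        /* Scan via the clustering index; tuples arrive already ordered. */
    }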

◆ plan_create_index_workers()

int plan_create_index_workers ( Oid  tableOid,
Oid  indexOid 
)

Definition at line 6847 of file planner.c.

6848{
6849 PlannerInfo *root;
6850 Query *query;
6851 PlannerGlobal *glob;
6852 RangeTblEntry *rte;
6853 Relation heap;
6854 Relation index;
6855 RelOptInfo *rel;
6856 int parallel_workers;
6857 BlockNumber heap_blocks;
6858 double reltuples;
6859 double allvisfrac;
6860
6861 /*
6862 * We don't allow performing parallel operation in standalone backend or
6863 * when parallelism is disabled.
6864 */
6865 if (!IsUnderPostmaster || max_parallel_maintenance_workers == 0)
6866 return 0;
6867
6868 /* Set up largely-dummy planner state */
6869 query = makeNode(Query);
6870 query->commandType = CMD_SELECT;
6871
6872 glob = makeNode(PlannerGlobal);
6873
6874 root = makeNode(PlannerInfo);
6875 root->parse = query;
6876 root->glob = glob;
6877 root->query_level = 1;
6878 root->planner_cxt = CurrentMemoryContext;
6879 root->wt_param_id = -1;
6880 root->join_domains = list_make1(makeNode(JoinDomain));
6881
6882 /*
6883 * Build a minimal RTE.
6884 *
6885 * Mark the RTE with inh = true. This is a kludge to prevent
6886 * get_relation_info() from fetching index info, which is necessary
6887 * because it does not expect that any IndexOptInfo is currently
6888 * undergoing REINDEX.
6889 */
6890 rte = makeNode(RangeTblEntry);
6891 rte->rtekind = RTE_RELATION;
6892 rte->relid = tableOid;
6893 rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
6894 rte->rellockmode = AccessShareLock;
6895 rte->lateral = false;
6896 rte->inh = true;
6897 rte->inFromCl = true;
6898 query->rtable = list_make1(rte);
6899 addRTEPermissionInfo(&query->rteperminfos, rte);
6900
6901 /* Set up RTE/RelOptInfo arrays */
6902 setup_simple_rel_arrays(root);
6903
6904 /* Build RelOptInfo */
6905 rel = build_simple_rel(root, 1, NULL);
6906
6907 /* Rels are assumed already locked by the caller */
6908 heap = table_open(tableOid, NoLock);
6909 index = index_open(indexOid, NoLock);
6910
6911 /*
6912 * Determine if it's safe to proceed.
6913 *
6914 * Currently, parallel workers can't access the leader's temporary tables.
6915 * Furthermore, any index predicate or index expressions must be parallel
6916 * safe.
6917 */
6918 if (heap->rd_rel->relpersistence == RELPERSISTENCE_TEMP ||
6919 !is_parallel_safe(root, (Node *) RelationGetIndexExpressions(index)) ||
6920 !is_parallel_safe(root, (Node *) RelationGetIndexPredicate(index)))
6921 {
6922 parallel_workers = 0;
6923 goto done;
6924 }
6925
6926 /*
6927 * If parallel_workers storage parameter is set for the table, accept that
6928 * as the number of parallel worker processes to launch (though still cap
6929 * at max_parallel_maintenance_workers). Note that we deliberately do not
6930 * consider any other factor when parallel_workers is set. (e.g., memory
6931 * use by workers.)
6932 */
6933 if (rel->rel_parallel_workers != -1)
6934 {
6935 parallel_workers = Min(rel->rel_parallel_workers,
6936 max_parallel_maintenance_workers);
6937 goto done;
6938 }
6939
6940 /*
6941 * Estimate heap relation size ourselves, since rel->pages cannot be
6942 * trusted (heap RTE was marked as inheritance parent)
6943 */
6944 estimate_rel_size(heap, NULL, &heap_blocks, &reltuples, &allvisfrac);
6945
6946 /*
6947 * Determine number of workers to scan the heap relation using generic
6948 * model
6949 */
6950 parallel_workers = compute_parallel_worker(rel, heap_blocks, -1,
6951 max_parallel_maintenance_workers);
6952
6953 /*
6954 * Cap workers based on available maintenance_work_mem as needed.
6955 *
6956 * Note that each tuplesort participant receives an even share of the
6957 * total maintenance_work_mem budget. Aim to leave participants
6958 * (including the leader as a participant) with no less than 32MB of
6959 * memory. This leaves cases where maintenance_work_mem is set to 64MB
6960 * immediately past the threshold of being capable of launching a single
6961 * parallel worker to sort.
6962 */
6963 while (parallel_workers > 0 &&
6964 maintenance_work_mem / (parallel_workers + 1) < 32 * 1024)
6965 parallel_workers--;
6966
6967done:
6968 index_close(index, NoLock);
6969 table_close(heap, NoLock);
6970
6971 return parallel_workers;
6972}
int compute_parallel_worker(RelOptInfo *rel, double heap_pages, double index_pages, int max_workers)
Definition: allpaths.c:4234
uint32 BlockNumber
Definition: block.h:31
int max_parallel_maintenance_workers
Definition: globals.c:134
bool IsUnderPostmaster
Definition: globals.c:120
void index_close(Relation relation, LOCKMODE lockmode)
Definition: indexam.c:177
Relation index_open(Oid relationId, LOCKMODE lockmode)
Definition: indexam.c:133
#define NoLock
Definition: lockdefs.h:34
void estimate_rel_size(Relation rel, int32 *attr_widths, BlockNumber *pages, double *tuples, double *allvisfrac)
Definition: plancat.c:1069
List * RelationGetIndexPredicate(Relation relation)
Definition: relcache.c:5193
List * RelationGetIndexExpressions(Relation relation)
Definition: relcache.c:5080
int rel_parallel_workers
Definition: pathnodes.h:983
Form_pg_class rd_rel
Definition: rel.h:111
Definition: type.h:96
void table_close(Relation relation, LOCKMODE lockmode)
Definition: table.c:126
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition: table.c:40

References AccessShareLock, addRTEPermissionInfo(), build_simple_rel(), CMD_SELECT, Query::commandType, compute_parallel_worker(), CurrentMemoryContext, estimate_rel_size(), index_close(), index_open(), RangeTblEntry::inh, is_parallel_safe(), IsUnderPostmaster, list_make1, maintenance_work_mem, makeNode, max_parallel_maintenance_workers, Min, NoLock, RelationData::rd_rel, RelOptInfo::rel_parallel_workers, RelationGetIndexExpressions(), RelationGetIndexPredicate(), root, Query::rtable, RTE_RELATION, RangeTblEntry::rtekind, setup_simple_rel_arrays(), table_close(), and table_open().

Referenced by index_build().
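
Illustrative caller pattern from an index build (names are placeholders, not verbatim source): the returned count caps the parallel tuplesort, with 0 meaning a serial build.

    int parallel_workers = plan_create_index_workers(RelationGetRelid(heapRel),
                                                     RelationGetRelid(indexRel));

    if (parallel_workers > 0)
    {
        /* Launch a parallel sort with the leader plus parallel_workers. */
    }
    else
    {
        /* Fall back to an ordinary serial index build. */
    }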

◆ planner()

PlannedStmt * planner ( Query *parse,
const char *  query_string,
int  cursorOptions,
ParamListInfo  boundParams 
)

Definition at line 286 of file planner.c.

288{
289 PlannedStmt *result;
290
291 if (planner_hook)
292 result = (*planner_hook) (parse, query_string, cursorOptions, boundParams);
293 else
294 result = standard_planner(parse, query_string, cursorOptions, boundParams);
295
296 pgstat_report_plan_id(result->planId, false);
297
298 return result;
299}
void pgstat_report_plan_id(uint64 plan_id, bool force)
planner_hook_type planner_hook
Definition: planner.c:73
PlannedStmt * standard_planner(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams)
Definition: planner.c:302

References parse(), pgstat_report_plan_id(), PlannedStmt::planId, planner_hook, and standard_planner().

Referenced by pg_plan_query().
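
planner_hook is the standard extension point here. Below is a minimal pass-through hook as an extension might install it (the extension and function names are hypothetical); a real hook must chain to any previously installed hook or fall back to standard_planner(), just as planner() itself does.

    #include "postgres.h"
    #include "optimizer/planner.h"

    PG_MODULE_MAGIC;

    static planner_hook_type prev_planner_hook = NULL;

    static PlannedStmt *
    sketch_planner(Query *parse, const char *query_string,
                   int cursorOptions, ParamListInfo boundParams)
    {
        /* Delegate; a real hook would inspect or adjust 'parse' here. */
        if (prev_planner_hook)
            return prev_planner_hook(parse, query_string,
                                     cursorOptions, boundParams);
        return standard_planner(parse, query_string,
                                cursorOptions, boundParams);
    }

    void
    _PG_init(void)
    {
        prev_planner_hook = planner_hook;
        planner_hook = sketch_planner;
    }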

◆ postprocess_setop_tlist()

static List * postprocess_setop_tlist ( List *new_tlist,
List *orig_tlist 
)
static

Definition at line 5679 of file planner.c.

5680{
5681 ListCell *l;
5682 ListCell *orig_tlist_item = list_head(orig_tlist);
5683
5684 foreach(l, new_tlist)
5685 {
5686 TargetEntry *new_tle = lfirst_node(TargetEntry, l);
5687 TargetEntry *orig_tle;
5688
5689 /* ignore resjunk columns in setop result */
5690 if (new_tle->resjunk)
5691 continue;
5692
5693 Assert(orig_tlist_item != NULL);
5694 orig_tle = lfirst_node(TargetEntry, orig_tlist_item);
5695 orig_tlist_item = lnext(orig_tlist, orig_tlist_item);
5696 if (orig_tle->resjunk) /* should not happen */
5697 elog(ERROR, "resjunk output columns are not implemented");
5698 Assert(new_tle->resno == orig_tle->resno);
5699 new_tle->ressortgroupref = orig_tle->ressortgroupref;
5700 }
5701 if (orig_tlist_item != NULL)
5702 elog(ERROR, "resjunk output columns are not implemented");
5703 return new_tlist;
5704}
#define elog(elevel,...)
Definition: elog.h:226
AttrNumber resno
Definition: primnodes.h:2221
Index ressortgroupref
Definition: primnodes.h:2225

References Assert(), elog, ERROR, lfirst_node, list_head(), lnext(), TargetEntry::resno, and TargetEntry::ressortgroupref.

Referenced by grouping_planner().

◆ preprocess_expression()

static Node * preprocess_expression ( PlannerInfo *root,
Node *expr,
int  kind 
)
static

Definition at line 1202 of file planner.c.

1203{
1204 /*
1205 * Fall out quickly if expression is empty. This occurs often enough to
1206 * be worth checking. Note that null->null is the correct conversion for
1207 * implicit-AND result format, too.
1208 */
1209 if (expr == NULL)
1210 return NULL;
1211
1212 /*
1213 * If the query has any join RTEs, replace join alias variables with
1214 * base-relation variables. We must do this first, since any expressions
1215 * we may extract from the joinaliasvars lists have not been preprocessed.
1216 * For example, if we did this after sublink processing, sublinks expanded
1217 * out from join aliases would not get processed. But we can skip this in
1218 * non-lateral RTE functions, VALUES lists, and TABLESAMPLE clauses, since
1219 * they can't contain any Vars of the current query level.
1220 */
1221 if (root->hasJoinRTEs &&
1222 !(kind == EXPRKIND_RTFUNC ||
1223 kind == EXPRKIND_VALUES ||
1224 kind == EXPRKIND_TABLESAMPLE ||
1225 kind == EXPRKIND_TABLEFUNC))
1226 expr = flatten_join_alias_vars(root, root->parse, expr);
1227
1228 /*
1229 * Simplify constant expressions. For function RTEs, this was already
1230 * done by preprocess_function_rtes. (But note we must do it again for
1231 * EXPRKIND_RTFUNC_LATERAL, because those might by now contain
1232 * un-simplified subexpressions inserted by flattening of subqueries or
1233 * join alias variables.)
1234 *
1235 * Note: an essential effect of this is to convert named-argument function
1236 * calls to positional notation and insert the current actual values of
1237 * any default arguments for functions. To ensure that happens, we *must*
1238 * process all expressions here. Previous PG versions sometimes skipped
1239 * const-simplification if it didn't seem worth the trouble, but we can't
1240 * do that anymore.
1241 *
1242 * Note: this also flattens nested AND and OR expressions into N-argument
1243 * form. All processing of a qual expression after this point must be
1244 * careful to maintain AND/OR flatness --- that is, do not generate a tree
1245 * with AND directly under AND, nor OR directly under OR.
1246 */
1247 if (kind != EXPRKIND_RTFUNC)
1248 expr = eval_const_expressions(root, expr);
1249
1250 /*
1251 * If it's a qual or havingQual, canonicalize it.
1252 */
1253 if (kind == EXPRKIND_QUAL)
1254 {
1255 expr = (Node *) canonicalize_qual((Expr *) expr, false);
1256
1257#ifdef OPTIMIZER_DEBUG
1258 printf("After canonicalize_qual()\n");
1259 pprint(expr);
1260#endif
1261 }
1262
1263 /*
1264 * Check for ANY ScalarArrayOpExpr with Const arrays and set the
1265 * hashfuncid of any that might execute more quickly by using hash lookups
1266 * instead of a linear search.
1267 */
1268 if (kind == EXPRKIND_QUAL || kind == EXPRKIND_TARGET)
1269 {
1270 convert_saop_to_hashed_saop(expr);
1271 }
1272
1273 /* Expand SubLinks to SubPlans */
1274 if (root->parse->hasSubLinks)
1275 expr = SS_process_sublinks(root, expr, (kind == EXPRKIND_QUAL));
1276
1277 /*
1278 * XXX do not insert anything here unless you have grokked the comments in
1279 * SS_replace_correlation_vars ...
1280 */
1281
1282 /* Replace uplevel vars with Param nodes (this IS possible in VALUES) */
1283 if (root->query_level > 1)
1284 expr = SS_replace_correlation_vars(root, expr);
1285
1286 /*
1287 * If it's a qual or havingQual, convert it to implicit-AND format. (We
1288 * don't want to do this before eval_const_expressions, since the latter
1289 * would be unable to simplify a top-level AND correctly. Also,
1290 * SS_process_sublinks expects explicit-AND format.)
1291 */
1292 if (kind == EXPRKIND_QUAL)
1293 expr = (Node *) make_ands_implicit((Expr *) expr);
1294
1295 return expr;
1296}
void pprint(const void *obj)
Definition: print.c:54
void convert_saop_to_hashed_saop(Node *node)
Definition: clauses.c:2289
List * make_ands_implicit(Expr *clause)
Definition: makefuncs.c:810
#define EXPRKIND_TARGET
Definition: planner.c:81
#define EXPRKIND_TABLESAMPLE
Definition: planner.c:89
#define EXPRKIND_VALUES
Definition: planner.c:84
#define EXPRKIND_QUAL
Definition: planner.c:80
#define EXPRKIND_TABLEFUNC
Definition: planner.c:91
#define EXPRKIND_RTFUNC
Definition: planner.c:82
#define printf(...)
Definition: port.h:245
Expr * canonicalize_qual(Expr *qual, bool is_check)
Definition: prepqual.c:293
Node * SS_process_sublinks(PlannerInfo *root, Node *expr, bool isQual)
Definition: subselect.c:2026
Node * SS_replace_correlation_vars(PlannerInfo *root, Node *expr)
Definition: subselect.c:1971
Node * flatten_join_alias_vars(PlannerInfo *root, Query *query, Node *node)
Definition: var.c:789

References canonicalize_qual(), convert_saop_to_hashed_saop(), eval_const_expressions(), EXPRKIND_QUAL, EXPRKIND_RTFUNC, EXPRKIND_TABLEFUNC, EXPRKIND_TABLESAMPLE, EXPRKIND_TARGET, EXPRKIND_VALUES, flatten_join_alias_vars(), make_ands_implicit(), pprint(), printf, root, SS_process_sublinks(), and SS_replace_correlation_vars().

Referenced by preprocess_phv_expression(), preprocess_qual_conditions(), and subquery_planner().

◆ preprocess_groupclause()

static List * preprocess_groupclause ( PlannerInfo *root,
List *force 
)
static

Definition at line 2772 of file planner.c.

2773{
2774 Query *parse = root->parse;
2775 List *new_groupclause = NIL;
2776 ListCell *sl;
2777 ListCell *gl;
2778
2779 /* For grouping sets, we need to force the ordering */
2780 if (force)
2781 {
2782 foreach(sl, force)
2783 {
2784 Index ref = lfirst_int(sl);
2785 SortGroupClause *cl = get_sortgroupref_clause(ref, parse->groupClause);
2786
2787 new_groupclause = lappend(new_groupclause, cl);
2788 }
2789
2790 return new_groupclause;
2791 }
2792
2793 /* If no ORDER BY, nothing useful to do here */
2794 if (parse->sortClause == NIL)
2795 return list_copy(parse->groupClause);
2796
2797 /*
2798 * Scan the ORDER BY clause and construct a list of matching GROUP BY
2799 * items, but only as far as we can make a matching prefix.
2800 *
2801 * This code assumes that the sortClause contains no duplicate items.
2802 */
2803 foreach(sl, parse->sortClause)
2804 {
2805 SortGroupClause *sc = lfirst_node(SortGroupClause, sl);
2806
2807 foreach(gl, parse->groupClause)
2808 {
2809 SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
2810
2811 if (equal(gc, sc))
2812 {
2813 new_groupclause = lappend(new_groupclause, gc);
2814 break;
2815 }
2816 }
2817 if (gl == NULL)
2818 break; /* no match, so stop scanning */
2819 }
2820
2821
2822 /* If no match at all, no point in reordering GROUP BY */
2823 if (new_groupclause == NIL)
2824 return list_copy(parse->groupClause);
2825
2826 /*
2827 * Add any remaining GROUP BY items to the new list. We don't require a
2828 * complete match, because even partial match allows ORDER BY to be
2829 * implemented using incremental sort. Also, give up if there are any
2830 * non-sortable GROUP BY items, since then there's no hope anyway.
2831 */
2832 foreach(gl, parse->groupClause)
2833 {
2834 SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
2835
2836 if (list_member_ptr(new_groupclause, gc))
2837 continue; /* it matched an ORDER BY item */
2838 if (!OidIsValid(gc->sortop)) /* give up, GROUP BY can't be sorted */
2839 return list_copy(parse->groupClause);
2840 new_groupclause = lappend(new_groupclause, gc);
2841 }
2842
2843 /* Success --- install the rearranged GROUP BY list */
2844 Assert(list_length(parse->groupClause) == list_length(new_groupclause));
2845 return new_groupclause;
2846}
SortGroupClause * get_sortgroupref_clause(Index sortref, List *clauses)
Definition: tlist.c:422

References Assert(), equal(), get_sortgroupref_clause(), lappend(), lfirst_int, lfirst_node, list_copy(), list_length(), list_member_ptr(), NIL, OidIsValid, parse(), root, and SortGroupClause::sortop.

Referenced by consider_groupingsets_paths(), grouping_planner(), and preprocess_grouping_sets().

◆ preprocess_grouping_sets()

static grouping_sets_data * preprocess_grouping_sets ( PlannerInfo *root )
static

Definition at line 2125 of file planner.c.

2126{
2127 Query *parse = root->parse;
2128 List *sets;
2129 int maxref = 0;
2130 ListCell *lc_set;
2131 grouping_sets_data *gd = palloc0(sizeof(grouping_sets_data));
2132
2133 parse->groupingSets = expand_grouping_sets(parse->groupingSets, parse->groupDistinct, -1);
2134
2135 gd->any_hashable = false;
2136 gd->unhashable_refs = NULL;
2137 gd->unsortable_refs = NULL;
2138 gd->unsortable_sets = NIL;
2139
2140 /*
2141 * We don't currently make any attempt to optimize the groupClause when
2142 * there are grouping sets, so just duplicate it in processed_groupClause.
2143 */
2144 root->processed_groupClause = parse->groupClause;
2145
2146 if (parse->groupClause)
2147 {
2148 ListCell *lc;
2149
2150 foreach(lc, parse->groupClause)
2151 {
2152 SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
2153 Index ref = gc->tleSortGroupRef;
2154
2155 if (ref > maxref)
2156 maxref = ref;
2157
2158 if (!gc->hashable)
2159 gd->unhashable_refs = bms_add_member(gd->unhashable_refs, ref);
2160
2161 if (!OidIsValid(gc->sortop))
2162 gd->unsortable_refs = bms_add_member(gd->unsortable_refs, ref);
2163 }
2164 }
2165
2166 /* Allocate workspace array for remapping */
2167 gd->tleref_to_colnum_map = (int *) palloc((maxref + 1) * sizeof(int));
2168
2169 /*
2170 * If we have any unsortable sets, we must extract them before trying to
2171 * prepare rollups. Unsortable sets don't go through
2172 * reorder_grouping_sets, so we must apply the GroupingSetData annotation
2173 * here.
2174 */
2175 if (!bms_is_empty(gd->unsortable_refs))
2176 {
2177 List *sortable_sets = NIL;
2178 ListCell *lc;
2179
2180 foreach(lc, parse->groupingSets)
2181 {
2182 List *gset = (List *) lfirst(lc);
2183
2184 if (bms_overlap_list(gd->unsortable_refs, gset))
2185 {
2186 GroupingSetData *gs = makeNode(GroupingSetData);
2187
2188 gs->set = gset;
2189 gd->unsortable_sets = lappend(gd->unsortable_sets, gs);
2190
2191 /*
2192 * We must enforce here that an unsortable set is hashable;
2193 * later code assumes this. Parse analysis only checks that
2194 * every individual column is either hashable or sortable.
2195 *
2196 * Note that passing this test doesn't guarantee we can
2197 * generate a plan; there might be other showstoppers.
2198 */
2199 if (bms_overlap_list(gd->unhashable_refs, gset))
2200 ereport(ERROR,
2201 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2202 errmsg("could not implement GROUP BY"),
2203 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
2204 }
2205 else
2206 sortable_sets = lappend(sortable_sets, gset);
2207 }
2208
2209 if (sortable_sets)
2210 sets = extract_rollup_sets(sortable_sets);
2211 else
2212 sets = NIL;
2213 }
2214 else
2215 sets = extract_rollup_sets(parse->groupingSets);
2216
2217 foreach(lc_set, sets)
2218 {
2219 List *current_sets = (List *) lfirst(lc_set);
2220 RollupData *rollup = makeNode(RollupData);
2221 GroupingSetData *gs;
2222
2223 /*
2224 * Reorder the current list of grouping sets into correct prefix
2225 * order. If only one aggregation pass is needed, try to make the
2226 * list match the ORDER BY clause; if more than one pass is needed, we
2227 * don't bother with that.
2228 *
2229 * Note that this reorders the sets from smallest-member-first to
2230 * largest-member-first, and applies the GroupingSetData annotations,
2231 * though the data will be filled in later.
2232 */
2233 current_sets = reorder_grouping_sets(current_sets,
2234 (list_length(sets) == 1
2235 ? parse->sortClause
2236 : NIL));
2237
2238 /*
2239 * Get the initial (and therefore largest) grouping set.
2240 */
2241 gs = linitial_node(GroupingSetData, current_sets);
2242
2243 /*
2244 * Order the groupClause appropriately. If the first grouping set is
2245 * empty, then the groupClause must also be empty; otherwise we have
2246 * to force the groupClause to match that grouping set's order.
2247 *
2248 * (The first grouping set can be empty even though parse->groupClause
2249 * is not empty only if all non-empty grouping sets are unsortable.
2250 * The groupClauses for hashed grouping sets are built later on.)
2251 */
2252 if (gs->set)
2253 rollup->groupClause = preprocess_groupclause(root, gs->set);
2254 else
2255 rollup->groupClause = NIL;
2256
2257 /*
2258 * Is it hashable? We pretend empty sets are hashable even though we
2259 * actually force them not to be hashed later. But don't bother if
2260 * there's nothing but empty sets (since in that case we can't hash
2261 * anything).
2262 */
2263 if (gs->set &&
2264 !bms_overlap_list(gd->unhashable_refs, gs->set))
2265 {
2266 rollup->hashable = true;
2267 gd->any_hashable = true;
2268 }
2269
2270 /*
2271 * Now that we've pinned down an order for the groupClause for this
2272 * list of grouping sets, we need to remap the entries in the grouping
2273 * sets from sortgrouprefs to plain indices (0-based) into the
2274 * groupClause for this collection of grouping sets. We keep the
2275 * original form for later use, though.
2276 */
2277 rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
2278 current_sets,
2279 gd->tleref_to_colnum_map);
2280 rollup->gsets_data = current_sets;
2281
2282 gd->rollups = lappend(gd->rollups, rollup);
2283 }
2284
2285 if (gd->unsortable_sets)
2286 {
2287 /*
2288 * We have not yet pinned down a groupclause for this, but we will
2289 * need index-based lists for estimation purposes. Construct
2290 * hash_sets_idx based on the entire original groupclause for now.
2291 */
2292 gd->hash_sets_idx = remap_to_groupclause_idx(parse->groupClause,
2293 gd->unsortable_sets,
2294 gd->tleref_to_colnum_map);
2295 gd->any_hashable = true;
2296 }
2297
2298 return gd;
2299}
bool bms_overlap_list(const Bitmapset *a, const List *b)
Definition: bitmapset.c:608
List * expand_grouping_sets(List *groupingSets, bool groupDistinct, int limit)
Definition: parse_agg.c:1894
static List * reorder_grouping_sets(List *groupingSets, List *sortclause)
Definition: planner.c:3080
static List * extract_rollup_sets(List *groupingSets)
Definition: planner.c:2868
Bitmapset * unhashable_refs
Definition: planner.c:105
Bitmapset * unsortable_refs
Definition: planner.c:104

References grouping_sets_data::any_hashable, bms_add_member(), bms_is_empty, bms_overlap_list(), ereport, errcode(), errdetail(), errmsg(), ERROR, expand_grouping_sets(), extract_rollup_sets(), RollupData::groupClause, RollupData::gsets, RollupData::gsets_data, grouping_sets_data::hash_sets_idx, RollupData::hashable, lappend(), lfirst, lfirst_node, linitial_node, list_length(), makeNode, NIL, OidIsValid, palloc(), palloc0(), parse(), preprocess_groupclause(), remap_to_groupclause_idx(), reorder_grouping_sets(), grouping_sets_data::rollups, root, GroupingSetData::set, SortGroupClause::sortop, grouping_sets_data::tleref_to_colnum_map, SortGroupClause::tleSortGroupRef, grouping_sets_data::unhashable_refs, grouping_sets_data::unsortable_refs, and grouping_sets_data::unsortable_sets.

Referenced by grouping_planner().
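To make the data flow concrete: for GROUP BY ROLLUP (a, b), expand_grouping_sets() produces the sets {a,b}, {a} and {}, which form a chain under subset inclusion, so extract_rollup_sets() can cover them with a single rollup, and reorder_grouping_sets() then emits them largest-member-first. A toy verification of the chain property (hypothetical; bitmasks stand in for sets of sortgrouprefs):

#include <stdio.h>

int
main(void)
{
    /* ROLLUP (a, b) expands to {a,b}, {a}, {} -- here a = bit 0, b = bit 1 */
    unsigned sets[] = {0x3, 0x1, 0x0};
    int nsets = 3;
    int is_chain = 1;

    /* a single rollup works iff each set is a subset of its predecessor */
    for (int i = 1; i < nsets; i++)
        if ((sets[i] & sets[i - 1]) != sets[i])
            is_chain = 0;

    puts(is_chain ? "one rollup suffices" : "multiple rollups needed");
    return 0;
}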

◆ preprocess_limit()

static double preprocess_limit ( PlannerInfo *  root,
double  tuple_fraction,
int64 *  offset_est,
int64 *  count_est 
)
static

Definition at line 2521 of file planner.c.

2523{
2524 Query *parse = root->parse;
2525 Node *est;
2526 double limit_fraction;
2527
2528 /* Should not be called unless LIMIT or OFFSET */
2529 Assert(parse->limitCount || parse->limitOffset);
2530
2531 /*
2532 * Try to obtain the clause values. We use estimate_expression_value
2533 * primarily because it can sometimes do something useful with Params.
2534 */
2535 if (parse->limitCount)
2536 {
2537 est = estimate_expression_value(root, parse->limitCount);
2538 if (est && IsA(est, Const))
2539 {
2540 if (((Const *) est)->constisnull)
2541 {
2542 /* NULL indicates LIMIT ALL, ie, no limit */
2543 *count_est = 0; /* treat as not present */
2544 }
2545 else
2546 {
2547 *count_est = DatumGetInt64(((Const *) est)->constvalue);
2548 if (*count_est <= 0)
2549 *count_est = 1; /* force to at least 1 */
2550 }
2551 }
2552 else
2553 *count_est = -1; /* can't estimate */
2554 }
2555 else
2556 *count_est = 0; /* not present */
2557
2558 if (parse->limitOffset)
2559 {
2560 est = estimate_expression_value(root, parse->limitOffset);
2561 if (est && IsA(est, Const))
2562 {
2563 if (((Const *) est)->constisnull)
2564 {
2565 /* Treat NULL as no offset; the executor will too */
2566 *offset_est = 0; /* treat as not present */
2567 }
2568 else
2569 {
2570 *offset_est = DatumGetInt64(((Const *) est)->constvalue);
2571 if (*offset_est < 0)
2572 *offset_est = 0; /* treat as not present */
2573 }
2574 }
2575 else
2576 *offset_est = -1; /* can't estimate */
2577 }
2578 else
2579 *offset_est = 0; /* not present */
2580
2581 if (*count_est != 0)
2582 {
2583 /*
2584 * A LIMIT clause limits the absolute number of tuples returned.
2585 * However, if it's not a constant LIMIT then we have to guess; for
2586 * lack of a better idea, assume 10% of the plan's result is wanted.
2587 */
2588 if (*count_est < 0 || *offset_est < 0)
2589 {
2590 /* LIMIT or OFFSET is an expression ... punt ... */
2591 limit_fraction = 0.10;
2592 }
2593 else
2594 {
2595 /* LIMIT (plus OFFSET, if any) is max number of tuples needed */
2596 limit_fraction = (double) *count_est + (double) *offset_est;
2597 }
2598
2599 /*
2600 * If we have absolute limits from both caller and LIMIT, use the
2601 * smaller value; likewise if they are both fractional. If one is
2602 * fractional and the other absolute, we can't easily determine which
2603 * is smaller, but we use the heuristic that the absolute will usually
2604 * be smaller.
2605 */
2606 if (tuple_fraction >= 1.0)
2607 {
2608 if (limit_fraction >= 1.0)
2609 {
2610 /* both absolute */
2611 tuple_fraction = Min(tuple_fraction, limit_fraction);
2612 }
2613 else
2614 {
2615 /* caller absolute, limit fractional; use caller's value */
2616 }
2617 }
2618 else if (tuple_fraction > 0.0)
2619 {
2620 if (limit_fraction >= 1.0)
2621 {
2622 /* caller fractional, limit absolute; use limit */
2623 tuple_fraction = limit_fraction;
2624 }
2625 else
2626 {
2627 /* both fractional */
2628 tuple_fraction = Min(tuple_fraction, limit_fraction);
2629 }
2630 }
2631 else
2632 {
2633 /* no info from caller, just use limit */
2634 tuple_fraction = limit_fraction;
2635 }
2636 }
2637 else if (*offset_est != 0 && tuple_fraction > 0.0)
2638 {
2639 /*
2640 * We have an OFFSET but no LIMIT. This acts entirely differently
2641 * from the LIMIT case: here, we need to increase rather than decrease
2642 * the caller's tuple_fraction, because the OFFSET acts to cause more
2643 * tuples to be fetched instead of fewer. This only matters if we got
2644 * a tuple_fraction > 0, however.
2645 *
2646 * As above, use 10% if OFFSET is present but unestimatable.
2647 */
2648 if (*offset_est < 0)
2649 limit_fraction = 0.10;
2650 else
2651 limit_fraction = (double) *offset_est;
2652
2653 /*
2654 * If we have absolute counts from both caller and OFFSET, add them
2655 * together; likewise if they are both fractional. If one is
2656 * fractional and the other absolute, we want to take the larger, and
2657 * we heuristically assume that's the fractional one.
2658 */
2659 if (tuple_fraction >= 1.0)
2660 {
2661 if (limit_fraction >= 1.0)
2662 {
2663 /* both absolute, so add them together */
2664 tuple_fraction += limit_fraction;
2665 }
2666 else
2667 {
2668 /* caller absolute, limit fractional; use limit */
2669 tuple_fraction = limit_fraction;
2670 }
2671 }
2672 else
2673 {
2674 if (limit_fraction >= 1.0)
2675 {
2676 /* caller fractional, limit absolute; use caller's value */
2677 }
2678 else
2679 {
2680 /* both fractional, so add them together */
2681 tuple_fraction += limit_fraction;
2682 if (tuple_fraction >= 1.0)
2683 tuple_fraction = 0.0; /* assume fetch all */
2684 }
2685 }
2686 }
2687
2688 return tuple_fraction;
2689}
Node * estimate_expression_value(PlannerInfo *root, Node *node)
Definition: clauses.c:2397

References Assert(), DatumGetInt64(), estimate_expression_value(), IsA, Min, parse(), and root.

Referenced by grouping_planner().
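The merging rules in the LIMIT branch above condense to a small decision table. A standalone sketch (hypothetical; it models only the *count_est != 0 case): values >= 1.0 act as absolute row counts, values in (0, 1) as fractions of the result, and 0 means "fetch all".

#include <stdio.h>

static double
merge_limit(double tuple_fraction, long count_est, long offset_est)
{
    double limit_fraction;

    if (count_est < 0 || offset_est < 0)
        limit_fraction = 0.10;          /* non-constant clause: guess 10% */
    else
        limit_fraction = (double) count_est + (double) offset_est;

    if (tuple_fraction >= 1.0)
    {
        if (limit_fraction >= 1.0)      /* both absolute: smaller wins */
            tuple_fraction = tuple_fraction < limit_fraction ?
                tuple_fraction : limit_fraction;
        /* caller absolute, limit fractional: keep the caller's value */
    }
    else if (tuple_fraction > 0.0)
    {
        if (limit_fraction >= 1.0)      /* absolute assumed smaller */
            tuple_fraction = limit_fraction;
        else                            /* both fractional: smaller wins */
            tuple_fraction = tuple_fraction < limit_fraction ?
                tuple_fraction : limit_fraction;
    }
    else
        tuple_fraction = limit_fraction;    /* no info from caller */

    return tuple_fraction;
}

int
main(void)
{
    printf("%.2f\n", merge_limit(0.0, 10, 5));   /* 15.00: LIMIT 10 OFFSET 5 */
    printf("%.2f\n", merge_limit(100.0, 10, 0)); /* 10.00: smaller absolute */
    printf("%.2f\n", merge_limit(0.5, -1, 0));   /* 0.10: unestimatable LIMIT */
    return 0;
}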

◆ preprocess_phv_expression()

Expr * preprocess_phv_expression ( PlannerInfo *  root,
Expr *  expr 
)

Definition at line 1348 of file planner.c.

1349{
1350 return (Expr *) preprocess_expression(root, (Node *) expr, EXPRKIND_PHV);
1351}
#define EXPRKIND_PHV
Definition: planner.c:88
static Node * preprocess_expression(PlannerInfo *root, Node *expr, int kind)
Definition: planner.c:1202

References EXPRKIND_PHV, preprocess_expression(), and root.

Referenced by extract_lateral_references().

◆ preprocess_qual_conditions()

static void preprocess_qual_conditions ( PlannerInfo *  root,
Node *  jtnode 
)
static

Definition at line 1304 of file planner.c.

1305{
1306 if (jtnode == NULL)
1307 return;
1308 if (IsA(jtnode, RangeTblRef))
1309 {
1310 /* nothing to do here */
1311 }
1312 else if (IsA(jtnode, FromExpr))
1313 {
1314 FromExpr *f = (FromExpr *) jtnode;
1315 ListCell *l;
1316
1317 foreach(l, f->fromlist)
1318 preprocess_qual_conditions(root, lfirst(l));
1319
1320 f->quals = preprocess_expression(root, f->quals, EXPRKIND_QUAL);
1321 }
1322 else if (IsA(jtnode, JoinExpr))
1323 {
1324 JoinExpr *j = (JoinExpr *) jtnode;
1325
1326 preprocess_qual_conditions(root, j->larg);
1327 preprocess_qual_conditions(root, j->rarg);
1328
1329 j->quals = preprocess_expression(root, j->quals, EXPRKIND_QUAL);
1330 }
1331 else
1332 elog(ERROR, "unrecognized node type: %d",
1333 (int) nodeTag(jtnode));
1334}
#define nodeTag(nodeptr)
Definition: nodes.h:139
static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode)
Definition: planner.c:1304
Node * quals
Definition: primnodes.h:2338
List * fromlist
Definition: primnodes.h:2337

References elog, ERROR, EXPRKIND_QUAL, FromExpr::fromlist, IsA, j, lfirst, nodeTag, preprocess_expression(), preprocess_qual_conditions(), FromExpr::quals, and root.

Referenced by preprocess_qual_conditions(), and subquery_planner().

◆ preprocess_rowmarks()

static void preprocess_rowmarks ( PlannerInfo *  root)
static

Definition at line 2343 of file planner.c.

2344{
2345 Query *parse = root->parse;
2346 Bitmapset *rels;
2347 List *prowmarks;
2348 ListCell *l;
2349 int i;
2350
2351 if (parse->rowMarks)
2352 {
2353 /*
2354 * We've got trouble if FOR [KEY] UPDATE/SHARE appears inside
2355 * grouping, since grouping renders a reference to individual tuple
2356 * CTIDs invalid. This is also checked at parse time, but that's
2357 * insufficient because of rule substitution, query pullup, etc.
2358 */
2359 CheckSelectLocking(parse, linitial_node(RowMarkClause,
2360 parse->rowMarks)->strength);
2361 }
2362 else
2363 {
2364 /*
2365 * We only need rowmarks for UPDATE, DELETE, MERGE, or FOR [KEY]
2366 * UPDATE/SHARE.
2367 */
2368 if (parse->commandType != CMD_UPDATE &&
2369 parse->commandType != CMD_DELETE &&
2370 parse->commandType != CMD_MERGE)
2371 return;
2372 }
2373
2374 /*
2375 * We need to have rowmarks for all base relations except the target. We
2376 * make a bitmapset of all base rels and then remove the items we don't
2377 * need or have FOR [KEY] UPDATE/SHARE marks for.
2378 */
2379 rels = get_relids_in_jointree((Node *) parse->jointree, false, false);
2380 if (parse->resultRelation)
2381 rels = bms_del_member(rels, parse->resultRelation);
2382
2383 /*
2384 * Convert RowMarkClauses to PlanRowMark representation.
2385 */
2386 prowmarks = NIL;
2387 foreach(l, parse->rowMarks)
2388 {
2389 RowMarkClause *rc = lfirst_node(RowMarkClause, l);
2390 RangeTblEntry *rte = rt_fetch(rc->rti, parse->rtable);
2391 PlanRowMark *newrc;
2392
2393 /*
2394 * Currently, it is syntactically impossible to have FOR UPDATE et al
2395 * applied to an update/delete target rel. If that ever becomes
2396 * possible, we should drop the target from the PlanRowMark list.
2397 */
2398 Assert(rc->rti != parse->resultRelation);
2399
2400 /*
2401 * Ignore RowMarkClauses for subqueries; they aren't real tables and
2402 * can't support true locking. Subqueries that got flattened into the
2403 * main query should be ignored completely. Any that didn't will get
2404 * ROW_MARK_COPY items in the next loop.
2405 */
2406 if (rte->rtekind != RTE_RELATION)
2407 continue;
2408
2409 rels = bms_del_member(rels, rc->rti);
2410
2411 newrc = makeNode(PlanRowMark);
2412 newrc->rti = newrc->prti = rc->rti;
2413 newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2414 newrc->markType = select_rowmark_type(rte, rc->strength);
2415 newrc->allMarkTypes = (1 << newrc->markType);
2416 newrc->strength = rc->strength;
2417 newrc->waitPolicy = rc->waitPolicy;
2418 newrc->isParent = false;
2419
2420 prowmarks = lappend(prowmarks, newrc);
2421 }
2422
2423 /*
2424 * Now, add rowmarks for any non-target, non-locked base relations.
2425 */
2426 i = 0;
2427 foreach(l, parse->rtable)
2428 {
2429 RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
2430 PlanRowMark *newrc;
2431
2432 i++;
2433 if (!bms_is_member(i, rels))
2434 continue;
2435
2436 newrc = makeNode(PlanRowMark);
2437 newrc->rti = newrc->prti = i;
2438 newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2439 newrc->markType = select_rowmark_type(rte, LCS_NONE);
2440 newrc->allMarkTypes = (1 << newrc->markType);
2441 newrc->strength = LCS_NONE;
2442 newrc->waitPolicy = LockWaitBlock; /* doesn't matter */
2443 newrc->isParent = false;
2444
2445 prowmarks = lappend(prowmarks, newrc);
2446 }
2447
2448 root->rowMarks = prowmarks;
2449}
@ LockWaitBlock
Definition: lockoptions.h:39
@ LCS_NONE
Definition: lockoptions.h:23
@ CMD_DELETE
Definition: nodes.h:274
void CheckSelectLocking(Query *qry, LockClauseStrength strength)
Definition: analyze.c:3471
#define rt_fetch(rangetable_index, rangetable)
Definition: parsetree.h:31
RowMarkType select_rowmark_type(RangeTblEntry *rte, LockClauseStrength strength)
Definition: planner.c:2455
Relids get_relids_in_jointree(Node *jtnode, bool include_outer_joins, bool include_inner_joins)
LockClauseStrength strength
Definition: plannodes.h:1550
Index prti
Definition: plannodes.h:1542
RowMarkType markType
Definition: plannodes.h:1546
LockWaitPolicy waitPolicy
Definition: plannodes.h:1552
bool isParent
Definition: plannodes.h:1554
Index rowmarkId
Definition: plannodes.h:1544
int allMarkTypes
Definition: plannodes.h:1548
LockClauseStrength strength
Definition: parsenodes.h:1594
LockWaitPolicy waitPolicy
Definition: parsenodes.h:1595

References PlanRowMark::allMarkTypes, Assert(), bms_del_member(), bms_is_member(), CheckSelectLocking(), CMD_DELETE, CMD_MERGE, CMD_UPDATE, get_relids_in_jointree(), i, PlanRowMark::isParent, lappend(), LCS_NONE, lfirst_node, linitial_node, LockWaitBlock, makeNode, PlanRowMark::markType, NIL, parse(), PlanRowMark::prti, root, PlanRowMark::rowmarkId, rt_fetch, RTE_RELATION, RangeTblEntry::rtekind, RowMarkClause::rti, PlanRowMark::rti, select_rowmark_type(), RowMarkClause::strength, PlanRowMark::strength, RowMarkClause::waitPolicy, and PlanRowMark::waitPolicy.

Referenced by subquery_planner().
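A toy model of the bookkeeping above (hypothetical; a bitmask stands in for the Bitmapset of base rels): drop the result relation and the explicitly marked rels, then give every remaining base rel a default LCS_NONE mark, which only guarantees the ability to re-fetch the row.

#include <stdio.h>

int
main(void)
{
    unsigned rels = 0x0E;           /* base rels 1,2,3 as bits 1..3 */
    int result_rel = 1;             /* UPDATE target */
    int marked_rel = 2;             /* e.g. ... FOR UPDATE OF rel2 */

    rels &= ~(1u << result_rel);    /* target needs no rowmark */
    rels &= ~(1u << marked_rel);    /* explicit mark already converted */

    for (int rti = 1; rti <= 3; rti++)
        if (rels & (1u << rti))
            printf("rel %d gets a default LCS_NONE rowmark\n", rti);
    return 0;                       /* prints only rel 3 */
}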

◆ remap_to_groupclause_idx()

static List * remap_to_groupclause_idx ( List *  groupClause,
List *  gsets,
int *  tleref_to_colnum_map 
)
static

Definition at line 2306 of file planner.c.

2309{
2310 int ref = 0;
2311 List *result = NIL;
2312 ListCell *lc;
2313
2314 foreach(lc, groupClause)
2315 {
2316 SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
2317
2318 tleref_to_colnum_map[gc->tleSortGroupRef] = ref++;
2319 }
2320
2321 foreach(lc, gsets)
2322 {
2323 List *set = NIL;
2324 ListCell *lc2;
2325 GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
2326
2327 foreach(lc2, gs->set)
2328 {
2329 set = lappend_int(set, tleref_to_colnum_map[lfirst_int(lc2)]);
2330 }
2331
2332 result = lappend(result, set);
2333 }
2334
2335 return result;
2336}

References lappend(), lappend_int(), lfirst_int, lfirst_node, NIL, GroupingSetData::set, and SortGroupClause::tleSortGroupRef.

Referenced by consider_groupingsets_paths(), and preprocess_grouping_sets().
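A standalone sketch of the remapping (hypothetical; arrays stand in for the List structures): the groupClause order defines the 0-based column positions, and each grouping set's sortgrouprefs are translated through that map.

#include <stdio.h>

int
main(void)
{
    int clause_refs[] = {7, 3, 5};  /* tleSortGroupRefs in groupClause order */
    int map[8];                     /* tleref_to_colnum_map, maxref = 7 */

    for (int i = 0; i < 3; i++)
        map[clause_refs[i]] = i;    /* ref 7 -> 0, ref 3 -> 1, ref 5 -> 2 */

    int gset[] = {3, 7};            /* one grouping set, by sortgroupref */

    for (int i = 0; i < 2; i++)
        printf("%d ", map[gset[i]]);
    printf("\n");                   /* prints: 1 0 */
    return 0;
}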

◆ reorder_grouping_sets()

static List * reorder_grouping_sets ( List *  groupingSets,
List *  sortclause 
)
static

Definition at line 3080 of file planner.c.

3081{
3082 ListCell *lc;
3083 List *previous = NIL;
3084 List *result = NIL;
3085
3086 foreach(lc, groupingSets)
3087 {
3088 List *candidate = (List *) lfirst(lc);
3089 List *new_elems = list_difference_int(candidate, previous);
3090 GroupingSetData *gs = makeNode(GroupingSetData);
3091
3092 while (list_length(sortclause) > list_length(previous) &&
3093 new_elems != NIL)
3094 {
3095 SortGroupClause *sc = list_nth(sortclause, list_length(previous));
3096 int ref = sc->tleSortGroupRef;
3097
3098 if (list_member_int(new_elems, ref))
3099 {
3100 previous = lappend_int(previous, ref);
3101 new_elems = list_delete_int(new_elems, ref);
3102 }
3103 else
3104 {
3105 /* diverged from the sortclause; give up on it */
3106 sortclause = NIL;
3107 break;
3108 }
3109 }
3110
3111 previous = list_concat(previous, new_elems);
3112
3113 gs->set = list_copy(previous);
3114 result = lcons(gs, result);
3115 }
3116
3117 list_free(previous);
3118
3119 return result;
3120}
List * list_difference_int(const List *list1, const List *list2)
Definition: list.c:1288
List * list_delete_int(List *list, int datum)
Definition: list.c:891
bool list_member_int(const List *list, int datum)
Definition: list.c:702
static void * list_nth(const List *list, int n)
Definition: pg_list.h:299

References lappend_int(), lcons(), lfirst, list_concat(), list_copy(), list_delete_int(), list_difference_int(), list_free(), list_length(), list_member_int(), list_nth(), makeNode, NIL, GroupingSetData::set, and SortGroupClause::tleSortGroupRef.

Referenced by preprocess_grouping_sets().
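The ordering preference is the subtle part: while the sortclause still matches, the members of a grouping set are emitted in ORDER BY order, so one sort can serve both the rollup and the final ORDER BY. A minimal sketch of that preference for a single set (hypothetical; the real function also threads the accumulated "previous" list through a whole chain of sets):

#include <stdio.h>

int
main(void)
{
    int set[] = {1, 2};         /* grouping set {a,b}; membership is unordered */
    int sortclause[] = {2, 1};  /* ORDER BY b, a */
    int previous[2];
    int n = 0;

    /* consume sortclause refs for as long as they are members of the set */
    for (int s = 0; s < 2 && n < 2; s++)
    {
        int ref = sortclause[s];
        int found = 0;

        for (int i = 0; i < 2; i++)
            if (set[i] == ref)
                found = 1;
        if (!found)
            break;              /* diverged; give up on the sortclause */
        previous[n++] = ref;
    }
    /* any remaining members would be appended here (none in this example) */
    printf("ordered: %d %d\n", previous[0], previous[1]);  /* 2 1, i.e. b, a */
    return 0;
}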

◆ select_active_windows()

static List * select_active_windows ( PlannerInfo *  root,
WindowFuncLists *  wflists 
)
static

Definition at line 5856 of file planner.c.

5857{
5858 List *windowClause = root->parse->windowClause;
5859 List *result = NIL;
5860 ListCell *lc;
5861 int nActive = 0;
5862 WindowClauseSortData *actives = palloc(sizeof(WindowClauseSortData)
5863 * list_length(windowClause));
5864
5865 /* First, construct an array of the active windows */
5866 foreach(lc, windowClause)
5867 {
5868 WindowClause *wc = lfirst_node(WindowClause, lc);
5869
5870 /* It's only active if wflists shows some related WindowFuncs */
5871 Assert(wc->winref <= wflists->maxWinRef);
5872 if (wflists->windowFuncs[wc->winref] == NIL)
5873 continue;
5874
5875 actives[nActive].wc = wc; /* original clause */
5876
5877 /*
5878 * For sorting, we want the list of partition keys followed by the
5879 * list of sort keys. But pathkeys construction will remove duplicates
5880 * between the two, so we can as well (even though we can't detect all
5881 * of the duplicates, since some may come from ECs - that might mean
5882 * we miss optimization chances here). We must, however, ensure that
5883 * the order of entries is preserved with respect to the ones we do
5884 * keep.
5885 *
5886 * partitionClause and orderClause had their own duplicates removed in
5887 * parse analysis, so we're only concerned here with removing
5888 * orderClause entries that also appear in partitionClause.
5889 */
5890 actives[nActive].uniqueOrder =
5891 list_concat_unique(list_copy(wc->partitionClause),
5892 wc->orderClause);
5893 nActive++;
5894 }
5895
5896 /*
5897 * Sort active windows by their partitioning/ordering clauses, ignoring
5898 * any framing clauses, so that the windows that need the same sorting are
5899 * adjacent in the list. When we come to generate paths, this will avoid
5900 * inserting additional Sort nodes.
5901 *
5902 * This is how we implement a specific requirement from the SQL standard,
5903 * which says that when two or more windows are order-equivalent (i.e.
5904 * have matching partition and order clauses, even if their names or
5905 * framing clauses differ), then all peer rows must be presented in the
5906 * same order in all of them. If we allowed multiple sort nodes for such
5907 * cases, we'd risk having the peer rows end up in different orders in
5908 * equivalent windows due to sort instability. (See General Rule 4 of
5909 * <window clause> in SQL2008 - SQL2016.)
5910 *
5911 * Additionally, if the entire list of clauses of one window is a prefix
5912 * of another, put first the window with stronger sorting requirements.
5913 * This way we will first sort for stronger window, and won't have to sort
5914 * again for the weaker one.
5915 */
5916 qsort(actives, nActive, sizeof(WindowClauseSortData), common_prefix_cmp);
5917
5918 /* build ordered list of the original WindowClause nodes */
5919 for (int i = 0; i < nActive; i++)
5920 result = lappend(result, actives[i].wc);
5921
5922 pfree(actives);
5923
5924 return result;
5925}
List * list_concat_unique(List *list1, const List *list2)
Definition: list.c:1405
static int common_prefix_cmp(const void *a, const void *b)
Definition: planner.c:5990
#define qsort(a, b, c, d)
Definition: port.h:479
WindowClause * wc
Definition: planner.c:116

References Assert(), common_prefix_cmp(), i, lappend(), lfirst_node, list_concat_unique(), list_copy(), list_length(), WindowFuncLists::maxWinRef, NIL, WindowClause::orderClause, palloc(), WindowClause::partitionClause, pfree(), qsort, root, WindowClauseSortData::uniqueOrder, WindowClauseSortData::wc, WindowFuncLists::windowFuncs, and WindowClause::winref.

Referenced by grouping_planner().

◆ select_rowmark_type()

RowMarkType select_rowmark_type ( RangeTblEntry *  rte,
LockClauseStrength  strength 
)

Definition at line 2455 of file planner.c.

2456{
2457 if (rte->rtekind != RTE_RELATION)
2458 {
2459 /* If it's not a table at all, use ROW_MARK_COPY */
2460 return ROW_MARK_COPY;
2461 }
2462 else if (rte->relkind == RELKIND_FOREIGN_TABLE)
2463 {
2464 /* Let the FDW select the rowmark type, if it wants to */
2465 FdwRoutine *fdwroutine = GetFdwRoutineByRelId(rte->relid);
2466
2467 if (fdwroutine->GetForeignRowMarkType != NULL)
2468 return fdwroutine->GetForeignRowMarkType(rte, strength);
2469 /* Otherwise, use ROW_MARK_COPY by default */
2470 return ROW_MARK_COPY;
2471 }
2472 else
2473 {
2474 /* Regular table, apply the appropriate lock type */
2475 switch (strength)
2476 {
2477 case LCS_NONE:
2478
2479 /*
2480 * We don't need a tuple lock, only the ability to re-fetch
2481 * the row.
2482 */
2483 return ROW_MARK_REFERENCE;
2484 break;
2485 case LCS_FORKEYSHARE:
2486 return ROW_MARK_KEYSHARE;
2487 break;
2488 case LCS_FORSHARE:
2489 return ROW_MARK_SHARE;
2490 break;
2491 case LCS_FORNOKEYUPDATE:
2492 return ROW_MARK_NOKEYEXCLUSIVE;
2493 break;
2494 case LCS_FORUPDATE:
2495 return ROW_MARK_EXCLUSIVE;
2496 break;
2497 }
2498 elog(ERROR, "unrecognized LockClauseStrength %d", (int) strength);
2499 return ROW_MARK_EXCLUSIVE; /* keep compiler quiet */
2500 }
2501}
FdwRoutine * GetFdwRoutineByRelId(Oid relid)
Definition: foreign.c:419
@ LCS_FORUPDATE
Definition: lockoptions.h:27
@ LCS_FORSHARE
Definition: lockoptions.h:25
@ LCS_FORKEYSHARE
Definition: lockoptions.h:24
@ LCS_FORNOKEYUPDATE
Definition: lockoptions.h:26
@ ROW_MARK_COPY
Definition: plannodes.h:1491
@ ROW_MARK_REFERENCE
Definition: plannodes.h:1490
@ ROW_MARK_SHARE
Definition: plannodes.h:1488
@ ROW_MARK_EXCLUSIVE
Definition: plannodes.h:1486
@ ROW_MARK_NOKEYEXCLUSIVE
Definition: plannodes.h:1487
@ ROW_MARK_KEYSHARE
Definition: plannodes.h:1489
GetForeignRowMarkType_function GetForeignRowMarkType
Definition: fdwapi.h:247

References elog, ERROR, GetFdwRoutineByRelId(), FdwRoutine::GetForeignRowMarkType, LCS_FORKEYSHARE, LCS_FORNOKEYUPDATE, LCS_FORSHARE, LCS_FORUPDATE, LCS_NONE, ROW_MARK_COPY, ROW_MARK_EXCLUSIVE, ROW_MARK_KEYSHARE, ROW_MARK_NOKEYEXCLUSIVE, ROW_MARK_REFERENCE, ROW_MARK_SHARE, RTE_RELATION, and RangeTblEntry::rtekind.

Referenced by expand_single_inheritance_child(), and preprocess_rowmarks().
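For foreign tables, the delegation above goes through the optional GetForeignRowMarkType member of FdwRoutine. A minimal sketch of an FDW supplying it (the FDW and function names are hypothetical; the callback signature is the one from fdwapi.h):

#include "postgres.h"
#include "foreign/fdwapi.h"

/* Always fetch whole rows instead of taking remote tuple locks. */
static RowMarkType
my_fdw_row_mark_type(RangeTblEntry *rte, LockClauseStrength strength)
{
    return ROW_MARK_COPY;
}

/* ... in the FDW's handler function, after makeNode(FdwRoutine): */
/*     fdwroutine->GetForeignRowMarkType = my_fdw_row_mark_type;   */

If the callback is left NULL, select_rowmark_type() falls back to ROW_MARK_COPY for the foreign table anyway, as the listing shows.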

◆ standard_planner()

PlannedStmt * standard_planner ( Query *  parse,
const char *  query_string,
int  cursorOptions,
ParamListInfo  boundParams 
)

Definition at line 302 of file planner.c.

304{
305 PlannedStmt *result;
306 PlannerGlobal *glob;
307 double tuple_fraction;
308 PlannerInfo *root;
309 RelOptInfo *final_rel;
310 Path *best_path;
311 Plan *top_plan;
312 ListCell *lp,
313 *lr;
314
315 /*
316 * Set up global state for this planner invocation. This data is needed
317 * across all levels of sub-Query that might exist in the given command,
318 * so we keep it in a separate struct that's linked to by each per-Query
319 * PlannerInfo.
320 */
321 glob = makeNode(PlannerGlobal);
322
323 glob->boundParams = boundParams;
324 glob->subplans = NIL;
325 glob->subpaths = NIL;
326 glob->subroots = NIL;
327 glob->rewindPlanIDs = NULL;
328 glob->finalrtable = NIL;
329 glob->finalrteperminfos = NIL;
330 glob->finalrowmarks = NIL;
331 glob->resultRelations = NIL;
332 glob->appendRelations = NIL;
333 glob->relationOids = NIL;
334 glob->invalItems = NIL;
335 glob->paramExecTypes = NIL;
336 glob->lastPHId = 0;
337 glob->lastRowMarkId = 0;
338 glob->lastPlanNodeId = 0;
339 glob->transientPlan = false;
340 glob->dependsOnRole = false;
341
342 /*
343 * Assess whether it's feasible to use parallel mode for this query. We
344 * can't do this in a standalone backend, or if the command will try to
345 * modify any data, or if this is a cursor operation, or if GUCs are set
346 * to values that don't permit parallelism, or if parallel-unsafe
347 * functions are present in the query tree.
348 *
349 * (Note that we do allow CREATE TABLE AS, SELECT INTO, and CREATE
350 * MATERIALIZED VIEW to use parallel plans, but this is safe only because
351 * the command is writing into a completely new table which workers won't
352 * be able to see. If the workers could see the table, the fact that
353 * group locking would cause them to ignore the leader's heavyweight GIN
354 * page locks would make this unsafe. We'll have to fix that somehow if
355 * we want to allow parallel inserts in general; updates and deletes have
356 * additional problems especially around combo CIDs.)
357 *
358 * For now, we don't try to use parallel mode if we're running inside a
359 * parallel worker. We might eventually be able to relax this
360 * restriction, but for now it seems best not to have parallel workers
361 * trying to create their own parallel workers.
362 */
363 if ((cursorOptions & CURSOR_OPT_PARALLEL_OK) != 0 &&
364 IsUnderPostmaster &&
365 parse->commandType == CMD_SELECT &&
366 !parse->hasModifyingCTE &&
367 max_parallel_workers_per_gather > 0 &&
368 !IsParallelWorker())
369 {
370 /* all the cheap tests pass, so scan the query tree */
371 glob->maxParallelHazard = max_parallel_hazard(parse);
372 glob->parallelModeOK = (glob->maxParallelHazard != PROPARALLEL_UNSAFE);
373 }
374 else
375 {
376 /* skip the query tree scan, just assume it's unsafe */
377 glob->maxParallelHazard = PROPARALLEL_UNSAFE;
378 glob->parallelModeOK = false;
379 }
380
381 /*
382 * glob->parallelModeNeeded is normally set to false here and changed to
383 * true during plan creation if a Gather or Gather Merge plan is actually
384 * created (cf. create_gather_plan, create_gather_merge_plan).
385 *
386 * However, if debug_parallel_query = on or debug_parallel_query =
387 * regress, then we impose parallel mode whenever it's safe to do so, even
388 * if the final plan doesn't use parallelism. It's not safe to do so if
389 * the query contains anything parallel-unsafe; parallelModeOK will be
390 * false in that case. Note that parallelModeOK can't change after this
391 * point. Otherwise, everything in the query is either parallel-safe or
392 * parallel-restricted, and in either case it should be OK to impose
393 * parallel-mode restrictions. If that ends up breaking something, then
394 * either some function the user included in the query is incorrectly
395 * labeled as parallel-safe or parallel-restricted when in reality it's
396 * parallel-unsafe, or else the query planner itself has a bug.
397 */
398 glob->parallelModeNeeded = glob->parallelModeOK &&
399 (debug_parallel_query != DEBUG_PARALLEL_OFF);
400
401 /* Determine what fraction of the plan is likely to be scanned */
402 if (cursorOptions & CURSOR_OPT_FAST_PLAN)
403 {
404 /*
405 * We have no real idea how many tuples the user will ultimately FETCH
406 * from a cursor, but it is often the case that he doesn't want 'em
407 * all, or would prefer a fast-start plan anyway so that he can
408 * process some of the tuples sooner. Use a GUC parameter to decide
409 * what fraction to optimize for.
410 */
411 tuple_fraction = cursor_tuple_fraction;
412
413 /*
414 * We document cursor_tuple_fraction as simply being a fraction, which
415 * means the edge cases 0 and 1 have to be treated specially here. We
416 * convert 1 to 0 ("all the tuples") and 0 to a very small fraction.
417 */
418 if (tuple_fraction >= 1.0)
419 tuple_fraction = 0.0;
420 else if (tuple_fraction <= 0.0)
421 tuple_fraction = 1e-10;
422 }
423 else
424 {
425 /* Default assumption is we need all the tuples */
426 tuple_fraction = 0.0;
427 }
428
429 /* primary planning entry point (may recurse for subqueries) */
430 root = subquery_planner(glob, parse, NULL, false, tuple_fraction, NULL);
431
432 /* Select best Path and turn it into a Plan */
433 final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
434 best_path = get_cheapest_fractional_path(final_rel, tuple_fraction);
435
436 top_plan = create_plan(root, best_path);
437
438 /*
439 * If creating a plan for a scrollable cursor, make sure it can run
440 * backwards on demand. Add a Material node at the top at need.
441 */
442 if (cursorOptions & CURSOR_OPT_SCROLL)
443 {
444 if (!ExecSupportsBackwardScan(top_plan))
445 top_plan = materialize_finished_plan(top_plan);
446 }
447
448 /*
449 * Optionally add a Gather node for testing purposes, provided this is
450 * actually a safe thing to do.
451 *
452 * We can add Gather even when top_plan has parallel-safe initPlans, but
453 * then we have to move the initPlans to the Gather node because of
454 * SS_finalize_plan's limitations. That would cause cosmetic breakage of
455 * regression tests when debug_parallel_query = regress, because initPlans
456 * that would normally appear on the top_plan move to the Gather, causing
457 * them to disappear from EXPLAIN output. That doesn't seem worth kluging
458 * EXPLAIN to hide, so skip it when debug_parallel_query = regress.
459 */
460 if (debug_parallel_query != DEBUG_PARALLEL_OFF &&
461 top_plan->parallel_safe &&
462 (top_plan->initPlan == NIL ||
463 debug_parallel_query != DEBUG_PARALLEL_REGRESS))
464 {
465 Gather *gather = makeNode(Gather);
466 Cost initplan_cost;
467 bool unsafe_initplans;
468
469 gather->plan.targetlist = top_plan->targetlist;
470 gather->plan.qual = NIL;
471 gather->plan.lefttree = top_plan;
472 gather->plan.righttree = NULL;
473 gather->num_workers = 1;
474 gather->single_copy = true;
475 gather->invisible = (debug_parallel_query == DEBUG_PARALLEL_REGRESS);
476
477 /* Transfer any initPlans to the new top node */
478 gather->plan.initPlan = top_plan->initPlan;
479 top_plan->initPlan = NIL;
480
481 /*
482 * Since this Gather has no parallel-aware descendants to signal to,
483 * we don't need a rescan Param.
484 */
485 gather->rescan_param = -1;
486
487 /*
488 * Ideally we'd use cost_gather here, but setting up dummy path data
489 * to satisfy it doesn't seem much cleaner than knowing what it does.
490 */
491 gather->plan.startup_cost = top_plan->startup_cost +
492 parallel_setup_cost;
493 gather->plan.total_cost = top_plan->total_cost +
494 parallel_setup_cost + parallel_tuple_cost * top_plan->plan_rows;
495 gather->plan.plan_rows = top_plan->plan_rows;
496 gather->plan.plan_width = top_plan->plan_width;
497 gather->plan.parallel_aware = false;
498 gather->plan.parallel_safe = false;
499
500 /*
501 * Delete the initplans' cost from top_plan. We needn't add it to the
502 * Gather node, since the above coding already included it there.
503 */
504 SS_compute_initplan_cost(gather->plan.initPlan,
505 &initplan_cost, &unsafe_initplans);
506 top_plan->startup_cost -= initplan_cost;
507 top_plan->total_cost -= initplan_cost;
508
509 /* use parallel mode for parallel plans. */
510 root->glob->parallelModeNeeded = true;
511
512 top_plan = &gather->plan;
513 }
514
515 /*
516 * If any Params were generated, run through the plan tree and compute
517 * each plan node's extParam/allParam sets. Ideally we'd merge this into
518 * set_plan_references' tree traversal, but for now it has to be separate
519 * because we need to visit subplans before not after main plan.
520 */
521 if (glob->paramExecTypes != NIL)
522 {
523 Assert(list_length(glob->subplans) == list_length(glob->subroots));
524 forboth(lp, glob->subplans, lr, glob->subroots)
525 {
526 Plan *subplan = (Plan *) lfirst(lp);
527 PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);
528
529 SS_finalize_plan(subroot, subplan);
530 }
531 SS_finalize_plan(root, top_plan);
532 }
533
534 /* final cleanup of the plan */
535 Assert(glob->finalrtable == NIL);
536 Assert(glob->finalrteperminfos == NIL);
537 Assert(glob->finalrowmarks == NIL);
538 Assert(glob->resultRelations == NIL);
539 Assert(glob->appendRelations == NIL);
540 top_plan = set_plan_references(root, top_plan);
541 /* ... and the subplans (both regular subplans and initplans) */
542 Assert(list_length(glob->subplans) == list_length(glob->subroots));
543 forboth(lp, glob->subplans, lr, glob->subroots)
544 {
545 Plan *subplan = (Plan *) lfirst(lp);
546 PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);
547
548 lfirst(lp) = set_plan_references(subroot, subplan);
549 }
550
551 /* build the PlannedStmt result */
552 result = makeNode(PlannedStmt);
553
554 result->commandType = parse->commandType;
555 result->queryId = parse->queryId;
556 result->hasReturning = (parse->returningList != NIL);
557 result->hasModifyingCTE = parse->hasModifyingCTE;
558 result->canSetTag = parse->canSetTag;
559 result->transientPlan = glob->transientPlan;
560 result->dependsOnRole = glob->dependsOnRole;
561 result->parallelModeNeeded = glob->parallelModeNeeded;
562 result->planTree = top_plan;
563 result->partPruneInfos = glob->partPruneInfos;
564 result->rtable = glob->finalrtable;
565 result->unprunableRelids = bms_difference(glob->allRelids,
566 glob->prunableRelids);
567 result->permInfos = glob->finalrteperminfos;
568 result->resultRelations = glob->resultRelations;
569 result->firstResultRels = glob->firstResultRels;
570 result->appendRelations = glob->appendRelations;
571 result->subplans = glob->subplans;
572 result->rewindPlanIDs = glob->rewindPlanIDs;
573 result->rowMarks = glob->finalrowmarks;
574 result->relationOids = glob->relationOids;
575 result->invalItems = glob->invalItems;
576 result->paramExecTypes = glob->paramExecTypes;
577 /* utilityStmt should be null, but we might as well copy it */
578 result->utilityStmt = parse->utilityStmt;
579 result->stmt_location = parse->stmt_location;
580 result->stmt_len = parse->stmt_len;
581
582 result->jitFlags = PGJIT_NONE;
583 if (jit_enabled && jit_above_cost >= 0 &&
584 top_plan->total_cost > jit_above_cost)
585 {
586 result->jitFlags |= PGJIT_PERFORM;
587
588 /*
589 * Decide how much effort should be put into generating better code.
590 */
591 if (jit_optimize_above_cost >= 0 &&
592 top_plan->total_cost > jit_optimize_above_cost)
593 result->jitFlags |= PGJIT_OPT3;
594 if (jit_inline_above_cost >= 0 &&
595 top_plan->total_cost > jit_inline_above_cost)
596 result->jitFlags |= PGJIT_INLINE;
597
598 /*
599 * Decide which operations should be JITed.
600 */
601 if (jit_expressions)
602 result->jitFlags |= PGJIT_EXPR;
603 if (jit_tuple_deforming)
604 result->jitFlags |= PGJIT_DEFORM;
605 }
606
607 if (glob->partition_directory != NULL)
608 DestroyPartitionDirectory(glob->partition_directory);
609
610 return result;
611}
Bitmapset * bms_difference(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:346
char max_parallel_hazard(Query *parse)
Definition: clauses.c:735
int max_parallel_workers_per_gather
Definition: costsize.c:143
double parallel_setup_cost
Definition: costsize.c:136
double parallel_tuple_cost
Definition: costsize.c:135
Plan * materialize_finished_plan(Plan *subplan)
Definition: createplan.c:6594
Plan * create_plan(PlannerInfo *root, Path *best_path)
Definition: createplan.c:337
bool ExecSupportsBackwardScan(Plan *node)
Definition: execAmi.c:511
#define IsParallelWorker()
Definition: parallel.h:60
double jit_optimize_above_cost
Definition: jit.c:41
bool jit_enabled
Definition: jit.c:32
bool jit_expressions
Definition: jit.c:36
bool jit_tuple_deforming
Definition: jit.c:38
double jit_above_cost
Definition: jit.c:39
double jit_inline_above_cost
Definition: jit.c:40
#define PGJIT_OPT3
Definition: jit.h:21
#define PGJIT_NONE
Definition: jit.h:19
#define PGJIT_EXPR
Definition: jit.h:23
#define PGJIT_DEFORM
Definition: jit.h:24
#define PGJIT_INLINE
Definition: jit.h:22
#define PGJIT_PERFORM
Definition: jit.h:20
@ DEBUG_PARALLEL_REGRESS
Definition: optimizer.h:108
@ DEBUG_PARALLEL_OFF
Definition: optimizer.h:106
#define CURSOR_OPT_SCROLL
Definition: parsenodes.h:3376
#define CURSOR_OPT_FAST_PLAN
Definition: parsenodes.h:3382
#define CURSOR_OPT_PARALLEL_OK
Definition: parsenodes.h:3385
void DestroyPartitionDirectory(PartitionDirectory pdir)
Definition: partdesc.c:484
double cursor_tuple_fraction
Definition: planner.c:67
Path * get_cheapest_fractional_path(RelOptInfo *rel, double tuple_fraction)
Definition: planner.c:6483
PlannerInfo * subquery_planner(PlannerGlobal *glob, Query *parse, PlannerInfo *parent_root, bool hasRecursion, double tuple_fraction, SetOperationStmt *setops)
Definition: planner.c:647
int debug_parallel_query
Definition: planner.c:68
Plan * set_plan_references(PlannerInfo *root, Plan *plan)
Definition: setrefs.c:288
int num_workers
Definition: plannodes.h:1289
bool invisible
Definition: plannodes.h:1295
bool single_copy
Definition: plannodes.h:1293
Plan plan
Definition: plannodes.h:1287
int rescan_param
Definition: plannodes.h:1291
struct Plan * lefttree
Definition: plannodes.h:213
Cost total_cost
Definition: plannodes.h:179
struct Plan * righttree
Definition: plannodes.h:214
bool parallel_aware
Definition: plannodes.h:193
Cost startup_cost
Definition: plannodes.h:177
List * qual
Definition: plannodes.h:211
int plan_width
Definition: plannodes.h:187
bool parallel_safe
Definition: plannodes.h:195
Cardinality plan_rows
Definition: plannodes.h:185
List * targetlist
Definition: plannodes.h:209
List * initPlan
Definition: plannodes.h:216
struct Plan * planTree
Definition: plannodes.h:83
List * firstResultRels
Definition: plannodes.h:113
bool hasModifyingCTE
Definition: plannodes.h:65
List * appendRelations
Definition: plannodes.h:116
List * permInfos
Definition: plannodes.h:102
bool canSetTag
Definition: plannodes.h:68
List * rowMarks
Definition: plannodes.h:127
int jitFlags
Definition: plannodes.h:80
Bitmapset * rewindPlanIDs
Definition: plannodes.h:124
ParseLoc stmt_len
Definition: plannodes.h:145
bool hasReturning
Definition: plannodes.h:62
ParseLoc stmt_location
Definition: plannodes.h:143
List * invalItems
Definition: plannodes.h:133
bool transientPlan
Definition: plannodes.h:71
List * resultRelations
Definition: plannodes.h:106
List * subplans
Definition: plannodes.h:121
List * relationOids
Definition: plannodes.h:130
bool dependsOnRole
Definition: plannodes.h:74
Bitmapset * unprunableRelids
Definition: plannodes.h:97
CmdType commandType
Definition: plannodes.h:53
Node * utilityStmt
Definition: plannodes.h:139
List * rtable
Definition: plannodes.h:91
List * partPruneInfos
Definition: plannodes.h:88
List * paramExecTypes
Definition: plannodes.h:136
bool parallelModeNeeded
Definition: plannodes.h:77
uint64 queryId
Definition: plannodes.h:56
Bitmapset * prunableRelids
Definition: pathnodes.h:130
int lastPlanNodeId
Definition: pathnodes.h:166
char maxParallelHazard
Definition: pathnodes.h:181
List * subplans
Definition: pathnodes.h:105
bool dependsOnRole
Definition: pathnodes.h:172
Bitmapset * allRelids
Definition: pathnodes.h:123
List * appendRelations
Definition: pathnodes.h:145
List * finalrowmarks
Definition: pathnodes.h:136
List * paramExecTypes
Definition: pathnodes.h:157
bool parallelModeOK
Definition: pathnodes.h:175
bool transientPlan
Definition: pathnodes.h:169
Bitmapset * rewindPlanIDs
Definition: pathnodes.h:114
List * finalrteperminfos
Definition: pathnodes.h:133
List * subpaths
Definition: pathnodes.h:108
Index lastPHId
Definition: pathnodes.h:160
Index lastRowMarkId
Definition: pathnodes.h:163
List * resultRelations
Definition: pathnodes.h:139
List * partPruneInfos
Definition: pathnodes.h:148
List * finalrtable
Definition: pathnodes.h:117
List * firstResultRels
Definition: pathnodes.h:142
bool parallelModeNeeded
Definition: pathnodes.h:178
void SS_finalize_plan(PlannerInfo *root, Plan *plan)
Definition: subselect.c:2368
void SS_compute_initplan_cost(List *init_plans, Cost *initplan_cost_p, bool *unsafe_initplans_p)
Definition: subselect.c:2312

References PlannerGlobal::allRelids, PlannerGlobal::appendRelations, PlannedStmt::appendRelations, Assert(), bms_difference(), PlannedStmt::canSetTag, CMD_SELECT, PlannedStmt::commandType, create_plan(), CURSOR_OPT_FAST_PLAN, CURSOR_OPT_PARALLEL_OK, CURSOR_OPT_SCROLL, cursor_tuple_fraction, DEBUG_PARALLEL_OFF, debug_parallel_query, DEBUG_PARALLEL_REGRESS, PlannerGlobal::dependsOnRole, PlannedStmt::dependsOnRole, DestroyPartitionDirectory(), ExecSupportsBackwardScan(), fetch_upper_rel(), PlannerGlobal::finalrowmarks, PlannerGlobal::finalrtable, PlannerGlobal::finalrteperminfos, PlannerGlobal::firstResultRels, PlannedStmt::firstResultRels, forboth, get_cheapest_fractional_path(), PlannedStmt::hasModifyingCTE, PlannedStmt::hasReturning, Plan::initPlan, PlannerGlobal::invalItems, PlannedStmt::invalItems, Gather::invisible, IsParallelWorker, IsUnderPostmaster, jit_above_cost, jit_enabled, jit_expressions, jit_inline_above_cost, jit_optimize_above_cost, jit_tuple_deforming, PlannedStmt::jitFlags, PlannerGlobal::lastPHId, PlannerGlobal::lastPlanNodeId, PlannerGlobal::lastRowMarkId, Plan::lefttree, lfirst, lfirst_node, list_length(), makeNode, materialize_finished_plan(), max_parallel_hazard(), max_parallel_workers_per_gather, PlannerGlobal::maxParallelHazard, NIL, Gather::num_workers, Plan::parallel_aware, Plan::parallel_safe, parallel_setup_cost, parallel_tuple_cost, PlannerGlobal::parallelModeNeeded, PlannedStmt::parallelModeNeeded, PlannerGlobal::parallelModeOK, PlannerGlobal::paramExecTypes, PlannedStmt::paramExecTypes, parse(), PlannerGlobal::partPruneInfos, PlannedStmt::partPruneInfos, PlannedStmt::permInfos, PGJIT_DEFORM, PGJIT_EXPR, PGJIT_INLINE, PGJIT_NONE, PGJIT_OPT3, PGJIT_PERFORM, Gather::plan, Plan::plan_rows, Plan::plan_width, PlannedStmt::planTree, PlannerGlobal::prunableRelids, Plan::qual, PlannedStmt::queryId, PlannerGlobal::relationOids, PlannedStmt::relationOids, Gather::rescan_param, PlannerGlobal::resultRelations, PlannedStmt::resultRelations, PlannerGlobal::rewindPlanIDs, PlannedStmt::rewindPlanIDs, Plan::righttree, root, PlannedStmt::rowMarks, PlannedStmt::rtable, set_plan_references(), Gather::single_copy, SS_compute_initplan_cost(), SS_finalize_plan(), Plan::startup_cost, PlannedStmt::stmt_len, PlannedStmt::stmt_location, PlannerGlobal::subpaths, PlannerGlobal::subplans, PlannedStmt::subplans, subquery_planner(), Plan::targetlist, Plan::total_cost, PlannerGlobal::transientPlan, PlannedStmt::transientPlan, PlannedStmt::unprunableRelids, UPPERREL_FINAL, and PlannedStmt::utilityStmt.

Referenced by delay_execution_planner(), pgss_planner(), and planner().
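standard_planner() is also the fallback for the planner_hook extension point; pgss_planner(), listed above, is exactly such a hook. A minimal sketch of the conventional wrapping pattern (extension-side code; the names my_planner and prev_planner_hook are hypothetical):

#include "postgres.h"
#include "fmgr.h"
#include "optimizer/planner.h"

PG_MODULE_MAGIC;

static planner_hook_type prev_planner_hook = NULL;

static PlannedStmt *
my_planner(Query *parse, const char *query_string,
           int cursorOptions, ParamListInfo boundParams)
{
    /* inspect or adjust the Query here, then delegate down the chain */
    if (prev_planner_hook)
        return prev_planner_hook(parse, query_string,
                                 cursorOptions, boundParams);
    return standard_planner(parse, query_string,
                            cursorOptions, boundParams);
}

void
_PG_init(void)
{
    prev_planner_hook = planner_hook;
    planner_hook = my_planner;
}

Chaining to the previous hook (rather than calling standard_planner() unconditionally) keeps multiple such extensions composable.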

◆ standard_qp_callback()

static void standard_qp_callback ( PlannerInfo *  root,
void *  extra 
)
static

Definition at line 3354 of file planner.c.

3355{
3356 Query *parse = root->parse;
3357 standard_qp_extra *qp_extra = (standard_qp_extra *) extra;
3358 List *tlist = root->processed_tlist;
3359 List *activeWindows = qp_extra->activeWindows;
3360
3361 /*
3362 * Calculate pathkeys that represent grouping/ordering and/or ordered
3363 * aggregate requirements.
3364 */
3365 if (qp_extra->gset_data)
3366 {
3367 /*
3368 * With grouping sets, just use the first RollupData's groupClause. We
3369 * don't make any effort to optimize grouping clauses when there are
3370 * grouping sets, nor can we combine aggregate ordering keys with
3371 * grouping.
3372 */
3373 List *rollups = qp_extra->gset_data->rollups;
3374 List *groupClause = (rollups ? linitial_node(RollupData, rollups)->groupClause : NIL);
3375
3376 if (grouping_is_sortable(groupClause))
3377 {
3378 bool sortable;
3379
3380 /*
3381 * The groupClause is logically below the grouping step. So if
3382 * there is an RTE entry for the grouping step, we need to remove
3383 * its RT index from the sort expressions before we make PathKeys
3384 * for them.
3385 */
3386 root->group_pathkeys =
3387 make_pathkeys_for_sortclauses_extended(root,
3388 &groupClause,
3389 tlist,
3390 false,
3391 parse->hasGroupRTE,
3392 &sortable,
3393 false);
3394 Assert(sortable);
3395 root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3396 }
3397 else
3398 {
3399 root->group_pathkeys = NIL;
3400 root->num_groupby_pathkeys = 0;
3401 }
3402 }
3403 else if (parse->groupClause || root->numOrderedAggs > 0)
3404 {
3405 /*
3406 * With a plain GROUP BY list, we can remove any grouping items that
3407 * are proven redundant by EquivalenceClass processing. For example,
3408 * we can remove y given "WHERE x = y GROUP BY x, y". These aren't
3409 * especially common cases, but they're nearly free to detect. Note
3410 * that we remove redundant items from processed_groupClause but not
3411 * the original parse->groupClause.
3412 */
3413 bool sortable;
3414
3415 /*
3416 * Convert group clauses into pathkeys. Set the ec_sortref field of
3417 * EquivalenceClass'es if it's not set yet.
3418 */
3419 root->group_pathkeys =
3420 make_pathkeys_for_sortclauses_extended(root,
3421 &root->processed_groupClause,
3422 tlist,
3423 true,
3424 false,
3425 &sortable,
3426 true);
3427 if (!sortable)
3428 {
3429 /* Can't sort; no point in considering aggregate ordering either */
3430 root->group_pathkeys = NIL;
3431 root->num_groupby_pathkeys = 0;
3432 }
3433 else
3434 {
3435 root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3436 /* If we have ordered aggs, consider adding onto group_pathkeys */
3437 if (root->numOrderedAggs > 0)
3438 adjust_group_pathkeys_for_groupagg(root);
3439 }
3440 }
3441 else
3442 {
3443 root->group_pathkeys = NIL;
3444 root->num_groupby_pathkeys = 0;
3445 }
3446
3447 /* We consider only the first (bottom) window in pathkeys logic */
3448 if (activeWindows != NIL)
3449 {
3450 WindowClause *wc = linitial_node(WindowClause, activeWindows);
3451
3452 root->window_pathkeys = make_pathkeys_for_window(root,
3453 wc,
3454 tlist);
3455 }
3456 else
3457 root->window_pathkeys = NIL;
3458
3459 /*
3460 * As with GROUP BY, we can discard any DISTINCT items that are proven
3461 * redundant by EquivalenceClass processing. The non-redundant list is
3462 * kept in root->processed_distinctClause, leaving the original
3463 * parse->distinctClause alone.
3464 */
3465 if (parse->distinctClause)
3466 {
3467 bool sortable;
3468
3469 /* Make a copy since pathkey processing can modify the list */
3470 root->processed_distinctClause = list_copy(parse->distinctClause);
3471 root->distinct_pathkeys =
3472 make_pathkeys_for_sortclauses_extended(root,
3473 &root->processed_distinctClause,
3474 tlist,
3475 true,
3476 false,
3477 &sortable,
3478 false);
3479 if (!sortable)
3480 root->distinct_pathkeys = NIL;
3481 }
3482 else
3483 root->distinct_pathkeys = NIL;
3484
3485 root->sort_pathkeys =
3486 make_pathkeys_for_sortclauses(root,
3487 parse->sortClause,
3488 tlist);
3489
3490 /* setting setop_pathkeys might be useful to the union planner */
3491 if (qp_extra->setop != NULL)
3492 {
3493 List *groupClauses;
3494 bool sortable;
3495
3496 groupClauses = generate_setop_child_grouplist(qp_extra->setop, tlist);
3497
3498 root->setop_pathkeys =
3499 make_pathkeys_for_sortclauses_extended(root,
3500 &groupClauses,
3501 tlist,
3502 false,
3503 false,
3504 &sortable,
3505 false);
3506 if (!sortable)
3507 root->setop_pathkeys = NIL;
3508 }
3509 else
3510 root->setop_pathkeys = NIL;
3511
3512 /*
3513 * Figure out whether we want a sorted result from query_planner.
3514 *
3515 * If we have a sortable GROUP BY clause, then we want a result sorted
3516 * properly for grouping. Otherwise, if we have window functions to
3517 * evaluate, we try to sort for the first window. Otherwise, if there's a
3518 * sortable DISTINCT clause that's more rigorous than the ORDER BY clause,
3519 * we try to produce output that's sufficiently well sorted for the
3520 * DISTINCT. Otherwise, if there is an ORDER BY clause, we want to sort
3521 * by the ORDER BY clause. Otherwise, if we're a subquery being planned
3522 * for a set operation which can benefit from presorted results and have a
3523 * sortable targetlist, we want to sort by the target list.
3524 *
3525 * Note: if we have both ORDER BY and GROUP BY, and ORDER BY is a superset
3526 * of GROUP BY, it would be tempting to request sort by ORDER BY --- but
3527 * that might just leave us failing to exploit an available sort order at
3528 * all. Needs more thought. The choice for DISTINCT versus ORDER BY is
3529 * much easier, since we know that the parser ensured that one is a
3530 * superset of the other.
3531 */
3532 if (root->group_pathkeys)
3533 root->query_pathkeys = root->group_pathkeys;
3534 else if (root->window_pathkeys)
3535 root->query_pathkeys = root->window_pathkeys;
3536 else if (list_length(root->distinct_pathkeys) >
3537 list_length(root->sort_pathkeys))
3538 root->query_pathkeys = root->distinct_pathkeys;
3539 else if (root->sort_pathkeys)
3540 root->query_pathkeys = root->sort_pathkeys;
3541 else if (root->setop_pathkeys != NIL)
3542 root->query_pathkeys = root->setop_pathkeys;
3543 else
3544 root->query_pathkeys = NIL;
3545}
static void adjust_group_pathkeys_for_groupagg(PlannerInfo *root)
Definition: planner.c:3173
static List * generate_setop_child_grouplist(SetOperationStmt *op, List *targetlist)
Definition: planner.c:8161

References standard_qp_extra::activeWindows, adjust_group_pathkeys_for_groupagg(), Assert(), generate_setop_child_grouplist(), grouping_is_sortable(), standard_qp_extra::gset_data, linitial_node, list_copy(), list_length(), make_pathkeys_for_sortclauses(), make_pathkeys_for_sortclauses_extended(), make_pathkeys_for_window(), NIL, parse(), grouping_sets_data::rollups, root, and standard_qp_extra::setop.

Referenced by grouping_planner().
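The closing if/else chain fixes a strict priority among the pathkey lists. A toy model (hypothetical; list lengths stand in for the pathkey lists) of why SELECT DISTINCT a, b ... ORDER BY a sorts for the DISTINCT rather than the ORDER BY:

#include <stdio.h>

/* Priority: group > window > distinct (when more rigorous than sort)
 * > sort > setop, mirroring the chain at the end of the function. */
static const char *
choose_query_pathkeys(int group, int window, int distinct, int sort, int setop)
{
    if (group)
        return "group_pathkeys";
    if (window)
        return "window_pathkeys";
    if (distinct > sort)
        return "distinct_pathkeys";
    if (sort)
        return "sort_pathkeys";
    if (setop)
        return "setop_pathkeys";
    return "NIL";
}

int
main(void)
{
    /* DISTINCT a, b has two keys; ORDER BY a has one; DISTINCT wins,
     * and the parser guarantees one list is a superset of the other. */
    printf("%s\n", choose_query_pathkeys(0, 0, 2, 1, 0));
    return 0;                   /* prints: distinct_pathkeys */
}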

◆ subquery_planner()

PlannerInfo * subquery_planner ( PlannerGlobal *  glob,
Query *  parse,
PlannerInfo *  parent_root,
bool  hasRecursion,
double  tuple_fraction,
SetOperationStmt *  setops 
)

Definition at line 647 of file planner.c.

650{
651 PlannerInfo *root;
652 List *newWithCheckOptions;
653 List *newHaving;
654 bool hasOuterJoins;
655 bool hasResultRTEs;
656 RelOptInfo *final_rel;
657 ListCell *l;
658
659 /* Create a PlannerInfo data structure for this subquery */
660 root = makeNode(PlannerInfo);
661 root->parse = parse;
662 root->glob = glob;
663 root->query_level = parent_root ? parent_root->query_level + 1 : 1;
664 root->parent_root = parent_root;
665 root->plan_params = NIL;
666 root->outer_params = NULL;
667 root->planner_cxt = CurrentMemoryContext;
668 root->init_plans = NIL;
669 root->cte_plan_ids = NIL;
670 root->multiexpr_params = NIL;
671 root->join_domains = NIL;
672 root->eq_classes = NIL;
673 root->ec_merging_done = false;
674 root->last_rinfo_serial = 0;
675 root->all_result_relids =
676 parse->resultRelation ? bms_make_singleton(parse->resultRelation) : NULL;
677 root->leaf_result_relids = NULL; /* we'll find out leaf-ness later */
678 root->append_rel_list = NIL;
679 root->row_identity_vars = NIL;
680 root->rowMarks = NIL;
681 memset(root->upper_rels, 0, sizeof(root->upper_rels));
682 memset(root->upper_targets, 0, sizeof(root->upper_targets));
683 root->processed_groupClause = NIL;
684 root->processed_distinctClause = NIL;
685 root->processed_tlist = NIL;
686 root->update_colnos = NIL;
687 root->grouping_map = NULL;
688 root->minmax_aggs = NIL;
689 root->qual_security_level = 0;
690 root->hasPseudoConstantQuals = false;
691 root->hasAlternativeSubPlans = false;
692 root->placeholdersFrozen = false;
693 root->hasRecursion = hasRecursion;
694 if (hasRecursion)
695 root->wt_param_id = assign_special_exec_param(root);
696 else
697 root->wt_param_id = -1;
698 root->non_recursive_path = NULL;
699 root->partColsUpdated = false;
700
701 /*
702 * Create the top-level join domain. This won't have valid contents until
703 * deconstruct_jointree fills it in, but the node needs to exist before
704 * that so we can build EquivalenceClasses referencing it.
705 */
706 root->join_domains = list_make1(makeNode(JoinDomain));
707
708 /*
709 * If there is a WITH list, process each WITH query and either convert it
710 * to RTE_SUBQUERY RTE(s) or build an initplan SubPlan structure for it.
711 */
712 if (parse->cteList)
713 SS_process_ctes(root);
714
715 /*
716 * If it's a MERGE command, transform the joinlist as appropriate.
717 */
718 transform_MERGE_to_join(parse);
719
720 /*
721 * If the FROM clause is empty, replace it with a dummy RTE_RESULT RTE, so
722 * that we don't need so many special cases to deal with that situation.
723 */
724 replace_empty_jointree(parse);
725
726 /*
727 * Look for ANY and EXISTS SubLinks in WHERE and JOIN/ON clauses, and try
728 * to transform them into joins. Note that this step does not descend
729 * into subqueries; if we pull up any subqueries below, their SubLinks are
730 * processed just before pulling them up.
731 */
732 if (parse->hasSubLinks)
733 pull_up_sublinks(root);
734
735 /*
736 * Scan the rangetable for function RTEs, do const-simplification on them,
737 * and then inline them if possible (producing subqueries that might get
738 * pulled up next). Recursion issues here are handled in the same way as
739 * for SubLinks.
740 */
741 preprocess_function_rtes(root);
742
743 /*
744 * Scan the rangetable for relations with virtual generated columns, and
745 * replace all Var nodes in the query that reference these columns with
746 * the generation expressions. Recursion issues here are handled in the
747 * same way as for SubLinks.
748 */
749 parse = root->parse = expand_virtual_generated_columns(root);
750
751 /*
752 * Check to see if any subqueries in the jointree can be merged into this
753 * query.
754 */
755 pull_up_subqueries(root);
756
757 /*
758 * If this is a simple UNION ALL query, flatten it into an appendrel. We
759 * do this now because it requires applying pull_up_subqueries to the leaf
760 * queries of the UNION ALL, which weren't touched above because they
761 * weren't referenced by the jointree (they will be after we do this).
762 */
763 if (parse->setOperations)
764 flatten_simple_union_all(root);
765
766 /*
767 * Survey the rangetable to see what kinds of entries are present. We can
768 * skip some later processing if relevant SQL features are not used; for
769 * example if there are no JOIN RTEs we can avoid the expense of doing
770 * flatten_join_alias_vars(). This must be done after we have finished
771 * adding rangetable entries, of course. (Note: actually, processing of
772 * inherited or partitioned rels can cause RTEs for their child tables to
773 * get added later; but those must all be RTE_RELATION entries, so they
774 * don't invalidate the conclusions drawn here.)
775 */
776 root->hasJoinRTEs = false;
777 root->hasLateralRTEs = false;
778 root->group_rtindex = 0;
779 hasOuterJoins = false;
780 hasResultRTEs = false;
781 foreach(l, parse->rtable)
782 {
784
785 switch (rte->rtekind)
786 {
787 case RTE_RELATION:
788 if (rte->inh)
789 {
790 /*
791 * Check to see if the relation actually has any children;
792 * if not, clear the inh flag so we can treat it as a
793 * plain base relation.
794 *
795 * Note: this could give a false-positive result, if the
796 * rel once had children but no longer does. We used to
797 * be able to clear rte->inh later on when we discovered
798 * that, but no more; we have to handle such cases as
799 * full-fledged inheritance.
800 */
801 rte->inh = has_subclass(rte->relid);
802 }
803 break;
804 case RTE_JOIN:
805 root->hasJoinRTEs = true;
806 if (IS_OUTER_JOIN(rte->jointype))
807 hasOuterJoins = true;
808 break;
809 case RTE_RESULT:
810 hasResultRTEs = true;
811 break;
812 case RTE_GROUP:
813 Assert(parse->hasGroupRTE);
814 root->group_rtindex = list_cell_number(parse->rtable, l) + 1;
815 break;
816 default:
817 /* No work here for other RTE types */
818 break;
819 }
820
821 if (rte->lateral)
822 root->hasLateralRTEs = true;
823
824 /*
825 * We can also determine the maximum security level required for any
826 * securityQuals now. Addition of inheritance-child RTEs won't affect
827 * this, because child tables don't have their own securityQuals; see
828 * expand_single_inheritance_child().
829 */
830 if (rte->securityQuals)
831 root->qual_security_level = Max(root->qual_security_level,
832 list_length(rte->securityQuals));
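/*
 * Illustrative SQL, not part of planner.c (names hypothetical):
 * securityQuals come from row-level security policies and
 * security_barrier views, e.g. with row security enabled on "accounts":
 *
 *     CREATE POLICY p ON accounts USING (owner = current_user);
 */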
833 }
834
835 /*
836 * If we have now verified that the query target relation is
837 * non-inheriting, mark it as a leaf target.
838 */
839 if (parse->resultRelation)
840 {
841 RangeTblEntry *rte = rt_fetch(parse->resultRelation, parse->rtable);
842
843 if (!rte->inh)
844 root->leaf_result_relids =
845 bms_make_singleton(parse->resultRelation);
846 }
847
848 /*
849 * Preprocess RowMark information. We need to do this after subquery
850 * pullup, so that all base relations are present.
851 */
852 preprocess_rowmarks(root);
853
854 /*
855 * Set hasHavingQual to remember whether a HAVING clause is present. Needed
856 * because preprocess_expression will reduce a constant-true condition to
857 * an empty qual list ... but "HAVING TRUE" is not a semantic no-op.
858 */
859 root->hasHavingQual = (parse->havingQual != NULL);
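/*
 * Illustrative SQL, not part of planner.c ("t" hypothetical):
 *
 *     SELECT 1 FROM t;               -- one row per row of t
 *     SELECT 1 FROM t HAVING true;   -- exactly one row, because HAVING
 *                                    -- makes this a grouped query
 */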
860
861 /*
862 * Do expression preprocessing on targetlist and quals, as well as other
863 * random expressions in the querytree. Note that we do not need to
864 * handle sort/group expressions explicitly, because they are actually
865 * part of the targetlist.
866 */
867 parse->targetList = (List *)
868 preprocess_expression(root, (Node *) parse->targetList,
869 EXPRKIND_TARGET);
870
871 newWithCheckOptions = NIL;
872 foreach(l, parse->withCheckOptions)
873 {
874 WithCheckOption *wco = lfirst_node(WithCheckOption, l);
875
876 wco->qual = preprocess_expression(root, wco->qual,
877 EXPRKIND_QUAL);
878 if (wco->qual != NULL)
879 newWithCheckOptions = lappend(newWithCheckOptions, wco);
880 }
881 parse->withCheckOptions = newWithCheckOptions;
882
883 parse->returningList = (List *)
884 preprocess_expression(root, (Node *) parse->returningList,
885 EXPRKIND_TARGET);
886
887 preprocess_qual_conditions(root, (Node *) parse->jointree);
888
889 parse->havingQual = preprocess_expression(root, parse->havingQual,
890 EXPRKIND_QUAL);
891
892 foreach(l, parse->windowClause)
893 {
894 WindowClause *wc = lfirst_node(WindowClause, l);
895
896 /* partitionClause/orderClause are sort/group expressions */
897 wc->startOffset = preprocess_expression(root, wc->startOffset,
898 EXPRKIND_LIMIT);
899 wc->endOffset = preprocess_expression(root, wc->endOffset,
900 EXPRKIND_LIMIT);
901 }
902
903 parse->limitOffset = preprocess_expression(root, parse->limitOffset,
904 EXPRKIND_LIMIT);
905 parse->limitCount = preprocess_expression(root, parse->limitCount,
906 EXPRKIND_LIMIT);
907
908 if (parse->onConflict)
909 {
910 parse->onConflict->arbiterElems = (List *)
911 preprocess_expression(root,
912 (Node *) parse->onConflict->arbiterElems,
913 EXPRKIND_ARBITER_ELEM);
914 parse->onConflict->arbiterWhere =
915 preprocess_expression(root,
916 parse->onConflict->arbiterWhere,
917 EXPRKIND_QUAL);
918 parse->onConflict->onConflictSet = (List *)
919 preprocess_expression(root,
920 (Node *) parse->onConflict->onConflictSet,
921 EXPRKIND_TARGET);
922 parse->onConflict->onConflictWhere =
923 preprocess_expression(root,
924 parse->onConflict->onConflictWhere,
925 EXPRKIND_QUAL);
926 /* exclRelTlist contains only Vars, so no preprocessing needed */
927 }
928
929 foreach(l, parse->mergeActionList)
930 {
931 MergeAction *action = lfirst_node(MergeAction, l);
932
933 action->targetList = (List *)
934 preprocess_expression(root,
935 (Node *) action->targetList,
936 EXPRKIND_TARGET);
937 action->qual =
938 preprocess_expression(root,
939 (Node *) action->qual,
940 EXPRKIND_QUAL);
941 }
942
943 parse->mergeJoinCondition =
944 preprocess_expression(root, parse->mergeJoinCondition, EXPRKIND_QUAL);
945
946 root->append_rel_list = (List *)
947 preprocess_expression(root, (Node *) root->append_rel_list,
948 EXPRKIND_APPINFO);
949
950 /* Also need to preprocess expressions within RTEs */
951 foreach(l, parse->rtable)
952 {
953 RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
954 int kind;
955 ListCell *lcsq;
956
957 if (rte->rtekind == RTE_RELATION)
958 {
959 if (rte->tablesample)
960 rte->tablesample = (TableSampleClause *)
961 preprocess_expression(root,
962 (Node *) rte->tablesample,
963 EXPRKIND_TABLESAMPLE);
964 }
965 else if (rte->rtekind == RTE_SUBQUERY)
966 {
967 /*
968 * We don't want to do all preprocessing yet on the subquery's
969 * expressions, since that will happen when we plan it. But if it
970 * contains any join aliases of our level, those have to get
971 * expanded now, because planning of the subquery won't do it.
972 * That's only possible if the subquery is LATERAL.
973 */
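/*
 * Illustrative SQL, not part of planner.c (names hypothetical): j.c below
 * is a join alias Var that must be expanded now, because planning the
 * LATERAL subquery will not see this query level's join aliases:
 *
 *     SELECT * FROM (t1 JOIN t2 USING (c)) AS j,
 *         LATERAL (SELECT j.c + 1 AS d) AS ss;
 */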
974 if (rte->lateral && root->hasJoinRTEs)
975 rte->subquery = (Query *)
976 flatten_join_alias_vars(root, root->parse,
977 (Node *) rte->subquery);
978 }
979 else if (rte->rtekind == RTE_FUNCTION)
980 {
981 /* Preprocess the function expression(s) fully */
982 kind = rte->lateral ? EXPRKIND_RTFUNC_LATERAL : EXPRKIND_RTFUNC;
983 rte->functions = (List *)
984 preprocess_expression(root, (Node *) rte->functions, kind);
985 }
986 else if (rte->rtekind == RTE_TABLEFUNC)
987 {
988 /* Preprocess the function expression(s) fully */
989 kind = rte->lateral ? EXPRKIND_TABLEFUNC_LATERAL : EXPRKIND_TABLEFUNC;
990 rte->tablefunc = (TableFunc *)
991 preprocess_expression(root, (Node *) rte->tablefunc, kind);
992 }
993 else if (rte->rtekind == RTE_VALUES)
994 {
995 /* Preprocess the values lists fully */
996 kind = rte->lateral ? EXPRKIND_VALUES_LATERAL : EXPRKIND_VALUES;
997 rte->values_lists = (List *)
998 preprocess_expression(root, (Node *) rte->values_lists, kind);
999 }
1000 else if (rte->rtekind == RTE_GROUP)
1001 {
1002 /* Preprocess the groupexprs list fully */
1003 rte->groupexprs = (List *)
1004 preprocess_expression(root, (Node *) rte->groupexprs,
1005 EXPRKIND_GROUPEXPR);
1006 }
1007
1008 /*
1009 * Process each element of the securityQuals list as if it were a
1010 * separate qual expression (as indeed it is). We need to do it this
1011 * way to get proper canonicalization of AND/OR structure. Note that
1012 * this converts each element into an implicit-AND sublist.
1013 */
1014 foreach(lcsq, rte->securityQuals)
1015 {
1016 lfirst(lcsq) = preprocess_expression(root,
1017 (Node *) lfirst(lcsq),
1018 EXPRKIND_QUAL);
1019 }
1020 }
1021
1022 /*
1023 * Now that we are done preprocessing expressions, and in particular done
1024 * flattening join alias variables, get rid of the joinaliasvars lists.
1025 * They no longer match what expressions in the rest of the tree look
1026 * like, because we have not preprocessed expressions in those lists (and
1027 * do not want to; for example, expanding a SubLink there would result in
1028 * a useless unreferenced subplan). Leaving them in place simply creates
1029 * a hazard for later scans of the tree. We could try to prevent that by
1030 * using QTW_IGNORE_JOINALIASES in every tree scan done after this point,
1031 * but that doesn't sound very reliable.
1032 */
1033 if (root->hasJoinRTEs)
1034 {
1035 foreach(l, parse->rtable)
1036 {
1037 RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
1038
1039 rte->joinaliasvars = NIL;
1040 }
1041 }
1042
1043 /*
1044 * Replace any Vars in the subquery's targetlist and havingQual that
1045 * reference GROUP outputs with the underlying grouping expressions.
1046 *
1047 * Note that we need to perform this replacement after we've preprocessed
1048 * the grouping expressions. This is to ensure that there is only one
1049 * instance of SubPlan for each SubLink contained within the grouping
1050 * expressions.
1051 */
1052 if (parse->hasGroupRTE)
1053 {
1054 parse->targetList = (List *)
1055 flatten_group_exprs(root, root->parse, (Node *) parse->targetList);
1056 parse->havingQual =
1057 flatten_group_exprs(root, root->parse, parse->havingQual);
1058 }
1059
1060 /* Constant-folding might have removed all set-returning functions */
1061 if (parse->hasTargetSRFs)
1062 parse->hasTargetSRFs = expression_returns_set((Node *) parse->targetList);
1063
1064 /*
1065 * In some cases we may want to transfer a HAVING clause into WHERE. We
1066 * cannot do so if the HAVING clause contains aggregates (obviously) or
1067 * volatile functions (since a HAVING clause is supposed to be executed
1068 * only once per group). We also can't do this if there are any nonempty
1069 * grouping sets and the clause references any columns that are nullable
1070 * by the grouping sets; moving such a clause into WHERE would potentially
1071 * change the results. (If there are only empty grouping sets, then the
1072 * HAVING clause must be degenerate as discussed below.)
1073 *
1074 * Also, it may be that the clause is so expensive to execute that we're
1075 * better off doing it only once per group, despite the loss of
1076 * selectivity. This is hard to estimate short of doing the entire
1077 * planning process twice, so we use a heuristic: clauses containing
1078 * subplans are left in HAVING. Otherwise, we move or copy the HAVING
1079 * clause into WHERE, in hopes of eliminating tuples before aggregation
1080 * instead of after.
1081 *
1082 * If the query has explicit grouping then we can simply move such a
1083 * clause into WHERE; any group that fails the clause will not be in the
1084 * output because none of its tuples will reach the grouping or
1085 * aggregation stage. Otherwise we must have a degenerate (variable-free)
1086 * HAVING clause, which we put in WHERE so that query_planner() can use it
1087 * in a gating Result node, but also keep in HAVING to ensure that we
1088 * don't emit a bogus aggregated row. (This could be done better, but it
1089 * seems not worth optimizing.)
1090 *
1091 * Note that a HAVING clause may contain expressions that are not fully
1092 * preprocessed. This can happen if these expressions are part of
1093 * grouping items. In such cases, they are replaced with GROUP Vars in
1094 * the parser and then replaced back after we're done with expression
1095 * preprocessing on havingQual. This is not an issue if the clause
1096 * remains in HAVING, because these expressions will be matched to lower
1097 * target items in setrefs.c. However, if the clause is moved or copied
1098 * into WHERE, we need to ensure that these expressions are fully
1099 * preprocessed.
1100 *
1101 * Note that both havingQual and parse->jointree->quals are in
1102 * implicitly-ANDed-list form at this point, even though they are declared
1103 * as Node *.
1104 */
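/*
 * Illustrative SQL, not part of planner.c (names hypothetical):
 *
 *     -- movable: no aggregates, volatile functions, or subplans
 *     SELECT dept, sum(pay) FROM emp GROUP BY dept HAVING dept <> 'HQ';
 *
 *     -- must stay in HAVING: it references an aggregate
 *     SELECT dept, sum(pay) FROM emp GROUP BY dept HAVING sum(pay) > 100;
 */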
1105 newHaving = NIL;
1106 foreach(l, (List *) parse->havingQual)
1107 {
1108 Node *havingclause = (Node *) lfirst(l);
1109
1110 if (contain_agg_clause(havingclause) ||
1111 contain_volatile_functions(havingclause) ||
1112 contain_subplans(havingclause) ||
1113 (parse->groupClause && parse->groupingSets &&
1114 bms_is_member(root->group_rtindex, pull_varnos(root, havingclause))))
1115 {
1116 /* keep it in HAVING */
1117 newHaving = lappend(newHaving, havingclause);
1118 }
1119 else if (parse->groupClause)
1120 {
1121 Node *whereclause;
1122
1123 /* Preprocess the HAVING clause fully */
1124 whereclause = preprocess_expression(root, havingclause,
1125 EXPRKIND_QUAL);
1126 /* ... and move it to WHERE */
1127 parse->jointree->quals = (Node *)
1128 list_concat((List *) parse->jointree->quals,
1129 (List *) whereclause);
1130 }
1131 else
1132 {
1133 Node *whereclause;
1134
1135 /* Preprocess the HAVING clause fully */
1136 whereclause = preprocess_expression(root, copyObject(havingclause),
1137 EXPRKIND_QUAL);
1138 /* ... and put a copy in WHERE */
1139 parse->jointree->quals = (Node *)
1140 list_concat((List *) parse->jointree->quals,
1141 (List *) whereclause);
1142 /* ... and also keep it in HAVING */
1143 newHaving = lappend(newHaving, havingclause);
1144 }
1145 }
1146 parse->havingQual = (Node *) newHaving;
1147
1148 /*
1149 * If we have any outer joins, try to reduce them to plain inner joins.
1150 * This step is most easily done after we've done expression
1151 * preprocessing.
1152 */
1153 if (hasOuterJoins)
1154 reduce_outer_joins(root);
1155
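/*
 * Illustrative SQL, not part of planner.c (names hypothetical): the
 * strict WHERE condition on b rejects NULL-extended rows, so this LEFT
 * JOIN can be reduced to a plain inner join:
 *
 *     SELECT * FROM a LEFT JOIN b ON a.id = b.id WHERE b.x > 0;
 */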
1156 /*
1157 * If we have any RTE_RESULT relations, see if they can be deleted from
1158 * the jointree. We also rely on this processing to flatten single-child
1159 * FromExprs underneath outer joins. This step is most effectively done
1160 * after we've done expression preprocessing and outer join reduction.
1161 */
1162 if (hasResultRTEs || hasOuterJoins)
1163 remove_useless_result_rtes(root);
1164
1165 /*
1166 * Do the main planning.
1167 */
1168 grouping_planner(root, tuple_fraction, setops);
1169
1170 /*
1171 * Capture the set of outer-level param IDs we have access to, for use in
1172 * extParam/allParam calculations later.
1173 */
1174 SS_identify_outer_params(root);
1175
1176 /*
1177 * If any initPlans were created in this query level, adjust the surviving
1178 * Paths' costs and parallel-safety flags to account for them. The
1179 * initPlans won't actually get attached to the plan tree till
1180 * create_plan() runs, but we must include their effects now.
1181 */
1182 final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1183 SS_charge_for_initplans(root, final_rel);
1184
1185 /*
1186 * Make sure we've identified the cheapest Path for the final rel. (By
1187 * doing this here not in grouping_planner, we include initPlan costs in
1188 * the decision, though it's unlikely that will change anything.)
1189 */
1190 set_cheapest(final_rel);
1191
1192 return root;
1193}
bool contain_agg_clause(Node *clause)
Definition: clauses.c:179
bool contain_subplans(Node *clause)
Definition: clauses.c:331
#define IS_OUTER_JOIN(jointype)
Definition: nodes.h:344
@ RTE_JOIN
Definition: parsenodes.h:1028
@ RTE_VALUES
Definition: parsenodes.h:1031
@ RTE_SUBQUERY
Definition: parsenodes.h:1027
@ RTE_RESULT
Definition: parsenodes.h:1034
@ RTE_FUNCTION
Definition: parsenodes.h:1029
@ RTE_TABLEFUNC
Definition: parsenodes.h:1030
@ RTE_GROUP
Definition: parsenodes.h:1037
bool has_subclass(Oid relationId)
Definition: pg_inherits.c:355
static int list_cell_number(const List *l, const ListCell *c)
Definition: pg_list.h:333
#define EXPRKIND_TABLEFUNC_LATERAL
Definition: planner.c:92
#define EXPRKIND_APPINFO
Definition: planner.c:87
static void preprocess_rowmarks(PlannerInfo *root)
Definition: planner.c:2343
#define EXPRKIND_GROUPEXPR
Definition: planner.c:93
#define EXPRKIND_RTFUNC_LATERAL
Definition: planner.c:83
#define EXPRKIND_VALUES_LATERAL
Definition: planner.c:85
#define EXPRKIND_LIMIT
Definition: planner.c:86
static void grouping_planner(PlannerInfo *root, double tuple_fraction, SetOperationStmt *setops)
Definition: planner.c:1381
#define EXPRKIND_ARBITER_ELEM
Definition: planner.c:90
void preprocess_function_rtes(PlannerInfo *root)
Definition: prepjointree.c:914
void flatten_simple_union_all(PlannerInfo *root)
void transform_MERGE_to_join(Query *parse)
Definition: prepjointree.c:183
void remove_useless_result_rtes(PlannerInfo *root)
Query * expand_virtual_generated_columns(PlannerInfo *root)
Definition: prepjointree.c:969
void pull_up_sublinks(PlannerInfo *root)
Definition: prepjointree.c:468
void replace_empty_jointree(Query *parse)
Definition: prepjointree.c:410
void pull_up_subqueries(PlannerInfo *root)
void reduce_outer_joins(PlannerInfo *root)
Index query_level
Definition: pathnodes.h:232
TableFunc * tablefunc
Definition: parsenodes.h:1198
struct TableSampleClause * tablesample
Definition: parsenodes.h:1112
Query * subquery
Definition: parsenodes.h:1118
List * values_lists
Definition: parsenodes.h:1204
JoinType jointype
Definition: parsenodes.h:1165
List * functions
Definition: parsenodes.h:1191
void SS_process_ctes(PlannerInfo *root)
Definition: subselect.c:880
void SS_identify_outer_params(PlannerInfo *root)
Definition: subselect.c:2184
void SS_charge_for_initplans(PlannerInfo *root, RelOptInfo *final_rel)
Definition: subselect.c:2248
Node * flatten_group_exprs(PlannerInfo *root, Query *query, Node *node)
Definition: var.c:968
Relids pull_varnos(PlannerInfo *root, Node *node)
Definition: var.c:114

References generate_unaccent_rules::action, Assert(), assign_special_exec_param(), bms_is_member(), bms_make_singleton(), contain_agg_clause(), contain_subplans(), contain_volatile_functions(), copyObject, CurrentMemoryContext, WindowClause::endOffset, expand_virtual_generated_columns(), expression_returns_set(), EXPRKIND_APPINFO, EXPRKIND_ARBITER_ELEM, EXPRKIND_GROUPEXPR, EXPRKIND_LIMIT, EXPRKIND_QUAL, EXPRKIND_RTFUNC, EXPRKIND_RTFUNC_LATERAL, EXPRKIND_TABLEFUNC, EXPRKIND_TABLEFUNC_LATERAL, EXPRKIND_TABLESAMPLE, EXPRKIND_TARGET, EXPRKIND_VALUES, EXPRKIND_VALUES_LATERAL, fetch_upper_rel(), flatten_group_exprs(), flatten_join_alias_vars(), flatten_simple_union_all(), RangeTblEntry::functions, grouping_planner(), has_subclass(), RangeTblEntry::inh, IS_OUTER_JOIN, RangeTblEntry::jointype, lappend(), lfirst, lfirst_node, list_cell_number(), list_concat(), list_length(), list_make1, makeNode, Max, NIL, parse(), preprocess_expression(), preprocess_function_rtes(), preprocess_qual_conditions(), preprocess_rowmarks(), pull_up_sublinks(), pull_up_subqueries(), pull_varnos(), WithCheckOption::qual, PlannerInfo::query_level, reduce_outer_joins(), remove_useless_result_rtes(), replace_empty_jointree(), root, rt_fetch, RTE_FUNCTION, RTE_GROUP, RTE_JOIN, RTE_RELATION, RTE_RESULT, RTE_SUBQUERY, RTE_TABLEFUNC, RTE_VALUES, RangeTblEntry::rtekind, set_cheapest(), SS_charge_for_initplans(), SS_identify_outer_params(), SS_process_ctes(), WindowClause::startOffset, RangeTblEntry::subquery, RangeTblEntry::tablefunc, RangeTblEntry::tablesample, transform_MERGE_to_join(), UPPERREL_FINAL, and RangeTblEntry::values_lists.

Referenced by make_subplan(), recurse_set_operations(), set_subquery_pathlist(), SS_process_ctes(), and standard_planner().

Variable Documentation

◆ create_upper_paths_hook

create_upper_paths_hook_type create_upper_paths_hook = NULL

◆ cursor_tuple_fraction

double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION

Definition at line 67 of file planner.c.

Referenced by standard_planner().

◆ debug_parallel_query

int debug_parallel_query = DEBUG_PARALLEL_OFF

Definition at line 68 of file planner.c.

Referenced by ProcessParallelMessage(), query_planner(), and standard_planner().

◆ enable_distinct_reordering

bool enable_distinct_reordering = true

Definition at line 70 of file planner.c.

Referenced by get_useful_pathkeys_for_distinct().

◆ parallel_leader_participation

bool parallel_leader_participation = true

Definition at line 69 of file planner.c.

Referenced by ExecGather(), ExecGatherMerge(), ExecInitGather(), and get_parallel_divisor().

◆ planner_hook

planner_hook_type planner_hook = NULL

Definition at line 73 of file planner.c.

Referenced by _PG_init(), and planner().