PostgreSQL Source Code git master
planner.c File Reference
#include "postgres.h"
#include <limits.h>
#include <math.h>
#include "access/genam.h"
#include "access/parallel.h"
#include "access/sysattr.h"
#include "access/table.h"
#include "catalog/pg_aggregate.h"
#include "catalog/pg_inherits.h"
#include "catalog/pg_proc.h"
#include "catalog/pg_type.h"
#include "executor/executor.h"
#include "foreign/fdwapi.h"
#include "jit/jit.h"
#include "lib/bipartite_match.h"
#include "lib/knapsack.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "nodes/supportnodes.h"
#include "optimizer/appendinfo.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/optimizer.h"
#include "optimizer/paramassign.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/plancat.h"
#include "optimizer/planmain.h"
#include "optimizer/planner.h"
#include "optimizer/prep.h"
#include "optimizer/subselect.h"
#include "optimizer/tlist.h"
#include "parser/analyze.h"
#include "parser/parse_agg.h"
#include "parser/parse_clause.h"
#include "parser/parse_relation.h"
#include "parser/parsetree.h"
#include "partitioning/partdesc.h"
#include "rewrite/rewriteManip.h"
#include "utils/acl.h"
#include "utils/backend_status.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
#include "utils/selfuncs.h"
Go to the source code of this file.

Data Structures

struct  grouping_sets_data
 
struct  WindowClauseSortData
 
struct  standard_qp_extra
 

Macros

#define EXPRKIND_QUAL   0
 
#define EXPRKIND_TARGET   1
 
#define EXPRKIND_RTFUNC   2
 
#define EXPRKIND_RTFUNC_LATERAL   3
 
#define EXPRKIND_VALUES   4
 
#define EXPRKIND_VALUES_LATERAL   5
 
#define EXPRKIND_LIMIT   6
 
#define EXPRKIND_APPINFO   7
 
#define EXPRKIND_PHV   8
 
#define EXPRKIND_TABLESAMPLE   9
 
#define EXPRKIND_ARBITER_ELEM   10
 
#define EXPRKIND_TABLEFUNC   11
 
#define EXPRKIND_TABLEFUNC_LATERAL   12
 
#define EXPRKIND_GROUPEXPR   13
 

Functions

static Node * preprocess_expression (PlannerInfo *root, Node *expr, int kind)
 
static void preprocess_qual_conditions (PlannerInfo *root, Node *jtnode)
 
static void grouping_planner (PlannerInfo *root, double tuple_fraction, SetOperationStmt *setops)
 
static grouping_sets_data * preprocess_grouping_sets (PlannerInfo *root)
 
static List * remap_to_groupclause_idx (List *groupClause, List *gsets, int *tleref_to_colnum_map)
 
static void preprocess_rowmarks (PlannerInfo *root)
 
static double preprocess_limit (PlannerInfo *root, double tuple_fraction, int64 *offset_est, int64 *count_est)
 
static List * preprocess_groupclause (PlannerInfo *root, List *force)
 
static List * extract_rollup_sets (List *groupingSets)
 
static List * reorder_grouping_sets (List *groupingSets, List *sortclause)
 
static void standard_qp_callback (PlannerInfo *root, void *extra)
 
static double get_number_of_groups (PlannerInfo *root, double path_rows, grouping_sets_data *gd, List *target_list)
 
static RelOptInfo * create_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, grouping_sets_data *gd)
 
static bool is_degenerate_grouping (PlannerInfo *root)
 
static void create_degenerate_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel)
 
static RelOptInfo * make_grouping_rel (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, Node *havingQual)
 
static void create_ordinary_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, GroupPathExtraData *extra, RelOptInfo **partially_grouped_rel_p)
 
static void consider_groupingsets_paths (PlannerInfo *root, RelOptInfo *grouped_rel, Path *path, bool is_sorted, bool can_hash, grouping_sets_data *gd, const AggClauseCosts *agg_costs, double dNumGroups)
 
static RelOptInfo * create_window_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *input_target, PathTarget *output_target, bool output_target_parallel_safe, WindowFuncLists *wflists, List *activeWindows)
 
static void create_one_window_path (PlannerInfo *root, RelOptInfo *window_rel, Path *path, PathTarget *input_target, PathTarget *output_target, WindowFuncLists *wflists, List *activeWindows)
 
static RelOptInfo * create_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target)
 
static void create_partial_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *final_distinct_rel, PathTarget *target)
 
static RelOptInfo * create_final_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *distinct_rel)
 
static List * get_useful_pathkeys_for_distinct (PlannerInfo *root, List *needed_pathkeys, List *path_pathkeys)
 
static RelOptInfo * create_ordered_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, double limit_tuples)
 
static PathTarget * make_group_input_target (PlannerInfo *root, PathTarget *final_target)
 
static PathTarget * make_partial_grouping_target (PlannerInfo *root, PathTarget *grouping_target, Node *havingQual)
 
static List * postprocess_setop_tlist (List *new_tlist, List *orig_tlist)
 
static void optimize_window_clauses (PlannerInfo *root, WindowFuncLists *wflists)
 
static List * select_active_windows (PlannerInfo *root, WindowFuncLists *wflists)
 
static void name_active_windows (List *activeWindows)
 
static PathTarget * make_window_input_target (PlannerInfo *root, PathTarget *final_target, List *activeWindows)
 
static List * make_pathkeys_for_window (PlannerInfo *root, WindowClause *wc, List *tlist)
 
static PathTarget * make_sort_input_target (PlannerInfo *root, PathTarget *final_target, bool *have_postponed_srfs)
 
static void adjust_paths_for_srfs (PlannerInfo *root, RelOptInfo *rel, List *targets, List *targets_contain_srfs)
 
static void add_paths_to_grouping_rel (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, GroupPathExtraData *extra)
 
static RelOptInfo * create_partial_grouping_paths (PlannerInfo *root, RelOptInfo *grouped_rel, RelOptInfo *input_rel, grouping_sets_data *gd, GroupPathExtraData *extra, bool force_rel_creation)
 
static Path * make_ordered_path (PlannerInfo *root, RelOptInfo *rel, Path *path, Path *cheapest_path, List *pathkeys, double limit_tuples)
 
static void gather_grouping_paths (PlannerInfo *root, RelOptInfo *rel)
 
static bool can_partial_agg (PlannerInfo *root)
 
static void apply_scanjoin_target_to_paths (PlannerInfo *root, RelOptInfo *rel, List *scanjoin_targets, List *scanjoin_targets_contain_srfs, bool scanjoin_target_parallel_safe, bool tlist_same_exprs)
 
static void create_partitionwise_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, PartitionwiseAggregateType patype, GroupPathExtraData *extra)
 
static bool group_by_has_partkey (RelOptInfo *input_rel, List *targetList, List *groupClause)
 
static int common_prefix_cmp (const void *a, const void *b)
 
static List * generate_setop_child_grouplist (SetOperationStmt *op, List *targetlist)
 
static void create_final_unique_paths (PlannerInfo *root, RelOptInfo *input_rel, List *sortPathkeys, List *groupClause, SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel)
 
static void create_partial_unique_paths (PlannerInfo *root, RelOptInfo *input_rel, List *sortPathkeys, List *groupClause, SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel)
 
PlannedStmt * planner (Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams, ExplainState *es)
 
PlannedStmt * standard_planner (Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams, ExplainState *es)
 
PlannerInfo * subquery_planner (PlannerGlobal *glob, Query *parse, char *plan_name, PlannerInfo *parent_root, bool hasRecursion, double tuple_fraction, SetOperationStmt *setops)
 
Expr * preprocess_phv_expression (PlannerInfo *root, Expr *expr)
 
RowMarkType select_rowmark_type (RangeTblEntry *rte, LockClauseStrength strength)
 
bool limit_needed (Query *parse)
 
static bool has_volatile_pathkey (List *keys)
 
static void adjust_group_pathkeys_for_groupagg (PlannerInfo *root)
 
void mark_partial_aggref (Aggref *agg, AggSplit aggsplit)
 
Path * get_cheapest_fractional_path (RelOptInfo *rel, double tuple_fraction)
 
Expr * expression_planner (Expr *expr)
 
Expr * expression_planner_with_deps (Expr *expr, List **relationOids, List **invalItems)
 
bool plan_cluster_use_sort (Oid tableOid, Oid indexOid)
 
int plan_create_index_workers (Oid tableOid, Oid indexOid)
 
RelOptInfo * create_unique_paths (PlannerInfo *root, RelOptInfo *rel, SpecialJoinInfo *sjinfo)
 
char * choose_plan_name (PlannerGlobal *glob, const char *name, bool always_number)
 

Variables

double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION
 
int debug_parallel_query = DEBUG_PARALLEL_OFF
 
bool parallel_leader_participation = true
 
bool enable_distinct_reordering = true
 
planner_hook_type planner_hook = NULL
 
planner_setup_hook_type planner_setup_hook = NULL
 
planner_shutdown_hook_type planner_shutdown_hook = NULL
 
create_upper_paths_hook_type create_upper_paths_hook = NULL
 

Macro Definition Documentation

◆ EXPRKIND_APPINFO

#define EXPRKIND_APPINFO   7

Definition at line 94 of file planner.c.

◆ EXPRKIND_ARBITER_ELEM

#define EXPRKIND_ARBITER_ELEM   10

Definition at line 97 of file planner.c.

◆ EXPRKIND_GROUPEXPR

#define EXPRKIND_GROUPEXPR   13

Definition at line 100 of file planner.c.

◆ EXPRKIND_LIMIT

#define EXPRKIND_LIMIT   6

Definition at line 93 of file planner.c.

◆ EXPRKIND_PHV

#define EXPRKIND_PHV   8

Definition at line 95 of file planner.c.

◆ EXPRKIND_QUAL

#define EXPRKIND_QUAL   0

Definition at line 87 of file planner.c.

◆ EXPRKIND_RTFUNC

#define EXPRKIND_RTFUNC   2

Definition at line 89 of file planner.c.

◆ EXPRKIND_RTFUNC_LATERAL

#define EXPRKIND_RTFUNC_LATERAL   3

Definition at line 90 of file planner.c.

◆ EXPRKIND_TABLEFUNC

#define EXPRKIND_TABLEFUNC   11

Definition at line 98 of file planner.c.

◆ EXPRKIND_TABLEFUNC_LATERAL

#define EXPRKIND_TABLEFUNC_LATERAL   12

Definition at line 99 of file planner.c.

◆ EXPRKIND_TABLESAMPLE

#define EXPRKIND_TABLESAMPLE   9

Definition at line 96 of file planner.c.

◆ EXPRKIND_TARGET

#define EXPRKIND_TARGET   1

Definition at line 88 of file planner.c.

◆ EXPRKIND_VALUES

#define EXPRKIND_VALUES   4

Definition at line 91 of file planner.c.

◆ EXPRKIND_VALUES_LATERAL

#define EXPRKIND_VALUES_LATERAL   5

Definition at line 92 of file planner.c.

Function Documentation

◆ add_paths_to_grouping_rel()

static void add_paths_to_grouping_rel ( PlannerInfo * root,
RelOptInfo * input_rel,
RelOptInfo * grouped_rel,
RelOptInfo * partially_grouped_rel,
const AggClauseCosts * agg_costs,
grouping_sets_data * gd,
GroupPathExtraData * extra 
)
static

Definition at line 7152 of file planner.c.

7158{
7159 Query *parse = root->parse;
7160 Path *cheapest_path = input_rel->cheapest_total_path;
7162 ListCell *lc;
7163 bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7164 bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7165 List *havingQual = (List *) extra->havingQual;
7166 AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7167 double dNumGroups = 0;
7168 double dNumFinalGroups = 0;
7169
7170 /*
7171 * Estimate number of groups for non-split aggregation.
7172 */
7173 dNumGroups = get_number_of_groups(root,
7174 cheapest_path->rows,
7175 gd,
7176 extra->targetList);
7177
7178 if (partially_grouped_rel && partially_grouped_rel->pathlist)
7179 {
7180 Path *cheapest_partially_grouped_path =
7181 partially_grouped_rel->cheapest_total_path;
7182
7183 /*
7184 * Estimate number of groups for final phase of partial aggregation.
7185 */
7186 dNumFinalGroups =
7187 get_number_of_groups(root,
7188 cheapest_partially_grouped_path->rows,
7189 gd,
7190 extra->targetList);
7191 }
7192
7193 if (can_sort)
7194 {
7195 /*
7196 * Use any available suitably-sorted path as input, and also consider
7197 * sorting the cheapest-total path and incremental sort on any paths
7198 * with presorted keys.
7199 */
7200 foreach(lc, input_rel->pathlist)
7201 {
7202 ListCell *lc2;
7203 Path *path = (Path *) lfirst(lc);
7204 Path *path_save = path;
7205 List *pathkey_orderings = NIL;
7206
7207 /* generate alternative group orderings that might be useful */
7208 pathkey_orderings = get_useful_group_keys_orderings(root, path);
7209
7210 Assert(list_length(pathkey_orderings) > 0);
7211
7212 foreach(lc2, pathkey_orderings)
7213 {
7214 GroupByOrdering *info = lfirst_node(GroupByOrdering, lc2);
7215
7216 /* restore the path (we replace it in the loop) */
7217 path = path_save;
7218
7219 path = make_ordered_path(root,
7220 grouped_rel,
7221 path,
7222 cheapest_path,
7223 info->pathkeys,
7224 -1.0);
7225 if (path == NULL)
7226 continue;
7227
7228 /* Now decide what to stick atop it */
7229 if (parse->groupingSets)
7230 {
7231 consider_groupingsets_paths(root, grouped_rel,
7232 path, true, can_hash,
7233 gd, agg_costs, dNumGroups);
7234 }
7235 else if (parse->hasAggs)
7236 {
7237 /*
7238 * We have aggregation, possibly with plain GROUP BY. Make
7239 * an AggPath.
7240 */
7241 add_path(grouped_rel, (Path *)
7242 create_agg_path(root,
7243 grouped_rel,
7244 path,
7245 grouped_rel->reltarget,
7246 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7247 AGGSPLIT_SIMPLE,
7248 info->clauses,
7249 havingQual,
7250 agg_costs,
7251 dNumGroups));
7252 }
7253 else if (parse->groupClause)
7254 {
7255 /*
7256 * We have GROUP BY without aggregation or grouping sets.
7257 * Make a GroupPath.
7258 */
7259 add_path(grouped_rel, (Path *)
7260 create_group_path(root,
7261 grouped_rel,
7262 path,
7263 info->clauses,
7264 havingQual,
7265 dNumGroups));
7266 }
7267 else
7268 {
7269 /* Other cases should have been handled above */
7270 Assert(false);
7271 }
7272 }
7273 }
7274
7275 /*
7276 * Instead of operating directly on the input relation, we can
7277 * consider finalizing a partially aggregated path.
7278 */
7279 if (partially_grouped_rel != NULL)
7280 {
7281 foreach(lc, partially_grouped_rel->pathlist)
7282 {
7283 ListCell *lc2;
7284 Path *path = (Path *) lfirst(lc);
7285 Path *path_save = path;
7286 List *pathkey_orderings = NIL;
7287
7288 /* generate alternative group orderings that might be useful */
7289 pathkey_orderings = get_useful_group_keys_orderings(root, path);
7290
7291 Assert(list_length(pathkey_orderings) > 0);
7292
7293 /* process all potentially interesting grouping reorderings */
7294 foreach(lc2, pathkey_orderings)
7295 {
7296 GroupByOrdering *info = lfirst_node(GroupByOrdering, lc2);
7297
7298 /* restore the path (we replace it in the loop) */
7299 path = path_save;
7300
7301 path = make_ordered_path(root,
7302 grouped_rel,
7303 path,
7304 partially_grouped_rel->cheapest_total_path,
7305 info->pathkeys,
7306 -1.0);
7307
7308 if (path == NULL)
7309 continue;
7310
7311 if (parse->hasAggs)
7312 add_path(grouped_rel, (Path *)
7313 create_agg_path(root,
7314 grouped_rel,
7315 path,
7316 grouped_rel->reltarget,
7317 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7318 AGGSPLIT_FINAL_DESERIAL,
7319 info->clauses,
7320 havingQual,
7321 agg_final_costs,
7322 dNumFinalGroups));
7323 else
7324 add_path(grouped_rel, (Path *)
7325 create_group_path(root,
7326 grouped_rel,
7327 path,
7328 info->clauses,
7329 havingQual,
7330 dNumFinalGroups));
7331
7332 }
7333 }
7334 }
7335 }
7336
7337 if (can_hash)
7338 {
7339 if (parse->groupingSets)
7340 {
7341 /*
7342 * Try for a hash-only groupingsets path over unsorted input.
7343 */
7344 consider_groupingsets_paths(root, grouped_rel,
7345 cheapest_path, false, true,
7346 gd, agg_costs, dNumGroups);
7347 }
7348 else
7349 {
7350 /*
7351 * Generate a HashAgg Path. We just need an Agg over the
7352 * cheapest-total input path, since input order won't matter.
7353 */
7354 add_path(grouped_rel, (Path *)
7355 create_agg_path(root, grouped_rel,
7356 cheapest_path,
7357 grouped_rel->reltarget,
7358 AGG_HASHED,
7359 AGGSPLIT_SIMPLE,
7360 root->processed_groupClause,
7361 havingQual,
7362 agg_costs,
7363 dNumGroups));
7364 }
7365
7366 /*
7367 * Generate a Finalize HashAgg Path atop of the cheapest partially
7368 * grouped path, assuming there is one
7369 */
7370 if (partially_grouped_rel && partially_grouped_rel->pathlist)
7371 {
7372 add_path(grouped_rel, (Path *)
7373 create_agg_path(root,
7374 grouped_rel,
7375 partially_grouped_rel->cheapest_total_path,
7376 grouped_rel->reltarget,
7377 AGG_HASHED,
7378 AGGSPLIT_FINAL_DESERIAL,
7379 root->processed_groupClause,
7380 havingQual,
7381 agg_final_costs,
7382 dNumFinalGroups));
7383 }
7384 }
7385
7386 /*
7387 * When partitionwise aggregate is used, we might have fully aggregated
7388 * paths in the partial pathlist, because add_paths_to_append_rel() will
7389 * consider a path for grouped_rel consisting of a Parallel Append of
7390 * non-partial paths from each child.
7391 */
7392 if (grouped_rel->partial_pathlist != NIL)
7393 gather_grouping_paths(root, grouped_rel);
7394}
#define Assert(condition)
Definition c.h:873
void parse(int)
Definition parse.c:49
@ AGG_SORTED
Definition nodes.h:365
@ AGG_HASHED
Definition nodes.h:366
@ AGG_PLAIN
Definition nodes.h:364
@ AGGSPLIT_FINAL_DESERIAL
Definition nodes.h:391
@ AGGSPLIT_SIMPLE
Definition nodes.h:387
List * get_useful_group_keys_orderings(PlannerInfo *root, Path *path)
Definition pathkeys.c:467
GroupPath * create_group_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *groupClause, List *qual, double numGroups)
Definition pathnode.c:2892
void add_path(RelOptInfo *parent_rel, Path *new_path)
Definition pathnode.c:459
AggPath * create_agg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, AggStrategy aggstrategy, AggSplit aggsplit, List *groupClause, List *qual, const AggClauseCosts *aggcosts, double numGroups)
Definition pathnode.c:3001
#define GROUPING_CAN_USE_HASH
Definition pathnodes.h:3600
#define GROUPING_CAN_USE_SORT
Definition pathnodes.h:3599
#define lfirst(lc)
Definition pg_list.h:172
static int list_length(const List *l)
Definition pg_list.h:152
#define NIL
Definition pg_list.h:68
static void gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
Definition planner.c:7824
static Path * make_ordered_path(PlannerInfo *root, RelOptInfo *rel, Path *path, Path *cheapest_path, List *pathkeys, double limit_tuples)
Definition planner.c:7765
static void consider_groupingsets_paths(PlannerInfo *root, RelOptInfo *grouped_rel, Path *path, bool is_sorted, bool can_hash, grouping_sets_data *gd, const AggClauseCosts *agg_costs, double dNumGroups)
Definition planner.c:4241
static double get_number_of_groups(PlannerInfo *root, double path_rows, grouping_sets_data *gd, List *target_list)
Definition planner.c:3734
static int fb(int x)
tree ctl root
Definition radixtree.h:1857
AggClauseCosts agg_final_costs
Definition pathnodes.h:3640
Definition pg_list.h:54
struct PathTarget * reltarget
Definition pathnodes.h:1027
List * partial_pathlist
Definition pathnodes.h:1034

References add_path(), GroupPathExtraData::agg_final_costs, AGG_HASHED, AGG_PLAIN, AGG_SORTED, AGGSPLIT_FINAL_DESERIAL, AGGSPLIT_SIMPLE, Assert, GroupByOrdering::clauses, consider_groupingsets_paths(), create_agg_path(), create_group_path(), fb(), GroupPathExtraData::flags, gather_grouping_paths(), get_number_of_groups(), get_useful_group_keys_orderings(), GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, GroupPathExtraData::havingQual, lfirst, list_length(), make_ordered_path(), NIL, parse(), RelOptInfo::partial_pathlist, GroupByOrdering::pathkeys, RelOptInfo::reltarget, root, and GroupPathExtraData::targetList.

Referenced by create_ordinary_grouping_paths().
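
The core of the sorted branch is the if/else ladder at lines 7228-7271, which decides what to stack atop each ordered input path. The following standalone C program is an illustration only, not PostgreSQL code: the query_flags struct is a hypothetical stand-in for the parse->groupingSets, parse->hasAggs and parse->groupClause fields consulted above, and the returned strings name the path types the real code builds.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the Query fields consulted above. */
struct query_flags
{
    bool has_grouping_sets;
    bool has_aggs;
    bool has_group_clause;
};

/* Mirrors the ladder: grouping sets first, then aggregation (sorted or
 * plain depending on GROUP BY), then plain GROUP BY; anything else was
 * handled before this function was reached. */
static const char *
choose_sorted_grouping_node(const struct query_flags *q)
{
    if (q->has_grouping_sets)
        return "GroupingSetsPath";
    else if (q->has_aggs)
        return q->has_group_clause ? "AggPath (AGG_SORTED)" : "AggPath (AGG_PLAIN)";
    else if (q->has_group_clause)
        return "GroupPath";
    assert(false);   /* other cases should have been handled above */
    return NULL;
}

int
main(void)
{
    struct query_flags q = {false, true, true};

    printf("%s\n", choose_sorted_grouping_node(&q)); /* AggPath (AGG_SORTED) */
    return 0;
}

The same ladder runs once per useful group-key ordering of every input path, which is why the real loop restores path from path_save on each iteration.
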

◆ adjust_group_pathkeys_for_groupagg()

static void adjust_group_pathkeys_for_groupagg ( PlannerInfo * root)
static

Definition at line 3305 of file planner.c.

3306{
3307 List *grouppathkeys = root->group_pathkeys;
3308 List *bestpathkeys;
3309 Bitmapset *bestaggs;
3310 Bitmapset *unprocessed_aggs;
3311 ListCell *lc;
3312 int i;
3313
3314 /* Shouldn't be here if there are grouping sets */
3315 Assert(root->parse->groupingSets == NIL);
3316 /* Shouldn't be here unless there are some ordered aggregates */
3317 Assert(root->numOrderedAggs > 0);
3318
3319 /* Do nothing if disabled */
3320 if (!enable_presorted_aggregate)
3321 return;
3322
3323 /*
3324 * Make a first pass over all AggInfos to collect a Bitmapset containing
3325 * the indexes of all AggInfos to be processed below.
3326 */
3327 unprocessed_aggs = NULL;
3328 foreach(lc, root->agginfos)
3329 {
3330 AggInfo *agginfo = lfirst_node(AggInfo, lc);
3331 Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3332
3333 if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
3334 continue;
3335
3336 /* Skip unless there's a DISTINCT or ORDER BY clause */
3337 if (aggref->aggdistinct == NIL && aggref->aggorder == NIL)
3338 continue;
3339
3340 /* Additional safety checks are needed if there's a FILTER clause */
3341 if (aggref->aggfilter != NULL)
3342 {
3343 ListCell *lc2;
3344 bool allow_presort = true;
3345
3346 /*
3347 * When the Aggref has a FILTER clause, it's possible that the
3348 * filter removes rows that cannot be sorted because the
3349 * expression to sort by results in an error during its
3350 * evaluation. This is a problem for presorting as that happens
3351 * before the FILTER, whereas without presorting, the Aggregate
3352 * node will apply the FILTER *before* sorting. So that we never
3353 * try to sort anything that might error, here we aim to skip over
3354 * any Aggrefs with arguments with expressions which, when
3355 * evaluated, could cause an ERROR. Vars and Consts are ok. There
3356 * may be more cases that should be allowed, but more thought
3357 * needs to be given. Err on the side of caution.
3358 */
3359 foreach(lc2, aggref->args)
3360 {
3361 TargetEntry *tle = lfirst_node(TargetEntry, lc2);
3362 Expr *expr = tle->expr;
3363
3364 while (IsA(expr, RelabelType))
3365 expr = (Expr *) (castNode(RelabelType, expr))->arg;
3366
3367 /* Common case, Vars and Consts are ok */
3368 if (IsA(expr, Var) || IsA(expr, Const))
3369 continue;
3370
3371 /* Unsupported. Don't try to presort for this Aggref */
3372 allow_presort = false;
3373 break;
3374 }
3375
3376 /* Skip unsupported Aggrefs */
3377 if (!allow_presort)
3378 continue;
3379 }
3380
3381 unprocessed_aggs = bms_add_member(unprocessed_aggs,
3382 foreach_current_index(lc));
3383 }
3384
3385 /*
3386 * Now process all the unprocessed_aggs to find the best set of pathkeys
3387 * for the given set of aggregates.
3388 *
3389 * On the first outer loop here 'bestaggs' will be empty. We'll populate
3390 * this during the first loop using the pathkeys for the very first
3391 * AggInfo then taking any stronger pathkeys from any other AggInfos with
3392 * a more strict set of compatible pathkeys. Once the outer loop is
3393 * complete, we mark off all the aggregates with compatible pathkeys then
3394 * remove those from the unprocessed_aggs and repeat the process to try to
3395 * find another set of pathkeys that are suitable for a larger number of
3396 * aggregates. The outer loop will stop when there are not enough
3397 * unprocessed aggregates for it to be possible to find a set of pathkeys
3398 * to suit a larger number of aggregates.
3399 */
3400 bestpathkeys = NIL;
3401 bestaggs = NULL;
3402 while (bms_num_members(unprocessed_aggs) > bms_num_members(bestaggs))
3403 {
3404 Bitmapset *aggindexes = NULL;
3405 List *currpathkeys = NIL;
3406
3407 i = -1;
3408 while ((i = bms_next_member(unprocessed_aggs, i)) >= 0)
3409 {
3410 AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3411 Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3412 List *sortlist;
3413 List *pathkeys;
3414
3415 if (aggref->aggdistinct != NIL)
3416 sortlist = aggref->aggdistinct;
3417 else
3418 sortlist = aggref->aggorder;
3419
3420 pathkeys = make_pathkeys_for_sortclauses(root, sortlist,
3421 aggref->args);
3422
3423 /*
3424 * Ignore Aggrefs which have volatile functions in their ORDER BY
3425 * or DISTINCT clause.
3426 */
3427 if (has_volatile_pathkey(pathkeys))
3428 {
3429 unprocessed_aggs = bms_del_member(unprocessed_aggs, i);
3430 continue;
3431 }
3432
3433 /*
3434 * When not set yet, take the pathkeys from the first unprocessed
3435 * aggregate.
3436 */
3437 if (currpathkeys == NIL)
3438 {
3439 currpathkeys = pathkeys;
3440
3441 /* include the GROUP BY pathkeys, if they exist */
3442 if (grouppathkeys != NIL)
3443 currpathkeys = append_pathkeys(list_copy(grouppathkeys),
3444 currpathkeys);
3445
3446 /* record that we found pathkeys for this aggregate */
3447 aggindexes = bms_add_member(aggindexes, i);
3448 }
3449 else
3450 {
3451 /* now look for a stronger set of matching pathkeys */
3452
3453 /* include the GROUP BY pathkeys, if they exist */
3454 if (grouppathkeys != NIL)
3455 pathkeys = append_pathkeys(list_copy(grouppathkeys),
3456 pathkeys);
3457
3458 /* are 'pathkeys' compatible or better than 'currpathkeys'? */
3459 switch (compare_pathkeys(currpathkeys, pathkeys))
3460 {
3461 case PATHKEYS_BETTER2:
3462 /* 'pathkeys' are stronger, use these ones instead */
3463 currpathkeys = pathkeys;
3464 /* FALLTHROUGH */
3465
3466 case PATHKEYS_BETTER1:
3467 /* 'pathkeys' are less strict */
3468 /* FALLTHROUGH */
3469
3470 case PATHKEYS_EQUAL:
3471 /* mark this aggregate as covered by 'currpathkeys' */
3472 aggindexes = bms_add_member(aggindexes, i);
3473 break;
3474
3475 case PATHKEYS_DIFFERENT:
3476 break;
3477 }
3478 }
3479 }
3480
3481 /* remove the aggregates that we've just processed */
3482 unprocessed_aggs = bms_del_members(unprocessed_aggs, aggindexes);
3483
3484 /*
3485 * If this pass included more aggregates than the previous best then
3486 * use these ones as the best set.
3487 */
3488 if (bms_num_members(aggindexes) > bms_num_members(bestaggs))
3489 {
3490 bestaggs = aggindexes;
3491 bestpathkeys = currpathkeys;
3492 }
3493 }
3494
3495 /*
3496 * If we found any ordered aggregates, update root->group_pathkeys to add
3497 * the best set of aggregate pathkeys. Note that bestpathkeys includes
3498 * the original GROUP BY pathkeys already.
3499 */
3500 if (bestpathkeys != NIL)
3501 root->group_pathkeys = bestpathkeys;
3502
3503 /*
3504 * Now that we've found the best set of aggregates we can set the
3505 * presorted flag to indicate to the executor that it needn't bother
3506 * performing a sort for these Aggrefs. We're able to do this now as
3507 * there's no chance of a Hash Aggregate plan as create_grouping_paths
3508 * will not mark the GROUP BY as GROUPING_CAN_USE_HASH due to the presence
3509 * of ordered aggregates.
3510 */
3511 i = -1;
3512 while ((i = bms_next_member(bestaggs, i)) >= 0)
3513 {
3514 AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3515
3516 foreach(lc, agginfo->aggrefs)
3517 {
3518 Aggref *aggref = lfirst_node(Aggref, lc);
3519
3520 aggref->aggpresorted = true;
3521 }
3522 }
3523}
int bms_next_member(const Bitmapset *a, int prevbit)
Definition bitmapset.c:1305
Bitmapset * bms_del_members(Bitmapset *a, const Bitmapset *b)
Definition bitmapset.c:1160
Bitmapset * bms_del_member(Bitmapset *a, int x)
Definition bitmapset.c:867
int bms_num_members(const Bitmapset *a)
Definition bitmapset.c:750
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition bitmapset.c:814
bool enable_presorted_aggregate
Definition costsize.c:164
int i
Definition isn.c:77
List * list_copy(const List *oldlist)
Definition list.c:1573
#define IsA(nodeptr, _type_)
Definition nodes.h:164
#define castNode(_type_, nodeptr)
Definition nodes.h:182
List * append_pathkeys(List *target, List *source)
Definition pathkeys.c:107
List * make_pathkeys_for_sortclauses(PlannerInfo *root, List *sortclauses, List *tlist)
Definition pathkeys.c:1336
PathKeysComparison compare_pathkeys(List *keys1, List *keys2)
Definition pathkeys.c:304
@ PATHKEYS_BETTER2
Definition paths.h:222
@ PATHKEYS_BETTER1
Definition paths.h:221
@ PATHKEYS_DIFFERENT
Definition paths.h:223
@ PATHKEYS_EQUAL
Definition paths.h:220
void * arg
#define lfirst_node(type, lc)
Definition pg_list.h:176
#define linitial_node(type, l)
Definition pg_list.h:181
#define foreach_current_index(var_or_cell)
Definition pg_list.h:403
#define list_nth_node(type, list, n)
Definition pg_list.h:327
static bool has_volatile_pathkey(List *keys)
Definition planner.c:3260
List * aggdistinct
Definition primnodes.h:493
List * args
Definition primnodes.h:487
Expr * aggfilter
Definition primnodes.h:496
List * aggorder
Definition primnodes.h:490

References Aggref::aggdistinct, Aggref::aggfilter, Aggref::aggorder, append_pathkeys(), arg, Aggref::args, Assert, bms_add_member(), bms_del_member(), bms_del_members(), bms_next_member(), bms_num_members(), castNode, compare_pathkeys(), enable_presorted_aggregate, fb(), foreach_current_index, has_volatile_pathkey(), i, IsA, lfirst, lfirst_node, linitial_node, list_copy(), list_nth_node, make_pathkeys_for_sortclauses(), NIL, PATHKEYS_BETTER1, PATHKEYS_BETTER2, PATHKEYS_DIFFERENT, PATHKEYS_EQUAL, and root.

Referenced by standard_qp_callback().
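
The bookkeeping above reduces to a greedy covering loop: pick a sort order, mark every aggregate it satisfies, repeat with the leftovers. Below is a much-simplified standalone sketch, for illustration only: orderings are plain strings and a prefix test stands in for compare_pathkeys(); the real code additionally prepends the GROUP BY pathkeys and records only the best pass in bestaggs/bestpathkeys.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NAGGS 4

/* "ab" is compatible with "abc" (prefix relation); this plays the role
 * of compare_pathkeys() returning PATHKEYS_BETTER1/BETTER2/EQUAL. */
static bool
compatible(const char *a, const char *b)
{
    size_t la = strlen(a), lb = strlen(b);

    return strncmp(a, b, la < lb ? la : lb) == 0;
}

int
main(void)
{
    const char *want[NAGGS] = {"ab", "abc", "x", "a"};  /* per-aggregate sort wishes */
    bool done[NAGGS] = {false};
    int remaining = NAGGS;

    while (remaining > 0)
    {
        const char *curr = NULL;
        int covered = 0;

        /* pick a candidate ordering, preferring stronger compatible ones */
        for (int i = 0; i < NAGGS; i++)
        {
            if (done[i])
                continue;
            if (curr == NULL ||
                (compatible(curr, want[i]) && strlen(want[i]) > strlen(curr)))
                curr = want[i];
        }

        /* mark every remaining aggregate the chosen ordering covers */
        for (int i = 0; i < NAGGS; i++)
            if (!done[i] && compatible(curr, want[i]))
            {
                done[i] = true;
                remaining--;
                covered++;
            }
        printf("sort by \"%s\" covers %d aggregate(s)\n", curr, covered);
    }
    return 0;
}

Here one pass on "abc" covers three of the four aggregates, and a second pass mops up "x"; the real function then sets aggpresorted on the winners so the executor skips its own sort.
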

◆ adjust_paths_for_srfs()

static void adjust_paths_for_srfs ( PlannerInfo * root,
RelOptInfo * rel,
List * targets,
List * targets_contain_srfs 
)
static

Definition at line 6701 of file planner.c.

6703{
6704 ListCell *lc;
6705
6706 Assert(list_length(targets) == list_length(targets_contain_srfs));
6707 Assert(!linitial_int(targets_contain_srfs));
6708
6709 /* If no SRFs appear at this plan level, nothing to do */
6710 if (list_length(targets) == 1)
6711 return;
6712
6713 /*
6714 * Stack SRF-evaluation nodes atop each path for the rel.
6715 *
6716 * In principle we should re-run set_cheapest() here to identify the
6717 * cheapest path, but it seems unlikely that adding the same tlist eval
6718 * costs to all the paths would change that, so we don't bother. Instead,
6719 * just assume that the cheapest-startup and cheapest-total paths remain
6720 * so. (There should be no parameterized paths anymore, so we needn't
6721 * worry about updating cheapest_parameterized_paths.)
6722 */
6723 foreach(lc, rel->pathlist)
6724 {
6725 Path *subpath = (Path *) lfirst(lc);
6726 Path *newpath = subpath;
6727 ListCell *lc1,
6728 *lc2;
6729
6730 Assert(subpath->param_info == NULL);
6731 forboth(lc1, targets, lc2, targets_contain_srfs)
6732 {
6733 PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6734 bool contains_srfs = (bool) lfirst_int(lc2);
6735
6736 /* If this level doesn't contain SRFs, do regular projection */
6737 if (contains_srfs)
6738 newpath = (Path *) create_set_projection_path(root,
6739 rel,
6740 newpath,
6741 thistarget);
6742 else
6743 newpath = (Path *) apply_projection_to_path(root,
6744 rel,
6745 newpath,
6746 thistarget);
6747 }
6748 lfirst(lc) = newpath;
6749 if (subpath == rel->cheapest_startup_path)
6750 rel->cheapest_startup_path = newpath;
6751 if (subpath == rel->cheapest_total_path)
6752 rel->cheapest_total_path = newpath;
6753 }
6754
6755 /* Likewise for partial paths, if any */
6756 foreach(lc, rel->partial_pathlist)
6757 {
6758 Path *subpath = (Path *) lfirst(lc);
6759 Path *newpath = subpath;
6760 ListCell *lc1,
6761 *lc2;
6762
6763 Assert(subpath->param_info == NULL);
6764 forboth(lc1, targets, lc2, targets_contain_srfs)
6765 {
6766 PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6767 bool contains_srfs = (bool) lfirst_int(lc2);
6768
6769 /* If this level doesn't contain SRFs, do regular projection */
6770 if (contains_srfs)
6771 newpath = (Path *) create_set_projection_path(root,
6772 rel,
6773 newpath,
6774 thistarget);
6775 else
6776 {
6777 /* avoid apply_projection_to_path, in case of multiple refs */
6778 newpath = (Path *) create_projection_path(root,
6779 rel,
6780 newpath,
6781 thistarget);
6782 }
6783 }
6784 lfirst(lc) = newpath;
6785 }
6786}
Datum subpath(PG_FUNCTION_ARGS)
Definition ltree_op.c:311
ProjectSetPath * create_set_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition pathnode.c:2729
ProjectionPath * create_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition pathnode.c:2531
Path * apply_projection_to_path(PlannerInfo *root, RelOptInfo *rel, Path *path, PathTarget *target)
Definition pathnode.c:2640
#define forboth(cell1, list1, cell2, list2)
Definition pg_list.h:518
#define lfirst_int(lc)
Definition pg_list.h:173
#define linitial_int(l)
Definition pg_list.h:179
List * pathlist
Definition pathnodes.h:1032
struct Path * cheapest_startup_path
Definition pathnodes.h:1035
struct Path * cheapest_total_path
Definition pathnodes.h:1036

References apply_projection_to_path(), Assert, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, create_projection_path(), create_set_projection_path(), fb(), forboth, lfirst, lfirst_int, lfirst_node, linitial_int, list_length(), RelOptInfo::partial_pathlist, RelOptInfo::pathlist, root, and subpath().

Referenced by apply_scanjoin_target_to_paths(), and grouping_planner().
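
The stacking behavior is easy to visualize: one node per entry in targets, with a ProjectSet level wherever targets_contain_srfs says so. A hypothetical standalone sketch, using plain strings instead of Path nodes (the level split itself is produced elsewhere, by split_pathtarget_at_srfs()):

#include <stdbool.h>
#include <stdio.h>

#define NLEVELS 3

int
main(void)
{
    /* Hypothetical decomposition of a final target into SRF-free and
     * SRF-bearing levels; the first level is never an SRF level, matching
     * the Assert(!linitial_int(...)) above. */
    bool contains_srfs[NLEVELS] = {false, true, false};
    char plan[256] = "SeqScan";

    /* Stack one node per level atop the existing path: exactly one
     * ProjectSet per SRF level, a plain projection otherwise. */
    for (int lvl = 0; lvl < NLEVELS; lvl++)
    {
        char tmp[256];

        snprintf(tmp, sizeof(tmp), "%s(%s)",
                 contains_srfs[lvl] ? "ProjectSet" : "Projection", plan);
        snprintf(plan, sizeof(plan), "%s", tmp);
    }
    printf("%s\n", plan);  /* Projection(ProjectSet(Projection(SeqScan))) */
    return 0;
}
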

◆ apply_scanjoin_target_to_paths()

static void apply_scanjoin_target_to_paths ( PlannerInfo * root,
RelOptInfo * rel,
List * scanjoin_targets,
List * scanjoin_targets_contain_srfs,
bool  scanjoin_target_parallel_safe,
bool  tlist_same_exprs 
)
static

Definition at line 7949 of file planner.c.

7955{
7956 bool rel_is_partitioned = IS_PARTITIONED_REL(rel);
7957 PathTarget *scanjoin_target;
7958 ListCell *lc;
7959
7960 /* This recurses, so be paranoid. */
7961 check_stack_depth();
7962
7963 /*
7964 * If the rel only has Append and MergeAppend paths, we want to drop its
7965 * existing paths and generate new ones. This function would still be
7966 * correct if we kept the existing paths: we'd modify them to generate the
7967 * correct target above the partitioning Append, and then they'd compete
7968 * on cost with paths generating the target below the Append. However, in
7969 * our current cost model the latter way is always the same or cheaper
7970 * cost, so modifying the existing paths would just be useless work.
7971 * Moreover, when the cost is the same, varying roundoff errors might
7972 * sometimes allow an existing path to be picked, resulting in undesirable
7973 * cross-platform plan variations. So we drop old paths and thereby force
7974 * the work to be done below the Append.
7975 *
7976 * However, there are several cases when this optimization is not safe. If
7977 * the rel isn't partitioned, then none of the paths will be Append or
7978 * MergeAppend paths, so we should definitely not do this. If it is
7979 * partitioned but is a joinrel, it may have Append and MergeAppend paths,
7980 * but it can also have join paths that we can't afford to discard.
7981 *
7982 * Some care is needed, because we have to allow
7983 * generate_useful_gather_paths to see the old partial paths in the next
7984 * stanza. Hence, zap the main pathlist here, then allow
7985 * generate_useful_gather_paths to add path(s) to the main list, and
7986 * finally zap the partial pathlist.
7987 */
7988 if (rel_is_partitioned)
7989 rel->pathlist = NIL;
7990
7991 /*
7992 * If the scan/join target is not parallel-safe, partial paths cannot
7993 * generate it.
7994 */
7995 if (!scanjoin_target_parallel_safe)
7996 {
7997 /*
7998 * Since we can't generate the final scan/join target in parallel
7999 * workers, this is our last opportunity to use any partial paths that
8000 * exist; so build Gather path(s) that use them and emit whatever the
8001 * current reltarget is. We don't do this in the case where the
8002 * target is parallel-safe, since we will be able to generate superior
8003 * paths by doing it after the final scan/join target has been
8004 * applied.
8005 */
8006 generate_useful_gather_paths(root, rel, false);
8007
8008 /* Can't use parallel query above this level. */
8009 rel->partial_pathlist = NIL;
8010 rel->consider_parallel = false;
8011 }
8012
8013 /* Finish dropping old paths for a partitioned rel, per comment above */
8014 if (rel_is_partitioned)
8015 rel->partial_pathlist = NIL;
8016
8017 /* Extract SRF-free scan/join target. */
8018 scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
8019
8020 /*
8021 * Apply the SRF-free scan/join target to each existing path.
8022 *
8023 * If the tlist exprs are the same, we can just inject the sortgroupref
8024 * information into the existing pathtargets. Otherwise, replace each
8025 * path with a projection path that generates the SRF-free scan/join
8026 * target. This can't change the ordering of paths within rel->pathlist,
8027 * so we just modify the list in place.
8028 */
8029 foreach(lc, rel->pathlist)
8030 {
8031 Path *subpath = (Path *) lfirst(lc);
8032
8033 /* Shouldn't have any parameterized paths anymore */
8034 Assert(subpath->param_info == NULL);
8035
8036 if (tlist_same_exprs)
8037 subpath->pathtarget->sortgrouprefs =
8038 scanjoin_target->sortgrouprefs;
8039 else
8040 {
8041 Path *newpath;
8042
8045 lfirst(lc) = newpath;
8046 }
8047 }
8048
8049 /* Likewise adjust the targets for any partial paths. */
8050 foreach(lc, rel->partial_pathlist)
8051 {
8052 Path *subpath = (Path *) lfirst(lc);
8053
8054 /* Shouldn't have any parameterized paths anymore */
8055 Assert(subpath->param_info == NULL);
8056
8057 if (tlist_same_exprs)
8058 subpath->pathtarget->sortgrouprefs =
8059 scanjoin_target->sortgrouprefs;
8060 else
8061 {
8062 Path *newpath;
8063
8066 lfirst(lc) = newpath;
8067 }
8068 }
8069
8070 /*
8071 * Now, if final scan/join target contains SRFs, insert ProjectSetPath(s)
8072 * atop each existing path. (Note that this function doesn't look at the
8073 * cheapest-path fields, which is a good thing because they're bogus right
8074 * now.)
8075 */
8076 if (root->parse->hasTargetSRFs)
8077 adjust_paths_for_srfs(root, rel,
8078 scanjoin_targets,
8079 scanjoin_targets_contain_srfs);
8080
8081 /*
8082 * Update the rel's target to be the final (with SRFs) scan/join target.
8083 * This now matches the actual output of all the paths, and we might get
8084 * confused in createplan.c if they don't agree. We must do this now so
8085 * that any append paths made in the next part will use the correct
8086 * pathtarget (cf. create_append_path).
8087 *
8088 * Note that this is also necessary if GetForeignUpperPaths() gets called
8089 * on the final scan/join relation or on any of its children, since the
8090 * FDW might look at the rel's target to create ForeignPaths.
8091 */
8092 rel->reltarget = llast_node(PathTarget, scanjoin_targets);
8093
8094 /*
8095 * If the relation is partitioned, recursively apply the scan/join target
8096 * to all partitions, and generate brand-new Append paths in which the
8097 * scan/join target is computed below the Append rather than above it.
8098 * Since Append is not projection-capable, that might save a separate
8099 * Result node, and it also is important for partitionwise aggregate.
8100 */
8101 if (rel_is_partitioned)
8102 {
8103 List *live_children = NIL;
8104 int i;
8105
8106 /* Adjust each partition. */
8107 i = -1;
8108 while ((i = bms_next_member(rel->live_parts, i)) >= 0)
8109 {
8110 RelOptInfo *child_rel = rel->part_rels[i];
8111 AppendRelInfo **appinfos;
8112 int nappinfos;
8113 List *child_scanjoin_targets = NIL;
8114
8115 Assert(child_rel != NULL);
8116
8117 /* Dummy children can be ignored. */
8118 if (IS_DUMMY_REL(child_rel))
8119 continue;
8120
8121 /* Translate scan/join targets for this child. */
8122 appinfos = find_appinfos_by_relids(root, child_rel->relids,
8123 &nappinfos);
8124 foreach(lc, scanjoin_targets)
8125 {
8126 PathTarget *target = lfirst_node(PathTarget, lc);
8127
8128 target = copy_pathtarget(target);
8129 target->exprs = (List *)
8131 (Node *) target->exprs,
8132 nappinfos, appinfos);
8133 child_scanjoin_targets = lappend(child_scanjoin_targets,
8134 target);
8135 }
8136 pfree(appinfos);
8137
8138 /* Recursion does the real work. */
8139 apply_scanjoin_target_to_paths(root, child_rel,
8140 child_scanjoin_targets,
8141 scanjoin_targets_contain_srfs,
8142 scanjoin_target_parallel_safe,
8143 tlist_same_exprs);
8144
8145 /* Save non-dummy children for Append paths. */
8146 if (!IS_DUMMY_REL(child_rel))
8147 live_children = lappend(live_children, child_rel);
8148 }
8149
8150 /* Build new paths for this relation by appending child paths. */
8151 add_paths_to_append_rel(root, rel, live_children);
8152 }
8153
8154 /*
8155 * Consider generating Gather or Gather Merge paths. We must only do this
8156 * if the relation is parallel safe, and we don't do it for child rels to
8157 * avoid creating multiple Gather nodes within the same plan. We must do
8158 * this after all paths have been generated and before set_cheapest, since
8159 * one of the generated paths may turn out to be the cheapest one.
8160 */
8161 if (rel->consider_parallel && !IS_OTHER_REL(rel))
8162 generate_useful_gather_paths(root, rel, false);
8163
8164 /*
8165 * Reassess which paths are the cheapest, now that we've potentially added
8166 * new Gather (or Gather Merge) and/or Append (or MergeAppend) paths to
8167 * this relation.
8168 */
8169 set_cheapest(rel);
8170}
void generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
Definition allpaths.c:3325
void add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, List *live_childrels)
Definition allpaths.c:1404
AppendRelInfo ** find_appinfos_by_relids(PlannerInfo *root, Relids relids, int *nappinfos)
Definition appendinfo.c:804
Node * adjust_appendrel_attrs(PlannerInfo *root, Node *node, int nappinfos, AppendRelInfo **appinfos)
Definition appendinfo.c:200
List * lappend(List *list, void *datum)
Definition list.c:339
void pfree(void *pointer)
Definition mcxt.c:1616
void set_cheapest(RelOptInfo *parent_rel)
Definition pathnode.c:268
#define IS_SIMPLE_REL(rel)
Definition pathnodes.h:971
#define IS_DUMMY_REL(r)
Definition pathnodes.h:2272
#define IS_PARTITIONED_REL(rel)
Definition pathnodes.h:1213
#define IS_OTHER_REL(rel)
Definition pathnodes.h:986
#define llast_node(type, l)
Definition pg_list.h:202
static void apply_scanjoin_target_to_paths(PlannerInfo *root, RelOptInfo *rel, List *scanjoin_targets, List *scanjoin_targets_contain_srfs, bool scanjoin_target_parallel_safe, bool tlist_same_exprs)
Definition planner.c:7949
static void adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel, List *targets, List *targets_contain_srfs)
Definition planner.c:6701
void check_stack_depth(void)
Definition stack_depth.c:95
Definition nodes.h:135
List * exprs
Definition pathnodes.h:1858
bool consider_parallel
Definition pathnodes.h:1019
Bitmapset * live_parts
Definition pathnodes.h:1186
bool tlist_same_exprs(List *tlist1, List *tlist2)
Definition tlist.c:227
PathTarget * copy_pathtarget(PathTarget *src)
Definition tlist.c:666

References add_paths_to_append_rel(), adjust_appendrel_attrs(), adjust_paths_for_srfs(), apply_scanjoin_target_to_paths(), Assert, bms_next_member(), check_stack_depth(), RelOptInfo::consider_parallel, copy_pathtarget(), create_projection_path(), PathTarget::exprs, fb(), find_appinfos_by_relids(), generate_useful_gather_paths(), i, IS_DUMMY_REL, IS_OTHER_REL, IS_PARTITIONED_REL, IS_SIMPLE_REL, lappend(), lfirst, lfirst_node, linitial_node, RelOptInfo::live_parts, llast_node, NIL, RelOptInfo::partial_pathlist, RelOptInfo::pathlist, pfree(), RelOptInfo::reltarget, root, set_cheapest(), subpath(), and tlist_same_exprs().

Referenced by apply_scanjoin_target_to_paths(), and grouping_planner().
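
The per-path loop turns on one cheap trick: when tlist_same_exprs is true the expressions already match, so only the sortgroupref labels need to be injected into the existing pathtarget; otherwise the path must be wrapped in a projection. A toy standalone illustration of that decision (all names hypothetical, not PostgreSQL types):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Toy path: an expression list plus optional sort/group labels. */
struct toy_path
{
    const char *exprs;          /* e.g. "a, b+1" */
    const int  *sortgrouprefs;  /* borrowed label array, may be NULL */
    bool        projected;      /* wrapped in a projection node? */
};

/* Mirrors the tlist_same_exprs shortcut: identical expressions let us
 * share the target's label array in place; otherwise the path has to
 * be replaced by a projection emitting the new target. */
static void
apply_target(struct toy_path *p, const char *target_exprs, const int *refs)
{
    if (strcmp(p->exprs, target_exprs) == 0)
        p->sortgrouprefs = refs;    /* cheap: inject labels */
    else
    {
        p->exprs = target_exprs;    /* expensive: add a projection */
        p->projected = true;
    }
}

int
main(void)
{
    static const int refs[] = {1, 0};
    struct toy_path same = {"a, b+1", NULL, false};
    struct toy_path diff = {"a, b", NULL, false};

    apply_target(&same, "a, b+1", refs);
    apply_target(&diff, "a, b+1", refs);
    printf("same-exprs path projected: %s\n", same.projected ? "yes" : "no");
    printf("diff-exprs path projected: %s\n", diff.projected ? "yes" : "no");
    return 0;
}
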

◆ can_partial_agg()

static bool can_partial_agg ( PlannerInfo * root)
static

Definition at line 7907 of file planner.c.

7908{
7909 Query *parse = root->parse;
7910
7911 if (!parse->hasAggs && parse->groupClause == NIL)
7912 {
7913 /*
7914 * We don't know how to do parallel aggregation unless we have either
7915 * some aggregates or a grouping clause.
7916 */
7917 return false;
7918 }
7919 else if (parse->groupingSets)
7920 {
7921 /* We don't know how to do grouping sets in parallel. */
7922 return false;
7923 }
7924 else if (root->hasNonPartialAggs || root->hasNonSerialAggs)
7925 {
7926 /* Insufficient support for partial mode. */
7927 return false;
7928 }
7929
7930 /* Everything looks good. */
7931 return true;
7932}

References NIL, parse(), and root.

Referenced by create_grouping_paths().
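
Because this function is a pure predicate over a few query properties, it mirrors directly into a standalone sketch. The struct below is hypothetical; its flags correspond to parse->hasAggs, parse->groupClause, parse->groupingSets, root->hasNonPartialAggs and root->hasNonSerialAggs:

#include <stdbool.h>
#include <stdio.h>

struct agg_query
{
    bool has_aggs;
    bool has_group_clause;
    bool has_grouping_sets;
    bool has_nonpartial_aggs;  /* e.g. an aggregate lacking a combine function */
    bool has_nonserial_aggs;   /* transition state not serializable */
};

static bool
can_partial_agg(const struct agg_query *q)
{
    if (!q->has_aggs && !q->has_group_clause)
        return false;          /* degenerate case: nothing to split */
    if (q->has_grouping_sets)
        return false;          /* grouping sets can't be done in parallel */
    if (q->has_nonpartial_aggs || q->has_nonserial_aggs)
        return false;          /* insufficient support for partial mode */
    return true;
}

int
main(void)
{
    struct agg_query q = {true, true, false, false, false};

    printf("partial aggregation possible: %s\n",
           can_partial_agg(&q) ? "yes" : "no");
    return 0;
}
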

◆ choose_plan_name()

char * choose_plan_name ( PlannerGlobal * glob,
const char * name,
bool  always_number 
)

Definition at line 9022 of file planner.c.

9023{
9024 unsigned n;
9025
9026 /*
9027 * If a numeric suffix is not required, then search the list of
9028 * previously-assigned names for a match. If none is found, then we can
9029 * use the provided name without modification.
9030 */
9031 if (!always_number)
9032 {
9033 bool found = false;
9034
9035 foreach_ptr(char, subplan_name, glob->subplanNames)
9036 {
9037 if (strcmp(subplan_name, name) == 0)
9038 {
9039 found = true;
9040 break;
9041 }
9042 }
9043
9044 if (!found)
9045 {
9046 /* pstrdup here is just to avoid cast-away-const */
9047 char *chosen_name = pstrdup(name);
9048
9049 glob->subplanNames = lappend(glob->subplanNames, chosen_name);
9050 return chosen_name;
9051 }
9052 }
9053
9054 /*
9055 * If a numeric suffix is required or if the un-suffixed name is already
9056 * in use, then loop until we find a positive integer that produces a
9057 * novel name.
9058 */
9059 for (n = 1; true; ++n)
9060 {
9061 char *proposed_name = psprintf("%s_%u", name, n);
9062 bool found = false;
9063
9064 foreach_ptr(char, subplan_name, glob->subplanNames)
9065 {
9066 if (strcmp(subplan_name, proposed_name) == 0)
9067 {
9068 found = true;
9069 break;
9070 }
9071 }
9072
9073 if (!found)
9074 {
9075 glob->subplanNames = lappend(glob->subplanNames, proposed_name);
9076 return proposed_name;
9077 }
9078
9079 pfree(proposed_name);
9080 }
9081}
char * pstrdup(const char *in)
Definition mcxt.c:1781
#define foreach_ptr(type, var, lst)
Definition pg_list.h:469
char * psprintf(const char *fmt,...)
Definition psprintf.c:43
const char * name

References fb(), foreach_ptr, lappend(), name, pfree(), psprintf(), and pstrdup().

Referenced by build_minmax_path(), make_subplan(), recurse_set_operations(), set_subquery_pathlist(), and SS_process_ctes().
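
The two-phase behavior (reuse the bare name when permitted and still unused, otherwise probe name_1, name_2, ... until a novel name appears) is self-contained enough to demonstrate outside the server. A minimal sketch, with a fixed-size table standing in for glob->subplanNames:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_NAMES 16

static char *taken[MAX_NAMES];
static int ntaken;

static int
is_taken(const char *name)
{
    for (int i = 0; i < ntaken; i++)
        if (strcmp(taken[i], name) == 0)
            return 1;
    return 0;
}

/* Mirrors the two phases: bare name if allowed and free, else append
 * an increasing numeric suffix until the name is novel. */
static char *
choose_name(const char *name, int always_number)
{
    char buf[128];

    if (!always_number && !is_taken(name))
        snprintf(buf, sizeof(buf), "%s", name);
    else
        for (unsigned n = 1;; n++)
        {
            snprintf(buf, sizeof(buf), "%s_%u", name, n);
            if (!is_taken(buf))
                break;
        }
    taken[ntaken++] = strdup(buf);
    return taken[ntaken - 1];
}

int
main(void)
{
    printf("%s\n", choose_name("cte scan", 0));  /* cte scan   */
    printf("%s\n", choose_name("cte scan", 0));  /* cte scan_1 */
    printf("%s\n", choose_name("cte scan", 1));  /* cte scan_2 */
    return 0;
}
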

◆ common_prefix_cmp()

static int common_prefix_cmp ( const void * a,
const void * b 
)
static

Definition at line 6162 of file planner.c.

6163{
6164 const WindowClauseSortData *wcsa = a;
6165 const WindowClauseSortData *wcsb = b;
6166 ListCell *item_a;
6167 ListCell *item_b;
6168
6169 forboth(item_a, wcsa->uniqueOrder, item_b, wcsb->uniqueOrder)
6170 {
6171 SortGroupClause *sca = lfirst_node(SortGroupClause, item_a);
6172 SortGroupClause *scb = lfirst_node(SortGroupClause, item_b);
6173
6174 if (sca->tleSortGroupRef > scb->tleSortGroupRef)
6175 return -1;
6176 else if (sca->tleSortGroupRef < scb->tleSortGroupRef)
6177 return 1;
6178 else if (sca->sortop > scb->sortop)
6179 return -1;
6180 else if (sca->sortop < scb->sortop)
6181 return 1;
6182 else if (sca->nulls_first && !scb->nulls_first)
6183 return -1;
6184 else if (!sca->nulls_first && scb->nulls_first)
6185 return 1;
6186 /* no need to compare eqop, since it is fully determined by sortop */
6187 }
6188
6189 if (list_length(wcsa->uniqueOrder) > list_length(wcsb->uniqueOrder))
6190 return -1;
6191 else if (list_length(wcsa->uniqueOrder) < list_length(wcsb->uniqueOrder))
6192 return 1;
6193
6194 return 0;
6195}
int b
Definition isn.c:74
int a
Definition isn.c:73

References a, b, fb(), forboth, lfirst_node, and list_length().

Referenced by select_active_windows().
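
Fed to qsort(), this comparator places orderings that share a common prefix next to each other, longest (most demanding) first, so select_active_windows() can satisfy several windows with one sort. A standalone demonstration, with plain integer key ids standing in for the tleSortGroupRef/sortop/nulls_first triples:

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for WindowClauseSortData: an ordering is a list of key
 * ids, compared element by element, larger first, longer lists first
 * on a tie, as in common_prefix_cmp(). */
struct ordering
{
    int nkeys;
    int keys[4];
};

static int
common_prefix_cmp(const void *a, const void *b)
{
    const struct ordering *oa = a;
    const struct ordering *ob = b;
    int n = oa->nkeys < ob->nkeys ? oa->nkeys : ob->nkeys;

    for (int i = 0; i < n; i++)
    {
        if (oa->keys[i] > ob->keys[i])
            return -1;
        if (oa->keys[i] < ob->keys[i])
            return 1;
    }
    if (oa->nkeys > ob->nkeys)
        return -1;
    if (oa->nkeys < ob->nkeys)
        return 1;
    return 0;
}

int
main(void)
{
    struct ordering w[] = {
        {1, {2}}, {3, {2, 1, 7}}, {2, {2, 1}}, {1, {5}},
    };
    int nw = sizeof(w) / sizeof(w[0]);

    qsort(w, nw, sizeof(w[0]), common_prefix_cmp);
    /* prints 5 | 2 1 7 | 2 1 | 2: each prefix lands right after its
     * extensions, so compatible windows end up adjacent */
    for (int i = 0; i < nw; i++)
    {
        for (int j = 0; j < w[i].nkeys; j++)
            printf("%d ", w[i].keys[j]);
        printf("| ");
    }
    printf("\n");
    return 0;
}
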

◆ consider_groupingsets_paths()

static void consider_groupingsets_paths ( PlannerInfo * root,
RelOptInfo * grouped_rel,
Path * path,
bool  is_sorted,
bool  can_hash,
grouping_sets_data * gd,
const AggClauseCosts * agg_costs,
double  dNumGroups 
)
static

Definition at line 4241 of file planner.c.

4249{
4250 Query *parse = root->parse;
4251 Size hash_mem_limit = get_hash_memory_limit();
4252
4253 /*
4254 * If we're not being offered sorted input, then only consider plans that
4255 * can be done entirely by hashing.
4256 *
4257 * We can hash everything if it looks like it'll fit in hash_mem. But if
4258 * the input is actually sorted despite not being advertised as such, we
4259 * prefer to make use of that in order to use less memory.
4260 *
4261 * If none of the grouping sets are sortable, then ignore the hash_mem
4262 * limit and generate a path anyway, since otherwise we'll just fail.
4263 */
4264 if (!is_sorted)
4265 {
4266 List *new_rollups = NIL;
4267 RollupData *unhashed_rollup = NULL;
4268 List *sets_data;
4269 List *empty_sets_data = NIL;
4270 List *empty_sets = NIL;
4271 ListCell *lc;
4272 ListCell *l_start = list_head(gd->rollups);
4273 AggStrategy strat = AGG_HASHED;
4274 double hashsize;
4275 double exclude_groups = 0.0;
4276
4277 Assert(can_hash);
4278
4279 /*
4280 * If the input is coincidentally sorted usefully (which can happen
4281 * even if is_sorted is false, since that only means that our caller
4282 * has set up the sorting for us), then save some hashtable space by
4283 * making use of that. But we need to watch out for degenerate cases:
4284 *
4285 * 1) If there are any empty grouping sets, then group_pathkeys might
4286 * be NIL if all non-empty grouping sets are unsortable. In this case,
4287 * there will be a rollup containing only empty groups, and the
4288 * pathkeys_contained_in test is vacuously true; this is ok.
4289 *
4290 * XXX: the above relies on the fact that group_pathkeys is generated
4291 * from the first rollup. If we add the ability to consider multiple
4292 * sort orders for grouping input, this assumption might fail.
4293 *
4294 * 2) If there are no empty sets and only unsortable sets, then the
4295 * rollups list will be empty (and thus l_start == NULL), and
4296 * group_pathkeys will be NIL; we must ensure that the vacuously-true
4297 * pathkeys_contained_in test doesn't cause us to crash.
4298 */
4299 if (l_start != NULL &&
4300 pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
4301 {
4302 unhashed_rollup = lfirst_node(RollupData, l_start);
4303 exclude_groups = unhashed_rollup->numGroups;
4304 l_start = lnext(gd->rollups, l_start);
4305 }
4306
4307 hashsize = estimate_hashagg_tablesize(root,
4308 path,
4309 agg_costs,
4310 dNumGroups - exclude_groups);
4311
4312 /*
4313 * gd->rollups is empty if we have only unsortable columns to work
4314 * with. Override hash_mem in that case; otherwise, we'll rely on the
4315 * sorted-input case to generate usable mixed paths.
4316 */
4317 if (hashsize > hash_mem_limit && gd->rollups)
4318 return; /* nope, won't fit */
4319
4320 /*
4321 * We need to burst the existing rollups list into individual grouping
4322 * sets and recompute a groupClause for each set.
4323 */
4324 sets_data = list_copy(gd->unsortable_sets);
4325
4326 for_each_cell(lc, gd->rollups, l_start)
4327 {
4328 RollupData *rollup = lfirst_node(RollupData, lc);
4329
4330 /*
4331 * If we find an unhashable rollup that's not been skipped by the
4332 * "actually sorted" check above, we can't cope; we'd need sorted
4333 * input (with a different sort order) but we can't get that here.
4334 * So bail out; we'll get a valid path from the is_sorted case
4335 * instead.
4336 *
4337 * The mere presence of empty grouping sets doesn't make a rollup
4338 * unhashable (see preprocess_grouping_sets), we handle those
4339 * specially below.
4340 */
4341 if (!rollup->hashable)
4342 return;
4343
4344 sets_data = list_concat(sets_data, rollup->gsets_data);
4345 }
4346 foreach(lc, sets_data)
4347 {
4348 GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4349 List *gset = gs->set;
4350 RollupData *rollup;
4351
4352 if (gset == NIL)
4353 {
4354 /* Empty grouping sets can't be hashed. */
4357 }
4358 else
4359 {
4361
4362 rollup->groupClause = preprocess_groupclause(root, gset);
4363 rollup->gsets_data = list_make1(gs);
4364 rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4365 rollup->gsets_data,
4366 gd->tleref_to_colnum_map);
4367 rollup->numGroups = gs->numGroups;
4368 rollup->hashable = true;
4369 rollup->is_hashed = true;
4371 }
4372 }
4373
4374 /*
4375 * If we didn't find anything nonempty to hash, then bail. We'll
4376 * generate a path from the is_sorted case.
4377 */
4378 if (new_rollups == NIL)
4379 return;
4380
4381 /*
4382 * If there were empty grouping sets they should have been in the
4383 * first rollup.
4384 */
4386
4387 if (unhashed_rollup)
4388 {
4390 strat = AGG_MIXED;
4391 }
4392 else if (empty_sets)
4393 {
4395
4396 rollup->groupClause = NIL;
4397 rollup->gsets_data = empty_sets_data;
4398 rollup->gsets = empty_sets;
4399 rollup->numGroups = list_length(empty_sets);
4400 rollup->hashable = false;
4401 rollup->is_hashed = false;
4403 strat = AGG_MIXED;
4404 }
4405
4406 add_path(grouped_rel, (Path *)
4408 grouped_rel,
4409 path,
4410 (List *) parse->havingQual,
4411 strat,
4413 agg_costs));
4414 return;
4415 }
4416
4417 /*
4418 * If we have sorted input but nothing we can do with it, bail.
4419 */
4420 if (gd->rollups == NIL)
4421 return;
4422
4423 /*
4424 * Given sorted input, we try and make two paths: one sorted and one mixed
4425 * sort/hash. (We need to try both because hashagg might be disabled, or
4426 * some columns might not be sortable.)
4427 *
4428 * can_hash is passed in as false if some obstacle elsewhere (such as
4429 * ordered aggs) means that we shouldn't consider hashing at all.
4430 */
4431 if (can_hash && gd->any_hashable)
4432 {
4433 List *rollups = NIL;
4434 List *hash_sets = list_copy(gd->unsortable_sets);
4435 double availspace = hash_mem_limit;
4436 ListCell *lc;
4437
4438 /*
4439 * Account first for space needed for groups we can't sort at all.
4440 */
4441 availspace -= estimate_hashagg_tablesize(root,
4442 path,
4443 agg_costs,
4444 gd->dNumHashGroups);
4445
4446 if (availspace > 0 && list_length(gd->rollups) > 1)
4447 {
4448 double scale;
4449 int num_rollups = list_length(gd->rollups);
4450 int k_capacity;
4451 int *k_weights = palloc(num_rollups * sizeof(int));
4452 Bitmapset *hash_items = NULL;
4453 int i;
4454
4455 /*
4456 * We treat this as a knapsack problem: the knapsack capacity
4457 * represents hash_mem, the item weights are the estimated memory
4458 * usage of the hashtables needed to implement a single rollup,
4459 * and we really ought to use the cost saving as the item value;
4460 * however, currently the costs assigned to sort nodes don't
4461 * reflect the comparison costs well, and so we treat all items as
4462 * of equal value (each rollup we hash instead saves us one sort).
4463 *
4464 * To use the discrete knapsack, we need to scale the values to a
4465 * reasonably small bounded range. We choose to allow a 5% error
4466 * margin; we have no more than 4096 rollups in the worst possible
4467 * case, which with a 5% error margin will require a bit over 42MB
4468 * of workspace. (Anyone wanting to plan queries that complex had
4469 * better have the memory for it. In more reasonable cases, with
4470 * no more than a couple of dozen rollups, the memory usage will
4471 * be negligible.)
4472 *
4473 * k_capacity is naturally bounded, but we clamp the values for
4474 * scale and weight (below) to avoid overflows or underflows (or
4475 * uselessly trying to use a scale factor less than 1 byte).
4476 */
4477 scale = Max(availspace / (20.0 * num_rollups), 1.0);
4478 k_capacity = (int) floor(availspace / scale);
4479
4480 /*
4481 * We leave the first rollup out of consideration since it's the
4482 * one that matches the input sort order. We assign indexes "i"
4483 * to only those entries considered for hashing; the second loop,
4484 * below, must use the same condition.
4485 */
4486 i = 0;
4487 for_each_from(lc, gd->rollups, 1)
4488 {
4489 RollupData *rollup = lfirst_node(RollupData, lc);
4490
4491 if (rollup->hashable)
4492 {
4493 double sz = estimate_hashagg_tablesize(root,
4494 path,
4495 agg_costs,
4496 rollup->numGroups);
4497
4498 /*
4499 * If sz is enormous, but hash_mem (and hence scale) is
4500 * small, avoid integer overflow here.
4501 */
4502 k_weights[i] = (int) Min(floor(sz / scale),
4503 k_capacity + 1.0);
4504 ++i;
4505 }
4506 }
4507
4508 /*
4509 * Apply knapsack algorithm; compute the set of items which
4510 * maximizes the value stored (in this case the number of sorts
4511 * saved) while keeping the total size (approximately) within
4512 * capacity.
4513 */
4514 if (i > 0)
4515 hash_items = DiscreteKnapsack(k_capacity, i, k_weights, NULL);
4516
4517 if (!bms_is_empty(hash_items))
4518 {
4519 rollups = list_make1(linitial(gd->rollups));
4520
4521 i = 0;
4522 for_each_from(lc, gd->rollups, 1)
4523 {
4524 RollupData *rollup = lfirst_node(RollupData, lc);
4525
4526 if (rollup->hashable)
4527 {
4528 if (bms_is_member(i, hash_items))
4529 hash_sets = list_concat(hash_sets,
4530 rollup->gsets_data);
4531 else
4532 rollups = lappend(rollups, rollup);
4533 ++i;
4534 }
4535 else
4536 rollups = lappend(rollups, rollup);
4537 }
4538 }
4539 }
4540
4541 if (!rollups && hash_sets)
4542 rollups = list_copy(gd->rollups);
4543
4544 foreach(lc, hash_sets)
4545 {
4546 GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4547 RollupData *rollup = makeNode(RollupData);
4548
4549 Assert(gs->set != NIL);
4550
4551 rollup->groupClause = preprocess_groupclause(root, gs->set);
4552 rollup->gsets_data = list_make1(gs);
4553 rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4554 rollup->gsets_data,
4555 gd->tleref_to_colnum_map);
4556 rollup->numGroups = gs->numGroups;
4557 rollup->hashable = true;
4558 rollup->is_hashed = true;
4559 rollups = lcons(rollup, rollups);
4560 }
4561
4562 if (rollups)
4563 {
4564 add_path(grouped_rel, (Path *)
4565 create_groupingsets_path(root,
4566 grouped_rel,
4567 path,
4568 (List *) parse->havingQual,
4569 AGG_MIXED,
4570 rollups,
4571 agg_costs));
4572 }
4573 }
4574
4575 /*
4576 * Now try the simple sorted case.
4577 */
4578 if (!gd->unsortable_sets)
4579 add_path(grouped_rel, (Path *)
4580 create_groupingsets_path(root,
4581 grouped_rel,
4582 path,
4583 (List *) parse->havingQual,
4584 AGG_SORTED,
4585 gd->rollups,
4586 agg_costs));
4587}
bool bms_is_member(int x, const Bitmapset *a)
Definition bitmapset.c:510
#define bms_is_empty(a)
Definition bitmapset.h:118
#define Min(x, y)
Definition c.h:997
#define Max(x, y)
Definition c.h:991
size_t Size
Definition c.h:619
Bitmapset * DiscreteKnapsack(int max_weight, int num_items, int *item_weights, double *item_values)
Definition knapsack.c:51
List * list_concat(List *list1, const List *list2)
Definition list.c:561
List * lcons(void *datum, List *list)
Definition list.c:495
void * palloc(Size size)
Definition mcxt.c:1387
size_t get_hash_memory_limit(void)
Definition nodeHash.c:3621
AggStrategy
Definition nodes.h:363
@ AGG_MIXED
Definition nodes.h:367
#define makeNode(_type_)
Definition nodes.h:161
bool pathkeys_contained_in(List *keys1, List *keys2)
Definition pathkeys.c:343
GroupingSetsPath * create_groupingsets_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *having_qual, AggStrategy aggstrategy, List *rollups, const AggClauseCosts *agg_costs)
Definition pathnode.c:3083
#define list_make1(x1)
Definition pg_list.h:212
#define for_each_cell(cell, lst, initcell)
Definition pg_list.h:438
#define for_each_from(cell, lst, N)
Definition pg_list.h:414
#define linitial(l)
Definition pg_list.h:178
static ListCell * list_head(const List *l)
Definition pg_list.h:128
static ListCell * lnext(const List *l, const ListCell *c)
Definition pg_list.h:343
static int scale
Definition pgbench.c:182
static List * preprocess_groupclause(PlannerInfo *root, List *force)
Definition planner.c:2904
static List * remap_to_groupclause_idx(List *groupClause, List *gsets, int *tleref_to_colnum_map)
Definition planner.c:2438
double estimate_hashagg_tablesize(PlannerInfo *root, Path *path, const AggClauseCosts *agg_costs, double dNumGroups)
Definition selfuncs.c:4521
List * pathkeys
Definition pathnodes.h:1991

References add_path(), AGG_HASHED, AGG_MIXED, AGG_SORTED, Assert, bms_is_empty, bms_is_member(), create_groupingsets_path(), DiscreteKnapsack(), estimate_hashagg_tablesize(), fb(), for_each_cell, for_each_from, get_hash_memory_limit(), i, lappend(), lcons(), lfirst_node, linitial, list_concat(), list_copy(), list_head(), list_length(), list_make1, lnext(), makeNode, Max, Min, NIL, palloc(), parse(), Path::pathkeys, pathkeys_contained_in(), preprocess_groupclause(), remap_to_groupclause_idx(), root, and scale.

Referenced by add_paths_to_grouping_rel().
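
The knapsack framing in the comment above is worth seeing in isolation: capacity is (scaled) hash_mem, item weights are the estimated hashtable sizes, and every item has equal value because each hashed rollup saves exactly one sort. A self-contained sketch of that equal-value 0/1 knapsack; the real code calls DiscreteKnapsack() in lib/knapsack.c, and the numbers here are made up:

#include <stdio.h>
#include <string.h>

#define MAX_CAP 64

/* best[c] = max number of items packable into capacity c. */
static int
knapsack_equal_values(int capacity, int nitems, const int weights[])
{
    int best[MAX_CAP + 1];

    memset(best, 0, sizeof(best));
    for (int i = 0; i < nitems; i++)
        for (int c = capacity; c >= weights[i]; c--)  /* 0/1: scan downward */
            if (best[c - weights[i]] + 1 > best[c])
                best[c] = best[c - weights[i]] + 1;
    return best[capacity];
}

int
main(void)
{
    int weights[] = {10, 12, 15, 30};  /* scaled per-rollup hashtable sizes */
    int capacity = 40;                 /* scaled hash_mem */

    printf("rollups we can hash instead of sort: %d\n",
           knapsack_equal_values(capacity, 4, weights));  /* 3 */
    return 0;
}

As the comment notes, using one unit of value per rollup is a stand-in for the real saving (sort cost), which current sort costing doesn't capture well.
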

◆ create_degenerate_grouping_paths()

static void create_degenerate_grouping_paths ( PlannerInfo * root,
RelOptInfo * input_rel,
RelOptInfo * grouped_rel 
)
static

Definition at line 4046 of file planner.c.

4048{
4049 Query *parse = root->parse;
4050 int nrows;
4051 Path *path;
4052
4053 nrows = list_length(parse->groupingSets);
4054 if (nrows > 1)
4055 {
4056 /*
4057 * Doesn't seem worthwhile writing code to cons up a generate_series
4058 * or a values scan to emit multiple rows. Instead just make N clones
4059 * and append them. (With a volatile HAVING clause, this means you
4060 * might get between 0 and N output rows. Offhand I think that's
4061 * desired.)
4062 */
4063 List *paths = NIL;
4064
4065 while (--nrows >= 0)
4066 {
4067 path = (Path *)
4068 create_group_result_path(root, grouped_rel,
4069 grouped_rel->reltarget,
4070 (List *) parse->havingQual);
4071 paths = lappend(paths, path);
4072 }
4073 path = (Path *)
4074 create_append_path(root,
4075 grouped_rel,
4076 paths,
4077 NIL,
4078 NIL,
4079 NULL,
4080 0,
4081 false,
4082 -1);
4083 }
4084 else
4085 {
4086 /* No grouping sets, or just one, so one output row */
4087 path = (Path *)
4088 create_group_result_path(root, grouped_rel,
4089 grouped_rel->reltarget,
4090 (List *) parse->havingQual);
4091 }
4092
4093 add_path(grouped_rel, path);
4094}
AppendPath * create_append_path(PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *partial_subpaths, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows)
Definition pathnode.c:1299
GroupResultPath * create_group_result_path(PlannerInfo *root, RelOptInfo *rel, PathTarget *target, List *havingqual)
Definition pathnode.c:1608

References add_path(), create_append_path(), create_group_result_path(), fb(), lappend(), list_length(), NIL, parse(), RelOptInfo::reltarget, and root.

Referenced by create_grouping_paths().
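
The clone-and-append strategy above is easy to see in isolation. The following is a hypothetical standalone mirror (plain C, not planner code): one result row per grouping set, each independently subject to the possibly volatile HAVING qual, so anywhere from 0 to N rows come out.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical mirror of the clone-and-append shape: emit one row per
 * grouping set; a volatile HAVING clause (modeled here by rand()) may
 * drop any clone independently, giving between 0 and N output rows. */
int
main(void)
{
    int nrows = 3;              /* length of parse->groupingSets */

    while (--nrows >= 0)
        if (rand() % 2 == 0)    /* stands in for a volatile HAVING qual */
            printf("one aggregated row\n");
    return 0;
}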

◆ create_distinct_paths()

static RelOptInfo * create_distinct_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget target 
)
static

Definition at line 4860 of file planner.c.

4862{
4863 RelOptInfo *distinct_rel;
4864
4865 /* For now, do all work in the (DISTINCT, NULL) upperrel */
4866 distinct_rel = fetch_upper_rel(root, UPPERREL_DISTINCT, NULL);
4867
4868 /*
4869 * We don't compute anything at this level, so distinct_rel will be
4870 * parallel-safe if the input rel is parallel-safe. In particular, if
4871 * there is a DISTINCT ON (...) clause, any path for the input_rel will
4872 * output those expressions, and will not be parallel-safe unless those
4873 * expressions are parallel-safe.
4874 */
4875 distinct_rel->consider_parallel = input_rel->consider_parallel;
4876
4877 /*
4878 * If the input rel belongs to a single FDW, so does the distinct_rel.
4879 */
4880 distinct_rel->serverid = input_rel->serverid;
4881 distinct_rel->userid = input_rel->userid;
4882 distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4883 distinct_rel->fdwroutine = input_rel->fdwroutine;
4884
4885 /* build distinct paths based on input_rel's pathlist */
4886 distinct_rel = create_final_distinct_paths(root, input_rel, distinct_rel);
4887
4888 /* now build distinct paths based on input_rel's partial_pathlist */
4889 create_partial_distinct_paths(root, input_rel, distinct_rel, target);
4890
4891 /* Give a helpful error if we failed to create any paths */
4892 if (distinct_rel->pathlist == NIL)
4893 ereport(ERROR,
4894 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4895 errmsg("could not implement DISTINCT"),
4896 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4897
4898 /*
4899 * If there is an FDW that's responsible for all baserels of the query,
4900 * let it consider adding ForeignPaths.
4901 */
4902 if (distinct_rel->fdwroutine &&
4903 distinct_rel->fdwroutine->GetForeignUpperPaths)
4904 distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4905 UPPERREL_DISTINCT,
4906 input_rel,
4907 distinct_rel,
4908 NULL);
4909
4910 /* Let extensions possibly add some more paths */
4911 if (create_upper_paths_hook)
4912 (*create_upper_paths_hook) (root, UPPERREL_DISTINCT, input_rel,
4913 distinct_rel, NULL);
4914
4915 /* Now choose the best path(s) */
4916 set_cheapest(distinct_rel);
4917
4918 return distinct_rel;
4919}
int errdetail(const char *fmt,...)
Definition elog.c:1216
int errcode(int sqlerrcode)
Definition elog.c:863
int errmsg(const char *fmt,...)
Definition elog.c:1080
#define ERROR
Definition elog.h:39
#define ereport(elevel,...)
Definition elog.h:150
@ UPPERREL_DISTINCT
Definition pathnodes.h:150
static RelOptInfo * create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *distinct_rel)
Definition planner.c:5113
static void create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *final_distinct_rel, PathTarget *target)
Definition planner.c:4930
create_upper_paths_hook_type create_upper_paths_hook
Definition planner.c:83
RelOptInfo * fetch_upper_rel(PlannerInfo *root, UpperRelationKind kind, Relids relids)
Definition relnode.c:1606

References create_final_distinct_paths(), create_partial_distinct_paths(), create_upper_paths_hook, ereport, errcode(), errdetail(), errmsg(), ERROR, fb(), fetch_upper_rel(), NIL, root, set_cheapest(), and UPPERREL_DISTINCT.

Referenced by grouping_planner().
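
Like the other upper-rel builders in this file, this function finishes by invoking create_upper_paths_hook, the documented extension point for adding custom upper paths. A minimal sketch of an extension using it follows; my_upper_paths_hook and the module body are hypothetical, while create_upper_paths_hook_type and UPPERREL_DISTINCT are the real symbols shown above.

#include "postgres.h"
#include "fmgr.h"
#include "optimizer/planner.h"

PG_MODULE_MAGIC;

static create_upper_paths_hook_type prev_upper_paths_hook = NULL;

/* Hypothetical hook: runs after create_distinct_paths() has filled
 * output_rel; an extension could inspect the pathlist or add paths. */
static void
my_upper_paths_hook(PlannerInfo *root, UpperRelationKind stage,
                    RelOptInfo *input_rel, RelOptInfo *output_rel,
                    void *extra)
{
    if (prev_upper_paths_hook)
        prev_upper_paths_hook(root, stage, input_rel, output_rel, extra);

    if (stage == UPPERREL_DISTINCT)
    {
        /* examine output_rel->pathlist here, or add_path() a custom path */
    }
}

void
_PG_init(void)
{
    prev_upper_paths_hook = create_upper_paths_hook;
    create_upper_paths_hook = my_upper_paths_hook;
}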

◆ create_final_distinct_paths()

static RelOptInfo * create_final_distinct_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo distinct_rel 
)
static

Definition at line 5113 of file planner.c.

5115{
5116 Query *parse = root->parse;
5117 Path *cheapest_input_path = input_rel->cheapest_total_path;
5118 double numDistinctRows;
5119 bool allow_hash;
5120
5121 /* Estimate number of distinct rows there will be */
5122 if (parse->groupClause || parse->groupingSets || parse->hasAggs ||
5123 root->hasHavingQual)
5124 {
5125 /*
5126 * If there was grouping or aggregation, use the number of input rows
5127 * as the estimated number of DISTINCT rows (ie, assume the input is
5128 * already mostly unique).
5129 */
5130 numDistinctRows = cheapest_input_path->rows;
5131 }
5132 else
5133 {
5134 /*
5135 * Otherwise, the UNIQUE filter has effects comparable to GROUP BY.
5136 */
5137 List *distinctExprs;
5138
5139 distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
5140 parse->targetList);
5141 numDistinctRows = estimate_num_groups(root, distinctExprs,
5142 cheapest_input_path->rows,
5143 NULL, NULL);
5144 }
5145
5146 /*
5147 * Consider sort-based implementations of DISTINCT, if possible.
5148 */
5149 if (grouping_is_sortable(root->processed_distinctClause))
5150 {
5151 /*
5152 * Firstly, if we have any adequately-presorted paths, just stick a
5153 * Unique node on those. We also, consider doing an explicit sort of
5154 * the cheapest input path and Unique'ing that. If any paths have
5155 * presorted keys then we'll create an incremental sort atop of those
5156 * before adding a unique node on the top. We'll also attempt to
5157 * reorder the required pathkeys to match the input path's pathkeys as
5158 * much as possible, in hopes of avoiding a possible need to re-sort.
5159 *
5160 * When we have DISTINCT ON, we must sort by the more rigorous of
5161 * DISTINCT and ORDER BY, else it won't have the desired behavior.
5162 * Also, if we do have to do an explicit sort, we might as well use
5163 * the more rigorous ordering to avoid a second sort later. (Note
5164 * that the parser will have ensured that one clause is a prefix of
5165 * the other.)
5166 */
5167 List *needed_pathkeys;
5168 ListCell *lc;
5169 double limittuples = root->distinct_pathkeys == NIL ? 1.0 : -1.0;
5170
5171 if (parse->hasDistinctOn &&
5172 list_length(root->distinct_pathkeys) <
5173 list_length(root->sort_pathkeys))
5174 needed_pathkeys = root->sort_pathkeys;
5175 else
5176 needed_pathkeys = root->distinct_pathkeys;
5177
5178 foreach(lc, input_rel->pathlist)
5179 {
5180 Path *input_path = (Path *) lfirst(lc);
5181 Path *sorted_path;
5182 List *useful_pathkeys_list = NIL;
5183
5184 useful_pathkeys_list =
5185 get_useful_pathkeys_for_distinct(root,
5186 needed_pathkeys,
5187 input_path->pathkeys);
5188 Assert(list_length(useful_pathkeys_list) > 0);
5189
5190 foreach_node(List, useful_pathkeys, useful_pathkeys_list)
5191 {
5192 sorted_path = make_ordered_path(root,
5193 distinct_rel,
5194 input_path,
5195 cheapest_input_path,
5196 useful_pathkeys,
5197 limittuples);
5198
5199 if (sorted_path == NULL)
5200 continue;
5201
5202 /*
5203 * distinct_pathkeys may have become empty if all of the
5204 * pathkeys were determined to be redundant. If all of the
5205 * pathkeys are redundant then each DISTINCT target must only
5206 * allow a single value, therefore all resulting tuples must
5207 * be identical (or at least indistinguishable by an equality
5208 * check). We can uniquify these tuples simply by just taking
5209 * the first tuple. All we do here is add a path to do "LIMIT
5210 * 1" atop of 'sorted_path'. When doing a DISTINCT ON we may
5211 * still have a non-NIL sort_pathkeys list, so we must still
5212 * only do this with paths which are correctly sorted by
5213 * sort_pathkeys.
5214 */
5215 if (root->distinct_pathkeys == NIL)
5216 {
5217 Node *limitCount;
5218
5219 limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
5220 sizeof(int64),
5221 Int64GetDatum(1), false,
5222 true);
5223
5224 /*
5225 * If the query already has a LIMIT clause, then we could
5226 * end up with a duplicate LimitPath in the final plan.
5227 * That does not seem worth troubling over too much.
5228 */
5229 add_path(distinct_rel, (Path *)
5230 create_limit_path(root, distinct_rel, sorted_path,
5231 NULL, limitCount,
5232 LIMIT_OPTION_COUNT, 0, 1));
5233 }
5234 else
5235 {
5236 add_path(distinct_rel, (Path *)
5237 create_unique_path(root, distinct_rel,
5238 sorted_path,
5239 list_length(root->distinct_pathkeys),
5240 numDistinctRows));
5241 }
5242 }
5243 }
5244 }
5245
5246 /*
5247 * Consider hash-based implementations of DISTINCT, if possible.
5248 *
5249 * If we were not able to make any other types of path, we *must* hash or
5250 * die trying. If we do have other choices, there are two things that
5251 * should prevent selection of hashing: if the query uses DISTINCT ON
5252 * (because it won't really have the expected behavior if we hash), or if
5253 * enable_hashagg is off.
5254 *
5255 * Note: grouping_is_hashable() is much more expensive to check than the
5256 * other gating conditions, so we want to do it last.
5257 */
5258 if (distinct_rel->pathlist == NIL)
5259 allow_hash = true; /* we have no alternatives */
5260 else if (parse->hasDistinctOn || !enable_hashagg)
5261 allow_hash = false; /* policy-based decision not to hash */
5262 else
5263 allow_hash = true; /* default */
5264
5265 if (allow_hash && grouping_is_hashable(root->processed_distinctClause))
5266 {
5267 /* Generate hashed aggregate path --- no sort needed */
5268 add_path(distinct_rel, (Path *)
5269 create_agg_path(root,
5270 distinct_rel,
5271 cheapest_input_path,
5272 cheapest_input_path->pathtarget,
5273 AGG_HASHED,
5274 AGGSPLIT_SIMPLE,
5275 root->processed_distinctClause,
5276 NIL,
5277 NULL,
5278 numDistinctRows));
5279 }
5280
5281 return distinct_rel;
5282}
int64_t int64
Definition c.h:543
bool enable_hashagg
Definition costsize.c:152
Const * makeConst(Oid consttype, int32 consttypmod, Oid constcollid, int constlen, Datum constvalue, bool constisnull, bool constbyval)
Definition makefuncs.c:350
@ LIMIT_OPTION_COUNT
Definition nodes.h:441
LimitPath * create_limit_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, Node *limitOffset, Node *limitCount, LimitOption limitOption, int64 offset_est, int64 count_est)
Definition pathnode.c:3736
UniquePath * create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, int numCols, double numGroups)
Definition pathnode.c:2949
#define foreach_node(type, var, lst)
Definition pg_list.h:496
static List * get_useful_pathkeys_for_distinct(PlannerInfo *root, List *needed_pathkeys, List *path_pathkeys)
Definition planner.c:5293
static Datum Int64GetDatum(int64 X)
Definition postgres.h:423
#define InvalidOid
double estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, List **pgset, EstimationInfo *estinfo)
Definition selfuncs.c:3771
Cardinality rows
Definition pathnodes.h:1985
bool grouping_is_sortable(List *groupClause)
Definition tlist.c:549
List * get_sortgrouplist_exprs(List *sgClauses, List *targetList)
Definition tlist.c:401
bool grouping_is_hashable(List *groupClause)
Definition tlist.c:569

References add_path(), AGG_HASHED, AGGSPLIT_SIMPLE, Assert, create_agg_path(), create_limit_path(), create_unique_path(), enable_hashagg, estimate_num_groups(), fb(), foreach_node, get_sortgrouplist_exprs(), get_useful_pathkeys_for_distinct(), grouping_is_hashable(), grouping_is_sortable(), Int64GetDatum(), InvalidOid, lfirst, LIMIT_OPTION_COUNT, list_length(), make_ordered_path(), makeConst(), NIL, parse(), root, and Path::rows.

Referenced by create_distinct_paths(), and create_partial_distinct_paths().
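
The three-way allow_hash policy above reduces to a small decision table. The standalone helper below (hypothetical plain C, not planner code) restates it: hashing is forced when no other path exists, vetoed for DISTINCT ON or when enable_hashagg is off, and otherwise allowed on its merits.

#include <stdbool.h>

/* Mirrors the allow_hash gating in create_final_distinct_paths(). */
bool
distinct_allow_hash(bool have_other_paths, bool has_distinct_on,
                    bool hashagg_enabled)
{
    if (!have_other_paths)
        return true;            /* no alternatives: hash or die trying */
    if (has_distinct_on || !hashagg_enabled)
        return false;           /* policy-based decision not to hash */
    return true;                /* default */
}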

◆ create_final_unique_paths()

static void create_final_unique_paths ( PlannerInfo root,
RelOptInfo input_rel,
List sortPathkeys,
List groupClause,
SpecialJoinInfo sjinfo,
RelOptInfo unique_rel 
)
static

Definition at line 8739 of file planner.c.

8742{
8743 Path *cheapest_input_path = input_rel->cheapest_total_path;
8744
8745 /* Estimate number of output rows */
8746 unique_rel->rows = estimate_num_groups(root,
8747 sjinfo->semi_rhs_exprs,
8748 cheapest_input_path->rows,
8749 NULL,
8750 NULL);
8751
8752 /* Consider sort-based implementations, if possible. */
8753 if (sjinfo->semi_can_btree)
8754 {
8755 ListCell *lc;
8756
8757 /*
8758 * Use any available suitably-sorted path as input, and also consider
8759 * sorting the cheapest-total path and incremental sort on any paths
8760 * with presorted keys.
8761 *
8762 * To save planning time, we ignore parameterized input paths unless
8763 * they are the cheapest-total path.
8764 */
8765 foreach(lc, input_rel->pathlist)
8766 {
8767 Path *input_path = (Path *) lfirst(lc);
8768 Path *path;
8769 bool is_sorted;
8770 int presorted_keys;
8771
8772 /*
8773 * Ignore parameterized paths that are not the cheapest-total
8774 * path.
8775 */
8776 if (input_path->param_info &&
8777 input_path != cheapest_input_path)
8778 continue;
8779
8780 is_sorted = pathkeys_count_contained_in(sortPathkeys,
8781 input_path->pathkeys,
8782 &presorted_keys);
8783
8784 /*
8785 * Ignore paths that are not suitably or partially sorted, unless
8786 * they are the cheapest total path (no need to deal with paths
8787 * which have presorted keys when incremental sort is disabled).
8788 */
8789 if (!is_sorted && input_path != cheapest_input_path &&
8790 (presorted_keys == 0 || !enable_incremental_sort))
8791 continue;
8792
8793 /*
8794 * Make a separate ProjectionPath in case we need a Result node.
8795 */
8796 path = (Path *) create_projection_path(root,
8797 unique_rel,
8798 input_path,
8799 unique_rel->reltarget);
8800
8801 if (!is_sorted)
8802 {
8803 /*
8804 * We've no need to consider both a sort and incremental sort.
8805 * We'll just do a sort if there are no presorted keys and an
8806 * incremental sort when there are presorted keys.
8807 */
8808 if (presorted_keys == 0 || !enable_incremental_sort)
8809 path = (Path *) create_sort_path(root,
8810 unique_rel,
8811 path,
8812 sortPathkeys,
8813 -1.0);
8814 else
8815 path = (Path *) create_incremental_sort_path(root,
8816 unique_rel,
8817 path,
8818 sortPathkeys,
8819 presorted_keys,
8820 -1.0);
8821 }
8822
8823 path = (Path *) create_unique_path(root, unique_rel, path,
8824 list_length(groupClause),
8825 unique_rel->rows);
8826
8827 add_path(unique_rel, path);
8828 }
8829 }
8830
8831 /* Consider hash-based implementation, if possible. */
8832 if (sjinfo->semi_can_hash)
8833 {
8834 Path *path;
8835
8836 /*
8837 * Make a separate ProjectionPath in case we need a Result node.
8838 */
8839 path = (Path *) create_projection_path(root,
8840 unique_rel,
8841 cheapest_input_path,
8842 unique_rel->reltarget);
8843
8844 path = (Path *) create_agg_path(root,
8845 unique_rel,
8846 path,
8847 cheapest_input_path->pathtarget,
8848 AGG_HASHED,
8849 AGGSPLIT_SIMPLE,
8850 groupClause,
8851 NIL,
8852 NULL,
8853 unique_rel->rows);
8854
8855 add_path(unique_rel, path);
8856 }
8857}
bool enable_incremental_sort
Definition costsize.c:151
bool pathkeys_count_contained_in(List *keys1, List *keys2, int *n_common)
Definition pathkeys.c:558
IncrementalSortPath * create_incremental_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, int presorted_keys, double limit_tuples)
Definition pathnode.c:2799
SortPath * create_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, double limit_tuples)
Definition pathnode.c:2848
Cardinality rows
Definition pathnodes.h:1009
List * semi_rhs_exprs
Definition pathnodes.h:3210

References add_path(), AGG_HASHED, AGGSPLIT_SIMPLE, create_agg_path(), create_incremental_sort_path(), create_projection_path(), create_sort_path(), create_unique_path(), enable_incremental_sort, estimate_num_groups(), fb(), lfirst, list_length(), NIL, pathkeys_count_contained_in(), RelOptInfo::reltarget, root, RelOptInfo::rows, SpecialJoinInfo::semi_can_btree, SpecialJoinInfo::semi_can_hash, and SpecialJoinInfo::semi_rhs_exprs.

Referenced by create_partial_unique_paths(), and create_unique_paths().
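
The sort-or-incremental-sort choice seen here recurs throughout this file. A compact restatement (hypothetical plain C, mirroring the excerpt's conditions) makes the three outcomes explicit.

#include <stdbool.h>

enum sort_action { SORT_NONE, SORT_FULL, SORT_INCREMENTAL };

/* Mirrors the recurring pattern: already sorted means nothing to do;
 * no presorted prefix, or incremental sort disabled, means a full
 * sort; otherwise an incremental sort on the presorted prefix. */
enum sort_action
choose_sort(bool is_sorted, int presorted_keys, bool incremental_enabled)
{
    if (is_sorted)
        return SORT_NONE;
    if (presorted_keys == 0 || !incremental_enabled)
        return SORT_FULL;
    return SORT_INCREMENTAL;
}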

◆ create_grouping_paths()

static RelOptInfo * create_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget target,
bool  target_parallel_safe,
grouping_sets_data gd 
)
static

Definition at line 3856 of file planner.c.

3861{
3862 Query *parse = root->parse;
3863 RelOptInfo *grouped_rel;
3864 RelOptInfo *partially_grouped_rel;
3865 AggClauseCosts agg_costs;
3866
3867 MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
3868 if (parse->hasAggs)
3869 get_agg_clause_costs(root, AGGSPLIT_SIMPLE, &agg_costs);
3870 /*
3871 * Create grouping relation to hold fully aggregated grouping and/or
3872 * aggregation paths.
3873 */
3874 grouped_rel = make_grouping_rel(root, input_rel, target,
3875 target_parallel_safe, parse->havingQual);
3876
3877 /*
3878 * Create either paths for a degenerate grouping or paths for ordinary
3879 * grouping, as appropriate.
3880 */
3881 if (is_degenerate_grouping(root))
3882 create_degenerate_grouping_paths(root, input_rel, grouped_rel);
3883 else
3884 {
3885 int flags = 0;
3886 GroupPathExtraData extra;
3887
3888 /*
3889 * Determine whether it's possible to perform sort-based
3890 * implementations of grouping. (Note that if processed_groupClause
3891 * is empty, grouping_is_sortable() is trivially true, and all the
3892 * pathkeys_contained_in() tests will succeed too, so that we'll
3893 * consider every surviving input path.)
3894 *
3895 * If we have grouping sets, we might be able to sort some but not all
3896 * of them; in this case, we need can_sort to be true as long as we
3897 * must consider any sorted-input plan.
3898 */
3899 if ((gd && gd->rollups != NIL)
3900 || grouping_is_sortable(root->processed_groupClause))
3901 flags |= GROUPING_CAN_USE_SORT;
3902
3903 /*
3904 * Determine whether we should consider hash-based implementations of
3905 * grouping.
3906 *
3907 * Hashed aggregation only applies if we're grouping. If we have
3908 * grouping sets, some groups might be hashable but others not; in
3909 * this case we set can_hash true as long as there is nothing globally
3910 * preventing us from hashing (and we should therefore consider plans
3911 * with hashes).
3912 *
3913 * Executor doesn't support hashed aggregation with DISTINCT or ORDER
3914 * BY aggregates. (Doing so would imply storing *all* the input
3915 * values in the hash table, and/or running many sorts in parallel,
3916 * either of which seems like a certain loser.) We similarly don't
3917 * support ordered-set aggregates in hashed aggregation, but that case
3918 * is also included in the numOrderedAggs count.
3919 *
3920 * Note: grouping_is_hashable() is much more expensive to check than
3921 * the other gating conditions, so we want to do it last.
3922 */
3923 if ((parse->groupClause != NIL &&
3924 root->numOrderedAggs == 0 &&
3925 (gd ? gd->any_hashable : grouping_is_hashable(root->processed_groupClause))))
3926 flags |= GROUPING_CAN_USE_HASH;
3927
3928 /*
3929 * Determine whether partial aggregation is possible.
3930 */
3931 if (can_partial_agg(root))
3932 flags |= GROUPING_CAN_PARTIAL_AGG;
3933
3934 extra.flags = flags;
3935 extra.target_parallel_safe = target_parallel_safe;
3936 extra.havingQual = parse->havingQual;
3937 extra.targetList = parse->targetList;
3938 extra.partial_costs_set = false;
3939
3940 /*
3941 * Determine whether partitionwise aggregation is in theory possible.
3942 * It can be disabled by the user, and for now, we don't try to
3943 * support grouping sets. create_ordinary_grouping_paths() will check
3944 * additional conditions, such as whether input_rel is partitioned.
3945 */
3946 if (enable_partitionwise_aggregate && !parse->groupingSets)
3947 extra.patype = PARTITIONWISE_AGGREGATE_FULL;
3948 else
3949 extra.patype = PARTITIONWISE_AGGREGATE_NONE;
3950
3951 create_ordinary_grouping_paths(root, input_rel, grouped_rel,
3952 &agg_costs, gd, &extra,
3953 &partially_grouped_rel);
3954 }
3955
3956 set_cheapest(grouped_rel);
3957 return grouped_rel;
3958}
#define MemSet(start, val, len)
Definition c.h:1013
bool enable_partitionwise_aggregate
Definition costsize.c:160
@ PARTITIONWISE_AGGREGATE_FULL
Definition pathnodes.h:3617
@ PARTITIONWISE_AGGREGATE_NONE
Definition pathnodes.h:3616
#define GROUPING_CAN_PARTIAL_AGG
Definition pathnodes.h:3601
static void create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel)
Definition planner.c:4046
static bool is_degenerate_grouping(PlannerInfo *root)
Definition planner.c:4025
static void create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, GroupPathExtraData *extra, RelOptInfo **partially_grouped_rel_p)
Definition planner.c:4110
static bool can_partial_agg(PlannerInfo *root)
Definition planner.c:7907
static RelOptInfo * make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, Node *havingQual)
Definition planner.c:3969
void get_agg_clause_costs(PlannerInfo *root, AggSplit aggsplit, AggClauseCosts *costs)
Definition prepagg.c:559
PartitionwiseAggregateType patype
Definition pathnodes.h:3646

References AGGSPLIT_SIMPLE, can_partial_agg(), create_degenerate_grouping_paths(), create_ordinary_grouping_paths(), enable_partitionwise_aggregate, fb(), GroupPathExtraData::flags, get_agg_clause_costs(), GROUPING_CAN_PARTIAL_AGG, GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, grouping_is_hashable(), grouping_is_sortable(), GroupPathExtraData::havingQual, is_degenerate_grouping(), make_grouping_rel(), MemSet, NIL, parse(), GroupPathExtraData::partial_costs_set, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, GroupPathExtraData::patype, root, set_cheapest(), GroupPathExtraData::target_parallel_safe, and GroupPathExtraData::targetList.

Referenced by grouping_planner().
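
The flag computation above composes independently testable conditions. Restated as a hypothetical standalone helper (plain C; the CAN_* values are illustrative copies of the real GROUPING_CAN_* bitmask idea, not the actual macros):

#include <stdbool.h>

#define CAN_USE_SORT    0x0001  /* illustrative, like GROUPING_CAN_USE_SORT */
#define CAN_USE_HASH    0x0002  /* like GROUPING_CAN_USE_HASH */
#define CAN_PARTIAL_AGG 0x0004  /* like GROUPING_CAN_PARTIAL_AGG */

int
grouping_flags(bool sortable_or_has_rollups, bool has_group_clause,
               int num_ordered_aggs, bool hashable, bool can_partial_agg)
{
    int flags = 0;

    if (sortable_or_has_rollups)
        flags |= CAN_USE_SORT;
    if (has_group_clause && num_ordered_aggs == 0 && hashable)
        flags |= CAN_USE_HASH;
    if (can_partial_agg)
        flags |= CAN_PARTIAL_AGG;
    return flags;
}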

◆ create_one_window_path()

static void create_one_window_path ( PlannerInfo root,
RelOptInfo window_rel,
Path path,
PathTarget input_target,
PathTarget output_target,
WindowFuncLists wflists,
List activeWindows 
)
static

Definition at line 4690 of file planner.c.

4697{
4698 PathTarget *window_target;
4699 ListCell *l;
4700 List *topqual = NIL;
4701
4702 /*
4703 * Since each window clause could require a different sort order, we stack
4704 * up a WindowAgg node for each clause, with sort steps between them as
4705 * needed. (We assume that select_active_windows chose a good order for
4706 * executing the clauses in.)
4707 *
4708 * input_target should contain all Vars and Aggs needed for the result.
4709 * (In some cases we wouldn't need to propagate all of these all the way
4710 * to the top, since they might only be needed as inputs to WindowFuncs.
4711 * It's probably not worth trying to optimize that though.) It must also
4712 * contain all window partitioning and sorting expressions, to ensure
4713 * they're computed only once at the bottom of the stack (that's critical
4714 * for volatile functions). As we climb up the stack, we'll add outputs
4715 * for the WindowFuncs computed at each level.
4716 */
4717 window_target = input_target;
4718
4719 foreach(l, activeWindows)
4720 {
4721 WindowClause *wc = lfirst_node(WindowClause, l);
4722 List *window_pathkeys;
4723 List *runcondition = NIL;
4724 int presorted_keys;
4725 bool is_sorted;
4726 bool topwindow;
4727 ListCell *lc2;
4728
4729 window_pathkeys = make_pathkeys_for_window(root,
4730 wc,
4731 root->processed_tlist);
4732
4733 is_sorted = pathkeys_count_contained_in(window_pathkeys,
4734 path->pathkeys,
4735 &presorted_keys);
4736
4737 /* Sort if necessary */
4738 if (!is_sorted)
4739 {
4740 /*
4741 * No presorted keys or incremental sort disabled, just perform a
4742 * complete sort.
4743 */
4744 if (presorted_keys == 0 || !enable_incremental_sort)
4745 path = (Path *) create_sort_path(root, window_rel,
4746 path,
4747 window_pathkeys,
4748 -1.0);
4749 else
4750 {
4751 /*
4752 * Since we have presorted keys and incremental sort is
4753 * enabled, just use incremental sort.
4754 */
4755 path = (Path *) create_incremental_sort_path(root,
4756 window_rel,
4757 path,
4758 window_pathkeys,
4759 presorted_keys,
4760 -1.0);
4761 }
4762 }
4763
4764 if (lnext(activeWindows, l))
4765 {
4766 /*
4767 * Add the current WindowFuncs to the output target for this
4768 * intermediate WindowAggPath. We must copy window_target to
4769 * avoid changing the previous path's target.
4770 *
4771 * Note: a WindowFunc adds nothing to the target's eval costs; but
4772 * we do need to account for the increase in tlist width.
4773 */
4774 int64 tuple_width = window_target->width;
4775
4776 window_target = copy_pathtarget(window_target);
4777 foreach(lc2, wflists->windowFuncs[wc->winref])
4778 {
4779 WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4780
4781 add_column_to_pathtarget(window_target, (Expr *) wfunc, 0);
4782 tuple_width += get_typavgwidth(wfunc->wintype, -1);
4783 }
4784 window_target->width = clamp_width_est(tuple_width);
4785 }
4786 else
4787 {
4788 /* Install the goal target in the topmost WindowAgg */
4789 window_target = output_target;
4790
4791
4792 /* mark the final item in the list as the top-level window */
4793 topwindow = foreach_current_index(l) == list_length(activeWindows) - 1;
4794
4795 /*
4796 * Collect the WindowFuncRunConditions from each WindowFunc and
4797 * convert them into OpExprs
4798 */
4799 foreach(lc2, wflists->windowFuncs[wc->winref])
4800 {
4801 ListCell *lc3;
4802 WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4803
4804 foreach(lc3, wfunc->runCondition)
4805 {
4806 WindowFuncRunCondition *wfuncrc =
4807 lfirst_node(WindowFuncRunCondition, lc3);
4808 Expr *opexpr;
4809 Expr *leftop;
4810 Expr *rightop;
4811
4812 if (wfuncrc->wfunc_left)
4813 {
4814 leftop = (Expr *) copyObject(wfunc);
4815 rightop = copyObject(wfuncrc->arg);
4816 }
4817 else
4818 {
4819 leftop = copyObject(wfuncrc->arg);
4820 rightop = (Expr *) copyObject(wfunc);
4821 }
4822
4823 opexpr = make_opclause(wfuncrc->opno,
4824 BOOLOID,
4825 false,
4826 leftop,
4827 rightop,
4828 InvalidOid,
4829 wfuncrc->inputcollid);
4830
4831 runcondition = lappend(runcondition, opexpr);
4832
4833 if (!topwindow)
4834 topqual = lappend(topqual, opexpr);
4835 }
4836 }
4837
4838 path = (Path *)
4839 create_windowagg_path(root, window_rel, path, window_target,
4840 wflists->windowFuncs[wc->winref],
4841 runcondition, wc,
4842 topwindow ? topqual : NIL, topwindow);
4843 }
4844
4845 add_path(window_rel, path);
4846}
int32 clamp_width_est(int64 tuple_width)
Definition costsize.c:242
int32 get_typavgwidth(Oid typid, int32 typmod)
Definition lsyscache.c:2728
Expr * make_opclause(Oid opno, Oid opresulttype, bool opretset, Expr *leftop, Expr *rightop, Oid opcollid, Oid inputcollid)
Definition makefuncs.c:701
#define copyObject(obj)
Definition nodes.h:232
WindowAggPath * create_windowagg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *windowFuncs, List *runCondition, WindowClause *winclause, List *qual, bool topwindow)
Definition pathnode.c:3337
static List * make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc, List *tlist)
Definition planner.c:6351
void add_column_to_pathtarget(PathTarget *target, Expr *expr, Index sortgroupref)
Definition tlist.c:704

References add_column_to_pathtarget(), add_path(), clamp_width_est(), copy_pathtarget(), copyObject, create_incremental_sort_path(), create_sort_path(), create_windowagg_path(), enable_incremental_sort, fb(), foreach_current_index, get_typavgwidth(), InvalidOid, lappend(), lfirst_node, list_length(), lnext(), make_opclause(), make_pathkeys_for_window(), NIL, Path::pathkeys, pathkeys_count_contained_in(), root, and WindowClause::winref.

Referenced by create_window_paths().
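
The width bookkeeping in the loop above is worth isolating: each WindowFunc added to an intermediate target widens the tlist, and the running total is clamped to a sane int32 range the way clamp_width_est() does. The helper below is a hypothetical plain C mirror, not the real function.

#include <limits.h>

/* Hypothetical mirror of the clamping applied after summing
 * get_typavgwidth() estimates for each WindowFunc's result. */
int
clamp_width(long long tuple_width)
{
    if (tuple_width > INT_MAX)
        return INT_MAX;
    if (tuple_width < 0)
        return 0;
    return (int) tuple_width;
}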

◆ create_ordered_paths()

static RelOptInfo * create_ordered_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget target,
bool  target_parallel_safe,
double  limit_tuples 
)
static

Definition at line 5378 of file planner.c.

5383{
5384 Path *cheapest_input_path = input_rel->cheapest_total_path;
5385 RelOptInfo *ordered_rel;
5386 ListCell *lc;
5387
5388 /* For now, do all work in the (ORDERED, NULL) upperrel */
5389 ordered_rel = fetch_upper_rel(root, UPPERREL_ORDERED, NULL);
5390
5391 /*
5392 * If the input relation is not parallel-safe, then the ordered relation
5393 * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
5394 * target list is parallel-safe.
5395 */
5396 if (input_rel->consider_parallel && target_parallel_safe)
5397 ordered_rel->consider_parallel = true;
5398
5399 /* Assume that the same path generation strategies are allowed. */
5400 ordered_rel->pgs_mask = input_rel->pgs_mask;
5401
5402 /*
5403 * If the input rel belongs to a single FDW, so does the ordered_rel.
5404 */
5405 ordered_rel->serverid = input_rel->serverid;
5406 ordered_rel->userid = input_rel->userid;
5407 ordered_rel->useridiscurrent = input_rel->useridiscurrent;
5408 ordered_rel->fdwroutine = input_rel->fdwroutine;
5409
5410 foreach(lc, input_rel->pathlist)
5411 {
5412 Path *input_path = (Path *) lfirst(lc);
5413 Path *sorted_path;
5414 bool is_sorted;
5415 int presorted_keys;
5416
5417 is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5418 input_path->pathkeys, &presorted_keys);
5419
5420 if (is_sorted)
5421 sorted_path = input_path;
5422 else
5423 {
5424 /*
5425 * Try at least sorting the cheapest path and also try
5426 * incrementally sorting any path which is partially sorted
5427 * already (no need to deal with paths which have presorted keys
5428 * when incremental sort is disabled unless it's the cheapest
5429 * input path).
5430 */
5431 if (input_path != cheapest_input_path &&
5432 (presorted_keys == 0 || !enable_incremental_sort))
5433 continue;
5434
5435 /*
5436 * We've no need to consider both a sort and incremental sort.
5437 * We'll just do a sort if there are no presorted keys and an
5438 * incremental sort when there are presorted keys.
5439 */
5440 if (presorted_keys == 0 || !enable_incremental_sort)
5441 sorted_path = (Path *) create_sort_path(root,
5442 ordered_rel,
5443 input_path,
5444 root->sort_pathkeys,
5445 limit_tuples);
5446 else
5447 sorted_path = (Path *) create_incremental_sort_path(root,
5448 ordered_rel,
5449 input_path,
5450 root->sort_pathkeys,
5451 presorted_keys,
5452 limit_tuples);
5453 }
5454
5455 /*
5456 * If the pathtarget of the result path has different expressions from
5457 * the target to be applied, a projection step is needed.
5458 */
5459 if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5460 sorted_path = apply_projection_to_path(root, ordered_rel,
5461 sorted_path, target);
5462
5463 add_path(ordered_rel, sorted_path);
5464 }
5465
5466 /*
5467 * generate_gather_paths() will have already generated a simple Gather
5468 * path for the best parallel path, if any, and the loop above will have
5469 * considered sorting it. Similarly, generate_gather_paths() will also
5470 * have generated order-preserving Gather Merge plans which can be used
5471 * without sorting if they happen to match the sort_pathkeys, and the loop
5472 * above will have handled those as well. However, there's one more
5473 * possibility: it may make sense to sort the cheapest partial path or
5474 * incrementally sort any partial path that is partially sorted according
5475 * to the required output order and then use Gather Merge.
5476 */
5477 if (ordered_rel->consider_parallel && root->sort_pathkeys != NIL &&
5478 input_rel->partial_pathlist != NIL)
5479 {
5480 Path *cheapest_partial_path;
5481
5482 cheapest_partial_path = linitial(input_rel->partial_pathlist);
5483
5484 foreach(lc, input_rel->partial_pathlist)
5485 {
5486 Path *input_path = (Path *) lfirst(lc);
5487 Path *sorted_path;
5488 bool is_sorted;
5489 int presorted_keys;
5490 double total_groups;
5491
5492 is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5493 input_path->pathkeys,
5494 &presorted_keys);
5495
5496 if (is_sorted)
5497 continue;
5498
5499 /*
5500 * Try at least sorting the cheapest path and also try
5501 * incrementally sorting any path which is partially sorted
5502 * already (no need to deal with paths which have presorted keys
5503 * when incremental sort is disabled unless it's the cheapest
5504 * partial path).
5505 */
5506 if (input_path != cheapest_partial_path &&
5507 (presorted_keys == 0 || !enable_incremental_sort))
5508 continue;
5509
5510 /*
5511 * We've no need to consider both a sort and incremental sort.
5512 * We'll just do a sort if there are no presorted keys and an
5513 * incremental sort when there are presorted keys.
5514 */
5515 if (presorted_keys == 0 || !enable_incremental_sort)
5516 sorted_path = (Path *) create_sort_path(root,
5517 ordered_rel,
5518 input_path,
5519 root->sort_pathkeys,
5520 limit_tuples);
5521 else
5522 sorted_path = (Path *) create_incremental_sort_path(root,
5523 ordered_rel,
5524 input_path,
5525 root->sort_pathkeys,
5526 presorted_keys,
5527 limit_tuples);
5528 total_groups = compute_gather_rows(sorted_path);
5529 sorted_path = (Path *)
5530 create_gather_merge_path(root, ordered_rel,
5531 sorted_path,
5532 sorted_path->pathtarget,
5533 root->sort_pathkeys, NULL,
5534 &total_groups);
5535
5536 /*
5537 * If the pathtarget of the result path has different expressions
5538 * from the target to be applied, a projection step is needed.
5539 */
5540 if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5541 sorted_path = apply_projection_to_path(root, ordered_rel,
5542 sorted_path, target);
5543
5544 add_path(ordered_rel, sorted_path);
5545 }
5546 }
5547
5548 /*
5549 * If there is an FDW that's responsible for all baserels of the query,
5550 * let it consider adding ForeignPaths.
5551 */
5552 if (ordered_rel->fdwroutine &&
5553 ordered_rel->fdwroutine->GetForeignUpperPaths)
5554 ordered_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_ORDERED,
5555 input_rel, ordered_rel,
5556 NULL);
5557
5558 /* Let extensions possibly add some more paths */
5559 if (create_upper_paths_hook)
5560 (*create_upper_paths_hook) (root, UPPERREL_ORDERED,
5561 input_rel, ordered_rel, NULL);
5562
5563 /*
5564 * No need to bother with set_cheapest here; grouping_planner does not
5565 * need us to do it.
5566 */
5567 Assert(ordered_rel->pathlist != NIL);
5568
5569 return ordered_rel;
5570}
double compute_gather_rows(Path *path)
Definition costsize.c:6772
bool equal(const void *a, const void *b)
Definition equalfuncs.c:223
GatherMergePath * create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *pathkeys, Relids required_outer, double *rows)
Definition pathnode.c:1757
@ UPPERREL_ORDERED
Definition pathnodes.h:151

References add_path(), apply_projection_to_path(), Assert, compute_gather_rows(), create_gather_merge_path(), create_incremental_sort_path(), create_sort_path(), create_upper_paths_hook, enable_incremental_sort, equal(), PathTarget::exprs, fb(), fetch_upper_rel(), lfirst, linitial, NIL, pathkeys_count_contained_in(), root, and UPPERREL_ORDERED.

Referenced by grouping_planner().
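
The partial-path loop above applies a pipeline of cheap filters before committing to a sort. The hypothetical plain C mirror below restates that filter: an already-sorted partial path was handled by generate_gather_paths(), and a non-cheapest path with no presorted prefix (or with incremental sort disabled) is not worth sorting.

#include <stdbool.h>

/* Mirrors the skip conditions in create_ordered_paths()'s loops. */
bool
worth_sorting_path(bool is_sorted, bool is_cheapest,
                   int presorted_keys, bool incremental_enabled)
{
    if (is_sorted)
        return false;           /* usable as-is; no sort step needed */
    if (!is_cheapest && (presorted_keys == 0 || !incremental_enabled))
        return false;           /* not worth an explicit full sort */
    return true;
}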

◆ create_ordinary_grouping_paths()

static void create_ordinary_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo grouped_rel,
const AggClauseCosts agg_costs,
grouping_sets_data gd,
GroupPathExtraData extra,
RelOptInfo **  partially_grouped_rel_p 
)
static

Definition at line 4110 of file planner.c.

4116{
4117 RelOptInfo *partially_grouped_rel = NULL;
4118 PartitionwiseAggregateType patype = PARTITIONWISE_AGGREGATE_NONE;
4119
4120 /*
4121 * If this is the topmost grouping relation or if the parent relation is
4122 * doing some form of partitionwise aggregation, then we may be able to do
4123 * it at this level also. However, if the input relation is not
4124 * partitioned, partitionwise aggregate is impossible.
4125 */
4126 if (extra->patype != PARTITIONWISE_AGGREGATE_NONE &&
4127 IS_PARTITIONED_REL(input_rel))
4128 {
4129 /*
4130 * If this is the topmost relation or if the parent relation is doing
4131 * full partitionwise aggregation, then we can do full partitionwise
4132 * aggregation provided that the GROUP BY clause contains all of the
4133 * partitioning columns at this level and the collation used by GROUP
4134 * BY matches the partitioning collation. Otherwise, we can do at
4135 * most partial partitionwise aggregation. But if partial aggregation
4136 * is not supported in general then we can't use it for partitionwise
4137 * aggregation either.
4138 *
4139 * Check parse->groupClause not processed_groupClause, because it's
4140 * okay if some of the partitioning columns were proved redundant.
4141 */
4142 if (extra->patype == PARTITIONWISE_AGGREGATE_FULL &&
4143 group_by_has_partkey(input_rel, extra->targetList,
4144 root->parse->groupClause))
4145 patype = PARTITIONWISE_AGGREGATE_FULL;
4146 else if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4147 patype = PARTITIONWISE_AGGREGATE_PARTIAL;
4148 else
4149 patype = PARTITIONWISE_AGGREGATE_NONE;
4150
4151
4152 /*
4153 * Before generating paths for grouped_rel, we first generate any possible
4154 * partially grouped paths; that way, later code can easily consider both
4155 * parallel and non-parallel approaches to grouping.
4156 */
4157 if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4158 {
4159 bool force_rel_creation;
4160
4161 /*
4162 * If we're doing partitionwise aggregation at this level, force
4163 * creation of a partially_grouped_rel so we can add partitionwise
4164 * paths to it.
4165 */
4167
4170 grouped_rel,
4171 input_rel,
4172 gd,
4173 extra,
4175 }
4176
4177 /* Set out parameter. */
4179
4180 /* Apply partitionwise aggregation technique, if possible. */
4181 if (patype != PARTITIONWISE_AGGREGATE_NONE)
4182 create_partitionwise_grouping_paths(root, input_rel, grouped_rel,
4183 partially_grouped_rel, agg_costs,
4184 gd, patype, extra);
4185
4186 /* If we are doing partial aggregation only, return. */
4187 if (extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
4188 {
4189 Assert(partially_grouped_rel);
4190
4191 if (partially_grouped_rel->pathlist)
4192 set_cheapest(partially_grouped_rel);
4193
4194 return;
4195 }
4196
4197 /* Gather any partially grouped partial paths. */
4198 if (partially_grouped_rel && partially_grouped_rel->partial_pathlist)
4199 gather_grouping_paths(root, partially_grouped_rel);
4200
4201 /* Now choose the best path(s) for partially_grouped_rel. */
4202 if (partially_grouped_rel && partially_grouped_rel->pathlist)
4203 set_cheapest(partially_grouped_rel);
4204
4205 /* Build final grouping paths */
4206 add_paths_to_grouping_rel(root, input_rel, grouped_rel,
4207 partially_grouped_rel, agg_costs, gd,
4208 extra);
4209
4210 /* Give a helpful error if we failed to find any implementation */
4211 if (grouped_rel->pathlist == NIL)
4212 ereport(ERROR,
4213 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4214 errmsg("could not implement GROUP BY"),
4215 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4216
4217 /*
4218 * If there is an FDW that's responsible for all baserels of the query,
4219 * let it consider adding ForeignPaths.
4220 */
4221 if (grouped_rel->fdwroutine &&
4222 grouped_rel->fdwroutine->GetForeignUpperPaths)
4223 grouped_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_GROUP_AGG,
4224 input_rel, grouped_rel,
4225 extra);
4226
4227 /* Let extensions possibly add some more paths */
4229 (*create_upper_paths_hook) (root, UPPERREL_GROUP_AGG,
4230 input_rel, grouped_rel,
4231 extra);
4232}
PartitionwiseAggregateType
Definition pathnodes.h:3615
@ PARTITIONWISE_AGGREGATE_PARTIAL
Definition pathnodes.h:3618
@ UPPERREL_GROUP_AGG
Definition pathnodes.h:147
static void add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, GroupPathExtraData *extra)
Definition planner.c:7152
static RelOptInfo * create_partial_grouping_paths(PlannerInfo *root, RelOptInfo *grouped_rel, RelOptInfo *input_rel, grouping_sets_data *gd, GroupPathExtraData *extra, bool force_rel_creation)
Definition planner.c:7413
static void create_partitionwise_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, PartitionwiseAggregateType patype, GroupPathExtraData *extra)
Definition planner.c:8190
static bool group_by_has_partkey(RelOptInfo *input_rel, List *targetList, List *groupClause)
Definition planner.c:8327

References add_paths_to_grouping_rel(), Assert, create_partial_grouping_paths(), create_partitionwise_grouping_paths(), create_upper_paths_hook, ereport, errcode(), errdetail(), errmsg(), ERROR, fb(), GroupPathExtraData::flags, gather_grouping_paths(), group_by_has_partkey(), GROUPING_CAN_PARTIAL_AGG, IS_PARTITIONED_REL, NIL, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, PARTITIONWISE_AGGREGATE_PARTIAL, RelOptInfo::pathlist, GroupPathExtraData::patype, root, set_cheapest(), GroupPathExtraData::targetList, and UPPERREL_GROUP_AGG.

Referenced by create_grouping_paths(), and create_partitionwise_grouping_paths().
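
The partitionwise-aggregation decision above is a small cascade. The standalone sketch below (hypothetical plain C; PW_* are illustrative copies of the PARTITIONWISE_AGGREGATE_* idea) restates it: full partitionwise aggregation requires GROUP BY to cover the partition key, as group_by_has_partkey() checks; failing that, partial partitionwise aggregation requires partial aggregation to be possible at all.

#include <stdbool.h>

enum pw_agg { PW_NONE, PW_PARTIAL, PW_FULL };   /* illustrative */

enum pw_agg
choose_patype(enum pw_agg parent_patype, bool group_by_covers_partkey,
              bool can_partial_agg)
{
    if (parent_patype == PW_FULL && group_by_covers_partkey)
        return PW_FULL;
    if (can_partial_agg)
        return PW_PARTIAL;
    return PW_NONE;
}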

◆ create_partial_distinct_paths()

static void create_partial_distinct_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo final_distinct_rel,
PathTarget target 
)
static

Definition at line 4930 of file planner.c.

4933{
4934 RelOptInfo *partial_distinct_rel;
4935 Query *parse;
4936 List *distinctExprs;
4937 double numDistinctRows;
4938 Path *cheapest_partial_path;
4939 ListCell *lc;
4940
4941 /* nothing to do when there are no partial paths in the input rel */
4942 if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
4943 return;
4944
4945 parse = root->parse;
4946
4947 /* can't do parallel DISTINCT ON */
4948 if (parse->hasDistinctOn)
4949 return;
4950
4951 partial_distinct_rel = fetch_upper_rel(root, UPPERREL_PARTIAL_DISTINCT,
4952 NULL);
4953 partial_distinct_rel->reltarget = target;
4954 partial_distinct_rel->consider_parallel = input_rel->consider_parallel;
4955
4956 /*
4957 * If input_rel belongs to a single FDW, so does the partial_distinct_rel.
4958 */
4959 partial_distinct_rel->serverid = input_rel->serverid;
4960 partial_distinct_rel->userid = input_rel->userid;
4961 partial_distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4962 partial_distinct_rel->fdwroutine = input_rel->fdwroutine;
4963
4964 cheapest_partial_path = linitial(input_rel->partial_pathlist);
4965
4966 distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
4967 parse->targetList);
4968
4969 /* estimate how many distinct rows we'll get from each worker */
4970 numDistinctRows = estimate_num_groups(root, distinctExprs,
4971 cheapest_partial_path->rows,
4972 NULL, NULL);
4973
4974 /*
4975 * Try sorting the cheapest path and incrementally sorting any paths with
4976 * presorted keys and put unique paths atop of those. We'll also
4977 * attempt to reorder the required pathkeys to match the input path's
4978 * pathkeys as much as possible, in hopes of avoiding a possible need to
4979 * re-sort.
4980 */
4981 if (grouping_is_sortable(root->processed_distinctClause))
4982 {
4983 foreach(lc, input_rel->partial_pathlist)
4984 {
4985 Path *input_path = (Path *) lfirst(lc);
4986 Path *sorted_path;
4987 List *useful_pathkeys_list = NIL;
4988
4989 useful_pathkeys_list =
4990 get_useful_pathkeys_for_distinct(root,
4991 root->distinct_pathkeys,
4992 input_path->pathkeys);
4993 Assert(list_length(useful_pathkeys_list) > 0);
4994
4995 foreach_node(List, useful_pathkeys, useful_pathkeys_list)
4996 {
4997 sorted_path = make_ordered_path(root,
4998 partial_distinct_rel,
4999 input_path,
5000 cheapest_partial_path,
5001 useful_pathkeys,
5002 -1.0);
5003
5004 if (sorted_path == NULL)
5005 continue;
5006
5007 /*
5008 * An empty distinct_pathkeys means all tuples have the same
5009 * value for the DISTINCT clause. See
5010 * create_final_distinct_paths()
5011 */
5012 if (root->distinct_pathkeys == NIL)
5013 {
5014 Node *limitCount;
5015
5016 limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
5017 sizeof(int64),
5018 Int64GetDatum(1), false,
5019 true);
5020
5021 /*
5022 * Apply a LimitPath onto the partial path to restrict the
5023 * tuples from each worker to 1.
5024 * create_final_distinct_paths will need to apply an
5025 * additional LimitPath to restrict this to a single row
5026 * after the Gather node. If the query already has a
5027 * LIMIT clause, then we could end up with three Limit
5028 * nodes in the final plan. Consolidating the top two of
5029 * these could be done, but does not seem worth troubling
5030 * over.
5031 */
5032 add_partial_path(partial_distinct_rel, (Path *)
5033 create_limit_path(root, partial_distinct_rel,
5034 sorted_path,
5035 NULL,
5036 limitCount,
5037 LIMIT_OPTION_COUNT,
5038 0, 1));
5039 }
5040 else
5041 {
5042 add_partial_path(partial_distinct_rel, (Path *)
5043 create_unique_path(root, partial_distinct_rel,
5044 sorted_path,
5045 list_length(root->distinct_pathkeys),
5046 numDistinctRows));
5047 }
5048 }
5049 }
5050 }
5051
5052 /*
5053 * Now try hash aggregate paths, if enabled and hashing is possible. Since
5054 * we're not on the hook to ensure we do our best to create at least one
5055 * path here, we treat enable_hashagg as a hard off-switch rather than the
5056 * slightly softer variant in create_final_distinct_paths.
5057 */
5058 if (enable_hashagg && grouping_is_hashable(root->processed_distinctClause))
5059 {
5060 add_partial_path(partial_distinct_rel, (Path *)
5061 create_agg_path(root,
5062 partial_distinct_rel,
5063 cheapest_partial_path,
5064 cheapest_partial_path->pathtarget,
5065 AGG_HASHED,
5066 AGGSPLIT_SIMPLE,
5067 root->processed_distinctClause,
5068 NIL,
5069 NULL,
5070 numDistinctRows));
5071 }
5072
5073 /*
5074 * If there is an FDW that's responsible for all baserels of the query,
5075 * let it consider adding ForeignPaths.
5076 */
5077 if (partial_distinct_rel->fdwroutine &&
5078 partial_distinct_rel->fdwroutine->GetForeignUpperPaths)
5079 partial_distinct_rel->fdwroutine->GetForeignUpperPaths(root,
5080 UPPERREL_PARTIAL_DISTINCT,
5081 input_rel,
5082 partial_distinct_rel,
5083 NULL);
5084
5085 /* Let extensions possibly add some more partial paths */
5086 if (create_upper_paths_hook)
5087 (*create_upper_paths_hook) (root, UPPERREL_PARTIAL_DISTINCT,
5088 input_rel, partial_distinct_rel, NULL);
5089
5090 if (partial_distinct_rel->partial_pathlist != NIL)
5091 {
5092 generate_useful_gather_paths(root, partial_distinct_rel, true);
5093 set_cheapest(partial_distinct_rel);
5094
5095 /*
5096 * Finally, create paths to distinctify the final result. This step
5097 * is needed to remove any duplicates due to combining rows from
5098 * parallel workers.
5099 */
5100 create_final_distinct_paths(root, partial_distinct_rel,
5101 final_distinct_rel);
5102 }
5103}
void add_partial_path(RelOptInfo *parent_rel, Path *new_path)
Definition pathnode.c:793
@ UPPERREL_PARTIAL_DISTINCT
Definition pathnodes.h:149

References add_partial_path(), AGG_HASHED, AGGSPLIT_SIMPLE, Assert, create_agg_path(), create_final_distinct_paths(), create_limit_path(), create_unique_path(), create_upper_paths_hook, enable_hashagg, estimate_num_groups(), fb(), fetch_upper_rel(), foreach_node, generate_useful_gather_paths(), get_sortgrouplist_exprs(), get_useful_pathkeys_for_distinct(), grouping_is_hashable(), grouping_is_sortable(), Int64GetDatum(), InvalidOid, lfirst, LIMIT_OPTION_COUNT, linitial, list_length(), make_ordered_path(), makeConst(), NIL, parse(), root, set_cheapest(), and UPPERREL_PARTIAL_DISTINCT.

Referenced by create_distinct_paths().
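
One subtlety above: cheapest_partial_path->rows is already a per-worker row count, so the estimate_num_groups() call yields a per-worker distinct estimate. The hypothetical arithmetic sketch below (plain C, not planner code) captures the intuition: each worker can still see up to the full distinct count, bounded by the rows it scans.

/* Hypothetical illustration only: with 4 workers scanning 1e6 rows in
 * total and ~100 distinct values, each worker still produces up to 100
 * groups before the Gather, so the per-worker estimate is capped by
 * per-worker rows, not divided among workers. */
double
per_worker_distinct_rows(double per_worker_rows, double ndistinct)
{
    return (ndistinct < per_worker_rows) ? ndistinct : per_worker_rows;
}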

◆ create_partial_grouping_paths()

static RelOptInfo * create_partial_grouping_paths ( PlannerInfo root,
RelOptInfo grouped_rel,
RelOptInfo input_rel,
grouping_sets_data gd,
GroupPathExtraData extra,
bool  force_rel_creation 
)
static

Definition at line 7413 of file planner.c.

7419{
7420 Query *parse = root->parse;
7421 RelOptInfo *partially_grouped_rel;
7422 RelOptInfo *eager_agg_rel = NULL;
7423 AggClauseCosts *agg_partial_costs = &extra->agg_partial_costs;
7424 AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7425 Path *cheapest_partial_path = NULL;
7426 Path *cheapest_total_path = NULL;
7427 double dNumPartialGroups = 0;
7428 double dNumPartialPartialGroups = 0;
7429 ListCell *lc;
7430 bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7431 bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7432
7433 /*
7434 * Check whether any partially aggregated paths have been generated
7435 * through eager aggregation.
7436 */
7437 if (input_rel->grouped_rel &&
7438 !IS_DUMMY_REL(input_rel->grouped_rel) &&
7439 input_rel->grouped_rel->pathlist != NIL)
7440 eager_agg_rel = input_rel->grouped_rel;
7441
7442 /*
7443 * Consider whether we should generate partially aggregated non-partial
7444 * paths. We can only do this if we have a non-partial path, and only if
7445 * the parent of the input rel is performing partial partitionwise
7446 * aggregation. (Note that extra->patype is the type of partitionwise
7447 * aggregation being used at the parent level, not this level.)
7448 */
7449 if (input_rel->pathlist != NIL &&
7450 extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
7451 cheapest_total_path = input_rel->cheapest_total_path;
7452
7453 /*
7454 * If parallelism is possible for grouped_rel, then we should consider
7455 * generating partially-grouped partial paths. However, if the input rel
7456 * has no partial paths, then we can't.
7457 */
7458 if (grouped_rel->consider_parallel && input_rel->partial_pathlist != NIL)
7459 cheapest_partial_path = linitial(input_rel->partial_pathlist);
7460
7461 /*
7462 * If we can't partially aggregate partial paths, and we can't partially
7463 * aggregate non-partial paths, and no partially aggregated paths were
7464 * generated by eager aggregation, then don't bother creating the new
7465 * RelOptInfo at all, unless the caller specified force_rel_creation.
7466 */
7467 if (cheapest_total_path == NULL &&
7468 cheapest_partial_path == NULL &&
7469 eager_agg_rel == NULL &&
7470 !force_rel_creation)
7471 return NULL;
7472
7473 /*
7474 * Build a new upper relation to represent the result of partially
7475 * aggregating the rows from the input relation.
7476 */
7477 partially_grouped_rel = fetch_upper_rel(root,
7478 UPPERREL_PARTIAL_GROUP_AGG,
7479 grouped_rel->relids);
7480 partially_grouped_rel->consider_parallel =
7481 grouped_rel->consider_parallel;
7482 partially_grouped_rel->pgs_mask = grouped_rel->pgs_mask;
7483 partially_grouped_rel->reloptkind = grouped_rel->reloptkind;
7484 partially_grouped_rel->serverid = grouped_rel->serverid;
7485 partially_grouped_rel->userid = grouped_rel->userid;
7486 partially_grouped_rel->useridiscurrent = grouped_rel->useridiscurrent;
7487 partially_grouped_rel->fdwroutine = grouped_rel->fdwroutine;
7488
7489 /*
7490 * Build target list for partial aggregate paths. These paths cannot just
7491 * emit the same tlist as regular aggregate paths, because (1) we must
7492 * include Vars and Aggrefs needed in HAVING, which might not appear in
7493 * the result tlist, and (2) the Aggrefs must be set in partial mode.
7494 */
7495 partially_grouped_rel->reltarget =
7496 make_partial_grouping_target(root, grouped_rel->reltarget,
7497 extra->havingQual);
7498
7499 if (!extra->partial_costs_set)
7500 {
7501 /*
7502 * Collect statistics about aggregates for estimating costs of
7503 * performing aggregation in parallel.
7504 */
7505 MemSet(agg_partial_costs, 0, sizeof(AggClauseCosts));
7506 MemSet(agg_final_costs, 0, sizeof(AggClauseCosts));
7507 if (parse->hasAggs)
7508 {
7509 /* partial phase */
7510 get_agg_clause_costs(root, AGGSPLIT_INITIAL_SERIAL,
7511 agg_partial_costs);
7512
7513 /* final phase */
7514 get_agg_clause_costs(root, AGGSPLIT_FINAL_DESERIAL,
7515 agg_final_costs);
7516 }
7517
7518 extra->partial_costs_set = true;
7519 }
7520
7521 /* Estimate number of partial groups. */
7522 if (cheapest_total_path != NULL)
7523 dNumPartialGroups =
7524 get_number_of_groups(root,
7525 cheapest_total_path->rows,
7526 gd,
7527 extra->targetList);
7528 if (cheapest_partial_path != NULL)
7529 dNumPartialPartialGroups =
7530 get_number_of_groups(root,
7531 cheapest_partial_path->rows,
7532 gd,
7533 extra->targetList);
7534
7535 if (can_sort && cheapest_total_path != NULL)
7536 {
7537 /* This should have been checked previously */
7538 Assert(parse->hasAggs || parse->groupClause);
7539
7540 /*
7541 * Use any available suitably-sorted path as input, and also consider
7542 * sorting the cheapest partial path.
7543 */
7544 foreach(lc, input_rel->pathlist)
7545 {
7546 ListCell *lc2;
7547 Path *path = (Path *) lfirst(lc);
7548 Path *path_save = path;
7549 List *pathkey_orderings;
7550
7551 /* generate alternative group orderings that might be useful */
7552 pathkey_orderings = get_useful_group_keys_orderings(root, path);
7553
7554 Assert(list_length(pathkey_orderings) > 0);
7555
7556 /* process all potentially interesting grouping reorderings */
7557 foreach(lc2, pathkey_orderings)
7558 {
7559 GroupByOrdering *info = lfirst_node(GroupByOrdering, lc2);
7560
7561 /* restore the path (we replace it in the loop) */
7562 path = path_save;
7563
7564 path = make_ordered_path(root,
7565 partially_grouped_rel,
7566 path,
7567 cheapest_total_path,
7568 info->pathkeys,
7569 -1.0);
7570
7571 if (path == NULL)
7572 continue;
7573
7574 if (parse->hasAggs)
7575 add_path(partially_grouped_rel, (Path *)
7576 create_agg_path(root,
7577 partially_grouped_rel,
7578 path,
7579 partially_grouped_rel->reltarget,
7580 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7581 AGGSPLIT_INITIAL_SERIAL,
7582 info->clauses,
7583 NIL,
7584 agg_partial_costs,
7585 dNumPartialGroups));
7586 else
7587 add_path(partially_grouped_rel, (Path *)
7588 create_group_path(root,
7589 partially_grouped_rel,
7590 path,
7591 info->clauses,
7592 NIL,
7593 dNumPartialGroups));
7594 }
7595 }
7596 }
7597
7598 if (can_sort && cheapest_partial_path != NULL)
7599 {
7600 /* Similar to above logic, but for partial paths. */
7601 foreach(lc, input_rel->partial_pathlist)
7602 {
7603 ListCell *lc2;
7604 Path *path = (Path *) lfirst(lc);
7605 Path *path_save = path;
7606 List *pathkey_orderings;
7607
7608 /* generate alternative group orderings that might be useful */
7609 pathkey_orderings = get_useful_group_keys_orderings(root, path);
7610
7611 Assert(list_length(pathkey_orderings) > 0);
7612
7613 /* process all potentially interesting grouping reorderings */
7614 foreach(lc2, pathkey_orderings)
7615 {
7616 GroupByOrdering *info = lfirst_node(GroupByOrdering, lc2);
7617
7618
7619 /* restore the path (we replace it in the loop) */
7620 path = path_save;
7621
7622 path = make_ordered_path(root,
7623 partially_grouped_rel,
7624 path,
7625 cheapest_partial_path,
7626 info->pathkeys,
7627 -1.0);
7628
7629 if (path == NULL)
7630 continue;
7631
7632 if (parse->hasAggs)
7633 add_partial_path(partially_grouped_rel, (Path *)
7634 create_agg_path(root,
7635 partially_grouped_rel,
7636 path,
7637 partially_grouped_rel->reltarget,
7638 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7639 AGGSPLIT_INITIAL_SERIAL,
7640 info->clauses,
7641 NIL,
7642 agg_partial_costs,
7643 dNumPartialPartialGroups));
7644 else
7645 add_partial_path(partially_grouped_rel, (Path *)
7646 create_group_path(root,
7647 partially_grouped_rel,
7648 path,
7649 info->clauses,
7650 NIL,
7651 dNumPartialPartialGroups));
7652 }
7653 }
7654 }
7655
7656 /*
7657 * Add a partially-grouped HashAgg Path where possible
7658 */
7659 if (can_hash && cheapest_total_path != NULL)
7660 {
7661 /* Checked above */
7662 Assert(parse->hasAggs || parse->groupClause);
7663
7664 add_path(partially_grouped_rel, (Path *)
7665 create_agg_path(root,
7666 partially_grouped_rel,
7667 cheapest_total_path,
7668 partially_grouped_rel->reltarget,
7669 AGG_HASHED,
7670 AGGSPLIT_INITIAL_SERIAL,
7671 root->processed_groupClause,
7672 NIL,
7673 agg_partial_costs,
7674 dNumPartialGroups));
7675 }
7676
7677 /*
7678 * Now add a partially-grouped HashAgg partial Path where possible
7679 */
7680 if (can_hash && cheapest_partial_path != NULL)
7681 {
7682 add_partial_path(partially_grouped_rel, (Path *)
7683 create_agg_path(root,
7684 partially_grouped_rel,
7685 cheapest_partial_path,
7686 partially_grouped_rel->reltarget,
7687 AGG_HASHED,
7688 AGGSPLIT_INITIAL_SERIAL,
7689 root->processed_groupClause,
7690 NIL,
7691 agg_partial_costs,
7692 dNumPartialPartialGroups));
7693 }
7694
7695 /*
7696 * Add any partially aggregated paths generated by eager aggregation to
7697 * the new upper relation after applying projection steps as needed.
7698 */
7699 if (eager_agg_rel)
7700 {
7701 /* Add the paths */
7702 foreach(lc, eager_agg_rel->pathlist)
7703 {
7704 Path *path = (Path *) lfirst(lc);
7705
7706 /* Shouldn't have any parameterized paths anymore */
7707 Assert(path->param_info == NULL);
7708
7709 path = (Path *) create_projection_path(root,
7710 partially_grouped_rel,
7711 path,
7712 partially_grouped_rel->reltarget);
7713
7714 add_path(partially_grouped_rel, path);
7715 }
7716
7717 /*
7718 * Likewise add the partial paths, but only if parallelism is possible
7719 * for partially_grouped_rel.
7720 */
7721 if (partially_grouped_rel->consider_parallel)
7722 {
7723 foreach(lc, eager_agg_rel->partial_pathlist)
7724 {
7725 Path *path = (Path *) lfirst(lc);
7726
7727 /* Shouldn't have any parameterized paths anymore */
7728 Assert(path->param_info == NULL);
7729
7730 path = (Path *) create_projection_path(root,
7731 partially_grouped_rel,
7732 path,
7733 partially_grouped_rel->reltarget);
7734
7735 add_partial_path(partially_grouped_rel, path);
7736 }
7737 }
7738 }
7739
7740 /*
7741 * If there is an FDW that's responsible for all baserels of the query,
7742 * let it consider adding partially grouped ForeignPaths.
7743 */
7744 if (partially_grouped_rel->fdwroutine &&
7745 partially_grouped_rel->fdwroutine->GetForeignUpperPaths)
7746 {
7747 FdwRoutine *fdwroutine = partially_grouped_rel->fdwroutine;
7748
7749 fdwroutine->GetForeignUpperPaths(root,
7750 UPPERREL_PARTIAL_GROUP_AGG,
7751 input_rel, partially_grouped_rel,
7752 extra);
7753 }
7754
7755 return partially_grouped_rel;
7756}
@ AGGSPLIT_INITIAL_SERIAL
Definition nodes.h:389
@ UPPERREL_PARTIAL_GROUP_AGG
Definition pathnodes.h:145
static PathTarget * make_partial_grouping_target(PlannerInfo *root, PathTarget *grouping_target, Node *havingQual)
Definition planner.c:5713
GetForeignUpperPaths_function GetForeignUpperPaths
Definition fdwapi.h:226
AggClauseCosts agg_partial_costs
Definition pathnodes.h:3639
bool useridiscurrent
Definition pathnodes.h:1097
Relids relids
Definition pathnodes.h:1003
uint64 pgs_mask
Definition pathnodes.h:1021
RelOptKind reloptkind
Definition pathnodes.h:997

References add_partial_path(), add_path(), GroupPathExtraData::agg_final_costs, AGG_HASHED, GroupPathExtraData::agg_partial_costs, AGG_PLAIN, AGG_SORTED, AGGSPLIT_FINAL_DESERIAL, AGGSPLIT_INITIAL_SERIAL, Assert, GroupByOrdering::clauses, RelOptInfo::consider_parallel, create_agg_path(), create_group_path(), create_projection_path(), fb(), fetch_upper_rel(), GroupPathExtraData::flags, get_agg_clause_costs(), get_number_of_groups(), get_useful_group_keys_orderings(), FdwRoutine::GetForeignUpperPaths, GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, GroupPathExtraData::havingQual, IS_DUMMY_REL, lfirst, linitial, list_length(), make_ordered_path(), make_partial_grouping_target(), MemSet, NIL, parse(), GroupPathExtraData::partial_costs_set, PARTITIONWISE_AGGREGATE_PARTIAL, GroupByOrdering::pathkeys, GroupPathExtraData::patype, RelOptInfo::pgs_mask, RelOptInfo::relids, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, Path::rows, RelOptInfo::serverid, GroupPathExtraData::targetList, UPPERREL_PARTIAL_GROUP_AGG, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by create_ordinary_grouping_paths().
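
The two cost sets gathered above correspond to the two halves of parallel aggregation: workers run the AGGSPLIT_INITIAL_SERIAL phase and the leader runs the AGGSPLIT_FINAL_DESERIAL phase. The standalone illustration below (plain C, not PostgreSQL code) shows the same split for avg(): partial states are cheap to combine, which is what makes the partially grouped paths worthwhile.

#include <stdio.h>

typedef struct { double sum; long count; } AvgState;   /* partial state */

AvgState
partial_avg(const double *vals, int n)          /* per-worker phase */
{
    AvgState s = {0.0, 0};

    for (int i = 0; i < n; i++)
    {
        s.sum += vals[i];
        s.count++;
    }
    return s;
}

double
final_avg(const AvgState *states, int nworkers) /* leader phase */
{
    AvgState t = {0.0, 0};

    for (int i = 0; i < nworkers; i++)
    {
        t.sum += states[i].sum;
        t.count += states[i].count;
    }
    return t.count ? t.sum / t.count : 0.0;
}

int
main(void)
{
    double a[] = {1, 2, 3}, b[] = {4, 5};
    AvgState parts[2] = {partial_avg(a, 3), partial_avg(b, 2)};

    printf("%g\n", final_avg(parts, 2));        /* prints 3 */
    return 0;
}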

◆ create_partial_unique_paths()

static void create_partial_unique_paths ( PlannerInfo root,
RelOptInfo input_rel,
List sortPathkeys,
List groupClause,
SpecialJoinInfo sjinfo,
RelOptInfo unique_rel 
)
static

Definition at line 8864 of file planner.c.

8867{
8868 RelOptInfo *partial_unique_rel;
8869 Path *cheapest_partial_path;
8870
8871 /* nothing to do when there are no partial paths in the input rel */
8872 if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
8873 return;
8874
8875 /*
8876 * nothing to do if there's anything in the targetlist that's
8877 * parallel-restricted.
8878 */
8879 if (!is_parallel_safe(root, (Node *) unique_rel->reltarget->exprs))
8880 return;
8881
8882 cheapest_partial_path = linitial(input_rel->partial_pathlist);
8883
8886
8887 /*
8888 * clear path info
8889 */
8890 partial_unique_rel->pathlist = NIL;
8891 partial_unique_rel->ppilist = NIL;
8892 partial_unique_rel->partial_pathlist = NIL;
8893 partial_unique_rel->cheapest_startup_path = NULL;
8894 partial_unique_rel->cheapest_total_path = NULL;
8895 partial_unique_rel->cheapest_parameterized_paths = NIL;
8896
8897 /* Estimate number of output rows */
8898 partial_unique_rel->rows = estimate_num_groups(root,
8899 sjinfo->semi_rhs_exprs,
8900 cheapest_partial_path->rows,
8901 NULL,
8902 NULL);
8903 partial_unique_rel->reltarget = unique_rel->reltarget;
8904
8905 /* Consider sort-based implementations, if possible. */
8906 if (sjinfo->semi_can_btree)
8907 {
8908 ListCell *lc;
8909
8910 /*
8911 * Use any available suitably-sorted path as input, and also consider
8912 * sorting the cheapest partial path and incremental sort on any paths
8913 * with presorted keys.
8914 */
8915 foreach(lc, input_rel->partial_pathlist)
8916 {
8917 Path *input_path = (Path *) lfirst(lc);
8918 Path *path;
8919 bool is_sorted;
8920 int presorted_keys;
8921
8923 input_path->pathkeys,
8924 &presorted_keys);
8925
8926 /*
8927 * Ignore paths that are not suitably or partially sorted, unless
8928 * they are the cheapest partial path (no need to deal with paths
8929 * which have presorted keys when incremental sort is disabled).
8930 */
8932 (presorted_keys == 0 || !enable_incremental_sort))
8933 continue;
8934
8935 /*
8936 * Make a separate ProjectionPath in case we need a Result node.
8937 */
8938 path = (Path *) create_projection_path(root,
8940 input_path,
8941 partial_unique_rel->reltarget);
8942
8943 if (!is_sorted)
8944 {
8945 /*
8946 * We've no need to consider both a sort and incremental sort.
8947 * We'll just do a sort if there are no presorted keys and an
8948 * incremental sort when there are presorted keys.
8949 */
8950 if (presorted_keys == 0 || !enable_incremental_sort)
8951 path = (Path *) create_sort_path(root,
8952 partial_unique_rel,
8953 path,
8954 sortPathkeys,
8955 -1.0);
8956 else
8957 path = (Path *) create_incremental_sort_path(root,
8958 partial_unique_rel,
8959 path,
8960 sortPathkeys,
8961 presorted_keys,
8962 -1.0);
8963 }
8964
8967 partial_unique_rel->rows);
8968
8970 }
8971 }
8972
8973 /* Consider hash-based implementation, if possible. */
8974 if (sjinfo->semi_can_hash)
8975 {
8976 Path *path;
8977
8978 /*
8979 * Make a separate ProjectionPath in case we need a Result node.
8980 */
8981 path = (Path *) create_projection_path(root,
8984 partial_unique_rel->reltarget);
8985
8986 path = (Path *) create_agg_path(root,
8987 partial_unique_rel,
8988 path,
8989 cheapest_partial_path->pathtarget,
8990 AGG_HASHED,
8991 AGGSPLIT_SIMPLE,
8992 groupClause,
8993 NIL,
8994 NULL,
8995 partial_unique_rel->rows);
8996
8997 add_partial_path(partial_unique_rel, path);
8998 }
8999
9000 if (partial_unique_rel->partial_pathlist != NIL)
9001 {
9002 generate_useful_gather_paths(root, partial_unique_rel, true);
9003 set_cheapest(partial_unique_rel);
9004
9005 /*
9006 * Finally, create paths to unique-ify the final result. This step is
9007 * needed to remove any duplicates due to combining rows from parallel
9008 * workers.
9009 */
9010 create_final_unique_paths(root, partial_unique_rel,
9011 sortPathkeys, groupClause,
9012 sjinfo, unique_rel);
9013 }
9014}
bool is_parallel_safe(PlannerInfo *root, Node *node)
Definition clauses.c:762
static void create_final_unique_paths(PlannerInfo *root, RelOptInfo *input_rel, List *sortPathkeys, List *groupClause, SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel)
Definition planner.c:8739

References add_partial_path(), AGG_HASHED, AGGSPLIT_SIMPLE, create_agg_path(), create_final_unique_paths(), create_incremental_sort_path(), create_projection_path(), create_sort_path(), create_unique_path(), enable_incremental_sort, estimate_num_groups(), PathTarget::exprs, fb(), generate_useful_gather_paths(), is_parallel_safe(), lfirst, linitial, list_length(), makeNode, NIL, pathkeys_count_contained_in(), RelOptInfo::reltarget, root, Path::rows, SpecialJoinInfo::semi_can_btree, SpecialJoinInfo::semi_can_hash, SpecialJoinInfo::semi_rhs_exprs, and set_cheapest().

Referenced by create_unique_paths().

◆ create_partitionwise_grouping_paths()

static void create_partitionwise_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo grouped_rel,
RelOptInfo partially_grouped_rel,
const AggClauseCosts agg_costs,
grouping_sets_data gd,
PartitionwiseAggregateType  patype,
GroupPathExtraData extra 
)
static

Definition at line 8190 of file planner.c.

8198{
8199 List *grouped_live_children = NIL;
8200 List *partially_grouped_live_children = NIL;
8201 PathTarget *target = grouped_rel->reltarget;
8202 bool partial_grouping_valid = true;
8203 int i;
8204
8205 Assert(patype != PARTITIONWISE_AGGREGATE_NONE);
8206 Assert(patype != PARTITIONWISE_AGGREGATE_PARTIAL ||
8207 partially_grouped_rel != NULL);
8208
8209 /* Add paths for partitionwise aggregation/grouping. */
8210 i = -1;
8211 while ((i = bms_next_member(input_rel->live_parts, i)) >= 0)
8212 {
8213 RelOptInfo *child_input_rel = input_rel->part_rels[i];
8214 PathTarget *child_target;
8215 AppendRelInfo **appinfos;
8216 int nappinfos;
8217 GroupPathExtraData child_extra;
8218 RelOptInfo *child_grouped_rel;
8219 RelOptInfo *child_partially_grouped_rel;
8220
8221 Assert(child_input_rel != NULL);
8222
8223 /* Dummy children can be ignored. */
8224 if (IS_DUMMY_REL(child_input_rel))
8225 continue;
8226
8227 child_target = copy_pathtarget(target);
8228
8229 /*
8230 * Copy the given "extra" structure as is and then override the
8231 * members specific to this child.
8232 */
8233 memcpy(&child_extra, extra, sizeof(child_extra));
8234
8235 appinfos = find_appinfos_by_relids(root, child_input_rel->relids,
8236 &nappinfos);
8237
8238 child_target->exprs = (List *)
8239 adjust_appendrel_attrs(root,
8240 (Node *) target->exprs,
8241 nappinfos, appinfos);
8242
8243 /* Translate havingQual and targetList. */
8244 child_extra.havingQual = (Node *)
8245 adjust_appendrel_attrs(root,
8246 extra->havingQual,
8247 nappinfos, appinfos);
8248 child_extra.targetList = (List *)
8249 adjust_appendrel_attrs(root,
8250 (Node *) extra->targetList,
8251 nappinfos, appinfos);
8252
8253 /*
8254 * extra->patype was the value computed for our parent rel; patype is
8255 * the value for this relation. For the child, our value is its
8256 * parent rel's value.
8257 */
8258 child_extra.patype = patype;
8259
8260 /*
8261 * Create grouping relation to hold fully aggregated grouping and/or
8262 * aggregation paths for the child.
8263 */
8264 child_grouped_rel = make_grouping_rel(root, child_input_rel,
8265 child_target,
8266 extra->target_parallel_safe,
8267 child_extra.havingQual);
8268
8269 /* Create grouping paths for this child relation. */
8270 create_ordinary_grouping_paths(root, child_input_rel,
8271 child_grouped_rel,
8272 agg_costs, gd, &child_extra,
8273 &child_partially_grouped_rel);
8274
8275 if (child_partially_grouped_rel)
8276 {
8277 partially_grouped_live_children =
8278 lappend(partially_grouped_live_children,
8279 child_partially_grouped_rel);
8280 }
8281 else
8282 partial_grouping_valid = false;
8283
8284 if (patype == PARTITIONWISE_AGGREGATE_FULL)
8285 {
8286 set_cheapest(child_grouped_rel);
8287 grouped_live_children = lappend(grouped_live_children,
8288 child_grouped_rel);
8289 }
8290
8291 pfree(appinfos);
8292 }
8293
8294 /*
8295 * Try to create append paths for partially grouped children. For full
8296 * partitionwise aggregation, we might have paths in the partial_pathlist
8297 * if parallel aggregation is possible. For partial partitionwise
8298 * aggregation, we may have paths in both pathlist and partial_pathlist.
8299 *
8300 * NB: We must have a partially grouped path for every child in order to
8301 * generate a partially grouped path for this relation.
8302 */
8303 if (partially_grouped_rel && partial_grouping_valid)
8304 {
8305 Assert(partially_grouped_live_children != NIL);
8306
8307 add_paths_to_append_rel(root, partially_grouped_rel,
8308 partially_grouped_live_children);
8309 }
8310
8311 /* If possible, create append paths for fully grouped children. */
8312 if (patype == PARTITIONWISE_AGGREGATE_FULL)
8313 {
8314 Assert(grouped_live_children != NIL);
8315
8316 add_paths_to_append_rel(root, grouped_rel, grouped_live_children);
8317 }
8318}

References add_paths_to_append_rel(), adjust_appendrel_attrs(), Assert, bms_next_member(), copy_pathtarget(), create_ordinary_grouping_paths(), PathTarget::exprs, fb(), find_appinfos_by_relids(), GroupPathExtraData::havingQual, i, IS_DUMMY_REL, lappend(), make_grouping_rel(), NIL, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, PARTITIONWISE_AGGREGATE_PARTIAL, pfree(), RelOptInfo::reltarget, root, set_cheapest(), GroupPathExtraData::target_parallel_safe, and GroupPathExtraData::targetList.

Referenced by create_ordinary_grouping_paths().
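
The per-child loop above can be summarized in miniature. The following standalone C sketch (illustrative only, not PostgreSQL source) models how dummy children are skipped and how a single child that fails to produce a partially grouped relation invalidates the parent's partially grouped append path:

#include <stdio.h>
#include <stdbool.h>

typedef struct Child
{
    bool is_dummy;              /* proven empty, e.g. by constraint exclusion */
    bool has_partial_grouping;  /* child built a partially grouped rel */
} Child;

int
main(void)
{
    Child children[] = {
        {false, true},
        {true, false},          /* dummy: ignored entirely */
        {false, false},         /* no partially grouped rel here... */
    };
    bool partial_grouping_valid = true;

    for (int i = 0; i < 3; i++)
    {
        if (children[i].is_dummy)
            continue;           /* the IS_DUMMY_REL() check above */
        if (!children[i].has_partial_grouping)
            partial_grouping_valid = false;     /* ...so the parent gets none */
    }
    printf("partially grouped append possible: %d\n", partial_grouping_valid);
    return 0;
}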

◆ create_unique_paths()

RelOptInfo * create_unique_paths ( PlannerInfo root,
RelOptInfo rel,
SpecialJoinInfo sjinfo 
)

Definition at line 8470 of file planner.c.

8471{
8472 RelOptInfo *unique_rel;
8473 List *sortPathkeys = NIL;
8474 List *groupClause = NIL;
8475 MemoryContext oldcontext;
8476
8477 /* Caller made a mistake if SpecialJoinInfo is the wrong one */
8478 Assert(sjinfo->jointype == JOIN_SEMI);
8479 Assert(bms_equal(rel->relids, sjinfo->syn_righthand));
8480
8481 /* If result already cached, return it */
8482 if (rel->unique_rel)
8483 return rel->unique_rel;
8484
8485 /* If it's not possible to unique-ify, return NULL */
8486 if (!(sjinfo->semi_can_btree || sjinfo->semi_can_hash))
8487 return NULL;
8488
8489 /*
8490 * Punt if this is a child relation and we failed to build a unique-ified
8491 * relation for its parent. This can happen if all the RHS columns were
8492 * found to be equated to constants when unique-ifying the parent table,
8493 * leaving no columns to unique-ify.
8494 */
8495 if (IS_OTHER_REL(rel) && rel->top_parent->unique_rel == NULL)
8496 return NULL;
8497
8498 /*
8499 * When called during GEQO join planning, we are in a short-lived memory
8500 * context. We must make sure that the unique rel and any subsidiary data
8501 * structures created for a baserel survive the GEQO cycle, else the
8502 * baserel is trashed for future GEQO cycles. On the other hand, when we
8503 * are creating those for a joinrel during GEQO, we don't want them to
8504 * clutter the main planning context. Upshot is that the best solution is
8505 * to explicitly allocate memory in the same context the given RelOptInfo
8506 * is in.
8507 */
8508 oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
8509
8510 unique_rel = makeNode(RelOptInfo);
8511 memcpy(unique_rel, rel, sizeof(RelOptInfo));
8512
8513 /*
8514 * clear path info
8515 */
8516 unique_rel->pathlist = NIL;
8517 unique_rel->ppilist = NIL;
8518 unique_rel->partial_pathlist = NIL;
8519 unique_rel->cheapest_startup_path = NULL;
8520 unique_rel->cheapest_total_path = NULL;
8521 unique_rel->cheapest_parameterized_paths = NIL;
8522
8523 /*
8524 * Build the target list for the unique rel. We also build the pathkeys
8525 * that represent the ordering requirements for the sort-based
8526 * implementation, and the list of SortGroupClause nodes that represent
8527 * the columns to be grouped on for the hash-based implementation.
8528 *
8529 * For a child rel, we can construct these fields from those of its
8530 * parent.
8531 */
8532 if (IS_OTHER_REL(rel))
8533 {
8534 PathTarget *parent_unique_target;
8535 PathTarget *child_unique_target;
8536
8537 parent_unique_target = rel->top_parent->unique_rel->reltarget;
8538
8539 child_unique_target = copy_pathtarget(parent_unique_target);
8540
8541 /* Translate the target expressions */
8542 child_unique_target->exprs = (List *)
8544 (Node *) parent_unique_target->exprs,
8545 rel,
8546 rel->top_parent);
8547
8548 unique_rel->reltarget = child_unique_target;
8549
8550 sortPathkeys = rel->top_parent->unique_pathkeys;
8551 groupClause = rel->top_parent->unique_groupclause;
8552 }
8553 else
8554 {
8555 List *newtlist;
8556 int nextresno;
8557 List *sortList = NIL;
8558 ListCell *lc1;
8559 ListCell *lc2;
8560
8561 /*
8562 * The values we are supposed to unique-ify may be expressions in the
8563 * variables of the input rel's targetlist. We have to add any such
8564 * expressions to the unique rel's targetlist.
8565 *
8566 * To complicate matters, some of the values to be unique-ified may be
8567 * known redundant by the EquivalenceClass machinery (e.g., because
8568 * they have been equated to constants). There is no need to compare
8569 * such values during unique-ification, and indeed we had better not
8570 * try because the Vars involved may not have propagated as high as
8571 * the semijoin's level. We use make_pathkeys_for_sortclauses to
8572 * detect such cases, which is a tad inefficient but it doesn't seem
8573 * worth building specialized infrastructure for this.
8574 */
8575 newtlist = make_tlist_from_pathtarget(rel->reltarget);
8576 nextresno = list_length(newtlist) + 1;
8577
8578 forboth(lc1, sjinfo->semi_rhs_exprs, lc2, sjinfo->semi_operators)
8579 {
8580 Expr *uniqexpr = lfirst(lc1);
8581 Oid in_oper = lfirst_oid(lc2);
8582 Oid sortop;
8583 TargetEntry *tle;
8584 bool made_tle = false;
8585
8586 tle = tlist_member(uniqexpr, newtlist);
8587 if (!tle)
8588 {
8589 tle = makeTargetEntry((Expr *) uniqexpr,
8590 nextresno,
8591 NULL,
8592 false);
8593 newtlist = lappend(newtlist, tle);
8594 nextresno++;
8595 made_tle = true;
8596 }
8597
8598 /*
8599 * Try to build an ORDER BY list to sort the input compatibly. We
8600 * do this for each sortable clause even when the clauses are not
8601 * all sortable, so that we can detect clauses that are redundant
8602 * according to the pathkey machinery.
8603 */
8604 sortop = get_ordering_op_for_equality_op(in_oper, false);
8605 if (OidIsValid(sortop))
8606 {
8607 Oid eqop;
8608 SortGroupClause *sortcl;
8609
8610 /*
8611 * The Unique node will need equality operators. Normally
8612 * these are the same as the IN clause operators, but if those
8613 * are cross-type operators then the equality operators are
8614 * the ones for the IN clause operators' RHS datatype.
8615 */
8616 eqop = get_equality_op_for_ordering_op(sortop, NULL);
8617 if (!OidIsValid(eqop)) /* shouldn't happen */
8618 elog(ERROR, "could not find equality operator for ordering operator %u",
8619 sortop);
8620
8621 sortcl = makeNode(SortGroupClause);
8622 sortcl->tleSortGroupRef = assignSortGroupRef(tle, newtlist);
8623 sortcl->eqop = eqop;
8624 sortcl->sortop = sortop;
8625 sortcl->reverse_sort = false;
8626 sortcl->nulls_first = false;
8627 sortcl->hashable = false; /* no need to make this accurate */
8628 sortList = lappend(sortList, sortcl);
8629
8630 /*
8631 * At each step, convert the SortGroupClause list to pathkey
8632 * form. If the just-added SortGroupClause is redundant, the
8633 * result will be shorter than the SortGroupClause list.
8634 */
8635 sortPathkeys = make_pathkeys_for_sortclauses(root, sortList,
8636 newtlist);
8637 if (list_length(sortPathkeys) != list_length(sortList))
8638 {
8639 /* Drop the redundant SortGroupClause */
8640 sortList = list_delete_last(sortList);
8641 Assert(list_length(sortPathkeys) == list_length(sortList));
8642 /* Undo tlist addition, if we made one */
8643 if (made_tle)
8644 {
8645 newtlist = list_delete_last(newtlist);
8646 nextresno--;
8647 }
8648 /* We need not consider this clause for hashing, either */
8649 continue;
8650 }
8651 }
8652 else if (sjinfo->semi_can_btree) /* shouldn't happen */
8653 elog(ERROR, "could not find ordering operator for equality operator %u",
8654 in_oper);
8655
8656 if (sjinfo->semi_can_hash)
8657 {
8658 /* Create a GROUP BY list for the Agg node to use */
8659 Oid eq_oper;
8660 SortGroupClause *groupcl;
8661
8662 /*
8663 * Get the hashable equality operators for the Agg node to
8664 * use. Normally these are the same as the IN clause
8665 * operators, but if those are cross-type operators then the
8666 * equality operators are the ones for the IN clause
8667 * operators' RHS datatype.
8668 */
8669 if (!get_compatible_hash_operators(in_oper, NULL, &eq_oper))
8670 elog(ERROR, "could not find compatible hash operator for operator %u",
8671 in_oper);
8672
8673 groupcl = makeNode(SortGroupClause);
8674 groupcl->tleSortGroupRef = assignSortGroupRef(tle, newtlist);
8675 groupcl->eqop = eq_oper;
8676 groupcl->sortop = sortop;
8677 groupcl->reverse_sort = false;
8678 groupcl->nulls_first = false;
8679 groupcl->hashable = true;
8680 groupClause = lappend(groupClause, groupcl);
8681 }
8682 }
8683
8684 /*
8685 * Done building the sortPathkeys and groupClause. But the
8686 * sortPathkeys are bogus if not all the clauses were sortable.
8687 */
8688 if (!sjinfo->semi_can_btree)
8689 sortPathkeys = NIL;
8690
8691 /*
8692 * It can happen that all the RHS columns are equated to constants.
8693 * We'd have to do something special to unique-ify in that case, and
8694 * it's such an unlikely-in-the-real-world case that it's not worth
8695 * the effort. So just punt if we found no columns to unique-ify.
8696 */
8697 if (sortPathkeys == NIL && groupClause == NIL)
8698 {
8699 MemoryContextSwitchTo(oldcontext);
8700 return NULL;
8701 }
8702
8703 /* Convert the required targetlist back to PathTarget form */
8704 unique_rel->reltarget = create_pathtarget(root, newtlist);
8705 }
8706
8707 /* build unique paths based on input rel's pathlist */
8708 create_final_unique_paths(root, rel, sortPathkeys, groupClause,
8709 sjinfo, unique_rel);
8710
8711 /* build unique paths based on input rel's partial_pathlist */
8712 create_partial_unique_paths(root, rel, sortPathkeys, groupClause,
8713 sjinfo, unique_rel);
8714
8715 /* Now choose the best path(s) */
8716 set_cheapest(unique_rel);
8717
8718 /*
8719 * There shouldn't be any partial paths for the unique relation;
8720 * otherwise, we won't be able to properly guarantee uniqueness.
8721 */
8722 Assert(unique_rel->partial_pathlist == NIL);
8723
8724 /* Cache the result */
8725 rel->unique_rel = unique_rel;
8726 rel->unique_pathkeys = sortPathkeys;
8727 rel->unique_groupclause = groupClause;
8728
8729 MemoryContextSwitchTo(oldcontext);
8730
8731 return unique_rel;
8732}
Node * adjust_appendrel_attrs_multilevel(PlannerInfo *root, Node *node, RelOptInfo *childrel, RelOptInfo *parentrel)
Definition appendinfo.c:592
bool bms_equal(const Bitmapset *a, const Bitmapset *b)
Definition bitmapset.c:142
#define OidIsValid(objectId)
Definition c.h:788
#define elog(elevel,...)
Definition elog.h:226
List * list_delete_last(List *list)
Definition list.c:957
bool get_compatible_hash_operators(Oid opno, Oid *lhs_opno, Oid *rhs_opno)
Definition lsyscache.c:475
Oid get_equality_op_for_ordering_op(Oid opno, bool *reverse)
Definition lsyscache.c:324
Oid get_ordering_op_for_equality_op(Oid opno, bool use_lhs_type)
Definition lsyscache.c:362
TargetEntry * makeTargetEntry(Expr *expr, AttrNumber resno, char *resname, bool resjunk)
Definition makefuncs.c:289
MemoryContext GetMemoryChunkContext(void *pointer)
Definition mcxt.c:756
@ JOIN_SEMI
Definition nodes.h:317
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition palloc.h:124
Index assignSortGroupRef(TargetEntry *tle, List *tlist)
#define lfirst_oid(lc)
Definition pg_list.h:174
static void create_partial_unique_paths(PlannerInfo *root, RelOptInfo *input_rel, List *sortPathkeys, List *groupClause, SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel)
Definition planner.c:8864
unsigned int Oid
List * ppilist
Definition pathnodes.h:1033
List * unique_pathkeys
Definition pathnodes.h:1116
List * cheapest_parameterized_paths
Definition pathnodes.h:1037
List * unique_groupclause
Definition pathnodes.h:1118
struct RelOptInfo * unique_rel
Definition pathnodes.h:1114
JoinType jointype
Definition pathnodes.h:3199
Relids syn_righthand
Definition pathnodes.h:3198
List * semi_operators
Definition pathnodes.h:3209
TargetEntry * tlist_member(Expr *node, List *targetlist)
Definition tlist.c:88
List * make_tlist_from_pathtarget(PathTarget *target)
Definition tlist.c:633
#define create_pathtarget(root, tlist)
Definition tlist.h:58

References adjust_appendrel_attrs_multilevel(), Assert, assignSortGroupRef(), bms_equal(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, copy_pathtarget(), create_final_unique_paths(), create_partial_unique_paths(), create_pathtarget, elog, ERROR, fb(), forboth, get_compatible_hash_operators(), get_equality_op_for_ordering_op(), get_ordering_op_for_equality_op(), GetMemoryChunkContext(), IS_OTHER_REL, JOIN_SEMI, SpecialJoinInfo::jointype, lappend(), lfirst, lfirst_oid, list_delete_last(), list_length(), make_pathkeys_for_sortclauses(), make_tlist_from_pathtarget(), makeNode, makeTargetEntry(), MemoryContextSwitchTo(), NIL, OidIsValid, RelOptInfo::partial_pathlist, RelOptInfo::pathlist, RelOptInfo::ppilist, RelOptInfo::relids, RelOptInfo::reltarget, root, SpecialJoinInfo::semi_can_btree, SpecialJoinInfo::semi_can_hash, SpecialJoinInfo::semi_operators, SpecialJoinInfo::semi_rhs_exprs, set_cheapest(), SpecialJoinInfo::syn_righthand, tlist_member(), RelOptInfo::unique_groupclause, RelOptInfo::unique_pathkeys, and RelOptInfo::unique_rel.

Referenced by join_is_legal(), and populate_joinrel_with_paths().
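
The redundancy handling in the non-child branch is the subtle part: a clause is kept only if converting the accumulated SortGroupClause list to pathkey form actually lengthens the canonical key list. A standalone C model of that idea (illustrative, not PostgreSQL source), with columns equated to constants and duplicate columns standing in for the cases the EquivalenceClass machinery detects:

#include <stdio.h>
#include <stdbool.h>

#define NCOLS 8

int
main(void)
{
    int semi_rhs_cols[] = {1, 2, 1, 3};     /* columns to unique-ify */
    bool is_constant[NCOLS] = {false};      /* cols equated to constants */
    bool already_present[NCOLS] = {false};
    int keys[NCOLS];
    int nkeys = 0;

    is_constant[3] = true;                  /* say col 3 = some constant */

    for (int i = 0; i < 4; i++)
    {
        int col = semi_rhs_cols[i];

        /* Mirrors "the pathkey list did not grow, so drop the clause". */
        if (is_constant[col] || already_present[col])
        {
            printf("col %d is redundant, dropped\n", col);
            continue;
        }
        already_present[col] = true;
        keys[nkeys++] = col;
    }

    for (int i = 0; i < nkeys; i++)         /* cols 1 and 2 survive */
        printf("unique-ify on col %d\n", keys[i]);
    return 0;
}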

◆ create_window_paths()

static RelOptInfo * create_window_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget input_target,
PathTarget output_target,
bool  output_target_parallel_safe,
WindowFuncLists wflists,
List activeWindows 
)
static

Definition at line 4603 of file planner.c.

4610{
4611 RelOptInfo *window_rel;
4612 ListCell *lc;
4613
4614 /* For now, do all work in the (WINDOW, NULL) upperrel */
4615 window_rel = fetch_upper_rel(root, UPPERREL_WINDOW, NULL);
4616
4617 /*
4618 * If the input relation is not parallel-safe, then the window relation
4619 * can't be parallel-safe, either. Otherwise, we need to examine the
4620 * target list and active windows for non-parallel-safe constructs.
4621 */
4622 if (input_rel->consider_parallel && output_target_parallel_safe &&
4623 is_parallel_safe(root, (Node *) activeWindows))
4624 window_rel->consider_parallel = true;
4625
4626 /*
4627 * If the input rel belongs to a single FDW, so does the window rel.
4628 */
4629 window_rel->serverid = input_rel->serverid;
4630 window_rel->userid = input_rel->userid;
4631 window_rel->useridiscurrent = input_rel->useridiscurrent;
4632 window_rel->fdwroutine = input_rel->fdwroutine;
4633
4634 /*
4635 * Consider computing window functions starting from the existing
4636 * cheapest-total path (which will likely require a sort) as well as any
4637 * existing paths that satisfy or partially satisfy root->window_pathkeys.
4638 */
4639 foreach(lc, input_rel->pathlist)
4640 {
4641 Path *path = (Path *) lfirst(lc);
4642 int presorted_keys;
4643
4644 if (path == input_rel->cheapest_total_path ||
4645 pathkeys_count_contained_in(root->window_pathkeys, path->pathkeys,
4646 &presorted_keys) ||
4647 presorted_keys > 0)
4648 create_one_window_path(root,
4649 window_rel,
4650 path,
4651 input_target,
4652 output_target,
4653 wflists,
4654 activeWindows);
4655 }
4656
4657 /*
4658 * If there is an FDW that's responsible for all baserels of the query,
4659 * let it consider adding ForeignPaths.
4660 */
4661 if (window_rel->fdwroutine &&
4662 window_rel->fdwroutine->GetForeignUpperPaths)
4663 window_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_WINDOW,
4664 input_rel, window_rel,
4665 NULL);
4666
4667 /* Let extensions possibly add some more paths */
4668 if (create_upper_paths_hook)
4669 (*create_upper_paths_hook) (root, UPPERREL_WINDOW,
4670 input_rel, window_rel, NULL);
4671
4672 /* Now choose the best path(s) */
4673 set_cheapest(window_rel);
4674
4675 return window_rel;
4676}
@ UPPERREL_WINDOW
Definition pathnodes.h:148
static void create_one_window_path(PlannerInfo *root, RelOptInfo *window_rel, Path *path, PathTarget *input_target, PathTarget *output_target, WindowFuncLists *wflists, List *activeWindows)
Definition planner.c:4690

References create_one_window_path(), create_upper_paths_hook, fb(), fetch_upper_rel(), is_parallel_safe(), lfirst, Path::pathkeys, pathkeys_count_contained_in(), root, set_cheapest(), and UPPERREL_WINDOW.

Referenced by grouping_planner().
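
Path selection above hinges on pathkeys_count_contained_in(): a path is considered if it is the cheapest-total path, fully satisfies root->window_pathkeys, or provides a useful presorted prefix. A standalone C model of the prefix count (illustrative, not PostgreSQL source; real pathkeys are canonical nodes compared by pointer, plain ints stand in for them here):

#include <stdio.h>
#include <stdbool.h>

/* Count how many leading required keys the path already provides. */
static bool
count_contained_in(const int *needed, int nneeded,
                   const int *provided, int nprovided, int *presorted_keys)
{
    int n = 0;

    while (n < nneeded && n < nprovided && needed[n] == provided[n])
        n++;
    *presorted_keys = n;
    return n == nneeded;        /* fully sorted already? */
}

int
main(void)
{
    int window_keys[] = {1, 2, 3};  /* PARTITION BY / ORDER BY keys */
    int path_keys[] = {1, 2};       /* ordering of an existing path */
    int presorted;
    bool is_sorted = count_contained_in(window_keys, 3,
                                        path_keys, 2, &presorted);

    /* presorted == 2: worth considering, via incremental sort */
    printf("is_sorted=%d presorted_keys=%d\n", is_sorted, presorted);
    return 0;
}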

◆ expression_planner()

Expr * expression_planner ( Expr expr)

Definition at line 6817 of file planner.c.

6818{
6819 Node *result;
6820
6821 /*
6822 * Convert named-argument function calls, insert default arguments and
6823 * simplify constant subexprs
6824 */
6825 result = eval_const_expressions(NULL, (Node *) expr);
6826
6827 /* Fill in opfuncid values if missing */
6828 fix_opfuncids(result);
6829
6830 return (Expr *) result;
6831}
Node * eval_const_expressions(PlannerInfo *root, Node *node)
Definition clauses.c:2267
void fix_opfuncids(Node *node)
Definition nodeFuncs.c:1840

References eval_const_expressions(), fb(), and fix_opfuncids().

Referenced by ATExecAddColumn(), ATExecSetExpression(), ATPrepAlterColumnType(), BeginCopyFrom(), ComputePartitionAttrs(), contain_mutable_functions_after_planning(), contain_volatile_functions_after_planning(), createTableConstraints(), ExecPrepareCheck(), ExecPrepareExpr(), ExecPrepareQual(), load_domaintype_info(), set_baserel_partition_constraint(), slot_fill_defaults(), and transformPartitionBoundValue().
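
The effect of the constant-simplification step can be pictured with a toy model. This standalone C sketch (illustrative only; the real work is done by eval_const_expressions() over Node trees) folds all-constant subtrees bottom-up, which is what turns an expression like 2 + 2 * 3 into the single constant 8 before execution:

#include <stdio.h>

typedef struct Expr
{
    char op;                    /* '+', '*', or 0 for a constant leaf */
    int value;                  /* valid when op == 0 */
    struct Expr *l, *r;
} Expr;

/* Fold an expression bottom-up; returns 1 if *e became a constant. */
static int
fold(Expr *e)
{
    if (e->op == 0)
        return 1;
    if (fold(e->l) && fold(e->r))
    {
        e->value = (e->op == '+') ? e->l->value + e->r->value
                                  : e->l->value * e->r->value;
        e->op = 0;              /* replace the operator node by a "Const" */
        return 1;
    }
    return 0;
}

int
main(void)
{
    /* 2 + 2 * 3, all constants: folds to a single constant 8 */
    Expr two = {0, 2}, twob = {0, 2}, three = {0, 3};
    Expr mul = {'*', 0, &twob, &three};
    Expr add = {'+', 0, &two, &mul};

    fold(&add);
    printf("folded: op=%d value=%d\n", add.op, add.value);
    return 0;
}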

◆ expression_planner_with_deps()

Expr * expression_planner_with_deps ( Expr expr,
List **  relationOids,
List **  invalItems 
)

Definition at line 6844 of file planner.c.

6847{
6848 Node *result;
6849 PlannerGlobal glob;
6851
6852 /* Make up dummy planner state so we can use setrefs machinery */
6853 MemSet(&glob, 0, sizeof(glob));
6854 glob.type = T_PlannerGlobal;
6855 glob.relationOids = NIL;
6856 glob.invalItems = NIL;
6857
6858 MemSet(&root, 0, sizeof(root));
6859 root.type = T_PlannerInfo;
6860 root.glob = &glob;
6861
6862 /*
6863 * Convert named-argument function calls, insert default arguments and
6864 * simplify constant subexprs. Collect identities of inlined functions
6865 * and elided domains, too.
6866 */
6867 result = eval_const_expressions(&root, (Node *) expr);
6868
6869 /* Fill in opfuncid values if missing */
6870 fix_opfuncids(result);
6871
6872 /*
6873 * Now walk the finished expression to find anything else we ought to
6874 * record as an expression dependency.
6875 */
6876 (void) extract_query_dependencies_walker(result, &root);
6877
6878 *relationOids = glob.relationOids;
6879 *invalItems = glob.invalItems;
6880
6881 return (Expr *) result;
6882}
bool extract_query_dependencies_walker(Node *node, PlannerInfo *context)
Definition setrefs.c:3692
List * invalItems
Definition pathnodes.h:227
List * relationOids
Definition pathnodes.h:224

References eval_const_expressions(), extract_query_dependencies_walker(), fb(), fix_opfuncids(), PlannerGlobal::invalItems, MemSet, NIL, PlannerGlobal::relationOids, and root.

Referenced by GetCachedExpression().

◆ extract_rollup_sets()

static List * extract_rollup_sets ( List groupingSets)
static

Definition at line 3000 of file planner.c.

3001{
3002 int num_sets_raw = list_length(groupingSets);
3003 int num_empty = 0;
3004 int num_sets = 0; /* distinct sets */
3005 int num_chains = 0;
3006 List *result = NIL;
3007 List **results;
3008 List **orig_sets;
3009 Bitmapset **set_masks;
3010 int *chains;
3011 short **adjacency;
3012 short *adjacency_buf;
3013 BipartiteMatchState *state;
3014 int i;
3015 int j;
3016 int j_size;
3017 ListCell *lc1 = list_head(groupingSets);
3018 ListCell *lc;
3019
3020 /*
3021 * Start by stripping out empty sets. The algorithm doesn't require this,
3022 * but the planner currently needs all empty sets to be returned in the
3023 * first list, so we strip them here and add them back after.
3024 */
3025 while (lc1 && lfirst(lc1) == NIL)
3026 {
3027 ++num_empty;
3028 lc1 = lnext(groupingSets, lc1);
3029 }
3030
3031 /* bail out now if it turns out that all we had were empty sets. */
3032 if (!lc1)
3033 return list_make1(groupingSets);
3034
3035 /*----------
3036 * We don't strictly need to remove duplicate sets here, but if we don't,
3037 * they tend to become scattered through the result, which is a bit
3038 * confusing (and irritating if we ever decide to optimize them out).
3039 * So we remove them here and add them back after.
3040 *
3041 * For each non-duplicate set, we fill in the following:
3042 *
3043 * orig_sets[i] = list of the original set lists
3044 * set_masks[i] = bitmapset for testing inclusion
3045 * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
3046 *
3047 * chains[i] will be the result group this set is assigned to.
3048 *
3049 * We index all of these from 1 rather than 0 because it is convenient
3050 * to leave 0 free for the NIL node in the graph algorithm.
3051 *----------
3052 */
3053 orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
3054 set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
3055 adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
3056 adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
3057
3058 j_size = 0;
3059 j = 0;
3060 i = 1;
3061
3062 for_each_cell(lc, groupingSets, lc1)
3063 {
3064 List *candidate = (List *) lfirst(lc);
3065 Bitmapset *candidate_set = NULL;
3066 ListCell *lc2;
3067 int dup_of = 0;
3068
3069 foreach(lc2, candidate)
3070 {
3071 candidate_set = bms_add_member(candidate_set, lfirst_int(lc2));
3072 }
3073
3074 /* we can only be a dup if we're the same length as a previous set */
3075 if (j_size == list_length(candidate))
3076 {
3077 int k;
3078
3079 for (k = j; k < i; ++k)
3080 {
3081 if (bms_equal(set_masks[k], candidate_set))
3082 {
3083 dup_of = k;
3084 break;
3085 }
3086 }
3087 }
3088 else if (j_size < list_length(candidate))
3089 {
3090 j_size = list_length(candidate);
3091 j = i;
3092 }
3093
3094 if (dup_of > 0)
3095 {
3096 orig_sets[dup_of] = lappend(orig_sets[dup_of], candidate);
3097 bms_free(candidate_set);
3098 }
3099 else
3100 {
3101 int k;
3102 int n_adj = 0;
3103
3104 orig_sets[i] = list_make1(candidate);
3105 set_masks[i] = candidate_set;
3106
3107 /* fill in adjacency list; no need to compare equal-size sets */
3108
3109 for (k = j - 1; k > 0; --k)
3110 {
3111 if (bms_is_subset(set_masks[k], candidate_set))
3112 adjacency_buf[++n_adj] = k;
3113 }
3114
3115 if (n_adj > 0)
3116 {
3117 adjacency_buf[0] = n_adj;
3118 adjacency[i] = palloc((n_adj + 1) * sizeof(short));
3119 memcpy(adjacency[i], adjacency_buf, (n_adj + 1) * sizeof(short));
3120 }
3121 else
3122 adjacency[i] = NULL;
3123
3124 ++i;
3125 }
3126 }
3127
3128 num_sets = i - 1;
3129
3130 /*
3131 * Apply the graph matching algorithm to do the work.
3132 */
3133 state = BipartiteMatch(num_sets, num_sets, adjacency);
3134
3135 /*
3136 * Now, the state->pair* fields have the info we need to assign sets to
3137 * chains. Two sets (u,v) belong to the same chain if pair_uv[u] = v or
3138 * pair_vu[v] = u (both will be true, but we check both so that we can do
3139 * it in one pass)
3140 */
3141 chains = palloc0((num_sets + 1) * sizeof(int));
3142
3143 for (i = 1; i <= num_sets; ++i)
3144 {
3145 int u = state->pair_vu[i];
3146 int v = state->pair_uv[i];
3147
3148 if (u > 0 && u < i)
3149 chains[i] = chains[u];
3150 else if (v > 0 && v < i)
3151 chains[i] = chains[v];
3152 else
3153 chains[i] = ++num_chains;
3154 }
3155
3156 /* build result lists. */
3157 results = palloc0((num_chains + 1) * sizeof(List *));
3158
3159 for (i = 1; i <= num_sets; ++i)
3160 {
3161 int c = chains[i];
3162
3163 Assert(c > 0);
3164
3165 results[c] = list_concat(results[c], orig_sets[i]);
3166 }
3167
3168 /* push any empty sets back on the first list. */
3169 while (num_empty-- > 0)
3170 results[1] = lcons(NIL, results[1]);
3171
3172 /* make result list */
3173 for (i = 1; i <= num_chains; ++i)
3174 result = lappend(result, results[i]);
3175
3176 /*
3177 * Free all the things.
3178 *
3179 * (This is over-fussy for small sets but for large sets we could have
3180 * tied up a nontrivial amount of memory.)
3181 */
3182 BipartiteMatchFree(state);
3183 pfree(results);
3184 pfree(chains);
3185 for (i = 1; i <= num_sets; ++i)
3186 if (adjacency[i])
3187 pfree(adjacency[i]);
3188 pfree(adjacency);
3189 pfree(adjacency_buf);
3190 pfree(orig_sets);
3191 for (i = 1; i <= num_sets; ++i)
3192 bms_free(set_masks[i]);
3193 pfree(set_masks);
3194
3195 return result;
3196}
BipartiteMatchState * BipartiteMatch(int u_size, int v_size, short **adjacency)
void BipartiteMatchFree(BipartiteMatchState *state)
bool bms_is_subset(const Bitmapset *a, const Bitmapset *b)
Definition bitmapset.c:412
void bms_free(Bitmapset *a)
Definition bitmapset.c:239
int j
Definition isn.c:78
void * palloc0(Size size)
Definition mcxt.c:1417
char * c

References Assert, BipartiteMatch(), BipartiteMatchFree(), bms_add_member(), bms_equal(), bms_free(), bms_is_subset(), fb(), for_each_cell, i, j, lappend(), lcons(), lfirst, lfirst_int, list_concat(), list_head(), list_length(), list_make1, lnext(), NIL, palloc(), palloc0(), and pfree().

Referenced by preprocess_grouping_sets().
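
To see what the function computes: GROUPING SETS ((a,b,c), (a,b), (a), (c,d), (c)) decomposes into two chains ordered by set inclusion, (a,b,c) ⊇ (a,b) ⊇ (a) and (c,d) ⊇ (c), each of which can then be evaluated as a single rollup. The standalone C sketch below (illustrative, not PostgreSQL source) reproduces that decomposition with bitmasks and a greedy pass; it assumes larger sets arrive first, whereas the bipartite-matching algorithm above needs no such assumption and guarantees a minimal number of chains:

#include <stdio.h>

int
main(void)
{
    /* GROUPING SETS (a,b,c) (a,b) (a) (c,d) (c); a=0x1 b=0x2 c=0x4 d=0x8 */
    unsigned sets[] = {0x7, 0x3, 0x1, 0xC, 0x4};
    unsigned chain_tail[5];     /* smallest set seen so far in each chain */
    int chain_of[5];
    int nchains = 0;

    for (int i = 0; i < 5; i++)
    {
        int c;

        /* attach to a chain whose tail is a superset of this set */
        for (c = 0; c < nchains; c++)
            if ((sets[i] & chain_tail[c]) == sets[i])
                break;
        if (c == nchains)
            chain_tail[nchains++] = sets[i];    /* start a new chain */
        chain_tail[c] = sets[i];
        chain_of[i] = c;
    }

    for (int i = 0; i < 5; i++)
        printf("set %#x -> chain %d\n", sets[i], chain_of[i]);
    printf("%d chains\n", nchains);             /* 2 rollups here */
    return 0;
}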

◆ gather_grouping_paths()

static void gather_grouping_paths ( PlannerInfo root,
RelOptInfo rel 
)
static

Definition at line 7824 of file planner.c.

7825{
7826 ListCell *lc;
7827 Path *cheapest_partial_path;
7828 List *groupby_pathkeys;
7829
7830 /*
7831 * This occurs after any partial aggregation has taken place, so trim off
7832 * any pathkeys added for ORDER BY / DISTINCT aggregates.
7833 */
7834 if (list_length(root->group_pathkeys) > root->num_groupby_pathkeys)
7835 groupby_pathkeys = list_copy_head(root->group_pathkeys,
7836 root->num_groupby_pathkeys);
7837 else
7838 groupby_pathkeys = root->group_pathkeys;
7839
7840 /* Try Gather for unordered paths and Gather Merge for ordered ones. */
7841 generate_useful_gather_paths(root, rel, true);
7842
7843 cheapest_partial_path = linitial(rel->partial_pathlist);
7844
7845 /* XXX Shouldn't this also consider the group-key-reordering? */
7846 foreach(lc, rel->partial_pathlist)
7847 {
7848 Path *path = (Path *) lfirst(lc);
7849 bool is_sorted;
7850 int presorted_keys;
7851 double total_groups;
7852
7854 path->pathkeys,
7855 &presorted_keys);
7856
7857 if (is_sorted)
7858 continue;
7859
7860 /*
7861 * Try at least sorting the cheapest path and also try incrementally
7862 * sorting any path which is partially sorted already (no need to deal
7863 * with paths which have presorted keys when incremental sort is
7864 * disabled unless it's the cheapest input path).
7865 */
7866 if (path != cheapest_partial_path &&
7867 (presorted_keys == 0 || !enable_incremental_sort))
7868 continue;
7869
7870 /*
7871 * We've no need to consider both a sort and incremental sort. We'll
7872 * just do a sort if there are no presorted keys and an incremental
7873 * sort when there are presorted keys.
7874 */
7875 if (presorted_keys == 0 || !enable_incremental_sort)
7876 path = (Path *) create_sort_path(root, rel, path,
7878 -1.0);
7879 else
7881 rel,
7882 path,
7884 presorted_keys,
7885 -1.0);
7886 total_groups = compute_gather_rows(path);
7887 path = (Path *)
7888 create_gather_merge_path(root,
7889 rel,
7890 path,
7891 rel->reltarget,
7892 groupby_pathkeys,
7893 NULL,
7894 &total_groups);
7895
7896 add_path(rel, path);
7897 }
7898}
List * list_copy_head(const List *oldlist, int len)
Definition list.c:1593

References add_path(), compute_gather_rows(), create_gather_merge_path(), create_incremental_sort_path(), create_sort_path(), enable_incremental_sort, fb(), generate_useful_gather_paths(), lfirst, linitial, list_copy_head(), list_length(), RelOptInfo::partial_pathlist, Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::reltarget, and root.

Referenced by add_paths_to_grouping_rel(), and create_ordinary_grouping_paths().
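
For illustration (not from the source): if GROUP BY a yielded group_pathkeys (a, b) because an ORDER BY aggregate appended b, num_groupby_pathkeys is 1 and the trimmed groupby_pathkeys list is just (a). Partial aggregation has already consumed the b ordering, so the Gather Merge paths built here only need the workers' output sorted by (a).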

◆ generate_setop_child_grouplist()

static List * generate_setop_child_grouplist ( SetOperationStmt op,
List targetlist 
)
static

Definition at line 8414 of file planner.c.

8415{
8416 List *grouplist = copyObject(op->groupClauses);
8417 ListCell *lg;
8418 ListCell *lt;
8419 ListCell *ct;
8420
8421 lg = list_head(grouplist);
8422 ct = list_head(op->colTypes);
8423 foreach(lt, targetlist)
8424 {
8425 TargetEntry *tle = (TargetEntry *) lfirst(lt);
8426 SortGroupClause *sgc;
8427 Oid coltype;
8428
8429 /* resjunk columns could have sortgrouprefs. Leave these alone */
8430 if (tle->resjunk)
8431 continue;
8432
8433 /*
8434 * We expect every non-resjunk target to have a SortGroupClause and
8435 * colTypes.
8436 */
8437 Assert(lg != NULL);
8438 Assert(ct != NULL);
8439 sgc = lfirst_node(SortGroupClause, lg);
8440 coltype = lfirst_oid(ct);
8441
8442 /* reject if target type isn't the same as the setop target type */
8443 if (coltype != exprType((Node *) tle->expr))
8444 return NIL;
8445
8446 lg = lnext(grouplist, lg);
8447 ct = lnext(op->colTypes, ct);
8448
8449 /* assign a tleSortGroupRef, or reuse the existing one */
8450 sgc->tleSortGroupRef = assignSortGroupRef(tle, targetlist);
8451 }
8452
8453 Assert(lg == NULL);
8454 Assert(ct == NULL);
8455
8456 return grouplist;
8457}
Oid exprType(const Node *expr)
Definition nodeFuncs.c:42

References Assert, assignSortGroupRef(), copyObject, exprType(), fb(), lfirst, lfirst_oid, list_head(), lnext(), and NIL.

Referenced by standard_qp_callback().

◆ get_cheapest_fractional_path()

Path * get_cheapest_fractional_path ( RelOptInfo rel,
double  tuple_fraction 
)

Definition at line 6655 of file planner.c.

6656{
6657 Path *best_path = rel->cheapest_total_path;
6658 ListCell *l;
6659
6660 /* If all tuples will be retrieved, just return the cheapest-total path */
6661 if (tuple_fraction <= 0.0)
6662 return best_path;
6663
6664 /* Convert absolute # of tuples to a fraction; no need to clamp to 0..1 */
6665 if (tuple_fraction >= 1.0 && best_path->rows > 0)
6666 tuple_fraction /= best_path->rows;
6667
6668 foreach(l, rel->pathlist)
6669 {
6670 Path *path = (Path *) lfirst(l);
6671
6672 if (path->param_info)
6673 continue;
6674
6675 if (path == rel->cheapest_total_path ||
6676 compare_fractional_path_costs(best_path, path, tuple_fraction) <= 0)
6677 continue;
6678
6679 best_path = path;
6680 }
6681
6682 return best_path;
6683}
int compare_fractional_path_costs(Path *path1, Path *path2, double fraction)
Definition pathnode.c:123

References RelOptInfo::cheapest_total_path, compare_fractional_path_costs(), fb(), lfirst, and RelOptInfo::pathlist.

Referenced by add_paths_to_append_rel(), make_subplan(), and standard_planner().
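
The comparison relies on interpolating between startup and total cost. A standalone C model (illustrative, not PostgreSQL source) of the formula used by compare_fractional_path_costs(), cost(f) = startup_cost + f * (total_cost - startup_cost), showing how a fast-startup path can win under a small LIMIT:

#include <stdio.h>

typedef struct Path
{
    const char *name;
    double startup_cost;
    double total_cost;
    double rows;
} Path;

/* Cost to fetch the given fraction of a path's output. */
static double
fractional_cost(const Path *p, double fraction)
{
    return p->startup_cost + fraction * (p->total_cost - p->startup_cost);
}

int
main(void)
{
    Path seqsort = {"sort", 100.0, 120.0, 10000.0};     /* cheapest total */
    Path idxscan = {"index scan", 0.5, 400.0, 10000.0};
    double tuple_fraction = 10.0;   /* absolute count, e.g. LIMIT 10 */

    /* absolute counts >= 1.0 are converted to a fraction of the rows */
    if (tuple_fraction >= 1.0)
        tuple_fraction /= seqsort.rows;

    printf("sort: %.2f, index scan: %.2f\n",
           fractional_cost(&seqsort, tuple_fraction),
           fractional_cost(&idxscan, tuple_fraction));
    /* index scan wins (0.90 vs 100.02) despite its higher total cost */
    return 0;
}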

◆ get_number_of_groups()

static double get_number_of_groups ( PlannerInfo root,
double  path_rows,
grouping_sets_data gd,
List target_list 
)
static

Definition at line 3734 of file planner.c.

3738{
3739 Query *parse = root->parse;
3740 double dNumGroups;
3741
3742 if (parse->groupClause)
3743 {
3744 List *groupExprs;
3745
3746 if (parse->groupingSets)
3747 {
3748 /* Add up the estimates for each grouping set */
3749 ListCell *lc;
3750
3751 Assert(gd); /* keep Coverity happy */
3752
3753 dNumGroups = 0;
3754
3755 foreach(lc, gd->rollups)
3756 {
3757 RollupData *rollup = lfirst_node(RollupData, lc);
3758 ListCell *lc2;
3759 ListCell *lc3;
3760
3761 groupExprs = get_sortgrouplist_exprs(rollup->groupClause,
3762 target_list);
3763
3764 rollup->numGroups = 0.0;
3765
3766 forboth(lc2, rollup->gsets, lc3, rollup->gsets_data)
3767 {
3768 List *gset = (List *) lfirst(lc2);
3770 double numGroups = estimate_num_groups(root,
3771 groupExprs,
3772 path_rows,
3773 &gset,
3774 NULL);
3775
3776 gs->numGroups = numGroups;
3777 rollup->numGroups += numGroups;
3778 }
3779
3780 dNumGroups += rollup->numGroups;
3781 }
3782
3783 if (gd->hash_sets_idx)
3784 {
3785 ListCell *lc2;
3786
3787 gd->dNumHashGroups = 0;
3788
3789 groupExprs = get_sortgrouplist_exprs(parse->groupClause,
3790 target_list);
3791
3792 forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets)
3793 {
3794 List *gset = (List *) lfirst(lc);
3795 GroupingSetData *gs = lfirst_node(GroupingSetData, lc2);
3796 double numGroups = estimate_num_groups(root,
3797 groupExprs,
3798 path_rows,
3799 &gset,
3800 NULL);
3801
3802 gs->numGroups = numGroups;
3803 gd->dNumHashGroups += numGroups;
3804 }
3805
3806 dNumGroups += gd->dNumHashGroups;
3807 }
3808 }
3809 else
3810 {
3811 /* Plain GROUP BY -- estimate based on optimized groupClause */
3812 groupExprs = get_sortgrouplist_exprs(root->processed_groupClause,
3813 target_list);
3814
3815 dNumGroups = estimate_num_groups(root, groupExprs, path_rows,
3816 NULL, NULL);
3817 }
3818 }
3819 else if (parse->groupingSets)
3820 {
3821 /* Empty grouping sets ... one result row for each one */
3822 dNumGroups = list_length(parse->groupingSets);
3823 }
3824 else if (parse->hasAggs || root->hasHavingQual)
3825 {
3826 /* Plain aggregation, one result row */
3827 dNumGroups = 1;
3828 }
3829 else
3830 {
3831 /* Not grouping */
3832 dNumGroups = 1;
3833 }
3834
3835 return dNumGroups;
3836}

References Assert, estimate_num_groups(), fb(), forboth, get_sortgrouplist_exprs(), lfirst, lfirst_node, list_length(), parse(), and root.

Referenced by add_paths_to_grouping_rel(), and create_partial_grouping_paths().
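
A worked example (illustrative numbers): for GROUP BY ROLLUP (a, b) there is one rollup containing the grouping sets (a, b), (a), and (); if estimate_num_groups() returns 1000 groups for (a, b) and 100 for (a), and the empty grouping set always contributes one row, the total is dNumGroups = 1000 + 100 + 1 = 1101.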

◆ get_useful_pathkeys_for_distinct()

static List * get_useful_pathkeys_for_distinct ( PlannerInfo root,
List needed_pathkeys,
List path_pathkeys 
)
static

Definition at line 5293 of file planner.c.

5295{
5296 List *useful_pathkeys_list = NIL;
5297 List *useful_pathkeys = NIL;
5298
5299 /* always include the given 'needed_pathkeys' */
5300 useful_pathkeys_list = lappend(useful_pathkeys_list,
5301 needed_pathkeys);
5302
5303 if (!enable_distinct_reordering)
5304 return useful_pathkeys_list;
5305
5306 /*
5307 * Scan the given 'path_pathkeys' and construct a list of PathKey nodes
5308 * that match 'needed_pathkeys', but only up to the longest matching
5309 * prefix.
5310 *
5311 * When we have DISTINCT ON, we must ensure that the resulting pathkey
5312 * list matches initial distinctClause pathkeys; otherwise, it won't have
5313 * the desired behavior.
5314 */
5315 foreach_node(PathKey, pathkey, path_pathkeys)
5316 {
5317 /*
5318 * The PathKey nodes are canonical, so they can be checked for
5319 * equality by simple pointer comparison.
5320 */
5321 if (!list_member_ptr(needed_pathkeys, pathkey))
5322 break;
5323 if (root->parse->hasDistinctOn &&
5324 !list_member_ptr(root->distinct_pathkeys, pathkey))
5325 break;
5326
5327 useful_pathkeys = lappend(useful_pathkeys, pathkey);
5328 }
5329
5330 /* If no match at all, no point in reordering needed_pathkeys */
5331 if (useful_pathkeys == NIL)
5332 return useful_pathkeys_list;
5333
5334 /*
5335 * If not full match, the resulting pathkey list is not useful without
5336 * incremental sort.
5337 */
5338 if (list_length(useful_pathkeys) < list_length(needed_pathkeys) &&
5339 !enable_incremental_sort)
5340 return useful_pathkeys_list;
5341
5342 /* Append the remaining PathKey nodes in needed_pathkeys */
5343 useful_pathkeys = list_concat_unique_ptr(useful_pathkeys,
5344 needed_pathkeys);
5345
5346 /*
5347 * If the resulting pathkey list is the same as the 'needed_pathkeys',
5348 * just drop it.
5349 */
5350 if (compare_pathkeys(needed_pathkeys,
5351 useful_pathkeys) == PATHKEYS_EQUAL)
5352 return useful_pathkeys_list;
5353
5354 useful_pathkeys_list = lappend(useful_pathkeys_list,
5355 useful_pathkeys);
5356
5357 return useful_pathkeys_list;
5358}
List * list_concat_unique_ptr(List *list1, const List *list2)
Definition list.c:1427
bool list_member_ptr(const List *list, const void *datum)
Definition list.c:682
bool enable_distinct_reordering
Definition planner.c:71

References compare_pathkeys(), enable_distinct_reordering, enable_incremental_sort, fb(), foreach_node, lappend(), list_concat_unique_ptr(), list_length(), list_member_ptr(), NIL, PATHKEYS_EQUAL, and root.

Referenced by create_final_distinct_paths(), and create_partial_distinct_paths().
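
For example (illustrative): with needed_pathkeys (a, b, c) and a path already sorted by (b, c, d), the matching prefix is (b, c), so the reordered list (b, c, a) is constructed. Since only two of the three keys match, the list is kept only when incremental sort is enabled, and it would be dropped entirely if it came out equal to needed_pathkeys.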

◆ group_by_has_partkey()

static bool group_by_has_partkey ( RelOptInfo input_rel,
List targetList,
List groupClause 
)
static

Definition at line 8327 of file planner.c.

8330{
8331 List *groupexprs = get_sortgrouplist_exprs(groupClause, targetList);
8332 int cnt = 0;
8333 int partnatts;
8334
8335 /* Input relation should be partitioned. */
8336 Assert(input_rel->part_scheme);
8337
8338 /* Rule out early, if there are no partition keys present. */
8339 if (!input_rel->partexprs)
8340 return false;
8341
8342 partnatts = input_rel->part_scheme->partnatts;
8343
8344 for (cnt = 0; cnt < partnatts; cnt++)
8345 {
8346 List *partexprs = input_rel->partexprs[cnt];
8347 ListCell *lc;
8348 bool found = false;
8349
8350 foreach(lc, partexprs)
8351 {
8352 ListCell *lg;
8353 Expr *partexpr = lfirst(lc);
8354 Oid partcoll = input_rel->part_scheme->partcollation[cnt];
8355
8356 foreach(lg, groupexprs)
8357 {
8358 Expr *groupexpr = lfirst(lg);
8359 Oid groupcoll = exprCollation((Node *) groupexpr);
8360
8361 /*
8362 * Note: we can assume there is at most one RelabelType node;
8363 * eval_const_expressions() will have simplified if more than
8364 * one.
8365 */
8366 if (IsA(groupexpr, RelabelType))
8367 groupexpr = ((RelabelType *) groupexpr)->arg;
8368
8369 if (equal(groupexpr, partexpr))
8370 {
8371 /*
8372 * Reject a match if the grouping collation does not match
8373 * the partitioning collation.
8374 */
8375 if (OidIsValid(partcoll) && OidIsValid(groupcoll) &&
8376 partcoll != groupcoll)
8377 return false;
8378
8379 found = true;
8380 break;
8381 }
8382 }
8383
8384 if (found)
8385 break;
8386 }
8387
8388 /*
8389 * If none of the partition key expressions match with any of the
8390 * GROUP BY expression, return false.
8391 */
8392 if (!found)
8393 return false;
8394 }
8395
8396 return true;
8397}
Oid exprCollation(const Node *expr)
Definition nodeFuncs.c:821

References Assert, equal(), exprCollation(), fb(), get_sortgrouplist_exprs(), IsA, lfirst, and OidIsValid.

Referenced by create_ordinary_grouping_paths().
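
A concrete illustration (not from the source): for a table partitioned by RANGE (region), a query with GROUP BY region, month covers the partition key, so this function returns true and full partitionwise aggregation is possible; with GROUP BY month alone the key is not covered, a group can span multiple partitions, and at most partial partitionwise aggregation remains an option. The collation check matters too: grouping on region with a collation different from the partitioning collation is rejected, because equal-under-grouping values might fall into different partitions.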

◆ grouping_planner()

static void grouping_planner ( PlannerInfo root,
double  tuple_fraction,
SetOperationStmt setops 
)
static

Definition at line 1511 of file planner.c.

1513{
1514 Query *parse = root->parse;
1515 int64 offset_est = 0;
1516 int64 count_est = 0;
1517 double limit_tuples = -1.0;
1518 bool have_postponed_srfs = false;
1519 PathTarget *final_target;
1520 List *final_targets;
1521 List *final_targets_contain_srfs;
1522 bool final_target_parallel_safe;
1523 RelOptInfo *current_rel;
1524 RelOptInfo *final_rel;
1525 FinalPathExtraData extra;
1526 ListCell *lc;
1527
1528 /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
1529 if (parse->limitCount || parse->limitOffset)
1530 {
1531 tuple_fraction = preprocess_limit(root, tuple_fraction,
1532 &offset_est, &count_est);
1533
1534 /*
1535 * If we have a known LIMIT, and don't have an unknown OFFSET, we can
1536 * estimate the effects of using a bounded sort.
1537 */
1538 if (count_est > 0 && offset_est >= 0)
1539 limit_tuples = (double) count_est + (double) offset_est;
1540 }
1541
1542 /* Make tuple_fraction accessible to lower-level routines */
1543 root->tuple_fraction = tuple_fraction;
1544
1545 if (parse->setOperations)
1546 {
1547 /*
1548 * Construct Paths for set operations. The results will not need any
1549 * work except perhaps a top-level sort and/or LIMIT. Note that any
1550 * special work for recursive unions is the responsibility of
1551 * plan_set_operations.
1552 */
1553 current_rel = plan_set_operations(root);
1554
1555 /*
1556 * We should not need to call preprocess_targetlist, since we must be
1557 * in a SELECT query node. Instead, use the processed_tlist returned
1558 * by plan_set_operations (since this tells whether it returned any
1559 * resjunk columns!), and transfer any sort key information from the
1560 * original tlist.
1561 */
1562 Assert(parse->commandType == CMD_SELECT);
1563
1564 /* for safety, copy processed_tlist instead of modifying in-place */
1565 root->processed_tlist =
1566 postprocess_setop_tlist(copyObject(root->processed_tlist),
1567 parse->targetList);
1568
1569 /* Also extract the PathTarget form of the setop result tlist */
1570 final_target = current_rel->cheapest_total_path->pathtarget;
1571
1572 /* And check whether it's parallel safe */
1573 final_target_parallel_safe =
1574 is_parallel_safe(root, (Node *) final_target->exprs);
1575
1576 /* The setop result tlist couldn't contain any SRFs */
1577 Assert(!parse->hasTargetSRFs);
1578 final_targets = final_targets_contain_srfs = NIL;
1579
1580 /*
1581 * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have
1582 * checked already, but let's make sure).
1583 */
1584 if (parse->rowMarks)
1585 ereport(ERROR,
1586 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1587 /*------
1588 translator: %s is a SQL row locking clause such as FOR UPDATE */
1589 errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
1590 LCS_asString(linitial_node(RowMarkClause,
1591 parse->rowMarks)->strength))));
1592
1593 /*
1594 * Calculate pathkeys that represent result ordering requirements
1595 */
1596 Assert(parse->distinctClause == NIL);
1597 root->sort_pathkeys = make_pathkeys_for_sortclauses(root,
1598 parse->sortClause,
1599 root->processed_tlist);
1600 }
1601 else
1602 {
1603 /* No set operations, do regular planning */
1604 PathTarget *sort_input_target;
1605 List *sort_input_targets;
1606 List *sort_input_targets_contain_srfs;
1607 bool sort_input_target_parallel_safe;
1608 PathTarget *grouping_target;
1609 List *grouping_targets;
1610 List *grouping_targets_contain_srfs;
1611 bool grouping_target_parallel_safe;
1612 PathTarget *scanjoin_target;
1613 List *scanjoin_targets;
1614 List *scanjoin_targets_contain_srfs;
1615 bool scanjoin_target_parallel_safe;
1616 bool scanjoin_target_same_exprs;
1617 bool have_grouping;
1618 WindowFuncLists *wflists = NULL;
1619 List *activeWindows = NIL;
1620 grouping_sets_data *gset_data = NULL;
1621 standard_qp_extra qp_extra;
1622
1623 /* A recursive query should always have setOperations */
1624 Assert(!root->hasRecursion);
1625
1626 /* Preprocess grouping sets and GROUP BY clause, if any */
1627 if (parse->groupingSets)
1628 {
1629 gset_data = preprocess_grouping_sets(root);
1630 }
1631 else if (parse->groupClause)
1632 {
1633 /* Preprocess regular GROUP BY clause, if any */
1634 root->processed_groupClause = preprocess_groupclause(root, NIL);
1635 }
1636
1637 /*
1638 * Preprocess targetlist. Note that much of the remaining planning
1639 * work will be done with the PathTarget representation of tlists, but
1640 * we must also maintain the full representation of the final tlist so
1641 * that we can transfer its decoration (resnames etc) to the topmost
1642 * tlist of the finished Plan. This is kept in processed_tlist.
1643 */
1644 preprocess_targetlist(root);
1645
1646 /*
1647 * Mark all the aggregates with resolved aggtranstypes, and detect
1648 * aggregates that are duplicates or can share transition state. We
1649 * must do this before slicing and dicing the tlist into various
1650 * pathtargets, else some copies of the Aggref nodes might escape
1651 * being marked.
1652 */
1653 if (parse->hasAggs)
1654 {
1655 preprocess_aggrefs(root, (Node *) root->processed_tlist);
1656 preprocess_aggrefs(root, (Node *) parse->havingQual);
1657 }
1658
1659 /*
1660 * Locate any window functions in the tlist. (We don't need to look
1661 * anywhere else, since expressions used in ORDER BY will be in there
1662 * too.) Note that they could all have been eliminated by constant
1663 * folding, in which case we don't need to do any more work.
1664 */
1665 if (parse->hasWindowFuncs)
1666 {
1667 wflists = find_window_functions((Node *) root->processed_tlist,
1668 list_length(parse->windowClause));
1669 if (wflists->numWindowFuncs > 0)
1670 {
1671 /*
1672 * See if any modifications can be made to each WindowClause
1673 * to allow the executor to execute the WindowFuncs more
1674 * quickly.
1675 */
1676 optimize_window_clauses(root, wflists);
1677
1678 /* Extract the list of windows actually in use. */
1679 activeWindows = select_active_windows(root, wflists);
1680
1681 /* Make sure they all have names, for EXPLAIN's use. */
1682 name_active_windows(activeWindows);
1683 }
1684 else
1685 parse->hasWindowFuncs = false;
1686 }
1687
1688 /*
1689 * Preprocess MIN/MAX aggregates, if any. Note: be careful about
1690 * adding logic between here and the query_planner() call. Anything
1691 * that is needed in MIN/MAX-optimizable cases will have to be
1692 * duplicated in planagg.c.
1693 */
1694 if (parse->hasAggs)
1695 preprocess_minmax_aggregates(root);
1696
1697 /*
1698 * Figure out whether there's a hard limit on the number of rows that
1699 * query_planner's result subplan needs to return. Even if we know a
1700 * hard limit overall, it doesn't apply if the query has any
1701 * grouping/aggregation operations, or SRFs in the tlist.
1702 */
1703 if (parse->groupClause ||
1704 parse->groupingSets ||
1705 parse->distinctClause ||
1706 parse->hasAggs ||
1707 parse->hasWindowFuncs ||
1708 parse->hasTargetSRFs ||
1709 root->hasHavingQual)
1710 root->limit_tuples = -1.0;
1711 else
1712 root->limit_tuples = limit_tuples;
1713
1714 /* Set up data needed by standard_qp_callback */
1715 qp_extra.activeWindows = activeWindows;
1716 qp_extra.gset_data = gset_data;
1717
1718 /*
1719 * If we're a subquery for a set operation, store the SetOperationStmt
1720 * in qp_extra.
1721 */
1722 qp_extra.setop = setops;
1723
1724 /*
1725 * Generate the best unsorted and presorted paths for the scan/join
1726 * portion of this Query, ie the processing represented by the
1727 * FROM/WHERE clauses. (Note there may not be any presorted paths.)
1728 * We also generate (in standard_qp_callback) pathkey representations
1729 * of the query's sort clause, distinct clause, etc.
1730 */
1731 current_rel = query_planner(root, standard_qp_callback, &qp_extra);
1732
1733 /*
1734 * Convert the query's result tlist into PathTarget format.
1735 *
1736 * Note: this cannot be done before query_planner() has performed
1737 * appendrel expansion, because that might add resjunk entries to
1738 * root->processed_tlist. Waiting till afterwards is also helpful
1739 * because the target width estimates can use per-Var width numbers
1740 * that were obtained within query_planner().
1741 */
1742 final_target = create_pathtarget(root, root->processed_tlist);
1743 final_target_parallel_safe =
1744 is_parallel_safe(root, (Node *) final_target->exprs);
1745
1746 /*
1747 * If ORDER BY was given, consider whether we should use a post-sort
1748 * projection, and compute the adjusted target for preceding steps if
1749 * so.
1750 */
1751 if (parse->sortClause)
1752 {
1753 sort_input_target = make_sort_input_target(root,
1754 final_target,
1755 &have_postponed_srfs);
1756 sort_input_target_parallel_safe =
1757 is_parallel_safe(root, (Node *) sort_input_target->exprs);
1758 }
1759 else
1760 {
1761 sort_input_target = final_target;
1762 sort_input_target_parallel_safe = final_target_parallel_safe;
1763 }
1764
1765 /*
1766 * If we have window functions to deal with, the output from any
1767 * grouping step needs to be what the window functions want;
1768 * otherwise, it should be sort_input_target.
1769 */
1770 if (activeWindows)
1771 {
1772 grouping_target = make_window_input_target(root,
1773 sort_input_target,
1774 activeWindows);
1775 grouping_target_parallel_safe =
1776 is_parallel_safe(root, (Node *) grouping_target->exprs);
1777 }
1778 else
1779 {
1780 grouping_target = sort_input_target;
1781 grouping_target_parallel_safe = sort_input_target_parallel_safe;
1782 }
1783
1784 /*
1785 * If we have grouping or aggregation to do, the topmost scan/join
1786 * plan node must emit what the grouping step wants; otherwise, it
1787 * should emit grouping_target.
1788 */
1789 have_grouping = (parse->groupClause || parse->groupingSets ||
1790 parse->hasAggs || root->hasHavingQual);
1791 if (have_grouping)
1792 {
1793 scanjoin_target = make_group_input_target(root, final_target);
1794 scanjoin_target_parallel_safe =
1795 is_parallel_safe(root, (Node *) scanjoin_target->exprs);
1796 }
1797 else
1798 {
1799 scanjoin_target = grouping_target;
1800 scanjoin_target_parallel_safe = grouping_target_parallel_safe;
1801 }
1802
1803 /*
1804 * If there are any SRFs in the targetlist, we must separate each of
1805 * these PathTargets into SRF-computing and SRF-free targets. Replace
1806 * each of the named targets with a SRF-free version, and remember the
1807 * list of additional projection steps we need to add afterwards.
1808 */
1809 if (parse->hasTargetSRFs)
1810 {
1811 /* final_target doesn't recompute any SRFs in sort_input_target */
1817 /* likewise for sort_input_target vs. grouping_target */
1823 /* likewise for grouping_target vs. scanjoin_target */
1830 /* scanjoin_target will not have any SRFs precomputed for it */
1836 }
1837 else
1838 {
1839 /* initialize lists; for most of these, dummy values are OK */
1845 }
1846
1847 /* Apply scan/join target. */
1848 scanjoin_target_same_exprs = list_length(scanjoin_targets) == 1
1849 && equal(scanjoin_target->exprs, current_rel->reltarget->exprs);
1850 apply_scanjoin_target_to_paths(root, current_rel, scanjoin_targets,
1851 scanjoin_targets_contain_srfs,
1852 scanjoin_target_parallel_safe,
1853 scanjoin_target_same_exprs);
1854
1855 /*
1856 * Save the various upper-rel PathTargets we just computed into
1857 * root->upper_targets[]. The core code doesn't use this, but it
1858 * provides a convenient place for extensions to get at the info. For
1859 * consistency, we save all the intermediate targets, even though some
1860 * of the corresponding upperrels might not be needed for this query.
1861 */
1862 root->upper_targets[UPPERREL_FINAL] = final_target;
1863 root->upper_targets[UPPERREL_ORDERED] = final_target;
1864 root->upper_targets[UPPERREL_DISTINCT] = sort_input_target;
1865 root->upper_targets[UPPERREL_PARTIAL_DISTINCT] = sort_input_target;
1866 root->upper_targets[UPPERREL_WINDOW] = sort_input_target;
1867 root->upper_targets[UPPERREL_GROUP_AGG] = grouping_target;
1868
1869 /*
1870 * If we have grouping and/or aggregation, consider ways to implement
1871 * that. We build a new upperrel representing the output of this
1872 * phase.
1873 */
1874 if (have_grouping)
1875 {
1876 current_rel = create_grouping_paths(root,
1877 current_rel,
1878 grouping_target,
1879 grouping_target_parallel_safe,
1880 gset_data);
1881 /* Fix things up if grouping_target contains SRFs */
1882 if (parse->hasTargetSRFs)
1883 adjust_paths_for_srfs(root, current_rel,
1884 grouping_targets,
1885 grouping_targets_contain_srfs);
1886 }
1887
1888 /*
1889 * If we have window functions, consider ways to implement those. We
1890 * build a new upperrel representing the output of this phase.
1891 */
1892 if (activeWindows)
1893 {
1894 current_rel = create_window_paths(root,
1895 current_rel,
1896 grouping_target,
1897 sort_input_target,
1898 sort_input_target_parallel_safe,
1899 wflists,
1900 activeWindows);
1901 /* Fix things up if sort_input_target contains SRFs */
1902 if (parse->hasTargetSRFs)
1903 adjust_paths_for_srfs(root, current_rel,
1904 sort_input_targets,
1905 sort_input_targets_contain_srfs);
1906 }
1907
1908 /*
1909 * If there is a DISTINCT clause, consider ways to implement that. We
1910 * build a new upperrel representing the output of this phase.
1911 */
1912 if (parse->distinctClause)
1913 {
1914 current_rel = create_distinct_paths(root,
1915 current_rel,
1916 sort_input_target);
1917 }
1918 } /* end of if (setOperations) */
1919
1920 /*
1921 * If ORDER BY was given, consider ways to implement that, and generate a
1922 * new upperrel containing only paths that emit the correct ordering and
1923 * project the correct final_target. We can apply the original
1924 * limit_tuples limit in sort costing here, but only if there are no
1925 * postponed SRFs.
1926 */
1927 if (parse->sortClause)
1928 {
1929 current_rel = create_ordered_paths(root,
1930 current_rel,
1931 final_target,
1932 final_target_parallel_safe,
1933 have_postponed_srfs ? -1.0 :
1934 limit_tuples);
1935 /* Fix things up if final_target contains SRFs */
1936 if (parse->hasTargetSRFs)
1937 adjust_paths_for_srfs(root, current_rel,
1938 final_targets,
1939 final_targets_contain_srfs);
1940 }
1941
1942 /*
1943 * Now we are prepared to build the final-output upperrel.
1944 */
1945 final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1946
1947 /*
1948 * If the input rel is marked consider_parallel and there's nothing that's
1949 * not parallel-safe in the LIMIT clause, then the final_rel can be marked
1950 * consider_parallel as well. Note that if the query has rowMarks or is
1951 * not a SELECT, consider_parallel will be false for every relation in the
1952 * query.
1953 */
1954 if (current_rel->consider_parallel &&
1955 is_parallel_safe(root, parse->limitOffset) &&
1956 is_parallel_safe(root, parse->limitCount))
1957 final_rel->consider_parallel = true;
1958
1959 /*
1960 * If the current_rel belongs to a single FDW, so does the final_rel.
1961 */
1962 final_rel->serverid = current_rel->serverid;
1963 final_rel->userid = current_rel->userid;
1964 final_rel->useridiscurrent = current_rel->useridiscurrent;
1965 final_rel->fdwroutine = current_rel->fdwroutine;
1966
1967 /*
1968 * Generate paths for the final_rel. Insert all surviving paths, with
1969 * LockRows, Limit, and/or ModifyTable steps added if needed.
1970 */
1971 foreach(lc, current_rel->pathlist)
1972 {
1973 Path *path = (Path *) lfirst(lc);
1974
1975 /*
1976 * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
1977 * (Note: we intentionally test parse->rowMarks not root->rowMarks
1978 * here. If there are only non-locking rowmarks, they should be
1979 * handled by the ModifyTable node instead. However, root->rowMarks
1980 * is what goes into the LockRows node.)
1981 */
1982 if (parse->rowMarks)
1983 {
1984 path = (Path *) create_lockrows_path(root, final_rel, path,
1985 root->rowMarks,
1986 assign_special_exec_param(root));
1987 }
1988
1989 /*
1990 * If there is a LIMIT/OFFSET clause, add the LIMIT node.
1991 */
1992 if (limit_needed(parse))
1993 {
1994 path = (Path *) create_limit_path(root, final_rel, path,
1995 parse->limitOffset,
1996 parse->limitCount,
1997 parse->limitOption,
1998 offset_est, count_est);
1999 }
2000
2001 /*
2002 * If this is an INSERT/UPDATE/DELETE/MERGE, add the ModifyTable node.
2003 */
2004 if (parse->commandType != CMD_SELECT)
2005 {
2006 Index rootRelation;
2007 List *resultRelations = NIL;
2008 List *updateColnosLists = NIL;
2009 List *withCheckOptionLists = NIL;
2010 List *returningLists = NIL;
2011 List *mergeActionLists = NIL;
2012 List *mergeJoinConditions = NIL;
2013 List *rowMarks;
2014
2015 if (bms_membership(root->all_result_relids) == BMS_MULTIPLE)
2016 {
2017 /* Inherited UPDATE/DELETE/MERGE */
2018 RelOptInfo *top_result_rel = find_base_rel(root,
2019 parse->resultRelation);
2020 int resultRelation = -1;
2021
2022 /* Pass the root result rel forward to the executor. */
2023 rootRelation = parse->resultRelation;
2024
2025 /* Add only leaf children to ModifyTable. */
2026 while ((resultRelation = bms_next_member(root->leaf_result_relids,
2027 resultRelation)) >= 0)
2028 {
2029 RelOptInfo *this_result_rel = find_base_rel(root,
2030 resultRelation);
2031
2032 /*
2033 * Also exclude any leaf rels that have turned dummy since
2034 * being added to the list, for example, by being excluded
2035 * by constraint exclusion.
2036 */
2037 if (IS_DUMMY_REL(this_result_rel))
2038 continue;
2039
2040 /* Build per-target-rel lists needed by ModifyTable */
2041 resultRelations = lappend_int(resultRelations,
2042 resultRelation);
2043 if (parse->commandType == CMD_UPDATE)
2044 {
2045 List *update_colnos = root->update_colnos;
2046
2047 if (this_result_rel != top_result_rel)
2048 update_colnos =
2049 adjust_inherited_attnums_multilevel(root,
2050 update_colnos,
2051 this_result_rel->relid,
2052 top_result_rel->relid);
2053 updateColnosLists = lappend(updateColnosLists,
2054 update_colnos);
2055 }
2056 if (parse->withCheckOptions)
2057 {
2058 List *withCheckOptions = parse->withCheckOptions;
2059
2066 withCheckOptionLists = lappend(withCheckOptionLists,
2068 }
2069 if (parse->returningList)
2070 {
2071 List *returningList = parse->returningList;
2072
2073 if (this_result_rel != top_result_rel)
2074 returningList = (List *)
2075 adjust_appendrel_attrs_multilevel(root,
2076 (Node *) returningList,
2077 this_result_rel,
2078 top_result_rel);
2079 returningLists = lappend(returningLists,
2080 returningList);
2081 }
2082 if (parse->mergeActionList)
2083 {
2084 ListCell *l;
2085 List *mergeActionList = NIL;
2086
2087 /*
2088 * Copy MergeActions and translate stuff that
2089 * references attribute numbers.
2090 */
2091 foreach(l, parse->mergeActionList)
2092 {
2094 *leaf_action = copyObject(action);
2095
2096 leaf_action->qual =
2098 (Node *) action->qual,
2101 leaf_action->targetList = (List *)
2103 (Node *) action->targetList,
2106 if (leaf_action->commandType == CMD_UPDATE)
2107 leaf_action->updateColnos =
2109 action->updateColnos,
2110 this_result_rel->relid,
2111 top_result_rel->relid);
2112 mergeActionList = lappend(mergeActionList,
2113 leaf_action);
2114 }
2115
2116 mergeActionLists = lappend(mergeActionLists,
2117 mergeActionList);
2118 }
2119 if (parse->commandType == CMD_MERGE)
2120 {
2121 Node *mergeJoinCondition = parse->mergeJoinCondition;
2122
2124 mergeJoinCondition =
2126 mergeJoinCondition,
2129 mergeJoinConditions = lappend(mergeJoinConditions,
2130 mergeJoinCondition);
2131 }
2132 }
2133
2134 if (resultRelations == NIL)
2135 {
2136 /*
2137 * We managed to exclude every child rel, so generate a
2138 * dummy one-relation plan using info for the top target
2139 * rel (even though that may not be a leaf target).
2140 * Although it's clear that no data will be updated or
2141 * deleted, we still need to have a ModifyTable node so
2142 * that any statement triggers will be executed. (This
2143 * could be cleaner if we fixed nodeModifyTable.c to allow
2144 * zero target relations, but that probably wouldn't be a
2145 * net win.)
2146 */
2147 resultRelations = list_make1_int(parse->resultRelation);
2148 if (parse->commandType == CMD_UPDATE)
2149 updateColnosLists = list_make1(root->update_colnos);
2150 if (parse->withCheckOptions)
2151 withCheckOptionLists = list_make1(parse->withCheckOptions);
2152 if (parse->returningList)
2153 returningLists = list_make1(parse->returningList);
2154 if (parse->mergeActionList)
2155 mergeActionLists = list_make1(parse->mergeActionList);
2156 if (parse->commandType == CMD_MERGE)
2157 mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2158 }
2159 }
2160 else
2161 {
2162 /* Single-relation INSERT/UPDATE/DELETE/MERGE. */
2163 rootRelation = 0; /* there's no separate root rel */
2164 resultRelations = list_make1_int(parse->resultRelation);
2165 if (parse->commandType == CMD_UPDATE)
2166 updateColnosLists = list_make1(root->update_colnos);
2167 if (parse->withCheckOptions)
2168 withCheckOptionLists = list_make1(parse->withCheckOptions);
2169 if (parse->returningList)
2170 returningLists = list_make1(parse->returningList);
2171 if (parse->mergeActionList)
2172 mergeActionLists = list_make1(parse->mergeActionList);
2173 if (parse->commandType == CMD_MERGE)
2174 mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2175 }
2176
2177 /*
2178 * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
2179 * will have dealt with fetching non-locked marked rows, else we
2180 * need to have ModifyTable do that.
2181 */
2182 if (parse->rowMarks)
2183 rowMarks = NIL;
2184 else
2185 rowMarks = root->rowMarks;
2186
2187 path = (Path *)
2188 create_modifytable_path(root, final_rel,
2189 path,
2190 parse->commandType,
2191 parse->canSetTag,
2192 parse->resultRelation,
2193 rootRelation,
2194 resultRelations,
2195 updateColnosLists,
2196 withCheckOptionLists,
2197 returningLists,
2198 rowMarks,
2199 parse->onConflict,
2200 mergeActionLists,
2201 mergeJoinConditions,
2202 assign_special_exec_param(root));
2203 }
2204
2205 /* And shove it into final_rel */
2206 add_path(final_rel, path);
2207 }
2208
2209 /*
2210 * Generate partial paths for final_rel, too, if outer query levels might
2211 * be able to make use of them.
2212 */
2213 if (final_rel->consider_parallel && root->query_level > 1 &&
2214 !limit_needed(parse))
2215 {
2216 Assert(!parse->rowMarks && parse->commandType == CMD_SELECT);
2217 foreach(lc, current_rel->partial_pathlist)
2218 {
2219 Path *partial_path = (Path *) lfirst(lc);
2220
2221 add_partial_path(final_rel, partial_path);
2222
2223 }
2224
2225 extra.limit_needed = limit_needed(parse);
2226 extra.limit_tuples = limit_tuples;
2227 extra.count_est = count_est;
2228 extra.offset_est = offset_est;
2229
2230 /*
2231 * If there is an FDW that's responsible for all baserels of the query,
2232 * let it consider adding ForeignPaths.
2233 */
2234 if (final_rel->fdwroutine &&
2235 final_rel->fdwroutine->GetForeignUpperPaths)
2236 final_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_FINAL,
2237 current_rel, final_rel,
2238 &extra);
2239
2240 /* Let extensions possibly add some more paths */
2241 if (create_upper_paths_hook)
2242 (*create_upper_paths_hook) (root, UPPERREL_FINAL,
2243 current_rel, final_rel, &extra);
2244
2245 /* Note: currently, we leave it to callers to do set_cheapest() */
2246}
List * adjust_inherited_attnums_multilevel(PlannerInfo *root, List *attnums, Index child_relid, Index top_parent_relid)
Definition appendinfo.c:733
BMS_Membership bms_membership(const Bitmapset *a)
Definition bitmapset.c:780
@ BMS_MULTIPLE
Definition bitmapset.h:73
unsigned int Index
Definition c.h:628
WindowFuncLists * find_window_functions(Node *clause, Index maxWinRef)
Definition clauses.c:240
List * lappend_int(List *list, int datum)
Definition list.c:357
@ CMD_MERGE
Definition nodes.h:279
@ CMD_UPDATE
Definition nodes.h:276
@ CMD_SELECT
Definition nodes.h:275
int assign_special_exec_param(PlannerInfo *root)
const char * LCS_asString(LockClauseStrength strength)
Definition analyze.c:3324
ModifyTablePath * create_modifytable_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, CmdType operation, bool canSetTag, Index nominalRelation, Index rootRelation, List *resultRelations, List *updateColnosLists, List *withCheckOptionLists, List *returningLists, List *rowMarks, OnConflictExpr *onconflict, List *mergeActionLists, List *mergeJoinConditions, int epqParam)
Definition pathnode.c:3636
LockRowsPath * create_lockrows_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *rowMarks, int epqParam)
Definition pathnode.c:3574
@ UPPERREL_FINAL
Definition pathnodes.h:152
#define list_make1_int(x1)
Definition pg_list.h:227
void preprocess_minmax_aggregates(PlannerInfo *root)
Definition planagg.c:74
RelOptInfo * query_planner(PlannerInfo *root, query_pathkeys_callback qp_callback, void *qp_extra)
Definition planmain.c:54
static List * postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
Definition planner.c:5851
static double preprocess_limit(PlannerInfo *root, double tuple_fraction, int64 *offset_est, int64 *count_est)
Definition planner.c:2653
static PathTarget * make_window_input_target(PlannerInfo *root, PathTarget *final_target, List *activeWindows)
Definition planner.c:6231
static RelOptInfo * create_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target)
Definition planner.c:4860
static void optimize_window_clauses(PlannerInfo *root, WindowFuncLists *wflists)
Definition planner.c:5888
static void name_active_windows(List *activeWindows)
Definition planner.c:6111
static PathTarget * make_sort_input_target(PlannerInfo *root, PathTarget *final_target, bool *have_postponed_srfs)
Definition planner.c:6479
static grouping_sets_data * preprocess_grouping_sets(PlannerInfo *root)
Definition planner.c:2258
static PathTarget * make_group_input_target(PlannerInfo *root, PathTarget *final_target)
Definition planner.c:5601
static List * select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
Definition planner.c:6028
bool limit_needed(Query *parse)
Definition planner.c:2838
static RelOptInfo * create_ordered_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, double limit_tuples)
Definition planner.c:5378
static RelOptInfo * create_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, grouping_sets_data *gd)
Definition planner.c:3856
static void standard_qp_callback(PlannerInfo *root, void *extra)
Definition planner.c:3529
static RelOptInfo * create_window_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *input_target, PathTarget *output_target, bool output_target_parallel_safe, WindowFuncLists *wflists, List *activeWindows)
Definition planner.c:4603
void preprocess_aggrefs(PlannerInfo *root, Node *clause)
Definition prepagg.c:110
void preprocess_targetlist(PlannerInfo *root)
Definition preptlist.c:64
RelOptInfo * plan_set_operations(PlannerInfo *root)
Definition prepunion.c:97
RelOptInfo * find_base_rel(PlannerInfo *root, int relid)
Definition relnode.c:533
Cardinality limit_tuples
Definition pathnodes.h:3662
void split_pathtarget_at_srfs_grouping(PlannerInfo *root, PathTarget *target, PathTarget *input_target, List **targets, List **targets_contain_srfs)
Definition tlist.c:868
void split_pathtarget_at_srfs(PlannerInfo *root, PathTarget *target, PathTarget *input_target, List **targets, List **targets_contain_srfs)
Definition tlist.c:845

References add_partial_path(), add_path(), adjust_appendrel_attrs_multilevel(), adjust_inherited_attnums_multilevel(), adjust_paths_for_srfs(), apply_scanjoin_target_to_paths(), Assert, assign_special_exec_param(), bms_membership(), BMS_MULTIPLE, bms_next_member(), CMD_MERGE, CMD_SELECT, CMD_UPDATE, copyObject, FinalPathExtraData::count_est, create_distinct_paths(), create_grouping_paths(), create_limit_path(), create_lockrows_path(), create_modifytable_path(), create_ordered_paths(), create_pathtarget, create_upper_paths_hook, create_window_paths(), equal(), ereport, errcode(), errmsg(), ERROR, fb(), fetch_upper_rel(), find_base_rel(), find_window_functions(), IS_DUMMY_REL, is_parallel_safe(), lappend(), lappend_int(), LCS_asString(), lfirst, limit_needed(), FinalPathExtraData::limit_needed, FinalPathExtraData::limit_tuples, linitial_int, linitial_node, list_length(), list_make1, list_make1_int, make_group_input_target(), make_pathkeys_for_sortclauses(), make_sort_input_target(), make_window_input_target(), name_active_windows(), NIL, FinalPathExtraData::offset_est, optimize_window_clauses(), parse(), plan_set_operations(), postprocess_setop_tlist(), preprocess_aggrefs(), preprocess_groupclause(), preprocess_grouping_sets(), preprocess_limit(), preprocess_minmax_aggregates(), preprocess_targetlist(), query_planner(), root, select_active_windows(), split_pathtarget_at_srfs(), split_pathtarget_at_srfs_grouping(), standard_qp_callback(), UPPERREL_DISTINCT, UPPERREL_FINAL, UPPERREL_GROUP_AGG, UPPERREL_ORDERED, UPPERREL_PARTIAL_DISTINCT, and UPPERREL_WINDOW.

Referenced by subquery_planner().

◆ has_volatile_pathkey()

static bool has_volatile_pathkey ( List *keys )
static

Definition at line 3260 of file planner.c.

3261{
3262 ListCell *lc;
3263
3264 foreach(lc, keys)
3265 {
3266 PathKey *pathkey = lfirst_node(PathKey, lc);
3267
3268 if (pathkey->pk_eclass->ec_has_volatile)
3269 return true;
3270 }
3271
3272 return false;
3273}

References fb(), and lfirst_node.

Referenced by adjust_group_pathkeys_for_groupagg().

◆ is_degenerate_grouping()

static bool is_degenerate_grouping ( PlannerInfo *root )
static

Definition at line 4025 of file planner.c.

4026{
4027 Query *parse = root->parse;
4028
4029 return (root->hasHavingQual || parse->groupingSets) &&
4030 !parse->hasAggs && parse->groupClause == NIL;
4031}

References NIL, parse(), and root.

Referenced by create_grouping_paths().

◆ limit_needed()

bool limit_needed ( Query *parse )

Definition at line 2838 of file planner.c.

2839{
2840 Node *node;
2841
2842 node = parse->limitCount;
2843 if (node)
2844 {
2845 if (IsA(node, Const))
2846 {
2847 /* NULL indicates LIMIT ALL, ie, no limit */
2848 if (!((Const *) node)->constisnull)
2849 return true; /* LIMIT with a constant value */
2850 }
2851 else
2852 return true; /* non-constant LIMIT */
2853 }
2854
2855 node = parse->limitOffset;
2856 if (node)
2857 {
2858 if (IsA(node, Const))
2859 {
2860 /* Treat NULL as no offset; the executor would too */
2861 if (!((Const *) node)->constisnull)
2862 {
2863 int64 offset = DatumGetInt64(((Const *) node)->constvalue);
2864
2865 if (offset != 0)
2866 return true; /* OFFSET with a nonzero value */
2867 }
2868 }
2869 else
2870 return true; /* non-constant OFFSET */
2871 }
2872
2873 return false; /* don't need a Limit plan node */
2874}
static int64 DatumGetInt64(Datum X)
Definition postgres.h:413

References DatumGetInt64(), fb(), IsA, and parse().

Referenced by grouping_planner(), and set_rel_consider_parallel().
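
The decision feeds directly into path construction in grouping_planner(). Below is a hedged sketch of that caller-side pattern; create_limit_path() and the parse fields are real planner interfaces, but the surrounding variables are illustrative rather than quoted from the source:

    /* Add a Limit node only when the clauses can actually change the result. */
    if (limit_needed(parse))
    {
        path = (Path *) create_limit_path(root, final_rel, path,
                                          parse->limitOffset,
                                          parse->limitCount,
                                          parse->limitOption,
                                          offset_est, count_est);
    }

Because limit_needed() returns false for LIMIT ALL and OFFSET 0, such queries plan exactly as if the clauses were absent.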

◆ make_group_input_target()

static PathTarget * make_group_input_target ( PlannerInfo *root,
PathTarget *final_target 
)
static

Definition at line 5601 of file planner.c.

5602{
5603 Query *parse = root->parse;
5604 PathTarget *input_target;
5605 List *non_group_cols;
5606 List *non_group_vars;
5607 int i;
5608 ListCell *lc;
5609
5610 /*
5611 * We must build a target containing all grouping columns, plus any other
5612 * Vars mentioned in the query's targetlist and HAVING qual.
5613 */
5614 input_target = create_empty_pathtarget();
5615 non_group_cols = NIL;
5616
5617 i = 0;
5618 foreach(lc, final_target->exprs)
5619 {
5620 Expr *expr = (Expr *) lfirst(lc);
5621 Index sgref = get_pathtarget_sortgroupref(final_target, i);
5622
5623 if (sgref && root->processed_groupClause &&
5624 get_sortgroupref_clause_noerr(sgref,
5625 root->processed_groupClause) != NULL)
5626 {
5627 /*
5628 * It's a grouping column, so add it to the input target as-is.
5629 *
5630 * Note that the target is logically below the grouping step. So
5631 * with grouping sets we need to remove the RT index of the
5632 * grouping step if there is any from the target expression.
5633 */
5634 if (parse->hasGroupRTE && parse->groupingSets != NIL)
5635 {
5636 Assert(root->group_rtindex > 0);
5637 expr = (Expr *)
5638 remove_nulling_relids((Node *) expr,
5639 bms_make_singleton(root->group_rtindex),
5640 NULL);
5641 }
5642 add_column_to_pathtarget(input_target, expr, sgref);
5643 }
5644 else
5645 {
5646 /*
5647 * Non-grouping column, so just remember the expression for later
5648 * call to pull_var_clause.
5649 */
5650 non_group_cols = lappend(non_group_cols, expr);
5651 }
5652
5653 i++;
5654 }
5655
5656 /*
5657 * If there's a HAVING clause, we'll need the Vars it uses, too.
5658 */
5659 if (parse->havingQual)
5660 non_group_cols = lappend(non_group_cols, parse->havingQual);
5661
5662 /*
5663 * Pull out all the Vars mentioned in non-group cols (plus HAVING), and
5664 * add them to the input target if not already present. (A Var used
5665 * directly as a GROUP BY item will be present already.) Note this
5666 * includes Vars used in resjunk items, so we are covering the needs of
5667 * ORDER BY and window specifications. Vars used within Aggrefs and
5668 * WindowFuncs will be pulled out here, too.
5669 *
5670 * Note that the target is logically below the grouping step. So with
5671 * grouping sets we need to remove the RT index of the grouping step if
5672 * there is any from the non-group Vars.
5673 */
5674 non_group_vars = pull_var_clause((Node *) non_group_cols,
5675 PVC_RECURSE_AGGREGATES |
5676 PVC_RECURSE_WINDOWFUNCS |
5677 PVC_INCLUDE_PLACEHOLDERS);
5678 if (parse->hasGroupRTE && parse->groupingSets != NIL)
5679 {
5680 Assert(root->group_rtindex > 0);
5681 non_group_vars = (List *)
5682 remove_nulling_relids((Node *) non_group_vars,
5683 bms_make_singleton(root->group_rtindex),
5684 NULL);
5685 }
5686 add_new_columns_to_pathtarget(input_target, non_group_vars);
5687
5688 /* clean up cruft */
5689 list_free(non_group_vars);
5690 list_free(non_group_cols);
5691
5692 /* XXX this causes some redundant cost calculation ... */
5693 return set_pathtarget_cost_width(root, input_target);
5694}
Bitmapset * bms_make_singleton(int x)
Definition bitmapset.c:216
PathTarget * set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
Definition costsize.c:6514
void list_free(List *list)
Definition list.c:1546
#define PVC_RECURSE_AGGREGATES
Definition optimizer.h:189
#define PVC_RECURSE_WINDOWFUNCS
Definition optimizer.h:191
#define PVC_INCLUDE_PLACEHOLDERS
Definition optimizer.h:192
#define get_pathtarget_sortgroupref(target, colno)
Definition pathnodes.h:1874
Node * remove_nulling_relids(Node *node, const Bitmapset *removable_relids, const Bitmapset *except_relids)
SortGroupClause * get_sortgroupref_clause_noerr(Index sortref, List *clauses)
Definition tlist.c:452
void add_new_columns_to_pathtarget(PathTarget *target, List *exprs)
Definition tlist.c:761
PathTarget * create_empty_pathtarget(void)
Definition tlist.c:690
List * pull_var_clause(Node *node, int flags)
Definition var.c:653

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), Assert, bms_make_singleton(), create_empty_pathtarget(), fb(), get_pathtarget_sortgroupref, get_sortgroupref_clause_noerr(), i, lappend(), lfirst, list_free(), NIL, parse(), pull_var_clause(), PVC_INCLUDE_PLACEHOLDERS, PVC_RECURSE_AGGREGATES, PVC_RECURSE_WINDOWFUNCS, remove_nulling_relids(), root, and set_pathtarget_cost_width().

Referenced by grouping_planner().

◆ make_grouping_rel()

static RelOptInfo * make_grouping_rel ( PlannerInfo *root,
RelOptInfo *input_rel,
PathTarget *target,
bool  target_parallel_safe,
Node *havingQual 
)
static

Definition at line 3969 of file planner.c.

3972{
3973 RelOptInfo *grouped_rel;
3974
3975 if (IS_OTHER_REL(input_rel))
3976 {
3977 grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG,
3978 input_rel->relids);
3979 grouped_rel->reloptkind = RELOPT_OTHER_UPPER_REL;
3980 }
3981 else
3982 {
3983 /*
3984 * By tradition, the relids set for the main grouping relation is
3985 * NULL. (This could be changed, but might require adjustments
3986 * elsewhere.)
3987 */
3988 grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, NULL);
3989 }
3990
3991 /* Set target. */
3992 grouped_rel->reltarget = target;
3993
3994 /*
3995 * If the input relation is not parallel-safe, then the grouped relation
3996 * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
3997 * target list and HAVING quals are parallel-safe.
3998 */
3999 if (input_rel->consider_parallel && target_parallel_safe &&
4000 is_parallel_safe(root, havingQual))
4001 grouped_rel->consider_parallel = true;
4002
4003 /* Assume that the same path generation strategies are allowed */
4004 grouped_rel->pgs_mask = input_rel->pgs_mask;
4005
4006 /*
4007 * If the input rel belongs to a single FDW, so does the grouped rel.
4008 */
4009 grouped_rel->serverid = input_rel->serverid;
4010 grouped_rel->userid = input_rel->userid;
4011 grouped_rel->useridiscurrent = input_rel->useridiscurrent;
4012 grouped_rel->fdwroutine = input_rel->fdwroutine;
4013
4014 return grouped_rel;
4015}
@ RELOPT_OTHER_UPPER_REL
Definition pathnodes.h:964

References RelOptInfo::consider_parallel, fb(), fetch_upper_rel(), IS_OTHER_REL, is_parallel_safe(), RelOptInfo::pgs_mask, RELOPT_OTHER_UPPER_REL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, RelOptInfo::serverid, UPPERREL_GROUP_AGG, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by create_grouping_paths(), and create_partitionwise_grouping_paths().

◆ make_ordered_path()

static Path * make_ordered_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *path,
Path *cheapest_path,
List *pathkeys,
double  limit_tuples 
)
static

Definition at line 7765 of file planner.c.

7767{
7768 bool is_sorted;
7769 int presorted_keys;
7770
7771 is_sorted = pathkeys_count_contained_in(pathkeys,
7772 path->pathkeys,
7773 &presorted_keys);
7774
7775 if (!is_sorted)
7776 {
7777 /*
7778 * Try at least sorting the cheapest path and also try incrementally
7779 * sorting any path which is partially sorted already (no need to deal
7780 * with paths which have presorted keys when incremental sort is
7781 * disabled unless it's the cheapest input path).
7782 */
7783 if (path != cheapest_path &&
7784 (presorted_keys == 0 || !enable_incremental_sort))
7785 return NULL;
7786
7787 /*
7788 * We've no need to consider both a sort and incremental sort. We'll
7789 * just do a sort if there are no presorted keys and an incremental
7790 * sort when there are presorted keys.
7791 */
7792 if (presorted_keys == 0 || !enable_incremental_sort)
7793 path = (Path *) create_sort_path(root,
7794 rel,
7795 path,
7796 pathkeys,
7797 limit_tuples);
7798 else
7799 path = (Path *) create_incremental_sort_path(root,
7800 rel,
7801 path,
7802 pathkeys,
7803 presorted_keys,
7804 limit_tuples);
7805 }
7806
7807 return path;
7808}

References create_incremental_sort_path(), create_sort_path(), enable_incremental_sort, fb(), Path::pathkeys, pathkeys_count_contained_in(), and root.

Referenced by add_paths_to_grouping_rel(), create_final_distinct_paths(), create_partial_distinct_paths(), and create_partial_grouping_paths().
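
A hedged sketch of how the callers listed above typically drive this helper: walk an input pathlist, ask for each path in the required order, and skip paths for which make_ordered_path() returns NULL. The variable names here are illustrative, not quoted from planner.c:

    foreach(lc, input_rel->pathlist)
    {
        Path *path = (Path *) lfirst(lc);

        path = make_ordered_path(root, output_rel, path,
                                 input_rel->cheapest_total_path,
                                 root->group_pathkeys, limit_tuples);
        if (path == NULL)
            continue;           /* not a useful candidate to sort */

        /* ... build the grouping/distinct path on top of 'path' ... */
    }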

◆ make_partial_grouping_target()

static PathTarget * make_partial_grouping_target ( PlannerInfo *root,
PathTarget *grouping_target,
Node *havingQual 
)
static

Definition at line 5713 of file planner.c.

5716{
5717 PathTarget *partial_target;
5718 List *non_group_cols;
5719 List *non_group_exprs;
5720 int i;
5721 ListCell *lc;
5722
5723 partial_target = create_empty_pathtarget();
5724 non_group_cols = NIL;
5725
5726 i = 0;
5727 foreach(lc, grouping_target->exprs)
5728 {
5729 Expr *expr = (Expr *) lfirst(lc);
5730 Index sgref = get_pathtarget_sortgroupref(grouping_target, i);
5731
5732 if (sgref && root->processed_groupClause &&
5733 get_sortgroupref_clause_noerr(sgref,
5734 root->processed_groupClause) != NULL)
5735 {
5736 /*
5737 * It's a grouping column, so add it to the partial_target as-is.
5738 * (This allows the upper agg step to repeat the grouping calcs.)
5739 */
5740 add_column_to_pathtarget(partial_target, expr, sgref);
5741 }
5742 else
5743 {
5744 /*
5745 * Non-grouping column, so just remember the expression for later
5746 * call to pull_var_clause.
5747 */
5748 non_group_cols = lappend(non_group_cols, expr);
5749 }
5750
5751 i++;
5752 }
5753
5754 /*
5755 * If there's a HAVING clause, we'll need the Vars/Aggrefs it uses, too.
5756 */
5757 if (havingQual)
5758 non_group_cols = lappend(non_group_cols, havingQual);
5759
5760 /*
5761 * Pull out all the Vars, PlaceHolderVars, and Aggrefs mentioned in
5762 * non-group cols (plus HAVING), and add them to the partial_target if not
5763 * already present. (An expression used directly as a GROUP BY item will
5764 * be present already.) Note this includes Vars used in resjunk items, so
5765 * we are covering the needs of ORDER BY and window specifications.
5766 */
5767 non_group_exprs = pull_var_clause((Node *) non_group_cols,
5768 PVC_INCLUDE_AGGREGATES |
5769 PVC_RECURSE_WINDOWFUNCS |
5770 PVC_INCLUDE_PLACEHOLDERS);
5771
5772 add_new_columns_to_pathtarget(partial_target, non_group_exprs);
5773
5774 /*
5775 * Adjust Aggrefs to put them in partial mode. At this point all Aggrefs
5776 * are at the top level of the target list, so we can just scan the list
5777 * rather than recursing through the expression trees.
5778 */
5779 foreach(lc, partial_target->exprs)
5780 {
5781 Aggref *aggref = (Aggref *) lfirst(lc);
5782
5783 if (IsA(aggref, Aggref))
5784 {
5785 Aggref *newaggref;
5786
5787 /*
5788 * We shouldn't need to copy the substructure of the Aggref node,
5789 * but flat-copy the node itself to avoid damaging other trees.
5790 */
5791 newaggref = makeNode(Aggref);
5792 memcpy(newaggref, aggref, sizeof(Aggref));
5793
5794 /* For now, assume serialization is required */
5795 mark_partial_aggref(newaggref, AGGSPLIT_INITIAL_SERIAL);
5796
5797 lfirst(lc) = newaggref;
5798 }
5799 }
5800
5801 /* clean up cruft */
5802 list_free(non_group_exprs);
5803 list_free(non_group_cols);
5804
5805 /* XXX this causes some redundant cost calculation ... */
5807}
#define PVC_INCLUDE_AGGREGATES
Definition optimizer.h:188
void mark_partial_aggref(Aggref *agg, AggSplit aggsplit)
Definition planner.c:5816

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), AGGSPLIT_INITIAL_SERIAL, create_empty_pathtarget(), fb(), get_pathtarget_sortgroupref, get_sortgroupref_clause_noerr(), i, IsA, lappend(), lfirst, list_free(), makeNode, mark_partial_aggref(), NIL, pull_var_clause(), PVC_INCLUDE_AGGREGATES, PVC_INCLUDE_PLACEHOLDERS, PVC_RECURSE_WINDOWFUNCS, root, and set_pathtarget_cost_width().

Referenced by create_partial_grouping_paths().

◆ make_pathkeys_for_window()

static List * make_pathkeys_for_window ( PlannerInfo *root,
WindowClause *wc,
List *tlist 
)
static

Definition at line 6351 of file planner.c.

6353{
6354 List *window_pathkeys = NIL;
6355
6356 /* Throw error if can't sort */
6357 if (!grouping_is_sortable(wc->partitionClause))
6358 ereport(ERROR,
6359 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6360 errmsg("could not implement window PARTITION BY"),
6361 errdetail("Window partitioning columns must be of sortable datatypes.")));
6362 if (!grouping_is_sortable(wc->orderClause))
6363 ereport(ERROR,
6364 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6365 errmsg("could not implement window ORDER BY"),
6366 errdetail("Window ordering columns must be of sortable datatypes.")));
6367
6368 /*
6369 * First fetch the pathkeys for the PARTITION BY clause. We can safely
6370 * remove any clauses from the wc->partitionClause for redundant pathkeys.
6371 */
6372 if (wc->partitionClause != NIL)
6373 {
6374 bool sortable;
6375
6376 window_pathkeys = make_pathkeys_for_sortclauses_extended(root,
6377 &wc->partitionClause,
6378 tlist,
6379 true,
6380 false,
6381 &sortable,
6382 false);
6383
6384 Assert(sortable);
6385 }
6386
6387 /*
6388 * In principle, we could also consider removing redundant ORDER BY items
6389 * too as doing so does not alter the result of peer row checks done by
6390 * the executor. However, we must *not* remove the ordering column for
6391 * RANGE OFFSET cases, as the executor needs that for in_range tests even
6392 * if it's known to be equal to some partitioning column.
6393 */
6394 if (wc->orderClause != NIL)
6395 {
6396 List *orderby_pathkeys;
6397
6398 orderby_pathkeys = make_pathkeys_for_sortclauses(root,
6399 wc->orderClause,
6400 tlist);
6401
6402 /* Okay, make the combined pathkeys */
6403 if (window_pathkeys != NIL)
6404 window_pathkeys = append_pathkeys(window_pathkeys, orderby_pathkeys);
6405 else
6406 window_pathkeys = orderby_pathkeys;
6407 }
6408
6409 return window_pathkeys;
6410}
List * make_pathkeys_for_sortclauses_extended(PlannerInfo *root, List **sortclauses, List *tlist, bool remove_redundant, bool remove_group_rtindex, bool *sortable, bool set_ec_sortref)
Definition pathkeys.c:1381
List * partitionClause
List * orderClause

References append_pathkeys(), Assert, ereport, errcode(), errdetail(), errmsg(), ERROR, fb(), grouping_is_sortable(), make_pathkeys_for_sortclauses(), make_pathkeys_for_sortclauses_extended(), NIL, WindowClause::orderClause, WindowClause::partitionClause, and root.

Referenced by create_one_window_path(), and standard_qp_callback().

◆ make_sort_input_target()

static PathTarget * make_sort_input_target ( PlannerInfo *root,
PathTarget *final_target,
bool *have_postponed_srfs 
)
static

Definition at line 6479 of file planner.c.

6482{
6483 Query *parse = root->parse;
6484 PathTarget *input_target;
6485 int ncols;
6486 bool *col_is_srf;
6487 bool *postpone_col;
6488 bool have_srf;
6489 bool have_volatile;
6490 bool have_expensive;
6491 bool have_srf_sortcols;
6492 bool postpone_srfs;
6493 List *postponable_cols;
6494 List *postponable_vars;
6495 int i;
6496 ListCell *lc;
6497
6498 /* Shouldn't get here unless query has ORDER BY */
6499 Assert(parse->sortClause);
6500
6501 *have_postponed_srfs = false; /* default result */
6502
6503 /* Inspect tlist and collect per-column information */
6504 ncols = list_length(final_target->exprs);
6505 col_is_srf = (bool *) palloc0(ncols * sizeof(bool));
6506 postpone_col = (bool *) palloc0(ncols * sizeof(bool));
6507 have_srf = have_volatile = have_expensive = have_srf_sortcols = false;
6508
6509 i = 0;
6510 foreach(lc, final_target->exprs)
6511 {
6512 Expr *expr = (Expr *) lfirst(lc);
6513
6514 /*
6515 * If the column has a sortgroupref, assume it has to be evaluated
6516 * before sorting. Generally such columns would be ORDER BY, GROUP
6517 * BY, etc targets. One exception is columns that were removed from
6518 * GROUP BY by remove_useless_groupby_columns() ... but those would
6519 * only be Vars anyway. There don't seem to be any cases where it
6520 * would be worth the trouble to double-check.
6521 */
6522 if (get_pathtarget_sortgroupref(final_target, i) == 0)
6523 {
6524 /*
6525 * Check for SRF or volatile functions. Check the SRF case first
6526 * because we must know whether we have any postponed SRFs.
6527 */
6528 if (parse->hasTargetSRFs &&
6529 expression_returns_set((Node *) expr))
6530 {
6531 /* We'll decide below whether these are postponable */
6532 col_is_srf[i] = true;
6533 have_srf = true;
6534 }
6535 else if (contain_volatile_functions((Node *) expr))
6536 {
6537 /* Unconditionally postpone */
6538 postpone_col[i] = true;
6539 have_volatile = true;
6540 }
6541 else
6542 {
6543 /*
6544 * Else check the cost. XXX it's annoying to have to do this
6545 * when set_pathtarget_cost_width() just did it. Refactor to
6546 * allow sharing the work?
6547 */
6548 QualCost cost;
6549
6550 cost_qual_eval_node(&cost, (Node *) expr, root);
6551
6552 /*
6553 * We arbitrarily define "expensive" as "more than 10X
6554 * cpu_operator_cost". Note this will take in any PL function
6555 * with default cost.
6556 */
6557 if (cost.per_tuple > 10 * cpu_operator_cost)
6558 {
6559 postpone_col[i] = true;
6560 have_expensive = true;
6561 }
6562 }
6563 }
6564 else
6565 {
6566 /* For sortgroupref cols, just check if any contain SRFs */
6567 if (!have_srf_sortcols &&
6568 parse->hasTargetSRFs &&
6569 expression_returns_set((Node *) expr))
6570 have_srf_sortcols = true;
6571 }
6572
6573 i++;
6574 }
6575
6576 /*
6577 * We can postpone SRFs if we have some but none are in sortgroupref cols.
6578 */
6579 postpone_srfs = (have_srf && !have_srf_sortcols);
6580
6581 /*
6582 * If we don't need a post-sort projection, just return final_target.
6583 */
6584 if (!(postpone_srfs || have_volatile ||
6585 (have_expensive &&
6586 (parse->limitCount || root->tuple_fraction > 0))))
6587 return final_target;
6588
6589 /*
6590 * Report whether the post-sort projection will contain set-returning
6591 * functions. This is important because it affects whether the Sort can
6592 * rely on the query's LIMIT (if any) to bound the number of rows it needs
6593 * to return.
6594 */
6595 *have_postponed_srfs = postpone_srfs;
6596
6597 /*
6598 * Construct the sort-input target, taking all non-postponable columns and
6599 * then adding Vars, PlaceHolderVars, Aggrefs, and WindowFuncs found in
6600 * the postponable ones.
6601 */
6602 input_target = create_empty_pathtarget();
6603 postponable_cols = NIL;
6604
6605 i = 0;
6606 foreach(lc, final_target->exprs)
6607 {
6608 Expr *expr = (Expr *) lfirst(lc);
6609
6610 if (postpone_col[i] || (postpone_srfs && col_is_srf[i]))
6611 postponable_cols = lappend(postponable_cols, expr);
6612 else
6613 add_column_to_pathtarget(input_target, expr,
6614 get_pathtarget_sortgroupref(final_target, i));
6615
6616 i++;
6617 }
6618
6619 /*
6620 * Pull out all the Vars, Aggrefs, and WindowFuncs mentioned in
6621 * postponable columns, and add them to the sort-input target if not
6622 * already present. (Some might be there already.) We mustn't
6623 * deconstruct Aggrefs or WindowFuncs here, since the projection node
6624 * would be unable to recompute them.
6625 */
6626 postponable_vars = pull_var_clause((Node *) postponable_cols,
6627 PVC_INCLUDE_AGGREGATES |
6628 PVC_INCLUDE_WINDOWFUNCS |
6629 PVC_INCLUDE_PLACEHOLDERS);
6630 add_new_columns_to_pathtarget(input_target, postponable_vars);
6631
6632 /* clean up cruft */
6633 list_free(postponable_vars);
6634 list_free(postponable_cols);
6635
6636 /* XXX this represents even more redundant cost calculation ... */
6637 return set_pathtarget_cost_width(root, input_target);
6638}
bool contain_volatile_functions(Node *clause)
Definition clauses.c:547
double cpu_operator_cost
Definition costsize.c:134
void cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
Definition costsize.c:4929
bool expression_returns_set(Node *clause)
Definition nodeFuncs.c:763
#define PVC_INCLUDE_WINDOWFUNCS
Definition optimizer.h:190
Cost per_tuple
Definition pathnodes.h:121

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), Assert, contain_volatile_functions(), cost_qual_eval_node(), cpu_operator_cost, create_empty_pathtarget(), expression_returns_set(), fb(), get_pathtarget_sortgroupref, i, lappend(), lfirst, list_free(), list_length(), NIL, palloc0(), parse(), QualCost::per_tuple, pull_var_clause(), PVC_INCLUDE_AGGREGATES, PVC_INCLUDE_PLACEHOLDERS, PVC_INCLUDE_WINDOWFUNCS, root, and set_pathtarget_cost_width().

Referenced by grouping_planner().

◆ make_window_input_target()

static PathTarget * make_window_input_target ( PlannerInfo *root,
PathTarget *final_target,
List *activeWindows 
)
static

Definition at line 6231 of file planner.c.

6234{
6235 PathTarget *input_target;
6236 Bitmapset *sgrefs;
6237 List *flattenable_cols;
6238 List *flattenable_vars;
6239 int i;
6240 ListCell *lc;
6241
6242 Assert(root->parse->hasWindowFuncs);
6243
6244 /*
6245 * Collect the sortgroupref numbers of window PARTITION/ORDER BY clauses
6246 * into a bitmapset for convenient reference below.
6247 */
6248 sgrefs = NULL;
6249 foreach(lc, activeWindows)
6250 {
6251 WindowClause *wc = lfirst_node(WindowClause, lc);
6252 ListCell *lc2;
6253
6254 foreach(lc2, wc->partitionClause)
6255 {
6256 SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
6257
6258 sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6259 }
6260 foreach(lc2, wc->orderClause)
6261 {
6262 SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
6263
6264 sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6265 }
6266 }
6267
6268 /* Add in sortgroupref numbers of GROUP BY clauses, too */
6269 foreach(lc, root->processed_groupClause)
6270 {
6271 SortGroupClause *grpcl = lfirst_node(SortGroupClause, lc);
6272
6273 sgrefs = bms_add_member(sgrefs, grpcl->tleSortGroupRef);
6274 }
6275
6276 /*
6277 * Construct a target containing all the non-flattenable targetlist items,
6278 * and save aside the others for a moment.
6279 */
6280 input_target = create_empty_pathtarget();
6281 flattenable_cols = NIL;
6282
6283 i = 0;
6284 foreach(lc, final_target->exprs)
6285 {
6286 Expr *expr = (Expr *) lfirst(lc);
6287 Index sgref = get_pathtarget_sortgroupref(final_target, i);
6288
6289 /*
6290 * Don't want to deconstruct window clauses or GROUP BY items. (Note
6291 * that such items can't contain window functions, so it's okay to
6292 * compute them below the WindowAgg nodes.)
6293 */
6294 if (sgref != 0 && bms_is_member(sgref, sgrefs))
6295 {
6296 /*
6297 * Don't want to deconstruct this value, so add it to the input
6298 * target as-is.
6299 */
6300 add_column_to_pathtarget(input_target, expr, sgref);
6301 }
6302 else
6303 {
6304 /*
6305 * Column is to be flattened, so just remember the expression for
6306 * later call to pull_var_clause.
6307 */
6308 flattenable_cols = lappend(flattenable_cols, expr);
6309 }
6310
6311 i++;
6312 }
6313
6314 /*
6315 * Pull out all the Vars and Aggrefs mentioned in flattenable columns, and
6316 * add them to the input target if not already present. (Some might be
6317 * there already because they're used directly as window/group clauses.)
6318 *
6319 * Note: it's essential to use PVC_INCLUDE_AGGREGATES here, so that any
6320 * Aggrefs are placed in the Agg node's tlist and not left to be computed
6321 * at higher levels. On the other hand, we should recurse into
6322 * WindowFuncs to make sure their input expressions are available.
6323 */
6324 flattenable_vars = pull_var_clause((Node *) flattenable_cols,
6325 PVC_INCLUDE_AGGREGATES |
6326 PVC_RECURSE_WINDOWFUNCS |
6327 PVC_INCLUDE_PLACEHOLDERS);
6328 add_new_columns_to_pathtarget(input_target, flattenable_vars);
6329
6330 /* clean up cruft */
6331 list_free(flattenable_vars);
6332 list_free(flattenable_cols);
6333
6334 /* XXX this causes some redundant cost calculation ... */
6335 return set_pathtarget_cost_width(root, input_target);
6336}

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), Assert, bms_add_member(), bms_is_member(), create_empty_pathtarget(), fb(), get_pathtarget_sortgroupref, i, lappend(), lfirst, lfirst_node, list_free(), NIL, WindowClause::orderClause, WindowClause::partitionClause, pull_var_clause(), PVC_INCLUDE_AGGREGATES, PVC_INCLUDE_PLACEHOLDERS, PVC_RECURSE_WINDOWFUNCS, root, and set_pathtarget_cost_width().

Referenced by grouping_planner().

◆ mark_partial_aggref()

void mark_partial_aggref ( Aggref *agg,
AggSplit  aggsplit 
)

Definition at line 5816 of file planner.c.

5817{
5818 /* aggtranstype should be computed by this point */
5819 Assert(OidIsValid(agg->aggtranstype));
5820 /* ... but aggsplit should still be as the parser left it */
5821 Assert(agg->aggsplit == AGGSPLIT_SIMPLE);
5822
5823 /* Mark the Aggref with the intended partial-aggregation mode */
5824 agg->aggsplit = aggsplit;
5825
5826 /*
5827 * Adjust result type if needed. Normally, a partial aggregate returns
5828 * the aggregate's transition type; but if that's INTERNAL and we're
5829 * serializing, it returns BYTEA instead.
5830 */
5831 if (DO_AGGSPLIT_SKIPFINAL(aggsplit))
5832 {
5833 if (agg->aggtranstype == INTERNALOID && DO_AGGSPLIT_SERIALIZE(aggsplit))
5834 agg->aggtype = BYTEAOID;
5835 else
5836 agg->aggtype = agg->aggtranstype;
5837 }
5838}
#define DO_AGGSPLIT_SKIPFINAL(as)
Definition nodes.h:396
#define DO_AGGSPLIT_SERIALIZE(as)
Definition nodes.h:397

References AGGSPLIT_SIMPLE, Assert, DO_AGGSPLIT_SERIALIZE, DO_AGGSPLIT_SKIPFINAL, fb(), and OidIsValid.

Referenced by convert_combining_aggrefs(), create_rel_agg_info(), and make_partial_grouping_target().

◆ name_active_windows()

static void name_active_windows ( List *activeWindows )
static

Definition at line 6111 of file planner.c.

6112{
6113 int next_n = 1;
6114 char newname[16];
6115 ListCell *lc;
6116
6117 foreach(lc, activeWindows)
6118 {
6119 WindowClause *wc = lfirst_node(WindowClause, lc);
6120
6121 /* Nothing to do if it has a name already. */
6122 if (wc->name)
6123 continue;
6124
6125 /* Select a name not currently present in the list. */
6126 for (;;)
6127 {
6128 ListCell *lc2;
6129
6130 snprintf(newname, sizeof(newname), "w%d", next_n++);
6131 foreach(lc2, activeWindows)
6132 {
6133 WindowClause *wc2 = lfirst_node(WindowClause, lc2);
6134
6135 if (wc2->name && strcmp(wc2->name, newname) == 0)
6136 break; /* matched */
6137 }
6138 if (lc2 == NULL)
6139 break; /* reached the end with no match */
6140 }
6141 wc->name = pstrdup(newname);
6142 }
6143}
#define snprintf
Definition port.h:260

References fb(), lfirst_node, pstrdup(), and snprintf.

Referenced by grouping_planner().

◆ optimize_window_clauses()

static void optimize_window_clauses ( PlannerInfo *root,
WindowFuncLists *wflists 
)
static

Definition at line 5888 of file planner.c.

5889{
5890 List *windowClause = root->parse->windowClause;
5891 ListCell *lc;
5892
5893 foreach(lc, windowClause)
5894 {
5895 WindowClause *wc = lfirst_node(WindowClause, lc);
5896 ListCell *lc2;
5897 int optimizedFrameOptions = 0;
5898
5899 Assert(wc->winref <= wflists->maxWinRef);
5900
5901 /* skip any WindowClauses that have no WindowFuncs */
5902 if (wflists->windowFuncs[wc->winref] == NIL)
5903 continue;
5904
5905 foreach(lc2, wflists->windowFuncs[wc->winref])
5906 {
5907 SupportRequestOptimizeWindowClause req;
5908 SupportRequestOptimizeWindowClause *res;
5909 WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
5910 Oid prosupport;
5911
5912 prosupport = get_func_support(wfunc->winfnoid);
5913
5914 /* Check if there's a support function for 'wfunc' */
5915 if (!OidIsValid(prosupport))
5916 break; /* can't optimize this WindowClause */
5917
5918 req.type = T_SupportRequestOptimizeWindowClause;
5919 req.window_clause = wc;
5920 req.window_func = wfunc;
5921 req.frameOptions = wc->frameOptions;
5922
5923 /* call the support function */
5924 res = (SupportRequestOptimizeWindowClause *)
5925 DatumGetPointer(OidFunctionCall1(prosupport,
5926 PointerGetDatum(&req)));
5927
5928 /*
5929 * Skip to next WindowClause if the support function does not
5930 * support this request type.
5931 */
5932 if (res == NULL)
5933 break;
5934
5935 /*
5936 * Save these frameOptions for the first WindowFunc for this
5937 * WindowClause.
5938 */
5939 if (foreach_current_index(lc2) == 0)
5940 optimizedFrameOptions = res->frameOptions;
5941
5942 /*
5943 * On subsequent WindowFuncs, if the frameOptions are not the same
5944 * then we're unable to optimize the frameOptions for this
5945 * WindowClause.
5946 */
5947 else if (optimizedFrameOptions != res->frameOptions)
5948 break; /* skip to the next WindowClause, if any */
5949 }
5950
5951 /* adjust the frameOptions if all WindowFunc's agree that it's ok */
5952 if (lc2 == NULL && wc->frameOptions != optimizedFrameOptions)
5953 {
5954 ListCell *lc3;
5955
5956 /* apply the new frame options */
5957 wc->frameOptions = optimizedFrameOptions;
5958
5959 /*
5960 * We now check to see if changing the frameOptions has caused
5961 * this WindowClause to be a duplicate of some other WindowClause.
5962 * This can only happen if we have multiple WindowClauses, so
5963 * don't bother if there's only 1.
5964 */
5965 if (list_length(windowClause) == 1)
5966 continue;
5967
5968 /*
5969 * Do the duplicate check and reuse the existing WindowClause if
5970 * we find a duplicate.
5971 */
5972 foreach(lc3, windowClause)
5973 {
5974 WindowClause *existing_wc = lfirst_node(WindowClause, lc3);
5975
5976 /* skip over the WindowClause we're currently editing */
5977 if (existing_wc == wc)
5978 continue;
5979
5980 /*
5981 * Perform the same duplicate check that is done in
5982 * transformWindowFuncCall.
5983 */
5984 if (equal(wc->partitionClause, existing_wc->partitionClause) &&
5985 equal(wc->orderClause, existing_wc->orderClause) &&
5986 wc->frameOptions == existing_wc->frameOptions &&
5987 equal(wc->startOffset, existing_wc->startOffset) &&
5988 equal(wc->endOffset, existing_wc->endOffset))
5989 {
5990 ListCell *lc4;
5991
5992 /*
5993 * Now move each WindowFunc in 'wc' into 'existing_wc'.
5994 * This required adjusting each WindowFunc's winref and
5995 * moving the WindowFuncs in 'wc' to the list of
5996 * WindowFuncs in 'existing_wc'.
5997 */
5998 foreach(lc4, wflists->windowFuncs[wc->winref])
5999 {
6000 WindowFunc *wfunc = lfirst_node(WindowFunc, lc4);
6001
6002 wfunc->winref = existing_wc->winref;
6003 }
6004
6005 /* move list items */
6006 wflists->windowFuncs[existing_wc->winref] = list_concat(wflists->windowFuncs[existing_wc->winref],
6007 wflists->windowFuncs[wc->winref]);
6008 wflists->windowFuncs[wc->winref] = NIL;
6009
6010 /*
6011 * transformWindowFuncCall() should have made sure there
6012 * are no other duplicates, so we needn't bother looking
6013 * any further.
6014 */
6015 break;
6016 }
6017 }
6018 }
6019 }
6020}
#define OidFunctionCall1(functionId, arg1)
Definition fmgr.h:722
RegProcedure get_func_support(Oid funcid)
Definition lsyscache.c:2008
static Datum PointerGetDatum(const void *X)
Definition postgres.h:352
static Pointer DatumGetPointer(Datum X)
Definition postgres.h:342
Node * startOffset
Node * endOffset
Index winref
Definition primnodes.h:611

References Assert, DatumGetPointer(), WindowClause::endOffset, equal(), fb(), foreach_current_index, WindowClause::frameOptions, SupportRequestOptimizeWindowClause::frameOptions, get_func_support(), lfirst_node, list_concat(), list_length(), NIL, OidFunctionCall1, OidIsValid, WindowClause::orderClause, WindowClause::partitionClause, PointerGetDatum(), root, WindowClause::startOffset, WindowFunc::winfnoid, WindowClause::winref, and WindowFunc::winref.

Referenced by grouping_planner().
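
The support-function side of this protocol lives with each window function. Below is a condensed sketch modeled on window_row_number_support() in utils/adt/windowfuncs.c (row_number() can always run under a cheap ROWS frame, so its support function rewrites the frame options); treat the wording here as paraphrase of that function, not a verbatim quote:

    Datum
    window_row_number_support(PG_FUNCTION_ARGS)
    {
        Node *rawreq = (Node *) PG_GETARG_POINTER(0);

        if (IsA(rawreq, SupportRequestOptimizeWindowClause))
        {
            SupportRequestOptimizeWindowClause *req =
                (SupportRequestOptimizeWindowClause *) rawreq;

            /*
             * row_number() just counts rows, so the frame may always become
             * "ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW", which
             * avoids peer-row checks at execution time.
             */
            req->frameOptions = (FRAMEOPTION_NONDEFAULT |
                                 FRAMEOPTION_ROWS |
                                 FRAMEOPTION_START_UNBOUNDED_PRECEDING |
                                 FRAMEOPTION_END_CURRENT_ROW);

            PG_RETURN_POINTER(req);
        }

        PG_RETURN_POINTER(NULL);    /* request type not handled */
    }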

◆ plan_cluster_use_sort()

bool plan_cluster_use_sort ( Oid  tableOid,
Oid  indexOid 
)

Definition at line 6897 of file planner.c.

6898{
6899 PlannerInfo *root;
6900 Query *query;
6901 PlannerGlobal *glob;
6902 RangeTblEntry *rte;
6903 RelOptInfo *rel;
6904 IndexOptInfo *indexInfo;
6905 QualCost indexExprCost;
6906 Cost comparisonCost;
6907 Path *seqScanPath;
6908 Path seqScanAndSortPath;
6909 IndexPath *indexScanPath;
6910 ListCell *lc;
6911
6912 /* We can short-circuit the cost comparison if indexscans are disabled */
6913 if (!enable_indexscan)
6914 return true; /* use sort */
6915
6916 /* Set up mostly-dummy planner state */
6917 query = makeNode(Query);
6918 query->commandType = CMD_SELECT;
6919
6920 glob = makeNode(PlannerGlobal);
6921
6922 root = makeNode(PlannerInfo);
6923 root->parse = query;
6924 root->glob = glob;
6925 root->query_level = 1;
6926 root->planner_cxt = CurrentMemoryContext;
6927 root->wt_param_id = -1;
6928 root->join_domains = list_make1(makeNode(JoinDomain));
6929
6930 /* Build a minimal RTE for the rel */
6931 rte = makeNode(RangeTblEntry);
6932 rte->rtekind = RTE_RELATION;
6933 rte->relid = tableOid;
6934 rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
6935 rte->rellockmode = AccessShareLock;
6936 rte->lateral = false;
6937 rte->inh = false;
6938 rte->inFromCl = true;
6939 query->rtable = list_make1(rte);
6940 addRTEPermissionInfo(&query->rteperminfos, rte);
6941
6942 /* Set up RTE/RelOptInfo arrays */
6943 setup_simple_rel_arrays(root);
6944
6945 /* Build RelOptInfo */
6946 rel = build_simple_rel(root, 1, NULL);
6947
6948 /* Locate IndexOptInfo for the target index */
6949 indexInfo = NULL;
6950 foreach(lc, rel->indexlist)
6951 {
6952 indexInfo = lfirst_node(IndexOptInfo, lc);
6953 if (indexInfo->indexoid == indexOid)
6954 break;
6955 }
6956
6957 /*
6958 * It's possible that get_relation_info did not generate an IndexOptInfo
6959 * for the desired index; this could happen if it's not yet reached its
6960 * indcheckxmin usability horizon, or if it's a system index and we're
6961 * ignoring system indexes. In such cases we should tell CLUSTER to not
6962 * trust the index contents but use seqscan-and-sort.
6963 */
6964 if (lc == NULL) /* not in the list? */
6965 return true; /* use sort */
6966
6967 /*
6968 * Rather than doing all the pushups that would be needed to use
6969 * set_baserel_size_estimates, just do a quick hack for rows and width.
6970 */
6971 rel->rows = rel->tuples;
6972 rel->reltarget->width = get_relation_data_width(tableOid, NULL);
6973
6974 root->total_table_pages = rel->pages;
6975
6976 /*
6977 * Determine eval cost of the index expressions, if any. We need to
6978 * charge twice that amount for each tuple comparison that happens during
6979 * the sort, since tuplesort.c will have to re-evaluate the index
6980 * expressions each time. (XXX that's pretty inefficient...)
6981 */
6982 cost_qual_eval(&indexExprCost, indexInfo->indexprs, root);
6983 comparisonCost = 2.0 * (indexExprCost.startup + indexExprCost.per_tuple);
6984
6985 /* Estimate the cost of seq scan + sort */
6986 seqScanPath = create_seqscan_path(root, rel, NULL, 0);
6987 cost_sort(&seqScanAndSortPath, root, NIL,
6988 seqScanPath->disabled_nodes,
6989 seqScanPath->total_cost, rel->tuples, rel->reltarget->width,
6990 comparisonCost, maintenance_work_mem, -1.0);
6991
6992 /* Estimate the cost of index scan */
6993 indexScanPath = create_index_path(root, indexInfo,
6994 NIL, NIL, NIL, NIL,
6995 ForwardScanDirection, false,
6996 NULL, 1.0, false);
6997
6998 return (seqScanAndSortPath.total_cost < indexScanPath->path.total_cost);
6999}
void cost_sort(Path *path, PlannerInfo *root, List *pathkeys, int input_disabled_nodes, Cost input_cost, double tuples, int width, Cost comparison_cost, int sort_mem, double limit_tuples)
Definition costsize.c:2200
void cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
Definition costsize.c:4903
bool enable_indexscan
Definition costsize.c:146
int maintenance_work_mem
Definition globals.c:133
#define AccessShareLock
Definition lockdefs.h:36
MemoryContext CurrentMemoryContext
Definition mcxt.c:160
double Cost
Definition nodes.h:261
RTEPermissionInfo * addRTEPermissionInfo(List **rteperminfos, RangeTblEntry *rte)
@ RTE_RELATION
IndexPath * create_index_path(PlannerInfo *root, IndexOptInfo *index, List *indexclauses, List *indexorderbys, List *indexorderbycols, List *pathkeys, ScanDirection indexscandir, bool indexonly, Relids required_outer, double loop_count, bool partial_path)
Definition pathnode.c:1047
Path * create_seqscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer, int parallel_workers)
Definition pathnode.c:981
int32 get_relation_data_width(Oid relid, int32 *attr_widths)
Definition plancat.c:1477
void setup_simple_rel_arrays(PlannerInfo *root)
Definition relnode.c:111
RelOptInfo * build_simple_rel(PlannerInfo *root, int relid, RelOptInfo *parent)
Definition relnode.c:209
@ ForwardScanDirection
Definition sdir.h:28
List * rtable
Definition parsenodes.h:175
CmdType commandType
Definition parsenodes.h:121
Cardinality tuples
Definition pathnodes.h:1078
BlockNumber pages
Definition pathnodes.h:1077
List * indexlist
Definition pathnodes.h:1073

References AccessShareLock, addRTEPermissionInfo(), build_simple_rel(), CMD_SELECT, Query::commandType, cost_qual_eval(), cost_sort(), create_index_path(), create_seqscan_path(), CurrentMemoryContext, enable_indexscan, fb(), ForwardScanDirection, get_relation_data_width(), RelOptInfo::indexlist, IndexOptInfo::indexoid, lfirst_node, list_make1, maintenance_work_mem, makeNode, NIL, RelOptInfo::pages, RelOptInfo::reltarget, root, RelOptInfo::rows, Query::rtable, RTE_RELATION, setup_simple_rel_arrays(), RelOptInfo::tuples, and PathTarget::width.

Referenced by copy_table_data().
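
A hedged sketch of the caller's side in cluster.c's copy_table_data(): this function only picks the cheaper strategy, and the caller carries it out (the use_sort variable is illustrative):

    /* CLUSTER rewrites via sort when the planner says it is cheaper */
    if (plan_cluster_use_sort(tableOid, indexOid))
        use_sort = true;    /* seqscan the heap, then tuplesort on the index keys */
    else
        use_sort = false;   /* read the rows back in order via an index scan */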

◆ plan_create_index_workers()

int plan_create_index_workers ( Oid  tableOid,
Oid  indexOid 
)

Definition at line 7019 of file planner.c.

7020{
7021 PlannerInfo *root;
7022 Query *query;
7023 PlannerGlobal *glob;
7024 RangeTblEntry *rte;
7025 Relation heap;
7026 Relation index;
7027 RelOptInfo *rel;
7028 int parallel_workers;
7029 BlockNumber heap_blocks;
7030 double reltuples;
7031 double allvisfrac;
7032
7033 /*
7034 * We don't allow performing parallel operation in standalone backend or
7035 * when parallelism is disabled.
7036 */
7037 if (!IsUnderPostmaster || max_parallel_maintenance_workers == 0)
7038 return 0;
7039
7040 /* Set up largely-dummy planner state */
7041 query = makeNode(Query);
7042 query->commandType = CMD_SELECT;
7043
7044 glob = makeNode(PlannerGlobal);
7045
7046 root = makeNode(PlannerInfo);
7047 root->parse = query;
7048 root->glob = glob;
7049 root->query_level = 1;
7050 root->planner_cxt = CurrentMemoryContext;
7051 root->wt_param_id = -1;
7052 root->join_domains = list_make1(makeNode(JoinDomain));
7053
7054 /*
7055 * Build a minimal RTE.
7056 *
7057 * Mark the RTE with inh = true. This is a kludge to prevent
7058 * get_relation_info() from fetching index info, which is necessary
7059 * because it does not expect that any IndexOptInfo is currently
7060 * undergoing REINDEX.
7061 */
7062 rte = makeNode(RangeTblEntry);
7063 rte->rtekind = RTE_RELATION;
7064 rte->relid = tableOid;
7065 rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
7066 rte->rellockmode = AccessShareLock;
7067 rte->lateral = false;
7068 rte->inh = true;
7069 rte->inFromCl = true;
7070 query->rtable = list_make1(rte);
7071 addRTEPermissionInfo(&query->rteperminfos, rte);
7072
7073 /* Set up RTE/RelOptInfo arrays */
7074 setup_simple_rel_arrays(root);
7075
7076 /* Build RelOptInfo */
7077 rel = build_simple_rel(root, 1, NULL);
7078
7079 /* Rels are assumed already locked by the caller */
7080 heap = table_open(tableOid, NoLock);
7081 index = index_open(indexOid, NoLock);
7082
7083 /*
7084 * Determine if it's safe to proceed.
7085 *
7086 * Currently, parallel workers can't access the leader's temporary tables.
7087 * Furthermore, any index predicate or index expressions must be parallel
7088 * safe.
7089 */
7090 if (heap->rd_rel->relpersistence == RELPERSISTENCE_TEMP ||
7091 !is_parallel_safe(root, (Node *) RelationGetIndexExpressions(index)) ||
7092 !is_parallel_safe(root, (Node *) RelationGetIndexPredicate(index)))
7093 {
7094 parallel_workers = 0;
7095 goto done;
7096 }
7097
7098 /*
7099 * If parallel_workers storage parameter is set for the table, accept that
7100 * as the number of parallel worker processes to launch (though still cap
7101 * at max_parallel_maintenance_workers). Note that we deliberately do not
7102 * consider any other factor when parallel_workers is set. (e.g., memory
7103 * use by workers.)
7104 */
7105 if (rel->rel_parallel_workers != -1)
7106 {
7107 parallel_workers = Min(rel->rel_parallel_workers,
7108 max_parallel_maintenance_workers);
7109 goto done;
7110 }
7111
7112 /*
7113 * Estimate heap relation size ourselves, since rel->pages cannot be
7114 * trusted (heap RTE was marked as inheritance parent)
7115 */
7116 estimate_rel_size(heap, NULL, &heap_blocks, &reltuples, &allvisfrac);
7117
7118 /*
7119 * Determine number of workers to scan the heap relation using generic
7120 * model
7121 */
7122 parallel_workers = compute_parallel_worker(rel, heap_blocks, -1,
7123 max_parallel_maintenance_workers);
7124
7125 /*
7126 * Cap workers based on available maintenance_work_mem as needed.
7127 *
7128 * Note that each tuplesort participant receives an even share of the
7129 * total maintenance_work_mem budget. Aim to leave participants
7130 * (including the leader as a participant) with no less than 32MB of
7131 * memory. This leaves cases where maintenance_work_mem is set to 64MB
7132 * immediately past the threshold of being capable of launching a single
7133 * parallel worker to sort.
7134 */
7135 while (parallel_workers > 0 &&
7136 maintenance_work_mem / (parallel_workers + 1) < 32 * 1024)
7137 parallel_workers--;
7138
7139done:
7140 index_close(index, NoLock);
7141 table_close(heap, NoLock);
7142
7143 return parallel_workers;
7144}
int compute_parallel_worker(RelOptInfo *rel, double heap_pages, double index_pages, int max_workers)
Definition allpaths.c:4726
uint32 BlockNumber
Definition block.h:31
int max_parallel_maintenance_workers
Definition globals.c:134
bool IsUnderPostmaster
Definition globals.c:120
void index_close(Relation relation, LOCKMODE lockmode)
Definition indexam.c:177
Relation index_open(Oid relationId, LOCKMODE lockmode)
Definition indexam.c:133
#define NoLock
Definition lockdefs.h:34
void estimate_rel_size(Relation rel, int32 *attr_widths, BlockNumber *pages, double *tuples, double *allvisfrac)
Definition plancat.c:1310
List * RelationGetIndexPredicate(Relation relation)
Definition relcache.c:5205
List * RelationGetIndexExpressions(Relation relation)
Definition relcache.c:5092
int rel_parallel_workers
Definition pathnodes.h:1085
Form_pg_class rd_rel
Definition rel.h:111
Definition type.h:96
void table_close(Relation relation, LOCKMODE lockmode)
Definition table.c:126
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition table.c:40

References AccessShareLock, addRTEPermissionInfo(), build_simple_rel(), CMD_SELECT, Query::commandType, compute_parallel_worker(), CurrentMemoryContext, estimate_rel_size(), fb(), index_close(), index_open(), is_parallel_safe(), IsUnderPostmaster, list_make1, maintenance_work_mem, makeNode, max_parallel_maintenance_workers, Min, NoLock, RelationData::rd_rel, RelOptInfo::rel_parallel_workers, RelationGetIndexExpressions(), RelationGetIndexPredicate(), root, Query::rtable, RTE_RELATION, setup_simple_rel_arrays(), table_close(), and table_open().

Referenced by index_build().
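
To make the final capping rule concrete, here is the arithmetic from the loop above worked through (values in kB, as maintenance_work_mem is; the variable names are illustrative):

    int mwm_kb = 64 * 1024;         /* maintenance_work_mem = 64MB */
    int parallel_workers = 2;       /* value from the generic model */

    /*
     * 64MB / 3 participants is about 21.8MB, below the 32MB floor, so one
     * worker is shed; 64MB / 2 participants is exactly 32MB, which passes.
     */
    while (parallel_workers > 0 &&
           mwm_kb / (parallel_workers + 1) < 32 * 1024)
        parallel_workers--;         /* ends with parallel_workers == 1 */

This is why the comment notes that maintenance_work_mem = 64MB sits immediately past the threshold of being able to launch a single parallel sort worker.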

◆ planner()

PlannedStmt * planner ( Query *parse,
const char *query_string,
int  cursorOptions,
ParamListInfo  boundParams,
ExplainState *es 
)

Definition at line 315 of file planner.c.

317{
318 PlannedStmt *result;
319
320 if (planner_hook)
321 result = (*planner_hook) (parse, query_string, cursorOptions,
322 boundParams, es);
323 else
324 result = standard_planner(parse, query_string, cursorOptions,
325 boundParams, es);
326
327 pgstat_report_plan_id(result->planId, false);
328
329 return result;
330}
void pgstat_report_plan_id(int64 plan_id, bool force)
planner_hook_type planner_hook
Definition planner.c:74
PlannedStmt * standard_planner(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams, ExplainState *es)
Definition planner.c:333

References parse(), pgstat_report_plan_id(), PlannedStmt::planId, planner_hook, and standard_planner().

Referenced by pg_plan_query().
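
Since planner() consults planner_hook before falling back to standard_planner(), an extension can wrap planning wholesale. A minimal sketch of the conventional hook installation follows; the chaining pattern and the _PG_init() entry point are standard extension practice, and only the signature is taken from the definition above:

    static planner_hook_type prev_planner_hook = NULL;

    static PlannedStmt *
    my_planner(Query *parse, const char *query_string, int cursorOptions,
               ParamListInfo boundParams, ExplainState *es)
    {
        /* ... inspect or instrument the Query here ... */
        if (prev_planner_hook)
            return prev_planner_hook(parse, query_string, cursorOptions,
                                     boundParams, es);
        return standard_planner(parse, query_string, cursorOptions,
                                boundParams, es);
    }

    void
    _PG_init(void)
    {
        prev_planner_hook = planner_hook;
        planner_hook = my_planner;
    }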

◆ postprocess_setop_tlist()

static List * postprocess_setop_tlist ( List *new_tlist,
List *orig_tlist 
)
static

Definition at line 5851 of file planner.c.

5852{
5853 ListCell *l;
5854 ListCell *orig_tlist_item = list_head(orig_tlist);
5855
5856 foreach(l, new_tlist)
5857 {
5858 TargetEntry *new_tle = lfirst_node(TargetEntry, l);
5859 TargetEntry *orig_tle;
5860
5861 /* ignore resjunk columns in setop result */
5862 if (new_tle->resjunk)
5863 continue;
5864
5865 Assert(orig_tlist_item != NULL);
5866 orig_tle = lfirst_node(TargetEntry, orig_tlist_item);
5867 orig_tlist_item = lnext(orig_tlist, orig_tlist_item);
5868 if (orig_tle->resjunk) /* should not happen */
5869 elog(ERROR, "resjunk output columns are not implemented");
5870 Assert(new_tle->resno == orig_tle->resno);
5871 new_tle->ressortgroupref = orig_tle->ressortgroupref;
5872 }
5873 if (orig_tlist_item != NULL)
5874 elog(ERROR, "resjunk output columns are not implemented");
5875 return new_tlist;
5876}

References Assert, elog, ERROR, fb(), lfirst_node, list_head(), and lnext().

Referenced by grouping_planner().

◆ preprocess_expression()

static Node * preprocess_expression ( PlannerInfo *root,
Node *expr,
int  kind 
)
static

Definition at line 1332 of file planner.c.

1333{
1334 /*
1335 * Fall out quickly if expression is empty. This occurs often enough to
1336 * be worth checking. Note that null->null is the correct conversion for
1337 * implicit-AND result format, too.
1338 */
1339 if (expr == NULL)
1340 return NULL;
1341
1342 /*
1343 * If the query has any join RTEs, replace join alias variables with
1344 * base-relation variables. We must do this first, since any expressions
1345 * we may extract from the joinaliasvars lists have not been preprocessed.
1346 * For example, if we did this after sublink processing, sublinks expanded
1347 * out from join aliases would not get processed. But we can skip this in
1348 * non-lateral RTE functions, VALUES lists, and TABLESAMPLE clauses, since
1349 * they can't contain any Vars of the current query level.
1350 */
1351 if (root->hasJoinRTEs &&
1352 !(kind == EXPRKIND_RTFUNC ||
1353 kind == EXPRKIND_VALUES ||
1354 kind == EXPRKIND_TABLESAMPLE ||
1355 kind == EXPRKIND_TABLEFUNC))
1356 expr = flatten_join_alias_vars(root, root->parse, expr);
1357
1358 /*
1359 * Simplify constant expressions. For function RTEs, this was already
1360 * done by preprocess_function_rtes. (But note we must do it again for
1361 * EXPRKIND_RTFUNC_LATERAL, because those might by now contain
1362 * un-simplified subexpressions inserted by flattening of subqueries or
1363 * join alias variables.)
1364 *
1365 * Note: an essential effect of this is to convert named-argument function
1366 * calls to positional notation and insert the current actual values of
1367 * any default arguments for functions. To ensure that happens, we *must*
1368 * process all expressions here. Previous PG versions sometimes skipped
1369 * const-simplification if it didn't seem worth the trouble, but we can't
1370 * do that anymore.
1371 *
1372 * Note: this also flattens nested AND and OR expressions into N-argument
1373 * form. All processing of a qual expression after this point must be
1374 * careful to maintain AND/OR flatness --- that is, do not generate a tree
1375 * with AND directly under AND, nor OR directly under OR.
1376 */
1377 if (kind != EXPRKIND_RTFUNC)
1378 expr = eval_const_expressions(root, expr);
1379
1380 /*
1381 * If it's a qual or havingQual, canonicalize it.
1382 */
1383 if (kind == EXPRKIND_QUAL)
1384 {
1385 expr = (Node *) canonicalize_qual((Expr *) expr, false);
1386
1387#ifdef OPTIMIZER_DEBUG
1388 printf("After canonicalize_qual()\n");
1389 pprint(expr);
1390#endif
1391 }
1392
1393 /*
1394 * Check for ANY ScalarArrayOpExpr with Const arrays and set the
1395 * hashfuncid of any that might execute more quickly by using hash lookups
1396 * instead of a linear search.
1397 */
1398 if (kind == EXPRKIND_QUAL || kind == EXPRKIND_TARGET)
1399 {
1400 convert_saop_to_hashed_saop(expr);
1401 }
1402
1403 /* Expand SubLinks to SubPlans */
1404 if (root->parse->hasSubLinks)
1405 expr = SS_process_sublinks(root, expr, (kind == EXPRKIND_QUAL));
1406
1407 /*
1408 * XXX do not insert anything here unless you have grokked the comments in
1409 * SS_replace_correlation_vars ...
1410 */
1411
1412 /* Replace uplevel vars with Param nodes (this IS possible in VALUES) */
1413 if (root->query_level > 1)
1414 expr = SS_replace_correlation_vars(root, expr);
1415
1416 /*
1417 * If it's a qual or havingQual, convert it to implicit-AND format. (We
1418 * don't want to do this before eval_const_expressions, since the latter
1419 * would be unable to simplify a top-level AND correctly. Also,
1420 * SS_process_sublinks expects explicit-AND format.)
1421 */
1422 if (kind == EXPRKIND_QUAL)
1423 expr = (Node *) make_ands_implicit((Expr *) expr);
1424
1425 return expr;
1426}
void pprint(const void *obj)
Definition print.c:54
void convert_saop_to_hashed_saop(Node *node)
Definition clauses.c:2300
List * make_ands_implicit(Expr *clause)
Definition makefuncs.c:810
#define EXPRKIND_TARGET
Definition planner.c:88
#define EXPRKIND_TABLESAMPLE
Definition planner.c:96
#define EXPRKIND_VALUES
Definition planner.c:91
#define EXPRKIND_QUAL
Definition planner.c:87
#define EXPRKIND_TABLEFUNC
Definition planner.c:98
#define EXPRKIND_RTFUNC
Definition planner.c:89
#define printf(...)
Definition port.h:266
Expr * canonicalize_qual(Expr *qual, bool is_check)
Definition prepqual.c:293
Node * SS_process_sublinks(PlannerInfo *root, Node *expr, bool isQual)
Definition subselect.c:2062
Node * SS_replace_correlation_vars(PlannerInfo *root, Node *expr)
Definition subselect.c:2007
Node * flatten_join_alias_vars(PlannerInfo *root, Query *query, Node *node)
Definition var.c:789

References canonicalize_qual(), convert_saop_to_hashed_saop(), eval_const_expressions(), EXPRKIND_QUAL, EXPRKIND_RTFUNC, EXPRKIND_TABLEFUNC, EXPRKIND_TABLESAMPLE, EXPRKIND_TARGET, EXPRKIND_VALUES, fb(), flatten_join_alias_vars(), make_ands_implicit(), pprint(), printf, root, SS_process_sublinks(), and SS_replace_correlation_vars().

Referenced by preprocess_phv_expression(), preprocess_qual_conditions(), and subquery_planner().

◆ preprocess_groupclause()

static List * preprocess_groupclause ( PlannerInfo *root,
List *force 
)
static

Definition at line 2904 of file planner.c.

2905{
2906 Query *parse = root->parse;
2907 List *new_groupclause = NIL;
2908 ListCell *sl;
2909 ListCell *gl;
2910
2911 /* For grouping sets, we need to force the ordering */
2912 if (force)
2913 {
2914 foreach(sl, force)
2915 {
2916 Index ref = lfirst_int(sl);
2917 SortGroupClause *cl = get_sortgroupref_clause(ref, parse->groupClause);
2918
2919 new_groupclause = lappend(new_groupclause, cl);
2920 }
2921
2922 return new_groupclause;
2923 }
2924
2925 /* If no ORDER BY, nothing useful to do here */
2926 if (parse->sortClause == NIL)
2927 return list_copy(parse->groupClause);
2928
2929 /*
2930 * Scan the ORDER BY clause and construct a list of matching GROUP BY
2931 * items, but only as far as we can make a matching prefix.
2932 *
2933 * This code assumes that the sortClause contains no duplicate items.
2934 */
2935 foreach(sl, parse->sortClause)
2936 {
2937 SortGroupClause *sc = lfirst_node(SortGroupClause, sl);
2938
2939 foreach(gl, parse->groupClause)
2940 {
2941 SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
2942
2943 if (equal(gc, sc))
2944 {
2945 new_groupclause = lappend(new_groupclause, gc);
2946 break;
2947 }
2948 }
2949 if (gl == NULL)
2950 break; /* no match, so stop scanning */
2951 }
2952
2953
2954 /* If no match at all, no point in reordering GROUP BY */
2955 if (new_groupclause == NIL)
2956 return list_copy(parse->groupClause);
2957
2958 /*
2959 * Add any remaining GROUP BY items to the new list. We don't require a
2960 * complete match, because even partial match allows ORDER BY to be
2961 * implemented using incremental sort. Also, give up if there are any
2962 * non-sortable GROUP BY items, since then there's no hope anyway.
2963 */
2964 foreach(gl, parse->groupClause)
2965 {
2966 SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
2967
2968 if (list_member_ptr(new_groupclause, gc))
2969 continue; /* it matched an ORDER BY item */
2970 if (!OidIsValid(gc->sortop)) /* give up, GROUP BY can't be sorted */
2971 return list_copy(parse->groupClause);
2972 new_groupclause = lappend(new_groupclause, gc);
2973 }
2974
2975 /* Success --- install the rearranged GROUP BY list */
2976 Assert(list_length(parse->groupClause) == list_length(new_groupclause));
2977 return new_groupclause;
2978}
SortGroupClause * get_sortgroupref_clause(Index sortref, List *clauses)
Definition tlist.c:431

References Assert, equal(), fb(), get_sortgroupref_clause(), lappend(), lfirst_int, lfirst_node, list_copy(), list_length(), list_member_ptr(), NIL, OidIsValid, parse(), and root.

Referenced by consider_groupingsets_paths(), grouping_planner(), and preprocess_grouping_sets().

◆ preprocess_grouping_sets()

static grouping_sets_data * preprocess_grouping_sets ( PlannerInfo *root )
static

Definition at line 2258 of file planner.c.

2259{
2260 Query *parse = root->parse;
2261 List *sets;
2262 int maxref = 0;
2263 ListCell *lc_set;
2264 grouping_sets_data *gd = palloc0_object(grouping_sets_data);
2265
2266 /*
2267 * We don't currently make any attempt to optimize the groupClause when
2268 * there are grouping sets, so just duplicate it in processed_groupClause.
2269 */
2270 root->processed_groupClause = parse->groupClause;
2271
2272 /* Detect unhashable and unsortable grouping expressions */
2273 gd->any_hashable = false;
2274 gd->unhashable_refs = NULL;
2275 gd->unsortable_refs = NULL;
2276 gd->unsortable_sets = NIL;
2277
2278 if (parse->groupClause)
2279 {
2280 ListCell *lc;
2281
2282 foreach(lc, parse->groupClause)
2283 {
2285 Index ref = gc->tleSortGroupRef;
2286
2287 if (ref > maxref)
2288 maxref = ref;
2289
2290 if (!gc->hashable)
2291 gd->unhashable_refs = bms_add_member(gd->unhashable_refs, ref);
2292
2293 if (!OidIsValid(gc->sortop))
2294 gd->unsortable_refs = bms_add_member(gd->unsortable_refs, ref);
2295 }
2296 }
2297
2298 /* Allocate workspace array for remapping */
2299 gd->tleref_to_colnum_map = (int *) palloc((maxref + 1) * sizeof(int));
2300
2301 /*
2302 * If we have any unsortable sets, we must extract them before trying to
2303 * prepare rollups. Unsortable sets don't go through
2304 * reorder_grouping_sets, so we must apply the GroupingSetData annotation
2305 * here.
2306 */
2307 if (!bms_is_empty(gd->unsortable_refs))
2308 {
2310 ListCell *lc;
2311
2312 foreach(lc, parse->groupingSets)
2313 {
2314 List *gset = (List *) lfirst(lc);
2315
2316 if (bms_overlap_list(gd->unsortable_refs, gset))
2317 {
2318 GroupingSetData *gs = makeNode(GroupingSetData);
2319
2320 gs->set = gset;
2321 gd->unsortable_sets = lappend(gd->unsortable_sets, gs);
2322
2323 /*
2324 * We must enforce here that an unsortable set is hashable;
2325 * later code assumes this. Parse analysis only checks that
2326 * every individual column is either hashable or sortable.
2327 *
2328 * Note that passing this test doesn't guarantee we can
2329 * generate a plan; there might be other showstoppers.
2330 */
2331 if (bms_overlap_list(gd->unhashable_refs, gset))
2332 ereport(ERROR,
2334 errmsg("could not implement GROUP BY"),
2335 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
2336 }
2337 else
2338 sortable_sets = lappend(sortable_sets, gset);
2339 }
2340
2341 if (sortable_sets)
2342 sets = extract_rollup_sets(sortable_sets);
2343 else
2344 sets = NIL;
2345 }
2346 else
2347 sets = extract_rollup_sets(parse->groupingSets);
2348
2349 foreach(lc_set, sets)
2350 {
2351 List *current_sets = (List *) lfirst(lc_set);
2352 RollupData *rollup = makeNode(RollupData);
2353 GroupingSetData *gs;
2354
2355 /*
2356 * Reorder the current list of grouping sets into correct prefix
2357 * order. If only one aggregation pass is needed, try to make the
2358 * list match the ORDER BY clause; if more than one pass is needed, we
2359 * don't bother with that.
2360 *
2361 * Note that this reorders the sets from smallest-member-first to
2362 * largest-member-first, and applies the GroupingSetData annotations,
2363 * though the data will be filled in later.
2364 */
2365 current_sets = reorder_grouping_sets(current_sets,
2366 (list_length(sets) == 1
2367 ? parse->sortClause
2368 : NIL));
2369
2370 /*
2371 * Get the initial (and therefore largest) grouping set.
2372 */
2373 gs = linitial_node(GroupingSetData, current_sets);
2374
2375 /*
2376 * Order the groupClause appropriately. If the first grouping set is
2377 * empty, then the groupClause must also be empty; otherwise we have
2378 * to force the groupClause to match that grouping set's order.
2379 *
2380 * (The first grouping set can be empty even though parse->groupClause
2381 * is not empty only if all non-empty grouping sets are unsortable.
2382 * The groupClauses for hashed grouping sets are built later on.)
2383 */
2384 if (gs->set)
2385 rollup->groupClause = preprocess_groupclause(root, gs->set);
2386 else
2387 rollup->groupClause = NIL;
2388
2389 /*
2390 * Is it hashable? We pretend empty sets are hashable even though we
2391 * actually force them not to be hashed later. But don't bother if
2392 * there's nothing but empty sets (since in that case we can't hash
2393 * anything).
2394 */
2395 if (gs->set &&
2396 !bms_overlap_list(gd->unhashable_refs, gs->set))
2397 {
2398 rollup->hashable = true;
2399 gd->any_hashable = true;
2400 }
2401
2402 /*
2403 * Now that we've pinned down an order for the groupClause for this
2404 * list of grouping sets, we need to remap the entries in the grouping
2405 * sets from sortgrouprefs to plain indices (0-based) into the
2406 * groupClause for this collection of grouping sets. We keep the
2407 * original form for later use, though.
2408 */
2409 rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
2410 current_sets,
2411 gd->tleref_to_colnum_map);
2412 rollup->gsets_data = current_sets;
2413
2414 gd->rollups = lappend(gd->rollups, rollup);
2415 }
2416
2417 if (gd->unsortable_sets)
2418 {
2419 /*
2420 * We have not yet pinned down a groupclause for this, but we will
2421 * need index-based lists for estimation purposes. Construct
2422 * hash_sets_idx based on the entire original groupclause for now.
2423 */
2424 gd->hash_sets_idx = remap_to_groupclause_idx(parse->groupClause,
2425 gd->unsortable_sets,
2426 gd->tleref_to_colnum_map);
2427 gd->any_hashable = true;
2428 }
2429
2430 return gd;
2431}
bool bms_overlap_list(const Bitmapset *a, const List *b)
Definition bitmapset.c:607
#define palloc0_object(type)
Definition fe_memutils.h:75
static List * reorder_grouping_sets(List *groupingSets, List *sortclause)
Definition planner.c:3212
static List * extract_rollup_sets(List *groupingSets)
Definition planner.c:3000

References bms_add_member(), bms_is_empty, bms_overlap_list(), ereport, errcode(), errdetail(), errmsg(), ERROR, extract_rollup_sets(), fb(), lappend(), lfirst, lfirst_node, linitial_node, list_length(), makeNode, NIL, OidIsValid, palloc(), palloc0_object, parse(), preprocess_groupclause(), remap_to_groupclause_idx(), reorder_grouping_sets(), and root.

Referenced by grouping_planner().

◆ preprocess_limit()

static double preprocess_limit ( PlannerInfo * root,
double  tuple_fraction,
int64 * offset_est,
int64 * count_est 
)
static

Definition at line 2653 of file planner.c.

2653 preprocess_limit(PlannerInfo *root, double tuple_fraction,
2654 int64 *offset_est, int64 *count_est)
2655{
2656 Query *parse = root->parse;
2657 Node *est;
2658 double limit_fraction;
2659
2660 /* Should not be called unless LIMIT or OFFSET */
2661 Assert(parse->limitCount || parse->limitOffset);
2662
2663 /*
2664 * Try to obtain the clause values. We use estimate_expression_value
2665 * primarily because it can sometimes do something useful with Params.
2666 */
2667 if (parse->limitCount)
2668 {
2669 est = estimate_expression_value(root, parse->limitCount);
2670 if (est && IsA(est, Const))
2671 {
2672 if (((Const *) est)->constisnull)
2673 {
2674 /* NULL indicates LIMIT ALL, ie, no limit */
2675 *count_est = 0; /* treat as not present */
2676 }
2677 else
2678 {
2679 *count_est = DatumGetInt64(((Const *) est)->constvalue);
2680 if (*count_est <= 0)
2681 *count_est = 1; /* force to at least 1 */
2682 }
2683 }
2684 else
2685 *count_est = -1; /* can't estimate */
2686 }
2687 else
2688 *count_est = 0; /* not present */
2689
2690 if (parse->limitOffset)
2691 {
2692 est = estimate_expression_value(root, parse->limitOffset);
2693 if (est && IsA(est, Const))
2694 {
2695 if (((Const *) est)->constisnull)
2696 {
2697 /* Treat NULL as no offset; the executor will too */
2698 *offset_est = 0; /* treat as not present */
2699 }
2700 else
2701 {
2702 *offset_est = DatumGetInt64(((Const *) est)->constvalue);
2703 if (*offset_est < 0)
2704 *offset_est = 0; /* treat as not present */
2705 }
2706 }
2707 else
2708 *offset_est = -1; /* can't estimate */
2709 }
2710 else
2711 *offset_est = 0; /* not present */
2712
2713 if (*count_est != 0)
2714 {
2715 /*
2716 * A LIMIT clause limits the absolute number of tuples returned.
2717 * However, if it's not a constant LIMIT then we have to guess; for
2718 * lack of a better idea, assume 10% of the plan's result is wanted.
2719 */
2720 if (*count_est < 0 || *offset_est < 0)
2721 {
2722 /* LIMIT or OFFSET is an expression ... punt ... */
2723 limit_fraction = 0.10;
2724 }
2725 else
2726 {
2727 /* LIMIT (plus OFFSET, if any) is max number of tuples needed */
2728 limit_fraction = (double) *count_est + (double) *offset_est;
2729 }
2730
2731 /*
2732 * If we have absolute limits from both caller and LIMIT, use the
2733 * smaller value; likewise if they are both fractional. If one is
2734 * fractional and the other absolute, we can't easily determine which
2735 * is smaller, but we use the heuristic that the absolute will usually
2736 * be smaller.
2737 */
2738 if (tuple_fraction >= 1.0)
2739 {
2740 if (limit_fraction >= 1.0)
2741 {
2742 /* both absolute */
2743 tuple_fraction = Min(tuple_fraction, limit_fraction);
2744 }
2745 else
2746 {
2747 /* caller absolute, limit fractional; use caller's value */
2748 }
2749 }
2750 else if (tuple_fraction > 0.0)
2751 {
2752 if (limit_fraction >= 1.0)
2753 {
2754 /* caller fractional, limit absolute; use limit */
2755 tuple_fraction = limit_fraction;
2756 }
2757 else
2758 {
2759 /* both fractional */
2760 tuple_fraction = Min(tuple_fraction, limit_fraction);
2761 }
2762 }
2763 else
2764 {
2765 /* no info from caller, just use limit */
2766 tuple_fraction = limit_fraction;
2767 }
2768 }
2769 else if (*offset_est != 0 && tuple_fraction > 0.0)
2770 {
2771 /*
2772 * We have an OFFSET but no LIMIT. This acts entirely differently
2773 * from the LIMIT case: here, we need to increase rather than decrease
2774 * the caller's tuple_fraction, because the OFFSET acts to cause more
2775 * tuples to be fetched instead of fewer. This only matters if we got
2776 * a tuple_fraction > 0, however.
2777 *
2778 * As above, use 10% if OFFSET is present but unestimatable.
2779 */
2780 if (*offset_est < 0)
2781 limit_fraction = 0.10;
2782 else
2783 limit_fraction = (double) *offset_est;
2784
2785 /*
2786 * If we have absolute counts from both caller and OFFSET, add them
2787 * together; likewise if they are both fractional. If one is
2788 * fractional and the other absolute, we want to take the larger, and
2789 * we heuristically assume that's the fractional one.
2790 */
2791 if (tuple_fraction >= 1.0)
2792 {
2793 if (limit_fraction >= 1.0)
2794 {
2795 /* both absolute, so add them together */
2796 tuple_fraction += limit_fraction;
2797 }
2798 else
2799 {
2800 /* caller absolute, limit fractional; use limit */
2801 tuple_fraction = limit_fraction;
2802 }
2803 }
2804 else
2805 {
2806 if (limit_fraction >= 1.0)
2807 {
2808 /* caller fractional, limit absolute; use caller's value */
2809 }
2810 else
2811 {
2812 /* both fractional, so add them together */
2813 tuple_fraction += limit_fraction;
2814 if (tuple_fraction >= 1.0)
2815 tuple_fraction = 0.0; /* assume fetch all */
2816 }
2817 }
2818 }
2819
2820 return tuple_fraction;
2821}
Node * estimate_expression_value(PlannerInfo *root, Node *node)
Definition clauses.c:2408

References Assert, DatumGetInt64(), estimate_expression_value(), fb(), IsA, Min, parse(), and root.

Referenced by grouping_planner().
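The absolute-versus-fractional combination rules above can be restated as a small standalone function. This is an illustrative sketch of the logic in the listing, not planner API; by the listing's convention, values >= 1.0 are absolute row counts, values in (0, 1) are fractions, and 0.0 means "fetch everything".

#include <stdio.h>

/* mirror of the LIMIT-combination rules in preprocess_limit() */
static double
combine_limit(double tuple_fraction, double limit_fraction)
{
    if (tuple_fraction >= 1.0)
    {
        if (limit_fraction >= 1.0)        /* both absolute: smaller wins */
            return tuple_fraction < limit_fraction ? tuple_fraction : limit_fraction;
        return tuple_fraction;            /* caller absolute, limit fractional */
    }
    if (tuple_fraction > 0.0)
    {
        if (limit_fraction >= 1.0)        /* caller fractional, limit absolute */
            return limit_fraction;
        return tuple_fraction < limit_fraction ? tuple_fraction : limit_fraction;
    }
    return limit_fraction;                /* no info from caller */
}

int main(void)
{
    printf("%g\n", combine_limit(0.10, 100.0));  /* 100: absolute LIMIT assumed smaller */
    printf("%g\n", combine_limit(500.0, 100.0)); /* 100: smaller absolute wins */
    printf("%g\n", combine_limit(0.0, 0.10));    /* 0.1: only LIMIT gives info */
    return 0;
}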

◆ preprocess_phv_expression()

Expr * preprocess_phv_expression ( PlannerInfo * root,
Expr * expr 
)

Definition at line 1478 of file planner.c.

1478 preprocess_phv_expression(PlannerInfo *root, Expr *expr)
1479{
1480 return (Expr *) preprocess_expression(root, (Node *) expr, EXPRKIND_PHV);
1481}
#define EXPRKIND_PHV
Definition planner.c:95
static Node * preprocess_expression(PlannerInfo *root, Node *expr, int kind)
Definition planner.c:1332

References EXPRKIND_PHV, preprocess_expression(), and root.

Referenced by extract_lateral_references().

◆ preprocess_qual_conditions()

static void preprocess_qual_conditions ( PlannerInfo * root,
Node * jtnode 
)
static

Definition at line 1434 of file planner.c.

1434 preprocess_qual_conditions(PlannerInfo *root, Node *jtnode)
1435{
1436 if (jtnode == NULL)
1437 return;
1438 if (IsA(jtnode, RangeTblRef))
1439 {
1440 /* nothing to do here */
1441 }
1442 else if (IsA(jtnode, FromExpr))
1443 {
1444 FromExpr *f = (FromExpr *) jtnode;
1445 ListCell *l;
1446
1447 foreach(l, f->fromlist)
1448 preprocess_qual_conditions(root, lfirst(l));
1449
1450 f->quals = preprocess_expression(root, f->quals, EXPRKIND_QUAL);
1451 }
1452 else if (IsA(jtnode, JoinExpr))
1453 {
1454 JoinExpr *j = (JoinExpr *) jtnode;
1455
1456 preprocess_qual_conditions(root, j->larg);
1457 preprocess_qual_conditions(root, j->rarg);
1458
1459 j->quals = preprocess_expression(root, j->quals, EXPRKIND_QUAL);
1460 }
1461 else
1462 elog(ERROR, "unrecognized node type: %d",
1463 (int) nodeTag(jtnode));
1464}
#define nodeTag(nodeptr)
Definition nodes.h:139
static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode)
Definition planner.c:1434
Node * quals
Definition primnodes.h:2358
List * fromlist
Definition primnodes.h:2357

References elog, ERROR, EXPRKIND_QUAL, fb(), FromExpr::fromlist, IsA, j, lfirst, nodeTag, preprocess_expression(), preprocess_qual_conditions(), FromExpr::quals, and root.

Referenced by preprocess_qual_conditions(), and subquery_planner().
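The recursion pattern here, descend into both join arms and then preprocess the node's own quals, can be sketched with a toy tree type (a standalone illustration; all names are hypothetical, not planner types):

#include <stdio.h>

/* stand-in for the planner's jointree: leaves are table references,
 * inner nodes are joins carrying their own quals */
typedef struct JoinTree
{
    const char *name;
    struct JoinTree *larg;
    struct JoinTree *rarg;
    const char *quals;
} JoinTree;

static void
walk_quals(JoinTree *node)
{
    if (node == NULL)
        return;
    walk_quals(node->larg);     /* recurse first, like j->larg / j->rarg */
    walk_quals(node->rarg);
    if (node->quals)
        printf("preprocess quals of %s: %s\n", node->name, node->quals);
}

int main(void)
{
    JoinTree a = {"a", NULL, NULL, NULL};
    JoinTree b = {"b", NULL, NULL, NULL};
    JoinTree j = {"a JOIN b", &a, &b, "a.x = b.x"};

    walk_quals(&j);
    return 0;
}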

◆ preprocess_rowmarks()

static void preprocess_rowmarks ( PlannerInfo * root )
static

Definition at line 2475 of file planner.c.

2475 preprocess_rowmarks(PlannerInfo *root)
2476{
2477 Query *parse = root->parse;
2478 Bitmapset *rels;
2479 List *prowmarks;
2480 ListCell *l;
2481 int i;
2482
2483 if (parse->rowMarks)
2484 {
2485 /*
2486 * We've got trouble if FOR [KEY] UPDATE/SHARE appears inside
2487 * grouping, since grouping renders a reference to individual tuple
2488 * CTIDs invalid. This is also checked at parse time, but that's
2489 * insufficient because of rule substitution, query pullup, etc.
2490 */
2491 CheckSelectLocking(parse, linitial_node(RowMarkClause,
2492 parse->rowMarks)->strength);
2493 }
2494 else
2495 {
2496 /*
2497 * We only need rowmarks for UPDATE, DELETE, MERGE, or FOR [KEY]
2498 * UPDATE/SHARE.
2499 */
2500 if (parse->commandType != CMD_UPDATE &&
2501 parse->commandType != CMD_DELETE &&
2502 parse->commandType != CMD_MERGE)
2503 return;
2504 }
2505
2506 /*
2507 * We need to have rowmarks for all base relations except the target. We
2508 * make a bitmapset of all base rels and then remove the items we don't
2509 * need or have FOR [KEY] UPDATE/SHARE marks for.
2510 */
2511 rels = get_relids_in_jointree((Node *) parse->jointree, false, false);
2512 if (parse->resultRelation)
2513 rels = bms_del_member(rels, parse->resultRelation);
2514
2515 /*
2516 * Convert RowMarkClauses to PlanRowMark representation.
2517 */
2518 prowmarks = NIL;
2519 foreach(l, parse->rowMarks)
2520 {
2521 RowMarkClause *rc = lfirst_node(RowMarkClause, l);
2522 RangeTblEntry *rte = rt_fetch(rc->rti, parse->rtable);
2523 PlanRowMark *newrc;
2524
2525 /*
2526 * Currently, it is syntactically impossible to have FOR UPDATE et al
2527 * applied to an update/delete target rel. If that ever becomes
2528 * possible, we should drop the target from the PlanRowMark list.
2529 */
2530 Assert(rc->rti != parse->resultRelation);
2531
2532 /*
2533 * Ignore RowMarkClauses for subqueries; they aren't real tables and
2534 * can't support true locking. Subqueries that got flattened into the
2535 * main query should be ignored completely. Any that didn't will get
2536 * ROW_MARK_COPY items in the next loop.
2537 */
2538 if (rte->rtekind != RTE_RELATION)
2539 continue;
2540
2541 rels = bms_del_member(rels, rc->rti);
2542
2543 newrc = makeNode(PlanRowMark);
2544 newrc->rti = newrc->prti = rc->rti;
2545 newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2546 newrc->markType = select_rowmark_type(rte, rc->strength);
2547 newrc->allMarkTypes = (1 << newrc->markType);
2548 newrc->strength = rc->strength;
2549 newrc->waitPolicy = rc->waitPolicy;
2550 newrc->isParent = false;
2551
2552 prowmarks = lappend(prowmarks, newrc);
2553 }
2554
2555 /*
2556 * Now, add rowmarks for any non-target, non-locked base relations.
2557 */
2558 i = 0;
2559 foreach(l, parse->rtable)
2560 {
2561 RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
2562 PlanRowMark *newrc;
2563
2564 i++;
2565 if (!bms_is_member(i, rels))
2566 continue;
2567
2568 newrc = makeNode(PlanRowMark);
2569 newrc->rti = newrc->prti = i;
2570 newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2571 newrc->markType = select_rowmark_type(rte, LCS_NONE);
2572 newrc->allMarkTypes = (1 << newrc->markType);
2573 newrc->strength = LCS_NONE;
2574 newrc->waitPolicy = LockWaitBlock; /* doesn't matter */
2575 newrc->isParent = false;
2576
2577 prowmarks = lappend(prowmarks, newrc);
2578 }
2579
2580 root->rowMarks = prowmarks;
2581}
@ LockWaitBlock
Definition lockoptions.h:39
@ LCS_NONE
Definition lockoptions.h:23
@ CMD_DELETE
Definition nodes.h:278
void CheckSelectLocking(Query *qry, LockClauseStrength strength)
Definition analyze.c:3349
#define rt_fetch(rangetable_index, rangetable)
Definition parsetree.h:31
RowMarkType select_rowmark_type(RangeTblEntry *rte, LockClauseStrength strength)
Definition planner.c:2587
Relids get_relids_in_jointree(Node *jtnode, bool include_outer_joins, bool include_inner_joins)
LockClauseStrength strength
LockWaitPolicy waitPolicy

References Assert, bms_del_member(), bms_is_member(), CheckSelectLocking(), CMD_DELETE, CMD_MERGE, CMD_UPDATE, fb(), get_relids_in_jointree(), i, lappend(), LCS_NONE, lfirst_node, linitial_node, LockWaitBlock, makeNode, NIL, parse(), root, rt_fetch, RTE_RELATION, RowMarkClause::rti, select_rowmark_type(), RowMarkClause::strength, and RowMarkClause::waitPolicy.

Referenced by subquery_planner().
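A toy sketch of the bitmapset bookkeeping above, using a plain unsigned integer in place of a Bitmapset (the RT indexes are hypothetical; this is an illustration, not planner code):

#include <stdio.h>

int main(void)
{
    unsigned rels = 0;

    /* all base rels from the jointree, RT indexes 1..4 */
    for (int rti = 1; rti <= 4; rti++)
        rels |= 1u << rti;

    rels &= ~(1u << 1);     /* drop the result relation (rti 1) */
    rels &= ~(1u << 3);     /* drop a rel with an explicit FOR UPDATE (rti 3) */

    /* the leftovers get default LCS_NONE row marks in the second loop */
    for (int rti = 1; rti <= 4; rti++)
        if (rels & (1u << rti))
            printf("rti %d gets a default row mark\n", rti);
    return 0;
}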

◆ remap_to_groupclause_idx()

static List * remap_to_groupclause_idx ( List * groupClause,
List * gsets,
int * tleref_to_colnum_map 
)
static

Definition at line 2438 of file planner.c.

2438 remap_to_groupclause_idx(List *groupClause,
2439 List *gsets,
2440 int *tleref_to_colnum_map)
2441{
2442 int ref = 0;
2443 List *result = NIL;
2444 ListCell *lc;
2445
2446 foreach(lc, groupClause)
2447 {
2448 SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
2449
2450 tleref_to_colnum_map[gc->tleSortGroupRef] = ref++;
2451 }
2452
2453 foreach(lc, gsets)
2454 {
2455 List *set = NIL;
2456 ListCell *lc2;
2457 GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
2458
2459 foreach(lc2, gs->set)
2460 {
2461 set = lappend_int(set, tleref_to_colnum_map[lfirst_int(lc2)]);
2462 }
2463
2464 result = lappend(result, set);
2465 }
2466
2467 return result;
2468}

References fb(), lappend(), lappend_int(), lfirst_int, lfirst_node, and NIL.

Referenced by consider_groupingsets_paths(), and preprocess_grouping_sets().
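A standalone sketch of the remapping (the sortgroupref values are hypothetical): a clause's position in the groupClause becomes its 0-based column index, and grouping sets are rewritten through that map.

#include <stdio.h>

int main(void)
{
    /* hypothetical groupClause entries carrying sortgrouprefs 7, 3, 5 */
    int clause_refs[] = {7, 3, 5};
    int map[16] = {0};

    /* position in the groupClause becomes the 0-based index */
    for (int i = 0; i < 3; i++)
        map[clause_refs[i]] = i;

    /* a grouping set stored as sortgrouprefs (5, 7) remaps to (2, 0) */
    int gset[] = {5, 7};
    for (int i = 0; i < 2; i++)
        printf("%d ", map[gset[i]]);
    printf("\n");
    return 0;
}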

◆ reorder_grouping_sets()

static List * reorder_grouping_sets ( List * groupingSets,
List * sortclause 
)
static

Definition at line 3212 of file planner.c.

3212 reorder_grouping_sets(List *groupingSets, List *sortclause)
3213{
3214 ListCell *lc;
3215 List *previous = NIL;
3216 List *result = NIL;
3217
3218 foreach(lc, groupingSets)
3219 {
3220 List *candidate = (List *) lfirst(lc);
3221 List *new_elems = list_difference_int(candidate, previous);
3222 GroupingSetData *gs = makeNode(GroupingSetData);
3223
3224 while (list_length(sortclause) > list_length(previous) &&
3225 new_elems != NIL)
3226 {
3227 SortGroupClause *sc = list_nth(sortclause, list_length(previous));
3228 int ref = sc->tleSortGroupRef;
3229
3230 if (list_member_int(new_elems, ref))
3231 {
3232 previous = lappend_int(previous, ref);
3233 new_elems = list_delete_int(new_elems, ref);
3234 }
3235 else
3236 {
3237 /* diverged from the sortclause; give up on it */
3238 sortclause = NIL;
3239 break;
3240 }
3241 }
3242
3243 previous = list_concat(previous, new_elems);
3244
3245 gs->set = list_copy(previous);
3246 result = lcons(gs, result);
3247 }
3248
3249 list_free(previous);
3250
3251 return result;
3252}
List * list_difference_int(const List *list1, const List *list2)
Definition list.c:1288
List * list_delete_int(List *list, int datum)
Definition list.c:891
bool list_member_int(const List *list, int datum)
Definition list.c:702
static void * list_nth(const List *list, int n)
Definition pg_list.h:299

References fb(), lappend_int(), lcons(), lfirst, list_concat(), list_copy(), list_delete_int(), list_difference_int(), list_free(), list_length(), list_member_int(), list_nth(), makeNode, and NIL.

Referenced by preprocess_grouping_sets().
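A minimal model of the "previous" accumulation above (column names hypothetical; a sketch, not planner code): each set contributes only its not-yet-seen columns, so every smaller set ends up a prefix of the final column order and one sort serves the whole chain.

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* hypothetical rollup chain, smallest set first: (), (b), (a,b) */
    const char *sets[] = {"", "b", "ab"};
    char previous[8] = "";

    /* append each set's new columns, like previous = list_concat(previous,
     * new_elems); earlier sets become prefixes of the final order */
    for (int i = 0; i < 3; i++)
        for (const char *c = sets[i]; *c; c++)
            if (!strchr(previous, *c))
                strncat(previous, c, 1);

    printf("groupClause column order: %s\n", previous); /* prints "ba" */
    return 0;
}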

◆ select_active_windows()

static List * select_active_windows ( PlannerInfo * root,
WindowFuncLists * wflists 
)
static

Definition at line 6028 of file planner.c.

6028 select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
6029{
6030 List *windowClause = root->parse->windowClause;
6031 List *result = NIL;
6032 ListCell *lc;
6033 int nActive = 0;
6034 WindowClauseSortData *actives = palloc_array(WindowClauseSortData,
6035 list_length(windowClause));
6036
6037 /* First, construct an array of the active windows */
6038 foreach(lc, windowClause)
6039 {
6040 WindowClause *wc = lfirst_node(WindowClause, lc);
6041
6042 /* It's only active if wflists shows some related WindowFuncs */
6043 Assert(wc->winref <= wflists->maxWinRef);
6044 if (wflists->windowFuncs[wc->winref] == NIL)
6045 continue;
6046
6047 actives[nActive].wc = wc; /* original clause */
6048
6049 /*
6050 * For sorting, we want the list of partition keys followed by the
6051 * list of sort keys. But pathkeys construction will remove duplicates
6052 * between the two, so we can as well (even though we can't detect all
6053 * of the duplicates, since some may come from ECs - that might mean
6054 * we miss optimization chances here). We must, however, ensure that
6055 * the order of entries is preserved with respect to the ones we do
6056 * keep.
6057 *
6058 * partitionClause and orderClause had their own duplicates removed in
6059 * parse analysis, so we're only concerned here with removing
6060 * orderClause entries that also appear in partitionClause.
6061 */
6062 actives[nActive].uniqueOrder =
6063 list_concat_unique(list_copy(wc->partitionClause),
6064 wc->orderClause);
6065 nActive++;
6066 }
6067
6068 /*
6069 * Sort active windows by their partitioning/ordering clauses, ignoring
6070 * any framing clauses, so that the windows that need the same sorting are
6071 * adjacent in the list. When we come to generate paths, this will avoid
6072 * inserting additional Sort nodes.
6073 *
6074 * This is how we implement a specific requirement from the SQL standard,
6075 * which says that when two or more windows are order-equivalent (i.e.
6076 * have matching partition and order clauses, even if their names or
6077 * framing clauses differ), then all peer rows must be presented in the
6078 * same order in all of them. If we allowed multiple sort nodes for such
6079 * cases, we'd risk having the peer rows end up in different orders in
6080 * equivalent windows due to sort instability. (See General Rule 4 of
6081 * <window clause> in SQL2008 - SQL2016.)
6082 *
6083 * Additionally, if the entire list of clauses of one window is a prefix
6084 * of another, put first the window with stronger sorting requirements.
6085 * This way we will first sort for stronger window, and won't have to sort
6086 * again for the weaker one.
6087 */
6088 qsort(actives, nActive, sizeof(WindowClauseSortData), common_prefix_cmp);
6089
6090 /* build ordered list of the original WindowClause nodes */
6091 for (int i = 0; i < nActive; i++)
6092 result = lappend(result, actives[i].wc);
6093
6094 pfree(actives);
6095
6096 return result;
6097}
#define palloc_array(type, count)
Definition fe_memutils.h:76
List * list_concat_unique(List *list1, const List *list2)
Definition list.c:1405
static int common_prefix_cmp(const void *a, const void *b)
Definition planner.c:6162
#define qsort(a, b, c, d)
Definition port.h:495

References Assert, common_prefix_cmp(), fb(), i, lappend(), lfirst_node, list_concat_unique(), list_copy(), list_length(), NIL, WindowClause::orderClause, palloc_array, WindowClause::partitionClause, pfree(), qsort, root, and WindowClause::winref.

Referenced by grouping_planner().
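A toy illustration of why the qsort matters (window keys shown as strings; the comparator is a simple stand-in for common_prefix_cmp(), not its real logic): after sorting, order-equivalent windows are adjacent, so path generation can reuse a single Sort.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* stand-in comparator: compare the concatenated partition+order key lists */
static int
cmp_keys(const void *a, const void *b)
{
    return strcmp(*(const char *const *) a, *(const char *const *) b);
}

int main(void)
{
    /* three hypothetical windows; the first and third are order-equivalent */
    const char *window_keys[] = {"a,b", "c", "a,b"};

    qsort(window_keys, 3, sizeof(const char *), cmp_keys);

    /* equivalent windows are now adjacent: one Sort node serves both */
    for (int i = 0; i < 3; i++)
        printf("%s\n", window_keys[i]);
    return 0;
}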

◆ select_rowmark_type()

RowMarkType select_rowmark_type ( RangeTblEntry * rte,
LockClauseStrength  strength 
)

Definition at line 2587 of file planner.c.

2587 select_rowmark_type(RangeTblEntry *rte, LockClauseStrength strength)
2588{
2589 if (rte->rtekind != RTE_RELATION)
2590 {
2591 /* If it's not a table at all, use ROW_MARK_COPY */
2592 return ROW_MARK_COPY;
2593 }
2594 else if (rte->relkind == RELKIND_FOREIGN_TABLE)
2595 {
2596 /* Let the FDW select the rowmark type, if it wants to */
2597 FdwRoutine *fdwroutine = GetFdwRoutineByRelId(rte->relid);
2598
2599 if (fdwroutine->GetForeignRowMarkType != NULL)
2600 return fdwroutine->GetForeignRowMarkType(rte, strength);
2601 /* Otherwise, use ROW_MARK_COPY by default */
2602 return ROW_MARK_COPY;
2603 }
2604 else
2605 {
2606 /* Regular table, apply the appropriate lock type */
2607 switch (strength)
2608 {
2609 case LCS_NONE:
2610
2611 /*
2612 * We don't need a tuple lock, only the ability to re-fetch
2613 * the row.
2614 */
2615 return ROW_MARK_REFERENCE;
2616 break;
2617 case LCS_FORKEYSHARE:
2618 return ROW_MARK_KEYSHARE;
2619 break;
2620 case LCS_FORSHARE:
2621 return ROW_MARK_SHARE;
2622 break;
2623 case LCS_FORNOKEYUPDATE:
2624 return ROW_MARK_NOKEYEXCLUSIVE;
2625 break;
2626 case LCS_FORUPDATE:
2627 return ROW_MARK_EXCLUSIVE;
2628 break;
2629 }
2630 elog(ERROR, "unrecognized LockClauseStrength %d", (int) strength);
2631 return ROW_MARK_EXCLUSIVE; /* keep compiler quiet */
2632 }
2633}
FdwRoutine * GetFdwRoutineByRelId(Oid relid)
Definition foreign.c:420
@ LCS_FORUPDATE
Definition lockoptions.h:27
@ LCS_FORSHARE
Definition lockoptions.h:25
@ LCS_FORKEYSHARE
Definition lockoptions.h:24
@ LCS_FORNOKEYUPDATE
Definition lockoptions.h:26
@ ROW_MARK_COPY
Definition plannodes.h:1541
@ ROW_MARK_REFERENCE
Definition plannodes.h:1540
@ ROW_MARK_SHARE
Definition plannodes.h:1538
@ ROW_MARK_EXCLUSIVE
Definition plannodes.h:1536
@ ROW_MARK_NOKEYEXCLUSIVE
Definition plannodes.h:1537
@ ROW_MARK_KEYSHARE
Definition plannodes.h:1539
GetForeignRowMarkType_function GetForeignRowMarkType
Definition fdwapi.h:247

References elog, ERROR, fb(), GetFdwRoutineByRelId(), FdwRoutine::GetForeignRowMarkType, LCS_FORKEYSHARE, LCS_FORNOKEYUPDATE, LCS_FORSHARE, LCS_FORUPDATE, LCS_NONE, ROW_MARK_COPY, ROW_MARK_EXCLUSIVE, ROW_MARK_KEYSHARE, ROW_MARK_NOKEYEXCLUSIVE, ROW_MARK_REFERENCE, ROW_MARK_SHARE, and RTE_RELATION.

Referenced by expand_single_inheritance_child(), and preprocess_rowmarks().
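For quick reference, the regular-table branch reduces to a strength-to-marktype table; a standalone sketch (array positions here are illustrative, not the actual enum values from lockoptions.h):

#include <stdio.h>

int main(void)
{
    static const char *const strengths[] = {
        "LCS_NONE", "LCS_FORKEYSHARE", "LCS_FORSHARE",
        "LCS_FORNOKEYUPDATE", "LCS_FORUPDATE",
    };
    /* mapping per the switch in select_rowmark_type() */
    static const char *const rowmarks[] = {
        "ROW_MARK_REFERENCE", "ROW_MARK_KEYSHARE", "ROW_MARK_SHARE",
        "ROW_MARK_NOKEYEXCLUSIVE", "ROW_MARK_EXCLUSIVE",
    };

    for (int i = 0; i < 5; i++)
        printf("%-20s -> %s\n", strengths[i], rowmarks[i]);
    return 0;
}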

◆ standard_planner()

PlannedStmt * standard_planner ( Query * parse,
const char * query_string,
int  cursorOptions,
ParamListInfo  boundParams,
ExplainState * es 
)

Definition at line 333 of file planner.c.

333 standard_planner(Query *parse, const char *query_string, int cursorOptions,
334 ParamListInfo boundParams, ExplainState *es)
335{
336 PlannedStmt *result;
337 PlannerGlobal *glob;
338 double tuple_fraction;
339 PlannerInfo *root;
340 RelOptInfo *final_rel;
341 Path *best_path;
342 Plan *top_plan;
343 ListCell *lp,
344 *lr;
345
346 /*
347 * Set up global state for this planner invocation. This data is needed
348 * across all levels of sub-Query that might exist in the given command,
349 * so we keep it in a separate struct that's linked to by each per-Query
350 * PlannerInfo.
351 */
352 glob = makeNode(PlannerGlobal);
353
354 glob->boundParams = boundParams;
355 glob->subplans = NIL;
356 glob->subpaths = NIL;
357 glob->subroots = NIL;
358 glob->rewindPlanIDs = NULL;
359 glob->finalrtable = NIL;
360 glob->allRelids = NULL;
361 glob->prunableRelids = NULL;
362 glob->finalrteperminfos = NIL;
363 glob->finalrowmarks = NIL;
364 glob->resultRelations = NIL;
365 glob->appendRelations = NIL;
366 glob->partPruneInfos = NIL;
367 glob->relationOids = NIL;
368 glob->invalItems = NIL;
369 glob->paramExecTypes = NIL;
370 glob->lastPHId = 0;
371 glob->lastRowMarkId = 0;
372 glob->lastPlanNodeId = 0;
373 glob->transientPlan = false;
374 glob->dependsOnRole = false;
375 glob->partition_directory = NULL;
376 glob->rel_notnullatts_hash = NULL;
377
378 /*
379 * Assess whether it's feasible to use parallel mode for this query. We
380 * can't do this in a standalone backend, or if the command will try to
381 * modify any data, or if this is a cursor operation, or if GUCs are set
382 * to values that don't permit parallelism, or if parallel-unsafe
383 * functions are present in the query tree.
384 *
385 * (Note that we do allow CREATE TABLE AS, SELECT INTO, and CREATE
386 * MATERIALIZED VIEW to use parallel plans, but this is safe only because
387 * the command is writing into a completely new table which workers won't
388 * be able to see. If the workers could see the table, the fact that
389 * group locking would cause them to ignore the leader's heavyweight GIN
390 * page locks would make this unsafe. We'll have to fix that somehow if
391 * we want to allow parallel inserts in general; updates and deletes have
392 * additional problems especially around combo CIDs.)
393 *
394 * For now, we don't try to use parallel mode if we're running inside a
395 * parallel worker. We might eventually be able to relax this
396 * restriction, but for now it seems best not to have parallel workers
397 * trying to create their own parallel workers.
398 */
399 if ((cursorOptions & CURSOR_OPT_PARALLEL_OK) != 0 &&
400 IsUnderPostmaster &&
401 parse->commandType == CMD_SELECT &&
402 !parse->hasModifyingCTE &&
403 max_parallel_workers_per_gather > 0 &&
404 !IsParallelWorker())
405 {
406 /* all the cheap tests pass, so scan the query tree */
407 glob->maxParallelHazard = max_parallel_hazard(parse);
408 glob->parallelModeOK = (glob->maxParallelHazard != PROPARALLEL_UNSAFE);
409 }
410 else
411 {
412 /* skip the query tree scan, just assume it's unsafe */
413 glob->maxParallelHazard = PROPARALLEL_UNSAFE;
414 glob->parallelModeOK = false;
415 }
416
417 /*
418 * glob->parallelModeNeeded is normally set to false here and changed to
419 * true during plan creation if a Gather or Gather Merge plan is actually
420 * created (cf. create_gather_plan, create_gather_merge_plan).
421 *
422 * However, if debug_parallel_query = on or debug_parallel_query =
423 * regress, then we impose parallel mode whenever it's safe to do so, even
424 * if the final plan doesn't use parallelism. It's not safe to do so if
425 * the query contains anything parallel-unsafe; parallelModeOK will be
426 * false in that case. Note that parallelModeOK can't change after this
427 * point. Otherwise, everything in the query is either parallel-safe or
428 * parallel-restricted, and in either case it should be OK to impose
429 * parallel-mode restrictions. If that ends up breaking something, then
430 * either some function the user included in the query is incorrectly
431 * labeled as parallel-safe or parallel-restricted when in reality it's
432 * parallel-unsafe, or else the query planner itself has a bug.
433 */
434 glob->parallelModeNeeded = glob->parallelModeOK &&
435 (debug_parallel_query != DEBUG_PARALLEL_OFF);
436
437 /* Determine what fraction of the plan is likely to be scanned */
438 if (cursorOptions & CURSOR_OPT_FAST_PLAN)
439 {
440 /*
441 * We have no real idea how many tuples the user will ultimately FETCH
442 * from a cursor, but it is often the case that he doesn't want 'em
443 * all, or would prefer a fast-start plan anyway so that he can
444 * process some of the tuples sooner. Use a GUC parameter to decide
445 * what fraction to optimize for.
446 */
447 tuple_fraction = cursor_tuple_fraction;
448
449 /*
450 * We document cursor_tuple_fraction as simply being a fraction, which
451 * means the edge cases 0 and 1 have to be treated specially here. We
452 * convert 1 to 0 ("all the tuples") and 0 to a very small fraction.
453 */
454 if (tuple_fraction >= 1.0)
455 tuple_fraction = 0.0;
456 else if (tuple_fraction <= 0.0)
457 tuple_fraction = 1e-10;
458 }
459 else
460 {
461 /* Default assumption is we need all the tuples */
462 tuple_fraction = 0.0;
463 }
464
465 /*
466 * Compute the initial path generation strategy mask.
467 *
468 * Some strategies, such as PGS_FOREIGNJOIN, have no corresponding enable_*
469 * GUC, and so the corresponding bits are always set in the default
470 * strategy mask.
471 *
472 * It may seem surprising that enable_indexscan sets both PGS_INDEXSCAN
473 * and PGS_INDEXONLYSCAN. However, the historical behavior of this GUC
474 * corresponds to this exactly: enable_indexscan=off disables both
475 * index-scan and index-only scan paths, whereas enable_indexonlyscan=off
476 * converts the index-only scan paths that we would have considered into
477 * index scan paths.
478 */
479 glob->default_pgs_mask = PGS_FOREIGNJOIN | PGS_APPEND | PGS_MERGE_APPEND |
480 PGS_GATHER | PGS_CONSIDER_NONPARTIAL;
481 if (enable_tidscan)
482 glob->default_pgs_mask |= PGS_TIDSCAN;
483 if (enable_seqscan)
484 glob->default_pgs_mask |= PGS_SEQSCAN;
485 if (enable_indexscan)
486 glob->default_pgs_mask |= PGS_INDEXSCAN | PGS_INDEXONLYSCAN;
487 if (enable_indexonlyscan)
488 glob->default_pgs_mask |= PGS_CONSIDER_INDEXONLY;
489 if (enable_bitmapscan)
490 glob->default_pgs_mask |= PGS_BITMAPSCAN;
491 if (enable_mergejoin)
492 {
493 glob->default_pgs_mask |= PGS_MERGEJOIN_PLAIN;
494 if (enable_material)
495 glob->default_pgs_mask |= PGS_MERGEJOIN_MATERIALIZE;
496 }
497 if (enable_nestloop)
498 {
499 glob->default_pgs_mask |= PGS_NESTLOOP_PLAIN;
500 if (enable_material)
501 glob->default_pgs_mask |= PGS_NESTLOOP_MATERIALIZE;
502 if (enable_memoize)
503 glob->default_pgs_mask |= PGS_NESTLOOP_MEMOIZE;
504 }
505 if (enable_hashjoin)
506 glob->default_pgs_mask |= PGS_HASHJOIN;
507 if (enable_gathermerge)
508 glob->default_pgs_mask |= PGS_GATHER_MERGE;
509 if (enable_partitionwise_join)
510 glob->default_pgs_mask |= PGS_CONSIDER_PARTITIONWISE;
511
512 /* Allow plugins to take control after we've initialized "glob" */
513 if (planner_setup_hook)
514 (*planner_setup_hook) (glob, parse, query_string, &tuple_fraction, es);
515
516 /* primary planning entry point (may recurse for subqueries) */
517 root = subquery_planner(glob, parse, NULL, NULL, false, tuple_fraction,
518 NULL);
519
520 /* Select best Path and turn it into a Plan */
521 final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
522 best_path = get_cheapest_fractional_path(final_rel, tuple_fraction);
523
524 top_plan = create_plan(root, best_path);
525
526 /*
527 * If creating a plan for a scrollable cursor, make sure it can run
528 * backwards on demand. Add a Material node at the top at need.
529 */
530 if (cursorOptions & CURSOR_OPT_SCROLL)
531 {
532 if (!ExecSupportsBackwardScan(top_plan))
533 top_plan = materialize_finished_plan(top_plan);
534 }
535
536 /*
537 * Optionally add a Gather node for testing purposes, provided this is
538 * actually a safe thing to do.
539 *
540 * We can add Gather even when top_plan has parallel-safe initPlans, but
541 * then we have to move the initPlans to the Gather node because of
542 * SS_finalize_plan's limitations. That would cause cosmetic breakage of
543 * regression tests when debug_parallel_query = regress, because initPlans
544 * that would normally appear on the top_plan move to the Gather, causing
545 * them to disappear from EXPLAIN output. That doesn't seem worth kluging
546 * EXPLAIN to hide, so skip it when debug_parallel_query = regress.
547 */
548 if (debug_parallel_query != DEBUG_PARALLEL_OFF &&
549 top_plan->parallel_safe &&
550 (top_plan->initPlan == NIL ||
551 debug_parallel_query != DEBUG_PARALLEL_REGRESS))
552 {
553 Gather *gather = makeNode(Gather);
554 Cost initplan_cost;
555 bool unsafe_initplans;
556
557 gather->plan.targetlist = top_plan->targetlist;
558 gather->plan.qual = NIL;
559 gather->plan.lefttree = top_plan;
560 gather->plan.righttree = NULL;
561 gather->num_workers = 1;
562 gather->single_copy = true;
563 gather->invisible = (debug_parallel_query == DEBUG_PARALLEL_REGRESS);
564
565 /* Transfer any initPlans to the new top node */
566 gather->plan.initPlan = top_plan->initPlan;
567 top_plan->initPlan = NIL;
568
569 /*
570 * Since this Gather has no parallel-aware descendants to signal to,
571 * we don't need a rescan Param.
572 */
573 gather->rescan_param = -1;
574
575 /*
576 * Ideally we'd use cost_gather here, but setting up dummy path data
577 * to satisfy it doesn't seem much cleaner than knowing what it does.
578 */
579 gather->plan.startup_cost = top_plan->startup_cost +
580 parallel_setup_cost;
581 gather->plan.total_cost = top_plan->total_cost +
582 parallel_setup_cost + parallel_tuple_cost * top_plan->plan_rows;
583 gather->plan.plan_rows = top_plan->plan_rows;
584 gather->plan.plan_width = top_plan->plan_width;
585 gather->plan.parallel_aware = false;
586 gather->plan.parallel_safe = false;
587
588 /*
589 * Delete the initplans' cost from top_plan. We needn't add it to the
590 * Gather node, since the above coding already included it there.
591 */
592 SS_compute_initplan_cost(gather->plan.initPlan,
593 &initplan_cost, &unsafe_initplans);
594 top_plan->startup_cost -= initplan_cost;
595 top_plan->total_cost -= initplan_cost;
596
597 /* use parallel mode for parallel plans. */
598 root->glob->parallelModeNeeded = true;
599
600 top_plan = &gather->plan;
601 }
602
603 /*
604 * If any Params were generated, run through the plan tree and compute
605 * each plan node's extParam/allParam sets. Ideally we'd merge this into
606 * set_plan_references' tree traversal, but for now it has to be separate
607 * because we need to visit subplans before not after main plan.
608 */
609 if (glob->paramExecTypes != NIL)
610 {
611 Assert(list_length(glob->subplans) == list_length(glob->subroots));
612 forboth(lp, glob->subplans, lr, glob->subroots)
613 {
614 Plan *subplan = (Plan *) lfirst(lp);
615 PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);
616
617 SS_finalize_plan(subroot, subplan);
618 }
619 SS_finalize_plan(root, top_plan);
620 }
621
622 /* final cleanup of the plan */
623 Assert(glob->finalrtable == NIL);
624 Assert(glob->finalrteperminfos == NIL);
625 Assert(glob->finalrowmarks == NIL);
626 Assert(glob->resultRelations == NIL);
627 Assert(glob->appendRelations == NIL);
628 top_plan = set_plan_references(root, top_plan);
629 /* ... and the subplans (both regular subplans and initplans) */
630 Assert(list_length(glob->subplans) == list_length(glob->subroots));
631 forboth(lp, glob->subplans, lr, glob->subroots)
632 {
633 Plan *subplan = (Plan *) lfirst(lp);
634 PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);
635
636 lfirst(lp) = set_plan_references(subroot, subplan);
637 }
638
639 /* build the PlannedStmt result */
640 result = makeNode(PlannedStmt);
641
642 result->commandType = parse->commandType;
643 result->queryId = parse->queryId;
644 result->planOrigin = PLAN_STMT_STANDARD;
645 result->hasReturning = (parse->returningList != NIL);
646 result->hasModifyingCTE = parse->hasModifyingCTE;
647 result->canSetTag = parse->canSetTag;
648 result->transientPlan = glob->transientPlan;
649 result->dependsOnRole = glob->dependsOnRole;
650 result->parallelModeNeeded = glob->parallelModeNeeded;
651 result->planTree = top_plan;
652 result->partPruneInfos = glob->partPruneInfos;
653 result->rtable = glob->finalrtable;
654 result->unprunableRelids = bms_difference(glob->allRelids,
655 glob->prunableRelids);
656 result->permInfos = glob->finalrteperminfos;
657 result->resultRelations = glob->resultRelations;
658 result->appendRelations = glob->appendRelations;
659 result->subplans = glob->subplans;
660 result->rewindPlanIDs = glob->rewindPlanIDs;
661 result->rowMarks = glob->finalrowmarks;
662 result->relationOids = glob->relationOids;
663 result->invalItems = glob->invalItems;
664 result->paramExecTypes = glob->paramExecTypes;
665 /* utilityStmt should be null, but we might as well copy it */
666 result->utilityStmt = parse->utilityStmt;
667 result->stmt_location = parse->stmt_location;
668 result->stmt_len = parse->stmt_len;
669
670 result->jitFlags = PGJIT_NONE;
671 if (jit_enabled && jit_above_cost >= 0 &&
672 top_plan->total_cost > jit_above_cost)
673 {
674 result->jitFlags |= PGJIT_PERFORM;
675
676 /*
677 * Decide how much effort should be put into generating better code.
678 */
679 if (jit_optimize_above_cost >= 0 &&
680 top_plan->total_cost > jit_optimize_above_cost)
681 result->jitFlags |= PGJIT_OPT3;
682 if (jit_inline_above_cost >= 0 &&
683 top_plan->total_cost > jit_inline_above_cost)
684 result->jitFlags |= PGJIT_INLINE;
685
686 /*
687 * Decide which operations should be JITed.
688 */
689 if (jit_expressions)
690 result->jitFlags |= PGJIT_EXPR;
691 if (jit_tuple_deforming)
692 result->jitFlags |= PGJIT_DEFORM;
693 }
694
695 /* Allow plugins to take control before we discard "glob" */
696 if (planner_shutdown_hook)
697 (*planner_shutdown_hook) (glob, parse, query_string, result);
698
699 if (glob->partition_directory != NULL)
700 DestroyPartitionDirectory(glob->partition_directory);
701
702 return result;
703}
Bitmapset * bms_difference(const Bitmapset *a, const Bitmapset *b)
Definition bitmapset.c:346
char max_parallel_hazard(Query *parse)
Definition clauses.c:743
bool enable_seqscan
Definition costsize.c:145
int max_parallel_workers_per_gather
Definition costsize.c:143
bool enable_memoize
Definition costsize.c:155
double parallel_setup_cost
Definition costsize.c:136
bool enable_gathermerge
Definition costsize.c:158
double parallel_tuple_cost
Definition costsize.c:135
bool enable_indexonlyscan
Definition costsize.c:147
bool enable_tidscan
Definition costsize.c:149
bool enable_material
Definition costsize.c:154
bool enable_hashjoin
Definition costsize.c:157
bool enable_mergejoin
Definition costsize.c:156
bool enable_partitionwise_join
Definition costsize.c:159
bool enable_nestloop
Definition costsize.c:153
bool enable_bitmapscan
Definition costsize.c:148
Plan * materialize_finished_plan(Plan *subplan)
Plan * create_plan(PlannerInfo *root, Path *best_path)
Definition createplan.c:338
bool ExecSupportsBackwardScan(Plan *node)
Definition execAmi.c:511
#define IsParallelWorker()
Definition parallel.h:60
double jit_optimize_above_cost
Definition jit.c:41
bool jit_enabled
Definition jit.c:32
bool jit_expressions
Definition jit.c:36
bool jit_tuple_deforming
Definition jit.c:38
double jit_above_cost
Definition jit.c:39
double jit_inline_above_cost
Definition jit.c:40
#define PGJIT_OPT3
Definition jit.h:21
#define PGJIT_NONE
Definition jit.h:19
#define PGJIT_EXPR
Definition jit.h:23
#define PGJIT_DEFORM
Definition jit.h:24
#define PGJIT_INLINE
Definition jit.h:22
#define PGJIT_PERFORM
Definition jit.h:20
@ DEBUG_PARALLEL_REGRESS
Definition optimizer.h:98
@ DEBUG_PARALLEL_OFF
Definition optimizer.h:96
#define CURSOR_OPT_SCROLL
#define CURSOR_OPT_FAST_PLAN
#define CURSOR_OPT_PARALLEL_OK
void DestroyPartitionDirectory(PartitionDirectory pdir)
Definition partdesc.c:484
#define PGS_NESTLOOP_MEMOIZE
Definition pathnodes.h:76
#define PGS_TIDSCAN
Definition pathnodes.h:70
#define PGS_FOREIGNJOIN
Definition pathnodes.h:71
#define PGS_APPEND
Definition pathnodes.h:78
#define PGS_MERGE_APPEND
Definition pathnodes.h:79
#define PGS_SEQSCAN
Definition pathnodes.h:66
#define PGS_CONSIDER_INDEXONLY
Definition pathnodes.h:82
#define PGS_NESTLOOP_MATERIALIZE
Definition pathnodes.h:75
#define PGS_MERGEJOIN_PLAIN
Definition pathnodes.h:72
#define PGS_MERGEJOIN_MATERIALIZE
Definition pathnodes.h:73
#define PGS_HASHJOIN
Definition pathnodes.h:77
#define PGS_CONSIDER_NONPARTIAL
Definition pathnodes.h:84
#define PGS_BITMAPSCAN
Definition pathnodes.h:69
#define PGS_GATHER
Definition pathnodes.h:80
#define PGS_CONSIDER_PARTITIONWISE
Definition pathnodes.h:83
#define PGS_GATHER_MERGE
Definition pathnodes.h:81
#define PGS_INDEXONLYSCAN
Definition pathnodes.h:68
#define PGS_INDEXSCAN
Definition pathnodes.h:67
#define PGS_NESTLOOP_PLAIN
Definition pathnodes.h:74
double cursor_tuple_fraction
Definition planner.c:68
planner_shutdown_hook_type planner_shutdown_hook
Definition planner.c:80
PlannerInfo * subquery_planner(PlannerGlobal *glob, Query *parse, char *plan_name, PlannerInfo *parent_root, bool hasRecursion, double tuple_fraction, SetOperationStmt *setops)
Definition planner.c:740
Path * get_cheapest_fractional_path(RelOptInfo *rel, double tuple_fraction)
Definition planner.c:6655
planner_setup_hook_type planner_setup_hook
Definition planner.c:77
int debug_parallel_query
Definition planner.c:69
@ PLAN_STMT_STANDARD
Definition plannodes.h:41
Plan * set_plan_references(PlannerInfo *root, Plan *plan)
Definition setrefs.c:288
struct Plan * planTree
Definition plannodes.h:101
bool hasModifyingCTE
Definition plannodes.h:83
List * appendRelations
Definition plannodes.h:127
List * permInfos
Definition plannodes.h:120
bool canSetTag
Definition plannodes.h:86
List * rowMarks
Definition plannodes.h:138
Bitmapset * rewindPlanIDs
Definition plannodes.h:135
int64 queryId
Definition plannodes.h:71
ParseLoc stmt_len
Definition plannodes.h:165
PlannedStmtOrigin planOrigin
Definition plannodes.h:77
bool hasReturning
Definition plannodes.h:80
ParseLoc stmt_location
Definition plannodes.h:163
List * invalItems
Definition plannodes.h:144
bool transientPlan
Definition plannodes.h:89
List * resultRelations
Definition plannodes.h:124
List * subplans
Definition plannodes.h:132
List * relationOids
Definition plannodes.h:141
bool dependsOnRole
Definition plannodes.h:92
Bitmapset * unprunableRelids
Definition plannodes.h:115
CmdType commandType
Definition plannodes.h:68
Node * utilityStmt
Definition plannodes.h:150
List * rtable
Definition plannodes.h:109
List * partPruneInfos
Definition plannodes.h:106
List * paramExecTypes
Definition plannodes.h:147
bool parallelModeNeeded
Definition plannodes.h:95
Bitmapset * prunableRelids
Definition pathnodes.h:206
char maxParallelHazard
Definition pathnodes.h:254
List * subplans
Definition pathnodes.h:178
bool dependsOnRole
Definition pathnodes.h:245
Bitmapset * allRelids
Definition pathnodes.h:199
List * appendRelations
Definition pathnodes.h:218
List * finalrowmarks
Definition pathnodes.h:212
List * paramExecTypes
Definition pathnodes.h:230
bool parallelModeOK
Definition pathnodes.h:248
bool transientPlan
Definition pathnodes.h:242
Bitmapset * rewindPlanIDs
Definition pathnodes.h:190
List * finalrteperminfos
Definition pathnodes.h:209
List * subpaths
Definition pathnodes.h:181
Index lastRowMarkId
Definition pathnodes.h:236
List * resultRelations
Definition pathnodes.h:215
List * partPruneInfos
Definition pathnodes.h:221
List * finalrtable
Definition pathnodes.h:193
uint64 default_pgs_mask
Definition pathnodes.h:257
bool parallelModeNeeded
Definition pathnodes.h:251
void SS_finalize_plan(PlannerInfo *root, Plan *plan)
Definition subselect.c:2404
void SS_compute_initplan_cost(List *init_plans, Cost *initplan_cost_p, bool *unsafe_initplans_p)
Definition subselect.c:2348

References PlannerGlobal::allRelids, PlannerGlobal::appendRelations, PlannedStmt::appendRelations, Assert, bms_difference(), PlannedStmt::canSetTag, CMD_SELECT, PlannedStmt::commandType, create_plan(), CURSOR_OPT_FAST_PLAN, CURSOR_OPT_PARALLEL_OK, CURSOR_OPT_SCROLL, cursor_tuple_fraction, DEBUG_PARALLEL_OFF, debug_parallel_query, DEBUG_PARALLEL_REGRESS, PlannerGlobal::default_pgs_mask, PlannerGlobal::dependsOnRole, PlannedStmt::dependsOnRole, DestroyPartitionDirectory(), enable_bitmapscan, enable_gathermerge, enable_hashjoin, enable_indexonlyscan, enable_indexscan, enable_material, enable_memoize, enable_mergejoin, enable_nestloop, enable_partitionwise_join, enable_seqscan, enable_tidscan, ExecSupportsBackwardScan(), fb(), fetch_upper_rel(), PlannerGlobal::finalrowmarks, PlannerGlobal::finalrtable, PlannerGlobal::finalrteperminfos, forboth, get_cheapest_fractional_path(), PlannedStmt::hasModifyingCTE, PlannedStmt::hasReturning, PlannerGlobal::invalItems, PlannedStmt::invalItems, IsParallelWorker, IsUnderPostmaster, jit_above_cost, jit_enabled, jit_expressions, jit_inline_above_cost, jit_optimize_above_cost, jit_tuple_deforming, PlannedStmt::jitFlags, PlannerGlobal::lastPHId, PlannerGlobal::lastPlanNodeId, PlannerGlobal::lastRowMarkId, lfirst, lfirst_node, list_length(), makeNode, materialize_finished_plan(), max_parallel_hazard(), max_parallel_workers_per_gather, PlannerGlobal::maxParallelHazard, NIL, parallel_setup_cost, parallel_tuple_cost, PlannerGlobal::parallelModeNeeded, PlannedStmt::parallelModeNeeded, PlannerGlobal::parallelModeOK, PlannerGlobal::paramExecTypes, PlannedStmt::paramExecTypes, parse(), PlannerGlobal::partPruneInfos, PlannedStmt::partPruneInfos, PlannedStmt::permInfos, PGJIT_DEFORM, PGJIT_EXPR, PGJIT_INLINE, PGJIT_NONE, PGJIT_OPT3, PGJIT_PERFORM, PGS_APPEND, PGS_BITMAPSCAN, PGS_CONSIDER_INDEXONLY, PGS_CONSIDER_NONPARTIAL, PGS_CONSIDER_PARTITIONWISE, PGS_FOREIGNJOIN, PGS_GATHER, PGS_GATHER_MERGE, PGS_HASHJOIN, PGS_INDEXONLYSCAN, PGS_INDEXSCAN, PGS_MERGE_APPEND, PGS_MERGEJOIN_MATERIALIZE, PGS_MERGEJOIN_PLAIN, PGS_NESTLOOP_MATERIALIZE, PGS_NESTLOOP_MEMOIZE, PGS_NESTLOOP_PLAIN, PGS_SEQSCAN, PGS_TIDSCAN, PLAN_STMT_STANDARD, planner_setup_hook, planner_shutdown_hook, PlannedStmt::planOrigin, PlannedStmt::planTree, PlannerGlobal::prunableRelids, PlannedStmt::queryId, PlannerGlobal::relationOids, PlannedStmt::relationOids, PlannerGlobal::resultRelations, PlannedStmt::resultRelations, PlannerGlobal::rewindPlanIDs, PlannedStmt::rewindPlanIDs, root, PlannedStmt::rowMarks, PlannedStmt::rtable, set_plan_references(), SS_compute_initplan_cost(), SS_finalize_plan(), PlannedStmt::stmt_len, PlannedStmt::stmt_location, PlannerGlobal::subpaths, PlannerGlobal::subplans, PlannedStmt::subplans, subquery_planner(), PlannerGlobal::transientPlan, PlannedStmt::transientPlan, PlannedStmt::unprunableRelids, UPPERREL_FINAL, and PlannedStmt::utilityStmt.

Referenced by delay_execution_planner(), pgss_planner(), and planner().
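The jitFlags cascade near the end of the listing can be restated as a standalone helper. This is a sketch with illustrative bit values (the real ones live in jit.h) and the stock cost thresholds of 100000 / 500000 / 500000; higher-effort JIT modes require progressively higher plan cost.

#include <stdio.h>

#define MY_PGJIT_PERFORM (1 << 0)   /* illustrative values, not jit.h's */
#define MY_PGJIT_OPT3    (1 << 1)
#define MY_PGJIT_INLINE  (1 << 2)

static int
jit_flags_for_cost(double total_cost, double above_cost,
                   double optimize_above_cost, double inline_above_cost)
{
    int flags = 0;

    if (above_cost >= 0 && total_cost > above_cost)
    {
        flags |= MY_PGJIT_PERFORM;
        /* more expensive plans justify more expensive code generation */
        if (optimize_above_cost >= 0 && total_cost > optimize_above_cost)
            flags |= MY_PGJIT_OPT3;
        if (inline_above_cost >= 0 && total_cost > inline_above_cost)
            flags |= MY_PGJIT_INLINE;
    }
    return flags;
}

int main(void)
{
    printf("0x%x\n", jit_flags_for_cost(200000, 100000, 500000, 500000)); /* 0x1 */
    printf("0x%x\n", jit_flags_for_cost(600000, 100000, 500000, 500000)); /* 0x7 */
    return 0;
}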

◆ standard_qp_callback()

static void standard_qp_callback ( PlannerInfo * root,
void * extra 
)
static

Definition at line 3529 of file planner.c.

3529 standard_qp_callback(PlannerInfo *root, void *extra)
3530{
3531 Query *parse = root->parse;
3532 standard_qp_extra *qp_extra = (standard_qp_extra *) extra;
3533 List *tlist = root->processed_tlist;
3534 List *activeWindows = qp_extra->activeWindows;
3535
3536 /*
3537 * Calculate pathkeys that represent grouping/ordering and/or ordered
3538 * aggregate requirements.
3539 */
3540 if (qp_extra->gset_data)
3541 {
3542 /*
3543 * With grouping sets, just use the first RollupData's groupClause. We
3544 * don't make any effort to optimize grouping clauses when there are
3545 * grouping sets, nor can we combine aggregate ordering keys with
3546 * grouping.
3547 */
3548 List *rollups = qp_extra->gset_data->rollups;
3549 List *groupClause = (rollups ? linitial_node(RollupData, rollups)->groupClause : NIL);
3550
3551 if (grouping_is_sortable(groupClause))
3552 {
3553 bool sortable;
3554
3555 /*
3556 * The groupClause is logically below the grouping step. So if
3557 * there is an RTE entry for the grouping step, we need to remove
3558 * its RT index from the sort expressions before we make PathKeys
3559 * for them.
3560 */
3561 root->group_pathkeys =
3562 make_pathkeys_for_sortclauses_extended(root,
3563 &groupClause,
3564 tlist,
3565 false,
3566 parse->hasGroupRTE,
3567 &sortable,
3568 false);
3569 Assert(sortable);
3570 root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3571 }
3572 else
3573 {
3574 root->group_pathkeys = NIL;
3575 root->num_groupby_pathkeys = 0;
3576 }
3577 }
3578 else if (parse->groupClause || root->numOrderedAggs > 0)
3579 {
3580 /*
3581 * With a plain GROUP BY list, we can remove any grouping items that
3582 * are proven redundant by EquivalenceClass processing. For example,
3583 * we can remove y given "WHERE x = y GROUP BY x, y". These aren't
3584 * especially common cases, but they're nearly free to detect. Note
3585 * that we remove redundant items from processed_groupClause but not
3586 * the original parse->groupClause.
3587 */
3588 bool sortable;
3589
3590 /*
3591 * Convert group clauses into pathkeys. Set the ec_sortref field of
3592 * EquivalenceClass'es if it's not set yet.
3593 */
3594 root->group_pathkeys =
3595 make_pathkeys_for_sortclauses_extended(root,
3596 &root->processed_groupClause,
3597 tlist,
3598 true,
3599 false,
3600 &sortable,
3601 true);
3602 if (!sortable)
3603 {
3604 /* Can't sort; no point in considering aggregate ordering either */
3605 root->group_pathkeys = NIL;
3606 root->num_groupby_pathkeys = 0;
3607 }
3608 else
3609 {
3610 root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3611 /* If we have ordered aggs, consider adding onto group_pathkeys */
3612 if (root->numOrderedAggs > 0)
3613 adjust_group_pathkeys_for_groupagg(root);
3614 }
3615 }
3616 else
3617 {
3618 root->group_pathkeys = NIL;
3619 root->num_groupby_pathkeys = 0;
3620 }
3621
3622 /* We consider only the first (bottom) window in pathkeys logic */
3623 if (activeWindows != NIL)
3624 {
3625 WindowClause *wc = linitial_node(WindowClause, activeWindows);
3626
3627 root->window_pathkeys = make_pathkeys_for_window(root,
3628 wc,
3629 tlist);
3630 }
3631 else
3632 root->window_pathkeys = NIL;
3633
3634 /*
3635 * As with GROUP BY, we can discard any DISTINCT items that are proven
3636 * redundant by EquivalenceClass processing. The non-redundant list is
3637 * kept in root->processed_distinctClause, leaving the original
3638 * parse->distinctClause alone.
3639 */
3640 if (parse->distinctClause)
3641 {
3642 bool sortable;
3643
3644 /* Make a copy since pathkey processing can modify the list */
3645 root->processed_distinctClause = list_copy(parse->distinctClause);
3646 root->distinct_pathkeys =
3647 make_pathkeys_for_sortclauses_extended(root,
3648 &root->processed_distinctClause,
3649 tlist,
3650 true,
3651 false,
3652 &sortable,
3653 false);
3654 if (!sortable)
3655 root->distinct_pathkeys = NIL;
3656 }
3657 else
3658 root->distinct_pathkeys = NIL;
3659
3660 root->sort_pathkeys =
3661 make_pathkeys_for_sortclauses(root,
3662 parse->sortClause,
3663 tlist);
3664
3665 /* setting setop_pathkeys might be useful to the union planner */
3666 if (qp_extra->setop != NULL)
3667 {
3668 List *groupClauses;
3669 bool sortable;
3670
3671 groupClauses = generate_setop_child_grouplist(qp_extra->setop, tlist);
3672
3673 root->setop_pathkeys =
3674 make_pathkeys_for_sortclauses_extended(root,
3675 &groupClauses,
3676 tlist,
3677 false,
3678 false,
3679 &sortable,
3680 false);
3681 if (!sortable)
3682 root->setop_pathkeys = NIL;
3683 }
3684 else
3685 root->setop_pathkeys = NIL;
3686
3687 /*
3688 * Figure out whether we want a sorted result from query_planner.
3689 *
3690 * If we have a sortable GROUP BY clause, then we want a result sorted
3691 * properly for grouping. Otherwise, if we have window functions to
3692 * evaluate, we try to sort for the first window. Otherwise, if there's a
3693 * sortable DISTINCT clause that's more rigorous than the ORDER BY clause,
3694 * we try to produce output that's sufficiently well sorted for the
3695 * DISTINCT. Otherwise, if there is an ORDER BY clause, we want to sort
3696 * by the ORDER BY clause. Otherwise, if we're a subquery being planned
3697 * for a set operation which can benefit from presorted results and have a
3698 * sortable targetlist, we want to sort by the target list.
3699 *
3700 * Note: if we have both ORDER BY and GROUP BY, and ORDER BY is a superset
3701 * of GROUP BY, it would be tempting to request sort by ORDER BY --- but
3702 * that might just leave us failing to exploit an available sort order at
3703 * all. Needs more thought. The choice for DISTINCT versus ORDER BY is
3704 * much easier, since we know that the parser ensured that one is a
3705 * superset of the other.
3706 */
3707 if (root->group_pathkeys)
3708 root->query_pathkeys = root->group_pathkeys;
3709 else if (root->window_pathkeys)
3710 root->query_pathkeys = root->window_pathkeys;
3711 else if (list_length(root->distinct_pathkeys) >
3712 list_length(root->sort_pathkeys))
3713 root->query_pathkeys = root->distinct_pathkeys;
3714 else if (root->sort_pathkeys)
3715 root->query_pathkeys = root->sort_pathkeys;
3716 else if (root->setop_pathkeys != NIL)
3717 root->query_pathkeys = root->setop_pathkeys;
3718 else
3719 root->query_pathkeys = NIL;
3720}
static void adjust_group_pathkeys_for_groupagg(PlannerInfo *root)
Definition planner.c:3305
static List * generate_setop_child_grouplist(SetOperationStmt *op, List *targetlist)
Definition planner.c:8414

References adjust_group_pathkeys_for_groupagg(), Assert, fb(), generate_setop_child_grouplist(), grouping_is_sortable(), linitial_node, list_copy(), list_length(), make_pathkeys_for_sortclauses(), make_pathkeys_for_sortclauses_extended(), make_pathkeys_for_window(), NIL, parse(), and root.

Referenced by grouping_planner().
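The final precedence among pathkey lists can be condensed into one decision function. A standalone sketch using list lengths as stand-ins for the lists themselves:

#include <stdio.h>

/* which pathkey list becomes root->query_pathkeys, per the listing above */
static const char *
choose_query_pathkeys(int n_group, int n_window, int n_distinct,
                      int n_sort, int n_setop)
{
    if (n_group > 0)
        return "group_pathkeys";
    if (n_window > 0)
        return "window_pathkeys";
    if (n_distinct > n_sort)
        return "distinct_pathkeys";     /* DISTINCT stricter than ORDER BY */
    if (n_sort > 0)
        return "sort_pathkeys";
    if (n_setop > 0)
        return "setop_pathkeys";
    return "NIL";
}

int main(void)
{
    /* ORDER BY only */
    printf("%s\n", choose_query_pathkeys(0, 0, 0, 2, 0));
    /* DISTINCT over three columns, ORDER BY over one: DISTINCT wins */
    printf("%s\n", choose_query_pathkeys(0, 0, 3, 1, 0));
    return 0;
}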

◆ subquery_planner()

PlannerInfo * subquery_planner ( PlannerGlobal * glob,
Query * parse,
char * plan_name,
PlannerInfo * parent_root,
bool  hasRecursion,
double  tuple_fraction,
SetOperationStmt * setops 
)

Definition at line 740 of file planner.c.

740 subquery_planner(PlannerGlobal *glob, Query *parse, char *plan_name,
741 PlannerInfo *parent_root, bool hasRecursion,
742 double tuple_fraction, SetOperationStmt *setops)
743{
747 bool hasOuterJoins;
748 bool hasResultRTEs;
749 RelOptInfo *final_rel;
750 ListCell *l;
751
752 /* Create a PlannerInfo data structure for this subquery */
753 root = makeNode(PlannerInfo);
754 root->parse = parse;
755 root->glob = glob;
756 root->query_level = parent_root ? parent_root->query_level + 1 : 1;
757 root->plan_name = plan_name;
758 root->parent_root = parent_root;
759 root->plan_params = NIL;
760 root->outer_params = NULL;
761 root->planner_cxt = CurrentMemoryContext;
762 root->init_plans = NIL;
763 root->cte_plan_ids = NIL;
764 root->multiexpr_params = NIL;
765 root->join_domains = NIL;
766 root->eq_classes = NIL;
767 root->ec_merging_done = false;
768 root->last_rinfo_serial = 0;
769 root->all_result_relids =
770 parse->resultRelation ? bms_make_singleton(parse->resultRelation) : NULL;
771 root->leaf_result_relids = NULL; /* we'll find out leaf-ness later */
772 root->append_rel_list = NIL;
773 root->row_identity_vars = NIL;
774 root->rowMarks = NIL;
775 memset(root->upper_rels, 0, sizeof(root->upper_rels));
776 memset(root->upper_targets, 0, sizeof(root->upper_targets));
777 root->processed_groupClause = NIL;
778 root->processed_distinctClause = NIL;
779 root->processed_tlist = NIL;
780 root->update_colnos = NIL;
781 root->grouping_map = NULL;
782 root->minmax_aggs = NIL;
783 root->qual_security_level = 0;
784 root->hasPseudoConstantQuals = false;
785 root->hasAlternativeSubPlans = false;
786 root->placeholdersFrozen = false;
787 root->hasRecursion = hasRecursion;
788 root->assumeReplanning = false;
789 if (hasRecursion)
790 root->wt_param_id = assign_special_exec_param(root);
791 else
792 root->wt_param_id = -1;
793 root->non_recursive_path = NULL;
794
795 /*
796 * Create the top-level join domain. This won't have valid contents until
797 * deconstruct_jointree fills it in, but the node needs to exist before
798 * that so we can build EquivalenceClasses referencing it.
799 */
800 root->join_domains = list_make1(makeNode(JoinDomain));
801
802 /*
803 * If there is a WITH list, process each WITH query and either convert it
804 * to RTE_SUBQUERY RTE(s) or build an initplan SubPlan structure for it.
805 */
806 if (parse->cteList)
807 SS_process_ctes(root);
808
809 /*
810 * If it's a MERGE command, transform the joinlist as appropriate.
811 */
812 transform_MERGE_to_join(parse);
813
814 /*
815 * Scan the rangetable for relation RTEs and retrieve the necessary
816 * catalog information for each relation. Using this information, clear
817 * the inh flag for any relation that has no children, collect not-null
818 * attribute numbers for any relation that has column not-null
819 * constraints, and expand virtual generated columns for any relation that
820 * contains them. Note that this step does not descend into sublinks and
821 * subqueries; if we pull up any sublinks or subqueries below, their
822 * relation RTEs are processed just before pulling them up.
823 */
824 preprocess_relation_rtes(root);
825
826 /*
827 * If the FROM clause is empty, replace it with a dummy RTE_RESULT RTE, so
828 * that we don't need so many special cases to deal with that situation.
829 */
830 replace_empty_jointree(parse);
831
832 /*
833 * Look for ANY and EXISTS SubLinks in WHERE and JOIN/ON clauses, and try
834 * to transform them into joins. Note that this step does not descend
835 * into subqueries; if we pull up any subqueries below, their SubLinks are
836 * processed just before pulling them up.
837 */
838 if (parse->hasSubLinks)
839 pull_up_sublinks(root);
840
841 /*
842 * Scan the rangetable for function RTEs, do const-simplification on them,
843 * and then inline them if possible (producing subqueries that might get
844 * pulled up next). Recursion issues here are handled in the same way as
845 * for SubLinks.
846 */
847 preprocess_function_rtes(root);
848
849 /*
850 * Check to see if any subqueries in the jointree can be merged into this
851 * query.
852 */
853 pull_up_subqueries(root);
854
855 /*
856 * If this is a simple UNION ALL query, flatten it into an appendrel. We
857 * do this now because it requires applying pull_up_subqueries to the leaf
858 * queries of the UNION ALL, which weren't touched above because they
859 * weren't referenced by the jointree (they will be after we do this).
860 */
861 if (parse->setOperations)
862 flatten_simple_union_all(root);
863
864 /*
865 * Survey the rangetable to see what kinds of entries are present. We can
866 * skip some later processing if relevant SQL features are not used; for
867 * example if there are no JOIN RTEs we can avoid the expense of doing
868 * flatten_join_alias_vars(). This must be done after we have finished
869 * adding rangetable entries, of course. (Note: actually, processing of
870 * inherited or partitioned rels can cause RTEs for their child tables to
871 * get added later; but those must all be RTE_RELATION entries, so they
872 * don't invalidate the conclusions drawn here.)
873 */
874 root->hasJoinRTEs = false;
875 root->hasLateralRTEs = false;
876 root->group_rtindex = 0;
877 hasOuterJoins = false;
878 hasResultRTEs = false;
879 foreach(l, parse->rtable)
880 {
881 RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
882
883 switch (rte->rtekind)
884 {
885 case RTE_JOIN:
886 root->hasJoinRTEs = true;
887 if (IS_OUTER_JOIN(rte->jointype))
888 hasOuterJoins = true;
889 break;
890 case RTE_RESULT:
891 hasResultRTEs = true;
892 break;
893 case RTE_GROUP:
894 Assert(parse->hasGroupRTE);
895 root->group_rtindex = list_cell_number(parse->rtable, l) + 1;
896 break;
897 default:
898 /* No work here for other RTE types */
899 break;
900 }
901
902 if (rte->lateral)
903 root->hasLateralRTEs = true;
904
905 /*
906 * We can also determine the maximum security level required for any
907 * securityQuals now. Addition of inheritance-child RTEs won't affect
908 * this, because child tables don't have their own securityQuals; see
909 * expand_single_inheritance_child().
910 */
911 if (rte->securityQuals)
912 root->qual_security_level = Max(root->qual_security_level,
913 list_length(rte->securityQuals));
914 }
915
916 /*
917 * If we have now verified that the query target relation is
918 * non-inheriting, mark it as a leaf target.
919 */
920 if (parse->resultRelation)
921 {
922 RangeTblEntry *rte = rt_fetch(parse->resultRelation, parse->rtable);
923
924 if (!rte->inh)
925 root->leaf_result_relids =
926 bms_make_singleton(parse->resultRelation);
927 }
928
929 /*
930 * This would be a convenient time to check access permissions for all
931 * relations mentioned in the query, since it would be better to fail now,
932 * before doing any detailed planning. However, for historical reasons,
933 * we leave this to be done at executor startup.
934 *
935 * Note, however, that we do need to check access permissions for any view
936 * relations mentioned in the query, in order to prevent information being
937 * leaked by selectivity estimation functions, which only check view owner
938 * permissions on underlying tables (see all_rows_selectable() and its
939 * callers). This is a little ugly, because it means that access
940 * permissions for views will be checked twice, which is another reason
941 * why it would be better to do all the ACL checks here.
942 */
943 foreach(l, parse->rtable)
944 {
945 RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
946
947 if (rte->perminfoindex != 0 &&
948 rte->relkind == RELKIND_VIEW)
949 {
950 RTEPermissionInfo *perminfo;
951 bool result;
952
953 perminfo = getRTEPermissionInfo(parse->rteperminfos, rte);
954 result = ExecCheckOneRtePermissions(rte, perminfo, true);
955 if (!result)
956 aclcheck_error(ACLCHECK_NO_PRIV, OBJECT_VIEW,
957 get_rel_name(perminfo->relid));
958 }
959 }
960
961 /*
962 * Preprocess RowMark information. We need to do this after subquery
963 * pullup, so that all base relations are present.
964 */
965 preprocess_rowmarks(root);
966
967 /*
968 * Set hasHavingQual to remember if HAVING clause is present. Needed
969 * because preprocess_expression will reduce a constant-true condition to
970 * an empty qual list ... but "HAVING TRUE" is not a semantic no-op.
971 */
972 root->hasHavingQual = (parse->havingQual != NULL);
973
974 /*
975 * Do expression preprocessing on targetlist and quals, as well as other
976 * random expressions in the querytree. Note that we do not need to
977 * handle sort/group expressions explicitly, because they are actually
978 * part of the targetlist.
979 */
980 parse->targetList = (List *)
981 preprocess_expression(root, (Node *) parse->targetList,
982 EXPRKIND_TARGET);
983
984 newWithCheckOptions = NIL;
985 foreach(l, parse->withCheckOptions)
986 {
987 WithCheckOption *wco = lfirst_node(WithCheckOption, l);
988
989 wco->qual = preprocess_expression(root, wco->qual,
990 EXPRKIND_QUAL);
991 if (wco->qual != NULL)
992 newWithCheckOptions = lappend(newWithCheckOptions, wco);
993 }
994 parse->withCheckOptions = newWithCheckOptions;
995
996 parse->returningList = (List *)
997 preprocess_expression(root, (Node *) parse->returningList,
998 EXPRKIND_TARGET);
999
1000 preprocess_qual_conditions(root, (Node *) parse->jointree);
1001
1002 parse->havingQual = preprocess_expression(root, parse->havingQual,
1003 EXPRKIND_QUAL);
1004
1005 foreach(l, parse->windowClause)
1006 {
1007 WindowClause *wc = lfirst_node(WindowClause, l);
1008
1009 /* partitionClause/orderClause are sort/group expressions */
1010 wc->startOffset = preprocess_expression(root, wc->startOffset,
1011 EXPRKIND_LIMIT);
1012 wc->endOffset = preprocess_expression(root, wc->endOffset,
1013 EXPRKIND_LIMIT);
1014 }
1015
1016 parse->limitOffset = preprocess_expression(root, parse->limitOffset,
1017 EXPRKIND_LIMIT);
1018 parse->limitCount = preprocess_expression(root, parse->limitCount,
1019 EXPRKIND_LIMIT);
1020
1021 if (parse->onConflict)
1022 {
1023 parse->onConflict->arbiterElems = (List *)
1024 preprocess_expression(root,
1025 (Node *) parse->onConflict->arbiterElems,
1026 EXPRKIND_ARBITER_ELEM);
1027 parse->onConflict->arbiterWhere =
1028 preprocess_expression(root,
1029 parse->onConflict->arbiterWhere,
1030 EXPRKIND_QUAL);
1031 parse->onConflict->onConflictSet = (List *)
1032 preprocess_expression(root,
1033 (Node *) parse->onConflict->onConflictSet,
1034 EXPRKIND_TARGET);
1035 parse->onConflict->onConflictWhere =
1036 preprocess_expression(root,
1037 parse->onConflict->onConflictWhere,
1038 EXPRKIND_QUAL);
1039 /* exclRelTlist contains only Vars, so no preprocessing needed */
1040 }
1041
1042 foreach(l, parse->mergeActionList)
1043 {
1044 MergeAction *action = lfirst_node(MergeAction, l);
1045
1046 action->targetList = (List *)
1047 preprocess_expression(root,
1048 (Node *) action->targetList,
1049 EXPRKIND_TARGET);
1050 action->qual =
1051 preprocess_expression(root,
1052 (Node *) action->qual,
1053 EXPRKIND_QUAL);
1054 }
1055
1056 parse->mergeJoinCondition =
1057 preprocess_expression(root, parse->mergeJoinCondition, EXPRKIND_QUAL);
1058
1059 root->append_rel_list = (List *)
1060 preprocess_expression(root, (Node *) root->append_rel_list,
1061 EXPRKIND_APPINFO);
1062
1063 /* Also need to preprocess expressions within RTEs */
1064 foreach(l, parse->rtable)
1065 {
1066 RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
1067 int kind;
1068 ListCell *lcsq;
1069
1070 if (rte->rtekind == RTE_RELATION)
1071 {
1072 if (rte->tablesample)
1073 rte->tablesample = (TableSampleClause *)
1074 preprocess_expression(root,
1075 (Node *) rte->tablesample,
1076 EXPRKIND_TABLESAMPLE);
1077 }
1078 else if (rte->rtekind == RTE_SUBQUERY)
1079 {
1080 /*
1081 * We don't want to do all preprocessing yet on the subquery's
1082 * expressions, since that will happen when we plan it. But if it
1083 * contains any join aliases of our level, those have to get
1084 * expanded now, because planning of the subquery won't do it.
1085 * That's only possible if the subquery is LATERAL.
1086 */
1087 if (rte->lateral && root->hasJoinRTEs)
1088 rte->subquery = (Query *)
1089 flatten_join_alias_vars(root, root->parse,
1090 (Node *) rte->subquery);
1091 }
1092 else if (rte->rtekind == RTE_FUNCTION)
1093 {
1094 /* Preprocess the function expression(s) fully */
1095 kind = rte->lateral ? EXPRKIND_RTFUNC_LATERAL : EXPRKIND_RTFUNC;
1096 rte->functions = (List *)
1097 preprocess_expression(root, (Node *) rte->functions, kind);
1098 }
1099 else if (rte->rtekind == RTE_TABLEFUNC)
1100 {
1101 /* Preprocess the function expression(s) fully */
1102 kind = rte->lateral ? EXPRKIND_TABLEFUNC_LATERAL : EXPRKIND_TABLEFUNC;
1103 rte->tablefunc = (TableFunc *)
1104 preprocess_expression(root, (Node *) rte->tablefunc, kind);
1105 }
1106 else if (rte->rtekind == RTE_VALUES)
1107 {
1108 /* Preprocess the values lists fully */
1109 kind = rte->lateral ? EXPRKIND_VALUES_LATERAL : EXPRKIND_VALUES;
1110 rte->values_lists = (List *)
1111 preprocess_expression(root, (Node *) rte->values_lists, kind);
1112 }
1113 else if (rte->rtekind == RTE_GROUP)
1114 {
1115 /* Preprocess the groupexprs list fully */
1116 rte->groupexprs = (List *)
1117 preprocess_expression(root, (Node *) rte->groupexprs,
1118 EXPRKIND_GROUPEXPR);
1119 }
1120
1121 /*
1122 * Process each element of the securityQuals list as if it were a
1123 * separate qual expression (as indeed it is). We need to do it this
1124 * way to get proper canonicalization of AND/OR structure. Note that
1125 * this converts each element into an implicit-AND sublist.
1126 */
1127 foreach(lcsq, rte->securityQuals)
1128 {
1129 lfirst(lcsq) = preprocess_expression(root,
1130 (Node *) lfirst(lcsq),
1131 EXPRKIND_QUAL);
1132 }
1133 }
1134
1135 /*
1136 * Now that we are done preprocessing expressions, and in particular done
1137 * flattening join alias variables, get rid of the joinaliasvars lists.
1138 * They no longer match what expressions in the rest of the tree look
1139 * like, because we have not preprocessed expressions in those lists (and
1140 * do not want to; for example, expanding a SubLink there would result in
1141 * a useless unreferenced subplan). Leaving them in place simply creates
1142 * a hazard for later scans of the tree. We could try to prevent that by
1143 * using QTW_IGNORE_JOINALIASES in every tree scan done after this point,
1144 * but that doesn't sound very reliable.
1145 */
1146 if (root->hasJoinRTEs)
1147 {
1148 foreach(l, parse->rtable)
1149 {
1150 RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
1151
1152 rte->joinaliasvars = NIL;
1153 }
1154 }
1155
1156 /*
1157 * Replace any Vars in the subquery's targetlist and havingQual that
1158 * reference GROUP outputs with the underlying grouping expressions.
1159 *
1160 * Note that we need to perform this replacement after we've preprocessed
1161 * the grouping expressions. This is to ensure that there is only one
1162 * instance of SubPlan for each SubLink contained within the grouping
1163 * expressions.
1164 */
1165 if (parse->hasGroupRTE)
1166 {
1167 parse->targetList = (List *)
1168 flatten_group_exprs(root, root->parse, (Node *) parse->targetList);
1169 parse->havingQual =
1170 flatten_group_exprs(root, root->parse, parse->havingQual);
1171 }
1172
1173 /* Constant-folding might have removed all set-returning functions */
1174 if (parse->hasTargetSRFs)
1175 parse->hasTargetSRFs = expression_returns_set((Node *) parse->targetList);
1176
1177 /*
1178 * If we have grouping sets, expand the groupingSets tree of this query to
1179 * a flat list of grouping sets. We need to do this before optimizing
1180 * HAVING, since we can't easily tell if there's an empty grouping set
1181 * until we have this representation.
1182 */
1183 if (parse->groupingSets)
1184 {
1185 parse->groupingSets =
1186 expand_grouping_sets(parse->groupingSets, parse->groupDistinct, -1);
1187 }
1188
1189 /*
1190 * In some cases we may want to transfer a HAVING clause into WHERE. We
1191 * cannot do so if the HAVING clause contains aggregates (obviously) or
1192 * volatile functions (since a HAVING clause is supposed to be executed
1193 * only once per group). We also can't do this if there are any grouping
1194 * sets and the clause references any columns that are nullable by the
1195 * grouping sets; the nulled values of those columns are not available
1196 * before the grouping step. (The test on groupClause might seem wrong,
1197 * but it's okay: it's just an optimization to avoid running pull_varnos
1198 * when there cannot be any Vars in the HAVING clause.)
1199 *
1200 * Also, it may be that the clause is so expensive to execute that we're
1201 * better off doing it only once per group, despite the loss of
1202 * selectivity. This is hard to estimate short of doing the entire
1203 * planning process twice, so we use a heuristic: clauses containing
1204 * subplans are left in HAVING. Otherwise, we move or copy the HAVING
1205 * clause into WHERE, in hopes of eliminating tuples before aggregation
1206 * instead of after.
1207 *
1208 * If the query has no empty grouping set then we can simply move such a
1209 * clause into WHERE; any group that fails the clause will not be in the
1210 * output because none of its tuples will reach the grouping or
1211 * aggregation stage. Otherwise we have to keep the clause in HAVING to
1212 * ensure that we don't emit a bogus aggregated row. But then the HAVING
1213 * clause must be degenerate (variable-free), so we can copy it into WHERE
1214 * so that query_planner() can use it in a gating Result node. (This could
1215 * be done better, but it seems not worth optimizing.)
1216 *
1217 * Note that a HAVING clause may contain expressions that are not fully
1218 * preprocessed. This can happen if these expressions are part of
1219 * grouping items. In such cases, they are replaced with GROUP Vars in
1220 * the parser and then replaced back after we're done with expression
1221 * preprocessing on havingQual. This is not an issue if the clause
1222 * remains in HAVING, because these expressions will be matched to lower
1223 * target items in setrefs.c. However, if the clause is moved or copied
1224 * into WHERE, we need to ensure that these expressions are fully
1225 * preprocessed.
1226 *
1227 * Note that both havingQual and parse->jointree->quals are in
1228 * implicitly-ANDed-list form at this point, even though they are declared
1229 * as Node *.
1230 */
1231 newHaving = NIL;
1232 foreach(l, (List *) parse->havingQual)
1233 {
1234 Node *havingclause = (Node *) lfirst(l);
1235
1236 if (contain_agg_clause(havingclause) ||
1237 contain_volatile_functions(havingclause) ||
1238 contain_subplans(havingclause) ||
1239 (parse->groupClause && parse->groupingSets &&
1240 bms_is_member(root->group_rtindex, pull_varnos(root, havingclause))))
1241 {
1242 /* keep it in HAVING */
1243 newHaving = lappend(newHaving, havingclause);
1244 }
1245 else if (parse->groupClause &&
1246 (parse->groupingSets == NIL ||
1247 (List *) linitial(parse->groupingSets) != NIL))
1248 {
1249 /* There is a GROUP BY, but no empty grouping set */
1250 Node *whereclause;
1251
1252 /* Preprocess the HAVING clause fully */
1253 whereclause = preprocess_expression(root, havingclause,
1254 EXPRKIND_QUAL);
1255 /* ... and move it to WHERE */
1256 parse->jointree->quals = (Node *)
1257 list_concat((List *) parse->jointree->quals,
1258 (List *) whereclause);
1259 }
1260 else
1261 {
1262 /* There is an empty grouping set (perhaps implicitly) */
1263 Node *whereclause;
1264
1265 /* Preprocess the HAVING clause fully */
1266 whereclause = preprocess_expression(root, copyObject(havingclause),
1267 EXPRKIND_QUAL);
1268 /* ... and put a copy in WHERE */
1269 parse->jointree->quals = (Node *)
1270 list_concat((List *) parse->jointree->quals,
1271 (List *) whereclause);
1272 /* ... and also keep it in HAVING */
1273 newHaving = lappend(newHaving, havingclause);
1274 }
1275 }
1276 parse->havingQual = (Node *) newHaving;
1277
1278 /*
1279 * If we have any outer joins, try to reduce them to plain inner joins.
1280 * This step is most easily done after we've done expression
1281 * preprocessing.
1282 */
1283 if (hasOuterJoins)
1284 reduce_outer_joins(root);
1285
1286 /*
1287 * If we have any RTE_RESULT relations, see if they can be deleted from
1288 * the jointree. We also rely on this processing to flatten single-child
1289 * FromExprs underneath outer joins. This step is most effectively done
1290 * after we've done expression preprocessing and outer join reduction.
1291 */
1292 if (hasResultRTEs || hasOuterJoins)
1293 remove_useless_result_rtes(root);
1294
1295 /*
1296 * Do the main planning.
1297 */
1298 grouping_planner(root, tuple_fraction, setops);
1299
1300 /*
1301 * Capture the set of outer-level param IDs we have access to, for use in
1302 * extParam/allParam calculations later.
1303 */
1304 SS_identify_outer_params(root);
1305
1306 /*
1307 * If any initPlans were created in this query level, adjust the surviving
1308 * Paths' costs and parallel-safety flags to account for them. The
1309 * initPlans won't actually get attached to the plan tree till
1310 * create_plan() runs, but we must include their effects now.
1311 */
1312 final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1313 SS_charge_for_initplans(root, final_rel);
1314
1315 /*
1316 * Make sure we've identified the cheapest Path for the final rel. (By
1317 * doing this here not in grouping_planner, we include initPlan costs in
1318 * the decision, though it's unlikely that will change anything.)
1319 */
1320 set_cheapest(final_rel);
1321
1322 return root;
1323 }
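
The HAVING-to-WHERE decision above reduces to a single containment test. Purely as an illustration, that predicate can be isolated into a standalone helper; the name must_stay_in_having and the exact header set are our assumptions, not part of planner.c, but the calls are the same clauses.c and var.c primitives the listing at lines 1236-1240 uses:

#include "postgres.h"
#include "nodes/pathnodes.h"     /* PlannerInfo */
#include "optimizer/clauses.h"   /* contain_subplans */
#include "optimizer/optimizer.h" /* contain_agg_clause, contain_volatile_functions, pull_varnos */

/* Hypothetical restatement of the test above: a HAVING item must stay in
 * HAVING if it contains an aggregate, a volatile function, a subplan, or
 * (under grouping sets) any Var that the grouping step can null. */
static bool
must_stay_in_having(PlannerInfo *root, Node *havingclause)
{
    Query *parse = root->parse;

    return contain_agg_clause(havingclause) ||
        contain_volatile_functions(havingclause) ||
        contain_subplans(havingclause) ||
        (parse->groupClause && parse->groupingSets &&
         bms_is_member(root->group_rtindex,
                       pull_varnos(root, havingclause)));
}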

References aclcheck_error(), ACLCHECK_NO_PRIV, Assert, assign_special_exec_param(), bms_is_member(), bms_make_singleton(), contain_agg_clause(), contain_subplans(), contain_volatile_functions(), copyObject, CurrentMemoryContext, WindowClause::endOffset, ExecCheckOneRelPerms(), expand_grouping_sets(), expression_returns_set(), EXPRKIND_APPINFO, EXPRKIND_ARBITER_ELEM, EXPRKIND_GROUPEXPR, EXPRKIND_LIMIT, EXPRKIND_QUAL, EXPRKIND_RTFUNC, EXPRKIND_RTFUNC_LATERAL, EXPRKIND_TABLEFUNC, EXPRKIND_TABLEFUNC_LATERAL, EXPRKIND_TABLESAMPLE, EXPRKIND_TARGET, EXPRKIND_VALUES, EXPRKIND_VALUES_LATERAL, fb(), fetch_upper_rel(), flatten_group_exprs(), flatten_join_alias_vars(), flatten_simple_union_all(), get_rel_name(), getRTEPermissionInfo(), grouping_planner(), IS_OUTER_JOIN, lappend(), lfirst, lfirst_node, linitial, list_cell_number(), list_concat(), list_length(), list_make1, makeNode, Max, NIL, OBJECT_VIEW, parse(), preprocess_expression(), preprocess_function_rtes(), preprocess_qual_conditions(), preprocess_relation_rtes(), preprocess_rowmarks(), pull_up_sublinks(), pull_up_subqueries(), pull_varnos(), reduce_outer_joins(), remove_useless_result_rtes(), replace_empty_jointree(), root, rt_fetch, RTE_FUNCTION, RTE_GROUP, RTE_JOIN, RTE_RELATION, RTE_RESULT, RTE_SUBQUERY, RTE_TABLEFUNC, RTE_VALUES, set_cheapest(), SS_charge_for_initplans(), SS_identify_outer_params(), SS_process_ctes(), WindowClause::startOffset, transform_MERGE_to_join(), and UPPERREL_FINAL.

Referenced by make_subplan(), recurse_set_operations(), set_subquery_pathlist(), SS_process_ctes(), and standard_planner().

Variable Documentation

◆ create_upper_paths_hook

create_upper_paths_hook_type create_upper_paths_hook = NULL

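A minimal sketch of chaining this hook from an extension's _PG_init(); the names prev_upper_hook and my_upper_paths are hypothetical, and we assume the five-argument signature declared in optimizer/planner.h:

static create_upper_paths_hook_type prev_upper_hook = NULL;

static void
my_upper_paths(PlannerInfo *root, UpperRelationKind stage,
               RelOptInfo *input_rel, RelOptInfo *output_rel,
               void *extra)
{
    if (stage == UPPERREL_FINAL)
    {
        /* examine or add paths on output_rel here */
    }
    /* always chain to any previously installed hook */
    if (prev_upper_hook)
        prev_upper_hook(root, stage, input_rel, output_rel, extra);
}

/* in _PG_init(): */
prev_upper_hook = create_upper_paths_hook;
create_upper_paths_hook = my_upper_paths;
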
◆ cursor_tuple_fraction

double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION

Definition at line 68 of file planner.c.

Referenced by standard_planner().

◆ debug_parallel_query

int debug_parallel_query = DEBUG_PARALLEL_OFF

Definition at line 69 of file planner.c.

Referenced by ProcessParallelMessage(), query_planner(), and standard_planner().

◆ enable_distinct_reordering

bool enable_distinct_reordering = true

Definition at line 71 of file planner.c.

Referenced by get_useful_pathkeys_for_distinct().

◆ parallel_leader_participation

bool parallel_leader_participation = true

Definition at line 70 of file planner.c.

Referenced by ExecGather(), ExecGatherMerge(), ExecInitGather(), and get_parallel_divisor().

◆ planner_hook

planner_hook_type planner_hook = NULL

Definition at line 74 of file planner.c.

Referenced by _PG_init(), and planner().
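
The conventional pattern is to save the previous hook value in _PG_init() and chain to it, falling back to standard_planner(); a minimal sketch, with hypothetical module and function names:

#include "postgres.h"
#include "fmgr.h"
#include "optimizer/planner.h"

PG_MODULE_MAGIC;

static planner_hook_type prev_planner_hook = NULL;

/* Delegating planner hook: observe the Query, then let the previous
 * hook (or standard_planner) do the real planning work. */
static PlannedStmt *
my_planner(Query *parse, const char *query_string,
           int cursorOptions, ParamListInfo boundParams)
{
    if (prev_planner_hook)
        return prev_planner_hook(parse, query_string,
                                 cursorOptions, boundParams);
    return standard_planner(parse, query_string,
                            cursorOptions, boundParams);
}

void
_PG_init(void)
{
    prev_planner_hook = planner_hook;
    planner_hook = my_planner;
}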

◆ planner_setup_hook

planner_setup_hook_type planner_setup_hook = NULL

Definition at line 77 of file planner.c.

Referenced by standard_planner().

◆ planner_shutdown_hook

planner_shutdown_hook_type planner_shutdown_hook = NULL

Definition at line 80 of file planner.c.

Referenced by standard_planner().