PostgreSQL Source Code git master
Loading...
Searching...
No Matches
planner.c File Reference
#include "postgres.h"
#include <limits.h>
#include <math.h>
#include "access/genam.h"
#include "access/parallel.h"
#include "access/sysattr.h"
#include "access/table.h"
#include "catalog/pg_aggregate.h"
#include "catalog/pg_inherits.h"
#include "catalog/pg_proc.h"
#include "catalog/pg_type.h"
#include "executor/executor.h"
#include "foreign/fdwapi.h"
#include "jit/jit.h"
#include "lib/bipartite_match.h"
#include "lib/knapsack.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "nodes/supportnodes.h"
#include "optimizer/appendinfo.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/optimizer.h"
#include "optimizer/paramassign.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/plancat.h"
#include "optimizer/planmain.h"
#include "optimizer/planner.h"
#include "optimizer/prep.h"
#include "optimizer/subselect.h"
#include "optimizer/tlist.h"
#include "parser/analyze.h"
#include "parser/parse_agg.h"
#include "parser/parse_clause.h"
#include "parser/parse_relation.h"
#include "parser/parsetree.h"
#include "partitioning/partdesc.h"
#include "rewrite/rewriteManip.h"
#include "utils/acl.h"
#include "utils/backend_status.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
#include "utils/selfuncs.h"
Include dependency graph for planner.c:

Go to the source code of this file.

Data Structures

struct  grouping_sets_data
 
struct  WindowClauseSortData
 
struct  standard_qp_extra
 

Macros

#define EXPRKIND_QUAL   0
 
#define EXPRKIND_TARGET   1
 
#define EXPRKIND_RTFUNC   2
 
#define EXPRKIND_RTFUNC_LATERAL   3
 
#define EXPRKIND_VALUES   4
 
#define EXPRKIND_VALUES_LATERAL   5
 
#define EXPRKIND_LIMIT   6
 
#define EXPRKIND_APPINFO   7
 
#define EXPRKIND_PHV   8
 
#define EXPRKIND_TABLESAMPLE   9
 
#define EXPRKIND_ARBITER_ELEM   10
 
#define EXPRKIND_TABLEFUNC   11
 
#define EXPRKIND_TABLEFUNC_LATERAL   12
 
#define EXPRKIND_GROUPEXPR   13
 

Functions

static Nodepreprocess_expression (PlannerInfo *root, Node *expr, int kind)
 
static void preprocess_qual_conditions (PlannerInfo *root, Node *jtnode)
 
static void grouping_planner (PlannerInfo *root, double tuple_fraction, SetOperationStmt *setops)
 
static grouping_sets_datapreprocess_grouping_sets (PlannerInfo *root)
 
static Listremap_to_groupclause_idx (List *groupClause, List *gsets, int *tleref_to_colnum_map)
 
static void preprocess_rowmarks (PlannerInfo *root)
 
static double preprocess_limit (PlannerInfo *root, double tuple_fraction, int64 *offset_est, int64 *count_est)
 
static Listpreprocess_groupclause (PlannerInfo *root, List *force)
 
static Listextract_rollup_sets (List *groupingSets)
 
static Listreorder_grouping_sets (List *groupingSets, List *sortclause)
 
static void standard_qp_callback (PlannerInfo *root, void *extra)
 
static double get_number_of_groups (PlannerInfo *root, double path_rows, grouping_sets_data *gd, List *target_list)
 
static RelOptInfocreate_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, grouping_sets_data *gd)
 
static bool is_degenerate_grouping (PlannerInfo *root)
 
static void create_degenerate_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel)
 
static RelOptInfomake_grouping_rel (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, Node *havingQual)
 
static void create_ordinary_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, GroupPathExtraData *extra, RelOptInfo **partially_grouped_rel_p)
 
static void consider_groupingsets_paths (PlannerInfo *root, RelOptInfo *grouped_rel, Path *path, bool is_sorted, bool can_hash, grouping_sets_data *gd, const AggClauseCosts *agg_costs, double dNumGroups)
 
static RelOptInfocreate_window_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *input_target, PathTarget *output_target, bool output_target_parallel_safe, WindowFuncLists *wflists, List *activeWindows)
 
static void create_one_window_path (PlannerInfo *root, RelOptInfo *window_rel, Path *path, PathTarget *input_target, PathTarget *output_target, WindowFuncLists *wflists, List *activeWindows)
 
static RelOptInfocreate_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target)
 
static void create_partial_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *final_distinct_rel, PathTarget *target)
 
static RelOptInfocreate_final_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *distinct_rel)
 
static Listget_useful_pathkeys_for_distinct (PlannerInfo *root, List *needed_pathkeys, List *path_pathkeys)
 
static RelOptInfocreate_ordered_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, double limit_tuples)
 
static PathTargetmake_group_input_target (PlannerInfo *root, PathTarget *final_target)
 
static PathTargetmake_partial_grouping_target (PlannerInfo *root, PathTarget *grouping_target, Node *havingQual)
 
static Listpostprocess_setop_tlist (List *new_tlist, List *orig_tlist)
 
static void optimize_window_clauses (PlannerInfo *root, WindowFuncLists *wflists)
 
static Listselect_active_windows (PlannerInfo *root, WindowFuncLists *wflists)
 
static void name_active_windows (List *activeWindows)
 
static PathTargetmake_window_input_target (PlannerInfo *root, PathTarget *final_target, List *activeWindows)
 
static Listmake_pathkeys_for_window (PlannerInfo *root, WindowClause *wc, List *tlist)
 
static PathTargetmake_sort_input_target (PlannerInfo *root, PathTarget *final_target, bool *have_postponed_srfs)
 
static void adjust_paths_for_srfs (PlannerInfo *root, RelOptInfo *rel, List *targets, List *targets_contain_srfs)
 
static void add_paths_to_grouping_rel (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, GroupPathExtraData *extra)
 
static RelOptInfocreate_partial_grouping_paths (PlannerInfo *root, RelOptInfo *grouped_rel, RelOptInfo *input_rel, grouping_sets_data *gd, GroupPathExtraData *extra, bool force_rel_creation)
 
static Pathmake_ordered_path (PlannerInfo *root, RelOptInfo *rel, Path *path, Path *cheapest_path, List *pathkeys, double limit_tuples)
 
static void gather_grouping_paths (PlannerInfo *root, RelOptInfo *rel)
 
static bool can_partial_agg (PlannerInfo *root)
 
static void apply_scanjoin_target_to_paths (PlannerInfo *root, RelOptInfo *rel, List *scanjoin_targets, List *scanjoin_targets_contain_srfs, bool scanjoin_target_parallel_safe, bool tlist_same_exprs)
 
static void create_partitionwise_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, PartitionwiseAggregateType patype, GroupPathExtraData *extra)
 
static bool group_by_has_partkey (RelOptInfo *input_rel, List *targetList, List *groupClause)
 
static int common_prefix_cmp (const void *a, const void *b)
 
static Listgenerate_setop_child_grouplist (SetOperationStmt *op, List *targetlist)
 
static void create_final_unique_paths (PlannerInfo *root, RelOptInfo *input_rel, List *sortPathkeys, List *groupClause, SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel)
 
static void create_partial_unique_paths (PlannerInfo *root, RelOptInfo *input_rel, List *sortPathkeys, List *groupClause, SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel)
 
PlannedStmtplanner (Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams, ExplainState *es)
 
PlannedStmtstandard_planner (Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams, ExplainState *es)
 
PlannerInfosubquery_planner (PlannerGlobal *glob, Query *parse, char *plan_name, PlannerInfo *parent_root, bool hasRecursion, double tuple_fraction, SetOperationStmt *setops)
 
Exprpreprocess_phv_expression (PlannerInfo *root, Expr *expr)
 
RowMarkType select_rowmark_type (RangeTblEntry *rte, LockClauseStrength strength)
 
bool limit_needed (Query *parse)
 
static bool has_volatile_pathkey (List *keys)
 
static void adjust_group_pathkeys_for_groupagg (PlannerInfo *root)
 
void mark_partial_aggref (Aggref *agg, AggSplit aggsplit)
 
Pathget_cheapest_fractional_path (RelOptInfo *rel, double tuple_fraction)
 
Exprexpression_planner (Expr *expr)
 
Exprexpression_planner_with_deps (Expr *expr, List **relationOids, List **invalItems)
 
bool plan_cluster_use_sort (Oid tableOid, Oid indexOid)
 
int plan_create_index_workers (Oid tableOid, Oid indexOid)
 
RelOptInfocreate_unique_paths (PlannerInfo *root, RelOptInfo *rel, SpecialJoinInfo *sjinfo)
 
charchoose_plan_name (PlannerGlobal *glob, const char *name, bool always_number)
 

Variables

double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION
 
int debug_parallel_query = DEBUG_PARALLEL_OFF
 
bool parallel_leader_participation = true
 
bool enable_distinct_reordering = true
 
planner_hook_type planner_hook = NULL
 
planner_setup_hook_type planner_setup_hook = NULL
 
planner_shutdown_hook_type planner_shutdown_hook = NULL
 
create_upper_paths_hook_type create_upper_paths_hook = NULL
 

Macro Definition Documentation

◆ EXPRKIND_APPINFO

#define EXPRKIND_APPINFO   7

Definition at line 94 of file planner.c.

◆ EXPRKIND_ARBITER_ELEM

#define EXPRKIND_ARBITER_ELEM   10

Definition at line 97 of file planner.c.

◆ EXPRKIND_GROUPEXPR

#define EXPRKIND_GROUPEXPR   13

Definition at line 100 of file planner.c.

◆ EXPRKIND_LIMIT

#define EXPRKIND_LIMIT   6

Definition at line 93 of file planner.c.

◆ EXPRKIND_PHV

#define EXPRKIND_PHV   8

Definition at line 95 of file planner.c.

◆ EXPRKIND_QUAL

#define EXPRKIND_QUAL   0

Definition at line 87 of file planner.c.

◆ EXPRKIND_RTFUNC

#define EXPRKIND_RTFUNC   2

Definition at line 89 of file planner.c.

◆ EXPRKIND_RTFUNC_LATERAL

#define EXPRKIND_RTFUNC_LATERAL   3

Definition at line 90 of file planner.c.

◆ EXPRKIND_TABLEFUNC

#define EXPRKIND_TABLEFUNC   11

Definition at line 98 of file planner.c.

◆ EXPRKIND_TABLEFUNC_LATERAL

#define EXPRKIND_TABLEFUNC_LATERAL   12

Definition at line 99 of file planner.c.

◆ EXPRKIND_TABLESAMPLE

#define EXPRKIND_TABLESAMPLE   9

Definition at line 96 of file planner.c.

◆ EXPRKIND_TARGET

#define EXPRKIND_TARGET   1

Definition at line 88 of file planner.c.

◆ EXPRKIND_VALUES

#define EXPRKIND_VALUES   4

Definition at line 91 of file planner.c.

◆ EXPRKIND_VALUES_LATERAL

#define EXPRKIND_VALUES_LATERAL   5

Definition at line 92 of file planner.c.

Function Documentation

◆ add_paths_to_grouping_rel()

static void add_paths_to_grouping_rel ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo grouped_rel,
RelOptInfo partially_grouped_rel,
const AggClauseCosts agg_costs,
grouping_sets_data gd,
GroupPathExtraData extra 
)
static

Definition at line 7154 of file planner.c.

7160{
7161 Query *parse = root->parse;
7162 Path *cheapest_path = input_rel->cheapest_total_path;
7164 ListCell *lc;
7165 bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7166 bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7167 List *havingQual = (List *) extra->havingQual;
7168 AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7169 double dNumGroups = 0;
7170 double dNumFinalGroups = 0;
7171
7172 /*
7173 * Estimate number of groups for non-split aggregation.
7174 */
7176 cheapest_path->rows,
7177 gd,
7178 extra->targetList);
7179
7181 {
7183 partially_grouped_rel->cheapest_total_path;
7184
7185 /*
7186 * Estimate number of groups for final phase of partial aggregation.
7187 */
7191 gd,
7192 extra->targetList);
7193 }
7194
7195 if (can_sort)
7196 {
7197 /*
7198 * Use any available suitably-sorted path as input, and also consider
7199 * sorting the cheapest-total path and incremental sort on any paths
7200 * with presorted keys.
7201 */
7202 foreach(lc, input_rel->pathlist)
7203 {
7204 ListCell *lc2;
7205 Path *path = (Path *) lfirst(lc);
7206 Path *path_save = path;
7208
7209 /* generate alternative group orderings that might be useful */
7211
7213
7214 foreach(lc2, pathkey_orderings)
7215 {
7217
7218 /* restore the path (we replace it in the loop) */
7219 path = path_save;
7220
7221 path = make_ordered_path(root,
7222 grouped_rel,
7223 path,
7225 info->pathkeys,
7226 -1.0);
7227 if (path == NULL)
7228 continue;
7229
7230 /* Now decide what to stick atop it */
7231 if (parse->groupingSets)
7232 {
7233 consider_groupingsets_paths(root, grouped_rel,
7234 path, true, can_hash,
7236 }
7237 else if (parse->hasAggs)
7238 {
7239 /*
7240 * We have aggregation, possibly with plain GROUP BY. Make
7241 * an AggPath.
7242 */
7243 add_path(grouped_rel, (Path *)
7245 grouped_rel,
7246 path,
7247 grouped_rel->reltarget,
7248 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7250 info->clauses,
7251 havingQual,
7252 agg_costs,
7253 dNumGroups));
7254 }
7255 else if (parse->groupClause)
7256 {
7257 /*
7258 * We have GROUP BY without aggregation or grouping sets.
7259 * Make a GroupPath.
7260 */
7261 add_path(grouped_rel, (Path *)
7263 grouped_rel,
7264 path,
7265 info->clauses,
7266 havingQual,
7267 dNumGroups));
7268 }
7269 else
7270 {
7271 /* Other cases should have been handled above */
7272 Assert(false);
7273 }
7274 }
7275 }
7276
7277 /*
7278 * Instead of operating directly on the input relation, we can
7279 * consider finalizing a partially aggregated path.
7280 */
7282 {
7283 foreach(lc, partially_grouped_rel->pathlist)
7284 {
7285 ListCell *lc2;
7286 Path *path = (Path *) lfirst(lc);
7287 Path *path_save = path;
7289
7290 /* generate alternative group orderings that might be useful */
7292
7294
7295 /* process all potentially interesting grouping reorderings */
7296 foreach(lc2, pathkey_orderings)
7297 {
7299
7300 /* restore the path (we replace it in the loop) */
7301 path = path_save;
7302
7303 path = make_ordered_path(root,
7304 grouped_rel,
7305 path,
7307 info->pathkeys,
7308 -1.0);
7309
7310 if (path == NULL)
7311 continue;
7312
7313 if (parse->hasAggs)
7314 add_path(grouped_rel, (Path *)
7316 grouped_rel,
7317 path,
7318 grouped_rel->reltarget,
7319 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7321 info->clauses,
7322 havingQual,
7323 agg_final_costs,
7325 else
7326 add_path(grouped_rel, (Path *)
7328 grouped_rel,
7329 path,
7330 info->clauses,
7331 havingQual,
7333
7334 }
7335 }
7336 }
7337 }
7338
7339 if (can_hash)
7340 {
7341 if (parse->groupingSets)
7342 {
7343 /*
7344 * Try for a hash-only groupingsets path over unsorted input.
7345 */
7346 consider_groupingsets_paths(root, grouped_rel,
7347 cheapest_path, false, true,
7349 }
7350 else
7351 {
7352 /*
7353 * Generate a HashAgg Path. We just need an Agg over the
7354 * cheapest-total input path, since input order won't matter.
7355 */
7356 add_path(grouped_rel, (Path *)
7357 create_agg_path(root, grouped_rel,
7359 grouped_rel->reltarget,
7360 AGG_HASHED,
7362 root->processed_groupClause,
7363 havingQual,
7364 agg_costs,
7365 dNumGroups));
7366 }
7367
7368 /*
7369 * Generate a Finalize HashAgg Path atop of the cheapest partially
7370 * grouped path, assuming there is one
7371 */
7373 {
7374 add_path(grouped_rel, (Path *)
7376 grouped_rel,
7378 grouped_rel->reltarget,
7379 AGG_HASHED,
7381 root->processed_groupClause,
7382 havingQual,
7383 agg_final_costs,
7385 }
7386 }
7387
7388 /*
7389 * When partitionwise aggregate is used, we might have fully aggregated
7390 * paths in the partial pathlist, because add_paths_to_append_rel() will
7391 * consider a path for grouped_rel consisting of a Parallel Append of
7392 * non-partial paths from each child.
7393 */
7394 if (grouped_rel->partial_pathlist != NIL)
7395 gather_grouping_paths(root, grouped_rel);
7396}
#define Assert(condition)
Definition c.h:873
void parse(int)
Definition parse.c:49
@ AGG_SORTED
Definition nodes.h:365
@ AGG_HASHED
Definition nodes.h:366
@ AGG_PLAIN
Definition nodes.h:364
@ AGGSPLIT_FINAL_DESERIAL
Definition nodes.h:391
@ AGGSPLIT_SIMPLE
Definition nodes.h:387
List * get_useful_group_keys_orderings(PlannerInfo *root, Path *path)
Definition pathkeys.c:467
GroupPath * create_group_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *groupClause, List *qual, double numGroups)
Definition pathnode.c:2895
void add_path(RelOptInfo *parent_rel, Path *new_path)
Definition pathnode.c:459
AggPath * create_agg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, AggStrategy aggstrategy, AggSplit aggsplit, List *groupClause, List *qual, const AggClauseCosts *aggcosts, double numGroups)
Definition pathnode.c:3004
#define GROUPING_CAN_USE_HASH
Definition pathnodes.h:3616
#define GROUPING_CAN_USE_SORT
Definition pathnodes.h:3615
#define lfirst(lc)
Definition pg_list.h:172
static int list_length(const List *l)
Definition pg_list.h:152
#define NIL
Definition pg_list.h:68
static void gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
Definition planner.c:7826
static Path * make_ordered_path(PlannerInfo *root, RelOptInfo *rel, Path *path, Path *cheapest_path, List *pathkeys, double limit_tuples)
Definition planner.c:7767
static void consider_groupingsets_paths(PlannerInfo *root, RelOptInfo *grouped_rel, Path *path, bool is_sorted, bool can_hash, grouping_sets_data *gd, const AggClauseCosts *agg_costs, double dNumGroups)
Definition planner.c:4243
static double get_number_of_groups(PlannerInfo *root, double path_rows, grouping_sets_data *gd, List *target_list)
Definition planner.c:3737
static int fb(int x)
tree ctl root
Definition radixtree.h:1857
AggClauseCosts agg_final_costs
Definition pathnodes.h:3656
Definition pg_list.h:54
struct PathTarget * reltarget
Definition pathnodes.h:1033
List * partial_pathlist
Definition pathnodes.h:1040

References add_path(), GroupPathExtraData::agg_final_costs, AGG_HASHED, AGG_PLAIN, AGG_SORTED, AGGSPLIT_FINAL_DESERIAL, AGGSPLIT_SIMPLE, Assert, GroupByOrdering::clauses, consider_groupingsets_paths(), create_agg_path(), create_group_path(), fb(), GroupPathExtraData::flags, gather_grouping_paths(), get_number_of_groups(), get_useful_group_keys_orderings(), GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, GroupPathExtraData::havingQual, lfirst, list_length(), make_ordered_path(), NIL, parse(), RelOptInfo::partial_pathlist, GroupByOrdering::pathkeys, RelOptInfo::reltarget, root, and GroupPathExtraData::targetList.

Referenced by create_ordinary_grouping_paths().

◆ adjust_group_pathkeys_for_groupagg()

static void adjust_group_pathkeys_for_groupagg ( PlannerInfo root)
static

Definition at line 3308 of file planner.c.

3309{
3310 List *grouppathkeys = root->group_pathkeys;
3314 ListCell *lc;
3315 int i;
3316
3317 /* Shouldn't be here if there are grouping sets */
3318 Assert(root->parse->groupingSets == NIL);
3319 /* Shouldn't be here unless there are some ordered aggregates */
3320 Assert(root->numOrderedAggs > 0);
3321
3322 /* Do nothing if disabled */
3324 return;
3325
3326 /*
3327 * Make a first pass over all AggInfos to collect a Bitmapset containing
3328 * the indexes of all AggInfos to be processed below.
3329 */
3331 foreach(lc, root->agginfos)
3332 {
3334 Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3335
3336 if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
3337 continue;
3338
3339 /* Skip unless there's a DISTINCT or ORDER BY clause */
3340 if (aggref->aggdistinct == NIL && aggref->aggorder == NIL)
3341 continue;
3342
3343 /* Additional safety checks are needed if there's a FILTER clause */
3344 if (aggref->aggfilter != NULL)
3345 {
3346 ListCell *lc2;
3347 bool allow_presort = true;
3348
3349 /*
3350 * When the Aggref has a FILTER clause, it's possible that the
3351 * filter removes rows that cannot be sorted because the
3352 * expression to sort by results in an error during its
3353 * evaluation. This is a problem for presorting as that happens
3354 * before the FILTER, whereas without presorting, the Aggregate
3355 * node will apply the FILTER *before* sorting. So that we never
3356 * try to sort anything that might error, here we aim to skip over
3357 * any Aggrefs with arguments with expressions which, when
3358 * evaluated, could cause an ERROR. Vars and Consts are ok. There
3359 * may be more cases that should be allowed, but more thought
3360 * needs to be given. Err on the side of caution.
3361 */
3362 foreach(lc2, aggref->args)
3363 {
3365 Expr *expr = tle->expr;
3366
3367 while (IsA(expr, RelabelType))
3368 expr = (Expr *) (castNode(RelabelType, expr))->arg;
3369
3370 /* Common case, Vars and Consts are ok */
3371 if (IsA(expr, Var) || IsA(expr, Const))
3372 continue;
3373
3374 /* Unsupported. Don't try to presort for this Aggref */
3375 allow_presort = false;
3376 break;
3377 }
3378
3379 /* Skip unsupported Aggrefs */
3380 if (!allow_presort)
3381 continue;
3382 }
3383
3386 }
3387
3388 /*
3389 * Now process all the unprocessed_aggs to find the best set of pathkeys
3390 * for the given set of aggregates.
3391 *
3392 * On the first outer loop here 'bestaggs' will be empty. We'll populate
3393 * this during the first loop using the pathkeys for the very first
3394 * AggInfo then taking any stronger pathkeys from any other AggInfos with
3395 * a more strict set of compatible pathkeys. Once the outer loop is
3396 * complete, we mark off all the aggregates with compatible pathkeys then
3397 * remove those from the unprocessed_aggs and repeat the process to try to
3398 * find another set of pathkeys that are suitable for a larger number of
3399 * aggregates. The outer loop will stop when there are not enough
3400 * unprocessed aggregates for it to be possible to find a set of pathkeys
3401 * to suit a larger number of aggregates.
3402 */
3403 bestpathkeys = NIL;
3404 bestaggs = NULL;
3406 {
3409
3410 i = -1;
3411 while ((i = bms_next_member(unprocessed_aggs, i)) >= 0)
3412 {
3413 AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3414 Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3415 List *sortlist;
3416 List *pathkeys;
3417
3418 if (aggref->aggdistinct != NIL)
3419 sortlist = aggref->aggdistinct;
3420 else
3421 sortlist = aggref->aggorder;
3422
3424 aggref->args);
3425
3426 /*
3427 * Ignore Aggrefs which have volatile functions in their ORDER BY
3428 * or DISTINCT clause.
3429 */
3430 if (has_volatile_pathkey(pathkeys))
3431 {
3433 continue;
3434 }
3435
3436 /*
3437 * When not set yet, take the pathkeys from the first unprocessed
3438 * aggregate.
3439 */
3440 if (currpathkeys == NIL)
3441 {
3442 currpathkeys = pathkeys;
3443
3444 /* include the GROUP BY pathkeys, if they exist */
3445 if (grouppathkeys != NIL)
3447 currpathkeys);
3448
3449 /* record that we found pathkeys for this aggregate */
3451 }
3452 else
3453 {
3454 /* now look for a stronger set of matching pathkeys */
3455
3456 /* include the GROUP BY pathkeys, if they exist */
3457 if (grouppathkeys != NIL)
3459 pathkeys);
3460
3461 /* are 'pathkeys' compatible or better than 'currpathkeys'? */
3462 switch (compare_pathkeys(currpathkeys, pathkeys))
3463 {
3464 case PATHKEYS_BETTER2:
3465 /* 'pathkeys' are stronger, use these ones instead */
3466 currpathkeys = pathkeys;
3467 /* FALLTHROUGH */
3468
3469 case PATHKEYS_BETTER1:
3470 /* 'pathkeys' are less strict */
3471 /* FALLTHROUGH */
3472
3473 case PATHKEYS_EQUAL:
3474 /* mark this aggregate as covered by 'currpathkeys' */
3476 break;
3477
3478 case PATHKEYS_DIFFERENT:
3479 break;
3480 }
3481 }
3482 }
3483
3484 /* remove the aggregates that we've just processed */
3486
3487 /*
3488 * If this pass included more aggregates than the previous best then
3489 * use these ones as the best set.
3490 */
3492 {
3495 }
3496 }
3497
3498 /*
3499 * If we found any ordered aggregates, update root->group_pathkeys to add
3500 * the best set of aggregate pathkeys. Note that bestpathkeys includes
3501 * the original GROUP BY pathkeys already.
3502 */
3503 if (bestpathkeys != NIL)
3504 root->group_pathkeys = bestpathkeys;
3505
3506 /*
3507 * Now that we've found the best set of aggregates we can set the
3508 * presorted flag to indicate to the executor that it needn't bother
3509 * performing a sort for these Aggrefs. We're able to do this now as
3510 * there's no chance of a Hash Aggregate plan as create_grouping_paths
3511 * will not mark the GROUP BY as GROUPING_CAN_USE_HASH due to the presence
3512 * of ordered aggregates.
3513 */
3514 i = -1;
3515 while ((i = bms_next_member(bestaggs, i)) >= 0)
3516 {
3517 AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3518
3519 foreach(lc, agginfo->aggrefs)
3520 {
3521 Aggref *aggref = lfirst_node(Aggref, lc);
3522
3523 aggref->aggpresorted = true;
3524 }
3525 }
3526}
int bms_next_member(const Bitmapset *a, int prevbit)
Definition bitmapset.c:1305
Bitmapset * bms_del_members(Bitmapset *a, const Bitmapset *b)
Definition bitmapset.c:1160
Bitmapset * bms_del_member(Bitmapset *a, int x)
Definition bitmapset.c:867
int bms_num_members(const Bitmapset *a)
Definition bitmapset.c:750
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition bitmapset.c:814
bool enable_presorted_aggregate
Definition costsize.c:164
int i
Definition isn.c:77
List * list_copy(const List *oldlist)
Definition list.c:1573
#define IsA(nodeptr, _type_)
Definition nodes.h:164
#define castNode(_type_, nodeptr)
Definition nodes.h:182
List * append_pathkeys(List *target, List *source)
Definition pathkeys.c:107
List * make_pathkeys_for_sortclauses(PlannerInfo *root, List *sortclauses, List *tlist)
Definition pathkeys.c:1336
PathKeysComparison compare_pathkeys(List *keys1, List *keys2)
Definition pathkeys.c:304
@ PATHKEYS_BETTER2
Definition paths.h:222
@ PATHKEYS_BETTER1
Definition paths.h:221
@ PATHKEYS_DIFFERENT
Definition paths.h:223
@ PATHKEYS_EQUAL
Definition paths.h:220
void * arg
#define lfirst_node(type, lc)
Definition pg_list.h:176
#define linitial_node(type, l)
Definition pg_list.h:181
#define foreach_current_index(var_or_cell)
Definition pg_list.h:403
#define list_nth_node(type, list, n)
Definition pg_list.h:327
static bool has_volatile_pathkey(List *keys)
Definition planner.c:3263
List * aggdistinct
Definition primnodes.h:493
List * args
Definition primnodes.h:487
Expr * aggfilter
Definition primnodes.h:496
List * aggorder
Definition primnodes.h:490

References Aggref::aggdistinct, Aggref::aggfilter, Aggref::aggorder, append_pathkeys(), arg, Aggref::args, Assert, bms_add_member(), bms_del_member(), bms_del_members(), bms_next_member(), bms_num_members(), castNode, compare_pathkeys(), enable_presorted_aggregate, fb(), foreach_current_index, has_volatile_pathkey(), i, IsA, lfirst, lfirst_node, linitial_node, list_copy(), list_nth_node, make_pathkeys_for_sortclauses(), NIL, PATHKEYS_BETTER1, PATHKEYS_BETTER2, PATHKEYS_DIFFERENT, PATHKEYS_EQUAL, and root.

Referenced by standard_qp_callback().

◆ adjust_paths_for_srfs()

static void adjust_paths_for_srfs ( PlannerInfo root,
RelOptInfo rel,
List targets,
List targets_contain_srfs 
)
static

Definition at line 6703 of file planner.c.

6705{
6706 ListCell *lc;
6707
6710
6711 /* If no SRFs appear at this plan level, nothing to do */
6712 if (list_length(targets) == 1)
6713 return;
6714
6715 /*
6716 * Stack SRF-evaluation nodes atop each path for the rel.
6717 *
6718 * In principle we should re-run set_cheapest() here to identify the
6719 * cheapest path, but it seems unlikely that adding the same tlist eval
6720 * costs to all the paths would change that, so we don't bother. Instead,
6721 * just assume that the cheapest-startup and cheapest-total paths remain
6722 * so. (There should be no parameterized paths anymore, so we needn't
6723 * worry about updating cheapest_parameterized_paths.)
6724 */
6725 foreach(lc, rel->pathlist)
6726 {
6727 Path *subpath = (Path *) lfirst(lc);
6728 Path *newpath = subpath;
6729 ListCell *lc1,
6730 *lc2;
6731
6732 Assert(subpath->param_info == NULL);
6734 {
6736 bool contains_srfs = (bool) lfirst_int(lc2);
6737
6738 /* If this level doesn't contain SRFs, do regular projection */
6739 if (contains_srfs)
6741 rel,
6742 newpath,
6743 thistarget);
6744 else
6746 rel,
6747 newpath,
6748 thistarget);
6749 }
6750 lfirst(lc) = newpath;
6751 if (subpath == rel->cheapest_startup_path)
6753 if (subpath == rel->cheapest_total_path)
6755 }
6756
6757 /* Likewise for partial paths, if any */
6758 foreach(lc, rel->partial_pathlist)
6759 {
6760 Path *subpath = (Path *) lfirst(lc);
6761 Path *newpath = subpath;
6762 ListCell *lc1,
6763 *lc2;
6764
6765 Assert(subpath->param_info == NULL);
6767 {
6769 bool contains_srfs = (bool) lfirst_int(lc2);
6770
6771 /* If this level doesn't contain SRFs, do regular projection */
6772 if (contains_srfs)
6774 rel,
6775 newpath,
6776 thistarget);
6777 else
6778 {
6779 /* avoid apply_projection_to_path, in case of multiple refs */
6781 rel,
6782 newpath,
6783 thistarget);
6784 }
6785 }
6786 lfirst(lc) = newpath;
6787 }
6788}
Datum subpath(PG_FUNCTION_ARGS)
Definition ltree_op.c:311
ProjectSetPath * create_set_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition pathnode.c:2732
ProjectionPath * create_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition pathnode.c:2534
Path * apply_projection_to_path(PlannerInfo *root, RelOptInfo *rel, Path *path, PathTarget *target)
Definition pathnode.c:2643
#define forboth(cell1, list1, cell2, list2)
Definition pg_list.h:518
#define lfirst_int(lc)
Definition pg_list.h:173
#define linitial_int(l)
Definition pg_list.h:179
List * pathlist
Definition pathnodes.h:1038
struct Path * cheapest_startup_path
Definition pathnodes.h:1041
struct Path * cheapest_total_path
Definition pathnodes.h:1042

References apply_projection_to_path(), Assert, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, create_projection_path(), create_set_projection_path(), fb(), forboth, lfirst, lfirst_int, lfirst_node, linitial_int, list_length(), RelOptInfo::partial_pathlist, RelOptInfo::pathlist, root, and subpath().

Referenced by apply_scanjoin_target_to_paths(), and grouping_planner().

◆ apply_scanjoin_target_to_paths()

static void apply_scanjoin_target_to_paths ( PlannerInfo root,
RelOptInfo rel,
List scanjoin_targets,
List scanjoin_targets_contain_srfs,
bool  scanjoin_target_parallel_safe,
bool  tlist_same_exprs 
)
static

Definition at line 7951 of file planner.c.

7957{
7960 ListCell *lc;
7961
7962 /* This recurses, so be paranoid. */
7964
7965 /*
7966 * If the rel only has Append and MergeAppend paths, we want to drop its
7967 * existing paths and generate new ones. This function would still be
7968 * correct if we kept the existing paths: we'd modify them to generate the
7969 * correct target above the partitioning Append, and then they'd compete
7970 * on cost with paths generating the target below the Append. However, in
7971 * our current cost model the latter way is always the same or cheaper
7972 * cost, so modifying the existing paths would just be useless work.
7973 * Moreover, when the cost is the same, varying roundoff errors might
7974 * sometimes allow an existing path to be picked, resulting in undesirable
7975 * cross-platform plan variations. So we drop old paths and thereby force
7976 * the work to be done below the Append.
7977 *
7978 * However, there are several cases when this optimization is not safe. If
7979 * the rel isn't partitioned, then none of the paths will be Append or
7980 * MergeAppend paths, so we should definitely not do this. If it is
7981 * partitioned but is a joinrel, it may have Append and MergeAppend paths,
7982 * but it can also have join paths that we can't afford to discard.
7983 *
7984 * Some care is needed, because we have to allow
7985 * generate_useful_gather_paths to see the old partial paths in the next
7986 * stanza. Hence, zap the main pathlist here, then allow
7987 * generate_useful_gather_paths to add path(s) to the main list, and
7988 * finally zap the partial pathlist.
7989 */
7991 rel->pathlist = NIL;
7992
7993 /*
7994 * If the scan/join target is not parallel-safe, partial paths cannot
7995 * generate it.
7996 */
7998 {
7999 /*
8000 * Since we can't generate the final scan/join target in parallel
8001 * workers, this is our last opportunity to use any partial paths that
8002 * exist; so build Gather path(s) that use them and emit whatever the
8003 * current reltarget is. We don't do this in the case where the
8004 * target is parallel-safe, since we will be able to generate superior
8005 * paths by doing it after the final scan/join target has been
8006 * applied.
8007 */
8009
8010 /* Can't use parallel query above this level. */
8011 rel->partial_pathlist = NIL;
8012 rel->consider_parallel = false;
8013 }
8014
8015 /* Finish dropping old paths for a partitioned rel, per comment above */
8017 rel->partial_pathlist = NIL;
8018
8019 /* Extract SRF-free scan/join target. */
8021
8022 /*
8023 * Apply the SRF-free scan/join target to each existing path.
8024 *
8025 * If the tlist exprs are the same, we can just inject the sortgroupref
8026 * information into the existing pathtargets. Otherwise, replace each
8027 * path with a projection path that generates the SRF-free scan/join
8028 * target. This can't change the ordering of paths within rel->pathlist,
8029 * so we just modify the list in place.
8030 */
8031 foreach(lc, rel->pathlist)
8032 {
8033 Path *subpath = (Path *) lfirst(lc);
8034
8035 /* Shouldn't have any parameterized paths anymore */
8036 Assert(subpath->param_info == NULL);
8037
8038 if (tlist_same_exprs)
8039 subpath->pathtarget->sortgrouprefs =
8040 scanjoin_target->sortgrouprefs;
8041 else
8042 {
8043 Path *newpath;
8044
8047 lfirst(lc) = newpath;
8048 }
8049 }
8050
8051 /* Likewise adjust the targets for any partial paths. */
8052 foreach(lc, rel->partial_pathlist)
8053 {
8054 Path *subpath = (Path *) lfirst(lc);
8055
8056 /* Shouldn't have any parameterized paths anymore */
8057 Assert(subpath->param_info == NULL);
8058
8059 if (tlist_same_exprs)
8060 subpath->pathtarget->sortgrouprefs =
8061 scanjoin_target->sortgrouprefs;
8062 else
8063 {
8064 Path *newpath;
8065
8068 lfirst(lc) = newpath;
8069 }
8070 }
8071
8072 /*
8073 * Now, if final scan/join target contains SRFs, insert ProjectSetPath(s)
8074 * atop each existing path. (Note that this function doesn't look at the
8075 * cheapest-path fields, which is a good thing because they're bogus right
8076 * now.)
8077 */
8078 if (root->parse->hasTargetSRFs)
8082
8083 /*
8084 * Update the rel's target to be the final (with SRFs) scan/join target.
8085 * This now matches the actual output of all the paths, and we might get
8086 * confused in createplan.c if they don't agree. We must do this now so
8087 * that any append paths made in the next part will use the correct
8088 * pathtarget (cf. create_append_path).
8089 *
8090 * Note that this is also necessary if GetForeignUpperPaths() gets called
8091 * on the final scan/join relation or on any of its children, since the
8092 * FDW might look at the rel's target to create ForeignPaths.
8093 */
8095
8096 /*
8097 * If the relation is partitioned, recursively apply the scan/join target
8098 * to all partitions, and generate brand-new Append paths in which the
8099 * scan/join target is computed below the Append rather than above it.
8100 * Since Append is not projection-capable, that might save a separate
8101 * Result node, and it also is important for partitionwise aggregate.
8102 */
8104 {
8106 int i;
8107
8108 /* Adjust each partition. */
8109 i = -1;
8110 while ((i = bms_next_member(rel->live_parts, i)) >= 0)
8111 {
8112 RelOptInfo *child_rel = rel->part_rels[i];
8113 AppendRelInfo **appinfos;
8114 int nappinfos;
8116
8117 Assert(child_rel != NULL);
8118
8119 /* Dummy children can be ignored. */
8121 continue;
8122
8123 /* Translate scan/join targets for this child. */
8124 appinfos = find_appinfos_by_relids(root, child_rel->relids,
8125 &nappinfos);
8126 foreach(lc, scanjoin_targets)
8127 {
8128 PathTarget *target = lfirst_node(PathTarget, lc);
8129
8130 target = copy_pathtarget(target);
8131 target->exprs = (List *)
8133 (Node *) target->exprs,
8134 nappinfos, appinfos);
8136 target);
8137 }
8138 pfree(appinfos);
8139
8140 /* Recursion does the real work. */
8146
8147 /* Save non-dummy children for Append paths. */
8148 if (!IS_DUMMY_REL(child_rel))
8150 }
8151
8152 /* Build new paths for this relation by appending child paths. */
8154 }
8155
8156 /*
8157 * Consider generating Gather or Gather Merge paths. We must only do this
8158 * if the relation is parallel safe, and we don't do it for child rels to
8159 * avoid creating multiple Gather nodes within the same plan. We must do
8160 * this after all paths have been generated and before set_cheapest, since
8161 * one of the generated paths may turn out to be the cheapest one.
8162 */
8163 if (rel->consider_parallel && !IS_OTHER_REL(rel))
8165
8166 /*
8167 * Reassess which paths are the cheapest, now that we've potentially added
8168 * new Gather (or Gather Merge) and/or Append (or MergeAppend) paths to
8169 * this relation.
8170 */
8171 set_cheapest(rel);
8172}
void generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
Definition allpaths.c:3378
void add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, List *live_childrels)
Definition allpaths.c:1406
AppendRelInfo ** find_appinfos_by_relids(PlannerInfo *root, Relids relids, int *nappinfos)
Definition appendinfo.c:804
Node * adjust_appendrel_attrs(PlannerInfo *root, Node *node, int nappinfos, AppendRelInfo **appinfos)
Definition appendinfo.c:200
List * lappend(List *list, void *datum)
Definition list.c:339
void pfree(void *pointer)
Definition mcxt.c:1616
void set_cheapest(RelOptInfo *parent_rel)
Definition pathnode.c:268
#define IS_SIMPLE_REL(rel)
Definition pathnodes.h:977
#define IS_DUMMY_REL(r)
Definition pathnodes.h:2285
#define IS_PARTITIONED_REL(rel)
Definition pathnodes.h:1219
#define IS_OTHER_REL(rel)
Definition pathnodes.h:992
#define llast_node(type, l)
Definition pg_list.h:202
static void apply_scanjoin_target_to_paths(PlannerInfo *root, RelOptInfo *rel, List *scanjoin_targets, List *scanjoin_targets_contain_srfs, bool scanjoin_target_parallel_safe, bool tlist_same_exprs)
Definition planner.c:7951
static void adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel, List *targets, List *targets_contain_srfs)
Definition planner.c:6703
void check_stack_depth(void)
Definition stack_depth.c:95
Definition nodes.h:135
List * exprs
Definition pathnodes.h:1864
bool consider_parallel
Definition pathnodes.h:1025
Bitmapset * live_parts
Definition pathnodes.h:1192
bool tlist_same_exprs(List *tlist1, List *tlist2)
Definition tlist.c:227
PathTarget * copy_pathtarget(PathTarget *src)
Definition tlist.c:666

References add_paths_to_append_rel(), adjust_appendrel_attrs(), adjust_paths_for_srfs(), apply_scanjoin_target_to_paths(), Assert, bms_next_member(), check_stack_depth(), RelOptInfo::consider_parallel, copy_pathtarget(), create_projection_path(), PathTarget::exprs, fb(), find_appinfos_by_relids(), generate_useful_gather_paths(), i, IS_DUMMY_REL, IS_OTHER_REL, IS_PARTITIONED_REL, IS_SIMPLE_REL, lappend(), lfirst, lfirst_node, linitial_node, RelOptInfo::live_parts, llast_node, NIL, RelOptInfo::partial_pathlist, RelOptInfo::pathlist, pfree(), RelOptInfo::reltarget, root, set_cheapest(), subpath(), and tlist_same_exprs().

Referenced by apply_scanjoin_target_to_paths(), and grouping_planner().

◆ can_partial_agg()

static bool can_partial_agg ( PlannerInfo root)
static

Definition at line 7909 of file planner.c.

7910{
7911 Query *parse = root->parse;
7912
7913 if (!parse->hasAggs && parse->groupClause == NIL)
7914 {
7915 /*
7916 * We don't know how to do parallel aggregation unless we have either
7917 * some aggregates or a grouping clause.
7918 */
7919 return false;
7920 }
7921 else if (parse->groupingSets)
7922 {
7923 /* We don't know how to do grouping sets in parallel. */
7924 return false;
7925 }
7926 else if (root->hasNonPartialAggs || root->hasNonSerialAggs)
7927 {
7928 /* Insufficient support for partial mode. */
7929 return false;
7930 }
7931
7932 /* Everything looks good. */
7933 return true;
7934}

References NIL, parse(), and root.

Referenced by create_grouping_paths().

◆ choose_plan_name()

char * choose_plan_name ( PlannerGlobal glob,
const char name,
bool  always_number 
)

Definition at line 9024 of file planner.c.

9025{
9026 unsigned n;
9027
9028 /*
9029 * If a numeric suffix is not required, then search the list of
9030 * previously-assigned names for a match. If none is found, then we can
9031 * use the provided name without modification.
9032 */
9033 if (!always_number)
9034 {
9035 bool found = false;
9036
9037 foreach_ptr(char, subplan_name, glob->subplanNames)
9038 {
9039 if (strcmp(subplan_name, name) == 0)
9040 {
9041 found = true;
9042 break;
9043 }
9044 }
9045
9046 if (!found)
9047 {
9048 /* pstrdup here is just to avoid cast-away-const */
9049 char *chosen_name = pstrdup(name);
9050
9051 glob->subplanNames = lappend(glob->subplanNames, chosen_name);
9052 return chosen_name;
9053 }
9054 }
9055
9056 /*
9057 * If a numeric suffix is required or if the un-suffixed name is already
9058 * in use, then loop until we find a positive integer that produces a
9059 * novel name.
9060 */
9061 for (n = 1; true; ++n)
9062 {
9063 char *proposed_name = psprintf("%s_%u", name, n);
9064 bool found = false;
9065
9066 foreach_ptr(char, subplan_name, glob->subplanNames)
9067 {
9069 {
9070 found = true;
9071 break;
9072 }
9073 }
9074
9075 if (!found)
9076 {
9077 glob->subplanNames = lappend(glob->subplanNames, proposed_name);
9078 return proposed_name;
9079 }
9080
9082 }
9083}
char * pstrdup(const char *in)
Definition mcxt.c:1781
#define foreach_ptr(type, var, lst)
Definition pg_list.h:469
char * psprintf(const char *fmt,...)
Definition psprintf.c:43
const char * name

References fb(), foreach_ptr, lappend(), name, pfree(), psprintf(), and pstrdup().

Referenced by build_minmax_path(), make_subplan(), recurse_set_operations(), set_subquery_pathlist(), and SS_process_ctes().

◆ common_prefix_cmp()

static int common_prefix_cmp ( const void a,
const void b 
)
static

Definition at line 6164 of file planner.c.

6165{
6166 const WindowClauseSortData *wcsa = a;
6167 const WindowClauseSortData *wcsb = b;
6170
6171 forboth(item_a, wcsa->uniqueOrder, item_b, wcsb->uniqueOrder)
6172 {
6175
6176 if (sca->tleSortGroupRef > scb->tleSortGroupRef)
6177 return -1;
6178 else if (sca->tleSortGroupRef < scb->tleSortGroupRef)
6179 return 1;
6180 else if (sca->sortop > scb->sortop)
6181 return -1;
6182 else if (sca->sortop < scb->sortop)
6183 return 1;
6184 else if (sca->nulls_first && !scb->nulls_first)
6185 return -1;
6186 else if (!sca->nulls_first && scb->nulls_first)
6187 return 1;
6188 /* no need to compare eqop, since it is fully determined by sortop */
6189 }
6190
6191 if (list_length(wcsa->uniqueOrder) > list_length(wcsb->uniqueOrder))
6192 return -1;
6193 else if (list_length(wcsa->uniqueOrder) < list_length(wcsb->uniqueOrder))
6194 return 1;
6195
6196 return 0;
6197}
int b
Definition isn.c:74
int a
Definition isn.c:73

References a, b, fb(), forboth, lfirst_node, and list_length().

Referenced by select_active_windows().

◆ consider_groupingsets_paths()

static void consider_groupingsets_paths ( PlannerInfo root,
RelOptInfo grouped_rel,
Path path,
bool  is_sorted,
bool  can_hash,
grouping_sets_data gd,
const AggClauseCosts agg_costs,
double  dNumGroups 
)
static

Definition at line 4243 of file planner.c.

4251{
4252 Query *parse = root->parse;
4253 Size hash_mem_limit = get_hash_memory_limit();
4254
4255 /*
4256 * If we're not being offered sorted input, then only consider plans that
4257 * can be done entirely by hashing.
4258 *
4259 * We can hash everything if it looks like it'll fit in hash_mem. But if
4260 * the input is actually sorted despite not being advertised as such, we
4261 * prefer to make use of that in order to use less memory.
4262 *
4263 * If none of the grouping sets are sortable, then ignore the hash_mem
4264 * limit and generate a path anyway, since otherwise we'll just fail.
4265 */
4266 if (!is_sorted)
4267 {
4268 List *new_rollups = NIL;
4270 List *sets_data;
4272 List *empty_sets = NIL;
4273 ListCell *lc;
4274 ListCell *l_start = list_head(gd->rollups);
4276 double hashsize;
4277 double exclude_groups = 0.0;
4278
4280
4281 /*
4282 * If the input is coincidentally sorted usefully (which can happen
4283 * even if is_sorted is false, since that only means that our caller
4284 * has set up the sorting for us), then save some hashtable space by
4285 * making use of that. But we need to watch out for degenerate cases:
4286 *
4287 * 1) If there are any empty grouping sets, then group_pathkeys might
4288 * be NIL if all non-empty grouping sets are unsortable. In this case,
4289 * there will be a rollup containing only empty groups, and the
4290 * pathkeys_contained_in test is vacuously true; this is ok.
4291 *
4292 * XXX: the above relies on the fact that group_pathkeys is generated
4293 * from the first rollup. If we add the ability to consider multiple
4294 * sort orders for grouping input, this assumption might fail.
4295 *
4296 * 2) If there are no empty sets and only unsortable sets, then the
4297 * rollups list will be empty (and thus l_start == NULL), and
4298 * group_pathkeys will be NIL; we must ensure that the vacuously-true
4299 * pathkeys_contained_in test doesn't cause us to crash.
4300 */
4301 if (l_start != NULL &&
4302 pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
4303 {
4305 exclude_groups = unhashed_rollup->numGroups;
4306 l_start = lnext(gd->rollups, l_start);
4307 }
4308
4310 path,
4311 agg_costs,
4313
4314 /*
4315 * gd->rollups is empty if we have only unsortable columns to work
4316 * with. Override hash_mem in that case; otherwise, we'll rely on the
4317 * sorted-input case to generate usable mixed paths.
4318 */
4319 if (hashsize > hash_mem_limit && gd->rollups)
4320 return; /* nope, won't fit */
4321
4322 /*
4323 * We need to burst the existing rollups list into individual grouping
4324 * sets and recompute a groupClause for each set.
4325 */
4326 sets_data = list_copy(gd->unsortable_sets);
4327
4328 for_each_cell(lc, gd->rollups, l_start)
4329 {
4331
4332 /*
4333 * If we find an unhashable rollup that's not been skipped by the
4334 * "actually sorted" check above, we can't cope; we'd need sorted
4335 * input (with a different sort order) but we can't get that here.
4336 * So bail out; we'll get a valid path from the is_sorted case
4337 * instead.
4338 *
4339 * The mere presence of empty grouping sets doesn't make a rollup
4340 * unhashable (see preprocess_grouping_sets), we handle those
4341 * specially below.
4342 */
4343 if (!rollup->hashable)
4344 return;
4345
4346 sets_data = list_concat(sets_data, rollup->gsets_data);
4347 }
4348 foreach(lc, sets_data)
4349 {
4351 List *gset = gs->set;
4353
4354 if (gset == NIL)
4355 {
4356 /* Empty grouping sets can't be hashed. */
4359 }
4360 else
4361 {
4363
4364 rollup->groupClause = preprocess_groupclause(root, gset);
4365 rollup->gsets_data = list_make1(gs);
4366 rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4367 rollup->gsets_data,
4368 gd->tleref_to_colnum_map);
4369 rollup->numGroups = gs->numGroups;
4370 rollup->hashable = true;
4371 rollup->is_hashed = true;
4373 }
4374 }
4375
4376 /*
4377 * If we didn't find anything nonempty to hash, then bail. We'll
4378 * generate a path from the is_sorted case.
4379 */
4380 if (new_rollups == NIL)
4381 return;
4382
4383 /*
4384 * If there were empty grouping sets they should have been in the
4385 * first rollup.
4386 */
4388
4389 if (unhashed_rollup)
4390 {
4392 strat = AGG_MIXED;
4393 }
4394 else if (empty_sets)
4395 {
4397
4398 rollup->groupClause = NIL;
4399 rollup->gsets_data = empty_sets_data;
4400 rollup->gsets = empty_sets;
4401 rollup->numGroups = list_length(empty_sets);
4402 rollup->hashable = false;
4403 rollup->is_hashed = false;
4405 strat = AGG_MIXED;
4406 }
4407
4408 add_path(grouped_rel, (Path *)
4410 grouped_rel,
4411 path,
4412 (List *) parse->havingQual,
4413 strat,
4415 agg_costs));
4416 return;
4417 }
4418
4419 /*
4420 * If we have sorted input but nothing we can do with it, bail.
4421 */
4422 if (gd->rollups == NIL)
4423 return;
4424
4425 /*
4426 * Given sorted input, we try and make two paths: one sorted and one mixed
4427 * sort/hash. (We need to try both because hashagg might be disabled, or
4428 * some columns might not be sortable.)
4429 *
4430 * can_hash is passed in as false if some obstacle elsewhere (such as
4431 * ordered aggs) means that we shouldn't consider hashing at all.
4432 */
4433 if (can_hash && gd->any_hashable)
4434 {
4435 List *rollups = NIL;
4436 List *hash_sets = list_copy(gd->unsortable_sets);
4437 double availspace = hash_mem_limit;
4438 ListCell *lc;
4439
4440 /*
4441 * Account first for space needed for groups we can't sort at all.
4442 */
4444 path,
4445 agg_costs,
4446 gd->dNumHashGroups);
4447
4448 if (availspace > 0 && list_length(gd->rollups) > 1)
4449 {
4450 double scale;
4451 int num_rollups = list_length(gd->rollups);
4452 int k_capacity;
4453 int *k_weights = palloc(num_rollups * sizeof(int));
4455 int i;
4456
4457 /*
4458 * We treat this as a knapsack problem: the knapsack capacity
4459 * represents hash_mem, the item weights are the estimated memory
4460 * usage of the hashtables needed to implement a single rollup,
4461 * and we really ought to use the cost saving as the item value;
4462 * however, currently the costs assigned to sort nodes don't
4463 * reflect the comparison costs well, and so we treat all items as
4464 * of equal value (each rollup we hash instead saves us one sort).
4465 *
4466 * To use the discrete knapsack, we need to scale the values to a
4467 * reasonably small bounded range. We choose to allow a 5% error
4468 * margin; we have no more than 4096 rollups in the worst possible
4469 * case, which with a 5% error margin will require a bit over 42MB
4470 * of workspace. (Anyone wanting to plan queries that complex had
4471 * better have the memory for it. In more reasonable cases, with
4472 * no more than a couple of dozen rollups, the memory usage will
4473 * be negligible.)
4474 *
4475 * k_capacity is naturally bounded, but we clamp the values for
4476 * scale and weight (below) to avoid overflows or underflows (or
4477 * uselessly trying to use a scale factor less than 1 byte).
4478 */
4479 scale = Max(availspace / (20.0 * num_rollups), 1.0);
4481
4482 /*
4483 * We leave the first rollup out of consideration since it's the
4484 * one that matches the input sort order. We assign indexes "i"
4485 * to only those entries considered for hashing; the second loop,
4486 * below, must use the same condition.
4487 */
4488 i = 0;
4489 for_each_from(lc, gd->rollups, 1)
4490 {
4492
4493 if (rollup->hashable)
4494 {
4496 path,
4497 agg_costs,
4498 rollup->numGroups);
4499
4500 /*
4501 * If sz is enormous, but hash_mem (and hence scale) is
4502 * small, avoid integer overflow here.
4503 */
4504 k_weights[i] = (int) Min(floor(sz / scale),
4505 k_capacity + 1.0);
4506 ++i;
4507 }
4508 }
4509
4510 /*
4511 * Apply knapsack algorithm; compute the set of items which
4512 * maximizes the value stored (in this case the number of sorts
4513 * saved) while keeping the total size (approximately) within
4514 * capacity.
4515 */
4516 if (i > 0)
4518
4520 {
4521 rollups = list_make1(linitial(gd->rollups));
4522
4523 i = 0;
4524 for_each_from(lc, gd->rollups, 1)
4525 {
4527
4528 if (rollup->hashable)
4529 {
4532 rollup->gsets_data);
4533 else
4534 rollups = lappend(rollups, rollup);
4535 ++i;
4536 }
4537 else
4538 rollups = lappend(rollups, rollup);
4539 }
4540 }
4541 }
4542
4543 if (!rollups && hash_sets)
4544 rollups = list_copy(gd->rollups);
4545
4546 foreach(lc, hash_sets)
4547 {
4550
4551 Assert(gs->set != NIL);
4552
4553 rollup->groupClause = preprocess_groupclause(root, gs->set);
4554 rollup->gsets_data = list_make1(gs);
4555 rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4556 rollup->gsets_data,
4557 gd->tleref_to_colnum_map);
4558 rollup->numGroups = gs->numGroups;
4559 rollup->hashable = true;
4560 rollup->is_hashed = true;
4561 rollups = lcons(rollup, rollups);
4562 }
4563
4564 if (rollups)
4565 {
4566 add_path(grouped_rel, (Path *)
4568 grouped_rel,
4569 path,
4570 (List *) parse->havingQual,
4571 AGG_MIXED,
4572 rollups,
4573 agg_costs));
4574 }
4575 }
4576
4577 /*
4578 * Now try the simple sorted case.
4579 */
4580 if (!gd->unsortable_sets)
4581 add_path(grouped_rel, (Path *)
4583 grouped_rel,
4584 path,
4585 (List *) parse->havingQual,
4586 AGG_SORTED,
4587 gd->rollups,
4588 agg_costs));
4589}
bool bms_is_member(int x, const Bitmapset *a)
Definition bitmapset.c:510
#define bms_is_empty(a)
Definition bitmapset.h:118
#define Min(x, y)
Definition c.h:997
#define Max(x, y)
Definition c.h:991
size_t Size
Definition c.h:619
Bitmapset * DiscreteKnapsack(int max_weight, int num_items, int *item_weights, double *item_values)
Definition knapsack.c:51
List * list_concat(List *list1, const List *list2)
Definition list.c:561
List * lcons(void *datum, List *list)
Definition list.c:495
void * palloc(Size size)
Definition mcxt.c:1387
size_t get_hash_memory_limit(void)
Definition nodeHash.c:3621
AggStrategy
Definition nodes.h:363
@ AGG_MIXED
Definition nodes.h:367
#define makeNode(_type_)
Definition nodes.h:161
bool pathkeys_contained_in(List *keys1, List *keys2)
Definition pathkeys.c:343
GroupingSetsPath * create_groupingsets_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *having_qual, AggStrategy aggstrategy, List *rollups, const AggClauseCosts *agg_costs)
Definition pathnode.c:3086
#define list_make1(x1)
Definition pg_list.h:212
#define for_each_cell(cell, lst, initcell)
Definition pg_list.h:438
#define for_each_from(cell, lst, N)
Definition pg_list.h:414
#define linitial(l)
Definition pg_list.h:178
static ListCell * list_head(const List *l)
Definition pg_list.h:128
static ListCell * lnext(const List *l, const ListCell *c)
Definition pg_list.h:343
static int scale
Definition pgbench.c:182
static List * preprocess_groupclause(PlannerInfo *root, List *force)
Definition planner.c:2907
static List * remap_to_groupclause_idx(List *groupClause, List *gsets, int *tleref_to_colnum_map)
Definition planner.c:2441
double estimate_hashagg_tablesize(PlannerInfo *root, Path *path, const AggClauseCosts *agg_costs, double dNumGroups)
Definition selfuncs.c:4521
List * pathkeys
Definition pathnodes.h:1997

References add_path(), AGG_HASHED, AGG_MIXED, AGG_SORTED, Assert, bms_is_empty, bms_is_member(), create_groupingsets_path(), DiscreteKnapsack(), estimate_hashagg_tablesize(), fb(), for_each_cell, for_each_from, get_hash_memory_limit(), i, lappend(), lcons(), lfirst_node, linitial, list_concat(), list_copy(), list_head(), list_length(), list_make1, lnext(), makeNode, Max, Min, NIL, palloc(), parse(), Path::pathkeys, pathkeys_contained_in(), preprocess_groupclause(), remap_to_groupclause_idx(), root, and scale.

Referenced by add_paths_to_grouping_rel().

◆ create_degenerate_grouping_paths()

static void create_degenerate_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo grouped_rel 
)
static

Definition at line 4049 of file planner.c.

4051{
4052 Query *parse = root->parse;
4053 int nrows;
4054 Path *path;
4055
4056 nrows = list_length(parse->groupingSets);
4057 if (nrows > 1)
4058 {
4059 /*
4060 * Doesn't seem worthwhile writing code to cons up a generate_series
4061 * or a values scan to emit multiple rows. Instead just make N clones
4062 * and append them. (With a volatile HAVING clause, this means you
4063 * might get between 0 and N output rows. Offhand I think that's
4064 * desired.)
4065 */
4066 AppendPathInput append = {0};
4067
4068 while (--nrows >= 0)
4069 {
4070 path = (Path *)
4071 create_group_result_path(root, grouped_rel,
4072 grouped_rel->reltarget,
4073 (List *) parse->havingQual);
4074 append.subpaths = lappend(append.subpaths, path);
4075 }
4076 path = (Path *)
4078 grouped_rel,
4079 append,
4080 NIL,
4081 NULL,
4082 0,
4083 false,
4084 -1);
4085 }
4086 else
4087 {
4088 /* No grouping sets, or just one, so one output row */
4089 path = (Path *)
4090 create_group_result_path(root, grouped_rel,
4091 grouped_rel->reltarget,
4092 (List *) parse->havingQual);
4093 }
4094
4095 add_path(grouped_rel, path);
4096}
AppendPath * create_append_path(PlannerInfo *root, RelOptInfo *rel, AppendPathInput input, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows)
Definition pathnode.c:1299
GroupResultPath * create_group_result_path(PlannerInfo *root, RelOptInfo *rel, PathTarget *target, List *havingqual)
Definition pathnode.c:1611

References add_path(), create_append_path(), create_group_result_path(), fb(), lappend(), list_length(), NIL, parse(), RelOptInfo::reltarget, and root.

Referenced by create_grouping_paths().

◆ create_distinct_paths()

static RelOptInfo * create_distinct_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget target 
)
static

Definition at line 4862 of file planner.c.

4864{
4866
4867 /* For now, do all work in the (DISTINCT, NULL) upperrel */
4869
4870 /*
4871 * We don't compute anything at this level, so distinct_rel will be
4872 * parallel-safe if the input rel is parallel-safe. In particular, if
4873 * there is a DISTINCT ON (...) clause, any path for the input_rel will
4874 * output those expressions, and will not be parallel-safe unless those
4875 * expressions are parallel-safe.
4876 */
4877 distinct_rel->consider_parallel = input_rel->consider_parallel;
4878
4879 /*
4880 * If the input rel belongs to a single FDW, so does the distinct_rel.
4881 */
4882 distinct_rel->serverid = input_rel->serverid;
4883 distinct_rel->userid = input_rel->userid;
4884 distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4885 distinct_rel->fdwroutine = input_rel->fdwroutine;
4886
4887 /* build distinct paths based on input_rel's pathlist */
4889
4890 /* now build distinct paths based on input_rel's partial_pathlist */
4892
4893 /* Give a helpful error if we failed to create any paths */
4894 if (distinct_rel->pathlist == NIL)
4895 ereport(ERROR,
4897 errmsg("could not implement DISTINCT"),
4898 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4899
4900 /*
4901 * If there is an FDW that's responsible for all baserels of the query,
4902 * let it consider adding ForeignPaths.
4903 */
4904 if (distinct_rel->fdwroutine &&
4905 distinct_rel->fdwroutine->GetForeignUpperPaths)
4906 distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4908 input_rel,
4910 NULL);
4911
4912 /* Let extensions possibly add some more paths */
4914 (*create_upper_paths_hook) (root, UPPERREL_DISTINCT, input_rel,
4916
4917 /* Now choose the best path(s) */
4919
4920 return distinct_rel;
4921}
int errdetail(const char *fmt,...)
Definition elog.c:1217
int errcode(int sqlerrcode)
Definition elog.c:864
int errmsg(const char *fmt,...)
Definition elog.c:1081
#define ERROR
Definition elog.h:39
#define ereport(elevel,...)
Definition elog.h:150
@ UPPERREL_DISTINCT
Definition pathnodes.h:150
static RelOptInfo * create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *distinct_rel)
Definition planner.c:5115
static void create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *final_distinct_rel, PathTarget *target)
Definition planner.c:4932
create_upper_paths_hook_type create_upper_paths_hook
Definition planner.c:83
RelOptInfo * fetch_upper_rel(PlannerInfo *root, UpperRelationKind kind, Relids relids)
Definition relnode.c:1606

References create_final_distinct_paths(), create_partial_distinct_paths(), create_upper_paths_hook, ereport, errcode(), errdetail(), errmsg(), ERROR, fb(), fetch_upper_rel(), NIL, root, set_cheapest(), and UPPERREL_DISTINCT.

Referenced by grouping_planner().

◆ create_final_distinct_paths()

static RelOptInfo * create_final_distinct_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo distinct_rel 
)
static

Definition at line 5115 of file planner.c.

5117{
5118 Query *parse = root->parse;
5119 Path *cheapest_input_path = input_rel->cheapest_total_path;
5120 double numDistinctRows;
5121 bool allow_hash;
5122
5123 /* Estimate number of distinct rows there will be */
5124 if (parse->groupClause || parse->groupingSets || parse->hasAggs ||
5125 root->hasHavingQual)
5126 {
5127 /*
5128 * If there was grouping or aggregation, use the number of input rows
5129 * as the estimated number of DISTINCT rows (ie, assume the input is
5130 * already mostly unique).
5131 */
5133 }
5134 else
5135 {
5136 /*
5137 * Otherwise, the UNIQUE filter has effects comparable to GROUP BY.
5138 */
5140
5141 distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
5142 parse->targetList);
5144 cheapest_input_path->rows,
5145 NULL, NULL);
5146 }
5147
5148 /*
5149 * Consider sort-based implementations of DISTINCT, if possible.
5150 */
5151 if (grouping_is_sortable(root->processed_distinctClause))
5152 {
5153 /*
5154 * Firstly, if we have any adequately-presorted paths, just stick a
5155 * Unique node on those. We also, consider doing an explicit sort of
5156 * the cheapest input path and Unique'ing that. If any paths have
5157 * presorted keys then we'll create an incremental sort atop of those
5158 * before adding a unique node on the top. We'll also attempt to
5159 * reorder the required pathkeys to match the input path's pathkeys as
5160 * much as possible, in hopes of avoiding a possible need to re-sort.
5161 *
5162 * When we have DISTINCT ON, we must sort by the more rigorous of
5163 * DISTINCT and ORDER BY, else it won't have the desired behavior.
5164 * Also, if we do have to do an explicit sort, we might as well use
5165 * the more rigorous ordering to avoid a second sort later. (Note
5166 * that the parser will have ensured that one clause is a prefix of
5167 * the other.)
5168 */
5170 ListCell *lc;
5171 double limittuples = root->distinct_pathkeys == NIL ? 1.0 : -1.0;
5172
5173 if (parse->hasDistinctOn &&
5174 list_length(root->distinct_pathkeys) <
5175 list_length(root->sort_pathkeys))
5176 needed_pathkeys = root->sort_pathkeys;
5177 else
5178 needed_pathkeys = root->distinct_pathkeys;
5179
5180 foreach(lc, input_rel->pathlist)
5181 {
5182 Path *input_path = (Path *) lfirst(lc);
5185
5189 input_path->pathkeys);
5191
5193 {
5196 input_path,
5199 limittuples);
5200
5201 if (sorted_path == NULL)
5202 continue;
5203
5204 /*
5205 * distinct_pathkeys may have become empty if all of the
5206 * pathkeys were determined to be redundant. If all of the
5207 * pathkeys are redundant then each DISTINCT target must only
5208 * allow a single value, therefore all resulting tuples must
5209 * be identical (or at least indistinguishable by an equality
5210 * check). We can uniquify these tuples simply by just taking
5211 * the first tuple. All we do here is add a path to do "LIMIT
5212 * 1" atop of 'sorted_path'. When doing a DISTINCT ON we may
5213 * still have a non-NIL sort_pathkeys list, so we must still
5214 * only do this with paths which are correctly sorted by
5215 * sort_pathkeys.
5216 */
5217 if (root->distinct_pathkeys == NIL)
5218 {
5219 Node *limitCount;
5220
5221 limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
5222 sizeof(int64),
5223 Int64GetDatum(1), false,
5224 true);
5225
5226 /*
5227 * If the query already has a LIMIT clause, then we could
5228 * end up with a duplicate LimitPath in the final plan.
5229 * That does not seem worth troubling over too much.
5230 */
5233 NULL, limitCount,
5234 LIMIT_OPTION_COUNT, 0, 1));
5235 }
5236 else
5237 {
5241 list_length(root->distinct_pathkeys),
5243 }
5244 }
5245 }
5246 }
5247
5248 /*
5249 * Consider hash-based implementations of DISTINCT, if possible.
5250 *
5251 * If we were not able to make any other types of path, we *must* hash or
5252 * die trying. If we do have other choices, there are two things that
5253 * should prevent selection of hashing: if the query uses DISTINCT ON
5254 * (because it won't really have the expected behavior if we hash), or if
5255 * enable_hashagg is off.
5256 *
5257 * Note: grouping_is_hashable() is much more expensive to check than the
5258 * other gating conditions, so we want to do it last.
5259 */
5260 if (distinct_rel->pathlist == NIL)
5261 allow_hash = true; /* we have no alternatives */
5262 else if (parse->hasDistinctOn || !enable_hashagg)
5263 allow_hash = false; /* policy-based decision not to hash */
5264 else
5265 allow_hash = true; /* default */
5266
5267 if (allow_hash && grouping_is_hashable(root->processed_distinctClause))
5268 {
5269 /* Generate hashed aggregate path --- no sort needed */
5274 cheapest_input_path->pathtarget,
5275 AGG_HASHED,
5277 root->processed_distinctClause,
5278 NIL,
5279 NULL,
5281 }
5282
5283 return distinct_rel;
5284}
int64_t int64
Definition c.h:543
bool enable_hashagg
Definition costsize.c:152
Const * makeConst(Oid consttype, int32 consttypmod, Oid constcollid, int constlen, Datum constvalue, bool constisnull, bool constbyval)
Definition makefuncs.c:350
@ LIMIT_OPTION_COUNT
Definition nodes.h:441
LimitPath * create_limit_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, Node *limitOffset, Node *limitCount, LimitOption limitOption, int64 offset_est, int64 count_est)
Definition pathnode.c:3739
UniquePath * create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, int numCols, double numGroups)
Definition pathnode.c:2952
#define foreach_node(type, var, lst)
Definition pg_list.h:496
static List * get_useful_pathkeys_for_distinct(PlannerInfo *root, List *needed_pathkeys, List *path_pathkeys)
Definition planner.c:5295
static Datum Int64GetDatum(int64 X)
Definition postgres.h:423
#define InvalidOid
double estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, List **pgset, EstimationInfo *estinfo)
Definition selfuncs.c:3771
Cardinality rows
Definition pathnodes.h:1991
bool grouping_is_sortable(List *groupClause)
Definition tlist.c:549
List * get_sortgrouplist_exprs(List *sgClauses, List *targetList)
Definition tlist.c:401
bool grouping_is_hashable(List *groupClause)
Definition tlist.c:569

References add_path(), AGG_HASHED, AGGSPLIT_SIMPLE, Assert, create_agg_path(), create_limit_path(), create_unique_path(), enable_hashagg, estimate_num_groups(), fb(), foreach_node, get_sortgrouplist_exprs(), get_useful_pathkeys_for_distinct(), grouping_is_hashable(), grouping_is_sortable(), Int64GetDatum(), InvalidOid, lfirst, LIMIT_OPTION_COUNT, list_length(), make_ordered_path(), makeConst(), NIL, parse(), root, and Path::rows.

Referenced by create_distinct_paths(), and create_partial_distinct_paths().

◆ create_final_unique_paths()

static void create_final_unique_paths ( PlannerInfo root,
RelOptInfo input_rel,
List sortPathkeys,
List groupClause,
SpecialJoinInfo sjinfo,
RelOptInfo unique_rel 
)
static

Definition at line 8741 of file planner.c.

8744{
8745 Path *cheapest_input_path = input_rel->cheapest_total_path;
8746
8747 /* Estimate number of output rows */
8748 unique_rel->rows = estimate_num_groups(root,
8749 sjinfo->semi_rhs_exprs,
8750 cheapest_input_path->rows,
8751 NULL,
8752 NULL);
8753
8754 /* Consider sort-based implementations, if possible. */
8755 if (sjinfo->semi_can_btree)
8756 {
8757 ListCell *lc;
8758
8759 /*
8760 * Use any available suitably-sorted path as input, and also consider
8761 * sorting the cheapest-total path and incremental sort on any paths
8762 * with presorted keys.
8763 *
8764 * To save planning time, we ignore parameterized input paths unless
8765 * they are the cheapest-total path.
8766 */
8767 foreach(lc, input_rel->pathlist)
8768 {
8769 Path *input_path = (Path *) lfirst(lc);
8770 Path *path;
8771 bool is_sorted;
8772 int presorted_keys;
8773
8774 /*
8775 * Ignore parameterized paths that are not the cheapest-total
8776 * path.
8777 */
8778 if (input_path->param_info &&
8780 continue;
8781
8783 input_path->pathkeys,
8784 &presorted_keys);
8785
8786 /*
8787 * Ignore paths that are not suitably or partially sorted, unless
8788 * they are the cheapest total path (no need to deal with paths
8789 * which have presorted keys when incremental sort is disabled).
8790 */
8792 (presorted_keys == 0 || !enable_incremental_sort))
8793 continue;
8794
8795 /*
8796 * Make a separate ProjectionPath in case we need a Result node.
8797 */
8798 path = (Path *) create_projection_path(root,
8799 unique_rel,
8800 input_path,
8801 unique_rel->reltarget);
8802
8803 if (!is_sorted)
8804 {
8805 /*
8806 * We've no need to consider both a sort and incremental sort.
8807 * We'll just do a sort if there are no presorted keys and an
8808 * incremental sort when there are presorted keys.
8809 */
8810 if (presorted_keys == 0 || !enable_incremental_sort)
8811 path = (Path *) create_sort_path(root,
8812 unique_rel,
8813 path,
8815 -1.0);
8816 else
8818 unique_rel,
8819 path,
8821 presorted_keys,
8822 -1.0);
8823 }
8824
8825 path = (Path *) create_unique_path(root, unique_rel, path,
8827 unique_rel->rows);
8828
8829 add_path(unique_rel, path);
8830 }
8831 }
8832
8833 /* Consider hash-based implementation, if possible. */
8834 if (sjinfo->semi_can_hash)
8835 {
8836 Path *path;
8837
8838 /*
8839 * Make a separate ProjectionPath in case we need a Result node.
8840 */
8841 path = (Path *) create_projection_path(root,
8842 unique_rel,
8844 unique_rel->reltarget);
8845
8846 path = (Path *) create_agg_path(root,
8847 unique_rel,
8848 path,
8849 cheapest_input_path->pathtarget,
8850 AGG_HASHED,
8852 groupClause,
8853 NIL,
8854 NULL,
8855 unique_rel->rows);
8856
8857 add_path(unique_rel, path);
8858 }
8859}
bool enable_incremental_sort
Definition costsize.c:151
bool pathkeys_count_contained_in(List *keys1, List *keys2, int *n_common)
Definition pathkeys.c:558
IncrementalSortPath * create_incremental_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, int presorted_keys, double limit_tuples)
Definition pathnode.c:2802
SortPath * create_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, double limit_tuples)
Definition pathnode.c:2851
Cardinality rows
Definition pathnodes.h:1015
List * semi_rhs_exprs
Definition pathnodes.h:3226

References add_path(), AGG_HASHED, AGGSPLIT_SIMPLE, create_agg_path(), create_incremental_sort_path(), create_projection_path(), create_sort_path(), create_unique_path(), enable_incremental_sort, estimate_num_groups(), fb(), lfirst, list_length(), NIL, pathkeys_count_contained_in(), RelOptInfo::reltarget, root, RelOptInfo::rows, SpecialJoinInfo::semi_can_btree, SpecialJoinInfo::semi_can_hash, and SpecialJoinInfo::semi_rhs_exprs.

Referenced by create_partial_unique_paths(), and create_unique_paths().

◆ create_grouping_paths()

static RelOptInfo * create_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget target,
bool  target_parallel_safe,
grouping_sets_data gd 
)
static

Definition at line 3859 of file planner.c.

3864{
3865 Query *parse = root->parse;
3866 RelOptInfo *grouped_rel;
3869
3870 MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
3872
3873 /*
3874 * Create grouping relation to hold fully aggregated grouping and/or
3875 * aggregation paths.
3876 */
3877 grouped_rel = make_grouping_rel(root, input_rel, target,
3878 target_parallel_safe, parse->havingQual);
3879
3880 /*
3881 * Create either paths for a degenerate grouping or paths for ordinary
3882 * grouping, as appropriate.
3883 */
3886 else
3887 {
3888 int flags = 0;
3889 GroupPathExtraData extra;
3890
3891 /*
3892 * Determine whether it's possible to perform sort-based
3893 * implementations of grouping. (Note that if processed_groupClause
3894 * is empty, grouping_is_sortable() is trivially true, and all the
3895 * pathkeys_contained_in() tests will succeed too, so that we'll
3896 * consider every surviving input path.)
3897 *
3898 * If we have grouping sets, we might be able to sort some but not all
3899 * of them; in this case, we need can_sort to be true as long as we
3900 * must consider any sorted-input plan.
3901 */
3902 if ((gd && gd->rollups != NIL)
3903 || grouping_is_sortable(root->processed_groupClause))
3904 flags |= GROUPING_CAN_USE_SORT;
3905
3906 /*
3907 * Determine whether we should consider hash-based implementations of
3908 * grouping.
3909 *
3910 * Hashed aggregation only applies if we're grouping. If we have
3911 * grouping sets, some groups might be hashable but others not; in
3912 * this case we set can_hash true as long as there is nothing globally
3913 * preventing us from hashing (and we should therefore consider plans
3914 * with hashes).
3915 *
3916 * Executor doesn't support hashed aggregation with DISTINCT or ORDER
3917 * BY aggregates. (Doing so would imply storing *all* the input
3918 * values in the hash table, and/or running many sorts in parallel,
3919 * either of which seems like a certain loser.) We similarly don't
3920 * support ordered-set aggregates in hashed aggregation, but that case
3921 * is also included in the numOrderedAggs count.
3922 *
3923 * Note: grouping_is_hashable() is much more expensive to check than
3924 * the other gating conditions, so we want to do it last.
3925 */
3926 if ((parse->groupClause != NIL &&
3927 root->numOrderedAggs == 0 &&
3928 (gd ? gd->any_hashable : grouping_is_hashable(root->processed_groupClause))))
3929 flags |= GROUPING_CAN_USE_HASH;
3930
3931 /*
3932 * Determine whether partial aggregation is possible.
3933 */
3934 if (can_partial_agg(root))
3935 flags |= GROUPING_CAN_PARTIAL_AGG;
3936
3937 extra.flags = flags;
3938 extra.target_parallel_safe = target_parallel_safe;
3939 extra.havingQual = parse->havingQual;
3940 extra.targetList = parse->targetList;
3941 extra.partial_costs_set = false;
3942
3943 /*
3944 * Determine whether partitionwise aggregation is in theory possible.
3945 * It can be disabled by the user, and for now, we don't try to
3946 * support grouping sets. create_ordinary_grouping_paths() will check
3947 * additional conditions, such as whether input_rel is partitioned.
3948 */
3949 if (enable_partitionwise_aggregate && !parse->groupingSets)
3951 else
3953
3955 &agg_costs, gd, &extra,
3957 }
3958
3959 set_cheapest(grouped_rel);
3960 return grouped_rel;
3961}
#define MemSet(start, val, len)
Definition c.h:1013
bool enable_partitionwise_aggregate
Definition costsize.c:160
@ PARTITIONWISE_AGGREGATE_FULL
Definition pathnodes.h:3633
@ PARTITIONWISE_AGGREGATE_NONE
Definition pathnodes.h:3632
#define GROUPING_CAN_PARTIAL_AGG
Definition pathnodes.h:3617
static void create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel)
Definition planner.c:4049
static bool is_degenerate_grouping(PlannerInfo *root)
Definition planner.c:4028
static void create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, GroupPathExtraData *extra, RelOptInfo **partially_grouped_rel_p)
Definition planner.c:4112
static bool can_partial_agg(PlannerInfo *root)
Definition planner.c:7909
static RelOptInfo * make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, Node *havingQual)
Definition planner.c:3972
void get_agg_clause_costs(PlannerInfo *root, AggSplit aggsplit, AggClauseCosts *costs)
Definition prepagg.c:559
PartitionwiseAggregateType patype
Definition pathnodes.h:3662

References AGGSPLIT_SIMPLE, can_partial_agg(), create_degenerate_grouping_paths(), create_ordinary_grouping_paths(), enable_partitionwise_aggregate, fb(), GroupPathExtraData::flags, get_agg_clause_costs(), GROUPING_CAN_PARTIAL_AGG, GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, grouping_is_hashable(), grouping_is_sortable(), GroupPathExtraData::havingQual, is_degenerate_grouping(), make_grouping_rel(), MemSet, NIL, parse(), GroupPathExtraData::partial_costs_set, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, GroupPathExtraData::patype, root, set_cheapest(), GroupPathExtraData::target_parallel_safe, and GroupPathExtraData::targetList.

Referenced by grouping_planner().

◆ create_one_window_path()

static void create_one_window_path ( PlannerInfo root,
RelOptInfo window_rel,
Path path,
PathTarget input_target,
PathTarget output_target,
WindowFuncLists wflists,
List activeWindows 
)
static

Definition at line 4692 of file planner.c.

4699{
4701 ListCell *l;
4702 List *topqual = NIL;
4703
4704 /*
4705 * Since each window clause could require a different sort order, we stack
4706 * up a WindowAgg node for each clause, with sort steps between them as
4707 * needed. (We assume that select_active_windows chose a good order for
4708 * executing the clauses in.)
4709 *
4710 * input_target should contain all Vars and Aggs needed for the result.
4711 * (In some cases we wouldn't need to propagate all of these all the way
4712 * to the top, since they might only be needed as inputs to WindowFuncs.
4713 * It's probably not worth trying to optimize that though.) It must also
4714 * contain all window partitioning and sorting expressions, to ensure
4715 * they're computed only once at the bottom of the stack (that's critical
4716 * for volatile functions). As we climb up the stack, we'll add outputs
4717 * for the WindowFuncs computed at each level.
4718 */
4720
4721 foreach(l, activeWindows)
4722 {
4724 List *window_pathkeys;
4725 List *runcondition = NIL;
4726 int presorted_keys;
4727 bool is_sorted;
4728 bool topwindow;
4729 ListCell *lc2;
4730
4731 window_pathkeys = make_pathkeys_for_window(root,
4732 wc,
4733 root->processed_tlist);
4734
4735 is_sorted = pathkeys_count_contained_in(window_pathkeys,
4736 path->pathkeys,
4737 &presorted_keys);
4738
4739 /* Sort if necessary */
4740 if (!is_sorted)
4741 {
4742 /*
4743 * No presorted keys or incremental sort disabled, just perform a
4744 * complete sort.
4745 */
4746 if (presorted_keys == 0 || !enable_incremental_sort)
4748 path,
4749 window_pathkeys,
4750 -1.0);
4751 else
4752 {
4753 /*
4754 * Since we have presorted keys and incremental sort is
4755 * enabled, just use incremental sort.
4756 */
4758 window_rel,
4759 path,
4760 window_pathkeys,
4761 presorted_keys,
4762 -1.0);
4763 }
4764 }
4765
4766 if (lnext(activeWindows, l))
4767 {
4768 /*
4769 * Add the current WindowFuncs to the output target for this
4770 * intermediate WindowAggPath. We must copy window_target to
4771 * avoid changing the previous path's target.
4772 *
4773 * Note: a WindowFunc adds nothing to the target's eval costs; but
4774 * we do need to account for the increase in tlist width.
4775 */
4777
4779 foreach(lc2, wflists->windowFuncs[wc->winref])
4780 {
4782
4784 tuple_width += get_typavgwidth(wfunc->wintype, -1);
4785 }
4787 }
4788 else
4789 {
4790 /* Install the goal target in the topmost WindowAgg */
4792 }
4793
4794 /* mark the final item in the list as the top-level window */
4795 topwindow = foreach_current_index(l) == list_length(activeWindows) - 1;
4796
4797 /*
4798 * Collect the WindowFuncRunConditions from each WindowFunc and
4799 * convert them into OpExprs
4800 */
4801 foreach(lc2, wflists->windowFuncs[wc->winref])
4802 {
4803 ListCell *lc3;
4805
4806 foreach(lc3, wfunc->runCondition)
4807 {
4810 Expr *opexpr;
4811 Expr *leftop;
4812 Expr *rightop;
4813
4814 if (wfuncrc->wfunc_left)
4815 {
4816 leftop = (Expr *) copyObject(wfunc);
4817 rightop = copyObject(wfuncrc->arg);
4818 }
4819 else
4820 {
4821 leftop = copyObject(wfuncrc->arg);
4822 rightop = (Expr *) copyObject(wfunc);
4823 }
4824
4825 opexpr = make_opclause(wfuncrc->opno,
4826 BOOLOID,
4827 false,
4828 leftop,
4829 rightop,
4830 InvalidOid,
4831 wfuncrc->inputcollid);
4832
4833 runcondition = lappend(runcondition, opexpr);
4834
4835 if (!topwindow)
4836 topqual = lappend(topqual, opexpr);
4837 }
4838 }
4839
4840 path = (Path *)
4842 wflists->windowFuncs[wc->winref],
4843 runcondition, wc,
4844 topwindow ? topqual : NIL, topwindow);
4845 }
4846
4847 add_path(window_rel, path);
4848}
int32 clamp_width_est(int64 tuple_width)
Definition costsize.c:242
int32 get_typavgwidth(Oid typid, int32 typmod)
Definition lsyscache.c:2728
Expr * make_opclause(Oid opno, Oid opresulttype, bool opretset, Expr *leftop, Expr *rightop, Oid opcollid, Oid inputcollid)
Definition makefuncs.c:701
#define copyObject(obj)
Definition nodes.h:232
WindowAggPath * create_windowagg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *windowFuncs, List *runCondition, WindowClause *winclause, List *qual, bool topwindow)
Definition pathnode.c:3340
static List * make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc, List *tlist)
Definition planner.c:6353
void add_column_to_pathtarget(PathTarget *target, Expr *expr, Index sortgroupref)
Definition tlist.c:704

References add_column_to_pathtarget(), add_path(), clamp_width_est(), copy_pathtarget(), copyObject, create_incremental_sort_path(), create_sort_path(), create_windowagg_path(), enable_incremental_sort, fb(), foreach_current_index, get_typavgwidth(), InvalidOid, lappend(), lfirst_node, list_length(), lnext(), make_opclause(), make_pathkeys_for_window(), NIL, Path::pathkeys, pathkeys_count_contained_in(), root, and WindowClause::winref.

Referenced by create_window_paths().

◆ create_ordered_paths()

static RelOptInfo * create_ordered_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget target,
bool  target_parallel_safe,
double  limit_tuples 
)
static

Definition at line 5380 of file planner.c.

5385{
5386 Path *cheapest_input_path = input_rel->cheapest_total_path;
5388 ListCell *lc;
5389
5390 /* For now, do all work in the (ORDERED, NULL) upperrel */
5392
5393 /*
5394 * If the input relation is not parallel-safe, then the ordered relation
5395 * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
5396 * target list is parallel-safe.
5397 */
5398 if (input_rel->consider_parallel && target_parallel_safe)
5399 ordered_rel->consider_parallel = true;
5400
5401 /* Assume that the same path generation strategies are allowed. */
5402 ordered_rel->pgs_mask = input_rel->pgs_mask;
5403
5404 /*
5405 * If the input rel belongs to a single FDW, so does the ordered_rel.
5406 */
5407 ordered_rel->serverid = input_rel->serverid;
5408 ordered_rel->userid = input_rel->userid;
5409 ordered_rel->useridiscurrent = input_rel->useridiscurrent;
5410 ordered_rel->fdwroutine = input_rel->fdwroutine;
5411
5412 foreach(lc, input_rel->pathlist)
5413 {
5414 Path *input_path = (Path *) lfirst(lc);
5416 bool is_sorted;
5417 int presorted_keys;
5418
5420 input_path->pathkeys, &presorted_keys);
5421
5422 if (is_sorted)
5424 else
5425 {
5426 /*
5427 * Try at least sorting the cheapest path and also try
5428 * incrementally sorting any path which is partially sorted
5429 * already (no need to deal with paths which have presorted keys
5430 * when incremental sort is disabled unless it's the cheapest
5431 * input path).
5432 */
5434 (presorted_keys == 0 || !enable_incremental_sort))
5435 continue;
5436
5437 /*
5438 * We've no need to consider both a sort and incremental sort.
5439 * We'll just do a sort if there are no presorted keys and an
5440 * incremental sort when there are presorted keys.
5441 */
5442 if (presorted_keys == 0 || !enable_incremental_sort)
5445 input_path,
5446 root->sort_pathkeys,
5447 limit_tuples);
5448 else
5451 input_path,
5452 root->sort_pathkeys,
5453 presorted_keys,
5454 limit_tuples);
5455 }
5456
5457 /*
5458 * If the pathtarget of the result path has different expressions from
5459 * the target to be applied, a projection step is needed.
5460 */
5461 if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5463 sorted_path, target);
5464
5466 }
5467
5468 /*
5469 * generate_gather_paths() will have already generated a simple Gather
5470 * path for the best parallel path, if any, and the loop above will have
5471 * considered sorting it. Similarly, generate_gather_paths() will also
5472 * have generated order-preserving Gather Merge plans which can be used
5473 * without sorting if they happen to match the sort_pathkeys, and the loop
5474 * above will have handled those as well. However, there's one more
5475 * possibility: it may make sense to sort the cheapest partial path or
5476 * incrementally sort any partial path that is partially sorted according
5477 * to the required output order and then use Gather Merge.
5478 */
5479 if (ordered_rel->consider_parallel && root->sort_pathkeys != NIL &&
5480 input_rel->partial_pathlist != NIL)
5481 {
5483
5484 cheapest_partial_path = linitial(input_rel->partial_pathlist);
5485
5486 foreach(lc, input_rel->partial_pathlist)
5487 {
5488 Path *input_path = (Path *) lfirst(lc);
5490 bool is_sorted;
5491 int presorted_keys;
5492 double total_groups;
5493
5495 input_path->pathkeys,
5496 &presorted_keys);
5497
5498 if (is_sorted)
5499 continue;
5500
5501 /*
5502 * Try at least sorting the cheapest path and also try
5503 * incrementally sorting any path which is partially sorted
5504 * already (no need to deal with paths which have presorted keys
5505 * when incremental sort is disabled unless it's the cheapest
5506 * partial path).
5507 */
5509 (presorted_keys == 0 || !enable_incremental_sort))
5510 continue;
5511
5512 /*
5513 * We've no need to consider both a sort and incremental sort.
5514 * We'll just do a sort if there are no presorted keys and an
5515 * incremental sort when there are presorted keys.
5516 */
5517 if (presorted_keys == 0 || !enable_incremental_sort)
5520 input_path,
5521 root->sort_pathkeys,
5522 limit_tuples);
5523 else
5526 input_path,
5527 root->sort_pathkeys,
5528 presorted_keys,
5529 limit_tuples);
5531 sorted_path = (Path *)
5534 sorted_path->pathtarget,
5535 root->sort_pathkeys, NULL,
5536 &total_groups);
5537
5538 /*
5539 * If the pathtarget of the result path has different expressions
5540 * from the target to be applied, a projection step is needed.
5541 */
5542 if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5544 sorted_path, target);
5545
5547 }
5548 }
5549
5550 /*
5551 * If there is an FDW that's responsible for all baserels of the query,
5552 * let it consider adding ForeignPaths.
5553 */
5554 if (ordered_rel->fdwroutine &&
5555 ordered_rel->fdwroutine->GetForeignUpperPaths)
5556 ordered_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_ORDERED,
5558 NULL);
5559
5560 /* Let extensions possibly add some more paths */
5562 (*create_upper_paths_hook) (root, UPPERREL_ORDERED,
5564
5565 /*
5566 * No need to bother with set_cheapest here; grouping_planner does not
5567 * need us to do it.
5568 */
5569 Assert(ordered_rel->pathlist != NIL);
5570
5571 return ordered_rel;
5572}
double compute_gather_rows(Path *path)
Definition costsize.c:6767
bool equal(const void *a, const void *b)
Definition equalfuncs.c:223
GatherMergePath * create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *pathkeys, Relids required_outer, double *rows)
Definition pathnode.c:1760
@ UPPERREL_ORDERED
Definition pathnodes.h:151

References add_path(), apply_projection_to_path(), Assert, compute_gather_rows(), create_gather_merge_path(), create_incremental_sort_path(), create_sort_path(), create_upper_paths_hook, enable_incremental_sort, equal(), PathTarget::exprs, fb(), fetch_upper_rel(), lfirst, linitial, NIL, pathkeys_count_contained_in(), root, and UPPERREL_ORDERED.

Referenced by grouping_planner().

◆ create_ordinary_grouping_paths()

static void create_ordinary_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo grouped_rel,
const AggClauseCosts agg_costs,
grouping_sets_data gd,
GroupPathExtraData extra,
RelOptInfo **  partially_grouped_rel_p 
)
static

Definition at line 4112 of file planner.c.

4118{
4121
4122 /*
4123 * If this is the topmost grouping relation or if the parent relation is
4124 * doing some form of partitionwise aggregation, then we may be able to do
4125 * it at this level also. However, if the input relation is not
4126 * partitioned, partitionwise aggregate is impossible.
4127 */
4128 if (extra->patype != PARTITIONWISE_AGGREGATE_NONE &&
4130 {
4131 /*
4132 * If this is the topmost relation or if the parent relation is doing
4133 * full partitionwise aggregation, then we can do full partitionwise
4134 * aggregation provided that the GROUP BY clause contains all of the
4135 * partitioning columns at this level and the collation used by GROUP
4136 * BY matches the partitioning collation. Otherwise, we can do at
4137 * most partial partitionwise aggregation. But if partial aggregation
4138 * is not supported in general then we can't use it for partitionwise
4139 * aggregation either.
4140 *
4141 * Check parse->groupClause not processed_groupClause, because it's
4142 * okay if some of the partitioning columns were proved redundant.
4143 */
4144 if (extra->patype == PARTITIONWISE_AGGREGATE_FULL &&
4146 root->parse->groupClause))
4148 else if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4150 else
4152 }
4153
4154 /*
4155 * Before generating paths for grouped_rel, we first generate any possible
4156 * partially grouped paths; that way, later code can easily consider both
4157 * parallel and non-parallel approaches to grouping.
4158 */
4159 if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4160 {
4161 bool force_rel_creation;
4162
4163 /*
4164 * If we're doing partitionwise aggregation at this level, force
4165 * creation of a partially_grouped_rel so we can add partitionwise
4166 * paths to it.
4167 */
4169
4172 grouped_rel,
4173 input_rel,
4174 gd,
4175 extra,
4177 }
4178
4179 /* Set out parameter. */
4181
4182 /* Apply partitionwise aggregation technique, if possible. */
4183 if (patype != PARTITIONWISE_AGGREGATE_NONE)
4186 gd, patype, extra);
4187
4188 /* If we are doing partial aggregation only, return. */
4190 {
4192
4193 if (partially_grouped_rel->pathlist)
4195
4196 return;
4197 }
4198
4199 /* Gather any partially grouped partial paths. */
4200 if (partially_grouped_rel && partially_grouped_rel->partial_pathlist)
4202
4203 /* Now choose the best path(s) for partially_grouped_rel. */
4206
4207 /* Build final grouping paths */
4210 extra);
4211
4212 /* Give a helpful error if we failed to find any implementation */
4213 if (grouped_rel->pathlist == NIL)
4214 ereport(ERROR,
4216 errmsg("could not implement GROUP BY"),
4217 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4218
4219 /*
4220 * If there is an FDW that's responsible for all baserels of the query,
4221 * let it consider adding ForeignPaths.
4222 */
4223 if (grouped_rel->fdwroutine &&
4224 grouped_rel->fdwroutine->GetForeignUpperPaths)
4225 grouped_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_GROUP_AGG,
4226 input_rel, grouped_rel,
4227 extra);
4228
4229 /* Let extensions possibly add some more paths */
4231 (*create_upper_paths_hook) (root, UPPERREL_GROUP_AGG,
4232 input_rel, grouped_rel,
4233 extra);
4234}
PartitionwiseAggregateType
Definition pathnodes.h:3631
@ PARTITIONWISE_AGGREGATE_PARTIAL
Definition pathnodes.h:3634
@ UPPERREL_GROUP_AGG
Definition pathnodes.h:147
static void add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, GroupPathExtraData *extra)
Definition planner.c:7154
static RelOptInfo * create_partial_grouping_paths(PlannerInfo *root, RelOptInfo *grouped_rel, RelOptInfo *input_rel, grouping_sets_data *gd, GroupPathExtraData *extra, bool force_rel_creation)
Definition planner.c:7415
static void create_partitionwise_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, PartitionwiseAggregateType patype, GroupPathExtraData *extra)
Definition planner.c:8192
static bool group_by_has_partkey(RelOptInfo *input_rel, List *targetList, List *groupClause)
Definition planner.c:8329

References add_paths_to_grouping_rel(), Assert, create_partial_grouping_paths(), create_partitionwise_grouping_paths(), create_upper_paths_hook, ereport, errcode(), errdetail(), errmsg(), ERROR, fb(), GroupPathExtraData::flags, gather_grouping_paths(), group_by_has_partkey(), GROUPING_CAN_PARTIAL_AGG, IS_PARTITIONED_REL, NIL, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, PARTITIONWISE_AGGREGATE_PARTIAL, RelOptInfo::pathlist, GroupPathExtraData::patype, root, set_cheapest(), GroupPathExtraData::targetList, and UPPERREL_GROUP_AGG.

Referenced by create_grouping_paths(), and create_partitionwise_grouping_paths().

◆ create_partial_distinct_paths()

static void create_partial_distinct_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo final_distinct_rel,
PathTarget target 
)
static

Definition at line 4932 of file planner.c.

4935{
4937 Query *parse;
4939 double numDistinctRows;
4941 ListCell *lc;
4942
4943 /* nothing to do when there are no partial paths in the input rel */
4944 if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
4945 return;
4946
4947 parse = root->parse;
4948
4949 /* can't do parallel DISTINCT ON */
4950 if (parse->hasDistinctOn)
4951 return;
4952
4954 NULL);
4955 partial_distinct_rel->reltarget = target;
4956 partial_distinct_rel->consider_parallel = input_rel->consider_parallel;
4957
4958 /*
4959 * If input_rel belongs to a single FDW, so does the partial_distinct_rel.
4960 */
4961 partial_distinct_rel->serverid = input_rel->serverid;
4962 partial_distinct_rel->userid = input_rel->userid;
4963 partial_distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4964 partial_distinct_rel->fdwroutine = input_rel->fdwroutine;
4965
4966 cheapest_partial_path = linitial(input_rel->partial_pathlist);
4967
4968 distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
4969 parse->targetList);
4970
4971 /* estimate how many distinct rows we'll get from each worker */
4974 NULL, NULL);
4975
4976 /*
4977 * Try sorting the cheapest path and incrementally sorting any paths with
4978 * presorted keys and put a unique paths atop of those. We'll also
4979 * attempt to reorder the required pathkeys to match the input path's
4980 * pathkeys as much as possible, in hopes of avoiding a possible need to
4981 * re-sort.
4982 */
4983 if (grouping_is_sortable(root->processed_distinctClause))
4984 {
4985 foreach(lc, input_rel->partial_pathlist)
4986 {
4987 Path *input_path = (Path *) lfirst(lc);
4990
4993 root->distinct_pathkeys,
4994 input_path->pathkeys);
4996
4998 {
5001 input_path,
5004 -1.0);
5005
5006 if (sorted_path == NULL)
5007 continue;
5008
5009 /*
5010 * An empty distinct_pathkeys means all tuples have the same
5011 * value for the DISTINCT clause. See
5012 * create_final_distinct_paths()
5013 */
5014 if (root->distinct_pathkeys == NIL)
5015 {
5016 Node *limitCount;
5017
5018 limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
5019 sizeof(int64),
5020 Int64GetDatum(1), false,
5021 true);
5022
5023 /*
5024 * Apply a LimitPath onto the partial path to restrict the
5025 * tuples from each worker to 1.
5026 * create_final_distinct_paths will need to apply an
5027 * additional LimitPath to restrict this to a single row
5028 * after the Gather node. If the query already has a
5029 * LIMIT clause, then we could end up with three Limit
5030 * nodes in the final plan. Consolidating the top two of
5031 * these could be done, but does not seem worth troubling
5032 * over.
5033 */
5037 NULL,
5038 limitCount,
5040 0, 1));
5041 }
5042 else
5043 {
5047 list_length(root->distinct_pathkeys),
5049 }
5050 }
5051 }
5052 }
5053
5054 /*
5055 * Now try hash aggregate paths, if enabled and hashing is possible. Since
5056 * we're not on the hook to ensure we do our best to create at least one
5057 * path here, we treat enable_hashagg as a hard off-switch rather than the
5058 * slightly softer variant in create_final_distinct_paths.
5059 */
5060 if (enable_hashagg && grouping_is_hashable(root->processed_distinctClause))
5061 {
5066 cheapest_partial_path->pathtarget,
5067 AGG_HASHED,
5069 root->processed_distinctClause,
5070 NIL,
5071 NULL,
5073 }
5074
5075 /*
5076 * If there is an FDW that's responsible for all baserels of the query,
5077 * let it consider adding ForeignPaths.
5078 */
5079 if (partial_distinct_rel->fdwroutine &&
5080 partial_distinct_rel->fdwroutine->GetForeignUpperPaths)
5081 partial_distinct_rel->fdwroutine->GetForeignUpperPaths(root,
5083 input_rel,
5085 NULL);
5086
5087 /* Let extensions possibly add some more partial paths */
5089 (*create_upper_paths_hook) (root, UPPERREL_PARTIAL_DISTINCT,
5091
5092 if (partial_distinct_rel->partial_pathlist != NIL)
5093 {
5096
5097 /*
5098 * Finally, create paths to distinctify the final result. This step
5099 * is needed to remove any duplicates due to combining rows from
5100 * parallel workers.
5101 */
5104 }
5105}
void add_partial_path(RelOptInfo *parent_rel, Path *new_path)
Definition pathnode.c:793
@ UPPERREL_PARTIAL_DISTINCT
Definition pathnodes.h:149

References add_partial_path(), AGG_HASHED, AGGSPLIT_SIMPLE, Assert, create_agg_path(), create_final_distinct_paths(), create_limit_path(), create_unique_path(), create_upper_paths_hook, enable_hashagg, estimate_num_groups(), fb(), fetch_upper_rel(), foreach_node, generate_useful_gather_paths(), get_sortgrouplist_exprs(), get_useful_pathkeys_for_distinct(), grouping_is_hashable(), grouping_is_sortable(), Int64GetDatum(), InvalidOid, lfirst, LIMIT_OPTION_COUNT, linitial, list_length(), make_ordered_path(), makeConst(), NIL, parse(), root, set_cheapest(), and UPPERREL_PARTIAL_DISTINCT.

Referenced by create_distinct_paths().

◆ create_partial_grouping_paths()

static RelOptInfo * create_partial_grouping_paths ( PlannerInfo root,
RelOptInfo grouped_rel,
RelOptInfo input_rel,
grouping_sets_data gd,
GroupPathExtraData extra,
bool  force_rel_creation 
)
static

Definition at line 7415 of file planner.c.

7421{
7422 Query *parse = root->parse;
7425 AggClauseCosts *agg_partial_costs = &extra->agg_partial_costs;
7426 AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7428 Path *cheapest_total_path = NULL;
7429 double dNumPartialGroups = 0;
7430 double dNumPartialPartialGroups = 0;
7431 ListCell *lc;
7432 bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7433 bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7434
7435 /*
7436 * Check whether any partially aggregated paths have been generated
7437 * through eager aggregation.
7438 */
7439 if (input_rel->grouped_rel &&
7440 !IS_DUMMY_REL(input_rel->grouped_rel) &&
7441 input_rel->grouped_rel->pathlist != NIL)
7442 eager_agg_rel = input_rel->grouped_rel;
7443
7444 /*
7445 * Consider whether we should generate partially aggregated non-partial
7446 * paths. We can only do this if we have a non-partial path, and only if
7447 * the parent of the input rel is performing partial partitionwise
7448 * aggregation. (Note that extra->patype is the type of partitionwise
7449 * aggregation being used at the parent level, not this level.)
7450 */
7451 if (input_rel->pathlist != NIL &&
7453 cheapest_total_path = input_rel->cheapest_total_path;
7454
7455 /*
7456 * If parallelism is possible for grouped_rel, then we should consider
7457 * generating partially-grouped partial paths. However, if the input rel
7458 * has no partial paths, then we can't.
7459 */
7460 if (grouped_rel->consider_parallel && input_rel->partial_pathlist != NIL)
7461 cheapest_partial_path = linitial(input_rel->partial_pathlist);
7462
7463 /*
7464 * If we can't partially aggregate partial paths, and we can't partially
7465 * aggregate non-partial paths, and no partially aggregated paths were
7466 * generated by eager aggregation, then don't bother creating the new
7467 * RelOptInfo at all, unless the caller specified force_rel_creation.
7468 */
7469 if (cheapest_total_path == NULL &&
7471 eager_agg_rel == NULL &&
7473 return NULL;
7474
7475 /*
7476 * Build a new upper relation to represent the result of partially
7477 * aggregating the rows from the input relation.
7478 */
7481 grouped_rel->relids);
7482 partially_grouped_rel->consider_parallel =
7483 grouped_rel->consider_parallel;
7484 partially_grouped_rel->pgs_mask = grouped_rel->pgs_mask;
7485 partially_grouped_rel->reloptkind = grouped_rel->reloptkind;
7486 partially_grouped_rel->serverid = grouped_rel->serverid;
7487 partially_grouped_rel->userid = grouped_rel->userid;
7488 partially_grouped_rel->useridiscurrent = grouped_rel->useridiscurrent;
7489 partially_grouped_rel->fdwroutine = grouped_rel->fdwroutine;
7490
7491 /*
7492 * Build target list for partial aggregate paths. These paths cannot just
7493 * emit the same tlist as regular aggregate paths, because (1) we must
7494 * include Vars and Aggrefs needed in HAVING, which might not appear in
7495 * the result tlist, and (2) the Aggrefs must be set in partial mode.
7496 */
7499 extra->havingQual);
7500
7501 if (!extra->partial_costs_set)
7502 {
7503 /*
7504 * Collect statistics about aggregates for estimating costs of
7505 * performing aggregation in parallel.
7506 */
7507 MemSet(agg_partial_costs, 0, sizeof(AggClauseCosts));
7508 MemSet(agg_final_costs, 0, sizeof(AggClauseCosts));
7509 if (parse->hasAggs)
7510 {
7511 /* partial phase */
7513 agg_partial_costs);
7514
7515 /* final phase */
7517 agg_final_costs);
7518 }
7519
7520 extra->partial_costs_set = true;
7521 }
7522
7523 /* Estimate number of partial groups. */
7524 if (cheapest_total_path != NULL)
7527 cheapest_total_path->rows,
7528 gd,
7529 extra->targetList);
7534 gd,
7535 extra->targetList);
7536
7537 if (can_sort && cheapest_total_path != NULL)
7538 {
7539 /* This should have been checked previously */
7540 Assert(parse->hasAggs || parse->groupClause);
7541
7542 /*
7543 * Use any available suitably-sorted path as input, and also consider
7544 * sorting the cheapest partial path.
7545 */
7546 foreach(lc, input_rel->pathlist)
7547 {
7548 ListCell *lc2;
7549 Path *path = (Path *) lfirst(lc);
7550 Path *path_save = path;
7552
7553 /* generate alternative group orderings that might be useful */
7555
7557
7558 /* process all potentially interesting grouping reorderings */
7559 foreach(lc2, pathkey_orderings)
7560 {
7562
7563 /* restore the path (we replace it in the loop) */
7564 path = path_save;
7565
7566 path = make_ordered_path(root,
7568 path,
7569 cheapest_total_path,
7570 info->pathkeys,
7571 -1.0);
7572
7573 if (path == NULL)
7574 continue;
7575
7576 if (parse->hasAggs)
7580 path,
7581 partially_grouped_rel->reltarget,
7582 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7584 info->clauses,
7585 NIL,
7586 agg_partial_costs,
7588 else
7592 path,
7593 info->clauses,
7594 NIL,
7596 }
7597 }
7598 }
7599
7601 {
7602 /* Similar to above logic, but for partial paths. */
7603 foreach(lc, input_rel->partial_pathlist)
7604 {
7605 ListCell *lc2;
7606 Path *path = (Path *) lfirst(lc);
7607 Path *path_save = path;
7609
7610 /* generate alternative group orderings that might be useful */
7612
7614
7615 /* process all potentially interesting grouping reorderings */
7616 foreach(lc2, pathkey_orderings)
7617 {
7619
7620
7621 /* restore the path (we replace it in the loop) */
7622 path = path_save;
7623
7624 path = make_ordered_path(root,
7626 path,
7628 info->pathkeys,
7629 -1.0);
7630
7631 if (path == NULL)
7632 continue;
7633
7634 if (parse->hasAggs)
7638 path,
7639 partially_grouped_rel->reltarget,
7640 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7642 info->clauses,
7643 NIL,
7644 agg_partial_costs,
7646 else
7650 path,
7651 info->clauses,
7652 NIL,
7654 }
7655 }
7656 }
7657
7658 /*
7659 * Add a partially-grouped HashAgg Path where possible
7660 */
7661 if (can_hash && cheapest_total_path != NULL)
7662 {
7663 /* Checked above */
7664 Assert(parse->hasAggs || parse->groupClause);
7665
7669 cheapest_total_path,
7670 partially_grouped_rel->reltarget,
7671 AGG_HASHED,
7673 root->processed_groupClause,
7674 NIL,
7675 agg_partial_costs,
7677 }
7678
7679 /*
7680 * Now add a partially-grouped HashAgg partial Path where possible
7681 */
7683 {
7688 partially_grouped_rel->reltarget,
7689 AGG_HASHED,
7691 root->processed_groupClause,
7692 NIL,
7693 agg_partial_costs,
7695 }
7696
7697 /*
7698 * Add any partially aggregated paths generated by eager aggregation to
7699 * the new upper relation after applying projection steps as needed.
7700 */
7701 if (eager_agg_rel)
7702 {
7703 /* Add the paths */
7704 foreach(lc, eager_agg_rel->pathlist)
7705 {
7706 Path *path = (Path *) lfirst(lc);
7707
7708 /* Shouldn't have any parameterized paths anymore */
7709 Assert(path->param_info == NULL);
7710
7711 path = (Path *) create_projection_path(root,
7713 path,
7714 partially_grouped_rel->reltarget);
7715
7717 }
7718
7719 /*
7720 * Likewise add the partial paths, but only if parallelism is possible
7721 * for partially_grouped_rel.
7722 */
7723 if (partially_grouped_rel->consider_parallel)
7724 {
7725 foreach(lc, eager_agg_rel->partial_pathlist)
7726 {
7727 Path *path = (Path *) lfirst(lc);
7728
7729 /* Shouldn't have any parameterized paths anymore */
7730 Assert(path->param_info == NULL);
7731
7732 path = (Path *) create_projection_path(root,
7734 path,
7735 partially_grouped_rel->reltarget);
7736
7738 }
7739 }
7740 }
7741
7742 /*
7743 * If there is an FDW that's responsible for all baserels of the query,
7744 * let it consider adding partially grouped ForeignPaths.
7745 */
7746 if (partially_grouped_rel->fdwroutine &&
7747 partially_grouped_rel->fdwroutine->GetForeignUpperPaths)
7748 {
7749 FdwRoutine *fdwroutine = partially_grouped_rel->fdwroutine;
7750
7751 fdwroutine->GetForeignUpperPaths(root,
7754 extra);
7755 }
7756
7757 return partially_grouped_rel;
7758}
@ AGGSPLIT_INITIAL_SERIAL
Definition nodes.h:389
@ UPPERREL_PARTIAL_GROUP_AGG
Definition pathnodes.h:145
static PathTarget * make_partial_grouping_target(PlannerInfo *root, PathTarget *grouping_target, Node *havingQual)
Definition planner.c:5715
GetForeignUpperPaths_function GetForeignUpperPaths
Definition fdwapi.h:226
AggClauseCosts agg_partial_costs
Definition pathnodes.h:3655
bool useridiscurrent
Definition pathnodes.h:1103
Relids relids
Definition pathnodes.h:1009
uint64 pgs_mask
Definition pathnodes.h:1027
RelOptKind reloptkind
Definition pathnodes.h:1003

References add_partial_path(), add_path(), GroupPathExtraData::agg_final_costs, AGG_HASHED, GroupPathExtraData::agg_partial_costs, AGG_PLAIN, AGG_SORTED, AGGSPLIT_FINAL_DESERIAL, AGGSPLIT_INITIAL_SERIAL, Assert, GroupByOrdering::clauses, RelOptInfo::consider_parallel, create_agg_path(), create_group_path(), create_projection_path(), fb(), fetch_upper_rel(), GroupPathExtraData::flags, get_agg_clause_costs(), get_number_of_groups(), get_useful_group_keys_orderings(), FdwRoutine::GetForeignUpperPaths, GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, GroupPathExtraData::havingQual, IS_DUMMY_REL, lfirst, linitial, list_length(), make_ordered_path(), make_partial_grouping_target(), MemSet, NIL, parse(), GroupPathExtraData::partial_costs_set, PARTITIONWISE_AGGREGATE_PARTIAL, GroupByOrdering::pathkeys, GroupPathExtraData::patype, RelOptInfo::pgs_mask, RelOptInfo::relids, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, Path::rows, RelOptInfo::serverid, GroupPathExtraData::targetList, UPPERREL_PARTIAL_GROUP_AGG, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by create_ordinary_grouping_paths().

◆ create_partial_unique_paths()

static void create_partial_unique_paths ( PlannerInfo root,
RelOptInfo input_rel,
List sortPathkeys,
List groupClause,
SpecialJoinInfo sjinfo,
RelOptInfo unique_rel 
)
static

Definition at line 8866 of file planner.c.

8869{
8872
8873 /* nothing to do when there are no partial paths in the input rel */
8874 if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
8875 return;
8876
8877 /*
8878 * nothing to do if there's anything in the targetlist that's
8879 * parallel-restricted.
8880 */
8881 if (!is_parallel_safe(root, (Node *) unique_rel->reltarget->exprs))
8882 return;
8883
8884 cheapest_partial_path = linitial(input_rel->partial_pathlist);
8885
8888
8889 /*
8890 * clear path info
8891 */
8892 partial_unique_rel->pathlist = NIL;
8893 partial_unique_rel->ppilist = NIL;
8894 partial_unique_rel->partial_pathlist = NIL;
8895 partial_unique_rel->cheapest_startup_path = NULL;
8896 partial_unique_rel->cheapest_total_path = NULL;
8897 partial_unique_rel->cheapest_parameterized_paths = NIL;
8898
8899 /* Estimate number of output rows */
8901 sjinfo->semi_rhs_exprs,
8903 NULL,
8904 NULL);
8905 partial_unique_rel->reltarget = unique_rel->reltarget;
8906
8907 /* Consider sort-based implementations, if possible. */
8908 if (sjinfo->semi_can_btree)
8909 {
8910 ListCell *lc;
8911
8912 /*
8913 * Use any available suitably-sorted path as input, and also consider
8914 * sorting the cheapest partial path and incremental sort on any paths
8915 * with presorted keys.
8916 */
8917 foreach(lc, input_rel->partial_pathlist)
8918 {
8919 Path *input_path = (Path *) lfirst(lc);
8920 Path *path;
8921 bool is_sorted;
8922 int presorted_keys;
8923
8925 input_path->pathkeys,
8926 &presorted_keys);
8927
8928 /*
8929 * Ignore paths that are not suitably or partially sorted, unless
8930 * they are the cheapest partial path (no need to deal with paths
8931 * which have presorted keys when incremental sort is disabled).
8932 */
8934 (presorted_keys == 0 || !enable_incremental_sort))
8935 continue;
8936
8937 /*
8938 * Make a separate ProjectionPath in case we need a Result node.
8939 */
8940 path = (Path *) create_projection_path(root,
8942 input_path,
8943 partial_unique_rel->reltarget);
8944
8945 if (!is_sorted)
8946 {
8947 /*
8948 * We've no need to consider both a sort and incremental sort.
8949 * We'll just do a sort if there are no presorted keys and an
8950 * incremental sort when there are presorted keys.
8951 */
8952 if (presorted_keys == 0 || !enable_incremental_sort)
8953 path = (Path *) create_sort_path(root,
8955 path,
8957 -1.0);
8958 else
8961 path,
8963 presorted_keys,
8964 -1.0);
8965 }
8966
8969 partial_unique_rel->rows);
8970
8972 }
8973 }
8974
8975 /* Consider hash-based implementation, if possible. */
8976 if (sjinfo->semi_can_hash)
8977 {
8978 Path *path;
8979
8980 /*
8981 * Make a separate ProjectionPath in case we need a Result node.
8982 */
8983 path = (Path *) create_projection_path(root,
8986 partial_unique_rel->reltarget);
8987
8988 path = (Path *) create_agg_path(root,
8990 path,
8991 cheapest_partial_path->pathtarget,
8992 AGG_HASHED,
8994 groupClause,
8995 NIL,
8996 NULL,
8997 partial_unique_rel->rows);
8998
9000 }
9001
9002 if (partial_unique_rel->partial_pathlist != NIL)
9003 {
9006
9007 /*
9008 * Finally, create paths to unique-ify the final result. This step is
9009 * needed to remove any duplicates due to combining rows from parallel
9010 * workers.
9011 */
9013 sortPathkeys, groupClause,
9014 sjinfo, unique_rel);
9015 }
9016}
bool is_parallel_safe(PlannerInfo *root, Node *node)
Definition clauses.c:762
static void create_final_unique_paths(PlannerInfo *root, RelOptInfo *input_rel, List *sortPathkeys, List *groupClause, SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel)
Definition planner.c:8741

References add_partial_path(), AGG_HASHED, AGGSPLIT_SIMPLE, create_agg_path(), create_final_unique_paths(), create_incremental_sort_path(), create_projection_path(), create_sort_path(), create_unique_path(), enable_incremental_sort, estimate_num_groups(), PathTarget::exprs, fb(), generate_useful_gather_paths(), is_parallel_safe(), lfirst, linitial, list_length(), makeNode, NIL, pathkeys_count_contained_in(), RelOptInfo::reltarget, root, Path::rows, SpecialJoinInfo::semi_can_btree, SpecialJoinInfo::semi_can_hash, SpecialJoinInfo::semi_rhs_exprs, and set_cheapest().

Referenced by create_unique_paths().

◆ create_partitionwise_grouping_paths()

static void create_partitionwise_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo grouped_rel,
RelOptInfo partially_grouped_rel,
const AggClauseCosts agg_costs,
grouping_sets_data gd,
PartitionwiseAggregateType  patype,
GroupPathExtraData extra 
)
static

Definition at line 8192 of file planner.c.

8200{
8203 PathTarget *target = grouped_rel->reltarget;
8204 bool partial_grouping_valid = true;
8205 int i;
8206
8210
8211 /* Add paths for partitionwise aggregation/grouping. */
8212 i = -1;
8213 while ((i = bms_next_member(input_rel->live_parts, i)) >= 0)
8214 {
8215 RelOptInfo *child_input_rel = input_rel->part_rels[i];
8217 AppendRelInfo **appinfos;
8218 int nappinfos;
8222
8224
8225 /* Dummy children can be ignored. */
8227 continue;
8228
8229 child_target = copy_pathtarget(target);
8230
8231 /*
8232 * Copy the given "extra" structure as is and then override the
8233 * members specific to this child.
8234 */
8235 memcpy(&child_extra, extra, sizeof(child_extra));
8236
8237 appinfos = find_appinfos_by_relids(root, child_input_rel->relids,
8238 &nappinfos);
8239
8240 child_target->exprs = (List *)
8242 (Node *) target->exprs,
8243 nappinfos, appinfos);
8244
8245 /* Translate havingQual and targetList. */
8246 child_extra.havingQual = (Node *)
8248 extra->havingQual,
8249 nappinfos, appinfos);
8250 child_extra.targetList = (List *)
8252 (Node *) extra->targetList,
8253 nappinfos, appinfos);
8254
8255 /*
8256 * extra->patype was the value computed for our parent rel; patype is
8257 * the value for this relation. For the child, our value is its
8258 * parent rel's value.
8259 */
8260 child_extra.patype = patype;
8261
8262 /*
8263 * Create grouping relation to hold fully aggregated grouping and/or
8264 * aggregation paths for the child.
8265 */
8268 extra->target_parallel_safe,
8269 child_extra.havingQual);
8270
8271 /* Create grouping paths for this child relation. */
8276
8278 {
8282 }
8283 else
8284 partial_grouping_valid = false;
8285
8286 if (patype == PARTITIONWISE_AGGREGATE_FULL)
8287 {
8291 }
8292
8293 pfree(appinfos);
8294 }
8295
8296 /*
8297 * Try to create append paths for partially grouped children. For full
8298 * partitionwise aggregation, we might have paths in the partial_pathlist
8299 * if parallel aggregation is possible. For partial partitionwise
8300 * aggregation, we may have paths in both pathlist and partial_pathlist.
8301 *
8302 * NB: We must have a partially grouped path for every child in order to
8303 * generate a partially grouped path for this relation.
8304 */
8306 {
8308
8311 }
8312
8313 /* If possible, create append paths for fully grouped children. */
8314 if (patype == PARTITIONWISE_AGGREGATE_FULL)
8315 {
8317
8319 }
8320}

References add_paths_to_append_rel(), adjust_appendrel_attrs(), Assert, bms_next_member(), copy_pathtarget(), create_ordinary_grouping_paths(), PathTarget::exprs, fb(), find_appinfos_by_relids(), GroupPathExtraData::havingQual, i, IS_DUMMY_REL, lappend(), make_grouping_rel(), NIL, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, PARTITIONWISE_AGGREGATE_PARTIAL, pfree(), RelOptInfo::reltarget, root, set_cheapest(), GroupPathExtraData::target_parallel_safe, and GroupPathExtraData::targetList.

Referenced by create_ordinary_grouping_paths().

◆ create_unique_paths()

RelOptInfo * create_unique_paths ( PlannerInfo root,
RelOptInfo rel,
SpecialJoinInfo sjinfo 
)

Definition at line 8472 of file planner.c.

8473{
8474 RelOptInfo *unique_rel;
8476 List *groupClause = NIL;
8477 MemoryContext oldcontext;
8478
8479 /* Caller made a mistake if SpecialJoinInfo is the wrong one */
8480 Assert(sjinfo->jointype == JOIN_SEMI);
8481 Assert(bms_equal(rel->relids, sjinfo->syn_righthand));
8482
8483 /* If result already cached, return it */
8484 if (rel->unique_rel)
8485 return rel->unique_rel;
8486
8487 /* If it's not possible to unique-ify, return NULL */
8488 if (!(sjinfo->semi_can_btree || sjinfo->semi_can_hash))
8489 return NULL;
8490
8491 /*
8492 * Punt if this is a child relation and we failed to build a unique-ified
8493 * relation for its parent. This can happen if all the RHS columns were
8494 * found to be equated to constants when unique-ifying the parent table,
8495 * leaving no columns to unique-ify.
8496 */
8497 if (IS_OTHER_REL(rel) && rel->top_parent->unique_rel == NULL)
8498 return NULL;
8499
8500 /*
8501 * When called during GEQO join planning, we are in a short-lived memory
8502 * context. We must make sure that the unique rel and any subsidiary data
8503 * structures created for a baserel survive the GEQO cycle, else the
8504 * baserel is trashed for future GEQO cycles. On the other hand, when we
8505 * are creating those for a joinrel during GEQO, we don't want them to
8506 * clutter the main planning context. Upshot is that the best solution is
8507 * to explicitly allocate memory in the same context the given RelOptInfo
8508 * is in.
8509 */
8511
8512 unique_rel = makeNode(RelOptInfo);
8513 memcpy(unique_rel, rel, sizeof(RelOptInfo));
8514
8515 /*
8516 * clear path info
8517 */
8518 unique_rel->pathlist = NIL;
8519 unique_rel->ppilist = NIL;
8520 unique_rel->partial_pathlist = NIL;
8521 unique_rel->cheapest_startup_path = NULL;
8522 unique_rel->cheapest_total_path = NULL;
8523 unique_rel->cheapest_parameterized_paths = NIL;
8524
8525 /*
8526 * Build the target list for the unique rel. We also build the pathkeys
8527 * that represent the ordering requirements for the sort-based
8528 * implementation, and the list of SortGroupClause nodes that represent
8529 * the columns to be grouped on for the hash-based implementation.
8530 *
8531 * For a child rel, we can construct these fields from those of its
8532 * parent.
8533 */
8534 if (IS_OTHER_REL(rel))
8535 {
8538
8539 parent_unique_target = rel->top_parent->unique_rel->reltarget;
8540
8542
8543 /* Translate the target expressions */
8544 child_unique_target->exprs = (List *)
8546 (Node *) parent_unique_target->exprs,
8547 rel,
8548 rel->top_parent);
8549
8550 unique_rel->reltarget = child_unique_target;
8551
8552 sortPathkeys = rel->top_parent->unique_pathkeys;
8553 groupClause = rel->top_parent->unique_groupclause;
8554 }
8555 else
8556 {
8557 List *newtlist;
8558 int nextresno;
8559 List *sortList = NIL;
8560 ListCell *lc1;
8561 ListCell *lc2;
8562
8563 /*
8564 * The values we are supposed to unique-ify may be expressions in the
8565 * variables of the input rel's targetlist. We have to add any such
8566 * expressions to the unique rel's targetlist.
8567 *
8568 * To complicate matters, some of the values to be unique-ified may be
8569 * known redundant by the EquivalenceClass machinery (e.g., because
8570 * they have been equated to constants). There is no need to compare
8571 * such values during unique-ification, and indeed we had better not
8572 * try because the Vars involved may not have propagated as high as
8573 * the semijoin's level. We use make_pathkeys_for_sortclauses to
8574 * detect such cases, which is a tad inefficient but it doesn't seem
8575 * worth building specialized infrastructure for this.
8576 */
8579
8580 forboth(lc1, sjinfo->semi_rhs_exprs, lc2, sjinfo->semi_operators)
8581 {
8582 Expr *uniqexpr = lfirst(lc1);
8584 Oid sortop;
8586 bool made_tle = false;
8587
8589 if (!tle)
8590 {
8592 nextresno,
8593 NULL,
8594 false);
8596 nextresno++;
8597 made_tle = true;
8598 }
8599
8600 /*
8601 * Try to build an ORDER BY list to sort the input compatibly. We
8602 * do this for each sortable clause even when the clauses are not
8603 * all sortable, so that we can detect clauses that are redundant
8604 * according to the pathkey machinery.
8605 */
8607 if (OidIsValid(sortop))
8608 {
8609 Oid eqop;
8611
8612 /*
8613 * The Unique node will need equality operators. Normally
8614 * these are the same as the IN clause operators, but if those
8615 * are cross-type operators then the equality operators are
8616 * the ones for the IN clause operators' RHS datatype.
8617 */
8618 eqop = get_equality_op_for_ordering_op(sortop, NULL);
8619 if (!OidIsValid(eqop)) /* shouldn't happen */
8620 elog(ERROR, "could not find equality operator for ordering operator %u",
8621 sortop);
8622
8624 sortcl->tleSortGroupRef = assignSortGroupRef(tle, newtlist);
8625 sortcl->eqop = eqop;
8626 sortcl->sortop = sortop;
8627 sortcl->reverse_sort = false;
8628 sortcl->nulls_first = false;
8629 sortcl->hashable = false; /* no need to make this accurate */
8631
8632 /*
8633 * At each step, convert the SortGroupClause list to pathkey
8634 * form. If the just-added SortGroupClause is redundant, the
8635 * result will be shorter than the SortGroupClause list.
8636 */
8638 newtlist);
8640 {
8641 /* Drop the redundant SortGroupClause */
8644 /* Undo tlist addition, if we made one */
8645 if (made_tle)
8646 {
8648 nextresno--;
8649 }
8650 /* We need not consider this clause for hashing, either */
8651 continue;
8652 }
8653 }
8654 else if (sjinfo->semi_can_btree) /* shouldn't happen */
8655 elog(ERROR, "could not find ordering operator for equality operator %u",
8656 in_oper);
8657
8658 if (sjinfo->semi_can_hash)
8659 {
8660 /* Create a GROUP BY list for the Agg node to use */
8661 Oid eq_oper;
8663
8664 /*
8665 * Get the hashable equality operators for the Agg node to
8666 * use. Normally these are the same as the IN clause
8667 * operators, but if those are cross-type operators then the
8668 * equality operators are the ones for the IN clause
8669 * operators' RHS datatype.
8670 */
8672 elog(ERROR, "could not find compatible hash operator for operator %u",
8673 in_oper);
8674
8676 groupcl->tleSortGroupRef = assignSortGroupRef(tle, newtlist);
8677 groupcl->eqop = eq_oper;
8678 groupcl->sortop = sortop;
8679 groupcl->reverse_sort = false;
8680 groupcl->nulls_first = false;
8681 groupcl->hashable = true;
8682 groupClause = lappend(groupClause, groupcl);
8683 }
8684 }
8685
8686 /*
8687 * Done building the sortPathkeys and groupClause. But the
8688 * sortPathkeys are bogus if not all the clauses were sortable.
8689 */
8690 if (!sjinfo->semi_can_btree)
8691 sortPathkeys = NIL;
8692
8693 /*
8694 * It can happen that all the RHS columns are equated to constants.
8695 * We'd have to do something special to unique-ify in that case, and
8696 * it's such an unlikely-in-the-real-world case that it's not worth
8697 * the effort. So just punt if we found no columns to unique-ify.
8698 */
8699 if (sortPathkeys == NIL && groupClause == NIL)
8700 {
8701 MemoryContextSwitchTo(oldcontext);
8702 return NULL;
8703 }
8704
8705 /* Convert the required targetlist back to PathTarget form */
8706 unique_rel->reltarget = create_pathtarget(root, newtlist);
8707 }
8708
8709 /* build unique paths based on input rel's pathlist */
8710 create_final_unique_paths(root, rel, sortPathkeys, groupClause,
8711 sjinfo, unique_rel);
8712
8713 /* build unique paths based on input rel's partial_pathlist */
8715 sjinfo, unique_rel);
8716
8717 /* Now choose the best path(s) */
8718 set_cheapest(unique_rel);
8719
8720 /*
8721 * There shouldn't be any partial paths for the unique relation;
8722 * otherwise, we won't be able to properly guarantee uniqueness.
8723 */
8724 Assert(unique_rel->partial_pathlist == NIL);
8725
8726 /* Cache the result */
8727 rel->unique_rel = unique_rel;
8729 rel->unique_groupclause = groupClause;
8730
8731 MemoryContextSwitchTo(oldcontext);
8732
8733 return unique_rel;
8734}
Node * adjust_appendrel_attrs_multilevel(PlannerInfo *root, Node *node, RelOptInfo *childrel, RelOptInfo *parentrel)
Definition appendinfo.c:592
bool bms_equal(const Bitmapset *a, const Bitmapset *b)
Definition bitmapset.c:142
#define OidIsValid(objectId)
Definition c.h:788
#define elog(elevel,...)
Definition elog.h:226
List * list_delete_last(List *list)
Definition list.c:957
bool get_compatible_hash_operators(Oid opno, Oid *lhs_opno, Oid *rhs_opno)
Definition lsyscache.c:475
Oid get_equality_op_for_ordering_op(Oid opno, bool *reverse)
Definition lsyscache.c:324
Oid get_ordering_op_for_equality_op(Oid opno, bool use_lhs_type)
Definition lsyscache.c:362
TargetEntry * makeTargetEntry(Expr *expr, AttrNumber resno, char *resname, bool resjunk)
Definition makefuncs.c:289
MemoryContext GetMemoryChunkContext(void *pointer)
Definition mcxt.c:756
@ JOIN_SEMI
Definition nodes.h:317
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition palloc.h:124
Index assignSortGroupRef(TargetEntry *tle, List *tlist)
#define lfirst_oid(lc)
Definition pg_list.h:174
static void create_partial_unique_paths(PlannerInfo *root, RelOptInfo *input_rel, List *sortPathkeys, List *groupClause, SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel)
Definition planner.c:8866
unsigned int Oid
List * ppilist
Definition pathnodes.h:1039
List * unique_pathkeys
Definition pathnodes.h:1122
List * cheapest_parameterized_paths
Definition pathnodes.h:1043
List * unique_groupclause
Definition pathnodes.h:1124
struct RelOptInfo * unique_rel
Definition pathnodes.h:1120
JoinType jointype
Definition pathnodes.h:3215
Relids syn_righthand
Definition pathnodes.h:3214
List * semi_operators
Definition pathnodes.h:3225
TargetEntry * tlist_member(Expr *node, List *targetlist)
Definition tlist.c:88
List * make_tlist_from_pathtarget(PathTarget *target)
Definition tlist.c:633
#define create_pathtarget(root, tlist)
Definition tlist.h:58

References adjust_appendrel_attrs_multilevel(), Assert, assignSortGroupRef(), bms_equal(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, copy_pathtarget(), create_final_unique_paths(), create_partial_unique_paths(), create_pathtarget, elog, ERROR, fb(), forboth, get_compatible_hash_operators(), get_equality_op_for_ordering_op(), get_ordering_op_for_equality_op(), GetMemoryChunkContext(), IS_OTHER_REL, JOIN_SEMI, SpecialJoinInfo::jointype, lappend(), lfirst, lfirst_oid, list_delete_last(), list_length(), make_pathkeys_for_sortclauses(), make_tlist_from_pathtarget(), makeNode, makeTargetEntry(), MemoryContextSwitchTo(), NIL, OidIsValid, RelOptInfo::partial_pathlist, RelOptInfo::pathlist, RelOptInfo::ppilist, RelOptInfo::relids, RelOptInfo::reltarget, root, SpecialJoinInfo::semi_can_btree, SpecialJoinInfo::semi_can_hash, SpecialJoinInfo::semi_operators, SpecialJoinInfo::semi_rhs_exprs, set_cheapest(), SpecialJoinInfo::syn_righthand, tlist_member(), RelOptInfo::unique_groupclause, RelOptInfo::unique_pathkeys, and RelOptInfo::unique_rel.

Referenced by join_is_legal(), and populate_joinrel_with_paths().

◆ create_window_paths()

static RelOptInfo * create_window_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget input_target,
PathTarget output_target,
bool  output_target_parallel_safe,
WindowFuncLists wflists,
List activeWindows 
)
static

Definition at line 4605 of file planner.c.

4612{
4614 ListCell *lc;
4615
4616 /* For now, do all work in the (WINDOW, NULL) upperrel */
4618
4619 /*
4620 * If the input relation is not parallel-safe, then the window relation
4621 * can't be parallel-safe, either. Otherwise, we need to examine the
4622 * target list and active windows for non-parallel-safe constructs.
4623 */
4624 if (input_rel->consider_parallel && output_target_parallel_safe &&
4625 is_parallel_safe(root, (Node *) activeWindows))
4626 window_rel->consider_parallel = true;
4627
4628 /*
4629 * If the input rel belongs to a single FDW, so does the window rel.
4630 */
4631 window_rel->serverid = input_rel->serverid;
4632 window_rel->userid = input_rel->userid;
4633 window_rel->useridiscurrent = input_rel->useridiscurrent;
4634 window_rel->fdwroutine = input_rel->fdwroutine;
4635
4636 /*
4637 * Consider computing window functions starting from the existing
4638 * cheapest-total path (which will likely require a sort) as well as any
4639 * existing paths that satisfy or partially satisfy root->window_pathkeys.
4640 */
4641 foreach(lc, input_rel->pathlist)
4642 {
4643 Path *path = (Path *) lfirst(lc);
4644 int presorted_keys;
4645
4646 if (path == input_rel->cheapest_total_path ||
4647 pathkeys_count_contained_in(root->window_pathkeys, path->pathkeys,
4648 &presorted_keys) ||
4649 presorted_keys > 0)
4651 window_rel,
4652 path,
4655 wflists,
4656 activeWindows);
4657 }
4658
4659 /*
4660 * If there is an FDW that's responsible for all baserels of the query,
4661 * let it consider adding ForeignPaths.
4662 */
4663 if (window_rel->fdwroutine &&
4664 window_rel->fdwroutine->GetForeignUpperPaths)
4665 window_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_WINDOW,
4667 NULL);
4668
4669 /* Let extensions possibly add some more paths */
4671 (*create_upper_paths_hook) (root, UPPERREL_WINDOW,
4673
4674 /* Now choose the best path(s) */
4676
4677 return window_rel;
4678}
@ UPPERREL_WINDOW
Definition pathnodes.h:148
static void create_one_window_path(PlannerInfo *root, RelOptInfo *window_rel, Path *path, PathTarget *input_target, PathTarget *output_target, WindowFuncLists *wflists, List *activeWindows)
Definition planner.c:4692

References create_one_window_path(), create_upper_paths_hook, fb(), fetch_upper_rel(), is_parallel_safe(), lfirst, Path::pathkeys, pathkeys_count_contained_in(), root, set_cheapest(), and UPPERREL_WINDOW.

Referenced by grouping_planner().

◆ expression_planner()

Expr * expression_planner ( Expr expr)

Definition at line 6819 of file planner.c.

6820{
6821 Node *result;
6822
6823 /*
6824 * Convert named-argument function calls, insert default arguments and
6825 * simplify constant subexprs
6826 */
6827 result = eval_const_expressions(NULL, (Node *) expr);
6828
6829 /* Fill in opfuncid values if missing */
6830 fix_opfuncids(result);
6831
6832 return (Expr *) result;
6833}
Node * eval_const_expressions(PlannerInfo *root, Node *node)
Definition clauses.c:2267
void fix_opfuncids(Node *node)
Definition nodeFuncs.c:1840

References eval_const_expressions(), fb(), and fix_opfuncids().

Referenced by ATExecAddColumn(), ATExecSetExpression(), ATPrepAlterColumnType(), BeginCopyFrom(), ComputePartitionAttrs(), contain_mutable_functions_after_planning(), contain_volatile_functions_after_planning(), createTableConstraints(), ExecPrepareCheck(), ExecPrepareExpr(), ExecPrepareQual(), load_domaintype_info(), set_baserel_partition_constraint(), slot_fill_defaults(), and transformPartitionBoundValue().

◆ expression_planner_with_deps()

Expr * expression_planner_with_deps ( Expr expr,
List **  relationOids,
List **  invalItems 
)

Definition at line 6846 of file planner.c.

6849{
6850 Node *result;
6851 PlannerGlobal glob;
6853
6854 /* Make up dummy planner state so we can use setrefs machinery */
6855 MemSet(&glob, 0, sizeof(glob));
6856 glob.type = T_PlannerGlobal;
6857 glob.relationOids = NIL;
6858 glob.invalItems = NIL;
6859
6860 MemSet(&root, 0, sizeof(root));
6861 root.type = T_PlannerInfo;
6862 root.glob = &glob;
6863
6864 /*
6865 * Convert named-argument function calls, insert default arguments and
6866 * simplify constant subexprs. Collect identities of inlined functions
6867 * and elided domains, too.
6868 */
6869 result = eval_const_expressions(&root, (Node *) expr);
6870
6871 /* Fill in opfuncid values if missing */
6872 fix_opfuncids(result);
6873
6874 /*
6875 * Now walk the finished expression to find anything else we ought to
6876 * record as an expression dependency.
6877 */
6879
6880 *relationOids = glob.relationOids;
6881 *invalItems = glob.invalItems;
6882
6883 return (Expr *) result;
6884}
bool extract_query_dependencies_walker(Node *node, PlannerInfo *context)
Definition setrefs.c:3742
List * invalItems
Definition pathnodes.h:230
List * relationOids
Definition pathnodes.h:227

References eval_const_expressions(), extract_query_dependencies_walker(), fb(), fix_opfuncids(), PlannerGlobal::invalItems, MemSet, NIL, PlannerGlobal::relationOids, and root.

Referenced by GetCachedExpression().

◆ extract_rollup_sets()

static List * extract_rollup_sets ( List groupingSets)
static

Definition at line 3003 of file planner.c.

3004{
3005 int num_sets_raw = list_length(groupingSets);
3006 int num_empty = 0;
3007 int num_sets = 0; /* distinct sets */
3008 int num_chains = 0;
3009 List *result = NIL;
3010 List **results;
3011 List **orig_sets;
3013 int *chains;
3014 short **adjacency;
3015 short *adjacency_buf;
3017 int i;
3018 int j;
3019 int j_size;
3020 ListCell *lc1 = list_head(groupingSets);
3021 ListCell *lc;
3022
3023 /*
3024 * Start by stripping out empty sets. The algorithm doesn't require this,
3025 * but the planner currently needs all empty sets to be returned in the
3026 * first list, so we strip them here and add them back after.
3027 */
3028 while (lc1 && lfirst(lc1) == NIL)
3029 {
3030 ++num_empty;
3031 lc1 = lnext(groupingSets, lc1);
3032 }
3033
3034 /* bail out now if it turns out that all we had were empty sets. */
3035 if (!lc1)
3036 return list_make1(groupingSets);
3037
3038 /*----------
3039 * We don't strictly need to remove duplicate sets here, but if we don't,
3040 * they tend to become scattered through the result, which is a bit
3041 * confusing (and irritating if we ever decide to optimize them out).
3042 * So we remove them here and add them back after.
3043 *
3044 * For each non-duplicate set, we fill in the following:
3045 *
3046 * orig_sets[i] = list of the original set lists
3047 * set_masks[i] = bitmapset for testing inclusion
3048 * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
3049 *
3050 * chains[i] will be the result group this set is assigned to.
3051 *
3052 * We index all of these from 1 rather than 0 because it is convenient
3053 * to leave 0 free for the NIL node in the graph algorithm.
3054 *----------
3055 */
3056 orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
3057 set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
3058 adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
3059 adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
3060
3061 j_size = 0;
3062 j = 0;
3063 i = 1;
3064
3065 for_each_cell(lc, groupingSets, lc1)
3066 {
3067 List *candidate = (List *) lfirst(lc);
3069 ListCell *lc2;
3070 int dup_of = 0;
3071
3072 foreach(lc2, candidate)
3073 {
3075 }
3076
3077 /* we can only be a dup if we're the same length as a previous set */
3079 {
3080 int k;
3081
3082 for (k = j; k < i; ++k)
3083 {
3085 {
3086 dup_of = k;
3087 break;
3088 }
3089 }
3090 }
3091 else if (j_size < list_length(candidate))
3092 {
3094 j = i;
3095 }
3096
3097 if (dup_of > 0)
3098 {
3101 }
3102 else
3103 {
3104 int k;
3105 int n_adj = 0;
3106
3109
3110 /* fill in adjacency list; no need to compare equal-size sets */
3111
3112 for (k = j - 1; k > 0; --k)
3113 {
3115 adjacency_buf[++n_adj] = k;
3116 }
3117
3118 if (n_adj > 0)
3119 {
3120 adjacency_buf[0] = n_adj;
3121 adjacency[i] = palloc((n_adj + 1) * sizeof(short));
3122 memcpy(adjacency[i], adjacency_buf, (n_adj + 1) * sizeof(short));
3123 }
3124 else
3125 adjacency[i] = NULL;
3126
3127 ++i;
3128 }
3129 }
3130
3131 num_sets = i - 1;
3132
3133 /*
3134 * Apply the graph matching algorithm to do the work.
3135 */
3136 state = BipartiteMatch(num_sets, num_sets, adjacency);
3137
3138 /*
3139 * Now, the state->pair* fields have the info we need to assign sets to
3140 * chains. Two sets (u,v) belong to the same chain if pair_uv[u] = v or
3141 * pair_vu[v] = u (both will be true, but we check both so that we can do
3142 * it in one pass)
3143 */
3144 chains = palloc0((num_sets + 1) * sizeof(int));
3145
3146 for (i = 1; i <= num_sets; ++i)
3147 {
3148 int u = state->pair_vu[i];
3149 int v = state->pair_uv[i];
3150
3151 if (u > 0 && u < i)
3152 chains[i] = chains[u];
3153 else if (v > 0 && v < i)
3154 chains[i] = chains[v];
3155 else
3156 chains[i] = ++num_chains;
3157 }
3158
3159 /* build result lists. */
3160 results = palloc0((num_chains + 1) * sizeof(List *));
3161
3162 for (i = 1; i <= num_sets; ++i)
3163 {
3164 int c = chains[i];
3165
3166 Assert(c > 0);
3167
3168 results[c] = list_concat(results[c], orig_sets[i]);
3169 }
3170
3171 /* push any empty sets back on the first list. */
3172 while (num_empty-- > 0)
3173 results[1] = lcons(NIL, results[1]);
3174
3175 /* make result list */
3176 for (i = 1; i <= num_chains; ++i)
3177 result = lappend(result, results[i]);
3178
3179 /*
3180 * Free all the things.
3181 *
3182 * (This is over-fussy for small sets but for large sets we could have
3183 * tied up a nontrivial amount of memory.)
3184 */
3186 pfree(results);
3187 pfree(chains);
3188 for (i = 1; i <= num_sets; ++i)
3189 if (adjacency[i])
3190 pfree(adjacency[i]);
3191 pfree(adjacency);
3194 for (i = 1; i <= num_sets; ++i)
3197
3198 return result;
3199}
BipartiteMatchState * BipartiteMatch(int u_size, int v_size, short **adjacency)
void BipartiteMatchFree(BipartiteMatchState *state)
bool bms_is_subset(const Bitmapset *a, const Bitmapset *b)
Definition bitmapset.c:412
void bms_free(Bitmapset *a)
Definition bitmapset.c:239
int j
Definition isn.c:78
void * palloc0(Size size)
Definition mcxt.c:1417
char * c

References Assert, BipartiteMatch(), BipartiteMatchFree(), bms_add_member(), bms_equal(), bms_free(), bms_is_subset(), fb(), for_each_cell, i, j, lappend(), lcons(), lfirst, lfirst_int, list_concat(), list_head(), list_length(), list_make1, lnext(), NIL, palloc(), palloc0(), and pfree().

Referenced by preprocess_grouping_sets().

◆ gather_grouping_paths()

static void gather_grouping_paths ( PlannerInfo root,
RelOptInfo rel 
)
static

Definition at line 7826 of file planner.c.

7827{
7828 ListCell *lc;
7831
7832 /*
7833 * This occurs after any partial aggregation has taken place, so trim off
7834 * any pathkeys added for ORDER BY / DISTINCT aggregates.
7835 */
7836 if (list_length(root->group_pathkeys) > root->num_groupby_pathkeys)
7837 groupby_pathkeys = list_copy_head(root->group_pathkeys,
7838 root->num_groupby_pathkeys);
7839 else
7840 groupby_pathkeys = root->group_pathkeys;
7841
7842 /* Try Gather for unordered paths and Gather Merge for ordered ones. */
7844
7846
7847 /* XXX Shouldn't this also consider the group-key-reordering? */
7848 foreach(lc, rel->partial_pathlist)
7849 {
7850 Path *path = (Path *) lfirst(lc);
7851 bool is_sorted;
7852 int presorted_keys;
7853 double total_groups;
7854
7856 path->pathkeys,
7857 &presorted_keys);
7858
7859 if (is_sorted)
7860 continue;
7861
7862 /*
7863 * Try at least sorting the cheapest path and also try incrementally
7864 * sorting any path which is partially sorted already (no need to deal
7865 * with paths which have presorted keys when incremental sort is
7866 * disabled unless it's the cheapest input path).
7867 */
7868 if (path != cheapest_partial_path &&
7869 (presorted_keys == 0 || !enable_incremental_sort))
7870 continue;
7871
7872 /*
7873 * We've no need to consider both a sort and incremental sort. We'll
7874 * just do a sort if there are no presorted keys and an incremental
7875 * sort when there are presorted keys.
7876 */
7877 if (presorted_keys == 0 || !enable_incremental_sort)
7878 path = (Path *) create_sort_path(root, rel, path,
7880 -1.0);
7881 else
7883 rel,
7884 path,
7886 presorted_keys,
7887 -1.0);
7889 path = (Path *)
7891 rel,
7892 path,
7893 rel->reltarget,
7895 NULL,
7896 &total_groups);
7897
7898 add_path(rel, path);
7899 }
7900}
List * list_copy_head(const List *oldlist, int len)
Definition list.c:1593

References add_path(), compute_gather_rows(), create_gather_merge_path(), create_incremental_sort_path(), create_sort_path(), enable_incremental_sort, fb(), generate_useful_gather_paths(), lfirst, linitial, list_copy_head(), list_length(), RelOptInfo::partial_pathlist, Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::reltarget, and root.

Referenced by add_paths_to_grouping_rel(), and create_ordinary_grouping_paths().

◆ generate_setop_child_grouplist()

static List * generate_setop_child_grouplist ( SetOperationStmt op,
List targetlist 
)
static

Definition at line 8416 of file planner.c.

8417{
8418 List *grouplist = copyObject(op->groupClauses);
8419 ListCell *lg;
8420 ListCell *lt;
8421 ListCell *ct;
8422
8424 ct = list_head(op->colTypes);
8425 foreach(lt, targetlist)
8426 {
8427 TargetEntry *tle = (TargetEntry *) lfirst(lt);
8429 Oid coltype;
8430
8431 /* resjunk columns could have sortgrouprefs. Leave these alone */
8432 if (tle->resjunk)
8433 continue;
8434
8435 /*
8436 * We expect every non-resjunk target to have a SortGroupClause and
8437 * colTypes.
8438 */
8439 Assert(lg != NULL);
8440 Assert(ct != NULL);
8442 coltype = lfirst_oid(ct);
8443
8444 /* reject if target type isn't the same as the setop target type */
8445 if (coltype != exprType((Node *) tle->expr))
8446 return NIL;
8447
8448 lg = lnext(grouplist, lg);
8449 ct = lnext(op->colTypes, ct);
8450
8451 /* assign a tleSortGroupRef, or reuse the existing one */
8452 sgc->tleSortGroupRef = assignSortGroupRef(tle, targetlist);
8453 }
8454
8455 Assert(lg == NULL);
8456 Assert(ct == NULL);
8457
8458 return grouplist;
8459}
Oid exprType(const Node *expr)
Definition nodeFuncs.c:42

References Assert, assignSortGroupRef(), copyObject, exprType(), fb(), lfirst, lfirst_oid, list_head(), lnext(), and NIL.

Referenced by standard_qp_callback().

◆ get_cheapest_fractional_path()

Path * get_cheapest_fractional_path ( RelOptInfo rel,
double  tuple_fraction 
)

Definition at line 6657 of file planner.c.

6658{
6660 ListCell *l;
6661
6662 /* If all tuples will be retrieved, just return the cheapest-total path */
6663 if (tuple_fraction <= 0.0)
6664 return best_path;
6665
6666 /* Convert absolute # of tuples to a fraction; no need to clamp to 0..1 */
6667 if (tuple_fraction >= 1.0 && best_path->rows > 0)
6668 tuple_fraction /= best_path->rows;
6669
6670 foreach(l, rel->pathlist)
6671 {
6672 Path *path = (Path *) lfirst(l);
6673
6674 if (path->param_info)
6675 continue;
6676
6677 if (path == rel->cheapest_total_path ||
6678 compare_fractional_path_costs(best_path, path, tuple_fraction) <= 0)
6679 continue;
6680
6681 best_path = path;
6682 }
6683
6684 return best_path;
6685}
int compare_fractional_path_costs(Path *path1, Path *path2, double fraction)
Definition pathnode.c:123

References RelOptInfo::cheapest_total_path, compare_fractional_path_costs(), fb(), lfirst, and RelOptInfo::pathlist.

Referenced by add_paths_to_append_rel(), make_subplan(), and standard_planner().

◆ get_number_of_groups()

static double get_number_of_groups ( PlannerInfo root,
double  path_rows,
grouping_sets_data gd,
List target_list 
)
static

Definition at line 3737 of file planner.c.

3741{
3742 Query *parse = root->parse;
3743 double dNumGroups;
3744
3745 if (parse->groupClause)
3746 {
3748
3749 if (parse->groupingSets)
3750 {
3751 /* Add up the estimates for each grouping set */
3752 ListCell *lc;
3753
3754 Assert(gd); /* keep Coverity happy */
3755
3756 dNumGroups = 0;
3757
3758 foreach(lc, gd->rollups)
3759 {
3761 ListCell *lc2;
3762 ListCell *lc3;
3763
3765 target_list);
3766
3767 rollup->numGroups = 0.0;
3768
3769 forboth(lc2, rollup->gsets, lc3, rollup->gsets_data)
3770 {
3771 List *gset = (List *) lfirst(lc2);
3773 double numGroups = estimate_num_groups(root,
3774 groupExprs,
3775 path_rows,
3776 &gset,
3777 NULL);
3778
3779 gs->numGroups = numGroups;
3780 rollup->numGroups += numGroups;
3781 }
3782
3783 dNumGroups += rollup->numGroups;
3784 }
3785
3786 if (gd->hash_sets_idx)
3787 {
3788 ListCell *lc2;
3789
3790 gd->dNumHashGroups = 0;
3791
3793 target_list);
3794
3795 forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets)
3796 {
3797 List *gset = (List *) lfirst(lc);
3799 double numGroups = estimate_num_groups(root,
3800 groupExprs,
3801 path_rows,
3802 &gset,
3803 NULL);
3804
3805 gs->numGroups = numGroups;
3806 gd->dNumHashGroups += numGroups;
3807 }
3808
3809 dNumGroups += gd->dNumHashGroups;
3810 }
3811 }
3812 else
3813 {
3814 /* Plain GROUP BY -- estimate based on optimized groupClause */
3815 groupExprs = get_sortgrouplist_exprs(root->processed_groupClause,
3816 target_list);
3817
3819 NULL, NULL);
3820 }
3821 }
3822 else if (parse->groupingSets)
3823 {
3824 /* Empty grouping sets ... one result row for each one */
3825 dNumGroups = list_length(parse->groupingSets);
3826 }
3827 else if (parse->hasAggs || root->hasHavingQual)
3828 {
3829 /* Plain aggregation, one result row */
3830 dNumGroups = 1;
3831 }
3832 else
3833 {
3834 /* Not grouping */
3835 dNumGroups = 1;
3836 }
3837
3838 return dNumGroups;
3839}

References Assert, estimate_num_groups(), fb(), forboth, get_sortgrouplist_exprs(), lfirst, lfirst_node, list_length(), parse(), and root.

Referenced by add_paths_to_grouping_rel(), and create_partial_grouping_paths().

◆ get_useful_pathkeys_for_distinct()

static List * get_useful_pathkeys_for_distinct ( PlannerInfo root,
List needed_pathkeys,
List path_pathkeys 
)
static

Definition at line 5295 of file planner.c.

5297{
5300
5301 /* always include the given 'needed_pathkeys' */
5304
5306 return useful_pathkeys_list;
5307
5308 /*
5309 * Scan the given 'path_pathkeys' and construct a list of PathKey nodes
5310 * that match 'needed_pathkeys', but only up to the longest matching
5311 * prefix.
5312 *
5313 * When we have DISTINCT ON, we must ensure that the resulting pathkey
5314 * list matches initial distinctClause pathkeys; otherwise, it won't have
5315 * the desired behavior.
5316 */
5318 {
5319 /*
5320 * The PathKey nodes are canonical, so they can be checked for
5321 * equality by simple pointer comparison.
5322 */
5324 break;
5325 if (root->parse->hasDistinctOn &&
5326 !list_member_ptr(root->distinct_pathkeys, pathkey))
5327 break;
5328
5330 }
5331
5332 /* If no match at all, no point in reordering needed_pathkeys */
5333 if (useful_pathkeys == NIL)
5334 return useful_pathkeys_list;
5335
5336 /*
5337 * If not full match, the resulting pathkey list is not useful without
5338 * incremental sort.
5339 */
5342 return useful_pathkeys_list;
5343
5344 /* Append the remaining PathKey nodes in needed_pathkeys */
5347
5348 /*
5349 * If the resulting pathkey list is the same as the 'needed_pathkeys',
5350 * just drop it.
5351 */
5354 return useful_pathkeys_list;
5355
5358
5359 return useful_pathkeys_list;
5360}
List * list_concat_unique_ptr(List *list1, const List *list2)
Definition list.c:1427
bool list_member_ptr(const List *list, const void *datum)
Definition list.c:682
bool enable_distinct_reordering
Definition planner.c:71

References compare_pathkeys(), enable_distinct_reordering, enable_incremental_sort, fb(), foreach_node, lappend(), list_concat_unique_ptr(), list_length(), list_member_ptr(), NIL, PATHKEYS_EQUAL, and root.

Referenced by create_final_distinct_paths(), and create_partial_distinct_paths().

◆ group_by_has_partkey()

static bool group_by_has_partkey ( RelOptInfo input_rel,
List targetList,
List groupClause 
)
static

Definition at line 8329 of file planner.c.

8332{
8333 List *groupexprs = get_sortgrouplist_exprs(groupClause, targetList);
8334 int cnt = 0;
8335 int partnatts;
8336
8337 /* Input relation should be partitioned. */
8338 Assert(input_rel->part_scheme);
8339
8340 /* Rule out early, if there are no partition keys present. */
8341 if (!input_rel->partexprs)
8342 return false;
8343
8344 partnatts = input_rel->part_scheme->partnatts;
8345
8346 for (cnt = 0; cnt < partnatts; cnt++)
8347 {
8348 List *partexprs = input_rel->partexprs[cnt];
8349 ListCell *lc;
8350 bool found = false;
8351
8352 foreach(lc, partexprs)
8353 {
8354 ListCell *lg;
8355 Expr *partexpr = lfirst(lc);
8356 Oid partcoll = input_rel->part_scheme->partcollation[cnt];
8357
8358 foreach(lg, groupexprs)
8359 {
8360 Expr *groupexpr = lfirst(lg);
8362
8363 /*
8364 * Note: we can assume there is at most one RelabelType node;
8365 * eval_const_expressions() will have simplified if more than
8366 * one.
8367 */
8369 groupexpr = ((RelabelType *) groupexpr)->arg;
8370
8371 if (equal(groupexpr, partexpr))
8372 {
8373 /*
8374 * Reject a match if the grouping collation does not match
8375 * the partitioning collation.
8376 */
8379 return false;
8380
8381 found = true;
8382 break;
8383 }
8384 }
8385
8386 if (found)
8387 break;
8388 }
8389
8390 /*
8391 * If none of the partition key expressions match with any of the
8392 * GROUP BY expression, return false.
8393 */
8394 if (!found)
8395 return false;
8396 }
8397
8398 return true;
8399}
Oid exprCollation(const Node *expr)
Definition nodeFuncs.c:821

References Assert, equal(), exprCollation(), fb(), get_sortgrouplist_exprs(), IsA, lfirst, and OidIsValid.

Referenced by create_ordinary_grouping_paths().

◆ grouping_planner()

static void grouping_planner ( PlannerInfo root,
double  tuple_fraction,
SetOperationStmt setops 
)
static

Definition at line 1514 of file planner.c.

1516{
1517 Query *parse = root->parse;
1518 int64 offset_est = 0;
1519 int64 count_est = 0;
1520 double limit_tuples = -1.0;
1521 bool have_postponed_srfs = false;
1528 FinalPathExtraData extra;
1529 ListCell *lc;
1530
1531 /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
1532 if (parse->limitCount || parse->limitOffset)
1533 {
1534 tuple_fraction = preprocess_limit(root, tuple_fraction,
1535 &offset_est, &count_est);
1536
1537 /*
1538 * If we have a known LIMIT, and don't have an unknown OFFSET, we can
1539 * estimate the effects of using a bounded sort.
1540 */
1541 if (count_est > 0 && offset_est >= 0)
1542 limit_tuples = (double) count_est + (double) offset_est;
1543 }
1544
1545 /* Make tuple_fraction accessible to lower-level routines */
1546 root->tuple_fraction = tuple_fraction;
1547
1548 if (parse->setOperations)
1549 {
1550 /*
1551 * Construct Paths for set operations. The results will not need any
1552 * work except perhaps a top-level sort and/or LIMIT. Note that any
1553 * special work for recursive unions is the responsibility of
1554 * plan_set_operations.
1555 */
1557
1558 /*
1559 * We should not need to call preprocess_targetlist, since we must be
1560 * in a SELECT query node. Instead, use the processed_tlist returned
1561 * by plan_set_operations (since this tells whether it returned any
1562 * resjunk columns!), and transfer any sort key information from the
1563 * original tlist.
1564 */
1565 Assert(parse->commandType == CMD_SELECT);
1566
1567 /* for safety, copy processed_tlist instead of modifying in-place */
1568 root->processed_tlist =
1569 postprocess_setop_tlist(copyObject(root->processed_tlist),
1570 parse->targetList);
1571
1572 /* Also extract the PathTarget form of the setop result tlist */
1573 final_target = current_rel->cheapest_total_path->pathtarget;
1574
1575 /* And check whether it's parallel safe */
1578
1579 /* The setop result tlist couldn't contain any SRFs */
1580 Assert(!parse->hasTargetSRFs);
1582
1583 /*
1584 * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have
1585 * checked already, but let's make sure).
1586 */
1587 if (parse->rowMarks)
1588 ereport(ERROR,
1590 /*------
1591 translator: %s is a SQL row locking clause such as FOR UPDATE */
1592 errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
1594 parse->rowMarks)->strength))));
1595
1596 /*
1597 * Calculate pathkeys that represent result ordering requirements
1598 */
1599 Assert(parse->distinctClause == NIL);
1600 root->sort_pathkeys = make_pathkeys_for_sortclauses(root,
1601 parse->sortClause,
1602 root->processed_tlist);
1603 }
1604 else
1605 {
1606 /* No set operations, do regular planning */
1620 bool have_grouping;
1622 List *activeWindows = NIL;
1623 grouping_sets_data *gset_data = NULL;
1625
1626 /* A recursive query should always have setOperations */
1627 Assert(!root->hasRecursion);
1628
1629 /* Preprocess grouping sets and GROUP BY clause, if any */
1630 if (parse->groupingSets)
1631 {
1632 gset_data = preprocess_grouping_sets(root);
1633 }
1634 else if (parse->groupClause)
1635 {
1636 /* Preprocess regular GROUP BY clause, if any */
1637 root->processed_groupClause = preprocess_groupclause(root, NIL);
1638 }
1639
1640 /*
1641 * Preprocess targetlist. Note that much of the remaining planning
1642 * work will be done with the PathTarget representation of tlists, but
1643 * we must also maintain the full representation of the final tlist so
1644 * that we can transfer its decoration (resnames etc) to the topmost
1645 * tlist of the finished Plan. This is kept in processed_tlist.
1646 */
1648
1649 /*
1650 * Mark all the aggregates with resolved aggtranstypes, and detect
1651 * aggregates that are duplicates or can share transition state. We
1652 * must do this before slicing and dicing the tlist into various
1653 * pathtargets, else some copies of the Aggref nodes might escape
1654 * being marked.
1655 */
1656 if (parse->hasAggs)
1657 {
1658 preprocess_aggrefs(root, (Node *) root->processed_tlist);
1659 preprocess_aggrefs(root, (Node *) parse->havingQual);
1660 }
1661
1662 /*
1663 * Locate any window functions in the tlist. (We don't need to look
1664 * anywhere else, since expressions used in ORDER BY will be in there
1665 * too.) Note that they could all have been eliminated by constant
1666 * folding, in which case we don't need to do any more work.
1667 */
1668 if (parse->hasWindowFuncs)
1669 {
1670 wflists = find_window_functions((Node *) root->processed_tlist,
1671 list_length(parse->windowClause));
1672 if (wflists->numWindowFuncs > 0)
1673 {
1674 /*
1675 * See if any modifications can be made to each WindowClause
1676 * to allow the executor to execute the WindowFuncs more
1677 * quickly.
1678 */
1680
1681 /* Extract the list of windows actually in use. */
1682 activeWindows = select_active_windows(root, wflists);
1683
1684 /* Make sure they all have names, for EXPLAIN's use. */
1685 name_active_windows(activeWindows);
1686 }
1687 else
1688 parse->hasWindowFuncs = false;
1689 }
1690
1691 /*
1692 * Preprocess MIN/MAX aggregates, if any. Note: be careful about
1693 * adding logic between here and the query_planner() call. Anything
1694 * that is needed in MIN/MAX-optimizable cases will have to be
1695 * duplicated in planagg.c.
1696 */
1697 if (parse->hasAggs)
1699
1700 /*
1701 * Figure out whether there's a hard limit on the number of rows that
1702 * query_planner's result subplan needs to return. Even if we know a
1703 * hard limit overall, it doesn't apply if the query has any
1704 * grouping/aggregation operations, or SRFs in the tlist.
1705 */
1706 if (parse->groupClause ||
1707 parse->groupingSets ||
1708 parse->distinctClause ||
1709 parse->hasAggs ||
1710 parse->hasWindowFuncs ||
1711 parse->hasTargetSRFs ||
1712 root->hasHavingQual)
1713 root->limit_tuples = -1.0;
1714 else
1715 root->limit_tuples = limit_tuples;
1716
1717 /* Set up data needed by standard_qp_callback */
1718 qp_extra.activeWindows = activeWindows;
1719 qp_extra.gset_data = gset_data;
1720
1721 /*
1722 * If we're a subquery for a set operation, store the SetOperationStmt
1723 * in qp_extra.
1724 */
1725 qp_extra.setop = setops;
1726
1727 /*
1728 * Generate the best unsorted and presorted paths for the scan/join
1729 * portion of this Query, ie the processing represented by the
1730 * FROM/WHERE clauses. (Note there may not be any presorted paths.)
1731 * We also generate (in standard_qp_callback) pathkey representations
1732 * of the query's sort clause, distinct clause, etc.
1733 */
1735
1736 /*
1737 * Convert the query's result tlist into PathTarget format.
1738 *
1739 * Note: this cannot be done before query_planner() has performed
1740 * appendrel expansion, because that might add resjunk entries to
1741 * root->processed_tlist. Waiting till afterwards is also helpful
1742 * because the target width estimates can use per-Var width numbers
1743 * that were obtained within query_planner().
1744 */
1745 final_target = create_pathtarget(root, root->processed_tlist);
1748
1749 /*
1750 * If ORDER BY was given, consider whether we should use a post-sort
1751 * projection, and compute the adjusted target for preceding steps if
1752 * so.
1753 */
1754 if (parse->sortClause)
1755 {
1761 }
1762 else
1763 {
1766 }
1767
1768 /*
1769 * If we have window functions to deal with, the output from any
1770 * grouping step needs to be what the window functions want;
1771 * otherwise, it should be sort_input_target.
1772 */
1773 if (activeWindows)
1774 {
1777 activeWindows);
1780 }
1781 else
1782 {
1785 }
1786
1787 /*
1788 * If we have grouping or aggregation to do, the topmost scan/join
1789 * plan node must emit what the grouping step wants; otherwise, it
1790 * should emit grouping_target.
1791 */
1792 have_grouping = (parse->groupClause || parse->groupingSets ||
1793 parse->hasAggs || root->hasHavingQual);
1794 if (have_grouping)
1795 {
1799 }
1800 else
1801 {
1804 }
1805
1806 /*
1807 * If there are any SRFs in the targetlist, we must separate each of
1808 * these PathTargets into SRF-computing and SRF-free targets. Replace
1809 * each of the named targets with a SRF-free version, and remember the
1810 * list of additional projection steps we need to add afterwards.
1811 */
1812 if (parse->hasTargetSRFs)
1813 {
1814 /* final_target doesn't recompute any SRFs in sort_input_target */
1820 /* likewise for sort_input_target vs. grouping_target */
1826 /* likewise for grouping_target vs. scanjoin_target */
1833 /* scanjoin_target will not have any SRFs precomputed for it */
1839 }
1840 else
1841 {
1842 /* initialize lists; for most of these, dummy values are OK */
1848 }
1849
1850 /* Apply scan/join target. */
1852 && equal(scanjoin_target->exprs, current_rel->reltarget->exprs);
1857
1858 /*
1859 * Save the various upper-rel PathTargets we just computed into
1860 * root->upper_targets[]. The core code doesn't use this, but it
1861 * provides a convenient place for extensions to get at the info. For
1862 * consistency, we save all the intermediate targets, even though some
1863 * of the corresponding upperrels might not be needed for this query.
1864 */
1865 root->upper_targets[UPPERREL_FINAL] = final_target;
1866 root->upper_targets[UPPERREL_ORDERED] = final_target;
1867 root->upper_targets[UPPERREL_DISTINCT] = sort_input_target;
1869 root->upper_targets[UPPERREL_WINDOW] = sort_input_target;
1870 root->upper_targets[UPPERREL_GROUP_AGG] = grouping_target;
1871
1872 /*
1873 * If we have grouping and/or aggregation, consider ways to implement
1874 * that. We build a new upperrel representing the output of this
1875 * phase.
1876 */
1877 if (have_grouping)
1878 {
1883 gset_data);
1884 /* Fix things up if grouping_target contains SRFs */
1885 if (parse->hasTargetSRFs)
1889 }
1890
1891 /*
1892 * If we have window functions, consider ways to implement those. We
1893 * build a new upperrel representing the output of this phase.
1894 */
1895 if (activeWindows)
1896 {
1902 wflists,
1903 activeWindows);
1904 /* Fix things up if sort_input_target contains SRFs */
1905 if (parse->hasTargetSRFs)
1909 }
1910
1911 /*
1912 * If there is a DISTINCT clause, consider ways to implement that. We
1913 * build a new upperrel representing the output of this phase.
1914 */
1915 if (parse->distinctClause)
1916 {
1920 }
1921 } /* end of if (setOperations) */
1922
1923 /*
1924 * If ORDER BY was given, consider ways to implement that, and generate a
1925 * new upperrel containing only paths that emit the correct ordering and
1926 * project the correct final_target. We can apply the original
1927 * limit_tuples limit in sort costing here, but only if there are no
1928 * postponed SRFs.
1929 */
1930 if (parse->sortClause)
1931 {
1936 have_postponed_srfs ? -1.0 :
1937 limit_tuples);
1938 /* Fix things up if final_target contains SRFs */
1939 if (parse->hasTargetSRFs)
1943 }
1944
1945 /*
1946 * Now we are prepared to build the final-output upperrel.
1947 */
1949
1950 /*
1951 * If the input rel is marked consider_parallel and there's nothing that's
1952 * not parallel-safe in the LIMIT clause, then the final_rel can be marked
1953 * consider_parallel as well. Note that if the query has rowMarks or is
1954 * not a SELECT, consider_parallel will be false for every relation in the
1955 * query.
1956 */
1957 if (current_rel->consider_parallel &&
1958 is_parallel_safe(root, parse->limitOffset) &&
1959 is_parallel_safe(root, parse->limitCount))
1960 final_rel->consider_parallel = true;
1961
1962 /*
1963 * If the current_rel belongs to a single FDW, so does the final_rel.
1964 */
1965 final_rel->serverid = current_rel->serverid;
1966 final_rel->userid = current_rel->userid;
1967 final_rel->useridiscurrent = current_rel->useridiscurrent;
1968 final_rel->fdwroutine = current_rel->fdwroutine;
1969
1970 /*
1971 * Generate paths for the final_rel. Insert all surviving paths, with
1972 * LockRows, Limit, and/or ModifyTable steps added if needed.
1973 */
1974 foreach(lc, current_rel->pathlist)
1975 {
1976 Path *path = (Path *) lfirst(lc);
1977
1978 /*
1979 * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
1980 * (Note: we intentionally test parse->rowMarks not root->rowMarks
1981 * here. If there are only non-locking rowmarks, they should be
1982 * handled by the ModifyTable node instead. However, root->rowMarks
1983 * is what goes into the LockRows node.)
1984 */
1985 if (parse->rowMarks)
1986 {
1987 path = (Path *) create_lockrows_path(root, final_rel, path,
1988 root->rowMarks,
1990 }
1991
1992 /*
1993 * If there is a LIMIT/OFFSET clause, add the LIMIT node.
1994 */
1995 if (limit_needed(parse))
1996 {
1997 path = (Path *) create_limit_path(root, final_rel, path,
1998 parse->limitOffset,
1999 parse->limitCount,
2000 parse->limitOption,
2001 offset_est, count_est);
2002 }
2003
2004 /*
2005 * If this is an INSERT/UPDATE/DELETE/MERGE, add the ModifyTable node.
2006 */
2007 if (parse->commandType != CMD_SELECT)
2008 {
2009 Index rootRelation;
2010 List *resultRelations = NIL;
2011 List *updateColnosLists = NIL;
2012 List *withCheckOptionLists = NIL;
2013 List *returningLists = NIL;
2014 List *mergeActionLists = NIL;
2015 List *mergeJoinConditions = NIL;
2016 List *rowMarks;
2017
2018 if (bms_membership(root->all_result_relids) == BMS_MULTIPLE)
2019 {
2020 /* Inherited UPDATE/DELETE/MERGE */
2022 parse->resultRelation);
2023 int resultRelation = -1;
2024
2025 /* Pass the root result rel forward to the executor. */
2026 rootRelation = parse->resultRelation;
2027
2028 /* Add only leaf children to ModifyTable. */
2029 while ((resultRelation = bms_next_member(root->leaf_result_relids,
2030 resultRelation)) >= 0)
2031 {
2033 resultRelation);
2034
2035 /*
2036 * Also exclude any leaf rels that have turned dummy since
2037 * being added to the list, for example, by being excluded
2038 * by constraint exclusion.
2039 */
2041 continue;
2042
2043 /* Build per-target-rel lists needed by ModifyTable */
2044 resultRelations = lappend_int(resultRelations,
2045 resultRelation);
2046 if (parse->commandType == CMD_UPDATE)
2047 {
2048 List *update_colnos = root->update_colnos;
2049
2051 update_colnos =
2053 update_colnos,
2054 this_result_rel->relid,
2055 top_result_rel->relid);
2056 updateColnosLists = lappend(updateColnosLists,
2057 update_colnos);
2058 }
2059 if (parse->withCheckOptions)
2060 {
2061 List *withCheckOptions = parse->withCheckOptions;
2062
2069 withCheckOptionLists = lappend(withCheckOptionLists,
2071 }
2072 if (parse->returningList)
2073 {
2074 List *returningList = parse->returningList;
2075
2077 returningList = (List *)
2079 (Node *) returningList,
2082 returningLists = lappend(returningLists,
2083 returningList);
2084 }
2085 if (parse->mergeActionList)
2086 {
2087 ListCell *l;
2088 List *mergeActionList = NIL;
2089
2090 /*
2091 * Copy MergeActions and translate stuff that
2092 * references attribute numbers.
2093 */
2094 foreach(l, parse->mergeActionList)
2095 {
2097 *leaf_action = copyObject(action);
2098
2099 leaf_action->qual =
2101 (Node *) action->qual,
2104 leaf_action->targetList = (List *)
2106 (Node *) action->targetList,
2109 if (leaf_action->commandType == CMD_UPDATE)
2110 leaf_action->updateColnos =
2112 action->updateColnos,
2113 this_result_rel->relid,
2114 top_result_rel->relid);
2115 mergeActionList = lappend(mergeActionList,
2116 leaf_action);
2117 }
2118
2119 mergeActionLists = lappend(mergeActionLists,
2120 mergeActionList);
2121 }
2122 if (parse->commandType == CMD_MERGE)
2123 {
2124 Node *mergeJoinCondition = parse->mergeJoinCondition;
2125
2127 mergeJoinCondition =
2129 mergeJoinCondition,
2132 mergeJoinConditions = lappend(mergeJoinConditions,
2133 mergeJoinCondition);
2134 }
2135 }
2136
2137 if (resultRelations == NIL)
2138 {
2139 /*
2140 * We managed to exclude every child rel, so generate a
2141 * dummy one-relation plan using info for the top target
2142 * rel (even though that may not be a leaf target).
2143 * Although it's clear that no data will be updated or
2144 * deleted, we still need to have a ModifyTable node so
2145 * that any statement triggers will be executed. (This
2146 * could be cleaner if we fixed nodeModifyTable.c to allow
2147 * zero target relations, but that probably wouldn't be a
2148 * net win.)
2149 */
2150 resultRelations = list_make1_int(parse->resultRelation);
2151 if (parse->commandType == CMD_UPDATE)
2152 updateColnosLists = list_make1(root->update_colnos);
2153 if (parse->withCheckOptions)
2154 withCheckOptionLists = list_make1(parse->withCheckOptions);
2155 if (parse->returningList)
2156 returningLists = list_make1(parse->returningList);
2157 if (parse->mergeActionList)
2158 mergeActionLists = list_make1(parse->mergeActionList);
2159 if (parse->commandType == CMD_MERGE)
2160 mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2161 }
2162 }
2163 else
2164 {
2165 /* Single-relation INSERT/UPDATE/DELETE/MERGE. */
2166 rootRelation = 0; /* there's no separate root rel */
2167 resultRelations = list_make1_int(parse->resultRelation);
2168 if (parse->commandType == CMD_UPDATE)
2169 updateColnosLists = list_make1(root->update_colnos);
2170 if (parse->withCheckOptions)
2171 withCheckOptionLists = list_make1(parse->withCheckOptions);
2172 if (parse->returningList)
2173 returningLists = list_make1(parse->returningList);
2174 if (parse->mergeActionList)
2175 mergeActionLists = list_make1(parse->mergeActionList);
2176 if (parse->commandType == CMD_MERGE)
2177 mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2178 }
2179
2180 /*
2181 * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
2182 * will have dealt with fetching non-locked marked rows, else we
2183 * need to have ModifyTable do that.
2184 */
2185 if (parse->rowMarks)
2186 rowMarks = NIL;
2187 else
2188 rowMarks = root->rowMarks;
2189
2190 path = (Path *)
2192 path,
2193 parse->commandType,
2194 parse->canSetTag,
2195 parse->resultRelation,
2196 rootRelation,
2197 resultRelations,
2198 updateColnosLists,
2199 withCheckOptionLists,
2200 returningLists,
2201 rowMarks,
2202 parse->onConflict,
2203 mergeActionLists,
2204 mergeJoinConditions,
2206 }
2207
2208 /* And shove it into final_rel */
2209 add_path(final_rel, path);
2210 }
2211
2212 /*
2213 * Generate partial paths for final_rel, too, if outer query levels might
2214 * be able to make use of them.
2215 */
2216 if (final_rel->consider_parallel && root->query_level > 1 &&
2217 !limit_needed(parse))
2218 {
2219 Assert(!parse->rowMarks && parse->commandType == CMD_SELECT);
2220 foreach(lc, current_rel->partial_pathlist)
2221 {
2222 Path *partial_path = (Path *) lfirst(lc);
2223
2225 }
2226 }
2227
2228 extra.limit_needed = limit_needed(parse);
2229 extra.limit_tuples = limit_tuples;
2230 extra.count_est = count_est;
2231 extra.offset_est = offset_est;
2232
2233 /*
2234 * If there is an FDW that's responsible for all baserels of the query,
2235 * let it consider adding ForeignPaths.
2236 */
2237 if (final_rel->fdwroutine &&
2238 final_rel->fdwroutine->GetForeignUpperPaths)
2239 final_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_FINAL,
2241 &extra);
2242
2243 /* Let extensions possibly add some more paths */
2245 (*create_upper_paths_hook) (root, UPPERREL_FINAL,
2246 current_rel, final_rel, &extra);
2247
2248 /* Note: currently, we leave it to callers to do set_cheapest() */
2249}
List * adjust_inherited_attnums_multilevel(PlannerInfo *root, List *attnums, Index child_relid, Index top_parent_relid)
Definition appendinfo.c:733
BMS_Membership bms_membership(const Bitmapset *a)
Definition bitmapset.c:780
@ BMS_MULTIPLE
Definition bitmapset.h:73
unsigned int Index
Definition c.h:628
WindowFuncLists * find_window_functions(Node *clause, Index maxWinRef)
Definition clauses.c:240
List * lappend_int(List *list, int datum)
Definition list.c:357
@ CMD_MERGE
Definition nodes.h:279
@ CMD_UPDATE
Definition nodes.h:276
@ CMD_SELECT
Definition nodes.h:275
int assign_special_exec_param(PlannerInfo *root)
const char * LCS_asString(LockClauseStrength strength)
Definition analyze.c:3324
ModifyTablePath * create_modifytable_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, CmdType operation, bool canSetTag, Index nominalRelation, Index rootRelation, List *resultRelations, List *updateColnosLists, List *withCheckOptionLists, List *returningLists, List *rowMarks, OnConflictExpr *onconflict, List *mergeActionLists, List *mergeJoinConditions, int epqParam)
Definition pathnode.c:3639
LockRowsPath * create_lockrows_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *rowMarks, int epqParam)
Definition pathnode.c:3577
@ UPPERREL_FINAL
Definition pathnodes.h:152
#define list_make1_int(x1)
Definition pg_list.h:227
void preprocess_minmax_aggregates(PlannerInfo *root)
Definition planagg.c:74
RelOptInfo * query_planner(PlannerInfo *root, query_pathkeys_callback qp_callback, void *qp_extra)
Definition planmain.c:54
static List * postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
Definition planner.c:5853
static double preprocess_limit(PlannerInfo *root, double tuple_fraction, int64 *offset_est, int64 *count_est)
Definition planner.c:2656
static PathTarget * make_window_input_target(PlannerInfo *root, PathTarget *final_target, List *activeWindows)
Definition planner.c:6233
static RelOptInfo * create_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target)
Definition planner.c:4862
static void optimize_window_clauses(PlannerInfo *root, WindowFuncLists *wflists)
Definition planner.c:5890
static void name_active_windows(List *activeWindows)
Definition planner.c:6113
static PathTarget * make_sort_input_target(PlannerInfo *root, PathTarget *final_target, bool *have_postponed_srfs)
Definition planner.c:6481
static grouping_sets_data * preprocess_grouping_sets(PlannerInfo *root)
Definition planner.c:2261
static PathTarget * make_group_input_target(PlannerInfo *root, PathTarget *final_target)
Definition planner.c:5603
static List * select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
Definition planner.c:6030
bool limit_needed(Query *parse)
Definition planner.c:2841
static RelOptInfo * create_ordered_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, double limit_tuples)
Definition planner.c:5380
static RelOptInfo * create_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, grouping_sets_data *gd)
Definition planner.c:3859
static void standard_qp_callback(PlannerInfo *root, void *extra)
Definition planner.c:3532
static RelOptInfo * create_window_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *input_target, PathTarget *output_target, bool output_target_parallel_safe, WindowFuncLists *wflists, List *activeWindows)
Definition planner.c:4605
void preprocess_aggrefs(PlannerInfo *root, Node *clause)
Definition prepagg.c:110
void preprocess_targetlist(PlannerInfo *root)
Definition preptlist.c:64
RelOptInfo * plan_set_operations(PlannerInfo *root)
Definition prepunion.c:97
RelOptInfo * find_base_rel(PlannerInfo *root, int relid)
Definition relnode.c:533
Cardinality limit_tuples
Definition pathnodes.h:3678
void split_pathtarget_at_srfs_grouping(PlannerInfo *root, PathTarget *target, PathTarget *input_target, List **targets, List **targets_contain_srfs)
Definition tlist.c:868
void split_pathtarget_at_srfs(PlannerInfo *root, PathTarget *target, PathTarget *input_target, List **targets, List **targets_contain_srfs)
Definition tlist.c:845

References add_partial_path(), add_path(), adjust_appendrel_attrs_multilevel(), adjust_inherited_attnums_multilevel(), adjust_paths_for_srfs(), apply_scanjoin_target_to_paths(), Assert, assign_special_exec_param(), bms_membership(), BMS_MULTIPLE, bms_next_member(), CMD_MERGE, CMD_SELECT, CMD_UPDATE, copyObject, FinalPathExtraData::count_est, create_distinct_paths(), create_grouping_paths(), create_limit_path(), create_lockrows_path(), create_modifytable_path(), create_ordered_paths(), create_pathtarget, create_upper_paths_hook, create_window_paths(), equal(), ereport, errcode(), errmsg(), ERROR, fb(), fetch_upper_rel(), find_base_rel(), find_window_functions(), IS_DUMMY_REL, is_parallel_safe(), lappend(), lappend_int(), LCS_asString(), lfirst, limit_needed(), FinalPathExtraData::limit_needed, FinalPathExtraData::limit_tuples, linitial_int, linitial_node, list_length(), list_make1, list_make1_int, make_group_input_target(), make_pathkeys_for_sortclauses(), make_sort_input_target(), make_window_input_target(), name_active_windows(), NIL, FinalPathExtraData::offset_est, optimize_window_clauses(), parse(), plan_set_operations(), postprocess_setop_tlist(), preprocess_aggrefs(), preprocess_groupclause(), preprocess_grouping_sets(), preprocess_limit(), preprocess_minmax_aggregates(), preprocess_targetlist(), query_planner(), root, select_active_windows(), split_pathtarget_at_srfs(), split_pathtarget_at_srfs_grouping(), standard_qp_callback(), UPPERREL_DISTINCT, UPPERREL_FINAL, UPPERREL_GROUP_AGG, UPPERREL_ORDERED, UPPERREL_PARTIAL_DISTINCT, and UPPERREL_WINDOW.

Referenced by subquery_planner().

◆ has_volatile_pathkey()

static bool has_volatile_pathkey ( List keys)
static

Definition at line 3263 of file planner.c.

3264{
3265 ListCell *lc;
3266
3267 foreach(lc, keys)
3268 {
3270
3271 if (pathkey->pk_eclass->ec_has_volatile)
3272 return true;
3273 }
3274
3275 return false;
3276}

References fb(), and lfirst_node.

Referenced by adjust_group_pathkeys_for_groupagg().

◆ is_degenerate_grouping()

static bool is_degenerate_grouping ( PlannerInfo root)
static

Definition at line 4028 of file planner.c.

4029{
4030 Query *parse = root->parse;
4031
4032 return (root->hasHavingQual || parse->groupingSets) &&
4033 !parse->hasAggs && parse->groupClause == NIL;
4034}

References NIL, parse(), and root.

Referenced by create_grouping_paths().

◆ limit_needed()

bool limit_needed ( Query parse)

Definition at line 2841 of file planner.c.

2842{
2843 Node *node;
2844
2845 node = parse->limitCount;
2846 if (node)
2847 {
2848 if (IsA(node, Const))
2849 {
2850 /* NULL indicates LIMIT ALL, ie, no limit */
2851 if (!((Const *) node)->constisnull)
2852 return true; /* LIMIT with a constant value */
2853 }
2854 else
2855 return true; /* non-constant LIMIT */
2856 }
2857
2858 node = parse->limitOffset;
2859 if (node)
2860 {
2861 if (IsA(node, Const))
2862 {
2863 /* Treat NULL as no offset; the executor would too */
2864 if (!((Const *) node)->constisnull)
2865 {
2866 int64 offset = DatumGetInt64(((Const *) node)->constvalue);
2867
2868 if (offset != 0)
2869 return true; /* OFFSET with a nonzero value */
2870 }
2871 }
2872 else
2873 return true; /* non-constant OFFSET */
2874 }
2875
2876 return false; /* don't need a Limit plan node */
2877}
static int64 DatumGetInt64(Datum X)
Definition postgres.h:413

References DatumGetInt64(), fb(), IsA, and parse().

Referenced by grouping_planner(), and set_rel_consider_parallel().

◆ make_group_input_target()

static PathTarget * make_group_input_target ( PlannerInfo root,
PathTarget final_target 
)
static

Definition at line 5603 of file planner.c.

5604{
5605 Query *parse = root->parse;
5609 int i;
5610 ListCell *lc;
5611
5612 /*
5613 * We must build a target containing all grouping columns, plus any other
5614 * Vars mentioned in the query's targetlist and HAVING qual.
5615 */
5618
5619 i = 0;
5620 foreach(lc, final_target->exprs)
5621 {
5622 Expr *expr = (Expr *) lfirst(lc);
5624
5625 if (sgref && root->processed_groupClause &&
5627 root->processed_groupClause) != NULL)
5628 {
5629 /*
5630 * It's a grouping column, so add it to the input target as-is.
5631 *
5632 * Note that the target is logically below the grouping step. So
5633 * with grouping sets we need to remove the RT index of the
5634 * grouping step if there is any from the target expression.
5635 */
5636 if (parse->hasGroupRTE && parse->groupingSets != NIL)
5637 {
5638 Assert(root->group_rtindex > 0);
5639 expr = (Expr *)
5640 remove_nulling_relids((Node *) expr,
5641 bms_make_singleton(root->group_rtindex),
5642 NULL);
5643 }
5645 }
5646 else
5647 {
5648 /*
5649 * Non-grouping column, so just remember the expression for later
5650 * call to pull_var_clause.
5651 */
5653 }
5654
5655 i++;
5656 }
5657
5658 /*
5659 * If there's a HAVING clause, we'll need the Vars it uses, too.
5660 */
5661 if (parse->havingQual)
5663
5664 /*
5665 * Pull out all the Vars mentioned in non-group cols (plus HAVING), and
5666 * add them to the input target if not already present. (A Var used
5667 * directly as a GROUP BY item will be present already.) Note this
5668 * includes Vars used in resjunk items, so we are covering the needs of
5669 * ORDER BY and window specifications. Vars used within Aggrefs and
5670 * WindowFuncs will be pulled out here, too.
5671 *
5672 * Note that the target is logically below the grouping step. So with
5673 * grouping sets we need to remove the RT index of the grouping step if
5674 * there is any from the non-group Vars.
5675 */
5680 if (parse->hasGroupRTE && parse->groupingSets != NIL)
5681 {
5682 Assert(root->group_rtindex > 0);
5683 non_group_vars = (List *)
5685 bms_make_singleton(root->group_rtindex),
5686 NULL);
5687 }
5689
5690 /* clean up cruft */
5693
5694 /* XXX this causes some redundant cost calculation ... */
5696}
Bitmapset * bms_make_singleton(int x)
Definition bitmapset.c:216
PathTarget * set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
Definition costsize.c:6509
void list_free(List *list)
Definition list.c:1546
#define PVC_RECURSE_AGGREGATES
Definition optimizer.h:189
#define PVC_RECURSE_WINDOWFUNCS
Definition optimizer.h:191
#define PVC_INCLUDE_PLACEHOLDERS
Definition optimizer.h:192
#define get_pathtarget_sortgroupref(target, colno)
Definition pathnodes.h:1880
Node * remove_nulling_relids(Node *node, const Bitmapset *removable_relids, const Bitmapset *except_relids)
SortGroupClause * get_sortgroupref_clause_noerr(Index sortref, List *clauses)
Definition tlist.c:452
void add_new_columns_to_pathtarget(PathTarget *target, List *exprs)
Definition tlist.c:761
PathTarget * create_empty_pathtarget(void)
Definition tlist.c:690
List * pull_var_clause(Node *node, int flags)
Definition var.c:653

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), Assert, bms_make_singleton(), create_empty_pathtarget(), fb(), get_pathtarget_sortgroupref, get_sortgroupref_clause_noerr(), i, lappend(), lfirst, list_free(), NIL, parse(), pull_var_clause(), PVC_INCLUDE_PLACEHOLDERS, PVC_RECURSE_AGGREGATES, PVC_RECURSE_WINDOWFUNCS, remove_nulling_relids(), root, and set_pathtarget_cost_width().

Referenced by grouping_planner().

◆ make_grouping_rel()

static RelOptInfo * make_grouping_rel ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget target,
bool  target_parallel_safe,
Node havingQual 
)
static

Definition at line 3972 of file planner.c.

3975{
3976 RelOptInfo *grouped_rel;
3977
3979 {
3981 input_rel->relids);
3982 grouped_rel->reloptkind = RELOPT_OTHER_UPPER_REL;
3983 }
3984 else
3985 {
3986 /*
3987 * By tradition, the relids set for the main grouping relation is
3988 * NULL. (This could be changed, but might require adjustments
3989 * elsewhere.)
3990 */
3992 }
3993
3994 /* Set target. */
3995 grouped_rel->reltarget = target;
3996
3997 /*
3998 * If the input relation is not parallel-safe, then the grouped relation
3999 * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
4000 * target list and HAVING quals are parallel-safe.
4001 */
4002 if (input_rel->consider_parallel && target_parallel_safe &&
4003 is_parallel_safe(root, havingQual))
4004 grouped_rel->consider_parallel = true;
4005
4006 /* Assume that the same path generation strategies are allowed */
4007 grouped_rel->pgs_mask = input_rel->pgs_mask;
4008
4009 /*
4010 * If the input rel belongs to a single FDW, so does the grouped rel.
4011 */
4012 grouped_rel->serverid = input_rel->serverid;
4013 grouped_rel->userid = input_rel->userid;
4014 grouped_rel->useridiscurrent = input_rel->useridiscurrent;
4015 grouped_rel->fdwroutine = input_rel->fdwroutine;
4016
4017 return grouped_rel;
4018}
@ RELOPT_OTHER_UPPER_REL
Definition pathnodes.h:970

References RelOptInfo::consider_parallel, fb(), fetch_upper_rel(), IS_OTHER_REL, is_parallel_safe(), RelOptInfo::pgs_mask, RELOPT_OTHER_UPPER_REL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, RelOptInfo::serverid, UPPERREL_GROUP_AGG, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by create_grouping_paths(), and create_partitionwise_grouping_paths().

◆ make_ordered_path()

static Path * make_ordered_path ( PlannerInfo root,
RelOptInfo rel,
Path path,
Path cheapest_path,
List pathkeys,
double  limit_tuples 
)
static

Definition at line 7767 of file planner.c.

7769{
7770 bool is_sorted;
7771 int presorted_keys;
7772
7774 path->pathkeys,
7775 &presorted_keys);
7776
7777 if (!is_sorted)
7778 {
7779 /*
7780 * Try at least sorting the cheapest path and also try incrementally
7781 * sorting any path which is partially sorted already (no need to deal
7782 * with paths which have presorted keys when incremental sort is
7783 * disabled unless it's the cheapest input path).
7784 */
7785 if (path != cheapest_path &&
7786 (presorted_keys == 0 || !enable_incremental_sort))
7787 return NULL;
7788
7789 /*
7790 * We've no need to consider both a sort and incremental sort. We'll
7791 * just do a sort if there are no presorted keys and an incremental
7792 * sort when there are presorted keys.
7793 */
7794 if (presorted_keys == 0 || !enable_incremental_sort)
7795 path = (Path *) create_sort_path(root,
7796 rel,
7797 path,
7798 pathkeys,
7799 limit_tuples);
7800 else
7802 rel,
7803 path,
7804 pathkeys,
7805 presorted_keys,
7806 limit_tuples);
7807 }
7808
7809 return path;
7810}

References create_incremental_sort_path(), create_sort_path(), enable_incremental_sort, fb(), Path::pathkeys, pathkeys_count_contained_in(), and root.

Referenced by add_paths_to_grouping_rel(), create_final_distinct_paths(), create_partial_distinct_paths(), and create_partial_grouping_paths().

◆ make_partial_grouping_target()

static PathTarget * make_partial_grouping_target ( PlannerInfo root,
PathTarget grouping_target,
Node havingQual 
)
static

Definition at line 5715 of file planner.c.

5718{
5722 int i;
5723 ListCell *lc;
5724
5727
5728 i = 0;
5729 foreach(lc, grouping_target->exprs)
5730 {
5731 Expr *expr = (Expr *) lfirst(lc);
5733
5734 if (sgref && root->processed_groupClause &&
5736 root->processed_groupClause) != NULL)
5737 {
5738 /*
5739 * It's a grouping column, so add it to the partial_target as-is.
5740 * (This allows the upper agg step to repeat the grouping calcs.)
5741 */
5743 }
5744 else
5745 {
5746 /*
5747 * Non-grouping column, so just remember the expression for later
5748 * call to pull_var_clause.
5749 */
5751 }
5752
5753 i++;
5754 }
5755
5756 /*
5757 * If there's a HAVING clause, we'll need the Vars/Aggrefs it uses, too.
5758 */
5759 if (havingQual)
5760 non_group_cols = lappend(non_group_cols, havingQual);
5761
5762 /*
5763 * Pull out all the Vars, PlaceHolderVars, and Aggrefs mentioned in
5764 * non-group cols (plus HAVING), and add them to the partial_target if not
5765 * already present. (An expression used directly as a GROUP BY item will
5766 * be present already.) Note this includes Vars used in resjunk items, so
5767 * we are covering the needs of ORDER BY and window specifications.
5768 */
5773
5775
5776 /*
5777 * Adjust Aggrefs to put them in partial mode. At this point all Aggrefs
5778 * are at the top level of the target list, so we can just scan the list
5779 * rather than recursing through the expression trees.
5780 */
5781 foreach(lc, partial_target->exprs)
5782 {
5783 Aggref *aggref = (Aggref *) lfirst(lc);
5784
5785 if (IsA(aggref, Aggref))
5786 {
5788
5789 /*
5790 * We shouldn't need to copy the substructure of the Aggref node,
5791 * but flat-copy the node itself to avoid damaging other trees.
5792 */
5794 memcpy(newaggref, aggref, sizeof(Aggref));
5795
5796 /* For now, assume serialization is required */
5798
5799 lfirst(lc) = newaggref;
5800 }
5801 }
5802
5803 /* clean up cruft */
5806
5807 /* XXX this causes some redundant cost calculation ... */
5809}
#define PVC_INCLUDE_AGGREGATES
Definition optimizer.h:188
void mark_partial_aggref(Aggref *agg, AggSplit aggsplit)
Definition planner.c:5818

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), AGGSPLIT_INITIAL_SERIAL, create_empty_pathtarget(), fb(), get_pathtarget_sortgroupref, get_sortgroupref_clause_noerr(), i, IsA, lappend(), lfirst, list_free(), makeNode, mark_partial_aggref(), NIL, pull_var_clause(), PVC_INCLUDE_AGGREGATES, PVC_INCLUDE_PLACEHOLDERS, PVC_RECURSE_WINDOWFUNCS, root, and set_pathtarget_cost_width().

Referenced by create_partial_grouping_paths().

◆ make_pathkeys_for_window()

static List * make_pathkeys_for_window ( PlannerInfo root,
WindowClause wc,
List tlist 
)
static

Definition at line 6353 of file planner.c.

6355{
6356 List *window_pathkeys = NIL;
6357
6358 /* Throw error if can't sort */
6360 ereport(ERROR,
6362 errmsg("could not implement window PARTITION BY"),
6363 errdetail("Window partitioning columns must be of sortable datatypes.")));
6365 ereport(ERROR,
6367 errmsg("could not implement window ORDER BY"),
6368 errdetail("Window ordering columns must be of sortable datatypes.")));
6369
6370 /*
6371 * First fetch the pathkeys for the PARTITION BY clause. We can safely
6372 * remove any clauses from the wc->partitionClause for redundant pathkeys.
6373 */
6374 if (wc->partitionClause != NIL)
6375 {
6376 bool sortable;
6377
6379 &wc->partitionClause,
6380 tlist,
6381 true,
6382 false,
6383 &sortable,
6384 false);
6385
6387 }
6388
6389 /*
6390 * In principle, we could also consider removing redundant ORDER BY items
6391 * too as doing so does not alter the result of peer row checks done by
6392 * the executor. However, we must *not* remove the ordering column for
6393 * RANGE OFFSET cases, as the executor needs that for in_range tests even
6394 * if it's known to be equal to some partitioning column.
6395 */
6396 if (wc->orderClause != NIL)
6397 {
6399
6401 wc->orderClause,
6402 tlist);
6403
6404 /* Okay, make the combined pathkeys */
6405 if (window_pathkeys != NIL)
6406 window_pathkeys = append_pathkeys(window_pathkeys, orderby_pathkeys);
6407 else
6408 window_pathkeys = orderby_pathkeys;
6409 }
6410
6411 return window_pathkeys;
6412}
List * make_pathkeys_for_sortclauses_extended(PlannerInfo *root, List **sortclauses, List *tlist, bool remove_redundant, bool remove_group_rtindex, bool *sortable, bool set_ec_sortref)
Definition pathkeys.c:1381
List * partitionClause
List * orderClause

References append_pathkeys(), Assert, ereport, errcode(), errdetail(), errmsg(), ERROR, fb(), grouping_is_sortable(), make_pathkeys_for_sortclauses(), make_pathkeys_for_sortclauses_extended(), NIL, WindowClause::orderClause, WindowClause::partitionClause, and root.

Referenced by create_one_window_path(), and standard_qp_callback().

◆ make_sort_input_target()

static PathTarget * make_sort_input_target ( PlannerInfo root,
PathTarget final_target,
bool have_postponed_srfs 
)
static

Definition at line 6481 of file planner.c.

6484{
6485 Query *parse = root->parse;
6487 int ncols;
6488 bool *col_is_srf;
6489 bool *postpone_col;
6490 bool have_srf;
6491 bool have_volatile;
6492 bool have_expensive;
6493 bool have_srf_sortcols;
6494 bool postpone_srfs;
6497 int i;
6498 ListCell *lc;
6499
6500 /* Shouldn't get here unless query has ORDER BY */
6501 Assert(parse->sortClause);
6502
6503 *have_postponed_srfs = false; /* default result */
6504
6505 /* Inspect tlist and collect per-column information */
6506 ncols = list_length(final_target->exprs);
6507 col_is_srf = (bool *) palloc0(ncols * sizeof(bool));
6508 postpone_col = (bool *) palloc0(ncols * sizeof(bool));
6510
6511 i = 0;
6512 foreach(lc, final_target->exprs)
6513 {
6514 Expr *expr = (Expr *) lfirst(lc);
6515
6516 /*
6517 * If the column has a sortgroupref, assume it has to be evaluated
6518 * before sorting. Generally such columns would be ORDER BY, GROUP
6519 * BY, etc targets. One exception is columns that were removed from
6520 * GROUP BY by remove_useless_groupby_columns() ... but those would
6521 * only be Vars anyway. There don't seem to be any cases where it
6522 * would be worth the trouble to double-check.
6523 */
6525 {
6526 /*
6527 * Check for SRF or volatile functions. Check the SRF case first
6528 * because we must know whether we have any postponed SRFs.
6529 */
6530 if (parse->hasTargetSRFs &&
6531 expression_returns_set((Node *) expr))
6532 {
6533 /* We'll decide below whether these are postponable */
6534 col_is_srf[i] = true;
6535 have_srf = true;
6536 }
6537 else if (contain_volatile_functions((Node *) expr))
6538 {
6539 /* Unconditionally postpone */
6540 postpone_col[i] = true;
6541 have_volatile = true;
6542 }
6543 else
6544 {
6545 /*
6546 * Else check the cost. XXX it's annoying to have to do this
6547 * when set_pathtarget_cost_width() just did it. Refactor to
6548 * allow sharing the work?
6549 */
6550 QualCost cost;
6551
6552 cost_qual_eval_node(&cost, (Node *) expr, root);
6553
6554 /*
6555 * We arbitrarily define "expensive" as "more than 10X
6556 * cpu_operator_cost". Note this will take in any PL function
6557 * with default cost.
6558 */
6559 if (cost.per_tuple > 10 * cpu_operator_cost)
6560 {
6561 postpone_col[i] = true;
6562 have_expensive = true;
6563 }
6564 }
6565 }
6566 else
6567 {
6568 /* For sortgroupref cols, just check if any contain SRFs */
6569 if (!have_srf_sortcols &&
6570 parse->hasTargetSRFs &&
6571 expression_returns_set((Node *) expr))
6572 have_srf_sortcols = true;
6573 }
6574
6575 i++;
6576 }
6577
6578 /*
6579 * We can postpone SRFs if we have some but none are in sortgroupref cols.
6580 */
6582
6583 /*
6584 * If we don't need a post-sort projection, just return final_target.
6585 */
6586 if (!(postpone_srfs || have_volatile ||
6587 (have_expensive &&
6588 (parse->limitCount || root->tuple_fraction > 0))))
6589 return final_target;
6590
6591 /*
6592 * Report whether the post-sort projection will contain set-returning
6593 * functions. This is important because it affects whether the Sort can
6594 * rely on the query's LIMIT (if any) to bound the number of rows it needs
6595 * to return.
6596 */
6598
6599 /*
6600 * Construct the sort-input target, taking all non-postponable columns and
6601 * then adding Vars, PlaceHolderVars, Aggrefs, and WindowFuncs found in
6602 * the postponable ones.
6603 */
6606
6607 i = 0;
6608 foreach(lc, final_target->exprs)
6609 {
6610 Expr *expr = (Expr *) lfirst(lc);
6611
6612 if (postpone_col[i] || (postpone_srfs && col_is_srf[i]))
6614 else
6617
6618 i++;
6619 }
6620
6621 /*
6622 * Pull out all the Vars, Aggrefs, and WindowFuncs mentioned in
6623 * postponable columns, and add them to the sort-input target if not
6624 * already present. (Some might be there already.) We mustn't
6625 * deconstruct Aggrefs or WindowFuncs here, since the projection node
6626 * would be unable to recompute them.
6627 */
6633
6634 /* clean up cruft */
6637
6638 /* XXX this represents even more redundant cost calculation ... */
6640}
bool contain_volatile_functions(Node *clause)
Definition clauses.c:547
double cpu_operator_cost
Definition costsize.c:134
void cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
Definition costsize.c:4924
bool expression_returns_set(Node *clause)
Definition nodeFuncs.c:763
#define PVC_INCLUDE_WINDOWFUNCS
Definition optimizer.h:190
Cost per_tuple
Definition pathnodes.h:121

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), Assert, contain_volatile_functions(), cost_qual_eval_node(), cpu_operator_cost, create_empty_pathtarget(), expression_returns_set(), fb(), get_pathtarget_sortgroupref, i, lappend(), lfirst, list_free(), list_length(), NIL, palloc0(), parse(), QualCost::per_tuple, pull_var_clause(), PVC_INCLUDE_AGGREGATES, PVC_INCLUDE_PLACEHOLDERS, PVC_INCLUDE_WINDOWFUNCS, root, and set_pathtarget_cost_width().

Referenced by grouping_planner().

◆ make_window_input_target()

static PathTarget * make_window_input_target ( PlannerInfo root,
PathTarget final_target,
List activeWindows 
)
static

Definition at line 6233 of file planner.c.

6236{
6241 int i;
6242 ListCell *lc;
6243
6244 Assert(root->parse->hasWindowFuncs);
6245
6246 /*
6247 * Collect the sortgroupref numbers of window PARTITION/ORDER BY clauses
6248 * into a bitmapset for convenient reference below.
6249 */
6250 sgrefs = NULL;
6251 foreach(lc, activeWindows)
6252 {
6254 ListCell *lc2;
6255
6256 foreach(lc2, wc->partitionClause)
6257 {
6259
6260 sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6261 }
6262 foreach(lc2, wc->orderClause)
6263 {
6265
6266 sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6267 }
6268 }
6269
6270 /* Add in sortgroupref numbers of GROUP BY clauses, too */
6271 foreach(lc, root->processed_groupClause)
6272 {
6274
6275 sgrefs = bms_add_member(sgrefs, grpcl->tleSortGroupRef);
6276 }
6277
6278 /*
6279 * Construct a target containing all the non-flattenable targetlist items,
6280 * and save aside the others for a moment.
6281 */
6284
6285 i = 0;
6286 foreach(lc, final_target->exprs)
6287 {
6288 Expr *expr = (Expr *) lfirst(lc);
6290
6291 /*
6292 * Don't want to deconstruct window clauses or GROUP BY items. (Note
6293 * that such items can't contain window functions, so it's okay to
6294 * compute them below the WindowAgg nodes.)
6295 */
6296 if (sgref != 0 && bms_is_member(sgref, sgrefs))
6297 {
6298 /*
6299 * Don't want to deconstruct this value, so add it to the input
6300 * target as-is.
6301 */
6303 }
6304 else
6305 {
6306 /*
6307 * Column is to be flattened, so just remember the expression for
6308 * later call to pull_var_clause.
6309 */
6311 }
6312
6313 i++;
6314 }
6315
6316 /*
6317 * Pull out all the Vars and Aggrefs mentioned in flattenable columns, and
6318 * add them to the input target if not already present. (Some might be
6319 * there already because they're used directly as window/group clauses.)
6320 *
6321 * Note: it's essential to use PVC_INCLUDE_AGGREGATES here, so that any
6322 * Aggrefs are placed in the Agg node's tlist and not left to be computed
6323 * at higher levels. On the other hand, we should recurse into
6324 * WindowFuncs to make sure their input expressions are available.
6325 */
6331
6332 /* clean up cruft */
6335
6336 /* XXX this causes some redundant cost calculation ... */
6338}

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), Assert, bms_add_member(), bms_is_member(), create_empty_pathtarget(), fb(), get_pathtarget_sortgroupref, i, lappend(), lfirst, lfirst_node, list_free(), NIL, WindowClause::orderClause, WindowClause::partitionClause, pull_var_clause(), PVC_INCLUDE_AGGREGATES, PVC_INCLUDE_PLACEHOLDERS, PVC_RECURSE_WINDOWFUNCS, root, and set_pathtarget_cost_width().

Referenced by grouping_planner().

◆ mark_partial_aggref()

void mark_partial_aggref ( Aggref agg,
AggSplit  aggsplit 
)

Definition at line 5818 of file planner.c.

5819{
5820 /* aggtranstype should be computed by this point */
5821 Assert(OidIsValid(agg->aggtranstype));
5822 /* ... but aggsplit should still be as the parser left it */
5823 Assert(agg->aggsplit == AGGSPLIT_SIMPLE);
5824
5825 /* Mark the Aggref with the intended partial-aggregation mode */
5826 agg->aggsplit = aggsplit;
5827
5828 /*
5829 * Adjust result type if needed. Normally, a partial aggregate returns
5830 * the aggregate's transition type; but if that's INTERNAL and we're
5831 * serializing, it returns BYTEA instead.
5832 */
5833 if (DO_AGGSPLIT_SKIPFINAL(aggsplit))
5834 {
5835 if (agg->aggtranstype == INTERNALOID && DO_AGGSPLIT_SERIALIZE(aggsplit))
5836 agg->aggtype = BYTEAOID;
5837 else
5838 agg->aggtype = agg->aggtranstype;
5839 }
5840}
#define DO_AGGSPLIT_SKIPFINAL(as)
Definition nodes.h:396
#define DO_AGGSPLIT_SERIALIZE(as)
Definition nodes.h:397

References AGGSPLIT_SIMPLE, Assert, DO_AGGSPLIT_SERIALIZE, DO_AGGSPLIT_SKIPFINAL, fb(), and OidIsValid.

Referenced by convert_combining_aggrefs(), create_rel_agg_info(), and make_partial_grouping_target().

◆ name_active_windows()

static void name_active_windows ( List activeWindows)
static

Definition at line 6113 of file planner.c.

6114{
6115 int next_n = 1;
6116 char newname[16];
6117 ListCell *lc;
6118
6119 foreach(lc, activeWindows)
6120 {
6122
6123 /* Nothing to do if it has a name already. */
6124 if (wc->name)
6125 continue;
6126
6127 /* Select a name not currently present in the list. */
6128 for (;;)
6129 {
6130 ListCell *lc2;
6131
6132 snprintf(newname, sizeof(newname), "w%d", next_n++);
6133 foreach(lc2, activeWindows)
6134 {
6136
6137 if (wc2->name && strcmp(wc2->name, newname) == 0)
6138 break; /* matched */
6139 }
6140 if (lc2 == NULL)
6141 break; /* reached the end with no match */
6142 }
6143 wc->name = pstrdup(newname);
6144 }
6145}
#define snprintf
Definition port.h:260

References fb(), lfirst_node, pstrdup(), and snprintf.

Referenced by grouping_planner().

◆ optimize_window_clauses()

static void optimize_window_clauses ( PlannerInfo root,
WindowFuncLists wflists 
)
static

Definition at line 5890 of file planner.c.

5891{
5892 List *windowClause = root->parse->windowClause;
5893 ListCell *lc;
5894
5895 foreach(lc, windowClause)
5896 {
5898 ListCell *lc2;
5899 int optimizedFrameOptions = 0;
5900
5901 Assert(wc->winref <= wflists->maxWinRef);
5902
5903 /* skip any WindowClauses that have no WindowFuncs */
5904 if (wflists->windowFuncs[wc->winref] == NIL)
5905 continue;
5906
5907 foreach(lc2, wflists->windowFuncs[wc->winref])
5908 {
5913
5915
5916 /* Check if there's a support function for 'wfunc' */
5917 if (!OidIsValid(prosupport))
5918 break; /* can't optimize this WindowClause */
5919
5921 req.window_clause = wc;
5922 req.window_func = wfunc;
5923 req.frameOptions = wc->frameOptions;
5924
5925 /* call the support function */
5928 PointerGetDatum(&req)));
5929
5930 /*
5931 * Skip to next WindowClause if the support function does not
5932 * support this request type.
5933 */
5934 if (res == NULL)
5935 break;
5936
5937 /*
5938 * Save these frameOptions for the first WindowFunc for this
5939 * WindowClause.
5940 */
5941 if (foreach_current_index(lc2) == 0)
5943
5944 /*
5945 * On subsequent WindowFuncs, if the frameOptions are not the same
5946 * then we're unable to optimize the frameOptions for this
5947 * WindowClause.
5948 */
5949 else if (optimizedFrameOptions != res->frameOptions)
5950 break; /* skip to the next WindowClause, if any */
5951 }
5952
5953 /* adjust the frameOptions if all WindowFunc's agree that it's ok */
5954 if (lc2 == NULL && wc->frameOptions != optimizedFrameOptions)
5955 {
5956 ListCell *lc3;
5957
5958 /* apply the new frame options */
5960
5961 /*
5962 * We now check to see if changing the frameOptions has caused
5963 * this WindowClause to be a duplicate of some other WindowClause.
5964 * This can only happen if we have multiple WindowClauses, so
5965 * don't bother if there's only 1.
5966 */
5967 if (list_length(windowClause) == 1)
5968 continue;
5969
5970 /*
5971 * Do the duplicate check and reuse the existing WindowClause if
5972 * we find a duplicate.
5973 */
5974 foreach(lc3, windowClause)
5975 {
5977
5978 /* skip over the WindowClause we're currently editing */
5979 if (existing_wc == wc)
5980 continue;
5981
5982 /*
5983 * Perform the same duplicate check that is done in
5984 * transformWindowFuncCall.
5985 */
5986 if (equal(wc->partitionClause, existing_wc->partitionClause) &&
5987 equal(wc->orderClause, existing_wc->orderClause) &&
5988 wc->frameOptions == existing_wc->frameOptions &&
5989 equal(wc->startOffset, existing_wc->startOffset) &&
5990 equal(wc->endOffset, existing_wc->endOffset))
5991 {
5992 ListCell *lc4;
5993
5994 /*
5995 * Now move each WindowFunc in 'wc' into 'existing_wc'.
5996 * This required adjusting each WindowFunc's winref and
5997 * moving the WindowFuncs in 'wc' to the list of
5998 * WindowFuncs in 'existing_wc'.
5999 */
6000 foreach(lc4, wflists->windowFuncs[wc->winref])
6001 {
6003
6004 wfunc->winref = existing_wc->winref;
6005 }
6006
6007 /* move list items */
6008 wflists->windowFuncs[existing_wc->winref] = list_concat(wflists->windowFuncs[existing_wc->winref],
6009 wflists->windowFuncs[wc->winref]);
6010 wflists->windowFuncs[wc->winref] = NIL;
6011
6012 /*
6013 * transformWindowFuncCall() should have made sure there
6014 * are no other duplicates, so we needn't bother looking
6015 * any further.
6016 */
6017 break;
6018 }
6019 }
6020 }
6021 }
6022}
#define OidFunctionCall1(functionId, arg1)
Definition fmgr.h:722
RegProcedure get_func_support(Oid funcid)
Definition lsyscache.c:2008
static Datum PointerGetDatum(const void *X)
Definition postgres.h:352
static Pointer DatumGetPointer(Datum X)
Definition postgres.h:342
Node * startOffset
Node * endOffset
Index winref
Definition primnodes.h:611

References Assert, DatumGetPointer(), WindowClause::endOffset, equal(), fb(), foreach_current_index, WindowClause::frameOptions, SupportRequestOptimizeWindowClause::frameOptions, get_func_support(), lfirst_node, list_concat(), list_length(), NIL, OidFunctionCall1, OidIsValid, WindowClause::orderClause, WindowClause::partitionClause, PointerGetDatum(), root, WindowClause::startOffset, WindowFunc::winfnoid, WindowClause::winref, and WindowFunc::winref.

Referenced by grouping_planner().

◆ plan_cluster_use_sort()

bool plan_cluster_use_sort ( Oid  tableOid,
Oid  indexOid 
)

Definition at line 6899 of file planner.c.

6900{
6902 Query *query;
6903 PlannerGlobal *glob;
6905 RelOptInfo *rel;
6906 IndexOptInfo *indexInfo;
6912 ListCell *lc;
6913
6914 /* We can short-circuit the cost comparison if indexscans are disabled */
6915 if (!enable_indexscan)
6916 return true; /* use sort */
6917
6918 /* Set up mostly-dummy planner state */
6919 query = makeNode(Query);
6920 query->commandType = CMD_SELECT;
6921
6922 glob = makeNode(PlannerGlobal);
6923
6925 root->parse = query;
6926 root->glob = glob;
6927 root->query_level = 1;
6928 root->planner_cxt = CurrentMemoryContext;
6929 root->wt_param_id = -1;
6930 root->join_domains = list_make1(makeNode(JoinDomain));
6931
6932 /* Build a minimal RTE for the rel */
6934 rte->rtekind = RTE_RELATION;
6935 rte->relid = tableOid;
6936 rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
6937 rte->rellockmode = AccessShareLock;
6938 rte->lateral = false;
6939 rte->inh = false;
6940 rte->inFromCl = true;
6941 query->rtable = list_make1(rte);
6942 addRTEPermissionInfo(&query->rteperminfos, rte);
6943
6944 /* Set up RTE/RelOptInfo arrays */
6946
6947 /* Build RelOptInfo */
6948 rel = build_simple_rel(root, 1, NULL);
6949
6950 /* Locate IndexOptInfo for the target index */
6951 indexInfo = NULL;
6952 foreach(lc, rel->indexlist)
6953 {
6954 indexInfo = lfirst_node(IndexOptInfo, lc);
6955 if (indexInfo->indexoid == indexOid)
6956 break;
6957 }
6958
6959 /*
6960 * It's possible that get_relation_info did not generate an IndexOptInfo
6961 * for the desired index; this could happen if it's not yet reached its
6962 * indcheckxmin usability horizon, or if it's a system index and we're
6963 * ignoring system indexes. In such cases we should tell CLUSTER to not
6964 * trust the index contents but use seqscan-and-sort.
6965 */
6966 if (lc == NULL) /* not in the list? */
6967 return true; /* use sort */
6968
6969 /*
6970 * Rather than doing all the pushups that would be needed to use
6971 * set_baserel_size_estimates, just do a quick hack for rows and width.
6972 */
6973 rel->rows = rel->tuples;
6974 rel->reltarget->width = get_relation_data_width(tableOid, NULL);
6975
6976 root->total_table_pages = rel->pages;
6977
6978 /*
6979 * Determine eval cost of the index expressions, if any. We need to
6980 * charge twice that amount for each tuple comparison that happens during
6981 * the sort, since tuplesort.c will have to re-evaluate the index
6982 * expressions each time. (XXX that's pretty inefficient...)
6983 */
6984 cost_qual_eval(&indexExprCost, indexInfo->indexprs, root);
6985 comparisonCost = 2.0 * (indexExprCost.startup + indexExprCost.per_tuple);
6986
6987 /* Estimate the cost of seq scan + sort */
6990 seqScanPath->disabled_nodes,
6991 seqScanPath->total_cost, rel->tuples, rel->reltarget->width,
6993
6994 /* Estimate the cost of index scan */
6996 NIL, NIL, NIL, NIL,
6997 ForwardScanDirection, false,
6998 NULL, 1.0, false);
6999
7000 return (seqScanAndSortPath.total_cost < indexScanPath->path.total_cost);
7001}
void cost_sort(Path *path, PlannerInfo *root, List *pathkeys, int input_disabled_nodes, Cost input_cost, double tuples, int width, Cost comparison_cost, int sort_mem, double limit_tuples)
Definition costsize.c:2200
void cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
Definition costsize.c:4898
bool enable_indexscan
Definition costsize.c:146
int maintenance_work_mem
Definition globals.c:133
#define AccessShareLock
Definition lockdefs.h:36
MemoryContext CurrentMemoryContext
Definition mcxt.c:160
double Cost
Definition nodes.h:261
RTEPermissionInfo * addRTEPermissionInfo(List **rteperminfos, RangeTblEntry *rte)
@ RTE_RELATION
IndexPath * create_index_path(PlannerInfo *root, IndexOptInfo *index, List *indexclauses, List *indexorderbys, List *indexorderbycols, List *pathkeys, ScanDirection indexscandir, bool indexonly, Relids required_outer, double loop_count, bool partial_path)
Definition pathnode.c:1047
Path * create_seqscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer, int parallel_workers)
Definition pathnode.c:981
int32 get_relation_data_width(Oid relid, int32 *attr_widths)
Definition plancat.c:1477
void setup_simple_rel_arrays(PlannerInfo *root)
Definition relnode.c:111
RelOptInfo * build_simple_rel(PlannerInfo *root, int relid, RelOptInfo *parent)
Definition relnode.c:209
@ ForwardScanDirection
Definition sdir.h:28
List * rtable
Definition parsenodes.h:175
CmdType commandType
Definition parsenodes.h:121
Cardinality tuples
Definition pathnodes.h:1084
BlockNumber pages
Definition pathnodes.h:1083
List * indexlist
Definition pathnodes.h:1079

References AccessShareLock, addRTEPermissionInfo(), build_simple_rel(), CMD_SELECT, Query::commandType, cost_qual_eval(), cost_sort(), create_index_path(), create_seqscan_path(), CurrentMemoryContext, enable_indexscan, fb(), ForwardScanDirection, get_relation_data_width(), RelOptInfo::indexlist, IndexOptInfo::indexoid, lfirst_node, list_make1, maintenance_work_mem, makeNode, NIL, RelOptInfo::pages, RelOptInfo::reltarget, root, RelOptInfo::rows, Query::rtable, RTE_RELATION, setup_simple_rel_arrays(), RelOptInfo::tuples, and PathTarget::width.

Referenced by copy_table_data().

◆ plan_create_index_workers()

int plan_create_index_workers ( Oid  tableOid,
Oid  indexOid 
)

Definition at line 7021 of file planner.c.

7022{
7024 Query *query;
7025 PlannerGlobal *glob;
7027 Relation heap;
7029 RelOptInfo *rel;
7030 int parallel_workers;
7032 double reltuples;
7033 double allvisfrac;
7034
7035 /*
7036 * We don't allow performing parallel operation in standalone backend or
7037 * when parallelism is disabled.
7038 */
7040 return 0;
7041
7042 /* Set up largely-dummy planner state */
7043 query = makeNode(Query);
7044 query->commandType = CMD_SELECT;
7045
7046 glob = makeNode(PlannerGlobal);
7047
7049 root->parse = query;
7050 root->glob = glob;
7051 root->query_level = 1;
7052 root->planner_cxt = CurrentMemoryContext;
7053 root->wt_param_id = -1;
7054 root->join_domains = list_make1(makeNode(JoinDomain));
7055
7056 /*
7057 * Build a minimal RTE.
7058 *
7059 * Mark the RTE with inh = true. This is a kludge to prevent
7060 * get_relation_info() from fetching index info, which is necessary
7061 * because it does not expect that any IndexOptInfo is currently
7062 * undergoing REINDEX.
7063 */
7065 rte->rtekind = RTE_RELATION;
7066 rte->relid = tableOid;
7067 rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
7068 rte->rellockmode = AccessShareLock;
7069 rte->lateral = false;
7070 rte->inh = true;
7071 rte->inFromCl = true;
7072 query->rtable = list_make1(rte);
7073 addRTEPermissionInfo(&query->rteperminfos, rte);
7074
7075 /* Set up RTE/RelOptInfo arrays */
7077
7078 /* Build RelOptInfo */
7079 rel = build_simple_rel(root, 1, NULL);
7080
7081 /* Rels are assumed already locked by the caller */
7082 heap = table_open(tableOid, NoLock);
7083 index = index_open(indexOid, NoLock);
7084
7085 /*
7086 * Determine if it's safe to proceed.
7087 *
7088 * Currently, parallel workers can't access the leader's temporary tables.
7089 * Furthermore, any index predicate or index expressions must be parallel
7090 * safe.
7091 */
7092 if (heap->rd_rel->relpersistence == RELPERSISTENCE_TEMP ||
7095 {
7096 parallel_workers = 0;
7097 goto done;
7098 }
7099
7100 /*
7101 * If parallel_workers storage parameter is set for the table, accept that
7102 * as the number of parallel worker processes to launch (though still cap
7103 * at max_parallel_maintenance_workers). Note that we deliberately do not
7104 * consider any other factor when parallel_workers is set. (e.g., memory
7105 * use by workers.)
7106 */
7107 if (rel->rel_parallel_workers != -1)
7108 {
7109 parallel_workers = Min(rel->rel_parallel_workers,
7111 goto done;
7112 }
7113
7114 /*
7115 * Estimate heap relation size ourselves, since rel->pages cannot be
7116 * trusted (heap RTE was marked as inheritance parent)
7117 */
7118 estimate_rel_size(heap, NULL, &heap_blocks, &reltuples, &allvisfrac);
7119
7120 /*
7121 * Determine number of workers to scan the heap relation using generic
7122 * model
7123 */
7124 parallel_workers = compute_parallel_worker(rel, heap_blocks, -1,
7126
7127 /*
7128 * Cap workers based on available maintenance_work_mem as needed.
7129 *
7130 * Note that each tuplesort participant receives an even share of the
7131 * total maintenance_work_mem budget. Aim to leave participants
7132 * (including the leader as a participant) with no less than 32MB of
7133 * memory. This leaves cases where maintenance_work_mem is set to 64MB
7134 * immediately past the threshold of being capable of launching a single
7135 * parallel worker to sort.
7136 */
7137 while (parallel_workers > 0 &&
7138 maintenance_work_mem / (parallel_workers + 1) < 32 * 1024)
7139 parallel_workers--;
7140
7141done:
7143 table_close(heap, NoLock);
7144
7145 return parallel_workers;
7146}
int compute_parallel_worker(RelOptInfo *rel, double heap_pages, double index_pages, int max_workers)
Definition allpaths.c:4779
uint32 BlockNumber
Definition block.h:31
int max_parallel_maintenance_workers
Definition globals.c:134
bool IsUnderPostmaster
Definition globals.c:120
void index_close(Relation relation, LOCKMODE lockmode)
Definition indexam.c:177
Relation index_open(Oid relationId, LOCKMODE lockmode)
Definition indexam.c:133
#define NoLock
Definition lockdefs.h:34
void estimate_rel_size(Relation rel, int32 *attr_widths, BlockNumber *pages, double *tuples, double *allvisfrac)
Definition plancat.c:1310
List * RelationGetIndexPredicate(Relation relation)
Definition relcache.c:5205
List * RelationGetIndexExpressions(Relation relation)
Definition relcache.c:5092
int rel_parallel_workers
Definition pathnodes.h:1091
Form_pg_class rd_rel
Definition rel.h:111
Definition type.h:96
void table_close(Relation relation, LOCKMODE lockmode)
Definition table.c:126
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition table.c:40

References AccessShareLock, addRTEPermissionInfo(), build_simple_rel(), CMD_SELECT, Query::commandType, compute_parallel_worker(), CurrentMemoryContext, estimate_rel_size(), fb(), index_close(), index_open(), is_parallel_safe(), IsUnderPostmaster, list_make1, maintenance_work_mem, makeNode, max_parallel_maintenance_workers, Min, NoLock, RelationData::rd_rel, RelOptInfo::rel_parallel_workers, RelationGetIndexExpressions(), RelationGetIndexPredicate(), root, Query::rtable, RTE_RELATION, setup_simple_rel_arrays(), table_close(), and table_open().

Referenced by index_build().

◆ planner()

PlannedStmt * planner ( Query parse,
const char query_string,
int  cursorOptions,
ParamListInfo  boundParams,
ExplainState es 
)

Definition at line 315 of file planner.c.

317{
318 PlannedStmt *result;
319
320 if (planner_hook)
321 result = (*planner_hook) (parse, query_string, cursorOptions,
322 boundParams, es);
323 else
324 result = standard_planner(parse, query_string, cursorOptions,
325 boundParams, es);
326
327 pgstat_report_plan_id(result->planId, false);
328
329 return result;
330}
void pgstat_report_plan_id(int64 plan_id, bool force)
planner_hook_type planner_hook
Definition planner.c:74
PlannedStmt * standard_planner(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams, ExplainState *es)
Definition planner.c:333

References parse(), pgstat_report_plan_id(), PlannedStmt::planId, planner_hook, and standard_planner().

Referenced by pg_plan_query().

◆ postprocess_setop_tlist()

static List * postprocess_setop_tlist ( List new_tlist,
List orig_tlist 
)
static

Definition at line 5853 of file planner.c.

5854{
5855 ListCell *l;
5857
5858 foreach(l, new_tlist)
5859 {
5862
5863 /* ignore resjunk columns in setop result */
5864 if (new_tle->resjunk)
5865 continue;
5866
5870 if (orig_tle->resjunk) /* should not happen */
5871 elog(ERROR, "resjunk output columns are not implemented");
5872 Assert(new_tle->resno == orig_tle->resno);
5873 new_tle->ressortgroupref = orig_tle->ressortgroupref;
5874 }
5875 if (orig_tlist_item != NULL)
5876 elog(ERROR, "resjunk output columns are not implemented");
5877 return new_tlist;
5878}

References Assert, elog, ERROR, fb(), lfirst_node, list_head(), and lnext().

Referenced by grouping_planner().

◆ preprocess_expression()

static Node * preprocess_expression ( PlannerInfo root,
Node expr,
int  kind 
)
static

Definition at line 1335 of file planner.c.

1336{
1337 /*
1338 * Fall out quickly if expression is empty. This occurs often enough to
1339 * be worth checking. Note that null->null is the correct conversion for
1340 * implicit-AND result format, too.
1341 */
1342 if (expr == NULL)
1343 return NULL;
1344
1345 /*
1346 * If the query has any join RTEs, replace join alias variables with
1347 * base-relation variables. We must do this first, since any expressions
1348 * we may extract from the joinaliasvars lists have not been preprocessed.
1349 * For example, if we did this after sublink processing, sublinks expanded
1350 * out from join aliases would not get processed. But we can skip this in
1351 * non-lateral RTE functions, VALUES lists, and TABLESAMPLE clauses, since
1352 * they can't contain any Vars of the current query level.
1353 */
1354 if (root->hasJoinRTEs &&
1355 !(kind == EXPRKIND_RTFUNC ||
1356 kind == EXPRKIND_VALUES ||
1357 kind == EXPRKIND_TABLESAMPLE ||
1358 kind == EXPRKIND_TABLEFUNC))
1359 expr = flatten_join_alias_vars(root, root->parse, expr);
1360
1361 /*
1362 * Simplify constant expressions. For function RTEs, this was already
1363 * done by preprocess_function_rtes. (But note we must do it again for
1364 * EXPRKIND_RTFUNC_LATERAL, because those might by now contain
1365 * un-simplified subexpressions inserted by flattening of subqueries or
1366 * join alias variables.)
1367 *
1368 * Note: an essential effect of this is to convert named-argument function
1369 * calls to positional notation and insert the current actual values of
1370 * any default arguments for functions. To ensure that happens, we *must*
1371 * process all expressions here. Previous PG versions sometimes skipped
1372 * const-simplification if it didn't seem worth the trouble, but we can't
1373 * do that anymore.
1374 *
1375 * Note: this also flattens nested AND and OR expressions into N-argument
1376 * form. All processing of a qual expression after this point must be
1377 * careful to maintain AND/OR flatness --- that is, do not generate a tree
1378 * with AND directly under AND, nor OR directly under OR.
1379 */
1380 if (kind != EXPRKIND_RTFUNC)
1381 expr = eval_const_expressions(root, expr);
1382
1383 /*
1384 * If it's a qual or havingQual, canonicalize it.
1385 */
1386 if (kind == EXPRKIND_QUAL)
1387 {
1388 expr = (Node *) canonicalize_qual((Expr *) expr, false);
1389
1390#ifdef OPTIMIZER_DEBUG
1391 printf("After canonicalize_qual()\n");
1392 pprint(expr);
1393#endif
1394 }
1395
1396 /*
1397 * Check for ANY ScalarArrayOpExpr with Const arrays and set the
1398 * hashfuncid of any that might execute more quickly by using hash lookups
1399 * instead of a linear search.
1400 */
1401 if (kind == EXPRKIND_QUAL || kind == EXPRKIND_TARGET)
1402 {
1404 }
1405
1406 /* Expand SubLinks to SubPlans */
1407 if (root->parse->hasSubLinks)
1408 expr = SS_process_sublinks(root, expr, (kind == EXPRKIND_QUAL));
1409
1410 /*
1411 * XXX do not insert anything here unless you have grokked the comments in
1412 * SS_replace_correlation_vars ...
1413 */
1414
1415 /* Replace uplevel vars with Param nodes (this IS possible in VALUES) */
1416 if (root->query_level > 1)
1417 expr = SS_replace_correlation_vars(root, expr);
1418
1419 /*
1420 * If it's a qual or havingQual, convert it to implicit-AND format. (We
1421 * don't want to do this before eval_const_expressions, since the latter
1422 * would be unable to simplify a top-level AND correctly. Also,
1423 * SS_process_sublinks expects explicit-AND format.)
1424 */
1425 if (kind == EXPRKIND_QUAL)
1426 expr = (Node *) make_ands_implicit((Expr *) expr);
1427
1428 return expr;
1429}
void pprint(const void *obj)
Definition print.c:54
void convert_saop_to_hashed_saop(Node *node)
Definition clauses.c:2300
List * make_ands_implicit(Expr *clause)
Definition makefuncs.c:810
#define EXPRKIND_TARGET
Definition planner.c:88
#define EXPRKIND_TABLESAMPLE
Definition planner.c:96
#define EXPRKIND_VALUES
Definition planner.c:91
#define EXPRKIND_QUAL
Definition planner.c:87
#define EXPRKIND_TABLEFUNC
Definition planner.c:98
#define EXPRKIND_RTFUNC
Definition planner.c:89
#define printf(...)
Definition port.h:266
Expr * canonicalize_qual(Expr *qual, bool is_check)
Definition prepqual.c:293
Node * SS_process_sublinks(PlannerInfo *root, Node *expr, bool isQual)
Definition subselect.c:2062
Node * SS_replace_correlation_vars(PlannerInfo *root, Node *expr)
Definition subselect.c:2007
Node * flatten_join_alias_vars(PlannerInfo *root, Query *query, Node *node)
Definition var.c:789

References canonicalize_qual(), convert_saop_to_hashed_saop(), eval_const_expressions(), EXPRKIND_QUAL, EXPRKIND_RTFUNC, EXPRKIND_TABLEFUNC, EXPRKIND_TABLESAMPLE, EXPRKIND_TARGET, EXPRKIND_VALUES, fb(), flatten_join_alias_vars(), make_ands_implicit(), pprint(), printf, root, SS_process_sublinks(), and SS_replace_correlation_vars().

Referenced by preprocess_phv_expression(), preprocess_qual_conditions(), and subquery_planner().

◆ preprocess_groupclause()

static List * preprocess_groupclause ( PlannerInfo root,
List force 
)
static

Definition at line 2907 of file planner.c.

2908{
2909 Query *parse = root->parse;
2911 ListCell *sl;
2912 ListCell *gl;
2913
2914 /* For grouping sets, we need to force the ordering */
2915 if (force)
2916 {
2917 foreach(sl, force)
2918 {
2921
2923 }
2924
2925 return new_groupclause;
2926 }
2927
2928 /* If no ORDER BY, nothing useful to do here */
2929 if (parse->sortClause == NIL)
2930 return list_copy(parse->groupClause);
2931
2932 /*
2933 * Scan the ORDER BY clause and construct a list of matching GROUP BY
2934 * items, but only as far as we can make a matching prefix.
2935 *
2936 * This code assumes that the sortClause contains no duplicate items.
2937 */
2938 foreach(sl, parse->sortClause)
2939 {
2941
2942 foreach(gl, parse->groupClause)
2943 {
2945
2946 if (equal(gc, sc))
2947 {
2949 break;
2950 }
2951 }
2952 if (gl == NULL)
2953 break; /* no match, so stop scanning */
2954 }
2955
2956
2957 /* If no match at all, no point in reordering GROUP BY */
2958 if (new_groupclause == NIL)
2959 return list_copy(parse->groupClause);
2960
2961 /*
2962 * Add any remaining GROUP BY items to the new list. We don't require a
2963 * complete match, because even partial match allows ORDER BY to be
2964 * implemented using incremental sort. Also, give up if there are any
2965 * non-sortable GROUP BY items, since then there's no hope anyway.
2966 */
2967 foreach(gl, parse->groupClause)
2968 {
2970
2972 continue; /* it matched an ORDER BY item */
2973 if (!OidIsValid(gc->sortop)) /* give up, GROUP BY can't be sorted */
2974 return list_copy(parse->groupClause);
2976 }
2977
2978 /* Success --- install the rearranged GROUP BY list */
2980 return new_groupclause;
2981}
SortGroupClause * get_sortgroupref_clause(Index sortref, List *clauses)
Definition tlist.c:431

References Assert, equal(), fb(), get_sortgroupref_clause(), lappend(), lfirst_int, lfirst_node, list_copy(), list_length(), list_member_ptr(), NIL, OidIsValid, parse(), and root.

Referenced by consider_groupingsets_paths(), grouping_planner(), and preprocess_grouping_sets().

◆ preprocess_grouping_sets()

static grouping_sets_data * preprocess_grouping_sets ( PlannerInfo root)
static

Definition at line 2261 of file planner.c.

2262{
2263 Query *parse = root->parse;
2264 List *sets;
2265 int maxref = 0;
2268
2269 /*
2270 * We don't currently make any attempt to optimize the groupClause when
2271 * there are grouping sets, so just duplicate it in processed_groupClause.
2272 */
2273 root->processed_groupClause = parse->groupClause;
2274
2275 /* Detect unhashable and unsortable grouping expressions */
2276 gd->any_hashable = false;
2277 gd->unhashable_refs = NULL;
2278 gd->unsortable_refs = NULL;
2279 gd->unsortable_sets = NIL;
2280
2281 if (parse->groupClause)
2282 {
2283 ListCell *lc;
2284
2285 foreach(lc, parse->groupClause)
2286 {
2288 Index ref = gc->tleSortGroupRef;
2289
2290 if (ref > maxref)
2291 maxref = ref;
2292
2293 if (!gc->hashable)
2294 gd->unhashable_refs = bms_add_member(gd->unhashable_refs, ref);
2295
2296 if (!OidIsValid(gc->sortop))
2297 gd->unsortable_refs = bms_add_member(gd->unsortable_refs, ref);
2298 }
2299 }
2300
2301 /* Allocate workspace array for remapping */
2302 gd->tleref_to_colnum_map = (int *) palloc((maxref + 1) * sizeof(int));
2303
2304 /*
2305 * If we have any unsortable sets, we must extract them before trying to
2306 * prepare rollups. Unsortable sets don't go through
2307 * reorder_grouping_sets, so we must apply the GroupingSetData annotation
2308 * here.
2309 */
2310 if (!bms_is_empty(gd->unsortable_refs))
2311 {
2313 ListCell *lc;
2314
2315 foreach(lc, parse->groupingSets)
2316 {
2317 List *gset = (List *) lfirst(lc);
2318
2319 if (bms_overlap_list(gd->unsortable_refs, gset))
2320 {
2322
2323 gs->set = gset;
2324 gd->unsortable_sets = lappend(gd->unsortable_sets, gs);
2325
2326 /*
2327 * We must enforce here that an unsortable set is hashable;
2328 * later code assumes this. Parse analysis only checks that
2329 * every individual column is either hashable or sortable.
2330 *
2331 * Note that passing this test doesn't guarantee we can
2332 * generate a plan; there might be other showstoppers.
2333 */
2334 if (bms_overlap_list(gd->unhashable_refs, gset))
2335 ereport(ERROR,
2337 errmsg("could not implement GROUP BY"),
2338 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
2339 }
2340 else
2342 }
2343
2344 if (sortable_sets)
2346 else
2347 sets = NIL;
2348 }
2349 else
2350 sets = extract_rollup_sets(parse->groupingSets);
2351
2352 foreach(lc_set, sets)
2353 {
2357
2358 /*
2359 * Reorder the current list of grouping sets into correct prefix
2360 * order. If only one aggregation pass is needed, try to make the
2361 * list match the ORDER BY clause; if more than one pass is needed, we
2362 * don't bother with that.
2363 *
2364 * Note that this reorders the sets from smallest-member-first to
2365 * largest-member-first, and applies the GroupingSetData annotations,
2366 * though the data will be filled in later.
2367 */
2369 (list_length(sets) == 1
2370 ? parse->sortClause
2371 : NIL));
2372
2373 /*
2374 * Get the initial (and therefore largest) grouping set.
2375 */
2377
2378 /*
2379 * Order the groupClause appropriately. If the first grouping set is
2380 * empty, then the groupClause must also be empty; otherwise we have
2381 * to force the groupClause to match that grouping set's order.
2382 *
2383 * (The first grouping set can be empty even though parse->groupClause
2384 * is not empty only if all non-empty grouping sets are unsortable.
2385 * The groupClauses for hashed grouping sets are built later on.)
2386 */
2387 if (gs->set)
2388 rollup->groupClause = preprocess_groupclause(root, gs->set);
2389 else
2390 rollup->groupClause = NIL;
2391
2392 /*
2393 * Is it hashable? We pretend empty sets are hashable even though we
2394 * actually force them not to be hashed later. But don't bother if
2395 * there's nothing but empty sets (since in that case we can't hash
2396 * anything).
2397 */
2398 if (gs->set &&
2399 !bms_overlap_list(gd->unhashable_refs, gs->set))
2400 {
2401 rollup->hashable = true;
2402 gd->any_hashable = true;
2403 }
2404
2405 /*
2406 * Now that we've pinned down an order for the groupClause for this
2407 * list of grouping sets, we need to remap the entries in the grouping
2408 * sets from sortgrouprefs to plain indices (0-based) into the
2409 * groupClause for this collection of grouping sets. We keep the
2410 * original form for later use, though.
2411 */
2412 rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
2414 gd->tleref_to_colnum_map);
2415 rollup->gsets_data = current_sets;
2416
2417 gd->rollups = lappend(gd->rollups, rollup);
2418 }
2419
2420 if (gd->unsortable_sets)
2421 {
2422 /*
2423 * We have not yet pinned down a groupclause for this, but we will
2424 * need index-based lists for estimation purposes. Construct
2425 * hash_sets_idx based on the entire original groupclause for now.
2426 */
2427 gd->hash_sets_idx = remap_to_groupclause_idx(parse->groupClause,
2428 gd->unsortable_sets,
2429 gd->tleref_to_colnum_map);
2430 gd->any_hashable = true;
2431 }
2432
2433 return gd;
2434}
bool bms_overlap_list(const Bitmapset *a, const List *b)
Definition bitmapset.c:607
#define palloc0_object(type)
Definition fe_memutils.h:75
static List * reorder_grouping_sets(List *groupingSets, List *sortclause)
Definition planner.c:3215
static List * extract_rollup_sets(List *groupingSets)
Definition planner.c:3003

References bms_add_member(), bms_is_empty, bms_overlap_list(), ereport, errcode(), errdetail(), errmsg(), ERROR, extract_rollup_sets(), fb(), lappend(), lfirst, lfirst_node, linitial_node, list_length(), makeNode, NIL, OidIsValid, palloc(), palloc0_object, parse(), preprocess_groupclause(), remap_to_groupclause_idx(), reorder_grouping_sets(), and root.

Referenced by grouping_planner().

◆ preprocess_limit()

static double preprocess_limit ( PlannerInfo root,
double  tuple_fraction,
int64 offset_est,
int64 count_est 
)
static

Definition at line 2656 of file planner.c.

2658{
2659 Query *parse = root->parse;
2660 Node *est;
2661 double limit_fraction;
2662
2663 /* Should not be called unless LIMIT or OFFSET */
2664 Assert(parse->limitCount || parse->limitOffset);
2665
2666 /*
2667 * Try to obtain the clause values. We use estimate_expression_value
2668 * primarily because it can sometimes do something useful with Params.
2669 */
2670 if (parse->limitCount)
2671 {
2672 est = estimate_expression_value(root, parse->limitCount);
2673 if (est && IsA(est, Const))
2674 {
2675 if (((Const *) est)->constisnull)
2676 {
2677 /* NULL indicates LIMIT ALL, ie, no limit */
2678 *count_est = 0; /* treat as not present */
2679 }
2680 else
2681 {
2682 *count_est = DatumGetInt64(((Const *) est)->constvalue);
2683 if (*count_est <= 0)
2684 *count_est = 1; /* force to at least 1 */
2685 }
2686 }
2687 else
2688 *count_est = -1; /* can't estimate */
2689 }
2690 else
2691 *count_est = 0; /* not present */
2692
2693 if (parse->limitOffset)
2694 {
2695 est = estimate_expression_value(root, parse->limitOffset);
2696 if (est && IsA(est, Const))
2697 {
2698 if (((Const *) est)->constisnull)
2699 {
2700 /* Treat NULL as no offset; the executor will too */
2701 *offset_est = 0; /* treat as not present */
2702 }
2703 else
2704 {
2705 *offset_est = DatumGetInt64(((Const *) est)->constvalue);
2706 if (*offset_est < 0)
2707 *offset_est = 0; /* treat as not present */
2708 }
2709 }
2710 else
2711 *offset_est = -1; /* can't estimate */
2712 }
2713 else
2714 *offset_est = 0; /* not present */
2715
2716 if (*count_est != 0)
2717 {
2718 /*
2719 * A LIMIT clause limits the absolute number of tuples returned.
2720 * However, if it's not a constant LIMIT then we have to guess; for
2721 * lack of a better idea, assume 10% of the plan's result is wanted.
2722 */
2723 if (*count_est < 0 || *offset_est < 0)
2724 {
2725 /* LIMIT or OFFSET is an expression ... punt ... */
2726 limit_fraction = 0.10;
2727 }
2728 else
2729 {
2730 /* LIMIT (plus OFFSET, if any) is max number of tuples needed */
2731 limit_fraction = (double) *count_est + (double) *offset_est;
2732 }
2733
2734 /*
2735 * If we have absolute limits from both caller and LIMIT, use the
2736 * smaller value; likewise if they are both fractional. If one is
2737 * fractional and the other absolute, we can't easily determine which
2738 * is smaller, but we use the heuristic that the absolute will usually
2739 * be smaller.
2740 */
2741 if (tuple_fraction >= 1.0)
2742 {
2743 if (limit_fraction >= 1.0)
2744 {
2745 /* both absolute */
2746 tuple_fraction = Min(tuple_fraction, limit_fraction);
2747 }
2748 else
2749 {
2750 /* caller absolute, limit fractional; use caller's value */
2751 }
2752 }
2753 else if (tuple_fraction > 0.0)
2754 {
2755 if (limit_fraction >= 1.0)
2756 {
2757 /* caller fractional, limit absolute; use limit */
2758 tuple_fraction = limit_fraction;
2759 }
2760 else
2761 {
2762 /* both fractional */
2763 tuple_fraction = Min(tuple_fraction, limit_fraction);
2764 }
2765 }
2766 else
2767 {
2768 /* no info from caller, just use limit */
2769 tuple_fraction = limit_fraction;
2770 }
2771 }
2772 else if (*offset_est != 0 && tuple_fraction > 0.0)
2773 {
2774 /*
2775 * We have an OFFSET but no LIMIT. This acts entirely differently
2776 * from the LIMIT case: here, we need to increase rather than decrease
2777 * the caller's tuple_fraction, because the OFFSET acts to cause more
2778 * tuples to be fetched instead of fewer. This only matters if we got
2779 * a tuple_fraction > 0, however.
2780 *
2781 * As above, use 10% if OFFSET is present but unestimatable.
2782 */
2783 if (*offset_est < 0)
2784 limit_fraction = 0.10;
2785 else
2786 limit_fraction = (double) *offset_est;
2787
2788 /*
2789 * If we have absolute counts from both caller and OFFSET, add them
2790 * together; likewise if they are both fractional. If one is
2791 * fractional and the other absolute, we want to take the larger, and
2792 * we heuristically assume that's the fractional one.
2793 */
2794 if (tuple_fraction >= 1.0)
2795 {
2796 if (limit_fraction >= 1.0)
2797 {
2798 /* both absolute, so add them together */
2799 tuple_fraction += limit_fraction;
2800 }
2801 else
2802 {
2803 /* caller absolute, limit fractional; use limit */
2804 tuple_fraction = limit_fraction;
2805 }
2806 }
2807 else
2808 {
2809 if (limit_fraction >= 1.0)
2810 {
2811 /* caller fractional, limit absolute; use caller's value */
2812 }
2813 else
2814 {
2815 /* both fractional, so add them together */
2816 tuple_fraction += limit_fraction;
2817 if (tuple_fraction >= 1.0)
2818 tuple_fraction = 0.0; /* assume fetch all */
2819 }
2820 }
2821 }
2822
2823 return tuple_fraction;
2824}
Node * estimate_expression_value(PlannerInfo *root, Node *node)
Definition clauses.c:2408

References Assert, DatumGetInt64(), estimate_expression_value(), fb(), IsA, Min, parse(), and root.

Referenced by grouping_planner().

◆ preprocess_phv_expression()

Expr * preprocess_phv_expression ( PlannerInfo root,
Expr expr 
)

Definition at line 1481 of file planner.c.

1482{
1483 return (Expr *) preprocess_expression(root, (Node *) expr, EXPRKIND_PHV);
1484}
#define EXPRKIND_PHV
Definition planner.c:95
static Node * preprocess_expression(PlannerInfo *root, Node *expr, int kind)
Definition planner.c:1335

References EXPRKIND_PHV, preprocess_expression(), and root.

Referenced by extract_lateral_references().

◆ preprocess_qual_conditions()

static void preprocess_qual_conditions ( PlannerInfo root,
Node jtnode 
)
static

Definition at line 1437 of file planner.c.

1438{
1439 if (jtnode == NULL)
1440 return;
1441 if (IsA(jtnode, RangeTblRef))
1442 {
1443 /* nothing to do here */
1444 }
1445 else if (IsA(jtnode, FromExpr))
1446 {
1447 FromExpr *f = (FromExpr *) jtnode;
1448 ListCell *l;
1449
1450 foreach(l, f->fromlist)
1452
1454 }
1455 else if (IsA(jtnode, JoinExpr))
1456 {
1457 JoinExpr *j = (JoinExpr *) jtnode;
1458
1461
1462 j->quals = preprocess_expression(root, j->quals, EXPRKIND_QUAL);
1463 }
1464 else
1465 elog(ERROR, "unrecognized node type: %d",
1466 (int) nodeTag(jtnode));
1467}
#define nodeTag(nodeptr)
Definition nodes.h:139
static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode)
Definition planner.c:1437
Node * quals
Definition primnodes.h:2358
List * fromlist
Definition primnodes.h:2357

References elog, ERROR, EXPRKIND_QUAL, fb(), FromExpr::fromlist, IsA, j, lfirst, nodeTag, preprocess_expression(), preprocess_qual_conditions(), FromExpr::quals, and root.

Referenced by preprocess_qual_conditions(), and subquery_planner().

◆ preprocess_rowmarks()

static void preprocess_rowmarks ( PlannerInfo root)
static

Definition at line 2478 of file planner.c.

2479{
2480 Query *parse = root->parse;
2481 Bitmapset *rels;
2482 List *prowmarks;
2483 ListCell *l;
2484 int i;
2485
2486 if (parse->rowMarks)
2487 {
2488 /*
2489 * We've got trouble if FOR [KEY] UPDATE/SHARE appears inside
2490 * grouping, since grouping renders a reference to individual tuple
2491 * CTIDs invalid. This is also checked at parse time, but that's
2492 * insufficient because of rule substitution, query pullup, etc.
2493 */
2495 parse->rowMarks)->strength);
2496 }
2497 else
2498 {
2499 /*
2500 * We only need rowmarks for UPDATE, DELETE, MERGE, or FOR [KEY]
2501 * UPDATE/SHARE.
2502 */
2503 if (parse->commandType != CMD_UPDATE &&
2504 parse->commandType != CMD_DELETE &&
2505 parse->commandType != CMD_MERGE)
2506 return;
2507 }
2508
2509 /*
2510 * We need to have rowmarks for all base relations except the target. We
2511 * make a bitmapset of all base rels and then remove the items we don't
2512 * need or have FOR [KEY] UPDATE/SHARE marks for.
2513 */
2514 rels = get_relids_in_jointree((Node *) parse->jointree, false, false);
2515 if (parse->resultRelation)
2516 rels = bms_del_member(rels, parse->resultRelation);
2517
2518 /*
2519 * Convert RowMarkClauses to PlanRowMark representation.
2520 */
2521 prowmarks = NIL;
2522 foreach(l, parse->rowMarks)
2523 {
2525 RangeTblEntry *rte = rt_fetch(rc->rti, parse->rtable);
2527
2528 /*
2529 * Currently, it is syntactically impossible to have FOR UPDATE et al
2530 * applied to an update/delete target rel. If that ever becomes
2531 * possible, we should drop the target from the PlanRowMark list.
2532 */
2533 Assert(rc->rti != parse->resultRelation);
2534
2535 /*
2536 * Ignore RowMarkClauses for subqueries; they aren't real tables and
2537 * can't support true locking. Subqueries that got flattened into the
2538 * main query should be ignored completely. Any that didn't will get
2539 * ROW_MARK_COPY items in the next loop.
2540 */
2541 if (rte->rtekind != RTE_RELATION)
2542 continue;
2543
2544 rels = bms_del_member(rels, rc->rti);
2545
2547 newrc->rti = newrc->prti = rc->rti;
2548 newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2549 newrc->markType = select_rowmark_type(rte, rc->strength);
2550 newrc->allMarkTypes = (1 << newrc->markType);
2551 newrc->strength = rc->strength;
2552 newrc->waitPolicy = rc->waitPolicy;
2553 newrc->isParent = false;
2554
2556 }
2557
2558 /*
2559 * Now, add rowmarks for any non-target, non-locked base relations.
2560 */
2561 i = 0;
2562 foreach(l, parse->rtable)
2563 {
2566
2567 i++;
2568 if (!bms_is_member(i, rels))
2569 continue;
2570
2572 newrc->rti = newrc->prti = i;
2573 newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2574 newrc->markType = select_rowmark_type(rte, LCS_NONE);
2575 newrc->allMarkTypes = (1 << newrc->markType);
2576 newrc->strength = LCS_NONE;
2577 newrc->waitPolicy = LockWaitBlock; /* doesn't matter */
2578 newrc->isParent = false;
2579
2581 }
2582
2583 root->rowMarks = prowmarks;
2584}
@ LockWaitBlock
Definition lockoptions.h:39
@ LCS_NONE
Definition lockoptions.h:23
@ CMD_DELETE
Definition nodes.h:278
void CheckSelectLocking(Query *qry, LockClauseStrength strength)
Definition analyze.c:3349
#define rt_fetch(rangetable_index, rangetable)
Definition parsetree.h:31
RowMarkType select_rowmark_type(RangeTblEntry *rte, LockClauseStrength strength)
Definition planner.c:2590
Relids get_relids_in_jointree(Node *jtnode, bool include_outer_joins, bool include_inner_joins)
LockClauseStrength strength
LockWaitPolicy waitPolicy

References Assert, bms_del_member(), bms_is_member(), CheckSelectLocking(), CMD_DELETE, CMD_MERGE, CMD_UPDATE, fb(), get_relids_in_jointree(), i, lappend(), LCS_NONE, lfirst_node, linitial_node, LockWaitBlock, makeNode, NIL, parse(), root, rt_fetch, RTE_RELATION, RowMarkClause::rti, select_rowmark_type(), RowMarkClause::strength, and RowMarkClause::waitPolicy.

Referenced by subquery_planner().

◆ remap_to_groupclause_idx()

static List * remap_to_groupclause_idx ( List groupClause,
List gsets,
int tleref_to_colnum_map 
)
static

Definition at line 2441 of file planner.c.

2444{
2445 int ref = 0;
2446 List *result = NIL;
2447 ListCell *lc;
2448
2449 foreach(lc, groupClause)
2450 {
2452
2453 tleref_to_colnum_map[gc->tleSortGroupRef] = ref++;
2454 }
2455
2456 foreach(lc, gsets)
2457 {
2458 List *set = NIL;
2459 ListCell *lc2;
2461
2462 foreach(lc2, gs->set)
2463 {
2464 set = lappend_int(set, tleref_to_colnum_map[lfirst_int(lc2)]);
2465 }
2466
2467 result = lappend(result, set);
2468 }
2469
2470 return result;
2471}

References fb(), lappend(), lappend_int(), lfirst_int, lfirst_node, and NIL.

Referenced by consider_groupingsets_paths(), and preprocess_grouping_sets().

◆ reorder_grouping_sets()

static List * reorder_grouping_sets ( List groupingSets,
List sortclause 
)
static

Definition at line 3215 of file planner.c.

3216{
3217 ListCell *lc;
3218 List *previous = NIL;
3219 List *result = NIL;
3220
3221 foreach(lc, groupingSets)
3222 {
3223 List *candidate = (List *) lfirst(lc);
3226
3227 while (list_length(sortclause) > list_length(previous) &&
3228 new_elems != NIL)
3229 {
3231 int ref = sc->tleSortGroupRef;
3232
3234 {
3235 previous = lappend_int(previous, ref);
3237 }
3238 else
3239 {
3240 /* diverged from the sortclause; give up on it */
3241 sortclause = NIL;
3242 break;
3243 }
3244 }
3245
3246 previous = list_concat(previous, new_elems);
3247
3248 gs->set = list_copy(previous);
3249 result = lcons(gs, result);
3250 }
3251
3252 list_free(previous);
3253
3254 return result;
3255}
List * list_difference_int(const List *list1, const List *list2)
Definition list.c:1288
List * list_delete_int(List *list, int datum)
Definition list.c:891
bool list_member_int(const List *list, int datum)
Definition list.c:702
static void * list_nth(const List *list, int n)
Definition pg_list.h:299

References fb(), lappend_int(), lcons(), lfirst, list_concat(), list_copy(), list_delete_int(), list_difference_int(), list_free(), list_length(), list_member_int(), list_nth(), makeNode, and NIL.

Referenced by preprocess_grouping_sets().

◆ select_active_windows()

static List * select_active_windows ( PlannerInfo root,
WindowFuncLists wflists 
)
static

Definition at line 6030 of file planner.c.

6031{
6032 List *windowClause = root->parse->windowClause;
6033 List *result = NIL;
6034 ListCell *lc;
6035 int nActive = 0;
6037 list_length(windowClause));
6038
6039 /* First, construct an array of the active windows */
6040 foreach(lc, windowClause)
6041 {
6043
6044 /* It's only active if wflists shows some related WindowFuncs */
6045 Assert(wc->winref <= wflists->maxWinRef);
6046 if (wflists->windowFuncs[wc->winref] == NIL)
6047 continue;
6048
6049 actives[nActive].wc = wc; /* original clause */
6050
6051 /*
6052 * For sorting, we want the list of partition keys followed by the
6053 * list of sort keys. But pathkeys construction will remove duplicates
6054 * between the two, so we can as well (even though we can't detect all
6055 * of the duplicates, since some may come from ECs - that might mean
6056 * we miss optimization chances here). We must, however, ensure that
6057 * the order of entries is preserved with respect to the ones we do
6058 * keep.
6059 *
6060 * partitionClause and orderClause had their own duplicates removed in
6061 * parse analysis, so we're only concerned here with removing
6062 * orderClause entries that also appear in partitionClause.
6063 */
6064 actives[nActive].uniqueOrder =
6066 wc->orderClause);
6067 nActive++;
6068 }
6069
6070 /*
6071 * Sort active windows by their partitioning/ordering clauses, ignoring
6072 * any framing clauses, so that the windows that need the same sorting are
6073 * adjacent in the list. When we come to generate paths, this will avoid
6074 * inserting additional Sort nodes.
6075 *
6076 * This is how we implement a specific requirement from the SQL standard,
6077 * which says that when two or more windows are order-equivalent (i.e.
6078 * have matching partition and order clauses, even if their names or
6079 * framing clauses differ), then all peer rows must be presented in the
6080 * same order in all of them. If we allowed multiple sort nodes for such
6081 * cases, we'd risk having the peer rows end up in different orders in
6082 * equivalent windows due to sort instability. (See General Rule 4 of
6083 * <window clause> in SQL2008 - SQL2016.)
6084 *
6085 * Additionally, if the entire list of clauses of one window is a prefix
6086 * of another, put first the window with stronger sorting requirements.
6087 * This way we will first sort for stronger window, and won't have to sort
6088 * again for the weaker one.
6089 */
6091
6092 /* build ordered list of the original WindowClause nodes */
6093 for (int i = 0; i < nActive; i++)
6094 result = lappend(result, actives[i].wc);
6095
6096 pfree(actives);
6097
6098 return result;
6099}
#define palloc_array(type, count)
Definition fe_memutils.h:76
List * list_concat_unique(List *list1, const List *list2)
Definition list.c:1405
static int common_prefix_cmp(const void *a, const void *b)
Definition planner.c:6164
#define qsort(a, b, c, d)
Definition port.h:495

References Assert, common_prefix_cmp(), fb(), i, lappend(), lfirst_node, list_concat_unique(), list_copy(), list_length(), NIL, WindowClause::orderClause, palloc_array, WindowClause::partitionClause, pfree(), qsort, root, and WindowClause::winref.

Referenced by grouping_planner().

◆ select_rowmark_type()

RowMarkType select_rowmark_type ( RangeTblEntry rte,
LockClauseStrength  strength 
)

Definition at line 2590 of file planner.c.

2591{
2592 if (rte->rtekind != RTE_RELATION)
2593 {
2594 /* If it's not a table at all, use ROW_MARK_COPY */
2595 return ROW_MARK_COPY;
2596 }
2597 else if (rte->relkind == RELKIND_FOREIGN_TABLE)
2598 {
2599 /* Let the FDW select the rowmark type, if it wants to */
2600 FdwRoutine *fdwroutine = GetFdwRoutineByRelId(rte->relid);
2601
2602 if (fdwroutine->GetForeignRowMarkType != NULL)
2603 return fdwroutine->GetForeignRowMarkType(rte, strength);
2604 /* Otherwise, use ROW_MARK_COPY by default */
2605 return ROW_MARK_COPY;
2606 }
2607 else
2608 {
2609 /* Regular table, apply the appropriate lock type */
2610 switch (strength)
2611 {
2612 case LCS_NONE:
2613
2614 /*
2615 * We don't need a tuple lock, only the ability to re-fetch
2616 * the row.
2617 */
2618 return ROW_MARK_REFERENCE;
2619 break;
2620 case LCS_FORKEYSHARE:
2621 return ROW_MARK_KEYSHARE;
2622 break;
2623 case LCS_FORSHARE:
2624 return ROW_MARK_SHARE;
2625 break;
2626 case LCS_FORNOKEYUPDATE:
2628 break;
2629 case LCS_FORUPDATE:
2630 return ROW_MARK_EXCLUSIVE;
2631 break;
2632 }
2633 elog(ERROR, "unrecognized LockClauseStrength %d", (int) strength);
2634 return ROW_MARK_EXCLUSIVE; /* keep compiler quiet */
2635 }
2636}
FdwRoutine * GetFdwRoutineByRelId(Oid relid)
Definition foreign.c:420
@ LCS_FORUPDATE
Definition lockoptions.h:27
@ LCS_FORSHARE
Definition lockoptions.h:25
@ LCS_FORKEYSHARE
Definition lockoptions.h:24
@ LCS_FORNOKEYUPDATE
Definition lockoptions.h:26
@ ROW_MARK_COPY
Definition plannodes.h:1558
@ ROW_MARK_REFERENCE
Definition plannodes.h:1557
@ ROW_MARK_SHARE
Definition plannodes.h:1555
@ ROW_MARK_EXCLUSIVE
Definition plannodes.h:1553
@ ROW_MARK_NOKEYEXCLUSIVE
Definition plannodes.h:1554
@ ROW_MARK_KEYSHARE
Definition plannodes.h:1556
GetForeignRowMarkType_function GetForeignRowMarkType
Definition fdwapi.h:247

References elog, ERROR, fb(), GetFdwRoutineByRelId(), FdwRoutine::GetForeignRowMarkType, LCS_FORKEYSHARE, LCS_FORNOKEYUPDATE, LCS_FORSHARE, LCS_FORUPDATE, LCS_NONE, ROW_MARK_COPY, ROW_MARK_EXCLUSIVE, ROW_MARK_KEYSHARE, ROW_MARK_NOKEYEXCLUSIVE, ROW_MARK_REFERENCE, ROW_MARK_SHARE, and RTE_RELATION.

Referenced by expand_single_inheritance_child(), and preprocess_rowmarks().

◆ standard_planner()

PlannedStmt * standard_planner ( Query parse,
const char query_string,
int  cursorOptions,
ParamListInfo  boundParams,
ExplainState es 
)

Definition at line 333 of file planner.c.

335{
336 PlannedStmt *result;
337 PlannerGlobal *glob;
338 double tuple_fraction;
342 Plan *top_plan;
343 ListCell *lp,
344 *lr;
345
346 /*
347 * Set up global state for this planner invocation. This data is needed
348 * across all levels of sub-Query that might exist in the given command,
349 * so we keep it in a separate struct that's linked to by each per-Query
350 * PlannerInfo.
351 */
352 glob = makeNode(PlannerGlobal);
353
354 glob->boundParams = boundParams;
355 glob->subplans = NIL;
356 glob->subpaths = NIL;
357 glob->subroots = NIL;
358 glob->rewindPlanIDs = NULL;
359 glob->finalrtable = NIL;
360 glob->allRelids = NULL;
361 glob->prunableRelids = NULL;
362 glob->finalrteperminfos = NIL;
363 glob->finalrowmarks = NIL;
364 glob->resultRelations = NIL;
365 glob->appendRelations = NIL;
366 glob->partPruneInfos = NIL;
367 glob->relationOids = NIL;
368 glob->invalItems = NIL;
369 glob->paramExecTypes = NIL;
370 glob->lastPHId = 0;
371 glob->lastRowMarkId = 0;
372 glob->lastPlanNodeId = 0;
373 glob->transientPlan = false;
374 glob->dependsOnRole = false;
375 glob->partition_directory = NULL;
376 glob->rel_notnullatts_hash = NULL;
377
378 /*
379 * Assess whether it's feasible to use parallel mode for this query. We
380 * can't do this in a standalone backend, or if the command will try to
381 * modify any data, or if this is a cursor operation, or if GUCs are set
382 * to values that don't permit parallelism, or if parallel-unsafe
383 * functions are present in the query tree.
384 *
385 * (Note that we do allow CREATE TABLE AS, SELECT INTO, and CREATE
386 * MATERIALIZED VIEW to use parallel plans, but this is safe only because
387 * the command is writing into a completely new table which workers won't
388 * be able to see. If the workers could see the table, the fact that
389 * group locking would cause them to ignore the leader's heavyweight GIN
390 * page locks would make this unsafe. We'll have to fix that somehow if
391 * we want to allow parallel inserts in general; updates and deletes have
392 * additional problems especially around combo CIDs.)
393 *
394 * For now, we don't try to use parallel mode if we're running inside a
395 * parallel worker. We might eventually be able to relax this
396 * restriction, but for now it seems best not to have parallel workers
397 * trying to create their own parallel workers.
398 */
399 if ((cursorOptions & CURSOR_OPT_PARALLEL_OK) != 0 &&
401 parse->commandType == CMD_SELECT &&
402 !parse->hasModifyingCTE &&
405 {
406 /* all the cheap tests pass, so scan the query tree */
409 }
410 else
411 {
412 /* skip the query tree scan, just assume it's unsafe */
414 glob->parallelModeOK = false;
415 }
416
417 /*
418 * glob->parallelModeNeeded is normally set to false here and changed to
419 * true during plan creation if a Gather or Gather Merge plan is actually
420 * created (cf. create_gather_plan, create_gather_merge_plan).
421 *
422 * However, if debug_parallel_query = on or debug_parallel_query =
423 * regress, then we impose parallel mode whenever it's safe to do so, even
424 * if the final plan doesn't use parallelism. It's not safe to do so if
425 * the query contains anything parallel-unsafe; parallelModeOK will be
426 * false in that case. Note that parallelModeOK can't change after this
427 * point. Otherwise, everything in the query is either parallel-safe or
428 * parallel-restricted, and in either case it should be OK to impose
429 * parallel-mode restrictions. If that ends up breaking something, then
430 * either some function the user included in the query is incorrectly
431 * labeled as parallel-safe or parallel-restricted when in reality it's
432 * parallel-unsafe, or else the query planner itself has a bug.
433 */
434 glob->parallelModeNeeded = glob->parallelModeOK &&
436
437 /* Determine what fraction of the plan is likely to be scanned */
438 if (cursorOptions & CURSOR_OPT_FAST_PLAN)
439 {
440 /*
441 * We have no real idea how many tuples the user will ultimately FETCH
442 * from a cursor, but it is often the case that he doesn't want 'em
443 * all, or would prefer a fast-start plan anyway so that he can
444 * process some of the tuples sooner. Use a GUC parameter to decide
445 * what fraction to optimize for.
446 */
447 tuple_fraction = cursor_tuple_fraction;
448
449 /*
450 * We document cursor_tuple_fraction as simply being a fraction, which
451 * means the edge cases 0 and 1 have to be treated specially here. We
452 * convert 1 to 0 ("all the tuples") and 0 to a very small fraction.
453 */
454 if (tuple_fraction >= 1.0)
455 tuple_fraction = 0.0;
456 else if (tuple_fraction <= 0.0)
457 tuple_fraction = 1e-10;
458 }
459 else
460 {
461 /* Default assumption is we need all the tuples */
462 tuple_fraction = 0.0;
463 }
464
465 /*
466 * Compute the initial path generation strategy mask.
467 *
468 * Some strategies, such as PGS_FOREIGNJOIN, have no corresponding enable_*
469 * GUC, and so the corresponding bits are always set in the default
470 * strategy mask.
471 *
472 * It may seem surprising that enable_indexscan sets both PGS_INDEXSCAN
473 * and PGS_INDEXONLYSCAN. However, the historical behavior of this GUC
474 * corresponds to this exactly: enable_indexscan=off disables both
475 * index-scan and index-only scan paths, whereas enable_indexonlyscan=off
476 * converts the index-only scan paths that we would have considered into
477 * index scan paths.
478 */
481 if (enable_tidscan)
483 if (enable_seqscan)
492 {
494 if (enable_material)
496 }
497 if (enable_nestloop)
498 {
500 if (enable_material)
502 if (enable_memoize)
504 }
505 if (enable_hashjoin)
511
512 /* Allow plugins to take control after we've initialized "glob" */
514 (*planner_setup_hook) (glob, parse, query_string, cursorOptions,
515 &tuple_fraction, es);
516
517 /* primary planning entry point (may recurse for subqueries) */
518 root = subquery_planner(glob, parse, NULL, NULL, false, tuple_fraction,
519 NULL);
520
521 /* Select best Path and turn it into a Plan */
524
526
527 /*
528 * If creating a plan for a scrollable cursor, make sure it can run
529 * backwards on demand. Add a Material node at the top at need.
530 */
531 if (cursorOptions & CURSOR_OPT_SCROLL)
532 {
535 }
536
537 /*
538 * Optionally add a Gather node for testing purposes, provided this is
539 * actually a safe thing to do.
540 *
541 * We can add Gather even when top_plan has parallel-safe initPlans, but
542 * then we have to move the initPlans to the Gather node because of
543 * SS_finalize_plan's limitations. That would cause cosmetic breakage of
544 * regression tests when debug_parallel_query = regress, because initPlans
545 * that would normally appear on the top_plan move to the Gather, causing
546 * them to disappear from EXPLAIN output. That doesn't seem worth kluging
547 * EXPLAIN to hide, so skip it when debug_parallel_query = regress.
548 */
550 top_plan->parallel_safe &&
551 (top_plan->initPlan == NIL ||
553 {
556 bool unsafe_initplans;
557
558 gather->plan.targetlist = top_plan->targetlist;
559 gather->plan.qual = NIL;
560 gather->plan.lefttree = top_plan;
561 gather->plan.righttree = NULL;
562 gather->num_workers = 1;
563 gather->single_copy = true;
565
566 /* Transfer any initPlans to the new top node */
567 gather->plan.initPlan = top_plan->initPlan;
568 top_plan->initPlan = NIL;
569
570 /*
571 * Since this Gather has no parallel-aware descendants to signal to,
572 * we don't need a rescan Param.
573 */
574 gather->rescan_param = -1;
575
576 /*
577 * Ideally we'd use cost_gather here, but setting up dummy path data
578 * to satisfy it doesn't seem much cleaner than knowing what it does.
579 */
580 gather->plan.startup_cost = top_plan->startup_cost +
582 gather->plan.total_cost = top_plan->total_cost +
584 gather->plan.plan_rows = top_plan->plan_rows;
585 gather->plan.plan_width = top_plan->plan_width;
586 gather->plan.parallel_aware = false;
587 gather->plan.parallel_safe = false;
588
589 /*
590 * Delete the initplans' cost from top_plan. We needn't add it to the
591 * Gather node, since the above coding already included it there.
592 */
593 SS_compute_initplan_cost(gather->plan.initPlan,
595 top_plan->startup_cost -= initplan_cost;
596 top_plan->total_cost -= initplan_cost;
597
598 /* use parallel mode for parallel plans. */
599 root->glob->parallelModeNeeded = true;
600
601 top_plan = &gather->plan;
602 }
603
604 /*
605 * If any Params were generated, run through the plan tree and compute
606 * each plan node's extParam/allParam sets. Ideally we'd merge this into
607 * set_plan_references' tree traversal, but for now it has to be separate
608 * because we need to visit subplans before not after main plan.
609 */
610 if (glob->paramExecTypes != NIL)
611 {
612 Assert(list_length(glob->subplans) == list_length(glob->subroots));
613 forboth(lp, glob->subplans, lr, glob->subroots)
614 {
615 Plan *subplan = (Plan *) lfirst(lp);
617
618 SS_finalize_plan(subroot, subplan);
619 }
621 }
622
623 /* final cleanup of the plan */
624 Assert(glob->finalrtable == NIL);
625 Assert(glob->finalrteperminfos == NIL);
626 Assert(glob->finalrowmarks == NIL);
627 Assert(glob->resultRelations == NIL);
628 Assert(glob->appendRelations == NIL);
630 /* ... and the subplans (both regular subplans and initplans) */
631 Assert(list_length(glob->subplans) == list_length(glob->subroots));
632 forboth(lp, glob->subplans, lr, glob->subroots)
633 {
634 Plan *subplan = (Plan *) lfirst(lp);
636
637 lfirst(lp) = set_plan_references(subroot, subplan);
638 }
639
640 /* build the PlannedStmt result */
641 result = makeNode(PlannedStmt);
642
643 result->commandType = parse->commandType;
644 result->queryId = parse->queryId;
646 result->hasReturning = (parse->returningList != NIL);
647 result->hasModifyingCTE = parse->hasModifyingCTE;
648 result->canSetTag = parse->canSetTag;
649 result->transientPlan = glob->transientPlan;
650 result->dependsOnRole = glob->dependsOnRole;
651 result->parallelModeNeeded = glob->parallelModeNeeded;
652 result->planTree = top_plan;
653 result->partPruneInfos = glob->partPruneInfos;
654 result->rtable = glob->finalrtable;
655 result->unprunableRelids = bms_difference(glob->allRelids,
656 glob->prunableRelids);
657 result->permInfos = glob->finalrteperminfos;
658 result->subrtinfos = glob->subrtinfos;
659 result->resultRelations = glob->resultRelations;
660 result->appendRelations = glob->appendRelations;
661 result->subplans = glob->subplans;
662 result->rewindPlanIDs = glob->rewindPlanIDs;
663 result->rowMarks = glob->finalrowmarks;
664 result->relationOids = glob->relationOids;
665 result->invalItems = glob->invalItems;
666 result->paramExecTypes = glob->paramExecTypes;
667 /* utilityStmt should be null, but we might as well copy it */
668 result->utilityStmt = parse->utilityStmt;
669 result->elidedNodes = glob->elidedNodes;
670 result->stmt_location = parse->stmt_location;
671 result->stmt_len = parse->stmt_len;
672
673 result->jitFlags = PGJIT_NONE;
674 if (jit_enabled && jit_above_cost >= 0 &&
675 top_plan->total_cost > jit_above_cost)
676 {
677 result->jitFlags |= PGJIT_PERFORM;
678
679 /*
680 * Decide how much effort should be put into generating better code.
681 */
682 if (jit_optimize_above_cost >= 0 &&
683 top_plan->total_cost > jit_optimize_above_cost)
684 result->jitFlags |= PGJIT_OPT3;
685 if (jit_inline_above_cost >= 0 &&
686 top_plan->total_cost > jit_inline_above_cost)
687 result->jitFlags |= PGJIT_INLINE;
688
689 /*
690 * Decide which operations should be JITed.
691 */
692 if (jit_expressions)
693 result->jitFlags |= PGJIT_EXPR;
695 result->jitFlags |= PGJIT_DEFORM;
696 }
697
698 /* Allow plugins to take control before we discard "glob" */
700 (*planner_shutdown_hook) (glob, parse, query_string, result);
701
702 if (glob->partition_directory != NULL)
703 DestroyPartitionDirectory(glob->partition_directory);
704
705 return result;
706}
Bitmapset * bms_difference(const Bitmapset *a, const Bitmapset *b)
Definition bitmapset.c:346
char max_parallel_hazard(Query *parse)
Definition clauses.c:743
bool enable_seqscan
Definition costsize.c:145
int max_parallel_workers_per_gather
Definition costsize.c:143
bool enable_memoize
Definition costsize.c:155
double parallel_setup_cost
Definition costsize.c:136
bool enable_gathermerge
Definition costsize.c:158
double parallel_tuple_cost
Definition costsize.c:135
bool enable_indexonlyscan
Definition costsize.c:147
bool enable_tidscan
Definition costsize.c:149
bool enable_material
Definition costsize.c:154
bool enable_hashjoin
Definition costsize.c:157
bool enable_mergejoin
Definition costsize.c:156
bool enable_partitionwise_join
Definition costsize.c:159
bool enable_nestloop
Definition costsize.c:153
bool enable_bitmapscan
Definition costsize.c:148
Plan * materialize_finished_plan(Plan *subplan)
Plan * create_plan(PlannerInfo *root, Path *best_path)
Definition createplan.c:338
bool ExecSupportsBackwardScan(Plan *node)
Definition execAmi.c:511
#define IsParallelWorker()
Definition parallel.h:60
double jit_optimize_above_cost
Definition jit.c:41
bool jit_enabled
Definition jit.c:32
bool jit_expressions
Definition jit.c:36
bool jit_tuple_deforming
Definition jit.c:38
double jit_above_cost
Definition jit.c:39
double jit_inline_above_cost
Definition jit.c:40
#define PGJIT_OPT3
Definition jit.h:21
#define PGJIT_NONE
Definition jit.h:19
#define PGJIT_EXPR
Definition jit.h:23
#define PGJIT_DEFORM
Definition jit.h:24
#define PGJIT_INLINE
Definition jit.h:22
#define PGJIT_PERFORM
Definition jit.h:20
@ DEBUG_PARALLEL_REGRESS
Definition optimizer.h:98
@ DEBUG_PARALLEL_OFF
Definition optimizer.h:96
#define CURSOR_OPT_SCROLL
#define CURSOR_OPT_FAST_PLAN
#define CURSOR_OPT_PARALLEL_OK
void DestroyPartitionDirectory(PartitionDirectory pdir)
Definition partdesc.c:484
#define PGS_NESTLOOP_MEMOIZE
Definition pathnodes.h:76
#define PGS_TIDSCAN
Definition pathnodes.h:70
#define PGS_FOREIGNJOIN
Definition pathnodes.h:71
#define PGS_APPEND
Definition pathnodes.h:78
#define PGS_MERGE_APPEND
Definition pathnodes.h:79
#define PGS_SEQSCAN
Definition pathnodes.h:66
#define PGS_CONSIDER_INDEXONLY
Definition pathnodes.h:82
#define PGS_NESTLOOP_MATERIALIZE
Definition pathnodes.h:75
#define PGS_MERGEJOIN_PLAIN
Definition pathnodes.h:72
#define PGS_MERGEJOIN_MATERIALIZE
Definition pathnodes.h:73
#define PGS_HASHJOIN
Definition pathnodes.h:77
#define PGS_CONSIDER_NONPARTIAL
Definition pathnodes.h:84
#define PGS_BITMAPSCAN
Definition pathnodes.h:69
#define PGS_GATHER
Definition pathnodes.h:80
#define PGS_CONSIDER_PARTITIONWISE
Definition pathnodes.h:83
#define PGS_GATHER_MERGE
Definition pathnodes.h:81
#define PGS_INDEXONLYSCAN
Definition pathnodes.h:68
#define PGS_INDEXSCAN
Definition pathnodes.h:67
#define PGS_NESTLOOP_PLAIN
Definition pathnodes.h:74
double cursor_tuple_fraction
Definition planner.c:68
planner_shutdown_hook_type planner_shutdown_hook
Definition planner.c:80
PlannerInfo * subquery_planner(PlannerGlobal *glob, Query *parse, char *plan_name, PlannerInfo *parent_root, bool hasRecursion, double tuple_fraction, SetOperationStmt *setops)
Definition planner.c:743
Path * get_cheapest_fractional_path(RelOptInfo *rel, double tuple_fraction)
Definition planner.c:6657
planner_setup_hook_type planner_setup_hook
Definition planner.c:77
int debug_parallel_query
Definition planner.c:69
@ PLAN_STMT_STANDARD
Definition plannodes.h:41
e
Plan * set_plan_references(PlannerInfo *root, Plan *plan)
Definition setrefs.c:291
struct Plan * planTree
Definition plannodes.h:101
bool hasModifyingCTE
Definition plannodes.h:83
List * appendRelations
Definition plannodes.h:127
List * elidedNodes
Definition plannodes.h:156
List * permInfos
Definition plannodes.h:120
bool canSetTag
Definition plannodes.h:86
List * rowMarks
Definition plannodes.h:141
Bitmapset * rewindPlanIDs
Definition plannodes.h:138
int64 queryId
Definition plannodes.h:71
ParseLoc stmt_len
Definition plannodes.h:171
PlannedStmtOrigin planOrigin
Definition plannodes.h:77
bool hasReturning
Definition plannodes.h:80
ParseLoc stmt_location
Definition plannodes.h:169
List * invalItems
Definition plannodes.h:147
bool transientPlan
Definition plannodes.h:89
List * resultRelations
Definition plannodes.h:124
List * subplans
Definition plannodes.h:132
List * relationOids
Definition plannodes.h:144
List * subrtinfos
Definition plannodes.h:135
bool dependsOnRole
Definition plannodes.h:92
Bitmapset * unprunableRelids
Definition plannodes.h:115
CmdType commandType
Definition plannodes.h:68
Node * utilityStmt
Definition plannodes.h:153
List * rtable
Definition plannodes.h:109
List * partPruneInfos
Definition plannodes.h:106
List * paramExecTypes
Definition plannodes.h:150
bool parallelModeNeeded
Definition plannodes.h:95
Bitmapset * prunableRelids
Definition pathnodes.h:206
char maxParallelHazard
Definition pathnodes.h:260
List * subplans
Definition pathnodes.h:178
bool dependsOnRole
Definition pathnodes.h:251
Bitmapset * allRelids
Definition pathnodes.h:199
List * appendRelations
Definition pathnodes.h:221
List * finalrowmarks
Definition pathnodes.h:215
List * paramExecTypes
Definition pathnodes.h:233
bool parallelModeOK
Definition pathnodes.h:254
bool transientPlan
Definition pathnodes.h:248
Bitmapset * rewindPlanIDs
Definition pathnodes.h:190
List * finalrteperminfos
Definition pathnodes.h:209
List * subpaths
Definition pathnodes.h:181
Index lastRowMarkId
Definition pathnodes.h:242
List * resultRelations
Definition pathnodes.h:218
List * partPruneInfos
Definition pathnodes.h:224
List * finalrtable
Definition pathnodes.h:193
uint64 default_pgs_mask
Definition pathnodes.h:263
bool parallelModeNeeded
Definition pathnodes.h:257
void SS_finalize_plan(PlannerInfo *root, Plan *plan)
Definition subselect.c:2404
void SS_compute_initplan_cost(List *init_plans, Cost *initplan_cost_p, bool *unsafe_initplans_p)
Definition subselect.c:2348

References PlannerGlobal::allRelids, PlannerGlobal::appendRelations, PlannedStmt::appendRelations, Assert, bms_difference(), PlannedStmt::canSetTag, CMD_SELECT, PlannedStmt::commandType, create_plan(), CURSOR_OPT_FAST_PLAN, CURSOR_OPT_PARALLEL_OK, CURSOR_OPT_SCROLL, cursor_tuple_fraction, DEBUG_PARALLEL_OFF, debug_parallel_query, DEBUG_PARALLEL_REGRESS, PlannerGlobal::default_pgs_mask, PlannerGlobal::dependsOnRole, PlannedStmt::dependsOnRole, DestroyPartitionDirectory(), PlannerGlobal::elidedNodes, PlannedStmt::elidedNodes, enable_bitmapscan, enable_gathermerge, enable_hashjoin, enable_indexonlyscan, enable_indexscan, enable_material, enable_memoize, enable_mergejoin, enable_nestloop, enable_partitionwise_join, enable_seqscan, enable_tidscan, ExecSupportsBackwardScan(), fb(), fetch_upper_rel(), PlannerGlobal::finalrowmarks, PlannerGlobal::finalrtable, PlannerGlobal::finalrteperminfos, forboth, get_cheapest_fractional_path(), PlannedStmt::hasModifyingCTE, PlannedStmt::hasReturning, PlannerGlobal::invalItems, PlannedStmt::invalItems, IsParallelWorker, IsUnderPostmaster, jit_above_cost, jit_enabled, jit_expressions, jit_inline_above_cost, jit_optimize_above_cost, jit_tuple_deforming, PlannedStmt::jitFlags, PlannerGlobal::lastPHId, PlannerGlobal::lastPlanNodeId, PlannerGlobal::lastRowMarkId, lfirst, lfirst_node, list_length(), makeNode, materialize_finished_plan(), max_parallel_hazard(), max_parallel_workers_per_gather, PlannerGlobal::maxParallelHazard, NIL, parallel_setup_cost, parallel_tuple_cost, PlannerGlobal::parallelModeNeeded, PlannedStmt::parallelModeNeeded, PlannerGlobal::parallelModeOK, PlannerGlobal::paramExecTypes, PlannedStmt::paramExecTypes, parse(), PlannerGlobal::partPruneInfos, PlannedStmt::partPruneInfos, PlannedStmt::permInfos, PGJIT_DEFORM, PGJIT_EXPR, PGJIT_INLINE, PGJIT_NONE, PGJIT_OPT3, PGJIT_PERFORM, PGS_APPEND, PGS_BITMAPSCAN, PGS_CONSIDER_INDEXONLY, PGS_CONSIDER_NONPARTIAL, PGS_CONSIDER_PARTITIONWISE, PGS_FOREIGNJOIN, PGS_GATHER, PGS_GATHER_MERGE, PGS_HASHJOIN, PGS_INDEXONLYSCAN, PGS_INDEXSCAN, PGS_MERGE_APPEND, PGS_MERGEJOIN_MATERIALIZE, PGS_MERGEJOIN_PLAIN, PGS_NESTLOOP_MATERIALIZE, PGS_NESTLOOP_MEMOIZE, PGS_NESTLOOP_PLAIN, PGS_SEQSCAN, PGS_TIDSCAN, PLAN_STMT_STANDARD, planner_setup_hook, planner_shutdown_hook, PlannedStmt::planOrigin, PlannedStmt::planTree, PlannerGlobal::prunableRelids, PlannedStmt::queryId, PlannerGlobal::relationOids, PlannedStmt::relationOids, PlannerGlobal::resultRelations, PlannedStmt::resultRelations, PlannerGlobal::rewindPlanIDs, PlannedStmt::rewindPlanIDs, root, PlannedStmt::rowMarks, PlannedStmt::rtable, set_plan_references(), SS_compute_initplan_cost(), SS_finalize_plan(), PlannedStmt::stmt_len, PlannedStmt::stmt_location, PlannerGlobal::subpaths, PlannerGlobal::subplans, PlannedStmt::subplans, subquery_planner(), PlannerGlobal::subrtinfos, PlannedStmt::subrtinfos, PlannerGlobal::transientPlan, PlannedStmt::transientPlan, PlannedStmt::unprunableRelids, UPPERREL_FINAL, and PlannedStmt::utilityStmt.

Referenced by delay_execution_planner(), pgss_planner(), and planner().

◆ standard_qp_callback()

static void standard_qp_callback ( PlannerInfo root,
void extra 
)
static

Definition at line 3532 of file planner.c.

3533{
3534 Query *parse = root->parse;
3536 List *tlist = root->processed_tlist;
3537 List *activeWindows = qp_extra->activeWindows;
3538
3539 /*
3540 * Calculate pathkeys that represent grouping/ordering and/or ordered
3541 * aggregate requirements.
3542 */
3543 if (qp_extra->gset_data)
3544 {
3545 /*
3546 * With grouping sets, just use the first RollupData's groupClause. We
3547 * don't make any effort to optimize grouping clauses when there are
3548 * grouping sets, nor can we combine aggregate ordering keys with
3549 * grouping.
3550 */
3551 List *rollups = qp_extra->gset_data->rollups;
3552 List *groupClause = (rollups ? linitial_node(RollupData, rollups)->groupClause : NIL);
3553
3554 if (grouping_is_sortable(groupClause))
3555 {
3556 bool sortable;
3557
3558 /*
3559 * The groupClause is logically below the grouping step. So if
3560 * there is an RTE entry for the grouping step, we need to remove
3561 * its RT index from the sort expressions before we make PathKeys
3562 * for them.
3563 */
3564 root->group_pathkeys =
3566 &groupClause,
3567 tlist,
3568 false,
3569 parse->hasGroupRTE,
3570 &sortable,
3571 false);
3573 root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3574 }
3575 else
3576 {
3577 root->group_pathkeys = NIL;
3578 root->num_groupby_pathkeys = 0;
3579 }
3580 }
3581 else if (parse->groupClause || root->numOrderedAggs > 0)
3582 {
3583 /*
3584 * With a plain GROUP BY list, we can remove any grouping items that
3585 * are proven redundant by EquivalenceClass processing. For example,
3586 * we can remove y given "WHERE x = y GROUP BY x, y". These aren't
3587 * especially common cases, but they're nearly free to detect. Note
3588 * that we remove redundant items from processed_groupClause but not
3589 * the original parse->groupClause.
3590 */
3591 bool sortable;
3592
3593 /*
3594 * Convert group clauses into pathkeys. Set the ec_sortref field of
3595 * EquivalenceClass'es if it's not set yet.
3596 */
3597 root->group_pathkeys =
3599 &root->processed_groupClause,
3600 tlist,
3601 true,
3602 false,
3603 &sortable,
3604 true);
3605 if (!sortable)
3606 {
3607 /* Can't sort; no point in considering aggregate ordering either */
3608 root->group_pathkeys = NIL;
3609 root->num_groupby_pathkeys = 0;
3610 }
3611 else
3612 {
3613 root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3614 /* If we have ordered aggs, consider adding onto group_pathkeys */
3615 if (root->numOrderedAggs > 0)
3617 }
3618 }
3619 else
3620 {
3621 root->group_pathkeys = NIL;
3622 root->num_groupby_pathkeys = 0;
3623 }
3624
3625 /* We consider only the first (bottom) window in pathkeys logic */
3626 if (activeWindows != NIL)
3627 {
3628 WindowClause *wc = linitial_node(WindowClause, activeWindows);
3629
3630 root->window_pathkeys = make_pathkeys_for_window(root,
3631 wc,
3632 tlist);
3633 }
3634 else
3635 root->window_pathkeys = NIL;
3636
3637 /*
3638 * As with GROUP BY, we can discard any DISTINCT items that are proven
3639 * redundant by EquivalenceClass processing. The non-redundant list is
3640 * kept in root->processed_distinctClause, leaving the original
3641 * parse->distinctClause alone.
3642 */
3643 if (parse->distinctClause)
3644 {
3645 bool sortable;
3646
3647 /* Make a copy since pathkey processing can modify the list */
3648 root->processed_distinctClause = list_copy(parse->distinctClause);
3649 root->distinct_pathkeys =
3651 &root->processed_distinctClause,
3652 tlist,
3653 true,
3654 false,
3655 &sortable,
3656 false);
3657 if (!sortable)
3658 root->distinct_pathkeys = NIL;
3659 }
3660 else
3661 root->distinct_pathkeys = NIL;
3662
3663 root->sort_pathkeys =
3665 parse->sortClause,
3666 tlist);
3667
3668 /* setting setop_pathkeys might be useful to the union planner */
3669 if (qp_extra->setop != NULL)
3670 {
3671 List *groupClauses;
3672 bool sortable;
3673
3674 groupClauses = generate_setop_child_grouplist(qp_extra->setop, tlist);
3675
3676 root->setop_pathkeys =
3678 &groupClauses,
3679 tlist,
3680 false,
3681 false,
3682 &sortable,
3683 false);
3684 if (!sortable)
3685 root->setop_pathkeys = NIL;
3686 }
3687 else
3688 root->setop_pathkeys = NIL;
3689
3690 /*
3691 * Figure out whether we want a sorted result from query_planner.
3692 *
3693 * If we have a sortable GROUP BY clause, then we want a result sorted
3694 * properly for grouping. Otherwise, if we have window functions to
3695 * evaluate, we try to sort for the first window. Otherwise, if there's a
3696 * sortable DISTINCT clause that's more rigorous than the ORDER BY clause,
3697 * we try to produce output that's sufficiently well sorted for the
3698 * DISTINCT. Otherwise, if there is an ORDER BY clause, we want to sort
3699 * by the ORDER BY clause. Otherwise, if we're a subquery being planned
3700 * for a set operation which can benefit from presorted results and have a
3701 * sortable targetlist, we want to sort by the target list.
3702 *
3703 * Note: if we have both ORDER BY and GROUP BY, and ORDER BY is a superset
3704 * of GROUP BY, it would be tempting to request sort by ORDER BY --- but
3705 * that might just leave us failing to exploit an available sort order at
3706 * all. Needs more thought. The choice for DISTINCT versus ORDER BY is
3707 * much easier, since we know that the parser ensured that one is a
3708 * superset of the other.
3709 */
3710 if (root->group_pathkeys)
3711 root->query_pathkeys = root->group_pathkeys;
3712 else if (root->window_pathkeys)
3713 root->query_pathkeys = root->window_pathkeys;
3714 else if (list_length(root->distinct_pathkeys) >
3715 list_length(root->sort_pathkeys))
3716 root->query_pathkeys = root->distinct_pathkeys;
3717 else if (root->sort_pathkeys)
3718 root->query_pathkeys = root->sort_pathkeys;
3719 else if (root->setop_pathkeys != NIL)
3720 root->query_pathkeys = root->setop_pathkeys;
3721 else
3722 root->query_pathkeys = NIL;
3723}
static void adjust_group_pathkeys_for_groupagg(PlannerInfo *root)
Definition planner.c:3308
static List * generate_setop_child_grouplist(SetOperationStmt *op, List *targetlist)
Definition planner.c:8416

References adjust_group_pathkeys_for_groupagg(), Assert, fb(), generate_setop_child_grouplist(), grouping_is_sortable(), linitial_node, list_copy(), list_length(), make_pathkeys_for_sortclauses(), make_pathkeys_for_sortclauses_extended(), make_pathkeys_for_window(), NIL, parse(), and root.

Referenced by grouping_planner().

◆ subquery_planner()

PlannerInfo * subquery_planner ( PlannerGlobal glob,
Query parse,
char plan_name,
PlannerInfo parent_root,
bool  hasRecursion,
double  tuple_fraction,
SetOperationStmt setops 
)

Definition at line 743 of file planner.c.

746{
750 bool hasOuterJoins;
751 bool hasResultRTEs;
753 ListCell *l;
754
755 /* Create a PlannerInfo data structure for this subquery */
757 root->parse = parse;
758 root->glob = glob;
759 root->query_level = parent_root ? parent_root->query_level + 1 : 1;
760 root->plan_name = plan_name;
761 root->parent_root = parent_root;
762 root->plan_params = NIL;
763 root->outer_params = NULL;
764 root->planner_cxt = CurrentMemoryContext;
765 root->init_plans = NIL;
766 root->cte_plan_ids = NIL;
767 root->multiexpr_params = NIL;
768 root->join_domains = NIL;
769 root->eq_classes = NIL;
770 root->ec_merging_done = false;
771 root->last_rinfo_serial = 0;
772 root->all_result_relids =
773 parse->resultRelation ? bms_make_singleton(parse->resultRelation) : NULL;
774 root->leaf_result_relids = NULL; /* we'll find out leaf-ness later */
775 root->append_rel_list = NIL;
776 root->row_identity_vars = NIL;
777 root->rowMarks = NIL;
778 memset(root->upper_rels, 0, sizeof(root->upper_rels));
779 memset(root->upper_targets, 0, sizeof(root->upper_targets));
780 root->processed_groupClause = NIL;
781 root->processed_distinctClause = NIL;
782 root->processed_tlist = NIL;
783 root->update_colnos = NIL;
784 root->grouping_map = NULL;
785 root->minmax_aggs = NIL;
786 root->qual_security_level = 0;
787 root->hasPseudoConstantQuals = false;
788 root->hasAlternativeSubPlans = false;
789 root->placeholdersFrozen = false;
790 root->hasRecursion = hasRecursion;
791 root->assumeReplanning = false;
792 if (hasRecursion)
793 root->wt_param_id = assign_special_exec_param(root);
794 else
795 root->wt_param_id = -1;
796 root->non_recursive_path = NULL;
797
798 /*
799 * Create the top-level join domain. This won't have valid contents until
800 * deconstruct_jointree fills it in, but the node needs to exist before
801 * that so we can build EquivalenceClasses referencing it.
802 */
803 root->join_domains = list_make1(makeNode(JoinDomain));
804
805 /*
806 * If there is a WITH list, process each WITH query and either convert it
807 * to RTE_SUBQUERY RTE(s) or build an initplan SubPlan structure for it.
808 */
809 if (parse->cteList)
811
812 /*
813 * If it's a MERGE command, transform the joinlist as appropriate.
814 */
816
817 /*
818 * Scan the rangetable for relation RTEs and retrieve the necessary
819 * catalog information for each relation. Using this information, clear
820 * the inh flag for any relation that has no children, collect not-null
821 * attribute numbers for any relation that has column not-null
822 * constraints, and expand virtual generated columns for any relation that
823 * contains them. Note that this step does not descend into sublinks and
824 * subqueries; if we pull up any sublinks or subqueries below, their
825 * relation RTEs are processed just before pulling them up.
826 */
828
829 /*
830 * If the FROM clause is empty, replace it with a dummy RTE_RESULT RTE, so
831 * that we don't need so many special cases to deal with that situation.
832 */
834
835 /*
836 * Look for ANY and EXISTS SubLinks in WHERE and JOIN/ON clauses, and try
837 * to transform them into joins. Note that this step does not descend
838 * into subqueries; if we pull up any subqueries below, their SubLinks are
839 * processed just before pulling them up.
840 */
841 if (parse->hasSubLinks)
843
844 /*
845 * Scan the rangetable for function RTEs, do const-simplification on them,
846 * and then inline them if possible (producing subqueries that might get
847 * pulled up next). Recursion issues here are handled in the same way as
848 * for SubLinks.
849 */
851
852 /*
853 * Check to see if any subqueries in the jointree can be merged into this
854 * query.
855 */
857
858 /*
859 * If this is a simple UNION ALL query, flatten it into an appendrel. We
860 * do this now because it requires applying pull_up_subqueries to the leaf
861 * queries of the UNION ALL, which weren't touched above because they
862 * weren't referenced by the jointree (they will be after we do this).
863 */
864 if (parse->setOperations)
866
867 /*
868 * Survey the rangetable to see what kinds of entries are present. We can
869 * skip some later processing if relevant SQL features are not used; for
870 * example if there are no JOIN RTEs we can avoid the expense of doing
871 * flatten_join_alias_vars(). This must be done after we have finished
872 * adding rangetable entries, of course. (Note: actually, processing of
873 * inherited or partitioned rels can cause RTEs for their child tables to
874 * get added later; but those must all be RTE_RELATION entries, so they
875 * don't invalidate the conclusions drawn here.)
876 */
877 root->hasJoinRTEs = false;
878 root->hasLateralRTEs = false;
879 root->group_rtindex = 0;
880 hasOuterJoins = false;
881 hasResultRTEs = false;
882 foreach(l, parse->rtable)
883 {
885
886 switch (rte->rtekind)
887 {
888 case RTE_JOIN:
889 root->hasJoinRTEs = true;
890 if (IS_OUTER_JOIN(rte->jointype))
891 hasOuterJoins = true;
892 break;
893 case RTE_RESULT:
894 hasResultRTEs = true;
895 break;
896 case RTE_GROUP:
897 Assert(parse->hasGroupRTE);
898 root->group_rtindex = list_cell_number(parse->rtable, l) + 1;
899 break;
900 default:
901 /* No work here for other RTE types */
902 break;
903 }
904
905 if (rte->lateral)
906 root->hasLateralRTEs = true;
907
908 /*
909 * We can also determine the maximum security level required for any
910 * securityQuals now. Addition of inheritance-child RTEs won't affect
911 * this, because child tables don't have their own securityQuals; see
912 * expand_single_inheritance_child().
913 */
914 if (rte->securityQuals)
915 root->qual_security_level = Max(root->qual_security_level,
916 list_length(rte->securityQuals));
917 }
918
919 /*
920 * If we have now verified that the query target relation is
921 * non-inheriting, mark it as a leaf target.
922 */
923 if (parse->resultRelation)
924 {
925 RangeTblEntry *rte = rt_fetch(parse->resultRelation, parse->rtable);
926
927 if (!rte->inh)
928 root->leaf_result_relids =
929 bms_make_singleton(parse->resultRelation);
930 }
931
932 /*
933 * This would be a convenient time to check access permissions for all
934 * relations mentioned in the query, since it would be better to fail now,
935 * before doing any detailed planning. However, for historical reasons,
936 * we leave this to be done at executor startup.
937 *
938 * Note, however, that we do need to check access permissions for any view
939 * relations mentioned in the query, in order to prevent information being
940 * leaked by selectivity estimation functions, which only check view owner
941 * permissions on underlying tables (see all_rows_selectable() and its
942 * callers). This is a little ugly, because it means that access
943 * permissions for views will be checked twice, which is another reason
944 * why it would be better to do all the ACL checks here.
945 */
946 foreach(l, parse->rtable)
947 {
949
950 if (rte->perminfoindex != 0 &&
951 rte->relkind == RELKIND_VIEW)
952 {
954 bool result;
955
956 perminfo = getRTEPermissionInfo(parse->rteperminfos, rte);
958 if (!result)
960 get_rel_name(perminfo->relid));
961 }
962 }
963
964 /*
965 * Preprocess RowMark information. We need to do this after subquery
966 * pullup, so that all base relations are present.
967 */
969
970 /*
971 * Set hasHavingQual to remember if HAVING clause is present. Needed
972 * because preprocess_expression will reduce a constant-true condition to
973 * an empty qual list ... but "HAVING TRUE" is not a semantic no-op.
974 */
975 root->hasHavingQual = (parse->havingQual != NULL);
976
977 /*
978 * Do expression preprocessing on targetlist and quals, as well as other
979 * random expressions in the querytree. Note that we do not need to
980 * handle sort/group expressions explicitly, because they are actually
981 * part of the targetlist.
982 */
983 parse->targetList = (List *)
984 preprocess_expression(root, (Node *) parse->targetList,
986
988 foreach(l, parse->withCheckOptions)
989 {
991
992 wco->qual = preprocess_expression(root, wco->qual,
994 if (wco->qual != NULL)
996 }
997 parse->withCheckOptions = newWithCheckOptions;
998
999 parse->returningList = (List *)
1000 preprocess_expression(root, (Node *) parse->returningList,
1002
1003 preprocess_qual_conditions(root, (Node *) parse->jointree);
1004
1005 parse->havingQual = preprocess_expression(root, parse->havingQual,
1007
1008 foreach(l, parse->windowClause)
1009 {
1011
1012 /* partitionClause/orderClause are sort/group expressions */
1017 }
1018
1019 parse->limitOffset = preprocess_expression(root, parse->limitOffset,
1021 parse->limitCount = preprocess_expression(root, parse->limitCount,
1023
1024 if (parse->onConflict)
1025 {
1026 parse->onConflict->arbiterElems = (List *)
1028 (Node *) parse->onConflict->arbiterElems,
1030 parse->onConflict->arbiterWhere =
1032 parse->onConflict->arbiterWhere,
1034 parse->onConflict->onConflictSet = (List *)
1036 (Node *) parse->onConflict->onConflictSet,
1038 parse->onConflict->onConflictWhere =
1040 parse->onConflict->onConflictWhere,
1042 /* exclRelTlist contains only Vars, so no preprocessing needed */
1043 }
1044
1045 foreach(l, parse->mergeActionList)
1046 {
1048
1049 action->targetList = (List *)
1051 (Node *) action->targetList,
1053 action->qual =
1055 (Node *) action->qual,
1057 }
1058
1059 parse->mergeJoinCondition =
1060 preprocess_expression(root, parse->mergeJoinCondition, EXPRKIND_QUAL);
1061
1062 root->append_rel_list = (List *)
1063 preprocess_expression(root, (Node *) root->append_rel_list,
1065
1066 /* Also need to preprocess expressions within RTEs */
1067 foreach(l, parse->rtable)
1068 {
1070 int kind;
1071 ListCell *lcsq;
1072
1073 if (rte->rtekind == RTE_RELATION)
1074 {
1075 if (rte->tablesample)
1076 rte->tablesample = (TableSampleClause *)
1078 (Node *) rte->tablesample,
1080 }
1081 else if (rte->rtekind == RTE_SUBQUERY)
1082 {
1083 /*
1084 * We don't want to do all preprocessing yet on the subquery's
1085 * expressions, since that will happen when we plan it. But if it
1086 * contains any join aliases of our level, those have to get
1087 * expanded now, because planning of the subquery won't do it.
1088 * That's only possible if the subquery is LATERAL.
1089 */
1090 if (rte->lateral && root->hasJoinRTEs)
1091 rte->subquery = (Query *)
1093 (Node *) rte->subquery);
1094 }
1095 else if (rte->rtekind == RTE_FUNCTION)
1096 {
1097 /* Preprocess the function expression(s) fully */
1098 kind = rte->lateral ? EXPRKIND_RTFUNC_LATERAL : EXPRKIND_RTFUNC;
1099 rte->functions = (List *)
1100 preprocess_expression(root, (Node *) rte->functions, kind);
1101 }
1102 else if (rte->rtekind == RTE_TABLEFUNC)
1103 {
1104 /* Preprocess the function expression(s) fully */
1106 rte->tablefunc = (TableFunc *)
1107 preprocess_expression(root, (Node *) rte->tablefunc, kind);
1108 }
1109 else if (rte->rtekind == RTE_VALUES)
1110 {
1111 /* Preprocess the values lists fully */
1112 kind = rte->lateral ? EXPRKIND_VALUES_LATERAL : EXPRKIND_VALUES;
1113 rte->values_lists = (List *)
1114 preprocess_expression(root, (Node *) rte->values_lists, kind);
1115 }
1116 else if (rte->rtekind == RTE_GROUP)
1117 {
1118 /* Preprocess the groupexprs list fully */
1119 rte->groupexprs = (List *)
1120 preprocess_expression(root, (Node *) rte->groupexprs,
1122 }
1123
1124 /*
1125 * Process each element of the securityQuals list as if it were a
1126 * separate qual expression (as indeed it is). We need to do it this
1127 * way to get proper canonicalization of AND/OR structure. Note that
1128 * this converts each element into an implicit-AND sublist.
1129 */
1130 foreach(lcsq, rte->securityQuals)
1131 {
1133 (Node *) lfirst(lcsq),
1135 }
1136 }
1137
1138 /*
1139 * Now that we are done preprocessing expressions, and in particular done
1140 * flattening join alias variables, get rid of the joinaliasvars lists.
1141 * They no longer match what expressions in the rest of the tree look
1142 * like, because we have not preprocessed expressions in those lists (and
1143 * do not want to; for example, expanding a SubLink there would result in
1144 * a useless unreferenced subplan). Leaving them in place simply creates
1145 * a hazard for later scans of the tree. We could try to prevent that by
1146 * using QTW_IGNORE_JOINALIASES in every tree scan done after this point,
1147 * but that doesn't sound very reliable.
1148 */
1149 if (root->hasJoinRTEs)
1150 {
1151 foreach(l, parse->rtable)
1152 {
1154
1155 rte->joinaliasvars = NIL;
1156 }
1157 }
1158
1159 /*
1160 * Replace any Vars in the subquery's targetlist and havingQual that
1161 * reference GROUP outputs with the underlying grouping expressions.
1162 *
1163 * Note that we need to perform this replacement after we've preprocessed
1164 * the grouping expressions. This is to ensure that there is only one
1165 * instance of SubPlan for each SubLink contained within the grouping
1166 * expressions.
1167 */
1168 if (parse->hasGroupRTE)
1169 {
1170 parse->targetList = (List *)
1171 flatten_group_exprs(root, root->parse, (Node *) parse->targetList);
1172 parse->havingQual =
1173 flatten_group_exprs(root, root->parse, parse->havingQual);
1174 }
1175
1176 /* Constant-folding might have removed all set-returning functions */
1177 if (parse->hasTargetSRFs)
1178 parse->hasTargetSRFs = expression_returns_set((Node *) parse->targetList);
1179
1180 /*
1181 * If we have grouping sets, expand the groupingSets tree of this query to
1182 * a flat list of grouping sets. We need to do this before optimizing
1183 * HAVING, since we can't easily tell if there's an empty grouping set
1184 * until we have this representation.
1185 */
1186 if (parse->groupingSets)
1187 {
1188 parse->groupingSets =
1189 expand_grouping_sets(parse->groupingSets, parse->groupDistinct, -1);
1190 }
1191
1192 /*
1193 * In some cases we may want to transfer a HAVING clause into WHERE. We
1194 * cannot do so if the HAVING clause contains aggregates (obviously) or
1195 * volatile functions (since a HAVING clause is supposed to be executed
1196 * only once per group). We also can't do this if there are any grouping
1197 * sets and the clause references any columns that are nullable by the
1198 * grouping sets; the nulled values of those columns are not available
1199 * before the grouping step. (The test on groupClause might seem wrong,
1200 * but it's okay: it's just an optimization to avoid running pull_varnos
1201 * when there cannot be any Vars in the HAVING clause.)
1202 *
1203 * Also, it may be that the clause is so expensive to execute that we're
1204 * better off doing it only once per group, despite the loss of
1205 * selectivity. This is hard to estimate short of doing the entire
1206 * planning process twice, so we use a heuristic: clauses containing
1207 * subplans are left in HAVING. Otherwise, we move or copy the HAVING
1208 * clause into WHERE, in hopes of eliminating tuples before aggregation
1209 * instead of after.
1210 *
1211 * If the query has no empty grouping set then we can simply move such a
1212 * clause into WHERE; any group that fails the clause will not be in the
1213 * output because none of its tuples will reach the grouping or
1214 * aggregation stage. Otherwise we have to keep the clause in HAVING to
1215 * ensure that we don't emit a bogus aggregated row. But then the HAVING
1216 * clause must be degenerate (variable-free), so we can copy it into WHERE
1217 * so that query_planner() can use it in a gating Result node. (This could
1218 * be done better, but it seems not worth optimizing.)
1219 *
1220 * Note that a HAVING clause may contain expressions that are not fully
1221 * preprocessed. This can happen if these expressions are part of
1222 * grouping items. In such cases, they are replaced with GROUP Vars in
1223 * the parser and then replaced back after we're done with expression
1224 * preprocessing on havingQual. This is not an issue if the clause
1225 * remains in HAVING, because these expressions will be matched to lower
1226 * target items in setrefs.c. However, if the clause is moved or copied
1227 * into WHERE, we need to ensure that these expressions are fully
1228 * preprocessed.
1229 *
1230 * Note that both havingQual and parse->jointree->quals are in
1231 * implicitly-ANDed-list form at this point, even though they are declared
1232 * as Node *.
1233 */
1234 newHaving = NIL;
1235 foreach(l, (List *) parse->havingQual)
1236 {
1237 Node *havingclause = (Node *) lfirst(l);
1238
1242 (parse->groupClause && parse->groupingSets &&
1243 bms_is_member(root->group_rtindex, pull_varnos(root, havingclause))))
1244 {
1245 /* keep it in HAVING */
1247 }
1248 else if (parse->groupClause &&
1249 (parse->groupingSets == NIL ||
1250 (List *) linitial(parse->groupingSets) != NIL))
1251 {
1252 /* There is GROUP BY, but no empty grouping set */
1254
1255 /* Preprocess the HAVING clause fully */
1258 /* ... and move it to WHERE */
1259 parse->jointree->quals = (Node *)
1260 list_concat((List *) parse->jointree->quals,
1261 (List *) whereclause);
1262 }
1263 else
1264 {
1265 /* There is an empty grouping set (perhaps implicitly) */
1267
1268 /* Preprocess the HAVING clause fully */
1271 /* ... and put a copy in WHERE */
1272 parse->jointree->quals = (Node *)
1273 list_concat((List *) parse->jointree->quals,
1274 (List *) whereclause);
1275 /* ... and also keep it in HAVING */
1277 }
1278 }
1279 parse->havingQual = (Node *) newHaving;
1280
1281 /*
1282 * If we have any outer joins, try to reduce them to plain inner joins.
1283 * This step is most easily done after we've done expression
1284 * preprocessing.
1285 */
1286 if (hasOuterJoins)
1288
1289 /*
1290 * If we have any RTE_RESULT relations, see if they can be deleted from
1291 * the jointree. We also rely on this processing to flatten single-child
1292 * FromExprs underneath outer joins. This step is most effectively done
1293 * after we've done expression preprocessing and outer join reduction.
1294 */
1297
1298 /*
1299 * Do the main planning.
1300 */
1301 grouping_planner(root, tuple_fraction, setops);
1302
1303 /*
1304 * Capture the set of outer-level param IDs we have access to, for use in
1305 * extParam/allParam calculations later.
1306 */
1308
1309 /*
1310 * If any initPlans were created in this query level, adjust the surviving
1311 * Paths' costs and parallel-safety flags to account for them. The
1312 * initPlans won't actually get attached to the plan tree till
1313 * create_plan() runs, but we must include their effects now.
1314 */
1317
1318 /*
1319 * Make sure we've identified the cheapest Path for the final rel. (By
1320 * doing this here not in grouping_planner, we include initPlan costs in
1321 * the decision, though it's unlikely that will change anything.)
1322 */
1324
1325 return root;
1326}
@ ACLCHECK_NO_PRIV
Definition acl.h:184
void aclcheck_error(AclResult aclerr, ObjectType objtype, const char *objectname)
Definition aclchk.c:2654
bool contain_agg_clause(Node *clause)
Definition clauses.c:190
bool contain_subplans(Node *clause)
Definition clauses.c:339
bool ExecCheckOneRelPerms(RTEPermissionInfo *perminfo)
Definition execMain.c:646
char * get_rel_name(Oid relid)
Definition lsyscache.c:2078
#define IS_OUTER_JOIN(jointype)
Definition nodes.h:348
List * expand_grouping_sets(List *groupingSets, bool groupDistinct, int limit)
Definition parse_agg.c:1947
RTEPermissionInfo * getRTEPermissionInfo(List *rteperminfos, RangeTblEntry *rte)
@ RTE_JOIN
@ RTE_VALUES
@ RTE_SUBQUERY
@ RTE_RESULT
@ RTE_FUNCTION
@ RTE_TABLEFUNC
@ RTE_GROUP
@ OBJECT_VIEW
static int list_cell_number(const List *l, const ListCell *c)
Definition pg_list.h:333
#define EXPRKIND_TABLEFUNC_LATERAL
Definition planner.c:99
#define EXPRKIND_APPINFO
Definition planner.c:94
static void preprocess_rowmarks(PlannerInfo *root)
Definition planner.c:2478
#define EXPRKIND_GROUPEXPR
Definition planner.c:100
#define EXPRKIND_RTFUNC_LATERAL
Definition planner.c:90
#define EXPRKIND_VALUES_LATERAL
Definition planner.c:92
#define EXPRKIND_LIMIT
Definition planner.c:93
static void grouping_planner(PlannerInfo *root, double tuple_fraction, SetOperationStmt *setops)
Definition planner.c:1514
#define EXPRKIND_ARBITER_ELEM
Definition planner.c:97
void preprocess_function_rtes(PlannerInfo *root)
void flatten_simple_union_all(PlannerInfo *root)
void transform_MERGE_to_join(Query *parse)
void remove_useless_result_rtes(PlannerInfo *root)
void pull_up_sublinks(PlannerInfo *root)
void replace_empty_jointree(Query *parse)
void pull_up_subqueries(PlannerInfo *root)
Query * preprocess_relation_rtes(PlannerInfo *root)
void reduce_outer_joins(PlannerInfo *root)
void SS_process_ctes(PlannerInfo *root)
Definition subselect.c:883
void SS_identify_outer_params(PlannerInfo *root)
Definition subselect.c:2220
void SS_charge_for_initplans(PlannerInfo *root, RelOptInfo *final_rel)
Definition subselect.c:2284
Node * flatten_group_exprs(PlannerInfo *root, Query *query, Node *node)
Definition var.c:972
Relids pull_varnos(PlannerInfo *root, Node *node)
Definition var.c:114

References aclcheck_error(), ACLCHECK_NO_PRIV, Assert, assign_special_exec_param(), bms_is_member(), bms_make_singleton(), contain_agg_clause(), contain_subplans(), contain_volatile_functions(), copyObject, CurrentMemoryContext, WindowClause::endOffset, ExecCheckOneRelPerms(), expand_grouping_sets(), expression_returns_set(), EXPRKIND_APPINFO, EXPRKIND_ARBITER_ELEM, EXPRKIND_GROUPEXPR, EXPRKIND_LIMIT, EXPRKIND_QUAL, EXPRKIND_RTFUNC, EXPRKIND_RTFUNC_LATERAL, EXPRKIND_TABLEFUNC, EXPRKIND_TABLEFUNC_LATERAL, EXPRKIND_TABLESAMPLE, EXPRKIND_TARGET, EXPRKIND_VALUES, EXPRKIND_VALUES_LATERAL, fb(), fetch_upper_rel(), flatten_group_exprs(), flatten_join_alias_vars(), flatten_simple_union_all(), get_rel_name(), getRTEPermissionInfo(), grouping_planner(), IS_OUTER_JOIN, lappend(), lfirst, lfirst_node, linitial, list_cell_number(), list_concat(), list_length(), list_make1, makeNode, Max, NIL, OBJECT_VIEW, parse(), preprocess_expression(), preprocess_function_rtes(), preprocess_qual_conditions(), preprocess_relation_rtes(), preprocess_rowmarks(), pull_up_sublinks(), pull_up_subqueries(), pull_varnos(), reduce_outer_joins(), remove_useless_result_rtes(), replace_empty_jointree(), root, rt_fetch, RTE_FUNCTION, RTE_GROUP, RTE_JOIN, RTE_RELATION, RTE_RESULT, RTE_SUBQUERY, RTE_TABLEFUNC, RTE_VALUES, set_cheapest(), SS_charge_for_initplans(), SS_identify_outer_params(), SS_process_ctes(), WindowClause::startOffset, transform_MERGE_to_join(), and UPPERREL_FINAL.

Referenced by make_subplan(), recurse_set_operations(), set_subquery_pathlist(), SS_process_ctes(), and standard_planner().

Variable Documentation

◆ create_upper_paths_hook

◆ cursor_tuple_fraction

double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION

Definition at line 68 of file planner.c.

Referenced by standard_planner().

◆ debug_parallel_query

int debug_parallel_query = DEBUG_PARALLEL_OFF

Definition at line 69 of file planner.c.

Referenced by ProcessParallelMessage(), query_planner(), and standard_planner().

◆ enable_distinct_reordering

bool enable_distinct_reordering = true

Definition at line 71 of file planner.c.

Referenced by get_useful_pathkeys_for_distinct().

◆ parallel_leader_participation

bool parallel_leader_participation = true

Definition at line 70 of file planner.c.

Referenced by ExecGather(), ExecGatherMerge(), ExecInitGather(), and get_parallel_divisor().

◆ planner_hook

planner_hook_type planner_hook = NULL

Definition at line 74 of file planner.c.

Referenced by _PG_init(), and planner().

◆ planner_setup_hook

planner_setup_hook_type planner_setup_hook = NULL

Definition at line 77 of file planner.c.

Referenced by standard_planner().

◆ planner_shutdown_hook

planner_shutdown_hook_type planner_shutdown_hook = NULL

Definition at line 80 of file planner.c.

Referenced by standard_planner().