PostgreSQL Source Code  git master
planner.c File Reference
#include "postgres.h"
#include <limits.h>
#include <math.h>
#include "access/genam.h"
#include "access/parallel.h"
#include "access/sysattr.h"
#include "access/table.h"
#include "catalog/pg_aggregate.h"
#include "catalog/pg_constraint.h"
#include "catalog/pg_inherits.h"
#include "catalog/pg_proc.h"
#include "catalog/pg_type.h"
#include "executor/executor.h"
#include "foreign/fdwapi.h"
#include "jit/jit.h"
#include "lib/bipartite_match.h"
#include "lib/knapsack.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "nodes/supportnodes.h"
#include "optimizer/appendinfo.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/optimizer.h"
#include "optimizer/paramassign.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/plancat.h"
#include "optimizer/planmain.h"
#include "optimizer/planner.h"
#include "optimizer/prep.h"
#include "optimizer/subselect.h"
#include "optimizer/tlist.h"
#include "parser/analyze.h"
#include "parser/parse_agg.h"
#include "parser/parse_clause.h"
#include "parser/parse_relation.h"
#include "parser/parsetree.h"
#include "partitioning/partdesc.h"
#include "rewrite/rewriteManip.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
#include "utils/selfuncs.h"
Include dependency graph for planner.c:


Data Structures

struct  grouping_sets_data
 
struct  WindowClauseSortData
 
struct  standard_qp_extra
 

Macros

#define EXPRKIND_QUAL   0
 
#define EXPRKIND_TARGET   1
 
#define EXPRKIND_RTFUNC   2
 
#define EXPRKIND_RTFUNC_LATERAL   3
 
#define EXPRKIND_VALUES   4
 
#define EXPRKIND_VALUES_LATERAL   5
 
#define EXPRKIND_LIMIT   6
 
#define EXPRKIND_APPINFO   7
 
#define EXPRKIND_PHV   8
 
#define EXPRKIND_TABLESAMPLE   9
 
#define EXPRKIND_ARBITER_ELEM   10
 
#define EXPRKIND_TABLEFUNC   11
 
#define EXPRKIND_TABLEFUNC_LATERAL   12
 
#define EXPRKIND_GROUPEXPR   13
 

Functions

static Node * preprocess_expression (PlannerInfo *root, Node *expr, int kind)
 
static void preprocess_qual_conditions (PlannerInfo *root, Node *jtnode)
 
static void grouping_planner (PlannerInfo *root, double tuple_fraction, SetOperationStmt *setops)
 
static grouping_sets_data * preprocess_grouping_sets (PlannerInfo *root)
 
static List * remap_to_groupclause_idx (List *groupClause, List *gsets, int *tleref_to_colnum_map)
 
static void preprocess_rowmarks (PlannerInfo *root)
 
static double preprocess_limit (PlannerInfo *root, double tuple_fraction, int64 *offset_est, int64 *count_est)
 
static void remove_useless_groupby_columns (PlannerInfo *root)
 
static List * preprocess_groupclause (PlannerInfo *root, List *force)
 
static List * extract_rollup_sets (List *groupingSets)
 
static List * reorder_grouping_sets (List *groupingSets, List *sortclause)
 
static void standard_qp_callback (PlannerInfo *root, void *extra)
 
static double get_number_of_groups (PlannerInfo *root, double path_rows, grouping_sets_data *gd, List *target_list)
 
static RelOptInfo * create_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, grouping_sets_data *gd)
 
static bool is_degenerate_grouping (PlannerInfo *root)
 
static void create_degenerate_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel)
 
static RelOptInfo * make_grouping_rel (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, Node *havingQual)
 
static void create_ordinary_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, GroupPathExtraData *extra, RelOptInfo **partially_grouped_rel_p)
 
static void consider_groupingsets_paths (PlannerInfo *root, RelOptInfo *grouped_rel, Path *path, bool is_sorted, bool can_hash, grouping_sets_data *gd, const AggClauseCosts *agg_costs, double dNumGroups)
 
static RelOptInfo * create_window_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *input_target, PathTarget *output_target, bool output_target_parallel_safe, WindowFuncLists *wflists, List *activeWindows)
 
static void create_one_window_path (PlannerInfo *root, RelOptInfo *window_rel, Path *path, PathTarget *input_target, PathTarget *output_target, WindowFuncLists *wflists, List *activeWindows)
 
static RelOptInfo * create_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target)
 
static void create_partial_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *final_distinct_rel, PathTarget *target)
 
static RelOptInfo * create_final_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *distinct_rel)
 
static List * get_useful_pathkeys_for_distinct (PlannerInfo *root, List *needed_pathkeys, List *path_pathkeys)
 
static RelOptInfo * create_ordered_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, double limit_tuples)
 
static PathTarget * make_group_input_target (PlannerInfo *root, PathTarget *final_target)
 
static PathTarget * make_partial_grouping_target (PlannerInfo *root, PathTarget *grouping_target, Node *havingQual)
 
static List * postprocess_setop_tlist (List *new_tlist, List *orig_tlist)
 
static void optimize_window_clauses (PlannerInfo *root, WindowFuncLists *wflists)
 
static List * select_active_windows (PlannerInfo *root, WindowFuncLists *wflists)
 
static PathTarget * make_window_input_target (PlannerInfo *root, PathTarget *final_target, List *activeWindows)
 
static List * make_pathkeys_for_window (PlannerInfo *root, WindowClause *wc, List *tlist)
 
static PathTarget * make_sort_input_target (PlannerInfo *root, PathTarget *final_target, bool *have_postponed_srfs)
 
static void adjust_paths_for_srfs (PlannerInfo *root, RelOptInfo *rel, List *targets, List *targets_contain_srfs)
 
static void add_paths_to_grouping_rel (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, double dNumGroups, GroupPathExtraData *extra)
 
static RelOptInfo * create_partial_grouping_paths (PlannerInfo *root, RelOptInfo *grouped_rel, RelOptInfo *input_rel, grouping_sets_data *gd, GroupPathExtraData *extra, bool force_rel_creation)
 
static Path * make_ordered_path (PlannerInfo *root, RelOptInfo *rel, Path *path, Path *cheapest_path, List *pathkeys, double limit_tuples)
 
static void gather_grouping_paths (PlannerInfo *root, RelOptInfo *rel)
 
static bool can_partial_agg (PlannerInfo *root)
 
static void apply_scanjoin_target_to_paths (PlannerInfo *root, RelOptInfo *rel, List *scanjoin_targets, List *scanjoin_targets_contain_srfs, bool scanjoin_target_parallel_safe, bool tlist_same_exprs)
 
static void create_partitionwise_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, PartitionwiseAggregateType patype, GroupPathExtraData *extra)
 
static bool group_by_has_partkey (RelOptInfo *input_rel, List *targetList, List *groupClause)
 
static int common_prefix_cmp (const void *a, const void *b)
 
static List * generate_setop_child_grouplist (SetOperationStmt *op, List *targetlist)
 
PlannedStmt * planner (Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams)
 
PlannedStmt * standard_planner (Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams)
 
PlannerInfo * subquery_planner (PlannerGlobal *glob, Query *parse, PlannerInfo *parent_root, bool hasRecursion, double tuple_fraction, SetOperationStmt *setops)
 
Expr * preprocess_phv_expression (PlannerInfo *root, Expr *expr)
 
RowMarkType select_rowmark_type (RangeTblEntry *rte, LockClauseStrength strength)
 
bool limit_needed (Query *parse)
 
static bool has_volatile_pathkey (List *keys)
 
static void adjust_group_pathkeys_for_groupagg (PlannerInfo *root)
 
void mark_partial_aggref (Aggref *agg, AggSplit aggsplit)
 
Path * get_cheapest_fractional_path (RelOptInfo *rel, double tuple_fraction)
 
Expr * expression_planner (Expr *expr)
 
Expr * expression_planner_with_deps (Expr *expr, List **relationOids, List **invalItems)
 
bool plan_cluster_use_sort (Oid tableOid, Oid indexOid)
 
int plan_create_index_workers (Oid tableOid, Oid indexOid)
 

Variables

double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION
 
int debug_parallel_query = DEBUG_PARALLEL_OFF
 
bool parallel_leader_participation = true
 
bool enable_distinct_reordering = true
 
planner_hook_type planner_hook = NULL
 
create_upper_paths_hook_type create_upper_paths_hook = NULL
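
The last two variables are extension hook points: planner() consults planner_hook and, when it is set, delegates to it instead of calling standard_planner() directly; create_upper_paths_hook is likewise invoked while upper-relation paths are built. Below is a minimal, hypothetical sketch of a module installing planner_hook as a pass-through wrapper; the names my_planner and prev_planner_hook are illustrative and not part of planner.c.

#include "postgres.h"

#include "fmgr.h"
#include "optimizer/planner.h"

PG_MODULE_MAGIC;

static planner_hook_type prev_planner_hook = NULL;

/* Pass-through wrapper: inspect the Query, then delegate. */
static PlannedStmt *
my_planner(Query *parse, const char *query_string,
           int cursorOptions, ParamListInfo boundParams)
{
    /* ... examine or adjust *parse here ... */

    if (prev_planner_hook != NULL)
        return prev_planner_hook(parse, query_string,
                                 cursorOptions, boundParams);
    return standard_planner(parse, query_string,
                            cursorOptions, boundParams);
}

void
_PG_init(void)
{
    prev_planner_hook = planner_hook;
    planner_hook = my_planner;
}

Chaining to any previously installed hook, rather than calling standard_planner() unconditionally, keeps such a module compatible with other extensions that also hook the planner.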
 

Macro Definition Documentation

◆ EXPRKIND_APPINFO

#define EXPRKIND_APPINFO   7

Definition at line 87 of file planner.c.

◆ EXPRKIND_ARBITER_ELEM

#define EXPRKIND_ARBITER_ELEM   10

Definition at line 90 of file planner.c.

◆ EXPRKIND_GROUPEXPR

#define EXPRKIND_GROUPEXPR   13

Definition at line 93 of file planner.c.

◆ EXPRKIND_LIMIT

#define EXPRKIND_LIMIT   6

Definition at line 86 of file planner.c.

◆ EXPRKIND_PHV

#define EXPRKIND_PHV   8

Definition at line 88 of file planner.c.

◆ EXPRKIND_QUAL

#define EXPRKIND_QUAL   0

Definition at line 80 of file planner.c.

◆ EXPRKIND_RTFUNC

#define EXPRKIND_RTFUNC   2

Definition at line 82 of file planner.c.

◆ EXPRKIND_RTFUNC_LATERAL

#define EXPRKIND_RTFUNC_LATERAL   3

Definition at line 83 of file planner.c.

◆ EXPRKIND_TABLEFUNC

#define EXPRKIND_TABLEFUNC   11

Definition at line 91 of file planner.c.

◆ EXPRKIND_TABLEFUNC_LATERAL

#define EXPRKIND_TABLEFUNC_LATERAL   12

Definition at line 92 of file planner.c.

◆ EXPRKIND_TABLESAMPLE

#define EXPRKIND_TABLESAMPLE   9

Definition at line 89 of file planner.c.

◆ EXPRKIND_TARGET

#define EXPRKIND_TARGET   1

Definition at line 81 of file planner.c.

◆ EXPRKIND_VALUES

#define EXPRKIND_VALUES   4

Definition at line 84 of file planner.c.

◆ EXPRKIND_VALUES_LATERAL

#define EXPRKIND_VALUES_LATERAL   5

Definition at line 85 of file planner.c.

Function Documentation

◆ add_paths_to_grouping_rel()

static void add_paths_to_grouping_rel ( PlannerInfo * root,
RelOptInfo * input_rel,
RelOptInfo * grouped_rel,
RelOptInfo * partially_grouped_rel,
const AggClauseCosts * agg_costs,
grouping_sets_data * gd,
double  dNumGroups,
GroupPathExtraData * extra 
)
static

Definition at line 7069 of file planner.c.

7075 {
7076  Query *parse = root->parse;
7077  Path *cheapest_path = input_rel->cheapest_total_path;
7078  ListCell *lc;
7079  bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7080  bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7081  List *havingQual = (List *) extra->havingQual;
7082  AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7083 
7084  if (can_sort)
7085  {
7086  /*
7087  * Use any available suitably-sorted path as input, and also consider
7088  * sorting the cheapest-total path and incremental sort on any paths
7089  * with presorted keys.
7090  */
7091  foreach(lc, input_rel->pathlist)
7092  {
7093  ListCell *lc2;
7094  Path *path = (Path *) lfirst(lc);
7095  Path *path_save = path;
7096  List *pathkey_orderings = NIL;
7097 
7098  /* generate alternative group orderings that might be useful */
7099  pathkey_orderings = get_useful_group_keys_orderings(root, path);
7100 
7101  Assert(list_length(pathkey_orderings) > 0);
7102 
7103  foreach(lc2, pathkey_orderings)
7104  {
7105  GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7106 
7107  /* restore the path (we replace it in the loop) */
7108  path = path_save;
7109 
7110  path = make_ordered_path(root,
7111  grouped_rel,
7112  path,
7113  cheapest_path,
7114  info->pathkeys,
7115  -1.0);
7116  if (path == NULL)
7117  continue;
7118 
7119  /* Now decide what to stick atop it */
7120  if (parse->groupingSets)
7121  {
7122  consider_groupingsets_paths(root, grouped_rel,
7123  path, true, can_hash,
7124  gd, agg_costs, dNumGroups);
7125  }
7126  else if (parse->hasAggs)
7127  {
7128  /*
7129  * We have aggregation, possibly with plain GROUP BY. Make
7130  * an AggPath.
7131  */
7132  add_path(grouped_rel, (Path *)
7133  create_agg_path(root,
7134  grouped_rel,
7135  path,
7136  grouped_rel->reltarget,
7137  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7138  AGGSPLIT_SIMPLE,
7139  info->clauses,
7140  havingQual,
7141  agg_costs,
7142  dNumGroups));
7143  }
7144  else if (parse->groupClause)
7145  {
7146  /*
7147  * We have GROUP BY without aggregation or grouping sets.
7148  * Make a GroupPath.
7149  */
7150  add_path(grouped_rel, (Path *)
7151  create_group_path(root,
7152  grouped_rel,
7153  path,
7154  info->clauses,
7155  havingQual,
7156  dNumGroups));
7157  }
7158  else
7159  {
7160  /* Other cases should have been handled above */
7161  Assert(false);
7162  }
7163  }
7164  }
7165 
7166  /*
7167  * Instead of operating directly on the input relation, we can
7168  * consider finalizing a partially aggregated path.
7169  */
7170  if (partially_grouped_rel != NULL)
7171  {
7172  foreach(lc, partially_grouped_rel->pathlist)
7173  {
7174  ListCell *lc2;
7175  Path *path = (Path *) lfirst(lc);
7176  Path *path_save = path;
7177  List *pathkey_orderings = NIL;
7178 
7179  /* generate alternative group orderings that might be useful */
7180  pathkey_orderings = get_useful_group_keys_orderings(root, path);
7181 
7182  Assert(list_length(pathkey_orderings) > 0);
7183 
7184  /* process all potentially interesting grouping reorderings */
7185  foreach(lc2, pathkey_orderings)
7186  {
7187  GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7188 
7189  /* restore the path (we replace it in the loop) */
7190  path = path_save;
7191 
7192  path = make_ordered_path(root,
7193  grouped_rel,
7194  path,
7195  partially_grouped_rel->cheapest_total_path,
7196  info->pathkeys,
7197  -1.0);
7198 
7199  if (path == NULL)
7200  continue;
7201 
7202  if (parse->hasAggs)
7203  add_path(grouped_rel, (Path *)
7204  create_agg_path(root,
7205  grouped_rel,
7206  path,
7207  grouped_rel->reltarget,
7208  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7209  AGGSPLIT_FINAL_DESERIAL,
7210  info->clauses,
7211  havingQual,
7212  agg_final_costs,
7213  dNumGroups));
7214  else
7215  add_path(grouped_rel, (Path *)
7216  create_group_path(root,
7217  grouped_rel,
7218  path,
7219  info->clauses,
7220  havingQual,
7221  dNumGroups));
7222 
7223  }
7224  }
7225  }
7226  }
7227 
7228  if (can_hash)
7229  {
7230  if (parse->groupingSets)
7231  {
7232  /*
7233  * Try for a hash-only groupingsets path over unsorted input.
7234  */
7235  consider_groupingsets_paths(root, grouped_rel,
7236  cheapest_path, false, true,
7237  gd, agg_costs, dNumGroups);
7238  }
7239  else
7240  {
7241  /*
7242  * Generate a HashAgg Path. We just need an Agg over the
7243  * cheapest-total input path, since input order won't matter.
7244  */
7245  add_path(grouped_rel, (Path *)
7246  create_agg_path(root, grouped_rel,
7247  cheapest_path,
7248  grouped_rel->reltarget,
7249  AGG_HASHED,
7250  AGGSPLIT_SIMPLE,
7251  root->processed_groupClause,
7252  havingQual,
7253  agg_costs,
7254  dNumGroups));
7255  }
7256 
7257  /*
7258  * Generate a Finalize HashAgg Path atop of the cheapest partially
7259  * grouped path, assuming there is one
7260  */
7261  if (partially_grouped_rel && partially_grouped_rel->pathlist)
7262  {
7263  Path *path = partially_grouped_rel->cheapest_total_path;
7264 
7265  add_path(grouped_rel, (Path *)
7266  create_agg_path(root,
7267  grouped_rel,
7268  path,
7269  grouped_rel->reltarget,
7270  AGG_HASHED,
7271  AGGSPLIT_FINAL_DESERIAL,
7272  root->processed_groupClause,
7273  havingQual,
7274  agg_final_costs,
7275  dNumGroups));
7276  }
7277  }
7278 
7279  /*
7280  * When partitionwise aggregate is used, we might have fully aggregated
7281  * paths in the partial pathlist, because add_paths_to_append_rel() will
7282  * consider a path for grouped_rel consisting of a Parallel Append of
7283  * non-partial paths from each child.
7284  */
7285  if (grouped_rel->partial_pathlist != NIL)
7286  gather_grouping_paths(root, grouped_rel);
7287 }
#define Assert(condition)
Definition: c.h:812
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:76
@ AGG_SORTED
Definition: nodes.h:355
@ AGG_HASHED
Definition: nodes.h:356
@ AGG_PLAIN
Definition: nodes.h:354
@ AGGSPLIT_FINAL_DESERIAL
Definition: nodes.h:381
@ AGGSPLIT_SIMPLE
Definition: nodes.h:377
List * get_useful_group_keys_orderings(PlannerInfo *root, Path *path)
Definition: pathkeys.c:467
GroupPath * create_group_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *groupClause, List *qual, double numGroups)
Definition: pathnode.c:3127
AggPath * create_agg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, AggStrategy aggstrategy, AggSplit aggsplit, List *groupClause, List *qual, const AggClauseCosts *aggcosts, double numGroups)
Definition: pathnode.c:3240
void add_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:461
#define GROUPING_CAN_USE_HASH
Definition: pathnodes.h:3264
#define GROUPING_CAN_USE_SORT
Definition: pathnodes.h:3263
#define lfirst(lc)
Definition: pg_list.h:172
static int list_length(const List *l)
Definition: pg_list.h:152
#define NIL
Definition: pg_list.h:68
static void gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
Definition: planner.c:7659
static Path * make_ordered_path(PlannerInfo *root, RelOptInfo *rel, Path *path, Path *cheapest_path, List *pathkeys, double limit_tuples)
Definition: planner.c:7600
static void consider_groupingsets_paths(PlannerInfo *root, RelOptInfo *grouped_rel, Path *path, bool is_sorted, bool can_hash, grouping_sets_data *gd, const AggClauseCosts *agg_costs, double dNumGroups)
Definition: planner.c:4216
tree ctl root
Definition: radixtree.h:1886
static struct subre * parse(struct vars *v, int stopper, int type, struct state *init, struct state *final)
Definition: regcomp.c:717
AggClauseCosts agg_final_costs
Definition: pathnodes.h:3304
Definition: pg_list.h:54
struct PathTarget * reltarget
Definition: pathnodes.h:893
List * pathlist
Definition: pathnodes.h:898
struct Path * cheapest_total_path
Definition: pathnodes.h:902
List * partial_pathlist
Definition: pathnodes.h:900

References add_path(), GroupPathExtraData::agg_final_costs, AGG_HASHED, AGG_PLAIN, AGG_SORTED, AGGSPLIT_FINAL_DESERIAL, AGGSPLIT_SIMPLE, Assert, RelOptInfo::cheapest_total_path, GroupByOrdering::clauses, consider_groupingsets_paths(), create_agg_path(), create_group_path(), GroupPathExtraData::flags, gather_grouping_paths(), get_useful_group_keys_orderings(), GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, GroupPathExtraData::havingQual, if(), lfirst, list_length(), make_ordered_path(), NIL, parse(), RelOptInfo::partial_pathlist, GroupByOrdering::pathkeys, RelOptInfo::pathlist, RelOptInfo::reltarget, and root.

Referenced by create_ordinary_grouping_paths().
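
The branch structure above picks an aggregation strategy from the query shape: grouping sets go to consider_groupingsets_paths(), aggregates get an AggPath (AGG_SORTED or AGG_PLAIN), and a bare GROUP BY gets a GroupPath. A standalone toy sketch of that decision, using made-up stand-in types (MiniQuery is not a planner type):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the Query fields tested above. */
typedef struct MiniQuery
{
    bool hasAggs;         /* any aggregate functions? */
    bool hasGroupClause;  /* plain GROUP BY present? */
    bool hasGroupingSets; /* GROUPING SETS / ROLLUP / CUBE? */
} MiniQuery;

/* Mirror the sorted-input branch of add_paths_to_grouping_rel. */
static const char *
sorted_strategy(const MiniQuery *q)
{
    if (q->hasGroupingSets)
        return "GroupingSets";
    if (q->hasAggs)
        return q->hasGroupClause ? "AGG_SORTED" : "AGG_PLAIN";
    if (q->hasGroupClause)
        return "Group";
    return "unreachable";       /* Assert(false) in the real code */
}

int
main(void)
{
    MiniQuery q = {.hasAggs = true, .hasGroupClause = true};

    printf("%s\n", sorted_strategy(&q));    /* prints AGG_SORTED */
    return 0;
}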

◆ adjust_group_pathkeys_for_groupagg()

static void adjust_group_pathkeys_for_groupagg ( PlannerInfo * root)
static

Definition at line 3316 of file planner.c.

3317 {
3318  List *grouppathkeys = root->group_pathkeys;
3319  List *bestpathkeys;
3320  Bitmapset *bestaggs;
3321  Bitmapset *unprocessed_aggs;
3322  ListCell *lc;
3323  int i;
3324 
3325  /* Shouldn't be here if there are grouping sets */
3326  Assert(root->parse->groupingSets == NIL);
3327  /* Shouldn't be here unless there are some ordered aggregates */
3328  Assert(root->numOrderedAggs > 0);
3329 
3330  /* Do nothing if disabled */
3331  if (!enable_presorted_aggregate)
3332  return;
3333 
3334  /*
3335  * Make a first pass over all AggInfos to collect a Bitmapset containing
3336  * the indexes of all AggInfos to be processed below.
3337  */
3338  unprocessed_aggs = NULL;
3339  foreach(lc, root->agginfos)
3340  {
3341  AggInfo *agginfo = lfirst_node(AggInfo, lc);
3342  Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3343 
3344  if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
3345  continue;
3346 
3347  /* only add aggregates with a DISTINCT or ORDER BY */
3348  if (aggref->aggdistinct != NIL || aggref->aggorder != NIL)
3349  unprocessed_aggs = bms_add_member(unprocessed_aggs,
3350  foreach_current_index(lc));
3351  }
3352 
3353  /*
3354  * Now process all the unprocessed_aggs to find the best set of pathkeys
3355  * for the given set of aggregates.
3356  *
3357  * On the first outer loop here 'bestaggs' will be empty. We'll populate
3358  * this during the first loop using the pathkeys for the very first
3359  * AggInfo then taking any stronger pathkeys from any other AggInfos with
3360  * a more strict set of compatible pathkeys. Once the outer loop is
3361  * complete, we mark off all the aggregates with compatible pathkeys then
3362  * remove those from the unprocessed_aggs and repeat the process to try to
3363  * find another set of pathkeys that are suitable for a larger number of
3364  * aggregates. The outer loop will stop when there are not enough
3365  * unprocessed aggregates for it to be possible to find a set of pathkeys
3366  * to suit a larger number of aggregates.
3367  */
3368  bestpathkeys = NIL;
3369  bestaggs = NULL;
3370  while (bms_num_members(unprocessed_aggs) > bms_num_members(bestaggs))
3371  {
3372  Bitmapset *aggindexes = NULL;
3373  List *currpathkeys = NIL;
3374 
3375  i = -1;
3376  while ((i = bms_next_member(unprocessed_aggs, i)) >= 0)
3377  {
3378  AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3379  Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3380  List *sortlist;
3381  List *pathkeys;
3382 
3383  if (aggref->aggdistinct != NIL)
3384  sortlist = aggref->aggdistinct;
3385  else
3386  sortlist = aggref->aggorder;
3387 
3388  pathkeys = make_pathkeys_for_sortclauses(root, sortlist,
3389  aggref->args);
3390 
3391  /*
3392  * Ignore Aggrefs which have volatile functions in their ORDER BY
3393  * or DISTINCT clause.
3394  */
3395  if (has_volatile_pathkey(pathkeys))
3396  {
3397  unprocessed_aggs = bms_del_member(unprocessed_aggs, i);
3398  continue;
3399  }
3400 
3401  /*
3402  * When not set yet, take the pathkeys from the first unprocessed
3403  * aggregate.
3404  */
3405  if (currpathkeys == NIL)
3406  {
3407  currpathkeys = pathkeys;
3408 
3409  /* include the GROUP BY pathkeys, if they exist */
3410  if (grouppathkeys != NIL)
3411  currpathkeys = append_pathkeys(list_copy(grouppathkeys),
3412  currpathkeys);
3413 
3414  /* record that we found pathkeys for this aggregate */
3415  aggindexes = bms_add_member(aggindexes, i);
3416  }
3417  else
3418  {
3419  /* now look for a stronger set of matching pathkeys */
3420 
3421  /* include the GROUP BY pathkeys, if they exist */
3422  if (grouppathkeys != NIL)
3423  pathkeys = append_pathkeys(list_copy(grouppathkeys),
3424  pathkeys);
3425 
3426  /* are 'pathkeys' compatible or better than 'currpathkeys'? */
3427  switch (compare_pathkeys(currpathkeys, pathkeys))
3428  {
3429  case PATHKEYS_BETTER2:
3430  /* 'pathkeys' are stronger, use these ones instead */
3431  currpathkeys = pathkeys;
3432  /* FALLTHROUGH */
3433 
3434  case PATHKEYS_BETTER1:
3435  /* 'pathkeys' are less strict */
3436  /* FALLTHROUGH */
3437 
3438  case PATHKEYS_EQUAL:
3439  /* mark this aggregate as covered by 'currpathkeys' */
3440  aggindexes = bms_add_member(aggindexes, i);
3441  break;
3442 
3443  case PATHKEYS_DIFFERENT:
3444  break;
3445  }
3446  }
3447  }
3448 
3449  /* remove the aggregates that we've just processed */
3450  unprocessed_aggs = bms_del_members(unprocessed_aggs, aggindexes);
3451 
3452  /*
3453  * If this pass included more aggregates than the previous best then
3454  * use these ones as the best set.
3455  */
3456  if (bms_num_members(aggindexes) > bms_num_members(bestaggs))
3457  {
3458  bestaggs = aggindexes;
3459  bestpathkeys = currpathkeys;
3460  }
3461  }
3462 
3463  /*
3464  * If we found any ordered aggregates, update root->group_pathkeys to add
3465  * the best set of aggregate pathkeys. Note that bestpathkeys includes
3466  * the original GROUP BY pathkeys already.
3467  */
3468  if (bestpathkeys != NIL)
3469  root->group_pathkeys = bestpathkeys;
3470 
3471  /*
3472  * Now that we've found the best set of aggregates we can set the
3473  * presorted flag to indicate to the executor that it needn't bother
3474  * performing a sort for these Aggrefs. We're able to do this now as
3475  * there's no chance of a Hash Aggregate plan as create_grouping_paths
3476  * will not mark the GROUP BY as GROUPING_CAN_USE_HASH due to the presence
3477  * of ordered aggregates.
3478  */
3479  i = -1;
3480  while ((i = bms_next_member(bestaggs, i)) >= 0)
3481  {
3482  AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3483 
3484  foreach(lc, agginfo->aggrefs)
3485  {
3486  Aggref *aggref = lfirst_node(Aggref, lc);
3487 
3488  aggref->aggpresorted = true;
3489  }
3490  }
3491 }
int bms_next_member(const Bitmapset *a, int prevbit)
Definition: bitmapset.c:1306
int bms_num_members(const Bitmapset *a)
Definition: bitmapset.c:751
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:815
Bitmapset * bms_del_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:1161
Bitmapset * bms_del_member(Bitmapset *a, int x)
Definition: bitmapset.c:868
bool enable_presorted_aggregate
Definition: costsize.c:164
int i
Definition: isn.c:72
List * list_copy(const List *oldlist)
Definition: list.c:1573
List * append_pathkeys(List *target, List *source)
Definition: pathkeys.c:107
List * make_pathkeys_for_sortclauses(PlannerInfo *root, List *sortclauses, List *tlist)
Definition: pathkeys.c:1335
PathKeysComparison compare_pathkeys(List *keys1, List *keys2)
Definition: pathkeys.c:304
@ PATHKEYS_BETTER2
Definition: paths.h:206
@ PATHKEYS_BETTER1
Definition: paths.h:205
@ PATHKEYS_DIFFERENT
Definition: paths.h:207
@ PATHKEYS_EQUAL
Definition: paths.h:204
#define lfirst_node(type, lc)
Definition: pg_list.h:176
#define linitial_node(type, l)
Definition: pg_list.h:181
#define foreach_current_index(var_or_cell)
Definition: pg_list.h:403
#define list_nth_node(type, list, n)
Definition: pg_list.h:327
static bool has_volatile_pathkey(List *keys)
Definition: planner.c:3271
List * aggrefs
Definition: pathnodes.h:3386
List * aggdistinct
Definition: primnodes.h:474
List * args
Definition: primnodes.h:468
List * aggorder
Definition: primnodes.h:471

References Aggref::aggdistinct, Aggref::aggorder, AggInfo::aggrefs, append_pathkeys(), Aggref::args, Assert, bms_add_member(), bms_del_member(), bms_del_members(), bms_next_member(), bms_num_members(), compare_pathkeys(), enable_presorted_aggregate, foreach_current_index, has_volatile_pathkey(), i, lfirst_node, linitial_node, list_copy(), list_nth_node, make_pathkeys_for_sortclauses(), NIL, PATHKEYS_BETTER1, PATHKEYS_BETTER2, PATHKEYS_DIFFERENT, PATHKEYS_EQUAL, and root.

Referenced by standard_qp_callback().
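
The nested loops above are easiest to see with a toy model: treat each ordered aggregate's requirement as a string of sort keys, and let "is a prefix of" stand in for compare_pathkeys(). The single pass below (all data invented for illustration) finds the ordering that covers the most aggregates, mirroring the bestaggs/bestpathkeys bookkeeping; the real code repeats such passes over the aggregates left uncovered.

#include <stdio.h>
#include <string.h>

int
main(void)
{
    /* each aggregate's required sort order, as key letters */
    const char *orders[] = {"ab", "abc", "ab", "xy"};
    int         n = 4;
    int         best_count = 0;
    const char *best_order = NULL;

    for (int i = 0; i < n; i++)
    {
        const char *curr = orders[i];
        int         count = 0;

        for (int j = 0; j < n; j++)
        {
            /* compatible if one order is a prefix of the other */
            const char *longer =
                strlen(orders[j]) > strlen(curr) ? orders[j] : curr;
            const char *shorter = (longer == curr) ? orders[j] : curr;

            if (strncmp(longer, shorter, strlen(shorter)) == 0)
            {
                curr = longer;  /* keep the stronger order (PATHKEYS_BETTER2) */
                count++;
            }
        }
        if (count > best_count)
        {
            best_count = count;
            best_order = curr;
        }
    }
    printf("sorting on \"%s\" covers %d of %d aggregates\n",
           best_order, best_count, n);  /* "abc" covers 3 of 4 */
    return 0;
}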

◆ adjust_paths_for_srfs()

static void adjust_paths_for_srfs ( PlannerInfo * root,
RelOptInfo * rel,
List * targets,
List * targets_contain_srfs 
)
static

Definition at line 6619 of file planner.c.

6621 {
6622  ListCell *lc;
6623 
6624  Assert(list_length(targets) == list_length(targets_contain_srfs));
6625  Assert(!linitial_int(targets_contain_srfs));
6626 
6627  /* If no SRFs appear at this plan level, nothing to do */
6628  if (list_length(targets) == 1)
6629  return;
6630 
6631  /*
6632  * Stack SRF-evaluation nodes atop each path for the rel.
6633  *
6634  * In principle we should re-run set_cheapest() here to identify the
6635  * cheapest path, but it seems unlikely that adding the same tlist eval
6636  * costs to all the paths would change that, so we don't bother. Instead,
6637  * just assume that the cheapest-startup and cheapest-total paths remain
6638  * so. (There should be no parameterized paths anymore, so we needn't
6639  * worry about updating cheapest_parameterized_paths.)
6640  */
6641  foreach(lc, rel->pathlist)
6642  {
6643  Path *subpath = (Path *) lfirst(lc);
6644  Path *newpath = subpath;
6645  ListCell *lc1,
6646  *lc2;
6647 
6648  Assert(subpath->param_info == NULL);
6649  forboth(lc1, targets, lc2, targets_contain_srfs)
6650  {
6651  PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6652  bool contains_srfs = (bool) lfirst_int(lc2);
6653 
6654  /* If this level doesn't contain SRFs, do regular projection */
6655  if (contains_srfs)
6656  newpath = (Path *) create_set_projection_path(root,
6657  rel,
6658  newpath,
6659  thistarget);
6660  else
6661  newpath = (Path *) apply_projection_to_path(root,
6662  rel,
6663  newpath,
6664  thistarget);
6665  }
6666  lfirst(lc) = newpath;
6667  if (subpath == rel->cheapest_startup_path)
6668  rel->cheapest_startup_path = newpath;
6669  if (subpath == rel->cheapest_total_path)
6670  rel->cheapest_total_path = newpath;
6671  }
6672 
6673  /* Likewise for partial paths, if any */
6674  foreach(lc, rel->partial_pathlist)
6675  {
6676  Path *subpath = (Path *) lfirst(lc);
6677  Path *newpath = subpath;
6678  ListCell *lc1,
6679  *lc2;
6680 
6681  Assert(subpath->param_info == NULL);
6682  forboth(lc1, targets, lc2, targets_contain_srfs)
6683  {
6684  PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6685  bool contains_srfs = (bool) lfirst_int(lc2);
6686 
6687  /* If this level doesn't contain SRFs, do regular projection */
6688  if (contains_srfs)
6689  newpath = (Path *) create_set_projection_path(root,
6690  rel,
6691  newpath,
6692  thistarget);
6693  else
6694  {
6695  /* avoid apply_projection_to_path, in case of multiple refs */
6696  newpath = (Path *) create_projection_path(root,
6697  rel,
6698  newpath,
6699  thistarget);
6700  }
6701  }
6702  lfirst(lc) = newpath;
6703  }
6704 }
Datum subpath(PG_FUNCTION_ARGS)
Definition: ltree_op.c:308
ProjectionPath * create_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2763
ProjectSetPath * create_set_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2962
Path * apply_projection_to_path(PlannerInfo *root, RelOptInfo *rel, Path *path, PathTarget *target)
Definition: pathnode.c:2873
#define forboth(cell1, list1, cell2, list2)
Definition: pg_list.h:518
#define lfirst_int(lc)
Definition: pg_list.h:173
#define linitial_int(l)
Definition: pg_list.h:179
struct Path * cheapest_startup_path
Definition: pathnodes.h:901

References apply_projection_to_path(), Assert, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, create_projection_path(), create_set_projection_path(), forboth, lfirst, lfirst_int, lfirst_node, linitial_int, list_length(), RelOptInfo::partial_pathlist, RelOptInfo::pathlist, root, and subpath().

Referenced by apply_scanjoin_target_to_paths(), and grouping_planner().

◆ apply_scanjoin_target_to_paths()

static void apply_scanjoin_target_to_paths ( PlannerInfo * root,
RelOptInfo * rel,
List * scanjoin_targets,
List * scanjoin_targets_contain_srfs,
bool  scanjoin_target_parallel_safe,
bool  tlist_same_exprs 
)
static

Definition at line 7784 of file planner.c.

7790 {
7791  bool rel_is_partitioned = IS_PARTITIONED_REL(rel);
7792  PathTarget *scanjoin_target;
7793  ListCell *lc;
7794 
7795  /* This recurses, so be paranoid. */
7796  check_stack_depth();
7797 
7798  /*
7799  * If the rel is partitioned, we want to drop its existing paths and
7800  * generate new ones. This function would still be correct if we kept the
7801  * existing paths: we'd modify them to generate the correct target above
7802  * the partitioning Append, and then they'd compete on cost with paths
7803  * generating the target below the Append. However, in our current cost
7804  * model the latter way is always the same or cheaper cost, so modifying
7805  * the existing paths would just be useless work. Moreover, when the cost
7806  * is the same, varying roundoff errors might sometimes allow an existing
7807  * path to be picked, resulting in undesirable cross-platform plan
7808  * variations. So we drop old paths and thereby force the work to be done
7809  * below the Append, except in the case of a non-parallel-safe target.
7810  *
7811  * Some care is needed, because we have to allow
7812  * generate_useful_gather_paths to see the old partial paths in the next
7813  * stanza. Hence, zap the main pathlist here, then allow
7814  * generate_useful_gather_paths to add path(s) to the main list, and
7815  * finally zap the partial pathlist.
7816  */
7817  if (rel_is_partitioned)
7818  rel->pathlist = NIL;
7819 
7820  /*
7821  * If the scan/join target is not parallel-safe, partial paths cannot
7822  * generate it.
7823  */
7824  if (!scanjoin_target_parallel_safe)
7825  {
7826  /*
7827  * Since we can't generate the final scan/join target in parallel
7828  * workers, this is our last opportunity to use any partial paths that
7829  * exist; so build Gather path(s) that use them and emit whatever the
7830  * current reltarget is. We don't do this in the case where the
7831  * target is parallel-safe, since we will be able to generate superior
7832  * paths by doing it after the final scan/join target has been
7833  * applied.
7834  */
7835  generate_useful_gather_paths(root, rel, false);
7836 
7837  /* Can't use parallel query above this level. */
7838  rel->partial_pathlist = NIL;
7839  rel->consider_parallel = false;
7840  }
7841 
7842  /* Finish dropping old paths for a partitioned rel, per comment above */
7843  if (rel_is_partitioned)
7844  rel->partial_pathlist = NIL;
7845 
7846  /* Extract SRF-free scan/join target. */
7847  scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
7848 
7849  /*
7850  * Apply the SRF-free scan/join target to each existing path.
7851  *
7852  * If the tlist exprs are the same, we can just inject the sortgroupref
7853  * information into the existing pathtargets. Otherwise, replace each
7854  * path with a projection path that generates the SRF-free scan/join
7855  * target. This can't change the ordering of paths within rel->pathlist,
7856  * so we just modify the list in place.
7857  */
7858  foreach(lc, rel->pathlist)
7859  {
7860  Path *subpath = (Path *) lfirst(lc);
7861 
7862  /* Shouldn't have any parameterized paths anymore */
7863  Assert(subpath->param_info == NULL);
7864 
7865  if (tlist_same_exprs)
7866  subpath->pathtarget->sortgrouprefs =
7867  scanjoin_target->sortgrouprefs;
7868  else
7869  {
7870  Path *newpath;
7871 
7872  newpath = (Path *) create_projection_path(root, rel, subpath,
7873  scanjoin_target);
7874  lfirst(lc) = newpath;
7875  }
7876  }
7877 
7878  /* Likewise adjust the targets for any partial paths. */
7879  foreach(lc, rel->partial_pathlist)
7880  {
7881  Path *subpath = (Path *) lfirst(lc);
7882 
7883  /* Shouldn't have any parameterized paths anymore */
7884  Assert(subpath->param_info == NULL);
7885 
7886  if (tlist_same_exprs)
7887  subpath->pathtarget->sortgrouprefs =
7888  scanjoin_target->sortgrouprefs;
7889  else
7890  {
7891  Path *newpath;
7892 
7893  newpath = (Path *) create_projection_path(root, rel, subpath,
7894  scanjoin_target);
7895  lfirst(lc) = newpath;
7896  }
7897  }
7898 
7899  /*
7900  * Now, if final scan/join target contains SRFs, insert ProjectSetPath(s)
7901  * atop each existing path. (Note that this function doesn't look at the
7902  * cheapest-path fields, which is a good thing because they're bogus right
7903  * now.)
7904  */
7905  if (root->parse->hasTargetSRFs)
7906  adjust_paths_for_srfs(root, rel,
7907  scanjoin_targets,
7908  scanjoin_targets_contain_srfs);
7909 
7910  /*
7911  * Update the rel's target to be the final (with SRFs) scan/join target.
7912  * This now matches the actual output of all the paths, and we might get
7913  * confused in createplan.c if they don't agree. We must do this now so
7914  * that any append paths made in the next part will use the correct
7915  * pathtarget (cf. create_append_path).
7916  *
7917  * Note that this is also necessary if GetForeignUpperPaths() gets called
7918  * on the final scan/join relation or on any of its children, since the
7919  * FDW might look at the rel's target to create ForeignPaths.
7920  */
7921  rel->reltarget = llast_node(PathTarget, scanjoin_targets);
7922 
7923  /*
7924  * If the relation is partitioned, recursively apply the scan/join target
7925  * to all partitions, and generate brand-new Append paths in which the
7926  * scan/join target is computed below the Append rather than above it.
7927  * Since Append is not projection-capable, that might save a separate
7928  * Result node, and it also is important for partitionwise aggregate.
7929  */
7930  if (rel_is_partitioned)
7931  {
7932  List *live_children = NIL;
7933  int i;
7934 
7935  /* Adjust each partition. */
7936  i = -1;
7937  while ((i = bms_next_member(rel->live_parts, i)) >= 0)
7938  {
7939  RelOptInfo *child_rel = rel->part_rels[i];
7940  AppendRelInfo **appinfos;
7941  int nappinfos;
7942  List *child_scanjoin_targets = NIL;
7943 
7944  Assert(child_rel != NULL);
7945 
7946  /* Dummy children can be ignored. */
7947  if (IS_DUMMY_REL(child_rel))
7948  continue;
7949 
7950  /* Translate scan/join targets for this child. */
7951  appinfos = find_appinfos_by_relids(root, child_rel->relids,
7952  &nappinfos);
7953  foreach(lc, scanjoin_targets)
7954  {
7955  PathTarget *target = lfirst_node(PathTarget, lc);
7956 
7957  target = copy_pathtarget(target);
7958  target->exprs = (List *)
7959  adjust_appendrel_attrs(root,
7960  (Node *) target->exprs,
7961  nappinfos, appinfos);
7962  child_scanjoin_targets = lappend(child_scanjoin_targets,
7963  target);
7964  }
7965  pfree(appinfos);
7966 
7967  /* Recursion does the real work. */
7968  apply_scanjoin_target_to_paths(root, child_rel,
7969  child_scanjoin_targets,
7970  scanjoin_targets_contain_srfs,
7971  scanjoin_target_parallel_safe,
7972  tlist_same_exprs);
7973 
7974  /* Save non-dummy children for Append paths. */
7975  if (!IS_DUMMY_REL(child_rel))
7976  live_children = lappend(live_children, child_rel);
7977  }
7978 
7979  /* Build new paths for this relation by appending child paths. */
7980  add_paths_to_append_rel(root, rel, live_children);
7981  }
7982 
7983  /*
7984  * Consider generating Gather or Gather Merge paths. We must only do this
7985  * if the relation is parallel safe, and we don't do it for child rels to
7986  * avoid creating multiple Gather nodes within the same plan. We must do
7987  * this after all paths have been generated and before set_cheapest, since
7988  * one of the generated paths may turn out to be the cheapest one.
7989  */
7990  if (rel->consider_parallel && !IS_OTHER_REL(rel))
7991  generate_useful_gather_paths(root, rel, false);
7992 
7993  /*
7994  * Reassess which paths are the cheapest, now that we've potentially added
7995  * new Gather (or Gather Merge) and/or Append (or MergeAppend) paths to
7996  * this relation.
7997  */
7998  set_cheapest(rel);
7999 }
void generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
Definition: allpaths.c:3201
void add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, List *live_childrels)
Definition: allpaths.c:1314
AppendRelInfo ** find_appinfos_by_relids(PlannerInfo *root, Relids relids, int *nappinfos)
Definition: appendinfo.c:736
Node * adjust_appendrel_attrs(PlannerInfo *root, Node *node, int nappinfos, AppendRelInfo **appinfos)
Definition: appendinfo.c:200
List * lappend(List *list, void *datum)
Definition: list.c:339
void pfree(void *pointer)
Definition: mcxt.c:1521
void set_cheapest(RelOptInfo *parent_rel)
Definition: pathnode.c:269
#define IS_DUMMY_REL(r)
Definition: pathnodes.h:1956
#define IS_PARTITIONED_REL(rel)
Definition: pathnodes.h:1062
#define IS_OTHER_REL(rel)
Definition: pathnodes.h:854
#define llast_node(type, l)
Definition: pg_list.h:202
static void apply_scanjoin_target_to_paths(PlannerInfo *root, RelOptInfo *rel, List *scanjoin_targets, List *scanjoin_targets_contain_srfs, bool scanjoin_target_parallel_safe, bool tlist_same_exprs)
Definition: planner.c:7784
static void adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel, List *targets, List *targets_contain_srfs)
Definition: planner.c:6619
void check_stack_depth(void)
Definition: postgres.c:3574
Definition: nodes.h:129
List * exprs
Definition: pathnodes.h:1542
Relids relids
Definition: pathnodes.h:871
bool consider_parallel
Definition: pathnodes.h:887
Bitmapset * live_parts
Definition: pathnodes.h:1039
bool tlist_same_exprs(List *tlist1, List *tlist2)
Definition: tlist.c:218
PathTarget * copy_pathtarget(PathTarget *src)
Definition: tlist.c:657

References add_paths_to_append_rel(), adjust_appendrel_attrs(), adjust_paths_for_srfs(), Assert, bms_next_member(), check_stack_depth(), RelOptInfo::consider_parallel, copy_pathtarget(), create_projection_path(), PathTarget::exprs, find_appinfos_by_relids(), generate_useful_gather_paths(), i, IS_DUMMY_REL, IS_OTHER_REL, IS_PARTITIONED_REL, lappend(), lfirst, lfirst_node, linitial_node, RelOptInfo::live_parts, llast_node, NIL, RelOptInfo::partial_pathlist, RelOptInfo::pathlist, pfree(), RelOptInfo::relids, RelOptInfo::reltarget, root, set_cheapest(), subpath(), and tlist_same_exprs().

Referenced by grouping_planner().

◆ can_partial_agg()

static bool can_partial_agg ( PlannerInfo * root)
static

Definition at line 7742 of file planner.c.

7743 {
7744  Query *parse = root->parse;
7745 
7746  if (!parse->hasAggs && parse->groupClause == NIL)
7747  {
7748  /*
7749  * We don't know how to do parallel aggregation unless we have either
7750  * some aggregates or a grouping clause.
7751  */
7752  return false;
7753  }
7754  else if (parse->groupingSets)
7755  {
7756  /* We don't know how to do grouping sets in parallel. */
7757  return false;
7758  }
7759  else if (root->hasNonPartialAggs || root->hasNonSerialAggs)
7760  {
7761  /* Insufficient support for partial mode. */
7762  return false;
7763  }
7764 
7765  /* Everything looks good. */
7766  return true;
7767 }

References NIL, parse(), and root.

Referenced by create_grouping_paths().

◆ common_prefix_cmp()

static int common_prefix_cmp ( const void *  a,
const void *  b 
)
static

Definition at line 6088 of file planner.c.

6089 {
6090  const WindowClauseSortData *wcsa = a;
6091  const WindowClauseSortData *wcsb = b;
6092  ListCell *item_a;
6093  ListCell *item_b;
6094 
6095  forboth(item_a, wcsa->uniqueOrder, item_b, wcsb->uniqueOrder)
6096  {
6097  SortGroupClause *sca = lfirst_node(SortGroupClause, item_a);
6098  SortGroupClause *scb = lfirst_node(SortGroupClause, item_b);
6099 
6100  if (sca->tleSortGroupRef > scb->tleSortGroupRef)
6101  return -1;
6102  else if (sca->tleSortGroupRef < scb->tleSortGroupRef)
6103  return 1;
6104  else if (sca->sortop > scb->sortop)
6105  return -1;
6106  else if (sca->sortop < scb->sortop)
6107  return 1;
6108  else if (sca->nulls_first && !scb->nulls_first)
6109  return -1;
6110  else if (!sca->nulls_first && scb->nulls_first)
6111  return 1;
6112  /* no need to compare eqop, since it is fully determined by sortop */
6113  }
6114 
6115  if (list_length(wcsa->uniqueOrder) > list_length(wcsb->uniqueOrder))
6116  return -1;
6117  else if (list_length(wcsa->uniqueOrder) < list_length(wcsb->uniqueOrder))
6118  return 1;
6119 
6120  return 0;
6121 }
int b
Definition: isn.c:69
int a
Definition: isn.c:68
Index tleSortGroupRef
Definition: parsenodes.h:1438

References a, b, forboth, lfirst_node, list_length(), SortGroupClause::nulls_first, SortGroupClause::sortop, SortGroupClause::tleSortGroupRef, and WindowClauseSortData::uniqueOrder.

Referenced by select_active_windows().
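
Because every field is compared in descending order, sorting with this comparator puts window clauses whose sort requirements are supersets of others' first, so compatible clauses end up adjacent with the most demanding one leading. A self-contained illustration, with a simplified struct standing in for WindowClauseSortData (SimpleKey and its fields are invented for the demo):

#include <stdio.h>
#include <stdlib.h>

typedef struct SimpleKey
{
    unsigned tleSortGroupRef;   /* first sort column's reference */
    unsigned sortop;            /* its sort operator OID */
    int      nkeys;             /* stands in for list_length(uniqueOrder) */
} SimpleKey;

static int
simple_prefix_cmp(const void *a, const void *b)
{
    const SimpleKey *ka = a;
    const SimpleKey *kb = b;

    if (ka->tleSortGroupRef != kb->tleSortGroupRef)
        return ka->tleSortGroupRef > kb->tleSortGroupRef ? -1 : 1;
    if (ka->sortop != kb->sortop)
        return ka->sortop > kb->sortop ? -1 : 1;
    /* longer key lists sort first, like the final list_length test */
    if (ka->nkeys != kb->nkeys)
        return ka->nkeys > kb->nkeys ? -1 : 1;
    return 0;
}

int
main(void)
{
    SimpleKey keys[] = {{1, 97, 1}, {2, 97, 2}, {2, 97, 1}};

    qsort(keys, 3, sizeof(SimpleKey), simple_prefix_cmp);
    for (int i = 0; i < 3; i++)     /* prints (2,2), (2,1), (1,1) */
        printf("ref=%u nkeys=%d\n", keys[i].tleSortGroupRef, keys[i].nkeys);
    return 0;
}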

◆ consider_groupingsets_paths()

static void consider_groupingsets_paths ( PlannerInfo * root,
RelOptInfo * grouped_rel,
Path * path,
bool  is_sorted,
bool  can_hash,
grouping_sets_data * gd,
const AggClauseCosts * agg_costs,
double  dNumGroups 
)
static

Definition at line 4216 of file planner.c.

4224 {
4225  Query *parse = root->parse;
4226  Size hash_mem_limit = get_hash_memory_limit();
4227 
4228  /*
4229  * If we're not being offered sorted input, then only consider plans that
4230  * can be done entirely by hashing.
4231  *
4232  * We can hash everything if it looks like it'll fit in hash_mem. But if
4233  * the input is actually sorted despite not being advertised as such, we
4234  * prefer to make use of that in order to use less memory.
4235  *
4236  * If none of the grouping sets are sortable, then ignore the hash_mem
4237  * limit and generate a path anyway, since otherwise we'll just fail.
4238  */
4239  if (!is_sorted)
4240  {
4241  List *new_rollups = NIL;
4242  RollupData *unhashed_rollup = NULL;
4243  List *sets_data;
4244  List *empty_sets_data = NIL;
4245  List *empty_sets = NIL;
4246  ListCell *lc;
4247  ListCell *l_start = list_head(gd->rollups);
4248  AggStrategy strat = AGG_HASHED;
4249  double hashsize;
4250  double exclude_groups = 0.0;
4251 
4252  Assert(can_hash);
4253 
4254  /*
4255  * If the input is coincidentally sorted usefully (which can happen
4256  * even if is_sorted is false, since that only means that our caller
4257  * has set up the sorting for us), then save some hashtable space by
4258  * making use of that. But we need to watch out for degenerate cases:
4259  *
4260  * 1) If there are any empty grouping sets, then group_pathkeys might
4261  * be NIL if all non-empty grouping sets are unsortable. In this case,
4262  * there will be a rollup containing only empty groups, and the
4263  * pathkeys_contained_in test is vacuously true; this is ok.
4264  *
4265  * XXX: the above relies on the fact that group_pathkeys is generated
4266  * from the first rollup. If we add the ability to consider multiple
4267  * sort orders for grouping input, this assumption might fail.
4268  *
4269  * 2) If there are no empty sets and only unsortable sets, then the
4270  * rollups list will be empty (and thus l_start == NULL), and
4271  * group_pathkeys will be NIL; we must ensure that the vacuously-true
4272  * pathkeys_contained_in test doesn't cause us to crash.
4273  */
4274  if (l_start != NULL &&
4275  pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
4276  {
4277  unhashed_rollup = lfirst_node(RollupData, l_start);
4278  exclude_groups = unhashed_rollup->numGroups;
4279  l_start = lnext(gd->rollups, l_start);
4280  }
4281 
4282  hashsize = estimate_hashagg_tablesize(root,
4283  path,
4284  agg_costs,
4285  dNumGroups - exclude_groups);
4286 
4287  /*
4288  * gd->rollups is empty if we have only unsortable columns to work
4289  * with. Override hash_mem in that case; otherwise, we'll rely on the
4290  * sorted-input case to generate usable mixed paths.
4291  */
4292  if (hashsize > hash_mem_limit && gd->rollups)
4293  return; /* nope, won't fit */
4294 
4295  /*
4296  * We need to burst the existing rollups list into individual grouping
4297  * sets and recompute a groupClause for each set.
4298  */
4299  sets_data = list_copy(gd->unsortable_sets);
4300 
4301  for_each_cell(lc, gd->rollups, l_start)
4302  {
4303  RollupData *rollup = lfirst_node(RollupData, lc);
4304 
4305  /*
4306  * If we find an unhashable rollup that's not been skipped by the
4307  * "actually sorted" check above, we can't cope; we'd need sorted
4308  * input (with a different sort order) but we can't get that here.
4309  * So bail out; we'll get a valid path from the is_sorted case
4310  * instead.
4311  *
4312  * The mere presence of empty grouping sets doesn't make a rollup
4313  * unhashable (see preprocess_grouping_sets), we handle those
4314  * specially below.
4315  */
4316  if (!rollup->hashable)
4317  return;
4318 
4319  sets_data = list_concat(sets_data, rollup->gsets_data);
4320  }
4321  foreach(lc, sets_data)
4322  {
4323  GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4324  List *gset = gs->set;
4325  RollupData *rollup;
4326 
4327  if (gset == NIL)
4328  {
4329  /* Empty grouping sets can't be hashed. */
4330  empty_sets_data = lappend(empty_sets_data, gs);
4331  empty_sets = lappend(empty_sets, NIL);
4332  }
4333  else
4334  {
4335  rollup = makeNode(RollupData);
4336 
4337  rollup->groupClause = preprocess_groupclause(root, gset);
4338  rollup->gsets_data = list_make1(gs);
4339  rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4340  rollup->gsets_data,
4341  gd->tleref_to_colnum_map);
4342  rollup->numGroups = gs->numGroups;
4343  rollup->hashable = true;
4344  rollup->is_hashed = true;
4345  new_rollups = lappend(new_rollups, rollup);
4346  }
4347  }
4348 
4349  /*
4350  * If we didn't find anything nonempty to hash, then bail. We'll
4351  * generate a path from the is_sorted case.
4352  */
4353  if (new_rollups == NIL)
4354  return;
4355 
4356  /*
4357  * If there were empty grouping sets they should have been in the
4358  * first rollup.
4359  */
4360  Assert(!unhashed_rollup || !empty_sets);
4361 
4362  if (unhashed_rollup)
4363  {
4364  new_rollups = lappend(new_rollups, unhashed_rollup);
4365  strat = AGG_MIXED;
4366  }
4367  else if (empty_sets)
4368  {
4369  RollupData *rollup = makeNode(RollupData);
4370 
4371  rollup->groupClause = NIL;
4372  rollup->gsets_data = empty_sets_data;
4373  rollup->gsets = empty_sets;
4374  rollup->numGroups = list_length(empty_sets);
4375  rollup->hashable = false;
4376  rollup->is_hashed = false;
4377  new_rollups = lappend(new_rollups, rollup);
4378  strat = AGG_MIXED;
4379  }
4380 
4381  add_path(grouped_rel, (Path *)
4382  create_groupingsets_path(root,
4383  grouped_rel,
4384  path,
4385  (List *) parse->havingQual,
4386  strat,
4387  new_rollups,
4388  agg_costs));
4389  return;
4390  }
4391 
4392  /*
4393  * If we have sorted input but nothing we can do with it, bail.
4394  */
4395  if (gd->rollups == NIL)
4396  return;
4397 
4398  /*
4399  * Given sorted input, we try and make two paths: one sorted and one mixed
4400  * sort/hash. (We need to try both because hashagg might be disabled, or
4401  * some columns might not be sortable.)
4402  *
4403  * can_hash is passed in as false if some obstacle elsewhere (such as
4404  * ordered aggs) means that we shouldn't consider hashing at all.
4405  */
4406  if (can_hash && gd->any_hashable)
4407  {
4408  List *rollups = NIL;
4409  List *hash_sets = list_copy(gd->unsortable_sets);
4410  double availspace = hash_mem_limit;
4411  ListCell *lc;
4412 
4413  /*
4414  * Account first for space needed for groups we can't sort at all.
4415  */
4416  availspace -= estimate_hashagg_tablesize(root,
4417  path,
4418  agg_costs,
4419  gd->dNumHashGroups);
4420 
4421  if (availspace > 0 && list_length(gd->rollups) > 1)
4422  {
4423  double scale;
4424  int num_rollups = list_length(gd->rollups);
4425  int k_capacity;
4426  int *k_weights = palloc(num_rollups * sizeof(int));
4427  Bitmapset *hash_items = NULL;
4428  int i;
4429 
4430  /*
4431  * We treat this as a knapsack problem: the knapsack capacity
4432  * represents hash_mem, the item weights are the estimated memory
4433  * usage of the hashtables needed to implement a single rollup,
4434  * and we really ought to use the cost saving as the item value;
4435  * however, currently the costs assigned to sort nodes don't
4436  * reflect the comparison costs well, and so we treat all items as
4437  * of equal value (each rollup we hash instead saves us one sort).
4438  *
4439  * To use the discrete knapsack, we need to scale the values to a
4440  * reasonably small bounded range. We choose to allow a 5% error
4441  * margin; we have no more than 4096 rollups in the worst possible
4442  * case, which with a 5% error margin will require a bit over 42MB
4443  * of workspace. (Anyone wanting to plan queries that complex had
4444  * better have the memory for it. In more reasonable cases, with
4445  * no more than a couple of dozen rollups, the memory usage will
4446  * be negligible.)
4447  *
4448  * k_capacity is naturally bounded, but we clamp the values for
4449  * scale and weight (below) to avoid overflows or underflows (or
4450  * uselessly trying to use a scale factor less than 1 byte).
4451  */
4452  scale = Max(availspace / (20.0 * num_rollups), 1.0);
4453  k_capacity = (int) floor(availspace / scale);
4454 
4455  /*
4456  * We leave the first rollup out of consideration since it's the
4457  * one that matches the input sort order. We assign indexes "i"
4458  * to only those entries considered for hashing; the second loop,
4459  * below, must use the same condition.
4460  */
4461  i = 0;
4462  for_each_from(lc, gd->rollups, 1)
4463  {
4464  RollupData *rollup = lfirst_node(RollupData, lc);
4465 
4466  if (rollup->hashable)
4467  {
4468  double sz = estimate_hashagg_tablesize(root,
4469  path,
4470  agg_costs,
4471  rollup->numGroups);
4472 
4473  /*
4474  * If sz is enormous, but hash_mem (and hence scale) is
4475  * small, avoid integer overflow here.
4476  */
4477  k_weights[i] = (int) Min(floor(sz / scale),
4478  k_capacity + 1.0);
4479  ++i;
4480  }
4481  }
4482 
4483  /*
4484  * Apply knapsack algorithm; compute the set of items which
4485  * maximizes the value stored (in this case the number of sorts
4486  * saved) while keeping the total size (approximately) within
4487  * capacity.
4488  */
4489  if (i > 0)
4490  hash_items = DiscreteKnapsack(k_capacity, i, k_weights, NULL);
4491 
4492  if (!bms_is_empty(hash_items))
4493  {
4494  rollups = list_make1(linitial(gd->rollups));
4495 
4496  i = 0;
4497  for_each_from(lc, gd->rollups, 1)
4498  {
4499  RollupData *rollup = lfirst_node(RollupData, lc);
4500 
4501  if (rollup->hashable)
4502  {
4503  if (bms_is_member(i, hash_items))
4504  hash_sets = list_concat(hash_sets,
4505  rollup->gsets_data);
4506  else
4507  rollups = lappend(rollups, rollup);
4508  ++i;
4509  }
4510  else
4511  rollups = lappend(rollups, rollup);
4512  }
4513  }
4514  }
4515 
4516  if (!rollups && hash_sets)
4517  rollups = list_copy(gd->rollups);
4518 
4519  foreach(lc, hash_sets)
4520  {
4521  GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4522  RollupData *rollup = makeNode(RollupData);
4523 
4524  Assert(gs->set != NIL);
4525 
4526  rollup->groupClause = preprocess_groupclause(root, gs->set);
4527  rollup->gsets_data = list_make1(gs);
4528  rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4529  rollup->gsets_data,
4530  gd->tleref_to_colnum_map);
4531  rollup->numGroups = gs->numGroups;
4532  rollup->hashable = true;
4533  rollup->is_hashed = true;
4534  rollups = lcons(rollup, rollups);
4535  }
4536 
4537  if (rollups)
4538  {
4539  add_path(grouped_rel, (Path *)
4540  create_groupingsets_path(root,
4541  grouped_rel,
4542  path,
4543  (List *) parse->havingQual,
4544  AGG_MIXED,
4545  rollups,
4546  agg_costs));
4547  }
4548  }
4549 
4550  /*
4551  * Now try the simple sorted case.
4552  */
4553  if (!gd->unsortable_sets)
4554  add_path(grouped_rel, (Path *)
4555  create_groupingsets_path(root,
4556  grouped_rel,
4557  path,
4558  (List *) parse->havingQual,
4559  AGG_SORTED,
4560  gd->rollups,
4561  agg_costs));
4562 }
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:510
#define bms_is_empty(a)
Definition: bitmapset.h:118
#define Min(x, y)
Definition: c.h:958
#define Max(x, y)
Definition: c.h:952
size_t Size
Definition: c.h:559
Bitmapset * DiscreteKnapsack(int max_weight, int num_items, int *item_weights, double *item_values)
Definition: knapsack.c:52
List * list_concat(List *list1, const List *list2)
Definition: list.c:561
List * lcons(void *datum, List *list)
Definition: list.c:495
void * palloc(Size size)
Definition: mcxt.c:1317
size_t get_hash_memory_limit(void)
Definition: nodeHash.c:3487
AggStrategy
Definition: nodes.h:353
@ AGG_MIXED
Definition: nodes.h:357
#define makeNode(_type_)
Definition: nodes.h:155
bool pathkeys_contained_in(List *keys1, List *keys2)
Definition: pathkeys.c:343
GroupingSetsPath * create_groupingsets_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *having_qual, AggStrategy aggstrategy, List *rollups, const AggClauseCosts *agg_costs)
Definition: pathnode.c:3323
#define list_make1(x1)
Definition: pg_list.h:212
#define for_each_cell(cell, lst, initcell)
Definition: pg_list.h:438
static ListCell * list_head(const List *l)
Definition: pg_list.h:128
#define for_each_from(cell, lst, N)
Definition: pg_list.h:414
#define linitial(l)
Definition: pg_list.h:178
static ListCell * lnext(const List *l, const ListCell *c)
Definition: pg_list.h:343
static int scale
Definition: pgbench.c:181
static List * preprocess_groupclause(PlannerInfo *root, List *force)
Definition: planner.c:2915
static List * remap_to_groupclause_idx(List *groupClause, List *gsets, int *tleref_to_colnum_map)
Definition: planner.c:2289
double estimate_hashagg_tablesize(PlannerInfo *root, Path *path, const AggClauseCosts *agg_costs, double dNumGroups)
Definition: selfuncs.c:3921
Cardinality numGroups
Definition: pathnodes.h:2283
List * pathkeys
Definition: pathnodes.h:1675
Cardinality numGroups
Definition: pathnodes.h:2294
List * groupClause
Definition: pathnodes.h:2291
List * gsets_data
Definition: pathnodes.h:2293
bool hashable
Definition: pathnodes.h:2295
List * gsets
Definition: pathnodes.h:2292
bool is_hashed
Definition: pathnodes.h:2296
int * tleref_to_colnum_map
Definition: planner.c:107
List * unsortable_sets
Definition: planner.c:106
double dNumHashGroups
Definition: planner.c:102

References add_path(), AGG_HASHED, AGG_MIXED, AGG_SORTED, grouping_sets_data::any_hashable, Assert, bms_is_empty, bms_is_member(), create_groupingsets_path(), DiscreteKnapsack(), grouping_sets_data::dNumHashGroups, estimate_hashagg_tablesize(), for_each_cell, for_each_from, get_hash_memory_limit(), RollupData::groupClause, RollupData::gsets, RollupData::gsets_data, RollupData::hashable, i, RollupData::is_hashed, lappend(), lcons(), lfirst_node, linitial, list_concat(), list_copy(), list_head(), list_length(), list_make1, lnext(), makeNode, Max, Min, NIL, GroupingSetData::numGroups, RollupData::numGroups, palloc(), parse(), Path::pathkeys, pathkeys_contained_in(), preprocess_groupclause(), remap_to_groupclause_idx(), grouping_sets_data::rollups, root, scale, GroupingSetData::set, grouping_sets_data::tleref_to_colnum_map, and grouping_sets_data::unsortable_sets.

Referenced by add_paths_to_grouping_rel().
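
For orientation, the hash-versus-sort split in the function above is decided with lib/knapsack.c's DiscreteKnapsack() (signature in the cross-references above): the planner packs as many groupings as it can into the hash-memory budget and sorts the rest. A minimal, self-contained sketch of the call itself, with made-up weights and values loosely playing the role of the scaled hash-table size estimates used above:

#include "postgres.h"

#include "lib/knapsack.h"
#include "nodes/bitmapset.h"

/*
 * Illustrative only: pick the most valuable subset of three items whose
 * combined weight fits a budget of 5.  DiscreteKnapsack() returns a
 * Bitmapset of the selected 0-based item indexes.
 */
static void
knapsack_example(void)
{
    int         item_weights[] = {4, 2, 3};
    double      item_values[] = {1.0, 1.5, 1.0};
    Bitmapset  *chosen = DiscreteKnapsack(5, 3, item_weights, item_values);

    /* Items 1 and 2 (weight 2 + 3 = 5, value 2.5) win here. */
    if (bms_is_member(1, chosen))
        elog(DEBUG1, "item 1 was selected");
}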

◆ create_degenerate_grouping_paths()

static void create_degenerate_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo grouped_rel 
)
static

Definition at line 4012 of file planner.c.

4014 {
4015  Query *parse = root->parse;
4016  int nrows;
4017  Path *path;
4018 
4019  nrows = list_length(parse->groupingSets);
4020  if (nrows > 1)
4021  {
4022  /*
4023  * Doesn't seem worthwhile writing code to cons up a generate_series
4024  * or a values scan to emit multiple rows. Instead just make N clones
4025  * and append them. (With a volatile HAVING clause, this means you
4026  * might get between 0 and N output rows. Offhand I think that's
4027  * desired.)
4028  */
4029  List *paths = NIL;
4030 
4031  while (--nrows >= 0)
4032  {
4033  path = (Path *)
4034  create_group_result_path(root, grouped_rel,
4035  grouped_rel->reltarget,
4036  (List *) parse->havingQual);
4037  paths = lappend(paths, path);
4038  }
4039  path = (Path *)
4040  create_append_path(root,
4041  grouped_rel,
4042  paths,
4043  NIL,
4044  NIL,
4045  NULL,
4046  0,
4047  false,
4048  -1);
4049  }
4050  else
4051  {
4052  /* No grouping sets, or just one, so one output row */
4053  path = (Path *)
4054  create_group_result_path(root, grouped_rel,
4055  grouped_rel->reltarget,
4056  (List *) parse->havingQual);
4057  }
4058 
4059  add_path(grouped_rel, path);
4060 }
AppendPath * create_append_path(PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *partial_subpaths, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows)
Definition: pathnode.c:1300
GroupResultPath * create_group_result_path(PlannerInfo *root, RelOptInfo *rel, PathTarget *target, List *havingqual)
Definition: pathnode.c:1586

References add_path(), create_append_path(), create_group_result_path(), lappend(), list_length(), NIL, parse(), RelOptInfo::reltarget, and root.

Referenced by create_grouping_paths().

◆ create_distinct_paths()

static RelOptInfo * create_distinct_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget target 
)
static

Definition at line 4835 of file planner.c.

4837 {
4838  RelOptInfo *distinct_rel;
4839 
4840  /* For now, do all work in the (DISTINCT, NULL) upperrel */
4841  distinct_rel = fetch_upper_rel(root, UPPERREL_DISTINCT, NULL);
4842 
4843  /*
4844  * We don't compute anything at this level, so distinct_rel will be
4845  * parallel-safe if the input rel is parallel-safe. In particular, if
4846  * there is a DISTINCT ON (...) clause, any path for the input_rel will
4847  * output those expressions, and will not be parallel-safe unless those
4848  * expressions are parallel-safe.
4849  */
4850  distinct_rel->consider_parallel = input_rel->consider_parallel;
4851 
4852  /*
4853  * If the input rel belongs to a single FDW, so does the distinct_rel.
4854  */
4855  distinct_rel->serverid = input_rel->serverid;
4856  distinct_rel->userid = input_rel->userid;
4857  distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4858  distinct_rel->fdwroutine = input_rel->fdwroutine;
4859 
4860  /* build distinct paths based on input_rel's pathlist */
4861  create_final_distinct_paths(root, input_rel, distinct_rel);
4862 
4863  /* now build distinct paths based on input_rel's partial_pathlist */
4864  create_partial_distinct_paths(root, input_rel, distinct_rel, target);
4865 
4866  /* Give a helpful error if we failed to create any paths */
4867  if (distinct_rel->pathlist == NIL)
4868  ereport(ERROR,
4869  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4870  errmsg("could not implement DISTINCT"),
4871  errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4872 
4873  /*
4874  * If there is an FDW that's responsible for all baserels of the query,
4875  * let it consider adding ForeignPaths.
4876  */
4877  if (distinct_rel->fdwroutine &&
4878  distinct_rel->fdwroutine->GetForeignUpperPaths)
4879  distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4880  UPPERREL_DISTINCT,
4881  input_rel,
4882  distinct_rel,
4883  NULL);
4884 
4885  /* Let extensions possibly add some more paths */
4886  if (create_upper_paths_hook)
4887  (*create_upper_paths_hook) (root, UPPERREL_DISTINCT, input_rel,
4888  distinct_rel, NULL);
4889 
4890  /* Now choose the best path(s) */
4891  set_cheapest(distinct_rel);
4892 
4893  return distinct_rel;
4894 }
int errdetail(const char *fmt,...)
Definition: elog.c:1203
int errcode(int sqlerrcode)
Definition: elog.c:853
int errmsg(const char *fmt,...)
Definition: elog.c:1070
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
@ UPPERREL_DISTINCT
Definition: pathnodes.h:77
static RelOptInfo * create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *distinct_rel)
Definition: planner.c:5088
static void create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *final_distinct_rel, PathTarget *target)
Definition: planner.c:4905
create_upper_paths_hook_type create_upper_paths_hook
Definition: planner.c:76
RelOptInfo * fetch_upper_rel(PlannerInfo *root, UpperRelationKind kind, Relids relids)
Definition: relnode.c:1458
bool useridiscurrent
Definition: pathnodes.h:968
Oid userid
Definition: pathnodes.h:966
Oid serverid
Definition: pathnodes.h:964

References RelOptInfo::consider_parallel, create_final_distinct_paths(), create_partial_distinct_paths(), create_upper_paths_hook, ereport, errcode(), errdetail(), errmsg(), ERROR, fetch_upper_rel(), NIL, RelOptInfo::pathlist, root, RelOptInfo::serverid, set_cheapest(), UPPERREL_DISTINCT, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by grouping_planner().
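
The create_upper_paths_hook invocation above (repeated at every other upper-rel stage in this file) is the extension entry point for contributing extra paths. A minimal sketch of an extension installing the hook; the names my_upper_paths and prev_upper_paths_hook are hypothetical, while the hook variable, its type, and UPPERREL_DISTINCT are the real API:

#include "postgres.h"

#include "fmgr.h"
#include "optimizer/pathnode.h"
#include "optimizer/planner.h"

PG_MODULE_MAGIC;

static create_upper_paths_hook_type prev_upper_paths_hook = NULL;

/* Hypothetical callback: runs once per upper relation being planned. */
static void
my_upper_paths(PlannerInfo *root, UpperRelationKind stage,
               RelOptInfo *input_rel, RelOptInfo *output_rel,
               void *extra)
{
    /* Always chain to any previously installed hook. */
    if (prev_upper_paths_hook)
        prev_upper_paths_hook(root, stage, input_rel, output_rel, extra);

    if (stage == UPPERREL_DISTINCT)
    {
        /* add_path(output_rel, ...) could contribute a custom path here. */
    }
}

void
_PG_init(void)
{
    prev_upper_paths_hook = create_upper_paths_hook;
    create_upper_paths_hook = my_upper_paths;
}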

◆ create_final_distinct_paths()

static RelOptInfo * create_final_distinct_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo distinct_rel 
)
static

Definition at line 5088 of file planner.c.

5090 {
5091  Query *parse = root->parse;
5092  Path *cheapest_input_path = input_rel->cheapest_total_path;
5093  double numDistinctRows;
5094  bool allow_hash;
5095 
5096  /* Estimate number of distinct rows there will be */
5097  if (parse->groupClause || parse->groupingSets || parse->hasAggs ||
5098  root->hasHavingQual)
5099  {
5100  /*
5101  * If there was grouping or aggregation, use the number of input rows
5102  * as the estimated number of DISTINCT rows (ie, assume the input is
5103  * already mostly unique).
5104  */
5105  numDistinctRows = cheapest_input_path->rows;
5106  }
5107  else
5108  {
5109  /*
5110  * Otherwise, the UNIQUE filter has effects comparable to GROUP BY.
5111  */
5112  List *distinctExprs;
5113 
5114  distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
5115  parse->targetList);
5116  numDistinctRows = estimate_num_groups(root, distinctExprs,
5117  cheapest_input_path->rows,
5118  NULL, NULL);
5119  }
5120 
5121  /*
5122  * Consider sort-based implementations of DISTINCT, if possible.
5123  */
5124  if (grouping_is_sortable(root->processed_distinctClause))
5125  {
5126  /*
5127  * First, if we have any adequately presorted paths, just stick a
5128  * Unique node on those. We also consider doing an explicit sort of
5129  * the cheapest input path and Unique'ing that. If any paths have
5130  * presorted keys then we'll create an incremental sort atop those
5131  * before adding a Unique node on top. We'll also attempt to
5132  * reorder the required pathkeys to match the input path's pathkeys as
5133  * much as possible, in hopes of avoiding a possible need to re-sort.
5134  *
5135  * When we have DISTINCT ON, we must sort by the more rigorous of
5136  * DISTINCT and ORDER BY, else it won't have the desired behavior.
5137  * Also, if we do have to do an explicit sort, we might as well use
5138  * the more rigorous ordering to avoid a second sort later. (Note
5139  * that the parser will have ensured that one clause is a prefix of
5140  * the other.)
5141  */
5142  List *needed_pathkeys;
5143  ListCell *lc;
5144  double limittuples = root->distinct_pathkeys == NIL ? 1.0 : -1.0;
5145 
5146  if (parse->hasDistinctOn &&
5147  list_length(root->distinct_pathkeys) <
5148  list_length(root->sort_pathkeys))
5149  needed_pathkeys = root->sort_pathkeys;
5150  else
5151  needed_pathkeys = root->distinct_pathkeys;
5152 
5153  foreach(lc, input_rel->pathlist)
5154  {
5155  Path *input_path = (Path *) lfirst(lc);
5156  Path *sorted_path;
5157  List *useful_pathkeys_list = NIL;
5158 
5159  useful_pathkeys_list =
5160  get_useful_pathkeys_for_distinct(root,
5161  needed_pathkeys,
5162  input_path->pathkeys);
5163  Assert(list_length(useful_pathkeys_list) > 0);
5164 
5165  foreach_node(List, useful_pathkeys, useful_pathkeys_list)
5166  {
5167  sorted_path = make_ordered_path(root,
5168  distinct_rel,
5169  input_path,
5170  cheapest_input_path,
5171  useful_pathkeys,
5172  limittuples);
5173 
5174  if (sorted_path == NULL)
5175  continue;
5176 
5177  /*
5178  * distinct_pathkeys may have become empty if all of the
5179  * pathkeys were determined to be redundant. If all of the
5180  * pathkeys are redundant then each DISTINCT target must only
5181  * allow a single value, therefore all resulting tuples must
5182  * be identical (or at least indistinguishable by an equality
5183  * check). We can uniquify these tuples simply by taking
5184  * the first tuple. All we do here is add a path to do "LIMIT
5185  * 1" atop of 'sorted_path'. When doing a DISTINCT ON we may
5186  * still have a non-NIL sort_pathkeys list, so we must still
5187  * only do this with paths which are correctly sorted by
5188  * sort_pathkeys.
5189  */
5190  if (root->distinct_pathkeys == NIL)
5191  {
5192  Node *limitCount;
5193 
5194  limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
5195  sizeof(int64),
5196  Int64GetDatum(1), false,
5197  FLOAT8PASSBYVAL);
5198 
5199  /*
5200  * If the query already has a LIMIT clause, then we could
5201  * end up with a duplicate LimitPath in the final plan.
5202  * That does not seem worth troubling over too much.
5203  */
5204  add_path(distinct_rel, (Path *)
5205  create_limit_path(root, distinct_rel, sorted_path,
5206  NULL, limitCount,
5207  LIMIT_OPTION_COUNT, 0, 1));
5208  }
5209  else
5210  {
5211  add_path(distinct_rel, (Path *)
5212  create_upper_unique_path(root, distinct_rel,
5213  sorted_path,
5214  list_length(root->distinct_pathkeys),
5215  numDistinctRows));
5216  }
5217  }
5218  }
5219  }
5220 
5221  /*
5222  * Consider hash-based implementations of DISTINCT, if possible.
5223  *
5224  * If we were not able to make any other types of path, we *must* hash or
5225  * die trying. If we do have other choices, there are two things that
5226  * should prevent selection of hashing: if the query uses DISTINCT ON
5227  * (because it won't really have the expected behavior if we hash), or if
5228  * enable_hashagg is off.
5229  *
5230  * Note: grouping_is_hashable() is much more expensive to check than the
5231  * other gating conditions, so we want to do it last.
5232  */
5233  if (distinct_rel->pathlist == NIL)
5234  allow_hash = true; /* we have no alternatives */
5235  else if (parse->hasDistinctOn || !enable_hashagg)
5236  allow_hash = false; /* policy-based decision not to hash */
5237  else
5238  allow_hash = true; /* default */
5239 
5240  if (allow_hash && grouping_is_hashable(root->processed_distinctClause))
5241  {
5242  /* Generate hashed aggregate path --- no sort needed */
5243  add_path(distinct_rel, (Path *)
5244  create_agg_path(root,
5245  distinct_rel,
5246  cheapest_input_path,
5247  cheapest_input_path->pathtarget,
5248  AGG_HASHED,
5249  AGGSPLIT_SIMPLE,
5250  root->processed_distinctClause,
5251  NIL,
5252  NULL,
5253  numDistinctRows));
5254  }
5255 
5256  return distinct_rel;
5257 }
int64_t int64
Definition: c.h:482
#define FLOAT8PASSBYVAL
Definition: c.h:589
bool enable_hashagg
Definition: costsize.c:152
Datum Int64GetDatum(int64 X)
Definition: fmgr.c:1807
Const * makeConst(Oid consttype, int32 consttypmod, Oid constcollid, int constlen, Datum constvalue, bool constisnull, bool constbyval)
Definition: makefuncs.c:301
@ LIMIT_OPTION_COUNT
Definition: nodes.h:431
LimitPath * create_limit_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, Node *limitOffset, Node *limitCount, LimitOption limitOption, int64 offset_est, int64 count_est)
Definition: pathnode.c:3922
UpperUniquePath * create_upper_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, int numCols, double numGroups)
Definition: pathnode.c:3187
#define foreach_node(type, var, lst)
Definition: pg_list.h:496
static List * get_useful_pathkeys_for_distinct(PlannerInfo *root, List *needed_pathkeys, List *path_pathkeys)
Definition: planner.c:5268
#define InvalidOid
Definition: postgres_ext.h:36
double estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, List **pgset, EstimationInfo *estinfo)
Definition: selfuncs.c:3420
Cardinality rows
Definition: pathnodes.h:1669
List * get_sortgrouplist_exprs(List *sgClauses, List *targetList)
Definition: tlist.c:392
bool grouping_is_sortable(List *groupClause)
Definition: tlist.c:540
bool grouping_is_hashable(List *groupClause)
Definition: tlist.c:560

References add_path(), AGG_HASHED, AGGSPLIT_SIMPLE, Assert, RelOptInfo::cheapest_total_path, create_agg_path(), create_limit_path(), create_upper_unique_path(), enable_hashagg, estimate_num_groups(), FLOAT8PASSBYVAL, foreach_node, get_sortgrouplist_exprs(), get_useful_pathkeys_for_distinct(), grouping_is_hashable(), grouping_is_sortable(), Int64GetDatum(), InvalidOid, lfirst, LIMIT_OPTION_COUNT, list_length(), make_ordered_path(), makeConst(), NIL, parse(), Path::pathkeys, RelOptInfo::pathlist, root, and Path::rows.

Referenced by create_distinct_paths(), and create_partial_distinct_paths().
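
The LIMIT-1 trick above needs an int8 constant node for the limit count; isolated, the construction is just a makeConst() call. A sketch, assuming the usual planner headers (the surrounding LimitPath plumbing is as shown above):

#include "postgres.h"

#include "catalog/pg_type.h"    /* INT8OID */
#include "nodes/makefuncs.h"

/*
 * Sketch: build the Const for "LIMIT 1".  FLOAT8PASSBYVAL reports whether
 * 8-byte Datums are passed by value on this platform, which is exactly
 * what a sizeof(int64) constant needs for its constbyval flag.
 */
static Node *
make_limit_one(void)
{
    return (Node *) makeConst(INT8OID,
                              -1,               /* no typmod */
                              InvalidOid,       /* no collation */
                              sizeof(int64),
                              Int64GetDatum(1),
                              false,            /* not NULL */
                              FLOAT8PASSBYVAL);
}

The result feeds create_limit_path() with LIMIT_OPTION_COUNT, as in both DISTINCT code paths above.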

◆ create_grouping_paths()

static RelOptInfo * create_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget target,
bool  target_parallel_safe,
grouping_sets_data gd 
)
static

Definition at line 3825 of file planner.c.

3830 {
3831  Query *parse = root->parse;
3832  RelOptInfo *grouped_rel;
3833  RelOptInfo *partially_grouped_rel;
3834  AggClauseCosts agg_costs;
3835 
3836  MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
3837  get_agg_clause_costs(root, AGGSPLIT_SIMPLE, &agg_costs);
3838 
3839  /*
3840  * Create grouping relation to hold fully aggregated grouping and/or
3841  * aggregation paths.
3842  */
3843  grouped_rel = make_grouping_rel(root, input_rel, target,
3844  target_parallel_safe, parse->havingQual);
3845 
3846  /*
3847  * Create either paths for a degenerate grouping or paths for ordinary
3848  * grouping, as appropriate.
3849  */
3850  if (is_degenerate_grouping(root))
3851  create_degenerate_grouping_paths(root, input_rel, grouped_rel);
3852  else
3853  {
3854  int flags = 0;
3855  GroupPathExtraData extra;
3856 
3857  /*
3858  * Determine whether it's possible to perform sort-based
3859  * implementations of grouping. (Note that if processed_groupClause
3860  * is empty, grouping_is_sortable() is trivially true, and all the
3861  * pathkeys_contained_in() tests will succeed too, so that we'll
3862  * consider every surviving input path.)
3863  *
3864  * If we have grouping sets, we might be able to sort some but not all
3865  * of them; in this case, we need can_sort to be true as long as we
3866  * must consider any sorted-input plan.
3867  */
3868  if ((gd && gd->rollups != NIL)
3869  || grouping_is_sortable(root->processed_groupClause))
3870  flags |= GROUPING_CAN_USE_SORT;
3871 
3872  /*
3873  * Determine whether we should consider hash-based implementations of
3874  * grouping.
3875  *
3876  * Hashed aggregation only applies if we're grouping. If we have
3877  * grouping sets, some groups might be hashable but others not; in
3878  * this case we set can_hash true as long as there is nothing globally
3879  * preventing us from hashing (and we should therefore consider plans
3880  * with hashes).
3881  *
3882  * Executor doesn't support hashed aggregation with DISTINCT or ORDER
3883  * BY aggregates. (Doing so would imply storing *all* the input
3884  * values in the hash table, and/or running many sorts in parallel,
3885  * either of which seems like a certain loser.) We similarly don't
3886  * support ordered-set aggregates in hashed aggregation, but that case
3887  * is also included in the numOrderedAggs count.
3888  *
3889  * Note: grouping_is_hashable() is much more expensive to check than
3890  * the other gating conditions, so we want to do it last.
3891  */
3892  if ((parse->groupClause != NIL &&
3893  root->numOrderedAggs == 0 &&
3894  (gd ? gd->any_hashable : grouping_is_hashable(root->processed_groupClause))))
3895  flags |= GROUPING_CAN_USE_HASH;
3896 
3897  /*
3898  * Determine whether partial aggregation is possible.
3899  */
3900  if (can_partial_agg(root))
3901  flags |= GROUPING_CAN_PARTIAL_AGG;
3902 
3903  extra.flags = flags;
3904  extra.target_parallel_safe = target_parallel_safe;
3905  extra.havingQual = parse->havingQual;
3906  extra.targetList = parse->targetList;
3907  extra.partial_costs_set = false;
3908 
3909  /*
3910  * Determine whether partitionwise aggregation is in theory possible.
3911  * It can be disabled by the user, and for now, we don't try to
3912  * support grouping sets. create_ordinary_grouping_paths() will check
3913  * additional conditions, such as whether input_rel is partitioned.
3914  */
3915  if (enable_partitionwise_aggregate && !parse->groupingSets)
3916  extra.patype = PARTITIONWISE_AGGREGATE_FULL;
3917  else
3918  extra.patype = PARTITIONWISE_AGGREGATE_NONE;
3919 
3920  create_ordinary_grouping_paths(root, input_rel, grouped_rel,
3921  &agg_costs, gd, &extra,
3922  &partially_grouped_rel);
3923  }
3924 
3925  set_cheapest(grouped_rel);
3926  return grouped_rel;
3927 }
#define MemSet(start, val, len)
Definition: c.h:974
bool enable_partitionwise_aggregate
Definition: costsize.c:160
@ PARTITIONWISE_AGGREGATE_FULL
Definition: pathnodes.h:3281
@ PARTITIONWISE_AGGREGATE_NONE
Definition: pathnodes.h:3280
#define GROUPING_CAN_PARTIAL_AGG
Definition: pathnodes.h:3265
static void create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel)
Definition: planner.c:4012
static bool is_degenerate_grouping(PlannerInfo *root)
Definition: planner.c:3991
static void create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, GroupPathExtraData *extra, RelOptInfo **partially_grouped_rel_p)
Definition: planner.c:4076
static bool can_partial_agg(PlannerInfo *root)
Definition: planner.c:7742
static RelOptInfo * make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, Node *havingQual)
Definition: planner.c:3938
void get_agg_clause_costs(PlannerInfo *root, AggSplit aggsplit, AggClauseCosts *costs)
Definition: prepagg.c:559
PartitionwiseAggregateType patype
Definition: pathnodes.h:3310

References AGGSPLIT_SIMPLE, grouping_sets_data::any_hashable, can_partial_agg(), create_degenerate_grouping_paths(), create_ordinary_grouping_paths(), enable_partitionwise_aggregate, GroupPathExtraData::flags, get_agg_clause_costs(), GROUPING_CAN_PARTIAL_AGG, GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, grouping_is_hashable(), grouping_is_sortable(), GroupPathExtraData::havingQual, is_degenerate_grouping(), make_grouping_rel(), MemSet, NIL, parse(), GroupPathExtraData::partial_costs_set, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, GroupPathExtraData::patype, grouping_sets_data::rollups, root, set_cheapest(), GroupPathExtraData::target_parallel_safe, and GroupPathExtraData::targetList.

Referenced by grouping_planner().
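
The flags word built above travels to later phases in GroupPathExtraData.flags and is read back with plain bit tests. A condensed restatement of the gating logic for the no-grouping-sets case (variables as in the function above):

int         flags = 0;

/* sort-based grouping is possible if every group key is sortable */
if (grouping_is_sortable(root->processed_groupClause))
    flags |= GROUPING_CAN_USE_SORT;

/* hashing requires a GROUP BY, no ordered aggs, and hashable keys */
if (parse->groupClause != NIL &&
    root->numOrderedAggs == 0 &&
    grouping_is_hashable(root->processed_groupClause))
    flags |= GROUPING_CAN_USE_HASH;

if (can_partial_agg(root))
    flags |= GROUPING_CAN_PARTIAL_AGG;

/* consumers test individual bits, e.g. in create_ordinary_grouping_paths() */
if ((flags & GROUPING_CAN_PARTIAL_AGG) != 0)
{
    /* partially aggregated paths are worth building */
}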

◆ create_one_window_path()

static void create_one_window_path ( PlannerInfo root,
RelOptInfo window_rel,
Path path,
PathTarget input_target,
PathTarget output_target,
WindowFuncLists wflists,
List activeWindows 
)
static

Definition at line 4665 of file planner.c.

4672 {
4673  PathTarget *window_target;
4674  ListCell *l;
4675  List *topqual = NIL;
4676 
4677  /*
4678  * Since each window clause could require a different sort order, we stack
4679  * up a WindowAgg node for each clause, with sort steps between them as
4680  * needed. (We assume that select_active_windows chose a good order for
4681  * executing the clauses in.)
4682  *
4683  * input_target should contain all Vars and Aggs needed for the result.
4684  * (In some cases we wouldn't need to propagate all of these all the way
4685  * to the top, since they might only be needed as inputs to WindowFuncs.
4686  * It's probably not worth trying to optimize that though.) It must also
4687  * contain all window partitioning and sorting expressions, to ensure
4688  * they're computed only once at the bottom of the stack (that's critical
4689  * for volatile functions). As we climb up the stack, we'll add outputs
4690  * for the WindowFuncs computed at each level.
4691  */
4692  window_target = input_target;
4693 
4694  foreach(l, activeWindows)
4695  {
4696  WindowClause *wc = lfirst_node(WindowClause, l);
4697  List *window_pathkeys;
4698  List *runcondition = NIL;
4699  int presorted_keys;
4700  bool is_sorted;
4701  bool topwindow;
4702  ListCell *lc2;
4703 
4704  window_pathkeys = make_pathkeys_for_window(root,
4705  wc,
4706  root->processed_tlist);
4707 
4708  is_sorted = pathkeys_count_contained_in(window_pathkeys,
4709  path->pathkeys,
4710  &presorted_keys);
4711 
4712  /* Sort if necessary */
4713  if (!is_sorted)
4714  {
4715  /*
4716  * No presorted keys or incremental sort disabled, just perform a
4717  * complete sort.
4718  */
4719  if (presorted_keys == 0 || !enable_incremental_sort)
4720  path = (Path *) create_sort_path(root, window_rel,
4721  path,
4722  window_pathkeys,
4723  -1.0);
4724  else
4725  {
4726  /*
4727  * Since we have presorted keys and incremental sort is
4728  * enabled, just use incremental sort.
4729  */
4730  path = (Path *) create_incremental_sort_path(root,
4731  window_rel,
4732  path,
4733  window_pathkeys,
4734  presorted_keys,
4735  -1.0);
4736  }
4737  }
4738 
4739  if (lnext(activeWindows, l))
4740  {
4741  /*
4742  * Add the current WindowFuncs to the output target for this
4743  * intermediate WindowAggPath. We must copy window_target to
4744  * avoid changing the previous path's target.
4745  *
4746  * Note: a WindowFunc adds nothing to the target's eval costs; but
4747  * we do need to account for the increase in tlist width.
4748  */
4749  int64 tuple_width = window_target->width;
4750 
4751  window_target = copy_pathtarget(window_target);
4752  foreach(lc2, wflists->windowFuncs[wc->winref])
4753  {
4754  WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4755 
4756  add_column_to_pathtarget(window_target, (Expr *) wfunc, 0);
4757  tuple_width += get_typavgwidth(wfunc->wintype, -1);
4758  }
4759  window_target->width = clamp_width_est(tuple_width);
4760  }
4761  else
4762  {
4763  /* Install the goal target in the topmost WindowAgg */
4764  window_target = output_target;
4765  }
4766 
4767  /* mark the final item in the list as the top-level window */
4768  topwindow = foreach_current_index(l) == list_length(activeWindows) - 1;
4769 
4770  /*
4771  * Collect the WindowFuncRunConditions from each WindowFunc and
4772  * convert them into OpExprs
4773  */
4774  foreach(lc2, wflists->windowFuncs[wc->winref])
4775  {
4776  ListCell *lc3;
4777  WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4778 
4779  foreach(lc3, wfunc->runCondition)
4780  {
4781  WindowFuncRunCondition *wfuncrc =
4782  lfirst_node(WindowFuncRunCondition, lc3);
4783  Expr *opexpr;
4784  Expr *leftop;
4785  Expr *rightop;
4786 
4787  if (wfuncrc->wfunc_left)
4788  {
4789  leftop = (Expr *) copyObject(wfunc);
4790  rightop = copyObject(wfuncrc->arg);
4791  }
4792  else
4793  {
4794  leftop = copyObject(wfuncrc->arg);
4795  rightop = (Expr *) copyObject(wfunc);
4796  }
4797 
4798  opexpr = make_opclause(wfuncrc->opno,
4799  BOOLOID,
4800  false,
4801  leftop,
4802  rightop,
4803  InvalidOid,
4804  wfuncrc->inputcollid);
4805 
4806  runcondition = lappend(runcondition, opexpr);
4807 
4808  if (!topwindow)
4809  topqual = lappend(topqual, opexpr);
4810  }
4811  }
4812 
4813  path = (Path *)
4814  create_windowagg_path(root, window_rel, path, window_target,
4815  wflists->windowFuncs[wc->winref],
4816  runcondition, wc,
4817  topwindow ? topqual : NIL, topwindow);
4818  }
4819 
4820  add_path(window_rel, path);
4821 }
int32 clamp_width_est(int64 tuple_width)
Definition: costsize.c:242
bool enable_incremental_sort
Definition: costsize.c:151
int32 get_typavgwidth(Oid typid, int32 typmod)
Definition: lsyscache.c:2578
Expr * make_opclause(Oid opno, Oid opresulttype, bool opretset, Expr *leftop, Expr *rightop, Oid opcollid, Oid inputcollid)
Definition: makefuncs.c:651
#define copyObject(obj)
Definition: nodes.h:224
bool pathkeys_count_contained_in(List *keys1, List *keys2, int *n_common)
Definition: pathkeys.c:558
SortPath * create_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, double limit_tuples)
Definition: pathnode.c:3082
IncrementalSortPath * create_incremental_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, int presorted_keys, double limit_tuples)
Definition: pathnode.c:3032
WindowAggPath * create_windowagg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *windowFuncs, List *runCondition, WindowClause *winclause, List *qual, bool topwindow)
Definition: pathnode.c:3577
static List * make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc, List *tlist)
Definition: planner.c:6277
List ** windowFuncs
Definition: clauses.h:23
void add_column_to_pathtarget(PathTarget *target, Expr *expr, Index sortgroupref)
Definition: tlist.c:695

References add_column_to_pathtarget(), add_path(), WindowFuncRunCondition::arg, clamp_width_est(), copy_pathtarget(), copyObject, create_incremental_sort_path(), create_sort_path(), create_windowagg_path(), enable_incremental_sort, foreach_current_index, get_typavgwidth(), InvalidOid, lappend(), lfirst_node, list_length(), lnext(), make_opclause(), make_pathkeys_for_window(), NIL, WindowFuncRunCondition::opno, Path::pathkeys, pathkeys_count_contained_in(), root, WindowFuncRunCondition::wfunc_left, PathTarget::width, WindowFuncLists::windowFuncs, and WindowClause::winref.

Referenced by create_window_paths().
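
For context, a run condition lets the executor stop evaluating a monotonic window function early, e.g. a subquery computing row_number() that the outer query filters with rn <= 10. The loop above rebuilds each stored WindowFuncRunCondition into an executable OpExpr; the wfunc_left branch in isolation (a sketch, with wfunc and wfuncrc standing in for the loop variables above):

/* Sketch: build "wfunc <op> arg" for the wfunc_left case above. */
Expr       *leftop = (Expr *) copyObject(wfunc);
Expr       *rightop = copyObject(wfuncrc->arg);
Expr       *opexpr = make_opclause(wfuncrc->opno,
                                   BOOLOID,     /* quals yield boolean */
                                   false,       /* opretset: not a set */
                                   leftop,
                                   rightop,
                                   InvalidOid,  /* opcollid: none for bool */
                                   wfuncrc->inputcollid);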

◆ create_ordered_paths()

static RelOptInfo * create_ordered_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget target,
bool  target_parallel_safe,
double  limit_tuples 
)
static

Definition at line 5353 of file planner.c.

5358 {
5359  Path *cheapest_input_path = input_rel->cheapest_total_path;
5360  RelOptInfo *ordered_rel;
5361  ListCell *lc;
5362 
5363  /* For now, do all work in the (ORDERED, NULL) upperrel */
5364  ordered_rel = fetch_upper_rel(root, UPPERREL_ORDERED, NULL);
5365 
5366  /*
5367  * If the input relation is not parallel-safe, then the ordered relation
5368  * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
5369  * target list is parallel-safe.
5370  */
5371  if (input_rel->consider_parallel && target_parallel_safe)
5372  ordered_rel->consider_parallel = true;
5373 
5374  /*
5375  * If the input rel belongs to a single FDW, so does the ordered_rel.
5376  */
5377  ordered_rel->serverid = input_rel->serverid;
5378  ordered_rel->userid = input_rel->userid;
5379  ordered_rel->useridiscurrent = input_rel->useridiscurrent;
5380  ordered_rel->fdwroutine = input_rel->fdwroutine;
5381 
5382  foreach(lc, input_rel->pathlist)
5383  {
5384  Path *input_path = (Path *) lfirst(lc);
5385  Path *sorted_path;
5386  bool is_sorted;
5387  int presorted_keys;
5388 
5389  is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5390  input_path->pathkeys, &presorted_keys);
5391 
5392  if (is_sorted)
5393  sorted_path = input_path;
5394  else
5395  {
5396  /*
5397  * Try at least sorting the cheapest path and also try
5398  * incrementally sorting any path which is partially sorted
5399  * already (no need to deal with paths which have presorted keys
5400  * when incremental sort is disabled unless it's the cheapest
5401  * input path).
5402  */
5403  if (input_path != cheapest_input_path &&
5404  (presorted_keys == 0 || !enable_incremental_sort))
5405  continue;
5406 
5407  /*
5408  * We've no need to consider both a sort and incremental sort.
5409  * We'll just do a sort if there are no presorted keys and an
5410  * incremental sort when there are presorted keys.
5411  */
5412  if (presorted_keys == 0 || !enable_incremental_sort)
5413  sorted_path = (Path *) create_sort_path(root,
5414  ordered_rel,
5415  input_path,
5416  root->sort_pathkeys,
5417  limit_tuples);
5418  else
5419  sorted_path = (Path *) create_incremental_sort_path(root,
5420  ordered_rel,
5421  input_path,
5422  root->sort_pathkeys,
5423  presorted_keys,
5424  limit_tuples);
5425  }
5426 
5427  /*
5428  * If the pathtarget of the result path has different expressions from
5429  * the target to be applied, a projection step is needed.
5430  */
5431  if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5432  sorted_path = apply_projection_to_path(root, ordered_rel,
5433  sorted_path, target);
5434 
5435  add_path(ordered_rel, sorted_path);
5436  }
5437 
5438  /*
5439  * generate_gather_paths() will have already generated a simple Gather
5440  * path for the best parallel path, if any, and the loop above will have
5441  * considered sorting it. Similarly, generate_gather_paths() will also
5442  * have generated order-preserving Gather Merge plans which can be used
5443  * without sorting if they happen to match the sort_pathkeys, and the loop
5444  * above will have handled those as well. However, there's one more
5445  * possibility: it may make sense to sort the cheapest partial path or
5446  * incrementally sort any partial path that is partially sorted according
5447  * to the required output order and then use Gather Merge.
5448  */
5449  if (ordered_rel->consider_parallel && root->sort_pathkeys != NIL &&
5450  input_rel->partial_pathlist != NIL)
5451  {
5452  Path *cheapest_partial_path;
5453 
5454  cheapest_partial_path = linitial(input_rel->partial_pathlist);
5455 
5456  foreach(lc, input_rel->partial_pathlist)
5457  {
5458  Path *input_path = (Path *) lfirst(lc);
5459  Path *sorted_path;
5460  bool is_sorted;
5461  int presorted_keys;
5462  double total_groups;
5463 
5464  is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5465  input_path->pathkeys,
5466  &presorted_keys);
5467 
5468  if (is_sorted)
5469  continue;
5470 
5471  /*
5472  * Try at least sorting the cheapest path and also try
5473  * incrementally sorting any path which is partially sorted
5474  * already (no need to deal with paths which have presorted keys
5475  * when incremental sort is disabled unless it's the cheapest
5476  * partial path).
5477  */
5478  if (input_path != cheapest_partial_path &&
5479  (presorted_keys == 0 || !enable_incremental_sort))
5480  continue;
5481 
5482  /*
5483  * We've no need to consider both a sort and incremental sort.
5484  * We'll just do a sort if there are no presorted keys and an
5485  * incremental sort when there are presorted keys.
5486  */
5487  if (presorted_keys == 0 || !enable_incremental_sort)
5488  sorted_path = (Path *) create_sort_path(root,
5489  ordered_rel,
5490  input_path,
5491  root->sort_pathkeys,
5492  limit_tuples);
5493  else
5494  sorted_path = (Path *) create_incremental_sort_path(root,
5495  ordered_rel,
5496  input_path,
5497  root->sort_pathkeys,
5498  presorted_keys,
5499  limit_tuples);
5500  total_groups = compute_gather_rows(sorted_path);
5501  sorted_path = (Path *)
5502  create_gather_merge_path(root, ordered_rel,
5503  sorted_path,
5504  sorted_path->pathtarget,
5505  root->sort_pathkeys, NULL,
5506  &total_groups);
5507 
5508  /*
5509  * If the pathtarget of the result path has different expressions
5510  * from the target to be applied, a projection step is needed.
5511  */
5512  if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5513  sorted_path = apply_projection_to_path(root, ordered_rel,
5514  sorted_path, target);
5515 
5516  add_path(ordered_rel, sorted_path);
5517  }
5518  }
5519 
5520  /*
5521  * If there is an FDW that's responsible for all baserels of the query,
5522  * let it consider adding ForeignPaths.
5523  */
5524  if (ordered_rel->fdwroutine &&
5525  ordered_rel->fdwroutine->GetForeignUpperPaths)
5526  ordered_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_ORDERED,
5527  input_rel, ordered_rel,
5528  NULL);
5529 
5530  /* Let extensions possibly add some more paths */
5531  if (create_upper_paths_hook)
5532  (*create_upper_paths_hook) (root, UPPERREL_ORDERED,
5533  input_rel, ordered_rel, NULL);
5534 
5535  /*
5536  * No need to bother with set_cheapest here; grouping_planner does not
5537  * need us to do it.
5538  */
5539  Assert(ordered_rel->pathlist != NIL);
5540 
5541  return ordered_rel;
5542 }
double compute_gather_rows(Path *path)
Definition: costsize.c:6600
bool equal(const void *a, const void *b)
Definition: equalfuncs.c:223
GatherMergePath * create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *pathkeys, Relids required_outer, double *rows)
Definition: pathnode.c:1962
@ UPPERREL_ORDERED
Definition: pathnodes.h:78

References add_path(), apply_projection_to_path(), Assert, RelOptInfo::cheapest_total_path, compute_gather_rows(), RelOptInfo::consider_parallel, create_gather_merge_path(), create_incremental_sort_path(), create_sort_path(), create_upper_paths_hook, enable_incremental_sort, equal(), PathTarget::exprs, fetch_upper_rel(), lfirst, linitial, NIL, RelOptInfo::partial_pathlist, Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::pathlist, root, RelOptInfo::serverid, UPPERREL_ORDERED, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by grouping_planner().
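
Both loops above apply a single ordering rule: skip non-cheapest inputs with nothing presorted, do a full sort when there are no presorted keys (or incremental sort is disabled), and incrementally sort otherwise. The rule distilled (a sketch, with rel and input_path as placeholders for the loop state above):

int         presorted_keys;
bool        is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
                                                    input_path->pathkeys,
                                                    &presorted_keys);

if (!is_sorted)
{
    if (presorted_keys == 0 || !enable_incremental_sort)
        input_path = (Path *) create_sort_path(root, rel, input_path,
                                               root->sort_pathkeys,
                                               limit_tuples);
    else
        input_path = (Path *) create_incremental_sort_path(root, rel,
                                                           input_path,
                                                           root->sort_pathkeys,
                                                           presorted_keys,
                                                           limit_tuples);
}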

◆ create_ordinary_grouping_paths()

static void create_ordinary_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo grouped_rel,
const AggClauseCosts agg_costs,
grouping_sets_data gd,
GroupPathExtraData extra,
RelOptInfo **  partially_grouped_rel_p 
)
static

Definition at line 4076 of file planner.c.

4082 {
4083  Path *cheapest_path = input_rel->cheapest_total_path;
4084  RelOptInfo *partially_grouped_rel = NULL;
4085  double dNumGroups;
4086  PartitionwiseAggregateType patype = PARTITIONWISE_AGGREGATE_NONE;
4087 
4088  /*
4089  * If this is the topmost grouping relation or if the parent relation is
4090  * doing some form of partitionwise aggregation, then we may be able to do
4091  * it at this level also. However, if the input relation is not
4092  * partitioned, partitionwise aggregate is impossible.
4093  */
4094  if (extra->patype != PARTITIONWISE_AGGREGATE_NONE &&
4095  IS_PARTITIONED_REL(input_rel))
4096  {
4097  /*
4098  * If this is the topmost relation or if the parent relation is doing
4099  * full partitionwise aggregation, then we can do full partitionwise
4100  * aggregation provided that the GROUP BY clause contains all of the
4101  * partitioning columns at this level and the collation used by GROUP
4102  * BY matches the partitioning collation. Otherwise, we can do at
4103  * most partial partitionwise aggregation. But if partial aggregation
4104  * is not supported in general then we can't use it for partitionwise
4105  * aggregation either.
4106  *
4107  * Check parse->groupClause not processed_groupClause, because it's
4108  * okay if some of the partitioning columns were proved redundant.
4109  */
4110  if (extra->patype == PARTITIONWISE_AGGREGATE_FULL &&
4111  group_by_has_partkey(input_rel, extra->targetList,
4112  root->parse->groupClause))
4113  patype = PARTITIONWISE_AGGREGATE_FULL;
4114  else if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4115  patype = PARTITIONWISE_AGGREGATE_PARTIAL;
4116  else
4117  patype = PARTITIONWISE_AGGREGATE_NONE;
4118  }
4119 
4120  /*
4121  * Before generating paths for grouped_rel, we first generate any possible
4122  * partially grouped paths; that way, later code can easily consider both
4123  * parallel and non-parallel approaches to grouping.
4124  */
4125  if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4126  {
4127  bool force_rel_creation;
4128 
4129  /*
4130  * If we're doing partitionwise aggregation at this level, force
4131  * creation of a partially_grouped_rel so we can add partitionwise
4132  * paths to it.
4133  */
4134  force_rel_creation = (patype == PARTITIONWISE_AGGREGATE_PARTIAL);
4135 
4136  partially_grouped_rel =
4137  create_partial_grouping_paths(root,
4138  grouped_rel,
4139  input_rel,
4140  gd,
4141  extra,
4142  force_rel_creation);
4143  }
4144 
4145  /* Set out parameter. */
4146  *partially_grouped_rel_p = partially_grouped_rel;
4147 
4148  /* Apply partitionwise aggregation technique, if possible. */
4149  if (patype != PARTITIONWISE_AGGREGATE_NONE)
4150  create_partitionwise_grouping_paths(root, input_rel, grouped_rel,
4151  partially_grouped_rel, agg_costs,
4152  gd, patype, extra);
4153 
4154  /* If we are doing partial aggregation only, return. */
4155  if (extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
4156  {
4157  Assert(partially_grouped_rel);
4158 
4159  if (partially_grouped_rel->pathlist)
4160  set_cheapest(partially_grouped_rel);
4161 
4162  return;
4163  }
4164 
4165  /* Gather any partially grouped partial paths. */
4166  if (partially_grouped_rel && partially_grouped_rel->partial_pathlist)
4167  {
4168  gather_grouping_paths(root, partially_grouped_rel);
4169  set_cheapest(partially_grouped_rel);
4170  }
4171 
4172  /*
4173  * Estimate number of groups.
4174  */
4175  dNumGroups = get_number_of_groups(root,
4176  cheapest_path->rows,
4177  gd,
4178  extra->targetList);
4179 
4180  /* Build final grouping paths */
4181  add_paths_to_grouping_rel(root, input_rel, grouped_rel,
4182  partially_grouped_rel, agg_costs, gd,
4183  dNumGroups, extra);
4184 
4185  /* Give a helpful error if we failed to find any implementation */
4186  if (grouped_rel->pathlist == NIL)
4187  ereport(ERROR,
4188  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4189  errmsg("could not implement GROUP BY"),
4190  errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4191 
4192  /*
4193  * If there is an FDW that's responsible for all baserels of the query,
4194  * let it consider adding ForeignPaths.
4195  */
4196  if (grouped_rel->fdwroutine &&
4197  grouped_rel->fdwroutine->GetForeignUpperPaths)
4198  grouped_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_GROUP_AGG,
4199  input_rel, grouped_rel,
4200  extra);
4201 
4202  /* Let extensions possibly add some more paths */
4203  if (create_upper_paths_hook)
4204  (*create_upper_paths_hook) (root, UPPERREL_GROUP_AGG,
4205  input_rel, grouped_rel,
4206  extra);
4207 }
PartitionwiseAggregateType
Definition: pathnodes.h:3279
@ PARTITIONWISE_AGGREGATE_PARTIAL
Definition: pathnodes.h:3282
@ UPPERREL_GROUP_AGG
Definition: pathnodes.h:74
static RelOptInfo * create_partial_grouping_paths(PlannerInfo *root, RelOptInfo *grouped_rel, RelOptInfo *input_rel, grouping_sets_data *gd, GroupPathExtraData *extra, bool force_rel_creation)
Definition: planner.c:7306
static void add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, double dNumGroups, GroupPathExtraData *extra)
Definition: planner.c:7069
static double get_number_of_groups(PlannerInfo *root, double path_rows, grouping_sets_data *gd, List *target_list)
Definition: planner.c:3703
static void create_partitionwise_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, PartitionwiseAggregateType patype, GroupPathExtraData *extra)
Definition: planner.c:8019
static bool group_by_has_partkey(RelOptInfo *input_rel, List *targetList, List *groupClause)
Definition: planner.c:8163

References add_paths_to_grouping_rel(), Assert, RelOptInfo::cheapest_total_path, create_partial_grouping_paths(), create_partitionwise_grouping_paths(), create_upper_paths_hook, ereport, errcode(), errdetail(), errmsg(), ERROR, GroupPathExtraData::flags, gather_grouping_paths(), get_number_of_groups(), group_by_has_partkey(), GROUPING_CAN_PARTIAL_AGG, IS_PARTITIONED_REL, NIL, RelOptInfo::partial_pathlist, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, PARTITIONWISE_AGGREGATE_PARTIAL, RelOptInfo::pathlist, GroupPathExtraData::patype, root, Path::rows, set_cheapest(), GroupPathExtraData::targetList, and UPPERREL_GROUP_AGG.

Referenced by create_grouping_paths(), and create_partitionwise_grouping_paths().
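
The GetForeignUpperPaths call-out above mirrors what every upper stage in this file offers to an FDW that owns all of the query's base relations. A skeleton of the FDW side; myGetForeignUpperPaths is hypothetical, while the callback signature and UPPERREL_GROUP_AGG come from fdwapi.h and pathnodes.h:

#include "postgres.h"

#include "foreign/fdwapi.h"

/*
 * Hypothetical FDW callback, installed in the FDW's FdwRoutine.  The core
 * planner calls it once per upper relation, as seen above.
 */
static void
myGetForeignUpperPaths(PlannerInfo *root, UpperRelationKind stage,
                       RelOptInfo *input_rel, RelOptInfo *output_rel,
                       void *extra)
{
    if (stage != UPPERREL_GROUP_AGG)
        return;                 /* this sketch only handles grouping */

    /*
     * A real FDW would check that every aggregate is safe to ship and then
     * add a ForeignPath to output_rel via add_path(); for grouping, the
     * GroupPathExtraData passed as 'extra' carries the HAVING qual.
     */
}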

◆ create_partial_distinct_paths()

static void create_partial_distinct_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo final_distinct_rel,
PathTarget target 
)
static

Definition at line 4905 of file planner.c.

4908 {
4909  RelOptInfo *partial_distinct_rel;
4910  Query *parse;
4911  List *distinctExprs;
4912  double numDistinctRows;
4913  Path *cheapest_partial_path;
4914  ListCell *lc;
4915 
4916  /* nothing to do when there are no partial paths in the input rel */
4917  if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
4918  return;
4919 
4920  parse = root->parse;
4921 
4922  /* can't do parallel DISTINCT ON */
4923  if (parse->hasDistinctOn)
4924  return;
4925 
4926  partial_distinct_rel = fetch_upper_rel(root, UPPERREL_PARTIAL_DISTINCT,
4927  NULL);
4928  partial_distinct_rel->reltarget = target;
4929  partial_distinct_rel->consider_parallel = input_rel->consider_parallel;
4930 
4931  /*
4932  * If input_rel belongs to a single FDW, so does the partial_distinct_rel.
4933  */
4934  partial_distinct_rel->serverid = input_rel->serverid;
4935  partial_distinct_rel->userid = input_rel->userid;
4936  partial_distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4937  partial_distinct_rel->fdwroutine = input_rel->fdwroutine;
4938 
4939  cheapest_partial_path = linitial(input_rel->partial_pathlist);
4940 
4941  distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
4942  parse->targetList);
4943 
4944  /* estimate how many distinct rows we'll get from each worker */
4945  numDistinctRows = estimate_num_groups(root, distinctExprs,
4946  cheapest_partial_path->rows,
4947  NULL, NULL);
4948 
4949  /*
4950  * Try sorting the cheapest path and incrementally sorting any paths with
4951  * presorted keys, putting unique paths atop those. We'll also
4952  * attempt to reorder the required pathkeys to match the input path's
4953  * pathkeys as much as possible, in hopes of avoiding a possible need to
4954  * re-sort.
4955  */
4956  if (grouping_is_sortable(root->processed_distinctClause))
4957  {
4958  foreach(lc, input_rel->partial_pathlist)
4959  {
4960  Path *input_path = (Path *) lfirst(lc);
4961  Path *sorted_path;
4962  List *useful_pathkeys_list = NIL;
4963 
4964  useful_pathkeys_list =
4965  get_useful_pathkeys_for_distinct(root,
4966  root->distinct_pathkeys,
4967  input_path->pathkeys);
4968  Assert(list_length(useful_pathkeys_list) > 0);
4969 
4970  foreach_node(List, useful_pathkeys, useful_pathkeys_list)
4971  {
4972  sorted_path = make_ordered_path(root,
4973  partial_distinct_rel,
4974  input_path,
4975  cheapest_partial_path,
4976  useful_pathkeys,
4977  -1.0);
4978 
4979  if (sorted_path == NULL)
4980  continue;
4981 
4982  /*
4983  * An empty distinct_pathkeys means all tuples have the same
4984  * value for the DISTINCT clause. See
4985  * create_final_distinct_paths()
4986  */
4987  if (root->distinct_pathkeys == NIL)
4988  {
4989  Node *limitCount;
4990 
4991  limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
4992  sizeof(int64),
4993  Int64GetDatum(1), false,
4994  FLOAT8PASSBYVAL);
4995 
4996  /*
4997  * Apply a LimitPath onto the partial path to restrict the
4998  * tuples from each worker to 1.
4999  * create_final_distinct_paths will need to apply an
5000  * additional LimitPath to restrict this to a single row
5001  * after the Gather node. If the query already has a
5002  * LIMIT clause, then we could end up with three Limit
5003  * nodes in the final plan. Consolidating the top two of
5004  * these could be done, but does not seem worth troubling
5005  * over.
5006  */
5007  add_partial_path(partial_distinct_rel, (Path *)
5008  create_limit_path(root, partial_distinct_rel,
5009  sorted_path,
5010  NULL,
5011  limitCount,
5012  LIMIT_OPTION_COUNT,
5013  0, 1));
5014  }
5015  else
5016  {
5017  add_partial_path(partial_distinct_rel, (Path *)
5018  create_upper_unique_path(root, partial_distinct_rel,
5019  sorted_path,
5020  list_length(root->distinct_pathkeys),
5021  numDistinctRows));
5022  }
5023  }
5024  }
5025  }
5026 
5027  /*
5028  * Now try hash aggregate paths, if enabled and hashing is possible. Since
5029  * we're not on the hook to ensure we do our best to create at least one
5030  * path here, we treat enable_hashagg as a hard off-switch rather than the
5031  * slightly softer variant in create_final_distinct_paths.
5032  */
5033  if (enable_hashagg && grouping_is_hashable(root->processed_distinctClause))
5034  {
5035  add_partial_path(partial_distinct_rel, (Path *)
5036  create_agg_path(root,
5037  partial_distinct_rel,
5038  cheapest_partial_path,
5039  cheapest_partial_path->pathtarget,
5040  AGG_HASHED,
5041  AGGSPLIT_SIMPLE,
5042  root->processed_distinctClause,
5043  NIL,
5044  NULL,
5045  numDistinctRows));
5046  }
5047 
5048  /*
5049  * If there is an FDW that's responsible for all baserels of the query,
5050  * let it consider adding ForeignPaths.
5051  */
5052  if (partial_distinct_rel->fdwroutine &&
5053  partial_distinct_rel->fdwroutine->GetForeignUpperPaths)
5054  partial_distinct_rel->fdwroutine->GetForeignUpperPaths(root,
5055  UPPERREL_PARTIAL_DISTINCT,
5056  input_rel,
5057  partial_distinct_rel,
5058  NULL);
5059 
5060  /* Let extensions possibly add some more partial paths */
5061  if (create_upper_paths_hook)
5062  (*create_upper_paths_hook) (root, UPPERREL_PARTIAL_DISTINCT,
5063  input_rel, partial_distinct_rel, NULL);
5064 
5065  if (partial_distinct_rel->partial_pathlist != NIL)
5066  {
5067  generate_useful_gather_paths(root, partial_distinct_rel, true);
5068  set_cheapest(partial_distinct_rel);
5069 
5070  /*
5071  * Finally, create paths to distinctify the final result. This step
5072  * is needed to remove any duplicates due to combining rows from
5073  * parallel workers.
5074  */
5075  create_final_distinct_paths(root, partial_distinct_rel,
5076  final_distinct_rel);
5077  }
5078 }
void add_partial_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:795
@ UPPERREL_PARTIAL_DISTINCT
Definition: pathnodes.h:76

References add_partial_path(), AGG_HASHED, AGGSPLIT_SIMPLE, Assert, RelOptInfo::consider_parallel, create_agg_path(), create_final_distinct_paths(), create_limit_path(), create_upper_paths_hook, create_upper_unique_path(), enable_hashagg, estimate_num_groups(), fetch_upper_rel(), FLOAT8PASSBYVAL, foreach_node, generate_useful_gather_paths(), get_sortgrouplist_exprs(), get_useful_pathkeys_for_distinct(), grouping_is_hashable(), grouping_is_sortable(), Int64GetDatum(), InvalidOid, lfirst, LIMIT_OPTION_COUNT, linitial, list_length(), make_ordered_path(), makeConst(), NIL, parse(), RelOptInfo::partial_pathlist, Path::pathkeys, RelOptInfo::reltarget, root, Path::rows, RelOptInfo::serverid, set_cheapest(), UPPERREL_PARTIAL_DISTINCT, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by create_distinct_paths().
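
The per-worker estimate above is the same two-step recipe create_final_distinct_paths() uses: collect the DISTINCT expressions, then ask the selectivity machinery for a group count. In isolation (a sketch assuming root, parse, and an input_rows count are in scope):

/* Sketch: how many distinct rows will 'input_rows' input rows produce? */
List       *distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
                                                    parse->targetList);
double      numDistinctRows = estimate_num_groups(root,
                                                  distinctExprs,
                                                  input_rows,
                                                  NULL,     /* no grouping sets */
                                                  NULL);    /* no EstimationInfo */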

◆ create_partial_grouping_paths()

static RelOptInfo * create_partial_grouping_paths ( PlannerInfo root,
RelOptInfo grouped_rel,
RelOptInfo input_rel,
grouping_sets_data gd,
GroupPathExtraData extra,
bool  force_rel_creation 
)
static

Definition at line 7306 of file planner.c.

7312 {
7313  Query *parse = root->parse;
7314  RelOptInfo *partially_grouped_rel;
7315  AggClauseCosts *agg_partial_costs = &extra->agg_partial_costs;
7316  AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7317  Path *cheapest_partial_path = NULL;
7318  Path *cheapest_total_path = NULL;
7319  double dNumPartialGroups = 0;
7320  double dNumPartialPartialGroups = 0;
7321  ListCell *lc;
7322  bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7323  bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7324 
7325  /*
7326  * Consider whether we should generate partially aggregated non-partial
7327  * paths. We can only do this if we have a non-partial path, and only if
7328  * the parent of the input rel is performing partial partitionwise
7329  * aggregation. (Note that extra->patype is the type of partitionwise
7330  * aggregation being used at the parent level, not this level.)
7331  */
7332  if (input_rel->pathlist != NIL &&
7333  extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
7334  cheapest_total_path = input_rel->cheapest_total_path;
7335 
7336  /*
7337  * If parallelism is possible for grouped_rel, then we should consider
7338  * generating partially-grouped partial paths. However, if the input rel
7339  * has no partial paths, then we can't.
7340  */
7341  if (grouped_rel->consider_parallel && input_rel->partial_pathlist != NIL)
7342  cheapest_partial_path = linitial(input_rel->partial_pathlist);
7343 
7344  /*
7345  * If we can't partially aggregate partial paths, and we can't partially
7346  * aggregate non-partial paths, then don't bother creating the new
7347  * RelOptInfo at all, unless the caller specified force_rel_creation.
7348  */
7349  if (cheapest_total_path == NULL &&
7350  cheapest_partial_path == NULL &&
7351  !force_rel_creation)
7352  return NULL;
7353 
7354  /*
7355  * Build a new upper relation to represent the result of partially
7356  * aggregating the rows from the input relation.
7357  */
7358  partially_grouped_rel = fetch_upper_rel(root,
7359  UPPERREL_PARTIAL_GROUP_AGG,
7360  grouped_rel->relids);
7361  partially_grouped_rel->consider_parallel =
7362  grouped_rel->consider_parallel;
7363  partially_grouped_rel->reloptkind = grouped_rel->reloptkind;
7364  partially_grouped_rel->serverid = grouped_rel->serverid;
7365  partially_grouped_rel->userid = grouped_rel->userid;
7366  partially_grouped_rel->useridiscurrent = grouped_rel->useridiscurrent;
7367  partially_grouped_rel->fdwroutine = grouped_rel->fdwroutine;
7368 
7369  /*
7370  * Build target list for partial aggregate paths. These paths cannot just
7371  * emit the same tlist as regular aggregate paths, because (1) we must
7372  * include Vars and Aggrefs needed in HAVING, which might not appear in
7373  * the result tlist, and (2) the Aggrefs must be set in partial mode.
7374  */
7375  partially_grouped_rel->reltarget =
7376  make_partial_grouping_target(root, grouped_rel->reltarget,
7377  extra->havingQual);
7378 
7379  if (!extra->partial_costs_set)
7380  {
7381  /*
7382  * Collect statistics about aggregates for estimating costs of
7383  * performing aggregation in parallel.
7384  */
7385  MemSet(agg_partial_costs, 0, sizeof(AggClauseCosts));
7386  MemSet(agg_final_costs, 0, sizeof(AggClauseCosts));
7387  if (parse->hasAggs)
7388  {
7389  /* partial phase */
7390  get_agg_clause_costs(root, AGGSPLIT_INITIAL_SERIAL,
7391  agg_partial_costs);
7392 
7393  /* final phase */
7394  get_agg_clause_costs(root, AGGSPLIT_FINAL_DESERIAL,
7395  agg_final_costs);
7396  }
7397 
7398  extra->partial_costs_set = true;
7399  }
7400 
7401  /* Estimate number of partial groups. */
7402  if (cheapest_total_path != NULL)
7403  dNumPartialGroups =
7404  get_number_of_groups(root,
7405  cheapest_total_path->rows,
7406  gd,
7407  extra->targetList);
7408  if (cheapest_partial_path != NULL)
7409  dNumPartialPartialGroups =
7410  get_number_of_groups(root,
7411  cheapest_partial_path->rows,
7412  gd,
7413  extra->targetList);
7414 
7415  if (can_sort && cheapest_total_path != NULL)
7416  {
7417  /* This should have been checked previously */
7418  Assert(parse->hasAggs || parse->groupClause);
7419 
7420  /*
7421  * Use any available suitably-sorted path as input, and also consider
7422  * sorting the cheapest partial path.
7423  */
7424  foreach(lc, input_rel->pathlist)
7425  {
7426  ListCell *lc2;
7427  Path *path = (Path *) lfirst(lc);
7428  Path *path_save = path;
7429  List *pathkey_orderings = NIL;
7430 
7431  /* generate alternative group orderings that might be useful */
7432  pathkey_orderings = get_useful_group_keys_orderings(root, path);
7433 
7434  Assert(list_length(pathkey_orderings) > 0);
7435 
7436  /* process all potentially interesting grouping reorderings */
7437  foreach(lc2, pathkey_orderings)
7438  {
7439  GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7440 
7441  /* restore the path (we replace it in the loop) */
7442  path = path_save;
7443 
7444  path = make_ordered_path(root,
7445  partially_grouped_rel,
7446  path,
7447  cheapest_total_path,
7448  info->pathkeys,
7449  -1.0);
7450 
7451  if (path == NULL)
7452  continue;
7453 
7454  if (parse->hasAggs)
7455  add_path(partially_grouped_rel, (Path *)
7456  create_agg_path(root,
7457  partially_grouped_rel,
7458  path,
7459  partially_grouped_rel->reltarget,
7460  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7461  AGGSPLIT_INITIAL_SERIAL,
7462  info->clauses,
7463  NIL,
7464  agg_partial_costs,
7465  dNumPartialGroups));
7466  else
7467  add_path(partially_grouped_rel, (Path *)
7468  create_group_path(root,
7469  partially_grouped_rel,
7470  path,
7471  info->clauses,
7472  NIL,
7473  dNumPartialGroups));
7474  }
7475  }
7476  }
7477 
7478  if (can_sort && cheapest_partial_path != NULL)
7479  {
7480  /* Similar to above logic, but for partial paths. */
7481  foreach(lc, input_rel->partial_pathlist)
7482  {
7483  ListCell *lc2;
7484  Path *path = (Path *) lfirst(lc);
7485  Path *path_save = path;
7486  List *pathkey_orderings = NIL;
7487 
7488  /* generate alternative group orderings that might be useful */
7489  pathkey_orderings = get_useful_group_keys_orderings(root, path);
7490 
7491  Assert(list_length(pathkey_orderings) > 0);
7492 
7493  /* process all potentially interesting grouping reorderings */
7494  foreach(lc2, pathkey_orderings)
7495  {
7496  GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7497 
7498 
7499  /* restore the path (we replace it in the loop) */
7500  path = path_save;
7501 
7502  path = make_ordered_path(root,
7503  partially_grouped_rel,
7504  path,
7505  cheapest_partial_path,
7506  info->pathkeys,
7507  -1.0);
7508 
7509  if (path == NULL)
7510  continue;
7511 
7512  if (parse->hasAggs)
7513  add_partial_path(partially_grouped_rel, (Path *)
7514  create_agg_path(root,
7515  partially_grouped_rel,
7516  path,
7517  partially_grouped_rel->reltarget,
7518  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7519  AGGSPLIT_INITIAL_SERIAL,
7520  info->clauses,
7521  NIL,
7522  agg_partial_costs,
7523  dNumPartialPartialGroups));
7524  else
7525  add_partial_path(partially_grouped_rel, (Path *)
7526  create_group_path(root,
7527  partially_grouped_rel,
7528  path,
7529  info->clauses,
7530  NIL,
7531  dNumPartialPartialGroups));
7532  }
7533  }
7534  }
7535 
7536  /*
7537  * Add a partially-grouped HashAgg Path where possible
7538  */
7539  if (can_hash && cheapest_total_path != NULL)
7540  {
7541  /* Checked above */
7542  Assert(parse->hasAggs || parse->groupClause);
7543 
7544  add_path(partially_grouped_rel, (Path *)
7545  create_agg_path(root,
7546  partially_grouped_rel,
7547  cheapest_total_path,
7548  partially_grouped_rel->reltarget,
7549  AGG_HASHED,
7550  AGGSPLIT_INITIAL_SERIAL,
7551  root->processed_groupClause,
7552  NIL,
7553  agg_partial_costs,
7554  dNumPartialGroups));
7555  }
7556 
7557  /*
7558  * Now add a partially-grouped HashAgg partial Path where possible
7559  */
7560  if (can_hash && cheapest_partial_path != NULL)
7561  {
7562  add_partial_path(partially_grouped_rel, (Path *)
7563  create_agg_path(root,
7564  partially_grouped_rel,
7565  cheapest_partial_path,
7566  partially_grouped_rel->reltarget,
7567  AGG_HASHED,
7568  AGGSPLIT_INITIAL_SERIAL,
7569  root->processed_groupClause,
7570  NIL,
7571  agg_partial_costs,
7572  dNumPartialPartialGroups));
7573  }
7574 
7575  /*
7576  * If there is an FDW that's responsible for all baserels of the query,
7577  * let it consider adding partially grouped ForeignPaths.
7578  */
7579  if (partially_grouped_rel->fdwroutine &&
7580  partially_grouped_rel->fdwroutine->GetForeignUpperPaths)
7581  {
7582  FdwRoutine *fdwroutine = partially_grouped_rel->fdwroutine;
7583 
7584  fdwroutine->GetForeignUpperPaths(root,
7585  UPPERREL_PARTIAL_GROUP_AGG,
7586  input_rel, partially_grouped_rel,
7587  extra);
7588  }
7589 
7590  return partially_grouped_rel;
7591 }
@ AGGSPLIT_INITIAL_SERIAL
Definition: nodes.h:379
@ UPPERREL_PARTIAL_GROUP_AGG
Definition: pathnodes.h:72
static PathTarget * make_partial_grouping_target(PlannerInfo *root, PathTarget *grouping_target, Node *havingQual)
Definition: planner.c:5685
GetForeignUpperPaths_function GetForeignUpperPaths
Definition: fdwapi.h:226
AggClauseCosts agg_partial_costs
Definition: pathnodes.h:3303
RelOptKind reloptkind
Definition: pathnodes.h:865

References add_partial_path(), add_path(), GroupPathExtraData::agg_final_costs, AGG_HASHED, GroupPathExtraData::agg_partial_costs, AGG_PLAIN, AGG_SORTED, AGGSPLIT_FINAL_DESERIAL, AGGSPLIT_INITIAL_SERIAL, Assert, RelOptInfo::cheapest_total_path, GroupByOrdering::clauses, RelOptInfo::consider_parallel, create_agg_path(), create_group_path(), fetch_upper_rel(), GroupPathExtraData::flags, get_agg_clause_costs(), get_number_of_groups(), get_useful_group_keys_orderings(), FdwRoutine::GetForeignUpperPaths, GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, GroupPathExtraData::havingQual, lfirst, linitial, list_length(), make_ordered_path(), make_partial_grouping_target(), MemSet, NIL, parse(), GroupPathExtraData::partial_costs_set, RelOptInfo::partial_pathlist, PARTITIONWISE_AGGREGATE_PARTIAL, GroupByOrdering::pathkeys, RelOptInfo::pathlist, GroupPathExtraData::patype, RelOptInfo::relids, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, Path::rows, RelOptInfo::serverid, GroupPathExtraData::targetList, UPPERREL_PARTIAL_GROUP_AGG, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by create_ordinary_grouping_paths().
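
The paired get_agg_clause_costs() calls above reflect how a split aggregate is costed: the partial phase runs each aggregate's transition (and, across processes, serialization) function, while the final phase deserializes, combines, and finalizes. Condensed, mirroring the code above:

AggClauseCosts agg_partial_costs;   /* partial phase costs */
AggClauseCosts agg_final_costs;     /* final phase costs */

MemSet(&agg_partial_costs, 0, sizeof(AggClauseCosts));
MemSet(&agg_final_costs, 0, sizeof(AggClauseCosts));

get_agg_clause_costs(root, AGGSPLIT_INITIAL_SERIAL, &agg_partial_costs);
get_agg_clause_costs(root, AGGSPLIT_FINAL_DESERIAL, &agg_final_costs);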

◆ create_partitionwise_grouping_paths()

static void create_partitionwise_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo grouped_rel,
RelOptInfo partially_grouped_rel,
const AggClauseCosts agg_costs,
grouping_sets_data gd,
PartitionwiseAggregateType  patype,
GroupPathExtraData extra 
)
static

Definition at line 8019 of file planner.c.

8027 {
8028  List *grouped_live_children = NIL;
8029  List *partially_grouped_live_children = NIL;
8030  PathTarget *target = grouped_rel->reltarget;
8031  bool partial_grouping_valid = true;
8032  int i;
8033 
8034  Assert(patype != PARTITIONWISE_AGGREGATE_NONE);
8035  Assert(patype != PARTITIONWISE_AGGREGATE_PARTIAL ||
8036  partially_grouped_rel != NULL);
8037 
8038  /* Add paths for partitionwise aggregation/grouping. */
8039  i = -1;
8040  while ((i = bms_next_member(input_rel->live_parts, i)) >= 0)
8041  {
8042  RelOptInfo *child_input_rel = input_rel->part_rels[i];
8043  PathTarget *child_target;
8044  AppendRelInfo **appinfos;
8045  int nappinfos;
8046  GroupPathExtraData child_extra;
8047  RelOptInfo *child_grouped_rel;
8048  RelOptInfo *child_partially_grouped_rel;
8049 
8050  Assert(child_input_rel != NULL);
8051 
8052  /* Dummy children can be ignored. */
8053  if (IS_DUMMY_REL(child_input_rel))
8054  continue;
8055 
8056  child_target = copy_pathtarget(target);
8057 
8058  /*
8059  * Copy the given "extra" structure as is and then override the
8060  * members specific to this child.
8061  */
8062  memcpy(&child_extra, extra, sizeof(child_extra));
8063 
8064  appinfos = find_appinfos_by_relids(root, child_input_rel->relids,
8065  &nappinfos);
8066 
8067  child_target->exprs = (List *)
8068  adjust_appendrel_attrs(root,
8069  (Node *) target->exprs,
8070  nappinfos, appinfos);
8071 
8072  /* Translate havingQual and targetList. */
8073  child_extra.havingQual = (Node *)
8074  adjust_appendrel_attrs(root,
8075  extra->havingQual,
8076  nappinfos, appinfos);
8077  child_extra.targetList = (List *)
8078  adjust_appendrel_attrs(root,
8079  (Node *) extra->targetList,
8080  nappinfos, appinfos);
8081 
8082  /*
8083  * extra->patype was the value computed for our parent rel; patype is
8084  * the value for this relation. For the child, our value is its
8085  * parent rel's value.
8086  */
8087  child_extra.patype = patype;
8088 
8089  /*
8090  * Create grouping relation to hold fully aggregated grouping and/or
8091  * aggregation paths for the child.
8092  */
8093  child_grouped_rel = make_grouping_rel(root, child_input_rel,
8094  child_target,
8095  extra->target_parallel_safe,
8096  child_extra.havingQual);
8097 
8098  /* Create grouping paths for this child relation. */
8099  create_ordinary_grouping_paths(root, child_input_rel,
8100  child_grouped_rel,
8101  agg_costs, gd, &child_extra,
8102  &child_partially_grouped_rel);
8103 
8104  if (child_partially_grouped_rel)
8105  {
8106  partially_grouped_live_children =
8107  lappend(partially_grouped_live_children,
8108  child_partially_grouped_rel);
8109  }
8110  else
8111  partial_grouping_valid = false;
8112 
8113  if (patype == PARTITIONWISE_AGGREGATE_FULL)
8114  {
8115  set_cheapest(child_grouped_rel);
8116  grouped_live_children = lappend(grouped_live_children,
8117  child_grouped_rel);
8118  }
8119 
8120  pfree(appinfos);
8121  }
8122 
8123  /*
8124  * Try to create append paths for partially grouped children. For full
8125  * partitionwise aggregation, we might have paths in the partial_pathlist
8126  * if parallel aggregation is possible. For partial partitionwise
8127  * aggregation, we may have paths in both pathlist and partial_pathlist.
8128  *
8129  * NB: We must have a partially grouped path for every child in order to
8130  * generate a partially grouped path for this relation.
8131  */
8132  if (partially_grouped_rel && partial_grouping_valid)
8133  {
8134  Assert(partially_grouped_live_children != NIL);
8135 
8136  add_paths_to_append_rel(root, partially_grouped_rel,
8137  partially_grouped_live_children);
8138 
8139  /*
8140  * We need to call set_cheapest, since the finalization step will use the
8141  * cheapest path from the rel.
8142  */
8143  if (partially_grouped_rel->pathlist)
8144  set_cheapest(partially_grouped_rel);
8145  }
8146 
8147  /* If possible, create append paths for fully grouped children. */
8148  if (patype == PARTITIONWISE_AGGREGATE_FULL)
8149  {
8150  Assert(grouped_live_children != NIL);
8151 
8152  add_paths_to_append_rel(root, grouped_rel, grouped_live_children);
8153  }
8154 }

References add_paths_to_append_rel(), adjust_appendrel_attrs(), Assert, bms_next_member(), copy_pathtarget(), create_ordinary_grouping_paths(), PathTarget::exprs, find_appinfos_by_relids(), GroupPathExtraData::havingQual, i, IS_DUMMY_REL, lappend(), RelOptInfo::live_parts, make_grouping_rel(), NIL, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, PARTITIONWISE_AGGREGATE_PARTIAL, RelOptInfo::pathlist, GroupPathExtraData::patype, pfree(), RelOptInfo::relids, RelOptInfo::reltarget, root, set_cheapest(), GroupPathExtraData::target_parallel_safe, and GroupPathExtraData::targetList.

Referenced by create_ordinary_grouping_paths().
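The reason this decomposition is safe: when the GROUP BY keys cover the partition keys (see group_by_has_partkey() below), no group can span two partitions, so each child rel can be aggregated independently and the per-child results simply appended. A toy standalone illustration of the PARTITIONWISE_AGGREGATE_FULL case, assuming a table range-partitioned on its own grouping key (all names invented):

#include <stdio.h>

#define NKEYS 4

/* Aggregate one partition independently: counts per grouping key. */
static void agg_partition(const int *keys, int n, long counts[NKEYS])
{
    for (int k = 0; k < NKEYS; k++) counts[k] = 0;
    for (int i = 0; i < n; i++) counts[keys[i]]++;
}

int main(void)
{
    /* Two partitions, partitioned on the grouping key itself, so no group
     * crosses a partition boundary and the results can just be appended. */
    int p0[] = {0, 1, 0, 1}, p1[] = {2, 3, 2};
    long c0[NKEYS], c1[NKEYS];

    agg_partition(p0, 4, c0);
    agg_partition(p1, 3, c1);

    for (int k = 0; k < 2; k++) printf("key %d -> %ld\n", k, c0[k]);
    for (int k = 2; k < NKEYS; k++) printf("key %d -> %ld\n", k, c1[k]);
    return 0;
}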

◆ create_window_paths()

static RelOptInfo * create_window_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget input_target,
PathTarget output_target,
bool  output_target_parallel_safe,
WindowFuncLists wflists,
List activeWindows 
)
static

Definition at line 4578 of file planner.c.

4585 {
4586  RelOptInfo *window_rel;
4587  ListCell *lc;
4588 
4589  /* For now, do all work in the (WINDOW, NULL) upperrel */
4590  window_rel = fetch_upper_rel(root, UPPERREL_WINDOW, NULL);
4591 
4592  /*
4593  * If the input relation is not parallel-safe, then the window relation
4594  * can't be parallel-safe, either. Otherwise, we need to examine the
4595  * target list and active windows for non-parallel-safe constructs.
4596  */
4597  if (input_rel->consider_parallel && output_target_parallel_safe &&
4598  is_parallel_safe(root, (Node *) activeWindows))
4599  window_rel->consider_parallel = true;
4600 
4601  /*
4602  * If the input rel belongs to a single FDW, so does the window rel.
4603  */
4604  window_rel->serverid = input_rel->serverid;
4605  window_rel->userid = input_rel->userid;
4606  window_rel->useridiscurrent = input_rel->useridiscurrent;
4607  window_rel->fdwroutine = input_rel->fdwroutine;
4608 
4609  /*
4610  * Consider computing window functions starting from the existing
4611  * cheapest-total path (which will likely require a sort) as well as any
4612  * existing paths that satisfy or partially satisfy root->window_pathkeys.
4613  */
4614  foreach(lc, input_rel->pathlist)
4615  {
4616  Path *path = (Path *) lfirst(lc);
4617  int presorted_keys;
4618 
4619  if (path == input_rel->cheapest_total_path ||
4620  pathkeys_count_contained_in(root->window_pathkeys, path->pathkeys,
4621  &presorted_keys) ||
4622  presorted_keys > 0)
4623  create_one_window_path(root,
4624  window_rel,
4625  path,
4626  input_target,
4627  output_target,
4628  wflists,
4629  activeWindows);
4630  }
4631 
4632  /*
4633  * If there is an FDW that's responsible for all baserels of the query,
4634  * let it consider adding ForeignPaths.
4635  */
4636  if (window_rel->fdwroutine &&
4637  window_rel->fdwroutine->GetForeignUpperPaths)
4638  window_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_WINDOW,
4639  input_rel, window_rel,
4640  NULL);
4641 
4642  /* Let extensions possibly add some more paths */
4643  if (create_upper_paths_hook)
4644  (*create_upper_paths_hook) (root, UPPERREL_WINDOW,
4645  input_rel, window_rel, NULL);
4646 
4647  /* Now choose the best path(s) */
4648  set_cheapest(window_rel);
4649 
4650  return window_rel;
4651 }
bool is_parallel_safe(PlannerInfo *root, Node *node)
Definition: clauses.c:752
@ UPPERREL_WINDOW
Definition: pathnodes.h:75
static void create_one_window_path(PlannerInfo *root, RelOptInfo *window_rel, Path *path, PathTarget *input_target, PathTarget *output_target, WindowFuncLists *wflists, List *activeWindows)
Definition: planner.c:4665

References RelOptInfo::cheapest_total_path, RelOptInfo::consider_parallel, create_one_window_path(), create_upper_paths_hook, fetch_upper_rel(), is_parallel_safe(), lfirst, Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::pathlist, root, RelOptInfo::serverid, set_cheapest(), UPPERREL_WINDOW, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by grouping_planner().
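A path qualifies in the loop above if it is the cheapest-total path or if pathkeys_count_contained_in() finds at least a presorted prefix of root->window_pathkeys, since a partial ordering can still feed an incremental sort. A self-contained sketch of that prefix-counting contract, with opaque pointers standing in for canonical PathKey nodes (toy code, not the actual list implementation):

#include <stdbool.h>
#include <stdio.h>

/* Count how many leading 'needed' keys 'given' already provides; return
 * true only when all of them are (mirrors the contract of
 * pathkeys_count_contained_in; canonical keys compare by pointer). */
static bool count_contained_in(void *const *needed, int nneeded,
                               void *const *given, int ngiven,
                               int *n_common)
{
    int n = 0;
    while (n < nneeded && n < ngiven && needed[n] == given[n])
        n++;
    *n_common = n;
    return n == nneeded;
}

int main(void)
{
    int a, b, c;                      /* stand-ins for canonical PathKeys */
    void *needed[] = {&a, &b, &c};
    void *given[]  = {&a, &b};
    int presorted;
    bool full = count_contained_in(needed, 3, given, 2, &presorted);
    printf("fully sorted: %d, presorted keys: %d\n", full, presorted);  /* 0, 2 */
    return 0;
}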

◆ expression_planner()

Expr* expression_planner ( Expr expr)

Definition at line 6735 of file planner.c.

6736 {
6737  Node *result;
6738 
6739  /*
6740  * Convert named-argument function calls, insert default arguments and
6741  * simplify constant subexprs
6742  */
6743  result = eval_const_expressions(NULL, (Node *) expr);
6744 
6745  /* Fill in opfuncid values if missing */
6746  fix_opfuncids(result);
6747 
6748  return (Expr *) result;
6749 }
Node * eval_const_expressions(PlannerInfo *root, Node *node)
Definition: clauses.c:2253
void fix_opfuncids(Node *node)
Definition: nodeFuncs.c:1830

References eval_const_expressions(), and fix_opfuncids().

Referenced by ATExecAddColumn(), ATExecSetExpression(), ATPrepAlterColumnType(), BeginCopyFrom(), ComputePartitionAttrs(), contain_mutable_functions_after_planning(), contain_volatile_functions_after_planning(), ExecPrepareCheck(), ExecPrepareExpr(), ExecPrepareQual(), load_domaintype_info(), set_baserel_partition_constraint(), slot_fill_defaults(), StoreAttrDefault(), and transformPartitionBoundValue().
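A typical caller hands expression_planner() an already parse-analyzed expression and then compiles the result for execution. A hedged usage sketch for backend or extension code, to be compiled against the PostgreSQL headers (prepare_checked_expr is a hypothetical wrapper; expression_planner() and ExecInitExpr() are the real entry points, and this is essentially the pattern ExecPrepareExpr() follows internally):

/* Hypothetical helper: simplify an analyzed expression and compile it. */
static ExprState *
prepare_checked_expr(Expr *raw_expr)
{
    /* constant-fold, insert default arguments, fill in opfuncids */
    Expr *simplified = expression_planner(raw_expr);

    /* compile the simplified tree for execution (no parent plan node) */
    return ExecInitExpr(simplified, NULL);
}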

◆ expression_planner_with_deps()

Expr* expression_planner_with_deps ( Expr expr,
List **  relationOids,
List **  invalItems 
)

Definition at line 6762 of file planner.c.

6765 {
6766  Node *result;
6767  PlannerGlobal glob;
6768  PlannerInfo root;
6769 
6770  /* Make up dummy planner state so we can use setrefs machinery */
6771  MemSet(&glob, 0, sizeof(glob));
6772  glob.type = T_PlannerGlobal;
6773  glob.relationOids = NIL;
6774  glob.invalItems = NIL;
6775 
6776  MemSet(&root, 0, sizeof(root));
6777  root.type = T_PlannerInfo;
6778  root.glob = &glob;
6779 
6780  /*
6781  * Convert named-argument function calls, insert default arguments and
6782  * simplify constant subexprs. Collect identities of inlined functions
6783  * and elided domains, too.
6784  */
6785  result = eval_const_expressions(&root, (Node *) expr);
6786 
6787  /* Fill in opfuncid values if missing */
6788  fix_opfuncids(result);
6789 
6790  /*
6791  * Now walk the finished expression to find anything else we ought to
6792  * record as an expression dependency.
6793  */
6794  (void) extract_query_dependencies_walker(result, &root);
6795 
6796  *relationOids = glob.relationOids;
6797  *invalItems = glob.invalItems;
6798 
6799  return (Expr *) result;
6800 }
bool extract_query_dependencies_walker(Node *node, PlannerInfo *context)
Definition: setrefs.c:3593
List * invalItems
Definition: pathnodes.h:135
List * relationOids
Definition: pathnodes.h:132

References eval_const_expressions(), extract_query_dependencies_walker(), fix_opfuncids(), PlannerGlobal::invalItems, MemSet, NIL, PlannerGlobal::relationOids, and root.

Referenced by GetCachedExpression().

◆ extract_rollup_sets()

static List * extract_rollup_sets ( List groupingSets)
static

Definition at line 3011 of file planner.c.

3012 {
3013  int num_sets_raw = list_length(groupingSets);
3014  int num_empty = 0;
3015  int num_sets = 0; /* distinct sets */
3016  int num_chains = 0;
3017  List *result = NIL;
3018  List **results;
3019  List **orig_sets;
3020  Bitmapset **set_masks;
3021  int *chains;
3022  short **adjacency;
3023  short *adjacency_buf;
3024  BipartiteMatchState *state;
3025  int i;
3026  int j;
3027  int j_size;
3028  ListCell *lc1 = list_head(groupingSets);
3029  ListCell *lc;
3030 
3031  /*
3032  * Start by stripping out empty sets. The algorithm doesn't require this,
3033  * but the planner currently needs all empty sets to be returned in the
3034  * first list, so we strip them here and add them back after.
3035  */
3036  while (lc1 && lfirst(lc1) == NIL)
3037  {
3038  ++num_empty;
3039  lc1 = lnext(groupingSets, lc1);
3040  }
3041 
3042  /* bail out now if it turns out that all we had were empty sets. */
3043  if (!lc1)
3044  return list_make1(groupingSets);
3045 
3046  /*----------
3047  * We don't strictly need to remove duplicate sets here, but if we don't,
3048  * they tend to become scattered through the result, which is a bit
3049  * confusing (and irritating if we ever decide to optimize them out).
3050  * So we remove them here and add them back after.
3051  *
3052  * For each non-duplicate set, we fill in the following:
3053  *
3054  * orig_sets[i] = list of the original set lists
3055  * set_masks[i] = bitmapset for testing inclusion
3056  * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
3057  *
3058  * chains[i] will be the result group this set is assigned to.
3059  *
3060  * We index all of these from 1 rather than 0 because it is convenient
3061  * to leave 0 free for the NIL node in the graph algorithm.
3062  *----------
3063  */
3064  orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
3065  set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
3066  adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
3067  adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
3068 
3069  j_size = 0;
3070  j = 0;
3071  i = 1;
3072 
3073  for_each_cell(lc, groupingSets, lc1)
3074  {
3075  List *candidate = (List *) lfirst(lc);
3076  Bitmapset *candidate_set = NULL;
3077  ListCell *lc2;
3078  int dup_of = 0;
3079 
3080  foreach(lc2, candidate)
3081  {
3082  candidate_set = bms_add_member(candidate_set, lfirst_int(lc2));
3083  }
3084 
3085  /* we can only be a dup if we're the same length as a previous set */
3086  if (j_size == list_length(candidate))
3087  {
3088  int k;
3089 
3090  for (k = j; k < i; ++k)
3091  {
3092  if (bms_equal(set_masks[k], candidate_set))
3093  {
3094  dup_of = k;
3095  break;
3096  }
3097  }
3098  }
3099  else if (j_size < list_length(candidate))
3100  {
3101  j_size = list_length(candidate);
3102  j = i;
3103  }
3104 
3105  if (dup_of > 0)
3106  {
3107  orig_sets[dup_of] = lappend(orig_sets[dup_of], candidate);
3108  bms_free(candidate_set);
3109  }
3110  else
3111  {
3112  int k;
3113  int n_adj = 0;
3114 
3115  orig_sets[i] = list_make1(candidate);
3116  set_masks[i] = candidate_set;
3117 
3118  /* fill in adjacency list; no need to compare equal-size sets */
3119 
3120  for (k = j - 1; k > 0; --k)
3121  {
3122  if (bms_is_subset(set_masks[k], candidate_set))
3123  adjacency_buf[++n_adj] = k;
3124  }
3125 
3126  if (n_adj > 0)
3127  {
3128  adjacency_buf[0] = n_adj;
3129  adjacency[i] = palloc((n_adj + 1) * sizeof(short));
3130  memcpy(adjacency[i], adjacency_buf, (n_adj + 1) * sizeof(short));
3131  }
3132  else
3133  adjacency[i] = NULL;
3134 
3135  ++i;
3136  }
3137  }
3138 
3139  num_sets = i - 1;
3140 
3141  /*
3142  * Apply the graph matching algorithm to do the work.
3143  */
3144  state = BipartiteMatch(num_sets, num_sets, adjacency);
3145 
3146  /*
3147  * Now, the state->pair* fields have the info we need to assign sets to
3148  * chains. Two sets (u,v) belong to the same chain if pair_uv[u] = v or
3149  * pair_vu[v] = u (both will be true, but we check both so that we can do
3150  * it in one pass)
3151  */
3152  chains = palloc0((num_sets + 1) * sizeof(int));
3153 
3154  for (i = 1; i <= num_sets; ++i)
3155  {
3156  int u = state->pair_vu[i];
3157  int v = state->pair_uv[i];
3158 
3159  if (u > 0 && u < i)
3160  chains[i] = chains[u];
3161  else if (v > 0 && v < i)
3162  chains[i] = chains[v];
3163  else
3164  chains[i] = ++num_chains;
3165  }
3166 
3167  /* build result lists. */
3168  results = palloc0((num_chains + 1) * sizeof(List *));
3169 
3170  for (i = 1; i <= num_sets; ++i)
3171  {
3172  int c = chains[i];
3173 
3174  Assert(c > 0);
3175 
3176  results[c] = list_concat(results[c], orig_sets[i]);
3177  }
3178 
3179  /* push any empty sets back on the first list. */
3180  while (num_empty-- > 0)
3181  results[1] = lcons(NIL, results[1]);
3182 
3183  /* make result list */
3184  for (i = 1; i <= num_chains; ++i)
3185  result = lappend(result, results[i]);
3186 
3187  /*
3188  * Free all the things.
3189  *
3190  * (This is over-fussy for small sets but for large sets we could have
3191  * tied up a nontrivial amount of memory.)
3192  */
3193  BipartiteMatchFree(state);
3194  pfree(results);
3195  pfree(chains);
3196  for (i = 1; i <= num_sets; ++i)
3197  if (adjacency[i])
3198  pfree(adjacency[i]);
3199  pfree(adjacency);
3200  pfree(adjacency_buf);
3201  pfree(orig_sets);
3202  for (i = 1; i <= num_sets; ++i)
3203  bms_free(set_masks[i]);
3204  pfree(set_masks);
3205 
3206  return result;
3207 }
void BipartiteMatchFree(BipartiteMatchState *state)
BipartiteMatchState * BipartiteMatch(int u_size, int v_size, short **adjacency)
bool bms_equal(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:142
bool bms_is_subset(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:412
void bms_free(Bitmapset *a)
Definition: bitmapset.c:239
int j
Definition: isn.c:73
void * palloc0(Size size)
Definition: mcxt.c:1347
char * c
Definition: regguts.h:323

References Assert, BipartiteMatch(), BipartiteMatchFree(), bms_add_member(), bms_equal(), bms_free(), bms_is_subset(), for_each_cell, i, j, lappend(), lcons(), lfirst, lfirst_int, list_concat(), list_head(), list_length(), list_make1, lnext(), NIL, palloc(), palloc0(), and pfree().

Referenced by preprocess_grouping_sets().
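The function's core observation is that grouping sets forming a subset chain, e.g. (a,b,c) ⊇ (a,b) ⊇ (a) ⊇ (), can all be computed by one sorted rollup pass; the bipartite matching finds a minimum chain decomposition. A standalone toy showing the subset test on bitmask-encoded sets with a simple greedy chaining (greedy, unlike the BipartiteMatch approach above, is not guaranteed minimal; it assumes sets are listed largest-first and fit in an unsigned mask):

#include <stdio.h>

/* Grouping sets encoded as bitmasks of column numbers. */
static int is_subset(unsigned sub, unsigned super)
{
    return (sub & ~super) == 0;
}

int main(void)
{
    /* GROUPING SETS ((a,b,c), (a,b), (c), ()) with a=1, b=2, c=4 */
    unsigned sets[] = {7, 3, 4, 0};
    unsigned tail[4];
    int chain_of[4];
    int n = 4, nchains = 0;

    /* Attach each set to the first chain whose most recent member is a
     * superset of it; otherwise start a new chain (a new rollup). */
    for (int i = 0; i < n; i++)
    {
        int c = -1;
        for (int j = 0; j < nchains; j++)
            if (is_subset(sets[i], tail[j])) { c = j; break; }
        if (c < 0)
            c = nchains++;
        tail[c] = sets[i];
        chain_of[i] = c;
    }
    for (int i = 0; i < n; i++)
        printf("set %u -> chain %d\n", sets[i], chain_of[i]);
    /* sets 7, 3, 0 share chain 0 (one rollup pass); set 4 gets chain 1 */
    return 0;
}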

◆ gather_grouping_paths()

static void gather_grouping_paths ( PlannerInfo root,
RelOptInfo rel 
)
static

Definition at line 7659 of file planner.c.

7660 {
7661  ListCell *lc;
7662  Path *cheapest_partial_path;
7663  List *groupby_pathkeys;
7664 
7665  /*
7666  * This occurs after any partial aggregation has taken place, so trim off
7667  * any pathkeys added for ORDER BY / DISTINCT aggregates.
7668  */
7669  if (list_length(root->group_pathkeys) > root->num_groupby_pathkeys)
7670  groupby_pathkeys = list_copy_head(root->group_pathkeys,
7671  root->num_groupby_pathkeys);
7672  else
7673  groupby_pathkeys = root->group_pathkeys;
7674 
7675  /* Try Gather for unordered paths and Gather Merge for ordered ones. */
7676  generate_useful_gather_paths(root, rel, true);
7677 
7678  cheapest_partial_path = linitial(rel->partial_pathlist);
7679 
7680  /* XXX Shouldn't this also consider the group-key-reordering? */
7681  foreach(lc, rel->partial_pathlist)
7682  {
7683  Path *path = (Path *) lfirst(lc);
7684  bool is_sorted;
7685  int presorted_keys;
7686  double total_groups;
7687 
7688  is_sorted = pathkeys_count_contained_in(groupby_pathkeys,
7689  path->pathkeys,
7690  &presorted_keys);
7691 
7692  if (is_sorted)
7693  continue;
7694 
7695  /*
7696  * Try at least sorting the cheapest path and also try incrementally
7697  * sorting any path which is partially sorted already (no need to deal
7698  * with paths which have presorted keys when incremental sort is
7699  * disabled unless it's the cheapest input path).
7700  */
7701  if (path != cheapest_partial_path &&
7702  (presorted_keys == 0 || !enable_incremental_sort))
7703  continue;
7704 
7705  /*
7706  * We've no need to consider both a sort and incremental sort. We'll
7707  * just do a sort if there are no presorted keys and an incremental
7708  * sort when there are presorted keys.
7709  */
7710  if (presorted_keys == 0 || !enable_incremental_sort)
7711  path = (Path *) create_sort_path(root, rel, path,
7712  groupby_pathkeys,
7713  -1.0);
7714  else
7716  rel,
7717  path,
7718  groupby_pathkeys,
7719  presorted_keys,
7720  -1.0);
7721  total_groups = compute_gather_rows(path);
7722  path = (Path *)
7724  rel,
7725  path,
7726  rel->reltarget,
7727  groupby_pathkeys,
7728  NULL,
7729  &total_groups);
7730 
7731  add_path(rel, path);
7732  }
7733 }
List * list_copy_head(const List *oldlist, int len)
Definition: list.c:1593

References add_path(), compute_gather_rows(), create_gather_merge_path(), create_incremental_sort_path(), create_sort_path(), enable_incremental_sort, generate_useful_gather_paths(), lfirst, linitial, list_copy_head(), list_length(), RelOptInfo::partial_pathlist, Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::reltarget, and root.

Referenced by add_paths_to_grouping_rel(), and create_ordinary_grouping_paths().
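The decision rule in the loop above is compact: already-sorted paths are skipped (generate_useful_gather_paths() dealt with them), only the cheapest partial path or a partially presorted one (when incremental sort is enabled) is worth sorting, and presorted keys select incremental sort over a full sort. A condensed restatement of that policy with toy types, not the planner's data structures:

#include <stdio.h>

typedef enum { ALREADY_SORTED, SKIP, FULL_SORT, INCREMENTAL_SORT } SortChoice;

/* Mirror of the per-path filtering logic in gather_grouping_paths(). */
static SortChoice
choose_sort(int is_sorted, int is_cheapest, int presorted_keys,
            int enable_incremental_sort)
{
    if (is_sorted)
        return ALREADY_SORTED;      /* handled by the earlier gather step */
    if (!is_cheapest && (presorted_keys == 0 || !enable_incremental_sort))
        return SKIP;                /* not worth sorting this path */
    if (presorted_keys == 0 || !enable_incremental_sort)
        return FULL_SORT;
    return INCREMENTAL_SORT;        /* exploit the presorted prefix */
}

int main(void)
{
    /* a non-cheapest path with 2 presorted keys, incremental sort enabled */
    printf("%d\n", choose_sort(0, 0, 2, 1));    /* 3 = INCREMENTAL_SORT */
    return 0;
}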

◆ generate_setop_child_grouplist()

static List * generate_setop_child_grouplist ( SetOperationStmt op,
List targetlist 
)
static

Definition at line 8247 of file planner.c.

8248 {
8249  List *grouplist = copyObject(op->groupClauses);
8250  ListCell *lg;
8251  ListCell *lt;
8252 
8253  lg = list_head(grouplist);
8254  foreach(lt, targetlist)
8255  {
8256  TargetEntry *tle = (TargetEntry *) lfirst(lt);
8257  SortGroupClause *sgc;
8258 
8259  /* resjunk columns could have sortgrouprefs. Leave these alone */
8260  if (tle->resjunk)
8261  continue;
8262 
8263  /* we expect every non-resjunk target to have a SortGroupClause */
8264  Assert(lg != NULL);
8265  sgc = (SortGroupClause *) lfirst(lg);
8266  lg = lnext(grouplist, lg);
8267 
8268  /* assign a tleSortGroupRef, or reuse the existing one */
8269  sgc->tleSortGroupRef = assignSortGroupRef(tle, targetlist);
8270  }
8271  Assert(lg == NULL);
8272  return grouplist;
8273 }
Index assignSortGroupRef(TargetEntry *tle, List *tlist)

References Assert, assignSortGroupRef(), copyObject, lfirst, list_head(), lnext(), and SortGroupClause::tleSortGroupRef.

Referenced by standard_qp_callback().

◆ get_cheapest_fractional_path()

Path* get_cheapest_fractional_path ( RelOptInfo rel,
double  tuple_fraction 
)

Definition at line 6576 of file planner.c.

6577 {
6578  Path *best_path = rel->cheapest_total_path;
6579  ListCell *l;
6580 
6581  /* If all tuples will be retrieved, just return the cheapest-total path */
6582  if (tuple_fraction <= 0.0)
6583  return best_path;
6584 
6585  /* Convert absolute # of tuples to a fraction; no need to clamp to 0..1 */
6586  if (tuple_fraction >= 1.0 && best_path->rows > 0)
6587  tuple_fraction /= best_path->rows;
6588 
6589  foreach(l, rel->pathlist)
6590  {
6591  Path *path = (Path *) lfirst(l);
6592 
6593  if (path == rel->cheapest_total_path ||
6594  compare_fractional_path_costs(best_path, path, tuple_fraction) <= 0)
6595  continue;
6596 
6597  best_path = path;
6598  }
6599 
6600  return best_path;
6601 }
int compare_fractional_path_costs(Path *path1, Path *path2, double fraction)
Definition: pathnode.c:124

References RelOptInfo::cheapest_total_path, compare_fractional_path_costs(), lfirst, RelOptInfo::pathlist, and Path::rows.

Referenced by make_subplan(), and standard_planner().
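compare_fractional_path_costs() interpolates: the cost to fetch fraction f of a path's rows is taken as startup_cost + f * (total_cost - startup_cost), so a high-startup path can lose for small fractions even when its total cost is lower. A standalone sketch of that comparison (the interpolation formula matches pathnode.c; ToyPath is a stand-in struct):

#include <stdio.h>

typedef struct { double startup_cost, total_cost; } ToyPath;

/* Cost to fetch the given fraction of a path's output, assuming run cost
 * accrues linearly with rows fetched. */
static double fractional_cost(const ToyPath *p, double f)
{
    return p->startup_cost + f * (p->total_cost - p->startup_cost);
}

int main(void)
{
    ToyPath sorted = {90.0, 100.0};   /* e.g. explicit sort: pays up front */
    ToyPath stream = {5.0, 200.0};    /* e.g. index scan: cheap to start */
    double fs[] = {0.05, 1.0};

    for (int i = 0; i < 2; i++)
        printf("f=%.2f  sorted=%.2f  stream=%.2f\n",
               fs[i], fractional_cost(&sorted, fs[i]),
               fractional_cost(&stream, fs[i]));
    /* At f=0.05 the streaming path wins; at f=1.00 the sorted path wins. */
    return 0;
}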

◆ get_number_of_groups()

static double get_number_of_groups ( PlannerInfo root,
double  path_rows,
grouping_sets_data gd,
List target_list 
)
static

Definition at line 3703 of file planner.c.

3707 {
3708  Query *parse = root->parse;
3709  double dNumGroups;
3710 
3711  if (parse->groupClause)
3712  {
3713  List *groupExprs;
3714 
3715  if (parse->groupingSets)
3716  {
3717  /* Add up the estimates for each grouping set */
3718  ListCell *lc;
3719 
3720  Assert(gd); /* keep Coverity happy */
3721 
3722  dNumGroups = 0;
3723 
3724  foreach(lc, gd->rollups)
3725  {
3726  RollupData *rollup = lfirst_node(RollupData, lc);
3727  ListCell *lc2;
3728  ListCell *lc3;
3729 
3730  groupExprs = get_sortgrouplist_exprs(rollup->groupClause,
3731  target_list);
3732 
3733  rollup->numGroups = 0.0;
3734 
3735  forboth(lc2, rollup->gsets, lc3, rollup->gsets_data)
3736  {
3737  List *gset = (List *) lfirst(lc2);
3738  GroupingSetData *gs = lfirst_node(GroupingSetData, lc3);
3739  double numGroups = estimate_num_groups(root,
3740  groupExprs,
3741  path_rows,
3742  &gset,
3743  NULL);
3744 
3745  gs->numGroups = numGroups;
3746  rollup->numGroups += numGroups;
3747  }
3748 
3749  dNumGroups += rollup->numGroups;
3750  }
3751 
3752  if (gd->hash_sets_idx)
3753  {
3754  ListCell *lc2;
3755 
3756  gd->dNumHashGroups = 0;
3757 
3758  groupExprs = get_sortgrouplist_exprs(parse->groupClause,
3759  target_list);
3760 
3761  forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets)
3762  {
3763  List *gset = (List *) lfirst(lc);
3764  GroupingSetData *gs = lfirst_node(GroupingSetData, lc2);
3765  double numGroups = estimate_num_groups(root,
3766  groupExprs,
3767  path_rows,
3768  &gset,
3769  NULL);
3770 
3771  gs->numGroups = numGroups;
3772  gd->dNumHashGroups += numGroups;
3773  }
3774 
3775  dNumGroups += gd->dNumHashGroups;
3776  }
3777  }
3778  else
3779  {
3780  /* Plain GROUP BY -- estimate based on optimized groupClause */
3781  groupExprs = get_sortgrouplist_exprs(root->processed_groupClause,
3782  target_list);
3783 
3784  dNumGroups = estimate_num_groups(root, groupExprs, path_rows,
3785  NULL, NULL);
3786  }
3787  }
3788  else if (parse->groupingSets)
3789  {
3790  /* Empty grouping sets ... one result row for each one */
3791  dNumGroups = list_length(parse->groupingSets);
3792  }
3793  else if (parse->hasAggs || root->hasHavingQual)
3794  {
3795  /* Plain aggregation, one result row */
3796  dNumGroups = 1;
3797  }
3798  else
3799  {
3800  /* Not grouping */
3801  dNumGroups = 1;
3802  }
3803 
3804  return dNumGroups;
3805 }
List * hash_sets_idx
Definition: planner.c:101

References Assert, grouping_sets_data::dNumHashGroups, estimate_num_groups(), forboth, get_sortgrouplist_exprs(), RollupData::groupClause, RollupData::gsets, RollupData::gsets_data, grouping_sets_data::hash_sets_idx, lfirst, lfirst_node, list_length(), GroupingSetData::numGroups, RollupData::numGroups, parse(), grouping_sets_data::rollups, root, and grouping_sets_data::unsortable_sets.

Referenced by create_ordinary_grouping_paths(), and create_partial_grouping_paths().
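For grouping sets the estimate is purely additive: each rollup's numGroups is the sum of the per-set estimates, and dNumGroups sums the rollups plus any hashed (unsortable) sets; with plain GROUP BY there is one estimate, and with aggregates but no grouping there is exactly one group. A toy version of that accumulation, with flat arrays standing in for the rollup lists and made-up numbers standing in for estimate_num_groups() results:

#include <stdio.h>

int main(void)
{
    double rollup0[] = {1000.0, 100.0, 1.0};   /* sets (a,b), (a), () */
    double rollup1[] = {500.0};                /* set (c) */
    double hashed[]  = {250.0};                /* one unsortable set */
    double dNumGroups = 0.0;

    for (int i = 0; i < 3; i++) dNumGroups += rollup0[i];  /* rollup 0 */
    for (int i = 0; i < 1; i++) dNumGroups += rollup1[i];  /* rollup 1 */
    for (int i = 0; i < 1; i++) dNumGroups += hashed[i];   /* hashed sets */

    printf("dNumGroups = %.0f\n", dNumGroups);   /* 1851 */
    return 0;
}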

◆ get_useful_pathkeys_for_distinct()

static List * get_useful_pathkeys_for_distinct ( PlannerInfo root,
List needed_pathkeys,
List path_pathkeys 
)
static

Definition at line 5268 of file planner.c.

5270 {
5271  List *useful_pathkeys_list = NIL;
5272  List *useful_pathkeys = NIL;
5273 
5274  /* always include the given 'needed_pathkeys' */
5275  useful_pathkeys_list = lappend(useful_pathkeys_list,
5276  needed_pathkeys);
5277 
5277 
5278  if (!enable_distinct_reordering)
5279  return useful_pathkeys_list;
5280 
5281  /*
5282  * Scan the given 'path_pathkeys' and construct a list of PathKey nodes
5283  * that match 'needed_pathkeys', but only up to the longest matching
5284  * prefix.
5285  *
5286  * When we have DISTINCT ON, we must ensure that the resulting pathkey
5287  * list matches initial distinctClause pathkeys; otherwise, it won't have
5288  * the desired behavior.
5289  */
5290  foreach_node(PathKey, pathkey, path_pathkeys)
5291  {
5292  /*
5293  * The PathKey nodes are canonical, so they can be checked for
5294  * equality by simple pointer comparison.
5295  */
5296  if (!list_member_ptr(needed_pathkeys, pathkey))
5297  break;
5298  if (root->parse->hasDistinctOn &&
5299  !list_member_ptr(root->distinct_pathkeys, pathkey))
5300  break;
5301 
5302  useful_pathkeys = lappend(useful_pathkeys, pathkey);
5303  }
5304 
5305  /* If no match at all, no point in reordering needed_pathkeys */
5306  if (useful_pathkeys == NIL)
5307  return useful_pathkeys_list;
5308 
5309  /*
5310  * If not full match, the resulting pathkey list is not useful without
5311  * incremental sort.
5312  */
5313  if (list_length(useful_pathkeys) < list_length(needed_pathkeys) &&
5314  !enable_incremental_sort)
5315  return useful_pathkeys_list;
5316 
5317  /* Append the remaining PathKey nodes in needed_pathkeys */
5318  useful_pathkeys = list_concat_unique_ptr(useful_pathkeys,
5319  needed_pathkeys);
5320 
5321  /*
5322  * If the resulting pathkey list is the same as the 'needed_pathkeys',
5323  * just drop it.
5324  */
5325  if (compare_pathkeys(needed_pathkeys,
5326  useful_pathkeys) == PATHKEYS_EQUAL)
5327  return useful_pathkeys_list;
5328 
5329  useful_pathkeys_list = lappend(useful_pathkeys_list,
5330  useful_pathkeys);
5331 
5332  return useful_pathkeys_list;
5333 }
bool list_member_ptr(const List *list, const void *datum)
Definition: list.c:682
List * list_concat_unique_ptr(List *list1, const List *list2)
Definition: list.c:1427
bool enable_distinct_reordering
Definition: planner.c:70

References compare_pathkeys(), enable_distinct_reordering, enable_incremental_sort, foreach_node, lappend(), list_concat_unique_ptr(), list_length(), list_member_ptr(), NIL, PATHKEYS_EQUAL, and root.

Referenced by create_final_distinct_paths(), and create_partial_distinct_paths().

◆ group_by_has_partkey()

static bool group_by_has_partkey ( RelOptInfo input_rel,
List targetList,
List groupClause 
)
static

Definition at line 8163 of file planner.c.

8166 {
8167  List *groupexprs = get_sortgrouplist_exprs(groupClause, targetList);
8168  int cnt = 0;
8169  int partnatts;
8170 
8171  /* Input relation should be partitioned. */
8172  Assert(input_rel->part_scheme);
8173 
8174  /* Rule out early, if there are no partition keys present. */
8175  if (!input_rel->partexprs)
8176  return false;
8177 
8178  partnatts = input_rel->part_scheme->partnatts;
8179 
8180  for (cnt = 0; cnt < partnatts; cnt++)
8181  {
8182  List *partexprs = input_rel->partexprs[cnt];
8183  ListCell *lc;
8184  bool found = false;
8185 
8186  foreach(lc, partexprs)
8187  {
8188  ListCell *lg;
8189  Expr *partexpr = lfirst(lc);
8190  Oid partcoll = input_rel->part_scheme->partcollation[cnt];
8191 
8192  foreach(lg, groupexprs)
8193  {
8194  Expr *groupexpr = lfirst(lg);
8195  Oid groupcoll = exprCollation((Node *) groupexpr);
8196 
8197  /*
8198  * Note: we can assume there is at most one RelabelType node;
8199  * eval_const_expressions() will have simplified if more than
8200  * one.
8201  */
8202  if (IsA(groupexpr, RelabelType))
8203  groupexpr = ((RelabelType *) groupexpr)->arg;
8204 
8205  if (equal(groupexpr, partexpr))
8206  {
8207  /*
8208  * Reject a match if the grouping collation does not match
8209  * the partitioning collation.
8210  */
8211  if (OidIsValid(partcoll) && OidIsValid(groupcoll) &&
8212  partcoll != groupcoll)
8213  return false;
8214 
8215  found = true;
8216  break;
8217  }
8218  }
8219 
8220  if (found)
8221  break;
8222  }
8223 
8224  /*
8225  * If none of the expressions for this partition key matches any of
8226  * the GROUP BY expressions, return false.
8227  */
8228  if (!found)
8229  return false;
8230  }
8231 
8232  return true;
8233 }
#define OidIsValid(objectId)
Definition: c.h:729
Oid exprCollation(const Node *expr)
Definition: nodeFuncs.c:816
#define IsA(nodeptr, _type_)
Definition: nodes.h:158
unsigned int Oid
Definition: postgres_ext.h:31

References Assert, equal(), exprCollation(), get_sortgrouplist_exprs(), IsA, lfirst, and OidIsValid.

Referenced by create_ordinary_grouping_paths().
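The test is: every partition key must match some GROUP BY expression, with compatible collation; only then is grouping guaranteed not to cross partition boundaries. A standalone toy with integer ids in place of expression trees and collations (0 playing the role of InvalidOid):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for group_by_has_partkey(): expressions and collations are
 * reduced to integer ids; id 0 means "no collation". */
static bool
group_by_has_partkey_toy(const int partkey[], const int partcoll[], int nparts,
                         const int groupexpr[], const int groupcoll[], int ngroup)
{
    for (int p = 0; p < nparts; p++)
    {
        bool found = false;

        for (int g = 0; g < ngroup; g++)
        {
            if (groupexpr[g] != partkey[p])
                continue;
            /* reject a match if both collations are set but differ */
            if (partcoll[p] != 0 && groupcoll[g] != 0 &&
                partcoll[p] != groupcoll[g])
                return false;
            found = true;
            break;
        }
        if (!found)
            return false;   /* this partition key is not grouped on */
    }
    return true;
}

int main(void)
{
    int pk[] = {42}, pc[] = {100};        /* partition key: expr 42, coll 100 */
    int ge[] = {7, 42}, gc[] = {0, 100};  /* GROUP BY exprs 7 and 42 */
    printf("%d\n", group_by_has_partkey_toy(pk, pc, 1, ge, gc, 2));  /* 1 */
    return 0;
}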

◆ grouping_planner()

static void grouping_planner ( PlannerInfo root,
double  tuple_fraction,
SetOperationStmt setops 
)
static

Definition at line 1366 of file planner.c.

1368 {
1369  Query *parse = root->parse;
1370  int64 offset_est = 0;
1371  int64 count_est = 0;
1372  double limit_tuples = -1.0;
1373  bool have_postponed_srfs = false;
1374  PathTarget *final_target;
1375  List *final_targets;
1376  List *final_targets_contain_srfs;
1377  bool final_target_parallel_safe;
1378  RelOptInfo *current_rel;
1379  RelOptInfo *final_rel;
1380  FinalPathExtraData extra;
1381  ListCell *lc;
1382 
1383  /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
1384  if (parse->limitCount || parse->limitOffset)
1385  {
1386  tuple_fraction = preprocess_limit(root, tuple_fraction,
1387  &offset_est, &count_est);
1388 
1389  /*
1390  * If we have a known LIMIT, and don't have an unknown OFFSET, we can
1391  * estimate the effects of using a bounded sort.
1392  */
1393  if (count_est > 0 && offset_est >= 0)
1394  limit_tuples = (double) count_est + (double) offset_est;
1395  }
1396 
1397  /* Make tuple_fraction accessible to lower-level routines */
1398  root->tuple_fraction = tuple_fraction;
1399 
1400  if (parse->setOperations)
1401  {
1402  /*
1403  * Construct Paths for set operations. The results will not need any
1404  * work except perhaps a top-level sort and/or LIMIT. Note that any
1405  * special work for recursive unions is the responsibility of
1406  * plan_set_operations.
1407  */
1408  current_rel = plan_set_operations(root);
1409 
1410  /*
1411  * We should not need to call preprocess_targetlist, since we must be
1412  * in a SELECT query node. Instead, use the processed_tlist returned
1413  * by plan_set_operations (since this tells whether it returned any
1414  * resjunk columns!), and transfer any sort key information from the
1415  * original tlist.
1416  */
1417  Assert(parse->commandType == CMD_SELECT);
1418 
1419  /* for safety, copy processed_tlist instead of modifying in-place */
1420  root->processed_tlist =
1421  postprocess_setop_tlist(copyObject(root->processed_tlist),
1422  parse->targetList);
1423 
1424  /* Also extract the PathTarget form of the setop result tlist */
1425  final_target = current_rel->cheapest_total_path->pathtarget;
1426 
1427  /* And check whether it's parallel safe */
1428  final_target_parallel_safe =
1429  is_parallel_safe(root, (Node *) final_target->exprs);
1430 
1431  /* The setop result tlist couldn't contain any SRFs */
1432  Assert(!parse->hasTargetSRFs);
1433  final_targets = final_targets_contain_srfs = NIL;
1434 
1435  /*
1436  * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have
1437  * checked already, but let's make sure).
1438  */
1439  if (parse->rowMarks)
1440  ereport(ERROR,
1441  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1442  /*------
1443  translator: %s is a SQL row locking clause such as FOR UPDATE */
1444  errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
1445  LCS_asString(linitial_node(RowMarkClause,
1446  parse->rowMarks)->strength))));
1447 
1448  /*
1449  * Calculate pathkeys that represent result ordering requirements
1450  */
1451  Assert(parse->distinctClause == NIL);
1452  root->sort_pathkeys = make_pathkeys_for_sortclauses(root,
1453  parse->sortClause,
1454  root->processed_tlist);
1455  }
1456  else
1457  {
1458  /* No set operations, do regular planning */
1459  PathTarget *sort_input_target;
1460  List *sort_input_targets;
1461  List *sort_input_targets_contain_srfs;
1462  bool sort_input_target_parallel_safe;
1463  PathTarget *grouping_target;
1464  List *grouping_targets;
1465  List *grouping_targets_contain_srfs;
1466  bool grouping_target_parallel_safe;
1467  PathTarget *scanjoin_target;
1468  List *scanjoin_targets;
1469  List *scanjoin_targets_contain_srfs;
1470  bool scanjoin_target_parallel_safe;
1471  bool scanjoin_target_same_exprs;
1472  bool have_grouping;
1473  WindowFuncLists *wflists = NULL;
1474  List *activeWindows = NIL;
1475  grouping_sets_data *gset_data = NULL;
1476  standard_qp_extra qp_extra;
1477 
1478  /* A recursive query should always have setOperations */
1479  Assert(!root->hasRecursion);
1480 
1481  /* Preprocess grouping sets and GROUP BY clause, if any */
1482  if (parse->groupingSets)
1483  {
1484  gset_data = preprocess_grouping_sets(root);
1485  }
1486  else if (parse->groupClause)
1487  {
1488  /* Preprocess regular GROUP BY clause, if any */
1489  root->processed_groupClause = preprocess_groupclause(root, NIL);
1490  /* Remove any redundant GROUP BY columns */
1491  remove_useless_groupby_columns(root);
1492  }
1493 
1494  /*
1495  * Preprocess targetlist. Note that much of the remaining planning
1496  * work will be done with the PathTarget representation of tlists, but
1497  * we must also maintain the full representation of the final tlist so
1498  * that we can transfer its decoration (resnames etc) to the topmost
1499  * tlist of the finished Plan. This is kept in processed_tlist.
1500  */
1501  preprocess_targetlist(root);
1502 
1503  /*
1504  * Mark all the aggregates with resolved aggtranstypes, and detect
1505  * aggregates that are duplicates or can share transition state. We
1506  * must do this before slicing and dicing the tlist into various
1507  * pathtargets, else some copies of the Aggref nodes might escape
1508  * being marked.
1509  */
1510  if (parse->hasAggs)
1511  {
1512  preprocess_aggrefs(root, (Node *) root->processed_tlist);
1513  preprocess_aggrefs(root, (Node *) parse->havingQual);
1514  }
1515 
1516  /*
1517  * Locate any window functions in the tlist. (We don't need to look
1518  * anywhere else, since expressions used in ORDER BY will be in there
1519  * too.) Note that they could all have been eliminated by constant
1520  * folding, in which case we don't need to do any more work.
1521  */
1522  if (parse->hasWindowFuncs)
1523  {
1524  wflists = find_window_functions((Node *) root->processed_tlist,
1525  list_length(parse->windowClause));
1526  if (wflists->numWindowFuncs > 0)
1527  {
1528  /*
1529  * See if any modifications can be made to each WindowClause
1530  * to allow the executor to execute the WindowFuncs more
1531  * quickly.
1532  */
1533  optimize_window_clauses(root, wflists);
1534 
1535  activeWindows = select_active_windows(root, wflists);
1536  }
1537  else
1538  parse->hasWindowFuncs = false;
1539  }
1540 
1541  /*
1542  * Preprocess MIN/MAX aggregates, if any. Note: be careful about
1543  * adding logic between here and the query_planner() call. Anything
1544  * that is needed in MIN/MAX-optimizable cases will have to be
1545  * duplicated in planagg.c.
1546  */
1547  if (parse->hasAggs)
1548  preprocess_minmax_aggregates(root);
1549 
1550  /*
1551  * Figure out whether there's a hard limit on the number of rows that
1552  * query_planner's result subplan needs to return. Even if we know a
1553  * hard limit overall, it doesn't apply if the query has any
1554  * grouping/aggregation operations, or SRFs in the tlist.
1555  */
1556  if (parse->groupClause ||
1557  parse->groupingSets ||
1558  parse->distinctClause ||
1559  parse->hasAggs ||
1560  parse->hasWindowFuncs ||
1561  parse->hasTargetSRFs ||
1562  root->hasHavingQual)
1563  root->limit_tuples = -1.0;
1564  else
1565  root->limit_tuples = limit_tuples;
1566 
1567  /* Set up data needed by standard_qp_callback */
1568  qp_extra.activeWindows = activeWindows;
1569  qp_extra.gset_data = gset_data;
1570 
1571  /*
1572  * If we're a subquery for a set operation, store the SetOperationStmt
1573  * in qp_extra.
1574  */
1575  qp_extra.setop = setops;
1576 
1577  /*
1578  * Generate the best unsorted and presorted paths for the scan/join
1579  * portion of this Query, ie the processing represented by the
1580  * FROM/WHERE clauses. (Note there may not be any presorted paths.)
1581  * We also generate (in standard_qp_callback) pathkey representations
1582  * of the query's sort clause, distinct clause, etc.
1583  */
1584  current_rel = query_planner(root, standard_qp_callback, &qp_extra);
1585 
1586  /*
1587  * Convert the query's result tlist into PathTarget format.
1588  *
1589  * Note: this cannot be done before query_planner() has performed
1590  * appendrel expansion, because that might add resjunk entries to
1591  * root->processed_tlist. Waiting till afterwards is also helpful
1592  * because the target width estimates can use per-Var width numbers
1593  * that were obtained within query_planner().
1594  */
1595  final_target = create_pathtarget(root, root->processed_tlist);
1596  final_target_parallel_safe =
1597  is_parallel_safe(root, (Node *) final_target->exprs);
1598 
1599  /*
1600  * If ORDER BY was given, consider whether we should use a post-sort
1601  * projection, and compute the adjusted target for preceding steps if
1602  * so.
1603  */
1604  if (parse->sortClause)
1605  {
1606  sort_input_target = make_sort_input_target(root,
1607  final_target,
1608  &have_postponed_srfs);
1609  sort_input_target_parallel_safe =
1610  is_parallel_safe(root, (Node *) sort_input_target->exprs);
1611  }
1612  else
1613  {
1614  sort_input_target = final_target;
1615  sort_input_target_parallel_safe = final_target_parallel_safe;
1616  }
1617 
1618  /*
1619  * If we have window functions to deal with, the output from any
1620  * grouping step needs to be what the window functions want;
1621  * otherwise, it should be sort_input_target.
1622  */
1623  if (activeWindows)
1624  {
1625  grouping_target = make_window_input_target(root,
1626  final_target,
1627  activeWindows);
1628  grouping_target_parallel_safe =
1629  is_parallel_safe(root, (Node *) grouping_target->exprs);
1630  }
1631  else
1632  {
1633  grouping_target = sort_input_target;
1634  grouping_target_parallel_safe = sort_input_target_parallel_safe;
1635  }
1636 
1637  /*
1638  * If we have grouping or aggregation to do, the topmost scan/join
1639  * plan node must emit what the grouping step wants; otherwise, it
1640  * should emit grouping_target.
1641  */
1642  have_grouping = (parse->groupClause || parse->groupingSets ||
1643  parse->hasAggs || root->hasHavingQual);
1644  if (have_grouping)
1645  {
1646  scanjoin_target = make_group_input_target(root, final_target);
1647  scanjoin_target_parallel_safe =
1648  is_parallel_safe(root, (Node *) scanjoin_target->exprs);
1649  }
1650  else
1651  {
1652  scanjoin_target = grouping_target;
1653  scanjoin_target_parallel_safe = grouping_target_parallel_safe;
1654  }
1655 
1656  /*
1657  * If there are any SRFs in the targetlist, we must separate each of
1658  * these PathTargets into SRF-computing and SRF-free targets. Replace
1659  * each of the named targets with a SRF-free version, and remember the
1660  * list of additional projection steps we need to add afterwards.
1661  */
1662  if (parse->hasTargetSRFs)
1663  {
1664  /* final_target doesn't recompute any SRFs in sort_input_target */
1665  split_pathtarget_at_srfs(root, final_target, sort_input_target,
1666  &final_targets,
1667  &final_targets_contain_srfs);
1668  final_target = linitial_node(PathTarget, final_targets);
1669  Assert(!linitial_int(final_targets_contain_srfs));
1670  /* likewise for sort_input_target vs. grouping_target */
1671  split_pathtarget_at_srfs(root, sort_input_target, grouping_target,
1672  &sort_input_targets,
1673  &sort_input_targets_contain_srfs);
1674  sort_input_target = linitial_node(PathTarget, sort_input_targets);
1675  Assert(!linitial_int(sort_input_targets_contain_srfs));
1676  /* likewise for grouping_target vs. scanjoin_target */
1677  split_pathtarget_at_srfs(root, grouping_target, scanjoin_target,
1678  &grouping_targets,
1679  &grouping_targets_contain_srfs);
1680  grouping_target = linitial_node(PathTarget, grouping_targets);
1681  Assert(!linitial_int(grouping_targets_contain_srfs));
1682  /* scanjoin_target will not have any SRFs precomputed for it */
1683  split_pathtarget_at_srfs(root, scanjoin_target, NULL,
1684  &scanjoin_targets,
1685  &scanjoin_targets_contain_srfs);
1686  scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
1687  Assert(!linitial_int(scanjoin_targets_contain_srfs));
1688  }
1689  else
1690  {
1691  /* initialize lists; for most of these, dummy values are OK */
1692  final_targets = final_targets_contain_srfs = NIL;
1693  sort_input_targets = sort_input_targets_contain_srfs = NIL;
1694  grouping_targets = grouping_targets_contain_srfs = NIL;
1695  scanjoin_targets = list_make1(scanjoin_target);
1696  scanjoin_targets_contain_srfs = NIL;
1697  }
1698 
1699  /* Apply scan/join target. */
1700  scanjoin_target_same_exprs = list_length(scanjoin_targets) == 1
1701  && equal(scanjoin_target->exprs, current_rel->reltarget->exprs);
1702  apply_scanjoin_target_to_paths(root, current_rel, scanjoin_targets,
1703  scanjoin_targets_contain_srfs,
1704  scanjoin_target_parallel_safe,
1705  scanjoin_target_same_exprs);
1706 
1707  /*
1708  * Save the various upper-rel PathTargets we just computed into
1709  * root->upper_targets[]. The core code doesn't use this, but it
1710  * provides a convenient place for extensions to get at the info. For
1711  * consistency, we save all the intermediate targets, even though some
1712  * of the corresponding upperrels might not be needed for this query.
1713  */
1714  root->upper_targets[UPPERREL_FINAL] = final_target;
1715  root->upper_targets[UPPERREL_ORDERED] = final_target;
1716  root->upper_targets[UPPERREL_DISTINCT] = sort_input_target;
1717  root->upper_targets[UPPERREL_PARTIAL_DISTINCT] = sort_input_target;
1718  root->upper_targets[UPPERREL_WINDOW] = sort_input_target;
1719  root->upper_targets[UPPERREL_GROUP_AGG] = grouping_target;
1720 
1721  /*
1722  * If we have grouping and/or aggregation, consider ways to implement
1723  * that. We build a new upperrel representing the output of this
1724  * phase.
1725  */
1726  if (have_grouping)
1727  {
1728  current_rel = create_grouping_paths(root,
1729  current_rel,
1730  grouping_target,
1731  grouping_target_parallel_safe,
1732  gset_data);
1733  /* Fix things up if grouping_target contains SRFs */
1734  if (parse->hasTargetSRFs)
1735  adjust_paths_for_srfs(root, current_rel,
1736  grouping_targets,
1737  grouping_targets_contain_srfs);
1738  }
1739 
1740  /*
1741  * If we have window functions, consider ways to implement those. We
1742  * build a new upperrel representing the output of this phase.
1743  */
1744  if (activeWindows)
1745  {
1746  current_rel = create_window_paths(root,
1747  current_rel,
1748  grouping_target,
1749  sort_input_target,
1750  sort_input_target_parallel_safe,
1751  wflists,
1752  activeWindows);
1753  /* Fix things up if sort_input_target contains SRFs */
1754  if (parse->hasTargetSRFs)
1755  adjust_paths_for_srfs(root, current_rel,
1756  sort_input_targets,
1757  sort_input_targets_contain_srfs);
1758  }
1759 
1760  /*
1761  * If there is a DISTINCT clause, consider ways to implement that. We
1762  * build a new upperrel representing the output of this phase.
1763  */
1764  if (parse->distinctClause)
1765  {
1766  current_rel = create_distinct_paths(root,
1767  current_rel,
1768  sort_input_target);
1769  }
1770  } /* end of if (setOperations) */
1771 
1772  /*
1773  * If ORDER BY was given, consider ways to implement that, and generate a
1774  * new upperrel containing only paths that emit the correct ordering and
1775  * project the correct final_target. We can apply the original
1776  * limit_tuples limit in sort costing here, but only if there are no
1777  * postponed SRFs.
1778  */
1779  if (parse->sortClause)
1780  {
1781  current_rel = create_ordered_paths(root,
1782  current_rel,
1783  final_target,
1784  final_target_parallel_safe,
1785  have_postponed_srfs ? -1.0 :
1786  limit_tuples);
1787  /* Fix things up if final_target contains SRFs */
1788  if (parse->hasTargetSRFs)
1789  adjust_paths_for_srfs(root, current_rel,
1790  final_targets,
1791  final_targets_contain_srfs);
1792  }
1793 
1794  /*
1795  * Now we are prepared to build the final-output upperrel.
1796  */
1797  final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1798 
1799  /*
1800  * If the input rel is marked consider_parallel and there's nothing that's
1801  * not parallel-safe in the LIMIT clause, then the final_rel can be marked
1802  * consider_parallel as well. Note that if the query has rowMarks or is
1803  * not a SELECT, consider_parallel will be false for every relation in the
1804  * query.
1805  */
1806  if (current_rel->consider_parallel &&
1807  is_parallel_safe(root, parse->limitOffset) &&
1808  is_parallel_safe(root, parse->limitCount))
1809  final_rel->consider_parallel = true;
1810 
1811  /*
1812  * If the current_rel belongs to a single FDW, so does the final_rel.
1813  */
1814  final_rel->serverid = current_rel->serverid;
1815  final_rel->userid = current_rel->userid;
1816  final_rel->useridiscurrent = current_rel->useridiscurrent;
1817  final_rel->fdwroutine = current_rel->fdwroutine;
1818 
1819  /*
1820  * Generate paths for the final_rel. Insert all surviving paths, with
1821  * LockRows, Limit, and/or ModifyTable steps added if needed.
1822  */
1823  foreach(lc, current_rel->pathlist)
1824  {
1825  Path *path = (Path *) lfirst(lc);
1826 
1827  /*
1828  * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
1829  * (Note: we intentionally test parse->rowMarks not root->rowMarks
1830  * here. If there are only non-locking rowmarks, they should be
1831  * handled by the ModifyTable node instead. However, root->rowMarks
1832  * is what goes into the LockRows node.)
1833  */
1834  if (parse->rowMarks)
1835  {
1836  path = (Path *) create_lockrows_path(root, final_rel, path,
1837  root->rowMarks,
1838  assign_special_exec_param(root));
1839  }
1840 
1841  /*
1842  * If there is a LIMIT/OFFSET clause, add the LIMIT node.
1843  */
1844  if (limit_needed(parse))
1845  {
1846  path = (Path *) create_limit_path(root, final_rel, path,
1847  parse->limitOffset,
1848  parse->limitCount,
1849  parse->limitOption,
1850  offset_est, count_est);
1851  }
1852 
1853  /*
1854  * If this is an INSERT/UPDATE/DELETE/MERGE, add the ModifyTable node.
1855  */
1856  if (parse->commandType != CMD_SELECT)
1857  {
1858  Index rootRelation;
1859  List *resultRelations = NIL;
1860  List *updateColnosLists = NIL;
1861  List *withCheckOptionLists = NIL;
1862  List *returningLists = NIL;
1863  List *mergeActionLists = NIL;
1864  List *mergeJoinConditions = NIL;
1865  List *rowMarks;
1866 
1867  if (bms_membership(root->all_result_relids) == BMS_MULTIPLE)
1868  {
1869  /* Inherited UPDATE/DELETE/MERGE */
1870  RelOptInfo *top_result_rel = find_base_rel(root,
1871  parse->resultRelation);
1872  int resultRelation = -1;
1873 
1874  /* Pass the root result rel forward to the executor. */
1875  rootRelation = parse->resultRelation;
1876 
1877  /* Add only leaf children to ModifyTable. */
1878  while ((resultRelation = bms_next_member(root->leaf_result_relids,
1879  resultRelation)) >= 0)
1880  {
1881  RelOptInfo *this_result_rel = find_base_rel(root,
1882  resultRelation);
1883 
1884  /*
1885  * Also exclude any leaf rels that have turned dummy since
1886  * being added to the list, for example, by being excluded
1887  * by constraint exclusion.
1888  */
1889  if (IS_DUMMY_REL(this_result_rel))
1890  continue;
1891 
1892  /* Build per-target-rel lists needed by ModifyTable */
1893  resultRelations = lappend_int(resultRelations,
1894  resultRelation);
1895  if (parse->commandType == CMD_UPDATE)
1896  {
1897  List *update_colnos = root->update_colnos;
1898 
1899  if (this_result_rel != top_result_rel)
1900  update_colnos =
1901  adjust_inherited_attnums_multilevel(root,
1902  update_colnos,
1903  this_result_rel->relid,
1904  top_result_rel->relid);
1905  updateColnosLists = lappend(updateColnosLists,
1906  update_colnos);
1907  }
1908  if (parse->withCheckOptions)
1909  {
1910  List *withCheckOptions = parse->withCheckOptions;
1911 
1912  if (this_result_rel != top_result_rel)
1913  withCheckOptions = (List *)
1914  adjust_appendrel_attrs_multilevel(root,
1915  (Node *) withCheckOptions,
1916  this_result_rel,
1917  top_result_rel);
1918  withCheckOptionLists = lappend(withCheckOptionLists,
1919  withCheckOptions);
1920  }
1921  if (parse->returningList)
1922  {
1923  List *returningList = parse->returningList;
1924 
1925  if (this_result_rel != top_result_rel)
1926  returningList = (List *)
1927  adjust_appendrel_attrs_multilevel(root,
1928  (Node *) returningList,
1929  this_result_rel,
1930  top_result_rel);
1931  returningLists = lappend(returningLists,
1932  returningList);
1933  }
1934  if (parse->mergeActionList)
1935  {
1936  ListCell *l;
1937  List *mergeActionList = NIL;
1938 
1939  /*
1940  * Copy MergeActions and translate stuff that
1941  * references attribute numbers.
1942  */
1943  foreach(l, parse->mergeActionList)
1944  {
1945  MergeAction *action = lfirst(l),
1946  *leaf_action = copyObject(action);
1947 
1948  leaf_action->qual =
1949  adjust_appendrel_attrs_multilevel(root,
1950  (Node *) action->qual,
1951  this_result_rel,
1952  top_result_rel);
1953  leaf_action->targetList = (List *)
1954  adjust_appendrel_attrs_multilevel(root,
1955  (Node *) action->targetList,
1956  this_result_rel,
1957  top_result_rel);
1958  if (leaf_action->commandType == CMD_UPDATE)
1959  leaf_action->updateColnos =
1960  adjust_inherited_attnums_multilevel(root,
1961  action->updateColnos,
1962  this_result_rel->relid,
1963  top_result_rel->relid);
1964  mergeActionList = lappend(mergeActionList,
1965  leaf_action);
1966  }
1967 
1968  mergeActionLists = lappend(mergeActionLists,
1969  mergeActionList);
1970  }
1971  if (parse->commandType == CMD_MERGE)
1972  {
1973  Node *mergeJoinCondition = parse->mergeJoinCondition;
1974 
1975  if (this_result_rel != top_result_rel)
1976  mergeJoinCondition =
1977  adjust_appendrel_attrs_multilevel(root,
1978  mergeJoinCondition,
1979  this_result_rel,
1980  top_result_rel);
1981  mergeJoinConditions = lappend(mergeJoinConditions,
1982  mergeJoinCondition);
1983  }
1984  }
1985 
1986  if (resultRelations == NIL)
1987  {
1988  /*
1989  * We managed to exclude every child rel, so generate a
1990  * dummy one-relation plan using info for the top target
1991  * rel (even though that may not be a leaf target).
1992  * Although it's clear that no data will be updated or
1993  * deleted, we still need to have a ModifyTable node so
1994  * that any statement triggers will be executed. (This
1995  * could be cleaner if we fixed nodeModifyTable.c to allow
1996  * zero target relations, but that probably wouldn't be a
1997  * net win.)
1998  */
1999  resultRelations = list_make1_int(parse->resultRelation);
2000  if (parse->commandType == CMD_UPDATE)
2001  updateColnosLists = list_make1(root->update_colnos);
2002  if (parse->withCheckOptions)
2003  withCheckOptionLists = list_make1(parse->withCheckOptions);
2004  if (parse->returningList)
2005  returningLists = list_make1(parse->returningList);
2006  if (parse->mergeActionList)
2007  mergeActionLists = list_make1(parse->mergeActionList);
2008  if (parse->commandType == CMD_MERGE)
2009  mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2010  }
2011  }
2012  else
2013  {
2014  /* Single-relation INSERT/UPDATE/DELETE/MERGE. */
2015  rootRelation = 0; /* there's no separate root rel */
2016  resultRelations = list_make1_int(parse->resultRelation);
2017  if (parse->commandType == CMD_UPDATE)
2018  updateColnosLists = list_make1(root->update_colnos);
2019  if (parse->withCheckOptions)
2020  withCheckOptionLists = list_make1(parse->withCheckOptions);
2021  if (parse->returningList)
2022  returningLists = list_make1(parse->returningList);
2023  if (parse->mergeActionList)
2024  mergeActionLists = list_make1(parse->mergeActionList);
2025  if (parse->commandType == CMD_MERGE)
2026  mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2027  }
2028 
2029  /*
2030  * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
2031  * will have dealt with fetching non-locked marked rows, else we
2032  * need to have ModifyTable do that.
2033  */
2034  if (parse->rowMarks)
2035  rowMarks = NIL;
2036  else
2037  rowMarks = root->rowMarks;
2038 
2039  path = (Path *)
2040  create_modifytable_path(root, final_rel,
2041  path,
2042  parse->commandType,
2043  parse->canSetTag,
2044  parse->resultRelation,
2045  rootRelation,
2046  root->partColsUpdated,
2047  resultRelations,
2048  updateColnosLists,
2049  withCheckOptionLists,
2050  returningLists,
2051  rowMarks,
2052  parse->onConflict,
2053  mergeActionLists,
2054  mergeJoinConditions,
2055  assign_special_exec_param(root));
2056  }
2057 
2058  /* And shove it into final_rel */
2059  add_path(final_rel, path);
2060  }
2061 
2062  /*
2063  * Generate partial paths for final_rel, too, if outer query levels might
2064  * be able to make use of them.
2065  */
2066  if (final_rel->consider_parallel && root->query_level > 1 &&
2067  !limit_needed(parse))
2068  {
2069  Assert(!parse->rowMarks && parse->commandType == CMD_SELECT);
2070  foreach(lc, current_rel->partial_pathlist)
2071  {
2072  Path *partial_path = (Path *) lfirst(lc);
2073 
2074  add_partial_path(final_rel, partial_path);
2075  }
2076  }
2077 
2078  extra.limit_needed = limit_needed(parse);
2079  extra.limit_tuples = limit_tuples;
2080  extra.count_est = count_est;
2081  extra.offset_est = offset_est;
2082 
2083  /*
2084  * If there is an FDW that's responsible for all baserels of the query,
2085  * let it consider adding ForeignPaths.
2086  */
2087  if (final_rel->fdwroutine &&
2088  final_rel->fdwroutine->GetForeignUpperPaths)
2089  final_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_FINAL,
2090  current_rel, final_rel,
2091  &extra);
2092 
2093  /* Let extensions possibly add some more paths */
2094  if (create_upper_paths_hook)
2095  (*create_upper_paths_hook) (root, UPPERREL_FINAL,
2096  current_rel, final_rel, &extra);
2097 
2098  /* Note: currently, we leave it to callers to do set_cheapest() */
2099 }
Node * adjust_appendrel_attrs_multilevel(PlannerInfo *root, Node *node, RelOptInfo *childrel, RelOptInfo *parentrel)
Definition: appendinfo.c:524
List * adjust_inherited_attnums_multilevel(PlannerInfo *root, List *attnums, Index child_relid, Index top_parent_relid)
Definition: appendinfo.c:665
BMS_Membership bms_membership(const Bitmapset *a)
Definition: bitmapset.c:781
@ BMS_MULTIPLE
Definition: bitmapset.h:73
unsigned int Index
Definition: c.h:568
WindowFuncLists * find_window_functions(Node *clause, Index maxWinRef)
Definition: clauses.c:227
List * lappend_int(List *list, int datum)
Definition: list.c:357
@ CMD_MERGE
Definition: nodes.h:269
@ CMD_UPDATE
Definition: nodes.h:266
@ CMD_SELECT
Definition: nodes.h:265
int assign_special_exec_param(PlannerInfo *root)
Definition: paramassign.c:664
const char * LCS_asString(LockClauseStrength strength)
Definition: analyze.c:3305
LockRowsPath * create_lockrows_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *rowMarks, int epqParam)
Definition: pathnode.c:3756
ModifyTablePath * create_modifytable_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, CmdType operation, bool canSetTag, Index nominalRelation, Index rootRelation, bool partColsUpdated, List *resultRelations, List *updateColnosLists, List *withCheckOptionLists, List *returningLists, List *rowMarks, OnConflictExpr *onconflict, List *mergeActionLists, List *mergeJoinConditions, int epqParam)
Definition: pathnode.c:3820
@ UPPERREL_FINAL
Definition: pathnodes.h:79
#define list_make1_int(x1)
Definition: pg_list.h:227
void preprocess_minmax_aggregates(PlannerInfo *root)
Definition: planagg.c:73
RelOptInfo * query_planner(PlannerInfo *root, query_pathkeys_callback qp_callback, void *qp_extra)
Definition: planmain.c:54
static List * postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
Definition: planner.c:5823
static void remove_useless_groupby_columns(PlannerInfo *root)
Definition: planner.c:2748
static double preprocess_limit(PlannerInfo *root, double tuple_fraction, int64 *offset_est, int64 *count_est)
Definition: planner.c:2504
static PathTarget * make_window_input_target(PlannerInfo *root, PathTarget *final_target, List *activeWindows)
Definition: planner.c:6157
static RelOptInfo * create_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target)
Definition: planner.c:4835
static void optimize_window_clauses(PlannerInfo *root, WindowFuncLists *wflists)
Definition: planner.c:5860
static PathTarget * make_sort_input_target(PlannerInfo *root, PathTarget *final_target, bool *have_postponed_srfs)
Definition: planner.c:6405
static grouping_sets_data * preprocess_grouping_sets(PlannerInfo *root)
Definition: planner.c:2108
static PathTarget * make_group_input_target(PlannerInfo *root, PathTarget *final_target)
Definition: planner.c:5573
static List * select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
Definition: planner.c:6000
bool limit_needed(Query *parse)
Definition: planner.c:2689
static RelOptInfo * create_ordered_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, double limit_tuples)
Definition: planner.c:5353
static RelOptInfo * create_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, grouping_sets_data *gd)
Definition: planner.c:3825
static void standard_qp_callback(PlannerInfo *root, void *extra)
Definition: planner.c:3497
static RelOptInfo * create_window_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *input_target, PathTarget *output_target, bool output_target_parallel_safe, WindowFuncLists *wflists, List *activeWindows)
Definition: planner.c:4578
void preprocess_aggrefs(PlannerInfo *root, Node *clause)
Definition: prepagg.c:110
void preprocess_targetlist(PlannerInfo *root)
Definition: preptlist.c:62
RelOptInfo * plan_set_operations(PlannerInfo *root)
Definition: prepunion.c:99
RelOptInfo * find_base_rel(PlannerInfo *root, int relid)
Definition: relnode.c:414
Cardinality limit_tuples
Definition: pathnodes.h:3326
Index relid
Definition: pathnodes.h:918
int numWindowFuncs
Definition: clauses.h:21
List * activeWindows
Definition: planner.c:124
grouping_sets_data * gset_data
Definition: planner.c:125
SetOperationStmt * setop
Definition: planner.c:126
void split_pathtarget_at_srfs(PlannerInfo *root, PathTarget *target, PathTarget *input_target, List **targets, List **targets_contain_srfs)
Definition: tlist.c:881
#define create_pathtarget(root, tlist)
Definition: tlist.h:53

References generate_unaccent_rules::action, standard_qp_extra::activeWindows, add_partial_path(), add_path(), adjust_appendrel_attrs_multilevel(), adjust_inherited_attnums_multilevel(), adjust_paths_for_srfs(), apply_scanjoin_target_to_paths(), Assert, assign_special_exec_param(), bms_membership(), BMS_MULTIPLE, bms_next_member(), RelOptInfo::cheapest_total_path, CMD_MERGE, CMD_SELECT, CMD_UPDATE, RelOptInfo::consider_parallel, copyObject, FinalPathExtraData::count_est, create_distinct_paths(), create_grouping_paths(), create_limit_path(), create_lockrows_path(), create_modifytable_path(), create_ordered_paths(), create_pathtarget, create_upper_paths_hook, create_window_paths(), equal(), ereport, errcode(), errmsg(), ERROR, PathTarget::exprs, fetch_upper_rel(), find_base_rel(), find_window_functions(), standard_qp_extra::gset_data, IS_DUMMY_REL, is_parallel_safe(), lappend(), lappend_int(), LCS_asString(), lfirst, limit_needed(), FinalPathExtraData::limit_needed, FinalPathExtraData::limit_tuples, linitial_int, linitial_node, list_length(), list_make1, list_make1_int, make_group_input_target(), make_pathkeys_for_sortclauses(), make_sort_input_target(), make_window_input_target(), NIL, WindowFuncLists::numWindowFuncs, FinalPathExtraData::offset_est, optimize_window_clauses(), parse(), RelOptInfo::partial_pathlist, RelOptInfo::pathlist, plan_set_operations(), postprocess_setop_tlist(), preprocess_aggrefs(), preprocess_groupclause(), preprocess_grouping_sets(), preprocess_limit(), preprocess_minmax_aggregates(), preprocess_targetlist(), query_planner(), RelOptInfo::relid, RelOptInfo::reltarget, remove_useless_groupby_columns(), root, select_active_windows(), RelOptInfo::serverid, standard_qp_extra::setop, split_pathtarget_at_srfs(), standard_qp_callback(), UPPERREL_DISTINCT, UPPERREL_FINAL, UPPERREL_GROUP_AGG, UPPERREL_ORDERED, UPPERREL_PARTIAL_DISTINCT, UPPERREL_WINDOW, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by subquery_planner().
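
The tail of the listing above gives extensions a last chance to add paths through create_upper_paths_hook before set_cheapest() runs. A minimal sketch of installing that hook from an extension (my_upper_paths and prev_create_upper_paths_hook are placeholder names of mine; the hook type, its signature, and UPPERREL_FINAL come from the planner headers):

#include "postgres.h"
#include "fmgr.h"
#include "optimizer/planner.h"

PG_MODULE_MAGIC;

static create_upper_paths_hook_type prev_create_upper_paths_hook = NULL;

/* Invoked once per upper-rel stage; UPPERREL_FINAL is the last call. */
static void
my_upper_paths(PlannerInfo *root, UpperRelationKind stage,
               RelOptInfo *input_rel, RelOptInfo *output_rel,
               void *extra)
{
    /* Chain to any hook installed before us. */
    if (prev_create_upper_paths_hook)
        prev_create_upper_paths_hook(root, stage, input_rel,
                                     output_rel, extra);

    if (stage == UPPERREL_FINAL)
    {
        /* Inspect output_rel->pathlist here, or add_path() a custom one. */
    }
}

void
_PG_init(void)
{
    prev_create_upper_paths_hook = create_upper_paths_hook;
    create_upper_paths_hook = my_upper_paths;
}

Because grouping_planner() leaves set_cheapest() to its caller, paths added by the hook still compete on cost with everything already in final_rel.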

◆ has_volatile_pathkey()

static bool has_volatile_pathkey ( List * keys)
static

Definition at line 3271 of file planner.c.

3272 {
3273  ListCell *lc;
3274 
3275  foreach(lc, keys)
3276  {
3277  PathKey *pathkey = lfirst_node(PathKey, lc);
3278 
3279  if (pathkey->pk_eclass->ec_has_volatile)
3280  return true;
3281  }
3282 
3283  return false;
3284 }

References lfirst_node.

Referenced by adjust_group_pathkeys_for_groupagg().
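
For context (an editor's gloss, not part of the generated reference): ec_has_volatile is set on an equivalence class whose sort expression contains a volatile function, as with ORDER BY random(). Because such an expression cannot be assumed to produce the same value when evaluated more than once, adjust_group_pathkeys_for_groupagg() uses this test to decline to reuse volatile pathkeys.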

◆ is_degenerate_grouping()

static bool is_degenerate_grouping ( PlannerInfo * root)
static

Definition at line 3991 of file planner.c.

3992 {
3993  Query *parse = root->parse;
3994 
3995  return (root->hasHavingQual || parse->groupingSets) &&
3996  !parse->hasAggs && parse->groupClause == NIL;
3997 }

References NIL, parse(), and root.

Referenced by create_grouping_paths().
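
A concrete degenerate case (an illustrative example, not from the source): SELECT 1 FROM t HAVING false has a HAVING qual but no aggregates and an empty GROUP BY, so this test returns true; the result is necessarily zero rows or one row per grouping set, which create_degenerate_grouping_paths() plans without a real aggregation step.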

◆ limit_needed()

bool limit_needed ( Query * parse)

Definition at line 2689 of file planner.c.

2690 {
2691  Node *node;
2692 
2693  node = parse->limitCount;
2694  if (node)
2695  {
2696  if (IsA(node, Const))
2697  {
2698  /* NULL indicates LIMIT ALL, ie, no limit */
2699  if (!((Const *) node)->constisnull)
2700  return true; /* LIMIT with a constant value */
2701  }
2702  else
2703  return true; /* non-constant LIMIT */
2704  }
2705 
2706  node = parse->limitOffset;
2707  if (node)
2708  {
2709  if (IsA(node, Const))
2710  {
2711  /* Treat NULL as no offset; the executor would too */
2712  if (!((Const *) node)->constisnull)
2713  {
2714  int64 offset = DatumGetInt64(((Const *) node)->constvalue);
2715 
2716  if (offset != 0)
2717  return true; /* OFFSET with a nonzero value */
2718  }
2719  }
2720  else
2721  return true; /* non-constant OFFSET */
2722  }
2723 
2724  return false; /* don't need a Limit plan node */
2725 }
static int64 DatumGetInt64(Datum X)
Definition: postgres.h:385

References DatumGetInt64(), IsA, and parse().

Referenced by grouping_planner(), and set_rel_consider_parallel().
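
For plan-time constants, the function reduces to a small decision table: a non-null LIMIT constant or a nonzero OFFSET constant requires a Limit node; LIMIT ALL, OFFSET NULL, and OFFSET 0 do not; any non-constant expression always does. A standalone sketch of that table (illustrative C stand-ins of mine, not Const nodes):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * has_limit/has_offset: the clause is present at all.
 * limit_is_null/offset_is_null: model LIMIT ALL and OFFSET NULL, which
 * the executor treats as "no limit" / "no offset".
 * Non-constant expressions are assumed already handled (always true).
 */
static bool
limit_node_needed(bool has_limit, bool limit_is_null,
                  bool has_offset, bool offset_is_null, int64_t offset)
{
    if (has_limit && !limit_is_null)
        return true;            /* LIMIT <n> always needs the node */
    if (has_offset && !offset_is_null && offset != 0)
        return true;            /* OFFSET <nonzero> needs it too */
    return false;               /* absent clause, LIMIT ALL, or OFFSET 0 */
}

int
main(void)
{
    /* LIMIT ALL OFFSET 0 -> 0 (no Limit node) */
    printf("%d\n", limit_node_needed(true, true, true, false, 0));
    /* LIMIT 10 -> 1 */
    printf("%d\n", limit_node_needed(true, false, false, false, 0));
    return 0;
}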

◆ make_group_input_target()

static PathTarget * make_group_input_target ( PlannerInfo * root,
PathTarget * final_target 
)
static

Definition at line 5573 of file planner.c.

5574 {
5575  Query *parse = root->parse;
5576  PathTarget *input_target;
5577  List *non_group_cols;
5578  List *non_group_vars;
5579  int i;
5580  ListCell *lc;
5581 
5582  /*
5583  * We must build a target containing all grouping columns, plus any other
5584  * Vars mentioned in the query's targetlist and HAVING qual.
5585  */
5586  input_target = create_empty_pathtarget();
5587  non_group_cols = NIL;
5588 
5589  i = 0;
5590  foreach(lc, final_target->exprs)
5591  {
5592  Expr *expr = (Expr *) lfirst(lc);
5593  Index sgref = get_pathtarget_sortgroupref(final_target, i);
5594 
5595  if (sgref && root->processed_groupClause &&
5596  get_sortgroupref_clause_noerr(sgref,
5597  root->processed_groupClause) != NULL)
5598  {
5599  /*
5600  * It's a grouping column, so add it to the input target as-is.
5601  *
5602  * Note that the target is logically below the grouping step. So
5603  * with grouping sets we need to remove the RT index of the
5604  * grouping step if there is any from the target expression.
5605  */
5606  if (parse->hasGroupRTE && parse->groupingSets != NIL)
5607  {
5608  Assert(root->group_rtindex > 0);
5609  expr = (Expr *)
5610  remove_nulling_relids((Node *) expr,
5611  bms_make_singleton(root->group_rtindex),
5612  NULL);
5613  }
5614  add_column_to_pathtarget(input_target, expr, sgref);
5615  }
5616  else
5617  {
5618  /*
5619  * Non-grouping column, so just remember the expression for later
5620  * call to pull_var_clause.
5621  */
5622  non_group_cols = lappend(non_group_cols, expr);
5623  }
5624 
5625  i++;
5626  }
5627 
5628  /*
5629  * If there's a HAVING clause, we'll need the Vars it uses, too.
5630  */
5631  if (parse->havingQual)
5632  non_group_cols = lappend(non_group_cols, parse->havingQual);
5633 
5634  /*
5635  * Pull out all the Vars mentioned in non-group cols (plus HAVING), and
5636  * add them to the input target if not already present. (A Var used
5637  * directly as a GROUP BY item will be present already.) Note this
5638  * includes Vars used in resjunk items, so we are covering the needs of
5639  * ORDER BY and window specifications. Vars used within Aggrefs and
5640  * WindowFuncs will be pulled out here, too.
5641  *
5642  * Note that the target is logically below the grouping step. So with
5643  * grouping sets we need to remove the RT index of the grouping step if
5644  * there is any from the non-group Vars.
5645  */
5646  non_group_vars = pull_var_clause((Node *) non_group_cols,
5647  PVC_RECURSE_AGGREGATES |
5648  PVC_RECURSE_WINDOWFUNCS |
5649  PVC_INCLUDE_PLACEHOLDERS);
5650  if (parse->hasGroupRTE && parse->groupingSets != NIL)
5651  {
5652  Assert(root->group_rtindex > 0);
5653  non_group_vars = (List *)
5654  remove_nulling_relids((Node *) non_group_vars,
5655  bms_make_singleton(root->group_rtindex),
5656  NULL);
5657  }
5658  add_new_columns_to_pathtarget(input_target, non_group_vars);
5659 
5660  /* clean up cruft */
5661  list_free(non_group_vars);
5662  list_free(non_group_cols);
5663 
5664  /* XXX this causes some redundant cost calculation ... */
5665  return set_pathtarget_cost_width(root, input_target);
5666 }
Bitmapset * bms_make_singleton(int x)
Definition: bitmapset.c:216
PathTarget * set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
Definition: costsize.c:6342
void list_free(List *list)
Definition: list.c:1546
#define PVC_RECURSE_AGGREGATES
Definition: optimizer.h:188
#define PVC_RECURSE_WINDOWFUNCS
Definition: optimizer.h:190
#define PVC_INCLUDE_PLACEHOLDERS
Definition: optimizer.h:191
#define get_pathtarget_sortgroupref(target, colno)
Definition: pathnodes.h:1558
Node * remove_nulling_relids(Node *node, const Bitmapset *removable_relids, const Bitmapset *except_relids)
SortGroupClause * get_sortgroupref_clause_noerr(Index sortref, List *clauses)
Definition: tlist.c:443
void add_new_columns_to_pathtarget(PathTarget *target, List *exprs)
Definition: tlist.c:752
PathTarget * create_empty_pathtarget(void)
Definition: tlist.c:681
List * pull_var_clause(Node *node, int flags)
Definition: var.c:609

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), Assert, bms_make_singleton(), create_empty_pathtarget(), PathTarget::exprs, get_pathtarget_sortgroupref, get_sortgroupref_clause_noerr(), i, lappend(), lfirst, list_free(), NIL, parse(), pull_var_clause(), PVC_INCLUDE_PLACEHOLDERS, PVC_RECURSE_AGGREGATES, PVC_RECURSE_WINDOWFUNCS, remove_nulling_relids(), root, and set_pathtarget_cost_width().

Referenced by grouping_planner().
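
To make the flow concrete, a worked example (mine, not from planner.c): for SELECT a, sum(b) FROM t GROUP BY a HAVING max(c) > 0 ORDER BY d, final_target holds a, sum(b), and a resjunk entry for d. The first loop keeps a as-is because it carries the GROUP BY sortgroupref, and defers sum(b) and d to non_group_cols; the HAVING qual is appended as well. pull_var_clause() then recurses into the Aggrefs, so the finished input_target is {a, b, c, d}: plain Vars only, since aggregation happens above this target.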

◆ make_grouping_rel()

static RelOptInfo * make_grouping_rel ( PlannerInfo * root,
RelOptInfo * input_rel,
PathTarget * target,
bool  target_parallel_safe,
Node * havingQual 
)
static

Definition at line 3938 of file planner.c.

3941 {
3942  RelOptInfo *grouped_rel;
3943 
3944  if (IS_OTHER_REL(input_rel))
3945  {
3946  grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG,
3947  input_rel->relids);
3948  grouped_rel->reloptkind = RELOPT_OTHER_UPPER_REL;
3949  }
3950  else
3951  {
3952  /*
3953  * By tradition, the relids set for the main grouping relation is
3954  * NULL. (This could be changed, but might require adjustments
3955  * elsewhere.)
3956  */
3957  grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, NULL);
3958  }
3959 
3960  /* Set target. */
3961  grouped_rel->reltarget = target;
3962 
3963  /*
3964  * If the input relation is not parallel-safe, then the grouped relation
3965  * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
3966  * target list and HAVING quals are parallel-safe.
3967  */
3968  if (input_rel->consider_parallel && target_parallel_safe &&
3969  is_parallel_safe(root, (Node *) havingQual))
3970  grouped_rel->consider_parallel = true;
3971 
3972  /*
3973  * If the input rel belongs to a single FDW, so does the grouped rel.
3974  */
3975  grouped_rel->serverid = input_rel->serverid;
3976  grouped_rel->userid = input_rel->userid;
3977  grouped_rel->useridiscurrent = input_rel->useridiscurrent;
3978  grouped_rel->fdwroutine = input_rel->fdwroutine;
3979 
3980  return grouped_rel;
3981 }
@ RELOPT_OTHER_UPPER_REL
Definition: pathnodes.h:832

References RelOptInfo::consider_parallel, fetch_upper_rel(), IS_OTHER_REL, is_parallel_safe(), RelOptInfo::relids, RELOPT_OTHER_UPPER_REL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, RelOptInfo::serverid, UPPERREL_GROUP_AGG, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by create_grouping_paths(), and create_partitionwise_grouping_paths().
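
Note (an editor's gloss): copying serverid, userid, useridiscurrent, and fdwroutine is what later allows an FDW such as postgres_fdw to offer pushed-down aggregate paths for the grouped rel through GetForeignUpperPaths(), the same mechanism grouping_planner() invokes for UPPERREL_FINAL in the listing above.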

◆ make_ordered_path()

static Path * make_ordered_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * path,
Path * cheapest_path,
List * pathkeys,
double  limit_tuples 
)
static

Definition at line 7600 of file planner.c.

7602 {
7603  bool is_sorted;
7604  int presorted_keys;
7605 
7606  is_sorted = pathkeys_count_contained_in(pathkeys,
7607  path->pathkeys,
7608  &presorted_keys);
7609 
7610  if (!is_sorted)
7611  {
7612  /*
7613  * Try at least sorting the cheapest path and also try incrementally
7614  * sorting any path which is partially sorted already (no need to deal
7615  * with paths which have presorted keys when incremental sort is
7616  * disabled unless it's the cheapest input path).
7617  */
7618  if (path != cheapest_path &&
7619  (presorted_keys == 0 || !enable_incremental_sort))
7620  return NULL;
7621 
7622  /*
7623  * We've no need to consider both a sort and incremental sort. We'll
7624  * just do a sort if there are no presorted keys and an incremental
7625  * sort when there are presorted keys.
7626  */
7627  if (presorted_keys == 0 || !enable_incremental_sort)
7628  path = (Path *) create_sort_path(root,
7629  rel,
7630  path,
7631  pathkeys,
7632  limit_tuples);
7633  else