PostgreSQL Source Code  git master
planner.c File Reference
#include "postgres.h"
#include <limits.h>
#include <math.h>
#include "access/genam.h"
#include "access/parallel.h"
#include "access/sysattr.h"
#include "access/table.h"
#include "catalog/pg_aggregate.h"
#include "catalog/pg_constraint.h"
#include "catalog/pg_inherits.h"
#include "catalog/pg_proc.h"
#include "catalog/pg_type.h"
#include "executor/executor.h"
#include "foreign/fdwapi.h"
#include "jit/jit.h"
#include "lib/bipartite_match.h"
#include "lib/knapsack.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "nodes/supportnodes.h"
#include "optimizer/appendinfo.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/optimizer.h"
#include "optimizer/paramassign.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/plancat.h"
#include "optimizer/planmain.h"
#include "optimizer/planner.h"
#include "optimizer/prep.h"
#include "optimizer/subselect.h"
#include "optimizer/tlist.h"
#include "parser/analyze.h"
#include "parser/parse_agg.h"
#include "parser/parse_clause.h"
#include "parser/parse_relation.h"
#include "parser/parsetree.h"
#include "partitioning/partdesc.h"
#include "rewrite/rewriteManip.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
#include "utils/selfuncs.h"
Include dependency graph for planner.c:

Go to the source code of this file.

Data Structures

struct  grouping_sets_data
 
struct  WindowClauseSortData
 
struct  standard_qp_extra
 

Macros

#define EXPRKIND_QUAL   0
 
#define EXPRKIND_TARGET   1
 
#define EXPRKIND_RTFUNC   2
 
#define EXPRKIND_RTFUNC_LATERAL   3
 
#define EXPRKIND_VALUES   4
 
#define EXPRKIND_VALUES_LATERAL   5
 
#define EXPRKIND_LIMIT   6
 
#define EXPRKIND_APPINFO   7
 
#define EXPRKIND_PHV   8
 
#define EXPRKIND_TABLESAMPLE   9
 
#define EXPRKIND_ARBITER_ELEM   10
 
#define EXPRKIND_TABLEFUNC   11
 
#define EXPRKIND_TABLEFUNC_LATERAL   12
 
#define EXPRKIND_GROUPEXPR   13
 

Functions

static Node * preprocess_expression (PlannerInfo *root, Node *expr, int kind)
 
static void preprocess_qual_conditions (PlannerInfo *root, Node *jtnode)
 
static void grouping_planner (PlannerInfo *root, double tuple_fraction, SetOperationStmt *setops)
 
static grouping_sets_data * preprocess_grouping_sets (PlannerInfo *root)
 
static List * remap_to_groupclause_idx (List *groupClause, List *gsets, int *tleref_to_colnum_map)
 
static void preprocess_rowmarks (PlannerInfo *root)
 
static double preprocess_limit (PlannerInfo *root, double tuple_fraction, int64 *offset_est, int64 *count_est)
 
static void remove_useless_groupby_columns (PlannerInfo *root)
 
static List * preprocess_groupclause (PlannerInfo *root, List *force)
 
static List * extract_rollup_sets (List *groupingSets)
 
static List * reorder_grouping_sets (List *groupingSets, List *sortclause)
 
static void standard_qp_callback (PlannerInfo *root, void *extra)
 
static double get_number_of_groups (PlannerInfo *root, double path_rows, grouping_sets_data *gd, List *target_list)
 
static RelOptInfo * create_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, grouping_sets_data *gd)
 
static bool is_degenerate_grouping (PlannerInfo *root)
 
static void create_degenerate_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel)
 
static RelOptInfo * make_grouping_rel (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, Node *havingQual)
 
static void create_ordinary_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, GroupPathExtraData *extra, RelOptInfo **partially_grouped_rel_p)
 
static void consider_groupingsets_paths (PlannerInfo *root, RelOptInfo *grouped_rel, Path *path, bool is_sorted, bool can_hash, grouping_sets_data *gd, const AggClauseCosts *agg_costs, double dNumGroups)
 
static RelOptInfo * create_window_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *input_target, PathTarget *output_target, bool output_target_parallel_safe, WindowFuncLists *wflists, List *activeWindows)
 
static void create_one_window_path (PlannerInfo *root, RelOptInfo *window_rel, Path *path, PathTarget *input_target, PathTarget *output_target, WindowFuncLists *wflists, List *activeWindows)
 
static RelOptInfo * create_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target)
 
static void create_partial_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *final_distinct_rel, PathTarget *target)
 
static RelOptInfo * create_final_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *distinct_rel)
 
static RelOptInfo * create_ordered_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, double limit_tuples)
 
static PathTarget * make_group_input_target (PlannerInfo *root, PathTarget *final_target)
 
static PathTarget * make_partial_grouping_target (PlannerInfo *root, PathTarget *grouping_target, Node *havingQual)
 
static List * postprocess_setop_tlist (List *new_tlist, List *orig_tlist)
 
static void optimize_window_clauses (PlannerInfo *root, WindowFuncLists *wflists)
 
static List * select_active_windows (PlannerInfo *root, WindowFuncLists *wflists)
 
static PathTarget * make_window_input_target (PlannerInfo *root, PathTarget *final_target, List *activeWindows)
 
static List * make_pathkeys_for_window (PlannerInfo *root, WindowClause *wc, List *tlist)
 
static PathTarget * make_sort_input_target (PlannerInfo *root, PathTarget *final_target, bool *have_postponed_srfs)
 
static void adjust_paths_for_srfs (PlannerInfo *root, RelOptInfo *rel, List *targets, List *targets_contain_srfs)
 
static void add_paths_to_grouping_rel (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, double dNumGroups, GroupPathExtraData *extra)
 
static RelOptInfo * create_partial_grouping_paths (PlannerInfo *root, RelOptInfo *grouped_rel, RelOptInfo *input_rel, grouping_sets_data *gd, GroupPathExtraData *extra, bool force_rel_creation)
 
static void gather_grouping_paths (PlannerInfo *root, RelOptInfo *rel)
 
static bool can_partial_agg (PlannerInfo *root)
 
static void apply_scanjoin_target_to_paths (PlannerInfo *root, RelOptInfo *rel, List *scanjoin_targets, List *scanjoin_targets_contain_srfs, bool scanjoin_target_parallel_safe, bool tlist_same_exprs)
 
static void create_partitionwise_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, PartitionwiseAggregateType patype, GroupPathExtraData *extra)
 
static bool group_by_has_partkey (RelOptInfo *input_rel, List *targetList, List *groupClause)
 
static int common_prefix_cmp (const void *a, const void *b)
 
static List * generate_setop_child_grouplist (SetOperationStmt *op, List *targetlist)
 
PlannedStmt * planner (Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams)
 
PlannedStmt * standard_planner (Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams)
 
PlannerInfo * subquery_planner (PlannerGlobal *glob, Query *parse, PlannerInfo *parent_root, bool hasRecursion, double tuple_fraction, SetOperationStmt *setops)
 
Expr * preprocess_phv_expression (PlannerInfo *root, Expr *expr)
 
RowMarkType select_rowmark_type (RangeTblEntry *rte, LockClauseStrength strength)
 
bool limit_needed (Query *parse)
 
static bool has_volatile_pathkey (List *keys)
 
static void adjust_group_pathkeys_for_groupagg (PlannerInfo *root)
 
void mark_partial_aggref (Aggref *agg, AggSplit aggsplit)
 
Path * get_cheapest_fractional_path (RelOptInfo *rel, double tuple_fraction)
 
Expr * expression_planner (Expr *expr)
 
Expr * expression_planner_with_deps (Expr *expr, List **relationOids, List **invalItems)
 
bool plan_cluster_use_sort (Oid tableOid, Oid indexOid)
 
int plan_create_index_workers (Oid tableOid, Oid indexOid)
 
static Path * make_ordered_path (PlannerInfo *root, RelOptInfo *rel, Path *path, Path *cheapest_path, List *pathkeys)
 

Variables

double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION
 
int debug_parallel_query = DEBUG_PARALLEL_OFF
 
bool parallel_leader_participation = true
 
planner_hook_type planner_hook = NULL
 
create_upper_paths_hook_type create_upper_paths_hook = NULL
 

Macro Definition Documentation

◆ EXPRKIND_APPINFO

#define EXPRKIND_APPINFO   7

Definition at line 86 of file planner.c.

◆ EXPRKIND_ARBITER_ELEM

#define EXPRKIND_ARBITER_ELEM   10

Definition at line 89 of file planner.c.

◆ EXPRKIND_GROUPEXPR

#define EXPRKIND_GROUPEXPR   13

Definition at line 92 of file planner.c.

◆ EXPRKIND_LIMIT

#define EXPRKIND_LIMIT   6

Definition at line 85 of file planner.c.

◆ EXPRKIND_PHV

#define EXPRKIND_PHV   8

Definition at line 87 of file planner.c.

◆ EXPRKIND_QUAL

#define EXPRKIND_QUAL   0

Definition at line 79 of file planner.c.

◆ EXPRKIND_RTFUNC

#define EXPRKIND_RTFUNC   2

Definition at line 81 of file planner.c.

◆ EXPRKIND_RTFUNC_LATERAL

#define EXPRKIND_RTFUNC_LATERAL   3

Definition at line 82 of file planner.c.

◆ EXPRKIND_TABLEFUNC

#define EXPRKIND_TABLEFUNC   11

Definition at line 90 of file planner.c.

◆ EXPRKIND_TABLEFUNC_LATERAL

#define EXPRKIND_TABLEFUNC_LATERAL   12

Definition at line 91 of file planner.c.

◆ EXPRKIND_TABLESAMPLE

#define EXPRKIND_TABLESAMPLE   9

Definition at line 88 of file planner.c.

◆ EXPRKIND_TARGET

#define EXPRKIND_TARGET   1

Definition at line 80 of file planner.c.

◆ EXPRKIND_VALUES

#define EXPRKIND_VALUES   4

Definition at line 83 of file planner.c.

◆ EXPRKIND_VALUES_LATERAL

#define EXPRKIND_VALUES_LATERAL   5

Definition at line 84 of file planner.c.

Function Documentation

◆ add_paths_to_grouping_rel()

static void add_paths_to_grouping_rel ( PlannerInfo *root,
RelOptInfo *input_rel,
RelOptInfo *grouped_rel,
RelOptInfo *partially_grouped_rel,
const AggClauseCosts *agg_costs,
grouping_sets_data *gd,
double  dNumGroups,
GroupPathExtraData *extra 
)
static

Definition at line 7073 of file planner.c.

7079 {
7080  Query *parse = root->parse;
7081  Path *cheapest_path = input_rel->cheapest_total_path;
7082  ListCell *lc;
7083  bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7084  bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7085  List *havingQual = (List *) extra->havingQual;
7086  AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7087 
7088  if (can_sort)
7089  {
7090  /*
7091  * Use any available suitably-sorted path as input, and also consider
7092  * sorting the cheapest-total path and incremental sort on any paths
7093  * with presorted keys.
7094  */
7095  foreach(lc, input_rel->pathlist)
7096  {
7097  ListCell *lc2;
7098  Path *path = (Path *) lfirst(lc);
7099  Path *path_save = path;
7100  List *pathkey_orderings = NIL;
7101 
7102  /* generate alternative group orderings that might be useful */
7103  pathkey_orderings = get_useful_group_keys_orderings(root, path);
7104 
7105  Assert(list_length(pathkey_orderings) > 0);
7106 
7107  foreach(lc2, pathkey_orderings)
7108  {
7109  GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7110 
7111  /* restore the path (we replace it in the loop) */
7112  path = path_save;
7113 
7114  path = make_ordered_path(root,
7115  grouped_rel,
7116  path,
7117  cheapest_path,
7118  info->pathkeys);
7119  if (path == NULL)
7120  continue;
7121 
7122  /* Now decide what to stick atop it */
7123  if (parse->groupingSets)
7124  {
7125  consider_groupingsets_paths(root, grouped_rel,
7126  path, true, can_hash,
7127  gd, agg_costs, dNumGroups);
7128  }
7129  else if (parse->hasAggs)
7130  {
7131  /*
7132  * We have aggregation, possibly with plain GROUP BY. Make
7133  * an AggPath.
7134  */
7135  add_path(grouped_rel, (Path *)
7136  create_agg_path(root,
7137  grouped_rel,
7138  path,
7139  grouped_rel->reltarget,
7140  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7141  AGGSPLIT_SIMPLE,
7142  info->clauses,
7143  havingQual,
7144  agg_costs,
7145  dNumGroups));
7146  }
7147  else if (parse->groupClause)
7148  {
7149  /*
7150  * We have GROUP BY without aggregation or grouping sets.
7151  * Make a GroupPath.
7152  */
7153  add_path(grouped_rel, (Path *)
7154  create_group_path(root,
7155  grouped_rel,
7156  path,
7157  info->clauses,
7158  havingQual,
7159  dNumGroups));
7160  }
7161  else
7162  {
7163  /* Other cases should have been handled above */
7164  Assert(false);
7165  }
7166  }
7167  }
7168 
7169  /*
7170  * Instead of operating directly on the input relation, we can
7171  * consider finalizing a partially aggregated path.
7172  */
7173  if (partially_grouped_rel != NULL)
7174  {
7175  foreach(lc, partially_grouped_rel->pathlist)
7176  {
7177  ListCell *lc2;
7178  Path *path = (Path *) lfirst(lc);
7179  Path *path_save = path;
7180  List *pathkey_orderings = NIL;
7181 
7182  /* generate alternative group orderings that might be useful */
7183  pathkey_orderings = get_useful_group_keys_orderings(root, path);
7184 
7185  Assert(list_length(pathkey_orderings) > 0);
7186 
7187  /* process all potentially interesting grouping reorderings */
7188  foreach(lc2, pathkey_orderings)
7189  {
7190  GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7191 
7192  /* restore the path (we replace it in the loop) */
7193  path = path_save;
7194 
7195  path = make_ordered_path(root,
7196  grouped_rel,
7197  path,
7198  partially_grouped_rel->cheapest_total_path,
7199  info->pathkeys);
7200 
7201  if (path == NULL)
7202  continue;
7203 
7204  if (parse->hasAggs)
7205  add_path(grouped_rel, (Path *)
7206  create_agg_path(root,
7207  grouped_rel,
7208  path,
7209  grouped_rel->reltarget,
7210  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7211  AGGSPLIT_FINAL_DESERIAL,
7212  info->clauses,
7213  havingQual,
7214  agg_final_costs,
7215  dNumGroups));
7216  else
7217  add_path(grouped_rel, (Path *)
7218  create_group_path(root,
7219  grouped_rel,
7220  path,
7221  info->clauses,
7222  havingQual,
7223  dNumGroups));
7224 
7225  }
7226  }
7227  }
7228  }
7229 
7230  if (can_hash)
7231  {
7232  if (parse->groupingSets)
7233  {
7234  /*
7235  * Try for a hash-only groupingsets path over unsorted input.
7236  */
7237  consider_groupingsets_paths(root, grouped_rel,
7238  cheapest_path, false, true,
7239  gd, agg_costs, dNumGroups);
7240  }
7241  else
7242  {
7243  /*
7244  * Generate a HashAgg Path. We just need an Agg over the
7245  * cheapest-total input path, since input order won't matter.
7246  */
7247  add_path(grouped_rel, (Path *)
7248  create_agg_path(root, grouped_rel,
7249  cheapest_path,
7250  grouped_rel->reltarget,
7251  AGG_HASHED,
7252  AGGSPLIT_SIMPLE,
7253  root->processed_groupClause,
7254  havingQual,
7255  agg_costs,
7256  dNumGroups));
7257  }
7258 
7259  /*
7260  * Generate a Finalize HashAgg Path atop of the cheapest partially
7261  * grouped path, assuming there is one
7262  */
7263  if (partially_grouped_rel && partially_grouped_rel->pathlist)
7264  {
7265  Path *path = partially_grouped_rel->cheapest_total_path;
7266 
7267  add_path(grouped_rel, (Path *)
7268  create_agg_path(root,
7269  grouped_rel,
7270  path,
7271  grouped_rel->reltarget,
7272  AGG_HASHED,
7273  AGGSPLIT_FINAL_DESERIAL,
7274  root->processed_groupClause,
7275  havingQual,
7276  agg_final_costs,
7277  dNumGroups));
7278  }
7279  }
7280 
7281  /*
7282  * When partitionwise aggregate is used, we might have fully aggregated
7283  * paths in the partial pathlist, because add_paths_to_append_rel() will
7284  * consider a path for grouped_rel consisting of a Parallel Append of
7285  * non-partial paths from each child.
7286  */
7287  if (grouped_rel->partial_pathlist != NIL)
7288  gather_grouping_paths(root, grouped_rel);
7289 }
#define Assert(condition)
Definition: c.h:858
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:77
@ AGG_SORTED
Definition: nodes.h:355
@ AGG_HASHED
Definition: nodes.h:356
@ AGG_PLAIN
Definition: nodes.h:354
@ AGGSPLIT_FINAL_DESERIAL
Definition: nodes.h:381
@ AGGSPLIT_SIMPLE
Definition: nodes.h:377
List * get_useful_group_keys_orderings(PlannerInfo *root, Path *path)
Definition: pathkeys.c:466
GroupPath * create_group_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *groupClause, List *qual, double numGroups)
Definition: pathnode.c:3127
AggPath * create_agg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, AggStrategy aggstrategy, AggSplit aggsplit, List *groupClause, List *qual, const AggClauseCosts *aggcosts, double numGroups)
Definition: pathnode.c:3240
void add_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:461
#define GROUPING_CAN_USE_HASH
Definition: pathnodes.h:3261
#define GROUPING_CAN_USE_SORT
Definition: pathnodes.h:3260
#define lfirst(lc)
Definition: pg_list.h:172
static int list_length(const List *l)
Definition: pg_list.h:152
#define NIL
Definition: pg_list.h:68
static void gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
Definition: planner.c:7607
static void consider_groupingsets_paths(PlannerInfo *root, RelOptInfo *grouped_rel, Path *path, bool is_sorted, bool can_hash, grouping_sets_data *gd, const AggClauseCosts *agg_costs, double dNumGroups)
Definition: planner.c:4210
static Path * make_ordered_path(PlannerInfo *root, RelOptInfo *rel, Path *path, Path *cheapest_path, List *pathkeys)
Definition: planner.c:7022
tree ctl root
Definition: radixtree.h:1886
static struct subre * parse(struct vars *v, int stopper, int type, struct state *init, struct state *final)
Definition: regcomp.c:715
AggClauseCosts agg_final_costs
Definition: pathnodes.h:3301
Definition: pg_list.h:54
struct PathTarget * reltarget
Definition: pathnodes.h:893
List * pathlist
Definition: pathnodes.h:898
struct Path * cheapest_total_path
Definition: pathnodes.h:902
List * partial_pathlist
Definition: pathnodes.h:900

References add_path(), GroupPathExtraData::agg_final_costs, AGG_HASHED, AGG_PLAIN, AGG_SORTED, AGGSPLIT_FINAL_DESERIAL, AGGSPLIT_SIMPLE, Assert, RelOptInfo::cheapest_total_path, GroupByOrdering::clauses, consider_groupingsets_paths(), create_agg_path(), create_group_path(), GroupPathExtraData::flags, gather_grouping_paths(), get_useful_group_keys_orderings(), GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, GroupPathExtraData::havingQual, if(), lfirst, list_length(), make_ordered_path(), NIL, parse(), RelOptInfo::partial_pathlist, GroupByOrdering::pathkeys, RelOptInfo::pathlist, RelOptInfo::reltarget, and root.

Referenced by create_ordinary_grouping_paths().

◆ adjust_group_pathkeys_for_groupagg()

static void adjust_group_pathkeys_for_groupagg ( PlannerInfo *root)
static

Definition at line 3311 of file planner.c.

3312 {
3313  List *grouppathkeys = root->group_pathkeys;
3314  List *bestpathkeys;
3315  Bitmapset *bestaggs;
3316  Bitmapset *unprocessed_aggs;
3317  ListCell *lc;
3318  int i;
3319 
3320  /* Shouldn't be here if there are grouping sets */
3321  Assert(root->parse->groupingSets == NIL);
3322  /* Shouldn't be here unless there are some ordered aggregates */
3323  Assert(root->numOrderedAggs > 0);
3324 
3325  /* Do nothing if disabled */
3326  if (!enable_presorted_aggregate)
3327  return;
3328 
3329  /*
3330  * Make a first pass over all AggInfos to collect a Bitmapset containing
3331  * the indexes of all AggInfos to be processed below.
3332  */
3333  unprocessed_aggs = NULL;
3334  foreach(lc, root->agginfos)
3335  {
3336  AggInfo *agginfo = lfirst_node(AggInfo, lc);
3337  Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3338 
3339  if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
3340  continue;
3341 
3342  /* only add aggregates with a DISTINCT or ORDER BY */
3343  if (aggref->aggdistinct != NIL || aggref->aggorder != NIL)
3344  unprocessed_aggs = bms_add_member(unprocessed_aggs,
3345  foreach_current_index(lc));
3346  }
3347 
3348  /*
3349  * Now process all the unprocessed_aggs to find the best set of pathkeys
3350  * for the given set of aggregates.
3351  *
3352  * On the first outer loop here 'bestaggs' will be empty. We'll populate
3353  * this during the first loop using the pathkeys for the very first
3354  * AggInfo then taking any stronger pathkeys from any other AggInfos with
3355  * a more strict set of compatible pathkeys. Once the outer loop is
3356  * complete, we mark off all the aggregates with compatible pathkeys then
3357  * remove those from the unprocessed_aggs and repeat the process to try to
3358  * find another set of pathkeys that are suitable for a larger number of
3359  * aggregates. The outer loop will stop when there are not enough
3360  * unprocessed aggregates for it to be possible to find a set of pathkeys
3361  * to suit a larger number of aggregates.
3362  */
3363  bestpathkeys = NIL;
3364  bestaggs = NULL;
3365  while (bms_num_members(unprocessed_aggs) > bms_num_members(bestaggs))
3366  {
3367  Bitmapset *aggindexes = NULL;
3368  List *currpathkeys = NIL;
3369 
3370  i = -1;
3371  while ((i = bms_next_member(unprocessed_aggs, i)) >= 0)
3372  {
3373  AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3374  Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3375  List *sortlist;
3376  List *pathkeys;
3377 
3378  if (aggref->aggdistinct != NIL)
3379  sortlist = aggref->aggdistinct;
3380  else
3381  sortlist = aggref->aggorder;
3382 
3383  pathkeys = make_pathkeys_for_sortclauses(root, sortlist,
3384  aggref->args);
3385 
3386  /*
3387  * Ignore Aggrefs which have volatile functions in their ORDER BY
3388  * or DISTINCT clause.
3389  */
3390  if (has_volatile_pathkey(pathkeys))
3391  {
3392  unprocessed_aggs = bms_del_member(unprocessed_aggs, i);
3393  continue;
3394  }
3395 
3396  /*
3397  * When not set yet, take the pathkeys from the first unprocessed
3398  * aggregate.
3399  */
3400  if (currpathkeys == NIL)
3401  {
3402  currpathkeys = pathkeys;
3403 
3404  /* include the GROUP BY pathkeys, if they exist */
3405  if (grouppathkeys != NIL)
3406  currpathkeys = append_pathkeys(list_copy(grouppathkeys),
3407  currpathkeys);
3408 
3409  /* record that we found pathkeys for this aggregate */
3410  aggindexes = bms_add_member(aggindexes, i);
3411  }
3412  else
3413  {
3414  /* now look for a stronger set of matching pathkeys */
3415 
3416  /* include the GROUP BY pathkeys, if they exist */
3417  if (grouppathkeys != NIL)
3418  pathkeys = append_pathkeys(list_copy(grouppathkeys),
3419  pathkeys);
3420 
3421  /* are 'pathkeys' compatible or better than 'currpathkeys'? */
3422  switch (compare_pathkeys(currpathkeys, pathkeys))
3423  {
3424  case PATHKEYS_BETTER2:
3425  /* 'pathkeys' are stronger, use these ones instead */
3426  currpathkeys = pathkeys;
3427  /* FALLTHROUGH */
3428 
3429  case PATHKEYS_BETTER1:
3430  /* 'pathkeys' are less strict */
3431  /* FALLTHROUGH */
3432 
3433  case PATHKEYS_EQUAL:
3434  /* mark this aggregate as covered by 'currpathkeys' */
3435  aggindexes = bms_add_member(aggindexes, i);
3436  break;
3437 
3438  case PATHKEYS_DIFFERENT:
3439  break;
3440  }
3441  }
3442  }
3443 
3444  /* remove the aggregates that we've just processed */
3445  unprocessed_aggs = bms_del_members(unprocessed_aggs, aggindexes);
3446 
3447  /*
3448  * If this pass included more aggregates than the previous best then
3449  * use these ones as the best set.
3450  */
3451  if (bms_num_members(aggindexes) > bms_num_members(bestaggs))
3452  {
3453  bestaggs = aggindexes;
3454  bestpathkeys = currpathkeys;
3455  }
3456  }
3457 
3458  /*
3459  * If we found any ordered aggregates, update root->group_pathkeys to add
3460  * the best set of aggregate pathkeys. Note that bestpathkeys includes
3461  * the original GROUP BY pathkeys already.
3462  */
3463  if (bestpathkeys != NIL)
3464  root->group_pathkeys = bestpathkeys;
3465 
3466  /*
3467  * Now that we've found the best set of aggregates we can set the
3468  * presorted flag to indicate to the executor that it needn't bother
3469  * performing a sort for these Aggrefs. We're able to do this now as
3470  * there's no chance of a Hash Aggregate plan as create_grouping_paths
3471  * will not mark the GROUP BY as GROUPING_CAN_USE_HASH due to the presence
3472  * of ordered aggregates.
3473  */
3474  i = -1;
3475  while ((i = bms_next_member(bestaggs, i)) >= 0)
3476  {
3477  AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3478 
3479  foreach(lc, agginfo->aggrefs)
3480  {
3481  Aggref *aggref = lfirst_node(Aggref, lc);
3482 
3483  aggref->aggpresorted = true;
3484  }
3485  }
3486 }
int bms_next_member(const Bitmapset *a, int prevbit)
Definition: bitmapset.c:1306
int bms_num_members(const Bitmapset *a)
Definition: bitmapset.c:751
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:815
Bitmapset * bms_del_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:1161
Bitmapset * bms_del_member(Bitmapset *a, int x)
Definition: bitmapset.c:868
bool enable_presorted_aggregate
Definition: costsize.c:164
int i
Definition: isn.c:73
List * list_copy(const List *oldlist)
Definition: list.c:1573
List * append_pathkeys(List *target, List *source)
Definition: pathkeys.c:107
List * make_pathkeys_for_sortclauses(PlannerInfo *root, List *sortclauses, List *tlist)
Definition: pathkeys.c:1334
PathKeysComparison compare_pathkeys(List *keys1, List *keys2)
Definition: pathkeys.c:303
@ PATHKEYS_BETTER2
Definition: paths.h:205
@ PATHKEYS_BETTER1
Definition: paths.h:204
@ PATHKEYS_DIFFERENT
Definition: paths.h:206
@ PATHKEYS_EQUAL
Definition: paths.h:203
#define lfirst_node(type, lc)
Definition: pg_list.h:176
#define linitial_node(type, l)
Definition: pg_list.h:181
#define foreach_current_index(var_or_cell)
Definition: pg_list.h:403
#define list_nth_node(type, list, n)
Definition: pg_list.h:327
static bool has_volatile_pathkey(List *keys)
Definition: planner.c:3266
List * aggrefs
Definition: pathnodes.h:3383
List * aggdistinct
Definition: primnodes.h:474
List * args
Definition: primnodes.h:468
List * aggorder
Definition: primnodes.h:471

References Aggref::aggdistinct, Aggref::aggorder, AggInfo::aggrefs, append_pathkeys(), Aggref::args, Assert, bms_add_member(), bms_del_member(), bms_del_members(), bms_next_member(), bms_num_members(), compare_pathkeys(), enable_presorted_aggregate, foreach_current_index, has_volatile_pathkey(), i, lfirst_node, linitial_node, list_copy(), list_nth_node, make_pathkeys_for_sortclauses(), NIL, PATHKEYS_BETTER1, PATHKEYS_BETTER2, PATHKEYS_DIFFERENT, PATHKEYS_EQUAL, and root.

Referenced by standard_qp_callback().

◆ adjust_paths_for_srfs()

static void adjust_paths_for_srfs ( PlannerInfo *root,
RelOptInfo *rel,
List *targets,
List *targets_contain_srfs 
)
static

Definition at line 6571 of file planner.c.

6573 {
6574  ListCell *lc;
6575 
6576  Assert(list_length(targets) == list_length(targets_contain_srfs));
6577  Assert(!linitial_int(targets_contain_srfs));
6578 
6579  /* If no SRFs appear at this plan level, nothing to do */
6580  if (list_length(targets) == 1)
6581  return;
6582 
6583  /*
6584  * Stack SRF-evaluation nodes atop each path for the rel.
6585  *
6586  * In principle we should re-run set_cheapest() here to identify the
6587  * cheapest path, but it seems unlikely that adding the same tlist eval
6588  * costs to all the paths would change that, so we don't bother. Instead,
6589  * just assume that the cheapest-startup and cheapest-total paths remain
6590  * so. (There should be no parameterized paths anymore, so we needn't
6591  * worry about updating cheapest_parameterized_paths.)
6592  */
6593  foreach(lc, rel->pathlist)
6594  {
6595  Path *subpath = (Path *) lfirst(lc);
6596  Path *newpath = subpath;
6597  ListCell *lc1,
6598  *lc2;
6599 
6600  Assert(subpath->param_info == NULL);
6601  forboth(lc1, targets, lc2, targets_contain_srfs)
6602  {
6603  PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6604  bool contains_srfs = (bool) lfirst_int(lc2);
6605 
6606  /* If this level doesn't contain SRFs, do regular projection */
6607  if (contains_srfs)
6608  newpath = (Path *) create_set_projection_path(root,
6609  rel,
6610  newpath,
6611  thistarget);
6612  else
6613  newpath = (Path *) apply_projection_to_path(root,
6614  rel,
6615  newpath,
6616  thistarget);
6617  }
6618  lfirst(lc) = newpath;
6619  if (subpath == rel->cheapest_startup_path)
6620  rel->cheapest_startup_path = newpath;
6621  if (subpath == rel->cheapest_total_path)
6622  rel->cheapest_total_path = newpath;
6623  }
6624 
6625  /* Likewise for partial paths, if any */
6626  foreach(lc, rel->partial_pathlist)
6627  {
6628  Path *subpath = (Path *) lfirst(lc);
6629  Path *newpath = subpath;
6630  ListCell *lc1,
6631  *lc2;
6632 
6633  Assert(subpath->param_info == NULL);
6634  forboth(lc1, targets, lc2, targets_contain_srfs)
6635  {
6636  PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6637  bool contains_srfs = (bool) lfirst_int(lc2);
6638 
6639  /* If this level doesn't contain SRFs, do regular projection */
6640  if (contains_srfs)
6641  newpath = (Path *) create_set_projection_path(root,
6642  rel,
6643  newpath,
6644  thistarget);
6645  else
6646  {
6647  /* avoid apply_projection_to_path, in case of multiple refs */
6648  newpath = (Path *) create_projection_path(root,
6649  rel,
6650  newpath,
6651  thistarget);
6652  }
6653  }
6654  lfirst(lc) = newpath;
6655  }
6656 }
unsigned char bool
Definition: c.h:456
Datum subpath(PG_FUNCTION_ARGS)
Definition: ltree_op.c:310
ProjectionPath * create_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2763
ProjectSetPath * create_set_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2962
Path * apply_projection_to_path(PlannerInfo *root, RelOptInfo *rel, Path *path, PathTarget *target)
Definition: pathnode.c:2873
#define forboth(cell1, list1, cell2, list2)
Definition: pg_list.h:518
#define lfirst_int(lc)
Definition: pg_list.h:173
#define linitial_int(l)
Definition: pg_list.h:179
struct Path * cheapest_startup_path
Definition: pathnodes.h:901

References apply_projection_to_path(), Assert, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, create_projection_path(), create_set_projection_path(), forboth, lfirst, lfirst_int, lfirst_node, linitial_int, list_length(), RelOptInfo::partial_pathlist, RelOptInfo::pathlist, root, and subpath().

Referenced by apply_scanjoin_target_to_paths(), and grouping_planner().

◆ apply_scanjoin_target_to_paths()

static void apply_scanjoin_target_to_paths ( PlannerInfo * root,
RelOptInfo * rel,
List * scanjoin_targets,
List * scanjoin_targets_contain_srfs,
bool  scanjoin_target_parallel_safe,
bool  tlist_same_exprs 
)
static

Definition at line 7732 of file planner.c.

7738 {
7739  bool rel_is_partitioned = IS_PARTITIONED_REL(rel);
7740  PathTarget *scanjoin_target;
7741  ListCell *lc;
7742 
7743  /* This recurses, so be paranoid. */
7744  check_stack_depth();
7745 
7746  /*
7747  * If the rel is partitioned, we want to drop its existing paths and
7748  * generate new ones. This function would still be correct if we kept the
7749  * existing paths: we'd modify them to generate the correct target above
7750  * the partitioning Append, and then they'd compete on cost with paths
7751  * generating the target below the Append. However, in our current cost
7752  * model the latter way is always the same or cheaper cost, so modifying
7753  * the existing paths would just be useless work. Moreover, when the cost
7754  * is the same, varying roundoff errors might sometimes allow an existing
7755  * path to be picked, resulting in undesirable cross-platform plan
7756  * variations. So we drop old paths and thereby force the work to be done
7757  * below the Append, except in the case of a non-parallel-safe target.
7758  *
7759  * Some care is needed, because we have to allow
7760  * generate_useful_gather_paths to see the old partial paths in the next
7761  * stanza. Hence, zap the main pathlist here, then allow
7762  * generate_useful_gather_paths to add path(s) to the main list, and
7763  * finally zap the partial pathlist.
7764  */
7765  if (rel_is_partitioned)
7766  rel->pathlist = NIL;
7767 
7768  /*
7769  * If the scan/join target is not parallel-safe, partial paths cannot
7770  * generate it.
7771  */
7772  if (!scanjoin_target_parallel_safe)
7773  {
7774  /*
7775  * Since we can't generate the final scan/join target in parallel
7776  * workers, this is our last opportunity to use any partial paths that
7777  * exist; so build Gather path(s) that use them and emit whatever the
7778  * current reltarget is. We don't do this in the case where the
7779  * target is parallel-safe, since we will be able to generate superior
7780  * paths by doing it after the final scan/join target has been
7781  * applied.
7782  */
7783  generate_useful_gather_paths(root, rel, false);
7784 
7785  /* Can't use parallel query above this level. */
7786  rel->partial_pathlist = NIL;
7787  rel->consider_parallel = false;
7788  }
7789 
7790  /* Finish dropping old paths for a partitioned rel, per comment above */
7791  if (rel_is_partitioned)
7792  rel->partial_pathlist = NIL;
7793 
7794  /* Extract SRF-free scan/join target. */
7795  scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
7796 
7797  /*
7798  * Apply the SRF-free scan/join target to each existing path.
7799  *
7800  * If the tlist exprs are the same, we can just inject the sortgroupref
7801  * information into the existing pathtargets. Otherwise, replace each
7802  * path with a projection path that generates the SRF-free scan/join
7803  * target. This can't change the ordering of paths within rel->pathlist,
7804  * so we just modify the list in place.
7805  */
7806  foreach(lc, rel->pathlist)
7807  {
7808  Path *subpath = (Path *) lfirst(lc);
7809 
7810  /* Shouldn't have any parameterized paths anymore */
7811  Assert(subpath->param_info == NULL);
7812 
7813  if (tlist_same_exprs)
7814  subpath->pathtarget->sortgrouprefs =
7815  scanjoin_target->sortgrouprefs;
7816  else
7817  {
7818  Path *newpath;
7819 
7820  newpath = (Path *) create_projection_path(root, rel, subpath,
7821  scanjoin_target);
7822  lfirst(lc) = newpath;
7823  }
7824  }
7825 
7826  /* Likewise adjust the targets for any partial paths. */
7827  foreach(lc, rel->partial_pathlist)
7828  {
7829  Path *subpath = (Path *) lfirst(lc);
7830 
7831  /* Shouldn't have any parameterized paths anymore */
7832  Assert(subpath->param_info == NULL);
7833 
7834  if (tlist_same_exprs)
7835  subpath->pathtarget->sortgrouprefs =
7836  scanjoin_target->sortgrouprefs;
7837  else
7838  {
7839  Path *newpath;
7840 
7841  newpath = (Path *) create_projection_path(root, rel, subpath,
7842  scanjoin_target);
7843  lfirst(lc) = newpath;
7844  }
7845  }
7846 
7847  /*
7848  * Now, if final scan/join target contains SRFs, insert ProjectSetPath(s)
7849  * atop each existing path. (Note that this function doesn't look at the
7850  * cheapest-path fields, which is a good thing because they're bogus right
7851  * now.)
7852  */
7853  if (root->parse->hasTargetSRFs)
7854  adjust_paths_for_srfs(root, rel,
7855  scanjoin_targets,
7856  scanjoin_targets_contain_srfs);
7857 
7858  /*
7859  * Update the rel's target to be the final (with SRFs) scan/join target.
7860  * This now matches the actual output of all the paths, and we might get
7861  * confused in createplan.c if they don't agree. We must do this now so
7862  * that any append paths made in the next part will use the correct
7863  * pathtarget (cf. create_append_path).
7864  *
7865  * Note that this is also necessary if GetForeignUpperPaths() gets called
7866  * on the final scan/join relation or on any of its children, since the
7867  * FDW might look at the rel's target to create ForeignPaths.
7868  */
7869  rel->reltarget = llast_node(PathTarget, scanjoin_targets);
7870 
7871  /*
7872  * If the relation is partitioned, recursively apply the scan/join target
7873  * to all partitions, and generate brand-new Append paths in which the
7874  * scan/join target is computed below the Append rather than above it.
7875  * Since Append is not projection-capable, that might save a separate
7876  * Result node, and it also is important for partitionwise aggregate.
7877  */
7878  if (rel_is_partitioned)
7879  {
7880  List *live_children = NIL;
7881  int i;
7882 
7883  /* Adjust each partition. */
7884  i = -1;
7885  while ((i = bms_next_member(rel->live_parts, i)) >= 0)
7886  {
7887  RelOptInfo *child_rel = rel->part_rels[i];
7888  AppendRelInfo **appinfos;
7889  int nappinfos;
7890  List *child_scanjoin_targets = NIL;
7891 
7892  Assert(child_rel != NULL);
7893 
7894  /* Dummy children can be ignored. */
7895  if (IS_DUMMY_REL(child_rel))
7896  continue;
7897 
7898  /* Translate scan/join targets for this child. */
7899  appinfos = find_appinfos_by_relids(root, child_rel->relids,
7900  &nappinfos);
7901  foreach(lc, scanjoin_targets)
7902  {
7903  PathTarget *target = lfirst_node(PathTarget, lc);
7904 
7905  target = copy_pathtarget(target);
7906  target->exprs = (List *)
7907  adjust_appendrel_attrs(root,
7908  (Node *) target->exprs,
7909  nappinfos, appinfos);
7910  child_scanjoin_targets = lappend(child_scanjoin_targets,
7911  target);
7912  }
7913  pfree(appinfos);
7914 
7915  /* Recursion does the real work. */
7916  apply_scanjoin_target_to_paths(root, child_rel,
7917  child_scanjoin_targets,
7918  scanjoin_targets_contain_srfs,
7919  scanjoin_target_parallel_safe,
7920  tlist_same_exprs);
7921 
7922  /* Save non-dummy children for Append paths. */
7923  if (!IS_DUMMY_REL(child_rel))
7924  live_children = lappend(live_children, child_rel);
7925  }
7926 
7927  /* Build new paths for this relation by appending child paths. */
7928  add_paths_to_append_rel(root, rel, live_children);
7929  }
7930 
7931  /*
7932  * Consider generating Gather or Gather Merge paths. We must only do this
7933  * if the relation is parallel safe, and we don't do it for child rels to
7934  * avoid creating multiple Gather nodes within the same plan. We must do
7935  * this after all paths have been generated and before set_cheapest, since
7936  * one of the generated paths may turn out to be the cheapest one.
7937  */
7938  if (rel->consider_parallel && !IS_OTHER_REL(rel))
7939  generate_useful_gather_paths(root, rel, false);
7940 
7941  /*
7942  * Reassess which paths are the cheapest, now that we've potentially added
7943  * new Gather (or Gather Merge) and/or Append (or MergeAppend) paths to
7944  * this relation.
7945  */
7946  set_cheapest(rel);
7947 }
void generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
Definition: allpaths.c:3201
void add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, List *live_childrels)
Definition: allpaths.c:1314
AppendRelInfo ** find_appinfos_by_relids(PlannerInfo *root, Relids relids, int *nappinfos)
Definition: appendinfo.c:737
Node * adjust_appendrel_attrs(PlannerInfo *root, Node *node, int nappinfos, AppendRelInfo **appinfos)
Definition: appendinfo.c:200
List * lappend(List *list, void *datum)
Definition: list.c:339
void pfree(void *pointer)
Definition: mcxt.c:1521
void set_cheapest(RelOptInfo *parent_rel)
Definition: pathnode.c:269
#define IS_DUMMY_REL(r)
Definition: pathnodes.h:1953
#define IS_PARTITIONED_REL(rel)
Definition: pathnodes.h:1062
#define IS_OTHER_REL(rel)
Definition: pathnodes.h:854
#define llast_node(type, l)
Definition: pg_list.h:202
static void apply_scanjoin_target_to_paths(PlannerInfo *root, RelOptInfo *rel, List *scanjoin_targets, List *scanjoin_targets_contain_srfs, bool scanjoin_target_parallel_safe, bool tlist_same_exprs)
Definition: planner.c:7732
static void adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel, List *targets, List *targets_contain_srfs)
Definition: planner.c:6571
void check_stack_depth(void)
Definition: postgres.c:3564
Definition: nodes.h:129
List * exprs
Definition: pathnodes.h:1539
Relids relids
Definition: pathnodes.h:871
bool consider_parallel
Definition: pathnodes.h:887
Bitmapset * live_parts
Definition: pathnodes.h:1039
bool tlist_same_exprs(List *tlist1, List *tlist2)
Definition: tlist.c:218
PathTarget * copy_pathtarget(PathTarget *src)
Definition: tlist.c:657

References add_paths_to_append_rel(), adjust_appendrel_attrs(), adjust_paths_for_srfs(), Assert, bms_next_member(), check_stack_depth(), RelOptInfo::consider_parallel, copy_pathtarget(), create_projection_path(), PathTarget::exprs, find_appinfos_by_relids(), generate_useful_gather_paths(), i, IS_DUMMY_REL, IS_OTHER_REL, IS_PARTITIONED_REL, lappend(), lfirst, lfirst_node, linitial_node, RelOptInfo::live_parts, llast_node, NIL, RelOptInfo::partial_pathlist, RelOptInfo::pathlist, pfree(), RelOptInfo::relids, RelOptInfo::reltarget, root, set_cheapest(), subpath(), and tlist_same_exprs().

Referenced by grouping_planner().

◆ can_partial_agg()

static bool can_partial_agg ( PlannerInfo * root)
static

Definition at line 7690 of file planner.c.

7691 {
7692  Query *parse = root->parse;
7693 
7694  if (!parse->hasAggs && parse->groupClause == NIL)
7695  {
7696  /*
7697  * We don't know how to do parallel aggregation unless we have either
7698  * some aggregates or a grouping clause.
7699  */
7700  return false;
7701  }
7702  else if (parse->groupingSets)
7703  {
7704  /* We don't know how to do grouping sets in parallel. */
7705  return false;
7706  }
7707  else if (root->hasNonPartialAggs || root->hasNonSerialAggs)
7708  {
7709  /* Insufficient support for partial mode. */
7710  return false;
7711  }
7712 
7713  /* Everything looks good. */
7714  return true;
7715 }

References NIL, parse(), and root.

Referenced by create_grouping_paths().

◆ common_prefix_cmp()

static int common_prefix_cmp ( const void *  a,
const void *  b 
)
static

Definition at line 6040 of file planner.c.

6041 {
6042  const WindowClauseSortData *wcsa = a;
6043  const WindowClauseSortData *wcsb = b;
6044  ListCell *item_a;
6045  ListCell *item_b;
6046 
6047  forboth(item_a, wcsa->uniqueOrder, item_b, wcsb->uniqueOrder)
6048  {
6049  SortGroupClause *sca = lfirst_node(SortGroupClause, item_a);
6050  SortGroupClause *scb = lfirst_node(SortGroupClause, item_b);
6051 
6052  if (sca->tleSortGroupRef > scb->tleSortGroupRef)
6053  return -1;
6054  else if (sca->tleSortGroupRef < scb->tleSortGroupRef)
6055  return 1;
6056  else if (sca->sortop > scb->sortop)
6057  return -1;
6058  else if (sca->sortop < scb->sortop)
6059  return 1;
6060  else if (sca->nulls_first && !scb->nulls_first)
6061  return -1;
6062  else if (!sca->nulls_first && scb->nulls_first)
6063  return 1;
6064  /* no need to compare eqop, since it is fully determined by sortop */
6065  }
6066 
6067  if (list_length(wcsa->uniqueOrder) > list_length(wcsb->uniqueOrder))
6068  return -1;
6069  else if (list_length(wcsa->uniqueOrder) < list_length(wcsb->uniqueOrder))
6070  return 1;
6071 
6072  return 0;
6073 }
int b
Definition: isn.c:70
int a
Definition: isn.c:69
Index tleSortGroupRef
Definition: parsenodes.h:1438

References a, b, forboth, lfirst_node, list_length(), SortGroupClause::nulls_first, SortGroupClause::sortop, SortGroupClause::tleSortGroupRef, and WindowClauseSortData::uniqueOrder.

Referenced by select_active_windows().

◆ consider_groupingsets_paths()

static void consider_groupingsets_paths ( PlannerInfo * root,
RelOptInfo * grouped_rel,
Path * path,
bool  is_sorted,
bool  can_hash,
grouping_sets_data * gd,
const AggClauseCosts * agg_costs,
double  dNumGroups 
)
static

Definition at line 4210 of file planner.c.

4218 {
4219  Query *parse = root->parse;
4220  Size hash_mem_limit = get_hash_memory_limit();
4221 
4222  /*
4223  * If we're not being offered sorted input, then only consider plans that
4224  * can be done entirely by hashing.
4225  *
4226  * We can hash everything if it looks like it'll fit in hash_mem. But if
4227  * the input is actually sorted despite not being advertised as such, we
4228  * prefer to make use of that in order to use less memory.
4229  *
4230  * If none of the grouping sets are sortable, then ignore the hash_mem
4231  * limit and generate a path anyway, since otherwise we'll just fail.
4232  */
4233  if (!is_sorted)
4234  {
4235  List *new_rollups = NIL;
4236  RollupData *unhashed_rollup = NULL;
4237  List *sets_data;
4238  List *empty_sets_data = NIL;
4239  List *empty_sets = NIL;
4240  ListCell *lc;
4241  ListCell *l_start = list_head(gd->rollups);
4242  AggStrategy strat = AGG_HASHED;
4243  double hashsize;
4244  double exclude_groups = 0.0;
4245 
4246  Assert(can_hash);
4247 
4248  /*
4249  * If the input is coincidentally sorted usefully (which can happen
4250  * even if is_sorted is false, since that only means that our caller
4251  * has set up the sorting for us), then save some hashtable space by
4252  * making use of that. But we need to watch out for degenerate cases:
4253  *
4254  * 1) If there are any empty grouping sets, then group_pathkeys might
4255  * be NIL if all non-empty grouping sets are unsortable. In this case,
4256  * there will be a rollup containing only empty groups, and the
4257  * pathkeys_contained_in test is vacuously true; this is ok.
4258  *
4259  * XXX: the above relies on the fact that group_pathkeys is generated
4260  * from the first rollup. If we add the ability to consider multiple
4261  * sort orders for grouping input, this assumption might fail.
4262  *
4263  * 2) If there are no empty sets and only unsortable sets, then the
4264  * rollups list will be empty (and thus l_start == NULL), and
4265  * group_pathkeys will be NIL; we must ensure that the vacuously-true
4266  * pathkeys_contained_in test doesn't cause us to crash.
4267  */
4268  if (l_start != NULL &&
4269  pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
4270  {
4271  unhashed_rollup = lfirst_node(RollupData, l_start);
4272  exclude_groups = unhashed_rollup->numGroups;
4273  l_start = lnext(gd->rollups, l_start);
4274  }
4275 
4276  hashsize = estimate_hashagg_tablesize(root,
4277  path,
4278  agg_costs,
4279  dNumGroups - exclude_groups);
4280 
4281  /*
4282  * gd->rollups is empty if we have only unsortable columns to work
4283  * with. Override hash_mem in that case; otherwise, we'll rely on the
4284  * sorted-input case to generate usable mixed paths.
4285  */
4286  if (hashsize > hash_mem_limit && gd->rollups)
4287  return; /* nope, won't fit */
4288 
4289  /*
4290  * We need to burst the existing rollups list into individual grouping
4291  * sets and recompute a groupClause for each set.
4292  */
4293  sets_data = list_copy(gd->unsortable_sets);
4294 
4295  for_each_cell(lc, gd->rollups, l_start)
4296  {
4297  RollupData *rollup = lfirst_node(RollupData, lc);
4298 
4299  /*
4300  * If we find an unhashable rollup that's not been skipped by the
4301  * "actually sorted" check above, we can't cope; we'd need sorted
4302  * input (with a different sort order) but we can't get that here.
4303  * So bail out; we'll get a valid path from the is_sorted case
4304  * instead.
4305  *
4306  * The mere presence of empty grouping sets doesn't make a rollup
4307  * unhashable (see preprocess_grouping_sets), we handle those
4308  * specially below.
4309  */
4310  if (!rollup->hashable)
4311  return;
4312 
4313  sets_data = list_concat(sets_data, rollup->gsets_data);
4314  }
4315  foreach(lc, sets_data)
4316  {
4317  GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4318  List *gset = gs->set;
4319  RollupData *rollup;
4320 
4321  if (gset == NIL)
4322  {
4323  /* Empty grouping sets can't be hashed. */
4324  empty_sets_data = lappend(empty_sets_data, gs);
4325  empty_sets = lappend(empty_sets, NIL);
4326  }
4327  else
4328  {
4329  rollup = makeNode(RollupData);
4330 
4331  rollup->groupClause = preprocess_groupclause(root, gset);
4332  rollup->gsets_data = list_make1(gs);
4333  rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4334  rollup->gsets_data,
4335  gd->tleref_to_colnum_map);
4336  rollup->numGroups = gs->numGroups;
4337  rollup->hashable = true;
4338  rollup->is_hashed = true;
4339  new_rollups = lappend(new_rollups, rollup);
4340  }
4341  }
4342 
4343  /*
4344  * If we didn't find anything nonempty to hash, then bail. We'll
4345  * generate a path from the is_sorted case.
4346  */
4347  if (new_rollups == NIL)
4348  return;
4349 
4350  /*
4351  * If there were empty grouping sets they should have been in the
4352  * first rollup.
4353  */
4354  Assert(!unhashed_rollup || !empty_sets);
4355 
4356  if (unhashed_rollup)
4357  {
4358  new_rollups = lappend(new_rollups, unhashed_rollup);
4359  strat = AGG_MIXED;
4360  }
4361  else if (empty_sets)
4362  {
4363  RollupData *rollup = makeNode(RollupData);
4364 
4365  rollup->groupClause = NIL;
4366  rollup->gsets_data = empty_sets_data;
4367  rollup->gsets = empty_sets;
4368  rollup->numGroups = list_length(empty_sets);
4369  rollup->hashable = false;
4370  rollup->is_hashed = false;
4371  new_rollups = lappend(new_rollups, rollup);
4372  strat = AGG_MIXED;
4373  }
4374 
4375  add_path(grouped_rel, (Path *)
4376  create_groupingsets_path(root,
4377  grouped_rel,
4378  path,
4379  (List *) parse->havingQual,
4380  strat,
4381  new_rollups,
4382  agg_costs));
4383  return;
4384  }
4385 
4386  /*
4387  * If we have sorted input but nothing we can do with it, bail.
4388  */
4389  if (gd->rollups == NIL)
4390  return;
4391 
4392  /*
4393  * Given sorted input, we try and make two paths: one sorted and one mixed
4394  * sort/hash. (We need to try both because hashagg might be disabled, or
4395  * some columns might not be sortable.)
4396  *
4397  * can_hash is passed in as false if some obstacle elsewhere (such as
4398  * ordered aggs) means that we shouldn't consider hashing at all.
4399  */
4400  if (can_hash && gd->any_hashable)
4401  {
4402  List *rollups = NIL;
4403  List *hash_sets = list_copy(gd->unsortable_sets);
4404  double availspace = hash_mem_limit;
4405  ListCell *lc;
4406 
4407  /*
4408  * Account first for space needed for groups we can't sort at all.
4409  */
4410  availspace -= estimate_hashagg_tablesize(root,
4411  path,
4412  agg_costs,
4413  gd->dNumHashGroups);
4414 
4415  if (availspace > 0 && list_length(gd->rollups) > 1)
4416  {
4417  double scale;
4418  int num_rollups = list_length(gd->rollups);
4419  int k_capacity;
4420  int *k_weights = palloc(num_rollups * sizeof(int));
4421  Bitmapset *hash_items = NULL;
4422  int i;
4423 
4424  /*
4425  * We treat this as a knapsack problem: the knapsack capacity
4426  * represents hash_mem, the item weights are the estimated memory
4427  * usage of the hashtables needed to implement a single rollup,
4428  * and we really ought to use the cost saving as the item value;
4429  * however, currently the costs assigned to sort nodes don't
4430  * reflect the comparison costs well, and so we treat all items as
4431  * of equal value (each rollup we hash instead saves us one sort).
4432  *
4433  * To use the discrete knapsack, we need to scale the values to a
4434  * reasonably small bounded range. We choose to allow a 5% error
4435  * margin; we have no more than 4096 rollups in the worst possible
4436  * case, which with a 5% error margin will require a bit over 42MB
4437  * of workspace. (Anyone wanting to plan queries that complex had
4438  * better have the memory for it. In more reasonable cases, with
4439  * no more than a couple of dozen rollups, the memory usage will
4440  * be negligible.)
4441  *
4442  * k_capacity is naturally bounded, but we clamp the values for
4443  * scale and weight (below) to avoid overflows or underflows (or
4444  * uselessly trying to use a scale factor less than 1 byte).
4445  */
4446  scale = Max(availspace / (20.0 * num_rollups), 1.0);
4447  k_capacity = (int) floor(availspace / scale);
4448 
4449  /*
4450  * We leave the first rollup out of consideration since it's the
4451  * one that matches the input sort order. We assign indexes "i"
4452  * to only those entries considered for hashing; the second loop,
4453  * below, must use the same condition.
4454  */
4455  i = 0;
4456  for_each_from(lc, gd->rollups, 1)
4457  {
4458  RollupData *rollup = lfirst_node(RollupData, lc);
4459 
4460  if (rollup->hashable)
4461  {
4462  double sz = estimate_hashagg_tablesize(root,
4463  path,
4464  agg_costs,
4465  rollup->numGroups);
4466 
4467  /*
4468  * If sz is enormous, but hash_mem (and hence scale) is
4469  * small, avoid integer overflow here.
4470  */
4471  k_weights[i] = (int) Min(floor(sz / scale),
4472  k_capacity + 1.0);
4473  ++i;
4474  }
4475  }
4476 
4477  /*
4478  * Apply knapsack algorithm; compute the set of items which
4479  * maximizes the value stored (in this case the number of sorts
4480  * saved) while keeping the total size (approximately) within
4481  * capacity.
4482  */
4483  if (i > 0)
4484  hash_items = DiscreteKnapsack(k_capacity, i, k_weights, NULL);
4485 
4486  if (!bms_is_empty(hash_items))
4487  {
4488  rollups = list_make1(linitial(gd->rollups));
4489 
4490  i = 0;
4491  for_each_from(lc, gd->rollups, 1)
4492  {
4493  RollupData *rollup = lfirst_node(RollupData, lc);
4494 
4495  if (rollup->hashable)
4496  {
4497  if (bms_is_member(i, hash_items))
4498  hash_sets = list_concat(hash_sets,
4499  rollup->gsets_data);
4500  else
4501  rollups = lappend(rollups, rollup);
4502  ++i;
4503  }
4504  else
4505  rollups = lappend(rollups, rollup);
4506  }
4507  }
4508  }
4509 
4510  if (!rollups && hash_sets)
4511  rollups = list_copy(gd->rollups);
4512 
4513  foreach(lc, hash_sets)
4514  {
4515  GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4516  RollupData *rollup = makeNode(RollupData);
4517 
4518  Assert(gs->set != NIL);
4519 
4520  rollup->groupClause = preprocess_groupclause(root, gs->set);
4521  rollup->gsets_data = list_make1(gs);
4522  rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4523  rollup->gsets_data,
4524  gd->tleref_to_colnum_map);
4525  rollup->numGroups = gs->numGroups;
4526  rollup->hashable = true;
4527  rollup->is_hashed = true;
4528  rollups = lcons(rollup, rollups);
4529  }
4530 
4531  if (rollups)
4532  {
4533  add_path(grouped_rel, (Path *)
4534  create_groupingsets_path(root,
4535  grouped_rel,
4536  path,
4537  (List *) parse->havingQual,
4538  AGG_MIXED,
4539  rollups,
4540  agg_costs));
4541  }
4542  }
4543 
4544  /*
4545  * Now try the simple sorted case.
4546  */
4547  if (!gd->unsortable_sets)
4548  add_path(grouped_rel, (Path *)
4549  create_groupingsets_path(root,
4550  grouped_rel,
4551  path,
4552  (List *) parse->havingQual,
4553  AGG_SORTED,
4554  gd->rollups,
4555  agg_costs));
4556 }
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:510
#define bms_is_empty(a)
Definition: bitmapset.h:118
#define Min(x, y)
Definition: c.h:1004
#define Max(x, y)
Definition: c.h:998
size_t Size
Definition: c.h:605
Bitmapset * DiscreteKnapsack(int max_weight, int num_items, int *item_weights, double *item_values)
Definition: knapsack.c:52
List * list_concat(List *list1, const List *list2)
Definition: list.c:561
List * lcons(void *datum, List *list)
Definition: list.c:495
void * palloc(Size size)
Definition: mcxt.c:1317
size_t get_hash_memory_limit(void)
Definition: nodeHash.c:3481
AggStrategy
Definition: nodes.h:353
@ AGG_MIXED
Definition: nodes.h:357
#define makeNode(_type_)
Definition: nodes.h:155
bool pathkeys_contained_in(List *keys1, List *keys2)
Definition: pathkeys.c:342
GroupingSetsPath * create_groupingsets_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *having_qual, AggStrategy aggstrategy, List *rollups, const AggClauseCosts *agg_costs)
Definition: pathnode.c:3323
#define list_make1(x1)
Definition: pg_list.h:212
#define for_each_cell(cell, lst, initcell)
Definition: pg_list.h:438
static ListCell * list_head(const List *l)
Definition: pg_list.h:128
#define for_each_from(cell, lst, N)
Definition: pg_list.h:414
#define linitial(l)
Definition: pg_list.h:178
static ListCell * lnext(const List *l, const ListCell *c)
Definition: pg_list.h:343
static int scale
Definition: pgbench.c:181
static List * preprocess_groupclause(PlannerInfo *root, List *force)
Definition: planner.c:2910
static List * remap_to_groupclause_idx(List *groupClause, List *gsets, int *tleref_to_colnum_map)
Definition: planner.c:2284
double estimate_hashagg_tablesize(PlannerInfo *root, Path *path, const AggClauseCosts *agg_costs, double dNumGroups)
Definition: selfuncs.c:3918
Cardinality numGroups
Definition: pathnodes.h:2280
List * pathkeys
Definition: pathnodes.h:1672
Cardinality numGroups
Definition: pathnodes.h:2291
List * groupClause
Definition: pathnodes.h:2288
List * gsets_data
Definition: pathnodes.h:2290
bool hashable
Definition: pathnodes.h:2292
List * gsets
Definition: pathnodes.h:2289
bool is_hashed
Definition: pathnodes.h:2293
int * tleref_to_colnum_map
Definition: planner.c:106
List * rollups
Definition: planner.c:99
List * unsortable_sets
Definition: planner.c:105
double dNumHashGroups
Definition: planner.c:101

References add_path(), AGG_HASHED, AGG_MIXED, AGG_SORTED, grouping_sets_data::any_hashable, Assert, bms_is_empty, bms_is_member(), create_groupingsets_path(), DiscreteKnapsack(), grouping_sets_data::dNumHashGroups, estimate_hashagg_tablesize(), for_each_cell, for_each_from, get_hash_memory_limit(), RollupData::groupClause, RollupData::gsets, RollupData::gsets_data, RollupData::hashable, i, RollupData::is_hashed, lappend(), lcons(), lfirst_node, linitial, list_concat(), list_copy(), list_head(), list_length(), list_make1, lnext(), makeNode, Max, Min, NIL, GroupingSetData::numGroups, RollupData::numGroups, palloc(), parse(), Path::pathkeys, pathkeys_contained_in(), preprocess_groupclause(), remap_to_groupclause_idx(), grouping_sets_data::rollups, root, scale, GroupingSetData::set, grouping_sets_data::tleref_to_colnum_map, and grouping_sets_data::unsortable_sets.

Referenced by add_paths_to_grouping_rel().

◆ create_degenerate_grouping_paths()

static void create_degenerate_grouping_paths ( PlannerInfo * root,
RelOptInfo * input_rel,
RelOptInfo * grouped_rel 
)
static

Definition at line 4007 of file planner.c.

4009 {
4010  Query *parse = root->parse;
4011  int nrows;
4012  Path *path;
4013 
4014  nrows = list_length(parse->groupingSets);
4015  if (nrows > 1)
4016  {
4017  /*
4018  * Doesn't seem worthwhile writing code to cons up a generate_series
4019  * or a values scan to emit multiple rows. Instead just make N clones
4020  * and append them. (With a volatile HAVING clause, this means you
4021  * might get between 0 and N output rows. Offhand I think that's
4022  * desired.)
4023  */
4024  List *paths = NIL;
4025 
4026  while (--nrows >= 0)
4027  {
4028  path = (Path *)
4029  create_group_result_path(root, grouped_rel,
4030  grouped_rel->reltarget,
4031  (List *) parse->havingQual);
4032  paths = lappend(paths, path);
4033  }
4034  path = (Path *)
4035  create_append_path(root,
4036  grouped_rel,
4037  paths,
4038  NIL,
4039  NIL,
4040  NULL,
4041  0,
4042  false,
4043  -1);
4044  }
4045  else
4046  {
4047  /* No grouping sets, or just one, so one output row */
4048  path = (Path *)
4049  create_group_result_path(root, grouped_rel,
4050  grouped_rel->reltarget,
4051  (List *) parse->havingQual);
4052  }
4053 
4054  add_path(grouped_rel, path);
4055 }
AppendPath * create_append_path(PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *partial_subpaths, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows)
Definition: pathnode.c:1300
GroupResultPath * create_group_result_path(PlannerInfo *root, RelOptInfo *rel, PathTarget *target, List *havingqual)
Definition: pathnode.c:1586

References add_path(), create_append_path(), create_group_result_path(), lappend(), list_length(), NIL, parse(), RelOptInfo::reltarget, and root.

Referenced by create_grouping_paths().

◆ create_distinct_paths()

static RelOptInfo * create_distinct_paths ( PlannerInfo * root,
RelOptInfo * input_rel,
PathTarget * target 
)
static

Definition at line 4829 of file planner.c.

4831 {
4832  RelOptInfo *distinct_rel;
4833 
4834  /* For now, do all work in the (DISTINCT, NULL) upperrel */
4835  distinct_rel = fetch_upper_rel(root, UPPERREL_DISTINCT, NULL);
4836 
4837  /*
4838  * We don't compute anything at this level, so distinct_rel will be
4839  * parallel-safe if the input rel is parallel-safe. In particular, if
4840  * there is a DISTINCT ON (...) clause, any path for the input_rel will
4841  * output those expressions, and will not be parallel-safe unless those
4842  * expressions are parallel-safe.
4843  */
4844  distinct_rel->consider_parallel = input_rel->consider_parallel;
4845 
4846  /*
4847  * If the input rel belongs to a single FDW, so does the distinct_rel.
4848  */
4849  distinct_rel->serverid = input_rel->serverid;
4850  distinct_rel->userid = input_rel->userid;
4851  distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4852  distinct_rel->fdwroutine = input_rel->fdwroutine;
4853 
4854  /* build distinct paths based on input_rel's pathlist */
4855  create_final_distinct_paths(root, input_rel, distinct_rel);
4856 
4857  /* now build distinct paths based on input_rel's partial_pathlist */
4858  create_partial_distinct_paths(root, input_rel, distinct_rel, target);
4859 
4860  /* Give a helpful error if we failed to create any paths */
4861  if (distinct_rel->pathlist == NIL)
4862  ereport(ERROR,
4863  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4864  errmsg("could not implement DISTINCT"),
4865  errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4866 
4867  /*
4868  * If there is an FDW that's responsible for all baserels of the query,
4869  * let it consider adding ForeignPaths.
4870  */
4871  if (distinct_rel->fdwroutine &&
4872  distinct_rel->fdwroutine->GetForeignUpperPaths)
4873  distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4874  UPPERREL_DISTINCT,
4875  input_rel,
4876  distinct_rel,
4877  NULL);
4878 
4879  /* Let extensions possibly add some more paths */
4880  if (create_upper_paths_hook)
4881  (*create_upper_paths_hook) (root, UPPERREL_DISTINCT, input_rel,
4882  distinct_rel, NULL);
4883 
4884  /* Now choose the best path(s) */
4885  set_cheapest(distinct_rel);
4886 
4887  return distinct_rel;
4888 }
int errdetail(const char *fmt,...)
Definition: elog.c:1203
int errcode(int sqlerrcode)
Definition: elog.c:853
int errmsg(const char *fmt,...)
Definition: elog.c:1070
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
@ UPPERREL_DISTINCT
Definition: pathnodes.h:77
static RelOptInfo * create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *distinct_rel)
Definition: planner.c:5098
static void create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *final_distinct_rel, PathTarget *target)
Definition: planner.c:4899
create_upper_paths_hook_type create_upper_paths_hook
Definition: planner.c:75
RelOptInfo * fetch_upper_rel(PlannerInfo *root, UpperRelationKind kind, Relids relids)
Definition: relnode.c:1458
bool useridiscurrent
Definition: pathnodes.h:968
Oid userid
Definition: pathnodes.h:966
Oid serverid
Definition: pathnodes.h:964

References RelOptInfo::consider_parallel, create_final_distinct_paths(), create_partial_distinct_paths(), create_upper_paths_hook, ereport, errcode(), errdetail(), errmsg(), ERROR, fetch_upper_rel(), NIL, RelOptInfo::pathlist, root, RelOptInfo::serverid, set_cheapest(), UPPERREL_DISTINCT, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by grouping_planner().

◆ create_final_distinct_paths()

static RelOptInfo * create_final_distinct_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo distinct_rel 
)
static

Definition at line 5098 of file planner.c.

5100 {
5101  Query *parse = root->parse;
5102  Path *cheapest_input_path = input_rel->cheapest_total_path;
5103  double numDistinctRows;
5104  bool allow_hash;
5105 
5106  /* Estimate number of distinct rows there will be */
5107  if (parse->groupClause || parse->groupingSets || parse->hasAggs ||
5108  root->hasHavingQual)
5109  {
5110  /*
5111  * If there was grouping or aggregation, use the number of input rows
5112  * as the estimated number of DISTINCT rows (ie, assume the input is
5113  * already mostly unique).
5114  */
5115  numDistinctRows = cheapest_input_path->rows;
5116  }
5117  else
5118  {
5119  /*
5120  * Otherwise, the UNIQUE filter has effects comparable to GROUP BY.
5121  */
5122  List *distinctExprs;
5123 
5124  distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
5125  parse->targetList);
5126  numDistinctRows = estimate_num_groups(root, distinctExprs,
5127  cheapest_input_path->rows,
5128  NULL, NULL);
5129  }
5130 
5131  /*
5132  * Consider sort-based implementations of DISTINCT, if possible.
5133  */
5134  if (grouping_is_sortable(root->processed_distinctClause))
5135  {
5136  /*
5137  * Firstly, if we have any adequately-presorted paths, just stick a
5138  * Unique node on those. We also, consider doing an explicit sort of
5139  * the cheapest input path and Unique'ing that. If any paths have
5140  * presorted keys then we'll create an incremental sort atop of those
5141  * before adding a unique node on the top.
5142  *
5143  * When we have DISTINCT ON, we must sort by the more rigorous of
5144  * DISTINCT and ORDER BY, else it won't have the desired behavior.
5145  * Also, if we do have to do an explicit sort, we might as well use
5146  * the more rigorous ordering to avoid a second sort later. (Note
5147  * that the parser will have ensured that one clause is a prefix of
5148  * the other.)
5149  */
5150  List *needed_pathkeys;
5151  ListCell *lc;
5152  double limittuples = root->distinct_pathkeys == NIL ? 1.0 : -1.0;
5153 
5154  if (parse->hasDistinctOn &&
5155  list_length(root->distinct_pathkeys) <
5156  list_length(root->sort_pathkeys))
5157  needed_pathkeys = root->sort_pathkeys;
5158  else
5159  needed_pathkeys = root->distinct_pathkeys;
5160 
5161  foreach(lc, input_rel->pathlist)
5162  {
5163  Path *input_path = (Path *) lfirst(lc);
5164  Path *sorted_path;
5165  bool is_sorted;
5166  int presorted_keys;
5167 
5168  is_sorted = pathkeys_count_contained_in(needed_pathkeys,
5169  input_path->pathkeys,
5170  &presorted_keys);
5171 
5172  if (is_sorted)
5173  sorted_path = input_path;
5174  else
5175  {
5176  /*
5177  * Try at least sorting the cheapest path and also try
5178  * incrementally sorting any path which is partially sorted
5179  * already (no need to deal with paths which have presorted
5180  * keys when incremental sort is disabled unless it's the
5181  * cheapest input path).
5182  */
5183  if (input_path != cheapest_input_path &&
5184  (presorted_keys == 0 || !enable_incremental_sort))
5185  continue;
5186 
5187  /*
5188  * We've no need to consider both a sort and incremental sort.
5189  * We'll just do a sort if there are no presorted keys and an
5190  * incremental sort when there are presorted keys.
5191  */
5192  if (presorted_keys == 0 || !enable_incremental_sort)
5193  sorted_path = (Path *) create_sort_path(root,
5194  distinct_rel,
5195  input_path,
5196  needed_pathkeys,
5197  limittuples);
5198  else
5199  sorted_path = (Path *) create_incremental_sort_path(root,
5200  distinct_rel,
5201  input_path,
5202  needed_pathkeys,
5203  presorted_keys,
5204  limittuples);
5205  }
5206 
5207  /*
5208  * distinct_pathkeys may have become empty if all of the pathkeys
5209  * were determined to be redundant. If all of the pathkeys are
5210  * redundant then each DISTINCT target must only allow a single
5211  * value, therefore all resulting tuples must be identical (or at
5212  * least indistinguishable by an equality check). We can uniquify
5213  * these tuples simply by just taking the first tuple. All we do
5214  * here is add a path to do "LIMIT 1" atop of 'sorted_path'. When
5215  * doing a DISTINCT ON we may still have a non-NIL sort_pathkeys
5216  * list, so we must still only do this with paths which are
5217  * correctly sorted by sort_pathkeys.
5218  */
5219  if (root->distinct_pathkeys == NIL)
5220  {
5221  Node *limitCount;
5222 
5223  limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
5224  sizeof(int64),
5225  Int64GetDatum(1), false,
5226  FLOAT8PASSBYVAL);
5227 
5228  /*
5229  * If the query already has a LIMIT clause, then we could end
5230  * up with a duplicate LimitPath in the final plan. That does
5231  * not seem worth troubling over too much.
5232  */
5233  add_path(distinct_rel, (Path *)
5234  create_limit_path(root, distinct_rel, sorted_path,
5235  NULL, limitCount,
5236  LIMIT_OPTION_COUNT, 0, 1));
5237  }
5238  else
5239  {
5240  add_path(distinct_rel, (Path *)
5241  create_upper_unique_path(root, distinct_rel,
5242  sorted_path,
5243  list_length(root->distinct_pathkeys),
5244  numDistinctRows));
5245  }
5246  }
5247  }
5248 
5249  /*
5250  * Consider hash-based implementations of DISTINCT, if possible.
5251  *
5252  * If we were not able to make any other types of path, we *must* hash or
5253  * die trying. If we do have other choices, there are two things that
5254  * should prevent selection of hashing: if the query uses DISTINCT ON
5255  * (because it won't really have the expected behavior if we hash), or if
5256  * enable_hashagg is off.
5257  *
5258  * Note: grouping_is_hashable() is much more expensive to check than the
5259  * other gating conditions, so we want to do it last.
5260  */
5261  if (distinct_rel->pathlist == NIL)
5262  allow_hash = true; /* we have no alternatives */
5263  else if (parse->hasDistinctOn || !enable_hashagg)
5264  allow_hash = false; /* policy-based decision not to hash */
5265  else
5266  allow_hash = true; /* default */
5267 
5268  if (allow_hash && grouping_is_hashable(root->processed_distinctClause))
5269  {
5270  /* Generate hashed aggregate path --- no sort needed */
5271  add_path(distinct_rel, (Path *)
5272  create_agg_path(root,
5273  distinct_rel,
5274  cheapest_input_path,
5275  cheapest_input_path->pathtarget,
5276  AGG_HASHED,
5277  AGGSPLIT_SIMPLE,
5278  root->processed_distinctClause,
5279  NIL,
5280  NULL,
5281  numDistinctRows));
5282  }
5283 
5284  return distinct_rel;
5285 }
#define FLOAT8PASSBYVAL
Definition: c.h:635
bool enable_hashagg
Definition: costsize.c:152
bool enable_incremental_sort
Definition: costsize.c:151
Datum Int64GetDatum(int64 X)
Definition: fmgr.c:1807
Const * makeConst(Oid consttype, int32 consttypmod, Oid constcollid, int constlen, Datum constvalue, bool constisnull, bool constbyval)
Definition: makefuncs.c:301
@ LIMIT_OPTION_COUNT
Definition: nodes.h:431
bool pathkeys_count_contained_in(List *keys1, List *keys2, int *n_common)
Definition: pathkeys.c:557
SortPath * create_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, double limit_tuples)
Definition: pathnode.c:3082
LimitPath * create_limit_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, Node *limitOffset, Node *limitCount, LimitOption limitOption, int64 offset_est, int64 count_est)
Definition: pathnode.c:3922
UpperUniquePath * create_upper_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, int numCols, double numGroups)
Definition: pathnode.c:3187
IncrementalSortPath * create_incremental_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, int presorted_keys, double limit_tuples)
Definition: pathnode.c:3032
#define InvalidOid
Definition: postgres_ext.h:36
double estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, List **pgset, EstimationInfo *estinfo)
Definition: selfuncs.c:3417
Cardinality rows
Definition: pathnodes.h:1666
List * get_sortgrouplist_exprs(List *sgClauses, List *targetList)
Definition: tlist.c:392
bool grouping_is_sortable(List *groupClause)
Definition: tlist.c:540
bool grouping_is_hashable(List *groupClause)
Definition: tlist.c:560

References add_path(), AGG_HASHED, AGGSPLIT_SIMPLE, RelOptInfo::cheapest_total_path, create_agg_path(), create_incremental_sort_path(), create_limit_path(), create_sort_path(), create_upper_unique_path(), enable_hashagg, enable_incremental_sort, estimate_num_groups(), FLOAT8PASSBYVAL, get_sortgrouplist_exprs(), grouping_is_hashable(), grouping_is_sortable(), Int64GetDatum(), InvalidOid, lfirst, LIMIT_OPTION_COUNT, list_length(), makeConst(), NIL, parse(), Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::pathlist, root, and Path::rows.

Referenced by create_distinct_paths(), and create_partial_distinct_paths().

◆ create_grouping_paths()

static RelOptInfo * create_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget target,
bool  target_parallel_safe,
grouping_sets_data gd 
)
static

Definition at line 3820 of file planner.c.

3825 {
3826  Query *parse = root->parse;
3827  RelOptInfo *grouped_rel;
3828  RelOptInfo *partially_grouped_rel;
3829  AggClauseCosts agg_costs;
3830 
3831  MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
3832  get_agg_clause_costs(root, AGGSPLIT_SIMPLE, &agg_costs);
3833 
3834  /*
3835  * Create grouping relation to hold fully aggregated grouping and/or
3836  * aggregation paths.
3837  */
3838  grouped_rel = make_grouping_rel(root, input_rel, target,
3839  target_parallel_safe, parse->havingQual);
3840 
3841  /*
3842  * Create either paths for a degenerate grouping or paths for ordinary
3843  * grouping, as appropriate.
3844  */
3845  if (is_degenerate_grouping(root))
3846  create_degenerate_grouping_paths(root, input_rel, grouped_rel);
3847  else
3848  {
3849  int flags = 0;
3850  GroupPathExtraData extra;
3851 
3852  /*
3853  * Determine whether it's possible to perform sort-based
3854  * implementations of grouping. (Note that if processed_groupClause
3855  * is empty, grouping_is_sortable() is trivially true, and all the
3856  * pathkeys_contained_in() tests will succeed too, so that we'll
3857  * consider every surviving input path.)
3858  *
3859  * If we have grouping sets, we might be able to sort some but not all
3860  * of them; in this case, we need can_sort to be true as long as we
3861  * must consider any sorted-input plan.
3862  */
3863  if ((gd && gd->rollups != NIL)
3864  || grouping_is_sortable(root->processed_groupClause))
3865  flags |= GROUPING_CAN_USE_SORT;
3866 
3867  /*
3868  * Determine whether we should consider hash-based implementations of
3869  * grouping.
3870  *
3871  * Hashed aggregation only applies if we're grouping. If we have
3872  * grouping sets, some groups might be hashable but others not; in
3873  * this case we set can_hash true as long as there is nothing globally
3874  * preventing us from hashing (and we should therefore consider plans
3875  * with hashes).
3876  *
3877  * Executor doesn't support hashed aggregation with DISTINCT or ORDER
3878  * BY aggregates. (Doing so would imply storing *all* the input
3879  * values in the hash table, and/or running many sorts in parallel,
3880  * either of which seems like a certain loser.) We similarly don't
3881  * support ordered-set aggregates in hashed aggregation, but that case
3882  * is also included in the numOrderedAggs count.
3883  *
3884  * Note: grouping_is_hashable() is much more expensive to check than
3885  * the other gating conditions, so we want to do it last.
3886  */
3887  if ((parse->groupClause != NIL &&
3888  root->numOrderedAggs == 0 &&
3889  (gd ? gd->any_hashable : grouping_is_hashable(root->processed_groupClause))))
3890  flags |= GROUPING_CAN_USE_HASH;
3891 
3892  /*
3893  * Determine whether partial aggregation is possible.
3894  */
3895  if (can_partial_agg(root))
3896  flags |= GROUPING_CAN_PARTIAL_AGG;
3897 
3898  extra.flags = flags;
3899  extra.target_parallel_safe = target_parallel_safe;
3900  extra.havingQual = parse->havingQual;
3901  extra.targetList = parse->targetList;
3902  extra.partial_costs_set = false;
3903 
3904  /*
3905  * Determine whether partitionwise aggregation is in theory possible.
3906  * It can be disabled by the user, and for now, we don't try to
3907  * support grouping sets. create_ordinary_grouping_paths() will check
3908  * additional conditions, such as whether input_rel is partitioned.
3909  */
3910  if (enable_partitionwise_aggregate && !parse->groupingSets)
3911  extra.patype = PARTITIONWISE_AGGREGATE_FULL;
3912  else
3913  extra.patype = PARTITIONWISE_AGGREGATE_NONE;
3914 
3915  create_ordinary_grouping_paths(root, input_rel, grouped_rel,
3916  &agg_costs, gd, &extra,
3917  &partially_grouped_rel);
3918  }
3919 
3920  set_cheapest(grouped_rel);
3921  return grouped_rel;
3922 }
#define MemSet(start, val, len)
Definition: c.h:1020
bool enable_partitionwise_aggregate
Definition: costsize.c:160
@ PARTITIONWISE_AGGREGATE_FULL
Definition: pathnodes.h:3278
@ PARTITIONWISE_AGGREGATE_NONE
Definition: pathnodes.h:3277
#define GROUPING_CAN_PARTIAL_AGG
Definition: pathnodes.h:3262
static void create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel)
Definition: planner.c:4007
static bool is_degenerate_grouping(PlannerInfo *root)
Definition: planner.c:3986
static void create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, GroupPathExtraData *extra, RelOptInfo **partially_grouped_rel_p)
Definition: planner.c:4071
static bool can_partial_agg(PlannerInfo *root)
Definition: planner.c:7690
static RelOptInfo * make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, Node *havingQual)
Definition: planner.c:3933
void get_agg_clause_costs(PlannerInfo *root, AggSplit aggsplit, AggClauseCosts *costs)
Definition: prepagg.c:560
PartitionwiseAggregateType patype
Definition: pathnodes.h:3307

References AGGSPLIT_SIMPLE, grouping_sets_data::any_hashable, can_partial_agg(), create_degenerate_grouping_paths(), create_ordinary_grouping_paths(), enable_partitionwise_aggregate, GroupPathExtraData::flags, get_agg_clause_costs(), GROUPING_CAN_PARTIAL_AGG, GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, grouping_is_hashable(), grouping_is_sortable(), GroupPathExtraData::havingQual, is_degenerate_grouping(), make_grouping_rel(), MemSet, NIL, parse(), GroupPathExtraData::partial_costs_set, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, GroupPathExtraData::patype, grouping_sets_data::rollups, root, set_cheapest(), GroupPathExtraData::target_parallel_safe, and GroupPathExtraData::targetList.

Referenced by grouping_planner().

◆ create_one_window_path()

static void create_one_window_path ( PlannerInfo root,
RelOptInfo window_rel,
Path path,
PathTarget input_target,
PathTarget output_target,
WindowFuncLists wflists,
List activeWindows 
)
static

Definition at line 4659 of file planner.c.

4666 {
4667  PathTarget *window_target;
4668  ListCell *l;
4669  List *topqual = NIL;
4670 
4671  /*
4672  * Since each window clause could require a different sort order, we stack
4673  * up a WindowAgg node for each clause, with sort steps between them as
4674  * needed. (We assume that select_active_windows chose a good order for
4675  * executing the clauses in.)
4676  *
4677  * input_target should contain all Vars and Aggs needed for the result.
4678  * (In some cases we wouldn't need to propagate all of these all the way
4679  * to the top, since they might only be needed as inputs to WindowFuncs.
4680  * It's probably not worth trying to optimize that though.) It must also
4681  * contain all window partitioning and sorting expressions, to ensure
4682  * they're computed only once at the bottom of the stack (that's critical
4683  * for volatile functions). As we climb up the stack, we'll add outputs
4684  * for the WindowFuncs computed at each level.
4685  */
4686  window_target = input_target;
4687 
4688  foreach(l, activeWindows)
4689  {
4690  WindowClause *wc = lfirst_node(WindowClause, l);
4691  List *window_pathkeys;
4692  List *runcondition = NIL;
4693  int presorted_keys;
4694  bool is_sorted;
4695  bool topwindow;
4696  ListCell *lc2;
4697 
4698  window_pathkeys = make_pathkeys_for_window(root,
4699  wc,
4700  root->processed_tlist);
4701 
4702  is_sorted = pathkeys_count_contained_in(window_pathkeys,
4703  path->pathkeys,
4704  &presorted_keys);
4705 
4706  /* Sort if necessary */
4707  if (!is_sorted)
4708  {
4709  /*
4710  * No presorted keys or incremental sort disabled, just perform a
4711  * complete sort.
4712  */
4713  if (presorted_keys == 0 || !enable_incremental_sort)
4714  path = (Path *) create_sort_path(root, window_rel,
4715  path,
4716  window_pathkeys,
4717  -1.0);
4718  else
4719  {
4720  /*
4721  * Since we have presorted keys and incremental sort is
4722  * enabled, just use incremental sort.
4723  */
4724  path = (Path *) create_incremental_sort_path(root,
4725  window_rel,
4726  path,
4727  window_pathkeys,
4728  presorted_keys,
4729  -1.0);
4730  }
4731  }
4732 
4733  if (lnext(activeWindows, l))
4734  {
4735  /*
4736  * Add the current WindowFuncs to the output target for this
4737  * intermediate WindowAggPath. We must copy window_target to
4738  * avoid changing the previous path's target.
4739  *
4740  * Note: a WindowFunc adds nothing to the target's eval costs; but
4741  * we do need to account for the increase in tlist width.
4742  */
4743  int64 tuple_width = window_target->width;
4744 
4745  window_target = copy_pathtarget(window_target);
4746  foreach(lc2, wflists->windowFuncs[wc->winref])
4747  {
4748  WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4749 
4750  add_column_to_pathtarget(window_target, (Expr *) wfunc, 0);
4751  tuple_width += get_typavgwidth(wfunc->wintype, -1);
4752  }
4753  window_target->width = clamp_width_est(tuple_width);
4754  }
4755  else
4756  {
4757  /* Install the goal target in the topmost WindowAgg */
4758  window_target = output_target;
4759  }
4760 
4761  /* mark the final item in the list as the top-level window */
4762  topwindow = foreach_current_index(l) == list_length(activeWindows) - 1;
4763 
4764  /*
4765  * Collect the WindowFuncRunConditions from each WindowFunc and
4766  * convert them into OpExprs
4767  */
4768  foreach(lc2, wflists->windowFuncs[wc->winref])
4769  {
4770  ListCell *lc3;
4771  WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4772 
4773  foreach(lc3, wfunc->runCondition)
4774  {
4775  WindowFuncRunCondition *wfuncrc =
4776  lfirst_node(WindowFuncRunCondition, lc3);
4777  Expr *opexpr;
4778  Expr *leftop;
4779  Expr *rightop;
4780 
4781  if (wfuncrc->wfunc_left)
4782  {
4783  leftop = (Expr *) copyObject(wfunc);
4784  rightop = copyObject(wfuncrc->arg);
4785  }
4786  else
4787  {
4788  leftop = copyObject(wfuncrc->arg);
4789  rightop = (Expr *) copyObject(wfunc);
4790  }
4791 
4792  opexpr = make_opclause(wfuncrc->opno,
4793  BOOLOID,
4794  false,
4795  leftop,
4796  rightop,
4797  InvalidOid,
4798  wfuncrc->inputcollid);
4799 
4800  runcondition = lappend(runcondition, opexpr);
4801 
4802  if (!topwindow)
4803  topqual = lappend(topqual, opexpr);
4804  }
4805  }
4806 
4807  path = (Path *)
4808  create_windowagg_path(root, window_rel, path, window_target,
4809  wflists->windowFuncs[wc->winref],
4810  runcondition, wc,
4811  topwindow ? topqual : NIL, topwindow);
4812  }
4813 
4814  add_path(window_rel, path);
4815 }
int32 clamp_width_est(int64 tuple_width)
Definition: costsize.c:242
int32 get_typavgwidth(Oid typid, int32 typmod)
Definition: lsyscache.c:2578
Expr * make_opclause(Oid opno, Oid opresulttype, bool opretset, Expr *leftop, Expr *rightop, Oid opcollid, Oid inputcollid)
Definition: makefuncs.c:628
#define copyObject(obj)
Definition: nodes.h:224
WindowAggPath * create_windowagg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *windowFuncs, List *runCondition, WindowClause *winclause, List *qual, bool topwindow)
Definition: pathnode.c:3577
static List * make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc, List *tlist)
Definition: planner.c:6229
List ** windowFuncs
Definition: clauses.h:23
void add_column_to_pathtarget(PathTarget *target, Expr *expr, Index sortgroupref)
Definition: tlist.c:695

References add_column_to_pathtarget(), add_path(), WindowFuncRunCondition::arg, clamp_width_est(), copy_pathtarget(), copyObject, create_incremental_sort_path(), create_sort_path(), create_windowagg_path(), enable_incremental_sort, foreach_current_index, get_typavgwidth(), InvalidOid, lappend(), lfirst_node, list_length(), lnext(), make_opclause(), make_pathkeys_for_window(), NIL, WindowFuncRunCondition::opno, Path::pathkeys, pathkeys_count_contained_in(), root, WindowFuncRunCondition::wfunc_left, PathTarget::width, WindowFuncLists::windowFuncs, and WindowClause::winref.

Referenced by create_window_paths().

◆ create_ordered_paths()

static RelOptInfo * create_ordered_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget target,
bool  target_parallel_safe,
double  limit_tuples 
)
static

Definition at line 5305 of file planner.c.

5310 {
5311  Path *cheapest_input_path = input_rel->cheapest_total_path;
5312  RelOptInfo *ordered_rel;
5313  ListCell *lc;
5314 
5315  /* For now, do all work in the (ORDERED, NULL) upperrel */
5316  ordered_rel = fetch_upper_rel(root, UPPERREL_ORDERED, NULL);
5317 
5318  /*
5319  * If the input relation is not parallel-safe, then the ordered relation
5320  * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
5321  * target list is parallel-safe.
5322  */
5323  if (input_rel->consider_parallel && target_parallel_safe)
5324  ordered_rel->consider_parallel = true;
5325 
5326  /*
5327  * If the input rel belongs to a single FDW, so does the ordered_rel.
5328  */
5329  ordered_rel->serverid = input_rel->serverid;
5330  ordered_rel->userid = input_rel->userid;
5331  ordered_rel->useridiscurrent = input_rel->useridiscurrent;
5332  ordered_rel->fdwroutine = input_rel->fdwroutine;
5333 
5334  foreach(lc, input_rel->pathlist)
5335  {
5336  Path *input_path = (Path *) lfirst(lc);
5337  Path *sorted_path;
5338  bool is_sorted;
5339  int presorted_keys;
5340 
5341  is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5342  input_path->pathkeys, &presorted_keys);
5343 
5344  if (is_sorted)
5345  sorted_path = input_path;
5346  else
5347  {
5348  /*
5349  * Try at least sorting the cheapest path and also try
5350  * incrementally sorting any path which is partially sorted
5351  * already (no need to deal with paths which have presorted keys
5352  * when incremental sort is disabled unless it's the cheapest
5353  * input path).
5354  */
5355  if (input_path != cheapest_input_path &&
5356  (presorted_keys == 0 || !enable_incremental_sort))
5357  continue;
5358 
5359  /*
5360  * We've no need to consider both a sort and incremental sort.
5361  * We'll just do a sort if there are no presorted keys and an
5362  * incremental sort when there are presorted keys.
5363  */
5364  if (presorted_keys == 0 || !enable_incremental_sort)
5365  sorted_path = (Path *) create_sort_path(root,
5366  ordered_rel,
5367  input_path,
5368  root->sort_pathkeys,
5369  limit_tuples);
5370  else
5371  sorted_path = (Path *) create_incremental_sort_path(root,
5372  ordered_rel,
5373  input_path,
5374  root->sort_pathkeys,
5375  presorted_keys,
5376  limit_tuples);
5377  }
5378 
5379  /*
5380  * If the pathtarget of the result path has different expressions from
5381  * the target to be applied, a projection step is needed.
5382  */
5383  if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5384  sorted_path = apply_projection_to_path(root, ordered_rel,
5385  sorted_path, target);
5386 
5387  add_path(ordered_rel, sorted_path);
5388  }
5389 
5390  /*
5391  * generate_gather_paths() will have already generated a simple Gather
5392  * path for the best parallel path, if any, and the loop above will have
5393  * considered sorting it. Similarly, generate_gather_paths() will also
5394  * have generated order-preserving Gather Merge plans which can be used
5395  * without sorting if they happen to match the sort_pathkeys, and the loop
5396  * above will have handled those as well. However, there's one more
5397  * possibility: it may make sense to sort the cheapest partial path or
5398  * incrementally sort any partial path that is partially sorted according
5399  * to the required output order and then use Gather Merge.
5400  */
5401  if (ordered_rel->consider_parallel && root->sort_pathkeys != NIL &&
5402  input_rel->partial_pathlist != NIL)
5403  {
5404  Path *cheapest_partial_path;
5405 
5406  cheapest_partial_path = linitial(input_rel->partial_pathlist);
5407 
5408  foreach(lc, input_rel->partial_pathlist)
5409  {
5410  Path *input_path = (Path *) lfirst(lc);
5411  Path *sorted_path;
5412  bool is_sorted;
5413  int presorted_keys;
5414  double total_groups;
5415 
5416  is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5417  input_path->pathkeys,
5418  &presorted_keys);
5419 
5420  if (is_sorted)
5421  continue;
5422 
5423  /*
5424  * Try at least sorting the cheapest path and also try
5425  * incrementally sorting any path which is partially sorted
5426  * already (no need to deal with paths which have presorted keys
5427  * when incremental sort is disabled unless it's the cheapest
5428  * partial path).
5429  */
5430  if (input_path != cheapest_partial_path &&
5431  (presorted_keys == 0 || !enable_incremental_sort))
5432  continue;
5433 
5434  /*
5435  * We've no need to consider both a sort and incremental sort.
5436  * We'll just do a sort if there are no presorted keys and an
5437  * incremental sort when there are presorted keys.
5438  */
5439  if (presorted_keys == 0 || !enable_incremental_sort)
5440  sorted_path = (Path *) create_sort_path(root,
5441  ordered_rel,
5442  input_path,
5443  root->sort_pathkeys,
5444  limit_tuples);
5445  else
5446  sorted_path = (Path *) create_incremental_sort_path(root,
5447  ordered_rel,
5448  input_path,
5449  root->sort_pathkeys,
5450  presorted_keys,
5451  limit_tuples);
5452  total_groups = compute_gather_rows(sorted_path);
5453  sorted_path = (Path *)
5454  create_gather_merge_path(root, ordered_rel,
5455  sorted_path,
5456  sorted_path->pathtarget,
5457  root->sort_pathkeys, NULL,
5458  &total_groups);
5459 
5460  /*
5461  * If the pathtarget of the result path has different expressions
5462  * from the target to be applied, a projection step is needed.
5463  */
5464  if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5465  sorted_path = apply_projection_to_path(root, ordered_rel,
5466  sorted_path, target);
5467 
5468  add_path(ordered_rel, sorted_path);
5469  }
5470  }
5471 
5472  /*
5473  * If there is an FDW that's responsible for all baserels of the query,
5474  * let it consider adding ForeignPaths.
5475  */
5476  if (ordered_rel->fdwroutine &&
5477  ordered_rel->fdwroutine->GetForeignUpperPaths)
5478  ordered_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_ORDERED,
5479  input_rel, ordered_rel,
5480  NULL);
5481 
5482  /* Let extensions possibly add some more paths */
5483  if (create_upper_paths_hook)
5484  (*create_upper_paths_hook) (root, UPPERREL_ORDERED,
5485  input_rel, ordered_rel, NULL);
5486 
5487  /*
5488  * No need to bother with set_cheapest here; grouping_planner does not
5489  * need us to do it.
5490  */
5491  Assert(ordered_rel->pathlist != NIL);
5492 
5493  return ordered_rel;
5494 }
double compute_gather_rows(Path *path)
Definition: costsize.c:6553
bool equal(const void *a, const void *b)
Definition: equalfuncs.c:223
GatherMergePath * create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *pathkeys, Relids required_outer, double *rows)
Definition: pathnode.c:1962
@ UPPERREL_ORDERED
Definition: pathnodes.h:78

References add_path(), apply_projection_to_path(), Assert, RelOptInfo::cheapest_total_path, compute_gather_rows(), RelOptInfo::consider_parallel, create_gather_merge_path(), create_incremental_sort_path(), create_sort_path(), create_upper_paths_hook, enable_incremental_sort, equal(), PathTarget::exprs, fetch_upper_rel(), lfirst, linitial, NIL, RelOptInfo::partial_pathlist, Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::pathlist, root, RelOptInfo::serverid, UPPERREL_ORDERED, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by grouping_planner().

◆ create_ordinary_grouping_paths()

static void create_ordinary_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo grouped_rel,
const AggClauseCosts agg_costs,
grouping_sets_data gd,
GroupPathExtraData extra,
RelOptInfo **  partially_grouped_rel_p 
)
static

Definition at line 4071 of file planner.c.

4077 {
4078  Path *cheapest_path = input_rel->cheapest_total_path;
4079  RelOptInfo *partially_grouped_rel = NULL;
4080  double dNumGroups;
4081  PartitionwiseAggregateType patype = PARTITIONWISE_AGGREGATE_NONE;
4082 
4083  /*
4084  * If this is the topmost grouping relation or if the parent relation is
4085  * doing some form of partitionwise aggregation, then we may be able to do
4086  * it at this level also. However, if the input relation is not
4087  * partitioned, partitionwise aggregate is impossible.
4088  */
4089  if (extra->patype != PARTITIONWISE_AGGREGATE_NONE &&
4090  IS_PARTITIONED_REL(input_rel))
4091  {
4092  /*
4093  * If this is the topmost relation or if the parent relation is doing
4094  * full partitionwise aggregation, then we can do full partitionwise
4095  * aggregation provided that the GROUP BY clause contains all of the
4096  * partitioning columns at this level. Otherwise, we can do at most
4097  * partial partitionwise aggregation. But if partial aggregation is
4098  * not supported in general then we can't use it for partitionwise
4099  * aggregation either.
4100  *
4101  * Check parse->groupClause not processed_groupClause, because it's
4102  * okay if some of the partitioning columns were proved redundant.
4103  */
4104  if (extra->patype == PARTITIONWISE_AGGREGATE_FULL &&
4105  group_by_has_partkey(input_rel, extra->targetList,
4106  root->parse->groupClause))
4107  patype = PARTITIONWISE_AGGREGATE_FULL;
4108  else if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4109  patype = PARTITIONWISE_AGGREGATE_PARTIAL;
4110  else
4111  patype = PARTITIONWISE_AGGREGATE_NONE;
4112  }
4113 
4114  /*
4115  * Before generating paths for grouped_rel, we first generate any possible
4116  * partially grouped paths; that way, later code can easily consider both
4117  * parallel and non-parallel approaches to grouping.
4118  */
4119  if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4120  {
4121  bool force_rel_creation;
4122 
4123  /*
4124  * If we're doing partitionwise aggregation at this level, force
4125  * creation of a partially_grouped_rel so we can add partitionwise
4126  * paths to it.
4127  */
4128  force_rel_creation = (patype == PARTITIONWISE_AGGREGATE_PARTIAL);
4129 
4130  partially_grouped_rel =
4131  create_partial_grouping_paths(root,
4132  grouped_rel,
4133  input_rel,
4134  gd,
4135  extra,
4136  force_rel_creation);
4137  }
4138 
4139  /* Set out parameter. */
4140  *partially_grouped_rel_p = partially_grouped_rel;
4141 
4142  /* Apply partitionwise aggregation technique, if possible. */
4143  if (patype != PARTITIONWISE_AGGREGATE_NONE)
4144  create_partitionwise_grouping_paths(root, input_rel, grouped_rel,
4145  partially_grouped_rel, agg_costs,
4146  gd, patype, extra);
4147 
4148  /* If we are doing partial aggregation only, return. */
4149  if (extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
4150  {
4151  Assert(partially_grouped_rel);
4152 
4153  if (partially_grouped_rel->pathlist)
4154  set_cheapest(partially_grouped_rel);
4155 
4156  return;
4157  }
4158 
4159  /* Gather any partially grouped partial paths. */
4160  if (partially_grouped_rel && partially_grouped_rel->partial_pathlist)
4161  {
4162  gather_grouping_paths(root, partially_grouped_rel);
4163  set_cheapest(partially_grouped_rel);
4164  }
4165 
4166  /*
4167  * Estimate number of groups.
4168  */
4169  dNumGroups = get_number_of_groups(root,
4170  cheapest_path->rows,
4171  gd,
4172  extra->targetList);
4173 
4174  /* Build final grouping paths */
4175  add_paths_to_grouping_rel(root, input_rel, grouped_rel,
4176  partially_grouped_rel, agg_costs, gd,
4177  dNumGroups, extra);
4178 
4179  /* Give a helpful error if we failed to find any implementation */
4180  if (grouped_rel->pathlist == NIL)
4181  ereport(ERROR,
4182  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4183  errmsg("could not implement GROUP BY"),
4184  errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4185 
4186  /*
4187  * If there is an FDW that's responsible for all baserels of the query,
4188  * let it consider adding ForeignPaths.
4189  */
4190  if (grouped_rel->fdwroutine &&
4191  grouped_rel->fdwroutine->GetForeignUpperPaths)
4192  grouped_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_GROUP_AGG,
4193  input_rel, grouped_rel,
4194  extra);
4195 
4196  /* Let extensions possibly add some more paths */
4197  if (create_upper_paths_hook)
4198  (*create_upper_paths_hook) (root, UPPERREL_GROUP_AGG,
4199  input_rel, grouped_rel,
4200  extra);
4201 }
PartitionwiseAggregateType
Definition: pathnodes.h:3276
@ PARTITIONWISE_AGGREGATE_PARTIAL
Definition: pathnodes.h:3279
@ UPPERREL_GROUP_AGG
Definition: pathnodes.h:74
static RelOptInfo * create_partial_grouping_paths(PlannerInfo *root, RelOptInfo *grouped_rel, RelOptInfo *input_rel, grouping_sets_data *gd, GroupPathExtraData *extra, bool force_rel_creation)
Definition: planner.c:7308
static void add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, double dNumGroups, GroupPathExtraData *extra)
Definition: planner.c:7073
static double get_number_of_groups(PlannerInfo *root, double path_rows, grouping_sets_data *gd, List *target_list)
Definition: planner.c:3698
static void create_partitionwise_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, PartitionwiseAggregateType patype, GroupPathExtraData *extra)
Definition: planner.c:7967
static bool group_by_has_partkey(RelOptInfo *input_rel, List *targetList, List *groupClause)
Definition: planner.c:8111

References add_paths_to_grouping_rel(), Assert, RelOptInfo::cheapest_total_path, create_partial_grouping_paths(), create_partitionwise_grouping_paths(), create_upper_paths_hook, ereport, errcode(), errdetail(), errmsg(), ERROR, GroupPathExtraData::flags, gather_grouping_paths(), get_number_of_groups(), group_by_has_partkey(), GROUPING_CAN_PARTIAL_AGG, IS_PARTITIONED_REL, NIL, RelOptInfo::partial_pathlist, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, PARTITIONWISE_AGGREGATE_PARTIAL, RelOptInfo::pathlist, GroupPathExtraData::patype, root, Path::rows, set_cheapest(), GroupPathExtraData::targetList, and UPPERREL_GROUP_AGG.

Referenced by create_grouping_paths(), and create_partitionwise_grouping_paths().

◆ create_partial_distinct_paths()

static void create_partial_distinct_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo final_distinct_rel,
PathTarget target 
)
static

Definition at line 4899 of file planner.c.

4902 {
4903  RelOptInfo *partial_distinct_rel;
4904  Query *parse;
4905  List *distinctExprs;
4906  double numDistinctRows;
4907  Path *cheapest_partial_path;
4908  ListCell *lc;
4909 
4910  /* nothing to do when there are no partial paths in the input rel */
4911  if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
4912  return;
4913 
4914  parse = root->parse;
4915 
4916  /* can't do parallel DISTINCT ON */
4917  if (parse->hasDistinctOn)
4918  return;
4919 
4920  partial_distinct_rel = fetch_upper_rel(root, UPPERREL_PARTIAL_DISTINCT,
4921  NULL);
4922  partial_distinct_rel->reltarget = target;
4923  partial_distinct_rel->consider_parallel = input_rel->consider_parallel;
4924 
4925  /*
4926  * If input_rel belongs to a single FDW, so does the partial_distinct_rel.
4927  */
4928  partial_distinct_rel->serverid = input_rel->serverid;
4929  partial_distinct_rel->userid = input_rel->userid;
4930  partial_distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4931  partial_distinct_rel->fdwroutine = input_rel->fdwroutine;
4932 
4933  cheapest_partial_path = linitial(input_rel->partial_pathlist);
4934 
4935  distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
4936  parse->targetList);
4937 
4938  /* estimate how many distinct rows we'll get from each worker */
4939  numDistinctRows = estimate_num_groups(root, distinctExprs,
4940  cheapest_partial_path->rows,
4941  NULL, NULL);
4942 
4943  /*
4944  * Try sorting the cheapest path and incrementally sorting any paths with
4945  * presorted keys and put a unique paths atop of those.
4946  */
4947  if (grouping_is_sortable(root->processed_distinctClause))
4948  {
4949  foreach(lc, input_rel->partial_pathlist)
4950  {
4951  Path *input_path = (Path *) lfirst(lc);
4952  Path *sorted_path;
4953  bool is_sorted;
4954  int presorted_keys;
4955 
4956  is_sorted = pathkeys_count_contained_in(root->distinct_pathkeys,
4957  input_path->pathkeys,
4958  &presorted_keys);
4959 
4960  if (is_sorted)
4961  sorted_path = input_path;
4962  else
4963  {
4964  /*
4965  * Try at least sorting the cheapest path and also try
4966  * incrementally sorting any path which is partially sorted
4967  * already (no need to deal with paths which have presorted
4968  * keys when incremental sort is disabled unless it's the
4969  * cheapest partial path).
4970  */
4971  if (input_path != cheapest_partial_path &&
4972  (presorted_keys == 0 || !enable_incremental_sort))
4973  continue;
4974 
4975  /*
4976  * We've no need to consider both a sort and incremental sort.
4977  * We'll just do a sort if there are no presorted keys and an
4978  * incremental sort when there are presorted keys.
4979  */
4980  if (presorted_keys == 0 || !enable_incremental_sort)
4981  sorted_path = (Path *) create_sort_path(root,
4982  partial_distinct_rel,
4983  input_path,
4984  root->distinct_pathkeys,
4985  -1.0);
4986  else
4987  sorted_path = (Path *) create_incremental_sort_path(root,
4988  partial_distinct_rel,
4989  input_path,
4990  root->distinct_pathkeys,
4991  presorted_keys,
4992  -1.0);
4993  }
4994 
4995  /*
4996  * An empty distinct_pathkeys means all tuples have the same value
4997  * for the DISTINCT clause. See create_final_distinct_paths()
4998  */
4999  if (root->distinct_pathkeys == NIL)
5000  {
5001  Node *limitCount;
5002 
5003  limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
5004  sizeof(int64),
5005  Int64GetDatum(1), false,
5006  FLOAT8PASSBYVAL);
5007 
5008  /*
5009  * Apply a LimitPath onto the partial path to restrict the
5010  * tuples from each worker to 1. create_final_distinct_paths
5011  * will need to apply an additional LimitPath to restrict this
5012  * to a single row after the Gather node. If the query
5013  * already has a LIMIT clause, then we could end up with three
5014  * Limit nodes in the final plan. Consolidating the top two
5015  * of these could be done, but does not seem worth troubling
5016  * over.
5017  */
5018  add_partial_path(partial_distinct_rel, (Path *)
5019  create_limit_path(root, partial_distinct_rel,
5020  sorted_path,
5021  NULL,
5022  limitCount,
5024  0, 1));
5025  }
5026  else
5027  {
5028  add_partial_path(partial_distinct_rel, (Path *)
5029  create_upper_unique_path(root, partial_distinct_rel,
5030  sorted_path,
5031  list_length(root->distinct_pathkeys),
5032  numDistinctRows));
5033  }
5034  }
5035  }
5036 
5037  /*
5038  * Now try hash aggregate paths, if enabled and hashing is possible. Since
5039  * we're not on the hook to ensure we do our best to create at least one
5040  * path here, we treat enable_hashagg as a hard off-switch rather than the
5041  * slightly softer variant in create_final_distinct_paths.
5042  */
5043  if (enable_hashagg && grouping_is_hashable(root->processed_distinctClause))
5044  {
5045  add_partial_path(partial_distinct_rel, (Path *)
5047  partial_distinct_rel,
5048  cheapest_partial_path,
5049  cheapest_partial_path->pathtarget,
5050  AGG_HASHED,
5052  root->processed_distinctClause,
5053  NIL,
5054  NULL,
5055  numDistinctRows));
5056  }
5057 
5058  /*
5059  * If there is an FDW that's responsible for all baserels of the query,
5060  * let it consider adding ForeignPaths.
5061  */
5062  if (partial_distinct_rel->fdwroutine &&
5063  partial_distinct_rel->fdwroutine->GetForeignUpperPaths)
5064  partial_distinct_rel->fdwroutine->GetForeignUpperPaths(root,
5066  input_rel,
5067  partial_distinct_rel,
5068  NULL);
5069 
5070  /* Let extensions possibly add some more partial paths */
5072  (*create_upper_paths_hook) (root, UPPERREL_PARTIAL_DISTINCT,
5073  input_rel, partial_distinct_rel, NULL);
5074 
5075  if (partial_distinct_rel->partial_pathlist != NIL)
5076  {
5077  generate_useful_gather_paths(root, partial_distinct_rel, true);
5078  set_cheapest(partial_distinct_rel);
5079 
5080  /*
5081  * Finally, create paths to distinctify the final result. This step
5082  * is needed to remove any duplicates due to combining rows from
5083  * parallel workers.
5084  */
5085  create_final_distinct_paths(root, partial_distinct_rel,
5086  final_distinct_rel);
5087  }
5088 }
void add_partial_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:795
@ UPPERREL_PARTIAL_DISTINCT
Definition: pathnodes.h:76

References add_partial_path(), AGG_HASHED, AGGSPLIT_SIMPLE, RelOptInfo::consider_parallel, create_agg_path(), create_final_distinct_paths(), create_incremental_sort_path(), create_limit_path(), create_sort_path(), create_upper_paths_hook, create_upper_unique_path(), enable_hashagg, enable_incremental_sort, estimate_num_groups(), fetch_upper_rel(), FLOAT8PASSBYVAL, generate_useful_gather_paths(), get_sortgrouplist_exprs(), grouping_is_hashable(), grouping_is_sortable(), Int64GetDatum(), InvalidOid, lfirst, LIMIT_OPTION_COUNT, linitial, list_length(), makeConst(), NIL, parse(), RelOptInfo::partial_pathlist, Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::reltarget, root, Path::rows, RelOptInfo::serverid, set_cheapest(), UPPERREL_PARTIAL_DISTINCT, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by create_distinct_paths().

◆ create_partial_grouping_paths()

static RelOptInfo * create_partial_grouping_paths ( PlannerInfo root,
RelOptInfo grouped_rel,
RelOptInfo input_rel,
grouping_sets_data gd,
GroupPathExtraData extra,
bool  force_rel_creation 
)
static

Definition at line 7308 of file planner.c.

7314 {
7315  Query *parse = root->parse;
7316  RelOptInfo *partially_grouped_rel;
7317  AggClauseCosts *agg_partial_costs = &extra->agg_partial_costs;
7318  AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7319  Path *cheapest_partial_path = NULL;
7320  Path *cheapest_total_path = NULL;
7321  double dNumPartialGroups = 0;
7322  double dNumPartialPartialGroups = 0;
7323  ListCell *lc;
7324  bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7325  bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7326 
7327  /*
7328  * Consider whether we should generate partially aggregated non-partial
7329  * paths. We can only do this if we have a non-partial path, and only if
7330  * the parent of the input rel is performing partial partitionwise
7331  * aggregation. (Note that extra->patype is the type of partitionwise
7332  * aggregation being used at the parent level, not this level.)
7333  */
7334  if (input_rel->pathlist != NIL &&
7336  cheapest_total_path = input_rel->cheapest_total_path;
7337 
7338  /*
7339  * If parallelism is possible for grouped_rel, then we should consider
7340  * generating partially-grouped partial paths. However, if the input rel
7341  * has no partial paths, then we can't.
7342  */
7343  if (grouped_rel->consider_parallel && input_rel->partial_pathlist != NIL)
7344  cheapest_partial_path = linitial(input_rel->partial_pathlist);
7345 
7346  /*
7347  * If we can't partially aggregate partial paths, and we can't partially
7348  * aggregate non-partial paths, then don't bother creating the new
7349  * RelOptInfo at all, unless the caller specified force_rel_creation.
7350  */
7351  if (cheapest_total_path == NULL &&
7352  cheapest_partial_path == NULL &&
7353  !force_rel_creation)
7354  return NULL;
7355 
7356  /*
7357  * Build a new upper relation to represent the result of partially
7358  * aggregating the rows from the input relation.
7359  */
7360  partially_grouped_rel = fetch_upper_rel(root,
7362  grouped_rel->relids);
7363  partially_grouped_rel->consider_parallel =
7364  grouped_rel->consider_parallel;
7365  partially_grouped_rel->reloptkind = grouped_rel->reloptkind;
7366  partially_grouped_rel->serverid = grouped_rel->serverid;
7367  partially_grouped_rel->userid = grouped_rel->userid;
7368  partially_grouped_rel->useridiscurrent = grouped_rel->useridiscurrent;
7369  partially_grouped_rel->fdwroutine = grouped_rel->fdwroutine;
7370 
7371  /*
7372  * Build target list for partial aggregate paths. These paths cannot just
7373  * emit the same tlist as regular aggregate paths, because (1) we must
7374  * include Vars and Aggrefs needed in HAVING, which might not appear in
7375  * the result tlist, and (2) the Aggrefs must be set in partial mode.
7376  */
7377  partially_grouped_rel->reltarget =
7379  extra->havingQual);
7380 
7381  if (!extra->partial_costs_set)
7382  {
7383  /*
7384  * Collect statistics about aggregates for estimating costs of
7385  * performing aggregation in parallel.
7386  */
7387  MemSet(agg_partial_costs, 0, sizeof(AggClauseCosts));
7388  MemSet(agg_final_costs, 0, sizeof(AggClauseCosts));
7389  if (parse->hasAggs)
7390  {
7391  /* partial phase */
7393  agg_partial_costs);
7394 
7395  /* final phase */
7397  agg_final_costs);
7398  }
7399 
7400  extra->partial_costs_set = true;
7401  }
7402 
7403  /* Estimate number of partial groups. */
7404  if (cheapest_total_path != NULL)
7405  dNumPartialGroups =
7407  cheapest_total_path->rows,
7408  gd,
7409  extra->targetList);
7410  if (cheapest_partial_path != NULL)
7411  dNumPartialPartialGroups =
7413  cheapest_partial_path->rows,
7414  gd,
7415  extra->targetList);
7416 
7417  if (can_sort && cheapest_total_path != NULL)
7418  {
7419  /* This should have been checked previously */
7420  Assert(parse->hasAggs || parse->groupClause);
7421 
7422  /*
7423  * Use any available suitably-sorted path as input, and also consider
7424  * sorting the cheapest partial path.
7425  */
7426  foreach(lc, input_rel->pathlist)
7427  {
7428  ListCell *lc2;
7429  Path *path = (Path *) lfirst(lc);
7430  Path *path_save = path;
7431  List *pathkey_orderings = NIL;
7432 
7433  /* generate alternative group orderings that might be useful */
7434  pathkey_orderings = get_useful_group_keys_orderings(root, path);
7435 
7436  Assert(list_length(pathkey_orderings) > 0);
7437 
7438  /* process all potentially interesting grouping reorderings */
7439  foreach(lc2, pathkey_orderings)
7440  {
7441  GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7442 
7443  /* restore the path (we replace it in the loop) */
7444  path = path_save;
7445 
7446  path = make_ordered_path(root,
7447  partially_grouped_rel,
7448  path,
7449  cheapest_total_path,
7450  info->pathkeys);
7451 
7452  if (path == NULL)
7453  continue;
7454 
7455  if (parse->hasAggs)
7456  add_path(partially_grouped_rel, (Path *)
7458  partially_grouped_rel,
7459  path,
7460  partially_grouped_rel->reltarget,
7461  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7463  info->clauses,
7464  NIL,
7465  agg_partial_costs,
7466  dNumPartialGroups));
7467  else
7468  add_path(partially_grouped_rel, (Path *)
7470  partially_grouped_rel,
7471  path,
7472  info->clauses,
7473  NIL,
7474  dNumPartialGroups));
7475  }
7476  }
7477  }
7478 
7479  if (can_sort && cheapest_partial_path != NULL)
7480  {
7481  /* Similar to above logic, but for partial paths. */
7482  foreach(lc, input_rel->partial_pathlist)
7483  {
7484  ListCell *lc2;
7485  Path *path = (Path *) lfirst(lc);
7486  Path *path_save = path;
7487  List *pathkey_orderings = NIL;
7488 
7489  /* generate alternative group orderings that might be useful */
7490  pathkey_orderings = get_useful_group_keys_orderings(root, path);
7491 
7492  Assert(list_length(pathkey_orderings) > 0);
7493 
7494  /* process all potentially interesting grouping reorderings */
7495  foreach(lc2, pathkey_orderings)
7496  {
7497  GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7498 
7499 
7500  /* restore the path (we replace it in the loop) */
7501  path = path_save;
7502 
7503  path = make_ordered_path(root,
7504  partially_grouped_rel,
7505  path,
7506  cheapest_partial_path,
7507  info->pathkeys);
7508 
7509  if (path == NULL)
7510  continue;
7511 
7512  if (parse->hasAggs)
7513  add_partial_path(partially_grouped_rel, (Path *)
7515  partially_grouped_rel,
7516  path,
7517  partially_grouped_rel->reltarget,
7518  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7520  info->clauses,
7521  NIL,
7522  agg_partial_costs,
7523  dNumPartialPartialGroups));
7524  else
7525  add_partial_path(partially_grouped_rel, (Path *)
7527  partially_grouped_rel,
7528  path,
7529  info->clauses,
7530  NIL,
7531  dNumPartialPartialGroups));
7532  }
7533  }
7534  }
7535 
7536  /*
7537  * Add a partially-grouped HashAgg Path where possible
7538  */
7539  if (can_hash && cheapest_total_path != NULL)
7540  {
7541  /* Checked above */
7542  Assert(parse->hasAggs || parse->groupClause);
7543 
7544  add_path(partially_grouped_rel, (Path *)
7546  partially_grouped_rel,
7547  cheapest_total_path,
7548  partially_grouped_rel->reltarget,
7549  AGG_HASHED,
7551  root->processed_groupClause,
7552  NIL,
7553  agg_partial_costs,
7554  dNumPartialGroups));
7555  }
7556 
7557  /*
7558  * Now add a partially-grouped HashAgg partial Path where possible
7559  */
7560  if (can_hash && cheapest_partial_path != NULL)
7561  {
7562  add_partial_path(partially_grouped_rel, (Path *)
7564  partially_grouped_rel,
7565  cheapest_partial_path,
7566  partially_grouped_rel->reltarget,
7567  AGG_HASHED,
7569  root->processed_groupClause,
7570  NIL,
7571  agg_partial_costs,
7572  dNumPartialPartialGroups));
7573  }
7574 
7575  /*
7576  * If there is an FDW that's responsible for all baserels of the query,
7577  * let it consider adding partially grouped ForeignPaths.
7578  */
7579  if (partially_grouped_rel->fdwroutine &&
7580  partially_grouped_rel->fdwroutine->GetForeignUpperPaths)
7581  {
7582  FdwRoutine *fdwroutine = partially_grouped_rel->fdwroutine;
7583 
7584  fdwroutine->GetForeignUpperPaths(root,
7586  input_rel, partially_grouped_rel,
7587  extra);
7588  }
7589 
7590  return partially_grouped_rel;
7591 }
@ AGGSPLIT_INITIAL_SERIAL
Definition: nodes.h:379
@ UPPERREL_PARTIAL_GROUP_AGG
Definition: pathnodes.h:72
static PathTarget * make_partial_grouping_target(PlannerInfo *root, PathTarget *grouping_target, Node *havingQual)
Definition: planner.c:5637
GetForeignUpperPaths_function GetForeignUpperPaths
Definition: fdwapi.h:226
AggClauseCosts agg_partial_costs
Definition: pathnodes.h:3300
RelOptKind reloptkind
Definition: pathnodes.h:865

References add_partial_path(), add_path(), GroupPathExtraData::agg_final_costs, AGG_HASHED, GroupPathExtraData::agg_partial_costs, AGG_PLAIN, AGG_SORTED, AGGSPLIT_FINAL_DESERIAL, AGGSPLIT_INITIAL_SERIAL, Assert, RelOptInfo::cheapest_total_path, GroupByOrdering::clauses, RelOptInfo::consider_parallel, create_agg_path(), create_group_path(), fetch_upper_rel(), GroupPathExtraData::flags, get_agg_clause_costs(), get_number_of_groups(), get_useful_group_keys_orderings(), FdwRoutine::GetForeignUpperPaths, GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, GroupPathExtraData::havingQual, lfirst, linitial, list_length(), make_ordered_path(), make_partial_grouping_target(), MemSet, NIL, parse(), GroupPathExtraData::partial_costs_set, RelOptInfo::partial_pathlist, PARTITIONWISE_AGGREGATE_PARTIAL, GroupByOrdering::pathkeys, RelOptInfo::pathlist, GroupPathExtraData::patype, RelOptInfo::relids, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, Path::rows, RelOptInfo::serverid, GroupPathExtraData::targetList, UPPERREL_PARTIAL_GROUP_AGG, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by create_ordinary_grouping_paths().

◆ create_partitionwise_grouping_paths()

static void create_partitionwise_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo grouped_rel,
RelOptInfo partially_grouped_rel,
const AggClauseCosts agg_costs,
grouping_sets_data gd,
PartitionwiseAggregateType  patype,
GroupPathExtraData extra 
)
static

Definition at line 7967 of file planner.c.

7975 {
7976  List *grouped_live_children = NIL;
7977  List *partially_grouped_live_children = NIL;
7978  PathTarget *target = grouped_rel->reltarget;
7979  bool partial_grouping_valid = true;
7980  int i;
7981 
7984  partially_grouped_rel != NULL);
7985 
7986  /* Add paths for partitionwise aggregation/grouping. */
7987  i = -1;
7988  while ((i = bms_next_member(input_rel->live_parts, i)) >= 0)
7989  {
7990  RelOptInfo *child_input_rel = input_rel->part_rels[i];
7991  PathTarget *child_target;
7992  AppendRelInfo **appinfos;
7993  int nappinfos;
7994  GroupPathExtraData child_extra;
7995  RelOptInfo *child_grouped_rel;
7996  RelOptInfo *child_partially_grouped_rel;
7997 
7998  Assert(child_input_rel != NULL);
7999 
8000  /* Dummy children can be ignored. */
8001  if (IS_DUMMY_REL(child_input_rel))
8002  continue;
8003 
8004  child_target = copy_pathtarget(target);
8005 
8006  /*
8007  * Copy the given "extra" structure as is and then override the
8008  * members specific to this child.
8009  */
8010  memcpy(&child_extra, extra, sizeof(child_extra));
8011 
8012  appinfos = find_appinfos_by_relids(root, child_input_rel->relids,
8013  &nappinfos);
8014 
8015  child_target->exprs = (List *)
8017  (Node *) target->exprs,
8018  nappinfos, appinfos);
8019 
8020  /* Translate havingQual and targetList. */
8021  child_extra.havingQual = (Node *)
8023  extra->havingQual,
8024  nappinfos, appinfos);
8025  child_extra.targetList = (List *)
8027  (Node *) extra->targetList,
8028  nappinfos, appinfos);
8029 
8030  /*
8031  * extra->patype was the value computed for our parent rel; patype is
8032  * the value for this relation. For the child, our value is its
8033  * parent rel's value.
8034  */
8035  child_extra.patype = patype;
8036 
8037  /*
8038  * Create grouping relation to hold fully aggregated grouping and/or
8039  * aggregation paths for the child.
8040  */
8041  child_grouped_rel = make_grouping_rel(root, child_input_rel,
8042  child_target,
8043  extra->target_parallel_safe,
8044  child_extra.havingQual);
8045 
8046  /* Create grouping paths for this child relation. */
8047  create_ordinary_grouping_paths(root, child_input_rel,
8048  child_grouped_rel,
8049  agg_costs, gd, &child_extra,
8050  &child_partially_grouped_rel);
8051 
8052  if (child_partially_grouped_rel)
8053  {
8054  partially_grouped_live_children =
8055  lappend(partially_grouped_live_children,
8056  child_partially_grouped_rel);
8057  }
8058  else
8059  partial_grouping_valid = false;
8060 
8061  if (patype == PARTITIONWISE_AGGREGATE_FULL)
8062  {
8063  set_cheapest(child_grouped_rel);
8064  grouped_live_children = lappend(grouped_live_children,
8065  child_grouped_rel);
8066  }
8067 
8068  pfree(appinfos);
8069  }
8070 
8071  /*
8072  * Try to create append paths for partially grouped children. For full
8073  * partitionwise aggregation, we might have paths in the partial_pathlist
8074  * if parallel aggregation is possible. For partial partitionwise
8075  * aggregation, we may have paths in both pathlist and partial_pathlist.
8076  *
8077  * NB: We must have a partially grouped path for every child in order to
8078  * generate a partially grouped path for this relation.
8079  */
8080  if (partially_grouped_rel && partial_grouping_valid)
8081  {
8082  Assert(partially_grouped_live_children != NIL);
8083 
8084  add_paths_to_append_rel(root, partially_grouped_rel,
8085  partially_grouped_live_children);
8086 
8087  /*
8088  * We need call set_cheapest, since the finalization step will use the
8089  * cheapest path from the rel.
8090  */
8091  if (partially_grouped_rel->pathlist)
8092  set_cheapest(partially_grouped_rel);
8093  }
8094 
8095  /* If possible, create append paths for fully grouped children. */
8096  if (patype == PARTITIONWISE_AGGREGATE_FULL)
8097  {
8098  Assert(grouped_live_children != NIL);
8099 
8100  add_paths_to_append_rel(root, grouped_rel, grouped_live_children);
8101  }
8102 }

References add_paths_to_append_rel(), adjust_appendrel_attrs(), Assert, bms_next_member(), copy_pathtarget(), create_ordinary_grouping_paths(), PathTarget::exprs, find_appinfos_by_relids(), GroupPathExtraData::havingQual, i, IS_DUMMY_REL, lappend(), RelOptInfo::live_parts, make_grouping_rel(), NIL, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, PARTITIONWISE_AGGREGATE_PARTIAL, RelOptInfo::pathlist, GroupPathExtraData::patype, pfree(), RelOptInfo::relids, RelOptInfo::reltarget, root, set_cheapest(), GroupPathExtraData::target_parallel_safe, and GroupPathExtraData::targetList.

Referenced by create_ordinary_grouping_paths().

◆ create_window_paths()

static RelOptInfo * create_window_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget input_target,
PathTarget output_target,
bool  output_target_parallel_safe,
WindowFuncLists wflists,
List activeWindows 
)
static

Definition at line 4572 of file planner.c.

4579 {
4580  RelOptInfo *window_rel;
4581  ListCell *lc;
4582 
4583  /* For now, do all work in the (WINDOW, NULL) upperrel */
4584  window_rel = fetch_upper_rel(root, UPPERREL_WINDOW, NULL);
4585 
4586  /*
4587  * If the input relation is not parallel-safe, then the window relation
4588  * can't be parallel-safe, either. Otherwise, we need to examine the
4589  * target list and active windows for non-parallel-safe constructs.
4590  */
4591  if (input_rel->consider_parallel && output_target_parallel_safe &&
4592  is_parallel_safe(root, (Node *) activeWindows))
4593  window_rel->consider_parallel = true;
4594 
4595  /*
4596  * If the input rel belongs to a single FDW, so does the window rel.
4597  */
4598  window_rel->serverid = input_rel->serverid;
4599  window_rel->userid = input_rel->userid;
4600  window_rel->useridiscurrent = input_rel->useridiscurrent;
4601  window_rel->fdwroutine = input_rel->fdwroutine;
4602 
4603  /*
4604  * Consider computing window functions starting from the existing
4605  * cheapest-total path (which will likely require a sort) as well as any
4606  * existing paths that satisfy or partially satisfy root->window_pathkeys.
4607  */
4608  foreach(lc, input_rel->pathlist)
4609  {
4610  Path *path = (Path *) lfirst(lc);
4611  int presorted_keys;
4612 
4613  if (path == input_rel->cheapest_total_path ||
4614  pathkeys_count_contained_in(root->window_pathkeys, path->pathkeys,
4615  &presorted_keys) ||
4616  presorted_keys > 0)
4618  window_rel,
4619  path,
4620  input_target,
4621  output_target,
4622  wflists,
4623  activeWindows);
4624  }
4625 
4626  /*
4627  * If there is an FDW that's responsible for all baserels of the query,
4628  * let it consider adding ForeignPaths.
4629  */
4630  if (window_rel->fdwroutine &&
4631  window_rel->fdwroutine->GetForeignUpperPaths)
4632  window_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_WINDOW,
4633  input_rel, window_rel,
4634  NULL);
4635 
4636  /* Let extensions possibly add some more paths */
4638  (*create_upper_paths_hook) (root, UPPERREL_WINDOW,
4639  input_rel, window_rel, NULL);
4640 
4641  /* Now choose the best path(s) */
4642  set_cheapest(window_rel);
4643 
4644  return window_rel;
4645 }
bool is_parallel_safe(PlannerInfo *root, Node *node)
Definition: clauses.c:753
@ UPPERREL_WINDOW
Definition: pathnodes.h:75
static void create_one_window_path(PlannerInfo *root, RelOptInfo *window_rel, Path *path, PathTarget *input_target, PathTarget *output_target, WindowFuncLists *wflists, List *activeWindows)
Definition: planner.c:4659

References RelOptInfo::cheapest_total_path, RelOptInfo::consider_parallel, create_one_window_path(), create_upper_paths_hook, fetch_upper_rel(), is_parallel_safe(), lfirst, Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::pathlist, root, RelOptInfo::serverid, set_cheapest(), UPPERREL_WINDOW, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by grouping_planner().

◆ expression_planner()

Expr* expression_planner ( Expr * expr)

Definition at line 6687 of file planner.c.

6688 {
6689  Node *result;
6690 
6691  /*
6692  * Convert named-argument function calls, insert default arguments and
6693  * simplify constant subexprs
6694  */
6695  result = eval_const_expressions(NULL, (Node *) expr);
6696 
6697  /* Fill in opfuncid values if missing */
6698  fix_opfuncids(result);
6699 
6700  return (Expr *) result;
6701 }
Node * eval_const_expressions(PlannerInfo *root, Node *node)
Definition: clauses.c:2254
void fix_opfuncids(Node *node)
Definition: nodeFuncs.c:1831

References eval_const_expressions(), and fix_opfuncids().

Referenced by ATExecAddColumn(), ATExecSetExpression(), ATPrepAlterColumnType(), BeginCopyFrom(), ComputePartitionAttrs(), contain_mutable_functions_after_planning(), contain_volatile_functions_after_planning(), ExecPrepareCheck(), ExecPrepareExpr(), ExecPrepareQual(), load_domaintype_info(), set_baserel_partition_constraint(), slot_fill_defaults(), StoreAttrDefault(), and transformPartitionBoundValue().

◆ expression_planner_with_deps()

Expr* expression_planner_with_deps ( Expr * expr,
List **  relationOids,
List **  invalItems 
)

Definition at line 6714 of file planner.c.

6717 {
6718  Node *result;
6719  PlannerGlobal glob;
6720  PlannerInfo root;
6721 
6722  /* Make up dummy planner state so we can use setrefs machinery */
6723  MemSet(&glob, 0, sizeof(glob));
6724  glob.type = T_PlannerGlobal;
6725  glob.relationOids = NIL;
6726  glob.invalItems = NIL;
6727 
6728  MemSet(&root, 0, sizeof(root));
6729  root.type = T_PlannerInfo;
6730  root.glob = &glob;
6731 
6732  /*
6733  * Convert named-argument function calls, insert default arguments and
6734  * simplify constant subexprs. Collect identities of inlined functions
6735  * and elided domains, too.
6736  */
6737  result = eval_const_expressions(&root, (Node *) expr);
6738 
6739  /* Fill in opfuncid values if missing */
6740  fix_opfuncids(result);
6741 
6742  /*
6743  * Now walk the finished expression to find anything else we ought to
6744  * record as an expression dependency.
6745  */
6746  (void) extract_query_dependencies_walker(result, &root);
6747 
6748  *relationOids = glob.relationOids;
6749  *invalItems = glob.invalItems;
6750 
6751  return (Expr *) result;
6752 }
bool extract_query_dependencies_walker(Node *node, PlannerInfo *context)
Definition: setrefs.c:3601
List * invalItems
Definition: pathnodes.h:135
List * relationOids
Definition: pathnodes.h:132

References eval_const_expressions(), extract_query_dependencies_walker(), fix_opfuncids(), PlannerGlobal::invalItems, MemSet, NIL, PlannerGlobal::relationOids, and root.

Referenced by GetCachedExpression().

◆ extract_rollup_sets()

static List * extract_rollup_sets ( List groupingSets)
static

Definition at line 3006 of file planner.c.

3007 {
3008  int num_sets_raw = list_length(groupingSets);
3009  int num_empty = 0;
3010  int num_sets = 0; /* distinct sets */
3011  int num_chains = 0;
3012  List *result = NIL;
3013  List **results;
3014  List **orig_sets;
3015  Bitmapset **set_masks;
3016  int *chains;
3017  short **adjacency;
3018  short *adjacency_buf;
3019  BipartiteMatchState *state;
3020  int i;
3021  int j;
3022  int j_size;
3023  ListCell *lc1 = list_head(groupingSets);
3024  ListCell *lc;
3025 
3026  /*
3027  * Start by stripping out empty sets. The algorithm doesn't require this,
3028  * but the planner currently needs all empty sets to be returned in the
3029  * first list, so we strip them here and add them back after.
3030  */
3031  while (lc1 && lfirst(lc1) == NIL)
3032  {
3033  ++num_empty;
3034  lc1 = lnext(groupingSets, lc1);
3035  }
3036 
3037  /* bail out now if it turns out that all we had were empty sets. */
3038  if (!lc1)
3039  return list_make1(groupingSets);
3040 
3041  /*----------
3042  * We don't strictly need to remove duplicate sets here, but if we don't,
3043  * they tend to become scattered through the result, which is a bit
3044  * confusing (and irritating if we ever decide to optimize them out).
3045  * So we remove them here and add them back after.
3046  *
3047  * For each non-duplicate set, we fill in the following:
3048  *
3049  * orig_sets[i] = list of the original set lists
3050  * set_masks[i] = bitmapset for testing inclusion
3051  * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
3052  *
3053  * chains[i] will be the result group this set is assigned to.
3054  *
3055  * We index all of these from 1 rather than 0 because it is convenient
3056  * to leave 0 free for the NIL node in the graph algorithm.
3057  *----------
3058  */
3059  orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
3060  set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
3061  adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
3062  adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
3063 
3064  j_size = 0;
3065  j = 0;
3066  i = 1;
3067 
3068  for_each_cell(lc, groupingSets, lc1)
3069  {
3070  List *candidate = (List *) lfirst(lc);
3071  Bitmapset *candidate_set = NULL;
3072  ListCell *lc2;
3073  int dup_of = 0;
3074 
3075  foreach(lc2, candidate)
3076  {
3077  candidate_set = bms_add_member(candidate_set, lfirst_int(lc2));
3078  }
3079 
3080  /* we can only be a dup if we're the same length as a previous set */
3081  if (j_size == list_length(candidate))
3082  {
3083  int k;
3084 
3085  for (k = j; k < i; ++k)
3086  {
3087  if (bms_equal(set_masks[k], candidate_set))
3088  {
3089  dup_of = k;
3090  break;
3091  }
3092  }
3093  }
3094  else if (j_size < list_length(candidate))
3095  {
3096  j_size = list_length(candidate);
3097  j = i;
3098  }
3099 
3100  if (dup_of > 0)
3101  {
3102  orig_sets[dup_of] = lappend(orig_sets[dup_of], candidate);
3103  bms_free(candidate_set);
3104  }
3105  else
3106  {
3107  int k;
3108  int n_adj = 0;
3109 
3110  orig_sets[i] = list_make1(candidate);
3111  set_masks[i] = candidate_set;
3112 
3113  /* fill in adjacency list; no need to compare equal-size sets */
3114 
3115  for (k = j - 1; k > 0; --k)
3116  {
3117  if (bms_is_subset(set_masks[k], candidate_set))
3118  adjacency_buf[++n_adj] = k;
3119  }
3120 
3121  if (n_adj > 0)
3122  {
3123  adjacency_buf[0] = n_adj;
3124  adjacency[i] = palloc((n_adj + 1) * sizeof(short));
3125  memcpy(adjacency[i], adjacency_buf, (n_adj + 1) * sizeof(short));
3126  }
3127  else
3128  adjacency[i] = NULL;
3129 
3130  ++i;
3131  }
3132  }
3133 
3134  num_sets = i - 1;
3135 
3136  /*
3137  * Apply the graph matching algorithm to do the work.
3138  */
3139  state = BipartiteMatch(num_sets, num_sets, adjacency);
3140 
3141  /*
3142  * Now, the state->pair* fields have the info we need to assign sets to
3143  * chains. Two sets (u,v) belong to the same chain if pair_uv[u] = v or
3144  * pair_vu[v] = u (both will be true, but we check both so that we can do
3145  * it in one pass)
3146  */
3147  chains = palloc0((num_sets + 1) * sizeof(int));
3148 
3149  for (i = 1; i <= num_sets; ++i)
3150  {
3151  int u = state->pair_vu[i];
3152  int v = state->pair_uv[i];
3153 
3154  if (u > 0 && u < i)
3155  chains[i] = chains[u];
3156  else if (v > 0 && v < i)
3157  chains[i] = chains[v];
3158  else
3159  chains[i] = ++num_chains;
3160  }
3161 
3162  /* build result lists. */
3163  results = palloc0((num_chains + 1) * sizeof(List *));
3164 
3165  for (i = 1; i <= num_sets; ++i)
3166  {
3167  int c = chains[i];
3168 
3169  Assert(c > 0);
3170 
3171  results[c] = list_concat(results[c], orig_sets[i]);
3172  }
3173 
3174  /* push any empty sets back on the first list. */
3175  while (num_empty-- > 0)
3176  results[1] = lcons(NIL, results[1]);
3177 
3178  /* make result list */
3179  for (i = 1; i <= num_chains; ++i)
3180  result = lappend(result, results[i]);
3181 
3182  /*
3183  * Free all the things.
3184  *
3185  * (This is over-fussy for small sets but for large sets we could have
3186  * tied up a nontrivial amount of memory.)
3187  */
3188  BipartiteMatchFree(state);
3189  pfree(results);
3190  pfree(chains);
3191  for (i = 1; i <= num_sets; ++i)
3192  if (adjacency[i])
3193  pfree(adjacency[i]);
3194  pfree(adjacency);
3195  pfree(adjacency_buf);
3196  pfree(orig_sets);
3197  for (i = 1; i <= num_sets; ++i)
3198  bms_free(set_masks[i]);
3199  pfree(set_masks);
3200 
3201  return result;
3202 }
void BipartiteMatchFree(BipartiteMatchState *state)
BipartiteMatchState * BipartiteMatch(int u_size, int v_size, short **adjacency)
bool bms_equal(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:142
bool bms_is_subset(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:412
void bms_free(Bitmapset *a)
Definition: bitmapset.c:239
int j
Definition: isn.c:74
void * palloc0(Size size)
Definition: mcxt.c:1347
char * c
Definition: regguts.h:323

References Assert, BipartiteMatch(), BipartiteMatchFree(), bms_add_member(), bms_equal(), bms_free(), bms_is_subset(), for_each_cell, i, j, lappend(), lcons(), lfirst, lfirst_int, list_concat(), list_head(), list_length(), list_make1, lnext(), NIL, palloc(), palloc0(), and pfree().

Referenced by preprocess_grouping_sets().

◆ gather_grouping_paths()

static void gather_grouping_paths ( PlannerInfo root,
RelOptInfo rel 
)
static

Definition at line 7607 of file planner.c.

7608 {
7609  ListCell *lc;
7610  Path *cheapest_partial_path;
7611  List *groupby_pathkeys;
7612 
7613  /*
7614  * This occurs after any partial aggregation has taken place, so trim off
7615  * any pathkeys added for ORDER BY / DISTINCT aggregates.
7616  */
7617  if (list_length(root->group_pathkeys) > root->num_groupby_pathkeys)
7618  groupby_pathkeys = list_copy_head(root->group_pathkeys,
7619  root->num_groupby_pathkeys);
7620  else
7621  groupby_pathkeys = root->group_pathkeys;
7622 
7623  /* Try Gather for unordered paths and Gather Merge for ordered ones. */
7624  generate_useful_gather_paths(root, rel, true);
7625 
7626  cheapest_partial_path = linitial(rel->partial_pathlist);
7627 
7628  /* XXX Shouldn't this also consider the group-key-reordering? */
7629  foreach(lc, rel->partial_pathlist)
7630  {
7631  Path *path = (Path *) lfirst(lc);
7632  bool is_sorted;
7633  int presorted_keys;
7634  double total_groups;
7635 
7636  is_sorted = pathkeys_count_contained_in(groupby_pathkeys,
7637  path->pathkeys,
7638  &presorted_keys);
7639 
7640  if (is_sorted)
7641  continue;
7642 
7643  /*
7644  * Try at least sorting the cheapest path and also try incrementally
7645  * sorting any path which is partially sorted already (no need to deal
7646  * with paths which have presorted keys when incremental sort is
7647  * disabled unless it's the cheapest input path).
7648  */
7649  if (path != cheapest_partial_path &&
7650  (presorted_keys == 0 || !enable_incremental_sort))
7651  continue;
7652 
7653  /*
7654  * We've no need to consider both a sort and incremental sort. We'll
7655  * just do a sort if there are no presorted keys and an incremental
7656  * sort when there are presorted keys.
7657  */
7658  if (presorted_keys == 0 || !enable_incremental_sort)
7659  path = (Path *) create_sort_path(root, rel, path,
7660  groupby_pathkeys,
7661  -1.0);
7662  else
7663  path = (Path *) create_incremental_sort_path(root,
7664  rel,
7665  path,
7666  groupby_pathkeys,
7667  presorted_keys,
7668  -1.0);
7669  total_groups = compute_gather_rows(path);
7670  path = (Path *)
7671  create_gather_merge_path(root,
7672  rel,
7673  path,
7674  rel->reltarget,
7675  groupby_pathkeys,
7676  NULL,
7677  &total_groups);
7678 
7679  add_path(rel, path);
7680  }
7681 }
List * list_copy_head(const List *oldlist, int len)
Definition: list.c:1593

References add_path(), compute_gather_rows(), create_gather_merge_path(), create_incremental_sort_path(), create_sort_path(), enable_incremental_sort, generate_useful_gather_paths(), lfirst, linitial, list_copy_head(), list_length(), RelOptInfo::partial_pathlist, Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::reltarget, and root.

Referenced by add_paths_to_grouping_rel(), and create_ordinary_grouping_paths().

◆ generate_setop_child_grouplist()

static List * generate_setop_child_grouplist ( SetOperationStmt op,
List targetlist 
)
static

Definition at line 8168 of file planner.c.

8169 {
8170  List *grouplist = copyObject(op->groupClauses);
8171  ListCell *lg;
8172  ListCell *lt;
8173 
8174  lg = list_head(grouplist);
8175  foreach(lt, targetlist)
8176  {
8177  TargetEntry *tle = (TargetEntry *) lfirst(lt);
8178  SortGroupClause *sgc;
8179 
8180  /* resjunk columns could have sortgrouprefs. Leave these alone */
8181  if (tle->resjunk)
8182  continue;
8183 
8184  /* we expect every non-resjunk target to have a SortGroupClause */
8185  Assert(lg != NULL);
8186  sgc = (SortGroupClause *) lfirst(lg);
8187  lg = lnext(grouplist, lg);
8188 
8189  /* assign a tleSortGroupRef, or reuse the existing one */
8190  sgc->tleSortGroupRef = assignSortGroupRef(tle, targetlist);
8191  }
8192  Assert(lg == NULL);
8193  return grouplist;
8194 }
Index assignSortGroupRef(TargetEntry *tle, List *tlist)

References Assert, assignSortGroupRef(), copyObject, lfirst, list_head(), lnext(), and SortGroupClause::tleSortGroupRef.

Referenced by standard_qp_callback().

◆ get_cheapest_fractional_path()

Path* get_cheapest_fractional_path ( RelOptInfo rel,
double  tuple_fraction 
)

Definition at line 6528 of file planner.c.

6529 {
6530  Path *best_path = rel->cheapest_total_path;
6531  ListCell *l;
6532 
6533  /* If all tuples will be retrieved, just return the cheapest-total path */
6534  if (tuple_fraction <= 0.0)
6535  return best_path;
6536 
6537  /* Convert absolute # of tuples to a fraction; no need to clamp to 0..1 */
6538  if (tuple_fraction >= 1.0 && best_path->rows > 0)
6539  tuple_fraction /= best_path->rows;
6540 
6541  foreach(l, rel->pathlist)
6542  {
6543  Path *path = (Path *) lfirst(l);
6544 
6545  if (path == rel->cheapest_total_path ||
6546  compare_fractional_path_costs(best_path, path, tuple_fraction) <= 0)
6547  continue;
6548 
6549  best_path = path;
6550  }
6551 
6552  return best_path;
6553 }
int compare_fractional_path_costs(Path *path1, Path *path2, double fraction)
Definition: pathnode.c:124

References RelOptInfo::cheapest_total_path, compare_fractional_path_costs(), lfirst, RelOptInfo::pathlist, and Path::rows.

Referenced by make_subplan(), and standard_planner().

◆ get_number_of_groups()

static double get_number_of_groups ( PlannerInfo root,
double  path_rows,
grouping_sets_data gd,
List target_list 
)
static

Definition at line 3698 of file planner.c.

3702 {
3703  Query *parse = root->parse;
3704  double dNumGroups;
3705 
3706  if (parse->groupClause)
3707  {
3708  List *groupExprs;
3709 
3710  if (parse->groupingSets)
3711  {
3712  /* Add up the estimates for each grouping set */
3713  ListCell *lc;
3714 
3715  Assert(gd); /* keep Coverity happy */
3716 
3717  dNumGroups = 0;
3718 
3719  foreach(lc, gd->rollups)
3720  {
3721  RollupData *rollup = lfirst_node(RollupData, lc);
3722  ListCell *lc2;
3723  ListCell *lc3;
3724 
3725  groupExprs = get_sortgrouplist_exprs(rollup->groupClause,
3726  target_list);
3727 
3728  rollup->numGroups = 0.0;
3729 
3730  forboth(lc2, rollup->gsets, lc3, rollup->gsets_data)
3731  {
3732  List *gset = (List *) lfirst(lc2);
3733  GroupingSetData *gs = lfirst_node(GroupingSetData, lc3);
3734  double numGroups = estimate_num_groups(root,
3735  groupExprs,
3736  path_rows,
3737  &gset,
3738  NULL);
3739 
3740  gs->numGroups = numGroups;
3741  rollup->numGroups += numGroups;
3742  }
3743 
3744  dNumGroups += rollup->numGroups;
3745  }
3746 
3747  if (gd->hash_sets_idx)
3748  {
3749  ListCell *lc2;
3750 
3751  gd->dNumHashGroups = 0;
3752 
3753  groupExprs = get_sortgrouplist_exprs(parse->groupClause,
3754  target_list);
3755 
3756  forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets)
3757  {
3758  List *gset = (List *) lfirst(lc);
3759  GroupingSetData *gs = lfirst_node(GroupingSetData, lc2);
3760  double numGroups = estimate_num_groups(root,
3761  groupExprs,
3762  path_rows,
3763  &gset,
3764  NULL);
3765 
3766  gs->numGroups = numGroups;
3767  gd->dNumHashGroups += numGroups;
3768  }
3769 
3770  dNumGroups += gd->dNumHashGroups;
3771  }
3772  }
3773  else
3774  {
3775  /* Plain GROUP BY -- estimate based on optimized groupClause */
3776  groupExprs = get_sortgrouplist_exprs(root->processed_groupClause,
3777  target_list);
3778 
3779  dNumGroups = estimate_num_groups(root, groupExprs, path_rows,
3780  NULL, NULL);
3781  }
3782  }
3783  else if (parse->groupingSets)
3784  {
3785  /* Empty grouping sets ... one result row for each one */
3786  dNumGroups = list_length(parse->groupingSets);
3787  }
3788  else if (parse->hasAggs || root->hasHavingQual)
3789  {
3790  /* Plain aggregation, one result row */
3791  dNumGroups = 1;
3792  }
3793  else
3794  {
3795  /* Not grouping */
3796  dNumGroups = 1;
3797  }
3798 
3799  return dNumGroups;
3800 }
List * hash_sets_idx
Definition: planner.c:100

References Assert, grouping_sets_data::dNumHashGroups, estimate_num_groups(), forboth, get_sortgrouplist_exprs(), RollupData::groupClause, RollupData::gsets, RollupData::gsets_data, grouping_sets_data::hash_sets_idx, lfirst, lfirst_node, list_length(), GroupingSetData::numGroups, RollupData::numGroups, parse(), grouping_sets_data::rollups, root, and grouping_sets_data::unsortable_sets.

Referenced by create_ordinary_grouping_paths(), and create_partial_grouping_paths().

◆ group_by_has_partkey()

static bool group_by_has_partkey ( RelOptInfo input_rel,
List targetList,
List groupClause 
)
static

Definition at line 8111 of file planner.c.

8114 {
8115  List *groupexprs = get_sortgrouplist_exprs(groupClause, targetList);
8116  int cnt = 0;
8117  int partnatts;
8118 
8119  /* Input relation should be partitioned. */
8120  Assert(input_rel->part_scheme);
8121 
8122  /* Rule out early, if there are no partition keys present. */
8123  if (!input_rel->partexprs)
8124  return false;
8125 
8126  partnatts = input_rel->part_scheme->partnatts;
8127 
8128  for (cnt = 0; cnt < partnatts; cnt++)
8129  {
8130  List *partexprs = input_rel->partexprs[cnt];
8131  ListCell *lc;
8132  bool found = false;
8133 
8134  foreach(lc, partexprs)
8135  {
8136  Expr *partexpr = lfirst(lc);
8137 
8138  if (list_member(groupexprs, partexpr))
8139  {
8140  found = true;
8141  break;
8142  }
8143  }
8144 
8145  /*
8146  * If none of the partition key expressions match with any of the
8147  * GROUP BY expression, return false.
8148  */
8149  if (!found)
8150  return false;
8151  }
8152 
8153  return true;
8154 }
bool list_member(const List *list, const void *datum)
Definition: list.c:661

References Assert, get_sortgrouplist_exprs(), lfirst, and list_member().

Referenced by create_ordinary_grouping_paths().

◆ grouping_planner()

static void grouping_planner ( PlannerInfo root,
double  tuple_fraction,
SetOperationStmt setops 
)
static

Definition at line 1361 of file planner.c.

1363 {
1364  Query *parse = root->parse;
1365  int64 offset_est = 0;
1366  int64 count_est = 0;
1367  double limit_tuples = -1.0;
1368  bool have_postponed_srfs = false;
1369  PathTarget *final_target;
1370  List *final_targets;
1371  List *final_targets_contain_srfs;
1372  bool final_target_parallel_safe;
1373  RelOptInfo *current_rel;
1374  RelOptInfo *final_rel;
1375  FinalPathExtraData extra;
1376  ListCell *lc;
1377 
1378  /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
1379  if (parse->limitCount || parse->limitOffset)
1380  {
1381  tuple_fraction = preprocess_limit(root, tuple_fraction,
1382  &offset_est, &count_est);
1383 
1384  /*
1385  * If we have a known LIMIT, and don't have an unknown OFFSET, we can
1386  * estimate the effects of using a bounded sort.
1387  */
1388  if (count_est > 0 && offset_est >= 0)
1389  limit_tuples = (double) count_est + (double) offset_est;
1390  }
1391 
1392  /* Make tuple_fraction accessible to lower-level routines */
1393  root->tuple_fraction = tuple_fraction;
1394 
1395  if (parse->setOperations)
1396  {
1397  /*
1398  * Construct Paths for set operations. The results will not need any
1399  * work except perhaps a top-level sort and/or LIMIT. Note that any
1400  * special work for recursive unions is the responsibility of
1401  * plan_set_operations.
1402  */
1403  current_rel = plan_set_operations(root);
1404 
1405  /*
1406  * We should not need to call preprocess_targetlist, since we must be
1407  * in a SELECT query node. Instead, use the processed_tlist returned
1408  * by plan_set_operations (since this tells whether it returned any
1409  * resjunk columns!), and transfer any sort key information from the
1410  * original tlist.
1411  */
1412  Assert(parse->commandType == CMD_SELECT);
1413 
1414  /* for safety, copy processed_tlist instead of modifying in-place */
1415  root->processed_tlist =
1416  postprocess_setop_tlist(copyObject(root->processed_tlist),
1417  parse->targetList);
1418 
1419  /* Also extract the PathTarget form of the setop result tlist */
1420  final_target = current_rel->cheapest_total_path->pathtarget;
1421 
1422  /* And check whether it's parallel safe */
1423  final_target_parallel_safe =
1424  is_parallel_safe(root, (Node *) final_target->exprs);
1425 
1426  /* The setop result tlist couldn't contain any SRFs */
1427  Assert(!parse->hasTargetSRFs);
1428  final_targets = final_targets_contain_srfs = NIL;
1429 
1430  /*
1431  * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have
1432  * checked already, but let's make sure).
1433  */
1434  if (parse->rowMarks)
1435  ereport(ERROR,
1436  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1437  /*------
1438  translator: %s is a SQL row locking clause such as FOR UPDATE */
1439  errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
1440  LCS_asString(linitial_node(RowMarkClause,
1441  parse->rowMarks)->strength))));
1442 
1443  /*
1444  * Calculate pathkeys that represent result ordering requirements
1445  */
1446  Assert(parse->distinctClause == NIL);
1447  root->sort_pathkeys = make_pathkeys_for_sortclauses(root,
1448  parse->sortClause,
1449  root->processed_tlist);
1450  }
1451  else
1452  {
1453  /* No set operations, do regular planning */
1454  PathTarget *sort_input_target;
1455  List *sort_input_targets;
1456  List *sort_input_targets_contain_srfs;
1457  bool sort_input_target_parallel_safe;
1458  PathTarget *grouping_target;
1459  List *grouping_targets;
1460  List *grouping_targets_contain_srfs;
1461  bool grouping_target_parallel_safe;
1462  PathTarget *scanjoin_target;
1463  List *scanjoin_targets;
1464  List *scanjoin_targets_contain_srfs;
1465  bool scanjoin_target_parallel_safe;
1466  bool scanjoin_target_same_exprs;
1467  bool have_grouping;
1468  WindowFuncLists *wflists = NULL;
1469  List *activeWindows = NIL;
1470  grouping_sets_data *gset_data = NULL;
1471  standard_qp_extra qp_extra;
1472 
1473  /* A recursive query should always have setOperations */
1474  Assert(!root->hasRecursion);
1475 
1476  /* Preprocess grouping sets and GROUP BY clause, if any */
1477  if (parse->groupingSets)
1478  {
1479  gset_data = preprocess_grouping_sets(root);
1480  }
1481  else if (parse->groupClause)
1482  {
1483  /* Preprocess regular GROUP BY clause, if any */
1484  root->processed_groupClause = preprocess_groupclause(root, NIL);
1485  /* Remove any redundant GROUP BY columns */
1486  remove_useless_groupby_columns(root);
1487  }
1488 
1489  /*
1490  * Preprocess targetlist. Note that much of the remaining planning
1491  * work will be done with the PathTarget representation of tlists, but
1492  * we must also maintain the full representation of the final tlist so
1493  * that we can transfer its decoration (resnames etc) to the topmost
1494  * tlist of the finished Plan. This is kept in processed_tlist.
1495  */
1496  preprocess_targetlist(root);
1497 
1498  /*
1499  * Mark all the aggregates with resolved aggtranstypes, and detect
1500  * aggregates that are duplicates or can share transition state. We
1501  * must do this before slicing and dicing the tlist into various
1502  * pathtargets, else some copies of the Aggref nodes might escape
1503  * being marked.
1504  */
1505  if (parse->hasAggs)
1506  {
1507  preprocess_aggrefs(root, (Node *) root->processed_tlist);
1508  preprocess_aggrefs(root, (Node *) parse->havingQual);
1509  }
1510 
1511  /*
1512  * Locate any window functions in the tlist. (We don't need to look
1513  * anywhere else, since expressions used in ORDER BY will be in there
1514  * too.) Note that they could all have been eliminated by constant
1515  * folding, in which case we don't need to do any more work.
1516  */
1517  if (parse->hasWindowFuncs)
1518  {
1519  wflists = find_window_functions((Node *) root->processed_tlist,
1520  list_length(parse->windowClause));
1521  if (wflists->numWindowFuncs > 0)
1522  {
1523  /*
1524  * See if any modifications can be made to each WindowClause
1525  * to allow the executor to execute the WindowFuncs more
1526  * quickly.
1527  */
1528  optimize_window_clauses(root, wflists);
1529 
1530  activeWindows = select_active_windows(root, wflists);
1531  }
1532  else
1533  parse->hasWindowFuncs = false;
1534  }
1535 
1536  /*
1537  * Preprocess MIN/MAX aggregates, if any. Note: be careful about
1538  * adding logic between here and the query_planner() call. Anything
1539  * that is needed in MIN/MAX-optimizable cases will have to be
1540  * duplicated in planagg.c.
1541  */
1542  if (parse->hasAggs)
1543  preprocess_minmax_aggregates(root);
1544 
1545  /*
1546  * Figure out whether there's a hard limit on the number of rows that
1547  * query_planner's result subplan needs to return. Even if we know a
1548  * hard limit overall, it doesn't apply if the query has any
1549  * grouping/aggregation operations, or SRFs in the tlist.
1550  */
1551  if (parse->groupClause ||
1552  parse->groupingSets ||
1553  parse->distinctClause ||
1554  parse->hasAggs ||
1555  parse->hasWindowFuncs ||
1556  parse->hasTargetSRFs ||
1557  root->hasHavingQual)
1558  root->limit_tuples = -1.0;
1559  else
1560  root->limit_tuples = limit_tuples;
1561 
1562  /* Set up data needed by standard_qp_callback */
1563  qp_extra.activeWindows = activeWindows;
1564  qp_extra.gset_data = gset_data;
1565 
1566  /*
1567  * If we're a subquery for a set operation, store the SetOperationStmt
1568  * in qp_extra.
1569  */
1570  qp_extra.setop = setops;
1571 
1572  /*
1573  * Generate the best unsorted and presorted paths for the scan/join
1574  * portion of this Query, ie the processing represented by the
1575  * FROM/WHERE clauses. (Note there may not be any presorted paths.)
1576  * We also generate (in standard_qp_callback) pathkey representations
1577  * of the query's sort clause, distinct clause, etc.
1578  */
1579  current_rel = query_planner(root, standard_qp_callback, &qp_extra);
1580 
1581  /*
1582  * Convert the query's result tlist into PathTarget format.
1583  *
1584  * Note: this cannot be done before query_planner() has performed
1585  * appendrel expansion, because that might add resjunk entries to
1586  * root->processed_tlist. Waiting till afterwards is also helpful
1587  * because the target width estimates can use per-Var width numbers
1588  * that were obtained within query_planner().
1589  */
1590  final_target = create_pathtarget(root, root->processed_tlist);
1591  final_target_parallel_safe =
1592  is_parallel_safe(root, (Node *) final_target->exprs);
1593 
1594  /*
1595  * If ORDER BY was given, consider whether we should use a post-sort
1596  * projection, and compute the adjusted target for preceding steps if
1597  * so.
1598  */
1599  if (parse->sortClause)
1600  {
1601  sort_input_target = make_sort_input_target(root,
1602  final_target,
1603  &have_postponed_srfs);
1604  sort_input_target_parallel_safe =
1605  is_parallel_safe(root, (Node *) sort_input_target->exprs);
1606  }
1607  else
1608  {
1609  sort_input_target = final_target;
1610  sort_input_target_parallel_safe = final_target_parallel_safe;
1611  }
1612 
1613  /*
1614  * If we have window functions to deal with, the output from any
1615  * grouping step needs to be what the window functions want;
1616  * otherwise, it should be sort_input_target.
1617  */
1618  if (activeWindows)
1619  {
1620  grouping_target = make_window_input_target(root,
1621  final_target,
1622  activeWindows);
1623  grouping_target_parallel_safe =
1624  is_parallel_safe(root, (Node *) grouping_target->exprs);
1625  }
1626  else
1627  {
1628  grouping_target = sort_input_target;
1629  grouping_target_parallel_safe = sort_input_target_parallel_safe;
1630  }
1631 
1632  /*
1633  * If we have grouping or aggregation to do, the topmost scan/join
1634  * plan node must emit what the grouping step wants; otherwise, it
1635  * should emit grouping_target.
1636  */
1637  have_grouping = (parse->groupClause || parse->groupingSets ||
1638  parse->hasAggs || root->hasHavingQual);
1639  if (have_grouping)
1640  {
1641  scanjoin_target = make_group_input_target(root, final_target);
1642  scanjoin_target_parallel_safe =
1643  is_parallel_safe(root, (Node *) scanjoin_target->exprs);
1644  }
1645  else
1646  {
1647  scanjoin_target = grouping_target;
1648  scanjoin_target_parallel_safe = grouping_target_parallel_safe;
1649  }
1650 
1651  /*
1652  * If there are any SRFs in the targetlist, we must separate each of
1653  * these PathTargets into SRF-computing and SRF-free targets. Replace
1654  * each of the named targets with a SRF-free version, and remember the
1655  * list of additional projection steps we need to add afterwards.
1656  */
1657  if (parse->hasTargetSRFs)
1658  {
1659  /* final_target doesn't recompute any SRFs in sort_input_target */
1660  split_pathtarget_at_srfs(root, final_target, sort_input_target,
1661  &final_targets,
1662  &final_targets_contain_srfs);
1663  final_target = linitial_node(PathTarget, final_targets);
1664  Assert(!linitial_int(final_targets_contain_srfs));
1665  /* likewise for sort_input_target vs. grouping_target */
1666  split_pathtarget_at_srfs(root, sort_input_target, grouping_target,
1667  &sort_input_targets,
1668  &sort_input_targets_contain_srfs);
1669  sort_input_target = linitial_node(PathTarget, sort_input_targets);
1670  Assert(!linitial_int(sort_input_targets_contain_srfs));
1671  /* likewise for grouping_target vs. scanjoin_target */
1672  split_pathtarget_at_srfs(root, grouping_target, scanjoin_target,
1673  &grouping_targets,
1674  &grouping_targets_contain_srfs);
1675  grouping_target = linitial_node(PathTarget, grouping_targets);
1676  Assert(!linitial_int(grouping_targets_contain_srfs));
1677  /* scanjoin_target will not have any SRFs precomputed for it */
1678  split_pathtarget_at_srfs(root, scanjoin_target, NULL,
1679  &scanjoin_targets,
1680  &scanjoin_targets_contain_srfs);
1681  scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
1682  Assert(!linitial_int(scanjoin_targets_contain_srfs));
1683  }
1684  else
1685  {
1686  /* initialize lists; for most of these, dummy values are OK */
1687  final_targets = final_targets_contain_srfs = NIL;
1688  sort_input_targets = sort_input_targets_contain_srfs = NIL;
1689  grouping_targets = grouping_targets_contain_srfs = NIL;
1690  scanjoin_targets = list_make1(scanjoin_target);
1691  scanjoin_targets_contain_srfs = NIL;
1692  }
1693 
1694  /* Apply scan/join target. */
1695  scanjoin_target_same_exprs = list_length(scanjoin_targets) == 1
1696  && equal(scanjoin_target->exprs, current_rel->reltarget->exprs);
1697  apply_scanjoin_target_to_paths(root, current_rel, scanjoin_targets,
1698  scanjoin_targets_contain_srfs,
1699  scanjoin_target_parallel_safe,
1700  scanjoin_target_same_exprs);
1701 
1702  /*
1703  * Save the various upper-rel PathTargets we just computed into
1704  * root->upper_targets[]. The core code doesn't use this, but it
1705  * provides a convenient place for extensions to get at the info. For
1706  * consistency, we save all the intermediate targets, even though some
1707  * of the corresponding upperrels might not be needed for this query.
1708  */
1709  root->upper_targets[UPPERREL_FINAL] = final_target;
1710  root->upper_targets[UPPERREL_ORDERED] = final_target;
1711  root->upper_targets[UPPERREL_DISTINCT] = sort_input_target;
1712  root->upper_targets[UPPERREL_PARTIAL_DISTINCT] = sort_input_target;
1713  root->upper_targets[UPPERREL_WINDOW] = sort_input_target;
1714  root->upper_targets[UPPERREL_GROUP_AGG] = grouping_target;
1715 
1716  /*
1717  * If we have grouping and/or aggregation, consider ways to implement
1718  * that. We build a new upperrel representing the output of this
1719  * phase.
1720  */
1721  if (have_grouping)
1722  {
1723  current_rel = create_grouping_paths(root,
1724  current_rel,
1725  grouping_target,
1726  grouping_target_parallel_safe,
1727  gset_data);
1728  /* Fix things up if grouping_target contains SRFs */
1729  if (parse->hasTargetSRFs)
1730  adjust_paths_for_srfs(root, current_rel,
1731  grouping_targets,
1732  grouping_targets_contain_srfs);
1733  }
1734 
1735  /*
1736  * If we have window functions, consider ways to implement those. We
1737  * build a new upperrel representing the output of this phase.
1738  */
1739  if (activeWindows)
1740  {
1741  current_rel = create_window_paths(root,
1742  current_rel,
1743  grouping_target,
1744  sort_input_target,
1745  sort_input_target_parallel_safe,
1746  wflists,
1747  activeWindows);
1748  /* Fix things up if sort_input_target contains SRFs */
1749  if (parse->hasTargetSRFs)
1750  adjust_paths_for_srfs(root, current_rel,
1751  sort_input_targets,
1752  sort_input_targets_contain_srfs);
1753  }
1754 
1755  /*
1756  * If there is a DISTINCT clause, consider ways to implement that. We
1757  * build a new upperrel representing the output of this phase.
1758  */
1759  if (parse->distinctClause)
1760  {
1761  current_rel = create_distinct_paths(root,
1762  current_rel,
1763  sort_input_target);
1764  }
1765  } /* end of if (setOperations) */
1766 
1767  /*
1768  * If ORDER BY was given, consider ways to implement that, and generate a
1769  * new upperrel containing only paths that emit the correct ordering and
1770  * project the correct final_target. We can apply the original
1771  * limit_tuples limit in sort costing here, but only if there are no
1772  * postponed SRFs.
1773  */
1774  if (parse->sortClause)
1775  {
1776  current_rel = create_ordered_paths(root,
1777  current_rel,
1778  final_target,
1779  final_target_parallel_safe,
1780  have_postponed_srfs ? -1.0 :
1781  limit_tuples);
1782  /* Fix things up if final_target contains SRFs */
1783  if (parse->hasTargetSRFs)
1784  adjust_paths_for_srfs(root, current_rel,
1785  final_targets,
1786  final_targets_contain_srfs);
1787  }
1788 
1789  /*
1790  * Now we are prepared to build the final-output upperrel.
1791  */
1792  final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1793 
1794  /*
1795  * If the input rel is marked consider_parallel and there's nothing that's
1796  * not parallel-safe in the LIMIT clause, then the final_rel can be marked
1797  * consider_parallel as well. Note that if the query has rowMarks or is
1798  * not a SELECT, consider_parallel will be false for every relation in the
1799  * query.
1800  */
1801  if (current_rel->consider_parallel &&
1802  is_parallel_safe(root, parse->limitOffset) &&
1803  is_parallel_safe(root, parse->limitCount))
1804  final_rel->consider_parallel = true;
1805 
1806  /*
1807  * If the current_rel belongs to a single FDW, so does the final_rel.
1808  */
1809  final_rel->serverid = current_rel->serverid;
1810  final_rel->userid = current_rel->userid;
1811  final_rel->useridiscurrent = current_rel->useridiscurrent;
1812  final_rel->fdwroutine = current_rel->fdwroutine;
1813 
1814  /*
1815  * Generate paths for the final_rel. Insert all surviving paths, with
1816  * LockRows, Limit, and/or ModifyTable steps added if needed.
1817  */
1818  foreach(lc, current_rel->pathlist)
1819  {
1820  Path *path = (Path *) lfirst(lc);
1821 
1822  /*
1823  * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
1824  * (Note: we intentionally test parse->rowMarks not root->rowMarks
1825  * here. If there are only non-locking rowmarks, they should be
1826  * handled by the ModifyTable node instead. However, root->rowMarks
1827  * is what goes into the LockRows node.)
1828  */
1829  if (parse->rowMarks)
1830  {
1831  path = (Path *) create_lockrows_path(root, final_rel, path,
1832  root->rowMarks,
1834  }
1835 
1836  /*
1837  * If there is a LIMIT/OFFSET clause, add the LIMIT node.
1838  */
1839  if (limit_needed(parse))
1840  {
1841  path = (Path *) create_limit_path(root, final_rel, path,
1842  parse->limitOffset,
1843  parse->limitCount,
1844  parse->limitOption,
1845  offset_est, count_est);
1846  }
1847 
1848  /*
1849  * If this is an INSERT/UPDATE/DELETE/MERGE, add the ModifyTable node.
1850  */
1851  if (parse->commandType != CMD_SELECT)
1852  {
1853  Index rootRelation;
1854  List *resultRelations = NIL;
1855  List *updateColnosLists = NIL;
1856  List *withCheckOptionLists = NIL;
1857  List *returningLists = NIL;
1858  List *mergeActionLists = NIL;
1859  List *mergeJoinConditions = NIL;
1860  List *rowMarks;
1861 
1862  if (bms_membership(root->all_result_relids) == BMS_MULTIPLE)
1863  {
1864  /* Inherited UPDATE/DELETE/MERGE */
1865  RelOptInfo *top_result_rel = find_base_rel(root,
1866  parse->resultRelation);
1867  int resultRelation = -1;
1868 
1869  /* Pass the root result rel forward to the executor. */
1870  rootRelation = parse->resultRelation;
1871 
1872  /* Add only leaf children to ModifyTable. */
1873  while ((resultRelation = bms_next_member(root->leaf_result_relids,
1874  resultRelation)) >= 0)
1875  {
1876  RelOptInfo *this_result_rel = find_base_rel(root,
1877  resultRelation);
1878 
1879  /*
1880  * Also exclude any leaf rels that have turned dummy since
1881  * being added to the list, for example, by being excluded
1882  * by constraint exclusion.
1883  */
1884  if (IS_DUMMY_REL(this_result_rel))
1885  continue;
1886 
1887  /* Build per-target-rel lists needed by ModifyTable */
1888  resultRelations = lappend_int(resultRelations,
1889  resultRelation);
1890  if (parse->commandType == CMD_UPDATE)
1891  {
1892  List *update_colnos = root->update_colnos;
1893 
1894  if (this_result_rel != top_result_rel)
1895  update_colnos =
1897  update_colnos,
1898  this_result_rel->relid,
1899  top_result_rel->relid);
1900  updateColnosLists = lappend(updateColnosLists,
1901  update_colnos);
1902  }
1903  if (parse->withCheckOptions)
1904  {
1905  List *withCheckOptions = parse->withCheckOptions;
1906 
1907  if (this_result_rel != top_result_rel)
1908  withCheckOptions = (List *)
1910  (Node *) withCheckOptions,
1911  this_result_rel,
1912  top_result_rel);
1913  withCheckOptionLists = lappend(withCheckOptionLists,
1914  withCheckOptions);
1915  }
1916  if (parse->returningList)
1917  {
1918  List *returningList = parse->returningList;
1919 
1920  if (this_result_rel != top_result_rel)
1921  returningList = (List *)
1923  (Node *) returningList,
1924  this_result_rel,
1925  top_result_rel);
1926  returningLists = lappend(returningLists,
1927  returningList);
1928  }
1929  if (parse->mergeActionList)
1930  {
1931  ListCell *l;
1932  List *mergeActionList = NIL;
1933 
1934  /*
1935  * Copy MergeActions and translate stuff that
1936  * references attribute numbers.
1937  */
1938  foreach(l, parse->mergeActionList)
1939  {
1940  MergeAction *action = lfirst(l),
1941  *leaf_action = copyObject(action);
1942 
1943  leaf_action->qual =
1945  (Node *) action->qual,
1946  this_result_rel,
1947  top_result_rel);
1948  leaf_action->targetList = (List *)
1950  (Node *) action->targetList,
1951  this_result_rel,
1952  top_result_rel);
1953  if (leaf_action->commandType == CMD_UPDATE)
1954  leaf_action->updateColnos =
1956  action->updateColnos,
1957  this_result_rel->relid,
1958  top_result_rel->relid);
1959  mergeActionList = lappend(mergeActionList,
1960  leaf_action);
1961  }
1962 
1963  mergeActionLists = lappend(mergeActionLists,
1964  mergeActionList);
1965  }
1966  if (parse->commandType == CMD_MERGE)
1967  {
1968  Node *mergeJoinCondition = parse->mergeJoinCondition;
1969 
1970  if (this_result_rel != top_result_rel)
1971  mergeJoinCondition =
1973  mergeJoinCondition,
1974  this_result_rel,
1975  top_result_rel);
1976  mergeJoinConditions = lappend(mergeJoinConditions,
1977  mergeJoinCondition);
1978  }
1979  }
1980 
1981  if (resultRelations == NIL)
1982  {
1983  /*
1984  * We managed to exclude every child rel, so generate a
1985  * dummy one-relation plan using info for the top target
1986  * rel (even though that may not be a leaf target).
1987  * Although it's clear that no data will be updated or
1988  * deleted, we still need to have a ModifyTable node so
1989  * that any statement triggers will be executed. (This
1990  * could be cleaner if we fixed nodeModifyTable.c to allow
1991  * zero target relations, but that probably wouldn't be a
1992  * net win.)
1993  */
1994  resultRelations = list_make1_int(parse->resultRelation);
1995  if (parse->commandType == CMD_UPDATE)
1996  updateColnosLists = list_make1(root->update_colnos);
1997  if (parse->withCheckOptions)
1998  withCheckOptionLists = list_make1(parse->withCheckOptions);
1999  if (parse->returningList)
2000  returningLists = list_make1(parse->returningList);
2001  if (parse->mergeActionList)
2002  mergeActionLists = list_make1(parse->mergeActionList);
2003  if (parse->commandType == CMD_MERGE)
2004  mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2005  }
2006  }
2007  else
2008  {
2009  /* Single-relation INSERT/UPDATE/DELETE/MERGE. */
2010  rootRelation = 0; /* there's no separate root rel */
2011  resultRelations = list_make1_int(parse->resultRelation);
2012  if (parse->commandType == CMD_UPDATE)
2013  updateColnosLists = list_make1(root->update_colnos);
2014  if (parse->withCheckOptions)
2015  withCheckOptionLists = list_make1(parse->withCheckOptions);
2016  if (parse->returningList)
2017  returningLists = list_make1(parse->returningList);
2018  if (parse->mergeActionList)
2019  mergeActionLists = list_make1(parse->mergeActionList);
2020  if (parse->commandType == CMD_MERGE)
2021  mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2022  }
2023 
2024  /*
2025  * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
2026  * will have dealt with fetching non-locked marked rows, else we
2027  * need to have ModifyTable do that.
2028  */
2029  if (parse->rowMarks)
2030  rowMarks = NIL;
2031  else
2032  rowMarks = root->rowMarks;
2033 
2034  path = (Path *)
2035  create_modifytable_path(root, final_rel,
2036  path,
2037  parse->commandType,
2038  parse->canSetTag,
2039  parse->resultRelation,
2040  rootRelation,
2041  root->partColsUpdated,
2042  resultRelations,
2043  updateColnosLists,
2044  withCheckOptionLists,
2045  returningLists,
2046  rowMarks,
2047  parse->onConflict,
2048  mergeActionLists,
2049  mergeJoinConditions,
2051  }
2052 
2053  /* And shove it into final_rel */
2054  add_path(final_rel, path);
2055  }
2056 
2057  /*
2058  * Generate partial paths for final_rel, too, if outer query levels might
2059  * be able to make use of them.
2060  */
2061  if (final_rel->consider_parallel && root->query_level > 1 &&
2062  !limit_needed(parse))
2063  {
2064  Assert(!parse->rowMarks && parse->commandType == CMD_SELECT);
2065  foreach(lc, current_rel->partial_pathlist)
2066  {
2067  Path *partial_path = (Path *) lfirst(lc);
2068 
2069  add_partial_path(final_rel, partial_path);
2070  }
2071  }
2072 
2073  extra.limit_needed = limit_needed(parse);
2074  extra.limit_tuples = limit_tuples;
2075  extra.count_est = count_est;
2076  extra.offset_est = offset_est;
2077 
2078  /*
2079  * If there is an FDW that's responsible for all baserels of the query,
2080  * let it consider adding ForeignPaths.
2081  */
2082  if (final_rel->fdwroutine &&
2083  final_rel->fdwroutine->GetForeignUpperPaths)
2084  final_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_FINAL,
2085  current_rel, final_rel,
2086  &extra);
2087 
2088  /* Let extensions possibly add some more paths */
2090  (*create_upper_paths_hook) (root, UPPERREL_FINAL,
2091  current_rel, final_rel, &extra);
2092 
2093  /* Note: currently, we leave it to callers to do set_cheapest() */
2094 }
Node * adjust_appendrel_attrs_multilevel(PlannerInfo *root, Node *node, RelOptInfo *childrel, RelOptInfo *parentrel)
Definition: appendinfo.c:525
List * adjust_inherited_attnums_multilevel(PlannerInfo *root, List *attnums, Index child_relid, Index top_parent_relid)
Definition: appendinfo.c:666
BMS_Membership bms_membership(const Bitmapset *a)
Definition: bitmapset.c:781
@ BMS_MULTIPLE
Definition: bitmapset.h:73
unsigned int Index
Definition: c.h:614
WindowFuncLists * find_window_functions(Node *clause, Index maxWinRef)
Definition: clauses.c:227
List * lappend_int(List *list, int datum)
Definition: list.c:357
@ CMD_MERGE
Definition: nodes.h:269
@ CMD_UPDATE
Definition: nodes.h:266
@ CMD_SELECT
Definition: nodes.h:265
int assign_special_exec_param(PlannerInfo *root)
Definition: paramassign.c:664
const char * LCS_asString(LockClauseStrength strength)
Definition: analyze.c:3219
LockRowsPath * create_lockrows_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *rowMarks, int epqParam)
Definition: pathnode.c:3756
ModifyTablePath * create_modifytable_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, CmdType operation, bool canSetTag, Index nominalRelation, Index rootRelation, bool partColsUpdated, List *resultRelations, List *updateColnosLists, List *withCheckOptionLists, List *returningLists, List *rowMarks, OnConflictExpr *onconflict, List *mergeActionLists, List *mergeJoinConditions, int epqParam)
Definition: pathnode.c:3820
@ UPPERREL_FINAL
Definition: pathnodes.h:79
#define list_make1_int(x1)
Definition: pg_list.h:227
void preprocess_minmax_aggregates(PlannerInfo *root)
Definition: planagg.c:72
RelOptInfo * query_planner(PlannerInfo *root, query_pathkeys_callback qp_callback, void *qp_extra)
Definition: planmain.c:54
static List * postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
Definition: planner.c:5775
static void remove_useless_groupby_columns(PlannerInfo *root)
Definition: planner.c:2743
static double preprocess_limit(PlannerInfo *root, double tuple_fraction, int64 *offset_est, int64 *count_est)
Definition: planner.c:2499
static PathTarget * make_window_input_target(PlannerInfo *root, PathTarget *final_target, List *activeWindows)
Definition: planner.c:6109
static RelOptInfo * create_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target)
Definition: planner.c:4829
static void optimize_window_clauses(PlannerInfo *root, WindowFuncLists *wflists)
Definition: planner.c:5812
static PathTarget * make_sort_input_target(PlannerInfo *root, PathTarget *final_target, bool *have_postponed_srfs)
Definition: planner.c:6357
static grouping_sets_data * preprocess_grouping_sets(PlannerInfo *root)
Definition: planner.c:2103
static PathTarget * make_group_input_target(PlannerInfo *root, PathTarget *final_target)
Definition: planner.c:5525
static List * select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
Definition: planner.c:5952
bool limit_needed(Query *parse)
Definition: planner.c:2684
static RelOptInfo * create_ordered_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, double limit_tuples)
Definition: planner.c:5305
static RelOptInfo * create_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, grouping_sets_data *gd)
Definition: planner.c:3820
static void standard_qp_callback(PlannerInfo *root, void *extra)
Definition: planner.c:3492
static RelOptInfo * create_window_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *input_target, PathTarget *output_target, bool output_target_parallel_safe, WindowFuncLists *wflists, List *activeWindows)
Definition: planner.c:4572
void preprocess_aggrefs(PlannerInfo *root, Node *clause)
Definition: prepagg.c:110
void preprocess_targetlist(PlannerInfo *root)
Definition: preptlist.c:62
RelOptInfo * plan_set_operations(PlannerInfo *root)
Definition: prepunion.c:99
RelOptInfo * find_base_rel(PlannerInfo *root, int relid)
Definition: relnode.c:414
Cardinality limit_tuples
Definition: pathnodes.h:3323
Index relid
Definition: pathnodes.h:918
int numWindowFuncs
Definition: clauses.h:21
List * activeWindows
Definition: planner.c:123
grouping_sets_data * gset_data
Definition: planner.c:124
SetOperationStmt * setop
Definition: planner.c:125
void split_pathtarget_at_srfs(PlannerInfo *root, PathTarget *target, PathTarget *input_target, List **targets, List **targets_contain_srfs)
Definition: tlist.c:881
#define create_pathtarget(root, tlist)
Definition: tlist.h:53

References generate_unaccent_rules::action, standard_qp_extra::activeWindows, add_partial_path(), add_path(), adjust_appendrel_attrs_multilevel(), adjust_inherited_attnums_multilevel(), adjust_paths_for_srfs(), apply_scanjoin_target_to_paths(), Assert, assign_special_exec_param(), bms_membership(), BMS_MULTIPLE, bms_next_member(), RelOptInfo::cheapest_total_path, CMD_MERGE, CMD_SELECT, CMD_UPDATE, RelOptInfo::consider_parallel, copyObject, FinalPathExtraData::count_est, create_distinct_paths(), create_grouping_paths(), create_limit_path(), create_lockrows_path(), create_modifytable_path(), create_ordered_paths(), create_pathtarget, create_upper_paths_hook, create_window_paths(), equal(), ereport, errcode(), errmsg(), ERROR, PathTarget::exprs, fetch_upper_rel(), find_base_rel(), find_window_functions(), standard_qp_extra::gset_data, IS_DUMMY_REL, is_parallel_safe(), lappend(), lappend_int(), LCS_asString(), lfirst, limit_needed(), FinalPathExtraData::limit_needed, FinalPathExtraData::limit_tuples, linitial_int, linitial_node, list_length(), list_make1, list_make1_int, make_group_input_target(), make_pathkeys_for_sortclauses(), make_sort_input_target(), make_window_input_target(), NIL, WindowFuncLists::numWindowFuncs, FinalPathExtraData::offset_est, optimize_window_clauses(), parse(), RelOptInfo::partial_pathlist, RelOptInfo::pathlist, plan_set_operations(), postprocess_setop_tlist(), preprocess_aggrefs(), preprocess_groupclause(), preprocess_grouping_sets(), preprocess_limit(), preprocess_minmax_aggregates(), preprocess_targetlist(), query_planner(), RelOptInfo::relid, RelOptInfo::reltarget, remove_useless_groupby_columns(), root, select_active_windows(), RelOptInfo::serverid, standard_qp_extra::setop, split_pathtarget_at_srfs(), standard_qp_callback(), UPPERREL_DISTINCT, UPPERREL_FINAL, UPPERREL_GROUP_AGG, UPPERREL_ORDERED, UPPERREL_PARTIAL_DISTINCT, UPPERREL_WINDOW, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by subquery_planner().

◆ has_volatile_pathkey()

static bool has_volatile_pathkey (List *keys)
static

Definition at line 3266 of file planner.c.

3267 {
3268  ListCell *lc;
3269 
3270  foreach(lc, keys)
3271  {
3272  PathKey *pathkey = lfirst_node(PathKey, lc);
3273 
3274  if (pathkey->pk_eclass->ec_has_volatile)
3275  return true;
3276  }
3277 
3278  return false;
3279 }

References lfirst_node.

Referenced by adjust_group_pathkeys_for_groupagg().

◆ is_degenerate_grouping()

static bool is_degenerate_grouping (PlannerInfo *root)
static

Definition at line 3986 of file planner.c.

3987 {
3988  Query *parse = root->parse;
3989 
3990  return (root->hasHavingQual || parse->groupingSets) &&
3991  !parse->hasAggs && parse->groupClause == NIL;
3992 }

References NIL, parse(), and root.

Referenced by create_grouping_paths().

◆ limit_needed()

bool limit_needed (Query *parse)

Definition at line 2684 of file planner.c.

2685 {
2686  Node *node;
2687 
2688  node = parse->limitCount;
2689  if (node)
2690  {
2691  if (IsA(node, Const))
2692  {
2693  /* NULL indicates LIMIT ALL, ie, no limit */
2694  if (!((Const *) node)->constisnull)
2695  return true; /* LIMIT with a constant value */
2696  }
2697  else
2698  return true; /* non-constant LIMIT */
2699  }
2700 
2701  node = parse->limitOffset;
2702  if (node)
2703  {
2704  if (IsA(node, Const))
2705  {
2706  /* Treat NULL as no offset; the executor would too */
2707  if (!((Const *) node)->constisnull)
2708  {
2709  int64 offset = DatumGetInt64(((Const *) node)->constvalue);
2710 
2711  if (offset != 0)
2712  return true; /* OFFSET with a nonzero value */
2713  }
2714  }
2715  else
2716  return true; /* non-constant OFFSET */
2717  }
2718 
2719  return false; /* don't need a Limit plan node */
2720 }
#define IsA(nodeptr, _type_)
Definition: nodes.h:158
static int64 DatumGetInt64(Datum X)
Definition: postgres.h:385

References DatumGetInt64(), IsA, and parse().

Referenced by grouping_planner(), and set_rel_consider_parallel().

◆ make_group_input_target()

static PathTarget * make_group_input_target ( PlannerInfo root,
PathTarget final_target 
)
static

Definition at line 5525 of file planner.c.

5526 {
5527  Query *parse = root->parse;
5528  PathTarget *input_target;
5529  List *non_group_cols;
5530  List *non_group_vars;
5531  int i;
5532  ListCell *lc;
5533 
5534  /*
5535  * We must build a target containing all grouping columns, plus any other
5536  * Vars mentioned in the query's targetlist and HAVING qual.
5537  */
5538  input_target = create_empty_pathtarget();
5539  non_group_cols = NIL;
5540 
5541  i = 0;
5542  foreach(lc, final_target->exprs)
5543  {
5544  Expr *expr = (Expr *) lfirst(lc);
5545  Index sgref = get_pathtarget_sortgroupref(final_target, i);
5546 
5547  if (sgref && root->processed_groupClause &&
5549  root->processed_groupClause) != NULL)
5550  {
5551  /*
5552  * It's a grouping column, so add it to the input target as-is.
5553  *
5554  * Note that the target is logically below the grouping step. So
5555  * with grouping sets we need to remove the RT index of the
5556  * grouping step if there is any from the target expression.
5557  */
5558  if (parse->hasGroupRTE && parse->groupingSets != NIL)
5559  {
5560  Assert(root->group_rtindex > 0);
5561  expr = (Expr *)
5562  remove_nulling_relids((Node *) expr,
5563  bms_make_singleton(root->group_rtindex),
5564  NULL);
5565  }
5566  add_column_to_pathtarget(input_target, expr, sgref);
5567  }
5568  else
5569  {
5570  /*
5571  * Non-grouping column, so just remember the expression for later
5572  * call to pull_var_clause.
5573  */
5574  non_group_cols = lappend(non_group_cols, expr);
5575  }
5576 
5577  i++;
5578  }
5579 
5580  /*
5581  * If there's a HAVING clause, we'll need the Vars it uses, too.
5582  */
5583  if (parse->havingQual)
5584  non_group_cols = lappend(non_group_cols, parse->havingQual);
5585 
5586  /*
5587  * Pull out all the Vars mentioned in non-group cols (plus HAVING), and
5588  * add them to the input target if not already present. (A Var used
5589  * directly as a GROUP BY item will be present already.) Note this
5590  * includes Vars used in resjunk items, so we are covering the needs of
5591  * ORDER BY and window specifications. Vars used within Aggrefs and
5592  * WindowFuncs will be pulled out here, too.
5593  *
5594  * Note that the target is logically below the grouping step. So with
5595  * grouping sets we need to remove the RT index of the grouping step if
5596  * there is any from the non-group Vars.
5597  */
5598  non_group_vars = pull_var_clause((Node *) non_group_cols,
5602  if (parse->hasGroupRTE && parse->groupingSets != NIL)
5603  {
5604  Assert(root->group_rtindex > 0);
5605  non_group_vars = (List *)
5606  remove_nulling_relids((Node *) non_group_vars,
5607  bms_make_singleton(root->group_rtindex),
5608  NULL);
5609  }
5610  add_new_columns_to_pathtarget(input_target, non_group_vars);
5611 
5612  /* clean up cruft */
5613  list_free(non_group_vars);
5614  list_free(non_group_cols);
5615 
5616  /* XXX this causes some redundant cost calculation ... */
5617  return set_pathtarget_cost_width(root, input_target);
5618 }
Bitmapset * bms_make_singleton(int x)
Definition: bitmapset.c:216
PathTarget * set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
Definition: costsize.c:6295
void list_free(List *list)
Definition: list.c:1546
#define PVC_RECURSE_AGGREGATES
Definition: optimizer.h:187
#define PVC_RECURSE_WINDOWFUNCS
Definition: optimizer.h:189
#define PVC_INCLUDE_PLACEHOLDERS
Definition: optimizer.h:190
#define get_pathtarget_sortgroupref(target, colno)
Definition: pathnodes.h:1555
Node * remove_nulling_relids(Node *node, const Bitmapset *removable_relids, const Bitmapset *except_relids)
SortGroupClause * get_sortgroupref_clause_noerr(Index sortref, List *clauses)
Definition: tlist.c:443
void add_new_columns_to_pathtarget(PathTarget *target, List *exprs)
Definition: tlist.c:752
PathTarget * create_empty_pathtarget(void)
Definition: tlist.c:681
List * pull_var_clause(Node *node, int flags)
Definition: var.c:612

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), Assert, bms_make_singleton(), create_empty_pathtarget(), PathTarget::exprs, get_pathtarget_sortgroupref, get_sortgroupref_clause_noerr(), i, lappend(), lfirst, list_free(), NIL, parse(), pull_var_clause(), PVC_INCLUDE_PLACEHOLDERS, PVC_RECURSE_AGGREGATES, PVC_RECURSE_WINDOWFUNCS, remove_nulling_relids(), root, and set_pathtarget_cost_width().

Referenced by grouping_planner().

◆ make_grouping_rel()

static RelOptInfo * make_grouping_rel ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget target,
bool  target_parallel_safe,
Node havingQual 
)
static

Definition at line 3933 of file planner.c.

3936 {
3937  RelOptInfo *grouped_rel;
3938 
3939  if (IS_OTHER_REL(input_rel))
3940  {
3941  grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG,
3942  input_rel->relids);
3943  grouped_rel->reloptkind = RELOPT_OTHER_UPPER_REL;
3944  }
3945  else
3946  {
3947  /*
3948  * By tradition, the relids set for the main grouping relation is
3949  * NULL. (This could be changed, but might require adjustments
3950  * elsewhere.)
3951  */
3952  grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, NULL);
3953  }
3954 
3955  /* Set target. */
3956  grouped_rel->reltarget = target;
3957 
3958  /*
3959  * If the input relation is not parallel-safe, then the grouped relation
3960  * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
3961  * target list and HAVING quals are parallel-safe.
3962  */
3963  if (input_rel->consider_parallel && target_parallel_safe &&
3964  is_parallel_safe(root, (Node *) havingQual))
3965  grouped_rel->consider_parallel = true;
3966 
3967  /*
3968  * If the input rel belongs to a single FDW, so does the grouped rel.
3969  */
3970  grouped_rel->serverid = input_rel->serverid;
3971  grouped_rel->userid = input_rel->userid;
3972  grouped_rel->useridiscurrent = input_rel->useridiscurrent;
3973  grouped_rel->fdwroutine = input_rel->fdwroutine;
3974 
3975  return grouped_rel;
3976 }
@ RELOPT_OTHER_UPPER_REL
Definition: pathnodes.h:832

References RelOptInfo::consider_parallel, fetch_upper_rel(), IS_OTHER_REL, is_parallel_safe(), RelOptInfo::relids, RELOPT_OTHER_UPPER_REL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, RelOptInfo::serverid, UPPERREL_GROUP_AGG, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by create_grouping_paths(), and create_partitionwise_grouping_paths().

◆ make_ordered_path()

static Path* make_ordered_path ( PlannerInfo root,
RelOptInfo rel,
Path path,
Path cheapest_path,
List pathkeys 
)
static

Definition at line 7022 of file planner.c.

7024 {
7025  bool is_sorted;
7026  int presorted_keys;
7027 
7028  is_sorted = pathkeys_count_contained_in(pathkeys,
7029  path->pathkeys,
7030  &presorted_keys);
7031 
7032  if (!is_sorted)
7033  {
7034  /*
7035  * Try at least sorting the cheapest path and also try incrementally
7036  * sorting any path which is partially sorted already (no need to deal
7037  * with paths which have presorted keys when incremental sort is
7038  * disabled unless it's the cheapest input path).
7039  */
7040  if (path != cheapest_path &&
7041  (presorted_keys == 0 || !enable_incremental_sort))
7042  return NULL;
7043 
7044  /*
7045  * We've no need to consider both a sort and incremental sort. We'll
7046  * just do a sort if there are no presorted keys and an incremental
7047  * sort when there are presorted keys.
7048  */
7049  if (presorted_keys == 0 || !enable_incremental_sort)
7050  path = (Path *) create_sort_path(root,
7051  rel,
7052  path,
7053  pathkeys,
7054  -1.0);
7055  else
7057  rel,
7058  path,
7059  pathkeys,
7060  presorted_keys,
7061  -1.0);
7062  }
7063 
7064  return path;
7065 }

References create_incremental_sort_path(), create_sort_path(), enable_incremental_sort, Path::pathkeys, pathkeys_count_contained_in(), and root.

Referenced by add_paths_to_grouping_rel(), and create_partial_grouping_paths().

◆ make_partial_grouping_target()

static PathTarget * make_partial_grouping_target ( PlannerInfo root,
PathTarget grouping_target,
Node havingQual 
)
static

Definition at line 5637 of file planner.c.

5640 {
5641  PathTarget *partial_target;
5642  List *non_group_cols;
5643  List *non_group_exprs;
5644  int i;
5645  ListCell *lc;
5646 
5647  partial_target = create_empty_pathtarget();
5648  non_group_cols = NIL;
5649 
5650  i = 0;
5651  foreach(lc, grouping_target->exprs)
5652  {
5653  Expr *expr = (Expr *) lfirst(lc);
5654  Index sgref = get_pathtarget_sortgroupref(grouping_target, i);
5655 
5656  if (sgref && root->processed_groupClause &&
5658  root->processed_groupClause) != NULL)
5659  {
5660  /*
5661  * It's a grouping column, so add it to the partial_target as-is.
5662  * (This allows the upper agg step to repeat the grouping calcs.)
5663  */
5664  add_column_to_pathtarget(partial_target, expr, sgref);
5665  }
5666  else
5667  {
5668  /*
5669  * Non-grouping column, so just remember the expression for later
5670  * call to pull_var_clause.
5671  */
5672  non_group_cols = lappend(non_group_cols, expr);
5673  }
5674 
5675  i++;
5676  }
5677 
5678  /*
5679  * If there's a HAVING clause, we'll need the Vars/Aggrefs it uses, too.
5680  */
5681  if (havingQual)
5682  non_group_cols = lappend(non_group_cols, havingQual);
5683 
5684  /*
5685  * Pull out all the Vars, PlaceHolderVars, and Aggrefs mentioned in
5686  * non-group cols (plus HAVING), and add them to the partial_target if not
5687  * already present. (An expression used directly as a GROUP BY item will
5688  * be present already.) Note this includes Vars used in resjunk items, so
5689  * we are covering the needs of ORDER BY and window specifications.
5690  */
5691  non_group_exprs = pull_var_clause((Node *) non_group_cols,
5695 
5696  add_new_columns_to_pathtarget(partial_target, non_group_exprs);
5697 
5698  /*
5699  * Adjust Aggrefs to put them in partial mode. At this point all Aggrefs
5700  * are at the top level of the target list, so we can just scan the list
5701  * rather than recursing through the expression trees.
5702  */
5703  foreach(lc, partial_target->exprs)
5704  {
5705  Aggref *aggref = (Aggref *) lfirst(lc);
5706 
5707  if (IsA(aggref, Aggref))
5708  {
5709  Aggref *newaggref;
5710 
5711