PostgreSQL Source Code  git master
planner.c File Reference
#include "postgres.h"
#include <limits.h>
#include <math.h>
#include "access/genam.h"
#include "access/parallel.h"
#include "access/sysattr.h"
#include "access/table.h"
#include "catalog/pg_aggregate.h"
#include "catalog/pg_constraint.h"
#include "catalog/pg_inherits.h"
#include "catalog/pg_proc.h"
#include "catalog/pg_type.h"
#include "executor/executor.h"
#include "foreign/fdwapi.h"
#include "jit/jit.h"
#include "lib/bipartite_match.h"
#include "lib/knapsack.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "nodes/supportnodes.h"
#include "optimizer/appendinfo.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/optimizer.h"
#include "optimizer/paramassign.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/plancat.h"
#include "optimizer/planmain.h"
#include "optimizer/planner.h"
#include "optimizer/prep.h"
#include "optimizer/subselect.h"
#include "optimizer/tlist.h"
#include "parser/analyze.h"
#include "parser/parse_agg.h"
#include "parser/parse_clause.h"
#include "parser/parse_relation.h"
#include "parser/parsetree.h"
#include "partitioning/partdesc.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
#include "utils/selfuncs.h"
Include dependency graph for planner.c:

Go to the source code of this file.

Data Structures

struct  grouping_sets_data
 
struct  WindowClauseSortData
 
struct  standard_qp_extra
 

Macros

#define EXPRKIND_QUAL   0
 
#define EXPRKIND_TARGET   1
 
#define EXPRKIND_RTFUNC   2
 
#define EXPRKIND_RTFUNC_LATERAL   3
 
#define EXPRKIND_VALUES   4
 
#define EXPRKIND_VALUES_LATERAL   5
 
#define EXPRKIND_LIMIT   6
 
#define EXPRKIND_APPINFO   7
 
#define EXPRKIND_PHV   8
 
#define EXPRKIND_TABLESAMPLE   9
 
#define EXPRKIND_ARBITER_ELEM   10
 
#define EXPRKIND_TABLEFUNC   11
 
#define EXPRKIND_TABLEFUNC_LATERAL   12
 

Functions

static Node * preprocess_expression (PlannerInfo *root, Node *expr, int kind)
 
static void preprocess_qual_conditions (PlannerInfo *root, Node *jtnode)
 
static void grouping_planner (PlannerInfo *root, double tuple_fraction, SetOperationStmt *setops)
 
static grouping_sets_data * preprocess_grouping_sets (PlannerInfo *root)
 
static List * remap_to_groupclause_idx (List *groupClause, List *gsets, int *tleref_to_colnum_map)
 
static void preprocess_rowmarks (PlannerInfo *root)
 
static double preprocess_limit (PlannerInfo *root, double tuple_fraction, int64 *offset_est, int64 *count_est)
 
static void remove_useless_groupby_columns (PlannerInfo *root)
 
static List * preprocess_groupclause (PlannerInfo *root, List *force)
 
static List * extract_rollup_sets (List *groupingSets)
 
static List * reorder_grouping_sets (List *groupingSets, List *sortclause)
 
static void standard_qp_callback (PlannerInfo *root, void *extra)
 
static double get_number_of_groups (PlannerInfo *root, double path_rows, grouping_sets_data *gd, List *target_list)
 
static RelOptInfo * create_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, grouping_sets_data *gd)
 
static bool is_degenerate_grouping (PlannerInfo *root)
 
static void create_degenerate_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel)
 
static RelOptInfo * make_grouping_rel (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, Node *havingQual)
 
static void create_ordinary_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, GroupPathExtraData *extra, RelOptInfo **partially_grouped_rel_p)
 
static void consider_groupingsets_paths (PlannerInfo *root, RelOptInfo *grouped_rel, Path *path, bool is_sorted, bool can_hash, grouping_sets_data *gd, const AggClauseCosts *agg_costs, double dNumGroups)
 
static RelOptInfo * create_window_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *input_target, PathTarget *output_target, bool output_target_parallel_safe, WindowFuncLists *wflists, List *activeWindows)
 
static void create_one_window_path (PlannerInfo *root, RelOptInfo *window_rel, Path *path, PathTarget *input_target, PathTarget *output_target, WindowFuncLists *wflists, List *activeWindows)
 
static RelOptInfo * create_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target)
 
static void create_partial_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *final_distinct_rel, PathTarget *target)
 
static RelOptInfo * create_final_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *distinct_rel)
 
static RelOptInfo * create_ordered_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, double limit_tuples)
 
static PathTarget * make_group_input_target (PlannerInfo *root, PathTarget *final_target)
 
static PathTarget * make_partial_grouping_target (PlannerInfo *root, PathTarget *grouping_target, Node *havingQual)
 
static List * postprocess_setop_tlist (List *new_tlist, List *orig_tlist)
 
static void optimize_window_clauses (PlannerInfo *root, WindowFuncLists *wflists)
 
static List * select_active_windows (PlannerInfo *root, WindowFuncLists *wflists)
 
static PathTarget * make_window_input_target (PlannerInfo *root, PathTarget *final_target, List *activeWindows)
 
static List * make_pathkeys_for_window (PlannerInfo *root, WindowClause *wc, List *tlist)
 
static PathTarget * make_sort_input_target (PlannerInfo *root, PathTarget *final_target, bool *have_postponed_srfs)
 
static void adjust_paths_for_srfs (PlannerInfo *root, RelOptInfo *rel, List *targets, List *targets_contain_srfs)
 
static void add_paths_to_grouping_rel (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, double dNumGroups, GroupPathExtraData *extra)
 
static RelOptInfo * create_partial_grouping_paths (PlannerInfo *root, RelOptInfo *grouped_rel, RelOptInfo *input_rel, grouping_sets_data *gd, GroupPathExtraData *extra, bool force_rel_creation)
 
static void gather_grouping_paths (PlannerInfo *root, RelOptInfo *rel)
 
static bool can_partial_agg (PlannerInfo *root)
 
static void apply_scanjoin_target_to_paths (PlannerInfo *root, RelOptInfo *rel, List *scanjoin_targets, List *scanjoin_targets_contain_srfs, bool scanjoin_target_parallel_safe, bool tlist_same_exprs)
 
static void create_partitionwise_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, PartitionwiseAggregateType patype, GroupPathExtraData *extra)
 
static bool group_by_has_partkey (RelOptInfo *input_rel, List *targetList, List *groupClause)
 
static int common_prefix_cmp (const void *a, const void *b)
 
static List * generate_setop_child_grouplist (SetOperationStmt *op, List *targetlist)
 
PlannedStmt * planner (Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams)
 
PlannedStmt * standard_planner (Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams)
 
PlannerInfo * subquery_planner (PlannerGlobal *glob, Query *parse, PlannerInfo *parent_root, bool hasRecursion, double tuple_fraction, SetOperationStmt *setops)
 
Expr * preprocess_phv_expression (PlannerInfo *root, Expr *expr)
 
RowMarkType select_rowmark_type (RangeTblEntry *rte, LockClauseStrength strength)
 
bool limit_needed (Query *parse)
 
static bool has_volatile_pathkey (List *keys)
 
static void adjust_group_pathkeys_for_groupagg (PlannerInfo *root)
 
void mark_partial_aggref (Aggref *agg, AggSplit aggsplit)
 
Path * get_cheapest_fractional_path (RelOptInfo *rel, double tuple_fraction)
 
Expr * expression_planner (Expr *expr)
 
Expr * expression_planner_with_deps (Expr *expr, List **relationOids, List **invalItems)
 
bool plan_cluster_use_sort (Oid tableOid, Oid indexOid)
 
int plan_create_index_workers (Oid tableOid, Oid indexOid)
 
static Path * make_ordered_path (PlannerInfo *root, RelOptInfo *rel, Path *path, Path *cheapest_path, List *pathkeys)
 

Variables

double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION
 
int debug_parallel_query = DEBUG_PARALLEL_OFF
 
bool parallel_leader_participation = true
 
planner_hook_type planner_hook = NULL
 
create_upper_paths_hook_type create_upper_paths_hook = NULL
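
planner_hook lets an extension intercept or wrap query planning, and create_upper_paths_hook lets it contribute extra upper-relation paths. Below is a minimal pass-through sketch for planner_hook, assuming a hypothetical extension module; my_planner, prev_planner_hook, and the module boilerplate are illustrative names, not part of planner.c.

/* Hypothetical extension sketch: wrapping the planner via planner_hook. */
#include "postgres.h"
#include "fmgr.h"
#include "nodes/params.h"
#include "optimizer/planner.h"

PG_MODULE_MAGIC;

static planner_hook_type prev_planner_hook = NULL;

static PlannedStmt *
my_planner(Query *parse, const char *query_string,
           int cursorOptions, ParamListInfo boundParams)
{
    /* ... inspect or adjust the Query here ... */

    /* Chain to any previously installed hook, else the standard planner. */
    if (prev_planner_hook)
        return prev_planner_hook(parse, query_string,
                                 cursorOptions, boundParams);
    return standard_planner(parse, query_string,
                            cursorOptions, boundParams);
}

void
_PG_init(void)
{
    prev_planner_hook = planner_hook;
    planner_hook = my_planner;
}

Saving and chaining the previous hook value is the conventional way to keep multiple such extensions coexisting.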
 

Macro Definition Documentation

◆ EXPRKIND_APPINFO

#define EXPRKIND_APPINFO   7

Definition at line 85 of file planner.c.

◆ EXPRKIND_ARBITER_ELEM

#define EXPRKIND_ARBITER_ELEM   10

Definition at line 88 of file planner.c.

◆ EXPRKIND_LIMIT

#define EXPRKIND_LIMIT   6

Definition at line 84 of file planner.c.

◆ EXPRKIND_PHV

#define EXPRKIND_PHV   8

Definition at line 86 of file planner.c.

◆ EXPRKIND_QUAL

#define EXPRKIND_QUAL   0

Definition at line 78 of file planner.c.

◆ EXPRKIND_RTFUNC

#define EXPRKIND_RTFUNC   2

Definition at line 80 of file planner.c.

◆ EXPRKIND_RTFUNC_LATERAL

#define EXPRKIND_RTFUNC_LATERAL   3

Definition at line 81 of file planner.c.

◆ EXPRKIND_TABLEFUNC

#define EXPRKIND_TABLEFUNC   11

Definition at line 89 of file planner.c.

◆ EXPRKIND_TABLEFUNC_LATERAL

#define EXPRKIND_TABLEFUNC_LATERAL   12

Definition at line 90 of file planner.c.

◆ EXPRKIND_TABLESAMPLE

#define EXPRKIND_TABLESAMPLE   9

Definition at line 87 of file planner.c.

◆ EXPRKIND_TARGET

#define EXPRKIND_TARGET   1

Definition at line 79 of file planner.c.

◆ EXPRKIND_VALUES

#define EXPRKIND_VALUES   4

Definition at line 82 of file planner.c.

◆ EXPRKIND_VALUES_LATERAL

#define EXPRKIND_VALUES_LATERAL   5

Definition at line 83 of file planner.c.
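
These EXPRKIND_* codes classify expressions for preprocess_expression(), which applies kind-specific processing (for example, qual canonicalization only for EXPRKIND_QUAL). An abridged fragment in the style of subquery_planner() showing the calling pattern; treat it as illustrative orientation, not the exact source:

/* Fragment (sketch): how subquery_planner() tags each expression class. */
parse->targetList = (List *)
    preprocess_expression(root, (Node *) parse->targetList,
                          EXPRKIND_TARGET);
parse->limitOffset = preprocess_expression(root, parse->limitOffset,
                                           EXPRKIND_LIMIT);
parse->limitCount = preprocess_expression(root, parse->limitCount,
                                          EXPRKIND_LIMIT);
/* quals are walked separately and tagged EXPRKIND_QUAL */
preprocess_qual_conditions(root, (Node *) parse->jointree);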

Function Documentation

◆ add_paths_to_grouping_rel()

static void add_paths_to_grouping_rel ( PlannerInfo * root,
RelOptInfo * input_rel,
RelOptInfo * grouped_rel,
RelOptInfo * partially_grouped_rel,
const AggClauseCosts * agg_costs,
grouping_sets_data * gd,
double  dNumGroups,
GroupPathExtraData * extra 
)
static

Definition at line 6966 of file planner.c.

6972 {
6973  Query *parse = root->parse;
6974  Path *cheapest_path = input_rel->cheapest_total_path;
6975  ListCell *lc;
6976  bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
6977  bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
6978  List *havingQual = (List *) extra->havingQual;
6979  AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
6980 
6981  if (can_sort)
6982  {
6983  /*
6984  * Use any available suitably-sorted path as input, and also consider
6985  * sorting the cheapest-total path and incremental sort on any paths
6986  * with presorted keys.
6987  */
6988  foreach(lc, input_rel->pathlist)
6989  {
6990  ListCell *lc2;
6991  Path *path = (Path *) lfirst(lc);
6992  Path *path_save = path;
6993  List *pathkey_orderings = NIL;
6994 
6995  /* generate alternative group orderings that might be useful */
6996  pathkey_orderings = get_useful_group_keys_orderings(root, path);
6997 
6998  Assert(list_length(pathkey_orderings) > 0);
6999 
7000  foreach(lc2, pathkey_orderings)
7001  {
7002  GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7003 
7004  /* restore the path (we replace it in the loop) */
7005  path = path_save;
7006 
7007  path = make_ordered_path(root,
7008  grouped_rel,
7009  path,
7010  cheapest_path,
7011  info->pathkeys);
7012  if (path == NULL)
7013  continue;
7014 
7015  /* Now decide what to stick atop it */
7016  if (parse->groupingSets)
7017  {
7018  consider_groupingsets_paths(root, grouped_rel,
7019  path, true, can_hash,
7020  gd, agg_costs, dNumGroups);
7021  }
7022  else if (parse->hasAggs)
7023  {
7024  /*
7025  * We have aggregation, possibly with plain GROUP BY. Make
7026  * an AggPath.
7027  */
7028  add_path(grouped_rel, (Path *)
7029  create_agg_path(root,
7030  grouped_rel,
7031  path,
7032  grouped_rel->reltarget,
7033  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7034  AGGSPLIT_SIMPLE,
7035  info->clauses,
7036  havingQual,
7037  agg_costs,
7038  dNumGroups));
7039  }
7040  else if (parse->groupClause)
7041  {
7042  /*
7043  * We have GROUP BY without aggregation or grouping sets.
7044  * Make a GroupPath.
7045  */
7046  add_path(grouped_rel, (Path *)
7047  create_group_path(root,
7048  grouped_rel,
7049  path,
7050  info->clauses,
7051  havingQual,
7052  dNumGroups));
7053  }
7054  else
7055  {
7056  /* Other cases should have been handled above */
7057  Assert(false);
7058  }
7059  }
7060  }
7061 
7062  /*
7063  * Instead of operating directly on the input relation, we can
7064  * consider finalizing a partially aggregated path.
7065  */
7066  if (partially_grouped_rel != NULL)
7067  {
7068  foreach(lc, partially_grouped_rel->pathlist)
7069  {
7070  ListCell *lc2;
7071  Path *path = (Path *) lfirst(lc);
7072  Path *path_save = path;
7073  List *pathkey_orderings = NIL;
7074 
7075  /* generate alternative group orderings that might be useful */
7076  pathkey_orderings = get_useful_group_keys_orderings(root, path);
7077 
7078  Assert(list_length(pathkey_orderings) > 0);
7079 
7080  /* process all potentially interesting grouping reorderings */
7081  foreach(lc2, pathkey_orderings)
7082  {
7083  GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7084 
7085  /* restore the path (we replace it in the loop) */
7086  path = path_save;
7087 
7088  path = make_ordered_path(root,
7089  grouped_rel,
7090  path,
7091  partially_grouped_rel->cheapest_total_path,
7092  info->pathkeys);
7093 
7094  if (path == NULL)
7095  continue;
7096 
7097  if (parse->hasAggs)
7098  add_path(grouped_rel, (Path *)
7099  create_agg_path(root,
7100  grouped_rel,
7101  path,
7102  grouped_rel->reltarget,
7103  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7104  AGGSPLIT_FINAL_DESERIAL,
7105  info->clauses,
7106  havingQual,
7107  agg_final_costs,
7108  dNumGroups));
7109  else
7110  add_path(grouped_rel, (Path *)
7111  create_group_path(root,
7112  grouped_rel,
7113  path,
7114  info->clauses,
7115  havingQual,
7116  dNumGroups));
7117 
7118  }
7119  }
7120  }
7121  }
7122 
7123  if (can_hash)
7124  {
7125  if (parse->groupingSets)
7126  {
7127  /*
7128  * Try for a hash-only groupingsets path over unsorted input.
7129  */
7130  consider_groupingsets_paths(root, grouped_rel,
7131  cheapest_path, false, true,
7132  gd, agg_costs, dNumGroups);
7133  }
7134  else
7135  {
7136  /*
7137  * Generate a HashAgg Path. We just need an Agg over the
7138  * cheapest-total input path, since input order won't matter.
7139  */
7140  add_path(grouped_rel, (Path *)
7141  create_agg_path(root, grouped_rel,
7142  cheapest_path,
7143  grouped_rel->reltarget,
7144  AGG_HASHED,
7145  AGGSPLIT_SIMPLE,
7146  root->processed_groupClause,
7147  havingQual,
7148  agg_costs,
7149  dNumGroups));
7150  }
7151 
7152  /*
7153  * Generate a Finalize HashAgg Path atop of the cheapest partially
7154  * grouped path, assuming there is one
7155  */
7156  if (partially_grouped_rel && partially_grouped_rel->pathlist)
7157  {
7158  Path *path = partially_grouped_rel->cheapest_total_path;
7159 
7160  add_path(grouped_rel, (Path *)
7161  create_agg_path(root,
7162  grouped_rel,
7163  path,
7164  grouped_rel->reltarget,
7165  AGG_HASHED,
7166  AGGSPLIT_FINAL_DESERIAL,
7167  root->processed_groupClause,
7168  havingQual,
7169  agg_final_costs,
7170  dNumGroups));
7171  }
7172  }
7173 
7174  /*
7175  * When partitionwise aggregate is used, we might have fully aggregated
7176  * paths in the partial pathlist, because add_paths_to_append_rel() will
7177  * consider a path for grouped_rel consisting of a Parallel Append of
7178  * non-partial paths from each child.
7179  */
7180  if (grouped_rel->partial_pathlist != NIL)
7181  gather_grouping_paths(root, grouped_rel);
7182 }
#define Assert(condition)
Definition: c.h:858
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:77
@ AGG_SORTED
Definition: nodes.h:354
@ AGG_HASHED
Definition: nodes.h:355
@ AGG_PLAIN
Definition: nodes.h:353
@ AGGSPLIT_FINAL_DESERIAL
Definition: nodes.h:380
@ AGGSPLIT_SIMPLE
Definition: nodes.h:376
List * get_useful_group_keys_orderings(PlannerInfo *root, Path *path)
Definition: pathkeys.c:465
GroupPath * create_group_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *groupClause, List *qual, double numGroups)
Definition: pathnode.c:3044
AggPath * create_agg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, AggStrategy aggstrategy, AggSplit aggsplit, List *groupClause, List *qual, const AggClauseCosts *aggcosts, double numGroups)
Definition: pathnode.c:3155
void add_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:420
#define GROUPING_CAN_USE_HASH
Definition: pathnodes.h:3254
#define GROUPING_CAN_USE_SORT
Definition: pathnodes.h:3253
#define lfirst(lc)
Definition: pg_list.h:172
static int list_length(const List *l)
Definition: pg_list.h:152
#define NIL
Definition: pg_list.h:68
static void gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
Definition: planner.c:7500
static void consider_groupingsets_paths(PlannerInfo *root, RelOptInfo *grouped_rel, Path *path, bool is_sorted, bool can_hash, grouping_sets_data *gd, const AggClauseCosts *agg_costs, double dNumGroups)
Definition: planner.c:4134
static Path * make_ordered_path(PlannerInfo *root, RelOptInfo *rel, Path *path, Path *cheapest_path, List *pathkeys)
Definition: planner.c:6915
tree ctl root
Definition: radixtree.h:1886
static struct subre * parse(struct vars *v, int stopper, int type, struct state *init, struct state *final)
Definition: regcomp.c:715
AggClauseCosts agg_final_costs
Definition: pathnodes.h:3294
Definition: pg_list.h:54
struct PathTarget * reltarget
Definition: pathnodes.h:887
List * pathlist
Definition: pathnodes.h:892
struct Path * cheapest_total_path
Definition: pathnodes.h:896
List * partial_pathlist
Definition: pathnodes.h:894

References add_path(), GroupPathExtraData::agg_final_costs, AGG_HASHED, AGG_PLAIN, AGG_SORTED, AGGSPLIT_FINAL_DESERIAL, AGGSPLIT_SIMPLE, Assert, RelOptInfo::cheapest_total_path, GroupByOrdering::clauses, consider_groupingsets_paths(), create_agg_path(), create_group_path(), GroupPathExtraData::flags, gather_grouping_paths(), get_useful_group_keys_orderings(), GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, GroupPathExtraData::havingQual, if(), lfirst, list_length(), make_ordered_path(), NIL, parse(), RelOptInfo::partial_pathlist, GroupByOrdering::pathkeys, RelOptInfo::pathlist, RelOptInfo::reltarget, and root.

Referenced by create_ordinary_grouping_paths().
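
In the sorted branch above, the plan node type follows directly from the query shape: grouping sets go through consider_groupingsets_paths(), aggregation gets an AggPath (AGG_SORTED when there is a GROUP BY clause, AGG_PLAIN without one), and GROUP BY without aggregates gets a GroupPath. A standalone sketch of just that decision, using a stand-in enum rather than the real planner types:

/* Standalone sketch of the node-type decision in add_paths_to_grouping_rel(). */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

typedef enum { MAKE_GROUPINGSETS_PATH, MAKE_AGG_SORTED, MAKE_AGG_PLAIN,
               MAKE_GROUP_PATH } GroupNodeKind;

static GroupNodeKind
choose_group_node(bool groupingSets, bool hasAggs, bool groupClause)
{
    if (groupingSets)
        return MAKE_GROUPINGSETS_PATH;
    if (hasAggs)
        return groupClause ? MAKE_AGG_SORTED : MAKE_AGG_PLAIN;
    /* the source asserts the only remaining case has a GROUP BY clause */
    assert(groupClause);
    return MAKE_GROUP_PATH;
}

int main(void)
{
    /* SELECT a, sum(b) ... GROUP BY a  ->  sorted AggPath */
    printf("%d\n", choose_group_node(false, true, true) == MAKE_AGG_SORTED);
    return 0;
}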

◆ adjust_group_pathkeys_for_groupagg()

static void adjust_group_pathkeys_for_groupagg ( PlannerInfo * root)
static

Definition at line 3252 of file planner.c.

3253 {
3254  List *grouppathkeys = root->group_pathkeys;
3255  List *bestpathkeys;
3256  Bitmapset *bestaggs;
3257  Bitmapset *unprocessed_aggs;
3258  ListCell *lc;
3259  int i;
3260 
3261  /* Shouldn't be here if there are grouping sets */
3262  Assert(root->parse->groupingSets == NIL);
3263  /* Shouldn't be here unless there are some ordered aggregates */
3264  Assert(root->numOrderedAggs > 0);
3265 
3266  /* Do nothing if disabled */
3267  if (!enable_presorted_aggregate)
3268  return;
3269 
3270  /*
3271  * Make a first pass over all AggInfos to collect a Bitmapset containing
3272  * the indexes of all AggInfos to be processed below.
3273  */
3274  unprocessed_aggs = NULL;
3275  foreach(lc, root->agginfos)
3276  {
3277  AggInfo *agginfo = lfirst_node(AggInfo, lc);
3278  Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3279 
3280  if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
3281  continue;
3282 
3283  /* only add aggregates with a DISTINCT or ORDER BY */
3284  if (aggref->aggdistinct != NIL || aggref->aggorder != NIL)
3285  unprocessed_aggs = bms_add_member(unprocessed_aggs,
3286  foreach_current_index(lc));
3287  }
3288 
3289  /*
3290  * Now process all the unprocessed_aggs to find the best set of pathkeys
3291  * for the given set of aggregates.
3292  *
3293  * On the first outer loop here 'bestaggs' will be empty. We'll populate
3294  * this during the first loop using the pathkeys for the very first
3295  * AggInfo then taking any stronger pathkeys from any other AggInfos with
3296  * a more strict set of compatible pathkeys. Once the outer loop is
3297  * complete, we mark off all the aggregates with compatible pathkeys then
3298  * remove those from the unprocessed_aggs and repeat the process to try to
3299  * find another set of pathkeys that are suitable for a larger number of
3300  * aggregates. The outer loop will stop when there are not enough
3301  * unprocessed aggregates for it to be possible to find a set of pathkeys
3302  * to suit a larger number of aggregates.
3303  */
3304  bestpathkeys = NIL;
3305  bestaggs = NULL;
3306  while (bms_num_members(unprocessed_aggs) > bms_num_members(bestaggs))
3307  {
3308  Bitmapset *aggindexes = NULL;
3309  List *currpathkeys = NIL;
3310 
3311  i = -1;
3312  while ((i = bms_next_member(unprocessed_aggs, i)) >= 0)
3313  {
3314  AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3315  Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3316  List *sortlist;
3317  List *pathkeys;
3318 
3319  if (aggref->aggdistinct != NIL)
3320  sortlist = aggref->aggdistinct;
3321  else
3322  sortlist = aggref->aggorder;
3323 
3324  pathkeys = make_pathkeys_for_sortclauses(root, sortlist,
3325  aggref->args);
3326 
3327  /*
3328  * Ignore Aggrefs which have volatile functions in their ORDER BY
3329  * or DISTINCT clause.
3330  */
3331  if (has_volatile_pathkey(pathkeys))
3332  {
3333  unprocessed_aggs = bms_del_member(unprocessed_aggs, i);
3334  continue;
3335  }
3336 
3337  /*
3338  * When not set yet, take the pathkeys from the first unprocessed
3339  * aggregate.
3340  */
3341  if (currpathkeys == NIL)
3342  {
3343  currpathkeys = pathkeys;
3344 
3345  /* include the GROUP BY pathkeys, if they exist */
3346  if (grouppathkeys != NIL)
3347  currpathkeys = append_pathkeys(list_copy(grouppathkeys),
3348  currpathkeys);
3349 
3350  /* record that we found pathkeys for this aggregate */
3351  aggindexes = bms_add_member(aggindexes, i);
3352  }
3353  else
3354  {
3355  /* now look for a stronger set of matching pathkeys */
3356 
3357  /* include the GROUP BY pathkeys, if they exist */
3358  if (grouppathkeys != NIL)
3359  pathkeys = append_pathkeys(list_copy(grouppathkeys),
3360  pathkeys);
3361 
3362  /* are 'pathkeys' compatible or better than 'currpathkeys'? */
3363  switch (compare_pathkeys(currpathkeys, pathkeys))
3364  {
3365  case PATHKEYS_BETTER2:
3366  /* 'pathkeys' are stronger, use these ones instead */
3367  currpathkeys = pathkeys;
3368  /* FALLTHROUGH */
3369 
3370  case PATHKEYS_BETTER1:
3371  /* 'pathkeys' are less strict */
3372  /* FALLTHROUGH */
3373 
3374  case PATHKEYS_EQUAL:
3375  /* mark this aggregate as covered by 'currpathkeys' */
3376  aggindexes = bms_add_member(aggindexes, i);
3377  break;
3378 
3379  case PATHKEYS_DIFFERENT:
3380  break;
3381  }
3382  }
3383  }
3384 
3385  /* remove the aggregates that we've just processed */
3386  unprocessed_aggs = bms_del_members(unprocessed_aggs, aggindexes);
3387 
3388  /*
3389  * If this pass included more aggregates than the previous best then
3390  * use these ones as the best set.
3391  */
3392  if (bms_num_members(aggindexes) > bms_num_members(bestaggs))
3393  {
3394  bestaggs = aggindexes;
3395  bestpathkeys = currpathkeys;
3396  }
3397  }
3398 
3399  /*
3400  * If we found any ordered aggregates, update root->group_pathkeys to add
3401  * the best set of aggregate pathkeys. Note that bestpathkeys includes
3402  * the original GROUP BY pathkeys already.
3403  */
3404  if (bestpathkeys != NIL)
3405  root->group_pathkeys = bestpathkeys;
3406 
3407  /*
3408  * Now that we've found the best set of aggregates we can set the
3409  * presorted flag to indicate to the executor that it needn't bother
3410  * performing a sort for these Aggrefs. We're able to do this now as
3411  * there's no chance of a Hash Aggregate plan as create_grouping_paths
3412  * will not mark the GROUP BY as GROUPING_CAN_USE_HASH due to the presence
3413  * of ordered aggregates.
3414  */
3415  i = -1;
3416  while ((i = bms_next_member(bestaggs, i)) >= 0)
3417  {
3418  AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3419 
3420  foreach(lc, agginfo->aggrefs)
3421  {
3422  Aggref *aggref = lfirst_node(Aggref, lc);
3423 
3424  aggref->aggpresorted = true;
3425  }
3426  }
3427 }
int bms_next_member(const Bitmapset *a, int prevbit)
Definition: bitmapset.c:1306
int bms_num_members(const Bitmapset *a)
Definition: bitmapset.c:751
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:815
Bitmapset * bms_del_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:1161
Bitmapset * bms_del_member(Bitmapset *a, int x)
Definition: bitmapset.c:868
bool enable_presorted_aggregate
Definition: costsize.c:153
int i
Definition: isn.c:73
List * list_copy(const List *oldlist)
Definition: list.c:1573
List * append_pathkeys(List *target, List *source)
Definition: pathkeys.c:106
List * make_pathkeys_for_sortclauses(PlannerInfo *root, List *sortclauses, List *tlist)
Definition: pathkeys.c:1330
PathKeysComparison compare_pathkeys(List *keys1, List *keys2)
Definition: pathkeys.c:302
@ PATHKEYS_BETTER2
Definition: paths.h:204
@ PATHKEYS_BETTER1
Definition: paths.h:203
@ PATHKEYS_DIFFERENT
Definition: paths.h:205
@ PATHKEYS_EQUAL
Definition: paths.h:202
#define lfirst_node(type, lc)
Definition: pg_list.h:176
#define linitial_node(type, l)
Definition: pg_list.h:181
#define foreach_current_index(var_or_cell)
Definition: pg_list.h:403
#define list_nth_node(type, list, n)
Definition: pg_list.h:327
static bool has_volatile_pathkey(List *keys)
Definition: planner.c:3207
List * aggrefs
Definition: pathnodes.h:3375
List * aggdistinct
Definition: primnodes.h:474
List * args
Definition: primnodes.h:468
List * aggorder
Definition: primnodes.h:471

References Aggref::aggdistinct, Aggref::aggorder, AggInfo::aggrefs, append_pathkeys(), Aggref::args, Assert, bms_add_member(), bms_del_member(), bms_del_members(), bms_next_member(), bms_num_members(), compare_pathkeys(), enable_presorted_aggregate, foreach_current_index, has_volatile_pathkey(), i, lfirst_node, linitial_node, list_copy(), list_nth_node, make_pathkeys_for_sortclauses(), NIL, PATHKEYS_BETTER1, PATHKEYS_BETTER2, PATHKEYS_DIFFERENT, PATHKEYS_EQUAL, and root.

Referenced by standard_qp_callback().
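
The loop above is a greedy cover: each pass finds the pathkey list that satisfies the most remaining ordered aggregates, upgrading to stronger pathkeys when compare_pathkeys() reports PATHKEYS_BETTER2, and stops once no pass can beat the best cover so far. A standalone sketch of the core idea, crudely modelling each aggregate's required sort order as a string where "satisfies" means the requirement is a prefix of the chosen order (a stand-in for compare_pathkeys(); GROUP BY pathkeys and volatile-key handling omitted):

/* Standalone sketch: pick the sort order satisfying the most aggregates. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool
is_prefix(const char *p, const char *s)
{
    return strncmp(p, s, strlen(p)) == 0;
}

int main(void)
{
    /* required orders of three ordered aggregates */
    const char *orders[] = {"a", "ab", "c"};
    int         n = 3, besti = 0, bestcover = 0;

    for (int i = 0; i < n; i++)
    {
        int cover = 0;

        /* sorting by orders[i] also satisfies any aggregate whose
         * requirement is a prefix of it */
        for (int j = 0; j < n; j++)
            if (is_prefix(orders[j], orders[i]))
                cover++;
        if (cover > bestcover)
        {
            bestcover = cover;
            besti = i;
        }
    }
    printf("best order \"%s\" covers %d of %d aggregates\n",
           orders[besti], bestcover, n);    /* "ab" covers 2 of 3 */
    return 0;
}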

◆ adjust_paths_for_srfs()

static void adjust_paths_for_srfs ( PlannerInfo * root,
RelOptInfo * rel,
List * targets,
List * targets_contain_srfs 
)
static

Definition at line 6465 of file planner.c.

6467 {
6468  ListCell *lc;
6469 
6470  Assert(list_length(targets) == list_length(targets_contain_srfs));
6471  Assert(!linitial_int(targets_contain_srfs));
6472 
6473  /* If no SRFs appear at this plan level, nothing to do */
6474  if (list_length(targets) == 1)
6475  return;
6476 
6477  /*
6478  * Stack SRF-evaluation nodes atop each path for the rel.
6479  *
6480  * In principle we should re-run set_cheapest() here to identify the
6481  * cheapest path, but it seems unlikely that adding the same tlist eval
6482  * costs to all the paths would change that, so we don't bother. Instead,
6483  * just assume that the cheapest-startup and cheapest-total paths remain
6484  * so. (There should be no parameterized paths anymore, so we needn't
6485  * worry about updating cheapest_parameterized_paths.)
6486  */
6487  foreach(lc, rel->pathlist)
6488  {
6489  Path *subpath = (Path *) lfirst(lc);
6490  Path *newpath = subpath;
6491  ListCell *lc1,
6492  *lc2;
6493 
6494  Assert(subpath->param_info == NULL);
6495  forboth(lc1, targets, lc2, targets_contain_srfs)
6496  {
6497  PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6498  bool contains_srfs = (bool) lfirst_int(lc2);
6499 
6500  /* If this level doesn't contain SRFs, do regular projection */
6501  if (contains_srfs)
6502  newpath = (Path *) create_set_projection_path(root,
6503  rel,
6504  newpath,
6505  thistarget);
6506  else
6507  newpath = (Path *) apply_projection_to_path(root,
6508  rel,
6509  newpath,
6510  thistarget);
6511  }
6512  lfirst(lc) = newpath;
6513  if (subpath == rel->cheapest_startup_path)
6514  rel->cheapest_startup_path = newpath;
6515  if (subpath == rel->cheapest_total_path)
6516  rel->cheapest_total_path = newpath;
6517  }
6518 
6519  /* Likewise for partial paths, if any */
6520  foreach(lc, rel->partial_pathlist)
6521  {
6522  Path *subpath = (Path *) lfirst(lc);
6523  Path *newpath = subpath;
6524  ListCell *lc1,
6525  *lc2;
6526 
6527  Assert(subpath->param_info == NULL);
6528  forboth(lc1, targets, lc2, targets_contain_srfs)
6529  {
6530  PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6531  bool contains_srfs = (bool) lfirst_int(lc2);
6532 
6533  /* If this level doesn't contain SRFs, do regular projection */
6534  if (contains_srfs)
6535  newpath = (Path *) create_set_projection_path(root,
6536  rel,
6537  newpath,
6538  thistarget);
6539  else
6540  {
6541  /* avoid apply_projection_to_path, in case of multiple refs */
6542  newpath = (Path *) create_projection_path(root,
6543  rel,
6544  newpath,
6545  thistarget);
6546  }
6547  }
6548  lfirst(lc) = newpath;
6549  }
6550 }
unsigned char bool
Definition: c.h:456
Datum subpath(PG_FUNCTION_ARGS)
Definition: ltree_op.c:310
ProjectionPath * create_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2685
ProjectSetPath * create_set_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2882
Path * apply_projection_to_path(PlannerInfo *root, RelOptInfo *rel, Path *path, PathTarget *target)
Definition: pathnode.c:2793
#define forboth(cell1, list1, cell2, list2)
Definition: pg_list.h:518
#define lfirst_int(lc)
Definition: pg_list.h:173
#define linitial_int(l)
Definition: pg_list.h:179
struct Path * cheapest_startup_path
Definition: pathnodes.h:895

References apply_projection_to_path(), Assert, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, create_projection_path(), create_set_projection_path(), forboth, lfirst, lfirst_int, lfirst_node, linitial_int, list_length(), RelOptInfo::partial_pathlist, RelOptInfo::pathlist, root, and subpath().

Referenced by apply_scanjoin_target_to_paths(), and grouping_planner().
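
At each target level, the function wraps every path with a ProjectSetPath when that level contains set-returning functions and with an ordinary projection otherwise, walking the targets and flags lists in lockstep (forboth). A standalone sketch of that lockstep walk with stand-in data; the parallel lists are assumed to come from an upstream split such as split_pathtarget_at_srfs():

/* Standalone sketch of the per-level SRF wrapping decision. */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
    /* parallel lists: target per level, and whether that level has SRFs */
    const char *targets[] = {"base tlist", "tlist with SRF", "final tlist"};
    bool        contains_srfs[] = {false, true, false};

    for (int lvl = 0; lvl < 3; lvl++)
        printf("level %d: wrap with %s for \"%s\"\n", lvl,
               contains_srfs[lvl] ? "ProjectSetPath" : "plain projection",
               targets[lvl]);
    return 0;
}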

◆ apply_scanjoin_target_to_paths()

static void apply_scanjoin_target_to_paths ( PlannerInfo * root,
RelOptInfo * rel,
List * scanjoin_targets,
List * scanjoin_targets_contain_srfs,
bool  scanjoin_target_parallel_safe,
bool  tlist_same_exprs 
)
static

Definition at line 7627 of file planner.c.

7633 {
7634  bool rel_is_partitioned = IS_PARTITIONED_REL(rel);
7635  PathTarget *scanjoin_target;
7636  ListCell *lc;
7637 
7638  /* This recurses, so be paranoid. */
7639  check_stack_depth();
7640 
7641  /*
7642  * If the rel is partitioned, we want to drop its existing paths and
7643  * generate new ones. This function would still be correct if we kept the
7644  * existing paths: we'd modify them to generate the correct target above
7645  * the partitioning Append, and then they'd compete on cost with paths
7646  * generating the target below the Append. However, in our current cost
7647  * model the latter way is always the same or cheaper cost, so modifying
7648  * the existing paths would just be useless work. Moreover, when the cost
7649  * is the same, varying roundoff errors might sometimes allow an existing
7650  * path to be picked, resulting in undesirable cross-platform plan
7651  * variations. So we drop old paths and thereby force the work to be done
7652  * below the Append, except in the case of a non-parallel-safe target.
7653  *
7654  * Some care is needed, because we have to allow
7655  * generate_useful_gather_paths to see the old partial paths in the next
7656  * stanza. Hence, zap the main pathlist here, then allow
7657  * generate_useful_gather_paths to add path(s) to the main list, and
7658  * finally zap the partial pathlist.
7659  */
7660  if (rel_is_partitioned)
7661  rel->pathlist = NIL;
7662 
7663  /*
7664  * If the scan/join target is not parallel-safe, partial paths cannot
7665  * generate it.
7666  */
7667  if (!scanjoin_target_parallel_safe)
7668  {
7669  /*
7670  * Since we can't generate the final scan/join target in parallel
7671  * workers, this is our last opportunity to use any partial paths that
7672  * exist; so build Gather path(s) that use them and emit whatever the
7673  * current reltarget is. We don't do this in the case where the
7674  * target is parallel-safe, since we will be able to generate superior
7675  * paths by doing it after the final scan/join target has been
7676  * applied.
7677  */
7678  generate_useful_gather_paths(root, rel, false);
7679 
7680  /* Can't use parallel query above this level. */
7681  rel->partial_pathlist = NIL;
7682  rel->consider_parallel = false;
7683  }
7684 
7685  /* Finish dropping old paths for a partitioned rel, per comment above */
7686  if (rel_is_partitioned)
7687  rel->partial_pathlist = NIL;
7688 
7689  /* Extract SRF-free scan/join target. */
7690  scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
7691 
7692  /*
7693  * Apply the SRF-free scan/join target to each existing path.
7694  *
7695  * If the tlist exprs are the same, we can just inject the sortgroupref
7696  * information into the existing pathtargets. Otherwise, replace each
7697  * path with a projection path that generates the SRF-free scan/join
7698  * target. This can't change the ordering of paths within rel->pathlist,
7699  * so we just modify the list in place.
7700  */
7701  foreach(lc, rel->pathlist)
7702  {
7703  Path *subpath = (Path *) lfirst(lc);
7704 
7705  /* Shouldn't have any parameterized paths anymore */
7706  Assert(subpath->param_info == NULL);
7707 
7708  if (tlist_same_exprs)
7709  subpath->pathtarget->sortgrouprefs =
7710  scanjoin_target->sortgrouprefs;
7711  else
7712  {
7713  Path *newpath;
7714 
7715  newpath = (Path *) create_projection_path(root, rel, subpath,
7716  scanjoin_target);
7717  lfirst(lc) = newpath;
7718  }
7719  }
7720 
7721  /* Likewise adjust the targets for any partial paths. */
7722  foreach(lc, rel->partial_pathlist)
7723  {
7724  Path *subpath = (Path *) lfirst(lc);
7725 
7726  /* Shouldn't have any parameterized paths anymore */
7727  Assert(subpath->param_info == NULL);
7728 
7729  if (tlist_same_exprs)
7730  subpath->pathtarget->sortgrouprefs =
7731  scanjoin_target->sortgrouprefs;
7732  else
7733  {
7734  Path *newpath;
7735 
7736  newpath = (Path *) create_projection_path(root, rel, subpath,
7737  scanjoin_target);
7738  lfirst(lc) = newpath;
7739  }
7740  }
7741 
7742  /*
7743  * Now, if final scan/join target contains SRFs, insert ProjectSetPath(s)
7744  * atop each existing path. (Note that this function doesn't look at the
7745  * cheapest-path fields, which is a good thing because they're bogus right
7746  * now.)
7747  */
7748  if (root->parse->hasTargetSRFs)
7749  adjust_paths_for_srfs(root, rel,
7750  scanjoin_targets,
7751  scanjoin_targets_contain_srfs);
7752 
7753  /*
7754  * Update the rel's target to be the final (with SRFs) scan/join target.
7755  * This now matches the actual output of all the paths, and we might get
7756  * confused in createplan.c if they don't agree. We must do this now so
7757  * that any append paths made in the next part will use the correct
7758  * pathtarget (cf. create_append_path).
7759  *
7760  * Note that this is also necessary if GetForeignUpperPaths() gets called
7761  * on the final scan/join relation or on any of its children, since the
7762  * FDW might look at the rel's target to create ForeignPaths.
7763  */
7764  rel->reltarget = llast_node(PathTarget, scanjoin_targets);
7765 
7766  /*
7767  * If the relation is partitioned, recursively apply the scan/join target
7768  * to all partitions, and generate brand-new Append paths in which the
7769  * scan/join target is computed below the Append rather than above it.
7770  * Since Append is not projection-capable, that might save a separate
7771  * Result node, and it also is important for partitionwise aggregate.
7772  */
7773  if (rel_is_partitioned)
7774  {
7775  List *live_children = NIL;
7776  int i;
7777 
7778  /* Adjust each partition. */
7779  i = -1;
7780  while ((i = bms_next_member(rel->live_parts, i)) >= 0)
7781  {
7782  RelOptInfo *child_rel = rel->part_rels[i];
7783  AppendRelInfo **appinfos;
7784  int nappinfos;
7785  List *child_scanjoin_targets = NIL;
7786 
7787  Assert(child_rel != NULL);
7788 
7789  /* Dummy children can be ignored. */
7790  if (IS_DUMMY_REL(child_rel))
7791  continue;
7792 
7793  /* Translate scan/join targets for this child. */
7794  appinfos = find_appinfos_by_relids(root, child_rel->relids,
7795  &nappinfos);
7796  foreach(lc, scanjoin_targets)
7797  {
7798  PathTarget *target = lfirst_node(PathTarget, lc);
7799 
7800  target = copy_pathtarget(target);
7801  target->exprs = (List *)
7802  adjust_appendrel_attrs(root,
7803  (Node *) target->exprs,
7804  nappinfos, appinfos);
7805  child_scanjoin_targets = lappend(child_scanjoin_targets,
7806  target);
7807  }
7808  pfree(appinfos);
7809 
7810  /* Recursion does the real work. */
7811  apply_scanjoin_target_to_paths(root, child_rel,
7812  child_scanjoin_targets,
7813  scanjoin_targets_contain_srfs,
7814  scanjoin_target_parallel_safe,
7815  tlist_same_exprs);
7816 
7817  /* Save non-dummy children for Append paths. */
7818  if (!IS_DUMMY_REL(child_rel))
7819  live_children = lappend(live_children, child_rel);
7820  }
7821 
7822  /* Build new paths for this relation by appending child paths. */
7823  add_paths_to_append_rel(root, rel, live_children);
7824  }
7825 
7826  /*
7827  * Consider generating Gather or Gather Merge paths. We must only do this
7828  * if the relation is parallel safe, and we don't do it for child rels to
7829  * avoid creating multiple Gather nodes within the same plan. We must do
7830  * this after all paths have been generated and before set_cheapest, since
7831  * one of the generated paths may turn out to be the cheapest one.
7832  */
7833  if (rel->consider_parallel && !IS_OTHER_REL(rel))
7834  generate_useful_gather_paths(root, rel, false);
7835 
7836  /*
7837  * Reassess which paths are the cheapest, now that we've potentially added
7838  * new Gather (or Gather Merge) and/or Append (or MergeAppend) paths to
7839  * this relation.
7840  */
7841  set_cheapest(rel);
7842 }
void generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
Definition: allpaths.c:3190
void add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, List *live_childrels)
Definition: allpaths.c:1302
AppendRelInfo ** find_appinfos_by_relids(PlannerInfo *root, Relids relids, int *nappinfos)
Definition: appendinfo.c:733
Node * adjust_appendrel_attrs(PlannerInfo *root, Node *node, int nappinfos, AppendRelInfo **appinfos)
Definition: appendinfo.c:196
List * lappend(List *list, void *datum)
Definition: list.c:339
void pfree(void *pointer)
Definition: mcxt.c:1520
void set_cheapest(RelOptInfo *parent_rel)
Definition: pathnode.c:242
#define IS_DUMMY_REL(r)
Definition: pathnodes.h:1946
#define IS_PARTITIONED_REL(rel)
Definition: pathnodes.h:1056
#define IS_OTHER_REL(rel)
Definition: pathnodes.h:848
#define llast_node(type, l)
Definition: pg_list.h:202
static void apply_scanjoin_target_to_paths(PlannerInfo *root, RelOptInfo *rel, List *scanjoin_targets, List *scanjoin_targets_contain_srfs, bool scanjoin_target_parallel_safe, bool tlist_same_exprs)
Definition: planner.c:7627
static void adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel, List *targets, List *targets_contain_srfs)
Definition: planner.c:6465
void check_stack_depth(void)
Definition: postgres.c:3531
Definition: nodes.h:129
List * exprs
Definition: pathnodes.h:1533
Relids relids
Definition: pathnodes.h:865
bool consider_parallel
Definition: pathnodes.h:881
Bitmapset * live_parts
Definition: pathnodes.h:1033
bool tlist_same_exprs(List *tlist1, List *tlist2)
Definition: tlist.c:218
PathTarget * copy_pathtarget(PathTarget *src)
Definition: tlist.c:657

References add_paths_to_append_rel(), adjust_appendrel_attrs(), adjust_paths_for_srfs(), Assert, bms_next_member(), check_stack_depth(), RelOptInfo::consider_parallel, copy_pathtarget(), create_projection_path(), PathTarget::exprs, find_appinfos_by_relids(), generate_useful_gather_paths(), i, IS_DUMMY_REL, IS_OTHER_REL, IS_PARTITIONED_REL, lappend(), lfirst, lfirst_node, linitial_node, RelOptInfo::live_parts, llast_node, NIL, RelOptInfo::partial_pathlist, RelOptInfo::pathlist, pfree(), RelOptInfo::relids, RelOptInfo::reltarget, root, set_cheapest(), subpath(), and tlist_same_exprs().

Referenced by grouping_planner().
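
The cheap case above hinges on tlist_same_exprs: when the new scan/join target computes exactly the same expressions as a path's existing target, only the sortgroupref labels differ, so they can be injected in place instead of adding a projection node. A standalone sketch of that choice, with stand-in structs rather than the real PathTarget:

/* Standalone sketch: inject sortgroupref labels when expressions match. */
#include <stdbool.h>
#include <stdio.h>

typedef struct
{
    const char *exprs;          /* stand-in for the expression list */
    int        *sortgrouprefs;  /* stand-in for the label array */
} FakeTarget;

static void
apply_target(FakeTarget *path_target, FakeTarget *new_target, bool same_exprs)
{
    if (same_exprs)
        path_target->sortgrouprefs = new_target->sortgrouprefs; /* no new node */
    else
        printf("wrap path in a projection computing: %s\n", new_target->exprs);
}

int main(void)
{
    int        refs[] = {1, 0};
    FakeTarget pt = {"a, b", NULL};
    FakeTarget nt = {"a, b", refs};

    apply_target(&pt, &nt, true);
    printf("first column sortgroupref: %d\n", pt.sortgrouprefs[0]);
    return 0;
}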

◆ can_partial_agg()

static bool can_partial_agg ( PlannerInfo * root)
static

Definition at line 7585 of file planner.c.

7586 {
7587  Query *parse = root->parse;
7588 
7589  if (!parse->hasAggs && parse->groupClause == NIL)
7590  {
7591  /*
7592  * We don't know how to do parallel aggregation unless we have either
7593  * some aggregates or a grouping clause.
7594  */
7595  return false;
7596  }
7597  else if (parse->groupingSets)
7598  {
7599  /* We don't know how to do grouping sets in parallel. */
7600  return false;
7601  }
7602  else if (root->hasNonPartialAggs || root->hasNonSerialAggs)
7603  {
7604  /* Insufficient support for partial mode. */
7605  return false;
7606  }
7607 
7608  /* Everything looks good. */
7609  return true;
7610 }

References NIL, parse(), and root.

Referenced by create_grouping_paths().

◆ common_prefix_cmp()

static int common_prefix_cmp ( const void *  a,
const void *  b 
)
static

Definition at line 5935 of file planner.c.

5936 {
5937  const WindowClauseSortData *wcsa = a;
5938  const WindowClauseSortData *wcsb = b;
5939  ListCell *item_a;
5940  ListCell *item_b;
5941 
5942  forboth(item_a, wcsa->uniqueOrder, item_b, wcsb->uniqueOrder)
5943  {
5944  SortGroupClause *sca = lfirst_node(SortGroupClause, item_a);
5945  SortGroupClause *scb = lfirst_node(SortGroupClause, item_b);
5946 
5947  if (sca->tleSortGroupRef > scb->tleSortGroupRef)
5948  return -1;
5949  else if (sca->tleSortGroupRef < scb->tleSortGroupRef)
5950  return 1;
5951  else if (sca->sortop > scb->sortop)
5952  return -1;
5953  else if (sca->sortop < scb->sortop)
5954  return 1;
5955  else if (sca->nulls_first && !scb->nulls_first)
5956  return -1;
5957  else if (!sca->nulls_first && scb->nulls_first)
5958  return 1;
5959  /* no need to compare eqop, since it is fully determined by sortop */
5960  }
5961 
5962  if (list_length(wcsa->uniqueOrder) > list_length(wcsb->uniqueOrder))
5963  return -1;
5964  else if (list_length(wcsa->uniqueOrder) < list_length(wcsb->uniqueOrder))
5965  return 1;
5966 
5967  return 0;
5968 }
int b
Definition: isn.c:70
int a
Definition: isn.c:69
Index tleSortGroupRef
Definition: parsenodes.h:1442

References a, b, forboth, lfirst_node, list_length(), SortGroupClause::nulls_first, SortGroupClause::sortop, SortGroupClause::tleSortGroupRef, and WindowClauseSortData::uniqueOrder.

Referenced by select_active_windows().
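
The comparator sorts in descending order of ordering requirements, so that qsort() (invoked from select_active_windows()) places windows whose sort clauses share a common prefix next to each other, letting later windows reuse earlier sorts. A standalone analogue using plain integer keys in place of the SortGroupClause fields:

/* Standalone analogue of common_prefix_cmp(): compare two sort-key lists
 * element by element; "greater" keys and longer lists sort first, grouping
 * specs that share an ordering prefix together. */
#include <stdio.h>
#include <stdlib.h>

typedef struct { int nkeys; int keys[4]; } SortSpec;

static int
prefix_cmp(const void *a, const void *b)
{
    const SortSpec *sa = a, *sb = b;
    int n = sa->nkeys < sb->nkeys ? sa->nkeys : sb->nkeys;

    for (int i = 0; i < n; i++)
    {
        if (sa->keys[i] > sb->keys[i]) return -1;
        if (sa->keys[i] < sb->keys[i]) return 1;
    }
    /* equal common prefix: longer list sorts first */
    if (sa->nkeys > sb->nkeys) return -1;
    if (sa->nkeys < sb->nkeys) return 1;
    return 0;
}

int main(void)
{
    SortSpec specs[] = {{1, {7}}, {2, {7, 3}}, {1, {2}}};

    qsort(specs, 3, sizeof(SortSpec), prefix_cmp);
    /* result order: {7,3}, {7}, {2} */
    for (int i = 0; i < 3; i++)
        printf("spec with %d key(s), first key %d\n",
               specs[i].nkeys, specs[i].keys[0]);
    return 0;
}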

◆ consider_groupingsets_paths()

static void consider_groupingsets_paths ( PlannerInfo * root,
RelOptInfo * grouped_rel,
Path * path,
bool  is_sorted,
bool  can_hash,
grouping_sets_data * gd,
const AggClauseCosts * agg_costs,
double  dNumGroups 
)
static

Definition at line 4134 of file planner.c.

4142 {
4143  Query *parse = root->parse;
4144  Size hash_mem_limit = get_hash_memory_limit();
4145 
4146  /*
4147  * If we're not being offered sorted input, then only consider plans that
4148  * can be done entirely by hashing.
4149  *
4150  * We can hash everything if it looks like it'll fit in hash_mem. But if
4151  * the input is actually sorted despite not being advertised as such, we
4152  * prefer to make use of that in order to use less memory.
4153  *
4154  * If none of the grouping sets are sortable, then ignore the hash_mem
4155  * limit and generate a path anyway, since otherwise we'll just fail.
4156  */
4157  if (!is_sorted)
4158  {
4159  List *new_rollups = NIL;
4160  RollupData *unhashed_rollup = NULL;
4161  List *sets_data;
4162  List *empty_sets_data = NIL;
4163  List *empty_sets = NIL;
4164  ListCell *lc;
4165  ListCell *l_start = list_head(gd->rollups);
4166  AggStrategy strat = AGG_HASHED;
4167  double hashsize;
4168  double exclude_groups = 0.0;
4169 
4170  Assert(can_hash);
4171 
4172  /*
4173  * If the input is coincidentally sorted usefully (which can happen
4174  * even if is_sorted is false, since that only means that our caller
4175  * has set up the sorting for us), then save some hashtable space by
4176  * making use of that. But we need to watch out for degenerate cases:
4177  *
4178  * 1) If there are any empty grouping sets, then group_pathkeys might
4179  * be NIL if all non-empty grouping sets are unsortable. In this case,
4180  * there will be a rollup containing only empty groups, and the
4181  * pathkeys_contained_in test is vacuously true; this is ok.
4182  *
4183  * XXX: the above relies on the fact that group_pathkeys is generated
4184  * from the first rollup. If we add the ability to consider multiple
4185  * sort orders for grouping input, this assumption might fail.
4186  *
4187  * 2) If there are no empty sets and only unsortable sets, then the
4188  * rollups list will be empty (and thus l_start == NULL), and
4189  * group_pathkeys will be NIL; we must ensure that the vacuously-true
4190  * pathkeys_contained_in test doesn't cause us to crash.
4191  */
4192  if (l_start != NULL &&
4193  pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
4194  {
4195  unhashed_rollup = lfirst_node(RollupData, l_start);
4196  exclude_groups = unhashed_rollup->numGroups;
4197  l_start = lnext(gd->rollups, l_start);
4198  }
4199 
4200  hashsize = estimate_hashagg_tablesize(root,
4201  path,
4202  agg_costs,
4203  dNumGroups - exclude_groups);
4204 
4205  /*
4206  * gd->rollups is empty if we have only unsortable columns to work
4207  * with. Override hash_mem in that case; otherwise, we'll rely on the
4208  * sorted-input case to generate usable mixed paths.
4209  */
4210  if (hashsize > hash_mem_limit && gd->rollups)
4211  return; /* nope, won't fit */
4212 
4213  /*
4214  * We need to burst the existing rollups list into individual grouping
4215  * sets and recompute a groupClause for each set.
4216  */
4217  sets_data = list_copy(gd->unsortable_sets);
4218 
4219  for_each_cell(lc, gd->rollups, l_start)
4220  {
4221  RollupData *rollup = lfirst_node(RollupData, lc);
4222 
4223  /*
4224  * If we find an unhashable rollup that's not been skipped by the
4225  * "actually sorted" check above, we can't cope; we'd need sorted
4226  * input (with a different sort order) but we can't get that here.
4227  * So bail out; we'll get a valid path from the is_sorted case
4228  * instead.
4229  *
4230  * The mere presence of empty grouping sets doesn't make a rollup
4231  * unhashable (see preprocess_grouping_sets), we handle those
4232  * specially below.
4233  */
4234  if (!rollup->hashable)
4235  return;
4236 
4237  sets_data = list_concat(sets_data, rollup->gsets_data);
4238  }
4239  foreach(lc, sets_data)
4240  {
4241  GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4242  List *gset = gs->set;
4243  RollupData *rollup;
4244 
4245  if (gset == NIL)
4246  {
4247  /* Empty grouping sets can't be hashed. */
4248  empty_sets_data = lappend(empty_sets_data, gs);
4249  empty_sets = lappend(empty_sets, NIL);
4250  }
4251  else
4252  {
4253  rollup = makeNode(RollupData);
4254 
4255  rollup->groupClause = preprocess_groupclause(root, gset);
4256  rollup->gsets_data = list_make1(gs);
4257  rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4258  rollup->gsets_data,
4259  gd->tleref_to_colnum_map);
4260  rollup->numGroups = gs->numGroups;
4261  rollup->hashable = true;
4262  rollup->is_hashed = true;
4263  new_rollups = lappend(new_rollups, rollup);
4264  }
4265  }
4266 
4267  /*
4268  * If we didn't find anything nonempty to hash, then bail. We'll
4269  * generate a path from the is_sorted case.
4270  */
4271  if (new_rollups == NIL)
4272  return;
4273 
4274  /*
4275  * If there were empty grouping sets they should have been in the
4276  * first rollup.
4277  */
4278  Assert(!unhashed_rollup || !empty_sets);
4279 
4280  if (unhashed_rollup)
4281  {
4282  new_rollups = lappend(new_rollups, unhashed_rollup);
4283  strat = AGG_MIXED;
4284  }
4285  else if (empty_sets)
4286  {
4287  RollupData *rollup = makeNode(RollupData);
4288 
4289  rollup->groupClause = NIL;
4290  rollup->gsets_data = empty_sets_data;
4291  rollup->gsets = empty_sets;
4292  rollup->numGroups = list_length(empty_sets);
4293  rollup->hashable = false;
4294  rollup->is_hashed = false;
4295  new_rollups = lappend(new_rollups, rollup);
4296  strat = AGG_MIXED;
4297  }
4298 
4299  add_path(grouped_rel, (Path *)
4300  create_groupingsets_path(root,
4301  grouped_rel,
4302  path,
4303  (List *) parse->havingQual,
4304  strat,
4305  new_rollups,
4306  agg_costs));
4307  return;
4308  }
4309 
4310  /*
4311  * If we have sorted input but nothing we can do with it, bail.
4312  */
4313  if (gd->rollups == NIL)
4314  return;
4315 
4316  /*
4317  * Given sorted input, we try and make two paths: one sorted and one mixed
4318  * sort/hash. (We need to try both because hashagg might be disabled, or
4319  * some columns might not be sortable.)
4320  *
4321  * can_hash is passed in as false if some obstacle elsewhere (such as
4322  * ordered aggs) means that we shouldn't consider hashing at all.
4323  */
4324  if (can_hash && gd->any_hashable)
4325  {
4326  List *rollups = NIL;
4327  List *hash_sets = list_copy(gd->unsortable_sets);
4328  double availspace = hash_mem_limit;
4329  ListCell *lc;
4330 
4331  /*
4332  * Account first for space needed for groups we can't sort at all.
4333  */
4334  availspace -= estimate_hashagg_tablesize(root,
4335  path,
4336  agg_costs,
4337  gd->dNumHashGroups);
4338 
4339  if (availspace > 0 && list_length(gd->rollups) > 1)
4340  {
4341  double scale;
4342  int num_rollups = list_length(gd->rollups);
4343  int k_capacity;
4344  int *k_weights = palloc(num_rollups * sizeof(int));
4345  Bitmapset *hash_items = NULL;
4346  int i;
4347 
4348  /*
4349  * We treat this as a knapsack problem: the knapsack capacity
4350  * represents hash_mem, the item weights are the estimated memory
4351  * usage of the hashtables needed to implement a single rollup,
4352  * and we really ought to use the cost saving as the item value;
4353  * however, currently the costs assigned to sort nodes don't
4354  * reflect the comparison costs well, and so we treat all items as
4355  * of equal value (each rollup we hash instead saves us one sort).
4356  *
4357  * To use the discrete knapsack, we need to scale the values to a
4358  * reasonably small bounded range. We choose to allow a 5% error
4359  * margin; we have no more than 4096 rollups in the worst possible
4360  * case, which with a 5% error margin will require a bit over 42MB
4361  * of workspace. (Anyone wanting to plan queries that complex had
4362  * better have the memory for it. In more reasonable cases, with
4363  * no more than a couple of dozen rollups, the memory usage will
4364  * be negligible.)
4365  *
4366  * k_capacity is naturally bounded, but we clamp the values for
4367  * scale and weight (below) to avoid overflows or underflows (or
4368  * uselessly trying to use a scale factor less than 1 byte).
4369  */
4370  scale = Max(availspace / (20.0 * num_rollups), 1.0);
4371  k_capacity = (int) floor(availspace / scale);
4372 
4373  /*
4374  * We leave the first rollup out of consideration since it's the
4375  * one that matches the input sort order. We assign indexes "i"
4376  * to only those entries considered for hashing; the second loop,
4377  * below, must use the same condition.
4378  */
4379  i = 0;
4380  for_each_from(lc, gd->rollups, 1)
4381  {
4382  RollupData *rollup = lfirst_node(RollupData, lc);
4383 
4384  if (rollup->hashable)
4385  {
4386  double sz = estimate_hashagg_tablesize(root,
4387  path,
4388  agg_costs,
4389  rollup->numGroups);
4390 
4391  /*
4392  * If sz is enormous, but hash_mem (and hence scale) is
4393  * small, avoid integer overflow here.
4394  */
4395  k_weights[i] = (int) Min(floor(sz / scale),
4396  k_capacity + 1.0);
4397  ++i;
4398  }
4399  }
4400 
4401  /*
4402  * Apply knapsack algorithm; compute the set of items which
4403  * maximizes the value stored (in this case the number of sorts
4404  * saved) while keeping the total size (approximately) within
4405  * capacity.
4406  */
4407  if (i > 0)
4408  hash_items = DiscreteKnapsack(k_capacity, i, k_weights, NULL);
4409 
4410  if (!bms_is_empty(hash_items))
4411  {
4412  rollups = list_make1(linitial(gd->rollups));
4413 
4414  i = 0;
4415  for_each_from(lc, gd->rollups, 1)
4416  {
4417  RollupData *rollup = lfirst_node(RollupData, lc);
4418 
4419  if (rollup->hashable)
4420  {
4421  if (bms_is_member(i, hash_items))
4422  hash_sets = list_concat(hash_sets,
4423  rollup->gsets_data);
4424  else
4425  rollups = lappend(rollups, rollup);
4426  ++i;
4427  }
4428  else
4429  rollups = lappend(rollups, rollup);
4430  }
4431  }
4432  }
4433 
4434  if (!rollups && hash_sets)
4435  rollups = list_copy(gd->rollups);
4436 
4437  foreach(lc, hash_sets)
4438  {
4439  GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4440  RollupData *rollup = makeNode(RollupData);
4441 
4442  Assert(gs->set != NIL);
4443 
4444  rollup->groupClause = preprocess_groupclause(root, gs->set);
4445  rollup->gsets_data = list_make1(gs);
4446  rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4447  rollup->gsets_data,
4448  gd->tleref_to_colnum_map);
4449  rollup->numGroups = gs->numGroups;
4450  rollup->hashable = true;
4451  rollup->is_hashed = true;
4452  rollups = lcons(rollup, rollups);
4453  }
4454 
4455  if (rollups)
4456  {
4457  add_path(grouped_rel, (Path *)
4458  create_groupingsets_path(root,
4459  grouped_rel,
4460  path,
4461  (List *) parse->havingQual,
4462  AGG_MIXED,
4463  rollups,
4464  agg_costs));
4465  }
4466  }
4467 
4468  /*
4469  * Now try the simple sorted case.
4470  */
4471  if (!gd->unsortable_sets)
4472  add_path(grouped_rel, (Path *)
4473  create_groupingsets_path(root,
4474  grouped_rel,
4475  path,
4476  (List *) parse->havingQual,
4477  AGG_SORTED,
4478  gd->rollups,
4479  agg_costs));
4480 }
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:510
#define bms_is_empty(a)
Definition: bitmapset.h:118
#define Min(x, y)
Definition: c.h:1004
#define Max(x, y)
Definition: c.h:998
size_t Size
Definition: c.h:605
Bitmapset * DiscreteKnapsack(int max_weight, int num_items, int *item_weights, double *item_values)
Definition: knapsack.c:52
List * list_concat(List *list1, const List *list2)
Definition: list.c:561
List * lcons(void *datum, List *list)
Definition: list.c:495
void * palloc(Size size)
Definition: mcxt.c:1316
size_t get_hash_memory_limit(void)
Definition: nodeHash.c:3595
AggStrategy
Definition: nodes.h:352
@ AGG_MIXED
Definition: nodes.h:356
#define makeNode(_type_)
Definition: nodes.h:155
bool pathkeys_contained_in(List *keys1, List *keys2)
Definition: pathkeys.c:341
GroupingSetsPath * create_groupingsets_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *having_qual, AggStrategy aggstrategy, List *rollups, const AggClauseCosts *agg_costs)
Definition: pathnode.c:3237
#define list_make1(x1)
Definition: pg_list.h:212
#define for_each_cell(cell, lst, initcell)
Definition: pg_list.h:438
static ListCell * list_head(const List *l)
Definition: pg_list.h:128
#define for_each_from(cell, lst, N)
Definition: pg_list.h:414
#define linitial(l)
Definition: pg_list.h:178
static ListCell * lnext(const List *l, const ListCell *c)
Definition: pg_list.h:343
int scale
Definition: pgbench.c:181
static List * preprocess_groupclause(PlannerInfo *root, List *force)
Definition: planner.c:2851
static List * remap_to_groupclause_idx(List *groupClause, List *gsets, int *tleref_to_colnum_map)
Definition: planner.c:2225
double estimate_hashagg_tablesize(PlannerInfo *root, Path *path, const AggClauseCosts *agg_costs, double dNumGroups)
Definition: selfuncs.c:3917
Cardinality numGroups
Definition: pathnodes.h:2273
List * pathkeys
Definition: pathnodes.h:1665
Cardinality numGroups
Definition: pathnodes.h:2284
List * groupClause
Definition: pathnodes.h:2281
List * gsets_data
Definition: pathnodes.h:2283
bool hashable
Definition: pathnodes.h:2285
List * gsets
Definition: pathnodes.h:2282
bool is_hashed
Definition: pathnodes.h:2286
int * tleref_to_colnum_map
Definition: planner.c:104
List * rollups
Definition: planner.c:97
List * unsortable_sets
Definition: planner.c:103
double dNumHashGroups
Definition: planner.c:99

References add_path(), AGG_HASHED, AGG_MIXED, AGG_SORTED, grouping_sets_data::any_hashable, Assert, bms_is_empty, bms_is_member(), create_groupingsets_path(), DiscreteKnapsack(), grouping_sets_data::dNumHashGroups, estimate_hashagg_tablesize(), for_each_cell, for_each_from, get_hash_memory_limit(), RollupData::groupClause, RollupData::gsets, RollupData::gsets_data, RollupData::hashable, i, RollupData::is_hashed, lappend(), lcons(), lfirst_node, linitial, list_concat(), list_copy(), list_head(), list_length(), list_make1, lnext(), makeNode, Max, Min, NIL, GroupingSetData::numGroups, RollupData::numGroups, palloc(), parse(), Path::pathkeys, pathkeys_contained_in(), preprocess_groupclause(), remap_to_groupclause_idx(), grouping_sets_data::rollups, root, scale, GroupingSetData::set, grouping_sets_data::tleref_to_colnum_map, and grouping_sets_data::unsortable_sets.

Referenced by add_paths_to_grouping_rel().
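The hashed-rollup selection above treats the choice of which rollups to hash as a knapsack problem. A toy sketch of the DiscreteKnapsack() call convention follows; the weights and values are invented for illustration and do not come from planner.c:

    /* Pick the most valuable subset of 3 items whose total weight <= 4. */
    int         item_weights[] = {3, 2, 2};        /* e.g. hash table sizes */
    double      item_values[]  = {10.0, 7.0, 6.0}; /* e.g. sorts avoided */
    Bitmapset  *chosen = DiscreteKnapsack(4, 3, item_weights, item_values);

    /* 'chosen' holds the 0-based indexes of the selected items; test
     * membership with bms_is_member(0, chosen), etc. */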

◆ create_degenerate_grouping_paths()

static void create_degenerate_grouping_paths ( PlannerInfo *root,
RelOptInfo *input_rel,
RelOptInfo *grouped_rel 
)
static

Definition at line 3931 of file planner.c.

3933 {
3934  Query *parse = root->parse;
3935  int nrows;
3936  Path *path;
3937 
3938  nrows = list_length(parse->groupingSets);
3939  if (nrows > 1)
3940  {
3941  /*
3942  * Doesn't seem worthwhile writing code to cons up a generate_series
3943  * or a values scan to emit multiple rows. Instead just make N clones
3944  * and append them. (With a volatile HAVING clause, this means you
3945  * might get between 0 and N output rows. Offhand I think that's
3946  * desired.)
3947  */
3948  List *paths = NIL;
3949 
3950  while (--nrows >= 0)
3951  {
3952  path = (Path *)
3953  create_group_result_path(root, grouped_rel,
3954  grouped_rel->reltarget,
3955  (List *) parse->havingQual);
3956  paths = lappend(paths, path);
3957  }
3958  path = (Path *)
3959  create_append_path(root,
3960  grouped_rel,
3961  paths,
3962  NIL,
3963  NIL,
3964  NULL,
3965  0,
3966  false,
3967  -1);
3968  }
3969  else
3970  {
3971  /* No grouping sets, or just one, so one output row */
3972  path = (Path *)
3973  create_group_result_path(root, grouped_rel,
3974  grouped_rel->reltarget,
3975  (List *) parse->havingQual);
3976  }
3977 
3978  add_path(grouped_rel, path);
3979 }
AppendPath * create_append_path(PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *partial_subpaths, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows)
Definition: pathnode.c:1244
GroupResultPath * create_group_result_path(PlannerInfo *root, RelOptInfo *rel, PathTarget *target, List *havingqual)
Definition: pathnode.c:1518

References add_path(), create_append_path(), create_group_result_path(), lappend(), list_length(), NIL, parse(), RelOptInfo::reltarget, and root.

Referenced by create_grouping_paths().

◆ create_distinct_paths()

static RelOptInfo * create_distinct_paths ( PlannerInfo *root,
RelOptInfo *input_rel,
PathTarget *target 
)
static

Definition at line 4753 of file planner.c.

4755 {
4756  RelOptInfo *distinct_rel;
4757 
4758  /* For now, do all work in the (DISTINCT, NULL) upperrel */
4759  distinct_rel = fetch_upper_rel(root, UPPERREL_DISTINCT, NULL);
4760 
4761  /*
4762  * We don't compute anything at this level, so distinct_rel will be
4763  * parallel-safe if the input rel is parallel-safe. In particular, if
4764  * there is a DISTINCT ON (...) clause, any path for the input_rel will
4765  * output those expressions, and will not be parallel-safe unless those
4766  * expressions are parallel-safe.
4767  */
4768  distinct_rel->consider_parallel = input_rel->consider_parallel;
4769 
4770  /*
4771  * If the input rel belongs to a single FDW, so does the distinct_rel.
4772  */
4773  distinct_rel->serverid = input_rel->serverid;
4774  distinct_rel->userid = input_rel->userid;
4775  distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4776  distinct_rel->fdwroutine = input_rel->fdwroutine;
4777 
4778  /* build distinct paths based on input_rel's pathlist */
4779  create_final_distinct_paths(root, input_rel, distinct_rel);
4780 
4781  /* now build distinct paths based on input_rel's partial_pathlist */
4782  create_partial_distinct_paths(root, input_rel, distinct_rel, target);
4783 
4784  /* Give a helpful error if we failed to create any paths */
4785  if (distinct_rel->pathlist == NIL)
4786  ereport(ERROR,
4787  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4788  errmsg("could not implement DISTINCT"),
4789  errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4790 
4791  /*
4792  * If there is an FDW that's responsible for all baserels of the query,
4793  * let it consider adding ForeignPaths.
4794  */
4795  if (distinct_rel->fdwroutine &&
4796  distinct_rel->fdwroutine->GetForeignUpperPaths)
4797  distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4798  UPPERREL_DISTINCT,
4799  input_rel,
4800  distinct_rel,
4801  NULL);
4802 
4803  /* Let extensions possibly add some more paths */
4804  if (create_upper_paths_hook)
4805  (*create_upper_paths_hook) (root, UPPERREL_DISTINCT, input_rel,
4806  distinct_rel, NULL);
4807 
4808  /* Now choose the best path(s) */
4809  set_cheapest(distinct_rel);
4810 
4811  return distinct_rel;
4812 }
int errdetail(const char *fmt,...)
Definition: elog.c:1203
int errcode(int sqlerrcode)
Definition: elog.c:857
int errmsg(const char *fmt,...)
Definition: elog.c:1070
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
@ UPPERREL_DISTINCT
Definition: pathnodes.h:77
static RelOptInfo * create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *distinct_rel)
Definition: planner.c:5022
static void create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *final_distinct_rel, PathTarget *target)
Definition: planner.c:4823
create_upper_paths_hook_type create_upper_paths_hook
Definition: planner.c:74
RelOptInfo * fetch_upper_rel(PlannerInfo *root, UpperRelationKind kind, Relids relids)
Definition: relnode.c:1470
bool useridiscurrent
Definition: pathnodes.h:962
Oid userid
Definition: pathnodes.h:960
Oid serverid
Definition: pathnodes.h:958

References RelOptInfo::consider_parallel, create_final_distinct_paths(), create_partial_distinct_paths(), create_upper_paths_hook, ereport, errcode(), errdetail(), errmsg(), ERROR, fetch_upper_rel(), NIL, RelOptInfo::pathlist, root, RelOptInfo::serverid, set_cheapest(), UPPERREL_DISTINCT, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by grouping_planner().
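As the code shows, create_upper_paths_hook runs after the built-in DISTINCT paths have been added, so a loadable module can contribute competing paths. A minimal sketch of how an extension might install the hook; the module and function names here are hypothetical:

    #include "postgres.h"
    #include "fmgr.h"
    #include "optimizer/planner.h"

    PG_MODULE_MAGIC;

    static create_upper_paths_hook_type prev_upper_paths_hook = NULL;

    static void
    my_upper_paths(PlannerInfo *root, UpperRelationKind stage,
                   RelOptInfo *input_rel, RelOptInfo *output_rel,
                   void *extra)
    {
        /* Always chain to any previously installed hook. */
        if (prev_upper_paths_hook)
            prev_upper_paths_hook(root, stage, input_rel, output_rel, extra);

        if (stage != UPPERREL_DISTINCT)
            return;

        /* Custom paths could be added to output_rel with add_path() here;
         * they then compete on cost with the built-in ones. */
    }

    void
    _PG_init(void)
    {
        prev_upper_paths_hook = create_upper_paths_hook;
        create_upper_paths_hook = my_upper_paths;
    }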

◆ create_final_distinct_paths()

static RelOptInfo * create_final_distinct_paths ( PlannerInfo *root,
RelOptInfo *input_rel,
RelOptInfo *distinct_rel 
)
static

Definition at line 5022 of file planner.c.

5024 {
5025  Query *parse = root->parse;
5026  Path *cheapest_input_path = input_rel->cheapest_total_path;
5027  double numDistinctRows;
5028  bool allow_hash;
5029 
5030  /* Estimate number of distinct rows there will be */
5031  if (parse->groupClause || parse->groupingSets || parse->hasAggs ||
5032  root->hasHavingQual)
5033  {
5034  /*
5035  * If there was grouping or aggregation, use the number of input rows
5036  * as the estimated number of DISTINCT rows (ie, assume the input is
5037  * already mostly unique).
5038  */
5039  numDistinctRows = cheapest_input_path->rows;
5040  }
5041  else
5042  {
5043  /*
5044  * Otherwise, the UNIQUE filter has effects comparable to GROUP BY.
5045  */
5046  List *distinctExprs;
5047 
5048  distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
5049  parse->targetList);
5050  numDistinctRows = estimate_num_groups(root, distinctExprs,
5051  cheapest_input_path->rows,
5052  NULL, NULL);
5053  }
5054 
5055  /*
5056  * Consider sort-based implementations of DISTINCT, if possible.
5057  */
5058  if (grouping_is_sortable(root->processed_distinctClause))
5059  {
5060  /*
5061  * Firstly, if we have any adequately-presorted paths, just stick a
5062  * Unique node on those. We also consider doing an explicit sort of
5063  * the cheapest input path and Unique'ing that. If any paths have
5064  * presorted keys then we'll create an incremental sort atop of those
5065  * before adding a unique node on the top.
5066  *
5067  * When we have DISTINCT ON, we must sort by the more rigorous of
5068  * DISTINCT and ORDER BY, else it won't have the desired behavior.
5069  * Also, if we do have to do an explicit sort, we might as well use
5070  * the more rigorous ordering to avoid a second sort later. (Note
5071  * that the parser will have ensured that one clause is a prefix of
5072  * the other.)
5073  */
5074  List *needed_pathkeys;
5075  ListCell *lc;
5076  double limittuples = root->distinct_pathkeys == NIL ? 1.0 : -1.0;
5077 
5078  if (parse->hasDistinctOn &&
5079  list_length(root->distinct_pathkeys) <
5080  list_length(root->sort_pathkeys))
5081  needed_pathkeys = root->sort_pathkeys;
5082  else
5083  needed_pathkeys = root->distinct_pathkeys;
5084 
5085  foreach(lc, input_rel->pathlist)
5086  {
5087  Path *input_path = (Path *) lfirst(lc);
5088  Path *sorted_path;
5089  bool is_sorted;
5090  int presorted_keys;
5091 
5092  is_sorted = pathkeys_count_contained_in(needed_pathkeys,
5093  input_path->pathkeys,
5094  &presorted_keys);
5095 
5096  if (is_sorted)
5097  sorted_path = input_path;
5098  else
5099  {
5100  /*
5101  * Try at least sorting the cheapest path and also try
5102  * incrementally sorting any path which is partially sorted
5103  * already (no need to deal with paths which have presorted
5104  * keys when incremental sort is disabled unless it's the
5105  * cheapest input path).
5106  */
5107  if (input_path != cheapest_input_path &&
5108  (presorted_keys == 0 || !enable_incremental_sort))
5109  continue;
5110 
5111  /*
5112  * We've no need to consider both a sort and incremental sort.
5113  * We'll just do a sort if there are no presorted keys and an
5114  * incremental sort when there are presorted keys.
5115  */
5116  if (presorted_keys == 0 || !enable_incremental_sort)
5117  sorted_path = (Path *) create_sort_path(root,
5118  distinct_rel,
5119  input_path,
5120  needed_pathkeys,
5121  limittuples);
5122  else
5123  sorted_path = (Path *) create_incremental_sort_path(root,
5124  distinct_rel,
5125  input_path,
5126  needed_pathkeys,
5127  presorted_keys,
5128  limittuples);
5129  }
5130 
5131  /*
5132  * distinct_pathkeys may have become empty if all of the pathkeys
5133  * were determined to be redundant. If all of the pathkeys are
5134  * redundant then each DISTINCT target must only allow a single
5135  * value, therefore all resulting tuples must be identical (or at
5136  * least indistinguishable by an equality check). We can uniquify
5137  * these tuples simply by just taking the first tuple. All we do
5138  * here is add a path to do "LIMIT 1" atop of 'sorted_path'. When
5139  * doing a DISTINCT ON we may still have a non-NIL sort_pathkeys
5140  * list, so we must still only do this with paths which are
5141  * correctly sorted by sort_pathkeys.
5142  */
5143  if (root->distinct_pathkeys == NIL)
5144  {
5145  Node *limitCount;
5146 
5147  limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
5148  sizeof(int64),
5149  Int64GetDatum(1), false,
5150  FLOAT8PASSBYVAL);
5151 
5152  /*
5153  * If the query already has a LIMIT clause, then we could end
5154  * up with a duplicate LimitPath in the final plan. That does
5155  * not seem worth troubling over too much.
5156  */
5157  add_path(distinct_rel, (Path *)
5158  create_limit_path(root, distinct_rel, sorted_path,
5159  NULL, limitCount,
5160  LIMIT_OPTION_COUNT, 0, 1));
5161  }
5162  else
5163  {
5164  add_path(distinct_rel, (Path *)
5165  create_upper_unique_path(root, distinct_rel,
5166  sorted_path,
5167  list_length(root->distinct_pathkeys),
5168  numDistinctRows));
5169  }
5170  }
5171  }
5172 
5173  /*
5174  * Consider hash-based implementations of DISTINCT, if possible.
5175  *
5176  * If we were not able to make any other types of path, we *must* hash or
5177  * die trying. If we do have other choices, there are two things that
5178  * should prevent selection of hashing: if the query uses DISTINCT ON
5179  * (because it won't really have the expected behavior if we hash), or if
5180  * enable_hashagg is off.
5181  *
5182  * Note: grouping_is_hashable() is much more expensive to check than the
5183  * other gating conditions, so we want to do it last.
5184  */
5185  if (distinct_rel->pathlist == NIL)
5186  allow_hash = true; /* we have no alternatives */
5187  else if (parse->hasDistinctOn || !enable_hashagg)
5188  allow_hash = false; /* policy-based decision not to hash */
5189  else
5190  allow_hash = true; /* default */
5191 
5192  if (allow_hash && grouping_is_hashable(root->processed_distinctClause))
5193  {
5194  /* Generate hashed aggregate path --- no sort needed */
5195  add_path(distinct_rel, (Path *)
5196  create_agg_path(root,
5197  distinct_rel,
5198  cheapest_input_path,
5199  cheapest_input_path->pathtarget,
5200  AGG_HASHED,
5201  AGGSPLIT_SIMPLE,
5202  root->processed_distinctClause,
5203  NIL,
5204  NULL,
5205  numDistinctRows));
5206  }
5207 
5208  return distinct_rel;
5209 }
#define FLOAT8PASSBYVAL
Definition: c.h:635
bool enable_hashagg
Definition: costsize.c:141
bool enable_incremental_sort
Definition: costsize.c:140
Datum Int64GetDatum(int64 X)
Definition: fmgr.c:1807
Const * makeConst(Oid consttype, int32 consttypmod, Oid constcollid, int constlen, Datum constvalue, bool constisnull, bool constbyval)
Definition: makefuncs.c:301
@ LIMIT_OPTION_COUNT
Definition: nodes.h:430
bool pathkeys_count_contained_in(List *keys1, List *keys2, int *n_common)
Definition: pathkeys.c:556
SortPath * create_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, double limit_tuples)
Definition: pathnode.c:3000
LimitPath * create_limit_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, Node *limitOffset, Node *limitCount, LimitOption limitOption, int64 offset_est, int64 count_est)
Definition: pathnode.c:3826
UpperUniquePath * create_upper_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, int numCols, double numGroups)
Definition: pathnode.c:3103
IncrementalSortPath * create_incremental_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, int presorted_keys, double limit_tuples)
Definition: pathnode.c:2951
#define InvalidOid
Definition: postgres_ext.h:36
double estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, List **pgset, EstimationInfo *estinfo)
Definition: selfuncs.c:3416
Cardinality rows
Definition: pathnodes.h:1660
List * get_sortgrouplist_exprs(List *sgClauses, List *targetList)
Definition: tlist.c:392
bool grouping_is_sortable(List *groupClause)
Definition: tlist.c:540
bool grouping_is_hashable(List *groupClause)
Definition: tlist.c:560

References add_path(), AGG_HASHED, AGGSPLIT_SIMPLE, RelOptInfo::cheapest_total_path, create_agg_path(), create_incremental_sort_path(), create_limit_path(), create_sort_path(), create_upper_unique_path(), enable_hashagg, enable_incremental_sort, estimate_num_groups(), FLOAT8PASSBYVAL, get_sortgrouplist_exprs(), grouping_is_hashable(), grouping_is_sortable(), Int64GetDatum(), InvalidOid, lfirst, LIMIT_OPTION_COUNT, list_length(), makeConst(), NIL, parse(), Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::pathlist, root, and Path::rows.

Referenced by create_distinct_paths(), and create_partial_distinct_paths().
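The sort-selection logic above recurs almost verbatim in create_ordered_paths() and create_partial_distinct_paths(). Condensed, the decision looks like the following sketch, assuming root, rel, path, and needed_pathkeys are in scope:

    int     presorted_keys;
    bool    is_sorted = pathkeys_count_contained_in(needed_pathkeys,
                                                    path->pathkeys,
                                                    &presorted_keys);

    if (!is_sorted)
    {
        if (presorted_keys == 0 || !enable_incremental_sort)
            path = (Path *) create_sort_path(root, rel, path,
                                             needed_pathkeys, -1.0);
        else
            path = (Path *) create_incremental_sort_path(root, rel, path,
                                                         needed_pathkeys,
                                                         presorted_keys,
                                                         -1.0);
    }
    /* 'path' now delivers rows in needed_pathkeys order; the -1.0 means
     * no limit hint was supplied. */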

◆ create_grouping_paths()

static RelOptInfo * create_grouping_paths ( PlannerInfo *root,
RelOptInfo *input_rel,
PathTarget *target,
bool  target_parallel_safe,
grouping_sets_data *gd 
)
static

Definition at line 3744 of file planner.c.

3749 {
3750  Query *parse = root->parse;
3751  RelOptInfo *grouped_rel;
3752  RelOptInfo *partially_grouped_rel;
3753  AggClauseCosts agg_costs;
3754 
3755  MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
3756  get_agg_clause_costs(root, AGGSPLIT_SIMPLE, &agg_costs);
3757 
3758  /*
3759  * Create grouping relation to hold fully aggregated grouping and/or
3760  * aggregation paths.
3761  */
3762  grouped_rel = make_grouping_rel(root, input_rel, target,
3763  target_parallel_safe, parse->havingQual);
3764 
3765  /*
3766  * Create either paths for a degenerate grouping or paths for ordinary
3767  * grouping, as appropriate.
3768  */
3769  if (is_degenerate_grouping(root))
3770  create_degenerate_grouping_paths(root, input_rel, grouped_rel);
3771  else
3772  {
3773  int flags = 0;
3774  GroupPathExtraData extra;
3775 
3776  /*
3777  * Determine whether it's possible to perform sort-based
3778  * implementations of grouping. (Note that if processed_groupClause
3779  * is empty, grouping_is_sortable() is trivially true, and all the
3780  * pathkeys_contained_in() tests will succeed too, so that we'll
3781  * consider every surviving input path.)
3782  *
3783  * If we have grouping sets, we might be able to sort some but not all
3784  * of them; in this case, we need can_sort to be true as long as we
3785  * must consider any sorted-input plan.
3786  */
3787  if ((gd && gd->rollups != NIL)
3788  || grouping_is_sortable(root->processed_groupClause))
3789  flags |= GROUPING_CAN_USE_SORT;
3790 
3791  /*
3792  * Determine whether we should consider hash-based implementations of
3793  * grouping.
3794  *
3795  * Hashed aggregation only applies if we're grouping. If we have
3796  * grouping sets, some groups might be hashable but others not; in
3797  * this case we set can_hash true as long as there is nothing globally
3798  * preventing us from hashing (and we should therefore consider plans
3799  * with hashes).
3800  *
3801  * Executor doesn't support hashed aggregation with DISTINCT or ORDER
3802  * BY aggregates. (Doing so would imply storing *all* the input
3803  * values in the hash table, and/or running many sorts in parallel,
3804  * either of which seems like a certain loser.) We similarly don't
3805  * support ordered-set aggregates in hashed aggregation, but that case
3806  * is also included in the numOrderedAggs count.
3807  *
3808  * Note: grouping_is_hashable() is much more expensive to check than
3809  * the other gating conditions, so we want to do it last.
3810  */
3811  if ((parse->groupClause != NIL &&
3812  root->numOrderedAggs == 0 &&
3813  (gd ? gd->any_hashable : grouping_is_hashable(root->processed_groupClause))))
3814  flags |= GROUPING_CAN_USE_HASH;
3815 
3816  /*
3817  * Determine whether partial aggregation is possible.
3818  */
3819  if (can_partial_agg(root))
3820  flags |= GROUPING_CAN_PARTIAL_AGG;
3821 
3822  extra.flags = flags;
3823  extra.target_parallel_safe = target_parallel_safe;
3824  extra.havingQual = parse->havingQual;
3825  extra.targetList = parse->targetList;
3826  extra.partial_costs_set = false;
3827 
3828  /*
3829  * Determine whether partitionwise aggregation is in theory possible.
3830  * It can be disabled by the user, and for now, we don't try to
3831  * support grouping sets. create_ordinary_grouping_paths() will check
3832  * additional conditions, such as whether input_rel is partitioned.
3833  */
3834  if (enable_partitionwise_aggregate && !parse->groupingSets)
3835  extra.patype = PARTITIONWISE_AGGREGATE_FULL;
3836  else
3837  extra.patype = PARTITIONWISE_AGGREGATE_NONE;
3838 
3839  create_ordinary_grouping_paths(root, input_rel, grouped_rel,
3840  &agg_costs, gd, &extra,
3841  &partially_grouped_rel);
3842  }
3843 
3844  set_cheapest(grouped_rel);
3845  return grouped_rel;
3846 }
#define MemSet(start, val, len)
Definition: c.h:1020
bool enable_partitionwise_aggregate
Definition: costsize.c:149
@ PARTITIONWISE_AGGREGATE_FULL
Definition: pathnodes.h:3271
@ PARTITIONWISE_AGGREGATE_NONE
Definition: pathnodes.h:3270
#define GROUPING_CAN_PARTIAL_AGG
Definition: pathnodes.h:3255
static void create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel)
Definition: planner.c:3931
static bool is_degenerate_grouping(PlannerInfo *root)
Definition: planner.c:3910
static void create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, GroupPathExtraData *extra, RelOptInfo **partially_grouped_rel_p)
Definition: planner.c:3995
static bool can_partial_agg(PlannerInfo *root)
Definition: planner.c:7585
static RelOptInfo * make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, Node *havingQual)
Definition: planner.c:3857
void get_agg_clause_costs(PlannerInfo *root, AggSplit aggsplit, AggClauseCosts *costs)
Definition: prepagg.c:560
PartitionwiseAggregateType patype
Definition: pathnodes.h:3300

References AGGSPLIT_SIMPLE, grouping_sets_data::any_hashable, can_partial_agg(), create_degenerate_grouping_paths(), create_ordinary_grouping_paths(), enable_partitionwise_aggregate, GroupPathExtraData::flags, get_agg_clause_costs(), GROUPING_CAN_PARTIAL_AGG, GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, grouping_is_hashable(), grouping_is_sortable(), GroupPathExtraData::havingQual, is_degenerate_grouping(), make_grouping_rel(), MemSet, NIL, parse(), GroupPathExtraData::partial_costs_set, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, GroupPathExtraData::patype, grouping_sets_data::rollups, root, set_cheapest(), GroupPathExtraData::target_parallel_safe, and GroupPathExtraData::targetList.

Referenced by grouping_planner().

◆ create_one_window_path()

static void create_one_window_path ( PlannerInfo *root,
RelOptInfo *window_rel,
Path *path,
PathTarget *input_target,
PathTarget *output_target,
WindowFuncLists *wflists,
List *activeWindows 
)
static

Definition at line 4583 of file planner.c.

4590 {
4591  PathTarget *window_target;
4592  ListCell *l;
4593  List *topqual = NIL;
4594 
4595  /*
4596  * Since each window clause could require a different sort order, we stack
4597  * up a WindowAgg node for each clause, with sort steps between them as
4598  * needed. (We assume that select_active_windows chose a good order for
4599  * executing the clauses in.)
4600  *
4601  * input_target should contain all Vars and Aggs needed for the result.
4602  * (In some cases we wouldn't need to propagate all of these all the way
4603  * to the top, since they might only be needed as inputs to WindowFuncs.
4604  * It's probably not worth trying to optimize that though.) It must also
4605  * contain all window partitioning and sorting expressions, to ensure
4606  * they're computed only once at the bottom of the stack (that's critical
4607  * for volatile functions). As we climb up the stack, we'll add outputs
4608  * for the WindowFuncs computed at each level.
4609  */
4610  window_target = input_target;
4611 
4612  foreach(l, activeWindows)
4613  {
4614  WindowClause *wc = lfirst_node(WindowClause, l);
4615  List *window_pathkeys;
4616  List *runcondition = NIL;
4617  int presorted_keys;
4618  bool is_sorted;
4619  bool topwindow;
4620  ListCell *lc2;
4621 
4622  window_pathkeys = make_pathkeys_for_window(root,
4623  wc,
4624  root->processed_tlist);
4625 
4626  is_sorted = pathkeys_count_contained_in(window_pathkeys,
4627  path->pathkeys,
4628  &presorted_keys);
4629 
4630  /* Sort if necessary */
4631  if (!is_sorted)
4632  {
4633  /*
4634  * No presorted keys or incremental sort disabled, just perform a
4635  * complete sort.
4636  */
4637  if (presorted_keys == 0 || !enable_incremental_sort)
4638  path = (Path *) create_sort_path(root, window_rel,
4639  path,
4640  window_pathkeys,
4641  -1.0);
4642  else
4643  {
4644  /*
4645  * Since we have presorted keys and incremental sort is
4646  * enabled, just use incremental sort.
4647  */
4648  path = (Path *) create_incremental_sort_path(root,
4649  window_rel,
4650  path,
4651  window_pathkeys,
4652  presorted_keys,
4653  -1.0);
4654  }
4655  }
4656 
4657  if (lnext(activeWindows, l))
4658  {
4659  /*
4660  * Add the current WindowFuncs to the output target for this
4661  * intermediate WindowAggPath. We must copy window_target to
4662  * avoid changing the previous path's target.
4663  *
4664  * Note: a WindowFunc adds nothing to the target's eval costs; but
4665  * we do need to account for the increase in tlist width.
4666  */
4667  int64 tuple_width = window_target->width;
4668 
4669  window_target = copy_pathtarget(window_target);
4670  foreach(lc2, wflists->windowFuncs[wc->winref])
4671  {
4672  WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4673 
4674  add_column_to_pathtarget(window_target, (Expr *) wfunc, 0);
4675  tuple_width += get_typavgwidth(wfunc->wintype, -1);
4676  }
4677  window_target->width = clamp_width_est(tuple_width);
4678  }
4679  else
4680  {
4681  /* Install the goal target in the topmost WindowAgg */
4682  window_target = output_target;
4683  }
4684 
4685  /* mark the final item in the list as the top-level window */
4686  topwindow = foreach_current_index(l) == list_length(activeWindows) - 1;
4687 
4688  /*
4689  * Collect the WindowFuncRunConditions from each WindowFunc and
4690  * convert them into OpExprs
4691  */
4692  foreach(lc2, wflists->windowFuncs[wc->winref])
4693  {
4694  ListCell *lc3;
4695  WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4696 
4697  foreach(lc3, wfunc->runCondition)
4698  {
4699  WindowFuncRunCondition *wfuncrc =
4700  lfirst_node(WindowFuncRunCondition, lc3);
4701  Expr *opexpr;
4702  Expr *leftop;
4703  Expr *rightop;
4704 
4705  if (wfuncrc->wfunc_left)
4706  {
4707  leftop = (Expr *) copyObject(wfunc);
4708  rightop = copyObject(wfuncrc->arg);
4709  }
4710  else
4711  {
4712  leftop = copyObject(wfuncrc->arg);
4713  rightop = (Expr *) copyObject(wfunc);
4714  }
4715 
4716  opexpr = make_opclause(wfuncrc->opno,
4717  BOOLOID,
4718  false,
4719  leftop,
4720  rightop,
4721  InvalidOid,
4722  wfuncrc->inputcollid);
4723 
4724  runcondition = lappend(runcondition, opexpr);
4725 
4726  if (!topwindow)
4727  topqual = lappend(topqual, opexpr);
4728  }
4729  }
4730 
4731  path = (Path *)
4732  create_windowagg_path(root, window_rel, path, window_target,
4733  wflists->windowFuncs[wc->winref],
4734  runcondition, wc,
4735  topwindow ? topqual : NIL, topwindow);
4736  }
4737 
4738  add_path(window_rel, path);
4739 }
int32 clamp_width_est(int64 tuple_width)
Definition: costsize.c:231
int32 get_typavgwidth(Oid typid, int32 typmod)
Definition: lsyscache.c:2578
Expr * make_opclause(Oid opno, Oid opresulttype, bool opretset, Expr *leftop, Expr *rightop, Oid opcollid, Oid inputcollid)
Definition: makefuncs.c:628
#define copyObject(obj)
Definition: nodes.h:224
WindowAggPath * create_windowagg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *windowFuncs, List *runCondition, WindowClause *winclause, List *qual, bool topwindow)
Definition: pathnode.c:3485
static List * make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc, List *tlist)
Definition: planner.c:6124
List ** windowFuncs
Definition: clauses.h:23
void add_column_to_pathtarget(PathTarget *target, Expr *expr, Index sortgroupref)
Definition: tlist.c:695

References add_column_to_pathtarget(), add_path(), WindowFuncRunCondition::arg, clamp_width_est(), copy_pathtarget(), copyObject, create_incremental_sort_path(), create_sort_path(), create_windowagg_path(), enable_incremental_sort, foreach_current_index, get_typavgwidth(), InvalidOid, lappend(), lfirst_node, list_length(), lnext(), make_opclause(), make_pathkeys_for_window(), NIL, WindowFuncRunCondition::opno, Path::pathkeys, pathkeys_count_contained_in(), root, WindowFuncRunCondition::wfunc_left, PathTarget::width, WindowFuncLists::windowFuncs, and WindowClause::winref.

Referenced by create_window_paths().
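The run-condition loop above turns each WindowFuncRunCondition back into an ordinary boolean OpExpr; wfunc_left records which side of the operator the WindowFunc originally stood on. The conversion in isolation, assuming wfunc and wfuncrc are in scope as in the loop:

    Expr   *leftop;
    Expr   *rightop;
    Expr   *opexpr;

    if (wfuncrc->wfunc_left)
    {
        /* Condition was written as "wfunc OP value". */
        leftop = (Expr *) copyObject(wfunc);
        rightop = copyObject(wfuncrc->arg);
    }
    else
    {
        /* Condition was written as "value OP wfunc". */
        leftop = copyObject(wfuncrc->arg);
        rightop = (Expr *) copyObject(wfunc);
    }

    /* Run-condition operators return boolean and take no collation. */
    opexpr = make_opclause(wfuncrc->opno, BOOLOID, false,
                           leftop, rightop,
                           InvalidOid, wfuncrc->inputcollid);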

◆ create_ordered_paths()

static RelOptInfo * create_ordered_paths ( PlannerInfo *root,
RelOptInfo *input_rel,
PathTarget *target,
bool  target_parallel_safe,
double  limit_tuples 
)
static

Definition at line 5229 of file planner.c.

5234 {
5235  Path *cheapest_input_path = input_rel->cheapest_total_path;
5236  RelOptInfo *ordered_rel;
5237  ListCell *lc;
5238 
5239  /* For now, do all work in the (ORDERED, NULL) upperrel */
5240  ordered_rel = fetch_upper_rel(root, UPPERREL_ORDERED, NULL);
5241 
5242  /*
5243  * If the input relation is not parallel-safe, then the ordered relation
5244  * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
5245  * target list is parallel-safe.
5246  */
5247  if (input_rel->consider_parallel && target_parallel_safe)
5248  ordered_rel->consider_parallel = true;
5249 
5250  /*
5251  * If the input rel belongs to a single FDW, so does the ordered_rel.
5252  */
5253  ordered_rel->serverid = input_rel->serverid;
5254  ordered_rel->userid = input_rel->userid;
5255  ordered_rel->useridiscurrent = input_rel->useridiscurrent;
5256  ordered_rel->fdwroutine = input_rel->fdwroutine;
5257 
5258  foreach(lc, input_rel->pathlist)
5259  {
5260  Path *input_path = (Path *) lfirst(lc);
5261  Path *sorted_path;
5262  bool is_sorted;
5263  int presorted_keys;
5264 
5265  is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5266  input_path->pathkeys, &presorted_keys);
5267 
5268  if (is_sorted)
5269  sorted_path = input_path;
5270  else
5271  {
5272  /*
5273  * Try at least sorting the cheapest path and also try
5274  * incrementally sorting any path which is partially sorted
5275  * already (no need to deal with paths which have presorted keys
5276  * when incremental sort is disabled unless it's the cheapest
5277  * input path).
5278  */
5279  if (input_path != cheapest_input_path &&
5280  (presorted_keys == 0 || !enable_incremental_sort))
5281  continue;
5282 
5283  /*
5284  * We've no need to consider both a sort and incremental sort.
5285  * We'll just do a sort if there are no presorted keys and an
5286  * incremental sort when there are presorted keys.
5287  */
5288  if (presorted_keys == 0 || !enable_incremental_sort)
5289  sorted_path = (Path *) create_sort_path(root,
5290  ordered_rel,
5291  input_path,
5292  root->sort_pathkeys,
5293  limit_tuples);
5294  else
5295  sorted_path = (Path *) create_incremental_sort_path(root,
5296  ordered_rel,
5297  input_path,
5298  root->sort_pathkeys,
5299  presorted_keys,
5300  limit_tuples);
5301  }
5302 
5303  /* Add projection step if needed */
5304  if (sorted_path->pathtarget != target)
5305  sorted_path = apply_projection_to_path(root, ordered_rel,
5306  sorted_path, target);
5307 
5308  add_path(ordered_rel, sorted_path);
5309  }
5310 
5311  /*
5312  * generate_gather_paths() will have already generated a simple Gather
5313  * path for the best parallel path, if any, and the loop above will have
5314  * considered sorting it. Similarly, generate_gather_paths() will also
5315  * have generated order-preserving Gather Merge plans which can be used
5316  * without sorting if they happen to match the sort_pathkeys, and the loop
5317  * above will have handled those as well. However, there's one more
5318  * possibility: it may make sense to sort the cheapest partial path or
5319  * incrementally sort any partial path that is partially sorted according
5320  * to the required output order and then use Gather Merge.
5321  */
5322  if (ordered_rel->consider_parallel && root->sort_pathkeys != NIL &&
5323  input_rel->partial_pathlist != NIL)
5324  {
5325  Path *cheapest_partial_path;
5326 
5327  cheapest_partial_path = linitial(input_rel->partial_pathlist);
5328 
5329  foreach(lc, input_rel->partial_pathlist)
5330  {
5331  Path *input_path = (Path *) lfirst(lc);
5332  Path *sorted_path;
5333  bool is_sorted;
5334  int presorted_keys;
5335  double total_groups;
5336 
5337  is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5338  input_path->pathkeys,
5339  &presorted_keys);
5340 
5341  if (is_sorted)
5342  continue;
5343 
5344  /*
5345  * Try at least sorting the cheapest path and also try
5346  * incrementally sorting any path which is partially sorted
5347  * already (no need to deal with paths which have presorted keys
5348  * when incremental sort is disabled unless it's the cheapest
5349  * partial path).
5350  */
5351  if (input_path != cheapest_partial_path &&
5352  (presorted_keys == 0 || !enable_incremental_sort))
5353  continue;
5354 
5355  /*
5356  * We've no need to consider both a sort and incremental sort.
5357  * We'll just do a sort if there are no presorted keys and an
5358  * incremental sort when there are presorted keys.
5359  */
5360  if (presorted_keys == 0 || !enable_incremental_sort)
5361  sorted_path = (Path *) create_sort_path(root,
5362  ordered_rel,
5363  input_path,
5364  root->sort_pathkeys,
5365  limit_tuples);
5366  else
5367  sorted_path = (Path *) create_incremental_sort_path(root,
5368  ordered_rel,
5369  input_path,
5370  root->sort_pathkeys,
5371  presorted_keys,
5372  limit_tuples);
5373  total_groups = input_path->rows *
5374  input_path->parallel_workers;
5375  sorted_path = (Path *)
5376  create_gather_merge_path(root, ordered_rel,
5377  sorted_path,
5378  sorted_path->pathtarget,
5379  root->sort_pathkeys, NULL,
5380  &total_groups);
5381 
5382  /* Add projection step if needed */
5383  if (sorted_path->pathtarget != target)
5384  sorted_path = apply_projection_to_path(root, ordered_rel,
5385  sorted_path, target);
5386 
5387  add_path(ordered_rel, sorted_path);
5388  }
5389  }
5390 
5391  /*
5392  * If there is an FDW that's responsible for all baserels of the query,
5393  * let it consider adding ForeignPaths.
5394  */
5395  if (ordered_rel->fdwroutine &&
5396  ordered_rel->fdwroutine->GetForeignUpperPaths)
5397  ordered_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_ORDERED,
5398  input_rel, ordered_rel,
5399  NULL);
5400 
5401  /* Let extensions possibly add some more paths */
5402  if (create_upper_paths_hook)
5403  (*create_upper_paths_hook) (root, UPPERREL_ORDERED,
5404  input_rel, ordered_rel, NULL);
5405 
5406  /*
5407  * No need to bother with set_cheapest here; grouping_planner does not
5408  * need us to do it.
5409  */
5410  Assert(ordered_rel->pathlist != NIL);
5411 
5412  return ordered_rel;
5413 }
GatherMergePath * create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *pathkeys, Relids required_outer, double *rows)
Definition: pathnode.c:1881
@ UPPERREL_ORDERED
Definition: pathnodes.h:78
int parallel_workers
Definition: pathnodes.h:1657

References add_path(), apply_projection_to_path(), Assert, RelOptInfo::cheapest_total_path, RelOptInfo::consider_parallel, create_gather_merge_path(), create_incremental_sort_path(), create_sort_path(), create_upper_paths_hook, enable_incremental_sort, fetch_upper_rel(), lfirst, linitial, NIL, Path::parallel_workers, RelOptInfo::partial_pathlist, Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::pathlist, root, Path::rows, RelOptInfo::serverid, UPPERREL_ORDERED, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by grouping_planner().
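Both late steps above hand the same (input_rel, ordered_rel) pair to outside code. A minimal sketch of the FDW side, i.e. a GetForeignUpperPaths callback for a hypothetical "myfdw", with the remote-safety check elided; a real FDW assigns this function to FdwRoutine->GetForeignUpperPaths in its handler:

    #include "postgres.h"
    #include "foreign/fdwapi.h"

    static void
    myfdw_GetForeignUpperPaths(PlannerInfo *root, UpperRelationKind stage,
                               RelOptInfo *input_rel, RelOptInfo *output_rel,
                               void *extra)
    {
        /* This sketch only reacts to the final-sort stage. */
        if (stage != UPPERREL_ORDERED)
            return;

        /*
         * A real FDW would verify that the sort can be performed remotely,
         * build a ForeignPath for it, and register it with
         * add_path(output_rel, ...).
         */
    }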

◆ create_ordinary_grouping_paths()

static void create_ordinary_grouping_paths ( PlannerInfo *root,
RelOptInfo *input_rel,
RelOptInfo *grouped_rel,
const AggClauseCosts *agg_costs,
grouping_sets_data *gd,
GroupPathExtraData *extra,
RelOptInfo **  partially_grouped_rel_p 
)
static

Definition at line 3995 of file planner.c.

4001 {
4002  Path *cheapest_path = input_rel->cheapest_total_path;
4003  RelOptInfo *partially_grouped_rel = NULL;
4004  double dNumGroups;
4005  PartitionwiseAggregateType patype = PARTITIONWISE_AGGREGATE_NONE;
4006 
4007  /*
4008  * If this is the topmost grouping relation or if the parent relation is
4009  * doing some form of partitionwise aggregation, then we may be able to do
4010  * it at this level also. However, if the input relation is not
4011  * partitioned, partitionwise aggregate is impossible.
4012  */
4013  if (extra->patype != PARTITIONWISE_AGGREGATE_NONE &&
4014  IS_PARTITIONED_REL(input_rel))
4015  {
4016  /*
4017  * If this is the topmost relation or if the parent relation is doing
4018  * full partitionwise aggregation, then we can do full partitionwise
4019  * aggregation provided that the GROUP BY clause contains all of the
4020  * partitioning columns at this level. Otherwise, we can do at most
4021  * partial partitionwise aggregation. But if partial aggregation is
4022  * not supported in general then we can't use it for partitionwise
4023  * aggregation either.
4024  *
4025  * Check parse->groupClause not processed_groupClause, because it's
4026  * okay if some of the partitioning columns were proved redundant.
4027  */
4028  if (extra->patype == PARTITIONWISE_AGGREGATE_FULL &&
4029  group_by_has_partkey(input_rel, extra->targetList,
4030  root->parse->groupClause))
4031  patype = PARTITIONWISE_AGGREGATE_FULL;
4032  else if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4033  patype = PARTITIONWISE_AGGREGATE_PARTIAL;
4034  else
4035  patype = PARTITIONWISE_AGGREGATE_NONE;
4036  }
4037 
4038  /*
4039  * Before generating paths for grouped_rel, we first generate any possible
4040  * partially grouped paths; that way, later code can easily consider both
4041  * parallel and non-parallel approaches to grouping.
4042  */
4043  if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4044  {
4045  bool force_rel_creation;
4046 
4047  /*
4048  * If we're doing partitionwise aggregation at this level, force
4049  * creation of a partially_grouped_rel so we can add partitionwise
4050  * paths to it.
4051  */
4052  force_rel_creation = (patype == PARTITIONWISE_AGGREGATE_PARTIAL);
4053 
4054  partially_grouped_rel =
4055  create_partial_grouping_paths(root,
4056  grouped_rel,
4057  input_rel,
4058  gd,
4059  extra,
4060  force_rel_creation);
4061  }
4062 
4063  /* Set out parameter. */
4064  *partially_grouped_rel_p = partially_grouped_rel;
4065 
4066  /* Apply partitionwise aggregation technique, if possible. */
4067  if (patype != PARTITIONWISE_AGGREGATE_NONE)
4068  create_partitionwise_grouping_paths(root, input_rel, grouped_rel,
4069  partially_grouped_rel, agg_costs,
4070  gd, patype, extra);
4071 
4072  /* If we are doing partial aggregation only, return. */
4073  if (extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
4074  {
4075  Assert(partially_grouped_rel);
4076 
4077  if (partially_grouped_rel->pathlist)
4078  set_cheapest(partially_grouped_rel);
4079 
4080  return;
4081  }
4082 
4083  /* Gather any partially grouped partial paths. */
4084  if (partially_grouped_rel && partially_grouped_rel->partial_pathlist)
4085  {
4086  gather_grouping_paths(root, partially_grouped_rel);
4087  set_cheapest(partially_grouped_rel);
4088  }
4089 
4090  /*
4091  * Estimate number of groups.
4092  */
4093  dNumGroups = get_number_of_groups(root,
4094  cheapest_path->rows,
4095  gd,
4096  extra->targetList);
4097 
4098  /* Build final grouping paths */
4099  add_paths_to_grouping_rel(root, input_rel, grouped_rel,
4100  partially_grouped_rel, agg_costs, gd,
4101  dNumGroups, extra);
4102 
4103  /* Give a helpful error if we failed to find any implementation */
4104  if (grouped_rel->pathlist == NIL)
4105  ereport(ERROR,
4106  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4107  errmsg("could not implement GROUP BY"),
4108  errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4109 
4110  /*
4111  * If there is an FDW that's responsible for all baserels of the query,
4112  * let it consider adding ForeignPaths.
4113  */
4114  if (grouped_rel->fdwroutine &&
4115  grouped_rel->fdwroutine->GetForeignUpperPaths)
4116  grouped_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_GROUP_AGG,
4117  input_rel, grouped_rel,
4118  extra);
4119 
4120  /* Let extensions possibly add some more paths */
4121  if (create_upper_paths_hook)
4122  (*create_upper_paths_hook) (root, UPPERREL_GROUP_AGG,
4123  input_rel, grouped_rel,
4124  extra);
4125 }
PartitionwiseAggregateType
Definition: pathnodes.h:3269
@ PARTITIONWISE_AGGREGATE_PARTIAL
Definition: pathnodes.h:3272
@ UPPERREL_GROUP_AGG
Definition: pathnodes.h:74
static RelOptInfo * create_partial_grouping_paths(PlannerInfo *root, RelOptInfo *grouped_rel, RelOptInfo *input_rel, grouping_sets_data *gd, GroupPathExtraData *extra, bool force_rel_creation)
Definition: planner.c:7201
static void add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, double dNumGroups, GroupPathExtraData *extra)
Definition: planner.c:6966
static double get_number_of_groups(PlannerInfo *root, double path_rows, grouping_sets_data *gd, List *target_list)
Definition: planner.c:3622
static void create_partitionwise_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, PartitionwiseAggregateType patype, GroupPathExtraData *extra)
Definition: planner.c:7862
static bool group_by_has_partkey(RelOptInfo *input_rel, List *targetList, List *groupClause)
Definition: planner.c:8006

References add_paths_to_grouping_rel(), Assert, RelOptInfo::cheapest_total_path, create_partial_grouping_paths(), create_partitionwise_grouping_paths(), create_upper_paths_hook, ereport, errcode(), errdetail(), errmsg(), ERROR, GroupPathExtraData::flags, gather_grouping_paths(), get_number_of_groups(), group_by_has_partkey(), GROUPING_CAN_PARTIAL_AGG, IS_PARTITIONED_REL, NIL, RelOptInfo::partial_pathlist, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, PARTITIONWISE_AGGREGATE_PARTIAL, RelOptInfo::pathlist, GroupPathExtraData::patype, root, Path::rows, set_cheapest(), GroupPathExtraData::targetList, and UPPERREL_GROUP_AGG.

Referenced by create_grouping_paths(), and create_partitionwise_grouping_paths().
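When partially grouped partial paths survive, gather_grouping_paths() (static in this file, not shown here) completes them by gathering the per-worker streams. A rough sketch of the kind of path involved, assuming rel is the partially grouped relation; the real function considers more variants than this:

    Path   *cheapest = (Path *) linitial(rel->partial_pathlist);
    double  total_groups = cheapest->rows * cheapest->parallel_workers;

    /* Merge the per-worker group streams while preserving their order. */
    add_path(rel, (Path *)
             create_gather_merge_path(root, rel,
                                      cheapest,
                                      rel->reltarget,
                                      root->group_pathkeys, NULL,
                                      &total_groups));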

◆ create_partial_distinct_paths()

static void create_partial_distinct_paths ( PlannerInfo *root,
RelOptInfo *input_rel,
RelOptInfo *final_distinct_rel,
PathTarget *target 
)
static

Definition at line 4823 of file planner.c.

4826 {
4827  RelOptInfo *partial_distinct_rel;
4828  Query *parse;
4829  List *distinctExprs;
4830  double numDistinctRows;
4831  Path *cheapest_partial_path;
4832  ListCell *lc;
4833 
4834  /* nothing to do when there are no partial paths in the input rel */
4835  if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
4836  return;
4837 
4838  parse = root->parse;
4839 
4840  /* can't do parallel DISTINCT ON */
4841  if (parse->hasDistinctOn)
4842  return;
4843 
4844  partial_distinct_rel = fetch_upper_rel(root, UPPERREL_PARTIAL_DISTINCT,
4845  NULL);
4846  partial_distinct_rel->reltarget = target;
4847  partial_distinct_rel->consider_parallel = input_rel->consider_parallel;
4848 
4849  /*
4850  * If input_rel belongs to a single FDW, so does the partial_distinct_rel.
4851  */
4852  partial_distinct_rel->serverid = input_rel->serverid;
4853  partial_distinct_rel->userid = input_rel->userid;
4854  partial_distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4855  partial_distinct_rel->fdwroutine = input_rel->fdwroutine;
4856 
4857  cheapest_partial_path = linitial(input_rel->partial_pathlist);
4858 
4859  distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
4860  parse->targetList);
4861 
4862  /* estimate how many distinct rows we'll get from each worker */
4863  numDistinctRows = estimate_num_groups(root, distinctExprs,
4864  cheapest_partial_path->rows,
4865  NULL, NULL);
4866 
4867  /*
4868  * Try sorting the cheapest path and incrementally sorting any paths with
4869  * presorted keys and put unique paths atop of those.
4870  */
4871  if (grouping_is_sortable(root->processed_distinctClause))
4872  {
4873  foreach(lc, input_rel->partial_pathlist)
4874  {
4875  Path *input_path = (Path *) lfirst(lc);
4876  Path *sorted_path;
4877  bool is_sorted;
4878  int presorted_keys;
4879 
4880  is_sorted = pathkeys_count_contained_in(root->distinct_pathkeys,
4881  input_path->pathkeys,
4882  &presorted_keys);
4883 
4884  if (is_sorted)
4885  sorted_path = input_path;
4886  else
4887  {
4888  /*
4889  * Try at least sorting the cheapest path and also try
4890  * incrementally sorting any path which is partially sorted
4891  * already (no need to deal with paths which have presorted
4892  * keys when incremental sort is disabled unless it's the
4893  * cheapest partial path).
4894  */
4895  if (input_path != cheapest_partial_path &&
4896  (presorted_keys == 0 || !enable_incremental_sort))
4897  continue;
4898 
4899  /*
4900  * We've no need to consider both a sort and incremental sort.
4901  * We'll just do a sort if there are no presorted keys and an
4902  * incremental sort when there are presorted keys.
4903  */
4904  if (presorted_keys == 0 || !enable_incremental_sort)
4905  sorted_path = (Path *) create_sort_path(root,
4906  partial_distinct_rel,
4907  input_path,
4908  root->distinct_pathkeys,
4909  -1.0);
4910  else
4911  sorted_path = (Path *) create_incremental_sort_path(root,
4912  partial_distinct_rel,
4913  input_path,
4914  root->distinct_pathkeys,
4915  presorted_keys,
4916  -1.0);
4917  }
4918 
4919  /*
4920  * An empty distinct_pathkeys means all tuples have the same value
4921  * for the DISTINCT clause. See create_final_distinct_paths()
4922  */
4923  if (root->distinct_pathkeys == NIL)
4924  {
4925  Node *limitCount;
4926 
4927  limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
4928  sizeof(int64),
4929  Int64GetDatum(1), false,
4930  FLOAT8PASSBYVAL);
4931 
4932  /*
4933  * Apply a LimitPath onto the partial path to restrict the
4934  * tuples from each worker to 1. create_final_distinct_paths
4935  * will need to apply an additional LimitPath to restrict this
4936  * to a single row after the Gather node. If the query
4937  * already has a LIMIT clause, then we could end up with three
4938  * Limit nodes in the final plan. Consolidating the top two
4939  * of these could be done, but does not seem worth troubling
4940  * over.
4941  */
4942  add_partial_path(partial_distinct_rel, (Path *)
4943  create_limit_path(root, partial_distinct_rel,
4944  sorted_path,
4945  NULL,
4946  limitCount,
4947  LIMIT_OPTION_COUNT,
4948  0, 1));
4949  }
4950  else
4951  {
4952  add_partial_path(partial_distinct_rel, (Path *)
4953  create_upper_unique_path(root, partial_distinct_rel,
4954  sorted_path,
4955  list_length(root->distinct_pathkeys),
4956  numDistinctRows));
4957  }
4958  }
4959  }
4960 
4961  /*
4962  * Now try hash aggregate paths, if enabled and hashing is possible. Since
4963  * we're not on the hook to ensure we do our best to create at least one
4964  * path here, we treat enable_hashagg as a hard off-switch rather than the
4965  * slightly softer variant in create_final_distinct_paths.
4966  */
4967  if (enable_hashagg && grouping_is_hashable(root->processed_distinctClause))
4968  {
4969  add_partial_path(partial_distinct_rel, (Path *)
4970  create_agg_path(root,
4971  partial_distinct_rel,
4972  cheapest_partial_path,
4973  cheapest_partial_path->pathtarget,
4974  AGG_HASHED,
4975  AGGSPLIT_SIMPLE,
4976  root->processed_distinctClause,
4977  NIL,
4978  NULL,
4979  numDistinctRows));
4980  }
4981 
4982  /*
4983  * If there is an FDW that's responsible for all baserels of the query,
4984  * let it consider adding ForeignPaths.
4985  */
4986  if (partial_distinct_rel->fdwroutine &&
4987  partial_distinct_rel->fdwroutine->GetForeignUpperPaths)
4988  partial_distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4989  UPPERREL_PARTIAL_DISTINCT,
4990  input_rel,
4991  partial_distinct_rel,
4992  NULL);
4993 
4994  /* Let extensions possibly add some more partial paths */
4995  if (create_upper_paths_hook)
4996  (*create_upper_paths_hook) (root, UPPERREL_PARTIAL_DISTINCT,
4997  input_rel, partial_distinct_rel, NULL);
4998 
4999  if (partial_distinct_rel->partial_pathlist != NIL)
5000  {
5001  generate_useful_gather_paths(root, partial_distinct_rel, true);
5002  set_cheapest(partial_distinct_rel);
5003 
5004  /*
5005  * Finally, create paths to distinctify the final result. This step
5006  * is needed to remove any duplicates due to combining rows from
5007  * parallel workers.
5008  */
5009  create_final_distinct_paths(root, partial_distinct_rel,
5010  final_distinct_rel);
5011  }
5012 }
void add_partial_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:747
@ UPPERREL_PARTIAL_DISTINCT
Definition: pathnodes.h:76

References add_partial_path(), AGG_HASHED, AGGSPLIT_SIMPLE, RelOptInfo::consider_parallel, create_agg_path(), create_final_distinct_paths(), create_incremental_sort_path(), create_limit_path(), create_sort_path(), create_upper_paths_hook, create_upper_unique_path(), enable_hashagg, enable_incremental_sort, estimate_num_groups(), fetch_upper_rel(), FLOAT8PASSBYVAL, generate_useful_gather_paths(), get_sortgrouplist_exprs(), grouping_is_hashable(), grouping_is_sortable(), Int64GetDatum(), InvalidOid, lfirst, LIMIT_OPTION_COUNT, linitial, list_length(), makeConst(), NIL, parse(), RelOptInfo::partial_pathlist, Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::reltarget, root, Path::rows, RelOptInfo::serverid, set_cheapest(), UPPERREL_PARTIAL_DISTINCT, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by create_distinct_paths().
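This function and create_final_distinct_paths() build the same one-row limit constant. The makeConst() call annotated argument by argument, for reference:

    /* A Const node for the bigint literal 1, used as the LIMIT count. */
    Node *limitCount = (Node *) makeConst(INT8OID,     /* consttype: bigint */
                                          -1,          /* typmod: none */
                                          InvalidOid,  /* collation: none */
                                          sizeof(int64),    /* value width */
                                          Int64GetDatum(1), /* the value */
                                          false,       /* constisnull */
                                          FLOAT8PASSBYVAL); /* byval on
                                                             * 64-bit builds */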

◆ create_partial_grouping_paths()

static RelOptInfo * create_partial_grouping_paths ( PlannerInfo *root,
RelOptInfo *grouped_rel,
RelOptInfo *input_rel,
grouping_sets_data *gd,
GroupPathExtraData *extra,
bool  force_rel_creation 
)
static

Definition at line 7201 of file planner.c.

7207 {
7208  Query *parse = root->parse;
7209  RelOptInfo *partially_grouped_rel;
7210  AggClauseCosts *agg_partial_costs = &extra->agg_partial_costs;
7211  AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7212  Path *cheapest_partial_path = NULL;
7213  Path *cheapest_total_path = NULL;
7214  double dNumPartialGroups = 0;
7215  double dNumPartialPartialGroups = 0;
7216  ListCell *lc;
7217  bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7218  bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7219 
7220  /*
7221  * Consider whether we should generate partially aggregated non-partial
7222  * paths. We can only do this if we have a non-partial path, and only if
7223  * the parent of the input rel is performing partial partitionwise
7224  * aggregation. (Note that extra->patype is the type of partitionwise
7225  * aggregation being used at the parent level, not this level.)
7226  */
7227  if (input_rel->pathlist != NIL &&
7228  extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
7229  cheapest_total_path = input_rel->cheapest_total_path;
7230 
7231  /*
7232  * If parallelism is possible for grouped_rel, then we should consider
7233  * generating partially-grouped partial paths. However, if the input rel
7234  * has no partial paths, then we can't.
7235  */
7236  if (grouped_rel->consider_parallel && input_rel->partial_pathlist != NIL)
7237  cheapest_partial_path = linitial(input_rel->partial_pathlist);
7238 
7239  /*
7240  * If we can't partially aggregate partial paths, and we can't partially
7241  * aggregate non-partial paths, then don't bother creating the new
7242  * RelOptInfo at all, unless the caller specified force_rel_creation.
7243  */
7244  if (cheapest_total_path == NULL &&
7245  cheapest_partial_path == NULL &&
7246  !force_rel_creation)
7247  return NULL;
7248 
7249  /*
7250  * Build a new upper relation to represent the result of partially
7251  * aggregating the rows from the input relation.
7252  */
7253  partially_grouped_rel = fetch_upper_rel(root,
7254  UPPERREL_PARTIAL_GROUP_AGG,
7255  grouped_rel->relids);
7256  partially_grouped_rel->consider_parallel =
7257  grouped_rel->consider_parallel;
7258  partially_grouped_rel->reloptkind = grouped_rel->reloptkind;
7259  partially_grouped_rel->serverid = grouped_rel->serverid;
7260  partially_grouped_rel->userid = grouped_rel->userid;
7261  partially_grouped_rel->useridiscurrent = grouped_rel->useridiscurrent;
7262  partially_grouped_rel->fdwroutine = grouped_rel->fdwroutine;
7263 
7264  /*
7265  * Build target list for partial aggregate paths. These paths cannot just
7266  * emit the same tlist as regular aggregate paths, because (1) we must
7267  * include Vars and Aggrefs needed in HAVING, which might not appear in
7268  * the result tlist, and (2) the Aggrefs must be set in partial mode.
7269  */
7270  partially_grouped_rel->reltarget =
7271  make_partial_grouping_target(root, grouped_rel->reltarget,
7272  extra->havingQual);
7273 
7274  if (!extra->partial_costs_set)
7275  {
7276  /*
7277  * Collect statistics about aggregates for estimating costs of
7278  * performing aggregation in parallel.
7279  */
7280  MemSet(agg_partial_costs, 0, sizeof(AggClauseCosts));
7281  MemSet(agg_final_costs, 0, sizeof(AggClauseCosts));
7282  if (parse->hasAggs)
7283  {
7284  /* partial phase */
7285  get_agg_clause_costs(root, AGGSPLIT_INITIAL_SERIAL,
7286  agg_partial_costs);
7287 
7288  /* final phase */
7289  get_agg_clause_costs(root, AGGSPLIT_FINAL_DESERIAL,
7290  agg_final_costs);
7291  }
7292 
7293  extra->partial_costs_set = true;
7294  }
7295 
7296  /* Estimate number of partial groups. */
7297  if (cheapest_total_path != NULL)
7298  dNumPartialGroups =
7299  get_number_of_groups(root,
7300  cheapest_total_path->rows,
7301  gd,
7302  extra->targetList);
7303  if (cheapest_partial_path != NULL)
7304  dNumPartialPartialGroups =
7305  get_number_of_groups(root,
7306  cheapest_partial_path->rows,
7307  gd,
7308  extra->targetList);
7309 
7310  if (can_sort && cheapest_total_path != NULL)
7311  {
7312  /* This should have been checked previously */
7313  Assert(parse->hasAggs || parse->groupClause);
7314 
7315  /*
7316  * Use any available suitably-sorted path as input, and also consider
7317  * sorting the cheapest partial path.
7318  */
7319  foreach(lc, input_rel->pathlist)
7320  {
7321  ListCell *lc2;
7322  Path *path = (Path *) lfirst(lc);
7323  Path *path_save = path;
7324  List *pathkey_orderings = NIL;
7325 
7326  /* generate alternative group orderings that might be useful */
7327  pathkey_orderings = get_useful_group_keys_orderings(root, path);
7328 
7329  Assert(list_length(pathkey_orderings) > 0);
7330 
7331  /* process all potentially interesting grouping reorderings */
7332  foreach(lc2, pathkey_orderings)
7333  {
7334  GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7335 
7336  /* restore the path (we replace it in the loop) */
7337  path = path_save;
7338 
7339  path = make_ordered_path(root,
7340  partially_grouped_rel,
7341  path,
7342  cheapest_total_path,
7343  info->pathkeys);
7344 
7345  if (path == NULL)
7346  continue;
7347 
7348  if (parse->hasAggs)
7349  add_path(partially_grouped_rel, (Path *)
7350  create_agg_path(root,
7351  partially_grouped_rel,
7352  path,
7353  partially_grouped_rel->reltarget,
7354  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7355  AGGSPLIT_INITIAL_SERIAL,
7356  info->clauses,
7357  NIL,
7358  agg_partial_costs,
7359  dNumPartialGroups));
7360  else
7361  add_path(partially_grouped_rel, (Path *)
7362  create_group_path(root,
7363  partially_grouped_rel,
7364  path,
7365  info->clauses,
7366  NIL,
7367  dNumPartialGroups));
7368  }
7369  }
7370  }
7371 
7372  if (can_sort && cheapest_partial_path != NULL)
7373  {
7374  /* Similar to above logic, but for partial paths. */
7375  foreach(lc, input_rel->partial_pathlist)
7376  {
7377  ListCell *lc2;
7378  Path *path = (Path *) lfirst(lc);
7379  Path *path_save = path;
7380  List *pathkey_orderings = NIL;
7381 
7382  /* generate alternative group orderings that might be useful */
7383  pathkey_orderings = get_useful_group_keys_orderings(root, path);
7384 
7385  Assert(list_length(pathkey_orderings) > 0);
7386 
7387  /* process all potentially interesting grouping reorderings */
7388  foreach(lc2, pathkey_orderings)
7389  {
7390  GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7391 
7392 
7393  /* restore the path (we replace it in the loop) */
7394  path = path_save;
7395 
7396  path = make_ordered_path(root,
7397  partially_grouped_rel,
7398  path,
7399  cheapest_partial_path,
7400  info->pathkeys);
7401 
7402  if (path == NULL)
7403  continue;
7404 
7405  if (parse->hasAggs)
7406  add_partial_path(partially_grouped_rel, (Path *)
7407  create_agg_path(root,
7408  partially_grouped_rel,
7409  path,
7410  partially_grouped_rel->reltarget,
7411  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7412  AGGSPLIT_INITIAL_SERIAL,
7413  info->clauses,
7414  NIL,
7415  agg_partial_costs,
7416  dNumPartialPartialGroups));
7417  else
7418  add_partial_path(partially_grouped_rel, (Path *)
7419  create_group_path(root,
7420  partially_grouped_rel,
7421  path,
7422  info->clauses,
7423  NIL,
7424  dNumPartialPartialGroups));
7425  }
7426  }
7427  }
7428 
7429  /*
7430  * Add a partially-grouped HashAgg Path where possible
7431  */
7432  if (can_hash && cheapest_total_path != NULL)
7433  {
7434  /* Checked above */
7435  Assert(parse->hasAggs || parse->groupClause);
7436 
7437  add_path(partially_grouped_rel, (Path *)
7438  create_agg_path(root,
7439  partially_grouped_rel,
7440  cheapest_total_path,
7441  partially_grouped_rel->reltarget,
7442  AGG_HASHED,
7443  AGGSPLIT_INITIAL_SERIAL,
7444  root->processed_groupClause,
7445  NIL,
7446  agg_partial_costs,
7447  dNumPartialGroups));
7448  }
7449 
7450  /*
7451  * Now add a partially-grouped HashAgg partial Path where possible
7452  */
7453  if (can_hash && cheapest_partial_path != NULL)
7454  {
7455  add_partial_path(partially_grouped_rel, (Path *)
7456  create_agg_path(root,
7457  partially_grouped_rel,
7458  cheapest_partial_path,
7459  partially_grouped_rel->reltarget,
7460  AGG_HASHED,
7461  AGGSPLIT_INITIAL_SERIAL,
7462  root->processed_groupClause,
7463  NIL,
7464  agg_partial_costs,
7465  dNumPartialPartialGroups));
7466  }
7467 
7468  /*
7469  * If there is an FDW that's responsible for all baserels of the query,
7470  * let it consider adding partially grouped ForeignPaths.
7471  */
7472  if (partially_grouped_rel->fdwroutine &&
7473  partially_grouped_rel->fdwroutine->GetForeignUpperPaths)
7474  {
7475  FdwRoutine *fdwroutine = partially_grouped_rel->fdwroutine;
7476 
7477  fdwroutine->GetForeignUpperPaths(root,
7478  UPPERREL_PARTIAL_GROUP_AGG,
7479  input_rel, partially_grouped_rel,
7480  extra);
7481  }
7482 
7483  return partially_grouped_rel;
7484 }
@ AGGSPLIT_INITIAL_SERIAL
Definition: nodes.h:378
@ UPPERREL_PARTIAL_GROUP_AGG
Definition: pathnodes.h:72
static PathTarget * make_partial_grouping_target(PlannerInfo *root, PathTarget *grouping_target, Node *havingQual)
Definition: planner.c:5532
GetForeignUpperPaths_function GetForeignUpperPaths
Definition: fdwapi.h:226
AggClauseCosts agg_partial_costs
Definition: pathnodes.h:3293
RelOptKind reloptkind
Definition: pathnodes.h:859

References add_partial_path(), add_path(), GroupPathExtraData::agg_final_costs, AGG_HASHED, GroupPathExtraData::agg_partial_costs, AGG_PLAIN, AGG_SORTED, AGGSPLIT_FINAL_DESERIAL, AGGSPLIT_INITIAL_SERIAL, Assert, RelOptInfo::cheapest_total_path, GroupByOrdering::clauses, RelOptInfo::consider_parallel, create_agg_path(), create_group_path(), fetch_upper_rel(), GroupPathExtraData::flags, get_agg_clause_costs(), get_number_of_groups(), get_useful_group_keys_orderings(), FdwRoutine::GetForeignUpperPaths, GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, GroupPathExtraData::havingQual, lfirst, linitial, list_length(), make_ordered_path(), make_partial_grouping_target(), MemSet, NIL, parse(), GroupPathExtraData::partial_costs_set, RelOptInfo::partial_pathlist, PARTITIONWISE_AGGREGATE_PARTIAL, GroupByOrdering::pathkeys, RelOptInfo::pathlist, GroupPathExtraData::patype, RelOptInfo::relids, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, Path::rows, RelOptInfo::serverid, GroupPathExtraData::targetList, UPPERREL_PARTIAL_GROUP_AGG, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by create_ordinary_grouping_paths().
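
The AGGSPLIT_INITIAL_SERIAL paths built here emit only per-process transition states; a matching AGGSPLIT_FINAL_DESERIAL step higher in the plan combines and finalizes them. The following standalone sketch (illustrative only, not PostgreSQL code) shows that two-phase split for an AVG-style aggregate:

#include <stdio.h>

typedef struct AvgState
{
	double		sum;
	long		count;
} AvgState;

/* "initial" phase: each worker folds its rows into a private state */
static void
avg_partial(AvgState *st, const double *rows, int n)
{
	for (int i = 0; i < n; i++)
	{
		st->sum += rows[i];
		st->count++;
	}
}

/* "final" phase: combine the partial states, then finalize */
static double
avg_final(const AvgState *states, int nstates)
{
	AvgState	total = {0.0, 0};

	for (int i = 0; i < nstates; i++)
	{
		total.sum += states[i].sum;
		total.count += states[i].count;
	}
	return total.count ? total.sum / total.count : 0.0;
}

int main(void)
{
	double		w1[] = {1.0, 2.0, 3.0};
	double		w2[] = {4.0, 5.0};
	AvgState	st[2] = {{0.0, 0}, {0.0, 0}};

	avg_partial(&st[0], w1, 3);	/* worker 1 */
	avg_partial(&st[1], w2, 2);	/* worker 2 */
	printf("avg = %g\n", avg_final(st, 2));	/* avg = 3 */
	return 0;
}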

◆ create_partitionwise_grouping_paths()

static void create_partitionwise_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo grouped_rel,
RelOptInfo partially_grouped_rel,
const AggClauseCosts agg_costs,
grouping_sets_data gd,
PartitionwiseAggregateType  patype,
GroupPathExtraData extra 
)
static

Definition at line 7862 of file planner.c.

7870 {
7871  List *grouped_live_children = NIL;
7872  List *partially_grouped_live_children = NIL;
7873  PathTarget *target = grouped_rel->reltarget;
7874  bool partial_grouping_valid = true;
7875  int i;
7876 
7877  Assert(patype != PARTITIONWISE_AGGREGATE_NONE);
7878  Assert(patype != PARTITIONWISE_AGGREGATE_PARTIAL ||
7879  partially_grouped_rel != NULL);
7880 
7881  /* Add paths for partitionwise aggregation/grouping. */
7882  i = -1;
7883  while ((i = bms_next_member(input_rel->live_parts, i)) >= 0)
7884  {
7885  RelOptInfo *child_input_rel = input_rel->part_rels[i];
7886  PathTarget *child_target;
7887  AppendRelInfo **appinfos;
7888  int nappinfos;
7889  GroupPathExtraData child_extra;
7890  RelOptInfo *child_grouped_rel;
7891  RelOptInfo *child_partially_grouped_rel;
7892 
7893  Assert(child_input_rel != NULL);
7894 
7895  /* Dummy children can be ignored. */
7896  if (IS_DUMMY_REL(child_input_rel))
7897  continue;
7898 
7899  child_target = copy_pathtarget(target);
7900 
7901  /*
7902  * Copy the given "extra" structure as is and then override the
7903  * members specific to this child.
7904  */
7905  memcpy(&child_extra, extra, sizeof(child_extra));
7906 
7907  appinfos = find_appinfos_by_relids(root, child_input_rel->relids,
7908  &nappinfos);
7909 
7910  child_target->exprs = (List *)
7911  adjust_appendrel_attrs(root,
7912  (Node *) target->exprs,
7913  nappinfos, appinfos);
7914 
7915  /* Translate havingQual and targetList. */
7916  child_extra.havingQual = (Node *)
7917  adjust_appendrel_attrs(root,
7918  extra->havingQual,
7919  nappinfos, appinfos);
7920  child_extra.targetList = (List *)
7921  adjust_appendrel_attrs(root,
7922  (Node *) extra->targetList,
7923  nappinfos, appinfos);
7924 
7925  /*
7926  * extra->patype was the value computed for our parent rel; patype is
7927  * the value for this relation. For the child, our value is its
7928  * parent rel's value.
7929  */
7930  child_extra.patype = patype;
7931 
7932  /*
7933  * Create grouping relation to hold fully aggregated grouping and/or
7934  * aggregation paths for the child.
7935  */
7936  child_grouped_rel = make_grouping_rel(root, child_input_rel,
7937  child_target,
7938  extra->target_parallel_safe,
7939  child_extra.havingQual);
7940 
7941  /* Create grouping paths for this child relation. */
7942  create_ordinary_grouping_paths(root, child_input_rel,
7943  child_grouped_rel,
7944  agg_costs, gd, &child_extra,
7945  &child_partially_grouped_rel);
7946 
7947  if (child_partially_grouped_rel)
7948  {
7949  partially_grouped_live_children =
7950  lappend(partially_grouped_live_children,
7951  child_partially_grouped_rel);
7952  }
7953  else
7954  partial_grouping_valid = false;
7955 
7956  if (patype == PARTITIONWISE_AGGREGATE_FULL)
7957  {
7958  set_cheapest(child_grouped_rel);
7959  grouped_live_children = lappend(grouped_live_children,
7960  child_grouped_rel);
7961  }
7962 
7963  pfree(appinfos);
7964  }
7965 
7966  /*
7967  * Try to create append paths for partially grouped children. For full
7968  * partitionwise aggregation, we might have paths in the partial_pathlist
7969  * if parallel aggregation is possible. For partial partitionwise
7970  * aggregation, we may have paths in both pathlist and partial_pathlist.
7971  *
7972  * NB: We must have a partially grouped path for every child in order to
7973  * generate a partially grouped path for this relation.
7974  */
7975  if (partially_grouped_rel && partial_grouping_valid)
7976  {
7977  Assert(partially_grouped_live_children != NIL);
7978 
7979  add_paths_to_append_rel(root, partially_grouped_rel,
7980  partially_grouped_live_children);
7981 
7982  /*
7983  * We need to call set_cheapest, since the finalization step will use the
7984  * cheapest path from the rel.
7985  */
7986  if (partially_grouped_rel->pathlist)
7987  set_cheapest(partially_grouped_rel);
7988  }
7989 
7990  /* If possible, create append paths for fully grouped children. */
7991  if (patype == PARTITIONWISE_AGGREGATE_FULL)
7992  {
7993  Assert(grouped_live_children != NIL);
7994 
7995  add_paths_to_append_rel(root, grouped_rel, grouped_live_children);
7996  }
7997 }

References add_paths_to_append_rel(), adjust_appendrel_attrs(), Assert, bms_next_member(), copy_pathtarget(), create_ordinary_grouping_paths(), PathTarget::exprs, find_appinfos_by_relids(), GroupPathExtraData::havingQual, i, IS_DUMMY_REL, lappend(), RelOptInfo::live_parts, make_grouping_rel(), NIL, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, PARTITIONWISE_AGGREGATE_PARTIAL, RelOptInfo::pathlist, GroupPathExtraData::patype, pfree(), RelOptInfo::relids, RelOptInfo::reltarget, root, set_cheapest(), GroupPathExtraData::target_parallel_safe, and GroupPathExtraData::targetList.

Referenced by create_ordinary_grouping_paths().
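
When the GROUP BY clause covers the partition key (PARTITIONWISE_AGGREGATE_FULL), every group is confined to a single partition, so each child can be aggregated to completion and the per-child results simply appended. A toy standalone illustration of that property (the range partitioning and SUM query are invented for this sketch, not planner code):

#include <stdio.h>

typedef struct Row
{
	int			key;
	double		val;
} Row;

/* Aggregate one partition independently: SUM(val) GROUP BY key.
 * Rows are assumed presorted by key within the partition to keep it short. */
static void
sum_partition(const Row *rows, int n)
{
	int			i = 0;

	while (i < n)
	{
		int			key = rows[i].key;
		double		sum = 0.0;

		while (i < n && rows[i].key == key)
			sum += rows[i++].val;
		printf("key=%d sum=%g\n", key, sum);	/* a fully grouped row */
	}
}

int main(void)
{
	/* range partitioning on key: p1 holds key < 10, p2 holds key >= 10 */
	Row			p1[] = {{1, 2.0}, {1, 3.0}, {7, 1.0}};
	Row			p2[] = {{10, 4.0}, {12, 0.5}, {12, 0.5}};

	sum_partition(p1, 3);		/* child grouped rel for p1 */
	sum_partition(p2, 3);		/* child grouped rel for p2 */

	/* "Appending" the two children is already the final result: no
	 * cross-partition combine step is needed, because each group lives in
	 * exactly one partition (cf. group_by_has_partkey()). */
	return 0;
}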

◆ create_window_paths()

static RelOptInfo * create_window_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget input_target,
PathTarget output_target,
bool  output_target_parallel_safe,
WindowFuncLists wflists,
List activeWindows 
)
static

Definition at line 4496 of file planner.c.

4503 {
4504  RelOptInfo *window_rel;
4505  ListCell *lc;
4506 
4507  /* For now, do all work in the (WINDOW, NULL) upperrel */
4508  window_rel = fetch_upper_rel(root, UPPERREL_WINDOW, NULL);
4509 
4510  /*
4511  * If the input relation is not parallel-safe, then the window relation
4512  * can't be parallel-safe, either. Otherwise, we need to examine the
4513  * target list and active windows for non-parallel-safe constructs.
4514  */
4515  if (input_rel->consider_parallel && output_target_parallel_safe &&
4516  is_parallel_safe(root, (Node *) activeWindows))
4517  window_rel->consider_parallel = true;
4518 
4519  /*
4520  * If the input rel belongs to a single FDW, so does the window rel.
4521  */
4522  window_rel->serverid = input_rel->serverid;
4523  window_rel->userid = input_rel->userid;
4524  window_rel->useridiscurrent = input_rel->useridiscurrent;
4525  window_rel->fdwroutine = input_rel->fdwroutine;
4526 
4527  /*
4528  * Consider computing window functions starting from the existing
4529  * cheapest-total path (which will likely require a sort) as well as any
4530  * existing paths that satisfy or partially satisfy root->window_pathkeys.
4531  */
4532  foreach(lc, input_rel->pathlist)
4533  {
4534  Path *path = (Path *) lfirst(lc);
4535  int presorted_keys;
4536 
4537  if (path == input_rel->cheapest_total_path ||
4538  pathkeys_count_contained_in(root->window_pathkeys, path->pathkeys,
4539  &presorted_keys) ||
4540  presorted_keys > 0)
4541  create_one_window_path(root,
4542  window_rel,
4543  path,
4544  input_target,
4545  output_target,
4546  wflists,
4547  activeWindows);
4548  }
4549 
4550  /*
4551  * If there is an FDW that's responsible for all baserels of the query,
4552  * let it consider adding ForeignPaths.
4553  */
4554  if (window_rel->fdwroutine &&
4555  window_rel->fdwroutine->GetForeignUpperPaths)
4556  window_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_WINDOW,
4557  input_rel, window_rel,
4558  NULL);
4559 
4560  /* Let extensions possibly add some more paths */
4561  if (create_upper_paths_hook)
4562  (*create_upper_paths_hook) (root, UPPERREL_WINDOW,
4563  input_rel, window_rel, NULL);
4564 
4565  /* Now choose the best path(s) */
4566  set_cheapest(window_rel);
4567 
4568  return window_rel;
4569 }
bool is_parallel_safe(PlannerInfo *root, Node *node)
Definition: clauses.c:753
@ UPPERREL_WINDOW
Definition: pathnodes.h:75
static void create_one_window_path(PlannerInfo *root, RelOptInfo *window_rel, Path *path, PathTarget *input_target, PathTarget *output_target, WindowFuncLists *wflists, List *activeWindows)
Definition: planner.c:4583

References RelOptInfo::cheapest_total_path, RelOptInfo::consider_parallel, create_one_window_path(), create_upper_paths_hook, fetch_upper_rel(), is_parallel_safe(), lfirst, Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::pathlist, root, RelOptInfo::serverid, set_cheapest(), UPPERREL_WINDOW, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by grouping_planner().

◆ expression_planner()

Expr* expression_planner ( Expr expr)

Definition at line 6581 of file planner.c.

6582 {
6583  Node *result;
6584 
6585  /*
6586  * Convert named-argument function calls, insert default arguments and
6587  * simplify constant subexprs
6588  */
6589  result = eval_const_expressions(NULL, (Node *) expr);
6590 
6591  /* Fill in opfuncid values if missing */
6592  fix_opfuncids(result);
6593 
6594  return (Expr *) result;
6595 }
Node * eval_const_expressions(PlannerInfo *root, Node *node)
Definition: clauses.c:2254
void fix_opfuncids(Node *node)
Definition: nodeFuncs.c:1837

References eval_const_expressions(), and fix_opfuncids().

Referenced by ATExecAddColumn(), ATExecSetExpression(), ATPrepAlterColumnType(), BeginCopyFrom(), ComputePartitionAttrs(), contain_mutable_functions_after_planning(), contain_volatile_functions_after_planning(), ExecPrepareCheck(), ExecPrepareExpr(), ExecPrepareQual(), load_domaintype_info(), set_baserel_partition_constraint(), slot_fill_defaults(), StoreAttrDefault(), and transformPartitionBoundValue().
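
Callers that also need to evaluate the expression once typically go through ExecPrepareExpr(), which runs expression_planner() internally and then builds an ExprState. A hedged sketch of such a caller; the helper name eval_standalone_expr is invented for illustration:

#include "postgres.h"

#include "executor/executor.h"

/*
 * eval_standalone_expr -- hypothetical helper, for illustration only.
 *
 * Evaluate a one-off expression tree (e.g. a column DEFAULT) outside any
 * plan.  ExecPrepareExpr() applies expression_planner() to the tree and
 * then initializes it in the given EState.
 */
static Datum
eval_standalone_expr(Expr *raw_expr, bool *isnull)
{
	EState	   *estate = CreateExecutorState();
	ExprState  *exprstate;
	Datum		result;

	exprstate = ExecPrepareExpr(raw_expr, estate);
	result = ExecEvalExpr(exprstate, GetPerTupleExprContext(estate), isnull);

	/*
	 * NB: a pass-by-reference result lives in the estate's per-tuple memory
	 * context, so a real caller must copy it (e.g. with datumCopy) before
	 * freeing the executor state.
	 */
	FreeExecutorState(estate);
	return result;
}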

◆ expression_planner_with_deps()

Expr* expression_planner_with_deps ( Expr expr,
List **  relationOids,
List **  invalItems 
)

Definition at line 6608 of file planner.c.

6611 {
6612  Node *result;
6613  PlannerGlobal glob;
6614  PlannerInfo root;
6615 
6616  /* Make up dummy planner state so we can use setrefs machinery */
6617  MemSet(&glob, 0, sizeof(glob));
6618  glob.type = T_PlannerGlobal;
6619  glob.relationOids = NIL;
6620  glob.invalItems = NIL;
6621 
6622  MemSet(&root, 0, sizeof(root));
6623  root.type = T_PlannerInfo;
6624  root.glob = &glob;
6625 
6626  /*
6627  * Convert named-argument function calls, insert default arguments and
6628  * simplify constant subexprs. Collect identities of inlined functions
6629  * and elided domains, too.
6630  */
6631  result = eval_const_expressions(&root, (Node *) expr);
6632 
6633  /* Fill in opfuncid values if missing */
6634  fix_opfuncids(result);
6635 
6636  /*
6637  * Now walk the finished expression to find anything else we ought to
6638  * record as an expression dependency.
6639  */
6640  (void) extract_query_dependencies_walker(result, &root);
6641 
6642  *relationOids = glob.relationOids;
6643  *invalItems = glob.invalItems;
6644 
6645  return (Expr *) result;
6646 }
bool extract_query_dependencies_walker(Node *node, PlannerInfo *context)
Definition: setrefs.c:3577
List * invalItems
Definition: pathnodes.h:135
List * relationOids
Definition: pathnodes.h:132

References eval_const_expressions(), extract_query_dependencies_walker(), fix_opfuncids(), PlannerGlobal::invalItems, MemSet, NIL, PlannerGlobal::relationOids, and root.

Referenced by GetCachedExpression().

◆ extract_rollup_sets()

static List * extract_rollup_sets ( List groupingSets)
static

Definition at line 2947 of file planner.c.

2948 {
2949  int num_sets_raw = list_length(groupingSets);
2950  int num_empty = 0;
2951  int num_sets = 0; /* distinct sets */
2952  int num_chains = 0;
2953  List *result = NIL;
2954  List **results;
2955  List **orig_sets;
2956  Bitmapset **set_masks;
2957  int *chains;
2958  short **adjacency;
2959  short *adjacency_buf;
2960  BipartiteMatchState *state;
2961  int i;
2962  int j;
2963  int j_size;
2964  ListCell *lc1 = list_head(groupingSets);
2965  ListCell *lc;
2966 
2967  /*
2968  * Start by stripping out empty sets. The algorithm doesn't require this,
2969  * but the planner currently needs all empty sets to be returned in the
2970  * first list, so we strip them here and add them back after.
2971  */
2972  while (lc1 && lfirst(lc1) == NIL)
2973  {
2974  ++num_empty;
2975  lc1 = lnext(groupingSets, lc1);
2976  }
2977 
2978  /* bail out now if it turns out that all we had were empty sets. */
2979  if (!lc1)
2980  return list_make1(groupingSets);
2981 
2982  /*----------
2983  * We don't strictly need to remove duplicate sets here, but if we don't,
2984  * they tend to become scattered through the result, which is a bit
2985  * confusing (and irritating if we ever decide to optimize them out).
2986  * So we remove them here and add them back after.
2987  *
2988  * For each non-duplicate set, we fill in the following:
2989  *
2990  * orig_sets[i] = list of the original set lists
2991  * set_masks[i] = bitmapset for testing inclusion
2992  * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
2993  *
2994  * chains[i] will be the result group this set is assigned to.
2995  *
2996  * We index all of these from 1 rather than 0 because it is convenient
2997  * to leave 0 free for the NIL node in the graph algorithm.
2998  *----------
2999  */
3000  orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
3001  set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
3002  adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
3003  adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
3004 
3005  j_size = 0;
3006  j = 0;
3007  i = 1;
3008 
3009  for_each_cell(lc, groupingSets, lc1)
3010  {
3011  List *candidate = (List *) lfirst(lc);
3012  Bitmapset *candidate_set = NULL;
3013  ListCell *lc2;
3014  int dup_of = 0;
3015 
3016  foreach(lc2, candidate)
3017  {
3018  candidate_set = bms_add_member(candidate_set, lfirst_int(lc2));
3019  }
3020 
3021  /* we can only be a dup if we're the same length as a previous set */
3022  if (j_size == list_length(candidate))
3023  {
3024  int k;
3025 
3026  for (k = j; k < i; ++k)
3027  {
3028  if (bms_equal(set_masks[k], candidate_set))
3029  {
3030  dup_of = k;
3031  break;
3032  }
3033  }
3034  }
3035  else if (j_size < list_length(candidate))
3036  {
3037  j_size = list_length(candidate);
3038  j = i;
3039  }
3040 
3041  if (dup_of > 0)
3042  {
3043  orig_sets[dup_of] = lappend(orig_sets[dup_of], candidate);
3044  bms_free(candidate_set);
3045  }
3046  else
3047  {
3048  int k;
3049  int n_adj = 0;
3050 
3051  orig_sets[i] = list_make1(candidate);
3052  set_masks[i] = candidate_set;
3053 
3054  /* fill in adjacency list; no need to compare equal-size sets */
3055 
3056  for (k = j - 1; k > 0; --k)
3057  {
3058  if (bms_is_subset(set_masks[k], candidate_set))
3059  adjacency_buf[++n_adj] = k;
3060  }
3061 
3062  if (n_adj > 0)
3063  {
3064  adjacency_buf[0] = n_adj;
3065  adjacency[i] = palloc((n_adj + 1) * sizeof(short));
3066  memcpy(adjacency[i], adjacency_buf, (n_adj + 1) * sizeof(short));
3067  }
3068  else
3069  adjacency[i] = NULL;
3070 
3071  ++i;
3072  }
3073  }
3074 
3075  num_sets = i - 1;
3076 
3077  /*
3078  * Apply the graph matching algorithm to do the work.
3079  */
3080  state = BipartiteMatch(num_sets, num_sets, adjacency);
3081 
3082  /*
3083  * Now, the state->pair* fields have the info we need to assign sets to
3084  * chains. Two sets (u,v) belong to the same chain if pair_uv[u] = v or
3085  * pair_vu[v] = u (both will be true, but we check both so that we can do
3086  * it in one pass)
3087  */
3088  chains = palloc0((num_sets + 1) * sizeof(int));
3089 
3090  for (i = 1; i <= num_sets; ++i)
3091  {
3092  int u = state->pair_vu[i];
3093  int v = state->pair_uv[i];
3094 
3095  if (u > 0 && u < i)
3096  chains[i] = chains[u];
3097  else if (v > 0 && v < i)
3098  chains[i] = chains[v];
3099  else
3100  chains[i] = ++num_chains;
3101  }
3102 
3103  /* build result lists. */
3104  results = palloc0((num_chains + 1) * sizeof(List *));
3105 
3106  for (i = 1; i <= num_sets; ++i)
3107  {
3108  int c = chains[i];
3109 
3110  Assert(c > 0);
3111 
3112  results[c] = list_concat(results[c], orig_sets[i]);
3113  }
3114 
3115  /* push any empty sets back on the first list. */
3116  while (num_empty-- > 0)
3117  results[1] = lcons(NIL, results[1]);
3118 
3119  /* make result list */
3120  for (i = 1; i <= num_chains; ++i)
3121  result = lappend(result, results[i]);
3122 
3123  /*
3124  * Free all the things.
3125  *
3126  * (This is over-fussy for small sets but for large sets we could have
3127  * tied up a nontrivial amount of memory.)
3128  */
3129  BipartiteMatchFree(state);
3130  pfree(results);
3131  pfree(chains);
3132  for (i = 1; i <= num_sets; ++i)
3133  if (adjacency[i])
3134  pfree(adjacency[i]);
3135  pfree(adjacency);
3136  pfree(adjacency_buf);
3137  pfree(orig_sets);
3138  for (i = 1; i <= num_sets; ++i)
3139  bms_free(set_masks[i]);
3140  pfree(set_masks);
3141 
3142  return result;
3143 }
void BipartiteMatchFree(BipartiteMatchState *state)
BipartiteMatchState * BipartiteMatch(int u_size, int v_size, short **adjacency)
bool bms_equal(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:142
bool bms_is_subset(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:412
void bms_free(Bitmapset *a)
Definition: bitmapset.c:239
void * palloc0(Size size)
Definition: mcxt.c:1346

References Assert, BipartiteMatch(), BipartiteMatchFree(), bms_add_member(), bms_equal(), bms_free(), bms_is_subset(), for_each_cell, i, j, lappend(), lcons(), lfirst, lfirst_int, list_concat(), list_head(), list_length(), list_make1, lnext(), NIL, palloc(), palloc0(), and pfree().

Referenced by preprocess_grouping_sets().
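
In effect the function solves a minimum chain decomposition problem: each chain of sets totally ordered by inclusion becomes one rollup, so fewer chains mean fewer sorts. The standalone sketch below shows the idea on bitmask sets with a simple greedy assignment; note that the real code uses BipartiteMatch() (Hopcroft-Karp) precisely because greedy chaining is not guaranteed to find the minimum number of chains:

#include <stdio.h>

int main(void)
{
	/* GROUPING SETS ((a,b), (a), (c)) as bitmasks: a=bit0, b=bit1, c=bit2,
	 * pre-sorted by descending set size. */
	unsigned	sets[] = {0x3, 0x1, 0x4};
	int			nsets = 3;
	int			chain[3];
	int			tail[3];		/* index of the current tail set per chain */
	int			nchains = 0;

	for (int i = 0; i < nsets; i++)
	{
		int			c = -1;

		/* join the first chain whose tail is a superset of this set */
		for (int j = 0; j < nchains; j++)
		{
			if ((sets[i] & sets[tail[j]]) == sets[i])
			{
				c = j;
				break;
			}
		}
		if (c < 0)
			c = nchains++;		/* start a new chain => a new rollup */
		chain[i] = c;
		tail[c] = i;
	}

	for (int i = 0; i < nsets; i++)
		printf("set %d -> rollup chain %d\n", i, chain[i]);
	/* (a,b) and (a) share chain 0, so one sort covers both;
	 * (c) forms chain 1 and needs its own sort. */
	return 0;
}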

◆ gather_grouping_paths()

static void gather_grouping_paths ( PlannerInfo root,
RelOptInfo rel 
)
static

Definition at line 7500 of file planner.c.

7501 {
7502  ListCell *lc;
7503  Path *cheapest_partial_path;
7504  List *groupby_pathkeys;
7505 
7506  /*
7507  * This occurs after any partial aggregation has taken place, so trim off
7508  * any pathkeys added for ORDER BY / DISTINCT aggregates.
7509  */
7510  if (list_length(root->group_pathkeys) > root->num_groupby_pathkeys)
7511  groupby_pathkeys = list_copy_head(root->group_pathkeys,
7512  root->num_groupby_pathkeys);
7513  else
7514  groupby_pathkeys = root->group_pathkeys;
7515 
7516  /* Try Gather for unordered paths and Gather Merge for ordered ones. */
7517  generate_useful_gather_paths(root, rel, true);
7518 
7519  cheapest_partial_path = linitial(rel->partial_pathlist);
7520 
7521  /* XXX Shouldn't this also consider the group-key-reordering? */
7522  foreach(lc, rel->partial_pathlist)
7523  {
7524  Path *path = (Path *) lfirst(lc);
7525  bool is_sorted;
7526  int presorted_keys;
7527  double total_groups;
7528 
7529  is_sorted = pathkeys_count_contained_in(groupby_pathkeys,
7530  path->pathkeys,
7531  &presorted_keys);
7532 
7533  if (is_sorted)
7534  continue;
7535 
7536  /*
7537  * Try at least sorting the cheapest path and also try incrementally
7538  * sorting any path which is partially sorted already (no need to deal
7539  * with paths which have presorted keys when incremental sort is
7540  * disabled unless it's the cheapest input path).
7541  */
7542  if (path != cheapest_partial_path &&
7543  (presorted_keys == 0 || !enable_incremental_sort))
7544  continue;
7545 
7546  total_groups = path->rows * path->parallel_workers;
7547 
7548  /*
7549  * We've no need to consider both a sort and incremental sort. We'll
7550  * just do a sort if there are no presorted keys and an incremental
7551  * sort when there are presorted keys.
7552  */
7553  if (presorted_keys == 0 || !enable_incremental_sort)
7554  path = (Path *) create_sort_path(root, rel, path,
7555  groupby_pathkeys,
7556  -1.0);
7557  else
7558  path = (Path *) create_incremental_sort_path(root,
7559  rel,
7560  path,
7561  groupby_pathkeys,
7562  presorted_keys,
7563  -1.0);
7564 
7565  path = (Path *)
7566  create_gather_merge_path(root,
7567  rel,
7568  path,
7569  rel->reltarget,
7570  groupby_pathkeys,
7571  NULL,
7572  &total_groups);
7573 
7574  add_path(rel, path);
7575  }
7576 }
List * list_copy_head(const List *oldlist, int len)
Definition: list.c:1593

References add_path(), create_gather_merge_path(), create_incremental_sort_path(), create_sort_path(), enable_incremental_sort, generate_useful_gather_paths(), lfirst, linitial, list_copy_head(), list_length(), Path::parallel_workers, RelOptInfo::partial_pathlist, Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::reltarget, root, and Path::rows.

Referenced by add_paths_to_grouping_rel(), and create_ordinary_grouping_paths().
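
The per-path decision above reduces to a small table: skip paths that are already fully sorted, consider only the cheapest partial path when no keys are presorted or incremental sort is disabled, and otherwise pick a full or incremental sort. A simplified standalone restatement (not planner code):

#include <stdbool.h>
#include <stdio.h>

typedef enum
{
	SKIP_PATH,
	FULL_SORT,
	INCREMENTAL_SORT,
	ALREADY_SORTED
} SortChoice;

static SortChoice
choose_sort(bool is_sorted, bool is_cheapest, int presorted_keys,
			bool enable_incremental_sort)
{
	if (is_sorted)
		return ALREADY_SORTED;	/* nothing to do; the loop skips it */
	if (!is_cheapest && (presorted_keys == 0 || !enable_incremental_sort))
		return SKIP_PATH;		/* not worth sorting this path */
	if (presorted_keys == 0 || !enable_incremental_sort)
		return FULL_SORT;
	return INCREMENTAL_SORT;
}

int main(void)
{
	printf("%d\n", choose_sort(false, true, 0, true));	/* FULL_SORT */
	printf("%d\n", choose_sort(false, false, 2, true));	/* INCREMENTAL_SORT */
	printf("%d\n", choose_sort(false, false, 0, true));	/* SKIP_PATH */
	return 0;
}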

◆ generate_setop_child_grouplist()

static List * generate_setop_child_grouplist ( SetOperationStmt op,
List targetlist 
)
static

Definition at line 8063 of file planner.c.

8064 {
8065  List *grouplist = copyObject(op->groupClauses);
8066  ListCell *lg;
8067  ListCell *lt;
8068 
8069  lg = list_head(grouplist);
8070  foreach(lt, targetlist)
8071  {
8072  TargetEntry *tle = (TargetEntry *) lfirst(lt);
8073  SortGroupClause *sgc;
8074 
8075  /* resjunk columns could have sortgrouprefs. Leave these alone */
8076  if (tle->resjunk)
8077  continue;
8078 
8079  /* we expect every non-resjunk target to have a SortGroupClause */
8080  Assert(lg != NULL);
8081  sgc = (SortGroupClause *) lfirst(lg);
8082  lg = lnext(grouplist, lg);
8083 
8084  /* assign a tleSortGroupRef, or reuse the existing one */
8085  sgc->tleSortGroupRef = assignSortGroupRef(tle, targetlist);
8086  }
8087  Assert(lg == NULL);
8088  return grouplist;
8089 }
Index assignSortGroupRef(TargetEntry *tle, List *tlist)

References Assert, assignSortGroupRef(), copyObject, lfirst, list_head(), lnext(), and SortGroupClause::tleSortGroupRef.

Referenced by standard_qp_callback().

◆ get_cheapest_fractional_path()

Path* get_cheapest_fractional_path ( RelOptInfo rel,
double  tuple_fraction 
)

Definition at line 6422 of file planner.c.

6423 {
6424  Path *best_path = rel->cheapest_total_path;
6425  ListCell *l;
6426 
6427  /* If all tuples will be retrieved, just return the cheapest-total path */
6428  if (tuple_fraction <= 0.0)
6429  return best_path;
6430 
6431  /* Convert absolute # of tuples to a fraction; no need to clamp to 0..1 */
6432  if (tuple_fraction >= 1.0 && best_path->rows > 0)
6433  tuple_fraction /= best_path->rows;
6434 
6435  foreach(l, rel->pathlist)
6436  {
6437  Path *path = (Path *) lfirst(l);
6438 
6439  if (path == rel->cheapest_total_path ||
6440  compare_fractional_path_costs(best_path, path, tuple_fraction) <= 0)
6441  continue;
6442 
6443  best_path = path;
6444  }
6445 
6446  return best_path;
6447 }
int compare_fractional_path_costs(Path *path1, Path *path2, double fraction)
Definition: pathnode.c:115

References RelOptInfo::cheapest_total_path, compare_fractional_path_costs(), lfirst, RelOptInfo::pathlist, and Path::rows.

Referenced by make_subplan(), and standard_planner().
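
compare_fractional_path_costs() interpolates between startup and total cost, i.e. cost(f) = startup_cost + f * (total_cost - startup_cost). A standalone sketch with made-up numbers, showing why a path with a high total cost but a tiny startup cost can win under a small tuple_fraction (the CostedPath struct is a simplified stand-in for Path):

#include <stdio.h>

typedef struct
{
	double		startup_cost;
	double		total_cost;
} CostedPath;

/* cost to produce the first `fraction` of the path's output */
static double
fractional_cost(const CostedPath *p, double fraction)
{
	return p->startup_cost + fraction * (p->total_cost - p->startup_cost);
}

int main(void)
{
	CostedPath	sort_path = {90.0, 100.0};	/* cheap total, big startup */
	CostedPath	index_path = {1.0, 500.0};	/* pricey total, tiny startup */
	double		fraction = 10.0 / 1000.0;	/* e.g. LIMIT 10 of ~1000 rows */

	printf("sort:  %.2f\n", fractional_cost(&sort_path, fraction));	/* 90.10 */
	printf("index: %.2f\n", fractional_cost(&index_path, fraction));	/* 5.99 */
	return 0;
}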

◆ get_number_of_groups()

static double get_number_of_groups ( PlannerInfo root,
double  path_rows,
grouping_sets_data gd,
List target_list 
)
static

Definition at line 3622 of file planner.c.

3626 {
3627  Query *parse = root->parse;
3628  double dNumGroups;
3629 
3630  if (parse->groupClause)
3631  {
3632  List *groupExprs;
3633 
3634  if (parse->groupingSets)
3635  {
3636  /* Add up the estimates for each grouping set */
3637  ListCell *lc;
3638 
3639  Assert(gd); /* keep Coverity happy */
3640 
3641  dNumGroups = 0;
3642 
3643  foreach(lc, gd->rollups)
3644  {
3645  RollupData *rollup = lfirst_node(RollupData, lc);
3646  ListCell *lc2;
3647  ListCell *lc3;
3648 
3649  groupExprs = get_sortgrouplist_exprs(rollup->groupClause,
3650  target_list);
3651 
3652  rollup->numGroups = 0.0;
3653 
3654  forboth(lc2, rollup->gsets, lc3, rollup->gsets_data)
3655  {
3656  List *gset = (List *) lfirst(lc2);
3657  GroupingSetData *gs = lfirst_node(GroupingSetData, lc3);
3658  double numGroups = estimate_num_groups(root,
3659  groupExprs,
3660  path_rows,
3661  &gset,
3662  NULL);
3663 
3664  gs->numGroups = numGroups;
3665  rollup->numGroups += numGroups;
3666  }
3667 
3668  dNumGroups += rollup->numGroups;
3669  }
3670 
3671  if (gd->hash_sets_idx)
3672  {
3673  ListCell *lc2;
3674 
3675  gd->dNumHashGroups = 0;
3676 
3677  groupExprs = get_sortgrouplist_exprs(parse->groupClause,
3678  target_list);
3679 
3680  forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets)
3681  {
3682  List *gset = (List *) lfirst(lc);
3683  GroupingSetData *gs = lfirst_node(GroupingSetData, lc2);
3684  double numGroups = estimate_num_groups(root,
3685  groupExprs,
3686  path_rows,
3687  &gset,
3688  NULL);
3689 
3690  gs->numGroups = numGroups;
3691  gd->dNumHashGroups += numGroups;
3692  }
3693 
3694  dNumGroups += gd->dNumHashGroups;
3695  }
3696  }
3697  else
3698  {
3699  /* Plain GROUP BY -- estimate based on optimized groupClause */
3700  groupExprs = get_sortgrouplist_exprs(root->processed_groupClause,
3701  target_list);
3702 
3703  dNumGroups = estimate_num_groups(root, groupExprs, path_rows,
3704  NULL, NULL);
3705  }
3706  }
3707  else if (parse->groupingSets)
3708  {
3709  /* Empty grouping sets ... one result row for each one */
3710  dNumGroups = list_length(parse->groupingSets);
3711  }
3712  else if (parse->hasAggs || root->hasHavingQual)
3713  {
3714  /* Plain aggregation, one result row */
3715  dNumGroups = 1;
3716  }
3717  else
3718  {
3719  /* Not grouping */
3720  dNumGroups = 1;
3721  }
3722 
3723  return dNumGroups;
3724 }
List * hash_sets_idx
Definition: planner.c:98

References Assert, grouping_sets_data::dNumHashGroups, estimate_num_groups(), forboth, get_sortgrouplist_exprs(), RollupData::groupClause, RollupData::gsets, RollupData::gsets_data, grouping_sets_data::hash_sets_idx, lfirst, lfirst_node, list_length(), GroupingSetData::numGroups, RollupData::numGroups, parse(), grouping_sets_data::rollups, root, and grouping_sets_data::unsortable_sets.

Referenced by create_ordinary_grouping_paths(), and create_partial_grouping_paths().
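
For grouping sets the estimate is additive: each grouping set is estimated individually, summed within its rollup, and then summed across rollups and separately hashable sets. A trivial standalone sketch with invented per-set estimates:

#include <stdio.h>

int main(void)
{
	/* ROLLUP(a, b) expands to grouping sets (a,b), (a), () */
	double		rollup_sets[] = {50.0, 10.0, 1.0};
	/* plus one separately hashed grouping set (c) */
	double		hashed_sets[] = {25.0};
	double		dNumGroups = 0.0;

	for (int i = 0; i < 3; i++)
		dNumGroups += rollup_sets[i];	/* rollup->numGroups accumulation */
	for (int i = 0; i < 1; i++)
		dNumGroups += hashed_sets[i];	/* gd->dNumHashGroups accumulation */

	printf("estimated total groups: %g\n", dNumGroups);	/* 86 */
	return 0;
}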

◆ group_by_has_partkey()

static bool group_by_has_partkey ( RelOptInfo input_rel,
List targetList,
List groupClause 
)
static

Definition at line 8006 of file planner.c.

8009 {
8010  List *groupexprs = get_sortgrouplist_exprs(groupClause, targetList);
8011  int cnt = 0;
8012  int partnatts;
8013 
8014  /* Input relation should be partitioned. */
8015  Assert(input_rel->part_scheme);
8016 
8017  /* Rule out early, if there are no partition keys present. */
8018  if (!input_rel->partexprs)
8019  return false;
8020 
8021  partnatts = input_rel->part_scheme->partnatts;
8022 
8023  for (cnt = 0; cnt < partnatts; cnt++)
8024  {
8025  List *partexprs = input_rel->partexprs[cnt];
8026  ListCell *lc;
8027  bool found = false;
8028 
8029  foreach(lc, partexprs)
8030  {
8031  Expr *partexpr = lfirst(lc);
8032 
8033  if (list_member(groupexprs, partexpr))
8034  {
8035  found = true;
8036  break;
8037  }
8038  }
8039 
8040  /*
8041  * If none of the partition key expressions matches any of the
8042  * GROUP BY expressions, return false.
8043  */
8044  if (!found)
8045  return false;
8046  }
8047 
8048  return true;
8049 }
bool list_member(const List *list, const void *datum)
Definition: list.c:661

References Assert, get_sortgrouplist_exprs(), lfirst, and list_member().

Referenced by create_ordinary_grouping_paths().
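
The rule being enforced: full partitionwise aggregation is only safe if every partition key expression appears in the GROUP BY list, since otherwise a single output group could span several partitions. A simplified standalone restatement, using column names in place of expression trees:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool
group_by_covers_partkey(const char **partkeys, int npartkeys,
						const char **groupexprs, int ngroupexprs)
{
	for (int i = 0; i < npartkeys; i++)
	{
		bool		found = false;

		for (int j = 0; j < ngroupexprs; j++)
		{
			if (strcmp(partkeys[i], groupexprs[j]) == 0)
			{
				found = true;
				break;
			}
		}
		if (!found)
			return false;	/* some partition key is not grouped on */
	}
	return true;
}

int main(void)
{
	const char *partkeys[] = {"region"};
	const char *g1[] = {"region", "product"};
	const char *g2[] = {"product"};

	/* 1: full partitionwise aggregation is safe */
	printf("%d\n", group_by_covers_partkey(partkeys, 1, g1, 2));
	/* 0: only partial partitionwise aggregation is possible */
	printf("%d\n", group_by_covers_partkey(partkeys, 1, g2, 1));
	return 0;
}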

◆ grouping_planner()

static void grouping_planner ( PlannerInfo root,
double  tuple_fraction,
SetOperationStmt setops 
)
static

Definition at line 1302 of file planner.c.

1304 {
1305  Query *parse = root->parse;
1306  int64 offset_est = 0;
1307  int64 count_est = 0;
1308  double limit_tuples = -1.0;
1309  bool have_postponed_srfs = false;
1310  PathTarget *final_target;
1311  List *final_targets;
1312  List *final_targets_contain_srfs;
1313  bool final_target_parallel_safe;
1314  RelOptInfo *current_rel;
1315  RelOptInfo *final_rel;
1316  FinalPathExtraData extra;
1317  ListCell *lc;
1318 
1319  /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
1320  if (parse->limitCount || parse->limitOffset)
1321  {
1322  tuple_fraction = preprocess_limit(root, tuple_fraction,
1323  &offset_est, &count_est);
1324 
1325  /*
1326  * If we have a known LIMIT, and don't have an unknown OFFSET, we can
1327  * estimate the effects of using a bounded sort.
1328  */
1329  if (count_est > 0 && offset_est >= 0)
1330  limit_tuples = (double) count_est + (double) offset_est;
1331  }
1332 
1333  /* Make tuple_fraction accessible to lower-level routines */
1334  root->tuple_fraction = tuple_fraction;
1335 
1336  if (parse->setOperations)
1337  {
1338  /*
1339  * Construct Paths for set operations. The results will not need any
1340  * work except perhaps a top-level sort and/or LIMIT. Note that any
1341  * special work for recursive unions is the responsibility of
1342  * plan_set_operations.
1343  */
1344  current_rel = plan_set_operations(root);
1345 
1346  /*
1347  * We should not need to call preprocess_targetlist, since we must be
1348  * in a SELECT query node. Instead, use the processed_tlist returned
1349  * by plan_set_operations (since this tells whether it returned any
1350  * resjunk columns!), and transfer any sort key information from the
1351  * original tlist.
1352  */
1353  Assert(parse->commandType == CMD_SELECT);
1354 
1355  /* for safety, copy processed_tlist instead of modifying in-place */
1356  root->processed_tlist =
1357  postprocess_setop_tlist(copyObject(root->processed_tlist),
1358  parse->targetList);
1359 
1360  /* Also extract the PathTarget form of the setop result tlist */
1361  final_target = current_rel->cheapest_total_path->pathtarget;
1362 
1363  /* And check whether it's parallel safe */
1364  final_target_parallel_safe =
1365  is_parallel_safe(root, (Node *) final_target->exprs);
1366 
1367  /* The setop result tlist couldn't contain any SRFs */
1368  Assert(!parse->hasTargetSRFs);
1369  final_targets = final_targets_contain_srfs = NIL;
1370 
1371  /*
1372  * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have
1373  * checked already, but let's make sure).
1374  */
1375  if (parse->rowMarks)
1376  ereport(ERROR,
1377  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1378  /*------
1379  translator: %s is a SQL row locking clause such as FOR UPDATE */
1380  errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
1381  LCS_asString(linitial_node(RowMarkClause,
1382  parse->rowMarks)->strength))));
1383 
1384  /*
1385  * Calculate pathkeys that represent result ordering requirements
1386  */
1387  Assert(parse->distinctClause == NIL);
1388  root->sort_pathkeys = make_pathkeys_for_sortclauses(root,
1389  parse->sortClause,
1390  root->processed_tlist);
1391  }
1392  else
1393  {
1394  /* No set operations, do regular planning */
1395  PathTarget *sort_input_target;
1396  List *sort_input_targets;
1397  List *sort_input_targets_contain_srfs;
1398  bool sort_input_target_parallel_safe;
1399  PathTarget *grouping_target;
1400  List *grouping_targets;
1401  List *grouping_targets_contain_srfs;
1402  bool grouping_target_parallel_safe;
1403  PathTarget *scanjoin_target;
1404  List *scanjoin_targets;
1405  List *scanjoin_targets_contain_srfs;
1406  bool scanjoin_target_parallel_safe;
1407  bool scanjoin_target_same_exprs;
1408  bool have_grouping;
1409  WindowFuncLists *wflists = NULL;
1410  List *activeWindows = NIL;
1411  grouping_sets_data *gset_data = NULL;
1412  standard_qp_extra qp_extra;
1413 
1414  /* A recursive query should always have setOperations */
1415  Assert(!root->hasRecursion);
1416 
1417  /* Preprocess grouping sets and GROUP BY clause, if any */
1418  if (parse->groupingSets)
1419  {
1420  gset_data = preprocess_grouping_sets(root);
1421  }
1422  else if (parse->groupClause)
1423  {
1424  /* Preprocess regular GROUP BY clause, if any */
1425  root->processed_groupClause = preprocess_groupclause(root, NIL);
1426  /* Remove any redundant GROUP BY columns */
1427  remove_useless_groupby_columns(root);
1428  }
1429 
1430  /*
1431  * Preprocess targetlist. Note that much of the remaining planning
1432  * work will be done with the PathTarget representation of tlists, but
1433  * we must also maintain the full representation of the final tlist so
1434  * that we can transfer its decoration (resnames etc) to the topmost
1435  * tlist of the finished Plan. This is kept in processed_tlist.
1436  */
1437  preprocess_targetlist(root);
1438 
1439  /*
1440  * Mark all the aggregates with resolved aggtranstypes, and detect
1441  * aggregates that are duplicates or can share transition state. We
1442  * must do this before slicing and dicing the tlist into various
1443  * pathtargets, else some copies of the Aggref nodes might escape
1444  * being marked.
1445  */
1446  if (parse->hasAggs)
1447  {
1448  preprocess_aggrefs(root, (Node *) root->processed_tlist);
1449  preprocess_aggrefs(root, (Node *) parse->havingQual);
1450  }
1451 
1452  /*
1453  * Locate any window functions in the tlist. (We don't need to look
1454  * anywhere else, since expressions used in ORDER BY will be in there
1455  * too.) Note that they could all have been eliminated by constant
1456  * folding, in which case we don't need to do any more work.
1457  */
1458  if (parse->hasWindowFuncs)
1459  {
1460  wflists = find_window_functions((Node *) root->processed_tlist,
1461  list_length(parse->windowClause));
1462  if (wflists->numWindowFuncs > 0)
1463  {
1464  /*
1465  * See if any modifications can be made to each WindowClause
1466  * to allow the executor to execute the WindowFuncs more
1467  * quickly.
1468  */
1469  optimize_window_clauses(root, wflists);
1470 
1471  activeWindows = select_active_windows(root, wflists);
1472  }
1473  else
1474  parse->hasWindowFuncs = false;
1475  }
1476 
1477  /*
1478  * Preprocess MIN/MAX aggregates, if any. Note: be careful about
1479  * adding logic between here and the query_planner() call. Anything
1480  * that is needed in MIN/MAX-optimizable cases will have to be
1481  * duplicated in planagg.c.
1482  */
1483  if (parse->hasAggs)
1484  preprocess_minmax_aggregates(root);
1485 
1486  /*
1487  * Figure out whether there's a hard limit on the number of rows that
1488  * query_planner's result subplan needs to return. Even if we know a
1489  * hard limit overall, it doesn't apply if the query has any
1490  * grouping/aggregation operations, or SRFs in the tlist.
1491  */
1492  if (parse->groupClause ||
1493  parse->groupingSets ||
1494  parse->distinctClause ||
1495  parse->hasAggs ||
1496  parse->hasWindowFuncs ||
1497  parse->hasTargetSRFs ||
1498  root->hasHavingQual)
1499  root->limit_tuples = -1.0;
1500  else
1501  root->limit_tuples = limit_tuples;
1502 
1503  /* Set up data needed by standard_qp_callback */
1504  qp_extra.activeWindows = activeWindows;
1505  qp_extra.gset_data = gset_data;
1506 
1507  /*
1508  * If we're a subquery for a set operation, store the SetOperationStmt
1509  * in qp_extra.
1510  */
1511  qp_extra.setop = setops;
1512 
1513  /*
1514  * Generate the best unsorted and presorted paths for the scan/join
1515  * portion of this Query, ie the processing represented by the
1516  * FROM/WHERE clauses. (Note there may not be any presorted paths.)
1517  * We also generate (in standard_qp_callback) pathkey representations
1518  * of the query's sort clause, distinct clause, etc.
1519  */
1520  current_rel = query_planner(root, standard_qp_callback, &qp_extra);
1521 
1522  /*
1523  * Convert the query's result tlist into PathTarget format.
1524  *
1525  * Note: this cannot be done before query_planner() has performed
1526  * appendrel expansion, because that might add resjunk entries to
1527  * root->processed_tlist. Waiting till afterwards is also helpful
1528  * because the target width estimates can use per-Var width numbers
1529  * that were obtained within query_planner().
1530  */
1531  final_target = create_pathtarget(root, root->processed_tlist);
1532  final_target_parallel_safe =
1533  is_parallel_safe(root, (Node *) final_target->exprs);
1534 
1535  /*
1536  * If ORDER BY was given, consider whether we should use a post-sort
1537  * projection, and compute the adjusted target for preceding steps if
1538  * so.
1539  */
1540  if (parse->sortClause)
1541  {
1542  sort_input_target = make_sort_input_target(root,
1543  final_target,
1544  &have_postponed_srfs);
1545  sort_input_target_parallel_safe =
1546  is_parallel_safe(root, (Node *) sort_input_target->exprs);
1547  }
1548  else
1549  {
1550  sort_input_target = final_target;
1551  sort_input_target_parallel_safe = final_target_parallel_safe;
1552  }
1553 
1554  /*
1555  * If we have window functions to deal with, the output from any
1556  * grouping step needs to be what the window functions want;
1557  * otherwise, it should be sort_input_target.
1558  */
1559  if (activeWindows)
1560  {
1561  grouping_target = make_window_input_target(root,
1562  final_target,
1563  activeWindows);
1564  grouping_target_parallel_safe =
1565  is_parallel_safe(root, (Node *) grouping_target->exprs);
1566  }
1567  else
1568  {
1569  grouping_target = sort_input_target;
1570  grouping_target_parallel_safe = sort_input_target_parallel_safe;
1571  }
1572 
1573  /*
1574  * If we have grouping or aggregation to do, the topmost scan/join
1575  * plan node must emit what the grouping step wants; otherwise, it
1576  * should emit grouping_target.
1577  */
1578  have_grouping = (parse->groupClause || parse->groupingSets ||
1579  parse->hasAggs || root->hasHavingQual);
1580  if (have_grouping)
1581  {
1582  scanjoin_target = make_group_input_target(root, final_target);
1583  scanjoin_target_parallel_safe =
1584  is_parallel_safe(root, (Node *) scanjoin_target->exprs);
1585  }
1586  else
1587  {
1588  scanjoin_target = grouping_target;
1589  scanjoin_target_parallel_safe = grouping_target_parallel_safe;
1590  }
1591 
1592  /*
1593  * If there are any SRFs in the targetlist, we must separate each of
1594  * these PathTargets into SRF-computing and SRF-free targets. Replace
1595  * each of the named targets with a SRF-free version, and remember the
1596  * list of additional projection steps we need to add afterwards.
1597  */
1598  if (parse->hasTargetSRFs)
1599  {
1600  /* final_target doesn't recompute any SRFs in sort_input_target */
1601  split_pathtarget_at_srfs(root, final_target, sort_input_target,
1602  &final_targets,
1603  &final_targets_contain_srfs);
1604  final_target = linitial_node(PathTarget, final_targets);
1605  Assert(!linitial_int(final_targets_contain_srfs));
1606  /* likewise for sort_input_target vs. grouping_target */
1607  split_pathtarget_at_srfs(root, sort_input_target, grouping_target,
1608  &sort_input_targets,
1609  &sort_input_targets_contain_srfs);
1610  sort_input_target = linitial_node(PathTarget, sort_input_targets);
1611  Assert(!linitial_int(sort_input_targets_contain_srfs));
1612  /* likewise for grouping_target vs. scanjoin_target */
1613  split_pathtarget_at_srfs(root, grouping_target, scanjoin_target,
1614  &grouping_targets,
1615  &grouping_targets_contain_srfs);
1616  grouping_target = linitial_node(PathTarget, grouping_targets);
1617  Assert(!linitial_int(grouping_targets_contain_srfs));
1618  /* scanjoin_target will not have any SRFs precomputed for it */
1619  split_pathtarget_at_srfs(root, scanjoin_target, NULL,
1620  &scanjoin_targets,
1621  &scanjoin_targets_contain_srfs);
1622  scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
1623  Assert(!linitial_int(scanjoin_targets_contain_srfs));
1624  }
1625  else
1626  {
1627  /* initialize lists; for most of these, dummy values are OK */
1628  final_targets = final_targets_contain_srfs = NIL;
1629  sort_input_targets = sort_input_targets_contain_srfs = NIL;
1630  grouping_targets = grouping_targets_contain_srfs = NIL;
1631  scanjoin_targets = list_make1(scanjoin_target);
1632  scanjoin_targets_contain_srfs = NIL;
1633  }
1634 
1635  /* Apply scan/join target. */
1636  scanjoin_target_same_exprs = list_length(scanjoin_targets) == 1
1637  && equal(scanjoin_target->exprs, current_rel->reltarget->exprs);
1638  apply_scanjoin_target_to_paths(root, current_rel, scanjoin_targets,
1639  scanjoin_targets_contain_srfs,
1640  scanjoin_target_parallel_safe,
1641  scanjoin_target_same_exprs);
1642 
1643  /*
1644  * Save the various upper-rel PathTargets we just computed into
1645  * root->upper_targets[]. The core code doesn't use this, but it
1646  * provides a convenient place for extensions to get at the info. For
1647  * consistency, we save all the intermediate targets, even though some
1648  * of the corresponding upperrels might not be needed for this query.
1649  */
1650  root->upper_targets[UPPERREL_FINAL] = final_target;
1651  root->upper_targets[UPPERREL_ORDERED] = final_target;
1652  root->upper_targets[UPPERREL_DISTINCT] = sort_input_target;
1653  root->upper_targets[UPPERREL_PARTIAL_DISTINCT] = sort_input_target;
1654  root->upper_targets[UPPERREL_WINDOW] = sort_input_target;
1655  root->upper_targets[UPPERREL_GROUP_AGG] = grouping_target;
1656 
1657  /*
1658  * If we have grouping and/or aggregation, consider ways to implement
1659  * that. We build a new upperrel representing the output of this
1660  * phase.
1661  */
1662  if (have_grouping)
1663  {
1664  current_rel = create_grouping_paths(root,
1665  current_rel,
1666  grouping_target,
1667  grouping_target_parallel_safe,
1668  gset_data);
1669  /* Fix things up if grouping_target contains SRFs */
1670  if (parse->hasTargetSRFs)
1671  adjust_paths_for_srfs(root, current_rel,
1672  grouping_targets,
1673  grouping_targets_contain_srfs);
1674  }
1675 
1676  /*
1677  * If we have window functions, consider ways to implement those. We
1678  * build a new upperrel representing the output of this phase.
1679  */
1680  if (activeWindows)
1681  {
1682  current_rel = create_window_paths(root,
1683  current_rel,
1684  grouping_target,
1685  sort_input_target,
1686  sort_input_target_parallel_safe,
1687  wflists,
1688  activeWindows);
1689  /* Fix things up if sort_input_target contains SRFs */
1690  if (parse->hasTargetSRFs)
1691  adjust_paths_for_srfs(root, current_rel,
1692  sort_input_targets,
1693  sort_input_targets_contain_srfs);
1694  }
1695 
1696  /*
1697  * If there is a DISTINCT clause, consider ways to implement that. We
1698  * build a new upperrel representing the output of this phase.
1699  */
1700  if (parse->distinctClause)
1701  {
1702  current_rel = create_distinct_paths(root,
1703  current_rel,
1704  sort_input_target);
1705  }
1706  } /* end of if (setOperations) */
1707 
1708  /*
1709  * If ORDER BY was given, consider ways to implement that, and generate a
1710  * new upperrel containing only paths that emit the correct ordering and
1711  * project the correct final_target. We can apply the original
1712  * limit_tuples limit in sort costing here, but only if there are no
1713  * postponed SRFs.
1714  */
1715  if (parse->sortClause)
1716  {
1717  current_rel = create_ordered_paths(root,
1718  current_rel,
1719  final_target,
1720  final_target_parallel_safe,
1721  have_postponed_srfs ? -1.0 :
1722  limit_tuples);
1723  /* Fix things up if final_target contains SRFs */
1724  if (parse->hasTargetSRFs)
1725  adjust_paths_for_srfs(root, current_rel,
1726  final_targets,
1727  final_targets_contain_srfs);
1728  }
1729 
1730  /*
1731  * Now we are prepared to build the final-output upperrel.
1732  */
1733  final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1734 
1735  /*
1736  * If the input rel is marked consider_parallel and there's nothing that's
1737  * not parallel-safe in the LIMIT clause, then the final_rel can be marked
1738  * consider_parallel as well. Note that if the query has rowMarks or is
1739  * not a SELECT, consider_parallel will be false for every relation in the
1740  * query.
1741  */
1742  if (current_rel->consider_parallel &&
1743  is_parallel_safe(root, parse->limitOffset) &&
1744  is_parallel_safe(root, parse->limitCount))
1745  final_rel->consider_parallel = true;
1746 
1747  /*
1748  * If the current_rel belongs to a single FDW, so does the final_rel.
1749  */
1750  final_rel->serverid = current_rel->serverid;
1751  final_rel->userid = current_rel->userid;
1752  final_rel->useridiscurrent = current_rel->useridiscurrent;
1753  final_rel->fdwroutine = current_rel->fdwroutine;
1754 
1755  /*
1756  * Generate paths for the final_rel. Insert all surviving paths, with
1757  * LockRows, Limit, and/or ModifyTable steps added if needed.
1758  */
1759  foreach(lc, current_rel->pathlist)
1760  {
1761  Path *path = (Path *) lfirst(lc);
1762 
1763  /*
1764  * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
1765  * (Note: we intentionally test parse->rowMarks not root->rowMarks
1766  * here. If there are only non-locking rowmarks, they should be
1767  * handled by the ModifyTable node instead. However, root->rowMarks
1768  * is what goes into the LockRows node.)
1769  */
1770  if (parse->rowMarks)
1771  {
1772  path = (Path *) create_lockrows_path(root, final_rel, path,
1773  root->rowMarks,
1774  assign_special_exec_param(root));
1775  }
1776 
1777  /*
1778  * If there is a LIMIT/OFFSET clause, add the LIMIT node.
1779  */
1780  if (limit_needed(parse))
1781  {
1782  path = (Path *) create_limit_path(root, final_rel, path,
1783  parse->limitOffset,
1784  parse->limitCount,
1785  parse->limitOption,
1786  offset_est, count_est);
1787  }
1788 
1789  /*
1790  * If this is an INSERT/UPDATE/DELETE/MERGE, add the ModifyTable node.
1791  */
1792  if (parse->commandType != CMD_SELECT)
1793  {
1794  Index rootRelation;
1795  List *resultRelations = NIL;
1796  List *updateColnosLists = NIL;
1797  List *withCheckOptionLists = NIL;
1798  List *returningLists = NIL;
1799  List *mergeActionLists = NIL;
1800  List *mergeJoinConditions = NIL;
1801  List *rowMarks;
1802 
1803  if (bms_membership(root->all_result_relids) == BMS_MULTIPLE)
1804  {
1805  /* Inherited UPDATE/DELETE/MERGE */
1806  RelOptInfo *top_result_rel = find_base_rel(root,
1807  parse->resultRelation);
1808  int resultRelation = -1;
1809 
1810  /* Pass the root result rel forward to the executor. */
1811  rootRelation = parse->resultRelation;
1812 
1813  /* Add only leaf children to ModifyTable. */
1814  while ((resultRelation = bms_next_member(root->leaf_result_relids,
1815  resultRelation)) >= 0)
1816  {
1817  RelOptInfo *this_result_rel = find_base_rel(root,
1818  resultRelation);
1819 
1820  /*
1821  * Also exclude any leaf rels that have turned dummy since
1822  * being added to the list, for example, by being excluded
1823  * by constraint exclusion.
1824  */
1825  if (IS_DUMMY_REL(this_result_rel))
1826  continue;
1827 
1828  /* Build per-target-rel lists needed by ModifyTable */
1829  resultRelations = lappend_int(resultRelations,
1830  resultRelation);
1831  if (parse->commandType == CMD_UPDATE)
1832  {
1833  List *update_colnos = root->update_colnos;
1834 
1835  if (this_result_rel != top_result_rel)
1836  update_colnos =
1837  adjust_inherited_attnums_multilevel(root,
1838  update_colnos,
1839  this_result_rel->relid,
1840  top_result_rel->relid);
1841  updateColnosLists = lappend(updateColnosLists,
1842  update_colnos);
1843  }
1844  if (parse->withCheckOptions)
1845  {
1846  List *withCheckOptions = parse->withCheckOptions;
1847 
1848  if (this_result_rel != top_result_rel)
1849  withCheckOptions = (List *)
1850  adjust_appendrel_attrs_multilevel(root,
1851  (Node *) withCheckOptions,
1852  this_result_rel,
1853  top_result_rel);
1854  withCheckOptionLists = lappend(withCheckOptionLists,
1855  withCheckOptions);
1856  }
1857  if (parse->returningList)
1858  {
1859  List *returningList = parse->returningList;
1860 
1861  if (this_result_rel != top_result_rel)
1862  returningList = (List *)
1863  adjust_appendrel_attrs_multilevel(root,
1864  (Node *) returningList,
1865  this_result_rel,
1866  top_result_rel);
1867  returningLists = lappend(returningLists,
1868  returningList);
1869  }
1870  if (parse->mergeActionList)
1871  {
1872  ListCell *l;
1873  List *mergeActionList = NIL;
1874 
1875  /*
1876  * Copy MergeActions and translate stuff that
1877  * references attribute numbers.
1878  */
1879  foreach(l, parse->mergeActionList)
1880  {
1881  MergeAction *action = lfirst(l),
1882  *leaf_action = copyObject(action);
1883 
1884  leaf_action->qual =
1885  adjust_appendrel_attrs_multilevel(root,
1886  (Node *) action->qual,
1887  this_result_rel,
1888  top_result_rel);
1889  leaf_action->targetList = (List *)
1890  adjust_appendrel_attrs_multilevel(root,
1891  (Node *) action->targetList,
1892  this_result_rel,
1893  top_result_rel);
1894  if (leaf_action->commandType == CMD_UPDATE)
1895  leaf_action->updateColnos =
1896  adjust_inherited_attnums_multilevel(root,
1897  action->updateColnos,
1898  this_result_rel->relid,
1899  top_result_rel->relid);
1900  mergeActionList = lappend(mergeActionList,
1901  leaf_action);
1902  }
1903 
1904  mergeActionLists = lappend(mergeActionLists,
1905  mergeActionList);
1906  }
1907  if (parse->commandType == CMD_MERGE)
1908  {
1909  Node *mergeJoinCondition = parse->mergeJoinCondition;
1910 
1911  if (this_result_rel != top_result_rel)
1912  mergeJoinCondition =
1913  adjust_appendrel_attrs_multilevel(root,
1914  mergeJoinCondition,
1915  this_result_rel,
1916  top_result_rel);
1917  mergeJoinConditions = lappend(mergeJoinConditions,
1918  mergeJoinCondition);
1919  }
1920  }
1921 
1922  if (resultRelations == NIL)
1923  {
1924  /*
1925  * We managed to exclude every child rel, so generate a
1926  * dummy one-relation plan using info for the top target
1927  * rel (even though that may not be a leaf target).
1928  * Although it's clear that no data will be updated or
1929  * deleted, we still need to have a ModifyTable node so
1930  * that any statement triggers will be executed. (This
1931  * could be cleaner if we fixed nodeModifyTable.c to allow
1932  * zero target relations, but that probably wouldn't be a
1933  * net win.)
1934  */
1935  resultRelations = list_make1_int(parse->resultRelation);
1936  if (parse->commandType == CMD_UPDATE)
1937  updateColnosLists = list_make1(root->update_colnos);
1938  if (parse->withCheckOptions)
1939  withCheckOptionLists = list_make1(parse->withCheckOptions);
1940  if (parse->returningList)
1941  returningLists = list_make1(parse->returningList);
1942  if (parse->mergeActionList)
1943  mergeActionLists = list_make1(parse->mergeActionList);
1944  if (parse->commandType == CMD_MERGE)
1945  mergeJoinConditions = list_make1(parse->mergeJoinCondition);
1946  }
1947  }
1948  else
1949  {
1950  /* Single-relation INSERT/UPDATE/DELETE/MERGE. */
1951  rootRelation = 0; /* there's no separate root rel */
1952  resultRelations = list_make1_int(parse->resultRelation);
1953  if (parse->commandType == CMD_UPDATE)
1954  updateColnosLists = list_make1(root->update_colnos);
1955  if (parse->withCheckOptions)
1956  withCheckOptionLists = list_make1(parse->withCheckOptions);
1957  if (parse->returningList)
1958  returningLists = list_make1(parse->returningList);
1959  if (parse->mergeActionList)
1960  mergeActionLists = list_make1(parse->mergeActionList);
1961  if (parse->commandType == CMD_MERGE)
1962  mergeJoinConditions = list_make1(parse->mergeJoinCondition);
1963  }
1964 
1965  /*
1966  * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
1967  * will have dealt with fetching non-locked marked rows, else we
1968  * need to have ModifyTable do that.
1969  */
1970  if (parse->rowMarks)
1971  rowMarks = NIL;
1972  else
1973  rowMarks = root->rowMarks;
1974 
1975  path = (Path *)
1976  create_modifytable_path(root, final_rel,
1977  path,
1978  parse->commandType,
1979  parse->canSetTag,
1980  parse->resultRelation,
1981  rootRelation,
1982  root->partColsUpdated,
1983  resultRelations,
1984  updateColnosLists,
1985  withCheckOptionLists,
1986  returningLists,
1987  rowMarks,
1988  parse->onConflict,
1989  mergeActionLists,
1990  mergeJoinConditions,
1991  assign_special_exec_param(root));
1992  }
1993 
1994  /* And shove it into final_rel */
1995  add_path(final_rel, path);
1996  }
1997 
1998  /*
1999  * Generate partial paths for final_rel, too, if outer query levels might
2000  * be able to make use of them.
2001  */
2002  if (final_rel->consider_parallel && root->query_level > 1 &&
2003  !limit_needed(parse))
2004  {
2005  Assert(!parse->rowMarks && parse->commandType == CMD_SELECT);
2006  foreach(lc, current_rel->partial_pathlist)
2007  {
2008  Path *partial_path = (Path *) lfirst(lc);
2009 
2010  add_partial_path(final_rel, partial_path);
2011  }
2012  }
2013 
2014  extra.limit_needed = limit_needed(parse);
2015  extra.limit_tuples = limit_tuples;
2016  extra.count_est = count_est;
2017  extra.offset_est = offset_est;
2018 
2019  /*
2020  * If there is an FDW that's responsible for all baserels of the query,
2021  * let it consider adding ForeignPaths.
2022  */
2023  if (final_rel->fdwroutine &&
2024  final_rel->fdwroutine->GetForeignUpperPaths)
2025  final_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_FINAL,
2026  current_rel, final_rel,
2027  &extra);
2028 
2029  /* Let extensions possibly add some more paths */
2030  if (create_upper_paths_hook)
2031  (*create_upper_paths_hook) (root, UPPERREL_FINAL,
2032  current_rel, final_rel, &extra);
2033 
2034  /* Note: currently, we leave it to callers to do set_cheapest() */
2035 }
Node * adjust_appendrel_attrs_multilevel(PlannerInfo *root, Node *node, RelOptInfo *childrel, RelOptInfo *parentrel)
Definition: appendinfo.c:521
List * adjust_inherited_attnums_multilevel(PlannerInfo *root, List *attnums, Index child_relid, Index top_parent_relid)
Definition: appendinfo.c:662
BMS_Membership bms_membership(const Bitmapset *a)
Definition: bitmapset.c:781
@ BMS_MULTIPLE
Definition: bitmapset.h:73
unsigned int Index
Definition: c.h:614
WindowFuncLists * find_window_functions(Node *clause, Index maxWinRef)
Definition: clauses.c:227
bool equal(const void *a, const void *b)
Definition: equalfuncs.c:223
List * lappend_int(List *list, int datum)
Definition: list.c:357
@ CMD_MERGE
Definition: nodes.h:269
@ CMD_UPDATE
Definition: nodes.h:266
@ CMD_SELECT
Definition: nodes.h:265
int assign_special_exec_param(PlannerInfo *root)
Definition: paramassign.c:664
const char * LCS_asString(LockClauseStrength strength)
Definition: analyze.c:3213
LockRowsPath * create_lockrows_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *rowMarks, int epqParam)
Definition: pathnode.c:3662
ModifyTablePath * create_modifytable_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, CmdType operation, bool canSetTag, Index nominalRelation, Index rootRelation, bool partColsUpdated, List *resultRelations, List *updateColnosLists, List *withCheckOptionLists, List *returningLists, List *rowMarks, OnConflictExpr *onconflict, List *mergeActionLists, List *mergeJoinConditions, int epqParam)
Definition: pathnode.c:3725
@ UPPERREL_FINAL
Definition: pathnodes.h:79
#define list_make1_int(x1)
Definition: pg_list.h:227
void preprocess_minmax_aggregates(PlannerInfo *root)
Definition: planagg.c:72
RelOptInfo * query_planner(PlannerInfo *root, query_pathkeys_callback qp_callback, void *qp_extra)
Definition: planmain.c:54
static List * postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
Definition: planner.c:5670
static void remove_useless_groupby_columns(PlannerInfo *root)
Definition: planner.c:2684
static double preprocess_limit(PlannerInfo *root, double tuple_fraction, int64 *offset_est, int64 *count_est)
Definition: planner.c:2440
static PathTarget * make_window_input_target(PlannerInfo *root, PathTarget *final_target, List *activeWindows)
Definition: planner.c:6004
static RelOptInfo * create_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target)
Definition: planner.c:4753
static void optimize_window_clauses(PlannerInfo *root, WindowFuncLists *wflists)
Definition: planner.c:5707
static PathTarget * make_sort_input_target(PlannerInfo *root, PathTarget *final_target, bool *have_postponed_srfs)
Definition: planner.c:6251
static grouping_sets_data * preprocess_grouping_sets(PlannerInfo *root)
Definition: planner.c:2044
static PathTarget * make_group_input_target(PlannerInfo *root, PathTarget *final_target)
Definition: planner.c:5444
static List * select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
Definition: planner.c:5847
bool limit_needed(Query *parse)
Definition: planner.c:2625
static RelOptInfo * create_ordered_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, double limit_tuples)
Definition: planner.c:5229
static RelOptInfo * create_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, grouping_sets_data *gd)
Definition: planner.c:3744
static void standard_qp_callback(PlannerInfo *root, void *extra)
Definition: planner.c:3433
static RelOptInfo * create_window_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *input_target, PathTarget *output_target, bool output_target_parallel_safe, WindowFuncLists *wflists, List *activeWindows)
Definition: planner.c:4496
void preprocess_aggrefs(PlannerInfo *root, Node *clause)
Definition: prepagg.c:110
void preprocess_targetlist(PlannerInfo *root)
Definition: preptlist.c:62
RelOptInfo * plan_set_operations(PlannerInfo *root)
Definition: prepunion.c:99
RelOptInfo * find_base_rel(PlannerInfo *root, int relid)
Definition: relnode.c:414
Cardinality limit_tuples
Definition: pathnodes.h:3316
Index relid
Definition: pathnodes.h:912
int numWindowFuncs
Definition: clauses.h:21
List * activeWindows
Definition: planner.c:121
grouping_sets_data * gset_data
Definition: planner.c:122
SetOperationStmt * setop
Definition: planner.c:123
void split_pathtarget_at_srfs(PlannerInfo *root, PathTarget *target, PathTarget *input_target, List **targets, List **targets_contain_srfs)
Definition: tlist.c:881
#define create_pathtarget(root, tlist)
Definition: tlist.h:53

References generate_unaccent_rules::action, standard_qp_extra::activeWindows, add_partial_path(), add_path(), adjust_appendrel_attrs_multilevel(), adjust_inherited_attnums_multilevel(), adjust_paths_for_srfs(), apply_scanjoin_target_to_paths(), Assert, assign_special_exec_param(), bms_membership(), BMS_MULTIPLE, bms_next_member(), RelOptInfo::cheapest_total_path, CMD_MERGE, CMD_SELECT, CMD_UPDATE, RelOptInfo::consider_parallel, copyObject, FinalPathExtraData::count_est, create_distinct_paths(), create_grouping_paths(), create_limit_path(), create_lockrows_path(), create_modifytable_path(), create_ordered_paths(), create_pathtarget, create_upper_paths_hook, create_window_paths(), equal(), ereport, errcode(), errmsg(), ERROR, PathTarget::exprs, fetch_upper_rel(), find_base_rel(), find_window_functions(), standard_qp_extra::gset_data, IS_DUMMY_REL, is_parallel_safe(), lappend(), lappend_int(), LCS_asString(), lfirst, limit_needed(), FinalPathExtraData::limit_needed, FinalPathExtraData::limit_tuples, linitial_int, linitial_node, list_length(), list_make1, list_make1_int, make_group_input_target(), make_pathkeys_for_sortclauses(), make_sort_input_target(), make_window_input_target(), NIL, WindowFuncLists::numWindowFuncs, FinalPathExtraData::offset_est, optimize_window_clauses(), parse(), RelOptInfo::partial_pathlist, RelOptInfo::pathlist, plan_set_operations(), postprocess_setop_tlist(), preprocess_aggrefs(), preprocess_groupclause(), preprocess_grouping_sets(), preprocess_limit(), preprocess_minmax_aggregates(), preprocess_targetlist(), query_planner(), RelOptInfo::relid, RelOptInfo::reltarget, remove_useless_groupby_columns(), root, select_active_windows(), RelOptInfo::serverid, standard_qp_extra::setop, split_pathtarget_at_srfs(), standard_qp_callback(), UPPERREL_DISTINCT, UPPERREL_FINAL, UPPERREL_GROUP_AGG, UPPERREL_ORDERED, UPPERREL_PARTIAL_DISTINCT, UPPERREL_WINDOW, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by subquery_planner().

◆ has_volatile_pathkey()

static bool has_volatile_pathkey ( List * keys )
static

Definition at line 3207 of file planner.c.

3208 {
3209  ListCell *lc;
3210 
3211  foreach(lc, keys)
3212  {
3213  PathKey *pathkey = lfirst_node(PathKey, lc);
3214 
3215  if (pathkey->pk_eclass->ec_has_volatile)
3216  return true;
3217  }
3218 
3219  return false;
3220 }

References lfirst_node.

Referenced by adjust_group_pathkeys_for_groupagg().
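A pathkey is "volatile" when its equivalence class contains a volatile expression, as arises from a clause such as ORDER BY random(). The sketch below is illustrative only (the guard function name is hypothetical); it mirrors how adjust_group_pathkeys_for_groupagg() uses this check to give up on reordering group pathkeys, since a volatile expression cannot be assumed to evaluate consistently:

  /* Hypothetical guard mirroring the caller's use of the check */
  static bool
  pathkeys_safe_to_reorder(List *pathkeys)
  {
      /* e.g. ORDER BY random() makes reordering unsafe -- bail out */
      if (has_volatile_pathkey(pathkeys))
          return false;
      return true;
  }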

◆ is_degenerate_grouping()

static bool is_degenerate_grouping ( PlannerInfo * root )
static

Definition at line 3910 of file planner.c.

3911 {
3912  Query *parse = root->parse;
3913 
3914  return (root->hasHavingQual || parse->groupingSets) &&
3915  !parse->hasAggs && parse->groupClause == NIL;
3916 }

References NIL, parse(), and root.

Referenced by create_grouping_paths().
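Grouping is "degenerate" when the query has a HAVING clause or grouping sets but no aggregates and no GROUP BY columns, in which case the planner can emit at most one row per grouping set without doing any real grouping work (see create_degenerate_grouping_paths()). Illustrative examples, as C comments:

  /*
   * Degenerate:      SELECT 1 FROM t HAVING 1 = 1;
   *                  (HAVING present, no aggregates, no GROUP BY)
   *
   * Not degenerate:  SELECT count(*) FROM t HAVING count(*) > 0;
   *                  (hasAggs is true, so ordinary grouping applies)
   */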

◆ limit_needed()

bool limit_needed ( Query * parse )

Definition at line 2625 of file planner.c.

2626 {
2627  Node *node;
2628 
2629  node = parse->limitCount;
2630  if (node)
2631  {
2632  if (IsA(node, Const))
2633  {
2634  /* NULL indicates LIMIT ALL, ie, no limit */
2635  if (!((Const *) node)->constisnull)
2636  return true; /* LIMIT with a constant value */
2637  }
2638  else
2639  return true; /* non-constant LIMIT */
2640  }
2641 
2642  node = parse->limitOffset;
2643  if (node)
2644  {
2645  if (IsA(node, Const))
2646  {
2647  /* Treat NULL as no offset; the executor would too */
2648  if (!((Const *) node)->constisnull)
2649  {
2650  int64 offset = DatumGetInt64(((Const *) node)->constvalue);
2651 
2652  if (offset != 0)
2653  return true; /* OFFSET with a nonzero value */
2654  }
2655  }
2656  else
2657  return true; /* non-constant OFFSET */
2658  }
2659 
2660  return false; /* don't need a Limit plan node */
2661 }
#define IsA(nodeptr, _type_)
Definition: nodes.h:158
static int64 DatumGetInt64(Datum X)
Definition: postgres.h:385

References DatumGetInt64(), IsA, and parse().

Referenced by grouping_planner(), and set_rel_consider_parallel().
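The net effect is that a Limit plan node is requested only when it could change the result: LIMIT ALL (a NULL constant) and a constant OFFSET 0 are recognized as no-ops, while any non-constant expression forces a Limit node because its value is unknown at plan time. Summarized as C comments (illustrative, derived from the code above):

  /*
   * limit_needed() outcomes by example:
   *
   *   LIMIT 10               -> true   (non-null constant count)
   *   LIMIT ALL              -> false  (NULL constant means "no limit")
   *   OFFSET 0               -> false  (constant zero offset is a no-op)
   *   OFFSET 5               -> true   (nonzero constant offset)
   *   LIMIT $1 / OFFSET $1   -> true   (value unknown until execution)
   */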

◆ make_group_input_target()

static PathTarget * make_group_input_target ( PlannerInfo * root,
PathTarget * final_target 
)
static

Definition at line 5444 of file planner.c.

5445 {
5446  Query *parse = root->parse;
5447  PathTarget *input_target;
5448  List *non_group_cols;
5449  List *non_group_vars;
5450  int i;
5451  ListCell *lc;
5452 
5453  /*
5454  * We must build a target containing all grouping columns, plus any other
5455  * Vars mentioned in the query's targetlist and HAVING qual.
5456  */
5457  input_target = create_empty_pathtarget();
5458  non_group_cols = NIL;
5459 
5460  i = 0;
5461  foreach(lc, final_target->exprs)
5462  {
5463  Expr *expr = (Expr *) lfirst(lc);
5464  Index sgref = get_pathtarget_sortgroupref(final_target, i);
5465 
5466  if (sgref && root->processed_groupClause &&
5467  get_sortgroupref_clause_noerr(sgref,
5468  root->processed_groupClause) != NULL)
5469  {
5470  /*
5471  * It's a grouping column, so add it to the input target as-is.
5472  */
5473  add_column_to_pathtarget(input_target, expr, sgref);
5474  }
5475  else
5476  {
5477  /*
5478  * Non-grouping column, so just remember the expression for later
5479  * call to pull_var_clause.
5480  */
5481  non_group_cols = lappend(non_group_cols, expr);
5482  }
5483 
5484  i++;
5485  }
5486 
5487  /*
5488  * If there's a HAVING clause, we'll need the Vars it uses, too.
5489  */
5490  if (parse->havingQual)
5491  non_group_cols = lappend(non_group_cols, parse->havingQual);
5492 
5493  /*
5494  * Pull out all the Vars mentioned in non-group cols (plus HAVING), and
5495  * add them to the input target if not already present. (A Var used
5496  * directly as a GROUP BY item will be present already.) Note this
5497  * includes Vars used in resjunk items, so we are covering the needs of
5498  * ORDER BY and window specifications. Vars used within Aggrefs and
5499  * WindowFuncs will be pulled out here, too.
5500  */
5501  non_group_vars = pull_var_clause((Node *) non_group_cols,
5502  PVC_RECURSE_AGGREGATES |
5503  PVC_RECURSE_WINDOWFUNCS |
5504  PVC_INCLUDE_PLACEHOLDERS);
5505  add_new_columns_to_pathtarget(input_target, non_group_vars);
5506 
5507  /* clean up cruft */
5508  list_free(non_group_vars);
5509  list_free(non_group_cols);
5510 
5511  /* XXX this causes some redundant cost calculation ... */
5512  return set_pathtarget_cost_width(root, input_target);
5513 }
PathTarget * set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
Definition: costsize.c:6256
void list_free(List *list)
Definition: list.c:1546
#define PVC_RECURSE_AGGREGATES
Definition: optimizer.h:187
#define PVC_RECURSE_WINDOWFUNCS
Definition: optimizer.h:189
#define PVC_INCLUDE_PLACEHOLDERS
Definition: optimizer.h:190
#define get_pathtarget_sortgroupref(target, colno)
Definition: pathnodes.h:1549
SortGroupClause * get_sortgroupref_clause_noerr(Index sortref, List *clauses)
Definition: tlist.c:443
void add_new_columns_to_pathtarget(PathTarget *target, List *exprs)
Definition: tlist.c:752
PathTarget * create_empty_pathtarget(void)
Definition: tlist.c:681
List * pull_var_clause(Node *node, int flags)
Definition: var.c:607

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), create_empty_pathtarget(), PathTarget::exprs, get_pathtarget_sortgroupref, get_sortgroupref_clause_noerr(), i, lappend(), lfirst, list_free(), NIL, parse(), pull_var_clause(), PVC_INCLUDE_PLACEHOLDERS, PVC_RECURSE_AGGREGATES, PVC_RECURSE_WINDOWFUNCS, root, and set_pathtarget_cost_width().

Referenced by grouping_planner().
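A worked example may make the split clearer (illustrative only, assuming a table t(a, b, c)):

  /*
   *   SELECT a, sum(b) FROM t GROUP BY a ORDER BY max(c);
   *
   * final_target exprs:  a, sum(b), max(c)  -- max(c) is resjunk (ORDER BY)
   *
   * input_target built:  a                  -- grouping column, added as-is
   *                      b, c               -- Vars pulled out of the two
   *                                            Aggrefs (PVC_RECURSE_AGGREGATES)
   */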

◆ make_grouping_rel()

static RelOptInfo * make_grouping_rel ( PlannerInfo * root,
RelOptInfo * input_rel,
PathTarget * target,
bool  target_parallel_safe,
Node * havingQual 
)
static

Definition at line 3857 of file planner.c.

3860 {
3861  RelOptInfo *grouped_rel;
3862 
3863  if (IS_OTHER_REL(input_rel))
3864  {
3865  grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG,
3866  input_rel->relids);
3867  grouped_rel->reloptkind = RELOPT_OTHER_UPPER_REL;
3868  }
3869  else
3870  {
3871  /*
3872  * By tradition, the relids set for the main grouping relation is
3873  * NULL. (This could be changed, but might require adjustments
3874  * elsewhere.)
3875  */
3876  grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, NULL);
3877  }
3878 
3879  /* Set target. */
3880  grouped_rel->reltarget = target;
3881 
3882  /*
3883  * If the input relation is not parallel-safe, then the grouped relation
3884  * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
3885  * target list and HAVING quals are parallel-safe.
3886  */
3887  if (input_rel->consider_parallel && target_parallel_safe &&
3888  is_parallel_safe(root, (Node *) havingQual))
3889  grouped_rel->consider_parallel = true;
3890 
3891  /*
3892  * If the input rel belongs to a single FDW, so does the grouped rel.
3893  */
3894  grouped_rel->serverid = input_rel->serverid;
3895  grouped_rel->userid = input_rel->userid;
3896  grouped_rel->useridiscurrent = input_rel->useridiscurrent;
3897  grouped_rel->fdwroutine = input_rel->fdwroutine;
3898 
3899  return grouped_rel;
3900 }
@ RELOPT_OTHER_UPPER_REL
Definition: pathnodes.h:826

References RelOptInfo::consider_parallel, fetch_upper_rel(), IS_OTHER_REL, is_parallel_safe(), RelOptInfo::relids, RELOPT_OTHER_UPPER_REL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, RelOptInfo::serverid, UPPERREL_GROUP_AGG, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by create_grouping_paths(), and create_partitionwise_grouping_paths().
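For orientation, create_grouping_paths() invokes this helper roughly as follows (simplified sketch; surrounding setup omitted). The IS_OTHER_REL branch above exists for partitionwise aggregation, where each child input relation gets its own grouped upper rel keyed by the child's relids:

  grouped_rel = make_grouping_rel(root, input_rel, target,
                                  target_parallel_safe,
                                  parse->havingQual);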

◆ make_ordered_path()

static Path * make_ordered_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * path,
Path * cheapest_path,
List * pathkeys 
)
static

Definition at line 6915 of file planner.c.

6917 {
6918  bool is_sorted;
6919  int presorted_keys;
6920 
6921  is_sorted = pathkeys_count_contained_in(pathkeys,
6922  path->pathkeys,
6923  &presorted_keys);
6924 
6925  if (!is_sorted)
6926  {
6927  /*
6928  * Try at least sorting the cheapest path and also try incrementally
6929  * sorting any path which is partially sorted already (no need to deal
6930  * with paths which have presorted keys when incremental sort is
6931  * disabled unless it's the cheapest input path).
6932  */
6933  if (path != cheapest_path &&
6934  (presorted_keys == 0 || !enable_incremental_sort))
6935  return NULL;
6936 
6937  /*
6938  * We've no need to consider both a sort and incremental sort. We'll
6939  * just do a sort if there are no presorted keys and an incremental
6940  * sort when there are presorted keys.
6941  */
6942  if (presorted_keys == 0 || !enable_incremental_sort)
6943  path = (Path *) create_sort_path(root,
6944  rel,
6945  path,
6946  pathkeys,
6947  -1.0);
6948  else
6949  path = (Path *) create_incremental_sort_path(root,
6950  rel,
6951  path,
6952  pathkeys,
6953  presorted_keys,
6954  -1.0);
6955  }
6956 
6957  return path;
6958 }

References create_incremental_sort_path(), create_sort_path(), enable_incremental_sort, Path::pathkeys, pathkeys_count_contained_in(), and root.

Referenced by add_paths_to_grouping_rel(), and create_partial_grouping_paths().
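The decision logic condenses to a small set of cases (illustrative summary of the code above):

  /*
   * make_ordered_path() outcomes:
   *
   *   input already sorted                          -> return path unchanged
   *   unsorted, not the cheapest path, and no
   *     presorted prefix (or incremental sort off)  -> return NULL (skip it)
   *   unsorted, no presorted prefix or
   *     enable_incremental_sort disabled            -> full Sort
   *   unsorted, presorted prefix present and
   *     enable_incremental_sort enabled             -> Incremental Sort
   */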

◆ make_partial_grouping_target()

static PathTarget * make_partial_grouping_target ( PlannerInfo * root,
PathTarget * grouping_target,
Node * havingQual 
)
static

Definition at line 5532 of file planner.c.

5535 {
5536  PathTarget *partial_target;
5537  List *non_group_cols;
5538  List *non_group_exprs;
5539  int i;
5540  ListCell *lc;
5541 
5542  partial_target = create_empty_pathtarget();
5543  non_group_cols = NIL;
5544 
5545  i = 0;
5546  foreach(lc, grouping_target->exprs)
5547  {
5548  Expr *expr = (Expr *) lfirst(lc);
5549  Index sgref = get_pathtarget_sortgroupref(grouping_target, i);
5550 
5551  if (sgref && root->processed_groupClause &&
5552  get_sortgroupref_clause_noerr(sgref,
5553  root->processed_groupClause) != NULL)
5554  {
5555  /*
5556  * It's a grouping column, so add it to the partial_target as-is.
5557  * (This allows the upper agg step to repeat the grouping calcs.)
5558  */
5559  add_column_to_pathtarget(partial_target, expr, sgref);
5560  }
5561  else
5562  {
5563  /*
5564  * Non-grouping column, so just remember the expression for later
5565  * call to pull_var_clause.
5566  */
5567  non_group_cols = lappend(non_group_cols, expr);
5568  }
5569 
5570  i++;
5571  }
5572 
5573  /*
5574  * If there's a HAVING clause, we'll need the Vars/Aggrefs it uses, too.
5575  */
5576  if (havingQual)
5577  non_group_cols = lappend(non_group_cols, havingQual);
5578 
5579  /*
5580  * Pull out all the Vars, PlaceHolderVars, and Aggrefs mentioned in
5581  * non-group cols (plus HAVING), and add them to the partial_target if not
5582  * already present. (An expression used directly as a GROUP BY item will
5583  * be present already.) Note this includes Vars used in resjunk items, so
5584  * we are covering the needs of ORDER BY and window specifications.
5585  */
5586  non_group_exprs = pull_var_clause((Node *) non_group_cols,
5587  PVC_INCLUDE_AGGREGATES |
5588  PVC_RECURSE_WINDOWFUNCS |
5589  PVC_INCLUDE_PLACEHOLDERS);
5590 
5591  add_new_columns_to_pathtarget(partial_target, non_group_exprs);
5592 
5593  /*
5594  * Adjust Aggrefs to put them in partial mode. At this point all Aggrefs
5595  * are at the top level of the target list, so we can just scan the list
5596  * rather than recursing through the expression trees.
5597  */
5598  foreach(lc, partial_target->exprs)
5599  {
5600  Aggref *aggref = (Aggref *) lfirst(lc);
5601 
5602  if (IsA(aggref, Aggref))
5603  {
5604  Aggref *newaggref;
5605 
5606  /*
5607  * We shouldn't need to copy the substructure of the Aggref node,
5608  * but flat-copy the node itself to avoid damaging other trees.
5609  */
5610  newaggref = makeNode(Aggref);
5611  memcpy(newaggref, aggref, sizeof(Aggref));
5612 
5613  /* For now, assume serialization is required */
5614  mark_partial_aggref(newaggref, AGGSPLIT_INITIAL_SERIAL);
5615 
5616  lfirst(lc) = newaggref;
5617  }
5618  }
5619 
5620  /* clean up cruft */
5621  list_free(non_group_exprs);
5622  list_free(non_group_cols);
5623 
5624  /* XXX this causes some redundant cost calculation ... */
5625  return set_pathtarget_cost_width(root, partial_target);
5626 }
#define PVC_INCLUDE_AGGREGATES
Definition: optimizer.h:186
void mark_partial_aggref(Aggref *agg, AggSplit aggsplit)
Definition: planner.c:5635

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), AGGSPLIT_INITIAL_SERIAL, create_empty_pathtarget(), PathTarget::exprs, get_pathtarget_sortgroupref, get_sortgroupref_clause_noerr(), i, IsA, lappend(), lfirst, list_free(), makeNode, mark_partial_aggref(), NIL, pull_var_clause(), PVC_INCLUDE_AGGREGATES, PVC_INCLUDE_PLACEHOLDERS, PVC_RECURSE_WINDOWFUNCS, root, and set_pathtarget_cost_width().

Referenced by create_partial_grouping_paths().
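A worked example, illustrative only, for a table t(a, b):

  /*
   *   SELECT a, sum(b) FROM t GROUP BY a;
   *
   * grouping_target exprs:  a, sum(b)
   *
   * partial_target built:   a       -- grouping column, added as-is
   *                         sum(b)  -- Aggref flat-copied and marked
   *                                    AGGSPLIT_INITIAL_SERIAL, so parallel
   *                                    workers emit serialized transition
   *                                    states for the final stage to combine
   */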

◆ make_pathkeys_for_window()

static List * make_pathkeys_for_window ( PlannerInfo * root,
WindowClause * wc,
List * tlist 
)
static

Definition at line 6124 of file planner.c.

6126 {
6127  List *window_pathkeys = NIL;
6128 
6129  /* Throw error if can't sort */