PostgreSQL Source Code  git master
planner.c File Reference
#include "postgres.h"
#include <limits.h>
#include <math.h>
#include "access/genam.h"
#include "access/htup_details.h"
#include "access/parallel.h"
#include "access/sysattr.h"
#include "access/table.h"
#include "access/xact.h"
#include "catalog/pg_constraint.h"
#include "catalog/pg_inherits.h"
#include "catalog/pg_proc.h"
#include "catalog/pg_type.h"
#include "executor/executor.h"
#include "executor/nodeAgg.h"
#include "foreign/fdwapi.h"
#include "jit/jit.h"
#include "lib/bipartite_match.h"
#include "lib/knapsack.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/appendinfo.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/inherit.h"
#include "optimizer/optimizer.h"
#include "optimizer/paramassign.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/plancat.h"
#include "optimizer/planmain.h"
#include "optimizer/planner.h"
#include "optimizer/prep.h"
#include "optimizer/subselect.h"
#include "optimizer/tlist.h"
#include "parser/analyze.h"
#include "parser/parse_agg.h"
#include "parser/parsetree.h"
#include "partitioning/partdesc.h"
#include "rewrite/rewriteManip.h"
#include "storage/dsm_impl.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
#include "utils/selfuncs.h"
#include "utils/syscache.h"
[Include dependency graph for planner.c omitted]

Go to the source code of this file.

Data Structures

struct  standard_qp_extra
 
struct  grouping_sets_data
 
struct  WindowClauseSortData
 

Macros

#define EXPRKIND_QUAL   0
 
#define EXPRKIND_TARGET   1
 
#define EXPRKIND_RTFUNC   2
 
#define EXPRKIND_RTFUNC_LATERAL   3
 
#define EXPRKIND_VALUES   4
 
#define EXPRKIND_VALUES_LATERAL   5
 
#define EXPRKIND_LIMIT   6
 
#define EXPRKIND_APPINFO   7
 
#define EXPRKIND_PHV   8
 
#define EXPRKIND_TABLESAMPLE   9
 
#define EXPRKIND_ARBITER_ELEM   10
 
#define EXPRKIND_TABLEFUNC   11
 
#define EXPRKIND_TABLEFUNC_LATERAL   12
 

Functions

static Node * preprocess_expression (PlannerInfo *root, Node *expr, int kind)
 
static void preprocess_qual_conditions (PlannerInfo *root, Node *jtnode)
 
static void grouping_planner (PlannerInfo *root, double tuple_fraction)
 
static grouping_sets_data * preprocess_grouping_sets (PlannerInfo *root)
 
static List * remap_to_groupclause_idx (List *groupClause, List *gsets, int *tleref_to_colnum_map)
 
static void preprocess_rowmarks (PlannerInfo *root)
 
static double preprocess_limit (PlannerInfo *root, double tuple_fraction, int64 *offset_est, int64 *count_est)
 
static void remove_useless_groupby_columns (PlannerInfo *root)
 
static List * preprocess_groupclause (PlannerInfo *root, List *force)
 
static List * extract_rollup_sets (List *groupingSets)
 
static List * reorder_grouping_sets (List *groupingSets, List *sortclause)
 
static void standard_qp_callback (PlannerInfo *root, void *extra)
 
static double get_number_of_groups (PlannerInfo *root, double path_rows, grouping_sets_data *gd, List *target_list)
 
static RelOptInfo * create_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, grouping_sets_data *gd)
 
static bool is_degenerate_grouping (PlannerInfo *root)
 
static void create_degenerate_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel)
 
static RelOptInfo * make_grouping_rel (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, Node *havingQual)
 
static void create_ordinary_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, GroupPathExtraData *extra, RelOptInfo **partially_grouped_rel_p)
 
static void consider_groupingsets_paths (PlannerInfo *root, RelOptInfo *grouped_rel, Path *path, bool is_sorted, bool can_hash, grouping_sets_data *gd, const AggClauseCosts *agg_costs, double dNumGroups)
 
static RelOptInfo * create_window_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *input_target, PathTarget *output_target, bool output_target_parallel_safe, WindowFuncLists *wflists, List *activeWindows)
 
static void create_one_window_path (PlannerInfo *root, RelOptInfo *window_rel, Path *path, PathTarget *input_target, PathTarget *output_target, WindowFuncLists *wflists, List *activeWindows)
 
static RelOptInfo * create_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel)
 
static void create_partial_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *final_distinct_rel)
 
static RelOptInfo * create_final_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *distinct_rel)
 
static RelOptInfo * create_ordered_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, double limit_tuples)
 
static PathTarget * make_group_input_target (PlannerInfo *root, PathTarget *final_target)
 
static PathTarget * make_partial_grouping_target (PlannerInfo *root, PathTarget *grouping_target, Node *havingQual)
 
static List * postprocess_setop_tlist (List *new_tlist, List *orig_tlist)
 
static List * select_active_windows (PlannerInfo *root, WindowFuncLists *wflists)
 
static PathTarget * make_window_input_target (PlannerInfo *root, PathTarget *final_target, List *activeWindows)
 
static List * make_pathkeys_for_window (PlannerInfo *root, WindowClause *wc, List *tlist)
 
static PathTarget * make_sort_input_target (PlannerInfo *root, PathTarget *final_target, bool *have_postponed_srfs)
 
static void adjust_paths_for_srfs (PlannerInfo *root, RelOptInfo *rel, List *targets, List *targets_contain_srfs)
 
static void add_paths_to_grouping_rel (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, double dNumGroups, GroupPathExtraData *extra)
 
static RelOptInfo * create_partial_grouping_paths (PlannerInfo *root, RelOptInfo *grouped_rel, RelOptInfo *input_rel, grouping_sets_data *gd, GroupPathExtraData *extra, bool force_rel_creation)
 
static void gather_grouping_paths (PlannerInfo *root, RelOptInfo *rel)
 
static bool can_partial_agg (PlannerInfo *root)
 
static void apply_scanjoin_target_to_paths (PlannerInfo *root, RelOptInfo *rel, List *scanjoin_targets, List *scanjoin_targets_contain_srfs, bool scanjoin_target_parallel_safe, bool tlist_same_exprs)
 
static void create_partitionwise_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, PartitionwiseAggregateType patype, GroupPathExtraData *extra)
 
static bool group_by_has_partkey (RelOptInfo *input_rel, List *targetList, List *groupClause)
 
static int common_prefix_cmp (const void *a, const void *b)
 
PlannedStmt * planner (Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams)
 
PlannedStmt * standard_planner (Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams)
 
PlannerInfo * subquery_planner (PlannerGlobal *glob, Query *parse, PlannerInfo *parent_root, bool hasRecursion, double tuple_fraction)
 
Expr * preprocess_phv_expression (PlannerInfo *root, Expr *expr)
 
RowMarkType select_rowmark_type (RangeTblEntry *rte, LockClauseStrength strength)
 
bool limit_needed (Query *parse)
 
void mark_partial_aggref (Aggref *agg, AggSplit aggsplit)
 
Path * get_cheapest_fractional_path (RelOptInfo *rel, double tuple_fraction)
 
Expr * expression_planner (Expr *expr)
 
Expr * expression_planner_with_deps (Expr *expr, List **relationOids, List **invalItems)
 
bool plan_cluster_use_sort (Oid tableOid, Oid indexOid)
 
int plan_create_index_workers (Oid tableOid, Oid indexOid)
 

Variables

double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION
 
int force_parallel_mode = FORCE_PARALLEL_OFF
 
bool parallel_leader_participation = true
 
planner_hook_type planner_hook = NULL
 
create_upper_paths_hook_type create_upper_paths_hook = NULL
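 
planner_hook is the entry point for extensions that want to wrap or replace the whole planner: planner() calls the hook if it is set, and standard_planner() otherwise. A minimal sketch of the conventional installation pattern follows; the names prev_planner_hook, my_planner, and the _PG_init body are illustrative, not from this file.

    #include "optimizer/planner.h"

    static planner_hook_type prev_planner_hook = NULL;

    static PlannedStmt *
    my_planner(Query *parse, const char *query_string,
               int cursorOptions, ParamListInfo boundParams)
    {
        /* chain to any earlier hook, else fall back to the standard planner */
        if (prev_planner_hook)
            return prev_planner_hook(parse, query_string,
                                     cursorOptions, boundParams);
        return standard_planner(parse, query_string,
                                cursorOptions, boundParams);
    }

    void
    _PG_init(void)
    {
        prev_planner_hook = planner_hook;
        planner_hook = my_planner;
    }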
 

Macro Definition Documentation

◆ EXPRKIND_APPINFO

#define EXPRKIND_APPINFO   7

Definition at line 88 of file planner.c.

◆ EXPRKIND_ARBITER_ELEM

#define EXPRKIND_ARBITER_ELEM   10

Definition at line 91 of file planner.c.

◆ EXPRKIND_LIMIT

#define EXPRKIND_LIMIT   6

Definition at line 87 of file planner.c.

◆ EXPRKIND_PHV

#define EXPRKIND_PHV   8

Definition at line 89 of file planner.c.

◆ EXPRKIND_QUAL

#define EXPRKIND_QUAL   0

Definition at line 81 of file planner.c.

◆ EXPRKIND_RTFUNC

#define EXPRKIND_RTFUNC   2

Definition at line 83 of file planner.c.

◆ EXPRKIND_RTFUNC_LATERAL

#define EXPRKIND_RTFUNC_LATERAL   3

Definition at line 84 of file planner.c.

◆ EXPRKIND_TABLEFUNC

#define EXPRKIND_TABLEFUNC   11

Definition at line 92 of file planner.c.

◆ EXPRKIND_TABLEFUNC_LATERAL

#define EXPRKIND_TABLEFUNC_LATERAL   12

Definition at line 93 of file planner.c.

◆ EXPRKIND_TABLESAMPLE

#define EXPRKIND_TABLESAMPLE   9

Definition at line 90 of file planner.c.

◆ EXPRKIND_TARGET

#define EXPRKIND_TARGET   1

Definition at line 82 of file planner.c.

◆ EXPRKIND_VALUES

#define EXPRKIND_VALUES   4

Definition at line 85 of file planner.c.

◆ EXPRKIND_VALUES_LATERAL

#define EXPRKIND_VALUES_LATERAL   5

Definition at line 86 of file planner.c.
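
Taken together, the EXPRKIND_* codes tell preprocess_expression() what kind of expression it was handed, since some simplifications are only appropriate for certain expression kinds. A hedged sketch of the dispatch pattern as it appears at the subquery_planner() call sites:

    /* each sub-expression of the Query is preprocessed under its own tag */
    parse->targetList = (List *)
        preprocess_expression(root, (Node *) parse->targetList,
                              EXPRKIND_TARGET);
    parse->limitOffset = preprocess_expression(root, parse->limitOffset,
                                               EXPRKIND_LIMIT);
    parse->limitCount = preprocess_expression(root, parse->limitCount,
                                              EXPRKIND_LIMIT);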

Function Documentation

◆ add_paths_to_grouping_rel()

static void add_paths_to_grouping_rel ( PlannerInfo *root,
RelOptInfo *input_rel,
RelOptInfo *grouped_rel,
RelOptInfo *partially_grouped_rel,
const AggClauseCosts *agg_costs,
grouping_sets_data *gd,
double  dNumGroups,
GroupPathExtraData *extra 
)
static

Definition at line 6211 of file planner.c.

6217 {
6218  Query *parse = root->parse;
6219  Path *cheapest_path = input_rel->cheapest_total_path;
6220  ListCell *lc;
6221  bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
6222  bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
6223  List *havingQual = (List *) extra->havingQual;
6224  AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
6225 
6226  if (can_sort)
6227  {
6228  /*
6229  * Use any available suitably-sorted path as input, and also consider
6230  * sorting the cheapest-total path.
6231  */
6232  foreach(lc, input_rel->pathlist)
6233  {
6234  ListCell *lc2;
6235  Path *path = (Path *) lfirst(lc);
6236  Path *path_original = path;
6237 
6238  List *pathkey_orderings = NIL;
6239 
6240  List *group_pathkeys = root->group_pathkeys;
6241  List *group_clauses = parse->groupClause;
6242 
6243  /* generate alternative group orderings that might be useful */
6244  pathkey_orderings = get_useful_group_keys_orderings(root,
6245  path->rows,
6246  path->pathkeys,
6247  group_pathkeys,
6248  group_clauses);
6249 
6250  Assert(list_length(pathkey_orderings) > 0);
6251 
6252  /* process all potentially interesting grouping reorderings */
6253  foreach(lc2, pathkey_orderings)
6254  {
6255  bool is_sorted;
6256  int presorted_keys = 0;
6257  PathKeyInfo *info = (PathKeyInfo *) lfirst(lc2);
6258 
6259  /* restore the path (we replace it in the loop) */
6260  path = path_original;
6261 
6262  is_sorted = pathkeys_count_contained_in(info->pathkeys,
6263  path->pathkeys,
6264  &presorted_keys);
6265 
6266  if (path == cheapest_path || is_sorted)
6267  {
6268  /* Sort the cheapest-total path if it isn't already sorted */
6269  if (!is_sorted)
6270  path = (Path *) create_sort_path(root,
6271  grouped_rel,
6272  path,
6273  info->pathkeys,
6274  -1.0);
6275 
6276  /* Now decide what to stick atop it */
6277  if (parse->groupingSets)
6278  {
6279  consider_groupingsets_paths(root, grouped_rel,
6280  path, true, can_hash,
6281  gd, agg_costs, dNumGroups);
6282  }
6283  else if (parse->hasAggs)
6284  {
6285  /*
6286  * We have aggregation, possibly with plain GROUP BY.
6287  * Make an AggPath.
6288  */
6289  add_path(grouped_rel, (Path *)
6290  create_agg_path(root,
6291  grouped_rel,
6292  path,
6293  grouped_rel->reltarget,
6294  info->clauses ? AGG_SORTED : AGG_PLAIN,
6295  AGGSPLIT_SIMPLE,
6296  info->clauses,
6297  havingQual,
6298  agg_costs,
6299  dNumGroups));
6300  }
6301  else if (group_clauses)
6302  {
6303  /*
6304  * We have GROUP BY without aggregation or grouping
6305  * sets. Make a GroupPath.
6306  */
6307  add_path(grouped_rel, (Path *)
6308  create_group_path(root,
6309  grouped_rel,
6310  path,
6311  info->clauses,
6312  havingQual,
6313  dNumGroups));
6314  }
6315  else
6316  {
6317  /* Other cases should have been handled above */
6318  Assert(false);
6319  }
6320  }
6321 
6322  /*
6323  * Now we may consider incremental sort on this path, but only
6324  * when the path is not already sorted and when incremental
6325  * sort is enabled.
6326  */
6327  if (is_sorted || !enable_incremental_sort)
6328  continue;
6329 
6330  /* Restore the input path (we might have added Sort on top). */
6331  path = path_original;
6332 
6333  /* no shared prefix, no point in building incremental sort */
6334  if (presorted_keys == 0)
6335  continue;
6336 
6337  /*
6338  * We should have already excluded pathkeys of length 1
6339  * because then presorted_keys > 0 would imply is_sorted was
6340  * true.
6341  */
6342  Assert(list_length(root->group_pathkeys) != 1);
6343 
6344  path = (Path *) create_incremental_sort_path(root,
6345  grouped_rel,
6346  path,
6347  info->pathkeys,
6348  presorted_keys,
6349  -1.0);
6350 
6351  /* Now decide what to stick atop it */
6352  if (parse->groupingSets)
6353  {
6354  consider_groupingsets_paths(root, grouped_rel,
6355  path, true, can_hash,
6356  gd, agg_costs, dNumGroups);
6357  }
6358  else if (parse->hasAggs)
6359  {
6360  /*
6361  * We have aggregation, possibly with plain GROUP BY. Make
6362  * an AggPath.
6363  */
6364  add_path(grouped_rel, (Path *)
6365  create_agg_path(root,
6366  grouped_rel,
6367  path,
6368  grouped_rel->reltarget,
6369  info->clauses ? AGG_SORTED : AGG_PLAIN,
6370  AGGSPLIT_SIMPLE,
6371  info->clauses,
6372  havingQual,
6373  agg_costs,
6374  dNumGroups));
6375  }
6376  else if (parse->groupClause)
6377  {
6378  /*
6379  * We have GROUP BY without aggregation or grouping sets.
6380  * Make a GroupPath.
6381  */
6382  add_path(grouped_rel, (Path *)
6383  create_group_path(root,
6384  grouped_rel,
6385  path,
6386  info->clauses,
6387  havingQual,
6388  dNumGroups));
6389  }
6390  else
6391  {
6392  /* Other cases should have been handled above */
6393  Assert(false);
6394  }
6395  }
6396  }
6397 
6398  /*
6399  * Instead of operating directly on the input relation, we can
6400  * consider finalizing a partially aggregated path.
6401  */
6402  if (partially_grouped_rel != NULL)
6403  {
6404  foreach(lc, partially_grouped_rel->pathlist)
6405  {
6406  ListCell *lc2;
6407  Path *path = (Path *) lfirst(lc);
6408  Path *path_original = path;
6409 
6410  List *pathkey_orderings = NIL;
6411 
6412  List *group_pathkeys = root->group_pathkeys;
6413  List *group_clauses = parse->groupClause;
6414 
6415  /* generate alternative group orderings that might be useful */
6416  pathkey_orderings = get_useful_group_keys_orderings(root,
6417  path->rows,
6418  path->pathkeys,
6419  group_pathkeys,
6420  group_clauses);
6421 
6422  Assert(list_length(pathkey_orderings) > 0);
6423 
6424  /* process all potentially interesting grouping reorderings */
6425  foreach(lc2, pathkey_orderings)
6426  {
6427  bool is_sorted;
6428  int presorted_keys = 0;
6429  PathKeyInfo *info = (PathKeyInfo *) lfirst(lc2);
6430 
6431  /* restore the path (we replace it in the loop) */
6432  path = path_original;
6433 
6434  is_sorted = pathkeys_count_contained_in(info->pathkeys,
6435  path->pathkeys,
6436  &presorted_keys);
6437 
6438  /*
6439  * Insert a Sort node, if required. But there's no point
6440  * in sorting anything but the cheapest path.
6441  */
6442  if (!is_sorted)
6443  {
6444  if (path != partially_grouped_rel->cheapest_total_path)
6445  continue;
6446  path = (Path *) create_sort_path(root,
6447  grouped_rel,
6448  path,
6449  info->pathkeys,
6450  -1.0);
6451  }
6452 
6453  if (parse->hasAggs)
6454  add_path(grouped_rel, (Path *)
6455  create_agg_path(root,
6456  grouped_rel,
6457  path,
6458  grouped_rel->reltarget,
6459  info->clauses ? AGG_SORTED : AGG_PLAIN,
6460  AGGSPLIT_FINAL_DESERIAL,
6461  info->clauses,
6462  havingQual,
6463  agg_final_costs,
6464  dNumGroups));
6465  else
6466  add_path(grouped_rel, (Path *)
6467  create_group_path(root,
6468  grouped_rel,
6469  path,
6470  info->clauses,
6471  havingQual,
6472  dNumGroups));
6473 
6474  /*
6475  * Now we may consider incremental sort on this path, but
6476  * only when the path is not already sorted and when
6477  * incremental sort is enabled.
6478  */
6479  if (is_sorted || !enable_incremental_sort)
6480  continue;
6481 
6482  /*
6483  * Restore the input path (we might have added Sort on
6484  * top).
6485  */
6486  path = path_original;
6487 
6488  /*
6489  * no shared prefix, no point in building incremental
6490  * sort
6491  */
6492  if (presorted_keys == 0)
6493  continue;
6494 
6495  /*
6496  * We should have already excluded pathkeys of length 1
6497  * because then presorted_keys > 0 would imply is_sorted
6498  * was true.
6499  */
6500  Assert(list_length(root->group_pathkeys) != 1);
6501 
6502  path = (Path *) create_incremental_sort_path(root,
6503  grouped_rel,
6504  path,
6505  info->pathkeys,
6506  presorted_keys,
6507  -1.0);
6508 
6509  if (parse->hasAggs)
6510  add_path(grouped_rel, (Path *)
6511  create_agg_path(root,
6512  grouped_rel,
6513  path,
6514  grouped_rel->reltarget,
6515  info->clauses ? AGG_SORTED : AGG_PLAIN,
6516  AGGSPLIT_FINAL_DESERIAL,
6517  info->clauses,
6518  havingQual,
6519  agg_final_costs,
6520  dNumGroups));
6521  else
6522  add_path(grouped_rel, (Path *)
6523  create_group_path(root,
6524  grouped_rel,
6525  path,
6526  info->clauses,
6527  havingQual,
6528  dNumGroups));
6529  }
6530  }
6531  }
6532  }
6533 
6534  if (can_hash)
6535  {
6536  if (parse->groupingSets)
6537  {
6538  /*
6539  * Try for a hash-only groupingsets path over unsorted input.
6540  */
6541  consider_groupingsets_paths(root, grouped_rel,
6542  cheapest_path, false, true,
6543  gd, agg_costs, dNumGroups);
6544  }
6545  else
6546  {
6547  /*
6548  * Generate a HashAgg Path. We just need an Agg over the
6549  * cheapest-total input path, since input order won't matter.
6550  */
6551  add_path(grouped_rel, (Path *)
6552  create_agg_path(root, grouped_rel,
6553  cheapest_path,
6554  grouped_rel->reltarget,
6555  AGG_HASHED,
6556  AGGSPLIT_SIMPLE,
6557  parse->groupClause,
6558  havingQual,
6559  agg_costs,
6560  dNumGroups));
6561  }
6562 
6563  /*
6564  * Generate a Finalize HashAgg Path atop of the cheapest partially
6565  * grouped path, assuming there is one
6566  */
6567  if (partially_grouped_rel && partially_grouped_rel->pathlist)
6568  {
6569  Path *path = partially_grouped_rel->cheapest_total_path;
6570 
6571  add_path(grouped_rel, (Path *)
6572  create_agg_path(root,
6573  grouped_rel,
6574  path,
6575  grouped_rel->reltarget,
6576  AGG_HASHED,
6577  AGGSPLIT_FINAL_DESERIAL,
6578  parse->groupClause,
6579  havingQual,
6580  agg_final_costs,
6581  dNumGroups));
6582  }
6583  }
6584 
6585  /*
6586  * When partitionwise aggregate is used, we might have fully aggregated
6587  * paths in the partial pathlist, because add_paths_to_append_rel() will
6588  * consider a path for grouped_rel consisting of a Parallel Append of
6589  * non-partial paths from each child.
6590  */
6591  if (grouped_rel->partial_pathlist != NIL)
6592  gather_grouping_paths(root, grouped_rel);
6593 }
bool enable_incremental_sort
Definition: costsize.c:141
Assert(fmt[strlen(fmt) - 1] !='\n')
@ AGG_SORTED
Definition: nodes.h:808
@ AGG_HASHED
Definition: nodes.h:809
@ AGG_PLAIN
Definition: nodes.h:807
@ AGGSPLIT_FINAL_DESERIAL
Definition: nodes.h:834
@ AGGSPLIT_SIMPLE
Definition: nodes.h:830
bool pathkeys_count_contained_in(List *keys1, List *keys2, int *n_common)
Definition: pathkeys.c:866
List * get_useful_group_keys_orderings(PlannerInfo *root, double nrows, List *path_pathkeys, List *group_pathkeys, List *group_clauses)
Definition: pathkeys.c:745
SortPath * create_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, double limit_tuples)
Definition: pathnode.c:2942
GroupPath * create_group_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *groupClause, List *qual, double numGroups)
Definition: pathnode.c:2986
IncrementalSortPath * create_incremental_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, int presorted_keys, double limit_tuples)
Definition: pathnode.c:2893
AggPath * create_agg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, AggStrategy aggstrategy, AggSplit aggsplit, List *groupClause, List *qual, const AggClauseCosts *aggcosts, double numGroups)
Definition: pathnode.c:3097
void add_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:422
#define GROUPING_CAN_USE_HASH
Definition: pathnodes.h:2573
#define GROUPING_CAN_USE_SORT
Definition: pathnodes.h:2572
#define lfirst(lc)
Definition: pg_list.h:169
static int list_length(const List *l)
Definition: pg_list.h:149
#define NIL
Definition: pg_list.h:65
static void gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
Definition: planner.c:7056
static void consider_groupingsets_paths(PlannerInfo *root, RelOptInfo *grouped_rel, Path *path, bool is_sorted, bool can_hash, grouping_sets_data *gd, const AggClauseCosts *agg_costs, double dNumGroups)
Definition: planner.c:3731
static struct subre * parse(struct vars *, int, int, struct state *, struct state *)
Definition: regcomp.c:673
AggClauseCosts agg_final_costs
Definition: pathnodes.h:2613
Definition: pg_list.h:51
List * pathkeys
Definition: pathnodes.h:1080
List * clauses
Definition: pathnodes.h:1081
List * pathkeys
Definition: pathnodes.h:1208
Cardinality rows
Definition: pathnodes.h:1204
List * group_pathkeys
Definition: pathnodes.h:297
Query * parse
Definition: pathnodes.h:162
struct PathTarget * reltarget
Definition: pathnodes.h:693
List * pathlist
Definition: pathnodes.h:696
struct Path * cheapest_total_path
Definition: pathnodes.h:700
List * partial_pathlist
Definition: pathnodes.h:698

References add_path(), GroupPathExtraData::agg_final_costs, AGG_HASHED, AGG_PLAIN, AGG_SORTED, AGGSPLIT_FINAL_DESERIAL, AGGSPLIT_SIMPLE, Assert(), RelOptInfo::cheapest_total_path, PathKeyInfo::clauses, consider_groupingsets_paths(), create_agg_path(), create_group_path(), create_incremental_sort_path(), create_sort_path(), enable_incremental_sort, GroupPathExtraData::flags, gather_grouping_paths(), get_useful_group_keys_orderings(), PlannerInfo::group_pathkeys, GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, GroupPathExtraData::havingQual, lfirst, list_length(), NIL, parse(), PlannerInfo::parse, RelOptInfo::partial_pathlist, PathKeyInfo::pathkeys, Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::pathlist, RelOptInfo::reltarget, and Path::rows.

Referenced by create_ordinary_grouping_paths().
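
Note that the function never re-derives sortability or hashability itself; it only honors the GROUPING_CAN_USE_SORT and GROUPING_CAN_USE_HASH bits its caller placed in extra->flags. A sketch of how a caller might set them up, loosely modeled on create_grouping_paths() in this file (the exact conditions there are more involved):

    int flags = 0;

    /* sorted Agg/Group strategies need a sortable GROUP BY */
    if (grouping_is_sortable(parse->groupClause))
        flags |= GROUPING_CAN_USE_SORT;

    /* HashAgg needs hashable grouping columns */
    if (grouping_is_hashable(parse->groupClause))
        flags |= GROUPING_CAN_USE_HASH;

    extra.flags = flags;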

◆ adjust_paths_for_srfs()

static void adjust_paths_for_srfs ( PlannerInfo *root,
RelOptInfo *rel,
List *targets,
List *targets_contain_srfs 
)
static

Definition at line 5766 of file planner.c.

5768 {
5769  ListCell *lc;
5770 
5771  Assert(list_length(targets) == list_length(targets_contain_srfs));
5772  Assert(!linitial_int(targets_contain_srfs));
5773 
5774  /* If no SRFs appear at this plan level, nothing to do */
5775  if (list_length(targets) == 1)
5776  return;
5777 
5778  /*
5779  * Stack SRF-evaluation nodes atop each path for the rel.
5780  *
5781  * In principle we should re-run set_cheapest() here to identify the
5782  * cheapest path, but it seems unlikely that adding the same tlist eval
5783  * costs to all the paths would change that, so we don't bother. Instead,
5784  * just assume that the cheapest-startup and cheapest-total paths remain
5785  * so. (There should be no parameterized paths anymore, so we needn't
5786  * worry about updating cheapest_parameterized_paths.)
5787  */
5788  foreach(lc, rel->pathlist)
5789  {
5790  Path *subpath = (Path *) lfirst(lc);
5791  Path *newpath = subpath;
5792  ListCell *lc1,
5793  *lc2;
5794 
5795  Assert(subpath->param_info == NULL);
5796  forboth(lc1, targets, lc2, targets_contain_srfs)
5797  {
5798  PathTarget *thistarget = lfirst_node(PathTarget, lc1);
5799  bool contains_srfs = (bool) lfirst_int(lc2);
5800 
5801  /* If this level doesn't contain SRFs, do regular projection */
5802  if (contains_srfs)
5803  newpath = (Path *) create_set_projection_path(root,
5804  rel,
5805  newpath,
5806  thistarget);
5807  else
5808  newpath = (Path *) apply_projection_to_path(root,
5809  rel,
5810  newpath,
5811  thistarget);
5812  }
5813  lfirst(lc) = newpath;
5814  if (subpath == rel->cheapest_startup_path)
5815  rel->cheapest_startup_path = newpath;
5816  if (subpath == rel->cheapest_total_path)
5817  rel->cheapest_total_path = newpath;
5818  }
5819 
5820  /* Likewise for partial paths, if any */
5821  foreach(lc, rel->partial_pathlist)
5822  {
5823  Path *subpath = (Path *) lfirst(lc);
5824  Path *newpath = subpath;
5825  ListCell *lc1,
5826  *lc2;
5827 
5828  Assert(subpath->param_info == NULL);
5829  forboth(lc1, targets, lc2, targets_contain_srfs)
5830  {
5831  PathTarget *thistarget = lfirst_node(PathTarget, lc1);
5832  bool contains_srfs = (bool) lfirst_int(lc2);
5833 
5834  /* If this level doesn't contain SRFs, do regular projection */
5835  if (contains_srfs)
5836  newpath = (Path *) create_set_projection_path(root,
5837  rel,
5838  newpath,
5839  thistarget);
5840  else
5841  {
5842  /* avoid apply_projection_to_path, in case of multiple refs */
5843  newpath = (Path *) create_projection_path(root,
5844  rel,
5845  newpath,
5846  thistarget);
5847  }
5848  }
5849  lfirst(lc) = newpath;
5850  }
5851 }
unsigned char bool
Definition: c.h:391
Datum subpath(PG_FUNCTION_ARGS)
Definition: ltree_op.c:241
ProjectionPath * create_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2627
ProjectSetPath * create_set_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2824
Path * apply_projection_to_path(PlannerInfo *root, RelOptInfo *rel, Path *path, PathTarget *target)
Definition: pathnode.c:2735
#define lfirst_node(type, lc)
Definition: pg_list.h:172
#define forboth(cell1, list1, cell2, list2)
Definition: pg_list.h:446
#define lfirst_int(lc)
Definition: pg_list.h:170
#define linitial_int(l)
Definition: pg_list.h:175
struct Path * cheapest_startup_path
Definition: pathnodes.h:699

References apply_projection_to_path(), Assert(), RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, create_projection_path(), create_set_projection_path(), forboth, lfirst, lfirst_int, lfirst_node, linitial_int, list_length(), RelOptInfo::partial_pathlist, RelOptInfo::pathlist, and subpath().

Referenced by apply_scanjoin_target_to_paths(), and grouping_planner().
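
The targets and targets_contain_srfs arguments are parallel lists: one PathTarget per SRF evaluation level, plus a boolean flag per level. A sketch, under the assumption that callers obtain them from split_pathtarget_at_srfs() before invoking this function (variable names are illustrative):

    List *targets;
    List *targets_contain_srfs;

    /* break the final target into SRF-free and SRF-bearing levels */
    split_pathtarget_at_srfs(root, final_target, NULL,
                             &targets, &targets_contain_srfs);

    /* level 0 is always SRF-free, matching the Assert above */
    adjust_paths_for_srfs(root, rel, targets, targets_contain_srfs);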

◆ apply_scanjoin_target_to_paths()

static void apply_scanjoin_target_to_paths ( PlannerInfo *root,
RelOptInfo *rel,
List *scanjoin_targets,
List *scanjoin_targets_contain_srfs,
bool  scanjoin_target_parallel_safe,
bool  tlist_same_exprs 
)
static

Definition at line 7186 of file planner.c.

7192 {
7193  bool rel_is_partitioned = IS_PARTITIONED_REL(rel);
7194  PathTarget *scanjoin_target;
7195  ListCell *lc;
7196 
7197  /* This recurses, so be paranoid. */
7198  check_stack_depth();
7199 
7200  /*
7201  * If the rel is partitioned, we want to drop its existing paths and
7202  * generate new ones. This function would still be correct if we kept the
7203  * existing paths: we'd modify them to generate the correct target above
7204  * the partitioning Append, and then they'd compete on cost with paths
7205  * generating the target below the Append. However, in our current cost
7206  * model the latter way is always the same or cheaper cost, so modifying
7207  * the existing paths would just be useless work. Moreover, when the cost
7208  * is the same, varying roundoff errors might sometimes allow an existing
7209  * path to be picked, resulting in undesirable cross-platform plan
7210  * variations. So we drop old paths and thereby force the work to be done
7211  * below the Append, except in the case of a non-parallel-safe target.
7212  *
7213  * Some care is needed, because we have to allow
7214  * generate_useful_gather_paths to see the old partial paths in the next
7215  * stanza. Hence, zap the main pathlist here, then allow
7216  * generate_useful_gather_paths to add path(s) to the main list, and
7217  * finally zap the partial pathlist.
7218  */
7219  if (rel_is_partitioned)
7220  rel->pathlist = NIL;
7221 
7222  /*
7223  * If the scan/join target is not parallel-safe, partial paths cannot
7224  * generate it.
7225  */
7226  if (!scanjoin_target_parallel_safe)
7227  {
7228  /*
7229  * Since we can't generate the final scan/join target in parallel
7230  * workers, this is our last opportunity to use any partial paths that
7231  * exist; so build Gather path(s) that use them and emit whatever the
7232  * current reltarget is. We don't do this in the case where the
7233  * target is parallel-safe, since we will be able to generate superior
7234  * paths by doing it after the final scan/join target has been
7235  * applied.
7236  */
7237  generate_useful_gather_paths(root, rel, false);
7238 
7239  /* Can't use parallel query above this level. */
7240  rel->partial_pathlist = NIL;
7241  rel->consider_parallel = false;
7242  }
7243 
7244  /* Finish dropping old paths for a partitioned rel, per comment above */
7245  if (rel_is_partitioned)
7246  rel->partial_pathlist = NIL;
7247 
7248  /* Extract SRF-free scan/join target. */
7249  scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
7250 
7251  /*
7252  * Apply the SRF-free scan/join target to each existing path.
7253  *
7254  * If the tlist exprs are the same, we can just inject the sortgroupref
7255  * information into the existing pathtargets. Otherwise, replace each
7256  * path with a projection path that generates the SRF-free scan/join
7257  * target. This can't change the ordering of paths within rel->pathlist,
7258  * so we just modify the list in place.
7259  */
7260  foreach(lc, rel->pathlist)
7261  {
7262  Path *subpath = (Path *) lfirst(lc);
7263 
7264  /* Shouldn't have any parameterized paths anymore */
7265  Assert(subpath->param_info == NULL);
7266 
7267  if (tlist_same_exprs)
7268  subpath->pathtarget->sortgrouprefs =
7269  scanjoin_target->sortgrouprefs;
7270  else
7271  {
7272  Path *newpath;
7273 
7274  newpath = (Path *) create_projection_path(root, rel, subpath,
7275  scanjoin_target);
7276  lfirst(lc) = newpath;
7277  }
7278  }
7279 
7280  /* Likewise adjust the targets for any partial paths. */
7281  foreach(lc, rel->partial_pathlist)
7282  {
7283  Path *subpath = (Path *) lfirst(lc);
7284 
7285  /* Shouldn't have any parameterized paths anymore */
7286  Assert(subpath->param_info == NULL);
7287 
7288  if (tlist_same_exprs)
7289  subpath->pathtarget->sortgrouprefs =
7290  scanjoin_target->sortgrouprefs;
7291  else
7292  {
7293  Path *newpath;
7294 
7295  newpath = (Path *) create_projection_path(root, rel, subpath,
7296  scanjoin_target);
7297  lfirst(lc) = newpath;
7298  }
7299  }
7300 
7301  /*
7302  * Now, if final scan/join target contains SRFs, insert ProjectSetPath(s)
7303  * atop each existing path. (Note that this function doesn't look at the
7304  * cheapest-path fields, which is a good thing because they're bogus right
7305  * now.)
7306  */
7307  if (root->parse->hasTargetSRFs)
7308  adjust_paths_for_srfs(root, rel,
7309  scanjoin_targets,
7310  scanjoin_targets_contain_srfs);
7311 
7312  /*
7313  * Update the rel's target to be the final (with SRFs) scan/join target.
7314  * This now matches the actual output of all the paths, and we might get
7315  * confused in createplan.c if they don't agree. We must do this now so
7316  * that any append paths made in the next part will use the correct
7317  * pathtarget (cf. create_append_path).
7318  *
7319  * Note that this is also necessary if GetForeignUpperPaths() gets called
7320  * on the final scan/join relation or on any of its children, since the
7321  * FDW might look at the rel's target to create ForeignPaths.
7322  */
7323  rel->reltarget = llast_node(PathTarget, scanjoin_targets);
7324 
7325  /*
7326  * If the relation is partitioned, recursively apply the scan/join target
7327  * to all partitions, and generate brand-new Append paths in which the
7328  * scan/join target is computed below the Append rather than above it.
7329  * Since Append is not projection-capable, that might save a separate
7330  * Result node, and it also is important for partitionwise aggregate.
7331  */
7332  if (rel_is_partitioned)
7333  {
7334  List *live_children = NIL;
7335  int i;
7336 
7337  /* Adjust each partition. */
7338  i = -1;
7339  while ((i = bms_next_member(rel->live_parts, i)) >= 0)
7340  {
7341  RelOptInfo *child_rel = rel->part_rels[i];
7342  AppendRelInfo **appinfos;
7343  int nappinfos;
7344  List *child_scanjoin_targets = NIL;
7345  ListCell *lc;
7346 
7347  Assert(child_rel != NULL);
7348 
7349  /* Dummy children can be ignored. */
7350  if (IS_DUMMY_REL(child_rel))
7351  continue;
7352 
7353  /* Translate scan/join targets for this child. */
7354  appinfos = find_appinfos_by_relids(root, child_rel->relids,
7355  &nappinfos);
7356  foreach(lc, scanjoin_targets)
7357  {
7358  PathTarget *target = lfirst_node(PathTarget, lc);
7359 
7360  target = copy_pathtarget(target);
7361  target->exprs = (List *)
7362  adjust_appendrel_attrs(root,
7363  (Node *) target->exprs,
7364  nappinfos, appinfos);
7365  child_scanjoin_targets = lappend(child_scanjoin_targets,
7366  target);
7367  }
7368  pfree(appinfos);
7369 
7370  /* Recursion does the real work. */
7371  apply_scanjoin_target_to_paths(root, child_rel,
7372  child_scanjoin_targets,
7373  scanjoin_targets_contain_srfs,
7374  scanjoin_target_parallel_safe,
7375  tlist_same_exprs);
7376 
7377  /* Save non-dummy children for Append paths. */
7378  if (!IS_DUMMY_REL(child_rel))
7379  live_children = lappend(live_children, child_rel);
7380  }
7381 
7382  /* Build new paths for this relation by appending child paths. */
7383  add_paths_to_append_rel(root, rel, live_children);
7384  }
7385 
7386  /*
7387  * Consider generating Gather or Gather Merge paths. We must only do this
7388  * if the relation is parallel safe, and we don't do it for child rels to
7389  * avoid creating multiple Gather nodes within the same plan. We must do
7390  * this after all paths have been generated and before set_cheapest, since
7391  * one of the generated paths may turn out to be the cheapest one.
7392  */
7393  if (rel->consider_parallel && !IS_OTHER_REL(rel))
7394  generate_useful_gather_paths(root, rel, false);
7395 
7396  /*
7397  * Reassess which paths are the cheapest, now that we've potentially added
7398  * new Gather (or Gather Merge) and/or Append (or MergeAppend) paths to
7399  * this relation.
7400  */
7401  set_cheapest(rel);
7402 }
void generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
Definition: allpaths.c:3107
void add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, List *live_childrels)
Definition: allpaths.c:1287
AppendRelInfo ** find_appinfos_by_relids(PlannerInfo *root, Relids relids, int *nappinfos)
Definition: appendinfo.c:715
Node * adjust_appendrel_attrs(PlannerInfo *root, Node *node, int nappinfos, AppendRelInfo **appinfos)
Definition: appendinfo.c:195
int bms_next_member(const Bitmapset *a, int prevbit)
Definition: bitmapset.c:1045
int i
Definition: isn.c:73
List * lappend(List *list, void *datum)
Definition: list.c:336
void pfree(void *pointer)
Definition: mcxt.c:1175
void set_cheapest(RelOptInfo *parent_rel)
Definition: pathnode.c:244
#define IS_DUMMY_REL(r)
Definition: pathnodes.h:1478
#define IS_PARTITIONED_REL(rel)
Definition: pathnodes.h:787
#define IS_OTHER_REL(rel)
Definition: pathnodes.h:670
#define linitial_node(type, l)
Definition: pg_list.h:177
#define llast_node(type, l)
Definition: pg_list.h:197
static void apply_scanjoin_target_to_paths(PlannerInfo *root, RelOptInfo *rel, List *scanjoin_targets, List *scanjoin_targets_contain_srfs, bool scanjoin_target_parallel_safe, bool tlist_same_exprs)
Definition: planner.c:7186
static void adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel, List *targets, List *targets_contain_srfs)
Definition: planner.c:5766
void check_stack_depth(void)
Definition: postgres.c:3500
Definition: nodes.h:574
Index * sortgrouprefs
Definition: pathnodes.h:1123
List * exprs
Definition: pathnodes.h:1122
bool hasTargetSRFs
Definition: parsenodes.h:136
Relids relids
Definition: pathnodes.h:682
bool consider_parallel
Definition: pathnodes.h:690
struct RelOptInfo ** part_rels
Definition: pathnodes.h:769
Bitmapset * live_parts
Definition: pathnodes.h:771
bool tlist_same_exprs(List *tlist1, List *tlist2)
Definition: tlist.c:207
PathTarget * copy_pathtarget(PathTarget *src)
Definition: tlist.c:646

References add_paths_to_append_rel(), adjust_appendrel_attrs(), adjust_paths_for_srfs(), Assert(), bms_next_member(), check_stack_depth(), RelOptInfo::consider_parallel, copy_pathtarget(), create_projection_path(), PathTarget::exprs, find_appinfos_by_relids(), generate_useful_gather_paths(), Query::hasTargetSRFs, i, IS_DUMMY_REL, IS_OTHER_REL, IS_PARTITIONED_REL, lappend(), lfirst, lfirst_node, linitial_node, RelOptInfo::live_parts, llast_node, NIL, PlannerInfo::parse, RelOptInfo::part_rels, RelOptInfo::partial_pathlist, RelOptInfo::pathlist, pfree(), RelOptInfo::relids, RelOptInfo::reltarget, set_cheapest(), PathTarget::sortgrouprefs, subpath(), and tlist_same_exprs().

Referenced by grouping_planner().
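
Because the function calls itself once per partition (and partitions can themselves be partitioned), it opens with check_stack_depth() at source line 7198. That is the backend's standard guard for any potentially deep recursion; a minimal sketch, where walk_rel_tree is a hypothetical name:

    #include "miscadmin.h"          /* declares check_stack_depth() */

    static void
    walk_rel_tree(PlannerInfo *root, RelOptInfo *rel)
    {
        /* raise a clean ERROR instead of overflowing the C stack */
        check_stack_depth();

        /* ... process rel, then recurse into its children ... */
    }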

◆ can_partial_agg()

static bool can_partial_agg ( PlannerInfo *root )
static

Definition at line 7144 of file planner.c.

7145 {
7146  Query *parse = root->parse;
7147 
7148  if (!parse->hasAggs && parse->groupClause == NIL)
7149  {
7150  /*
7151  * We don't know how to do parallel aggregation unless we have either
7152  * some aggregates or a grouping clause.
7153  */
7154  return false;
7155  }
7156  else if (parse->groupingSets)
7157  {
7158  /* We don't know how to do grouping sets in parallel. */
7159  return false;
7160  }
7161  else if (root->hasNonPartialAggs || root->hasNonSerialAggs)
7162  {
7163  /* Insufficient support for partial mode. */
7164  return false;
7165  }
7166 
7167  /* Everything looks good. */
7168  return true;
7169 }
bool hasNonPartialAggs
Definition: pathnodes.h:361
bool hasNonSerialAggs
Definition: pathnodes.h:362

References PlannerInfo::hasNonPartialAggs, PlannerInfo::hasNonSerialAggs, NIL, parse(), and PlannerInfo::parse.

Referenced by create_grouping_paths().
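
The result feeds into the same flags word used for the sort/hash capability bits. A sketch, assuming the GROUPING_CAN_PARTIAL_AGG bit from pathnodes.h as used by create_grouping_paths():

    /* record whether two-phase (partial + finalize) aggregation is legal */
    if (can_partial_agg(root))
        extra.flags |= GROUPING_CAN_PARTIAL_AGG;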

◆ common_prefix_cmp()

static int common_prefix_cmp ( const void *  a,
const void *  b 
)
static

Definition at line 5271 of file planner.c.

5272 {
5273  const WindowClauseSortData *wcsa = a;
5274  const WindowClauseSortData *wcsb = b;
5275  ListCell *item_a;
5276  ListCell *item_b;
5277 
5278  forboth(item_a, wcsa->uniqueOrder, item_b, wcsb->uniqueOrder)
5279  {
5280  SortGroupClause *sca = lfirst_node(SortGroupClause, item_a);
5281  SortGroupClause *scb = lfirst_node(SortGroupClause, item_b);
5282 
5283  if (sca->tleSortGroupRef > scb->tleSortGroupRef)
5284  return -1;
5285  else if (sca->tleSortGroupRef < scb->tleSortGroupRef)
5286  return 1;
5287  else if (sca->sortop > scb->sortop)
5288  return -1;
5289  else if (sca->sortop < scb->sortop)
5290  return 1;
5291  else if (sca->nulls_first && !scb->nulls_first)
5292  return -1;
5293  else if (!sca->nulls_first && scb->nulls_first)
5294  return 1;
5295  /* no need to compare eqop, since it is fully determined by sortop */
5296  }
5297 
5298  if (list_length(wcsa->uniqueOrder) > list_length(wcsb->uniqueOrder))
5299  return -1;
5300  else if (list_length(wcsa->uniqueOrder) < list_length(wcsb->uniqueOrder))
5301  return 1;
5302 
5303  return 0;
5304 }
int b
Definition: isn.c:70
int a
Definition: isn.c:69
Index tleSortGroupRef
Definition: parsenodes.h:1305

References a, b, forboth, lfirst_node, list_length(), SortGroupClause::nulls_first, SortGroupClause::sortop, SortGroupClause::tleSortGroupRef, and WindowClauseSortData::uniqueOrder.

Referenced by select_active_windows().
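
The comparator sorts descending by common sort prefix, so window clauses sharing the longest prefix become neighbors and can reuse one Sort node. A sketch of the qsort() call it is designed for, modeled on select_active_windows() (actives and nActive are illustrative names):

    WindowClauseSortData *actives;

    actives = palloc(sizeof(WindowClauseSortData) * nActive);
    /* ... fill actives[i].wc and actives[i].uniqueOrder ... */

    qsort(actives, nActive, sizeof(WindowClauseSortData),
          common_prefix_cmp);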

◆ consider_groupingsets_paths()

static void consider_groupingsets_paths ( PlannerInfo *root,
RelOptInfo *grouped_rel,
Path *path,
bool  is_sorted,
bool  can_hash,
grouping_sets_data *gd,
const AggClauseCosts *agg_costs,
double  dNumGroups 
)
static

Definition at line 3731 of file planner.c.

3739 {
3740  Query *parse = root->parse;
3741  Size hash_mem_limit = get_hash_memory_limit();
3742 
3743  /*
3744  * If we're not being offered sorted input, then only consider plans that
3745  * can be done entirely by hashing.
3746  *
3747  * We can hash everything if it looks like it'll fit in hash_mem. But if
3748  * the input is actually sorted despite not being advertised as such, we
3749  * prefer to make use of that in order to use less memory.
3750  *
3751  * If none of the grouping sets are sortable, then ignore the hash_mem
3752  * limit and generate a path anyway, since otherwise we'll just fail.
3753  */
3754  if (!is_sorted)
3755  {
3756  List *new_rollups = NIL;
3757  RollupData *unhashed_rollup = NULL;
3758  List *sets_data;
3759  List *empty_sets_data = NIL;
3760  List *empty_sets = NIL;
3761  ListCell *lc;
3762  ListCell *l_start = list_head(gd->rollups);
3763  AggStrategy strat = AGG_HASHED;
3764  double hashsize;
3765  double exclude_groups = 0.0;
3766 
3767  Assert(can_hash);
3768 
3769  /*
3770  * If the input is coincidentally sorted usefully (which can happen
3771  * even if is_sorted is false, since that only means that our caller
3772  * has set up the sorting for us), then save some hashtable space by
3773  * making use of that. But we need to watch out for degenerate cases:
3774  *
3775  * 1) If there are any empty grouping sets, then group_pathkeys might
3776  * be NIL if all non-empty grouping sets are unsortable. In this case,
3777  * there will be a rollup containing only empty groups, and the
3778  * pathkeys_contained_in test is vacuously true; this is ok.
3779  *
3780  * XXX: the above relies on the fact that group_pathkeys is generated
3781  * from the first rollup. If we add the ability to consider multiple
3782  * sort orders for grouping input, this assumption might fail.
3783  *
3784  * 2) If there are no empty sets and only unsortable sets, then the
3785  * rollups list will be empty (and thus l_start == NULL), and
3786  * group_pathkeys will be NIL; we must ensure that the vacuously-true
3787  * pathkeys_contained_in test doesn't cause us to crash.
3788  */
3789  if (l_start != NULL &&
3790  pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
3791  {
3792  unhashed_rollup = lfirst_node(RollupData, l_start);
3793  exclude_groups = unhashed_rollup->numGroups;
3794  l_start = lnext(gd->rollups, l_start);
3795  }
3796 
3797  hashsize = estimate_hashagg_tablesize(root,
3798  path,
3799  agg_costs,
3800  dNumGroups - exclude_groups);
3801 
3802  /*
3803  * gd->rollups is empty if we have only unsortable columns to work
3804  * with. Override hash_mem in that case; otherwise, we'll rely on the
3805  * sorted-input case to generate usable mixed paths.
3806  */
3807  if (hashsize > hash_mem_limit && gd->rollups)
3808  return; /* nope, won't fit */
3809 
3810  /*
3811  * We need to burst the existing rollups list into individual grouping
3812  * sets and recompute a groupClause for each set.
3813  */
3814  sets_data = list_copy(gd->unsortable_sets);
3815 
3816  for_each_cell(lc, gd->rollups, l_start)
3817  {
3818  RollupData *rollup = lfirst_node(RollupData, lc);
3819 
3820  /*
3821  * If we find an unhashable rollup that's not been skipped by the
3822  * "actually sorted" check above, we can't cope; we'd need sorted
3823  * input (with a different sort order) but we can't get that here.
3824  * So bail out; we'll get a valid path from the is_sorted case
3825  * instead.
3826  *
3827  * The mere presence of empty grouping sets doesn't make a rollup
3828  * unhashable (see preprocess_grouping_sets), we handle those
3829  * specially below.
3830  */
3831  if (!rollup->hashable)
3832  return;
3833 
3834  sets_data = list_concat(sets_data, rollup->gsets_data);
3835  }
3836  foreach(lc, sets_data)
3837  {
3839  List *gset = gs->set;
3840  RollupData *rollup;
3841 
3842  if (gset == NIL)
3843  {
3844  /* Empty grouping sets can't be hashed. */
3845  empty_sets_data = lappend(empty_sets_data, gs);
3846  empty_sets = lappend(empty_sets, NIL);
3847  }
3848  else
3849  {
3850  rollup = makeNode(RollupData);
3851 
3852  rollup->groupClause = preprocess_groupclause(root, gset);
3853  rollup->gsets_data = list_make1(gs);
3854  rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
3855  rollup->gsets_data,
3856  gd->tleref_to_colnum_map);
3857  rollup->numGroups = gs->numGroups;
3858  rollup->hashable = true;
3859  rollup->is_hashed = true;
3860  new_rollups = lappend(new_rollups, rollup);
3861  }
3862  }
3863 
3864  /*
3865  * If we didn't find anything nonempty to hash, then bail. We'll
3866  * generate a path from the is_sorted case.
3867  */
3868  if (new_rollups == NIL)
3869  return;
3870 
3871  /*
3872  * If there were empty grouping sets they should have been in the
3873  * first rollup.
3874  */
3875  Assert(!unhashed_rollup || !empty_sets);
3876 
3877  if (unhashed_rollup)
3878  {
3879  new_rollups = lappend(new_rollups, unhashed_rollup);
3880  strat = AGG_MIXED;
3881  }
3882  else if (empty_sets)
3883  {
3884  RollupData *rollup = makeNode(RollupData);
3885 
3886  rollup->groupClause = NIL;
3887  rollup->gsets_data = empty_sets_data;
3888  rollup->gsets = empty_sets;
3889  rollup->numGroups = list_length(empty_sets);
3890  rollup->hashable = false;
3891  rollup->is_hashed = false;
3892  new_rollups = lappend(new_rollups, rollup);
3893  strat = AGG_MIXED;
3894  }
3895 
3896  add_path(grouped_rel, (Path *)
3897  create_groupingsets_path(root,
3898  grouped_rel,
3899  path,
3900  (List *) parse->havingQual,
3901  strat,
3902  new_rollups,
3903  agg_costs,
3904  dNumGroups));
3905  return;
3906  }
3907 
3908  /*
3909  * If we have sorted input but nothing we can do with it, bail.
3910  */
3911  if (list_length(gd->rollups) == 0)
3912  return;
3913 
3914  /*
3915  * Given sorted input, we try and make two paths: one sorted and one mixed
3916  * sort/hash. (We need to try both because hashagg might be disabled, or
3917  * some columns might not be sortable.)
3918  *
3919  * can_hash is passed in as false if some obstacle elsewhere (such as
3920  * ordered aggs) means that we shouldn't consider hashing at all.
3921  */
3922  if (can_hash && gd->any_hashable)
3923  {
3924  List *rollups = NIL;
3925  List *hash_sets = list_copy(gd->unsortable_sets);
3926  double availspace = hash_mem_limit;
3927  ListCell *lc;
3928 
3929  /*
3930  * Account first for space needed for groups we can't sort at all.
3931  */
3932  availspace -= estimate_hashagg_tablesize(root,
3933  path,
3934  agg_costs,
3935  gd->dNumHashGroups);
3936 
3937  if (availspace > 0 && list_length(gd->rollups) > 1)
3938  {
3939  double scale;
3940  int num_rollups = list_length(gd->rollups);
3941  int k_capacity;
3942  int *k_weights = palloc(num_rollups * sizeof(int));
3943  Bitmapset *hash_items = NULL;
3944  int i;
3945 
3946  /*
3947  * We treat this as a knapsack problem: the knapsack capacity
3948  * represents hash_mem, the item weights are the estimated memory
3949  * usage of the hashtables needed to implement a single rollup,
3950  * and we really ought to use the cost saving as the item value;
3951  * however, currently the costs assigned to sort nodes don't
3952  * reflect the comparison costs well, and so we treat all items as
3953  * of equal value (each rollup we hash instead saves us one sort).
3954  *
3955  * To use the discrete knapsack, we need to scale the values to a
3956  * reasonably small bounded range. We choose to allow a 5% error
3957  * margin; we have no more than 4096 rollups in the worst possible
3958  * case, which with a 5% error margin will require a bit over 42MB
3959  * of workspace. (Anyone wanting to plan queries that complex had
3960  * better have the memory for it. In more reasonable cases, with
3961  * no more than a couple of dozen rollups, the memory usage will
3962  * be negligible.)
3963  *
3964  * k_capacity is naturally bounded, but we clamp the values for
3965  * scale and weight (below) to avoid overflows or underflows (or
3966  * uselessly trying to use a scale factor less than 1 byte).
3967  */
3968  scale = Max(availspace / (20.0 * num_rollups), 1.0);
3969  k_capacity = (int) floor(availspace / scale);
3970 
3971  /*
3972  * We leave the first rollup out of consideration since it's the
3973  * one that matches the input sort order. We assign indexes "i"
3974  * to only those entries considered for hashing; the second loop,
3975  * below, must use the same condition.
3976  */
3977  i = 0;
3978  for_each_from(lc, gd->rollups, 1)
3979  {
3980  RollupData *rollup = lfirst_node(RollupData, lc);
3981 
3982  if (rollup->hashable)
3983  {
3984  double sz = estimate_hashagg_tablesize(root,
3985  path,
3986  agg_costs,
3987  rollup->numGroups);
3988 
3989  /*
3990  * If sz is enormous, but hash_mem (and hence scale) is
3991  * small, avoid integer overflow here.
3992  */
3993  k_weights[i] = (int) Min(floor(sz / scale),
3994  k_capacity + 1.0);
3995  ++i;
3996  }
3997  }
3998 
3999  /*
4000  * Apply knapsack algorithm; compute the set of items which
4001  * maximizes the value stored (in this case the number of sorts
4002  * saved) while keeping the total size (approximately) within
4003  * capacity.
4004  */
4005  if (i > 0)
4006  hash_items = DiscreteKnapsack(k_capacity, i, k_weights, NULL);
4007 
4008  if (!bms_is_empty(hash_items))
4009  {
4010  rollups = list_make1(linitial(gd->rollups));
4011 
4012  i = 0;
4013  for_each_from(lc, gd->rollups, 1)
4014  {
4015  RollupData *rollup = lfirst_node(RollupData, lc);
4016 
4017  if (rollup->hashable)
4018  {
4019  if (bms_is_member(i, hash_items))
4020  hash_sets = list_concat(hash_sets,
4021  rollup->gsets_data);
4022  else
4023  rollups = lappend(rollups, rollup);
4024  ++i;
4025  }
4026  else
4027  rollups = lappend(rollups, rollup);
4028  }
4029  }
4030  }
4031 
4032  if (!rollups && hash_sets)
4033  rollups = list_copy(gd->rollups);
4034 
4035  foreach(lc, hash_sets)
4036  {
4038  RollupData *rollup = makeNode(RollupData);
4039 
4040  Assert(gs->set != NIL);
4041 
4042  rollup->groupClause = preprocess_groupclause(root, gs->set);
4043  rollup->gsets_data = list_make1(gs);
4044  rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4045  rollup->gsets_data,
4046  gd->tleref_to_colnum_map);
4047  rollup->numGroups = gs->numGroups;
4048  rollup->hashable = true;
4049  rollup->is_hashed = true;
4050  rollups = lcons(rollup, rollups);
4051  }
4052 
4053  if (rollups)
4054  {
4055  add_path(grouped_rel, (Path *)
4056  create_groupingsets_path(root,
4057  grouped_rel,
4058  path,
4059  (List *) parse->havingQual,
4060  AGG_MIXED,
4061  rollups,
4062  agg_costs,
4063  dNumGroups));
4064  }
4065  }
4066 
4067  /*
4068  * Now try the simple sorted case.
4069  */
4070  if (!gd->unsortable_sets)
4071  add_path(grouped_rel, (Path *)
4072  create_groupingsets_path(root,
4073  grouped_rel,
4074  path,
4075  (List *) parse->havingQual,
4076  AGG_SORTED,
4077  gd->rollups,
4078  agg_costs,
4079  dNumGroups));
4080 }
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:427
bool bms_is_empty(const Bitmapset *a)
Definition: bitmapset.c:703
#define Min(x, y)
Definition: c.h:986
#define Max(x, y)
Definition: c.h:980
size_t Size
Definition: c.h:540
Bitmapset * DiscreteKnapsack(int max_weight, int num_items, int *item_weights, double *item_values)
Definition: knapsack.c:54
List * list_copy(const List *oldlist)
Definition: list.c:1532
List * list_concat(List *list1, const List *list2)
Definition: list.c:540
List * lcons(void *datum, List *list)
Definition: list.c:474
void * palloc(Size size)
Definition: mcxt.c:1068
size_t get_hash_memory_limit(void)
Definition: nodeHash.c:3401
AggStrategy
Definition: nodes.h:806
@ AGG_MIXED
Definition: nodes.h:810
#define makeNode(_type_)
Definition: nodes.h:621
bool pathkeys_contained_in(List *keys1, List *keys2)
Definition: pathkeys.c:329
GroupingSetsPath * create_groupingsets_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *having_qual, AggStrategy aggstrategy, List *rollups, const AggClauseCosts *agg_costs, double numGroups)
Definition: pathnode.c:3164
#define list_make1(x1)
Definition: pg_list.h:206
#define for_each_cell(cell, lst, initcell)
Definition: pg_list.h:417
static ListCell * list_head(const List *l)
Definition: pg_list.h:125
#define for_each_from(cell, lst, N)
Definition: pg_list.h:393
#define linitial(l)
Definition: pg_list.h:174
static ListCell * lnext(const List *l, const ListCell *c)
Definition: pg_list.h:322
int scale
Definition: pgbench.c:194
static List * preprocess_groupclause(PlannerInfo *root, List *force)
Definition: planner.c:2771
static List * remap_to_groupclause_idx(List *groupClause, List *gsets, int *tleref_to_colnum_map)
Definition: planner.c:2152
double estimate_hashagg_tablesize(PlannerInfo *root, Path *path, const AggClauseCosts *agg_costs, double dNumGroups)
Definition: selfuncs.c:3901
Cardinality numGroups
Definition: pathnodes.h:1801
Cardinality numGroups
Definition: pathnodes.h:1810
List * groupClause
Definition: pathnodes.h:1807
List * gsets_data
Definition: pathnodes.h:1809
bool hashable
Definition: pathnodes.h:1811
List * gsets
Definition: pathnodes.h:1808
bool is_hashed
Definition: pathnodes.h:1812
int * tleref_to_colnum_map
Definition: planner.c:115
List * unsortable_sets
Definition: planner.c:114
double dNumHashGroups
Definition: planner.c:110

References add_path(), AGG_HASHED, AGG_MIXED, AGG_SORTED, grouping_sets_data::any_hashable, Assert(), bms_is_empty(), bms_is_member(), create_groupingsets_path(), DiscreteKnapsack(), grouping_sets_data::dNumHashGroups, estimate_hashagg_tablesize(), for_each_cell, for_each_from, get_hash_memory_limit(), PlannerInfo::group_pathkeys, RollupData::groupClause, RollupData::gsets, RollupData::gsets_data, RollupData::hashable, i, RollupData::is_hashed, lappend(), lcons(), lfirst_node, linitial, list_concat(), list_copy(), list_head(), list_length(), list_make1, lnext(), makeNode, Max, Min, NIL, GroupingSetData::numGroups, RollupData::numGroups, palloc(), parse(), PlannerInfo::parse, Path::pathkeys, pathkeys_contained_in(), preprocess_groupclause(), remap_to_groupclause_idx(), grouping_sets_data::rollups, scale, GroupingSetData::set, grouping_sets_data::tleref_to_colnum_map, and grouping_sets_data::unsortable_sets.

Referenced by add_paths_to_grouping_rel().
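
To make the knapsack scaling above concrete, here is a worked example with illustrative numbers (not taken from the source): 4MB of available hash memory and four rollups.

    double availspace  = 4.0 * 1024 * 1024;   /* 4MB, illustrative */
    int    num_rollups = 4;                   /* illustrative */

    /* scale = Max(4194304 / 80.0, 1.0) = 52428.8 bytes per unit */
    double scale = Max(availspace / (20.0 * num_rollups), 1.0);

    /* k_capacity = floor(4194304 / 52428.8) = 80 knapsack units */
    int k_capacity = (int) floor(availspace / scale);

Each hashable rollup's estimated hashtable size is then quantized to at most k_capacity + 1 units of roughly 51KB before DiscreteKnapsack() picks the set of rollups to hash.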

◆ create_degenerate_grouping_paths()

static void create_degenerate_grouping_paths ( PlannerInfo *root,
RelOptInfo *input_rel,
RelOptInfo *grouped_rel 
)
static

Definition at line 3531 of file planner.c.

3533 {
3534  Query *parse = root->parse;
3535  int nrows;
3536  Path *path;
3537 
3538  nrows = list_length(parse->groupingSets);
3539  if (nrows > 1)
3540  {
3541  /*
3542  * Doesn't seem worthwhile writing code to cons up a generate_series
3543  * or a values scan to emit multiple rows. Instead just make N clones
3544  * and append them. (With a volatile HAVING clause, this means you
3545  * might get between 0 and N output rows. Offhand I think that's
3546  * desired.)
3547  */
3548  List *paths = NIL;
3549 
3550  while (--nrows >= 0)
3551  {
3552  path = (Path *)
3553  create_group_result_path(root, grouped_rel,
3554  grouped_rel->reltarget,
3555  (List *) parse->havingQual);
3556  paths = lappend(paths, path);
3557  }
3558  path = (Path *)
3559  create_append_path(root,
3560  grouped_rel,
3561  paths,
3562  NIL,
3563  NIL,
3564  NULL,
3565  0,
3566  false,
3567  -1);
3568  }
3569  else
3570  {
3571  /* No grouping sets, or just one, so one output row */
3572  path = (Path *)
3573  create_group_result_path(root, grouped_rel,
3574  grouped_rel->reltarget,
3575  (List *) parse->havingQual);
3576  }
3577 
3578  add_path(grouped_rel, path);
3579 }
AppendPath * create_append_path(PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *partial_subpaths, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows)
Definition: pathnode.c:1244
GroupResultPath * create_group_result_path(PlannerInfo *root, RelOptInfo *rel, PathTarget *target, List *havingqual)
Definition: pathnode.c:1504

References add_path(), create_append_path(), create_group_result_path(), lappend(), list_length(), NIL, parse(), PlannerInfo::parse, and RelOptInfo::reltarget.

Referenced by create_grouping_paths().

◆ create_distinct_paths()

static RelOptInfo * create_distinct_paths ( PlannerInfo *root,
RelOptInfo *input_rel 
)
static

Definition at line 4313 of file planner.c.

4314 {
4315  RelOptInfo *distinct_rel;
4316 
4317  /* For now, do all work in the (DISTINCT, NULL) upperrel */
4318  distinct_rel = fetch_upper_rel(root, UPPERREL_DISTINCT, NULL);
4319 
4320  /*
4321  * We don't compute anything at this level, so distinct_rel will be
4322  * parallel-safe if the input rel is parallel-safe. In particular, if
4323  * there is a DISTINCT ON (...) clause, any path for the input_rel will
4324  * output those expressions, and will not be parallel-safe unless those
4325  * expressions are parallel-safe.
4326  */
4327  distinct_rel->consider_parallel = input_rel->consider_parallel;
4328 
4329  /*
4330  * If the input rel belongs to a single FDW, so does the distinct_rel.
4331  */
4332  distinct_rel->serverid = input_rel->serverid;
4333  distinct_rel->userid = input_rel->userid;
4334  distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4335  distinct_rel->fdwroutine = input_rel->fdwroutine;
4336 
4337  /* build distinct paths based on input_rel's pathlist */
4338  create_final_distinct_paths(root, input_rel, distinct_rel);
4339 
4340  /* now build distinct paths based on input_rel's partial_pathlist */
4341  create_partial_distinct_paths(root, input_rel, distinct_rel);
4342 
4343  /* Give a helpful error if we failed to create any paths */
4344  if (distinct_rel->pathlist == NIL)
4345  ereport(ERROR,
4346  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4347  errmsg("could not implement DISTINCT"),
4348  errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4349 
4350  /*
4351  * If there is an FDW that's responsible for all baserels of the query,
4352  * let it consider adding ForeignPaths.
4353  */
4354  if (distinct_rel->fdwroutine &&
4355  distinct_rel->fdwroutine->GetForeignUpperPaths)
4356  distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4357  UPPERREL_DISTINCT,
4358  input_rel,
4359  distinct_rel,
4360  NULL);
4361 
4362  /* Let extensions possibly add some more paths */
4363  if (create_upper_paths_hook)
4364  (*create_upper_paths_hook) (root, UPPERREL_DISTINCT, input_rel,
4365  distinct_rel, NULL);
4366 
4367  /* Now choose the best path(s) */
4368  set_cheapest(distinct_rel);
4369 
4370  return distinct_rel;
4371 }
int errdetail(const char *fmt,...)
Definition: elog.c:1037
int errcode(int sqlerrcode)
Definition: elog.c:693
int errmsg(const char *fmt,...)
Definition: elog.c:904
#define ERROR
Definition: elog.h:33
#define ereport(elevel,...)
Definition: elog.h:143
@ UPPERREL_DISTINCT
Definition: pathnodes.h:75
static void create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *final_distinct_rel)
Definition: planner.c:4382
static RelOptInfo * create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *distinct_rel)
Definition: planner.c:4505
create_upper_paths_hook_type create_upper_paths_hook
Definition: planner.c:77
RelOptInfo * fetch_upper_rel(PlannerInfo *root, UpperRelationKind kind, Relids relids)
Definition: relnode.c:1210
GetForeignUpperPaths_function GetForeignUpperPaths
Definition: fdwapi.h:226
bool useridiscurrent
Definition: pathnodes.h:735
struct FdwRoutine * fdwroutine
Definition: pathnodes.h:737
Oid userid
Definition: pathnodes.h:734
Oid serverid
Definition: pathnodes.h:733

References RelOptInfo::consider_parallel, create_final_distinct_paths(), create_partial_distinct_paths(), create_upper_paths_hook, ereport, errcode(), errdetail(), errmsg(), ERROR, RelOptInfo::fdwroutine, fetch_upper_rel(), FdwRoutine::GetForeignUpperPaths, NIL, RelOptInfo::pathlist, RelOptInfo::serverid, set_cheapest(), UPPERREL_DISTINCT, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by grouping_planner().
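
For orientation, the two strategies considered here can be observed with a hypothetical table t(a, b):

  -- Both hash-based (HashAggregate) and sort-based (Sort/presorted input
  -- plus Unique) paths are added to the UPPERREL_DISTINCT rel; the
  -- cheapest one is chosen by set_cheapest().
  EXPLAIN SELECT DISTINCT a FROM t;
  SET enable_hashagg = off;
  EXPLAIN SELECT DISTINCT a FROM t;

The ereport above is reached only when neither strategy is feasible, for example when the DISTINCT columns mix a type that supports only hashing with one that supports only sorting.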

◆ create_final_distinct_paths()

static RelOptInfo * create_final_distinct_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo distinct_rel 
)
static

Definition at line 4505 of file planner.c.

4507 {
4508  Query *parse = root->parse;
4509  Path *cheapest_input_path = input_rel->cheapest_total_path;
4510  double numDistinctRows;
4511  bool allow_hash;
4512  Path *path;
4513  ListCell *lc;
4514 
4515  /* Estimate number of distinct rows there will be */
4516  if (parse->groupClause || parse->groupingSets || parse->hasAggs ||
4517  root->hasHavingQual)
4518  {
4519  /*
4520  * If there was grouping or aggregation, use the number of input rows
4521  * as the estimated number of DISTINCT rows (ie, assume the input is
4522  * already mostly unique).
4523  */
4524  numDistinctRows = cheapest_input_path->rows;
4525  }
4526  else
4527  {
4528  /*
4529  * Otherwise, the UNIQUE filter has effects comparable to GROUP BY.
4530  */
4531  List *distinctExprs;
4532 
4533  distinctExprs = get_sortgrouplist_exprs(parse->distinctClause,
4534  parse->targetList);
4535  numDistinctRows = estimate_num_groups(root, distinctExprs,
4536  cheapest_input_path->rows,
4537  NULL, NULL);
4538  }
4539 
4540  /*
4541  * Consider sort-based implementations of DISTINCT, if possible.
4542  */
4543  if (grouping_is_sortable(parse->distinctClause))
4544  {
4545  /*
4546  * First, if we have any adequately-presorted paths, just stick a
4547  * Unique node on those. Then consider doing an explicit sort of the
4548  * cheapest input path and Unique'ing that.
4549  *
4550  * When we have DISTINCT ON, we must sort by the more rigorous of
4551  * DISTINCT and ORDER BY, else it won't have the desired behavior.
4552  * Also, if we do have to do an explicit sort, we might as well use
4553  * the more rigorous ordering to avoid a second sort later. (Note
4554  * that the parser will have ensured that one clause is a prefix of
4555  * the other.)
4556  */
4557  List *needed_pathkeys;
4558 
4559  if (parse->hasDistinctOn &&
4560  list_length(root->distinct_pathkeys) <
4561  list_length(root->sort_pathkeys))
4562  needed_pathkeys = root->sort_pathkeys;
4563  else
4564  needed_pathkeys = root->distinct_pathkeys;
4565 
4566  foreach(lc, input_rel->pathlist)
4567  {
4568  Path *path = (Path *) lfirst(lc);
4569 
4570  if (pathkeys_contained_in(needed_pathkeys, path->pathkeys))
4571  {
4572  add_path(distinct_rel, (Path *)
4573  create_upper_unique_path(root, distinct_rel,
4574  path,
4575  list_length(root->distinct_pathkeys),
4576  numDistinctRows));
4577  }
4578  }
4579 
4580  /* For explicit-sort case, always use the more rigorous clause */
4581  if (list_length(root->distinct_pathkeys) <
4582  list_length(root->sort_pathkeys))
4583  {
4584  needed_pathkeys = root->sort_pathkeys;
4585  /* Assert checks that parser didn't mess up... */
4586  Assert(pathkeys_contained_in(root->distinct_pathkeys,
4587  needed_pathkeys));
4588  }
4589  else
4590  needed_pathkeys = root->distinct_pathkeys;
4591 
4592  path = cheapest_input_path;
4593  if (!pathkeys_contained_in(needed_pathkeys, path->pathkeys))
4594  path = (Path *) create_sort_path(root, distinct_rel,
4595  path,
4596  needed_pathkeys,
4597  -1.0);
4598 
4599  add_path(distinct_rel, (Path *)
4600  create_upper_unique_path(root, distinct_rel,
4601  path,
4602  list_length(root->distinct_pathkeys),
4603  numDistinctRows));
4604  }
4605 
4606  /*
4607  * Consider hash-based implementations of DISTINCT, if possible.
4608  *
4609  * If we were not able to make any other types of path, we *must* hash or
4610  * die trying. If we do have other choices, there are two things that
4611  * should prevent selection of hashing: if the query uses DISTINCT ON
4612  * (because it won't really have the expected behavior if we hash), or if
4613  * enable_hashagg is off.
4614  *
4615  * Note: grouping_is_hashable() is much more expensive to check than the
4616  * other gating conditions, so we want to do it last.
4617  */
4618  if (distinct_rel->pathlist == NIL)
4619  allow_hash = true; /* we have no alternatives */
4620  else if (parse->hasDistinctOn || !enable_hashagg)
4621  allow_hash = false; /* policy-based decision not to hash */
4622  else
4623  allow_hash = true; /* default */
4624 
4625  if (allow_hash && grouping_is_hashable(parse->distinctClause))
4626  {
4627  /* Generate hashed aggregate path --- no sort needed */
4628  add_path(distinct_rel, (Path *)
4629  create_agg_path(root,
4630  distinct_rel,
4631  cheapest_input_path,
4632  cheapest_input_path->pathtarget,
4633  AGG_HASHED,
4634  AGGSPLIT_SIMPLE,
4635  parse->distinctClause,
4636  NIL,
4637  NULL,
4638  numDistinctRows));
4639  }
4640 
4641  return distinct_rel;
4642 }
bool enable_hashagg
Definition: costsize.c:142
UpperUniquePath * create_upper_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, int numCols, double numGroups)
Definition: pathnode.c:3045
double estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, List **pgset, EstimationInfo *estinfo)
Definition: selfuncs.c:3368
PathTarget * pathtarget
Definition: pathnodes.h:1195
List * distinct_pathkeys
Definition: pathnodes.h:299
bool hasHavingQual
Definition: pathnodes.h:349
List * sort_pathkeys
Definition: pathnodes.h:300
List * get_sortgrouplist_exprs(List *sgClauses, List *targetList)
Definition: tlist.c:381
bool grouping_is_sortable(List *groupClause)
Definition: tlist.c:529
bool grouping_is_hashable(List *groupClause)
Definition: tlist.c:549

References add_path(), AGG_HASHED, AGGSPLIT_SIMPLE, Assert(), RelOptInfo::cheapest_total_path, create_agg_path(), create_sort_path(), create_upper_unique_path(), PlannerInfo::distinct_pathkeys, enable_hashagg, estimate_num_groups(), get_sortgrouplist_exprs(), grouping_is_hashable(), grouping_is_sortable(), PlannerInfo::hasHavingQual, lfirst, list_length(), NIL, parse(), PlannerInfo::parse, Path::pathkeys, pathkeys_contained_in(), RelOptInfo::pathlist, Path::pathtarget, Path::rows, and PlannerInfo::sort_pathkeys.

Referenced by create_distinct_paths(), and create_partial_distinct_paths().
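
A sketch of the DISTINCT ON rule described in the comments, using a hypothetical table t(a, b):

  -- The parser guarantees that the DISTINCT ON clause is a prefix of
  -- ORDER BY, so sorting by the more rigorous ORDER BY keys serves both:
  -- Sort on (a, b DESC), then Unique on (a).
  SELECT DISTINCT ON (a) a, b FROM t ORDER BY a, b DESC;

Hashing is gated off for DISTINCT ON (allow_hash = false) because a hash table gives no control over which row of each group survives, which is exactly what DISTINCT ON's sort order is meant to determine.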

◆ create_grouping_paths()

static RelOptInfo * create_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget target,
bool  target_parallel_safe,
grouping_sets_data gd 
)
static

Definition at line 3344 of file planner.c.

3349 {
3350  Query *parse = root->parse;
3351  RelOptInfo *grouped_rel;
3352  RelOptInfo *partially_grouped_rel;
3353  AggClauseCosts agg_costs;
3354 
3355  MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
3356  get_agg_clause_costs(root, AGGSPLIT_SIMPLE, &agg_costs);
3357 
3358  /*
3359  * Create grouping relation to hold fully aggregated grouping and/or
3360  * aggregation paths.
3361  */
3362  grouped_rel = make_grouping_rel(root, input_rel, target,
3363  target_parallel_safe, parse->havingQual);
3364 
3365  /*
3366  * Create either paths for a degenerate grouping or paths for ordinary
3367  * grouping, as appropriate.
3368  */
3369  if (is_degenerate_grouping(root))
3370  create_degenerate_grouping_paths(root, input_rel, grouped_rel);
3371  else
3372  {
3373  int flags = 0;
3374  GroupPathExtraData extra;
3375 
3376  /*
3377  * Determine whether it's possible to perform sort-based
3378  * implementations of grouping. (Note that if groupClause is empty,
3379  * grouping_is_sortable() is trivially true, and all the
3380  * pathkeys_contained_in() tests will succeed too, so that we'll
3381  * consider every surviving input path.)
3382  *
3383  * If we have grouping sets, we might be able to sort some but not all
3384  * of them; in this case, we need can_sort to be true as long as we
3385  * must consider any sorted-input plan.
3386  */
3387  if ((gd && gd->rollups != NIL)
3388  || grouping_is_sortable(parse->groupClause))
3389  flags |= GROUPING_CAN_USE_SORT;
3390 
3391  /*
3392  * Determine whether we should consider hash-based implementations of
3393  * grouping.
3394  *
3395  * Hashed aggregation only applies if we're grouping. If we have
3396  * grouping sets, some groups might be hashable but others not; in
3397  * this case we set can_hash true as long as there is nothing globally
3398  * preventing us from hashing (and we should therefore consider plans
3399  * with hashes).
3400  *
3401  * Executor doesn't support hashed aggregation with DISTINCT or ORDER
3402  * BY aggregates. (Doing so would imply storing *all* the input
3403  * values in the hash table, and/or running many sorts in parallel,
3404  * either of which seems like a certain loser.) We similarly don't
3405  * support ordered-set aggregates in hashed aggregation, but that case
3406  * is also included in the numOrderedAggs count.
3407  *
3408  * Note: grouping_is_hashable() is much more expensive to check than
3409  * the other gating conditions, so we want to do it last.
3410  */
3411  if ((parse->groupClause != NIL &&
3412  root->numOrderedAggs == 0 &&
3413  (gd ? gd->any_hashable : grouping_is_hashable(parse->groupClause))))
3414  flags |= GROUPING_CAN_USE_HASH;
3415 
3416  /*
3417  * Determine whether partial aggregation is possible.
3418  */
3419  if (can_partial_agg(root))
3420  flags |= GROUPING_CAN_PARTIAL_AGG;
3421 
3422  extra.flags = flags;
3423  extra.target_parallel_safe = target_parallel_safe;
3424  extra.havingQual = parse->havingQual;
3425  extra.targetList = parse->targetList;
3426  extra.partial_costs_set = false;
3427 
3428  /*
3429  * Determine whether partitionwise aggregation is in theory possible.
3430  * It can be disabled by the user, and for now, we don't try to
3431  * support grouping sets. create_ordinary_grouping_paths() will check
3432  * additional conditions, such as whether input_rel is partitioned.
3433  */
3434  if (enable_partitionwise_aggregate && !parse->groupingSets)
3435  extra.patype = PARTITIONWISE_AGGREGATE_FULL;
3436  else
3437  extra.patype = PARTITIONWISE_AGGREGATE_NONE;
3438 
3439  create_ordinary_grouping_paths(root, input_rel, grouped_rel,
3440  &agg_costs, gd, &extra,
3441  &partially_grouped_rel);
3442  }
3443 
3444  set_cheapest(grouped_rel);
3445  return grouped_rel;
3446 }
#define MemSet(start, val, len)
Definition: c.h:1008
bool enable_partitionwise_aggregate
Definition: costsize.c:150
@ PARTITIONWISE_AGGREGATE_FULL
Definition: pathnodes.h:2590
@ PARTITIONWISE_AGGREGATE_NONE
Definition: pathnodes.h:2589
#define GROUPING_CAN_PARTIAL_AGG
Definition: pathnodes.h:2574
static void create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel)
Definition: planner.c:3531
static bool is_degenerate_grouping(PlannerInfo *root)
Definition: planner.c:3510
static void create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, GroupPathExtraData *extra, RelOptInfo **partially_grouped_rel_p)
Definition: planner.c:3595
static bool can_partial_agg(PlannerInfo *root)
Definition: planner.c:7144
static RelOptInfo * make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, Node *havingQual)
Definition: planner.c:3457
void get_agg_clause_costs(PlannerInfo *root, AggSplit aggsplit, AggClauseCosts *costs)
Definition: prepagg.c:538
PartitionwiseAggregateType patype
Definition: pathnodes.h:2619
int numOrderedAggs
Definition: pathnodes.h:360

References AGGSPLIT_SIMPLE, grouping_sets_data::any_hashable, can_partial_agg(), create_degenerate_grouping_paths(), create_ordinary_grouping_paths(), enable_partitionwise_aggregate, GroupPathExtraData::flags, get_agg_clause_costs(), GROUPING_CAN_PARTIAL_AGG, GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, grouping_is_hashable(), grouping_is_sortable(), GroupPathExtraData::havingQual, is_degenerate_grouping(), make_grouping_rel(), MemSet, NIL, PlannerInfo::numOrderedAggs, parse(), PlannerInfo::parse, GroupPathExtraData::partial_costs_set, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, GroupPathExtraData::patype, grouping_sets_data::rollups, set_cheapest(), GroupPathExtraData::target_parallel_safe, and GroupPathExtraData::targetList.

Referenced by grouping_planner().
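
The gating flags can be seen in action with hypothetical queries against a table t(a, b):

  -- Plain grouping: both GROUPING_CAN_USE_SORT and GROUPING_CAN_USE_HASH
  -- may be set, and the planner costs both strategies.
  EXPLAIN SELECT a, count(*) FROM t GROUP BY a;
  -- An ORDER BY inside an aggregate makes numOrderedAggs > 0, so
  -- GROUPING_CAN_USE_HASH is not set and only sorted grouping is tried.
  EXPLAIN SELECT a, array_agg(b ORDER BY b) FROM t GROUP BY a;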

◆ create_one_window_path()

static void create_one_window_path ( PlannerInfo root,
RelOptInfo window_rel,
Path path,
PathTarget input_target,
PathTarget output_target,
WindowFuncLists wflists,
List activeWindows 
)
static

Definition at line 4183 of file planner.c.

4190 {
4191  PathTarget *window_target;
4192  ListCell *l;
4193  List *topqual = NIL;
4194 
4195  /*
4196  * Since each window clause could require a different sort order, we stack
4197  * up a WindowAgg node for each clause, with sort steps between them as
4198  * needed. (We assume that select_active_windows chose a good order for
4199  * executing the clauses in.)
4200  *
4201  * input_target should contain all Vars and Aggs needed for the result.
4202  * (In some cases we wouldn't need to propagate all of these all the way
4203  * to the top, since they might only be needed as inputs to WindowFuncs.
4204  * It's probably not worth trying to optimize that though.) It must also
4205  * contain all window partitioning and sorting expressions, to ensure
4206  * they're computed only once at the bottom of the stack (that's critical
4207  * for volatile functions). As we climb up the stack, we'll add outputs
4208  * for the WindowFuncs computed at each level.
4209  */
4210  window_target = input_target;
4211 
4212  foreach(l, activeWindows)
4213  {
4214  WindowClause *wc = lfirst_node(WindowClause, l);
4215  List *window_pathkeys;
4216  int presorted_keys;
4217  bool is_sorted;
4218  bool topwindow;
4219 
4220  window_pathkeys = make_pathkeys_for_window(root,
4221  wc,
4222  root->processed_tlist);
4223 
4224  is_sorted = pathkeys_count_contained_in(window_pathkeys,
4225  path->pathkeys,
4226  &presorted_keys);
4227 
4228  /* Sort if necessary */
4229  if (!is_sorted)
4230  {
4231  /*
4232  * No presorted keys or incremental sort disabled, just perform a
4233  * complete sort.
4234  */
4235  if (presorted_keys == 0 || !enable_incremental_sort)
4236  path = (Path *) create_sort_path(root, window_rel,
4237  path,
4238  window_pathkeys,
4239  -1.0);
4240  else
4241  {
4242  /*
4243  * Since we have presorted keys and incremental sort is
4244  * enabled, just use incremental sort.
4245  */
4246  path = (Path *) create_incremental_sort_path(root,
4247  window_rel,
4248  path,
4249  window_pathkeys,
4250  presorted_keys,
4251  -1.0);
4252  }
4253  }
4254 
4255  if (lnext(activeWindows, l))
4256  {
4257  /*
4258  * Add the current WindowFuncs to the output target for this
4259  * intermediate WindowAggPath. We must copy window_target to
4260  * avoid changing the previous path's target.
4261  *
4262  * Note: a WindowFunc adds nothing to the target's eval costs; but
4263  * we do need to account for the increase in tlist width.
4264  */
4265  ListCell *lc2;
4266 
4267  window_target = copy_pathtarget(window_target);
4268  foreach(lc2, wflists->windowFuncs[wc->winref])
4269  {
4270  WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4271 
4272  add_column_to_pathtarget(window_target, (Expr *) wfunc, 0);
4273  window_target->width += get_typavgwidth(wfunc->wintype, -1);
4274  }
4275  }
4276  else
4277  {
4278  /* Install the goal target in the topmost WindowAgg */
4279  window_target = output_target;
4280  }
4281 
4282  /* mark the final item in the list as the top-level window */
4283  topwindow = foreach_current_index(l) == list_length(activeWindows) - 1;
4284 
4285  /*
4286  * Accumulate all of the runConditions from each intermediate
4287  * WindowClause. The top-level WindowAgg must pass these as a qual so
4288  * that it filters out unwanted tuples correctly.
4289  */
4290  if (!topwindow)
4291  topqual = list_concat(topqual, wc->runCondition);
4292 
4293  path = (Path *)
4294  create_windowagg_path(root, window_rel, path, window_target,
4295  wflists->windowFuncs[wc->winref],
4296  wc, topwindow ? topqual : NIL, topwindow);
4297  }
4298 
4299  add_path(window_rel, path);
4300 }
int32 get_typavgwidth(Oid typid, int32 typmod)
Definition: lsyscache.c:2535
WindowAggPath * create_windowagg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *windowFuncs, WindowClause *winclause, List *qual, bool topwindow)
Definition: pathnode.c:3400
#define foreach_current_index(cell)
Definition: pg_list.h:382
static List * make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc, List *tlist)
Definition: planner.c:5458
List * processed_tlist
Definition: pathnodes.h:322
List * runCondition
Definition: parsenodes.h:1406
List ** windowFuncs
Definition: clauses.h:23
Oid wintype
Definition: primnodes.h:396
void add_column_to_pathtarget(PathTarget *target, Expr *expr, Index sortgroupref)
Definition: tlist.c:684

References add_column_to_pathtarget(), add_path(), copy_pathtarget(), create_incremental_sort_path(), create_sort_path(), create_windowagg_path(), enable_incremental_sort, foreach_current_index, get_typavgwidth(), lfirst_node, list_concat(), list_length(), lnext(), make_pathkeys_for_window(), NIL, Path::pathkeys, pathkeys_count_contained_in(), PlannerInfo::processed_tlist, WindowClause::runCondition, PathTarget::width, WindowFuncLists::windowFuncs, WindowClause::winref, and WindowFunc::wintype.

Referenced by create_window_paths().
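
A small example of the WindowAgg stacking described above, with a hypothetical table t(a, b, c); the two clauses need different orderings, so a Sort (or Incremental Sort) is placed between the two WindowAgg nodes:

  SELECT rank() OVER (ORDER BY a),
         sum(b) OVER (PARTITION BY c)
  FROM t;
  -- select_active_windows() picks the order in which the clauses are
  -- evaluated; only the topmost WindowAgg applies the accumulated
  -- runCondition quals.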

◆ create_ordered_paths()

static RelOptInfo * create_ordered_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget target,
bool  target_parallel_safe,
double  limit_tuples 
)
static

Definition at line 4662 of file planner.c.

4667 {
4668  Path *cheapest_input_path = input_rel->cheapest_total_path;
4669  RelOptInfo *ordered_rel;
4670  ListCell *lc;
4671 
4672  /* For now, do all work in the (ORDERED, NULL) upperrel */
4673  ordered_rel = fetch_upper_rel(root, UPPERREL_ORDERED, NULL);
4674 
4675  /*
4676  * If the input relation is not parallel-safe, then the ordered relation
4677  * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
4678  * target list is parallel-safe.
4679  */
4680  if (input_rel->consider_parallel && target_parallel_safe)
4681  ordered_rel->consider_parallel = true;
4682 
4683  /*
4684  * If the input rel belongs to a single FDW, so does the ordered_rel.
4685  */
4686  ordered_rel->serverid = input_rel->serverid;
4687  ordered_rel->userid = input_rel->userid;
4688  ordered_rel->useridiscurrent = input_rel->useridiscurrent;
4689  ordered_rel->fdwroutine = input_rel->fdwroutine;
4690 
4691  foreach(lc, input_rel->pathlist)
4692  {
4693  Path *input_path = (Path *) lfirst(lc);
4694  Path *sorted_path = input_path;
4695  bool is_sorted;
4696  int presorted_keys;
4697 
4698  is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
4699  input_path->pathkeys, &presorted_keys);
4700 
4701  if (is_sorted)
4702  {
4703  /* Use the input path as is, but add a projection step if needed */
4704  if (sorted_path->pathtarget != target)
4705  sorted_path = apply_projection_to_path(root, ordered_rel,
4706  sorted_path, target);
4707 
4708  add_path(ordered_rel, sorted_path);
4709  }
4710  else
4711  {
4712  /*
4713  * Try adding an explicit sort, but only to the cheapest total
4714  * path since a full sort should generally add the same cost to
4715  * all paths.
4716  */
4717  if (input_path == cheapest_input_path)
4718  {
4719  /*
4720  * Sort the cheapest input path. An explicit sort here can
4721  * take advantage of LIMIT.
4722  */
4723  sorted_path = (Path *) create_sort_path(root,
4724  ordered_rel,
4725  input_path,
4726  root->sort_pathkeys,
4727  limit_tuples);
4728  /* Add projection step if needed */
4729  if (sorted_path->pathtarget != target)
4730  sorted_path = apply_projection_to_path(root, ordered_rel,
4731  sorted_path, target);
4732 
4733  add_path(ordered_rel, sorted_path);
4734  }
4735 
4736  /*
4737  * If incremental sort is enabled, then try it as well. Unlike
4738  * with regular sorts, we can't just look at the cheapest path,
4739  * because the cost of incremental sort depends on how well
4740  * presorted the path is. Additionally incremental sort may enable
4741  * a cheaper startup path to win out despite higher total cost.
4742  */
4743  if (!enable_incremental_sort)
4744  continue;
4745 
4746  /* Likewise, if the path can't be used for incremental sort. */
4747  if (!presorted_keys)
4748  continue;
4749 
4750  /* Also consider incremental sort. */
4751  sorted_path = (Path *) create_incremental_sort_path(root,
4752  ordered_rel,
4753  input_path,
4754  root->sort_pathkeys,
4755  presorted_keys,
4756  limit_tuples);
4757 
4758  /* Add projection step if needed */
4759  if (sorted_path->pathtarget != target)
4760  sorted_path = apply_projection_to_path(root, ordered_rel,
4761  sorted_path, target);
4762 
4763  add_path(ordered_rel, sorted_path);
4764  }
4765  }
4766 
4767  /*
4768  * generate_gather_paths() will have already generated a simple Gather
4769  * path for the best parallel path, if any, and the loop above will have
4770  * considered sorting it. Similarly, generate_gather_paths() will also
4771  * have generated order-preserving Gather Merge plans which can be used
4772  * without sorting if they happen to match the sort_pathkeys, and the loop
4773  * above will have handled those as well. However, there's one more
4774  * possibility: it may make sense to sort the cheapest partial path
4775  * according to the required output order and then use Gather Merge.
4776  */
4777  if (ordered_rel->consider_parallel && root->sort_pathkeys != NIL &&
4778  input_rel->partial_pathlist != NIL)
4779  {
4780  Path *cheapest_partial_path;
4781 
4782  cheapest_partial_path = linitial(input_rel->partial_pathlist);
4783 
4784  /*
4785  * If cheapest partial path doesn't need a sort, this is redundant
4786  * with what's already been tried.
4787  */
4788  if (!pathkeys_contained_in(root->sort_pathkeys,
4789  cheapest_partial_path->pathkeys))
4790  {
4791  Path *path;
4792  double total_groups;
4793 
4794  path = (Path *) create_sort_path(root,
4795  ordered_rel,
4796  cheapest_partial_path,
4797  root->sort_pathkeys,
4798  limit_tuples);
4799 
4800  total_groups = cheapest_partial_path->rows *
4801  cheapest_partial_path->parallel_workers;
4802  path = (Path *)
4803  create_gather_merge_path(root, ordered_rel,
4804  path,
4805  path->pathtarget,
4806  root->sort_pathkeys, NULL,
4807  &total_groups);
4808 
4809  /* Add projection step if needed */
4810  if (path->pathtarget != target)
4811  path = apply_projection_to_path(root, ordered_rel,
4812  path, target);
4813 
4814  add_path(ordered_rel, path);
4815  }
4816 
4817  /*
4818  * Consider incremental sort with a gather merge on partial paths.
4819  *
4820  * We can also skip the entire loop when we only have a single-item
4821  * sort_pathkeys because then we can't possibly have a presorted
4822  * prefix of the list without having the list be fully sorted.
4823  */
4824  if (enable_incremental_sort && list_length(root->sort_pathkeys) > 1)
4825  {
4826  ListCell *lc;
4827 
4828  foreach(lc, input_rel->partial_pathlist)
4829  {
4830  Path *input_path = (Path *) lfirst(lc);
4831  Path *sorted_path;
4832  bool is_sorted;
4833  int presorted_keys;
4834  double total_groups;
4835 
4836  /*
4837  * We don't care if this is the cheapest partial path - we
4838  * can't simply skip it, because it may be partially sorted in
4839  * which case we want to consider adding incremental sort
4840  * (instead of full sort, which is what happens above).
4841  */
4842 
4843  is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
4844  input_path->pathkeys,
4845  &presorted_keys);
4846 
4847  /* No point in adding incremental sort on fully sorted paths. */
4848  if (is_sorted)
4849  continue;
4850 
4851  if (presorted_keys == 0)
4852  continue;
4853 
4854  /* Since we have presorted keys, consider incremental sort. */
4855  sorted_path = (Path *) create_incremental_sort_path(root,
4856  ordered_rel,
4857  input_path,
4858  root->sort_pathkeys,
4859  presorted_keys,
4860  limit_tuples);
4861  total_groups = input_path->rows *
4862  input_path->parallel_workers;
4863  sorted_path = (Path *)
4864  create_gather_merge_path(root, ordered_rel,
4865  sorted_path,
4866  sorted_path->pathtarget,
4867  root->sort_pathkeys, NULL,
4868  &total_groups);
4869 
4870  /* Add projection step if needed */
4871  if (sorted_path->pathtarget != target)
4872  sorted_path = apply_projection_to_path(root, ordered_rel,
4873  sorted_path, target);
4874 
4875  add_path(ordered_rel, sorted_path);
4876  }
4877  }
4878  }
4879 
4880  /*
4881  * If there is an FDW that's responsible for all baserels of the query,
4882  * let it consider adding ForeignPaths.
4883  */
4884  if (ordered_rel->fdwroutine &&
4885  ordered_rel->fdwroutine->GetForeignUpperPaths)
4886  ordered_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_ORDERED,
4887  input_rel, ordered_rel,
4888  NULL);
4889 
4890  /* Let extensions possibly add some more paths */
4891  if (create_upper_paths_hook)
4892  (*create_upper_paths_hook) (root, UPPERREL_ORDERED,
4893  input_rel, ordered_rel, NULL);
4894 
4895  /*
4896  * No need to bother with set_cheapest here; grouping_planner does not
4897  * need us to do it.
4898  */
4899  Assert(ordered_rel->pathlist != NIL);
4900 
4901  return ordered_rel;
4902 }
GatherMergePath * create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *pathkeys, Relids required_outer, double *rows)
Definition: pathnode.c:1861
@ UPPERREL_ORDERED
Definition: pathnodes.h:76
int parallel_workers
Definition: pathnodes.h:1201

References add_path(), apply_projection_to_path(), Assert(), RelOptInfo::cheapest_total_path, RelOptInfo::consider_parallel, create_gather_merge_path(), create_incremental_sort_path(), create_sort_path(), create_upper_paths_hook, enable_incremental_sort, RelOptInfo::fdwroutine, fetch_upper_rel(), FdwRoutine::GetForeignUpperPaths, lfirst, linitial, list_length(), NIL, Path::parallel_workers, RelOptInfo::partial_pathlist, Path::pathkeys, pathkeys_contained_in(), pathkeys_count_contained_in(), RelOptInfo::pathlist, Path::pathtarget, Path::rows, RelOptInfo::serverid, PlannerInfo::sort_pathkeys, UPPERREL_ORDERED, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by grouping_planner().
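
The strategies above (reuse a presorted path, fully sort the cheapest path, incrementally sort a partially presorted path) can be compared with a hypothetical table t that has an index on (a):

  SET enable_incremental_sort = on;
  EXPLAIN SELECT * FROM t ORDER BY a, b;  -- Incremental Sort: only groups
                                          -- of ties in a are sorted by b
  SET enable_incremental_sort = off;
  EXPLAIN SELECT * FROM t ORDER BY a, b;  -- full Sort of the cheapest path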

◆ create_ordinary_grouping_paths()

static void create_ordinary_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo grouped_rel,
const AggClauseCosts agg_costs,
grouping_sets_data gd,
GroupPathExtraData extra,
RelOptInfo **  partially_grouped_rel_p 
)
static

Definition at line 3595 of file planner.c.

3601 {
3602  Path *cheapest_path = input_rel->cheapest_total_path;
3603  RelOptInfo *partially_grouped_rel = NULL;
3604  double dNumGroups;
3605  PartitionwiseAggregateType patype = PARTITIONWISE_AGGREGATE_NONE;
3606 
3607  /*
3608  * If this is the topmost grouping relation or if the parent relation is
3609  * doing some form of partitionwise aggregation, then we may be able to do
3610  * it at this level also. However, if the input relation is not
3611  * partitioned, partitionwise aggregate is impossible.
3612  */
3613  if (extra->patype != PARTITIONWISE_AGGREGATE_NONE &&
3614  IS_PARTITIONED_REL(input_rel))
3615  {
3616  /*
3617  * If this is the topmost relation or if the parent relation is doing
3618  * full partitionwise aggregation, then we can do full partitionwise
3619  * aggregation provided that the GROUP BY clause contains all of the
3620  * partitioning columns at this level. Otherwise, we can do at most
3621  * partial partitionwise aggregation. But if partial aggregation is
3622  * not supported in general then we can't use it for partitionwise
3623  * aggregation either.
3624  */
3625  if (extra->patype == PARTITIONWISE_AGGREGATE_FULL &&
3626  group_by_has_partkey(input_rel, extra->targetList,
3627  root->parse->groupClause))
3628  patype = PARTITIONWISE_AGGREGATE_FULL;
3629  else if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
3630  patype = PARTITIONWISE_AGGREGATE_PARTIAL;
3631  else
3632  patype = PARTITIONWISE_AGGREGATE_NONE;
3633  }
3634 
3635  /*
3636  * Before generating paths for grouped_rel, we first generate any possible
3637  * partially grouped paths; that way, later code can easily consider both
3638  * parallel and non-parallel approaches to grouping.
3639  */
3640  if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
3641  {
3642  bool force_rel_creation;
3643 
3644  /*
3645  * If we're doing partitionwise aggregation at this level, force
3646  * creation of a partially_grouped_rel so we can add partitionwise
3647  * paths to it.
3648  */
3649  force_rel_creation = (patype == PARTITIONWISE_AGGREGATE_PARTIAL);
3650 
3651  partially_grouped_rel =
3652  create_partial_grouping_paths(root,
3653  grouped_rel,
3654  input_rel,
3655  gd,
3656  extra,
3657  force_rel_creation);
3658  }
3659 
3660  /* Set out parameter. */
3661  *partially_grouped_rel_p = partially_grouped_rel;
3662 
3663  /* Apply partitionwise aggregation technique, if possible. */
3664  if (patype != PARTITIONWISE_AGGREGATE_NONE)
3665  create_partitionwise_grouping_paths(root, input_rel, grouped_rel,
3666  partially_grouped_rel, agg_costs,
3667  gd, patype, extra);
3668 
3669  /* If we are doing partial aggregation only, return. */
3670  if (extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
3671  {
3672  Assert(partially_grouped_rel);
3673 
3674  if (partially_grouped_rel->pathlist)
3675  set_cheapest(partially_grouped_rel);
3676 
3677  return;
3678  }
3679 
3680  /* Gather any partially grouped partial paths. */
3681  if (partially_grouped_rel && partially_grouped_rel->partial_pathlist)
3682  {
3683  gather_grouping_paths(root, partially_grouped_rel);
3684  set_cheapest(partially_grouped_rel);
3685  }
3686 
3687  /*
3688  * Estimate number of groups.
3689  */
3690  dNumGroups = get_number_of_groups(root,
3691  cheapest_path->rows,
3692  gd,
3693  extra->targetList);
3694 
3695  /* Build final grouping paths */
3696  add_paths_to_grouping_rel(root, input_rel, grouped_rel,
3697  partially_grouped_rel, agg_costs, gd,
3698  dNumGroups, extra);
3699 
3700  /* Give a helpful error if we failed to find any implementation */
3701  if (grouped_rel->pathlist == NIL)
3702  ereport(ERROR,
3703  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3704  errmsg("could not implement GROUP BY"),
3705  errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
3706 
3707  /*
3708  * If there is an FDW that's responsible for all baserels of the query,
3709  * let it consider adding ForeignPaths.
3710  */
3711  if (grouped_rel->fdwroutine &&
3712  grouped_rel->fdwroutine->GetForeignUpperPaths)
3713  grouped_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_GROUP_AGG,
3714  input_rel, grouped_rel,
3715  extra);
3716 
3717  /* Let extensions possibly add some more paths */
3718  if (create_upper_paths_hook)
3719  (*create_upper_paths_hook) (root, UPPERREL_GROUP_AGG,
3720  input_rel, grouped_rel,
3721  extra);
3722 }
PartitionwiseAggregateType
Definition: pathnodes.h:2588
@ PARTITIONWISE_AGGREGATE_PARTIAL
Definition: pathnodes.h:2591
@ UPPERREL_GROUP_AGG
Definition: pathnodes.h:72
static RelOptInfo * create_partial_grouping_paths(PlannerInfo *root, RelOptInfo *grouped_rel, RelOptInfo *input_rel, grouping_sets_data *gd, GroupPathExtraData *extra, bool force_rel_creation)
Definition: planner.c:6612
static void add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, double dNumGroups, GroupPathExtraData *extra)
Definition: planner.c:6211
static double get_number_of_groups(PlannerInfo *root, double path_rows, grouping_sets_data *gd, List *target_list)
Definition: planner.c:3222
static void create_partitionwise_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, PartitionwiseAggregateType patype, GroupPathExtraData *extra)
Definition: planner.c:7422
static bool group_by_has_partkey(RelOptInfo *input_rel, List *targetList, List *groupClause)
Definition: planner.c:7566
List * groupClause
Definition: parsenodes.h:163

References add_paths_to_grouping_rel(), Assert(), RelOptInfo::cheapest_total_path, create_partial_grouping_paths(), create_partitionwise_grouping_paths(), create_upper_paths_hook, ereport, errcode(), errdetail(), errmsg(), ERROR, RelOptInfo::fdwroutine, GroupPathExtraData::flags, gather_grouping_paths(), get_number_of_groups(), FdwRoutine::GetForeignUpperPaths, group_by_has_partkey(), Query::groupClause, GROUPING_CAN_PARTIAL_AGG, IS_PARTITIONED_REL, NIL, PlannerInfo::parse, RelOptInfo::partial_pathlist, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, PARTITIONWISE_AGGREGATE_PARTIAL, RelOptInfo::pathlist, GroupPathExtraData::patype, Path::rows, set_cheapest(), GroupPathExtraData::targetList, and UPPERREL_GROUP_AGG.

Referenced by create_grouping_paths(), and create_partitionwise_grouping_paths().
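
The full-versus-partial decision at the top of this function can be illustrated with a hypothetical table pt partitioned BY RANGE (a):

  SET enable_partitionwise_aggregate = on;
  -- GROUP BY includes the partition key: PARTITIONWISE_AGGREGATE_FULL,
  -- so each partition can be aggregated completely on its own.
  EXPLAIN SELECT a, count(*) FROM pt GROUP BY a;
  -- GROUP BY does not cover the partition key: at most
  -- PARTITIONWISE_AGGREGATE_PARTIAL, so per-partition partial aggregates
  -- must be combined and finalized above the Append.
  EXPLAIN SELECT b, count(*) FROM pt GROUP BY b;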

◆ create_partial_distinct_paths()

static void create_partial_distinct_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo final_distinct_rel 
)
static

Definition at line 4382 of file planner.c.

4384 {
4385  RelOptInfo *partial_distinct_rel;
4386  Query *parse;
4387  List *distinctExprs;
4388  double numDistinctRows;
4389  Path *cheapest_partial_path;
4390  ListCell *lc;
4391 
4392  /* nothing to do when there are no partial paths in the input rel */
4393  if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
4394  return;
4395 
4396  parse = root->parse;
4397 
4398  /* can't do parallel DISTINCT ON */
4399  if (parse->hasDistinctOn)
4400  return;
4401 
4402  partial_distinct_rel = fetch_upper_rel(root, UPPERREL_PARTIAL_DISTINCT,
4403  NULL);
4404  partial_distinct_rel->reltarget = root->upper_targets[UPPERREL_PARTIAL_DISTINCT];
4405  partial_distinct_rel->consider_parallel = input_rel->consider_parallel;
4406 
4407  /*
4408  * If input_rel belongs to a single FDW, so does the partial_distinct_rel.
4409  */
4410  partial_distinct_rel->serverid = input_rel->serverid;
4411  partial_distinct_rel->userid = input_rel->userid;
4412  partial_distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4413  partial_distinct_rel->fdwroutine = input_rel->fdwroutine;
4414 
4415  cheapest_partial_path = linitial(input_rel->partial_pathlist);
4416 
4417  distinctExprs = get_sortgrouplist_exprs(parse->distinctClause,
4418  parse->targetList);
4419 
4420  /* estimate how many distinct rows we'll get from each worker */
4421  numDistinctRows = estimate_num_groups(root, distinctExprs,
4422  cheapest_partial_path->rows,
4423  NULL, NULL);
4424 
4425  /* first try adding unique paths atop of sorted paths */
4426  if (grouping_is_sortable(parse->distinctClause))
4427  {
4428  foreach(lc, input_rel->partial_pathlist)
4429  {
4430  Path *path = (Path *) lfirst(lc);
4431 
4432  if (pathkeys_contained_in(root->distinct_pathkeys, path->pathkeys))
4433  {
4434  add_partial_path(partial_distinct_rel, (Path *)
4435  create_upper_unique_path(root,
4436  partial_distinct_rel,
4437  path,
4438  list_length(root->distinct_pathkeys),
4439  numDistinctRows));
4440  }
4441  }
4442  }
4443 
4444  /*
4445  * Now try hash aggregate paths, if enabled and hashing is possible. Since
4446  * we're not on the hook to ensure we do our best to create at least one
4447  * path here, we treat enable_hashagg as a hard off-switch rather than the
4448  * slightly softer variant in create_final_distinct_paths.
4449  */
4450  if (enable_hashagg && grouping_is_hashable(parse->distinctClause))
4451  {
4452  add_partial_path(partial_distinct_rel, (Path *)
4453  create_agg_path(root,
4454  partial_distinct_rel,
4455  cheapest_partial_path,
4456  cheapest_partial_path->pathtarget,
4457  AGG_HASHED,
4458  AGGSPLIT_SIMPLE,
4459  parse->distinctClause,
4460  NIL,
4461  NULL,
4462  numDistinctRows));
4463  }
4464 
4465  /*
4466  * If there is an FDW that's responsible for all baserels of the query,
4467  * let it consider adding ForeignPaths.
4468  */
4469  if (partial_distinct_rel->fdwroutine &&
4470  partial_distinct_rel->fdwroutine->GetForeignUpperPaths)
4471  partial_distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4472  UPPERREL_PARTIAL_DISTINCT,
4473  input_rel,
4474  partial_distinct_rel,
4475  NULL);
4476 
4477  /* Let extensions possibly add some more partial paths */
4478  if (create_upper_paths_hook)
4479  (*create_upper_paths_hook) (root, UPPERREL_PARTIAL_DISTINCT,
4480  input_rel, partial_distinct_rel, NULL);
4481 
4482  if (partial_distinct_rel->partial_pathlist != NIL)
4483  {
4484  generate_gather_paths(root, partial_distinct_rel, true);
4485  set_cheapest(partial_distinct_rel);
4486 
4487  /*
4488  * Finally, create paths to distinctify the final result. This step
4489  * is needed to remove any duplicates due to combining rows from
4490  * parallel workers.
4491  */
4492  create_final_distinct_paths(root, partial_distinct_rel,
4493  final_distinct_rel);
4494  }
4495 }
void generate_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
Definition: allpaths.c:2969
void add_partial_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:749
@ UPPERREL_PARTIAL_DISTINCT
Definition: pathnodes.h:74
struct PathTarget * upper_targets[UPPERREL_FINAL+1]
Definition: pathnodes.h:311

References add_partial_path(), AGG_HASHED, AGGSPLIT_SIMPLE, RelOptInfo::consider_parallel, create_agg_path(), create_final_distinct_paths(), create_upper_paths_hook, create_upper_unique_path(), PlannerInfo::distinct_pathkeys, enable_hashagg, estimate_num_groups(), RelOptInfo::fdwroutine, fetch_upper_rel(), generate_gather_paths(), get_sortgrouplist_exprs(), FdwRoutine::GetForeignUpperPaths, grouping_is_hashable(), grouping_is_sortable(), lfirst, linitial, list_length(), NIL, parse(), PlannerInfo::parse, RelOptInfo::partial_pathlist, Path::pathkeys, pathkeys_contained_in(), Path::pathtarget, RelOptInfo::reltarget, Path::rows, RelOptInfo::serverid, set_cheapest(), PlannerInfo::upper_targets, UPPERREL_PARTIAL_DISTINCT, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by create_distinct_paths().
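
A sketch of the two-level deduplication this function sets up, assuming a hypothetical table big_t large enough to get a parallel plan: each worker first removes the duplicates it sees (UPPERREL_PARTIAL_DISTINCT), a Gather collects the per-worker results, and create_final_distinct_paths() then removes the remaining duplicates across workers:

  EXPLAIN SELECT DISTINCT a FROM big_t;
  -- DISTINCT ON is order-sensitive, so per the early return above it is
  -- never attempted in parallel:
  EXPLAIN SELECT DISTINCT ON (a) a, b FROM big_t ORDER BY a, b;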

◆ create_partial_grouping_paths()

static RelOptInfo * create_partial_grouping_paths ( PlannerInfo root,
RelOptInfo grouped_rel,
RelOptInfo input_rel,
grouping_sets_data gd,
GroupPathExtraData extra,
bool  force_rel_creation 
)
static

Definition at line 6612 of file planner.c.

6618 {
6619  Query *parse = root->parse;
6620  RelOptInfo *partially_grouped_rel;
6621  AggClauseCosts *agg_partial_costs = &extra->agg_partial_costs;
6622  AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
6623  Path *cheapest_partial_path = NULL;
6624  Path *cheapest_total_path = NULL;
6625  double dNumPartialGroups = 0;
6626  double dNumPartialPartialGroups = 0;
6627  ListCell *lc;
6628  bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
6629  bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
6630 
6631  /*
6632  * Consider whether we should generate partially aggregated non-partial
6633  * paths. We can only do this if we have a non-partial path, and only if
6634  * the parent of the input rel is performing partial partitionwise
6635  * aggregation. (Note that extra->patype is the type of partitionwise
6636  * aggregation being used at the parent level, not this level.)
6637  */
6638  if (input_rel->pathlist != NIL &&
6639  extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
6640  cheapest_total_path = input_rel->cheapest_total_path;
6641 
6642  /*
6643  * If parallelism is possible for grouped_rel, then we should consider
6644  * generating partially-grouped partial paths. However, if the input rel
6645  * has no partial paths, then we can't.
6646  */
6647  if (grouped_rel->consider_parallel && input_rel->partial_pathlist != NIL)
6648  cheapest_partial_path = linitial(input_rel->partial_pathlist);
6649 
6650  /*
6651  * If we can't partially aggregate partial paths, and we can't partially
6652  * aggregate non-partial paths, then don't bother creating the new
6653  * RelOptInfo at all, unless the caller specified force_rel_creation.
6654  */
6655  if (cheapest_total_path == NULL &&
6656  cheapest_partial_path == NULL &&
6657  !force_rel_creation)
6658  return NULL;
6659 
6660  /*
6661  * Build a new upper relation to represent the result of partially
6662  * aggregating the rows from the input relation.
6663  */
6664  partially_grouped_rel = fetch_upper_rel(root,
6665  UPPERREL_PARTIAL_GROUP_AGG,
6666  grouped_rel->relids);
6667  partially_grouped_rel->consider_parallel =
6668  grouped_rel->consider_parallel;
6669  partially_grouped_rel->reloptkind = grouped_rel->reloptkind;
6670  partially_grouped_rel->serverid = grouped_rel->serverid;
6671  partially_grouped_rel->userid = grouped_rel->userid;
6672  partially_grouped_rel->useridiscurrent = grouped_rel->useridiscurrent;
6673  partially_grouped_rel->fdwroutine = grouped_rel->fdwroutine;
6674 
6675  /*
6676  * Build target list for partial aggregate paths. These paths cannot just
6677  * emit the same tlist as regular aggregate paths, because (1) we must
6678  * include Vars and Aggrefs needed in HAVING, which might not appear in
6679  * the result tlist, and (2) the Aggrefs must be set in partial mode.
6680  */
6681  partially_grouped_rel->reltarget =
6682  make_partial_grouping_target(root, grouped_rel->reltarget,
6683  extra->havingQual);
6684 
6685  if (!extra->partial_costs_set)
6686  {
6687  /*
6688  * Collect statistics about aggregates for estimating costs of
6689  * performing aggregation in parallel.
6690  */
6691  MemSet(agg_partial_costs, 0, sizeof(AggClauseCosts));
6692  MemSet(agg_final_costs, 0, sizeof(AggClauseCosts));
6693  if (parse->hasAggs)
6694  {
6695  /* partial phase */
6696  get_agg_clause_costs(root, AGGSPLIT_INITIAL_SERIAL,
6697  agg_partial_costs);
6698 
6699  /* final phase */
6700  get_agg_clause_costs(root, AGGSPLIT_FINAL_DESERIAL,
6701  agg_final_costs);
6702  }
6703 
6704  extra->partial_costs_set = true;
6705  }
6706 
6707  /* Estimate number of partial groups. */
6708  if (cheapest_total_path != NULL)
6709  dNumPartialGroups =
6710  get_number_of_groups(root,
6711  cheapest_total_path->rows,
6712  gd,
6713  extra->targetList);
6714  if (cheapest_partial_path != NULL)
6715  dNumPartialPartialGroups =
6716  get_number_of_groups(root,
6717  cheapest_partial_path->rows,
6718  gd,
6719  extra->targetList);
6720 
6721  if (can_sort && cheapest_total_path != NULL)
6722  {
6723  /* This should have been checked previously */
6724  Assert(parse->hasAggs || parse->groupClause);
6725 
6726  /*
6727  * Use any available suitably-sorted path as input, and also consider
6728  * sorting the cheapest partial path.
6729  */
6730  foreach(lc, input_rel->pathlist)
6731  {
6732  ListCell *lc2;
6733  Path *path = (Path *) lfirst(lc);
6734  Path *path_save = path;
6735 
6736  List *pathkey_orderings = NIL;
6737 
6738  List *group_pathkeys = root->group_pathkeys;
6739  List *group_clauses = parse->groupClause;
6740 
6741  /* generate alternative group orderings that might be useful */
6742  pathkey_orderings = get_useful_group_keys_orderings(root,
6743  path->rows,
6744  path->pathkeys,
6745  group_pathkeys,
6746  group_clauses);
6747 
6748  Assert(list_length(pathkey_orderings) > 0);
6749 
6750  /* process all potentially interesting grouping reorderings */
6751  foreach(lc2, pathkey_orderings)
6752  {
6753  bool is_sorted;
6754  int presorted_keys = 0;
6755  PathKeyInfo *info = (PathKeyInfo *) lfirst(lc2);
6756 
6757  /* restore the path (we replace it in the loop) */
6758  path = path_save;
6759 
6760  is_sorted = pathkeys_count_contained_in(info->pathkeys,
6761  path->pathkeys,
6762  &presorted_keys);
6763 
6764  if (path == cheapest_total_path || is_sorted)
6765  {
6766  /* Sort the cheapest partial path, if it isn't already */
6767  if (!is_sorted)
6768  {
6769  path = (Path *) create_sort_path(root,
6770  partially_grouped_rel,
6771  path,
6772  info->pathkeys,
6773  -1.0);
6774  }
6775 
6776  if (parse->hasAggs)
6777  add_path(partially_grouped_rel, (Path *)
6778  create_agg_path(root,
6779  partially_grouped_rel,
6780  path,
6781  partially_grouped_rel->reltarget,
6782  info->clauses ? AGG_SORTED : AGG_PLAIN,
6783  AGGSPLIT_INITIAL_SERIAL,
6784  info->clauses,
6785  NIL,
6786  agg_partial_costs,
6787  dNumPartialGroups));
6788  else
6789  add_path(partially_grouped_rel, (Path *)
6790  create_group_path(root,
6791  partially_grouped_rel,
6792  path,
6793  info->clauses,
6794  NIL,
6795  dNumPartialGroups));
6796  }
6797  }
6798  }
6799 
6800  /*
6801  * Consider incremental sort on all partial paths, if enabled.
6802  *
6803  * We can also skip the entire loop when we only have a single-item
6804  * group_pathkeys because then we can't possibly have a presorted
6805  * prefix of the list without having the list be fully sorted.
6806  *
6807  * XXX Shouldn't this also consider the group-key-reordering?
6808  */
6809  if (enable_incremental_sort && list_length(root->group_pathkeys) > 1)
6810  {
6811  foreach(lc, input_rel->pathlist)
6812  {
6813  Path *path = (Path *) lfirst(lc);
6814  bool is_sorted;
6815  int presorted_keys;
6816 
6817  is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
6818  path->pathkeys,
6819  &presorted_keys);
6820 
6821  /* Ignore already sorted paths */
6822  if (is_sorted)
6823  continue;
6824 
6825  if (presorted_keys == 0)
6826  continue;
6827 
6828  /* Since we have presorted keys, consider incremental sort. */
6829  path = (Path *) create_incremental_sort_path(root,
6830  partially_grouped_rel,
6831  path,
6832  root->group_pathkeys,
6833  presorted_keys,
6834  -1.0);
6835 
6836  if (parse->hasAggs)
6837  add_path(partially_grouped_rel, (Path *)
6838  create_agg_path(root,
6839  partially_grouped_rel,
6840  path,
6841  partially_grouped_rel->reltarget,
6842  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
6843  AGGSPLIT_INITIAL_SERIAL,
6844  parse->groupClause,
6845  NIL,
6846  agg_partial_costs,
6847  dNumPartialGroups));
6848  else
6849  add_path(partially_grouped_rel, (Path *)
6850  create_group_path(root,
6851  partially_grouped_rel,
6852  path,
6853  parse->groupClause,
6854  NIL,
6855  dNumPartialGroups));
6856  }
6857  }
6858  }
6859 
6860  if (can_sort && cheapest_partial_path != NULL)
6861  {
6862  /* Similar to above logic, but for partial paths. */
6863  foreach(lc, input_rel->partial_pathlist)
6864  {
6865  ListCell *lc2;
6866  Path *path = (Path *) lfirst(lc);
6867  Path *path_original = path;
6868 
6869  List *pathkey_orderings = NIL;
6870 
6871  List *group_pathkeys = root->group_pathkeys;
6872  List *group_clauses = parse->groupClause;
6873 
6874  /* generate alternative group orderings that might be useful */
6875  pathkey_orderings = get_useful_group_keys_orderings(root,
6876  path->rows,
6877  path->pathkeys,
6878  group_pathkeys,
6879  group_clauses);
6880 
6881  Assert(list_length(pathkey_orderings) > 0);
6882 
6883  /* process all potentially interesting grouping reorderings */
6884  foreach(lc2, pathkey_orderings)
6885  {
6886  bool is_sorted;
6887  int presorted_keys = 0;
6888  PathKeyInfo *info = (PathKeyInfo *) lfirst(lc2);
6889 
6890  /* restore the path (we replace it in the loop) */
6891  path = path_original;
6892 
6893  is_sorted = pathkeys_count_contained_in(info->pathkeys,
6894  path->pathkeys,
6895  &presorted_keys);
6896 
6897  if (path == cheapest_partial_path || is_sorted)
6898  {
6899 
6900  /* Sort the cheapest partial path, if it isn't already */
6901  if (!is_sorted)
6902  {
6903  path = (Path *) create_sort_path(root,
6904  partially_grouped_rel,
6905  path,
6906  info->pathkeys,
6907  -1.0);
6908  }
6909 
6910  if (parse->hasAggs)
6911  add_partial_path(partially_grouped_rel, (Path *)
6912  create_agg_path(root,
6913  partially_grouped_rel,
6914  path,
6915  partially_grouped_rel->reltarget,
6916  info->clauses ? AGG_SORTED : AGG_PLAIN,
6917  AGGSPLIT_INITIAL_SERIAL,
6918  info->clauses,
6919  NIL,
6920  agg_partial_costs,
6921  dNumPartialPartialGroups));
6922  else
6923  add_partial_path(partially_grouped_rel, (Path *)
6924  create_group_path(root,
6925  partially_grouped_rel,
6926  path,
6927  info->clauses,
6928  NIL,
6929  dNumPartialPartialGroups));
6930  }
6931 
6932  /*
6933  * Now we may consider incremental sort on this path, but only
6934  * when the path is not already sorted and when incremental
6935  * sort is enabled.
6936  */
6937  if (is_sorted || !enable_incremental_sort)
6938  continue;
6939 
6940  /* Restore the input path (we might have added Sort on top). */
6941  path = path_original;
6942 
6943  /* no shared prefix, no point in building incremental sort */
6944  if (presorted_keys == 0)
6945  continue;
6946 
6947  /*
6948  * We should have already excluded pathkeys of length 1
6949  * because then presorted_keys > 0 would imply is_sorted was
6950  * true.
6951  */
6952  Assert(list_length(root->group_pathkeys) != 1);
6953 
6954  path = (Path *) create_incremental_sort_path(root,
6955  partially_grouped_rel,
6956  path,
6957  info->pathkeys,
6958  presorted_keys,
6959  -1.0);
6960 
6961  if (parse->hasAggs)
6962  add_partial_path(partially_grouped_rel, (Path *)
6963  create_agg_path(root,
6964  partially_grouped_rel,
6965  path,
6966  partially_grouped_rel->reltarget,
6967  info->clauses ? AGG_SORTED : AGG_PLAIN,
6968  AGGSPLIT_INITIAL_SERIAL,
6969  info->clauses,
6970  NIL,
6971  agg_partial_costs,
6972  dNumPartialPartialGroups));
6973  else
6974  add_partial_path(partially_grouped_rel, (Path *)
6975  create_group_path(root,
6976  partially_grouped_rel,
6977  path,
6978  info->clauses,
6979  NIL,
6980  dNumPartialPartialGroups));
6981  }
6982  }
6983  }
6984 
6985  /*
6986  * Add a partially-grouped HashAgg Path where possible
6987  */
6988  if (can_hash && cheapest_total_path != NULL)
6989  {
6990  /* Checked above */
6991  Assert(parse->hasAggs || parse->groupClause);
6992 
6993  add_path(partially_grouped_rel, (Path *)
6994  create_agg_path(root,
6995  partially_grouped_rel,
6996  cheapest_total_path,
6997  partially_grouped_rel->reltarget,
6998  AGG_HASHED,
6999  AGGSPLIT_INITIAL_SERIAL,
7000  parse->groupClause,
7001  NIL,
7002  agg_partial_costs,
7003  dNumPartialGroups));
7004  }
7005 
7006  /*
7007  * Now add a partially-grouped HashAgg partial Path where possible
7008  */
7009  if (can_hash && cheapest_partial_path != NULL)
7010  {
7011  add_partial_path(partially_grouped_rel, (Path *)
7012  create_agg_path(root,
7013  partially_grouped_rel,
7014  cheapest_partial_path,
7015  partially_grouped_rel->reltarget,
7016  AGG_HASHED,
7017  AGGSPLIT_INITIAL_SERIAL,
7018  parse->groupClause,
7019  NIL,
7020  agg_partial_costs,
7021  dNumPartialPartialGroups));
7022  }
7023 
7024  /*
7025  * If there is an FDW that's responsible for all baserels of the query,
7026  * let it consider adding partially grouped ForeignPaths.
7027  */
7028  if (partially_grouped_rel->fdwroutine &&
7029  partially_grouped_rel->fdwroutine->GetForeignUpperPaths)
7030  {
7031  FdwRoutine *fdwroutine = partially_grouped_rel->fdwroutine;
7032 
7033  fdwroutine->GetForeignUpperPaths(root,
7034  UPPERREL_PARTIAL_GROUP_AGG,
7035  input_rel, partially_grouped_rel,
7036  extra);
7037  }
7038 
7039  return partially_grouped_rel;
7040 }
@ AGGSPLIT_INITIAL_SERIAL
Definition: nodes.h:832
@ UPPERREL_PARTIAL_GROUP_AGG
Definition: pathnodes.h:70
static PathTarget * make_partial_grouping_target(PlannerInfo *root, PathTarget *grouping_target, Node *havingQual)
Definition: planner.c:5020
AggClauseCosts agg_partial_costs
Definition: pathnodes.h:2612
RelOptKind reloptkind
Definition: pathnodes.h:679

References add_partial_path(), add_path(), GroupPathExtraData::agg_final_costs, AGG_HASHED, GroupPathExtraData::agg_partial_costs, AGG_PLAIN, AGG_SORTED, AGGSPLIT_FINAL_DESERIAL, AGGSPLIT_INITIAL_SERIAL, Assert(), RelOptInfo::cheapest_total_path, PathKeyInfo::clauses, RelOptInfo::consider_parallel, create_agg_path(), create_group_path(), create_incremental_sort_path(), create_sort_path(), enable_incremental_sort, RelOptInfo::fdwroutine, fetch_upper_rel(), GroupPathExtraData::flags, get_agg_clause_costs(), get_number_of_groups(), get_useful_group_keys_orderings(), FdwRoutine::GetForeignUpperPaths, PlannerInfo::group_pathkeys, GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, GroupPathExtraData::havingQual, lfirst, linitial, list_length(), make_partial_grouping_target(), MemSet, NIL, parse(), PlannerInfo::parse, GroupPathExtraData::partial_costs_set, RelOptInfo::partial_pathlist, PARTITIONWISE_AGGREGATE_PARTIAL, PathKeyInfo::pathkeys, Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::pathlist, GroupPathExtraData::patype, RelOptInfo::relids, RelOptInfo::reloptkind, RelOptInfo::reltarget, Path::rows, RelOptInfo::serverid, GroupPathExtraData::targetList, UPPERREL_PARTIAL_GROUP_AGG, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by create_ordinary_grouping_paths().
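
The partial/final split built here (AGGSPLIT_INITIAL_SERIAL below the Gather, AGGSPLIT_FINAL_DESERIAL above it) is visible in any parallel aggregate plan; with a hypothetical table t the shape is typically:

  EXPLAIN SELECT a, count(*) FROM t GROUP BY a;
  --  Finalize HashAggregate
  --    ->  Gather
  --          ->  Partial HashAggregate
  --                ->  Parallel Seq Scan on t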

◆ create_partitionwise_grouping_paths()

static void create_partitionwise_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo grouped_rel,
RelOptInfo partially_grouped_rel,
const AggClauseCosts agg_costs,
grouping_sets_data gd,
PartitionwiseAggregateType  patype,
GroupPathExtraData extra 
)
static

Definition at line 7422 of file planner.c.

7430 {
7431  List *grouped_live_children = NIL;
7432  List *partially_grouped_live_children = NIL;
7433  PathTarget *target = grouped_rel->reltarget;
7434  bool partial_grouping_valid = true;
7435  int i;
7436 
7439  partially_grouped_rel != NULL);
7440 
7441  /* Add paths for partitionwise aggregation/grouping. */
7442  i = -1;
7443  while ((i = bms_next_member(input_rel->live_parts, i)) >= 0)
7444  {
7444  {
7445  RelOptInfo *child_input_rel = input_rel->part_rels[i];
7446  PathTarget *child_target;
7447  AppendRelInfo **appinfos;
7448  int nappinfos;
7449  GroupPathExtraData child_extra;
7450  RelOptInfo *child_grouped_rel;
7451  RelOptInfo *child_partially_grouped_rel;
7452 
7453  Assert(child_input_rel != NULL);
7454 
7455  /* Dummy children can be ignored. */
7456  if (IS_DUMMY_REL(child_input_rel))
7457  continue;
7458 
7459  child_target = copy_pathtarget(target);
7460 
7461  /*
7462  * Copy the given "extra" structure as is and then override the
7463  * members specific to this child.
7464  */
7465  memcpy(&child_extra, extra, sizeof(child_extra));
7466 
7467  appinfos = find_appinfos_by_relids(root, child_input_rel->relids,
7468  &nappinfos);
7469 
7470  child_target->exprs = (List *)
7471  adjust_appendrel_attrs(root,
7472  (Node *) target->exprs,
7473  nappinfos, appinfos);
7474 
7475  /* Translate havingQual and targetList. */
7476  child_extra.havingQual = (Node *)
7477  adjust_appendrel_attrs(root,
7478  extra->havingQual,
7479  nappinfos, appinfos);
7480  child_extra.targetList = (List *)
7481  adjust_appendrel_attrs(root,
7482  (Node *) extra->targetList,
7483  nappinfos, appinfos);
7484 
7485  /*
7486  * extra->patype was the value computed for our parent rel; patype is
7487  * the value for this relation. For the child, our value is its
7488  * parent rel's value.
7489  */
7490  child_extra.patype = patype;
7491 
7492  /*
7493  * Create grouping relation to hold fully aggregated grouping and/or
7494  * aggregation paths for the child.
7495  */
7496  child_grouped_rel = make_grouping_rel(root, child_input_rel,
7497  child_target,
7498  extra->target_parallel_safe,
7499  child_extra.havingQual);
7500 
7501  /* Create grouping paths for this child relation. */
7502  create_ordinary_grouping_paths(root, child_input_rel,
7503  child_grouped_rel,
7504  agg_costs, gd, &child_extra,
7505  &child_partially_grouped_rel);
7506 
7507  if (child_partially_grouped_rel)
7508  {
7509  partially_grouped_live_children =
7510  lappend(partially_grouped_live_children,
7511  child_partially_grouped_rel);
7512  }
7513  else
7514  partial_grouping_valid = false;
7515 
7516  if (patype == PARTITIONWISE_AGGREGATE_FULL)
7517  {
7518  set_cheapest(child_grouped_rel);
7519  grouped_live_children = lappend(grouped_live_children,
7520  child_grouped_rel);
7521  }
7522 
7523  pfree(appinfos);
7524  }
7525 
7526  /*
7527  * Try to create append paths for partially grouped children. For full
7528  * partitionwise aggregation, we might have paths in the partial_pathlist
7529  * if parallel aggregation is possible. For partial partitionwise
7530  * aggregation, we may have paths in both pathlist and partial_pathlist.
7531  *
7532  * NB: We must have a partially grouped path for every child in order to
7533  * generate a partially grouped path for this relation.
7534  */
7535  if (partially_grouped_rel && partial_grouping_valid)
7536  {
7537  Assert(partially_grouped_live_children != NIL);
7538 
7539  add_paths_to_append_rel(root, partially_grouped_rel,
7540  partially_grouped_live_children);
7541 
7542  /*
7543  * We need to call set_cheapest, since the finalization step will use the
7544  * cheapest path from the rel.
7545  */
7546  if (partially_grouped_rel->pathlist)
7547  set_cheapest(partially_grouped_rel);
7548  }
7549 
7550  /* If possible, create append paths for fully grouped children. */
7551  if (patype == PARTITIONWISE_AGGREGATE_FULL)
7552  {
7553  Assert(grouped_live_children != NIL);
7554 
7555  add_paths_to_append_rel(root, grouped_rel, grouped_live_children);
7556  }
7557 }

References add_paths_to_append_rel(), adjust_appendrel_attrs(), Assert(), bms_next_member(), copy_pathtarget(), create_ordinary_grouping_paths(), PathTarget::exprs, find_appinfos_by_relids(), GroupPathExtraData::havingQual, i, IS_DUMMY_REL, lappend(), RelOptInfo::live_parts, make_grouping_rel(), NIL, RelOptInfo::part_rels, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, PARTITIONWISE_AGGREGATE_PARTIAL, RelOptInfo::pathlist, GroupPathExtraData::patype, pfree(), RelOptInfo::relids, RelOptInfo::reltarget, set_cheapest(), GroupPathExtraData::target_parallel_safe, and GroupPathExtraData::targetList.

Referenced by create_ordinary_grouping_paths().
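
A standalone sketch (not backend code; all names and values invented for the demo) of the control flow above: dummy children are skipped, and an appended partially grouped result is only kept when every live child produced a partially grouped path.

#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
    bool    dummy[] = {false, true, false};         /* pruned children */
    bool    has_partial[] = {true, false, false};   /* per-child result */
    bool    partial_grouping_valid = true;
    int     live_children = 0;

    for (int i = 0; i < 3; i++)
    {
        if (dummy[i])
            continue;           /* dummy children can be ignored */
        live_children++;
        if (!has_partial[i])
            partial_grouping_valid = false;
    }

    if (partial_grouping_valid && live_children > 0)
        printf("append %d partially grouped children\n", live_children);
    else
        printf("no partially grouped append path\n");   /* this case */
    return 0;
}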

◆ create_window_paths()

static RelOptInfo * create_window_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget input_target,
PathTarget output_target,
bool  output_target_parallel_safe,
WindowFuncLists wflists,
List activeWindows 
)
static

Definition at line 4096 of file planner.c.

4103 {
4104  RelOptInfo *window_rel;
4105  ListCell *lc;
4106 
4107  /* For now, do all work in the (WINDOW, NULL) upperrel */
4108  window_rel = fetch_upper_rel(root, UPPERREL_WINDOW, NULL);
4109 
4110  /*
4111  * If the input relation is not parallel-safe, then the window relation
4112  * can't be parallel-safe, either. Otherwise, we need to examine the
4113  * target list and active windows for non-parallel-safe constructs.
4114  */
4115  if (input_rel->consider_parallel && output_target_parallel_safe &&
4116  is_parallel_safe(root, (Node *) activeWindows))
4117  window_rel->consider_parallel = true;
4118 
4119  /*
4120  * If the input rel belongs to a single FDW, so does the window rel.
4121  */
4122  window_rel->serverid = input_rel->serverid;
4123  window_rel->userid = input_rel->userid;
4124  window_rel->useridiscurrent = input_rel->useridiscurrent;
4125  window_rel->fdwroutine = input_rel->fdwroutine;
4126 
4127  /*
4128  * Consider computing window functions starting from the existing
4129  * cheapest-total path (which will likely require a sort) as well as any
4130  * existing paths that satisfy or partially satisfy root->window_pathkeys.
4131  */
4132  foreach(lc, input_rel->pathlist)
4133  {
4134  Path *path = (Path *) lfirst(lc);
4135  int presorted_keys;
4136 
4137  if (path == input_rel->cheapest_total_path ||
4138  pathkeys_count_contained_in(root->window_pathkeys, path->pathkeys,
4139  &presorted_keys) ||
4140  presorted_keys > 0)
4141  create_one_window_path(root,
4142  window_rel,
4143  path,
4144  input_target,
4145  output_target,
4146  wflists,
4147  activeWindows);
4148  }
4149 
4150  /*
4151  * If there is an FDW that's responsible for all baserels of the query,
4152  * let it consider adding ForeignPaths.
4153  */
4154  if (window_rel->fdwroutine &&
4155  window_rel->fdwroutine->GetForeignUpperPaths)
4156  window_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_WINDOW,
4157  input_rel, window_rel,
4158  NULL);
4159 
4160  /* Let extensions possibly add some more paths */
4161  if (create_upper_paths_hook)
4162  (*create_upper_paths_hook) (root, UPPERREL_WINDOW,
4163  input_rel, window_rel, NULL);
4164 
4165  /* Now choose the best path(s) */
4166  set_cheapest(window_rel);
4167 
4168  return window_rel;
4169 }
bool is_parallel_safe(PlannerInfo *root, Node *node)
Definition: clauses.c:683
@ UPPERREL_WINDOW
Definition: pathnodes.h:73
static void create_one_window_path(PlannerInfo *root, RelOptInfo *window_rel, Path *path, PathTarget *input_target, PathTarget *output_target, WindowFuncLists *wflists, List *activeWindows)
Definition: planner.c:4183
List * window_pathkeys
Definition: pathnodes.h:298

References RelOptInfo::cheapest_total_path, RelOptInfo::consider_parallel, create_one_window_path(), create_upper_paths_hook, RelOptInfo::fdwroutine, fetch_upper_rel(), FdwRoutine::GetForeignUpperPaths, is_parallel_safe(), lfirst, Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::pathlist, RelOptInfo::serverid, set_cheapest(), UPPERREL_WINDOW, RelOptInfo::userid, RelOptInfo::useridiscurrent, and PlannerInfo::window_pathkeys.

Referenced by grouping_planner().
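
A minimal standalone sketch of the path filter in the loop above, with each input path reduced to a cheapest-total flag plus a count of presorted window pathkeys (all names hypothetical): a path is considered if it is the cheapest-total path, fully satisfies the window pathkeys, or at least shares a presorted prefix with them.

#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
    int     window_keys = 2;    /* length of root->window_pathkeys */
    bool    cheapest[] = {true, false, false};
    int     presorted[] = {0, 2, 0};    /* presorted keys per path */

    for (int i = 0; i < 3; i++)
    {
        bool    sorted = (presorted[i] == window_keys);

        if (cheapest[i] || sorted || presorted[i] > 0)
            printf("path %d: build a window path\n", i);
        else
            printf("path %d: skip\n", i);   /* only path 2 is skipped */
    }
    return 0;
}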

◆ expression_planner()

Expr* expression_planner ( Expr expr)

Definition at line 5882 of file planner.c.

5883 {
5884  Node *result;
5885 
5886  /*
5887  * Convert named-argument function calls, insert default arguments and
5888  * simplify constant subexprs
5889  */
5890  result = eval_const_expressions(NULL, (Node *) expr);
5891 
5892  /* Fill in opfuncid values if missing */
5893  fix_opfuncids(result);
5894 
5895  return (Expr *) result;
5896 }
Node * eval_const_expressions(PlannerInfo *root, Node *node)
Definition: clauses.c:2150
void fix_opfuncids(Node *node)
Definition: nodeFuncs.c:1763

References eval_const_expressions(), and fix_opfuncids().

Referenced by ATExecAddColumn(), ATPrepAlterColumnType(), BeginCopyFrom(), CheckMutability(), ComputePartitionAttrs(), ExecPrepareCheck(), ExecPrepareExpr(), ExecPrepareQual(), load_domaintype_info(), set_baserel_partition_constraint(), slot_fill_defaults(), StoreAttrDefault(), and transformPartitionBoundValue().
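
The function is essentially constant folding followed by operator-function lookup fixup. As a self-contained illustration of the folding step only, here is a toy expression tree (plain C, not PostgreSQL's Node machinery; everything below is invented for the demo):

#include <stdio.h>

typedef struct DemoExpr
{
    char    op;                 /* '+' for addition, 0 for a constant leaf */
    int     value;              /* meaningful when op == 0 */
    struct DemoExpr *left;
    struct DemoExpr *right;
} DemoExpr;

/* analogue of the eval_const_expressions() step: collapse constant subtrees */
static DemoExpr *
fold_consts(DemoExpr *e)
{
    if (e->op == 0)
        return e;               /* already a constant */
    e->left = fold_consts(e->left);
    e->right = fold_consts(e->right);
    if (e->left->op == 0 && e->right->op == 0)
    {
        e->op = 0;
        e->value = e->left->value + e->right->value;
    }
    return e;
}

int
main(void)
{
    DemoExpr    a = {0, 2, NULL, NULL};
    DemoExpr    b = {0, 40, NULL, NULL};
    DemoExpr    sum = {'+', 0, &a, &b};

    printf("folded to constant %d\n", fold_consts(&sum)->value);   /* 42 */
    return 0;
}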

◆ expression_planner_with_deps()

Expr* expression_planner_with_deps ( Expr expr,
List **  relationOids,
List **  invalItems 
)

Definition at line 5909 of file planner.c.

5912 {
5913  Node *result;
5914  PlannerGlobal glob;
5915  PlannerInfo root;
5916 
5917  /* Make up dummy planner state so we can use setrefs machinery */
5918  MemSet(&glob, 0, sizeof(glob));
5919  glob.type = T_PlannerGlobal;
5920  glob.relationOids = NIL;
5921  glob.invalItems = NIL;
5922 
5923  MemSet(&root, 0, sizeof(root));
5924  root.type = T_PlannerInfo;
5925  root.glob = &glob;
5926 
5927  /*
5928  * Convert named-argument function calls, insert default arguments and
5929  * simplify constant subexprs. Collect identities of inlined functions
5930  * and elided domains, too.
5931  */
5932  result = eval_const_expressions(&root, (Node *) expr);
5933 
5934  /* Fill in opfuncid values if missing */
5935  fix_opfuncids(result);
5936 
5937  /*
5938  * Now walk the finished expression to find anything else we ought to
5939  * record as an expression dependency.
5940  */
5941  (void) extract_query_dependencies_walker(result, &root);
5942 
5943  *relationOids = glob.relationOids;
5944  *invalItems = glob.invalItems;
5945 
5946  return (Expr *) result;
5947 }
@ T_PlannerInfo
Definition: nodes.h:236
@ T_PlannerGlobal
Definition: nodes.h:237
bool extract_query_dependencies_walker(Node *node, PlannerInfo *context)
Definition: setrefs.c:3319
NodeTag type
Definition: pathnodes.h:92
List * invalItems
Definition: pathnodes.h:112
List * relationOids
Definition: pathnodes.h:110
NodeTag type
Definition: pathnodes.h:160
PlannerGlobal * glob
Definition: pathnodes.h:164

References eval_const_expressions(), extract_query_dependencies_walker(), fix_opfuncids(), PlannerInfo::glob, PlannerGlobal::invalItems, MemSet, NIL, PlannerGlobal::relationOids, T_PlannerGlobal, T_PlannerInfo, PlannerGlobal::type, and PlannerInfo::type.

Referenced by GetCachedExpression().

◆ extract_rollup_sets()

static List * extract_rollup_sets ( List groupingSets)
static

Definition at line 2874 of file planner.c.

2875 {
2876  int num_sets_raw = list_length(groupingSets);
2877  int num_empty = 0;
2878  int num_sets = 0; /* distinct sets */
2879  int num_chains = 0;
2880  List *result = NIL;
2881  List **results;
2882  List **orig_sets;
2883  Bitmapset **set_masks;
2884  int *chains;
2885  short **adjacency;
2886  short *adjacency_buf;
2887  BipartiteMatchState *state;
2888  int i;
2889  int j;
2890  int j_size;
2891  ListCell *lc1 = list_head(groupingSets);
2892  ListCell *lc;
2893 
2894  /*
2895  * Start by stripping out empty sets. The algorithm doesn't require this,
2896  * but the planner currently needs all empty sets to be returned in the
2897  * first list, so we strip them here and add them back after.
2898  */
2899  while (lc1 && lfirst(lc1) == NIL)
2900  {
2901  ++num_empty;
2902  lc1 = lnext(groupingSets, lc1);
2903  }
2904 
2905  /* bail out now if it turns out that all we had were empty sets. */
2906  if (!lc1)
2907  return list_make1(groupingSets);
2908 
2909  /*----------
2910  * We don't strictly need to remove duplicate sets here, but if we don't,
2911  * they tend to become scattered through the result, which is a bit
2912  * confusing (and irritating if we ever decide to optimize them out).
2913  * So we remove them here and add them back after.
2914  *
2915  * For each non-duplicate set, we fill in the following:
2916  *
2917  * orig_sets[i] = list of the original set lists
2918  * set_masks[i] = bitmapset for testing inclusion
2919  * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
2920  *
2921  * chains[i] will be the result group this set is assigned to.
2922  *
2923  * We index all of these from 1 rather than 0 because it is convenient
2924  * to leave 0 free for the NIL node in the graph algorithm.
2925  *----------
2926  */
2927  orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
2928  set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
2929  adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
2930  adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
2931 
2932  j_size = 0;
2933  j = 0;
2934  i = 1;
2935 
2936  for_each_cell(lc, groupingSets, lc1)
2937  {
2938  List *candidate = (List *) lfirst(lc);
2939  Bitmapset *candidate_set = NULL;
2940  ListCell *lc2;
2941  int dup_of = 0;
2942 
2943  foreach(lc2, candidate)
2944  {
2945  candidate_set = bms_add_member(candidate_set, lfirst_int(lc2));
2946  }
2947 
2948  /* we can only be a dup if we're the same length as a previous set */
2949  if (j_size == list_length(candidate))
2950  {
2951  int k;
2952 
2953  for (k = j; k < i; ++k)
2954  {
2955  if (bms_equal(set_masks[k], candidate_set))
2956  {
2957  dup_of = k;
2958  break;
2959  }
2960  }
2961  }
2962  else if (j_size < list_length(candidate))
2963  {
2964  j_size = list_length(candidate);
2965  j = i;
2966  }
2967 
2968  if (dup_of > 0)
2969  {
2970  orig_sets[dup_of] = lappend(orig_sets[dup_of], candidate);
2971  bms_free(candidate_set);
2972  }
2973  else
2974  {
2975  int k;
2976  int n_adj = 0;
2977 
2978  orig_sets[i] = list_make1(candidate);
2979  set_masks[i] = candidate_set;
2980 
2981  /* fill in adjacency list; no need to compare equal-size sets */
2982 
2983  for (k = j - 1; k > 0; --k)
2984  {
2985  if (bms_is_subset(set_masks[k], candidate_set))
2986  adjacency_buf[++n_adj] = k;
2987  }
2988 
2989  if (n_adj > 0)
2990  {
2991  adjacency_buf[0] = n_adj;
2992  adjacency[i] = palloc((n_adj + 1) * sizeof(short));
2993  memcpy(adjacency[i], adjacency_buf, (n_adj + 1) * sizeof(short));
2994  }
2995  else
2996  adjacency[i] = NULL;
2997 
2998  ++i;
2999  }
3000  }
3001 
3002  num_sets = i - 1;
3003 
3004  /*
3005  * Apply the graph matching algorithm to do the work.
3006  */
3007  state = BipartiteMatch(num_sets, num_sets, adjacency);
3008 
3009  /*
3010  * Now, the state->pair* fields have the info we need to assign sets to
3011  * chains. Two sets (u,v) belong to the same chain if pair_uv[u] = v or
3012  * pair_vu[v] = u (both will be true, but we check both so that we can do
3013  * it in one pass)
3014  */
3015  chains = palloc0((num_sets + 1) * sizeof(int));
3016 
3017  for (i = 1; i <= num_sets; ++i)
3018  {
3019  int u = state->pair_vu[i];
3020  int v = state->pair_uv[i];
3021 
3022  if (u > 0 && u < i)
3023  chains[i] = chains[u];
3024  else if (v > 0 && v < i)
3025  chains[i] = chains[v];
3026  else
3027  chains[i] = ++num_chains;
3028  }
3029 
3030  /* build result lists. */
3031  results = palloc0((num_chains + 1) * sizeof(List *));
3032 
3033  for (i = 1; i <= num_sets; ++i)
3034  {
3035  int c = chains[i];
3036 
3037  Assert(c > 0);
3038 
3039  results[c] = list_concat(results[c], orig_sets[i]);
3040  }
3041 
3042  /* push any empty sets back on the first list. */
3043  while (num_empty-- > 0)
3044  results[1] = lcons(NIL, results[1]);
3045 
3046  /* make result list */
3047  for (i = 1; i <= num_chains; ++i)
3048  result = lappend(result, results[i]);
3049 
3050  /*
3051  * Free all the things.
3052  *
3053  * (This is over-fussy for small sets but for large sets we could have
3054  * tied up a nontrivial amount of memory.)
3055  */
3056  BipartiteMatchFree(state);
3057  pfree(results);
3058  pfree(chains);
3059  for (i = 1; i <= num_sets; ++i)
3060  if (adjacency[i])
3061  pfree(adjacency[i]);
3062  pfree(adjacency);
3063  pfree(adjacency_buf);
3064  pfree(orig_sets);
3065  for (i = 1; i <= num_sets; ++i)
3066  bms_free(set_masks[i]);
3067  pfree(set_masks);
3068 
3069  return result;
3070 }
void BipartiteMatchFree(BipartiteMatchState *state)
BipartiteMatchState * BipartiteMatch(int u_size, int v_size, short **adjacency)
bool bms_equal(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:94
bool bms_is_subset(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:315
void bms_free(Bitmapset *a)
Definition: bitmapset.c:208
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:738
int j
Definition: isn.c:74
void * palloc0(Size size)
Definition: mcxt.c:1099
char * c
Definition: regguts.h:318

References Assert(), BipartiteMatch(), BipartiteMatchFree(), bms_add_member(), bms_equal(), bms_free(), bms_is_subset(), for_each_cell, i, j, lappend(), lcons(), lfirst, lfirst_int, list_concat(), list_head(), list_length(), list_make1, lnext(), NIL, palloc(), palloc0(), and pfree().

Referenced by preprocess_grouping_sets().
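
The goal here is to partition the grouping sets into as few subset chains as possible, since each chain (e.g. (a,b,c), (a,b), ()) can be evaluated by one sorted rollup pass. The real code finds a minimum chain cover via BipartiteMatch(); the standalone demo below shows only the underlying chain idea, using a simpler greedy (not necessarily minimal) assignment over bitmask sets.

#include <stdio.h>

#define MAXCHAINS 8
#define MAXLEN 8

int
main(void)
{
    /*
     * Grouping sets as bitmasks over columns (a=1, b=2, c=4), pre-sorted
     * by decreasing size: {a,b,c}, {a,b}, {c}, {}.
     */
    unsigned    sets[] = {0x7, 0x3, 0x4, 0x0};
    int         nsets = 4;
    unsigned    chains[MAXCHAINS][MAXLEN];
    int         chainlen[MAXCHAINS] = {0};
    int         nchains = 0;

    for (int i = 0; i < nsets; i++)
    {
        int     c;

        /* append to the first chain whose last set contains this one */
        for (c = 0; c < nchains; c++)
        {
            unsigned    last = chains[c][chainlen[c] - 1];

            if ((sets[i] & last) == sets[i])
                break;
        }
        if (c == nchains)
            nchains++;          /* no such chain: this set starts a new rollup */
        chains[c][chainlen[c]++] = sets[i];
    }

    printf("%d rollup pass(es) needed\n", nchains);     /* prints 2 */
    return 0;
}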

◆ gather_grouping_paths()

static void gather_grouping_paths ( PlannerInfo root,
RelOptInfo rel 
)
static

Definition at line 7056 of file planner.c.

7057 {
7058  ListCell *lc;
7059  Path *cheapest_partial_path;
7060 
7061  /* Try Gather for unordered paths and Gather Merge for ordered ones. */
7062  generate_useful_gather_paths(root, rel, true);
7063 
7064  /* Try cheapest partial path + explicit Sort + Gather Merge. */
7065  cheapest_partial_path = linitial(rel->partial_pathlist);
7066  if (!pathkeys_contained_in(root->group_pathkeys,
7067  cheapest_partial_path->pathkeys))
7068  {
7069  Path *path;
7070  double total_groups;
7071 
7072  total_groups =
7073  cheapest_partial_path->rows * cheapest_partial_path->parallel_workers;
7074  path = (Path *) create_sort_path(root, rel, cheapest_partial_path,
7075  root->group_pathkeys,
7076  -1.0);
7077  path = (Path *)
7078  create_gather_merge_path(root,
7079  rel,
7080  path,
7081  rel->reltarget,
7082  root->group_pathkeys,
7083  NULL,
7084  &total_groups);
7085 
7086  add_path(rel, path);
7087  }
7088 
7089  /*
7090  * Consider incremental sort on all partial paths, if enabled.
7091  *
7092  * We can also skip the entire loop when we only have a single-item
7093  * group_pathkeys because then we can't possibly have a presorted prefix
7094  * of the list without having the list be fully sorted.
7095  */
7096  if (!enable_incremental_sort || list_length(root->group_pathkeys) == 1)
7097  return;
7098 
7099  /* also consider incremental sort on partial paths, if enabled */
7100  foreach(lc, rel->partial_pathlist)
7101  {
7102  Path *path = (Path *) lfirst(lc);
7103  bool is_sorted;
7104  int presorted_keys;
7105  double total_groups;
7106 
7107  is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
7108  path->pathkeys,
7109  &presorted_keys);
7110 
7111  if (is_sorted)
7112  continue;
7113 
7114  if (presorted_keys == 0)
7115  continue;
7116 
7117  path = (Path *) create_incremental_sort_path(root,
7118  rel,
7119  path,
7120  root->group_pathkeys,
7121  presorted_keys,
7122  -1.0);
7123 
7124  path = (Path *)
7125  create_gather_merge_path(root,
7126  rel,
7127  path,
7128  rel->reltarget,
7129  root->group_pathkeys,
7130  NULL,
7131  &total_groups);
7132 
7133  add_path(rel, path);
7134  }
7135 }

References add_path(), create_gather_merge_path(), create_incremental_sort_path(), create_sort_path(), enable_incremental_sort, generate_useful_gather_paths(), PlannerInfo::group_pathkeys, lfirst, linitial, list_length(), Path::parallel_workers, RelOptInfo::partial_pathlist, Path::pathkeys, pathkeys_contained_in(), pathkeys_count_contained_in(), RelOptInfo::reltarget, and Path::rows.

Referenced by add_paths_to_grouping_rel(), and create_ordinary_grouping_paths().
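
A standalone sketch of the skip logic in the incremental-sort loop above: a partial path is only worth an incremental sort if it is not already fully sorted on the group pathkeys but does share a nonempty presorted prefix. Pathkeys are reduced to counts here and the values are invented for the demo.

#include <stdio.h>

int
main(void)
{
    int     group_keys = 3;     /* length of root->group_pathkeys */
    int     presorted[] = {3, 0, 2};    /* per partial path */

    for (int i = 0; i < 3; i++)
    {
        if (presorted[i] == group_keys)
            printf("path %d: already fully sorted, skip\n", i);
        else if (presorted[i] == 0)
            printf("path %d: no presorted prefix, skip\n", i);
        else
            printf("path %d: incremental sort on %d trailing key(s)\n",
                   i, group_keys - presorted[i]);
    }
    return 0;
}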

◆ get_cheapest_fractional_path()

Path* get_cheapest_fractional_path ( RelOptInfo rel,
double  tuple_fraction 
)

Definition at line 5723 of file planner.c.

5724 {
5725  Path *best_path = rel->cheapest_total_path;
5726  ListCell *l;
5727 
5728  /* If all tuples will be retrieved, just return the cheapest-total path */
5729  if (tuple_fraction <= 0.0)
5730  return best_path;
5731 
5732  /* Convert absolute # of tuples to a fraction; no need to clamp to 0..1 */
5733  if (tuple_fraction >= 1.0 && best_path->rows > 0)
5734  tuple_fraction /= best_path->rows;
5735 
5736  foreach(l, rel->pathlist)
5737  {
5738  Path *path = (Path *) lfirst(l);
5739 
5740  if (path == rel->cheapest_total_path ||
5741  compare_fractional_path_costs(best_path, path, tuple_fraction) <= 0)
5742  continue;
5743 
5744  best_path = path;
5745  }
5746 
5747  return best_path;
5748 }
int compare_fractional_path_costs(Path *path1, Path *path2, double fraction)
Definition: pathnode.c:117

References RelOptInfo::cheapest_total_path, compare_fractional_path_costs(), lfirst, RelOptInfo::pathlist, and Path::rows.

Referenced by make_subplan(), recurse_set_operations(), and standard_planner().
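
The comparison relies on interpolating a path's cost at a given row fraction, which is what compare_fractional_path_costs() does (startup cost plus the fraction of the run cost). A self-contained sketch with hypothetical numbers, showing how a fast-start path can beat the cheapest-total path for a small LIMIT:

#include <stdio.h>

typedef struct DemoPath
{
    const char *name;
    double      startup_cost;
    double      total_cost;
    double      rows;
} DemoPath;

/* cost to fetch the first `fraction` of a path's output */
static double
fractional_cost(const DemoPath *p, double fraction)
{
    return p->startup_cost + fraction * (p->total_cost - p->startup_cost);
}

int
main(void)
{
    DemoPath    indexscan = {"indexscan", 0.5, 500.0, 10000.0};
    DemoPath    sortpath = {"sort", 200.0, 300.0, 10000.0};
    double      tuple_fraction = 50.0;  /* absolute count, e.g. from LIMIT 50 */

    /* like the function above: convert an absolute count to a fraction */
    if (tuple_fraction >= 1.0 && indexscan.rows > 0)
        tuple_fraction /= indexscan.rows;

    printf("indexscan %.2f vs sort %.2f at fraction %.4f\n",
           fractional_cost(&indexscan, tuple_fraction),
           fractional_cost(&sortpath, tuple_fraction),
           tuple_fraction);     /* the fast-start index scan wins */
    return 0;
}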

◆ get_number_of_groups()

static double get_number_of_groups ( PlannerInfo root,
double  path_rows,
grouping_sets_data gd,
List target_list 
)
static

Definition at line 3222 of file planner.c.

3226 {
3227  Query *parse = root->parse;
3228  double dNumGroups;
3229 
3230  if (parse->groupClause)
3231  {
3232  List *groupExprs;
3233 
3234  if (parse->groupingSets)
3235  {
3236  /* Add up the estimates for each grouping set */
3237  ListCell *lc;
3238  ListCell *lc2;
3239 
3240  Assert(gd); /* keep Coverity happy */
3241 
3242  dNumGroups = 0;
3243 
3244  foreach(lc, gd->rollups)
3245  {
3246  RollupData *rollup = lfirst_node(RollupData, lc);
3247  ListCell *lc;
3248 
3249  groupExprs = get_sortgrouplist_exprs(rollup->groupClause,
3250  target_list);
3251 
3252  rollup->numGroups = 0.0;
3253 
3254  forboth(lc, rollup->gsets, lc2, rollup->gsets_data)
3255  {
3256  List *gset = (List *) lfirst(lc);
3257  GroupingSetData *gs = lfirst_node(GroupingSetData, lc2);
3258  double numGroups = estimate_num_groups(root,
3259  groupExprs,
3260  path_rows,
3261  &gset,
3262  NULL);
3263 
3264  gs->numGroups = numGroups;
3265  rollup->numGroups += numGroups;
3266  }
3267 
3268  dNumGroups += rollup->numGroups;
3269  }
3270 
3271  if (gd->hash_sets_idx)
3272  {
3273  ListCell *lc;
3274 
3275  gd->dNumHashGroups = 0;
3276 
3277  groupExprs = get_sortgrouplist_exprs(parse->groupClause,
3278  target_list);
3279 
3280  forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets)
3281  {
3282  List *gset = (List *) lfirst(lc);
3283  GroupingSetData *gs = lfirst_node(GroupingSetData, lc2);
3284  double numGroups = estimate_num_groups(root,
3285  groupExprs,
3286  path_rows,
3287  &gset,
3288  NULL);
3289 
3290  gs->numGroups = numGroups;
3291  gd->dNumHashGroups += numGroups;
3292  }
3293 
3294  dNumGroups += gd->dNumHashGroups;
3295  }
3296  }
3297  else
3298  {
3299  /* Plain GROUP BY */
3300  groupExprs = get_sortgrouplist_exprs(parse->groupClause,
3301  target_list);
3302 
3303  dNumGroups = estimate_num_groups(root, groupExprs, path_rows,
3304  NULL, NULL);
3305  }
3306  }
3307  else if (parse->groupingSets)
3308  {
3309  /* Empty grouping sets ... one result row for each one */
3310  dNumGroups = list_length(parse->groupingSets);
3311  }
3312  else if (parse->hasAggs || root->hasHavingQual)
3313  {
3314  /* Plain aggregation, one result row */
3315  dNumGroups = 1;
3316  }
3317  else
3318  {
3319  /* Not grouping */
3320  dNumGroups = 1;
3321  }
3322 
3323  return dNumGroups;
3324 }
List * hash_sets_idx
Definition: planner.c:109

References Assert(), grouping_sets_data::dNumHashGroups, estimate_num_groups(), forboth, get_sortgrouplist_exprs(), RollupData::groupClause, RollupData::gsets, RollupData::gsets_data, grouping_sets_data::hash_sets_idx, PlannerInfo::hasHavingQual, lfirst, lfirst_node, list_length(), GroupingSetData::numGroups, RollupData::numGroups, parse(), PlannerInfo::parse, grouping_sets_data::rollups, and grouping_sets_data::unsortable_sets.

Referenced by create_ordinary_grouping_paths(), and create_partial_grouping_paths().
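
For grouping sets, the overall estimate is the sum of per-set estimates, accumulated per rollup (plus any hashed sets). A standalone sketch using a stand-in estimator; the real work is done by estimate_num_groups(), and all numbers below are invented:

#include <stdio.h>

/*
 * Stand-in for estimate_num_groups(): assume 10 distinct values per
 * grouping column, capped by the input row count.
 */
static double
demo_estimate_num_groups(int ncols, double path_rows)
{
    double  ngroups = 1.0;

    for (int i = 0; i < ncols; i++)
        ngroups *= 10.0;
    return ngroups < path_rows ? ngroups : path_rows;
}

int
main(void)
{
    double  path_rows = 100000.0;
    int     rollup_setsizes[] = {2, 1, 0};  /* ROLLUP(a, b): (a,b),(a),() */
    double  dNumGroups = 0.0;

    /* each set's estimate is accumulated into the rollup and the total */
    for (int i = 0; i < 3; i++)
        dNumGroups += demo_estimate_num_groups(rollup_setsizes[i], path_rows);

    printf("estimated output rows: %.0f\n", dNumGroups);    /* 100 + 10 + 1 */
    return 0;
}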

◆ group_by_has_partkey()

static bool group_by_has_partkey ( RelOptInfo input_rel,
List targetList,
List groupClause 
)
static

Definition at line 7566 of file planner.c.

7569 {
7570  List *groupexprs = get_sortgrouplist_exprs(groupClause, targetList);
7571  int cnt = 0;
7572  int partnatts;
7573 
7574  /* Input relation should be partitioned. */
7575  Assert(input_rel->part_scheme);
7576 
7577  /* Rule out early, if there are no partition keys present. */
7578  if (!input_rel->partexprs)
7579  return false;
7580 
7581  partnatts = input_rel->part_scheme->partnatts;
7582 
7583  for (cnt = 0; cnt < partnatts; cnt++)
7584  {
7585  List *partexprs = input_rel->partexprs[cnt];
7586  ListCell *lc;
7587  bool found = false;
7588 
7589  foreach(lc, partexprs)
7590  {
7591  Expr *partexpr = lfirst(lc);
7592 
7593  if (list_member(groupexprs, partexpr))
7594  {
7595  found = true;
7596  break;
7597  }
7598  }
7599 
7600  /*
7601  * If none of the partition key expressions matches any of the
7602  * GROUP BY expressions, return false.
7603  */
7604  if (!found)
7605  return false;
7606  }
7607 
7608  return true;
7609 }
bool list_member(const List *list, const void *datum)
Definition: list.c:640
List ** partexprs
Definition: pathnodes.h:775
PartitionScheme part_scheme
Definition: pathnodes.h:761

References Assert(), get_sortgrouplist_exprs(), lfirst, list_member(), RelOptInfo::part_scheme, RelOptInfo::partexprs, and PartitionSchemeData::partnatts.

Referenced by create_ordinary_grouping_paths().
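
A standalone restatement of the test, with partition key and GROUP BY expressions reduced to plain ints (all names invented): full partitionwise aggregation is only possible when every partition key appears among the grouping expressions.

#include <stdbool.h>
#include <stdio.h>

static bool
demo_group_by_has_partkey(const int *partkeys, int npartkeys,
                          const int *groupexprs, int ngroupexprs)
{
    for (int i = 0; i < npartkeys; i++)
    {
        bool    found = false;

        for (int j = 0; j < ngroupexprs; j++)
            if (groupexprs[j] == partkeys[i])
                found = true;
        if (!found)
            return false;       /* some partition key is not grouped on */
    }
    return true;
}

int
main(void)
{
    int     partkeys[] = {1};   /* PARTITION BY (col1) */
    int     group1[] = {1, 2};  /* GROUP BY col1, col2 */
    int     group2[] = {2};     /* GROUP BY col2 */

    printf("%d %d\n",
           demo_group_by_has_partkey(partkeys, 1, group1, 2),   /* 1 */
           demo_group_by_has_partkey(partkeys, 1, group2, 1));  /* 0 */
    return 0;
}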

◆ grouping_planner()

static void grouping_planner ( PlannerInfo root,
double  tuple_fraction 
)
static

Definition at line 1253 of file planner.c.

1254 {
1255  Query *parse = root->parse;
1256  int64 offset_est = 0;
1257  int64 count_est = 0;
1258  double limit_tuples = -1.0;
1259  bool have_postponed_srfs = false;
1260  PathTarget *final_target;
1261  List *final_targets;
1262  List *final_targets_contain_srfs;
1263  bool final_target_parallel_safe;
1264  RelOptInfo *current_rel;
1265  RelOptInfo *final_rel;
1266  FinalPathExtraData extra;
1267  ListCell *lc;
1268 
1269  /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
1270  if (parse->limitCount || parse->limitOffset)
1271  {
1272  tuple_fraction = preprocess_limit(root, tuple_fraction,
1273  &offset_est, &count_est);
1274 
1275  /*
1276  * If we have a known LIMIT, and don't have an unknown OFFSET, we can
1277  * estimate the effects of using a bounded sort.
1278  */
1279  if (count_est > 0 && offset_est >= 0)
1280  limit_tuples = (double) count_est + (double) offset_est;
1281  }
1282 
1283  /* Make tuple_fraction accessible to lower-level routines */
1284  root->tuple_fraction = tuple_fraction;
1285 
1286  if (parse->setOperations)
1287  {
1288  /*
1289  * If there's a top-level ORDER BY, assume we have to fetch all the
1290  * tuples. This might be too simplistic given all the hackery below
1291  * to possibly avoid the sort; but the odds of accurate estimates here
1292  * are pretty low anyway. XXX try to get rid of this in favor of
1293  * letting plan_set_operations generate both fast-start and
1294  * cheapest-total paths.
1295  */
1296  if (parse->sortClause)
1297  root->tuple_fraction = 0.0;
1298 
1299  /*
1300  * Construct Paths for set operations. The results will not need any
1301  * work except perhaps a top-level sort and/or LIMIT. Note that any
1302  * special work for recursive unions is the responsibility of
1303  * plan_set_operations.
1304  */
1305  current_rel = plan_set_operations(root);
1306 
1307  /*
1308  * We should not need to call preprocess_targetlist, since we must be
1309  * in a SELECT query node. Instead, use the processed_tlist returned
1310  * by plan_set_operations (since this tells whether it returned any
1311  * resjunk columns!), and transfer any sort key information from the
1312  * original tlist.
1313  */
1314  Assert(parse->commandType == CMD_SELECT);
1315 
1316  /* for safety, copy processed_tlist instead of modifying in-place */
1317  root->processed_tlist =
1318  postprocess_setop_tlist(copyObject(root->processed_tlist),
1319  parse->targetList);
1320 
1321  /* Also extract the PathTarget form of the setop result tlist */
1322  final_target = current_rel->cheapest_total_path->pathtarget;
1323 
1324  /* And check whether it's parallel safe */
1325  final_target_parallel_safe =
1326  is_parallel_safe(root, (Node *) final_target->exprs);
1327 
1328  /* The setop result tlist couldn't contain any SRFs */
1329  Assert(!parse->hasTargetSRFs);
1330  final_targets = final_targets_contain_srfs = NIL;
1331 
1332  /*
1333  * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have
1334  * checked already, but let's make sure).
1335  */
1336  if (parse->rowMarks)
1337  ereport(ERROR,
1338  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1339  /*------
1340  translator: %s is a SQL row locking clause such as FOR UPDATE */
1341  errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
1342  LCS_asString(linitial_node(RowMarkClause,
1343  parse->rowMarks)->strength))));
1344 
1345  /*
1346  * Calculate pathkeys that represent result ordering requirements
1347  */
1348  Assert(parse->distinctClause == NIL);
1349  root->sort_pathkeys = make_pathkeys_for_sortclauses(root,
1350  parse->sortClause,
1351  root->processed_tlist);
1352  }
1353  else
1354  {
1355  /* No set operations, do regular planning */
1356  PathTarget *sort_input_target;
1357  List *sort_input_targets;
1358  List *sort_input_targets_contain_srfs;
1359  bool sort_input_target_parallel_safe;
1360  PathTarget *grouping_target;
1361  List *grouping_targets;
1362  List *grouping_targets_contain_srfs;
1363  bool grouping_target_parallel_safe;
1364  PathTarget *scanjoin_target;
1365  List *scanjoin_targets;
1366  List *scanjoin_targets_contain_srfs;
1367  bool scanjoin_target_parallel_safe;
1368  bool scanjoin_target_same_exprs;
1369  bool have_grouping;
1370  WindowFuncLists *wflists = NULL;
1371  List *activeWindows = NIL;
1372  grouping_sets_data *gset_data = NULL;
1373  standard_qp_extra qp_extra;
1374 
1375  /* A recursive query should always have setOperations */
1376  Assert(!root->hasRecursion);
1377 
1378  /* Preprocess grouping sets and GROUP BY clause, if any */
1379  if (parse->groupingSets)
1380  {
1381  gset_data = preprocess_grouping_sets(root);
1382  }
1383  else
1384  {
1385  /* Preprocess regular GROUP BY clause, if any */
1386  if (parse->groupClause)
1387  parse->groupClause = preprocess_groupclause(root, NIL);
1388  }
1389 
1390  /*
1391  * Preprocess targetlist. Note that much of the remaining planning
1392  * work will be done with the PathTarget representation of tlists, but
1393  * we must also maintain the full representation of the final tlist so
1394  * that we can transfer its decoration (resnames etc) to the topmost
1395  * tlist of the finished Plan. This is kept in processed_tlist.
1396  */
1397  preprocess_targetlist(root);
1398 
1399  /*
1400  * Mark all the aggregates with resolved aggtranstypes, and detect
1401  * aggregates that are duplicates or can share transition state. We
1402  * must do this before slicing and dicing the tlist into various
1403  * pathtargets, else some copies of the Aggref nodes might escape
1404  * being marked.
1405  */
1406  if (parse->hasAggs)
1407  {
1408  preprocess_aggrefs(root, (Node *) root->processed_tlist);
1409  preprocess_aggrefs(root, (Node *) parse->havingQual);
1410  }
1411 
1412  /*
1413  * Locate any window functions in the tlist. (We don't need to look
1414  * anywhere else, since expressions used in ORDER BY will be in there
1415  * too.) Note that they could all have been eliminated by constant
1416  * folding, in which case we don't need to do any more work.
1417  */
1418  if (parse->hasWindowFuncs)
1419  {
1420  wflists = find_window_functions((Node *) root->processed_tlist,
1421  list_length(parse->windowClause));
1422  if (wflists->numWindowFuncs > 0)
1423  activeWindows = select_active_windows(root, wflists);
1424  else
1425  parse->hasWindowFuncs = false;
1426  }
1427 
1428  /*
1429  * Preprocess MIN/MAX aggregates, if any. Note: be careful about
1430  * adding logic between here and the query_planner() call. Anything
1431  * that is needed in MIN/MAX-optimizable cases will have to be
1432  * duplicated in planagg.c.
1433  */
1434  if (parse->hasAggs)
1435  preprocess_minmax_aggregates(root);
1436 
1437  /*
1438  * Figure out whether there's a hard limit on the number of rows that
1439  * query_planner's result subplan needs to return. Even if we know a
1440  * hard limit overall, it doesn't apply if the query has any
1441  * grouping/aggregation operations, or SRFs in the tlist.
1442  */
1443  if (parse->groupClause ||
1444  parse->groupingSets ||
1445  parse->distinctClause ||
1446  parse->hasAggs ||
1447  parse->hasWindowFuncs ||
1448  parse->hasTargetSRFs ||
1449  root->hasHavingQual)
1450  root->limit_tuples = -1.0;
1451  else
1452  root->limit_tuples = limit_tuples;
1453 
1454  /* Set up data needed by standard_qp_callback */
1455  qp_extra.activeWindows = activeWindows;
1456  qp_extra.groupClause = (gset_data
1457  ? (gset_data->rollups ? linitial_node(RollupData, gset_data->rollups)->groupClause : NIL)
1458  : parse->groupClause);
1459 
1460  /*
1461  * Generate the best unsorted and presorted paths for the scan/join
1462  * portion of this Query, ie the processing represented by the
1463  * FROM/WHERE clauses. (Note there may not be any presorted paths.)
1464  * We also generate (in standard_qp_callback) pathkey representations
1465  * of the query's sort clause, distinct clause, etc.
1466  */
1467  current_rel = query_planner(root, standard_qp_callback, &qp_extra);
1468 
1469  /*
1470  * Convert the query's result tlist into PathTarget format.
1471  *
1472  * Note: this cannot be done before query_planner() has performed
1473  * appendrel expansion, because that might add resjunk entries to
1474  * root->processed_tlist. Waiting till afterwards is also helpful
1475  * because the target width estimates can use per-Var width numbers
1476  * that were obtained within query_planner().
1477  */
1478  final_target = create_pathtarget(root, root->processed_tlist);
1479  final_target_parallel_safe =
1480  is_parallel_safe(root, (Node *) final_target->exprs);
1481 
1482  /*
1483  * If ORDER BY was given, consider whether we should use a post-sort
1484  * projection, and compute the adjusted target for preceding steps if
1485  * so.
1486  */
1487  if (parse->sortClause)
1488  {
1489  sort_input_target = make_sort_input_target(root,
1490  final_target,
1491  &have_postponed_srfs);
1492  sort_input_target_parallel_safe =
1493  is_parallel_safe(root, (Node *) sort_input_target->exprs);
1494  }
1495  else
1496  {
1497  sort_input_target = final_target;
1498  sort_input_target_parallel_safe = final_target_parallel_safe;
1499  }
1500 
1501  /*
1502  * If we have window functions to deal with, the output from any
1503  * grouping step needs to be what the window functions want;
1504  * otherwise, it should be sort_input_target.
1505  */
1506  if (activeWindows)
1507  {
1508  grouping_target = make_window_input_target(root,
1509  final_target,
1510  activeWindows);
1511  grouping_target_parallel_safe =
1512  is_parallel_safe(root, (Node *) grouping_target->exprs);
1513  }
1514  else
1515  {
1516  grouping_target = sort_input_target;
1517  grouping_target_parallel_safe = sort_input_target_parallel_safe;
1518  }
1519 
1520  /*
1521  * If we have grouping or aggregation to do, the topmost scan/join
1522  * plan node must emit what the grouping step wants; otherwise, it
1523  * should emit grouping_target.
1524  */
1525  have_grouping = (parse->groupClause || parse->groupingSets ||
1526  parse->hasAggs || root->hasHavingQual);
1527  if (have_grouping)
1528  {
1529  scanjoin_target = make_group_input_target(root, final_target);
1530  scanjoin_target_parallel_safe =
1531  is_parallel_safe(root, (Node *) scanjoin_target->exprs);
1532  }
1533  else
1534  {
1535  scanjoin_target = grouping_target;
1536  scanjoin_target_parallel_safe = grouping_target_parallel_safe;
1537  }
1538 
1539  /*
1540  * If there are any SRFs in the targetlist, we must separate each of
1541  * these PathTargets into SRF-computing and SRF-free targets. Replace
1542  * each of the named targets with a SRF-free version, and remember the
1543  * list of additional projection steps we need to add afterwards.
1544  */
1545  if (parse->hasTargetSRFs)
1546  {
1547  /* final_target doesn't recompute any SRFs in sort_input_target */
1548  split_pathtarget_at_srfs(root, final_target, sort_input_target,
1549  &final_targets,
1550  &final_targets_contain_srfs);
1551  final_target = linitial_node(PathTarget, final_targets);
1552  Assert(!linitial_int(final_targets_contain_srfs));
1553  /* likewise for sort_input_target vs. grouping_target */
1554  split_pathtarget_at_srfs(root, sort_input_target, grouping_target,
1555  &sort_input_targets,
1556  &sort_input_targets_contain_srfs);
1557  sort_input_target = linitial_node(PathTarget, sort_input_targets);
1558  Assert(!linitial_int(sort_input_targets_contain_srfs));
1559  /* likewise for grouping_target vs. scanjoin_target */
1560  split_pathtarget_at_srfs(root, grouping_target, scanjoin_target,
1561  &grouping_targets,
1562  &grouping_targets_contain_srfs);
1563  grouping_target = linitial_node(PathTarget, grouping_targets);
1564  Assert(!linitial_int(grouping_targets_contain_srfs));
1565  /* scanjoin_target will not have any SRFs precomputed for it */
1566  split_pathtarget_at_srfs(root, scanjoin_target, NULL,
1567  &scanjoin_targets,
1568  &scanjoin_targets_contain_srfs);
1569  scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
1570  Assert(!linitial_int(scanjoin_targets_contain_srfs));
1571  }
1572  else
1573  {
1574  /* initialize lists; for most of these, dummy values are OK */
1575  final_targets = final_targets_contain_srfs = NIL;
1576  sort_input_targets = sort_input_targets_contain_srfs = NIL;
1577  grouping_targets = grouping_targets_contain_srfs = NIL;
1578  scanjoin_targets = list_make1(scanjoin_target);
1579  scanjoin_targets_contain_srfs = NIL;
1580  }
1581 
1582  /* Apply scan/join target. */
1583  scanjoin_target_same_exprs = list_length(scanjoin_targets) == 1
1584  && equal(scanjoin_target->exprs, current_rel->reltarget->exprs);
1585  apply_scanjoin_target_to_paths(root, current_rel, scanjoin_targets,
1586  scanjoin_targets_contain_srfs,
1587  scanjoin_target_parallel_safe,
1588  scanjoin_target_same_exprs);
1589 
1590  /*
1591  * Save the various upper-rel PathTargets we just computed into
1592  * root->upper_targets[]. The core code doesn't use this, but it
1593  * provides a convenient place for extensions to get at the info. For
1594  * consistency, we save all the intermediate targets, even though some
1595  * of the corresponding upperrels might not be needed for this query.
1596  */
1597  root->upper_targets[UPPERREL_FINAL] = final_target;
1598  root->upper_targets[UPPERREL_ORDERED] = final_target;
1599  root->upper_targets[UPPERREL_PARTIAL_DISTINCT] = sort_input_target;
1600  root->upper_targets[UPPERREL_DISTINCT] = sort_input_target;
1601  root->upper_targets[UPPERREL_WINDOW] = sort_input_target;
1602  root->upper_targets[UPPERREL_GROUP_AGG] = grouping_target;
1603 
1604  /*
1605  * If we have grouping and/or aggregation, consider ways to implement
1606  * that. We build a new upperrel representing the output of this
1607  * phase.
1608  */
1609  if (have_grouping)
1610  {
1611  current_rel = create_grouping_paths(root,
1612  current_rel,
1613  grouping_target,
1614  grouping_target_parallel_safe,
1615  gset_data);
1616  /* Fix things up if grouping_target contains SRFs */
1617  if (parse->hasTargetSRFs)
1618  adjust_paths_for_srfs(root, current_rel,
1619  grouping_targets,
1620  grouping_targets_contain_srfs);
1621  }
1622 
1623  /*
1624  * If we have window functions, consider ways to implement those. We
1625  * build a new upperrel representing the output of this phase.
1626  */
1627  if (activeWindows)
1628  {
1629  current_rel = create_window_paths(root,
1630  current_rel,
1631  grouping_target,
1632  sort_input_target,
1633  sort_input_target_parallel_safe,
1634  wflists,
1635  activeWindows);
1636  /* Fix things up if sort_input_target contains SRFs */
1637  if (parse->hasTargetSRFs)
1638  adjust_paths_for_srfs(root, current_rel,
1639  sort_input_targets,
1640  sort_input_targets_contain_srfs);
1641  }
1642 
1643  /*
1644  * If there is a DISTINCT clause, consider ways to implement that. We
1645  * build a new upperrel representing the output of this phase.
1646  */
1647  if (parse->distinctClause)
1648  {
1649  current_rel = create_distinct_paths(root,
1650  current_rel);
1651  }
1652  } /* end of if (setOperations) */
1653 
1654  /*
1655  * If ORDER BY was given, consider ways to implement that, and generate a
1656  * new upperrel containing only paths that emit the correct ordering and
1657  * project the correct final_target. We can apply the original
1658  * limit_tuples limit in sort costing here, but only if there are no
1659  * postponed SRFs.
1660  */
1661  if (parse->sortClause)
1662  {
1663  current_rel = create_ordered_paths(root,
1664  current_rel,
1665  final_target,
1666  final_target_parallel_safe,
1667  have_postponed_srfs ? -1.0 :
1668  limit_tuples);
1669  /* Fix things up if final_target contains SRFs */
1670  if (parse->hasTargetSRFs)
1671  adjust_paths_for_srfs(root, current_rel,
1672  final_targets,
1673  final_targets_contain_srfs);
1674  }
1675 
1676  /*
1677  * Now we are prepared to build the final-output upperrel.
1678  */
1679  final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1680 
1681  /*
1682  * If the input rel is marked consider_parallel and there's nothing that's
1683  * not parallel-safe in the LIMIT clause, then the final_rel can be marked
1684  * consider_parallel as well. Note that if the query has rowMarks or is
1685  * not a SELECT, consider_parallel will be false for every relation in the
1686  * query.
1687  */
1688  if (current_rel->consider_parallel &&
1689  is_parallel_safe(root, parse->limitOffset) &&
1690  is_parallel_safe(root, parse->limitCount))
1691  final_rel->consider_parallel = true;
1692 
1693  /*
1694  * If the current_rel belongs to a single FDW, so does the final_rel.
1695  */
1696  final_rel->serverid = current_rel->serverid;
1697  final_rel->userid = current_rel->userid;
1698  final_rel->useridiscurrent = current_rel->useridiscurrent;
1699  final_rel->fdwroutine = current_rel->fdwroutine;
1700 
1701  /*
1702  * Generate paths for the final_rel. Insert all surviving paths, with
1703  * LockRows, Limit, and/or ModifyTable steps added if needed.
1704  */
1705  foreach(lc, current_rel->pathlist)
1706  {
1707  Path *path = (Path *) lfirst(lc);
1708 
1709  /*
1710  * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
1711  * (Note: we intentionally test parse->rowMarks not root->rowMarks
1712  * here. If there are only non-locking rowmarks, they should be
1713  * handled by the ModifyTable node instead. However, root->rowMarks
1714  * is what goes into the LockRows node.)
1715  */
1716  if (parse->rowMarks)
1717  {
1718  path = (Path *) create_lockrows_path(root, final_rel, path,
1719  root->rowMarks,
1720  assign_special_exec_param(root));
1721  }
1722 
1723  /*
1724  * If there is a LIMIT/OFFSET clause, add the LIMIT node.
1725  */
1726  if (limit_needed(parse))
1727  {
1728  path = (Path *) create_limit_path(root, final_rel, path,
1729  parse->limitOffset,
1730  parse->limitCount,
1731  parse->limitOption,
1732  offset_est, count_est);
1733  }
1734 
1735  /*
1736  * If this is an INSERT/UPDATE/DELETE/MERGE, add the ModifyTable node.
1737  */
1738  if (parse->commandType != CMD_SELECT)
1739  {
1740  Index rootRelation;
1741  List *resultRelations = NIL;
1742  List *updateColnosLists = NIL;
1743  List *withCheckOptionLists = NIL;
1744  List *returningLists = NIL;
1745  List *mergeActionLists = NIL;
1746  List *rowMarks;
1747 
1748  if (bms_membership(root->all_result_relids) == BMS_MULTIPLE)
1749  {
1750  /* Inherited UPDATE/DELETE */
1751  RelOptInfo *top_result_rel = find_base_rel(root,
1752  parse->resultRelation);
1753  int resultRelation = -1;
1754 
1755  /* Add only leaf children to ModifyTable. */
1756  while ((resultRelation = bms_next_member(root->leaf_result_relids,
1757  resultRelation)) >= 0)
1758  {
1759  RelOptInfo *this_result_rel = find_base_rel(root,
1760  resultRelation);
1761 
1762  /*
1763  * Also exclude any leaf rels that have turned dummy since
1764  * being added to the list, for example, by being excluded
1765  * by constraint exclusion.
1766  */
1767  if (IS_DUMMY_REL(this_result_rel))
1768  continue;
1769 
1770  /* Build per-target-rel lists needed by ModifyTable */
1771  resultRelations = lappend_int(resultRelations,
1772  resultRelation);
1773  if (parse->commandType == CMD_UPDATE)
1774  {
1775  List *update_colnos = root->update_colnos;
1776 
1777  if (this_result_rel != top_result_rel)
1778  update_colnos =
1779  adjust_inherited_attnums_multilevel(root,
1780  update_colnos,
1781  this_result_rel->relid,
1782  top_result_rel->relid);
1783  updateColnosLists = lappend(updateColnosLists,
1784  update_colnos);
1785  }
1786  if (parse->withCheckOptions)
1787  {
1788  List *withCheckOptions = parse->withCheckOptions;
1789 
1790  if (this_result_rel != top_result_rel)
1791  withCheckOptions = (List *)
1792  adjust_appendrel_attrs_multilevel(root,
1793  (Node *) withCheckOptions,
1794  this_result_rel->relids,
1795  top_result_rel->relids);
1796  withCheckOptionLists = lappend(withCheckOptionLists,
1797  withCheckOptions);
1798  }
1799  if (parse->returningList)
1800  {
1801  List *returningList = parse->returningList;
1802 
1803  if (this_result_rel != top_result_rel)
1804  returningList = (List *)
1805  adjust_appendrel_attrs_multilevel(root,
1806  (Node *) returningList,
1807  this_result_rel->relids,
1808  top_result_rel->relids);
1809  returningLists = lappend(returningLists,
1810  returningList);
1811  }
1812  if (parse->mergeActionList)
1813  {
1814  ListCell *l;
1815  List *mergeActionList = NIL;
1816 
1817  /*
1818  * Copy MergeActions and translate stuff that
1819  * references attribute numbers.
1820  */
1821  foreach(l, parse->mergeActionList)
1822  {
1823  MergeAction *action = lfirst(l),
1824  *leaf_action = copyObject(action);
1825 
1826  leaf_action->qual =
1827  adjust_appendrel_attrs_multilevel(root,
1828  (Node *) action->qual,
1829  this_result_rel->relids,
1830  top_result_rel->relids);
1831  leaf_action->targetList = (List *)
1832  adjust_appendrel_attrs_multilevel(root,
1833  (Node *) action->targetList,
1834  this_result_rel->relids,
1835  top_result_rel->relids);
1836  if (leaf_action->commandType == CMD_UPDATE)
1837  leaf_action->updateColnos =
1838  adjust_inherited_attnums_multilevel(root,
1839  action->updateColnos,
1840  this_result_rel->relid,
1841  top_result_rel->relid);
1842  mergeActionList = lappend(mergeActionList,
1843  leaf_action);
1844  }
1845 
1846  mergeActionLists = lappend(mergeActionLists,
1847  mergeActionList);
1848  }
1849  }
1850 
1851  if (resultRelations == NIL)
1852  {
1853  /*
1854  * We managed to exclude every child rel, so generate a
1855  * dummy one-relation plan using info for the top target
1856  * rel (even though that may not be a leaf target).
1857  * Although it's clear that no data will be updated or
1858  * deleted, we still need to have a ModifyTable node so
1859  * that any statement triggers will be executed. (This
1860  * could be cleaner if we fixed nodeModifyTable.c to allow
1861  * zero target relations, but that probably wouldn't be a
1862  * net win.)
1863  */
1864  resultRelations = list_make1_int(parse->resultRelation);
1865  if (parse->commandType == CMD_UPDATE)
1866  updateColnosLists = list_make1(root->update_colnos);
1867  if (parse->withCheckOptions)
1868  withCheckOptionLists = list_make1(parse->withCheckOptions);
1869  if (parse->returningList)
1870  returningLists = list_make1(parse->returningList);
1871  if (parse->mergeActionList)
1872  mergeActionLists = list_make1(parse->mergeActionList);
1873  }
1874  }
1875  else
1876  {
1877  /* Single-relation INSERT/UPDATE/DELETE. */
1878  resultRelations = list_make1_int(parse->resultRelation);
1879  if (parse->commandType == CMD_UPDATE)
1880  updateColnosLists = list_make1(root->update_colnos);
1881  if (parse->withCheckOptions)
1882  withCheckOptionLists = list_make1(parse->withCheckOptions);
1883  if (parse->returningList)
1884  returningLists = list_make1(parse->returningList);
1885  if (parse->mergeActionList)
1886  mergeActionLists = list_make1(parse->mergeActionList);
1887  }
1888 
1889  /*
1890  * If target is a partition root table, we need to mark the
1891  * ModifyTable node appropriately for that.
1892  */
1893  if (rt_fetch(parse->resultRelation, parse->rtable)->relkind ==
1894  RELKIND_PARTITIONED_TABLE)
1895  rootRelation = parse->resultRelation;
1896  else
1897  rootRelation = 0;
1898 
1899  /*
1900  * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
1901  * will have dealt with fetching non-locked marked rows, else we
1902  * need to have ModifyTable do that.
1903  */
1904  if (parse->rowMarks)
1905  rowMarks = NIL;
1906  else
1907  rowMarks = root->rowMarks;
1908 
1909  path = (Path *)
1910  create_modifytable_path(root, final_rel,
1911  path,
1912  parse->commandType,
1913  parse->canSetTag,
1914  parse->resultRelation,
1915  rootRelation,
1916  root->partColsUpdated,
1917  resultRelations,
1918  updateColnosLists,
1919  withCheckOptionLists,
1920  returningLists,
1921  rowMarks,
1922  parse->onConflict,
1923  mergeActionLists,
1924  assign_special_exec_param(root));
1925  }
1926 
1927  /* And shove it into final_rel */
1928  add_path(final_rel, path);
1929  }
1930 
1931  /*
1932  * Generate partial paths for final_rel, too, if outer query levels might
1933  * be able to make use of them.
1934  */
1935  if (final_rel->consider_parallel && root->query_level > 1 &&
1936  !limit_needed(parse))
1937  {
1938  Assert(!parse->rowMarks && parse->commandType == CMD_SELECT);
1939  foreach(lc, current_rel->partial_pathlist)
1940  {
1941  Path *partial_path = (Path *) lfirst(lc);
1942 
1943  add_partial_path(final_rel, partial_path);
1944  }
1945  }
1946 
1947  extra.limit_needed = limit_needed(parse);
1948  extra.limit_tuples = limit_tuples;
1949  extra.count_est = count_est;
1950  extra.offset_est = offset_est;
1951 
1952  /*
1953  * If there is an FDW that's responsible for all baserels of the query,
1954  * let it consider adding ForeignPaths.
1955  */
1956  if (final_rel->fdwroutine &&
1957  final_rel->fdwroutine->GetForeignUpperPaths)
1958  final_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_FINAL,
1959  current_rel, final_rel,
1960  &extra);
1961 
1962  /* Let extensions possibly add some more paths */
1963  if (create_upper_paths_hook)
1964  (*create_upper_paths_hook) (root, UPPERREL_FINAL,
1965  current_rel, final_rel, &extra);
1966 
1967  /* Note: currently, we leave it to callers to do set_cheapest() */
1968 }
Node * adjust_appendrel_attrs_multilevel(PlannerInfo *root, Node *node, Relids child_relids, Relids top_parent_relids)
Definition: appendinfo.c:488
List * adjust_inherited_attnums_multilevel(PlannerInfo *root, List *attnums, Index child_relid, Index top_parent_relid)
Definition: appendinfo.c:648
BMS_Membership bms_membership(const Bitmapset *a)
Definition: bitmapset.c:674
@ BMS_MULTIPLE
Definition: bitmapset.h:70
unsigned int Index
Definition: c.h:549
WindowFuncLists * find_window_functions(Node *clause, Index maxWinRef)
Definition: clauses.c:230
bool equal(const void *a, const void *b)
Definition: equalfuncs.c:3564
List * lappend_int(List *list, int datum)
Definition: list.c:354
#define copyObject(obj)
Definition: nodes.h:689
@ CMD_UPDATE
Definition: nodes.h:722
@ CMD_SELECT
Definition: nodes.h:721
int assign_special_exec_param(PlannerInfo *root)
Definition: paramassign.c:584
const char * LCS_asString(LockClauseStrength strength)
Definition: analyze.c:3118
#define rt_fetch(rangetable_index, rangetable)
Definition: parsetree.h:31
List * make_pathkeys_for_sortclauses(PlannerInfo *root, List *sortclauses, List *tlist)
Definition: pathkeys.c:1648
LimitPath * create_limit_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, Node *limitOffset, Node *limitCount, LimitOption limitOption, int64 offset_est, int64 count_est)
Definition: pathnode.c:3736
LockRowsPath * create_lockrows_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *rowMarks, int epqParam)
Definition: pathnode.c:3576
ModifyTablePath * create_modifytable_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, CmdType operation, bool canSetTag, Index nominalRelation, Index rootRelation, bool partColsUpdated, List *resultRelations, List *updateColnosLists, List *withCheckOptionLists, List *returningLists, List *rowMarks, OnConflictExpr *onconflict, List *mergeActionLists, int epqParam)
Definition: pathnode.c:3637
@ UPPERREL_FINAL
Definition: pathnodes.h:77
#define list_make1_int(x1)
Definition: pg_list.h:221
void preprocess_minmax_aggregates(PlannerInfo *root)
Definition: planagg.c:73
RelOptInfo * query_planner(PlannerInfo *root, query_pathkeys_callback qp_callback, void *qp_extra)
Definition: planmain.c:55
static List * postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
Definition: planner.c:5158
static double preprocess_limit(PlannerInfo *root, double tuple_fraction, int64 *offset_est, int64 *count_est)
Definition: planner.c:2366
static PathTarget * make_window_input_target(PlannerInfo *root, PathTarget *final_target, List *activeWindows)
Definition: planner.c:5340
static RelOptInfo * create_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel)
Definition: planner.c:4313
static PathTarget * make_sort_input_target(PlannerInfo *root, PathTarget *final_target, bool *have_postponed_srfs)
Definition: planner.c:5552
static grouping_sets_data * preprocess_grouping_sets(PlannerInfo *root)
Definition: planner.c:1977
static PathTarget * make_group_input_target(PlannerInfo *root, PathTarget *final_target)
Definition: planner.c:4933
static List * select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
Definition: planner.c:5191
bool limit_needed(Query *parse)
Definition: planner.c:2551
static RelOptInfo * create_ordered_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, double limit_tuples)
Definition: planner.c:4662
static RelOptInfo * create_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, grouping_sets_data *gd)
Definition: planner.c:3344
static void standard_qp_callback(PlannerInfo *root, void *extra)
Definition: planner.c:3132
static RelOptInfo * create_window_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *input_target, PathTarget *output_target, bool output_target_parallel_safe, WindowFuncLists *wflists, List *activeWindows)
Definition: planner.c:4096
void preprocess_aggrefs(PlannerInfo *root, Node *clause)
Definition: prepagg.c:111
void preprocess_targetlist(PlannerInfo *root)
Definition: preptlist.c:62
RelOptInfo * plan_set_operations(PlannerInfo *root)
Definition: prepunion.c:103
RelOptInfo * find_base_rel(PlannerInfo *root, int relid)
Definition: relnode.c:375
Cardinality limit_tuples
Definition: pathnodes.h:2635
bool partColsUpdated
Definition: pathnodes.h:380
bool hasRecursion
Definition: pathnodes.h:353
Index query_level
Definition: pathnodes.h:166
List * rowMarks
Definition: pathnodes.h:289
Cardinality limit_tuples
Definition: pathnodes.h:342
Selectivity tuple_fraction
Definition: pathnodes.h:341
List * update_colnos
Definition: pathnodes.h:330
Relids all_result_relids
Definition: pathnodes.h:277
Relids leaf_result_relids
Definition: pathnodes.h:278
Index relid
Definition: pathnodes.h:710
int numWindowFuncs
Definition: clauses.h:21
List * groupClause
Definition: planner.c:99
List * activeWindows
Definition: planner.c:98
void split_pathtarget_at_srfs(PlannerInfo *root, PathTarget *target, PathTarget *input_target, List **targets, List **targets_contain_srfs)
Definition: tlist.c:870
#define create_pathtarget(root, tlist)
Definition: tlist.h:53

References generate_unaccent_rules::action, standard_qp_extra::activeWindows, add_partial_path(), add_path(), adjust_appendrel_attrs_multilevel(), adjust_inherited_attnums_multilevel(), adjust_paths_for_srfs(), PlannerInfo::all_result_relids, apply_scanjoin_target_to_paths(), Assert(), assign_special_exec_param(), bms_membership(), BMS_MULTIPLE, bms_next_member(), RelOptInfo::cheapest_total_path, CMD_SELECT, CMD_UPDATE, RelOptInfo::consider_parallel, copyObject, FinalPathExtraData::count_est, create_distinct_paths(), create_grouping_paths(), create_limit_path(), create_lockrows_path(), create_modifytable_path(), create_ordered_paths(), create_pathtarget, create_upper_paths_hook, create_window_paths(), equal(), ereport, errcode(), errmsg(), ERROR, PathTarget::exprs, RelOptInfo::fdwroutine, fetch_upper_rel(), find_base_rel(), find_window_functions(), FdwRoutine::GetForeignUpperPaths, standard_qp_extra::groupClause, PlannerInfo::hasHavingQual, PlannerInfo::hasRecursion, IS_DUMMY_REL, is_parallel_safe(), lappend(), lappend_int(), LCS_asString(), PlannerInfo::leaf_result_relids, lfirst, limit_needed(), FinalPathExtraData::limit_needed, PlannerInfo::limit_tuples, FinalPathExtraData::limit_tuples, linitial_int, linitial_node, list_length(), list_make1, list_make1_int, make_group_input_target(), make_pathkeys_for_sortclauses(), make_sort_input_target(), make_window_input_target(), NIL, WindowFuncLists::numWindowFuncs, FinalPathExtraData::offset_est, parse(), PlannerInfo::parse, PlannerInfo::partColsUpdated, RelOptInfo::partial_pathlist, RelOptInfo::pathlist, Path::pathtarget, plan_set_operations(), postprocess_setop_tlist(), preprocess_aggrefs(), preprocess_groupclause(), preprocess_grouping_sets(), preprocess_limit(), preprocess_minmax_aggregates(), preprocess_targetlist(), PlannerInfo::processed_tlist, PlannerInfo::query_level, query_planner(), RelOptInfo::relid, RelOptInfo::relids, RelOptInfo::reltarget, grouping_sets_data::rollups, PlannerInfo::rowMarks, rt_fetch, select_active_windows(), RelOptInfo::serverid, PlannerInfo::sort_pathkeys, split_pathtarget_at_srfs(), standard_qp_callback(), PlannerInfo::tuple_fraction, PlannerInfo::update_colnos, PlannerInfo::upper_targets, UPPERREL_DISTINCT, UPPERREL_FINAL, UPPERREL_GROUP_AGG, UPPERREL_ORDERED, UPPERREL_PARTIAL_DISTINCT, UPPERREL_WINDOW, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by subquery_planner().
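
A small sketch of the LIMIT/OFFSET bookkeeping at the top of the function: with a known LIMIT and a known non-negative OFFSET, sort costing can assume a bound of count plus offset tuples, otherwise -1 signals "no usable bound". The values below are invented for the demo.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    int64_t count_est = 20;     /* as if from LIMIT 20 */
    int64_t offset_est = 5;     /* as if from OFFSET 5 */
    double  limit_tuples = -1.0;    /* -1 means "no usable bound" */

    /* a bounded sort is only safe when both values are known */
    if (count_est > 0 && offset_est >= 0)
        limit_tuples = (double) count_est + (double) offset_est;

    printf("bounded-sort limit: %.0f tuples\n", limit_tuples);  /* 25 */
    return 0;
}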

◆ is_degenerate_grouping()

static bool is_degenerate_grouping ( PlannerInfo root)
static

Definition at line 3510 of file planner.c.

3511 {
3512  Query *parse = root->parse;
3513 
3514  return (root->hasHavingQual || parse->groupingSets) &&
3515  !parse->hasAggs && parse->groupClause == NIL;
3516 }

References PlannerInfo::hasHavingQual, NIL, parse(), and PlannerInfo::parse.

Referenced by create_grouping_paths().

◆ limit_needed()

bool limit_needed ( Query parse)

Definition at line 2551 of file planner.c.

2552 {
2553  Node *node;
2554 
2555  node = parse->limitCount;
2556  if (node)
2557  {
2558  if (IsA(node, Const))
2559  {
2560  /* NULL indicates LIMIT ALL, ie, no limit */
2561  if (!((Const *) node)->constisnull)
2562  return true; /* LIMIT with a constant value */
2563  }
2564  else
2565  return true; /* non-constant LIMIT */
2566  }
2567 
2568  node = parse->limitOffset;
2569  if (node)
2570  {
2571  if (IsA(node, Const))
2572  {
2573  /* Treat NULL as no offset; the executor would too */
2574  if (!((Const *) node)->constisnull)
2575  {
2576  int64 offset = DatumGetInt64(((Const *) node)->constvalue);
2577 
2578  if (offset != 0)
2579  return true; /* OFFSET with a nonzero value */
2580  }
2581  }
2582  else
2583  return true; /* non-constant OFFSET */
2584  }
2585 
2586  return false; /* don't need a Limit plan node */
2587 }
#define IsA(nodeptr, _type_)
Definition: nodes.h:624
#define DatumGetInt64(X)
Definition: postgres.h:651

References DatumGetInt64, IsA, and parse().

Referenced by grouping_planner(), and set_rel_consider_parallel().
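
The decision table implied by the code: a non-NULL constant LIMIT, any non-constant LIMIT or OFFSET, or a nonzero constant OFFSET all require a Limit node; LIMIT ALL (a NULL constant) and OFFSET 0 do not. A minimal sketch of that logic outside the planner (hypothetical limit_expr struct; the real function inspects Const nodes):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical simplified model of a LIMIT or OFFSET expression */
typedef struct limit_expr
{
    bool    present;    /* clause given at all? */
    bool    is_const;   /* a Const node (vs. parameter/expression)? */
    bool    is_null;    /* constant NULL, i.e. LIMIT ALL */
    int64_t value;      /* constant value when non-NULL */
} limit_expr;

static bool
limit_needed_sketch(limit_expr count, limit_expr offset)
{
    if (count.present)
    {
        if (!count.is_const)
            return true;        /* non-constant LIMIT */
        if (!count.is_null)
            return true;        /* LIMIT with a constant value */
    }
    if (offset.present)
    {
        if (!offset.is_const)
            return true;        /* non-constant OFFSET */
        if (!offset.is_null && offset.value != 0)
            return true;        /* OFFSET with a nonzero value */
    }
    return false;               /* no Limit plan node needed */
}

int
main(void)
{
    limit_expr none = {false, false, false, 0};
    limit_expr limit10 = {true, true, false, 10};
    limit_expr offset0 = {true, true, false, 0};

    printf("LIMIT 10: %d\n", limit_needed_sketch(limit10, none));  /* 1 */
    printf("OFFSET 0: %d\n", limit_needed_sketch(none, offset0));  /* 0 */
    return 0;
}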

◆ make_group_input_target()

static PathTarget * make_group_input_target ( PlannerInfo * root,
PathTarget * final_target 
)
static

Definition at line 4933 of file planner.c.

4934 {
4935  Query *parse = root->parse;
4936  PathTarget *input_target;
4937  List *non_group_cols;
4938  List *non_group_vars;
4939  int i;
4940  ListCell *lc;
4941 
4942  /*
4943  * We must build a target containing all grouping columns, plus any other
4944  * Vars mentioned in the query's targetlist and HAVING qual.
4945  */
4946  input_target = create_empty_pathtarget();
4947  non_group_cols = NIL;
4948 
4949  i = 0;
4950  foreach(lc, final_target->exprs)
4951  {
4952  Expr *expr = (Expr *) lfirst(lc);
4953  Index sgref = get_pathtarget_sortgroupref(final_target, i);
4954 
4955  if (sgref && parse->groupClause &&
4956  get_sortgroupref_clause_noerr(sgref, parse->groupClause) != NULL)
4957  {
4958  /*
4959  * It's a grouping column, so add it to the input target as-is.
4960  */
4961  add_column_to_pathtarget(input_target, expr, sgref);
4962  }
4963  else
4964  {
4965  /*
4966  * Non-grouping column, so just remember the expression for later
4967  * call to pull_var_clause.
4968  */
4969  non_group_cols = lappend(non_group_cols, expr);
4970  }
4971 
4972  i++;
4973  }
4974 
4975  /*
4976  * If there's a HAVING clause, we'll need the Vars it uses, too.
4977  */
4978  if (parse->havingQual)
4979  non_group_cols = lappend(non_group_cols, parse->havingQual);
4980 
4981  /*
4982  * Pull out all the Vars mentioned in non-group cols (plus HAVING), and
4983  * add them to the input target if not already present. (A Var used
4984  * directly as a GROUP BY item will be present already.) Note this
4985  * includes Vars used in resjunk items, so we are covering the needs of
4986  * ORDER BY and window specifications. Vars used within Aggrefs and
4987  * WindowFuncs will be pulled out here, too.
4988  */
4989  non_group_vars = pull_var_clause((Node *) non_group_cols,
4990  PVC_RECURSE_AGGREGATES |
4991  PVC_RECURSE_WINDOWFUNCS |
4992  PVC_INCLUDE_PLACEHOLDERS);
4993  add_new_columns_to_pathtarget(input_target, non_group_vars);
4994 
4995  /* clean up cruft */
4996  list_free(non_group_vars);
4997  list_free(non_group_cols);
4998 
4999  /* XXX this causes some redundant cost calculation ... */
5000  return set_pathtarget_cost_width(root, input_target);
5001 }
PathTarget * set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
Definition: costsize.c:6309
void list_free(List *list)
Definition: list.c:1505
#define PVC_RECURSE_AGGREGATES
Definition: optimizer.h:189
#define PVC_RECURSE_WINDOWFUNCS
Definition: optimizer.h:191
#define PVC_INCLUDE_PLACEHOLDERS
Definition: optimizer.h:192
#define get_pathtarget_sortgroupref(target, colno)
Definition: pathnodes.h:1131
SortGroupClause * get_sortgroupref_clause_noerr(Index sortref, List *clauses)
Definition: tlist.c:432
void add_new_columns_to_pathtarget(PathTarget *target, List *exprs)
Definition: tlist.c:741
PathTarget * create_empty_pathtarget(void)
Definition: tlist.c:670
List * pull_var_clause(Node *node, int flags)
Definition: var.c:604

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), create_empty_pathtarget(), PathTarget::exprs, get_pathtarget_sortgroupref, get_sortgroupref_clause_noerr(), i, lappend(), lfirst, list_free(), NIL, parse(), PlannerInfo::parse, pull_var_clause(), PVC_INCLUDE_PLACEHOLDERS, PVC_RECURSE_AGGREGATES, PVC_RECURSE_WINDOWFUNCS, and set_pathtarget_cost_width().

Referenced by grouping_planner().
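
A worked example, assuming a simple query: for SELECT a, sum(b) FROM t GROUP BY a HAVING max(c) > 0, the final target is {a, sum(b)}, but the target built here is {a, b, c} — "a" is kept as-is because it is a grouping column, while pull_var_clause() extracts "b" from sum(b) and "c" from the HAVING qual. The sketch below (illustration only; plain C strings stand in for expression trees) traces that assembly:

#include <stdio.h>

int
main(void)
{
    /* Step 1: grouping columns copied from the final target by sortgroupref */
    const char *grouping_cols[] = {"a"};
    /* Step 2: Vars pulled out of sum(b) and max(c) > 0 by pull_var_clause() */
    const char *pulled_vars[] = {"b", "c"};
    int i;

    printf("input_target = { ");
    for (i = 0; i < 1; i++)
        printf("%s ", grouping_cols[i]);
    for (i = 0; i < 2; i++)
        printf("%s ", pulled_vars[i]);
    printf("}\n");          /* -> input_target = { a b c } */
    return 0;
}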

◆ make_grouping_rel()

static RelOptInfo * make_grouping_rel ( PlannerInfo * root,
RelOptInfo * input_rel,
PathTarget * target,
bool  target_parallel_safe,
Node * havingQual 
)
static

Definition at line 3457 of file planner.c.

3460 {
3461  RelOptInfo *grouped_rel;
3462 
3463  if (IS_OTHER_REL(input_rel))
3464  {
3465  grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG,
3466  input_rel->relids);
3467  grouped_rel->reloptkind = RELOPT_OTHER_UPPER_REL;
3468  }
3469  else
3470  {
3471  /*
3472  * By tradition, the relids set for the main grouping relation is
3473  * NULL. (This could be changed, but might require adjustments
3474  * elsewhere.)
3475  */
3476  grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, NULL);
3477  }
3478 
3479  /* Set target. */
3480  grouped_rel->reltarget = target;
3481 
3482  /*
3483  * If the input relation is not parallel-safe, then the grouped relation
3484  * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
3485  * target list and HAVING quals are parallel-safe.
3486  */
3487  if (input_rel->consider_parallel && target_parallel_safe &&
3488  is_parallel_safe(root, (Node *) havingQual))
3489  grouped_rel->consider_parallel = true;
3490 
3491  /*
3492  * If the input rel belongs to a single FDW, so does the grouped rel.
3493  */
3494  grouped_rel->serverid = input_rel->serverid;
3495  grouped_rel->userid = input_rel->userid;
3496  grouped_rel->useridiscurrent = input_rel->useridiscurrent;
3497  grouped_rel->fdwroutine = input_rel->fdwroutine;
3498 
3499  return grouped_rel;
3500 }
@ RELOPT_OTHER_UPPER_REL
Definition: pathnodes.h:647

References RelOptInfo::consider_parallel, RelOptInfo::fdwroutine, fetch_upper_rel(), IS_OTHER_REL, is_parallel_safe(), RelOptInfo::relids, RELOPT_OTHER_UPPER_REL, RelOptInfo::reloptkind, RelOptInfo::reltarget, RelOptInfo::serverid, UPPERREL_GROUP_AGG, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by create_grouping_paths(), and create_partitionwise_grouping_paths().
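
The parallel-safety rule above composes three conditions: the grouped rel may consider parallel paths only if the input rel does, and both the target list and the HAVING qual are parallel-safe. A one-function sketch of the rule (hypothetical booleans standing in for the real is_parallel_safe() checks):

#include <stdbool.h>
#include <stdio.h>

static bool
grouped_rel_consider_parallel(bool input_consider_parallel,
                              bool target_parallel_safe,
                              bool having_parallel_safe)
{
    /* all three must hold; any parallel-unsafe piece disqualifies the rel */
    return input_consider_parallel &&
        target_parallel_safe &&
        having_parallel_safe;
}

int
main(void)
{
    /* a parallel-restricted HAVING qual makes the grouped rel unsafe */
    printf("%d\n", grouped_rel_consider_parallel(true, true, false));  /* 0 */
    return 0;
}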

◆ make_partial_grouping_target()

static PathTarget * make_partial_grouping_target ( PlannerInfo * root,
PathTarget * grouping_target,
Node * havingQual 
)
static

Definition at line 5020 of file planner.c.

5023 {
5024  Query *parse = root->parse;
5025  PathTarget *partial_target;
5026  List *non_group_cols;
5027  List *non_group_exprs;
5028  int i;
5029  ListCell *lc;
5030 
5031  partial_target = create_empty_pathtarget();
5032  non_group_cols = NIL;
5033 
5034  i = 0;
5035  foreach(lc, grouping_target->exprs)
5036  {
5037  Expr *expr = (Expr *) lfirst(lc);
5038  Index sgref = get_pathtarget_sortgroupref(grouping_target, i);
5039 
5040  if (sgref && parse->groupClause &&
5041  get_sortgroupref_clause_noerr(sgref, parse->groupClause) != NULL)
5042  {
5043  /*
5044  * It's a grouping column, so add it to the partial_target as-is.
5045  * (This allows the upper agg step to repeat the grouping calcs.)
5046  */
5047  add_column_to_pathtarget(partial_target, expr, sgref);
5048  }
5049  else
5050  {
5051  /*
5052  * Non-grouping column, so just remember the expression for later
5053  * call to pull_var_clause.
5054  */
5055  non_group_cols = lappend(non_group_cols, expr);
5056  }
5057 
5058  i++;
5059  }
5060 
5061  /*
5062  * If there's a HAVING clause, we'll need the Vars/Aggrefs it uses, too.
5063  */
5064  if (havingQual)
5065  non_group_cols = lappend(non_group_cols, havingQual);
5066 
5067  /*
5068  * Pull out all the Vars, PlaceHolderVars, and Aggrefs mentioned in
5069  * non-group cols (plus HAVING), and add them to the partial_target if not
5070  * already present. (An expression used directly as a GROUP BY item will
5071  * be present already.) Note this includes Vars used in resjunk items, so
5072  * we are covering the needs of ORDER BY and window specifications.
5073  */
5074  non_group_exprs = pull_var_clause((Node *) non_group_cols,
5075  PVC_INCLUDE_AGGREGATES |
5076  PVC_RECURSE_WINDOWFUNCS |
5077  PVC_INCLUDE_PLACEHOLDERS);
5078 
5079  add_new_columns_to_pathtarget(partial_target, non_group_exprs);
5080 
5081  /*
5082  * Adjust Aggrefs to put them in partial mode. At this point all Aggrefs
5083  * are at the top level of the target list, so we can just scan the list
5084  * rather than recursing through the expression trees.
5085  */
5086  foreach(lc, partial_target->exprs)
5087  {
5088  Aggref *aggref = (Aggref *) lfirst(lc);
5089 
5090  if (IsA(aggref, Aggref))
5091  {
5092  Aggref *newaggref;
5093 
5094  /*
5095  * We shouldn't need to copy the substructure of the Aggref node,
5096  * but flat-copy the node itself to avoid damaging other trees.
5097  */
5098  newaggref = makeNode(Aggref);
5099  memcpy(newaggref, aggref, sizeof(Aggref));
5100 
5101  /* For now, assume serialization is required */
5102  mark_partial_aggref(newaggref, AGGSPLIT_INITIAL_SERIAL);
5103 
5104  lfirst(lc) = newaggref;
5105  }
5106  }
5107 
5108  /* clean up cruft */
5109  list_free(non_group_exprs);
5110  list_free(non_group_cols);
5111 
5112  /* XXX this causes some redundant cost calculation ... */
5113  return set_pathtarget_cost_width(root, partial_target);
5114 }
#define PVC_INCLUDE_AGGREGATES
Definition: optimizer.h:188
void mark_partial_aggref(Aggref *agg, AggSplit aggsplit)
Definition: planner.c:5123

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), AGGSPLIT_INITIAL_SERIAL, create_empty_pathtarget(), PathTarget::exprs, get_pathtarget_sortgroupref, get_sortgroupref_clause_noerr(), i, IsA, lappend(), lfirst, list_free(), makeNode, mark_partial_aggref(), NIL, parse(), PlannerInfo::parse, pull_var_clause(), PVC_INCLUDE_AGGREGATES, PVC_INCLUDE_PLACEHOLDERS, PVC_RECURSE_WINDOWFUNCS, and set_pathtarget_cost_width().

Referenced by create_partial_grouping_paths().
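
The flat-copy idiom above is worth noting: only the Aggref node itself is duplicated before being switched into partial mode, so its argument substructure stays shared with the original tree and other plan trees are not damaged. A minimal sketch of the same pattern (hypothetical FakeAggref type, not a PostgreSQL node):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct FakeAggref
{
    const char *aggname;    /* e.g. "sum" */
    void       *args;       /* shared argument substructure, not copied */
    int         aggsplit;   /* 0 = simple; 1 = partial, serialized output */
} FakeAggref;

int
main(void)
{
    FakeAggref  orig = {"sum", NULL, 0};
    FakeAggref *partial = malloc(sizeof(FakeAggref));

    memcpy(partial, &orig, sizeof(FakeAggref));     /* flat copy only */
    partial->aggsplit = 1;  /* analogous to mark_partial_aggref(newaggref,
                             * AGGSPLIT_INITIAL_SERIAL) */

    printf("orig=%d partial=%d\n", orig.aggsplit, partial->aggsplit);  /* 0 1 */
    free(partial);
    return 0;
}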

◆ make_pathkeys_for_window()

static List * make_pathkeys_for_window ( PlannerInfo * root,
WindowClause * wc,
List * tlist 
)
static

Definition at line 5458 of file planner.c.

5460 {
5461  List *window_pathkeys;
5462  List *window_sortclauses;
5463 
5464  /* Throw error if can't sort */
5465  if (!grouping_is_sortable(wc->partitionClause))
5466  ereport(ERROR,
5467  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
5468  errmsg("could not implement window PARTITION BY"),
5469  errdetail("Window partitioning columns must be of sortable datatypes.")));
5470  if (!grouping_is_sortable(wc->orderClause))
5471  ereport(ERROR,
5472  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
5473  errmsg("could not implement window ORDER BY"),
5474  errdetail("Window ordering columns must be of sortable datatypes.")));
5475 
5476  /* Okay, make the combined pathkeys */
5477  window_sortclauses = list_concat_copy(wc->partitionClause, wc->orderClause);
5478  window_pathkeys = make_pathkeys_for_sortclauses(root,
5479  window_sortclauses,
5480  tlist);
5481  list_free(window_sortclauses);
5482  return window_pathkeys;
5483 }
List * list_concat_copy(const List *list1, const List *list2)
Definition: list.c:577
List * partitionClause
Definition: parsenodes.h:1401
List * orderClause
Definition: parsenodes.h:1402

References ereport, errcode(), errdetail(), errmsg(), ERROR, grouping_is_sortable(), list_concat_copy(), list_free(), make_pathkeys_for_sortclauses(), WindowClause::orderClause, and WindowClause::partitionClause.

Referenced by create_one_window_path(), and standard_qp_callback().
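
Concretely, for a window defined as OVER (PARTITION BY a, b ORDER BY c), the partition and order clauses are concatenated, so the resulting pathkeys demand a sort on (a, b, c). A toy illustration of the concatenation (plain strings instead of SortGroupClause nodes):

#include <stdio.h>

int
main(void)
{
    const char *partition_clause[] = {"a", "b"};
    const char *order_clause[] = {"c"};
    int i;

    printf("window sort order: ");
    for (i = 0; i < 2; i++)
        printf("%s ", partition_clause[i]);
    for (i = 0; i < 1; i++)
        printf("%s ", order_clause[i]);
    printf("\n");           /* -> a b c */
    return 0;
}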

◆ make_sort_input_target()

static PathTarget * make_sort_input_target ( PlannerInfo * root,
PathTarget * final_target,
bool * have_postponed_srfs 
)
static

Definition at line 5552 of file planner.c.

5555 {
5556  Query *parse = root->parse;
5557  PathTarget *input_target;
5558  int ncols;
5559  bool *col_is_srf;
5560  bool *postpone_col;
5561  bool have_srf;
5562  bool have_volatile;
5563  bool have_expensive;
5564  bool have_srf_sortcols;
5565  bool postpone_srfs;
5566  List *postponable_cols;
5567  List *postponable_vars;
5568  int i;
5569  ListCell *lc;
5570 
5571  /* Shouldn't get here unless query has ORDER BY */
5572  Assert(parse->sortClause);
5573 
5574  *have_postponed_srfs = false; /* default result */
5575 
5576  /* Inspect tlist and collect per-column information */
5577  ncols = list_length(final_target->exprs);
5578  col_is_srf = (bool *) palloc0(ncols * sizeof(bool));
5579  postpone_col = (bool *) palloc0(ncols * sizeof(bool));
5580  have_srf = have_volatile = have_expensive = have_srf_sortcols = false;
5581 
5582  i = 0;
5583  foreach(lc, final_target->exprs)
5584  {
5585  Expr *expr = (Expr *) lfirst(lc);
5586 
5587  /*
5588  * If the column has a sortgroupref, assume it has to be evaluated
5589  * before sorting. Generally such columns would be ORDER BY, GROUP
5590  * BY, etc targets. One exception is columns that were removed from
5591  * GROUP BY by remove_useless_groupby_columns() ... but those would
5592  * only be Vars anyway. There don't seem to be any cases where it
5593  * would be worth the trouble to double-check.
5594  */
5595  if (get_pathtarget_sortgroupref(final_target, i) == 0)
5596  {
5597  /*
5598  * Check for SRF or volatile functions. Check the SRF case first
5599  * because we must know whether we have any postponed SRFs.
5600  */
5601  if (parse->hasTargetSRFs &&
5602  expression_returns_set((Node *) expr))
5603  {
5604  /* We'll decide below whether these are postponable */
5605  col_is_srf[i] = true;
5606  have_srf = true;
5607  }
5608  else if (contain_volatile_functions((Node *) expr))
5609  {
5610  /* Unconditionally postpone */
5611  postpone_col[i] = true;
5612  have_volatile = true;
5613  }
5614  else
5615  {
5616  /*
5617  * Else check the cost. XXX it's annoying to have to do this
5618  * when set_pathtarget_cost_width() just did it. Refactor to
5619  * allow sharing the work?
5620  */
5621  QualCost cost;
5622 
5623  cost_qual_eval_node(&cost, (Node *) expr, root);
5624 
5625  /*
5626  * We arbitrarily define "expensive" as "more than 10X
5627  * cpu_operator_cost". Note this will take in any PL function
5628  * with default cost.
5629  */
5630  if (cost.per_tuple > 10 * cpu_operator_cost)
5631  {
5632  postpone_col[i] = true;
5633  have_expensive = true;
5634  }
5635  }
5636  }
5637  else
5638  {
5639  /* For sortgroupref cols, just check if any contain SRFs */
5640  if (!have_srf_sortcols &&
5641  parse->hasTargetSRFs &&
5642  expression_returns_set((Node *) expr))
5643  have_srf_sortcols = true;
5644  }
5645 
5646  i++;
5647  }
5648 
5649  /*
5650  * We can postpone SRFs if we have some but none are in sortgroupref cols.
5651  */
5652  postpone_srfs = (have_srf && !have_srf_sortcols);
5653 
5654  /*
5655  * If we don't need a post-sort projection, just return final_target.
5656  */
5657  if (!(postpone_srfs || have_volatile ||
5658  (have_expensive &&
5659  (parse->limitCount || root->tuple_fraction > 0))))
5660  return final_target;
5661 
5662  /*
5663  * Report whether the post-sort projection will contain set-returning
5664  * functions. This is important because it affects whether the Sort can
5665  * rely on the query's LIMIT (if any) to bound the number of rows it needs
5666  * to return.