PostgreSQL Source Code (git master)
planner.c File Reference
#include "postgres.h"
#include <limits.h>
#include <math.h>
#include "access/genam.h"
#include "access/htup_details.h"
#include "access/parallel.h"
#include "access/sysattr.h"
#include "access/table.h"
#include "access/xact.h"
#include "catalog/pg_aggregate.h"
#include "catalog/pg_constraint.h"
#include "catalog/pg_inherits.h"
#include "catalog/pg_proc.h"
#include "catalog/pg_type.h"
#include "executor/executor.h"
#include "executor/nodeAgg.h"
#include "foreign/fdwapi.h"
#include "jit/jit.h"
#include "lib/bipartite_match.h"
#include "lib/knapsack.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/appendinfo.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/inherit.h"
#include "optimizer/optimizer.h"
#include "optimizer/paramassign.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/plancat.h"
#include "optimizer/planmain.h"
#include "optimizer/planner.h"
#include "optimizer/prep.h"
#include "optimizer/subselect.h"
#include "optimizer/tlist.h"
#include "parser/analyze.h"
#include "parser/parse_agg.h"
#include "parser/parsetree.h"
#include "partitioning/partdesc.h"
#include "rewrite/rewriteManip.h"
#include "storage/dsm_impl.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
#include "utils/selfuncs.h"
#include "utils/syscache.h"

Data Structures

struct  standard_qp_extra
 
struct  grouping_sets_data
 
struct  WindowClauseSortData
 

Macros

#define EXPRKIND_QUAL   0
 
#define EXPRKIND_TARGET   1
 
#define EXPRKIND_RTFUNC   2
 
#define EXPRKIND_RTFUNC_LATERAL   3
 
#define EXPRKIND_VALUES   4
 
#define EXPRKIND_VALUES_LATERAL   5
 
#define EXPRKIND_LIMIT   6
 
#define EXPRKIND_APPINFO   7
 
#define EXPRKIND_PHV   8
 
#define EXPRKIND_TABLESAMPLE   9
 
#define EXPRKIND_ARBITER_ELEM   10
 
#define EXPRKIND_TABLEFUNC   11
 
#define EXPRKIND_TABLEFUNC_LATERAL   12
 

Functions

static Node * preprocess_expression (PlannerInfo *root, Node *expr, int kind)
 
static void preprocess_qual_conditions (PlannerInfo *root, Node *jtnode)
 
static void grouping_planner (PlannerInfo *root, double tuple_fraction)
 
static grouping_sets_data * preprocess_grouping_sets (PlannerInfo *root)
 
static List * remap_to_groupclause_idx (List *groupClause, List *gsets, int *tleref_to_colnum_map)
 
static void preprocess_rowmarks (PlannerInfo *root)
 
static double preprocess_limit (PlannerInfo *root, double tuple_fraction, int64 *offset_est, int64 *count_est)
 
static void remove_useless_groupby_columns (PlannerInfo *root)
 
static List * preprocess_groupclause (PlannerInfo *root, List *force)
 
static List * extract_rollup_sets (List *groupingSets)
 
static List * reorder_grouping_sets (List *groupingSets, List *sortclause)
 
static void standard_qp_callback (PlannerInfo *root, void *extra)
 
static double get_number_of_groups (PlannerInfo *root, double path_rows, grouping_sets_data *gd, List *target_list)
 
static RelOptInfo * create_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, grouping_sets_data *gd)
 
static bool is_degenerate_grouping (PlannerInfo *root)
 
static void create_degenerate_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel)
 
static RelOptInfo * make_grouping_rel (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, Node *havingQual)
 
static void create_ordinary_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, GroupPathExtraData *extra, RelOptInfo **partially_grouped_rel_p)
 
static void consider_groupingsets_paths (PlannerInfo *root, RelOptInfo *grouped_rel, Path *path, bool is_sorted, bool can_hash, grouping_sets_data *gd, const AggClauseCosts *agg_costs, double dNumGroups)
 
static RelOptInfo * create_window_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *input_target, PathTarget *output_target, bool output_target_parallel_safe, WindowFuncLists *wflists, List *activeWindows)
 
static void create_one_window_path (PlannerInfo *root, RelOptInfo *window_rel, Path *path, PathTarget *input_target, PathTarget *output_target, WindowFuncLists *wflists, List *activeWindows)
 
static RelOptInfo * create_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel)
 
static void create_partial_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *final_distinct_rel)
 
static RelOptInfo * create_final_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *distinct_rel)
 
static RelOptInfo * create_ordered_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, double limit_tuples)
 
static PathTarget * make_group_input_target (PlannerInfo *root, PathTarget *final_target)
 
static PathTarget * make_partial_grouping_target (PlannerInfo *root, PathTarget *grouping_target, Node *havingQual)
 
static List * postprocess_setop_tlist (List *new_tlist, List *orig_tlist)
 
static List * select_active_windows (PlannerInfo *root, WindowFuncLists *wflists)
 
static PathTarget * make_window_input_target (PlannerInfo *root, PathTarget *final_target, List *activeWindows)
 
static List * make_pathkeys_for_window (PlannerInfo *root, WindowClause *wc, List *tlist)
 
static PathTarget * make_sort_input_target (PlannerInfo *root, PathTarget *final_target, bool *have_postponed_srfs)
 
static void adjust_paths_for_srfs (PlannerInfo *root, RelOptInfo *rel, List *targets, List *targets_contain_srfs)
 
static void add_paths_to_grouping_rel (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, double dNumGroups, GroupPathExtraData *extra)
 
static RelOptInfo * create_partial_grouping_paths (PlannerInfo *root, RelOptInfo *grouped_rel, RelOptInfo *input_rel, grouping_sets_data *gd, GroupPathExtraData *extra, bool force_rel_creation)
 
static void gather_grouping_paths (PlannerInfo *root, RelOptInfo *rel)
 
static bool can_partial_agg (PlannerInfo *root)
 
static void apply_scanjoin_target_to_paths (PlannerInfo *root, RelOptInfo *rel, List *scanjoin_targets, List *scanjoin_targets_contain_srfs, bool scanjoin_target_parallel_safe, bool tlist_same_exprs)
 
static void create_partitionwise_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, PartitionwiseAggregateType patype, GroupPathExtraData *extra)
 
static bool group_by_has_partkey (RelOptInfo *input_rel, List *targetList, List *groupClause)
 
static int common_prefix_cmp (const void *a, const void *b)
 
PlannedStmt * planner (Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams)
 
PlannedStmt * standard_planner (Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams)
 
PlannerInfo * subquery_planner (PlannerGlobal *glob, Query *parse, PlannerInfo *parent_root, bool hasRecursion, double tuple_fraction)
 
Expr * preprocess_phv_expression (PlannerInfo *root, Expr *expr)
 
RowMarkType select_rowmark_type (RangeTblEntry *rte, LockClauseStrength strength)
 
bool limit_needed (Query *parse)
 
static List * make_pathkeys_for_groupagg (PlannerInfo *root, List *groupClause, List *tlist, int *number_groupby_pathkeys)
 
void mark_partial_aggref (Aggref *agg, AggSplit aggsplit)
 
Path * get_cheapest_fractional_path (RelOptInfo *rel, double tuple_fraction)
 
Expr * expression_planner (Expr *expr)
 
Expr * expression_planner_with_deps (Expr *expr, List **relationOids, List **invalItems)
 
bool plan_cluster_use_sort (Oid tableOid, Oid indexOid)
 
int plan_create_index_workers (Oid tableOid, Oid indexOid)
 

Variables

double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION
 
int force_parallel_mode = FORCE_PARALLEL_OFF
 
bool parallel_leader_participation = true
 
planner_hook_type planner_hook = NULL
 
create_upper_paths_hook_type create_upper_paths_hook = NULL
 
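The two hook variables above are this file's main extension points: planner_hook lets a loadable module intercept (or wrap) the whole planner, and create_upper_paths_hook lets it add paths to upper relations. A minimal sketch of installing planner_hook from a module's _PG_init(), chaining to any previously installed hook and otherwise falling back to standard_planner(); the function name my_planner and the module boilerplate are hypothetical:

#include "postgres.h"
#include "fmgr.h"
#include "optimizer/planner.h"

PG_MODULE_MAGIC;

static planner_hook_type prev_planner_hook = NULL;

/* Wrap planning: delegate to the previous hook if any, else the standard planner. */
static PlannedStmt *
my_planner(Query *parse, const char *query_string,
           int cursorOptions, ParamListInfo boundParams)
{
    if (prev_planner_hook)
        return prev_planner_hook(parse, query_string,
                                 cursorOptions, boundParams);
    return standard_planner(parse, query_string,
                            cursorOptions, boundParams);
}

void
_PG_init(void)
{
    prev_planner_hook = planner_hook;
    planner_hook = my_planner;
}

A hook that plans the query itself, rather than delegating, must produce a complete PlannedStmt; most extensions therefore wrap standard_planner() instead of replacing it.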

Macro Definition Documentation

◆ EXPRKIND_APPINFO

#define EXPRKIND_APPINFO   7

Definition at line 89 of file planner.c.

◆ EXPRKIND_ARBITER_ELEM

#define EXPRKIND_ARBITER_ELEM   10

Definition at line 92 of file planner.c.

◆ EXPRKIND_LIMIT

#define EXPRKIND_LIMIT   6

Definition at line 88 of file planner.c.

◆ EXPRKIND_PHV

#define EXPRKIND_PHV   8

Definition at line 90 of file planner.c.

◆ EXPRKIND_QUAL

#define EXPRKIND_QUAL   0

Definition at line 82 of file planner.c.

◆ EXPRKIND_RTFUNC

#define EXPRKIND_RTFUNC   2

Definition at line 84 of file planner.c.

◆ EXPRKIND_RTFUNC_LATERAL

#define EXPRKIND_RTFUNC_LATERAL   3

Definition at line 85 of file planner.c.

◆ EXPRKIND_TABLEFUNC

#define EXPRKIND_TABLEFUNC   11

Definition at line 93 of file planner.c.

◆ EXPRKIND_TABLEFUNC_LATERAL

#define EXPRKIND_TABLEFUNC_LATERAL   12

Definition at line 94 of file planner.c.

◆ EXPRKIND_TABLESAMPLE

#define EXPRKIND_TABLESAMPLE   9

Definition at line 91 of file planner.c.

◆ EXPRKIND_TARGET

#define EXPRKIND_TARGET   1

Definition at line 83 of file planner.c.

◆ EXPRKIND_VALUES

#define EXPRKIND_VALUES   4

Definition at line 86 of file planner.c.

◆ EXPRKIND_VALUES_LATERAL

#define EXPRKIND_VALUES_LATERAL   5

Definition at line 87 of file planner.c.

Function Documentation

◆ add_paths_to_grouping_rel()

static void add_paths_to_grouping_rel ( PlannerInfo *root,
RelOptInfo *input_rel,
RelOptInfo *grouped_rel,
RelOptInfo *partially_grouped_rel,
const AggClauseCosts *agg_costs,
grouping_sets_data *gd,
double  dNumGroups,
GroupPathExtraData *extra 
)
static

Definition at line 6476 of file planner.c.

6482 {
6483  Query *parse = root->parse;
6484  Path *cheapest_path = input_rel->cheapest_total_path;
6485  ListCell *lc;
6486  bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
6487  bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
6488  List *havingQual = (List *) extra->havingQual;
6489  AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
6490 
6491  if (can_sort)
6492  {
6493  /*
6494  * Use any available suitably-sorted path as input, and also consider
6495  * sorting the cheapest-total path.
6496  */
6497  foreach(lc, input_rel->pathlist)
6498  {
6499  Path *path = (Path *) lfirst(lc);
6500  Path *path_original = path;
6501  bool is_sorted;
6502  int presorted_keys;
6503 
6504  is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
6505  path->pathkeys,
6506  &presorted_keys);
6507 
6508  if (path == cheapest_path || is_sorted)
6509  {
6510  /* Sort the cheapest-total path if it isn't already sorted */
6511  if (!is_sorted)
6512  path = (Path *) create_sort_path(root,
6513  grouped_rel,
6514  path,
6515  root->group_pathkeys,
6516  -1.0);
6517 
6518  /* Now decide what to stick atop it */
6519  if (parse->groupingSets)
6520  {
6521  consider_groupingsets_paths(root, grouped_rel,
6522  path, true, can_hash,
6523  gd, agg_costs, dNumGroups);
6524  }
6525  else if (parse->hasAggs)
6526  {
6527  /*
6528  * We have aggregation, possibly with plain GROUP BY. Make
6529  * an AggPath.
6530  */
6531  add_path(grouped_rel, (Path *)
6532  create_agg_path(root,
6533  grouped_rel,
6534  path,
6535  grouped_rel->reltarget,
6536  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
6537  AGGSPLIT_SIMPLE,
6538  parse->groupClause,
6539  havingQual,
6540  agg_costs,
6541  dNumGroups));
6542  }
6543  else if (parse->groupClause)
6544  {
6545  /*
6546  * We have GROUP BY without aggregation or grouping sets.
6547  * Make a GroupPath.
6548  */
6549  add_path(grouped_rel, (Path *)
6550  create_group_path(root,
6551  grouped_rel,
6552  path,
6553  parse->groupClause,
6554  havingQual,
6555  dNumGroups));
6556  }
6557  else
6558  {
6559  /* Other cases should have been handled above */
6560  Assert(false);
6561  }
6562  }
6563 
6564  /*
6565  * Now we may consider incremental sort on this path, but only
6566  * when the path is not already sorted and when incremental sort
6567  * is enabled.
6568  */
6569  if (is_sorted || !enable_incremental_sort)
6570  continue;
6571 
6572  /* Restore the input path (we might have added Sort on top). */
6573  path = path_original;
6574 
6575  /* no shared prefix, no point in building incremental sort */
6576  if (presorted_keys == 0)
6577  continue;
6578 
6579  /*
6580  * We should have already excluded pathkeys of length 1 because
6581  * then presorted_keys > 0 would imply is_sorted was true.
6582  */
6583  Assert(list_length(root->group_pathkeys) != 1);
6584 
6585  path = (Path *) create_incremental_sort_path(root,
6586  grouped_rel,
6587  path,
6588  root->group_pathkeys,
6589  presorted_keys,
6590  -1.0);
6591 
6592  /* Now decide what to stick atop it */
6593  if (parse->groupingSets)
6594  {
6595  consider_groupingsets_paths(root, grouped_rel,
6596  path, true, can_hash,
6597  gd, agg_costs, dNumGroups);
6598  }
6599  else if (parse->hasAggs)
6600  {
6601  /*
6602  * We have aggregation, possibly with plain GROUP BY. Make an
6603  * AggPath.
6604  */
6605  add_path(grouped_rel, (Path *)
6606  create_agg_path(root,
6607  grouped_rel,
6608  path,
6609  grouped_rel->reltarget,
6610  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
6611  AGGSPLIT_SIMPLE,
6612  parse->groupClause,
6613  havingQual,
6614  agg_costs,
6615  dNumGroups));
6616  }
6617  else if (parse->groupClause)
6618  {
6619  /*
6620  * We have GROUP BY without aggregation or grouping sets. Make
6621  * a GroupPath.
6622  */
6623  add_path(grouped_rel, (Path *)
6624  create_group_path(root,
6625  grouped_rel,
6626  path,
6627  parse->groupClause,
6628  havingQual,
6629  dNumGroups));
6630  }
6631  else
6632  {
6633  /* Other cases should have been handled above */
6634  Assert(false);
6635  }
6636  }
6637 
6638  /*
6639  * Instead of operating directly on the input relation, we can
6640  * consider finalizing a partially aggregated path.
6641  */
6642  if (partially_grouped_rel != NULL)
6643  {
6644  foreach(lc, partially_grouped_rel->pathlist)
6645  {
6646  Path *path = (Path *) lfirst(lc);
6647  Path *path_original = path;
6648  bool is_sorted;
6649  int presorted_keys;
6650 
6651  is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
6652  path->pathkeys,
6653  &presorted_keys);
6654 
6655  /*
6656  * Insert a Sort node, if required. But there's no point in
6657  * sorting anything but the cheapest path.
6658  */
6659  if (!is_sorted)
6660  {
6661  if (path != partially_grouped_rel->cheapest_total_path)
6662  continue;
6663  path = (Path *) create_sort_path(root,
6664  grouped_rel,
6665  path,
6666  root->group_pathkeys,
6667  -1.0);
6668  }
6669 
6670  if (parse->hasAggs)
6671  add_path(grouped_rel, (Path *)
6672  create_agg_path(root,
6673  grouped_rel,
6674  path,
6675  grouped_rel->reltarget,
6676  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
6677  AGGSPLIT_FINAL_DESERIAL,
6678  parse->groupClause,
6679  havingQual,
6680  agg_final_costs,
6681  dNumGroups));
6682  else
6683  add_path(grouped_rel, (Path *)
6684  create_group_path(root,
6685  grouped_rel,
6686  path,
6687  parse->groupClause,
6688  havingQual,
6689  dNumGroups));
6690 
6691  /*
6692  * Now we may consider incremental sort on this path, but only
6693  * when the path is not already sorted and when incremental
6694  * sort is enabled.
6695  */
6696  if (is_sorted || !enable_incremental_sort)
6697  continue;
6698 
6699  /* Restore the input path (we might have added Sort on top). */
6700  path = path_original;
6701 
6702  /* no shared prefix, no point in building incremental sort */
6703  if (presorted_keys == 0)
6704  continue;
6705 
6706  /*
6707  * We should have already excluded pathkeys of length 1
6708  * because then presorted_keys > 0 would imply is_sorted was
6709  * true.
6710  */
6711  Assert(list_length(root->group_pathkeys) != 1);
6712 
6713  path = (Path *) create_incremental_sort_path(root,
6714  grouped_rel,
6715  path,
6716  root->group_pathkeys,
6717  presorted_keys,
6718  -1.0);
6719 
6720  if (parse->hasAggs)
6721  add_path(grouped_rel, (Path *)
6722  create_agg_path(root,
6723  grouped_rel,
6724  path,
6725  grouped_rel->reltarget,
6726  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
6727  AGGSPLIT_FINAL_DESERIAL,
6728  parse->groupClause,
6729  havingQual,
6730  agg_final_costs,
6731  dNumGroups));
6732  else
6733  add_path(grouped_rel, (Path *)
6734  create_group_path(root,
6735  grouped_rel,
6736  path,
6737  parse->groupClause,
6738  havingQual,
6739  dNumGroups));
6740  }
6741  }
6742  }
6743 
6744  if (can_hash)
6745  {
6746  if (parse->groupingSets)
6747  {
6748  /*
6749  * Try for a hash-only groupingsets path over unsorted input.
6750  */
6751  consider_groupingsets_paths(root, grouped_rel,
6752  cheapest_path, false, true,
6753  gd, agg_costs, dNumGroups);
6754  }
6755  else
6756  {
6757  /*
6758  * Generate a HashAgg Path. We just need an Agg over the
6759  * cheapest-total input path, since input order won't matter.
6760  */
6761  add_path(grouped_rel, (Path *)
6762  create_agg_path(root, grouped_rel,
6763  cheapest_path,
6764  grouped_rel->reltarget,
6765  AGG_HASHED,
6766  AGGSPLIT_SIMPLE,
6767  parse->groupClause,
6768  havingQual,
6769  agg_costs,
6770  dNumGroups));
6771  }
6772 
6773  /*
6774  * Generate a Finalize HashAgg Path atop the cheapest partially
6775  * grouped path, assuming there is one.
6776  */
6777  if (partially_grouped_rel && partially_grouped_rel->pathlist)
6778  {
6779  Path *path = partially_grouped_rel->cheapest_total_path;
6780 
6781  add_path(grouped_rel, (Path *)
6782  create_agg_path(root,
6783  grouped_rel,
6784  path,
6785  grouped_rel->reltarget,
6786  AGG_HASHED,
6787  AGGSPLIT_FINAL_DESERIAL,
6788  parse->groupClause,
6789  havingQual,
6790  agg_final_costs,
6791  dNumGroups));
6792  }
6793  }
6794 
6795  /*
6796  * When partitionwise aggregate is used, we might have fully aggregated
6797  * paths in the partial pathlist, because add_paths_to_append_rel() will
6798  * consider a path for grouped_rel consisting of a Parallel Append of
6799  * non-partial paths from each child.
6800  */
6801  if (grouped_rel->partial_pathlist != NIL)
6802  gather_grouping_paths(root, grouped_rel);
6803 }
bool enable_incremental_sort
Definition: costsize.c:141
@ AGG_SORTED
Definition: nodes.h:352
@ AGG_HASHED
Definition: nodes.h:353
@ AGG_PLAIN
Definition: nodes.h:351
@ AGGSPLIT_FINAL_DESERIAL
Definition: nodes.h:378
@ AGGSPLIT_SIMPLE
Definition: nodes.h:374
bool pathkeys_count_contained_in(List *keys1, List *keys2, int *n_common)
Definition: pathkeys.c:365
SortPath * create_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, double limit_tuples)
Definition: pathnode.c:2961
GroupPath * create_group_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *groupClause, List *qual, double numGroups)
Definition: pathnode.c:3005
IncrementalSortPath * create_incremental_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, int presorted_keys, double limit_tuples)
Definition: pathnode.c:2912
AggPath * create_agg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, AggStrategy aggstrategy, AggSplit aggsplit, List *groupClause, List *qual, const AggClauseCosts *aggcosts, double numGroups)
Definition: pathnode.c:3116
void add_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:422
#define GROUPING_CAN_USE_HASH
Definition: pathnodes.h:3039
#define GROUPING_CAN_USE_SORT
Definition: pathnodes.h:3038
#define lfirst(lc)
Definition: pg_list.h:170
static int list_length(const List *l)
Definition: pg_list.h:150
#define NIL
Definition: pg_list.h:66
static void gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
Definition: planner.c:7206
static void consider_groupingsets_paths(PlannerInfo *root, RelOptInfo *grouped_rel, Path *path, bool is_sorted, bool can_hash, grouping_sets_data *gd, const AggClauseCosts *agg_costs, double dNumGroups)
Definition: planner.c:3946
AggClauseCosts agg_final_costs
Definition: pathnodes.h:3079
List * pathkeys
Definition: pathnodes.h:1549
List * group_pathkeys
Definition: pathnodes.h:375
Query * parse
Definition: pathnodes.h:199
struct PathTarget * reltarget
Definition: pathnodes.h:843
List * pathlist
Definition: pathnodes.h:848
struct Path * cheapest_total_path
Definition: pathnodes.h:852
List * partial_pathlist
Definition: pathnodes.h:850

References add_path(), GroupPathExtraData::agg_final_costs, AGG_HASHED, AGG_PLAIN, AGG_SORTED, AGGSPLIT_FINAL_DESERIAL, AGGSPLIT_SIMPLE, Assert(), RelOptInfo::cheapest_total_path, consider_groupingsets_paths(), create_agg_path(), create_group_path(), create_incremental_sort_path(), create_sort_path(), enable_incremental_sort, GroupPathExtraData::flags, gather_grouping_paths(), PlannerInfo::group_pathkeys, GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, GroupPathExtraData::havingQual, lfirst, list_length(), NIL, PlannerInfo::parse, RelOptInfo::partial_pathlist, Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::pathlist, and RelOptInfo::reltarget.

Referenced by create_ordinary_grouping_paths().
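The branching above hinges on the (is_sorted, presorted_keys) pair from pathkeys_count_contained_in(): fully sorted paths get an Agg/Group node directly, unsorted ones get a full Sort, and partially sorted ones additionally get an Incremental Sort. A self-contained stand-in over int arrays (not the real List-of-PathKey API) that mimics the prefix-containment semantics:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for pathkeys_count_contained_in(): report how many
 * leading required keys the path already provides, and whether it provides
 * all of them. */
static bool
count_contained(const int *required, int nreq,
                const int *provided, int nprov, int *n_common)
{
    int n = 0;

    while (n < nreq && n < nprov && required[n] == provided[n])
        n++;
    *n_common = n;
    return n == nreq;
}

int
main(void)
{
    int group_keys[] = {1, 2, 3};  /* required grouping sort order */
    int path_keys[] = {1, 2};      /* ordering the input path supplies */
    int presorted;
    bool is_sorted = count_contained(group_keys, 3, path_keys, 2, &presorted);

    /* Prints is_sorted=0 presorted_keys=2: an incremental-sort candidate. */
    printf("is_sorted=%d presorted_keys=%d\n", is_sorted, presorted);
    return 0;
}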

◆ adjust_paths_for_srfs()

static void adjust_paths_for_srfs ( PlannerInfo *root,
RelOptInfo *rel,
List *targets,
List *targets_contain_srfs 
)
static

Definition at line 6031 of file planner.c.

6033 {
6034  ListCell *lc;
6035 
6036  Assert(list_length(targets) == list_length(targets_contain_srfs));
6037  Assert(!linitial_int(targets_contain_srfs));
6038 
6039  /* If no SRFs appear at this plan level, nothing to do */
6040  if (list_length(targets) == 1)
6041  return;
6042 
6043  /*
6044  * Stack SRF-evaluation nodes atop each path for the rel.
6045  *
6046  * In principle we should re-run set_cheapest() here to identify the
6047  * cheapest path, but it seems unlikely that adding the same tlist eval
6048  * costs to all the paths would change that, so we don't bother. Instead,
6049  * just assume that the cheapest-startup and cheapest-total paths remain
6050  * so. (There should be no parameterized paths anymore, so we needn't
6051  * worry about updating cheapest_parameterized_paths.)
6052  */
6053  foreach(lc, rel->pathlist)
6054  {
6055  Path *subpath = (Path *) lfirst(lc);
6056  Path *newpath = subpath;
6057  ListCell *lc1,
6058  *lc2;
6059 
6060  Assert(subpath->param_info == NULL);
6061  forboth(lc1, targets, lc2, targets_contain_srfs)
6062  {
6063  PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6064  bool contains_srfs = (bool) lfirst_int(lc2);
6065 
6066  /* If this level doesn't contain SRFs, do regular projection */
6067  if (contains_srfs)
6068  newpath = (Path *) create_set_projection_path(root,
6069  rel,
6070  newpath,
6071  thistarget);
6072  else
6073  newpath = (Path *) apply_projection_to_path(root,
6074  rel,
6075  newpath,
6076  thistarget);
6077  }
6078  lfirst(lc) = newpath;
6079  if (subpath == rel->cheapest_startup_path)
6080  rel->cheapest_startup_path = newpath;
6081  if (subpath == rel->cheapest_total_path)
6082  rel->cheapest_total_path = newpath;
6083  }
6084 
6085  /* Likewise for partial paths, if any */
6086  foreach(lc, rel->partial_pathlist)
6087  {
6088  Path *subpath = (Path *) lfirst(lc);
6089  Path *newpath = subpath;
6090  ListCell *lc1,
6091  *lc2;
6092 
6093  Assert(subpath->param_info == NULL);
6094  forboth(lc1, targets, lc2, targets_contain_srfs)
6095  {
6096  PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6097  bool contains_srfs = (bool) lfirst_int(lc2);
6098 
6099  /* If this level doesn't contain SRFs, do regular projection */
6100  if (contains_srfs)
6101  newpath = (Path *) create_set_projection_path(root,
6102  rel,
6103  newpath,
6104  thistarget);
6105  else
6106  {
6107  /* avoid apply_projection_to_path, in case of multiple refs */
6108  newpath = (Path *) create_projection_path(root,
6109  rel,
6110  newpath,
6111  thistarget);
6112  }
6113  }
6114  lfirst(lc) = newpath;
6115  }
6116 }
unsigned char bool
Definition: c.h:392
ProjectionPath * create_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2646
ProjectSetPath * create_set_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2843
Path * apply_projection_to_path(PlannerInfo *root, RelOptInfo *rel, Path *path, PathTarget *target)
Definition: pathnode.c:2754
#define lfirst_node(type, lc)
Definition: pg_list.h:174
#define forboth(cell1, list1, cell2, list2)
Definition: pg_list.h:465
#define lfirst_int(lc)
Definition: pg_list.h:171
#define linitial_int(l)
Definition: pg_list.h:177
struct Path * cheapest_startup_path
Definition: pathnodes.h:851

References apply_projection_to_path(), Assert(), RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, create_projection_path(), create_set_projection_path(), forboth, lfirst, lfirst_int, lfirst_node, linitial_int, list_length(), RelOptInfo::partial_pathlist, and RelOptInfo::pathlist.

Referenced by apply_scanjoin_target_to_paths(), and grouping_planner().
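The per-path loop applies the target levels bottom-up: each level is stacked atop the previous result, as a ProjectSet step when it contains set-returning functions and as a plain projection otherwise. A toy illustration of that stacking order, with strings standing in for Path nodes (all names hypothetical):

#include <stdio.h>

int
main(void)
{
    const char *levels[] = {"tlist0", "tlist1", "tlist2"};
    int contains_srfs[] = {0, 1, 0};   /* level 0 is asserted SRF-free */

    /* Mirror the forboth() walk: wrap the running path one level at a time. */
    printf("scanjoin_path");
    for (int i = 0; i < 3; i++)
        printf(" -> %s(%s)",
               contains_srfs[i] ? "ProjectSet" : "Projection",
               levels[i]);
    printf("\n");
    return 0;
}

Output: scanjoin_path -> Projection(tlist0) -> ProjectSet(tlist1) -> Projection(tlist2), matching how newpath is rebound on each iteration.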

◆ apply_scanjoin_target_to_paths()

static void apply_scanjoin_target_to_paths ( PlannerInfo *root,
RelOptInfo *rel,
List *scanjoin_targets,
List *scanjoin_targets_contain_srfs,
bool  scanjoin_target_parallel_safe,
bool  tlist_same_exprs 
)
static

Definition at line 7336 of file planner.c.

7342 {
7343  bool rel_is_partitioned = IS_PARTITIONED_REL(rel);
7344  PathTarget *scanjoin_target;
7345  ListCell *lc;
7346 
7347  /* This recurses, so be paranoid. */
7348  check_stack_depth();
7349 
7350  /*
7351  * If the rel is partitioned, we want to drop its existing paths and
7352  * generate new ones. This function would still be correct if we kept the
7353  * existing paths: we'd modify them to generate the correct target above
7354  * the partitioning Append, and then they'd compete on cost with paths
7355  * generating the target below the Append. However, in our current cost
7356  * model the latter way is always the same or cheaper cost, so modifying
7357  * the existing paths would just be useless work. Moreover, when the cost
7358  * is the same, varying roundoff errors might sometimes allow an existing
7359  * path to be picked, resulting in undesirable cross-platform plan
7360  * variations. So we drop old paths and thereby force the work to be done
7361  * below the Append, except in the case of a non-parallel-safe target.
7362  *
7363  * Some care is needed, because we have to allow
7364  * generate_useful_gather_paths to see the old partial paths in the next
7365  * stanza. Hence, zap the main pathlist here, then allow
7366  * generate_useful_gather_paths to add path(s) to the main list, and
7367  * finally zap the partial pathlist.
7368  */
7369  if (rel_is_partitioned)
7370  rel->pathlist = NIL;
7371 
7372  /*
7373  * If the scan/join target is not parallel-safe, partial paths cannot
7374  * generate it.
7375  */
7376  if (!scanjoin_target_parallel_safe)
7377  {
7378  /*
7379  * Since we can't generate the final scan/join target in parallel
7380  * workers, this is our last opportunity to use any partial paths that
7381  * exist; so build Gather path(s) that use them and emit whatever the
7382  * current reltarget is. We don't do this in the case where the
7383  * target is parallel-safe, since we will be able to generate superior
7384  * paths by doing it after the final scan/join target has been
7385  * applied.
7386  */
7387  generate_useful_gather_paths(root, rel, false);
7388 
7389  /* Can't use parallel query above this level. */
7390  rel->partial_pathlist = NIL;
7391  rel->consider_parallel = false;
7392  }
7393 
7394  /* Finish dropping old paths for a partitioned rel, per comment above */
7395  if (rel_is_partitioned)
7396  rel->partial_pathlist = NIL;
7397 
7398  /* Extract SRF-free scan/join target. */
7399  scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
7400 
7401  /*
7402  * Apply the SRF-free scan/join target to each existing path.
7403  *
7404  * If the tlist exprs are the same, we can just inject the sortgroupref
7405  * information into the existing pathtargets. Otherwise, replace each
7406  * path with a projection path that generates the SRF-free scan/join
7407  * target. This can't change the ordering of paths within rel->pathlist,
7408  * so we just modify the list in place.
7409  */
7410  foreach(lc, rel->pathlist)
7411  {
7412  Path *subpath = (Path *) lfirst(lc);
7413 
7414  /* Shouldn't have any parameterized paths anymore */
7415  Assert(subpath->param_info == NULL);
7416 
7417  if (tlist_same_exprs)
7418  subpath->pathtarget->sortgrouprefs =
7419  scanjoin_target->sortgrouprefs;
7420  else
7421  {
7422  Path *newpath;
7423 
7424  newpath = (Path *) create_projection_path(root, rel, subpath,
7425  scanjoin_target);
7426  lfirst(lc) = newpath;
7427  }
7428  }
7429 
7430  /* Likewise adjust the targets for any partial paths. */
7431  foreach(lc, rel->partial_pathlist)
7432  {
7433  Path *subpath = (Path *) lfirst(lc);
7434 
7435  /* Shouldn't have any parameterized paths anymore */
7436  Assert(subpath->param_info == NULL);
7437 
7438  if (tlist_same_exprs)
7439  subpath->pathtarget->sortgrouprefs =
7440  scanjoin_target->sortgrouprefs;
7441  else
7442  {
7443  Path *newpath;
7444 
7445  newpath = (Path *) create_projection_path(root, rel, subpath,
7446  scanjoin_target);
7447  lfirst(lc) = newpath;
7448  }
7449  }
7450 
7451  /*
7452  * Now, if final scan/join target contains SRFs, insert ProjectSetPath(s)
7453  * atop each existing path. (Note that this function doesn't look at the
7454  * cheapest-path fields, which is a good thing because they're bogus right
7455  * now.)
7456  */
7457  if (root->parse->hasTargetSRFs)
7458  adjust_paths_for_srfs(root, rel,
7459  scanjoin_targets,
7460  scanjoin_targets_contain_srfs);
7461 
7462  /*
7463  * Update the rel's target to be the final (with SRFs) scan/join target.
7464  * This now matches the actual output of all the paths, and we might get
7465  * confused in createplan.c if they don't agree. We must do this now so
7466  * that any append paths made in the next part will use the correct
7467  * pathtarget (cf. create_append_path).
7468  *
7469  * Note that this is also necessary if GetForeignUpperPaths() gets called
7470  * on the final scan/join relation or on any of its children, since the
7471  * FDW might look at the rel's target to create ForeignPaths.
7472  */
7473  rel->reltarget = llast_node(PathTarget, scanjoin_targets);
7474 
7475  /*
7476  * If the relation is partitioned, recursively apply the scan/join target
7477  * to all partitions, and generate brand-new Append paths in which the
7478  * scan/join target is computed below the Append rather than above it.
7479  * Since Append is not projection-capable, that might save a separate
7480  * Result node, and it also is important for partitionwise aggregate.
7481  */
7482  if (rel_is_partitioned)
7483  {
7484  List *live_children = NIL;
7485  int i;
7486 
7487  /* Adjust each partition. */
7488  i = -1;
7489  while ((i = bms_next_member(rel->live_parts, i)) >= 0)
7490  {
7491  RelOptInfo *child_rel = rel->part_rels[i];
7492  AppendRelInfo **appinfos;
7493  int nappinfos;
7494  List *child_scanjoin_targets = NIL;
7495 
7496  Assert(child_rel != NULL);
7497 
7498  /* Dummy children can be ignored. */
7499  if (IS_DUMMY_REL(child_rel))
7500  continue;
7501 
7502  /* Translate scan/join targets for this child. */
7503  appinfos = find_appinfos_by_relids(root, child_rel->relids,
7504  &nappinfos);
7505  foreach(lc, scanjoin_targets)
7506  {
7507  PathTarget *target = lfirst_node(PathTarget, lc);
7508 
7509  target = copy_pathtarget(target);
7510  target->exprs = (List *)
7511  adjust_appendrel_attrs(root,
7512  (Node *) target->exprs,
7513  nappinfos, appinfos);
7514  child_scanjoin_targets = lappend(child_scanjoin_targets,
7515  target);
7516  }
7517  pfree(appinfos);
7518 
7519  /* Recursion does the real work. */
7520  apply_scanjoin_target_to_paths(root, child_rel,
7521  child_scanjoin_targets,
7522  scanjoin_targets_contain_srfs,
7523  scanjoin_target_parallel_safe,
7524  tlist_same_exprs);
7525 
7526  /* Save non-dummy children for Append paths. */
7527  if (!IS_DUMMY_REL(child_rel))
7528  live_children = lappend(live_children, child_rel);
7529  }
7530 
7531  /* Build new paths for this relation by appending child paths. */
7532  add_paths_to_append_rel(root, rel, live_children);
7533  }
7534 
7535  /*
7536  * Consider generating Gather or Gather Merge paths. We must only do this
7537  * if the relation is parallel safe, and we don't do it for child rels to
7538  * avoid creating multiple Gather nodes within the same plan. We must do
7539  * this after all paths have been generated and before set_cheapest, since
7540  * one of the generated paths may turn out to be the cheapest one.
7541  */
7542  if (rel->consider_parallel && !IS_OTHER_REL(rel))
7543  generate_useful_gather_paths(root, rel, false);
7544 
7545  /*
7546  * Reassess which paths are the cheapest, now that we've potentially added
7547  * new Gather (or Gather Merge) and/or Append (or MergeAppend) paths to
7548  * this relation.
7549  */
7550  set_cheapest(rel);
7551 }
void generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
Definition: allpaths.c:3140
void add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, List *live_childrels)
Definition: allpaths.c:1287
AppendRelInfo ** find_appinfos_by_relids(PlannerInfo *root, Relids relids, int *nappinfos)
Definition: appendinfo.c:697
Node * adjust_appendrel_attrs(PlannerInfo *root, Node *node, int nappinfos, AppendRelInfo **appinfos)
Definition: appendinfo.c:195
int bms_next_member(const Bitmapset *a, int prevbit)
Definition: bitmapset.c:1047
List * lappend(List *list, void *datum)
Definition: list.c:338
void pfree(void *pointer)
Definition: mcxt.c:1306
void set_cheapest(RelOptInfo *parent_rel)
Definition: pathnode.c:244
#define IS_DUMMY_REL(r)
Definition: pathnodes.h:1820
#define IS_PARTITIONED_REL(rel)
Definition: pathnodes.h:1007
#define IS_OTHER_REL(rel)
Definition: pathnodes.h:804
#define linitial_node(type, l)
Definition: pg_list.h:179
#define llast_node(type, l)
Definition: pg_list.h:200
static void apply_scanjoin_target_to_paths(PlannerInfo *root, RelOptInfo *rel, List *scanjoin_targets, List *scanjoin_targets_contain_srfs, bool scanjoin_target_parallel_safe, bool tlist_same_exprs)
Definition: planner.c:7336
static void adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel, List *targets, List *targets_contain_srfs)
Definition: planner.c:6031
void check_stack_depth(void)
Definition: postgres.c:3440
List * exprs
Definition: pathnodes.h:1424
bool hasTargetSRFs
Definition: parsenodes.h:143
Relids relids
Definition: pathnodes.h:821
bool consider_parallel
Definition: pathnodes.h:837
Bitmapset * live_parts
Definition: pathnodes.h:984
PathTarget * copy_pathtarget(PathTarget *src)
Definition: tlist.c:657

References add_paths_to_append_rel(), adjust_appendrel_attrs(), adjust_paths_for_srfs(), Assert(), bms_next_member(), check_stack_depth(), RelOptInfo::consider_parallel, copy_pathtarget(), create_projection_path(), PathTarget::exprs, find_appinfos_by_relids(), generate_useful_gather_paths(), Query::hasTargetSRFs, IS_DUMMY_REL, IS_OTHER_REL, IS_PARTITIONED_REL, lappend(), lfirst, lfirst_node, linitial_node, RelOptInfo::live_parts, llast_node, NIL, PlannerInfo::parse, RelOptInfo::partial_pathlist, RelOptInfo::pathlist, pfree(), RelOptInfo::relids, RelOptInfo::reltarget, and set_cheapest().

Referenced by grouping_planner().
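The partition walk above uses the usual bms_next_member() idiom: initialize i to -1 and advance it to each set bit until a negative value comes back. A minimal stand-in over a plain unsigned bitmask (not the real Bitmapset API) showing the same pattern:

#include <stdio.h>

/* Hypothetical fixed-width analogue of bms_next_member(): return the next
 * set bit after prevbit, or -1 when there are no more. */
static int
next_member(unsigned mask, int prevbit)
{
    for (int i = prevbit + 1; i < 32; i++)
        if (mask & (1u << i))
            return i;
    return -1;
}

int
main(void)
{
    unsigned live_parts = 0x0b;  /* partitions 0, 1 and 3 are live */
    int i = -1;

    while ((i = next_member(live_parts, i)) >= 0)
        printf("visit partition %d\n", i);
    return 0;
}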

◆ can_partial_agg()

static bool can_partial_agg ( PlannerInfo *root)
static

Definition at line 7294 of file planner.c.

7295 {
7296  Query *parse = root->parse;
7297 
7298  if (!parse->hasAggs && parse->groupClause == NIL)
7299  {
7300  /*
7301  * We don't know how to do parallel aggregation unless we have either
7302  * some aggregates or a grouping clause.
7303  */
7304  return false;
7305  }
7306  else if (parse->groupingSets)
7307  {
7308  /* We don't know how to do grouping sets in parallel. */
7309  return false;
7310  }
7311  else if (root->hasNonPartialAggs || root->hasNonSerialAggs)
7312  {
7313  /* Insufficient support for partial mode. */
7314  return false;
7315  }
7316 
7317  /* Everything looks good. */
7318  return true;
7319 }
bool hasNonPartialAggs
Definition: pathnodes.h:475
bool hasNonSerialAggs
Definition: pathnodes.h:477

References PlannerInfo::hasNonPartialAggs, PlannerInfo::hasNonSerialAggs, NIL, and PlannerInfo::parse.

Referenced by create_grouping_paths().

◆ common_prefix_cmp()

static int common_prefix_cmp ( const void *  a,
const void *  b 
)
static

Definition at line 5536 of file planner.c.

5537 {
5538  const WindowClauseSortData *wcsa = a;
5539  const WindowClauseSortData *wcsb = b;
5540  ListCell *item_a;
5541  ListCell *item_b;
5542 
5543  forboth(item_a, wcsa->uniqueOrder, item_b, wcsb->uniqueOrder)
5544  {
5545  SortGroupClause *sca = lfirst_node(SortGroupClause, item_a);
5546  SortGroupClause *scb = lfirst_node(SortGroupClause, item_b);
5547 
5548  if (sca->tleSortGroupRef > scb->tleSortGroupRef)
5549  return -1;
5550  else if (sca->tleSortGroupRef < scb->tleSortGroupRef)
5551  return 1;
5552  else if (sca->sortop > scb->sortop)
5553  return -1;
5554  else if (sca->sortop < scb->sortop)
5555  return 1;
5556  else if (sca->nulls_first && !scb->nulls_first)
5557  return -1;
5558  else if (!sca->nulls_first && scb->nulls_first)
5559  return 1;
5560  /* no need to compare eqop, since it is fully determined by sortop */
5561  }
5562 
5563  if (list_length(wcsa->uniqueOrder) > list_length(wcsb->uniqueOrder))
5564  return -1;
5565  else if (list_length(wcsa->uniqueOrder) < list_length(wcsb->uniqueOrder))
5566  return 1;
5567 
5568  return 0;
5569 }
Index tleSortGroupRef
Definition: parsenodes.h:1320

References forboth, lfirst_node, list_length(), SortGroupClause::nulls_first, SortGroupClause::sortop, SortGroupClause::tleSortGroupRef, and WindowClauseSortData::uniqueOrder.

Referenced by select_active_windows().
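Note the inverted comparisons: returning -1 when a's key is greater makes qsort() in select_active_windows() place entries with greater sort-group refs first and longer ORDER BY lists ahead of their prefixes. A minimal demonstration of the same descending-comparator pattern on plain ints (names hypothetical):

#include <stdio.h>
#include <stdlib.h>

/* Descending order: report -1 for the greater element. */
static int
desc_cmp(const void *a, const void *b)
{
    int ia = *(const int *) a;
    int ib = *(const int *) b;

    if (ia > ib)
        return -1;
    if (ia < ib)
        return 1;
    return 0;
}

int
main(void)
{
    int v[] = {2, 5, 1};

    qsort(v, 3, sizeof(int), desc_cmp);
    printf("%d %d %d\n", v[0], v[1], v[2]);  /* prints 5 2 1 */
    return 0;
}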

◆ consider_groupingsets_paths()

static void consider_groupingsets_paths ( PlannerInfo *root,
RelOptInfo *grouped_rel,
Path *path,
bool  is_sorted,
bool  can_hash,
grouping_sets_data *gd,
const AggClauseCosts *agg_costs,
double  dNumGroups 
)
static

Definition at line 3946 of file planner.c.

3954 {
3955  Query *parse = root->parse;
3956  Size hash_mem_limit = get_hash_memory_limit();
3957 
3958  /*
3959  * If we're not being offered sorted input, then only consider plans that
3960  * can be done entirely by hashing.
3961  *
3962  * We can hash everything if it looks like it'll fit in hash_mem. But if
3963  * the input is actually sorted despite not being advertised as such, we
3964  * prefer to make use of that in order to use less memory.
3965  *
3966  * If none of the grouping sets are sortable, then ignore the hash_mem
3967  * limit and generate a path anyway, since otherwise we'll just fail.
3968  */
3969  if (!is_sorted)
3970  {
3971  List *new_rollups = NIL;
3972  RollupData *unhashed_rollup = NULL;
3973  List *sets_data;
3974  List *empty_sets_data = NIL;
3975  List *empty_sets = NIL;
3976  ListCell *lc;
3977  ListCell *l_start = list_head(gd->rollups);
3978  AggStrategy strat = AGG_HASHED;
3979  double hashsize;
3980  double exclude_groups = 0.0;
3981 
3982  Assert(can_hash);
3983 
3984  /*
3985  * If the input is coincidentally sorted usefully (which can happen
3986  * even if is_sorted is false, since that only means that our caller
3987  * has set up the sorting for us), then save some hashtable space by
3988  * making use of that. But we need to watch out for degenerate cases:
3989  *
3990  * 1) If there are any empty grouping sets, then group_pathkeys might
3991  * be NIL if all non-empty grouping sets are unsortable. In this case,
3992  * there will be a rollup containing only empty groups, and the
3993  * pathkeys_contained_in test is vacuously true; this is ok.
3994  *
3995  * XXX: the above relies on the fact that group_pathkeys is generated
3996  * from the first rollup. If we add the ability to consider multiple
3997  * sort orders for grouping input, this assumption might fail.
3998  *
3999  * 2) If there are no empty sets and only unsortable sets, then the
4000  * rollups list will be empty (and thus l_start == NULL), and
4001  * group_pathkeys will be NIL; we must ensure that the vacuously-true
4002  * pathkeys_contained_in test doesn't cause us to crash.
4003  */
4004  if (l_start != NULL &&
4005  pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
4006  {
4007  unhashed_rollup = lfirst_node(RollupData, l_start);
4008  exclude_groups = unhashed_rollup->numGroups;
4009  l_start = lnext(gd->rollups, l_start);
4010  }
4011 
4012  hashsize = estimate_hashagg_tablesize(root,
4013  path,
4014  agg_costs,
4015  dNumGroups - exclude_groups);
4016 
4017  /*
4018  * gd->rollups is empty if we have only unsortable columns to work
4019  * with. Override hash_mem in that case; otherwise, we'll rely on the
4020  * sorted-input case to generate usable mixed paths.
4021  */
4022  if (hashsize > hash_mem_limit && gd->rollups)
4023  return; /* nope, won't fit */
4024 
4025  /*
4026  * We need to burst the existing rollups list into individual grouping
4027  * sets and recompute a groupClause for each set.
4028  */
4029  sets_data = list_copy(gd->unsortable_sets);
4030 
4031  for_each_cell(lc, gd->rollups, l_start)
4032  {
4033  RollupData *rollup = lfirst_node(RollupData, lc);
4034 
4035  /*
4036  * If we find an unhashable rollup that's not been skipped by the
4037  * "actually sorted" check above, we can't cope; we'd need sorted
4038  * input (with a different sort order) but we can't get that here.
4039  * So bail out; we'll get a valid path from the is_sorted case
4040  * instead.
4041  *
4042  * The mere presence of empty grouping sets doesn't make a rollup
4043  * unhashable (see preprocess_grouping_sets); we handle those
4044  * specially below.
4045  */
4046  if (!rollup->hashable)
4047  return;
4048 
4049  sets_data = list_concat(sets_data, rollup->gsets_data);
4050  }
4051  foreach(lc, sets_data)
4052  {
4053  GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4054  List *gset = gs->set;
4055  RollupData *rollup;
4056 
4057  if (gset == NIL)
4058  {
4059  /* Empty grouping sets can't be hashed. */
4060  empty_sets_data = lappend(empty_sets_data, gs);
4061  empty_sets = lappend(empty_sets, NIL);
4062  }
4063  else
4064  {
4065  rollup = makeNode(RollupData);
4066 
4067  rollup->groupClause = preprocess_groupclause(root, gset);
4068  rollup->gsets_data = list_make1(gs);
4069  rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4070  rollup->gsets_data,
4071  gd->tleref_to_colnum_map);
4072  rollup->numGroups = gs->numGroups;
4073  rollup->hashable = true;
4074  rollup->is_hashed = true;
4075  new_rollups = lappend(new_rollups, rollup);
4076  }
4077  }
4078 
4079  /*
4080  * If we didn't find anything nonempty to hash, then bail. We'll
4081  * generate a path from the is_sorted case.
4082  */
4083  if (new_rollups == NIL)
4084  return;
4085 
4086  /*
4087  * If there were empty grouping sets they should have been in the
4088  * first rollup.
4089  */
4090  Assert(!unhashed_rollup || !empty_sets);
4091 
4092  if (unhashed_rollup)
4093  {
4094  new_rollups = lappend(new_rollups, unhashed_rollup);
4095  strat = AGG_MIXED;
4096  }
4097  else if (empty_sets)
4098  {
4099  RollupData *rollup = makeNode(RollupData);
4100 
4101  rollup->groupClause = NIL;
4102  rollup->gsets_data = empty_sets_data;
4103  rollup->gsets = empty_sets;
4104  rollup->numGroups = list_length(empty_sets);
4105  rollup->hashable = false;
4106  rollup->is_hashed = false;
4107  new_rollups = lappend(new_rollups, rollup);
4108  strat = AGG_MIXED;
4109  }
4110 
4111  add_path(grouped_rel, (Path *)
4112  create_groupingsets_path(root,
4113  grouped_rel,
4114  path,
4115  (List *) parse->havingQual,
4116  strat,
4117  new_rollups,
4118  agg_costs));
4119  return;
4120  }
4121 
4122  /*
4123  * If we have sorted input but nothing we can do with it, bail.
4124  */
4125  if (gd->rollups == NIL)
4126  return;
4127 
4128  /*
4129  * Given sorted input, we try and make two paths: one sorted and one mixed
4130  * sort/hash. (We need to try both because hashagg might be disabled, or
4131  * some columns might not be sortable.)
4132  *
4133  * can_hash is passed in as false if some obstacle elsewhere (such as
4134  * ordered aggs) means that we shouldn't consider hashing at all.
4135  */
4136  if (can_hash && gd->any_hashable)
4137  {
4138  List *rollups = NIL;
4139  List *hash_sets = list_copy(gd->unsortable_sets);
4140  double availspace = hash_mem_limit;
4141  ListCell *lc;
4142 
4143  /*
4144  * Account first for space needed for groups we can't sort at all.
4145  */
4146  availspace -= estimate_hashagg_tablesize(root,
4147  path,
4148  agg_costs,
4149  gd->dNumHashGroups);
4150 
4151  if (availspace > 0 && list_length(gd->rollups) > 1)
4152  {
4153  double scale;
4154  int num_rollups = list_length(gd->rollups);
4155  int k_capacity;
4156  int *k_weights = palloc(num_rollups * sizeof(int));
4157  Bitmapset *hash_items = NULL;
4158  int i;
4159 
4160  /*
4161  * We treat this as a knapsack problem: the knapsack capacity
4162  * represents hash_mem, the item weights are the estimated memory
4163  * usage of the hashtables needed to implement a single rollup,
4164  * and we really ought to use the cost saving as the item value;
4165  * however, currently the costs assigned to sort nodes don't
4166  * reflect the comparison costs well, and so we treat all items as
4167  * of equal value (each rollup we hash instead saves us one sort).
4168  *
4169  * To use the discrete knapsack, we need to scale the values to a
4170  * reasonably small bounded range. We choose to allow a 5% error
4171  * margin; we have no more than 4096 rollups in the worst possible
4172  * case, which with a 5% error margin will require a bit over 42MB
4173  * of workspace. (Anyone wanting to plan queries that complex had
4174  * better have the memory for it. In more reasonable cases, with
4175  * no more than a couple of dozen rollups, the memory usage will
4176  * be negligible.)
4177  *
4178  * k_capacity is naturally bounded, but we clamp the values for
4179  * scale and weight (below) to avoid overflows or underflows (or
4180  * uselessly trying to use a scale factor less than 1 byte).
4181  */
4182  scale = Max(availspace / (20.0 * num_rollups), 1.0);
4183  k_capacity = (int) floor(availspace / scale);
4184 
4185  /*
4186  * We leave the first rollup out of consideration since it's the
4187  * one that matches the input sort order. We assign indexes "i"
4188  * to only those entries considered for hashing; the second loop,
4189  * below, must use the same condition.
4190  */
4191  i = 0;
4192  for_each_from(lc, gd->rollups, 1)
4193  {
4194  RollupData *rollup = lfirst_node(RollupData, lc);
4195 
4196  if (rollup->hashable)
4197  {
4198  double sz = estimate_hashagg_tablesize(root,
4199  path,
4200  agg_costs,
4201  rollup->numGroups);
4202 
4203  /*
4204  * If sz is enormous, but hash_mem (and hence scale) is
4205  * small, avoid integer overflow here.
4206  */
4207  k_weights[i] = (int) Min(floor(sz / scale),
4208  k_capacity + 1.0);
4209  ++i;
4210  }
4211  }
4212 
4213  /*
4214  * Apply knapsack algorithm; compute the set of items which
4215  * maximizes the value stored (in this case the number of sorts
4216  * saved) while keeping the total size (approximately) within
4217  * capacity.
4218  */
4219  if (i > 0)
4220  hash_items = DiscreteKnapsack(k_capacity, i, k_weights, NULL);
4221 
4222  if (!bms_is_empty(hash_items))
4223  {
4224  rollups = list_make1(linitial(gd->rollups));
4225 
4226  i = 0;
4227  for_each_from(lc, gd->rollups, 1)
4228  {
4229  RollupData *rollup = lfirst_node(RollupData, lc);
4230 
4231  if (rollup->hashable)
4232  {
4233  if (bms_is_member(i, hash_items))
4234  hash_sets = list_concat(hash_sets,
4235  rollup->gsets_data);
4236  else
4237  rollups = lappend(rollups, rollup);
4238  ++i;
4239  }
4240  else
4241  rollups = lappend(rollups, rollup);
4242  }
4243  }
4244  }
4245 
4246  if (!rollups && hash_sets)
4247  rollups = list_copy(gd->rollups);
4248 
4249  foreach(lc, hash_sets)
4250  {
4251  GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4252  RollupData *rollup = makeNode(RollupData);
4253 
4254  Assert(gs->set != NIL);
4255 
4256  rollup->groupClause = preprocess_groupclause(root, gs->set);
4257  rollup->gsets_data = list_make1(gs);
4258  rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4259  rollup->gsets_data,
4260  gd->tleref_to_colnum_map);
4261  rollup->numGroups = gs->numGroups;
4262  rollup->hashable = true;
4263  rollup->is_hashed = true;
4264  rollups = lcons(rollup, rollups);
4265  }
4266 
4267  if (rollups)
4268  {
4269  add_path(grouped_rel, (Path *)
4270  create_groupingsets_path(root,
4271  grouped_rel,
4272  path,
4273  (List *) parse->havingQual,
4274  AGG_MIXED,
4275  rollups,
4276  agg_costs));
4277  }
4278  }
4279 
4280  /*
4281  * Now try the simple sorted case.
4282  */
4283  if (!gd->unsortable_sets)
4284  add_path(grouped_rel, (Path *)
4285  create_groupingsets_path(root,
4286  grouped_rel,
4287  path,
4288  (List *) parse->havingQual,
4289  AGG_SORTED,
4290  gd->rollups,
4291  agg_costs));
4292 }
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:428
bool bms_is_empty(const Bitmapset *a)
Definition: bitmapset.c:704
#define Min(x, y)
Definition: c.h:937
#define Max(x, y)
Definition: c.h:931
size_t Size
Definition: c.h:541
Bitmapset * DiscreteKnapsack(int max_weight, int num_items, int *item_weights, double *item_values)
Definition: knapsack.c:54
List * list_copy(const List *oldlist)
Definition: list.c:1572
List * list_concat(List *list1, const List *list2)
Definition: list.c:560
List * lcons(void *datum, List *list)
Definition: list.c:494
void * palloc(Size size)
Definition: mcxt.c:1199
size_t get_hash_memory_limit(void)
Definition: nodeHash.c:3390
AggStrategy
Definition: nodes.h:350
@ AGG_MIXED
Definition: nodes.h:354
#define makeNode(_type_)
Definition: nodes.h:165
bool pathkeys_contained_in(List *keys1, List *keys2)
Definition: pathkeys.c:346
GroupingSetsPath * create_groupingsets_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *having_qual, AggStrategy aggstrategy, List *rollups, const AggClauseCosts *agg_costs)
Definition: pathnode.c:3182
#define list_make1(x1)
Definition: pg_list.h:210
#define for_each_cell(cell, lst, initcell)
Definition: pg_list.h:436
static ListCell * list_head(const List *l)
Definition: pg_list.h:126
#define for_each_from(cell, lst, N)
Definition: pg_list.h:412
#define linitial(l)
Definition: pg_list.h:176
static ListCell * lnext(const List *l, const ListCell *c)
Definition: pg_list.h:341
static List * preprocess_groupclause(PlannerInfo *root, List *force)
Definition: planner.c:2774
static List * remap_to_groupclause_idx(List *groupClause, List *gsets, int *tleref_to_colnum_map)
Definition: planner.c:2155
double estimate_hashagg_tablesize(PlannerInfo *root, Path *path, const AggClauseCosts *agg_costs, double dNumGroups)
Definition: selfuncs.c:3886
Cardinality numGroups
Definition: pathnodes.h:2147
Cardinality numGroups
Definition: pathnodes.h:2158
List * groupClause
Definition: pathnodes.h:2155
List * gsets_data
Definition: pathnodes.h:2157
bool hashable
Definition: pathnodes.h:2159
List * gsets
Definition: pathnodes.h:2156
bool is_hashed
Definition: pathnodes.h:2160
int * tleref_to_colnum_map
Definition: planner.c:116
List * unsortable_sets
Definition: planner.c:115
double dNumHashGroups
Definition: planner.c:111

References add_path(), AGG_HASHED, AGG_MIXED, AGG_SORTED, grouping_sets_data::any_hashable, Assert(), bms_is_empty(), bms_is_member(), create_groupingsets_path(), DiscreteKnapsack(), grouping_sets_data::dNumHashGroups, estimate_hashagg_tablesize(), for_each_cell, for_each_from, get_hash_memory_limit(), PlannerInfo::group_pathkeys, RollupData::groupClause, RollupData::gsets, RollupData::gsets_data, RollupData::hashable, RollupData::is_hashed, lappend(), lcons(), lfirst_node, linitial, list_concat(), list_copy(), list_head(), list_length(), list_make1, lnext(), makeNode, Max, Min, NIL, GroupingSetData::numGroups, RollupData::numGroups, palloc(), PlannerInfo::parse, Path::pathkeys, pathkeys_contained_in(), preprocess_groupclause(), remap_to_groupclause_idx(), grouping_sets_data::rollups, GroupingSetData::set, grouping_sets_data::tleref_to_colnum_map, and grouping_sets_data::unsortable_sets.

Referenced by add_paths_to_grouping_rel().
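The knapsack scaling in the stanza above (scale, k_capacity, k_weights) is plain arithmetic and easy to check standalone. A sketch with made-up numbers (the availspace, num_rollups and sz values are hypothetical, not planner output):

#include <math.h>
#include <stdio.h>

int
main(void)
{
    double availspace = 4.0 * 1024 * 1024;  /* hash memory still available */
    int    num_rollups = 3;                 /* rollups eligible for hashing */
    double sz = 1.5 * 1024 * 1024;          /* est. hashtable size per rollup */

    /* Same clamping as above: never scale below 1 byte per unit. */
    double scale = fmax(availspace / (20.0 * num_rollups), 1.0);
    int    k_capacity = (int) floor(availspace / scale);

    /* Clamp each weight so an oversized item can never appear to fit. */
    int    k_weight = (int) fmin(floor(sz / scale), k_capacity + 1.0);

    /* Prints scale=69905 capacity=60 weight=22: two such rollups fit. */
    printf("scale=%.0f capacity=%d weight=%d\n", scale, k_capacity, k_weight);
    return 0;
}

Allowing at most 20 capacity units per rollup is what yields the 5% error margin, and it keeps DiscreteKnapsack()'s workspace bounded; the 42MB figure in the comment is the 4096-rollup worst case.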

◆ create_degenerate_grouping_paths()

static void create_degenerate_grouping_paths ( PlannerInfo *root,
RelOptInfo *input_rel,
RelOptInfo *grouped_rel 
)
static

Definition at line 3746 of file planner.c.

3748 {
3749  Query *parse = root->parse;
3750  int nrows;
3751  Path *path;
3752 
3753  nrows = list_length(parse->groupingSets);
3754  if (nrows > 1)
3755  {
3756  /*
3757  * Doesn't seem worthwhile writing code to cons up a generate_series
3758  * or a values scan to emit multiple rows. Instead just make N clones
3759  * and append them. (With a volatile HAVING clause, this means you
3760  * might get between 0 and N output rows. Offhand I think that's
3761  * desired.)
3762  */
3763  List *paths = NIL;
3764 
3765  while (--nrows >= 0)
3766  {
3767  path = (Path *)
3768  create_group_result_path(root, grouped_rel,
3769  grouped_rel->reltarget,
3770  (List *) parse->havingQual);
3771  paths = lappend(paths, path);
3772  }
3773  path = (Path *)
3774  create_append_path(root,
3775  grouped_rel,
3776  paths,
3777  NIL,
3778  NIL,
3779  NULL,
3780  0,
3781  false,
3782  -1);
3783  }
3784  else
3785  {
3786  /* No grouping sets, or just one, so one output row */
3787  path = (Path *)
3788  create_group_result_path(root, grouped_rel,
3789  grouped_rel->reltarget,
3790  (List *) parse->havingQual);
3791  }
3792 
3793  add_path(grouped_rel, path);
3794 }
AppendPath * create_append_path(PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *partial_subpaths, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows)
Definition: pathnode.c:1244
GroupResultPath * create_group_result_path(PlannerInfo *root, RelOptInfo *rel, PathTarget *target, List *havingqual)
Definition: pathnode.c:1516

References add_path(), create_append_path(), create_group_result_path(), lappend(), list_length(), NIL, PlannerInfo::parse, and RelOptInfo::reltarget.

Referenced by create_grouping_paths().

◆ create_distinct_paths()

static RelOptInfo * create_distinct_paths ( PlannerInfo *root,
RelOptInfo *input_rel 
)
static

Definition at line 4525 of file planner.c.

4526 {
4527  RelOptInfo *distinct_rel;
4528 
4529  /* For now, do all work in the (DISTINCT, NULL) upperrel */
4530  distinct_rel = fetch_upper_rel(root, UPPERREL_DISTINCT, NULL);
4531 
4532  /*
4533  * We don't compute anything at this level, so distinct_rel will be
4534  * parallel-safe if the input rel is parallel-safe. In particular, if
4535  * there is a DISTINCT ON (...) clause, any path for the input_rel will
4536  * output those expressions, and will not be parallel-safe unless those
4537  * expressions are parallel-safe.
4538  */
4539  distinct_rel->consider_parallel = input_rel->consider_parallel;
4540 
4541  /*
4542  * If the input rel belongs to a single FDW, so does the distinct_rel.
4543  */
4544  distinct_rel->serverid = input_rel->serverid;
4545  distinct_rel->userid = input_rel->userid;
4546  distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4547  distinct_rel->fdwroutine = input_rel->fdwroutine;
4548 
4549  /* build distinct paths based on input_rel's pathlist */
4550  create_final_distinct_paths(root, input_rel, distinct_rel);
4551 
4552  /* now build distinct paths based on input_rel's partial_pathlist */
4553  create_partial_distinct_paths(root, input_rel, distinct_rel);
4554 
4555  /* Give a helpful error if we failed to create any paths */
4556  if (distinct_rel->pathlist == NIL)
4557  ereport(ERROR,
4558  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4559  errmsg("could not implement DISTINCT"),
4560  errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4561 
4562  /*
4563  * If there is an FDW that's responsible for all baserels of the query,
4564  * let it consider adding ForeignPaths.
4565  */
4566  if (distinct_rel->fdwroutine &&
4567  distinct_rel->fdwroutine->GetForeignUpperPaths)
4568  distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4569  UPPERREL_DISTINCT,
4570  input_rel,
4571  distinct_rel,
4572  NULL);
4573 
4574  /* Let extensions possibly add some more paths */
4575  if (create_upper_paths_hook)
4576  (*create_upper_paths_hook) (root, UPPERREL_DISTINCT, input_rel,
4577  distinct_rel, NULL);
4578 
4579  /* Now choose the best path(s) */
4580  set_cheapest(distinct_rel);
4581 
4582  return distinct_rel;
4583 }
int errdetail(const char *fmt,...)
Definition: elog.c:1039
int errcode(int sqlerrcode)
Definition: elog.c:695
int errmsg(const char *fmt,...)
Definition: elog.c:906
#define ERROR
Definition: elog.h:35
#define ereport(elevel,...)
Definition: elog.h:145
@ UPPERREL_DISTINCT
Definition: pathnodes.h:77
static void create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *final_distinct_rel)
Definition: planner.c:4594
static RelOptInfo * create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *distinct_rel)
Definition: planner.c:4717
create_upper_paths_hook_type create_upper_paths_hook
Definition: planner.c:78
RelOptInfo * fetch_upper_rel(PlannerInfo *root, UpperRelationKind kind, Relids relids)
Definition: relnode.c:1211
bool useridiscurrent
Definition: pathnodes.h:913
Oid userid
Definition: pathnodes.h:911
Oid serverid
Definition: pathnodes.h:909

References RelOptInfo::consider_parallel, create_final_distinct_paths(), create_partial_distinct_paths(), create_upper_paths_hook, ereport, errcode(), errdetail(), errmsg(), ERROR, fetch_upper_rel(), NIL, RelOptInfo::pathlist, RelOptInfo::serverid, set_cheapest(), UPPERREL_DISTINCT, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by grouping_planner().
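
Both escape hatches exercised just before set_cheapest() above are plain function pointers: FDWs get GetForeignUpperPaths, and loadable modules get create_upper_paths_hook. A minimal, hypothetical extension skeleton (module and function names invented for illustration) that chains onto the hook and singles out the DISTINCT stage:

#include "postgres.h"
#include "fmgr.h"
#include "optimizer/planner.h"

PG_MODULE_MAGIC;

static create_upper_paths_hook_type prev_upper_paths_hook = NULL;

/* Called at every upper-rel stage, after the built-in paths exist. */
static void
my_upper_paths(PlannerInfo *root, UpperRelationKind stage,
               RelOptInfo *input_rel, RelOptInfo *output_rel, void *extra)
{
    if (prev_upper_paths_hook)
        prev_upper_paths_hook(root, stage, input_rel, output_rel, extra);

    if (stage == UPPERREL_DISTINCT)
    {
        /* inspect output_rel->pathlist, or add_path() a custom path */
    }
}

void
_PG_init(void)
{
    prev_upper_paths_hook = create_upper_paths_hook;
    create_upper_paths_hook = my_upper_paths;
}

Any path added here competes on cost with the built-in sort- and hash-based DISTINCT paths when set_cheapest() runs.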

◆ create_final_distinct_paths()

static RelOptInfo * create_final_distinct_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo distinct_rel 
)
static

Definition at line 4717 of file planner.c.

4719 {
4720  Query *parse = root->parse;
4721  Path *cheapest_input_path = input_rel->cheapest_total_path;
4722  double numDistinctRows;
4723  bool allow_hash;
4724 
4725  /* Estimate number of distinct rows there will be */
4726  if (parse->groupClause || parse->groupingSets || parse->hasAggs ||
4727  root->hasHavingQual)
4728  {
4729  /*
4730  * If there was grouping or aggregation, use the number of input rows
4731  * as the estimated number of DISTINCT rows (ie, assume the input is
4732  * already mostly unique).
4733  */
4734  numDistinctRows = cheapest_input_path->rows;
4735  }
4736  else
4737  {
4738  /*
4739  * Otherwise, the UNIQUE filter has effects comparable to GROUP BY.
4740  */
4741  List *distinctExprs;
4742 
4743  distinctExprs = get_sortgrouplist_exprs(parse->distinctClause,
4744  parse->targetList);
4745  numDistinctRows = estimate_num_groups(root, distinctExprs,
4746  cheapest_input_path->rows,
4747  NULL, NULL);
4748  }
4749 
4750  /*
4751  * Consider sort-based implementations of DISTINCT, if possible.
4752  */
4753  if (grouping_is_sortable(parse->distinctClause))
4754  {
4755  /*
4756  * First, if we have any adequately-presorted paths, just stick a
4757  * Unique node on those. Then consider doing an explicit sort of the
4758  * cheapest input path and Unique'ing that.
4759  *
4760  * When we have DISTINCT ON, we must sort by the more rigorous of
4761  * DISTINCT and ORDER BY, else it won't have the desired behavior.
4762  * Also, if we do have to do an explicit sort, we might as well use
4763  * the more rigorous ordering to avoid a second sort later. (Note
4764  * that the parser will have ensured that one clause is a prefix of
4765  * the other.)
4766  */
4767  List *needed_pathkeys;
4768  Path *path;
4769  ListCell *lc;
4770 
4771  if (parse->hasDistinctOn &&
4772  list_length(root->distinct_pathkeys) <
4773  list_length(root->sort_pathkeys))
4774  needed_pathkeys = root->sort_pathkeys;
4775  else
4776  needed_pathkeys = root->distinct_pathkeys;
4777 
4778  foreach(lc, input_rel->pathlist)
4779  {
4780  path = (Path *) lfirst(lc);
4781 
4782  if (pathkeys_contained_in(needed_pathkeys, path->pathkeys))
4783  {
4784  /*
4785  * distinct_pathkeys may have become empty if all of the
4786  * pathkeys were determined to be redundant. If all of the
4787  * pathkeys are redundant then each DISTINCT target must only
4788  * allow a single value, therefore all resulting tuples must
4789  * be identical (or at least indistinguishable by an equality
4790  * check). We can uniquify these tuples simply by taking
4791  * the first tuple. All we do here is add a path to do "LIMIT
4792  * 1" atop of 'path'. When doing a DISTINCT ON we may still
4793  * have a non-NIL sort_pathkeys list, so we must still only do
4794  * this with paths which are correctly sorted by
4795  * sort_pathkeys.
4796  */
4797  if (root->distinct_pathkeys == NIL)
4798  {
4799  Node *limitCount;
4800 
4801  limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
4802  sizeof(int64),
4803  Int64GetDatum(1), false,
4804  FLOAT8PASSBYVAL);
4805 
4806  /*
4807  * If the query already has a LIMIT clause, then we could
4808  * end up with a duplicate LimitPath in the final plan.
4809  * That does not seem worth troubling over too much.
4810  */
4811  add_path(distinct_rel, (Path *)
4812  create_limit_path(root, distinct_rel, path, NULL,
4813  limitCount, LIMIT_OPTION_COUNT,
4814  0, 1));
4815  }
4816  else
4817  {
4818  add_path(distinct_rel, (Path *)
4819  create_upper_unique_path(root, distinct_rel,
4820  path,
4821  list_length(root->distinct_pathkeys),
4822  numDistinctRows));
4823  }
4824  }
4825  }
4826 
4827  /* For explicit-sort case, always use the more rigorous clause */
4828  if (list_length(root->distinct_pathkeys) <
4829  list_length(root->sort_pathkeys))
4830  {
4831  needed_pathkeys = root->sort_pathkeys;
4832  /* Assert checks that parser didn't mess up... */
4833  Assert(pathkeys_contained_in(root->distinct_pathkeys,
4834  needed_pathkeys));
4835  }
4836  else
4837  needed_pathkeys = root->distinct_pathkeys;
4838 
4839  path = cheapest_input_path;
4840  if (!pathkeys_contained_in(needed_pathkeys, path->pathkeys))
4841  path = (Path *) create_sort_path(root, distinct_rel,
4842  path,
4843  needed_pathkeys,
4844  root->distinct_pathkeys == NIL ?
4845  1.0 : -1.0);
4846 
4847  /*
4848  * As above, use a LimitPath instead of a UniquePath when all of the
4849  * distinct_pathkeys are redundant and we're only going to get a
4850  * series of tuples all with the same values anyway.
4851  */
4852  if (root->distinct_pathkeys == NIL)
4853  {
4854  Node *limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
4855  sizeof(int64),
4856  Int64GetDatum(1), false,
4857  FLOAT8PASSBYVAL);
4858 
4859  add_path(distinct_rel, (Path *)
4860  create_limit_path(root, distinct_rel, path, NULL,
4861  limitCount, LIMIT_OPTION_COUNT, 0, 1));
4862  }
4863  else
4864  {
4865  add_path(distinct_rel, (Path *)
4866  create_upper_unique_path(root, distinct_rel,
4867  path,
4868  list_length(root->distinct_pathkeys),
4869  numDistinctRows));
4870  }
4871  }
4872 
4873  /*
4874  * Consider hash-based implementations of DISTINCT, if possible.
4875  *
4876  * If we were not able to make any other types of path, we *must* hash or
4877  * die trying. If we do have other choices, there are two things that
4878  * should prevent selection of hashing: if the query uses DISTINCT ON
4879  * (because it won't really have the expected behavior if we hash), or if
4880  * enable_hashagg is off.
4881  *
4882  * Note: grouping_is_hashable() is much more expensive to check than the
4883  * other gating conditions, so we want to do it last.
4884  */
4885  if (distinct_rel->pathlist == NIL)
4886  allow_hash = true; /* we have no alternatives */
4887  else if (parse->hasDistinctOn || !enable_hashagg)
4888  allow_hash = false; /* policy-based decision not to hash */
4889  else
4890  allow_hash = true; /* default */
4891 
4892  if (allow_hash && grouping_is_hashable(parse->distinctClause))
4893  {
4894  /* Generate hashed aggregate path --- no sort needed */
4895  add_path(distinct_rel, (Path *)
4896  create_agg_path(root,
4897  distinct_rel,
4898  cheapest_input_path,
4899  cheapest_input_path->pathtarget,
4900  AGG_HASHED,
4901  AGGSPLIT_SIMPLE,
4902  parse->distinctClause,
4903  NIL,
4904  NULL,
4905  numDistinctRows));
4906  }
4907 
4908  return distinct_rel;
4909 }
#define FLOAT8PASSBYVAL
Definition: c.h:571
bool enable_hashagg
Definition: costsize.c:142
Datum Int64GetDatum(int64 X)
Definition: fmgr.c:1683
Const * makeConst(Oid consttype, int32 consttypmod, Oid constcollid, int constlen, Datum constvalue, bool constisnull, bool constbyval)
Definition: makefuncs.c:299
@ LIMIT_OPTION_COUNT
Definition: nodes.h:428
LimitPath * create_limit_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, Node *limitOffset, Node *limitCount, LimitOption limitOption, int64 offset_est, int64 count_est)
Definition: pathnode.c:3754
UpperUniquePath * create_upper_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, int numCols, double numGroups)
Definition: pathnode.c:3064
#define InvalidOid
Definition: postgres_ext.h:36
double estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, List **pgset, EstimationInfo *estinfo)
Definition: selfuncs.c:3385
Cardinality rows
Definition: pathnodes.h:1544
List * distinct_pathkeys
Definition: pathnodes.h:387
bool hasHavingQual
Definition: pathnodes.h:455
List * sort_pathkeys
Definition: pathnodes.h:389
List * get_sortgrouplist_exprs(List *sgClauses, List *targetList)
Definition: tlist.c:392
bool grouping_is_sortable(List *groupClause)
Definition: tlist.c:540
bool grouping_is_hashable(List *groupClause)
Definition: tlist.c:560

References add_path(), AGG_HASHED, AGGSPLIT_SIMPLE, Assert(), RelOptInfo::cheapest_total_path, create_agg_path(), create_limit_path(), create_sort_path(), create_upper_unique_path(), PlannerInfo::distinct_pathkeys, enable_hashagg, estimate_num_groups(), FLOAT8PASSBYVAL, get_sortgrouplist_exprs(), grouping_is_hashable(), grouping_is_sortable(), PlannerInfo::hasHavingQual, Int64GetDatum(), InvalidOid, lfirst, LIMIT_OPTION_COUNT, list_length(), makeConst(), NIL, parse(), PlannerInfo::parse, Path::pathkeys, pathkeys_contained_in(), RelOptInfo::pathlist, Path::rows, and PlannerInfo::sort_pathkeys.

Referenced by create_distinct_paths(), and create_partial_distinct_paths().
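
One detail in the listing above deserves a note: distinct_pathkeys ends up NIL when every DISTINCT expression is pinned to a single value (for example, SELECT DISTINCT x FROM tab WHERE x = 1), so any one input row is a correct answer and a LIMIT 1 is cheaper than a Unique or hashed aggregate. The constant for that LIMIT is the makeConst() call at lines 4801 and 4854, repeated here with the arguments labeled (signature as shown above):

Node *limitCount = (Node *) makeConst(INT8OID,          /* consttype */
                                      -1,               /* typmod: none */
                                      InvalidOid,       /* collation: none */
                                      sizeof(int64),    /* constlen */
                                      Int64GetDatum(1), /* constvalue */
                                      false,            /* constisnull */
                                      FLOAT8PASSBYVAL); /* constbyval: an int8
                                                         * is pass-by-value
                                                         * exactly when float8
                                                         * is, i.e. on builds
                                                         * with 8-byte Datums */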

◆ create_grouping_paths()

static RelOptInfo * create_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget target,
bool  target_parallel_safe,
grouping_sets_data gd 
)
static

Definition at line 3559 of file planner.c.

3564 {
3565  Query *parse = root->parse;
3566  RelOptInfo *grouped_rel;
3567  RelOptInfo *partially_grouped_rel;
3568  AggClauseCosts agg_costs;
3569 
3570  MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
3571  get_agg_clause_costs(root, AGGSPLIT_SIMPLE, &agg_costs);
3572 
3573  /*
3574  * Create grouping relation to hold fully aggregated grouping and/or
3575  * aggregation paths.
3576  */
3577  grouped_rel = make_grouping_rel(root, input_rel, target,
3578  target_parallel_safe, parse->havingQual);
3579 
3580  /*
3581  * Create either paths for a degenerate grouping or paths for ordinary
3582  * grouping, as appropriate.
3583  */
3584  if (is_degenerate_grouping(root))
3585  create_degenerate_grouping_paths(root, input_rel, grouped_rel);
3586  else
3587  {
3588  int flags = 0;
3589  GroupPathExtraData extra;
3590 
3591  /*
3592  * Determine whether it's possible to perform sort-based
3593  * implementations of grouping. (Note that if groupClause is empty,
3594  * grouping_is_sortable() is trivially true, and all the
3595  * pathkeys_contained_in() tests will succeed too, so that we'll
3596  * consider every surviving input path.)
3597  *
3598  * If we have grouping sets, we might be able to sort some but not all
3599  * of them; in this case, we need can_sort to be true as long as we
3600  * must consider any sorted-input plan.
3601  */
3602  if ((gd && gd->rollups != NIL)
3603  || grouping_is_sortable(parse->groupClause))
3604  flags |= GROUPING_CAN_USE_SORT;
3605 
3606  /*
3607  * Determine whether we should consider hash-based implementations of
3608  * grouping.
3609  *
3610  * Hashed aggregation only applies if we're grouping. If we have
3611  * grouping sets, some groups might be hashable but others not; in
3612  * this case we set can_hash true as long as there is nothing globally
3613  * preventing us from hashing (and we should therefore consider plans
3614  * with hashes).
3615  *
3616  * Executor doesn't support hashed aggregation with DISTINCT or ORDER
3617  * BY aggregates. (Doing so would imply storing *all* the input
3618  * values in the hash table, and/or running many sorts in parallel,
3619  * either of which seems like a certain loser.) We similarly don't
3620  * support ordered-set aggregates in hashed aggregation, but that case
3621  * is also included in the numOrderedAggs count.
3622  *
3623  * Note: grouping_is_hashable() is much more expensive to check than
3624  * the other gating conditions, so we want to do it last.
3625  */
3626  if ((parse->groupClause != NIL &&
3627  root->numOrderedAggs == 0 &&
3628  (gd ? gd->any_hashable : grouping_is_hashable(parse->groupClause))))
3629  flags |= GROUPING_CAN_USE_HASH;
3630 
3631  /*
3632  * Determine whether partial aggregation is possible.
3633  */
3634  if (can_partial_agg(root))
3635  flags |= GROUPING_CAN_PARTIAL_AGG;
3636 
3637  extra.flags = flags;
3638  extra.target_parallel_safe = target_parallel_safe;
3639  extra.havingQual = parse->havingQual;
3640  extra.targetList = parse->targetList;
3641  extra.partial_costs_set = false;
3642 
3643  /*
3644  * Determine whether partitionwise aggregation is in theory possible.
3645  * It can be disabled by the user, and for now, we don't try to
3646  * support grouping sets. create_ordinary_grouping_paths() will check
3647  * additional conditions, such as whether input_rel is partitioned.
3648  */
3649  if (enable_partitionwise_aggregate && !parse->groupingSets)
3650  extra.patype = PARTITIONWISE_AGGREGATE_FULL;
3651  else
3652  extra.patype = PARTITIONWISE_AGGREGATE_NONE;
3653 
3654  create_ordinary_grouping_paths(root, input_rel, grouped_rel,
3655  &agg_costs, gd, &extra,
3656  &partially_grouped_rel);
3657  }
3658 
3659  set_cheapest(grouped_rel);
3660  return grouped_rel;
3661 }
#define MemSet(start, val, len)
Definition: c.h:953
bool enable_partitionwise_aggregate
Definition: costsize.c:150
@ PARTITIONWISE_AGGREGATE_FULL
Definition: pathnodes.h:3056
@ PARTITIONWISE_AGGREGATE_NONE
Definition: pathnodes.h:3055
#define GROUPING_CAN_PARTIAL_AGG
Definition: pathnodes.h:3040
static void create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel)
Definition: planner.c:3746
static bool is_degenerate_grouping(PlannerInfo *root)
Definition: planner.c:3725
static void create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, GroupPathExtraData *extra, RelOptInfo **partially_grouped_rel_p)
Definition: planner.c:3810
static bool can_partial_agg(PlannerInfo *root)
Definition: planner.c:7294
static RelOptInfo * make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, Node *havingQual)
Definition: planner.c:3672
void get_agg_clause_costs(PlannerInfo *root, AggSplit aggsplit, AggClauseCosts *costs)
Definition: prepagg.c:541
PartitionwiseAggregateType patype
Definition: pathnodes.h:3085
int numOrderedAggs
Definition: pathnodes.h:473

References AGGSPLIT_SIMPLE, grouping_sets_data::any_hashable, can_partial_agg(), create_degenerate_grouping_paths(), create_ordinary_grouping_paths(), enable_partitionwise_aggregate, GroupPathExtraData::flags, get_agg_clause_costs(), GROUPING_CAN_PARTIAL_AGG, GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, grouping_is_hashable(), grouping_is_sortable(), GroupPathExtraData::havingQual, is_degenerate_grouping(), make_grouping_rel(), MemSet, NIL, PlannerInfo::numOrderedAggs, parse(), PlannerInfo::parse, GroupPathExtraData::partial_costs_set, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, GroupPathExtraData::patype, grouping_sets_data::rollups, set_cheapest(), GroupPathExtraData::target_parallel_safe, and GroupPathExtraData::targetList.

Referenced by grouping_planner().
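
For orientation, the capability bits assembled into extra.flags above are defined in pathnodes.h as follows, and the comment sketches a query shape that rules hashing out:

/* From pathnodes.h: capability flags carried in GroupPathExtraData.flags. */
#define GROUPING_CAN_USE_SORT       0x0001
#define GROUPING_CAN_USE_HASH       0x0002
#define GROUPING_CAN_PARTIAL_AGG    0x0004

/*
 * Example: an ORDER BY (or ordered-set) aggregate makes numOrderedAggs
 * nonzero, so GROUPING_CAN_USE_HASH stays clear and only sort-based
 * grouping strategies are generated:
 *
 *     SELECT g, array_agg(x ORDER BY x) FROM tab GROUP BY g;
 */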

◆ create_one_window_path()

static void create_one_window_path ( PlannerInfo root,
RelOptInfo window_rel,
Path path,
PathTarget input_target,
PathTarget output_target,
WindowFuncLists wflists,
List activeWindows 
)
static

Definition at line 4395 of file planner.c.

4402 {
4403  PathTarget *window_target;
4404  ListCell *l;
4405  List *topqual = NIL;
4406 
4407  /*
4408  * Since each window clause could require a different sort order, we stack
4409  * up a WindowAgg node for each clause, with sort steps between them as
4410  * needed. (We assume that select_active_windows chose a good order for
4411  * executing the clauses in.)
4412  *
4413  * input_target should contain all Vars and Aggs needed for the result.
4414  * (In some cases we wouldn't need to propagate all of these all the way
4415  * to the top, since they might only be needed as inputs to WindowFuncs.
4416  * It's probably not worth trying to optimize that though.) It must also
4417  * contain all window partitioning and sorting expressions, to ensure
4418  * they're computed only once at the bottom of the stack (that's critical
4419  * for volatile functions). As we climb up the stack, we'll add outputs
4420  * for the WindowFuncs computed at each level.
4421  */
4422  window_target = input_target;
4423 
4424  foreach(l, activeWindows)
4425  {
4426  WindowClause *wc = lfirst_node(WindowClause, l);
4427  List *window_pathkeys;
4428  int presorted_keys;
4429  bool is_sorted;
4430  bool topwindow;
4431 
4432  window_pathkeys = make_pathkeys_for_window(root,
4433  wc,
4434  root->processed_tlist);
4435 
4436  is_sorted = pathkeys_count_contained_in(window_pathkeys,
4437  path->pathkeys,
4438  &presorted_keys);
4439 
4440  /* Sort if necessary */
4441  if (!is_sorted)
4442  {
4443  /*
4444  * No presorted keys or incremental sort disabled, just perform a
4445  * complete sort.
4446  */
4447  if (presorted_keys == 0 || !enable_incremental_sort)
4448  path = (Path *) create_sort_path(root, window_rel,
4449  path,
4450  window_pathkeys,
4451  -1.0);
4452  else
4453  {
4454  /*
4455  * Since we have presorted keys and incremental sort is
4456  * enabled, just use incremental sort.
4457  */
4458  path = (Path *) create_incremental_sort_path(root,
4459  window_rel,
4460  path,
4461  window_pathkeys,
4462  presorted_keys,
4463  -1.0);
4464  }
4465  }
4466 
4467  if (lnext(activeWindows, l))
4468  {
4469  /*
4470  * Add the current WindowFuncs to the output target for this
4471  * intermediate WindowAggPath. We must copy window_target to
4472  * avoid changing the previous path's target.
4473  *
4474  * Note: a WindowFunc adds nothing to the target's eval costs; but
4475  * we do need to account for the increase in tlist width.
4476  */
4477  ListCell *lc2;
4478 
4479  window_target = copy_pathtarget(window_target);
4480  foreach(lc2, wflists->windowFuncs[wc->winref])
4481  {
4482  WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4483 
4484  add_column_to_pathtarget(window_target, (Expr *) wfunc, 0);
4485  window_target->width += get_typavgwidth(wfunc->wintype, -1);
4486  }
4487  }
4488  else
4489  {
4490  /* Install the goal target in the topmost WindowAgg */
4491  window_target = output_target;
4492  }
4493 
4494  /* mark the final item in the list as the top-level window */
4495  topwindow = foreach_current_index(l) == list_length(activeWindows) - 1;
4496 
4497  /*
4498  * Accumulate all of the runConditions from each intermediate
4499  * WindowClause. The top-level WindowAgg must pass these as a qual so
4500  * that it filters out unwanted tuples correctly.
4501  */
4502  if (!topwindow)
4503  topqual = list_concat(topqual, wc->runCondition);
4504 
4505  path = (Path *)
4506  create_windowagg_path(root, window_rel, path, window_target,
4507  wflists->windowFuncs[wc->winref],
4508  wc, topwindow ? topqual : NIL, topwindow);
4509  }
4510 
4511  add_path(window_rel, path);
4512 }
int32 get_typavgwidth(Oid typid, int32 typmod)
Definition: lsyscache.c:2536
WindowAggPath * create_windowagg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *windowFuncs, WindowClause *winclause, List *qual, bool topwindow)
Definition: pathnode.c:3417
#define foreach_current_index(cell)
Definition: pg_list.h:401
static List * make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc, List *tlist)
Definition: planner.c:5723
List * processed_tlist
Definition: pathnodes.h:415
List * runCondition
Definition: parsenodes.h:1421
List ** windowFuncs
Definition: clauses.h:23
Oid wintype
Definition: primnodes.h:490
void add_column_to_pathtarget(PathTarget *target, Expr *expr, Index sortgroupref)
Definition: tlist.c:695

References add_column_to_pathtarget(), add_path(), copy_pathtarget(), create_incremental_sort_path(), create_sort_path(), create_windowagg_path(), enable_incremental_sort, foreach_current_index, get_typavgwidth(), lfirst_node, list_concat(), list_length(), lnext(), make_pathkeys_for_window(), NIL, Path::pathkeys, pathkeys_count_contained_in(), PlannerInfo::processed_tlist, WindowClause::runCondition, PathTarget::width, WindowFuncLists::windowFuncs, WindowClause::winref, and WindowFunc::wintype.

Referenced by create_window_paths().
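
The stacking described above is easiest to see with two windows whose orderings differ: each clause gets its own WindowAgg, with a Sort (or Incremental Sort) inserted wherever the previous level's output order is insufficient. Sketched plan shape, not verbatim EXPLAIN output:

/*
 *   SELECT rank() OVER (ORDER BY a) AS ra,
 *          rank() OVER (ORDER BY b) AS rb
 *   FROM tab;
 *
 * plans roughly as:
 *
 *   WindowAgg                     -- computes rb; gets output_target
 *     -> Sort  (Sort Key: b)
 *       -> WindowAgg              -- computes ra; widened input_target copy
 *         -> Sort  (Sort Key: a)
 *           -> Seq Scan on tab
 *
 * The bottom sort evaluates all partitioning/ordering expressions once;
 * each level above merely appends its own WindowFuncs to the target.
 */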

◆ create_ordered_paths()

static RelOptInfo * create_ordered_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget target,
bool  target_parallel_safe,
double  limit_tuples 
)
static

Definition at line 4929 of file planner.c.

4934 {
4935  Path *cheapest_input_path = input_rel->cheapest_total_path;
4936  RelOptInfo *ordered_rel;
4937  ListCell *lc;
4938 
4939  /* For now, do all work in the (ORDERED, NULL) upperrel */
4940  ordered_rel = fetch_upper_rel(root, UPPERREL_ORDERED, NULL);
4941 
4942  /*
4943  * If the input relation is not parallel-safe, then the ordered relation
4944  * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
4945  * target list is parallel-safe.
4946  */
4947  if (input_rel->consider_parallel && target_parallel_safe)
4948  ordered_rel->consider_parallel = true;
4949 
4950  /*
4951  * If the input rel belongs to a single FDW, so does the ordered_rel.
4952  */
4953  ordered_rel->serverid = input_rel->serverid;
4954  ordered_rel->userid = input_rel->userid;
4955  ordered_rel->useridiscurrent = input_rel->useridiscurrent;
4956  ordered_rel->fdwroutine = input_rel->fdwroutine;
4957 
4958  foreach(lc, input_rel->pathlist)
4959  {
4960  Path *input_path = (Path *) lfirst(lc);
4961  Path *sorted_path = input_path;
4962  bool is_sorted;
4963  int presorted_keys;
4964 
4965  is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
4966  input_path->pathkeys, &presorted_keys);
4967 
4968  if (is_sorted)
4969  {
4970  /* Use the input path as is, but add a projection step if needed */
4971  if (sorted_path->pathtarget != target)
4972  sorted_path = apply_projection_to_path(root, ordered_rel,
4973  sorted_path, target);
4974 
4975  add_path(ordered_rel, sorted_path);
4976  }
4977  else
4978  {
4979  /*
4980  * Try adding an explicit sort, but only to the cheapest total
4981  * path since a full sort should generally add the same cost to
4982  * all paths.
4983  */
4984  if (input_path == cheapest_input_path)
4985  {
4986  /*
4987  * Sort the cheapest input path. An explicit sort here can
4988  * take advantage of LIMIT.
4989  */
4990  sorted_path = (Path *) create_sort_path(root,
4991  ordered_rel,
4992  input_path,
4993  root->sort_pathkeys,
4994  limit_tuples);
4995  /* Add projection step if needed */
4996  if (sorted_path->pathtarget != target)
4997  sorted_path = apply_projection_to_path(root, ordered_rel,
4998  sorted_path, target);
4999 
5000  add_path(ordered_rel, sorted_path);
5001  }
5002 
5003  /*
5004  * If incremental sort is enabled, then try it as well. Unlike
5005  * with regular sorts, we can't just look at the cheapest path,
5006  * because the cost of incremental sort depends on how well
5007  * presorted the path is. Additionally incremental sort may enable
5008  * a cheaper startup path to win out despite higher total cost.
5009  */
5010  if (!enable_incremental_sort)
5011  continue;
5012 
5013  /* Likewise, if the path can't be used for incremental sort. */
5014  if (!presorted_keys)
5015  continue;
5016 
5017  /* Also consider incremental sort. */
5018  sorted_path = (Path *) create_incremental_sort_path(root,
5019  ordered_rel,
5020  input_path,
5021  root->sort_pathkeys,
5022  presorted_keys,
5023  limit_tuples);
5024 
5025  /* Add projection step if needed */
5026  if (sorted_path->pathtarget != target)
5027  sorted_path = apply_projection_to_path(root, ordered_rel,
5028  sorted_path, target);
5029 
5030  add_path(ordered_rel, sorted_path);
5031  }
5032  }
5033 
5034  /*
5035  * generate_gather_paths() will have already generated a simple Gather
5036  * path for the best parallel path, if any, and the loop above will have
5037  * considered sorting it. Similarly, generate_gather_paths() will also
5038  * have generated order-preserving Gather Merge plans which can be used
5039  * without sorting if they happen to match the sort_pathkeys, and the loop
5040  * above will have handled those as well. However, there's one more
5041  * possibility: it may make sense to sort the cheapest partial path
5042  * according to the required output order and then use Gather Merge.
5043  */
5044  if (ordered_rel->consider_parallel && root->sort_pathkeys != NIL &&
5045  input_rel->partial_pathlist != NIL)
5046  {
5047  Path *cheapest_partial_path;
5048 
5049  cheapest_partial_path = linitial(input_rel->partial_pathlist);
5050 
5051  /*
5052  * If cheapest partial path doesn't need a sort, this is redundant
5053  * with what's already been tried.
5054  */
5055  if (!pathkeys_contained_in(root->sort_pathkeys,
5056  cheapest_partial_path->pathkeys))
5057  {
5058  Path *path;
5059  double total_groups;
5060 
5061  path = (Path *) create_sort_path(root,
5062  ordered_rel,
5063  cheapest_partial_path,
5064  root->sort_pathkeys,
5065  limit_tuples);
5066 
5067  total_groups = cheapest_partial_path->rows *
5068  cheapest_partial_path->parallel_workers;
5069  path = (Path *)
5070  create_gather_merge_path(root, ordered_rel,
5071  path,
5072  path->pathtarget,
5073  root->sort_pathkeys, NULL,
5074  &total_groups);
5075 
5076  /* Add projection step if needed */
5077  if (path->pathtarget != target)
5078  path = apply_projection_to_path(root, ordered_rel,
5079  path, target);
5080 
5081  add_path(ordered_rel, path);
5082  }
5083 
5084  /*
5085  * Consider incremental sort with a gather merge on partial paths.
5086  *
5087  * We can also skip the entire loop when we only have a single-item
5088  * sort_pathkeys because then we can't possibly have a presorted
5089  * prefix of the list without having the list be fully sorted.
5090  */
5091  if (enable_incremental_sort && list_length(root->sort_pathkeys) > 1)
5092  {
5093  foreach(lc, input_rel->partial_pathlist)
5094  {
5095  Path *input_path = (Path *) lfirst(lc);
5096  Path *sorted_path;
5097  bool is_sorted;
5098  int presorted_keys;
5099  double total_groups;
5100 
5101  /*
5102  * We don't care if this is the cheapest partial path - we
5103  * can't simply skip it, because it may be partially sorted in
5104  * which case we want to consider adding incremental sort
5105  * (instead of full sort, which is what happens above).
5106  */
5107 
5108  is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5109  input_path->pathkeys,
5110  &presorted_keys);
5111 
5112  /* No point in adding incremental sort on fully sorted paths. */
5113  if (is_sorted)
5114  continue;
5115 
5116  if (presorted_keys == 0)
5117  continue;
5118 
5119  /* Since we have presorted keys, consider incremental sort. */
5120  sorted_path = (Path *) create_incremental_sort_path(root,
5121  ordered_rel,
5122  input_path,
5123  root->sort_pathkeys,
5124  presorted_keys,
5125  limit_tuples);
5126  total_groups = input_path->rows *
5127  input_path->parallel_workers;
5128  sorted_path = (Path *)
5129  create_gather_merge_path(root, ordered_rel,
5130  sorted_path,
5131  sorted_path->pathtarget,
5132  root->sort_pathkeys, NULL,
5133  &total_groups);
5134 
5135  /* Add projection step if needed */
5136  if (sorted_path->pathtarget != target)
5137  sorted_path = apply_projection_to_path(root, ordered_rel,
5138  sorted_path, target);
5139 
5140  add_path(ordered_rel, sorted_path);
5141  }
5142  }
5143  }
5144 
5145  /*
5146  * If there is an FDW that's responsible for all baserels of the query,
5147  * let it consider adding ForeignPaths.
5148  */
5149  if (ordered_rel->fdwroutine &&
5150  ordered_rel->fdwroutine->GetForeignUpperPaths)
5151  ordered_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_ORDERED,
5152  input_rel, ordered_rel,
5153  NULL);
5154 
5155  /* Let extensions possibly add some more paths */
5156  if (create_upper_paths_hook)
5157  (*create_upper_paths_hook) (root, UPPERREL_ORDERED,
5158  input_rel, ordered_rel, NULL);
5159 
5160  /*
5161  * No need to bother with set_cheapest here; grouping_planner does not
5162  * need us to do it.
5163  */
5164  Assert(ordered_rel->pathlist != NIL);
5165 
5166  return ordered_rel;
5167 }
GatherMergePath * create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *pathkeys, Relids required_outer, double *rows)
Definition: pathnode.c:1873
@ UPPERREL_ORDERED
Definition: pathnodes.h:78
int parallel_workers
Definition: pathnodes.h:1541

References add_path(), apply_projection_to_path(), Assert(), RelOptInfo::cheapest_total_path, RelOptInfo::consider_parallel, create_gather_merge_path(), create_incremental_sort_path(), create_sort_path(), create_upper_paths_hook, enable_incremental_sort, fetch_upper_rel(), lfirst, linitial, list_length(), NIL, Path::parallel_workers, RelOptInfo::partial_pathlist, Path::pathkeys, pathkeys_contained_in(), pathkeys_count_contained_in(), RelOptInfo::pathlist, Path::rows, RelOptInfo::serverid, PlannerInfo::sort_pathkeys, UPPERREL_ORDERED, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by grouping_planner().
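
To make the partial-path logic above concrete: with ORDER BY a, b and a parallel index scan that is already ordered by a, the planner can finish the sort incrementally beneath a Gather Merge instead of re-sorting from scratch. Sketched shape, not verbatim EXPLAIN output:

/*
 *   SELECT * FROM tab ORDER BY a, b;   -- index on (a) only
 *
 *   Gather Merge
 *     -> Incremental Sort
 *          Sort Key: a, b
 *          Presorted Key: a            -- presorted_keys == 1
 *       -> Parallel Index Scan using tab_a_idx on tab
 *
 * A full Sort of the cheapest partial path competes with this on cost;
 * limit_tuples lets both sorts be costed for early termination when a
 * LIMIT bounds the fetch.
 */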

◆ create_ordinary_grouping_paths()

static void create_ordinary_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo grouped_rel,
const AggClauseCosts agg_costs,
grouping_sets_data gd,
GroupPathExtraData extra,
RelOptInfo **  partially_grouped_rel_p 
)
static

Definition at line 3810 of file planner.c.

3816 {
3817  Path *cheapest_path = input_rel->cheapest_total_path;
3818  RelOptInfo *partially_grouped_rel = NULL;
3819  double dNumGroups;
3820  PartitionwiseAggregateType patype = PARTITIONWISE_AGGREGATE_NONE;
3821 
3822  /*
3823  * If this is the topmost grouping relation or if the parent relation is
3824  * doing some form of partitionwise aggregation, then we may be able to do
3825  * it at this level also. However, if the input relation is not
3826  * partitioned, partitionwise aggregate is impossible.
3827  */
3828  if (extra->patype != PARTITIONWISE_AGGREGATE_NONE &&
3829  IS_PARTITIONED_REL(input_rel))
3830  {
3831  /*
3832  * If this is the topmost relation or if the parent relation is doing
3833  * full partitionwise aggregation, then we can do full partitionwise
3834  * aggregation provided that the GROUP BY clause contains all of the
3835  * partitioning columns at this level. Otherwise, we can do at most
3836  * partial partitionwise aggregation. But if partial aggregation is
3837  * not supported in general then we can't use it for partitionwise
3838  * aggregation either.
3839  */
3840  if (extra->patype == PARTITIONWISE_AGGREGATE_FULL &&
3841  group_by_has_partkey(input_rel, extra->targetList,
3842  root->parse->groupClause))
3843  patype = PARTITIONWISE_AGGREGATE_FULL;
3844  else if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
3845  patype = PARTITIONWISE_AGGREGATE_PARTIAL;
3846  else
3847  patype = PARTITIONWISE_AGGREGATE_NONE;
3848  }
3849 
3850  /*
3851  * Before generating paths for grouped_rel, we first generate any possible
3852  * partially grouped paths; that way, later code can easily consider both
3853  * parallel and non-parallel approaches to grouping.
3854  */
3855  if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
3856  {
3857  bool force_rel_creation;
3858 
3859  /*
3860  * If we're doing partitionwise aggregation at this level, force
3861  * creation of a partially_grouped_rel so we can add partitionwise
3862  * paths to it.
3863  */
3864  force_rel_creation = (patype == PARTITIONWISE_AGGREGATE_PARTIAL);
3865 
3866  partially_grouped_rel =
3867  create_partial_grouping_paths(root,
3868  grouped_rel,
3869  input_rel,
3870  gd,
3871  extra,
3872  force_rel_creation);
3873  }
3874 
3875  /* Set out parameter. */
3876  *partially_grouped_rel_p = partially_grouped_rel;
3877 
3878  /* Apply partitionwise aggregation technique, if possible. */
3879  if (patype != PARTITIONWISE_AGGREGATE_NONE)
3880  create_partitionwise_grouping_paths(root, input_rel, grouped_rel,
3881  partially_grouped_rel, agg_costs,
3882  gd, patype, extra);
3883 
3884  /* If we are doing partial aggregation only, return. */
3885  if (extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
3886  {
3887  Assert(partially_grouped_rel);
3888 
3889  if (partially_grouped_rel->pathlist)
3890  set_cheapest(partially_grouped_rel);
3891 
3892  return;
3893  }
3894 
3895  /* Gather any partially grouped partial paths. */
3896  if (partially_grouped_rel && partially_grouped_rel->partial_pathlist)
3897  {
3898  gather_grouping_paths(root, partially_grouped_rel);
3899  set_cheapest(partially_grouped_rel);
3900  }
3901 
3902  /*
3903  * Estimate number of groups.
3904  */
3905  dNumGroups = get_number_of_groups(root,
3906  cheapest_path->rows,
3907  gd,
3908  extra->targetList);
3909 
3910  /* Build final grouping paths */
3911  add_paths_to_grouping_rel(root, input_rel, grouped_rel,
3912  partially_grouped_rel, agg_costs, gd,
3913  dNumGroups, extra);
3914 
3915  /* Give a helpful error if we failed to find any implementation */
3916  if (grouped_rel->pathlist == NIL)
3917  ereport(ERROR,
3918  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3919  errmsg("could not implement GROUP BY"),
3920  errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
3921 
3922  /*
3923  * If there is an FDW that's responsible for all baserels of the query,
3924  * let it consider adding ForeignPaths.
3925  */
3926  if (grouped_rel->fdwroutine &&
3927  grouped_rel->fdwroutine->GetForeignUpperPaths)
3928  grouped_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_GROUP_AGG,
3929  input_rel, grouped_rel,
3930  extra);
3931 
3932  /* Let extensions possibly add some more paths */
3933  if (create_upper_paths_hook)
3934  (*create_upper_paths_hook) (root, UPPERREL_GROUP_AGG,
3935  input_rel, grouped_rel,
3936  extra);
3937 }
PartitionwiseAggregateType
Definition: pathnodes.h:3054
@ PARTITIONWISE_AGGREGATE_PARTIAL
Definition: pathnodes.h:3057
@ UPPERREL_GROUP_AGG
Definition: pathnodes.h:74
static RelOptInfo * create_partial_grouping_paths(PlannerInfo *root, RelOptInfo *grouped_rel, RelOptInfo *input_rel, grouping_sets_data *gd, GroupPathExtraData *extra, bool force_rel_creation)
Definition: planner.c:6822
static void add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, double dNumGroups, GroupPathExtraData *extra)
Definition: planner.c:6476
static double get_number_of_groups(PlannerInfo *root, double path_rows, grouping_sets_data *gd, List *target_list)
Definition: planner.c:3437
static void create_partitionwise_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, PartitionwiseAggregateType patype, GroupPathExtraData *extra)
Definition: planner.c:7571
static bool group_by_has_partkey(RelOptInfo *input_rel, List *targetList, List *groupClause)
Definition: planner.c:7715
List * groupClause
Definition: parsenodes.h:170

References add_paths_to_grouping_rel(), Assert(), RelOptInfo::cheapest_total_path, create_partial_grouping_paths(), create_partitionwise_grouping_paths(), create_upper_paths_hook, ereport, errcode(), errdetail(), errmsg(), ERROR, GroupPathExtraData::flags, gather_grouping_paths(), get_number_of_groups(), group_by_has_partkey(), Query::groupClause, GROUPING_CAN_PARTIAL_AGG, IS_PARTITIONED_REL, NIL, PlannerInfo::parse, RelOptInfo::partial_pathlist, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, PARTITIONWISE_AGGREGATE_PARTIAL, RelOptInfo::pathlist, GroupPathExtraData::patype, Path::rows, set_cheapest(), GroupPathExtraData::targetList, and UPPERREL_GROUP_AGG.

Referenced by create_grouping_paths(), and create_partitionwise_grouping_paths().
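
The patype decision above, restated with examples for a table declared PARTITION BY (p):

/*
 *   SELECT p, sum(x) FROM tab GROUP BY p;
 *       -- GROUP BY covers the partition key, so no group can span
 *       -- partitions: each child is aggregated to completion
 *       -- (PARTITIONWISE_AGGREGATE_FULL) and the results appended.
 *
 *   SELECT q, sum(x) FROM tab GROUP BY q;
 *       -- partition key not grouped: a group may span partitions, so
 *       -- children are only partially aggregated and finalized above
 *       -- the Append (PARTITIONWISE_AGGREGATE_PARTIAL), and only if
 *       -- GROUPING_CAN_PARTIAL_AGG is set.
 */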

◆ create_partial_distinct_paths()

static void create_partial_distinct_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo final_distinct_rel 
)
static

Definition at line 4594 of file planner.c.

4596 {
4597  RelOptInfo *partial_distinct_rel;
4598  Query *parse;
4599  List *distinctExprs;
4600  double numDistinctRows;
4601  Path *cheapest_partial_path;
4602  ListCell *lc;
4603 
4604  /* nothing to do when there are no partial paths in the input rel */
4605  if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
4606  return;
4607 
4608  parse = root->parse;
4609 
4610  /* can't do parallel DISTINCT ON */
4611  if (parse->hasDistinctOn)
4612  return;
4613 
4614  partial_distinct_rel = fetch_upper_rel(root, UPPERREL_PARTIAL_DISTINCT,
4615  NULL);
4616  partial_distinct_rel->reltarget = root->upper_targets[UPPERREL_PARTIAL_DISTINCT];
4617  partial_distinct_rel->consider_parallel = input_rel->consider_parallel;
4618 
4619  /*
4620  * If input_rel belongs to a single FDW, so does the partial_distinct_rel.
4621  */
4622  partial_distinct_rel->serverid = input_rel->serverid;
4623  partial_distinct_rel->userid = input_rel->userid;
4624  partial_distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4625  partial_distinct_rel->fdwroutine = input_rel->fdwroutine;
4626 
4627  cheapest_partial_path = linitial(input_rel->partial_pathlist);
4628 
4629  distinctExprs = get_sortgrouplist_exprs(parse->distinctClause,
4630  parse->targetList);
4631 
4632  /* estimate how many distinct rows we'll get from each worker */
4633  numDistinctRows = estimate_num_groups(root, distinctExprs,
4634  cheapest_partial_path->rows,
4635  NULL, NULL);
4636 
4637  /* first try adding unique paths atop of sorted paths */
4638  if (grouping_is_sortable(parse->distinctClause))
4639  {
4640  foreach(lc, input_rel->partial_pathlist)
4641  {
4642  Path *path = (Path *) lfirst(lc);
4643 
4644  if (pathkeys_contained_in(root->distinct_pathkeys, path->pathkeys))
4645  {
4646  add_partial_path(partial_distinct_rel, (Path *)
4647  create_upper_unique_path(root,
4648  partial_distinct_rel,
4649  path,
4650  list_length(root->distinct_pathkeys),
4651  numDistinctRows));
4652  }
4653  }
4654  }
4655 
4656  /*
4657  * Now try hash aggregate paths, if enabled and hashing is possible. Since
4658  * we're not on the hook to ensure we do our best to create at least one
4659  * path here, we treat enable_hashagg as a hard off-switch rather than the
4660  * slightly softer variant in create_final_distinct_paths.
4661  */
4662  if (enable_hashagg && grouping_is_hashable(parse->distinctClause))
4663  {
4664  add_partial_path(partial_distinct_rel, (Path *)
4665  create_agg_path(root,
4666  partial_distinct_rel,
4667  cheapest_partial_path,
4668  cheapest_partial_path->pathtarget,
4669  AGG_HASHED,
4670  AGGSPLIT_SIMPLE,
4671  parse->distinctClause,
4672  NIL,
4673  NULL,
4674  numDistinctRows));
4675  }
4676 
4677  /*
4678  * If there is an FDW that's responsible for all baserels of the query,
4679  * let it consider adding ForeignPaths.
4680  */
4681  if (partial_distinct_rel->fdwroutine &&
4682  partial_distinct_rel->fdwroutine->GetForeignUpperPaths)
4683  partial_distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4684  UPPERREL_PARTIAL_DISTINCT,
4685  input_rel,
4686  partial_distinct_rel,
4687  NULL);
4688 
4689  /* Let extensions possibly add some more partial paths */
4690  if (create_upper_paths_hook)
4691  (*create_upper_paths_hook) (root, UPPERREL_PARTIAL_DISTINCT,
4692  input_rel, partial_distinct_rel, NULL);
4693 
4694  if (partial_distinct_rel->partial_pathlist != NIL)
4695  {
4696  generate_gather_paths(root, partial_distinct_rel, true);
4697  set_cheapest(partial_distinct_rel);
4698 
4699  /*
4700  * Finally, create paths to distinctify the final result. This step
4701  * is needed to remove any duplicates due to combining rows from
4702  * parallel workers.
4703  */
4704  create_final_distinct_paths(root, partial_distinct_rel,
4705  final_distinct_rel);
4706  }
4707 }
void generate_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
Definition: allpaths.c:3002
void add_partial_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:749
@ UPPERREL_PARTIAL_DISTINCT
Definition: pathnodes.h:76

References add_partial_path(), AGG_HASHED, AGGSPLIT_SIMPLE, RelOptInfo::consider_parallel, create_agg_path(), create_final_distinct_paths(), create_upper_paths_hook, create_upper_unique_path(), PlannerInfo::distinct_pathkeys, enable_hashagg, estimate_num_groups(), fetch_upper_rel(), generate_gather_paths(), get_sortgrouplist_exprs(), grouping_is_hashable(), grouping_is_sortable(), lfirst, linitial, list_length(), NIL, parse(), PlannerInfo::parse, RelOptInfo::partial_pathlist, Path::pathkeys, pathkeys_contained_in(), RelOptInfo::reltarget, Path::rows, RelOptInfo::serverid, set_cheapest(), UPPERREL_PARTIAL_DISTINCT, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by create_distinct_paths().
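
The two-step structure above (per-worker de-duplication, then a final pass over the gathered rows) produces plans of roughly this shape:

/*
 *   SELECT DISTINCT x FROM tab;        -- with parallel workers
 *
 *   HashAggregate                      -- create_final_distinct_paths()
 *     -> Gather
 *       -> HashAggregate               -- UPPERREL_PARTIAL_DISTINCT,
 *                                      -- one per worker
 *         -> Parallel Seq Scan on tab
 *
 * DISTINCT ON is excluded: which row represents each group would then
 * depend on which worker happened to see it first.
 */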

◆ create_partial_grouping_paths()

static RelOptInfo * create_partial_grouping_paths ( PlannerInfo root,
RelOptInfo grouped_rel,
RelOptInfo input_rel,
grouping_sets_data gd,
GroupPathExtraData extra,
bool  force_rel_creation 
)
static

Definition at line 6822 of file planner.c.

6828 {
6829  Query *parse = root->parse;
6830  RelOptInfo *partially_grouped_rel;
6831  AggClauseCosts *agg_partial_costs = &extra->agg_partial_costs;
6832  AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
6833  Path *cheapest_partial_path = NULL;
6834  Path *cheapest_total_path = NULL;
6835  double dNumPartialGroups = 0;
6836  double dNumPartialPartialGroups = 0;
6837  ListCell *lc;
6838  bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
6839  bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
6840 
6841  /*
6842  * Consider whether we should generate partially aggregated non-partial
6843  * paths. We can only do this if we have a non-partial path, and only if
6844  * the parent of the input rel is performing partial partitionwise
6845  * aggregation. (Note that extra->patype is the type of partitionwise
6846  * aggregation being used at the parent level, not this level.)
6847  */
6848  if (input_rel->pathlist != NIL &&
6849  extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
6850  cheapest_total_path = input_rel->cheapest_total_path;
6851 
6852  /*
6853  * If parallelism is possible for grouped_rel, then we should consider
6854  * generating partially-grouped partial paths. However, if the input rel
6855  * has no partial paths, then we can't.
6856  */
6857  if (grouped_rel->consider_parallel && input_rel->partial_pathlist != NIL)
6858  cheapest_partial_path = linitial(input_rel->partial_pathlist);
6859 
6860  /*
6861  * If we can't partially aggregate partial paths, and we can't partially
6862  * aggregate non-partial paths, then don't bother creating the new
6863  * RelOptInfo at all, unless the caller specified force_rel_creation.
6864  */
6865  if (cheapest_total_path == NULL &&
6866  cheapest_partial_path == NULL &&
6867  !force_rel_creation)
6868  return NULL;
6869 
6870  /*
6871  * Build a new upper relation to represent the result of partially
6872  * aggregating the rows from the input relation.
6873  */
6874  partially_grouped_rel = fetch_upper_rel(root,
6875  UPPERREL_PARTIAL_GROUP_AGG,
6876  grouped_rel->relids);
6877  partially_grouped_rel->consider_parallel =
6878  grouped_rel->consider_parallel;
6879  partially_grouped_rel->reloptkind = grouped_rel->reloptkind;
6880  partially_grouped_rel->serverid = grouped_rel->serverid;
6881  partially_grouped_rel->userid = grouped_rel->userid;
6882  partially_grouped_rel->useridiscurrent = grouped_rel->useridiscurrent;
6883  partially_grouped_rel->fdwroutine = grouped_rel->fdwroutine;
6884 
6885  /*
6886  * Build target list for partial aggregate paths. These paths cannot just
6887  * emit the same tlist as regular aggregate paths, because (1) we must
6888  * include Vars and Aggrefs needed in HAVING, which might not appear in
6889  * the result tlist, and (2) the Aggrefs must be set in partial mode.
6890  */
6891  partially_grouped_rel->reltarget =
6892  make_partial_grouping_target(root, grouped_rel->reltarget,
6893  extra->havingQual);
6894 
6895  if (!extra->partial_costs_set)
6896  {
6897  /*
6898  * Collect statistics about aggregates for estimating costs of
6899  * performing aggregation in parallel.
6900  */
6901  MemSet(agg_partial_costs, 0, sizeof(AggClauseCosts));
6902  MemSet(agg_final_costs, 0, sizeof(AggClauseCosts));
6903  if (parse->hasAggs)
6904  {
6905  /* partial phase */
6906  get_agg_clause_costs(root, AGGSPLIT_INITIAL_SERIAL,
6907  agg_partial_costs);
6908 
6909  /* final phase */
6910  get_agg_clause_costs(root, AGGSPLIT_FINAL_DESERIAL,
6911  agg_final_costs);
6912  }
6913 
6914  extra->partial_costs_set = true;
6915  }
6916 
6917  /* Estimate number of partial groups. */
6918  if (cheapest_total_path != NULL)
6919  dNumPartialGroups =
6920  get_number_of_groups(root,
6921  cheapest_total_path->rows,
6922  gd,
6923  extra->targetList);
6924  if (cheapest_partial_path != NULL)
6925  dNumPartialPartialGroups =
6926  get_number_of_groups(root,
6927  cheapest_partial_path->rows,
6928  gd,
6929  extra->targetList);
6930 
6931  if (can_sort && cheapest_total_path != NULL)
6932  {
6933  /* This should have been checked previously */
6934  Assert(parse->hasAggs || parse->groupClause);
6935 
6936  /*
6937  * Use any available suitably-sorted path as input, and also consider
6938  * sorting the cheapest partial path.
6939  */
6940  foreach(lc, input_rel->pathlist)
6941  {
6942  Path *path = (Path *) lfirst(lc);
6943  bool is_sorted;
6944 
6945  is_sorted = pathkeys_contained_in(root->group_pathkeys,
6946  path->pathkeys);
6947  if (path == cheapest_total_path || is_sorted)
6948  {
6949  /* Sort the cheapest partial path, if it isn't already */
6950  if (!is_sorted)
6951  path = (Path *) create_sort_path(root,
6952  partially_grouped_rel,
6953  path,
6954  root->group_pathkeys,
6955  -1.0);
6956 
6957  if (parse->hasAggs)
6958  add_path(partially_grouped_rel, (Path *)
6959  create_agg_path(root,
6960  partially_grouped_rel,
6961  path,
6962  partially_grouped_rel->reltarget,
6963  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
6964  AGGSPLIT_INITIAL_SERIAL,
6965  parse->groupClause,
6966  NIL,
6967  agg_partial_costs,
6968  dNumPartialGroups));
6969  else
6970  add_path(partially_grouped_rel, (Path *)
6971  create_group_path(root,
6972  partially_grouped_rel,
6973  path,
6974  parse->groupClause,
6975  NIL,
6976  dNumPartialGroups));
6977  }
6978  }
6979 
6980  /*
6981  * Consider incremental sort on all partial paths, if enabled.
6982  *
6983  * We can also skip the entire loop when we only have a single-item
6984  * group_pathkeys because then we can't possibly have a presorted
6985  * prefix of the list without having the list be fully sorted.
6986  */
6987  if (enable_incremental_sort && list_length(root->group_pathkeys) > 1)
6988  {
6989  foreach(lc, input_rel->pathlist)
6990  {
6991  Path *path = (Path *) lfirst(lc);
6992  bool is_sorted;
6993  int presorted_keys;
6994 
6995  is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
6996  path->pathkeys,
6997  &presorted_keys);
6998 
6999  /* Ignore already sorted paths */
7000  if (is_sorted)
7001  continue;
7002 
7003  if (presorted_keys == 0)
7004  continue;
7005 
7006  /* Since we have presorted keys, consider incremental sort. */
7007  path = (Path *) create_incremental_sort_path(root,
7008  partially_grouped_rel,
7009  path,
7010  root->group_pathkeys,
7011  presorted_keys,
7012  -1.0);
7013 
7014  if (parse->hasAggs)
7015  add_path(partially_grouped_rel, (Path *)
7016  create_agg_path(root,
7017  partially_grouped_rel,
7018  path,
7019  partially_grouped_rel->reltarget,
7020  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7021  AGGSPLIT_INITIAL_SERIAL,
7022  parse->groupClause,
7023  NIL,
7024  agg_partial_costs,
7025  dNumPartialGroups));
7026  else
7027  add_path(partially_grouped_rel, (Path *)
7028  create_group_path(root,
7029  partially_grouped_rel,
7030  path,
7031  parse->groupClause,
7032  NIL,
7033  dNumPartialGroups));
7034  }
7035  }
7036  }
7037 
7038  if (can_sort && cheapest_partial_path != NULL)
7039  {
7040  /* Similar to above logic, but for partial paths. */
7041  foreach(lc, input_rel->partial_pathlist)
7042  {
7043  Path *path = (Path *) lfirst(lc);
7044  Path *path_original = path;
7045  bool is_sorted;
7046  int presorted_keys;
7047 
7048  is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
7049  path->pathkeys,
7050  &presorted_keys);
7051 
7052  if (path == cheapest_partial_path || is_sorted)
7053  {
7054  /* Sort the cheapest partial path, if it isn't already */
7055  if (!is_sorted)
7056  path = (Path *) create_sort_path(root,
7057  partially_grouped_rel,
7058  path,
7059  root->group_pathkeys,
7060  -1.0);
7061 
7062  if (parse->hasAggs)
7063  add_partial_path(partially_grouped_rel, (Path *)
7064  create_agg_path(root,
7065  partially_grouped_rel,
7066  path,
7067  partially_grouped_rel->reltarget,
7068  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7069  AGGSPLIT_INITIAL_SERIAL,
7070  parse->groupClause,
7071  NIL,
7072  agg_partial_costs,
7073  dNumPartialPartialGroups));
7074  else
7075  add_partial_path(partially_grouped_rel, (Path *)
7076  create_group_path(root,
7077  partially_grouped_rel,
7078  path,
7079  parse->groupClause,
7080  NIL,
7081  dNumPartialPartialGroups));
7082  }
7083 
7084  /*
7085  * Now we may consider incremental sort on this path, but only
7086  * when the path is not already sorted and when incremental sort
7087  * is enabled.
7088  */
7089  if (is_sorted || !enable_incremental_sort)
7090  continue;
7091 
7092  /* Restore the input path (we might have added Sort on top). */
7093  path = path_original;
7094 
7095  /* no shared prefix, no point in building incremental sort */
7096  if (presorted_keys == 0)
7097  continue;
7098 
7099  /*
7100  * We should have already excluded pathkeys of length 1 because
7101  * then presorted_keys > 0 would imply is_sorted was true.
7102  */
7103  Assert(list_length(root->group_pathkeys) != 1);
7104 
7105  path = (Path *) create_incremental_sort_path(root,
7106  partially_grouped_rel,
7107  path,
7108  root->group_pathkeys,
7109  presorted_keys,
7110  -1.0);
7111 
7112  if (parse->hasAggs)
7113  add_partial_path(partially_grouped_rel, (Path *)
7114  create_agg_path(root,
7115  partially_grouped_rel,
7116  path,
7117  partially_grouped_rel->reltarget,
7118  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7119  AGGSPLIT_INITIAL_SERIAL,
7120  parse->groupClause,
7121  NIL,
7122  agg_partial_costs,
7123  dNumPartialPartialGroups));
7124  else
7125  add_partial_path(partially_grouped_rel, (Path *)
7126  create_group_path(root,
7127  partially_grouped_rel,
7128  path,
7129  parse->groupClause,
7130  NIL,
7131  dNumPartialPartialGroups));
7132  }
7133  }
7134 
7135  /*
7136  * Add a partially-grouped HashAgg Path where possible
7137  */
7138  if (can_hash && cheapest_total_path != NULL)
7139  {
7140  /* Checked above */
7141  Assert(parse->hasAggs || parse->groupClause);
7142 
7143  add_path(partially_grouped_rel, (Path *)
7144  create_agg_path(root,
7145  partially_grouped_rel,
7146  cheapest_total_path,
7147  partially_grouped_rel->reltarget,
7148  AGG_HASHED,
7149  AGGSPLIT_INITIAL_SERIAL,
7150  parse->groupClause,
7151  NIL,
7152  agg_partial_costs,
7153  dNumPartialGroups));
7154  }
7155 
7156  /*
7157  * Now add a partially-grouped HashAgg partial Path where possible
7158  */
7159  if (can_hash && cheapest_partial_path != NULL)
7160  {
7161  add_partial_path(partially_grouped_rel, (Path *)
7162  create_agg_path(root,
7163  partially_grouped_rel,
7164  cheapest_partial_path,
7165  partially_grouped_rel->reltarget,
7166  AGG_HASHED,
7167  AGGSPLIT_INITIAL_SERIAL,
7168  parse->groupClause,
7169  NIL,
7170  agg_partial_costs,
7171  dNumPartialPartialGroups));
7172  }
7173 
7174  /*
7175  * If there is an FDW that's responsible for all baserels of the query,
7176  * let it consider adding partially grouped ForeignPaths.
7177  */
7178  if (partially_grouped_rel->fdwroutine &&
7179  partially_grouped_rel->fdwroutine->GetForeignUpperPaths)
7180  {
7181  FdwRoutine *fdwroutine = partially_grouped_rel->fdwroutine;
7182 
7183  fdwroutine->GetForeignUpperPaths(root,
7184  UPPERREL_PARTIAL_GROUP_AGG,
7185  input_rel, partially_grouped_rel,
7186  extra);
7187  }
7188 
7189  return partially_grouped_rel;
7190 }
@ AGGSPLIT_INITIAL_SERIAL
Definition: nodes.h:376
@ UPPERREL_PARTIAL_GROUP_AGG
Definition: pathnodes.h:72
static PathTarget * make_partial_grouping_target(PlannerInfo *root, PathTarget *grouping_target, Node *havingQual)
Definition: planner.c:5285
GetForeignUpperPaths_function GetForeignUpperPaths
Definition: fdwapi.h:226
AggClauseCosts agg_partial_costs
Definition: pathnodes.h:3078
RelOptKind reloptkind
Definition: pathnodes.h:815

References add_partial_path(), add_path(), GroupPathExtraData::agg_final_costs, AGG_HASHED, GroupPathExtraData::agg_partial_costs, AGG_PLAIN, AGG_SORTED, AGGSPLIT_FINAL_DESERIAL, AGGSPLIT_INITIAL_SERIAL, Assert(), RelOptInfo::cheapest_total_path, RelOptInfo::consider_parallel, create_agg_path(), create_group_path(), create_incremental_sort_path(), create_sort_path(), enable_incremental_sort, fetch_upper_rel(), GroupPathExtraData::flags, get_agg_clause_costs(), get_number_of_groups(), FdwRoutine::GetForeignUpperPaths, PlannerInfo::group_pathkeys, GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, GroupPathExtraData::havingQual, lfirst, linitial, list_length(), make_partial_grouping_target(), MemSet, NIL, parse(), PlannerInfo::parse, GroupPathExtraData::partial_costs_set, RelOptInfo::partial_pathlist, PARTITIONWISE_AGGREGATE_PARTIAL, Path::pathkeys, pathkeys_contained_in(), pathkeys_count_contained_in(), RelOptInfo::pathlist, GroupPathExtraData::patype, RelOptInfo::relids, RelOptInfo::reloptkind, RelOptInfo::reltarget, Path::rows, RelOptInfo::serverid, GroupPathExtraData::targetList, UPPERREL_PARTIAL_GROUP_AGG, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by create_ordinary_grouping_paths().
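
The two cost sets collected above correspond to the split execution of each aggregate; taking avg(x) as the usual example:

/*
 *   AGGSPLIT_INITIAL_SERIAL   -- worker side: run only the transition
 *                                function and emit the serialized
 *                                transition state (for avg, a running
 *                                {count, sum}); no final function.
 *   AGGSPLIT_FINAL_DESERIAL   -- leader side: deserialize and merge the
 *                                partial states with the combine
 *                                function, then apply the final
 *                                function (sum/count).
 *
 * This is also why partially grouped paths need their own reltarget,
 * built by make_partial_grouping_target(): they emit transition states,
 * not finished aggregate values.
 */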

◆ create_partitionwise_grouping_paths()

static void create_partitionwise_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo grouped_rel,
RelOptInfo partially_grouped_rel,
const AggClauseCosts agg_costs,
grouping_sets_data gd,
PartitionwiseAggregateType  patype,
GroupPathExtraData extra 
)
static

Definition at line 7571 of file planner.c.

7579 {
7580  List *grouped_live_children = NIL;
7581  List *partially_grouped_live_children = NIL;
7582  PathTarget *target = grouped_rel->reltarget;
7583  bool partial_grouping_valid = true;
7584  int i;
7585 
7586  Assert(patype != PARTITIONWISE_AGGREGATE_NONE);
7587  Assert(patype != PARTITIONWISE_AGGREGATE_PARTIAL ||
7588  partially_grouped_rel != NULL);
7589 
7590  /* Add paths for partitionwise aggregation/grouping. */
7591  i = -1;
7592  while ((i = bms_next_member(input_rel->live_parts, i)) >= 0)
7593  {
7594  RelOptInfo *child_input_rel = input_rel->part_rels[i];
7595  PathTarget *child_target;
7596  AppendRelInfo **appinfos;
7597  int nappinfos;
7598  GroupPathExtraData child_extra;
7599  RelOptInfo *child_grouped_rel;
7600  RelOptInfo *child_partially_grouped_rel;
7601 
7602  Assert(child_input_rel != NULL);
7603 
7604  /* Dummy children can be ignored. */
7605  if (IS_DUMMY_REL(child_input_rel))
7606  continue;
7607 
7608  child_target = copy_pathtarget(target);
7609 
7610  /*
7611  * Copy the given "extra" structure as is and then override the
7612  * members specific to this child.
7613  */
7614  memcpy(&child_extra, extra, sizeof(child_extra));
7615 
7616  appinfos = find_appinfos_by_relids(root, child_input_rel->relids,
7617  &nappinfos);
7618 
7619  child_target->exprs = (List *)
7620  adjust_appendrel_attrs(root,
7621  (Node *) target->exprs,
7622  nappinfos, appinfos);
7623 
7624  /* Translate havingQual and targetList. */
7625  child_extra.havingQual = (Node *)
7626  adjust_appendrel_attrs(root,
7627  extra->havingQual,
7628  nappinfos, appinfos);
7629  child_extra.targetList = (List *)
7630  adjust_appendrel_attrs(root,
7631  (Node *) extra->targetList,
7632  nappinfos, appinfos);
7633 
7634  /*
7635  * extra->patype was the value computed for our parent rel; patype is
7636  * the value for this relation. For the child, our value is its
7637  * parent rel's value.
7638  */
7639  child_extra.patype = patype;
7640 
7641  /*
7642  * Create grouping relation to hold fully aggregated grouping and/or
7643  * aggregation paths for the child.
7644  */
7645  child_grouped_rel = make_grouping_rel(root, child_input_rel,
7646  child_target,
7647  extra->target_parallel_safe,
7648  child_extra.havingQual);
7649 
7650  /* Create grouping paths for this child relation. */
7651  create_ordinary_grouping_paths(root, child_input_rel,
7652  child_grouped_rel,
7653  agg_costs, gd, &child_extra,
7654  &child_partially_grouped_rel);
7655 
7656  if (child_partially_grouped_rel)
7657  {
7658  partially_grouped_live_children =
7659  lappend(partially_grouped_live_children,
7660  child_partially_grouped_rel);
7661  }
7662  else
7663  partial_grouping_valid = false;
7664 
7665  if (patype == PARTITIONWISE_AGGREGATE_FULL)
7666  {
7667  set_cheapest(child_grouped_rel);
7668  grouped_live_children = lappend(grouped_live_children,
7669  child_grouped_rel);
7670  }
7671 
7672  pfree(appinfos);
7673  }
7674 
7675  /*
7676  * Try to create append paths for partially grouped children. For full
7677  * partitionwise aggregation, we might have paths in the partial_pathlist
7678  * if parallel aggregation is possible. For partial partitionwise
7679  * aggregation, we may have paths in both pathlist and partial_pathlist.
7680  *
7681  * NB: We must have a partially grouped path for every child in order to
7682  * generate a partially grouped path for this relation.
7683  */
7684  if (partially_grouped_rel && partial_grouping_valid)
7685  {
7686  Assert(partially_grouped_live_children != NIL);
7687 
7688  add_paths_to_append_rel(root, partially_grouped_rel,
7689  partially_grouped_live_children);
7690 
7691  /*
7692  * We need to call set_cheapest, since the finalization step will use the
7693  * cheapest path from the rel.
7694  */
7695  if (partially_grouped_rel->pathlist)
7696  set_cheapest(partially_grouped_rel);
7697  }
7698 
7699  /* If possible, create append paths for fully grouped children. */
7700  if (patype == PARTITIONWISE_AGGREGATE_FULL)
7701  {
7702  Assert(grouped_live_children != NIL);
7703 
7704  add_paths_to_append_rel(root, grouped_rel, grouped_live_children);
7705  }
7706 }

References add_paths_to_append_rel(), adjust_appendrel_attrs(), Assert(), bms_next_member(), copy_pathtarget(), create_ordinary_grouping_paths(), PathTarget::exprs, find_appinfos_by_relids(), GroupPathExtraData::havingQual, i, IS_DUMMY_REL, lappend(), RelOptInfo::live_parts, make_grouping_rel(), NIL, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, PARTITIONWISE_AGGREGATE_PARTIAL, RelOptInfo::pathlist, GroupPathExtraData::patype, pfree(), RelOptInfo::relids, RelOptInfo::reltarget, set_cheapest(), GroupPathExtraData::target_parallel_safe, and GroupPathExtraData::targetList.

Referenced by create_ordinary_grouping_paths().
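The two patype modes lead to differently shaped plans. As an informal illustration (not taken from the source):

/*
 * PARTITIONWISE_AGGREGATE_FULL         PARTITIONWISE_AGGREGATE_PARTIAL
 *   Append                               Finalize Aggregate
 *     -> Aggregate (partition 1)           -> Append
 *     -> Aggregate (partition 2)                -> Partial Aggregate (partition 1)
 *                                               -> Partial Aggregate (partition 2)
 *
 * FULL is only chosen when the GROUP BY clause covers every partition key
 * (see group_by_has_partkey()), so that no group can span partitions;
 * otherwise at most PARTIAL is attempted.
 */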

◆ create_window_paths()

static RelOptInfo * create_window_paths ( PlannerInfo *  root,
RelOptInfo *  input_rel,
PathTarget *  input_target,
PathTarget *  output_target,
bool  output_target_parallel_safe,
WindowFuncLists *  wflists,
List *  activeWindows 
)
static

Definition at line 4308 of file planner.c.

4315 {
4316  RelOptInfo *window_rel;
4317  ListCell *lc;
4318 
4319  /* For now, do all work in the (WINDOW, NULL) upperrel */
4320  window_rel = fetch_upper_rel(root, UPPERREL_WINDOW, NULL);
4321 
4322  /*
4323  * If the input relation is not parallel-safe, then the window relation
4324  * can't be parallel-safe, either. Otherwise, we need to examine the
4325  * target list and active windows for non-parallel-safe constructs.
4326  */
4327  if (input_rel->consider_parallel && output_target_parallel_safe &&
4328  is_parallel_safe(root, (Node *) activeWindows))
4329  window_rel->consider_parallel = true;
4330 
4331  /*
4332  * If the input rel belongs to a single FDW, so does the window rel.
4333  */
4334  window_rel->serverid = input_rel->serverid;
4335  window_rel->userid = input_rel->userid;
4336  window_rel->useridiscurrent = input_rel->useridiscurrent;
4337  window_rel->fdwroutine = input_rel->fdwroutine;
4338 
4339  /*
4340  * Consider computing window functions starting from the existing
4341  * cheapest-total path (which will likely require a sort) as well as any
4342  * existing paths that satisfy or partially satisfy root->window_pathkeys.
4343  */
4344  foreach(lc, input_rel->pathlist)
4345  {
4346  Path *path = (Path *) lfirst(lc);
4347  int presorted_keys;
4348 
4349  if (path == input_rel->cheapest_total_path ||
4350  pathkeys_count_contained_in(root->window_pathkeys, path->pathkeys,
4351  &presorted_keys) ||
4352  presorted_keys > 0)
4353  create_one_window_path(root,
4354  window_rel,
4355  path,
4356  input_target,
4357  output_target,
4358  wflists,
4359  activeWindows);
4360  }
4361 
4362  /*
4363  * If there is an FDW that's responsible for all baserels of the query,
4364  * let it consider adding ForeignPaths.
4365  */
4366  if (window_rel->fdwroutine &&
4367  window_rel->fdwroutine->GetForeignUpperPaths)
4368  window_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_WINDOW,
4369  input_rel, window_rel,
4370  NULL);
4371 
4372  /* Let extensions possibly add some more paths */
4373  if (create_upper_paths_hook)
4374  (*create_upper_paths_hook) (root, UPPERREL_WINDOW,
4375  input_rel, window_rel, NULL);
4376 
4377  /* Now choose the best path(s) */
4378  set_cheapest(window_rel);
4379 
4380  return window_rel;
4381 }
bool is_parallel_safe(PlannerInfo *root, Node *node)
Definition: clauses.c:634
@ UPPERREL_WINDOW
Definition: pathnodes.h:75
static void create_one_window_path(PlannerInfo *root, RelOptInfo *window_rel, Path *path, PathTarget *input_target, PathTarget *output_target, WindowFuncLists *wflists, List *activeWindows)
Definition: planner.c:4395
List * window_pathkeys
Definition: pathnodes.h:385

References RelOptInfo::cheapest_total_path, RelOptInfo::consider_parallel, create_one_window_path(), create_upper_paths_hook, fetch_upper_rel(), is_parallel_safe(), lfirst, Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::pathlist, RelOptInfo::serverid, set_cheapest(), UPPERREL_WINDOW, RelOptInfo::userid, RelOptInfo::useridiscurrent, and PlannerInfo::window_pathkeys.

Referenced by grouping_planner().
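In short, the loop above considers an input path when it is the cheapest-total path, when its pathkeys already satisfy root->window_pathkeys, or when it has at least a presorted prefix of them. A sketch of the effect (informal, not from the source):

/*
 * cheapest-total path       -> full sort on window_pathkeys if needed
 * fully presorted path      -> no sort at all
 * partially presorted path  -> create_one_window_path() may use an
 *                              incremental rather than a full sort
 */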

◆ expression_planner()

Expr* expression_planner ( Expr *  expr)

Definition at line 6147 of file planner.c.

6148 {
6149  Node *result;
6150 
6151  /*
6152  * Convert named-argument function calls, insert default arguments and
6153  * simplify constant subexprs
6154  */
6155  result = eval_const_expressions(NULL, (Node *) expr);
6156 
6157  /* Fill in opfuncid values if missing */
6158  fix_opfuncids(result);
6159 
6160  return (Expr *) result;
6161 }
Node * eval_const_expressions(PlannerInfo *root, Node *node)
Definition: clauses.c:2132
void fix_opfuncids(Node *node)
Definition: nodeFuncs.c:1641

References eval_const_expressions(), and fix_opfuncids().

Referenced by ATExecAddColumn(), ATPrepAlterColumnType(), BeginCopyFrom(), CheckMutability(), ComputePartitionAttrs(), ExecPrepareCheck(), ExecPrepareExpr(), ExecPrepareQual(), load_domaintype_info(), set_baserel_partition_constraint(), slot_fill_defaults(), StoreAttrDefault(), and transformPartitionBoundValue().
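For orientation, a hedged sketch of a typical caller (the helper name is invented; real callers are listed above). The input is assumed to be an already parse-analyzed expression, such as a column default or CHECK constraint:

#include "postgres.h"
#include "nodes/primnodes.h"
#include "optimizer/optimizer.h"

/*
 * Sketch only: prepare a standalone expression for execution, in the
 * style of ExecPrepareExpr() or StoreAttrDefault().
 */
static Expr *
prepare_standalone_expr(Expr *parse_analyzed_expr)
{
	/* const-folds, inserts default arguments, and fills opfuncids */
	return expression_planner(parse_analyzed_expr);
}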

◆ expression_planner_with_deps()

Expr* expression_planner_with_deps ( Expr *  expr,
List **  relationOids,
List **  invalItems 
)

Definition at line 6174 of file planner.c.

6177 {
6178  Node *result;
6179  PlannerGlobal glob;
6180  PlannerInfo root;
6181 
6182  /* Make up dummy planner state so we can use setrefs machinery */
6183  MemSet(&glob, 0, sizeof(glob));
6184  glob.type = T_PlannerGlobal;
6185  glob.relationOids = NIL;
6186  glob.invalItems = NIL;
6187 
6188  MemSet(&root, 0, sizeof(root));
6189  root.type = T_PlannerInfo;
6190  root.glob = &glob;
6191 
6192  /*
6193  * Convert named-argument function calls, insert default arguments and
6194  * simplify constant subexprs. Collect identities of inlined functions
6195  * and elided domains, too.
6196  */
6197  result = eval_const_expressions(&root, (Node *) expr);
6198 
6199  /* Fill in opfuncid values if missing */
6200  fix_opfuncids(result);
6201 
6202  /*
6203  * Now walk the finished expression to find anything else we ought to
6204  * record as an expression dependency.
6205  */
6206  (void) extract_query_dependencies_walker(result, &root);
6207 
6208  *relationOids = glob.relationOids;
6209  *invalItems = glob.invalItems;
6210 
6211  return (Expr *) result;
6212 }
bool extract_query_dependencies_walker(Node *node, PlannerInfo *context)
Definition: setrefs.c:3333
List * invalItems
Definition: pathnodes.h:132
List * relationOids
Definition: pathnodes.h:129
PlannerGlobal * glob
Definition: pathnodes.h:202

References eval_const_expressions(), extract_query_dependencies_walker(), fix_opfuncids(), PlannerInfo::glob, PlannerGlobal::invalItems, MemSet, NIL, and PlannerGlobal::relationOids.

Referenced by GetCachedExpression().
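Compared to expression_planner(), this variant also reports what the expression depends on. A hedged sketch of a caller in the style of GetCachedExpression() (function name invented):

#include "postgres.h"
#include "nodes/pg_list.h"
#include "nodes/primnodes.h"
#include "optimizer/optimizer.h"

static Expr *
plan_expr_with_invalidation(Expr *expr)
{
	List	   *relationOids = NIL;
	List	   *invalItems = NIL;
	Expr	   *planned;

	planned = expression_planner_with_deps(expr, &relationOids, &invalItems);

	/*
	 * relationOids and invalItems now identify the relations and syscache
	 * entries the planned expression depends on; a cache would store them
	 * and drop the entry when any of those objects change.
	 */
	return planned;
}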

◆ extract_rollup_sets()

static List * extract_rollup_sets ( List *  groupingSets)
static

Definition at line 2877 of file planner.c.

2878 {
2879  int num_sets_raw = list_length(groupingSets);
2880  int num_empty = 0;
2881  int num_sets = 0; /* distinct sets */
2882  int num_chains = 0;
2883  List *result = NIL;
2884  List **results;
2885  List **orig_sets;
2886  Bitmapset **set_masks;
2887  int *chains;
2888  short **adjacency;
2889  short *adjacency_buf;
2890  BipartiteMatchState *state;
2891  int i;
2892  int j;
2893  int j_size;
2894  ListCell *lc1 = list_head(groupingSets);
2895  ListCell *lc;
2896 
2897  /*
2898  * Start by stripping out empty sets. The algorithm doesn't require this,
2899  * but the planner currently needs all empty sets to be returned in the
2900  * first list, so we strip them here and add them back after.
2901  */
2902  while (lc1 && lfirst(lc1) == NIL)
2903  {
2904  ++num_empty;
2905  lc1 = lnext(groupingSets, lc1);
2906  }
2907 
2908  /* bail out now if it turns out that all we had were empty sets. */
2909  if (!lc1)
2910  return list_make1(groupingSets);
2911 
2912  /*----------
2913  * We don't strictly need to remove duplicate sets here, but if we don't,
2914  * they tend to become scattered through the result, which is a bit
2915  * confusing (and irritating if we ever decide to optimize them out).
2916  * So we remove them here and add them back after.
2917  *
2918  * For each non-duplicate set, we fill in the following:
2919  *
2920  * orig_sets[i] = list of the original set lists
2921  * set_masks[i] = bitmapset for testing inclusion
2922  * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
2923  *
2924  * chains[i] will be the result group this set is assigned to.
2925  *
2926  * We index all of these from 1 rather than 0 because it is convenient
2927  * to leave 0 free for the NIL node in the graph algorithm.
2928  *----------
2929  */
2930  orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
2931  set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
2932  adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
2933  adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
2934 
2935  j_size = 0;
2936  j = 0;
2937  i = 1;
2938 
2939  for_each_cell(lc, groupingSets, lc1)
2940  {
2941  List *candidate = (List *) lfirst(lc);
2942  Bitmapset *candidate_set = NULL;
2943  ListCell *lc2;
2944  int dup_of = 0;
2945 
2946  foreach(lc2, candidate)
2947  {
2948  candidate_set = bms_add_member(candidate_set, lfirst_int(lc2));
2949  }
2950 
2951  /* we can only be a dup if we're the same length as a previous set */
2952  if (j_size == list_length(candidate))
2953  {
2954  int k;
2955 
2956  for (k = j; k < i; ++k)
2957  {
2958  if (bms_equal(set_masks[k], candidate_set))
2959  {
2960  dup_of = k;
2961  break;
2962  }
2963  }
2964  }
2965  else if (j_size < list_length(candidate))
2966  {
2967  j_size = list_length(candidate);
2968  j = i;
2969  }
2970 
2971  if (dup_of > 0)
2972  {
2973  orig_sets[dup_of] = lappend(orig_sets[dup_of], candidate);
2974  bms_free(candidate_set);
2975  }
2976  else
2977  {
2978  int k;
2979  int n_adj = 0;
2980 
2981  orig_sets[i] = list_make1(candidate);
2982  set_masks[i] = candidate_set;
2983 
2984  /* fill in adjacency list; no need to compare equal-size sets */
2985 
2986  for (k = j - 1; k > 0; --k)
2987  {
2988  if (bms_is_subset(set_masks[k], candidate_set))
2989  adjacency_buf[++n_adj] = k;
2990  }
2991 
2992  if (n_adj > 0)
2993  {
2994  adjacency_buf[0] = n_adj;
2995  adjacency[i] = palloc((n_adj + 1) * sizeof(short));
2996  memcpy(adjacency[i], adjacency_buf, (n_adj + 1) * sizeof(short));
2997  }
2998  else
2999  adjacency[i] = NULL;
3000 
3001  ++i;
3002  }
3003  }
3004 
3005  num_sets = i - 1;
3006 
3007  /*
3008  * Apply the graph matching algorithm to do the work.
3009  */
3010  state = BipartiteMatch(num_sets, num_sets, adjacency);
3011 
3012  /*
3013  * Now, the state->pair* fields have the info we need to assign sets to
3014  * chains. Two sets (u,v) belong to the same chain if pair_uv[u] = v or
3015  * pair_vu[v] = u (both will be true, but we check both so that we can do
3016  * it in one pass)
3017  */
3018  chains = palloc0((num_sets + 1) * sizeof(int));
3019 
3020  for (i = 1; i <= num_sets; ++i)
3021  {
3022  int u = state->pair_vu[i];
3023  int v = state->pair_uv[i];
3024 
3025  if (u > 0 && u < i)
3026  chains[i] = chains[u];
3027  else if (v > 0 && v < i)
3028  chains[i] = chains[v];
3029  else
3030  chains[i] = ++num_chains;
3031  }
3032 
3033  /* build result lists. */
3034  results = palloc0((num_chains + 1) * sizeof(List *));
3035 
3036  for (i = 1; i <= num_sets; ++i)
3037  {
3038  int c = chains[i];
3039 
3040  Assert(c > 0);
3041 
3042  results[c] = list_concat(results[c], orig_sets[i]);
3043  }
3044 
3045  /* push any empty sets back on the first list. */
3046  while (num_empty-- > 0)
3047  results[1] = lcons(NIL, results[1]);
3048 
3049  /* make result list */
3050  for (i = 1; i <= num_chains; ++i)
3051  result = lappend(result, results[i]);
3052 
3053  /*
3054  * Free all the things.
3055  *
3056  * (This is over-fussy for small sets but for large sets we could have
3057  * tied up a nontrivial amount of memory.)
3058  */
3058  */
3059  BipartiteMatchFree(state);
3060  pfree(results);
3061  pfree(chains);
3062  for (i = 1; i <= num_sets; ++i)
3063  if (adjacency[i])
3064  pfree(adjacency[i]);
3065  pfree(adjacency);
3066  pfree(adjacency_buf);
3067  pfree(orig_sets);
3068  for (i = 1; i <= num_sets; ++i)
3069  bms_free(set_masks[i]);
3070  pfree(set_masks);
3071 
3072  return result;
3073 }
void BipartiteMatchFree(BipartiteMatchState *state)
BipartiteMatchState * BipartiteMatch(int u_size, int v_size, short **adjacency)
bool bms_equal(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:94
bool bms_is_subset(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:316
void bms_free(Bitmapset *a)
Definition: bitmapset.c:209
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:739
void * palloc0(Size size)
Definition: mcxt.c:1230

References Assert(), BipartiteMatch(), BipartiteMatchFree(), bms_add_member(), bms_equal(), bms_free(), bms_is_subset(), for_each_cell, i, j, lappend(), lcons(), lfirst, lfirst_int, list_concat(), list_head(), list_length(), list_make1, lnext(), NIL, palloc(), palloc0(), and pfree().

Referenced by preprocess_grouping_sets().
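A worked example may help (illustrative; the integers stand for sortgroupref indexes):

/*
 * Input grouping sets:  (), (1,2,3), (1,2), (3,4), (1), (4)
 *
 * Ignoring the empty set, the subset relation admits two chains:
 *
 *   (1)  is a subset of  (1,2)  is a subset of  (1,2,3)
 *   (4)  is a subset of  (3,4)
 *
 * so the minimum number of rollups is two.  The empty set is pushed back
 * onto the first list, giving roughly:
 *
 *   [ ((), (1), (1,2), (1,2,3)), ((4), (3,4)) ]
 *
 * The within-chain sort order is finalized later by reorder_grouping_sets().
 */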

◆ gather_grouping_paths()

static void gather_grouping_paths ( PlannerInfo *  root,
RelOptInfo *  rel 
)
static

Definition at line 7206 of file planner.c.

7207 {
7208  ListCell *lc;
7209  Path *cheapest_partial_path;
7210 
7211  /* Try Gather for unordered paths and Gather Merge for ordered ones. */
7212  generate_useful_gather_paths(root, rel, true);
7213 
7214  /* Try cheapest partial path + explicit Sort + Gather Merge. */
7215  cheapest_partial_path = linitial(rel->partial_pathlist);
7216  if (!pathkeys_contained_in(root->group_pathkeys,
7217  cheapest_partial_path->pathkeys))
7218  {
7219  Path *path;
7220  double total_groups;
7221 
7222  total_groups =
7223  cheapest_partial_path->rows * cheapest_partial_path->parallel_workers;
7224  path = (Path *) create_sort_path(root, rel, cheapest_partial_path,
7225  root->group_pathkeys,
7226  -1.0);
7227  path = (Path *)
7228  create_gather_merge_path(root,
7229  rel,
7230  path,
7231  rel->reltarget,
7232  root->group_pathkeys,
7233  NULL,
7234  &total_groups);
7235 
7236  add_path(rel, path);
7237  }
7238 
7239  /*
7240  * Consider incremental sort on all partial paths, if enabled.
7241  *
7242  * We can also skip the entire loop when we only have a single-item
7243  * group_pathkeys because then we can't possibly have a presorted prefix
7244  * of the list without having the list be fully sorted.
7245  */
7246  if (!enable_incremental_sort || list_length(root->group_pathkeys) == 1)
7247  return;
7248 
7249  /* also consider incremental sort on partial paths, if enabled */
7250  foreach(lc, rel->partial_pathlist)
7251  {
7252  Path *path = (Path *) lfirst(lc);
7253  bool is_sorted;
7254  int presorted_keys;
7255  double total_groups;
7256 
7257  is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
7258  path->pathkeys,
7259  &presorted_keys);
7260 
7261  if (is_sorted)
7262  continue;
7263 
7264  if (presorted_keys == 0)
7265  continue;
7266 
7267  path = (Path *) create_incremental_sort_path(root,
7268  rel,
7269  path,
7270  root->group_pathkeys,
7271  presorted_keys,
7272  -1.0);
7273 
7274  path = (Path *)
7275  create_gather_merge_path(root,
7276  rel,
7277  path,
7278  rel->reltarget,
7279  root->group_pathkeys,
7280  NULL,
7281  &total_groups);
7282 
7283  add_path(rel, path);
7284  }
7285 }

References add_path(), create_gather_merge_path(), create_incremental_sort_path(), create_sort_path(), enable_incremental_sort, generate_useful_gather_paths(), PlannerInfo::group_pathkeys, lfirst, linitial, list_length(), Path::parallel_workers, RelOptInfo::partial_pathlist, Path::pathkeys, pathkeys_contained_in(), pathkeys_count_contained_in(), RelOptInfo::reltarget, and Path::rows.

Referenced by add_paths_to_grouping_rel(), and create_ordinary_grouping_paths().
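The resulting parallel path shape, informally (not from the source):

/*
 * Gather Merge
 *   -> Sort (root->group_pathkeys)
 *        -> [cheapest partial path]
 *
 * For partial paths that already carry a presorted prefix of the group
 * pathkeys, the explicit Sort is replaced by an Incremental Sort.
 */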

◆ get_cheapest_fractional_path()

Path* get_cheapest_fractional_path ( RelOptInfo *  rel,
double  tuple_fraction 
)

Definition at line 5988 of file planner.c.

5989 {
5990  Path *best_path = rel->cheapest_total_path;
5991  ListCell *l;
5992 
5993  /* If all tuples will be retrieved, just return the cheapest-total path */
5994  if (tuple_fraction <= 0.0)
5995  return best_path;
5996 
5997  /* Convert absolute # of tuples to a fraction; no need to clamp to 0..1 */
5998  if (tuple_fraction >= 1.0 && best_path->rows > 0)
5999  tuple_fraction /= best_path->rows;
6000 
6001  foreach(l, rel->pathlist)
6002  {
6003  Path *path = (Path *) lfirst(l);
6004 
6005  if (path == rel->cheapest_total_path ||
6006  compare_fractional_path_costs(best_path, path, tuple_fraction) <= 0)
6007  continue;
6008 
6009  best_path = path;
6010  }
6011 
6012  return best_path;
6013 }
int compare_fractional_path_costs(Path *path1, Path *path2, double fraction)
Definition: pathnode.c:117

References RelOptInfo::cheapest_total_path, compare_fractional_path_costs(), lfirst, RelOptInfo::pathlist, and Path::rows.

Referenced by make_subplan(), recurse_set_operations(), and standard_planner().
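The tuple_fraction convention deserves a concrete illustration (numbers invented): a value >= 1.0 is an absolute row count, while a value strictly between 0 and 1 is a fraction of the total output.

/*
 * tuple_fraction = 100 with a cheapest-total estimate of 10000 rows is
 * normalized to 100 / 10000 = 0.01; each remaining path is then compared
 * via compare_fractional_path_costs(best_path, path, 0.01), i.e. on the
 * estimated cost of producing the first 1% of its output.
 */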

◆ get_number_of_groups()

static double get_number_of_groups ( PlannerInfo *  root,
double  path_rows,
grouping_sets_data *  gd,
List *  target_list 
)
static

Definition at line 3437 of file planner.c.

3441 {
3442  Query *parse = root->parse;
3443  double dNumGroups;
3444 
3445  if (parse->groupClause)
3446  {
3447  List *groupExprs;
3448 
3449  if (parse->groupingSets)
3450  {
3451  /* Add up the estimates for each grouping set */
3452  ListCell *lc;
3453 
3454  Assert(gd); /* keep Coverity happy */
3455 
3456  dNumGroups = 0;
3457 
3458  foreach(lc, gd->rollups)
3459  {
3460  RollupData *rollup = lfirst_node(RollupData, lc);
3461  ListCell *lc2;
3462  ListCell *lc3;
3463 
3464  groupExprs = get_sortgrouplist_exprs(rollup->groupClause,
3465  target_list);
3466 
3467  rollup->numGroups = 0.0;
3468 
3469  forboth(lc2, rollup->gsets, lc3, rollup->gsets_data)
3470  {
3471  List *gset = (List *) lfirst(lc2);
3472  GroupingSetData *gs = lfirst_node(GroupingSetData, lc3);
3473  double numGroups = estimate_num_groups(root,
3474  groupExprs,
3475  path_rows,
3476  &gset,
3477  NULL);
3478 
3479  gs->numGroups = numGroups;
3480  rollup->numGroups += numGroups;
3481  }
3482 
3483  dNumGroups += rollup->numGroups;
3484  }
3485 
3486  if (gd->hash_sets_idx)
3487  {
3488  ListCell *lc2;
3489 
3490  gd->dNumHashGroups = 0;
3491 
3492  groupExprs = get_sortgrouplist_exprs(parse->groupClause,
3493  target_list);
3494 
3495  forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets)
3496  {
3497  List *gset = (List *) lfirst(lc);
3498  GroupingSetData *gs = lfirst_node(GroupingSetData, lc2);
3499  double numGroups = estimate_num_groups(root,
3500  groupExprs,
3501  path_rows,
3502  &gset,
3503  NULL);
3504 
3505  gs->numGroups = numGroups;
3506  gd->dNumHashGroups += numGroups;
3507  }
3508 
3509  dNumGroups += gd->dNumHashGroups;
3510  }
3511  }
3512  else
3513  {
3514  /* Plain GROUP BY */
3515  groupExprs = get_sortgrouplist_exprs(parse->groupClause,
3516  target_list);
3517 
3518  dNumGroups = estimate_num_groups(root, groupExprs, path_rows,
3519  NULL, NULL);
3520  }
3521  }
3522  else if (parse->groupingSets)
3523  {
3524  /* Empty grouping sets ... one result row for each one */
3525  dNumGroups = list_length(parse->groupingSets);
3526  }
3527  else if (parse->hasAggs || root->hasHavingQual)
3528  {
3529  /* Plain aggregation, one result row */
3530  dNumGroups = 1;
3531  }
3532  else
3533  {
3534  /* Not grouping */
3535  dNumGroups = 1;
3536  }
3537 
3538  return dNumGroups;
3539 }
List * hash_sets_idx
Definition: planner.c:110

References Assert(), grouping_sets_data::dNumHashGroups, estimate_num_groups(), forboth, get_sortgrouplist_exprs(), RollupData::groupClause, RollupData::gsets, RollupData::gsets_data, grouping_sets_data::hash_sets_idx, PlannerInfo::hasHavingQual, lfirst, lfirst_node, list_length(), GroupingSetData::numGroups, RollupData::numGroups, parse(), PlannerInfo::parse, grouping_sets_data::rollups, and grouping_sets_data::unsortable_sets.

Referenced by create_ordinary_grouping_paths(), and create_partial_grouping_paths().
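For instance (invented numbers), GROUP BY ROLLUP (a, b) yields one rollup containing the sets (a,b), (a) and (); each set gets its own estimate_num_groups() result, stored in its GroupingSetData, and dNumGroups is their sum:

/*
 * (a,b) -> 1000 groups
 * (a)   ->   50 groups
 * ()    ->    1 group
 *          ----------
 * dNumGroups = 1051 expected result rows
 */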

◆ group_by_has_partkey()

static bool group_by_has_partkey ( RelOptInfo *  input_rel,
List *  targetList,
List *  groupClause 
)
static

Definition at line 7715 of file planner.c.

7718 {
7719  List *groupexprs = get_sortgrouplist_exprs(groupClause, targetList);
7720  int cnt = 0;
7721  int partnatts;
7722 
7723  /* Input relation should be partitioned. */
7724  Assert(input_rel->part_scheme);
7725 
7726  /* Rule out early, if there are no partition keys present. */
7727  if (!input_rel->partexprs)
7728  return false;
7729 
7730  partnatts = input_rel->part_scheme->partnatts;
7731 
7732  for (cnt = 0; cnt < partnatts; cnt++)
7733  {
7734  List *partexprs = input_rel->partexprs[cnt];
7735  ListCell *lc;
7736  bool found = false;
7737 
7738  foreach(lc, partexprs)
7739  {
7740  Expr *partexpr = lfirst(lc);
7741 
7742  if (list_member(groupexprs, partexpr))
7743  {
7744  found = true;
7745  break;
7746  }
7747  }
7748 
7749  /*
7750  * If none of the partition key expressions matches any of the
7751  * GROUP BY expressions, return false.
7752  */
7753  if (!found)
7754  return false;
7755  }
7756 
7757  return true;
7758 }
bool list_member(const List *list, const void *datum)
Definition: list.c:660

References Assert(), get_sortgrouplist_exprs(), lfirst, and list_member().

Referenced by create_ordinary_grouping_paths().
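Illustrative outcomes (invented schema) for a table partitioned by (region):

/*
 * GROUP BY region, product  -> true   (every partition key is grouped on,
 *                                      so no group can span partitions and
 *                                      full partitionwise aggregation is safe)
 * GROUP BY product          -> false  (a group may cross partitions; at most
 *                                      partial partitionwise aggregation
 *                                      remains possible)
 */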

◆ grouping_planner()

static void grouping_planner ( PlannerInfo *  root,
double  tuple_fraction 
)
static

Definition at line 1256 of file planner.c.

1257 {
1258  Query *parse = root->parse;
1259  int64 offset_est = 0;
1260  int64 count_est = 0;
1261  double limit_tuples = -1.0;
1262  bool have_postponed_srfs = false;
1263  PathTarget *final_target;
1264  List *final_targets;
1265  List *final_targets_contain_srfs;
1266  bool final_target_parallel_safe;
1267  RelOptInfo *current_rel;
1268  RelOptInfo *final_rel;
1269  FinalPathExtraData extra;
1270  ListCell *lc;
1271 
1272  /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
1273  if (parse->limitCount || parse->limitOffset)
1274  {
1275  tuple_fraction = preprocess_limit(root, tuple_fraction,
1276  &offset_est, &count_est);
1277 
1278  /*
1279  * If we have a known LIMIT, and don't have an unknown OFFSET, we can
1280  * estimate the effects of using a bounded sort.
1281  */
1282  if (count_est > 0 && offset_est >= 0)
1283  limit_tuples = (double) count_est + (double) offset_est;
1284  }
1285 
1286  /* Make tuple_fraction accessible to lower-level routines */
1287  root->tuple_fraction = tuple_fraction;
1288 
1289  if (parse->setOperations)
1290  {
1291  /*
1292  * If there's a top-level ORDER BY, assume we have to fetch all the
1293  * tuples. This might be too simplistic given all the hackery below
1294  * to possibly avoid the sort; but the odds of accurate estimates here
1295  * are pretty low anyway. XXX try to get rid of this in favor of
1296  * letting plan_set_operations generate both fast-start and
1297  * cheapest-total paths.
1298  */
1299  if (parse->sortClause)
1300  root->tuple_fraction = 0.0;
1301 
1302  /*
1303  * Construct Paths for set operations. The results will not need any
1304  * work except perhaps a top-level sort and/or LIMIT. Note that any
1305  * special work for recursive unions is the responsibility of
1306  * plan_set_operations.
1307  */
1308  current_rel = plan_set_operations(root);
1309 
1310  /*
1311  * We should not need to call preprocess_targetlist, since we must be
1312  * in a SELECT query node. Instead, use the processed_tlist returned
1313  * by plan_set_operations (since this tells whether it returned any
1314  * resjunk columns!), and transfer any sort key information from the
1315  * original tlist.
1316  */
1317  Assert(parse->commandType == CMD_SELECT);
1318 
1319  /* for safety, copy processed_tlist instead of modifying in-place */
1320  root->processed_tlist =
1321  postprocess_setop_tlist(copyObject(root->processed_tlist),
1322  parse->targetList);
1323 
1324  /* Also extract the PathTarget form of the setop result tlist */
1325  final_target = current_rel->cheapest_total_path->pathtarget;
1326 
1327  /* And check whether it's parallel safe */
1328  final_target_parallel_safe =
1329  is_parallel_safe(root, (Node *) final_target->exprs);
1330 
1331  /* The setop result tlist couldn't contain any SRFs */
1332  Assert(!parse->hasTargetSRFs);
1333  final_targets = final_targets_contain_srfs = NIL;
1334 
1335  /*
1336  * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have
1337  * checked already, but let's make sure).
1338  */
1339  if (parse->rowMarks)
1340  ereport(ERROR,
1341  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1342  /*------
1343  translator: %s is a SQL row locking clause such as FOR UPDATE */
1344  errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
1345  LCS_asString(linitial_node(RowMarkClause,
1346  parse->rowMarks)->strength))));
1347 
1348  /*
1349  * Calculate pathkeys that represent result ordering requirements
1350  */
1351  Assert(parse->distinctClause == NIL);
1352  root->sort_pathkeys = make_pathkeys_for_sortclauses(root,
1353  parse->sortClause,
1354  root->processed_tlist);
1355  }
1356  else
1357  {
1358  /* No set operations, do regular planning */
1359  PathTarget *sort_input_target;
1360  List *sort_input_targets;
1361  List *sort_input_targets_contain_srfs;
1362  bool sort_input_target_parallel_safe;
1363  PathTarget *grouping_target;
1364  List *grouping_targets;
1365  List *grouping_targets_contain_srfs;
1366  bool grouping_target_parallel_safe;
1367  PathTarget *scanjoin_target;
1368  List *scanjoin_targets;
1369  List *scanjoin_targets_contain_srfs;
1370  bool scanjoin_target_parallel_safe;
1371  bool scanjoin_target_same_exprs;
1372  bool have_grouping;
1373  WindowFuncLists *wflists = NULL;
1374  List *activeWindows = NIL;
1375  grouping_sets_data *gset_data = NULL;
1376  standard_qp_extra qp_extra;
1377 
1378  /* A recursive query should always have setOperations */
1379  Assert(!root->hasRecursion);
1380 
1381  /* Preprocess grouping sets and GROUP BY clause, if any */
1382  if (parse->groupingSets)
1383  {
1384  gset_data = preprocess_grouping_sets(root);
1385  }
1386  else
1387  {
1388  /* Preprocess regular GROUP BY clause, if any */
1389  if (parse->groupClause)
1390  parse->groupClause = preprocess_groupclause(root, NIL);
1391  }
1392 
1393  /*
1394  * Preprocess targetlist. Note that much of the remaining planning
1395  * work will be done with the PathTarget representation of tlists, but
1396  * we must also maintain the full representation of the final tlist so
1397  * that we can transfer its decoration (resnames etc) to the topmost
1398  * tlist of the finished Plan. This is kept in processed_tlist.
1399  */
1400  preprocess_targetlist(root);
1401 
1402  /*
1403  * Mark all the aggregates with resolved aggtranstypes, and detect
1404  * aggregates that are duplicates or can share transition state. We
1405  * must do this before slicing and dicing the tlist into various
1406  * pathtargets, else some copies of the Aggref nodes might escape
1407  * being marked.
1408  */
1409  if (parse->hasAggs)
1410  {
1411  preprocess_aggrefs(root, (Node *) root->processed_tlist);
1412  preprocess_aggrefs(root, (Node *) parse->havingQual);
1413  }
1414 
1415  /*
1416  * Locate any window functions in the tlist. (We don't need to look
1417  * anywhere else, since expressions used in ORDER BY will be in there
1418  * too.) Note that they could all have been eliminated by constant
1419  * folding, in which case we don't need to do any more work.
1420  */
1421  if (parse->hasWindowFuncs)
1422  {
1423  wflists = find_window_functions((Node *) root->processed_tlist,
1424  list_length(parse->windowClause));
1425  if (wflists->numWindowFuncs > 0)
1426  activeWindows = select_active_windows(root, wflists);
1427  else
1428  parse->hasWindowFuncs = false;
1429  }
1430 
1431  /*
1432  * Preprocess MIN/MAX aggregates, if any. Note: be careful about
1433  * adding logic between here and the query_planner() call. Anything
1434  * that is needed in MIN/MAX-optimizable cases will have to be
1435  * duplicated in planagg.c.
1436  */
1437  if (parse->hasAggs)
1438  preprocess_minmax_aggregates(root);
1439 
1440  /*
1441  * Figure out whether there's a hard limit on the number of rows that
1442  * query_planner's result subplan needs to return. Even if we know a
1443  * hard limit overall, it doesn't apply if the query has any
1444  * grouping/aggregation operations, or SRFs in the tlist.
1445  */
1446  if (parse->groupClause ||
1447  parse->groupingSets ||
1448  parse->distinctClause ||
1449  parse->hasAggs ||
1450  parse->hasWindowFuncs ||
1451  parse->hasTargetSRFs ||
1452  root->hasHavingQual)
1453  root->limit_tuples = -1.0;
1454  else
1455  root->limit_tuples = limit_tuples;
1456 
1457  /* Set up data needed by standard_qp_callback */
1458  qp_extra.activeWindows = activeWindows;
1459  qp_extra.groupClause = (gset_data
1460  ? (gset_data->rollups ? linitial_node(RollupData, gset_data->rollups)->groupClause : NIL)
1461  : parse->groupClause);
1462 
1463  /*
1464  * Generate the best unsorted and presorted paths for the scan/join
1465  * portion of this Query, ie the processing represented by the
1466  * FROM/WHERE clauses. (Note there may not be any presorted paths.)
1467  * We also generate (in standard_qp_callback) pathkey representations
1468  * of the query's sort clause, distinct clause, etc.
1469  */
1470  current_rel = query_planner(root, standard_qp_callback, &qp_extra);
1471 
1472  /*
1473  * Convert the query's result tlist into PathTarget format.
1474  *
1475  * Note: this cannot be done before query_planner() has performed
1476  * appendrel expansion, because that might add resjunk entries to
1477  * root->processed_tlist. Waiting till afterwards is also helpful
1478  * because the target width estimates can use per-Var width numbers
1479  * that were obtained within query_planner().
1480  */
1481  final_target = create_pathtarget(root, root->processed_tlist);
1482  final_target_parallel_safe =
1483  is_parallel_safe(root, (Node *) final_target->exprs);
1484 
1485  /*
1486  * If ORDER BY was given, consider whether we should use a post-sort
1487  * projection, and compute the adjusted target for preceding steps if
1488  * so.
1489  */
1490  if (parse->sortClause)
1491  {
1492  sort_input_target = make_sort_input_target(root,
1493  final_target,
1494  &have_postponed_srfs);
1495  sort_input_target_parallel_safe =
1496  is_parallel_safe(root, (Node *) sort_input_target->exprs);
1497  }
1498  else
1499  {
1500  sort_input_target = final_target;
1501  sort_input_target_parallel_safe = final_target_parallel_safe;
1502  }
1503 
1504  /*
1505  * If we have window functions to deal with, the output from any
1506  * grouping step needs to be what the window functions want;
1507  * otherwise, it should be sort_input_target.
1508  */
1509  if (activeWindows)
1510  {
1511  grouping_target = make_window_input_target(root,
1512  final_target,
1513  activeWindows);
1514  grouping_target_parallel_safe =
1515  is_parallel_safe(root, (Node *) grouping_target->exprs);
1516  }
1517  else
1518  {
1519  grouping_target = sort_input_target;
1520  grouping_target_parallel_safe = sort_input_target_parallel_safe;
1521  }
1522 
1523  /*
1524  * If we have grouping or aggregation to do, the topmost scan/join
1525  * plan node must emit what the grouping step wants; otherwise, it
1526  * should emit grouping_target.
1527  */
1528  have_grouping = (parse->groupClause || parse->groupingSets ||
1529  parse->hasAggs || root->hasHavingQual);
1530  if (have_grouping)
1531  {
1532  scanjoin_target = make_group_input_target(root, final_target);
1533  scanjoin_target_parallel_safe =
1534  is_parallel_safe(root, (Node *) scanjoin_target->exprs);
1535  }
1536  else
1537  {
1538  scanjoin_target = grouping_target;
1539  scanjoin_target_parallel_safe = grouping_target_parallel_safe;
1540  }
1541 
1542  /*
1543  * If there are any SRFs in the targetlist, we must separate each of
1544  * these PathTargets into SRF-computing and SRF-free targets. Replace
1545  * each of the named targets with a SRF-free version, and remember the
1546  * list of additional projection steps we need to add afterwards.
1547  */
1548  if (parse->hasTargetSRFs)
1549  {
1550  /* final_target doesn't recompute any SRFs in sort_input_target */
1551  split_pathtarget_at_srfs(root, final_target, sort_input_target,
1552  &final_targets,
1553  &final_targets_contain_srfs);
1554  final_target = linitial_node(PathTarget, final_targets);
1555  Assert(!linitial_int(final_targets_contain_srfs));
1556  /* likewise for sort_input_target vs. grouping_target */
1557  split_pathtarget_at_srfs(root, sort_input_target, grouping_target,
1558  &sort_input_targets,
1559  &sort_input_targets_contain_srfs);
1560  sort_input_target = linitial_node(PathTarget, sort_input_targets);
1561  Assert(!linitial_int(sort_input_targets_contain_srfs));
1562  /* likewise for grouping_target vs. scanjoin_target */
1563  split_pathtarget_at_srfs(root, grouping_target, scanjoin_target,
1564  &grouping_targets,
1565  &grouping_targets_contain_srfs);
1566  grouping_target = linitial_node(PathTarget, grouping_targets);
1567  Assert(!linitial_int(grouping_targets_contain_srfs));
1568  /* scanjoin_target will not have any SRFs precomputed for it */
1569  split_pathtarget_at_srfs(root, scanjoin_target, NULL,
1570  &scanjoin_targets,
1571  &scanjoin_targets_contain_srfs);
1572  scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
1573  Assert(!linitial_int(scanjoin_targets_contain_srfs));
1574  }
1575  else
1576  {
1577  /* initialize lists; for most of these, dummy values are OK */
1578  final_targets = final_targets_contain_srfs = NIL;
1579  sort_input_targets = sort_input_targets_contain_srfs = NIL;
1580  grouping_targets = grouping_targets_contain_srfs = NIL;
1581  scanjoin_targets = list_make1(scanjoin_target);
1582  scanjoin_targets_contain_srfs = NIL;
1583  }
1584 
1585  /* Apply scan/join target. */
1586  scanjoin_target_same_exprs = list_length(scanjoin_targets) == 1
1587  && equal(scanjoin_target->exprs, current_rel->reltarget->exprs);
1588  apply_scanjoin_target_to_paths(root, current_rel, scanjoin_targets,
1589  scanjoin_targets_contain_srfs,
1590  scanjoin_target_parallel_safe,
1591  scanjoin_target_same_exprs);
1592 
1593  /*
1594  * Save the various upper-rel PathTargets we just computed into
1595  * root->upper_targets[]. The core code doesn't use this, but it
1596  * provides a convenient place for extensions to get at the info. For
1597  * consistency, we save all the intermediate targets, even though some
1598  * of the corresponding upperrels might not be needed for this query.
1599  */
1600  root->upper_targets[UPPERREL_FINAL] = final_target;
1601  root->upper_targets[UPPERREL_ORDERED] = final_target;
1602  root->upper_targets[UPPERREL_PARTIAL_DISTINCT] = sort_input_target;
1603  root->upper_targets[UPPERREL_DISTINCT] = sort_input_target;
1604  root->upper_targets[UPPERREL_WINDOW] = sort_input_target;
1605  root->upper_targets[UPPERREL_GROUP_AGG] = grouping_target;
1606 
1607  /*
1608  * If we have grouping and/or aggregation, consider ways to implement
1609  * that. We build a new upperrel representing the output of this
1610  * phase.
1611  */
1612  if (have_grouping)
1613  {
1614  current_rel = create_grouping_paths(root,
1615  current_rel,
1616  grouping_target,
1617  grouping_target_parallel_safe,
1618  gset_data);
1619  /* Fix things up if grouping_target contains SRFs */
1620  if (parse->hasTargetSRFs)
1621  adjust_paths_for_srfs(root, current_rel,
1622  grouping_targets,
1623  grouping_targets_contain_srfs);
1624  }
1625 
1626  /*
1627  * If we have window functions, consider ways to implement those. We
1628  * build a new upperrel representing the output of this phase.
1629  */
1630  if (activeWindows)
1631  {
1632  current_rel = create_window_paths(root,
1633  current_rel,
1634  grouping_target,
1635  sort_input_target,
1636  sort_input_target_parallel_safe,
1637  wflists,
1638  activeWindows);
1639  /* Fix things up if sort_input_target contains SRFs */
1640  if (parse->hasTargetSRFs)
1641  adjust_paths_for_srfs(root, current_rel,
1642  sort_input_targets,
1643  sort_input_targets_contain_srfs);
1644  }
1645 
1646  /*
1647  * If there is a DISTINCT clause, consider ways to implement that. We
1648  * build a new upperrel representing the output of this phase.
1649  */
1650  if (parse->distinctClause)
1651  {
1652  current_rel = create_distinct_paths(root,
1653  current_rel);
1654  }
1655  } /* end of if (setOperations) */
1656 
1657  /*
1658  * If ORDER BY was given, consider ways to implement that, and generate a
1659  * new upperrel containing only paths that emit the correct ordering and
1660  * project the correct final_target. We can apply the original
1661  * limit_tuples limit in sort costing here, but only if there are no
1662  * postponed SRFs.
1663  */
1664  if (parse->sortClause)
1665  {
1666  current_rel = create_ordered_paths(root,
1667  current_rel,
1668  final_target,
1669  final_target_parallel_safe,
1670  have_postponed_srfs ? -1.0 :
1671  limit_tuples);
1672  /* Fix things up if final_target contains SRFs */
1673  if (parse->hasTargetSRFs)
1674  adjust_paths_for_srfs(root, current_rel,
1675  final_targets,
1676  final_targets_contain_srfs);
1677  }
1678 
1679  /*
1680  * Now we are prepared to build the final-output upperrel.
1681  */
1682  final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1683 
1684  /*
1685  * If the input rel is marked consider_parallel and there's nothing that's
1686  * not parallel-safe in the LIMIT clause, then the final_rel can be marked
1687  * consider_parallel as well. Note that if the query has rowMarks or is
1688  * not a SELECT, consider_parallel will be false for every relation in the
1689  * query.
1690  */
1691  if (current_rel->consider_parallel &&
1692  is_parallel_safe(root, parse->limitOffset) &&
1693  is_parallel_safe(root, parse->limitCount))
1694  final_rel->consider_parallel = true;
1695 
1696  /*
1697  * If the current_rel belongs to a single FDW, so does the final_rel.
1698  */
1699  final_rel->serverid = current_rel->serverid;
1700  final_rel->userid = current_rel->userid;
1701  final_rel->useridiscurrent = current_rel->useridiscurrent;
1702  final_rel->fdwroutine = current_rel->fdwroutine;
1703 
1704  /*
1705  * Generate paths for the final_rel. Insert all surviving paths, with
1706  * LockRows, Limit, and/or ModifyTable steps added if needed.
1707  */
1708  foreach(lc, current_rel->pathlist)
1709  {
1710  Path *path = (Path *) lfirst(lc);
1711 
1712  /*
1713  * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
1714  * (Note: we intentionally test parse->rowMarks not root->rowMarks
1715  * here. If there are only non-locking rowmarks, they should be
1716  * handled by the ModifyTable node instead. However, root->rowMarks
1717  * is what goes into the LockRows node.)
1718  */
1719  if (parse->rowMarks)
1720  {
1721  path = (Path *) create_lockrows_path(root, final_rel, path,
1722  root->rowMarks,
1723  assign_special_exec_param(root));
1724  }
1725 
1726  /*
1727  * If there is a LIMIT/OFFSET clause, add the LIMIT node.
1728  */
1729  if (limit_needed(parse))
1730  {
1731  path = (Path *) create_limit_path(root, final_rel, path,
1732  parse->limitOffset,
1733  parse->limitCount,
1734  parse->limitOption,
1735  offset_est, count_est);
1736  }
1737 
1738  /*
1739  * If this is an INSERT/UPDATE/DELETE/MERGE, add the ModifyTable node.
1740  */
1741  if (parse->commandType != CMD_SELECT)
1742  {
1743  Index rootRelation;
1744  List *resultRelations = NIL;
1745  List *updateColnosLists = NIL;
1746  List *withCheckOptionLists = NIL;
1747  List *returningLists = NIL;
1748  List *mergeActionLists = NIL;
1749  List *rowMarks;
1750 
1751  if (bms_membership(root->all_result_relids) == BMS_MULTIPLE)
1752  {
1753  /* Inherited UPDATE/DELETE/MERGE */
1754  RelOptInfo *top_result_rel = find_base_rel(root,
1755  parse->resultRelation);
1756  int resultRelation = -1;
1757 
1758  /* Add only leaf children to ModifyTable. */
1759  while ((resultRelation = bms_next_member(root->leaf_result_relids,
1760  resultRelation)) >= 0)
1761  {
1762  RelOptInfo *this_result_rel = find_base_rel(root,
1763  resultRelation);
1764 
1765  /*
1766  * Also exclude any leaf rels that have turned dummy since
1767  * being added to the list, for example, by being excluded
1768  * by constraint exclusion.
1769  */
1770  if (IS_DUMMY_REL(this_result_rel))
1771  continue;
1772 
1773  /* Build per-target-rel lists needed by ModifyTable */
1774  resultRelations = lappend_int(resultRelations,
1775  resultRelation);
1776  if (parse->commandType == CMD_UPDATE)
1777  {
1778  List *update_colnos = root->update_colnos;
1779 
1780  if (this_result_rel != top_result_rel)
1781  update_colnos =
1782  adjust_inherited_attnums_multilevel(root,
1783  update_colnos,
1784  this_result_rel->relid,
1785  top_result_rel->relid);
1786  updateColnosLists = lappend(updateColnosLists,
1787  update_colnos);
1788  }
1789  if (parse->withCheckOptions)
1790  {
1791  List *withCheckOptions = parse->withCheckOptions;
1792 
1793  if (this_result_rel != top_result_rel)
1794  withCheckOptions = (List *)
1795  adjust_appendrel_attrs_multilevel(root,
1796  (Node *) withCheckOptions,
1797  this_result_rel,
1798  top_result_rel);
1799  withCheckOptionLists = lappend(withCheckOptionLists,
1800  withCheckOptions);
1801  }
1802  if (parse->returningList)
1803  {
1804  List *returningList = parse->returningList;
1805 
1806  if (this_result_rel != top_result_rel)
1807  returningList = (List *)
1808  adjust_appendrel_attrs_multilevel(root,
1809  (Node *) returningList,
1810  this_result_rel,
1811  top_result_rel);
1812  returningLists = lappend(returningLists,
1813  returningList);
1814  }
1815  if (parse->mergeActionList)
1816  {
1817  ListCell *l;
1818  List *mergeActionList = NIL;
1819 
1820  /*
1821  * Copy MergeActions and translate stuff that
1822  * references attribute numbers.
1823  */
1824  foreach(l, parse->mergeActionList)
1825  {
1826  MergeAction *action = lfirst(l),
1827  *leaf_action = copyObject(action);
1828 
1829  leaf_action->qual =
1830  adjust_appendrel_attrs_multilevel(root,
1831  (Node *) action->qual,
1832  this_result_rel,
1833  top_result_rel);
1834  leaf_action->targetList = (List *)
1835  adjust_appendrel_attrs_multilevel(root,
1836  (Node *) action->targetList,
1837  this_result_rel,
1838  top_result_rel);
1839  if (leaf_action->commandType == CMD_UPDATE)
1840  leaf_action->updateColnos =
1841  adjust_inherited_attnums_multilevel(root,
1842  action->updateColnos,
1843  this_result_rel->relid,
1844  top_result_rel->relid);
1845  mergeActionList = lappend(mergeActionList,
1846  leaf_action);
1847  }
1848 
1849  mergeActionLists = lappend(mergeActionLists,
1850  mergeActionList);
1851  }
1852  }
1853 
1854  if (resultRelations == NIL)
1855  {
1856  /*
1857  * We managed to exclude every child rel, so generate a
1858  * dummy one-relation plan using info for the top target
1859  * rel (even though that may not be a leaf target).
1860  * Although it's clear that no data will be updated or
1861  * deleted, we still need to have a ModifyTable node so
1862  * that any statement triggers will be executed. (This
1863  * could be cleaner if we fixed nodeModifyTable.c to allow
1864  * zero target relations, but that probably wouldn't be a
1865  * net win.)
1866  */
1867  resultRelations = list_make1_int(parse->resultRelation);
1868  if (parse->commandType == CMD_UPDATE)
1869  updateColnosLists = list_make1(root->update_colnos);
1870  if (parse->withCheckOptions)
1871  withCheckOptionLists = list_make1(parse->withCheckOptions);
1872  if (parse->returningList)
1873  returningLists = list_make1(parse->returningList);
1874  if (parse->mergeActionList)
1875  mergeActionLists = list_make1(parse->mergeActionList);
1876  }
1877  }
1878  else
1879  {
1880  /* Single-relation INSERT/UPDATE/DELETE/MERGE. */
1881  resultRelations = list_make1_int(parse->resultRelation);
1882  if (parse->commandType == CMD_UPDATE)
1883  updateColnosLists = list_make1(root->update_colnos);
1884  if (parse->withCheckOptions)
1885  withCheckOptionLists = list_make1(parse->withCheckOptions);
1886  if (parse->returningList)
1887  returningLists = list_make1(parse->returningList);
1888  if (parse->mergeActionList)
1889  mergeActionLists = list_make1(parse->mergeActionList);
1890  }
1891 
1892  /*
1893  * If target is a partition root table, we need to mark the
1894  * ModifyTable node appropriately for that.
1895  */
1896  if (rt_fetch(parse->resultRelation, parse->rtable)->relkind ==
1897  RELKIND_PARTITIONED_TABLE)
1898  rootRelation = parse->resultRelation;
1899  else
1900  rootRelation = 0;
1901 
1902  /*
1903  * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
1904  * will have dealt with fetching non-locked marked rows, else we
1905  * need to have ModifyTable do that.
1906  */
1907  if (parse->rowMarks)
1908  rowMarks = NIL;
1909  else
1910  rowMarks = root->rowMarks;
1911 
1912  path = (Path *)
1913  create_modifytable_path(root, final_rel,
1914  path,
1915  parse->commandType,
1916  parse->canSetTag,
1917  parse->resultRelation,
1918  rootRelation,
1919  root->partColsUpdated,
1920  resultRelations,
1921  updateColnosLists,
1922  withCheckOptionLists,
1923  returningLists,
1924  rowMarks,
1925  parse->onConflict,
1926  mergeActionLists,
1927  assign_special_exec_param(root));
1928  }
1929 
1930  /* And shove it into final_rel */
1931  add_path(final_rel, path);
1932  }
1933 
1934  /*
1935  * Generate partial paths for final_rel, too, if outer query levels might
1936  * be able to make use of them.
1937  */
1938  if (final_rel->consider_parallel && root->query_level > 1 &&
1939  !limit_needed(parse))
1940  {
1941  Assert(!parse->rowMarks && parse->commandType == CMD_SELECT);
1942  foreach(lc, current_rel->partial_pathlist)
1943  {
1944  Path *partial_path = (Path *) lfirst(lc);
1945 
1946  add_partial_path(final_rel, partial_path);
1947  }
1948  }
1949 
1950  extra.limit_needed = limit_needed(parse);
1951  extra.limit_tuples = limit_tuples;
1952  extra.count_est = count_est;
1953  extra.offset_est = offset_est;
1954 
1955  /*
1956  * If there is an FDW that's responsible for all baserels of the query,
1957  * let it consider adding ForeignPaths.
1958  */
1959  if (final_rel->fdwroutine &&
1960  final_rel->fdwroutine->GetForeignUpperPaths)
1961  final_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_FINAL,
1962  current_rel, final_rel,
1963  &extra);
1964 
1965  /* Let extensions possibly add some more paths */
1966  if (create_upper_paths_hook)
1967  (*create_upper_paths_hook) (root, UPPERREL_FINAL,
1968  current_rel, final_rel, &extra);
1969 
1970  /* Note: currently, we leave it to callers to do set_cheapest() */
1971 }
Node * adjust_appendrel_attrs_multilevel(PlannerInfo *root, Node *node, RelOptInfo *childrel, RelOptInfo *parentrel)
Definition: appendinfo.c:489
List * adjust_inherited_attnums_multilevel(PlannerInfo *root, List *attnums, Index child_relid, Index top_parent_relid)
Definition: appendinfo.c:630
BMS_Membership bms_membership(const Bitmapset *a)
Definition: bitmapset.c:675
@ BMS_MULTIPLE
Definition: bitmapset.h:75
unsigned int Index
Definition: c.h:550
WindowFuncLists * find_window_functions(Node *clause, Index maxWinRef)
Definition: clauses.c:227
bool equal(const void *a, const void *b)
Definition: equalfuncs.c:225
List * lappend_int(List *list, int datum)
Definition: list.c:356
#define copyObject(obj)
Definition: nodes.h:233
@ CMD_UPDATE
Definition: nodes.h:266
@ CMD_SELECT
Definition: nodes.h:265
int assign_special_exec_param(PlannerInfo *root)
Definition: paramassign.c:580
const char * LCS_asString(LockClauseStrength strength)
Definition: analyze.c:3118
#define rt_fetch(rangetable_index, rangetable)
Definition: parsetree.h:31
List * make_pathkeys_for_sortclauses(PlannerInfo *root, List *sortclauses, List *tlist)
Definition: pathkeys.c:1152
LockRowsPath * create_lockrows_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *rowMarks, int epqParam)
Definition: pathnode.c:3593
ModifyTablePath * create_modifytable_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, CmdType operation, bool canSetTag, Index nominalRelation, Index rootRelation, bool partColsUpdated, List *resultRelations, List *updateColnosLists, List *withCheckOptionLists, List *returningLists, List *rowMarks, OnConflictExpr *onconflict, List *mergeActionLists, int epqParam)
Definition: pathnode.c:3655
@ UPPERREL_FINAL
Definition: pathnodes.h:79
#define list_make1_int(x1)
Definition: pg_list.h:225
void preprocess_minmax_aggregates(PlannerInfo *root)
Definition: planagg.c:73
RelOptInfo * query_planner(PlannerInfo *root, query_pathkeys_callback qp_callback, void *qp_extra)
Definition: planmain.c:55
static List * postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
Definition: planner.c:5423
static double preprocess_limit(PlannerInfo *root, double tuple_fraction, int64 *offset_est, int64 *count_est)
Definition: planner.c:2369
static PathTarget * make_window_input_target(PlannerInfo *root, PathTarget *final_target, List *activeWindows)
Definition: planner.c:5605
static RelOptInfo * create_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel)
Definition: planner.c:4525
static PathTarget * make_sort_input_target(PlannerInfo *root, PathTarget *final_target, bool *have_postponed_srfs)
Definition: planner.c:5817
static grouping_sets_data * preprocess_grouping_sets(PlannerInfo *root)
Definition: planner.c:1980
static PathTarget * make_group_input_target(PlannerInfo *root, PathTarget *final_target)
Definition: planner.c:5198
static List * select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
Definition: planner.c:5456
bool limit_needed(Query *parse)
Definition: planner.c:2554
static RelOptInfo * create_ordered_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, double limit_tuples)
Definition: planner.c:4929
static RelOptInfo * create_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, grouping_sets_data *gd)
Definition: planner.c:3559
static void standard_qp_callback(PlannerInfo *root, void *extra)
Definition: planner.c:3346
static RelOptInfo * create_window_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *input_target, PathTarget *output_target, bool output_target_parallel_safe, WindowFuncLists *wflists, List *activeWindows)
Definition: planner.c:4308
void preprocess_aggrefs(PlannerInfo *root, Node *clause)
Definition: prepagg.c:111
void preprocess_targetlist(PlannerInfo *root)
Definition: preptlist.c:62
RelOptInfo * plan_set_operations(PlannerInfo *root)
Definition: prepunion.c:104
RelOptInfo * find_base_rel(PlannerInfo *root, int relid)
Definition: relnode.c:360
Cardinality limit_tuples
Definition: pathnodes.h:3101
bool partColsUpdated
Definition: pathnodes.h:508
bool hasRecursion
Definition: pathnodes.h:463
Index query_level
Definition: pathnodes.h:205
List * rowMarks
Definition: pathnodes.h:358
Cardinality limit_tuples
Definition: pathnodes.h:442
Selectivity tuple_fraction
Definition: pathnodes.h:440
List * update_colnos
Definition: pathnodes.h:423
Relids all_result_relids
Definition: pathnodes.h:341
Relids leaf_result_relids
Definition: pathnodes.h:343
Index relid
Definition: pathnodes.h:868
int numWindowFuncs
Definition: clauses.h:21
List * groupClause
Definition: planner.c:100
List * activeWindows
Definition: planner.c:99
void split_pathtarget_at_srfs(PlannerInfo *root, PathTarget *target, PathTarget *input_target, List **targets, List **targets_contain_srfs)
Definition: tlist.c:881
#define create_pathtarget(root, tlist)
Definition: tlist.h:53

References generate_unaccent_rules::action, standard_qp_extra::activeWindows, add_partial_path(), add_path(), adjust_appendrel_attrs_multilevel(), adjust_inherited_attnums_multilevel(), adjust_paths_for_srfs(), PlannerInfo::all_result_relids, apply_scanjoin_target_to_paths(), Assert(), assign_special_exec_param(), bms_membership(), BMS_MULTIPLE, bms_next_member(), RelOptInfo::cheapest_total_path, CMD_SELECT, CMD_UPDATE, RelOptInfo::consider_parallel, copyObject, FinalPathExtraData::count_est, create_distinct_paths(), create_grouping_paths(), create_limit_path(), create_lockrows_path(), create_modifytable_path(), create_ordered_paths(), create_pathtarget, create_upper_paths_hook, create_window_paths(), equal(), ereport, errcode(), errmsg(), ERROR, PathTarget::exprs, fetch_upper_rel(), find_base_rel(), find_window_functions(), standard_qp_extra::groupClause, PlannerInfo::hasHavingQual, PlannerInfo::hasRecursion, IS_DUMMY_REL, is_parallel_safe(), lappend(), lappend_int(), LCS_asString(), PlannerInfo::leaf_result_relids, lfirst, limit_needed(), FinalPathExtraData::limit_needed, PlannerInfo::limit_tuples, FinalPathExtraData::limit_tuples, linitial_int, linitial_node, list_length(), list_make1, list_make1_int, make_group_input_target(), make_pathkeys_for_sortclauses(), make_sort_input_target(), make_window_input_target(), NIL, WindowFuncLists::numWindowFuncs, FinalPathExtraData::offset_est, parse(), PlannerInfo::parse, PlannerInfo::partColsUpdated, RelOptInfo::partial_pathlist, RelOptInfo::pathlist, plan_set_operations(), postprocess_setop_tlist(), preprocess_aggrefs(), preprocess_groupclause(), preprocess_grouping_sets(), preprocess_limit(), preprocess_minmax_aggregates(), preprocess_targetlist(), PlannerInfo::processed_tlist, PlannerInfo::query_level, query_planner(), RelOptInfo::relid, RelOptInfo::reltarget, grouping_sets_data::rollups, PlannerInfo::rowMarks, rt_fetch, select_active_windows(), RelOptInfo::serverid, PlannerInfo::sort_pathkeys, split_pathtarget_at_srfs(), standard_qp_callback(), PlannerInfo::tuple_fraction, PlannerInfo::update_colnos, UPPERREL_DISTINCT, UPPERREL_FINAL, UPPERREL_GROUP_AGG, UPPERREL_ORDERED, UPPERREL_PARTIAL_DISTINCT, UPPERREL_WINDOW, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by subquery_planner().
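Informally, for a non-set-operation query the phases above are considered in a fixed order, each building a new upperrel on top of the previous one's paths:

/*
 * scan/join  (query_planner)
 *   -> grouping/aggregation  (create_grouping_paths)
 *   -> window functions      (create_window_paths)
 *   -> DISTINCT              (create_distinct_paths)
 *   -> ORDER BY              (create_ordered_paths)
 *   -> UPPERREL_FINAL        (LockRows / Limit / ModifyTable as needed)
 */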

◆ is_degenerate_grouping()

static bool is_degenerate_grouping ( PlannerInfo *  root)
static

Definition at line 3725 of file planner.c.

3726 {
3727  Query *parse = root->parse;
3728 
3729  return (root->hasHavingQual || parse->groupingSets) &&
3730  !parse->hasAggs && parse->groupClause == NIL;
3731 }

References PlannerInfo::hasHavingQual, NIL, parse(), and PlannerInfo::parse.

Referenced by create_grouping_paths().
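Some illustrative cases (invented queries):

/*
 * degenerate:      SELECT 1 FROM tab HAVING false;
 *                  (HAVING qual, but no aggregates and no GROUP BY, so the
 *                  result is at most one row)
 * not degenerate:  SELECT count(*) FROM tab;        -- hasAggs
 *                  SELECT a FROM tab GROUP BY a;    -- groupClause != NIL
 */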

◆ limit_needed()

bool limit_needed ( Query *  parse)

Definition at line 2554 of file planner.c.

2555 {
2556  Node *node;
2557 
2558  node = parse->limitCount;
2559  if (node)
2560  {
2561  if (IsA(node, Const))
2562  {
2563  /* NULL indicates LIMIT ALL, ie, no limit */
2564  if (!((Const *) node)->constisnull)
2565  return true; /* LIMIT with a constant value */
2566  }
2567  else
2568  return true; /* non-constant LIMIT */
2569  }
2570 
2571  node = parse->limitOffset;
2572  if (node)
2573  {
2574  if (IsA(node, Const))
2575  {
2576  /* Treat NULL as no offset; the executor would too */
2577  if (!((Const *) node)->constisnull)
2578  {
2579  int64 offset = DatumGetInt64(((Const *) node)->constvalue);
2580 
2581  if (offset != 0)
2582  return true; /* OFFSET with a nonzero value */
2583  }
2584  }
2585  else
2586  return true; /* non-constant OFFSET */
2587  }
2588 
2589  return false; /* don't need a Limit plan node */
2590 }
#define IsA(nodeptr, _type_)
Definition: nodes.h:168
static int64 DatumGetInt64(Datum X)
Definition: postgres.h:733

References DatumGetInt64(), IsA, and parse().

Referenced by grouping_planner(), and set_rel_consider_parallel().
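Illustrative outcomes (not from the source):

/*
 * LIMIT 10      -> true   (non-null constant LIMIT)
 * LIMIT ALL     -> false  (parsed as a NULL constant, i.e. no limit)
 * LIMIT $1      -> true   (non-constant, must be assumed to matter)
 * OFFSET 0      -> false  (constant zero offset)
 * OFFSET $1     -> true   (non-constant OFFSET)
 */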

◆ make_group_input_target()

static PathTarget * make_group_input_target ( PlannerInfo *  root,
PathTarget *  final_target 
)
static

Definition at line 5198 of file planner.c.

5199 {
5200  Query *parse = root->parse;
5201  PathTarget *input_target;
5202  List *non_group_cols;
5203  List *non_group_vars;
5204  int i;
5205  ListCell *lc;
5206 
5207  /*
5208  * We must build a target containing all grouping columns, plus any other
5209  * Vars mentioned in the query's targetlist and HAVING qual.
5210  */
5211  input_target = create_empty_pathtarget();
5212  non_group_cols = NIL;
5213 
5214  i = 0;
5215  foreach(lc, final_target->exprs)
5216  {
5217  Expr *expr = (Expr *) lfirst(lc);
5218  Index sgref = get_pathtarget_sortgroupref(final_target, i);
5219 
5220  if (sgref && parse->groupClause &&
5221  get_sortgroupref_clause_noerr(sgref, parse->groupClause) != NULL)
5222  {
5223  /*
5224  * It's a grouping column, so add it to the input target as-is.
5225  */
5226  add_column_to_pathtarget(input_target, expr, sgref);
5227  }
5228  else
5229  {
5230  /*
5231  * Non-grouping column, so just remember the expression for later
5232  * call to pull_var_clause.
5233  */
5234  non_group_cols = lappend(non_group_cols, expr);
5235  }
5236 
5237  i++;
5238  }
5239 
5240  /*
5241  * If there's a HAVING clause, we'll need the Vars it uses, too.
5242  */
5243  if (parse->havingQual)
5244  non_group_cols = lappend(non_group_cols, parse->havingQual);
5245 
5246  /*
5247  * Pull out all the Vars mentioned in non-group cols (plus HAVING), and
5248  * add them to the input target if not already present. (A Var used
5249  * directly as a GROUP BY item will be present already.) Note this
5250  * includes Vars used in resjunk items, so we are covering the needs of
5251  * ORDER BY and window specifications. Vars used within Aggrefs and
5252  * WindowFuncs will be pulled out here, too.
5253  */
5254  non_group_vars = pull_var_clause((Node *) non_group_cols,
5255  PVC_RECURSE_AGGREGATES |
5256  PVC_RECURSE_WINDOWFUNCS |
5257  PVC_INCLUDE_PLACEHOLDERS);
5258  add_new_columns_to_pathtarget(input_target, non_group_vars);
5259 
5260  /* clean up cruft */
5261  list_free(non_group_vars);
5262  list_free(non_group_cols);
5263 
5264  /* XXX this causes some redundant cost calculation ... */
5265  return set_pathtarget_cost_width(root, input_target);
5266 }
PathTarget * set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
Definition: costsize.c:6006
void list_free(List *list)
Definition: list.c:1545
#define PVC_RECURSE_AGGREGATES
Definition: optimizer.h:184
#define PVC_RECURSE_WINDOWFUNCS
Definition: optimizer.h:186
#define PVC_INCLUDE_PLACEHOLDERS
Definition: optimizer.h:187
#define get_pathtarget_sortgroupref(target, colno)
Definition: pathnodes.h:1440
SortGroupClause * get_sortgroupref_clause_noerr(Index sortref, List *clauses)
Definition: tlist.c:443
void add_new_columns_to_pathtarget(PathTarget *target, List *exprs)
Definition: tlist.c:752
PathTarget * create_empty_pathtarget(void)
Definition: tlist.c:681
List * pull_var_clause(Node *node, int flags)
Definition: var.c:597

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), create_empty_pathtarget(), PathTarget::exprs, get_pathtarget_sortgroupref, get_sortgroupref_clause_noerr(), i, lappend(), lfirst, list_free(), NIL, parse(), PlannerInfo::parse, pull_var_clause(), PVC_INCLUDE_PLACEHOLDERS, PVC_RECURSE_AGGREGATES, PVC_RECURSE_WINDOWFUNCS, and set_pathtarget_cost_width().

Referenced by grouping_planner().
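
As a rough illustration of the split this function performs: grouping columns are carried into the input target as whole expressions, while every other targetlist entry (plus the HAVING qual) is reduced to the plain columns it references. The standalone sketch below uses a toy string-based type (ToyCol is hypothetical); the real code operates on PathTarget and Vars and deduplicates via add_new_columns_to_pathtarget().

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model of the split performed above: grouping columns are kept
 * as whole expressions, everything else is reduced to the plain
 * columns ("Vars") it references. Not the real planner structures.
 */
typedef struct ToyCol
{
    const char *expr;       /* expression as written in the targetlist */
    bool        is_group;   /* matches a GROUP BY item? */
    const char *vars[2];    /* columns the expression references */
} ToyCol;

int
main(void)
{
    /* SELECT a, sum(b + c) FROM t GROUP BY a; */
    ToyCol final_target[] = {
        {"a", true, {NULL, NULL}},
        {"sum(b + c)", false, {"b", "c"}},
    };
    const char *input_target[8];
    int         n = 0;

    for (int i = 0; i < 2; i++)
    {
        if (final_target[i].is_group)
            input_target[n++] = final_target[i].expr;   /* keep as-is */
        else
            for (int v = 0; v < 2 && final_target[i].vars[v] != NULL; v++)
                input_target[n++] = final_target[i].vars[v];    /* pull Vars */
    }

    /* prints "a b c": the columns the aggregation input must supply */
    for (int i = 0; i < n; i++)
        printf("%s ", input_target[i]);
    printf("\n");
    return 0;
}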

◆ make_grouping_rel()

static RelOptInfo * make_grouping_rel ( PlannerInfo * root,
RelOptInfo * input_rel,
PathTarget * target,
bool target_parallel_safe,
Node * havingQual 
)
static

Definition at line 3672 of file planner.c.

3675 {
3676  RelOptInfo *grouped_rel;
3677 
3678  if (IS_OTHER_REL(input_rel))
3679  {
3680  grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG,
3681  input_rel->relids);
3682  grouped_rel->reloptkind = RELOPT_OTHER_UPPER_REL;
3683  }
3684  else
3685  {
3686  /*
3687  * By tradition, the relids set for the main grouping relation is
3688  * NULL. (This could be changed, but might require adjustments
3689  * elsewhere.)
3690  */
3691  grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, NULL);
3692  }
3693 
3694  /* Set target. */
3695  grouped_rel->reltarget = target;
3696 
3697  /*
3698  * If the input relation is not parallel-safe, then the grouped relation
3699  * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
3700  * target list and HAVING quals are parallel-safe.
3701  */
3702  if (input_rel->consider_parallel && target_parallel_safe &&
3703  is_parallel_safe(root, (Node *) havingQual))
3704  grouped_rel->consider_parallel = true;
3705 
3706  /*
3707  * If the input rel belongs to a single FDW, so does the grouped rel.
3708  */
3709  grouped_rel->serverid = input_rel->serverid;
3710  grouped_rel->userid = input_rel->userid;
3711  grouped_rel->useridiscurrent = input_rel->useridiscurrent;
3712  grouped_rel->fdwroutine = input_rel->fdwroutine;
3713 
3714  return grouped_rel;
3715 }
@ RELOPT_OTHER_UPPER_REL
Definition: pathnodes.h:781

References RelOptInfo::consider_parallel, fetch_upper_rel(), IS_OTHER_REL, is_parallel_safe(), RelOptInfo::relids, RELOPT_OTHER_UPPER_REL, RelOptInfo::reloptkind, RelOptInfo::reltarget, RelOptInfo::serverid, UPPERREL_GROUP_AGG, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by create_grouping_paths(), and create_partitionwise_grouping_paths().

◆ make_partial_grouping_target()

static PathTarget * make_partial_grouping_target ( PlannerInfo * root,
PathTarget * grouping_target,
Node * havingQual 
)
static

Definition at line 5285 of file planner.c.

5288 {
5289  Query *parse = root->parse;
5290  PathTarget *partial_target;
5291  List *non_group_cols;
5292  List *non_group_exprs;
5293  int i;
5294  ListCell *lc;
5295 
5296  partial_target = create_empty_pathtarget();
5297  non_group_cols = NIL;
5298 
5299  i = 0;
5300  foreach(lc, grouping_target->exprs)
5301  {
5302  Expr *expr = (Expr *) lfirst(lc);
5303  Index sgref = get_pathtarget_sortgroupref(grouping_target, i);
5304 
5305  if (sgref && parse->groupClause &&
5306  get_sortgroupref_clause_noerr(sgref, parse->groupClause) != NULL)
5307  {
5308  /*
5309  * It's a grouping column, so add it to the partial_target as-is.
5310  * (This allows the upper agg step to repeat the grouping calcs.)
5311  */
5312  add_column_to_pathtarget(partial_target, expr, sgref);
5313  }
5314  else
5315  {
5316  /*
5317  * Non-grouping column, so just remember the expression for later
5318  * call to pull_var_clause.
5319  */
5320  non_group_cols = lappend(non_group_cols, expr);
5321  }
5322 
5323  i++;
5324  }
5325 
5326  /*
5327  * If there's a HAVING clause, we'll need the Vars/Aggrefs it uses, too.
5328  */
5329  if (havingQual)
5330  non_group_cols = lappend(non_group_cols, havingQual);
5331 
5332  /*
5333  * Pull out all the Vars, PlaceHolderVars, and Aggrefs mentioned in
5334  * non-group cols (plus HAVING), and add them to the partial_target if not
5335  * already present. (An expression used directly as a GROUP BY item will
5336  * be present already.) Note this includes Vars used in resjunk items, so
5337  * we are covering the needs of ORDER BY and window specifications.
5338  */
5339  non_group_exprs = pull_var_clause((Node *) non_group_cols,
5340  PVC_INCLUDE_AGGREGATES |
5341  PVC_RECURSE_WINDOWFUNCS |
5342  PVC_INCLUDE_PLACEHOLDERS);
5343 
5344  add_new_columns_to_pathtarget(partial_target, non_group_exprs);
5345 
5346  /*
5347  * Adjust Aggrefs to put them in partial mode. At this point all Aggrefs
5348  * are at the top level of the target list, so we can just scan the list
5349  * rather than recursing through the expression trees.
5350  */
5351  foreach(lc, partial_target->exprs)
5352  {
5353  Aggref *aggref = (Aggref *) lfirst(lc);
5354 
5355  if (IsA(aggref, Aggref))
5356  {
5357  Aggref *newaggref;
5358 
5359  /*
5360  * We shouldn't need to copy the substructure of the Aggref node,
5361  * but flat-copy the node itself to avoid damaging other trees.
5362  */
5363  newaggref = makeNode(Aggref);
5364  memcpy(newaggref, aggref, sizeof(Aggref));
5365 
5366  /* For now, assume serialization is required */
5367  mark_partial_aggref(newaggref, AGGSPLIT_INITIAL_SERIAL);
5368 
5369  lfirst(lc) = newaggref;
5370  }
5371  }
5372 
5373  /* clean up cruft */
5374  list_free(non_group_exprs);
5375  list_free(non_group_cols);
5376 
5377  /* XXX this causes some redundant cost calculation ... */
5378  return set_pathtarget_cost_width(root, partial_target);
5379 }
#define PVC_INCLUDE_AGGREGATES
Definition: optimizer.h:183
void mark_partial_aggref(Aggref *agg, AggSplit aggsplit)
Definition: planner.c:5388

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), AGGSPLIT_INITIAL_SERIAL, create_empty_pathtarget(), PathTarget::exprs, get_pathtarget_sortgroupref, get_sortgroupref_clause_noerr(), i, IsA, lappend(), lfirst, list_free(), makeNode, mark_partial_aggref(), NIL, parse(), PlannerInfo::parse, pull_var_clause(), PVC_INCLUDE_AGGREGATES, PVC_INCLUDE_PLACEHOLDERS, PVC_RECURSE_WINDOWFUNCS, and set_pathtarget_cost_width().

Referenced by create_partial_grouping_paths().
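
Marking Aggrefs with AGGSPLIT_INITIAL_SERIAL is what enables two-phase (partial) aggregation: each worker emits a partial per-group state, and the upper aggregation step combines those states, re-grouping on the same grouping columns that were kept in the partial target. A minimal arithmetic sketch of that idea (toy arrays, not executor code):

#include <stdio.h>

/*
 * Toy illustration of the two-phase aggregation enabled by the
 * partial target: each "worker" computes a partial sum per group
 * (the AGGSPLIT_INITIAL_SERIAL phase), and the upper aggregation
 * step combines the partial states, re-grouping by the same keys.
 */
int
main(void)
{
    /* (group key, value) rows split across two workers; keys are 1 or 2 */
    int w1_keys[] = {1, 1, 2}, w1_vals[] = {10, 20, 5};
    int w2_keys[] = {2, 1}, w2_vals[] = {7, 3};

    /* phase 1: each worker aggregates only the rows it scanned */
    int partial1[3] = {0}, partial2[3] = {0};
    for (int i = 0; i < 3; i++)
        partial1[w1_keys[i]] += w1_vals[i];
    for (int i = 0; i < 2; i++)
        partial2[w2_keys[i]] += w2_vals[i];

    /* phase 2: the leader combines partial states per group key */
    for (int key = 1; key <= 2; key++)
        printf("group %d: sum = %d\n", key, partial1[key] + partial2[key]);
    /* prints: group 1: sum = 33, group 2: sum = 12 */
    return 0;
}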

◆ make_pathkeys_for_groupagg()

static List * make_pathkeys_for_groupagg ( PlannerInfo * root,
List * groupClause,
List * tlist,
int * number_groupby_pathkeys 
)
static

Definition at line 3154 of file planner.c.

3156 {
3157  List *grouppathkeys = NIL;
3158  List *bestpathkeys;
3159  Bitmapset *bestaggs;
3160  Bitmapset *unprocessed_aggs;
3161  ListCell *lc;
3162  int i;
3163 
3164  Assert(groupClause != NIL || root->numOrderedAggs > 0);
3165 
3166  if (groupClause != NIL)
3167  {
3168  /* no pathkeys possible if there's an unsortable GROUP BY */
3169  if (!grouping_is_sortable(groupClause))
3170  {
3171  *number_groupby_pathkeys = 0;
3172  return NIL;
3173  }
3174 
3175  grouppathkeys = make_pathkeys_for_sortclauses(root, groupClause,
3176  tlist);
3177  *number_groupby_pathkeys = list_length(grouppathkeys);
3178  }
3179  else
3180  *number_groupby_pathkeys = 0;
3181 
3182  /*
3183  * We can't add pathkeys for ordered aggregates if there are any grouping
3184  * sets. All handling specific to ordered aggregates must be done by the
3185  * executor in that case.
3186  */
3187  if (root->numOrderedAggs == 0 || root->parse->groupingSets != NIL)
3188  return grouppathkeys;
3189 
3190  /*
3191  * Make a first pass over all AggInfos to collect a Bitmapset containing
3192  * the indexes of all AggInfos to be processed below.
3193  */
3194  unprocessed_aggs = NULL;
3195  foreach(lc, root->agginfos)
3196  {
3197  AggInfo *agginfo = lfirst_node(AggInfo, lc);
3198  Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3199 
3200  if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
3201  continue;
3202 
3203  /* only add aggregates with a DISTINCT or ORDER BY */
3204  if (aggref->aggdistinct != NIL || aggref->aggorder != NIL)
3205  unprocessed_aggs = bms_add_member(unprocessed_aggs,
3206  foreach_current_index(lc));
3207  }
3208 
3209  /*
3210  * Now process all the unprocessed_aggs to find the best set of pathkeys
3211  * for the given set of aggregates.
3212  *
3213  * On the first outer loop here 'bestaggs' will be empty. We'll populate
3214  * this during the first loop using the pathkeys for the very first
3215  * AggInfo then taking any stronger pathkeys from any other AggInfos with
3216  * a more strict set of compatible pathkeys. Once the outer loop is
3217  * complete, we mark off all the aggregates with compatible pathkeys then
3218  * remove those from the unprocessed_aggs and repeat the process to try to
3219  * find another set of pathkeys that are suitable for a larger number of
3220  * aggregates. The outer loop will stop when there are not enough
3221  * unprocessed aggregates for it to be possible to find a set of pathkeys
3222  * to suit a larger number of aggregates.
3223  */
3224  bestpathkeys = NIL;
3225  bestaggs = NULL;
3226  while (bms_num_members(unprocessed_aggs) > bms_num_members(bestaggs))
3227  {
3228  Bitmapset *aggindexes = NULL;
3229  List *currpathkeys = NIL;
3230 
3231  i = -1;
3232  while ((i = bms_next_member(unprocessed_aggs, i)) >= 0)
3233  {
3234  AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3235  Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3236  List *sortlist;
3237 
3238  if (aggref->aggdistinct != NIL)
3239  sortlist = aggref->aggdistinct;
3240  else
3241  sortlist = aggref->aggorder;
3242 
3243  /*
3244  * When not set yet, take the pathkeys from the first unprocessed
3245  * aggregate.
3246  */
3247  if (currpathkeys == NIL)
3248  {
3249  currpathkeys = make_pathkeys_for_sortclauses(root, sortlist,
3250  aggref->args);
3251 
3252  /* include the GROUP BY pathkeys, if they exist */
3253  if (grouppathkeys != NIL)
3254  currpathkeys = append_pathkeys(list_copy(grouppathkeys),
3255  currpathkeys);
3256 
3257  /* record that we found pathkeys for this aggregate */
3258  aggindexes = bms_add_member(aggindexes, i);
3259  }
3260  else
3261  {
3262  List *pathkeys;
3263 
3264  /* now look for a stronger set of matching pathkeys */
3265  pathkeys = make_pathkeys_for_sortclauses(root, sortlist,
3266  aggref->args);
3267 
3268  /* include the GROUP BY pathkeys, if they exist */
3269  if (grouppathkeys != NIL)
3270  pathkeys = append_pathkeys(list_copy(grouppathkeys),
3271  pathkeys);
3272 
3273  /* are 'pathkeys' compatible or better than 'currpathkeys'? */
3274  switch (compare_pathkeys(currpathkeys, pathkeys))
3275  {
3276  case PATHKEYS_BETTER2:
3277  /* 'pathkeys' are stronger, use these ones instead */
3278  currpathkeys = pathkeys;
3279  /* FALLTHROUGH */
3280 
3281  case PATHKEYS_BETTER1:
3282  /* 'pathkeys' are less strict */
3283  /* FALLTHROUGH */
3284 
3285  case PATHKEYS_EQUAL:
3286  /* mark this aggregate as covered by 'currpathkeys' */
3287  aggindexes = bms_add_member(aggindexes, i);
3288  break;
3289 
3290  case PATHKEYS_DIFFERENT:
3291  break;
3292  }
3293  }
3294  }
3295 
3296  /* remove the aggregates that we've just processed */
3297  unprocessed_aggs = bms_del_members(unprocessed_aggs, aggindexes);
3298 
3299  /*
3300  * If this pass included more aggregates than the previous best then
3301  * use these ones as the best set.
3302  */
3303  if (bms_num_members(aggindexes) > bms_num_members(bestaggs))
3304  {
3305  bestaggs = aggindexes;
3306  bestpathkeys = currpathkeys;
3307  }
3308  }
3309 
3310  /*
3311  * Now that we've found the best set of aggregates we can set the
3312  * presorted flag to indicate to the executor that it needn't bother
3313  * performing a sort for these Aggrefs. We're able to do this now as
3314  * there's no chance of a Hash Aggregate plan as create_grouping_paths
3315  * will not mark the GROUP BY as GROUPING_CAN_USE_HASH due to the presence
3316  * of ordered aggregates.
3317  */
3318  i = -1;
3319  while ((i = bms_next_member(bestaggs, i)) >= 0)
3320  {
3321  AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3322 
3323  foreach(lc, agginfo->aggrefs)
3324  {
3325  Aggref *aggref = lfirst_node(Aggref, lc);
3326 
3327  aggref->aggpresorted = true;
3328  }
3329  }
3330 
3331  /*
3332  * bestpathkeys includes the GROUP BY pathkeys, so if we found any ordered
3333  * aggregates, then return bestpathkeys, otherwise return the
3334  * grouppathkeys.
3335  */
3336  if (bestpathkeys != NIL)
3337  return bestpathkeys;
3338 
3339  return grouppathkeys;
3340 }
int bms_num_members(const Bitmapset *a)
Definition: bitmapset.c:649
Bitmapset * bms_del_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:932
List * append_pathkeys(List *target, List *source)
Definition: pathkeys.c:105
PathKeysComparison compare_pathkeys(List *keys1, List *keys2)
Definition: pathkeys.c:307
@ PATHKEYS_BETTER2
Definition: paths.h:198
@ PATHKEYS_BETTER1
Definition: paths.h:197
@ PATHKEYS_DIFFERENT
Definition: paths.h:199
@ PATHKEYS_EQUAL
Definition: paths.h:196
#define list_nth_node(type, list, n)
Definition: pg_list.h:325
List * aggrefs
Definition: pathnodes.h:3160
List * aggdistinct
Definition: primnodes.h:403
char aggkind
Definition: primnodes.h:418
List * args
Definition: primnodes.h:397
List * aggorder
Definition: primnodes.h:400
List * agginfos
Definition: pathnodes.h:469
List * groupingSets
Definition: parsenodes.h:173

References Aggref::aggdistinct, PlannerInfo::agginfos, Aggref::aggkind, Aggref::aggorder, AggInfo::aggrefs, append_pathkeys(), Aggref::args, Assert(), bms_add_member(), bms_del_members(), bms_next_member(), bms_num_members(), compare_pathkeys(), foreach_current_index, grouping_is_sortable(), Query::groupingSets, i, lfirst_node, linitial_node, list_copy(), list_length(), list_nth_node, make_pathkeys_for_sortclauses(), NIL, PlannerInfo::numOrderedAggs, PlannerInfo::parse, PATHKEYS_BETTER1, PATHKEYS_BETTER2, PATHKEYS_DIFFERENT, and PATHKEYS_EQUAL.

Referenced by standard_qp_callback().
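
The nested loops above amount to a greedy search: each outer pass seeds an ordering from the first unprocessed aggregate, strengthens it whenever a compatible but stricter ordering is found, retires every aggregate the final ordering covers, and keeps whichever pass covered the most aggregates. The standalone sketch below shows the same greedy idea with required orderings encoded as strings, where "compatible" means one string is a prefix of the other (a deliberate simplification of compare_pathkeys; all names here are hypothetical).

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Ordering X covers Y when Y is a prefix of X (toy compare_pathkeys). */
static bool
is_prefix(const char *shorter, const char *longer)
{
    return strncmp(shorter, longer, strlen(shorter)) == 0;
}

int
main(void)
{
    /* required orderings for four ordered/DISTINCT aggregates */
    const char *want[] = {"ab", "abc", "ab", "xy"};
    int         n = 4;
    bool        done[4] = {false};
    const char *best = "";
    int         bestcount = 0;
    int         remaining = n;

    /* stop once too few aggregates remain to beat the best pass */
    while (remaining > bestcount)
    {
        const char *curr = NULL;
        int         count = 0;

        for (int i = 0; i < n; i++)
        {
            if (done[i])
                continue;
            if (curr == NULL)
            {
                curr = want[i];     /* seed with first unprocessed agg */
                count = 1;
            }
            else if (is_prefix(curr, want[i]))
            {
                curr = want[i];     /* stronger compatible ordering */
                count++;
            }
            else if (is_prefix(want[i], curr))
                count++;            /* already covered by curr */
            /* else: incompatible, leave for a later pass */
        }

        if (count > bestcount)
        {
            best = curr;
            bestcount = count;
        }

        /* retire the aggregates this pass covered */
        for (int i = 0; i < n; i++)
            if (!done[i] &&
                (is_prefix(want[i], curr) || is_prefix(curr, want[i])))
            {
                done[i] = true;
                remaining--;
            }
    }

    printf("best ordering: %s (covers %d of %d aggregates)\n",
           best, bestcount, n);     /* abc, 3 of 4 */
    return 0;
}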

◆ make_pathkeys_for_window()

static List * make_pathkeys_for_window ( PlannerInfo * root,
WindowClause * wc,
List * tlist 
)
static

Definition at line 5723 of file planner.c.

5725 {
5726  List *window_pathkeys;
5727  List *window_sortclauses;
5728 
5729  /* Throw error if can't sort */
5730  if (!grouping_is_sortable(wc->partitionClause))
5731  ereport(ERROR,
5732  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
5733  errmsg("could not implement window PARTITION BY"),
5734  errdetail("Window partitioning columns must be of sortable datatypes.")));
5735  if (!grouping_is_sortable(wc->orderClause))
5736  ereport(ERROR,
5737  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
5738  errmsg("could not implement window ORDER BY"),
5739  errdetail("Window ordering columns must be of sortable datatypes.")));
5740 
5741  /* Okay, make the combined pathkeys */
5742  window_sortclauses = list_concat_copy(wc->partitionClause, wc->orderClause);
5743  window_pathkeys = make_pathkeys_for_sortclauses(root,
5744  window_sortclauses,
5745  tlist);
5746  list_free(window_sortclauses);
5747  return window_pathkeys;
5748 }
List * list_concat_copy(const List *list1, const List *list2)
Definition: list.c:597
List * partitionClause
Definition: parsenodes.h:1416
List * orderClause
Definition: parsenodes.h:1417

References ereport, errcode(), errdetail(), errmsg(), ERROR, grouping_is_sortable(), list_concat_copy(), list_free(), make_pathkeys_for_sortclauses(), WindowClause::orderClause, and WindowClause::partitionClause.

Referenced by create_one_window_path(), and standard_qp_callback().
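
Note the order of the concatenation: PARTITION BY columns precede ORDER BY columns, so a single sort can both group the partitions and order the rows within each. A trivial standalone sketch of that key order (plain strings, not planner code):

#include <stdio.h>

/*
 * Toy illustration of the combined sort keys built above:
 * partition keys first, then ordering keys.
 */
int
main(void)
{
    /* ... OVER (PARTITION BY dept ORDER BY salary DESC) */
    const char *partition_clause[] = {"dept"};
    const char *order_clause[] = {"salary DESC"};

    printf("sort by:");
    for (int i = 0; i < 1; i++)
        printf(" %s,", partition_clause[i]);
    for (int i = 0; i < 1; i++)
        printf(" %s", order_clause[i]);
    printf("\n");   /* sort by: dept, salary DESC */
    return 0;
}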

◆ make_sort_input_target()

static PathTarget * make_sort_input_target ( PlannerInfo * root,
PathTarget * final_target,
bool * have_postponed_srfs 
)