PostgreSQL Source Code  git master
planner.c File Reference
#include "postgres.h"
#include <limits.h>
#include <math.h>
#include "access/genam.h"
#include "access/htup_details.h"
#include "access/parallel.h"
#include "access/sysattr.h"
#include "access/table.h"
#include "access/xact.h"
#include "catalog/pg_aggregate.h"
#include "catalog/pg_constraint.h"
#include "catalog/pg_inherits.h"
#include "catalog/pg_proc.h"
#include "catalog/pg_type.h"
#include "executor/executor.h"
#include "executor/nodeAgg.h"
#include "foreign/fdwapi.h"
#include "jit/jit.h"
#include "lib/bipartite_match.h"
#include "lib/knapsack.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "nodes/supportnodes.h"
#include "optimizer/appendinfo.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/inherit.h"
#include "optimizer/optimizer.h"
#include "optimizer/paramassign.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/plancat.h"
#include "optimizer/planmain.h"
#include "optimizer/planner.h"
#include "optimizer/prep.h"
#include "optimizer/subselect.h"
#include "optimizer/tlist.h"
#include "parser/analyze.h"
#include "parser/parse_agg.h"
#include "parser/parse_relation.h"
#include "parser/parsetree.h"
#include "partitioning/partdesc.h"
#include "rewrite/rewriteManip.h"
#include "storage/dsm_impl.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
#include "utils/selfuncs.h"
#include "utils/syscache.h"
Include dependency graph for planner.c:


Data Structures

struct  grouping_sets_data
 
struct  WindowClauseSortData
 
struct  standard_qp_extra
 

Macros

#define EXPRKIND_QUAL   0
 
#define EXPRKIND_TARGET   1
 
#define EXPRKIND_RTFUNC   2
 
#define EXPRKIND_RTFUNC_LATERAL   3
 
#define EXPRKIND_VALUES   4
 
#define EXPRKIND_VALUES_LATERAL   5
 
#define EXPRKIND_LIMIT   6
 
#define EXPRKIND_APPINFO   7
 
#define EXPRKIND_PHV   8
 
#define EXPRKIND_TABLESAMPLE   9
 
#define EXPRKIND_ARBITER_ELEM   10
 
#define EXPRKIND_TABLEFUNC   11
 
#define EXPRKIND_TABLEFUNC_LATERAL   12
 

Functions

static Node * preprocess_expression (PlannerInfo *root, Node *expr, int kind)
 
static void preprocess_qual_conditions (PlannerInfo *root, Node *jtnode)
 
static void grouping_planner (PlannerInfo *root, double tuple_fraction)
 
static grouping_sets_data * preprocess_grouping_sets (PlannerInfo *root)
 
static List * remap_to_groupclause_idx (List *groupClause, List *gsets, int *tleref_to_colnum_map)
 
static void preprocess_rowmarks (PlannerInfo *root)
 
static double preprocess_limit (PlannerInfo *root, double tuple_fraction, int64 *offset_est, int64 *count_est)
 
static void remove_useless_groupby_columns (PlannerInfo *root)
 
static List * preprocess_groupclause (PlannerInfo *root, List *force)
 
static List * extract_rollup_sets (List *groupingSets)
 
static List * reorder_grouping_sets (List *groupingSets, List *sortclause)
 
static void standard_qp_callback (PlannerInfo *root, void *extra)
 
static double get_number_of_groups (PlannerInfo *root, double path_rows, grouping_sets_data *gd, List *target_list)
 
static RelOptInfo * create_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, grouping_sets_data *gd)
 
static bool is_degenerate_grouping (PlannerInfo *root)
 
static void create_degenerate_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel)
 
static RelOptInfo * make_grouping_rel (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, Node *havingQual)
 
static void create_ordinary_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, GroupPathExtraData *extra, RelOptInfo **partially_grouped_rel_p)
 
static void consider_groupingsets_paths (PlannerInfo *root, RelOptInfo *grouped_rel, Path *path, bool is_sorted, bool can_hash, grouping_sets_data *gd, const AggClauseCosts *agg_costs, double dNumGroups)
 
static RelOptInfo * create_window_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *input_target, PathTarget *output_target, bool output_target_parallel_safe, WindowFuncLists *wflists, List *activeWindows)
 
static void create_one_window_path (PlannerInfo *root, RelOptInfo *window_rel, Path *path, PathTarget *input_target, PathTarget *output_target, WindowFuncLists *wflists, List *activeWindows)
 
static RelOptInfo * create_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel)
 
static void create_partial_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *final_distinct_rel)
 
static RelOptInfo * create_final_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *distinct_rel)
 
static RelOptInfo * create_ordered_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, double limit_tuples)
 
static PathTarget * make_group_input_target (PlannerInfo *root, PathTarget *final_target)
 
static PathTarget * make_partial_grouping_target (PlannerInfo *root, PathTarget *grouping_target, Node *havingQual)
 
static List * postprocess_setop_tlist (List *new_tlist, List *orig_tlist)
 
static void optimize_window_clauses (PlannerInfo *root, WindowFuncLists *wflists)
 
static List * select_active_windows (PlannerInfo *root, WindowFuncLists *wflists)
 
static PathTarget * make_window_input_target (PlannerInfo *root, PathTarget *final_target, List *activeWindows)
 
static List * make_pathkeys_for_window (PlannerInfo *root, WindowClause *wc, List *tlist)
 
static PathTarget * make_sort_input_target (PlannerInfo *root, PathTarget *final_target, bool *have_postponed_srfs)
 
static void adjust_paths_for_srfs (PlannerInfo *root, RelOptInfo *rel, List *targets, List *targets_contain_srfs)
 
static void add_paths_to_grouping_rel (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, double dNumGroups, GroupPathExtraData *extra)
 
static RelOptInfo * create_partial_grouping_paths (PlannerInfo *root, RelOptInfo *grouped_rel, RelOptInfo *input_rel, grouping_sets_data *gd, GroupPathExtraData *extra, bool force_rel_creation)
 
static void gather_grouping_paths (PlannerInfo *root, RelOptInfo *rel)
 
static bool can_partial_agg (PlannerInfo *root)
 
static void apply_scanjoin_target_to_paths (PlannerInfo *root, RelOptInfo *rel, List *scanjoin_targets, List *scanjoin_targets_contain_srfs, bool scanjoin_target_parallel_safe, bool tlist_same_exprs)
 
static void create_partitionwise_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, PartitionwiseAggregateType patype, GroupPathExtraData *extra)
 
static bool group_by_has_partkey (RelOptInfo *input_rel, List *targetList, List *groupClause)
 
static int common_prefix_cmp (const void *a, const void *b)
 
PlannedStmt * planner (Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams)
 
PlannedStmt * standard_planner (Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams)
 
PlannerInfo * subquery_planner (PlannerGlobal *glob, Query *parse, PlannerInfo *parent_root, bool hasRecursion, double tuple_fraction)
 
Expr * preprocess_phv_expression (PlannerInfo *root, Expr *expr)
 
RowMarkType select_rowmark_type (RangeTblEntry *rte, LockClauseStrength strength)
 
bool limit_needed (Query *parse)
 
static bool has_volatile_pathkey (List *keys)
 
static void adjust_group_pathkeys_for_groupagg (PlannerInfo *root)
 
void mark_partial_aggref (Aggref *agg, AggSplit aggsplit)
 
Path * get_cheapest_fractional_path (RelOptInfo *rel, double tuple_fraction)
 
Expr * expression_planner (Expr *expr)
 
Expr * expression_planner_with_deps (Expr *expr, List **relationOids, List **invalItems)
 
bool plan_cluster_use_sort (Oid tableOid, Oid indexOid)
 
int plan_create_index_workers (Oid tableOid, Oid indexOid)
 

Variables

double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION
 
int debug_parallel_query = DEBUG_PARALLEL_OFF
 
bool parallel_leader_participation = true
 
planner_hook_type planner_hook = NULL
 
create_upper_paths_hook_type create_upper_paths_hook = NULL
 

Macro Definition Documentation

◆ EXPRKIND_APPINFO

#define EXPRKIND_APPINFO   7

Definition at line 91 of file planner.c.

◆ EXPRKIND_ARBITER_ELEM

#define EXPRKIND_ARBITER_ELEM   10

Definition at line 94 of file planner.c.

◆ EXPRKIND_LIMIT

#define EXPRKIND_LIMIT   6

Definition at line 90 of file planner.c.

◆ EXPRKIND_PHV

#define EXPRKIND_PHV   8

Definition at line 92 of file planner.c.

◆ EXPRKIND_QUAL

#define EXPRKIND_QUAL   0

Definition at line 84 of file planner.c.

◆ EXPRKIND_RTFUNC

#define EXPRKIND_RTFUNC   2

Definition at line 86 of file planner.c.

◆ EXPRKIND_RTFUNC_LATERAL

#define EXPRKIND_RTFUNC_LATERAL   3

Definition at line 87 of file planner.c.

◆ EXPRKIND_TABLEFUNC

#define EXPRKIND_TABLEFUNC   11

Definition at line 95 of file planner.c.

◆ EXPRKIND_TABLEFUNC_LATERAL

#define EXPRKIND_TABLEFUNC_LATERAL   12

Definition at line 96 of file planner.c.

◆ EXPRKIND_TABLESAMPLE

#define EXPRKIND_TABLESAMPLE   9

Definition at line 93 of file planner.c.

◆ EXPRKIND_TARGET

#define EXPRKIND_TARGET   1

Definition at line 85 of file planner.c.

◆ EXPRKIND_VALUES

#define EXPRKIND_VALUES   4

Definition at line 88 of file planner.c.

◆ EXPRKIND_VALUES_LATERAL

#define EXPRKIND_VALUES_LATERAL   5

Definition at line 89 of file planner.c.
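
The EXPRKIND_ codes above tag where an expression came from, so that preprocess_expression() can vary its processing by origin; for instance, canonicalize_qual() is applied only for EXPRKIND_QUAL. An abridged sketch of the call pattern, modeled on subquery_planner() (not a complete excerpt):

/* Target-list expressions are tagged EXPRKIND_TARGET... */
parse->targetList = (List *)
    preprocess_expression(root, (Node *) parse->targetList,
                          EXPRKIND_TARGET);

/* ...while LIMIT/OFFSET expressions are tagged EXPRKIND_LIMIT. */
parse->limitOffset = preprocess_expression(root, parse->limitOffset,
                                           EXPRKIND_LIMIT);
parse->limitCount = preprocess_expression(root, parse->limitCount,
                                          EXPRKIND_LIMIT);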

Function Documentation

◆ add_paths_to_grouping_rel()

static void add_paths_to_grouping_rel ( PlannerInfo *root,
RelOptInfo *input_rel,
RelOptInfo *grouped_rel,
RelOptInfo *partially_grouped_rel,
const AggClauseCosts *agg_costs,
grouping_sets_data *gd,
double  dNumGroups,
GroupPathExtraData *extra 
)
static

Definition at line 6767 of file planner.c.

6773 {
6774  Query *parse = root->parse;
6775  Path *cheapest_path = input_rel->cheapest_total_path;
6776  ListCell *lc;
6777  bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
6778  bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
6779  List *havingQual = (List *) extra->havingQual;
6780  AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
6781 
6782  if (can_sort)
6783  {
6784  /*
6785  * Use any available suitably-sorted path as input, and also consider
6786  * sorting the cheapest-total path and incremental sort on any paths
6787  * with presorted keys.
6788  */
6789  foreach(lc, input_rel->pathlist)
6790  {
6791  Path *path = (Path *) lfirst(lc);
6792  bool is_sorted;
6793  int presorted_keys;
6794 
6795  is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
6796  path->pathkeys,
6797  &presorted_keys);
6798 
6799  if (!is_sorted)
6800  {
6801  /*
6802  * Try at least sorting the cheapest path and also try
6803  * incrementally sorting any path which is partially sorted
6804  * already (no need to deal with paths which have presorted
6805  * keys when incremental sort is disabled unless it's the
6806  * cheapest input path).
6807  */
6808  if (path != cheapest_path &&
6809  (presorted_keys == 0 || !enable_incremental_sort))
6810  continue;
6811 
6812  /*
6813  * We've no need to consider both a sort and incremental sort.
6814  * We'll just do a sort if there are no presorted keys and an
6815  * incremental sort when there are presorted keys.
6816  */
6817  if (presorted_keys == 0 || !enable_incremental_sort)
6818  path = (Path *) create_sort_path(root,
6819  grouped_rel,
6820  path,
6821  root->group_pathkeys,
6822  -1.0);
6823  else
6824  path = (Path *) create_incremental_sort_path(root,
6825  grouped_rel,
6826  path,
6827  root->group_pathkeys,
6828  presorted_keys,
6829  -1.0);
6830  }
6831 
6832  /* Now decide what to stick atop it */
6833  if (parse->groupingSets)
6834  {
6835  consider_groupingsets_paths(root, grouped_rel,
6836  path, true, can_hash,
6837  gd, agg_costs, dNumGroups);
6838  }
6839  else if (parse->hasAggs)
6840  {
6841  /*
6842  * We have aggregation, possibly with plain GROUP BY. Make an
6843  * AggPath.
6844  */
6845  add_path(grouped_rel, (Path *)
6846  create_agg_path(root,
6847  grouped_rel,
6848  path,
6849  grouped_rel->reltarget,
6850  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
 6851  AGGSPLIT_SIMPLE,
 6852  root->processed_groupClause,
6853  havingQual,
6854  agg_costs,
6855  dNumGroups));
6856  }
6857  else if (parse->groupClause)
6858  {
6859  /*
6860  * We have GROUP BY without aggregation or grouping sets. Make
6861  * a GroupPath.
6862  */
6863  add_path(grouped_rel, (Path *)
6864  create_group_path(root,
6865  grouped_rel,
6866  path,
6867  root->processed_groupClause,
6868  havingQual,
6869  dNumGroups));
6870  }
6871  else
6872  {
6873  /* Other cases should have been handled above */
6874  Assert(false);
6875  }
6876  }
6877 
6878  /*
6879  * Instead of operating directly on the input relation, we can
6880  * consider finalizing a partially aggregated path.
6881  */
6882  if (partially_grouped_rel != NULL)
6883  {
6884  foreach(lc, partially_grouped_rel->pathlist)
6885  {
6886  Path *path = (Path *) lfirst(lc);
6887  bool is_sorted;
6888  int presorted_keys;
6889 
6890  is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
6891  path->pathkeys,
6892  &presorted_keys);
6893 
6894  if (!is_sorted)
6895  {
6896  /*
6897  * Try at least sorting the cheapest path and also try
6898  * incrementally sorting any path which is partially
6899  * sorted already (no need to deal with paths which have
6900  * presorted keys when incremental sort is disabled unless
6901  * it's the cheapest input path).
6902  */
6903  if (path != partially_grouped_rel->cheapest_total_path &&
6904  (presorted_keys == 0 || !enable_incremental_sort))
6905  continue;
6906 
6907  /*
6908  * We've no need to consider both a sort and incremental
6909  * sort. We'll just do a sort if there are no pre-sorted
6910  * keys and an incremental sort when there are presorted
6911  * keys.
6912  */
6913  if (presorted_keys == 0 || !enable_incremental_sort)
6914  path = (Path *) create_sort_path(root,
6915  grouped_rel,
6916  path,
6917  root->group_pathkeys,
6918  -1.0);
6919  else
6920  path = (Path *) create_incremental_sort_path(root,
6921  grouped_rel,
6922  path,
6923  root->group_pathkeys,
6924  presorted_keys,
6925  -1.0);
6926  }
6927 
6928  if (parse->hasAggs)
6929  add_path(grouped_rel, (Path *)
6930  create_agg_path(root,
6931  grouped_rel,
6932  path,
6933  grouped_rel->reltarget,
6934  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
 6935  AGGSPLIT_FINAL_DESERIAL,
 6936  root->processed_groupClause,
6937  havingQual,
6938  agg_final_costs,
6939  dNumGroups));
6940  else
6941  add_path(grouped_rel, (Path *)
6942  create_group_path(root,
6943  grouped_rel,
6944  path,
6945  root->processed_groupClause,
6946  havingQual,
6947  dNumGroups));
6948 
6949  }
6950  }
6951  }
6952 
6953  if (can_hash)
6954  {
6955  if (parse->groupingSets)
6956  {
6957  /*
6958  * Try for a hash-only groupingsets path over unsorted input.
6959  */
6960  consider_groupingsets_paths(root, grouped_rel,
6961  cheapest_path, false, true,
6962  gd, agg_costs, dNumGroups);
6963  }
6964  else
6965  {
6966  /*
6967  * Generate a HashAgg Path. We just need an Agg over the
6968  * cheapest-total input path, since input order won't matter.
6969  */
6970  add_path(grouped_rel, (Path *)
6971  create_agg_path(root, grouped_rel,
6972  cheapest_path,
6973  grouped_rel->reltarget,
6974  AGG_HASHED,
 6975  AGGSPLIT_SIMPLE,
 6976  root->processed_groupClause,
6977  havingQual,
6978  agg_costs,
6979  dNumGroups));
6980  }
6981 
6982  /*
6983  * Generate a Finalize HashAgg Path atop of the cheapest partially
6984  * grouped path, assuming there is one
6985  */
6986  if (partially_grouped_rel && partially_grouped_rel->pathlist)
6987  {
6988  Path *path = partially_grouped_rel->cheapest_total_path;
6989 
6990  add_path(grouped_rel, (Path *)
6991  create_agg_path(root,
6992  grouped_rel,
6993  path,
6994  grouped_rel->reltarget,
6995  AGG_HASHED,
 6996  AGGSPLIT_FINAL_DESERIAL,
 6997  root->processed_groupClause,
6998  havingQual,
6999  agg_final_costs,
7000  dNumGroups));
7001  }
7002  }
7003 
7004  /*
7005  * When partitionwise aggregate is used, we might have fully aggregated
7006  * paths in the partial pathlist, because add_paths_to_append_rel() will
7007  * consider a path for grouped_rel consisting of a Parallel Append of
7008  * non-partial paths from each child.
7009  */
7010  if (grouped_rel->partial_pathlist != NIL)
7011  gather_grouping_paths(root, grouped_rel);
7012 }
bool enable_incremental_sort
Definition: costsize.c:141
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:77
Assert(fmt[strlen(fmt) - 1] !='\n')
@ AGG_SORTED
Definition: nodes.h:363
@ AGG_HASHED
Definition: nodes.h:364
@ AGG_PLAIN
Definition: nodes.h:362
@ AGGSPLIT_FINAL_DESERIAL
Definition: nodes.h:389
@ AGGSPLIT_SIMPLE
Definition: nodes.h:385
bool pathkeys_count_contained_in(List *keys1, List *keys2, int *n_common)
Definition: pathkeys.c:359
SortPath * create_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, double limit_tuples)
Definition: pathnode.c:2953
GroupPath * create_group_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *groupClause, List *qual, double numGroups)
Definition: pathnode.c:2997
IncrementalSortPath * create_incremental_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, int presorted_keys, double limit_tuples)
Definition: pathnode.c:2904
AggPath * create_agg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, AggStrategy aggstrategy, AggSplit aggsplit, List *groupClause, List *qual, const AggClauseCosts *aggcosts, double numGroups)
Definition: pathnode.c:3108
void add_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:422
#define GROUPING_CAN_USE_HASH
Definition: pathnodes.h:3188
#define GROUPING_CAN_USE_SORT
Definition: pathnodes.h:3187
#define lfirst(lc)
Definition: pg_list.h:172
#define NIL
Definition: pg_list.h:68
static void gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
Definition: planner.c:7354
static void consider_groupingsets_paths(PlannerInfo *root, RelOptInfo *grouped_rel, Path *path, bool is_sorted, bool can_hash, grouping_sets_data *gd, const AggClauseCosts *agg_costs, double dNumGroups)
Definition: planner.c:4068
static struct subre * parse(struct vars *v, int stopper, int type, struct state *init, struct state *final)
Definition: regcomp.c:717
AggClauseCosts agg_final_costs
Definition: pathnodes.h:3228
Definition: pg_list.h:54
List * pathkeys
Definition: pathnodes.h:1639
List * group_pathkeys
Definition: pathnodes.h:388
List * processed_groupClause
Definition: pathnodes.h:433
Query * parse
Definition: pathnodes.h:202
struct PathTarget * reltarget
Definition: pathnodes.h:884
List * pathlist
Definition: pathnodes.h:889
struct Path * cheapest_total_path
Definition: pathnodes.h:893
List * partial_pathlist
Definition: pathnodes.h:891

References add_path(), GroupPathExtraData::agg_final_costs, AGG_HASHED, AGG_PLAIN, AGG_SORTED, AGGSPLIT_FINAL_DESERIAL, AGGSPLIT_SIMPLE, Assert(), RelOptInfo::cheapest_total_path, consider_groupingsets_paths(), create_agg_path(), create_group_path(), create_incremental_sort_path(), create_sort_path(), enable_incremental_sort, GroupPathExtraData::flags, gather_grouping_paths(), PlannerInfo::group_pathkeys, GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, GroupPathExtraData::havingQual, if(), lfirst, NIL, parse(), PlannerInfo::parse, RelOptInfo::partial_pathlist, Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::pathlist, PlannerInfo::processed_groupClause, and RelOptInfo::reltarget.

Referenced by create_ordinary_grouping_paths().
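
Extensions can compete with the paths generated here: once the core planner has populated the grouped relation, create_upper_paths_hook is called for the UPPERREL_GROUP_AGG stage, and any path added via add_path() competes on cost with the ones built above. A minimal sketch (the hook function and the commented-out custom path are hypothetical):

#include "postgres.h"
#include "fmgr.h"
#include "optimizer/pathnode.h"
#include "optimizer/planner.h"

PG_MODULE_MAGIC;

static create_upper_paths_hook_type prev_upper_hook = NULL;

static void
my_upper_paths(PlannerInfo *root, UpperRelationKind stage,
               RelOptInfo *input_rel, RelOptInfo *output_rel,
               void *extra)
{
    if (prev_upper_hook)
        prev_upper_hook(root, stage, input_rel, output_rel, extra);

    if (stage == UPPERREL_GROUP_AGG)
    {
        /*
         * output_rel is the grouped relation; a custom aggregation
         * path could be offered here, e.g.
         * add_path(output_rel, (Path *) my_custom_agg_path);
         */
    }
}

void
_PG_init(void)
{
    prev_upper_hook = create_upper_paths_hook;
    create_upper_paths_hook = my_upper_paths;
}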

◆ adjust_group_pathkeys_for_groupagg()

static void adjust_group_pathkeys_for_groupagg ( PlannerInfo *root)
static

Definition at line 3218 of file planner.c.

3219 {
3220  List *grouppathkeys = root->group_pathkeys;
3221  List *bestpathkeys;
3222  Bitmapset *bestaggs;
3223  Bitmapset *unprocessed_aggs;
3224  ListCell *lc;
3225  int i;
3226 
3227  /* Shouldn't be here if there are grouping sets */
3228  Assert(root->parse->groupingSets == NIL);
3229  /* Shouldn't be here unless there are some ordered aggregates */
3230  Assert(root->numOrderedAggs > 0);
3231 
3232  /* Do nothing if disabled */
 3233  if (!enable_presorted_aggregate)
 3234  return;
3235 
3236  /*
3237  * Make a first pass over all AggInfos to collect a Bitmapset containing
3238  * the indexes of all AggInfos to be processed below.
3239  */
3240  unprocessed_aggs = NULL;
3241  foreach(lc, root->agginfos)
3242  {
3243  AggInfo *agginfo = lfirst_node(AggInfo, lc);
3244  Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3245 
3246  if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
3247  continue;
3248 
3249  /* only add aggregates with a DISTINCT or ORDER BY */
3250  if (aggref->aggdistinct != NIL || aggref->aggorder != NIL)
3251  unprocessed_aggs = bms_add_member(unprocessed_aggs,
3252  foreach_current_index(lc));
3253  }
3254 
3255  /*
3256  * Now process all the unprocessed_aggs to find the best set of pathkeys
3257  * for the given set of aggregates.
3258  *
3259  * On the first outer loop here 'bestaggs' will be empty. We'll populate
3260  * this during the first loop using the pathkeys for the very first
3261  * AggInfo then taking any stronger pathkeys from any other AggInfos with
3262  * a more strict set of compatible pathkeys. Once the outer loop is
3263  * complete, we mark off all the aggregates with compatible pathkeys then
3264  * remove those from the unprocessed_aggs and repeat the process to try to
3265  * find another set of pathkeys that are suitable for a larger number of
3266  * aggregates. The outer loop will stop when there are not enough
3267  * unprocessed aggregates for it to be possible to find a set of pathkeys
3268  * to suit a larger number of aggregates.
3269  */
3270  bestpathkeys = NIL;
3271  bestaggs = NULL;
3272  while (bms_num_members(unprocessed_aggs) > bms_num_members(bestaggs))
3273  {
3274  Bitmapset *aggindexes = NULL;
3275  List *currpathkeys = NIL;
3276 
3277  i = -1;
3278  while ((i = bms_next_member(unprocessed_aggs, i)) >= 0)
3279  {
3280  AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3281  Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3282  List *sortlist;
3283  List *pathkeys;
3284 
3285  if (aggref->aggdistinct != NIL)
3286  sortlist = aggref->aggdistinct;
3287  else
3288  sortlist = aggref->aggorder;
3289 
3290  pathkeys = make_pathkeys_for_sortclauses(root, sortlist,
3291  aggref->args);
3292 
3293  /*
3294  * Ignore Aggrefs which have volatile functions in their ORDER BY
3295  * or DISTINCT clause.
3296  */
3297  if (has_volatile_pathkey(pathkeys))
3298  {
3299  unprocessed_aggs = bms_del_member(unprocessed_aggs, i);
3300  continue;
3301  }
3302 
3303  /*
3304  * When not set yet, take the pathkeys from the first unprocessed
3305  * aggregate.
3306  */
3307  if (currpathkeys == NIL)
3308  {
3309  currpathkeys = pathkeys;
3310 
3311  /* include the GROUP BY pathkeys, if they exist */
3312  if (grouppathkeys != NIL)
3313  currpathkeys = append_pathkeys(list_copy(grouppathkeys),
3314  currpathkeys);
3315 
3316  /* record that we found pathkeys for this aggregate */
3317  aggindexes = bms_add_member(aggindexes, i);
3318  }
3319  else
3320  {
3321  /* now look for a stronger set of matching pathkeys */
3322 
3323  /* include the GROUP BY pathkeys, if they exist */
3324  if (grouppathkeys != NIL)
3325  pathkeys = append_pathkeys(list_copy(grouppathkeys),
3326  pathkeys);
3327 
3328  /* are 'pathkeys' compatible or better than 'currpathkeys'? */
3329  switch (compare_pathkeys(currpathkeys, pathkeys))
3330  {
3331  case PATHKEYS_BETTER2:
3332  /* 'pathkeys' are stronger, use these ones instead */
3333  currpathkeys = pathkeys;
3334  /* FALLTHROUGH */
3335 
3336  case PATHKEYS_BETTER1:
3337  /* 'pathkeys' are less strict */
3338  /* FALLTHROUGH */
3339 
3340  case PATHKEYS_EQUAL:
3341  /* mark this aggregate as covered by 'currpathkeys' */
3342  aggindexes = bms_add_member(aggindexes, i);
3343  break;
3344 
3345  case PATHKEYS_DIFFERENT:
3346  break;
3347  }
3348  }
3349  }
3350 
3351  /* remove the aggregates that we've just processed */
3352  unprocessed_aggs = bms_del_members(unprocessed_aggs, aggindexes);
3353 
3354  /*
3355  * If this pass included more aggregates than the previous best then
3356  * use these ones as the best set.
3357  */
3358  if (bms_num_members(aggindexes) > bms_num_members(bestaggs))
3359  {
3360  bestaggs = aggindexes;
3361  bestpathkeys = currpathkeys;
3362  }
3363  }
3364 
3365  /*
3366  * If we found any ordered aggregates, update root->group_pathkeys to add
3367  * the best set of aggregate pathkeys. Note that bestpathkeys includes
3368  * the original GROUP BY pathkeys already.
3369  */
3370  if (bestpathkeys != NIL)
3371  root->group_pathkeys = bestpathkeys;
3372 
3373  /*
3374  * Now that we've found the best set of aggregates we can set the
3375  * presorted flag to indicate to the executor that it needn't bother
3376  * performing a sort for these Aggrefs. We're able to do this now as
3377  * there's no chance of a Hash Aggregate plan as create_grouping_paths
3378  * will not mark the GROUP BY as GROUPING_CAN_USE_HASH due to the presence
3379  * of ordered aggregates.
3380  */
3381  i = -1;
3382  while ((i = bms_next_member(bestaggs, i)) >= 0)
3383  {
3384  AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3385 
3386  foreach(lc, agginfo->aggrefs)
3387  {
3388  Aggref *aggref = lfirst_node(Aggref, lc);
3389 
3390  aggref->aggpresorted = true;
3391  }
3392  }
3393 }
int bms_next_member(const Bitmapset *a, int prevbit)
Definition: bitmapset.c:1039
int bms_num_members(const Bitmapset *a)
Definition: bitmapset.c:665
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:755
Bitmapset * bms_del_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:960
Bitmapset * bms_del_member(Bitmapset *a, int x)
Definition: bitmapset.c:792
bool enable_presorted_aggregate
Definition: costsize.c:154
int i
Definition: isn.c:73
List * list_copy(const List *oldlist)
Definition: list.c:1572
List * append_pathkeys(List *target, List *source)
Definition: pathkeys.c:105
List * make_pathkeys_for_sortclauses(PlannerInfo *root, List *sortclauses, List *tlist)
Definition: pathkeys.c:1129
PathKeysComparison compare_pathkeys(List *keys1, List *keys2)
Definition: pathkeys.c:301
@ PATHKEYS_BETTER2
Definition: paths.h:198
@ PATHKEYS_BETTER1
Definition: paths.h:197
@ PATHKEYS_DIFFERENT
Definition: paths.h:199
@ PATHKEYS_EQUAL
Definition: paths.h:196
#define lfirst_node(type, lc)
Definition: pg_list.h:176
#define linitial_node(type, l)
Definition: pg_list.h:181
#define list_nth_node(type, list, n)
Definition: pg_list.h:327
#define foreach_current_index(cell)
Definition: pg_list.h:403
static bool has_volatile_pathkey(List *keys)
Definition: planner.c:3173
List * aggrefs
Definition: pathnodes.h:3309
List * aggdistinct
Definition: primnodes.h:452
List * args
Definition: primnodes.h:446
List * aggorder
Definition: primnodes.h:449
int numOrderedAggs
Definition: pathnodes.h:514
List * agginfos
Definition: pathnodes.h:510
List * groupingSets
Definition: parsenodes.h:201

References Aggref::aggdistinct, PlannerInfo::agginfos, Aggref::aggorder, AggInfo::aggrefs, append_pathkeys(), Aggref::args, Assert(), bms_add_member(), bms_del_member(), bms_del_members(), bms_next_member(), bms_num_members(), compare_pathkeys(), enable_presorted_aggregate, foreach_current_index, PlannerInfo::group_pathkeys, Query::groupingSets, has_volatile_pathkey(), i, lfirst_node, linitial_node, list_copy(), list_nth_node, make_pathkeys_for_sortclauses(), NIL, PlannerInfo::numOrderedAggs, PlannerInfo::parse, PATHKEYS_BETTER1, PATHKEYS_BETTER2, PATHKEYS_DIFFERENT, and PATHKEYS_EQUAL.

Referenced by standard_qp_callback().
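
The greedy selection above can be illustrated without planner machinery. In the toy program below (entirely hypothetical data), strings stand in for pathkey lists and "one string is a prefix of the other" stands in for compare_pathkeys() compatibility, with the longer string playing the role of the stronger pathkeys; the control flow mirrors the function's outer loop:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool
is_prefix(const char *a, const char *b)
{
    return strncmp(a, b, strlen(a)) == 0;
}

int
main(void)
{
    /* ORDER BY requirements of four hypothetical ordered aggregates */
    const char *agg_keys[] = {"a", "ab", "ab", "c"};
    int n = 4;
    bool done[4] = {false, false, false, false};
    int remaining = n;
    int best_count = 0;

    /* Stop once no pass can cover more aggregates than the best pass. */
    while (remaining > best_count)
    {
        const char *curr = NULL;
        bool covered[4] = {false, false, false, false};
        int count = 0;

        for (int i = 0; i < n; i++)
        {
            if (done[i])
                continue;
            if (curr == NULL || is_prefix(curr, agg_keys[i]))
            {
                curr = agg_keys[i];   /* stronger keys: keep the longer */
                covered[i] = true;
                count++;
            }
            else if (is_prefix(agg_keys[i], curr))
            {
                covered[i] = true;    /* weaker or equal: already covered */
                count++;
            }
        }
        for (int i = 0; i < n; i++)
        {
            if (covered[i])
            {
                done[i] = true;       /* analogous to bms_del_members() */
                remaining--;
            }
        }
        if (count > best_count)
        {
            best_count = count;
            printf("best: sort by \"%s\" covers %d aggregate(s)\n",
                   curr, count);
        }
    }
    return 0;
}

With inputs "a", "ab", "ab", "c", the first pass settles on sorting by (a, b), covering three of the four aggregates; the lone "c" aggregate cannot beat that cover, so the loop stops, just as the real function stops once the unprocessed set is no larger than the best set.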

◆ adjust_paths_for_srfs()

static void adjust_paths_for_srfs ( PlannerInfo *root,
RelOptInfo *rel,
List *targets,
List *targets_contain_srfs 
)
static

Definition at line 6318 of file planner.c.

6320 {
6321  ListCell *lc;
6322 
6323  Assert(list_length(targets) == list_length(targets_contain_srfs));
6324  Assert(!linitial_int(targets_contain_srfs));
6325 
6326  /* If no SRFs appear at this plan level, nothing to do */
6327  if (list_length(targets) == 1)
6328  return;
6329 
6330  /*
6331  * Stack SRF-evaluation nodes atop each path for the rel.
6332  *
6333  * In principle we should re-run set_cheapest() here to identify the
6334  * cheapest path, but it seems unlikely that adding the same tlist eval
6335  * costs to all the paths would change that, so we don't bother. Instead,
6336  * just assume that the cheapest-startup and cheapest-total paths remain
6337  * so. (There should be no parameterized paths anymore, so we needn't
6338  * worry about updating cheapest_parameterized_paths.)
6339  */
6340  foreach(lc, rel->pathlist)
6341  {
6342  Path *subpath = (Path *) lfirst(lc);
6343  Path *newpath = subpath;
6344  ListCell *lc1,
6345  *lc2;
6346 
6347  Assert(subpath->param_info == NULL);
6348  forboth(lc1, targets, lc2, targets_contain_srfs)
6349  {
6350  PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6351  bool contains_srfs = (bool) lfirst_int(lc2);
6352 
6353  /* If this level doesn't contain SRFs, do regular projection */
6354  if (contains_srfs)
6355  newpath = (Path *) create_set_projection_path(root,
6356  rel,
6357  newpath,
6358  thistarget);
6359  else
6360  newpath = (Path *) apply_projection_to_path(root,
6361  rel,
6362  newpath,
6363  thistarget);
6364  }
6365  lfirst(lc) = newpath;
6366  if (subpath == rel->cheapest_startup_path)
6367  rel->cheapest_startup_path = newpath;
6368  if (subpath == rel->cheapest_total_path)
6369  rel->cheapest_total_path = newpath;
6370  }
6371 
6372  /* Likewise for partial paths, if any */
6373  foreach(lc, rel->partial_pathlist)
6374  {
6375  Path *subpath = (Path *) lfirst(lc);
6376  Path *newpath = subpath;
6377  ListCell *lc1,
6378  *lc2;
6379 
6380  Assert(subpath->param_info == NULL);
6381  forboth(lc1, targets, lc2, targets_contain_srfs)
6382  {
6383  PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6384  bool contains_srfs = (bool) lfirst_int(lc2);
6385 
6386  /* If this level doesn't contain SRFs, do regular projection */
6387  if (contains_srfs)
6388  newpath = (Path *) create_set_projection_path(root,
6389  rel,
6390  newpath,
6391  thistarget);
6392  else
6393  {
6394  /* avoid apply_projection_to_path, in case of multiple refs */
6395  newpath = (Path *) create_projection_path(root,
6396  rel,
6397  newpath,
6398  thistarget);
6399  }
6400  }
6401  lfirst(lc) = newpath;
6402  }
6403 }
unsigned char bool
Definition: c.h:440
Datum subpath(PG_FUNCTION_ARGS)
Definition: ltree_op.c:241
ProjectionPath * create_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2638
ProjectSetPath * create_set_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2835
Path * apply_projection_to_path(PlannerInfo *root, RelOptInfo *rel, Path *path, PathTarget *target)
Definition: pathnode.c:2746
static int list_length(const List *l)
Definition: pg_list.h:152
#define forboth(cell1, list1, cell2, list2)
Definition: pg_list.h:467
#define lfirst_int(lc)
Definition: pg_list.h:173
#define linitial_int(l)
Definition: pg_list.h:179
struct Path * cheapest_startup_path
Definition: pathnodes.h:892

References apply_projection_to_path(), Assert(), RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, create_projection_path(), create_set_projection_path(), forboth, lfirst, lfirst_int, lfirst_node, linitial_int, list_length(), RelOptInfo::partial_pathlist, RelOptInfo::pathlist, and subpath().

Referenced by apply_scanjoin_target_to_paths(), and grouping_planner().
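
For context, the targets / targets_contain_srfs lists consumed here are built by split_pathtarget_at_srfs() (in tlist.c), which decomposes a path target into a SRF-free base level plus one level per nesting of set-returning functions. An abridged sketch of the caller side, modeled on grouping_planner() (variable names as used there; not a complete excerpt):

List *final_targets;
List *final_targets_contain_srfs;

if (parse->hasTargetSRFs)
{
    /* Split the final target into SRF-free and SRF-bearing levels. */
    split_pathtarget_at_srfs(root, final_target, sort_input_target,
                             &final_targets, &final_targets_contain_srfs);

    /* Stack one projection per level atop each path of the final rel. */
    adjust_paths_for_srfs(root, final_rel,
                          final_targets,
                          final_targets_contain_srfs);
}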

◆ apply_scanjoin_target_to_paths()

static void apply_scanjoin_target_to_paths ( PlannerInfo *root,
RelOptInfo *rel,
List *scanjoin_targets,
List *scanjoin_targets_contain_srfs,
bool  scanjoin_target_parallel_safe,
bool  tlist_same_exprs 
)
static

Definition at line 7484 of file planner.c.

7490 {
7491  bool rel_is_partitioned = IS_PARTITIONED_REL(rel);
7492  PathTarget *scanjoin_target;
7493  ListCell *lc;
7494 
7495  /* This recurses, so be paranoid. */
 7496  check_stack_depth();
 7497 
7498  /*
7499  * If the rel is partitioned, we want to drop its existing paths and
7500  * generate new ones. This function would still be correct if we kept the
7501  * existing paths: we'd modify them to generate the correct target above
7502  * the partitioning Append, and then they'd compete on cost with paths
7503  * generating the target below the Append. However, in our current cost
7504  * model the latter way is always the same or cheaper cost, so modifying
7505  * the existing paths would just be useless work. Moreover, when the cost
7506  * is the same, varying roundoff errors might sometimes allow an existing
7507  * path to be picked, resulting in undesirable cross-platform plan
7508  * variations. So we drop old paths and thereby force the work to be done
7509  * below the Append, except in the case of a non-parallel-safe target.
7510  *
7511  * Some care is needed, because we have to allow
7512  * generate_useful_gather_paths to see the old partial paths in the next
7513  * stanza. Hence, zap the main pathlist here, then allow
7514  * generate_useful_gather_paths to add path(s) to the main list, and
7515  * finally zap the partial pathlist.
7516  */
7517  if (rel_is_partitioned)
7518  rel->pathlist = NIL;
7519 
7520  /*
7521  * If the scan/join target is not parallel-safe, partial paths cannot
7522  * generate it.
7523  */
7524  if (!scanjoin_target_parallel_safe)
7525  {
7526  /*
7527  * Since we can't generate the final scan/join target in parallel
7528  * workers, this is our last opportunity to use any partial paths that
7529  * exist; so build Gather path(s) that use them and emit whatever the
7530  * current reltarget is. We don't do this in the case where the
7531  * target is parallel-safe, since we will be able to generate superior
7532  * paths by doing it after the final scan/join target has been
7533  * applied.
7534  */
7535  generate_useful_gather_paths(root, rel, false);
7536 
7537  /* Can't use parallel query above this level. */
7538  rel->partial_pathlist = NIL;
7539  rel->consider_parallel = false;
7540  }
7541 
7542  /* Finish dropping old paths for a partitioned rel, per comment above */
7543  if (rel_is_partitioned)
7544  rel->partial_pathlist = NIL;
7545 
7546  /* Extract SRF-free scan/join target. */
7547  scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
7548 
7549  /*
7550  * Apply the SRF-free scan/join target to each existing path.
7551  *
7552  * If the tlist exprs are the same, we can just inject the sortgroupref
7553  * information into the existing pathtargets. Otherwise, replace each
7554  * path with a projection path that generates the SRF-free scan/join
7555  * target. This can't change the ordering of paths within rel->pathlist,
7556  * so we just modify the list in place.
7557  */
7558  foreach(lc, rel->pathlist)
7559  {
7560  Path *subpath = (Path *) lfirst(lc);
7561 
7562  /* Shouldn't have any parameterized paths anymore */
7563  Assert(subpath->param_info == NULL);
7564 
7565  if (tlist_same_exprs)
7566  subpath->pathtarget->sortgrouprefs =
7567  scanjoin_target->sortgrouprefs;
7568  else
7569  {
7570  Path *newpath;
7571 
7572  newpath = (Path *) create_projection_path(root, rel, subpath,
7573  scanjoin_target);
7574  lfirst(lc) = newpath;
7575  }
7576  }
7577 
7578  /* Likewise adjust the targets for any partial paths. */
7579  foreach(lc, rel->partial_pathlist)
7580  {
7581  Path *subpath = (Path *) lfirst(lc);
7582 
7583  /* Shouldn't have any parameterized paths anymore */
7584  Assert(subpath->param_info == NULL);
7585 
7586  if (tlist_same_exprs)
7587  subpath->pathtarget->sortgrouprefs =
7588  scanjoin_target->sortgrouprefs;
7589  else
7590  {
7591  Path *newpath;
7592 
7593  newpath = (Path *) create_projection_path(root, rel, subpath,
7594  scanjoin_target);
7595  lfirst(lc) = newpath;
7596  }
7597  }
7598 
7599  /*
7600  * Now, if final scan/join target contains SRFs, insert ProjectSetPath(s)
7601  * atop each existing path. (Note that this function doesn't look at the
7602  * cheapest-path fields, which is a good thing because they're bogus right
7603  * now.)
7604  */
7605  if (root->parse->hasTargetSRFs)
7606  adjust_paths_for_srfs(root, rel,
7607  scanjoin_targets,
7608  scanjoin_targets_contain_srfs);
7609 
7610  /*
7611  * Update the rel's target to be the final (with SRFs) scan/join target.
7612  * This now matches the actual output of all the paths, and we might get
7613  * confused in createplan.c if they don't agree. We must do this now so
7614  * that any append paths made in the next part will use the correct
7615  * pathtarget (cf. create_append_path).
7616  *
7617  * Note that this is also necessary if GetForeignUpperPaths() gets called
7618  * on the final scan/join relation or on any of its children, since the
7619  * FDW might look at the rel's target to create ForeignPaths.
7620  */
7621  rel->reltarget = llast_node(PathTarget, scanjoin_targets);
7622 
7623  /*
7624  * If the relation is partitioned, recursively apply the scan/join target
7625  * to all partitions, and generate brand-new Append paths in which the
7626  * scan/join target is computed below the Append rather than above it.
7627  * Since Append is not projection-capable, that might save a separate
7628  * Result node, and it also is important for partitionwise aggregate.
7629  */
7630  if (rel_is_partitioned)
7631  {
7632  List *live_children = NIL;
7633  int i;
7634 
7635  /* Adjust each partition. */
7636  i = -1;
7637  while ((i = bms_next_member(rel->live_parts, i)) >= 0)
7638  {
7639  RelOptInfo *child_rel = rel->part_rels[i];
7640  AppendRelInfo **appinfos;
7641  int nappinfos;
7642  List *child_scanjoin_targets = NIL;
7643 
7644  Assert(child_rel != NULL);
7645 
7646  /* Dummy children can be ignored. */
7647  if (IS_DUMMY_REL(child_rel))
7648  continue;
7649 
7650  /* Translate scan/join targets for this child. */
7651  appinfos = find_appinfos_by_relids(root, child_rel->relids,
7652  &nappinfos);
7653  foreach(lc, scanjoin_targets)
7654  {
7655  PathTarget *target = lfirst_node(PathTarget, lc);
7656 
7657  target = copy_pathtarget(target);
7658  target->exprs = (List *)
 7659  adjust_appendrel_attrs(root,
 7660  (Node *) target->exprs,
7661  nappinfos, appinfos);
7662  child_scanjoin_targets = lappend(child_scanjoin_targets,
7663  target);
7664  }
7665  pfree(appinfos);
7666 
7667  /* Recursion does the real work. */
7668  apply_scanjoin_target_to_paths(root, child_rel,
7669  child_scanjoin_targets,
7670  scanjoin_targets_contain_srfs,
7671  scanjoin_target_parallel_safe,
 7672  tlist_same_exprs);
 7673 
7674  /* Save non-dummy children for Append paths. */
7675  if (!IS_DUMMY_REL(child_rel))
7676  live_children = lappend(live_children, child_rel);
7677  }
7678 
7679  /* Build new paths for this relation by appending child paths. */
7680  add_paths_to_append_rel(root, rel, live_children);
7681  }
7682 
7683  /*
7684  * Consider generating Gather or Gather Merge paths. We must only do this
7685  * if the relation is parallel safe, and we don't do it for child rels to
7686  * avoid creating multiple Gather nodes within the same plan. We must do
7687  * this after all paths have been generated and before set_cheapest, since
7688  * one of the generated paths may turn out to be the cheapest one.
7689  */
7690  if (rel->consider_parallel && !IS_OTHER_REL(rel))
7691  generate_useful_gather_paths(root, rel, false);
7692 
7693  /*
7694  * Reassess which paths are the cheapest, now that we've potentially added
7695  * new Gather (or Gather Merge) and/or Append (or MergeAppend) paths to
7696  * this relation.
7697  */
7698  set_cheapest(rel);
7699 }
void generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
Definition: allpaths.c:3198
void add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, List *live_childrels)
Definition: allpaths.c:1305
AppendRelInfo ** find_appinfos_by_relids(PlannerInfo *root, Relids relids, int *nappinfos)
Definition: appendinfo.c:732
Node * adjust_appendrel_attrs(PlannerInfo *root, Node *node, int nappinfos, AppendRelInfo **appinfos)
Definition: appendinfo.c:195
List * lappend(List *list, void *datum)
Definition: list.c:338
void pfree(void *pointer)
Definition: mcxt.c:1436
void set_cheapest(RelOptInfo *parent_rel)
Definition: pathnode.c:244
#define IS_DUMMY_REL(r)
Definition: pathnodes.h:1907
#define IS_PARTITIONED_REL(rel)
Definition: pathnodes.h:1047
#define IS_OTHER_REL(rel)
Definition: pathnodes.h:845
#define llast_node(type, l)
Definition: pg_list.h:202
static void apply_scanjoin_target_to_paths(PlannerInfo *root, RelOptInfo *rel, List *scanjoin_targets, List *scanjoin_targets_contain_srfs, bool scanjoin_target_parallel_safe, bool tlist_same_exprs)
Definition: planner.c:7484
static void adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel, List *targets, List *targets_contain_srfs)
Definition: planner.c:6318
void check_stack_depth(void)
Definition: postgres.c:3461
Definition: nodes.h:129
List * exprs
Definition: pathnodes.h:1507
Relids relids
Definition: pathnodes.h:862
bool consider_parallel
Definition: pathnodes.h:878
Bitmapset * live_parts
Definition: pathnodes.h:1024
bool tlist_same_exprs(List *tlist1, List *tlist2)
Definition: tlist.c:218
PathTarget * copy_pathtarget(PathTarget *src)
Definition: tlist.c:657

References add_paths_to_append_rel(), adjust_appendrel_attrs(), adjust_paths_for_srfs(), Assert(), bms_next_member(), check_stack_depth(), RelOptInfo::consider_parallel, copy_pathtarget(), create_projection_path(), PathTarget::exprs, find_appinfos_by_relids(), generate_useful_gather_paths(), i, IS_DUMMY_REL, IS_OTHER_REL, IS_PARTITIONED_REL, lappend(), lfirst, lfirst_node, linitial_node, RelOptInfo::live_parts, llast_node, NIL, PlannerInfo::parse, RelOptInfo::partial_pathlist, RelOptInfo::pathlist, pfree(), RelOptInfo::relids, RelOptInfo::reltarget, set_cheapest(), subpath(), and tlist_same_exprs().

Referenced by grouping_planner().

◆ can_partial_agg()

static bool can_partial_agg ( PlannerInfo *root)
static

Definition at line 7442 of file planner.c.

7443 {
7444  Query *parse = root->parse;
7445 
7446  if (!parse->hasAggs && parse->groupClause == NIL)
7447  {
7448  /*
7449  * We don't know how to do parallel aggregation unless we have either
7450  * some aggregates or a grouping clause.
7451  */
7452  return false;
7453  }
7454  else if (parse->groupingSets)
7455  {
7456  /* We don't know how to do grouping sets in parallel. */
7457  return false;
7458  }
7459  else if (root->hasNonPartialAggs || root->hasNonSerialAggs)
7460  {
7461  /* Insufficient support for partial mode. */
7462  return false;
7463  }
7464 
7465  /* Everything looks good. */
7466  return true;
7467 }
bool hasNonPartialAggs
Definition: pathnodes.h:516
bool hasNonSerialAggs
Definition: pathnodes.h:518

References PlannerInfo::hasNonPartialAggs, PlannerInfo::hasNonSerialAggs, NIL, parse(), and PlannerInfo::parse.

Referenced by create_grouping_paths().

◆ common_prefix_cmp()

static int common_prefix_cmp ( const void *  a,
const void *  b 
)
static

Definition at line 5824 of file planner.c.

5825 {
5826  const WindowClauseSortData *wcsa = a;
5827  const WindowClauseSortData *wcsb = b;
5828  ListCell *item_a;
5829  ListCell *item_b;
5830 
5831  forboth(item_a, wcsa->uniqueOrder, item_b, wcsb->uniqueOrder)
5832  {
 5833  SortGroupClause *sca = lfirst_node(SortGroupClause, item_a);
 5834  SortGroupClause *scb = lfirst_node(SortGroupClause, item_b);
 5835 
5836  if (sca->tleSortGroupRef > scb->tleSortGroupRef)
5837  return -1;
5838  else if (sca->tleSortGroupRef < scb->tleSortGroupRef)
5839  return 1;
5840  else if (sca->sortop > scb->sortop)
5841  return -1;
5842  else if (sca->sortop < scb->sortop)
5843  return 1;
5844  else if (sca->nulls_first && !scb->nulls_first)
5845  return -1;
5846  else if (!sca->nulls_first && scb->nulls_first)
5847  return 1;
5848  /* no need to compare eqop, since it is fully determined by sortop */
5849  }
5850 
5851  if (list_length(wcsa->uniqueOrder) > list_length(wcsb->uniqueOrder))
5852  return -1;
5853  else if (list_length(wcsa->uniqueOrder) < list_length(wcsb->uniqueOrder))
5854  return 1;
5855 
5856  return 0;
5857 }
int b
Definition: isn.c:70
int a
Definition: isn.c:69
Index tleSortGroupRef
Definition: parsenodes.h:1393

References a, b, forboth, lfirst_node, list_length(), SortGroupClause::nulls_first, SortGroupClause::sortop, SortGroupClause::tleSortGroupRef, and WindowClauseSortData::uniqueOrder.

Referenced by select_active_windows().
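
Usage sketch: select_active_windows() builds one WindowClauseSortData per active window and sorts the array with this comparator, so that windows sharing the longest common sort prefix become adjacent and can share Sort nodes (actives and nActive are the local variables used there):

qsort(actives, nActive, sizeof(WindowClauseSortData), common_prefix_cmp);

Note the inverted comparisons: longer uniqueOrder lists (and higher sort-group refs) sort first, so the most demanding sort order comes first and later windows can often piggyback on a prefix of an earlier sort.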

◆ consider_groupingsets_paths()

static void consider_groupingsets_paths ( PlannerInfo *root,
RelOptInfo *grouped_rel,
Path *path,
bool  is_sorted,
bool  can_hash,
grouping_sets_data *gd,
const AggClauseCosts *agg_costs,
double  dNumGroups 
)
static

Definition at line 4068 of file planner.c.

4076 {
4077  Query *parse = root->parse;
4078  Size hash_mem_limit = get_hash_memory_limit();
4079 
4080  /*
4081  * If we're not being offered sorted input, then only consider plans that
4082  * can be done entirely by hashing.
4083  *
4084  * We can hash everything if it looks like it'll fit in hash_mem. But if
4085  * the input is actually sorted despite not being advertised as such, we
4086  * prefer to make use of that in order to use less memory.
4087  *
4088  * If none of the grouping sets are sortable, then ignore the hash_mem
4089  * limit and generate a path anyway, since otherwise we'll just fail.
4090  */
4091  if (!is_sorted)
4092  {
4093  List *new_rollups = NIL;
4094  RollupData *unhashed_rollup = NULL;
4095  List *sets_data;
4096  List *empty_sets_data = NIL;
4097  List *empty_sets = NIL;
4098  ListCell *lc;
4099  ListCell *l_start = list_head(gd->rollups);
4100  AggStrategy strat = AGG_HASHED;
4101  double hashsize;
4102  double exclude_groups = 0.0;
4103 
4104  Assert(can_hash);
4105 
4106  /*
4107  * If the input is coincidentally sorted usefully (which can happen
4108  * even if is_sorted is false, since that only means that our caller
4109  * has set up the sorting for us), then save some hashtable space by
4110  * making use of that. But we need to watch out for degenerate cases:
4111  *
4112  * 1) If there are any empty grouping sets, then group_pathkeys might
4113  * be NIL if all non-empty grouping sets are unsortable. In this case,
4114  * there will be a rollup containing only empty groups, and the
4115  * pathkeys_contained_in test is vacuously true; this is ok.
4116  *
4117  * XXX: the above relies on the fact that group_pathkeys is generated
4118  * from the first rollup. If we add the ability to consider multiple
4119  * sort orders for grouping input, this assumption might fail.
4120  *
4121  * 2) If there are no empty sets and only unsortable sets, then the
4122  * rollups list will be empty (and thus l_start == NULL), and
4123  * group_pathkeys will be NIL; we must ensure that the vacuously-true
4124  * pathkeys_contained_in test doesn't cause us to crash.
4125  */
4126  if (l_start != NULL &&
 4127  pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
 4128  {
4129  unhashed_rollup = lfirst_node(RollupData, l_start);
4130  exclude_groups = unhashed_rollup->numGroups;
4131  l_start = lnext(gd->rollups, l_start);
4132  }
4133 
4134  hashsize = estimate_hashagg_tablesize(root,
4135  path,
4136  agg_costs,
4137  dNumGroups - exclude_groups);
4138 
4139  /*
4140  * gd->rollups is empty if we have only unsortable columns to work
4141  * with. Override hash_mem in that case; otherwise, we'll rely on the
4142  * sorted-input case to generate usable mixed paths.
4143  */
4144  if (hashsize > hash_mem_limit && gd->rollups)
4145  return; /* nope, won't fit */
4146 
4147  /*
4148  * We need to burst the existing rollups list into individual grouping
4149  * sets and recompute a groupClause for each set.
4150  */
4151  sets_data = list_copy(gd->unsortable_sets);
4152 
4153  for_each_cell(lc, gd->rollups, l_start)
4154  {
4155  RollupData *rollup = lfirst_node(RollupData, lc);
4156 
4157  /*
4158  * If we find an unhashable rollup that's not been skipped by the
4159  * "actually sorted" check above, we can't cope; we'd need sorted
4160  * input (with a different sort order) but we can't get that here.
4161  * So bail out; we'll get a valid path from the is_sorted case
4162  * instead.
4163  *
4164  * The mere presence of empty grouping sets doesn't make a rollup
4165  * unhashable (see preprocess_grouping_sets), we handle those
4166  * specially below.
4167  */
4168  if (!rollup->hashable)
4169  return;
4170 
4171  sets_data = list_concat(sets_data, rollup->gsets_data);
4172  }
4173  foreach(lc, sets_data)
4174  {
4176  List *gset = gs->set;
4177  RollupData *rollup;
4178 
4179  if (gset == NIL)
4180  {
4181  /* Empty grouping sets can't be hashed. */
4182  empty_sets_data = lappend(empty_sets_data, gs);
4183  empty_sets = lappend(empty_sets, NIL);
4184  }
4185  else
4186  {
4187  rollup = makeNode(RollupData);
4188 
4189  rollup->groupClause = preprocess_groupclause(root, gset);
4190  rollup->gsets_data = list_make1(gs);
4191  rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4192  rollup->gsets_data,
4193  gd->tleref_to_colnum_map);
4194  rollup->numGroups = gs->numGroups;
4195  rollup->hashable = true;
4196  rollup->is_hashed = true;
4197  new_rollups = lappend(new_rollups, rollup);
4198  }
4199  }
4200 
4201  /*
4202  * If we didn't find anything nonempty to hash, then bail. We'll
4203  * generate a path from the is_sorted case.
4204  */
4205  if (new_rollups == NIL)
4206  return;
4207 
4208  /*
4209  * If there were empty grouping sets they should have been in the
4210  * first rollup.
4211  */
4212  Assert(!unhashed_rollup || !empty_sets);
4213 
4214  if (unhashed_rollup)
4215  {
4216  new_rollups = lappend(new_rollups, unhashed_rollup);
4217  strat = AGG_MIXED;
4218  }
4219  else if (empty_sets)
4220  {
4221  RollupData *rollup = makeNode(RollupData);
4222 
4223  rollup->groupClause = NIL;
4224  rollup->gsets_data = empty_sets_data;
4225  rollup->gsets = empty_sets;
4226  rollup->numGroups = list_length(empty_sets);
4227  rollup->hashable = false;
4228  rollup->is_hashed = false;
4229  new_rollups = lappend(new_rollups, rollup);
4230  strat = AGG_MIXED;
4231  }
4232 
4233  add_path(grouped_rel, (Path *)
 4234  create_groupingsets_path(root,
 4235  grouped_rel,
4236  path,
4237  (List *) parse->havingQual,
4238  strat,
4239  new_rollups,
4240  agg_costs));
4241  return;
4242  }
4243 
4244  /*
4245  * If we have sorted input but nothing we can do with it, bail.
4246  */
4247  if (gd->rollups == NIL)
4248  return;
4249 
4250  /*
4251  * Given sorted input, we try and make two paths: one sorted and one mixed
4252  * sort/hash. (We need to try both because hashagg might be disabled, or
4253  * some columns might not be sortable.)
4254  *
4255  * can_hash is passed in as false if some obstacle elsewhere (such as
4256  * ordered aggs) means that we shouldn't consider hashing at all.
4257  */
4258  if (can_hash && gd->any_hashable)
4259  {
4260  List *rollups = NIL;
4261  List *hash_sets = list_copy(gd->unsortable_sets);
4262  double availspace = hash_mem_limit;
4263  ListCell *lc;
4264 
4265  /*
4266  * Account first for space needed for groups we can't sort at all.
4267  */
4268  availspace -= estimate_hashagg_tablesize(root,
4269  path,
4270  agg_costs,
4271  gd->dNumHashGroups);
4272 
4273  if (availspace > 0 && list_length(gd->rollups) > 1)
4274  {
4275  double scale;
4276  int num_rollups = list_length(gd->rollups);
4277  int k_capacity;
4278  int *k_weights = palloc(num_rollups * sizeof(int));
4279  Bitmapset *hash_items = NULL;
4280  int i;
4281 
4282  /*
4283  * We treat this as a knapsack problem: the knapsack capacity
4284  * represents hash_mem, the item weights are the estimated memory
4285  * usage of the hashtables needed to implement a single rollup,
4286  * and we really ought to use the cost saving as the item value;
4287  * however, currently the costs assigned to sort nodes don't
4288  * reflect the comparison costs well, and so we treat all items as
4289  * of equal value (each rollup we hash instead saves us one sort).
4290  *
4291  * To use the discrete knapsack, we need to scale the values to a
4292  * reasonably small bounded range. We choose to allow a 5% error
4293  * margin; we have no more than 4096 rollups in the worst possible
4294  * case, which with a 5% error margin will require a bit over 42MB
4295  * of workspace. (Anyone wanting to plan queries that complex had
4296  * better have the memory for it. In more reasonable cases, with
4297  * no more than a couple of dozen rollups, the memory usage will
4298  * be negligible.)
4299  *
4300  * k_capacity is naturally bounded, but we clamp the values for
4301  * scale and weight (below) to avoid overflows or underflows (or
4302  * uselessly trying to use a scale factor less than 1 byte).
4303  */
4304  scale = Max(availspace / (20.0 * num_rollups), 1.0);
4305  k_capacity = (int) floor(availspace / scale);
4306 
4307  /*
4308  * We leave the first rollup out of consideration since it's the
4309  * one that matches the input sort order. We assign indexes "i"
4310  * to only those entries considered for hashing; the second loop,
4311  * below, must use the same condition.
4312  */
4313  i = 0;
4314  for_each_from(lc, gd->rollups, 1)
4315  {
4316  RollupData *rollup = lfirst_node(RollupData, lc);
4317 
4318  if (rollup->hashable)
4319  {
4320  double sz = estimate_hashagg_tablesize(root,
4321  path,
4322  agg_costs,
4323  rollup->numGroups);
4324 
4325  /*
4326  * If sz is enormous, but hash_mem (and hence scale) is
4327  * small, avoid integer overflow here.
4328  */
4329  k_weights[i] = (int) Min(floor(sz / scale),
4330  k_capacity + 1.0);
4331  ++i;
4332  }
4333  }
4334 
4335  /*
4336  * Apply knapsack algorithm; compute the set of items which
4337  * maximizes the value stored (in this case the number of sorts
4338  * saved) while keeping the total size (approximately) within
4339  * capacity.
4340  */
4341  if (i > 0)
4342  hash_items = DiscreteKnapsack(k_capacity, i, k_weights, NULL);
4343 
4344  if (!bms_is_empty(hash_items))
4345  {
4346  rollups = list_make1(linitial(gd->rollups));
4347 
4348  i = 0;
4349  for_each_from(lc, gd->rollups, 1)
4350  {
4351  RollupData *rollup = lfirst_node(RollupData, lc);
4352 
4353  if (rollup->hashable)
4354  {
4355  if (bms_is_member(i, hash_items))
4356  hash_sets = list_concat(hash_sets,
4357  rollup->gsets_data);
4358  else
4359  rollups = lappend(rollups, rollup);
4360  ++i;
4361  }
4362  else
4363  rollups = lappend(rollups, rollup);
4364  }
4365  }
4366  }
4367 
4368  if (!rollups && hash_sets)
4369  rollups = list_copy(gd->rollups);
4370 
4371  foreach(lc, hash_sets)
4372  {
4374  RollupData *rollup = makeNode(RollupData);
4375 
4376  Assert(gs->set != NIL);
4377 
4378  rollup->groupClause = preprocess_groupclause(root, gs->set);
4379  rollup->gsets_data = list_make1(gs);
4380  rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4381  rollup->gsets_data,
4382  gd->tleref_to_colnum_map);
4383  rollup->numGroups = gs->numGroups;
4384  rollup->hashable = true;
4385  rollup->is_hashed = true;
4386  rollups = lcons(rollup, rollups);
4387  }
4388 
4389  if (rollups)
4390  {
4391  add_path(grouped_rel, (Path *)
 4392  create_groupingsets_path(root,
 4393  grouped_rel,
4394  path,
4395  (List *) parse->havingQual,
4396  AGG_MIXED,
4397  rollups,
4398  agg_costs));
4399  }
4400  }
4401 
4402  /*
4403  * Now try the simple sorted case.
4404  */
4405  if (!gd->unsortable_sets)
4406  add_path(grouped_rel, (Path *)
 4407  create_groupingsets_path(root,
 4408  grouped_rel,
4409  path,
4410  (List *) parse->havingQual,
4411  AGG_SORTED,
4412  gd->rollups,
4413  agg_costs));
4414 }
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:444
#define bms_is_empty(a)
Definition: bitmapset.h:105
#define Min(x, y)
Definition: c.h:988
#define Max(x, y)
Definition: c.h:982
size_t Size
Definition: c.h:589
Bitmapset * DiscreteKnapsack(int max_weight, int num_items, int *item_weights, double *item_values)
Definition: knapsack.c:54
List * list_concat(List *list1, const List *list2)
Definition: list.c:560
List * lcons(void *datum, List *list)
Definition: list.c:494
void * palloc(Size size)
Definition: mcxt.c:1210
size_t get_hash_memory_limit(void)
Definition: nodeHash.c:3390
AggStrategy
Definition: nodes.h:361
@ AGG_MIXED
Definition: nodes.h:365
#define makeNode(_type_)
Definition: nodes.h:176
bool pathkeys_contained_in(List *keys1, List *keys2)
Definition: pathkeys.c:340
GroupingSetsPath * create_groupingsets_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *having_qual, AggStrategy aggstrategy, List *rollups, const AggClauseCosts *agg_costs)
Definition: pathnode.c:3174
#define list_make1(x1)
Definition: pg_list.h:212
#define for_each_cell(cell, lst, initcell)
Definition: pg_list.h:438
static ListCell * list_head(const List *l)
Definition: pg_list.h:128
#define for_each_from(cell, lst, N)
Definition: pg_list.h:414
#define linitial(l)
Definition: pg_list.h:178
static ListCell * lnext(const List *l, const ListCell *c)
Definition: pg_list.h:343
int scale
Definition: pgbench.c:191
static List * preprocess_groupclause(PlannerInfo *root, List *force)
Definition: planner.c:2810
static List * remap_to_groupclause_idx(List *groupClause, List *gsets, int *tleref_to_colnum_map)
Definition: planner.c:2187
double estimate_hashagg_tablesize(PlannerInfo *root, Path *path, const AggClauseCosts *agg_costs, double dNumGroups)
Definition: selfuncs.c:3887
Cardinality numGroups
Definition: pathnodes.h:2234
Cardinality numGroups
Definition: pathnodes.h:2245
List * groupClause
Definition: pathnodes.h:2242
List * gsets_data
Definition: pathnodes.h:2244
bool hashable
Definition: pathnodes.h:2246
List * gsets
Definition: pathnodes.h:2243
bool is_hashed
Definition: pathnodes.h:2247
int * tleref_to_colnum_map
Definition: planner.c:110
List * unsortable_sets
Definition: planner.c:109
double dNumHashGroups
Definition: planner.c:105

References add_path(), AGG_HASHED, AGG_MIXED, AGG_SORTED, grouping_sets_data::any_hashable, Assert(), bms_is_empty, bms_is_member(), create_groupingsets_path(), DiscreteKnapsack(), grouping_sets_data::dNumHashGroups, estimate_hashagg_tablesize(), for_each_cell, for_each_from, get_hash_memory_limit(), PlannerInfo::group_pathkeys, RollupData::groupClause, RollupData::gsets, RollupData::gsets_data, RollupData::hashable, i, RollupData::is_hashed, lappend(), lcons(), lfirst_node, linitial, list_concat(), list_copy(), list_head(), list_length(), list_make1, lnext(), makeNode, Max, Min, NIL, GroupingSetData::numGroups, RollupData::numGroups, palloc(), parse(), PlannerInfo::parse, Path::pathkeys, pathkeys_contained_in(), preprocess_groupclause(), remap_to_groupclause_idx(), grouping_sets_data::rollups, scale, GroupingSetData::set, grouping_sets_data::tleref_to_colnum_map, and grouping_sets_data::unsortable_sets.

Referenced by add_paths_to_grouping_rel().
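
The knapsack step above decides which hashable rollups fit in hash_mem at once: each rollup's estimated hash-table size becomes an integer weight, and passing NULL for item_values makes every rollup equally valuable, so the chosen subset maximizes how many rollups are hashed (i.e., sorts saved). Below is a toy, self-contained 0/1 knapsack in the same spirit; the weights and capacity are made-up values, not planner output.

#include <stdio.h>

#define CAP 8                       /* stand-in for k_capacity */

int
main(void)
{
    int w[] = {3, 5, 4, 9};         /* scaled hash-table size per rollup */
    int n = 4;
    int best[CAP + 1] = {0};        /* best[c] = max rollups within size c */

    for (int i = 0; i < n; i++)
        for (int c = CAP; c >= w[i]; c--)   /* classic 0/1 knapsack order */
            if (best[c - w[i]] + 1 > best[c])
                best[c] = best[c - w[i]] + 1;

    printf("rollups hashable within budget: %d\n", best[CAP]);   /* 2 */
    return 0;
}

Clamping oversized weights to k_capacity + 1, as the planner does with Min() above, keeps the weights bounded without changing the answer: such an item can never fit, so any value past the capacity behaves identically.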

◆ create_degenerate_grouping_paths()

static void create_degenerate_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo grouped_rel 
)
static

Definition at line 3865 of file planner.c.

3867 {
3868  Query *parse = root->parse;
3869  int nrows;
3870  Path *path;
3871 
3872  nrows = list_length(parse->groupingSets);
3873  if (nrows > 1)
3874  {
3875  /*
3876  * Doesn't seem worthwhile writing code to cons up a generate_series
3877  * or a values scan to emit multiple rows. Instead just make N clones
3878  * and append them. (With a volatile HAVING clause, this means you
3879  * might get between 0 and N output rows. Offhand I think that's
3880  * desired.)
3881  */
3882  List *paths = NIL;
3883 
3884  while (--nrows >= 0)
3885  {
3886  path = (Path *)
3887  create_group_result_path(root, grouped_rel,
3888  grouped_rel->reltarget,
3889  (List *) parse->havingQual);
3890  paths = lappend(paths, path);
3891  }
3892  path = (Path *)
3893  create_append_path(root,
3894  grouped_rel,
3895  paths,
3896  NIL,
3897  NIL,
3898  NULL,
3899  0,
3900  false,
3901  -1);
3902  }
3903  else
3904  {
3905  /* No grouping sets, or just one, so one output row */
3906  path = (Path *)
3907  create_group_result_path(root, grouped_rel,
3908  grouped_rel->reltarget,
3909  (List *) parse->havingQual);
3910  }
3911 
3912  add_path(grouped_rel, path);
3913 }
AppendPath * create_append_path(PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *partial_subpaths, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows)
Definition: pathnode.c:1242
GroupResultPath * create_group_result_path(PlannerInfo *root, RelOptInfo *rel, PathTarget *target, List *havingqual)
Definition: pathnode.c:1516

References add_path(), create_append_path(), create_group_result_path(), lappend(), list_length(), NIL, parse(), PlannerInfo::parse, and RelOptInfo::reltarget.

Referenced by create_grouping_paths().
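
For orientation: grouping is degenerate when a HAVING qual or grouping sets are present but there are no aggregates and no GROUP BY columns, so each (empty) grouping set contributes at most one constant row. A condensed sketch of that gate, paraphrasing is_degenerate_grouping() (line 3844) with plain booleans standing in for the Query fields; not the verbatim source:

#include <stdbool.h>

/* Grouping machinery is present, but there is nothing to compute per
 * group, so each grouping set yields at most one constant row. */
static bool
degenerate_grouping(bool has_having_qual, bool has_grouping_sets,
                    bool has_aggs, bool has_group_clause)
{
    return (has_having_qual || has_grouping_sets) &&
        !has_aggs && !has_group_clause;
}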

◆ create_distinct_paths()

static RelOptInfo * create_distinct_paths ( PlannerInfo root,
RelOptInfo input_rel 
)
static

Definition at line 4647 of file planner.c.

4648 {
4649  RelOptInfo *distinct_rel;
4650 
4651  /* For now, do all work in the (DISTINCT, NULL) upperrel */
4652  distinct_rel = fetch_upper_rel(root, UPPERREL_DISTINCT, NULL);
4653 
4654  /*
4655  * We don't compute anything at this level, so distinct_rel will be
4656  * parallel-safe if the input rel is parallel-safe. In particular, if
4657  * there is a DISTINCT ON (...) clause, any path for the input_rel will
4658  * output those expressions, and will not be parallel-safe unless those
4659  * expressions are parallel-safe.
4660  */
4661  distinct_rel->consider_parallel = input_rel->consider_parallel;
4662 
4663  /*
4664  * If the input rel belongs to a single FDW, so does the distinct_rel.
4665  */
4666  distinct_rel->serverid = input_rel->serverid;
4667  distinct_rel->userid = input_rel->userid;
4668  distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4669  distinct_rel->fdwroutine = input_rel->fdwroutine;
4670 
4671  /* build distinct paths based on input_rel's pathlist */
4672  create_final_distinct_paths(root, input_rel, distinct_rel);
4673 
4674  /* now build distinct paths based on input_rel's partial_pathlist */
4675  create_partial_distinct_paths(root, input_rel, distinct_rel);
4676 
4677  /* Give a helpful error if we failed to create any paths */
4678  if (distinct_rel->pathlist == NIL)
4679  ereport(ERROR,
4680  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4681  errmsg("could not implement DISTINCT"),
4682  errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4683 
4684  /*
4685  * If there is an FDW that's responsible for all baserels of the query,
4686  * let it consider adding ForeignPaths.
4687  */
4688  if (distinct_rel->fdwroutine &&
4689  distinct_rel->fdwroutine->GetForeignUpperPaths)
4690  distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4691  UPPERREL_DISTINCT,
4692  input_rel,
4693  distinct_rel,
4694  NULL);
4695 
4696  /* Let extensions possibly add some more paths */
4697  if (create_upper_paths_hook)
4698  (*create_upper_paths_hook) (root, UPPERREL_DISTINCT, input_rel,
4699  distinct_rel, NULL);
4700 
4701  /* Now choose the best path(s) */
4702  set_cheapest(distinct_rel);
4703 
4704  return distinct_rel;
4705 }
int errdetail(const char *fmt,...)
Definition: elog.c:1202
int errcode(int sqlerrcode)
Definition: elog.c:858
int errmsg(const char *fmt,...)
Definition: elog.c:1069
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
@ UPPERREL_DISTINCT
Definition: pathnodes.h:77
static void create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *final_distinct_rel)
Definition: planner.c:4716
static RelOptInfo * create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *distinct_rel)
Definition: planner.c:4880
create_upper_paths_hook_type create_upper_paths_hook
Definition: planner.c:80
RelOptInfo * fetch_upper_rel(PlannerInfo *root, UpperRelationKind kind, Relids relids)
Definition: relnode.c:1411
bool useridiscurrent
Definition: pathnodes.h:953
Oid userid
Definition: pathnodes.h:951
Oid serverid
Definition: pathnodes.h:949

References RelOptInfo::consider_parallel, create_final_distinct_paths(), create_partial_distinct_paths(), create_upper_paths_hook, ereport, errcode(), errdetail(), errmsg(), ERROR, fetch_upper_rel(), NIL, RelOptInfo::pathlist, RelOptInfo::serverid, set_cheapest(), UPPERREL_DISTINCT, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by grouping_planner().

◆ create_final_distinct_paths()

static RelOptInfo * create_final_distinct_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo distinct_rel 
)
static

Definition at line 4880 of file planner.c.

4882 {
4883  Query *parse = root->parse;
4884  Path *cheapest_input_path = input_rel->cheapest_total_path;
4885  double numDistinctRows;
4886  bool allow_hash;
4887 
4888  /* Estimate number of distinct rows there will be */
4889  if (parse->groupClause || parse->groupingSets || parse->hasAggs ||
4890  root->hasHavingQual)
4891  {
4892  /*
4893  * If there was grouping or aggregation, use the number of input rows
4894  * as the estimated number of DISTINCT rows (ie, assume the input is
4895  * already mostly unique).
4896  */
4897  numDistinctRows = cheapest_input_path->rows;
4898  }
4899  else
4900  {
4901  /*
4902  * Otherwise, the UNIQUE filter has effects comparable to GROUP BY.
4903  */
4904  List *distinctExprs;
4905 
4906  distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
4907  parse->targetList);
4908  numDistinctRows = estimate_num_groups(root, distinctExprs,
4909  cheapest_input_path->rows,
4910  NULL, NULL);
4911  }
4912 
4913  /*
4914  * Consider sort-based implementations of DISTINCT, if possible.
4915  */
4916  if (grouping_is_sortable(root->processed_distinctClause))
4917  {
4918  /*
4919  * Firstly, if we have any adequately-presorted paths, just stick a
4920  * Unique node on those. We also consider doing an explicit sort of
4921  * the cheapest input path and Unique'ing that. If any paths have
4922  * presorted keys then we'll create an incremental sort atop those
4923  * before adding a unique node on top.
4924  *
4925  * When we have DISTINCT ON, we must sort by the more rigorous of
4926  * DISTINCT and ORDER BY, else it won't have the desired behavior.
4927  * Also, if we do have to do an explicit sort, we might as well use
4928  * the more rigorous ordering to avoid a second sort later. (Note
4929  * that the parser will have ensured that one clause is a prefix of
4930  * the other.)
4931  */
4932  List *needed_pathkeys;
4933  ListCell *lc;
4934  double limittuples = root->distinct_pathkeys == NIL ? 1.0 : -1.0;
4935 
4936  if (parse->hasDistinctOn &&
4937  list_length(root->distinct_pathkeys) <
4938  list_length(root->sort_pathkeys))
4939  needed_pathkeys = root->sort_pathkeys;
4940  else
4941  needed_pathkeys = root->distinct_pathkeys;
4942 
4943  foreach(lc, input_rel->pathlist)
4944  {
4945  Path *input_path = (Path *) lfirst(lc);
4946  Path *sorted_path;
4947  bool is_sorted;
4948  int presorted_keys;
4949 
4950  is_sorted = pathkeys_count_contained_in(needed_pathkeys,
4951  input_path->pathkeys,
4952  &presorted_keys);
4953 
4954  if (is_sorted)
4955  sorted_path = input_path;
4956  else
4957  {
4958  /*
4959  * Try at least sorting the cheapest path and also try
4960  * incrementally sorting any path which is partially sorted
4961  * already (no need to deal with paths which have presorted
4962  * keys when incremental sort is disabled unless it's the
4963  * cheapest input path).
4964  */
4965  if (input_path != cheapest_input_path &&
4966  (presorted_keys == 0 || !enable_incremental_sort))
4967  continue;
4968 
4969  /*
4970  * We've no need to consider both a sort and incremental sort.
4971  * We'll just do a sort if there are no presorted keys and an
4972  * incremental sort when there are presorted keys.
4973  */
4974  if (presorted_keys == 0 || !enable_incremental_sort)
4975  sorted_path = (Path *) create_sort_path(root,
4976  distinct_rel,
4977  input_path,
4978  needed_pathkeys,
4979  limittuples);
4980  else
4981  sorted_path = (Path *) create_incremental_sort_path(root,
4982  distinct_rel,
4983  input_path,
4984  needed_pathkeys,
4985  presorted_keys,
4986  limittuples);
4987  }
4988 
4989  /*
4990  * distinct_pathkeys may have become empty if all of the pathkeys
4991  * were determined to be redundant. If all of the pathkeys are
4992  * redundant then each DISTINCT target must only allow a single
4993  * value, therefore all resulting tuples must be identical (or at
4994  * least indistinguishable by an equality check). We can uniquify
4995  * these tuples simply by taking the first tuple. All we do
4996  * here is add a path to do "LIMIT 1" atop of 'sorted_path'. When
4997  * doing a DISTINCT ON we may still have a non-NIL sort_pathkeys
4998  * list, so we must still only do this with paths which are
4999  * correctly sorted by sort_pathkeys.
5000  */
5001  if (root->distinct_pathkeys == NIL)
5002  {
5003  Node *limitCount;
5004 
5005  limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
5006  sizeof(int64),
5007  Int64GetDatum(1), false,
5008  FLOAT8PASSBYVAL);
5009 
5010  /*
5011  * If the query already has a LIMIT clause, then we could end
5012  * up with a duplicate LimitPath in the final plan. That does
5013  * not seem worth troubling over too much.
5014  */
5015  add_path(distinct_rel, (Path *)
5016  create_limit_path(root, distinct_rel, sorted_path,
5017  NULL, limitCount,
5018  LIMIT_OPTION_COUNT, 0, 1));
5019  }
5020  else
5021  {
5022  add_path(distinct_rel, (Path *)
5023  create_upper_unique_path(root, distinct_rel,
5024  sorted_path,
5025  list_length(root->distinct_pathkeys),
5026  numDistinctRows));
5027  }
5028  }
5029  }
5030 
5031  /*
5032  * Consider hash-based implementations of DISTINCT, if possible.
5033  *
5034  * If we were not able to make any other types of path, we *must* hash or
5035  * die trying. If we do have other choices, there are two things that
5036  * should prevent selection of hashing: if the query uses DISTINCT ON
5037  * (because it won't really have the expected behavior if we hash), or if
5038  * enable_hashagg is off.
5039  *
5040  * Note: grouping_is_hashable() is much more expensive to check than the
5041  * other gating conditions, so we want to do it last.
5042  */
5043  if (distinct_rel->pathlist == NIL)
5044  allow_hash = true; /* we have no alternatives */
5045  else if (parse->hasDistinctOn || !enable_hashagg)
5046  allow_hash = false; /* policy-based decision not to hash */
5047  else
5048  allow_hash = true; /* default */
5049 
5050  if (allow_hash && grouping_is_hashable(root->processed_distinctClause))
5051  {
5052  /* Generate hashed aggregate path --- no sort needed */
5053  add_path(distinct_rel, (Path *)
5054  create_agg_path(root,
5055  distinct_rel,
5056  cheapest_input_path,
5057  cheapest_input_path->pathtarget,
5058  AGG_HASHED,
5059  AGGSPLIT_SIMPLE,
5060  root->processed_distinctClause,
5061  NIL,
5062  NULL,
5063  numDistinctRows));
5064  }
5065 
5066  return distinct_rel;
5067 }
#define FLOAT8PASSBYVAL
Definition: c.h:619
bool enable_hashagg
Definition: costsize.c:142
Datum Int64GetDatum(int64 X)
Definition: fmgr.c:1794
Const * makeConst(Oid consttype, int32 consttypmod, Oid constcollid, int constlen, Datum constvalue, bool constisnull, bool constbyval)
Definition: makefuncs.c:301
@ LIMIT_OPTION_COUNT
Definition: nodes.h:439
LimitPath * create_limit_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, Node *limitOffset, Node *limitCount, LimitOption limitOption, int64 offset_est, int64 count_est)
Definition: pathnode.c:3746
UpperUniquePath * create_upper_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, int numCols, double numGroups)
Definition: pathnode.c:3056
#define InvalidOid
Definition: postgres_ext.h:36
double estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, List **pgset, EstimationInfo *estinfo)
Definition: selfuncs.c:3386
Cardinality rows
Definition: pathnodes.h:1634
List * distinct_pathkeys
Definition: pathnodes.h:400
bool hasHavingQual
Definition: pathnodes.h:496
List * sort_pathkeys
Definition: pathnodes.h:402
List * processed_distinctClause
Definition: pathnodes.h:445
List * get_sortgrouplist_exprs(List *sgClauses, List *targetList)
Definition: tlist.c:392
bool grouping_is_sortable(List *groupClause)
Definition: tlist.c:540
bool grouping_is_hashable(List *groupClause)
Definition: tlist.c:560

References add_path(), AGG_HASHED, AGGSPLIT_SIMPLE, RelOptInfo::cheapest_total_path, create_agg_path(), create_incremental_sort_path(), create_limit_path(), create_sort_path(), create_upper_unique_path(), PlannerInfo::distinct_pathkeys, enable_hashagg, enable_incremental_sort, estimate_num_groups(), FLOAT8PASSBYVAL, get_sortgrouplist_exprs(), grouping_is_hashable(), grouping_is_sortable(), PlannerInfo::hasHavingQual, Int64GetDatum(), InvalidOid, lfirst, LIMIT_OPTION_COUNT, list_length(), makeConst(), NIL, parse(), PlannerInfo::parse, Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::pathlist, PlannerInfo::processed_distinctClause, Path::rows, and PlannerInfo::sort_pathkeys.

Referenced by create_distinct_paths(), and create_partial_distinct_paths().
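
The allow_hash logic above is easy to misread in context, so here is a hypothetical pure-function restatement; the name distinct_may_hash and its parameters are illustrative stand-ins, not planner APIs:

#include <stdbool.h>

static bool
distinct_may_hash(bool have_sorted_paths, bool has_distinct_on,
                  bool hashagg_enabled, bool clause_is_hashable)
{
    if (!have_sorted_paths)
        return clause_is_hashable;  /* no alternative: hash or fail */
    if (has_distinct_on || !hashagg_enabled)
        return false;               /* policy-based refusal */
    return clause_is_hashable;      /* default: allowed if possible */
}

The asymmetry is deliberate: with no sorted path available, hashing is attempted even under DISTINCT ON or enable_hashagg = off, because the only other outcome is the "could not implement DISTINCT" error.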

◆ create_grouping_paths()

static RelOptInfo * create_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget target,
bool  target_parallel_safe,
grouping_sets_data gd 
)
static

Definition at line 3678 of file planner.c.

3683 {
3684  Query *parse = root->parse;
3685  RelOptInfo *grouped_rel;
3686  RelOptInfo *partially_grouped_rel;
3687  AggClauseCosts agg_costs;
3688 
3689  MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
3690  get_agg_clause_costs(root, AGGSPLIT_SIMPLE, &agg_costs);
3691 
3692  /*
3693  * Create grouping relation to hold fully aggregated grouping and/or
3694  * aggregation paths.
3695  */
3696  grouped_rel = make_grouping_rel(root, input_rel, target,
3697  target_parallel_safe, parse->havingQual);
3698 
3699  /*
3700  * Create either paths for a degenerate grouping or paths for ordinary
3701  * grouping, as appropriate.
3702  */
3703  if (is_degenerate_grouping(root))
3704  create_degenerate_grouping_paths(root, input_rel, grouped_rel);
3705  else
3706  {
3707  int flags = 0;
3708  GroupPathExtraData extra;
3709 
3710  /*
3711  * Determine whether it's possible to perform sort-based
3712  * implementations of grouping. (Note that if processed_groupClause
3713  * is empty, grouping_is_sortable() is trivially true, and all the
3714  * pathkeys_contained_in() tests will succeed too, so that we'll
3715  * consider every surviving input path.)
3716  *
3717  * If we have grouping sets, we might be able to sort some but not all
3718  * of them; in this case, we need can_sort to be true as long as we
3719  * must consider any sorted-input plan.
3720  */
3721  if ((gd && gd->rollups != NIL)
3722  || grouping_is_sortable(root->processed_groupClause))
3723  flags |= GROUPING_CAN_USE_SORT;
3724 
3725  /*
3726  * Determine whether we should consider hash-based implementations of
3727  * grouping.
3728  *
3729  * Hashed aggregation only applies if we're grouping. If we have
3730  * grouping sets, some groups might be hashable but others not; in
3731  * this case we set can_hash true as long as there is nothing globally
3732  * preventing us from hashing (and we should therefore consider plans
3733  * with hashes).
3734  *
3735  * Executor doesn't support hashed aggregation with DISTINCT or ORDER
3736  * BY aggregates. (Doing so would imply storing *all* the input
3737  * values in the hash table, and/or running many sorts in parallel,
3738  * either of which seems like a certain loser.) We similarly don't
3739  * support ordered-set aggregates in hashed aggregation, but that case
3740  * is also included in the numOrderedAggs count.
3741  *
3742  * Note: grouping_is_hashable() is much more expensive to check than
3743  * the other gating conditions, so we want to do it last.
3744  */
3745  if ((parse->groupClause != NIL &&
3746  root->numOrderedAggs == 0 &&
3747  (gd ? gd->any_hashable : grouping_is_hashable(root->processed_groupClause))))
3748  flags |= GROUPING_CAN_USE_HASH;
3749 
3750  /*
3751  * Determine whether partial aggregation is possible.
3752  */
3753  if (can_partial_agg(root))
3754  flags |= GROUPING_CAN_PARTIAL_AGG;
3755 
3756  extra.flags = flags;
3757  extra.target_parallel_safe = target_parallel_safe;
3758  extra.havingQual = parse->havingQual;
3759  extra.targetList = parse->targetList;
3760  extra.partial_costs_set = false;
3761 
3762  /*
3763  * Determine whether partitionwise aggregation is in theory possible.
3764  * It can be disabled by the user, and for now, we don't try to
3765  * support grouping sets. create_ordinary_grouping_paths() will check
3766  * additional conditions, such as whether input_rel is partitioned.
3767  */
3768  if (enable_partitionwise_aggregate && !parse->groupingSets)
3769  extra.patype = PARTITIONWISE_AGGREGATE_FULL;
3770  else
3771  extra.patype = PARTITIONWISE_AGGREGATE_NONE;
3772 
3773  create_ordinary_grouping_paths(root, input_rel, grouped_rel,
3774  &agg_costs, gd, &extra,
3775  &partially_grouped_rel);
3776  }
3777 
3778  set_cheapest(grouped_rel);
3779  return grouped_rel;
3780 }
#define MemSet(start, val, len)
Definition: c.h:1004
bool enable_partitionwise_aggregate
Definition: costsize.c:150
@ PARTITIONWISE_AGGREGATE_FULL
Definition: pathnodes.h:3205
@ PARTITIONWISE_AGGREGATE_NONE
Definition: pathnodes.h:3204
#define GROUPING_CAN_PARTIAL_AGG
Definition: pathnodes.h:3189
static void create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel)
Definition: planner.c:3865
static bool is_degenerate_grouping(PlannerInfo *root)
Definition: planner.c:3844
static void create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, GroupPathExtraData *extra, RelOptInfo **partially_grouped_rel_p)
Definition: planner.c:3929
static bool can_partial_agg(PlannerInfo *root)
Definition: planner.c:7442
static RelOptInfo * make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, Node *havingQual)
Definition: planner.c:3791
void get_agg_clause_costs(PlannerInfo *root, AggSplit aggsplit, AggClauseCosts *costs)
Definition: prepagg.c:561
PartitionwiseAggregateType patype
Definition: pathnodes.h:3234

References AGGSPLIT_SIMPLE, grouping_sets_data::any_hashable, can_partial_agg(), create_degenerate_grouping_paths(), create_ordinary_grouping_paths(), enable_partitionwise_aggregate, GroupPathExtraData::flags, get_agg_clause_costs(), GROUPING_CAN_PARTIAL_AGG, GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, grouping_is_hashable(), grouping_is_sortable(), GroupPathExtraData::havingQual, is_degenerate_grouping(), make_grouping_rel(), MemSet, NIL, PlannerInfo::numOrderedAggs, parse(), PlannerInfo::parse, GroupPathExtraData::partial_costs_set, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, GroupPathExtraData::patype, PlannerInfo::processed_groupClause, grouping_sets_data::rollups, set_cheapest(), GroupPathExtraData::target_parallel_safe, and GroupPathExtraData::targetList.

Referenced by grouping_planner().
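
The flag computation above is the crux of this function. A hypothetical condensation as a standalone helper; each parameter stands in for the planner test named in its comment:

#include <stdbool.h>

enum
{
    CAN_USE_SORT = 0x01,            /* cf. GROUPING_CAN_USE_SORT */
    CAN_USE_HASH = 0x02,            /* cf. GROUPING_CAN_USE_HASH */
    CAN_PARTIAL_AGG = 0x04          /* cf. GROUPING_CAN_PARTIAL_AGG */
};

static int
grouping_strategy_flags(bool sortable,      /* rollups, or grouping_is_sortable() */
                        bool grouped,       /* parse->groupClause != NIL */
                        bool ordered_aggs,  /* root->numOrderedAggs > 0 */
                        bool hashable,      /* any_hashable / grouping_is_hashable() */
                        bool partial_ok)    /* can_partial_agg() */
{
    int flags = 0;

    if (sortable)
        flags |= CAN_USE_SORT;
    if (grouped && !ordered_aggs && hashable)
        flags |= CAN_USE_HASH;
    if (partial_ok)
        flags |= CAN_PARTIAL_AGG;
    return flags;
}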

◆ create_one_window_path()

static void create_one_window_path ( PlannerInfo root,
RelOptInfo window_rel,
Path path,
PathTarget input_target,
PathTarget output_target,
WindowFuncLists wflists,
List activeWindows 
)
static

Definition at line 4517 of file planner.c.

4524 {
4525  PathTarget *window_target;
4526  ListCell *l;
4527  List *topqual = NIL;
4528 
4529  /*
4530  * Since each window clause could require a different sort order, we stack
4531  * up a WindowAgg node for each clause, with sort steps between them as
4532  * needed. (We assume that select_active_windows chose a good order for
4533  * executing the clauses in.)
4534  *
4535  * input_target should contain all Vars and Aggs needed for the result.
4536  * (In some cases we wouldn't need to propagate all of these all the way
4537  * to the top, since they might only be needed as inputs to WindowFuncs.
4538  * It's probably not worth trying to optimize that though.) It must also
4539  * contain all window partitioning and sorting expressions, to ensure
4540  * they're computed only once at the bottom of the stack (that's critical
4541  * for volatile functions). As we climb up the stack, we'll add outputs
4542  * for the WindowFuncs computed at each level.
4543  */
4544  window_target = input_target;
4545 
4546  foreach(l, activeWindows)
4547  {
4548  WindowClause *wc = lfirst_node(WindowClause, l);
4549  List *window_pathkeys;
4550  int presorted_keys;
4551  bool is_sorted;
4552  bool topwindow;
4553 
4554  window_pathkeys = make_pathkeys_for_window(root,
4555  wc,
4556  root->processed_tlist);
4557 
4558  is_sorted = pathkeys_count_contained_in(window_pathkeys,
4559  path->pathkeys,
4560  &presorted_keys);
4561 
4562  /* Sort if necessary */
4563  if (!is_sorted)
4564  {
4565  /*
4566  * No presorted keys or incremental sort disabled, just perform a
4567  * complete sort.
4568  */
4569  if (presorted_keys == 0 || !enable_incremental_sort)
4570  path = (Path *) create_sort_path(root, window_rel,
4571  path,
4572  window_pathkeys,
4573  -1.0);
4574  else
4575  {
4576  /*
4577  * Since we have presorted keys and incremental sort is
4578  * enabled, just use incremental sort.
4579  */
4580  path = (Path *) create_incremental_sort_path(root,
4581  window_rel,
4582  path,
4583  window_pathkeys,
4584  presorted_keys,
4585  -1.0);
4586  }
4587  }
4588 
4589  if (lnext(activeWindows, l))
4590  {
4591  /*
4592  * Add the current WindowFuncs to the output target for this
4593  * intermediate WindowAggPath. We must copy window_target to
4594  * avoid changing the previous path's target.
4595  *
4596  * Note: a WindowFunc adds nothing to the target's eval costs; but
4597  * we do need to account for the increase in tlist width.
4598  */
4599  ListCell *lc2;
4600 
4601  window_target = copy_pathtarget(window_target);
4602  foreach(lc2, wflists->windowFuncs[wc->winref])
4603  {
4604  WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4605 
4606  add_column_to_pathtarget(window_target, (Expr *) wfunc, 0);
4607  window_target->width += get_typavgwidth(wfunc->wintype, -1);
4608  }
4609  }
4610  else
4611  {
4612  /* Install the goal target in the topmost WindowAgg */
4613  window_target = output_target;
4614  }
4615 
4616  /* mark the final item in the list as the top-level window */
4617  topwindow = foreach_current_index(l) == list_length(activeWindows) - 1;
4618 
4619  /*
4620  * Accumulate all of the runConditions from each intermediate
4621  * WindowClause. The top-level WindowAgg must pass these as a qual so
4622  * that it filters out unwanted tuples correctly.
4623  */
4624  if (!topwindow)
4625  topqual = list_concat(topqual, wc->runCondition);
4626 
4627  path = (Path *)
4628  create_windowagg_path(root, window_rel, path, window_target,
4629  wflists->windowFuncs[wc->winref],
4630  wc, topwindow ? topqual : NIL, topwindow);
4631  }
4632 
4633  add_path(window_rel, path);
4634 }
int32 get_typavgwidth(Oid typid, int32 typmod)
Definition: lsyscache.c:2536
WindowAggPath * create_windowagg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *windowFuncs, WindowClause *winclause, List *qual, bool topwindow)
Definition: pathnode.c:3409
static List * make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc, List *tlist)
Definition: planner.c:6010
List * processed_tlist
Definition: pathnodes.h:456
List ** windowFuncs
Definition: clauses.h:23
void add_column_to_pathtarget(PathTarget *target, Expr *expr, Index sortgroupref)
Definition: tlist.c:695

References add_column_to_pathtarget(), add_path(), copy_pathtarget(), create_incremental_sort_path(), create_sort_path(), create_windowagg_path(), enable_incremental_sort, foreach_current_index, get_typavgwidth(), lfirst_node, list_concat(), list_length(), lnext(), make_pathkeys_for_window(), NIL, Path::pathkeys, pathkeys_count_contained_in(), PlannerInfo::processed_tlist, PathTarget::width, WindowFuncLists::windowFuncs, and WindowClause::winref.

Referenced by create_window_paths().
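
A toy, self-contained illustration of the run-condition handling above: quals attached to non-top window levels are deferred upward and evaluated only by the topmost WindowAgg, the first node with all window function results available. The qual strings and level count are made up:

#include <stdio.h>

int
main(void)
{
    const char *runcond[] = {"rn <= 10", "rnk < 3", "cume < 0.5"};
    int nlevels = 3;

    for (int lvl = 0; lvl < nlevels; lvl++)
    {
        int topwindow = (lvl == nlevels - 1);   /* last active window */

        if (!topwindow)
            printf("level %d: defer qual \"%s\" upward\n",
                   lvl, runcond[lvl]);
        else
            printf("level %d (top): evaluate the %d deferred quals\n",
                   lvl, nlevels - 1);
    }
    return 0;
}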

◆ create_ordered_paths()

static RelOptInfo * create_ordered_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget target,
bool  target_parallel_safe,
double  limit_tuples 
)
static

Definition at line 5087 of file planner.c.

5092 {
5093  Path *cheapest_input_path = input_rel->cheapest_total_path;
5094  RelOptInfo *ordered_rel;
5095  ListCell *lc;
5096 
5097  /* For now, do all work in the (ORDERED, NULL) upperrel */
5098  ordered_rel = fetch_upper_rel(root, UPPERREL_ORDERED, NULL);
5099 
5100  /*
5101  * If the input relation is not parallel-safe, then the ordered relation
5102  * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
5103  * target list is parallel-safe.
5104  */
5105  if (input_rel->consider_parallel && target_parallel_safe)
5106  ordered_rel->consider_parallel = true;
5107 
5108  /*
5109  * If the input rel belongs to a single FDW, so does the ordered_rel.
5110  */
5111  ordered_rel->serverid = input_rel->serverid;
5112  ordered_rel->userid = input_rel->userid;
5113  ordered_rel->useridiscurrent = input_rel->useridiscurrent;
5114  ordered_rel->fdwroutine = input_rel->fdwroutine;
5115 
5116  foreach(lc, input_rel->pathlist)
5117  {
5118  Path *input_path = (Path *) lfirst(lc);
5119  Path *sorted_path;
5120  bool is_sorted;
5121  int presorted_keys;
5122 
5123  is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5124  input_path->pathkeys, &presorted_keys);
5125 
5126  if (is_sorted)
5127  sorted_path = input_path;
5128  else
5129  {
5130  /*
5131  * Try at least sorting the cheapest path and also try
5132  * incrementally sorting any path which is partially sorted
5133  * already (no need to deal with paths which have presorted keys
5134  * when incremental sort is disabled unless it's the cheapest
5135  * input path).
5136  */
5137  if (input_path != cheapest_input_path &&
5138  (presorted_keys == 0 || !enable_incremental_sort))
5139  continue;
5140 
5141  /*
5142  * We've no need to consider both a sort and incremental sort.
5143  * We'll just do a sort if there are no presorted keys and an
5144  * incremental sort when there are presorted keys.
5145  */
5146  if (presorted_keys == 0 || !enable_incremental_sort)
5147  sorted_path = (Path *) create_sort_path(root,
5148  ordered_rel,
5149  input_path,
5150  root->sort_pathkeys,
5151  limit_tuples);
5152  else
5153  sorted_path = (Path *) create_incremental_sort_path(root,
5154  ordered_rel,
5155  input_path,
5156  root->sort_pathkeys,
5157  presorted_keys,
5158  limit_tuples);
5159  }
5160 
5161  /* Add projection step if needed */
5162  if (sorted_path->pathtarget != target)
5163  sorted_path = apply_projection_to_path(root, ordered_rel,
5164  sorted_path, target);
5165 
5166  add_path(ordered_rel, sorted_path);
5167  }
5168 
5169  /*
5170  * generate_gather_paths() will have already generated a simple Gather
5171  * path for the best parallel path, if any, and the loop above will have
5172  * considered sorting it. Similarly, generate_gather_paths() will also
5173  * have generated order-preserving Gather Merge plans which can be used
5174  * without sorting if they happen to match the sort_pathkeys, and the loop
5175  * above will have handled those as well. However, there's one more
5176  * possibility: it may make sense to sort the cheapest partial path
5177  * according to the required output order and then use Gather Merge.
5178  */
5179  if (ordered_rel->consider_parallel && root->sort_pathkeys != NIL &&
5180  input_rel->partial_pathlist != NIL)
5181  {
5182  Path *cheapest_partial_path;
5183 
5184  cheapest_partial_path = linitial(input_rel->partial_pathlist);
5185 
5186  /*
5187  * If cheapest partial path doesn't need a sort, this is redundant
5188  * with what's already been tried.
5189  */
5190  if (!pathkeys_contained_in(root->sort_pathkeys,
5191  cheapest_partial_path->pathkeys))
5192  {
5193  Path *path;
5194  double total_groups;
5195 
5196  path = (Path *) create_sort_path(root,
5197  ordered_rel,
5198  cheapest_partial_path,
5199  root->sort_pathkeys,
5200  limit_tuples);
5201 
5202  total_groups = cheapest_partial_path->rows *
5203  cheapest_partial_path->parallel_workers;
5204  path = (Path *)
5205  create_gather_merge_path(root, ordered_rel,
5206  path,
5207  path->pathtarget,
5208  root->sort_pathkeys, NULL,
5209  &total_groups);
5210 
5211  /* Add projection step if needed */
5212  if (path->pathtarget != target)
5213  path = apply_projection_to_path(root, ordered_rel,
5214  path, target);
5215 
5216  add_path(ordered_rel, path);
5217  }
5218 
5219  /*
5220  * Consider incremental sort with a gather merge on partial paths.
5221  *
5222  * We can also skip the entire loop when we only have a single-item
5223  * sort_pathkeys because then we can't possibly have a presorted
5224  * prefix of the list without having the list be fully sorted.
5225  */
5226  if (enable_incremental_sort && list_length(root->sort_pathkeys) > 1)
5227  {
5228  foreach(lc, input_rel->partial_pathlist)
5229  {
5230  Path *input_path = (Path *) lfirst(lc);
5231  Path *sorted_path;
5232  bool is_sorted;
5233  int presorted_keys;
5234  double total_groups;
5235 
5236  /*
5237  * We don't care if this is the cheapest partial path - we
5238  * can't simply skip it, because it may be partially sorted in
5239  * which case we want to consider adding incremental sort
5240  * (instead of full sort, which is what happens above).
5241  */
5242 
5243  is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5244  input_path->pathkeys,
5245  &presorted_keys);
5246 
5247  /* No point in adding incremental sort on fully sorted paths. */
5248  if (is_sorted)
5249  continue;
5250 
5251  if (presorted_keys == 0)
5252  continue;
5253 
5254  /* Since we have presorted keys, consider incremental sort. */
5255  sorted_path = (Path *) create_incremental_sort_path(root,
5256  ordered_rel,
5257  input_path,
5258  root->sort_pathkeys,
5259  presorted_keys,
5260  limit_tuples);
5261  total_groups = input_path->rows *
5262  input_path->parallel_workers;
5263  sorted_path = (Path *)
5264  create_gather_merge_path(root, ordered_rel,
5265  sorted_path,
5266  sorted_path->pathtarget,
5267  root->sort_pathkeys, NULL,
5268  &total_groups);
5269 
5270  /* Add projection step if needed */
5271  if (sorted_path->pathtarget != target)
5272  sorted_path = apply_projection_to_path(root, ordered_rel,
5273  sorted_path, target);
5274 
5275  add_path(ordered_rel, sorted_path);
5276  }
5277  }
5278  }
5279 
5280  /*
5281  * If there is an FDW that's responsible for all baserels of the query,
5282  * let it consider adding ForeignPaths.
5283  */
5284  if (ordered_rel->fdwroutine &&
5285  ordered_rel->fdwroutine->GetForeignUpperPaths)
5286  ordered_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_ORDERED,
5287  input_rel, ordered_rel,
5288  NULL);
5289 
5290  /* Let extensions possibly add some more paths */
5291  if (create_upper_paths_hook)
5292  (*create_upper_paths_hook) (root, UPPERREL_ORDERED,
5293  input_rel, ordered_rel, NULL);
5294 
5295  /*
5296  * No need to bother with set_cheapest here; grouping_planner does not
5297  * need us to do it.
5298  */
5299  Assert(ordered_rel->pathlist != NIL);
5300 
5301  return ordered_rel;
5302 }
GatherMergePath * create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *pathkeys, Relids required_outer, double *rows)
Definition: pathnode.c:1873
@ UPPERREL_ORDERED
Definition: pathnodes.h:78
int parallel_workers
Definition: pathnodes.h:1631

References add_path(), apply_projection_to_path(), Assert(), RelOptInfo::cheapest_total_path, RelOptInfo::consider_parallel, create_gather_merge_path(), create_incremental_sort_path(), create_sort_path(), create_upper_paths_hook, enable_incremental_sort, fetch_upper_rel(), lfirst, linitial, list_length(), NIL, Path::parallel_workers, RelOptInfo::partial_pathlist, Path::pathkeys, pathkeys_contained_in(), pathkeys_count_contained_in(), RelOptInfo::pathlist, Path::rows, RelOptInfo::serverid, PlannerInfo::sort_pathkeys, UPPERREL_ORDERED, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by grouping_planner().
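
The same three-way sorting decision recurs in this function, in the DISTINCT paths above, and in the partial-grouping code below: keep an already-sorted path as is, do a full sort when no leading pathkeys are presorted (or incremental sort is disabled), and otherwise build an incremental sort on the presorted prefix. A hypothetical distillation:

#include <stdbool.h>

typedef enum
{
    NO_SORT,                        /* path already delivers the order */
    FULL_SORT,                      /* cf. create_sort_path() */
    INCREMENTAL_SORT                /* cf. create_incremental_sort_path() */
} sort_choice;

static sort_choice
choose_sort(bool is_sorted, int presorted_keys, bool incremental_enabled)
{
    if (is_sorted)
        return NO_SORT;
    if (presorted_keys == 0 || !incremental_enabled)
        return FULL_SORT;           /* no usable sorted prefix */
    return INCREMENTAL_SORT;        /* exploit the presorted prefix */
}

Non-cheapest paths with no presorted keys are simply skipped in the loops above: a full sort of them can never beat a full sort of the cheapest path.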

◆ create_ordinary_grouping_paths()

static void create_ordinary_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo grouped_rel,
const AggClauseCosts agg_costs,
grouping_sets_data gd,
GroupPathExtraData extra,
RelOptInfo **  partially_grouped_rel_p 
)
static

Definition at line 3929 of file planner.c.

3935 {
3936  Path *cheapest_path = input_rel->cheapest_total_path;
3937  RelOptInfo *partially_grouped_rel = NULL;
3938  double dNumGroups;
3939  PartitionwiseAggregateType patype = PARTITIONWISE_AGGREGATE_NONE;
3940 
3941  /*
3942  * If this is the topmost grouping relation or if the parent relation is
3943  * doing some form of partitionwise aggregation, then we may be able to do
3944  * it at this level also. However, if the input relation is not
3945  * partitioned, partitionwise aggregate is impossible.
3946  */
3947  if (extra->patype != PARTITIONWISE_AGGREGATE_NONE &&
3948  IS_PARTITIONED_REL(input_rel))
3949  {
3950  /*
3951  * If this is the topmost relation or if the parent relation is doing
3952  * full partitionwise aggregation, then we can do full partitionwise
3953  * aggregation provided that the GROUP BY clause contains all of the
3954  * partitioning columns at this level. Otherwise, we can do at most
3955  * partial partitionwise aggregation. But if partial aggregation is
3956  * not supported in general then we can't use it for partitionwise
3957  * aggregation either.
3958  *
3959  * Check parse->groupClause not processed_groupClause, because it's
3960  * okay if some of the partitioning columns were proved redundant.
3961  */
3962  if (extra->patype == PARTITIONWISE_AGGREGATE_FULL &&
3963  group_by_has_partkey(input_rel, extra->targetList,
3964  root->parse->groupClause))
3965  patype = PARTITIONWISE_AGGREGATE_FULL;
3966  else if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
3967  patype = PARTITIONWISE_AGGREGATE_PARTIAL;
3968  else
3969  patype = PARTITIONWISE_AGGREGATE_NONE;
3970  }
3971 
3972  /*
3973  * Before generating paths for grouped_rel, we first generate any possible
3974  * partially grouped paths; that way, later code can easily consider both
3975  * parallel and non-parallel approaches to grouping.
3976  */
3977  if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
3978  {
3979  bool force_rel_creation;
3980 
3981  /*
3982  * If we're doing partitionwise aggregation at this level, force
3983  * creation of a partially_grouped_rel so we can add partitionwise
3984  * paths to it.
3985  */
3986  force_rel_creation = (patype == PARTITIONWISE_AGGREGATE_PARTIAL);
3987 
3988  partially_grouped_rel =
3989  create_partial_grouping_paths(root,
3990  grouped_rel,
3991  input_rel,
3992  gd,
3993  extra,
3994  force_rel_creation);
3995  }
3996 
3997  /* Set out parameter. */
3998  *partially_grouped_rel_p = partially_grouped_rel;
3999 
4000  /* Apply partitionwise aggregation technique, if possible. */
4001  if (patype != PARTITIONWISE_AGGREGATE_NONE)
4002  create_partitionwise_grouping_paths(root, input_rel, grouped_rel,
4003  partially_grouped_rel, agg_costs,
4004  gd, patype, extra);
4005 
4006  /* If we are doing partial aggregation only, return. */
4007  if (extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
4008  {
4009  Assert(partially_grouped_rel);
4010 
4011  if (partially_grouped_rel->pathlist)
4012  set_cheapest(partially_grouped_rel);
4013 
4014  return;
4015  }
4016 
4017  /* Gather any partially grouped partial paths. */
4018  if (partially_grouped_rel && partially_grouped_rel->partial_pathlist)
4019  {
4020  gather_grouping_paths(root, partially_grouped_rel);
4021  set_cheapest(partially_grouped_rel);
4022  }
4023 
4024  /*
4025  * Estimate number of groups.
4026  */
4027  dNumGroups = get_number_of_groups(root,
4028  cheapest_path->rows,
4029  gd,
4030  extra->targetList);
4031 
4032  /* Build final grouping paths */
4033  add_paths_to_grouping_rel(root, input_rel, grouped_rel,
4034  partially_grouped_rel, agg_costs, gd,
4035  dNumGroups, extra);
4036 
4037  /* Give a helpful error if we failed to find any implementation */
4038  if (grouped_rel->pathlist == NIL)
4039  ereport(ERROR,
4040  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4041  errmsg("could not implement GROUP BY"),
4042  errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4043 
4044  /*
4045  * If there is an FDW that's responsible for all baserels of the query,
4046  * let it consider adding ForeignPaths.
4047  */
4048  if (grouped_rel->fdwroutine &&
4049  grouped_rel->fdwroutine->GetForeignUpperPaths)
4050  grouped_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_GROUP_AGG,
4051  input_rel, grouped_rel,
4052  extra);
4053 
4054  /* Let extensions possibly add some more paths */
4055  if (create_upper_paths_hook)
4056  (*create_upper_paths_hook) (root, UPPERREL_GROUP_AGG,
4057  input_rel, grouped_rel,
4058  extra);
4059 }
PartitionwiseAggregateType
Definition: pathnodes.h:3203
@ PARTITIONWISE_AGGREGATE_PARTIAL
Definition: pathnodes.h:3206
@ UPPERREL_GROUP_AGG
Definition: pathnodes.h:74
static RelOptInfo * create_partial_grouping_paths(PlannerInfo *root, RelOptInfo *grouped_rel, RelOptInfo *input_rel, grouping_sets_data *gd, GroupPathExtraData *extra, bool force_rel_creation)
Definition: planner.c:7031
static void add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, double dNumGroups, GroupPathExtraData *extra)
Definition: planner.c:6767
static double get_number_of_groups(PlannerInfo *root, double path_rows, grouping_sets_data *gd, List *target_list)
Definition: planner.c:3556
static void create_partitionwise_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, PartitionwiseAggregateType patype, GroupPathExtraData *extra)
Definition: planner.c:7719
static bool group_by_has_partkey(RelOptInfo *input_rel, List *targetList, List *groupClause)
Definition: planner.c:7863
List * groupClause
Definition: parsenodes.h:198

References add_paths_to_grouping_rel(), Assert(), RelOptInfo::cheapest_total_path, create_partial_grouping_paths(), create_partitionwise_grouping_paths(), create_upper_paths_hook, ereport, errcode(), errdetail(), errmsg(), ERROR, GroupPathExtraData::flags, gather_grouping_paths(), get_number_of_groups(), group_by_has_partkey(), Query::groupClause, GROUPING_CAN_PARTIAL_AGG, IS_PARTITIONED_REL, NIL, PlannerInfo::parse, RelOptInfo::partial_pathlist, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, PARTITIONWISE_AGGREGATE_PARTIAL, RelOptInfo::pathlist, GroupPathExtraData::patype, Path::rows, set_cheapest(), GroupPathExtraData::targetList, and UPPERREL_GROUP_AGG.

Referenced by create_grouping_paths(), and create_partitionwise_grouping_paths().
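
A hypothetical condensation of the partitionwise-aggregation decision at the top of this function; the local enum mirrors PartitionwiseAggregateType and the boolean parameters stand in for the tests named in the source comments:

#include <stdbool.h>

typedef enum { PWA_NONE, PWA_FULL, PWA_PARTIAL } pwa_type;

static pwa_type
choose_patype(bool parent_allows,       /* extra->patype != ..._NONE */
              bool input_partitioned,   /* IS_PARTITIONED_REL(input_rel) */
              bool top_or_parent_full,  /* topmost, or parent doing FULL */
              bool group_has_partkey,   /* group_by_has_partkey() */
              bool can_partial)         /* GROUPING_CAN_PARTIAL_AGG set */
{
    if (!parent_allows || !input_partitioned)
        return PWA_NONE;
    if (top_or_parent_full && group_has_partkey)
        return PWA_FULL;                /* aggregate fully per partition */
    if (can_partial)
        return PWA_PARTIAL;             /* partial per partition, finalize above */
    return PWA_NONE;
}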

◆ create_partial_distinct_paths()

static void create_partial_distinct_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo final_distinct_rel 
)
static

Definition at line 4716 of file planner.c.

4718 {
4719  RelOptInfo *partial_distinct_rel;
4720  Query *parse;
4721  List *distinctExprs;
4722  double numDistinctRows;
4723  Path *cheapest_partial_path;
4724  ListCell *lc;
4725 
4726  /* nothing to do when there are no partial paths in the input rel */
4727  if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
4728  return;
4729 
4730  parse = root->parse;
4731 
4732  /* can't do parallel DISTINCT ON */
4733  if (parse->hasDistinctOn)
4734  return;
4735 
4736  partial_distinct_rel = fetch_upper_rel(root, UPPERREL_PARTIAL_DISTINCT,
4737  NULL);
4738  partial_distinct_rel->reltarget = root->upper_targets[UPPERREL_PARTIAL_DISTINCT];
4739  partial_distinct_rel->consider_parallel = input_rel->consider_parallel;
4740 
4741  /*
4742  * If input_rel belongs to a single FDW, so does the partial_distinct_rel.
4743  */
4744  partial_distinct_rel->serverid = input_rel->serverid;
4745  partial_distinct_rel->userid = input_rel->userid;
4746  partial_distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4747  partial_distinct_rel->fdwroutine = input_rel->fdwroutine;
4748 
4749  cheapest_partial_path = linitial(input_rel->partial_pathlist);
4750 
4751  distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
4752  parse->targetList);
4753 
4754  /* estimate how many distinct rows we'll get from each worker */
4755  numDistinctRows = estimate_num_groups(root, distinctExprs,
4756  cheapest_partial_path->rows,
4757  NULL, NULL);
4758 
4759  /*
4760  * Try sorting the cheapest path and incrementally sorting any paths with
4761  * presorted keys, and put unique paths atop those.
4762  */
4763  if (grouping_is_sortable(root->processed_distinctClause))
4764  {
4765  foreach(lc, input_rel->partial_pathlist)
4766  {
4767  Path *input_path = (Path *) lfirst(lc);
4768  Path *sorted_path;
4769  bool is_sorted;
4770  int presorted_keys;
4771 
4772  is_sorted = pathkeys_count_contained_in(root->distinct_pathkeys,
4773  input_path->pathkeys,
4774  &presorted_keys);
4775 
4776  if (is_sorted)
4777  sorted_path = input_path;
4778  else
4779  {
4780  /*
4781  * Try at least sorting the cheapest path and also try
4782  * incrementally sorting any path which is partially sorted
4783  * already (no need to deal with paths which have presorted
4784  * keys when incremental sort is disabled unless it's the
4785  * cheapest partial path).
4786  */
4787  if (input_path != cheapest_partial_path &&
4788  (presorted_keys == 0 || !enable_incremental_sort))
4789  continue;
4790 
4791  /*
4792  * We've no need to consider both a sort and incremental sort.
4793  * We'll just do a sort if there are no presorted keys and an
4794  * incremental sort when there are presorted keys.
4795  */
4796  if (presorted_keys == 0 || !enable_incremental_sort)
4797  sorted_path = (Path *) create_sort_path(root,
4798  partial_distinct_rel,
4799  input_path,
4800  root->distinct_pathkeys,
4801  -1.0);
4802  else
4803  sorted_path = (Path *) create_incremental_sort_path(root,
4804  partial_distinct_rel,
4805  input_path,
4806  root->distinct_pathkeys,
4807  presorted_keys,
4808  -1.0);
4809  }
4810 
4811  add_partial_path(partial_distinct_rel, (Path *)
4812  create_upper_unique_path(root, partial_distinct_rel,
4813  sorted_path,
4814  list_length(root->distinct_pathkeys),
4815  numDistinctRows));
4816  }
4817  }
4818 
4819  /*
4820  * Now try hash aggregate paths, if enabled and hashing is possible. Since
4821  * we're not on the hook to ensure we do our best to create at least one
4822  * path here, we treat enable_hashagg as a hard off-switch rather than the
4823  * slightly softer variant in create_final_distinct_paths.
4824  */
4825  if (enable_hashagg && grouping_is_hashable(root->processed_distinctClause))
4826  {
4827  add_partial_path(partial_distinct_rel, (Path *)
4828  create_agg_path(root,
4829  partial_distinct_rel,
4830  cheapest_partial_path,
4831  cheapest_partial_path->pathtarget,
4832  AGG_HASHED,
4833  AGGSPLIT_SIMPLE,
4834  root->processed_distinctClause,
4835  NIL,
4836  NULL,
4837  numDistinctRows));
4838  }
4839 
4840  /*
4841  * If there is an FDW that's responsible for all baserels of the query,
4842  * let it consider adding ForeignPaths.
4843  */
4844  if (partial_distinct_rel->fdwroutine &&
4845  partial_distinct_rel->fdwroutine->GetForeignUpperPaths)
4846  partial_distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4847  UPPERREL_PARTIAL_DISTINCT,
4848  input_rel,
4849  partial_distinct_rel,
4850  NULL);
4851 
4852  /* Let extensions possibly add some more partial paths */
4853  if (create_upper_paths_hook)
4854  (*create_upper_paths_hook) (root, UPPERREL_PARTIAL_DISTINCT,
4855  input_rel, partial_distinct_rel, NULL);
4856 
4857  if (partial_distinct_rel->partial_pathlist != NIL)
4858  {
4859  generate_gather_paths(root, partial_distinct_rel, true);
4860  set_cheapest(partial_distinct_rel);
4861 
4862  /*
4863  * Finally, create paths to distinctify the final result. This step
4864  * is needed to remove any duplicates due to combining rows from
4865  * parallel workers.
4866  */
4867  create_final_distinct_paths(root, partial_distinct_rel,
4868  final_distinct_rel);
4869  }
4870 }
void generate_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
Definition: allpaths.c:3060
void add_partial_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:749
@ UPPERREL_PARTIAL_DISTINCT
Definition: pathnodes.h:76

References add_partial_path(), AGG_HASHED, AGGSPLIT_SIMPLE, RelOptInfo::consider_parallel, create_agg_path(), create_final_distinct_paths(), create_incremental_sort_path(), create_sort_path(), create_upper_paths_hook, create_upper_unique_path(), PlannerInfo::distinct_pathkeys, enable_hashagg, enable_incremental_sort, estimate_num_groups(), fetch_upper_rel(), generate_gather_paths(), get_sortgrouplist_exprs(), grouping_is_hashable(), grouping_is_sortable(), lfirst, linitial, list_length(), NIL, parse(), PlannerInfo::parse, RelOptInfo::partial_pathlist, Path::pathkeys, pathkeys_count_contained_in(), PlannerInfo::processed_distinctClause, RelOptInfo::reltarget, Path::rows, RelOptInfo::serverid, set_cheapest(), UPPERREL_PARTIAL_DISTINCT, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by create_distinct_paths().
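
A toy, self-contained model of the two-phase flow this function sets up: each worker de-duplicates its own stream (partial distinct), Gather concatenates the per-worker results, and the final distinct removes values that survived in more than one worker. All data is made up:

#include <stdio.h>

int
main(void)
{
    int w0[] = {1, 1, 2}, w1[] = {2, 3, 3};     /* per-worker sorted inputs */
    int gathered[6], n = 0, seen[16] = {0};

    /* partial distinct within each worker */
    for (int i = 0; i < 3; i++)
        if (i == 0 || w0[i] != w0[i - 1]) gathered[n++] = w0[i];
    for (int i = 0; i < 3; i++)
        if (i == 0 || w1[i] != w1[i - 1]) gathered[n++] = w1[i];

    /* final distinct after Gather: value 2 appears in both workers */
    for (int i = 0; i < n; i++)
        if (!seen[gathered[i]]++)
            printf("%d\n", gathered[i]);        /* prints 1 2 3 */
    return 0;
}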

◆ create_partial_grouping_paths()

static RelOptInfo * create_partial_grouping_paths ( PlannerInfo root,
RelOptInfo grouped_rel,
RelOptInfo input_rel,
grouping_sets_data gd,
GroupPathExtraData extra,
bool  force_rel_creation 
)
static

Definition at line 7031 of file planner.c.

7037 {
7038  Query *parse = root->parse;
7039  RelOptInfo *partially_grouped_rel;
7040  AggClauseCosts *agg_partial_costs = &extra->agg_partial_costs;
7041  AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7042  Path *cheapest_partial_path = NULL;
7043  Path *cheapest_total_path = NULL;
7044  double dNumPartialGroups = 0;
7045  double dNumPartialPartialGroups = 0;
7046  ListCell *lc;
7047  bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7048  bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7049 
7050  /*
7051  * Consider whether we should generate partially aggregated non-partial
7052  * paths. We can only do this if we have a non-partial path, and only if
7053  * the parent of the input rel is performing partial partitionwise
7054  * aggregation. (Note that extra->patype is the type of partitionwise
7055  * aggregation being used at the parent level, not this level.)
7056  */
7057  if (input_rel->pathlist != NIL &&
7058  extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
7059  cheapest_total_path = input_rel->cheapest_total_path;
7060 
7061  /*
7062  * If parallelism is possible for grouped_rel, then we should consider
7063  * generating partially-grouped partial paths. However, if the input rel
7064  * has no partial paths, then we can't.
7065  */
7066  if (grouped_rel->consider_parallel && input_rel->partial_pathlist != NIL)
7067  cheapest_partial_path = linitial(input_rel->partial_pathlist);
7068 
7069  /*
7070  * If we can't partially aggregate partial paths, and we can't partially
7071  * aggregate non-partial paths, then don't bother creating the new
7072  * RelOptInfo at all, unless the caller specified force_rel_creation.
7073  */
7074  if (cheapest_total_path == NULL &&
7075  cheapest_partial_path == NULL &&
7076  !force_rel_creation)
7077  return NULL;
7078 
7079  /*
7080  * Build a new upper relation to represent the result of partially
7081  * aggregating the rows from the input relation.
7082  */
7083  partially_grouped_rel = fetch_upper_rel(root,
7084  UPPERREL_PARTIAL_GROUP_AGG,
7085  grouped_rel->relids);
7086  partially_grouped_rel->consider_parallel =
7087  grouped_rel->consider_parallel;
7088  partially_grouped_rel->reloptkind = grouped_rel->reloptkind;
7089  partially_grouped_rel->serverid = grouped_rel->serverid;
7090  partially_grouped_rel->userid = grouped_rel->userid;
7091  partially_grouped_rel->useridiscurrent = grouped_rel->useridiscurrent;
7092  partially_grouped_rel->fdwroutine = grouped_rel->fdwroutine;
7093 
7094  /*
7095  * Build target list for partial aggregate paths. These paths cannot just
7096  * emit the same tlist as regular aggregate paths, because (1) we must
7097  * include Vars and Aggrefs needed in HAVING, which might not appear in
7098  * the result tlist, and (2) the Aggrefs must be set in partial mode.
7099  */
7100  partially_grouped_rel->reltarget =
7101  make_partial_grouping_target(root, grouped_rel->reltarget,
7102  extra->havingQual);
7103 
7104  if (!extra->partial_costs_set)
7105  {
7106  /*
7107  * Collect statistics about aggregates for estimating costs of
7108  * performing aggregation in parallel.
7109  */
7110  MemSet(agg_partial_costs, 0, sizeof(AggClauseCosts));
7111  MemSet(agg_final_costs, 0, sizeof(AggClauseCosts));
7112  if (parse->hasAggs)
7113  {
7114  /* partial phase */
7115  get_agg_clause_costs(root, AGGSPLIT_INITIAL_SERIAL,
7116  agg_partial_costs);
7117 
7118  /* final phase */
7119  get_agg_clause_costs(root, AGGSPLIT_FINAL_DESERIAL,
7120  agg_final_costs);
7121  }
7122 
7123  extra->partial_costs_set = true;
7124  }
7125 
7126  /* Estimate number of partial groups. */
7127  if (cheapest_total_path != NULL)
7128  dNumPartialGroups =
7129  get_number_of_groups(root,
7130  cheapest_total_path->rows,
7131  gd,
7132  extra->targetList);
7133  if (cheapest_partial_path != NULL)
7134  dNumPartialPartialGroups =
7135  get_number_of_groups(root,
7136  cheapest_partial_path->rows,
7137  gd,
7138  extra->targetList);
7139 
7140  if (can_sort && cheapest_total_path != NULL)
7141  {
7142  /* This should have been checked previously */
7143  Assert(parse->hasAggs || parse->groupClause);
7144 
7145  /*
7146  * Use any available suitably-sorted path as input, and also consider
7147  * sorting the cheapest partial path.
7148  */
7149  foreach(lc, input_rel->pathlist)
7150  {
7151  Path *path = (Path *) lfirst(lc);
7152  bool is_sorted;
7153  int presorted_keys;
7154 
7155  is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
7156  path->pathkeys,
7157  &presorted_keys);
7158  if (!is_sorted)
7159  {
7160  /*
7161  * Try at least sorting the cheapest path and also try
7162  * incrementally sorting any path which is partially sorted
7163  * already (no need to deal with paths which have presorted
7164  * keys when incremental sort is disabled unless it's the
7165  * cheapest input path).
7166  */
7167  if (path != cheapest_total_path &&
7168  (presorted_keys == 0 || !enable_incremental_sort))
7169  continue;
7170 
7171  /*
7172  * We've no need to consider both a sort and incremental sort.
7173  * We'll just do a sort if there are no presorted keys and an
7174  * incremental sort when there are presorted keys.
7175  */
7176  if (presorted_keys == 0 || !enable_incremental_sort)
7177  path = (Path *) create_sort_path(root,
7178  partially_grouped_rel,
7179  path,
7180  root->group_pathkeys,
7181  -1.0);
7182  else
7183  path = (Path *) create_incremental_sort_path(root,
7184  partially_grouped_rel,
7185  path,
7186  root->group_pathkeys,
7187  presorted_keys,
7188  -1.0);
7189  }
7190 
7191  if (parse->hasAggs)
7192  add_path(partially_grouped_rel, (Path *)
7193  create_agg_path(root,
7194  partially_grouped_rel,
7195  path,
7196  partially_grouped_rel->reltarget,
7197  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7198  AGGSPLIT_INITIAL_SERIAL,
7199  root->processed_groupClause,
7200  NIL,
7201  agg_partial_costs,
7202  dNumPartialGroups));
7203  else
7204  add_path(partially_grouped_rel, (Path *)
7205  create_group_path(root,
7206  partially_grouped_rel,
7207  path,
7208  root->processed_groupClause,
7209  NIL,
7210  dNumPartialGroups));
7211  }
7212  }
7213 
7214  if (can_sort && cheapest_partial_path != NULL)
7215  {
7216  /* Similar to above logic, but for partial paths. */
7217  foreach(lc, input_rel->partial_pathlist)
7218  {
7219  Path *path = (Path *) lfirst(lc);
7220  bool is_sorted;
7221  int presorted_keys;
7222 
7223  is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
7224  path->pathkeys,
7225  &presorted_keys);
7226 
7227  if (!is_sorted)
7228  {
7229  /*
7230  * Try at least sorting the cheapest path and also try
7231  * incrementally sorting any path which is partially sorted
7232  * already (no need to deal with paths which have presorted
7233  * keys when incremental sort is disabled unless it's the
7234  * cheapest input path).
7235  */
7236  if (path != cheapest_partial_path &&
7237  (presorted_keys == 0 || !enable_incremental_sort))
7238  continue;
7239 
7240  /*
7241  * We've no need to consider both a sort and incremental sort.
7242  * We'll just do a sort if there are no presorted keys and an
7243  * incremental sort when there are presorted keys.
7244  */
7245  if (presorted_keys == 0 || !enable_incremental_sort)
7246  path = (Path *) create_sort_path(root,
7247  partially_grouped_rel,
7248  path,
7249  root->group_pathkeys,
7250  -1.0);
7251  else
7252  path = (Path *) create_incremental_sort_path(root,
7253  partially_grouped_rel,
7254  path,
7255  root->group_pathkeys,
7256  presorted_keys,
7257  -1.0);
7258  }
7259 
7260  if (parse->hasAggs)
7261  add_partial_path(partially_grouped_rel, (Path *)
7262  create_agg_path(root,
7263  partially_grouped_rel,
7264  path,
7265  partially_grouped_rel->reltarget,
7266  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7267  AGGSPLIT_INITIAL_SERIAL,
7268  root->processed_groupClause,
7269  NIL,
7270  agg_partial_costs,
7271  dNumPartialPartialGroups));
7272  else
7273  add_partial_path(partially_grouped_rel, (Path *)
7274  create_group_path(root,
7275  partially_grouped_rel,
7276  path,
7277  root->processed_groupClause,
7278  NIL,
7279  dNumPartialPartialGroups));
7280  }
7281  }
7282 
7283  /*
7284  * Add a partially-grouped HashAgg Path where possible
7285  */
7286  if (can_hash && cheapest_total_path != NULL)
7287  {
7288  /* Checked above */
7289  Assert(parse->hasAggs || parse->groupClause);
7290 
7291  add_path(partially_grouped_rel, (Path *)
7292  create_agg_path(root,
7293  partially_grouped_rel,
7294  cheapest_total_path,
7295  partially_grouped_rel->reltarget,
7296  AGG_HASHED,
7297  AGGSPLIT_INITIAL_SERIAL,
7298  root->processed_groupClause,
7299  NIL,
7300  agg_partial_costs,
7301  dNumPartialGroups));
7302  }
7303 
7304  /*
7305  * Now add a partially-grouped HashAgg partial Path where possible
7306  */
7307  if (can_hash && cheapest_partial_path != NULL)
7308  {
7309  add_partial_path(partially_grouped_rel, (Path *)
7310  create_agg_path(root,
7311  partially_grouped_rel,
7312  cheapest_partial_path,
7313  partially_grouped_rel->reltarget,
7314  AGG_HASHED,
7315  AGGSPLIT_INITIAL_SERIAL,
7316  root->processed_groupClause,
7317  NIL,
7318  agg_partial_costs,
7319  dNumPartialPartialGroups));
7320  }
7321 
7322  /*
7323  * If there is an FDW that's responsible for all baserels of the query,
7324  * let it consider adding partially grouped ForeignPaths.
7325  */
7326  if (partially_grouped_rel->fdwroutine &&
7327  partially_grouped_rel->fdwroutine->GetForeignUpperPaths)
7328  {
7329  FdwRoutine *fdwroutine = partially_grouped_rel->fdwroutine;
7330 
7331  fdwroutine->GetForeignUpperPaths(root,
7332  UPPERREL_PARTIAL_GROUP_AGG,
7333  input_rel, partially_grouped_rel,
7334  extra);
7335  }
7336 
7337  return partially_grouped_rel;
7338 }
@ AGGSPLIT_INITIAL_SERIAL
Definition: nodes.h:387
@ UPPERREL_PARTIAL_GROUP_AGG
Definition: pathnodes.h:72
static PathTarget * make_partial_grouping_target(PlannerInfo *root, PathTarget *grouping_target, Node *havingQual)
Definition: planner.c:5421
GetForeignUpperPaths_function GetForeignUpperPaths
Definition: fdwapi.h:226
AggClauseCosts agg_partial_costs
Definition: pathnodes.h:3227
RelOptKind reloptkind
Definition: pathnodes.h:856

References add_partial_path(), add_path(), GroupPathExtraData::agg_final_costs, AGG_HASHED, GroupPathExtraData::agg_partial_costs, AGG_PLAIN, AGG_SORTED, AGGSPLIT_FINAL_DESERIAL, AGGSPLIT_INITIAL_SERIAL, Assert(), RelOptInfo::cheapest_total_path, RelOptInfo::consider_parallel, create_agg_path(), create_group_path(), create_incremental_sort_path(), create_sort_path(), enable_incremental_sort, fetch_upper_rel(), GroupPathExtraData::flags, get_agg_clause_costs(), get_number_of_groups(), FdwRoutine::GetForeignUpperPaths, PlannerInfo::group_pathkeys, GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, GroupPathExtraData::havingQual, lfirst, linitial, make_partial_grouping_target(), MemSet, NIL, parse(), PlannerInfo::parse, GroupPathExtraData::partial_costs_set, RelOptInfo::partial_pathlist, PARTITIONWISE_AGGREGATE_PARTIAL, Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::pathlist, GroupPathExtraData::patype, PlannerInfo::processed_groupClause, RelOptInfo::relids, RelOptInfo::reloptkind, RelOptInfo::reltarget, Path::rows, RelOptInfo::serverid, GroupPathExtraData::targetList, UPPERREL_PARTIAL_GROUP_AGG, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by create_ordinary_grouping_paths().
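As an illustration of what these partially grouped paths compute (an example, not taken from the source): for avg(x), a path built here with AGGSPLIT_INITIAL_SERIAL stops at the serialized transition state, e.g. a per-group (count, sum) pair in each worker or partition. Only the later finalization step, built elsewhere with AGGSPLIT_FINAL_DESERIAL, combines those states and divides sum by count to produce the final value.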

◆ create_partitionwise_grouping_paths()

static void create_partitionwise_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo grouped_rel,
RelOptInfo partially_grouped_rel,
const AggClauseCosts agg_costs,
grouping_sets_data gd,
PartitionwiseAggregateType  patype,
GroupPathExtraData extra 
)
static

Definition at line 7719 of file planner.c.

7727 {
7728  List *grouped_live_children = NIL;
7729  List *partially_grouped_live_children = NIL;
7730  PathTarget *target = grouped_rel->reltarget;
7731  bool partial_grouping_valid = true;
7732  int i;
7733 
7734  Assert(patype != PARTITIONWISE_AGGREGATE_NONE);
7735  Assert(patype != PARTITIONWISE_AGGREGATE_PARTIAL ||
7736  partially_grouped_rel != NULL);
7737 
7738  /* Add paths for partitionwise aggregation/grouping. */
7739  i = -1;
7740  while ((i = bms_next_member(input_rel->live_parts, i)) >= 0)
7741  {
7742  RelOptInfo *child_input_rel = input_rel->part_rels[i];
7743  PathTarget *child_target;
7744  AppendRelInfo **appinfos;
7745  int nappinfos;
7746  GroupPathExtraData child_extra;
7747  RelOptInfo *child_grouped_rel;
7748  RelOptInfo *child_partially_grouped_rel;
7749 
7750  Assert(child_input_rel != NULL);
7751 
7752  /* Dummy children can be ignored. */
7753  if (IS_DUMMY_REL(child_input_rel))
7754  continue;
7755 
7756  child_target = copy_pathtarget(target);
7757 
7758  /*
7759  * Copy the given "extra" structure as is and then override the
7760  * members specific to this child.
7761  */
7762  memcpy(&child_extra, extra, sizeof(child_extra));
7763 
7764  appinfos = find_appinfos_by_relids(root, child_input_rel->relids,
7765  &nappinfos);
7766 
7767  child_target->exprs = (List *)
7768  adjust_appendrel_attrs(root,
7769  (Node *) target->exprs,
7770  nappinfos, appinfos);
7771 
7772  /* Translate havingQual and targetList. */
7773  child_extra.havingQual = (Node *)
7774  adjust_appendrel_attrs(root,
7775  extra->havingQual,
7776  nappinfos, appinfos);
7777  child_extra.targetList = (List *)
7778  adjust_appendrel_attrs(root,
7779  (Node *) extra->targetList,
7780  nappinfos, appinfos);
7781 
7782  /*
7783  * extra->patype was the value computed for our parent rel; patype is
7784  * the value for this relation. For the child, our value is its
7785  * parent rel's value.
7786  */
7787  child_extra.patype = patype;
7788 
7789  /*
7790  * Create grouping relation to hold fully aggregated grouping and/or
7791  * aggregation paths for the child.
7792  */
7793  child_grouped_rel = make_grouping_rel(root, child_input_rel,
7794  child_target,
7795  extra->target_parallel_safe,
7796  child_extra.havingQual);
7797 
7798  /* Create grouping paths for this child relation. */
7799  create_ordinary_grouping_paths(root, child_input_rel,
7800  child_grouped_rel,
7801  agg_costs, gd, &child_extra,
7802  &child_partially_grouped_rel);
7803 
7804  if (child_partially_grouped_rel)
7805  {
7806  partially_grouped_live_children =
7807  lappend(partially_grouped_live_children,
7808  child_partially_grouped_rel);
7809  }
7810  else
7811  partial_grouping_valid = false;
7812 
7813  if (patype == PARTITIONWISE_AGGREGATE_FULL)
7814  {
7815  set_cheapest(child_grouped_rel);
7816  grouped_live_children = lappend(grouped_live_children,
7817  child_grouped_rel);
7818  }
7819 
7820  pfree(appinfos);
7821  }
7822 
7823  /*
7824  * Try to create append paths for partially grouped children. For full
7825  * partitionwise aggregation, we might have paths in the partial_pathlist
7826  * if parallel aggregation is possible. For partial partitionwise
7827  * aggregation, we may have paths in both pathlist and partial_pathlist.
7828  *
7829  * NB: We must have a partially grouped path for every child in order to
7830  * generate a partially grouped path for this relation.
7831  */
7832  if (partially_grouped_rel && partial_grouping_valid)
7833  {
7834  Assert(partially_grouped_live_children != NIL);
7835 
7836  add_paths_to_append_rel(root, partially_grouped_rel,
7837  partially_grouped_live_children);
7838 
7839  /*
7840  * We need to call set_cheapest, since the finalization step will use the
7841  * cheapest path from the rel.
7842  */
7843  if (partially_grouped_rel->pathlist)
7844  set_cheapest(partially_grouped_rel);
7845  }
7846 
7847  /* If possible, create append paths for fully grouped children. */
7848  if (patype == PARTITIONWISE_AGGREGATE_FULL)
7849  {
7850  Assert(grouped_live_children != NIL);
7851 
7852  add_paths_to_append_rel(root, grouped_rel, grouped_live_children);
7853  }
7854 }

References add_paths_to_append_rel(), adjust_appendrel_attrs(), Assert(), bms_next_member(), copy_pathtarget(), create_ordinary_grouping_paths(), PathTarget::exprs, find_appinfos_by_relids(), GroupPathExtraData::havingQual, i, IS_DUMMY_REL, lappend(), RelOptInfo::live_parts, make_grouping_rel(), NIL, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, PARTITIONWISE_AGGREGATE_PARTIAL, RelOptInfo::pathlist, GroupPathExtraData::patype, pfree(), RelOptInfo::relids, RelOptInfo::reltarget, set_cheapest(), GroupPathExtraData::target_parallel_safe, and GroupPathExtraData::targetList.

Referenced by create_ordinary_grouping_paths().
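To make the two patype cases concrete (an example, not from the source): under PARTITIONWISE_AGGREGATE_FULL each child is grouped to completion and the per-child results are simply appended, which is valid only when no group can span partitions (see group_by_has_partkey()). Under PARTITIONWISE_AGGREGATE_PARTIAL each child contributes only partially aggregated rows to partially_grouped_rel, and a finalization step above the Append combines groups that cross partition boundaries.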

◆ create_window_paths()

static RelOptInfo * create_window_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget input_target,
PathTarget output_target,
bool  output_target_parallel_safe,
WindowFuncLists wflists,
List activeWindows 
)
static

Definition at line 4430 of file planner.c.

4437 {
4438  RelOptInfo *window_rel;
4439  ListCell *lc;
4440 
4441  /* For now, do all work in the (WINDOW, NULL) upperrel */
4442  window_rel = fetch_upper_rel(root, UPPERREL_WINDOW, NULL);
4443 
4444  /*
4445  * If the input relation is not parallel-safe, then the window relation
4446  * can't be parallel-safe, either. Otherwise, we need to examine the
4447  * target list and active windows for non-parallel-safe constructs.
4448  */
4449  if (input_rel->consider_parallel && output_target_parallel_safe &&
4450  is_parallel_safe(root, (Node *) activeWindows))
4451  window_rel->consider_parallel = true;
4452 
4453  /*
4454  * If the input rel belongs to a single FDW, so does the window rel.
4455  */
4456  window_rel->serverid = input_rel->serverid;
4457  window_rel->userid = input_rel->userid;
4458  window_rel->useridiscurrent = input_rel->useridiscurrent;
4459  window_rel->fdwroutine = input_rel->fdwroutine;
4460 
4461  /*
4462  * Consider computing window functions starting from the existing
4463  * cheapest-total path (which will likely require a sort) as well as any
4464  * existing paths that satisfy or partially satisfy root->window_pathkeys.
4465  */
4466  foreach(lc, input_rel->pathlist)
4467  {
4468  Path *path = (Path *) lfirst(lc);
4469  int presorted_keys;
4470 
4471  if (path == input_rel->cheapest_total_path ||
4472  pathkeys_count_contained_in(root->window_pathkeys, path->pathkeys,
4473  &presorted_keys) ||
4474  presorted_keys > 0)
4475  create_one_window_path(root,
4476  window_rel,
4477  path,
4478  input_target,
4479  output_target,
4480  wflists,
4481  activeWindows);
4482  }
4483 
4484  /*
4485  * If there is an FDW that's responsible for all baserels of the query,
4486  * let it consider adding ForeignPaths.
4487  */
4488  if (window_rel->fdwroutine &&
4489  window_rel->fdwroutine->GetForeignUpperPaths)
4490  window_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_WINDOW,
4491  input_rel, window_rel,
4492  NULL);
4493 
4494  /* Let extensions possibly add some more paths */
4495  if (create_upper_paths_hook)
4496  (*create_upper_paths_hook) (root, UPPERREL_WINDOW,
4497  input_rel, window_rel, NULL);
4498 
4499  /* Now choose the best path(s) */
4500  set_cheapest(window_rel);
4501 
4502  return window_rel;
4503 }
bool is_parallel_safe(PlannerInfo *root, Node *node)
Definition: clauses.c:634
@ UPPERREL_WINDOW
Definition: pathnodes.h:75
static void create_one_window_path(PlannerInfo *root, RelOptInfo *window_rel, Path *path, PathTarget *input_target, PathTarget *output_target, WindowFuncLists *wflists, List *activeWindows)
Definition: planner.c:4517
List * window_pathkeys
Definition: pathnodes.h:398

References RelOptInfo::cheapest_total_path, RelOptInfo::consider_parallel, create_one_window_path(), create_upper_paths_hook, fetch_upper_rel(), is_parallel_safe(), lfirst, Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::pathlist, RelOptInfo::serverid, set_cheapest(), UPPERREL_WINDOW, RelOptInfo::userid, RelOptInfo::useridiscurrent, and PlannerInfo::window_pathkeys.

Referenced by grouping_planner().
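Because this stage invokes create_upper_paths_hook with UPPERREL_WINDOW, an extension can inspect or add window paths here. Below is a minimal sketch of such a hook; the names my_upper_paths_hook and prev_upper_paths_hook are illustrative only, while the hook variable and its type are as declared in optimizer/planner.h.

#include "postgres.h"

#include "fmgr.h"
#include "optimizer/planner.h"

PG_MODULE_MAGIC;

void _PG_init(void);

static create_upper_paths_hook_type prev_upper_paths_hook = NULL;

/*
 * Illustrative hook body: called once per upper-rel stage, including
 * UPPERREL_WINDOW as shown in create_window_paths() above.
 */
static void
my_upper_paths_hook(PlannerInfo *root, UpperRelationKind stage,
                    RelOptInfo *input_rel, RelOptInfo *output_rel,
                    void *extra)
{
    if (prev_upper_paths_hook)
        prev_upper_paths_hook(root, stage, input_rel, output_rel, extra);

    if (stage == UPPERREL_WINDOW)
    {
        /* inspect input_rel->pathlist here, or add_path() to output_rel */
    }
}

void
_PG_init(void)
{
    prev_upper_paths_hook = create_upper_paths_hook;
    create_upper_paths_hook = my_upper_paths_hook;
}

The same pattern applies to the other upper-rel stages that call the hook, such as UPPERREL_FINAL in grouping_planner() below.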

◆ expression_planner()

Expr* expression_planner ( Expr expr)

Definition at line 6434 of file planner.c.

6435 {
6436  Node *result;
6437 
6438  /*
6439  * Convert named-argument function calls, insert default arguments and
6440  * simplify constant subexprs
6441  */
6442  result = eval_const_expressions(NULL, (Node *) expr);
6443 
6444  /* Fill in opfuncid values if missing */
6445  fix_opfuncids(result);
6446 
6447  return (Expr *) result;
6448 }
Node * eval_const_expressions(PlannerInfo *root, Node *node)
Definition: clauses.c:2134
void fix_opfuncids(Node *node)
Definition: nodeFuncs.c:1641

References eval_const_expressions(), and fix_opfuncids().

Referenced by ATExecAddColumn(), ATPrepAlterColumnType(), BeginCopyFrom(), CheckMutability(), ComputePartitionAttrs(), ExecPrepareCheck(), ExecPrepareExpr(), ExecPrepareQual(), load_domaintype_info(), set_baserel_partition_constraint(), slot_fill_defaults(), StoreAttrDefault(), and transformPartitionBoundValue().
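A minimal usage sketch, mirroring callers such as ExecPrepareExpr(): plan the standalone expression, then hand it to the executor. Here raw_expr and econtext are assumed to be supplied by the caller, e.g. a column default fetched from the catalogs and a per-tuple ExprContext.

#include "postgres.h"

#include "executor/executor.h"
#include "optimizer/optimizer.h"

/*
 * Sketch only: plan and evaluate a standalone (query-independent)
 * expression.  raw_expr and econtext are assumed to come from the caller.
 */
static Datum
eval_standalone_expr(Expr *raw_expr, ExprContext *econtext, bool *isnull)
{
    /* simplify constants, resolve named/default arguments, fill opfuncids */
    Expr       *planned = expression_planner(raw_expr);

    /* compile for execution; no parent PlanState is needed here */
    ExprState  *exprstate = ExecInitExpr(planned, NULL);

    return ExecEvalExpr(exprstate, econtext, isnull);
}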

◆ expression_planner_with_deps()

Expr* expression_planner_with_deps ( Expr expr,
List **  relationOids,
List **  invalItems 
)

Definition at line 6461 of file planner.c.

6464 {
6465  Node *result;
6466  PlannerGlobal glob;
6467  PlannerInfo root;
6468 
6469  /* Make up dummy planner state so we can use setrefs machinery */
6470  MemSet(&glob, 0, sizeof(glob));
6471  glob.type = T_PlannerGlobal;
6472  glob.relationOids = NIL;
6473  glob.invalItems = NIL;
6474 
6475  MemSet(&root, 0, sizeof(root));
6476  root.type = T_PlannerInfo;
6477  root.glob = &glob;
6478 
6479  /*
6480  * Convert named-argument function calls, insert default arguments and
6481  * simplify constant subexprs. Collect identities of inlined functions
6482  * and elided domains, too.
6483  */
6484  result = eval_const_expressions(&root, (Node *) expr);
6485 
6486  /* Fill in opfuncid values if missing */
6487  fix_opfuncids(result);
6488 
6489  /*
6490  * Now walk the finished expression to find anything else we ought to
6491  * record as an expression dependency.
6492  */
6493  (void) extract_query_dependencies_walker(result, &root);
6494 
6495  *relationOids = glob.relationOids;
6496  *invalItems = glob.invalItems;
6497 
6498  return (Expr *) result;
6499 }
bool extract_query_dependencies_walker(Node *node, PlannerInfo *context)
Definition: setrefs.c:3518
List * invalItems
Definition: pathnodes.h:135
List * relationOids
Definition: pathnodes.h:132
PlannerGlobal * glob
Definition: pathnodes.h:205

References eval_const_expressions(), extract_query_dependencies_walker(), fix_opfuncids(), PlannerInfo::glob, PlannerGlobal::invalItems, MemSet, NIL, and PlannerGlobal::relationOids.

Referenced by GetCachedExpression().
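A short sketch of the dependency-collecting variant, following its one in-tree caller, GetCachedExpression(); raw_expr is again assumed to be supplied by the caller.

List       *relationOids = NIL;
List       *invalItems = NIL;
Expr       *planned;

planned = expression_planner_with_deps(raw_expr, &relationOids, &invalItems);

/*
 * relationOids now names every relation the planned expression depends
 * on, and invalItems carries PlanInvalItems for inlined functions and
 * elided domains, so a cached copy of "planned" can be invalidated when
 * any of them changes.
 */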

◆ extract_rollup_sets()

static List * extract_rollup_sets ( List groupingSets)
static

Definition at line 2913 of file planner.c.

2914 {
2915  int num_sets_raw = list_length(groupingSets);
2916  int num_empty = 0;
2917  int num_sets = 0; /* distinct sets */
2918  int num_chains = 0;
2919  List *result = NIL;
2920  List **results;
2921  List **orig_sets;
2922  Bitmapset **set_masks;
2923  int *chains;
2924  short **adjacency;
2925  short *adjacency_buf;
2926  BipartiteMatchState *state;
2927  int i;
2928  int j;
2929  int j_size;
2930  ListCell *lc1 = list_head(groupingSets);
2931  ListCell *lc;
2932 
2933  /*
2934  * Start by stripping out empty sets. The algorithm doesn't require this,
2935  * but the planner currently needs all empty sets to be returned in the
2936  * first list, so we strip them here and add them back after.
2937  */
2938  while (lc1 && lfirst(lc1) == NIL)
2939  {
2940  ++num_empty;
2941  lc1 = lnext(groupingSets, lc1);
2942  }
2943 
2944  /* bail out now if it turns out that all we had were empty sets. */
2945  if (!lc1)
2946  return list_make1(groupingSets);
2947 
2948  /*----------
2949  * We don't strictly need to remove duplicate sets here, but if we don't,
2950  * they tend to become scattered through the result, which is a bit
2951  * confusing (and irritating if we ever decide to optimize them out).
2952  * So we remove them here and add them back after.
2953  *
2954  * For each non-duplicate set, we fill in the following:
2955  *
2956  * orig_sets[i] = list of the original set lists
2957  * set_masks[i] = bitmapset for testing inclusion
2958  * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
2959  *
2960  * chains[i] will be the result group this set is assigned to.
2961  *
2962  * We index all of these from 1 rather than 0 because it is convenient
2963  * to leave 0 free for the NIL node in the graph algorithm.
2964  *----------
2965  */
2966  orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
2967  set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
2968  adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
2969  adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
2970 
2971  j_size = 0;
2972  j = 0;
2973  i = 1;
2974 
2975  for_each_cell(lc, groupingSets, lc1)
2976  {
2977  List *candidate = (List *) lfirst(lc);
2978  Bitmapset *candidate_set = NULL;
2979  ListCell *lc2;
2980  int dup_of = 0;
2981 
2982  foreach(lc2, candidate)
2983  {
2984  candidate_set = bms_add_member(candidate_set, lfirst_int(lc2));
2985  }
2986 
2987  /* we can only be a dup if we're the same length as a previous set */
2988  if (j_size == list_length(candidate))
2989  {
2990  int k;
2991 
2992  for (k = j; k < i; ++k)
2993  {
2994  if (bms_equal(set_masks[k], candidate_set))
2995  {
2996  dup_of = k;
2997  break;
2998  }
2999  }
3000  }
3001  else if (j_size < list_length(candidate))
3002  {
3003  j_size = list_length(candidate);
3004  j = i;
3005  }
3006 
3007  if (dup_of > 0)
3008  {
3009  orig_sets[dup_of] = lappend(orig_sets[dup_of], candidate);
3010  bms_free(candidate_set);
3011  }
3012  else
3013  {
3014  int k;
3015  int n_adj = 0;
3016 
3017  orig_sets[i] = list_make1(candidate);
3018  set_masks[i] = candidate_set;
3019 
3020  /* fill in adjacency list; no need to compare equal-size sets */
3021 
3022  for (k = j - 1; k > 0; --k)
3023  {
3024  if (bms_is_subset(set_masks[k], candidate_set))
3025  adjacency_buf[++n_adj] = k;
3026  }
3027 
3028  if (n_adj > 0)
3029  {
3030  adjacency_buf[0] = n_adj;
3031  adjacency[i] = palloc((n_adj + 1) * sizeof(short));
3032  memcpy(adjacency[i], adjacency_buf, (n_adj + 1) * sizeof(short));
3033  }
3034  else
3035  adjacency[i] = NULL;
3036 
3037  ++i;
3038  }
3039  }
3040 
3041  num_sets = i - 1;
3042 
3043  /*
3044  * Apply the graph matching algorithm to do the work.
3045  */
3046  state = BipartiteMatch(num_sets, num_sets, adjacency);
3047 
3048  /*
3049  * Now, the state->pair* fields have the info we need to assign sets to
3050  * chains. Two sets (u,v) belong to the same chain if pair_uv[u] = v or
3051  * pair_vu[v] = u (both will be true, but we check both so that we can do
3052  * it in one pass)
3053  */
3054  chains = palloc0((num_sets + 1) * sizeof(int));
3055 
3056  for (i = 1; i <= num_sets; ++i)
3057  {
3058  int u = state->pair_vu[i];
3059  int v = state->pair_uv[i];
3060 
3061  if (u > 0 && u < i)
3062  chains[i] = chains[u];
3063  else if (v > 0 && v < i)
3064  chains[i] = chains[v];
3065  else
3066  chains[i] = ++num_chains;
3067  }
3068 
3069  /* build result lists. */
3070  results = palloc0((num_chains + 1) * sizeof(List *));
3071 
3072  for (i = 1; i <= num_sets; ++i)
3073  {
3074  int c = chains[i];
3075 
3076  Assert(c > 0);
3077 
3078  results[c] = list_concat(results[c], orig_sets[i]);
3079  }
3080 
3081  /* push any empty sets back on the first list. */
3082  while (num_empty-- > 0)
3083  results[1] = lcons(NIL, results[1]);
3084 
3085  /* make result list */
3086  for (i = 1; i <= num_chains; ++i)
3087  result = lappend(result, results[i]);
3088 
3089  /*
3090  * Free all the things.
3091  *
3092  * (This is over-fussy for small sets but for large sets we could have
3093  * tied up a nontrivial amount of memory.)
3094  */
3095  BipartiteMatchFree(state);
3096  pfree(results);
3097  pfree(chains);
3098  for (i = 1; i <= num_sets; ++i)
3099  if (adjacency[i])
3100  pfree(adjacency[i]);
3101  pfree(adjacency);
3102  pfree(adjacency_buf);
3103  pfree(orig_sets);
3104  for (i = 1; i <= num_sets; ++i)
3105  bms_free(set_masks[i]);
3106  pfree(set_masks);
3107 
3108  return result;
3109 }
void BipartiteMatchFree(BipartiteMatchState *state)
BipartiteMatchState * BipartiteMatch(int u_size, int v_size, short **adjacency)
bool bms_equal(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:94
bool bms_is_subset(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:332
void bms_free(Bitmapset *a)
Definition: bitmapset.c:209
void * palloc0(Size size)
Definition: mcxt.c:1241

References Assert(), BipartiteMatch(), BipartiteMatchFree(), bms_add_member(), bms_equal(), bms_free(), bms_is_subset(), for_each_cell, i, j, lappend(), lcons(), lfirst, lfirst_int, list_concat(), list_head(), list_length(), list_make1, lnext(), NIL, palloc(), palloc0(), and pfree().

Referenced by preprocess_grouping_sets().
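A worked example (not from the source): given GROUPING SETS ((a,b,c), (a,b), (c), (a)), the sets (a), (a,b) and (a,b,c) form one containment chain, since each is a subset of the next, while (c) forms another. The function therefore returns the two lists ((a,b,c), (a,b), (a)) and ((c)). The first becomes a single rollup computable from one sort on (a, b, c); the second needs its own grouping pass. The bipartite matching guarantees the number of chains, and hence of sorts, is minimal.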

◆ gather_grouping_paths()

static void gather_grouping_paths ( PlannerInfo root,
RelOptInfo rel 
)
static

Definition at line 7354 of file planner.c.

7355 {
7356  ListCell *lc;
7357  Path *cheapest_partial_path;
7358 
7359  /* Try Gather for unordered paths and Gather Merge for ordered ones. */
7360  generate_useful_gather_paths(root, rel, true);
7361 
7362  /* Try cheapest partial path + explicit Sort + Gather Merge. */
7363  cheapest_partial_path = linitial(rel->partial_pathlist);
7364  if (!pathkeys_contained_in(root->group_pathkeys,
7365  cheapest_partial_path->pathkeys))
7366  {
7367  Path *path;
7368  double total_groups;
7369 
7370  total_groups =
7371  cheapest_partial_path->rows * cheapest_partial_path->parallel_workers;
7372  path = (Path *) create_sort_path(root, rel, cheapest_partial_path,
7373  root->group_pathkeys,
7374  -1.0);
7375  path = (Path *)
7376  create_gather_merge_path(root,
7377  rel,
7378  path,
7379  rel->reltarget,
7380  root->group_pathkeys,
7381  NULL,
7382  &total_groups);
7383 
7384  add_path(rel, path);
7385  }
7386 
7387  /*
7388  * Consider incremental sort on all partial paths, if enabled.
7389  *
7390  * We can also skip the entire loop when we only have a single-item
7391  * group_pathkeys because then we can't possibly have a presorted prefix
7392  * of the list without having the list be fully sorted.
7393  */
7394  if (!enable_incremental_sort || list_length(root->group_pathkeys) == 1)
7395  return;
7396 
7397  /* also consider incremental sort on partial paths, if enabled */
7398  foreach(lc, rel->partial_pathlist)
7399  {
7400  Path *path = (Path *) lfirst(lc);
7401  bool is_sorted;
7402  int presorted_keys;
7403  double total_groups;
7404 
7405  is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
7406  path->pathkeys,
7407  &presorted_keys);
7408 
7409  if (is_sorted)
7410  continue;
7411 
7412  if (presorted_keys == 0)
7413  continue;
7414 
7415  path = (Path *) create_incremental_sort_path(root,
7416  rel,
7417  path,
7418  root->group_pathkeys,
7419  presorted_keys,
7420  -1.0);
7421 
7422  path = (Path *)
7423  create_gather_merge_path(root,
7424  rel,
7425  path,
7426  rel->reltarget,
7427  root->group_pathkeys,
7428  NULL,
7429  &total_groups);
7430 
7431  add_path(rel, path);
7432  }
7433 }

References add_path(), create_gather_merge_path(), create_incremental_sort_path(), create_sort_path(), enable_incremental_sort, generate_useful_gather_paths(), PlannerInfo::group_pathkeys, lfirst, linitial, list_length(), Path::parallel_workers, RelOptInfo::partial_pathlist, Path::pathkeys, pathkeys_contained_in(), pathkeys_count_contained_in(), RelOptInfo::reltarget, and Path::rows.

Referenced by add_paths_to_grouping_rel(), and create_ordinary_grouping_paths().
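The row estimate for the Gather Merge is simply per-worker rows times the number of workers (illustrative numbers): if the cheapest partial path is expected to emit 100,000 partially grouped rows in each of 4 workers, total_groups is 400,000, and that value is passed by address to create_gather_merge_path() as the expected size of the merged stream.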

◆ get_cheapest_fractional_path()

Path* get_cheapest_fractional_path ( RelOptInfo rel,
double  tuple_fraction 
)

Definition at line 6275 of file planner.c.

6276 {
6277  Path *best_path = rel->cheapest_total_path;
6278  ListCell *l;
6279 
6280  /* If all tuples will be retrieved, just return the cheapest-total path */
6281  if (tuple_fraction <= 0.0)
6282  return best_path;
6283 
6284  /* Convert absolute # of tuples to a fraction; no need to clamp to 0..1 */
6285  if (tuple_fraction >= 1.0 && best_path->rows > 0)
6286  tuple_fraction /= best_path->rows;
6287 
6288  foreach(l, rel->pathlist)
6289  {
6290  Path *path = (Path *) lfirst(l);
6291 
6292  if (path == rel->cheapest_total_path ||
6293  compare_fractional_path_costs(best_path, path, tuple_fraction) <= 0)
6294  continue;
6295 
6296  best_path = path;
6297  }
6298 
6299  return best_path;
6300 }
int compare_fractional_path_costs(Path *path1, Path *path2, double fraction)
Definition: pathnode.c:117

References RelOptInfo::cheapest_total_path, compare_fractional_path_costs(), lfirst, RelOptInfo::pathlist, and Path::rows.

Referenced by make_subplan(), recurse_set_operations(), and standard_planner().
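A worked example of the fractional comparison (illustrative numbers): if tuple_fraction arrives as the absolute count 10 and best_path->rows is 1000, the fraction becomes 0.01. compare_fractional_path_costs() then scores each path as startup_cost + 0.01 * (total_cost - startup_cost): a path with costs (startup 0, total 2000) scores 20, while one with (startup 500, total 1000) scores 505, so the fast-start path wins even though its total cost is worse. At fraction 1.0 the ranking reverses.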

◆ get_number_of_groups()

static double get_number_of_groups ( PlannerInfo root,
double  path_rows,
grouping_sets_data gd,
List target_list 
)
static

Definition at line 3556 of file planner.c.

3560 {
3561  Query *parse = root->parse;
3562  double dNumGroups;
3563 
3564  if (parse->groupClause)
3565  {
3566  List *groupExprs;
3567 
3568  if (parse->groupingSets)
3569  {
3570  /* Add up the estimates for each grouping set */
3571  ListCell *lc;
3572 
3573  Assert(gd); /* keep Coverity happy */
3574 
3575  dNumGroups = 0;
3576 
3577  foreach(lc, gd->rollups)
3578  {
3579  RollupData *rollup = lfirst_node(RollupData, lc);
3580  ListCell *lc2;
3581  ListCell *lc3;
3582 
3583  groupExprs = get_sortgrouplist_exprs(rollup->groupClause,
3584  target_list);
3585 
3586  rollup->numGroups = 0.0;
3587 
3588  forboth(lc2, rollup->gsets, lc3, rollup->gsets_data)
3589  {
3590  List *gset = (List *) lfirst(lc2);
3591  GroupingSetData *gs = lfirst_node(GroupingSetData, lc3);
3592  double numGroups = estimate_num_groups(root,
3593  groupExprs,
3594  path_rows,
3595  &gset,
3596  NULL);
3597 
3598  gs->numGroups = numGroups;
3599  rollup->numGroups += numGroups;
3600  }
3601 
3602  dNumGroups += rollup->numGroups;
3603  }
3604 
3605  if (gd->hash_sets_idx)
3606  {
3607  ListCell *lc2;
3608 
3609  gd->dNumHashGroups = 0;
3610 
3611  groupExprs = get_sortgrouplist_exprs(parse->groupClause,
3612  target_list);
3613 
3614  forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets)
3615  {
3616  List *gset = (List *) lfirst(lc);
3617  GroupingSetData *gs = lfirst_node(GroupingSetData, lc2);
3618  double numGroups = estimate_num_groups(root,
3619  groupExprs,
3620  path_rows,
3621  &gset,
3622  NULL);
3623 
3624  gs->numGroups = numGroups;
3625  gd->dNumHashGroups += numGroups;
3626  }
3627 
3628  dNumGroups += gd->dNumHashGroups;
3629  }
3630  }
3631  else
3632  {
3633  /* Plain GROUP BY -- estimate based on optimized groupClause */
3634  groupExprs = get_sortgrouplist_exprs(root->processed_groupClause,
3635  target_list);
3636 
3637  dNumGroups = estimate_num_groups(root, groupExprs, path_rows,
3638  NULL, NULL);
3639  }
3640  }
3641  else if (parse->groupingSets)
3642  {
3643  /* Empty grouping sets ... one result row for each one */
3644  dNumGroups = list_length(parse->groupingSets);
3645  }
3646  else if (parse->hasAggs || root->hasHavingQual)
3647  {
3648  /* Plain aggregation, one result row */
3649  dNumGroups = 1;
3650  }
3651  else
3652  {
3653  /* Not grouping */
3654  dNumGroups = 1;
3655  }
3656 
3657  return dNumGroups;
3658 }
List * hash_sets_idx
Definition: planner.c:104

References Assert(), grouping_sets_data::dNumHashGroups, estimate_num_groups(), forboth, get_sortgrouplist_exprs(), RollupData::groupClause, RollupData::gsets, RollupData::gsets_data, grouping_sets_data::hash_sets_idx, PlannerInfo::hasHavingQual, lfirst, lfirst_node, list_length(), GroupingSetData::numGroups, RollupData::numGroups, parse(), PlannerInfo::parse, PlannerInfo::processed_groupClause, grouping_sets_data::rollups, and grouping_sets_data::unsortable_sets.

Referenced by create_ordinary_grouping_paths(), and create_partial_grouping_paths().
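A worked example for the grouping-sets branch (illustrative numbers): ROLLUP(a, b) produces one rollup holding the sets (a,b), (a) and (). If estimate_num_groups() returns 500 for (a,b) and 40 for (a), and the empty set contributes its single group, then rollup->numGroups and dNumGroups both come to 541; each per-set estimate is also saved in its GroupingSetData for later costing.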

◆ group_by_has_partkey()

static bool group_by_has_partkey ( RelOptInfo input_rel,
List targetList,
List groupClause 
)
static

Definition at line 7863 of file planner.c.

7866 {
7867  List *groupexprs = get_sortgrouplist_exprs(groupClause, targetList);
7868  int cnt = 0;
7869  int partnatts;
7870 
7871  /* Input relation should be partitioned. */
7872  Assert(input_rel->part_scheme);
7873 
7874  /* Rule out early, if there are no partition keys present. */
7875  if (!input_rel->partexprs)
7876  return false;
7877 
7878  partnatts = input_rel->part_scheme->partnatts;
7879 
7880  for (cnt = 0; cnt < partnatts; cnt++)
7881  {
7882  List *partexprs = input_rel->partexprs[cnt];
7883  ListCell *lc;
7884  bool found = false;
7885 
7886  foreach(lc, partexprs)
7887  {
7888  Expr *partexpr = lfirst(lc);
7889 
7890  if (list_member(groupexprs, partexpr))
7891  {
7892  found = true;
7893  break;
7894  }
7895  }
7896 
7897  /*
7898  * If none of the partition key expressions matches any of the
7899  * GROUP BY expressions, return false.
7900  */
7901  if (!found)
7902  return false;
7903  }
7904 
7905  return true;
7906 }
bool list_member(const List *list, const void *datum)
Definition: list.c:660

References Assert(), get_sortgrouplist_exprs(), lfirst, and list_member().

Referenced by create_ordinary_grouping_paths().
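For example (not from the source): with a table partitioned BY RANGE (region), GROUP BY region, customer_id returns true, since the only partition key column appears among the grouping expressions, so no group can span partitions and full partitionwise aggregation is safe. GROUP BY customer_id alone returns false, leaving only the partial partitionwise strategy.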

◆ grouping_planner()

static void grouping_planner ( PlannerInfo root,
double  tuple_fraction 
)
static

Definition at line 1274 of file planner.c.

1275 {
1276  Query *parse = root->parse;
1277  int64 offset_est = 0;
1278  int64 count_est = 0;
1279  double limit_tuples = -1.0;
1280  bool have_postponed_srfs = false;
1281  PathTarget *final_target;
1282  List *final_targets;
1283  List *final_targets_contain_srfs;
1284  bool final_target_parallel_safe;
1285  RelOptInfo *current_rel;
1286  RelOptInfo *final_rel;
1287  FinalPathExtraData extra;
1288  ListCell *lc;
1289 
1290  /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
1291  if (parse->limitCount || parse->limitOffset)
1292  {
1293  tuple_fraction = preprocess_limit(root, tuple_fraction,
1294  &offset_est, &count_est);
1295 
1296  /*
1297  * If we have a known LIMIT, and don't have an unknown OFFSET, we can
1298  * estimate the effects of using a bounded sort.
1299  */
1300  if (count_est > 0 && offset_est >= 0)
1301  limit_tuples = (double) count_est + (double) offset_est;
1302  }
1303 
1304  /* Make tuple_fraction accessible to lower-level routines */
1305  root->tuple_fraction = tuple_fraction;
1306 
1307  if (parse->setOperations)
1308  {
1309  /*
1310  * If there's a top-level ORDER BY, assume we have to fetch all the
1311  * tuples. This might be too simplistic given all the hackery below
1312  * to possibly avoid the sort; but the odds of accurate estimates here
1313  * are pretty low anyway. XXX try to get rid of this in favor of
1314  * letting plan_set_operations generate both fast-start and
1315  * cheapest-total paths.
1316  */
1317  if (parse->sortClause)
1318  root->tuple_fraction = 0.0;
1319 
1320  /*
1321  * Construct Paths for set operations. The results will not need any
1322  * work except perhaps a top-level sort and/or LIMIT. Note that any
1323  * special work for recursive unions is the responsibility of
1324  * plan_set_operations.
1325  */
1326  current_rel = plan_set_operations(root);
1327 
1328  /*
1329  * We should not need to call preprocess_targetlist, since we must be
1330  * in a SELECT query node. Instead, use the processed_tlist returned
1331  * by plan_set_operations (since this tells whether it returned any
1332  * resjunk columns!), and transfer any sort key information from the
1333  * original tlist.
1334  */
1335  Assert(parse->commandType == CMD_SELECT);
1336 
1337  /* for safety, copy processed_tlist instead of modifying in-place */
1338  root->processed_tlist =
1339  postprocess_setop_tlist(copyObject(root->processed_tlist),
1340  parse->targetList);
1341 
1342  /* Also extract the PathTarget form of the setop result tlist */
1343  final_target = current_rel->cheapest_total_path->pathtarget;
1344 
1345  /* And check whether it's parallel safe */
1346  final_target_parallel_safe =
1347  is_parallel_safe(root, (Node *) final_target->exprs);
1348 
1349  /* The setop result tlist couldn't contain any SRFs */
1350  Assert(!parse->hasTargetSRFs);
1351  final_targets = final_targets_contain_srfs = NIL;
1352 
1353  /*
1354  * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have
1355  * checked already, but let's make sure).
1356  */
1357  if (parse->rowMarks)
1358  ereport(ERROR,
1359  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1360  /*------
1361  translator: %s is a SQL row locking clause such as FOR UPDATE */
1362  errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
1363  LCS_asString(linitial_node(RowMarkClause,
1364  parse->rowMarks)->strength))));
1365 
1366  /*
1367  * Calculate pathkeys that represent result ordering requirements
1368  */
1369  Assert(parse->distinctClause == NIL);
1370  root->sort_pathkeys = make_pathkeys_for_sortclauses(root,
1371  parse->sortClause,
1372  root->processed_tlist);
1373  }
1374  else
1375  {
1376  /* No set operations, do regular planning */
1377  PathTarget *sort_input_target;
1378  List *sort_input_targets;
1379  List *sort_input_targets_contain_srfs;
1380  bool sort_input_target_parallel_safe;
1381  PathTarget *grouping_target;
1382  List *grouping_targets;
1383  List *grouping_targets_contain_srfs;
1384  bool grouping_target_parallel_safe;
1385  PathTarget *scanjoin_target;
1386  List *scanjoin_targets;
1387  List *scanjoin_targets_contain_srfs;
1388  bool scanjoin_target_parallel_safe;
1389  bool scanjoin_target_same_exprs;
1390  bool have_grouping;
1391  WindowFuncLists *wflists = NULL;
1392  List *activeWindows = NIL;
1393  grouping_sets_data *gset_data = NULL;
1394  standard_qp_extra qp_extra;
1395 
1396  /* A recursive query should always have setOperations */
1397  Assert(!root->hasRecursion);
1398 
1399  /* Preprocess grouping sets and GROUP BY clause, if any */
1400  if (parse->groupingSets)
1401  {
1402  gset_data = preprocess_grouping_sets(root);
1403  }
1404  else if (parse->groupClause)
1405  {
1406  /* Preprocess regular GROUP BY clause, if any */
1407  root->processed_groupClause = preprocess_groupclause(root, NIL);
1408  /* Remove any redundant GROUP BY columns */
1409  remove_useless_groupby_columns(root);
1410  }
1411 
1412  /*
1413  * Preprocess targetlist. Note that much of the remaining planning
1414  * work will be done with the PathTarget representation of tlists, but
1415  * we must also maintain the full representation of the final tlist so
1416  * that we can transfer its decoration (resnames etc) to the topmost
1417  * tlist of the finished Plan. This is kept in processed_tlist.
1418  */
1419  preprocess_targetlist(root);
1420 
1421  /*
1422  * Mark all the aggregates with resolved aggtranstypes, and detect
1423  * aggregates that are duplicates or can share transition state. We
1424  * must do this before slicing and dicing the tlist into various
1425  * pathtargets, else some copies of the Aggref nodes might escape
1426  * being marked.
1427  */
1428  if (parse->hasAggs)
1429  {
1430  preprocess_aggrefs(root, (Node *) root->processed_tlist);
1431  preprocess_aggrefs(root, (Node *) parse->havingQual);
1432  }
1433 
1434  /*
1435  * Locate any window functions in the tlist. (We don't need to look
1436  * anywhere else, since expressions used in ORDER BY will be in there
1437  * too.) Note that they could all have been eliminated by constant
1438  * folding, in which case we don't need to do any more work.
1439  */
1440  if (parse->hasWindowFuncs)
1441  {
1442  wflists = find_window_functions((Node *) root->processed_tlist,
1443  list_length(parse->windowClause));
1444  if (wflists->numWindowFuncs > 0)
1445  {
1446  /*
1447  * See if any modifications can be made to each WindowClause
1448  * to allow the executor to execute the WindowFuncs more
1449  * quickly.
1450  */
1451  optimize_window_clauses(root, wflists);
1452 
1453  activeWindows = select_active_windows(root, wflists);
1454  }
1455  else
1456  parse->hasWindowFuncs = false;
1457  }
1458 
1459  /*
1460  * Preprocess MIN/MAX aggregates, if any. Note: be careful about
1461  * adding logic between here and the query_planner() call. Anything
1462  * that is needed in MIN/MAX-optimizable cases will have to be
1463  * duplicated in planagg.c.
1464  */
1465  if (parse->hasAggs)
1466  preprocess_minmax_aggregates(root);
1467 
1468  /*
1469  * Figure out whether there's a hard limit on the number of rows that
1470  * query_planner's result subplan needs to return. Even if we know a
1471  * hard limit overall, it doesn't apply if the query has any
1472  * grouping/aggregation operations, or SRFs in the tlist.
1473  */
1474  if (parse->groupClause ||
1475  parse->groupingSets ||
1476  parse->distinctClause ||
1477  parse->hasAggs ||
1478  parse->hasWindowFuncs ||
1479  parse->hasTargetSRFs ||
1480  root->hasHavingQual)
1481  root->limit_tuples = -1.0;
1482  else
1483  root->limit_tuples = limit_tuples;
1484 
1485  /* Set up data needed by standard_qp_callback */
1486  qp_extra.activeWindows = activeWindows;
1487  qp_extra.gset_data = gset_data;
1488 
1489  /*
1490  * Generate the best unsorted and presorted paths for the scan/join
1491  * portion of this Query, ie the processing represented by the
1492  * FROM/WHERE clauses. (Note there may not be any presorted paths.)
1493  * We also generate (in standard_qp_callback) pathkey representations
1494  * of the query's sort clause, distinct clause, etc.
1495  */
1496  current_rel = query_planner(root, standard_qp_callback, &qp_extra);
1497 
1498  /*
1499  * Convert the query's result tlist into PathTarget format.
1500  *
1501  * Note: this cannot be done before query_planner() has performed
1502  * appendrel expansion, because that might add resjunk entries to
1503  * root->processed_tlist. Waiting till afterwards is also helpful
1504  * because the target width estimates can use per-Var width numbers
1505  * that were obtained within query_planner().
1506  */
1507  final_target = create_pathtarget(root, root->processed_tlist);
1508  final_target_parallel_safe =
1509  is_parallel_safe(root, (Node *) final_target->exprs);
1510 
1511  /*
1512  * If ORDER BY was given, consider whether we should use a post-sort
1513  * projection, and compute the adjusted target for preceding steps if
1514  * so.
1515  */
1516  if (parse->sortClause)
1517  {
1518  sort_input_target = make_sort_input_target(root,
1519  final_target,
1520  &have_postponed_srfs);
1521  sort_input_target_parallel_safe =
1522  is_parallel_safe(root, (Node *) sort_input_target->exprs);
1523  }
1524  else
1525  {
1526  sort_input_target = final_target;
1527  sort_input_target_parallel_safe = final_target_parallel_safe;
1528  }
1529 
1530  /*
1531  * If we have window functions to deal with, the output from any
1532  * grouping step needs to be what the window functions want;
1533  * otherwise, it should be sort_input_target.
1534  */
1535  if (activeWindows)
1536  {
1537  grouping_target = make_window_input_target(root,
1538  final_target,
1539  activeWindows);
1540  grouping_target_parallel_safe =
1541  is_parallel_safe(root, (Node *) grouping_target->exprs);
1542  }
1543  else
1544  {
1545  grouping_target = sort_input_target;
1546  grouping_target_parallel_safe = sort_input_target_parallel_safe;
1547  }
1548 
1549  /*
1550  * If we have grouping or aggregation to do, the topmost scan/join
1551  * plan node must emit what the grouping step wants; otherwise, it
1552  * should emit grouping_target.
1553  */
1554  have_grouping = (parse->groupClause || parse->groupingSets ||
1555  parse->hasAggs || root->hasHavingQual);
1556  if (have_grouping)
1557  {
1558  scanjoin_target = make_group_input_target(root, final_target);
1559  scanjoin_target_parallel_safe =
1560  is_parallel_safe(root, (Node *) scanjoin_target->exprs);
1561  }
1562  else
1563  {
1564  scanjoin_target = grouping_target;
1565  scanjoin_target_parallel_safe = grouping_target_parallel_safe;
1566  }
1567 
1568  /*
1569  * If there are any SRFs in the targetlist, we must separate each of
1570  * these PathTargets into SRF-computing and SRF-free targets. Replace
1571  * each of the named targets with a SRF-free version, and remember the
1572  * list of additional projection steps we need to add afterwards.
1573  */
1574  if (parse->hasTargetSRFs)
1575  {
1576  /* final_target doesn't recompute any SRFs in sort_input_target */
1577  split_pathtarget_at_srfs(root, final_target, sort_input_target,
1578  &final_targets,
1579  &final_targets_contain_srfs);
1580  final_target = linitial_node(PathTarget, final_targets);
1581  Assert(!linitial_int(final_targets_contain_srfs));
1582  /* likewise for sort_input_target vs. grouping_target */
1583  split_pathtarget_at_srfs(root, sort_input_target, grouping_target,
1584  &sort_input_targets,
1585  &sort_input_targets_contain_srfs);
1586  sort_input_target = linitial_node(PathTarget, sort_input_targets);
1587  Assert(!linitial_int(sort_input_targets_contain_srfs));
1588  /* likewise for grouping_target vs. scanjoin_target */
1589  split_pathtarget_at_srfs(root, grouping_target, scanjoin_target,
1590  &grouping_targets,
1591  &grouping_targets_contain_srfs);
1592  grouping_target = linitial_node(PathTarget, grouping_targets);
1593  Assert(!linitial_int(grouping_targets_contain_srfs));
1594  /* scanjoin_target will not have any SRFs precomputed for it */
1595  split_pathtarget_at_srfs(root, scanjoin_target, NULL,
1596  &scanjoin_targets,
1597  &scanjoin_targets_contain_srfs);
1598  scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
1599  Assert(!linitial_int(scanjoin_targets_contain_srfs));
1600  }
1601  else
1602  {
1603  /* initialize lists; for most of these, dummy values are OK */
1604  final_targets = final_targets_contain_srfs = NIL;
1605  sort_input_targets = sort_input_targets_contain_srfs = NIL;
1606  grouping_targets = grouping_targets_contain_srfs = NIL;
1607  scanjoin_targets = list_make1(scanjoin_target);
1608  scanjoin_targets_contain_srfs = NIL;
1609  }
1610 
1611  /* Apply scan/join target. */
1612  scanjoin_target_same_exprs = list_length(scanjoin_targets) == 1
1613  && equal(scanjoin_target->exprs, current_rel->reltarget->exprs);
1614  apply_scanjoin_target_to_paths(root, current_rel, scanjoin_targets,
1615  scanjoin_targets_contain_srfs,
1616  scanjoin_target_parallel_safe,
1617  scanjoin_target_same_exprs);
1618 
1619  /*
1620  * Save the various upper-rel PathTargets we just computed into
1621  * root->upper_targets[]. The core code doesn't use this, but it
1622  * provides a convenient place for extensions to get at the info. For
1623  * consistency, we save all the intermediate targets, even though some
1624  * of the corresponding upperrels might not be needed for this query.
1625  */
1626  root->upper_targets[UPPERREL_FINAL] = final_target;
1627  root->upper_targets[UPPERREL_ORDERED] = final_target;
1628  root->upper_targets[UPPERREL_PARTIAL_DISTINCT] = sort_input_target;
1629  root->upper_targets[UPPERREL_DISTINCT] = sort_input_target;
1630  root->upper_targets[UPPERREL_WINDOW] = sort_input_target;
1631  root->upper_targets[UPPERREL_GROUP_AGG] = grouping_target;
1632 
1633  /*
1634  * If we have grouping and/or aggregation, consider ways to implement
1635  * that. We build a new upperrel representing the output of this
1636  * phase.
1637  */
1638  if (have_grouping)
1639  {
1640  current_rel = create_grouping_paths(root,
1641  current_rel,
1642  grouping_target,
1643  grouping_target_parallel_safe,
1644  gset_data);
1645  /* Fix things up if grouping_target contains SRFs */
1646  if (parse->hasTargetSRFs)
1647  adjust_paths_for_srfs(root, current_rel,
1648  grouping_targets,
1649  grouping_targets_contain_srfs);
1650  }
1651 
1652  /*
1653  * If we have window functions, consider ways to implement those. We
1654  * build a new upperrel representing the output of this phase.
1655  */
1656  if (activeWindows)
1657  {
1658  current_rel = create_window_paths(root,
1659  current_rel,
1660  grouping_target,
1661  sort_input_target,
1662  sort_input_target_parallel_safe,
1663  wflists,
1664  activeWindows);
1665  /* Fix things up if sort_input_target contains SRFs */
1666  if (parse->hasTargetSRFs)
1667  adjust_paths_for_srfs(root, current_rel,
1668  sort_input_targets,
1669  sort_input_targets_contain_srfs);
1670  }
1671 
1672  /*
1673  * If there is a DISTINCT clause, consider ways to implement that. We
1674  * build a new upperrel representing the output of this phase.
1675  */
1676  if (parse->distinctClause)
1677  {
1678  current_rel = create_distinct_paths(root,
1679  current_rel);
1680  }
1681  } /* end of if (setOperations) */
1682 
1683  /*
1684  * If ORDER BY was given, consider ways to implement that, and generate a
1685  * new upperrel containing only paths that emit the correct ordering and
1686  * project the correct final_target. We can apply the original
1687  * limit_tuples limit in sort costing here, but only if there are no
1688  * postponed SRFs.
1689  */
1690  if (parse->sortClause)
1691  {
1692  current_rel = create_ordered_paths(root,
1693  current_rel,
1694  final_target,
1695  final_target_parallel_safe,
1696  have_postponed_srfs ? -1.0 :
1697  limit_tuples);
1698  /* Fix things up if final_target contains SRFs */
1699  if (parse->hasTargetSRFs)
1700  adjust_paths_for_srfs(root, current_rel,
1701  final_targets,
1702  final_targets_contain_srfs);
1703  }
1704 
1705  /*
1706  * Now we are prepared to build the final-output upperrel.
1707  */
1708  final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1709 
1710  /*
1711  * If the input rel is marked consider_parallel and there's nothing that's
1712  * not parallel-safe in the LIMIT clause, then the final_rel can be marked
1713  * consider_parallel as well. Note that if the query has rowMarks or is
1714  * not a SELECT, consider_parallel will be false for every relation in the
1715  * query.
1716  */
1717  if (current_rel->consider_parallel &&
1718  is_parallel_safe(root, parse->limitOffset) &&
1719  is_parallel_safe(root, parse->limitCount))
1720  final_rel->consider_parallel = true;
1721 
1722  /*
1723  * If the current_rel belongs to a single FDW, so does the final_rel.
1724  */
1725  final_rel->serverid = current_rel->serverid;
1726  final_rel->userid = current_rel->userid;
1727  final_rel->useridiscurrent = current_rel->useridiscurrent;
1728  final_rel->fdwroutine = current_rel->fdwroutine;
1729 
1730  /*
1731  * Generate paths for the final_rel. Insert all surviving paths, with
1732  * LockRows, Limit, and/or ModifyTable steps added if needed.
1733  */
1734  foreach(lc, current_rel->pathlist)
1735  {
1736  Path *path = (Path *) lfirst(lc);
1737 
1738  /*
1739  * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
1740  * (Note: we intentionally test parse->rowMarks not root->rowMarks
1741  * here. If there are only non-locking rowmarks, they should be
1742  * handled by the ModifyTable node instead. However, root->rowMarks
1743  * is what goes into the LockRows node.)
1744  */
1745  if (parse->rowMarks)
1746  {
1747  path = (Path *) create_lockrows_path(root, final_rel, path,
1748  root->rowMarks,
1749  assign_special_exec_param(root));
1750  }
1751 
1752  /*
1753  * If there is a LIMIT/OFFSET clause, add the LIMIT node.
1754  */
1755  if (limit_needed(parse))
1756  {
1757  path = (Path *) create_limit_path(root, final_rel, path,
1758  parse->limitOffset,
1759  parse->limitCount,
1760  parse->limitOption,
1761  offset_est, count_est);
1762  }
1763 
1764  /*
1765  * If this is an INSERT/UPDATE/DELETE/MERGE, add the ModifyTable node.
1766  */
1767  if (parse->commandType != CMD_SELECT)
1768  {
1769  Index rootRelation;
1770  List *resultRelations = NIL;
1771  List *updateColnosLists = NIL;
1772  List *withCheckOptionLists = NIL;
1773  List *returningLists = NIL;
1774  List *mergeActionLists = NIL;
1775  List *rowMarks;
1776 
1777  if (bms_membership(root->all_result_relids) == BMS_MULTIPLE)
1778  {
1779  /* Inherited UPDATE/DELETE/MERGE */
1780  RelOptInfo *top_result_rel = find_base_rel(root,
1781  parse->resultRelation);
1782  int resultRelation = -1;
1783 
1784  /* Add only leaf children to ModifyTable. */
1785  while ((resultRelation = bms_next_member(root->leaf_result_relids,
1786  resultRelation)) >= 0)
1787  {
1788  RelOptInfo *this_result_rel = find_base_rel(root,
1789  resultRelation);
1790 
1791  /*
1792  * Also exclude any leaf rels that have turned dummy since
1793  * being added to the list, for example, by being excluded
1794  * by constraint exclusion.
1795  */
1796  if (IS_DUMMY_REL(this_result_rel))
1797  continue;
1798 
1799  /* Build per-target-rel lists needed by ModifyTable */
1800  resultRelations = lappend_int(resultRelations,
1801  resultRelation);
1802  if (parse->commandType == CMD_UPDATE)
1803  {
1804  List *update_colnos = root->update_colnos;
1805 
1806  if (this_result_rel != top_result_rel)
1807  update_colnos =
1808  adjust_inherited_attnums_multilevel(root,
1809  update_colnos,
1810  this_result_rel->relid,
1811  top_result_rel->relid);
1812  updateColnosLists = lappend(updateColnosLists,
1813  update_colnos);
1814  }
1815  if (parse->withCheckOptions)
1816  {
1817  List *withCheckOptions = parse->withCheckOptions;
1818 
1819  if (this_result_rel != top_result_rel)
1820  withCheckOptions = (List *)
1821  adjust_appendrel_attrs_multilevel(root,
1822  (Node *) withCheckOptions,
1823  this_result_rel,
1824  top_result_rel);
1825  withCheckOptionLists = lappend(withCheckOptionLists,
1826  withCheckOptions);
1827  }
1828  if (parse->returningList)
1829  {
1830  List *returningList = parse->returningList;
1831 
1832  if (this_result_rel != top_result_rel)
1833  returningList = (List *)
1834  adjust_appendrel_attrs_multilevel(root,
1835  (Node *) returningList,
1836  this_result_rel,
1837  top_result_rel);
1838  returningLists = lappend(returningLists,
1839  returningList);
1840  }
1841  if (parse->mergeActionList)
1842  {
1843  ListCell *l;
1844  List *mergeActionList = NIL;
1845 
1846  /*
1847  * Copy MergeActions and translate stuff that
1848  * references attribute numbers.
1849  */
1850  foreach(l, parse->mergeActionList)
1851  {
1852  MergeAction *action = lfirst(l),
1853  *leaf_action = copyObject(action);
1854 
1855  leaf_action->qual =
1856  adjust_appendrel_attrs_multilevel(root,
1857  (Node *) action->qual,
1858  this_result_rel,
1859  top_result_rel);
1860  leaf_action->targetList = (List *)
1862  (Node *) action->targetList,
1863  this_result_rel,
1864  top_result_rel);
1865  if (leaf_action->commandType == CMD_UPDATE)
1866  leaf_action->updateColnos =
1867  adjust_inherited_attnums_multilevel(root,
1868  action->updateColnos,
1869  this_result_rel->relid,
1870  top_result_rel->relid);
1871  mergeActionList = lappend(mergeActionList,
1872  leaf_action);
1873  }
1874 
1875  mergeActionLists = lappend(mergeActionLists,
1876  mergeActionList);
1877  }
1878  }
1879 
1880  if (resultRelations == NIL)
1881  {
1882  /*
1883  * We managed to exclude every child rel, so generate a
1884  * dummy one-relation plan using info for the top target
1885  * rel (even though that may not be a leaf target).
1886  * Although it's clear that no data will be updated or
1887  * deleted, we still need to have a ModifyTable node so
1888  * that any statement triggers will be executed. (This
1889  * could be cleaner if we fixed nodeModifyTable.c to allow
1890  * zero target relations, but that probably wouldn't be a
1891  * net win.)
1892  */
1893  resultRelations = list_make1_int(parse->resultRelation);
1894  if (parse->commandType == CMD_UPDATE)
1895  updateColnosLists = list_make1(root->update_colnos);
1896  if (parse->withCheckOptions)
1897  withCheckOptionLists = list_make1(parse->withCheckOptions);
1898  if (parse->returningList)
1899  returningLists = list_make1(parse->returningList);
1900  if (parse->mergeActionList)
1901  mergeActionLists = list_make1(parse->mergeActionList);
1902  }
1903  }
1904  else
1905  {
1906  /* Single-relation INSERT/UPDATE/DELETE/MERGE. */
1907  resultRelations = list_make1_int(parse->resultRelation);
1908  if (parse->commandType == CMD_UPDATE)
1909  updateColnosLists = list_make1(root->update_colnos);
1910  if (parse->withCheckOptions)
1911  withCheckOptionLists = list_make1(parse->withCheckOptions);
1912  if (parse->returningList)
1913  returningLists = list_make1(parse->returningList);
1914  if (parse->mergeActionList)
1915  mergeActionLists = list_make1(parse->mergeActionList);
1916  }
1917 
1918  /*
1919  * If target is a partition root table, we need to mark the
1920  * ModifyTable node appropriately for that.
1921  */
1922  if (rt_fetch(parse->resultRelation, parse->rtable)->relkind ==
1923  RELKIND_PARTITIONED_TABLE)
1924  rootRelation = parse->resultRelation;
1925  else
1926  rootRelation = 0;
1927 
1928  /*
1929  * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
1930  * will have dealt with fetching non-locked marked rows, else we
1931  * need to have ModifyTable do that.
1932  */
1933  if (parse->rowMarks)
1934  rowMarks = NIL;
1935  else
1936  rowMarks = root->rowMarks;
1937 
1938  path = (Path *)
1939  create_modifytable_path(root, final_rel,
1940  path,
1941  parse->commandType,
1942  parse->canSetTag,
1943  parse->resultRelation,
1944  rootRelation,
1945  root->partColsUpdated,
1946  resultRelations,
1947  updateColnosLists,
1948  withCheckOptionLists,
1949  returningLists,
1950  rowMarks,
1951  parse->onConflict,
1952  mergeActionLists,
1953  assign_special_exec_param(root));
1954  }
1955 
1956  /* And shove it into final_rel */
1957  add_path(final_rel, path);
1958  }
1959 
1960  /*
1961  * Generate partial paths for final_rel, too, if outer query levels might
1962  * be able to make use of them.
1963  */
1964  if (final_rel->consider_parallel && root->query_level > 1 &&
1965  !limit_needed(parse))
1966  {
1967  Assert(!parse->rowMarks && parse->commandType == CMD_SELECT);
1968  foreach(lc, current_rel->partial_pathlist)
1969  {
1970  Path *partial_path = (Path *) lfirst(lc);
1971 
1972  add_partial_path(final_rel, partial_path);
1973  }
1974  }
1975 
1976  extra.limit_needed = limit_needed(parse);
1977  extra.limit_tuples = limit_tuples;
1978  extra.count_est = count_est;
1979  extra.offset_est = offset_est;
1980 
1981  /*
1982  * If there is an FDW that's responsible for all baserels of the query,
1983  * let it consider adding ForeignPaths.
1984  */
1985  if (final_rel->fdwroutine &&
1986  final_rel->fdwroutine->GetForeignUpperPaths)
1987  final_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_FINAL,
1988  current_rel, final_rel,
1989  &extra);
1990 
1991  /* Let extensions possibly add some more paths */
1992  if (create_upper_paths_hook)
1993  (*create_upper_paths_hook) (root, UPPERREL_FINAL,
1994  current_rel, final_rel, &extra);
1995 
1996  /* Note: currently, we leave it to callers to do set_cheapest() */
1997 }
Node * adjust_appendrel_attrs_multilevel(PlannerInfo *root, Node *node, RelOptInfo *childrel, RelOptInfo *parentrel)
Definition: appendinfo.c:520
List * adjust_inherited_attnums_multilevel(PlannerInfo *root, List *attnums, Index child_relid, Index top_parent_relid)
Definition: appendinfo.c:661
BMS_Membership bms_membership(const Bitmapset *a)
Definition: bitmapset.c:691
@ BMS_MULTIPLE
Definition: bitmapset.h:73
unsigned int Index
Definition: c.h:598
WindowFuncLists * find_window_functions(Node *clause, Index maxWinRef)
Definition: clauses.c:227
bool equal(const void *a, const void *b)
Definition: equalfuncs.c:223
List * lappend_int(List *list, int datum)
Definition: list.c:356
#define copyObject(obj)
Definition: nodes.h:244
@ CMD_UPDATE
Definition: nodes.h:277
@ CMD_SELECT
Definition: nodes.h:276
int assign_special_exec_param(PlannerInfo *root)
Definition: paramassign.c:580
const char * LCS_asString(LockClauseStrength strength)
Definition: analyze.c:3130
#define rt_fetch(rangetable_index, rangetable)
Definition: parsetree.h:31
LockRowsPath * create_lockrows_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *rowMarks, int epqParam)
Definition: pathnode.c:3585
ModifyTablePath * create_modifytable_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, CmdType operation, bool canSetTag, Index nominalRelation, Index rootRelation, bool partColsUpdated, List *resultRelations, List *updateColnosLists, List *withCheckOptionLists, List *returningLists, List *rowMarks, OnConflictExpr *onconflict, List *mergeActionLists, int epqParam)
Definition: pathnode.c:3647
@ UPPERREL_FINAL
Definition: pathnodes.h:79
#define list_make1_int(x1)
Definition: pg_list.h:227
void preprocess_minmax_aggregates(PlannerInfo *root)
Definition: planagg.c:73
RelOptInfo * query_planner(PlannerInfo *root, query_pathkeys_callback qp_callback, void *qp_extra)
Definition: planmain.c:55
static List * postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
Definition: planner.c:5559
static void remove_useless_groupby_columns(PlannerInfo *root)
Definition: planner.c:2645
static double preprocess_limit(PlannerInfo *root, double tuple_fraction, int64 *offset_est, int64 *count_est)
Definition: planner.c:2401
static PathTarget * make_window_input_target(PlannerInfo *root, PathTarget *final_target, List *activeWindows)
Definition: planner.c:5893
static RelOptInfo * create_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel)
Definition: planner.c:4647
static void optimize_window_clauses(PlannerInfo *root, WindowFuncLists *wflists)
Definition: planner.c:5596
static PathTarget * make_sort_input_target(PlannerInfo *root, PathTarget *final_target, bool *have_postponed_srfs)
Definition: planner.c:6104
static grouping_sets_data * preprocess_grouping_sets(PlannerInfo *root)
Definition: planner.c:2006
static PathTarget * make_group_input_target(PlannerInfo *root, PathTarget *final_target)
Definition: planner.c:5333
static List * select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
Definition: planner.c:5736
bool limit_needed(Query *parse)
Definition: planner.c:2586
static RelOptInfo * create_ordered_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, double limit_tuples)
Definition: planner.c:5087
static RelOptInfo * create_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, grouping_sets_data *gd)
Definition: planner.c:3678
static void standard_qp_callback(PlannerInfo *root, void *extra)
Definition: planner.c:3399
static RelOptInfo * create_window_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *input_target, PathTarget *output_target, bool output_target_parallel_safe, WindowFuncLists *wflists, List *activeWindows)
Definition: planner.c:4430
void preprocess_aggrefs(PlannerInfo *root, Node *clause)
Definition: prepagg.c:111
void preprocess_targetlist(PlannerInfo *root)
Definition: preptlist.c:62
RelOptInfo * plan_set_operations(PlannerInfo *root)
Definition: prepunion.c:104
RelOptInfo * find_base_rel(PlannerInfo *root, int relid)
Definition: relnode.c:404
Cardinality limit_tuples
Definition: pathnodes.h:3250
bool partColsUpdated
Definition: pathnodes.h:549
bool hasRecursion
Definition: pathnodes.h:504
Index query_level
Definition: pathnodes.h:208
List * rowMarks
Definition: pathnodes.h:371
Cardinality limit_tuples
Definition: pathnodes.h:483
Selectivity tuple_fraction
Definition: pathnodes.h:481
List * update_colnos
Definition: pathnodes.h:464
Relids all_result_relids
Definition: pathnodes.h:354
Relids leaf_result_relids
Definition: pathnodes.h:356
Index relid
Definition: pathnodes.h:909
int numWindowFuncs
Definition: clauses.h:21
List * activeWindows
Definition: planner.c:127
grouping_sets_data * gset_data
Definition: planner.c:128
void split_pathtarget_at_srfs(PlannerInfo *root, PathTarget *target, PathTarget *input_target, List **targets, List **targets_contain_srfs)
Definition: tlist.c:881
#define create_pathtarget(root, tlist)
Definition: tlist.h:53

References generate_unaccent_rules::action, standard_qp_extra::activeWindows, add_partial_path(), add_path(), adjust_appendrel_attrs_multilevel(), adjust_inherited_attnums_multilevel(), adjust_paths_for_srfs(), PlannerInfo::all_result_relids, apply_scanjoin_target_to_paths(), Assert(), assign_special_exec_param(), bms_membership(), BMS_MULTIPLE, bms_next_member(), RelOptInfo::cheapest_total_path, CMD_SELECT, CMD_UPDATE, RelOptInfo::consider_parallel, copyObject, FinalPathExtraData::count_est, create_distinct_paths(), create_grouping_paths(), create_limit_path(), create_lockrows_path(), create_modifytable_path(), create_ordered_paths(), create_pathtarget, create_upper_paths_hook, create_window_paths(), equal(), ereport, errcode(), errmsg(), ERROR, PathTarget::exprs, fetch_upper_rel(), find_base_rel(), find_window_functions(), standard_qp_extra::gset_data, PlannerInfo::hasHavingQual, PlannerInfo::hasRecursion, IS_DUMMY_REL, is_parallel_safe(), lappend(), lappend_int(), LCS_asString(), PlannerInfo::leaf_result_relids, lfirst, limit_needed(), FinalPathExtraData::limit_needed, PlannerInfo::limit_tuples, FinalPathExtraData::limit_tuples, linitial_int, linitial_node, list_length(), list_make1, list_make1_int, make_group_input_target(), make_pathkeys_for_sortclauses(), make_sort_input_target(), make_window_input_target(), NIL, WindowFuncLists::numWindowFuncs, FinalPathExtraData::offset_est, optimize_window_clauses(), parse(), PlannerInfo::parse, PlannerInfo::partColsUpdated, RelOptInfo::partial_pathlist, RelOptInfo::pathlist, plan_set_operations(), postprocess_setop_tlist(), preprocess_aggrefs(), preprocess_groupclause(), preprocess_grouping_sets(), preprocess_limit(), preprocess_minmax_aggregates(), preprocess_targetlist(), PlannerInfo::processed_groupClause, PlannerInfo::processed_tlist, PlannerInfo::query_level, query_planner(), RelOptInfo::relid, RelOptInfo::reltarget, remove_useless_groupby_columns(), PlannerInfo::rowMarks, rt_fetch, select_active_windows(), RelOptInfo::serverid, PlannerInfo::sort_pathkeys, split_pathtarget_at_srfs(), standard_qp_callback(), PlannerInfo::tuple_fraction, PlannerInfo::update_colnos, UPPERREL_DISTINCT, UPPERREL_FINAL, UPPERREL_GROUP_AGG, UPPERREL_ORDERED, UPPERREL_PARTIAL_DISTINCT, UPPERREL_WINDOW, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by subquery_planner().

◆ has_volatile_pathkey()

static bool has_volatile_pathkey ( List * keys )
static

Definition at line 3173 of file planner.c.

3174 {
3175  ListCell *lc;
3176 
3177  foreach(lc, keys)
3178  {
3179  PathKey *pathkey = lfirst_node(PathKey, lc);
3180 
3181  if (pathkey->pk_eclass->ec_has_volatile)
3182  return true;
3183  }
3184 
3185  return false;
3186 }

References lfirst_node.

Referenced by adjust_group_pathkeys_for_groupagg().
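
A pathkey is "volatile" when its equivalence class carries a volatile expression, as produced for example by ORDER BY random(). A minimal caller-side sketch (assumed shape only, not verbatim from adjust_group_pathkeys_for_groupagg()):

/* A volatile sort expression can yield a different value on every
 * evaluation, so an ordering built around it must not be reused or
 * merged with other clauses' orderings. */
if (has_volatile_pathkey(pathkeys))
    return;    /* hypothetical caller: give up on pathkey reuse */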

◆ is_degenerate_grouping()

static bool is_degenerate_grouping ( PlannerInfo * root )
static

Definition at line 3844 of file planner.c.

3845 {
3846  Query *parse = root->parse;
3847 
3848  return (root->hasHavingQual || parse->groupingSets) &&
3849  !parse->hasAggs && parse->groupClause == NIL;
3850 }

References PlannerInfo::hasHavingQual, NIL, parse(), and PlannerInfo::parse.

Referenced by create_grouping_paths().
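
Grouping is "degenerate" when the query has a HAVING qual or grouping sets but no aggregates and no GROUP BY columns, e.g. SELECT 1 FROM t HAVING false; the result is then at most one row, with HAVING acting as a one-time gating filter. A sketch of the dispatching branch (assumed shape of create_grouping_paths(), not verbatim):

if (is_degenerate_grouping(root))
{
    /* Emit at most one row, gated by the HAVING qual */
    create_degenerate_grouping_paths(root, input_rel, grouped_rel);
}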

◆ limit_needed()

bool limit_needed ( Query * parse )

Definition at line 2586 of file planner.c.

2587 {
2588  Node *node;
2589 
2590  node = parse->limitCount;
2591  if (node)
2592  {
2593  if (IsA(node, Const))
2594  {
2595  /* NULL indicates LIMIT ALL, ie, no limit */
2596  if (!((Const *) node)->constisnull)
2597  return true; /* LIMIT with a constant value */
2598  }
2599  else
2600  return true; /* non-constant LIMIT */
2601  }
2602 
2603  node = parse->limitOffset;
2604  if (node)
2605  {
2606  if (IsA(node, Const))
2607  {
2608  /* Treat NULL as no offset; the executor would too */
2609  if (!((Const *) node)->constisnull)
2610  {
2611  int64 offset = DatumGetInt64(((Const *) node)->constvalue);
2612 
2613  if (offset != 0)
2614  return true; /* OFFSET with a nonzero value */
2615  }
2616  }
2617  else
2618  return true; /* non-constant OFFSET */
2619  }
2620 
2621  return false; /* don't need a Limit plan node */
2622 }
#define IsA(nodeptr, _type_)
Definition: nodes.h:179
static int64 DatumGetInt64(Datum X)
Definition: postgres.h:385

References DatumGetInt64(), IsA, and parse().

Referenced by grouping_planner(), and set_rel_consider_parallel().
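
Note that LIMIT ALL arrives as a NULL constant and OFFSET 0 as a zero constant, and neither calls for a Limit node. A sketch of the caller-side pattern (assumed shape of the call in grouping_planner(); create_limit_path() is declared in optimizer/pathnode.h):

if (limit_needed(parse))
{
    /* Only now is a LimitPath worth stacking on top of the path */
    path = (Path *) create_limit_path(root, final_rel, path,
                                      parse->limitOffset,
                                      parse->limitCount,
                                      parse->limitOption,
                                      offset_est, count_est);
}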

◆ make_group_input_target()

static PathTarget * make_group_input_target ( PlannerInfo * root,
PathTarget * final_target 
)
static

Definition at line 5333 of file planner.c.

5334 {
5335  Query *parse = root->parse;
5336  PathTarget *input_target;
5337  List *non_group_cols;
5338  List *non_group_vars;
5339  int i;
5340  ListCell *lc;
5341 
5342  /*
5343  * We must build a target containing all grouping columns, plus any other
5344  * Vars mentioned in the query's targetlist and HAVING qual.
5345  */
5346  input_target = create_empty_pathtarget();
5347  non_group_cols = NIL;
5348 
5349  i = 0;
5350  foreach(lc, final_target->exprs)
5351  {
5352  Expr *expr = (Expr *) lfirst(lc);
5353  Index sgref = get_pathtarget_sortgroupref(final_target, i);
5354 
5355  if (sgref && root->processed_groupClause &&
5356  get_sortgroupref_clause_noerr(sgref,
5357  root->processed_groupClause) != NULL)
5358  {
5359  /*
5360  * It's a grouping column, so add it to the input target as-is.
5361  */
5362  add_column_to_pathtarget(input_target, expr, sgref);
5363  }
5364  else
5365  {
5366  /*
5367  * Non-grouping column, so just remember the expression for later
5368  * call to pull_var_clause.
5369  */
5370  non_group_cols = lappend(non_group_cols, expr);
5371  }
5372 
5373  i++;
5374  }
5375 
5376  /*
5377  * If there's a HAVING clause, we'll need the Vars it uses, too.
5378  */
5379  if (parse->havingQual)
5380  non_group_cols = lappend(non_group_cols, parse->havingQual);
5381 
5382  /*
5383  * Pull out all the Vars mentioned in non-group cols (plus HAVING), and
5384  * add them to the input target if not already present. (A Var used
5385  * directly as a GROUP BY item will be present already.) Note this
5386  * includes Vars used in resjunk items, so we are covering the needs of
5387  * ORDER BY and window specifications. Vars used within Aggrefs and
5388  * WindowFuncs will be pulled out here, too.
5389  */
5390  non_group_vars = pull_var_clause((Node *) non_group_cols,
5391  PVC_RECURSE_AGGREGATES |
5392  PVC_RECURSE_WINDOWFUNCS |
5393  PVC_INCLUDE_PLACEHOLDERS);
5394  add_new_columns_to_pathtarget(input_target, non_group_vars);
5395 
5396  /* clean up cruft */
5397  list_free(non_group_vars);
5398  list_free(non_group_cols);
5399 
5400  /* XXX this causes some redundant cost calculation ... */
5401  return set_pathtarget_cost_width(root, input_target);
5402 }
PathTarget * set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
Definition: costsize.c:6015
void list_free(List *list)
Definition: list.c:1545
#define PVC_RECURSE_AGGREGATES
Definition: optimizer.h:184
#define PVC_RECURSE_WINDOWFUNCS
Definition: optimizer.h:186
#define PVC_INCLUDE_PLACEHOLDERS
Definition: optimizer.h:187
#define get_pathtarget_sortgroupref(target, colno)
Definition: pathnodes.h:1523
SortGroupClause * get_sortgroupref_clause_noerr(Index sortref, List *clauses)
Definition: tlist.c:443
void add_new_columns_to_pathtarget(PathTarget *target, List *exprs)
Definition: tlist.c:752
PathTarget * create_empty_pathtarget(void)
Definition: tlist.c:681
List * pull_var_clause(Node *node, int flags)
Definition: var.c:607

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), create_empty_pathtarget(), PathTarget::exprs, get_pathtarget_sortgroupref, get_sortgroupref_clause_noerr(), i, lappend(), lfirst, list_free(), NIL, parse(), PlannerInfo::parse, PlannerInfo::processed_groupClause, pull_var_clause(), PVC_INCLUDE_PLACEHOLDERS, PVC_RECURSE_AGGREGATES, PVC_RECURSE_WINDOWFUNCS, and set_pathtarget_cost_width().

Referenced by grouping_planner().
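
A constructed example (illustrative, not from planner.c): for SELECT a, sum(b + c) FROM tab GROUP BY a HAVING max(d) > 0, final_target is {a, sum(b + c)}. The input target built here keeps the grouping column a as-is (with its sortgroupref), drops the Aggref, and instead adds the plain Vars that the aggregate arguments and the HAVING qual reference, so the scan/join phase is asked to emit {a, b, c, d}.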

◆ make_grouping_rel()

static RelOptInfo * make_grouping_rel ( PlannerInfo * root,
RelOptInfo * input_rel,
PathTarget * target,
bool  target_parallel_safe,
Node * havingQual 
)
static

Definition at line 3791 of file planner.c.

3794 {
3795  RelOptInfo *grouped_rel;
3796 
3797  if (IS_OTHER_REL(input_rel))
3798  {
3799  grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG,
3800  input_rel->relids);
3801  grouped_rel->reloptkind = RELOPT_OTHER_UPPER_REL;
3802  }
3803  else
3804  {
3805  /*
3806  * By tradition, the relids set for the main grouping relation is
3807  * NULL. (This could be changed, but might require adjustments
3808  * elsewhere.)
3809  */
3810  grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, NULL);
3811  }
3812 
3813  /* Set target. */
3814  grouped_rel->reltarget = target;
3815 
3816  /*
3817  * If the input relation is not parallel-safe, then the grouped relation
3818  * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
3819  * target list and HAVING quals are parallel-safe.
3820  */
3821  if (input_rel->consider_parallel && target_parallel_safe &&
3822  is_parallel_safe(root, (Node *) havingQual))
3823  grouped_rel->consider_parallel = true;
3824 
3825  /*
3826  * If the input rel belongs to a single FDW, so does the grouped rel.
3827  */
3828  grouped_rel->serverid = input_rel->serverid;
3829  grouped_rel->userid = input_rel->userid;
3830  grouped_rel->useridiscurrent = input_rel->useridiscurrent;
3831  grouped_rel->fdwroutine = input_rel->fdwroutine;
3832 
3833  return grouped_rel;
3834 }
@ RELOPT_OTHER_UPPER_REL
Definition: pathnodes.h:823

References RelOptInfo::consider_parallel, fetch_upper_rel(), IS_OTHER_REL, is_parallel_safe(), RelOptInfo::relids, RELOPT_OTHER_UPPER_REL, RelOptInfo::reloptkind, RelOptInfo::reltarget, RelOptInfo::serverid, UPPERREL_GROUP_AGG, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by create_grouping_paths(), and create_partitionwise_grouping_paths().
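
The IS_OTHER_REL branch serves partitionwise aggregation: each partition child's grouped rel is keyed by that child's relids and marked RELOPT_OTHER_UPPER_REL, while the main grouping rel traditionally keeps NULL relids. Copying the serverid/userid/fdwroutine fields lets a query whose input comes entirely from one foreign server consider pushing the grouping down to it. A hypothetical child-side call (assumed shape and variable names, mirroring create_partitionwise_grouping_paths()):

/* child_input_rel is an "other" rel, so the result is keyed by its relids */
child_grouped_rel = make_grouping_rel(root, child_input_rel,
                                      child_target,
                                      target_parallel_safe,
                                      child_having_qual);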

◆ make_partial_grouping_target()

static PathTarget * make_partial_grouping_target ( PlannerInfo * root,
PathTarget * grouping_target,
Node * havingQual 
)
static

Definition at line 5421 of file planner.c.

5424 {
5425  PathTarget *partial_target;
5426  List *non_group_cols;
5427  List *non_group_exprs;
5428  int i;
5429  ListCell *lc;
5430 
5431  partial_target = create_empty_pathtarget();
5432  non_group_cols = NIL;
5433 
5434  i = 0;
5435  foreach(lc, grouping_target->exprs)
5436  {
5437  Expr *expr = (Expr *) lfirst(lc);
5438  Index sgref = get_pathtarget_sortgroupref(grouping_target, i);
5439 
5440  if (sgref && root->processed_groupClause &&
5441  get_sortgroupref_clause_noerr(sgref,
5442  root->processed_groupClause) != NULL)
5443  {
5444  /*
5445  * It's a grouping column, so add it to the partial_target as-is.
5446  * (This allows the upper agg step to repeat the grouping calcs.)
5447  */
5448  add_column_to_pathtarget(partial_target, expr, sgref);
5449  }
5450  else
5451  {
5452  /*
5453  * Non-grouping column, so just remember the expression for later
5454  * call to pull_var_clause.
5455  */
5456  non_group_cols = lappend(non_group_cols, expr);
5457  }
5458 
5459  i++;
5460  }
5461 
5462  /*
5463  * If there's a HAVING clause, we'll need the Vars/Aggrefs it uses, too.
5464  */
5465  if (havingQual)
5466  non_group_cols = lappend(non_group_cols, havingQual);
5467 
5468  /*
5469  * Pull out all the Vars, PlaceHolderVars, and Aggrefs mentioned in
5470  * non-group cols (plus HAVING), and add them to the partial_target if not
5471  * already present. (An expression used directly as a GROUP BY item will
5472  * be present already.) Note this includes Vars used in resjunk items, so
5473  * we are covering the needs of ORDER BY and window specifications.
5474  */
5475  non_group_exprs = pull_var_clause((Node *) non_group_cols,
5476  PVC_INCLUDE_AGGREGATES |
5477  PVC_RECURSE_WINDOWFUNCS |
5478  PVC_INCLUDE_PLACEHOLDERS);
5479 
5480  add_new_columns_to_pathtarget(partial_target, non_group_exprs);
5481 
5482  /*
5483  * Adjust Aggrefs to put them in partial mode. At this point all Aggrefs
5484  * are at the top level of the target list, so we can just scan the list
5485  * rather than recursing through the expression trees.
5486  */
5487  foreach(lc, partial_target->exprs)
5488  {
5489  Aggref *aggref = (Aggref *) lfirst(lc);
5490 
5491  if (IsA(aggref, Aggref))
5492  {
5493  Aggref *newaggref;
5494 
5495  /*
5496  * We shouldn't need to copy the substructure of the Aggref node,
5497  * but flat-copy the node itself to avoid damaging other trees.
5498  */
5499  newaggref = makeNode(Aggref);
5500  memcpy(newaggref, aggref, sizeof(Aggref));
5501 
5502  /* For now, assume serialization is required */
5503  mark_partial_aggref(newaggref, AGGSPLIT_INITIAL_SERIAL);
5504 
5505  lfirst(lc) = newaggref;
5506  }
5507  }
5508 
5509  /* clean up cruft */
5510  list_free(non_group_exprs);
5511  list_free(non_group_cols);
5512 
5513  /* XXX this causes some redundant cost calculation ... */
5514  return set_pathtarget_cost_width(root, partial_target);
5515 }
#define PVC_INCLUDE_AGGREGATES
Definition: optimizer.h:183
void mark_partial_aggref(Aggref *agg, AggSplit aggsplit)
Definition: planner.c:5524

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), AGGSPLIT_INITIAL_SERIAL, create_empty_pathtarget(), PathTarget::exprs, get_pathtarget_sortgroupref, get_sortgroupref_clause_noerr(), i, IsA, lappend(), lfirst, list_free(), makeNode, mark_partial_aggref(), NIL, PlannerInfo::processed_groupClause, pull_var_clause(), PVC_INCLUDE_AGGREGATES, PVC_INCLUDE_PLACEHOLDERS, PVC_RECURSE_WINDOWFUNCS, and set_pathtarget_cost_width().

Referenced by create_partial_grouping_paths().
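
A constructed example: for SELECT a, sum(b) FROM tab GROUP BY a, the partial target comes out as {a, sum(b)}, with the Aggref flat-copied and marked AGGSPLIT_INITIAL_SERIAL. Each partial Aggregate (typically below a Gather) then emits one serialized transition state per group, and the Finalize Aggregate above combines those states into the final sum.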

◆ make_pathkeys_for_window()

static List * make_pathkeys_for_window ( PlannerInfo * root,
WindowClause * wc,
List * tlist 
)
static

Definition at line 6010 of file planner.c.

6012 {
6013  List *window_pathkeys;
6014  List *window_sortclauses;
6015 
6016  /* Throw error if can't sort */
6017  if (!grouping_is_sortable(wc->partitionClause))
6018  ereport(ERROR,
6019  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6020  errmsg("could not implement window PARTITION BY"),
6021  errdetail("Window partitioning columns must be of sortable datatypes.")));
6022  if (!grouping_is_sortable(wc->orderClause))
6023  ereport(ERROR,
6024  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6025  errmsg("could not implement window ORDER BY"),
6026  errdetail("Window ordering columns must be of sortable datatypes.")));
6027 
6028  /* Okay, make the combined pathkeys */
6029  window_sortclauses = list_concat_copy(wc->partitionClause, wc->orderClause);
6030  window_pathkeys = make_pathkeys_for_sortclauses(root,
6031  window_sortclauses,
6032  tlist);
6033  list_free(window_sortclauses);
6034  return window_pathkeys;
6035 }
List * list_concat_copy(const List *list1, const List *list2)
Definition: list.c:597
List * partitionClause
Definition: parsenodes.h:1495
List * orderClause
Definition: parsenodes.h:1497

References ereport, errcode(), errdetail(), errmsg(), ERROR, grouping_is_sortable(), list_concat_copy(), list_free(), make_pathkeys_for_sortclauses(), WindowClause::orderClause, and WindowClause::partitionClause.

Referenced by create_one_window_path(), and standard_qp_callback().
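
A constructed example: for a window defined as OVER (PARTITION BY x ORDER BY y), the concatenated sort-clause list is (x, y), so the returned pathkeys ask for input ordered as if by ORDER BY x, y; each partition then arrives as a contiguous, pre-sorted run. Because make_pathkeys_for_sortclauses() canonicalizes the result, columns that are redundant under equivalence classes may drop out of the pathkey list.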

◆ make_sort_input_target()

static PathTarget * make_sort_input_target ( PlannerInfo * root,
PathTarget * final_target,
bool * have_postponed_srfs 
)
static

Definition at line 6104 of file planner.c.

6107 {
6108  Query *parse = root->parse;
6109  PathTarget *input_target;
6110  int ncols;
6111  bool *col_is_srf;
6112  bool *postpone_col;
6113  bool have_srf;
6114  bool have_volatile;
6115  bool have_expensive;
6116  bool have_srf_sortcols;
6117  bool postpone_srfs;
6118  List *postponable_cols;
6119  List *postponable_vars;
6120  int i;
6121  ListCell *lc;
6122 
6123  /* Shouldn't get here unless query has ORDER BY */
6124  Assert(parse->sortClause);
6125 
6126  *have_postponed_srfs = false; /* default result */
6127 
6128  /* Inspect tlist and collect per-column information */
6129  ncols = list_length(final_target->exprs);
6130  col_is_srf = (bool *) palloc0(ncols * sizeof(bool));
6131  postpone_col = (bool *) palloc0(ncols * sizeof(bool));
6132  have_srf = have_volatile = have_expensive = have_srf_sortcols = false;
6133 
6134  i = 0;
6135  foreach(lc, final_target->exprs)
6136  {
6137  Expr *expr = (Expr *) lfirst(lc);
6138 
6139  /*
6140  * If the column has a sortgroupref, assume it has to be evaluated
6141  * before sorting. Generally such columns would be ORDER BY, GROUP
6142  * BY, etc targets. One exception is columns that were removed from
6143  * GROUP BY by remove_useless_groupby_columns() ... but those would
6144  * only be Vars anyway. There don't seem to be any cases where it
6145  * would be worth the trouble to double-check.
6146  */
6147  if (get_pathtarget_sortgroupref(final_target, i) == 0)
6148  {
6149  /*
6150  * Check for SRF or volatile functions. Check the SRF case first
6151  * because we must know whether we have any postponed SRFs.
6152  */
6153  if (parse->hasTargetSRFs &&
6154  expression_returns_set((Node *) expr))
6155  {
6156  /* We'll decide below whether these are postponable */
6157  col_is_srf[i] = true;
6158  have_srf = true;
6159  }
6160  else if (contain_volatile_functions((Node *) expr))
6161  {
6162  /* Unconditionally postpone */
6163  postpone_col[i] = true;
6164  have_volatile = true;
6165  }
6166  else
6167  {
6168  /*
6169  * Else check the cost. XXX it's annoying to have to do this
6170  * when set_pathtarget_cost_width() just did it. Refactor to
6171  * allow sharing the work?
6172  */
6173  QualCost cost;
6174 
6175  cost_qual_eval_node(&cost, (Node *) expr, root);
6176 
6177  /*
6178  * We arbitrarily define "expensive" as "more than 10X
6179  * cpu_operator_cost". Note this will take in any PL function
6180  * with default cost.
6181  */
6182  if (cost.per_tuple > 10 * cpu_operator_cost)
6183  {
6184  postpone_col[i] = true;
6185  have_expensive = true;
6186  }
6187  }
6188  }
6189  else
6190  {
6191  /* For sortgroupref cols, just check if any contain SRFs */
6192  if (!have_srf_sortcols &&
6193  parse->hasTargetSRFs &&
6194  expression_returns_set((Node *) expr))
6195  have_srf_sortcols = true;
6196  }
6197 
6198  i++;
6199  }
6200 
6201  /*
6202  * We can postpone SRFs if we have some but none are in sortgroupref cols.
6203  */
6204  postpone_srfs = (have_srf && !have_srf_sortcols);
6205 
6206  /*
6207  * If we don't need a post-sort projection, just return final_target.
6208  */
6209  if (!(postpone_srfs || have_volatile ||
6210  (have_expensive &&
6211  (parse->limitCount || root->tuple_fraction > 0))))
6212  return final_target;
6213 
6214  /*
6215  * Report whether the post-sort projection will contain set-returning
6216  * functio