PostgreSQL Source Code  git master
planner.c File Reference
#include "postgres.h"
#include <limits.h>
#include <math.h>
#include "access/genam.h"
#include "access/parallel.h"
#include "access/sysattr.h"
#include "access/table.h"
#include "catalog/pg_aggregate.h"
#include "catalog/pg_constraint.h"
#include "catalog/pg_inherits.h"
#include "catalog/pg_proc.h"
#include "catalog/pg_type.h"
#include "executor/executor.h"
#include "foreign/fdwapi.h"
#include "jit/jit.h"
#include "lib/bipartite_match.h"
#include "lib/knapsack.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "nodes/supportnodes.h"
#include "optimizer/appendinfo.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/optimizer.h"
#include "optimizer/paramassign.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/plancat.h"
#include "optimizer/planmain.h"
#include "optimizer/planner.h"
#include "optimizer/prep.h"
#include "optimizer/subselect.h"
#include "optimizer/tlist.h"
#include "parser/analyze.h"
#include "parser/parse_agg.h"
#include "parser/parse_clause.h"
#include "parser/parse_relation.h"
#include "parser/parsetree.h"
#include "partitioning/partdesc.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
#include "utils/selfuncs.h"
Include dependency graph for planner.c:


Data Structures

struct  grouping_sets_data
 
struct  WindowClauseSortData
 
struct  standard_qp_extra
 

Macros

#define EXPRKIND_QUAL   0
 
#define EXPRKIND_TARGET   1
 
#define EXPRKIND_RTFUNC   2
 
#define EXPRKIND_RTFUNC_LATERAL   3
 
#define EXPRKIND_VALUES   4
 
#define EXPRKIND_VALUES_LATERAL   5
 
#define EXPRKIND_LIMIT   6
 
#define EXPRKIND_APPINFO   7
 
#define EXPRKIND_PHV   8
 
#define EXPRKIND_TABLESAMPLE   9
 
#define EXPRKIND_ARBITER_ELEM   10
 
#define EXPRKIND_TABLEFUNC   11
 
#define EXPRKIND_TABLEFUNC_LATERAL   12
 

Functions

static Node * preprocess_expression (PlannerInfo *root, Node *expr, int kind)
 
static void preprocess_qual_conditions (PlannerInfo *root, Node *jtnode)
 
static void grouping_planner (PlannerInfo *root, double tuple_fraction, SetOperationStmt *setops)
 
static grouping_sets_data * preprocess_grouping_sets (PlannerInfo *root)
 
static List * remap_to_groupclause_idx (List *groupClause, List *gsets, int *tleref_to_colnum_map)
 
static void preprocess_rowmarks (PlannerInfo *root)
 
static double preprocess_limit (PlannerInfo *root, double tuple_fraction, int64 *offset_est, int64 *count_est)
 
static void remove_useless_groupby_columns (PlannerInfo *root)
 
static List * groupclause_apply_groupingset (PlannerInfo *root, List *force)
 
static List * extract_rollup_sets (List *groupingSets)
 
static List * reorder_grouping_sets (List *groupingSets, List *sortclause)
 
static void standard_qp_callback (PlannerInfo *root, void *extra)
 
static double get_number_of_groups (PlannerInfo *root, double path_rows, grouping_sets_data *gd, List *target_list)
 
static RelOptInfo * create_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, grouping_sets_data *gd)
 
static bool is_degenerate_grouping (PlannerInfo *root)
 
static void create_degenerate_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel)
 
static RelOptInfo * make_grouping_rel (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, Node *havingQual)
 
static void create_ordinary_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, GroupPathExtraData *extra, RelOptInfo **partially_grouped_rel_p)
 
static void consider_groupingsets_paths (PlannerInfo *root, RelOptInfo *grouped_rel, Path *path, bool is_sorted, bool can_hash, grouping_sets_data *gd, const AggClauseCosts *agg_costs, double dNumGroups)
 
static RelOptInfo * create_window_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *input_target, PathTarget *output_target, bool output_target_parallel_safe, WindowFuncLists *wflists, List *activeWindows)
 
static void create_one_window_path (PlannerInfo *root, RelOptInfo *window_rel, Path *path, PathTarget *input_target, PathTarget *output_target, WindowFuncLists *wflists, List *activeWindows)
 
static RelOptInfo * create_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target)
 
static void create_partial_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *final_distinct_rel, PathTarget *target)
 
static RelOptInfo * create_final_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *distinct_rel)
 
static RelOptInfo * create_ordered_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, double limit_tuples)
 
static PathTarget * make_group_input_target (PlannerInfo *root, PathTarget *final_target)
 
static PathTarget * make_partial_grouping_target (PlannerInfo *root, PathTarget *grouping_target, Node *havingQual)
 
static List * postprocess_setop_tlist (List *new_tlist, List *orig_tlist)
 
static void optimize_window_clauses (PlannerInfo *root, WindowFuncLists *wflists)
 
static List * select_active_windows (PlannerInfo *root, WindowFuncLists *wflists)
 
static PathTarget * make_window_input_target (PlannerInfo *root, PathTarget *final_target, List *activeWindows)
 
static List * make_pathkeys_for_window (PlannerInfo *root, WindowClause *wc, List *tlist)
 
static PathTarget * make_sort_input_target (PlannerInfo *root, PathTarget *final_target, bool *have_postponed_srfs)
 
static void adjust_paths_for_srfs (PlannerInfo *root, RelOptInfo *rel, List *targets, List *targets_contain_srfs)
 
static void add_paths_to_grouping_rel (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, double dNumGroups, GroupPathExtraData *extra)
 
static RelOptInfo * create_partial_grouping_paths (PlannerInfo *root, RelOptInfo *grouped_rel, RelOptInfo *input_rel, grouping_sets_data *gd, GroupPathExtraData *extra, bool force_rel_creation)
 
static void gather_grouping_paths (PlannerInfo *root, RelOptInfo *rel)
 
static bool can_partial_agg (PlannerInfo *root)
 
static void apply_scanjoin_target_to_paths (PlannerInfo *root, RelOptInfo *rel, List *scanjoin_targets, List *scanjoin_targets_contain_srfs, bool scanjoin_target_parallel_safe, bool tlist_same_exprs)
 
static void create_partitionwise_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, PartitionwiseAggregateType patype, GroupPathExtraData *extra)
 
static bool group_by_has_partkey (RelOptInfo *input_rel, List *targetList, List *groupClause)
 
static int common_prefix_cmp (const void *a, const void *b)
 
static List * generate_setop_child_grouplist (SetOperationStmt *op, List *targetlist)
 
PlannedStmt * planner (Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams)
 
PlannedStmt * standard_planner (Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams)
 
PlannerInfo * subquery_planner (PlannerGlobal *glob, Query *parse, PlannerInfo *parent_root, bool hasRecursion, double tuple_fraction, SetOperationStmt *setops)
 
Expr * preprocess_phv_expression (PlannerInfo *root, Expr *expr)
 
RowMarkType select_rowmark_type (RangeTblEntry *rte, LockClauseStrength strength)
 
bool limit_needed (Query *parse)
 
static bool has_volatile_pathkey (List *keys)
 
static void adjust_group_pathkeys_for_groupagg (PlannerInfo *root)
 
void mark_partial_aggref (Aggref *agg, AggSplit aggsplit)
 
Path * get_cheapest_fractional_path (RelOptInfo *rel, double tuple_fraction)
 
Expr * expression_planner (Expr *expr)
 
Expr * expression_planner_with_deps (Expr *expr, List **relationOids, List **invalItems)
 
bool plan_cluster_use_sort (Oid tableOid, Oid indexOid)
 
int plan_create_index_workers (Oid tableOid, Oid indexOid)
 
static Path * make_ordered_path (PlannerInfo *root, RelOptInfo *rel, Path *path, Path *cheapest_path, List *pathkeys)
 

Variables

double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION
 
int debug_parallel_query = DEBUG_PARALLEL_OFF
 
bool parallel_leader_participation = true
 
planner_hook_type planner_hook = NULL
 
create_upper_paths_hook_type create_upper_paths_hook = NULL
 

Macro Definition Documentation

◆ EXPRKIND_APPINFO

#define EXPRKIND_APPINFO   7

Definition at line 85 of file planner.c.

◆ EXPRKIND_ARBITER_ELEM

#define EXPRKIND_ARBITER_ELEM   10

Definition at line 88 of file planner.c.

◆ EXPRKIND_LIMIT

#define EXPRKIND_LIMIT   6

Definition at line 84 of file planner.c.

◆ EXPRKIND_PHV

#define EXPRKIND_PHV   8

Definition at line 86 of file planner.c.

◆ EXPRKIND_QUAL

#define EXPRKIND_QUAL   0

Definition at line 78 of file planner.c.

◆ EXPRKIND_RTFUNC

#define EXPRKIND_RTFUNC   2

Definition at line 80 of file planner.c.

◆ EXPRKIND_RTFUNC_LATERAL

#define EXPRKIND_RTFUNC_LATERAL   3

Definition at line 81 of file planner.c.

◆ EXPRKIND_TABLEFUNC

#define EXPRKIND_TABLEFUNC   11

Definition at line 89 of file planner.c.

◆ EXPRKIND_TABLEFUNC_LATERAL

#define EXPRKIND_TABLEFUNC_LATERAL   12

Definition at line 90 of file planner.c.

◆ EXPRKIND_TABLESAMPLE

#define EXPRKIND_TABLESAMPLE   9

Definition at line 87 of file planner.c.

◆ EXPRKIND_TARGET

#define EXPRKIND_TARGET   1

Definition at line 79 of file planner.c.

◆ EXPRKIND_VALUES

#define EXPRKIND_VALUES   4

Definition at line 82 of file planner.c.

◆ EXPRKIND_VALUES_LATERAL

#define EXPRKIND_VALUES_LATERAL   5

Definition at line 83 of file planner.c.
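
The EXPRKIND_* codes identify the context an expression came from, which preprocess_expression() uses to vary its simplification steps (for instance, EXPRKIND_QUAL expressions additionally get canonicalize_qual() treatment). A condensed, illustrative sketch of how subquery_planner() applies them; this is abridged, not the verbatim list of call sites:

    /* Abridged sketch: each expression class is preprocessed under its
     * own EXPRKIND_* code (see subquery_planner for the full set). */
    parse->targetList = (List *)
        preprocess_expression(root, (Node *) parse->targetList,
                              EXPRKIND_TARGET);
    parse->limitOffset = preprocess_expression(root, parse->limitOffset,
                                               EXPRKIND_LIMIT);
    parse->limitCount = preprocess_expression(root, parse->limitCount,
                                              EXPRKIND_LIMIT);
    /* WHERE and JOIN/ON quals are walked recursively as EXPRKIND_QUAL */
    preprocess_qual_conditions(root, (Node *) parse->jointree);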

Function Documentation

◆ add_paths_to_grouping_rel()

static void add_paths_to_grouping_rel ( PlannerInfo * root,
RelOptInfo * input_rel,
RelOptInfo * grouped_rel,
RelOptInfo * partially_grouped_rel,
const AggClauseCosts * agg_costs,
grouping_sets_data * gd,
double  dNumGroups,
GroupPathExtraData * extra 
)
static

Definition at line 6842 of file planner.c.

6848 {
6849  Query *parse = root->parse;
6850  Path *cheapest_path = input_rel->cheapest_total_path;
6851  ListCell *lc;
6852  bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
6853  bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
6854  List *havingQual = (List *) extra->havingQual;
6855  AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
6856 
6857  if (can_sort)
6858  {
6859  /*
6860  * Use any available suitably-sorted path as input, and also consider
6861  * sorting the cheapest-total path and incremental sort on any paths
6862  * with presorted keys.
6863  */
6864  foreach(lc, input_rel->pathlist)
6865  {
6866  ListCell *lc2;
6867  Path *path = (Path *) lfirst(lc);
6868  Path *path_save = path;
6869  List *pathkey_orderings = NIL;
6870 
6871  /* generate alternative group orderings that might be useful */
6872  pathkey_orderings = get_useful_group_keys_orderings(root, path);
6873 
6874  Assert(list_length(pathkey_orderings) > 0);
6875 
6876  foreach(lc2, pathkey_orderings)
6877  {
6878  PathKeyInfo *info = (PathKeyInfo *) lfirst(lc2);
6879 
6880  /* restore the path (we replace it in the loop) */
6881  path = path_save;
6882 
6883  path = make_ordered_path(root,
6884  grouped_rel,
6885  path,
6886  cheapest_path,
6887  info->pathkeys);
6888  if (path == NULL)
6889  continue;
6890 
6891  /* Now decide what to stick atop it */
6892  if (parse->groupingSets)
6893  {
6894  consider_groupingsets_paths(root, grouped_rel,
6895  path, true, can_hash,
6896  gd, agg_costs, dNumGroups);
6897  }
6898  else if (parse->hasAggs)
6899  {
6900  /*
6901  * We have aggregation, possibly with plain GROUP BY. Make
6902  * an AggPath.
6903  */
6904  add_path(grouped_rel, (Path *)
6905  create_agg_path(root,
6906  grouped_rel,
6907  path,
6908  grouped_rel->reltarget,
6909  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
6910  AGGSPLIT_SIMPLE,
6911  info->clauses,
6912  havingQual,
6913  agg_costs,
6914  dNumGroups));
6915  }
6916  else if (parse->groupClause)
6917  {
6918  /*
6919  * We have GROUP BY without aggregation or grouping sets.
6920  * Make a GroupPath.
6921  */
6922  add_path(grouped_rel, (Path *)
6923  create_group_path(root,
6924  grouped_rel,
6925  path,
6926  info->clauses,
6927  havingQual,
6928  dNumGroups));
6929  }
6930  else
6931  {
6932  /* Other cases should have been handled above */
6933  Assert(false);
6934  }
6935  }
6936  }
6937 
6938  /*
6939  * Instead of operating directly on the input relation, we can
6940  * consider finalizing a partially aggregated path.
6941  */
6942  if (partially_grouped_rel != NULL)
6943  {
6944  foreach(lc, partially_grouped_rel->pathlist)
6945  {
6946  ListCell *lc2;
6947  Path *path = (Path *) lfirst(lc);
6948  Path *path_save = path;
6949  List *pathkey_orderings = NIL;
6950 
6951  /* generate alternative group orderings that might be useful */
6952  pathkey_orderings = get_useful_group_keys_orderings(root, path);
6953 
6954  Assert(list_length(pathkey_orderings) > 0);
6955 
6956  /* process all potentially interesting grouping reorderings */
6957  foreach(lc2, pathkey_orderings)
6958  {
6959  PathKeyInfo *info = (PathKeyInfo *) lfirst(lc2);
6960 
6961  /* restore the path (we replace it in the loop) */
6962  path = path_save;
6963 
6964  path = make_ordered_path(root,
6965  grouped_rel,
6966  path,
6967  partially_grouped_rel->cheapest_total_path,
6968  info->pathkeys);
6969 
6970  if (path == NULL)
6971  continue;
6972 
6973  if (parse->hasAggs)
6974  add_path(grouped_rel, (Path *)
6975  create_agg_path(root,
6976  grouped_rel,
6977  path,
6978  grouped_rel->reltarget,
6979  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
6980  AGGSPLIT_FINAL_DESERIAL,
6981  info->clauses,
6982  havingQual,
6983  agg_final_costs,
6984  dNumGroups));
6985  else
6986  add_path(grouped_rel, (Path *)
6987  create_group_path(root,
6988  grouped_rel,
6989  path,
6990  info->clauses,
6991  havingQual,
6992  dNumGroups));
6993 
6994  }
6995  }
6996  }
6997  }
6998 
6999  if (can_hash)
7000  {
7001  if (parse->groupingSets)
7002  {
7003  /*
7004  * Try for a hash-only groupingsets path over unsorted input.
7005  */
7006  consider_groupingsets_paths(root, grouped_rel,
7007  cheapest_path, false, true,
7008  gd, agg_costs, dNumGroups);
7009  }
7010  else
7011  {
7012  /*
7013  * Generate a HashAgg Path. We just need an Agg over the
7014  * cheapest-total input path, since input order won't matter.
7015  */
7016  add_path(grouped_rel, (Path *)
7017  create_agg_path(root, grouped_rel,
7018  cheapest_path,
7019  grouped_rel->reltarget,
7020  AGG_HASHED,
7021  AGGSPLIT_SIMPLE,
7022  root->processed_groupClause,
7023  havingQual,
7024  agg_costs,
7025  dNumGroups));
7026  }
7027 
7028  /*
7029  * Generate a Finalize HashAgg Path atop of the cheapest partially
7030  * grouped path, assuming there is one
7031  */
7032  if (partially_grouped_rel && partially_grouped_rel->pathlist)
7033  {
7034  Path *path = partially_grouped_rel->cheapest_total_path;
7035 
7036  add_path(grouped_rel, (Path *)
7037  create_agg_path(root,
7038  grouped_rel,
7039  path,
7040  grouped_rel->reltarget,
7041  AGG_HASHED,
7042  AGGSPLIT_FINAL_DESERIAL,
7043  root->processed_groupClause,
7044  havingQual,
7045  agg_final_costs,
7046  dNumGroups));
7047  }
7048  }
7049 
7050  /*
7051  * When partitionwise aggregate is used, we might have fully aggregated
7052  * paths in the partial pathlist, because add_paths_to_append_rel() will
7053  * consider a path for grouped_rel consisting of a Parallel Append of
7054  * non-partial paths from each child.
7055  */
7056  if (grouped_rel->partial_pathlist != NIL)
7057  gather_grouping_paths(root, grouped_rel);
7058 }
#define Assert(condition)
Definition: c.h:858
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:77
@ AGG_SORTED
Definition: nodes.h:354
@ AGG_HASHED
Definition: nodes.h:355
@ AGG_PLAIN
Definition: nodes.h:353
@ AGGSPLIT_FINAL_DESERIAL
Definition: nodes.h:380
@ AGGSPLIT_SIMPLE
Definition: nodes.h:376
List * get_useful_group_keys_orderings(PlannerInfo *root, Path *path)
Definition: pathkeys.c:485
GroupPath * create_group_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *groupClause, List *qual, double numGroups)
Definition: pathnode.c:3044
AggPath * create_agg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, AggStrategy aggstrategy, AggSplit aggsplit, List *groupClause, List *qual, const AggClauseCosts *aggcosts, double numGroups)
Definition: pathnode.c:3155
void add_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:420
#define GROUPING_CAN_USE_HASH
Definition: pathnodes.h:3242
#define GROUPING_CAN_USE_SORT
Definition: pathnodes.h:3241
#define lfirst(lc)
Definition: pg_list.h:172
static int list_length(const List *l)
Definition: pg_list.h:152
#define NIL
Definition: pg_list.h:68
static void gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
Definition: planner.c:7376
static void consider_groupingsets_paths(PlannerInfo *root, RelOptInfo *grouped_rel, Path *path, bool is_sorted, bool can_hash, grouping_sets_data *gd, const AggClauseCosts *agg_costs, double dNumGroups)
Definition: planner.c:4048
static Path * make_ordered_path(PlannerInfo *root, RelOptInfo *rel, Path *path, Path *cheapest_path, List *pathkeys)
Definition: planner.c:6791
tree ctl root
Definition: radixtree.h:1884
static struct subre * parse(struct vars *v, int stopper, int type, struct state *init, struct state *final)
Definition: regcomp.c:715
AggClauseCosts agg_final_costs
Definition: pathnodes.h:3282
Definition: pg_list.h:54
List * pathkeys
Definition: pathnodes.h:1476
List * clauses
Definition: pathnodes.h:1477
struct PathTarget * reltarget
Definition: pathnodes.h:883
List * pathlist
Definition: pathnodes.h:888
struct Path * cheapest_total_path
Definition: pathnodes.h:892
List * partial_pathlist
Definition: pathnodes.h:890

References add_path(), GroupPathExtraData::agg_final_costs, AGG_HASHED, AGG_PLAIN, AGG_SORTED, AGGSPLIT_FINAL_DESERIAL, AGGSPLIT_SIMPLE, Assert, RelOptInfo::cheapest_total_path, PathKeyInfo::clauses, consider_groupingsets_paths(), create_agg_path(), create_group_path(), GroupPathExtraData::flags, gather_grouping_paths(), get_useful_group_keys_orderings(), GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, GroupPathExtraData::havingQual, if(), lfirst, list_length(), make_ordered_path(), NIL, parse(), RelOptInfo::partial_pathlist, PathKeyInfo::pathkeys, RelOptInfo::pathlist, RelOptInfo::reltarget, and root.

Referenced by create_ordinary_grouping_paths().
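
The GROUPING_CAN_USE_SORT and GROUPING_CAN_USE_HASH bits consulted here are derived in create_grouping_paths() from the sortability and hashability of the grouping columns. A paraphrased, simplified sketch of that setup (the real code applies further conditions, e.g. ordered aggregates disallow hashing):

    /* Paraphrased, simplified sketch of how extra->flags is derived */
    GroupPathExtraData extra;
    int flags = 0;

    if (grouping_is_sortable(root->processed_groupClause))
        flags |= GROUPING_CAN_USE_SORT;
    if (grouping_is_hashable(root->processed_groupClause))
        flags |= GROUPING_CAN_USE_HASH;
    extra.flags = flags;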

◆ adjust_group_pathkeys_for_groupagg()

static void adjust_group_pathkeys_for_groupagg ( PlannerInfo * root)
static

Definition at line 3173 of file planner.c.

3174 {
3175  List *grouppathkeys = root->group_pathkeys;
3176  List *bestpathkeys;
3177  Bitmapset *bestaggs;
3178  Bitmapset *unprocessed_aggs;
3179  ListCell *lc;
3180  int i;
3181 
3182  /* Shouldn't be here if there are grouping sets */
3183  Assert(root->parse->groupingSets == NIL);
3184  /* Shouldn't be here unless there are some ordered aggregates */
3185  Assert(root->numOrderedAggs > 0);
3186 
3187  /* Do nothing if disabled */
3188  if (!enable_presorted_aggregate)
3189  return;
3190 
3191  /*
3192  * Make a first pass over all AggInfos to collect a Bitmapset containing
3193  * the indexes of all AggInfos to be processed below.
3194  */
3195  unprocessed_aggs = NULL;
3196  foreach(lc, root->agginfos)
3197  {
3198  AggInfo *agginfo = lfirst_node(AggInfo, lc);
3199  Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3200 
3201  if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
3202  continue;
3203 
3204  /* only add aggregates with a DISTINCT or ORDER BY */
3205  if (aggref->aggdistinct != NIL || aggref->aggorder != NIL)
3206  unprocessed_aggs = bms_add_member(unprocessed_aggs,
3207  foreach_current_index(lc));
3208  }
3209 
3210  /*
3211  * Now process all the unprocessed_aggs to find the best set of pathkeys
3212  * for the given set of aggregates.
3213  *
3214  * On the first outer loop here 'bestaggs' will be empty. We'll populate
3215  * this during the first loop using the pathkeys for the very first
3216  * AggInfo then taking any stronger pathkeys from any other AggInfos with
3217  * a more strict set of compatible pathkeys. Once the outer loop is
3218  * complete, we mark off all the aggregates with compatible pathkeys then
3219  * remove those from the unprocessed_aggs and repeat the process to try to
3220  * find another set of pathkeys that are suitable for a larger number of
3221  * aggregates. The outer loop will stop when there are not enough
3222  * unprocessed aggregates for it to be possible to find a set of pathkeys
3223  * to suit a larger number of aggregates.
3224  */
3225  bestpathkeys = NIL;
3226  bestaggs = NULL;
3227  while (bms_num_members(unprocessed_aggs) > bms_num_members(bestaggs))
3228  {
3229  Bitmapset *aggindexes = NULL;
3230  List *currpathkeys = NIL;
3231 
3232  i = -1;
3233  while ((i = bms_next_member(unprocessed_aggs, i)) >= 0)
3234  {
3235  AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3236  Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3237  List *sortlist;
3238  List *pathkeys;
3239 
3240  if (aggref->aggdistinct != NIL)
3241  sortlist = aggref->aggdistinct;
3242  else
3243  sortlist = aggref->aggorder;
3244 
3245  pathkeys = make_pathkeys_for_sortclauses(root, sortlist,
3246  aggref->args);
3247 
3248  /*
3249  * Ignore Aggrefs which have volatile functions in their ORDER BY
3250  * or DISTINCT clause.
3251  */
3252  if (has_volatile_pathkey(pathkeys))
3253  {
3254  unprocessed_aggs = bms_del_member(unprocessed_aggs, i);
3255  continue;
3256  }
3257 
3258  /*
3259  * When not set yet, take the pathkeys from the first unprocessed
3260  * aggregate.
3261  */
3262  if (currpathkeys == NIL)
3263  {
3264  currpathkeys = pathkeys;
3265 
3266  /* include the GROUP BY pathkeys, if they exist */
3267  if (grouppathkeys != NIL)
3268  currpathkeys = append_pathkeys(list_copy(grouppathkeys),
3269  currpathkeys);
3270 
3271  /* record that we found pathkeys for this aggregate */
3272  aggindexes = bms_add_member(aggindexes, i);
3273  }
3274  else
3275  {
3276  /* now look for a stronger set of matching pathkeys */
3277 
3278  /* include the GROUP BY pathkeys, if they exist */
3279  if (grouppathkeys != NIL)
3280  pathkeys = append_pathkeys(list_copy(grouppathkeys),
3281  pathkeys);
3282 
3283  /* are 'pathkeys' compatible or better than 'currpathkeys'? */
3284  switch (compare_pathkeys(currpathkeys, pathkeys))
3285  {
3286  case PATHKEYS_BETTER2:
3287  /* 'pathkeys' are stronger, use these ones instead */
3288  currpathkeys = pathkeys;
3289  /* FALLTHROUGH */
3290 
3291  case PATHKEYS_BETTER1:
3292  /* 'pathkeys' are less strict */
3293  /* FALLTHROUGH */
3294 
3295  case PATHKEYS_EQUAL:
3296  /* mark this aggregate as covered by 'currpathkeys' */
3297  aggindexes = bms_add_member(aggindexes, i);
3298  break;
3299 
3300  case PATHKEYS_DIFFERENT:
3301  break;
3302  }
3303  }
3304  }
3305 
3306  /* remove the aggregates that we've just processed */
3307  unprocessed_aggs = bms_del_members(unprocessed_aggs, aggindexes);
3308 
3309  /*
3310  * If this pass included more aggregates than the previous best then
3311  * use these ones as the best set.
3312  */
3313  if (bms_num_members(aggindexes) > bms_num_members(bestaggs))
3314  {
3315  bestaggs = aggindexes;
3316  bestpathkeys = currpathkeys;
3317  }
3318  }
3319 
3320  /*
3321  * If we found any ordered aggregates, update root->group_pathkeys to add
3322  * the best set of aggregate pathkeys. Note that bestpathkeys includes
3323  * the original GROUP BY pathkeys already.
3324  */
3325  if (bestpathkeys != NIL)
3326  root->group_pathkeys = bestpathkeys;
3327 
3328  /*
3329  * Now that we've found the best set of aggregates we can set the
3330  * presorted flag to indicate to the executor that it needn't bother
3331  * performing a sort for these Aggrefs. We're able to do this now as
3332  * there's no chance of a Hash Aggregate plan as create_grouping_paths
3333  * will not mark the GROUP BY as GROUPING_CAN_USE_HASH due to the presence
3334  * of ordered aggregates.
3335  */
3336  i = -1;
3337  while ((i = bms_next_member(bestaggs, i)) >= 0)
3338  {
3339  AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3340 
3341  foreach(lc, agginfo->aggrefs)
3342  {
3343  Aggref *aggref = lfirst_node(Aggref, lc);
3344 
3345  aggref->aggpresorted = true;
3346  }
3347  }
3348 }
int bms_next_member(const Bitmapset *a, int prevbit)
Definition: bitmapset.c:1306
int bms_num_members(const Bitmapset *a)
Definition: bitmapset.c:751
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:815
Bitmapset * bms_del_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:1161
Bitmapset * bms_del_member(Bitmapset *a, int x)
Definition: bitmapset.c:868
bool enable_presorted_aggregate
Definition: costsize.c:153
int i
Definition: isn.c:73
List * list_copy(const List *oldlist)
Definition: list.c:1573
List * append_pathkeys(List *target, List *source)
Definition: pathkeys.c:106
List * make_pathkeys_for_sortclauses(PlannerInfo *root, List *sortclauses, List *tlist)
Definition: pathkeys.c:1347
PathKeysComparison compare_pathkeys(List *keys1, List *keys2)
Definition: pathkeys.c:302
@ PATHKEYS_BETTER2
Definition: paths.h:207
@ PATHKEYS_BETTER1
Definition: paths.h:206
@ PATHKEYS_DIFFERENT
Definition: paths.h:208
@ PATHKEYS_EQUAL
Definition: paths.h:205
#define lfirst_node(type, lc)
Definition: pg_list.h:176
#define linitial_node(type, l)
Definition: pg_list.h:181
#define foreach_current_index(var_or_cell)
Definition: pg_list.h:403
#define list_nth_node(type, list, n)
Definition: pg_list.h:327
static bool has_volatile_pathkey(List *keys)
Definition: planner.c:3128
List * aggrefs
Definition: pathnodes.h:3363
List * aggdistinct
Definition: primnodes.h:474
List * args
Definition: primnodes.h:468
List * aggorder
Definition: primnodes.h:471

References Aggref::aggdistinct, Aggref::aggorder, AggInfo::aggrefs, append_pathkeys(), Aggref::args, Assert, bms_add_member(), bms_del_member(), bms_del_members(), bms_next_member(), bms_num_members(), compare_pathkeys(), enable_presorted_aggregate, foreach_current_index, has_volatile_pathkey(), i, lfirst_node, linitial_node, list_copy(), list_nth_node, make_pathkeys_for_sortclauses(), NIL, PATHKEYS_BETTER1, PATHKEYS_BETTER2, PATHKEYS_DIFFERENT, PATHKEYS_EQUAL, and root.

Referenced by standard_qp_callback().
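
For example, a query like SELECT a, array_agg(b ORDER BY b) FROM t GROUP BY a can have its aggregate ORDER BY folded into root->group_pathkeys, turning (a) into (a, b); the Aggref is then marked aggpresorted so the executor skips its per-group sort. The "stronger" test used above reduces to a prefix comparison: one pathkey list beats another when the other is a proper prefix of it. A self-contained toy model of that comparison (plain ints standing in for pathkeys; not the PostgreSQL API):

    #include <stdio.h>

    typedef enum { KEYS_EQUAL, KEYS_BETTER1, KEYS_BETTER2, KEYS_DIFFERENT } KeysCmp;

    /* Longer list wins when the shorter one is its prefix, mirroring the
     * PATHKEYS_BETTER1/PATHKEYS_BETTER2 outcomes used above. */
    static KeysCmp
    compare_int_keys(const int *k1, int n1, const int *k2, int n2)
    {
        int common = (n1 < n2) ? n1 : n2;

        for (int i = 0; i < common; i++)
            if (k1[i] != k2[i])
                return KEYS_DIFFERENT;  /* orders diverge: neither helps the other */
        if (n1 == n2)
            return KEYS_EQUAL;
        return (n1 > n2) ? KEYS_BETTER1 : KEYS_BETTER2;
    }

    int main(void)
    {
        int group_by[] = {1};       /* GROUP BY a      -> (a)    */
        int with_agg[] = {1, 2};    /* plus ORDER BY b -> (a, b) */

        /* prints 1 (KEYS_BETTER1): the aggregate's ordering subsumes GROUP BY's */
        printf("%d\n", compare_int_keys(with_agg, 2, group_by, 1));
        return 0;
    }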

◆ adjust_paths_for_srfs()

static void adjust_paths_for_srfs ( PlannerInfo * root,
RelOptInfo * rel,
List * targets,
List * targets_contain_srfs 
)
static

Definition at line 6341 of file planner.c.

6343 {
6344  ListCell *lc;
6345 
6346  Assert(list_length(targets) == list_length(targets_contain_srfs));
6347  Assert(!linitial_int(targets_contain_srfs));
6348 
6349  /* If no SRFs appear at this plan level, nothing to do */
6350  if (list_length(targets) == 1)
6351  return;
6352 
6353  /*
6354  * Stack SRF-evaluation nodes atop each path for the rel.
6355  *
6356  * In principle we should re-run set_cheapest() here to identify the
6357  * cheapest path, but it seems unlikely that adding the same tlist eval
6358  * costs to all the paths would change that, so we don't bother. Instead,
6359  * just assume that the cheapest-startup and cheapest-total paths remain
6360  * so. (There should be no parameterized paths anymore, so we needn't
6361  * worry about updating cheapest_parameterized_paths.)
6362  */
6363  foreach(lc, rel->pathlist)
6364  {
6365  Path *subpath = (Path *) lfirst(lc);
6366  Path *newpath = subpath;
6367  ListCell *lc1,
6368  *lc2;
6369 
6370  Assert(subpath->param_info == NULL);
6371  forboth(lc1, targets, lc2, targets_contain_srfs)
6372  {
6373  PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6374  bool contains_srfs = (bool) lfirst_int(lc2);
6375 
6376  /* If this level doesn't contain SRFs, do regular projection */
6377  if (contains_srfs)
6378  newpath = (Path *) create_set_projection_path(root,
6379  rel,
6380  newpath,
6381  thistarget);
6382  else
6383  newpath = (Path *) apply_projection_to_path(root,
6384  rel,
6385  newpath,
6386  thistarget);
6387  }
6388  lfirst(lc) = newpath;
6389  if (subpath == rel->cheapest_startup_path)
6390  rel->cheapest_startup_path = newpath;
6391  if (subpath == rel->cheapest_total_path)
6392  rel->cheapest_total_path = newpath;
6393  }
6394 
6395  /* Likewise for partial paths, if any */
6396  foreach(lc, rel->partial_pathlist)
6397  {
6398  Path *subpath = (Path *) lfirst(lc);
6399  Path *newpath = subpath;
6400  ListCell *lc1,
6401  *lc2;
6402 
6403  Assert(subpath->param_info == NULL);
6404  forboth(lc1, targets, lc2, targets_contain_srfs)
6405  {
6406  PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6407  bool contains_srfs = (bool) lfirst_int(lc2);
6408 
6409  /* If this level doesn't contain SRFs, do regular projection */
6410  if (contains_srfs)
6411  newpath = (Path *) create_set_projection_path(root,
6412  rel,
6413  newpath,
6414  thistarget);
6415  else
6416  {
6417  /* avoid apply_projection_to_path, in case of multiple refs */
6418  newpath = (Path *) create_projection_path(root,
6419  rel,
6420  newpath,
6421  thistarget);
6422  }
6423  }
6424  lfirst(lc) = newpath;
6425  }
6426 }
unsigned char bool
Definition: c.h:456
Datum subpath(PG_FUNCTION_ARGS)
Definition: ltree_op.c:310
ProjectionPath * create_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2685
ProjectSetPath * create_set_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2882
Path * apply_projection_to_path(PlannerInfo *root, RelOptInfo *rel, Path *path, PathTarget *target)
Definition: pathnode.c:2793
#define forboth(cell1, list1, cell2, list2)
Definition: pg_list.h:518
#define lfirst_int(lc)
Definition: pg_list.h:173
#define linitial_int(l)
Definition: pg_list.h:179
struct Path * cheapest_startup_path
Definition: pathnodes.h:891

References apply_projection_to_path(), Assert, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, create_projection_path(), create_set_projection_path(), forboth, lfirst, lfirst_int, lfirst_node, linitial_int, list_length(), RelOptInfo::partial_pathlist, RelOptInfo::pathlist, root, and subpath().

Referenced by apply_scanjoin_target_to_paths(), and grouping_planner().
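
The targets and targets_contain_srfs arguments are parallel lists built by split_pathtarget_at_srfs(), one entry per SRF-evaluation level, with the bottom level guaranteed SRF-free (hence the Assert on linitial_int above). A minimal standalone illustration of that contract, using an assumed query SELECT x, generate_series(1, x) FROM t:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        /* Assumed two-level split: level 0 computes plain columns,
         * level 1 adds the set-returning function on top. */
        const char *targets[] = { "{x}", "{x, generate_series(1, x)}" };
        bool contain_srfs[] = { false, true };  /* level 0 is always SRF-free */

        for (int lvl = 0; lvl < 2; lvl++)
            printf("level %d: %s -> %s\n", lvl, targets[lvl],
                   contain_srfs[lvl] ? "ProjectSetPath" : "plain projection");
        return 0;
    }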

◆ apply_scanjoin_target_to_paths()

static void apply_scanjoin_target_to_paths ( PlannerInfo * root,
RelOptInfo * rel,
List * scanjoin_targets,
List * scanjoin_targets_contain_srfs,
bool  scanjoin_target_parallel_safe,
bool  tlist_same_exprs 
)
static

Definition at line 7503 of file planner.c.

7509 {
7510  bool rel_is_partitioned = IS_PARTITIONED_REL(rel);
7511  PathTarget *scanjoin_target;
7512  ListCell *lc;
7513 
7514  /* This recurses, so be paranoid. */
7515  check_stack_depth();
7516 
7517  /*
7518  * If the rel is partitioned, we want to drop its existing paths and
7519  * generate new ones. This function would still be correct if we kept the
7520  * existing paths: we'd modify them to generate the correct target above
7521  * the partitioning Append, and then they'd compete on cost with paths
7522  * generating the target below the Append. However, in our current cost
7523  * model the latter way is always the same or cheaper cost, so modifying
7524  * the existing paths would just be useless work. Moreover, when the cost
7525  * is the same, varying roundoff errors might sometimes allow an existing
7526  * path to be picked, resulting in undesirable cross-platform plan
7527  * variations. So we drop old paths and thereby force the work to be done
7528  * below the Append, except in the case of a non-parallel-safe target.
7529  *
7530  * Some care is needed, because we have to allow
7531  * generate_useful_gather_paths to see the old partial paths in the next
7532  * stanza. Hence, zap the main pathlist here, then allow
7533  * generate_useful_gather_paths to add path(s) to the main list, and
7534  * finally zap the partial pathlist.
7535  */
7536  if (rel_is_partitioned)
7537  rel->pathlist = NIL;
7538 
7539  /*
7540  * If the scan/join target is not parallel-safe, partial paths cannot
7541  * generate it.
7542  */
7543  if (!scanjoin_target_parallel_safe)
7544  {
7545  /*
7546  * Since we can't generate the final scan/join target in parallel
7547  * workers, this is our last opportunity to use any partial paths that
7548  * exist; so build Gather path(s) that use them and emit whatever the
7549  * current reltarget is. We don't do this in the case where the
7550  * target is parallel-safe, since we will be able to generate superior
7551  * paths by doing it after the final scan/join target has been
7552  * applied.
7553  */
7554  generate_useful_gather_paths(root, rel, false);
7555 
7556  /* Can't use parallel query above this level. */
7557  rel->partial_pathlist = NIL;
7558  rel->consider_parallel = false;
7559  }
7560 
7561  /* Finish dropping old paths for a partitioned rel, per comment above */
7562  if (rel_is_partitioned)
7563  rel->partial_pathlist = NIL;
7564 
7565  /* Extract SRF-free scan/join target. */
7566  scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
7567 
7568  /*
7569  * Apply the SRF-free scan/join target to each existing path.
7570  *
7571  * If the tlist exprs are the same, we can just inject the sortgroupref
7572  * information into the existing pathtargets. Otherwise, replace each
7573  * path with a projection path that generates the SRF-free scan/join
7574  * target. This can't change the ordering of paths within rel->pathlist,
7575  * so we just modify the list in place.
7576  */
7577  foreach(lc, rel->pathlist)
7578  {
7579  Path *subpath = (Path *) lfirst(lc);
7580 
7581  /* Shouldn't have any parameterized paths anymore */
7582  Assert(subpath->param_info == NULL);
7583 
7584  if (tlist_same_exprs)
7585  subpath->pathtarget->sortgrouprefs =
7586  scanjoin_target->sortgrouprefs;
7587  else
7588  {
7589  Path *newpath;
7590 
7591  newpath = (Path *) create_projection_path(root, rel, subpath,
7592  scanjoin_target);
7593  lfirst(lc) = newpath;
7594  }
7595  }
7596 
7597  /* Likewise adjust the targets for any partial paths. */
7598  foreach(lc, rel->partial_pathlist)
7599  {
7600  Path *subpath = (Path *) lfirst(lc);
7601 
7602  /* Shouldn't have any parameterized paths anymore */
7603  Assert(subpath->param_info == NULL);
7604 
7605  if (tlist_same_exprs)
7606  subpath->pathtarget->sortgrouprefs =
7607  scanjoin_target->sortgrouprefs;
7608  else
7609  {
7610  Path *newpath;
7611 
7612  newpath = (Path *) create_projection_path(root, rel, subpath,
7613  scanjoin_target);
7614  lfirst(lc) = newpath;
7615  }
7616  }
7617 
7618  /*
7619  * Now, if final scan/join target contains SRFs, insert ProjectSetPath(s)
7620  * atop each existing path. (Note that this function doesn't look at the
7621  * cheapest-path fields, which is a good thing because they're bogus right
7622  * now.)
7623  */
7624  if (root->parse->hasTargetSRFs)
7625  adjust_paths_for_srfs(root, rel,
7626  scanjoin_targets,
7627  scanjoin_targets_contain_srfs);
7628 
7629  /*
7630  * Update the rel's target to be the final (with SRFs) scan/join target.
7631  * This now matches the actual output of all the paths, and we might get
7632  * confused in createplan.c if they don't agree. We must do this now so
7633  * that any append paths made in the next part will use the correct
7634  * pathtarget (cf. create_append_path).
7635  *
7636  * Note that this is also necessary if GetForeignUpperPaths() gets called
7637  * on the final scan/join relation or on any of its children, since the
7638  * FDW might look at the rel's target to create ForeignPaths.
7639  */
7640  rel->reltarget = llast_node(PathTarget, scanjoin_targets);
7641 
7642  /*
7643  * If the relation is partitioned, recursively apply the scan/join target
7644  * to all partitions, and generate brand-new Append paths in which the
7645  * scan/join target is computed below the Append rather than above it.
7646  * Since Append is not projection-capable, that might save a separate
7647  * Result node, and it also is important for partitionwise aggregate.
7648  */
7649  if (rel_is_partitioned)
7650  {
7651  List *live_children = NIL;
7652  int i;
7653 
7654  /* Adjust each partition. */
7655  i = -1;
7656  while ((i = bms_next_member(rel->live_parts, i)) >= 0)
7657  {
7658  RelOptInfo *child_rel = rel->part_rels[i];
7659  AppendRelInfo **appinfos;
7660  int nappinfos;
7661  List *child_scanjoin_targets = NIL;
7662 
7663  Assert(child_rel != NULL);
7664 
7665  /* Dummy children can be ignored. */
7666  if (IS_DUMMY_REL(child_rel))
7667  continue;
7668 
7669  /* Translate scan/join targets for this child. */
7670  appinfos = find_appinfos_by_relids(root, child_rel->relids,
7671  &nappinfos);
7672  foreach(lc, scanjoin_targets)
7673  {
7674  PathTarget *target = lfirst_node(PathTarget, lc);
7675 
7676  target = copy_pathtarget(target);
7677  target->exprs = (List *)
7678  adjust_appendrel_attrs(root,
7679  (Node *) target->exprs,
7680  nappinfos, appinfos);
7681  child_scanjoin_targets = lappend(child_scanjoin_targets,
7682  target);
7683  }
7684  pfree(appinfos);
7685 
7686  /* Recursion does the real work. */
7687  apply_scanjoin_target_to_paths(root, child_rel,
7688  child_scanjoin_targets,
7689  scanjoin_targets_contain_srfs,
7690  scanjoin_target_parallel_safe,
7691  tlist_same_exprs);
7692 
7693  /* Save non-dummy children for Append paths. */
7694  if (!IS_DUMMY_REL(child_rel))
7695  live_children = lappend(live_children, child_rel);
7696  }
7697 
7698  /* Build new paths for this relation by appending child paths. */
7699  add_paths_to_append_rel(root, rel, live_children);
7700  }
7701 
7702  /*
7703  * Consider generating Gather or Gather Merge paths. We must only do this
7704  * if the relation is parallel safe, and we don't do it for child rels to
7705  * avoid creating multiple Gather nodes within the same plan. We must do
7706  * this after all paths have been generated and before set_cheapest, since
7707  * one of the generated paths may turn out to be the cheapest one.
7708  */
7709  if (rel->consider_parallel && !IS_OTHER_REL(rel))
7710  generate_useful_gather_paths(root, rel, false);
7711 
7712  /*
7713  * Reassess which paths are the cheapest, now that we've potentially added
7714  * new Gather (or Gather Merge) and/or Append (or MergeAppend) paths to
7715  * this relation.
7716  */
7717  set_cheapest(rel);
7718 }
void generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
Definition: allpaths.c:3202
void add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, List *live_childrels)
Definition: allpaths.c:1302
AppendRelInfo ** find_appinfos_by_relids(PlannerInfo *root, Relids relids, int *nappinfos)
Definition: appendinfo.c:733
Node * adjust_appendrel_attrs(PlannerInfo *root, Node *node, int nappinfos, AppendRelInfo **appinfos)
Definition: appendinfo.c:196
List * lappend(List *list, void *datum)
Definition: list.c:339
void pfree(void *pointer)
Definition: mcxt.c:1520
void set_cheapest(RelOptInfo *parent_rel)
Definition: pathnode.c:242
#define IS_DUMMY_REL(r)
Definition: pathnodes.h:1935
#define IS_PARTITIONED_REL(rel)
Definition: pathnodes.h:1052
#define IS_OTHER_REL(rel)
Definition: pathnodes.h:844
#define llast_node(type, l)
Definition: pg_list.h:202
static void apply_scanjoin_target_to_paths(PlannerInfo *root, RelOptInfo *rel, List *scanjoin_targets, List *scanjoin_targets_contain_srfs, bool scanjoin_target_parallel_safe, bool tlist_same_exprs)
Definition: planner.c:7503
static void adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel, List *targets, List *targets_contain_srfs)
Definition: planner.c:6341
void check_stack_depth(void)
Definition: postgres.c:3531
Definition: nodes.h:129
List * exprs
Definition: pathnodes.h:1522
Relids relids
Definition: pathnodes.h:861
bool consider_parallel
Definition: pathnodes.h:877
Bitmapset * live_parts
Definition: pathnodes.h:1029
bool tlist_same_exprs(List *tlist1, List *tlist2)
Definition: tlist.c:218
PathTarget * copy_pathtarget(PathTarget *src)
Definition: tlist.c:657

References add_paths_to_append_rel(), adjust_appendrel_attrs(), adjust_paths_for_srfs(), Assert, bms_next_member(), check_stack_depth(), RelOptInfo::consider_parallel, copy_pathtarget(), create_projection_path(), PathTarget::exprs, find_appinfos_by_relids(), generate_useful_gather_paths(), i, IS_DUMMY_REL, IS_OTHER_REL, IS_PARTITIONED_REL, lappend(), lfirst, lfirst_node, linitial_node, RelOptInfo::live_parts, llast_node, NIL, RelOptInfo::partial_pathlist, RelOptInfo::pathlist, pfree(), RelOptInfo::relids, RelOptInfo::reltarget, root, set_cheapest(), subpath(), and tlist_same_exprs().

Referenced by grouping_planner().

◆ can_partial_agg()

static bool can_partial_agg ( PlannerInfo * root)
static

Definition at line 7461 of file planner.c.

7462 {
7463  Query *parse = root->parse;
7464 
7465  if (!parse->hasAggs && parse->groupClause == NIL)
7466  {
7467  /*
7468  * We don't know how to do parallel aggregation unless we have either
7469  * some aggregates or a grouping clause.
7470  */
7471  return false;
7472  }
7473  else if (parse->groupingSets)
7474  {
7475  /* We don't know how to do grouping sets in parallel. */
7476  return false;
7477  }
7478  else if (root->hasNonPartialAggs || root->hasNonSerialAggs)
7479  {
7480  /* Insufficient support for partial mode. */
7481  return false;
7482  }
7483 
7484  /* Everything looks good. */
7485  return true;
7486 }

References NIL, parse(), and root.

Referenced by create_grouping_paths().
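
A few hypothetical queries illustrating the branches (illustrative, not from the source):

    /* Hypothetical examples for each branch:
     *
     *   SELECT 1 FROM t HAVING true;             -- no aggs, no GROUP BY: false
     *   SELECT sum(x) FROM t
     *       GROUP BY GROUPING SETS ((y), ());    -- grouping sets: false
     *   SELECT string_agg(x, ',' ORDER BY x)
     *       FROM t;                              -- ordered agg generally lacks
     *                                            -- partial support, so false
     *   SELECT count(*) FROM t GROUP BY y;       -- all checks pass: true
     */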

◆ common_prefix_cmp()

static int common_prefix_cmp ( const void *  a,
const void *  b 
)
static

Definition at line 5812 of file planner.c.

5813 {
5814  const WindowClauseSortData *wcsa = a;
5815  const WindowClauseSortData *wcsb = b;
5816  ListCell *item_a;
5817  ListCell *item_b;
5818 
5819  forboth(item_a, wcsa->uniqueOrder, item_b, wcsb->uniqueOrder)
5820  {
5821  SortGroupClause *sca = lfirst_node(SortGroupClause, item_a);
5822  SortGroupClause *scb = lfirst_node(SortGroupClause, item_b);
5823 
5824  if (sca->tleSortGroupRef > scb->tleSortGroupRef)
5825  return -1;
5826  else if (sca->tleSortGroupRef < scb->tleSortGroupRef)
5827  return 1;
5828  else if (sca->sortop > scb->sortop)
5829  return -1;
5830  else if (sca->sortop < scb->sortop)
5831  return 1;
5832  else if (sca->nulls_first && !scb->nulls_first)
5833  return -1;
5834  else if (!sca->nulls_first && scb->nulls_first)
5835  return 1;
5836  /* no need to compare eqop, since it is fully determined by sortop */
5837  }
5838 
5839  if (list_length(wcsa->uniqueOrder) > list_length(wcsb->uniqueOrder))
5840  return -1;
5841  else if (list_length(wcsa->uniqueOrder) < list_length(wcsb->uniqueOrder))
5842  return 1;
5843 
5844  return 0;
5845 }
int b
Definition: isn.c:70
int a
Definition: isn.c:69
Index tleSortGroupRef
Definition: parsenodes.h:1442

References a, b, forboth, lfirst_node, list_length(), SortGroupClause::nulls_first, SortGroupClause::sortop, SortGroupClause::tleSortGroupRef, and WindowClauseSortData::uniqueOrder.

Referenced by select_active_windows().
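
Note the comparator sorts in descending order on every field (returning -1 when a's value is larger), and on equal prefixes puts the longer ordering first; select_active_windows() relies on this to place windows sharing a common sort prefix next to each other. A self-contained toy in the same style (plain int keys standing in for the SortGroupClause fields, omitting sortop and nulls_first):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { int keys[3]; int nkeys; } Ordering;

    /* Descending on each key, then longer ordering first: the same shape
     * of comparison as common_prefix_cmp. */
    static int
    ordering_cmp(const void *a, const void *b)
    {
        const Ordering *oa = a;
        const Ordering *ob = b;
        int common = (oa->nkeys < ob->nkeys) ? oa->nkeys : ob->nkeys;

        for (int i = 0; i < common; i++)
        {
            if (oa->keys[i] > ob->keys[i]) return -1;
            if (oa->keys[i] < ob->keys[i]) return 1;
        }
        if (oa->nkeys > ob->nkeys) return -1;
        if (oa->nkeys < ob->nkeys) return 1;
        return 0;
    }

    int main(void)
    {
        Ordering w[] = { {{1}, 1}, {{1, 2}, 2}, {{3}, 1} };

        qsort(w, 3, sizeof(Ordering), ordering_cmp);
        /* result: {3}, then {1,2}, then its prefix {1} */
        for (int i = 0; i < 3; i++)
            printf("nkeys=%d first=%d\n", w[i].nkeys, w[i].keys[0]);
        return 0;
    }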

◆ consider_groupingsets_paths()

static void consider_groupingsets_paths ( PlannerInfo * root,
RelOptInfo * grouped_rel,
Path * path,
bool  is_sorted,
bool  can_hash,
grouping_sets_data * gd,
const AggClauseCosts * agg_costs,
double  dNumGroups 
)
static

Definition at line 4048 of file planner.c.

4056 {
4057  Query *parse = root->parse;
4058  Size hash_mem_limit = get_hash_memory_limit();
4059 
4060  /*
4061  * If we're not being offered sorted input, then only consider plans that
4062  * can be done entirely by hashing.
4063  *
4064  * We can hash everything if it looks like it'll fit in hash_mem. But if
4065  * the input is actually sorted despite not being advertised as such, we
4066  * prefer to make use of that in order to use less memory.
4067  *
4068  * If none of the grouping sets are sortable, then ignore the hash_mem
4069  * limit and generate a path anyway, since otherwise we'll just fail.
4070  */
4071  if (!is_sorted)
4072  {
4073  List *new_rollups = NIL;
4074  RollupData *unhashed_rollup = NULL;
4075  List *sets_data;
4076  List *empty_sets_data = NIL;
4077  List *empty_sets = NIL;
4078  ListCell *lc;
4079  ListCell *l_start = list_head(gd->rollups);
4080  AggStrategy strat = AGG_HASHED;
4081  double hashsize;
4082  double exclude_groups = 0.0;
4083 
4084  Assert(can_hash);
4085 
4086  /*
4087  * If the input is coincidentally sorted usefully (which can happen
4088  * even if is_sorted is false, since that only means that our caller
4089  * has set up the sorting for us), then save some hashtable space by
4090  * making use of that. But we need to watch out for degenerate cases:
4091  *
4092  * 1) If there are any empty grouping sets, then group_pathkeys might
4093  * be NIL if all non-empty grouping sets are unsortable. In this case,
4094  * there will be a rollup containing only empty groups, and the
4095  * pathkeys_contained_in test is vacuously true; this is ok.
4096  *
4097  * XXX: the above relies on the fact that group_pathkeys is generated
4098  * from the first rollup. If we add the ability to consider multiple
4099  * sort orders for grouping input, this assumption might fail.
4100  *
4101  * 2) If there are no empty sets and only unsortable sets, then the
4102  * rollups list will be empty (and thus l_start == NULL), and
4103  * group_pathkeys will be NIL; we must ensure that the vacuously-true
4104  * pathkeys_contained_in test doesn't cause us to crash.
4105  */
4106  if (l_start != NULL &&
4107  pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
4108  {
4109  unhashed_rollup = lfirst_node(RollupData, l_start);
4110  exclude_groups = unhashed_rollup->numGroups;
4111  l_start = lnext(gd->rollups, l_start);
4112  }
4113 
4114  hashsize = estimate_hashagg_tablesize(root,
4115  path,
4116  agg_costs,
4117  dNumGroups - exclude_groups);
4118 
4119  /*
4120  * gd->rollups is empty if we have only unsortable columns to work
4121  * with. Override hash_mem in that case; otherwise, we'll rely on the
4122  * sorted-input case to generate usable mixed paths.
4123  */
4124  if (hashsize > hash_mem_limit && gd->rollups)
4125  return; /* nope, won't fit */
4126 
4127  /*
4128  * We need to burst the existing rollups list into individual grouping
4129  * sets and recompute a groupClause for each set.
4130  */
4131  sets_data = list_copy(gd->unsortable_sets);
4132 
4133  for_each_cell(lc, gd->rollups, l_start)
4134  {
4135  RollupData *rollup = lfirst_node(RollupData, lc);
4136 
4137  /*
4138  * If we find an unhashable rollup that's not been skipped by the
4139  * "actually sorted" check above, we can't cope; we'd need sorted
4140  * input (with a different sort order) but we can't get that here.
4141  * So bail out; we'll get a valid path from the is_sorted case
4142  * instead.
4143  *
4144  * The mere presence of empty grouping sets doesn't make a rollup
4145  * unhashable (see preprocess_grouping_sets), we handle those
4146  * specially below.
4147  */
4148  if (!rollup->hashable)
4149  return;
4150 
4151  sets_data = list_concat(sets_data, rollup->gsets_data);
4152  }
4153  foreach(lc, sets_data)
4154  {
4155  GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4156  List *gset = gs->set;
4157  RollupData *rollup;
4158 
4159  if (gset == NIL)
4160  {
4161  /* Empty grouping sets can't be hashed. */
4162  empty_sets_data = lappend(empty_sets_data, gs);
4163  empty_sets = lappend(empty_sets, NIL);
4164  }
4165  else
4166  {
4167  rollup = makeNode(RollupData);
4168 
4169  rollup->groupClause = groupclause_apply_groupingset(root, gset);
4170  rollup->gsets_data = list_make1(gs);
4171  rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4172  rollup->gsets_data,
4173  gd->tleref_to_colnum_map);
4174  rollup->numGroups = gs->numGroups;
4175  rollup->hashable = true;
4176  rollup->is_hashed = true;
4177  new_rollups = lappend(new_rollups, rollup);
4178  }
4179  }
4180 
4181  /*
4182  * If we didn't find anything nonempty to hash, then bail. We'll
4183  * generate a path from the is_sorted case.
4184  */
4185  if (new_rollups == NIL)
4186  return;
4187 
4188  /*
4189  * If there were empty grouping sets they should have been in the
4190  * first rollup.
4191  */
4192  Assert(!unhashed_rollup || !empty_sets);
4193 
4194  if (unhashed_rollup)
4195  {
4196  new_rollups = lappend(new_rollups, unhashed_rollup);
4197  strat = AGG_MIXED;
4198  }
4199  else if (empty_sets)
4200  {
4201  RollupData *rollup = makeNode(RollupData);
4202 
4203  rollup->groupClause = NIL;
4204  rollup->gsets_data = empty_sets_data;
4205  rollup->gsets = empty_sets;
4206  rollup->numGroups = list_length(empty_sets);
4207  rollup->hashable = false;
4208  rollup->is_hashed = false;
4209  new_rollups = lappend(new_rollups, rollup);
4210  strat = AGG_MIXED;
4211  }
4212 
4213  add_path(grouped_rel, (Path *)
4214  create_groupingsets_path(root,
4215  grouped_rel,
4216  path,
4217  (List *) parse->havingQual,
4218  strat,
4219  new_rollups,
4220  agg_costs));
4221  return;
4222  }
4223 
4224  /*
4225  * If we have sorted input but nothing we can do with it, bail.
4226  */
4227  if (gd->rollups == NIL)
4228  return;
4229 
4230  /*
4231  * Given sorted input, we try and make two paths: one sorted and one mixed
4232  * sort/hash. (We need to try both because hashagg might be disabled, or
4233  * some columns might not be sortable.)
4234  *
4235  * can_hash is passed in as false if some obstacle elsewhere (such as
4236  * ordered aggs) means that we shouldn't consider hashing at all.
4237  */
4238  if (can_hash && gd->any_hashable)
4239  {
4240  List *rollups = NIL;
4241  List *hash_sets = list_copy(gd->unsortable_sets);
4242  double availspace = hash_mem_limit;
4243  ListCell *lc;
4244 
4245  /*
4246  * Account first for space needed for groups we can't sort at all.
4247  */
4248  availspace -= estimate_hashagg_tablesize(root,
4249  path,
4250  agg_costs,
4251  gd->dNumHashGroups);
4252 
4253  if (availspace > 0 && list_length(gd->rollups) > 1)
4254  {
4255  double scale;
4256  int num_rollups = list_length(gd->rollups);
4257  int k_capacity;
4258  int *k_weights = palloc(num_rollups * sizeof(int));
4259  Bitmapset *hash_items = NULL;
4260  int i;
4261 
4262  /*
4263  * We treat this as a knapsack problem: the knapsack capacity
4264  * represents hash_mem, the item weights are the estimated memory
4265  * usage of the hashtables needed to implement a single rollup,
4266  * and we really ought to use the cost saving as the item value;
4267  * however, currently the costs assigned to sort nodes don't
4268  * reflect the comparison costs well, and so we treat all items as
4269  * of equal value (each rollup we hash instead saves us one sort).
4270  *
4271  * To use the discrete knapsack, we need to scale the values to a
4272  * reasonably small bounded range. We choose to allow a 5% error
4273  * margin; we have no more than 4096 rollups in the worst possible
4274  * case, which with a 5% error margin will require a bit over 42MB
4275  * of workspace. (Anyone wanting to plan queries that complex had
4276  * better have the memory for it. In more reasonable cases, with
4277  * no more than a couple of dozen rollups, the memory usage will
4278  * be negligible.)
4279  *
4280  * k_capacity is naturally bounded, but we clamp the values for
4281  * scale and weight (below) to avoid overflows or underflows (or
4282  * uselessly trying to use a scale factor less than 1 byte).
4283  */
4284  scale = Max(availspace / (20.0 * num_rollups), 1.0);
4285  k_capacity = (int) floor(availspace / scale);
4286 
4287  /*
4288  * We leave the first rollup out of consideration since it's the
4289  * one that matches the input sort order. We assign indexes "i"
4290  * to only those entries considered for hashing; the second loop,
4291  * below, must use the same condition.
4292  */
4293  i = 0;
4294  for_each_from(lc, gd->rollups, 1)
4295  {
4296  RollupData *rollup = lfirst_node(RollupData, lc);
4297 
4298  if (rollup->hashable)
4299  {
4300  double sz = estimate_hashagg_tablesize(root,
4301  path,
4302  agg_costs,
4303  rollup->numGroups);
4304 
4305  /*
4306  * If sz is enormous, but hash_mem (and hence scale) is
4307  * small, avoid integer overflow here.
4308  */
4309  k_weights[i] = (int) Min(floor(sz / scale),
4310  k_capacity + 1.0);
4311  ++i;
4312  }
4313  }
4314 
4315  /*
4316  * Apply knapsack algorithm; compute the set of items which
4317  * maximizes the value stored (in this case the number of sorts
4318  * saved) while keeping the total size (approximately) within
4319  * capacity.
4320  */
4321  if (i > 0)
4322  hash_items = DiscreteKnapsack(k_capacity, i, k_weights, NULL);
4323 
4324  if (!bms_is_empty(hash_items))
4325  {
4326  rollups = list_make1(linitial(gd->rollups));
4327 
4328  i = 0;
4329  for_each_from(lc, gd->rollups, 1)
4330  {
4331  RollupData *rollup = lfirst_node(RollupData, lc);
4332 
4333  if (rollup->hashable)
4334  {
4335  if (bms_is_member(i, hash_items))
4336  hash_sets = list_concat(hash_sets,
4337  rollup->gsets_data);
4338  else
4339  rollups = lappend(rollups, rollup);
4340  ++i;
4341  }
4342  else
4343  rollups = lappend(rollups, rollup);
4344  }
4345  }
4346  }
4347 
4348  if (!rollups && hash_sets)
4349  rollups = list_copy(gd->rollups);
4350 
4351  foreach(lc, hash_sets)
4352  {
4353  GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4354  RollupData *rollup = makeNode(RollupData);
4355 
4356  Assert(gs->set != NIL);
4357 
4358  rollup->groupClause = groupclause_apply_groupingset(root, gs->set);
4359  rollup->gsets_data = list_make1(gs);
4360  rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4361  rollup->gsets_data,
4362  gd->tleref_to_colnum_map);
4363  rollup->numGroups = gs->numGroups;
4364  rollup->hashable = true;
4365  rollup->is_hashed = true;
4366  rollups = lcons(rollup, rollups);
4367  }
4368 
4369  if (rollups)
4370  {
4371  add_path(grouped_rel, (Path *)
4372  create_groupingsets_path(root,
4373  grouped_rel,
4374  path,
4375  (List *) parse->havingQual,
4376  AGG_MIXED,
4377  rollups,
4378  agg_costs));
4379  }
4380  }
4381 
4382  /*
4383  * Now try the simple sorted case.
4384  */
4385  if (!gd->unsortable_sets)
4386  add_path(grouped_rel, (Path *)
4387  create_groupingsets_path(root,
4388  grouped_rel,
4389  path,
4390  (List *) parse->havingQual,
4391  AGG_SORTED,
4392  gd->rollups,
4393  agg_costs));
4394 }
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:510
#define bms_is_empty(a)
Definition: bitmapset.h:118
#define Min(x, y)
Definition: c.h:1004
#define Max(x, y)
Definition: c.h:998
size_t Size
Definition: c.h:605
Bitmapset * DiscreteKnapsack(int max_weight, int num_items, int *item_weights, double *item_values)
Definition: knapsack.c:52
List * list_concat(List *list1, const List *list2)
Definition: list.c:561
List * lcons(void *datum, List *list)
Definition: list.c:495
void * palloc(Size size)
Definition: mcxt.c:1316
size_t get_hash_memory_limit(void)
Definition: nodeHash.c:3595
AggStrategy
Definition: nodes.h:352
@ AGG_MIXED
Definition: nodes.h:356
#define makeNode(_type_)
Definition: nodes.h:155
bool pathkeys_contained_in(List *keys1, List *keys2)
Definition: pathkeys.c:341
GroupingSetsPath * create_groupingsets_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *having_qual, AggStrategy aggstrategy, List *rollups, const AggClauseCosts *agg_costs)
Definition: pathnode.c:3237
#define list_make1(x1)
Definition: pg_list.h:212
#define for_each_cell(cell, lst, initcell)
Definition: pg_list.h:438
static ListCell * list_head(const List *l)
Definition: pg_list.h:128
#define for_each_from(cell, lst, N)
Definition: pg_list.h:414
#define linitial(l)
Definition: pg_list.h:178
static ListCell * lnext(const List *l, const ListCell *c)
Definition: pg_list.h:343
int scale
Definition: pgbench.c:181
static List * groupclause_apply_groupingset(PlannerInfo *root, List *force)
Definition: planner.c:2832
static List * remap_to_groupclause_idx(List *groupClause, List *gsets, int *tleref_to_colnum_map)
Definition: planner.c:2228
double estimate_hashagg_tablesize(PlannerInfo *root, Path *path, const AggClauseCosts *agg_costs, double dNumGroups)
Definition: selfuncs.c:3917
Cardinality numGroups
Definition: pathnodes.h:2262
List * pathkeys
Definition: pathnodes.h:1654
Cardinality numGroups
Definition: pathnodes.h:2273
List * groupClause
Definition: pathnodes.h:2270
List * gsets_data
Definition: pathnodes.h:2272
bool hashable
Definition: pathnodes.h:2274
List * gsets
Definition: pathnodes.h:2271
bool is_hashed
Definition: pathnodes.h:2275
int * tleref_to_colnum_map
Definition: planner.c:104
List * rollups
Definition: planner.c:97
List * unsortable_sets
Definition: planner.c:103
double dNumHashGroups
Definition: planner.c:99

References add_path(), AGG_HASHED, AGG_MIXED, AGG_SORTED, grouping_sets_data::any_hashable, Assert, bms_is_empty, bms_is_member(), create_groupingsets_path(), DiscreteKnapsack(), grouping_sets_data::dNumHashGroups, estimate_hashagg_tablesize(), for_each_cell, for_each_from, get_hash_memory_limit(), RollupData::groupClause, groupclause_apply_groupingset(), RollupData::gsets, RollupData::gsets_data, RollupData::hashable, i, RollupData::is_hashed, lappend(), lcons(), lfirst_node, linitial, list_concat(), list_copy(), list_head(), list_length(), list_make1, lnext(), makeNode, Max, Min, NIL, GroupingSetData::numGroups, RollupData::numGroups, palloc(), parse(), Path::pathkeys, pathkeys_contained_in(), remap_to_groupclause_idx(), grouping_sets_data::rollups, root, scale, GroupingSetData::set, grouping_sets_data::tleref_to_colnum_map, and grouping_sets_data::unsortable_sets.

Referenced by add_paths_to_grouping_rel().
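
The References list above shows the two mechanisms this function leans on: estimate_hashagg_tablesize() to cost each rollup's hash table against get_hash_memory_limit(), and DiscreteKnapsack() to pick which rollups to hash. As a rough standalone sketch of that knapsack idea (illustrative weights, values, and budget only; the real DiscreteKnapsack() in lib/knapsack.c operates on Bitmapsets and prefers lighter selections on ties):

/* 0/1 knapsack: weight = scaled hash-table size estimate of a rollup,
 * value = number of sort steps avoided by hashing it. */
#include <stdio.h>

#define NITEMS 3
#define BUDGET 50

int
main(void)
{
    int     weights[NITEMS] = {10, 25, 40};
    double  values[NITEMS] = {1.0, 2.0, 3.0};
    double  dp[NITEMS + 1][BUDGET + 1] = {{0.0}};
    int     w;

    for (int i = 1; i <= NITEMS; i++)
        for (w = 0; w <= BUDGET; w++)
        {
            dp[i][w] = dp[i - 1][w];    /* option: keep sorting item i-1 */
            if (w >= weights[i - 1] &&
                dp[i - 1][w - weights[i - 1]] + values[i - 1] > dp[i][w])
                dp[i][w] = dp[i - 1][w - weights[i - 1]] + values[i - 1];
        }

    /* walk the table backwards to recover the chosen subset */
    w = BUDGET;
    for (int i = NITEMS; i > 0; i--)
        if (dp[i][w] != dp[i - 1][w])
        {
            printf("hash rollup %d (weight %d)\n", i - 1, weights[i - 1]);
            w -= weights[i - 1];
        }
    printf("sorts avoided: %.1f\n", dp[NITEMS][BUDGET]);
    return 0;
}

With these numbers the selection is rollups 0 and 2 (total weight 50, value 4.0), i.e. hash the two grouping sets whose combined table size still fits the budget.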

◆ create_degenerate_grouping_paths()

static void create_degenerate_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo grouped_rel 
)
static

Definition at line 3845 of file planner.c.

3847 {
3848  Query *parse = root->parse;
3849  int nrows;
3850  Path *path;
3851 
3852  nrows = list_length(parse->groupingSets);
3853  if (nrows > 1)
3854  {
3855  /*
3856  * Doesn't seem worthwhile writing code to cons up a generate_series
3857  * or a values scan to emit multiple rows. Instead just make N clones
3858  * and append them. (With a volatile HAVING clause, this means you
3859  * might get between 0 and N output rows. Offhand I think that's
3860  * desired.)
3861  */
3862  List *paths = NIL;
3863 
3864  while (--nrows >= 0)
3865  {
3866  path = (Path *)
3867  create_group_result_path(root, grouped_rel,
3868  grouped_rel->reltarget,
3869  (List *) parse->havingQual);
3870  paths = lappend(paths, path);
3871  }
3872  path = (Path *)
3873  create_append_path(root,
3874  grouped_rel,
3875  paths,
3876  NIL,
3877  NIL,
3878  NULL,
3879  0,
3880  false,
3881  -1);
3882  }
3883  else
3884  {
3885  /* No grouping sets, or just one, so one output row */
3886  path = (Path *)
3887  create_group_result_path(root, grouped_rel,
3888  grouped_rel->reltarget,
3889  (List *) parse->havingQual);
3890  }
3891 
3892  add_path(grouped_rel, path);
3893 }
AppendPath * create_append_path(PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *partial_subpaths, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows)
Definition: pathnode.c:1244
GroupResultPath * create_group_result_path(PlannerInfo *root, RelOptInfo *rel, PathTarget *target, List *havingqual)
Definition: pathnode.c:1518

References add_path(), create_append_path(), create_group_result_path(), lappend(), list_length(), NIL, parse(), RelOptInfo::reltarget, and root.

Referenced by create_grouping_paths().
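
The 0-to-N behavior described in the comment comes from cloning the Result path once per grouping set and letting each clone re-evaluate the HAVING qual. A toy standalone illustration (not PostgreSQL code) of why a volatile qual makes the row count nondeterministic:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int
main(void)
{
    int nrows = 3;      /* list_length(parse->groupingSets) */
    int emitted = 0;

    srand((unsigned) time(NULL));
    for (int i = 0; i < nrows; i++)
    {
        /* stand-in for one clone re-evaluating a volatile HAVING qual */
        if (rand() % 2)
            emitted++;
    }
    printf("emitted %d of %d possible rows\n", emitted, nrows);
    return 0;
}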

◆ create_distinct_paths()

static RelOptInfo * create_distinct_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget target 
)
static

Definition at line 4630 of file planner.c.

4632 {
4633  RelOptInfo *distinct_rel;
4634 
4635  /* For now, do all work in the (DISTINCT, NULL) upperrel */
4636  distinct_rel = fetch_upper_rel(root, UPPERREL_DISTINCT, NULL);
4637 
4638  /*
4639  * We don't compute anything at this level, so distinct_rel will be
4640  * parallel-safe if the input rel is parallel-safe. In particular, if
4641  * there is a DISTINCT ON (...) clause, any path for the input_rel will
4642  * output those expressions, and will not be parallel-safe unless those
4643  * expressions are parallel-safe.
4644  */
4645  distinct_rel->consider_parallel = input_rel->consider_parallel;
4646 
4647  /*
4648  * If the input rel belongs to a single FDW, so does the distinct_rel.
4649  */
4650  distinct_rel->serverid = input_rel->serverid;
4651  distinct_rel->userid = input_rel->userid;
4652  distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4653  distinct_rel->fdwroutine = input_rel->fdwroutine;
4654 
4655  /* build distinct paths based on input_rel's pathlist */
4656  create_final_distinct_paths(root, input_rel, distinct_rel);
4657 
4658  /* now build distinct paths based on input_rel's partial_pathlist */
4659  create_partial_distinct_paths(root, input_rel, distinct_rel, target);
4660 
4661  /* Give a helpful error if we failed to create any paths */
4662  if (distinct_rel->pathlist == NIL)
4663  ereport(ERROR,
4664  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4665  errmsg("could not implement DISTINCT"),
4666  errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4667 
4668  /*
4669  * If there is an FDW that's responsible for all baserels of the query,
4670  * let it consider adding ForeignPaths.
4671  */
4672  if (distinct_rel->fdwroutine &&
4673  distinct_rel->fdwroutine->GetForeignUpperPaths)
4674  distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4675  UPPERREL_DISTINCT,
4676  input_rel,
4677  distinct_rel,
4678  NULL);
4679 
4680  /* Let extensions possibly add some more paths */
4681  if (create_upper_paths_hook)
4682  (*create_upper_paths_hook) (root, UPPERREL_DISTINCT, input_rel,
4683  distinct_rel, NULL);
4684 
4685  /* Now choose the best path(s) */
4686  set_cheapest(distinct_rel);
4687 
4688  return distinct_rel;
4689 }
int errdetail(const char *fmt,...)
Definition: elog.c:1205
int errcode(int sqlerrcode)
Definition: elog.c:859
int errmsg(const char *fmt,...)
Definition: elog.c:1072
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
@ UPPERREL_DISTINCT
Definition: pathnodes.h:77
static RelOptInfo * create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *distinct_rel)
Definition: planner.c:4899
static void create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *final_distinct_rel, PathTarget *target)
Definition: planner.c:4700
create_upper_paths_hook_type create_upper_paths_hook
Definition: planner.c:74
RelOptInfo * fetch_upper_rel(PlannerInfo *root, UpperRelationKind kind, Relids relids)
Definition: relnode.c:1470
bool useridiscurrent
Definition: pathnodes.h:958
Oid userid
Definition: pathnodes.h:956
Oid serverid
Definition: pathnodes.h:954

References RelOptInfo::consider_parallel, create_final_distinct_paths(), create_partial_distinct_paths(), create_upper_paths_hook, ereport, errcode(), errdetail(), errmsg(), ERROR, fetch_upper_rel(), NIL, RelOptInfo::pathlist, root, RelOptInfo::serverid, set_cheapest(), UPPERREL_DISTINCT, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by grouping_planner().
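
The create_upper_paths_hook invocation just before set_cheapest() is the extension point visible in this function. A minimal sketch of an extension module that chains onto it (the my_* names are hypothetical; the hook type and variable are declared in optimizer/planner.h):

#include "postgres.h"
#include "fmgr.h"
#include "optimizer/planner.h"

PG_MODULE_MAGIC;

static create_upper_paths_hook_type prev_create_upper_paths_hook = NULL;

static void
my_upper_paths(PlannerInfo *root, UpperRelationKind stage,
               RelOptInfo *input_rel, RelOptInfo *output_rel,
               void *extra)
{
    /* always chain to any previously installed hook */
    if (prev_create_upper_paths_hook)
        prev_create_upper_paths_hook(root, stage, input_rel,
                                     output_rel, extra);

    if (stage == UPPERREL_DISTINCT)
    {
        /*
         * output_rel->pathlist already holds the sort- and hash-based
         * DISTINCT paths built above; a custom path could be added with
         * add_path() here, before set_cheapest() picks the winner.
         */
    }
}

void
_PG_init(void)
{
    prev_create_upper_paths_hook = create_upper_paths_hook;
    create_upper_paths_hook = my_upper_paths;
}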

◆ create_final_distinct_paths()

static RelOptInfo * create_final_distinct_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo distinct_rel 
)
static

Definition at line 4899 of file planner.c.

4901 {
4902  Query *parse = root->parse;
4903  Path *cheapest_input_path = input_rel->cheapest_total_path;
4904  double numDistinctRows;
4905  bool allow_hash;
4906 
4907  /* Estimate number of distinct rows there will be */
4908  if (parse->groupClause || parse->groupingSets || parse->hasAggs ||
4909  root->hasHavingQual)
4910  {
4911  /*
4912  * If there was grouping or aggregation, use the number of input rows
4913  * as the estimated number of DISTINCT rows (ie, assume the input is
4914  * already mostly unique).
4915  */
4916  numDistinctRows = cheapest_input_path->rows;
4917  }
4918  else
4919  {
4920  /*
4921  * Otherwise, the UNIQUE filter has effects comparable to GROUP BY.
4922  */
4923  List *distinctExprs;
4924 
4925  distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
4926  parse->targetList);
4927  numDistinctRows = estimate_num_groups(root, distinctExprs,
4928  cheapest_input_path->rows,
4929  NULL, NULL);
4930  }
4931 
4932  /*
4933  * Consider sort-based implementations of DISTINCT, if possible.
4934  */
4935  if (grouping_is_sortable(root->processed_distinctClause))
4936  {
4937  /*
4938  * Firstly, if we have any adequately-presorted paths, just stick a
4939  * Unique node on those. We also consider doing an explicit sort of
4940  * the cheapest input path and Unique'ing that. If any paths have
4941  * presorted keys then we'll create an incremental sort atop of those
4942  * before adding a unique node on the top.
4943  *
4944  * When we have DISTINCT ON, we must sort by the more rigorous of
4945  * DISTINCT and ORDER BY, else it won't have the desired behavior.
4946  * Also, if we do have to do an explicit sort, we might as well use
4947  * the more rigorous ordering to avoid a second sort later. (Note
4948  * that the parser will have ensured that one clause is a prefix of
4949  * the other.)
4950  */
4951  List *needed_pathkeys;
4952  ListCell *lc;
4953  double limittuples = root->distinct_pathkeys == NIL ? 1.0 : -1.0;
4954 
4955  if (parse->hasDistinctOn &&
4956  list_length(root->distinct_pathkeys) <
4957  list_length(root->sort_pathkeys))
4958  needed_pathkeys = root->sort_pathkeys;
4959  else
4960  needed_pathkeys = root->distinct_pathkeys;
4961 
4962  foreach(lc, input_rel->pathlist)
4963  {
4964  Path *input_path = (Path *) lfirst(lc);
4965  Path *sorted_path;
4966  bool is_sorted;
4967  int presorted_keys;
4968 
4969  is_sorted = pathkeys_count_contained_in(needed_pathkeys,
4970  input_path->pathkeys,
4971  &presorted_keys);
4972 
4973  if (is_sorted)
4974  sorted_path = input_path;
4975  else
4976  {
4977  /*
4978  * Try at least sorting the cheapest path and also try
4979  * incrementally sorting any path which is partially sorted
4980  * already (no need to deal with paths which have presorted
4981  * keys when incremental sort is disabled unless it's the
4982  * cheapest input path).
4983  */
4984  if (input_path != cheapest_input_path &&
4985  (presorted_keys == 0 || !enable_incremental_sort))
4986  continue;
4987 
4988  /*
4989  * We've no need to consider both a sort and incremental sort.
4990  * We'll just do a sort if there are no presorted keys and an
4991  * incremental sort when there are presorted keys.
4992  */
4993  if (presorted_keys == 0 || !enable_incremental_sort)
4994  sorted_path = (Path *) create_sort_path(root,
4995  distinct_rel,
4996  input_path,
4997  needed_pathkeys,
4998  limittuples);
4999  else
5000  sorted_path = (Path *) create_incremental_sort_path(root,
5001  distinct_rel,
5002  input_path,
5003  needed_pathkeys,
5004  presorted_keys,
5005  limittuples);
5006  }
5007 
5008  /*
5009  * distinct_pathkeys may have become empty if all of the pathkeys
5010  * were determined to be redundant. If all of the pathkeys are
5011  * redundant then each DISTINCT target must only allow a single
5012  * value, therefore all resulting tuples must be identical (or at
5013  * least indistinguishable by an equality check). We can uniquify
5014  * these tuples simply by just taking the first tuple. All we do
5015  * here is add a path to do "LIMIT 1" atop of 'sorted_path'. When
5016  * doing a DISTINCT ON we may still have a non-NIL sort_pathkeys
5017  * list, so we must still only do this with paths which are
5018  * correctly sorted by sort_pathkeys.
5019  */
5020  if (root->distinct_pathkeys == NIL)
5021  {
5022  Node *limitCount;
5023 
5024  limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
5025  sizeof(int64),
5026  Int64GetDatum(1), false,
5027  FLOAT8PASSBYVAL);
5028 
5029  /*
5030  * If the query already has a LIMIT clause, then we could end
5031  * up with a duplicate LimitPath in the final plan. That does
5032  * not seem worth troubling over too much.
5033  */
5034  add_path(distinct_rel, (Path *)
5035  create_limit_path(root, distinct_rel, sorted_path,
5036  NULL, limitCount,
5037  LIMIT_OPTION_COUNT, 0, 1));
5038  }
5039  else
5040  {
5041  add_path(distinct_rel, (Path *)
5042  create_upper_unique_path(root, distinct_rel,
5043  sorted_path,
5044  list_length(root->distinct_pathkeys),
5045  numDistinctRows));
5046  }
5047  }
5048  }
5049 
5050  /*
5051  * Consider hash-based implementations of DISTINCT, if possible.
5052  *
5053  * If we were not able to make any other types of path, we *must* hash or
5054  * die trying. If we do have other choices, there are two things that
5055  * should prevent selection of hashing: if the query uses DISTINCT ON
5056  * (because it won't really have the expected behavior if we hash), or if
5057  * enable_hashagg is off.
5058  *
5059  * Note: grouping_is_hashable() is much more expensive to check than the
5060  * other gating conditions, so we want to do it last.
5061  */
5062  if (distinct_rel->pathlist == NIL)
5063  allow_hash = true; /* we have no alternatives */
5064  else if (parse->hasDistinctOn || !enable_hashagg)
5065  allow_hash = false; /* policy-based decision not to hash */
5066  else
5067  allow_hash = true; /* default */
5068 
5069  if (allow_hash && grouping_is_hashable(root->processed_distinctClause))
5070  {
5071  /* Generate hashed aggregate path --- no sort needed */
5072  add_path(distinct_rel, (Path *)
5073  create_agg_path(root,
5074  distinct_rel,
5075  cheapest_input_path,
5076  cheapest_input_path->pathtarget,
5077  AGG_HASHED,
5078  AGGSPLIT_SIMPLE,
5079  root->processed_distinctClause,
5080  NIL,
5081  NULL,
5082  numDistinctRows));
5083  }
5084 
5085  return distinct_rel;
5086 }
#define FLOAT8PASSBYVAL
Definition: c.h:635
bool enable_hashagg
Definition: costsize.c:141
bool enable_incremental_sort
Definition: costsize.c:140
Datum Int64GetDatum(int64 X)
Definition: fmgr.c:1807
Const * makeConst(Oid consttype, int32 consttypmod, Oid constcollid, int constlen, Datum constvalue, bool constisnull, bool constbyval)
Definition: makefuncs.c:301
@ LIMIT_OPTION_COUNT
Definition: nodes.h:430
bool pathkeys_count_contained_in(List *keys1, List *keys2, int *n_common)
Definition: pathkeys.c:573
SortPath * create_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, double limit_tuples)
Definition: pathnode.c:3000
LimitPath * create_limit_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, Node *limitOffset, Node *limitCount, LimitOption limitOption, int64 offset_est, int64 count_est)
Definition: pathnode.c:3823
UpperUniquePath * create_upper_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, int numCols, double numGroups)
Definition: pathnode.c:3103
IncrementalSortPath * create_incremental_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, int presorted_keys, double limit_tuples)
Definition: pathnode.c:2951
#define InvalidOid
Definition: postgres_ext.h:36
double estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, List **pgset, EstimationInfo *estinfo)
Definition: selfuncs.c:3416
Cardinality rows
Definition: pathnodes.h:1649
List * get_sortgrouplist_exprs(List *sgClauses, List *targetList)
Definition: tlist.c:392
bool grouping_is_sortable(List *groupClause)
Definition: tlist.c:540
bool grouping_is_hashable(List *groupClause)
Definition: tlist.c:560

References add_path(), AGG_HASHED, AGGSPLIT_SIMPLE, RelOptInfo::cheapest_total_path, create_agg_path(), create_incremental_sort_path(), create_limit_path(), create_sort_path(), create_upper_unique_path(), enable_hashagg, enable_incremental_sort, estimate_num_groups(), FLOAT8PASSBYVAL, get_sortgrouplist_exprs(), grouping_is_hashable(), grouping_is_sortable(), Int64GetDatum(), InvalidOid, lfirst, LIMIT_OPTION_COUNT, list_length(), makeConst(), NIL, parse(), Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::pathlist, root, and Path::rows.

Referenced by create_distinct_paths(), and create_partial_distinct_paths().
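
The sort-based branch applies the same per-path decision rule that recurs throughout this file: use a path as-is if it is already fully sorted, otherwise sort it only if it is the cheapest input or can be incrementally sorted. A standalone sketch of that decision table (not PostgreSQL code; pathkey containment is reduced to key counts):

#include <stdbool.h>
#include <stdio.h>

enum action {USE_AS_IS, SKIP, FULL_SORT, INCREMENTAL_SORT};

static enum action
choose(int needed_keys, int presorted_keys,
       bool is_cheapest, bool enable_incremental_sort)
{
    if (presorted_keys == needed_keys)
        return USE_AS_IS;       /* needed pathkeys already contained */
    /* only the cheapest input is worth sorting from scratch */
    if (!is_cheapest && (presorted_keys == 0 || !enable_incremental_sort))
        return SKIP;
    if (presorted_keys == 0 || !enable_incremental_sort)
        return FULL_SORT;
    return INCREMENTAL_SORT;
}

int
main(void)
{
    const char *names[] = {"use as-is", "skip", "full sort", "incremental sort"};

    /* non-cheapest path sorted on 1 of 3 needed keys */
    printf("%s\n", names[choose(3, 1, false, true)]);
    /* the same path with enable_incremental_sort disabled */
    printf("%s\n", names[choose(3, 1, false, false)]);
    /* the cheapest path with no presorted keys */
    printf("%s\n", names[choose(3, 0, true, true)]);
    return 0;
}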

◆ create_grouping_paths()

static RelOptInfo * create_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget target,
bool  target_parallel_safe,
grouping_sets_data gd 
)
static

Definition at line 3658 of file planner.c.

3663 {
3664  Query *parse = root->parse;
3665  RelOptInfo *grouped_rel;
3666  RelOptInfo *partially_grouped_rel;
3667  AggClauseCosts agg_costs;
3668 
3669  MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
3670  get_agg_clause_costs(root, AGGSPLIT_SIMPLE, &agg_costs);
3671 
3672  /*
3673  * Create grouping relation to hold fully aggregated grouping and/or
3674  * aggregation paths.
3675  */
3676  grouped_rel = make_grouping_rel(root, input_rel, target,
3677  target_parallel_safe, parse->havingQual);
3678 
3679  /*
3680  * Create either paths for a degenerate grouping or paths for ordinary
3681  * grouping, as appropriate.
3682  */
3683  if (is_degenerate_grouping(root))
3684  create_degenerate_grouping_paths(root, input_rel, grouped_rel);
3685  else
3686  {
3687  int flags = 0;
3688  GroupPathExtraData extra;
3689 
3690  /*
3691  * Determine whether it's possible to perform sort-based
3692  * implementations of grouping. (Note that if processed_groupClause
3693  * is empty, grouping_is_sortable() is trivially true, and all the
3694  * pathkeys_contained_in() tests will succeed too, so that we'll
3695  * consider every surviving input path.)
3696  *
3697  * If we have grouping sets, we might be able to sort some but not all
3698  * of them; in this case, we need can_sort to be true as long as we
3699  * must consider any sorted-input plan.
3700  */
3701  if ((gd && gd->rollups != NIL)
3702  || grouping_is_sortable(root->processed_groupClause))
3703  flags |= GROUPING_CAN_USE_SORT;
3704 
3705  /*
3706  * Determine whether we should consider hash-based implementations of
3707  * grouping.
3708  *
3709  * Hashed aggregation only applies if we're grouping. If we have
3710  * grouping sets, some groups might be hashable but others not; in
3711  * this case we set can_hash true as long as there is nothing globally
3712  * preventing us from hashing (and we should therefore consider plans
3713  * with hashes).
3714  *
3715  * Executor doesn't support hashed aggregation with DISTINCT or ORDER
3716  * BY aggregates. (Doing so would imply storing *all* the input
3717  * values in the hash table, and/or running many sorts in parallel,
3718  * either of which seems like a certain loser.) We similarly don't
3719  * support ordered-set aggregates in hashed aggregation, but that case
3720  * is also included in the numOrderedAggs count.
3721  *
3722  * Note: grouping_is_hashable() is much more expensive to check than
3723  * the other gating conditions, so we want to do it last.
3724  */
3725  if ((parse->groupClause != NIL &&
3726  root->numOrderedAggs == 0 &&
3727  (gd ? gd->any_hashable : grouping_is_hashable(root->processed_groupClause))))
3728  flags |= GROUPING_CAN_USE_HASH;
3729 
3730  /*
3731  * Determine whether partial aggregation is possible.
3732  */
3733  if (can_partial_agg(root))
3734  flags |= GROUPING_CAN_PARTIAL_AGG;
3735 
3736  extra.flags = flags;
3737  extra.target_parallel_safe = target_parallel_safe;
3738  extra.havingQual = parse->havingQual;
3739  extra.targetList = parse->targetList;
3740  extra.partial_costs_set = false;
3741 
3742  /*
3743  * Determine whether partitionwise aggregation is in theory possible.
3744  * It can be disabled by the user, and for now, we don't try to
3745  * support grouping sets. create_ordinary_grouping_paths() will check
3746  * additional conditions, such as whether input_rel is partitioned.
3747  */
3748  if (enable_partitionwise_aggregate && !parse->groupingSets)
3749  extra.patype = PARTITIONWISE_AGGREGATE_FULL;
3750  else
3751  extra.patype = PARTITIONWISE_AGGREGATE_NONE;
3752 
3753  create_ordinary_grouping_paths(root, input_rel, grouped_rel,
3754  &agg_costs, gd, &extra,
3755  &partially_grouped_rel);
3756  }
3757 
3758  set_cheapest(grouped_rel);
3759  return grouped_rel;
3760 }
#define MemSet(start, val, len)
Definition: c.h:1020
bool enable_partitionwise_aggregate
Definition: costsize.c:149
@ PARTITIONWISE_AGGREGATE_FULL
Definition: pathnodes.h:3259
@ PARTITIONWISE_AGGREGATE_NONE
Definition: pathnodes.h:3258
#define GROUPING_CAN_PARTIAL_AGG
Definition: pathnodes.h:3243
static void create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel)
Definition: planner.c:3845
static bool is_degenerate_grouping(PlannerInfo *root)
Definition: planner.c:3824
static void create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, GroupPathExtraData *extra, RelOptInfo **partially_grouped_rel_p)
Definition: planner.c:3909
static bool can_partial_agg(PlannerInfo *root)
Definition: planner.c:7461
static RelOptInfo * make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, Node *havingQual)
Definition: planner.c:3771
void get_agg_clause_costs(PlannerInfo *root, AggSplit aggsplit, AggClauseCosts *costs)
Definition: prepagg.c:560
PartitionwiseAggregateType patype
Definition: pathnodes.h:3288

References AGGSPLIT_SIMPLE, grouping_sets_data::any_hashable, can_partial_agg(), create_degenerate_grouping_paths(), create_ordinary_grouping_paths(), enable_partitionwise_aggregate, GroupPathExtraData::flags, get_agg_clause_costs(), GROUPING_CAN_PARTIAL_AGG, GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, grouping_is_hashable(), grouping_is_sortable(), GroupPathExtraData::havingQual, is_degenerate_grouping(), make_grouping_rel(), MemSet, NIL, parse(), GroupPathExtraData::partial_costs_set, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, GroupPathExtraData::patype, grouping_sets_data::rollups, root, set_cheapest(), GroupPathExtraData::target_parallel_safe, and GroupPathExtraData::targetList.

Referenced by grouping_planner().
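
The three gating tests combine into the flags bitmask passed down in GroupPathExtraData. A standalone sketch of that assembly (not PostgreSQL code; the booleans stand in for the real tests named in the comments):

#include <stdbool.h>
#include <stdio.h>

#define CAN_USE_SORT    0x0001  /* mirrors GROUPING_CAN_USE_SORT */
#define CAN_USE_HASH    0x0002  /* mirrors GROUPING_CAN_USE_HASH */
#define CAN_PARTIAL_AGG 0x0004  /* mirrors GROUPING_CAN_PARTIAL_AGG */

int
main(void)
{
    bool have_sortable_rollups = true;  /* gd && gd->rollups != NIL */
    bool clause_sortable = true;        /* grouping_is_sortable(...) */
    bool have_group_clause = true;      /* parse->groupClause != NIL */
    bool ordered_aggs = false;          /* root->numOrderedAggs != 0 */
    bool clause_hashable = true;        /* grouping_is_hashable(...) */
    bool partial_agg_ok = true;         /* can_partial_agg(root) */
    int  flags = 0;

    if (have_sortable_rollups || clause_sortable)
        flags |= CAN_USE_SORT;
    if (have_group_clause && !ordered_aggs && clause_hashable)
        flags |= CAN_USE_HASH;
    if (partial_agg_ok)
        flags |= CAN_PARTIAL_AGG;

    printf("flags = 0x%04x\n", flags);  /* 0x0007 with these inputs */
    return 0;
}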

◆ create_one_window_path()

static void create_one_window_path ( PlannerInfo root,
RelOptInfo window_rel,
Path path,
PathTarget input_target,
PathTarget output_target,
WindowFuncLists wflists,
List activeWindows 
)
static

Definition at line 4497 of file planner.c.

4504 {
4505  PathTarget *window_target;
4506  ListCell *l;
4507  List *topqual = NIL;
4508 
4509  /*
4510  * Since each window clause could require a different sort order, we stack
4511  * up a WindowAgg node for each clause, with sort steps between them as
4512  * needed. (We assume that select_active_windows chose a good order for
4513  * executing the clauses in.)
4514  *
4515  * input_target should contain all Vars and Aggs needed for the result.
4516  * (In some cases we wouldn't need to propagate all of these all the way
4517  * to the top, since they might only be needed as inputs to WindowFuncs.
4518  * It's probably not worth trying to optimize that though.) It must also
4519  * contain all window partitioning and sorting expressions, to ensure
4520  * they're computed only once at the bottom of the stack (that's critical
4521  * for volatile functions). As we climb up the stack, we'll add outputs
4522  * for the WindowFuncs computed at each level.
4523  */
4524  window_target = input_target;
4525 
4526  foreach(l, activeWindows)
4527  {
4528  WindowClause *wc = lfirst_node(WindowClause, l);
4529  List *window_pathkeys;
4530  int presorted_keys;
4531  bool is_sorted;
4532  bool topwindow;
4533 
4534  window_pathkeys = make_pathkeys_for_window(root,
4535  wc,
4536  root->processed_tlist);
4537 
4538  is_sorted = pathkeys_count_contained_in(window_pathkeys,
4539  path->pathkeys,
4540  &presorted_keys);
4541 
4542  /* Sort if necessary */
4543  if (!is_sorted)
4544  {
4545  /*
4546  * No presorted keys or incremental sort disabled, just perform a
4547  * complete sort.
4548  */
4549  if (presorted_keys == 0 || !enable_incremental_sort)
4550  path = (Path *) create_sort_path(root, window_rel,
4551  path,
4552  window_pathkeys,
4553  -1.0);
4554  else
4555  {
4556  /*
4557  * Since we have presorted keys and incremental sort is
4558  * enabled, just use incremental sort.
4559  */
4560  path = (Path *) create_incremental_sort_path(root,
4561  window_rel,
4562  path,
4563  window_pathkeys,
4564  presorted_keys,
4565  -1.0);
4566  }
4567  }
4568 
4569  if (lnext(activeWindows, l))
4570  {
4571  /*
4572  * Add the current WindowFuncs to the output target for this
4573  * intermediate WindowAggPath. We must copy window_target to
4574  * avoid changing the previous path's target.
4575  *
4576  * Note: a WindowFunc adds nothing to the target's eval costs; but
4577  * we do need to account for the increase in tlist width.
4578  */
4579  int64 tuple_width = window_target->width;
4580  ListCell *lc2;
4581 
4582  window_target = copy_pathtarget(window_target);
4583  foreach(lc2, wflists->windowFuncs[wc->winref])
4584  {
4585  WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4586 
4587  add_column_to_pathtarget(window_target, (Expr *) wfunc, 0);
4588  tuple_width += get_typavgwidth(wfunc->wintype, -1);
4589  }
4590  window_target->width = clamp_width_est(tuple_width);
4591  }
4592  else
4593  {
4594  /* Install the goal target in the topmost WindowAgg */
4595  window_target = output_target;
4596  }
4597 
4598  /* mark the final item in the list as the top-level window */
4599  topwindow = foreach_current_index(l) == list_length(activeWindows) - 1;
4600 
4601  /*
4602  * Accumulate all of the runConditions from each intermediate
4603  * WindowClause. The top-level WindowAgg must pass these as a qual so
4604  * that it filters out unwanted tuples correctly.
4605  */
4606  if (!topwindow)
4607  topqual = list_concat(topqual, wc->runCondition);
4608 
4609  path = (Path *)
4610  create_windowagg_path(root, window_rel, path, window_target,
4611  wflists->windowFuncs[wc->winref],
4612  wc, topwindow ? topqual : NIL, topwindow);
4613  }
4614 
4615  add_path(window_rel, path);
4616 }
int32 clamp_width_est(int64 tuple_width)
Definition: costsize.c:231
int32 get_typavgwidth(Oid typid, int32 typmod)
Definition: lsyscache.c:2578
WindowAggPath * create_windowagg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *windowFuncs, WindowClause *winclause, List *qual, bool topwindow)
Definition: pathnode.c:3484
static List * make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc, List *tlist)
Definition: planner.c:6001
List ** windowFuncs
Definition: clauses.h:23
void add_column_to_pathtarget(PathTarget *target, Expr *expr, Index sortgroupref)
Definition: tlist.c:695

References add_column_to_pathtarget(), add_path(), clamp_width_est(), copy_pathtarget(), create_incremental_sort_path(), create_sort_path(), create_windowagg_path(), enable_incremental_sort, foreach_current_index, get_typavgwidth(), lfirst_node, list_concat(), list_length(), lnext(), make_pathkeys_for_window(), NIL, Path::pathkeys, pathkeys_count_contained_in(), root, PathTarget::width, WindowFuncLists::windowFuncs, and WindowClause::winref.

Referenced by create_window_paths().
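
The stacking logic sorts only when the ordering produced so far fails to cover the next clause's pathkeys, which is why choosing a good clause order lets neighbors share sort prefixes. A standalone sketch (not PostgreSQL code; pathkeys are modeled as arrays of column numbers):

#include <stdio.h>

#define MAXKEYS 4

struct winclause
{
    const char *name;
    int         nkeys;
    int         keys[MAXKEYS];  /* PARTITION BY / ORDER BY columns */
};

/* how many leading keys of "needed" the current ordering provides */
static int
presorted(const int *cur, int ncur, const int *needed, int nneeded)
{
    int n = 0;

    while (n < ncur && n < nneeded && cur[n] == needed[n])
        n++;
    return n;
}

int
main(void)
{
    struct winclause wcs[] = {
        {"w1", 2, {1, 2}},
        {"w2", 2, {1, 3}},      /* shares the leading key with w1 */
    };
    int cur[MAXKEYS];
    int ncur = 0;               /* the input path is unsorted */

    for (int i = 0; i < 2; i++)
    {
        int pre = presorted(cur, ncur, wcs[i].keys, wcs[i].nkeys);

        if (pre < wcs[i].nkeys)
        {
            printf("%s: %s, then WindowAgg\n", wcs[i].name,
                   pre > 0 ? "incremental sort" : "full sort");
            for (int k = 0; k < wcs[i].nkeys; k++)
                cur[k] = wcs[i].keys[k];
            ncur = wcs[i].nkeys;
        }
        else
            printf("%s: WindowAgg directly\n", wcs[i].name);
    }
    return 0;
}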

◆ create_ordered_paths()

static RelOptInfo * create_ordered_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget target,
bool  target_parallel_safe,
double  limit_tuples 
)
static

Definition at line 5106 of file planner.c.

5111 {
5112  Path *cheapest_input_path = input_rel->cheapest_total_path;
5113  RelOptInfo *ordered_rel;
5114  ListCell *lc;
5115 
5116  /* For now, do all work in the (ORDERED, NULL) upperrel */
5117  ordered_rel = fetch_upper_rel(root, UPPERREL_ORDERED, NULL);
5118 
5119  /*
5120  * If the input relation is not parallel-safe, then the ordered relation
5121  * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
5122  * target list is parallel-safe.
5123  */
5124  if (input_rel->consider_parallel && target_parallel_safe)
5125  ordered_rel->consider_parallel = true;
5126 
5127  /*
5128  * If the input rel belongs to a single FDW, so does the ordered_rel.
5129  */
5130  ordered_rel->serverid = input_rel->serverid;
5131  ordered_rel->userid = input_rel->userid;
5132  ordered_rel->useridiscurrent = input_rel->useridiscurrent;
5133  ordered_rel->fdwroutine = input_rel->fdwroutine;
5134 
5135  foreach(lc, input_rel->pathlist)
5136  {
5137  Path *input_path = (Path *) lfirst(lc);
5138  Path *sorted_path;
5139  bool is_sorted;
5140  int presorted_keys;
5141 
5142  is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5143  input_path->pathkeys, &presorted_keys);
5144 
5145  if (is_sorted)
5146  sorted_path = input_path;
5147  else
5148  {
5149  /*
5150  * Try at least sorting the cheapest path and also try
5151  * incrementally sorting any path which is partially sorted
5152  * already (no need to deal with paths which have presorted keys
5153  * when incremental sort is disabled unless it's the cheapest
5154  * input path).
5155  */
5156  if (input_path != cheapest_input_path &&
5157  (presorted_keys == 0 || !enable_incremental_sort))
5158  continue;
5159 
5160  /*
5161  * We've no need to consider both a sort and incremental sort.
5162  * We'll just do a sort if there are no presorted keys and an
5163  * incremental sort when there are presorted keys.
5164  */
5165  if (presorted_keys == 0 || !enable_incremental_sort)
5166  sorted_path = (Path *) create_sort_path(root,
5167  ordered_rel,
5168  input_path,
5169  root->sort_pathkeys,
5170  limit_tuples);
5171  else
5172  sorted_path = (Path *) create_incremental_sort_path(root,
5173  ordered_rel,
5174  input_path,
5175  root->sort_pathkeys,
5176  presorted_keys,
5177  limit_tuples);
5178  }
5179 
5180  /* Add projection step if needed */
5181  if (sorted_path->pathtarget != target)
5182  sorted_path = apply_projection_to_path(root, ordered_rel,
5183  sorted_path, target);
5184 
5185  add_path(ordered_rel, sorted_path);
5186  }
5187 
5188  /*
5189  * generate_gather_paths() will have already generated a simple Gather
5190  * path for the best parallel path, if any, and the loop above will have
5191  * considered sorting it. Similarly, generate_gather_paths() will also
5192  * have generated order-preserving Gather Merge plans which can be used
5193  * without sorting if they happen to match the sort_pathkeys, and the loop
5194  * above will have handled those as well. However, there's one more
5195  * possibility: it may make sense to sort the cheapest partial path or
5196  * incrementally sort any partial path that is partially sorted according
5197  * to the required output order and then use Gather Merge.
5198  */
5199  if (ordered_rel->consider_parallel && root->sort_pathkeys != NIL &&
5200  input_rel->partial_pathlist != NIL)
5201  {
5202  Path *cheapest_partial_path;
5203 
5204  cheapest_partial_path = linitial(input_rel->partial_pathlist);
5205 
5206  foreach(lc, input_rel->partial_pathlist)
5207  {
5208  Path *input_path = (Path *) lfirst(lc);
5209  Path *sorted_path;
5210  bool is_sorted;
5211  int presorted_keys;
5212  double total_groups;
5213 
5214  is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5215  input_path->pathkeys,
5216  &presorted_keys);
5217 
5218  if (is_sorted)
5219  continue;
5220 
5221  /*
5222  * Try at least sorting the cheapest path and also try
5223  * incrementally sorting any path which is partially sorted
5224  * already (no need to deal with paths which have presorted keys
5225  * when incremental sort is disabled unless it's the cheapest
5226  * partial path).
5227  */
5228  if (input_path != cheapest_partial_path &&
5229  (presorted_keys == 0 || !enable_incremental_sort))
5230  continue;
5231 
5232  /*
5233  * We've no need to consider both a sort and incremental sort.
5234  * We'll just do a sort if there are no presorted keys and an
5235  * incremental sort when there are presorted keys.
5236  */
5237  if (presorted_keys == 0 || !enable_incremental_sort)
5238  sorted_path = (Path *) create_sort_path(root,
5239  ordered_rel,
5240  input_path,
5241  root->sort_pathkeys,
5242  limit_tuples);
5243  else
5244  sorted_path = (Path *) create_incremental_sort_path(root,
5245  ordered_rel,
5246  input_path,
5247  root->sort_pathkeys,
5248  presorted_keys,
5249  limit_tuples);
5250  total_groups = input_path->rows *
5251  input_path->parallel_workers;
5252  sorted_path = (Path *)
5253  create_gather_merge_path(root, ordered_rel,
5254  sorted_path,
5255  sorted_path->pathtarget,
5256  root->sort_pathkeys, NULL,
5257  &total_groups);
5258 
5259  /* Add projection step if needed */
5260  if (sorted_path->pathtarget != target)
5261  sorted_path = apply_projection_to_path(root, ordered_rel,
5262  sorted_path, target);
5263 
5264  add_path(ordered_rel, sorted_path);
5265  }
5266  }
5267 
5268  /*
5269  * If there is an FDW that's responsible for all baserels of the query,
5270  * let it consider adding ForeignPaths.
5271  */
5272  if (ordered_rel->fdwroutine &&
5273  ordered_rel->fdwroutine->GetForeignUpperPaths)
5274  ordered_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_ORDERED,
5275  input_rel, ordered_rel,
5276  NULL);
5277 
5278  /* Let extensions possibly add some more paths */
5279  if (create_upper_paths_hook)
5280  (*create_upper_paths_hook) (root, UPPERREL_ORDERED,
5281  input_rel, ordered_rel, NULL);
5282 
5283  /*
5284  * No need to bother with set_cheapest here; grouping_planner does not
5285  * need us to do it.
5286  */
5287  Assert(ordered_rel->pathlist != NIL);
5288 
5289  return ordered_rel;
5290 }
GatherMergePath * create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *pathkeys, Relids required_outer, double *rows)
Definition: pathnode.c:1881
@ UPPERREL_ORDERED
Definition: pathnodes.h:78
int parallel_workers
Definition: pathnodes.h:1646

References add_path(), apply_projection_to_path(), Assert, RelOptInfo::cheapest_total_path, RelOptInfo::consider_parallel, create_gather_merge_path(), create_incremental_sort_path(), create_sort_path(), create_upper_paths_hook, enable_incremental_sort, fetch_upper_rel(), lfirst, linitial, NIL, Path::parallel_workers, RelOptInfo::partial_pathlist, Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::pathlist, root, Path::rows, RelOptInfo::serverid, UPPERREL_ORDERED, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by grouping_planner().
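
The parallel branch relies on Gather Merge preserving order: each worker's partial path is sorted first, and the leader then performs a streaming merge rather than a final sort. A standalone sketch of that merge (not PostgreSQL code; worker outputs are fixed sorted arrays):

#include <stdio.h>

#define NWORKERS 3
#define NROWS 4

int
main(void)
{
    /* per-worker output, already sorted by the required pathkeys */
    int streams[NWORKERS][NROWS] = {
        {1, 4, 7, 10},
        {2, 5, 8, 11},
        {3, 6, 9, 12},
    };
    int pos[NWORKERS] = {0, 0, 0};

    for (;;)
    {
        int best = -1;

        /* pick the worker whose next tuple sorts first */
        for (int w = 0; w < NWORKERS; w++)
            if (pos[w] < NROWS &&
                (best < 0 || streams[w][pos[w]] < streams[best][pos[best]]))
                best = w;
        if (best < 0)
            break;              /* all streams exhausted */
        printf("%d ", streams[best][pos[best]++]);
    }
    printf("\n");               /* 1 2 3 ... 12, globally ordered */
    return 0;
}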

◆ create_ordinary_grouping_paths()

static void create_ordinary_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo grouped_rel,
const AggClauseCosts agg_costs,
grouping_sets_data gd,
GroupPathExtraData extra,
RelOptInfo **  partially_grouped_rel_p 
)
static

Definition at line 3909 of file planner.c.

3915 {
3916  Path *cheapest_path = input_rel->cheapest_total_path;
3917  RelOptInfo *partially_grouped_rel = NULL;
3918  double dNumGroups;
3919  PartitionwiseAggregateType patype = PARTITIONWISE_AGGREGATE_NONE;
3920 
3921  /*
3922  * If this is the topmost grouping relation or if the parent relation is
3923  * doing some form of partitionwise aggregation, then we may be able to do
3924  * it at this level also. However, if the input relation is not
3925  * partitioned, partitionwise aggregate is impossible.
3926  */
3927  if (extra->patype != PARTITIONWISE_AGGREGATE_NONE &&
3928  IS_PARTITIONED_REL(input_rel))
3929  {
3930  /*
3931  * If this is the topmost relation or if the parent relation is doing
3932  * full partitionwise aggregation, then we can do full partitionwise
3933  * aggregation provided that the GROUP BY clause contains all of the
3934  * partitioning columns at this level. Otherwise, we can do at most
3935  * partial partitionwise aggregation. But if partial aggregation is
3936  * not supported in general then we can't use it for partitionwise
3937  * aggregation either.
3938  *
3939  * Check parse->groupClause not processed_groupClause, because it's
3940  * okay if some of the partitioning columns were proved redundant.
3941  */
3942  if (extra->patype == PARTITIONWISE_AGGREGATE_FULL &&
3943  group_by_has_partkey(input_rel, extra->targetList,
3944  root->parse->groupClause))
3945  patype = PARTITIONWISE_AGGREGATE_FULL;
3946  else if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
3947  patype = PARTITIONWISE_AGGREGATE_PARTIAL;
3948  else
3949  patype = PARTITIONWISE_AGGREGATE_NONE;
3950  }
3951 
3952  /*
3953  * Before generating paths for grouped_rel, we first generate any possible
3954  * partially grouped paths; that way, later code can easily consider both
3955  * parallel and non-parallel approaches to grouping.
3956  */
3957  if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
3958  {
3959  bool force_rel_creation;
3960 
3961  /*
3962  * If we're doing partitionwise aggregation at this level, force
3963  * creation of a partially_grouped_rel so we can add partitionwise
3964  * paths to it.
3965  */
3966  force_rel_creation = (patype == PARTITIONWISE_AGGREGATE_PARTIAL);
3967 
3968  partially_grouped_rel =
3969  create_partial_grouping_paths(root,
3970  grouped_rel,
3971  input_rel,
3972  gd,
3973  extra,
3974  force_rel_creation);
3975  }
3976 
3977  /* Set out parameter. */
3978  *partially_grouped_rel_p = partially_grouped_rel;
3979 
3980  /* Apply partitionwise aggregation technique, if possible. */
3981  if (patype != PARTITIONWISE_AGGREGATE_NONE)
3982  create_partitionwise_grouping_paths(root, input_rel, grouped_rel,
3983  partially_grouped_rel, agg_costs,
3984  gd, patype, extra);
3985 
3986  /* If we are doing partial aggregation only, return. */
3987  if (extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
3988  {
3989  Assert(partially_grouped_rel);
3990 
3991  if (partially_grouped_rel->pathlist)
3992  set_cheapest(partially_grouped_rel);
3993 
3994  return;
3995  }
3996 
3997  /* Gather any partially grouped partial paths. */
3998  if (partially_grouped_rel && partially_grouped_rel->partial_pathlist)
3999  {
4000  gather_grouping_paths(root, partially_grouped_rel);
4001  set_cheapest(partially_grouped_rel);
4002  }
4003 
4004  /*
4005  * Estimate number of groups.
4006  */
4007  dNumGroups = get_number_of_groups(root,
4008  cheapest_path->rows,
4009  gd,
4010  extra->targetList);
4011 
4012  /* Build final grouping paths */
4013  add_paths_to_grouping_rel(root, input_rel, grouped_rel,
4014  partially_grouped_rel, agg_costs, gd,
4015  dNumGroups, extra);
4016 
4017  /* Give a helpful error if we failed to find any implementation */
4018  if (grouped_rel->pathlist == NIL)
4019  ereport(ERROR,
4020  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4021  errmsg("could not implement GROUP BY"),
4022  errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4023 
4024  /*
4025  * If there is an FDW that's responsible for all baserels of the query,
4026  * let it consider adding ForeignPaths.
4027  */
4028  if (grouped_rel->fdwroutine &&
4029  grouped_rel->fdwroutine->GetForeignUpperPaths)
4030  grouped_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_GROUP_AGG,
4031  input_rel, grouped_rel,
4032  extra);
4033 
4034  /* Let extensions possibly add some more paths */
4035  if (create_upper_paths_hook)
4036  (*create_upper_paths_hook) (root, UPPERREL_GROUP_AGG,
4037  input_rel, grouped_rel,
4038  extra);
4039 }
PartitionwiseAggregateType
Definition: pathnodes.h:3257
@ PARTITIONWISE_AGGREGATE_PARTIAL
Definition: pathnodes.h:3260
@ UPPERREL_GROUP_AGG
Definition: pathnodes.h:74
static RelOptInfo * create_partial_grouping_paths(PlannerInfo *root, RelOptInfo *grouped_rel, RelOptInfo *input_rel, grouping_sets_data *gd, GroupPathExtraData *extra, bool force_rel_creation)
Definition: planner.c:7077
static void add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, double dNumGroups, GroupPathExtraData *extra)
Definition: planner.c:6842
static double get_number_of_groups(PlannerInfo *root, double path_rows, grouping_sets_data *gd, List *target_list)
Definition: planner.c:3536
static void create_partitionwise_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, PartitionwiseAggregateType patype, GroupPathExtraData *extra)
Definition: planner.c:7738
static bool group_by_has_partkey(RelOptInfo *input_rel, List *targetList, List *groupClause)
Definition: planner.c:7882

References add_paths_to_grouping_rel(), Assert, RelOptInfo::cheapest_total_path, create_partial_grouping_paths(), create_partitionwise_grouping_paths(), create_upper_paths_hook, ereport, errcode(), errdetail(), errmsg(), ERROR, GroupPathExtraData::flags, gather_grouping_paths(), get_number_of_groups(), group_by_has_partkey(), GROUPING_CAN_PARTIAL_AGG, IS_PARTITIONED_REL, NIL, RelOptInfo::partial_pathlist, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, PARTITIONWISE_AGGREGATE_PARTIAL, RelOptInfo::pathlist, GroupPathExtraData::patype, root, Path::rows, set_cheapest(), GroupPathExtraData::targetList, and UPPERREL_GROUP_AGG.

Referenced by create_grouping_paths(), and create_partitionwise_grouping_paths().
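
The partitionwise block at the top resolves to one of three outcomes. A standalone sketch of that decision (not PostgreSQL code, and simplified: the "parent allows it" input collapses the topmost-relation and parent-patype checks into one boolean):

#include <stdbool.h>
#include <stdio.h>

enum pw {PW_NONE, PW_PARTIAL, PW_FULL};

static enum pw
choose_patype(bool parent_allows_pw, bool input_is_partitioned,
              bool group_by_has_partkey, bool can_partial_agg)
{
    if (!parent_allows_pw || !input_is_partitioned)
        return PW_NONE;
    if (group_by_has_partkey)
        return PW_FULL;         /* aggregate each child completely */
    if (can_partial_agg)
        return PW_PARTIAL;      /* partially aggregate per child */
    return PW_NONE;
}

int
main(void)
{
    const char *names[] = {"NONE", "PARTIAL", "FULL"};

    /* GROUP BY covers the partition key: full per-child aggregation */
    printf("%s\n", names[choose_patype(true, true, true, true)]);
    /* it does not, but partial aggregation is supported in general */
    printf("%s\n", names[choose_patype(true, true, false, true)]);
    /* partial aggregation impossible: no partitionwise strategy */
    printf("%s\n", names[choose_patype(true, true, false, false)]);
    return 0;
}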

◆ create_partial_distinct_paths()

static void create_partial_distinct_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo final_distinct_rel,
PathTarget target 
)
static

Definition at line 4700 of file planner.c.

4703 {
4704  RelOptInfo *partial_distinct_rel;
4705  Query *parse;
4706  List *distinctExprs;
4707  double numDistinctRows;
4708  Path *cheapest_partial_path;
4709  ListCell *lc;
4710 
4711  /* nothing to do when there are no partial paths in the input rel */
4712  if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
4713  return;
4714 
4715  parse = root->parse;
4716 
4717  /* can't do parallel DISTINCT ON */
4718  if (parse->hasDistinctOn)
4719  return;
4720 
4721  partial_distinct_rel = fetch_upper_rel(root, UPPERREL_PARTIAL_DISTINCT,
4722  NULL);
4723  partial_distinct_rel->reltarget = target;
4724  partial_distinct_rel->consider_parallel = input_rel->consider_parallel;
4725 
4726  /*
4727  * If input_rel belongs to a single FDW, so does the partial_distinct_rel.
4728  */
4729  partial_distinct_rel->serverid = input_rel->serverid;
4730  partial_distinct_rel->userid = input_rel->userid;
4731  partial_distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4732  partial_distinct_rel->fdwroutine = input_rel->fdwroutine;
4733 
4734  cheapest_partial_path = linitial(input_rel->partial_pathlist);
4735 
4736  distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
4737  parse->targetList);
4738 
4739  /* estimate how many distinct rows we'll get from each worker */
4740  numDistinctRows = estimate_num_groups(root, distinctExprs,
4741  cheapest_partial_path->rows,
4742  NULL, NULL);
4743 
4744  /*
4745  * Try sorting the cheapest path and incrementally sorting any paths with
4746  * presorted keys and put unique paths atop of those.
4747  */
4748  if (grouping_is_sortable(root->processed_distinctClause))
4749  {
4750  foreach(lc, input_rel->partial_pathlist)
4751  {
4752  Path *input_path = (Path *) lfirst(lc);
4753  Path *sorted_path;
4754  bool is_sorted;
4755  int presorted_keys;
4756 
4757  is_sorted = pathkeys_count_contained_in(root->distinct_pathkeys,
4758  input_path->pathkeys,
4759  &presorted_keys);
4760 
4761  if (is_sorted)
4762  sorted_path = input_path;
4763  else
4764  {
4765  /*
4766  * Try at least sorting the cheapest path and also try
4767  * incrementally sorting any path which is partially sorted
4768  * already (no need to deal with paths which have presorted
4769  * keys when incremental sort is disabled unless it's the
4770  * cheapest partial path).
4771  */
4772  if (input_path != cheapest_partial_path &&
4773  (presorted_keys == 0 || !enable_incremental_sort))
4774  continue;
4775 
4776  /*
4777  * We've no need to consider both a sort and incremental sort.
4778  * We'll just do a sort if there are no presorted keys and an
4779  * incremental sort when there are presorted keys.
4780  */
4781  if (presorted_keys == 0 || !enable_incremental_sort)
4782  sorted_path = (Path *) create_sort_path(root,
4783  partial_distinct_rel,
4784  input_path,
4785  root->distinct_pathkeys,
4786  -1.0);
4787  else
4788  sorted_path = (Path *) create_incremental_sort_path(root,
4789  partial_distinct_rel,
4790  input_path,
4791  root->distinct_pathkeys,
4792  presorted_keys,
4793  -1.0);
4794  }
4795 
4796  /*
4797  * An empty distinct_pathkeys means all tuples have the same value
4798  * for the DISTINCT clause. See create_final_distinct_paths()
4799  */
4800  if (root->distinct_pathkeys == NIL)
4801  {
4802  Node *limitCount;
4803 
4804  limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
4805  sizeof(int64),
4806  Int64GetDatum(1), false,
4807  FLOAT8PASSBYVAL);
4808 
4809  /*
4810  * Apply a LimitPath onto the partial path to restrict the
4811  * tuples from each worker to 1. create_final_distinct_paths
4812  * will need to apply an additional LimitPath to restrict this
4813  * to a single row after the Gather node. If the query
4814  * already has a LIMIT clause, then we could end up with three
4815  * Limit nodes in the final plan. Consolidating the top two
4816  * of these could be done, but does not seem worth troubling
4817  * over.
4818  */
4819  add_partial_path(partial_distinct_rel, (Path *)
4820  create_limit_path(root, partial_distinct_rel,
4821  sorted_path,
4822  NULL,
4823  limitCount,
4824  LIMIT_OPTION_COUNT,
4825  0, 1));
4826  }
4827  else
4828  {
4829  add_partial_path(partial_distinct_rel, (Path *)
4830  create_upper_unique_path(root, partial_distinct_rel,
4831  sorted_path,
4832  list_length(root->distinct_pathkeys),
4833  numDistinctRows));
4834  }
4835  }
4836  }
4837 
4838  /*
4839  * Now try hash aggregate paths, if enabled and hashing is possible. Since
4840  * we're not on the hook to ensure we do our best to create at least one
4841  * path here, we treat enable_hashagg as a hard off-switch rather than the
4842  * slightly softer variant in create_final_distinct_paths.
4843  */
4844  if (enable_hashagg && grouping_is_hashable(root->processed_distinctClause))
4845  {
4846  add_partial_path(partial_distinct_rel, (Path *)
4847  create_agg_path(root,
4848  partial_distinct_rel,
4849  cheapest_partial_path,
4850  cheapest_partial_path->pathtarget,
4851  AGG_HASHED,
4852  AGGSPLIT_SIMPLE,
4853  root->processed_distinctClause,
4854  NIL,
4855  NULL,
4856  numDistinctRows));
4857  }
4858 
4859  /*
4860  * If there is an FDW that's responsible for all baserels of the query,
4861  * let it consider adding ForeignPaths.
4862  */
4863  if (partial_distinct_rel->fdwroutine &&
4864  partial_distinct_rel->fdwroutine->GetForeignUpperPaths)
4865  partial_distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4866  UPPERREL_PARTIAL_DISTINCT,
4867  input_rel,
4868  partial_distinct_rel,
4869  NULL);
4870 
4871  /* Let extensions possibly add some more partial paths */
4872  if (create_upper_paths_hook)
4873  (*create_upper_paths_hook) (root, UPPERREL_PARTIAL_DISTINCT,
4874  input_rel, partial_distinct_rel, NULL);
4875 
4876  if (partial_distinct_rel->partial_pathlist != NIL)
4877  {
4878  generate_useful_gather_paths(root, partial_distinct_rel, true);
4879  set_cheapest(partial_distinct_rel);
4880 
4881  /*
4882  * Finally, create paths to distinctify the final result. This step
4883  * is needed to remove any duplicates due to combining rows from
4884  * parallel workers.
4885  */
4886  create_final_distinct_paths(root, partial_distinct_rel,
4887  final_distinct_rel);
4888  }
4889 }
void add_partial_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:747
@ UPPERREL_PARTIAL_DISTINCT
Definition: pathnodes.h:76

References add_partial_path(), AGG_HASHED, AGGSPLIT_SIMPLE, RelOptInfo::consider_parallel, create_agg_path(), create_final_distinct_paths(), create_incremental_sort_path(), create_limit_path(), create_sort_path(), create_upper_paths_hook, create_upper_unique_path(), enable_hashagg, enable_incremental_sort, estimate_num_groups(), fetch_upper_rel(), FLOAT8PASSBYVAL, generate_useful_gather_paths(), get_sortgrouplist_exprs(), grouping_is_hashable(), grouping_is_sortable(), Int64GetDatum(), InvalidOid, lfirst, LIMIT_OPTION_COUNT, linitial, list_length(), makeConst(), NIL, parse(), RelOptInfo::partial_pathlist, Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::reltarget, root, Path::rows, RelOptInfo::serverid, set_cheapest(), UPPERREL_PARTIAL_DISTINCT, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by create_distinct_paths().
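
The empty-distinct_pathkeys shortcut stacks two LIMIT 1 steps: one below the Gather, applied per worker, and one above it. A standalone arithmetic sketch (not PostgreSQL code) of how the row counts collapse:

#include <stdio.h>

static long
limit1(long rows)
{
    return rows < 1 ? rows : 1;
}

int
main(void)
{
    long worker_rows[3] = {100000, 250000, 0};  /* per-worker input rows */
    long gathered = 0;

    for (int w = 0; w < 3; w++)
        gathered += limit1(worker_rows[w]);     /* LIMIT 1 below Gather */

    printf("rows through Gather: %ld\n", gathered);         /* 2 */
    printf("after final LIMIT 1: %ld\n", limit1(gathered)); /* 1 */
    return 0;
}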

◆ create_partial_grouping_paths()

static RelOptInfo * create_partial_grouping_paths ( PlannerInfo root,
RelOptInfo grouped_rel,
RelOptInfo input_rel,
grouping_sets_data gd,
GroupPathExtraData extra,
bool  force_rel_creation 
)
static

Definition at line 7077 of file planner.c.

7083 {
7084  Query *parse = root->parse;
7085  RelOptInfo *partially_grouped_rel;
7086  AggClauseCosts *agg_partial_costs = &extra->agg_partial_costs;
7087  AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7088  Path *cheapest_partial_path = NULL;
7089  Path *cheapest_total_path = NULL;
7090  double dNumPartialGroups = 0;
7091  double dNumPartialPartialGroups = 0;
7092  ListCell *lc;
7093  bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7094  bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7095 
7096  /*
7097  * Consider whether we should generate partially aggregated non-partial
7098  * paths. We can only do this if we have a non-partial path, and only if
7099  * the parent of the input rel is performing partial partitionwise
7100  * aggregation. (Note that extra->patype is the type of partitionwise
7101  * aggregation being used at the parent level, not this level.)
7102  */
7103  if (input_rel->pathlist != NIL &&
7104  extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
7105  cheapest_total_path = input_rel->cheapest_total_path;
7106 
7107  /*
7108  * If parallelism is possible for grouped_rel, then we should consider
7109  * generating partially-grouped partial paths. However, if the input rel
7110  * has no partial paths, then we can't.
7111  */
7112  if (grouped_rel->consider_parallel && input_rel->partial_pathlist != NIL)
7113  cheapest_partial_path = linitial(input_rel->partial_pathlist);
7114 
7115  /*
7116  * If we can't partially aggregate partial paths, and we can't partially
7117  * aggregate non-partial paths, then don't bother creating the new
7118  * RelOptInfo at all, unless the caller specified force_rel_creation.
7119  */
7120  if (cheapest_total_path == NULL &&
7121  cheapest_partial_path == NULL &&
7122  !force_rel_creation)
7123  return NULL;
7124 
7125  /*
7126  * Build a new upper relation to represent the result of partially
7127  * aggregating the rows from the input relation.
7128  */
7129  partially_grouped_rel = fetch_upper_rel(root,
7130  UPPERREL_PARTIAL_GROUP_AGG,
7131  grouped_rel->relids);
7132  partially_grouped_rel->consider_parallel =
7133  grouped_rel->consider_parallel;
7134  partially_grouped_rel->reloptkind = grouped_rel->reloptkind;
7135  partially_grouped_rel->serverid = grouped_rel->serverid;
7136  partially_grouped_rel->userid = grouped_rel->userid;
7137  partially_grouped_rel->useridiscurrent = grouped_rel->useridiscurrent;
7138  partially_grouped_rel->fdwroutine = grouped_rel->fdwroutine;
7139 
7140  /*
7141  * Build target list for partial aggregate paths. These paths cannot just
7142  * emit the same tlist as regular aggregate paths, because (1) we must
7143  * include Vars and Aggrefs needed in HAVING, which might not appear in
7144  * the result tlist, and (2) the Aggrefs must be set in partial mode.
7145  */
7146  partially_grouped_rel->reltarget =
7147  make_partial_grouping_target(root, grouped_rel->reltarget,
7148  extra->havingQual);
7149 
7150  if (!extra->partial_costs_set)
7151  {
7152  /*
7153  * Collect statistics about aggregates for estimating costs of
7154  * performing aggregation in parallel.
7155  */
7156  MemSet(agg_partial_costs, 0, sizeof(AggClauseCosts));
7157  MemSet(agg_final_costs, 0, sizeof(AggClauseCosts));
7158  if (parse->hasAggs)
7159  {
7160  /* partial phase */
7161  get_agg_clause_costs(root, AGGSPLIT_INITIAL_SERIAL,
7162  agg_partial_costs);
7163 
7164  /* final phase */
7165  get_agg_clause_costs(root, AGGSPLIT_FINAL_DESERIAL,
7166  agg_final_costs);
7167  }
7168 
7169  extra->partial_costs_set = true;
7170  }
7171 
7172  /* Estimate number of partial groups. */
7173  if (cheapest_total_path != NULL)
7174  dNumPartialGroups =
7175  get_number_of_groups(root,
7176  cheapest_total_path->rows,
7177  gd,
7178  extra->targetList);
7179  if (cheapest_partial_path != NULL)
7180  dNumPartialPartialGroups =
7181  get_number_of_groups(root,
7182  cheapest_partial_path->rows,
7183  gd,
7184  extra->targetList);
7185 
7186  if (can_sort && cheapest_total_path != NULL)
7187  {
7188  /* This should have been checked previously */
7189  Assert(parse->hasAggs || parse->groupClause);
7190 
7191  /*
7192  * Use any available suitably-sorted path as input, and also consider
7193  * sorting the cheapest partial path.
7194  */
7195  foreach(lc, input_rel->pathlist)
7196  {
7197  ListCell *lc2;
7198  Path *path = (Path *) lfirst(lc);
7199  Path *path_save = path;
7200  List *pathkey_orderings = NIL;
7201 
7202  /* generate alternative group orderings that might be useful */
7203  pathkey_orderings = get_useful_group_keys_orderings(root, path);
7204 
7205  Assert(list_length(pathkey_orderings) > 0);
7206 
7207  /* process all potentially interesting grouping reorderings */
7208  foreach(lc2, pathkey_orderings)
7209  {
7210  PathKeyInfo *info = (PathKeyInfo *) lfirst(lc2);
7211 
7212  /* restore the path (we replace it in the loop) */
7213  path = path_save;
7214 
7215  path = make_ordered_path(root,
7216  partially_grouped_rel,
7217  path,
7218  cheapest_total_path,
7219  info->pathkeys);
7220 
7221  if (path == NULL)
7222  continue;
7223 
7224  if (parse->hasAggs)
7225  add_path(partially_grouped_rel, (Path *)
7226  create_agg_path(root,
7227  partially_grouped_rel,
7228  path,
7229  partially_grouped_rel->reltarget,
7230  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7231  AGGSPLIT_INITIAL_SERIAL,
7232  info->clauses,
7233  NIL,
7234  agg_partial_costs,
7235  dNumPartialGroups));
7236  else
7237  add_path(partially_grouped_rel, (Path *)
7238  create_group_path(root,
7239  partially_grouped_rel,
7240  path,
7241  info->clauses,
7242  NIL,
7243  dNumPartialGroups));
7244  }
7245  }
7246  }
7247 
7248  if (can_sort && cheapest_partial_path != NULL)
7249  {
7250  /* Similar to above logic, but for partial paths. */
7251  foreach(lc, input_rel->partial_pathlist)
7252  {
7253  ListCell *lc2;
7254  Path *path = (Path *) lfirst(lc);
7255  Path *path_save = path;
7256  List *pathkey_orderings = NIL;
7257 
7258  /* generate alternative group orderings that might be useful */
7259  pathkey_orderings = get_useful_group_keys_orderings(root, path);
7260 
7261  Assert(list_length(pathkey_orderings) > 0);
7262 
7263  /* process all potentially interesting grouping reorderings */
7264  foreach(lc2, pathkey_orderings)
7265  {
7266  PathKeyInfo *info = (PathKeyInfo *) lfirst(lc2);
7267 
7268 
7269  /* restore the path (we replace it in the loop) */
7270  path = path_save;
7271 
7272  path = make_ordered_path(root,
7273  partially_grouped_rel,
7274  path,
7275  cheapest_partial_path,
7276  info->pathkeys);
7277 
7278  if (path == NULL)
7279  continue;
7280 
7281  if (parse->hasAggs)
7282  add_partial_path(partially_grouped_rel, (Path *)
7283  create_agg_path(root,
7284  partially_grouped_rel,
7285  path,
7286  partially_grouped_rel->reltarget,
7287  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7288  AGGSPLIT_INITIAL_SERIAL,
7289  info->clauses,
7290  NIL,
7291  agg_partial_costs,
7292  dNumPartialPartialGroups));
7293  else
7294  add_partial_path(partially_grouped_rel, (Path *)
7295  create_group_path(root,
7296  partially_grouped_rel,
7297  path,
7298  info->clauses,
7299  NIL,
7300  dNumPartialPartialGroups));
7301  }
7302  }
7303  }
7304 
7305  /*
7306  * Add a partially-grouped HashAgg Path where possible
7307  */
7308  if (can_hash && cheapest_total_path != NULL)
7309  {
7310  /* Checked above */
7311  Assert(parse->hasAggs || parse->groupClause);
7312 
7313  add_path(partially_grouped_rel, (Path *)
7314  create_agg_path(root,
7315  partially_grouped_rel,
7316  cheapest_total_path,
7317  partially_grouped_rel->reltarget,
7318  AGG_HASHED,
7319  AGGSPLIT_INITIAL_SERIAL,
7320  root->processed_groupClause,
7321  NIL,
7322  agg_partial_costs,
7323  dNumPartialGroups));
7324  }
7325 
7326  /*
7327  * Now add a partially-grouped HashAgg partial Path where possible
7328  */
7329  if (can_hash && cheapest_partial_path != NULL)
7330  {
7331  add_partial_path(partially_grouped_rel, (Path *)
7332  create_agg_path(root,
7333  partially_grouped_rel,
7334  cheapest_partial_path,
7335  partially_grouped_rel->reltarget,
7336  AGG_HASHED,
7337  AGGSPLIT_INITIAL_SERIAL,
7338  root->processed_groupClause,
7339  NIL,
7340  agg_partial_costs,
7341  dNumPartialPartialGroups));
7342  }
7343 
7344  /*
7345  * If there is an FDW that's responsible for all baserels of the query,
7346  * let it consider adding partially grouped ForeignPaths.
7347  */
7348  if (partially_grouped_rel->fdwroutine &&
7349  partially_grouped_rel->fdwroutine->GetForeignUpperPaths)
7350  {
7351  FdwRoutine *fdwroutine = partially_grouped_rel->fdwroutine;
7352 
7353  fdwroutine->GetForeignUpperPaths(root,
7354  UPPERREL_PARTIAL_GROUP_AGG,
7355  input_rel, partially_grouped_rel,
7356  extra);
7357  }
7358 
7359  return partially_grouped_rel;
7360 }
@ AGGSPLIT_INITIAL_SERIAL
Definition: nodes.h:378
@ UPPERREL_PARTIAL_GROUP_AGG
Definition: pathnodes.h:72
static PathTarget * make_partial_grouping_target(PlannerInfo *root, PathTarget *grouping_target, Node *havingQual)
Definition: planner.c:5409
GetForeignUpperPaths_function GetForeignUpperPaths
Definition: fdwapi.h:226
AggClauseCosts agg_partial_costs
Definition: pathnodes.h:3281
RelOptKind reloptkind
Definition: pathnodes.h:855

References add_partial_path(), add_path(), GroupPathExtraData::agg_final_costs, AGG_HASHED, GroupPathExtraData::agg_partial_costs, AGG_PLAIN, AGG_SORTED, AGGSPLIT_FINAL_DESERIAL, AGGSPLIT_INITIAL_SERIAL, Assert, RelOptInfo::cheapest_total_path, PathKeyInfo::clauses, RelOptInfo::consider_parallel, create_agg_path(), create_group_path(), fetch_upper_rel(), GroupPathExtraData::flags, get_agg_clause_costs(), get_number_of_groups(), get_useful_group_keys_orderings(), FdwRoutine::GetForeignUpperPaths, GROUPING_CAN_USE_HASH, GROUPING_CAN_USE_SORT, GroupPathExtraData::havingQual, lfirst, linitial, list_length(), make_ordered_path(), make_partial_grouping_target(), MemSet, NIL, parse(), GroupPathExtraData::partial_costs_set, RelOptInfo::partial_pathlist, PARTITIONWISE_AGGREGATE_PARTIAL, PathKeyInfo::pathkeys, RelOptInfo::pathlist, GroupPathExtraData::patype, RelOptInfo::relids, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, Path::rows, RelOptInfo::serverid, GroupPathExtraData::targetList, UPPERREL_PARTIAL_GROUP_AGG, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by create_ordinary_grouping_paths().
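
The AGGSPLIT_INITIAL_SERIAL / AGGSPLIT_FINAL_DESERIAL costing above reflects the two-phase split that makes partial aggregation work: the partial phase emits transition states per group, and the final phase combines states before producing the result. A standalone sketch using avg() as the example (not PostgreSQL code; serialization between the phases is omitted):

#include <stdio.h>

struct avg_state
{
    double  sum;
    long    count;
};

/* partial phase: transition function applied within one worker */
static void
avg_trans(struct avg_state *st, double value)
{
    st->sum += value;
    st->count++;
}

/* final phase: combine partial states from different workers */
static void
avg_combine(struct avg_state *dst, const struct avg_state *src)
{
    dst->sum += src->sum;
    dst->count += src->count;
}

int
main(void)
{
    struct avg_state w1 = {0}, w2 = {0}, final = {0};

    /* two workers each partially aggregate their share of one group */
    avg_trans(&w1, 10.0);
    avg_trans(&w1, 20.0);
    avg_trans(&w2, 30.0);

    avg_combine(&final, &w1);
    avg_combine(&final, &w2);

    printf("avg = %.2f\n", final.sum / final.count);    /* 20.00 */
    return 0;
}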

◆ create_partitionwise_grouping_paths()

static void create_partitionwise_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
RelOptInfo grouped_rel,
RelOptInfo partially_grouped_rel,
const AggClauseCosts agg_costs,
grouping_sets_data gd,
PartitionwiseAggregateType  patype,
GroupPathExtraData extra 
)
static

Definition at line 7738 of file planner.c.

7746 {
7747  List *grouped_live_children = NIL;
7748  List *partially_grouped_live_children = NIL;
7749  PathTarget *target = grouped_rel->reltarget;
7750  bool partial_grouping_valid = true;
7751  int i;
7752 
7753  Assert(patype != PARTITIONWISE_AGGREGATE_NONE);
7754  Assert(patype != PARTITIONWISE_AGGREGATE_PARTIAL ||
7755  partially_grouped_rel != NULL);
7756 
7757  /* Add paths for partitionwise aggregation/grouping. */
7758  i = -1;
7759  while ((i = bms_next_member(input_rel->live_parts, i)) >= 0)
7760  {
7761  RelOptInfo *child_input_rel = input_rel->part_rels[i];
7762  PathTarget *child_target;
7763  AppendRelInfo **appinfos;
7764  int nappinfos;
7765  GroupPathExtraData child_extra;
7766  RelOptInfo *child_grouped_rel;
7767  RelOptInfo *child_partially_grouped_rel;
7768 
7769  Assert(child_input_rel != NULL);
7770 
7771  /* Dummy children can be ignored. */
7772  if (IS_DUMMY_REL(child_input_rel))
7773  continue;
7774 
7775  child_target = copy_pathtarget(target);
7776 
7777  /*
7778  * Copy the given "extra" structure as is and then override the
7779  * members specific to this child.
7780  */
7781  memcpy(&child_extra, extra, sizeof(child_extra));
7782 
7783  appinfos = find_appinfos_by_relids(root, child_input_rel->relids,
7784  &nappinfos);
7785 
7786  child_target->exprs = (List *)
7787  adjust_appendrel_attrs(root,
7788  (Node *) target->exprs,
7789  nappinfos, appinfos);
7790 
7791  /* Translate havingQual and targetList. */
7792  child_extra.havingQual = (Node *)
7793  adjust_appendrel_attrs(root,
7794  extra->havingQual,
7795  nappinfos, appinfos);
7796  child_extra.targetList = (List *)
7797  adjust_appendrel_attrs(root,
7798  (Node *) extra->targetList,
7799  nappinfos, appinfos);
7800 
7801  /*
7802  * extra->patype was the value computed for our parent rel; patype is
7803  * the value for this relation. For the child, our value is its
7804  * parent rel's value.
7805  */
7806  child_extra.patype = patype;
7807 
7808  /*
7809  * Create grouping relation to hold fully aggregated grouping and/or
7810  * aggregation paths for the child.
7811  */
7812  child_grouped_rel = make_grouping_rel(root, child_input_rel,
7813  child_target,
7814  extra->target_parallel_safe,
7815  child_extra.havingQual);
7816 
7817  /* Create grouping paths for this child relation. */
7818  create_ordinary_grouping_paths(root, child_input_rel,
7819  child_grouped_rel,
7820  agg_costs, gd, &child_extra,
7821  &child_partially_grouped_rel);
7822 
7823  if (child_partially_grouped_rel)
7824  {
7825  partially_grouped_live_children =
7826  lappend(partially_grouped_live_children,
7827  child_partially_grouped_rel);
7828  }
7829  else
7830  partial_grouping_valid = false;
7831 
7832  if (patype == PARTITIONWISE_AGGREGATE_FULL)
7833  {
7834  set_cheapest(child_grouped_rel);
7835  grouped_live_children = lappend(grouped_live_children,
7836  child_grouped_rel);
7837  }
7838 
7839  pfree(appinfos);
7840  }
7841 
7842  /*
7843  * Try to create append paths for partially grouped children. For full
7844  * partitionwise aggregation, we might have paths in the partial_pathlist
7845  * if parallel aggregation is possible. For partial partitionwise
7846  * aggregation, we may have paths in both pathlist and partial_pathlist.
7847  *
7848  * NB: We must have a partially grouped path for every child in order to
7849  * generate a partially grouped path for this relation.
7850  */
7851  if (partially_grouped_rel && partial_grouping_valid)
7852  {
7853  Assert(partially_grouped_live_children != NIL);
7854 
7855  add_paths_to_append_rel(root, partially_grouped_rel,
7856  partially_grouped_live_children);
7857 
7858  /*
7859  * We need to call set_cheapest(), since the finalization step will use the
7860  * cheapest path from the rel.
7861  */
7862  if (partially_grouped_rel->pathlist)
7863  set_cheapest(partially_grouped_rel);
7864  }
7865 
7866  /* If possible, create append paths for fully grouped children. */
7867  if (patype == PARTITIONWISE_AGGREGATE_FULL)
7868  {
7869  Assert(grouped_live_children != NIL);
7870 
7871  add_paths_to_append_rel(root, grouped_rel, grouped_live_children);
7872  }
7873 }

References add_paths_to_append_rel(), adjust_appendrel_attrs(), Assert, bms_next_member(), copy_pathtarget(), create_ordinary_grouping_paths(), PathTarget::exprs, find_appinfos_by_relids(), GroupPathExtraData::havingQual, i, IS_DUMMY_REL, lappend(), RelOptInfo::live_parts, make_grouping_rel(), NIL, PARTITIONWISE_AGGREGATE_FULL, PARTITIONWISE_AGGREGATE_NONE, PARTITIONWISE_AGGREGATE_PARTIAL, RelOptInfo::pathlist, GroupPathExtraData::patype, pfree(), RelOptInfo::relids, RelOptInfo::reltarget, root, set_cheapest(), GroupPathExtraData::target_parallel_safe, and GroupPathExtraData::targetList.

Referenced by create_ordinary_grouping_paths().
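
To make the per-child translation concrete, here is a brief illustration (ours, with invented table and column names, not a comment from the source):

/*
 * Illustrative example: for a parent table "t" partitioned into t1 and
 * t2, a parent grouping target such as "a, sum(x)" is translated for
 * each child with adjust_appendrel_attrs(), yielding t1.a, sum(t1.x)
 * for t1 and likewise for t2.  Each child then gets its own grouped
 * rel via make_grouping_rel() and its own paths via
 * create_ordinary_grouping_paths(); the per-child results are finally
 * combined with add_paths_to_append_rel().
 */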

◆ create_window_paths()

static RelOptInfo * create_window_paths ( PlannerInfo *root,
RelOptInfo *input_rel,
PathTarget *input_target,
PathTarget *output_target,
bool  output_target_parallel_safe,
WindowFuncLists *wflists,
List *activeWindows
)
static

Definition at line 4410 of file planner.c.

4417 {
4418  RelOptInfo *window_rel;
4419  ListCell *lc;
4420 
4421  /* For now, do all work in the (WINDOW, NULL) upperrel */
4422  window_rel = fetch_upper_rel(root, UPPERREL_WINDOW, NULL);
4423 
4424  /*
4425  * If the input relation is not parallel-safe, then the window relation
4426  * can't be parallel-safe, either. Otherwise, we need to examine the
4427  * target list and active windows for non-parallel-safe constructs.
4428  */
4429  if (input_rel->consider_parallel && output_target_parallel_safe &&
4430  is_parallel_safe(root, (Node *) activeWindows))
4431  window_rel->consider_parallel = true;
4432 
4433  /*
4434  * If the input rel belongs to a single FDW, so does the window rel.
4435  */
4436  window_rel->serverid = input_rel->serverid;
4437  window_rel->userid = input_rel->userid;
4438  window_rel->useridiscurrent = input_rel->useridiscurrent;
4439  window_rel->fdwroutine = input_rel->fdwroutine;
4440 
4441  /*
4442  * Consider computing window functions starting from the existing
4443  * cheapest-total path (which will likely require a sort) as well as any
4444  * existing paths that satisfy or partially satisfy root->window_pathkeys.
4445  */
4446  foreach(lc, input_rel->pathlist)
4447  {
4448  Path *path = (Path *) lfirst(lc);
4449  int presorted_keys;
4450 
4451  if (path == input_rel->cheapest_total_path ||
4452  pathkeys_count_contained_in(root->window_pathkeys, path->pathkeys,
4453  &presorted_keys) ||
4454  presorted_keys > 0)
4455  create_one_window_path(root,
4456  window_rel,
4457  path,
4458  input_target,
4459  output_target,
4460  wflists,
4461  activeWindows);
4462  }
4463 
4464  /*
4465  * If there is an FDW that's responsible for all baserels of the query,
4466  * let it consider adding ForeignPaths.
4467  */
4468  if (window_rel->fdwroutine &&
4469  window_rel->fdwroutine->GetForeignUpperPaths)
4470  window_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_WINDOW,
4471  input_rel, window_rel,
4472  NULL);
4473 
4474  /* Let extensions possibly add some more paths */
4475  if (create_upper_paths_hook)
4476  (*create_upper_paths_hook) (root, UPPERREL_WINDOW,
4477  input_rel, window_rel, NULL);
4478 
4479  /* Now choose the best path(s) */
4480  set_cheapest(window_rel);
4481 
4482  return window_rel;
4483 }
bool is_parallel_safe(PlannerInfo *root, Node *node)
Definition: clauses.c:753
@ UPPERREL_WINDOW
Definition: pathnodes.h:75
static void create_one_window_path(PlannerInfo *root, RelOptInfo *window_rel, Path *path, PathTarget *input_target, PathTarget *output_target, WindowFuncLists *wflists, List *activeWindows)
Definition: planner.c:4497

References RelOptInfo::cheapest_total_path, RelOptInfo::consider_parallel, create_one_window_path(), create_upper_paths_hook, fetch_upper_rel(), is_parallel_safe(), lfirst, Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::pathlist, root, RelOptInfo::serverid, set_cheapest(), UPPERREL_WINDOW, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by grouping_planner().

◆ expression_planner()

Expr* expression_planner ( Expr *expr)

Definition at line 6457 of file planner.c.

6458 {
6459  Node *result;
6460 
6461  /*
6462  * Convert named-argument function calls, insert default arguments and
6463  * simplify constant subexprs
6464  */
6465  result = eval_const_expressions(NULL, (Node *) expr);
6466 
6467  /* Fill in opfuncid values if missing */
6468  fix_opfuncids(result);
6469 
6470  return (Expr *) result;
6471 }
Node * eval_const_expressions(PlannerInfo *root, Node *node)
Definition: clauses.c:2254
void fix_opfuncids(Node *node)
Definition: nodeFuncs.c:1837

References eval_const_expressions(), and fix_opfuncids().

Referenced by ATExecAddColumn(), ATExecSetExpression(), ATPrepAlterColumnType(), BeginCopyFrom(), ComputePartitionAttrs(), contain_mutable_functions_after_planning(), contain_volatile_functions_after_planning(), ExecPrepareCheck(), ExecPrepareExpr(), ExecPrepareQual(), load_domaintype_info(), set_baserel_partition_constraint(), slot_fill_defaults(), StoreAttrDefault(), and transformPartitionBoundValue().
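
As a usage illustration only (the helper below is hypothetical, not part of planner.c), a caller holding a fully parse-analyzed expression, such as a column default, could prepare it for execution like this:

#include "postgres.h"
#include "optimizer/optimizer.h"	/* expression_planner() */

/* Hypothetical helper: make an analyzed expression executor-ready. */
static Expr *
simplify_for_execution(Expr *raw_expr)
{
	/* Folds constants, inserts defaults, and fills in opfuncids. */
	return expression_planner(raw_expr);
}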

◆ expression_planner_with_deps()

Expr* expression_planner_with_deps ( Expr *expr,
List **  relationOids,
List **  invalItems 
)

Definition at line 6484 of file planner.c.

6487 {
6488  Node *result;
6489  PlannerGlobal glob;
6490  PlannerInfo root;
6491 
6492  /* Make up dummy planner state so we can use setrefs machinery */
6493  MemSet(&glob, 0, sizeof(glob));
6494  glob.type = T_PlannerGlobal;
6495  glob.relationOids = NIL;
6496  glob.invalItems = NIL;
6497 
6498  MemSet(&root, 0, sizeof(root));
6499  root.type = T_PlannerInfo;
6500  root.glob = &glob;
6501 
6502  /*
6503  * Convert named-argument function calls, insert default arguments and
6504  * simplify constant subexprs. Collect identities of inlined functions
6505  * and elided domains, too.
6506  */
6507  result = eval_const_expressions(&root, (Node *) expr);
6508 
6509  /* Fill in opfuncid values if missing */
6510  fix_opfuncids(result);
6511 
6512  /*
6513  * Now walk the finished expression to find anything else we ought to
6514  * record as an expression dependency.
6515  */
6516  (void) extract_query_dependencies_walker(result, &root);
6517 
6518  *relationOids = glob.relationOids;
6519  *invalItems = glob.invalItems;
6520 
6521  return (Expr *) result;
6522 }
bool extract_query_dependencies_walker(Node *node, PlannerInfo *context)
Definition: setrefs.c:3577
List * invalItems
Definition: pathnodes.h:135
List * relationOids
Definition: pathnodes.h:132

References eval_const_expressions(), extract_query_dependencies_walker(), fix_opfuncids(), PlannerGlobal::invalItems, MemSet, NIL, PlannerGlobal::relationOids, and root.

Referenced by GetCachedExpression().
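
A hedged usage sketch (hypothetical caller, not from the source): the point of this variant is that the output lists let the caller register invalidation dependencies, as GetCachedExpression() does:

#include "postgres.h"
#include "optimizer/optimizer.h"	/* expression_planner_with_deps() */

/* Hypothetical helper: plan an expression and capture its dependencies. */
static Expr *
plan_with_dependencies(Expr *raw_expr, List **relationOids, List **invalItems)
{
	/* The lists report relations and inlined functions the result depends on. */
	return expression_planner_with_deps(raw_expr, relationOids, invalItems);
}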

◆ extract_rollup_sets()

static List * extract_rollup_sets ( List *groupingSets )
static

Definition at line 2868 of file planner.c.

2869 {
2870  int num_sets_raw = list_length(groupingSets);
2871  int num_empty = 0;
2872  int num_sets = 0; /* distinct sets */
2873  int num_chains = 0;
2874  List *result = NIL;
2875  List **results;
2876  List **orig_sets;
2877  Bitmapset **set_masks;
2878  int *chains;
2879  short **adjacency;
2880  short *adjacency_buf;
2881  BipartiteMatchState *state;
2882  int i;
2883  int j;
2884  int j_size;
2885  ListCell *lc1 = list_head(groupingSets);
2886  ListCell *lc;
2887 
2888  /*
2889  * Start by stripping out empty sets. The algorithm doesn't require this,
2890  * but the planner currently needs all empty sets to be returned in the
2891  * first list, so we strip them here and add them back after.
2892  */
2893  while (lc1 && lfirst(lc1) == NIL)
2894  {
2895  ++num_empty;
2896  lc1 = lnext(groupingSets, lc1);
2897  }
2898 
2899  /* bail out now if it turns out that all we had were empty sets. */
2900  if (!lc1)
2901  return list_make1(groupingSets);
2902 
2903  /*----------
2904  * We don't strictly need to remove duplicate sets here, but if we don't,
2905  * they tend to become scattered through the result, which is a bit
2906  * confusing (and irritating if we ever decide to optimize them out).
2907  * So we remove them here and add them back after.
2908  *
2909  * For each non-duplicate set, we fill in the following:
2910  *
2911  * orig_sets[i] = list of the original set lists
2912  * set_masks[i] = bitmapset for testing inclusion
2913  * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
2914  *
2915  * chains[i] will be the result group this set is assigned to.
2916  *
2917  * We index all of these from 1 rather than 0 because it is convenient
2918  * to leave 0 free for the NIL node in the graph algorithm.
2919  *----------
2920  */
2921  orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
2922  set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
2923  adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
2924  adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
2925 
2926  j_size = 0;
2927  j = 0;
2928  i = 1;
2929 
2930  for_each_cell(lc, groupingSets, lc1)
2931  {
2932  List *candidate = (List *) lfirst(lc);
2933  Bitmapset *candidate_set = NULL;
2934  ListCell *lc2;
2935  int dup_of = 0;
2936 
2937  foreach(lc2, candidate)
2938  {
2939  candidate_set = bms_add_member(candidate_set, lfirst_int(lc2));
2940  }
2941 
2942  /* we can only be a dup if we're the same length as a previous set */
2943  if (j_size == list_length(candidate))
2944  {
2945  int k;
2946 
2947  for (k = j; k < i; ++k)
2948  {
2949  if (bms_equal(set_masks[k], candidate_set))
2950  {
2951  dup_of = k;
2952  break;
2953  }
2954  }
2955  }
2956  else if (j_size < list_length(candidate))
2957  {
2958  j_size = list_length(candidate);
2959  j = i;
2960  }
2961 
2962  if (dup_of > 0)
2963  {
2964  orig_sets[dup_of] = lappend(orig_sets[dup_of], candidate);
2965  bms_free(candidate_set);
2966  }
2967  else
2968  {
2969  int k;
2970  int n_adj = 0;
2971 
2972  orig_sets[i] = list_make1(candidate);
2973  set_masks[i] = candidate_set;
2974 
2975  /* fill in adjacency list; no need to compare equal-size sets */
2976 
2977  for (k = j - 1; k > 0; --k)
2978  {
2979  if (bms_is_subset(set_masks[k], candidate_set))
2980  adjacency_buf[++n_adj] = k;
2981  }
2982 
2983  if (n_adj > 0)
2984  {
2985  adjacency_buf[0] = n_adj;
2986  adjacency[i] = palloc((n_adj + 1) * sizeof(short));
2987  memcpy(adjacency[i], adjacency_buf, (n_adj + 1) * sizeof(short));
2988  }
2989  else
2990  adjacency[i] = NULL;
2991 
2992  ++i;
2993  }
2994  }
2995 
2996  num_sets = i - 1;
2997 
2998  /*
2999  * Apply the graph matching algorithm to do the work.
3000  */
3001  state = BipartiteMatch(num_sets, num_sets, adjacency);
3002 
3003  /*
3004  * Now, the state->pair* fields have the info we need to assign sets to
3005  * chains. Two sets (u,v) belong to the same chain if pair_uv[u] = v or
3006  * pair_vu[v] = u (both will be true, but we check both so that we can do
3007  * it in one pass)
3008  */
3009  chains = palloc0((num_sets + 1) * sizeof(int));
3010 
3011  for (i = 1; i <= num_sets; ++i)
3012  {
3013  int u = state->pair_vu[i];
3014  int v = state->pair_uv[i];
3015 
3016  if (u > 0 && u < i)
3017  chains[i] = chains[u];
3018  else if (v > 0 && v < i)
3019  chains[i] = chains[v];
3020  else
3021  chains[i] = ++num_chains;
3022  }
3023 
3024  /* build result lists. */
3025  results = palloc0((num_chains + 1) * sizeof(List *));
3026 
3027  for (i = 1; i <= num_sets; ++i)
3028  {
3029  int c = chains[i];
3030 
3031  Assert(c > 0);
3032 
3033  results[c] = list_concat(results[c], orig_sets[i]);
3034  }
3035 
3036  /* push any empty sets back on the first list. */
3037  while (num_empty-- > 0)
3038  results[1] = lcons(NIL, results[1]);
3039 
3040  /* make result list */
3041  for (i = 1; i <= num_chains; ++i)
3042  result = lappend(result, results[i]);
3043 
3044  /*
3045  * Free all the things.
3046  *
3047  * (This is over-fussy for small sets but for large sets we could have
3048  * tied up a nontrivial amount of memory.)
3049  */
3050  BipartiteMatchFree(state);
3051  pfree(results);
3052  pfree(chains);
3053  for (i = 1; i <= num_sets; ++i)
3054  if (adjacency[i])
3055  pfree(adjacency[i]);
3056  pfree(adjacency);
3057  pfree(adjacency_buf);
3058  pfree(orig_sets);
3059  for (i = 1; i <= num_sets; ++i)
3060  bms_free(set_masks[i]);
3061  pfree(set_masks);
3062 
3063  return result;
3064 }
void BipartiteMatchFree(BipartiteMatchState *state)
BipartiteMatchState * BipartiteMatch(int u_size, int v_size, short **adjacency)
bool bms_equal(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:142
bool bms_is_subset(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:412
void bms_free(Bitmapset *a)
Definition: bitmapset.c:239
int j
Definition: isn.c:74
void * palloc0(Size size)
Definition: mcxt.c:1346
char * c
Definition: regguts.h:323

References Assert, BipartiteMatch(), BipartiteMatchFree(), bms_add_member(), bms_equal(), bms_free(), bms_is_subset(), for_each_cell, i, j, lappend(), lcons(), lfirst, lfirst_int, list_concat(), list_head(), list_length(), list_make1, lnext(), NIL, palloc(), palloc0(), and pfree().

Referenced by preprocess_grouping_sets().
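
A worked example may help (ours, not from the source; the matching may pick either of two equivalent decompositions):

/*
 * Given GROUP BY GROUPING SETS ((a,b,c), (a,b), (c), ()):
 * (a,b) and (c) are both subsets of (a,b,c) but incomparable with each
 * other, so at least two chains are needed.  One minimal decomposition
 * is [ (a,b,c), (a,b) ] and [ (c) ]; the stripped empty set is pushed
 * back onto the first list, giving roughly
 *     [ [(), (a,b), (a,b,c)], [(c)] ]
 * Each sublist later becomes one rollup, ordered by
 * reorder_grouping_sets().
 */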

◆ gather_grouping_paths()

static void gather_grouping_paths ( PlannerInfo *root,
RelOptInfo *rel
)
static

Definition at line 7376 of file planner.c.

7377 {
7378  ListCell *lc;
7379  Path *cheapest_partial_path;
7380  List *groupby_pathkeys;
7381 
7382  /*
7383  * This occurs after any partial aggregation has taken place, so trim off
7384  * any pathkeys added for ORDER BY / DISTINCT aggregates.
7385  */
7386  if (list_length(root->group_pathkeys) > root->num_groupby_pathkeys)
7387  groupby_pathkeys = list_copy_head(root->group_pathkeys,
7388  root->num_groupby_pathkeys);
7389  else
7390  groupby_pathkeys = root->group_pathkeys;
7391 
7392  /* Try Gather for unordered paths and Gather Merge for ordered ones. */
7393  generate_useful_gather_paths(root, rel, true);
7394 
7395  cheapest_partial_path = linitial(rel->partial_pathlist);
7396 
7397  /* XXX Shouldn't this also consider the group-key-reordering? */
7398  foreach(lc, rel->partial_pathlist)
7399  {
7400  Path *path = (Path *) lfirst(lc);
7401  bool is_sorted;
7402  int presorted_keys;
7403  double total_groups;
7404 
7405  is_sorted = pathkeys_count_contained_in(groupby_pathkeys,
7406  path->pathkeys,
7407  &presorted_keys);
7408 
7409  if (is_sorted)
7410  continue;
7411 
7412  /*
7413  * Try at least sorting the cheapest path and also try incrementally
7414  * sorting any path which is partially sorted already (no need to deal
7415  * with paths which have presorted keys when incremental sort is
7416  * disabled unless it's the cheapest input path).
7417  */
7418  if (path != cheapest_partial_path &&
7419  (presorted_keys == 0 || !enable_incremental_sort))
7420  continue;
7421 
7422  total_groups = path->rows * path->parallel_workers;
7423 
7424  /*
7425  * We've no need to consider both a sort and incremental sort. We'll
7426  * just do a sort if there are no presorted keys and an incremental
7427  * sort when there are presorted keys.
7428  */
7429  if (presorted_keys == 0 || !enable_incremental_sort)
7430  path = (Path *) create_sort_path(root, rel, path,
7431  groupby_pathkeys,
7432  -1.0);
7433  else
7434  path = (Path *) create_incremental_sort_path(root,
7435  rel,
7436  path,
7437  groupby_pathkeys,
7438  presorted_keys,
7439  -1.0);
7440 
7441  path = (Path *)
7442  create_gather_merge_path(root,
7443  rel,
7444  path,
7445  rel->reltarget,
7446  groupby_pathkeys,
7447  NULL,
7448  &total_groups);
7449 
7450  add_path(rel, path);
7451  }
7452 }
List * list_copy_head(const List *oldlist, int len)
Definition: list.c:1593

References add_path(), create_gather_merge_path(), create_incremental_sort_path(), create_sort_path(), enable_incremental_sort, generate_useful_gather_paths(), lfirst, linitial, list_copy_head(), list_length(), Path::parallel_workers, RelOptInfo::partial_pathlist, Path::pathkeys, pathkeys_count_contained_in(), RelOptInfo::reltarget, root, and Path::rows.

Referenced by add_paths_to_grouping_rel(), and create_ordinary_grouping_paths().
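
To illustrate the sort choice (our reading of the code above, not a comment from the source):

/*
 * With groupby_pathkeys (a, b): a partial path already sorted by (a)
 * has presorted_keys = 1 and, when enable_incremental_sort is on, gets
 * an incremental sort on b; the cheapest partial path, if unsorted,
 * gets a full Sort.  Either way the result is topped with a Gather
 * Merge producing a completely ordered result.
 */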

◆ generate_setop_child_grouplist()

static List * generate_setop_child_grouplist ( SetOperationStmt *op,
List *targetlist
)
static

Definition at line 7939 of file planner.c.

7940 {
7941  List *grouplist = copyObject(op->groupClauses);
7942  ListCell *lg;
7943  ListCell *lt;
7944 
7945  lg = list_head(grouplist);
7946  foreach(lt, targetlist)
7947  {
7948  TargetEntry *tle = (TargetEntry *) lfirst(lt);
7949  SortGroupClause *sgc;
7950 
7951  /* resjunk columns could have sortgrouprefs. Leave these alone */
7952  if (tle->resjunk)
7953  continue;
7954 
7955  /* we expect every non-resjunk target to have a SortGroupClause */
7956  Assert(lg != NULL);
7957  sgc = (SortGroupClause *) lfirst(lg);
7958  lg = lnext(grouplist, lg);
7959 
7960  /* assign a tleSortGroupRef, or reuse the existing one */
7961  sgc->tleSortGroupRef = assignSortGroupRef(tle, targetlist);
7962  }
7963  Assert(lg == NULL);
7964  return grouplist;
7965 }
#define copyObject(obj)
Definition: nodes.h:224
Index assignSortGroupRef(TargetEntry *tle, List *tlist)

References Assert, assignSortGroupRef(), copyObject, lfirst, list_head(), lnext(), and SortGroupClause::tleSortGroupRef.

Referenced by standard_qp_callback().

◆ get_cheapest_fractional_path()

Path* get_cheapest_fractional_path ( RelOptInfo *rel,
double  tuple_fraction
)

Definition at line 6298 of file planner.c.

6299 {
6300  Path *best_path = rel->cheapest_total_path;
6301  ListCell *l;
6302 
6303  /* If all tuples will be retrieved, just return the cheapest-total path */
6304  if (tuple_fraction <= 0.0)
6305  return best_path;
6306 
6307  /* Convert absolute # of tuples to a fraction; no need to clamp to 0..1 */
6308  if (tuple_fraction >= 1.0 && best_path->rows > 0)
6309  tuple_fraction /= best_path->rows;
6310 
6311  foreach(l, rel->pathlist)
6312  {
6313  Path *path = (Path *) lfirst(l);
6314 
6315  if (path == rel->cheapest_total_path ||
6316  compare_fractional_path_costs(best_path, path, tuple_fraction) <= 0)
6317  continue;
6318 
6319  best_path = path;
6320  }
6321 
6322  return best_path;
6323 }
int compare_fractional_path_costs(Path *path1, Path *path2, double fraction)
Definition: pathnode.c:115

References RelOptInfo::cheapest_total_path, compare_fractional_path_costs(), lfirst, RelOptInfo::pathlist, and Path::rows.

Referenced by make_subplan(), and standard_planner().
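
As a usage sketch (hypothetical caller, not from the source), note the tuple_fraction convention visible in the code above: values <= 0 mean fetch everything, values below 1.0 are a fraction of the rows, and values >= 1.0 are an absolute row count:

#include "postgres.h"
#include "optimizer/planner.h"	/* get_cheapest_fractional_path() */

/* Hypothetical helper: pick a path when only about n rows will be
 * fetched, as for LIMIT n (n >= 1 is treated as an absolute count). */
static Path *
path_for_limit(RelOptInfo *rel, int64 n)
{
	return get_cheapest_fractional_path(rel, (double) n);
}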

◆ get_number_of_groups()

static double get_number_of_groups ( PlannerInfo *root,
double  path_rows,
grouping_sets_data *gd,
List *target_list
)
static

Definition at line 3536 of file planner.c.

3540 {
3541  Query *parse = root->parse;
3542  double dNumGroups;
3543 
3544  if (parse->groupClause)
3545  {
3546  List *groupExprs;
3547 
3548  if (parse->groupingSets)
3549  {
3550  /* Add up the estimates for each grouping set */
3551  ListCell *lc;
3552 
3553  Assert(gd); /* keep Coverity happy */
3554 
3555  dNumGroups = 0;
3556 
3557  foreach(lc, gd->rollups)
3558  {
3559  RollupData *rollup = lfirst_node(RollupData, lc);
3560  ListCell *lc2;
3561  ListCell *lc3;
3562 
3563  groupExprs = get_sortgrouplist_exprs(rollup->groupClause,
3564  target_list);
3565 
3566  rollup->numGroups = 0.0;
3567 
3568  forboth(lc2, rollup->gsets, lc3, rollup->gsets_data)
3569  {
3570  List *gset = (List *) lfirst(lc2);
3571  GroupingSetData *gs = lfirst_node(GroupingSetData, lc3);
3572  double numGroups = estimate_num_groups(root,
3573  groupExprs,
3574  path_rows,
3575  &gset,
3576  NULL);
3577 
3578  gs->numGroups = numGroups;
3579  rollup->numGroups += numGroups;
3580  }
3581 
3582  dNumGroups += rollup->numGroups;
3583  }
3584 
3585  if (gd->hash_sets_idx)
3586  {
3587  ListCell *lc2;
3588 
3589  gd->dNumHashGroups = 0;
3590 
3591  groupExprs = get_sortgrouplist_exprs(parse->groupClause,
3592  target_list);
3593 
3594  forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets)
3595  {
3596  List *gset = (List *) lfirst(lc);
3597  GroupingSetData *gs = lfirst_node(GroupingSetData, lc2);
3598  double numGroups = estimate_num_groups(root,
3599  groupExprs,
3600  path_rows,
3601  &gset,
3602  NULL);
3603 
3604  gs->numGroups = numGroups;
3605  gd->dNumHashGroups += numGroups;
3606  }
3607 
3608  dNumGroups += gd->dNumHashGroups;
3609  }
3610  }
3611  else
3612  {
3613  /* Plain GROUP BY -- estimate based on optimized groupClause */
3614  groupExprs = get_sortgrouplist_exprs(root->processed_groupClause,
3615  target_list);
3616 
3617  dNumGroups = estimate_num_groups(root, groupExprs, path_rows,
3618  NULL, NULL);
3619  }
3620  }
3621  else if (parse->groupingSets)
3622  {
3623  /* Empty grouping sets ... one result row for each one */
3624  dNumGroups = list_length(parse->groupingSets);
3625  }
3626  else if (parse->hasAggs || root->hasHavingQual)
3627  {
3628  /* Plain aggregation, one result row */
3629  dNumGroups = 1;
3630  }
3631  else
3632  {
3633  /* Not grouping */
3634  dNumGroups = 1;
3635  }
3636 
3637  return dNumGroups;
3638 }
List * hash_sets_idx
Definition: planner.c:98

References Assert, grouping_sets_data::dNumHashGroups, estimate_num_groups(), forboth, get_sortgrouplist_exprs(), RollupData::groupClause, RollupData::gsets, RollupData::gsets_data, grouping_sets_data::hash_sets_idx, lfirst, lfirst_node, list_length(), GroupingSetData::numGroups, RollupData::numGroups, parse(), grouping_sets_data::rollups, root, and grouping_sets_data::unsortable_sets.

Referenced by create_ordinary_grouping_paths(), and create_partial_grouping_paths().
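
A small worked example (ours, not from the source):

/*
 * For ROLLUP (a, b), the single rollup holds the sets (a,b), (a) and
 * ().  If estimate_num_groups() returns 100 for (a,b), 20 for (a) and
 * 1 for (), then rollup->numGroups = 121 and dNumGroups = 121.  With a
 * plain GROUP BY a, b, dNumGroups would simply be the estimate for
 * (a,b).
 */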

◆ group_by_has_partkey()

static bool group_by_has_partkey ( RelOptInfo *input_rel,
List *targetList,
List *groupClause
)
static

Definition at line 7882 of file planner.c.

7885 {
7886  List *groupexprs = get_sortgrouplist_exprs(groupClause, targetList);
7887  int cnt = 0;
7888  int partnatts;
7889 
7890  /* Input relation should be partitioned. */
7891  Assert(input_rel->part_scheme);
7892 
7893  /* Rule out early, if there are no partition keys present. */
7894  if (!input_rel->partexprs)
7895  return false;
7896 
7897  partnatts = input_rel->part_scheme->partnatts;
7898 
7899  for (cnt = 0; cnt < partnatts; cnt++)
7900  {
7901  List *partexprs = input_rel->partexprs[cnt];
7902  ListCell *lc;
7903  bool found = false;
7904 
7905  foreach(lc, partexprs)
7906  {
7907  Expr *partexpr = lfirst(lc);
7908 
7909  if (list_member(groupexprs, partexpr))
7910  {
7911  found = true;
7912  break;
7913  }
7914  }
7915 
7916  /*
7917  * If none of the partition key expressions matches any of the
7918  * GROUP BY expressions, return false.
7919  */
7920  if (!found)
7921  return false;
7922  }
7923 
7924  return true;
7925 }
bool list_member(const List *list, const void *datum)
Definition: list.c:661

References Assert, get_sortgrouplist_exprs(), lfirst, and list_member().

Referenced by create_ordinary_grouping_paths().
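
For example (illustrative only):

/*
 * For a table partitioned by (a), GROUP BY a, b passes this test,
 * since every partition key appears among the grouping expressions;
 * groups therefore cannot span partitions and full partitionwise
 * aggregation is safe.  GROUP BY b alone fails the test.
 */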

◆ groupclause_apply_groupingset()

static List * groupclause_apply_groupingset ( PlannerInfo *root,
List *force
)
static

Definition at line 2832 of file planner.c.

2833 {
2834  Query *parse = root->parse;
2835  List *new_groupclause = NIL;
2836  ListCell *sl;
2837 
2838  foreach(sl, gset)
2839  {
2840  Index ref = lfirst_int(sl);
2841  SortGroupClause *cl = get_sortgroupref_clause(ref, parse->groupClause);
2842 
2843  new_groupclause = lappend(new_groupclause, cl);
2844  }
2845  return new_groupclause;
2846 }
unsigned int Index
Definition: c.h:614
SortGroupClause * get_sortgroupref_clause(Index sortref, List *clauses)
Definition: tlist.c:422

References get_sortgroupref_clause(), lappend(), lfirst_int, NIL, parse(), and root.

Referenced by consider_groupingsets_paths(), and preprocess_grouping_sets().
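
For example (illustrative only):

/*
 * If parse->groupClause holds clauses with tleSortGroupRefs 1, 2 and 3
 * and the grouping set is (3, 1), the result is the SortGroupClauses
 * for refs 3 and 1, in that order, so the rebuilt groupClause matches
 * the grouping set exactly.
 */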

◆ grouping_planner()

static void grouping_planner ( PlannerInfo *root,
double  tuple_fraction,
SetOperationStmt *setops
)
static

Definition at line 1305 of file planner.c.

1307 {
1308  Query *parse = root->parse;
1309  int64 offset_est = 0;
1310  int64 count_est = 0;
1311  double limit_tuples = -1.0;
1312  bool have_postponed_srfs = false;
1313  PathTarget *final_target;
1314  List *final_targets;
1315  List *final_targets_contain_srfs;
1316  bool final_target_parallel_safe;
1317  RelOptInfo *current_rel;
1318  RelOptInfo *final_rel;
1319  FinalPathExtraData extra;
1320  ListCell *lc;
1321 
1322  /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
1323  if (parse->limitCount || parse->limitOffset)
1324  {
1325  tuple_fraction = preprocess_limit(root, tuple_fraction,
1326  &offset_est, &count_est);
1327 
1328  /*
1329  * If we have a known LIMIT, and don't have an unknown OFFSET, we can
1330  * estimate the effects of using a bounded sort.
1331  */
1332  if (count_est > 0 && offset_est >= 0)
1333  limit_tuples = (double) count_est + (double) offset_est;
1334  }
1335 
1336  /* Make tuple_fraction accessible to lower-level routines */
1337  root->tuple_fraction = tuple_fraction;
1338 
1339  if (parse->setOperations)
1340  {
1341  /*
1342  * Construct Paths for set operations. The results will not need any
1343  * work except perhaps a top-level sort and/or LIMIT. Note that any
1344  * special work for recursive unions is the responsibility of
1345  * plan_set_operations.
1346  */
1347  current_rel = plan_set_operations(root);
1348 
1349  /*
1350  * We should not need to call preprocess_targetlist, since we must be
1351  * in a SELECT query node. Instead, use the processed_tlist returned
1352  * by plan_set_operations (since this tells whether it returned any
1353  * resjunk columns!), and transfer any sort key information from the
1354  * original tlist.
1355  */
1356  Assert(parse->commandType == CMD_SELECT);
1357 
1358  /* for safety, copy processed_tlist instead of modifying in-place */
1359  root->processed_tlist =
1360  postprocess_setop_tlist(copyObject(root->processed_tlist),
1361  parse->targetList);
1362 
1363  /* Also extract the PathTarget form of the setop result tlist */
1364  final_target = current_rel->cheapest_total_path->pathtarget;
1365 
1366  /* And check whether it's parallel safe */
1367  final_target_parallel_safe =
1368  is_parallel_safe(root, (Node *) final_target->exprs);
1369 
1370  /* The setop result tlist couldn't contain any SRFs */
1371  Assert(!parse->hasTargetSRFs);
1372  final_targets = final_targets_contain_srfs = NIL;
1373 
1374  /*
1375  * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have
1376  * checked already, but let's make sure).
1377  */
1378  if (parse->rowMarks)
1379  ereport(ERROR,
1380  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1381  /*------
1382  translator: %s is a SQL row locking clause such as FOR UPDATE */
1383  errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
1384  LCS_asString(linitial_node(RowMarkClause,
1385  parse->rowMarks)->strength))));
1386 
1387  /*
1388  * Calculate pathkeys that represent result ordering requirements
1389  */
1390  Assert(parse->distinctClause == NIL);
1391  root->sort_pathkeys = make_pathkeys_for_sortclauses(root,
1392  parse->sortClause,
1393  root->processed_tlist);
1394  }
1395  else
1396  {
1397  /* No set operations, do regular planning */
1398  PathTarget *sort_input_target;
1399  List *sort_input_targets;
1400  List *sort_input_targets_contain_srfs;
1401  bool sort_input_target_parallel_safe;
1402  PathTarget *grouping_target;
1403  List *grouping_targets;
1404  List *grouping_targets_contain_srfs;
1405  bool grouping_target_parallel_safe;
1406  PathTarget *scanjoin_target;
1407  List *scanjoin_targets;
1408  List *scanjoin_targets_contain_srfs;
1409  bool scanjoin_target_parallel_safe;
1410  bool scanjoin_target_same_exprs;
1411  bool have_grouping;
1412  WindowFuncLists *wflists = NULL;
1413  List *activeWindows = NIL;
1414  grouping_sets_data *gset_data = NULL;
1415  standard_qp_extra qp_extra;
1416 
1417  /* A recursive query should always have setOperations */
1418  Assert(!root->hasRecursion);
1419 
1420  /* Preprocess grouping sets and GROUP BY clause, if any */
1421  if (parse->groupingSets)
1422  {
1423  gset_data = preprocess_grouping_sets(root);
1424  }
1425  else if (parse->groupClause)
1426  {
1427  /* Preprocess regular GROUP BY clause, if any */
1428  root->processed_groupClause = list_copy(parse->groupClause);
1429  /* Remove any redundant GROUP BY columns */
1430  remove_useless_groupby_columns(root);
1431  }
1432 
1433  /*
1434  * Preprocess targetlist. Note that much of the remaining planning
1435  * work will be done with the PathTarget representation of tlists, but
1436  * we must also maintain the full representation of the final tlist so
1437  * that we can transfer its decoration (resnames etc) to the topmost
1438  * tlist of the finished Plan. This is kept in processed_tlist.
1439  */
1440  preprocess_targetlist(root);
1441 
1442  /*
1443  * Mark all the aggregates with resolved aggtranstypes, and detect
1444  * aggregates that are duplicates or can share transition state. We
1445  * must do this before slicing and dicing the tlist into various
1446  * pathtargets, else some copies of the Aggref nodes might escape
1447  * being marked.
1448  */
1449  if (parse->hasAggs)
1450  {
1451  preprocess_aggrefs(root, (Node *) root->processed_tlist);
1452  preprocess_aggrefs(root, (Node *) parse->havingQual);
1453  }
1454 
1455  /*
1456  * Locate any window functions in the tlist. (We don't need to look
1457  * anywhere else, since expressions used in ORDER BY will be in there
1458  * too.) Note that they could all have been eliminated by constant
1459  * folding, in which case we don't need to do any more work.
1460  */
1461  if (parse->hasWindowFuncs)
1462  {
1463  wflists = find_window_functions((Node *) root->processed_tlist,
1464  list_length(parse->windowClause));
1465  if (wflists->numWindowFuncs > 0)
1466  {
1467  /*
1468  * See if any modifications can be made to each WindowClause
1469  * to allow the executor to execute the WindowFuncs more
1470  * quickly.
1471  */
1472  optimize_window_clauses(root, wflists);
1473 
1474  activeWindows = select_active_windows(root, wflists);
1475  }
1476  else
1477  parse->hasWindowFuncs = false;
1478  }
1479 
1480  /*
1481  * Preprocess MIN/MAX aggregates, if any. Note: be careful about
1482  * adding logic between here and the query_planner() call. Anything
1483  * that is needed in MIN/MAX-optimizable cases will have to be
1484  * duplicated in planagg.c.
1485  */
1486  if (parse->hasAggs)
1487  preprocess_minmax_aggregates(root);
1488 
1489  /*
1490  * Figure out whether there's a hard limit on the number of rows that
1491  * query_planner's result subplan needs to return. Even if we know a
1492  * hard limit overall, it doesn't apply if the query has any
1493  * grouping/aggregation operations, or SRFs in the tlist.
1494  */
1495  if (parse->groupClause ||
1496  parse->groupingSets ||
1497  parse->distinctClause ||
1498  parse->hasAggs ||
1499  parse->hasWindowFuncs ||
1500  parse->hasTargetSRFs ||
1501  root->hasHavingQual)
1502  root->limit_tuples = -1.0;
1503  else
1504  root->limit_tuples = limit_tuples;
1505 
1506  /* Set up data needed by standard_qp_callback */
1507  qp_extra.activeWindows = activeWindows;
1508  qp_extra.gset_data = gset_data;
1509 
1510  /*
1511  * If we're a subquery for a set operation, store the SetOperationStmt
1512  * in qp_extra.
1513  */
1514  qp_extra.setop = setops;
1515 
1516  /*
1517  * Generate the best unsorted and presorted paths for the scan/join
1518  * portion of this Query, ie the processing represented by the
1519  * FROM/WHERE clauses. (Note there may not be any presorted paths.)
1520  * We also generate (in standard_qp_callback) pathkey representations
1521  * of the query's sort clause, distinct clause, etc.
1522  */
1523  current_rel = query_planner(root, standard_qp_callback, &qp_extra);
1524 
1525  /*
1526  * Convert the query's result tlist into PathTarget format.
1527  *
1528  * Note: this cannot be done before query_planner() has performed
1529  * appendrel expansion, because that might add resjunk entries to
1530  * root->processed_tlist. Waiting till afterwards is also helpful
1531  * because the target width estimates can use per-Var width numbers
1532  * that were obtained within query_planner().
1533  */
1534  final_target = create_pathtarget(root, root->processed_tlist);
1535  final_target_parallel_safe =
1536  is_parallel_safe(root, (Node *) final_target->exprs);
1537 
1538  /*
1539  * If ORDER BY was given, consider whether we should use a post-sort
1540  * projection, and compute the adjusted target for preceding steps if
1541  * so.
1542  */
1543  if (parse->sortClause)
1544  {
1545  sort_input_target = make_sort_input_target(root,
1546  final_target,
1547  &have_postponed_srfs);
1548  sort_input_target_parallel_safe =
1549  is_parallel_safe(root, (Node *) sort_input_target->exprs);
1550  }
1551  else
1552  {
1553  sort_input_target = final_target;
1554  sort_input_target_parallel_safe = final_target_parallel_safe;
1555  }
1556 
1557  /*
1558  * If we have window functions to deal with, the output from any
1559  * grouping step needs to be what the window functions want;
1560  * otherwise, it should be sort_input_target.
1561  */
1562  if (activeWindows)
1563  {
1564  grouping_target = make_window_input_target(root,
1565  final_target,
1566  activeWindows);
1567  grouping_target_parallel_safe =
1568  is_parallel_safe(root, (Node *) grouping_target->exprs);
1569  }
1570  else
1571  {
1572  grouping_target = sort_input_target;
1573  grouping_target_parallel_safe = sort_input_target_parallel_safe;
1574  }
1575 
1576  /*
1577  * If we have grouping or aggregation to do, the topmost scan/join
1578  * plan node must emit what the grouping step wants; otherwise, it
1579  * should emit grouping_target.
1580  */
1581  have_grouping = (parse->groupClause || parse->groupingSets ||
1582  parse->hasAggs || root->hasHavingQual);
1583  if (have_grouping)
1584  {
1585  scanjoin_target = make_group_input_target(root, final_target);
1586  scanjoin_target_parallel_safe =
1587  is_parallel_safe(root, (Node *) scanjoin_target->exprs);
1588  }
1589  else
1590  {
1591  scanjoin_target = grouping_target;
1592  scanjoin_target_parallel_safe = grouping_target_parallel_safe;
1593  }
1594 
1595  /*
1596  * If there are any SRFs in the targetlist, we must separate each of
1597  * these PathTargets into SRF-computing and SRF-free targets. Replace
1598  * each of the named targets with a SRF-free version, and remember the
1599  * list of additional projection steps we need to add afterwards.
1600  */
1601  if (parse->hasTargetSRFs)
1602  {
1603  /* final_target doesn't recompute any SRFs in sort_input_target */
1604  split_pathtarget_at_srfs(root, final_target, sort_input_target,
1605  &final_targets,
1606  &final_targets_contain_srfs);
1607  final_target = linitial_node(PathTarget, final_targets);
1608  Assert(!linitial_int(final_targets_contain_srfs));
1609  /* likewise for sort_input_target vs. grouping_target */
1610  split_pathtarget_at_srfs(root, sort_input_target, grouping_target,
1611  &sort_input_targets,
1612  &sort_input_targets_contain_srfs);
1613  sort_input_target = linitial_node(PathTarget, sort_input_targets);
1614  Assert(!linitial_int(sort_input_targets_contain_srfs));
1615  /* likewise for grouping_target vs. scanjoin_target */
1616  split_pathtarget_at_srfs(root, grouping_target, scanjoin_target,
1617  &grouping_targets,
1618  &grouping_targets_contain_srfs);
1619  grouping_target = linitial_node(PathTarget, grouping_targets);
1620  Assert(!linitial_int(grouping_targets_contain_srfs));
1621  /* scanjoin_target will not have any SRFs precomputed for it */
1622  split_pathtarget_at_srfs(root, scanjoin_target, NULL,
1623  &scanjoin_targets,
1624  &scanjoin_targets_contain_srfs);
1625  scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
1626  Assert(!linitial_int(scanjoin_targets_contain_srfs));
1627  }
1628  else
1629  {
1630  /* initialize lists; for most of these, dummy values are OK */
1631  final_targets = final_targets_contain_srfs = NIL;
1632  sort_input_targets = sort_input_targets_contain_srfs = NIL;
1633  grouping_targets = grouping_targets_contain_srfs = NIL;
1634  scanjoin_targets = list_make1(scanjoin_target);
1635  scanjoin_targets_contain_srfs = NIL;
1636  }
1637 
1638  /* Apply scan/join target. */
1639  scanjoin_target_same_exprs = list_length(scanjoin_targets) == 1
1640  && equal(scanjoin_target->exprs, current_rel->reltarget->exprs);
1641  apply_scanjoin_target_to_paths(root, current_rel, scanjoin_targets,
1642  scanjoin_targets_contain_srfs,
1643  scanjoin_target_parallel_safe,
1644  scanjoin_target_same_exprs);
1645 
1646  /*
1647  * Save the various upper-rel PathTargets we just computed into
1648  * root->upper_targets[]. The core code doesn't use this, but it
1649  * provides a convenient place for extensions to get at the info. For
1650  * consistency, we save all the intermediate targets, even though some
1651  * of the corresponding upperrels might not be needed for this query.
1652  */
1653  root->upper_targets[UPPERREL_FINAL] = final_target;
1654  root->upper_targets[UPPERREL_ORDERED] = final_target;
1655  root->upper_targets[UPPERREL_DISTINCT] = sort_input_target;
1656  root->upper_targets[UPPERREL_PARTIAL_DISTINCT] = sort_input_target;
1657  root->upper_targets[UPPERREL_WINDOW] = sort_input_target;
1658  root->upper_targets[UPPERREL_GROUP_AGG] = grouping_target;
1659 
1660  /*
1661  * If we have grouping and/or aggregation, consider ways to implement
1662  * that. We build a new upperrel representing the output of this
1663  * phase.
1664  */
1665  if (have_grouping)
1666  {
1667  current_rel = create_grouping_paths(root,
1668  current_rel,
1669  grouping_target,
1670  grouping_target_parallel_safe,
1671  gset_data);
1672  /* Fix things up if grouping_target contains SRFs */
1673  if (parse->hasTargetSRFs)
1674  adjust_paths_for_srfs(root, current_rel,
1675  grouping_targets,
1676  grouping_targets_contain_srfs);
1677  }
1678 
1679  /*
1680  * If we have window functions, consider ways to implement those. We
1681  * build a new upperrel representing the output of this phase.
1682  */
1683  if (activeWindows)
1684  {
1685  current_rel = create_window_paths(root,
1686  current_rel,
1687  grouping_target,
1688  sort_input_target,
1689  sort_input_target_parallel_safe,
1690  wflists,
1691  activeWindows);
1692  /* Fix things up if sort_input_target contains SRFs */
1693  if (parse->hasTargetSRFs)
1694  adjust_paths_for_srfs(root, current_rel,
1695  sort_input_targets,
1696  sort_input_targets_contain_srfs);
1697  }
1698 
1699  /*
1700  * If there is a DISTINCT clause, consider ways to implement that. We
1701  * build a new upperrel representing the output of this phase.
1702  */
1703  if (parse->distinctClause)
1704  {
1705  current_rel = create_distinct_paths(root,
1706  current_rel,
1707  sort_input_target);
1708  }
1709  } /* end of if (setOperations) */
1710 
1711  /*
1712  * If ORDER BY was given, consider ways to implement that, and generate a
1713  * new upperrel containing only paths that emit the correct ordering and
1714  * project the correct final_target. We can apply the original
1715  * limit_tuples limit in sort costing here, but only if there are no
1716  * postponed SRFs.
1717  */
1718  if (parse->sortClause)
1719  {
1720  current_rel = create_ordered_paths(root,
1721  current_rel,
1722  final_target,
1723  final_target_parallel_safe,
1724  have_postponed_srfs ? -1.0 :
1725  limit_tuples);
1726  /* Fix things up if final_target contains SRFs */
1727  if (parse->hasTargetSRFs)
1728  adjust_paths_for_srfs(root, current_rel,
1729  final_targets,
1730  final_targets_contain_srfs);
1731  }
1732 
1733  /*
1734  * Now we are prepared to build the final-output upperrel.
1735  */
1736  final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1737 
1738  /*
1739  * If the input rel is marked consider_parallel and there's nothing that's
1740  * not parallel-safe in the LIMIT clause, then the final_rel can be marked
1741  * consider_parallel as well. Note that if the query has rowMarks or is
1742  * not a SELECT, consider_parallel will be false for every relation in the
1743  * query.
1744  */
1745  if (current_rel->consider_parallel &&
1746  is_parallel_safe(root, parse->limitOffset) &&
1747  is_parallel_safe(root, parse->limitCount))
1748  final_rel->consider_parallel = true;
1749 
1750  /*
1751  * If the current_rel belongs to a single FDW, so does the final_rel.
1752  */
1753  final_rel->serverid = current_rel->serverid;
1754  final_rel->userid = current_rel->userid;
1755  final_rel->useridiscurrent = current_rel->useridiscurrent;
1756  final_rel->fdwroutine = current_rel->fdwroutine;
1757 
1758  /*
1759  * Generate paths for the final_rel. Insert all surviving paths, with
1760  * LockRows, Limit, and/or ModifyTable steps added if needed.
1761  */
1762  foreach(lc, current_rel->pathlist)
1763  {
1764  Path *path = (Path *) lfirst(lc);
1765 
1766  /*
1767  * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
1768  * (Note: we intentionally test parse->rowMarks not root->rowMarks
1769  * here. If there are only non-locking rowmarks, they should be
1770  * handled by the ModifyTable node instead. However, root->rowMarks
1771  * is what goes into the LockRows node.)
1772  */
1773  if (parse->rowMarks)
1774  {
1775  path = (Path *) create_lockrows_path(root, final_rel, path,
1776  root->rowMarks,
1777  assign_special_exec_param(root));
1778  }
1779 
1780  /*
1781  * If there is a LIMIT/OFFSET clause, add the LIMIT node.
1782  */
1783  if (limit_needed(parse))
1784  {
1785  path = (Path *) create_limit_path(root, final_rel, path,
1786  parse->limitOffset,
1787  parse->limitCount,
1788  parse->limitOption,
1789  offset_est, count_est);
1790  }
1791 
1792  /*
1793  * If this is an INSERT/UPDATE/DELETE/MERGE, add the ModifyTable node.
1794  */
1795  if (parse->commandType != CMD_SELECT)
1796  {
1797  Index rootRelation;
1798  List *resultRelations = NIL;
1799  List *updateColnosLists = NIL;
1800  List *withCheckOptionLists = NIL;
1801  List *returningLists = NIL;
1802  List *mergeActionLists = NIL;
1803  List *mergeJoinConditions = NIL;
1804  List *rowMarks;
1805 
1806  if (bms_membership(root->all_result_relids) == BMS_MULTIPLE)
1807  {
1808  /* Inherited UPDATE/DELETE/MERGE */
1809  RelOptInfo *top_result_rel = find_base_rel(root,
1810  parse->resultRelation);
1811  int resultRelation = -1;
1812 
1813  /* Pass the root result rel forward to the executor. */
1814  rootRelation = parse->resultRelation;
1815 
1816  /* Add only leaf children to ModifyTable. */
1817  while ((resultRelation = bms_next_member(root->leaf_result_relids,
1818  resultRelation)) >= 0)
1819  {
1820  RelOptInfo *this_result_rel = find_base_rel(root,
1821  resultRelation);
1822 
1823  /*
1824  * Also exclude any leaf rels that have turned dummy since
1825  * being added to the list, for example, by being excluded
1826  * by constraint exclusion.
1827  */
1828  if (IS_DUMMY_REL(this_result_rel))
1829  continue;
1830 
1831  /* Build per-target-rel lists needed by ModifyTable */
1832  resultRelations = lappend_int(resultRelations,
1833  resultRelation);
1834  if (parse->commandType == CMD_UPDATE)
1835  {
1836  List *update_colnos = root->update_colnos;
1837 
1838  if (this_result_rel != top_result_rel)
1839  update_colnos =
1840  adjust_inherited_attnums_multilevel(root,
1841  update_colnos,
1842  this_result_rel->relid,
1843  top_result_rel->relid);
1844  updateColnosLists = lappend(updateColnosLists,
1845  update_colnos);
1846  }
1847  if (parse->withCheckOptions)
1848  {
1849  List *withCheckOptions = parse->withCheckOptions;
1850 
1851  if (this_result_rel != top_result_rel)
1852  withCheckOptions = (List *)
1853  adjust_appendrel_attrs_multilevel(root,
1854  (Node *) withCheckOptions,
1855  this_result_rel,
1856  top_result_rel);
1857  withCheckOptionLists = lappend(withCheckOptionLists,
1858  withCheckOptions);
1859  }
1860  if (parse->returningList)
1861  {
1862  List *returningList = parse->returningList;
1863 
1864  if (this_result_rel != top_result_rel)
1865  returningList = (List *)
1866  adjust_appendrel_attrs_multilevel(root,
1867  (Node *) returningList,
1868  this_result_rel,
1869  top_result_rel);
1870  returningLists = lappend(returningLists,
1871  returningList);
1872  }
1873  if (parse->mergeActionList)
1874  {
1875  ListCell *l;
1876  List *mergeActionList = NIL;
1877 
1878  /*
1879  * Copy MergeActions and translate stuff that
1880  * references attribute numbers.
1881  */
1882  foreach(l, parse->mergeActionList)
1883  {
1884  MergeAction *action = lfirst(l),
1885  *leaf_action = copyObject(action);
1886 
1887  leaf_action->qual =
1888  adjust_appendrel_attrs_multilevel(root,
1889  (Node *) action->qual,
1890  this_result_rel,
1891  top_result_rel);
1892  leaf_action->targetList = (List *)
1893  adjust_appendrel_attrs_multilevel(root,
1894  (Node *) action->targetList,
1895  this_result_rel,
1896  top_result_rel);
1897  if (leaf_action->commandType == CMD_UPDATE)
1898  leaf_action->updateColnos =
1899  adjust_inherited_attnums_multilevel(root,
1900  action->updateColnos,
1901  this_result_rel->relid,
1902  top_result_rel->relid);
1903  mergeActionList = lappend(mergeActionList,
1904  leaf_action);
1905  }
1906 
1907  mergeActionLists = lappend(mergeActionLists,
1908  mergeActionList);
1909  }
1910  if (parse->commandType == CMD_MERGE)
1911  {
1912  Node *mergeJoinCondition = parse->mergeJoinCondition;
1913 
1914  if (this_result_rel != top_result_rel)
1915  mergeJoinCondition =
1916  adjust_appendrel_attrs_multilevel(root,
1917  mergeJoinCondition,
1918  this_result_rel,
1919  top_result_rel);
1920  mergeJoinConditions = lappend(mergeJoinConditions,
1921  mergeJoinCondition);
1922  }
1923  }
1924 
1925  if (resultRelations == NIL)
1926  {
1927  /*
1928  * We managed to exclude every child rel, so generate a
1929  * dummy one-relation plan using info for the top target
1930  * rel (even though that may not be a leaf target).
1931  * Although it's clear that no data will be updated or
1932  * deleted, we still need to have a ModifyTable node so
1933  * that any statement triggers will be executed. (This
1934  * could be cleaner if we fixed nodeModifyTable.c to allow
1935  * zero target relations, but that probably wouldn't be a
1936  * net win.)
1937  */
1938  resultRelations = list_make1_int(parse->resultRelation);
1939  if (parse->commandType == CMD_UPDATE)
1940  updateColnosLists = list_make1(root->update_colnos);
1941  if (parse->withCheckOptions)
1942  withCheckOptionLists = list_make1(parse->withCheckOptions);
1943  if (parse->returningList)
1944  returningLists = list_make1(parse->returningList);
1945  if (parse->mergeActionList)
1946  mergeActionLists = list_make1(parse->mergeActionList);
1947  if (parse->commandType == CMD_MERGE)
1948  mergeJoinConditions = list_make1(parse->mergeJoinCondition);
1949  }
1950  }
1951  else
1952  {
1953  /* Single-relation INSERT/UPDATE/DELETE/MERGE. */
1954  rootRelation = 0; /* there's no separate root rel */
1955  resultRelations = list_make1_int(parse->resultRelation);
1956  if (parse->commandType == CMD_UPDATE)
1957  updateColnosLists = list_make1(root->update_colnos);
1958  if (parse->withCheckOptions)
1959  withCheckOptionLists = list_make1(parse->withCheckOptions);
1960  if (parse->returningList)
1961  returningLists = list_make1(parse->returningList);
1962  if (parse->mergeActionList)
1963  mergeActionLists = list_make1(parse->mergeActionList);
1964  if (parse->commandType == CMD_MERGE)
1965  mergeJoinConditions = list_make1(parse->mergeJoinCondition);
1966  }
1967 
1968  /*
1969  * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
1970  * will have dealt with fetching non-locked marked rows, else we
1971  * need to have ModifyTable do that.
1972  */
1973  if (parse->rowMarks)
1974  rowMarks = NIL;
1975  else
1976  rowMarks = root->rowMarks;
1977 
1978  path = (Path *)
1979  create_modifytable_path(root, final_rel,
1980  path,
1981  parse->commandType,
1982  parse->canSetTag,
1983  parse->resultRelation,
1984  rootRelation,
1985  root->partColsUpdated,
1986  resultRelations,
1987  updateColnosLists,
1988  withCheckOptionLists,
1989  returningLists,
1990  rowMarks,
1991  parse->onConflict,
1992  mergeActionLists,
1993  mergeJoinConditions,
1994  assign_special_exec_param(root));
1995  }
1996 
1997  /* And shove it into final_rel */
1998  add_path(final_rel, path);
1999  }
2000 
2001  /*
2002  * Generate partial paths for final_rel, too, if outer query levels might
2003  * be able to make use of them.
2004  */
2005  if (final_rel->consider_parallel && root->query_level > 1 &&
2006  !limit_needed(parse))
2007  {
2008  Assert(!parse->rowMarks && parse->commandType == CMD_SELECT);
2009  foreach(lc, current_rel->partial_pathlist)
2010  {
2011  Path *partial_path = (Path *) lfirst(lc);
2012 
2013  add_partial_path(final_rel, partial_path);
2014  }
2015  }
2016 
2017  extra.limit_needed = limit_needed(parse);
2018  extra.limit_tuples = limit_tuples;
2019  extra.count_est = count_est;
2020  extra.offset_est = offset_est;
2021 
2022  /*
2023  * If there is an FDW that's responsible for all baserels of the query,
2024  * let it consider adding ForeignPaths.
2025  */
2026  if (final_rel->fdwroutine &&
2027  final_rel->fdwroutine->GetForeignUpperPaths)
2028  final_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_FINAL,
2029  current_rel, final_rel,
2030  &extra);
2031 
2032  /* Let extensions possibly add some more paths */
2033  if (create_upper_paths_hook)
2034  (*create_upper_paths_hook) (root, UPPERREL_FINAL,
2035  current_rel, final_rel, &extra);
2036 
2037  /* Note: currently, we leave it to callers to do set_cheapest() */
2038 }
Node * adjust_appendrel_attrs_multilevel(PlannerInfo *root, Node *node, RelOptInfo *childrel, RelOptInfo *parentrel)
Definition: appendinfo.c:521
List * adjust_inherited_attnums_multilevel(PlannerInfo *root, List *attnums, Index child_relid, Index top_parent_relid)
Definition: appendinfo.c:662
BMS_Membership bms_membership(const Bitmapset *a)
Definition: bitmapset.c:781
@ BMS_MULTIPLE
Definition: bitmapset.h:73
WindowFuncLists * find_window_functions(Node *clause, Index maxWinRef)
Definition: clauses.c:227
bool equal(const void *a, const void *b)
Definition: equalfuncs.c:223
List * lappend_int(List *list, int datum)
Definition: list.c:357
@ CMD_MERGE
Definition: nodes.h:269
@ CMD_UPDATE
Definition: nodes.h:266
@ CMD_SELECT
Definition: nodes.h:265
int assign_special_exec_param(PlannerInfo *root)
Definition: paramassign.c:664
const char * LCS_asString(LockClauseStrength strength)
Definition: analyze.c:3213
LockRowsPath * create_lockrows_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *rowMarks, int epqParam)
Definition: pathnode.c:3659
ModifyTablePath * create_modifytable_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, CmdType operation, bool canSetTag, Index nominalRelation, Index rootRelation, bool partColsUpdated, List *resultRelations, List *updateColnosLists, List *withCheckOptionLists, List *returningLists, List *rowMarks, OnConflictExpr *onconflict, List *mergeActionLists, List *mergeJoinConditions, int epqParam)
Definition: pathnode.c:3722
@ UPPERREL_FINAL
Definition: pathnodes.h:79
#define list_make1_int(x1)
Definition: pg_list.h:227
void preprocess_minmax_aggregates(PlannerInfo *root)
Definition: planagg.c:72
RelOptInfo * query_planner(PlannerInfo *root, query_pathkeys_callback qp_callback, void *qp_extra)
Definition: planmain.c:54
static List * postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
Definition: planner.c:5547
static void remove_useless_groupby_columns(PlannerInfo *root)
Definition: planner.c:2687
static double preprocess_limit(PlannerInfo *root, double tuple_fraction, int64 *offset_est, int64 *count_est)
Definition: planner.c:2443
static PathTarget * make_window_input_target(PlannerInfo *root, PathTarget *final_target, List *activeWindows)
Definition: planner.c:5881
static RelOptInfo * create_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target)
Definition: planner.c:4630
static void optimize_window_clauses(PlannerInfo *root, WindowFuncLists *wflists)
Definition: planner.c:5584
static PathTarget * make_sort_input_target(PlannerInfo *root, PathTarget *final_target, bool *have_postponed_srfs)
Definition: planner.c:6127
static grouping_sets_data * preprocess_grouping_sets(PlannerInfo *root)
Definition: planner.c:2047
static PathTarget * make_group_input_target(PlannerInfo *root, PathTarget *final_target)
Definition: planner.c:5321
static List * select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
Definition: planner.c:5724
bool limit_needed(Query *parse)
Definition: planner.c:2628
static RelOptInfo * create_ordered_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, double limit_tuples)
Definition: planner.c:5106
static RelOptInfo * create_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, grouping_sets_data *gd)
Definition: planner.c:3658
static void standard_qp_callback(PlannerInfo *root, void *extra)
Definition: planner.c:3354
static RelOptInfo * create_window_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *input_target, PathTarget *output_target, bool output_target_parallel_safe, WindowFuncLists *wflists, List *activeWindows)
Definition: planner.c:4410
void preprocess_aggrefs(PlannerInfo *root, Node *clause)
Definition: prepagg.c:110
void preprocess_targetlist(PlannerInfo *root)
Definition: preptlist.c:62
RelOptInfo * plan_set_operations(PlannerInfo *root)
Definition: prepunion.c:99
RelOptInfo * find_base_rel(PlannerInfo *root, int relid)
Definition: relnode.c:414
Cardinality limit_tuples
Definition: pathnodes.h:3304
Index relid
Definition: pathnodes.h:908
int numWindowFuncs
Definition: clauses.h:21
List * activeWindows
Definition: planner.c:121
grouping_sets_data * gset_data
Definition: planner.c:122
SetOperationStmt * setop
Definition: planner.c:123
void split_pathtarget_at_srfs(PlannerInfo *root, PathTarget *target, PathTarget *input_target, List **targets, List **targets_contain_srfs)
Definition: tlist.c:881
#define create_pathtarget(root, tlist)
Definition: tlist.h:53

References generate_unaccent_rules::action, standard_qp_extra::activeWindows, add_partial_path(), add_path(), adjust_appendrel_attrs_multilevel(), adjust_inherited_attnums_multilevel(), adjust_paths_for_srfs(), apply_scanjoin_target_to_paths(), Assert, assign_special_exec_param(), bms_membership(), BMS_MULTIPLE, bms_next_member(), RelOptInfo::cheapest_total_path, CMD_MERGE, CMD_SELECT, CMD_UPDATE, RelOptInfo::consider_parallel, copyObject, FinalPathExtraData::count_est, create_distinct_paths(), create_grouping_paths(), create_limit_path(), create_lockrows_path(), create_modifytable_path(), create_ordered_paths(), create_pathtarget, create_upper_paths_hook, create_window_paths(), equal(), ereport, errcode(), errmsg(), ERROR, PathTarget::exprs, fetch_upper_rel(), find_base_rel(), find_window_functions(), standard_qp_extra::gset_data, IS_DUMMY_REL, is_parallel_safe(), lappend(), lappend_int(), LCS_asString(), lfirst, limit_needed(), FinalPathExtraData::limit_needed, FinalPathExtraData::limit_tuples, linitial_int, linitial_node, list_copy(), list_length(), list_make1, list_make1_int, make_group_input_target(), make_pathkeys_for_sortclauses(), make_sort_input_target(), make_window_input_target(), NIL, WindowFuncLists::numWindowFuncs, FinalPathExtraData::offset_est, optimize_window_clauses(), parse(), RelOptInfo::partial_pathlist, RelOptInfo::pathlist, plan_set_operations(), postprocess_setop_tlist(), preprocess_aggrefs(), preprocess_grouping_sets(), preprocess_limit(), preprocess_minmax_aggregates(), preprocess_targetlist(), query_planner(), RelOptInfo::relid, RelOptInfo::reltarget, remove_useless_groupby_columns(), root, select_active_windows(), RelOptInfo::serverid, standard_qp_extra::setop, split_pathtarget_at_srfs(), standard_qp_callback(), UPPERREL_DISTINCT, UPPERREL_FINAL, UPPERREL_GROUP_AGG, UPPERREL_ORDERED, UPPERREL_PARTIAL_DISTINCT, UPPERREL_WINDOW, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by subquery_planner().

◆ has_volatile_pathkey()

static bool has_volatile_pathkey ( List *keys)
static

Definition at line 3128 of file planner.c.

3129 {
3130  ListCell *lc;
3131 
3132  foreach(lc, keys)
3133  {
3134  PathKey *pathkey = lfirst_node(PathKey, lc);
3135 
3136  if (pathkey->pk_eclass->ec_has_volatile)
3137  return true;
3138  }
3139 
3140  return false;
3141 }

References lfirst_node.

Referenced by adjust_group_pathkeys_for_groupagg().
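
A pathkey is "volatile" when its EquivalenceClass was built for a volatile sort expression such as ORDER BY random(). A minimal, hypothetical guard in the spirit of the caller above (the use of root->sort_pathkeys here is purely illustrative):

  /* Don't piggyback on an ordering that depends on a volatile
   * expression; re-evaluating it could yield a different order. */
  if (has_volatile_pathkey(root->sort_pathkeys))
      return;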

◆ is_degenerate_grouping()

static bool is_degenerate_grouping ( PlannerInfo *root)
static

Definition at line 3824 of file planner.c.

3825 {
3826  Query *parse = root->parse;
3827 
3828  return (root->hasHavingQual || parse->groupingSets) &&
3829  !parse->hasAggs && parse->groupClause == NIL;
3830 }

References NIL, parse(), and root.

Referenced by create_grouping_paths().

◆ limit_needed()

bool limit_needed ( Query *parse)

Definition at line 2628 of file planner.c.

2629 {
2630  Node *node;
2631 
2632  node = parse->limitCount;
2633  if (node)
2634  {
2635  if (IsA(node, Const))
2636  {
2637  /* NULL indicates LIMIT ALL, ie, no limit */
2638  if (!((Const *) node)->constisnull)
2639  return true; /* LIMIT with a constant value */
2640  }
2641  else
2642  return true; /* non-constant LIMIT */
2643  }
2644 
2645  node = parse->limitOffset;
2646  if (node)
2647  {
2648  if (IsA(node, Const))
2649  {
2650  /* Treat NULL as no offset; the executor would too */
2651  if (!((Const *) node)->constisnull)
2652  {
2653  int64 offset = DatumGetInt64(((Const *) node)->constvalue);
2654 
2655  if (offset != 0)
2656  return true; /* OFFSET with a nonzero value */
2657  }
2658  }
2659  else
2660  return true; /* non-constant OFFSET */
2661  }
2662 
2663  return false; /* don't need a Limit plan node */
2664 }
#define IsA(nodeptr, _type_)
Definition: nodes.h:158
static int64 DatumGetInt64(Datum X)
Definition: postgres.h:385

References DatumGetInt64(), IsA, and parse().

Referenced by grouping_planner(), and set_rel_consider_parallel().
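
As an illustration of the Const conventions above: LIMIT ALL reaches the planner as a null Const, which limit_needed() ignores. A small hypothetical sketch (INT8OID comes from catalog/pg_type.h):

  /* LIMIT ALL is represented as a null Const of type int8 */
  query->limitCount = (Node *) makeNullConst(INT8OID, -1, InvalidOid);
  Assert(!limit_needed(query));   /* no Limit plan node is required */

By the same rules, a constant OFFSET 0 needs no Limit node, while a non-constant OFFSET (e.g. a Param) always does.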

◆ make_group_input_target()

static PathTarget * make_group_input_target ( PlannerInfo *root,
PathTarget *final_target 
)
static

Definition at line 5321 of file planner.c.

5322 {
5323  Query *parse = root->parse;
5324  PathTarget *input_target;
5325  List *non_group_cols;
5326  List *non_group_vars;
5327  int i;
5328  ListCell *lc;
5329 
5330  /*
5331  * We must build a target containing all grouping columns, plus any other
5332  * Vars mentioned in the query's targetlist and HAVING qual.
5333  */
5334  input_target = create_empty_pathtarget();
5335  non_group_cols = NIL;
5336 
5337  i = 0;
5338  foreach(lc, final_target->exprs)
5339  {
5340  Expr *expr = (Expr *) lfirst(lc);
5341  Index sgref = get_pathtarget_sortgroupref(final_target, i);
5342 
5343  if (sgref && root->processed_groupClause &&
5344  get_sortgroupref_clause_noerr(sgref,
5345  root->processed_groupClause) != NULL)
5346  {
5347  /*
5348  * It's a grouping column, so add it to the input target as-is.
5349  */
5350  add_column_to_pathtarget(input_target, expr, sgref);
5351  }
5352  else
5353  {
5354  /*
5355  * Non-grouping column, so just remember the expression for later
5356  * call to pull_var_clause.
5357  */
5358  non_group_cols = lappend(non_group_cols, expr);
5359  }
5360 
5361  i++;
5362  }
5363 
5364  /*
5365  * If there's a HAVING clause, we'll need the Vars it uses, too.
5366  */
5367  if (parse->havingQual)
5368  non_group_cols = lappend(non_group_cols, parse->havingQual);
5369 
5370  /*
5371  * Pull out all the Vars mentioned in non-group cols (plus HAVING), and
5372  * add them to the input target if not already present. (A Var used
5373  * directly as a GROUP BY item will be present already.) Note this
5374  * includes Vars used in resjunk items, so we are covering the needs of
5375  * ORDER BY and window specifications. Vars used within Aggrefs and
5376  * WindowFuncs will be pulled out here, too.
5377  */
5378  non_group_vars = pull_var_clause((Node *) non_group_cols,
5379  PVC_RECURSE_AGGREGATES |
5380  PVC_RECURSE_WINDOWFUNCS |
5381  PVC_INCLUDE_PLACEHOLDERS);
5382  add_new_columns_to_pathtarget(input_target, non_group_vars);
5383 
5384  /* clean up cruft */
5385  list_free(non_group_vars);
5386  list_free(non_group_cols);
5387 
5388  /* XXX this causes some redundant cost calculation ... */
5389  return set_pathtarget_cost_width(root, input_target);
5390 }
PathTarget * set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
Definition: costsize.c:6256
void list_free(List *list)
Definition: list.c:1546
#define PVC_RECURSE_AGGREGATES
Definition: optimizer.h:187
#define PVC_RECURSE_WINDOWFUNCS
Definition: optimizer.h:189
#define PVC_INCLUDE_PLACEHOLDERS
Definition: optimizer.h:190
#define get_pathtarget_sortgroupref(target, colno)
Definition: pathnodes.h:1538
SortGroupClause * get_sortgroupref_clause_noerr(Index sortref, List *clauses)
Definition: tlist.c:443
void add_new_columns_to_pathtarget(PathTarget *target, List *exprs)
Definition: tlist.c:752
PathTarget * create_empty_pathtarget(void)
Definition: tlist.c:681
List * pull_var_clause(Node *node, int flags)
Definition: var.c:607

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), create_empty_pathtarget(), PathTarget::exprs, get_pathtarget_sortgroupref, get_sortgroupref_clause_noerr(), i, lappend(), lfirst, list_free(), NIL, parse(), pull_var_clause(), PVC_INCLUDE_PLACEHOLDERS, PVC_RECURSE_AGGREGATES, PVC_RECURSE_WINDOWFUNCS, root, and set_pathtarget_cost_width().

Referenced by grouping_planner().

◆ make_grouping_rel()

static RelOptInfo * make_grouping_rel ( PlannerInfo *root,
RelOptInfo *input_rel,
PathTarget *target,
bool  target_parallel_safe,
Node *havingQual 
)
static

Definition at line 3771 of file planner.c.

3774 {
3775  RelOptInfo *grouped_rel;
3776 
3777  if (IS_OTHER_REL(input_rel))
3778  {
3779  grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG,
3780  input_rel->relids);
3781  grouped_rel->reloptkind = RELOPT_OTHER_UPPER_REL;
3782  }
3783  else
3784  {
3785  /*
3786  * By tradition, the relids set for the main grouping relation is
3787  * NULL. (This could be changed, but might require adjustments
3788  * elsewhere.)
3789  */
3790  grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, NULL);
3791  }
3792 
3793  /* Set target. */
3794  grouped_rel->reltarget = target;
3795 
3796  /*
3797  * If the input relation is not parallel-safe, then the grouped relation
3798  * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
3799  * target list and HAVING quals are parallel-safe.
3800  */
3801  if (input_rel->consider_parallel && target_parallel_safe &&
3802  is_parallel_safe(root, (Node *) havingQual))
3803  grouped_rel->consider_parallel = true;
3804 
3805  /*
3806  * If the input rel belongs to a single FDW, so does the grouped rel.
3807  */
3808  grouped_rel->serverid = input_rel->serverid;
3809  grouped_rel->userid = input_rel->userid;
3810  grouped_rel->useridiscurrent = input_rel->useridiscurrent;
3811  grouped_rel->fdwroutine = input_rel->fdwroutine;
3812 
3813  return grouped_rel;
3814 }
@ RELOPT_OTHER_UPPER_REL
Definition: pathnodes.h:822

References RelOptInfo::consider_parallel, fetch_upper_rel(), IS_OTHER_REL, is_parallel_safe(), RelOptInfo::relids, RELOPT_OTHER_UPPER_REL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, RelOptInfo::serverid, UPPERREL_GROUP_AGG, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by create_grouping_paths(), and create_partitionwise_grouping_paths().

◆ make_ordered_path()

static Path* make_ordered_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *path,
Path *cheapest_path,
List *pathkeys 
)
static

Definition at line 6791 of file planner.c.

6793 {
6794  bool is_sorted;
6795  int presorted_keys;
6796 
6797  is_sorted = pathkeys_count_contained_in(pathkeys,
6798  path->pathkeys,
6799  &presorted_keys);
6800 
6801  if (!is_sorted)
6802  {
6803  /*
6804  * Try at least sorting the cheapest path and also try incrementally
6805  * sorting any path which is partially sorted already (no need to deal
6806  * with paths which have presorted keys when incremental sort is
6807  * disabled unless it's the cheapest input path).
6808  */
6809  if (path != cheapest_path &&
6810  (presorted_keys == 0 || !enable_incremental_sort))
6811  return NULL;
6812 
6813  /*
6814  * We've no need to consider both a sort and incremental sort. We'll
6815  * just do a sort if there are no presorted keys and an incremental
6816  * sort when there are presorted keys.
6817  */
6818  if (presorted_keys == 0 || !enable_incremental_sort)
6819  path = (Path *) create_sort_path(root,
6820  rel,
6821  path,
6822  pathkeys,
6823  -1.0);
6824  else
6825  path = (Path *) create_incremental_sort_path(root,
6826  rel,
6827  path,
6828  pathkeys,
6829  presorted_keys,
6830  -1.0);
6831  }
6832 
6833  return path;
6834 }

References create_incremental_sort_path(), create_sort_path(), enable_incremental_sort, Path::pathkeys, pathkeys_count_contained_in(), and root.

Referenced by add_paths_to_grouping_rel(), and create_partial_grouping_paths().
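
A hypothetical call site, following the contract above (a NULL result means this particular input path is not worth sorting; root->group_pathkeys is used here only for illustration):

  Path *sorted = make_ordered_path(root, rel, path,
                                   rel->cheapest_total_path,
                                   root->group_pathkeys);
  if (sorted != NULL)
      add_path(rel, sorted);   /* NULL means "skip this input path" */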

◆ make_partial_grouping_target()

static PathTarget * make_partial_grouping_target ( PlannerInfo *root,
PathTarget *grouping_target,
Node *havingQual 
)
static

Definition at line 5409 of file planner.c.

5412 {
5413  PathTarget *partial_target;
5414  List *non_group_cols;
5415  List *non_group_exprs;
5416  int i;
5417  ListCell *lc;
5418 
5419  partial_target = create_empty_pathtarget();
5420  non_group_cols = NIL;
5421 
5422  i = 0;
5423  foreach(lc, grouping_target->exprs)
5424  {
5425  Expr *expr = (Expr *) lfirst(lc);
5426  Index sgref = get_pathtarget_sortgroupref(grouping_target, i);
5427 
5428  if (sgref && root->processed_groupClause &&
5429  get_sortgroupref_clause_noerr(sgref,
5430  root->processed_groupClause) != NULL)
5431  {
5432  /*
5433  * It's a grouping column, so add it to the partial_target as-is.
5434  * (This allows the upper agg step to repeat the grouping calcs.)
5435  */
5436  add_column_to_pathtarget(partial_target, expr, sgref);
5437  }
5438  else
5439  {
5440  /*
5441  * Non-grouping column, so just remember the expression for later
5442  * call to pull_var_clause.
5443  */
5444  non_group_cols = lappend(non_group_cols, expr);
5445  }
5446 
5447  i++;
5448  }
5449 
5450  /*
5451  * If there's a HAVING clause, we'll need the Vars/Aggrefs it uses, too.
5452  */
5453  if (havingQual)
5454  non_group_cols = lappend(non_group_cols, havingQual);
5455 
5456  /*
5457  * Pull out all the Vars, PlaceHolderVars, and Aggrefs mentioned in
5458  * non-group cols (plus HAVING), and add them to the partial_target if not
5459  * already present. (An expression used directly as a GROUP BY item will
5460  * be present already.) Note this includes Vars used in resjunk items, so
5461  * we are covering the needs of ORDER BY and window specifications.
5462  */
5463  non_group_exprs = pull_var_clause((Node *) non_group_cols,
5464  PVC_INCLUDE_AGGREGATES |
5465  PVC_RECURSE_WINDOWFUNCS |
5466  PVC_INCLUDE_PLACEHOLDERS);
5467 
5468  add_new_columns_to_pathtarget(partial_target, non_group_exprs);
5469 
5470  /*
5471  * Adjust Aggrefs to put them in partial mode. At this point all Aggrefs
5472  * are at the top level of the target list, so we can just scan the list
5473  * rather than recursing through the expression trees.
5474  */
5475  foreach(lc, partial_target->exprs)
5476  {
5477  Aggref *aggref = (Aggref *) lfirst(lc);
5478 
5479  if (IsA(aggref, Aggref))
5480  {
5481  Aggref *newaggref;
5482 
5483  /*
5484  * We shouldn't need to copy the substructure of the Aggref node,
5485  * but flat-copy the node itself to avoid damaging other trees.
5486  */
5487  newaggref = makeNode(Aggref);
5488  memcpy(newaggref, aggref, sizeof(Aggref));
5489 
5490  /* For now, assume serialization is required */
5491  mark_partial_aggref(newaggref, AGGSPLIT_INITIAL_SERIAL);
5492 
5493  lfirst(lc) = newaggref;
5494  }
5495  }
5496 
5497  /* clean up cruft */
5498  list_free(non_group_exprs);
5499  list_free(non_group_cols);
5500 
5501  /* XXX this causes some redundant cost calculation ... */
5502  return set_pathtarget_cost_width(root, partial_target);
5503 }
#define PVC_INCLUDE_AGGREGATES
Definition: optimizer.h:186
void mark_partial_aggref(Aggref *agg, AggSplit aggsplit)
Definition: planner.c:5512

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), AGGSPLIT_INITIAL_SERIAL, create_empty_pathtarget(), PathTarget::exprs, get_pathtarget_sortgroupref, get_sortgroupref_clause_noerr(), i, IsA, lappend(), lfirst, list_free(), makeNode, mark_partial_aggref(), NIL, pull_var_clause(), PVC_INCLUDE_AGGREGATES, PVC_INCLUDE_PLACEHOLDERS, PVC_RECURSE_WINDOWFUNCS, root, and set_pathtarget_cost_width().

Referenced by create_partial_grouping_paths().

◆ make_pathkeys_for_window()

static List * make_pathkeys_for_window ( PlannerInfo *root,
WindowClause *wc,
List *tlist 
)
static

Definition at line 6001 of file planner.c.

6003 {
6004  List *window_pathkeys = NIL;
6005 
6006  /* Throw error if can't sort */
6007  if (!grouping_is_sortable(wc->partitionClause))
6008  ereport(ERROR,
6009  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6010  errmsg("could not implement window PARTITION BY"),
6011  errdetail("Window partitioning columns must be of sortable datatypes.")));
6012  if (!grouping_is_sortable(wc->orderClause))
6013  ereport(ERROR,
6014  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6015  errmsg("could not implement window ORDER BY"),
6016  errdetail("Window ordering columns must be of sortable datatypes.")));
6017 
6018  /*
6019  * First fetch the pathkeys for the PARTITION BY clause. We can safely
6020  * remove any clauses from the wc->partitionClause for redundant pathkeys.
6021  */
6022  if (wc->partitionClause != NIL)
6023  {
6024  bool sortable;
6025 
6026  window_pathkeys = make_pathkeys_for_sortclauses_extended(root,
6027  &wc->partitionClause,
6028  tlist,
6029  true,
6030  &sortable);
6031 
6032  Assert(sortable);
6033  }
6034 
6035  /*
6036  * In principle, we could also consider removing redundant ORDER BY items
6037  * too as doing so does not alter the result of peer row checks done by
6038  * the executor. However, we must *not* remove the ordering column for
6039  * RANGE OFFSET cases, as the executor needs that for in_range tests even
6040  * if it's known to be equal to some partitioning column.
6041  */
6042  if (wc->orderClause != NIL)
6043  {
6044  List *orderby_pathkeys;
6045 
6046  orderby_pathkeys = make_pathkeys_for_sortclauses(root,
6047  wc->orderClause,
6048  tlist);
6049 
6050  /* Okay, make the combined pathkeys */
6051  if (window_pathkeys != NIL)
6052  window_pathkeys = append_pathkeys(window_pathkeys, orderby_pathkeys);
6053  else
6054  window_pathkeys = orderby_pathkeys;
6055  }
6056 
6057  return window_pathkeys;
6058 }
List * make_pathkeys_for_sortclauses_extended(PlannerInfo *root, List **sortclauses, List *tlist, bool remove_redundant, bool *sortable)
Definition: pathkeys.c:1384
List * partitionClause
Definition: parsenodes.h:1546
List * orderClause
Definition: parsenodes.h:1548

References append_pathkeys(), Assert, ereport, errcode(), errdetail(), errmsg(), ERROR, grouping_is_sortable(), make_pathkeys_for_sortclauses(), make_pathkeys_for_sortclauses_extended(), NIL, WindowClause::orderClause, WindowClause::partitionClause, and root.

Referenced by create_one_window_path(), and standard_qp_callback().
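
A hypothetical use, showing how the two clause lists combine into one sort requirement:

  /* Build the sort requirement for one window clause */
  List *window_pathkeys = make_pathkeys_for_window(root, wc, tlist);
  /* e.g. OVER (PARTITION BY a ORDER BY b) -> pathkeys (a, b), so a
   * single Sort on (a, b) can feed the WindowAgg node */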

◆ make_sort_input_target()

static PathTarget * make_sort_input_target ( PlannerInfo *root,
PathTarget *final_target,
bool *have_postponed_srfs 
)
static

Definition at line 6127 of file planner.c.

6130 {
6131  Query *parse = root->parse;
6132  PathTarget *input_target;
6133  int ncols;
6134  bool *col_is_srf;
6135  bool *postpone_col;
6136  bool have_srf;
6137  bool have_volatile;
6138  bool have_expensive;
6139  bool have_srf_sortcols;
6140  bool postpone_srfs;
6141  List *postponable_cols;
6142  List *postponable_vars;
6143  int i;
6144  ListCell *lc;
6145 
6146  /* Shouldn't get here unless query has ORDER BY */
6147  Assert(parse->sortClause);
6148 
6149  *have_postponed_srfs = false; /* default result */
6150 
6151  /* Inspect tlist and collect per-column information */
6152  ncols = list_length(final_target->exprs);
6153  col_is_srf = (bool *) palloc0(ncols * sizeof(bool));
6154  postpone_col = (bool *) palloc0(ncols * sizeof(bool));
6155  have_srf = have_volatile = have_expensive = have_srf_sortcols = false;
6156 
6157  i = 0;
6158  foreach(lc, final_target->exprs)
6159  {
6160  Expr *expr = (Expr *) lfirst(lc);
6161 
6162  /*
6163  * If the column has a sortgroupref, assume it has to be evaluated
6164  * before sorting. Generally such columns would be ORDER BY, GROUP
6165  * BY, etc targets. One exception is columns that were removed from
6166  * GROUP BY by remove_useless_groupby_columns() ... but those would
6167  * only be Vars anyway. There don't seem to be any cases where it
6168  * would be worth the trouble to double-check.
6169  */
6170  if (get_pathtarget_sortgroupref(final_target, i) == 0)
6171  {
6172  /*
6173  * Check for SRF or volatile functions. Check the SRF case first
6174  * because we must know whether we have any postponed SRFs.
6175  */
6176  if (parse->hasTargetSRFs &&
6177  expression_returns_set((Node *) expr))
6178  {
6179  /* We'll decide below whether these are postponable */
6180  col_is_srf[i] = true;
6181  have_srf = true;
6182  }
6183  else if (contain_volatile_functions((Node *) expr))
6184  {
6185  /* Unconditionally postpone */
6186  postpone_col[i] = true;
6187  have_volatile = true;
6188  }
6189  else
6190  {
6191  /*
6192  * Else check the cost. XXX it's annoying to have to do this
6193  * when set_pathtarget_cost_width() just did it. Refactor to
6194  * allow sharing the work?
6195  */
6196  QualCost cost;
6197 
6198  cost_qual_eval_node(&cost, (Node *) expr, root);
6199 
6200  /*
6201  * We arbitrarily define "expensive" as "more than 10X
6202  * cpu_operator_cost". Note this will take in any PL function
6203  * with default cost.
6204  */
6205  if (cost.per_tuple > 10 * cpu_operator_cost)
6206  {
6207  postpone_col[i] = true;
6208  have_expensive = true;
6209  }
6210  }
6211  }
6212  else
6213  {
6214  /* For sortgroupref cols, just check if any contain SRFs */
6215  if (!have_srf_sortcols &&
6216  parse->hasTargetSRFs &&
6217  expression_returns_set((Node *) expr))
6218  have_srf_sortcols = true;
6219  }
6220 
6221  i++;
6222  }
6223 
6224  /*
6225  * We can postpone SRFs if we have some but none are in sortgroupref cols.
6226  */
6227  postpone_srfs = (have_srf && !have_srf_sortcols);
6228 
6229  /*
6230  * If we don't need a post-sort projection, just return final_target.
6231  */
6232  if (!(postpone_srfs || have_volatile ||
6233  (have_expensive &&
6234  (parse->limitCount || root->tuple_fraction > 0))))
6235  return final_target;
6236 
6237  /*
6238  * Report whether the post-sort projection will contain set-returning
6239  * functions. This is important because it affects whether the Sort can
6240  * rely on the query's LIMIT (if any) to bound the number of rows it needs
6241  * to return.
6242  */
6243  *have_postponed_srfs = postpone_srfs;
6244 
6245  /*
6246  * Construct the sort-input target, taking all non-postponable columns and
6247  * then adding Vars, PlaceHolderVars, Aggrefs, and WindowFuncs found in
6248  * the postponable ones.
6249  */
6250  input_target = create_empty_pathtarget();
6251  postponable_cols = NIL;
6252 
6253  i = 0;
6254  foreach(lc, final_target->exprs)
6255  {
6256  Expr *expr = (Expr *) lfirst(lc);
6257 
6258  if (postpone_col[i] || (postpone_srfs && col_is_srf[i]))
6259  postponable_cols = lappend(postponable_cols, expr);
6260  else
6261  add_column_to_pathtarget(input_target, expr,
6262  get_pathtarget_sortgroupref(final_target, i));
6263 
6264  i++;
6265  }
6266 
6267  /*
6268  * Pull out all the Vars, Aggrefs, and WindowFuncs mentioned in
6269  * postponable columns, and add them to the sort-input target if not
6270  * already present. (Some might be there already.) We mustn't
6271  * deconstruct Aggrefs or WindowFuncs here, since the projection node
6272  * would be unable to recompute them.
6273  */
6274  postponable_vars = pull_var_clause((Node *) postponable_cols,
6275  PVC_INCLUDE_AGGREGATES |
6276  PVC_INCLUDE_WINDOWFUNCS |
6277  PVC_INCLUDE_PLACEHOLDERS);
6278  add_new_columns_to_pathtarget(input_target, postponable_vars);
6279 
6280  /* clean up cruft */
6281  list_free(postponable_vars);
6282  list_free(postponable_cols);
6283 
6284  /* XXX this represents even more redundant cost calculation ... */
6285  return set_pathtarget_cost_width(root, input_target);
6286 }
bool contain_volatile_functions(Node *clause)
Definition: clauses.c:538
double cpu_operator_cost
Definition: costsize.c:123
void cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
Definition: costsize.c:4666
bool expression_returns_set(Node *clause)
Definition: nodeFuncs.c:758
#define PVC_INCLUDE_WINDOWFUNCS
Definition: optimizer.h:188
Cost per_tuple
Definition: pathnodes.h:48

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), Assert, contain_volatile_functions(), cost_qual_eval_node(), cpu_operator_cost, create_empty_pathtarget(), expression_returns_set(), PathTarget::exprs, get_pathtarget_sortgroupref, i, lappend(), lfirst, list_free(), list_length(), NIL, palloc0(), parse(), QualCost::per_tuple, pull_var_clause(), PVC_INCLUDE_AGGREGATES, PVC_INCLUDE_PLACEHOLDERS, PVC_INCLUDE_WINDOWFUNCS, root, and set_pathtarget_cost_width().

Referenced by grouping_planner().
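
A worked example of the postponement rules above (slow_func is hypothetical and assumed to cost more than 10 * cpu_operator_cost):

  /* SELECT x, slow_func(x) FROM t ORDER BY x LIMIT 10;
   *
   * slow_func(x) carries no sortgroupref and is "expensive", and the
   * query has a LIMIT, so the sort-input target is just {x}: we sort
   * on x, stop after ten rows, and only then project slow_func(x) --
   * ten evaluations instead of one per input row. */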

◆ make_window_input_target()

static PathTarget * make_window_input_target ( PlannerInfo *root,
PathTarget *final_target,
List *activeWindows 
)
static

Definition at line 5881 of file planner.c.

5884 {
5885  PathTarget *input_target;
5886  Bitmapset *sgrefs;
5887  List *flattenable_cols;
5888  List *flattenable_vars;
5889  int i;
5890  ListCell *lc;
5891 
5892  Assert(root->parse->hasWindowFuncs);
5893 
5894  /*
5895  * Collect the sortgroupref numbers of window PARTITION/ORDER BY clauses
5896  * into a bitmapset for convenient reference below.
5897  */
5898  sgrefs = NULL;
5899  foreach(lc, activeWindows)
5900  {
5901  WindowClause *wc = lfirst_node(WindowClause, lc);
5902  ListCell *lc2;
5903 
5904  foreach(lc2, wc->partitionClause)
5905  {
5906  SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
5907 
5908  sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
5909  }
5910  foreach(lc2, wc->orderClause)
5911  {
5912  SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
5913 
5914  sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
5915  }
5916  }
5917 
5918  /* Add in sortgroupref numbers of GROUP BY clauses, too */
5919  foreach(lc, root->processed_groupClause)
5920  {
5921  SortGroupClause *grpcl = lfirst_node(SortGroupClause, lc);
5922 
5923  sgrefs = bms_add_member(sgrefs, grpcl->tleSortGroupRef);
5924  }
5925 
5926  /*
5927  * Construct a target containing all the non-flattenable targetlist items,
5928  * and save aside the others for a moment.
5929  */
5930  input_target = create_empty_pathtarget();
5931  flattenable_cols = NIL;
5932 
5933  i = 0;
5934  foreach(lc, final_target->exprs)
5935  {
5936  Expr *expr = (Expr *) lfirst(lc);
5937  Index sgref = get_pathtarget_sortgroupref(final_target, i);
5938 
5939  /*
5940  * Don't want to deconstruct window clauses or GROUP BY items. (Note
5941  * that such items can't contain window functions, so it's okay to
5942  * compute them below the WindowAgg nodes.)
5943  */
5944  if (sgref != 0 && bms_is_member(sgref, sgrefs))
5945  {
5946  /*
5947  * Don't want to deconstruct this value, so add it to the input
5948  * target as-is.
5949  */
5950  add_column_to_pathtarget(input_target, expr, sgref);
5951  }
5952  else
5953  {
5954  /*
5955  * Column is to be flattened, so just remember the expression for
5956  * later call to pull_var_clause.
5957  */
5958  flattenable_cols = lappend(flattenable_cols, expr);
5959  }
5960 
5961  i++;
5962  }
5963 
5964  /*
5965  * Pull out all the Vars and Aggrefs mentioned in flattenable columns, and
5966  * add them to the input target if not already present. (Some might be
5967  * there already because they're used directly as window/group clauses.)
5968  *
5969  * Note: it's essential to use PVC_INCLUDE_AGGREGATES here, so that any
5970  * Aggrefs are placed in the Agg node's tlist and not left to be computed
5971  * at higher levels. On the other hand, we should recurse into
5972  * WindowFuncs to make sure their input expressions are available.
5973  */
5974  flattenable_vars = pull_var_clause((Node *) flattenable_cols,
5975  PVC_INCLUDE_AGGREGATES |
5976  PVC_RECURSE_WINDOWFUNCS |
5977  PVC_INCLUDE_PLACEHOLDERS);
5978  add_new_columns_to_pathtarget(input_target, flattenable_vars);
5979 
5980  /* clean up cruft */
5981  list_free(flattenable_vars);
5982  list_free(flattenable_cols);
5983 
5984  /* XXX this causes some redundant cost calculation ... */
5985  return set_pathtarget_cost_width(root, input_target);
5986 }

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), Assert, bms_add_member(), bms_is_member(), create_empty_pathtarget(), PathTarget::exprs, get_pathtarget_sortgroupref, i, lappend(), lfirst, lfirst_node, list_free(), NIL, WindowClause::orderClause, WindowClause::partitionClause, pull_var_clause(), PVC_INCLUDE_AGGREGATES, PVC_INCLUDE_PLACEHOLDERS, PVC_RECURSE_WINDOWFUNCS, root, set_pathtarget_cost_width(), and SortGroupClause::tleSortGroupRef.

Referenced by grouping_planner().

◆ mark_partial_aggref()

void mark_partial_aggref ( Aggref *agg,
AggSplit  aggsplit 
)

Definition at line 5512 of file planner.c.

5513 {
5514  /* aggtranstype should be computed by this point */
5515  Assert(OidIsValid(agg->aggtranstype));
5516  /* ... but aggsplit should still be as the parser left it */
5517  Assert(agg->aggsplit == AGGSPLIT_SIMPLE);
5518 
5519  /* Mark the Aggref with the intended partial-aggregation mode */
5520  agg->aggsplit = aggsplit;
5521 
5522  /*
5523  * Adjust result type if needed. Normally, a partial aggregate returns
5524  * the aggregate's transition type; but if that's INTERNAL and we're
5525  * serializing, it returns BYTEA instead.
5526  */
5527  if (DO_AGGSPLIT_SKIPFINAL(aggsplit))
5528  {
5529  if (agg->aggtranstype == INTERNALOID && DO_AGGSPLIT_SERIALIZE(aggsplit))
5530  agg->aggtype = BYTEAOID;
5531  else
5532  agg->aggtype = agg->aggtranstype;
5533  }
5534 }
#define OidIsValid(objectId)
Definition: c.h:775
#define DO_AGGSPLIT_SKIPFINAL(as)
Definition: nodes.h:385
#define DO_AGGSPLIT_SERIALIZE(as)
Definition: nodes.h:386

References AGGSPLIT_SIMPLE, Assert, DO_AGGSPLIT_SERIALIZE, DO_AGGSPLIT_SKIPFINAL, and OidIsValid.

Referenced by convert_combining_aggrefs(), and make_partial_grouping_target().
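
For example, an aggregate whose transition type is INTERNAL (such as avg over numeric) must ship its partial state in serialized form, so marking it for initial-serial mode also rewrites its result type. A sketch:

  /* assume agg is an Aggref with aggtranstype == INTERNALOID */
  mark_partial_aggref(agg, AGGSPLIT_INITIAL_SERIAL);
  /* now agg->aggsplit == AGGSPLIT_INITIAL_SERIAL
   * and agg->aggtype  == BYTEAOID (serialized partial state) */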

◆ optimize_window_clauses()

static void optimize_window_clauses ( PlannerInfo *root,
WindowFuncLists *wflists 
)
static

Definition at line 5584 of file planner.c.

5585 {
5586  List *windowClause = root->parse->windowClause;
5587  ListCell *lc;
5588 
5589  foreach(lc, windowClause)
5590  {
5591  WindowClause *wc = lfirst_node(WindowClause, lc);
5592  ListCell *lc2;
5593  int optimizedFrameOptions = 0;
5594 
5595  Assert(wc->winref <= wflists->maxWinRef);
5596 
5597  /* skip any WindowClauses that have no WindowFuncs */
5598  if (wflists->windowFuncs[wc->winref] == NIL)
5599  continue;
5600 
5601  foreach(lc2, wflists->windowFuncs[wc->winref])
5602  {
5603  SupportRequestOptimizeWindowClause req;
5604  SupportRequestOptimizeWindowClause *res;
5605  WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
5606  Oid prosupport;
5607 
5608  prosupport = get_func_support(wfunc->winfnoid);
5609 
5610  /* Check if there's a support function for 'wfunc' */
5611  if (!OidIsValid(prosupport))
5612  break; /* can't optimize this WindowClause */
5613 
5614  req.type = T_SupportRequestOptimizeWindowClause;
5615  req.window_clause = wc;
5616  req.window_func = wfunc;
5617  req.frameOptions = wc->frameOptions;
5618 
5619  /* call the support function */
5620  res = (SupportRequestOptimizeWindowClause *)
5621  DatumGetPointer(OidFunctionCall1(prosupport,
5622  PointerGetDatum(&req)));
5623 
5624  /*
5625  * Skip to next WindowClause if the support function does not
5626  * support this request type.
5627  */
5628  if (res == NULL)
5629  break;
5630 
5631  /*
5632  * Save these frameOptions for the first WindowFunc for this
5633  * WindowClause.
5634  */
5635  if (foreach_current_index(lc2) == 0)
5636  optimizedFrameOptions = res->frameOptions;
5637 
5638  /*
5639  * On subsequent WindowFuncs, if the frameOptions are not the same
5640  * then we're unable to optimize the frameOptions for this
5641  * WindowClause.
5642  */
5643  else if (optimizedFrameOptions != res->frameOptions)
5644  break; /* skip to the next WindowClause, if any */
5645  }
5646 
5647  /* adjust the frameOptions if all WindowFunc's agree that it's ok */
5648  if (lc2 == NULL && wc->frameOptions != optimizedFrameOptions)
5649  {
5650  ListCell *lc3;
5651 
5652  /* apply the new frame options */
5653  wc->frameOptions = optimizedFrameOptions;
5654 
5655  /*
5656  * We now check to see if changing the frameOptions has caused
5657  * this WindowClause to be a duplicate of some other WindowClause.
5658  * This can only happen if we have multiple WindowClauses, so
5659  * don't bother if there's only 1.
5660  */
5661  if (list_length(windowClause) == 1)
5662  continue;
5663 
5664  /*
5665  * Do the duplicate check and reuse the existing WindowClause if
5666  * we find a duplicate.
5667  */
5668  foreach(lc3, windowClause)
5669  {
5670  WindowClause *existing_wc = lfirst_node(WindowClause, lc3);
5671 
5672  /* skip over the WindowClause we're currently editing */
5673  if (existing_wc == wc)
5674  continue;
5675 
5676  /*
5677  * Perform the same duplicate check that is done in
5678  * transformWindowFuncCall.
5679  */
5680  if (equal(wc->partitionClause, existing_wc->partitionClause) &&
5681  equal(wc->orderClause, existing_wc->orderClause) &&
5682  wc->frameOptions == existing_wc->frameOptions &&
5683  equal(wc->startOffset, existing_wc->startOffset) &&
5684  equal(wc->endOffset, existing_wc->endOffset))
5685  {
5686  ListCell *lc4;
5687 
5688  /*
5689  * Now move each WindowFunc in 'wc' into 'existing_wc'.
5690  * This requires adjusting each WindowFunc's winref and
5691  * moving the WindowFuncs in 'wc' to the list of
5692  * WindowFuncs in 'existing_wc'.
5693  */
5694  foreach(lc4, wflists->windowFuncs[wc->winref])
5695  {
5696  WindowFunc *wfunc = lfirst_node(WindowFunc, lc4);
5697 
5698  wfunc->winref = existing_wc->winref;
5699  }
5700 
5701  /* move list items */
5702  wflists->windowFuncs[existing_wc->winref] = list_concat(wflists->windowFuncs[existing_wc->winref],
5703  wflists->windowFuncs[wc->winref]);
5704  wflists->windowFuncs[wc->winref] = NIL;
5705 
5706  /*
5707  * transformWindowFuncCall() should have made sure there
5708  * are no other duplicates, so we needn't bother looking
5709  * any further.
5710  */
5711  break;
5712  }
5713  }
5714  }
5715  }
5716 }
#define OidFunctionCall1(functionId, arg1)
Definition: fmgr.h:680
RegProcedure get_func_support(Oid funcid)
Definition: lsyscache.c:1858
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:322
static Pointer DatumGetPointer(Datum X)
Definition: postgres.h:312
unsigned int Oid
Definition: postgres_ext.h:31
struct WindowClause * window_clause
Definition: supportnodes.h:339
Node * startOffset
Definition: parsenodes.h:1550
Node * endOffset
Definition: parsenodes.h:1551
Index maxWinRef
Definition: clauses.h:22
Index winref
Definition: primnodes.h:579
Oid winfnoid
Definition: primnodes.h:567

References Assert, DatumGetPointer(), WindowClause::endOffset, equal(), foreach_current_index, WindowClause::frameOptions, SupportRequestOptimizeWindowClause::frameOptions, get_func_support(), if(), lfirst_node, list_concat(), list_length(), WindowFuncLists::maxWinRef, NIL, OidFunctionCall1, OidIsValid, WindowClause::orderClause, WindowClause::partitionClause, PointerGetDatum(), res, root, WindowClause::startOffset, SupportRequestOptimizeWindowClause::type, SupportRequestOptimizeWindowClause::window_clause, SupportRequestOptimizeWindowClause::window_func, WindowFuncLists::windowFuncs, WindowFunc::winfnoid, WindowClause::winref, and WindowFunc::winref.

Referenced by grouping_planner().
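
The request issued here is answered by the window function's support function. A hedged sketch of such a support function (names are illustrative; the in-core row_number() support behaves along these lines):

  Datum
  my_window_func_support(PG_FUNCTION_ARGS)
  {
      Node   *rawreq = (Node *) PG_GETARG_POINTER(0);

      if (IsA(rawreq, SupportRequestOptimizeWindowClause))
      {
          SupportRequestOptimizeWindowClause *req =
              (SupportRequestOptimizeWindowClause *) rawreq;

          /* the result does not depend on the frame, so request the
           * cheapest frame options */
          req->frameOptions = (FRAMEOPTION_NONDEFAULT |
                               FRAMEOPTION_ROWS |
                               FRAMEOPTION_START_UNBOUNDED_PRECEDING |
                               FRAMEOPTION_END_CURRENT_ROW);
          PG_RETURN_POINTER(req);
      }

      PG_RETURN_POINTER(NULL);   /* request type not handled */
  }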

◆ plan_cluster_use_sort()

bool plan_cluster_use_sort ( Oid  tableOid,
Oid  indexOid 
)

Definition at line 6537 of file planner.c.

6538 {
6539  PlannerInfo *root;
6540  Query *query;
6541  PlannerGlobal *glob;
6542  RangeTblEntry *rte;
6543  RelOptInfo *rel;
6544  IndexOptInfo *indexInfo;
6545  QualCost indexExprCost;
6546  Cost comparisonCost;
6547  Path *seqScanPath;
6548  Path seqScanAndSortPath;
6549  IndexPath *indexScanPath;
6550  ListCell *lc;
6551 
6552  /* We can short-circuit the cost comparison if indexscans are disabled */
6553  if (!enable_indexscan)
6554  return true; /* use sort */
6555 
6556  /* Set up mostly-dummy planner state */
6557  query = makeNode(Query);
6558  query->commandType = CMD_SELECT;
6559 
6560  glob = makeNode(PlannerGlobal);
6561 
6562  root = makeNode(PlannerInfo);
6563  root->parse = query;
6564  root->glob = glob;
6565  root->query_level = 1;
6566  root->planner_cxt = CurrentMemoryContext;
6567  root->wt_param_id = -1;
6568  root->join_domains = list_make1(makeNode(JoinDomain));
6569 
6570  /* Build a minimal RTE for the rel */
6571  rte = makeNode(RangeTblEntry);
6572  rte->rtekind = RTE_RELATION;
6573  rte->relid = tableOid;
6574  rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
6575  rte->rellockmode = AccessShareLock;
6576  rte->lateral = false;
6577  rte->inh = false;
6578  rte->inFromCl = true;
6579  query->rtable = list_make1(rte);
6580  addRTEPermissionInfo(&query->rteperminfos, rte);
6581 
6582  /* Set up RTE/RelOptInfo arrays */
6583  setup_simple_rel_arrays(root);
6584 
6585  /* Build RelOptInfo */
6586  rel = build_simple_rel(root, 1, NULL);
6587 
6588  /* Locate IndexOptInfo for the target index */
6589  indexInfo = NULL;
6590  foreach(lc, rel->indexlist)
6591  {
6592  indexInfo = lfirst_node(IndexOptInfo, lc);
6593  if (indexInfo->indexoid == indexOid)
6594  break;
6595  }
6596 
6597  /*
6598  * It's possible that get_relation_info did not generate an IndexOptInfo
6599  * for the desired index; this could happen if it's not yet reached its
6600  * indcheckxmin usability horizon, or if it's a system index and we're
6601  * ignoring system indexes. In such cases we should tell CLUSTER to not
6602  * trust the index contents but use seqscan-and-sort.
6603  */
6604  if (lc == NULL) /* not in the list? */
6605  return true; /* use sort */
6606 
6607  /*
6608  * Rather than doing all the pushups that would be needed to use
6609  * set_baserel_size_estimates, just do a quick hack for rows and width.
6610  */
6611  rel->rows = rel->tuples;
6612  rel->reltarget->width = get_relation_data_width(tableOid, NULL);
6613 
6614  root->total_table_pages = rel->pages;
6615 
6616  /*
6617  * Determine eval cost of the index expressions, if any. We need to
6618  * charge twice that amount for each tuple comparison that happens during
6619  * the sort, since tuplesort.c will have to re-evaluate the index
6620  * expressions each time. (XXX that's pretty inefficient...)
6621  */
6622  cost_qual_eval(&indexExprCost, indexInfo->indexprs, root);
6623  comparisonCost = 2.0 * (indexExprCost.startup + indexExprCost.per_tuple);
6624 
6625  /* Estimate the cost of seq scan + sort */
6626  seqScanPath = create_seqscan_path(root, rel, NULL, 0);
6627  cost_sort(&seqScanAndSortPath, root, NIL,
6628  seqScanPath->total_cost, rel->tuples, rel->reltarget->width,
6629  comparisonCost, maintenance_work_mem, -1.0);
6630 
6631  /* Estimate the cost of index scan */
6632  indexScanPath = create_index_path(root, indexInfo,
6633  NIL, NIL, NIL, NIL,
6634  ForwardScanDirection, false,
6635  NULL, 1.0, false);
6636 
6637  return (seqScanAndSortPath.total_cost < indexScanPath->path.total_cost);
6638 }
void cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
Definition: costsize.c:4640
void cost_sort(Path *path, PlannerInfo *root, List *pathkeys, Cost input_cost, double tuples, int width, Cost comparison_cost, int sort_mem, double limit_tuples)
Definition: costsize.c:2124
bool enable_indexscan
Definition: costsize.c:135
int maintenance_work_mem
Definition: globals.c:130
#define AccessShareLock
Definition: lockdefs.h:36
MemoryContext CurrentMemoryContext
Definition: mcxt.c:143
double Cost
Definition: nodes.h:251
RTEPermissionInfo * addRTEPermissionInfo(List **rteperminfos, RangeTblEntry *rte)
@ RTE_RELATION
Definition: parsenodes.h:1028
IndexPath * create_index_path(PlannerInfo *root, IndexOptInfo *index, List *indexclauses, List *indexorderbys, List *indexorderbycols, List *pathkeys, ScanDirection indexscandir, bool indexonly, Relids required_outer, double loop_count, bool partial_path)
Definition: pathnode.c:993
Path * create_seqscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer, int parallel_workers)
Definition: pathnode.c:927
int32 get_relation_data_width(Oid relid, int32 *attr_widths)
Definition: plancat.c:1208
void setup_simple_rel_arrays(PlannerInfo *root)
Definition: relnode.c:94
RelOptInfo * build_simple_rel(PlannerInfo *root, int relid, RelOptInfo *parent)
Definition: relnode.c:192
@ ForwardScanDirection
Definition: sdir.h:28
Path path
Definition: pathnodes.h:1698
Cost total_cost
Definition: pathnodes.h:1651
Cost startup
Definition: pathnodes.h:47
List * rtable
Definition: parsenodes.h:168
CmdType commandType
Definition: parsenodes.h:121
RTEKind rtekind
Definition: parsenodes.h:1057
Cardinality tuples
Definition: pathnodes.h:939
BlockNumber pages
Definition: pathnodes.h:938
List * indexlist
Definition: pathnodes.h:934
Cardinality rows
Definition: pathnodes.h:867

References AccessShareLock, addRTEPermissionInfo(), build_simple_rel(), CMD_SELECT, Query::commandType, cost_qual_eval(), cost_sort(), create_index_path(), create_seqscan_path(), CurrentMemoryContext, enable_indexscan, ForwardScanDirection, get_relation_data_width(), RelOptInfo::indexlist, IndexOptInfo::indexoid, RangeTblEntry::inh, lfirst_node, list_make1, maintenance_work_mem, makeNode, NIL, RelOptInfo::pages, IndexPath::path, QualCost::per_tuple, RangeTblEntry::relid, RelOptInfo::reltarget, root, RelOptInfo::rows, Query::rtable, RTE_RELATION, RangeTblEntry::rtekind, setup_simple_rel_arrays(), QualCost::startup, Path::total_cost, RelOptInfo::tuples, and PathTarget::width.

Referenced by copy_table_data().
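
A sketch of how a caller consumes the verdict (mirroring the CLUSTER code path; variable names are illustrative):

  bool use_sort = plan_cluster_use_sort(tableOid, indexOid);

  elog(DEBUG1, "CLUSTER will use %s",
       use_sort ? "a seqscan-and-sort" : "an index scan");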

◆ plan_create_index_workers()

int plan_create_index_workers ( Oid  tableOid,
Oid  indexOid 
)

Definition at line 6657 of file planner.c.

6658 {
6659  PlannerInfo *root;
6660  Query *query;
6661  PlannerGlobal *glob;
6662  RangeTblEntry *rte;
6663  Relation heap;
6664  Relation index;
6665  RelOptInfo *rel;
6666  int parallel_workers;
6667  BlockNumber heap_blocks;
6668  double reltuples;
6669  double allvisfrac;
6670 
6671  /*
6672  * We don't allow performing parallel operation in standalone backend or
6673  * when parallelism is disabled.
6674  */
6675  if (!IsUnderPostmaster || max_parallel_maintenance_workers == 0)
6676  return 0;
6677 
6678  /* Set up largely-dummy planner state */
6679  query = makeNode(Query);
6680  query->commandType = CMD_SELECT;
6681 
6682  glob = makeNode(PlannerGlobal);
6683 
6684  root = makeNode(PlannerInfo);
6685  root->parse = query;
6686  root->glob = glob;
6687  root->query_level = 1;
6688  root->planner_cxt = CurrentMemoryContext;
6689  root->wt_param_id = -1;
6690  root->join_domains = list_make1(makeNode(JoinDomain));
6691 
6692  /*
6693  * Build a minimal RTE.
6694  *
6695  * Mark the RTE with inh = true. This is a kludge to prevent
6696  * get_relation_info() from fetching index info, which is necessary
6697  * because it does not expect that any IndexOptInfo is currently
6698  * undergoing REINDEX.
6699  */
6700  rte = makeNode(RangeTblEntry);
6701  rte->rtekind = RTE_RELATION;
6702  rte->relid = tableOid;
6703  rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
6704  rte->rellockmode = AccessShareLock;
6705  rte->lateral = false;
6706  rte->inh = true;
6707  rte->inFromCl = true;
6708  query->rtable = list_make1(rte);
6709  addRTEPermissionInfo(&query->rteperminfos, rte);
6710 
6711  /* Set up RTE/RelOptInfo arrays */
6712  setup_simple_rel_arrays(root);
6713 
6714  /* Build RelOptInfo */
6715  rel = build_simple_rel(root, 1, NULL);
6716 
6717  /* Rels are assumed already locked by the caller */
6718  heap = table_open(tableOid, NoLock);
6719  index = index_open(indexOid, NoLock);
6720 
6721  /*
6722  * Determine if it's safe to proceed.
6723  *
6724  * Currently, parallel workers can't access the leader's temporary tables.
6725  * Furthermore, any index predicate or index expressions must be parallel
6726  * safe.
6727  */
6728  if (heap->rd_rel->relpersistence == RELPERSISTENCE_TEMP ||
6729  !is_parallel_safe(root, (Node *) RelationGetIndexExpressions(index)) ||
6730  !is_parallel_safe(root, (Node *) RelationGetIndexPredicate(index)))
6731  {
6732  parallel_workers = 0;
6733  goto done;
6734  }
6735 
6736  /*
6737  * If parallel_workers storage parameter is set for the table, accept that
6738  * as the number of parallel worker processes to launch (though still cap
6739  * at max_parallel_maintenance_workers). Note that we deliberately do not
6740  * consider any other factor when parallel_workers is set. (e.g., memory
6741  * use by workers.)
6742  */
6743  if (rel->rel_parallel_workers != -1)
6744  {
6745  parallel_workers = Min(rel->rel_parallel_workers,
6746  max_parallel_maintenance_workers);
6747  goto done;
6748  }
6749 
6750  /*
6751  * Estimate heap relation size ourselves, since rel->pages cannot be
6752  * trusted (heap RTE was marked as inheritance parent)
6753  */
6754  estimate_rel_size(heap, NULL, &heap_blocks, &reltuples, &allvisfrac);
6755 
6756  /*
6757  * Determine number of workers to scan the heap relation using generic
6758  * model
6759  */
6760  parallel_workers = compute_parallel_worker(rel, heap_blocks, -1,
6761  max_parallel_maintenance_workers);
6762 
6763  /*
6764  * Cap workers based on available maintenance_work_mem as needed.
6765  *
6766  * Note that each tuplesort participant receives an even share of the
6767  * total maintenance_work_mem budget. Aim to leave participants
6768  * (including the leader as a participant) with no less than 32MB of
6769  * memory. This leaves cases where maintenance_work_mem is set to 64MB
6770  * immediately past the threshold of being capable of launching a single
6771  * parallel worker to sort.
6772  */
6773  while (parallel_workers > 0 &&
6774  maintenance_work_mem / (parallel_workers + 1) < 32768L)
6775  parallel_workers--;
6776 
6777 done:
6778  index_close(index, NoLock);
6779  table_close(heap, NoLock);
6780 
6781  return parallel_workers;
6782 }
int compute_parallel_worker(RelOptInfo *rel, double heap_pages, double index_pages, int max_workers)
Definition: allpaths.c:4215
uint32 BlockNumber
Definition: block.h:31
int max_parallel_maintenance_workers
Definition: globals.c:131
bool IsUnderPostmaster
Definition: globals.c:117
void index_close(Relation relation, LOCKMODE lockmode)
Definition: indexam.c:177
Relation index_open(Oid relationId, LOCKMODE lockmode)
Definition: indexam.c:133
#define NoLock
Definition: lockdefs.h:34
void estimate_rel_size(Relation rel, int32 *attr_widths, BlockNumber *pages, double *tuples, double *allvisfrac)
Definition: plancat.c:1041
List * RelationGetIndexPredicate(Relation relation)
Definition: relcache.c:5138
List * RelationGetIndexExpressions(Relation relation)
Definition: relcache.c:5025
int rel_parallel_workers
Definition: pathnodes.h:946
Form_pg_class rd_rel
Definition: rel.h:111
Definition: type.h:95
void table_close(Relation relation, LOCKMODE lockmode)
Definition: table.c:126
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition: table.c:40

References AccessShareLock, addRTEPermissionInfo(), build_simple_rel(), CMD_SELECT, Query::commandType, compute_parallel_worker(), CurrentMemoryContext, estimate_rel_size(), index_close(), index_open(), RangeTblEntry::inh, is_parallel_safe(), IsUnderPostmaster, list_make1, maintenance_work_mem, makeNode, max_parallel_maintenance_workers, Min, NoLock, RelationData::rd_rel, RelOptInfo::rel_parallel_workers, RelationGetIndexExpressions(), RelationGetIndexPredicate(), RangeTblEntry::relid, root, Query::rtable, RTE_RELATION, RangeTblEntry::rtekind, setup_simple_rel_arrays(), table_close(), and table_open().

Referenced by index_build().
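
To make the memory cap above concrete (maintenance_work_mem is measured in kilobytes, so the 32768L threshold is 32MB; the numbers are illustrative):

  /* maintenance_work_mem = 65536 (64MB), generic model proposed 2 workers:
   *   65536 / (2 + 1) = 21845 KB  < 32768  -> drop to 1 worker
   *   65536 / (1 + 1) = 32768 KB !< 32768  -> keep 1 worker
   * So 64MB is just enough to launch a single parallel worker. */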

◆ planner()

PlannedStmt* planner ( Query *parse,
const char *  query_string,
int  cursorOptions,
ParamListInfo  boundParams 
)

Definition at line 274 of file planner.c.

276 {
277  PlannedStmt *result;
278 
279  if (planner_hook)
280  result = (*planner_hook) (parse, query_string, cursorOptions, boundParams);
281  else
282  result = standard_planner(parse, query_string, cursorOptions, boundParams);
283  return result;
284 }
planner_hook_type planner_hook
Definition: planner.c:71
PlannedStmt * standard_planner(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams)
Definition: planner.c:287

References parse(), planner_hook, and standard_planner().

Referenced by pg_plan_query().
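
Extensions interpose on this entry point through planner_hook, chaining to any previously installed hook or falling back to standard_planner(). A minimal, hypothetical extension sketch:

  static planner_hook_type prev_planner_hook = NULL;

  static PlannedStmt *
  my_planner(Query *parse, const char *query_string,
             int cursorOptions, ParamListInfo boundParams)
  {
      /* ... inspect or adjust the Query here ... */
      if (prev_planner_hook)
          return prev_planner_hook(parse, query_string,
                                   cursorOptions, boundParams);
      return standard_planner(parse, query_string,
                              cursorOptions, boundParams);
  }

  void
  _PG_init(void)
  {
      prev_planner_hook = planner_hook;
      planner_hook = my_planner;
  }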

◆ postprocess_setop_tlist()

static List * postprocess_setop_tlist ( List *new_tlist,
List *orig_tlist 
)
static

Definition at line 5547 of file planner.c.

5548 {
5549  ListCell *l;
5550  ListCell *orig_tlist_item = list_head(orig_tlist);
5551 
5552  foreach(l, new_tlist)
5553  {
5554  TargetEntry *new_tle = lfirst_node(TargetEntry, l);
5555  TargetEntry *orig_tle;
5556 
5557  /* ignore resjunk columns in setop result */
5558  if (new_tle->resjunk)
5559  continue;
5560 
5561  Assert(orig_tlist_item != NULL);
5562  orig_tle = lfirst_node(TargetEntry, orig_tlist_item);
5563  orig_tlist_item = lnext(orig_tlist, orig_tlist_item);
5564  if (orig_tle->resjunk) /* should not happen */
5565  elog(ERROR, "resjunk output columns are not implemented");
5566  Assert(new_tle->resno == orig_tle->resno);
5567  new_tle->ressortgroupref = orig_tle->ressortgroupref;
5568  }
5569  if (orig_tlist_item != NULL)
5570  elog(ERROR, "resjunk output columns are not implemented");
5571  return new_tlist;
5572 }
#define elog(elevel,...)
Definition: elog.h:224
AttrNumber resno
Definition: primnodes.h:2164
Index ressortgroupref
Definition: primnodes.h:2168

References Assert, elog, ERROR, lfirst_node, list_head(), lnext(), TargetEntry::resno, and TargetEntry::ressortgroupref.

Referenced by grouping_planner().

◆ preprocess_expression()

static Node * preprocess_expression ( PlannerInfo *root,
Node *expr,
int  kind 
)
static

Definition at line 1126 of file planner.c.

1127 {
1128  /*
1129  * Fall out quickly if expression is empty. This occurs often enough to
1130  * be worth checking. Note that null->null is the correct conversion for
1131  * implicit-AND result format, too.
1132  */
1133  if (expr == NULL)
1134  return NULL;
1135 
1136  /*
1137  * If the query has any join RTEs, replace join alias variables with
1138  * base-relation variables. We must do this first, since any expressions
1139  * we may extract from the joinaliasvars lists have not been preprocessed.
1140  * For example, if we did this after sublink processing, sublinks expanded
1141  * out from join aliases would not get processed. But we can skip this in
1142  * non-lateral RTE functions, VALUES lists, and TABLESAMPLE clauses, since
1143  * they can't contain any Vars of the current query level.
1144  */
1145  if (root->hasJoinRTEs &&
1146  !(kind == EXPRKIND_RTFUNC ||
1147  kind == EXPRKIND_VALUES ||
1148  kind == EXPRKIND_TABLESAMPLE ||
1149  kind == EXPRKIND_TABLEFUNC))
1150  expr = flatten_join_alias_vars(root, root->parse, expr);
1151 
1152  /*
1153  * Simplify constant expressions. For function RTEs, this was already
1154  * done by preprocess_function_rtes. (But note we must do it again for
1155  * EXPRKIND_RTFUNC_LATERAL, because those might by now contain
1156  * un-simplified subexpressions inserted by flattening of subqueries or
1157  * join alias variables.)
1158  *
1159  * Note: an essential effect of this is to convert named-argument function
1160  * calls to positional notation and insert the current actual values of
1161  * any default arguments for functions. To ensure that happens, we *must*
1162  * process all expressions here. Previous PG versions sometimes skipped
1163  * const-simplification if it didn't seem worth the trouble, but we can't
1164  * do that anymore.
1165  *
1166  * Note: this also flattens nested AND and OR expressions into N-argument
1167  * form. All processing of a qual expression after this point must be
1168  * careful to maintain AND/OR flatness --- that is, do not generate a tree
1169  * with AND directly under AND, nor OR directly under OR.
1170  */
1171  if (kind != EXPRKIND_RTFUNC)
1172  expr = eval_const_expressions(root, expr);
1173 
1174  /*
1175  * If it's a qual or havingQual, canonicalize it.
1176  */
1177  if (kind == EXPRKIND_QUAL)
1178  {
1179  expr = (Node *) canonicalize_qual((Expr *) expr, false);
1180 
1181 #ifdef OPTIMIZER_DEBUG
1182  printf("After canonicalize_qual()\n");
1183  pprint(expr);
1184 #endif
1185  }
1186 
1187  /*
1188  * Check for ANY ScalarArrayOpExpr with Const arrays and set the
1189  * hashfuncid of any that might execute more quickly by using hash lookups
1190  * instead of a linear search.
1191  */
1192  if (kind == EXPRKIND_QUAL || kind == EXPRKIND_TARGET)
1193  {
1194  convert_saop_to_hashed_saop(expr);
1195  }
1196 
1197  /* Expand SubLinks to SubPlans */
1198  if (root->parse->hasSubLinks)
1199  expr = SS_process_sublinks(root, expr, (kind == EXPRKIND_QUAL));
1200 
1201  /*
1202  * XXX do not insert anything here unless you have grokked the comments in
1203  * SS_replace_correlation_vars ...
1204  */
1205 
1206  /* Replace uplevel vars with Param nodes (this IS possible in VALUES) */
1207  if (root->query_level > 1)
1208  expr = SS_replace_correlation_vars(root, expr);
1209 
1210  /*
1211  * If it's a qual or havingQual, convert it to implicit-AND format. (We
1212  * don't want to do this before eval_const_expressions, since the latter
1213  * would be unable to simplify a top-level AND correctly. Also,
1214  * SS_process_sublinks expects explicit-AND format.)
1215  */
1216  if (kind == EXPRKIND_QUAL)
1217  expr = (Node *) make_ands_implicit((Expr *) expr);
1218 
1219  return expr;
1220 }
void pprint(const void *obj)
Definition: print.c:54
void convert_saop_to_hashed_saop(Node *node)
Definition: clauses.c:2287
List * make_ands_implicit(Expr *clause)
Definition: makefuncs.c:737
#define EXPRKIND_TARGET
Definition: planner.c:79
#define EXPRKIND_TABLESAMPLE
Definition: planner.c:87
#define EXPRKIND_VALUES
Definition: planner.c:82
#define EXPRKIND_QUAL
Definition: planner.c:78
#define EXPRKIND_TABLEFUNC
Definition: planner.c:89
#define EXPRKIND_RTFUNC
Definition: planner.c:80
#define printf(...)
Definition: port.h:244
Expr * canonicalize_qual(Expr *qual, bool is_check)
Definition: prepqual.c:293
Node * SS_replace_correlation_vars(PlannerInfo *root, Node *expr)
Definition: subselect.c:1868
Node * SS_process_sublinks(PlannerInfo *root, Node *expr, bool isQual)
Definition: subselect.c:1919
Node * flatten_join_alias_vars(PlannerInfo *root, Query *query, Node *node)
Definition: var.c:744

References canonicalize_qual(), convert_saop_to_hashed_saop(), eval_const_expressions(), EXPRKIND_QUAL, EXPRKIND_RTFUNC, EXPRKIND_TABLEFUNC, EXPRKIND_TABLESAMPLE, EXPRKIND_TARGET, EXPRKIND_VALUES, flatten_join_alias_vars(), make_ands_implicit(), pprint(), printf, root, SS_process_sublinks(), and SS_replace_correlation_vars().

Referenced by preprocess_phv_expression(), preprocess_qual_conditions(), and subquery_planner().
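
A representative call, as subquery_planner() makes for the targetlist (the EXPRKIND_* constant selects which of the steps above apply):

  parse->targetList = (List *)
      preprocess_expression(root, (Node *) parse->targetList,
                            EXPRKIND_TARGET);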

◆ preprocess_grouping_sets()

static grouping_sets_data * preprocess_grouping_sets ( PlannerInfo *root)
static

Definition at line 2047 of file planner.c.

2048 {
2049  Query *parse = root->parse;
2050  List *sets;
2051  int maxref = 0;
2052  ListCell *lc_set;
2053  grouping_sets_data *gd = palloc0(sizeof(grouping_sets_data));
2054 
2055  parse->groupingSets = expand_grouping_sets(parse->groupingSets, parse->groupDistinct, -1);
2056 
2057  gd->any_hashable = false;
2058  gd->unhashable_refs = NULL;
2059  gd->unsortable_refs = NULL;
2060  gd->unsortable_sets = NIL;
2061 
2062  /*
2063  * We don't currently make any attempt to optimize the groupClause when
2064  * there are grouping sets, so just duplicate it in processed_groupClause.
2065  */
2066  root->processed_groupClause = parse->groupClause;
2067 
2068  if (parse->groupClause)
2069  {
2070  ListCell *lc;
2071 
2072  foreach(lc, parse->groupClause)
2073  {
2074  SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
2075  Index ref = gc->tleSortGroupRef;
2076 
2077  if (ref > maxref)
2078  maxref = ref;
2079 
2080  if (!gc->hashable)
2081  gd->unhashable_refs = bms_add_member(gd->unhashable_refs, ref);
2082 
2083  if (!OidIsValid(gc->sortop))
2084  gd->unsortable_refs = bms_add_member(gd->unsortable_refs, ref);
2085  }
2086  }
2087 
2088  /* Allocate workspace array for remapping */
2089  gd->tleref_to_colnum_map = (int *) palloc((maxref + 1) * sizeof(int));
2090 
2091  /*
2092  * If we have any unsortable sets, we must extract them before trying to
2093  * prepare rollups. Unsortable sets don't go through
2094  * reorder_grouping_sets, so we must apply the GroupingSetData annotation
2095  * here.
2096  */
2097  if (!bms_is_empty(gd->unsortable_refs))
2098  {
2099  List *sortable_sets = NIL;
2100  ListCell *lc;
2101 
2102  foreach(lc, parse->groupingSets)
2103  {
2104  List *gset = (List *) lfirst(lc);
2105 
2106  if (bms_overlap_list(gd->unsortable_refs, gset))
2107  {
2108  GroupingSetData *gs = makeNode(GroupingSetData);
2109 
2110  gs->set = gset;
2111  gd->unsortable_sets = lappend(gd->unsortable_sets, gs);
2112 
2113  /*
2114  * We must enforce here that an unsortable set is hashable;
2115  * later code assumes this. Parse analysis only checks that
2116  * every individual column is either hashable or sortable.
2117  *
2118  * Note that passing this test doesn't guarantee we can
2119  * generate a plan; there might be other showstoppers.
2120  */
2121  if (bms_overlap_list(gd->unhashable_refs, gset))
2122  ereport(ERROR,
2123  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2124  errmsg("could not implement GROUP BY"),
2125  errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
2126  }
2127  else
2128  sortable_sets = lappend(sortable_sets, gset);
2129  }
2130 
2131  if (sortable_sets)
2132  sets = extract_rollup_sets(sortable_sets);
2133  else
2134  sets = NIL;
2135  }
2136  else
2137  sets = extract_rollup_sets(parse->groupingSets);
2138 
2139  foreach(lc_set, sets)
2140  {
2141  List *current_sets = (List *) lfirst(lc_set);
2142  RollupData *rollup = makeNode(RollupData);
2143  GroupingSetData *gs;
2144 
2145  /*
2146  * Reorder the current list of grouping sets into correct prefix
2147  * order. If only one aggregation pass is needed, try to make the
2148  * list match the ORDER BY clause; if more than one pass is needed, we
2149  * don't bother with that.
2150  *
2151  * Note that this reorders the sets from smallest-member-first to
2152  * largest-member-first, and applies the GroupingSetData annotations,
2153  * though the data will be filled in later.
2154  */
2155  current_sets = reorder_grouping_sets(current_sets,
2156  (list_length(sets) == 1
2157  ? parse->sortClause
2158  : NIL));
2159 
2160  /*
2161  * Get the initial (and therefore largest) grouping set.
2162  */
2163  gs = linitial_node(GroupingSetData, current_sets);
2164 
2165  /*
2166  * Order the groupClause appropriately. If the first grouping set is
2167  * empty, then the groupClause must also be empty; otherwise we have
2168  * to force the groupClause to match that grouping set's order.
2169  *
2170  * (The first grouping set can be empty even though parse->groupClause
2171  * is not empty only if all non-empty grouping sets are unsortable.
2172  * The groupClauses for hashed grouping sets are built later on.)
2173  */
2174  if (gs->set)
2175  rollup->groupClause = groupclause_apply_groupingset(root, gs->set);
2176  else
2177  rollup->groupClause = NIL;
2178 
2179  /*
2180  * Is it hashable? We pretend empty sets are hashable even though we
2181  * actually force them not to be hashed later. But don't bother if
2182  * there's nothing but empty sets (since in that case we can't hash
2183  * anything).
2184  */
2185  if (gs->set &&
2186  !bms_overlap_list(gd->unhashable_refs, gs->set))
2187  {
2188  rollup->hashable = true;
2189  gd->any_hashable = true;
2190  }
2191 
2192  /*
2193  * Now that we've pinned down an order for the groupClause for this
2194  * list of grouping sets, we need to remap the entries in the grouping
2195  * sets from sortgrouprefs to plain indices (0-based) into the
2196  * groupClause for this collection of grouping sets. We keep the
2197  * original form for later use, though.
2198  */
2199  rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
2200  current_sets,
2201  gd->tleref_to_colnum_map);
2202  rollup->gsets_data = current_sets;
2203 
2204  gd->rollups = lappend(gd->rollups, rollup);
2205  }
2206 
2207  if (gd->unsortable_sets)
2208  {
2209  /*
2210  * We have not yet pinned down a groupclause for this, but we will
2211  * need index-based lists for estimation purposes. Construct
2212  * hash_sets_idx based on the entire original groupclause for now.
2213  */
2214  gd->hash_sets_idx = remap_to_groupclause_idx(parse->groupClause,
2215  gd->unsortable_sets,
2216  gd->tleref_to_colnum_map);
2217  gd->any_hashable = true;
2218  }
2219 
2220  return gd;
2221 }
bool bms_overlap_list(const Bitmapset *a, const List *b)
Definition: bitmapset.c:608
List * expand_grouping_sets(List *groupingSets, bool groupDistinct, int limit)
Definition: parse_agg.c:1805
static List * reorder_grouping_sets(List *groupingSets, List *sortclause)
Definition: planner.c:3080
static List * extract_rollup_sets(List *groupingSets)
Definition: planner.c:2868
Bitmapset * unhashable_refs
Definition: planner.c:102
Bitmapset * unsortable_refs
Definition: planner.c:101

References grouping_sets_data::any_hashable, bms_add_member(), bms_is_empty, bms_overlap_list(), ereport, errcode(), errdetail(), errmsg(), ERROR, expand_grouping_sets(), extract_rollup_sets(), RollupData::groupClause, groupclause_apply_groupingset(), RollupData::gsets, RollupData::gsets_data, grouping_sets_data::hash_sets_idx, RollupData::hashable, lappend(), lfirst, lfirst_node, linitial_node, list_length(), makeNode, NIL, OidIsValid, palloc(), palloc0(), parse(), remap_to_groupclause_idx(), reorder_grouping_sets(), grouping_sets_data::rollups, root, GroupingSetData::set, SortGroupClause::sortop, grouping_sets_data::tleref_to_colnum_map, SortGroupClause::tleSortGroupRef, grouping_sets_data::unhashable_refs, grouping_sets_data::unsortable_refs, and grouping_sets_data::unsortable_sets.

Referenced by grouping_planner().
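
The routing logic above is easiest to see with the bitmapset tests reduced to plain integer masks. Below is a minimal standalone sketch (ordinary C, not backend code; the masks and column names are invented for illustration) of how each grouping set is classified against unsortable_refs and unhashable_refs:

#include <stdio.h>

int
main(void)
{
    /* each grouping set as a bitmask of sortgroup refs: bit 0 = a, 1 = b, 2 = c */
    unsigned    sets[] = {0x3 /* (a,b) */, 0x5 /* (a,c) */, 0x6 /* (b,c) */};
    unsigned    unsortable_refs = 0x4;  /* "c" has no sort operator */
    unsigned    unhashable_refs = 0x2;  /* "b" has no hash support */

    for (int i = 0; i < 3; i++)
    {
        if (sets[i] & unsortable_refs)
        {
            /* must be fully hashable, else "could not implement GROUP BY" */
            if (sets[i] & unhashable_refs)
                printf("set %d: error, neither sortable nor hashable\n", i);
            else
                printf("set %d: unsortable, handled by hashing\n", i);
        }
        else
            printf("set %d: sortable, fed to extract_rollup_sets\n", i);
    }
    return 0;
}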

◆ preprocess_limit()

static double preprocess_limit ( PlannerInfo root,
double  tuple_fraction,
int64 *  offset_est,
int64 *  count_est 
)
static

Definition at line 2443 of file planner.c.

2445 {
2446  Query *parse = root->parse;
2447  Node *est;
2448  double limit_fraction;
2449 
2450  /* Should not be called unless LIMIT or OFFSET */
2451  Assert(parse->limitCount || parse->limitOffset);
2452 
2453  /*
2454  * Try to obtain the clause values. We use estimate_expression_value
2455  * primarily because it can sometimes do something useful with Params.
2456  */
2457  if (parse->limitCount)
2458  {
2459  est = estimate_expression_value(root, parse->limitCount);
2460  if (est && IsA(est, Const))
2461  {
2462  if (((Const *) est)->constisnull)
2463  {
2464  /* NULL indicates LIMIT ALL, ie, no limit */
2465  *count_est = 0; /* treat as not present */
2466  }
2467  else
2468  {
2469  *count_est = DatumGetInt64(((Const *) est)->constvalue);
2470  if (*count_est <= 0)
2471  *count_est = 1; /* force to at least 1 */
2472  }
2473  }
2474  else
2475  *count_est = -1; /* can't estimate */
2476  }
2477  else
2478  *count_est = 0; /* not present */
2479 
2480  if (parse->limitOffset)
2481  {
2482  est = estimate_expression_value(root, parse->limitOffset);
2483  if (est && IsA(est, Const))
2484  {
2485  if (((Const *) est)->constisnull)
2486  {
2487  /* Treat NULL as no offset; the executor will too */
2488  *offset_est = 0; /* treat as not present */
2489  }
2490  else
2491  {
2492  *offset_est = DatumGetInt64(((Const *) est)->constvalue);
2493  if (*offset_est < 0)
2494  *offset_est = 0; /* treat as not present */
2495  }
2496  }
2497  else
2498  *offset_est = -1; /* can't estimate */
2499  }
2500  else
2501  *offset_est = 0; /* not present */
2502 
2503  if (*count_est != 0)
2504  {
2505  /*
2506  * A LIMIT clause limits the absolute number of tuples returned.
2507  * However, if it's not a constant LIMIT then we have to guess; for
2508  * lack of a better idea, assume 10% of the plan's result is wanted.
2509  */
2510  if (*count_est < 0 || *offset_est < 0)
2511  {
2512  /* LIMIT or OFFSET is an expression ... punt ... */
2513  limit_fraction = 0.10;
2514  }
2515  else
2516  {
2517  /* LIMIT (plus OFFSET, if any) is max number of tuples needed */
2518  limit_fraction = (double) *count_est + (double) *offset_est;
2519  }
2520 
2521  /*
2522  * If we have absolute limits from both caller and LIMIT, use the
2523  * smaller value; likewise if they are both fractional. If one is
2524  * fractional and the other absolute, we can't easily determine which
2525  * is smaller, but we use the heuristic that the absolute will usually
2526  * be smaller.
2527  */
2528  if (tuple_fraction >= 1.0)
2529  {
2530  if (limit_fraction >= 1.0)
2531  {
2532  /* both absolute */
2533  tuple_fraction = Min(tuple_fraction, limit_fraction);
2534  }
2535  else
2536  {
2537  /* caller absolute, limit fractional; use caller's value */
2538  }
2539  }
2540  else if (tuple_fraction > 0.0)
2541  {
2542  if (limit_fraction >= 1.0)
2543  {
2544  /* caller fractional, limit absolute; use limit */
2545  tuple_fraction = limit_fraction;
2546  }
2547  else
2548  {
2549  /* both fractional */
2550  tuple_fraction = Min(tuple_fraction, limit_fraction);
2551  }
2552  }
2553  else
2554  {
2555  /* no info from caller, just use limit */
2556  tuple_fraction = limit_fraction;
2557  }
2558  }
2559  else if (*offset_est != 0 && tuple_fraction > 0.0)
2560  {
2561  /*
2562  * We have an OFFSET but no LIMIT. This acts entirely differently
2563  * from the LIMIT case: here, we need to increase rather than decrease
2564  * the caller's tuple_fraction, because the OFFSET acts to cause more
2565  * tuples to be fetched instead of fewer. This only matters if we got
2566  * a tuple_fraction > 0, however.
2567  *
2568  * As above, use 10% if OFFSET is present but unestimatable.
2569  */
2570  if (*offset_est < 0)
2571  limit_fraction = 0.10;
2572  else
2573  limit_fraction = (double) *offset_est;
2574 
2575  /*
2576  * If we have absolute counts from both caller and OFFSET, add them
2577  * together; likewise if they are both fractional. If one is
2578  * fractional and the other absolute, we want to take the larger, and
2579  * we heuristically assume that's the fractional one.
2580  */
2581  if (tuple_fraction >= 1.0)
2582  {
2583  if (limit_fraction >= 1.0)
2584  {
2585  /* both absolute, so add them together */
2586  tuple_fraction += limit_fraction;
2587  }
2588  else
2589  {
2590  /* caller absolute, limit fractional; use limit */
2591  tuple_fraction = limit_fraction;
2592  }
2593  }
2594  else
2595  {
2596  if (limit_fraction >= 1.0)
2597  {
2598  /* caller fractional, limit absolute; use caller's value */
2599  }
2600  else
2601  {
2602  /* both fractional, so add them together */
2603  tuple_fraction += limit_fraction;
2604  if (tuple_fraction >= 1.0)
2605  tuple_fraction = 0.0; /* assume fetch all */
2606  }
2607  }
2608  }
2609 
2610  return tuple_fraction;
2611 }
Node * estimate_expression_value(PlannerInfo *root, Node *node)
Definition: clauses.c:2395

References Assert, DatumGetInt64(), estimate_expression_value(), IsA, Min, parse(), and root.

Referenced by grouping_planner().
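
The merging of the caller's tuple_fraction with the LIMIT-derived estimate follows a small decision table: values >= 1.0 are absolute row counts, values in (0,1) are fractions, and 0.0 means "fetch all". A minimal standalone sketch of just that rule (the helper name and test values are invented for illustration):

#include <stdio.h>

static double
combine(double tuple_fraction, double limit_fraction)
{
    if (tuple_fraction >= 1.0)          /* caller gave an absolute count */
        return (limit_fraction >= 1.0) ?
            (tuple_fraction < limit_fraction ? tuple_fraction : limit_fraction) :
            tuple_fraction;             /* limit fractional: trust the absolute */
    if (tuple_fraction > 0.0)           /* caller gave a fraction */
        return (limit_fraction >= 1.0) ? limit_fraction :
            (tuple_fraction < limit_fraction ? tuple_fraction : limit_fraction);
    return limit_fraction;              /* no info from caller */
}

int
main(void)
{
    printf("%g\n", combine(100.0, 10.0));   /* both absolute -> 10 */
    printf("%g\n", combine(0.25, 10.0));    /* absolute limit wins -> 10 */
    printf("%g\n", combine(0.25, 0.10));    /* both fractional -> 0.1 */
    return 0;
}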

◆ preprocess_phv_expression()

Expr* preprocess_phv_expression ( PlannerInfo root,
Expr expr 
)

Definition at line 1272 of file planner.c.

1273 {
1274  return (Expr *) preprocess_expression(root, (Node *) expr, EXPRKIND_PHV);
1275 }
#define EXPRKIND_PHV
Definition: planner.c:86
static Node * preprocess_expression(PlannerInfo *root, Node *expr, int kind)
Definition: planner.c:1126

References EXPRKIND_PHV, preprocess_expression(), and root.

Referenced by extract_lateral_references().

◆ preprocess_qual_conditions()

static void preprocess_qual_conditions ( PlannerInfo root,
Node jtnode 
)
static

Definition at line 1228 of file planner.c.

1229 {
1230  if (jtnode == NULL)
1231  return;
1232  if (IsA(jtnode, RangeTblRef))
1233  {
1234  /* nothing to do here */
1235  }
1236  else if (IsA(jtnode, FromExpr))
1237  {
1238  FromExpr *f = (FromExpr *) jtnode;
1239  ListCell *l;
1240 
1241  foreach(l, f->fromlist)
1242  preprocess_qual_conditions(root, lfirst(l));
1243 
1244  f->quals = preprocess_expression(root, f->quals, EXPRKIND_QUAL);
1245  }
1246  else if (IsA(jtnode, JoinExpr))
1247  {
1248  JoinExpr *j = (JoinExpr *) jtnode;
1249 
1250  preprocess_qual_conditions(root, j->larg);
1251  preprocess_qual_conditions(root, j->rarg);
1252 
1253  j->quals = preprocess_expression(root, j->quals, EXPRKIND_QUAL);
1254  }
1255  else
1256  elog(ERROR, "unrecognized node type: %d",
1257  (int) nodeTag(jtnode));
1258 }
#define nodeTag(nodeptr)
Definition: nodes.h:133
static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode)
Definition: planner.c:1228
Node * quals
Definition: primnodes.h:2281
List * fromlist
Definition: primnodes.h:2280

References elog, ERROR, EXPRKIND_QUAL, FromExpr::fromlist, IsA, j, lfirst, nodeTag, preprocess_expression(), FromExpr::quals, and root.

Referenced by subquery_planner().

◆ preprocess_rowmarks()

static void preprocess_rowmarks ( PlannerInfo root)
static

Definition at line 2265 of file planner.c.

2266 {
2267  Query *parse = root->parse;
2268  Bitmapset *rels;
2269  List *prowmarks;
2270  ListCell *l;
2271  int i;
2272 
2273  if (parse->rowMarks)
2274  {
2275  /*
2276  * We've got trouble if FOR [KEY] UPDATE/SHARE appears inside
2277  * grouping, since grouping renders a reference to individual tuple
2278  * CTIDs invalid. This is also checked at parse time, but that's
2279  * insufficient because of rule substitution, query pullup, etc.
2280  */
2281  CheckSelectLocking(parse, linitial_node(RowMarkClause,
2282  parse->rowMarks)->strength);
2283  }
2284  else
2285  {
2286  /*
2287  * We only need rowmarks for UPDATE, DELETE, MERGE, or FOR [KEY]
2288  * UPDATE/SHARE.
2289  */
2290  if (parse->commandType != CMD_UPDATE &&
2291  parse->commandType != CMD_DELETE &&
2292  parse->commandType != CMD_MERGE)
2293  return;
2294  }
2295 
2296  /*
2297  * We need to have rowmarks for all base relations except the target. We
2298  * make a bitmapset of all base rels and then remove the items we don't
2299  * need or have FOR [KEY] UPDATE/SHARE marks for.
2300  */
2301  rels = get_relids_in_jointree((Node *) parse->jointree, false, false);
2302  if (parse->resultRelation)
2303  rels = bms_del_member(rels, parse->resultRelation);
2304 
2305  /*
2306  * Convert RowMarkClauses to PlanRowMark representation.
2307  */
2308  prowmarks = NIL;
2309  foreach(l, parse->rowMarks)
2310  {
2311  RowMarkClause *rc = lfirst_node(RowMarkClause, l);
2312  RangeTblEntry *rte = rt_fetch(rc->rti, parse->rtable);
2313  PlanRowMark *newrc;
2314 
2315  /*
2316  * Currently, it is syntactically impossible to have FOR UPDATE et al
2317  * applied to an update/delete target rel. If that ever becomes
2318  * possible, we should drop the target from the PlanRowMark list.
2319  */
2320  Assert(rc->rti != parse->resultRelation);
2321 
2322  /*
2323  * Ignore RowMarkClauses for subqueries; they aren't real tables and
2324  * can't support true locking. Subqueries that got flattened into the
2325  * main query should be ignored completely. Any that didn't will get
2326  * ROW_MARK_COPY items in the next loop.
2327  */
2328  if (rte->rtekind != RTE_RELATION)
2329  continue;
2330 
2331  rels = bms_del_member(rels, rc->rti);
2332 
2333  newrc = makeNode(PlanRowMark);
2334  newrc->rti = newrc->prti = rc->rti;
2335  newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2336  newrc->markType = select_rowmark_type(rte, rc->strength);
2337  newrc->allMarkTypes = (1 << newrc->markType);
2338  newrc->strength = rc->strength;
2339  newrc->waitPolicy = rc->waitPolicy;
2340  newrc->isParent = false;
2341 
2342  prowmarks = lappend(prowmarks, newrc);
2343  }
2344 
2345  /*
2346  * Now, add rowmarks for any non-target, non-locked base relations.
2347  */
2348  i = 0;
2349  foreach(l, parse->rtable)
2350  {
2351  RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
2352  PlanRowMark *newrc;
2353 
2354  i++;
2355  if (!bms_is_member(i, rels))
2356  continue;
2357 
2358  newrc = makeNode(PlanRowMark);
2359  newrc->rti = newrc->prti = i;
2360  newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2361  newrc->markType = select_rowmark_type(rte, LCS_NONE);
2362  newrc->allMarkTypes = (1 << newrc->markType);
2363  newrc->strength = LCS_NONE;
2364  newrc->waitPolicy = LockWaitBlock; /* doesn't matter */
2365  newrc->isParent = false;
2366 
2367  prowmarks = lappend(prowmarks, newrc);
2368  }
2369 
2370  root->rowMarks = prowmarks;
2371 }
@ LockWaitBlock
Definition: lockoptions.h:39
@ LCS_NONE
Definition: lockoptions.h:23
@ CMD_DELETE
Definition: nodes.h:268
void CheckSelectLocking(Query *qry, LockClauseStrength strength)
Definition: analyze.c:3238
#define rt_fetch(rangetable_index, rangetable)
Definition: parsetree.h:31
RowMarkType select_rowmark_type(RangeTblEntry *rte, LockClauseStrength strength)
Definition: planner.c:2377
Relids get_relids_in_jointree(Node *jtnode, bool include_outer_joins, bool include_inner_joins)
LockClauseStrength strength
Definition: plannodes.h:1387
Index prti
Definition: plannodes.h:1383
RowMarkType markType
Definition: plannodes.h:1385
LockWaitPolicy waitPolicy
Definition: plannodes.h:1388
bool isParent
Definition: plannodes.h:1389
Index rowmarkId
Definition: plannodes.h:1384
int allMarkTypes
Definition: plannodes.h:1386
LockClauseStrength strength
Definition: parsenodes.h:1585
LockWaitPolicy waitPolicy
Definition: parsenodes.h:1586

References PlanRowMark::allMarkTypes, Assert, bms_del_member(), bms_is_member(), CheckSelectLocking(), CMD_DELETE, CMD_MERGE, CMD_UPDATE, get_relids_in_jointree(), i, PlanRowMark::isParent, lappend(), LCS_NONE, lfirst_node, linitial_node, LockWaitBlock, makeNode, PlanRowMark::markType, NIL, parse(), PlanRowMark::prti, root, PlanRowMark::rowmarkId, rt_fetch, RTE_RELATION, RangeTblEntry::rtekind, RowMarkClause::rti, PlanRowMark::rti, select_rowmark_type(), RowMarkClause::strength, PlanRowMark::strength, RowMarkClause::waitPolicy, and PlanRowMark::waitPolicy.

Referenced by subquery_planner().
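
The two loops amount to a two-pass partition of the range table: explicitly locked rels get marks derived from their clause strength, and every remaining base rel except the result relation gets an LCS_NONE mark. A standalone sketch (plain C, with invented rel indexes standing in for range-table entries):

#include <stdio.h>

int
main(void)
{
    int         nrels = 4;      /* base rels 1..4 */
    int         target = 2;     /* the UPDATE/DELETE result relation */
    int         locked = 3;     /* rel named in FOR UPDATE */

    /* pass 1: marks for the explicitly locked rels */
    printf("rel %d: PlanRowMark from its FOR UPDATE strength\n", locked);

    /* pass 2: all other non-target base rels still need re-fetch marks */
    for (int rti = 1; rti <= nrels; rti++)
    {
        if (rti == target || rti == locked)
            continue;
        printf("rel %d: PlanRowMark with LCS_NONE\n", rti);
    }
    return 0;
}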

◆ remap_to_groupclause_idx()

static List * remap_to_groupclause_idx ( List groupClause,
List gsets,
int *  tleref_to_colnum_map 
)
static

Definition at line 2228 of file planner.c.

2231 {
2232  int ref = 0;
2233  List *result = NIL;
2234  ListCell *lc;
2235 
2236  foreach(lc, groupClause)
2237  {
2238  SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
2239 
2240  tleref_to_colnum_map[gc->tleSortGroupRef] = ref++;
2241  }
2242 
2243  foreach(lc, gsets)
2244  {
2245  List *set = NIL;
2246  ListCell *lc2;
2247  GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
2248 
2249  foreach(lc2, gs->set)
2250  {
2251  set = lappend_int(set, tleref_to_colnum_map[lfirst_int(lc2)]);
2252  }
2253 
2254  result = lappend(result, set);
2255  }
2256 
2257  return result;
2258 }

References lappend(), lappend_int(), lfirst_int, lfirst_node, NIL, GroupingSetData::set, and SortGroupClause::tleSortGroupRef.

Referenced by consider_groupingsets_paths(), and preprocess_grouping_sets().
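
In effect the function builds an inverse map from sortgroupref numbers to 0-based positions in the groupClause and pushes each grouping set through it. A standalone sketch with invented ref values:

#include <stdio.h>

int
main(void)
{
    int         groupClause[] = {7, 3, 9};  /* sortgroup refs, clause order */
    int         map[10];                    /* indexed by ref, size maxref+1 */

    for (int i = 0; i < 3; i++)
        map[groupClause[i]] = i;            /* ref -> 0-based column index */

    int         gset[] = {9, 7};            /* one grouping set, as refs */

    for (int i = 0; i < 2; i++)
        printf("%d ", map[gset[i]]);        /* prints "2 0" */
    printf("\n");
    return 0;
}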

◆ remove_useless_groupby_columns()

static void remove_useless_groupby_columns ( PlannerInfo root)
static

Definition at line 2687 of file planner.c.

2688 {
2689  Query *parse = root->parse;
2690  Bitmapset **groupbyattnos;
2691  Bitmapset **surplusvars;
2692  ListCell *lc;
2693  int relid;
2694 
2695  /* No chance to do anything if there are less than two GROUP BY items */
2696  if (list_length(root->processed_groupClause) < 2)
2697  return;
2698 
2699  /* Don't fiddle with the GROUP BY clause if the query has grouping sets */
2700  if (parse->groupingSets)
2701  return;
2702 
2703  /*
2704  * Scan the GROUP BY clause to find GROUP BY items that are simple Vars.
2705  * Fill groupbyattnos[k] with a bitmapset of the column attnos of RTE k
2706  * that are GROUP BY items.
2707  */
2708  groupbyattnos = (Bitmapset **) palloc0(sizeof(Bitmapset *) *
2709  (list_length(parse->rtable) + 1));
2710  foreach(lc, root->processed_groupClause)
2711  {
2712  SortGroupClause *sgc = lfirst_node(SortGroupClause, lc);
2713  TargetEntry *tle = get_sortgroupclause_tle(sgc, parse->targetList);
2714  Var *var = (Var *) tle->expr;
2715 
2716  /*
2717  * Ignore non-Vars and Vars from other query levels.
2718  *
2719  * XXX in principle, stable expressions containing Vars could also be
2720  * removed, if all the Vars are functionally dependent on other GROUP
2721  * BY items. But it's not clear that such cases occur often enough to
2722  * be worth troubling over.
2723  */
2724  if (!IsA(var, Var) ||
2725  var->varlevelsup > 0)
2726  continue;
2727 
2728  /* OK, remember we have this Var */
2729  relid = var->varno;
2730  Assert(relid <= list_length(parse->rtable));
2731  groupbyattnos[relid] = bms_add_member(groupbyattnos[relid],
2732  var->varattno - FirstLowInvalidHeapAttributeNumber);
2733  }
2734 
2735  /*
2736  * Consider each relation and see if it is possible to remove some of its
2737  * Vars from GROUP BY. For simplicity and speed, we do the actual removal
2738  * in a separate pass. Here, we just fill surplusvars[k] with a bitmapset
2739  * of the column attnos of RTE k that are removable GROUP BY items.
2740  */
2741  surplusvars = NULL; /* don't allocate array unless required */
2742  relid = 0;
2743  foreach(lc, parse->rtable)
2744  {
2745  RangeTblEntry *rte = lfirst_node(RangeTblEntry, lc);
2746  Bitmapset *relattnos;
2747  Bitmapset *pkattnos;
2748  Oid constraintOid;
2749 
2750  relid++;
2751 
2752  /* Only plain relations could have primary-key constraints */
2753  if (rte->rtekind != RTE_RELATION)
2754  continue;
2755 
2756  /*
2757  * We must skip inheritance parent tables as some of the child rels
2758  * may cause duplicate rows. This cannot happen with partitioned
2759  * tables, however.
2760  */
2761  if (rte->inh && rte->relkind != RELKIND_PARTITIONED_TABLE)
2762  continue;
2763 
2764  /* Nothing to do unless this rel has multiple Vars in GROUP BY */
2765  relattnos = groupbyattnos[relid];
2766  if (bms_membership(relattnos) != BMS_MULTIPLE)
2767  continue;
2768 
2769  /*
2770  * Can't remove any columns for this rel if there is no suitable
2771  * (i.e., nondeferrable) primary key constraint.
2772  */
2773  pkattnos = get_primary_key_attnos(rte->relid, false, &constraintOid);
2774  if (pkattnos == NULL)
2775  continue;
2776 
2777  /*
2778  * If the primary key is a proper subset of relattnos then we have
2779  * some items in the GROUP BY that can be removed.
2780  */
2781  if (bms_subset_compare(pkattnos, relattnos) == BMS_SUBSET1)
2782  {
2783  /*
2784  * To easily remember whether we've found anything to do, we don't
2785  * allocate the surplusvars[] array until we find something.
2786  */
2787  if (surplusvars == NULL)
2788  surplusvars = (Bitmapset **) palloc0(sizeof(Bitmapset *) *
2789  (list_length(parse->rtable) + 1));
2790 
2791  /* Remember the attnos of the removable columns */
2792  surplusvars[relid] = bms_difference(relattnos, pkattnos);
2793  }
2794  }
2795 
2796  /*
2797  * If we found any surplus Vars, build a new GROUP BY clause without them.
2798  * (Note: this may leave some TLEs with unreferenced ressortgroupref
2799  * markings, but that's harmless.)
2800  */
2801  if (surplusvars != NULL)
2802  {
2803  List *new_groupby = NIL;
2804 
2805  foreach(lc, root->processed_groupClause)
2806  {
2807  SortGroupClause *sgc = lfirst_node(SortGroupClause, lc);
2808  TargetEntry *tle = get_sortgroupclause_tle(sgc, parse->targetList);
2809  Var *var = (Var *) tle->expr;
2810 
2811  /*
2812  * New list must include non-Vars, outer Vars, and anything not
2813  * marked as surplus.
2814  */
2815  if (!IsA(var, Var) ||
2816  var->varlevelsup > 0 ||
2817  !bms_is_member(var->varattno - FirstLowInvalidHeapAttributeNumber,
2818  surplusvars[var->varno]))
2819  new_groupby = lappend(new_groupby, sgc);
2820  }
2821 
2822  root->processed_groupClause = new_groupby;
2823  }
2824 }
BMS_Comparison bms_subset_compare(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:445
Bitmapset * bms_difference(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:346
@ BMS_SUBSET1
Definition: bitmapset.h:63
Bitmapset * get_primary_key_attnos(Oid relid, bool deferrableOk, Oid *constraintOid)
Expr * expr
Definition: primnodes.h:2162
Definition: primnodes.h:248
AttrNumber varattno
Definition: primnodes.h:260
int varno
Definition: primnodes.h:255
Index varlevelsup
Definition: primnodes.h:280
#define FirstLowInvalidHeapAttributeNumber
Definition: sysattr.h:27
TargetEntry * get_sortgroupclause_tle(SortGroupClause *sgClause, List *targetList)
Definition: tlist.c:367

References Assert, bms_add_member(), bms_difference(), bms_is_member(), bms_membership(), BMS_MULTIPLE, BMS_SUBSET1, bms_subset_compare(), TargetEntry::expr, FirstLowInvalidHeapAttributeNumber, get_primary_key_attnos(), get_sortgroupclause_tle(), if(), RangeTblEntry::inh, IsA, lappend(), lfirst_node, list_length(), NIL, palloc0(), parse(), RangeTblEntry::relid, root, RTE_RELATION, RangeTblEntry::rtekind, Var::varattno, Var::varlevelsup, and Var::varno.

Referenced by grouping_planner().
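
Conceptually: in a query such as "SELECT ... FROM t GROUP BY t.pk_col, t.other_col", other_col is functionally dependent on the primary key and can be dropped from the grouping. The BMS_SUBSET1 test reduces to a proper-subset check on attno bitmaps, as in this standalone sketch (plain unsigned masks standing in for Bitmapsets):

#include <stdio.h>

int
main(void)
{
    unsigned    groupby = 0x0F; /* GROUP BY uses columns 1-4 of the rel */
    unsigned    pk = 0x03;      /* nondeferrable primary key: columns 1-2 */

    /* pk must be a proper subset of the grouped columns (BMS_SUBSET1) */
    if ((pk & groupby) == pk && pk != groupby)
    {
        unsigned    surplus = groupby & ~pk;    /* columns 3-4 are redundant */

        printf("surplus attno mask: 0x%X\n", surplus);  /* prints 0xC */
    }
    return 0;
}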

◆ reorder_grouping_sets()

static List * reorder_grouping_sets ( List groupingSets,
List sortclause 
)
static

Definition at line 3080 of file planner.c.

3081 {
3082  ListCell *lc;
3083  List *previous = NIL;
3084  List *result = NIL;
3085 
3086  foreach(lc, groupingSets)
3087  {
3088  List *candidate = (List *) lfirst(lc);
3089  List *new_elems = list_difference_int(candidate, previous);
3090  GroupingSetData *gs = makeNode(GroupingSetData);
3091 
3092  while (list_length(sortclause) > list_length(previous) &&
3093  new_elems != NIL)
3094  {
3095  SortGroupClause *sc = list_nth(sortclause, list_length(previous));
3096  int ref = sc->tleSortGroupRef;
3097 
3098  if (list_member_int(new_elems, ref))
3099  {
3100  previous = lappend_int(previous, ref);
3101  new_elems = list_delete_int(new_elems, ref);
3102  }
3103  else
3104  {
3105  /* diverged from the sortclause; give up on it */
3106  sortclause = NIL;
3107  break;
3108  }
3109  }
3110 
3111  previous = list_concat(previous, new_elems);
3112 
3113  gs->set = list_copy(previous);
3114  result = lcons(gs, result);
3115  }
3116 
3117  list_free(previous);
3118 
3119  return result;
3120 }
List * list_delete_int(List *list, int datum)
Definition: list.c:891
List * list_difference_int(const List *list1, const List *list2)
Definition: list.c:1288
bool list_member_int(const List *list, int datum)
Definition: list.c:702
static void * list_nth(const List *list, int n)
Definition: pg_list.h:299

References lappend_int(), lcons(), lfirst, list_concat(), list_copy(), list_delete_int(), list_difference_int(), list_free(), list_length(), list_member_int(), list_nth(), makeNode, NIL, GroupingSetData::set, and SortGroupClause::tleSortGroupRef.

Referenced by preprocess_grouping_sets().
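
Two effects of the function are worth seeing concretely: each emitted set is the union of everything processed so far (so a chain {} < {a} < {a,b} stays a chain), and lcons-style prepending flips the order from smallest-first to largest-first; when a single rollup tracks the ORDER BY clause, new elements are also appended in sortclause order. A standalone sketch of the reversal part only (strings of column names standing in for integer lists):

#include <stdio.h>

int
main(void)
{
    /* one rollup chain, smallest set first, as handed to the function */
    const char *chain[] = {"", "a", "ab"};
    const char *result[3];
    int         n = 3;

    for (int i = 0; i < n; i++)
        result[n - 1 - i] = chain[i];   /* prepend == reverse */

    for (int i = 0; i < n; i++)
        printf("(%s) ", result[i]);     /* prints: (ab) (a) () */
    printf("\n");
    return 0;
}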

◆ select_active_windows()

static List * select_active_windows ( PlannerInfo root,
WindowFuncLists wflists 
)
static

Definition at line 5724 of file planner.c.

5725 {
5726  List *windowClause = root->parse->windowClause;
5727  List *result = NIL;
5728  ListCell *lc;
5729  int nActive = 0;
5730  WindowClauseSortData *actives = palloc(sizeof(WindowClauseSortData)
5731  * list_length(windowClause));
5732 
5733  /* First, construct an array of the active windows */
5734  foreach(lc, windowClause)
5735  {
5736  WindowClause *wc = lfirst_node(WindowClause, lc);
5737 
5738  /* It's only active if wflists shows some related WindowFuncs */
5739  Assert(wc->winref <= wflists->maxWinRef);
5740  if (wflists->windowFuncs[wc->winref] == NIL)
5741  continue;
5742 
5743  actives[nActive].wc = wc; /* original clause */
5744 
5745  /*
5746  * For sorting, we want the list of partition keys followed by the
5747  * list of sort keys. But pathkeys construction will remove duplicates
5748  * between the two, so we can as well (even though we can't detect all
5749  * of the duplicates, since some may come from ECs - that might mean
5750  * we miss optimization chances here). We must, however, ensure that
5751  * the order of entries is preserved with respect to the ones we do
5752  * keep.
5753  *
5754  * partitionClause and orderClause had their own duplicates removed in
5755  * parse analysis, so we're only concerned here with removing
5756  * orderClause entries that also appear in partitionClause.
5757  */
5758  actives[nActive].uniqueOrder =
5759  list_concat_unique(list_copy(wc->partitionClause),
5760  wc->orderClause);
5761  nActive++;
5762  }
5763 
5764  /*
5765  * Sort active windows by their partitioning/ordering clauses, ignoring
5766  * any framing clauses, so that the windows that need the same sorting are
5767  * adjacent in the list. When we come to generate paths, this will avoid
5768  * inserting additional Sort nodes.
5769  *
5770  * This is how we implement a specific requirement from the SQL standard,
5771  * which says that when two or more windows are order-equivalent (i.e.
5772  * have matching partition and order clauses, even if their names or
5773  * framing clauses differ), then all peer rows must be presented in the
5774  * same order in all of them. If we allowed multiple sort nodes for such
5775  * cases, we'd risk having the peer rows end up in different orders in
5776  * equivalent windows due to sort instability. (See General Rule 4 of
5777  * <window clause> in SQL2008 - SQL2016.)
5778  *
5779  * Additionally, if the entire list of clauses of one window is a prefix
5780  * of another, put first the window with stronger sorting requirements.
5781  * This way we will first sort for stronger window, and won't have to sort
5782  * again for the weaker one.
5783  */
5784  qsort(actives, nActive, sizeof(WindowClauseSortData), common_prefix_cmp);
5785 
5786  /* build ordered list of the original WindowClause nodes */
5787  for (int i = 0; i < nActive; i++)
5788  result = lappend(result, actives[i].wc);
5789 
5790  pfree(actives);
5791 
5792  return result;
5793 }
List * list_concat_unique(List *list1, const List *list2)
Definition: list.c:1405
static int common_prefix_cmp(const void *a, const void *b)
Definition: planner.c:5812
#define qsort(a, b, c, d)
Definition: port.h:449
WindowClause * wc
Definition: planner.c:113

References Assert, common_prefix_cmp(), i, lappend(), lfirst_node, list_concat_unique(), list_copy(), list_length(), WindowFuncLists::maxWinRef, NIL, WindowClause::orderClause, palloc(), WindowClause::partitionClause, pfree(), qsort, root, WindowClauseSortData::uniqueOrder, WindowClauseSortData::wc, WindowFuncLists::windowFuncs, and WindowClause::winref.

Referenced by grouping_planner().
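
The point of the qsort is only to make windows with equal or prefix-related sort requirements adjacent, with the stronger (longer) requirement first. A standalone sketch using strings of sort-key names in place of WindowClauseSortData (the comparator is a simplified stand-in for common_prefix_cmp, not the backend function):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* compare up to the shared prefix; on a tie, longer requirement first */
static int
cmp(const void *a, const void *b)
{
    const char *wa = *(const char *const *) a;
    const char *wb = *(const char *const *) b;
    size_t      la = strlen(wa);
    size_t      lb = strlen(wb);
    int         c = strncmp(wa, wb, la < lb ? la : lb);

    if (c != 0)
        return c;
    return (int) lb - (int) la;
}

int
main(void)
{
    char       *wins[] = {"a", "ab", "c", "abd"};

    qsort(wins, 4, sizeof(char *), cmp);
    for (int i = 0; i < 4; i++)
        printf("%s ", wins[i]);     /* prints: abd ab a c */
    printf("\n");
    return 0;
}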

◆ select_rowmark_type()

RowMarkType select_rowmark_type ( RangeTblEntry rte,
LockClauseStrength  strength 
)

Definition at line 2377 of file planner.c.

2378 {
2379  if (rte->rtekind != RTE_RELATION)
2380  {
2381  /* If it's not a table at all, use ROW_MARK_COPY */
2382  return ROW_MARK_COPY;
2383  }
2384  else if (rte->relkind == RELKIND_FOREIGN_TABLE)
2385  {
2386  /* Let the FDW select the rowmark type, if it wants to */
2387  FdwRoutine *fdwroutine = GetFdwRoutineByRelId(rte->relid);
2388 
2389  if (fdwroutine->GetForeignRowMarkType != NULL)
2390  return fdwroutine->GetForeignRowMarkType(rte, strength);
2391  /* Otherwise, use ROW_MARK_COPY by default */
2392  return ROW_MARK_COPY;
2393  }
2394  else
2395  {
2396  /* Regular table, apply the appropriate lock type */
2397  switch (strength)
2398  {
2399  case LCS_NONE:
2400 
2401  /*
2402  * We don't need a tuple lock, only the ability to re-fetch
2403  * the row.
2404  */
2405  return ROW_MARK_REFERENCE;
2406  break;
2407  case LCS_FORKEYSHARE:
2408  return ROW_MARK_KEYSHARE;
2409  break;
2410  case LCS_FORSHARE:
2411  return ROW_MARK_SHARE;
2412  break;
2413  case LCS_FORNOKEYUPDATE:
2414  return ROW_MARK_NOKEYEXCLUSIVE;
2415  break;
2416  case LCS_FORUPDATE:
2417  return ROW_MARK_EXCLUSIVE;
2418  break;
2419  }
2420  elog(ERROR, "unrecognized LockClauseStrength %d", (int) strength);
2421  return ROW_MARK_EXCLUSIVE; /* keep compiler quiet */
2422  }
2423 }
FdwRoutine * GetFdwRoutineByRelId(Oid relid)
Definition: foreign.c:409
@ LCS_FORUPDATE
Definition: lockoptions.h:27
@ LCS_FORSHARE
Definition: lockoptions.h:25
@ LCS_FORKEYSHARE
Definition: lockoptions.h:24
@ LCS_FORNOKEYUPDATE
Definition: lockoptions.h:26
@ ROW_MARK_COPY
Definition: plannodes.h:1334
@ ROW_MARK_REFERENCE
Definition: plannodes.h:1333
@ ROW_MARK_SHARE
Definition: plannodes.h:1331
@ ROW_MARK_EXCLUSIVE
Definition: plannodes.h:1329
@ ROW_MARK_NOKEYEXCLUSIVE
Definition: plannodes.h:1330
@ ROW_MARK_KEYSHARE
Definition: plannodes.h:1332
GetForeignRowMarkType_function GetForeignRowMarkType
Definition: fdwapi.h:247

References elog, ERROR, GetFdwRoutineByRelId(), FdwRoutine::GetForeignRowMarkType, LCS_FORKEYSHARE, LCS_FORNOKEYUPDATE, LCS_FORSHARE, LCS_FORUPDATE, LCS_NONE, RangeTblEntry::relid, ROW_MARK_COPY, ROW_MARK_EXCLUSIVE, ROW_MARK_KEYSHARE, ROW_MARK_NOKEYEXCLUSIVE, ROW_MARK_REFERENCE, ROW_MARK_SHARE, RTE_RELATION, and RangeTblEntry::rtekind.

Referenced by expand_single_inheritance_child(), and preprocess_rowmarks().

◆ standard_planner()

PlannedStmt* standard_planner ( Query parse,
const char *  query_string,
int  cursorOptions,
ParamListInfo  boundParams 
)

Definition at line 287 of file planner.c.

289 {
290  PlannedStmt *result;
291  PlannerGlobal *glob;
292  double tuple_fraction;
293  PlannerInfo *root;
294  RelOptInfo *final_rel;
295  Path *best_path;
296  Plan *top_plan;
297  ListCell *lp,
298  *lr;
299 
300  /*
301  * Set up global state for this planner invocation. This data is needed
302  * across all levels of sub-Query that might exist in the given command,
303  * so we keep it in a separate struct that's linked to by each per-Query
304  * PlannerInfo.
305  */
306  glob = makeNode(PlannerGlobal);
307 
308  glob->boundParams = boundParams;
309  glob->subplans = NIL;
310  glob->subpaths = NIL;
311  glob->subroots = NIL;
312  glob->rewindPlanIDs = NULL;
313  glob->finalrtable = NIL;
314  glob->finalrteperminfos = NIL;
315  glob->finalrowmarks = NIL;
316  glob->resultRelations = NIL;
317  glob->appendRelations = NIL;
318  glob->relationOids = NIL;
319  glob->invalItems = NIL;
320  glob->paramExecTypes = NIL;
321  glob->lastPHId = 0;
322  glob->lastRowMarkId = 0;
323  glob->lastPlanNodeId = 0;
324  glob->transientPlan = false;
325  glob->dependsOnRole = false;
326 
327  /*
328  * Assess whether it's feasible to use parallel mode for this query. We
329  * can't do this in a standalone backend, or if the command will try to
330  * modify any data, or if this is a cursor operation, or if GUCs are set
331  * to values that don't permit parallelism, or if parallel-unsafe
332  * functions are present in the query tree.
333  *
334  * (Note that we do allow CREATE TABLE AS, SELECT INTO, and CREATE
335  * MATERIALIZED VIEW to use parallel plans, but this is safe only because
336  * the command is writing into a completely new table which workers won't
337  * be able to see. If the workers could see the table, the fact that
338  * group locking would cause them to ignore the leader's heavyweight GIN
339  * page locks would make this unsafe. We'll have to fix that somehow if
340  * we want to allow parallel inserts in general; updates and deletes have
341  * additional problems especially around combo CIDs.)
342  *
343  * For now, we don't try to use parallel mode if we're running inside a
344  * parallel worker. We might eventually be able to relax this
345  * restriction, but for now it seems best not to have parallel workers
346  * trying to create their own parallel workers.
347  */
348  if ((cursorOptions & CURSOR_OPT_PARALLEL_OK) != 0 &&
349  IsUnderPostmaster &&
350  parse->commandType == CMD_SELECT &&
351  !parse->hasModifyingCTE &&
352  max_parallel_workers_per_gather > 0 &&
353  !IsParallelWorker())
354  {
355  /* all the cheap tests pass, so scan the query tree */
356  glob->maxParallelHazard = max_parallel_hazard(parse);
357  glob->parallelModeOK = (glob->maxParallelHazard != PROPARALLEL_UNSAFE);
358  }
359  else
360  {
361  /* skip the query tree scan, just assume it's unsafe */
362  glob->maxParallelHazard = PROPARALLEL_UNSAFE;
363  glob->parallelModeOK = false;
364  }
365 
366  /*
367  * glob->parallelModeNeeded is normally set to false here and changed to
368  * true during plan creation if a Gather or Gather Merge plan is actually
369  * created (cf. create_gather_plan, create_gather_merge_plan).
370  *
371  * However, if debug_parallel_query = on or debug_parallel_query =
372  * regress, then we impose parallel mode whenever it's safe to do so, even
373  * if the final plan doesn't use parallelism. It's not safe to do so if
374  * the query contains anything parallel-unsafe; parallelModeOK will be
375  * false in that case. Note that parallelModeOK can't change after this
376  * point. Otherwise, everything in the query is either parallel-safe or
377  * parallel-restricted, and in either case it should be OK to impose
378  * parallel-mode restrictions. If that ends up breaking something, then
379  * either some function the user included in the query is incorrectly
380  * labeled as parallel-safe or parallel-restricted when in reality it's
381  * parallel-unsafe, or else the query planner itself has a bug.
382  */
383  glob->parallelModeNeeded = glob->parallelModeOK &&
384  (debug_parallel_query != DEBUG_PARALLEL_OFF);
385 
386  /* Determine what fraction of the plan is likely to be scanned */
387  if (cursorOptions & CURSOR_OPT_FAST_PLAN)
388  {
389  /*
390  * We have no real idea how many tuples the user will ultimately FETCH
391  * from a cursor, but it is often the case that he doesn't want 'em
392  * all, or would prefer a fast-start plan anyway so that he can
393  * process some of the tuples sooner. Use a GUC parameter to decide
394  * what fraction to optimize for.
395  */
396  tuple_fraction = cursor_tuple_fraction;
397 
398  /*
399  * We document cursor_tuple_fraction as simply being a fraction, which
400  * means the edge cases 0 and 1 have to be treated specially here. We
401  * convert 1 to 0 ("all the tuples") and 0 to a very small fraction.
402  */
403  if (tuple_fraction >= 1.0)
404  tuple_fraction = 0.0;
405  else if (tuple_fraction <= 0.0)
406  tuple_fraction = 1e-10;
407  }
408  else
409  {
410  /* Default assumption is we need all the tuples */
411  tuple_fraction = 0.0;
412  }
413 
414  /* primary planning entry point (may recurse for subqueries) */
415  root = subquery_planner(glob, parse, NULL, false, tuple_fraction, NULL);
416 
417  /* Select best Path and turn it into a Plan */
418  final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
419  best_path = get_cheapest_fractional_path(final_rel, tuple_fraction);
420 
421  top_plan = create_plan(root, best_path);
422 
423  /*
424  * If creating a plan for a scrollable cursor, make sure it can run
425  * backwards on demand. Add a Material node at the top at need.
426  */
427  if (cursorOptions & CURSOR_OPT_SCROLL)
428  {
429  if (!ExecSupportsBackwardScan(top_plan))
430  top_plan = materialize_finished_plan(top_plan);
431  }
432 
433  /*
434  * Optionally add a Gather node for testing purposes, provided this is
435  * actually a safe thing to do.
436  *
437  * We can add Gather even when top_plan has parallel-safe initPlans, but
438  * then we have to move the initPlans to the Gather node because of
439  * SS_finalize_plan's limitations. That would cause cosmetic breakage of
440  * regression tests when debug_parallel_query = regress, because initPlans
441  * that would normally appear on the top_plan move to the Gather, causing
442  * them to disappear from EXPLAIN output. That doesn't seem worth kluging
443  * EXPLAIN to hide, so skip it when debug_parallel_query = regress.
444  */
445  if (debug_parallel_query != DEBUG_PARALLEL_OFF &&
446  top_plan->parallel_safe &&
447  (top_plan->initPlan == NIL ||
448  debug_parallel_query != DEBUG_PARALLEL_REGRESS))
449  {
450  Gather *gather = makeNode(Gather);
451  Cost initplan_cost;
452  bool unsafe_initplans;
453 
454  gather->plan.targetlist = top_plan->targetlist;
455  gather->plan.qual = NIL;
456  gather->plan.lefttree = top_plan;
457  gather->plan.righttree = NULL;
458  gather->num_workers = 1;
459  gather->single_copy = true;
460  gather->invisible = (debug_parallel_query == DEBUG_PARALLEL_REGRESS);
461 
462  /* Transfer any initPlans to the new top node */
463  gather->plan.initPlan = top_plan->initPlan;
464  top_plan->initPlan = NIL;
465 
466  /*
467  * Since this Gather has no parallel-aware descendants to signal to,
468  * we don't need a rescan Param.
469  */
470  gather->rescan_param = -1;
471 
472  /*
473  * Ideally we'd use cost_gather here, but setting up dummy path data
474  * to satisfy it doesn't seem much cleaner than knowing what it does.
475  */
476  gather->plan.startup_cost = top_plan->startup_cost +
477  parallel_setup_cost;
478  gather->plan.total_cost = top_plan->total_cost +
479  parallel_setup_cost + parallel_tuple_cost * top_plan->plan_rows;
480  gather->plan.plan_rows = top_plan->plan_rows;
481  gather->plan.plan_width = top_plan->plan_width;
482  gather->plan.parallel_aware = false;
483  gather->plan.parallel_safe = false;
484 
485  /*
486  * Delete the initplans' cost from top_plan. We needn't add it to the
487  * Gather node, since the above coding already included it there.
488  */
489  SS_compute_initplan_cost(gather->plan.initPlan,
490  &initplan_cost, &unsafe_initplans);
491  top_plan->startup_cost -= initplan_cost;
492  top_plan->total_cost -= initplan_cost;
493 
494  /* use parallel mode for parallel plans. */
495  root->glob->parallelModeNeeded = true;
496 
497  top_plan = &gather->plan;
498  }
499 
500  /*
501  * If any Params were generated, run through the plan tree and compute
502  * each plan node's extParam/allParam sets. Ideally we'd merge this into
503  * set_plan_references' tree traversal, but for now it has to be separate
504  * because we need to visit subplans before not after main plan.
505  */
506  if (glob->paramExecTypes != NIL)
507  {
508  Assert(list_length(glob->subplans) == list_length(glob->subroots));
509  forboth(lp, glob->subplans, lr, glob->subroots)
510  {
511  Plan *subplan = (Plan *) lfirst(lp);
512  PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);
513 
514  SS_finalize_plan(subroot, subplan);
515  }
516  SS_finalize_plan(root, top_plan);
517  }
518 
519  /* final cleanup of the plan */
520  Assert(glob->finalrtable == NIL);
521  Assert(glob->finalrteperminfos == NIL);
522  Assert(glob->finalrowmarks == NIL);
523  Assert(glob->resultRelations == NIL);
524  Assert(glob->appendRelations == NIL);
525  top_plan = set_plan_references(root, top_plan);
526  /* ... and the subplans (both regular subplans and initplans) */
527  Assert(list_length(glob->subplans) == list_length(glob->subroots));
528  forboth(lp, glob->subplans, lr, glob->subroots)
529  {
530  Plan *subplan = (Plan *) lfirst(lp);
531  PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);
532 
533  lfirst(lp) = set_plan_references(subroot, subplan);
534  }
535 
536  /* build the PlannedStmt result */
537  result = makeNode(PlannedStmt);
538 
539  result->commandType = parse->commandType;
540  result->queryId = parse->queryId;
541  result->hasReturning = (parse->returningList != NIL);
542  result->hasModifyingCTE = parse->hasModifyingCTE;
543  result->canSetTag = parse->canSetTag;
544  result->transientPlan = glob->transientPlan;
545  result->dependsOnRole = glob->dependsOnRole;
546  result->parallelModeNeeded = glob->parallelModeNeeded;
547  result->planTree = top_plan;
548  result->rtable = glob->finalrtable;
549  result->permInfos = glob->finalrteperminfos;
550  result->resultRelations = glob->resultRelations;
551  result->appendRelations = glob->appendRelations;
552  result->subplans = glob->subplans;
553  result->rewindPlanIDs = glob->rewindPlanIDs;
554  result->rowMarks = glob->finalrowmarks;
555  result->relationOids = glob->relationOids;
556  result->invalItems = glob->invalItems;
557  result->paramExecTypes = glob->paramExecTypes;
558  /* utilityStmt should be null, but we might as well copy it */
559  result->utilityStmt = parse->utilityStmt;
560  result->stmt_location = parse->stmt_location;
561  result->stmt_len = parse->stmt_len;
562 
563  result->jitFlags = PGJIT_NONE;
564  if (jit_enabled && jit_above_cost >= 0 &&
565  top_plan->total_cost > jit_above_cost)
566  {
567  result->jitFlags |= PGJIT_PERFORM;
568 
569  /*
570  * Decide how much effort should be put into generating better code.
571  */
572  if (jit_optimize_above_cost >= 0 &&
573  top_plan->total_cost > jit_optimize_above_cost)
574  result->jitFlags |= PGJIT_OPT3;
575  if (jit_inline_above_cost >= 0 &&
576  top_plan->total_cost > jit_inline_above_cost)
577  result->jitFlags |= PGJIT_INLINE;
578 
579  /*
580  * Decide which operations should be JITed.
581  */
582  if (jit_expressions)
583  result->jitFlags |= PGJIT_EXPR;
584  if (jit_tuple_deforming)
585  result->jitFlags |= PGJIT_DEFORM;
586  }
587 
588  if (glob->partition_directory != NULL)
589  DestroyPartitionDirectory(glob->partition_directory);
590 
591  return result;
592 }
char max_parallel_hazard(Query *parse)
Definition: clauses.c:734
int max_parallel_workers_per_gather
Definition: costsize.c:132
double parallel_setup_cost
Definition: costsize.c:125
double parallel_tuple_cost
Definition: costsize.c:124
Plan * create_plan(PlannerInfo *root, Path *best_path)
Definition: createplan.c:337
Plan * materialize_finished_plan(Plan *subplan)
Definition: createplan.c:6527
bool ExecSupportsBackwardScan(Plan *node)
Definition: execAmi.c:510
#define IsParallelWorker()
Definition: parallel.h:60
double jit_optimize_above_cost
Definition: jit.c:41
bool jit_enabled
Definition: jit.c:32
bool jit_expressions
Definition: jit.c:36
bool jit_tuple_deforming
Definition: jit.c:38
double jit_above_cost
Definition: jit.c:39
double jit_inline_above_cost
Definition: jit.c:40
#define PGJIT_OPT3
Definition: jit.h:21
#define PGJIT_NONE
Definition: jit.h:19
#define PGJIT_EXPR
Definition: jit.h:23
#define PGJIT_DEFORM
Definition: jit.h:24
#define PGJIT_INLINE
Definition: jit.h:22
#define PGJIT_PERFORM
Definition: jit.h:20
@ DEBUG_PARALLEL_REGRESS
Definition: optimizer.h:108
@ DEBUG_PARALLEL_OFF
Definition: optimizer.h:106
#define CURSOR_OPT_SCROLL
Definition: parsenodes.h:3293
#define CURSOR_OPT_FAST_PLAN
Definition: parsenodes.h:3299
#define CURSOR_OPT_PARALLEL_OK
Definition: parsenodes.h:3302
void DestroyPartitionDirectory(PartitionDirectory pdir)
Definition: partdesc.c:442
PlannerInfo * subquery_planner(PlannerGlobal *glob, Query *parse, PlannerInfo *parent_root, bool hasRecursion, double tuple_fraction, SetOperationStmt *setops)
Definition: planner.c:628
double cursor_tuple_fraction
Definition: planner.c:66
int debug_parallel_query
Definition: planner.c:67
Path * get_cheapest_fractional_path(RelOptInfo *rel, double tuple_fraction)
Definition: planner.c:6298
Plan * set_plan_references(PlannerInfo *root, Plan *plan)
Definition: setrefs.c:287
int num_workers
Definition: plannodes.h:1143
bool invisible
Definition: plannodes.h:1146
bool single_copy
Definition: plannodes.h:1145
Plan plan
Definition: plannodes.h:1142
int rescan_param
Definition: plannodes.h:1144
struct Plan * lefttree
Definition: plannodes.h:154
Cost total_cost
Definition: plannodes.h:129
struct Plan * righttree
Definition: plannodes.h:155
bool parallel_aware
Definition: plannodes.h:140
Cost startup_cost
Definition: plannodes.h:128
List * qual
Definition: plannodes.h:153
int plan_width
Definition: plannodes.h:135
bool parallel_safe
Definition: plannodes.h:141
Cardinality plan_rows
Definition: plannodes.h:134
List * targetlist
Definition: plannodes.h:152
List * initPlan
Definition: plannodes.h:156
struct Plan * planTree
Definition: plannodes.h:70
bool hasModifyingCTE
Definition: plannodes.h:58
List * appendRelations
Definition: plannodes.h:80
List * permInfos
Definition: plannodes.h:74
bool canSetTag
Definition: plannodes.h:60
List * rowMarks
Definition: plannodes.h:87
int jitFlags
Definition: plannodes.h:68
Bitmapset * rewindPlanIDs
Definition: plannodes.h:85
ParseLoc stmt_len
Definition: plannodes.h:99
bool hasReturning
Definition: plannodes.h:56
ParseLoc stmt_location
Definition: plannodes.h:98
List * invalItems
Definition: plannodes.h:91
bool transientPlan
Definition: plannodes.h:62
List * resultRelations
Definition: plannodes.h:78
List * subplans
Definition: plannodes.h:82
List * relationOids
Definition: plannodes.h:89
bool dependsOnRole
Definition: plannodes.h:64
CmdType commandType
Definition: plannodes.h:52
Node * utilityStmt
Definition: plannodes.h:95
List * rtable
Definition: plannodes.h:72
List * paramExecTypes
Definition: plannodes.h:93
bool parallelModeNeeded
Definition: plannodes.h:66
uint64 queryId
Definition: plannodes.h:54
int lastPlanNodeId
Definition: pathnodes.h:147
char maxParallelHazard
Definition: pathnodes.h:162
List * subplans
Definition: pathnodes.h:105
bool dependsOnRole
Definition: pathnodes.h:153
List * appendRelations
Definition: pathnodes.h:129
List * finalrowmarks
Definition: pathnodes.h:123
List * paramExecTypes
Definition: pathnodes.h:138
bool parallelModeOK
Definition: pathnodes.h:156
bool transientPlan
Definition: pathnodes.h:150
Bitmapset * rewindPlanIDs
Definition: pathnodes.h:114
List * finalrteperminfos
Definition: pathnodes.h:120
List * subpaths
Definition: pathnodes.h:108
Index lastPHId
Definition: pathnodes.h:141
Index lastRowMarkId
Definition: pathnodes.h:144
List * resultRelations
Definition: pathnodes.h:126
List * finalrtable
Definition: pathnodes.h:117
bool parallelModeNeeded
Definition: pathnodes.h:159
void SS_finalize_plan(PlannerInfo *root, Plan *plan)
Definition: subselect.c:2254
void SS_compute_initplan_cost(List *init_plans, Cost *initplan_cost_p, bool *unsafe_initplans_p)
Definition: subselect.c:2198

References PlannerGlobal::appendRelations, PlannedStmt::appendRelations, Assert, PlannedStmt::canSetTag, CMD_SELECT, PlannedStmt::commandType, create_plan(), CURSOR_OPT_FAST_PLAN, CURSOR_OPT_PARALLEL_OK, CURSOR_OPT_SCROLL, cursor_tuple_fraction, DEBUG_PARALLEL_OFF, debug_parallel_query, DEBUG_PARALLEL_REGRESS, PlannerGlobal::dependsOnRole, PlannedStmt::dependsOnRole, DestroyPartitionDirectory(), ExecSupportsBackwardScan(), fetch_upper_rel(), PlannerGlobal::finalrowmarks, PlannerGlobal::finalrtable, PlannerGlobal::finalrteperminfos, forboth, get_cheapest_fractional_path(), PlannedStmt::hasModifyingCTE, PlannedStmt::hasReturning, Plan::initPlan, PlannerGlobal::invalItems, PlannedStmt::invalItems, Gather::invisible, IsParallelWorker, IsUnderPostmaster, jit_above_cost, jit_enabled, jit_expressions, jit_inline_above_cost, jit_optimize_above_cost, jit_tuple_deforming, PlannedStmt::jitFlags, PlannerGlobal::lastPHId, PlannerGlobal::lastPlanNodeId, PlannerGlobal::lastRowMarkId, Plan::lefttree, lfirst, lfirst_node, list_length(), makeNode, materialize_finished_plan(), max_parallel_hazard(), max_parallel_workers_per_gather, PlannerGlobal::maxParallelHazard, NIL, Gather::num_workers, Plan::parallel_aware, Plan::parallel_safe, parallel_setup_cost, parallel_tuple_cost, PlannerGlobal::parallelModeNeeded, PlannedStmt::parallelModeNeeded, PlannerGlobal::parallelModeOK, PlannerGlobal::paramExecTypes, PlannedStmt::paramExecTypes, parse(), PlannedStmt::permInfos, PGJIT_DEFORM, PGJIT_EXPR, PGJIT_INLINE, PGJIT_NONE, PGJIT_OPT3, PGJIT_PERFORM, Gather::plan, Plan::plan_rows, Plan::plan_width, PlannedStmt::planTree, Plan::qual, PlannedStmt::queryId, PlannerGlobal::relationOids, PlannedStmt::relationOids, Gather::rescan_param, PlannerGlobal::resultRelations, PlannedStmt::resultRelations, PlannerGlobal::rewindPlanIDs, PlannedStmt::rewindPlanIDs, Plan::righttree, root, PlannedStmt::rowMarks, PlannedStmt::rtable, set_plan_references(), Gather::single_copy, SS_compute_initplan_cost(), SS_finalize_plan(), Plan::startup_cost, PlannedStmt::stmt_len, PlannedStmt::stmt_location, PlannerGlobal::subpaths, PlannerGlobal::subplans, PlannedStmt::subplans, subquery_planner(), Plan::targetlist, Plan::total_cost, PlannerGlobal::transientPlan, PlannedStmt::transientPlan, UPPERREL_FINAL, and PlannedStmt::utilityStmt.

Referenced by delay_execution_planner(), pgss_planner(), and planner().
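
The JIT decision at the end is a pure cost-threshold cascade on the top plan's total cost. A standalone sketch of that cascade (the GUC values shown match the shipped defaults but are used here only as example inputs):

#include <stdio.h>

#define PGJIT_PERFORM (1 << 0)
#define PGJIT_OPT3    (1 << 1)
#define PGJIT_INLINE  (1 << 2)

int
main(void)
{
    double      total_cost = 750000.0;
    double      jit_above_cost = 100000.0;
    double      jit_optimize_above_cost = 500000.0;
    double      jit_inline_above_cost = 500000.0;
    int         jitFlags = 0;

    if (jit_above_cost >= 0 && total_cost > jit_above_cost)
    {
        jitFlags |= PGJIT_PERFORM;
        if (jit_optimize_above_cost >= 0 &&
            total_cost > jit_optimize_above_cost)
            jitFlags |= PGJIT_OPT3;
        if (jit_inline_above_cost >= 0 &&
            total_cost > jit_inline_above_cost)
            jitFlags |= PGJIT_INLINE;
    }
    printf("jitFlags = 0x%x\n", jitFlags);  /* prints 0x7 */
    return 0;
}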

◆ standard_qp_callback()

static void standard_qp_callback ( PlannerInfo root,
void *  extra 
)
static

Definition at line 3354 of file planner.c.

3355 {
3356  Query *parse = root->parse;
3357  standard_qp_extra *qp_extra = (standard_qp_extra *) extra;
3358  List *tlist = root->processed_tlist;
3359  List *activeWindows = qp_extra->activeWindows;
3360 
3361  /*
3362  * Calculate pathkeys that represent grouping/ordering and/or ordered
3363  * aggregate requirements.
3364  */
3365  if (qp_extra->gset_data)
3366  {
3367  /*
3368  * With grouping sets, just use the first RollupData's groupClause. We
3369  * don't make any effort to optimize grouping clauses when there are
3370  * grouping sets, nor can we combine aggregate ordering keys with
3371  * grouping.
3372  */
3373  List *rollups = qp_extra->gset_data->rollups;
3374  List *groupClause = (rollups ? linitial_node(RollupData, rollups)->groupClause : NIL);
3375 
3376  if (grouping_is_sortable(groupClause))
3377  {
3378  root->group_pathkeys = make_pathkeys_for_sortclauses(root,
3379  groupClause,
3380  tlist);
3381  root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3382  }
3383  else
3384  {
3385  root->group_pathkeys = NIL;
3386  root->num_groupby_pathkeys = 0;
3387  }
3388  }
3389  else if (parse->groupClause || root->numOrderedAggs > 0)
3390  {
3391  /*
3392  * With a plain GROUP BY list, we can remove any grouping items that
3393  * are proven redundant by EquivalenceClass processing. For example,
3394  * we can remove y given "WHERE x = y GROUP BY x, y". These aren't
3395  * especially common cases, but they're nearly free to detect. Note
3396  * that we remove redundant items from processed_groupClause but not
3397  * the original parse->groupClause.
3398  */
3399  bool sortable;
3400 
3401  root->group_pathkeys =
3402  make_pathkeys_for_sortclauses_extended(root,
3403  &root->processed_groupClause,
3404  tlist,
3405  true,
3406  &sortable);
3407  if (!sortable)
3408  {
3409  /* Can't sort; no point in considering aggregate ordering either */
3410  root->group_pathkeys = NIL;
3411  root->num_groupby_pathkeys = 0;
3412  }
3413  else
3414  {
3415  root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3416  /* If we have ordered aggs, consider adding onto group_pathkeys */
3417  if (root->numOrderedAggs > 0)
3418  adjust_group_pathkeys_for_groupagg(root);
3419  }
3420  }
3421  else
3422  {
3423  root->group_pathkeys = NIL;
3424  root->num_groupby_pathkeys = 0;
3425  }
3426 
3427  /* We consider only the first (bottom) window in pathkeys logic */
3428  if (activeWindows != NIL)
3429  {
3430  WindowClause *wc = linitial_node(WindowClause, activeWindows);
3431 
3432  root->window_pathkeys = make_pathkeys_for_window(root,
3433  wc,
3434  tlist);
3435  }
3436  else
3437  root->window_pathkeys = NIL;
3438 
3439  /*
3440  * As with GROUP BY, we can discard any DISTINCT items that are proven
3441  * redundant by EquivalenceClass processing. The non-redundant list is
3442  * kept in root->processed_distinctClause, leaving the original
3443  * parse->distinctClause alone.
3444  */
3445  if (parse->distinctClause)
3446  {
3447  bool sortable;
3448 
3449  /* Make a copy since pathkey processing can modify the list */
3450  root->processed_distinctClause = list_copy(parse->distinctClause);
3451  root->distinct_pathkeys =
3452  make_pathkeys_for_sortclauses_extended(root,
3453  &root->processed_distinctClause,
3454  tlist,
3455  true,
3456  &sortable);
3457  if (!sortable)
3458  root->distinct_pathkeys = NIL;
3459  }
3460  else
3461  root->distinct_pathkeys = NIL;
3462 
3463  root->sort_pathkeys =
3464  make_pathkeys_for_sortclauses(root,
3465  parse->sortClause,
3466  tlist);
3467 
3468  /* setting setop_pathkeys might be useful to the union planner */
3469  if (qp_extra->setop != NULL &&
3470  set_operation_ordered_results_useful(qp_extra->setop))
3471  {
3472  List *groupClauses;
3473  bool sortable;
3474 
3475  groupClauses = generate_setop_child_grouplist(qp_extra->setop, tlist);
3476 
3477  root->setop_pathkeys =
3478  make_pathkeys_for_sortclauses_extended(root,
3479  &groupClauses,
3480  tlist,
3481  false,
3482  &sortable);
3483  if (!sortable)
3484  root->setop_pathkeys = NIL;
3485  }
3486  else
3487  root->setop_pathkeys = NIL;
3488 
3489  /*
3490  * Figure out whether we want a sorted result from query_planner.
3491  *
3492  * If we have a sortable GROUP BY clause, then we want a result sorted
3493  * properly for grouping. Otherwise, if we have window functions to
3494  * evaluate, we try to sort for the first window. Otherwise, if there's a
3495  * sortable DISTINCT clause that's more rigorous than the ORDER BY clause,
3496  * we try to produce output that's sufficiently well sorted for the
3497  * DISTINCT. Otherwise, if there is an ORDER BY clause, we want to sort
3498  * by the ORDER BY clause. Otherwise, if we're a subquery being planned
3499  * for a set operation which can benefit from presorted results and have a
3500  * sortable targetlist, we want to sort by the target list.
3501  *
3502  * Note: if we have both ORDER BY and GROUP BY, and ORDER BY is a superset
3503  * of GROUP BY, it would be tempting to request sort by ORDER BY --- but
3504  * that might just leave us failing to exploit an available sort order at
3505  * all. Needs more thought. The choice for DISTINCT versus ORDER BY is
3506  * much easier, since we know that the parser ensured that one is a
3507  * superset of the other.
3508  */
3509  if (root->group_pathkeys)
3510  root->query_pathkeys = root->group_pathkeys;
3511  else if (root->window_pathkeys)
3512  root->query_pathkeys = root->window_pathkeys;
3513  else if (list_length(root->distinct_pathkeys) >
3514  list_length(root->sort_pathkeys))
3515  root->query_pathkeys = root->distinct_pathkeys;
3516  else if (root->sort_pathkeys)
3517  root->query_pathkeys = root->sort_pathkeys;
3518  else if (root->setop_pathkeys != NIL)
3519  root->query_pathkeys = root->setop_pathkeys;
3520  else
3521  root->query_pathkeys = NIL;
3522 }
static void adjust_group_pathkeys_for_groupagg(PlannerInfo *root)
Definition: planner.c:3173
static List * generate_setop_child_grouplist(SetOperationStmt *op, List *targetlist)
Definition: planner.c:7939
bool set_operation_ordered_results_useful(SetOperationStmt *setop)
Definition: prepunion.c:188

References standard_qp_extra::activeWindows, adjust_group_pathkeys_for_groupagg(), generate_setop_child_grouplist(), grouping_is_sortable(), standard_qp_extra::gset_data, linitial_node, list_copy(), list_length(), make_pathkeys_for_sortclauses(), make_pathkeys_for_sortclauses_extended(), make_pathkeys_for_window(), NIL, parse(), grouping_sets_data::rollups, root, set_operation_ordered_results_useful(), and standard_qp_extra::setop.

Referenced by grouping_planner().
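
The final if/else ladder encodes a fixed precedence among the pathkey lists, with DISTINCT preferred over ORDER BY only when it is strictly more rigorous. A standalone sketch with invented list lengths standing in for the pathkey lists:

#include <stdio.h>

int
main(void)
{
    int         group_len = 0, window_len = 0;
    int         distinct_len = 3, sort_len = 2, setop_len = 0;
    const char *choice;

    if (group_len > 0)
        choice = "group_pathkeys";
    else if (window_len > 0)
        choice = "window_pathkeys";
    else if (distinct_len > sort_len)   /* DISTINCT only if more rigorous */
        choice = "distinct_pathkeys";
    else if (sort_len > 0)
        choice = "sort_pathkeys";
    else if (setop_len > 0)
        choice = "setop_pathkeys";
    else
        choice = "NIL";

    printf("query_pathkeys = %s\n", choice);    /* distinct_pathkeys */
    return 0;
}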

◆ subquery_planner()

PlannerInfo* subquery_planner ( PlannerGlobal glob,
Query parse,
PlannerInfo parent_root,
bool  hasRecursion,
double  tuple_fraction,
SetOperationStmt setops 
)

Definition at line 628 of file planner.c.

631 {
632  PlannerInfo *root;
633  List *newWithCheckOptions;
634  List *newHaving;
635  bool hasOuterJoins;
636  bool hasResultRTEs;
637  RelOptInfo *final_rel;
638  ListCell *l;
639 
640  /* Create a PlannerInfo data structure for this subquery */
641  root = makeNode(PlannerInfo);
642  root->parse = parse;
643  root->glob = glob;
644  root->query_level = parent_root ? parent_root->query_level + 1 : 1;
645  root->parent_root = parent_root;
646  root->plan_params = NIL;
647  root->outer_params = NULL;
648  root->planner_cxt = CurrentMemoryContext;
649  root->init_plans = NIL;
650  root->cte_plan_ids = NIL;
651  root->multiexpr_params = NIL;
652  root->join_domains = NIL;
653  root->eq_classes = NIL;
654  root->ec_merging_done = false;
655  root->last_rinfo_serial = 0;
656  root->all_result_relids =
657  parse->resultRelation ? bms_make_singleton(parse->resultRelation) : NULL;
658  root->leaf_result_relids = NULL; /* we'll find out leaf-ness later */
659  root->append_rel_list = NIL;
660  root->row_identity_vars = NIL;
661  root->rowMarks = NIL;
662  memset(root->upper_rels, 0, sizeof(root->upper_rels));
663  memset(root->upper_targets, 0, sizeof(root->upper_targets));
664  root->processed_groupClause = NIL;
665  root->processed_distinctClause = NIL;
666  root->processed_tlist = NIL;
667  root->update_colnos = NIL;
668  root->grouping_map = NULL;
669  root->minmax_aggs = NIL;
670  root->qual_security_level = 0;
671  root->hasPseudoConstantQuals = false;
672  root->hasAlternativeSubPlans = false;
673  root->placeholdersFrozen = false;
674  root->hasRecursion = hasRecursion;
675  if (hasRecursion)
676  root->wt_param_id = assign_special_exec_param(root);
677  else
678  root->wt_param_id = -1;
679  root->non_recursive_path = NULL;
680  root->partColsUpdated = false;
681 
682  /*
683  * Create the top-level join domain. This won't have valid contents until
684  * deconstruct_jointree fills it in, but the node needs to exist before
685  * that so we can build EquivalenceClasses referencing it.
686  */
687  root->join_domains = list_make1(makeNode(JoinDomain));
688 
689  /*
690  * If there is a WITH list, process each WITH query and either convert it
691  * to RTE_SUBQUERY RTE(s) or build an initplan SubPlan structure for it.
692  */
693  if (parse->cteList)
694  SS_process_ctes(root);
695 
696  /*
697  * If it's a MERGE command, transform the joinlist as appropriate.
698  */
699  transform_MERGE_to_join(parse);
700 
701  /*
702  * If the FROM clause is empty, replace it with a dummy RTE_RESULT RTE, so
703  * that we don't need so many special cases to deal with that situation.
704  */
705  replace_empty_jointree(parse);
706 
707  /*
708  * Look for ANY and EXISTS SubLinks in WHERE and JOIN/ON clauses, and try
709  * to transform them into joins. Note that this step does not descend
710  * into subqueries; if we pull up any subqueries below, their SubLinks are
711  * processed just before pulling them up.
712  */
713  if (parse->hasSubLinks)
714  pull_up_sublinks(root);
715 
716  /*
717  * Scan the rangetable for function RTEs, do const-simplification on them,
718  * and then inline them if possible (producing subqueries that might get
719  * pulled up next). Recursion issues here are handled in the same way as
720  * for SubLinks.
721  */
722  preprocess_function_rtes(root);
723 
724  /*
725  * Check to see if any subqueries in the jointree can be merged into this
726  * query.
727  */
728  pull_up_subqueries(root);
729 
730  /*
731  * If this is a simple UNION ALL query, flatten it into an appendrel. We
732  * do this now because it requires applying pull_up_subqueries to the leaf
733  * queries of the UNION ALL, which weren't touched above because they
734  * weren't referenced by the jointree (they will be after we do this).
735  */
736  if (parse->setOperations)
737  flatten_simple_union_all(root);
738 
739  /*
740  * Survey the rangetable to see what kinds of entries are present. We can
741  * skip some later processing if relevant SQL features are not used; for
742  * example if there are no JOIN RTEs we can avoid the expense of doing
743  * flatten_join_alias_vars(). This must be done after we have finished
744  * adding rangetable entries, of course. (Note: actually, processing of
745  * inherited or partitioned rels can cause RTEs for their child tables to
746  * get added later; but those must all be RTE_RELATION entries, so they
747  * don't invalidate the conclusions drawn here.)
748  */
749  root->hasJoinRTEs = false;
750  root->hasLateralRTEs = false;
751  hasOuterJoins = false;
752  hasResultRTEs = false;
753  foreach(l, parse->rtable)
754  {
755  RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
756 
757  switch (rte->rtekind)
758  {
759  case RTE_RELATION:
760  if (rte->inh)
761  {
762  /*
763  * Check to see if the relation actually has any children;
764  * if not, clear the inh flag so we can treat it as a
765  * plain base relation.
766  *
767  * Note: this could give a false-positive result, if the
768  * rel once had children but no longer does. We used to
769  * be able to clear rte->inh later on when we discovered
770  * that, but no more; we have to handle such cases as
771  * full-fledged inheritance.
772  */
773  rte->inh = has_subclass(rte->relid);
774  }
775  break;
776  case RTE_JOIN:
777  root->hasJoinRTEs = true;
778  if (IS_OUTER_JOIN(rte->jointype))
779  hasOuterJoins = true;
780  break;
781  case RTE_RESULT:
782  hasResultRTEs = true;
783  break;
784  default:
785  /* No work here for other RTE types */
786  break;
787  }
788 
789  if (rte->lateral)
790  root->hasLateralRTEs = true;
791 
792  /*
793  * We can also determine the maximum security level required for any
794  * securityQuals now. Addition of inheritance-child RTEs won't affect
795  * this, because child tables don't have their own securityQuals; see
796  * expand_single_inheritance_child().
797  */
798  if (rte->securityQuals)
799  root->qual_security_level = Max(root->qual_security_level,
800  list_length(rte->securityQuals));
801  }
802 
803  /*
804  * If we have now verified that the query target relation is
805  * non-inheriting, mark it as a leaf target.
806  */
807  if (parse->resultRelation)
808  {
809  RangeTblEntry *rte = rt_fetch(parse->resultRelation, parse->rtable);
810 
811  if (!rte->inh)
812  root->leaf_result_relids =
813  bms_make_singleton(parse->resultRelation);
814  }
815 
816  /*
817  * Preprocess RowMark information. We need to do this after subquery
818  * pullup, so that all base relations are present.
819  */
820  preprocess_rowmarks(root);
821 
822  /*
823  * Set hasHavingQual to remember if HAVING clause is present. Needed
824  * because preprocess_expression will reduce a constant-true condition to
825  * an empty qual list ... but "HAVING TRUE" is not a semantic no-op.
826  */
827  root->hasHavingQual = (parse->havingQual != NULL);
828 
829  /*
830  * Do expression preprocessing on targetlist and quals, as well as other
831  * random expressions in the querytree. Note that we do not need to
832  * handle sort/group expressions explicitly, because they are actually
833  * part of the targetlist.
834  */
835  parse->targetList = (List *)
836  preprocess_expression(root, (Node *) parse->targetList,
837  EXPRKIND_TARGET);
838 
839  /* Constant-folding might have removed all set-returning functions */
840  if (parse->hasTargetSRFs)
841  parse->hasTargetSRFs = expression_returns_set((Node *) parse->targetList);
842 
843  newWithCheckOptions = NIL;
844  foreach(l, parse->withCheckOptions)
845  {
846  WithCheckOption *wco = lfirst_node(WithCheckOption, l);
847 
848  wco->qual = preprocess_expression(root, wco->qual,
849  EXPRKIND_QUAL);
850  if (wco->qual != NULL)
851  newWithCheckOptions = lappend(newWithCheckOptions, wco);
852  }
853  parse->withCheckOptions = newWithCheckOptions;
854 
855  parse->returningList = (List *)
856  preprocess_expression(root, (Node *) parse->returningList,
857  EXPRKIND_TARGET);
858 
859  preprocess_qual_conditions(root, (Node *) parse->jointree);
860 
861  parse->havingQual = preprocess_expression(root, parse->havingQual,
862  EXPRKIND_QUAL);
863 
864  foreach(l, parse->windowClause)
865  {
866  WindowClause *wc = lfirst_node(WindowClause, l);
867 
868  /* partitionClause/orderClause are sort/group expressions */
869  wc->startOffset = preprocess_expression(root, wc->startOffset,
870  EXPRKIND_LIMIT);
871  wc->endOffset = preprocess_expression(root, wc->endOffset,
872  EXPRKIND_LIMIT);
873  wc->runCondition = (List *) preprocess_expression(root,
874  (Node *) wc->runCondition,
875  EXPRKIND_QUAL);
876  }
877 
878  parse->limitOffset = preprocess_expression(root, parse->limitOffset,
879  EXPRKIND_LIMIT);
880  parse->limitCount = preprocess_expression(root, parse->limitCount,
881  EXPRKIND_LIMIT);
882 
883  if (parse->onConflict)
884  {
885  parse->onConflict->arbiterElems = (List *)
886  preprocess_expression(root,
887  (Node *) parse->onConflict->arbiterElems,
888  EXPRKIND_ARBITER_ELEM);
889  parse->onConflict->arbiterWhere =
890  preprocess_expression(root,
891  parse->onConflict->arbiterWhere,
892  EXPRKIND_QUAL);
893  parse->onConflict->onConflictSet = (List *)
894  preprocess_expression(root,
895  (Node *) parse->onConflict->onConflictSet,
896  EXPRKIND_TARGET);
897  parse->onConflict->onConflictWhere =
898  preprocess_expression(root,
899  parse->onConflict->onConflictWhere,
900  EXPRKIND_QUAL);
901  /* exclRelTlist contains only Vars, so no preprocessing needed */
902  }
903 
904  foreach(l, parse->mergeActionList)
905  {
906  MergeAction *action = lfirst_node(MergeAction, l);
907 
908  action->targetList = (List *)
909  preprocess_expression(root,
910  (Node *) action->targetList,
911  EXPRKIND_TARGET);
912  action->qual =
913  preprocess_expression(root,
914  (Node *) action->qual,
915  EXPRKIND_QUAL);
916  }
917 
918  parse->mergeJoinCondition =
919  preprocess_expression(root, parse->mergeJoinCondition, EXPRKIND_QUAL);
920 
921  root->append_rel_list = (List *)
922  preprocess_expression(root, (Node *) root->append_rel_list,
923  EXPRKIND_APPINFO);
924 
925  /* Also need to preprocess expressions within RTEs */
926  foreach(l, parse->rtable)
927  {
928  RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
929  int kind;
930  ListCell *lcsq;
931 
932  if (rte->rtekind == RTE_RELATION)
933  {
934  if (rte->tablesample)
935  rte->tablesample = (TableSampleClause *)
936  preprocess_expression(root,
937  (Node *) rte->tablesample,
938  EXPRKIND_TABLESAMPLE);
939  }
940  else if (rte->rtekind == RTE_SUBQUERY)
941  {
942  /*
943  * We don't want to do all preprocessing yet on the subquery's
944  * expressions, since that will happen when we plan it. But if it
945  * contains any join aliases of our level, those have to get
946  * expanded now, because planning of the subquery won't do it.
947  * That's only possible if the subquery is LATERAL.
948  */
949  if (rte->lateral && root->hasJoinRTEs)
950  rte->subquery = (Query *)
951  flatten_join_alias_vars(root, root->parse,
952  (Node *) rte->subquery);
953  }
954  else if (rte->rtekind == RTE_FUNCTION)
955  {
956  /* Preprocess the function expression(s) fully */
957  kind = rte->lateral ? EXPRKIND_RTFUNC_LATERAL : EXPRKIND_RTFUNC;
958  rte->functions = (List *)
959  preprocess_expression(root, (Node *) rte->functions, kind);
960  }
961  else if (rte->rtekind == RTE_TABLEFUNC)
962  {
963  /* Preprocess the function expression(s) fully */
964  kind = rte->lateral ? EXPRKIND_TABLEFUNC_LATERAL : EXPRKIND_TABLEFUNC;
965  rte->tablefunc = (TableFunc *)
966  preprocess_expression(root, (Node *) rte->tablefunc, kind);
967  }
968  else if (rte->rtekind == RTE_VALUES)
969  {
970  /* Preprocess the values lists fully */
971  kind = rte->lateral ? EXPRKIND_VALUES_LATERAL : EXPRKIND_VALUES;
972  rte->values_lists = (List *)
973  preprocess_expression(root, (Node *) rte->values_lists, kind);
974  }
975 
976  /*
977  * Process each element of the securityQuals list as if it were a
978  * separate qual expression (as indeed it is). We need to do it this
979  * way to get proper canonicalization of AND/OR structure. Note that
980  * this converts each element into an implicit-AND sublist.
981  */
982  foreach(lcsq, rte->securityQuals)
983  {
984  lfirst(lcsq) = (void *) preprocess_expression(root,
985  (Node *) lfirst(lcsq),
986  EXPRKIND_QUAL);
987  }
988  }
989 
990  /*
991  * Now that we are done preprocessing expressions, and in particular done
992  * flattening join alias variables, get rid of the joinaliasvars lists.
993  * They no longer match what expressions in the rest of the tree look
994  * like, because we have not preprocessed expressions in those lists (and
995  * do not want to; for example, expanding a SubLink there would result in
996  * a useless unreferenced subplan). Leaving them in place simply creates
997  * a hazard for later scans of the tree. We could try to prevent that by
998  * using QTW_IGNORE_JOINALIASES in every tree scan done after this point,
999  * but that doesn't sound very reliable.
1000  */
1001  if (root->hasJoinRTEs)
1002  {
1003  foreach(l, parse->rtable)
1004  {
1005  RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
1006 
1007  rte->joinaliasvars = NIL;
1008  }
1009  }
1010 
1011  /*
1012  * In some cases we may want to transfer a HAVING clause into WHERE. We
1013  * cannot do so if the HAVING clause contains aggregates (obviously) or
1014  * volatile functions (since a HAVING clause is supposed to be executed
1015  * only once per group). We also can't do this if there are any nonempty
1016  * grouping sets; moving such a clause into WHERE would potentially change
1017  * the results, if any referenced column isn't present in all the grouping
1018  * sets. (If there are only empty grouping sets, then the HAVING clause
1019  * must be degenerate as discussed below.)
1020  *
1021  * Also, it may be that the clause is so expensive to execute that we're
1022  * better off doing it only once per group, despite the loss of
1023  * selectivity. This is hard to estimate short of doing the entire
1024  * planning process twice, so we use a heuristic: clauses containing
1025  * subplans are left in HAVING. Otherwise, we move or copy the HAVING
1026  * clause into WHERE, in hopes of eliminating tuples before aggregation
1027  * instead of after.
1028  *
1029  * If the query has explicit grouping then we can simply move such a
1030  * clause into WHERE; any group that fails the clause will not be in the
1031  * output because none of its tuples will reach the grouping or
1032  * aggregation stage. Otherwise we must have a degenerate (variable-free)
1033  * HAVING clause, which we put in WHERE so that query_planner() can use it
1034  * in a gating Result node, but also keep in HAVING to ensure that we
1035  * don't emit a bogus aggregated row. (This could be done better, but it
1036  * seems not worth optimizing.)
1037  *
1038  * Note that both havingQual and parse->jointree->quals are in
1039  * implicitly-ANDed-list form at this point, even though they are declared
1040  * as Node *.
1041  */
1042  newHaving = NIL;
1043  foreach(l, (List *) parse->havingQual)
1044  {
1045  Node *havingclause = (Node *) lfirst(l);
1046 
1047  if ((parse->groupClause && parse->groupingSets) ||
1048  contain_agg_clause(havingclause) ||
1049  contain_volatile_functions(havingclause) ||
1050  contain_subplans(havingclause))
1051  {
1052  /* keep it in HAVING */
1053  newHaving = lappend(newHaving, havingclause);
1054  }
1055  else if (parse->groupClause && !parse->groupingSets)
1056  {
1057  /* move it to WHERE */
1058  parse->jointree->quals = (Node *)
1059  lappend((List *) parse->jointree->quals, havingclause);
1060  }
1061  else
1062  {
1063  /* put a copy in WHERE, keep it in HAVING */
1064  parse->jointree->quals = (Node *)
1065  lappend((List *) parse->jointree->quals,
1066  copyObject(havingclause));
1067  newHaving = lappend(newHaving, havingclause);
1068  }
1069  }
1070  parse->havingQual = (Node *) newHaving;
1071 
1072  /*
1073  * If we have any outer joins, try to reduce them to plain inner joins.
1074  * This step is most easily done after we've done expression
1075  * preprocessing.
1076  */
1077  if (hasOuterJoins)
1078  reduce_outer_joins(root);
1079 
1080  /*
1081  * If we have any RTE_RESULT relations, see if they can be deleted from
1082  * the jointree. We also rely on this processing to flatten single-child
1083  * FromExprs underneath outer joins. This step is most effectively done
1084  * after we've done expression preprocessing and outer join reduction.
1085  */
1086  if (hasResultRTEs || hasOuterJoins)
1087  remove_useless_result_rtes(root);
1088 
1089  /*
1090  * Do the main planning.
1091  */
1092  grouping_planner(root, tuple_fraction, setops);
1093 
1094  /*
1095  * Capture the set of outer-level param IDs we have access to, for use in
1096  * extParam/allParam calculations later.
1097  */
1098  SS_identify_outer_params(root);
1099 
1100  /*
1101  * If any initPlans were created in this query level, adjust the surviving
1102  * Paths' costs and parallel-safety flags to account for them. The
1103  * initPlans won't actually get attached to the plan tree till
1104  * create_plan() runs, but we must include their effects now.
1105  */
1106  final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1107  SS_charge_for_initplans(root, final_rel);
1108 
1109  /*
1110  * Make sure we've identified the cheapest Path for the final rel. (By
1111  * doing this here not in grouping_planner, we include initPlan costs in
1112  * the decision, though it's unlikely that will change anything.)
1113  */
1114  set_cheapest(final_rel);
1115 
1116  return root;
1117 }
Bitmapset * bms_make_singleton(int x)
Definition: bitmapset.c:216
bool contain_agg_clause(Node *clause)
Definition: clauses.c:177
bool contain_subplans(Node *clause)
Definition: clauses.c:330
#define IS_OUTER_JOIN(jointype)
Definition: nodes.h:337
@ RTE_JOIN
Definition: parsenodes.h:1030
@ RTE_VALUES
Definition: parsenodes.h:1033
@ RTE_SUBQUERY
Definition: parsenodes.h:1029
@ RTE_RESULT
Definition: parsenodes.h:1036
@ RTE_FUNCTION
Definition: parsenodes.h:1031
@ RTE_TABLEFUNC
Definition: parsenodes.h:1032
bool has_subclass(Oid relationId)
Definition: pg_inherits.c:355
#define EXPRKIND_TABLEFUNC_LATERAL
Definition: planner.c:90
#define EXPRKIND_APPINFO
Definition: planner.c:85
static void preprocess_rowmarks(PlannerInfo *root)
Definition: planner.c:2265
#define EXPRKIND_RTFUNC_LATERAL
Definition: planner.c:81
#define EXPRKIND_VALUES_LATERAL
Definition: planner.c:83
#define EXPRKIND_LIMIT
Definition: planner.c:84
static void grouping_planner(PlannerInfo *root, double tuple_fraction, SetOperationStmt *setops)
Definition: planner.c:1305
#define EXPRKIND_ARBITER_ELEM
Definition: planner.c:88
void preprocess_function_rtes(PlannerInfo *root)
Definition: prepjointree.c:776
void flatten_simple_union_all(PlannerInfo *root)
void transform_MERGE_to_join(Query *parse)
Definition: prepjointree.c:152
void remove_useless_result_rtes(PlannerInfo *root)
void pull_up_sublinks(PlannerInfo *root)
Definition: prepjointree.c:342
void replace_empty_jointree(Query *parse)
Definition: prepjointree.c:284
void pull_up_subqueries(PlannerInfo *root)
Definition: prepjointree.c:817
void reduce_outer_joins(PlannerInfo *root)
Index query_level
Definition: pathnodes.h:208
TableFunc * tablefunc
Definition: parsenodes.h:1194
struct TableSampleClause * tablesample
Definition: parsenodes.h:1108
Query * subquery
Definition: parsenodes.h:1114
List * values_lists
Definition: parsenodes.h:1200
JoinType jointype
Definition: parsenodes.h:1161
List * functions
Definition: parsenodes.h:1187
void SS_process_ctes(PlannerInfo *root)
Definition: subselect.c:880
void SS_identify_outer_params(PlannerInfo *root)
Definition: subselect.c:2072
void SS_charge_for_initplans(PlannerInfo *root, RelOptInfo *final_rel)
Definition: subselect.c:2134

References generate_unaccent_rules::action, assign_special_exec_param(), bms_make_singleton(), contain_agg_clause(), contain_subplans(), contain_volatile_functions(), copyObject, CurrentMemoryContext, WindowClause::endOffset, expression_returns_set(), EXPRKIND_APPINFO, EXPRKIND_ARBITER_ELEM, EXPRKIND_LIMIT, EXPRKIND_QUAL, EXPRKIND_RTFUNC, EXPRKIND_RTFUNC_LATERAL, EXPRKIND_TABLEFUNC, EXPRKIND_TABLEFUNC_LATERAL, EXPRKIND_TABLESAMPLE, EXPRKIND_TARGET, EXPRKIND_VALUES, EXPRKIND_VALUES_LATERAL, fetch_upper_rel(), flatten_join_alias_vars(), flatten_simple_union_all(), RangeTblEntry::functions, grouping_planner(), has_subclass(), RangeTblEntry::inh, IS_OUTER_JOIN, RangeTblEntry::jointype, lappend(), lfirst, lfirst_node, list_length(), list_make1, makeNode, Max, NIL, parse(), preprocess_expression(), preprocess_function_rtes(), preprocess_qual_conditions(), preprocess_rowmarks(), pull_up_sublinks(), pull_up_subqueries(), WithCheckOption::qual, PlannerInfo::query_level, reduce_outer_joins(), RangeTblEntry::relid, remove_useless_result_rtes(), replace_empty_jointree(), root, rt_fetch, RTE_FUNCTION, RTE_JOIN, RTE_RELATION, RTE_RESULT, RTE_SUBQUERY, RTE_TABLEFUNC, RTE_VALUES, RangeTblEntry::rtekind, set_cheapest(), SS_charge_for_initplans(), SS_identify_outer_params(), SS_process_ctes(), WindowClause::startOffset, RangeTblEntry::subquery, RangeTblEntry::tablefunc, RangeTblEntry::tablesample, transform_MERGE_to_join(), UPPERREL_FINAL, and RangeTblEntry::values_lists.

Referenced by make_subplan(), recurse_set_operations(), set_subquery_pathlist(), SS_process_ctes(), and standard_planner().
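
The call shape at the top level can be seen in standard_planner(): the outermost Query has no parent planner level, is not a recursive worktable scan, and is not a set-operation leaf. A minimal sketch (not verbatim source; glob and tuple_fraction are assumed to have been set up as standard_planner() does):

    PlannerInfo *root;

    root = subquery_planner(glob, parse, NULL,
                            false,          /* hasRecursion */
                            tuple_fraction, /* 0.0 = optimize total runtime */
                            NULL);          /* setops: not a set-op leaf */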

Variable Documentation

◆ create_upper_paths_hook

create_upper_paths_hook_type create_upper_paths_hook = NULL

◆ cursor_tuple_fraction

double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION

Definition at line 66 of file planner.c.

Referenced by standard_planner().
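
As a hedged illustration (paraphrased, not verbatim source), standard_planner() consults this GUC only when the caller passes CURSOR_OPT_FAST_PLAN, i.e. when planning a cursor whose early rows matter more than total runtime; the value seeds the tuple_fraction handed down to subquery_planner():

    double tuple_fraction = 0.0;    /* default: optimize for total runtime */

    if (cursorOptions & CURSOR_OPT_FAST_PLAN)
        tuple_fraction = cursor_tuple_fraction; /* e.g. 0.1 = first 10% */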

◆ debug_parallel_query

int debug_parallel_query = DEBUG_PARALLEL_OFF

Definition at line 67 of file planner.c.

Referenced by HandleParallelMessage(), query_planner(), and standard_planner().
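
A hedged sketch of its effect in standard_planner() (paraphrased; the real code also adjusts the target list, cost fields, and the invisible flag used by regress mode): when set to on or regress, a single-worker Gather is forced on top of any parallel-safe plan, purely as a testing aid:

    if (debug_parallel_query != DEBUG_PARALLEL_OFF &&
        top_plan->parallel_safe)
    {
        Gather *gather = makeNode(Gather);

        gather->plan.lefttree = top_plan;
        gather->num_workers = 1;    /* exactly one background worker */
        gather->single_copy = true; /* worker runs the whole plan once */
        top_plan = &gather->plan;
    }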

◆ parallel_leader_participation

bool parallel_leader_participation = true

Definition at line 68 of file planner.c.

Referenced by ExecGather(), ExecGatherMerge(), ExecInitGather(), and get_parallel_divisor().
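
For context, a hedged sketch of how get_parallel_divisor() in costsize.c consumes the flag: a participating leader is costed as a fractional extra worker, with its contribution discounted as the worker count grows, since the leader spends more of its time servicing the workers' tuple queues:

    double parallel_divisor = path->parallel_workers;

    if (parallel_leader_participation)
    {
        double leader_contribution = 1.0 - (0.3 * path->parallel_workers);

        if (leader_contribution > 0)
            parallel_divisor += leader_contribution;
    }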

◆ planner_hook

planner_hook_type planner_hook = NULL

Definition at line 71 of file planner.c.

Referenced by _PG_init(), and planner().
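
A minimal, self-contained sketch of the usual installation pattern from an extension's _PG_init(); my_planner is an illustrative name, not part of planner.c. A well-behaved hook saves the previous hook value and chains to it, falling back to standard_planner():

    #include "postgres.h"
    #include "fmgr.h"
    #include "optimizer/planner.h"

    PG_MODULE_MAGIC;

    static planner_hook_type prev_planner_hook = NULL;

    static PlannedStmt *
    my_planner(Query *parse, const char *query_string,
               int cursorOptions, ParamListInfo boundParams)
    {
        /* inspect or adjust the Query here, then chain onward */
        if (prev_planner_hook)
            return prev_planner_hook(parse, query_string,
                                     cursorOptions, boundParams);
        return standard_planner(parse, query_string,
                                cursorOptions, boundParams);
    }

    void
    _PG_init(void)
    {
        prev_planner_hook = planner_hook;
        planner_hook = my_planner;
    }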