PostgreSQL Source Code  git master
planner.c File Reference
#include "postgres.h"
#include <limits.h>
#include <math.h>
#include "access/htup_details.h"
#include "access/parallel.h"
#include "access/sysattr.h"
#include "access/xact.h"
#include "catalog/pg_constraint_fn.h"
#include "catalog/pg_proc.h"
#include "catalog/pg_type.h"
#include "executor/executor.h"
#include "executor/nodeAgg.h"
#include "foreign/fdwapi.h"
#include "miscadmin.h"
#include "lib/bipartite_match.h"
#include "lib/knapsack.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/plancat.h"
#include "optimizer/planmain.h"
#include "optimizer/planner.h"
#include "optimizer/prep.h"
#include "optimizer/subselect.h"
#include "optimizer/tlist.h"
#include "optimizer/var.h"
#include "parser/analyze.h"
#include "parser/parsetree.h"
#include "parser/parse_agg.h"
#include "rewrite/rewriteManip.h"
#include "storage/dsm_impl.h"
#include "utils/rel.h"
#include "utils/selfuncs.h"
#include "utils/lsyscache.h"
#include "utils/syscache.h"
Include dependency graph for planner.c:

Go to the source code of this file.

Data Structures

struct  standard_qp_extra
 
struct  grouping_sets_data
 

Macros

#define EXPRKIND_QUAL   0
 
#define EXPRKIND_TARGET   1
 
#define EXPRKIND_RTFUNC   2
 
#define EXPRKIND_RTFUNC_LATERAL   3
 
#define EXPRKIND_VALUES   4
 
#define EXPRKIND_VALUES_LATERAL   5
 
#define EXPRKIND_LIMIT   6
 
#define EXPRKIND_APPINFO   7
 
#define EXPRKIND_PHV   8
 
#define EXPRKIND_TABLESAMPLE   9
 
#define EXPRKIND_ARBITER_ELEM   10
 
#define EXPRKIND_TABLEFUNC   11
 
#define EXPRKIND_TABLEFUNC_LATERAL   12
 

Functions

static Node * preprocess_expression (PlannerInfo *root, Node *expr, int kind)
 
static void preprocess_qual_conditions (PlannerInfo *root, Node *jtnode)
 
static void inheritance_planner (PlannerInfo *root)
 
static void grouping_planner (PlannerInfo *root, bool inheritance_update, double tuple_fraction)
 
static grouping_sets_data * preprocess_grouping_sets (PlannerInfo *root)
 
static List * remap_to_groupclause_idx (List *groupClause, List *gsets, int *tleref_to_colnum_map)
 
static void preprocess_rowmarks (PlannerInfo *root)
 
static double preprocess_limit (PlannerInfo *root, double tuple_fraction, int64 *offset_est, int64 *count_est)
 
static bool limit_needed (Query *parse)
 
static void remove_useless_groupby_columns (PlannerInfo *root)
 
static List * preprocess_groupclause (PlannerInfo *root, List *force)
 
static List * extract_rollup_sets (List *groupingSets)
 
static List * reorder_grouping_sets (List *groupingSets, List *sortclause)
 
static void standard_qp_callback (PlannerInfo *root, void *extra)
 
static double get_number_of_groups (PlannerInfo *root, double path_rows, grouping_sets_data *gd)
 
static Size estimate_hashagg_tablesize (Path *path, const AggClauseCosts *agg_costs, double dNumGroups)
 
static RelOptInfo * create_grouping_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, const AggClauseCosts *agg_costs, grouping_sets_data *gd)
 
static void consider_groupingsets_paths (PlannerInfo *root, RelOptInfo *grouped_rel, Path *path, bool is_sorted, bool can_hash, PathTarget *target, grouping_sets_data *gd, const AggClauseCosts *agg_costs, double dNumGroups)
 
static RelOptInfo * create_window_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *input_target, PathTarget *output_target, List *tlist, WindowFuncLists *wflists, List *activeWindows)
 
static void create_one_window_path (PlannerInfo *root, RelOptInfo *window_rel, Path *path, PathTarget *input_target, PathTarget *output_target, List *tlist, WindowFuncLists *wflists, List *activeWindows)
 
static RelOptInfo * create_distinct_paths (PlannerInfo *root, RelOptInfo *input_rel)
 
static RelOptInfo * create_ordered_paths (PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, double limit_tuples)
 
static PathTarget * make_group_input_target (PlannerInfo *root, PathTarget *final_target)
 
static PathTarget * make_partial_grouping_target (PlannerInfo *root, PathTarget *grouping_target)
 
static List * postprocess_setop_tlist (List *new_tlist, List *orig_tlist)
 
static List * select_active_windows (PlannerInfo *root, WindowFuncLists *wflists)
 
static PathTarget * make_window_input_target (PlannerInfo *root, PathTarget *final_target, List *activeWindows)
 
static List * make_pathkeys_for_window (PlannerInfo *root, WindowClause *wc, List *tlist)
 
static PathTarget * make_sort_input_target (PlannerInfo *root, PathTarget *final_target, bool *have_postponed_srfs)
 
static void adjust_paths_for_srfs (PlannerInfo *root, RelOptInfo *rel, List *targets, List *targets_contain_srfs)
 
PlannedStmt * planner (Query *parse, int cursorOptions, ParamListInfo boundParams)
 
PlannedStmt * standard_planner (Query *parse, int cursorOptions, ParamListInfo boundParams)
 
PlannerInfo * subquery_planner (PlannerGlobal *glob, Query *parse, PlannerInfo *parent_root, bool hasRecursion, double tuple_fraction)
 
Expr * preprocess_phv_expression (PlannerInfo *root, Expr *expr)
 
bool is_dummy_plan (Plan *plan)
 
RowMarkType select_rowmark_type (RangeTblEntry *rte, LockClauseStrength strength)
 
void mark_partial_aggref (Aggref *agg, AggSplit aggsplit)
 
Path * get_cheapest_fractional_path (RelOptInfo *rel, double tuple_fraction)
 
Expr * expression_planner (Expr *expr)
 
bool plan_cluster_use_sort (Oid tableOid, Oid indexOid)
 
List * get_partitioned_child_rels (PlannerInfo *root, Index rti)
 
List * get_partitioned_child_rels_for_join (PlannerInfo *root, Relids join_relids)
 

Variables

double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION
 
int force_parallel_mode = FORCE_PARALLEL_OFF
 
bool parallel_leader_participation = true
 
planner_hook_type planner_hook = NULL
 
create_upper_paths_hook_type create_upper_paths_hook = NULL
 

Macro Definition Documentation

◆ EXPRKIND_APPINFO

#define EXPRKIND_APPINFO   7

Definition at line 81 of file planner.c.

Referenced by subquery_planner().

◆ EXPRKIND_ARBITER_ELEM

#define EXPRKIND_ARBITER_ELEM   10

Definition at line 84 of file planner.c.

Referenced by subquery_planner().

◆ EXPRKIND_LIMIT

#define EXPRKIND_LIMIT   6

Definition at line 80 of file planner.c.

Referenced by subquery_planner().

◆ EXPRKIND_PHV

#define EXPRKIND_PHV   8

Definition at line 82 of file planner.c.

Referenced by preprocess_phv_expression().

◆ EXPRKIND_QUAL

#define EXPRKIND_QUAL   0

Definition at line 74 of file planner.c.

Referenced by preprocess_expression(), preprocess_qual_conditions(), and subquery_planner().

◆ EXPRKIND_RTFUNC

#define EXPRKIND_RTFUNC   2

Definition at line 76 of file planner.c.

Referenced by preprocess_expression(), and subquery_planner().

◆ EXPRKIND_RTFUNC_LATERAL

#define EXPRKIND_RTFUNC_LATERAL   3

Definition at line 77 of file planner.c.

Referenced by subquery_planner().

◆ EXPRKIND_TABLEFUNC

#define EXPRKIND_TABLEFUNC   11

Definition at line 85 of file planner.c.

Referenced by preprocess_expression(), and subquery_planner().

◆ EXPRKIND_TABLEFUNC_LATERAL

#define EXPRKIND_TABLEFUNC_LATERAL   12

Definition at line 86 of file planner.c.

Referenced by subquery_planner().

◆ EXPRKIND_TABLESAMPLE

#define EXPRKIND_TABLESAMPLE   9

Definition at line 83 of file planner.c.

Referenced by preprocess_expression(), and subquery_planner().

◆ EXPRKIND_TARGET

#define EXPRKIND_TARGET   1

Definition at line 75 of file planner.c.

Referenced by subquery_planner().

◆ EXPRKIND_VALUES

#define EXPRKIND_VALUES   4

Definition at line 78 of file planner.c.

Referenced by preprocess_expression(), and subquery_planner().

◆ EXPRKIND_VALUES_LATERAL

#define EXPRKIND_VALUES_LATERAL   5

Definition at line 79 of file planner.c.

Referenced by subquery_planner().

Function Documentation

◆ adjust_paths_for_srfs()

static void adjust_paths_for_srfs ( PlannerInfo *root,
RelOptInfo *rel,
List *targets,
List *targets_contain_srfs 
)
static

Definition at line 5919 of file planner.c.

References apply_projection_to_path(), Assert, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, create_projection_path(), create_set_projection_path(), forboth, lfirst, lfirst_int, lfirst_node, linitial_int, list_length(), Path::param_info, RelOptInfo::partial_pathlist, RelOptInfo::pathlist, and subpath().

Referenced by grouping_planner().

5921 {
5922  ListCell *lc;
5923 
5924  Assert(list_length(targets) == list_length(targets_contain_srfs));
5925  Assert(!linitial_int(targets_contain_srfs));
5926 
5927  /* If no SRFs appear at this plan level, nothing to do */
5928  if (list_length(targets) == 1)
5929  return;
5930 
5931  /*
5932  * Stack SRF-evaluation nodes atop each path for the rel.
5933  *
5934  * In principle we should re-run set_cheapest() here to identify the
5935  * cheapest path, but it seems unlikely that adding the same tlist eval
5936  * costs to all the paths would change that, so we don't bother. Instead,
5937  * just assume that the cheapest-startup and cheapest-total paths remain
5938  * so. (There should be no parameterized paths anymore, so we needn't
5939  * worry about updating cheapest_parameterized_paths.)
5940  */
5941  foreach(lc, rel->pathlist)
5942  {
5943  Path *subpath = (Path *) lfirst(lc);
5944  Path *newpath = subpath;
5945  ListCell *lc1,
5946  *lc2;
5947 
5948  Assert(subpath->param_info == NULL);
5949  forboth(lc1, targets, lc2, targets_contain_srfs)
5950  {
5951  PathTarget *thistarget = lfirst_node(PathTarget, lc1);
5952  bool contains_srfs = (bool) lfirst_int(lc2);
5953 
5954  /* If this level doesn't contain SRFs, do regular projection */
5955  if (contains_srfs)
5956  newpath = (Path *) create_set_projection_path(root,
5957  rel,
5958  newpath,
5959  thistarget);
5960  else
5961  newpath = (Path *) apply_projection_to_path(root,
5962  rel,
5963  newpath,
5964  thistarget);
5965  }
5966  lfirst(lc) = newpath;
5967  if (subpath == rel->cheapest_startup_path)
5968  rel->cheapest_startup_path = newpath;
5969  if (subpath == rel->cheapest_total_path)
5970  rel->cheapest_total_path = newpath;
5971  }
5972 
5973  /* Likewise for partial paths, if any */
5974  foreach(lc, rel->partial_pathlist)
5975  {
5976  Path *subpath = (Path *) lfirst(lc);
5977  Path *newpath = subpath;
5978  ListCell *lc1,
5979  *lc2;
5980 
5981  Assert(subpath->param_info == NULL);
5982  forboth(lc1, targets, lc2, targets_contain_srfs)
5983  {
5984  PathTarget *thistarget = lfirst_node(PathTarget, lc1);
5985  bool contains_srfs = (bool) lfirst_int(lc2);
5986 
5987  /* If this level doesn't contain SRFs, do regular projection */
5988  if (contains_srfs)
5989  newpath = (Path *) create_set_projection_path(root,
5990  rel,
5991  newpath,
5992  thistarget);
5993  else
5994  {
5995  /* avoid apply_projection_to_path, in case of multiple refs */
5996  newpath = (Path *) create_projection_path(root,
5997  rel,
5998  newpath,
5999  thistarget);
6000  }
6001  }
6002  lfirst(lc) = newpath;
6003  }
6004 }
Path * apply_projection_to_path(PlannerInfo *root, RelOptInfo *rel, Path *path, PathTarget *target)
Definition: pathnode.c:2384
#define forboth(cell1, list1, cell2, list2)
Definition: pg_list.h:180
struct Path * cheapest_startup_path
Definition: relation.h:602
ParamPathInfo * param_info
Definition: relation.h:1045
ProjectionPath * create_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2293
List * partial_pathlist
Definition: relation.h:601
char bool
Definition: c.h:247
#define linitial_int(l)
Definition: pg_list.h:112
#define lfirst_int(lc)
Definition: pg_list.h:107
#define lfirst_node(type, lc)
Definition: pg_list.h:109
struct Path * cheapest_total_path
Definition: relation.h:603
ProjectSetPath * create_set_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2473
#define Assert(condition)
Definition: c.h:670
#define lfirst(lc)
Definition: pg_list.h:106
static int list_length(const List *l)
Definition: pg_list.h:89
List * pathlist
Definition: relation.h:599
Datum subpath(PG_FUNCTION_ARGS)
Definition: ltree_op.c:234

◆ consider_groupingsets_paths()

static void consider_groupingsets_paths ( PlannerInfo *root,
RelOptInfo *grouped_rel,
Path *path,
bool  is_sorted,
bool  can_hash,
PathTarget *target,
grouping_sets_data *gd,
const AggClauseCosts *agg_costs,
double  dNumGroups 
)
static

Definition at line 4240 of file planner.c.

References add_path(), AGG_HASHED, AGG_MIXED, AGG_SORTED, grouping_sets_data::any_hashable, Assert, bms_is_empty(), bms_is_member(), create_groupingsets_path(), DiscreteKnapsack(), grouping_sets_data::dNumHashGroups, estimate_hashagg_tablesize(), for_each_cell, PlannerInfo::group_pathkeys, RollupData::groupClause, RollupData::gsets, RollupData::gsets_data, RollupData::hashable, Query::havingQual, i, RollupData::is_hashed, lappend(), lcons(), lfirst_node, linitial, list_concat(), list_copy(), list_head(), list_length(), list_make1, lnext, makeNode, Max, Min, NIL, GroupingSetData::numGroups, RollupData::numGroups, palloc(), parse(), PlannerInfo::parse, Path::pathkeys, pathkeys_contained_in(), preprocess_groupclause(), remap_to_groupclause_idx(), grouping_sets_data::rollups, scale, GroupingSetData::set, grouping_sets_data::tleref_to_colnum_map, grouping_sets_data::unsortable_sets, and work_mem.

Referenced by create_grouping_paths().

4249 {
4250  Query *parse = root->parse;
4251 
4252  /*
4253  * If we're not being offered sorted input, then only consider plans that
4254  * can be done entirely by hashing.
4255  *
4256  * We can hash everything if it looks like it'll fit in work_mem. But if
4257  * the input is actually sorted despite not being advertised as such, we
4258  * prefer to make use of that in order to use less memory.
4259  *
4260  * If none of the grouping sets are sortable, then ignore the work_mem
4261  * limit and generate a path anyway, since otherwise we'll just fail.
4262  */
4263  if (!is_sorted)
4264  {
4265  List *new_rollups = NIL;
4266  RollupData *unhashed_rollup = NULL;
4267  List *sets_data;
4268  List *empty_sets_data = NIL;
4269  List *empty_sets = NIL;
4270  ListCell *lc;
4271  ListCell *l_start = list_head(gd->rollups);
4272  AggStrategy strat = AGG_HASHED;
4273  Size hashsize;
4274  double exclude_groups = 0.0;
4275 
4276  Assert(can_hash);
4277 
4278  if (pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
4279  {
4280  unhashed_rollup = lfirst_node(RollupData, l_start);
4281  exclude_groups = unhashed_rollup->numGroups;
4282  l_start = lnext(l_start);
4283  }
4284 
4285  hashsize = estimate_hashagg_tablesize(path,
4286  agg_costs,
4287  dNumGroups - exclude_groups);
4288 
4289  /*
4290  * gd->rollups is empty if we have only unsortable columns to work
4291  * with. Override work_mem in that case; otherwise, we'll rely on the
4292  * sorted-input case to generate usable mixed paths.
4293  */
4294  if (hashsize > work_mem * 1024L && gd->rollups)
4295  return; /* nope, won't fit */
4296 
4297  /*
4298  * We need to burst the existing rollups list into individual grouping
4299  * sets and recompute a groupClause for each set.
4300  */
4301  sets_data = list_copy(gd->unsortable_sets);
4302 
4303  for_each_cell(lc, l_start)
4304  {
4305  RollupData *rollup = lfirst_node(RollupData, lc);
4306 
4307  /*
4308  * If we find an unhashable rollup that's not been skipped by the
4309  * "actually sorted" check above, we can't cope; we'd need sorted
4310  * input (with a different sort order) but we can't get that here.
4311  * So bail out; we'll get a valid path from the is_sorted case
4312  * instead.
4313  *
4314  * The mere presence of empty grouping sets doesn't make a rollup
4315  * unhashable (see preprocess_grouping_sets), we handle those
4316  * specially below.
4317  */
4318  if (!rollup->hashable)
4319  return;
4320  else
4321  sets_data = list_concat(sets_data, list_copy(rollup->gsets_data));
4322  }
4323  foreach(lc, sets_data)
4324  {
4325  GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4326  List *gset = gs->set;
4327  RollupData *rollup;
4328 
4329  if (gset == NIL)
4330  {
4331  /* Empty grouping sets can't be hashed. */
4332  empty_sets_data = lappend(empty_sets_data, gs);
4333  empty_sets = lappend(empty_sets, NIL);
4334  }
4335  else
4336  {
4337  rollup = makeNode(RollupData);
4338 
4339  rollup->groupClause = preprocess_groupclause(root, gset);
4340  rollup->gsets_data = list_make1(gs);
4341  rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4342  rollup->gsets_data,
4343  gd->tleref_to_colnum_map);
4344  rollup->numGroups = gs->numGroups;
4345  rollup->hashable = true;
4346  rollup->is_hashed = true;
4347  new_rollups = lappend(new_rollups, rollup);
4348  }
4349  }
4350 
4351  /*
4352  * If we didn't find anything nonempty to hash, then bail. We'll
4353  * generate a path from the is_sorted case.
4354  */
4355  if (new_rollups == NIL)
4356  return;
4357 
4358  /*
4359  * If there were empty grouping sets they should have been in the
4360  * first rollup.
4361  */
4362  Assert(!unhashed_rollup || !empty_sets);
4363 
4364  if (unhashed_rollup)
4365  {
4366  new_rollups = lappend(new_rollups, unhashed_rollup);
4367  strat = AGG_MIXED;
4368  }
4369  else if (empty_sets)
4370  {
4371  RollupData *rollup = makeNode(RollupData);
4372 
4373  rollup->groupClause = NIL;
4374  rollup->gsets_data = empty_sets_data;
4375  rollup->gsets = empty_sets;
4376  rollup->numGroups = list_length(empty_sets);
4377  rollup->hashable = false;
4378  rollup->is_hashed = false;
4379  new_rollups = lappend(new_rollups, rollup);
4380  strat = AGG_MIXED;
4381  }
4382 
4383  add_path(grouped_rel, (Path *)
4384  create_groupingsets_path(root,
4385  grouped_rel,
4386  path,
4387  target,
4388  (List *) parse->havingQual,
4389  strat,
4390  new_rollups,
4391  agg_costs,
4392  dNumGroups));
4393  return;
4394  }
4395 
4396  /*
4397  * If we have sorted input but nothing we can do with it, bail.
4398  */
4399  if (list_length(gd->rollups) == 0)
4400  return;
4401 
4402  /*
4403  * Given sorted input, we try and make two paths: one sorted and one mixed
4404  * sort/hash. (We need to try both because hashagg might be disabled, or
4405  * some columns might not be sortable.)
4406  *
4407  * can_hash is passed in as false if some obstacle elsewhere (such as
4408  * ordered aggs) means that we shouldn't consider hashing at all.
4409  */
4410  if (can_hash && gd->any_hashable)
4411  {
4412  List *rollups = NIL;
4413  List *hash_sets = list_copy(gd->unsortable_sets);
4414  double availspace = (work_mem * 1024.0);
4415  ListCell *lc;
4416 
4417  /*
4418  * Account first for space needed for groups we can't sort at all.
4419  */
4420  availspace -= (double) estimate_hashagg_tablesize(path,
4421  agg_costs,
4422  gd->dNumHashGroups);
4423 
4424  if (availspace > 0 && list_length(gd->rollups) > 1)
4425  {
4426  double scale;
4427  int num_rollups = list_length(gd->rollups);
4428  int k_capacity;
4429  int *k_weights = palloc(num_rollups * sizeof(int));
4430  Bitmapset *hash_items = NULL;
4431  int i;
4432 
4433  /*
4434  * We treat this as a knapsack problem: the knapsack capacity
4435  * represents work_mem, the item weights are the estimated memory
4436  * usage of the hashtables needed to implement a single rollup,
4437  * and we really ought to use the cost saving as the item value;
4438  * however, currently the costs assigned to sort nodes don't
4439  * reflect the comparison costs well, and so we treat all items as
4440  * of equal value (each rollup we hash instead saves us one sort).
4441  *
4442  * To use the discrete knapsack, we need to scale the values to a
4443  * reasonably small bounded range. We choose to allow a 5% error
4444  * margin; we have no more than 4096 rollups in the worst possible
4445  * case, which with a 5% error margin will require a bit over 42MB
4446  * of workspace. (Anyone wanting to plan queries that complex had
4447  * better have the memory for it. In more reasonable cases, with
4448  * no more than a couple of dozen rollups, the memory usage will
4449  * be negligible.)
4450  *
4451  * k_capacity is naturally bounded, but we clamp the values for
4452  * scale and weight (below) to avoid overflows or underflows (or
4453  * uselessly trying to use a scale factor less than 1 byte).
4454  */
4455  scale = Max(availspace / (20.0 * num_rollups), 1.0);
4456  k_capacity = (int) floor(availspace / scale);
4457 
4458  /*
4459  * We leave the first rollup out of consideration since it's the
4460  * one that matches the input sort order. We assign indexes "i"
4461  * to only those entries considered for hashing; the second loop,
4462  * below, must use the same condition.
4463  */
4464  i = 0;
4465  for_each_cell(lc, lnext(list_head(gd->rollups)))
4466  {
4467  RollupData *rollup = lfirst_node(RollupData, lc);
4468 
4469  if (rollup->hashable)
4470  {
4471  double sz = estimate_hashagg_tablesize(path,
4472  agg_costs,
4473  rollup->numGroups);
4474 
4475  /*
4476  * If sz is enormous, but work_mem (and hence scale) is
4477  * small, avoid integer overflow here.
4478  */
4479  k_weights[i] = (int) Min(floor(sz / scale),
4480  k_capacity + 1.0);
4481  ++i;
4482  }
4483  }
4484 
4485  /*
4486  * Apply knapsack algorithm; compute the set of items which
4487  * maximizes the value stored (in this case the number of sorts
4488  * saved) while keeping the total size (approximately) within
4489  * capacity.
4490  */
4491  if (i > 0)
4492  hash_items = DiscreteKnapsack(k_capacity, i, k_weights, NULL);
4493 
4494  if (!bms_is_empty(hash_items))
4495  {
4496  rollups = list_make1(linitial(gd->rollups));
4497 
4498  i = 0;
4499  for_each_cell(lc, lnext(list_head(gd->rollups)))
4500  {
4501  RollupData *rollup = lfirst_node(RollupData, lc);
4502 
4503  if (rollup->hashable)
4504  {
4505  if (bms_is_member(i, hash_items))
4506  hash_sets = list_concat(hash_sets,
4507  list_copy(rollup->gsets_data));
4508  else
4509  rollups = lappend(rollups, rollup);
4510  ++i;
4511  }
4512  else
4513  rollups = lappend(rollups, rollup);
4514  }
4515  }
4516  }
4517 
4518  if (!rollups && hash_sets)
4519  rollups = list_copy(gd->rollups);
4520 
4521  foreach(lc, hash_sets)
4522  {
4523  GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4524  RollupData *rollup = makeNode(RollupData);
4525 
4526  Assert(gs->set != NIL);
4527 
4528  rollup->groupClause = preprocess_groupclause(root, gs->set);
4529  rollup->gsets_data = list_make1(gs);
4530  rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4531  rollup->gsets_data,
4532  gd->tleref_to_colnum_map);
4533  rollup->numGroups = gs->numGroups;
4534  rollup->hashable = true;
4535  rollup->is_hashed = true;
4536  rollups = lcons(rollup, rollups);
4537  }
4538 
4539  if (rollups)
4540  {
4541  add_path(grouped_rel, (Path *)
4542  create_groupingsets_path(root,
4543  grouped_rel,
4544  path,
4545  target,
4546  (List *) parse->havingQual,
4547  AGG_MIXED,
4548  rollups,
4549  agg_costs,
4550  dNumGroups));
4551  }
4552  }
4553 
4554  /*
4555  * Now try the simple sorted case.
4556  */
4557  if (!gd->unsortable_sets)
4558  add_path(grouped_rel, (Path *)
4559  create_groupingsets_path(root,
4560  grouped_rel,
4561  path,
4562  target,
4563  (List *) parse->havingQual,
4564  AGG_SORTED,
4565  gd->rollups,
4566  agg_costs,
4567  dNumGroups));
4568 }
List * group_pathkeys
Definition: relation.h:264
#define NIL
Definition: pg_list.h:69
Query * parse
Definition: relation.h:155
List * groupClause
Definition: relation.h:1570
void add_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:420
static List * preprocess_groupclause(PlannerInfo *root, List *force)
Definition: planner.c:2990
#define Min(x, y)
Definition: c.h:802
bool is_hashed
Definition: relation.h:1575
List * list_copy(const List *oldlist)
Definition: list.c:1160
int scale
Definition: pgbench.c:108
double dNumHashGroups
Definition: planner.c:104
List * list_concat(List *list1, List *list2)
Definition: list.c:321
double numGroups
Definition: relation.h:1573
#define list_make1(x1)
Definition: pg_list.h:139
GroupingSetsPath * create_groupingsets_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *having_qual, AggStrategy aggstrategy, List *rollups, const AggClauseCosts *agg_costs, double numGroups)
Definition: pathnode.c:2761
#define linitial(l)
Definition: pg_list.h:111
int * tleref_to_colnum_map
Definition: planner.c:109
#define lfirst_node(type, lc)
Definition: pg_list.h:109
static ListCell * list_head(const List *l)
Definition: pg_list.h:77
#define lnext(lc)
Definition: pg_list.h:105
static Size estimate_hashagg_tablesize(Path *path, const AggClauseCosts *agg_costs, double dNumGroups)
Definition: planner.c:3559
List * lappend(List *list, void *datum)
Definition: list.c:128
bool bms_is_empty(const Bitmapset *a)
Definition: bitmapset.c:663
bool pathkeys_contained_in(List *keys1, List *keys2)
Definition: pathkeys.c:317
static List * remap_to_groupclause_idx(List *groupClause, List *gsets, int *tleref_to_colnum_map)
Definition: planner.c:2345
int work_mem
Definition: globals.c:113
List * lcons(void *datum, List *list)
Definition: list.c:259
List * pathkeys
Definition: relation.h:1056
#define Max(x, y)
Definition: c.h:796
#define makeNode(_type_)
Definition: nodes.h:558
#define Assert(condition)
Definition: c.h:670
size_t Size
Definition: c.h:404
static int list_length(const List *l)
Definition: pg_list.h:89
List * unsortable_sets
Definition: planner.c:108
#define for_each_cell(cell, initcell)
Definition: pg_list.h:169
AggStrategy
Definition: nodes.h:736
void * palloc(Size size)
Definition: mcxt.c:848
int i
double numGroups
Definition: relation.h:1564
bool hashable
Definition: relation.h:1574
Node * havingQual
Definition: parsenodes.h:150
Definition: pg_list.h:45
Bitmapset * DiscreteKnapsack(int max_weight, int num_items, int *item_weights, double *item_values)
Definition: knapsack.c:55
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:420
List * gsets_data
Definition: relation.h:1572
static struct subre * parse(struct vars *, int, int, struct state *, struct state *)
Definition: regcomp.c:649
List * gsets
Definition: relation.h:1571

◆ create_distinct_paths()

static RelOptInfo * create_distinct_paths ( PlannerInfo *root,
RelOptInfo *input_rel 
static

Definition at line 4766 of file planner.c.

References add_path(), AGG_HASHED, AGGSPLIT_SIMPLE, Assert, RelOptInfo::cheapest_total_path, RelOptInfo::consider_parallel, create_agg_path(), create_sort_path(), create_upper_paths_hook, create_upper_unique_path(), PlannerInfo::distinct_pathkeys, Query::distinctClause, enable_hashagg, ereport, errcode(), errdetail(), errmsg(), ERROR, estimate_num_groups(), RelOptInfo::fdwroutine, fetch_upper_rel(), get_sortgrouplist_exprs(), FdwRoutine::GetForeignUpperPaths, Query::groupClause, grouping_is_hashable(), grouping_is_sortable(), Query::groupingSets, Query::hasAggs, Query::hasDistinctOn, hash_agg_entry_size(), PlannerInfo::hasHavingQual, lfirst, list_length(), MAXALIGN, NIL, parse(), PlannerInfo::parse, Path::pathkeys, pathkeys_contained_in(), RelOptInfo::pathlist, Path::pathtarget, Path::rows, RelOptInfo::serverid, set_cheapest(), SizeofMinimalTupleHeader, PlannerInfo::sort_pathkeys, Query::targetList, UPPERREL_DISTINCT, RelOptInfo::userid, RelOptInfo::useridiscurrent, PathTarget::width, and work_mem.

Referenced by grouping_planner().

4768 {
4769  Query *parse = root->parse;
4770  Path *cheapest_input_path = input_rel->cheapest_total_path;
4771  RelOptInfo *distinct_rel;
4772  double numDistinctRows;
4773  bool allow_hash;
4774  Path *path;
4775  ListCell *lc;
4776 
4777  /* For now, do all work in the (DISTINCT, NULL) upperrel */
4778  distinct_rel = fetch_upper_rel(root, UPPERREL_DISTINCT, NULL);
4779 
4780  /*
4781  * We don't compute anything at this level, so distinct_rel will be
4782  * parallel-safe if the input rel is parallel-safe. In particular, if
4783  * there is a DISTINCT ON (...) clause, any path for the input_rel will
4784  * output those expressions, and will not be parallel-safe unless those
4785  * expressions are parallel-safe.
4786  */
4787  distinct_rel->consider_parallel = input_rel->consider_parallel;
4788 
4789  /*
4790  * If the input rel belongs to a single FDW, so does the distinct_rel.
4791  */
4792  distinct_rel->serverid = input_rel->serverid;
4793  distinct_rel->userid = input_rel->userid;
4794  distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4795  distinct_rel->fdwroutine = input_rel->fdwroutine;
4796 
4797  /* Estimate number of distinct rows there will be */
4798  if (parse->groupClause || parse->groupingSets || parse->hasAggs ||
4799  root->hasHavingQual)
4800  {
4801  /*
4802  * If there was grouping or aggregation, use the number of input rows
4803  * as the estimated number of DISTINCT rows (ie, assume the input is
4804  * already mostly unique).
4805  */
4806  numDistinctRows = cheapest_input_path->rows;
4807  }
4808  else
4809  {
4810  /*
4811  * Otherwise, the UNIQUE filter has effects comparable to GROUP BY.
4812  */
4813  List *distinctExprs;
4814 
4815  distinctExprs = get_sortgrouplist_exprs(parse->distinctClause,
4816  parse->targetList);
4817  numDistinctRows = estimate_num_groups(root, distinctExprs,
4818  cheapest_input_path->rows,
4819  NULL);
4820  }
4821 
4822  /*
4823  * Consider sort-based implementations of DISTINCT, if possible.
4824  */
4825  if (grouping_is_sortable(parse->distinctClause))
4826  {
4827  /*
4828  * First, if we have any adequately-presorted paths, just stick a
4829  * Unique node on those. Then consider doing an explicit sort of the
4830  * cheapest input path and Unique'ing that.
4831  *
4832  * When we have DISTINCT ON, we must sort by the more rigorous of
4833  * DISTINCT and ORDER BY, else it won't have the desired behavior.
4834  * Also, if we do have to do an explicit sort, we might as well use
4835  * the more rigorous ordering to avoid a second sort later. (Note
4836  * that the parser will have ensured that one clause is a prefix of
4837  * the other.)
4838  */
4839  List *needed_pathkeys;
4840 
4841  if (parse->hasDistinctOn &&
4842  list_length(root->distinct_pathkeys) <
4843  list_length(root->sort_pathkeys))
4844  needed_pathkeys = root->sort_pathkeys;
4845  else
4846  needed_pathkeys = root->distinct_pathkeys;
4847 
4848  foreach(lc, input_rel->pathlist)
4849  {
4850  Path *path = (Path *) lfirst(lc);
4851 
4852  if (pathkeys_contained_in(needed_pathkeys, path->pathkeys))
4853  {
4854  add_path(distinct_rel, (Path *)
4855  create_upper_unique_path(root, distinct_rel,
4856  path,
4857  list_length(root->distinct_pathkeys),
4858  numDistinctRows));
4859  }
4860  }
4861 
4862  /* For explicit-sort case, always use the more rigorous clause */
4863  if (list_length(root->distinct_pathkeys) <
4864  list_length(root->sort_pathkeys))
4865  {
4866  needed_pathkeys = root->sort_pathkeys;
4867  /* Assert checks that parser didn't mess up... */
4868  Assert(pathkeys_contained_in(root->distinct_pathkeys,
4869  needed_pathkeys));
4870  }
4871  else
4872  needed_pathkeys = root->distinct_pathkeys;
4873 
4874  path = cheapest_input_path;
4875  if (!pathkeys_contained_in(needed_pathkeys, path->pathkeys))
4876  path = (Path *) create_sort_path(root, distinct_rel,
4877  path,
4878  needed_pathkeys,
4879  -1.0);
4880 
4881  add_path(distinct_rel, (Path *)
4882  create_upper_unique_path(root, distinct_rel,
4883  path,
4884  list_length(root->distinct_pathkeys),
4885  numDistinctRows));
4886  }
4887 
4888  /*
4889  * Consider hash-based implementations of DISTINCT, if possible.
4890  *
4891  * If we were not able to make any other types of path, we *must* hash or
4892  * die trying. If we do have other choices, there are several things that
4893  * should prevent selection of hashing: if the query uses DISTINCT ON
4894  * (because it won't really have the expected behavior if we hash), or if
4895  * enable_hashagg is off, or if it looks like the hashtable will exceed
4896  * work_mem.
4897  *
4898  * Note: grouping_is_hashable() is much more expensive to check than the
4899  * other gating conditions, so we want to do it last.
4900  */
4901  if (distinct_rel->pathlist == NIL)
4902  allow_hash = true; /* we have no alternatives */
4903  else if (parse->hasDistinctOn || !enable_hashagg)
4904  allow_hash = false; /* policy-based decision not to hash */
4905  else
4906  {
4907  Size hashentrysize;
4908 
4909  /* Estimate per-hash-entry space at tuple width... */
4910  hashentrysize = MAXALIGN(cheapest_input_path->pathtarget->width) +
4911  MAXALIGN(SizeofMinimalTupleHeader);
4912  /* plus the per-hash-entry overhead */
4913  hashentrysize += hash_agg_entry_size(0);
4914 
4915  /* Allow hashing only if hashtable is predicted to fit in work_mem */
4916  allow_hash = (hashentrysize * numDistinctRows <= work_mem * 1024L);
4917  }
4918 
4919  if (allow_hash && grouping_is_hashable(parse->distinctClause))
4920  {
4921  /* Generate hashed aggregate path --- no sort needed */
4922  add_path(distinct_rel, (Path *)
4923  create_agg_path(root,
4924  distinct_rel,
4925  cheapest_input_path,
4926  cheapest_input_path->pathtarget,
4927  AGG_HASHED,
4928  AGGSPLIT_SIMPLE,
4929  parse->distinctClause,
4930  NIL,
4931  NULL,
4932  numDistinctRows));
4933  }
4934 
4935  /* Give a helpful error if we failed to find any implementation */
4936  if (distinct_rel->pathlist == NIL)
4937  ereport(ERROR,
4938  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4939  errmsg("could not implement DISTINCT"),
4940  errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4941 
4942  /*
4943  * If there is an FDW that's responsible for all baserels of the query,
4944  * let it consider adding ForeignPaths.
4945  */
4946  if (distinct_rel->fdwroutine &&
4947  distinct_rel->fdwroutine->GetForeignUpperPaths)
4948  distinct_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_DISTINCT,
4949  input_rel, distinct_rel);
4950 
4951  /* Let extensions possibly add some more paths */
4952  if (create_upper_paths_hook)
4953  (*create_upper_paths_hook) (root, UPPERREL_DISTINCT,
4954  input_rel, distinct_rel);
4955 
4956  /* Now choose the best path(s) */
4957  set_cheapest(distinct_rel);
4958 
4959  return distinct_rel;
4960 }
GetForeignUpperPaths_function GetForeignUpperPaths
Definition: fdwapi.h:197
#define NIL
Definition: pg_list.h:69
double estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, List **pgset)
Definition: selfuncs.c:3360
PathTarget * pathtarget
Definition: relation.h:1043
Query * parse
Definition: relation.h:155
void add_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:420
UpperUniquePath * create_upper_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, int numCols, double numGroups)
Definition: pathnode.c:2643
Oid userid
Definition: relation.h:633
bool hasAggs
Definition: parsenodes.h:123
List * groupingSets
Definition: parsenodes.h:148
int errcode(int sqlerrcode)
Definition: elog.c:575
bool grouping_is_hashable(List *groupClause)
Definition: tlist.c:538
create_upper_paths_hook_type create_upper_paths_hook
Definition: planner.c:70
bool useridiscurrent
Definition: relation.h:634
bool hasDistinctOn
Definition: parsenodes.h:127
List * targetList
Definition: parsenodes.h:138
List * distinctClause
Definition: parsenodes.h:154
#define ERROR
Definition: elog.h:43
RelOptInfo * fetch_upper_rel(PlannerInfo *root, UpperRelationKind kind, Relids relids)
Definition: relnode.c:1137
AggPath * create_agg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, AggStrategy aggstrategy, AggSplit aggsplit, List *groupClause, List *qual, const AggClauseCosts *aggcosts, double numGroups)
Definition: pathnode.c:2695
struct Path * cheapest_total_path
Definition: relation.h:603
struct FdwRoutine * fdwroutine
Definition: relation.h:636
int errdetail(const char *fmt,...)
Definition: elog.c:873
#define ereport(elevel, rest)
Definition: elog.h:122
List * sort_pathkeys
Definition: relation.h:267
void set_cheapest(RelOptInfo *parent_rel)
Definition: pathnode.c:242
Oid serverid
Definition: relation.h:632
#define SizeofMinimalTupleHeader
Definition: htup_details.h:655
bool pathkeys_contained_in(List *keys1, List *keys2)
Definition: pathkeys.c:317
int work_mem
Definition: globals.c:113
List * distinct_pathkeys
Definition: relation.h:266
SortPath * create_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, double limit_tuples)
Definition: pathnode.c:2540
List * pathkeys
Definition: relation.h:1056
#define Assert(condition)
Definition: c.h:670
#define lfirst(lc)
Definition: pg_list.h:106
double rows
Definition: relation.h:1052
size_t Size
Definition: c.h:404
static int list_length(const List *l)
Definition: pg_list.h:89
Size hash_agg_entry_size(int numAggs)
Definition: nodeAgg.c:2039
List * get_sortgrouplist_exprs(List *sgClauses, List *targetList)
Definition: tlist.c:395
#define MAXALIGN(LEN)
Definition: c.h:623
bool consider_parallel
Definition: relation.h:593
bool enable_hashagg
Definition: costsize.c:124
int width
Definition: relation.h:975
List * groupClause
Definition: parsenodes.h:146
int errmsg(const char *fmt,...)
Definition: elog.c:797
bool grouping_is_sortable(List *groupClause)
Definition: tlist.c:518
bool hasHavingQual
Definition: relation.h:305
List * pathlist
Definition: relation.h:599
Definition: pg_list.h:45
static struct subre * parse(struct vars *, int, int, struct state *, struct state *)
Definition: regcomp.c:649

◆ create_grouping_paths()

static RelOptInfo * create_grouping_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget target,
const AggClauseCosts agg_costs,
grouping_sets_data gd 
)
static

Definition at line 3603 of file planner.c.

References add_partial_path(), add_path(), AGG_HASHED, AGG_PLAIN, AGG_SORTED, AGGSPLIT_FINAL_DESERIAL, AGGSPLIT_INITIAL_SERIAL, AGGSPLIT_SIMPLE, grouping_sets_data::any_hashable, Assert, RelOptInfo::cheapest_total_path, consider_groupingsets_paths(), RelOptInfo::consider_parallel, create_agg_path(), create_append_path(), create_gather_merge_path(), create_gather_path(), create_group_path(), create_result_path(), create_sort_path(), create_upper_paths_hook, ereport, errcode(), errdetail(), errmsg(), ERROR, estimate_hashagg_tablesize(), PathTarget::exprs, RelOptInfo::fdwroutine, fetch_upper_rel(), get_agg_clause_costs(), get_number_of_groups(), FdwRoutine::GetForeignUpperPaths, PlannerInfo::group_pathkeys, Query::groupClause, grouping_is_hashable(), grouping_is_sortable(), Query::groupingSets, Query::hasAggs, PlannerInfo::hasHavingQual, AggClauseCosts::hasNonPartial, AggClauseCosts::hasNonSerial, Query::havingQual, is_parallel_safe(), lappend(), lfirst, linitial, list_length(), make_partial_grouping_target(), MemSet, NIL, AggClauseCosts::numOrderedAggs, Path::parallel_workers, parse(), PlannerInfo::parse, RelOptInfo::partial_pathlist, Path::pathkeys, pathkeys_contained_in(), RelOptInfo::pathlist, Path::pathtarget, grouping_sets_data::rollups, Path::rows, RelOptInfo::serverid, set_cheapest(), subpath(), UPPERREL_GROUP_AGG, RelOptInfo::userid, RelOptInfo::useridiscurrent, and work_mem.

Referenced by grouping_planner().

3608 {
3609  Query *parse = root->parse;
3610  Path *cheapest_path = input_rel->cheapest_total_path;
3611  RelOptInfo *grouped_rel;
3612  PathTarget *partial_grouping_target = NULL;
3613  AggClauseCosts agg_partial_costs; /* parallel only */
3614  AggClauseCosts agg_final_costs; /* parallel only */
3615  Size hashaggtablesize;
3616  double dNumGroups;
3617  double dNumPartialGroups = 0;
3618  bool can_hash;
3619  bool can_sort;
3620  bool try_parallel_aggregation;
3621 
3622  ListCell *lc;
3623 
3624  /* For now, do all work in the (GROUP_AGG, NULL) upperrel */
3625  grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, NULL);
3626 
3627  /*
3628  * If the input relation is not parallel-safe, then the grouped relation
3629  * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
3630  * target list and HAVING quals are parallel-safe.
3631  */
3632  if (input_rel->consider_parallel &&
3633  is_parallel_safe(root, (Node *) target->exprs) &&
3634  is_parallel_safe(root, (Node *) parse->havingQual))
3635  grouped_rel->consider_parallel = true;
3636 
3637  /*
3638  * If the input rel belongs to a single FDW, so does the grouped rel.
3639  */
3640  grouped_rel->serverid = input_rel->serverid;
3641  grouped_rel->userid = input_rel->userid;
3642  grouped_rel->useridiscurrent = input_rel->useridiscurrent;
3643  grouped_rel->fdwroutine = input_rel->fdwroutine;
3644 
3645  /*
3646  * Check for degenerate grouping.
3647  */
3648  if ((root->hasHavingQual || parse->groupingSets) &&
3649  !parse->hasAggs && parse->groupClause == NIL)
3650  {
3651  /*
3652  * We have a HAVING qual and/or grouping sets, but no aggregates and
3653  * no GROUP BY (which implies that the grouping sets are all empty).
3654  *
3655  * This is a degenerate case in which we are supposed to emit either
3656  * zero or one row for each grouping set depending on whether HAVING
3657  * succeeds. Furthermore, there cannot be any variables in either
3658  * HAVING or the targetlist, so we actually do not need the FROM table
3659  * at all! We can just throw away the plan-so-far and generate a
3660  * Result node. This is a sufficiently unusual corner case that it's
3661  * not worth contorting the structure of this module to avoid having
3662  * to generate the earlier paths in the first place.
3663  */
3664  int nrows = list_length(parse->groupingSets);
3665  Path *path;
3666 
3667  if (nrows > 1)
3668  {
3669  /*
3670  * Doesn't seem worthwhile writing code to cons up a
3671  * generate_series or a values scan to emit multiple rows. Instead
3672  * just make N clones and append them. (With a volatile HAVING
3673  * clause, this means you might get between 0 and N output rows.
3674  * Offhand I think that's desired.)
3675  */
3676  List *paths = NIL;
3677 
3678  while (--nrows >= 0)
3679  {
3680  path = (Path *)
3681  create_result_path(root, grouped_rel,
3682  target,
3683  (List *) parse->havingQual);
3684  paths = lappend(paths, path);
3685  }
3686  path = (Path *)
3687  create_append_path(grouped_rel,
3688  paths,
3689  NULL,
3690  0,
3691  NIL);
3692  path->pathtarget = target;
3693  }
3694  else
3695  {
3696  /* No grouping sets, or just one, so one output row */
3697  path = (Path *)
3698  create_result_path(root, grouped_rel,
3699  target,
3700  (List *) parse->havingQual);
3701  }
3702 
3703  add_path(grouped_rel, path);
3704 
3705  /* No need to consider any other alternatives. */
3706  set_cheapest(grouped_rel);
3707 
3708  return grouped_rel;
3709  }
3710 
3711  /*
3712  * Estimate number of groups.
3713  */
3714  dNumGroups = get_number_of_groups(root,
3715  cheapest_path->rows,
3716  gd);
3717 
3718  /*
3719  * Determine whether it's possible to perform sort-based implementations
3720  * of grouping. (Note that if groupClause is empty,
3721  * grouping_is_sortable() is trivially true, and all the
3722  * pathkeys_contained_in() tests will succeed too, so that we'll consider
3723  * every surviving input path.)
3724  *
3725  * If we have grouping sets, we might be able to sort some but not all of
3726  * them; in this case, we need can_sort to be true as long as we must
3727  * consider any sorted-input plan.
3728  */
3729  can_sort = (gd && gd->rollups != NIL)
3730  || grouping_is_sortable(parse->groupClause);
3731 
3732  /*
3733  * Determine whether we should consider hash-based implementations of
3734  * grouping.
3735  *
3736  * Hashed aggregation only applies if we're grouping. If we have grouping
3737  * sets, some groups might be hashable but others not; in this case we set
3738  * can_hash true as long as there is nothing globally preventing us from
3739  * hashing (and we should therefore consider plans with hashes).
3740  *
3741  * Executor doesn't support hashed aggregation with DISTINCT or ORDER BY
3742  * aggregates. (Doing so would imply storing *all* the input values in
3743  * the hash table, and/or running many sorts in parallel, either of which
3744  * seems like a certain loser.) We similarly don't support ordered-set
3745  * aggregates in hashed aggregation, but that case is also included in the
3746  * numOrderedAggs count.
3747  *
3748  * Note: grouping_is_hashable() is much more expensive to check than the
3749  * other gating conditions, so we want to do it last.
3750  */
3751  can_hash = (parse->groupClause != NIL &&
3752  agg_costs->numOrderedAggs == 0 &&
3753  (gd ? gd->any_hashable : grouping_is_hashable(parse->groupClause)));
3754 
3755  /*
3756  * If grouped_rel->consider_parallel is true, then paths that we generate
3757  * for this grouping relation could be run inside of a worker, but that
3758  * doesn't mean we can actually use the PartialAggregate/FinalizeAggregate
3759  * execution strategy. Figure that out.
3760  */
3761  if (!grouped_rel->consider_parallel)
3762  {
3763  /* Not even parallel-safe. */
3764  try_parallel_aggregation = false;
3765  }
3766  else if (input_rel->partial_pathlist == NIL)
3767  {
3768  /* Nothing to use as input for partial aggregate. */
3769  try_parallel_aggregation = false;
3770  }
3771  else if (!parse->hasAggs && parse->groupClause == NIL)
3772  {
3773  /*
3774  * We don't know how to do parallel aggregation unless we have either
3775  * some aggregates or a grouping clause.
3776  */
3777  try_parallel_aggregation = false;
3778  }
3779  else if (parse->groupingSets)
3780  {
3781  /* We don't know how to do grouping sets in parallel. */
3782  try_parallel_aggregation = false;
3783  }
3784  else if (agg_costs->hasNonPartial || agg_costs->hasNonSerial)
3785  {
3786  /* Insufficient support for partial mode. */
3787  try_parallel_aggregation = false;
3788  }
3789  else
3790  {
3791  /* Everything looks good. */
3792  try_parallel_aggregation = true;
3793  }
3794 
3795  /*
3796  * Before generating paths for grouped_rel, we first generate any possible
3797  * partial paths; that way, later code can easily consider both parallel
3798  * and non-parallel approaches to grouping. Note that the partial paths
3799  * we generate here are also partially aggregated, so simply pushing a
3800  * Gather node on top is insufficient to create a final path, as would be
3801  * the case for a scan/join rel.
3802  */
3803  if (try_parallel_aggregation)
3804  {
3805  Path *cheapest_partial_path = linitial(input_rel->partial_pathlist);
3806 
3807  /*
3808  * Build target list for partial aggregate paths. These paths cannot
3809  * just emit the same tlist as regular aggregate paths, because (1) we
3810  * must include Vars and Aggrefs needed in HAVING, which might not
3811  * appear in the result tlist, and (2) the Aggrefs must be set in
3812  * partial mode.
3813  */
3814  partial_grouping_target = make_partial_grouping_target(root, target);
3815 
3816  /* Estimate number of partial groups. */
3817  dNumPartialGroups = get_number_of_groups(root,
3818  cheapest_partial_path->rows,
3819  gd);
3820 
3821  /*
3822  * Collect statistics about aggregates for estimating costs of
3823  * performing aggregation in parallel.
3824  */
3825  MemSet(&agg_partial_costs, 0, sizeof(AggClauseCosts));
3826  MemSet(&agg_final_costs, 0, sizeof(AggClauseCosts));
3827  if (parse->hasAggs)
3828  {
3829  /* partial phase */
3830  get_agg_clause_costs(root, (Node *) partial_grouping_target->exprs,
3832  &agg_partial_costs);
3833 
3834  /* final phase */
3835  get_agg_clause_costs(root, (Node *) target->exprs,
3837  &agg_final_costs);
3838  get_agg_clause_costs(root, parse->havingQual,
3840  &agg_final_costs);
3841  }
3842 
3843  if (can_sort)
3844  {
3845  /* This was checked before setting try_parallel_aggregation */
3846  Assert(parse->hasAggs || parse->groupClause);
3847 
3848  /*
3849  * Use any available suitably-sorted path as input, and also
3850  * consider sorting the cheapest partial path.
3851  */
3852  foreach(lc, input_rel->partial_pathlist)
3853  {
3854  Path *path = (Path *) lfirst(lc);
3855  bool is_sorted;
3856 
3857  is_sorted = pathkeys_contained_in(root->group_pathkeys,
3858  path->pathkeys);
3859  if (path == cheapest_partial_path || is_sorted)
3860  {
3861  /* Sort the cheapest partial path, if it isn't already */
3862  if (!is_sorted)
3863  path = (Path *) create_sort_path(root,
3864  grouped_rel,
3865  path,
3866  root->group_pathkeys,
3867  -1.0);
3868 
3869  if (parse->hasAggs)
3870  add_partial_path(grouped_rel, (Path *)
3871  create_agg_path(root,
3872  grouped_rel,
3873  path,
3874  partial_grouping_target,
3875  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
3877  parse->groupClause,
3878  NIL,
3879  &agg_partial_costs,
3880  dNumPartialGroups));
3881  else
3882  add_partial_path(grouped_rel, (Path *)
3883  create_group_path(root,
3884  grouped_rel,
3885  path,
3886  partial_grouping_target,
3887  parse->groupClause,
3888  NIL,
3889  dNumPartialGroups));
3890  }
3891  }
3892  }
3893 
3894  if (can_hash)
3895  {
3896  /* Checked above */
3897  Assert(parse->hasAggs || parse->groupClause);
3898 
3899  hashaggtablesize =
3900  estimate_hashagg_tablesize(cheapest_partial_path,
3901  &agg_partial_costs,
3902  dNumPartialGroups);
3903 
3904  /*
3905  * Tentatively produce a partial HashAgg Path, depending on if it
3906  * looks as if the hash table will fit in work_mem.
3907  */
3908  if (hashaggtablesize < work_mem * 1024L)
3909  {
3910  add_partial_path(grouped_rel, (Path *)
3911  create_agg_path(root,
3912  grouped_rel,
3913  cheapest_partial_path,
3914  partial_grouping_target,
3915  AGG_HASHED,
3917  parse->groupClause,
3918  NIL,
3919  &agg_partial_costs,
3920  dNumPartialGroups));
3921  }
3922  }
3923  }
3924 
3925  /* Build final grouping paths */
3926  if (can_sort)
3927  {
3928  /*
3929  * Use any available suitably-sorted path as input, and also consider
3930  * sorting the cheapest-total path.
3931  */
3932  foreach(lc, input_rel->pathlist)
3933  {
3934  Path *path = (Path *) lfirst(lc);
3935  bool is_sorted;
3936 
3937  is_sorted = pathkeys_contained_in(root->group_pathkeys,
3938  path->pathkeys);
3939  if (path == cheapest_path || is_sorted)
3940  {
3941  /* Sort the cheapest-total path if it isn't already sorted */
3942  if (!is_sorted)
3943  path = (Path *) create_sort_path(root,
3944  grouped_rel,
3945  path,
3946  root->group_pathkeys,
3947  -1.0);
3948 
3949  /* Now decide what to stick atop it */
3950  if (parse->groupingSets)
3951  {
3952  consider_groupingsets_paths(root, grouped_rel,
3953  path, true, can_hash, target,
3954  gd, agg_costs, dNumGroups);
3955  }
3956  else if (parse->hasAggs)
3957  {
3958  /*
3959  * We have aggregation, possibly with plain GROUP BY. Make
3960  * an AggPath.
3961  */
3962  add_path(grouped_rel, (Path *)
3963  create_agg_path(root,
3964  grouped_rel,
3965  path,
3966  target,
3967  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
3969  parse->groupClause,
3970  (List *) parse->havingQual,
3971  agg_costs,
3972  dNumGroups));
3973  }
3974  else if (parse->groupClause)
3975  {
3976  /*
3977  * We have GROUP BY without aggregation or grouping sets.
3978  * Make a GroupPath.
3979  */
3980  add_path(grouped_rel, (Path *)
3981  create_group_path(root,
3982  grouped_rel,
3983  path,
3984  target,
3985  parse->groupClause,
3986  (List *) parse->havingQual,
3987  dNumGroups));
3988  }
3989  else
3990  {
3991  /* Other cases should have been handled above */
3992  Assert(false);
3993  }
3994  }
3995  }
3996 
3997  /*
3998  * Now generate a complete GroupAgg Path atop of the cheapest partial
3999  * path. We can do this using either Gather or Gather Merge.
4000  */
4001  if (grouped_rel->partial_pathlist)
4002  {
4003  Path *path = (Path *) linitial(grouped_rel->partial_pathlist);
4004  double total_groups = path->rows * path->parallel_workers;
4005 
4006  path = (Path *) create_gather_path(root,
4007  grouped_rel,
4008  path,
4009  partial_grouping_target,
4010  NULL,
4011  &total_groups);
4012 
4013  /*
4014  * Since Gather's output is always unsorted, we'll need to sort,
4015  * unless there's no GROUP BY clause or a degenerate (constant)
4016  * one, in which case there will only be a single group.
4017  */
4018  if (root->group_pathkeys)
4019  path = (Path *) create_sort_path(root,
4020  grouped_rel,
4021  path,
4022  root->group_pathkeys,
4023  -1.0);
4024 
4025  if (parse->hasAggs)
4026  add_path(grouped_rel, (Path *)
4027  create_agg_path(root,
4028  grouped_rel,
4029  path,
4030  target,
4031  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
4033  parse->groupClause,
4034  (List *) parse->havingQual,
4035  &agg_final_costs,
4036  dNumGroups));
4037  else
4038  add_path(grouped_rel, (Path *)
4039  create_group_path(root,
4040  grouped_rel,
4041  path,
4042  target,
4043  parse->groupClause,
4044  (List *) parse->havingQual,
4045  dNumGroups));
4046 
4047  /*
4048  * The point of using Gather Merge rather than Gather is that it
4049  * can preserve the ordering of the input path, so there's no
4050  * reason to try it unless (1) it's possible to produce more than
4051  * one output row and (2) we want the output path to be ordered.
4052  */
4053  if (parse->groupClause != NIL && root->group_pathkeys != NIL)
4054  {
4055  foreach(lc, grouped_rel->partial_pathlist)
4056  {
4057  Path *subpath = (Path *) lfirst(lc);
4058  Path *gmpath;
4059  double total_groups;
4060 
4061  /*
4062  * It's useful to consider paths that are already properly
4063  * ordered for Gather Merge, because those don't need a
4064  * sort. It's also useful to consider the cheapest path,
4065  * because sorting it in parallel and then doing Gather
4066  * Merge may be better than doing an unordered Gather
4067  * followed by a sort. But there's no point in
4068  * considering non-cheapest paths that aren't already
4069  * sorted correctly.
4070  */
4071  if (path != subpath &&
4073  subpath->pathkeys))
4074  continue;
4075 
4076  total_groups = subpath->rows * subpath->parallel_workers;
4077 
4078  gmpath = (Path *)
4080  grouped_rel,
4081  subpath,
4082  partial_grouping_target,
4083  root->group_pathkeys,
4084  NULL,
4085  &total_groups);
4086 
4087  if (parse->hasAggs)
4088  add_path(grouped_rel, (Path *)
4089  create_agg_path(root,
4090  grouped_rel,
4091  gmpath,
4092  target,
4093  parse->groupClause ? AGG_SORTED : AGG_PLAIN,
4095  parse->groupClause,
4096  (List *) parse->havingQual,
4097  &agg_final_costs,
4098  dNumGroups));
4099  else
4100  add_path(grouped_rel, (Path *)
4101  create_group_path(root,
4102  grouped_rel,
4103  gmpath,
4104  target,
4105  parse->groupClause,
4106  (List *) parse->havingQual,
4107  dNumGroups));
4108  }
4109  }
4110  }
4111  }
4112 
4113  if (can_hash)
4114  {
4115  if (parse->groupingSets)
4116  {
4117  /*
4118  * Try for a hash-only groupingsets path over unsorted input.
4119  */
4120  consider_groupingsets_paths(root, grouped_rel,
4121  cheapest_path, false, true, target,
4122  gd, agg_costs, dNumGroups);
4123  }
4124  else
4125  {
4126  hashaggtablesize = estimate_hashagg_tablesize(cheapest_path,
4127  agg_costs,
4128  dNumGroups);
4129 
4130  /*
4131  * Provided that the estimated size of the hashtable does not
4132  * exceed work_mem, we'll generate a HashAgg Path, although if we
4133  * were unable to sort above, then we'd better generate a Path, so
4134  * that we at least have one.
4135  */
4136  if (hashaggtablesize < work_mem * 1024L ||
4137  grouped_rel->pathlist == NIL)
4138  {
4139  /*
4140  * We just need an Agg over the cheapest-total input path,
4141  * since input order won't matter.
4142  */
4143  add_path(grouped_rel, (Path *)
4144  create_agg_path(root, grouped_rel,
4145  cheapest_path,
4146  target,
4147  AGG_HASHED,
4149  parse->groupClause,
4150  (List *) parse->havingQual,
4151  agg_costs,
4152  dNumGroups));
4153  }
4154  }
4155 
4156  /*
4157  * Generate a HashAgg Path atop of the cheapest partial path. Once
4158  * again, we'll only do this if it looks as though the hash table
4159  * won't exceed work_mem.
4160  */
4161  if (grouped_rel->partial_pathlist)
4162  {
4163  Path *path = (Path *) linitial(grouped_rel->partial_pathlist);
4164 
4165  hashaggtablesize = estimate_hashagg_tablesize(path,
4166  &agg_final_costs,
4167  dNumGroups);
4168 
4169  if (hashaggtablesize < work_mem * 1024L)
4170  {
4171  double total_groups = path->rows * path->parallel_workers;
4172 
4173  path = (Path *) create_gather_path(root,
4174  grouped_rel,
4175  path,
4176  partial_grouping_target,
4177  NULL,
4178  &total_groups);
4179 
4180  add_path(grouped_rel, (Path *)
4181  create_agg_path(root,
4182  grouped_rel,
4183  path,
4184  target,
4185  AGG_HASHED,
4187  parse->groupClause,
4188  (List *) parse->havingQual,
4189  &agg_final_costs,
4190  dNumGroups));
4191  }
4192  }
4193  }
4194 
4195  /* Give a helpful error if we failed to find any implementation */
4196  if (grouped_rel->pathlist == NIL)
4197  ereport(ERROR,
4198  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4199  errmsg("could not implement GROUP BY"),
4200  errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4201 
4202  /*
4203  * If there is an FDW that's responsible for all baserels of the query,
4204  * let it consider adding ForeignPaths.
4205  */
4206  if (grouped_rel->fdwroutine &&
4207  grouped_rel->fdwroutine->GetForeignUpperPaths)
4209  input_rel, grouped_rel);
4210 
4211  /* Let extensions possibly add some more paths */
4213  (*create_upper_paths_hook) (root, UPPERREL_GROUP_AGG,
4214  input_rel, grouped_rel);
4215 
4216  /* Now choose the best path(s) */
4217  set_cheapest(grouped_rel);
4218 
4219  /*
4220  * We've been using the partial pathlist for the grouped relation to hold
4221  * partially aggregated paths, but that's actually a little bit bogus
4222  * because it's unsafe for later planning stages -- like ordered_rel ---
4223  * to get the idea that they can use these partial paths as if they didn't
4224  * need a FinalizeAggregate step. Zap the partial pathlist at this stage
4225  * so we don't get confused.
4226  */
4227  grouped_rel->partial_pathlist = NIL;
4228 
4229  return grouped_rel;
4230 }
GetForeignUpperPaths_function GetForeignUpperPaths
Definition: fdwapi.h:197
List * group_pathkeys
Definition: relation.h:264
#define NIL
Definition: pg_list.h:69
GatherPath * create_gather_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, Relids required_outer, double *rows)
Definition: pathnode.c:1744
PathTarget * pathtarget
Definition: relation.h:1043
Query * parse
Definition: relation.h:155
void add_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:420
Oid userid
Definition: relation.h:633
static double get_number_of_groups(PlannerInfo *root, double path_rows, grouping_sets_data *gd)
Definition: planner.c:3447
void get_agg_clause_costs(PlannerInfo *root, Node *clause, AggSplit aggsplit, AggClauseCosts *costs)
Definition: clauses.c:467
bool hasAggs
Definition: parsenodes.h:123
int parallel_workers
Definition: relation.h:1049
List * groupingSets
Definition: parsenodes.h:148
Definition: nodes.h:510
int errcode(int sqlerrcode)
Definition: elog.c:575
List * partial_pathlist
Definition: relation.h:601
#define MemSet(start, val, len)
Definition: c.h:853
bool hasNonSerial
Definition: relation.h:61
bool grouping_is_hashable(List *groupClause)
Definition: tlist.c:538
create_upper_paths_hook_type create_upper_paths_hook
Definition: planner.c:70
bool hasNonPartial
Definition: relation.h:60
bool useridiscurrent
Definition: relation.h:634
AppendPath * create_append_path(RelOptInfo *rel, List *subpaths, Relids required_outer, int parallel_workers, List *partitioned_rels)
Definition: pathnode.c:1211
static void consider_groupingsets_paths(PlannerInfo *root, RelOptInfo *grouped_rel, Path *path, bool is_sorted, bool can_hash, PathTarget *target, grouping_sets_data *gd, const AggClauseCosts *agg_costs, double dNumGroups)
Definition: planner.c:4240
static PathTarget * make_partial_grouping_target(PlannerInfo *root, PathTarget *grouping_target)
Definition: planner.c:5224
bool is_parallel_safe(PlannerInfo *root, Node *node)
Definition: clauses.c:1087
#define linitial(l)
Definition: pg_list.h:111
#define ERROR
Definition: elog.h:43
RelOptInfo * fetch_upper_rel(PlannerInfo *root, UpperRelationKind kind, Relids relids)
Definition: relnode.c:1137
AggPath * create_agg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, AggStrategy aggstrategy, AggSplit aggsplit, List *groupClause, List *qual, const AggClauseCosts *aggcosts, double numGroups)
Definition: pathnode.c:2695
struct Path * cheapest_total_path
Definition: relation.h:603
struct FdwRoutine * fdwroutine
Definition: relation.h:636
int errdetail(const char *fmt,...)
Definition: elog.c:873
GroupPath * create_group_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *groupClause, List *qual, double numGroups)
Definition: pathnode.c:2584
static Size estimate_hashagg_tablesize(Path *path, const AggClauseCosts *agg_costs, double dNumGroups)
Definition: planner.c:3559
#define ereport(elevel, rest)
Definition: elog.h:122
List * lappend(List *list, void *datum)
Definition: list.c:128
int numOrderedAggs
Definition: relation.h:59
void set_cheapest(RelOptInfo *parent_rel)
Definition: pathnode.c:242
Oid serverid
Definition: relation.h:632
List * exprs
Definition: relation.h:972
bool pathkeys_contained_in(List *keys1, List *keys2)
Definition: pathkeys.c:317
int work_mem
Definition: globals.c:113
GatherMergePath * create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *pathkeys, Relids required_outer, double *rows)
Definition: pathnode.c:1653
SortPath * create_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, double limit_tuples)
Definition: pathnode.c:2540
List * pathkeys
Definition: relation.h:1056
#define Assert(condition)
Definition: c.h:670
#define lfirst(lc)
Definition: pg_list.h:106
double rows
Definition: relation.h:1052
size_t Size
Definition: c.h:404
static int list_length(const List *l)
Definition: pg_list.h:89
bool consider_parallel
Definition: relation.h:593
List * groupClause
Definition: parsenodes.h:146
int errmsg(const char *fmt,...)
Definition: elog.c:797
void add_partial_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:760
bool grouping_is_sortable(List *groupClause)
Definition: tlist.c:518
bool hasHavingQual
Definition: relation.h:305
ResultPath * create_result_path(PlannerInfo *root, RelOptInfo *rel, PathTarget *target, List *resconstantqual)
Definition: pathnode.c:1357
List * pathlist
Definition: relation.h:599
Node * havingQual
Definition: parsenodes.h:150
Definition: pg_list.h:45
Datum subpath(PG_FUNCTION_ARGS)
Definition: ltree_op.c:234
static struct subre * parse(struct vars *, int, int, struct state *, struct state *)
Definition: regcomp.c:649

◆ create_one_window_path()

static void create_one_window_path ( PlannerInfo root,
RelOptInfo window_rel,
Path path,
PathTarget input_target,
PathTarget output_target,
List tlist,
WindowFuncLists wflists,
List activeWindows 
)
static

Definition at line 4671 of file planner.c.

References add_column_to_pathtarget(), add_path(), copy_pathtarget(), create_sort_path(), create_windowagg_path(), get_typavgwidth(), lfirst_node, lnext, make_pathkeys_for_window(), Path::pathkeys, pathkeys_contained_in(), PathTarget::width, WindowFuncLists::windowFuncs, and WindowClause::winref.

Referenced by create_window_paths().

4679 {
4680  PathTarget *window_target;
4681  ListCell *l;
4682 
4683  /*
4684  * Since each window clause could require a different sort order, we stack
4685  * up a WindowAgg node for each clause, with sort steps between them as
4686  * needed. (We assume that select_active_windows chose a good order for
4687  * executing the clauses in.)
4688  *
4689  * input_target should contain all Vars and Aggs needed for the result.
4690  * (In some cases we wouldn't need to propagate all of these all the way
4691  * to the top, since they might only be needed as inputs to WindowFuncs.
4692  * It's probably not worth trying to optimize that though.) It must also
4693  * contain all window partitioning and sorting expressions, to ensure
4694  * they're computed only once at the bottom of the stack (that's critical
4695  * for volatile functions). As we climb up the stack, we'll add outputs
4696  * for the WindowFuncs computed at each level.
4697  */
4698  window_target = input_target;
4699 
4700  foreach(l, activeWindows)
4701  {
4703  List *window_pathkeys;
4704 
4705  window_pathkeys = make_pathkeys_for_window(root,
4706  wc,
4707  tlist);
4708 
4709  /* Sort if necessary */
4710  if (!pathkeys_contained_in(window_pathkeys, path->pathkeys))
4711  {
4712  path = (Path *) create_sort_path(root, window_rel,
4713  path,
4714  window_pathkeys,
4715  -1.0);
4716  }
4717 
4718  if (lnext(l))
4719  {
4720  /*
4721  * Add the current WindowFuncs to the output target for this
4722  * intermediate WindowAggPath. We must copy window_target to
4723  * avoid changing the previous path's target.
4724  *
4725  * Note: a WindowFunc adds nothing to the target's eval costs; but
4726  * we do need to account for the increase in tlist width.
4727  */
4728  ListCell *lc2;
4729 
4730  window_target = copy_pathtarget(window_target);
4731  foreach(lc2, wflists->windowFuncs[wc->winref])
4732  {
4733  WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4734 
4735  add_column_to_pathtarget(window_target, (Expr *) wfunc, 0);
4736  window_target->width += get_typavgwidth(wfunc->wintype, -1);
4737  }
4738  }
4739  else
4740  {
4741  /* Install the goal target in the topmost WindowAgg */
4742  window_target = output_target;
4743  }
4744 
4745  path = (Path *)
4746  create_windowagg_path(root, window_rel, path, window_target,
4747  wflists->windowFuncs[wc->winref],
4748  wc,
4749  window_pathkeys);
4750  }
4751 
4752  add_path(window_rel, path);
4753 }
PathTarget * copy_pathtarget(PathTarget *src)
Definition: tlist.c:629
void add_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:420
void add_column_to_pathtarget(PathTarget *target, Expr *expr, Index sortgroupref)
Definition: tlist.c:667
#define lfirst_node(type, lc)
Definition: pg_list.h:109
static List * make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc, List *tlist)
Definition: planner.c:5610
#define lnext(lc)
Definition: pg_list.h:105
bool pathkeys_contained_in(List *keys1, List *keys2)
Definition: pathkeys.c:317
WindowAggPath * create_windowagg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *windowFuncs, WindowClause *winclause, List *winpathkeys)
Definition: pathnode.c:2990
int32 get_typavgwidth(Oid typid, int32 typmod)
Definition: lsyscache.c:2328
SortPath * create_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, double limit_tuples)
Definition: pathnode.c:2540
List * pathkeys
Definition: relation.h:1056
Oid wintype
Definition: primnodes.h:356
int width
Definition: relation.h:975
Definition: pg_list.h:45
List ** windowFuncs
Definition: clauses.h:27

◆ create_ordered_paths()

static RelOptInfo * create_ordered_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget target,
double  limit_tuples 
)
static

Definition at line 4977 of file planner.c.

References add_path(), apply_projection_to_path(), Assert, RelOptInfo::cheapest_total_path, RelOptInfo::consider_parallel, create_gather_merge_path(), create_sort_path(), create_upper_paths_hook, PathTarget::exprs, RelOptInfo::fdwroutine, fetch_upper_rel(), FdwRoutine::GetForeignUpperPaths, is_parallel_safe(), lfirst, linitial, NIL, Path::parallel_workers, RelOptInfo::partial_pathlist, Path::pathkeys, pathkeys_contained_in(), RelOptInfo::pathlist, Path::pathtarget, Path::rows, RelOptInfo::serverid, PlannerInfo::sort_pathkeys, UPPERREL_ORDERED, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by grouping_planner().

4981 {
4982  Path *cheapest_input_path = input_rel->cheapest_total_path;
4983  RelOptInfo *ordered_rel;
4984  ListCell *lc;
4985 
4986  /* For now, do all work in the (ORDERED, NULL) upperrel */
4987  ordered_rel = fetch_upper_rel(root, UPPERREL_ORDERED, NULL);
4988 
4989  /*
4990  * If the input relation is not parallel-safe, then the ordered relation
4991  * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
4992  * target list is parallel-safe.
4993  */
4994  if (input_rel->consider_parallel &&
4995  is_parallel_safe(root, (Node *) target->exprs))
4996  ordered_rel->consider_parallel = true;
4997 
4998  /*
4999  * If the input rel belongs to a single FDW, so does the ordered_rel.
5000  */
5001  ordered_rel->serverid = input_rel->serverid;
5002  ordered_rel->userid = input_rel->userid;
5003  ordered_rel->useridiscurrent = input_rel->useridiscurrent;
5004  ordered_rel->fdwroutine = input_rel->fdwroutine;
5005 
5006  foreach(lc, input_rel->pathlist)
5007  {
5008  Path *path = (Path *) lfirst(lc);
5009  bool is_sorted;
5010 
5011  is_sorted = pathkeys_contained_in(root->sort_pathkeys,
5012  path->pathkeys);
5013  if (path == cheapest_input_path || is_sorted)
5014  {
5015  if (!is_sorted)
5016  {
5017  /* An explicit sort here can take advantage of LIMIT */
5018  path = (Path *) create_sort_path(root,
5019  ordered_rel,
5020  path,
5021  root->sort_pathkeys,
5022  limit_tuples);
5023  }
5024 
5025  /* Add projection step if needed */
5026  if (path->pathtarget != target)
5027  path = apply_projection_to_path(root, ordered_rel,
5028  path, target);
5029 
5030  add_path(ordered_rel, path);
5031  }
5032  }
5033 
5034  /*
5035  * generate_gather_paths() will have already generated a simple Gather
5036  * path for the best parallel path, if any, and the loop above will have
5037  * considered sorting it. Similarly, generate_gather_paths() will also
5038  * have generated order-preserving Gather Merge plans which can be used
5039  * without sorting if they happen to match the sort_pathkeys, and the loop
5040  * above will have handled those as well. However, there's one more
5041  * possibility: it may make sense to sort the cheapest partial path
5042  * according to the required output order and then use Gather Merge.
5043  */
5044  if (ordered_rel->consider_parallel && root->sort_pathkeys != NIL &&
5045  input_rel->partial_pathlist != NIL)
5046  {
5047  Path *cheapest_partial_path;
5048 
5049  cheapest_partial_path = linitial(input_rel->partial_pathlist);
5050 
5051  /*
5052  * If cheapest partial path doesn't need a sort, this is redundant
5053  * with what's already been tried.
5054  */
5056  cheapest_partial_path->pathkeys))
5057  {
5058  Path *path;
5059  double total_groups;
5060 
5061  path = (Path *) create_sort_path(root,
5062  ordered_rel,
5063  cheapest_partial_path,
5064  root->sort_pathkeys,
5065  -1.0);
5066 
5067  total_groups = cheapest_partial_path->rows *
5068  cheapest_partial_path->parallel_workers;
5069  path = (Path *)
5070  create_gather_merge_path(root, ordered_rel,
5071  path,
5072  path->pathtarget,
5073  root->sort_pathkeys, NULL,
5074  &total_groups);
5075 
5076  /* Add projection step if needed */
5077  if (path->pathtarget != target)
5078  path = apply_projection_to_path(root, ordered_rel,
5079  path, target);
5080 
5081  add_path(ordered_rel, path);
5082  }
5083  }
5084 
5085  /*
5086  * If there is an FDW that's responsible for all baserels of the query,
5087  * let it consider adding ForeignPaths.
5088  */
5089  if (ordered_rel->fdwroutine &&
5090  ordered_rel->fdwroutine->GetForeignUpperPaths)
5091  ordered_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_ORDERED,
5092  input_rel, ordered_rel);
5093 
5094  /* Let extensions possibly add some more paths */
5096  (*create_upper_paths_hook) (root, UPPERREL_ORDERED,
5097  input_rel, ordered_rel);
5098 
5099  /*
5100  * No need to bother with set_cheapest here; grouping_planner does not
5101  * need us to do it.
5102  */
5103  Assert(ordered_rel->pathlist != NIL);
5104 
5105  return ordered_rel;
5106 }
Path * apply_projection_to_path(PlannerInfo *root, RelOptInfo *rel, Path *path, PathTarget *target)
Definition: pathnode.c:2384
GetForeignUpperPaths_function GetForeignUpperPaths
Definition: fdwapi.h:197
#define NIL
Definition: pg_list.h:69
PathTarget * pathtarget
Definition: relation.h:1043
void add_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:420
Oid userid
Definition: relation.h:633
int parallel_workers
Definition: relation.h:1049
Definition: nodes.h:510
List * partial_pathlist
Definition: relation.h:601
create_upper_paths_hook_type create_upper_paths_hook
Definition: planner.c:70
bool useridiscurrent
Definition: relation.h:634
bool is_parallel_safe(PlannerInfo *root, Node *node)
Definition: clauses.c:1087
#define linitial(l)
Definition: pg_list.h:111
RelOptInfo * fetch_upper_rel(PlannerInfo *root, UpperRelationKind kind, Relids relids)
Definition: relnode.c:1137
struct Path * cheapest_total_path
Definition: relation.h:603
struct FdwRoutine * fdwroutine
Definition: relation.h:636
List * sort_pathkeys
Definition: relation.h:267
Oid serverid
Definition: relation.h:632
List * exprs
Definition: relation.h:972
bool pathkeys_contained_in(List *keys1, List *keys2)
Definition: pathkeys.c:317
GatherMergePath * create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *pathkeys, Relids required_outer, double *rows)
Definition: pathnode.c:1653
SortPath * create_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, double limit_tuples)
Definition: pathnode.c:2540
List * pathkeys
Definition: relation.h:1056
#define Assert(condition)
Definition: c.h:670
#define lfirst(lc)
Definition: pg_list.h:106
double rows
Definition: relation.h:1052
bool consider_parallel
Definition: relation.h:593
List * pathlist
Definition: relation.h:599

◆ create_window_paths()

static RelOptInfo * create_window_paths ( PlannerInfo root,
RelOptInfo input_rel,
PathTarget input_target,
PathTarget output_target,
List tlist,
WindowFuncLists wflists,
List activeWindows 
)
static

Definition at line 4585 of file planner.c.

References RelOptInfo::cheapest_total_path, RelOptInfo::consider_parallel, create_one_window_path(), create_upper_paths_hook, PathTarget::exprs, RelOptInfo::fdwroutine, fetch_upper_rel(), FdwRoutine::GetForeignUpperPaths, is_parallel_safe(), lfirst, Path::pathkeys, pathkeys_contained_in(), RelOptInfo::pathlist, RelOptInfo::serverid, set_cheapest(), UPPERREL_WINDOW, RelOptInfo::userid, RelOptInfo::useridiscurrent, and PlannerInfo::window_pathkeys.

Referenced by grouping_planner().

4592 {
4593  RelOptInfo *window_rel;
4594  ListCell *lc;
4595 
4596  /* For now, do all work in the (WINDOW, NULL) upperrel */
4597  window_rel = fetch_upper_rel(root, UPPERREL_WINDOW, NULL);
4598 
4599  /*
4600  * If the input relation is not parallel-safe, then the window relation
4601  * can't be parallel-safe, either. Otherwise, we need to examine the
4602  * target list and active windows for non-parallel-safe constructs.
4603  */
4604  if (input_rel->consider_parallel &&
4605  is_parallel_safe(root, (Node *) output_target->exprs) &&
4606  is_parallel_safe(root, (Node *) activeWindows))
4607  window_rel->consider_parallel = true;
4608 
4609  /*
4610  * If the input rel belongs to a single FDW, so does the window rel.
4611  */
4612  window_rel->serverid = input_rel->serverid;
4613  window_rel->userid = input_rel->userid;
4614  window_rel->useridiscurrent = input_rel->useridiscurrent;
4615  window_rel->fdwroutine = input_rel->fdwroutine;
4616 
4617  /*
4618  * Consider computing window functions starting from the existing
4619  * cheapest-total path (which will likely require a sort) as well as any
4620  * existing paths that satisfy root->window_pathkeys (which won't).
4621  */
4622  foreach(lc, input_rel->pathlist)
4623  {
4624  Path *path = (Path *) lfirst(lc);
4625 
4626  if (path == input_rel->cheapest_total_path ||
4629  window_rel,
4630  path,
4631  input_target,
4632  output_target,
4633  tlist,
4634  wflists,
4635  activeWindows);
4636  }
4637 
4638  /*
4639  * If there is an FDW that's responsible for all baserels of the query,
4640  * let it consider adding ForeignPaths.
4641  */
4642  if (window_rel->fdwroutine &&
4643  window_rel->fdwroutine->GetForeignUpperPaths)
4644  window_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_WINDOW,
4645  input_rel, window_rel);
4646 
4647  /* Let extensions possibly add some more paths */
4649  (*create_upper_paths_hook) (root, UPPERREL_WINDOW,
4650  input_rel, window_rel);
4651 
4652  /* Now choose the best path(s) */
4653  set_cheapest(window_rel);
4654 
4655  return window_rel;
4656 }
GetForeignUpperPaths_function GetForeignUpperPaths
Definition: fdwapi.h:197
Oid userid
Definition: relation.h:633
Definition: nodes.h:510
create_upper_paths_hook_type create_upper_paths_hook
Definition: planner.c:70
bool useridiscurrent
Definition: relation.h:634
bool is_parallel_safe(PlannerInfo *root, Node *node)
Definition: clauses.c:1087
static void create_one_window_path(PlannerInfo *root, RelOptInfo *window_rel, Path *path, PathTarget *input_target, PathTarget *output_target, List *tlist, WindowFuncLists *wflists, List *activeWindows)
Definition: planner.c:4671
RelOptInfo * fetch_upper_rel(PlannerInfo *root, UpperRelationKind kind, Relids relids)
Definition: relnode.c:1137
struct Path * cheapest_total_path
Definition: relation.h:603
struct FdwRoutine * fdwroutine
Definition: relation.h:636
void set_cheapest(RelOptInfo *parent_rel)
Definition: pathnode.c:242
Oid serverid
Definition: relation.h:632
List * exprs
Definition: relation.h:972
List * window_pathkeys
Definition: relation.h:265
bool pathkeys_contained_in(List *keys1, List *keys2)
Definition: pathkeys.c:317
List * pathkeys
Definition: relation.h:1056
#define lfirst(lc)
Definition: pg_list.h:106
bool consider_parallel
Definition: relation.h:593
List * pathlist
Definition: relation.h:599

◆ estimate_hashagg_tablesize()

static Size estimate_hashagg_tablesize ( Path path,
const AggClauseCosts agg_costs,
double  dNumGroups 
)
static

Definition at line 3559 of file planner.c.

References hash_agg_entry_size(), MAXALIGN, AggClauseCosts::numAggs, Path::pathtarget, SizeofMinimalTupleHeader, AggClauseCosts::transitionSpace, and PathTarget::width.

Referenced by consider_groupingsets_paths(), and create_grouping_paths().

3561 {
3562  Size hashentrysize;
3563 
3564  /* Estimate per-hash-entry space at tuple width... */
3565  hashentrysize = MAXALIGN(path->pathtarget->width) +
3567 
3568  /* plus space for pass-by-ref transition values... */
3569  hashentrysize += agg_costs->transitionSpace;
3570  /* plus the per-hash-entry overhead */
3571  hashentrysize += hash_agg_entry_size(agg_costs->numAggs);
3572 
3573  /*
3574  * Note that this disregards the effect of fill-factor and growth policy
3575  * of the hash-table. That's probably ok, given default the default
3576  * fill-factor is relatively high. It'd be hard to meaningfully factor in
3577  * "double-in-size" growth policies here.
3578  */
3579  return hashentrysize * dNumGroups;
3580 }
PathTarget * pathtarget
Definition: relation.h:1043
#define SizeofMinimalTupleHeader
Definition: htup_details.h:655
size_t Size
Definition: c.h:404
Size hash_agg_entry_size(int numAggs)
Definition: nodeAgg.c:2039
#define MAXALIGN(LEN)
Definition: c.h:623
int width
Definition: relation.h:975
Size transitionSpace
Definition: relation.h:64

◆ expression_planner()

Expr* expression_planner ( Expr expr)

Definition at line 6029 of file planner.c.

References eval_const_expressions(), and fix_opfuncids().

Referenced by ATExecAddColumn(), ATPrepAlterColumnType(), BeginCopyFrom(), CheckMutability(), ComputePartitionAttrs(), ExecPrepareCheck(), ExecPrepareExpr(), ExecPrepareQual(), get_cast_hashentry(), load_domaintype_info(), slot_fill_defaults(), and transformPartitionBoundValue().

6030 {
6031  Node *result;
6032 
6033  /*
6034  * Convert named-argument function calls, insert default arguments and
6035  * simplify constant subexprs
6036  */
6037  result = eval_const_expressions(NULL, (Node *) expr);
6038 
6039  /* Fill in opfuncid values if missing */
6040  fix_opfuncids(result);
6041 
6042  return (Expr *) result;
6043 }
void fix_opfuncids(Node *node)
Definition: nodeFuncs.c:1582
Definition: nodes.h:510
Node * eval_const_expressions(PlannerInfo *root, Node *node)
Definition: clauses.c:2459

◆ extract_rollup_sets()

static List * extract_rollup_sets ( List groupingSets)
static

Definition at line 3093 of file planner.c.

References Assert, BipartiteMatch(), BipartiteMatchFree(), bms_add_member(), bms_equal(), bms_free(), bms_is_subset(), for_each_cell, i, lappend(), lcons(), lfirst, lfirst_int, list_concat(), list_head(), list_length(), list_make1, lnext, NIL, BipartiteMatchState::pair_uv, BipartiteMatchState::pair_vu, palloc(), palloc0(), and pfree().

Referenced by preprocess_grouping_sets().

3094 {
3095  int num_sets_raw = list_length(groupingSets);
3096  int num_empty = 0;
3097  int num_sets = 0; /* distinct sets */
3098  int num_chains = 0;
3099  List *result = NIL;
3100  List **results;
3101  List **orig_sets;
3102  Bitmapset **set_masks;
3103  int *chains;
3104  short **adjacency;
3105  short *adjacency_buf;
3107  int i;
3108  int j;
3109  int j_size;
3110  ListCell *lc1 = list_head(groupingSets);
3111  ListCell *lc;
3112 
3113  /*
3114  * Start by stripping out empty sets. The algorithm doesn't require this,
3115  * but the planner currently needs all empty sets to be returned in the
3116  * first list, so we strip them here and add them back after.
3117  */
3118  while (lc1 && lfirst(lc1) == NIL)
3119  {
3120  ++num_empty;
3121  lc1 = lnext(lc1);
3122  }
3123 
3124  /* bail out now if it turns out that all we had were empty sets. */
3125  if (!lc1)
3126  return list_make1(groupingSets);
3127 
3128  /*----------
3129  * We don't strictly need to remove duplicate sets here, but if we don't,
3130  * they tend to become scattered through the result, which is a bit
3131  * confusing (and irritating if we ever decide to optimize them out).
3132  * So we remove them here and add them back after.
3133  *
3134  * For each non-duplicate set, we fill in the following:
3135  *
3136  * orig_sets[i] = list of the original set lists
3137  * set_masks[i] = bitmapset for testing inclusion
3138  * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
3139  *
3140  * chains[i] will be the result group this set is assigned to.
3141  *
3142  * We index all of these from 1 rather than 0 because it is convenient
3143  * to leave 0 free for the NIL node in the graph algorithm.
3144  *----------
3145  */
3146  orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
3147  set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
3148  adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
3149  adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
3150 
3151  j_size = 0;
3152  j = 0;
3153  i = 1;
3154 
3155  for_each_cell(lc, lc1)
3156  {
3157  List *candidate = (List *) lfirst(lc);
3158  Bitmapset *candidate_set = NULL;
3159  ListCell *lc2;
3160  int dup_of = 0;
3161 
3162  foreach(lc2, candidate)
3163  {
3164  candidate_set = bms_add_member(candidate_set, lfirst_int(lc2));
3165  }
3166 
3167  /* we can only be a dup if we're the same length as a previous set */
3168  if (j_size == list_length(candidate))
3169  {
3170  int k;
3171 
3172  for (k = j; k < i; ++k)
3173  {
3174  if (bms_equal(set_masks[k], candidate_set))
3175  {
3176  dup_of = k;
3177  break;
3178  }
3179  }
3180  }
3181  else if (j_size < list_length(candidate))
3182  {
3183  j_size = list_length(candidate);
3184  j = i;
3185  }
3186 
3187  if (dup_of > 0)
3188  {
3189  orig_sets[dup_of] = lappend(orig_sets[dup_of], candidate);
3190  bms_free(candidate_set);
3191  }
3192  else
3193  {
3194  int k;
3195  int n_adj = 0;
3196 
3197  orig_sets[i] = list_make1(candidate);
3198  set_masks[i] = candidate_set;
3199 
3200  /* fill in adjacency list; no need to compare equal-size sets */
3201 
3202  for (k = j - 1; k > 0; --k)
3203  {
3204  if (bms_is_subset(set_masks[k], candidate_set))
3205  adjacency_buf[++n_adj] = k;
3206  }
3207 
3208  if (n_adj > 0)
3209  {
3210  adjacency_buf[0] = n_adj;
3211  adjacency[i] = palloc((n_adj + 1) * sizeof(short));
3212  memcpy(adjacency[i], adjacency_buf, (n_adj + 1) * sizeof(short));
3213  }
3214  else
3215  adjacency[i] = NULL;
3216 
3217  ++i;
3218  }
3219  }
3220 
3221  num_sets = i - 1;
3222 
3223  /*
3224  * Apply the graph matching algorithm to do the work.
3225  */
3226  state = BipartiteMatch(num_sets, num_sets, adjacency);
3227 
3228  /*
3229  * Now, the state->pair* fields have the info we need to assign sets to
3230  * chains. Two sets (u,v) belong to the same chain if pair_uv[u] = v or
3231  * pair_vu[v] = u (both will be true, but we check both so that we can do
3232  * it in one pass)
3233  */
3234  chains = palloc0((num_sets + 1) * sizeof(int));
3235 
3236  for (i = 1; i <= num_sets; ++i)
3237  {
3238  int u = state->pair_vu[i];
3239  int v = state->pair_uv[i];
3240 
3241  if (u > 0 && u < i)
3242  chains[i] = chains[u];
3243  else if (v > 0 && v < i)
3244  chains[i] = chains[v];
3245  else
3246  chains[i] = ++num_chains;
3247  }
3248 
3249  /* build result lists. */
3250  results = palloc0((num_chains + 1) * sizeof(List *));
3251 
3252  for (i = 1; i <= num_sets; ++i)
3253  {
3254  int c = chains[i];
3255 
3256  Assert(c > 0);
3257 
3258  results[c] = list_concat(results[c], orig_sets[i]);
3259  }
3260 
3261  /* push any empty sets back on the first list. */
3262  while (num_empty-- > 0)
3263  results[1] = lcons(NIL, results[1]);
3264 
3265  /* make result list */
3266  for (i = 1; i <= num_chains; ++i)
3267  result = lappend(result, results[i]);
3268 
3269  /*
3270  * Free all the things.
3271  *
3272  * (This is over-fussy for small sets but for large sets we could have
3273  * tied up a nontrivial amount of memory.)
3274  */
3275  BipartiteMatchFree(state);
3276  pfree(results);
3277  pfree(chains);
3278  for (i = 1; i <= num_sets; ++i)
3279  if (adjacency[i])
3280  pfree(adjacency[i]);
3281  pfree(adjacency);
3282  pfree(adjacency_buf);
3283  pfree(orig_sets);
3284  for (i = 1; i <= num_sets; ++i)
3285  bms_free(set_masks[i]);
3286  pfree(set_masks);
3287 
3288  return result;
3289 }
#define NIL
Definition: pg_list.h:69
List * list_concat(List *list1, List *list2)
Definition: list.c:321
void BipartiteMatchFree(BipartiteMatchState *state)
#define list_make1(x1)
Definition: pg_list.h:139
void pfree(void *pointer)
Definition: mcxt.c:949
#define lfirst_int(lc)
Definition: pg_list.h:107
bool bms_is_subset(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:308
char * c
static ListCell * list_head(const List *l)
Definition: pg_list.h:77
#define lnext(lc)
Definition: pg_list.h:105
List * lappend(List *list, void *datum)
Definition: list.c:128
void * palloc0(Size size)
Definition: mcxt.c:877
BipartiteMatchState * BipartiteMatch(int u_size, int v_size, short **adjacency)
List * lcons(void *datum, List *list)
Definition: list.c:259
void bms_free(Bitmapset *a)
Definition: bitmapset.c:201
#define Assert(condition)
Definition: c.h:670
#define lfirst(lc)
Definition: pg_list.h:106
Definition: regguts.h:298
static int list_length(const List *l)
Definition: pg_list.h:89
#define for_each_cell(cell, initcell)
Definition: pg_list.h:169
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:698
void * palloc(Size size)
Definition: mcxt.c:848
int i
Definition: pg_list.h:45
bool bms_equal(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:131

◆ get_cheapest_fractional_path()

Path* get_cheapest_fractional_path ( RelOptInfo rel,
double  tuple_fraction 
)

Definition at line 5876 of file planner.c.

References RelOptInfo::cheapest_total_path, compare_fractional_path_costs(), lfirst, RelOptInfo::pathlist, and Path::rows.

Referenced by make_subplan(), recurse_set_operations(), and standard_planner().

5877 {
5878  Path *best_path = rel->cheapest_total_path;
5879  ListCell *l;
5880 
5881  /* If all tuples will be retrieved, just return the cheapest-total path */
5882  if (tuple_fraction <= 0.0)
5883  return best_path;
5884 
5885  /* Convert absolute # of tuples to a fraction; no need to clamp to 0..1 */
5886  if (tuple_fraction >= 1.0 && best_path->rows > 0)
5887  tuple_fraction /= best_path->rows;
5888 
5889  foreach(l, rel->pathlist)
5890  {
5891  Path *path = (Path *) lfirst(l);
5892 
5893  if (path == rel->cheapest_total_path ||
5894  compare_fractional_path_costs(best_path, path, tuple_fraction) <= 0)
5895  continue;
5896 
5897  best_path = path;
5898  }
5899 
5900  return best_path;
5901 }
struct Path * cheapest_total_path
Definition: relation.h:603
#define lfirst(lc)
Definition: pg_list.h:106
double rows
Definition: relation.h:1052
int compare_fractional_path_costs(Path *path1, Path *path2, double fraction)
Definition: pathnode.c:115
List * pathlist
Definition: relation.h:599

◆ get_number_of_groups()

static double get_number_of_groups ( PlannerInfo root,
double  path_rows,
grouping_sets_data gd 
)
static

Definition at line 3447 of file planner.c.

References Assert, grouping_sets_data::dNumHashGroups, estimate_num_groups(), forboth, get_sortgrouplist_exprs(), Query::groupClause, RollupData::groupClause, Query::groupingSets, RollupData::gsets, RollupData::gsets_data, Query::hasAggs, grouping_sets_data::hash_sets_idx, PlannerInfo::hasHavingQual, lfirst, lfirst_node, list_length(), GroupingSetData::numGroups, RollupData::numGroups, parse(), PlannerInfo::parse, grouping_sets_data::rollups, Query::targetList, and grouping_sets_data::unsortable_sets.

Referenced by create_grouping_paths().

3450 {
3451  Query *parse = root->parse;
3452  double dNumGroups;
3453 
3454  if (parse->groupClause)
3455  {
3456  List *groupExprs;
3457 
3458  if (parse->groupingSets)
3459  {
3460  /* Add up the estimates for each grouping set */
3461  ListCell *lc;
3462  ListCell *lc2;
3463 
3464  Assert(gd); /* keep Coverity happy */
3465 
3466  dNumGroups = 0;
3467 
3468  foreach(lc, gd->rollups)
3469  {
3470  RollupData *rollup = lfirst_node(RollupData, lc);
3471  ListCell *lc;
3472 
3473  groupExprs = get_sortgrouplist_exprs(rollup->groupClause,
3474  parse->targetList);
3475 
3476  rollup->numGroups = 0.0;
3477 
3478  forboth(lc, rollup->gsets, lc2, rollup->gsets_data)
3479  {
3480  List *gset = (List *) lfirst(lc);
3482  double numGroups = estimate_num_groups(root,
3483  groupExprs,
3484  path_rows,
3485  &gset);
3486 
3487  gs->numGroups = numGroups;
3488  rollup->numGroups += numGroups;
3489  }
3490 
3491  dNumGroups += rollup->numGroups;
3492  }
3493 
3494  if (gd->hash_sets_idx)
3495  {
3496  ListCell *lc;
3497 
3498  gd->dNumHashGroups = 0;
3499 
3500  groupExprs = get_sortgrouplist_exprs(parse->groupClause,
3501  parse->targetList);
3502 
3503  forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets)
3504  {
3505  List *gset = (List *) lfirst(lc);
3507  double numGroups = estimate_num_groups(root,
3508  groupExprs,
3509  path_rows,
3510  &gset);
3511 
3512  gs->numGroups = numGroups;
3513  gd->dNumHashGroups += numGroups;
3514  }
3515 
3516  dNumGroups += gd->dNumHashGroups;
3517  }
3518  }
3519  else
3520  {
3521  /* Plain GROUP BY */
3522  groupExprs = get_sortgrouplist_exprs(parse->groupClause,
3523  parse->targetList);
3524 
3525  dNumGroups = estimate_num_groups(root, groupExprs, path_rows,
3526  NULL);
3527  }
3528  }
3529  else if (parse->groupingSets)
3530  {
3531  /* Empty grouping sets ... one result row for each one */
3532  dNumGroups = list_length(parse->groupingSets);
3533  }
3534  else if (parse->hasAggs || root->hasHavingQual)
3535  {
3536  /* Plain aggregation, one result row */
3537  dNumGroups = 1;
3538  }
3539  else
3540  {
3541  /* Not grouping */
3542  dNumGroups = 1;
3543  }
3544 
3545  return dNumGroups;
3546 }
double estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, List **pgset)
Definition: selfuncs.c:3360
Query * parse
Definition: relation.h:155
List * groupClause
Definition: relation.h:1570
#define forboth(cell1, list1, cell2, list2)
Definition: pg_list.h:180
bool hasAggs
Definition: parsenodes.h:123
List * hash_sets_idx
Definition: planner.c:103
List * groupingSets
Definition: parsenodes.h:148
double dNumHashGroups
Definition: planner.c:104
double numGroups
Definition: relation.h:1573
List * targetList
Definition: parsenodes.h:138
#define lfirst_node(type, lc)
Definition: pg_list.h:109
#define Assert(condition)
Definition: c.h:670
#define lfirst(lc)
Definition: pg_list.h:106
static int list_length(const List *l)
Definition: pg_list.h:89
List * get_sortgrouplist_exprs(List *sgClauses, List *targetList)
Definition: tlist.c:395
List * unsortable_sets
Definition: planner.c:108
List * groupClause
Definition: parsenodes.h:146
double numGroups
Definition: relation.h:1564
bool hasHavingQual
Definition: relation.h:305
Definition: pg_list.h:45
List * gsets_data
Definition: relation.h:1572
static struct subre * parse(struct vars *, int, int, struct state *, struct state *)
Definition: regcomp.c:649
List * gsets
Definition: relation.h:1571

◆ get_partitioned_child_rels()

List* get_partitioned_child_rels ( PlannerInfo root,
Index  rti 
)

Definition at line 6167 of file planner.c.

References PartitionedChildRelInfo::child_rels, lfirst_node, NIL, PartitionedChildRelInfo::parent_relid, and PlannerInfo::pcinfo_list.

Referenced by add_paths_to_append_rel(), and inheritance_planner().

6168 {
6169  List *result = NIL;
6170  ListCell *l;
6171 
6172  foreach(l, root->pcinfo_list)
6173  {
6175 
6176  if (pc->parent_relid == rti)
6177  {
6178  result = pc->child_rels;
6179  break;
6180  }
6181  }
6182 
6183  return result;
6184 }
#define NIL
Definition: pg_list.h:69
#define lfirst_node(type, lc)
Definition: pg_list.h:109
List * pcinfo_list
Definition: relation.h:254
Definition: pg_list.h:45

◆ get_partitioned_child_rels_for_join()

List* get_partitioned_child_rels_for_join ( PlannerInfo root,
Relids  join_relids 
)

Definition at line 6192 of file planner.c.

References bms_is_member(), PartitionedChildRelInfo::child_rels, lfirst, list_concat(), list_copy(), NIL, PartitionedChildRelInfo::parent_relid, and PlannerInfo::pcinfo_list.

Referenced by add_paths_to_append_rel().

6193 {
6194  List *result = NIL;
6195  ListCell *l;
6196 
6197  foreach(l, root->pcinfo_list)
6198  {
6200 
6201  if (bms_is_member(pc->parent_relid, join_relids))
6202  result = list_concat(result, list_copy(pc->child_rels));
6203  }
6204 
6205  return result;
6206 }
#define NIL
Definition: pg_list.h:69
List * list_copy(const List *oldlist)
Definition: list.c:1160
List * list_concat(List *list1, List *list2)
Definition: list.c:321
#define lfirst(lc)
Definition: pg_list.h:106
List * pcinfo_list
Definition: relation.h:254
Definition: pg_list.h:45
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:420

◆ grouping_planner()

static void grouping_planner ( PlannerInfo root,
bool  inheritance_update,
double  tuple_fraction 
)
static

Definition at line 1554 of file planner.c.

References standard_qp_extra::activeWindows, add_path(), adjust_paths_for_srfs(), AGGSPLIT_SIMPLE, apply_projection_to_path(), Assert, Query::canSetTag, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, CMD_SELECT, Query::commandType, RelOptInfo::consider_parallel, copyObject, create_distinct_paths(), create_grouping_paths(), create_limit_path(), create_lockrows_path(), create_modifytable_path(), create_ordered_paths(), create_pathtarget, create_projection_path(), create_upper_paths_hook, create_window_paths(), Query::distinctClause, ereport, errcode(), errmsg(), ERROR, PathTarget::exprs, RelOptInfo::fdwroutine, fetch_upper_rel(), find_window_functions(), get_agg_clause_costs(), FdwRoutine::GetForeignUpperPaths, standard_qp_extra::groupClause, Query::groupClause, Query::groupingSets, Query::hasAggs, PlannerInfo::hasHavingQual, PlannerInfo::hasRecursion, Query::hasTargetSRFs, Query::hasWindowFuncs, Query::havingQual, is_parallel_safe(), LCS_asString(), lfirst, limit_needed(), PlannerInfo::limit_tuples, Query::limitCount, Query::limitOffset, linitial_int, linitial_node, list_length(), list_make1, list_make1_int, make_group_input_target(), make_pathkeys_for_sortclauses(), make_sort_input_target(), make_window_input_target(), MemSet, NIL, WindowFuncLists::numWindowFuncs, Query::onConflict, OnConflictExpr::onConflictSet, Path::param_info, parse(), PlannerInfo::parse, RelOptInfo::partial_pathlist, RelOptInfo::pathlist, Path::pathtarget, plan_set_operations(), postprocess_setop_tlist(), preprocess_groupclause(), preprocess_grouping_sets(), preprocess_limit(), preprocess_minmax_aggregates(), preprocess_onconflict_targetlist(), preprocess_targetlist(), PlannerInfo::processed_tlist, query_planner(), Query::resultRelation, Query::returningList, grouping_sets_data::rollups, Query::rowMarks, PlannerInfo::rowMarks, Query::rtable, select_active_windows(), RelOptInfo::serverid, Query::setOperations, PlannerInfo::sort_pathkeys, Query::sortClause, 
split_pathtarget_at_srfs(), SS_assign_special_param(), standard_qp_callback(), subpath(), Query::targetList, standard_qp_extra::tlist, PlannerInfo::tuple_fraction, PlannerInfo::upper_targets, UPPERREL_FINAL, UPPERREL_GROUP_AGG, UPPERREL_WINDOW, RelOptInfo::userid, RelOptInfo::useridiscurrent, Query::windowClause, and Query::withCheckOptions.

Referenced by inheritance_planner(), and subquery_planner().

1556 {
1557  Query *parse = root->parse;
1558  List *tlist = parse->targetList;
1559  int64 offset_est = 0;
1560  int64 count_est = 0;
1561  double limit_tuples = -1.0;
1562  bool have_postponed_srfs = false;
1563  PathTarget *final_target;
1564  List *final_targets;
1565  List *final_targets_contain_srfs;
1566  RelOptInfo *current_rel;
1567  RelOptInfo *final_rel;
1568  ListCell *lc;
1569 
1570  /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
1571  if (parse->limitCount || parse->limitOffset)
1572  {
1573  tuple_fraction = preprocess_limit(root, tuple_fraction,
1574  &offset_est, &count_est);
1575 
1576  /*
1577  * If we have a known LIMIT, and don't have an unknown OFFSET, we can
1578  * estimate the effects of using a bounded sort.
1579  */
1580  if (count_est > 0 && offset_est >= 0)
1581  limit_tuples = (double) count_est + (double) offset_est;
1582  }
1583 
1584  /* Make tuple_fraction accessible to lower-level routines */
1585  root->tuple_fraction = tuple_fraction;
1586 
1587  if (parse->setOperations)
1588  {
1589  /*
1590  * If there's a top-level ORDER BY, assume we have to fetch all the
1591  * tuples. This might be too simplistic given all the hackery below
1592  * to possibly avoid the sort; but the odds of accurate estimates here
1593  * are pretty low anyway. XXX try to get rid of this in favor of
1594  * letting plan_set_operations generate both fast-start and
1595  * cheapest-total paths.
1596  */
1597  if (parse->sortClause)
1598  root->tuple_fraction = 0.0;
1599 
1600  /*
1601  * Construct Paths for set operations. The results will not need any
1602  * work except perhaps a top-level sort and/or LIMIT. Note that any
1603  * special work for recursive unions is the responsibility of
1604  * plan_set_operations.
1605  */
1606  current_rel = plan_set_operations(root);
1607 
1608  /*
1609  * We should not need to call preprocess_targetlist, since we must be
1610  * in a SELECT query node. Instead, use the targetlist returned by
1611  * plan_set_operations (since this tells whether it returned any
1612  * resjunk columns!), and transfer any sort key information from the
1613  * original tlist.
1614  */
1615  Assert(parse->commandType == CMD_SELECT);
1616 
1617  tlist = root->processed_tlist; /* from plan_set_operations */
1618 
1619  /* for safety, copy processed_tlist instead of modifying in-place */
1620  tlist = postprocess_setop_tlist(copyObject(tlist), parse->targetList);
1621 
1622  /* Save aside the final decorated tlist */
1623  root->processed_tlist = tlist;
1624 
1625  /* Also extract the PathTarget form of the setop result tlist */
1626  final_target = current_rel->cheapest_total_path->pathtarget;
1627 
1628  /* The setop result tlist couldn't contain any SRFs */
1629  Assert(!parse->hasTargetSRFs);
1630  final_targets = final_targets_contain_srfs = NIL;
1631 
1632  /*
1633  * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have
1634  * checked already, but let's make sure).
1635  */
1636  if (parse->rowMarks)
1637  ereport(ERROR,
1638  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1639  /*------
1640  translator: %s is a SQL row locking clause such as FOR UPDATE */
1641  errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
1643  parse->rowMarks)->strength))));
1644 
1645  /*
1646  * Calculate pathkeys that represent result ordering requirements
1647  */
1648  Assert(parse->distinctClause == NIL);
1650  parse->sortClause,
1651  tlist);
1652  }
1653  else
1654  {
1655  /* No set operations, do regular planning */
1656  PathTarget *sort_input_target;
1657  List *sort_input_targets;
1658  List *sort_input_targets_contain_srfs;
1659  PathTarget *grouping_target;
1660  List *grouping_targets;
1661  List *grouping_targets_contain_srfs;
1662  PathTarget *scanjoin_target;
1663  List *scanjoin_targets;
1664  List *scanjoin_targets_contain_srfs;
1665  bool have_grouping;
1666  AggClauseCosts agg_costs;
1667  WindowFuncLists *wflists = NULL;
1668  List *activeWindows = NIL;
1669  grouping_sets_data *gset_data = NULL;
1670  standard_qp_extra qp_extra;
1671 
1672  /* A recursive query should always have setOperations */
1673  Assert(!root->hasRecursion);
1674 
1675  /* Preprocess grouping sets and GROUP BY clause, if any */
1676  if (parse->groupingSets)
1677  {
1678  gset_data = preprocess_grouping_sets(root);
1679  }
1680  else
1681  {
1682  /* Preprocess regular GROUP BY clause, if any */
1683  if (parse->groupClause)
1684  parse->groupClause = preprocess_groupclause(root, NIL);
1685  }
1686 
1687  /* Preprocess targetlist */
1688  tlist = preprocess_targetlist(root, tlist);
1689 
1690  if (parse->onConflict)
1691  parse->onConflict->onConflictSet =
1693  parse->resultRelation,
1694  parse->rtable);
1695 
1696  /*
1697  * We are now done hacking up the query's targetlist. Most of the
1698  * remaining planning work will be done with the PathTarget
1699  * representation of tlists, but save aside the full representation so
1700  * that we can transfer its decoration (resnames etc) to the topmost
1701  * tlist of the finished Plan.
1702  */
1703  root->processed_tlist = tlist;
1704 
1705  /*
1706  * Collect statistics about aggregates for estimating costs, and mark
1707  * all the aggregates with resolved aggtranstypes. We must do this
1708  * before slicing and dicing the tlist into various pathtargets, else
1709  * some copies of the Aggref nodes might escape being marked with the
1710  * correct transtypes.
1711  *
1712  * Note: currently, we do not detect duplicate aggregates here. This
1713  * may result in somewhat-overestimated cost, which is fine for our
1714  * purposes since all Paths will get charged the same. But at some
1715  * point we might wish to do that detection in the planner, rather
1716  * than during executor startup.
1717  */
1718  MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
1719  if (parse->hasAggs)
1720  {
1721  get_agg_clause_costs(root, (Node *) tlist, AGGSPLIT_SIMPLE,
1722  &agg_costs);
1724  &agg_costs);
1725  }
1726 
1727  /*
1728  * Locate any window functions in the tlist. (We don't need to look
1729  * anywhere else, since expressions used in ORDER BY will be in there
1730  * too.) Note that they could all have been eliminated by constant
1731  * folding, in which case we don't need to do any more work.
1732  */
1733  if (parse->hasWindowFuncs)
1734  {
1735  wflists = find_window_functions((Node *) tlist,
1736  list_length(parse->windowClause));
1737  if (wflists->numWindowFuncs > 0)
1738  activeWindows = select_active_windows(root, wflists);
1739  else
1740  parse->hasWindowFuncs = false;
1741  }
1742 
1743  /*
1744  * Preprocess MIN/MAX aggregates, if any. Note: be careful about
1745  * adding logic between here and the query_planner() call. Anything
1746  * that is needed in MIN/MAX-optimizable cases will have to be
1747  * duplicated in planagg.c.
1748  */
1749  if (parse->hasAggs)
1750  preprocess_minmax_aggregates(root, tlist);
1751 
1752  /*
1753  * Figure out whether there's a hard limit on the number of rows that
1754  * query_planner's result subplan needs to return. Even if we know a
1755  * hard limit overall, it doesn't apply if the query has any
1756  * grouping/aggregation operations, or SRFs in the tlist.
1757  */
1758  if (parse->groupClause ||
1759  parse->groupingSets ||
1760  parse->distinctClause ||
1761  parse->hasAggs ||
1762  parse->hasWindowFuncs ||
1763  parse->hasTargetSRFs ||
1764  root->hasHavingQual)
1765  root->limit_tuples = -1.0;
1766  else
1767  root->limit_tuples = limit_tuples;
1768 
1769  /* Set up data needed by standard_qp_callback */
1770  qp_extra.tlist = tlist;
1771  qp_extra.activeWindows = activeWindows;
1772  qp_extra.groupClause = (gset_data
1773  ? (gset_data->rollups ? linitial_node(RollupData, gset_data->rollups)->groupClause : NIL)
1774  : parse->groupClause);
1775 
1776  /*
1777  * Generate the best unsorted and presorted paths for the scan/join
1778  * portion of this Query, ie the processing represented by the
1779  * FROM/WHERE clauses. (Note there may not be any presorted paths.)
1780  * We also generate (in standard_qp_callback) pathkey representations
1781  * of the query's sort clause, distinct clause, etc.
1782  */
1783  current_rel = query_planner(root, tlist,
1784  standard_qp_callback, &qp_extra);
1785 
1786  /*
1787  * Convert the query's result tlist into PathTarget format.
1788  *
1789  * Note: it's desirable to not do this till after query_planner(),
1790  * because the target width estimates can use per-Var width numbers
1791  * that were obtained within query_planner().
1792  */
1793  final_target = create_pathtarget(root, tlist);
1794 
1795  /*
1796  * If ORDER BY was given, consider whether we should use a post-sort
1797  * projection, and compute the adjusted target for preceding steps if
1798  * so.
1799  */
1800  if (parse->sortClause)
1801  sort_input_target = make_sort_input_target(root,
1802  final_target,
1803  &have_postponed_srfs);
1804  else
1805  sort_input_target = final_target;
1806 
1807  /*
1808  * If we have window functions to deal with, the output from any
1809  * grouping step needs to be what the window functions want;
1810  * otherwise, it should be sort_input_target.
1811  */
1812  if (activeWindows)
1813  grouping_target = make_window_input_target(root,
1814  final_target,
1815  activeWindows);
1816  else
1817  grouping_target = sort_input_target;
1818 
1819  /*
1820  * If we have grouping or aggregation to do, the topmost scan/join
1821  * plan node must emit what the grouping step wants; otherwise, it
1822  * should emit grouping_target.
1823  */
1824  have_grouping = (parse->groupClause || parse->groupingSets ||
1825  parse->hasAggs || root->hasHavingQual);
1826  if (have_grouping)
1827  scanjoin_target = make_group_input_target(root, final_target);
1828  else
1829  scanjoin_target = grouping_target;
1830 
1831  /*
1832  * If there are any SRFs in the targetlist, we must separate each of
1833  * these PathTargets into SRF-computing and SRF-free targets. Replace
1834  * each of the named targets with a SRF-free version, and remember the
1835  * list of additional projection steps we need to add afterwards.
1836  */
1837  if (parse->hasTargetSRFs)
1838  {
1839  /* final_target doesn't recompute any SRFs in sort_input_target */
1840  split_pathtarget_at_srfs(root, final_target, sort_input_target,
1841  &final_targets,
1842  &final_targets_contain_srfs);
1843  final_target = linitial_node(PathTarget, final_targets);
1844  Assert(!linitial_int(final_targets_contain_srfs));
1845  /* likewise for sort_input_target vs. grouping_target */
1846  split_pathtarget_at_srfs(root, sort_input_target, grouping_target,
1847  &sort_input_targets,
1848  &sort_input_targets_contain_srfs);
1849  sort_input_target = linitial_node(PathTarget, sort_input_targets);
1850  Assert(!linitial_int(sort_input_targets_contain_srfs));
1851  /* likewise for grouping_target vs. scanjoin_target */
1852  split_pathtarget_at_srfs(root, grouping_target, scanjoin_target,
1853  &grouping_targets,
1854  &grouping_targets_contain_srfs);
1855  grouping_target = linitial_node(PathTarget, grouping_targets);
1856  Assert(!linitial_int(grouping_targets_contain_srfs));
1857  /* scanjoin_target will not have any SRFs precomputed for it */
1858  split_pathtarget_at_srfs(root, scanjoin_target, NULL,
1859  &scanjoin_targets,
1860  &scanjoin_targets_contain_srfs);
1861  scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
1862  Assert(!linitial_int(scanjoin_targets_contain_srfs));
1863  }
1864  else
1865  {
1866  /* initialize lists, just to keep compiler quiet */
1867  final_targets = final_targets_contain_srfs = NIL;
1868  sort_input_targets = sort_input_targets_contain_srfs = NIL;
1869  grouping_targets = grouping_targets_contain_srfs = NIL;
1870  scanjoin_targets = scanjoin_targets_contain_srfs = NIL;
1871  }
1872 
1873  /*
1874  * Forcibly apply SRF-free scan/join target to all the Paths for the
1875  * scan/join rel.
1876  *
1877  * In principle we should re-run set_cheapest() here to identify the
1878  * cheapest path, but it seems unlikely that adding the same tlist
1879  * eval costs to all the paths would change that, so we don't bother.
1880  * Instead, just assume that the cheapest-startup and cheapest-total
1881  * paths remain so. (There should be no parameterized paths anymore,
1882  * so we needn't worry about updating cheapest_parameterized_paths.)
1883  */
1884  foreach(lc, current_rel->pathlist)
1885  {
1886  Path *subpath = (Path *) lfirst(lc);
1887  Path *path;
1888 
1889  Assert(subpath->param_info == NULL);
1890  path = apply_projection_to_path(root, current_rel,
1891  subpath, scanjoin_target);
1892  /* If we had to add a Result, path is different from subpath */
1893  if (path != subpath)
1894  {
1895  lfirst(lc) = path;
1896  if (subpath == current_rel->cheapest_startup_path)
1897  current_rel->cheapest_startup_path = path;
1898  if (subpath == current_rel->cheapest_total_path)
1899  current_rel->cheapest_total_path = path;
1900  }
1901  }
1902 
1903  /*
1904  * Upper planning steps which make use of the top scan/join rel's
1905  * partial pathlist will expect partial paths for that rel to produce
1906  * the same output as complete paths ... and we just changed the
1907  * output for the complete paths, so we'll need to do the same thing
1908  * for partial paths. But only parallel-safe expressions can be
1909  * computed by partial paths.
1910  */
1911  if (current_rel->partial_pathlist &&
1912  is_parallel_safe(root, (Node *) scanjoin_target->exprs))
1913  {
1914  /* Apply the scan/join target to each partial path */
1915  foreach(lc, current_rel->partial_pathlist)
1916  {
1917  Path *subpath = (Path *) lfirst(lc);
1918  Path *newpath;
1919 
1920  /* Shouldn't have any parameterized paths anymore */
1921  Assert(subpath->param_info == NULL);
1922 
1923  /*
1924  * Don't use apply_projection_to_path() here, because there
1925  * could be other pointers to these paths, and therefore we
1926  * mustn't modify them in place.
1927  */
1928  newpath = (Path *) create_projection_path(root,
1929  current_rel,
1930  subpath,
1931  scanjoin_target);
1932  lfirst(lc) = newpath;
1933  }
1934  }
1935  else
1936  {
1937  /*
1938  * In the unfortunate event that scanjoin_target is not
1939  * parallel-safe, we can't apply it to the partial paths; in that
1940  * case, we'll need to forget about the partial paths, which
1941  * aren't valid input for upper planning steps.
1942  */
1943  current_rel->partial_pathlist = NIL;
1944  }
1945 
1946  /* Now fix things up if scan/join target contains SRFs */
1947  if (parse->hasTargetSRFs)
1948  adjust_paths_for_srfs(root, current_rel,
1949  scanjoin_targets,
1950  scanjoin_targets_contain_srfs);
1951 
1952  /*
1953  * Save the various upper-rel PathTargets we just computed into
1954  * root->upper_targets[]. The core code doesn't use this, but it
1955  * provides a convenient place for extensions to get at the info. For
1956  * consistency, we save all the intermediate targets, even though some
1957  * of the corresponding upperrels might not be needed for this query.
1958  */
1959  root->upper_targets[UPPERREL_FINAL] = final_target;
1960  root->upper_targets[UPPERREL_WINDOW] = sort_input_target;
1961  root->upper_targets[UPPERREL_GROUP_AGG] = grouping_target;
1962 
1963  /*
1964  * If we have grouping and/or aggregation, consider ways to implement
1965  * that. We build a new upperrel representing the output of this
1966  * phase.
1967  */
1968  if (have_grouping)
1969  {
1970  current_rel = create_grouping_paths(root,
1971  current_rel,
1972  grouping_target,
1973  &agg_costs,
1974  gset_data);
1975  /* Fix things up if grouping_target contains SRFs */
1976  if (parse->hasTargetSRFs)
1977  adjust_paths_for_srfs(root, current_rel,
1978  grouping_targets,
1979  grouping_targets_contain_srfs);
1980  }
1981 
1982  /*
1983  * If we have window functions, consider ways to implement those. We
1984  * build a new upperrel representing the output of this phase.
1985  */
1986  if (activeWindows)
1987  {
1988  current_rel = create_window_paths(root,
1989  current_rel,
1990  grouping_target,
1991  sort_input_target,
1992  tlist,
1993  wflists,
1994  activeWindows);
1995  /* Fix things up if sort_input_target contains SRFs */
1996  if (parse->hasTargetSRFs)
1997  adjust_paths_for_srfs(root, current_rel,
1998  sort_input_targets,
1999  sort_input_targets_contain_srfs);
2000  }
2001 
2002  /*
2003  * If there is a DISTINCT clause, consider ways to implement that. We
2004  * build a new upperrel representing the output of this phase.
2005  */
2006  if (parse->distinctClause)
2007  {
2008  current_rel = create_distinct_paths(root,
2009  current_rel);
2010  }
2011  } /* end of if (setOperations) */
2012 
2013  /*
2014  * If ORDER BY was given, consider ways to implement that, and generate a
2015  * new upperrel containing only paths that emit the correct ordering and
2016  * project the correct final_target. We can apply the original
2017  * limit_tuples limit in sort costing here, but only if there are no
2018  * postponed SRFs.
2019  */
2020  if (parse->sortClause)
2021  {
2022  current_rel = create_ordered_paths(root,
2023  current_rel,
2024  final_target,
2025  have_postponed_srfs ? -1.0 :
2026  limit_tuples);
2027  /* Fix things up if final_target contains SRFs */
2028  if (parse->hasTargetSRFs)
2029  adjust_paths_for_srfs(root, current_rel,
2030  final_targets,
2031  final_targets_contain_srfs);
2032  }
2033 
2034  /*
2035  * Now we are prepared to build the final-output upperrel.
2036  */
2037  final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
2038 
2039  /*
2040  * If the input rel is marked consider_parallel and there's nothing that's
2041  * not parallel-safe in the LIMIT clause, then the final_rel can be marked
2042  * consider_parallel as well. Note that if the query has rowMarks or is
2043  * not a SELECT, consider_parallel will be false for every relation in the
2044  * query.
2045  */
2046  if (current_rel->consider_parallel &&
2047  is_parallel_safe(root, parse->limitOffset) &&
2048  is_parallel_safe(root, parse->limitCount))
2049  final_rel->consider_parallel = true;
2050 
2051  /*
2052  * If the current_rel belongs to a single FDW, so does the final_rel.
2053  */
2054  final_rel->serverid = current_rel->serverid;
2055  final_rel->userid = current_rel->userid;
2056  final_rel->useridiscurrent = current_rel->useridiscurrent;
2057  final_rel->fdwroutine = current_rel->fdwroutine;
2058 
2059  /*
2060  * Generate paths for the final_rel. Insert all surviving paths, with
2061  * LockRows, Limit, and/or ModifyTable steps added if needed.
2062  */
2063  foreach(lc, current_rel->pathlist)
2064  {
2065  Path *path = (Path *) lfirst(lc);
2066 
2067  /*
2068  * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
2069  * (Note: we intentionally test parse->rowMarks not root->rowMarks
2070  * here. If there are only non-locking rowmarks, they should be
2071  * handled by the ModifyTable node instead. However, root->rowMarks
2072  * is what goes into the LockRows node.)
2073  */
2074  if (parse->rowMarks)
2075  {
2076  path = (Path *) create_lockrows_path(root, final_rel, path,
2077  root->rowMarks,
2078  SS_assign_special_param(root));
2079  }
2080 
2081  /*
2082  * If there is a LIMIT/OFFSET clause, add the LIMIT node.
2083  */
2084  if (limit_needed(parse))
2085  {
2086  path = (Path *) create_limit_path(root, final_rel, path,
2087  parse->limitOffset,
2088  parse->limitCount,
2089  offset_est, count_est);
2090  }
2091 
2092  /*
2093  * If this is an INSERT/UPDATE/DELETE, and we're not being called from
2094  * inheritance_planner, add the ModifyTable node.
2095  */
2096  if (parse->commandType != CMD_SELECT && !inheritance_update)
2097  {
2098  List *withCheckOptionLists;
2099  List *returningLists;
2100  List *rowMarks;
2101 
2102  /*
2103  * Set up the WITH CHECK OPTION and RETURNING lists-of-lists, if
2104  * needed.
2105  */
2106  if (parse->withCheckOptions)
2107  withCheckOptionLists = list_make1(parse->withCheckOptions);
2108  else
2109  withCheckOptionLists = NIL;
2110 
2111  if (parse->returningList)
2112  returningLists = list_make1(parse->returningList);
2113  else
2114  returningLists = NIL;
2115 
2116  /*
2117  * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
2118  * will have dealt with fetching non-locked marked rows, else we
2119  * need to have ModifyTable do that.
2120  */
2121  if (parse->rowMarks)
2122  rowMarks = NIL;
2123  else
2124  rowMarks = root->rowMarks;
2125 
2126  path = (Path *)
2127  create_modifytable_path(root, final_rel,
2128  parse->commandType,
2129  parse->canSetTag,
2130  parse->resultRelation,
2131  NIL,
2133  list_make1(path),
2134  list_make1(root),
2135  withCheckOptionLists,
2136  returningLists,
2137  rowMarks,
2138  parse->onConflict,
2139  SS_assign_special_param(root));
2140  }
2141 
2142  /* And shove it into final_rel */
2143  add_path(final_rel, path);
2144  }
2145 
2146  /*
2147  * If there is an FDW that's responsible for all baserels of the query,
2148  * let it consider adding ForeignPaths.
2149  */
2150  if (final_rel->fdwroutine &&
2151  final_rel->fdwroutine->GetForeignUpperPaths)
2153  current_rel, final_rel);
2154 
2155  /* Let extensions possibly add some more paths */
2157  (*create_upper_paths_hook) (root, UPPERREL_FINAL,
2158  current_rel, final_rel);
2159 
2160  /* Note: currently, we leave it to callers to do set_cheapest() */
2161 }
Path * apply_projection_to_path(PlannerInfo *root, RelOptInfo *rel, Path *path, PathTarget *target)
Definition: pathnode.c:2384
RelOptInfo * plan_set_operations(PlannerInfo *root)
Definition: prepunion.c:143
GetForeignUpperPaths_function GetForeignUpperPaths
Definition: fdwapi.h:197
Node * limitOffset
Definition: parsenodes.h:158
#define NIL
Definition: pg_list.h:69
List * rowMarks
Definition: relation.h:256
static double preprocess_limit(PlannerInfo *root, double tuple_fraction, int64 *offset_est, int64 *count_est)
Definition: planner.c:2591
PathTarget * pathtarget
Definition: relation.h:1043
Query * parse
Definition: relation.h:155
const char * LCS_asString(LockClauseStrength strength)
Definition: analyze.c:2581
void add_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:420
List * sortClause
Definition: parsenodes.h:156
LockRowsPath * create_lockrows_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *rowMarks, int epqParam)
Definition: pathnode.c:3161
int SS_assign_special_param(PlannerInfo *root)
Definition: subselect.c:429
OnConflictExpr * onConflict
Definition: parsenodes.h:142
List * make_pathkeys_for_sortclauses(PlannerInfo *root, List *sortclauses, List *tlist)
Definition: pathkeys.c:865
List * preprocess_onconflict_targetlist(List *tlist, int result_relation, List *range_table)
Definition: preptlist.c:206
static List * preprocess_groupclause(PlannerInfo *root, List *force)
Definition: planner.c:2990
RelOptInfo * query_planner(PlannerInfo *root, List *tlist, query_pathkeys_callback qp_callback, void *qp_extra)
Definition: planmain.c:54
struct Path * cheapest_startup_path
Definition: relation.h:602
Oid userid
Definition: relation.h:633
List * withCheckOptions
Definition: parsenodes.h:169
void split_pathtarget_at_srfs(PlannerInfo *root, PathTarget *target, PathTarget *input_target, List **targets, List **targets_contain_srfs)
Definition: tlist.c:840
void get_agg_clause_costs(PlannerInfo *root, Node *clause, AggSplit aggsplit, AggClauseCosts *costs)
Definition: clauses.c:467
bool hasAggs
Definition: parsenodes.h:123
int resultRelation
Definition: parsenodes.h:120
int numWindowFuncs
Definition: clauses.h:25
WindowFuncLists * find_window_functions(Node *clause, Index maxWinRef)
Definition: clauses.c:740
List * preprocess_targetlist(PlannerInfo *root, List *tlist)
Definition: preptlist.c:64
ParamPathInfo * param_info
Definition: relation.h:1045
List * groupingSets
Definition: parsenodes.h:148
Definition: nodes.h:510
ProjectionPath * create_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2293
int errcode(int sqlerrcode)
Definition: elog.c:575
List * partial_pathlist
Definition: relation.h:601
static RelOptInfo * create_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel)
Definition: planner.c:4766
#define MemSet(start, val, len)
Definition: c.h:853
List * tlist
Definition: planner.c:91
static PathTarget * make_group_input_target(PlannerInfo *root, PathTarget *final_target)
Definition: planner.c:5137
create_upper_paths_hook_type create_upper_paths_hook
Definition: planner.c:70
bool useridiscurrent
Definition: relation.h:634
void preprocess_minmax_aggregates(PlannerInfo *root, List *tlist)
Definition: planagg.c:75
List * rowMarks
Definition: parsenodes.h:161
#define linitial_node(type, l)
Definition: pg_list.h:114
LimitPath * create_limit_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, Node *limitOffset, Node *limitCount, int64 offset_est, int64 count_est)
Definition: pathnode.c:3320
bool hasRecursion
Definition: relation.h:308
List * windowClause
Definition: parsenodes.h:152
List * targetList
Definition: parsenodes.h:138
static List * postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
Definition: planner.c:5360
static void adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel, List *targets, List *targets_contain_srfs)
Definition: planner.c:5919
#define list_make1(x1)
Definition: pg_list.h:139
#define linitial_int(l)
Definition: pg_list.h:112
static RelOptInfo * create_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, const AggClauseCosts *agg_costs, grouping_sets_data *gd)
Definition: planner.c:3603
bool is_parallel_safe(PlannerInfo *root, Node *node)
Definition: clauses.c:1087
double tuple_fraction
Definition: relation.h:294
List * rtable
Definition: parsenodes.h:135
List * distinctClause
Definition: parsenodes.h:154
#define ERROR
Definition: elog.h:43
static bool limit_needed(Query *parse)
Definition: planner.c:2776
double limit_tuples
Definition: relation.h:295
RelOptInfo * fetch_upper_rel(PlannerInfo *root, UpperRelationKind kind, Relids relids)
Definition: relnode.c:1137
struct Path * cheapest_total_path
Definition: relation.h:603
Node * limitCount
Definition: parsenodes.h:159
static PathTarget * make_window_input_target(PlannerInfo *root, PathTarget *final_target, List *activeWindows)
Definition: planner.c:5490
struct FdwRoutine * fdwroutine
Definition: relation.h:636
static void standard_qp_callback(PlannerInfo *root, void *extra)
Definition: planner.c:3358
#define create_pathtarget(root, tlist)
Definition: tlist.h:69
static grouping_sets_data * preprocess_grouping_sets(PlannerInfo *root)
Definition: planner.c:2170
List * returningList
Definition: parsenodes.h:144
#define list_make1_int(x1)
Definition: pg_list.h:145
#define ereport(elevel, rest)
Definition: elog.h:122
List * sort_pathkeys
Definition: relation.h:267
static RelOptInfo * create_ordered_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, double limit_tuples)
Definition: planner.c:4977
Oid serverid
Definition: relation.h:632
List * exprs
Definition: relation.h:972
CmdType commandType
Definition: parsenodes.h:110
bool hasTargetSRFs
Definition: parsenodes.h:125
List * groupClause
Definition: planner.c:93
#define Assert(condition)
Definition: c.h:670
#define lfirst(lc)
Definition: pg_list.h:106
bool hasWindowFuncs
Definition: parsenodes.h:124
ModifyTablePath * create_modifytable_path(PlannerInfo *root, RelOptInfo *rel, CmdType operation, bool canSetTag, Index nominalRelation, List *partitioned_rels, List *resultRelations, List *subpaths, List *subroots, List *withCheckOptionLists, List *returningLists, List *rowMarks, OnConflictExpr *onconflict, int epqParam)
Definition: pathnode.c:3220
bool canSetTag
Definition: parsenodes.h:116
static int list_length(const List *l)
Definition: pg_list.h:89
static RelOptInfo * create_window_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *input_target, PathTarget *output_target, List *tlist, WindowFuncLists *wflists, List *activeWindows)
Definition: planner.c:4585
bool consider_parallel
Definition: relation.h:593
List * activeWindows
Definition: planner.c:92
Node * setOperations
Definition: parsenodes.h:163
List * groupClause
Definition: parsenodes.h:146
int errmsg(const char *fmt,...)
Definition: elog.c:797
List * onConflictSet
Definition: primnodes.h:1503
static List * select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
Definition: planner.c:5393
bool hasHavingQual
Definition: relation.h:305
List * pathlist
Definition: relation.h:599
#define copyObject(obj)
Definition: nodes.h:623
Node * havingQual
Definition: parsenodes.h:150
List * processed_tlist
Definition: relation.h:284
Definition: pg_list.h:45
Datum subpath(PG_FUNCTION_ARGS)
Definition: ltree_op.c:234
static PathTarget * make_sort_input_target(PlannerInfo *root, PathTarget *final_target, bool *have_postponed_srfs)
Definition: planner.c:5705
static struct subre * parse(struct vars *, int, int, struct state *, struct state *)
Definition: regcomp.c:649
struct PathTarget * upper_targets[UPPERREL_FINAL+1]
Definition: relation.h:278

◆ inheritance_planner()

static void inheritance_planner ( PlannerInfo root)
static

Definition at line 1079 of file planner.c.

References add_path(), adjust_appendrel_attrs(), PlannerInfo::append_rel_list, Assert, bms_add_member(), bms_is_member(), bms_make_singleton(), bms_overlap(), Query::canSetTag, ChangeVarNodes(), RelOptInfo::cheapest_total_path, AppendRelInfo::child_relid, CMD_INSERT, Query::commandType, copyObject, create_modifytable_path(), fetch_upper_rel(), get_partitioned_child_rels(), grouping_planner(), PlannerInfo::hasInheritedTarget, RangeTblEntry::inh, PlannerInfo::init_plans, IS_DUMMY_PATH, PlannerInfo::join_info_list, lappend(), lappend_int(), lfirst_node, list_concat(), list_copy_tail(), list_length(), makeNode, NIL, Query::onConflict, palloc0(), AppendRelInfo::parent_relid, parse(), PlannerInfo::parse, PlannerInfo::placeholder_list, pull_varnos(), RangeTblEntry::relkind, RELKIND_PARTITIONED_TABLE, Query::resultRelation, Query::returningList, Query::rowMarks, PlannerInfo::rowMarks, rt_fetch, Query::rtable, RTE_SUBQUERY, RangeTblEntry::rtekind, RangeTblEntry::securityQuals, set_cheapest(), set_dummy_rel_pathlist(), PlannerInfo::simple_rel_array, PlannerInfo::simple_rel_array_size, PlannerInfo::simple_rte_array, SS_assign_special_param(), subpath(), AppendRelInfo::translated_vars, UPPERREL_FINAL, and Query::withCheckOptions.

Referenced by subquery_planner().

1080 {
1081  Query *parse = root->parse;
1082  int top_parentRTindex = parse->resultRelation;
1083  Bitmapset *subqueryRTindexes;
1084  Bitmapset *modifiableARIindexes;
1085  int nominalRelation = -1;
1086  List *final_rtable = NIL;
1087  int save_rel_array_size = 0;
1088  RelOptInfo **save_rel_array = NULL;
1089  List *subpaths = NIL;
1090  List *subroots = NIL;
1091  List *resultRelations = NIL;
1092  List *withCheckOptionLists = NIL;
1093  List *returningLists = NIL;
1094  List *rowMarks;
1095  RelOptInfo *final_rel;
1096  ListCell *lc;
1097  Index rti;
1098  RangeTblEntry *parent_rte;
1099  List *partitioned_rels = NIL;
1100  PlannerInfo *parent_root;
1101  Query *parent_parse;
1102  Bitmapset *parent_relids = bms_make_singleton(top_parentRTindex);
1103  PlannerInfo **parent_roots = NULL;
1104 
1105  Assert(parse->commandType != CMD_INSERT);
1106 
1107  /*
1108  * We generate a modified instance of the original Query for each target
1109  * relation, plan that, and put all the plans into a list that will be
1110  * controlled by a single ModifyTable node. All the instances share the
1111  * same rangetable, but each instance must have its own set of subquery
1112  * RTEs within the finished rangetable because (1) they are likely to get
1113  * scribbled on during planning, and (2) it's not inconceivable that
1114  * subqueries could get planned differently in different cases. We need
1115  * not create duplicate copies of other RTE kinds, in particular not the
1116  * target relations, because they don't have either of those issues. Not
1117  * having to duplicate the target relations is important because doing so
1118  * (1) would result in a rangetable of length O(N^2) for N targets, with
1119  * at least O(N^3) work expended here; and (2) would greatly complicate
1120  * management of the rowMarks list.
1121  *
1122  * To begin with, generate a bitmapset of the relids of the subquery RTEs.
1123  */
1124  subqueryRTindexes = NULL;
1125  rti = 1;
1126  foreach(lc, parse->rtable)
1127  {
1129 
1130  if (rte->rtekind == RTE_SUBQUERY)
1131  subqueryRTindexes = bms_add_member(subqueryRTindexes, rti);
1132  rti++;
1133  }
1134 
1135  /*
1136  * Next, we want to identify which AppendRelInfo items contain references
1137  * to any of the aforesaid subquery RTEs. These items will need to be
1138  * copied and modified to adjust their subquery references; whereas the
1139  * other ones need not be touched. It's worth being tense over this
1140  * because we can usually avoid processing most of the AppendRelInfo
1141  * items, thereby saving O(N^2) space and time when the target is a large
1142  * inheritance tree. We can identify AppendRelInfo items by their
1143  * child_relid, since that should be unique within the list.
1144  */
1145  modifiableARIindexes = NULL;
1146  if (subqueryRTindexes != NULL)
1147  {
1148  foreach(lc, root->append_rel_list)
1149  {
1150  AppendRelInfo *appinfo = lfirst_node(AppendRelInfo, lc);
1151 
1152  if (bms_is_member(appinfo->parent_relid, subqueryRTindexes) ||
1153  bms_is_member(appinfo->child_relid, subqueryRTindexes) ||
1155  subqueryRTindexes))
1156  modifiableARIindexes = bms_add_member(modifiableARIindexes,
1157  appinfo->child_relid);
1158  }
1159  }
1160 
1161  /*
1162  * If the parent RTE is a partitioned table, we should use that as the
1163  * nominal relation, because the RTEs added for partitioned tables
1164  * (including the root parent) as child members of the inheritance set do
1165  * not appear anywhere else in the plan. The situation is exactly the
1166  * opposite in the case of non-partitioned inheritance parent as described
1167  * below. For the same reason, collect the list of descendant partitioned
1168  * tables to be saved in ModifyTable node, so that executor can lock those
1169  * as well.
1170  */
1171  parent_rte = rt_fetch(top_parentRTindex, root->parse->rtable);
1172  if (parent_rte->relkind == RELKIND_PARTITIONED_TABLE)
1173  {
1174  nominalRelation = top_parentRTindex;
1175  partitioned_rels = get_partitioned_child_rels(root, top_parentRTindex);
1176  /* The root partitioned table is included as a child rel */
1177  Assert(list_length(partitioned_rels) >= 1);
1178  }
1179 
1180  /*
1181  * The PlannerInfo for each child is obtained by translating the relevant
1182  * members of the PlannerInfo for its immediate parent, which we find
1183  * using the parent_relid in its AppendRelInfo. We save the PlannerInfo
1184  * for each parent in an array indexed by relid for fast retrieval. Since
1185  * the maximum number of parents is limited by the number of RTEs in the
1186  * query, we use that number to allocate the array. An extra entry is
1187  * needed since relids start from 1.
1188  */
1189  parent_roots = (PlannerInfo **) palloc0((list_length(parse->rtable) + 1) *
1190  sizeof(PlannerInfo *));
1191  parent_roots[top_parentRTindex] = root;
1192 
1193  /*
1194  * And now we can get on with generating a plan for each child table.
1195  */
1196  foreach(lc, root->append_rel_list)
1197  {
1198  AppendRelInfo *appinfo = lfirst_node(AppendRelInfo, lc);
1199  PlannerInfo *subroot;
1200  RangeTblEntry *child_rte;
1201  RelOptInfo *sub_final_rel;
1202  Path *subpath;
1203 
1204  /* append_rel_list contains all append rels; ignore others */
1205  if (!bms_is_member(appinfo->parent_relid, parent_relids))
1206  continue;
1207 
1208  /*
1209  * expand_inherited_rtentry() always processes a parent before any of
1210  * that parent's children, so the parent_root for this relation should
1211  * already be available.
1212  */
1213  parent_root = parent_roots[appinfo->parent_relid];
1214  Assert(parent_root != NULL);
1215  parent_parse = parent_root->parse;
1216 
1217  /*
1218  * We need a working copy of the PlannerInfo so that we can control
1219  * propagation of information back to the main copy.
1220  */
1221  subroot = makeNode(PlannerInfo);
1222  memcpy(subroot, parent_root, sizeof(PlannerInfo));
1223 
1224  /*
1225  * Generate modified query with this rel as target. We first apply
1226  * adjust_appendrel_attrs, which copies the Query and changes
1227  * references to the parent RTE to refer to the current child RTE,
1228  * then fool around with subquery RTEs.
1229  */
1230  subroot->parse = (Query *)
1231  adjust_appendrel_attrs(parent_root,
1232  (Node *) parent_parse,
1233  1, &appinfo);
1234 
1235  /*
1236  * If there are securityQuals attached to the parent, move them to the
1237  * child rel (they've already been transformed properly for that).
1238  */
1239  parent_rte = rt_fetch(appinfo->parent_relid, subroot->parse->rtable);
1240  child_rte = rt_fetch(appinfo->child_relid, subroot->parse->rtable);
1241  child_rte->securityQuals = parent_rte->securityQuals;
1242  parent_rte->securityQuals = NIL;
1243 
1244  /*
1245  * The rowMarks list might contain references to subquery RTEs, so
1246  * make a copy that we can apply ChangeVarNodes to. (Fortunately, the
1247  * executor doesn't need to see the modified copies --- we can just
1248  * pass it the original rowMarks list.)
1249  */
1250  subroot->rowMarks = copyObject(parent_root->rowMarks);
1251 
1252  /*
1253  * The append_rel_list likewise might contain references to subquery
1254  * RTEs (if any subqueries were flattenable UNION ALLs). So prepare
1255  * to apply ChangeVarNodes to that, too. As explained above, we only
1256  * want to copy items that actually contain such references; the rest
1257  * can just get linked into the subroot's append_rel_list.
1258  *
1259  * If we know there are no such references, we can just use the outer
1260  * append_rel_list unmodified.
1261  */
1262  if (modifiableARIindexes != NULL)
1263  {
1264  ListCell *lc2;
1265 
1266  subroot->append_rel_list = NIL;
1267  foreach(lc2, parent_root->append_rel_list)
1268  {
1269  AppendRelInfo *appinfo2 = lfirst_node(AppendRelInfo, lc2);
1270 
1271  if (bms_is_member(appinfo2->child_relid, modifiableARIindexes))
1272  appinfo2 = copyObject(appinfo2);
1273 
1274  subroot->append_rel_list = lappend(subroot->append_rel_list,
1275  appinfo2);
1276  }
1277  }
1278 
1279  /*
1280  * Add placeholders to the child Query's rangetable list to fill the
1281  * RT indexes already reserved for subqueries in previous children.
1282  * These won't be referenced, so there's no need to make them very
1283  * valid-looking.
1284  */
1285  while (list_length(subroot->parse->rtable) < list_length(final_rtable))
1286  subroot->parse->rtable = lappend(subroot->parse->rtable,
1288 
1289  /*
1290  * If this isn't the first child Query, generate duplicates of all
1291  * subquery RTEs, and adjust Var numbering to reference the
1292  * duplicates. To simplify the loop logic, we scan the original rtable
1293  * not the copy just made by adjust_appendrel_attrs; that should be OK
1294  * since subquery RTEs couldn't contain any references to the target
1295  * rel.
1296  */
1297  if (final_rtable != NIL && subqueryRTindexes != NULL)
1298  {
1299  ListCell *lr;
1300 
1301  rti = 1;
1302  foreach(lr, parent_parse->rtable)
1303  {
1305 
1306  if (bms_is_member(rti, subqueryRTindexes))
1307  {
1308  Index newrti;
1309 
1310  /*
1311  * The RTE can't contain any references to its own RT
1312  * index, except in its securityQuals, so we can save a
1313  * few cycles by applying ChangeVarNodes to the rest of
1314  * the rangetable before we append the RTE to it.
1315  */
1316  newrti = list_length(subroot->parse->rtable) + 1;
1317  ChangeVarNodes((Node *) subroot->parse, rti, newrti, 0);
1318  ChangeVarNodes((Node *) subroot->rowMarks, rti, newrti, 0);
1319  /* Skip processing unchanging parts of append_rel_list */
1320  if (modifiableARIindexes != NULL)
1321  {
1322  ListCell *lc2;
1323 
1324  foreach(lc2, subroot->append_rel_list)
1325  {
1326  AppendRelInfo *appinfo2 = lfirst_node(AppendRelInfo, lc2);
1327 
1328  if (bms_is_member(appinfo2->child_relid,
1329  modifiableARIindexes))
1330  ChangeVarNodes((Node *) appinfo2, rti, newrti, 0);
1331  }
1332  }
1333  rte = copyObject(rte);
1334  ChangeVarNodes((Node *) rte->securityQuals, rti, newrti, 0);
1335  subroot->parse->rtable = lappend(subroot->parse->rtable,
1336  rte);
1337  }
1338  rti++;
1339  }
1340  }
1341 
1342  /* There shouldn't be any OJ info to translate, as yet */
1343  Assert(subroot->join_info_list == NIL);
1344  /* and we haven't created PlaceHolderInfos, either */
1345  Assert(subroot->placeholder_list == NIL);
1346  /* hack to mark target relation as an inheritance partition */
1347  subroot->hasInheritedTarget = true;
1348 
1349  /*
1350  * If the child is further partitioned, remember it as a parent. Since
1351  * a partitioned table does not have any data, we don't need to create
1352  * a plan for it. We do, however, need to remember the PlannerInfo for
1353  * use when processing its children.
1354  */
1355  if (child_rte->inh)
1356  {
1357  Assert(child_rte->relkind == RELKIND_PARTITIONED_TABLE);
1358  parent_relids =
1359  bms_add_member(parent_relids, appinfo->child_relid);
1360  parent_roots[appinfo->child_relid] = subroot;
1361 
1362  continue;
1363  }
1364 
1365  /* Generate Path(s) for accessing this result relation */
1366  grouping_planner(subroot, true, 0.0 /* retrieve all tuples */ );
1367 
1368  /*
 1369  * Set the nominal target relation of the ModifyTable node if not
1370  * already done. We use the inheritance parent RTE as the nominal
1371  * target relation if it's a partitioned table (see just above this
1372  * loop). In the non-partitioned parent case, we'll use the first
1373  * child relation (even if it's excluded) as the nominal target
1374  * relation. Because of the way expand_inherited_rtentry works, the
1375  * latter should be the RTE representing the parent table in its role
1376  * as a simple member of the inheritance set.
1377  *
1378  * It would be logically cleaner to *always* use the inheritance
1379  * parent RTE as the nominal relation; but that RTE is not otherwise
1380  * referenced in the plan in the non-partitioned inheritance case.
1381  * Instead the duplicate child RTE created by expand_inherited_rtentry
1382  * is used elsewhere in the plan, so using the original parent RTE
1383  * would give rise to confusing use of multiple aliases in EXPLAIN
1384  * output for what the user will think is the "same" table. OTOH,
1385  * it's not a problem in the partitioned inheritance case, because the
1386  * duplicate child RTE added for the parent does not appear anywhere
1387  * else in the plan tree.
1388  */
1389  if (nominalRelation < 0)
1390  nominalRelation = appinfo->child_relid;
1391 
1392  /*
1393  * Select cheapest path in case there's more than one. We always run
1394  * modification queries to conclusion, so we care only for the
1395  * cheapest-total path.
1396  */
1397  sub_final_rel = fetch_upper_rel(subroot, UPPERREL_FINAL, NULL);
1398  set_cheapest(sub_final_rel);
1399  subpath = sub_final_rel->cheapest_total_path;
1400 
1401  /*
1402  * If this child rel was excluded by constraint exclusion, exclude it
1403  * from the result plan.
1404  */
1405  if (IS_DUMMY_PATH(subpath))
1406  continue;
1407 
1408  /*
1409  * If this is the first non-excluded child, its post-planning rtable
1410  * becomes the initial contents of final_rtable; otherwise, append
1411  * just its modified subquery RTEs to final_rtable.
1412  */
1413  if (final_rtable == NIL)
1414  final_rtable = subroot->parse->rtable;
1415  else
1416  final_rtable = list_concat(final_rtable,
1417  list_copy_tail(subroot->parse->rtable,
1418  list_length(final_rtable)));
1419 
1420  /*
1421  * We need to collect all the RelOptInfos from all child plans into
1422  * the main PlannerInfo, since setrefs.c will need them. We use the
1423  * last child's simple_rel_array (previous ones are too short), so we
1424  * have to propagate forward the RelOptInfos that were already built
1425  * in previous children.
1426  */
1427  Assert(subroot->simple_rel_array_size >= save_rel_array_size);
1428  for (rti = 1; rti < save_rel_array_size; rti++)
1429  {
1430  RelOptInfo *brel = save_rel_array[rti];
1431 
1432  if (brel)
1433  subroot->simple_rel_array[rti] = brel;
1434  }
1435  save_rel_array_size = subroot->simple_rel_array_size;
1436  save_rel_array = subroot->simple_rel_array;
1437 
1438  /* Make sure any initplans from this rel get into the outer list */
1439  root->init_plans = subroot->init_plans;
1440 
1441  /* Build list of sub-paths */
1442  subpaths = lappend(subpaths, subpath);
1443 
1444  /* Build list of modified subroots, too */
1445  subroots = lappend(subroots, subroot);
1446 
1447  /* Build list of target-relation RT indexes */
1448  resultRelations = lappend_int(resultRelations, appinfo->child_relid);
1449 
1450  /* Build lists of per-relation WCO and RETURNING targetlists */
1451  if (parse->withCheckOptions)
1452  withCheckOptionLists = lappend(withCheckOptionLists,
1453  subroot->parse->withCheckOptions);
1454  if (parse->returningList)
1455  returningLists = lappend(returningLists,
1456  subroot->parse->returningList);
1457 
1458  Assert(!parse->onConflict);
1459  }
1460 
1461  /* Result path must go into outer query's FINAL upperrel */
1462  final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1463 
1464  /*
1465  * We don't currently worry about setting final_rel's consider_parallel
1466  * flag in this case, nor about allowing FDWs or create_upper_paths_hook
1467  * to get control here.
1468  */
1469 
1470  /*
1471  * If we managed to exclude every child rel, return a dummy plan; it
1472  * doesn't even need a ModifyTable node.
1473  */
1474  if (subpaths == NIL)
1475  {
1476  set_dummy_rel_pathlist(final_rel);
1477  return;
1478  }
1479 
1480  /*
1481  * Put back the final adjusted rtable into the master copy of the Query.
1482  * (We mustn't do this if we found no non-excluded children.)
1483  */
1484  parse->rtable = final_rtable;
1485  root->simple_rel_array_size = save_rel_array_size;
1486  root->simple_rel_array = save_rel_array;
1487  /* Must reconstruct master's simple_rte_array, too */
1488  root->simple_rte_array = (RangeTblEntry **)
1489  palloc0((list_length(final_rtable) + 1) * sizeof(RangeTblEntry *));
1490  rti = 1;
1491  foreach(lc, final_rtable)
1492  {
1494 
1495  root->simple_rte_array[rti++] = rte;
1496  }
1497 
1498  /*
1499  * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node will
1500  * have dealt with fetching non-locked marked rows, else we need to have
1501  * ModifyTable do that.
1502  */
1503  if (parse->rowMarks)
1504  rowMarks = NIL;
1505  else
1506  rowMarks = root->rowMarks;
1507 
1508  /* Create Path representing a ModifyTable to do the UPDATE/DELETE work */
1509  add_path(final_rel, (Path *)
1510  create_modifytable_path(root, final_rel,
1511  parse->commandType,
1512  parse->canSetTag,
1513  nominalRelation,
1514  partitioned_rels,
1515  resultRelations,
1516  subpaths,
1517  subroots,
1518  withCheckOptionLists,
1519  returningLists,
1520  rowMarks,
1521  NULL,
1522  SS_assign_special_param(root)));
1523 }
#define NIL
Definition: pg_list.h:69
List * rowMarks
Definition: relation.h:256
Query * parse
Definition: relation.h:155
void add_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:420
List * join_info_list
Definition: relation.h:250
int SS_assign_special_param(PlannerInfo *root)
Definition: subselect.c:429
OnConflictExpr * onConflict
Definition: parsenodes.h:142
List * get_partitioned_child_rels(PlannerInfo *root, Index rti)
Definition: planner.c:6167
List * withCheckOptions
Definition: parsenodes.h:169
List * securityQuals
Definition: parsenodes.h:1064
int resultRelation
Definition: parsenodes.h:120
Definition: nodes.h:510
List * list_concat(List *list1, List *list2)
Definition: list.c:321
List * list_copy_tail(const List *oldlist, int nskip)
Definition: list.c:1203
List * rowMarks
Definition: parsenodes.h:161
List * translated_vars
Definition: relation.h:2093
struct RelOptInfo ** simple_rel_array
Definition: relation.h:179
#define IS_DUMMY_PATH(p)
Definition: relation.h:1271
void set_dummy_rel_pathlist(RelOptInfo *rel)
Definition: allpaths.c:1802
List * rtable
Definition: parsenodes.h:135
RelOptInfo * fetch_upper_rel(PlannerInfo *root, UpperRelationKind kind, Relids relids)
Definition: relnode.c:1137
#define lfirst_node(type, lc)
Definition: pg_list.h:109
struct Path * cheapest_total_path
Definition: relation.h:603
Bitmapset * bms_make_singleton(int x)
Definition: bitmapset.c:179
List * returningList
Definition: parsenodes.h:144
int simple_rel_array_size
Definition: relation.h:180
#define rt_fetch(rangetable_index, rangetable)
Definition: parsetree.h:31
Relids pull_varnos(Node *node)
Definition: var.c:95
List * lappend_int(List *list, int datum)
Definition: list.c:146
List * lappend(List *list, void *datum)
Definition: list.c:128
RangeTblEntry ** simple_rte_array
Definition: relation.h:188
Node * adjust_appendrel_attrs(PlannerInfo *root, Node *node, int nappinfos, AppendRelInfo **appinfos)
Definition: prepunion.c:1945
void set_cheapest(RelOptInfo *parent_rel)
Definition: pathnode.c:242
#define RELKIND_PARTITIONED_TABLE
Definition: pg_class.h:168
void * palloc0(Size size)
Definition: mcxt.c:877
List * append_rel_list
Definition: relation.h:252
unsigned int Index
Definition: c.h:413
List * init_plans
Definition: relation.h:228
CmdType commandType
Definition: parsenodes.h:110
#define makeNode(_type_)
Definition: nodes.h:558
#define Assert(condition)
Definition: c.h:670
ModifyTablePath * create_modifytable_path(PlannerInfo *root, RelOptInfo *rel, CmdType operation, bool canSetTag, Index nominalRelation, List *partitioned_rels, List *resultRelations, List *subpaths, List *subroots, List *withCheckOptionLists, List *returningLists, List *rowMarks, OnConflictExpr *onconflict, int epqParam)
Definition: pathnode.c:3220
bool hasInheritedTarget
Definition: relation.h:300
bool canSetTag
Definition: parsenodes.h:116
static int list_length(const List *l)
Definition: pg_list.h:89
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:698
RTEKind rtekind
Definition: parsenodes.h:951
bool bms_overlap(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:443
static void grouping_planner(PlannerInfo *root, bool inheritance_update, double tuple_fraction)
Definition: planner.c:1554
List * placeholder_list
Definition: relation.h:258
void ChangeVarNodes(Node *node, int rt_index, int new_index, int sublevels_up)
Definition: rewriteManip.c:607
Index child_relid
Definition: relation.h:2066
#define copyObject(obj)
Definition: nodes.h:623
Index parent_relid
Definition: relation.h:2065
Definition: pg_list.h:45
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:420
Datum subpath(PG_FUNCTION_ARGS)
Definition: ltree_op.c:234
static struct subre * parse(struct vars *, int, int, struct state *, struct state *)
Definition: regcomp.c:649

◆ is_dummy_plan()

bool is_dummy_plan ( Plan plan)

Definition at line 2389 of file planner.c.

References Const::constisnull, Const::constvalue, DatumGetBool, IsA, linitial, and list_length().

2390 {
2391  if (IsA(plan, Result))
2392  {
2393  List *rcqual = (List *) ((Result *) plan)->resconstantqual;
2394 
2395  if (list_length(rcqual) == 1)
2396  {
2397  Const *constqual = (Const *) linitial(rcqual);
2398 
2399  if (constqual && IsA(constqual, Const))
2400  {
2401  if (!constqual->constisnull &&
2402  !DatumGetBool(constqual->constvalue))
2403  return true;
2404  }
2405  }
2406  }
2407  return false;
2408 }
Datum constvalue
Definition: primnodes.h:196
#define IsA(nodeptr, _type_)
Definition: nodes.h:561
#define linitial(l)
Definition: pg_list.h:111
#define DatumGetBool(X)
Definition: postgres.h:399
static int list_length(const List *l)
Definition: pg_list.h:89
Definition: pg_list.h:45
bool constisnull
Definition: primnodes.h:197

◆ limit_needed()

static bool limit_needed ( Query parse)
static

Definition at line 2776 of file planner.c.

References DatumGetInt64, IsA, Query::limitCount, and Query::limitOffset.

Referenced by grouping_planner().

2777 {
2778  Node *node;
2779 
2780  node = parse->limitCount;
2781  if (node)
2782  {
2783  if (IsA(node, Const))
2784  {
2785  /* NULL indicates LIMIT ALL, ie, no limit */
2786  if (!((Const *) node)->constisnull)
2787  return true; /* LIMIT with a constant value */
2788  }
2789  else
2790  return true; /* non-constant LIMIT */
2791  }
2792 
2793  node = parse->limitOffset;
2794  if (node)
2795  {
2796  if (IsA(node, Const))
2797  {
2798  /* Treat NULL as no offset; the executor would too */
2799  if (!((Const *) node)->constisnull)
2800  {
2801  int64 offset = DatumGetInt64(((Const *) node)->constvalue);
2802 
2803  if (offset != 0)
2804  return true; /* OFFSET with a nonzero value */
2805  }
2806  }
2807  else
2808  return true; /* non-constant OFFSET */
2809  }
2810 
2811  return false; /* don't need a Limit plan node */
2812 }
Node * limitOffset
Definition: parsenodes.h:158
#define IsA(nodeptr, _type_)
Definition: nodes.h:561
Definition: nodes.h:510
#define DatumGetInt64(X)
Definition: postgres.h:613
Node * limitCount
Definition: parsenodes.h:159

◆ make_group_input_target()

static PathTarget * make_group_input_target ( PlannerInfo root,
PathTarget final_target 
)
static

Definition at line 5137 of file planner.c.

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), create_empty_pathtarget(), PathTarget::exprs, get_pathtarget_sortgroupref, get_sortgroupref_clause_noerr(), Query::groupClause, Query::havingQual, i, lappend(), lfirst, list_free(), NIL, parse(), PlannerInfo::parse, pull_var_clause(), PVC_INCLUDE_PLACEHOLDERS, PVC_RECURSE_AGGREGATES, PVC_RECURSE_WINDOWFUNCS, and set_pathtarget_cost_width().

Referenced by grouping_planner().

5138 {
5139  Query *parse = root->parse;
5140  PathTarget *input_target;
5141  List *non_group_cols;
5142  List *non_group_vars;
5143  int i;
5144  ListCell *lc;
5145 
5146  /*
5147  * We must build a target containing all grouping columns, plus any other
5148  * Vars mentioned in the query's targetlist and HAVING qual.
5149  */
5150  input_target = create_empty_pathtarget();
5151  non_group_cols = NIL;
5152 
5153  i = 0;
5154  foreach(lc, final_target->exprs)
5155  {
5156  Expr *expr = (Expr *) lfirst(lc);
5157  Index sgref = get_pathtarget_sortgroupref(final_target, i);
5158 
5159  if (sgref && parse->groupClause &&
5160  get_sortgroupref_clause_noerr(sgref, parse->groupClause) != NULL)
5161  {
5162  /*
5163  * It's a grouping column, so add it to the input target as-is.
5164  */
5165  add_column_to_pathtarget(input_target, expr, sgref);
5166  }
5167  else
5168  {
5169  /*
5170  * Non-grouping column, so just remember the expression for later
5171  * call to pull_var_clause.
5172  */
5173  non_group_cols = lappend(non_group_cols, expr);
5174  }
5175 
5176  i++;
5177  }
5178 
5179  /*
5180  * If there's a HAVING clause, we'll need the Vars it uses, too.
5181  */
5182  if (parse->havingQual)
5183  non_group_cols = lappend(non_group_cols, parse->havingQual);
5184 
5185  /*
5186  * Pull out all the Vars mentioned in non-group cols (plus HAVING), and
5187  * add them to the input target if not already present. (A Var used
5188  * directly as a GROUP BY item will be present already.) Note this
5189  * includes Vars used in resjunk items, so we are covering the needs of
5190  * ORDER BY and window specifications. Vars used within Aggrefs and
5191  * WindowFuncs will be pulled out here, too.
5192  */
5193  non_group_vars = pull_var_clause((Node *) non_group_cols,
5197  add_new_columns_to_pathtarget(input_target, non_group_vars);
5198 
5199  /* clean up cruft */
5200  list_free(non_group_vars);
5201  list_free(non_group_cols);
5202 
5203  /* XXX this causes some redundant cost calculation ... */
5204  return set_pathtarget_cost_width(root, input_target);
5205 }
PathTarget * create_empty_pathtarget(void)
Definition: tlist.c:653
#define NIL
Definition: pg_list.h:69
Query * parse
Definition: relation.h:155
#define PVC_RECURSE_AGGREGATES
Definition: var.h:21
PathTarget * set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
Definition: costsize.c:5038
void add_column_to_pathtarget(PathTarget *target, Expr *expr, Index sortgroupref)
Definition: tlist.c:667
Definition: nodes.h:510
List * pull_var_clause(Node *node, int flags)
Definition: var.c:535
#define PVC_INCLUDE_PLACEHOLDERS
Definition: var.h:24
#define PVC_RECURSE_WINDOWFUNCS
Definition: var.h:23
#define get_pathtarget_sortgroupref(target, colno)
Definition: relation.h:979
List * lappend(List *list, void *datum)
Definition: list.c:128
List * exprs
Definition: relation.h:972
SortGroupClause * get_sortgroupref_clause_noerr(Index sortref, List *clauses)
Definition: tlist.c:446
unsigned int Index
Definition: c.h:413
#define lfirst(lc)
Definition: pg_list.h:106
List * groupClause
Definition: parsenodes.h:146
void list_free(List *list)
Definition: list.c:1133
int i
Node * havingQual
Definition: parsenodes.h:150
Definition: pg_list.h:45
static struct subre * parse(struct vars *, int, int, struct state *, struct state *)
Definition: regcomp.c:649
void add_new_columns_to_pathtarget(PathTarget *target, List *exprs)
Definition: tlist.c:714

◆ make_partial_grouping_target()

static PathTarget * make_partial_grouping_target ( PlannerInfo root,
PathTarget grouping_target 
)
static

Definition at line 5224 of file planner.c.

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), AGGSPLIT_INITIAL_SERIAL, create_empty_pathtarget(), PathTarget::exprs, get_pathtarget_sortgroupref, get_sortgroupref_clause_noerr(), Query::groupClause, Query::havingQual, i, IsA, lappend(), lfirst, list_free(), makeNode, mark_partial_aggref(), NIL, parse(), PlannerInfo::parse, pull_var_clause(), PVC_INCLUDE_AGGREGATES, PVC_INCLUDE_PLACEHOLDERS, PVC_RECURSE_WINDOWFUNCS, and set_pathtarget_cost_width().

Referenced by create_grouping_paths().

5225 {
5226  Query *parse = root->parse;
5227  PathTarget *partial_target;
5228  List *non_group_cols;
5229  List *non_group_exprs;
5230  int i;
5231  ListCell *lc;
5232 
5233  partial_target = create_empty_pathtarget();
5234  non_group_cols = NIL;
5235 
5236  i = 0;
5237  foreach(lc, grouping_target->exprs)
5238  {
5239  Expr *expr = (Expr *) lfirst(lc);
5240  Index sgref = get_pathtarget_sortgroupref(grouping_target, i);
5241 
5242  if (sgref && parse->groupClause &&
5243  get_sortgroupref_clause_noerr(sgref, parse->groupClause) != NULL)
5244  {
5245  /*
5246  * It's a grouping column, so add it to the partial_target as-is.
5247  * (This allows the upper agg step to repeat the grouping calcs.)
5248  */
5249  add_column_to_pathtarget(partial_target, expr, sgref);
5250  }
5251  else
5252  {
5253  /*
5254  * Non-grouping column, so just remember the expression for later
5255  * call to pull_var_clause.
5256  */
5257  non_group_cols = lappend(non_group_cols, expr);
5258  }
5259 
5260  i++;
5261  }
5262 
5263  /*
5264  * If there's a HAVING clause, we'll need the Vars/Aggrefs it uses, too.
5265  */
5266  if (parse->havingQual)
5267  non_group_cols = lappend(non_group_cols, parse->havingQual);
5268 
5269  /*
5270  * Pull out all the Vars, PlaceHolderVars, and Aggrefs mentioned in
5271  * non-group cols (plus HAVING), and add them to the partial_target if not
5272  * already present. (An expression used directly as a GROUP BY item will
5273  * be present already.) Note this includes Vars used in resjunk items, so
5274  * we are covering the needs of ORDER BY and window specifications.
5275  */
5276  non_group_exprs = pull_var_clause((Node *) non_group_cols,
5280 
5281  add_new_columns_to_pathtarget(partial_target, non_group_exprs);
5282 
5283  /*
5284  * Adjust Aggrefs to put them in partial mode. At this point all Aggrefs
5285  * are at the top level of the target list, so we can just scan the list
5286  * rather than recursing through the expression trees.
5287  */
5288  foreach(lc, partial_target->exprs)
5289  {
5290  Aggref *aggref = (Aggref *) lfirst(lc);
5291 
5292  if (IsA(aggref, Aggref))
5293  {
5294  Aggref *newaggref;
5295 
5296  /*
5297  * We shouldn't need to copy the substructure of the Aggref node,
5298  * but flat-copy the node itself to avoid damaging other trees.
5299  */
5300  newaggref = makeNode(Aggref);
5301  memcpy(newaggref, aggref, sizeof(Aggref));
5302 
5303  /* For now, assume serialization is required */
5305 
5306  lfirst(lc) = newaggref;
5307  }
5308  }
5309 
5310  /* clean up cruft */
5311  list_free(non_group_exprs);
5312  list_free(non_group_cols);
5313 
5314  /* XXX this causes some redundant cost calculation ... */
5315  return set_pathtarget_cost_width(root, partial_target);
5316 }
PathTarget * create_empty_pathtarget(void)
Definition: tlist.c:653
#define NIL
Definition: pg_list.h:69
#define IsA(nodeptr, _type_)
Definition: nodes.h:561
Query * parse
Definition: relation.h:155
PathTarget * set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
Definition: costsize.c:5038
void add_column_to_pathtarget(PathTarget *target, Expr *expr, Index sortgroupref)
Definition: tlist.c:667
void mark_partial_aggref(Aggref *agg, AggSplit aggsplit)
Definition: planner.c:5325
Definition: nodes.h:510
List * pull_var_clause(Node *node, int flags)
Definition: var.c:535
#define PVC_INCLUDE_AGGREGATES
Definition: var.h:20
#define PVC_INCLUDE_PLACEHOLDERS
Definition: var.h:24
#define PVC_RECURSE_WINDOWFUNCS
Definition: var.h:23
#define get_pathtarget_sortgroupref(target, colno)
Definition: relation.h:979
List * lappend(List *list, void *datum)
Definition: list.c:128
List * exprs
Definition: relation.h:972
SortGroupClause * get_sortgroupref_clause_noerr(Index sortref, List *clauses)
Definition: tlist.c:446
unsigned int Index
Definition: c.h:413
#define makeNode(_type_)
Definition: nodes.h:558
#define lfirst(lc)
Definition: pg_list.h:106
List * groupClause
Definition: parsenodes.h:146
void list_free(List *list)
Definition: list.c:1133
int i
Node * havingQual
Definition: parsenodes.h:150
Definition: pg_list.h:45
static struct subre * parse(struct vars *, int, int, struct state *, struct state *)
Definition: regcomp.c:649
void add_new_columns_to_pathtarget(PathTarget *target, List *exprs)
Definition: tlist.c:714

◆ make_pathkeys_for_window()

static List * make_pathkeys_for_window ( PlannerInfo root,
WindowClause wc,
List tlist 
)
static

Definition at line 5610 of file planner.c.

References ereport, errcode(), errdetail(), errmsg(), ERROR, grouping_is_sortable(), list_concat(), list_copy(), list_free(), make_pathkeys_for_sortclauses(), WindowClause::orderClause, and WindowClause::partitionClause.

Referenced by create_one_window_path(), and standard_qp_callback().

5612 {
5613  List *window_pathkeys;
5614  List *window_sortclauses;
5615 
5616  /* Throw error if can't sort */
5618  ereport(ERROR,
5619  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
5620  errmsg("could not implement window PARTITION BY"),
5621  errdetail("Window partitioning columns must be of sortable datatypes.")));
5623  ereport(ERROR,
5624  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
5625  errmsg("could not implement window ORDER BY"),
5626  errdetail("Window ordering columns must be of sortable datatypes.")));
5627 
5628  /* Okay, make the combined pathkeys */
5629  window_sortclauses = list_concat(list_copy(wc->partitionClause),
5630  list_copy(wc->orderClause));
5631  window_pathkeys = make_pathkeys_for_sortclauses(root,
5632  window_sortclauses,
5633  tlist);
5634  list_free(window_sortclauses);
5635  return window_pathkeys;
5636 }
List * make_pathkeys_for_sortclauses(PlannerInfo *root, List *sortclauses, List *tlist)
Definition: pathkeys.c:865
List * list_copy(const List *oldlist)
Definition: list.c:1160
int errcode(int sqlerrcode)
Definition: elog.c:575
List * list_concat(List *list1, List *list2)
Definition: list.c:321
#define ERROR
Definition: elog.h:43
List * partitionClause
Definition: parsenodes.h:1289
int errdetail(const char *fmt,...)
Definition: elog.c:873
#define ereport(elevel, rest)
Definition: elog.h:122
List * orderClause
Definition: parsenodes.h:1290
int errmsg(const char *fmt,...)
Definition: elog.c:797
void list_free(List *list)
Definition: list.c:1133
bool grouping_is_sortable(List *groupClause)
Definition: tlist.c:518
Definition: pg_list.h:45

◆ make_sort_input_target()

static PathTarget * make_sort_input_target ( PlannerInfo root,
PathTarget final_target,
bool have_postponed_srfs 
)
static

Definition at line 5705 of file planner.c.

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), Assert, contain_volatile_functions(), cost_qual_eval_node(), cpu_operator_cost, create_empty_pathtarget(), expression_returns_set(), PathTarget::exprs, get_pathtarget_sortgroupref, Query::hasTargetSRFs, i, lappend(), lfirst, Query::limitCount, list_free(), list_length(), NIL, palloc0(), parse(), PlannerInfo::parse, QualCost::per_tuple, pull_var_clause(), PVC_INCLUDE_AGGREGATES, PVC_INCLUDE_PLACEHOLDERS, PVC_INCLUDE_WINDOWFUNCS, set_pathtarget_cost_width(), Query::sortClause, and PlannerInfo::tuple_fraction.

Referenced by grouping_planner().

5708 {
5709  Query *parse = root->parse;
5710  PathTarget *input_target;
5711  int ncols;
5712  bool *col_is_srf;
5713  bool *postpone_col;
5714  bool have_srf;
5715  bool have_volatile;
5716  bool have_expensive;
5717  bool have_srf_sortcols;
5718  bool postpone_srfs;
5719  List *postponable_cols;
5720  List *postponable_vars;
5721  int i;
5722  ListCell *lc;
5723 
5724  /* Shouldn't get here unless query has ORDER BY */
5725  Assert(parse->sortClause);
5726 
5727  *have_postponed_srfs = false; /* default result */
5728 
5729  /* Inspect tlist and collect per-column information */
5730  ncols = list_length(final_target->exprs);
5731  col_is_srf = (bool *) palloc0(ncols * sizeof(bool));
5732  postpone_col = (bool *) palloc0(ncols * sizeof(bool));
5733  have_srf = have_volatile = have_expensive = have_srf_sortcols = false;
5734 
5735  i = 0;
5736  foreach(lc, final_target->exprs)
5737  {
5738  Expr *expr = (Expr *) lfirst(lc);
5739 
5740  /*
5741  * If the column has a sortgroupref, assume it has to be evaluated
5742  * before sorting. Generally such columns would be ORDER BY, GROUP
5743  * BY, etc targets. One exception is columns that were removed from
5744  * GROUP BY by remove_useless_groupby_columns() ... but those would
5745  * only be Vars anyway. There don't seem to be any cases where it
5746  * would be worth the trouble to double-check.
5747  */
5748  if (get_pathtarget_sortgroupref(final_target, i) == 0)
5749  {
5750  /*
5751  * Check for SRF or volatile functions. Check the SRF case first
5752  * because we must know whether we have any postponed SRFs.
5753  */
5754  if (parse->hasTargetSRFs &&
5755  expression_returns_set((Node *) expr))
5756  {
5757  /* We'll decide below whether these are postponable */
5758  col_is_srf[i] = true;
5759  have_srf = true;
5760  }
5761  else if (contain_volatile_functions((Node *) expr))
5762  {
5763  /* Unconditionally postpone */
5764  postpone_col[i] = true;
5765  have_volatile = true;
5766  }
5767  else
5768  {
5769  /*
5770  * Else check the cost. XXX it's annoying to have to do this
5771  * when set_pathtarget_cost_width() just did it. Refactor to
5772  * allow sharing the work?
5773  */
5774  QualCost cost;
5775 
5776  cost_qual_eval_node(&cost, (Node *) expr, root);
5777 
5778  /*
5779  * We arbitrarily define "expensive" as "more than 10X
5780  * cpu_operator_cost". Note this will take in any PL function
5781  * with default cost.
5782  */
5783  if (cost.per_tuple > 10 * cpu_operator_cost)
5784  {
5785  postpone_col[i] = true;
5786  have_expensive = true;
5787  }
5788  }
5789  }
5790  else
5791  {
5792  /* For sortgroupref cols, just check if any contain SRFs */
5793  if (!have_srf_sortcols &&
5794  parse->hasTargetSRFs &&
5795  expression_returns_set((Node *) expr))
5796  have_srf_sortcols = true;
5797  }
5798 
5799  i++;
5800  }
5801 
5802  /*
5803  * We can postpone SRFs if we have some but none are in sortgroupref cols.
5804  */
5805  postpone_srfs = (have_srf && !have_srf_sortcols);
5806 
5807  /*
5808  * If we don't need a post-sort projection, just return final_target.
5809  */
5810  if (!(postpone_srfs || have_volatile ||
5811  (have_expensive &&
5812  (parse->limitCount || root->tuple_fraction > 0))))
5813  return final_target;
5814 
5815  /*
5816  * Report whether the post-sort projection will contain set-returning
5817  * functions. This is important because it affects whether the Sort can
5818  * rely on the query's LIMIT (if any) to bound the number of rows it needs
5819  * to return.
5820  */
5821  *have_postponed_srfs = postpone_srfs;
5822 
5823  /*
5824  * Construct the sort-input target, taking all non-postponable columns and
5825  * then adding Vars, PlaceHolderVars, Aggrefs, and WindowFuncs found in
5826  * the postponable ones.
5827  */
5828  input_target = create_empty_pathtarget();
5829  postponable_cols = NIL;
5830 
5831  i = 0;
5832  foreach(lc, final_target->exprs)
5833  {
5834  Expr *expr = (Expr *) lfirst(lc);
5835 
5836  if (postpone_col[i] || (postpone_srfs && col_is_srf[i]))
5837  postponable_cols = lappend(postponable_cols, expr);
5838  else
5839  add_column_to_pathtarget(input_target, expr,
5840  get_pathtarget_sortgroupref(final_target, i));
5841 
5842  i++;
5843  }
5844 
5845  /*
5846  * Pull out all the Vars, Aggrefs, and WindowFuncs mentioned in
5847  * postponable columns, and add them to the sort-input target if not
5848  * already present. (Some might be there already.) We mustn't
5849  * deconstruct Aggrefs or WindowFuncs here, since the projection node
5850  * would be unable to recompute them.
5851  */
5852  postponable_vars = pull_var_clause((Node *) postponable_cols,
5856  add_new_columns_to_pathtarget(input_target, postponable_vars);
5857 
5858  /* clean up cruft */
5859  list_free(postponable_vars);
5860  list_free(postponable_cols);
5861 
5862  /* XXX this represents even more redundant cost calculation ... */
5863  return set_pathtarget_cost_width(root, input_target);
5864 }
void cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
Definition: costsize.c:3534
PathTarget * create_empty_pathtarget(void)
Definition: tlist.c:653
#define NIL
Definition: pg_list.h:69
Query * parse
Definition: relation.h:155
List * sortClause
Definition: parsenodes.h:156
PathTarget * set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
Definition: costsize.c:5038
void add_column_to_pathtarget(PathTarget *target, Expr *expr, Index sortgroupref)
Definition: tlist.c:667
bool expression_returns_set(Node *clause)
Definition: nodeFuncs.c:670
Definition: nodes.h:510
List * pull_var_clause(Node *node, int flags)
Definition: var.c:535
bool contain_volatile_functions(Node *clause)
Definition: clauses.c:957
#define PVC_INCLUDE_AGGREGATES
Definition: var.h:20
#define PVC_INCLUDE_PLACEHOLDERS
Definition: var.h:24
Cost per_tuple
Definition: relation.h:46
double tuple_fraction
Definition: relation.h:294
Node * limitCount
Definition: parsenodes.h:159
double cpu_operator_cost
Definition: costsize.c:108
#define get_pathtarget_sortgroupref(target, colno)
Definition: relation.h:979
List * lappend(List *list, void *datum)
Definition: list.c:128
List * exprs
Definition: relation.h:972
void * palloc0(Size size)
Definition: mcxt.c:877
#define PVC_INCLUDE_WINDOWFUNCS
Definition: var.h:22
bool hasTargetSRFs
Definition: parsenodes.h:125
#define Assert(condition)
Definition: c.h:670
#define lfirst(lc)
Definition: pg_list.h:106
static int list_length(const List *l)
Definition: pg_list.h:89
void list_free(List *list)
Definition: list.c:1133
int i
Definition: pg_list.h:45
static struct subre * parse(struct vars *, int, int, struct state *, struct state *)
Definition: regcomp.c:649
void add_new_columns_to_pathtarget(PathTarget *target, List *exprs)
Definition: tlist.c:714

◆ make_window_input_target()

static PathTarget * make_window_input_target ( PlannerInfo root,
PathTarget final_target,
List activeWindows 
)
static

Definition at line 5490 of file planner.c.

References add_column_to_pathtarget(), add_new_columns_to_pathtarget(), Assert, bms_add_member(), bms_is_member(), create_empty_pathtarget(), PathTarget::exprs, get_pathtarget_sortgroupref, Query::groupClause, Query::hasWindowFuncs, i, lappend(), lfirst, lfirst_node, list_free(), NIL, WindowClause::orderClause, parse(), PlannerInfo::parse, WindowClause::partitionClause, pull_var_clause(), PVC_INCLUDE_AGGREGATES, PVC_INCLUDE_PLACEHOLDERS, PVC_RECURSE_WINDOWFUNCS, set_pathtarget_cost_width(), and SortGroupClause::tleSortGroupRef.

Referenced by grouping_planner().

5493 {
5494  Query *parse = root->parse;
5495  PathTarget *input_target;
5496  Bitmapset *sgrefs;
5497  List *flattenable_cols;
5498  List *flattenable_vars;
5499  int i;
5500  ListCell *lc;
5501 
5502  Assert(parse->hasWindowFuncs);
5503 
5504  /*
5505  * Collect the sortgroupref numbers of window PARTITION/ORDER BY clauses
5506  * into a bitmapset for convenient reference below.
5507  */
5508  sgrefs = NULL;
5509  foreach(lc, activeWindows)
5510  {
5512  ListCell *lc2;
5513 
5514  foreach(lc2, wc->partitionClause)
5515  {
5517 
5518  sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
5519  }
5520  foreach(lc2, wc->orderClause)
5521  {
5523 
5524  sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
5525  }
5526  }
5527 
5528  /* Add in sortgroupref numbers of GROUP BY clauses, too */
5529  foreach(lc, parse->groupClause)
5530  {
5532 
5533  sgrefs = bms_add_member(sgrefs, grpcl->tleSortGroupRef);
5534  }
5535 
5536  /*
5537  * Construct a target containing all the non-flattenable targetlist items,
5538  * and save aside the others for a moment.
5539  */
5540  input_target = create_empty_pathtarget();
5541  flattenable_cols = NIL;
5542 
5543  i = 0;
5544  foreach(lc, final_target->exprs)
5545  {
5546  Expr *expr = (Expr *) lfirst(lc);
5547  Index sgref = get_pathtarget_sortgroupref(final_target, i);
5548 
5549  /*
5550  * Don't want to deconstruct window clauses or GROUP BY items. (Note
5551  * that such items can't contain window functions, so it's okay to
5552  * compute them below the WindowAgg nodes.)
5553  */
5554  if (sgref != 0 && bms_is_member(sgref, sgrefs))
5555  {
5556  /*
5557  * Don't want to deconstruct this value, so add it to the input
5558  * target as-is.
5559  */
5560  add_column_to_pathtarget(input_target, expr, sgref);
5561  }
5562  else
5563  {
5564  /*
5565  * Column is to be flattened, so just remember the expression for
5566  * later call to pull_var_clause.
5567  */
5568  flattenable_cols = lappend(flattenable_cols, expr);
5569  }
5570 
5571  i++;
5572  }
5573 
5574  /*
5575  * Pull out all the Vars and Aggrefs mentioned in flattenable columns, and
5576  * add them to the input target if not already present. (Some might be
5577  * there already because they're used directly as window/group clauses.)
5578  *
5579  * Note: it's essential to use PVC_INCLUDE_AGGREGATES here, so that any
5580  * Aggrefs are placed in the Agg node's tlist and not left to be computed
5581  * at higher levels. On the other hand, we should recurse into
5582  * WindowFuncs to make sure their input expressions are available.
5583  */
5584  flattenable_vars = pull_var_clause((Node *) flattenable_cols,
5588  add_new_columns_to_pathtarget(input_target, flattenable_vars);
5589 
5590  /* clean up cruft */
5591  list_free(flattenable_vars);
5592  list_free(flattenable_cols);
5593 
5594  /* XXX this causes some redundant cost calculation ... */
5595  return set_pathtarget_cost_width(root, input_target);
5596 }
PathTarget * create_empty_pathtarget(void)
Definition: tlist.c:653
#define NIL
Definition: pg_list.h:69
Query * parse
Definition: relation.h:155
PathTarget * set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
Definition: costsize.c:5038
void add_column_to_pathtarget(PathTarget *target, Expr *expr, Index sortgroupref)
Definition: tlist.c:667
Index tleSortGroupRef
Definition: parsenodes.h:1196
Definition: nodes.h:510
List * pull_var_clause(Node *node, int flags)
Definition: var.c:535
#define PVC_INCLUDE_AGGREGATES
Definition: var.h:20
#define PVC_INCLUDE_PLACEHOLDERS
Definition: var.h:24
List * partitionClause
Definition: parsenodes.h:1289
#define lfirst_node(type, lc)
Definition: pg_list.h:109
#define PVC_RECURSE_WINDOWFUNCS
Definition: var.h:23
#define get_pathtarget_sortgroupref(target, colno)
Definition: relation.h:979
List * lappend(List *list, void *datum)
Definition: list.c:128
List * exprs
Definition: relation.h:972
unsigned int Index
Definition: c.h:413
#define Assert(condition)
Definition: c.h:670
#define lfirst(lc)
Definition: pg_list.h:106
bool hasWindowFuncs
Definition: parsenodes.h:124
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:698
List * orderClause
Definition: parsenodes.h:1290
List * groupClause
Definition: parsenodes.h:146
void list_free(List *list)
Definition: list.c:1133
int i
Definition: pg_list.h:45
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:420
static struct subre * parse(struct vars *, int, int, struct state *, struct state *)
Definition: regcomp.c:649
void add_new_columns_to_pathtarget(PathTarget *target, List *exprs)
Definition: tlist.c:714

◆ mark_partial_aggref()

void mark_partial_aggref ( Aggref agg,
AggSplit  aggsplit 
)

Definition at line 5325 of file planner.c.

References Aggref::aggsplit, AGGSPLIT_SIMPLE, Aggref::aggtranstype, Aggref::aggtype, Assert, BYTEAOID, DO_AGGSPLIT_SERIALIZE, DO_AGGSPLIT_SKIPFINAL, INTERNALOID, and OidIsValid.

Referenced by convert_combining_aggrefs(), and make_partial_grouping_target().

5326 {
5327  /* aggtranstype should be computed by this point */
5329  /* ... but aggsplit should still be as the parser left it */
5330  Assert(agg->aggsplit == AGGSPLIT_SIMPLE);
5331 
5332  /* Mark the Aggref with the intended partial-aggregation mode */
5333  agg->aggsplit = aggsplit;
5334 
5335  /*
5336  * Adjust result type if needed. Normally, a partial aggregate returns
5337  * the aggregate's transition type; but if that's INTERNAL and we're
5338  * serializing, it returns BYTEA instead.
5339  */
5340  if (DO_AGGSPLIT_SKIPFINAL(aggsplit))
5341  {
5342  if (agg->aggtranstype == INTERNALOID && DO_AGGSPLIT_SERIALIZE(aggsplit))
5343  agg->aggtype = BYTEAOID;
5344  else
5345  agg->aggtype = agg->aggtranstype;
5346  }
5347 }
#define OidIsValid(objectId)
Definition: c.h:576
#define DO_AGGSPLIT_SERIALIZE(as)
Definition: nodes.h:771
#define INTERNALOID
Definition: pg_type.h:698
#define Assert(condition)
Definition: c.h:670
AggSplit aggsplit
Definition: primnodes.h:310
#define DO_AGGSPLIT_SKIPFINAL(as)
Definition: nodes.h:770
#define BYTEAOID
Definition: pg_type.h:292
Oid aggtranstype
Definition: primnodes.h:298
Oid aggtype
Definition: primnodes.h:295

◆ plan_cluster_use_sort()

bool plan_cluster_use_sort ( Oid  tableOid,
Oid  indexOid 
)

Definition at line 6058 of file planner.c.

References build_simple_rel(), CMD_SELECT, Query::commandType, cost_qual_eval(), cost_sort(), create_index_path(), create_seqscan_path(), CurrentMemoryContext, enable_indexscan, ForwardScanDirection, get_relation_data_width(), PlannerInfo::glob, RelOptInfo::indexlist, IndexOptInfo::indexoid, IndexOptInfo::indexprs, RangeTblEntry::inFromCl, RangeTblEntry::inh, RangeTblEntry::lateral, lfirst_node, list_make1, maintenance_work_mem, makeNode, NIL, RelOptInfo::pages, PlannerInfo::parse, IndexPath::path, QualCost::per_tuple, PlannerInfo::planner_cxt, PlannerInfo::query_level, RangeTblEntry::relid, RangeTblEntry::relkind, RELKIND_RELATION, RelOptInfo::reltarget, RelOptInfo::rows, Query::rtable, RTE_RELATION, RangeTblEntry::rtekind, setup_simple_rel_arrays(), QualCost::startup, Path::total_cost, PlannerInfo::total_table_pages, RelOptInfo::tuples, PathTarget::width, and PlannerInfo::wt_param_id.

Referenced by copy_heap_data().

6059 {
6060  PlannerInfo *root;
6061  Query *query;
6062  PlannerGlobal *glob;
6063  RangeTblEntry *rte;
6064  RelOptInfo *rel;
6065  IndexOptInfo *indexInfo;
6066  QualCost indexExprCost;
6067  Cost comparisonCost;
6068  Path *seqScanPath;
6069  Path seqScanAndSortPath;
6070  IndexPath *indexScanPath;
6071  ListCell *lc;
6072 
6073  /* We can short-circuit the cost comparison if indexscans are disabled */
6074  if (!enable_indexscan)
6075  return true; /* use sort */
6076 
6077  /* Set up mostly-dummy planner state */
6078  query = makeNode(Query);
6079  query->commandType = CMD_SELECT;
6080 
6081  glob = makeNode(PlannerGlobal);
6082 
6083  root = makeNode(PlannerInfo);
6084  root->parse = query;
6085  root->glob = glob;
6086  root->query_level = 1;
6088  root->wt_param_id = -1;
6089 
6090  /* Build a minimal RTE for the rel */
6091  rte = makeNode(RangeTblEntry);
6092  rte->rtekind = RTE_RELATION;
6093  rte->relid = tableOid;
6094  rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
6095  rte->lateral = false;
6096  rte->inh = false;
6097  rte->inFromCl = true;
6098  query->rtable = list_make1(rte);
6099 
6100  /* Set up RTE/RelOptInfo arrays */
6102 
6103  /* Build RelOptInfo */
6104  rel = build_simple_rel(root, 1, NULL);
6105 
6106  /* Locate IndexOptInfo for the target index */
6107  indexInfo = NULL;
6108  foreach(lc, rel->indexlist)
6109  {
6110  indexInfo = lfirst_node(IndexOptInfo, lc);
6111  if (indexInfo->indexoid == indexOid)
6112  break;
6113  }
6114 
6115  /*
6116  * It's possible that get_relation_info did not generate an IndexOptInfo
6117  * for the desired index; this could happen if it's not yet reached its
6118  * indcheckxmin usability horizon, or if it's a system index and we're
6119  * ignoring system indexes. In such cases we should tell CLUSTER to not
6120  * trust the index contents but use seqscan-and-sort.
6121  */
6122  if (lc == NULL) /* not in the list? */
6123  return true; /* use sort */
6124 
6125  /*
6126  * Rather than doing all the pushups that would be needed to use
6127  * set_baserel_size_estimates, just do a quick hack for rows and width.
6128  */
6129  rel->rows = rel->tuples;
6130  rel->reltarget->width = get_relation_data_width(tableOid, NULL);
6131 
6132  root->total_table_pages = rel->pages;
6133 
6134  /*
6135  * Determine eval cost of the index expressions, if any. We need to
6136  * charge twice that amount for each tuple comparison that happens during
6137  * the sort, since tuplesort.c will have to re-evaluate the index
6138  * expressions each time. (XXX that's pretty inefficient...)
6139  */
6140  cost_qual_eval(&indexExprCost, indexInfo->indexprs, root);
6141  comparisonCost = 2.0 * (indexExprCost.startup + indexExprCost.per_tuple);
6142 
6143  /* Estimate the cost of seq scan + sort */
6144  seqScanPath = create_seqscan_path(root, rel, NULL, 0);
6145  cost_sort(&seqScanAndSortPath, root, NIL,
6146  seqScanPath->total_cost, rel->tuples, rel->reltarget->width,
6147  comparisonCost, maintenance_work_mem, -1.0);
6148 
6149  /* Estimate the cost of index scan */
6150  indexScanPath = create_index_path(root, indexInfo,
6151  NIL, NIL, NIL, NIL, NIL,
6152  ForwardScanDirection, false,
6153  NULL, 1.0, false);
6154 
6155  return (seqScanAndSortPath.total_cost < indexScanPath->path.total_cost);
6156 }
#define NIL
Definition: pg_list.h:69
Query * parse
Definition: relation.h:155
Path path
Definition: relation.h:1118
double tuples
Definition: relation.h:625
IndexPath * create_index_path(PlannerInfo *root, IndexOptInfo *index, List *indexclauses, List *indexclausecols, List *indexorderbys, List *indexorderbycols, List *pathkeys, ScanDirection indexscandir, bool indexonly, Relids required_outer, double loop_count, bool partial_path)
Definition: pathnode.c:1016
Cost startup
Definition: relation.h:45
#define list_make1(x1)
Definition: pg_list.h:139
Cost per_tuple
Definition: relation.h:46
int wt_param_id
Definition: relation.h:311
List * rtable
Definition: parsenodes.h:135
void cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
Definition: costsize.c:3508
#define lfirst_node(type, lc)
Definition: pg_list.h:109
PlannerGlobal * glob
Definition: relation.h:157
RelOptInfo * build_simple_rel(PlannerInfo *root, int relid, RelOptInfo *parent)
Definition: relnode.c:96
MemoryContext CurrentMemoryContext
Definition: mcxt.c:37
double total_table_pages
Definition: relation.h:292
int32 get_relation_data_width(Oid relid, int32 *attr_widths)
Definition: plancat.c:1122
void cost_sort(Path *path, PlannerInfo *root, List *pathkeys, Cost input_cost, double tuples, int width, Cost comparison_cost, int sort_mem, double limit_tuples)
Definition: costsize.c:1645
List * indexlist
Definition: relation.h:622
double rows
Definition: relation.h:588
int maintenance_work_mem
Definition: globals.c:114
Cost total_cost
Definition: relation.h:1054
CmdType commandType
Definition: parsenodes.h:110
#define makeNode(_type_)
Definition: nodes.h:558
BlockNumber pages
Definition: relation.h:624
void setup_simple_rel_arrays(PlannerInfo *root)
Definition: relnode.c:67
Index query_level
Definition: relation.h:159
RTEKind rtekind
Definition: parsenodes.h:951
int width
Definition: relation.h:975
MemoryContext planner_cxt
Definition: relation.h:290
Oid indexoid
Definition: relation.h:719
#define RELKIND_RELATION
Definition: pg_class.h:160
struct PathTarget * reltarget
Definition: relation.h:596
bool enable_indexscan
Definition: costsize.c:119
Path * create_seqscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer, int parallel_workers)
Definition: pathnode.c:946
double Cost
Definition: nodes.h:641
List * indexprs
Definition: relation.h:741

◆ planner()

PlannedStmt* planner ( Query parse,
int  cursorOptions,
ParamListInfo  boundParams 
)

Definition at line 204 of file planner.c.

References parse(), planner_hook, and standard_planner().

Referenced by pg_plan_query().

205 {
206  PlannedStmt *result;
207 
208  if (planner_hook)
209  result = (*planner_hook) (parse, cursorOptions, boundParams);
210  else
211  result = standard_planner(parse, cursorOptions, boundParams);
212  return result;
213 }
PlannedStmt * standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
Definition: planner.c:216
planner_hook_type planner_hook
Definition: planner.c:67
static struct subre * parse(struct vars *, int, int, struct state *, struct state *)
Definition: regcomp.c:649

◆ postprocess_setop_tlist()

static List * postprocess_setop_tlist ( List new_tlist,
List orig_tlist 
)
static

Definition at line 5360 of file planner.c.

References Assert, elog, ERROR, lfirst_node, list_head(), lnext, TargetEntry::resjunk, TargetEntry::resno, and TargetEntry::ressortgroupref.

Referenced by grouping_planner().

5361 {
5362  ListCell *l;
5363  ListCell *orig_tlist_item = list_head(orig_tlist);
5364 
5365  foreach(l, new_tlist)
5366  {
5367  TargetEntry *new_tle = lfirst_node(TargetEntry, l);
5368  TargetEntry *orig_tle;
5369 
5370  /* ignore resjunk columns in setop result */
5371  if (new_tle->resjunk)
5372  continue;
5373 
5374  Assert(orig_tlist_item != NULL);
5375  orig_tle = lfirst_node(TargetEntry, orig_