1/*-------------------------------------------------------------------------
2 *
3 * planner.c
4 * The query optimizer external interface.
5 *
6 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 *
10 * IDENTIFICATION
11 * src/backend/optimizer/plan/planner.c
12 *
13 *-------------------------------------------------------------------------
14 */
15
16#include "postgres.h"
17
18#include <limits.h>
19#include <math.h>
20
21#include "access/genam.h"
22#include "access/parallel.h"
23#include "access/sysattr.h"
24#include "access/table.h"
26#include "catalog/pg_inherits.h"
27#include "catalog/pg_proc.h"
28#include "catalog/pg_type.h"
29#include "executor/executor.h"
30#include "foreign/fdwapi.h"
31#include "jit/jit.h"
32#include "lib/bipartite_match.h"
33#include "lib/knapsack.h"
34#include "miscadmin.h"
35#include "nodes/makefuncs.h"
36#include "nodes/nodeFuncs.h"
37#ifdef OPTIMIZER_DEBUG
38#include "nodes/print.h"
39#endif
40#include "nodes/supportnodes.h"
42#include "optimizer/clauses.h"
43#include "optimizer/cost.h"
44#include "optimizer/optimizer.h"
46#include "optimizer/pathnode.h"
47#include "optimizer/paths.h"
48#include "optimizer/plancat.h"
49#include "optimizer/planmain.h"
50#include "optimizer/planner.h"
51#include "optimizer/prep.h"
52#include "optimizer/subselect.h"
53#include "optimizer/tlist.h"
54#include "parser/analyze.h"
55#include "parser/parse_agg.h"
56#include "parser/parse_clause.h"
58#include "parser/parsetree.h"
61#include "utils/acl.h"
63#include "utils/lsyscache.h"
64#include "utils/rel.h"
65#include "utils/selfuncs.h"
66
67/* GUC parameters */
68double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION;
69int debug_parallel_query = DEBUG_PARALLEL_OFF;
70bool parallel_leader_participation = true;
71bool enable_distinct_reordering = true;
72
73/* Hook for plugins to get control in planner() */
74planner_hook_type planner_hook = NULL;
75
76/* Hook for plugins to get control after PlannerGlobal is initialized */
77planner_setup_hook_type planner_setup_hook = NULL;
78
79/* Hook for plugins to get control before PlannerGlobal is discarded */
80planner_shutdown_hook_type planner_shutdown_hook = NULL;
81
82/* Hook for plugins to get control when grouping_planner() plans upper rels */
83create_upper_paths_hook_type create_upper_paths_hook = NULL;
84
85
86/* Expression kind codes for preprocess_expression */
87#define EXPRKIND_QUAL 0
88#define EXPRKIND_TARGET 1
89#define EXPRKIND_RTFUNC 2
90#define EXPRKIND_RTFUNC_LATERAL 3
91#define EXPRKIND_VALUES 4
92#define EXPRKIND_VALUES_LATERAL 5
93#define EXPRKIND_LIMIT 6
94#define EXPRKIND_APPINFO 7
95#define EXPRKIND_PHV 8
96#define EXPRKIND_TABLESAMPLE 9
97#define EXPRKIND_ARBITER_ELEM 10
98#define EXPRKIND_TABLEFUNC 11
99#define EXPRKIND_TABLEFUNC_LATERAL 12
100#define EXPRKIND_GROUPEXPR 13
101
102/*
103 * Data specific to grouping sets
104 */
105typedef struct
106{
116
117/*
118 * Temporary structure for use during WindowClause reordering in order to be
119 * able to sort WindowClauses on partitioning/ordering prefix.
120 */
121typedef struct
122{
124 List *uniqueOrder; /* A List of unique ordering/partitioning
125 * clauses per Window */
127
128/* Passthrough data for standard_qp_callback */
129typedef struct
130{
131 List *activeWindows; /* active windows, if any */
132 grouping_sets_data *gset_data; /* grouping sets data, if any */
133 SetOperationStmt *setop; /* parent set operation or NULL if not a
134 * subquery belonging to a set operation */
136
137/* Local functions */
138static Node *preprocess_expression(PlannerInfo *root, Node *expr, int kind);
139static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode);
140static void grouping_planner(PlannerInfo *root, double tuple_fraction,
141 SetOperationStmt *setops);
143static List *remap_to_groupclause_idx(List *groupClause, List *gsets,
144 int *tleref_to_colnum_map);
146static double preprocess_limit(PlannerInfo *root,
147 double tuple_fraction,
148 int64 *offset_est, int64 *count_est);
150static List *extract_rollup_sets(List *groupingSets);
151static List *reorder_grouping_sets(List *groupingSets, List *sortclause);
152static void standard_qp_callback(PlannerInfo *root, void *extra);
154 double path_rows,
156 List *target_list);
158 RelOptInfo *input_rel,
159 PathTarget *target,
160 bool target_parallel_safe,
164 RelOptInfo *input_rel,
165 RelOptInfo *grouped_rel);
167 PathTarget *target, bool target_parallel_safe,
168 Node *havingQual);
170 RelOptInfo *input_rel,
171 RelOptInfo *grouped_rel,
172 const AggClauseCosts *agg_costs,
174 GroupPathExtraData *extra,
175 RelOptInfo **partially_grouped_rel_p);
177 RelOptInfo *grouped_rel,
178 Path *path,
179 bool is_sorted,
180 bool can_hash,
182 const AggClauseCosts *agg_costs,
183 double dNumGroups);
185 RelOptInfo *input_rel,
186 PathTarget *input_target,
187 PathTarget *output_target,
188 bool output_target_parallel_safe,
189 WindowFuncLists *wflists,
190 List *activeWindows);
192 RelOptInfo *window_rel,
193 Path *path,
194 PathTarget *input_target,
195 PathTarget *output_target,
196 WindowFuncLists *wflists,
197 List *activeWindows);
199 RelOptInfo *input_rel,
200 PathTarget *target);
202 RelOptInfo *input_rel,
203 RelOptInfo *final_distinct_rel,
204 PathTarget *target);
206 RelOptInfo *input_rel,
207 RelOptInfo *distinct_rel);
209 List *needed_pathkeys,
210 List *path_pathkeys);
212 RelOptInfo *input_rel,
213 PathTarget *target,
214 bool target_parallel_safe,
215 double limit_tuples);
217 PathTarget *final_target);
219 PathTarget *grouping_target,
220 Node *havingQual);
221static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist);
223 WindowFuncLists *wflists);
225static void name_active_windows(List *activeWindows);
227 PathTarget *final_target,
228 List *activeWindows);
230 List *tlist);
232 PathTarget *final_target,
233 bool *have_postponed_srfs);
235 List *targets, List *targets_contain_srfs);
237 RelOptInfo *grouped_rel,
238 RelOptInfo *partially_grouped_rel,
239 const AggClauseCosts *agg_costs,
241 GroupPathExtraData *extra);
243 RelOptInfo *grouped_rel,
244 RelOptInfo *input_rel,
246 GroupPathExtraData *extra,
247 bool force_rel_creation);
249 RelOptInfo *rel,
250 Path *path,
251 Path *cheapest_path,
252 List *pathkeys,
253 double limit_tuples);
255static bool can_partial_agg(PlannerInfo *root);
257 RelOptInfo *rel,
258 List *scanjoin_targets,
259 List *scanjoin_targets_contain_srfs,
260 bool scanjoin_target_parallel_safe,
261 bool tlist_same_exprs);
263 RelOptInfo *input_rel,
264 RelOptInfo *grouped_rel,
265 RelOptInfo *partially_grouped_rel,
266 const AggClauseCosts *agg_costs,
269 GroupPathExtraData *extra);
270static bool group_by_has_partkey(RelOptInfo *input_rel,
271 List *targetList,
272 List *groupClause);
273static int common_prefix_cmp(const void *a, const void *b);
275 List *targetlist);
277 List *sortPathkeys, List *groupClause,
278 SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel);
280 List *sortPathkeys, List *groupClause,
281 SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel);
282
283
284/*****************************************************************************
285 *
286 * Query optimizer entry point
287 *
288 * Inputs:
289 * parse: an analyzed-and-rewritten query tree for an optimizable statement
290 * query_string: source text for the query tree (used for error reports)
291 * cursorOptions: bitmask of CURSOR_OPT_XXX flags, see parsenodes.h
292 * boundParams: passed-in parameter values, or NULL if none
293 * es: ExplainState if being called from EXPLAIN, else NULL
294 *
295 * The result is a PlannedStmt tree.
296 *
297 * PARAM_EXTERN Param nodes within the parse tree can be replaced by Consts
298 * using values from boundParams, if those values are marked PARAM_FLAG_CONST.
299 * Parameter values not so marked are still relied on for estimation purposes.
300 *
301 * The ExplainState pointer is not currently used by the core planner, but it
302 * is passed through to some planner hooks so that they can report information
303 * back to EXPLAIN extension hooks.
304 *
305 * To support loadable plugins that monitor or modify planner behavior,
306 * we provide a hook variable that lets a plugin get control before and
307 * after the standard planning process. The plugin would normally call
308 * standard_planner().
309 *
310 * Note to plugin authors: standard_planner() scribbles on its Query input,
311 * so you'd better copy that data structure if you want to plan more than once.
312 *
313 *****************************************************************************/
314PlannedStmt *
315planner(Query *parse, const char *query_string, int cursorOptions,
316 ParamListInfo boundParams, ExplainState *es)
317{
318 PlannedStmt *result;
319
320 if (planner_hook)
321 result = (*planner_hook) (parse, query_string, cursorOptions,
322 boundParams, es);
323 else
324 result = standard_planner(parse, query_string, cursorOptions,
325 boundParams, es);
326
327 pgstat_report_plan_id(result->planId, false);
328
329 return result;
330}
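/*
 * [Editor's note] Illustrative sketch, not part of planner.c: the usual way
 * an extension chains into planner_hook from its _PG_init().  The names
 * my_planner and prev_planner_hook are hypothetical.
 *
 *		static planner_hook_type prev_planner_hook = NULL;
 *
 *		static PlannedStmt *
 *		my_planner(Query *parse, const char *query_string, int cursorOptions,
 *				   ParamListInfo boundParams, ExplainState *es)
 *		{
 *			(do plugin-specific work here, before and/or after planning)
 *			if (prev_planner_hook)
 *				return prev_planner_hook(parse, query_string, cursorOptions,
 *										 boundParams, es);
 *			return standard_planner(parse, query_string, cursorOptions,
 *									boundParams, es);
 *		}
 *
 *		void
 *		_PG_init(void)
 *		{
 *			prev_planner_hook = planner_hook;
 *			planner_hook = my_planner;
 *		}
 */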
331
332PlannedStmt *
333standard_planner(Query *parse, const char *query_string, int cursorOptions,
334 ParamListInfo boundParams, ExplainState *es)
335{
336 PlannedStmt *result;
337 PlannerGlobal *glob;
338 double tuple_fraction;
339 PlannerInfo *root;
340 RelOptInfo *final_rel;
341 Path *best_path;
342 Plan *top_plan;
343 ListCell *lp,
344 *lr;
345
346 /*
347 * Set up global state for this planner invocation. This data is needed
348 * across all levels of sub-Query that might exist in the given command,
349 * so we keep it in a separate struct that's linked to by each per-Query
350 * PlannerInfo.
351 */
352 glob = makeNode(PlannerGlobal);
353
354 glob->boundParams = boundParams;
355 glob->subplans = NIL;
356 glob->subpaths = NIL;
357 glob->subroots = NIL;
358 glob->rewindPlanIDs = NULL;
359 glob->finalrtable = NIL;
360 glob->allRelids = NULL;
361 glob->prunableRelids = NULL;
362 glob->finalrteperminfos = NIL;
363 glob->finalrowmarks = NIL;
364 glob->resultRelations = NIL;
365 glob->appendRelations = NIL;
366 glob->partPruneInfos = NIL;
367 glob->relationOids = NIL;
368 glob->invalItems = NIL;
369 glob->paramExecTypes = NIL;
370 glob->lastPHId = 0;
371 glob->lastRowMarkId = 0;
372 glob->lastPlanNodeId = 0;
373 glob->transientPlan = false;
374 glob->dependsOnRole = false;
375 glob->partition_directory = NULL;
376 glob->rel_notnullatts_hash = NULL;
377
378 /*
379 * Assess whether it's feasible to use parallel mode for this query. We
380 * can't do this in a standalone backend, or if the command will try to
381 * modify any data, or if this is a cursor operation, or if GUCs are set
382 * to values that don't permit parallelism, or if parallel-unsafe
383 * functions are present in the query tree.
384 *
385 * (Note that we do allow CREATE TABLE AS, SELECT INTO, and CREATE
386 * MATERIALIZED VIEW to use parallel plans, but this is safe only because
387 * the command is writing into a completely new table which workers won't
388 * be able to see. If the workers could see the table, the fact that
389 * group locking would cause them to ignore the leader's heavyweight GIN
390 * page locks would make this unsafe. We'll have to fix that somehow if
391 * we want to allow parallel inserts in general; updates and deletes have
392 * additional problems especially around combo CIDs.)
393 *
394 * For now, we don't try to use parallel mode if we're running inside a
395 * parallel worker. We might eventually be able to relax this
396 * restriction, but for now it seems best not to have parallel workers
397 * trying to create their own parallel workers.
398 */
399 if ((cursorOptions & CURSOR_OPT_PARALLEL_OK) != 0 &&
400 IsUnderPostmaster &&
401 parse->commandType == CMD_SELECT &&
402 !parse->hasModifyingCTE &&
403 max_parallel_workers_per_gather > 0 &&
404 !IsParallelWorker())
405 {
406 /* all the cheap tests pass, so scan the query tree */
407 glob->maxParallelHazard = max_parallel_hazard(parse);
408 glob->parallelModeOK = (glob->maxParallelHazard != PROPARALLEL_UNSAFE);
409 }
410 else
411 {
412 /* skip the query tree scan, just assume it's unsafe */
413 glob->maxParallelHazard = PROPARALLEL_UNSAFE;
414 glob->parallelModeOK = false;
415 }
416
417 /*
418 * glob->parallelModeNeeded is normally set to false here and changed to
419 * true during plan creation if a Gather or Gather Merge plan is actually
420 * created (cf. create_gather_plan, create_gather_merge_plan).
421 *
422 * However, if debug_parallel_query = on or debug_parallel_query =
423 * regress, then we impose parallel mode whenever it's safe to do so, even
424 * if the final plan doesn't use parallelism. It's not safe to do so if
425 * the query contains anything parallel-unsafe; parallelModeOK will be
426 * false in that case. Note that parallelModeOK can't change after this
427 * point. Otherwise, everything in the query is either parallel-safe or
428 * parallel-restricted, and in either case it should be OK to impose
429 * parallel-mode restrictions. If that ends up breaking something, then
430 * either some function the user included in the query is incorrectly
431 * labeled as parallel-safe or parallel-restricted when in reality it's
432 * parallel-unsafe, or else the query planner itself has a bug.
433 */
434 glob->parallelModeNeeded = glob->parallelModeOK &&
435 (debug_parallel_query != DEBUG_PARALLEL_OFF);
436
437 /* Determine what fraction of the plan is likely to be scanned */
438 if (cursorOptions & CURSOR_OPT_FAST_PLAN)
439 {
440 /*
441 * We have no real idea how many tuples the user will ultimately FETCH
442 * from a cursor, but it is often the case that he doesn't want 'em
443 * all, or would prefer a fast-start plan anyway so that he can
444 * process some of the tuples sooner. Use a GUC parameter to decide
445 * what fraction to optimize for.
446 */
447 tuple_fraction = cursor_tuple_fraction;
448
449 /*
450 * We document cursor_tuple_fraction as simply being a fraction, which
451 * means the edge cases 0 and 1 have to be treated specially here. We
452 * convert 1 to 0 ("all the tuples") and 0 to a very small fraction.
453 */
454 if (tuple_fraction >= 1.0)
455 tuple_fraction = 0.0;
456 else if (tuple_fraction <= 0.0)
457 tuple_fraction = 1e-10;
458 }
459 else
460 {
461 /* Default assumption is we need all the tuples */
462 tuple_fraction = 0.0;
463 }
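/*
 * [Editor's note] Worked example, assuming the default cursor_tuple_fraction
 * of 0.1: a query planned with CURSOR_OPT_FAST_PLAN is optimized for fetching
 * roughly 10% of its result, while a setting of 1.0 (treated as 0.0) optimizes
 * for the whole result and 0.0 (treated as 1e-10) optimizes almost purely for
 * startup cost.
 */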
464
465 /* Allow plugins to take control after we've initialized "glob" */
466 if (planner_setup_hook)
467 (*planner_setup_hook) (glob, parse, query_string, &tuple_fraction, es);
468
469 /* primary planning entry point (may recurse for subqueries) */
470 root = subquery_planner(glob, parse, NULL, NULL, false, tuple_fraction,
471 NULL);
472
473 /* Select best Path and turn it into a Plan */
474 final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
475 best_path = get_cheapest_fractional_path(final_rel, tuple_fraction);
476
477 top_plan = create_plan(root, best_path);
478
479 /*
480 * If creating a plan for a scrollable cursor, make sure it can run
481 * backwards on demand. Add a Material node at the top at need.
482 */
483 if (cursorOptions & CURSOR_OPT_SCROLL)
484 {
485 if (!ExecSupportsBackwardScan(top_plan))
486 top_plan = materialize_finished_plan(top_plan);
487 }
488
489 /*
490 * Optionally add a Gather node for testing purposes, provided this is
491 * actually a safe thing to do.
492 *
493 * We can add Gather even when top_plan has parallel-safe initPlans, but
494 * then we have to move the initPlans to the Gather node because of
495 * SS_finalize_plan's limitations. That would cause cosmetic breakage of
496 * regression tests when debug_parallel_query = regress, because initPlans
497 * that would normally appear on the top_plan move to the Gather, causing
498 * them to disappear from EXPLAIN output. That doesn't seem worth kluging
499 * EXPLAIN to hide, so skip it when debug_parallel_query = regress.
500 */
501 if (debug_parallel_query != DEBUG_PARALLEL_OFF &&
502 top_plan->parallel_safe &&
503 (top_plan->initPlan == NIL ||
504 debug_parallel_query != DEBUG_PARALLEL_REGRESS))
505 {
506 Gather *gather = makeNode(Gather);
507 Cost initplan_cost;
508 bool unsafe_initplans;
509
510 gather->plan.targetlist = top_plan->targetlist;
511 gather->plan.qual = NIL;
512 gather->plan.lefttree = top_plan;
513 gather->plan.righttree = NULL;
514 gather->num_workers = 1;
515 gather->single_copy = true;
516 gather->invisible = (debug_parallel_query == DEBUG_PARALLEL_REGRESS);
517
518 /* Transfer any initPlans to the new top node */
519 gather->plan.initPlan = top_plan->initPlan;
520 top_plan->initPlan = NIL;
521
522 /*
523 * Since this Gather has no parallel-aware descendants to signal to,
524 * we don't need a rescan Param.
525 */
526 gather->rescan_param = -1;
527
528 /*
529 * Ideally we'd use cost_gather here, but setting up dummy path data
530 * to satisfy it doesn't seem much cleaner than knowing what it does.
531 */
532 gather->plan.startup_cost = top_plan->startup_cost +
533 parallel_setup_cost;
534 gather->plan.total_cost = top_plan->total_cost +
535 parallel_setup_cost + parallel_tuple_cost * top_plan->plan_rows;
536 gather->plan.plan_rows = top_plan->plan_rows;
537 gather->plan.plan_width = top_plan->plan_width;
538 gather->plan.parallel_aware = false;
539 gather->plan.parallel_safe = false;
540
541 /*
542 * Delete the initplans' cost from top_plan. We needn't add it to the
543 * Gather node, since the above coding already included it there.
544 */
545 SS_compute_initplan_cost(gather->plan.initPlan,
546 &initplan_cost, &unsafe_initplans);
547 top_plan->startup_cost -= initplan_cost;
548 top_plan->total_cost -= initplan_cost;
549
550 /* use parallel mode for parallel plans. */
551 root->glob->parallelModeNeeded = true;
552
553 top_plan = &gather->plan;
554 }
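/*
 * [Editor's note] For illustration: with debug_parallel_query = on, a plan
 * that would otherwise be, say, a bare Seq Scan is returned as a single-copy,
 * one-worker Gather with that Seq Scan as its only child, so parallel-mode
 * restrictions get exercised even though no real parallelism is used.
 */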
555
556 /*
557 * If any Params were generated, run through the plan tree and compute
558 * each plan node's extParam/allParam sets. Ideally we'd merge this into
559 * set_plan_references' tree traversal, but for now it has to be separate
560 * because we need to visit subplans before, not after, the main plan.
561 */
562 if (glob->paramExecTypes != NIL)
563 {
564 Assert(list_length(glob->subplans) == list_length(glob->subroots));
565 forboth(lp, glob->subplans, lr, glob->subroots)
566 {
567 Plan *subplan = (Plan *) lfirst(lp);
568 PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);
569
570 SS_finalize_plan(subroot, subplan);
571 }
572 SS_finalize_plan(root, top_plan);
573 }
574
575 /* final cleanup of the plan */
576 Assert(glob->finalrtable == NIL);
577 Assert(glob->finalrteperminfos == NIL);
578 Assert(glob->finalrowmarks == NIL);
579 Assert(glob->resultRelations == NIL);
580 Assert(glob->appendRelations == NIL);
581 top_plan = set_plan_references(root, top_plan);
582 /* ... and the subplans (both regular subplans and initplans) */
583 Assert(list_length(glob->subplans) == list_length(glob->subroots));
584 forboth(lp, glob->subplans, lr, glob->subroots)
585 {
586 Plan *subplan = (Plan *) lfirst(lp);
587 PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);
588
589 lfirst(lp) = set_plan_references(subroot, subplan);
590 }
591
592 /* build the PlannedStmt result */
593 result = makeNode(PlannedStmt);
594
595 result->commandType = parse->commandType;
596 result->queryId = parse->queryId;
598 result->hasReturning = (parse->returningList != NIL);
599 result->hasModifyingCTE = parse->hasModifyingCTE;
600 result->canSetTag = parse->canSetTag;
601 result->transientPlan = glob->transientPlan;
602 result->dependsOnRole = glob->dependsOnRole;
603 result->parallelModeNeeded = glob->parallelModeNeeded;
604 result->planTree = top_plan;
605 result->partPruneInfos = glob->partPruneInfos;
606 result->rtable = glob->finalrtable;
607 result->unprunableRelids = bms_difference(glob->allRelids,
608 glob->prunableRelids);
609 result->permInfos = glob->finalrteperminfos;
610 result->resultRelations = glob->resultRelations;
611 result->appendRelations = glob->appendRelations;
612 result->subplans = glob->subplans;
613 result->rewindPlanIDs = glob->rewindPlanIDs;
614 result->rowMarks = glob->finalrowmarks;
615 result->relationOids = glob->relationOids;
616 result->invalItems = glob->invalItems;
617 result->paramExecTypes = glob->paramExecTypes;
618 /* utilityStmt should be null, but we might as well copy it */
619 result->utilityStmt = parse->utilityStmt;
620 result->stmt_location = parse->stmt_location;
621 result->stmt_len = parse->stmt_len;
622
623 result->jitFlags = PGJIT_NONE;
624 if (jit_enabled && jit_above_cost >= 0 &&
625 top_plan->total_cost > jit_above_cost)
626 {
627 result->jitFlags |= PGJIT_PERFORM;
628
629 /*
630 * Decide how much effort should be put into generating better code.
631 */
632 if (jit_optimize_above_cost >= 0 &&
633 top_plan->total_cost > jit_optimize_above_cost)
634 result->jitFlags |= PGJIT_OPT3;
635 if (jit_inline_above_cost >= 0 &&
636 top_plan->total_cost > jit_inline_above_cost)
637 result->jitFlags |= PGJIT_INLINE;
638
639 /*
640 * Decide which operations should be JITed.
641 */
642 if (jit_expressions)
643 result->jitFlags |= PGJIT_EXPR;
644 if (jit_tuple_deforming)
645 result->jitFlags |= PGJIT_DEFORM;
646 }
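/*
 * [Editor's note] Worked example with the default JIT cost settings
 * (jit_above_cost = 100000, jit_optimize_above_cost = jit_inline_above_cost =
 * 500000): a plan with total_cost 200000 gets PGJIT_PERFORM plus, by default,
 * PGJIT_EXPR and PGJIT_DEFORM, while PGJIT_OPT3 and PGJIT_INLINE are added
 * only once total_cost exceeds 500000.
 */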
647
648 /* Allow plugins to take control before we discard "glob" */
649 if (planner_shutdown_hook)
650 (*planner_shutdown_hook) (glob, parse, query_string, result);
651
652 if (glob->partition_directory != NULL)
653 DestroyPartitionDirectory(glob->partition_directory);
654
655 return result;
656}
657
658
659/*--------------------
660 * subquery_planner
661 * Invokes the planner on a subquery. We recurse to here for each
662 * sub-SELECT found in the query tree.
663 *
664 * glob is the global state for the current planner run.
665 * parse is the querytree produced by the parser & rewriter.
666 * plan_name is the name to assign to this subplan (NULL at the top level).
667 * parent_root is the immediate parent Query's info (NULL at the top level).
668 * hasRecursion is true if this is a recursive WITH query.
669 * tuple_fraction is the fraction of tuples we expect will be retrieved.
670 * tuple_fraction is interpreted as explained for grouping_planner, below.
671 * setops is used for set operation subqueries to provide the subquery with
672 * the context in which it's being used so that Paths correctly sorted for the
673 * set operation can be generated. NULL when not planning a set operation
674 * child, or when a child of a set op that isn't interested in sorted input.
675 *
676 * Basically, this routine does the stuff that should only be done once
677 * per Query object. It then calls grouping_planner. At one time,
678 * grouping_planner could be invoked recursively on the same Query object;
679 * that's not currently true, but we keep the separation between the two
680 * routines anyway, in case we need it again someday.
681 *
682 * subquery_planner will be called recursively to handle sub-Query nodes
683 * found within the query's expressions and rangetable.
684 *
685 * Returns the PlannerInfo struct ("root") that contains all data generated
686 * while planning the subquery. In particular, the Path(s) attached to
687 * the (UPPERREL_FINAL, NULL) upperrel represent our conclusions about the
688 * cheapest way(s) to implement the query. The top level will select the
689 * best Path and pass it through createplan.c to produce a finished Plan.
690 *--------------------
691 */
692PlannerInfo *
693subquery_planner(PlannerGlobal *glob, Query *parse, char *plan_name,
694 PlannerInfo *parent_root, bool hasRecursion,
695 double tuple_fraction, SetOperationStmt *setops)
696{
697 PlannerInfo *root;
698 List *newWithCheckOptions;
699 List *newHaving;
700 bool hasOuterJoins;
701 bool hasResultRTEs;
702 RelOptInfo *final_rel;
703 ListCell *l;
704
705 /* Create a PlannerInfo data structure for this subquery */
706 root = makeNode(PlannerInfo);
707 root->parse = parse;
708 root->glob = glob;
709 root->query_level = parent_root ? parent_root->query_level + 1 : 1;
710 root->plan_name = plan_name;
711 root->parent_root = parent_root;
712 root->plan_params = NIL;
713 root->outer_params = NULL;
714 root->planner_cxt = CurrentMemoryContext;
715 root->init_plans = NIL;
716 root->cte_plan_ids = NIL;
717 root->multiexpr_params = NIL;
718 root->join_domains = NIL;
719 root->eq_classes = NIL;
720 root->ec_merging_done = false;
721 root->last_rinfo_serial = 0;
722 root->all_result_relids =
723 parse->resultRelation ? bms_make_singleton(parse->resultRelation) : NULL;
724 root->leaf_result_relids = NULL; /* we'll find out leaf-ness later */
725 root->append_rel_list = NIL;
726 root->row_identity_vars = NIL;
727 root->rowMarks = NIL;
728 memset(root->upper_rels, 0, sizeof(root->upper_rels));
729 memset(root->upper_targets, 0, sizeof(root->upper_targets));
730 root->processed_groupClause = NIL;
731 root->processed_distinctClause = NIL;
732 root->processed_tlist = NIL;
733 root->update_colnos = NIL;
734 root->grouping_map = NULL;
735 root->minmax_aggs = NIL;
736 root->qual_security_level = 0;
737 root->hasPseudoConstantQuals = false;
738 root->hasAlternativeSubPlans = false;
739 root->placeholdersFrozen = false;
740 root->hasRecursion = hasRecursion;
741 root->assumeReplanning = false;
742 if (hasRecursion)
743 root->wt_param_id = assign_special_exec_param(root);
744 else
745 root->wt_param_id = -1;
746 root->non_recursive_path = NULL;
747
748 /*
749 * Create the top-level join domain. This won't have valid contents until
750 * deconstruct_jointree fills it in, but the node needs to exist before
751 * that so we can build EquivalenceClasses referencing it.
752 */
753 root->join_domains = list_make1(makeNode(JoinDomain));
754
755 /*
756 * If there is a WITH list, process each WITH query and either convert it
757 * to RTE_SUBQUERY RTE(s) or build an initplan SubPlan structure for it.
758 */
759 if (parse->cteList)
760 SS_process_ctes(root);
761
762 /*
763 * If it's a MERGE command, transform the joinlist as appropriate.
764 */
765 transform_MERGE_to_join(parse);
766
767 /*
768 * Scan the rangetable for relation RTEs and retrieve the necessary
769 * catalog information for each relation. Using this information, clear
770 * the inh flag for any relation that has no children, collect not-null
771 * attribute numbers for any relation that has column not-null
772 * constraints, and expand virtual generated columns for any relation that
773 * contains them. Note that this step does not descend into sublinks and
774 * subqueries; if we pull up any sublinks or subqueries below, their
775 * relation RTEs are processed just before pulling them up.
776 */
778
779 /*
780 * If the FROM clause is empty, replace it with a dummy RTE_RESULT RTE, so
781 * that we don't need so many special cases to deal with that situation.
782 */
783 replace_empty_jointree(parse);
784
785 /*
786 * Look for ANY and EXISTS SubLinks in WHERE and JOIN/ON clauses, and try
787 * to transform them into joins. Note that this step does not descend
788 * into subqueries; if we pull up any subqueries below, their SubLinks are
789 * processed just before pulling them up.
790 */
791 if (parse->hasSubLinks)
792 pull_up_sublinks(root);
793
794 /*
795 * Scan the rangetable for function RTEs, do const-simplification on them,
796 * and then inline them if possible (producing subqueries that might get
797 * pulled up next). Recursion issues here are handled in the same way as
798 * for SubLinks.
799 */
800 preprocess_function_rtes(root);
801
802 /*
803 * Check to see if any subqueries in the jointree can be merged into this
804 * query.
805 */
806 pull_up_subqueries(root);
807
808 /*
809 * If this is a simple UNION ALL query, flatten it into an appendrel. We
810 * do this now because it requires applying pull_up_subqueries to the leaf
811 * queries of the UNION ALL, which weren't touched above because they
812 * weren't referenced by the jointree (they will be after we do this).
813 */
814 if (parse->setOperations)
815 flatten_simple_union_all(root);
816
817 /*
818 * Survey the rangetable to see what kinds of entries are present. We can
819 * skip some later processing if relevant SQL features are not used; for
820 * example if there are no JOIN RTEs we can avoid the expense of doing
821 * flatten_join_alias_vars(). This must be done after we have finished
822 * adding rangetable entries, of course. (Note: actually, processing of
823 * inherited or partitioned rels can cause RTEs for their child tables to
824 * get added later; but those must all be RTE_RELATION entries, so they
825 * don't invalidate the conclusions drawn here.)
826 */
827 root->hasJoinRTEs = false;
828 root->hasLateralRTEs = false;
829 root->group_rtindex = 0;
830 hasOuterJoins = false;
831 hasResultRTEs = false;
832 foreach(l, parse->rtable)
833 {
834 RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
835
836 switch (rte->rtekind)
837 {
838 case RTE_JOIN:
839 root->hasJoinRTEs = true;
840 if (IS_OUTER_JOIN(rte->jointype))
841 hasOuterJoins = true;
842 break;
843 case RTE_RESULT:
844 hasResultRTEs = true;
845 break;
846 case RTE_GROUP:
847 Assert(parse->hasGroupRTE);
848 root->group_rtindex = list_cell_number(parse->rtable, l) + 1;
849 break;
850 default:
851 /* No work here for other RTE types */
852 break;
853 }
854
855 if (rte->lateral)
856 root->hasLateralRTEs = true;
857
858 /*
859 * We can also determine the maximum security level required for any
860 * securityQuals now. Addition of inheritance-child RTEs won't affect
861 * this, because child tables don't have their own securityQuals; see
862 * expand_single_inheritance_child().
863 */
864 if (rte->securityQuals)
865 root->qual_security_level = Max(root->qual_security_level,
866 list_length(rte->securityQuals));
867 }
868
869 /*
870 * If we have now verified that the query target relation is
871 * non-inheriting, mark it as a leaf target.
872 */
873 if (parse->resultRelation)
874 {
875 RangeTblEntry *rte = rt_fetch(parse->resultRelation, parse->rtable);
876
877 if (!rte->inh)
878 root->leaf_result_relids =
879 bms_make_singleton(parse->resultRelation);
880 }
881
882 /*
883 * This would be a convenient time to check access permissions for all
884 * relations mentioned in the query, since it would be better to fail now,
885 * before doing any detailed planning. However, for historical reasons,
886 * we leave this to be done at executor startup.
887 *
888 * Note, however, that we do need to check access permissions for any view
889 * relations mentioned in the query, in order to prevent information being
890 * leaked by selectivity estimation functions, which only check view owner
891 * permissions on underlying tables (see all_rows_selectable() and its
892 * callers). This is a little ugly, because it means that access
893 * permissions for views will be checked twice, which is another reason
894 * why it would be better to do all the ACL checks here.
895 */
896 foreach(l, parse->rtable)
897 {
898 RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
899
900 if (rte->perminfoindex != 0 &&
901 rte->relkind == RELKIND_VIEW)
902 {
903 RTEPermissionInfo *perminfo;
904 bool result;
905
906 perminfo = getRTEPermissionInfo(parse->rteperminfos, rte);
907 result = ExecCheckOneRelPerms(perminfo);
908 if (!result)
910 get_rel_name(perminfo->relid));
911 }
912 }
913
914 /*
915 * Preprocess RowMark information. We need to do this after subquery
916 * pullup, so that all base relations are present.
917 */
918 preprocess_rowmarks(root);
919
920 /*
921 * Set hasHavingQual to remember if HAVING clause is present. Needed
922 * because preprocess_expression will reduce a constant-true condition to
923 * an empty qual list ... but "HAVING TRUE" is not a semantic no-op.
924 */
925 root->hasHavingQual = (parse->havingQual != NULL);
926
927 /*
928 * Do expression preprocessing on targetlist and quals, as well as other
929 * random expressions in the querytree. Note that we do not need to
930 * handle sort/group expressions explicitly, because they are actually
931 * part of the targetlist.
932 */
933 parse->targetList = (List *)
934 preprocess_expression(root, (Node *) parse->targetList,
935 EXPRKIND_TARGET);
936
937 newWithCheckOptions = NIL;
938 foreach(l, parse->withCheckOptions)
939 {
940 WithCheckOption *wco = lfirst_node(WithCheckOption, l);
941
942 wco->qual = preprocess_expression(root, wco->qual,
943 EXPRKIND_QUAL);
944 if (wco->qual != NULL)
945 newWithCheckOptions = lappend(newWithCheckOptions, wco);
946 }
947 parse->withCheckOptions = newWithCheckOptions;
948
949 parse->returningList = (List *)
950 preprocess_expression(root, (Node *) parse->returningList,
951 EXPRKIND_TARGET);
952
953 preprocess_qual_conditions(root, (Node *) parse->jointree);
954
955 parse->havingQual = preprocess_expression(root, parse->havingQual,
956 EXPRKIND_QUAL);
957
958 foreach(l, parse->windowClause)
959 {
960 WindowClause *wc = lfirst_node(WindowClause, l);
961
962 /* partitionClause/orderClause are sort/group expressions */
963 wc->startOffset = preprocess_expression(root, wc->startOffset,
964 EXPRKIND_LIMIT);
965 wc->endOffset = preprocess_expression(root, wc->endOffset,
966 EXPRKIND_LIMIT);
967 }
968
969 parse->limitOffset = preprocess_expression(root, parse->limitOffset,
970 EXPRKIND_LIMIT);
971 parse->limitCount = preprocess_expression(root, parse->limitCount,
972 EXPRKIND_LIMIT);
973
974 if (parse->onConflict)
975 {
976 parse->onConflict->arbiterElems = (List *)
977 preprocess_expression(root,
978 (Node *) parse->onConflict->arbiterElems,
979 EXPRKIND_ARBITER_ELEM);
980 parse->onConflict->arbiterWhere =
981 preprocess_expression(root,
982 parse->onConflict->arbiterWhere,
983 EXPRKIND_QUAL);
984 parse->onConflict->onConflictSet = (List *)
985 preprocess_expression(root,
986 (Node *) parse->onConflict->onConflictSet,
987 EXPRKIND_TARGET);
988 parse->onConflict->onConflictWhere =
989 preprocess_expression(root,
990 parse->onConflict->onConflictWhere,
991 EXPRKIND_QUAL);
992 /* exclRelTlist contains only Vars, so no preprocessing needed */
993 }
994
995 foreach(l, parse->mergeActionList)
996 {
997 MergeAction *action = lfirst_node(MergeAction, l);
998
999 action->targetList = (List *)
1000 preprocess_expression(root,
1001 (Node *) action->targetList,
1002 EXPRKIND_TARGET);
1003 action->qual =
1004 preprocess_expression(root,
1005 (Node *) action->qual,
1006 EXPRKIND_QUAL);
1007 }
1008
1009 parse->mergeJoinCondition =
1010 preprocess_expression(root, parse->mergeJoinCondition, EXPRKIND_QUAL);
1011
1012 root->append_rel_list = (List *)
1013 preprocess_expression(root, (Node *) root->append_rel_list,
1014 EXPRKIND_APPINFO);
1015
1016 /* Also need to preprocess expressions within RTEs */
1017 foreach(l, parse->rtable)
1018 {
1019 RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
1020 int kind;
1021 ListCell *lcsq;
1022
1023 if (rte->rtekind == RTE_RELATION)
1024 {
1025 if (rte->tablesample)
1026 rte->tablesample = (TableSampleClause *)
1027 preprocess_expression(root,
1028 (Node *) rte->tablesample,
1029 EXPRKIND_TABLESAMPLE);
1030 }
1031 else if (rte->rtekind == RTE_SUBQUERY)
1032 {
1033 /*
1034 * We don't want to do all preprocessing yet on the subquery's
1035 * expressions, since that will happen when we plan it. But if it
1036 * contains any join aliases of our level, those have to get
1037 * expanded now, because planning of the subquery won't do it.
1038 * That's only possible if the subquery is LATERAL.
1039 */
1040 if (rte->lateral && root->hasJoinRTEs)
1041 rte->subquery = (Query *)
1042 flatten_join_alias_vars(root, root->parse,
1043 (Node *) rte->subquery);
1044 }
1045 else if (rte->rtekind == RTE_FUNCTION)
1046 {
1047 /* Preprocess the function expression(s) fully */
1048 kind = rte->lateral ? EXPRKIND_RTFUNC_LATERAL : EXPRKIND_RTFUNC;
1049 rte->functions = (List *)
1050 preprocess_expression(root, (Node *) rte->functions, kind);
1051 }
1052 else if (rte->rtekind == RTE_TABLEFUNC)
1053 {
1054 /* Preprocess the function expression(s) fully */
1055 kind = rte->lateral ? EXPRKIND_TABLEFUNC_LATERAL : EXPRKIND_TABLEFUNC;
1056 rte->tablefunc = (TableFunc *)
1057 preprocess_expression(root, (Node *) rte->tablefunc, kind);
1058 }
1059 else if (rte->rtekind == RTE_VALUES)
1060 {
1061 /* Preprocess the values lists fully */
1062 kind = rte->lateral ? EXPRKIND_VALUES_LATERAL : EXPRKIND_VALUES;
1063 rte->values_lists = (List *)
1064 preprocess_expression(root, (Node *) rte->values_lists, kind);
1065 }
1066 else if (rte->rtekind == RTE_GROUP)
1067 {
1068 /* Preprocess the groupexprs list fully */
1069 rte->groupexprs = (List *)
1070 preprocess_expression(root, (Node *) rte->groupexprs,
1071 EXPRKIND_GROUPEXPR);
1072 }
1073
1074 /*
1075 * Process each element of the securityQuals list as if it were a
1076 * separate qual expression (as indeed it is). We need to do it this
1077 * way to get proper canonicalization of AND/OR structure. Note that
1078 * this converts each element into an implicit-AND sublist.
1079 */
1080 foreach(lcsq, rte->securityQuals)
1081 {
1082 lfirst(lcsq) = preprocess_expression(root,
1083 (Node *) lfirst(lcsq),
1084 EXPRKIND_QUAL);
1085 }
1086 }
1087
1088 /*
1089 * Now that we are done preprocessing expressions, and in particular done
1090 * flattening join alias variables, get rid of the joinaliasvars lists.
1091 * They no longer match what expressions in the rest of the tree look
1092 * like, because we have not preprocessed expressions in those lists (and
1093 * do not want to; for example, expanding a SubLink there would result in
1094 * a useless unreferenced subplan). Leaving them in place simply creates
1095 * a hazard for later scans of the tree. We could try to prevent that by
1096 * using QTW_IGNORE_JOINALIASES in every tree scan done after this point,
1097 * but that doesn't sound very reliable.
1098 */
1099 if (root->hasJoinRTEs)
1100 {
1101 foreach(l, parse->rtable)
1102 {
1103 RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
1104
1105 rte->joinaliasvars = NIL;
1106 }
1107 }
1108
1109 /*
1110 * Replace any Vars in the subquery's targetlist and havingQual that
1111 * reference GROUP outputs with the underlying grouping expressions.
1112 *
1113 * Note that we need to perform this replacement after we've preprocessed
1114 * the grouping expressions. This is to ensure that there is only one
1115 * instance of SubPlan for each SubLink contained within the grouping
1116 * expressions.
1117 */
1118 if (parse->hasGroupRTE)
1119 {
1120 parse->targetList = (List *)
1121 flatten_group_exprs(root, root->parse, (Node *) parse->targetList);
1122 parse->havingQual =
1123 flatten_group_exprs(root, root->parse, parse->havingQual);
1124 }
1125
1126 /* Constant-folding might have removed all set-returning functions */
1127 if (parse->hasTargetSRFs)
1128 parse->hasTargetSRFs = expression_returns_set((Node *) parse->targetList);
1129
1130 /*
1131 * If we have grouping sets, expand the groupingSets tree of this query to
1132 * a flat list of grouping sets. We need to do this before optimizing
1133 * HAVING, since we can't easily tell if there's an empty grouping set
1134 * until we have this representation.
1135 */
1136 if (parse->groupingSets)
1137 {
1138 parse->groupingSets =
1139 expand_grouping_sets(parse->groupingSets, parse->groupDistinct, -1);
1140 }
1141
1142 /*
1143 * In some cases we may want to transfer a HAVING clause into WHERE. We
1144 * cannot do so if the HAVING clause contains aggregates (obviously) or
1145 * volatile functions (since a HAVING clause is supposed to be executed
1146 * only once per group). We also can't do this if there are any grouping
1147 * sets and the clause references any columns that are nullable by the
1148 * grouping sets; the nulled values of those columns are not available
1149 * before the grouping step. (The test on groupClause might seem wrong,
1150 * but it's okay: it's just an optimization to avoid running pull_varnos
1151 * when there cannot be any Vars in the HAVING clause.)
1152 *
1153 * Also, it may be that the clause is so expensive to execute that we're
1154 * better off doing it only once per group, despite the loss of
1155 * selectivity. This is hard to estimate short of doing the entire
1156 * planning process twice, so we use a heuristic: clauses containing
1157 * subplans are left in HAVING. Otherwise, we move or copy the HAVING
1158 * clause into WHERE, in hopes of eliminating tuples before aggregation
1159 * instead of after.
1160 *
1161 * If the query has no empty grouping set then we can simply move such a
1162 * clause into WHERE; any group that fails the clause will not be in the
1163 * output because none of its tuples will reach the grouping or
1164 * aggregation stage. Otherwise we have to keep the clause in HAVING to
1165 * ensure that we don't emit a bogus aggregated row. But then the HAVING
1166 * clause must be degenerate (variable-free), so we can copy it into WHERE
1167 * so that query_planner() can use it in a gating Result node. (This could
1168 * be done better, but it seems not worth optimizing.)
1169 *
1170 * Note that a HAVING clause may contain expressions that are not fully
1171 * preprocessed. This can happen if these expressions are part of
1172 * grouping items. In such cases, they are replaced with GROUP Vars in
1173 * the parser and then replaced back after we're done with expression
1174 * preprocessing on havingQual. This is not an issue if the clause
1175 * remains in HAVING, because these expressions will be matched to lower
1176 * target items in setrefs.c. However, if the clause is moved or copied
1177 * into WHERE, we need to ensure that these expressions are fully
1178 * preprocessed.
1179 *
1180 * Note that both havingQual and parse->jointree->quals are in
1181 * implicitly-ANDed-list form at this point, even though they are declared
1182 * as Node *.
1183 */
1184 newHaving = NIL;
1185 foreach(l, (List *) parse->havingQual)
1186 {
1187 Node *havingclause = (Node *) lfirst(l);
1188
1189 if (contain_agg_clause(havingclause) ||
1190 contain_volatile_functions(havingclause) ||
1191 contain_subplans(havingclause) ||
1192 (parse->groupClause && parse->groupingSets &&
1193 bms_is_member(root->group_rtindex, pull_varnos(root, havingclause))))
1194 {
1195 /* keep it in HAVING */
1196 newHaving = lappend(newHaving, havingclause);
1197 }
1198 else if (parse->groupClause &&
1199 (parse->groupingSets == NIL ||
1200 (List *) linitial(parse->groupingSets) != NIL))
1201 {
1202 /* There is GROUP BY, but no empty grouping set */
1203 Node *whereclause;
1204
1205 /* Preprocess the HAVING clause fully */
1206 whereclause = preprocess_expression(root, havingclause,
1207 EXPRKIND_QUAL);
1208 /* ... and move it to WHERE */
1209 parse->jointree->quals = (Node *)
1210 list_concat((List *) parse->jointree->quals,
1211 (List *) whereclause);
1212 }
1213 else
1214 {
1215 /* There is an empty grouping set (perhaps implicitly) */
1216 Node *whereclause;
1217
1218 /* Preprocess the HAVING clause fully */
1219 whereclause = preprocess_expression(root, copyObject(havingclause),
1220 EXPRKIND_QUAL);
1221 /* ... and put a copy in WHERE */
1222 parse->jointree->quals = (Node *)
1223 list_concat((List *) parse->jointree->quals,
1224 (List *) whereclause);
1225 /* ... and also keep it in HAVING */
1226 newHaving = lappend(newHaving, havingclause);
1227 }
1228 }
1229 parse->havingQual = (Node *) newHaving;
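/*
 * [Editor's note] A hypothetical example of the transformation above: in
 *		SELECT a, sum(b) FROM t GROUP BY a HAVING a > 0 AND sum(b) > 10;
 * the clause "a > 0" contains no aggregates, volatile functions, or subplans,
 * so it is moved into WHERE and filters rows before grouping, while
 * "sum(b) > 10" necessarily stays in HAVING.
 */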
1230
1231 /*
1232 * If we have any outer joins, try to reduce them to plain inner joins.
1233 * This step is most easily done after we've done expression
1234 * preprocessing.
1235 */
1236 if (hasOuterJoins)
1237 reduce_outer_joins(root);
1238
1239 /*
1240 * If we have any RTE_RESULT relations, see if they can be deleted from
1241 * the jointree. We also rely on this processing to flatten single-child
1242 * FromExprs underneath outer joins. This step is most effectively done
1243 * after we've done expression preprocessing and outer join reduction.
1244 */
1245 if (hasResultRTEs || hasOuterJoins)
1246 remove_useless_result_rtes(root);
1247
1248 /*
1249 * Do the main planning.
1250 */
1251 grouping_planner(root, tuple_fraction, setops);
1252
1253 /*
1254 * Capture the set of outer-level param IDs we have access to, for use in
1255 * extParam/allParam calculations later.
1256 */
1257 SS_identify_outer_params(root);
1258
1259 /*
1260 * If any initPlans were created in this query level, adjust the surviving
1261 * Paths' costs and parallel-safety flags to account for them. The
1262 * initPlans won't actually get attached to the plan tree till
1263 * create_plan() runs, but we must include their effects now.
1264 */
1265 final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1266 SS_charge_for_initplans(root, final_rel);
1267
1268 /*
1269 * Make sure we've identified the cheapest Path for the final rel. (By
1270 * doing this here not in grouping_planner, we include initPlan costs in
1271 * the decision, though it's unlikely that will change anything.)
1272 */
1273 set_cheapest(final_rel);
1274
1275 return root;
1276}
1277
1278/*
1279 * preprocess_expression
1280 * Do subquery_planner's preprocessing work for an expression,
1281 * which can be a targetlist, a WHERE clause (including JOIN/ON
1282 * conditions), a HAVING clause, or a few other things.
1283 */
1284static Node *
1285preprocess_expression(PlannerInfo *root, Node *expr, int kind)
1286{
1287 /*
1288 * Fall out quickly if expression is empty. This occurs often enough to
1289 * be worth checking. Note that null->null is the correct conversion for
1290 * implicit-AND result format, too.
1291 */
1292 if (expr == NULL)
1293 return NULL;
1294
1295 /*
1296 * If the query has any join RTEs, replace join alias variables with
1297 * base-relation variables. We must do this first, since any expressions
1298 * we may extract from the joinaliasvars lists have not been preprocessed.
1299 * For example, if we did this after sublink processing, sublinks expanded
1300 * out from join aliases would not get processed. But we can skip this in
1301 * non-lateral RTE functions, VALUES lists, and TABLESAMPLE clauses, since
1302 * they can't contain any Vars of the current query level.
1303 */
1304 if (root->hasJoinRTEs &&
1305 !(kind == EXPRKIND_RTFUNC ||
1306 kind == EXPRKIND_VALUES ||
1307 kind == EXPRKIND_TABLESAMPLE ||
1308 kind == EXPRKIND_TABLEFUNC))
1309 expr = flatten_join_alias_vars(root, root->parse, expr);
1310
1311 /*
1312 * Simplify constant expressions. For function RTEs, this was already
1313 * done by preprocess_function_rtes. (But note we must do it again for
1314 * EXPRKIND_RTFUNC_LATERAL, because those might by now contain
1315 * un-simplified subexpressions inserted by flattening of subqueries or
1316 * join alias variables.)
1317 *
1318 * Note: an essential effect of this is to convert named-argument function
1319 * calls to positional notation and insert the current actual values of
1320 * any default arguments for functions. To ensure that happens, we *must*
1321 * process all expressions here. Previous PG versions sometimes skipped
1322 * const-simplification if it didn't seem worth the trouble, but we can't
1323 * do that anymore.
1324 *
1325 * Note: this also flattens nested AND and OR expressions into N-argument
1326 * form. All processing of a qual expression after this point must be
1327 * careful to maintain AND/OR flatness --- that is, do not generate a tree
1328 * with AND directly under AND, nor OR directly under OR.
1329 */
1330 if (kind != EXPRKIND_RTFUNC)
1331 expr = eval_const_expressions(root, expr);
1332
1333 /*
1334 * If it's a qual or havingQual, canonicalize it.
1335 */
1336 if (kind == EXPRKIND_QUAL)
1337 {
1338 expr = (Node *) canonicalize_qual((Expr *) expr, false);
1339
1340#ifdef OPTIMIZER_DEBUG
1341 printf("After canonicalize_qual()\n");
1342 pprint(expr);
1343#endif
1344 }
1345
1346 /*
1347 * Check for ANY ScalarArrayOpExpr with Const arrays and set the
1348 * hashfuncid of any that might execute more quickly by using hash lookups
1349 * instead of a linear search.
1350 */
1351 if (kind == EXPRKIND_QUAL || kind == EXPRKIND_TARGET)
1352 {
1353 convert_saop_to_hashed_saop(expr);
1354 }
1355
1356 /* Expand SubLinks to SubPlans */
1357 if (root->parse->hasSubLinks)
1358 expr = SS_process_sublinks(root, expr, (kind == EXPRKIND_QUAL));
1359
1360 /*
1361 * XXX do not insert anything here unless you have grokked the comments in
1362 * SS_replace_correlation_vars ...
1363 */
1364
1365 /* Replace uplevel vars with Param nodes (this IS possible in VALUES) */
1366 if (root->query_level > 1)
1367 expr = SS_replace_correlation_vars(root, expr);
1368
1369 /*
1370 * If it's a qual or havingQual, convert it to implicit-AND format. (We
1371 * don't want to do this before eval_const_expressions, since the latter
1372 * would be unable to simplify a top-level AND correctly. Also,
1373 * SS_process_sublinks expects explicit-AND format.)
1374 */
1375 if (kind == EXPRKIND_QUAL)
1376 expr = (Node *) make_ands_implicit((Expr *) expr);
1377
1378 return expr;
1379}
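/*
 * [Editor's note] For illustration, an EXPRKIND_QUAL input such as
 *		(a = 1 AND b = 2) AND c = 3
 * comes back from preprocess_expression as the implicit-AND list
 *		{a = 1, b = 2, c = 3}
 * after constant simplification, canonicalize_qual, and make_ands_implicit.
 */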
1380
1381/*
1382 * preprocess_qual_conditions
1383 * Recursively scan the query's jointree and do subquery_planner's
1384 * preprocessing work on each qual condition found therein.
1385 */
1386static void
1387preprocess_qual_conditions(PlannerInfo *root, Node *jtnode)
1388{
1389 if (jtnode == NULL)
1390 return;
1391 if (IsA(jtnode, RangeTblRef))
1392 {
1393 /* nothing to do here */
1394 }
1395 else if (IsA(jtnode, FromExpr))
1396 {
1397 FromExpr *f = (FromExpr *) jtnode;
1398 ListCell *l;
1399
1400 foreach(l, f->fromlist)
1401 preprocess_qual_conditions(root, lfirst(l));
1402
1403 f->quals = preprocess_expression(root, f->quals, EXPRKIND_QUAL);
1404 }
1405 else if (IsA(jtnode, JoinExpr))
1406 {
1407 JoinExpr *j = (JoinExpr *) jtnode;
1408
1408
1409 preprocess_qual_conditions(root, j->larg);
1410 preprocess_qual_conditions(root, j->rarg);
1411
1412 j->quals = preprocess_expression(root, j->quals, EXPRKIND_QUAL);
1413 }
1414 else
1415 elog(ERROR, "unrecognized node type: %d",
1416 (int) nodeTag(jtnode));
1417}
1418
1419/*
1420 * preprocess_phv_expression
1421 * Do preprocessing on a PlaceHolderVar expression that's been pulled up.
1422 *
1423 * If a LATERAL subquery references an output of another subquery, and that
1424 * output must be wrapped in a PlaceHolderVar because of an intermediate outer
1425 * join, then we'll push the PlaceHolderVar expression down into the subquery
1426 * and later pull it back up during find_lateral_references, which runs after
1427 * subquery_planner has preprocessed all the expressions that were in the
1428 * current query level to start with. So we need to preprocess it then.
1429 */
1430Expr *
1431preprocess_phv_expression(PlannerInfo *root, Expr *expr)
1432{
1433 return (Expr *) preprocess_expression(root, (Node *) expr, EXPRKIND_PHV);
1434}
1435
1436/*--------------------
1437 * grouping_planner
1438 * Perform planning steps related to grouping, aggregation, etc.
1439 *
1440 * This function adds all required top-level processing to the scan/join
1441 * Path(s) produced by query_planner.
1442 *
1443 * tuple_fraction is the fraction of tuples we expect will be retrieved.
1444 * tuple_fraction is interpreted as follows:
1445 * 0: expect all tuples to be retrieved (normal case)
1446 * 0 < tuple_fraction < 1: expect the given fraction of tuples available
1447 * from the plan to be retrieved
1448 * tuple_fraction >= 1: tuple_fraction is the absolute number of tuples
1449 * expected to be retrieved (ie, a LIMIT specification).
1450 * setops is used for set operation subqueries to provide the subquery with
1451 * the context in which it's being used so that Paths correctly sorted for the
1452 * set operation can be generated. NULL when not planning a set operation
1453 * child, or when a child of a set op that isn't interested in sorted input.
1454 *
1455 * Returns nothing; the useful output is in the Paths we attach to the
1456 * (UPPERREL_FINAL, NULL) upperrel in *root. In addition,
1457 * root->processed_tlist contains the final processed targetlist.
1458 *
1459 * Note that we have not done set_cheapest() on the final rel; it's convenient
1460 * to leave this to the caller.
1461 *--------------------
1462 */
1463static void
1464grouping_planner(PlannerInfo *root, double tuple_fraction,
1465 SetOperationStmt *setops)
1466{
1467 Query *parse = root->parse;
1468 int64 offset_est = 0;
1469 int64 count_est = 0;
1470 double limit_tuples = -1.0;
1471 bool have_postponed_srfs = false;
1472 PathTarget *final_target;
1473 List *final_targets;
1474 List *final_targets_contain_srfs;
1475 bool final_target_parallel_safe;
1476 RelOptInfo *current_rel;
1477 RelOptInfo *final_rel;
1478 FinalPathExtraData extra;
1479 ListCell *lc;
1480
1481 /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
1482 if (parse->limitCount || parse->limitOffset)
1483 {
1484 tuple_fraction = preprocess_limit(root, tuple_fraction,
1485 &offset_est, &count_est);
1486
1487 /*
1488 * If we have a known LIMIT, and don't have an unknown OFFSET, we can
1489 * estimate the effects of using a bounded sort.
1490 */
1491 if (count_est > 0 && offset_est >= 0)
1492 limit_tuples = (double) count_est + (double) offset_est;
1493 }
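/*
 * [Editor's note] For example, LIMIT 10 OFFSET 20 gives count_est = 10 and
 * offset_est = 20, so limit_tuples becomes 30; that bound is what later
 * allows a bounded (top-N) sort to be costed instead of a full sort.
 */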
1494
1495 /* Make tuple_fraction accessible to lower-level routines */
1496 root->tuple_fraction = tuple_fraction;
1497
1498 if (parse->setOperations)
1499 {
1500 /*
1501 * Construct Paths for set operations. The results will not need any
1502 * work except perhaps a top-level sort and/or LIMIT. Note that any
1503 * special work for recursive unions is the responsibility of
1504 * plan_set_operations.
1505 */
1506 current_rel = plan_set_operations(root);
1507
1508 /*
1509 * We should not need to call preprocess_targetlist, since we must be
1510 * in a SELECT query node. Instead, use the processed_tlist returned
1511 * by plan_set_operations (since this tells whether it returned any
1512 * resjunk columns!), and transfer any sort key information from the
1513 * original tlist.
1514 */
1515 Assert(parse->commandType == CMD_SELECT);
1516
1517 /* for safety, copy processed_tlist instead of modifying in-place */
1518 root->processed_tlist =
1519 postprocess_setop_tlist(copyObject(root->processed_tlist),
1520 parse->targetList);
1521
1522 /* Also extract the PathTarget form of the setop result tlist */
1523 final_target = current_rel->cheapest_total_path->pathtarget;
1524
1525 /* And check whether it's parallel safe */
1526 final_target_parallel_safe =
1527 is_parallel_safe(root, (Node *) final_target->exprs);
1528
1529 /* The setop result tlist couldn't contain any SRFs */
1530 Assert(!parse->hasTargetSRFs);
1531 final_targets = final_targets_contain_srfs = NIL;
1532
1533 /*
1534 * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have
1535 * checked already, but let's make sure).
1536 */
1537 if (parse->rowMarks)
1538 ereport(ERROR,
1539 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1540 /*------
1541 translator: %s is a SQL row locking clause such as FOR UPDATE */
1542 errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
1543 LCS_asString(linitial_node(RowMarkClause,
1544 parse->rowMarks)->strength))));
1545
1546 /*
1547 * Calculate pathkeys that represent result ordering requirements
1548 */
1549 Assert(parse->distinctClause == NIL);
1550 root->sort_pathkeys = make_pathkeys_for_sortclauses(root,
1551 parse->sortClause,
1552 root->processed_tlist);
1553 }
1554 else
1555 {
1556 /* No set operations, do regular planning */
1557 PathTarget *sort_input_target;
1558 List *sort_input_targets;
1559 List *sort_input_targets_contain_srfs;
1560 bool sort_input_target_parallel_safe;
1561 PathTarget *grouping_target;
1562 List *grouping_targets;
1563 List *grouping_targets_contain_srfs;
1564 bool grouping_target_parallel_safe;
1565 PathTarget *scanjoin_target;
1566 List *scanjoin_targets;
1567 List *scanjoin_targets_contain_srfs;
1568 bool scanjoin_target_parallel_safe;
1569 bool scanjoin_target_same_exprs;
1570 bool have_grouping;
1571 WindowFuncLists *wflists = NULL;
1572 List *activeWindows = NIL;
1573 grouping_sets_data *gset_data = NULL;
1574 standard_qp_extra qp_extra;
1575
1576 /* A recursive query should always have setOperations */
1577 Assert(!root->hasRecursion);
1578
1579 /* Preprocess grouping sets and GROUP BY clause, if any */
1580 if (parse->groupingSets)
1581 {
1582 gset_data = preprocess_grouping_sets(root);
1583 }
1584 else if (parse->groupClause)
1585 {
1586 /* Preprocess regular GROUP BY clause, if any */
1587 root->processed_groupClause = preprocess_groupclause(root, NIL);
1588 }
1589
1590 /*
1591 * Preprocess targetlist. Note that much of the remaining planning
1592 * work will be done with the PathTarget representation of tlists, but
1593 * we must also maintain the full representation of the final tlist so
1594 * that we can transfer its decoration (resnames etc) to the topmost
1595 * tlist of the finished Plan. This is kept in processed_tlist.
1596 */
1597 preprocess_targetlist(root);
1598
1599 /*
1600 * Mark all the aggregates with resolved aggtranstypes, and detect
1601 * aggregates that are duplicates or can share transition state. We
1602 * must do this before slicing and dicing the tlist into various
1603 * pathtargets, else some copies of the Aggref nodes might escape
1604 * being marked.
1605 */
1606 if (parse->hasAggs)
1607 {
1608 preprocess_aggrefs(root, (Node *) root->processed_tlist);
1609 preprocess_aggrefs(root, (Node *) parse->havingQual);
1610 }
1611
1612 /*
1613 * Locate any window functions in the tlist. (We don't need to look
1614 * anywhere else, since expressions used in ORDER BY will be in there
1615 * too.) Note that they could all have been eliminated by constant
1616 * folding, in which case we don't need to do any more work.
1617 */
1618 if (parse->hasWindowFuncs)
1619 {
1620 wflists = find_window_functions((Node *) root->processed_tlist,
1621 list_length(parse->windowClause));
1622 if (wflists->numWindowFuncs > 0)
1623 {
1624 /*
1625 * See if any modifications can be made to each WindowClause
1626 * to allow the executor to execute the WindowFuncs more
1627 * quickly.
1628 */
1629 optimize_window_clauses(root, wflists);
1630
1631 /* Extract the list of windows actually in use. */
1632 activeWindows = select_active_windows(root, wflists);
1633
1634 /* Make sure they all have names, for EXPLAIN's use. */
1635 name_active_windows(activeWindows);
1636 }
1637 else
1638 parse->hasWindowFuncs = false;
1639 }
1640
1641 /*
1642 * Preprocess MIN/MAX aggregates, if any. Note: be careful about
1643 * adding logic between here and the query_planner() call. Anything
1644 * that is needed in MIN/MAX-optimizable cases will have to be
1645 * duplicated in planagg.c.
1646 */
1647 if (parse->hasAggs)
1648 preprocess_minmax_aggregates(root);
1649
1650 /*
1651 * Figure out whether there's a hard limit on the number of rows that
1652 * query_planner's result subplan needs to return. Even if we know a
1653 * hard limit overall, it doesn't apply if the query has any
1654 * grouping/aggregation operations, or SRFs in the tlist.
1655 */
1656 if (parse->groupClause ||
1657 parse->groupingSets ||
1658 parse->distinctClause ||
1659 parse->hasAggs ||
1660 parse->hasWindowFuncs ||
1661 parse->hasTargetSRFs ||
1662 root->hasHavingQual)
1663 root->limit_tuples = -1.0;
1664 else
1665 root->limit_tuples = limit_tuples;
1666
1667 /* Set up data needed by standard_qp_callback */
1668 qp_extra.activeWindows = activeWindows;
1669 qp_extra.gset_data = gset_data;
1670
1671 /*
1672 * If we're a subquery for a set operation, store the SetOperationStmt
1673 * in qp_extra.
1674 */
1675 qp_extra.setop = setops;
1676
1677 /*
1678 * Generate the best unsorted and presorted paths for the scan/join
1679 * portion of this Query, ie the processing represented by the
1680 * FROM/WHERE clauses. (Note there may not be any presorted paths.)
1681 * We also generate (in standard_qp_callback) pathkey representations
1682 * of the query's sort clause, distinct clause, etc.
1683 */
1684 current_rel = query_planner(root, standard_qp_callback, &qp_extra);
1685
1686 /*
1687 * Convert the query's result tlist into PathTarget format.
1688 *
1689 * Note: this cannot be done before query_planner() has performed
1690 * appendrel expansion, because that might add resjunk entries to
1691 * root->processed_tlist. Waiting till afterwards is also helpful
1692 * because the target width estimates can use per-Var width numbers
1693 * that were obtained within query_planner().
1694 */
1695 final_target = create_pathtarget(root, root->processed_tlist);
1696 final_target_parallel_safe =
1697 is_parallel_safe(root, (Node *) final_target->exprs);
1698
1699 /*
1700 * If ORDER BY was given, consider whether we should use a post-sort
1701 * projection, and compute the adjusted target for preceding steps if
1702 * so.
1703 */
1704 if (parse->sortClause)
1705 {
1706 sort_input_target = make_sort_input_target(root,
1707 final_target,
1708 &have_postponed_srfs);
1709 sort_input_target_parallel_safe =
1710 is_parallel_safe(root, (Node *) sort_input_target->exprs);
1711 }
1712 else
1713 {
1714 sort_input_target = final_target;
1715 sort_input_target_parallel_safe = final_target_parallel_safe;
1716 }
1717
1718 /*
1719 * If we have window functions to deal with, the output from any
1720 * grouping step needs to be what the window functions want;
1721 * otherwise, it should be sort_input_target.
1722 */
1723 if (activeWindows)
1724 {
1725 grouping_target = make_window_input_target(root,
1726 final_target,
1727 activeWindows);
1728 grouping_target_parallel_safe =
1729 is_parallel_safe(root, (Node *) grouping_target->exprs);
1730 }
1731 else
1732 {
1733 grouping_target = sort_input_target;
1734 grouping_target_parallel_safe = sort_input_target_parallel_safe;
1735 }
1736
1737 /*
1738 * If we have grouping or aggregation to do, the topmost scan/join
1739 * plan node must emit what the grouping step wants; otherwise, it
1740 * should emit grouping_target.
1741 */
1742 have_grouping = (parse->groupClause || parse->groupingSets ||
1743 parse->hasAggs || root->hasHavingQual);
1744 if (have_grouping)
1745 {
1746 scanjoin_target = make_group_input_target(root, final_target);
1747 scanjoin_target_parallel_safe =
1748 is_parallel_safe(root, (Node *) scanjoin_target->exprs);
1749 }
1750 else
1751 {
1752 scanjoin_target = grouping_target;
1753 scanjoin_target_parallel_safe = grouping_target_parallel_safe;
1754 }
1755
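/* Orientation note: the targets computed above form a chain -- scanjoin_target
 * feeds grouping_target, which feeds sort_input_target, which feeds
 * final_target; whenever a step (grouping, window functions, ORDER BY) is
 * absent, the adjacent targets in the chain are simply the same object. */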
1756 /*
1757 * If there are any SRFs in the targetlist, we must separate each of
1758 * these PathTargets into SRF-computing and SRF-free targets. Replace
1759 * each of the named targets with a SRF-free version, and remember the
1760 * list of additional projection steps we need to add afterwards.
1761 */
1762 if (parse->hasTargetSRFs)
1763 {
1764 /* final_target doesn't recompute any SRFs in sort_input_target */
1765 split_pathtarget_at_srfs(root, final_target, sort_input_target,
1766 &final_targets,
1767 &final_targets_contain_srfs);
1768 final_target = linitial_node(PathTarget, final_targets);
1769 Assert(!linitial_int(final_targets_contain_srfs));
1770 /* likewise for sort_input_target vs. grouping_target */
1771 split_pathtarget_at_srfs(root, sort_input_target, grouping_target,
1772 &sort_input_targets,
1773 &sort_input_targets_contain_srfs);
1774 sort_input_target = linitial_node(PathTarget, sort_input_targets);
1775 Assert(!linitial_int(sort_input_targets_contain_srfs));
1776 /* likewise for grouping_target vs. scanjoin_target */
1777 split_pathtarget_at_srfs(root,
1778 grouping_target, scanjoin_target,
1779 &grouping_targets,
1780 &grouping_targets_contain_srfs);
1781 grouping_target = linitial_node(PathTarget, grouping_targets);
1782 Assert(!linitial_int(grouping_targets_contain_srfs));
1783 /* scanjoin_target will not have any SRFs precomputed for it */
1784 split_pathtarget_at_srfs(root, scanjoin_target, NULL,
1785 &scanjoin_targets,
1786 &scanjoin_targets_contain_srfs);
1787 scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
1788 Assert(!linitial_int(scanjoin_targets_contain_srfs));
1789 }
1790 else
1791 {
1792 /* initialize lists; for most of these, dummy values are OK */
1793 final_targets = final_targets_contain_srfs = NIL;
1794 sort_input_targets = sort_input_targets_contain_srfs = NIL;
1795 grouping_targets = grouping_targets_contain_srfs = NIL;
1796 scanjoin_targets = list_make1(scanjoin_target);
1797 scanjoin_targets_contain_srfs = NIL;
1798 }
1799
1800 /* Apply scan/join target. */
1801 scanjoin_target_same_exprs = list_length(scanjoin_targets) == 1
1802 && equal(scanjoin_target->exprs, current_rel->reltarget->exprs);
1803 apply_scanjoin_target_to_paths(root, current_rel, scanjoin_targets,
1804 scanjoin_targets_contain_srfs,
1805 scanjoin_target_parallel_safe,
1806 scanjoin_target_same_exprs);
1807
1808 /*
1809 * Save the various upper-rel PathTargets we just computed into
1810 * root->upper_targets[]. The core code doesn't use this, but it
1811 * provides a convenient place for extensions to get at the info. For
1812 * consistency, we save all the intermediate targets, even though some
1813 * of the corresponding upperrels might not be needed for this query.
1814 */
1815 root->upper_targets[UPPERREL_FINAL] = final_target;
1816 root->upper_targets[UPPERREL_ORDERED] = final_target;
1817 root->upper_targets[UPPERREL_DISTINCT] = sort_input_target;
1818 root->upper_targets[UPPERREL_PARTIAL_DISTINCT] = sort_input_target;
1819 root->upper_targets[UPPERREL_WINDOW] = sort_input_target;
1820 root->upper_targets[UPPERREL_GROUP_AGG] = grouping_target;
1821
1822 /*
1823 * If we have grouping and/or aggregation, consider ways to implement
1824 * that. We build a new upperrel representing the output of this
1825 * phase.
1826 */
1827 if (have_grouping)
1828 {
1829 current_rel = create_grouping_paths(root,
1830 current_rel,
1831 grouping_target,
1832 grouping_target_parallel_safe,
1833 gset_data);
1834 /* Fix things up if grouping_target contains SRFs */
1835 if (parse->hasTargetSRFs)
1836 adjust_paths_for_srfs(root, current_rel,
1837 grouping_targets,
1838 grouping_targets_contain_srfs);
1839 }
1840
1841 /*
1842 * If we have window functions, consider ways to implement those. We
1843 * build a new upperrel representing the output of this phase.
1844 */
1845 if (activeWindows)
1846 {
1847 current_rel = create_window_paths(root,
1848 current_rel,
1849 grouping_target,
1850 sort_input_target,
1851 sort_input_target_parallel_safe,
1852 wflists,
1853 activeWindows);
1854 /* Fix things up if sort_input_target contains SRFs */
1855 if (parse->hasTargetSRFs)
1856 adjust_paths_for_srfs(root, current_rel,
1857 sort_input_targets,
1858 sort_input_targets_contain_srfs);
1859 }
1860
1861 /*
1862 * If there is a DISTINCT clause, consider ways to implement that. We
1863 * build a new upperrel representing the output of this phase.
1864 */
1865 if (parse->distinctClause)
1866 {
1867 current_rel = create_distinct_paths(root,
1868 current_rel,
1869 sort_input_target);
1870 }
1871 } /* end of if (setOperations) */
1872
1873 /*
1874 * If ORDER BY was given, consider ways to implement that, and generate a
1875 * new upperrel containing only paths that emit the correct ordering and
1876 * project the correct final_target. We can apply the original
1877 * limit_tuples limit in sort costing here, but only if there are no
1878 * postponed SRFs.
1879 */
1880 if (parse->sortClause)
1881 {
1882 current_rel = create_ordered_paths(root,
1883 current_rel,
1884 final_target,
1885 final_target_parallel_safe,
1886 have_postponed_srfs ? -1.0 :
1887 limit_tuples);
1888 /* Fix things up if final_target contains SRFs */
1889 if (parse->hasTargetSRFs)
1890 adjust_paths_for_srfs(root, current_rel,
1891 final_targets,
1892 final_targets_contain_srfs);
1893 }
1894
1895 /*
1896 * Now we are prepared to build the final-output upperrel.
1897 */
1898 final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1899
1900 /*
1901 * If the input rel is marked consider_parallel and there's nothing that's
1902 * not parallel-safe in the LIMIT clause, then the final_rel can be marked
1903 * consider_parallel as well. Note that if the query has rowMarks or is
1904 * not a SELECT, consider_parallel will be false for every relation in the
1905 * query.
1906 */
1907 if (current_rel->consider_parallel &&
1908 is_parallel_safe(root, parse->limitOffset) &&
1909 is_parallel_safe(root, parse->limitCount))
1910 final_rel->consider_parallel = true;
1911
1912 /*
1913 * If the current_rel belongs to a single FDW, so does the final_rel.
1914 */
1915 final_rel->serverid = current_rel->serverid;
1916 final_rel->userid = current_rel->userid;
1917 final_rel->useridiscurrent = current_rel->useridiscurrent;
1918 final_rel->fdwroutine = current_rel->fdwroutine;
1919
1920 /*
1921 * Generate paths for the final_rel. Insert all surviving paths, with
1922 * LockRows, Limit, and/or ModifyTable steps added if needed.
1923 */
1924 foreach(lc, current_rel->pathlist)
1925 {
1926 Path *path = (Path *) lfirst(lc);
1927
1928 /*
1929 * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
1930 * (Note: we intentionally test parse->rowMarks not root->rowMarks
1931 * here. If there are only non-locking rowmarks, they should be
1932 * handled by the ModifyTable node instead. However, root->rowMarks
1933 * is what goes into the LockRows node.)
1934 */
1935 if (parse->rowMarks)
1936 {
1937 path = (Path *) create_lockrows_path(root, final_rel, path,
1938 root->rowMarks,
1939 assign_special_exec_param(root));
1940 }
1941
1942 /*
1943 * If there is a LIMIT/OFFSET clause, add the LIMIT node.
1944 */
1945 if (limit_needed(parse))
1946 {
1947 path = (Path *) create_limit_path(root, final_rel, path,
1948 parse->limitOffset,
1949 parse->limitCount,
1950 parse->limitOption,
1951 offset_est, count_est);
1952 }
1953
1954 /*
1955 * If this is an INSERT/UPDATE/DELETE/MERGE, add the ModifyTable node.
1956 */
1957 if (parse->commandType != CMD_SELECT)
1958 {
1959 Index rootRelation;
1960 List *resultRelations = NIL;
1961 List *updateColnosLists = NIL;
1962 List *withCheckOptionLists = NIL;
1963 List *returningLists = NIL;
1964 List *mergeActionLists = NIL;
1965 List *mergeJoinConditions = NIL;
1966 List *rowMarks;
1967
1968 if (bms_membership(root->all_result_relids) == BMS_MULTIPLE)
1969 {
1970 /* Inherited UPDATE/DELETE/MERGE */
1971 RelOptInfo *top_result_rel = find_base_rel(root,
1972 parse->resultRelation);
1973 int resultRelation = -1;
1974
1975 /* Pass the root result rel forward to the executor. */
1976 rootRelation = parse->resultRelation;
1977
1978 /* Add only leaf children to ModifyTable. */
1979 while ((resultRelation = bms_next_member(root->leaf_result_relids,
1980 resultRelation)) >= 0)
1981 {
1982 RelOptInfo *this_result_rel = find_base_rel(root,
1983 resultRelation);
1984
1985 /*
1986 * Also exclude any leaf rels that have turned dummy since
1987 * being added to the list, for example, by being excluded
1988 * by constraint exclusion.
1989 */
1990 if (IS_DUMMY_REL(this_result_rel))
1991 continue;
1992
1993 /* Build per-target-rel lists needed by ModifyTable */
1994 resultRelations = lappend_int(resultRelations,
1995 resultRelation);
1996 if (parse->commandType == CMD_UPDATE)
1997 {
1998 List *update_colnos = root->update_colnos;
1999
2000 if (this_result_rel != top_result_rel)
2001 update_colnos =
2002 adjust_inherited_attnums_multilevel(root,
2003 update_colnos,
2004 this_result_rel->relid,
2005 top_result_rel->relid);
2006 updateColnosLists = lappend(updateColnosLists,
2007 update_colnos);
2008 }
2009 if (parse->withCheckOptions)
2010 {
2011 List *withCheckOptions = parse->withCheckOptions;
2012
2013 if (this_result_rel != top_result_rel)
2014 withCheckOptions = (List *)
2015 adjust_appendrel_attrs_multilevel(root,
2016 (Node *) withCheckOptions,
2017 this_result_rel,
2018 top_result_rel);
2019 withCheckOptionLists = lappend(withCheckOptionLists,
2020 withCheckOptions);
2021 }
2022 if (parse->returningList)
2023 {
2024 List *returningList = parse->returningList;
2025
2026 if (this_result_rel != top_result_rel)
2027 returningList = (List *)
2028 adjust_appendrel_attrs_multilevel(root,
2029 (Node *) returningList,
2030 this_result_rel,
2031 top_result_rel);
2032 returningLists = lappend(returningLists,
2033 returningList);
2034 }
2035 if (parse->mergeActionList)
2036 {
2037 ListCell *l;
2038 List *mergeActionList = NIL;
2039
2040 /*
2041 * Copy MergeActions and translate stuff that
2042 * references attribute numbers.
2043 */
2044 foreach(l, parse->mergeActionList)
2045 {
2046 MergeAction *action = lfirst(l),
2047 *leaf_action = copyObject(action);
2048
2049 leaf_action->qual =
2050 adjust_appendrel_attrs_multilevel(root,
2051 (Node *) action->qual,
2052 this_result_rel,
2053 top_result_rel);
2054 leaf_action->targetList = (List *)
2055 adjust_appendrel_attrs_multilevel(root,
2056 (Node *) action->targetList,
2057 this_result_rel,
2058 top_result_rel);
2059 if (leaf_action->commandType == CMD_UPDATE)
2060 leaf_action->updateColnos =
2061 adjust_inherited_attnums_multilevel(root,
2062 action->updateColnos,
2063 this_result_rel->relid,
2064 top_result_rel->relid);
2065 mergeActionList = lappend(mergeActionList,
2066 leaf_action);
2067 }
2068
2069 mergeActionLists = lappend(mergeActionLists,
2070 mergeActionList);
2071 }
2072 if (parse->commandType == CMD_MERGE)
2073 {
2074 Node *mergeJoinCondition = parse->mergeJoinCondition;
2075
2076 if (this_result_rel != top_result_rel)
2077 mergeJoinCondition =
2078 adjust_appendrel_attrs_multilevel(root,
2079 mergeJoinCondition,
2080 this_result_rel,
2081 top_result_rel);
2082 mergeJoinConditions = lappend(mergeJoinConditions,
2083 mergeJoinCondition);
2084 }
2085 }
2086
2087 if (resultRelations == NIL)
2088 {
2089 /*
2090 * We managed to exclude every child rel, so generate a
2091 * dummy one-relation plan using info for the top target
2092 * rel (even though that may not be a leaf target).
2093 * Although it's clear that no data will be updated or
2094 * deleted, we still need to have a ModifyTable node so
2095 * that any statement triggers will be executed. (This
2096 * could be cleaner if we fixed nodeModifyTable.c to allow
2097 * zero target relations, but that probably wouldn't be a
2098 * net win.)
2099 */
2100 resultRelations = list_make1_int(parse->resultRelation);
2101 if (parse->commandType == CMD_UPDATE)
2102 updateColnosLists = list_make1(root->update_colnos);
2103 if (parse->withCheckOptions)
2104 withCheckOptionLists = list_make1(parse->withCheckOptions);
2105 if (parse->returningList)
2106 returningLists = list_make1(parse->returningList);
2107 if (parse->mergeActionList)
2108 mergeActionLists = list_make1(parse->mergeActionList);
2109 if (parse->commandType == CMD_MERGE)
2110 mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2111 }
2112 }
2113 else
2114 {
2115 /* Single-relation INSERT/UPDATE/DELETE/MERGE. */
2116 rootRelation = 0; /* there's no separate root rel */
2117 resultRelations = list_make1_int(parse->resultRelation);
2118 if (parse->commandType == CMD_UPDATE)
2119 updateColnosLists = list_make1(root->update_colnos);
2120 if (parse->withCheckOptions)
2121 withCheckOptionLists = list_make1(parse->withCheckOptions);
2122 if (parse->returningList)
2123 returningLists = list_make1(parse->returningList);
2124 if (parse->mergeActionList)
2125 mergeActionLists = list_make1(parse->mergeActionList);
2126 if (parse->commandType == CMD_MERGE)
2127 mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2128 }
2129
2130 /*
2131 * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
2132 * will have dealt with fetching non-locked marked rows, else we
2133 * need to have ModifyTable do that.
2134 */
2135 if (parse->rowMarks)
2136 rowMarks = NIL;
2137 else
2138 rowMarks = root->rowMarks;
2139
2140 path = (Path *)
2141 create_modifytable_path(root, final_rel,
2142 path,
2143 parse->commandType,
2144 parse->canSetTag,
2145 parse->resultRelation,
2146 rootRelation,
2147 resultRelations,
2148 updateColnosLists,
2149 withCheckOptionLists,
2150 returningLists,
2151 rowMarks,
2152 parse->onConflict,
2153 mergeActionLists,
2154 mergeJoinConditions,
2155 assign_special_exec_param(root));
2156 }
2157
2158 /* And shove it into final_rel */
2159 add_path(final_rel, path);
2160 }
2161
2162 /*
2163 * Generate partial paths for final_rel, too, if outer query levels might
2164 * be able to make use of them.
2165 */
2166 if (final_rel->consider_parallel && root->query_level > 1 &&
2167 !limit_needed(parse))
2168 {
2169 Assert(!parse->rowMarks && parse->commandType == CMD_SELECT);
2170 foreach(lc, current_rel->partial_pathlist)
2171 {
2172 Path *partial_path = (Path *) lfirst(lc);
2173
2174 add_partial_path(final_rel, partial_path);
2175 }
2176 }
2177
2178 extra.limit_needed = limit_needed(parse);
2179 extra.limit_tuples = limit_tuples;
2180 extra.count_est = count_est;
2181 extra.offset_est = offset_est;
2182
2183 /*
2184 * If there is an FDW that's responsible for all baserels of the query,
2185 * let it consider adding ForeignPaths.
2186 */
2187 if (final_rel->fdwroutine &&
2188 final_rel->fdwroutine->GetForeignUpperPaths)
2189 final_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_FINAL,
2190 current_rel, final_rel,
2191 &extra);
2192
2193 /* Let extensions possibly add some more paths */
2194 if (create_upper_paths_hook)
2195 (*create_upper_paths_hook) (root, UPPERREL_FINAL,
2196 current_rel, final_rel, &extra);
2197
2198 /* Note: currently, we leave it to callers to do set_cheapest() */
2199}
2200
2201/*
2202 * Do preprocessing for groupingSets clause and related data.
2203 *
2204 * We expect that parse->groupingSets has already been expanded into a flat
2205 * list of grouping sets (that is, just integer Lists of ressortgroupref
2206 * numbers) by expand_grouping_sets(). This function handles the preliminary
2207 * steps of organizing the grouping sets into lists of rollups, and preparing
2208 * annotations which will later be filled in with size estimates.
2209 */
2210 static grouping_sets_data *
2211 preprocess_grouping_sets(PlannerInfo *root)
2212{
2213 Query *parse = root->parse;
2214 List *sets;
2215 int maxref = 0;
2216 ListCell *lc_set;
2217 grouping_sets_data *gd = palloc0(sizeof(grouping_sets_data));
2218
2219 /*
2220 * We don't currently make any attempt to optimize the groupClause when
2221 * there are grouping sets, so just duplicate it in processed_groupClause.
2222 */
2223 root->processed_groupClause = parse->groupClause;
2224
2225 /* Detect unhashable and unsortable grouping expressions */
2226 gd->any_hashable = false;
2227 gd->unhashable_refs = NULL;
2228 gd->unsortable_refs = NULL;
2229 gd->unsortable_sets = NIL;
2230
2231 if (parse->groupClause)
2232 {
2233 ListCell *lc;
2234
2235 foreach(lc, parse->groupClause)
2236 {
2237 SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
2238 Index ref = gc->tleSortGroupRef;
2239
2240 if (ref > maxref)
2241 maxref = ref;
2242
2243 if (!gc->hashable)
2244 gd->unhashable_refs = bms_add_member(gd->unhashable_refs, ref);
2245
2246 if (!OidIsValid(gc->sortop))
2247 gd->unsortable_refs = bms_add_member(gd->unsortable_refs, ref);
2248 }
2249 }
2250
2251 /* Allocate workspace array for remapping */
2252 gd->tleref_to_colnum_map = (int *) palloc((maxref + 1) * sizeof(int));
2253
2254 /*
2255 * If we have any unsortable sets, we must extract them before trying to
2256 * prepare rollups. Unsortable sets don't go through
2257 * reorder_grouping_sets, so we must apply the GroupingSetData annotation
2258 * here.
2259 */
2260 if (!bms_is_empty(gd->unsortable_refs))
2261 {
2262 List *sortable_sets = NIL;
2263 ListCell *lc;
2264
2265 foreach(lc, parse->groupingSets)
2266 {
2267 List *gset = (List *) lfirst(lc);
2268
2269 if (bms_overlap_list(gd->unsortable_refs, gset))
2270 {
2271 GroupingSetData *gs = makeNode(GroupingSetData);
2272
2273 gs->set = gset;
2274 gd->unsortable_sets = lappend(gd->unsortable_sets, gs);
2275
2276 /*
2277 * We must enforce here that an unsortable set is hashable;
2278 * later code assumes this. Parse analysis only checks that
2279 * every individual column is either hashable or sortable.
2280 *
2281 * Note that passing this test doesn't guarantee we can
2282 * generate a plan; there might be other showstoppers.
2283 */
2284 if (bms_overlap_list(gd->unhashable_refs, gset))
2285 ereport(ERROR,
2286 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2287 errmsg("could not implement GROUP BY"),
2288 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
2289 }
2290 else
2291 sortable_sets = lappend(sortable_sets, gset);
2292 }
2293
2294 if (sortable_sets)
2295 sets = extract_rollup_sets(sortable_sets);
2296 else
2297 sets = NIL;
2298 }
2299 else
2300 sets = extract_rollup_sets(parse->groupingSets);
2301
2302 foreach(lc_set, sets)
2303 {
2304 List *current_sets = (List *) lfirst(lc_set);
2305 RollupData *rollup = makeNode(RollupData);
2306 GroupingSetData *gs;
2307
2308 /*
2309 * Reorder the current list of grouping sets into correct prefix
2310 * order. If only one aggregation pass is needed, try to make the
2311 * list match the ORDER BY clause; if more than one pass is needed, we
2312 * don't bother with that.
2313 *
2314 * Note that this reorders the sets from smallest-member-first to
2315 * largest-member-first, and applies the GroupingSetData annotations,
2316 * though the data will be filled in later.
2317 */
2318 current_sets = reorder_grouping_sets(current_sets,
2319 (list_length(sets) == 1
2320 ? parse->sortClause
2321 : NIL));
2322
2323 /*
2324 * Get the initial (and therefore largest) grouping set.
2325 */
2326 gs = linitial_node(GroupingSetData, current_sets);
2327
2328 /*
2329 * Order the groupClause appropriately. If the first grouping set is
2330 * empty, then the groupClause must also be empty; otherwise we have
2331 * to force the groupClause to match that grouping set's order.
2332 *
2333 * (The first grouping set can be empty even though parse->groupClause
2334 * is not empty only if all non-empty grouping sets are unsortable.
2335 * The groupClauses for hashed grouping sets are built later on.)
2336 */
2337 if (gs->set)
2338 rollup->groupClause = preprocess_groupclause(root, gs->set);
2339 else
2340 rollup->groupClause = NIL;
2341
2342 /*
2343 * Is it hashable? We pretend empty sets are hashable even though we
2344 * actually force them not to be hashed later. But don't bother if
2345 * there's nothing but empty sets (since in that case we can't hash
2346 * anything).
2347 */
2348 if (gs->set &&
2349 !bms_overlap_list(gd->unhashable_refs, gs->set))
2350 {
2351 rollup->hashable = true;
2352 gd->any_hashable = true;
2353 }
2354
2355 /*
2356 * Now that we've pinned down an order for the groupClause for this
2357 * list of grouping sets, we need to remap the entries in the grouping
2358 * sets from sortgrouprefs to plain indices (0-based) into the
2359 * groupClause for this collection of grouping sets. We keep the
2360 * original form for later use, though.
2361 */
2362 rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
2363 current_sets,
2364 gd->tleref_to_colnum_map);
2365 rollup->gsets_data = current_sets;
2366
2367 gd->rollups = lappend(gd->rollups, rollup);
2368 }
2369
2370 if (gd->unsortable_sets)
2371 {
2372 /*
2373 * We have not yet pinned down a groupclause for this, but we will
2374 * need index-based lists for estimation purposes. Construct
2375 * hash_sets_idx based on the entire original groupclause for now.
2376 */
2377 gd->hash_sets_idx = remap_to_groupclause_idx(parse->groupClause,
2378 gd->unsortable_sets,
2379 gd->tleref_to_colnum_map);
2380 gd->any_hashable = true;
2381 }
2382
2383 return gd;
2384}
2385
2386/*
2387 * Given a groupclause and a list of GroupingSetData, return equivalent sets
2388 * (without annotation) mapped to indexes into the given groupclause.
2389 */
2390 static List *
2391 remap_to_groupclause_idx(List *groupClause,
2392 List *gsets,
2393 int *tleref_to_colnum_map)
2394{
2395 int ref = 0;
2396 List *result = NIL;
2397 ListCell *lc;
2398
2399 foreach(lc, groupClause)
2400 {
2401 SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
2402
2403 tleref_to_colnum_map[gc->tleSortGroupRef] = ref++;
2404 }
2405
2406 foreach(lc, gsets)
2407 {
2408 List *set = NIL;
2409 ListCell *lc2;
2410 GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
2411
2412 foreach(lc2, gs->set)
2413 {
2414 set = lappend_int(set, tleref_to_colnum_map[lfirst_int(lc2)]);
2415 }
2416
2417 result = lappend(result, set);
2418 }
2419
2420 return result;
2421}
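A small illustration of the remapping above, using hypothetical sortgroupref values:

/* A groupClause whose items carry tleSortGroupRefs (5, 7, 9) produces the map
 * 5 -> 0, 7 -> 1, 9 -> 2, so a grouping set stored as refs (9, 5) is returned
 * as the column-index list (2, 0). */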
2422
2423
2424/*
2425 * preprocess_rowmarks - set up PlanRowMarks if needed
2426 */
2427 static void
2428 preprocess_rowmarks(PlannerInfo *root)
2429{
2430 Query *parse = root->parse;
2431 Bitmapset *rels;
2432 List *prowmarks;
2433 ListCell *l;
2434 int i;
2435
2436 if (parse->rowMarks)
2437 {
2438 /*
2439 * We've got trouble if FOR [KEY] UPDATE/SHARE appears inside
2440 * grouping, since grouping renders a reference to individual tuple
2441 * CTIDs invalid. This is also checked at parse time, but that's
2442 * insufficient because of rule substitution, query pullup, etc.
2443 */
2444 CheckSelectLocking(parse, linitial_node(RowMarkClause,
2445 parse->rowMarks)->strength);
2446 }
2447 else
2448 {
2449 /*
2450 * We only need rowmarks for UPDATE, DELETE, MERGE, or FOR [KEY]
2451 * UPDATE/SHARE.
2452 */
2453 if (parse->commandType != CMD_UPDATE &&
2454 parse->commandType != CMD_DELETE &&
2455 parse->commandType != CMD_MERGE)
2456 return;
2457 }
2458
2459 /*
2460 * We need to have rowmarks for all base relations except the target. We
2461 * make a bitmapset of all base rels and then remove the items we don't
2462 * need or have FOR [KEY] UPDATE/SHARE marks for.
2463 */
2464 rels = get_relids_in_jointree((Node *) parse->jointree, false, false);
2465 if (parse->resultRelation)
2466 rels = bms_del_member(rels, parse->resultRelation);
2467
2468 /*
2469 * Convert RowMarkClauses to PlanRowMark representation.
2470 */
2471 prowmarks = NIL;
2472 foreach(l, parse->rowMarks)
2473 {
2474 RowMarkClause *rc = lfirst_node(RowMarkClause, l);
2475 RangeTblEntry *rte = rt_fetch(rc->rti, parse->rtable);
2476 PlanRowMark *newrc;
2477
2478 /*
2479 * Currently, it is syntactically impossible to have FOR UPDATE et al
2480 * applied to an update/delete target rel. If that ever becomes
2481 * possible, we should drop the target from the PlanRowMark list.
2482 */
2483 Assert(rc->rti != parse->resultRelation);
2484
2485 /*
2486 * Ignore RowMarkClauses for subqueries; they aren't real tables and
2487 * can't support true locking. Subqueries that got flattened into the
2488 * main query should be ignored completely. Any that didn't will get
2489 * ROW_MARK_COPY items in the next loop.
2490 */
2491 if (rte->rtekind != RTE_RELATION)
2492 continue;
2493
2494 rels = bms_del_member(rels, rc->rti);
2495
2496 newrc = makeNode(PlanRowMark);
2497 newrc->rti = newrc->prti = rc->rti;
2498 newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2499 newrc->markType = select_rowmark_type(rte, rc->strength);
2500 newrc->allMarkTypes = (1 << newrc->markType);
2501 newrc->strength = rc->strength;
2502 newrc->waitPolicy = rc->waitPolicy;
2503 newrc->isParent = false;
2504
2505 prowmarks = lappend(prowmarks, newrc);
2506 }
2507
2508 /*
2509 * Now, add rowmarks for any non-target, non-locked base relations.
2510 */
2511 i = 0;
2512 foreach(l, parse->rtable)
2513 {
2514 RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
2515 PlanRowMark *newrc;
2516
2517 i++;
2518 if (!bms_is_member(i, rels))
2519 continue;
2520
2521 newrc = makeNode(PlanRowMark);
2522 newrc->rti = newrc->prti = i;
2523 newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2524 newrc->markType = select_rowmark_type(rte, LCS_NONE);
2525 newrc->allMarkTypes = (1 << newrc->markType);
2526 newrc->strength = LCS_NONE;
2527 newrc->waitPolicy = LockWaitBlock; /* doesn't matter */
2528 newrc->isParent = false;
2529
2530 prowmarks = lappend(prowmarks, newrc);
2531 }
2532
2533 root->rowMarks = prowmarks;
2534}
2535
2536/*
2537 * Select RowMarkType to use for a given table
2538 */
2539 static RowMarkType
2540 select_rowmark_type(RangeTblEntry *rte, LockClauseStrength strength)
2541{
2542 if (rte->rtekind != RTE_RELATION)
2543 {
2544 /* If it's not a table at all, use ROW_MARK_COPY */
2545 return ROW_MARK_COPY;
2546 }
2547 else if (rte->relkind == RELKIND_FOREIGN_TABLE)
2548 {
2549 /* Let the FDW select the rowmark type, if it wants to */
2550 FdwRoutine *fdwroutine = GetFdwRoutineByRelId(rte->relid);
2551
2552 if (fdwroutine->GetForeignRowMarkType != NULL)
2553 return fdwroutine->GetForeignRowMarkType(rte, strength);
2554 /* Otherwise, use ROW_MARK_COPY by default */
2555 return ROW_MARK_COPY;
2556 }
2557 else
2558 {
2559 /* Regular table, apply the appropriate lock type */
2560 switch (strength)
2561 {
2562 case LCS_NONE:
2563
2564 /*
2565 * We don't need a tuple lock, only the ability to re-fetch
2566 * the row.
2567 */
2568 return ROW_MARK_REFERENCE;
2569 break;
2570 case LCS_FORKEYSHARE:
2571 return ROW_MARK_KEYSHARE;
2572 break;
2573 case LCS_FORSHARE:
2574 return ROW_MARK_SHARE;
2575 break;
2576 case LCS_FORNOKEYUPDATE:
2577 return ROW_MARK_NOKEYEXCLUSIVE;
2578 break;
2579 case LCS_FORUPDATE:
2580 return ROW_MARK_EXCLUSIVE;
2581 break;
2582 }
2583 elog(ERROR, "unrecognized LockClauseStrength %d", (int) strength);
2584 return ROW_MARK_EXCLUSIVE; /* keep compiler quiet */
2585 }
2586}
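/* Examples of the mapping above: FOR SHARE on a regular table yields
 * ROW_MARK_SHARE; the same clause applied to a subquery or other non-table
 * RTE falls back to ROW_MARK_COPY; and a foreign table may override the
 * choice through its FDW's GetForeignRowMarkType callback. */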
2587
2588/*
2589 * preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses
2590 *
2591 * We try to estimate the values of the LIMIT/OFFSET clauses, and pass the
2592 * results back in *count_est and *offset_est. These variables are set to
2593 * 0 if the corresponding clause is not present, and -1 if it's present
2594 * but we couldn't estimate the value for it. (The "0" convention is OK
2595 * for OFFSET but a little bit bogus for LIMIT: effectively we estimate
2596 * LIMIT 0 as though it were LIMIT 1. But this is in line with the planner's
2597 * usual practice of never estimating less than one row.) These values will
2598 * be passed to create_limit_path, which see if you change this code.
2599 *
2600 * The return value is the suitably adjusted tuple_fraction to use for
2601 * planning the query. This adjustment is not overridable, since it reflects
2602 * plan actions that grouping_planner() will certainly take, not assumptions
2603 * about context.
2604 */
2605static double
2606preprocess_limit(PlannerInfo *root, double tuple_fraction,
2607 int64 *offset_est, int64 *count_est)
2608{
2609 Query *parse = root->parse;
2610 Node *est;
2611 double limit_fraction;
2612
2613 /* Should not be called unless LIMIT or OFFSET */
2614 Assert(parse->limitCount || parse->limitOffset);
2615
2616 /*
2617 * Try to obtain the clause values. We use estimate_expression_value
2618 * primarily because it can sometimes do something useful with Params.
2619 */
2620 if (parse->limitCount)
2621 {
2622 est = estimate_expression_value(root, parse->limitCount);
2623 if (est && IsA(est, Const))
2624 {
2625 if (((Const *) est)->constisnull)
2626 {
2627 /* NULL indicates LIMIT ALL, ie, no limit */
2628 *count_est = 0; /* treat as not present */
2629 }
2630 else
2631 {
2632 *count_est = DatumGetInt64(((Const *) est)->constvalue);
2633 if (*count_est <= 0)
2634 *count_est = 1; /* force to at least 1 */
2635 }
2636 }
2637 else
2638 *count_est = -1; /* can't estimate */
2639 }
2640 else
2641 *count_est = 0; /* not present */
2642
2643 if (parse->limitOffset)
2644 {
2645 est = estimate_expression_value(root, parse->limitOffset);
2646 if (est && IsA(est, Const))
2647 {
2648 if (((Const *) est)->constisnull)
2649 {
2650 /* Treat NULL as no offset; the executor will too */
2651 *offset_est = 0; /* treat as not present */
2652 }
2653 else
2654 {
2655 *offset_est = DatumGetInt64(((Const *) est)->constvalue);
2656 if (*offset_est < 0)
2657 *offset_est = 0; /* treat as not present */
2658 }
2659 }
2660 else
2661 *offset_est = -1; /* can't estimate */
2662 }
2663 else
2664 *offset_est = 0; /* not present */
2665
2666 if (*count_est != 0)
2667 {
2668 /*
2669 * A LIMIT clause limits the absolute number of tuples returned.
2670 * However, if it's not a constant LIMIT then we have to guess; for
2671 * lack of a better idea, assume 10% of the plan's result is wanted.
2672 */
2673 if (*count_est < 0 || *offset_est < 0)
2674 {
2675 /* LIMIT or OFFSET is an expression ... punt ... */
2676 limit_fraction = 0.10;
2677 }
2678 else
2679 {
2680 /* LIMIT (plus OFFSET, if any) is max number of tuples needed */
2681 limit_fraction = (double) *count_est + (double) *offset_est;
2682 }
2683
2684 /*
2685 * If we have absolute limits from both caller and LIMIT, use the
2686 * smaller value; likewise if they are both fractional. If one is
2687 * fractional and the other absolute, we can't easily determine which
2688 * is smaller, but we use the heuristic that the absolute will usually
2689 * be smaller.
2690 */
2691 if (tuple_fraction >= 1.0)
2692 {
2693 if (limit_fraction >= 1.0)
2694 {
2695 /* both absolute */
2696 tuple_fraction = Min(tuple_fraction, limit_fraction);
2697 }
2698 else
2699 {
2700 /* caller absolute, limit fractional; use caller's value */
2701 }
2702 }
2703 else if (tuple_fraction > 0.0)
2704 {
2705 if (limit_fraction >= 1.0)
2706 {
2707 /* caller fractional, limit absolute; use limit */
2708 tuple_fraction = limit_fraction;
2709 }
2710 else
2711 {
2712 /* both fractional */
2713 tuple_fraction = Min(tuple_fraction, limit_fraction);
2714 }
2715 }
2716 else
2717 {
2718 /* no info from caller, just use limit */
2719 tuple_fraction = limit_fraction;
2720 }
2721 }
2722 else if (*offset_est != 0 && tuple_fraction > 0.0)
2723 {
2724 /*
2725 * We have an OFFSET but no LIMIT. This acts entirely differently
2726 * from the LIMIT case: here, we need to increase rather than decrease
2727 * the caller's tuple_fraction, because the OFFSET acts to cause more
2728 * tuples to be fetched instead of fewer. This only matters if we got
2729 * a tuple_fraction > 0, however.
2730 *
2731 * As above, use 10% if OFFSET is present but unestimatable.
2732 */
2733 if (*offset_est < 0)
2734 limit_fraction = 0.10;
2735 else
2736 limit_fraction = (double) *offset_est;
2737
2738 /*
2739 * If we have absolute counts from both caller and OFFSET, add them
2740 * together; likewise if they are both fractional. If one is
2741 * fractional and the other absolute, we want to take the larger, and
2742 * we heuristically assume that's the fractional one.
2743 */
2744 if (tuple_fraction >= 1.0)
2745 {
2746 if (limit_fraction >= 1.0)
2747 {
2748 /* both absolute, so add them together */
2749 tuple_fraction += limit_fraction;
2750 }
2751 else
2752 {
2753 /* caller absolute, limit fractional; use limit */
2754 tuple_fraction = limit_fraction;
2755 }
2756 }
2757 else
2758 {
2759 if (limit_fraction >= 1.0)
2760 {
2761 /* caller fractional, limit absolute; use caller's value */
2762 }
2763 else
2764 {
2765 /* both fractional, so add them together */
2766 tuple_fraction += limit_fraction;
2767 if (tuple_fraction >= 1.0)
2768 tuple_fraction = 0.0; /* assume fetch all */
2769 }
2770 }
2771 }
2772
2773 return tuple_fraction;
2774}
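A worked example of the adjustment rules above, with hypothetical values:

/* For "... LIMIT 20 OFFSET 30" both clauses are constants, so *count_est = 20
 * and *offset_est = 30, giving limit_fraction = 50.  If the caller passed a
 * fractional tuple_fraction such as 0.1, the absolute LIMIT estimate wins and
 * 50.0 is returned. */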
2775
2776/*
2777 * limit_needed - do we actually need a Limit plan node?
2778 *
2779 * If we have constant-zero OFFSET and constant-null LIMIT, we can skip adding
2780 * a Limit node. This is worth checking for because "OFFSET 0" is a common
2781 * locution for an optimization fence. (Because other places in the planner
2782 * merely check whether parse->limitOffset isn't NULL, it will still work as
2783 * an optimization fence --- we're just suppressing unnecessary run-time
2784 * overhead.)
2785 *
2786 * This might look like it could be merged into preprocess_limit, but there's
2787 * a key distinction: here we need hard constants in OFFSET/LIMIT, whereas
2788 * in preprocess_limit it's good enough to consider estimated values.
2789 */
2790 bool
2791 limit_needed(Query *parse)
2792{
2793 Node *node;
2794
2795 node = parse->limitCount;
2796 if (node)
2797 {
2798 if (IsA(node, Const))
2799 {
2800 /* NULL indicates LIMIT ALL, ie, no limit */
2801 if (!((Const *) node)->constisnull)
2802 return true; /* LIMIT with a constant value */
2803 }
2804 else
2805 return true; /* non-constant LIMIT */
2806 }
2807
2808 node = parse->limitOffset;
2809 if (node)
2810 {
2811 if (IsA(node, Const))
2812 {
2813 /* Treat NULL as no offset; the executor would too */
2814 if (!((Const *) node)->constisnull)
2815 {
2816 int64 offset = DatumGetInt64(((Const *) node)->constvalue);
2817
2818 if (offset != 0)
2819 return true; /* OFFSET with a nonzero value */
2820 }
2821 }
2822 else
2823 return true; /* non-constant OFFSET */
2824 }
2825
2826 return false; /* don't need a Limit plan node */
2827}
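/* Example (hypothetical query): "SELECT ... OFFSET 0" with no LIMIT has a
 * constant-zero offset, so limit_needed() returns false and no Limit node is
 * added, yet the non-NULL limitOffset still acts as an optimization fence
 * elsewhere in the planner. */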
2828
2829/*
2830 * preprocess_groupclause - do preparatory work on GROUP BY clause
2831 *
2832 * The idea here is to adjust the ordering of the GROUP BY elements
2833 * (which in itself is semantically insignificant) to match ORDER BY,
2834 * thereby allowing a single sort operation to both implement the ORDER BY
2835 * requirement and set up for a Unique step that implements GROUP BY.
2836 * We also consider a partial match between GROUP BY and ORDER BY elements,
2837 * which can allow ORDER BY to be implemented using an incremental sort.
2838 *
2839 * We also consider other orderings of the GROUP BY elements, which could
2840 * match the sort ordering of other possible plans (eg an indexscan) and
2841 * thereby reduce cost. This is implemented during the generation of grouping
2842 * paths. See get_useful_group_keys_orderings() for details.
2843 *
2844 * Note: we need no comparable processing of the distinctClause because
2845 * the parser already enforced that that matches ORDER BY.
2846 *
2847 * Note: we return a fresh List, but its elements are the same
2848 * SortGroupClauses appearing in parse->groupClause. This is important
2849 * because later processing may modify the processed_groupClause list.
2850 *
2851 * For grouping sets, the order of items is instead forced to agree with that
2852 * of the grouping set (and items not in the grouping set are skipped). The
2853 * work of sorting the order of grouping set elements to match the ORDER BY if
2854 * possible is done elsewhere.
2855 */
2856 static List *
2857 preprocess_groupclause(PlannerInfo *root, List *force)
2858{
2859 Query *parse = root->parse;
2860 List *new_groupclause = NIL;
2861 ListCell *sl;
2862 ListCell *gl;
2863
2864 /* For grouping sets, we need to force the ordering */
2865 if (force)
2866 {
2867 foreach(sl, force)
2868 {
2869 Index ref = lfirst_int(sl);
2870 SortGroupClause *cl = get_sortgroupref_clause(ref, parse->groupClause);
2871
2872 new_groupclause = lappend(new_groupclause, cl);
2873 }
2874
2875 return new_groupclause;
2876 }
2877
2878 /* If no ORDER BY, nothing useful to do here */
2879 if (parse->sortClause == NIL)
2880 return list_copy(parse->groupClause);
2881
2882 /*
2883 * Scan the ORDER BY clause and construct a list of matching GROUP BY
2884 * items, but only as far as we can make a matching prefix.
2885 *
2886 * This code assumes that the sortClause contains no duplicate items.
2887 */
2888 foreach(sl, parse->sortClause)
2889 {
2890 SortGroupClause *sc = lfirst_node(SortGroupClause, sl);
2891
2892 foreach(gl, parse->groupClause)
2893 {
2894 SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
2895
2896 if (equal(gc, sc))
2897 {
2898 new_groupclause = lappend(new_groupclause, gc);
2899 break;
2900 }
2901 }
2902 if (gl == NULL)
2903 break; /* no match, so stop scanning */
2904 }
2905
2906
2907 /* If no match at all, no point in reordering GROUP BY */
2908 if (new_groupclause == NIL)
2909 return list_copy(parse->groupClause);
2910
2911 /*
2912 * Add any remaining GROUP BY items to the new list. We don't require a
2913 * complete match, because even a partial match allows ORDER BY to be
2914 * implemented using an incremental sort. Also, give up if there are any
2915 * non-sortable GROUP BY items, since then there's no hope anyway.
2916 */
2917 foreach(gl, parse->groupClause)
2918 {
2919 SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
2920
2921 if (list_member_ptr(new_groupclause, gc))
2922 continue; /* it matched an ORDER BY item */
2923 if (!OidIsValid(gc->sortop)) /* give up, GROUP BY can't be sorted */
2924 return list_copy(parse->groupClause);
2925 new_groupclause = lappend(new_groupclause, gc);
2926 }
2927
2928 /* Success --- install the rearranged GROUP BY list */
2929 Assert(list_length(parse->groupClause) == list_length(new_groupclause));
2930 return new_groupclause;
2931}
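A brief illustration of the reordering, for a hypothetical query:

/* For "GROUP BY b, a ORDER BY a" the ORDER BY scan emits the matching prefix
 * (a); the remaining sortable GROUP BY item (b) is then appended, yielding the
 * rearranged list (a, b) so that a single sort can serve both the grouping and
 * the ordering. */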
2932
2933/*
2934 * Extract lists of grouping sets that can be implemented using a single
2935 * rollup-type aggregate pass each. Returns a list of lists of grouping sets.
2936 *
2937 * Input must be sorted with smallest sets first. Result has each sublist
2938 * sorted with smallest sets first.
2939 *
2940 * We want to produce the absolute minimum possible number of lists here to
2941 * avoid excess sorts. Fortunately, there is an algorithm for this; the problem
2942 * of finding the minimal partition of a partially-ordered set into chains
2943 * (which is what we need, taking the list of grouping sets as a poset ordered
2944 * by set inclusion) can be mapped to the problem of finding the maximum
2945 * cardinality matching on a bipartite graph, which is solvable in polynomial
2946 * time with a worst case of no worse than O(n^2.5) and usually much
2947 * better. Since our N is at most 4096, we don't need to consider fallbacks to
2948 * heuristic or approximate methods. (Planning time for a 12-d cube is under
2949 * half a second on my modest system even with optimization off and assertions
2950 * on.)
2951 */
2952 static List *
2953 extract_rollup_sets(List *groupingSets)
2954{
2955 int num_sets_raw = list_length(groupingSets);
2956 int num_empty = 0;
2957 int num_sets = 0; /* distinct sets */
2958 int num_chains = 0;
2959 List *result = NIL;
2960 List **results;
2961 List **orig_sets;
2962 Bitmapset **set_masks;
2963 int *chains;
2964 short **adjacency;
2965 short *adjacency_buf;
2966 BipartiteMatchState *state;
2967 int i;
2968 int j;
2969 int j_size;
2970 ListCell *lc1 = list_head(groupingSets);
2971 ListCell *lc;
2972
2973 /*
2974 * Start by stripping out empty sets. The algorithm doesn't require this,
2975 * but the planner currently needs all empty sets to be returned in the
2976 * first list, so we strip them here and add them back after.
2977 */
2978 while (lc1 && lfirst(lc1) == NIL)
2979 {
2980 ++num_empty;
2981 lc1 = lnext(groupingSets, lc1);
2982 }
2983
2984 /* bail out now if it turns out that all we had were empty sets. */
2985 if (!lc1)
2986 return list_make1(groupingSets);
2987
2988 /*----------
2989 * We don't strictly need to remove duplicate sets here, but if we don't,
2990 * they tend to become scattered through the result, which is a bit
2991 * confusing (and irritating if we ever decide to optimize them out).
2992 * So we remove them here and add them back after.
2993 *
2994 * For each non-duplicate set, we fill in the following:
2995 *
2996 * orig_sets[i] = list of the original set lists
2997 * set_masks[i] = bitmapset for testing inclusion
2998 * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
2999 *
3000 * chains[i] will be the result group this set is assigned to.
3001 *
3002 * We index all of these from 1 rather than 0 because it is convenient
3003 * to leave 0 free for the NIL node in the graph algorithm.
3004 *----------
3005 */
3006 orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
3007 set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
3008 adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
3009 adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
3010
3011 j_size = 0;
3012 j = 0;
3013 i = 1;
3014
3015 for_each_cell(lc, groupingSets, lc1)
3016 {
3017 List *candidate = (List *) lfirst(lc);
3018 Bitmapset *candidate_set = NULL;
3019 ListCell *lc2;
3020 int dup_of = 0;
3021
3022 foreach(lc2, candidate)
3023 {
3024 candidate_set = bms_add_member(candidate_set, lfirst_int(lc2));
3025 }
3026
3027 /* we can only be a dup if we're the same length as a previous set */
3028 if (j_size == list_length(candidate))
3029 {
3030 int k;
3031
3032 for (k = j; k < i; ++k)
3033 {
3034 if (bms_equal(set_masks[k], candidate_set))
3035 {
3036 dup_of = k;
3037 break;
3038 }
3039 }
3040 }
3041 else if (j_size < list_length(candidate))
3042 {
3043 j_size = list_length(candidate);
3044 j = i;
3045 }
3046
3047 if (dup_of > 0)
3048 {
3049 orig_sets[dup_of] = lappend(orig_sets[dup_of], candidate);
3050 bms_free(candidate_set);
3051 }
3052 else
3053 {
3054 int k;
3055 int n_adj = 0;
3056
3057 orig_sets[i] = list_make1(candidate);
3058 set_masks[i] = candidate_set;
3059
3060 /* fill in adjacency list; no need to compare equal-size sets */
3061
3062 for (k = j - 1; k > 0; --k)
3063 {
3064 if (bms_is_subset(set_masks[k], candidate_set))
3065 adjacency_buf[++n_adj] = k;
3066 }
3067
3068 if (n_adj > 0)
3069 {
3070 adjacency_buf[0] = n_adj;
3071 adjacency[i] = palloc((n_adj + 1) * sizeof(short));
3072 memcpy(adjacency[i], adjacency_buf, (n_adj + 1) * sizeof(short));
3073 }
3074 else
3075 adjacency[i] = NULL;
3076
3077 ++i;
3078 }
3079 }
3080
3081 num_sets = i - 1;
3082
3083 /*
3084 * Apply the graph matching algorithm to do the work.
3085 */
3086 state = BipartiteMatch(num_sets, num_sets, adjacency);
3087
3088 /*
3089 * Now, the state->pair* fields have the info we need to assign sets to
3090 * chains. Two sets (u,v) belong to the same chain if pair_uv[u] = v or
3091 * pair_vu[v] = u (both will be true, but we check both so that we can do
3092 * it in one pass)
3093 */
3094 chains = palloc0((num_sets + 1) * sizeof(int));
3095
3096 for (i = 1; i <= num_sets; ++i)
3097 {
3098 int u = state->pair_vu[i];
3099 int v = state->pair_uv[i];
3100
3101 if (u > 0 && u < i)
3102 chains[i] = chains[u];
3103 else if (v > 0 && v < i)
3104 chains[i] = chains[v];
3105 else
3106 chains[i] = ++num_chains;
3107 }
3108
3109 /* build result lists. */
3110 results = palloc0((num_chains + 1) * sizeof(List *));
3111
3112 for (i = 1; i <= num_sets; ++i)
3113 {
3114 int c = chains[i];
3115
3116 Assert(c > 0);
3117
3118 results[c] = list_concat(results[c], orig_sets[i]);
3119 }
3120
3121 /* push any empty sets back on the first list. */
3122 while (num_empty-- > 0)
3123 results[1] = lcons(NIL, results[1]);
3124
3125 /* make result list */
3126 for (i = 1; i <= num_chains; ++i)
3127 result = lappend(result, results[i]);
3128
3129 /*
3130 * Free all the things.
3131 *
3132 * (This is over-fussy for small sets but for large sets we could have
3133 * tied up a nontrivial amount of memory.)
3134 */
3135 BipartiteMatchFree(state);
3136 pfree(results);
3137 pfree(chains);
3138 for (i = 1; i <= num_sets; ++i)
3139 if (adjacency[i])
3140 pfree(adjacency[i]);
3141 pfree(adjacency);
3142 pfree(adjacency_buf);
3143 pfree(orig_sets);
3144 for (i = 1; i <= num_sets; ++i)
3145 bms_free(set_masks[i]);
3146 pfree(set_masks);
3147
3148 return result;
3149}
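A worked example of the chain decomposition; the matching step may pick an equivalent minimal partition:

/* For GROUPING SETS ((a,b,c), (a,b), (c), ()) the empty set is stripped first.
 * The remaining sets form a poset under inclusion whose largest antichain is
 * {(a,b), (c)}, so two chains suffice, e.g. {(a,b), (a,b,c)} and {(c)}; the
 * empty set is then pushed back onto the first chain's list. */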
3150
3151/*
3152 * Reorder the elements of a list of grouping sets such that they have correct
3153 * prefix relationships. Also inserts the GroupingSetData annotations.
3154 *
3155 * The input must be ordered with smallest sets first; the result is returned
3156 * with largest sets first. Note that the result shares no list substructure
3157 * with the input, so it's safe for the caller to modify it later.
3158 *
3159 * If we're passed in a sortclause, we follow its order of columns to the
3160 * extent possible, to minimize the chance that we add unnecessary sorts.
3161 * (We're trying here to ensure that GROUPING SETS ((a,b,c),(c)) ORDER BY c,b,a
3162 * gets implemented in one pass.)
3163 */
3164static List *
3165reorder_grouping_sets(List *groupingSets, List *sortclause)
3166{
3167 ListCell *lc;
3168 List *previous = NIL;
3169 List *result = NIL;
3170
3171 foreach(lc, groupingSets)
3172 {
3173 List *candidate = (List *) lfirst(lc);
3174 List *new_elems = list_difference_int(candidate, previous);
3175 GroupingSetData *gs = makeNode(GroupingSetData);
3176
3177 while (list_length(sortclause) > list_length(previous) &&
3178 new_elems != NIL)
3179 {
3180 SortGroupClause *sc = list_nth(sortclause, list_length(previous));
3181 int ref = sc->tleSortGroupRef;
3182
3183 if (list_member_int(new_elems, ref))
3184 {
3185 previous = lappend_int(previous, ref);
3186 new_elems = list_delete_int(new_elems, ref);
3187 }
3188 else
3189 {
3190 /* diverged from the sortclause; give up on it */
3191 sortclause = NIL;
3192 break;
3193 }
3194 }
3195
3196 previous = list_concat(previous, new_elems);
3197
3198 gs->set = list_copy(previous);
3199 result = lcons(gs, result);
3200 }
3201
3202 list_free(previous);
3203
3204 return result;
3205}
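Continuing the example in the header comment above:

/* Input ((c), (a,b,c)) with sortclause (c, b, a): the first set fixes the
 * prefix (c); the larger set's new elements are consumed in sortclause order,
 * giving (c, b, a).  The result is returned largest-first, ((c,b,a), (c)), so
 * a single sort on (c, b, a) covers both sets and the ORDER BY. */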
3206
3207/*
3208 * has_volatile_pathkey
3209 * Returns true if any PathKey in 'keys' has an EquivalenceClass
3210 * containing a volatile function. Otherwise returns false.
3211 */
3212 static bool
3213 has_volatile_pathkey(List *keys)
3214{
3215 ListCell *lc;
3216
3217 foreach(lc, keys)
3218 {
3219 PathKey *pathkey = lfirst_node(PathKey, lc);
3220
3221 if (pathkey->pk_eclass->ec_has_volatile)
3222 return true;
3223 }
3224
3225 return false;
3226}
3227
3228/*
3229 * adjust_group_pathkeys_for_groupagg
3230 * Add pathkeys to root->group_pathkeys to reflect the best set of
3231 * pre-ordered input for ordered aggregates.
3232 *
3233 * We define "best" as the pathkeys that suit the largest number of
3234 * aggregate functions. We find these by looking at the first ORDER BY /
3235 * DISTINCT aggregate and taking the pathkeys for that before searching for
3236 * other aggregates that require the same or a more strict variation of the
3237 * same pathkeys. We then repeat that process for any remaining aggregates
3238 * with different pathkeys and if we find another set of pathkeys that suits a
3239 * larger number of aggregates then we select those pathkeys instead.
3240 *
3241 * When the best pathkeys are found we also mark each Aggref that can use
3242 * those pathkeys as aggpresorted = true.
3243 *
3244 * Note: When an aggregate function's ORDER BY / DISTINCT clause contains any
3245 * volatile functions, we never make use of these pathkeys. We want to ensure
3246 * that sorts using volatile functions are done independently in each Aggref
3247 * rather than once at the query level. If we were to allow this then Aggrefs
3248 * with compatible sort orders would all transition their rows in the same
3249 * order if those pathkeys were deemed to be the best pathkeys to sort on.
3250 * Whereas, if some other set of Aggref's pathkeys happened to be deemed
3251 * better pathkeys to sort on, then the volatile function Aggrefs would be
3252 * left to perform their sorts individually. To avoid this inconsistent
3253 * behavior which could make Aggref results depend on what other Aggrefs the
3254 * query contains, we always force Aggrefs with volatile functions to perform
3255 * their own sorts.
3256 */
3257 static void
3258 adjust_group_pathkeys_for_groupagg(PlannerInfo *root)
3259{
3260 List *grouppathkeys = root->group_pathkeys;
3261 List *bestpathkeys;
3262 Bitmapset *bestaggs;
3263 Bitmapset *unprocessed_aggs;
3264 ListCell *lc;
3265 int i;
3266
3267 /* Shouldn't be here if there are grouping sets */
3268 Assert(root->parse->groupingSets == NIL);
3269 /* Shouldn't be here unless there are some ordered aggregates */
3270 Assert(root->numOrderedAggs > 0);
3271
3272 /* Do nothing if disabled */
3273 if (!enable_presorted_aggregate)
3274 return;
3275
3276 /*
3277 * Make a first pass over all AggInfos to collect a Bitmapset containing
3278 * the indexes of all AggInfos to be processed below.
3279 */
3280 unprocessed_aggs = NULL;
3281 foreach(lc, root->agginfos)
3282 {
3283 AggInfo *agginfo = lfirst_node(AggInfo, lc);
3284 Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3285
3286 if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
3287 continue;
3288
3289 /* Skip unless there's a DISTINCT or ORDER BY clause */
3290 if (aggref->aggdistinct == NIL && aggref->aggorder == NIL)
3291 continue;
3292
3293 /* Additional safety checks are needed if there's a FILTER clause */
3294 if (aggref->aggfilter != NULL)
3295 {
3296 ListCell *lc2;
3297 bool allow_presort = true;
3298
3299 /*
3300 * When the Aggref has a FILTER clause, it's possible that the
3301 * filter removes rows that cannot be sorted because the
3302 * expression to sort by results in an error during its
3303 * evaluation. This is a problem for presorting as that happens
3304 * before the FILTER, whereas without presorting, the Aggregate
3305 * node will apply the FILTER *before* sorting. So that we never
3306 * try to sort anything that might error, here we aim to skip over
3307 * any Aggrefs with arguments with expressions which, when
3308 * evaluated, could cause an ERROR. Vars and Consts are ok. There
3309 * may be more cases that should be allowed, but more thought
3310 * needs to be given. Err on the side of caution.
3311 */
3312 foreach(lc2, aggref->args)
3313 {
3314 TargetEntry *tle = (TargetEntry *) lfirst(lc2);
3315 Expr *expr = tle->expr;
3316
3317 while (IsA(expr, RelabelType))
3318 expr = (Expr *) (castNode(RelabelType, expr))->arg;
3319
3320 /* Common case, Vars and Consts are ok */
3321 if (IsA(expr, Var) || IsA(expr, Const))
3322 continue;
3323
3324 /* Unsupported. Don't try to presort for this Aggref */
3325 allow_presort = false;
3326 break;
3327 }
3328
3329 /* Skip unsupported Aggrefs */
3330 if (!allow_presort)
3331 continue;
3332 }
3333
3334 unprocessed_aggs = bms_add_member(unprocessed_aggs,
3335 foreach_current_index(lc));
3336 }
3337
3338 /*
3339 * Now process all the unprocessed_aggs to find the best set of pathkeys
3340 * for the given set of aggregates.
3341 *
3342 * On the first outer loop here 'bestaggs' will be empty. We'll populate
3343 * this during the first loop using the pathkeys for the very first
3344 * AggInfo then taking any stronger pathkeys from any other AggInfos with
3345 * a more strict set of compatible pathkeys. Once the outer loop is
3346 * complete, we mark off all the aggregates with compatible pathkeys then
3347 * remove those from the unprocessed_aggs and repeat the process to try to
3348 * find another set of pathkeys that are suitable for a larger number of
3349 * aggregates. The outer loop will stop when there are not enough
3350 * unprocessed aggregates for it to be possible to find a set of pathkeys
3351 * to suit a larger number of aggregates.
3352 */
3353 bestpathkeys = NIL;
3354 bestaggs = NULL;
3355 while (bms_num_members(unprocessed_aggs) > bms_num_members(bestaggs))
3356 {
3357 Bitmapset *aggindexes = NULL;
3358 List *currpathkeys = NIL;
3359
3360 i = -1;
3361 while ((i = bms_next_member(unprocessed_aggs, i)) >= 0)
3362 {
3363 AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3364 Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3365 List *sortlist;
3366 List *pathkeys;
3367
3368 if (aggref->aggdistinct != NIL)
3369 sortlist = aggref->aggdistinct;
3370 else
3371 sortlist = aggref->aggorder;
3372
3373 pathkeys = make_pathkeys_for_sortclauses(root, sortlist,
3374 aggref->args);
3375
3376 /*
3377 * Ignore Aggrefs which have volatile functions in their ORDER BY
3378 * or DISTINCT clause.
3379 */
3380 if (has_volatile_pathkey(pathkeys))
3381 {
3382 unprocessed_aggs = bms_del_member(unprocessed_aggs, i);
3383 continue;
3384 }
3385
3386 /*
3387 * When not set yet, take the pathkeys from the first unprocessed
3388 * aggregate.
3389 */
3390 if (currpathkeys == NIL)
3391 {
3392 currpathkeys = pathkeys;
3393
3394 /* include the GROUP BY pathkeys, if they exist */
3395 if (grouppathkeys != NIL)
3396 currpathkeys = append_pathkeys(list_copy(grouppathkeys),
3397 currpathkeys);
3398
3399 /* record that we found pathkeys for this aggregate */
3400 aggindexes = bms_add_member(aggindexes, i);
3401 }
3402 else
3403 {
3404 /* now look for a stronger set of matching pathkeys */
3405
3406 /* include the GROUP BY pathkeys, if they exist */
3407 if (grouppathkeys != NIL)
3408 pathkeys = append_pathkeys(list_copy(grouppathkeys),
3409 pathkeys);
3410
3411 /* are 'pathkeys' compatible or better than 'currpathkeys'? */
3412 switch (compare_pathkeys(currpathkeys, pathkeys))
3413 {
3414 case PATHKEYS_BETTER2:
3415 /* 'pathkeys' are stronger, use these ones instead */
3416 currpathkeys = pathkeys;
3417 /* FALLTHROUGH */
3418
3419 case PATHKEYS_BETTER1:
3420 /* 'pathkeys' are less strict */
3421 /* FALLTHROUGH */
3422
3423 case PATHKEYS_EQUAL:
3424 /* mark this aggregate as covered by 'currpathkeys' */
3425 aggindexes = bms_add_member(aggindexes, i);
3426 break;
3427
3428 case PATHKEYS_DIFFERENT:
3429 break;
3430 }
3431 }
3432 }
3433
3434 /* remove the aggregates that we've just processed */
3435 unprocessed_aggs = bms_del_members(unprocessed_aggs, aggindexes);
3436
3437 /*
3438 * If this pass included more aggregates than the previous best then
3439 * use these ones as the best set.
3440 */
3441 if (bms_num_members(aggindexes) > bms_num_members(bestaggs))
3442 {
3443 bestaggs = aggindexes;
3444 bestpathkeys = currpathkeys;
3445 }
3446 }
3447
3448 /*
3449 * If we found any ordered aggregates, update root->group_pathkeys to add
3450 * the best set of aggregate pathkeys. Note that bestpathkeys includes
3451 * the original GROUP BY pathkeys already.
3452 */
3453 if (bestpathkeys != NIL)
3454 root->group_pathkeys = bestpathkeys;
3455
3456 /*
3457 * Now that we've found the best set of aggregates we can set the
3458 * presorted flag to indicate to the executor that it needn't bother
3459 * performing a sort for these Aggrefs. We're able to do this now as
3460 * there's no chance of a Hash Aggregate plan as create_grouping_paths
3461 * will not mark the GROUP BY as GROUPING_CAN_USE_HASH due to the presence
3462 * of ordered aggregates.
3463 */
3464 i = -1;
3465 while ((i = bms_next_member(bestaggs, i)) >= 0)
3466 {
3467 AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3468
3469 foreach(lc, agginfo->aggrefs)
3470 {
3471 Aggref *aggref = lfirst_node(Aggref, lc);
3472
3473 aggref->aggpresorted = true;
3474 }
3475 }
3476}
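A short illustration of the "best pathkeys" selection, for a hypothetical query:

/* For "SELECT a, array_agg(b ORDER BY b), array_agg(c ORDER BY b, c) ...
 * GROUP BY a", the first aggregate yields candidate pathkeys (a, b) and the
 * second yields (a, b, c), which compare_pathkeys() reports as stronger, so
 * currpathkeys becomes (a, b, c) and both aggregates are covered.
 * group_pathkeys ends up as (a, b, c) and both Aggrefs are marked
 * aggpresorted. */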
3477
3478/*
3479 * Compute query_pathkeys and other pathkeys during plan generation
3480 */
3481 static void
3482 standard_qp_callback(PlannerInfo *root, void *extra)
3483{
3484 Query *parse = root->parse;
3485 standard_qp_extra *qp_extra = (standard_qp_extra *) extra;
3486 List *tlist = root->processed_tlist;
3487 List *activeWindows = qp_extra->activeWindows;
3488
3489 /*
3490 * Calculate pathkeys that represent grouping/ordering and/or ordered
3491 * aggregate requirements.
3492 */
3493 if (qp_extra->gset_data)
3494 {
3495 /*
3496 * With grouping sets, just use the first RollupData's groupClause. We
3497 * don't make any effort to optimize grouping clauses when there are
3498 * grouping sets, nor can we combine aggregate ordering keys with
3499 * grouping.
3500 */
3501 List *rollups = qp_extra->gset_data->rollups;
3502 List *groupClause = (rollups ? linitial_node(RollupData, rollups)->groupClause : NIL);
3503
3504 if (grouping_is_sortable(groupClause))
3505 {
3506 bool sortable;
3507
3508 /*
3509 * The groupClause is logically below the grouping step. So if
3510 * there is an RTE entry for the grouping step, we need to remove
3511 * its RT index from the sort expressions before we make PathKeys
3512 * for them.
3513 */
3514 root->group_pathkeys =
3515 make_pathkeys_for_sortclauses_extended(root,
3516 &groupClause,
3517 tlist,
3518 false,
3519 parse->hasGroupRTE,
3520 &sortable,
3521 false);
3522 Assert(sortable);
3523 root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3524 }
3525 else
3526 {
3527 root->group_pathkeys = NIL;
3528 root->num_groupby_pathkeys = 0;
3529 }
3530 }
3531 else if (parse->groupClause || root->numOrderedAggs > 0)
3532 {
3533 /*
3534 * With a plain GROUP BY list, we can remove any grouping items that
3535 * are proven redundant by EquivalenceClass processing. For example,
3536 * we can remove y given "WHERE x = y GROUP BY x, y". These aren't
3537 * especially common cases, but they're nearly free to detect. Note
3538 * that we remove redundant items from processed_groupClause but not
3539 * the original parse->groupClause.
3540 */
3541 bool sortable;
3542
3543 /*
3544 * Convert group clauses into pathkeys. Set the ec_sortref field of
3545 * EquivalenceClass'es if it's not set yet.
3546 */
3547 root->group_pathkeys =
3548 make_pathkeys_for_sortclauses_extended(root,
3549 &root->processed_groupClause,
3550 tlist,
3551 true,
3552 false,
3553 &sortable,
3554 true);
3555 if (!sortable)
3556 {
3557 /* Can't sort; no point in considering aggregate ordering either */
3558 root->group_pathkeys = NIL;
3559 root->num_groupby_pathkeys = 0;
3560 }
3561 else
3562 {
3563 root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3564 /* If we have ordered aggs, consider adding onto group_pathkeys */
3565 if (root->numOrderedAggs > 0)
3566 adjust_group_pathkeys_for_groupagg(root);
3567 }
3568 }
3569 else
3570 {
3571 root->group_pathkeys = NIL;
3572 root->num_groupby_pathkeys = 0;
3573 }
3574
3575 /* We consider only the first (bottom) window in pathkeys logic */
3576 if (activeWindows != NIL)
3577 {
3578 WindowClause *wc = linitial_node(WindowClause, activeWindows);
3579
3580 root->window_pathkeys = make_pathkeys_for_window(root,
3581 wc,
3582 tlist);
3583 }
3584 else
3585 root->window_pathkeys = NIL;
3586
3587 /*
3588 * As with GROUP BY, we can discard any DISTINCT items that are proven
3589 * redundant by EquivalenceClass processing. The non-redundant list is
3590 * kept in root->processed_distinctClause, leaving the original
3591 * parse->distinctClause alone.
3592 */
3593 if (parse->distinctClause)
3594 {
3595 bool sortable;
3596
3597 /* Make a copy since pathkey processing can modify the list */
3598 root->processed_distinctClause = list_copy(parse->distinctClause);
3599 root->distinct_pathkeys =
3600 make_pathkeys_for_sortclauses_extended(root,
3601 &root->processed_distinctClause,
3602 tlist,
3603 true,
3604 false,
3605 &sortable,
3606 false);
3607 if (!sortable)
3608 root->distinct_pathkeys = NIL;
3609 }
3610 else
3611 root->distinct_pathkeys = NIL;
3612
3613 root->sort_pathkeys =
3614 make_pathkeys_for_sortclauses(root,
3615 parse->sortClause,
3616 tlist);
3617
3618 /* setting setop_pathkeys might be useful to the union planner */
3619 if (qp_extra->setop != NULL)
3620 {
3621 List *groupClauses;
3622 bool sortable;
3623
3624 groupClauses = generate_setop_child_grouplist(qp_extra->setop, tlist);
3625
3626 root->setop_pathkeys =
3627 make_pathkeys_for_sortclauses_extended(root,
3628 &groupClauses,
3629 tlist,
3630 false,
3631 false,
3632 &sortable,
3633 false);
3634 if (!sortable)
3635 root->setop_pathkeys = NIL;
3636 }
3637 else
3638 root->setop_pathkeys = NIL;
3639
3640 /*
3641 * Figure out whether we want a sorted result from query_planner.
3642 *
3643 * If we have a sortable GROUP BY clause, then we want a result sorted
3644 * properly for grouping. Otherwise, if we have window functions to
3645 * evaluate, we try to sort for the first window. Otherwise, if there's a
3646 * sortable DISTINCT clause that's more rigorous than the ORDER BY clause,
3647 * we try to produce output that's sufficiently well sorted for the
3648 * DISTINCT. Otherwise, if there is an ORDER BY clause, we want to sort
3649 * by the ORDER BY clause. Otherwise, if we're a subquery being planned
3650 * for a set operation which can benefit from presorted results and have a
3651 * sortable targetlist, we want to sort by the target list.
3652 *
3653 * Note: if we have both ORDER BY and GROUP BY, and ORDER BY is a superset
3654 * of GROUP BY, it would be tempting to request sort by ORDER BY --- but
3655 * that might just leave us failing to exploit an available sort order at
3656 * all. Needs more thought. The choice for DISTINCT versus ORDER BY is
3657 * much easier, since we know that the parser ensured that one is a
3658 * superset of the other.
3659 */
3660 if (root->group_pathkeys)
3661 root->query_pathkeys = root->group_pathkeys;
3662 else if (root->window_pathkeys)
3663 root->query_pathkeys = root->window_pathkeys;
3664 else if (list_length(root->distinct_pathkeys) >
3665 list_length(root->sort_pathkeys))
3666 root->query_pathkeys = root->distinct_pathkeys;
3667 else if (root->sort_pathkeys)
3668 root->query_pathkeys = root->sort_pathkeys;
3669 else if (root->setop_pathkeys != NIL)
3670 root->query_pathkeys = root->setop_pathkeys;
3671 else
3672 root->query_pathkeys = NIL;
3673}
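/*
 * [Editor's illustrative sketch, not part of planner.c] The if/else chain at
 * the end of standard_qp_callback above picks a single pathkey list for
 * query_planner to aim at. The small standalone program below mirrors that
 * same priority order (GROUP BY, then window, then a DISTINCT clause more
 * rigorous than ORDER BY, then ORDER BY, then set-operation keys) using plain
 * list lengths; the function name and parameters are invented for the sketch.
 */
#include <stdio.h>

static const char *
choose_query_pathkeys(int n_group, int n_window, int n_distinct,
                      int n_sort, int n_setop)
{
	if (n_group > 0)
		return "group_pathkeys";
	else if (n_window > 0)
		return "window_pathkeys";
	else if (n_distinct > n_sort)
		return "distinct_pathkeys";
	else if (n_sort > 0)
		return "sort_pathkeys";
	else if (n_setop > 0)
		return "setop_pathkeys";
	return "NIL";
}

int
main(void)
{
	/* DISTINCT over two columns vs. ORDER BY over one: DISTINCT wins */
	printf("%s\n", choose_query_pathkeys(0, 0, 2, 1, 0));
	return 0;
}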
3674
3675/*
3676 * Estimate number of groups produced by grouping clauses (1 if not grouping)
3677 *
3678 * path_rows: number of output rows from scan/join step
3679 * gd: grouping sets data including list of grouping sets and their clauses
3680 * target_list: target list containing group clause references
3681 *
3682 * If doing grouping sets, we also annotate the gsets data with the estimates
3683 * for each set and each individual rollup list, with a view to later
3684 * determining whether some combination of them could be hashed instead.
3685 */
3686static double
3687get_number_of_groups(PlannerInfo *root,
3688 double path_rows,
3689 grouping_sets_data *gd,
3690 List *target_list)
3691{
3692 Query *parse = root->parse;
3693 double dNumGroups;
3694
3695 if (parse->groupClause)
3696 {
3697 List *groupExprs;
3698
3699 if (parse->groupingSets)
3700 {
3701 /* Add up the estimates for each grouping set */
3702 ListCell *lc;
3703
3704 Assert(gd); /* keep Coverity happy */
3705
3706 dNumGroups = 0;
3707
3708 foreach(lc, gd->rollups)
3709 {
3710 RollupData *rollup = lfirst_node(RollupData, lc);
3711 ListCell *lc2;
3712 ListCell *lc3;
3713
3714 groupExprs = get_sortgrouplist_exprs(rollup->groupClause,
3715 target_list);
3716
3717 rollup->numGroups = 0.0;
3718
3719 forboth(lc2, rollup->gsets, lc3, rollup->gsets_data)
3720 {
3721 List *gset = (List *) lfirst(lc2);
3722 GroupingSetData *gs = lfirst_node(GroupingSetData, lc3);
3723 double numGroups = estimate_num_groups(root,
3724 groupExprs,
3725 path_rows,
3726 &gset,
3727 NULL);
3728
3729 gs->numGroups = numGroups;
3730 rollup->numGroups += numGroups;
3731 }
3732
3733 dNumGroups += rollup->numGroups;
3734 }
3735
3736 if (gd->hash_sets_idx)
3737 {
3738 ListCell *lc2;
3739
3740 gd->dNumHashGroups = 0;
3741
3742 groupExprs = get_sortgrouplist_exprs(parse->groupClause,
3743 target_list);
3744
3745 forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets)
3746 {
3747 List *gset = (List *) lfirst(lc);
3748 GroupingSetData *gs = lfirst_node(GroupingSetData, lc2);
3749 double numGroups = estimate_num_groups(root,
3750 groupExprs,
3751 path_rows,
3752 &gset,
3753 NULL);
3754
3755 gs->numGroups = numGroups;
3756 gd->dNumHashGroups += numGroups;
3757 }
3758
3759 dNumGroups += gd->dNumHashGroups;
3760 }
3761 }
3762 else
3763 {
3764 /* Plain GROUP BY -- estimate based on optimized groupClause */
3765 groupExprs = get_sortgrouplist_exprs(root->processed_groupClause,
3766 target_list);
3767
3768 dNumGroups = estimate_num_groups(root, groupExprs, path_rows,
3769 NULL, NULL);
3770 }
3771 }
3772 else if (parse->groupingSets)
3773 {
3774 /* Empty grouping sets ... one result row for each one */
3775 dNumGroups = list_length(parse->groupingSets);
3776 }
3777 else if (parse->hasAggs || root->hasHavingQual)
3778 {
3779 /* Plain aggregation, one result row */
3780 dNumGroups = 1;
3781 }
3782 else
3783 {
3784 /* Not grouping */
3785 dNumGroups = 1;
3786 }
3787
3788 return dNumGroups;
3789}
3790
3791/*
3792 * create_grouping_paths
3793 *
3794 * Build a new upperrel containing Paths for grouping and/or aggregation.
3795 * Along the way, we also build an upperrel for Paths which are partially
3796 * grouped and/or aggregated. A partially grouped and/or aggregated path
3797 * needs a FinalizeAggregate node to complete the aggregation. Currently,
3798 * the only partially grouped paths we build are also partial paths; that
3799 * is, they need a Gather and then a FinalizeAggregate.
3800 *
3801 * input_rel: contains the source-data Paths
3802 * target: the pathtarget for the result Paths to compute
3803 * gd: grouping sets data including list of grouping sets and their clauses
3804 *
3805 * Note: all Paths in input_rel are expected to return the target computed
3806 * by make_group_input_target.
3807 */
3808static RelOptInfo *
3809create_grouping_paths(PlannerInfo *root,
3810 RelOptInfo *input_rel,
3811 PathTarget *target,
3812 bool target_parallel_safe,
3813 grouping_sets_data *gd)
3814{
3815 Query *parse = root->parse;
3816 RelOptInfo *grouped_rel;
3817 RelOptInfo *partially_grouped_rel;
3818 AggClauseCosts agg_costs;
3819
3820 MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
3821 get_agg_clause_costs(root, AGGSPLIT_SIMPLE, &agg_costs);
3822
3823 /*
3824 * Create grouping relation to hold fully aggregated grouping and/or
3825 * aggregation paths.
3826 */
3827 grouped_rel = make_grouping_rel(root, input_rel, target,
3828 target_parallel_safe, parse->havingQual);
3829
3830 /*
3831 * Create either paths for a degenerate grouping or paths for ordinary
3832 * grouping, as appropriate.
3833 */
3834 if (is_degenerate_grouping(root))
3835 create_degenerate_grouping_paths(root, input_rel, grouped_rel);
3836 else
3837 {
3838 int flags = 0;
3839 GroupPathExtraData extra;
3840
3841 /*
3842 * Determine whether it's possible to perform sort-based
3843 * implementations of grouping. (Note that if processed_groupClause
3844 * is empty, grouping_is_sortable() is trivially true, and all the
3845 * pathkeys_contained_in() tests will succeed too, so that we'll
3846 * consider every surviving input path.)
3847 *
3848 * If we have grouping sets, we might be able to sort some but not all
3849 * of them; in this case, we need can_sort to be true as long as we
3850 * must consider any sorted-input plan.
3851 */
3852 if ((gd && gd->rollups != NIL)
3853 || grouping_is_sortable(root->processed_groupClause))
3854 flags |= GROUPING_CAN_USE_SORT;
3855
3856 /*
3857 * Determine whether we should consider hash-based implementations of
3858 * grouping.
3859 *
3860 * Hashed aggregation only applies if we're grouping. If we have
3861 * grouping sets, some groups might be hashable but others not; in
3862 * this case we set can_hash true as long as there is nothing globally
3863 * preventing us from hashing (and we should therefore consider plans
3864 * with hashes).
3865 *
3866 * Executor doesn't support hashed aggregation with DISTINCT or ORDER
3867 * BY aggregates. (Doing so would imply storing *all* the input
3868 * values in the hash table, and/or running many sorts in parallel,
3869 * either of which seems like a certain loser.) We similarly don't
3870 * support ordered-set aggregates in hashed aggregation, but that case
3871 * is also included in the numOrderedAggs count.
3872 *
3873 * Note: grouping_is_hashable() is much more expensive to check than
3874 * the other gating conditions, so we want to do it last.
3875 */
3876 if ((parse->groupClause != NIL &&
3877 root->numOrderedAggs == 0 &&
3878 (gd ? gd->any_hashable : grouping_is_hashable(root->processed_groupClause))))
3879 flags |= GROUPING_CAN_USE_HASH;
3880
3881 /*
3882 * Determine whether partial aggregation is possible.
3883 */
3884 if (can_partial_agg(root))
3885 flags |= GROUPING_CAN_PARTIAL_AGG;
3886
3887 extra.flags = flags;
3888 extra.target_parallel_safe = target_parallel_safe;
3889 extra.havingQual = parse->havingQual;
3890 extra.targetList = parse->targetList;
3891 extra.partial_costs_set = false;
3892
3893 /*
3894 * Determine whether partitionwise aggregation is in theory possible.
3895 * It can be disabled by the user, and for now, we don't try to
3896 * support grouping sets. create_ordinary_grouping_paths() will check
3897 * additional conditions, such as whether input_rel is partitioned.
3898 */
3899 if (enable_partitionwise_aggregate && !parse->groupingSets)
3900 extra.patype = PARTITIONWISE_AGGREGATE_FULL;
3901 else
3902 extra.patype = PARTITIONWISE_AGGREGATE_NONE;
3903
3904 create_ordinary_grouping_paths(root, input_rel, grouped_rel,
3905 &agg_costs, gd, &extra,
3906 &partially_grouped_rel);
3907 }
3908
3909 set_cheapest(grouped_rel);
3910 return grouped_rel;
3911}
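/*
 * [Editor's illustrative sketch, not part of planner.c] create_grouping_paths
 * above folds its gating checks into a bitmask of GROUPING_CAN_* flags. This
 * standalone fragment restates that flag computation with plain booleans
 * standing in for grouping_is_sortable(), grouping_is_hashable() and
 * can_partial_agg(); the numeric flag values are chosen for the sketch only.
 */
#include <stdbool.h>
#include <stdio.h>

#define SKETCH_CAN_USE_SORT    0x0001
#define SKETCH_CAN_USE_HASH    0x0002
#define SKETCH_CAN_PARTIAL_AGG 0x0004

static int
grouping_flags(bool have_sortable_rollups, bool groupclause_sortable,
               bool have_group_clause, bool have_ordered_aggs,
               bool groupclause_hashable, bool partial_agg_ok)
{
	int			flags = 0;

	if (have_sortable_rollups || groupclause_sortable)
		flags |= SKETCH_CAN_USE_SORT;
	if (have_group_clause && !have_ordered_aggs && groupclause_hashable)
		flags |= SKETCH_CAN_USE_HASH;
	if (partial_agg_ok)
		flags |= SKETCH_CAN_PARTIAL_AGG;
	return flags;
}

int
main(void)
{
	/* sortable and hashable GROUP BY, no ordered aggs, partial agg allowed */
	printf("flags = 0x%x\n",
		   grouping_flags(false, true, true, false, true, true));
	return 0;
}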
3912
3913/*
3914 * make_grouping_rel
3915 *
3916 * Create a new grouping rel and set basic properties.
3917 *
3918 * input_rel represents the underlying scan/join relation.
3919 * target is the output expected from the grouping relation.
3920 */
3921static RelOptInfo *
3922make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
3923 PathTarget *target, bool target_parallel_safe,
3924 Node *havingQual)
3925{
3926 RelOptInfo *grouped_rel;
3927
3928 if (IS_OTHER_REL(input_rel))
3929 {
3930 grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG,
3931 input_rel->relids);
3932 grouped_rel->reloptkind = RELOPT_OTHER_UPPER_REL;
3933 }
3934 else
3935 {
3936 /*
3937 * By tradition, the relids set for the main grouping relation is
3938 * NULL. (This could be changed, but might require adjustments
3939 * elsewhere.)
3940 */
3941 grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, NULL);
3942 }
3943
3944 /* Set target. */
3945 grouped_rel->reltarget = target;
3946
3947 /*
3948 * If the input relation is not parallel-safe, then the grouped relation
3949 * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
3950 * target list and HAVING quals are parallel-safe.
3951 */
3952 if (input_rel->consider_parallel && target_parallel_safe &&
3953 is_parallel_safe(root, havingQual))
3954 grouped_rel->consider_parallel = true;
3955
3956 /*
3957 * If the input rel belongs to a single FDW, so does the grouped rel.
3958 */
3959 grouped_rel->serverid = input_rel->serverid;
3960 grouped_rel->userid = input_rel->userid;
3961 grouped_rel->useridiscurrent = input_rel->useridiscurrent;
3962 grouped_rel->fdwroutine = input_rel->fdwroutine;
3963
3964 return grouped_rel;
3965}
3966
3967/*
3968 * is_degenerate_grouping
3969 *
3970 * A degenerate grouping is one in which the query has a HAVING qual and/or
3971 * grouping sets, but no aggregates and no GROUP BY (which implies that the
3972 * grouping sets are all empty).
3973 */
3974static bool
3975is_degenerate_grouping(PlannerInfo *root)
3976{
3977 Query *parse = root->parse;
3978
3979 return (root->hasHavingQual || parse->groupingSets) &&
3980 !parse->hasAggs && parse->groupClause == NIL;
3981}
3982
3983/*
3984 * create_degenerate_grouping_paths
3985 *
3986 * When the grouping is degenerate (see is_degenerate_grouping), we are
3987 * supposed to emit either zero or one row for each grouping set depending on
3988 * whether HAVING succeeds. Furthermore, there cannot be any variables in
3989 * either HAVING or the targetlist, so we actually do not need the FROM table
3990 * at all! We can just throw away the plan-so-far and generate a Result node.
3991 * This is a sufficiently unusual corner case that it's not worth contorting
3992 * the structure of this module to avoid having to generate the earlier paths
3993 * in the first place.
3994 */
3995static void
3996create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
3997 RelOptInfo *grouped_rel)
3998{
3999 Query *parse = root->parse;
4000 int nrows;
4001 Path *path;
4002
4003 nrows = list_length(parse->groupingSets);
4004 if (nrows > 1)
4005 {
4006 /*
4007 * Doesn't seem worthwhile writing code to cons up a generate_series
4008 * or a values scan to emit multiple rows. Instead just make N clones
4009 * and append them. (With a volatile HAVING clause, this means you
4010 * might get between 0 and N output rows. Offhand I think that's
4011 * desired.)
4012 */
4013 List *paths = NIL;
4014
4015 while (--nrows >= 0)
4016 {
4017 path = (Path *)
4018 create_group_result_path(root, grouped_rel,
4019 grouped_rel->reltarget,
4020 (List *) parse->havingQual);
4021 paths = lappend(paths, path);
4022 }
4023 path = (Path *)
4024 create_append_path(root,
4025 grouped_rel,
4026 paths,
4027 NIL,
4028 NIL,
4029 NULL,
4030 0,
4031 false,
4032 -1);
4033 }
4034 else
4035 {
4036 /* No grouping sets, or just one, so one output row */
4037 path = (Path *)
4038 create_group_result_path(root, grouped_rel,
4039 grouped_rel->reltarget,
4040 (List *) parse->havingQual);
4041 }
4042
4043 add_path(grouped_rel, path);
4044}
4045
4046/*
4047 * create_ordinary_grouping_paths
4048 *
4049 * Create grouping paths for the ordinary (that is, non-degenerate) case.
4050 *
4051 * We need to consider sorted and hashed aggregation in the same function,
4052 * because otherwise (1) it would be harder to throw an appropriate error
4053 * message if neither way works, and (2) we should not allow hashtable size
4054 * considerations to dissuade us from using hashing if sorting is not possible.
4055 *
4056 * *partially_grouped_rel_p will be set to the partially grouped rel which this
4057 * function creates, or to NULL if it doesn't create one.
4058 */
4059static void
4060create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
4061 RelOptInfo *grouped_rel,
4062 const AggClauseCosts *agg_costs,
4063 grouping_sets_data *gd,
4064 GroupPathExtraData *extra,
4065 RelOptInfo **partially_grouped_rel_p)
4066{
4067 RelOptInfo *partially_grouped_rel = NULL;
4068 PartitionwiseAggregateType patype = PARTITIONWISE_AGGREGATE_NONE;
4069
4070 /*
4071 * If this is the topmost grouping relation or if the parent relation is
4072 * doing some form of partitionwise aggregation, then we may be able to do
4073 * it at this level also. However, if the input relation is not
4074 * partitioned, partitionwise aggregate is impossible.
4075 */
4076 if (extra->patype != PARTITIONWISE_AGGREGATE_NONE &&
4077 IS_PARTITIONED_REL(input_rel))
4078 {
4079 /*
4080 * If this is the topmost relation or if the parent relation is doing
4081 * full partitionwise aggregation, then we can do full partitionwise
4082 * aggregation provided that the GROUP BY clause contains all of the
4083 * partitioning columns at this level and the collation used by GROUP
4084 * BY matches the partitioning collation. Otherwise, we can do at
4085 * most partial partitionwise aggregation. But if partial aggregation
4086 * is not supported in general then we can't use it for partitionwise
4087 * aggregation either.
4088 *
4089 * Check parse->groupClause not processed_groupClause, because it's
4090 * okay if some of the partitioning columns were proved redundant.
4091 */
4092 if (extra->patype == PARTITIONWISE_AGGREGATE_FULL &&
4093 group_by_has_partkey(input_rel, extra->targetList,
4094 root->parse->groupClause))
4095 patype = PARTITIONWISE_AGGREGATE_FULL;
4096 else if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4097 patype = PARTITIONWISE_AGGREGATE_PARTIAL;
4098 else
4099 patype = PARTITIONWISE_AGGREGATE_NONE;
4100 }
4101
4102 /*
4103 * Before generating paths for grouped_rel, we first generate any possible
4104 * partially grouped paths; that way, later code can easily consider both
4105 * parallel and non-parallel approaches to grouping.
4106 */
4107 if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4108 {
4109 bool force_rel_creation;
4110
4111 /*
4112 * If we're doing partitionwise aggregation at this level, force
4113 * creation of a partially_grouped_rel so we can add partitionwise
4114 * paths to it.
4115 */
4116 force_rel_creation = (patype == PARTITIONWISE_AGGREGATE_PARTIAL);
4117
4118 partially_grouped_rel =
4119 create_partial_grouping_paths(root,
4120 grouped_rel,
4121 input_rel,
4122 gd,
4123 extra,
4124 force_rel_creation);
4125 }
4126
4127 /* Set out parameter. */
4128 *partially_grouped_rel_p = partially_grouped_rel;
4129
4130 /* Apply partitionwise aggregation technique, if possible. */
4131 if (patype != PARTITIONWISE_AGGREGATE_NONE)
4132 create_partitionwise_grouping_paths(root, input_rel, grouped_rel,
4133 partially_grouped_rel, agg_costs,
4134 gd, patype, extra);
4135
4136 /* If we are doing partial aggregation only, return. */
4137 if (extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
4138 {
4139 Assert(partially_grouped_rel);
4140
4141 if (partially_grouped_rel->pathlist)
4142 set_cheapest(partially_grouped_rel);
4143
4144 return;
4145 }
4146
4147 /* Gather any partially grouped partial paths. */
4148 if (partially_grouped_rel && partially_grouped_rel->partial_pathlist)
4149 gather_grouping_paths(root, partially_grouped_rel);
4150
4151 /* Now choose the best path(s) for partially_grouped_rel. */
4152 if (partially_grouped_rel && partially_grouped_rel->pathlist)
4153 set_cheapest(partially_grouped_rel);
4154
4155 /* Build final grouping paths */
4156 add_paths_to_grouping_rel(root, input_rel, grouped_rel,
4157 partially_grouped_rel, agg_costs, gd,
4158 extra);
4159
4160 /* Give a helpful error if we failed to find any implementation */
4161 if (grouped_rel->pathlist == NIL)
4162 ereport(ERROR,
4163 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4164 errmsg("could not implement GROUP BY"),
4165 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4166
4167 /*
4168 * If there is an FDW that's responsible for all baserels of the query,
4169 * let it consider adding ForeignPaths.
4170 */
4171 if (grouped_rel->fdwroutine &&
4172 grouped_rel->fdwroutine->GetForeignUpperPaths)
4173 grouped_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_GROUP_AGG,
4174 input_rel, grouped_rel,
4175 extra);
4176
4177 /* Let extensions possibly add some more paths */
4178 if (create_upper_paths_hook)
4179 (*create_upper_paths_hook) (root, UPPERREL_GROUP_AGG,
4180 input_rel, grouped_rel,
4181 extra);
4182}
4183
4184/*
4185 * For a given input path, consider the possible ways of doing grouping sets on
4186 * it, by combinations of hashing and sorting. This can be called multiple
4187 * times, so it's important that it not scribble on input. No result is
4188 * returned, but any generated paths are added to grouped_rel.
4189 */
4190static void
4191consider_groupingsets_paths(PlannerInfo *root,
4192 RelOptInfo *grouped_rel,
4193 Path *path,
4194 bool is_sorted,
4195 bool can_hash,
4196 grouping_sets_data *gd,
4197 const AggClauseCosts *agg_costs,
4198 double dNumGroups)
4199{
4200 Query *parse = root->parse;
4201 Size hash_mem_limit = get_hash_memory_limit();
4202
4203 /*
4204 * If we're not being offered sorted input, then only consider plans that
4205 * can be done entirely by hashing.
4206 *
4207 * We can hash everything if it looks like it'll fit in hash_mem. But if
4208 * the input is actually sorted despite not being advertised as such, we
4209 * prefer to make use of that in order to use less memory.
4210 *
4211 * If none of the grouping sets are sortable, then ignore the hash_mem
4212 * limit and generate a path anyway, since otherwise we'll just fail.
4213 */
4214 if (!is_sorted)
4215 {
4216 List *new_rollups = NIL;
4217 RollupData *unhashed_rollup = NULL;
4218 List *sets_data;
4219 List *empty_sets_data = NIL;
4220 List *empty_sets = NIL;
4221 ListCell *lc;
4222 ListCell *l_start = list_head(gd->rollups);
4223 AggStrategy strat = AGG_HASHED;
4224 double hashsize;
4225 double exclude_groups = 0.0;
4226
4227 Assert(can_hash);
4228
4229 /*
4230 * If the input is coincidentally sorted usefully (which can happen
4231 * even if is_sorted is false, since that only means that our caller
4232 * has set up the sorting for us), then save some hashtable space by
4233 * making use of that. But we need to watch out for degenerate cases:
4234 *
4235 * 1) If there are any empty grouping sets, then group_pathkeys might
4236 * be NIL if all non-empty grouping sets are unsortable. In this case,
4237 * there will be a rollup containing only empty groups, and the
4238 * pathkeys_contained_in test is vacuously true; this is ok.
4239 *
4240 * XXX: the above relies on the fact that group_pathkeys is generated
4241 * from the first rollup. If we add the ability to consider multiple
4242 * sort orders for grouping input, this assumption might fail.
4243 *
4244 * 2) If there are no empty sets and only unsortable sets, then the
4245 * rollups list will be empty (and thus l_start == NULL), and
4246 * group_pathkeys will be NIL; we must ensure that the vacuously-true
4247 * pathkeys_contained_in test doesn't cause us to crash.
4248 */
4249 if (l_start != NULL &&
4250 pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
4251 {
4252 unhashed_rollup = lfirst_node(RollupData, l_start);
4253 exclude_groups = unhashed_rollup->numGroups;
4254 l_start = lnext(gd->rollups, l_start);
4255 }
4256
4257 hashsize = estimate_hashagg_tablesize(root,
4258 path,
4259 agg_costs,
4260 dNumGroups - exclude_groups);
4261
4262 /*
4263 * gd->rollups is empty if we have only unsortable columns to work
4264 * with. Override hash_mem in that case; otherwise, we'll rely on the
4265 * sorted-input case to generate usable mixed paths.
4266 */
4267 if (hashsize > hash_mem_limit && gd->rollups)
4268 return; /* nope, won't fit */
4269
4270 /*
4271 * We need to burst the existing rollups list into individual grouping
4272 * sets and recompute a groupClause for each set.
4273 */
4274 sets_data = list_copy(gd->unsortable_sets);
4275
4276 for_each_cell(lc, gd->rollups, l_start)
4277 {
4278 RollupData *rollup = lfirst_node(RollupData, lc);
4279
4280 /*
4281 * If we find an unhashable rollup that's not been skipped by the
4282 * "actually sorted" check above, we can't cope; we'd need sorted
4283 * input (with a different sort order) but we can't get that here.
4284 * So bail out; we'll get a valid path from the is_sorted case
4285 * instead.
4286 *
4287 * The mere presence of empty grouping sets doesn't make a rollup
4288 * unhashable (see preprocess_grouping_sets); we handle those
4289 * specially below.
4290 */
4291 if (!rollup->hashable)
4292 return;
4293
4294 sets_data = list_concat(sets_data, rollup->gsets_data);
4295 }
4296 foreach(lc, sets_data)
4297 {
4298 GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4299 List *gset = gs->set;
4300 RollupData *rollup;
4301
4302 if (gset == NIL)
4303 {
4304 /* Empty grouping sets can't be hashed. */
4305 empty_sets_data = lappend(empty_sets_data, gs);
4306 empty_sets = lappend(empty_sets, NIL);
4307 }
4308 else
4309 {
4310 rollup = makeNode(RollupData);
4311
4312 rollup->groupClause = preprocess_groupclause(root, gset);
4313 rollup->gsets_data = list_make1(gs);
4314 rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4315 rollup->gsets_data,
4316 gset);
4317 rollup->numGroups = gs->numGroups;
4318 rollup->hashable = true;
4319 rollup->is_hashed = true;
4320 new_rollups = lappend(new_rollups, rollup);
4321 }
4322 }
4323
4324 /*
4325 * If we didn't find anything nonempty to hash, then bail. We'll
4326 * generate a path from the is_sorted case.
4327 */
4328 if (new_rollups == NIL)
4329 return;
4330
4331 /*
4332 * If there were empty grouping sets they should have been in the
4333 * first rollup.
4334 */
4335 Assert(!unhashed_rollup || !empty_sets);
4336
4337 if (unhashed_rollup)
4338 {
4339 new_rollups = lappend(new_rollups, unhashed_rollup);
4340 strat = AGG_MIXED;
4341 }
4342 else if (empty_sets)
4343 {
4344 RollupData *rollup = makeNode(RollupData);
4345
4346 rollup->groupClause = NIL;
4347 rollup->gsets_data = empty_sets_data;
4348 rollup->gsets = empty_sets;
4349 rollup->numGroups = list_length(empty_sets);
4350 rollup->hashable = false;
4351 rollup->is_hashed = false;
4352 new_rollups = lappend(new_rollups, rollup);
4353 strat = AGG_MIXED;
4354 }
4355
4356 add_path(grouped_rel, (Path *)
4357 create_groupingsets_path(root,
4358 grouped_rel,
4359 path,
4360 (List *) parse->havingQual,
4361 strat,
4362 new_rollups,
4363 agg_costs));
4364 return;
4365 }
4366
4367 /*
4368 * If we have sorted input but nothing we can do with it, bail.
4369 */
4370 if (gd->rollups == NIL)
4371 return;
4372
4373 /*
4374 * Given sorted input, we try to make two paths: one sorted and one mixed
4375 * sort/hash. (We need to try both because hashagg might be disabled, or
4376 * some columns might not be sortable.)
4377 *
4378 * can_hash is passed in as false if some obstacle elsewhere (such as
4379 * ordered aggs) means that we shouldn't consider hashing at all.
4380 */
4381 if (can_hash && gd->any_hashable)
4382 {
4383 List *rollups = NIL;
4384 List *hash_sets = list_copy(gd->unsortable_sets);
4385 double availspace = hash_mem_limit;
4386 ListCell *lc;
4387
4388 /*
4389 * Account first for space needed for groups we can't sort at all.
4390 */
4391 availspace -= estimate_hashagg_tablesize(root,
4392 path,
4393 agg_costs,
4394 gd->dNumHashGroups);
4395
4396 if (availspace > 0 && list_length(gd->rollups) > 1)
4397 {
4398 double scale;
4399 int num_rollups = list_length(gd->rollups);
4400 int k_capacity;
4401 int *k_weights = palloc(num_rollups * sizeof(int));
4402 Bitmapset *hash_items = NULL;
4403 int i;
4404
4405 /*
4406 * We treat this as a knapsack problem: the knapsack capacity
4407 * represents hash_mem, the item weights are the estimated memory
4408 * usage of the hashtables needed to implement a single rollup,
4409 * and we really ought to use the cost saving as the item value;
4410 * however, currently the costs assigned to sort nodes don't
4411 * reflect the comparison costs well, and so we treat all items as
4412 * of equal value (each rollup we hash instead saves us one sort).
4413 *
4414 * To use the discrete knapsack, we need to scale the values to a
4415 * reasonably small bounded range. We choose to allow a 5% error
4416 * margin; we have no more than 4096 rollups in the worst possible
4417 * case, which with a 5% error margin will require a bit over 42MB
4418 * of workspace. (Anyone wanting to plan queries that complex had
4419 * better have the memory for it. In more reasonable cases, with
4420 * no more than a couple of dozen rollups, the memory usage will
4421 * be negligible.)
4422 *
4423 * k_capacity is naturally bounded, but we clamp the values for
4424 * scale and weight (below) to avoid overflows or underflows (or
4425 * uselessly trying to use a scale factor less than 1 byte).
4426 */
4427 scale = Max(availspace / (20.0 * num_rollups), 1.0);
4428 k_capacity = (int) floor(availspace / scale);
4429
4430 /*
4431 * We leave the first rollup out of consideration since it's the
4432 * one that matches the input sort order. We assign indexes "i"
4433 * to only those entries considered for hashing; the second loop,
4434 * below, must use the same condition.
4435 */
4436 i = 0;
4437 for_each_from(lc, gd->rollups, 1)
4438 {
4439 RollupData *rollup = lfirst_node(RollupData, lc);
4440
4441 if (rollup->hashable)
4442 {
4443 double sz = estimate_hashagg_tablesize(root,
4444 path,
4445 agg_costs,
4446 rollup->numGroups);
4447
4448 /*
4449 * If sz is enormous, but hash_mem (and hence scale) is
4450 * small, avoid integer overflow here.
4451 */
4452 k_weights[i] = (int) Min(floor(sz / scale),
4453 k_capacity + 1.0);
4454 ++i;
4455 }
4456 }
4457
4458 /*
4459 * Apply knapsack algorithm; compute the set of items which
4460 * maximizes the value stored (in this case the number of sorts
4461 * saved) while keeping the total size (approximately) within
4462 * capacity.
4463 */
4464 if (i > 0)
4465 hash_items = DiscreteKnapsack(k_capacity, i, k_weights, NULL);
4466
4467 if (!bms_is_empty(hash_items))
4468 {
4469 rollups = list_make1(linitial(gd->rollups));
4470
4471 i = 0;
4472 for_each_from(lc, gd->rollups, 1)
4473 {
4474 RollupData *rollup = lfirst_node(RollupData, lc);
4475
4476 if (rollup->hashable)
4477 {
4478 if (bms_is_member(i, hash_items))
4479 hash_sets = list_concat(hash_sets,
4480 rollup->gsets_data);
4481 else
4482 rollups = lappend(rollups, rollup);
4483 ++i;
4484 }
4485 else
4486 rollups = lappend(rollups, rollup);
4487 }
4488 }
4489 }
4490
4491 if (!rollups && hash_sets)
4492 rollups = list_copy(gd->rollups);
4493
4494 foreach(lc, hash_sets)
4495 {
4496 GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4497 RollupData *rollup = makeNode(RollupData);
4498
4499 Assert(gs->set != NIL);
4500
4501 rollup->groupClause = preprocess_groupclause(root, gs->set);
4502 rollup->gsets_data = list_make1(gs);
4503 rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4504 rollup->gsets_data,
4505 gs->set);
4506 rollup->numGroups = gs->numGroups;
4507 rollup->hashable = true;
4508 rollup->is_hashed = true;
4509 rollups = lcons(rollup, rollups);
4510 }
4511
4512 if (rollups)
4513 {
4514 add_path(grouped_rel, (Path *)
4515 create_groupingsets_path(root,
4516 grouped_rel,
4517 path,
4518 (List *) parse->havingQual,
4519 AGG_MIXED,
4520 rollups,
4521 agg_costs));
4522 }
4523 }
4524
4525 /*
4526 * Now try the simple sorted case.
4527 */
4528 if (!gd->unsortable_sets)
4529 add_path(grouped_rel, (Path *)
4530 create_groupingsets_path(root,
4531 grouped_rel,
4532 path,
4533 (List *) parse->havingQual,
4534 AGG_SORTED,
4535 gd->rollups,
4536 agg_costs));
4537}
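/*
 * [Editor's illustrative sketch, not part of planner.c] The knapsack setup in
 * consider_groupingsets_paths above scales the hashtable-size estimates down
 * so that DiscreteKnapsack() can work over a small integer capacity. This
 * standalone fragment reproduces only that scaling arithmetic (scale factor,
 * capacity, clamped per-rollup weights) with made-up numbers; it does not
 * call the real DiscreteKnapsack().
 */
#include <math.h>
#include <stdio.h>

int
main(void)
{
	double		availspace = 4.0 * 1024 * 1024; /* pretend 4MB of hash_mem left */
	int			num_rollups = 3;
	double		sizes[] = {1.5e6, 2.5e6, 8.0e6};	/* estimated hashtable sizes */
	double		scale;
	int			k_capacity;
	int			i;

	/* allow ~5% error: one knapsack unit is availspace / (20 * num_rollups) */
	scale = fmax(availspace / (20.0 * num_rollups), 1.0);
	k_capacity = (int) floor(availspace / scale);

	for (i = 0; i < num_rollups; i++)
	{
		/* clamp so an oversized rollup can never fit, without overflowing */
		int			weight = (int) fmin(floor(sizes[i] / scale),
										k_capacity + 1.0);

		printf("rollup %d: weight %d of capacity %d\n", i, weight, k_capacity);
	}
	return 0;
}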
4538
4539/*
4540 * create_window_paths
4541 *
4542 * Build a new upperrel containing Paths for window-function evaluation.
4543 *
4544 * input_rel: contains the source-data Paths
4545 * input_target: result of make_window_input_target
4546 * output_target: what the topmost WindowAggPath should return
4547 * wflists: result of find_window_functions
4548 * activeWindows: result of select_active_windows
4549 *
4550 * Note: all Paths in input_rel are expected to return input_target.
4551 */
4552static RelOptInfo *
4553create_window_paths(PlannerInfo *root,
4554 RelOptInfo *input_rel,
4555 PathTarget *input_target,
4556 PathTarget *output_target,
4557 bool output_target_parallel_safe,
4558 WindowFuncLists *wflists,
4559 List *activeWindows)
4560{
4561 RelOptInfo *window_rel;
4562 ListCell *lc;
4563
4564 /* For now, do all work in the (WINDOW, NULL) upperrel */
4565 window_rel = fetch_upper_rel(root, UPPERREL_WINDOW, NULL);
4566
4567 /*
4568 * If the input relation is not parallel-safe, then the window relation
4569 * can't be parallel-safe, either. Otherwise, we need to examine the
4570 * target list and active windows for non-parallel-safe constructs.
4571 */
4572 if (input_rel->consider_parallel && output_target_parallel_safe &&
4573 is_parallel_safe(root, (Node *) activeWindows))
4574 window_rel->consider_parallel = true;
4575
4576 /*
4577 * If the input rel belongs to a single FDW, so does the window rel.
4578 */
4579 window_rel->serverid = input_rel->serverid;
4580 window_rel->userid = input_rel->userid;
4581 window_rel->useridiscurrent = input_rel->useridiscurrent;
4582 window_rel->fdwroutine = input_rel->fdwroutine;
4583
4584 /*
4585 * Consider computing window functions starting from the existing
4586 * cheapest-total path (which will likely require a sort) as well as any
4587 * existing paths that satisfy or partially satisfy root->window_pathkeys.
4588 */
4589 foreach(lc, input_rel->pathlist)
4590 {
4591 Path *path = (Path *) lfirst(lc);
4592 int presorted_keys;
4593
4594 if (path == input_rel->cheapest_total_path ||
4595 pathkeys_count_contained_in(root->window_pathkeys, path->pathkeys,
4596 &presorted_keys) ||
4597 presorted_keys > 0)
4598 create_one_window_path(root,
4599 window_rel,
4600 path,
4601 input_target,
4602 output_target,
4603 wflists,
4604 activeWindows);
4605 }
4606
4607 /*
4608 * If there is an FDW that's responsible for all baserels of the query,
4609 * let it consider adding ForeignPaths.
4610 */
4611 if (window_rel->fdwroutine &&
4612 window_rel->fdwroutine->GetForeignUpperPaths)
4613 window_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_WINDOW,
4614 input_rel, window_rel,
4615 NULL);
4616
4617 /* Let extensions possibly add some more paths */
4618 if (create_upper_paths_hook)
4619 (*create_upper_paths_hook) (root, UPPERREL_WINDOW,
4620 input_rel, window_rel, NULL);
4621
4622 /* Now choose the best path(s) */
4623 set_cheapest(window_rel);
4624
4625 return window_rel;
4626}
4627
4628/*
4629 * Stack window-function implementation steps atop the given Path, and
4630 * add the result to window_rel.
4631 *
4632 * window_rel: upperrel to contain result
4633 * path: input Path to use (must return input_target)
4634 * input_target: result of make_window_input_target
4635 * output_target: what the topmost WindowAggPath should return
4636 * wflists: result of find_window_functions
4637 * activeWindows: result of select_active_windows
4638 */
4639static void
4640create_one_window_path(PlannerInfo *root,
4641 RelOptInfo *window_rel,
4642 Path *path,
4643 PathTarget *input_target,
4644 PathTarget *output_target,
4645 WindowFuncLists *wflists,
4646 List *activeWindows)
4647{
4648 PathTarget *window_target;
4649 ListCell *l;
4650 List *topqual = NIL;
4651
4652 /*
4653 * Since each window clause could require a different sort order, we stack
4654 * up a WindowAgg node for each clause, with sort steps between them as
4655 * needed. (We assume that select_active_windows chose a good order for
4656 * executing the clauses in.)
4657 *
4658 * input_target should contain all Vars and Aggs needed for the result.
4659 * (In some cases we wouldn't need to propagate all of these all the way
4660 * to the top, since they might only be needed as inputs to WindowFuncs.
4661 * It's probably not worth trying to optimize that though.) It must also
4662 * contain all window partitioning and sorting expressions, to ensure
4663 * they're computed only once at the bottom of the stack (that's critical
4664 * for volatile functions). As we climb up the stack, we'll add outputs
4665 * for the WindowFuncs computed at each level.
4666 */
4667 window_target = input_target;
4668
4669 foreach(l, activeWindows)
4670 {
4671 WindowClause *wc = lfirst_node(WindowClause, l);
4672 List *window_pathkeys;
4673 List *runcondition = NIL;
4674 int presorted_keys;
4675 bool is_sorted;
4676 bool topwindow;
4677 ListCell *lc2;
4678
4679 window_pathkeys = make_pathkeys_for_window(root,
4680 wc,
4681 root->processed_tlist);
4682
4683 is_sorted = pathkeys_count_contained_in(window_pathkeys,
4684 path->pathkeys,
4685 &presorted_keys);
4686
4687 /* Sort if necessary */
4688 if (!is_sorted)
4689 {
4690 /*
4691 * No presorted keys or incremental sort disabled, just perform a
4692 * complete sort.
4693 */
4694 if (presorted_keys == 0 || !enable_incremental_sort)
4695 path = (Path *) create_sort_path(root, window_rel,
4696 path,
4697 window_pathkeys,
4698 -1.0);
4699 else
4700 {
4701 /*
4702 * Since we have presorted keys and incremental sort is
4703 * enabled, just use incremental sort.
4704 */
4705 path = (Path *) create_incremental_sort_path(root,
4706 window_rel,
4707 path,
4708 window_pathkeys,
4709 presorted_keys,
4710 -1.0);
4711 }
4712 }
4713
4714 if (lnext(activeWindows, l))
4715 {
4716 /*
4717 * Add the current WindowFuncs to the output target for this
4718 * intermediate WindowAggPath. We must copy window_target to
4719 * avoid changing the previous path's target.
4720 *
4721 * Note: a WindowFunc adds nothing to the target's eval costs; but
4722 * we do need to account for the increase in tlist width.
4723 */
4724 int64 tuple_width = window_target->width;
4725
4726 window_target = copy_pathtarget(window_target);
4727 foreach(lc2, wflists->windowFuncs[wc->winref])
4728 {
4729 WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4730
4731 add_column_to_pathtarget(window_target, (Expr *) wfunc, 0);
4732 tuple_width += get_typavgwidth(wfunc->wintype, -1);
4733 }
4734 window_target->width = clamp_width_est(tuple_width);
4735 }
4736 else
4737 {
4738 /* Install the goal target in the topmost WindowAgg */
4739 window_target = output_target;
4740 }
4741
4742 /* mark the final item in the list as the top-level window */
4743 topwindow = foreach_current_index(l) == list_length(activeWindows) - 1;
4744
4745 /*
4746 * Collect the WindowFuncRunConditions from each WindowFunc and
4747 * convert them into OpExprs
4748 */
4749 foreach(lc2, wflists->windowFuncs[wc->winref])
4750 {
4751 ListCell *lc3;
4752 WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4753
4754 foreach(lc3, wfunc->runCondition)
4755 {
4756 WindowFuncRunCondition *wfuncrc =
4757 lfirst_node(WindowFuncRunCondition, lc3);
4758 Expr *opexpr;
4759 Expr *leftop;
4760 Expr *rightop;
4761
4762 if (wfuncrc->wfunc_left)
4763 {
4764 leftop = (Expr *) copyObject(wfunc);
4765 rightop = copyObject(wfuncrc->arg);
4766 }
4767 else
4768 {
4769 leftop = copyObject(wfuncrc->arg);
4770 rightop = (Expr *) copyObject(wfunc);
4771 }
4772
4773 opexpr = make_opclause(wfuncrc->opno,
4774 BOOLOID,
4775 false,
4776 leftop,
4777 rightop,
4778 InvalidOid,
4779 wfuncrc->inputcollid);
4780
4781 runcondition = lappend(runcondition, opexpr);
4782
4783 if (!topwindow)
4784 topqual = lappend(topqual, opexpr);
4785 }
4786 }
4787
4788 path = (Path *)
4789 create_windowagg_path(root, window_rel, path, window_target,
4790 wflists->windowFuncs[wc->winref],
4791 runcondition, wc,
4792 topwindow ? topqual : NIL, topwindow);
4793 }
4794
4795 add_path(window_rel, path);
4796}
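/*
 * [Editor's illustrative sketch, not part of planner.c] In the loop above,
 * each WindowFuncRunCondition becomes an ordinary OpExpr, and wfunc_left
 * decides which side of the operator the window function ends up on. The toy
 * function below shows only that operand-ordering decision, using strings in
 * place of the Expr nodes that create_one_window_path hands to make_opclause.
 */
#include <stdbool.h>
#include <stdio.h>

static void
build_run_condition(const char *wfunc, const char *opname, const char *arg,
                    bool wfunc_left)
{
	if (wfunc_left)
		printf("%s %s %s\n", wfunc, opname, arg);	/* e.g. row_number() <= 10 */
	else
		printf("%s %s %s\n", arg, opname, wfunc);	/* e.g. 10 >= row_number() */
}

int
main(void)
{
	build_run_condition("row_number() OVER (...)", "<=", "10", true);
	build_run_condition("row_number() OVER (...)", ">=", "10", false);
	return 0;
}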
4797
4798/*
4799 * create_distinct_paths
4800 *
4801 * Build a new upperrel containing Paths for SELECT DISTINCT evaluation.
4802 *
4803 * input_rel: contains the source-data Paths
4804 * target: the pathtarget for the result Paths to compute
4805 *
4806 * Note: input paths should already compute the desired pathtarget, since
4807 * Sort/Unique won't project anything.
4808 */
4809static RelOptInfo *
4810create_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
4811 PathTarget *target)
4812{
4813 RelOptInfo *distinct_rel;
4814
4815 /* For now, do all work in the (DISTINCT, NULL) upperrel */
4816 distinct_rel = fetch_upper_rel(root, UPPERREL_DISTINCT, NULL);
4817
4818 /*
4819 * We don't compute anything at this level, so distinct_rel will be
4820 * parallel-safe if the input rel is parallel-safe. In particular, if
4821 * there is a DISTINCT ON (...) clause, any path for the input_rel will
4822 * output those expressions, and will not be parallel-safe unless those
4823 * expressions are parallel-safe.
4824 */
4825 distinct_rel->consider_parallel = input_rel->consider_parallel;
4826
4827 /*
4828 * If the input rel belongs to a single FDW, so does the distinct_rel.
4829 */
4830 distinct_rel->serverid = input_rel->serverid;
4831 distinct_rel->userid = input_rel->userid;
4832 distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4833 distinct_rel->fdwroutine = input_rel->fdwroutine;
4834
4835 /* build distinct paths based on input_rel's pathlist */
4836 create_final_distinct_paths(root, input_rel, distinct_rel);
4837
4838 /* now build distinct paths based on input_rel's partial_pathlist */
4839 create_partial_distinct_paths(root, input_rel, distinct_rel, target);
4840
4841 /* Give a helpful error if we failed to create any paths */
4842 if (distinct_rel->pathlist == NIL)
4843 ereport(ERROR,
4844 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4845 errmsg("could not implement DISTINCT"),
4846 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4847
4848 /*
4849 * If there is an FDW that's responsible for all baserels of the query,
4850 * let it consider adding ForeignPaths.
4851 */
4852 if (distinct_rel->fdwroutine &&
4853 distinct_rel->fdwroutine->GetForeignUpperPaths)
4854 distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4855 UPPERREL_DISTINCT,
4856 input_rel,
4857 distinct_rel,
4858 NULL);
4859
4860 /* Let extensions possibly add some more paths */
4861 if (create_upper_paths_hook)
4862 (*create_upper_paths_hook) (root, UPPERREL_DISTINCT, input_rel,
4863 distinct_rel, NULL);
4864
4865 /* Now choose the best path(s) */
4866 set_cheapest(distinct_rel);
4867
4868 return distinct_rel;
4869}
4870
4871/*
4872 * create_partial_distinct_paths
4873 *
4874 * Process 'input_rel' partial paths and add unique/aggregate paths to the
4875 * UPPERREL_PARTIAL_DISTINCT rel. For paths created, add Gather/GatherMerge
4876 * paths on top and add a final unique/aggregate path to remove any duplicates
4877 * produced by combining rows from parallel workers.
4878 */
4879static void
4880create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
4881 RelOptInfo *final_distinct_rel,
4882 PathTarget *target)
4883{
4884 RelOptInfo *partial_distinct_rel;
4885 Query *parse;
4886 List *distinctExprs;
4887 double numDistinctRows;
4888 Path *cheapest_partial_path;
4889 ListCell *lc;
4890
4891 /* nothing to do when there are no partial paths in the input rel */
4892 if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
4893 return;
4894
4895 parse = root->parse;
4896
4897 /* can't do parallel DISTINCT ON */
4898 if (parse->hasDistinctOn)
4899 return;
4900
4901 partial_distinct_rel = fetch_upper_rel(root, UPPERREL_PARTIAL_DISTINCT,
4902 NULL);
4903 partial_distinct_rel->reltarget = target;
4904 partial_distinct_rel->consider_parallel = input_rel->consider_parallel;
4905
4906 /*
4907 * If input_rel belongs to a single FDW, so does the partial_distinct_rel.
4908 */
4909 partial_distinct_rel->serverid = input_rel->serverid;
4910 partial_distinct_rel->userid = input_rel->userid;
4911 partial_distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4912 partial_distinct_rel->fdwroutine = input_rel->fdwroutine;
4913
4914 cheapest_partial_path = linitial(input_rel->partial_pathlist);
4915
4916 distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
4917 parse->targetList);
4918
4919 /* estimate how many distinct rows we'll get from each worker */
4920 numDistinctRows = estimate_num_groups(root, distinctExprs,
4921 cheapest_partial_path->rows,
4922 NULL, NULL);
4923
4924 /*
4925 * Try sorting the cheapest path and incrementally sorting any paths with
4926 * presorted keys, and put unique paths atop those. We'll also
4927 * attempt to reorder the required pathkeys to match the input path's
4928 * pathkeys as much as possible, in hopes of avoiding a possible need to
4929 * re-sort.
4930 */
4931 if (grouping_is_sortable(root->processed_distinctClause))
4932 {
4933 foreach(lc, input_rel->partial_pathlist)
4934 {
4935 Path *input_path = (Path *) lfirst(lc);
4936 Path *sorted_path;
4937 List *useful_pathkeys_list = NIL;
4938
4939 useful_pathkeys_list =
4940 get_useful_pathkeys_for_distinct(root,
4941 root->distinct_pathkeys,
4942 input_path->pathkeys);
4943 Assert(list_length(useful_pathkeys_list) > 0);
4944
4945 foreach_node(List, useful_pathkeys, useful_pathkeys_list)
4946 {
4947 sorted_path = make_ordered_path(root,
4948 partial_distinct_rel,
4949 input_path,
4950 cheapest_partial_path,
4951 useful_pathkeys,
4952 -1.0);
4953
4954 if (sorted_path == NULL)
4955 continue;
4956
4957 /*
4958 * An empty distinct_pathkeys means all tuples have the same
4959 * value for the DISTINCT clause. See
4960 * create_final_distinct_paths()
4961 */
4962 if (root->distinct_pathkeys == NIL)
4963 {
4964 Node *limitCount;
4965
4966 limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
4967 sizeof(int64),
4968 Int64GetDatum(1), false,
4969 true);
4970
4971 /*
4972 * Apply a LimitPath onto the partial path to restrict the
4973 * tuples from each worker to 1.
4974 * create_final_distinct_paths will need to apply an
4975 * additional LimitPath to restrict this to a single row
4976 * after the Gather node. If the query already has a
4977 * LIMIT clause, then we could end up with three Limit
4978 * nodes in the final plan. Consolidating the top two of
4979 * these could be done, but does not seem worth troubling
4980 * over.
4981 */
4982 add_partial_path(partial_distinct_rel, (Path *)
4983 create_limit_path(root, partial_distinct_rel,
4984 sorted_path,
4985 NULL,
4986 limitCount,
4987 LIMIT_OPTION_COUNT,
4988 0, 1));
4989 }
4990 else
4991 {
4992 add_partial_path(partial_distinct_rel, (Path *)
4993 create_unique_path(root, partial_distinct_rel,
4994 sorted_path,
4995 list_length(root->distinct_pathkeys),
4996 numDistinctRows));
4997 }
4998 }
4999 }
5000 }
5001
5002 /*
5003 * Now try hash aggregate paths, if enabled and hashing is possible. Since
5004 * we're not on the hook to ensure we do our best to create at least one
5005 * path here, we treat enable_hashagg as a hard off-switch rather than the
5006 * slightly softer variant in create_final_distinct_paths.
5007 */
5008 if (enable_hashagg && grouping_is_hashable(root->processed_distinctClause))
5009 {
5010 add_partial_path(partial_distinct_rel, (Path *)
5011 create_agg_path(root,
5012 partial_distinct_rel,
5013 cheapest_partial_path,
5014 cheapest_partial_path->pathtarget,
5015 AGG_HASHED,
5016 AGGSPLIT_SIMPLE,
5017 root->processed_distinctClause,
5018 NIL,
5019 NULL,
5020 numDistinctRows));
5021 }
5022
5023 /*
5024 * If there is an FDW that's responsible for all baserels of the query,
5025 * let it consider adding ForeignPaths.
5026 */
5027 if (partial_distinct_rel->fdwroutine &&
5028 partial_distinct_rel->fdwroutine->GetForeignUpperPaths)
5029 partial_distinct_rel->fdwroutine->GetForeignUpperPaths(root,
5030 UPPERREL_PARTIAL_DISTINCT,
5031 input_rel,
5032 partial_distinct_rel,
5033 NULL);
5034
5035 /* Let extensions possibly add some more partial paths */
5036 if (create_upper_paths_hook)
5037 (*create_upper_paths_hook) (root, UPPERREL_PARTIAL_DISTINCT,
5038 input_rel, partial_distinct_rel, NULL);
5039
5040 if (partial_distinct_rel->partial_pathlist != NIL)
5041 {
5042 generate_useful_gather_paths(root, partial_distinct_rel, true);
5043 set_cheapest(partial_distinct_rel);
5044
5045 /*
5046 * Finally, create paths to distinctify the final result. This step
5047 * is needed to remove any duplicates due to combining rows from
5048 * parallel workers.
5049 */
5050 create_final_distinct_paths(root, partial_distinct_rel,
5051 final_distinct_rel);
5052 }
5053}
5054
5055/*
5056 * create_final_distinct_paths
5057 * Create distinct paths in 'distinct_rel' based on 'input_rel' pathlist
5058 *
5059 * input_rel: contains the source-data paths
5060 * distinct_rel: destination relation for storing created paths
5061 */
5062static RelOptInfo *
5063create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
5064 RelOptInfo *distinct_rel)
5065{
5066 Query *parse = root->parse;
5067 Path *cheapest_input_path = input_rel->cheapest_total_path;
5068 double numDistinctRows;
5069 bool allow_hash;
5070
5071 /* Estimate number of distinct rows there will be */
5072 if (parse->groupClause || parse->groupingSets || parse->hasAggs ||
5073 root->hasHavingQual)
5074 {
5075 /*
5076 * If there was grouping or aggregation, use the number of input rows
5077 * as the estimated number of DISTINCT rows (ie, assume the input is
5078 * already mostly unique).
5079 */
5080 numDistinctRows = cheapest_input_path->rows;
5081 }
5082 else
5083 {
5084 /*
5085 * Otherwise, the UNIQUE filter has effects comparable to GROUP BY.
5086 */
5087 List *distinctExprs;
5088
5089 distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
5090 parse->targetList);
5091 numDistinctRows = estimate_num_groups(root, distinctExprs,
5092 cheapest_input_path->rows,
5093 NULL, NULL);
5094 }
5095
5096 /*
5097 * Consider sort-based implementations of DISTINCT, if possible.
5098 */
5099 if (grouping_is_sortable(root->processed_distinctClause))
5100 {
5101 /*
5102 * Firstly, if we have any adequately-presorted paths, just stick a
5103 * Unique node on those. We also consider doing an explicit sort of
5104 * the cheapest input path and Unique'ing that. If any paths have
5105 * presorted keys then we'll create an incremental sort atop of those
5106 * before adding a unique node on the top. We'll also attempt to
5107 * reorder the required pathkeys to match the input path's pathkeys as
5108 * much as possible, in hopes of avoiding a possible need to re-sort.
5109 *
5110 * When we have DISTINCT ON, we must sort by the more rigorous of
5111 * DISTINCT and ORDER BY, else it won't have the desired behavior.
5112 * Also, if we do have to do an explicit sort, we might as well use
5113 * the more rigorous ordering to avoid a second sort later. (Note
5114 * that the parser will have ensured that one clause is a prefix of
5115 * the other.)
5116 */
5117 List *needed_pathkeys;
5118 ListCell *lc;
5119 double limittuples = root->distinct_pathkeys == NIL ? 1.0 : -1.0;
5120
5121 if (parse->hasDistinctOn &&
5122 list_length(root->distinct_pathkeys) <
5123 list_length(root->sort_pathkeys))
5124 needed_pathkeys = root->sort_pathkeys;
5125 else
5126 needed_pathkeys = root->distinct_pathkeys;
5127
5128 foreach(lc, input_rel->pathlist)
5129 {
5130 Path *input_path = (Path *) lfirst(lc);
5131 Path *sorted_path;
5132 List *useful_pathkeys_list = NIL;
5133
5134 useful_pathkeys_list =
5135 get_useful_pathkeys_for_distinct(root,
5136 needed_pathkeys,
5137 input_path->pathkeys);
5138 Assert(list_length(useful_pathkeys_list) > 0);
5139
5140 foreach_node(List, useful_pathkeys, useful_pathkeys_list)
5141 {
5142 sorted_path = make_ordered_path(root,
5143 distinct_rel,
5144 input_path,
5145 cheapest_input_path,
5146 useful_pathkeys,
5147 limittuples);
5148
5149 if (sorted_path == NULL)
5150 continue;
5151
5152 /*
5153 * distinct_pathkeys may have become empty if all of the
5154 * pathkeys were determined to be redundant. If all of the
5155 * pathkeys are redundant then each DISTINCT target must only
5156 * allow a single value, therefore all resulting tuples must
5157 * be identical (or at least indistinguishable by an equality
5158 * check). We can uniquify these tuples simply by taking the first
5159 * the first tuple. All we do here is add a path to do "LIMIT
5160 * 1" atop of 'sorted_path'. When doing a DISTINCT ON we may
5161 * still have a non-NIL sort_pathkeys list, so we must still
5162 * only do this with paths which are correctly sorted by
5163 * sort_pathkeys.
5164 */
5165 if (root->distinct_pathkeys == NIL)
5166 {
5167 Node *limitCount;
5168
5169 limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
5170 sizeof(int64),
5171 Int64GetDatum(1), false,
5172 true);
5173
5174 /*
5175 * If the query already has a LIMIT clause, then we could
5176 * end up with a duplicate LimitPath in the final plan.
5177 * That does not seem worth troubling over too much.
5178 */
5179 add_path(distinct_rel, (Path *)
5180 create_limit_path(root, distinct_rel, sorted_path,
5181 NULL, limitCount,
5182 LIMIT_OPTION_COUNT, 0, 1));
5183 }
5184 else
5185 {
5186 add_path(distinct_rel, (Path *)
5187 create_unique_path(root, distinct_rel,
5188 sorted_path,
5189 list_length(root->distinct_pathkeys),
5190 numDistinctRows));
5191 }
5192 }
5193 }
5194 }
5195
5196 /*
5197 * Consider hash-based implementations of DISTINCT, if possible.
5198 *
5199 * If we were not able to make any other types of path, we *must* hash or
5200 * die trying. If we do have other choices, there are two things that
5201 * should prevent selection of hashing: if the query uses DISTINCT ON
5202 * (because it won't really have the expected behavior if we hash), or if
5203 * enable_hashagg is off.
5204 *
5205 * Note: grouping_is_hashable() is much more expensive to check than the
5206 * other gating conditions, so we want to do it last.
5207 */
5208 if (distinct_rel->pathlist == NIL)
5209 allow_hash = true; /* we have no alternatives */
5210 else if (parse->hasDistinctOn || !enable_hashagg)
5211 allow_hash = false; /* policy-based decision not to hash */
5212 else
5213 allow_hash = true; /* default */
5214
5215 if (allow_hash && grouping_is_hashable(root->processed_distinctClause))
5216 {
5217 /* Generate hashed aggregate path --- no sort needed */
5218 add_path(distinct_rel, (Path *)
5219 create_agg_path(root,
5220 distinct_rel,
5221 cheapest_input_path,
5222 cheapest_input_path->pathtarget,
5223 AGG_HASHED,
5224 AGGSPLIT_SIMPLE,
5225 root->processed_distinctClause,
5226 NIL,
5227 NULL,
5228 numDistinctRows));
5229 }
5230
5231 return distinct_rel;
5232}
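/*
 * [Editor's illustrative sketch, not part of planner.c] The allow_hash
 * decision near the end of create_final_distinct_paths above is a small
 * three-way policy choice; this standalone helper restates it over plain
 * booleans (the parameter names are invented for the sketch).
 */
#include <stdbool.h>
#include <stdio.h>

static bool
allow_hash_for_distinct(bool have_sorted_paths, bool has_distinct_on,
                        bool hashagg_enabled)
{
	if (!have_sorted_paths)
		return true;			/* no alternatives: must try hashing */
	if (has_distinct_on || !hashagg_enabled)
		return false;			/* policy-based decision not to hash */
	return true;				/* default */
}

int
main(void)
{
	printf("%d\n", allow_hash_for_distinct(true, true, true));		/* 0 */
	printf("%d\n", allow_hash_for_distinct(false, true, false));	/* 1 */
	return 0;
}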
5233
5234/*
5235 * get_useful_pathkeys_for_distinct
5236 * Get useful orderings of pathkeys for distinctClause by reordering
5237 * 'needed_pathkeys' to match the given 'path_pathkeys' as much as possible.
5238 *
5239 * This returns a list of pathkeys that can be useful for DISTINCT or DISTINCT
5240 * ON clause. For convenience, it always includes the given 'needed_pathkeys'.
5241 */
5242static List *
5243get_useful_pathkeys_for_distinct(PlannerInfo *root, List *needed_pathkeys,
5244 List *path_pathkeys)
5245{
5246 List *useful_pathkeys_list = NIL;
5247 List *useful_pathkeys = NIL;
5248
5249 /* always include the given 'needed_pathkeys' */
5250 useful_pathkeys_list = lappend(useful_pathkeys_list,
5251 needed_pathkeys);
5252
5253 if (!enable_distinct_reordering)
5254 return useful_pathkeys_list;
5255
5256 /*
5257 * Scan the given 'path_pathkeys' and construct a list of PathKey nodes
5258 * that match 'needed_pathkeys', but only up to the longest matching
5259 * prefix.
5260 *
5261 * When we have DISTINCT ON, we must ensure that the resulting pathkey
5262 * list matches initial distinctClause pathkeys; otherwise, it won't have
5263 * the desired behavior.
5264 */
5265 foreach_node(PathKey, pathkey, path_pathkeys)
5266 {
5267 /*
5268 * The PathKey nodes are canonical, so they can be checked for
5269 * equality by simple pointer comparison.
5270 */
5271 if (!list_member_ptr(needed_pathkeys, pathkey))
5272 break;
5273 if (root->parse->hasDistinctOn &&
5274 !list_member_ptr(root->distinct_pathkeys, pathkey))
5275 break;
5276
5277 useful_pathkeys = lappend(useful_pathkeys, pathkey);
5278 }
5279
5280 /* If no match at all, no point in reordering needed_pathkeys */
5281 if (useful_pathkeys == NIL)
5282 return useful_pathkeys_list;
5283
5284 /*
5285 * If not full match, the resulting pathkey list is not useful without
5286 * incremental sort.
5287 */
5288 if (list_length(useful_pathkeys) < list_length(needed_pathkeys) &&
5289 !enable_incremental_sort)
5290 return useful_pathkeys_list;
5291
5292 /* Append the remaining PathKey nodes in needed_pathkeys */
5293 useful_pathkeys = list_concat_unique_ptr(useful_pathkeys,
5294 needed_pathkeys);
5295
5296 /*
5297 * If the resulting pathkey list is the same as the 'needed_pathkeys',
5298 * just drop it.
5299 */
5300 if (compare_pathkeys(needed_pathkeys,
5301 useful_pathkeys) == PATHKEYS_EQUAL)
5302 return useful_pathkeys_list;
5303
5304 useful_pathkeys_list = lappend(useful_pathkeys_list,
5305 useful_pathkeys);
5306
5307 return useful_pathkeys_list;
5308}
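/*
 * [Editor's illustrative sketch, not part of planner.c] The reordering done
 * by get_useful_pathkeys_for_distinct above amounts to: keep the longest
 * prefix of the input path's ordering that is also needed, then append the
 * remaining needed keys. This standalone fragment applies the same idea to
 * small integer arrays standing in for canonical PathKey pointers.
 */
#include <stdbool.h>
#include <stdio.h>

static bool
contains(const int *keys, int n, int key)
{
	for (int i = 0; i < n; i++)
		if (keys[i] == key)
			return true;
	return false;
}

int
main(void)
{
	int			needed[] = {1, 2, 3};	/* DISTINCT needs keys 1, 2 and 3 */
	int			path[] = {2, 1, 7};		/* input is already sorted by 2, 1, 7 */
	int			out[6];
	int			n = 0;

	/* take the longest prefix of 'path' whose keys are all needed */
	for (int i = 0; i < 3 && contains(needed, 3, path[i]); i++)
		out[n++] = path[i];

	/* then append the needed keys not already taken */
	for (int i = 0; i < 3; i++)
		if (!contains(out, n, needed[i]))
			out[n++] = needed[i];

	for (int i = 0; i < n; i++)
		printf("%d ", out[i]);	/* prints "2 1 3 " */
	printf("\n");
	return 0;
}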
5309
5310/*
5311 * create_ordered_paths
5312 *
5313 * Build a new upperrel containing Paths for ORDER BY evaluation.
5314 *
5315 * All paths in the result must satisfy the ORDER BY ordering.
5316 * The only new paths we need consider are an explicit full sort
5317 * and incremental sort on the cheapest-total existing path.
5318 *
5319 * input_rel: contains the source-data Paths
5320 * target: the output tlist the result Paths must emit
5321 * limit_tuples: estimated bound on the number of output tuples,
5322 * or -1 if no LIMIT or couldn't estimate
5323 *
5324 * XXX This only looks at sort_pathkeys. I wonder if it needs to look at the
5325 * other pathkeys (grouping, ...) like generate_useful_gather_paths.
5326 */
5327static RelOptInfo *
5328create_ordered_paths(PlannerInfo *root,
5329 RelOptInfo *input_rel,
5330 PathTarget *target,
5331 bool target_parallel_safe,
5332 double limit_tuples)
5333{
5334 Path *cheapest_input_path = input_rel->cheapest_total_path;
5335 RelOptInfo *ordered_rel;
5336 ListCell *lc;
5337
5338 /* For now, do all work in the (ORDERED, NULL) upperrel */
5339 ordered_rel = fetch_upper_rel(root, UPPERREL_ORDERED, NULL);
5340
5341 /*
5342 * If the input relation is not parallel-safe, then the ordered relation
5343 * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
5344 * target list is parallel-safe.
5345 */
5346 if (input_rel->consider_parallel && target_parallel_safe)
5347 ordered_rel->consider_parallel = true;
5348
5349 /*
5350 * If the input rel belongs to a single FDW, so does the ordered_rel.
5351 */
5352 ordered_rel->serverid = input_rel->serverid;
5353 ordered_rel->userid = input_rel->userid;
5354 ordered_rel->useridiscurrent = input_rel->useridiscurrent;
5355 ordered_rel->fdwroutine = input_rel->fdwroutine;
5356
5357 foreach(lc, input_rel->pathlist)
5358 {
5359 Path *input_path = (Path *) lfirst(lc);
5360 Path *sorted_path;
5361 bool is_sorted;
5362 int presorted_keys;
5363
5364 is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5365 input_path->pathkeys, &presorted_keys);
5366
5367 if (is_sorted)
5368 sorted_path = input_path;
5369 else
5370 {
5371 /*
5372 * Try at least sorting the cheapest path and also try
5373 * incrementally sorting any path which is partially sorted
5374 * already (no need to deal with paths which have presorted keys
5375 * when incremental sort is disabled unless it's the cheapest
5376 * input path).
5377 */
5378 if (input_path != cheapest_input_path &&
5379 (presorted_keys == 0 || !enable_incremental_sort))
5380 continue;
5381
5382 /*
5383 * We've no need to consider both a sort and incremental sort.
5384 * We'll just do a sort if there are no presorted keys and an
5385 * incremental sort when there are presorted keys.
5386 */
5387 if (presorted_keys == 0 || !enable_incremental_sort)
5388 sorted_path = (Path *) create_sort_path(root,
5389 ordered_rel,
5390 input_path,
5391 root->sort_pathkeys,
5392 limit_tuples);
5393 else
5394 sorted_path = (Path *) create_incremental_sort_path(root,
5395 ordered_rel,
5396 input_path,
5397 root->sort_pathkeys,
5398 presorted_keys,
5399 limit_tuples);
5400 }
5401
5402 /*
5403 * If the pathtarget of the result path has different expressions from
5404 * the target to be applied, a projection step is needed.
5405 */
5406 if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5407 sorted_path = apply_projection_to_path(root, ordered_rel,
5408 sorted_path, target);
5409
5410 add_path(ordered_rel, sorted_path);
5411 }
5412
5413 /*
5414 * generate_gather_paths() will have already generated a simple Gather
5415 * path for the best parallel path, if any, and the loop above will have
5416 * considered sorting it. Similarly, generate_gather_paths() will also
5417 * have generated order-preserving Gather Merge plans which can be used
5418 * without sorting if they happen to match the sort_pathkeys, and the loop
5419 * above will have handled those as well. However, there's one more
5420 * possibility: it may make sense to sort the cheapest partial path or
5421 * incrementally sort any partial path that is partially sorted according
5422 * to the required output order and then use Gather Merge.
5423 */
5424 if (ordered_rel->consider_parallel && root->sort_pathkeys != NIL &&
5425 input_rel->partial_pathlist != NIL)
5426 {
5427 Path *cheapest_partial_path;
5428
5429 cheapest_partial_path = linitial(input_rel->partial_pathlist);
5430
5431 foreach(lc, input_rel->partial_pathlist)
5432 {
5433 Path *input_path = (Path *) lfirst(lc);
5434 Path *sorted_path;
5435 bool is_sorted;
5436 int presorted_keys;
5437 double total_groups;
5438
5439 is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5440 input_path->pathkeys,
5441 &presorted_keys);
5442
5443 if (is_sorted)
5444 continue;
5445
5446 /*
5447 * Try at least sorting the cheapest path and also try
5448 * incrementally sorting any path which is partially sorted
5449 * already (no need to deal with paths which have presorted keys
5450 * when incremental sort is disabled unless it's the cheapest
5451 * partial path).
5452 */
5453 if (input_path != cheapest_partial_path &&
5454 (presorted_keys == 0 || !enable_incremental_sort))
5455 continue;
5456
5457 /*
5458 * We've no need to consider both a sort and incremental sort.
5459 * We'll just do a sort if there are no presorted keys and an
5460 * incremental sort when there are presorted keys.
5461 */
5462 if (presorted_keys == 0 || !enable_incremental_sort)
5463 sorted_path = (Path *) create_sort_path(root,
5464 ordered_rel,
5465 input_path,
5466 root->sort_pathkeys,
5467 limit_tuples);
5468 else
5469 sorted_path = (Path *) create_incremental_sort_path(root,
5470 ordered_rel,
5471 input_path,
5472 root->sort_pathkeys,
5473 presorted_keys,
5474 limit_tuples);
5475 total_groups = compute_gather_rows(sorted_path);
5476 sorted_path = (Path *)
5477 create_gather_merge_path(root, ordered_rel,
5478 sorted_path,
5479 sorted_path->pathtarget,
5480 root->sort_pathkeys, NULL,
5481 &total_groups);
5482
5483 /*
5484 * If the pathtarget of the result path has different expressions
5485 * from the target to be applied, a projection step is needed.
5486 */
5487 if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5488 sorted_path = apply_projection_to_path(root, ordered_rel,
5489 sorted_path, target);
5490
5491 add_path(ordered_rel, sorted_path);
5492 }
5493 }
5494
5495 /*
5496 * If there is an FDW that's responsible for all baserels of the query,
5497 * let it consider adding ForeignPaths.
5498 */
5499 if (ordered_rel->fdwroutine &&
5500 ordered_rel->fdwroutine->GetForeignUpperPaths)
5501 ordered_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_ORDERED,
5502 input_rel, ordered_rel,
5503 NULL);
5504
5505 /* Let extensions possibly add some more paths */
5506 if (create_upper_paths_hook)
5507 (*create_upper_paths_hook) (root, UPPERREL_ORDERED,
5508 input_rel, ordered_rel, NULL);
5509
5510 /*
5511 * No need to bother with set_cheapest here; grouping_planner does not
5512 * need us to do it.
5513 */
5514 Assert(ordered_rel->pathlist != NIL);
5515
5516 return ordered_rel;
5517}
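/*
 * The create_upper_paths_hook invocation above is one of the points where an
 * extension can contribute additional paths.  A minimal sketch of such an
 * extension (hypothetical names; PG_MODULE_MAGIC, error handling and the
 * actual path construction omitted):
 *
 *	static create_upper_paths_hook_type prev_create_upper_paths_hook = NULL;
 *
 *	static void
 *	my_upper_paths(PlannerInfo *root, UpperRelationKind stage,
 *	               RelOptInfo *input_rel, RelOptInfo *output_rel,
 *	               void *extra)
 *	{
 *		if (prev_create_upper_paths_hook)
 *			prev_create_upper_paths_hook(root, stage, input_rel,
 *			                             output_rel, extra);
 *		if (stage == UPPERREL_ORDERED)
 *			add_path(output_rel, my_build_ordered_path(root, input_rel));
 *	}
 *
 *	void
 *	_PG_init(void)
 *	{
 *		prev_create_upper_paths_hook = create_upper_paths_hook;
 *		create_upper_paths_hook = my_upper_paths;
 *	}
 *
 * Here my_build_ordered_path stands in for whatever path-building logic the
 * extension supplies.
 */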
5518
5519
5520/*
5521 * make_group_input_target
5522 * Generate appropriate PathTarget for initial input to grouping nodes.
5523 *
5524 * If there is grouping or aggregation, the scan/join subplan cannot emit
5525 * the query's final targetlist; for example, it certainly can't emit any
5526 * aggregate function calls. This routine generates the correct target
5527 * for the scan/join subplan.
5528 *
5529 * The query target list passed from the parser already contains entries
5530 * for all ORDER BY and GROUP BY expressions, but it will not have entries
5531 * for variables used only in HAVING clauses; so we need to add those
5532 * variables to the subplan target list. Also, we flatten all expressions
5533 * except GROUP BY items into their component variables; other expressions
5534 * will be computed by the upper plan nodes rather than by the subplan.
5535 * For example, given a query like
5536 * SELECT a+b,SUM(c+d) FROM table GROUP BY a+b;
5537 * we want to pass this targetlist to the subplan:
5538 * a+b,c,d
5539 * where the a+b target will be used by the Sort/Group steps, and the
5540 * other targets will be used for computing the final results.
5541 *
5542 * 'final_target' is the query's final target list (in PathTarget form)
5543 *
5544 * The result is the PathTarget to be computed by the Paths returned from
5545 * query_planner().
5546 */
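/*
 * For illustration, extending the example above (hypothetical query): with
 *	SELECT a+b,SUM(c+d) FROM table GROUP BY a+b HAVING SUM(e) > 0
 * the subplan target list becomes
 *	a+b,c,d,e
 * since e is referenced only inside the HAVING aggregate but still has to be
 * supplied by the scan/join subplan.
 */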
5547static PathTarget *
5548make_group_input_target(PlannerInfo *root, PathTarget *final_target)
5549{
5550 Query *parse = root->parse;
5551 PathTarget *input_target;
5552 List *non_group_cols;
5553 List *non_group_vars;
5554 int i;
5555 ListCell *lc;
5556
5557 /*
5558 * We must build a target containing all grouping columns, plus any other
5559 * Vars mentioned in the query's targetlist and HAVING qual.
5560 */
5561 input_target = create_empty_pathtarget();
5562 non_group_cols = NIL;
5563
5564 i = 0;
5565 foreach(lc, final_target->exprs)
5566 {
5567 Expr *expr = (Expr *) lfirst(lc);
5568 Index sgref = get_pathtarget_sortgroupref(final_target, i);
5569
5570 if (sgref && root->processed_groupClause &&
5571 get_sortgroupref_clause_noerr(sgref,
5572 root->processed_groupClause) != NULL)
5573 {
5574 /*
5575 * It's a grouping column, so add it to the input target as-is.
5576 *
5577 * Note that the target is logically below the grouping step. So
5578 * with grouping sets we need to remove the RT index of the
5579 * grouping step if there is any from the target expression.
5580 */
5581 if (parse->hasGroupRTE && parse->groupingSets != NIL)
5582 {
5583 Assert(root->group_rtindex > 0);
5584 expr = (Expr *)
5585 remove_nulling_relids((Node *) expr,
5586 bms_make_singleton(root->group_rtindex),
5587 NULL);
5588 }
5589 add_column_to_pathtarget(input_target, expr, sgref);
5590 }
5591 else
5592 {
5593 /*
5594 * Non-grouping column, so just remember the expression for later
5595 * call to pull_var_clause.
5596 */
5597 non_group_cols = lappend(non_group_cols, expr);
5598 }
5599
5600 i++;
5601 }
5602
5603 /*
5604 * If there's a HAVING clause, we'll need the Vars it uses, too.
5605 */
5606 if (parse->havingQual)
5607 non_group_cols = lappend(non_group_cols, parse->havingQual);
5608
5609 /*
5610 * Pull out all the Vars mentioned in non-group cols (plus HAVING), and
5611 * add them to the input target if not already present. (A Var used
5612 * directly as a GROUP BY item will be present already.) Note this
5613 * includes Vars used in resjunk items, so we are covering the needs of
5614 * ORDER BY and window specifications. Vars used within Aggrefs and
5615 * WindowFuncs will be pulled out here, too.
5616 *
5617 * Note that the target is logically below the grouping step. So with
5618 * grouping sets we need to remove the RT index of the grouping step if
5619 * there is any from the non-group Vars.
5620 */
5621 non_group_vars = pull_var_clause((Node *) non_group_cols,
5622 PVC_RECURSE_AGGREGATES |
5623 PVC_RECURSE_WINDOWFUNCS |
5624 PVC_INCLUDE_PLACEHOLDERS);
5625 if (parse->hasGroupRTE && parse->groupingSets != NIL)
5626 {
5627 Assert(root->group_rtindex > 0);
5628 non_group_vars = (List *)
5629 remove_nulling_relids((Node *) non_group_vars,
5630 bms_make_singleton(root->group_rtindex),
5631 NULL);
5632 }
5633 add_new_columns_to_pathtarget(input_target, non_group_vars);
5634
5635 /* clean up cruft */
5636 list_free(non_group_vars);
5637 list_free(non_group_cols);
5638
5639 /* XXX this causes some redundant cost calculation ... */
5640 return set_pathtarget_cost_width(root, input_target);
5641}
5642
5643/*
5644 * make_partial_grouping_target
5645 * Generate appropriate PathTarget for output of partial aggregate
5646 * (or partial grouping, if there are no aggregates) nodes.
5647 *
5648 * A partial aggregation node needs to emit all the same aggregates that
5649 * a regular aggregation node would, plus any aggregates used in HAVING;
5650 * except that the Aggref nodes should be marked as partial aggregates.
5651 *
5652 * In addition, we'd better emit any Vars and PlaceHolderVars that are
5653 * used outside of Aggrefs in the aggregation tlist and HAVING. (Presumably,
5654 * these would be Vars that are grouped by or used in grouping expressions.)
5655 *
5656 * grouping_target is the tlist to be emitted by the topmost aggregation step.
5657 * havingQual represents the HAVING clause.
5658 */
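/*
 * For illustration (hypothetical query): for
 *	SELECT a, SUM(b) FROM tab GROUP BY a HAVING MAX(c) > 0
 * the partial target contains the grouping column a plus the Aggrefs SUM(b)
 * and MAX(c), both marked AGGSPLIT_INITIAL_SERIAL below so that the final
 * aggregation step can combine the partial transition states.
 */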
5659static PathTarget *
5660make_partial_grouping_target(PlannerInfo *root,
5661 PathTarget *grouping_target,
5662 Node *havingQual)
5663{
5664 PathTarget *partial_target;
5665 List *non_group_cols;
5666 List *non_group_exprs;
5667 int i;
5668 ListCell *lc;
5669
5670 partial_target = create_empty_pathtarget();
5671 non_group_cols = NIL;
5672
5673 i = 0;
5674 foreach(lc, grouping_target->exprs)
5675 {
5676 Expr *expr = (Expr *) lfirst(lc);
5677 Index sgref = get_pathtarget_sortgroupref(grouping_target, i);
5678
5679 if (sgref && root->processed_groupClause &&
5680 get_sortgroupref_clause_noerr(sgref,
5681 root->processed_groupClause) != NULL)
5682 {
5683 /*
5684 * It's a grouping column, so add it to the partial_target as-is.
5685 * (This allows the upper agg step to repeat the grouping calcs.)
5686 */
5687 add_column_to_pathtarget(partial_target, expr, sgref);
5688 }
5689 else
5690 {
5691 /*
5692 * Non-grouping column, so just remember the expression for later
5693 * call to pull_var_clause.
5694 */
5695 non_group_cols = lappend(non_group_cols, expr);
5696 }
5697
5698 i++;
5699 }
5700
5701 /*
5702 * If there's a HAVING clause, we'll need the Vars/Aggrefs it uses, too.
5703 */
5704 if (havingQual)
5705 non_group_cols = lappend(non_group_cols, havingQual);
5706
5707 /*
5708 * Pull out all the Vars, PlaceHolderVars, and Aggrefs mentioned in
5709 * non-group cols (plus HAVING), and add them to the partial_target if not
5710 * already present. (An expression used directly as a GROUP BY item will
5711 * be present already.) Note this includes Vars used in resjunk items, so
5712 * we are covering the needs of ORDER BY and window specifications.
5713 */
5714 non_group_exprs = pull_var_clause((Node *) non_group_cols,
5715 PVC_INCLUDE_AGGREGATES |
5716 PVC_RECURSE_WINDOWFUNCS |
5717 PVC_INCLUDE_PLACEHOLDERS);
5718
5719 add_new_columns_to_pathtarget(partial_target, non_group_exprs);
5720
5721 /*
5722 * Adjust Aggrefs to put them in partial mode. At this point all Aggrefs
5723 * are at the top level of the target list, so we can just scan the list
5724 * rather than recursing through the expression trees.
5725 */
5726 foreach(lc, partial_target->exprs)
5727 {
5728 Aggref *aggref = (Aggref *) lfirst(lc);
5729
5730 if (IsA(aggref, Aggref))
5731 {
5732 Aggref *newaggref;
5733
5734 /*
5735 * We shouldn't need to copy the substructure of the Aggref node,
5736 * but flat-copy the node itself to avoid damaging other trees.
5737 */
5738 newaggref = makeNode(Aggref);
5739 memcpy(newaggref, aggref, sizeof(Aggref));
5740
5741 /* For now, assume serialization is required */
5742 mark_partial_aggref(newaggref, AGGSPLIT_INITIAL_SERIAL);
5743
5744 lfirst(lc) = newaggref;
5745 }
5746 }
5747
5748 /* clean up cruft */
5749 list_free(non_group_exprs);
5750 list_free(non_group_cols);
5751
5752 /* XXX this causes some redundant cost calculation ... */
5753 return set_pathtarget_cost_width(root, partial_target);
5754}
5755
5756/*
5757 * mark_partial_aggref
5758 * Adjust an Aggref to make it represent a partial-aggregation step.
5759 *
5760 * The Aggref node is modified in-place; caller must do any copying required.
5761 */
5762void
5763mark_partial_aggref(Aggref *agg, AggSplit aggsplit)
5764{
5765 /* aggtranstype should be computed by this point */
5766 Assert(OidIsValid(agg->aggtranstype));
5767 /* ... but aggsplit should still be as the parser left it */
5768 Assert(agg->aggsplit == AGGSPLIT_SIMPLE);
5769
5770 /* Mark the Aggref with the intended partial-aggregation mode */
5771 agg->aggsplit = aggsplit;
5772
5773 /*
5774 * Adjust result type if needed. Normally, a partial aggregate returns
5775 * the aggregate's transition type; but if that's INTERNAL and we're
5776 * serializing, it returns BYTEA instead.
5777 */
5778 if (DO_AGGSPLIT_SKIPFINAL(aggsplit))
5779 {
5780 if (agg->aggtranstype == INTERNALOID && DO_AGGSPLIT_SERIALIZE(aggsplit))
5781 agg->aggtype = BYTEAOID;
5782 else
5783 agg->aggtype = agg->aggtranstype;
5784 }
5785}
5786
5787/*
5788 * postprocess_setop_tlist
5789 * Fix up targetlist returned by plan_set_operations().
5790 *
5791 * We need to transpose sort key info from the orig_tlist into new_tlist.
5792 * NOTE: this would not be good enough if we supported resjunk sort keys
5793 * for results of set operations --- then, we'd need to project a whole
5794 * new tlist to evaluate the resjunk columns. For now, just ereport if we
5795 * find any resjunk columns in orig_tlist.
5796 */
5797static List *
5798postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
5799{
5800 ListCell *l;
5801 ListCell *orig_tlist_item = list_head(orig_tlist);
5802
5803 foreach(l, new_tlist)
5804 {
5805 TargetEntry *new_tle = lfirst_node(TargetEntry, l);
5806 TargetEntry *orig_tle;
5807
5808 /* ignore resjunk columns in setop result */
5809 if (new_tle->resjunk)
5810 continue;
5811
5812 Assert(orig_tlist_item != NULL);
5813 orig_tle = lfirst_node(TargetEntry, orig_tlist_item);
5814 orig_tlist_item = lnext(orig_tlist, orig_tlist_item);
5815 if (orig_tle->resjunk) /* should not happen */
5816 elog(ERROR, "resjunk output columns are not implemented");
5817 Assert(new_tle->resno == orig_tle->resno);
5818 new_tle->ressortgroupref = orig_tle->ressortgroupref;
5819 }
5820 if (orig_tlist_item != NULL)
5821 elog(ERROR, "resjunk output columns are not implemented");
5822 return new_tlist;
5823}
5824
5825/*
5826 * optimize_window_clauses
5827 * Call each WindowFunc's prosupport function to see if we're able to
5828 * make any adjustments to any of the WindowClauses so that the executor
5829 * can execute the window functions in a more optimal way.
5830 *
5831 * Currently we only allow adjustments to the WindowClause's frameOptions. We
5832 * may allow more things to be done here in the future.
5833 */
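/*
 * For illustration: row_number(), rank(), dense_rank() and similar window
 * functions have prosupport functions that report that the frame does not
 * affect their result, which typically lets the frame be reduced to
 * ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW, a cheaper frame for the
 * executor to maintain.  The exact adjustment is up to each support
 * function; this is only an indicative example.
 */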
5834static void
5835optimize_window_clauses(PlannerInfo *root, WindowFuncLists *wflists)
5836{
5837 List *windowClause = root->parse->windowClause;
5838 ListCell *lc;
5839
5840 foreach(lc, windowClause)
5841 {
5842 WindowClause *wc = lfirst_node(WindowClause, lc);
5843 ListCell *lc2;
5844 int optimizedFrameOptions = 0;
5845
5846 Assert(wc->winref <= wflists->maxWinRef);
5847
5848 /* skip any WindowClauses that have no WindowFuncs */
5849 if (wflists->windowFuncs[wc->winref] == NIL)
5850 continue;
5851
5852 foreach(lc2, wflists->windowFuncs[wc->winref])
5853 {
5854 SupportRequestOptimizeWindowClause req;
5855 SupportRequestOptimizeWindowClause *res;
5856 WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
5857 Oid prosupport;
5858
5859 prosupport = get_func_support(wfunc->winfnoid);
5860
5861 /* Check if there's a support function for 'wfunc' */
5862 if (!OidIsValid(prosupport))
5863 break; /* can't optimize this WindowClause */
5864
5865 req.type = T_SupportRequestOptimizeWindowClause;
5866 req.window_clause = wc;
5867 req.window_func = wfunc;
5868 req.frameOptions = wc->frameOptions;
5869
5870 /* call the support function */
5871 res = (SupportRequestOptimizeWindowClause *)
5872 DatumGetPointer(OidFunctionCall1(prosupport,
5873 PointerGetDatum(&req)));
5874
5875 /*
5876 * Skip to next WindowClause if the support function does not
5877 * support this request type.
5878 */
5879 if (res == NULL)
5880 break;
5881
5882 /*
5883 * Save these frameOptions for the first WindowFunc for this
5884 * WindowClause.
5885 */
5886 if (foreach_current_index(lc2) == 0)
5887 optimizedFrameOptions = res->frameOptions;
5888
5889 /*
5890 * On subsequent WindowFuncs, if the frameOptions are not the same
5891 * then we're unable to optimize the frameOptions for this
5892 * WindowClause.
5893 */
5894 else if (optimizedFrameOptions != res->frameOptions)
5895 break; /* skip to the next WindowClause, if any */
5896 }
5897
5898 /* adjust the frameOptions if all WindowFunc's agree that it's ok */
5899 if (lc2 == NULL && wc->frameOptions != optimizedFrameOptions)
5900 {
5901 ListCell *lc3;
5902
5903 /* apply the new frame options */
5904 wc->frameOptions = optimizedFrameOptions;
5905
5906 /*
5907 * We now check to see if changing the frameOptions has caused
5908 * this WindowClause to be a duplicate of some other WindowClause.
5909 * This can only happen if we have multiple WindowClauses, so
5910 * don't bother if there's only 1.
5911 */
5912 if (list_length(windowClause) == 1)
5913 continue;
5914
5915 /*
5916 * Do the duplicate check and reuse the existing WindowClause if
5917 * we find a duplicate.
5918 */
5919 foreach(lc3, windowClause)
5920 {
5921 WindowClause *existing_wc = lfirst_node(WindowClause, lc3);
5922
5923 /* skip over the WindowClause we're currently editing */
5924 if (existing_wc == wc)
5925 continue;
5926
5927 /*
5928 * Perform the same duplicate check that is done in
5929 * transformWindowFuncCall.
5930 */
5931 if (equal(wc->partitionClause, existing_wc->partitionClause) &&
5932 equal(wc->orderClause, existing_wc->orderClause) &&
5933 wc->frameOptions == existing_wc->frameOptions &&
5934 equal(wc->startOffset, existing_wc->startOffset) &&
5935 equal(wc->endOffset, existing_wc->endOffset))
5936 {
5937 ListCell *lc4;
5938
5939 /*
5940 * Now move each WindowFunc in 'wc' into 'existing_wc'.
5941 * This requires adjusting each WindowFunc's winref and
5942 * moving the WindowFuncs in 'wc' to the list of
5943 * WindowFuncs in 'existing_wc'.
5944 */
5945 foreach(lc4, wflists->windowFuncs[wc->winref])
5946 {
5947 WindowFunc *wfunc = lfirst_node(WindowFunc, lc4);
5948
5949 wfunc->winref = existing_wc->winref;
5950 }
5951
5952 /* move list items */
5953 wflists->windowFuncs[existing_wc->winref] = list_concat(wflists->windowFuncs[existing_wc->winref],
5954 wflists->windowFuncs[wc->winref]);
5955 wflists->windowFuncs[wc->winref] = NIL;
5956
5957 /*
5958 * transformWindowFuncCall() should have made sure there
5959 * are no other duplicates, so we needn't bother looking
5960 * any further.
5961 */
5962 break;
5963 }
5964 }
5965 }
5966 }
5967}
5968
5969/*
5970 * select_active_windows
5971 * Create a list of the "active" window clauses (ie, those referenced
5972 * by non-deleted WindowFuncs) in the order they are to be executed.
5973 */
5974static List *
5975select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
5976{
5977 List *windowClause = root->parse->windowClause;
5978 List *result = NIL;
5979 ListCell *lc;
5980 int nActive = 0;
5981 WindowClauseSortData *actives = palloc(sizeof(WindowClauseSortData)
5982 * list_length(windowClause));
5983
5984 /* First, construct an array of the active windows */
5985 foreach(lc, windowClause)
5986 {
5987 WindowClause *wc = lfirst_node(WindowClause, lc);
5988
5989 /* It's only active if wflists shows some related WindowFuncs */
5990 Assert(wc->winref <= wflists->maxWinRef);
5991 if (wflists->windowFuncs[wc->winref] == NIL)
5992 continue;
5993
5994 actives[nActive].wc = wc; /* original clause */
5995
5996 /*
5997 * For sorting, we want the list of partition keys followed by the
5998 * list of sort keys. But pathkeys construction will remove duplicates
5999 * between the two, so we can as well (even though we can't detect all
6000 * of the duplicates, since some may come from ECs - that might mean
6001 * we miss optimization chances here). We must, however, ensure that
6002 * the order of entries is preserved with respect to the ones we do
6003 * keep.
6004 *
6005 * partitionClause and orderClause had their own duplicates removed in
6006 * parse analysis, so we're only concerned here with removing
6007 * orderClause entries that also appear in partitionClause.
6008 */
6009 actives[nActive].uniqueOrder =
6010 list_concat_unique(list_copy(wc->partitionClause),
6011 wc->orderClause);
6012 nActive++;
6013 }
6014
6015 /*
6016 * Sort active windows by their partitioning/ordering clauses, ignoring
6017 * any framing clauses, so that the windows that need the same sorting are
6018 * adjacent in the list. When we come to generate paths, this will avoid
6019 * inserting additional Sort nodes.
6020 *
6021 * This is how we implement a specific requirement from the SQL standard,
6022 * which says that when two or more windows are order-equivalent (i.e.
6023 * have matching partition and order clauses, even if their names or
6024 * framing clauses differ), then all peer rows must be presented in the
6025 * same order in all of them. If we allowed multiple sort nodes for such
6026 * cases, we'd risk having the peer rows end up in different orders in
6027 * equivalent windows due to sort instability. (See General Rule 4 of
6028 * <window clause> in SQL2008 - SQL2016.)
6029 *
6030 * Additionally, if the entire list of clauses of one window is a prefix
6031 * of another, put first the window with stronger sorting requirements.
6032 * This way we will first sort for stronger window, and won't have to sort
6033 * again for the weaker one.
6034 */
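 /*
 * For illustration (hypothetical windows): given w1 AS (PARTITION BY a
 * ORDER BY b) and w2 AS (PARTITION BY a), w1 sorts before w2 because w2's
 * clauses are a prefix of w1's; the sort performed for w1 then already
 * satisfies w2, so no further sort is needed for it.
 */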
6035 qsort(actives, nActive, sizeof(WindowClauseSortData), common_prefix_cmp);
6036
6037 /* build ordered list of the original WindowClause nodes */
6038 for (int i = 0; i < nActive; i++)
6039 result = lappend(result, actives[i].wc);
6040
6041 pfree(actives);
6042
6043 return result;
6044}
6045
6046/*
6047 * name_active_windows
6048 * Ensure all active windows have unique names.
6049 *
6050 * The parser will have checked that user-assigned window names are unique
6051 * within the Query. Here we assign made-up names to any unnamed
6052 * WindowClauses for the benefit of EXPLAIN. (We don't want to do this
6053 * at parse time, because it'd mess up decompilation of views.)
6054 *
6055 * activeWindows: result of select_active_windows
6056 */
6057static void
6058name_active_windows(List *activeWindows)
6059{
6060 int next_n = 1;
6061 char newname[16];
6062 ListCell *lc;
6063
6064 foreach(lc, activeWindows)
6065 {
6066 WindowClause *wc = lfirst_node(WindowClause, lc);
6067
6068 /* Nothing to do if it has a name already. */
6069 if (wc->name)
6070 continue;
6071
6072 /* Select a name not currently present in the list. */
6073 for (;;)
6074 {
6075 ListCell *lc2;
6076
6077 snprintf(newname, sizeof(newname), "w%d", next_n++);
6078 foreach(lc2, activeWindows)
6079 {
6080 WindowClause *wc2 = lfirst_node(WindowClause, lc2);
6081
6082 if (wc2->name && strcmp(wc2->name, newname) == 0)
6083 break; /* matched */
6084 }
6085 if (lc2 == NULL)
6086 break; /* reached the end with no match */
6087 }
6088 wc->name = pstrdup(newname);
6089 }
6090}
6091
6092/*
6093 * common_prefix_cmp
6094 * QSort comparison function for WindowClauseSortData
6095 *
6096 * Sort the windows by the required sorting clauses. First, compare the sort
6097 * clauses themselves. Second, if one window's clauses are a prefix of another
6098 * one's clauses, put the window with more sort clauses first.
6099 *
6100 * We purposefully sort by the highest tleSortGroupRef first. Since
6101 * tleSortGroupRefs are assigned for the query's DISTINCT and ORDER BY first
6102 * and because here we sort the lowest tleSortGroupRefs last, if a
6103 * WindowClause is sharing a tleSortGroupRef with the query's DISTINCT or
6104 * ORDER BY clause, this makes it more likely that the final WindowAgg will
6105 * provide presorted input for the query's DISTINCT or ORDER BY clause, thus
6106 * reducing the total number of sorts required for the query.
6107 */
6108static int
6109common_prefix_cmp(const void *a, const void *b)
6110{
6111 const WindowClauseSortData *wcsa = a;
6112 const WindowClauseSortData *wcsb = b;
6113 ListCell *item_a;
6114 ListCell *item_b;
6115
6116 forboth(item_a, wcsa->uniqueOrder, item_b, wcsb->uniqueOrder)
6117 {
6118 SortGroupClause *sca = lfirst_node(SortGroupClause, item_a);
6119 SortGroupClause *scb = lfirst_node(SortGroupClause, item_b);
6120
6121 if (sca->tleSortGroupRef > scb->tleSortGroupRef)
6122 return -1;
6123 else if (sca->tleSortGroupRef < scb->tleSortGroupRef)
6124 return 1;
6125 else if (sca->sortop > scb->sortop)
6126 return -1;
6127 else if (sca->sortop < scb->sortop)
6128 return 1;
6129 else if (sca->nulls_first && !scb->nulls_first)
6130 return -1;
6131 else if (!sca->nulls_first && scb->nulls_first)
6132 return 1;
6133 /* no need to compare eqop, since it is fully determined by sortop */
6134 }
6135
6136 if (list_length(wcsa->uniqueOrder) > list_length(wcsb->uniqueOrder))
6137 return -1;
6138 else if (list_length(wcsa->uniqueOrder) < list_length(wcsb->uniqueOrder))
6139 return 1;
6140
6141 return 0;
6142}
6143
6144/*
6145 * make_window_input_target
6146 * Generate appropriate PathTarget for initial input to WindowAgg nodes.
6147 *
6148 * When the query has window functions, this function computes the desired
6149 * target to be computed by the node just below the first WindowAgg.
6150 * This tlist must contain all values needed to evaluate the window functions,
6151 * compute the final target list, and perform any required final sort step.
6152 * If multiple WindowAggs are needed, each intermediate one adds its window
6153 * function results onto this base tlist; only the topmost WindowAgg computes
6154 * the actual desired target list.
6155 *
6156 * This function is much like make_group_input_target, though not quite enough
6157 * like it to share code. As in that function, we flatten most expressions
6158 * into their component variables. But we do not want to flatten window
6159 * PARTITION BY/ORDER BY clauses, since that might result in multiple
6160 * evaluations of them, which would be bad (possibly even resulting in
6161 * inconsistent answers, if they contain volatile functions).
6162 * Also, we must not flatten GROUP BY clauses that were left unflattened by
6163 * make_group_input_target, because we may no longer have access to the
6164 * individual Vars in them.
6165 *
6166 * Another key difference from make_group_input_target is that we don't
6167 * flatten Aggref expressions, since those are to be computed below the
6168 * window functions and just referenced like Vars above that.
6169 *
6170 * 'final_target' is the query's final target list (in PathTarget form)
6171 * 'activeWindows' is the list of active windows previously identified by
6172 * select_active_windows.
6173 *
6174 * The result is the PathTarget to be computed by the plan node immediately
6175 * below the first WindowAgg node.
6176 */
6177static PathTarget *
6178make_window_input_target(PlannerInfo *root,
6179 PathTarget *final_target,
6180 List *activeWindows)
6181{
6182 PathTarget *input_target;
6183 Bitmapset *sgrefs;
6184 List *flattenable_cols;
6185 List *flattenable_vars;
6186 int i;
6187 ListCell *lc;
6188
6189 Assert(root->parse->hasWindowFuncs);
6190
6191 /*
6192 * Collect the sortgroupref numbers of window PARTITION/ORDER BY clauses
6193 * into a bitmapset for convenient reference below.
6194 */
6195 sgrefs = NULL;
6196 foreach(lc, activeWindows)
6197 {
6198 WindowClause *wc = lfirst_node(WindowClause, lc);
6199 ListCell *lc2;
6200
6201 foreach(lc2, wc->partitionClause)
6202 {
6203 SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
6204
6205 sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6206 }
6207 foreach(lc2, wc->orderClause)
6208 {
6209 SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
6210
6211 sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6212 }
6213 }
6214
6215 /* Add in sortgroupref numbers of GROUP BY clauses, too */
6216 foreach(lc, root->processed_groupClause)
6217 {
6218 SortGroupClause *grpcl = lfirst_node(SortGroupClause, lc);
6219
6220 sgrefs = bms_add_member(sgrefs, grpcl->tleSortGroupRef);
6221 }
6222
6223 /*
6224 * Construct a target containing all the non-flattenable targetlist items,
6225 * and save aside the others for a moment.
6226 */
6227 input_target = create_empty_pathtarget();
6228 flattenable_cols = NIL;
6229
6230 i = 0;
6231 foreach(lc, final_target->exprs)
6232 {
6233 Expr *expr = (Expr *) lfirst(lc);
6234 Index sgref = get_pathtarget_sortgroupref(final_target, i);
6235
6236 /*
6237 * Don't want to deconstruct window clauses or GROUP BY items. (Note
6238 * that such items can't contain window functions, so it's okay to
6239 * compute them below the WindowAgg nodes.)
6240 */
6241 if (sgref != 0 && bms_is_member(sgref, sgrefs))
6242 {
6243 /*
6244 * Don't want to deconstruct this value, so add it to the input
6245 * target as-is.
6246 */
6247 add_column_to_pathtarget(input_target, expr, sgref);
6248 }
6249 else
6250 {
6251 /*
6252 * Column is to be flattened, so just remember the expression for
6253 * later call to pull_var_clause.
6254 */
6255 flattenable_cols = lappend(flattenable_cols, expr);
6256 }
6257
6258 i++;
6259 }
6260
6261 /*
6262 * Pull out all the Vars and Aggrefs mentioned in flattenable columns, and
6263 * add them to the input target if not already present. (Some might be
6264 * there already because they're used directly as window/group clauses.)
6265 *
6266 * Note: it's essential to use PVC_INCLUDE_AGGREGATES here, so that any
6267 * Aggrefs are placed in the Agg node's tlist and not left to be computed
6268 * at higher levels. On the other hand, we should recurse into
6269 * WindowFuncs to make sure their input expressions are available.
6270 */
6271 flattenable_vars = pull_var_clause((Node *) flattenable_cols,
6272 PVC_INCLUDE_AGGREGATES |
6273 PVC_RECURSE_WINDOWFUNCS |
6274 PVC_INCLUDE_PLACEHOLDERS);
6275 add_new_columns_to_pathtarget(input_target, flattenable_vars);
6276
6277 /* clean up cruft */
6278 list_free(flattenable_vars);
6279 list_free(flattenable_cols);
6280
6281 /* XXX this causes some redundant cost calculation ... */
6282 return set_pathtarget_cost_width(root, input_target);
6283}
6284
6285/*
6286 * make_pathkeys_for_window
6287 * Create a pathkeys list describing the required input ordering
6288 * for the given WindowClause.
6289 *
6290 * Modifies wc's partitionClause to remove any clauses which are deemed
6291 * redundant by the pathkey logic.
6292 *
6293 * The required ordering is first the PARTITION keys, then the ORDER keys.
6294 * In the future we might try to implement windowing using hashing, in which
6295 * case the ordering could be relaxed, but for now we always sort.
6296 */
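/*
 * For illustration (hypothetical window): OVER (PARTITION BY a ORDER BY b)
 * produces pathkeys equivalent to ORDER BY a, b; if a turns out to be
 * redundant (say it is equated to a constant elsewhere in the query), the
 * partition pathkey is dropped and wc->partitionClause is trimmed to match.
 */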
6297static List *
6298make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
6299 List *tlist)
6300{
6301 List *window_pathkeys = NIL;
6302
6303 /* Throw error if can't sort */
6304 if (!grouping_is_sortable(wc->partitionClause))
6305 ereport(ERROR,
6306 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6307 errmsg("could not implement window PARTITION BY"),
6308 errdetail("Window partitioning columns must be of sortable datatypes.")));
6309 if (!grouping_is_sortable(wc->orderClause))
6310 ereport(ERROR,
6311 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6312 errmsg("could not implement window ORDER BY"),
6313 errdetail("Window ordering columns must be of sortable datatypes.")));
6314
6315 /*
6316 * First fetch the pathkeys for the PARTITION BY clause. We can safely
6317 * remove any clauses from the wc->partitionClause for redundant pathkeys.
6318 */
6319 if (wc->partitionClause != NIL)
6320 {
6321 bool sortable;
6322
6323 window_pathkeys = make_pathkeys_for_sortclauses_extended(root,
6324 &wc->partitionClause,
6325 tlist,
6326 true,
6327 false,
6328 &sortable,
6329 false);
6330
6331 Assert(sortable);
6332 }
6333
6334 /*
6335 * In principle, we could also consider removing redundant ORDER BY items
6336 * too as doing so does not alter the result of peer row checks done by
6337 * the executor. However, we must *not* remove the ordering column for
6338 * RANGE OFFSET cases, as the executor needs that for in_range tests even
6339 * if it's known to be equal to some partitioning column.
6340 */
6341 if (wc->orderClause != NIL)
6342 {
6343 List *orderby_pathkeys;
6344
6345 orderby_pathkeys = make_pathkeys_for_sortclauses(root,
6346 wc->orderClause,
6347 tlist);
6348
6349 /* Okay, make the combined pathkeys */
6350 if (window_pathkeys != NIL)
6351 window_pathkeys = append_pathkeys(window_pathkeys, orderby_pathkeys);
6352 else
6353 window_pathkeys = orderby_pathkeys;
6354 }
6355
6356 return window_pathkeys;
6357}
6358
6359/*
6360 * make_sort_input_target
6361 * Generate appropriate PathTarget for initial input to Sort step.
6362 *
6363 * If the query has ORDER BY, this function chooses the target to be computed
6364 * by the node just below the Sort (and DISTINCT, if any, since Unique can't
6365 * project) steps. This might or might not be identical to the query's final
6366 * output target.
6367 *
6368 * The main argument for keeping the sort-input tlist the same as the final
6369 * is that we avoid a separate projection node (which will be needed if
6370 * they're different, because Sort can't project). However, there are also
6371 * advantages to postponing tlist evaluation till after the Sort: it ensures
6372 * a consistent order of evaluation for any volatile functions in the tlist,
6373 * and if there's also a LIMIT, we can stop the query without ever computing
6374 * tlist functions for later rows, which is beneficial for both volatile and
6375 * expensive functions.
6376 *
6377 * Our current policy is to postpone volatile expressions till after the sort
6378 * unconditionally (assuming that that's possible, ie they are in plain tlist
6379 * columns and not ORDER BY/GROUP BY/DISTINCT columns). We also prefer to
6380 * postpone set-returning expressions, because running them beforehand would
6381 * bloat the sort dataset, and because it might cause unexpected output order
6382 * if the sort isn't stable. However there's a constraint on that: all SRFs
6383 * in the tlist should be evaluated at the same plan step, so that they can
6384 * run in sync in nodeProjectSet. So if any SRFs are in sort columns, we
6385 * mustn't postpone any SRFs. (Note that in principle that policy should
6386 * probably get applied to the group/window input targetlists too, but we
6387 * have not done that historically.) Lastly, expensive expressions are
6388 * postponed if there is a LIMIT, or if root->tuple_fraction shows that
6389 * partial evaluation of the query is possible (if neither is true, we expect
6390 * to have to evaluate the expressions for every row anyway), or if there are
6391 * any volatile or set-returning expressions (since once we've put in a
6392 * projection at all, it won't cost any more to postpone more stuff).
6393 *
6394 * Another issue that could potentially be considered here is that
6395 * evaluating tlist expressions could result in data that's either wider
6396 * or narrower than the input Vars, thus changing the volume of data that
6397 * has to go through the Sort. However, we usually have only a very bad
6398 * idea of the output width of any expression more complex than a Var,
6399 * so for now it seems too risky to try to optimize on that basis.
6400 *
6401 * Note that if we do produce a modified sort-input target, and then the
6402 * query ends up not using an explicit Sort, no particular harm is done:
6403 * we'll initially use the modified target for the preceding path nodes,
6404 * but then change them to the final target with apply_projection_to_path.
6405 * Moreover, in such a case the guarantees about evaluation order of
6406 * volatile functions still hold, since the rows are sorted already.
6407 *
6408 * This function has some things in common with make_group_input_target and
6409 * make_window_input_target, though the detailed rules for what to do are
6410 * different. We never flatten/postpone any grouping or ordering columns;
6411 * those are needed before the sort. If we do flatten a particular
6412 * expression, we leave Aggref and WindowFunc nodes alone, since those were
6413 * computed earlier.
6414 *
6415 * 'final_target' is the query's final target list (in PathTarget form)
6416 * 'have_postponed_srfs' is an output argument, see below
6417 *
6418 * The result is the PathTarget to be computed by the plan node immediately
6419 * below the Sort step (and the Distinct step, if any). This will be
6420 * exactly final_target if we decide a projection step wouldn't be helpful.
6421 *
6422 * In addition, *have_postponed_srfs is set to true if we choose to postpone
6423 * any set-returning functions to after the Sort.
6424 */
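/*
 * For illustration (hypothetical query, slow_func being some expensive
 * user-defined function): in
 *	SELECT x, slow_func(x), random(), generate_series(1, 3)
 *	FROM tab ORDER BY x LIMIT 10
 * only x has to be computed below the Sort; the volatile random(), the
 * set-returning generate_series(), and (because of the LIMIT) the expensive
 * slow_func() can all be postponed to a projection above the Sort.
 */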
6425static PathTarget *
6426make_sort_input_target(PlannerInfo *root,
6427 PathTarget *final_target,
6428 bool *have_postponed_srfs)
6429{
6430 Query *parse = root->parse;
6431 PathTarget *input_target;
6432 int ncols;
6433 bool *col_is_srf;
6434 bool *postpone_col;
6435 bool have_srf;
6436 bool have_volatile;
6437 bool have_expensive;
6438 bool have_srf_sortcols;
6439 bool postpone_srfs;
6440 List *postponable_cols;
6441 List *postponable_vars;
6442 int i;
6443 ListCell *lc;
6444
6445 /* Shouldn't get here unless query has ORDER BY */
6446 Assert(parse->sortClause);
6447
6448 *have_postponed_srfs = false; /* default result */
6449
6450 /* Inspect tlist and collect per-column information */
6451 ncols = list_length(final_target->exprs);
6452 col_is_srf = (bool *) palloc0(ncols * sizeof(bool));
6453 postpone_col = (bool *) palloc0(ncols * sizeof(bool));
6454 have_srf = have_volatile = have_expensive = have_srf_sortcols = false;
6455
6456 i = 0;
6457 foreach(lc, final_target->exprs)
6458 {
6459 Expr *expr = (Expr *) lfirst(lc);
6460
6461 /*
6462 * If the column has a sortgroupref, assume it has to be evaluated
6463 * before sorting. Generally such columns would be ORDER BY, GROUP
6464 * BY, etc targets. One exception is columns that were removed from
6465 * GROUP BY by remove_useless_groupby_columns() ... but those would
6466 * only be Vars anyway. There don't seem to be any cases where it
6467 * would be worth the trouble to double-check.
6468 */
6469 if (get_pathtarget_sortgroupref(final_target, i) == 0)
6470 {
6471 /*
6472 * Check for SRF or volatile functions. Check the SRF case first
6473 * because we must know whether we have any postponed SRFs.
6474 */
6475 if (parse->hasTargetSRFs &&
6476 expression_returns_set((Node *) expr))
6477 {
6478 /* We'll decide below whether these are postponable */
6479 col_is_srf[i] = true;
6480 have_srf = true;
6481 }
6482 else if (contain_volatile_functions((Node *) expr))
6483 {
6484 /* Unconditionally postpone */
6485 postpone_col[i] = true;
6486 have_volatile = true;
6487 }
6488 else
6489 {
6490 /*
6491 * Else check the cost. XXX it's annoying to have to do this
6492 * when set_pathtarget_cost_width() just did it. Refactor to
6493 * allow sharing the work?
6494 */
6495 QualCost cost;
6496
6497 cost_qual_eval_node(&cost, (Node *) expr, root);
6498
6499 /*
6500 * We arbitrarily define "expensive" as "more than 10X
6501 * cpu_operator_cost". Note this will take in any PL function
6502 * with default cost.
6503 */
6504 if (cost.per_tuple > 10 * cpu_operator_cost)
6505 {
6506 postpone_col[i] = true;
6507 have_expensive = true;
6508 }
6509 }
6510 }
6511 else
6512 {
6513 /* For sortgroupref cols, just check if any contain SRFs */
6514 if (!have_srf_sortcols &&
6515 parse->hasTargetSRFs &&
6516 expression_returns_set((Node *) expr))
6517 have_srf_sortcols = true;
6518 }
6519
6520 i++;
6521 }
6522
6523 /*
6524 * We can postpone SRFs if we have some but none are in sortgroupref cols.
6525 */
6526 postpone_srfs = (have_srf && !have_srf_sortcols);
6527
6528 /*
6529 * If we don't need a post-sort projection, just return final_target.
6530 */
6531 if (!(postpone_srfs || have_volatile ||
6532 (have_expensive &&
6533 (parse->limitCount || root->tuple_fraction > 0))))
6534 return final_target;
6535
6536 /*
6537 * Report whether the post-sort projection will contain set-returning
6538 * functions. This is important because it affects whether the Sort can
6539 * rely on the query's LIMIT (if any) to bound the number of rows it needs
6540 * to return.
6541 */
6542 *have_postponed_srfs = postpone_srfs;
6543
6544 /*
6545 * Construct the sort-input target, taking all non-postponable columns and
6546 * then adding Vars, PlaceHolderVars, Aggrefs, and WindowFuncs found in
6547 * the postponable ones.
6548 */
6549 input_target = create_empty_pathtarget();
6550 postponable_cols = NIL;
6551
6552 i = 0;
6553 foreach(lc, final_target->exprs)
6554 {
6555 Expr *expr = (Expr *) lfirst(lc);
6556
6557 if (postpone_col[i] || (postpone_srfs && col_is_srf[i]))
6558 postponable_cols = lappend(postponable_cols, expr);
6559 else
6560 add_column_to_pathtarget(input_target, expr,
6561 get_pathtarget_sortgroupref(final_target, i));
6562
6563 i++;
6564 }
6565
6566 /*
6567 * Pull out all the Vars, Aggrefs, and WindowFuncs mentioned in
6568 * postponable columns, and add them to the sort-input target if not
6569 * already present. (Some might be there already.) We mustn't
6570 * deconstruct Aggrefs or WindowFuncs here, since the projection node
6571 * would be unable to recompute them.
6572 */
6573 postponable_vars = pull_var_clause((Node *) postponable_cols,
6574 PVC_INCLUDE_AGGREGATES |
6575 PVC_INCLUDE_WINDOWFUNCS |
6576 PVC_INCLUDE_PLACEHOLDERS);
6577 add_new_columns_to_pathtarget(input_target, postponable_vars);
6578
6579 /* clean up cruft */
6580 list_free(postponable_vars);
6581 list_free(postponable_cols);
6582
6583 /* XXX this represents even more redundant cost calculation ... */
6584 return set_pathtarget_cost_width(root, input_target);
6585}
6586
6587/*
6588 * get_cheapest_fractional_path
6589 * Find the cheapest path for retrieving a specified fraction of all
6590 * the tuples expected to be returned by the given relation.
6591 *
6592 * Do not consider parameterized paths. If the caller needs a path for upper
6593 * rel, it can't have parameterized paths. If the caller needs an append
6594 * subpath, it could become limited by the treatment of similar
6595 * parameterization of all the subpaths.
6596 *
6597 * We interpret tuple_fraction the same way as grouping_planner.
6598 *
6599 * We assume set_cheapest() has been run on the given rel.
6600 */
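/*
 * For illustration (hypothetical numbers): a caller handling LIMIT 10 passes
 * tuple_fraction = 10; if the cheapest-total path is estimated at 1000 rows,
 * that is converted below to the fraction 0.01, and a path with higher total
 * cost but much lower startup cost (an index scan, say) can then win the
 * compare_fractional_path_costs() comparison.
 */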
6601Path *
6602get_cheapest_fractional_path(RelOptInfo *rel, double tuple_fraction)
6603{
6604 Path *best_path = rel->cheapest_total_path;
6605 ListCell *l;
6606
6607 /* If all tuples will be retrieved, just return the cheapest-total path */
6608 if (tuple_fraction <= 0.0)
6609 return best_path;
6610
6611 /* Convert absolute # of tuples to a fraction; no need to clamp to 0..1 */
6612 if (tuple_fraction >= 1.0 && best_path->rows > 0)
6613 tuple_fraction /= best_path->rows;
6614
6615 foreach(l, rel->pathlist)
6616 {
6617 Path *path = (Path *) lfirst(l);
6618
6619 if (path->param_info)
6620 continue;
6621
6622 if (path == rel->cheapest_total_path ||
6623 compare_fractional_path_costs(best_path, path, tuple_fraction) <= 0)
6624 continue;
6625
6626 best_path = path;
6627 }
6628
6629 return best_path;
6630}
6631
6632/*
6633 * adjust_paths_for_srfs
6634 * Fix up the Paths of the given upperrel to handle tSRFs properly.
6635 *
6636 * The executor can only handle set-returning functions that appear at the
6637 * top level of the targetlist of a ProjectSet plan node. If we have any SRFs
6638 * that are not at top level, we need to split up the evaluation into multiple
6639 * plan levels in which each level satisfies this constraint. This function
6640 * modifies each Path of an upperrel that (might) compute any SRFs in its
6641 * output tlist to insert appropriate projection steps.
6642 *
6643 * The given targets and targets_contain_srfs lists are from
6644 * split_pathtarget_at_srfs(). We assume the existing Paths emit the first
6645 * target in targets.
6646 */
6647static void
6648adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel,
6649 List *targets, List *targets_contain_srfs)
6650{
6651 ListCell *lc;
6652
6653 Assert(list_length(targets) == list_length(targets_contain_srfs));
6654 Assert(!linitial_int(targets_contain_srfs));
6655
6656 /* If no SRFs appear at this plan level, nothing to do */
6657 if (list_length(targets) == 1)
6658 return;
6659
6660 /*
6661 * Stack SRF-evaluation nodes atop each path for the rel.
6662 *
6663 * In principle we should re-run set_cheapest() here to identify the
6664 * cheapest path, but it seems unlikely that adding the same tlist eval
6665 * costs to all the paths would change that, so we don't bother. Instead,
6666 * just assume that the cheapest-startup and cheapest-total paths remain
6667 * so. (There should be no parameterized paths anymore, so we needn't
6668 * worry about updating cheapest_parameterized_paths.)
6669 */
6670 foreach(lc, rel->pathlist)
6671 {
6672 Path *subpath = (Path *) lfirst(lc);
6673 Path *newpath = subpath;
6674 ListCell *lc1,
6675 *lc2;
6676
6677 Assert(subpath->param_info == NULL);
6678 forboth(lc1, targets, lc2, targets_contain_srfs)
6679 {
6680 PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6681 bool contains_srfs = (bool) lfirst_int(lc2);
6682
6683 /* If this level doesn't contain SRFs, do regular projection */
6684 if (contains_srfs)
6685 newpath = (Path *) create_set_projection_path(root,
6686 rel,
6687 newpath,
6688 thistarget);
6689 else
6690 newpath = (Path *) apply_projection_to_path(root,
6691 rel,
6692 newpath,
6693 thistarget);
6694 }
6695 lfirst(lc) = newpath;
6696 if (subpath == rel->cheapest_startup_path)
6697 rel->cheapest_startup_path = newpath;
6698 if (subpath == rel->cheapest_total_path)
6699 rel->cheapest_total_path = newpath;
6700 }
6701
6702 /* Likewise for partial paths, if any */
6703 foreach(lc, rel->partial_pathlist)
6704 {
6705 Path *subpath = (Path *) lfirst(lc);
6706 Path *newpath = subpath;
6707 ListCell *lc1,
6708 *lc2;
6709
6710 Assert(subpath->param_info == NULL);
6711 forboth(lc1, targets, lc2, targets_contain_srfs)
6712 {
6713 PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6714 bool contains_srfs = (bool) lfirst_int(lc2);
6715
6716 /* If this level doesn't contain SRFs, do regular projection */
6717 if (contains_srfs)
6718 newpath = (Path *) create_set_projection_path(root,
6719 rel,
6720 newpath,
6721 thistarget);
6722 else
6723 {
6724 /* avoid apply_projection_to_path, in case of multiple refs */
6725 newpath = (Path *) create_projection_path(root,
6726 rel,
6727 newpath,
6728 thistarget);
6729 }
6730 }
6731 lfirst(lc) = newpath;
6732 }
6733}
6734
6735/*
6736 * expression_planner
6737 * Perform planner's transformations on a standalone expression.
6738 *
6739 * Various utility commands need to evaluate expressions that are not part
6740 * of a plannable query. They can do so using the executor's regular
6741 * expression-execution machinery, but first the expression has to be fed
6742 * through here to transform it from parser output to something executable.
6743 *
6744 * Currently, we disallow sublinks in standalone expressions, so there's no
6745 * real "planning" involved here. (That might not always be true though.)
6746 * What we must do is run eval_const_expressions to ensure that any function
6747 * calls are converted to positional notation and function default arguments
6748 * get inserted. The fact that constant subexpressions get simplified is a
6749 * side-effect that is useful when the expression will get evaluated more than
6750 * once. Also, we must fix operator function IDs.
6751 *
6752 * This does not return any information about dependencies of the expression.
6753 * Hence callers should use the results only for the duration of the current
6754 * query. Callers that would like to cache the results for longer should use
6755 * expression_planner_with_deps, probably via the plancache.
6756 *
6757 * Note: this must not make any damaging changes to the passed-in expression
6758 * tree. (It would actually be okay to apply fix_opfuncids to it, but since
6759 * we first do an expression_tree_mutator-based walk, what is returned will
6760 * be a new node tree.) The result is constructed in the current memory
6761 * context; beware that this can leak a lot of additional stuff there, too.
6762 */
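/*
 * A minimal sketch of a typical caller (hypothetical variable names; memory
 * context handling omitted):
 *
 *	EState	   *estate = CreateExecutorState();
 *	ExprContext *econtext = GetPerTupleExprContext(estate);
 *	ExprState  *exprstate;
 *	Datum		result;
 *	bool		isnull;
 *
 *	expr = expression_planner(expr);
 *	exprstate = ExecInitExpr(expr, NULL);
 *	result = ExecEvalExprSwitchContext(exprstate, econtext, &isnull);
 */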
6763Expr *
6764expression_planner(Expr *expr)
6765{
6766 Node *result;
6767
6768 /*
6769 * Convert named-argument function calls, insert default arguments and
6770 * simplify constant subexprs
6771 */
6772 result = eval_const_expressions(NULL, (Node *) expr);
6773
6774 /* Fill in opfuncid values if missing */
6775 fix_opfuncids(result);
6776
6777 return (Expr *) result;
6778}
6779
6780/*
6781 * expression_planner_with_deps
6782 * Perform planner's transformations on a standalone expression,
6783 * returning expression dependency information along with the result.
6784 *
6785 * This is identical to expression_planner() except that it also returns
6786 * information about possible dependencies of the expression, ie identities of
6787 * objects whose definitions affect the result. As in a PlannedStmt, these
6788 * are expressed as a list of relation Oids and a list of PlanInvalItems.
6789 */
6790Expr *
6791expression_planner_with_deps(Expr *expr,
6792 List **relationOids,
6793 List **invalItems)
6794{
6795 Node *result;
6796 PlannerGlobal glob;
6797 PlannerInfo root;
6798
6799 /* Make up dummy planner state so we can use setrefs machinery */
6800 MemSet(&glob, 0, sizeof(glob));
6801 glob.type = T_PlannerGlobal;
6802 glob.relationOids = NIL;
6803 glob.invalItems = NIL;
6804
6805 MemSet(&root, 0, sizeof(root));
6806 root.type = T_PlannerInfo;
6807 root.glob = &glob;
6808
6809 /*
6810 * Convert named-argument function calls, insert default arguments and
6811 * simplify constant subexprs. Collect identities of inlined functions
6812 * and elided domains, too.
6813 */
6814 result = eval_const_expressions(&root, (Node *) expr);
6815
6816 /* Fill in opfuncid values if missing */
6817 fix_opfuncids(result);
6818
6819 /*
6820 * Now walk the finished expression to find anything else we ought to
6821 * record as an expression dependency.
6822 */
6823 (void) extract_query_dependencies_walker(result, &root);
6824
6825 *relationOids = glob.relationOids;
6826 *invalItems = glob.invalItems;
6827
6828 return (Expr *) result;
6829}
6830
6831
6832/*
6833 * plan_cluster_use_sort
6834 * Use the planner to decide how CLUSTER should implement sorting
6835 *
6836 * tableOid is the OID of a table to be clustered on its index indexOid
6837 * (which is already known to be a btree index). Decide whether it's
6838 * cheaper to do an indexscan or a seqscan-plus-sort to execute the CLUSTER.
6839 * Return true to use sorting, false to use an indexscan.
6840 *
6841 * Note: caller had better already hold some type of lock on the table.
6842 */
6843bool
6844plan_cluster_use_sort(Oid tableOid, Oid indexOid)
6845{
6846 PlannerInfo *root;
6847 Query *query;
6848 PlannerGlobal *glob;
6849 RangeTblEntry *rte;
6850 RelOptInfo *rel;
6851 IndexOptInfo *indexInfo;
6852 QualCost indexExprCost;
6853 Cost comparisonCost;
6854 Path *seqScanPath;
6855 Path seqScanAndSortPath;
6856 IndexPath *indexScanPath;
6857 ListCell *lc;
6858
6859 /* We can short-circuit the cost comparison if indexscans are disabled */
6860 if (!enable_indexscan)
6861 return true; /* use sort */
6862
6863 /* Set up mostly-dummy planner state */
6864 query = makeNode(Query);
6865 query->commandType = CMD_SELECT;
6866
6867 glob = makeNode(PlannerGlobal);
6868
6869 root = makeNode(PlannerInfo);
6870 root->parse = query;
6871 root->glob = glob;
6872 root->query_level = 1;
6873 root->planner_cxt = CurrentMemoryContext;
6874 root->wt_param_id = -1;
6875 root->join_domains = list_make1(makeNode(JoinDomain));
6876
6877 /* Build a minimal RTE for the rel */
6878 rte = makeNode(RangeTblEntry);
6879 rte->rtekind = RTE_RELATION;
6880 rte->relid = tableOid;
6881 rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
6882 rte->rellockmode = AccessShareLock;
6883 rte->lateral = false;
6884 rte->inh = false;
6885 rte->inFromCl = true;
6886 query->rtable = list_make1(rte);
6887 addRTEPermissionInfo(&query->rteperminfos, rte);
6888
6889 /* Set up RTE/RelOptInfo arrays */
6890 setup_simple_rel_arrays(root);
6891
6892 /* Build RelOptInfo */
6893 rel = build_simple_rel(root, 1, NULL);
6894
6895 /* Locate IndexOptInfo for the target index */
6896 indexInfo = NULL;
6897 foreach(lc, rel->indexlist)
6898 {
6899 indexInfo = lfirst_node(IndexOptInfo, lc);
6900 if (indexInfo->indexoid == indexOid)
6901 break;
6902 }
6903
6904 /*
6905 * It's possible that get_relation_info did not generate an IndexOptInfo
6906 * for the desired index; this could happen if it's not yet reached its
6907 * indcheckxmin usability horizon, or if it's a system index and we're
6908 * ignoring system indexes. In such cases we should tell CLUSTER to not
6909 * trust the index contents but use seqscan-and-sort.
6910 */
6911 if (lc == NULL) /* not in the list? */
6912 return true; /* use sort */
6913
6914 /*
6915 * Rather than doing all the pushups that would be needed to use
6916 * set_baserel_size_estimates, just do a quick hack for rows and width.
6917 */
6918 rel->rows = rel->tuples;
6919 rel->reltarget->width = get_relation_data_width(tableOid, NULL);
6920
6921 root->total_table_pages = rel->pages;
6922
6923 /*
6924 * Determine eval cost of the index expressions, if any. We need to
6925 * charge twice that amount for each tuple comparison that happens during
6926 * the sort, since tuplesort.c will have to re-evaluate the index
6927 * expressions each time. (XXX that's pretty inefficient...)
6928 */
6929 cost_qual_eval(&indexExprCost, indexInfo->indexprs, root);
6930 comparisonCost = 2.0 * (indexExprCost.startup + indexExprCost.per_tuple);
6931
6932 /* Estimate the cost of seq scan + sort */
6933 seqScanPath = create_seqscan_path(root, rel, NULL, 0);
6934 cost_sort(&seqScanAndSortPath, root, NIL,
6935 seqScanPath->disabled_nodes,
6936 seqScanPath->total_cost, rel->tuples, rel->reltarget->width,
6937 comparisonCost, maintenance_work_mem, -1.0);
6938
6939 /* Estimate the cost of index scan */
6940 indexScanPath = create_index_path(root, indexInfo,
6941 NIL, NIL, NIL, NIL,
6942 ForwardScanDirection, false,
6943 NULL, 1.0, false);
6944
6945 return (seqScanAndSortPath.total_cost < indexScanPath->path.total_cost);
6946}
6947
6948/*
6949 * plan_create_index_workers
6950 * Use the planner to decide how many parallel worker processes
6951 * CREATE INDEX should request for use
6952 *
6953 * tableOid is the table on which the index is to be built. indexOid is the
6954 * OID of an index to be created or reindexed (which must be an index with
6955 * support for parallel builds - currently btree, GIN, or BRIN).
6956 *
6957 * Return value is the number of parallel worker processes to request. It
6958 * may be unsafe to proceed if this is 0. Note that this does not include the
6959 * leader participating as a worker (value is always a number of parallel
6960 * worker processes).
6961 *
6962 * Note: caller had better already hold some type of lock on the table and
6963 * index.
6964 */
6965int
6966plan_create_index_workers(Oid tableOid, Oid indexOid)
6967{
6968 PlannerInfo *root;
6969 Query *query;
6970 PlannerGlobal *glob;
6971 RangeTblEntry *rte;
6972 Relation heap;
6973 Relation index;
6974 RelOptInfo *rel;
6975 int parallel_workers;
6976 BlockNumber heap_blocks;
6977 double reltuples;
6978 double allvisfrac;
6979
6980 /*
6981 * We don't allow performing parallel operation in standalone backend or
6982 * when parallelism is disabled.
6983 */
6984 if (!IsUnderPostmaster || max_parallel_maintenance_workers == 0)
6985 return 0;
6986
6987 /* Set up largely-dummy planner state */
6988 query = makeNode(Query);
6989 query->commandType = CMD_SELECT;
6990
6991 glob = makeNode(PlannerGlobal);
6992
6993 root = makeNode(PlannerInfo);
6994 root->parse = query;
6995 root->glob = glob;
6996 root->query_level = 1;
6997 root->planner_cxt = CurrentMemoryContext;
6998 root->wt_param_id = -1;
6999 root->join_domains = list_make1(makeNode(JoinDomain));
7000
7001 /*
7002 * Build a minimal RTE.
7003 *
7004 * Mark the RTE with inh = true. This is a kludge to prevent
7005 * get_relation_info() from fetching index info, which is necessary
7006 * because it does not expect that any IndexOptInfo is currently
7007 * undergoing REINDEX.
7008 */
7009 rte = makeNode(RangeTblEntry);
7010 rte->rtekind = RTE_RELATION;
7011 rte->relid = tableOid;
7012 rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
7013 rte->rellockmode = AccessShareLock;
7014 rte->lateral = false;
7015 rte->inh = true;
7016 rte->inFromCl = true;
7017 query->rtable = list_make1(rte);
7018 addRTEPermissionInfo(&query->rteperminfos, rte);
7019
7020 /* Set up RTE/RelOptInfo arrays */
7021 setup_simple_rel_arrays(root);
7022
7023 /* Build RelOptInfo */
7024 rel = build_simple_rel(root, 1, NULL);
7025
7026 /* Rels are assumed already locked by the caller */
7027 heap = table_open(tableOid, NoLock);
7028 index = index_open(indexOid, NoLock);
7029
7030 /*
7031 * Determine if it's safe to proceed.
7032 *
7033 * Currently, parallel workers can't access the leader's temporary tables.
7034 * Furthermore, any index predicate or index expressions must be parallel
7035 * safe.
7036 */
7037 if (heap->rd_rel->relpersistence == RELPERSISTENCE_TEMP ||
7038 !is_parallel_safe(root, (Node *) RelationGetIndexExpressions(index)) ||
7039 !is_parallel_safe(root, (Node *) RelationGetIndexPredicate(index)))
7040 {
7041 parallel_workers = 0;
7042 goto done;
7043 }
7044
7045 /*
7046 * If the parallel_workers storage parameter is set for the table, accept
7047 * that as the number of parallel worker processes to launch (though still
7048 * cap it at max_parallel_maintenance_workers). Note that we deliberately
7049 * do not consider any other factor (e.g., memory use by workers) when
7050 * parallel_workers is set.
7051 */
7052 if (rel->rel_parallel_workers != -1)
7053 {
7054 parallel_workers = Min(rel->rel_parallel_workers,
7055 max_parallel_maintenance_workers);
7056 goto done;
7057 }
7058
7059 /*
7060 * Estimate heap relation size ourselves, since rel->pages cannot be
7061 * trusted (heap RTE was marked as inheritance parent)
7062 */
7063 estimate_rel_size(heap, NULL, &heap_blocks, &reltuples, &allvisfrac);
7064
7065 /*
7066 * Determine number of workers to scan the heap relation using generic
7067 * model
7068 */
7069 parallel_workers = compute_parallel_worker(rel, heap_blocks, -1,
7070 max_parallel_maintenance_workers);
7071
7072 /*
7073 * Cap workers based on available maintenance_work_mem as needed.
7074 *
7075 * Note that each tuplesort participant receives an even share of the
7076 * total maintenance_work_mem budget. Aim to leave participants
7077 * (including the leader as a participant) with no less than 32MB of
7078 * memory. As a consequence, a maintenance_work_mem setting of 64MB is
7079 * just past the threshold at which a single parallel worker can be
7080 * launched to sort.
7081 */
7082 while (parallel_workers > 0 &&
7083 maintenance_work_mem / (parallel_workers + 1) < 32 * 1024)
7084 parallel_workers--;
7085
7086done:
7087 index_close(index, NoLock);
7088 table_close(heap, NoLock);
7089
7090 return parallel_workers;
7091}
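/*
 * Illustrative sketch (not part of planner.c): the memory-based cap applied
 * just above, isolated as a standalone helper.  The names are inventions for
 * this example; budget_kb stands in for maintenance_work_mem (measured in
 * kB).  The leader counts as one participant, and each participant should
 * keep at least 32MB.  For instance, with a 64MB budget the loop settles on
 * a single worker: 65536 / 2 = 32768 kB per participant, exactly at the floor.
 */
static int
demo_cap_workers_by_memory(int requested_workers, int budget_kb)
{
	int			workers = requested_workers;

	while (workers > 0 && budget_kb / (workers + 1) < 32 * 1024)
		workers--;

	return workers;
}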
7092
7093/*
7094 * add_paths_to_grouping_rel
7095 *
7096 * Add non-partial paths to grouping relation.
7097 */
7098static void
7099add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
7100 RelOptInfo *grouped_rel,
7101 RelOptInfo *partially_grouped_rel,
7102 const AggClauseCosts *agg_costs,
7103 grouping_sets_data *gd,
7104 GroupPathExtraData *extra)
7105{
7106 Query *parse = root->parse;
7107 Path *cheapest_path = input_rel->cheapest_total_path;
7108 Path *cheapest_partially_grouped_path = NULL;
7109 ListCell *lc;
7110 bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7111 bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7112 List *havingQual = (List *) extra->havingQual;
7113 AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7114 double dNumGroups = 0;
7115 double dNumFinalGroups = 0;
7116
7117 /*
7118 * Estimate number of groups for non-split aggregation.
7119 */
7120 dNumGroups = get_number_of_groups(root,
7121 cheapest_path->rows,
7122 gd,
7123 extra->targetList);
7124
7125 if (partially_grouped_rel && partially_grouped_rel->pathlist)
7126 {
7127 cheapest_partially_grouped_path =
7128 partially_grouped_rel->cheapest_total_path;
7129
7130 /*
7131 * Estimate number of groups for final phase of partial aggregation.
7132 */
7133 dNumFinalGroups =
7135 cheapest_partially_grouped_path->rows,
7136 gd,
7137 extra->targetList);
7138 }
7139
7140 if (can_sort)
7141 {
7142 /*
7143 * Use any available suitably-sorted path as input, and also consider
7144 * sorting the cheapest-total path and incremental sort on any paths
7145 * with presorted keys.
7146 */
7147 foreach(lc, input_rel->pathlist)
7148 {
7149 ListCell *lc2;
7150 Path *path = (Path *) lfirst(lc);
7151 Path *path_save = path;
7152 List *pathkey_orderings = NIL;
7153
7154 /* generate alternative group orderings that might be useful */
7155 pathkey_orderings = get_useful_group_keys_orderings(root, path);
7156
7157 Assert(list_length(pathkey_orderings) > 0);
7158
7159 foreach(lc2, pathkey_orderings)
7160 {
7161 GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7162
7163 /* restore the path (we replace it in the loop) */
7164 path = path_save;
7165
7166 path = make_ordered_path(root,
7167 grouped_rel,
7168 path,
7169 cheapest_path,
7170 info->pathkeys,
7171 -1.0);
7172 if (path == NULL)
7173 continue;
7174
7175 /* Now decide what to stick atop it */
7176 if (parse->groupingSets)
7177 {
7178 consider_groupingsets_paths(root, grouped_rel,
7179 path, true, can_hash,
7180 gd, agg_costs, dNumGroups);
7181 }
7182 else if (parse->hasAggs)
7183 {
7184 /*
7185 * We have aggregation, possibly with plain GROUP BY. Make
7186 * an AggPath.
7187 */
7188 add_path(grouped_rel, (Path *)
7190 grouped_rel,
7191 path,
7192 grouped_rel->reltarget,
7193 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7195 info->clauses,
7196 havingQual,
7197 agg_costs,
7198 dNumGroups));
7199 }
7200 else if (parse->groupClause)
7201 {
7202 /*
7203 * We have GROUP BY without aggregation or grouping sets.
7204 * Make a GroupPath.
7205 */
7206 add_path(grouped_rel, (Path *)
7208 grouped_rel,
7209 path,
7210 info->clauses,
7211 havingQual,
7212 dNumGroups));
7213 }
7214 else
7215 {
7216 /* Other cases should have been handled above */
7217 Assert(false);
7218 }
7219 }
7220 }
7221
7222 /*
7223 * Instead of operating directly on the input relation, we can
7224 * consider finalizing a partially aggregated path.
7225 */
7226 if (partially_grouped_rel != NULL)
7227 {
7228 foreach(lc, partially_grouped_rel->pathlist)
7229 {
7230 ListCell *lc2;
7231 Path *path = (Path *) lfirst(lc);
7232 Path *path_save = path;
7233 List *pathkey_orderings = NIL;
7234
7235 /* generate alternative group orderings that might be useful */
7236 pathkey_orderings = get_useful_group_keys_orderings(root, path);
7237
7238 Assert(list_length(pathkey_orderings) > 0);
7239
7240 /* process all potentially interesting grouping reorderings */
7241 foreach(lc2, pathkey_orderings)
7242 {
7243 GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7244
7245 /* restore the path (we replace it in the loop) */
7246 path = path_save;
7247
7248 path = make_ordered_path(root,
7249 grouped_rel,
7250 path,
7251 cheapest_partially_grouped_path,
7252 info->pathkeys,
7253 -1.0);
7254
7255 if (path == NULL)
7256 continue;
7257
7258 if (parse->hasAggs)
7259 add_path(grouped_rel, (Path *)
7261 grouped_rel,
7262 path,
7263 grouped_rel->reltarget,
7264 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7266 info->clauses,
7267 havingQual,
7268 agg_final_costs,
7269 dNumFinalGroups));
7270 else
7271 add_path(grouped_rel, (Path *)
7273 grouped_rel,
7274 path,
7275 info->clauses,
7276 havingQual,
7277 dNumFinalGroups));
7278
7279 }
7280 }
7281 }
7282 }
7283
7284 if (can_hash)
7285 {
7286 if (parse->groupingSets)
7287 {
7288 /*
7289 * Try for a hash-only groupingsets path over unsorted input.
7290 */
7291 consider_groupingsets_paths(root, grouped_rel,
7292 cheapest_path, false, true,
7293 gd, agg_costs, dNumGroups);
7294 }
7295 else
7296 {
7297 /*
7298 * Generate a HashAgg Path. We just need an Agg over the
7299 * cheapest-total input path, since input order won't matter.
7300 */
7301 add_path(grouped_rel, (Path *)
7302 create_agg_path(root, grouped_rel,
7303 cheapest_path,
7304 grouped_rel->reltarget,
7305 AGG_HASHED,
7307 root->processed_groupClause,
7308 havingQual,
7309 agg_costs,
7310 dNumGroups));
7311 }
7312
7313 /*
7314 * Generate a Finalize HashAgg Path atop of the cheapest partially
7315 * grouped path, assuming there is one
7316 */
7317 if (partially_grouped_rel && partially_grouped_rel->pathlist)
7318 {
7319 add_path(grouped_rel, (Path *)
7321 grouped_rel,
7322 cheapest_partially_grouped_path,
7323 grouped_rel->reltarget,
7324 AGG_HASHED,
7326 root->processed_groupClause,
7327 havingQual,
7328 agg_final_costs,
7329 dNumFinalGroups));
7330 }
7331 }
7332
7333 /*
7334 * When partitionwise aggregate is used, we might have fully aggregated
7335 * paths in the partial pathlist, because add_paths_to_append_rel() will
7336 * consider a path for grouped_rel consisting of a Parallel Append of
7337 * non-partial paths from each child.
7338 */
7339 if (grouped_rel->partial_pathlist != NIL)
7340 gather_grouping_paths(root, grouped_rel);
7341}
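/*
 * Illustrative sketch (not part of planner.c): the node-type decision made in
 * the sorted-input branches above, reduced to its inputs.  The enum and
 * function names are inventions for this example; the real code builds
 * grouping-sets paths via consider_groupingsets_paths(), an AggPath
 * (AGG_SORTED or AGG_PLAIN), or a GroupPath, and asserts that at least one of
 * the three conditions holds.
 */
typedef enum DemoGroupingNode
{
	DEMO_GROUPINGSETS_PATHS,	/* grouping sets present */
	DEMO_SORTED_AGG_PATH,		/* aggregates with GROUP BY */
	DEMO_PLAIN_AGG_PATH,		/* aggregates without GROUP BY */
	DEMO_GROUP_PATH				/* GROUP BY without aggregates */
} DemoGroupingNode;

static DemoGroupingNode
demo_choose_grouping_node(bool has_grouping_sets, bool has_aggs,
						  bool has_group_clause)
{
	if (has_grouping_sets)
		return DEMO_GROUPINGSETS_PATHS;
	if (has_aggs)
		return has_group_clause ? DEMO_SORTED_AGG_PATH : DEMO_PLAIN_AGG_PATH;
	return DEMO_GROUP_PATH;
}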
7342
7343/*
7344 * create_partial_grouping_paths
7345 *
7346 * Create a new upper relation representing the result of partial aggregation
7347 * and populate it with appropriate paths. Note that we don't finalize the
7348 * lists of paths here, so the caller can add additional partial or non-partial
7349 * paths and must afterward call gather_grouping_paths and set_cheapest on
7350 * the returned upper relation.
7351 *
7352 * All paths for this new upper relation -- both partial and non-partial --
7353 * have been partially aggregated but require a subsequent FinalizeAggregate
7354 * step.
7355 *
7356 * NB: This function is allowed to return NULL if it determines that there is
7357 * no real need to create a new RelOptInfo.
7358 */
7359static RelOptInfo *
7360create_partial_grouping_paths(PlannerInfo *root,
7361 RelOptInfo *grouped_rel,
7362 RelOptInfo *input_rel,
7363 grouping_sets_data *gd,
7364 GroupPathExtraData *extra,
7365 bool force_rel_creation)
7366{
7367 Query *parse = root->parse;
7368 RelOptInfo *partially_grouped_rel;
7369 RelOptInfo *eager_agg_rel = NULL;
7370 AggClauseCosts *agg_partial_costs = &extra->agg_partial_costs;
7371 AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7372 Path *cheapest_partial_path = NULL;
7373 Path *cheapest_total_path = NULL;
7374 double dNumPartialGroups = 0;
7375 double dNumPartialPartialGroups = 0;
7376 ListCell *lc;
7377 bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7378 bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7379
7380 /*
7381 * Check whether any partially aggregated paths have been generated
7382 * through eager aggregation.
7383 */
7384 if (input_rel->grouped_rel &&
7385 !IS_DUMMY_REL(input_rel->grouped_rel) &&
7386 input_rel->grouped_rel->pathlist != NIL)
7387 eager_agg_rel = input_rel->grouped_rel;
7388
7389 /*
7390 * Consider whether we should generate partially aggregated non-partial
7391 * paths. We can only do this if we have a non-partial path, and only if
7392 * the parent of the input rel is performing partial partitionwise
7393 * aggregation. (Note that extra->patype is the type of partitionwise
7394 * aggregation being used at the parent level, not this level.)
7395 */
7396 if (input_rel->pathlist != NIL &&
7398 cheapest_total_path = input_rel->cheapest_total_path;
7399
7400 /*
7401 * If parallelism is possible for grouped_rel, then we should consider
7402 * generating partially-grouped partial paths. However, if the input rel
7403 * has no partial paths, then we can't.
7404 */
7405 if (grouped_rel->consider_parallel && input_rel->partial_pathlist != NIL)
7406 cheapest_partial_path = linitial(input_rel->partial_pathlist);
7407
7408 /*
7409 * If we can't partially aggregate partial paths, and we can't partially
7410 * aggregate non-partial paths, and no partially aggregated paths were
7411 * generated by eager aggregation, then don't bother creating the new
7412 * RelOptInfo at all, unless the caller specified force_rel_creation.
7413 */
7414 if (cheapest_total_path == NULL &&
7415 cheapest_partial_path == NULL &&
7416 eager_agg_rel == NULL &&
7417 !force_rel_creation)
7418 return NULL;
7419
7420 /*
7421 * Build a new upper relation to represent the result of partially
7422 * aggregating the rows from the input relation.
7423 */
7424 partially_grouped_rel = fetch_upper_rel(root,
7425 UPPERREL_PARTIAL_GROUP_AGG,
7426 grouped_rel->relids);
7427 partially_grouped_rel->consider_parallel =
7428 grouped_rel->consider_parallel;
7429 partially_grouped_rel->reloptkind = grouped_rel->reloptkind;
7430 partially_grouped_rel->serverid = grouped_rel->serverid;
7431 partially_grouped_rel->userid = grouped_rel->userid;
7432 partially_grouped_rel->useridiscurrent = grouped_rel->useridiscurrent;
7433 partially_grouped_rel->fdwroutine = grouped_rel->fdwroutine;
7434
7435 /*
7436 * Build target list for partial aggregate paths. These paths cannot just
7437 * emit the same tlist as regular aggregate paths, because (1) we must
7438 * include Vars and Aggrefs needed in HAVING, which might not appear in
7439 * the result tlist, and (2) the Aggrefs must be set in partial mode.
7440 */
7441 partially_grouped_rel->reltarget =
7443 extra->havingQual);
7444
7445 if (!extra->partial_costs_set)
7446 {
7447 /*
7448 * Collect statistics about aggregates for estimating costs of
7449 * performing aggregation in parallel.
7450 */
7451 MemSet(agg_partial_costs, 0, sizeof(AggClauseCosts));
7452 MemSet(agg_final_costs, 0, sizeof(AggClauseCosts));
7453 if (parse->hasAggs)
7454 {
7455 /* partial phase */
7457 agg_partial_costs);
7458
7459 /* final phase */
7461 agg_final_costs);
7462 }
7463
7464 extra->partial_costs_set = true;
7465 }
7466
7467 /* Estimate number of partial groups. */
7468 if (cheapest_total_path != NULL)
7469 dNumPartialGroups =
7471 cheapest_total_path->rows,
7472 gd,
7473 extra->targetList);
7474 if (cheapest_partial_path != NULL)
7475 dNumPartialPartialGroups =
7477 cheapest_partial_path->rows,
7478 gd,
7479 extra->targetList);
7480
7481 if (can_sort && cheapest_total_path != NULL)
7482 {
7483 /* This should have been checked previously */
7484 Assert(parse->hasAggs || parse->groupClause);
7485
7486 /*
7487 * Use any available suitably-sorted path as input, and also consider
7488 * sorting the cheapest partial path.
7489 */
7490 foreach(lc, input_rel->pathlist)
7491 {
7492 ListCell *lc2;
7493 Path *path = (Path *) lfirst(lc);
7494 Path *path_save = path;
7495 List *pathkey_orderings = NIL;
7496
7497 /* generate alternative group orderings that might be useful */
7498 pathkey_orderings = get_useful_group_keys_orderings(root, path);
7499
7500 Assert(list_length(pathkey_orderings) > 0);
7501
7502 /* process all potentially interesting grouping reorderings */
7503 foreach(lc2, pathkey_orderings)
7504 {
7505 GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7506
7507 /* restore the path (we replace it in the loop) */
7508 path = path_save;
7509
7510 path = make_ordered_path(root,
7511 partially_grouped_rel,
7512 path,
7513 cheapest_total_path,
7514 info->pathkeys,
7515 -1.0);
7516
7517 if (path == NULL)
7518 continue;
7519
7520 if (parse->hasAggs)
7521 add_path(partially_grouped_rel, (Path *)
7523 partially_grouped_rel,
7524 path,
7525 partially_grouped_rel->reltarget,
7526 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7528 info->clauses,
7529 NIL,
7530 agg_partial_costs,
7531 dNumPartialGroups));
7532 else
7533 add_path(partially_grouped_rel, (Path *)
7535 partially_grouped_rel,
7536 path,
7537 info->clauses,
7538 NIL,
7539 dNumPartialGroups));
7540 }
7541 }
7542 }
7543
7544 if (can_sort && cheapest_partial_path != NULL)
7545 {
7546 /* Similar to above logic, but for partial paths. */
7547 foreach(lc, input_rel->partial_pathlist)
7548 {
7549 ListCell *lc2;
7550 Path *path = (Path *) lfirst(lc);
7551 Path *path_save = path;
7552 List *pathkey_orderings = NIL;
7553
7554 /* generate alternative group orderings that might be useful */
7555 pathkey_orderings = get_useful_group_keys_orderings(root, path);
7556
7557 Assert(list_length(pathkey_orderings) > 0);
7558
7559 /* process all potentially interesting grouping reorderings */
7560 foreach(lc2, pathkey_orderings)
7561 {
7562 GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7563
7564
7565 /* restore the path (we replace it in the loop) */
7566 path = path_save;
7567
7568 path = make_ordered_path(root,
7569 partially_grouped_rel,
7570 path,
7571 cheapest_partial_path,
7572 info->pathkeys,
7573 -1.0);
7574
7575 if (path == NULL)
7576 continue;
7577
7578 if (parse->hasAggs)
7579 add_partial_path(partially_grouped_rel, (Path *)
7581 partially_grouped_rel,
7582 path,
7583 partially_grouped_rel->reltarget,
7584 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7586 info->clauses,
7587 NIL,
7588 agg_partial_costs,
7589 dNumPartialPartialGroups));
7590 else
7591 add_partial_path(partially_grouped_rel, (Path *)
7593 partially_grouped_rel,
7594 path,
7595 info->clauses,
7596 NIL,
7597 dNumPartialPartialGroups));
7598 }
7599 }
7600 }
7601
7602 /*
7603 * Add a partially-grouped HashAgg Path where possible
7604 */
7605 if (can_hash && cheapest_total_path != NULL)
7606 {
7607 /* Checked above */
7608 Assert(parse->hasAggs || parse->groupClause);
7609
7610 add_path(partially_grouped_rel, (Path *)
7612 partially_grouped_rel,
7613 cheapest_total_path,
7614 partially_grouped_rel->reltarget,
7615 AGG_HASHED,
7617 root->processed_groupClause,
7618 NIL,
7619 agg_partial_costs,
7620 dNumPartialGroups));
7621 }
7622
7623 /*
7624 * Now add a partially-grouped HashAgg partial Path where possible
7625 */
7626 if (can_hash && cheapest_partial_path != NULL)
7627 {
7628 add_partial_path(partially_grouped_rel, (Path *)
7630 partially_grouped_rel,
7631 cheapest_partial_path,
7632 partially_grouped_rel->reltarget,
7633 AGG_HASHED,
7635 root->processed_groupClause,
7636 NIL,
7637 agg_partial_costs,
7638 dNumPartialPartialGroups));
7639 }
7640
7641 /*
7642 * Add any partially aggregated paths generated by eager aggregation to
7643 * the new upper relation after applying projection steps as needed.
7644 */
7645 if (eager_agg_rel)
7646 {
7647 /* Add the paths */
7648 foreach(lc, eager_agg_rel->pathlist)
7649 {
7650 Path *path = (Path *) lfirst(lc);
7651
7652 /* Shouldn't have any parameterized paths anymore */
7653 Assert(path->param_info == NULL);
7654
7655 path = (Path *) create_projection_path(root,
7656 partially_grouped_rel,
7657 path,
7658 partially_grouped_rel->reltarget);
7659
7660 add_path(partially_grouped_rel, path);
7661 }
7662
7663 /*
7664 * Likewise add the partial paths, but only if parallelism is possible
7665 * for partially_grouped_rel.
7666 */
7667 if (partially_grouped_rel->consider_parallel)
7668 {
7669 foreach(lc, eager_agg_rel->partial_pathlist)
7670 {
7671 Path *path = (Path *) lfirst(lc);
7672
7673 /* Shouldn't have any parameterized paths anymore */
7674 Assert(path->param_info == NULL);
7675
7676 path = (Path *) create_projection_path(root,
7677 partially_grouped_rel,
7678 path,
7679 partially_grouped_rel->reltarget);
7680
7681 add_partial_path(partially_grouped_rel, path);
7682 }
7683 }
7684 }
7685
7686 /*
7687 * If there is an FDW that's responsible for all baserels of the query,
7688 * let it consider adding partially grouped ForeignPaths.
7689 */
7690 if (partially_grouped_rel->fdwroutine &&
7691 partially_grouped_rel->fdwroutine->GetForeignUpperPaths)
7692 {
7693 FdwRoutine *fdwroutine = partially_grouped_rel->fdwroutine;
7694
7695 fdwroutine->GetForeignUpperPaths(root,
7696 UPPERREL_PARTIAL_GROUP_AGG,
7697 input_rel, partially_grouped_rel,
7698 extra);
7699 }
7700
7701 return partially_grouped_rel;
7702}
7703
7704/*
7705 * make_ordered_path
7706 * Return a path ordered by 'pathkeys' based on the given 'path'. May
7707 * return NULL if it doesn't make sense to generate an ordered path in
7708 * this case.
7709 */
7710static Path *
7711make_ordered_path(PlannerInfo *root, RelOptInfo *rel, Path *path,
7712 Path *cheapest_path, List *pathkeys, double limit_tuples)
7713{
7714 bool is_sorted;
7715 int presorted_keys;
7716
7717 is_sorted = pathkeys_count_contained_in(pathkeys,
7718 path->pathkeys,
7719 &presorted_keys);
7720
7721 if (!is_sorted)
7722 {
7723 /*
7724 * Try at least sorting the cheapest path and also try incrementally
7725 * sorting any path which is partially sorted already (no need to deal
7726 * with paths which have presorted keys when incremental sort is
7727 * disabled unless it's the cheapest input path).
7728 */
7729 if (path != cheapest_path &&
7730 (presorted_keys == 0 || !enable_incremental_sort))
7731 return NULL;
7732
7733 /*
7734 * We've no need to consider both a sort and incremental sort. We'll
7735 * just do a sort if there are no presorted keys and an incremental
7736 * sort when there are presorted keys.
7737 */
7738 if (presorted_keys == 0 || !enable_incremental_sort)
7739 path = (Path *) create_sort_path(root,
7740 rel,
7741 path,
7742 pathkeys,
7743 limit_tuples);
7744 else
7746 rel,
7747 path,
7748 pathkeys,
7749 presorted_keys,
7750 limit_tuples);
7751 }
7752
7753 return path;
7754}
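/*
 * Illustrative sketch (not part of planner.c): the decision rule applied by
 * make_ordered_path() when the input path is not already fully sorted.  The
 * enum and function names are inventions for this example; the real code
 * builds the corresponding Sort or IncrementalSort path, or returns NULL.
 */
typedef enum DemoOrderingChoice
{
	DEMO_ORDERING_SKIP,				/* not worth sorting this path */
	DEMO_ORDERING_FULL_SORT,		/* use a plain Sort */
	DEMO_ORDERING_INCREMENTAL_SORT	/* exploit the presorted prefix */
} DemoOrderingChoice;

static DemoOrderingChoice
demo_choose_ordering(bool is_cheapest_input, int presorted_keys,
					 bool incremental_sort_enabled)
{
	if (!is_cheapest_input &&
		(presorted_keys == 0 || !incremental_sort_enabled))
		return DEMO_ORDERING_SKIP;

	if (presorted_keys == 0 || !incremental_sort_enabled)
		return DEMO_ORDERING_FULL_SORT;

	return DEMO_ORDERING_INCREMENTAL_SORT;
}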
7755
7756/*
7757 * Generate Gather and Gather Merge paths for a grouping relation or partial
7758 * grouping relation.
7759 *
7760 * generate_useful_gather_paths does most of the work, but we also consider a
7761 * special case: we could try sorting the data by the group_pathkeys and then
7762 * applying Gather Merge.
7763 *
7764 * NB: This function shouldn't be used for anything other than a grouped or
7765 * partially grouped relation, not only because it explicitly references
7766 * group_pathkeys but also because we pass "true" as the third argument to
7767 * generate_useful_gather_paths().
7768 */
7769static void
7770gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
7771{
7772 ListCell *lc;
7773 Path *cheapest_partial_path;
7774 List *groupby_pathkeys;
7775
7776 /*
7777 * This occurs after any partial aggregation has taken place, so trim off
7778 * any pathkeys added for ORDER BY / DISTINCT aggregates.
7779 */
7780 if (list_length(root->group_pathkeys) > root->num_groupby_pathkeys)
7781 groupby_pathkeys = list_copy_head(root->group_pathkeys,
7782 root->num_groupby_pathkeys);
7783 else
7784 groupby_pathkeys = root->group_pathkeys;
7785
7786 /* Try Gather for unordered paths and Gather Merge for ordered ones. */
7787 generate_useful_gather_paths(root, rel, true);
7788
7789 cheapest_partial_path = linitial(rel->partial_pathlist);
7790
7791 /* XXX Shouldn't this also consider the group-key-reordering? */
7792 foreach(lc, rel->partial_pathlist)
7793 {
7794 Path *path = (Path *) lfirst(lc);
7795 bool is_sorted;
7796 int presorted_keys;
7797 double total_groups;
7798
7799 is_sorted = pathkeys_count_contained_in(groupby_pathkeys,
7800 path->pathkeys,
7801 &presorted_keys);
7802
7803 if (is_sorted)
7804 continue;
7805
7806 /*
7807 * Try at least sorting the cheapest path and also try incrementally
7808 * sorting any path which is partially sorted already (no need to deal
7809 * with paths which have presorted keys when incremental sort is
7810 * disabled unless it's the cheapest input path).
7811 */
7812 if (path != cheapest_partial_path &&
7813 (presorted_keys == 0 || !enable_incremental_sort))
7814 continue;
7815
7816 /*
7817 * We've no need to consider both a sort and incremental sort. We'll
7818 * just do a sort if there are no presorted keys and an incremental
7819 * sort when there are presorted keys.
7820 */
7821 if (presorted_keys == 0 || !enable_incremental_sort)
7822 path = (Path *) create_sort_path(root, rel, path,
7823 groupby_pathkeys,
7824 -1.0);
7825 else
7827 rel,
7828 path,
7829 groupby_pathkeys,
7830 presorted_keys,
7831 -1.0);
7832 total_groups = compute_gather_rows(path);
7833 path = (Path *)
7835 rel,
7836 path,
7837 rel->reltarget,
7838 groupby_pathkeys,
7839 NULL,
7840 &total_groups);
7841
7842 add_path(rel, path);
7843 }
7844}
7845
7846/*
7847 * can_partial_agg
7848 *
7849 * Determines whether or not partial grouping and/or aggregation is possible.
7850 * Returns true when possible, false otherwise.
7851 */
7852static bool
7853can_partial_agg(PlannerInfo *root)
7854{
7855 Query *parse = root->parse;
7856
7857 if (!parse->hasAggs && parse->groupClause == NIL)
7858 {
7859 /*
7860 * We don't know how to do parallel aggregation unless we have either
7861 * some aggregates or a grouping clause.
7862 */
7863 return false;
7864 }
7865 else if (parse->groupingSets)
7866 {
7867 /* We don't know how to do grouping sets in parallel. */
7868 return false;
7869 }
7870 else if (root->hasNonPartialAggs || root->hasNonSerialAggs)
7871 {
7872 /* Insufficient support for partial mode. */
7873 return false;
7874 }
7875
7876 /* Everything looks good. */
7877 return true;
7878}
7879
7880/*
7881 * apply_scanjoin_target_to_paths
7882 *
7883 * Adjust the final scan/join relation, and recursively all of its children,
7884 * to generate the final scan/join target. It would be more correct to model
7885 * this as a separate planning step with a new RelOptInfo at the toplevel and
7886 * for each child relation, but doing it this way is noticeably cheaper.
7887 * Maybe that problem can be solved at some point, but for now we do this.
7888 *
7889 * If tlist_same_exprs is true, then the scan/join target to be applied has
7890 * the same expressions as the existing reltarget, so we need only insert the
7891 * appropriate sortgroupref information. By avoiding the creation of
7892 * projection paths we save effort both immediately and at plan creation time.
7893 */
7894static void
7895apply_scanjoin_target_to_paths(PlannerInfo *root,
7896 RelOptInfo *rel,
7897 List *scanjoin_targets,
7898 List *scanjoin_targets_contain_srfs,
7899 bool scanjoin_target_parallel_safe,
7900 bool tlist_same_exprs)
7901{
7902 bool rel_is_partitioned = IS_PARTITIONED_REL(rel);
7903 PathTarget *scanjoin_target;
7904 ListCell *lc;
7905
7906 /* This recurses, so be paranoid. */
7907 check_stack_depth();
7908
7909 /*
7910 * If the rel only has Append and MergeAppend paths, we want to drop its
7911 * existing paths and generate new ones. This function would still be
7912 * correct if we kept the existing paths: we'd modify them to generate the
7913 * correct target above the partitioning Append, and then they'd compete
7914 * on cost with paths generating the target below the Append. However, in
7915 * our current cost model the latter way always costs the same or less,
7916 * so modifying the existing paths would just be useless work.
7917 * Moreover, when the cost is the same, varying roundoff errors might
7918 * sometimes allow an existing path to be picked, resulting in undesirable
7919 * cross-platform plan variations. So we drop old paths and thereby force
7920 * the work to be done below the Append.
7921 *
7922 * However, there are several cases when this optimization is not safe. If
7923 * the rel isn't partitioned, then none of the paths will be Append or
7924 * MergeAppend paths, so we should definitely not do this. If it is
7925 * partitioned but is a joinrel, it may have Append and MergeAppend paths,
7926 * but it can also have join paths that we can't afford to discard.
7927 *
7928 * Some care is needed, because we have to allow
7929 * generate_useful_gather_paths to see the old partial paths in the next
7930 * stanza. Hence, zap the main pathlist here, then allow
7931 * generate_useful_gather_paths to add path(s) to the main list, and
7932 * finally zap the partial pathlist.
7933 */
7934 if (rel_is_partitioned && IS_SIMPLE_REL(rel))
7935 rel->pathlist = NIL;
7936
7937 /*
7938 * If the scan/join target is not parallel-safe, partial paths cannot
7939 * generate it.
7940 */
7941 if (!scanjoin_target_parallel_safe)
7942 {
7943 /*
7944 * Since we can't generate the final scan/join target in parallel
7945 * workers, this is our last opportunity to use any partial paths that
7946 * exist; so build Gather path(s) that use them and emit whatever the
7947 * current reltarget is. We don't do this in the case where the
7948 * target is parallel-safe, since we will be able to generate superior
7949 * paths by doing it after the final scan/join target has been
7950 * applied.
7951 */
7953
7954 /* Can't use parallel query above this level. */
7955 rel->partial_pathlist = NIL;
7956 rel->consider_parallel = false;
7957 }
7958
7959 /* Finish dropping old paths for a partitioned rel, per comment above */
7960 if (rel_is_partitioned && IS_SIMPLE_REL(rel))
7961 rel->partial_pathlist = NIL;
7962
7963 /* Extract SRF-free scan/join target. */
7964 scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
7965
7966 /*
7967 * Apply the SRF-free scan/join target to each existing path.
7968 *
7969 * If the tlist exprs are the same, we can just inject the sortgroupref
7970 * information into the existing pathtargets. Otherwise, replace each
7971 * path with a projection path that generates the SRF-free scan/join
7972 * target. This can't change the ordering of paths within rel->pathlist,
7973 * so we just modify the list in place.
7974 */
7975 foreach(lc, rel->pathlist)
7976 {
7977 Path *subpath = (Path *) lfirst(lc);
7978
7979 /* Shouldn't have any parameterized paths anymore */
7980 Assert(subpath->param_info == NULL);
7981
7982 if (tlist_same_exprs)
7983 subpath->pathtarget->sortgrouprefs =
7984 scanjoin_target->sortgrouprefs;
7985 else
7986 {
7987 Path *newpath;
7988
7989 newpath = (Path *) create_projection_path(root, rel, subpath,
7990 scanjoin_target);
7991 lfirst(lc) = newpath;
7992 }
7993 }
7994
7995 /* Likewise adjust the targets for any partial paths. */
7996 foreach(lc, rel->partial_pathlist)
7997 {
7998 Path *subpath = (Path *) lfirst(lc);
7999
8000 /* Shouldn't have any parameterized paths anymore */
8001 Assert(subpath->param_info == NULL);
8002
8003 if (tlist_same_exprs)
8004 subpath->pathtarget->sortgrouprefs =
8005 scanjoin_target->sortgrouprefs;
8006 else
8007 {
8008 Path *newpath;
8009
8010 newpath = (Path *) create_projection_path(root, rel, subpath,
8011 scanjoin_target);
8012 lfirst(lc) = newpath;
8013 }
8014 }
8015
8016 /*
8017 * Now, if final scan/join target contains SRFs, insert ProjectSetPath(s)
8018 * atop each existing path. (Note that this function doesn't look at the
8019 * cheapest-path fields, which is a good thing because they're bogus right
8020 * now.)
8021 */
8022 if (root->parse->hasTargetSRFs)
8024 scanjoin_targets,
8025 scanjoin_targets_contain_srfs);
8026
8027 /*
8028 * Update the rel's target to be the final (with SRFs) scan/join target.
8029 * This now matches the actual output of all the paths, and we might get
8030 * confused in createplan.c if they don't agree. We must do this now so
8031 * that any append paths made in the next part will use the correct
8032 * pathtarget (cf. create_append_path).
8033 *
8034 * Note that this is also necessary if GetForeignUpperPaths() gets called
8035 * on the final scan/join relation or on any of its children, since the
8036 * FDW might look at the rel's target to create ForeignPaths.
8037 */
8038 rel->reltarget = llast_node(PathTarget, scanjoin_targets);
8039
8040 /*
8041 * If the relation is partitioned, recursively apply the scan/join target
8042 * to all partitions, and generate brand-new Append paths in which the
8043 * scan/join target is computed below the Append rather than above it.
8044 * Since Append is not projection-capable, that might save a separate
8045 * Result node, and it also is important for partitionwise aggregate.
8046 */
8047 if (rel_is_partitioned)
8048 {
8049 List *live_children = NIL;
8050 int i;
8051
8052 /* Adjust each partition. */
8053 i = -1;
8054 while ((i = bms_next_member(rel->live_parts, i)) >= 0)
8055 {
8056 RelOptInfo *child_rel = rel->part_rels[i];
8057 AppendRelInfo **appinfos;
8058 int nappinfos;
8059 List *child_scanjoin_targets = NIL;
8060
8061 Assert(child_rel != NULL);
8062
8063 /* Dummy children can be ignored. */
8064 if (IS_DUMMY_REL(child_rel))
8065 continue;
8066
8067 /* Translate scan/join targets for this child. */
8068 appinfos = find_appinfos_by_relids(root, child_rel->relids,
8069 &nappinfos);
8070 foreach(lc, scanjoin_targets)
8071 {
8072 PathTarget *target = lfirst_node(PathTarget, lc);
8073
8074 target = copy_pathtarget(target);
8075 target->exprs = (List *)
8077 (Node *) target->exprs,
8078 nappinfos, appinfos);
8079 child_scanjoin_targets = lappend(child_scanjoin_targets,
8080 target);
8081 }
8082 pfree(appinfos);
8083
8084 /* Recursion does the real work. */
8086 child_scanjoin_targets,
8087 scanjoin_targets_contain_srfs,
8088 scanjoin_target_parallel_safe,
8090
8091 /* Save non-dummy children for Append paths. */
8092 if (!IS_DUMMY_REL(child_rel))
8093 live_children = lappend(live_children, child_rel);
8094 }
8095
8096 /* Build new paths for this relation by appending child paths. */
8097 add_paths_to_append_rel(root, rel, live_children);
8098 }
8099
8100 /*
8101 * Consider generating Gather or Gather Merge paths. We must only do this
8102 * if the relation is parallel safe, and we don't do it for child rels to
8103 * avoid creating multiple Gather nodes within the same plan. We must do
8104 * this after all paths have been generated and before set_cheapest, since
8105 * one of the generated paths may turn out to be the cheapest one.
8106 */
8107 if (rel->consider_parallel && !IS_OTHER_REL(rel))
8109
8110 /*
8111 * Reassess which paths are the cheapest, now that we've potentially added
8112 * new Gather (or Gather Merge) and/or Append (or MergeAppend) paths to
8113 * this relation.
8114 */
8115 set_cheapest(rel);
8116}
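/*
 * Illustrative sketch (not part of planner.c): the per-path choice made in
 * the two loops above, abstracted.  The stub types and names are inventions
 * for this example; the real code shares the sortgrouprefs array when
 * tlist_same_exprs is true and otherwise wraps the path in a ProjectionPath
 * via create_projection_path().
 */
typedef struct DemoTarget
{
	unsigned   *sortgrouprefs;	/* per-column sort/group labels */
} DemoTarget;

typedef struct DemoPath
{
	DemoTarget *pathtarget;
	bool		projected;		/* true if a projection step was added */
} DemoPath;

static void
demo_apply_target(DemoPath *subpath, DemoTarget *scanjoin_target,
				  bool tlist_same_exprs)
{
	if (tlist_same_exprs)
	{
		/* Same expressions: just adopt the sort/group labels. */
		subpath->pathtarget->sortgrouprefs = scanjoin_target->sortgrouprefs;
	}
	else
	{
		/* Different expressions: a projection step is required. */
		subpath->pathtarget = scanjoin_target;
		subpath->projected = true;
	}
}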
8117
8118/*
8119 * create_partitionwise_grouping_paths
8120 *
8121 * If the partition keys of input relation are part of the GROUP BY clause, all
8122 * the rows belonging to a given group come from a single partition. This
8123 * allows aggregation/grouping over a partitioned relation to be broken down
8124 * into aggregation/grouping on each partition. This should be no worse, and
8125 * often better, than the normal approach.
8126 *
8127 * However, if the GROUP BY clause does not contain all the partition keys,
8128 * rows from a given group may be spread across multiple partitions. In that
8129 * case, we perform partial aggregation for each group, append the results,
8130 * and then finalize aggregation. This is less certain to win than the
8131 * previous case. It may win if the PartialAggregate stage greatly reduces
8132 * the number of groups, because fewer rows will pass through the Append node.
8133 * It may lose if we have lots of small groups.
8134 */
8135static void
8136create_partitionwise_grouping_paths(PlannerInfo *root,
8137 RelOptInfo *input_rel,
8138 RelOptInfo *grouped_rel,
8139 RelOptInfo *partially_grouped_rel,
8140 const AggClauseCosts *agg_costs,
8141 grouping_sets_data *gd,
8142 PartitionwiseAggregateType patype,
8143 GroupPathExtraData *extra)
8144{
8145 List *grouped_live_children = NIL;
8146 List *partially_grouped_live_children = NIL;
8147 PathTarget *target = grouped_rel->reltarget;
8148 bool partial_grouping_valid = true;
8149 int i;
8150
8153 partially_grouped_rel != NULL);
8154
8155 /* Add paths for partitionwise aggregation/grouping. */
8156 i = -1;
8157 while ((i = bms_next_member(input_rel->live_parts, i)) >= 0)
8158 {
8159 RelOptInfo *child_input_rel = input_rel->part_rels[i];
8160 PathTarget *child_target;
8161 AppendRelInfo **appinfos;
8162 int nappinfos;
8163 GroupPathExtraData child_extra;
8164 RelOptInfo *child_grouped_rel;
8165 RelOptInfo *child_partially_grouped_rel;
8166
8167 Assert(child_input_rel != NULL);
8168
8169 /* Dummy children can be ignored. */
8170 if (IS_DUMMY_REL(child_input_rel))
8171 continue;
8172
8173 child_target = copy_pathtarget(target);
8174
8175 /*
8176 * Copy the given "extra" structure as is and then override the
8177 * members specific to this child.
8178 */
8179 memcpy(&child_extra, extra, sizeof(child_extra));
8180
8181 appinfos = find_appinfos_by_relids(root, child_input_rel->relids,
8182 &nappinfos);
8183
8184 child_target->exprs = (List *)
8186 (Node *) target->exprs,
8187 nappinfos, appinfos);
8188
8189 /* Translate havingQual and targetList. */
8190 child_extra.havingQual = (Node *)
8192 extra->havingQual,
8193 nappinfos, appinfos);
8194 child_extra.targetList = (List *)
8196 (Node *) extra->targetList,
8197 nappinfos, appinfos);
8198
8199 /*
8200 * extra->patype was the value computed for our parent rel; patype is
8201 * the value for this relation. For the child, our value is its
8202 * parent rel's value.
8203 */
8204 child_extra.patype = patype;
8205
8206 /*
8207 * Create grouping relation to hold fully aggregated grouping and/or
8208 * aggregation paths for the child.
8209 */
8210 child_grouped_rel = make_grouping_rel(root, child_input_rel,
8211 child_target,
8212 extra->target_parallel_safe,
8213 child_extra.havingQual);
8214
8215 /* Create grouping paths for this child relation. */
8216 create_ordinary_grouping_paths(root, child_input_rel,
8217 child_grouped_rel,
8218 agg_costs, gd, &child_extra,
8219 &child_partially_grouped_rel);
8220
8221 if (child_partially_grouped_rel)
8222 {
8223 partially_grouped_live_children =
8224 lappend(partially_grouped_live_children,
8225 child_partially_grouped_rel);
8226 }
8227 else
8228 partial_grouping_valid = false;
8229
8230 if (patype == PARTITIONWISE_AGGREGATE_FULL)
8231 {
8232 set_cheapest(child_grouped_rel);
8233 grouped_live_children = lappend(grouped_live_children,
8234 child_grouped_rel);
8235 }
8236
8237 pfree(appinfos);
8238 }
8239
8240 /*
8241 * Try to create append paths for partially grouped children. For full
8242 * partitionwise aggregation, we might have paths in the partial_pathlist
8243 * if parallel aggregation is possible. For partial partitionwise
8244 * aggregation, we may have paths in both pathlist and partial_pathlist.
8245 *
8246 * NB: We must have a partially grouped path for every child in order to
8247 * generate a partially grouped path for this relation.
8248 */
8249 if (partially_grouped_rel && partial_grouping_valid)
8250 {
8251 Assert(partially_grouped_live_children != NIL);
8252
8253 add_paths_to_append_rel(root, partially_grouped_rel,
8254 partially_grouped_live_children);
8255 }
8256
8257 /* If possible, create append paths for fully grouped children. */
8258 if (patype == PARTITIONWISE_AGGREGATE_FULL)
8259 {
8260 Assert(grouped_live_children != NIL);
8261
8262 add_paths_to_append_rel(root, grouped_rel, grouped_live_children);
8263 }
8264}
8265
8266/*
8267 * group_by_has_partkey
8268 *
8269 * Returns true if all the partition keys of the given relation appear in
8270 * the GROUP BY clause (with matching collations), false otherwise.
8271 */
8272static bool
8273group_by_has_partkey(RelOptInfo *input_rel,
8274 List *targetList,
8275 List *groupClause)
8276{
8277 List *groupexprs = get_sortgrouplist_exprs(groupClause, targetList);
8278 int cnt = 0;
8279 int partnatts;
8280
8281 /* Input relation should be partitioned. */
8282 Assert(input_rel->part_scheme);
8283
8284 /* Rule out early, if there are no partition keys present. */
8285 if (!input_rel->partexprs)
8286 return false;
8287
8288 partnatts = input_rel->part_scheme->partnatts;
8289
8290 for (cnt = 0; cnt < partnatts; cnt++)
8291 {
8292 List *partexprs = input_rel->partexprs[cnt];
8293 ListCell *lc;
8294 bool found = false;
8295
8296 foreach(lc, partexprs)
8297 {
8298 ListCell *lg;
8299 Expr *partexpr = lfirst(lc);
8300 Oid partcoll = input_rel->part_scheme->partcollation[cnt];
8301
8302 foreach(lg, groupexprs)
8303 {
8304 Expr *groupexpr = lfirst(lg);
8305 Oid groupcoll = exprCollation((Node *) groupexpr);
8306
8307 /*
8308 * Note: we can assume there is at most one RelabelType node;
8309 * eval_const_expressions() will have simplified if more than
8310 * one.
8311 */
8312 if (IsA(groupexpr, RelabelType))
8313 groupexpr = ((RelabelType *) groupexpr)->arg;
8314
8315 if (equal(groupexpr, partexpr))
8316 {
8317 /*
8318 * Reject a match if the grouping collation does not match
8319 * the partitioning collation.
8320 */
8321 if (OidIsValid(partcoll) && OidIsValid(groupcoll) &&
8322 partcoll != groupcoll)
8323 return false;
8324
8325 found = true;
8326 break;
8327 }
8328 }
8329
8330 if (found)
8331 break;
8332 }
8333
8334 /*
8335 * If none of the expressions for this partition key matches any of the
8336 * GROUP BY expressions, return false.
8337 */
8338 if (!found)
8339 return false;
8340 }
8341
8342 return true;
8343}
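/*
 * Illustrative sketch (not part of planner.c): the shape of the check above,
 * over plain integer "expression ids" and collation OIDs instead of Expr
 * nodes.  The types and names are inventions for this example.  Every
 * partition key must be matched by some GROUP BY item, and when both sides
 * carry a collation the collations must agree.
 */
typedef struct DemoKey
{
	int			exprid;			/* stands in for an expression */
	unsigned	collation;		/* 0 means "no collation" */
} DemoKey;

static bool
demo_groupby_covers_partkeys(const DemoKey *partkeys, int npartkeys,
							 const DemoKey *groupkeys, int ngroupkeys)
{
	for (int i = 0; i < npartkeys; i++)
	{
		bool		found = false;

		for (int j = 0; j < ngroupkeys; j++)
		{
			if (groupkeys[j].exprid != partkeys[i].exprid)
				continue;
			if (partkeys[i].collation != 0 && groupkeys[j].collation != 0 &&
				partkeys[i].collation != groupkeys[j].collation)
				return false;	/* matching expr, incompatible collation */
			found = true;
			break;
		}

		if (!found)
			return false;		/* this partition key is not grouped on */
	}

	return true;
}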
8344
8345/*
8346 * generate_setop_child_grouplist
8347 * Build a SortGroupClause list defining the sort/grouping properties
8348 * of the child of a set operation.
8349 *
8350 * This is similar to generate_setop_grouplist(), but differs in that the
8351 * setop child query's targetlist entries may already have a tleSortGroupRef
8352 * assigned for other purposes, such as GROUP BYs. Here we keep the
8353 * SortGroupClause list in the same order as 'op' groupClauses and just adjust
8354 * the tleSortGroupRef to reference the TargetEntry's 'ressortgroupref'. If
8355 * any of the columns in the targetlist don't match the setop's colTypes,
8356 * we return an empty list. This may leave some TLEs with unreferenced
8357 * ressortgroupref markings, but that's harmless.
8358 */
8359static List *
8360generate_setop_child_grouplist(SetOperationStmt *op, List *targetlist)
8361{
8362 List *grouplist = copyObject(op->groupClauses);
8363 ListCell *lg;
8364 ListCell *lt;
8365 ListCell *ct;
8366
8367 lg = list_head(grouplist);
8368 ct = list_head(op->colTypes);
8369 foreach(lt, targetlist)
8370 {
8371 TargetEntry *tle = (TargetEntry *) lfirst(lt);
8372 SortGroupClause *sgc;
8373 Oid coltype;
8374
8375 /* resjunk columns could have sortgrouprefs. Leave these alone */
8376 if (tle->resjunk)
8377 continue;
8378
8379 /*
8380 * We expect every non-resjunk target to have a SortGroupClause and
8381 * colTypes.
8382 */
8383 Assert(lg != NULL);
8384 Assert(ct != NULL);
8385 sgc = (SortGroupClause *) lfirst(lg);
8386 coltype = lfirst_oid(ct);
8387
8388 /* reject if target type isn't the same as the setop target type */
8389 if (coltype != exprType((Node *) tle->expr))
8390 return NIL;
8391
8392 lg = lnext(grouplist, lg);
8393 ct = lnext(op->colTypes, ct);
8394
8395 /* assign a tleSortGroupRef, or reuse the existing one */
8396 sgc->tleSortGroupRef = assignSortGroupRef(tle, targetlist);
8397 }
8398
8399 Assert(lg == NULL);
8400 Assert(ct == NULL);
8401
8402 return grouplist;
8403}
8404
8405/*
8406 * create_unique_paths
8407 * Build a new RelOptInfo containing Paths that represent elimination of
8408 * distinct rows from the input data. Distinct-ness is defined according to
8409 * the needs of the semijoin represented by sjinfo. If it is not possible
8410 * to identify how to make the data unique, NULL is returned.
8411 *
8412 * If used at all, this is likely to be called repeatedly on the same rel,
8413 * so we cache the result.
8414 */
8415RelOptInfo *
8416create_unique_paths(PlannerInfo *root, RelOptInfo *rel, SpecialJoinInfo *sjinfo)
8417{
8418 RelOptInfo *unique_rel;
8419 List *sortPathkeys = NIL;
8420 List *groupClause = NIL;
8421 MemoryContext oldcontext;
8422
8423 /* Caller made a mistake if SpecialJoinInfo is the wrong one */
8424 Assert(sjinfo->jointype == JOIN_SEMI);
8425 Assert(bms_equal(rel->relids, sjinfo->syn_righthand));
8426
8427 /* If result already cached, return it */
8428 if (rel->unique_rel)
8429 return rel->unique_rel;
8430
8431 /* If it's not possible to unique-ify, return NULL */
8432 if (!(sjinfo->semi_can_btree || sjinfo->semi_can_hash))
8433 return NULL;
8434
8435 /*
8436 * Punt if this is a child relation and we failed to build a unique-ified
8437 * relation for its parent. This can happen if all the RHS columns were
8438 * found to be equated to constants when unique-ifying the parent table,
8439 * leaving no columns to unique-ify.
8440 */
8441 if (IS_OTHER_REL(rel) && rel->top_parent->unique_rel == NULL)
8442 return NULL;
8443
8444 /*
8445 * When called during GEQO join planning, we are in a short-lived memory
8446 * context. We must make sure that the unique rel and any subsidiary data
8447 * structures created for a baserel survive the GEQO cycle, else the
8448 * baserel is trashed for future GEQO cycles. On the other hand, when we
8449 * are creating those for a joinrel during GEQO, we don't want them to
8450 * clutter the main planning context. Upshot is that the best solution is
8451 * to explicitly allocate memory in the same context the given RelOptInfo
8452 * is in.
8453 */
8455
8456 unique_rel = makeNode(RelOptInfo);
8457 memcpy(unique_rel, rel, sizeof(RelOptInfo));
8458
8459 /*
8460 * clear path info
8461 */
8462 unique_rel->pathlist = NIL;
8463 unique_rel->ppilist = NIL;
8464 unique_rel->partial_pathlist = NIL;
8465 unique_rel->cheapest_startup_path = NULL;
8466 unique_rel->cheapest_total_path = NULL;
8467 unique_rel->cheapest_parameterized_paths = NIL;
8468
8469 /*
8470 * Build the target list for the unique rel. We also build the pathkeys
8471 * that represent the ordering requirements for the sort-based
8472 * implementation, and the list of SortGroupClause nodes that represent
8473 * the columns to be grouped on for the hash-based implementation.
8474 *
8475 * For a child rel, we can construct these fields from those of its
8476 * parent.
8477 */
8478 if (IS_OTHER_REL(rel))
8479 {
8480 PathTarget *child_unique_target;
8481 PathTarget *parent_unique_target;
8482
8483 parent_unique_target = rel->top_parent->unique_rel->reltarget;
8484
8485 child_unique_target = copy_pathtarget(parent_unique_target);
8486
8487 /* Translate the target expressions */
8488 child_unique_target->exprs = (List *)
8490 (Node *) parent_unique_target->exprs,
8491 rel,
8492 rel->top_parent);
8493
8494 unique_rel->reltarget = child_unique_target;
8495
8496 sortPathkeys = rel->top_parent->unique_pathkeys;
8497 groupClause = rel->top_parent->unique_groupclause;
8498 }
8499 else
8500 {
8501 List *newtlist;
8502 int nextresno;
8503 List *sortList = NIL;
8504 ListCell *lc1;
8505 ListCell *lc2;
8506
8507 /*
8508 * The values we are supposed to unique-ify may be expressions in the
8509 * variables of the input rel's targetlist. We have to add any such
8510 * expressions to the unique rel's targetlist.
8511 *
8512 * To complicate matters, some of the values to be unique-ified may be
8513 * known redundant by the EquivalenceClass machinery (e.g., because
8514 * they have been equated to constants). There is no need to compare
8515 * such values during unique-ification, and indeed we had better not
8516 * try because the Vars involved may not have propagated as high as
8517 * the semijoin's level. We use make_pathkeys_for_sortclauses to
8518 * detect such cases, which is a tad inefficient but it doesn't seem
8519 * worth building specialized infrastructure for this.
8520 */
8521 newtlist = make_tlist_from_pathtarget(rel->reltarget);
8522 nextresno = list_length(newtlist) + 1;
8523
8524 forboth(lc1, sjinfo->semi_rhs_exprs, lc2, sjinfo->semi_operators)
8525 {
8526 Expr *uniqexpr = lfirst(lc1);
8527 Oid in_oper = lfirst_oid(lc2);
8528 Oid sortop;
8529 TargetEntry *tle;
8530 bool made_tle = false;
8531
8532 tle = tlist_member(uniqexpr, newtlist);
8533 if (!tle)
8534 {
8535 tle = makeTargetEntry(uniqexpr,
8536 nextresno,
8537 NULL,
8538 false);
8539 newtlist = lappend(newtlist, tle);
8540 nextresno++;
8541 made_tle = true;
8542 }
8543
8544 /*
8545 * Try to build an ORDER BY list to sort the input compatibly. We
8546 * do this for each sortable clause even when the clauses are not
8547 * all sortable, so that we can detect clauses that are redundant
8548 * according to the pathkey machinery.
8549 */
8550 sortop = get_ordering_op_for_equality_op(in_oper, false);
8551 if (OidIsValid(sortop))
8552 {
8553 Oid eqop;
8554 SortGroupClause *sortcl;
8555
8556 /*
8557 * The Unique node will need equality operators. Normally
8558 * these are the same as the IN clause operators, but if those
8559 * are cross-type operators then the equality operators are
8560 * the ones for the IN clause operators' RHS datatype.
8561 */
8562 eqop = get_equality_op_for_ordering_op(sortop, NULL);
8563 if (!OidIsValid(eqop)) /* shouldn't happen */
8564 elog(ERROR, "could not find equality operator for ordering operator %u",
8565 sortop);
8566
8567 sortcl = makeNode(SortGroupClause);
8568 sortcl->tleSortGroupRef = assignSortGroupRef(tle, newtlist);
8569 sortcl->eqop = eqop;
8570 sortcl->sortop = sortop;
8571 sortcl->reverse_sort = false;
8572 sortcl->nulls_first = false;
8573 sortcl->hashable = false; /* no need to make this accurate */
8574 sortList = lappend(sortList, sortcl);
8575
8576 /*
8577 * At each step, convert the SortGroupClause list to pathkey
8578 * form. If the just-added SortGroupClause is redundant, the
8579 * result will be shorter than the SortGroupClause list.
8580 */
8581 sortPathkeys = make_pathkeys_for_sortclauses(root, sortList,
8582 newtlist);
8583 if (list_length(sortPathkeys) != list_length(sortList))
8584 {
8585 /* Drop the redundant SortGroupClause */
8586 sortList = list_delete_last(sortList);
8587 Assert(list_length(sortPathkeys) == list_length(sortList));
8588 /* Undo tlist addition, if we made one */
8589 if (made_tle)
8590 {
8591 newtlist = list_delete_last(newtlist);
8592 nextresno--;
8593 }
8594 /* We need not consider this clause for hashing, either */
8595 continue;
8596 }
8597 }
8598 else if (sjinfo->semi_can_btree) /* shouldn't happen */
8599 elog(ERROR, "could not find ordering operator for equality operator %u",
8600 in_oper);
8601
8602 if (sjinfo->semi_can_hash)
8603 {
8604 /* Create a GROUP BY list for the Agg node to use */
8605 Oid eq_oper;
8606 SortGroupClause *groupcl;
8607
8608 /*
8609 * Get the hashable equality operators for the Agg node to
8610 * use. Normally these are the same as the IN clause
8611 * operators, but if those are cross-type operators then the
8612 * equality operators are the ones for the IN clause
8613 * operators' RHS datatype.
8614 */
8615 if (!get_compatible_hash_operators(in_oper, NULL, &eq_oper))
8616 elog(ERROR, "could not find compatible hash operator for operator %u",
8617 in_oper);
8618
8619 groupcl = makeNode(SortGroupClause);
8620 groupcl->tleSortGroupRef = assignSortGroupRef(tle, newtlist);
8621 groupcl->eqop = eq_oper;
8622 groupcl->sortop = sortop;
8623 groupcl->reverse_sort = false;
8624 groupcl->nulls_first = false;
8625 groupcl->hashable = true;
8626 groupClause = lappend(groupClause, groupcl);
8627 }
8628 }
8629
8630 /*
8631 * Done building the sortPathkeys and groupClause. But the
8632 * sortPathkeys are bogus if not all the clauses were sortable.
8633 */
8634 if (!sjinfo->semi_can_btree)
8635 sortPathkeys = NIL;
8636
8637 /*
8638 * It can happen that all the RHS columns are equated to constants.
8639 * We'd have to do something special to unique-ify in that case, and
8640 * it's such an unlikely-in-the-real-world case that it's not worth
8641 * the effort. So just punt if we found no columns to unique-ify.
8642 */
8643 if (sortPathkeys == NIL && groupClause == NIL)
8644 {
8645 MemoryContextSwitchTo(oldcontext);
8646 return NULL;
8647 }
8648
8649 /* Convert the required targetlist back to PathTarget form */
8650 unique_rel->reltarget = create_pathtarget(root, newtlist);
8651 }
8652
8653 /* build unique paths based on input rel's pathlist */
8654 create_final_unique_paths(root, rel, sortPathkeys, groupClause,
8655 sjinfo, unique_rel);
8656
8657 /* build unique paths based on input rel's partial_pathlist */
8658 create_partial_unique_paths(root, rel, sortPathkeys, groupClause,
8659 sjinfo, unique_rel);
8660
8661 /* Now choose the best path(s) */
8662 set_cheapest(unique_rel);
8663
8664 /*
8665 * There shouldn't be any partial paths for the unique relation;
8666 * otherwise, we won't be able to properly guarantee uniqueness.
8667 */
8668 Assert(unique_rel->partial_pathlist == NIL);
8669
8670 /* Cache the result */
8671 rel->unique_rel = unique_rel;
8672 rel->unique_pathkeys = sortPathkeys;
8673 rel->unique_groupclause = groupClause;
8674
8675 MemoryContextSwitchTo(oldcontext);
8676
8677 return unique_rel;
8678}
8679
8680/*
8681 * create_final_unique_paths
8682 * Create unique paths in 'unique_rel' based on 'input_rel' pathlist
8683 */
8684static void
8685create_final_unique_paths(PlannerInfo *root, RelOptInfo *input_rel,
8686 List *sortPathkeys, List *groupClause,
8687 SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel)
8688{
8689 Path *cheapest_input_path = input_rel->cheapest_total_path;
8690
8691 /* Estimate number of output rows */
8692 unique_rel->rows = estimate_num_groups(root,
8693 sjinfo->semi_rhs_exprs,
8694 cheapest_input_path->rows,
8695 NULL,
8696 NULL);
8697
8698 /* Consider sort-based implementations, if possible. */
8699 if (sjinfo->semi_can_btree)
8700 {
8701 ListCell *lc;
8702
8703 /*
8704 * Use any available suitably-sorted path as input, and also consider
8705 * sorting the cheapest-total path and incremental sort on any paths
8706 * with presorted keys.
8707 *
8708 * To save planning time, we ignore parameterized input paths unless
8709 * they are the cheapest-total path.
8710 */
8711 foreach(lc, input_rel->pathlist)
8712 {
8713 Path *input_path = (Path *) lfirst(lc);
8714 Path *path;
8715 bool is_sorted;
8716 int presorted_keys;
8717
8718 /*
8719 * Ignore parameterized paths that are not the cheapest-total
8720 * path.
8721 */
8722 if (input_path->param_info &&
8723 input_path != cheapest_input_path)
8724 continue;
8725
8726 is_sorted = pathkeys_count_contained_in(sortPathkeys,
8727 input_path->pathkeys,
8728 &presorted_keys);
8729
8730 /*
8731 * Ignore paths that are not suitably or partially sorted, unless
8732 * they are the cheapest total path (no need to deal with paths
8733 * which have presorted keys when incremental sort is disabled).
8734 */
8735 if (!is_sorted && input_path != cheapest_input_path &&
8736 (presorted_keys == 0 || !enable_incremental_sort))
8737 continue;
8738
8739 /*
8740 * Make a separate ProjectionPath in case we need a Result node.
8741 */
8742 path = (Path *) create_projection_path(root,
8743 unique_rel,
8744 input_path,
8745 unique_rel->reltarget);
8746
8747 if (!is_sorted)
8748 {
8749 /*
8750 * We've no need to consider both a sort and incremental sort.
8751 * We'll just do a sort if there are no presorted keys and an
8752 * incremental sort when there are presorted keys.
8753 */
8754 if (presorted_keys == 0 || !enable_incremental_sort)
8755 path = (Path *) create_sort_path(root,
8756 unique_rel,
8757 path,
8758 sortPathkeys,
8759 -1.0);
8760 else
8762 unique_rel,
8763 path,
8764 sortPathkeys,
8765 presorted_keys,
8766 -1.0);
8767 }
8768
8769 path = (Path *) create_unique_path(root, unique_rel, path,
8770 list_length(sortPathkeys),
8771 unique_rel->rows);
8772
8773 add_path(unique_rel, path);
8774 }
8775 }
8776
8777 /* Consider hash-based implementation, if possible. */
8778 if (sjinfo->semi_can_hash)
8779 {
8780 Path *path;
8781
8782 /*
8783 * Make a separate ProjectionPath in case we need a Result node.
8784 */
8785 path = (Path *) create_projection_path(root,
8786 unique_rel,
8787 cheapest_input_path,
8788 unique_rel->reltarget);
8789
8790 path = (Path *) create_agg_path(root,
8791 unique_rel,
8792 path,
8793 cheapest_input_path->pathtarget,
8794 AGG_HASHED,
8796 groupClause,
8797 NIL,
8798 NULL,
8799 unique_rel->rows);
8800
8801 add_path(unique_rel, path);
8802 }
8803}
8804
8805/*
8806 * create_partial_unique_paths
8807 * Create unique paths in 'unique_rel' based on 'input_rel' partial_pathlist
8808 */
8809static void
8810create_partial_unique_paths(PlannerInfo *root, RelOptInfo *input_rel,
8811 List *sortPathkeys, List *groupClause,
8812 SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel)
8813{
8814 RelOptInfo *partial_unique_rel;
8815 Path *cheapest_partial_path;
8816
8817 /* nothing to do when there are no partial paths in the input rel */
8818 if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
8819 return;
8820
8821 /*
8822 * nothing to do if there's anything in the targetlist that's
8823 * parallel-restricted.
8824 */
8825 if (!is_parallel_safe(root, (Node *) unique_rel->reltarget->exprs))
8826 return;
8827
8828 cheapest_partial_path = linitial(input_rel->partial_pathlist);
8829
8830 partial_unique_rel = makeNode(RelOptInfo);
8831 memcpy(partial_unique_rel, input_rel, sizeof(RelOptInfo));
8832
8833 /*
8834 * clear path info
8835 */
8836 partial_unique_rel->pathlist = NIL;
8837 partial_unique_rel->ppilist = NIL;
8838 partial_unique_rel->partial_pathlist = NIL;
8839 partial_unique_rel->cheapest_startup_path = NULL;
8840 partial_unique_rel->cheapest_total_path = NULL;
8841 partial_unique_rel->cheapest_parameterized_paths = NIL;
8842
8843 /* Estimate number of output rows */
8844 partial_unique_rel->rows = estimate_num_groups(root,
8845 sjinfo->semi_rhs_exprs,
8846 cheapest_partial_path->rows,
8847 NULL,
8848 NULL);
8849 partial_unique_rel->reltarget = unique_rel->reltarget;
8850
8851 /* Consider sort-based implementations, if possible. */
8852 if (sjinfo->semi_can_btree)
8853 {
8854 ListCell *lc;
8855
8856 /*
8857 * Use any available suitably-sorted path as input, and also consider
8858 * sorting the cheapest partial path and incremental sort on any paths
8859 * with presorted keys.
8860 */
8861 foreach(lc, input_rel->partial_pathlist)
8862 {
8863 Path *input_path = (Path *) lfirst(lc);
8864 Path *path;
8865 bool is_sorted;
8866 int presorted_keys;
8867
8868 is_sorted = pathkeys_count_contained_in(sortPathkeys,
8869 input_path->pathkeys,
8870 &presorted_keys);
8871
8872 /*
8873 * Ignore paths that are not suitably or partially sorted, unless
8874 * they are the cheapest partial path (no need to deal with paths
8875 * which have presorted keys when incremental sort is disabled).
8876 */
8877 if (!is_sorted && input_path != cheapest_partial_path &&
8878 (presorted_keys == 0 || !enable_incremental_sort))
8879 continue;
8880
8881 /*
8882 * Make a separate ProjectionPath in case we need a Result node.
8883 */
8884 path = (Path *) create_projection_path(root,
8885 partial_unique_rel,
8886 input_path,
8887 partial_unique_rel->reltarget);
8888
8889 if (!is_sorted)
8890 {
8891 /*
8892 * We've no need to consider both a sort and incremental sort.
8893 * We'll just do a sort if there are no presorted keys and an
8894 * incremental sort when there are presorted keys.
8895 */
8896 if (presorted_keys == 0 || !enable_incremental_sort)
8897 path = (Path *) create_sort_path(root,
8898 partial_unique_rel,
8899 path,
8900 sortPathkeys,
8901 -1.0);
8902 else
8904 partial_unique_rel,
8905 path,
8906 sortPathkeys,
8907 presorted_keys,
8908 -1.0);
8909 }
8910
8911 path = (Path *) create_unique_path(root, partial_unique_rel, path,
8912 list_length(sortPathkeys),
8913 partial_unique_rel->rows);
8914
8915 add_partial_path(partial_unique_rel, path);
8916 }
8917 }
8918
8919 /* Consider hash-based implementation, if possible. */
8920 if (sjinfo->semi_can_hash)
8921 {
8922 Path *path;
8923
8924 /*
8925 * Make a separate ProjectionPath in case we need a Result node.
8926 */
8927 path = (Path *) create_projection_path(root,
8928 partial_unique_rel,
8929 cheapest_partial_path,
8930 partial_unique_rel->reltarget);
8931
8932 path = (Path *) create_agg_path(root,
8933 partial_unique_rel,
8934 path,
8935 cheapest_partial_path->pathtarget,
8936 AGG_HASHED,
8937 AGGSPLIT_SIMPLE,
8938 groupClause,
8939 NIL,
8940 NULL,
8941 partial_unique_rel->rows);
8942
8943 add_partial_path(partial_unique_rel, path);
8944 }
8945
8946 if (partial_unique_rel->partial_pathlist != NIL)
8947 {
8948 generate_useful_gather_paths(root, partial_unique_rel, true);
8949 set_cheapest(partial_unique_rel);
8950
8951 /*
8952 * Finally, create paths to unique-ify the final result. This step is
8953 * needed to remove any duplicates due to combining rows from parallel
8954 * workers.
8955 */
8956 create_final_unique_paths(root, partial_unique_rel,
8957 sortPathkeys, groupClause,
8958 sjinfo, unique_rel);
8959 }
8960}
8961
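/*
 * Editorial sketch (not part of planner.c): the overall path shape that
 * create_partial_unique_paths() tries to build when sort-based
 * unique-ification is chosen at both levels.  The exact shape depends on
 * which strategies survive costing.
 *
 *     Unique                          <- added by create_final_unique_paths()
 *       -> Gather Merge (or Gather)   <- generate_useful_gather_paths()
 *            -> Unique                <- per-worker de-duplication
 *                 -> Sort / Incremental Sort
 *                      -> partial path from input_rel
 *
 * The top-level Unique is required because rows de-duplicated independently
 * in different workers can still duplicate each other once the Gather node
 * combines the worker streams.
 */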
8962/*
8963 * Choose a unique name for some subroot.
8964 *
8965 * Modifies glob->subplanNames to track names already used.
8966 */
8967char *
8968choose_plan_name(PlannerGlobal *glob, const char *name, bool always_number)
8969{
8970 unsigned n;
8971
8972 /*
8973 * If a numeric suffix is not required, then search the list of
8974 * previously-assigned names for a match. If none is found, then we can
8975 * use the provided name without modification.
8976 */
8977 if (!always_number)
8978 {
8979 bool found = false;
8980
8981 foreach_ptr(char, subplan_name, glob->subplanNames)
8982 {
8983 if (strcmp(subplan_name, name) == 0)
8984 {
8985 found = true;
8986 break;
8987 }
8988 }
8989
8990 if (!found)
8991 {
8992 /* pstrdup here is just to avoid cast-away-const */
8993 char *chosen_name = pstrdup(name);
8994
8995 glob->subplanNames = lappend(glob->subplanNames, chosen_name);
8996 return chosen_name;
8997 }
8998 }
8999
9000 /*
9001 * If a numeric suffix is required or if the un-suffixed name is already
9002 * in use, then loop until we find a positive integer that produces a
9003 * novel name.
9004 */
9005 for (n = 1; true; ++n)
9006 {
9007 char *proposed_name = psprintf("%s_%u", name, n);
9008 bool found = false;
9009
9010 foreach_ptr(char, subplan_name, glob->subplanNames)
9011 {
9012 if (strcmp(subplan_name, proposed_name) == 0)
9013 {
9014 found = true;
9015 break;
9016 }
9017 }
9018
9019 if (!found)
9020 {
9021 glob->subplanNames = lappend(glob->subplanNames, proposed_name);
9022 return proposed_name;
9023 }
9024
9025 pfree(proposed_name);
9026 }
9027}
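/*
 * Editorial sketch (not part of planner.c): expected behaviour of
 * choose_plan_name() for a caller holding a PlannerGlobal, assuming
 * glob->subplanNames starts out empty.  The function and variable names
 * below are hypothetical.
 */
static void
choose_plan_name_example(PlannerGlobal *glob)
{
    /* always_number = true skips the bare-name reuse check and suffixes immediately */
    char       *n1 = choose_plan_name(glob, "InitPlan", true);     /* "InitPlan_1" */

    /* always_number = false hands back the bare name while it is still unused */
    char       *n2 = choose_plan_name(glob, "CTE x", false);       /* "CTE x" */

    /* asking for the same name again falls through to the numeric-suffix loop */
    char       *n3 = choose_plan_name(glob, "CTE x", false);       /* "CTE x_1" */

    (void) n1;
    (void) n2;
    (void) n3;
}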