planner.c
1/*-------------------------------------------------------------------------
2 *
3 * planner.c
4 * The query optimizer external interface.
5 *
6 * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 *
10 * IDENTIFICATION
11 * src/backend/optimizer/plan/planner.c
12 *
13 *-------------------------------------------------------------------------
14 */
15
16#include "postgres.h"
17
18#include <limits.h>
19#include <math.h>
20
21#include "access/genam.h"
22#include "access/parallel.h"
23#include "access/sysattr.h"
24#include "access/table.h"
26#include "catalog/pg_inherits.h"
27#include "catalog/pg_proc.h"
28#include "catalog/pg_type.h"
29#include "executor/executor.h"
30#include "foreign/fdwapi.h"
31#include "jit/jit.h"
32#include "lib/bipartite_match.h"
33#include "lib/knapsack.h"
34#include "miscadmin.h"
35#include "nodes/makefuncs.h"
36#include "nodes/nodeFuncs.h"
37#ifdef OPTIMIZER_DEBUG
38#include "nodes/print.h"
39#endif
40#include "nodes/supportnodes.h"
42#include "optimizer/clauses.h"
43#include "optimizer/cost.h"
44#include "optimizer/optimizer.h"
46#include "optimizer/pathnode.h"
47#include "optimizer/paths.h"
48#include "optimizer/plancat.h"
49#include "optimizer/planmain.h"
50#include "optimizer/planner.h"
51#include "optimizer/prep.h"
52#include "optimizer/subselect.h"
53#include "optimizer/tlist.h"
54#include "parser/analyze.h"
55#include "parser/parse_agg.h"
56#include "parser/parse_clause.h"
58#include "parser/parsetree.h"
61#include "utils/lsyscache.h"
62#include "utils/rel.h"
63#include "utils/selfuncs.h"
64
65/* GUC parameters */
66double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION;
67int debug_parallel_query = DEBUG_PARALLEL_OFF;
68bool parallel_leader_participation = true;
70
71/* Hook for plugins to get control in planner() */
72planner_hook_type planner_hook = NULL;
73
74/* Hook for plugins to get control when grouping_planner() plans upper rels */
75create_upper_paths_hook_type create_upper_paths_hook = NULL;
76
77
78/* Expression kind codes for preprocess_expression */
79#define EXPRKIND_QUAL 0
80#define EXPRKIND_TARGET 1
81#define EXPRKIND_RTFUNC 2
82#define EXPRKIND_RTFUNC_LATERAL 3
83#define EXPRKIND_VALUES 4
84#define EXPRKIND_VALUES_LATERAL 5
85#define EXPRKIND_LIMIT 6
86#define EXPRKIND_APPINFO 7
87#define EXPRKIND_PHV 8
88#define EXPRKIND_TABLESAMPLE 9
89#define EXPRKIND_ARBITER_ELEM 10
90#define EXPRKIND_TABLEFUNC 11
91#define EXPRKIND_TABLEFUNC_LATERAL 12
92#define EXPRKIND_GROUPEXPR 13
93
94/*
95 * Data specific to grouping sets
96 */
97typedef struct
98{
99 List *rollups;
100 List *hash_sets_idx;
101 double dNumHashGroups;
102 bool any_hashable;
103 Bitmapset *unsortable_refs;
104 Bitmapset *unhashable_refs;
105 List *unsortable_sets;
106 int *tleref_to_colnum_map;
107} grouping_sets_data;
108
109/*
110 * Temporary structure for use during WindowClause reordering in order to be
111 * able to sort WindowClauses on partitioning/ordering prefix.
112 */
113typedef struct
114{
115 WindowClause *wc; /* The WindowClause being processed */
116 List *uniqueOrder; /* A List of unique ordering/partitioning
117 * clauses per Window */
118} WindowClauseSortData;
119
120/* Passthrough data for standard_qp_callback */
121typedef struct
122{
123 List *activeWindows; /* active windows, if any */
124 grouping_sets_data *gset_data; /* grouping sets data, if any */
125 SetOperationStmt *setop; /* parent set operation or NULL if not a
126 * subquery belonging to a set operation */
127} standard_qp_extra;
128
129/* Local functions */
130static Node *preprocess_expression(PlannerInfo *root, Node *expr, int kind);
131static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode);
132static void grouping_planner(PlannerInfo *root, double tuple_fraction,
133 SetOperationStmt *setops);
134static grouping_sets_data *preprocess_grouping_sets(PlannerInfo *root);
135static List *remap_to_groupclause_idx(List *groupClause, List *gsets,
136 int *tleref_to_colnum_map);
137static void preprocess_rowmarks(PlannerInfo *root);
138static double preprocess_limit(PlannerInfo *root,
139 double tuple_fraction,
140 int64 *offset_est, int64 *count_est);
141static List *preprocess_groupclause(PlannerInfo *root, List *force);
142static List *extract_rollup_sets(List *groupingSets);
143static List *reorder_grouping_sets(List *groupingSets, List *sortclause);
144static void standard_qp_callback(PlannerInfo *root, void *extra);
145static double get_number_of_groups(PlannerInfo *root,
146 double path_rows,
147 grouping_sets_data *gd,
148 List *target_list);
149static RelOptInfo *create_grouping_paths(PlannerInfo *root,
150 RelOptInfo *input_rel,
151 PathTarget *target,
152 bool target_parallel_safe,
153 grouping_sets_data *gd);
154static bool is_degenerate_grouping(PlannerInfo *root);
155static void create_degenerate_grouping_paths(PlannerInfo *root,
156 RelOptInfo *input_rel,
157 RelOptInfo *grouped_rel);
158static RelOptInfo *make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
159 PathTarget *target, bool target_parallel_safe,
160 Node *havingQual);
161static void create_ordinary_grouping_paths(PlannerInfo *root,
162 RelOptInfo *input_rel,
163 RelOptInfo *grouped_rel,
164 const AggClauseCosts *agg_costs,
165 grouping_sets_data *gd,
166 GroupPathExtraData *extra,
167 RelOptInfo **partially_grouped_rel_p);
168static void consider_groupingsets_paths(PlannerInfo *root,
169 RelOptInfo *grouped_rel,
170 Path *path,
171 bool is_sorted,
172 bool can_hash,
173 grouping_sets_data *gd,
174 const AggClauseCosts *agg_costs,
175 double dNumGroups);
176static RelOptInfo *create_window_paths(PlannerInfo *root,
177 RelOptInfo *input_rel,
178 PathTarget *input_target,
179 PathTarget *output_target,
180 bool output_target_parallel_safe,
181 WindowFuncLists *wflists,
182 List *activeWindows);
183static void create_one_window_path(PlannerInfo *root,
184 RelOptInfo *window_rel,
185 Path *path,
186 PathTarget *input_target,
187 PathTarget *output_target,
188 WindowFuncLists *wflists,
189 List *activeWindows);
190static RelOptInfo *create_distinct_paths(PlannerInfo *root,
191 RelOptInfo *input_rel,
192 PathTarget *target);
193static void create_partial_distinct_paths(PlannerInfo *root,
194 RelOptInfo *input_rel,
195 RelOptInfo *final_distinct_rel,
196 PathTarget *target);
197static RelOptInfo *create_final_distinct_paths(PlannerInfo *root,
198 RelOptInfo *input_rel,
199 RelOptInfo *distinct_rel);
200static List *get_useful_pathkeys_for_distinct(PlannerInfo *root,
201 List *needed_pathkeys,
202 List *path_pathkeys);
203static RelOptInfo *create_ordered_paths(PlannerInfo *root,
204 RelOptInfo *input_rel,
205 PathTarget *target,
206 bool target_parallel_safe,
207 double limit_tuples);
208static PathTarget *make_group_input_target(PlannerInfo *root,
209 PathTarget *final_target);
210static PathTarget *make_partial_grouping_target(PlannerInfo *root,
211 PathTarget *grouping_target,
212 Node *havingQual);
213static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist);
214static void optimize_window_clauses(PlannerInfo *root,
215 WindowFuncLists *wflists);
216static List *select_active_windows(PlannerInfo *root, WindowFuncLists *wflists);
217static PathTarget *make_window_input_target(PlannerInfo *root,
218 PathTarget *final_target,
219 List *activeWindows);
220static List *make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
221 List *tlist);
222static PathTarget *make_sort_input_target(PlannerInfo *root,
223 PathTarget *final_target,
224 bool *have_postponed_srfs);
225static void adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel,
226 List *targets, List *targets_contain_srfs);
227static void add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
228 RelOptInfo *grouped_rel,
229 RelOptInfo *partially_grouped_rel,
230 const AggClauseCosts *agg_costs,
231 grouping_sets_data *gd,
232 double dNumGroups,
233 GroupPathExtraData *extra);
234static RelOptInfo *create_partial_grouping_paths(PlannerInfo *root,
235 RelOptInfo *grouped_rel,
236 RelOptInfo *input_rel,
237 grouping_sets_data *gd,
238 GroupPathExtraData *extra,
239 bool force_rel_creation);
240static Path *make_ordered_path(PlannerInfo *root,
241 RelOptInfo *rel,
242 Path *path,
243 Path *cheapest_path,
244 List *pathkeys,
245 double limit_tuples);
246static void gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel);
247static bool can_partial_agg(PlannerInfo *root);
248static void apply_scanjoin_target_to_paths(PlannerInfo *root,
249 RelOptInfo *rel,
250 List *scanjoin_targets,
251 List *scanjoin_targets_contain_srfs,
252 bool scanjoin_target_parallel_safe,
253 bool tlist_same_exprs);
254static void create_partitionwise_grouping_paths(PlannerInfo *root,
255 RelOptInfo *input_rel,
256 RelOptInfo *grouped_rel,
257 RelOptInfo *partially_grouped_rel,
258 const AggClauseCosts *agg_costs,
259 grouping_sets_data *gd,
260 PartitionwiseAggregateType patype,
261 GroupPathExtraData *extra);
262static bool group_by_has_partkey(RelOptInfo *input_rel,
263 List *targetList,
264 List *groupClause);
265static int common_prefix_cmp(const void *a, const void *b);
266static List *generate_setop_child_grouplist(SetOperationStmt *op,
267 List *targetlist);
268
269
270/*****************************************************************************
271 *
272 * Query optimizer entry point
273 *
274 * To support loadable plugins that monitor or modify planner behavior,
275 * we provide a hook variable that lets a plugin get control before and
276 * after the standard planning process. The plugin would normally call
277 * standard_planner().
278 *
279 * Note to plugin authors: standard_planner() scribbles on its Query input,
280 * so you'd better copy that data structure if you want to plan more than once.
281 *
282 *****************************************************************************/
283PlannedStmt *
284planner(Query *parse, const char *query_string, int cursorOptions,
285 ParamListInfo boundParams)
286{
287 PlannedStmt *result;
288
289 if (planner_hook)
290 result = (*planner_hook) (parse, query_string, cursorOptions, boundParams);
291 else
292 result = standard_planner(parse, query_string, cursorOptions, boundParams);
293 return result;
294}
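/*
 * Illustrative sketch, not part of planner.c: a minimal loadable module that
 * installs planner_hook as described above.  The module and the name
 * "demo_planner" are hypothetical; planner_hook, standard_planner() and
 * PG_MODULE_MAGIC are the real extension-facing APIs.  A well-behaved plugin
 * saves any previously installed hook and chains to it, falling back to
 * standard_planner() otherwise.  Such a module would typically be loaded via
 * shared_preload_libraries.
 */
#include "postgres.h"
#include "fmgr.h"
#include "nodes/params.h"
#include "optimizer/planner.h"

PG_MODULE_MAGIC;

static planner_hook_type prev_planner_hook = NULL;

static PlannedStmt *
demo_planner(Query *parse, const char *query_string,
             int cursorOptions, ParamListInfo boundParams)
{
    /* inspect (or copy) the Query here; standard_planner() scribbles on it */
    if (prev_planner_hook)
        return prev_planner_hook(parse, query_string,
                                 cursorOptions, boundParams);
    return standard_planner(parse, query_string,
                            cursorOptions, boundParams);
}

void
_PG_init(void)
{
    /* chain behind any hook installed by an earlier-loaded module */
    prev_planner_hook = planner_hook;
    planner_hook = demo_planner;
}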
295
296PlannedStmt *
297standard_planner(Query *parse, const char *query_string, int cursorOptions,
298 ParamListInfo boundParams)
299{
300 PlannedStmt *result;
301 PlannerGlobal *glob;
302 double tuple_fraction;
303 PlannerInfo *root;
304 RelOptInfo *final_rel;
305 Path *best_path;
306 Plan *top_plan;
307 ListCell *lp,
308 *lr;
309
310 /*
311 * Set up global state for this planner invocation. This data is needed
312 * across all levels of sub-Query that might exist in the given command,
313 * so we keep it in a separate struct that's linked to by each per-Query
314 * PlannerInfo.
315 */
316 glob = makeNode(PlannerGlobal);
317
318 glob->boundParams = boundParams;
319 glob->subplans = NIL;
320 glob->subpaths = NIL;
321 glob->subroots = NIL;
322 glob->rewindPlanIDs = NULL;
323 glob->finalrtable = NIL;
324 glob->finalrteperminfos = NIL;
325 glob->finalrowmarks = NIL;
326 glob->resultRelations = NIL;
327 glob->appendRelations = NIL;
328 glob->relationOids = NIL;
329 glob->invalItems = NIL;
330 glob->paramExecTypes = NIL;
331 glob->lastPHId = 0;
332 glob->lastRowMarkId = 0;
333 glob->lastPlanNodeId = 0;
334 glob->transientPlan = false;
335 glob->dependsOnRole = false;
336
337 /*
338 * Assess whether it's feasible to use parallel mode for this query. We
339 * can't do this in a standalone backend, or if the command will try to
340 * modify any data, or if this is a cursor operation, or if GUCs are set
341 * to values that don't permit parallelism, or if parallel-unsafe
342 * functions are present in the query tree.
343 *
344 * (Note that we do allow CREATE TABLE AS, SELECT INTO, and CREATE
345 * MATERIALIZED VIEW to use parallel plans, but this is safe only because
346 * the command is writing into a completely new table which workers won't
347 * be able to see. If the workers could see the table, the fact that
348 * group locking would cause them to ignore the leader's heavyweight GIN
349 * page locks would make this unsafe. We'll have to fix that somehow if
350 * we want to allow parallel inserts in general; updates and deletes have
351 * additional problems especially around combo CIDs.)
352 *
353 * For now, we don't try to use parallel mode if we're running inside a
354 * parallel worker. We might eventually be able to relax this
355 * restriction, but for now it seems best not to have parallel workers
356 * trying to create their own parallel workers.
357 */
358 if ((cursorOptions & CURSOR_OPT_PARALLEL_OK) != 0 &&
359 IsUnderPostmaster &&
360 parse->commandType == CMD_SELECT &&
361 !parse->hasModifyingCTE &&
362 max_parallel_workers_per_gather > 0 &&
363 !IsParallelWorker())
364 {
365 /* all the cheap tests pass, so scan the query tree */
366 glob->maxParallelHazard = max_parallel_hazard(parse);
367 glob->parallelModeOK = (glob->maxParallelHazard != PROPARALLEL_UNSAFE);
368 }
369 else
370 {
371 /* skip the query tree scan, just assume it's unsafe */
372 glob->maxParallelHazard = PROPARALLEL_UNSAFE;
373 glob->parallelModeOK = false;
374 }
375
376 /*
377 * glob->parallelModeNeeded is normally set to false here and changed to
378 * true during plan creation if a Gather or Gather Merge plan is actually
379 * created (cf. create_gather_plan, create_gather_merge_plan).
380 *
381 * However, if debug_parallel_query = on or debug_parallel_query =
382 * regress, then we impose parallel mode whenever it's safe to do so, even
383 * if the final plan doesn't use parallelism. It's not safe to do so if
384 * the query contains anything parallel-unsafe; parallelModeOK will be
385 * false in that case. Note that parallelModeOK can't change after this
386 * point. Otherwise, everything in the query is either parallel-safe or
387 * parallel-restricted, and in either case it should be OK to impose
388 * parallel-mode restrictions. If that ends up breaking something, then
389 * either some function the user included in the query is incorrectly
390 * labeled as parallel-safe or parallel-restricted when in reality it's
391 * parallel-unsafe, or else the query planner itself has a bug.
392 */
393 glob->parallelModeNeeded = glob->parallelModeOK &&
394 (debug_parallel_query != DEBUG_PARALLEL_OFF);
395
396 /* Determine what fraction of the plan is likely to be scanned */
397 if (cursorOptions & CURSOR_OPT_FAST_PLAN)
398 {
399 /*
400 * We have no real idea how many tuples the user will ultimately FETCH
401 * from a cursor, but it is often the case that he doesn't want 'em
402 * all, or would prefer a fast-start plan anyway so that he can
403 * process some of the tuples sooner. Use a GUC parameter to decide
404 * what fraction to optimize for.
405 */
406 tuple_fraction = cursor_tuple_fraction;
407
408 /*
409 * We document cursor_tuple_fraction as simply being a fraction, which
410 * means the edge cases 0 and 1 have to be treated specially here. We
411 * convert 1 to 0 ("all the tuples") and 0 to a very small fraction.
412 */
413 if (tuple_fraction >= 1.0)
414 tuple_fraction = 0.0;
415 else if (tuple_fraction <= 0.0)
416 tuple_fraction = 1e-10;
417 }
418 else
419 {
420 /* Default assumption is we need all the tuples */
421 tuple_fraction = 0.0;
422 }
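/*
 * Illustrative note: DEFAULT_CURSOR_TUPLE_FRACTION is 0.1, so a query
 * planned with CURSOR_OPT_FAST_PLAN (e.g. a declared cursor) is costed as
 * though only about 10% of its rows will be fetched, favoring fast-start
 * plans; setting cursor_tuple_fraction to 1.0 asks for whole-result
 * optimization and is converted to 0.0 above.
 */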
423
424 /* primary planning entry point (may recurse for subqueries) */
425 root = subquery_planner(glob, parse, NULL, false, tuple_fraction, NULL);
426
427 /* Select best Path and turn it into a Plan */
428 final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
429 best_path = get_cheapest_fractional_path(final_rel, tuple_fraction);
430
431 top_plan = create_plan(root, best_path);
432
433 /*
434 * If creating a plan for a scrollable cursor, make sure it can run
435 * backwards on demand. Add a Material node at the top at need.
436 */
437 if (cursorOptions & CURSOR_OPT_SCROLL)
438 {
439 if (!ExecSupportsBackwardScan(top_plan))
440 top_plan = materialize_finished_plan(top_plan);
441 }
442
443 /*
444 * Optionally add a Gather node for testing purposes, provided this is
445 * actually a safe thing to do.
446 *
447 * We can add Gather even when top_plan has parallel-safe initPlans, but
448 * then we have to move the initPlans to the Gather node because of
449 * SS_finalize_plan's limitations. That would cause cosmetic breakage of
450 * regression tests when debug_parallel_query = regress, because initPlans
451 * that would normally appear on the top_plan move to the Gather, causing
452 * them to disappear from EXPLAIN output. That doesn't seem worth kluging
453 * EXPLAIN to hide, so skip it when debug_parallel_query = regress.
454 */
455 if (debug_parallel_query != DEBUG_PARALLEL_OFF &&
456 top_plan->parallel_safe &&
457 (top_plan->initPlan == NIL ||
458 debug_parallel_query != DEBUG_PARALLEL_REGRESS))
459 {
460 Gather *gather = makeNode(Gather);
461 Cost initplan_cost;
462 bool unsafe_initplans;
463
464 gather->plan.targetlist = top_plan->targetlist;
465 gather->plan.qual = NIL;
466 gather->plan.lefttree = top_plan;
467 gather->plan.righttree = NULL;
468 gather->num_workers = 1;
469 gather->single_copy = true;
470 gather->invisible = (debug_parallel_query == DEBUG_PARALLEL_REGRESS);
471
472 /* Transfer any initPlans to the new top node */
473 gather->plan.initPlan = top_plan->initPlan;
474 top_plan->initPlan = NIL;
475
476 /*
477 * Since this Gather has no parallel-aware descendants to signal to,
478 * we don't need a rescan Param.
479 */
480 gather->rescan_param = -1;
481
482 /*
483 * Ideally we'd use cost_gather here, but setting up dummy path data
484 * to satisfy it doesn't seem much cleaner than knowing what it does.
485 */
486 gather->plan.startup_cost = top_plan->startup_cost +
487 parallel_setup_cost;
488 gather->plan.total_cost = top_plan->total_cost +
489 parallel_setup_cost + parallel_tuple_cost * top_plan->plan_rows;
490 gather->plan.plan_rows = top_plan->plan_rows;
491 gather->plan.plan_width = top_plan->plan_width;
492 gather->plan.parallel_aware = false;
493 gather->plan.parallel_safe = false;
494
495 /*
496 * Delete the initplans' cost from top_plan. We needn't add it to the
497 * Gather node, since the above coding already included it there.
498 */
499 SS_compute_initplan_cost(gather->plan.initPlan,
500 &initplan_cost, &unsafe_initplans);
501 top_plan->startup_cost -= initplan_cost;
502 top_plan->total_cost -= initplan_cost;
503
504 /* use parallel mode for parallel plans. */
505 root->glob->parallelModeNeeded = true;
506
507 top_plan = &gather->plan;
508 }
509
510 /*
511 * If any Params were generated, run through the plan tree and compute
512 * each plan node's extParam/allParam sets. Ideally we'd merge this into
513 * set_plan_references' tree traversal, but for now it has to be separate
514 * because we need to visit subplans before not after main plan.
515 */
516 if (glob->paramExecTypes != NIL)
517 {
518 Assert(list_length(glob->subplans) == list_length(glob->subroots));
519 forboth(lp, glob->subplans, lr, glob->subroots)
520 {
521 Plan *subplan = (Plan *) lfirst(lp);
522 PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);
523
524 SS_finalize_plan(subroot, subplan);
525 }
526 SS_finalize_plan(root, top_plan);
527 }
528
529 /* final cleanup of the plan */
530 Assert(glob->finalrtable == NIL);
531 Assert(glob->finalrteperminfos == NIL);
532 Assert(glob->finalrowmarks == NIL);
533 Assert(glob->resultRelations == NIL);
534 Assert(glob->appendRelations == NIL);
535 top_plan = set_plan_references(root, top_plan);
536 /* ... and the subplans (both regular subplans and initplans) */
537 Assert(list_length(glob->subplans) == list_length(glob->subroots));
538 forboth(lp, glob->subplans, lr, glob->subroots)
539 {
540 Plan *subplan = (Plan *) lfirst(lp);
541 PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);
542
543 lfirst(lp) = set_plan_references(subroot, subplan);
544 }
545
546 /* build the PlannedStmt result */
547 result = makeNode(PlannedStmt);
548
549 result->commandType = parse->commandType;
550 result->queryId = parse->queryId;
551 result->hasReturning = (parse->returningList != NIL);
552 result->hasModifyingCTE = parse->hasModifyingCTE;
553 result->canSetTag = parse->canSetTag;
554 result->transientPlan = glob->transientPlan;
555 result->dependsOnRole = glob->dependsOnRole;
556 result->parallelModeNeeded = glob->parallelModeNeeded;
557 result->planTree = top_plan;
558 result->rtable = glob->finalrtable;
559 result->permInfos = glob->finalrteperminfos;
560 result->resultRelations = glob->resultRelations;
561 result->appendRelations = glob->appendRelations;
562 result->subplans = glob->subplans;
563 result->rewindPlanIDs = glob->rewindPlanIDs;
564 result->rowMarks = glob->finalrowmarks;
565 result->relationOids = glob->relationOids;
566 result->invalItems = glob->invalItems;
567 result->paramExecTypes = glob->paramExecTypes;
568 /* utilityStmt should be null, but we might as well copy it */
569 result->utilityStmt = parse->utilityStmt;
570 result->stmt_location = parse->stmt_location;
571 result->stmt_len = parse->stmt_len;
572
573 result->jitFlags = PGJIT_NONE;
574 if (jit_enabled && jit_above_cost >= 0 &&
575 top_plan->total_cost > jit_above_cost)
576 {
577 result->jitFlags |= PGJIT_PERFORM;
578
579 /*
580 * Decide how much effort should be put into generating better code.
581 */
582 if (jit_optimize_above_cost >= 0 &&
583 top_plan->total_cost > jit_optimize_above_cost)
584 result->jitFlags |= PGJIT_OPT3;
585 if (jit_inline_above_cost >= 0 &&
586 top_plan->total_cost > jit_inline_above_cost)
587 result->jitFlags |= PGJIT_INLINE;
588
589 /*
590 * Decide which operations should be JITed.
591 */
592 if (jit_expressions)
593 result->jitFlags |= PGJIT_EXPR;
594 if (jit_tuple_deforming)
595 result->jitFlags |= PGJIT_DEFORM;
596 }
597
598 if (glob->partition_directory != NULL)
599 DestroyPartitionDirectory(glob->partition_directory);
600
601 return result;
602}
603
604
605/*--------------------
606 * subquery_planner
607 * Invokes the planner on a subquery. We recurse to here for each
608 * sub-SELECT found in the query tree.
609 *
610 * glob is the global state for the current planner run.
611 * parse is the querytree produced by the parser & rewriter.
612 * parent_root is the immediate parent Query's info (NULL at the top level).
613 * hasRecursion is true if this is a recursive WITH query.
614 * tuple_fraction is the fraction of tuples we expect will be retrieved.
615 * tuple_fraction is interpreted as explained for grouping_planner, below.
616 * setops is used for set operation subqueries to provide the subquery with
617 * the context in which it's being used so that Paths correctly sorted for the
618 * set operation can be generated. NULL when not planning a set operation
619 * child, or when a child of a set op that isn't interested in sorted input.
620 *
621 * Basically, this routine does the stuff that should only be done once
622 * per Query object. It then calls grouping_planner. At one time,
623 * grouping_planner could be invoked recursively on the same Query object;
624 * that's not currently true, but we keep the separation between the two
625 * routines anyway, in case we need it again someday.
626 *
627 * subquery_planner will be called recursively to handle sub-Query nodes
628 * found within the query's expressions and rangetable.
629 *
630 * Returns the PlannerInfo struct ("root") that contains all data generated
631 * while planning the subquery. In particular, the Path(s) attached to
632 * the (UPPERREL_FINAL, NULL) upperrel represent our conclusions about the
633 * cheapest way(s) to implement the query. The top level will select the
634 * best Path and pass it through createplan.c to produce a finished Plan.
635 *--------------------
636 */
637PlannerInfo *
638subquery_planner(PlannerGlobal *glob, Query *parse, PlannerInfo *parent_root,
639 bool hasRecursion, double tuple_fraction,
640 SetOperationStmt *setops)
641{
642 PlannerInfo *root;
643 List *newWithCheckOptions;
644 List *newHaving;
645 bool hasOuterJoins;
646 bool hasResultRTEs;
647 RelOptInfo *final_rel;
648 ListCell *l;
649
650 /* Create a PlannerInfo data structure for this subquery */
651 root = makeNode(PlannerInfo);
652 root->parse = parse;
653 root->glob = glob;
654 root->query_level = parent_root ? parent_root->query_level + 1 : 1;
655 root->parent_root = parent_root;
656 root->plan_params = NIL;
657 root->outer_params = NULL;
658 root->planner_cxt = CurrentMemoryContext;
659 root->init_plans = NIL;
660 root->cte_plan_ids = NIL;
661 root->multiexpr_params = NIL;
662 root->join_domains = NIL;
663 root->eq_classes = NIL;
664 root->ec_merging_done = false;
665 root->last_rinfo_serial = 0;
666 root->all_result_relids =
667 parse->resultRelation ? bms_make_singleton(parse->resultRelation) : NULL;
668 root->leaf_result_relids = NULL; /* we'll find out leaf-ness later */
669 root->append_rel_list = NIL;
670 root->row_identity_vars = NIL;
671 root->rowMarks = NIL;
672 memset(root->upper_rels, 0, sizeof(root->upper_rels));
673 memset(root->upper_targets, 0, sizeof(root->upper_targets));
674 root->processed_groupClause = NIL;
675 root->processed_distinctClause = NIL;
676 root->processed_tlist = NIL;
677 root->update_colnos = NIL;
678 root->grouping_map = NULL;
679 root->minmax_aggs = NIL;
680 root->qual_security_level = 0;
681 root->hasPseudoConstantQuals = false;
682 root->hasAlternativeSubPlans = false;
683 root->placeholdersFrozen = false;
684 root->hasRecursion = hasRecursion;
685 if (hasRecursion)
686 root->wt_param_id = assign_special_exec_param(root);
687 else
688 root->wt_param_id = -1;
689 root->non_recursive_path = NULL;
690 root->partColsUpdated = false;
691
692 /*
693 * Create the top-level join domain. This won't have valid contents until
694 * deconstruct_jointree fills it in, but the node needs to exist before
695 * that so we can build EquivalenceClasses referencing it.
696 */
697 root->join_domains = list_make1(makeNode(JoinDomain));
698
699 /*
700 * If there is a WITH list, process each WITH query and either convert it
701 * to RTE_SUBQUERY RTE(s) or build an initplan SubPlan structure for it.
702 */
703 if (parse->cteList)
704 SS_process_ctes(root);
705
706 /*
707 * If it's a MERGE command, transform the joinlist as appropriate.
708 */
709 transform_MERGE_to_join(parse);
710
711 /*
712 * If the FROM clause is empty, replace it with a dummy RTE_RESULT RTE, so
713 * that we don't need so many special cases to deal with that situation.
714 */
715 replace_empty_jointree(parse);
716
717 /*
718 * Look for ANY and EXISTS SubLinks in WHERE and JOIN/ON clauses, and try
719 * to transform them into joins. Note that this step does not descend
720 * into subqueries; if we pull up any subqueries below, their SubLinks are
721 * processed just before pulling them up.
722 */
723 if (parse->hasSubLinks)
724 pull_up_sublinks(root);
725
726 /*
727 * Scan the rangetable for function RTEs, do const-simplification on them,
728 * and then inline them if possible (producing subqueries that might get
729 * pulled up next). Recursion issues here are handled in the same way as
730 * for SubLinks.
731 */
732 preprocess_function_rtes(root);
733
734 /*
735 * Check to see if any subqueries in the jointree can be merged into this
736 * query.
737 */
738 pull_up_subqueries(root);
739
740 /*
741 * If this is a simple UNION ALL query, flatten it into an appendrel. We
742 * do this now because it requires applying pull_up_subqueries to the leaf
743 * queries of the UNION ALL, which weren't touched above because they
744 * weren't referenced by the jointree (they will be after we do this).
745 */
746 if (parse->setOperations)
747 flatten_simple_union_all(root);
748
749 /*
750 * Survey the rangetable to see what kinds of entries are present. We can
751 * skip some later processing if relevant SQL features are not used; for
752 * example if there are no JOIN RTEs we can avoid the expense of doing
753 * flatten_join_alias_vars(). This must be done after we have finished
754 * adding rangetable entries, of course. (Note: actually, processing of
755 * inherited or partitioned rels can cause RTEs for their child tables to
756 * get added later; but those must all be RTE_RELATION entries, so they
757 * don't invalidate the conclusions drawn here.)
758 */
759 root->hasJoinRTEs = false;
760 root->hasLateralRTEs = false;
761 root->group_rtindex = 0;
762 hasOuterJoins = false;
763 hasResultRTEs = false;
764 foreach(l, parse->rtable)
765 {
766 RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
767
768 switch (rte->rtekind)
769 {
770 case RTE_RELATION:
771 if (rte->inh)
772 {
773 /*
774 * Check to see if the relation actually has any children;
775 * if not, clear the inh flag so we can treat it as a
776 * plain base relation.
777 *
778 * Note: this could give a false-positive result, if the
779 * rel once had children but no longer does. We used to
780 * be able to clear rte->inh later on when we discovered
781 * that, but no more; we have to handle such cases as
782 * full-fledged inheritance.
783 */
784 rte->inh = has_subclass(rte->relid);
785 }
786 break;
787 case RTE_JOIN:
788 root->hasJoinRTEs = true;
789 if (IS_OUTER_JOIN(rte->jointype))
790 hasOuterJoins = true;
791 break;
792 case RTE_RESULT:
793 hasResultRTEs = true;
794 break;
795 case RTE_GROUP:
796 Assert(parse->hasGroupRTE);
797 root->group_rtindex = list_cell_number(parse->rtable, l) + 1;
798 break;
799 default:
800 /* No work here for other RTE types */
801 break;
802 }
803
804 if (rte->lateral)
805 root->hasLateralRTEs = true;
806
807 /*
808 * We can also determine the maximum security level required for any
809 * securityQuals now. Addition of inheritance-child RTEs won't affect
810 * this, because child tables don't have their own securityQuals; see
811 * expand_single_inheritance_child().
812 */
813 if (rte->securityQuals)
814 root->qual_security_level = Max(root->qual_security_level,
815 list_length(rte->securityQuals));
816 }
817
818 /*
819 * If we have now verified that the query target relation is
820 * non-inheriting, mark it as a leaf target.
821 */
822 if (parse->resultRelation)
823 {
824 RangeTblEntry *rte = rt_fetch(parse->resultRelation, parse->rtable);
825
826 if (!rte->inh)
827 root->leaf_result_relids =
828 bms_make_singleton(parse->resultRelation);
829 }
830
831 /*
832 * Preprocess RowMark information. We need to do this after subquery
833 * pullup, so that all base relations are present.
834 */
835 preprocess_rowmarks(root);
836
837 /*
838 * Set hasHavingQual to remember if HAVING clause is present. Needed
839 * because preprocess_expression will reduce a constant-true condition to
840 * an empty qual list ... but "HAVING TRUE" is not a semantic no-op.
841 */
842 root->hasHavingQual = (parse->havingQual != NULL);
843
844 /*
845 * Do expression preprocessing on targetlist and quals, as well as other
846 * random expressions in the querytree. Note that we do not need to
847 * handle sort/group expressions explicitly, because they are actually
848 * part of the targetlist.
849 */
850 parse->targetList = (List *)
851 preprocess_expression(root, (Node *) parse->targetList,
852 EXPRKIND_TARGET);
853
854 newWithCheckOptions = NIL;
855 foreach(l, parse->withCheckOptions)
856 {
857 WithCheckOption *wco = lfirst_node(WithCheckOption, l);
858
859 wco->qual = preprocess_expression(root, wco->qual,
860 EXPRKIND_QUAL);
861 if (wco->qual != NULL)
862 newWithCheckOptions = lappend(newWithCheckOptions, wco);
863 }
864 parse->withCheckOptions = newWithCheckOptions;
865
866 parse->returningList = (List *)
867 preprocess_expression(root, (Node *) parse->returningList,
868 EXPRKIND_TARGET);
869
870 preprocess_qual_conditions(root, (Node *) parse->jointree);
871
872 parse->havingQual = preprocess_expression(root, parse->havingQual,
873 EXPRKIND_QUAL);
874
875 foreach(l, parse->windowClause)
876 {
877 WindowClause *wc = lfirst_node(WindowClause, l);
878
879 /* partitionClause/orderClause are sort/group expressions */
880 wc->startOffset = preprocess_expression(root, wc->startOffset,
881 EXPRKIND_LIMIT);
882 wc->endOffset = preprocess_expression(root, wc->endOffset,
883 EXPRKIND_LIMIT);
884 }
885
886 parse->limitOffset = preprocess_expression(root, parse->limitOffset,
887 EXPRKIND_LIMIT);
888 parse->limitCount = preprocess_expression(root, parse->limitCount,
889 EXPRKIND_LIMIT);
890
891 if (parse->onConflict)
892 {
893 parse->onConflict->arbiterElems = (List *)
894 preprocess_expression(root,
895 (Node *) parse->onConflict->arbiterElems,
896 EXPRKIND_ARBITER_ELEM);
897 parse->onConflict->arbiterWhere =
898 preprocess_expression(root,
899 parse->onConflict->arbiterWhere,
900 EXPRKIND_QUAL);
901 parse->onConflict->onConflictSet = (List *)
902 preprocess_expression(root,
903 (Node *) parse->onConflict->onConflictSet,
904 EXPRKIND_TARGET);
905 parse->onConflict->onConflictWhere =
906 preprocess_expression(root,
907 parse->onConflict->onConflictWhere,
908 EXPRKIND_QUAL);
909 /* exclRelTlist contains only Vars, so no preprocessing needed */
910 }
911
912 foreach(l, parse->mergeActionList)
913 {
914 MergeAction *action = (MergeAction *) lfirst(l);
915
916 action->targetList = (List *)
917 preprocess_expression(root,
918 (Node *) action->targetList,
919 EXPRKIND_TARGET);
920 action->qual =
921 preprocess_expression(root,
922 (Node *) action->qual,
923 EXPRKIND_QUAL);
924 }
925
926 parse->mergeJoinCondition =
927 preprocess_expression(root, parse->mergeJoinCondition, EXPRKIND_QUAL);
928
929 root->append_rel_list = (List *)
930 preprocess_expression(root, (Node *) root->append_rel_list,
931 EXPRKIND_APPINFO);
932
933 /* Also need to preprocess expressions within RTEs */
934 foreach(l, parse->rtable)
935 {
936 RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
937 int kind;
938 ListCell *lcsq;
939
940 if (rte->rtekind == RTE_RELATION)
941 {
942 if (rte->tablesample)
943 rte->tablesample = (TableSampleClause *)
944 preprocess_expression(root,
945 (Node *) rte->tablesample,
946 EXPRKIND_TABLESAMPLE);
947 }
948 else if (rte->rtekind == RTE_SUBQUERY)
949 {
950 /*
951 * We don't want to do all preprocessing yet on the subquery's
952 * expressions, since that will happen when we plan it. But if it
953 * contains any join aliases of our level, those have to get
954 * expanded now, because planning of the subquery won't do it.
955 * That's only possible if the subquery is LATERAL.
956 */
957 if (rte->lateral && root->hasJoinRTEs)
958 rte->subquery = (Query *)
959 flatten_join_alias_vars(root, root->parse,
960 (Node *) rte->subquery);
961 }
962 else if (rte->rtekind == RTE_FUNCTION)
963 {
964 /* Preprocess the function expression(s) fully */
965 kind = rte->lateral ? EXPRKIND_RTFUNC_LATERAL : EXPRKIND_RTFUNC;
966 rte->functions = (List *)
967 preprocess_expression(root, (Node *) rte->functions, kind);
968 }
969 else if (rte->rtekind == RTE_TABLEFUNC)
970 {
971 /* Preprocess the function expression(s) fully */
972 kind = rte->lateral ? EXPRKIND_TABLEFUNC_LATERAL : EXPRKIND_TABLEFUNC;
973 rte->tablefunc = (TableFunc *)
974 preprocess_expression(root, (Node *) rte->tablefunc, kind);
975 }
976 else if (rte->rtekind == RTE_VALUES)
977 {
978 /* Preprocess the values lists fully */
979 kind = rte->lateral ? EXPRKIND_VALUES_LATERAL : EXPRKIND_VALUES;
980 rte->values_lists = (List *)
981 preprocess_expression(root, (Node *) rte->values_lists, kind);
982 }
983 else if (rte->rtekind == RTE_GROUP)
984 {
985 /* Preprocess the groupexprs list fully */
986 rte->groupexprs = (List *)
987 preprocess_expression(root, (Node *) rte->groupexprs,
988 EXPRKIND_GROUPEXPR);
989 }
990
991 /*
992 * Process each element of the securityQuals list as if it were a
993 * separate qual expression (as indeed it is). We need to do it this
994 * way to get proper canonicalization of AND/OR structure. Note that
995 * this converts each element into an implicit-AND sublist.
996 */
997 foreach(lcsq, rte->securityQuals)
998 {
999 lfirst(lcsq) = preprocess_expression(root,
1000 (Node *) lfirst(lcsq),
1001 EXPRKIND_QUAL);
1002 }
1003 }
1004
1005 /*
1006 * Now that we are done preprocessing expressions, and in particular done
1007 * flattening join alias variables, get rid of the joinaliasvars lists.
1008 * They no longer match what expressions in the rest of the tree look
1009 * like, because we have not preprocessed expressions in those lists (and
1010 * do not want to; for example, expanding a SubLink there would result in
1011 * a useless unreferenced subplan). Leaving them in place simply creates
1012 * a hazard for later scans of the tree. We could try to prevent that by
1013 * using QTW_IGNORE_JOINALIASES in every tree scan done after this point,
1014 * but that doesn't sound very reliable.
1015 */
1016 if (root->hasJoinRTEs)
1017 {
1018 foreach(l, parse->rtable)
1019 {
1020 RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
1021
1022 rte->joinaliasvars = NIL;
1023 }
1024 }
1025
1026 /*
1027 * Replace any Vars in the subquery's targetlist and havingQual that
1028 * reference GROUP outputs with the underlying grouping expressions.
1029 *
1030 * Note that we need to perform this replacement after we've preprocessed
1031 * the grouping expressions. This is to ensure that there is only one
1032 * instance of SubPlan for each SubLink contained within the grouping
1033 * expressions.
1034 */
1035 if (parse->hasGroupRTE)
1036 {
1037 parse->targetList = (List *)
1038 flatten_group_exprs(root, root->parse, (Node *) parse->targetList);
1039 parse->havingQual =
1040 flatten_group_exprs(root, root->parse, parse->havingQual);
1041 }
1042
1043 /* Constant-folding might have removed all set-returning functions */
1044 if (parse->hasTargetSRFs)
1045 parse->hasTargetSRFs = expression_returns_set((Node *) parse->targetList);
1046
1047 /*
1048 * In some cases we may want to transfer a HAVING clause into WHERE. We
1049 * cannot do so if the HAVING clause contains aggregates (obviously) or
1050 * volatile functions (since a HAVING clause is supposed to be executed
1051 * only once per group). We also can't do this if there are any nonempty
1052 * grouping sets and the clause references any columns that are nullable
1053 * by the grouping sets; moving such a clause into WHERE would potentially
1054 * change the results. (If there are only empty grouping sets, then the
1055 * HAVING clause must be degenerate as discussed below.)
1056 *
1057 * Also, it may be that the clause is so expensive to execute that we're
1058 * better off doing it only once per group, despite the loss of
1059 * selectivity. This is hard to estimate short of doing the entire
1060 * planning process twice, so we use a heuristic: clauses containing
1061 * subplans are left in HAVING. Otherwise, we move or copy the HAVING
1062 * clause into WHERE, in hopes of eliminating tuples before aggregation
1063 * instead of after.
1064 *
1065 * If the query has explicit grouping then we can simply move such a
1066 * clause into WHERE; any group that fails the clause will not be in the
1067 * output because none of its tuples will reach the grouping or
1068 * aggregation stage. Otherwise we must have a degenerate (variable-free)
1069 * HAVING clause, which we put in WHERE so that query_planner() can use it
1070 * in a gating Result node, but also keep in HAVING to ensure that we
1071 * don't emit a bogus aggregated row. (This could be done better, but it
1072 * seems not worth optimizing.)
1073 *
1074 * Note that a HAVING clause may contain expressions that are not fully
1075 * preprocessed. This can happen if these expressions are part of
1076 * grouping items. In such cases, they are replaced with GROUP Vars in
1077 * the parser and then replaced back after we've done with expression
1078 * preprocessing on havingQual. This is not an issue if the clause
1079 * remains in HAVING, because these expressions will be matched to lower
1080 * target items in setrefs.c. However, if the clause is moved or copied
1081 * into WHERE, we need to ensure that these expressions are fully
1082 * preprocessed.
1083 *
1084 * Note that both havingQual and parse->jointree->quals are in
1085 * implicitly-ANDed-list form at this point, even though they are declared
1086 * as Node *.
1087 */
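/*
 * Illustrative example: in
 *		SELECT a, sum(b) FROM t GROUP BY a HAVING a > 0 AND sum(b) > 10
 * the clause "a > 0" contains no aggregates, volatile functions, or
 * subplans, so the loop below moves it into WHERE and it filters rows
 * before aggregation, while "sum(b) > 10" references an aggregate and
 * stays in HAVING.
 */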
1088 newHaving = NIL;
1089 foreach(l, (List *) parse->havingQual)
1090 {
1091 Node *havingclause = (Node *) lfirst(l);
1092
1093 if (contain_agg_clause(havingclause) ||
1094 contain_volatile_functions(havingclause) ||
1095 contain_subplans(havingclause) ||
1096 (parse->groupClause && parse->groupingSets &&
1097 bms_is_member(root->group_rtindex, pull_varnos(root, havingclause))))
1098 {
1099 /* keep it in HAVING */
1100 newHaving = lappend(newHaving, havingclause);
1101 }
1102 else if (parse->groupClause)
1103 {
1104 Node *whereclause;
1105
1106 /* Preprocess the HAVING clause fully */
1107 whereclause = preprocess_expression(root, havingclause,
1108 EXPRKIND_QUAL);
1109 /* ... and move it to WHERE */
1110 parse->jointree->quals = (Node *)
1111 list_concat((List *) parse->jointree->quals,
1112 (List *) whereclause);
1113 }
1114 else
1115 {
1116 Node *whereclause;
1117
1118 /* Preprocess the HAVING clause fully */
1119 whereclause = preprocess_expression(root, copyObject(havingclause),
1120 EXPRKIND_QUAL);
1121 /* ... and put a copy in WHERE */
1122 parse->jointree->quals = (Node *)
1123 list_concat((List *) parse->jointree->quals,
1124 (List *) whereclause);
1125 /* ... and also keep it in HAVING */
1126 newHaving = lappend(newHaving, havingclause);
1127 }
1128 }
1129 parse->havingQual = (Node *) newHaving;
1130
1131 /*
1132 * If we have any outer joins, try to reduce them to plain inner joins.
1133 * This step is most easily done after we've done expression
1134 * preprocessing.
1135 */
1136 if (hasOuterJoins)
1137 reduce_outer_joins(root);
1138
1139 /*
1140 * If we have any RTE_RESULT relations, see if they can be deleted from
1141 * the jointree. We also rely on this processing to flatten single-child
1142 * FromExprs underneath outer joins. This step is most effectively done
1143 * after we've done expression preprocessing and outer join reduction.
1144 */
1145 if (hasResultRTEs || hasOuterJoins)
1146 remove_useless_result_rtes(root);
1147
1148 /*
1149 * Do the main planning.
1150 */
1151 grouping_planner(root, tuple_fraction, setops);
1152
1153 /*
1154 * Capture the set of outer-level param IDs we have access to, for use in
1155 * extParam/allParam calculations later.
1156 */
1157 SS_identify_outer_params(root);
1158
1159 /*
1160 * If any initPlans were created in this query level, adjust the surviving
1161 * Paths' costs and parallel-safety flags to account for them. The
1162 * initPlans won't actually get attached to the plan tree till
1163 * create_plan() runs, but we must include their effects now.
1164 */
1165 final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1166 SS_charge_for_initplans(root, final_rel);
1167
1168 /*
1169 * Make sure we've identified the cheapest Path for the final rel. (By
1170 * doing this here not in grouping_planner, we include initPlan costs in
1171 * the decision, though it's unlikely that will change anything.)
1172 */
1173 set_cheapest(final_rel);
1174
1175 return root;
1176}
1177
1178/*
1179 * preprocess_expression
1180 * Do subquery_planner's preprocessing work for an expression,
1181 * which can be a targetlist, a WHERE clause (including JOIN/ON
1182 * conditions), a HAVING clause, or a few other things.
1183 */
1184static Node *
1185preprocess_expression(PlannerInfo *root, Node *expr, int kind)
1186{
1187 /*
1188 * Fall out quickly if expression is empty. This occurs often enough to
1189 * be worth checking. Note that null->null is the correct conversion for
1190 * implicit-AND result format, too.
1191 */
1192 if (expr == NULL)
1193 return NULL;
1194
1195 /*
1196 * If the query has any join RTEs, replace join alias variables with
1197 * base-relation variables. We must do this first, since any expressions
1198 * we may extract from the joinaliasvars lists have not been preprocessed.
1199 * For example, if we did this after sublink processing, sublinks expanded
1200 * out from join aliases would not get processed. But we can skip this in
1201 * non-lateral RTE functions, VALUES lists, and TABLESAMPLE clauses, since
1202 * they can't contain any Vars of the current query level.
1203 */
1204 if (root->hasJoinRTEs &&
1205 !(kind == EXPRKIND_RTFUNC ||
1206 kind == EXPRKIND_VALUES ||
1207 kind == EXPRKIND_TABLESAMPLE ||
1208 kind == EXPRKIND_TABLEFUNC))
1209 expr = flatten_join_alias_vars(root, root->parse, expr);
1210
1211 /*
1212 * Simplify constant expressions. For function RTEs, this was already
1213 * done by preprocess_function_rtes. (But note we must do it again for
1214 * EXPRKIND_RTFUNC_LATERAL, because those might by now contain
1215 * un-simplified subexpressions inserted by flattening of subqueries or
1216 * join alias variables.)
1217 *
1218 * Note: an essential effect of this is to convert named-argument function
1219 * calls to positional notation and insert the current actual values of
1220 * any default arguments for functions. To ensure that happens, we *must*
1221 * process all expressions here. Previous PG versions sometimes skipped
1222 * const-simplification if it didn't seem worth the trouble, but we can't
1223 * do that anymore.
1224 *
1225 * Note: this also flattens nested AND and OR expressions into N-argument
1226 * form. All processing of a qual expression after this point must be
1227 * careful to maintain AND/OR flatness --- that is, do not generate a tree
1228 * with AND directly under AND, nor OR directly under OR.
1229 */
1230 if (kind != EXPRKIND_RTFUNC)
1231 expr = eval_const_expressions(root, expr);
1232
1233 /*
1234 * If it's a qual or havingQual, canonicalize it.
1235 */
1236 if (kind == EXPRKIND_QUAL)
1237 {
1238 expr = (Node *) canonicalize_qual((Expr *) expr, false);
1239
1240#ifdef OPTIMIZER_DEBUG
1241 printf("After canonicalize_qual()\n");
1242 pprint(expr);
1243#endif
1244 }
1245
1246 /*
1247 * Check for ANY ScalarArrayOpExpr with Const arrays and set the
1248 * hashfuncid of any that might execute more quickly by using hash lookups
1249 * instead of a linear search.
1250 */
1251 if (kind == EXPRKIND_QUAL || kind == EXPRKIND_TARGET)
1252 {
1253 convert_saop_to_hashed_saop(expr);
1254 }
1255
1256 /* Expand SubLinks to SubPlans */
1257 if (root->parse->hasSubLinks)
1258 expr = SS_process_sublinks(root, expr, (kind == EXPRKIND_QUAL));
1259
1260 /*
1261 * XXX do not insert anything here unless you have grokked the comments in
1262 * SS_replace_correlation_vars ...
1263 */
1264
1265 /* Replace uplevel vars with Param nodes (this IS possible in VALUES) */
1266 if (root->query_level > 1)
1267 expr = SS_replace_correlation_vars(root, expr);
1268
1269 /*
1270 * If it's a qual or havingQual, convert it to implicit-AND format. (We
1271 * don't want to do this before eval_const_expressions, since the latter
1272 * would be unable to simplify a top-level AND correctly. Also,
1273 * SS_process_sublinks expects explicit-AND format.)
1274 */
1275 if (kind == EXPRKIND_QUAL)
1276 expr = (Node *) make_ands_implicit((Expr *) expr);
1277
1278 return expr;
1279}
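/*
 * Illustrative example of the steps above for an EXPRKIND_QUAL expression:
 * a WHERE clause written as "x = 1 + 1 AND (y > 0 OR true)" is simplified
 * by eval_const_expressions() to "x = 2", canonicalized, and finally
 * returned by make_ands_implicit() as the one-element implicit-AND list
 * {x = 2}.
 */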
1280
1281/*
1282 * preprocess_qual_conditions
1283 * Recursively scan the query's jointree and do subquery_planner's
1284 * preprocessing work on each qual condition found therein.
1285 */
1286static void
1287preprocess_qual_conditions(PlannerInfo *root, Node *jtnode)
1288{
1289 if (jtnode == NULL)
1290 return;
1291 if (IsA(jtnode, RangeTblRef))
1292 {
1293 /* nothing to do here */
1294 }
1295 else if (IsA(jtnode, FromExpr))
1296 {
1297 FromExpr *f = (FromExpr *) jtnode;
1298 ListCell *l;
1299
1300 foreach(l, f->fromlist)
1301 preprocess_qual_conditions(root, lfirst(l));
1302
1303 f->quals = preprocess_expression(root, f->quals, EXPRKIND_QUAL);
1304 }
1305 else if (IsA(jtnode, JoinExpr))
1306 {
1307 JoinExpr *j = (JoinExpr *) jtnode;
1308
1309 preprocess_qual_conditions(root, j->larg);
1310 preprocess_qual_conditions(root, j->rarg);
1311
1312 j->quals = preprocess_expression(root, j->quals, EXPRKIND_QUAL);
1313 }
1314 else
1315 elog(ERROR, "unrecognized node type: %d",
1316 (int) nodeTag(jtnode));
1317}
1318
1319/*
1320 * preprocess_phv_expression
1321 * Do preprocessing on a PlaceHolderVar expression that's been pulled up.
1322 *
1323 * If a LATERAL subquery references an output of another subquery, and that
1324 * output must be wrapped in a PlaceHolderVar because of an intermediate outer
1325 * join, then we'll push the PlaceHolderVar expression down into the subquery
1326 * and later pull it back up during find_lateral_references, which runs after
1327 * subquery_planner has preprocessed all the expressions that were in the
1328 * current query level to start with. So we need to preprocess it then.
1329 */
1330Expr *
1331preprocess_phv_expression(PlannerInfo *root, Expr *expr)
1332{
1333 return (Expr *) preprocess_expression(root, (Node *) expr, EXPRKIND_PHV);
1334}
1335
1336/*--------------------
1337 * grouping_planner
1338 * Perform planning steps related to grouping, aggregation, etc.
1339 *
1340 * This function adds all required top-level processing to the scan/join
1341 * Path(s) produced by query_planner.
1342 *
1343 * tuple_fraction is the fraction of tuples we expect will be retrieved.
1344 * tuple_fraction is interpreted as follows:
1345 * 0: expect all tuples to be retrieved (normal case)
1346 * 0 < tuple_fraction < 1: expect the given fraction of tuples available
1347 * from the plan to be retrieved
1348 * tuple_fraction >= 1: tuple_fraction is the absolute number of tuples
1349 * expected to be retrieved (ie, a LIMIT specification).
1350 * setops is used for set operation subqueries to provide the subquery with
1351 * the context in which it's being used so that Paths correctly sorted for the
1352 * set operation can be generated. NULL when not planning a set operation
1353 * child, or when a child of a set op that isn't interested in sorted input.
1354 *
1355 * Returns nothing; the useful output is in the Paths we attach to the
1356 * (UPPERREL_FINAL, NULL) upperrel in *root. In addition,
1357 * root->processed_tlist contains the final processed targetlist.
1358 *
1359 * Note that we have not done set_cheapest() on the final rel; it's convenient
1360 * to leave this to the caller.
1361 *--------------------
1362 */
1363static void
1364grouping_planner(PlannerInfo *root, double tuple_fraction,
1365 SetOperationStmt *setops)
1366{
1367 Query *parse = root->parse;
1368 int64 offset_est = 0;
1369 int64 count_est = 0;
1370 double limit_tuples = -1.0;
1371 bool have_postponed_srfs = false;
1372 PathTarget *final_target;
1373 List *final_targets;
1374 List *final_targets_contain_srfs;
1375 bool final_target_parallel_safe;
1376 RelOptInfo *current_rel;
1377 RelOptInfo *final_rel;
1378 FinalPathExtraData extra;
1379 ListCell *lc;
1380
1381 /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
1382 if (parse->limitCount || parse->limitOffset)
1383 {
1384 tuple_fraction = preprocess_limit(root, tuple_fraction,
1385 &offset_est, &count_est);
1386
1387 /*
1388 * If we have a known LIMIT, and don't have an unknown OFFSET, we can
1389 * estimate the effects of using a bounded sort.
1390 */
1391 if (count_est > 0 && offset_est >= 0)
1392 limit_tuples = (double) count_est + (double) offset_est;
1393 }
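/*
 * Illustrative example: for "... OFFSET 20 LIMIT 10" with constant
 * arguments, preprocess_limit() reports count_est = 10 and offset_est = 20,
 * so limit_tuples becomes 30 here, and (assuming the caller did not request
 * a smaller fraction) the tuple_fraction passed down is likewise an
 * absolute row count of 30.
 */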
1394
1395 /* Make tuple_fraction accessible to lower-level routines */
1396 root->tuple_fraction = tuple_fraction;
1397
1398 if (parse->setOperations)
1399 {
1400 /*
1401 * Construct Paths for set operations. The results will not need any
1402 * work except perhaps a top-level sort and/or LIMIT. Note that any
1403 * special work for recursive unions is the responsibility of
1404 * plan_set_operations.
1405 */
1406 current_rel = plan_set_operations(root);
1407
1408 /*
1409 * We should not need to call preprocess_targetlist, since we must be
1410 * in a SELECT query node. Instead, use the processed_tlist returned
1411 * by plan_set_operations (since this tells whether it returned any
1412 * resjunk columns!), and transfer any sort key information from the
1413 * original tlist.
1414 */
1415 Assert(parse->commandType == CMD_SELECT);
1416
1417 /* for safety, copy processed_tlist instead of modifying in-place */
1418 root->processed_tlist =
1419 postprocess_setop_tlist(copyObject(root->processed_tlist),
1420 parse->targetList);
1421
1422 /* Also extract the PathTarget form of the setop result tlist */
1423 final_target = current_rel->cheapest_total_path->pathtarget;
1424
1425 /* And check whether it's parallel safe */
1426 final_target_parallel_safe =
1427 is_parallel_safe(root, (Node *) final_target->exprs);
1428
1429 /* The setop result tlist couldn't contain any SRFs */
1430 Assert(!parse->hasTargetSRFs);
1431 final_targets = final_targets_contain_srfs = NIL;
1432
1433 /*
1434 * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have
1435 * checked already, but let's make sure).
1436 */
1437 if (parse->rowMarks)
1438 ereport(ERROR,
1439 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1440 /*------
1441 translator: %s is a SQL row locking clause such as FOR UPDATE */
1442 errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
1443 LCS_asString(linitial_node(RowMarkClause,
1444 parse->rowMarks)->strength))));
1445
1446 /*
1447 * Calculate pathkeys that represent result ordering requirements
1448 */
1449 Assert(parse->distinctClause == NIL);
1450 root->sort_pathkeys = make_pathkeys_for_sortclauses(root,
1451 parse->sortClause,
1452 root->processed_tlist);
1453 }
1454 else
1455 {
1456 /* No set operations, do regular planning */
1457 PathTarget *sort_input_target;
1458 List *sort_input_targets;
1459 List *sort_input_targets_contain_srfs;
1460 bool sort_input_target_parallel_safe;
1461 PathTarget *grouping_target;
1462 List *grouping_targets;
1463 List *grouping_targets_contain_srfs;
1464 bool grouping_target_parallel_safe;
1465 PathTarget *scanjoin_target;
1466 List *scanjoin_targets;
1467 List *scanjoin_targets_contain_srfs;
1468 bool scanjoin_target_parallel_safe;
1469 bool scanjoin_target_same_exprs;
1470 bool have_grouping;
1471 WindowFuncLists *wflists = NULL;
1472 List *activeWindows = NIL;
1473 grouping_sets_data *gset_data = NULL;
1474 standard_qp_extra qp_extra;
1475
1476 /* A recursive query should always have setOperations */
1477 Assert(!root->hasRecursion);
1478
1479 /* Preprocess grouping sets and GROUP BY clause, if any */
1480 if (parse->groupingSets)
1481 {
1482 gset_data = preprocess_grouping_sets(root);
1483 }
1484 else if (parse->groupClause)
1485 {
1486 /* Preprocess regular GROUP BY clause, if any */
1487 root->processed_groupClause = preprocess_groupclause(root, NIL);
1488 }
1489
1490 /*
1491 * Preprocess targetlist. Note that much of the remaining planning
1492 * work will be done with the PathTarget representation of tlists, but
1493 * we must also maintain the full representation of the final tlist so
1494 * that we can transfer its decoration (resnames etc) to the topmost
1495 * tlist of the finished Plan. This is kept in processed_tlist.
1496 */
1497 preprocess_targetlist(root);
1498
1499 /*
1500 * Mark all the aggregates with resolved aggtranstypes, and detect
1501 * aggregates that are duplicates or can share transition state. We
1502 * must do this before slicing and dicing the tlist into various
1503 * pathtargets, else some copies of the Aggref nodes might escape
1504 * being marked.
1505 */
1506 if (parse->hasAggs)
1507 {
1508 preprocess_aggrefs(root, (Node *) root->processed_tlist);
1509 preprocess_aggrefs(root, (Node *) parse->havingQual);
1510 }
1511
1512 /*
1513 * Locate any window functions in the tlist. (We don't need to look
1514 * anywhere else, since expressions used in ORDER BY will be in there
1515 * too.) Note that they could all have been eliminated by constant
1516 * folding, in which case we don't need to do any more work.
1517 */
1518 if (parse->hasWindowFuncs)
1519 {
1520 wflists = find_window_functions((Node *) root->processed_tlist,
1521 list_length(parse->windowClause));
1522 if (wflists->numWindowFuncs > 0)
1523 {
1524 /*
1525 * See if any modifications can be made to each WindowClause
1526 * to allow the executor to execute the WindowFuncs more
1527 * quickly.
1528 */
1529 optimize_window_clauses(root, wflists);
1530
1531 activeWindows = select_active_windows(root, wflists);
1532 }
1533 else
1534 parse->hasWindowFuncs = false;
1535 }
1536
1537 /*
1538 * Preprocess MIN/MAX aggregates, if any. Note: be careful about
1539 * adding logic between here and the query_planner() call. Anything
1540 * that is needed in MIN/MAX-optimizable cases will have to be
1541 * duplicated in planagg.c.
1542 */
1543 if (parse->hasAggs)
1544 preprocess_minmax_aggregates(root);
1545
1546 /*
1546 /*
1547 * Figure out whether there's a hard limit on the number of rows that
1548 * query_planner's result subplan needs to return. Even if we know a
1549 * hard limit overall, it doesn't apply if the query has any
1550 * grouping/aggregation operations, or SRFs in the tlist.
1551 */
1552 if (parse->groupClause ||
1553 parse->groupingSets ||
1554 parse->distinctClause ||
1555 parse->hasAggs ||
1556 parse->hasWindowFuncs ||
1557 parse->hasTargetSRFs ||
1558 root->hasHavingQual)
1559 root->limit_tuples = -1.0;
1560 else
1561 root->limit_tuples = limit_tuples;
1562
1563 /* Set up data needed by standard_qp_callback */
1564 qp_extra.activeWindows = activeWindows;
1565 qp_extra.gset_data = gset_data;
1566
1567 /*
1568 * If we're a subquery for a set operation, store the SetOperationStmt
1569 * in qp_extra.
1570 */
1571 qp_extra.setop = setops;
1572
1573 /*
1574 * Generate the best unsorted and presorted paths for the scan/join
1575 * portion of this Query, ie the processing represented by the
1576 * FROM/WHERE clauses. (Note there may not be any presorted paths.)
1577 * We also generate (in standard_qp_callback) pathkey representations
1578 * of the query's sort clause, distinct clause, etc.
1579 */
1580 current_rel = query_planner(root, standard_qp_callback, &qp_extra);
1581
1582 /*
1583 * Convert the query's result tlist into PathTarget format.
1584 *
1585 * Note: this cannot be done before query_planner() has performed
1586 * appendrel expansion, because that might add resjunk entries to
1587 * root->processed_tlist. Waiting till afterwards is also helpful
1588 * because the target width estimates can use per-Var width numbers
1589 * that were obtained within query_planner().
1590 */
1591 final_target = create_pathtarget(root, root->processed_tlist);
1592 final_target_parallel_safe =
1593 is_parallel_safe(root, (Node *) final_target->exprs);
1594
1595 /*
1596 * If ORDER BY was given, consider whether we should use a post-sort
1597 * projection, and compute the adjusted target for preceding steps if
1598 * so.
1599 */
1600 if (parse->sortClause)
1601 {
1602 sort_input_target = make_sort_input_target(root,
1603 final_target,
1604 &have_postponed_srfs);
1605 sort_input_target_parallel_safe =
1606 is_parallel_safe(root, (Node *) sort_input_target->exprs);
1607 }
1608 else
1609 {
1610 sort_input_target = final_target;
1611 sort_input_target_parallel_safe = final_target_parallel_safe;
1612 }
1613
1614 /*
1615 * If we have window functions to deal with, the output from any
1616 * grouping step needs to be what the window functions want;
1617 * otherwise, it should be sort_input_target.
1618 */
1619 if (activeWindows)
1620 {
1621 grouping_target = make_window_input_target(root,
1622 final_target,
1623 activeWindows);
1624 grouping_target_parallel_safe =
1625 is_parallel_safe(root, (Node *) grouping_target->exprs);
1626 }
1627 else
1628 {
1629 grouping_target = sort_input_target;
1630 grouping_target_parallel_safe = sort_input_target_parallel_safe;
1631 }
1632
1633 /*
1634 * If we have grouping or aggregation to do, the topmost scan/join
1635 * plan node must emit what the grouping step wants; otherwise, it
1636 * should emit grouping_target.
1637 */
1638 have_grouping = (parse->groupClause || parse->groupingSets ||
1639 parse->hasAggs || root->hasHavingQual);
1640 if (have_grouping)
1641 {
1642 scanjoin_target = make_group_input_target(root, final_target);
1643 scanjoin_target_parallel_safe =
1644 is_parallel_safe(root, (Node *) scanjoin_target->exprs);
1645 }
1646 else
1647 {
1648 scanjoin_target = grouping_target;
1649 scanjoin_target_parallel_safe = grouping_target_parallel_safe;
1650 }
1651
1652 /*
1653 * If there are any SRFs in the targetlist, we must separate each of
1654 * these PathTargets into SRF-computing and SRF-free targets. Replace
1655 * each of the named targets with a SRF-free version, and remember the
1656 * list of additional projection steps we need to add afterwards.
1657 */
1658 if (parse->hasTargetSRFs)
1659 {
1660 /* final_target doesn't recompute any SRFs in sort_input_target */
1661 split_pathtarget_at_srfs(root, final_target, sort_input_target,
1662 &final_targets,
1663 &final_targets_contain_srfs);
1664 final_target = linitial_node(PathTarget, final_targets);
1665 Assert(!linitial_int(final_targets_contain_srfs));
1666 /* likewise for sort_input_target vs. grouping_target */
1667 split_pathtarget_at_srfs(root, sort_input_target, grouping_target,
1668 &sort_input_targets,
1669 &sort_input_targets_contain_srfs);
1670 sort_input_target = linitial_node(PathTarget, sort_input_targets);
1671 Assert(!linitial_int(sort_input_targets_contain_srfs));
1672 /* likewise for grouping_target vs. scanjoin_target */
1673 split_pathtarget_at_srfs(root, grouping_target, scanjoin_target,
1674 &grouping_targets,
1675 &grouping_targets_contain_srfs);
1676 grouping_target = linitial_node(PathTarget, grouping_targets);
1677 Assert(!linitial_int(grouping_targets_contain_srfs));
1678 /* scanjoin_target will not have any SRFs precomputed for it */
1679 split_pathtarget_at_srfs(root, scanjoin_target, NULL,
1680 &scanjoin_targets,
1681 &scanjoin_targets_contain_srfs);
1682 scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
1683 Assert(!linitial_int(scanjoin_targets_contain_srfs));
1684 }
1685 else
1686 {
1687 /* initialize lists; for most of these, dummy values are OK */
1688 final_targets = final_targets_contain_srfs = NIL;
1689 sort_input_targets = sort_input_targets_contain_srfs = NIL;
1690 grouping_targets = grouping_targets_contain_srfs = NIL;
1691 scanjoin_targets = list_make1(scanjoin_target);
1692 scanjoin_targets_contain_srfs = NIL;
1693 }
1694
1695 /* Apply scan/join target. */
1696 scanjoin_target_same_exprs = list_length(scanjoin_targets) == 1
1697 && equal(scanjoin_target->exprs, current_rel->reltarget->exprs);
1698 apply_scanjoin_target_to_paths(root, current_rel, scanjoin_targets,
1699 scanjoin_targets_contain_srfs,
1700 scanjoin_target_parallel_safe,
1701 scanjoin_target_same_exprs);
1702
1703 /*
1704 * Save the various upper-rel PathTargets we just computed into
1705 * root->upper_targets[]. The core code doesn't use this, but it
1706 * provides a convenient place for extensions to get at the info. For
1707 * consistency, we save all the intermediate targets, even though some
1708 * of the corresponding upperrels might not be needed for this query.
1709 */
1710 root->upper_targets[UPPERREL_FINAL] = final_target;
1711 root->upper_targets[UPPERREL_ORDERED] = final_target;
1712 root->upper_targets[UPPERREL_DISTINCT] = sort_input_target;
1713 root->upper_targets[UPPERREL_PARTIAL_DISTINCT] = sort_input_target;
1714 root->upper_targets[UPPERREL_WINDOW] = sort_input_target;
1715 root->upper_targets[UPPERREL_GROUP_AGG] = grouping_target;
1716
1717 /*
1718 * If we have grouping and/or aggregation, consider ways to implement
1719 * that. We build a new upperrel representing the output of this
1720 * phase.
1721 */
1722 if (have_grouping)
1723 {
1724 current_rel = create_grouping_paths(root,
1725 current_rel,
1726 grouping_target,
1727 grouping_target_parallel_safe,
1728 gset_data);
1729 /* Fix things up if grouping_target contains SRFs */
1730 if (parse->hasTargetSRFs)
1731 adjust_paths_for_srfs(root, current_rel,
1732 grouping_targets,
1733 grouping_targets_contain_srfs);
1734 }
1735
1736 /*
1737 * If we have window functions, consider ways to implement those. We
1738 * build a new upperrel representing the output of this phase.
1739 */
1740 if (activeWindows)
1741 {
1742 current_rel = create_window_paths(root,
1743 current_rel,
1744 grouping_target,
1745 sort_input_target,
1746 sort_input_target_parallel_safe,
1747 wflists,
1748 activeWindows);
1749 /* Fix things up if sort_input_target contains SRFs */
1750 if (parse->hasTargetSRFs)
1751 adjust_paths_for_srfs(root, current_rel,
1752 sort_input_targets,
1753 sort_input_targets_contain_srfs);
1754 }
1755
1756 /*
1757 * If there is a DISTINCT clause, consider ways to implement that. We
1758 * build a new upperrel representing the output of this phase.
1759 */
1760 if (parse->distinctClause)
1761 {
1762 current_rel = create_distinct_paths(root,
1763 current_rel,
1764 sort_input_target);
1765 }
1766 } /* end of if (setOperations) */
1767
1768 /*
1769 * If ORDER BY was given, consider ways to implement that, and generate a
1770 * new upperrel containing only paths that emit the correct ordering and
1771 * project the correct final_target. We can apply the original
1772 * limit_tuples limit in sort costing here, but only if there are no
1773 * postponed SRFs.
1774 */
1775 if (parse->sortClause)
1776 {
1777 current_rel = create_ordered_paths(root,
1778 current_rel,
1779 final_target,
1780 final_target_parallel_safe,
1781 have_postponed_srfs ? -1.0 :
1782 limit_tuples);
1783 /* Fix things up if final_target contains SRFs */
1784 if (parse->hasTargetSRFs)
1785 adjust_paths_for_srfs(root, current_rel,
1786 final_targets,
1787 final_targets_contain_srfs);
1788 }
1789
1790 /*
1791 * Now we are prepared to build the final-output upperrel.
1792 */
1793 final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1794
1795 /*
1796 * If the input rel is marked consider_parallel and there's nothing that's
1797 * not parallel-safe in the LIMIT clause, then the final_rel can be marked
1798 * consider_parallel as well. Note that if the query has rowMarks or is
1799 * not a SELECT, consider_parallel will be false for every relation in the
1800 * query.
1801 */
1802 if (current_rel->consider_parallel &&
1803 is_parallel_safe(root, parse->limitOffset) &&
1804 is_parallel_safe(root, parse->limitCount))
1805 final_rel->consider_parallel = true;
1806
1807 /*
1808 * If the current_rel belongs to a single FDW, so does the final_rel.
1809 */
1810 final_rel->serverid = current_rel->serverid;
1811 final_rel->userid = current_rel->userid;
1812 final_rel->useridiscurrent = current_rel->useridiscurrent;
1813 final_rel->fdwroutine = current_rel->fdwroutine;
1814
1815 /*
1816 * Generate paths for the final_rel. Insert all surviving paths, with
1817 * LockRows, Limit, and/or ModifyTable steps added if needed.
1818 */
1819 foreach(lc, current_rel->pathlist)
1820 {
1821 Path *path = (Path *) lfirst(lc);
1822
1823 /*
1824 * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
1825 * (Note: we intentionally test parse->rowMarks not root->rowMarks
1826 * here. If there are only non-locking rowmarks, they should be
1827 * handled by the ModifyTable node instead. However, root->rowMarks
1828 * is what goes into the LockRows node.)
1829 */
1830 if (parse->rowMarks)
1831 {
1832 path = (Path *) create_lockrows_path(root, final_rel, path,
1833 root->rowMarks,
1834 assign_special_exec_param(root));
1835 }
1836
1837 /*
1838 * If there is a LIMIT/OFFSET clause, add the LIMIT node.
1839 */
1840 if (limit_needed(parse))
1841 {
1842 path = (Path *) create_limit_path(root, final_rel, path,
1843 parse->limitOffset,
1844 parse->limitCount,
1845 parse->limitOption,
1846 offset_est, count_est);
1847 }
1848
1849 /*
1850 * If this is an INSERT/UPDATE/DELETE/MERGE, add the ModifyTable node.
1851 */
1852 if (parse->commandType != CMD_SELECT)
1853 {
1854 Index rootRelation;
1855 List *resultRelations = NIL;
1856 List *updateColnosLists = NIL;
1857 List *withCheckOptionLists = NIL;
1858 List *returningLists = NIL;
1859 List *mergeActionLists = NIL;
1860 List *mergeJoinConditions = NIL;
1861 List *rowMarks;
1862
1863 if (bms_membership(root->all_result_relids) == BMS_MULTIPLE)
1864 {
1865 /* Inherited UPDATE/DELETE/MERGE */
1866 RelOptInfo *top_result_rel = find_base_rel(root,
1867 parse->resultRelation);
1868 int resultRelation = -1;
1869
1870 /* Pass the root result rel forward to the executor. */
1871 rootRelation = parse->resultRelation;
1872
1873 /* Add only leaf children to ModifyTable. */
1874 while ((resultRelation = bms_next_member(root->leaf_result_relids,
1875 resultRelation)) >= 0)
1876 {
1877 RelOptInfo *this_result_rel = find_base_rel(root,
1878 resultRelation);
1879
1880 /*
1881 * Also exclude any leaf rels that have turned dummy since
1882 * being added to the list, for example, by being excluded
1883 * by constraint exclusion.
1884 */
1885 if (IS_DUMMY_REL(this_result_rel))
1886 continue;
1887
1888 /* Build per-target-rel lists needed by ModifyTable */
1889 resultRelations = lappend_int(resultRelations,
1890 resultRelation);
1891 if (parse->commandType == CMD_UPDATE)
1892 {
1893 List *update_colnos = root->update_colnos;
1894
1895 if (this_result_rel != top_result_rel)
1896 update_colnos =
1897 adjust_inherited_attnums_multilevel(root,
1898 update_colnos,
1899 this_result_rel->relid,
1900 top_result_rel->relid);
1901 updateColnosLists = lappend(updateColnosLists,
1902 update_colnos);
1903 }
1904 if (parse->withCheckOptions)
1905 {
1906 List *withCheckOptions = parse->withCheckOptions;
1907
1908 if (this_result_rel != top_result_rel)
1909 withCheckOptions = (List *)
1910 adjust_appendrel_attrs_multilevel(root,
1911 (Node *) withCheckOptions,
1912 this_result_rel,
1913 top_result_rel);
1914 withCheckOptionLists = lappend(withCheckOptionLists,
1915 withCheckOptions);
1916 }
1917 if (parse->returningList)
1918 {
1919 List *returningList = parse->returningList;
1920
1921 if (this_result_rel != top_result_rel)
1922 returningList = (List *)
1923 adjust_appendrel_attrs_multilevel(root,
1924 (Node *) returningList,
1925 this_result_rel,
1926 top_result_rel);
1927 returningLists = lappend(returningLists,
1928 returningList);
1929 }
1930 if (parse->mergeActionList)
1931 {
1932 ListCell *l;
1933 List *mergeActionList = NIL;
1934
1935 /*
1936 * Copy MergeActions and translate stuff that
1937 * references attribute numbers.
1938 */
1939 foreach(l, parse->mergeActionList)
1940 {
1941 MergeAction *action = lfirst(l),
1942 *leaf_action = copyObject(action);
1943
1944 leaf_action->qual =
1945 adjust_appendrel_attrs_multilevel(root,
1946 (Node *) action->qual,
1947 this_result_rel,
1948 top_result_rel);
1949 leaf_action->targetList = (List *)
1950 adjust_appendrel_attrs_multilevel(root,
1951 (Node *) action->targetList,
1952 this_result_rel,
1953 top_result_rel);
1954 if (leaf_action->commandType == CMD_UPDATE)
1955 leaf_action->updateColnos =
1956 adjust_inherited_attnums_multilevel(root,
1957 action->updateColnos,
1958 this_result_rel->relid,
1959 top_result_rel->relid);
1960 mergeActionList = lappend(mergeActionList,
1961 leaf_action);
1962 }
1963
1964 mergeActionLists = lappend(mergeActionLists,
1965 mergeActionList);
1966 }
1967 if (parse->commandType == CMD_MERGE)
1968 {
1969 Node *mergeJoinCondition = parse->mergeJoinCondition;
1970
1971 if (this_result_rel != top_result_rel)
1972 mergeJoinCondition =
1973 adjust_appendrel_attrs_multilevel(root,
1974 mergeJoinCondition,
1975 this_result_rel,
1976 top_result_rel);
1977 mergeJoinConditions = lappend(mergeJoinConditions,
1978 mergeJoinCondition);
1979 }
1980 }
1981
1982 if (resultRelations == NIL)
1983 {
1984 /*
1985 * We managed to exclude every child rel, so generate a
1986 * dummy one-relation plan using info for the top target
1987 * rel (even though that may not be a leaf target).
1988 * Although it's clear that no data will be updated or
1989 * deleted, we still need to have a ModifyTable node so
1990 * that any statement triggers will be executed. (This
1991 * could be cleaner if we fixed nodeModifyTable.c to allow
1992 * zero target relations, but that probably wouldn't be a
1993 * net win.)
1994 */
1995 resultRelations = list_make1_int(parse->resultRelation);
1996 if (parse->commandType == CMD_UPDATE)
1997 updateColnosLists = list_make1(root->update_colnos);
1998 if (parse->withCheckOptions)
1999 withCheckOptionLists = list_make1(parse->withCheckOptions);
2000 if (parse->returningList)
2001 returningLists = list_make1(parse->returningList);
2002 if (parse->mergeActionList)
2003 mergeActionLists = list_make1(parse->mergeActionList);
2004 if (parse->commandType == CMD_MERGE)
2005 mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2006 }
2007 }
2008 else
2009 {
2010 /* Single-relation INSERT/UPDATE/DELETE/MERGE. */
2011 rootRelation = 0; /* there's no separate root rel */
2012 resultRelations = list_make1_int(parse->resultRelation);
2013 if (parse->commandType == CMD_UPDATE)
2014 updateColnosLists = list_make1(root->update_colnos);
2015 if (parse->withCheckOptions)
2016 withCheckOptionLists = list_make1(parse->withCheckOptions);
2017 if (parse->returningList)
2018 returningLists = list_make1(parse->returningList);
2019 if (parse->mergeActionList)
2020 mergeActionLists = list_make1(parse->mergeActionList);
2021 if (parse->commandType == CMD_MERGE)
2022 mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2023 }
2024
2025 /*
2026 * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
2027 * will have dealt with fetching non-locked marked rows, else we
2028 * need to have ModifyTable do that.
2029 */
2030 if (parse->rowMarks)
2031 rowMarks = NIL;
2032 else
2033 rowMarks = root->rowMarks;
2034
2035 path = (Path *)
2036 create_modifytable_path(root, final_rel,
2037 path,
2038 parse->commandType,
2039 parse->canSetTag,
2040 parse->resultRelation,
2041 rootRelation,
2042 root->partColsUpdated,
2043 resultRelations,
2044 updateColnosLists,
2045 withCheckOptionLists,
2046 returningLists,
2047 rowMarks,
2048 parse->onConflict,
2049 mergeActionLists,
2050 mergeJoinConditions,
2051 assign_special_exec_param(root));
2052 }
2053
2054 /* And shove it into final_rel */
2055 add_path(final_rel, path);
2056 }
2057
2058 /*
2059 * Generate partial paths for final_rel, too, if outer query levels might
2060 * be able to make use of them.
2061 */
2062 if (final_rel->consider_parallel && root->query_level > 1 &&
2063 !limit_needed(parse))
2064 {
2065 Assert(!parse->rowMarks && parse->commandType == CMD_SELECT);
2066 foreach(lc, current_rel->partial_pathlist)
2067 {
2068 Path *partial_path = (Path *) lfirst(lc);
2069
2070 add_partial_path(final_rel, partial_path);
2071 }
2072 }
2073 
2074 extra.limit_needed = limit_needed(parse);
2075 extra.limit_tuples = limit_tuples;
2076 extra.count_est = count_est;
2077 extra.offset_est = offset_est;
2078
2079 /*
2080 * If there is an FDW that's responsible for all baserels of the query,
2081 * let it consider adding ForeignPaths.
2082 */
2083 if (final_rel->fdwroutine &&
2084 final_rel->fdwroutine->GetForeignUpperPaths)
2085 final_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_FINAL,
2086 current_rel, final_rel,
2087 &extra);
2088
2089 /* Let extensions possibly add some more paths */
2090 if (create_upper_paths_hook)
2091 (*create_upper_paths_hook) (root, UPPERREL_FINAL,
2092 current_rel, final_rel, &extra);
2093
2094 /* Note: currently, we leave it to callers to do set_cheapest() */
2095}
2096
2097/*
2098 * Do preprocessing for groupingSets clause and related data. This handles the
2099 * preliminary steps of expanding the grouping sets, organizing them into lists
2100 * of rollups, and preparing annotations which will later be filled in with
2101 * size estimates.
2102 */
2103static grouping_sets_data *
2104preprocess_grouping_sets(PlannerInfo *root)
2105{
2106 Query *parse = root->parse;
2107 List *sets;
2108 int maxref = 0;
2109 ListCell *lc_set;
2110 grouping_sets_data *gd = palloc0(sizeof(grouping_sets_data));
2111
2112 parse->groupingSets = expand_grouping_sets(parse->groupingSets, parse->groupDistinct, -1);
2113
2114 gd->any_hashable = false;
2115 gd->unhashable_refs = NULL;
2116 gd->unsortable_refs = NULL;
2117 gd->unsortable_sets = NIL;
2118
2119 /*
2120 * We don't currently make any attempt to optimize the groupClause when
2121 * there are grouping sets, so just duplicate it in processed_groupClause.
2122 */
2123 root->processed_groupClause = parse->groupClause;
2124
2125 if (parse->groupClause)
2126 {
2127 ListCell *lc;
2128
2129 foreach(lc, parse->groupClause)
2130 {
2131 SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
2132 Index ref = gc->tleSortGroupRef;
2133
2134 if (ref > maxref)
2135 maxref = ref;
2136
2137 if (!gc->hashable)
2138 gd->unhashable_refs = bms_add_member(gd->unhashable_refs, ref);
2139
2140 if (!OidIsValid(gc->sortop))
2141 gd->unsortable_refs = bms_add_member(gd->unsortable_refs, ref);
2142 }
2143 }
2144
2145 /* Allocate workspace array for remapping */
2146 gd->tleref_to_colnum_map = (int *) palloc((maxref + 1) * sizeof(int));
2147
2148 /*
2149 * If we have any unsortable sets, we must extract them before trying to
2150 * prepare rollups. Unsortable sets don't go through
2151 * reorder_grouping_sets, so we must apply the GroupingSetData annotation
2152 * here.
2153 */
2154 if (!bms_is_empty(gd->unsortable_refs))
2155 {
2156 List *sortable_sets = NIL;
2157 ListCell *lc;
2158
2159 foreach(lc, parse->groupingSets)
2160 {
2161 List *gset = (List *) lfirst(lc);
2162
2163 if (bms_overlap_list(gd->unsortable_refs, gset))
2164 {
2165 GroupingSetData *gs = makeNode(GroupingSetData);
2166
2167 gs->set = gset;
2168 gd->unsortable_sets = lappend(gd->unsortable_sets, gs);
2169
2170 /*
2171 * We must enforce here that an unsortable set is hashable;
2172 * later code assumes this. Parse analysis only checks that
2173 * every individual column is either hashable or sortable.
2174 *
2175 * Note that passing this test doesn't guarantee we can
2176 * generate a plan; there might be other showstoppers.
2177 */
2178 if (bms_overlap_list(gd->unhashable_refs, gset))
2179 ereport(ERROR,
2180 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2181 errmsg("could not implement GROUP BY"),
2182 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
2183 }
2184 else
2185 sortable_sets = lappend(sortable_sets, gset);
2186 }
2187
2188 if (sortable_sets)
2189 sets = extract_rollup_sets(sortable_sets);
2190 else
2191 sets = NIL;
2192 }
2193 else
2194 sets = extract_rollup_sets(parse->groupingSets);
2195
2196 foreach(lc_set, sets)
2197 {
2198 List *current_sets = (List *) lfirst(lc_set);
2199 RollupData *rollup = makeNode(RollupData);
2200 GroupingSetData *gs;
2201
2202 /*
2203 * Reorder the current list of grouping sets into correct prefix
2204 * order. If only one aggregation pass is needed, try to make the
2205 * list match the ORDER BY clause; if more than one pass is needed, we
2206 * don't bother with that.
2207 *
2208 * Note that this reorders the sets from smallest-member-first to
2209 * largest-member-first, and applies the GroupingSetData annotations,
2210 * though the data will be filled in later.
2211 */
2212 current_sets = reorder_grouping_sets(current_sets,
2213 (list_length(sets) == 1
2214 ? parse->sortClause
2215 : NIL));
2216
2217 /*
2218 * Get the initial (and therefore largest) grouping set.
2219 */
2220 gs = linitial_node(GroupingSetData, current_sets);
2221
2222 /*
2223 * Order the groupClause appropriately. If the first grouping set is
2224 * empty, then the groupClause must also be empty; otherwise we have
2225 * to force the groupClause to match that grouping set's order.
2226 *
2227 * (The first grouping set can be empty even though parse->groupClause
2228 * is not empty only if all non-empty grouping sets are unsortable.
2229 * The groupClauses for hashed grouping sets are built later on.)
2230 */
2231 if (gs->set)
2232 rollup->groupClause = preprocess_groupclause(root, gs->set);
2233 else
2234 rollup->groupClause = NIL;
2235
2236 /*
2237 * Is it hashable? We pretend empty sets are hashable even though we
2238 * actually force them not to be hashed later. But don't bother if
2239 * there's nothing but empty sets (since in that case we can't hash
2240 * anything).
2241 */
2242 if (gs->set &&
2243 grouping_is_hashable(rollup->groupClause))
2244 {
2245 rollup->hashable = true;
2246 gd->any_hashable = true;
2247 }
2248
2249 /*
2250 * Now that we've pinned down an order for the groupClause for this
2251 * list of grouping sets, we need to remap the entries in the grouping
2252 * sets from sortgrouprefs to plain indices (0-based) into the
2253 * groupClause for this collection of grouping sets. We keep the
2254 * original form for later use, though.
2255 */
2256 rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
2257 current_sets,
2258 gd->tleref_to_colnum_map);
2259 rollup->gsets_data = current_sets;
2260
2261 gd->rollups = lappend(gd->rollups, rollup);
2262 }
2263
2264 if (gd->unsortable_sets)
2265 {
2266 /*
2267 * We have not yet pinned down a groupclause for this, but we will
2268 * need index-based lists for estimation purposes. Construct
2269 * hash_sets_idx based on the entire original groupclause for now.
2270 */
2271 gd->hash_sets_idx = remap_to_groupclause_idx(parse->groupClause,
2272 gd->unsortable_sets,
2273 gd->tleref_to_colnum_map);
2274 gd->any_hashable = true;
2275 }
2276
2277 return gd;
2278}
2279
2280/*
2281 * Given a groupclause and a list of GroupingSetData, return equivalent sets
2282 * (without annotation) mapped to indexes into the given groupclause.
2283 */
2284static List *
2285remap_to_groupclause_idx(List *groupClause,
2286 List *gsets,
2287 int *tleref_to_colnum_map)
2288{
2289 int ref = 0;
2290 List *result = NIL;
2291 ListCell *lc;
2292
2293 foreach(lc, groupClause)
2294 {
2295 SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
2296
2297 tleref_to_colnum_map[gc->tleSortGroupRef] = ref++;
2298 }
2299
2300 foreach(lc, gsets)
2301 {
2302 List *set = NIL;
2303 ListCell *lc2;
2304 GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
2305
2306 foreach(lc2, gs->set)
2307 {
2308 set = lappend_int(set, tleref_to_colnum_map[lfirst_int(lc2)]);
2309 }
2310
2311 result = lappend(result, set);
2312 }
2313
2314 return result;
2315}
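For illustration only (this standalone sketch is not part of planner.c and its names are made up), the remapping above can be mimicked with plain int arrays: each clause's sortgroupref is assigned its 0-based position in the groupClause, and every grouping-set member is then translated through that map.

/*
 * Toy version of remap_to_groupclause_idx: groupClause refs {7, 3, 5}
 * turn the grouping set {5, 7} into the index list {2, 0}.
 */
#include <stdio.h>

int
main(void)
{
	int			group_refs[] = {7, 3, 5};	/* tleSortGroupRef per clause */
	int			gset[] = {5, 7};			/* one grouping set, by ref */
	int			map[8];						/* ref -> 0-based column index */

	for (int i = 0; i < 3; i++)
		map[group_refs[i]] = i;

	for (int i = 0; i < 2; i++)
		printf("%d ", map[gset[i]]);		/* prints "2 0" */
	printf("\n");
	return 0;
}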
2316
2317
2318/*
2319 * preprocess_rowmarks - set up PlanRowMarks if needed
2320 */
2321static void
2322preprocess_rowmarks(PlannerInfo *root)
2323{
2324 Query *parse = root->parse;
2325 Bitmapset *rels;
2326 List *prowmarks;
2327 ListCell *l;
2328 int i;
2329
2330 if (parse->rowMarks)
2331 {
2332 /*
2333 * We've got trouble if FOR [KEY] UPDATE/SHARE appears inside
2334 * grouping, since grouping renders a reference to individual tuple
2335 * CTIDs invalid. This is also checked at parse time, but that's
2336 * insufficient because of rule substitution, query pullup, etc.
2337 */
2338 CheckSelectLocking(parse, linitial_node(RowMarkClause,
2339 parse->rowMarks)->strength);
2340 }
2341 else
2342 {
2343 /*
2344 * We only need rowmarks for UPDATE, DELETE, MERGE, or FOR [KEY]
2345 * UPDATE/SHARE.
2346 */
2347 if (parse->commandType != CMD_UPDATE &&
2348 parse->commandType != CMD_DELETE &&
2349 parse->commandType != CMD_MERGE)
2350 return;
2351 }
2352
2353 /*
2354 * We need to have rowmarks for all base relations except the target. We
2355 * make a bitmapset of all base rels and then remove the items we don't
2356 * need or have FOR [KEY] UPDATE/SHARE marks for.
2357 */
2358 rels = get_relids_in_jointree((Node *) parse->jointree, false, false);
2359 if (parse->resultRelation)
2360 rels = bms_del_member(rels, parse->resultRelation);
2361
2362 /*
2363 * Convert RowMarkClauses to PlanRowMark representation.
2364 */
2365 prowmarks = NIL;
2366 foreach(l, parse->rowMarks)
2367 {
2368 RowMarkClause *rc = lfirst_node(RowMarkClause, l);
2369 RangeTblEntry *rte = rt_fetch(rc->rti, parse->rtable);
2370 PlanRowMark *newrc;
2371
2372 /*
2373 * Currently, it is syntactically impossible to have FOR UPDATE et al
2374 * applied to an update/delete target rel. If that ever becomes
2375 * possible, we should drop the target from the PlanRowMark list.
2376 */
2377 Assert(rc->rti != parse->resultRelation);
2378
2379 /*
2380 * Ignore RowMarkClauses for subqueries; they aren't real tables and
2381 * can't support true locking. Subqueries that got flattened into the
2382 * main query should be ignored completely. Any that didn't will get
2383 * ROW_MARK_COPY items in the next loop.
2384 */
2385 if (rte->rtekind != RTE_RELATION)
2386 continue;
2387
2388 rels = bms_del_member(rels, rc->rti);
2389
2390 newrc = makeNode(PlanRowMark);
2391 newrc->rti = newrc->prti = rc->rti;
2392 newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2393 newrc->markType = select_rowmark_type(rte, rc->strength);
2394 newrc->allMarkTypes = (1 << newrc->markType);
2395 newrc->strength = rc->strength;
2396 newrc->waitPolicy = rc->waitPolicy;
2397 newrc->isParent = false;
2398
2399 prowmarks = lappend(prowmarks, newrc);
2400 }
2401
2402 /*
2403 * Now, add rowmarks for any non-target, non-locked base relations.
2404 */
2405 i = 0;
2406 foreach(l, parse->rtable)
2407 {
2408 RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
2409 PlanRowMark *newrc;
2410
2411 i++;
2412 if (!bms_is_member(i, rels))
2413 continue;
2414
2415 newrc = makeNode(PlanRowMark);
2416 newrc->rti = newrc->prti = i;
2417 newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2418 newrc->markType = select_rowmark_type(rte, LCS_NONE);
2419 newrc->allMarkTypes = (1 << newrc->markType);
2420 newrc->strength = LCS_NONE;
2421 newrc->waitPolicy = LockWaitBlock; /* doesn't matter */
2422 newrc->isParent = false;
2423
2424 prowmarks = lappend(prowmarks, newrc);
2425 }
2426
2427 root->rowMarks = prowmarks;
2428}
2429
2430/*
2431 * Select RowMarkType to use for a given table
2432 */
2433RowMarkType
2434select_rowmark_type(RangeTblEntry *rte, LockClauseStrength strength)
2435{
2436 if (rte->rtekind != RTE_RELATION)
2437 {
2438 /* If it's not a table at all, use ROW_MARK_COPY */
2439 return ROW_MARK_COPY;
2440 }
2441 else if (rte->relkind == RELKIND_FOREIGN_TABLE)
2442 {
2443 /* Let the FDW select the rowmark type, if it wants to */
2444 FdwRoutine *fdwroutine = GetFdwRoutineByRelId(rte->relid);
2445
2446 if (fdwroutine->GetForeignRowMarkType != NULL)
2447 return fdwroutine->GetForeignRowMarkType(rte, strength);
2448 /* Otherwise, use ROW_MARK_COPY by default */
2449 return ROW_MARK_COPY;
2450 }
2451 else
2452 {
2453 /* Regular table, apply the appropriate lock type */
2454 switch (strength)
2455 {
2456 case LCS_NONE:
2457
2458 /*
2459 * We don't need a tuple lock, only the ability to re-fetch
2460 * the row.
2461 */
2462 return ROW_MARK_REFERENCE;
2463 break;
2464 case LCS_FORKEYSHARE:
2465 return ROW_MARK_KEYSHARE;
2466 break;
2467 case LCS_FORSHARE:
2468 return ROW_MARK_SHARE;
2469 break;
2470 case LCS_FORNOKEYUPDATE:
2471 return ROW_MARK_NOKEYEXCLUSIVE;
2472 break;
2473 case LCS_FORUPDATE:
2474 return ROW_MARK_EXCLUSIVE;
2475 break;
2476 }
2477 elog(ERROR, "unrecognized LockClauseStrength %d", (int) strength);
2478 return ROW_MARK_EXCLUSIVE; /* keep compiler quiet */
2479 }
2480}
2481
2482/*
2483 * preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses
2484 *
2485 * We try to estimate the values of the LIMIT/OFFSET clauses, and pass the
2486 * results back in *count_est and *offset_est. These variables are set to
2487 * 0 if the corresponding clause is not present, and -1 if it's present
2488 * but we couldn't estimate the value for it. (The "0" convention is OK
2489 * for OFFSET but a little bit bogus for LIMIT: effectively we estimate
2490 * LIMIT 0 as though it were LIMIT 1. But this is in line with the planner's
2491 * usual practice of never estimating less than one row.) These values will
2492 * be passed to create_limit_path, which see if you change this code.
2493 *
2494 * The return value is the suitably adjusted tuple_fraction to use for
2495 * planning the query. This adjustment is not overridable, since it reflects
2496 * plan actions that grouping_planner() will certainly take, not assumptions
2497 * about context.
2498 */
2499static double
2500preprocess_limit(PlannerInfo *root, double tuple_fraction,
2501 int64 *offset_est, int64 *count_est)
2502{
2503 Query *parse = root->parse;
2504 Node *est;
2505 double limit_fraction;
2506
2507 /* Should not be called unless LIMIT or OFFSET */
2508 Assert(parse->limitCount || parse->limitOffset);
2509
2510 /*
2511 * Try to obtain the clause values. We use estimate_expression_value
2512 * primarily because it can sometimes do something useful with Params.
2513 */
2514 if (parse->limitCount)
2515 {
2516 est = estimate_expression_value(root, parse->limitCount);
2517 if (est && IsA(est, Const))
2518 {
2519 if (((Const *) est)->constisnull)
2520 {
2521 /* NULL indicates LIMIT ALL, ie, no limit */
2522 *count_est = 0; /* treat as not present */
2523 }
2524 else
2525 {
2526 *count_est = DatumGetInt64(((Const *) est)->constvalue);
2527 if (*count_est <= 0)
2528 *count_est = 1; /* force to at least 1 */
2529 }
2530 }
2531 else
2532 *count_est = -1; /* can't estimate */
2533 }
2534 else
2535 *count_est = 0; /* not present */
2536
2537 if (parse->limitOffset)
2538 {
2539 est = estimate_expression_value(root, parse->limitOffset);
2540 if (est && IsA(est, Const))
2541 {
2542 if (((Const *) est)->constisnull)
2543 {
2544 /* Treat NULL as no offset; the executor will too */
2545 *offset_est = 0; /* treat as not present */
2546 }
2547 else
2548 {
2549 *offset_est = DatumGetInt64(((Const *) est)->constvalue);
2550 if (*offset_est < 0)
2551 *offset_est = 0; /* treat as not present */
2552 }
2553 }
2554 else
2555 *offset_est = -1; /* can't estimate */
2556 }
2557 else
2558 *offset_est = 0; /* not present */
2559
2560 if (*count_est != 0)
2561 {
2562 /*
2563 * A LIMIT clause limits the absolute number of tuples returned.
2564 * However, if it's not a constant LIMIT then we have to guess; for
2565 * lack of a better idea, assume 10% of the plan's result is wanted.
2566 */
2567 if (*count_est < 0 || *offset_est < 0)
2568 {
2569 /* LIMIT or OFFSET is an expression ... punt ... */
2570 limit_fraction = 0.10;
2571 }
2572 else
2573 {
2574 /* LIMIT (plus OFFSET, if any) is max number of tuples needed */
2575 limit_fraction = (double) *count_est + (double) *offset_est;
2576 }
2577
2578 /*
2579 * If we have absolute limits from both caller and LIMIT, use the
2580 * smaller value; likewise if they are both fractional. If one is
2581 * fractional and the other absolute, we can't easily determine which
2582 * is smaller, but we use the heuristic that the absolute will usually
2583 * be smaller.
2584 */
2585 if (tuple_fraction >= 1.0)
2586 {
2587 if (limit_fraction >= 1.0)
2588 {
2589 /* both absolute */
2590 tuple_fraction = Min(tuple_fraction, limit_fraction);
2591 }
2592 else
2593 {
2594 /* caller absolute, limit fractional; use caller's value */
2595 }
2596 }
2597 else if (tuple_fraction > 0.0)
2598 {
2599 if (limit_fraction >= 1.0)
2600 {
2601 /* caller fractional, limit absolute; use limit */
2602 tuple_fraction = limit_fraction;
2603 }
2604 else
2605 {
2606 /* both fractional */
2607 tuple_fraction = Min(tuple_fraction, limit_fraction);
2608 }
2609 }
2610 else
2611 {
2612 /* no info from caller, just use limit */
2613 tuple_fraction = limit_fraction;
2614 }
2615 }
2616 else if (*offset_est != 0 && tuple_fraction > 0.0)
2617 {
2618 /*
2619 * We have an OFFSET but no LIMIT. This acts entirely differently
2620 * from the LIMIT case: here, we need to increase rather than decrease
2621 * the caller's tuple_fraction, because the OFFSET acts to cause more
2622 * tuples to be fetched instead of fewer. This only matters if we got
2623 * a tuple_fraction > 0, however.
2624 *
2625 * As above, use 10% if OFFSET is present but unestimatable.
2626 */
2627 if (*offset_est < 0)
2628 limit_fraction = 0.10;
2629 else
2630 limit_fraction = (double) *offset_est;
2631
2632 /*
2633 * If we have absolute counts from both caller and OFFSET, add them
2634 * together; likewise if they are both fractional. If one is
2635 * fractional and the other absolute, we want to take the larger, and
2636 * we heuristically assume that's the fractional one.
2637 */
2638 if (tuple_fraction >= 1.0)
2639 {
2640 if (limit_fraction >= 1.0)
2641 {
2642 /* both absolute, so add them together */
2643 tuple_fraction += limit_fraction;
2644 }
2645 else
2646 {
2647 /* caller absolute, limit fractional; use limit */
2648 tuple_fraction = limit_fraction;
2649 }
2650 }
2651 else
2652 {
2653 if (limit_fraction >= 1.0)
2654 {
2655 /* caller fractional, limit absolute; use caller's value */
2656 }
2657 else
2658 {
2659 /* both fractional, so add them together */
2660 tuple_fraction += limit_fraction;
2661 if (tuple_fraction >= 1.0)
2662 tuple_fraction = 0.0; /* assume fetch all */
2663 }
2664 }
2665 }
2666
2667 return tuple_fraction;
2668}
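As a worked example of the arithmetic above, here is a minimal standalone sketch (not part of planner.c; combine_limit_fraction is a hypothetical name): an estimable LIMIT plus OFFSET becomes an absolute row count, an unestimatable clause falls back to the 10% assumption, and the result is merged with the caller's tuple_fraction using the same rules as the LIMIT branch.

/*
 * Illustration only -- mirrors the LIMIT-branch combination rules.
 */
#include <stdio.h>
#include <stdint.h>

static double
combine_limit_fraction(double tuple_fraction, int64_t count_est, int64_t offset_est)
{
	double		limit_fraction;

	/* unestimatable LIMIT/OFFSET: assume 10% of the result is wanted */
	if (count_est < 0 || offset_est < 0)
		limit_fraction = 0.10;
	else
		limit_fraction = (double) count_est + (double) offset_est;

	if (tuple_fraction >= 1.0)
	{
		/* caller gave an absolute count: take the smaller absolute value */
		if (limit_fraction >= 1.0 && limit_fraction < tuple_fraction)
			tuple_fraction = limit_fraction;
		/* else keep the caller's absolute value */
	}
	else if (tuple_fraction > 0.0)
	{
		/* caller gave a fraction */
		if (limit_fraction >= 1.0)
			tuple_fraction = limit_fraction;	/* absolute limit wins */
		else if (limit_fraction < tuple_fraction)
			tuple_fraction = limit_fraction;	/* both fractional: smaller */
	}
	else
		tuple_fraction = limit_fraction;	/* no info from caller */

	return tuple_fraction;
}

int
main(void)
{
	/* LIMIT 20 OFFSET 10 with no caller hint => plan for ~30 rows */
	printf("%g\n", combine_limit_fraction(0.0, 20, 10));
	/* caller wants 10% of rows, LIMIT is a Param (unestimatable) => 0.1 */
	printf("%g\n", combine_limit_fraction(0.10, -1, 0));
	return 0;
}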
2669
2670/*
2671 * limit_needed - do we actually need a Limit plan node?
2672 *
2673 * If we have constant-zero OFFSET and constant-null LIMIT, we can skip adding
2674 * a Limit node. This is worth checking for because "OFFSET 0" is a common
2675 * locution for an optimization fence. (Because other places in the planner
2676 * merely check whether parse->limitOffset isn't NULL, it will still work as
2677 * an optimization fence --- we're just suppressing unnecessary run-time
2678 * overhead.)
2679 *
2680 * This might look like it could be merged into preprocess_limit, but there's
2681 * a key distinction: here we need hard constants in OFFSET/LIMIT, whereas
2682 * in preprocess_limit it's good enough to consider estimated values.
2683 */
2684bool
2685limit_needed(Query *parse)
2686{
2687 Node *node;
2688
2689 node = parse->limitCount;
2690 if (node)
2691 {
2692 if (IsA(node, Const))
2693 {
2694 /* NULL indicates LIMIT ALL, ie, no limit */
2695 if (!((Const *) node)->constisnull)
2696 return true; /* LIMIT with a constant value */
2697 }
2698 else
2699 return true; /* non-constant LIMIT */
2700 }
2701
2702 node = parse->limitOffset;
2703 if (node)
2704 {
2705 if (IsA(node, Const))
2706 {
2707 /* Treat NULL as no offset; the executor would too */
2708 if (!((Const *) node)->constisnull)
2709 {
2710 int64 offset = DatumGetInt64(((Const *) node)->constvalue);
2711
2712 if (offset != 0)
2713 return true; /* OFFSET with a nonzero value */
2714 }
2715 }
2716 else
2717 return true; /* non-constant OFFSET */
2718 }
2719
2720 return false; /* don't need a Limit plan node */
2721}
2722
2723/*
2724 * preprocess_groupclause - do preparatory work on GROUP BY clause
2725 *
2726 * The idea here is to adjust the ordering of the GROUP BY elements
2727 * (which in itself is semantically insignificant) to match ORDER BY,
2728 * thereby allowing a single sort operation to both implement the ORDER BY
2729 * requirement and set up for a Unique step that implements GROUP BY.
2730 * We also consider a partial match between GROUP BY and ORDER BY elements,
2731 * which can allow ORDER BY to be implemented using an incremental sort.
2732 *
2733 * We also consider other orderings of the GROUP BY elements, which could
2734 * match the sort ordering of other possible plans (eg an indexscan) and
2735 * thereby reduce cost. This is implemented during the generation of grouping
2736 * paths. See get_useful_group_keys_orderings() for details.
2737 *
2738 * Note: we need no comparable processing of the distinctClause because
2739 * the parser already enforced that that matches ORDER BY.
2740 *
2741 * Note: we return a fresh List, but its elements are the same
2742 * SortGroupClauses appearing in parse->groupClause. This is important
2743 * because later processing may modify the processed_groupClause list.
2744 *
2745 * For grouping sets, the order of items is instead forced to agree with that
2746 * of the grouping set (and items not in the grouping set are skipped). The
2747 * work of sorting the order of grouping set elements to match the ORDER BY if
2748 * possible is done elsewhere.
2749 */
2750static List *
2751preprocess_groupclause(PlannerInfo *root, List *force)
2752{
2753 Query *parse = root->parse;
2754 List *new_groupclause = NIL;
2755 ListCell *sl;
2756 ListCell *gl;
2757
2758 /* For grouping sets, we need to force the ordering */
2759 if (force)
2760 {
2761 foreach(sl, force)
2762 {
2763 Index ref = lfirst_int(sl);
2764 SortGroupClause *cl = get_sortgroupref_clause(ref, parse->groupClause);
2765
2766 new_groupclause = lappend(new_groupclause, cl);
2767 }
2768
2769 return new_groupclause;
2770 }
2771
2772 /* If no ORDER BY, nothing useful to do here */
2773 if (parse->sortClause == NIL)
2774 return list_copy(parse->groupClause);
2775
2776 /*
2777 * Scan the ORDER BY clause and construct a list of matching GROUP BY
2778 * items, but only as far as we can make a matching prefix.
2779 *
2780 * This code assumes that the sortClause contains no duplicate items.
2781 */
2782 foreach(sl, parse->sortClause)
2783 {
2784 SortGroupClause *sc = lfirst_node(SortGroupClause, sl);
2785
2786 foreach(gl, parse->groupClause)
2787 {
2788 SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
2789
2790 if (equal(gc, sc))
2791 {
2792 new_groupclause = lappend(new_groupclause, gc);
2793 break;
2794 }
2795 }
2796 if (gl == NULL)
2797 break; /* no match, so stop scanning */
2798 }
2799
2800
2801 /* If no match at all, no point in reordering GROUP BY */
2802 if (new_groupclause == NIL)
2803 return list_copy(parse->groupClause);
2804
2805 /*
2806 * Add any remaining GROUP BY items to the new list. We don't require a
2807 * complete match, because even partial match allows ORDER BY to be
2808 * implemented using incremental sort. Also, give up if there are any
2809 * non-sortable GROUP BY items, since then there's no hope anyway.
2810 */
2811 foreach(gl, parse->groupClause)
2812 {
2813 SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
2814
2815 if (list_member_ptr(new_groupclause, gc))
2816 continue; /* it matched an ORDER BY item */
2817 if (!OidIsValid(gc->sortop)) /* give up, GROUP BY can't be sorted */
2818 return list_copy(parse->groupClause);
2819 new_groupclause = lappend(new_groupclause, gc);
2820 }
2821
2822 /* Success --- install the rearranged GROUP BY list */
2823 Assert(list_length(parse->groupClause) == list_length(new_groupclause));
2824 return new_groupclause;
2825}
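A toy, standalone version of the reordering idea (illustration only, not planner.c code, operating on bare sortgroupref numbers instead of SortGroupClause nodes): ORDER BY refs {3, 1, 9} against GROUP BY refs {1, 2, 3} produce the reordered GROUP BY {3, 1, 2}, whose leading items match the usable prefix of the ORDER BY.

/*
 * Match a prefix of the ORDER BY, then append the remaining GROUP BY items.
 */
#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
	int			order_by[] = {3, 1, 9};
	int			group_by[] = {1, 2, 3};
	int			result[3];
	int			n = 0;

	/* take ORDER BY items that also appear in GROUP BY, as far as possible */
	for (int i = 0; i < 3; i++)
	{
		bool		found = false;

		for (int j = 0; j < 3; j++)
			if (group_by[j] == order_by[i])
				found = true;
		if (!found)
			break;
		result[n++] = order_by[i];
	}

	/* append the remaining GROUP BY items in their original order */
	for (int j = 0; j < 3; j++)
	{
		bool		used = false;

		for (int i = 0; i < n; i++)
			if (result[i] == group_by[j])
				used = true;
		if (!used)
			result[n++] = group_by[j];
	}

	for (int i = 0; i < 3; i++)
		printf("%d ", result[i]);	/* prints "3 1 2" */
	printf("\n");
	return 0;
}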
2826
2827/*
2828 * Extract lists of grouping sets that can be implemented using a single
2829 * rollup-type aggregate pass each. Returns a list of lists of grouping sets.
2830 *
2831 * Input must be sorted with smallest sets first. Result has each sublist
2832 * sorted with smallest sets first.
2833 *
2834 * We want to produce the absolute minimum possible number of lists here to
2835 * avoid excess sorts. Fortunately, there is an algorithm for this; the problem
2836 * of finding the minimal partition of a partially-ordered set into chains
2837 * (which is what we need, taking the list of grouping sets as a poset ordered
2838 * by set inclusion) can be mapped to the problem of finding the maximum
2839 * cardinality matching on a bipartite graph, which is solvable in polynomial
2840 * time with a worst case of no worse than O(n^2.5) and usually much
2841 * better. Since our N is at most 4096, we don't need to consider fallbacks to
2842 * heuristic or approximate methods. (Planning time for a 12-d cube is under
2843 * half a second on my modest system even with optimization off and assertions
2844 * on.)
2845 */
2846static List *
2847extract_rollup_sets(List *groupingSets)
2848{
2849 int num_sets_raw = list_length(groupingSets);
2850 int num_empty = 0;
2851 int num_sets = 0; /* distinct sets */
2852 int num_chains = 0;
2853 List *result = NIL;
2854 List **results;
2855 List **orig_sets;
2856 Bitmapset **set_masks;
2857 int *chains;
2858 short **adjacency;
2859 short *adjacency_buf;
2860 BipartiteMatchState *state;
2861 int i;
2862 int j;
2863 int j_size;
2864 ListCell *lc1 = list_head(groupingSets);
2865 ListCell *lc;
2866
2867 /*
2868 * Start by stripping out empty sets. The algorithm doesn't require this,
2869 * but the planner currently needs all empty sets to be returned in the
2870 * first list, so we strip them here and add them back after.
2871 */
2872 while (lc1 && lfirst(lc1) == NIL)
2873 {
2874 ++num_empty;
2875 lc1 = lnext(groupingSets, lc1);
2876 }
2877
2878 /* bail out now if it turns out that all we had were empty sets. */
2879 if (!lc1)
2880 return list_make1(groupingSets);
2881
2882 /*----------
2883 * We don't strictly need to remove duplicate sets here, but if we don't,
2884 * they tend to become scattered through the result, which is a bit
2885 * confusing (and irritating if we ever decide to optimize them out).
2886 * So we remove them here and add them back after.
2887 *
2888 * For each non-duplicate set, we fill in the following:
2889 *
2890 * orig_sets[i] = list of the original set lists
2891 * set_masks[i] = bitmapset for testing inclusion
2892 * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
2893 *
2894 * chains[i] will be the result group this set is assigned to.
2895 *
2896 * We index all of these from 1 rather than 0 because it is convenient
2897 * to leave 0 free for the NIL node in the graph algorithm.
2898 *----------
2899 */
2900 orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
2901 set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
2902 adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
2903 adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
2904
2905 j_size = 0;
2906 j = 0;
2907 i = 1;
2908
2909 for_each_cell(lc, groupingSets, lc1)
2910 {
2911 List *candidate = (List *) lfirst(lc);
2912 Bitmapset *candidate_set = NULL;
2913 ListCell *lc2;
2914 int dup_of = 0;
2915
2916 foreach(lc2, candidate)
2917 {
2918 candidate_set = bms_add_member(candidate_set, lfirst_int(lc2));
2919 }
2920
2921 /* we can only be a dup if we're the same length as a previous set */
2922 if (j_size == list_length(candidate))
2923 {
2924 int k;
2925
2926 for (k = j; k < i; ++k)
2927 {
2928 if (bms_equal(set_masks[k], candidate_set))
2929 {
2930 dup_of = k;
2931 break;
2932 }
2933 }
2934 }
2935 else if (j_size < list_length(candidate))
2936 {
2937 j_size = list_length(candidate);
2938 j = i;
2939 }
2940
2941 if (dup_of > 0)
2942 {
2943 orig_sets[dup_of] = lappend(orig_sets[dup_of], candidate);
2944 bms_free(candidate_set);
2945 }
2946 else
2947 {
2948 int k;
2949 int n_adj = 0;
2950
2951 orig_sets[i] = list_make1(candidate);
2952 set_masks[i] = candidate_set;
2953
2954 /* fill in adjacency list; no need to compare equal-size sets */
2955
2956 for (k = j - 1; k > 0; --k)
2957 {
2958 if (bms_is_subset(set_masks[k], candidate_set))
2959 adjacency_buf[++n_adj] = k;
2960 }
2961
2962 if (n_adj > 0)
2963 {
2964 adjacency_buf[0] = n_adj;
2965 adjacency[i] = palloc((n_adj + 1) * sizeof(short));
2966 memcpy(adjacency[i], adjacency_buf, (n_adj + 1) * sizeof(short));
2967 }
2968 else
2969 adjacency[i] = NULL;
2970
2971 ++i;
2972 }
2973 }
2974
2975 num_sets = i - 1;
2976
2977 /*
2978 * Apply the graph matching algorithm to do the work.
2979 */
2980 state = BipartiteMatch(num_sets, num_sets, adjacency);
2981
2982 /*
2983 * Now, the state->pair* fields have the info we need to assign sets to
2984 * chains. Two sets (u,v) belong to the same chain if pair_uv[u] = v or
2985 * pair_vu[v] = u (both will be true, but we check both so that we can do
2986 * it in one pass)
2987 */
2988 chains = palloc0((num_sets + 1) * sizeof(int));
2989
2990 for (i = 1; i <= num_sets; ++i)
2991 {
2992 int u = state->pair_vu[i];
2993 int v = state->pair_uv[i];
2994
2995 if (u > 0 && u < i)
2996 chains[i] = chains[u];
2997 else if (v > 0 && v < i)
2998 chains[i] = chains[v];
2999 else
3000 chains[i] = ++num_chains;
3001 }
3002
3003 /* build result lists. */
3004 results = palloc0((num_chains + 1) * sizeof(List *));
3005
3006 for (i = 1; i <= num_sets; ++i)
3007 {
3008 int c = chains[i];
3009
3010 Assert(c > 0);
3011
3012 results[c] = list_concat(results[c], orig_sets[i]);
3013 }
3014
3015 /* push any empty sets back on the first list. */
3016 while (num_empty-- > 0)
3017 results[1] = lcons(NIL, results[1]);
3018
3019 /* make result list */
3020 for (i = 1; i <= num_chains; ++i)
3021 result = lappend(result, results[i]);
3022
3023 /*
3024 * Free all the things.
3025 *
3026 * (This is over-fussy for small sets but for large sets we could have
3027 * tied up a nontrivial amount of memory.)
3028 */
3029 BipartiteMatchFree(state);
3030 pfree(results);
3031 pfree(chains);
3032 for (i = 1; i <= num_sets; ++i)
3033 if (adjacency[i])
3034 pfree(adjacency[i]);
3035 pfree(adjacency);
3036 pfree(adjacency_buf);
3037 pfree(orig_sets);
3038 for (i = 1; i <= num_sets; ++i)
3039 bms_free(set_masks[i]);
3040 pfree(set_masks);
3041
3042 return result;
3043}
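To make the chain decomposition concrete, here is an illustration-only sketch (not planner.c code, and it uses a simple greedy assignment rather than the bipartite-matching algorithm above, so it is not guaranteed minimal in general): for the smallest-first sets {b}, {c}, {a,b}, {a,b,c} it yields two chains, {b} then {a,b} then {a,b,c}, and {c} on its own, each of which one sorted rollup pass can cover.

/*
 * Greedy chain assignment over bitmask-encoded grouping sets.
 * bit 0 = a, bit 1 = b, bit 2 = c; input is sorted smallest-first.
 */
#include <stdio.h>

#define NSETS 4

int
main(void)
{
	unsigned	sets[NSETS] = {0x2 /* b */, 0x4 /* c */,
							   0x3 /* a,b */, 0x7 /* a,b,c */};
	int			chain[NSETS];
	unsigned	chain_top[NSETS];	/* largest set currently in each chain */
	int			nchains = 0;

	for (int i = 0; i < NSETS; i++)
	{
		int			c = -1;

		/* extend the first chain whose top set is a subset of sets[i] */
		for (int j = 0; j < nchains; j++)
			if ((chain_top[j] & ~sets[i]) == 0)
			{
				c = j;
				break;
			}
		if (c < 0)
			c = nchains++;		/* no extensible chain: start a new one */
		chain[i] = c;
		chain_top[c] = sets[i];
	}

	for (int i = 0; i < NSETS; i++)
		printf("set 0x%x -> chain %d\n", sets[i], chain[i]);
	return 0;
}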
3044
3045/*
3046 * Reorder the elements of a list of grouping sets such that they have correct
3047 * prefix relationships. Also inserts the GroupingSetData annotations.
3048 *
3049 * The input must be ordered with smallest sets first; the result is returned
3050 * with largest sets first. Note that the result shares no list substructure
3051 * with the input, so it's safe for the caller to modify it later.
3052 *
3053 * If we're passed in a sortclause, we follow its order of columns to the
3054 * extent possible, to minimize the chance that we add unnecessary sorts.
3055 * (We're trying here to ensure that GROUPING SETS ((a,b,c),(c)) ORDER BY c,b,a
3056 * gets implemented in one pass.)
3057 */
3058static List *
3059reorder_grouping_sets(List *groupingSets, List *sortclause)
3060{
3061 ListCell *lc;
3062 List *previous = NIL;
3063 List *result = NIL;
3064
3065 foreach(lc, groupingSets)
3066 {
3067 List *candidate = (List *) lfirst(lc);
3068 List *new_elems = list_difference_int(candidate, previous);
3069 GroupingSetData *gs = makeNode(GroupingSetData);
3070
3071 while (list_length(sortclause) > list_length(previous) &&
3072 new_elems != NIL)
3073 {
3074 SortGroupClause *sc = list_nth(sortclause, list_length(previous));
3075 int ref = sc->tleSortGroupRef;
3076
3077 if (list_member_int(new_elems, ref))
3078 {
3079 previous = lappend_int(previous, ref);
3080 new_elems = list_delete_int(new_elems, ref);
3081 }
3082 else
3083 {
3084 /* diverged from the sortclause; give up on it */
3085 sortclause = NIL;
3086 break;
3087 }
3088 }
3089
3090 previous = list_concat(previous, new_elems);
3091
3092 gs->set = list_copy(previous);
3093 result = lcons(gs, result);
3094 }
3095
3096 list_free(previous);
3097
3098 return result;
3099}
3100
3101/*
3102 * has_volatile_pathkey
3103 * Returns true if any PathKey in 'keys' has an EquivalenceClass
3104 * containing a volatile function. Otherwise returns false.
3105 */
3106static bool
3107has_volatile_pathkey(List *keys)
3108{
3109 ListCell *lc;
3110
3111 foreach(lc, keys)
3112 {
3113 PathKey *pathkey = lfirst_node(PathKey, lc);
3114
3115 if (pathkey->pk_eclass->ec_has_volatile)
3116 return true;
3117 }
3118
3119 return false;
3120}
3121
3122/*
3123 * adjust_group_pathkeys_for_groupagg
3124 * Add pathkeys to root->group_pathkeys to reflect the best set of
3125 * pre-ordered input for ordered aggregates.
3126 *
3127 * We define "best" as the pathkeys that suit the largest number of
3128 * aggregate functions. We find these by looking at the first ORDER BY /
3129 * DISTINCT aggregate and take the pathkeys for that before searching for
3130 * other aggregates that require the same or a more strict variation of the
3131 * same pathkeys. We then repeat that process for any remaining aggregates
3132 * with different pathkeys and if we find another set of pathkeys that suits a
3133 * larger number of aggregates then we select those pathkeys instead.
3134 *
3135 * When the best pathkeys are found we also mark each Aggref that can use
3136 * those pathkeys as aggpresorted = true.
3137 *
3138 * Note: When an aggregate function's ORDER BY / DISTINCT clause contains any
3139 * volatile functions, we never make use of these pathkeys. We want to ensure
3140 * that sorts using volatile functions are done independently in each Aggref
3141 * rather than once at the query level. If we were to allow this then Aggrefs
3142 * with compatible sort orders would all transition their rows in the same
3143 * order if those pathkeys were deemed to be the best pathkeys to sort on.
3144 * Whereas, if some other set of Aggref's pathkeys happened to be deemed
3145 * better pathkeys to sort on, then the volatile function Aggrefs would be
3146 * left to perform their sorts individually. To avoid this inconsistent
3147 * behavior which could make Aggref results depend on what other Aggrefs the
3148 * query contains, we always force Aggrefs with volatile functions to perform
3149 * their own sorts.
3150 */
3151static void
3152adjust_group_pathkeys_for_groupagg(PlannerInfo *root)
3153{
3154 List *grouppathkeys = root->group_pathkeys;
3155 List *bestpathkeys;
3156 Bitmapset *bestaggs;
3157 Bitmapset *unprocessed_aggs;
3158 ListCell *lc;
3159 int i;
3160
3161 /* Shouldn't be here if there are grouping sets */
3162 Assert(root->parse->groupingSets == NIL);
3163 /* Shouldn't be here unless there are some ordered aggregates */
3164 Assert(root->numOrderedAggs > 0);
3165
3166 /* Do nothing if disabled */
3167 if (!enable_presorted_aggregate)
3168 return;
3169
3170 /*
3171 * Make a first pass over all AggInfos to collect a Bitmapset containing
3172 * the indexes of all AggInfos to be processed below.
3173 */
3174 unprocessed_aggs = NULL;
3175 foreach(lc, root->agginfos)
3176 {
3177 AggInfo *agginfo = lfirst_node(AggInfo, lc);
3178 Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3179
3180 if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
3181 continue;
3182
3183 /* only add aggregates with a DISTINCT or ORDER BY */
3184 if (aggref->aggdistinct != NIL || aggref->aggorder != NIL)
3185 unprocessed_aggs = bms_add_member(unprocessed_aggs,
3186 foreach_current_index(lc));
3187 }
3188
3189 /*
3190 * Now process all the unprocessed_aggs to find the best set of pathkeys
3191 * for the given set of aggregates.
3192 *
3193 * On the first outer loop here 'bestaggs' will be empty. We'll populate
3194 * this during the first loop using the pathkeys for the very first
3195 * AggInfo then taking any stronger pathkeys from any other AggInfos with
3196 * a more strict set of compatible pathkeys. Once the outer loop is
3197 * complete, we mark off all the aggregates with compatible pathkeys then
3198 * remove those from the unprocessed_aggs and repeat the process to try to
3199 * find another set of pathkeys that are suitable for a larger number of
3200 * aggregates. The outer loop will stop when there are not enough
3201 * unprocessed aggregates for it to be possible to find a set of pathkeys
3202 * to suit a larger number of aggregates.
3203 */
3204 bestpathkeys = NIL;
3205 bestaggs = NULL;
3206 while (bms_num_members(unprocessed_aggs) > bms_num_members(bestaggs))
3207 {
3208 Bitmapset *aggindexes = NULL;
3209 List *currpathkeys = NIL;
3210
3211 i = -1;
3212 while ((i = bms_next_member(unprocessed_aggs, i)) >= 0)
3213 {
3214 AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3215 Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3216 List *sortlist;
3217 List *pathkeys;
3218
3219 if (aggref->aggdistinct != NIL)
3220 sortlist = aggref->aggdistinct;
3221 else
3222 sortlist = aggref->aggorder;
3223
3224 pathkeys = make_pathkeys_for_sortclauses(root, sortlist,
3225 aggref->args);
3226
3227 /*
3228 * Ignore Aggrefs which have volatile functions in their ORDER BY
3229 * or DISTINCT clause.
3230 */
3231 if (has_volatile_pathkey(pathkeys))
3232 {
3233 unprocessed_aggs = bms_del_member(unprocessed_aggs, i);
3234 continue;
3235 }
3236
3237 /*
3238 * When not set yet, take the pathkeys from the first unprocessed
3239 * aggregate.
3240 */
3241 if (currpathkeys == NIL)
3242 {
3243 currpathkeys = pathkeys;
3244
3245 /* include the GROUP BY pathkeys, if they exist */
3246 if (grouppathkeys != NIL)
3247 currpathkeys = append_pathkeys(list_copy(grouppathkeys),
3248 currpathkeys);
3249
3250 /* record that we found pathkeys for this aggregate */
3251 aggindexes = bms_add_member(aggindexes, i);
3252 }
3253 else
3254 {
3255 /* now look for a stronger set of matching pathkeys */
3256
3257 /* include the GROUP BY pathkeys, if they exist */
3258 if (grouppathkeys != NIL)
3259 pathkeys = append_pathkeys(list_copy(grouppathkeys),
3260 pathkeys);
3261
3262 /* are 'pathkeys' compatible or better than 'currpathkeys'? */
3263 switch (compare_pathkeys(currpathkeys, pathkeys))
3264 {
3265 case PATHKEYS_BETTER2:
3266 /* 'pathkeys' are stronger, use these ones instead */
3267 currpathkeys = pathkeys;
3268 /* FALLTHROUGH */
3269
3270 case PATHKEYS_BETTER1:
3271 /* 'pathkeys' are less strict */
3272 /* FALLTHROUGH */
3273
3274 case PATHKEYS_EQUAL:
3275 /* mark this aggregate as covered by 'currpathkeys' */
3276 aggindexes = bms_add_member(aggindexes, i);
3277 break;
3278
3279 case PATHKEYS_DIFFERENT:
3280 break;
3281 }
3282 }
3283 }
3284
3285 /* remove the aggregates that we've just processed */
3286 unprocessed_aggs = bms_del_members(unprocessed_aggs, aggindexes);
3287
3288 /*
3289 * If this pass included more aggregates than the previous best then
3290 * use these ones as the best set.
3291 */
3292 if (bms_num_members(aggindexes) > bms_num_members(bestaggs))
3293 {
3294 bestaggs = aggindexes;
3295 bestpathkeys = currpathkeys;
3296 }
3297 }
3298
3299 /*
3300 * If we found any ordered aggregates, update root->group_pathkeys to add
3301 * the best set of aggregate pathkeys. Note that bestpathkeys includes
3302 * the original GROUP BY pathkeys already.
3303 */
3304 if (bestpathkeys != NIL)
3305 root->group_pathkeys = bestpathkeys;
3306
3307 /*
3308 * Now that we've found the best set of aggregates we can set the
3309 * presorted flag to indicate to the executor that it needn't bother
3310 * performing a sort for these Aggrefs. We're able to do this now as
3311 * there's no chance of a Hash Aggregate plan as create_grouping_paths
3312 * will not mark the GROUP BY as GROUPING_CAN_USE_HASH due to the presence
3313 * of ordered aggregates.
3314 */
3315 i = -1;
3316 while ((i = bms_next_member(bestaggs, i)) >= 0)
3317 {
3318 AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3319
3320 foreach(lc, agginfo->aggrefs)
3321 {
3322 Aggref *aggref = lfirst_node(Aggref, lc);
3323
3324 aggref->aggpresorted = true;
3325 }
3326 }
3327}
3328
3329/*
3330 * Compute query_pathkeys and other pathkeys during plan generation
3331 */
3332static void
3333standard_qp_callback(PlannerInfo *root, void *extra)
3334{
3335 Query *parse = root->parse;
3336 standard_qp_extra *qp_extra = (standard_qp_extra *) extra;
3337 List *tlist = root->processed_tlist;
3338 List *activeWindows = qp_extra->activeWindows;
3339
3340 /*
3341 * Calculate pathkeys that represent grouping/ordering and/or ordered
3342 * aggregate requirements.
3343 */
3344 if (qp_extra->gset_data)
3345 {
3346 /*
3347 * With grouping sets, just use the first RollupData's groupClause. We
3348 * don't make any effort to optimize grouping clauses when there are
3349 * grouping sets, nor can we combine aggregate ordering keys with
3350 * grouping.
3351 */
3352 List *rollups = qp_extra->gset_data->rollups;
3353 List *groupClause = (rollups ? linitial_node(RollupData, rollups)->groupClause : NIL);
3354
3355 if (grouping_is_sortable(groupClause))
3356 {
3357 bool sortable;
3358
3359 /*
3360 * The groupClause is logically below the grouping step. So if
3361 * there is an RTE entry for the grouping step, we need to remove
3362 * its RT index from the sort expressions before we make PathKeys
3363 * for them.
3364 */
3365 root->group_pathkeys =
3366 make_pathkeys_for_sortclauses_extended(root,
3367 &groupClause,
3368 tlist,
3369 false,
3370 parse->hasGroupRTE,
3371 &sortable,
3372 false);
3373 Assert(sortable);
3374 root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3375 }
3376 else
3377 {
3378 root->group_pathkeys = NIL;
3379 root->num_groupby_pathkeys = 0;
3380 }
3381 }
3382 else if (parse->groupClause || root->numOrderedAggs > 0)
3383 {
3384 /*
3385 * With a plain GROUP BY list, we can remove any grouping items that
3386 * are proven redundant by EquivalenceClass processing. For example,
3387 * we can remove y given "WHERE x = y GROUP BY x, y". These aren't
3388 * especially common cases, but they're nearly free to detect. Note
3389 * that we remove redundant items from processed_groupClause but not
3390 * the original parse->groupClause.
3391 */
3392 bool sortable;
3393
3394 /*
3395 * Convert group clauses into pathkeys. Set the ec_sortref field of
3396 * EquivalenceClass'es if it's not set yet.
3397 */
3398 root->group_pathkeys =
3399 make_pathkeys_for_sortclauses_extended(root,
3400 &root->processed_groupClause,
3401 tlist,
3402 true,
3403 false,
3404 &sortable,
3405 true);
3406 if (!sortable)
3407 {
3408 /* Can't sort; no point in considering aggregate ordering either */
3409 root->group_pathkeys = NIL;
3410 root->num_groupby_pathkeys = 0;
3411 }
3412 else
3413 {
3414 root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3415 /* If we have ordered aggs, consider adding onto group_pathkeys */
3416 if (root->numOrderedAggs > 0)
3417 adjust_group_pathkeys_for_groupagg(root);
3418 }
3419 }
3420 else
3421 {
3422 root->group_pathkeys = NIL;
3423 root->num_groupby_pathkeys = 0;
3424 }
3425
3426 /* We consider only the first (bottom) window in pathkeys logic */
3427 if (activeWindows != NIL)
3428 {
3429 WindowClause *wc = linitial_node(WindowClause, activeWindows);
3430
3431 root->window_pathkeys = make_pathkeys_for_window(root,
3432 wc,
3433 tlist);
3434 }
3435 else
3436 root->window_pathkeys = NIL;
3437
3438 /*
3439 * As with GROUP BY, we can discard any DISTINCT items that are proven
3440 * redundant by EquivalenceClass processing. The non-redundant list is
3441 * kept in root->processed_distinctClause, leaving the original
3442 * parse->distinctClause alone.
3443 */
3444 if (parse->distinctClause)
3445 {
3446 bool sortable;
3447
3448 /* Make a copy since pathkey processing can modify the list */
3449 root->processed_distinctClause = list_copy(parse->distinctClause);
3450 root->distinct_pathkeys =
3451 make_pathkeys_for_sortclauses_extended(root,
3452 &root->processed_distinctClause,
3453 tlist,
3454 true,
3455 false,
3456 &sortable,
3457 false);
3458 if (!sortable)
3459 root->distinct_pathkeys = NIL;
3460 }
3461 else
3462 root->distinct_pathkeys = NIL;
3463
3464 root->sort_pathkeys =
3465 make_pathkeys_for_sortclauses(root,
3466 parse->sortClause,
3467 tlist);
3468
3469 /* setting setop_pathkeys might be useful to the union planner */
3470 if (qp_extra->setop != NULL)
3471 {
3472 List *groupClauses;
3473 bool sortable;
3474
3475 groupClauses = generate_setop_child_grouplist(qp_extra->setop, tlist);
3476
3477 root->setop_pathkeys =
3478 make_pathkeys_for_sortclauses_extended(root,
3479 &groupClauses,
3480 tlist,
3481 false,
3482 false,
3483 &sortable,
3484 false);
3485 if (!sortable)
3486 root->setop_pathkeys = NIL;
3487 }
3488 else
3489 root->setop_pathkeys = NIL;
3490
3491 /*
3492 * Figure out whether we want a sorted result from query_planner.
3493 *
3494 * If we have a sortable GROUP BY clause, then we want a result sorted
3495 * properly for grouping. Otherwise, if we have window functions to
3496 * evaluate, we try to sort for the first window. Otherwise, if there's a
3497 * sortable DISTINCT clause that's more rigorous than the ORDER BY clause,
3498 * we try to produce output that's sufficiently well sorted for the
3499 * DISTINCT. Otherwise, if there is an ORDER BY clause, we want to sort
3500 * by the ORDER BY clause. Otherwise, if we're a subquery being planned
3501 * for a set operation which can benefit from presorted results and have a
3502 * sortable targetlist, we want to sort by the target list.
3503 *
3504 * Note: if we have both ORDER BY and GROUP BY, and ORDER BY is a superset
3505 * of GROUP BY, it would be tempting to request sort by ORDER BY --- but
3506 * that might just leave us failing to exploit an available sort order at
3507 * all. Needs more thought. The choice for DISTINCT versus ORDER BY is
3508 * much easier, since we know that the parser ensured that one is a
3509 * superset of the other.
3510 */
3511 if (root->group_pathkeys)
3512 root->query_pathkeys = root->group_pathkeys;
3513 else if (root->window_pathkeys)
3514 root->query_pathkeys = root->window_pathkeys;
3515 else if (list_length(root->distinct_pathkeys) >
3516 list_length(root->sort_pathkeys))
3517 root->query_pathkeys = root->distinct_pathkeys;
3518 else if (root->sort_pathkeys)
3519 root->query_pathkeys = root->sort_pathkeys;
3520 else if (root->setop_pathkeys != NIL)
3521 root->query_pathkeys = root->setop_pathkeys;
3522 else
3523 root->query_pathkeys = NIL;
3524}
3525
3526/*
3527 * Estimate number of groups produced by grouping clauses (1 if not grouping)
3528 *
3529 * path_rows: number of output rows from scan/join step
3530 * gd: grouping sets data including list of grouping sets and their clauses
3531 * target_list: target list containing group clause references
3532 *
3533 * If doing grouping sets, we also annotate the gsets data with the estimates
3534 * for each set and each individual rollup list, with a view to later
3535 * determining whether some combination of them could be hashed instead.
3536 */
3537static double
3538get_number_of_groups(PlannerInfo *root,
3539 double path_rows,
3540 grouping_sets_data *gd,
3541 List *target_list)
3542{
3543 Query *parse = root->parse;
3544 double dNumGroups;
3545
3546 if (parse->groupClause)
3547 {
3548 List *groupExprs;
3549
3550 if (parse->groupingSets)
3551 {
3552 /* Add up the estimates for each grouping set */
3553 ListCell *lc;
3554
3555 Assert(gd); /* keep Coverity happy */
3556
3557 dNumGroups = 0;
3558
3559 foreach(lc, gd->rollups)
3560 {
3561 RollupData *rollup = lfirst_node(RollupData, lc);
3562 ListCell *lc2;
3563 ListCell *lc3;
3564
3565 groupExprs = get_sortgrouplist_exprs(rollup->groupClause,
3566 target_list);
3567
3568 rollup->numGroups = 0.0;
3569
3570 forboth(lc2, rollup->gsets, lc3, rollup->gsets_data)
3571 {
3572 List *gset = (List *) lfirst(lc2);
3573 GroupingSetData *gs = lfirst_node(GroupingSetData, lc3);
3574 double numGroups = estimate_num_groups(root,
3575 groupExprs,
3576 path_rows,
3577 &gset,
3578 NULL);
3579
3580 gs->numGroups = numGroups;
3581 rollup->numGroups += numGroups;
3582 }
3583
3584 dNumGroups += rollup->numGroups;
3585 }
3586
3587 if (gd->hash_sets_idx)
3588 {
3589 ListCell *lc2;
3590
3591 gd->dNumHashGroups = 0;
3592
3593 groupExprs = get_sortgrouplist_exprs(parse->groupClause,
3594 target_list);
3595
3596 forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets)
3597 {
3598 List *gset = (List *) lfirst(lc);
3599 GroupingSetData *gs = lfirst_node(GroupingSetData, lc2);
3600 double numGroups = estimate_num_groups(root,
3601 groupExprs,
3602 path_rows,
3603 &gset,
3604 NULL);
3605
3606 gs->numGroups = numGroups;
3607 gd->dNumHashGroups += numGroups;
3608 }
3609
3610 dNumGroups += gd->dNumHashGroups;
3611 }
3612 }
3613 else
3614 {
3615 /* Plain GROUP BY -- estimate based on optimized groupClause */
3616 groupExprs = get_sortgrouplist_exprs(root->processed_groupClause,
3617 target_list);
3618
3619 dNumGroups = estimate_num_groups(root, groupExprs, path_rows,
3620 NULL, NULL);
3621 }
3622 }
3623 else if (parse->groupingSets)
3624 {
3625 /* Empty grouping sets ... one result row for each one */
3626 dNumGroups = list_length(parse->groupingSets);
3627 }
3628 else if (parse->hasAggs || root->hasHavingQual)
3629 {
3630 /* Plain aggregation, one result row */
3631 dNumGroups = 1;
3632 }
3633 else
3634 {
3635 /* Not grouping */
3636 dNumGroups = 1;
3637 }
3638
3639 return dNumGroups;
3640}
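/*
 * Worked example (illustrative): for GROUP BY GROUPING SETS ((a, b), (a), ()),
 * the code above estimates each grouping set separately, e.g. 1000 groups for
 * (a, b), 100 for (a), and 1 for the empty set, and returns their sum (1101),
 * with the per-set and per-rollup figures saved in the gd structure for the
 * later hash-versus-sort decisions.
 */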
3641
3642/*
3643 * create_grouping_paths
3644 *
3645 * Build a new upperrel containing Paths for grouping and/or aggregation.
3646 * Along the way, we also build an upperrel for Paths which are partially
3647 * grouped and/or aggregated. A partially grouped and/or aggregated path
3648 * needs a FinalizeAggregate node to complete the aggregation. Currently,
3649 * the only partially grouped paths we build are also partial paths; that
3650 * is, they need a Gather and then a FinalizeAggregate.
3651 *
3652 * input_rel: contains the source-data Paths
3653 * target: the pathtarget for the result Paths to compute
3654 * gd: grouping sets data including list of grouping sets and their clauses
3655 *
3656 * Note: all Paths in input_rel are expected to return the target computed
3657 * by make_group_input_target.
3658 */
3659static RelOptInfo *
3660create_grouping_paths(PlannerInfo *root,
3661 RelOptInfo *input_rel,
3662 PathTarget *target,
3663 bool target_parallel_safe,
3664 grouping_sets_data *gd)
3665{
3666 Query *parse = root->parse;
3667 RelOptInfo *grouped_rel;
3668 RelOptInfo *partially_grouped_rel;
3669 AggClauseCosts agg_costs;
3670
3671 MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
3672 get_agg_clause_costs(root, AGGSPLIT_SIMPLE, &agg_costs);
3673
3674 /*
3675 * Create grouping relation to hold fully aggregated grouping and/or
3676 * aggregation paths.
3677 */
3678 grouped_rel = make_grouping_rel(root, input_rel, target,
3679 target_parallel_safe, parse->havingQual);
3680
3681 /*
3682 * Create either paths for a degenerate grouping or paths for ordinary
3683 * grouping, as appropriate.
3684 */
3685 if (is_degenerate_grouping(root))
3686 create_degenerate_grouping_paths(root, input_rel, grouped_rel);
3687 else
3688 {
3689 int flags = 0;
3690 GroupPathExtraData extra;
3691
3692 /*
3693 * Determine whether it's possible to perform sort-based
3694 * implementations of grouping. (Note that if processed_groupClause
3695 * is empty, grouping_is_sortable() is trivially true, and all the
3696 * pathkeys_contained_in() tests will succeed too, so that we'll
3697 * consider every surviving input path.)
3698 *
3699 * If we have grouping sets, we might be able to sort some but not all
3700 * of them; in this case, we need can_sort to be true as long as we
3701 * must consider any sorted-input plan.
3702 */
3703 if ((gd && gd->rollups != NIL)
3704 || grouping_is_sortable(root->processed_groupClause))
3705 flags |= GROUPING_CAN_USE_SORT;
3706
3707 /*
3708 * Determine whether we should consider hash-based implementations of
3709 * grouping.
3710 *
3711 * Hashed aggregation only applies if we're grouping. If we have
3712 * grouping sets, some groups might be hashable but others not; in
3713 * this case we set can_hash true as long as there is nothing globally
3714 * preventing us from hashing (and we should therefore consider plans
3715 * with hashes).
3716 *
3717 * Executor doesn't support hashed aggregation with DISTINCT or ORDER
3718 * BY aggregates. (Doing so would imply storing *all* the input
3719 * values in the hash table, and/or running many sorts in parallel,
3720 * either of which seems like a certain loser.) We similarly don't
3721 * support ordered-set aggregates in hashed aggregation, but that case
3722 * is also included in the numOrderedAggs count.
3723 *
3724 * Note: grouping_is_hashable() is much more expensive to check than
3725 * the other gating conditions, so we want to do it last.
3726 */
3727 if ((parse->groupClause != NIL &&
3728 root->numOrderedAggs == 0 &&
3729 (gd ? gd->any_hashable : grouping_is_hashable(root->processed_groupClause))))
3730 flags |= GROUPING_CAN_USE_HASH;
3731
3732 /*
3733 * Determine whether partial aggregation is possible.
3734 */
3735 if (can_partial_agg(root))
3736 flags |= GROUPING_CAN_PARTIAL_AGG;
3737
3738 extra.flags = flags;
3739 extra.target_parallel_safe = target_parallel_safe;
3740 extra.havingQual = parse->havingQual;
3741 extra.targetList = parse->targetList;
3742 extra.partial_costs_set = false;
3743
3744 /*
3745 * Determine whether partitionwise aggregation is in theory possible.
3746 * It can be disabled by the user, and for now, we don't try to
3747 * support grouping sets. create_ordinary_grouping_paths() will check
3748 * additional conditions, such as whether input_rel is partitioned.
3749 */
3750 if (enable_partitionwise_aggregate && !parse->groupingSets)
3751 extra.patype = PARTITIONWISE_AGGREGATE_FULL;
3752 else
3753 extra.patype = PARTITIONWISE_AGGREGATE_NONE;
3754
3755 create_ordinary_grouping_paths(root, input_rel, grouped_rel,
3756 &agg_costs, gd, &extra,
3757 &partially_grouped_rel);
3758 }
3759
3760 set_cheapest(grouped_rel);
3761 return grouped_rel;
3762}
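/*
 * Example (illustrative): a query such as
 *     SELECT a, array_agg(b ORDER BY b) FROM tab GROUP BY a;
 * contains an ORDER BY aggregate, so numOrderedAggs is nonzero and
 * GROUPING_CAN_USE_HASH is not set above; only sort-based grouping paths are
 * generated for it.
 */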
3763
3764/*
3765 * make_grouping_rel
3766 *
3767 * Create a new grouping rel and set basic properties.
3768 *
3769 * input_rel represents the underlying scan/join relation.
3770 * target is the output expected from the grouping relation.
3771 */
3772static RelOptInfo *
3773make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
3774 PathTarget *target, bool target_parallel_safe,
3775 Node *havingQual)
3776{
3777 RelOptInfo *grouped_rel;
3778
3779 if (IS_OTHER_REL(input_rel))
3780 {
3781 grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG,
3782 input_rel->relids);
3783 grouped_rel->reloptkind = RELOPT_OTHER_UPPER_REL;
3784 }
3785 else
3786 {
3787 /*
3788 * By tradition, the relids set for the main grouping relation is
3789 * NULL. (This could be changed, but might require adjustments
3790 * elsewhere.)
3791 */
3792 grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, NULL);
3793 }
3794
3795 /* Set target. */
3796 grouped_rel->reltarget = target;
3797
3798 /*
3799 * If the input relation is not parallel-safe, then the grouped relation
3800 * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
3801 * target list and HAVING quals are parallel-safe.
3802 */
3803 if (input_rel->consider_parallel && target_parallel_safe &&
3804 is_parallel_safe(root, (Node *) havingQual))
3805 grouped_rel->consider_parallel = true;
3806
3807 /*
3808 * If the input rel belongs to a single FDW, so does the grouped rel.
3809 */
3810 grouped_rel->serverid = input_rel->serverid;
3811 grouped_rel->userid = input_rel->userid;
3812 grouped_rel->useridiscurrent = input_rel->useridiscurrent;
3813 grouped_rel->fdwroutine = input_rel->fdwroutine;
3814
3815 return grouped_rel;
3816}
3817
3818/*
3819 * is_degenerate_grouping
3820 *
3821 * A degenerate grouping is one in which the query has a HAVING qual and/or
3822 * grouping sets, but no aggregates and no GROUP BY (which implies that the
3823 * grouping sets are all empty).
3824 */
3825static bool
3826is_degenerate_grouping(PlannerInfo *root)
3827{
3828 Query *parse = root->parse;
3829
3830 return (root->hasHavingQual || parse->groupingSets) &&
3831 !parse->hasAggs && parse->groupClause == NIL;
3832}
3833
3834/*
3835 * create_degenerate_grouping_paths
3836 *
3837 * When the grouping is degenerate (see is_degenerate_grouping), we are
3838 * supposed to emit either zero or one row for each grouping set depending on
3839 * whether HAVING succeeds. Furthermore, there cannot be any variables in
3840 * either HAVING or the targetlist, so we actually do not need the FROM table
3841 * at all! We can just throw away the plan-so-far and generate a Result node.
3842 * This is a sufficiently unusual corner case that it's not worth contorting
3843 * the structure of this module to avoid having to generate the earlier paths
3844 * in the first place.
3845 */
3846static void
3847create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
3848 RelOptInfo *grouped_rel)
3849{
3850 Query *parse = root->parse;
3851 int nrows;
3852 Path *path;
3853
3854 nrows = list_length(parse->groupingSets);
3855 if (nrows > 1)
3856 {
3857 /*
3858 * Doesn't seem worthwhile writing code to cons up a generate_series
3859 * or a values scan to emit multiple rows. Instead just make N clones
3860 * and append them. (With a volatile HAVING clause, this means you
3861 * might get between 0 and N output rows. Offhand I think that's
3862 * desired.)
3863 */
3864 List *paths = NIL;
3865
3866 while (--nrows >= 0)
3867 {
3868 path = (Path *)
3869 create_group_result_path(root, grouped_rel,
3870 grouped_rel->reltarget,
3871 (List *) parse->havingQual);
3872 paths = lappend(paths, path);
3873 }
3874 path = (Path *)
3875 create_append_path(root,
3876 grouped_rel,
3877 paths,
3878 NIL,
3879 NIL,
3880 NULL,
3881 0,
3882 false,
3883 -1);
3884 }
3885 else
3886 {
3887 /* No grouping sets, or just one, so one output row */
3888 path = (Path *)
3889 create_group_result_path(root, grouped_rel,
3890 grouped_rel->reltarget,
3891 (List *) parse->havingQual);
3892 }
3893
3894 add_path(grouped_rel, path);
3895}
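/*
 * Example (illustrative): SELECT 1 FROM tab HAVING 1 = 0; has a HAVING qual
 * but no aggregates and no GROUP BY, so the grouping is degenerate: the FROM
 * clause is discarded and the plan is a single Result node whose one
 * potential output row is gated by the HAVING condition.
 */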
3896
3897/*
3898 * create_ordinary_grouping_paths
3899 *
3900 * Create grouping paths for the ordinary (that is, non-degenerate) case.
3901 *
3902 * We need to consider sorted and hashed aggregation in the same function,
3903 * because otherwise (1) it would be harder to throw an appropriate error
3904 * message if neither way works, and (2) we should not allow hashtable size
3905 * considerations to dissuade us from using hashing if sorting is not possible.
3906 *
3907 * *partially_grouped_rel_p will be set to the partially grouped rel which this
3908 * function creates, or to NULL if it doesn't create one.
3909 */
3910static void
3911create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
3912 RelOptInfo *grouped_rel,
3913 const AggClauseCosts *agg_costs,
3914 grouping_sets_data *gd,
3915 GroupPathExtraData *extra,
3916 RelOptInfo **partially_grouped_rel_p)
3917{
3918 Path *cheapest_path = input_rel->cheapest_total_path;
3919 RelOptInfo *partially_grouped_rel = NULL;
3920 double dNumGroups;
3921 PartitionwiseAggregateType patype = PARTITIONWISE_AGGREGATE_NONE;
3922
3923 /*
3924 * If this is the topmost grouping relation or if the parent relation is
3925 * doing some form of partitionwise aggregation, then we may be able to do
3926 * it at this level also. However, if the input relation is not
3927 * partitioned, partitionwise aggregate is impossible.
3928 */
3929 if (extra->patype != PARTITIONWISE_AGGREGATE_NONE &&
3930 IS_PARTITIONED_REL(input_rel))
3931 {
3932 /*
3933 * If this is the topmost relation or if the parent relation is doing
3934 * full partitionwise aggregation, then we can do full partitionwise
3935 * aggregation provided that the GROUP BY clause contains all of the
3936 * partitioning columns at this level and the collation used by GROUP
3937 * BY matches the partitioning collation. Otherwise, we can do at
3938 * most partial partitionwise aggregation. But if partial aggregation
3939 * is not supported in general then we can't use it for partitionwise
3940 * aggregation either.
3941 *
3942 * Check parse->groupClause not processed_groupClause, because it's
3943 * okay if some of the partitioning columns were proved redundant.
3944 */
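 /*
  * Example (illustrative): for a table partitioned by (a), GROUP BY a, b
  * satisfies the partition-key test and allows full partitionwise
  * aggregation (each partition holds disjoint groups), whereas GROUP BY b
  * alone allows at most partial partitionwise aggregation, finalized above
  * the Append of the per-partition results.
  */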
3945 if (extra->patype == PARTITIONWISE_AGGREGATE_FULL &&
3946 group_by_has_partkey(input_rel, extra->targetList,
3947 root->parse->groupClause))
3948 patype = PARTITIONWISE_AGGREGATE_FULL;
3949 else if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
3950 patype = PARTITIONWISE_AGGREGATE_PARTIAL;
3951 else
3952 patype = PARTITIONWISE_AGGREGATE_NONE;
3953 }
3954
3955 /*
3956 * Before generating paths for grouped_rel, we first generate any possible
3957 * partially grouped paths; that way, later code can easily consider both
3958 * parallel and non-parallel approaches to grouping.
3959 */
3960 if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
3961 {
3962 bool force_rel_creation;
3963
3964 /*
3965 * If we're doing partitionwise aggregation at this level, force
3966 * creation of a partially_grouped_rel so we can add partitionwise
3967 * paths to it.
3968 */
3969 force_rel_creation = (patype == PARTITIONWISE_AGGREGATE_PARTIAL);
3970
3971 partially_grouped_rel =
3972 create_partial_grouping_paths(root,
3973 grouped_rel,
3974 input_rel,
3975 gd,
3976 extra,
3977 force_rel_creation);
3978 }
3979
3980 /* Set out parameter. */
3981 *partially_grouped_rel_p = partially_grouped_rel;
3982
3983 /* Apply partitionwise aggregation technique, if possible. */
3984 if (patype != PARTITIONWISE_AGGREGATE_NONE)
3985 create_partitionwise_grouping_paths(root, input_rel, grouped_rel,
3986 partially_grouped_rel, agg_costs,
3987 gd, patype, extra);
3988
3989 /* If we are doing partial aggregation only, return. */
3990 if (extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
3991 {
3992 Assert(partially_grouped_rel);
3993
3994 if (partially_grouped_rel->pathlist)
3995 set_cheapest(partially_grouped_rel);
3996
3997 return;
3998 }
3999
4000 /* Gather any partially grouped partial paths. */
4001 if (partially_grouped_rel && partially_grouped_rel->partial_pathlist)
4002 {
4003 gather_grouping_paths(root, partially_grouped_rel);
4004 set_cheapest(partially_grouped_rel);
4005 }
4006
4007 /*
4008 * Estimate number of groups.
4009 */
4010 dNumGroups = get_number_of_groups(root,
4011 cheapest_path->rows,
4012 gd,
4013 extra->targetList);
4014
4015 /* Build final grouping paths */
4016 add_paths_to_grouping_rel(root, input_rel, grouped_rel,
4017 partially_grouped_rel, agg_costs, gd,
4018 dNumGroups, extra);
4019
4020 /* Give a helpful error if we failed to find any implementation */
4021 if (grouped_rel->pathlist == NIL)
4022 ereport(ERROR,
4023 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4024 errmsg("could not implement GROUP BY"),
4025 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4026
4027 /*
4028 * If there is an FDW that's responsible for all baserels of the query,
4029 * let it consider adding ForeignPaths.
4030 */
4031 if (grouped_rel->fdwroutine &&
4032 grouped_rel->fdwroutine->GetForeignUpperPaths)
4033 grouped_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_GROUP_AGG,
4034 input_rel, grouped_rel,
4035 extra);
4036
4037 /* Let extensions possibly add some more paths */
4038 if (create_upper_paths_hook)
4039 (*create_upper_paths_hook) (root, UPPERREL_GROUP_AGG,
4040 input_rel, grouped_rel,
4041 extra);
4042}
4043
4044/*
4045 * For a given input path, consider the possible ways of doing grouping sets on
4046 * it, by combinations of hashing and sorting. This can be called multiple
4047 * times, so it's important that it not scribble on input. No result is
4048 * returned, but any generated paths are added to grouped_rel.
4049 */
4050static void
4051consider_groupingsets_paths(PlannerInfo *root,
4052 RelOptInfo *grouped_rel,
4053 Path *path,
4054 bool is_sorted,
4055 bool can_hash,
4056 grouping_sets_data *gd,
4057 const AggClauseCosts *agg_costs,
4058 double dNumGroups)
4059{
4060 Query *parse = root->parse;
4061 Size hash_mem_limit = get_hash_memory_limit();
4062
4063 /*
4064 * If we're not being offered sorted input, then only consider plans that
4065 * can be done entirely by hashing.
4066 *
4067 * We can hash everything if it looks like it'll fit in hash_mem. But if
4068 * the input is actually sorted despite not being advertised as such, we
4069 * prefer to make use of that in order to use less memory.
4070 *
4071 * If none of the grouping sets are sortable, then ignore the hash_mem
4072 * limit and generate a path anyway, since otherwise we'll just fail.
4073 */
4074 if (!is_sorted)
4075 {
4076 List *new_rollups = NIL;
4077 RollupData *unhashed_rollup = NULL;
4078 List *sets_data;
4079 List *empty_sets_data = NIL;
4080 List *empty_sets = NIL;
4081 ListCell *lc;
4082 ListCell *l_start = list_head(gd->rollups);
4083 AggStrategy strat = AGG_HASHED;
4084 double hashsize;
4085 double exclude_groups = 0.0;
4086
4087 Assert(can_hash);
4088
4089 /*
4090 * If the input is coincidentally sorted usefully (which can happen
4091 * even if is_sorted is false, since that only means that our caller
4092 * has set up the sorting for us), then save some hashtable space by
4093 * making use of that. But we need to watch out for degenerate cases:
4094 *
4095 * 1) If there are any empty grouping sets, then group_pathkeys might
4096 * be NIL if all non-empty grouping sets are unsortable. In this case,
4097 * there will be a rollup containing only empty groups, and the
4098 * pathkeys_contained_in test is vacuously true; this is ok.
4099 *
4100 * XXX: the above relies on the fact that group_pathkeys is generated
4101 * from the first rollup. If we add the ability to consider multiple
4102 * sort orders for grouping input, this assumption might fail.
4103 *
4104 * 2) If there are no empty sets and only unsortable sets, then the
4105 * rollups list will be empty (and thus l_start == NULL), and
4106 * group_pathkeys will be NIL; we must ensure that the vacuously-true
4107 * pathkeys_contained_in test doesn't cause us to crash.
4108 */
4109 if (l_start != NULL &&
4110 pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
4111 {
4112 unhashed_rollup = lfirst_node(RollupData, l_start);
4113 exclude_groups = unhashed_rollup->numGroups;
4114 l_start = lnext(gd->rollups, l_start);
4115 }
4116
4117 hashsize = estimate_hashagg_tablesize(root,
4118 path,
4119 agg_costs,
4120 dNumGroups - exclude_groups);
4121
4122 /*
4123 * gd->rollups is empty if we have only unsortable columns to work
4124 * with. Override hash_mem in that case; otherwise, we'll rely on the
4125 * sorted-input case to generate usable mixed paths.
4126 */
4127 if (hashsize > hash_mem_limit && gd->rollups)
4128 return; /* nope, won't fit */
4129
4130 /*
4131 * We need to burst the existing rollups list into individual grouping
4132 * sets and recompute a groupClause for each set.
4133 */
4134 sets_data = list_copy(gd->unsortable_sets);
4135
4136 for_each_cell(lc, gd->rollups, l_start)
4137 {
4138 RollupData *rollup = lfirst_node(RollupData, lc);
4139
4140 /*
4141 * If we find an unhashable rollup that's not been skipped by the
4142 * "actually sorted" check above, we can't cope; we'd need sorted
4143 * input (with a different sort order) but we can't get that here.
4144 * So bail out; we'll get a valid path from the is_sorted case
4145 * instead.
4146 *
4147 * The mere presence of empty grouping sets doesn't make a rollup
4148 * unhashable (see preprocess_grouping_sets); we handle those
4149 * specially below.
4150 */
4151 if (!rollup->hashable)
4152 return;
4153
4154 sets_data = list_concat(sets_data, rollup->gsets_data);
4155 }
4156 foreach(lc, sets_data)
4157 {
4158 GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4159 List *gset = gs->set;
4160 RollupData *rollup;
4161
4162 if (gset == NIL)
4163 {
4164 /* Empty grouping sets can't be hashed. */
4165 empty_sets_data = lappend(empty_sets_data, gs);
4166 empty_sets = lappend(empty_sets, NIL);
4167 }
4168 else
4169 {
4170 rollup = makeNode(RollupData);
4171
4172 rollup->groupClause = preprocess_groupclause(root, gset);
4173 rollup->gsets_data = list_make1(gs);
4174 rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4175 rollup->gsets_data,
4176 gd->tleref_to_colnum_map);
4177 rollup->numGroups = gs->numGroups;
4178 rollup->hashable = true;
4179 rollup->is_hashed = true;
4180 new_rollups = lappend(new_rollups, rollup);
4181 }
4182 }
4183
4184 /*
4185 * If we didn't find anything nonempty to hash, then bail. We'll
4186 * generate a path from the is_sorted case.
4187 */
4188 if (new_rollups == NIL)
4189 return;
4190
4191 /*
4192 * If there were empty grouping sets they should have been in the
4193 * first rollup.
4194 */
4195 Assert(!unhashed_rollup || !empty_sets);
4196
4197 if (unhashed_rollup)
4198 {
4199 new_rollups = lappend(new_rollups, unhashed_rollup);
4200 strat = AGG_MIXED;
4201 }
4202 else if (empty_sets)
4203 {
4204 RollupData *rollup = makeNode(RollupData);
4205
4206 rollup->groupClause = NIL;
4207 rollup->gsets_data = empty_sets_data;
4208 rollup->gsets = empty_sets;
4209 rollup->numGroups = list_length(empty_sets);
4210 rollup->hashable = false;
4211 rollup->is_hashed = false;
4212 new_rollups = lappend(new_rollups, rollup);
4213 strat = AGG_MIXED;
4214 }
4215
4216 add_path(grouped_rel, (Path *)
4217 create_groupingsets_path(root,
4218 grouped_rel,
4219 path,
4220 (List *) parse->havingQual,
4221 strat,
4222 new_rollups,
4223 agg_costs));
4224 return;
4225 }
4226
4227 /*
4228 * If we have sorted input but nothing we can do with it, bail.
4229 */
4230 if (gd->rollups == NIL)
4231 return;
4232
4233 /*
4234 * Given sorted input, we try to make two paths: one sorted and one mixed
4235 * sort/hash. (We need to try both because hashagg might be disabled, or
4236 * some columns might not be sortable.)
4237 *
4238 * can_hash is passed in as false if some obstacle elsewhere (such as
4239 * ordered aggs) means that we shouldn't consider hashing at all.
4240 */
4241 if (can_hash && gd->any_hashable)
4242 {
4243 List *rollups = NIL;
4244 List *hash_sets = list_copy(gd->unsortable_sets);
4245 double availspace = hash_mem_limit;
4246 ListCell *lc;
4247
4248 /*
4249 * Account first for space needed for groups we can't sort at all.
4250 */
4251 availspace -= estimate_hashagg_tablesize(root,
4252 path,
4253 agg_costs,
4254 gd->dNumHashGroups);
4255
4256 if (availspace > 0 && list_length(gd->rollups) > 1)
4257 {
4258 double scale;
4259 int num_rollups = list_length(gd->rollups);
4260 int k_capacity;
4261 int *k_weights = palloc(num_rollups * sizeof(int));
4262 Bitmapset *hash_items = NULL;
4263 int i;
4264
4265 /*
4266 * We treat this as a knapsack problem: the knapsack capacity
4267 * represents hash_mem, the item weights are the estimated memory
4268 * usage of the hashtables needed to implement a single rollup,
4269 * and we really ought to use the cost saving as the item value;
4270 * however, currently the costs assigned to sort nodes don't
4271 * reflect the comparison costs well, and so we treat all items as
4272 * of equal value (each rollup we hash instead saves us one sort).
4273 *
4274 * To use the discrete knapsack, we need to scale the values to a
4275 * reasonably small bounded range. We choose to allow a 5% error
4276 * margin; we have no more than 4096 rollups in the worst possible
4277 * case, which with a 5% error margin will require a bit over 42MB
4278 * of workspace. (Anyone wanting to plan queries that complex had
4279 * better have the memory for it. In more reasonable cases, with
4280 * no more than a couple of dozen rollups, the memory usage will
4281 * be negligible.)
4282 *
4283 * k_capacity is naturally bounded, but we clamp the values for
4284 * scale and weight (below) to avoid overflows or underflows (or
4285 * uselessly trying to use a scale factor less than 1 byte).
4286 */
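 /*
  * Rough arithmetic behind the figure above (illustrative): with 4096
  * rollups the scale clamp makes k_capacity at most 20 * 4096 = 81920, and
  * the knapsack's dynamic-programming table keeps roughly one 4096-bit item
  * set (about 512 bytes) per capacity slot, which works out to a bit over
  * 42MB of workspace.
  */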
4287 scale = Max(availspace / (20.0 * num_rollups), 1.0);
4288 k_capacity = (int) floor(availspace / scale);
4289
4290 /*
4291 * We leave the first rollup out of consideration since it's the
4292 * one that matches the input sort order. We assign indexes "i"
4293 * to only those entries considered for hashing; the second loop,
4294 * below, must use the same condition.
4295 */
4296 i = 0;
4297 for_each_from(lc, gd->rollups, 1)
4298 {
4299 RollupData *rollup = lfirst_node(RollupData, lc);
4300
4301 if (rollup->hashable)
4302 {
4303 double sz = estimate_hashagg_tablesize(root,
4304 path,
4305 agg_costs,
4306 rollup->numGroups);
4307
4308 /*
4309 * If sz is enormous, but hash_mem (and hence scale) is
4310 * small, avoid integer overflow here.
4311 */
4312 k_weights[i] = (int) Min(floor(sz / scale),
4313 k_capacity + 1.0);
4314 ++i;
4315 }
4316 }
4317
4318 /*
4319 * Apply knapsack algorithm; compute the set of items which
4320 * maximizes the value stored (in this case the number of sorts
4321 * saved) while keeping the total size (approximately) within
4322 * capacity.
4323 */
4324 if (i > 0)
4325 hash_items = DiscreteKnapsack(k_capacity, i, k_weights, NULL);
4326
4327 if (!bms_is_empty(hash_items))
4328 {
4329 rollups = list_make1(linitial(gd->rollups));
4330
4331 i = 0;
4332 for_each_from(lc, gd->rollups, 1)
4333 {
4334 RollupData *rollup = lfirst_node(RollupData, lc);
4335
4336 if (rollup->hashable)
4337 {
4338 if (bms_is_member(i, hash_items))
4339 hash_sets = list_concat(hash_sets,
4340 rollup->gsets_data);
4341 else
4342 rollups = lappend(rollups, rollup);
4343 ++i;
4344 }
4345 else
4346 rollups = lappend(rollups, rollup);
4347 }
4348 }
4349 }
4350
4351 if (!rollups && hash_sets)
4352 rollups = list_copy(gd->rollups);
4353
4354 foreach(lc, hash_sets)
4355 {
4356 GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4357 RollupData *rollup = makeNode(RollupData);
4358
4359 Assert(gs->set != NIL);
4360
4361 rollup->groupClause = preprocess_groupclause(root, gs->set);
4362 rollup->gsets_data = list_make1(gs);
4363 rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4364 rollup->gsets_data,
4365 gd->tleref_to_colnum_map);
4366 rollup->numGroups = gs->numGroups;
4367 rollup->hashable = true;
4368 rollup->is_hashed = true;
4369 rollups = lcons(rollup, rollups);
4370 }
4371
4372 if (rollups)
4373 {
4374 add_path(grouped_rel, (Path *)
4376 grouped_rel,
4377 path,
4378 (List *) parse->havingQual,
4379 AGG_MIXED,
4380 rollups,
4381 agg_costs));
4382 }
4383 }
4384
4385 /*
4386 * Now try the simple sorted case.
4387 */
4388 if (!gd->unsortable_sets)
4389 add_path(grouped_rel, (Path *)
4391 grouped_rel,
4392 path,
4393 (List *) parse->havingQual,
4394 AGG_SORTED,
4395 gd->rollups,
4396 agg_costs));
4397}
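/*
 * Example (illustrative): with
 *     SELECT a, b, count(*) FROM tab GROUP BY GROUPING SETS ((a), (b));
 * and input that happens to arrive sorted by "a", the (a) grouping can be
 * aggregated in sorted order while (b) is hashed, which can yield a
 * MixedAggregate plan when the hash table fits in hash_mem; otherwise the
 * sorted case handles both groupings, re-sorting between them.
 */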
4398
4399/*
4400 * create_window_paths
4401 *
4402 * Build a new upperrel containing Paths for window-function evaluation.
4403 *
4404 * input_rel: contains the source-data Paths
4405 * input_target: result of make_window_input_target
4406 * output_target: what the topmost WindowAggPath should return
4407 * wflists: result of find_window_functions
4408 * activeWindows: result of select_active_windows
4409 *
4410 * Note: all Paths in input_rel are expected to return input_target.
4411 */
4412static RelOptInfo *
4413create_window_paths(PlannerInfo *root,
4414 RelOptInfo *input_rel,
4415 PathTarget *input_target,
4416 PathTarget *output_target,
4417 bool output_target_parallel_safe,
4418 WindowFuncLists *wflists,
4419 List *activeWindows)
4420{
4421 RelOptInfo *window_rel;
4422 ListCell *lc;
4423
4424 /* For now, do all work in the (WINDOW, NULL) upperrel */
4425 window_rel = fetch_upper_rel(root, UPPERREL_WINDOW, NULL);
4426
4427 /*
4428 * If the input relation is not parallel-safe, then the window relation
4429 * can't be parallel-safe, either. Otherwise, we need to examine the
4430 * target list and active windows for non-parallel-safe constructs.
4431 */
4432 if (input_rel->consider_parallel && output_target_parallel_safe &&
4433 is_parallel_safe(root, (Node *) activeWindows))
4434 window_rel->consider_parallel = true;
4435
4436 /*
4437 * If the input rel belongs to a single FDW, so does the window rel.
4438 */
4439 window_rel->serverid = input_rel->serverid;
4440 window_rel->userid = input_rel->userid;
4441 window_rel->useridiscurrent = input_rel->useridiscurrent;
4442 window_rel->fdwroutine = input_rel->fdwroutine;
4443
4444 /*
4445 * Consider computing window functions starting from the existing
4446 * cheapest-total path (which will likely require a sort) as well as any
4447 * existing paths that satisfy or partially satisfy root->window_pathkeys.
4448 */
4449 foreach(lc, input_rel->pathlist)
4450 {
4451 Path *path = (Path *) lfirst(lc);
4452 int presorted_keys;
4453
4454 if (path == input_rel->cheapest_total_path ||
4455 pathkeys_count_contained_in(root->window_pathkeys, path->pathkeys,
4456 &presorted_keys) ||
4457 presorted_keys > 0)
4458 create_one_window_path(root,
4459 window_rel,
4460 path,
4461 input_target,
4462 output_target,
4463 wflists,
4464 activeWindows);
4465 }
4466
4467 /*
4468 * If there is an FDW that's responsible for all baserels of the query,
4469 * let it consider adding ForeignPaths.
4470 */
4471 if (window_rel->fdwroutine &&
4472 window_rel->fdwroutine->GetForeignUpperPaths)
4473 window_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_WINDOW,
4474 input_rel, window_rel,
4475 NULL);
4476
4477 /* Let extensions possibly add some more paths */
4478 if (create_upper_paths_hook)
4479 (*create_upper_paths_hook) (root, UPPERREL_WINDOW,
4480 input_rel, window_rel, NULL);
4481
4482 /* Now choose the best path(s) */
4483 set_cheapest(window_rel);
4484
4485 return window_rel;
4486}
4487
4488/*
4489 * Stack window-function implementation steps atop the given Path, and
4490 * add the result to window_rel.
4491 *
4492 * window_rel: upperrel to contain result
4493 * path: input Path to use (must return input_target)
4494 * input_target: result of make_window_input_target
4495 * output_target: what the topmost WindowAggPath should return
4496 * wflists: result of find_window_functions
4497 * activeWindows: result of select_active_windows
4498 */
4499static void
4500create_one_window_path(PlannerInfo *root,
4501 RelOptInfo *window_rel,
4502 Path *path,
4503 PathTarget *input_target,
4504 PathTarget *output_target,
4505 WindowFuncLists *wflists,
4506 List *activeWindows)
4507{
4508 PathTarget *window_target;
4509 ListCell *l;
4510 List *topqual = NIL;
4511
4512 /*
4513 * Since each window clause could require a different sort order, we stack
4514 * up a WindowAgg node for each clause, with sort steps between them as
4515 * needed. (We assume that select_active_windows chose a good order for
4516 * executing the clauses in.)
4517 *
4518 * input_target should contain all Vars and Aggs needed for the result.
4519 * (In some cases we wouldn't need to propagate all of these all the way
4520 * to the top, since they might only be needed as inputs to WindowFuncs.
4521 * It's probably not worth trying to optimize that though.) It must also
4522 * contain all window partitioning and sorting expressions, to ensure
4523 * they're computed only once at the bottom of the stack (that's critical
4524 * for volatile functions). As we climb up the stack, we'll add outputs
4525 * for the WindowFuncs computed at each level.
4526 */
4527 window_target = input_target;
4528
4529 foreach(l, activeWindows)
4530 {
4531 WindowClause *wc = lfirst_node(WindowClause, l);
4532 List *window_pathkeys;
4533 List *runcondition = NIL;
4534 int presorted_keys;
4535 bool is_sorted;
4536 bool topwindow;
4537 ListCell *lc2;
4538
4539 window_pathkeys = make_pathkeys_for_window(root,
4540 wc,
4541 root->processed_tlist);
4542
4543 is_sorted = pathkeys_count_contained_in(window_pathkeys,
4544 path->pathkeys,
4545 &presorted_keys);
4546
4547 /* Sort if necessary */
4548 if (!is_sorted)
4549 {
4550 /*
4551 * No presorted keys or incremental sort disabled, just perform a
4552 * complete sort.
4553 */
4554 if (presorted_keys == 0 || !enable_incremental_sort)
4555 path = (Path *) create_sort_path(root, window_rel,
4556 path,
4557 window_pathkeys,
4558 -1.0);
4559 else
4560 {
4561 /*
4562 * Since we have presorted keys and incremental sort is
4563 * enabled, just use incremental sort.
4564 */
4566 window_rel,
4567 path,
4568 window_pathkeys,
4569 presorted_keys,
4570 -1.0);
4571 }
4572 }
4573
4574 if (lnext(activeWindows, l))
4575 {
4576 /*
4577 * Add the current WindowFuncs to the output target for this
4578 * intermediate WindowAggPath. We must copy window_target to
4579 * avoid changing the previous path's target.
4580 *
4581 * Note: a WindowFunc adds nothing to the target's eval costs; but
4582 * we do need to account for the increase in tlist width.
4583 */
4584 int64 tuple_width = window_target->width;
4585
4586 window_target = copy_pathtarget(window_target);
4587 foreach(lc2, wflists->windowFuncs[wc->winref])
4588 {
4589 WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4590
4591 add_column_to_pathtarget(window_target, (Expr *) wfunc, 0);
4592 tuple_width += get_typavgwidth(wfunc->wintype, -1);
4593 }
4594 window_target->width = clamp_width_est(tuple_width);
4595 }
4596 else
4597 {
4598 /* Install the goal target in the topmost WindowAgg */
4599 window_target = output_target;
4600 }
4601
4602 /* mark the final item in the list as the top-level window */
4603 topwindow = foreach_current_index(l) == list_length(activeWindows) - 1;
4604
4605 /*
4606 * Collect the WindowFuncRunConditions from each WindowFunc and
4607 * convert them into OpExprs
4608 */
4609 foreach(lc2, wflists->windowFuncs[wc->winref])
4610 {
4611 ListCell *lc3;
4612 WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4613
4614 foreach(lc3, wfunc->runCondition)
4615 {
4616 WindowFuncRunCondition *wfuncrc =
4617 lfirst_node(WindowFuncRunCondition, lc3);
4618 Expr *opexpr;
4619 Expr *leftop;
4620 Expr *rightop;
4621
4622 if (wfuncrc->wfunc_left)
4623 {
4624 leftop = (Expr *) copyObject(wfunc);
4625 rightop = copyObject(wfuncrc->arg);
4626 }
4627 else
4628 {
4629 leftop = copyObject(wfuncrc->arg);
4630 rightop = (Expr *) copyObject(wfunc);
4631 }
4632
4633 opexpr = make_opclause(wfuncrc->opno,
4634 BOOLOID,
4635 false,
4636 leftop,
4637 rightop,
4638 InvalidOid,
4639 wfuncrc->inputcollid);
4640
4641 runcondition = lappend(runcondition, opexpr);
4642
4643 if (!topwindow)
4644 topqual = lappend(topqual, opexpr);
4645 }
4646 }
4647
4648 path = (Path *)
4649 create_windowagg_path(root, window_rel, path, window_target,
4650 wflists->windowFuncs[wc->winref],
4651 runcondition, wc,
4652 topwindow ? topqual : NIL, topwindow);
4653 }
4654
4655 add_path(window_rel, path);
4656}
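/*
 * Example (illustrative): in
 *     SELECT sum(x) OVER (PARTITION BY a),
 *            row_number() OVER (ORDER BY b)
 *     FROM tab;
 * the two window clauses need different orderings, so the code above stacks
 * two WindowAgg nodes with a Sort (or Incremental Sort) between them, and
 * only the topmost WindowAgg emits the query's final target list.
 */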
4657
4658/*
4659 * create_distinct_paths
4660 *
4661 * Build a new upperrel containing Paths for SELECT DISTINCT evaluation.
4662 *
4663 * input_rel: contains the source-data Paths
4664 * target: the pathtarget for the result Paths to compute
4665 *
4666 * Note: input paths should already compute the desired pathtarget, since
4667 * Sort/Unique won't project anything.
4668 */
4669static RelOptInfo *
4670create_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
4671 PathTarget *target)
4672{
4673 RelOptInfo *distinct_rel;
4674
4675 /* For now, do all work in the (DISTINCT, NULL) upperrel */
4676 distinct_rel = fetch_upper_rel(root, UPPERREL_DISTINCT, NULL);
4677
4678 /*
4679 * We don't compute anything at this level, so distinct_rel will be
4680 * parallel-safe if the input rel is parallel-safe. In particular, if
4681 * there is a DISTINCT ON (...) clause, any path for the input_rel will
4682 * output those expressions, and will not be parallel-safe unless those
4683 * expressions are parallel-safe.
4684 */
4685 distinct_rel->consider_parallel = input_rel->consider_parallel;
4686
4687 /*
4688 * If the input rel belongs to a single FDW, so does the distinct_rel.
4689 */
4690 distinct_rel->serverid = input_rel->serverid;
4691 distinct_rel->userid = input_rel->userid;
4692 distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4693 distinct_rel->fdwroutine = input_rel->fdwroutine;
4694
4695 /* build distinct paths based on input_rel's pathlist */
4696 create_final_distinct_paths(root, input_rel, distinct_rel);
4697
4698 /* now build distinct paths based on input_rel's partial_pathlist */
4699 create_partial_distinct_paths(root, input_rel, distinct_rel, target);
4700
4701 /* Give a helpful error if we failed to create any paths */
4702 if (distinct_rel->pathlist == NIL)
4703 ereport(ERROR,
4704 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4705 errmsg("could not implement DISTINCT"),
4706 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4707
4708 /*
4709 * If there is an FDW that's responsible for all baserels of the query,
4710 * let it consider adding ForeignPaths.
4711 */
4712 if (distinct_rel->fdwroutine &&
4713 distinct_rel->fdwroutine->GetForeignUpperPaths)
4714 distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4715 UPPERREL_DISTINCT,
4716 input_rel,
4717 distinct_rel,
4718 NULL);
4719
4720 /* Let extensions possibly add some more paths */
4721 if (create_upper_paths_hook)
4722 (*create_upper_paths_hook) (root, UPPERREL_DISTINCT, input_rel,
4723 distinct_rel, NULL);
4724
4725 /* Now choose the best path(s) */
4726 set_cheapest(distinct_rel);
4727
4728 return distinct_rel;
4729}
4730
4731/*
4732 * create_partial_distinct_paths
4733 *
4734 * Process 'input_rel' partial paths and add unique/aggregate paths to the
4735 * UPPERREL_PARTIAL_DISTINCT rel. For paths created, add Gather/GatherMerge
4736 paths on top and add a final unique/aggregate path to remove any duplicates
4737 * produced from combining rows from parallel workers.
4738 */
4739static void
4740create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
4741 RelOptInfo *final_distinct_rel,
4742 PathTarget *target)
4743{
4744 RelOptInfo *partial_distinct_rel;
4745 Query *parse;
4746 List *distinctExprs;
4747 double numDistinctRows;
4748 Path *cheapest_partial_path;
4749 ListCell *lc;
4750
4751 /* nothing to do when there are no partial paths in the input rel */
4752 if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
4753 return;
4754
4755 parse = root->parse;
4756
4757 /* can't do parallel DISTINCT ON */
4758 if (parse->hasDistinctOn)
4759 return;
4760
4761 partial_distinct_rel = fetch_upper_rel(root, UPPERREL_PARTIAL_DISTINCT,
4762 NULL);
4763 partial_distinct_rel->reltarget = target;
4764 partial_distinct_rel->consider_parallel = input_rel->consider_parallel;
4765
4766 /*
4767 * If input_rel belongs to a single FDW, so does the partial_distinct_rel.
4768 */
4769 partial_distinct_rel->serverid = input_rel->serverid;
4770 partial_distinct_rel->userid = input_rel->userid;
4771 partial_distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4772 partial_distinct_rel->fdwroutine = input_rel->fdwroutine;
4773
4774 cheapest_partial_path = linitial(input_rel->partial_pathlist);
4775
4776 distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
4777 parse->targetList);
4778
4779 /* estimate how many distinct rows we'll get from each worker */
4780 numDistinctRows = estimate_num_groups(root, distinctExprs,
4781 cheapest_partial_path->rows,
4782 NULL, NULL);
4783
4784 /*
4785 * Try sorting the cheapest path and incrementally sorting any paths with
4786 * presorted keys and put unique paths atop of those. We'll also
4787 * attempt to reorder the required pathkeys to match the input path's
4788 * pathkeys as much as possible, in hopes of avoiding a possible need to
4789 * re-sort.
4790 */
4791 if (grouping_is_sortable(root->processed_distinctClause))
4792 {
4793 foreach(lc, input_rel->partial_pathlist)
4794 {
4795 Path *input_path = (Path *) lfirst(lc);
4796 Path *sorted_path;
4797 List *useful_pathkeys_list = NIL;
4798
4799 useful_pathkeys_list =
4800 get_useful_pathkeys_for_distinct(root,
4801 root->distinct_pathkeys,
4802 input_path->pathkeys);
4803 Assert(list_length(useful_pathkeys_list) > 0);
4804
4805 foreach_node(List, useful_pathkeys, useful_pathkeys_list)
4806 {
4807 sorted_path = make_ordered_path(root,
4808 partial_distinct_rel,
4809 input_path,
4810 cheapest_partial_path,
4811 useful_pathkeys,
4812 -1.0);
4813
4814 if (sorted_path == NULL)
4815 continue;
4816
4817 /*
4818 * An empty distinct_pathkeys means all tuples have the same
4819 * value for the DISTINCT clause. See
4820 * create_final_distinct_paths()
4821 */
4822 if (root->distinct_pathkeys == NIL)
4823 {
4824 Node *limitCount;
4825
4826 limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
4827 sizeof(int64),
4828 Int64GetDatum(1), false,
4829 FLOAT8PASSBYVAL);
4830
4831 /*
4832 * Apply a LimitPath onto the partial path to restrict the
4833 * tuples from each worker to 1.
4834 * create_final_distinct_paths will need to apply an
4835 * additional LimitPath to restrict this to a single row
4836 * after the Gather node. If the query already has a
4837 * LIMIT clause, then we could end up with three Limit
4838 * nodes in the final plan. Consolidating the top two of
4839 * these could be done, but does not seem worth troubling
4840 * over.
4841 */
4842 add_partial_path(partial_distinct_rel, (Path *)
4843 create_limit_path(root, partial_distinct_rel,
4844 sorted_path,
4845 NULL,
4846 limitCount,
4847 LIMIT_OPTION_COUNT,
4848 0, 1));
4849 }
4850 else
4851 {
4852 add_partial_path(partial_distinct_rel, (Path *)
4853 create_upper_unique_path(root, partial_distinct_rel,
4854 sorted_path,
4855 list_length(root->distinct_pathkeys),
4856 numDistinctRows));
4857 }
4858 }
4859 }
4860 }
4861
4862 /*
4863 * Now try hash aggregate paths, if enabled and hashing is possible. Since
4864 * we're not on the hook to ensure we do our best to create at least one
4865 * path here, we treat enable_hashagg as a hard off-switch rather than the
4866 * slightly softer variant in create_final_distinct_paths.
4867 */
4868 if (enable_hashagg && grouping_is_hashable(root->processed_distinctClause))
4869 {
4870 add_partial_path(partial_distinct_rel, (Path *)
4871 create_agg_path(root,
4872 partial_distinct_rel,
4873 cheapest_partial_path,
4874 cheapest_partial_path->pathtarget,
4875 AGG_HASHED,
4876 AGGSPLIT_SIMPLE,
4877 root->processed_distinctClause,
4878 NIL,
4879 NULL,
4880 numDistinctRows));
4881 }
4882
4883 /*
4884 * If there is an FDW that's responsible for all baserels of the query,
4885 * let it consider adding ForeignPaths.
4886 */
4887 if (partial_distinct_rel->fdwroutine &&
4888 partial_distinct_rel->fdwroutine->GetForeignUpperPaths)
4890 UPPERREL_PARTIAL_DISTINCT,
4889 partial_distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4891 input_rel,
4892 partial_distinct_rel,
4893 NULL);
4894
4895 /* Let extensions possibly add some more partial paths */
4896 if (create_upper_paths_hook)
4897 (*create_upper_paths_hook) (root, UPPERREL_PARTIAL_DISTINCT,
4898 input_rel, partial_distinct_rel, NULL);
4899
4900 if (partial_distinct_rel->partial_pathlist != NIL)
4901 {
4902 generate_useful_gather_paths(root, partial_distinct_rel, true);
4903 set_cheapest(partial_distinct_rel);
4904
4905 /*
4906 * Finally, create paths to distinctify the final result. This step
4907 * is needed to remove any duplicates due to combining rows from
4908 * parallel workers.
4909 */
4910 create_final_distinct_paths(root, partial_distinct_rel,
4911 final_distinct_rel);
4912 }
4913}
4914
4915/*
4916 * create_final_distinct_paths
4917 * Create distinct paths in 'distinct_rel' based on 'input_rel' pathlist
4918 *
4919 * input_rel: contains the source-data paths
4920 * distinct_rel: destination relation for storing created paths
4921 */
4922static RelOptInfo *
4923create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
4924 RelOptInfo *distinct_rel)
4925{
4926 Query *parse = root->parse;
4927 Path *cheapest_input_path = input_rel->cheapest_total_path;
4928 double numDistinctRows;
4929 bool allow_hash;
4930
4931 /* Estimate number of distinct rows there will be */
4932 if (parse->groupClause || parse->groupingSets || parse->hasAggs ||
4933 root->hasHavingQual)
4934 {
4935 /*
4936 * If there was grouping or aggregation, use the number of input rows
4937 * as the estimated number of DISTINCT rows (ie, assume the input is
4938 * already mostly unique).
4939 */
4940 numDistinctRows = cheapest_input_path->rows;
4941 }
4942 else
4943 {
4944 /*
4945 * Otherwise, the UNIQUE filter has effects comparable to GROUP BY.
4946 */
4947 List *distinctExprs;
4948
4949 distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
4950 parse->targetList);
4951 numDistinctRows = estimate_num_groups(root, distinctExprs,
4952 cheapest_input_path->rows,
4953 NULL, NULL);
4954 }
4955
4956 /*
4957 * Consider sort-based implementations of DISTINCT, if possible.
4958 */
4959 if (grouping_is_sortable(root->processed_distinctClause))
4960 {
4961 /*
4962 * Firstly, if we have any adequately-presorted paths, just stick a
4963 * Unique node on those. We also consider doing an explicit sort of
4964 * the cheapest input path and Unique'ing that. If any paths have
4965 * presorted keys then we'll create an incremental sort atop of those
4966 * before adding a unique node on the top. We'll also attempt to
4967 * reorder the required pathkeys to match the input path's pathkeys as
4968 * much as possible, in hopes of avoiding a possible need to re-sort.
4969 *
4970 * When we have DISTINCT ON, we must sort by the more rigorous of
4971 * DISTINCT and ORDER BY, else it won't have the desired behavior.
4972 * Also, if we do have to do an explicit sort, we might as well use
4973 * the more rigorous ordering to avoid a second sort later. (Note
4974 * that the parser will have ensured that one clause is a prefix of
4975 * the other.)
4976 */
4977 List *needed_pathkeys;
4978 ListCell *lc;
4979 double limittuples = root->distinct_pathkeys == NIL ? 1.0 : -1.0;
4980
4981 if (parse->hasDistinctOn &&
4982 list_length(root->distinct_pathkeys) <
4983 list_length(root->sort_pathkeys))
4984 needed_pathkeys = root->sort_pathkeys;
4985 else
4986 needed_pathkeys = root->distinct_pathkeys;
4987
4988 foreach(lc, input_rel->pathlist)
4989 {
4990 Path *input_path = (Path *) lfirst(lc);
4991 Path *sorted_path;
4992 List *useful_pathkeys_list = NIL;
4993
4994 useful_pathkeys_list =
4995 get_useful_pathkeys_for_distinct(root,
4996 needed_pathkeys,
4997 input_path->pathkeys);
4998 Assert(list_length(useful_pathkeys_list) > 0);
4999
5000 foreach_node(List, useful_pathkeys, useful_pathkeys_list)
5001 {
5002 sorted_path = make_ordered_path(root,
5003 distinct_rel,
5004 input_path,
5005 cheapest_input_path,
5006 useful_pathkeys,
5007 limittuples);
5008
5009 if (sorted_path == NULL)
5010 continue;
5011
5012 /*
5013 * distinct_pathkeys may have become empty if all of the
5014 * pathkeys were determined to be redundant. If all of the
5015 * pathkeys are redundant then each DISTINCT target must only
5016 * allow a single value, therefore all resulting tuples must
5017 * be identical (or at least indistinguishable by an equality
5018 * check). We can uniquify these tuples simply by just taking
5019 * the first tuple. All we do here is add a path to do "LIMIT
5020 * 1" atop of 'sorted_path'. When doing a DISTINCT ON we may
5021 * still have a non-NIL sort_pathkeys list, so we must still
5022 * only do this with paths which are correctly sorted by
5023 * sort_pathkeys.
5024 */
5025 if (root->distinct_pathkeys == NIL)
5026 {
5027 Node *limitCount;
5028
5029 limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
5030 sizeof(int64),
5031 Int64GetDatum(1), false,
5032 FLOAT8PASSBYVAL);
5033
5034 /*
5035 * If the query already has a LIMIT clause, then we could
5036 * end up with a duplicate LimitPath in the final plan.
5037 * That does not seem worth troubling over too much.
5038 */
5039 add_path(distinct_rel, (Path *)
5040 create_limit_path(root, distinct_rel, sorted_path,
5041 NULL, limitCount,
5042 LIMIT_OPTION_COUNT, 0, 1));
5043 }
5044 else
5045 {
5046 add_path(distinct_rel, (Path *)
5047 create_upper_unique_path(root, distinct_rel,
5048 sorted_path,
5049 list_length(root->distinct_pathkeys),
5050 numDistinctRows));
5051 }
5052 }
5053 }
5054 }
5055
5056 /*
5057 * Consider hash-based implementations of DISTINCT, if possible.
5058 *
5059 * If we were not able to make any other types of path, we *must* hash or
5060 * die trying. If we do have other choices, there are two things that
5061 * should prevent selection of hashing: if the query uses DISTINCT ON
5062 * (because it won't really have the expected behavior if we hash), or if
5063 * enable_hashagg is off.
5064 *
5065 * Note: grouping_is_hashable() is much more expensive to check than the
5066 * other gating conditions, so we want to do it last.
5067 */
5068 if (distinct_rel->pathlist == NIL)
5069 allow_hash = true; /* we have no alternatives */
5070 else if (parse->hasDistinctOn || !enable_hashagg)
5071 allow_hash = false; /* policy-based decision not to hash */
5072 else
5073 allow_hash = true; /* default */
5074
5075 if (allow_hash && grouping_is_hashable(root->processed_distinctClause))
5076 {
5077 /* Generate hashed aggregate path --- no sort needed */
5078 add_path(distinct_rel, (Path *)
5079 create_agg_path(root,
5080 distinct_rel,
5081 cheapest_input_path,
5082 cheapest_input_path->pathtarget,
5083 AGG_HASHED,
5084 AGGSPLIT_SIMPLE,
5085 root->processed_distinctClause,
5086 NIL,
5087 NULL,
5088 numDistinctRows));
5089 }
5090
5091 return distinct_rel;
5092}
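/*
 * Example (illustrative): SELECT DISTINCT ON (a) a, b FROM tab ORDER BY a, b;
 * sorts by the more rigorous ORDER BY keys and uses a Unique path; hashing
 * is not considered for it, because which row survives per group depends on
 * the sort order. A plain SELECT DISTINCT a FROM tab may instead use a
 * HashAggregate when enable_hashagg permits.
 */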
5093
5094/*
5095 * get_useful_pathkeys_for_distinct
5096 * Get useful orderings of pathkeys for distinctClause by reordering
5097 * 'needed_pathkeys' to match the given 'path_pathkeys' as much as possible.
5098 *
5099 * This returns a list of pathkeys that can be useful for the DISTINCT or
5100 * ON clause. For convenience, it always includes the given 'needed_pathkeys'.
5101 */
5102static List *
5103get_useful_pathkeys_for_distinct(PlannerInfo *root, List *needed_pathkeys,
5104 List *path_pathkeys)
5105{
5106 List *useful_pathkeys_list = NIL;
5107 List *useful_pathkeys = NIL;
5108
5109 /* always include the given 'needed_pathkeys' */
5110 useful_pathkeys_list = lappend(useful_pathkeys_list,
5111 needed_pathkeys);
5112
5113 if (path_pathkeys == NIL)
5114 return useful_pathkeys_list;
5115
5116 /*
5117 * Scan the given 'path_pathkeys' and construct a list of PathKey nodes
5118 * that match 'needed_pathkeys', but only up to the longest matching
5119 * prefix.
5120 *
5121 * When we have DISTINCT ON, we must ensure that the resulting pathkey
5122 * list matches initial distinctClause pathkeys; otherwise, it won't have
5123 * the desired behavior.
5124 */
5125 foreach_node(PathKey, pathkey, path_pathkeys)
5126 {
5127 /*
5128 * The PathKey nodes are canonical, so they can be checked for
5129 * equality by simple pointer comparison.
5130 */
5131 if (!list_member_ptr(needed_pathkeys, pathkey))
5132 break;
5133 if (root->parse->hasDistinctOn &&
5134 !list_member_ptr(root->distinct_pathkeys, pathkey))
5135 break;
5136
5137 useful_pathkeys = lappend(useful_pathkeys, pathkey);
5138 }
5139
5140 /* If no match at all, no point in reordering needed_pathkeys */
5141 if (useful_pathkeys == NIL)
5142 return useful_pathkeys_list;
5143
5144 /*
5145 * If not full match, the resulting pathkey list is not useful without
5146 * incremental sort.
5147 */
5148 if (list_length(useful_pathkeys) < list_length(needed_pathkeys) &&
5149 !enable_incremental_sort)
5150 return useful_pathkeys_list;
5151
5152 /* Append the remaining PathKey nodes in needed_pathkeys */
5153 useful_pathkeys = list_concat_unique_ptr(useful_pathkeys,
5154 needed_pathkeys);
5155
5156 /*
5157 * If the resulting pathkey list is the same as the 'needed_pathkeys',
5158 * just drop it.
5159 */
5160 if (compare_pathkeys(needed_pathkeys,
5161 useful_pathkeys) == PATHKEYS_EQUAL)
5162 return useful_pathkeys_list;
5163
5164 useful_pathkeys_list = lappend(useful_pathkeys_list,
5165 useful_pathkeys);
5166
5167 return useful_pathkeys_list;
5168}
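/*
 * Example (illustrative): for SELECT DISTINCT a, b FROM tab with an input
 * path already ordered by (b, a), the needed (a, b) pathkeys can be
 * reordered here to (b, a), letting the DISTINCT reuse that ordering instead
 * of forcing an explicit sort on (a, b).
 */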
5169
5170/*
5171 * create_ordered_paths
5172 *
5173 * Build a new upperrel containing Paths for ORDER BY evaluation.
5174 *
5175 * All paths in the result must satisfy the ORDER BY ordering.
5176 * The only new paths we need consider are an explicit full sort
5177 * and incremental sort on the cheapest-total existing path.
5178 *
5179 * input_rel: contains the source-data Paths
5180 * target: the output tlist the result Paths must emit
5181 * limit_tuples: estimated bound on the number of output tuples,
5182 * or -1 if no LIMIT or couldn't estimate
5183 *
5184 * XXX This only looks at sort_pathkeys. I wonder if it needs to look at the
5185 * other pathkeys (grouping, ...) like generate_useful_gather_paths.
5186 */
5187static RelOptInfo *
5188create_ordered_paths(PlannerInfo *root,
5189 RelOptInfo *input_rel,
5190 PathTarget *target,
5191 bool target_parallel_safe,
5192 double limit_tuples)
5193{
5194 Path *cheapest_input_path = input_rel->cheapest_total_path;
5195 RelOptInfo *ordered_rel;
5196 ListCell *lc;
5197
5198 /* For now, do all work in the (ORDERED, NULL) upperrel */
5199 ordered_rel = fetch_upper_rel(root, UPPERREL_ORDERED, NULL);
5200
5201 /*
5202 * If the input relation is not parallel-safe, then the ordered relation
5203 * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
5204 * target list is parallel-safe.
5205 */
5206 if (input_rel->consider_parallel && target_parallel_safe)
5207 ordered_rel->consider_parallel = true;
5208
5209 /*
5210 * If the input rel belongs to a single FDW, so does the ordered_rel.
5211 */
5212 ordered_rel->serverid = input_rel->serverid;
5213 ordered_rel->userid = input_rel->userid;
5214 ordered_rel->useridiscurrent = input_rel->useridiscurrent;
5215 ordered_rel->fdwroutine = input_rel->fdwroutine;
5216
5217 foreach(lc, input_rel->pathlist)
5218 {
5219 Path *input_path = (Path *) lfirst(lc);
5220 Path *sorted_path;
5221 bool is_sorted;
5222 int presorted_keys;
5223
5224 is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5225 input_path->pathkeys, &presorted_keys);
5226
5227 if (is_sorted)
5228 sorted_path = input_path;
5229 else
5230 {
5231 /*
5232 * Try at least sorting the cheapest path and also try
5233 * incrementally sorting any path which is partially sorted
5234 * already (no need to deal with paths which have presorted keys
5235 * when incremental sort is disabled unless it's the cheapest
5236 * input path).
5237 */
5238 if (input_path != cheapest_input_path &&
5239 (presorted_keys == 0 || !enable_incremental_sort))
5240 continue;
5241
5242 /*
5243 * We've no need to consider both a sort and incremental sort.
5244 * We'll just do a sort if there are no presorted keys and an
5245 * incremental sort when there are presorted keys.
5246 */
5247 if (presorted_keys == 0 || !enable_incremental_sort)
5248 sorted_path = (Path *) create_sort_path(root,
5249 ordered_rel,
5250 input_path,
5251 root->sort_pathkeys,
5252 limit_tuples);
5253 else
5254 sorted_path = (Path *) create_incremental_sort_path(root,
5255 ordered_rel,
5256 input_path,
5257 root->sort_pathkeys,
5258 presorted_keys,
5259 limit_tuples);
5260 }
5261
5262 /*
5263 * If the pathtarget of the result path has different expressions from
5264 * the target to be applied, a projection step is needed.
5265 */
5266 if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5267 sorted_path = apply_projection_to_path(root, ordered_rel,
5268 sorted_path, target);
5269
5270 add_path(ordered_rel, sorted_path);
5271 }
5272
5273 /*
5274 * generate_gather_paths() will have already generated a simple Gather
5275 * path for the best parallel path, if any, and the loop above will have
5276 * considered sorting it. Similarly, generate_gather_paths() will also
5277 * have generated order-preserving Gather Merge plans which can be used
5278 * without sorting if they happen to match the sort_pathkeys, and the loop
5279 * above will have handled those as well. However, there's one more
5280 * possibility: it may make sense to sort the cheapest partial path or
5281 * incrementally sort any partial path that is partially sorted according
5282 * to the required output order and then use Gather Merge.
5283 */
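 /*
  * Example (illustrative): for SELECT * FROM big_tab ORDER BY a; with
  * parallel workers, each worker sorts (or incrementally sorts) its share of
  * the rows and a Gather Merge above preserves the "a" ordering while
  * combining the streams, avoiding a full serial sort of the whole table.
  */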
5284 if (ordered_rel->consider_parallel && root->sort_pathkeys != NIL &&
5285 input_rel->partial_pathlist != NIL)
5286 {
5287 Path *cheapest_partial_path;
5288
5289 cheapest_partial_path = linitial(input_rel->partial_pathlist);
5290
5291 foreach(lc, input_rel->partial_pathlist)
5292 {
5293 Path *input_path = (Path *) lfirst(lc);
5294 Path *sorted_path;
5295 bool is_sorted;
5296 int presorted_keys;
5297 double total_groups;
5298
5299 is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5300 input_path->pathkeys,
5301 &presorted_keys);
5302
5303 if (is_sorted)
5304 continue;
5305
5306 /*
5307 * Try at least sorting the cheapest path and also try
5308 * incrementally sorting any path which is partially sorted
5309 * already (no need to deal with paths which have presorted keys
5310 * when incremental sort is disabled unless it's the cheapest
5311 * partial path).
5312 */
5313 if (input_path != cheapest_partial_path &&
5314 (presorted_keys == 0 || !enable_incremental_sort))
5315 continue;
5316
5317 /*
5318 * We've no need to consider both a sort and incremental sort.
5319 * We'll just do a sort if there are no presorted keys and an
5320 * incremental sort when there are presorted keys.
5321 */
5322 if (presorted_keys == 0 || !enable_incremental_sort)
5323 sorted_path = (Path *) create_sort_path(root,
5324 ordered_rel,
5325 input_path,
5326 root->sort_pathkeys,
5327 limit_tuples);
5328 else
5329 sorted_path = (Path *) create_incremental_sort_path(root,
5330 ordered_rel,
5331 input_path,
5332 root->sort_pathkeys,
5333 presorted_keys,
5334 limit_tuples);
5335 total_groups = compute_gather_rows(sorted_path);
5336 sorted_path = (Path *)
5337 create_gather_merge_path(root, ordered_rel,
5338 sorted_path,
5339 sorted_path->pathtarget,
5340 root->sort_pathkeys, NULL,
5341 &total_groups);
5342
5343 /*
5344 * If the pathtarget of the result path has different expressions
5345 * from the target to be applied, a projection step is needed.
5346 */
5347 if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5348 sorted_path = apply_projection_to_path(root, ordered_rel,
5349 sorted_path, target);
5350
5351 add_path(ordered_rel, sorted_path);
5352 }
5353 }
5354
5355 /*
5356 * If there is an FDW that's responsible for all baserels of the query,
5357 * let it consider adding ForeignPaths.
5358 */
5359 if (ordered_rel->fdwroutine &&
5360 ordered_rel->fdwroutine->GetForeignUpperPaths)
5361 ordered_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_ORDERED,
5362 input_rel, ordered_rel,
5363 NULL);
5364
5365 /* Let extensions possibly add some more paths */
5366 if (create_upper_paths_hook)
5367 (*create_upper_paths_hook) (root, UPPERREL_ORDERED,
5368 input_rel, ordered_rel, NULL);
5369
5370 /*
5371 * No need to bother with set_cheapest here; grouping_planner does not
5372 * need us to do it.
5373 */
5374 Assert(ordered_rel->pathlist != NIL);
5375
5376 return ordered_rel;
5377}
5378
5379
5380/*
5381 * make_group_input_target
5382 * Generate appropriate PathTarget for initial input to grouping nodes.
5383 *
5384 * If there is grouping or aggregation, the scan/join subplan cannot emit
5385 * the query's final targetlist; for example, it certainly can't emit any
5386 * aggregate function calls. This routine generates the correct target
5387 * for the scan/join subplan.
5388 *
5389 * The query target list passed from the parser already contains entries
5390 * for all ORDER BY and GROUP BY expressions, but it will not have entries
5391 * for variables used only in HAVING clauses; so we need to add those
5392 * variables to the subplan target list. Also, we flatten all expressions
5393 * except GROUP BY items into their component variables; other expressions
5394 * will be computed by the upper plan nodes rather than by the subplan.
5395 * For example, given a query like
5396 * SELECT a+b,SUM(c+d) FROM table GROUP BY a+b;
5397 * we want to pass this targetlist to the subplan:
5398 * a+b,c,d
5399 * where the a+b target will be used by the Sort/Group steps, and the
5400 * other targets will be used for computing the final results.
5401 *
5402 * 'final_target' is the query's final target list (in PathTarget form)
5403 *
5404 * The result is the PathTarget to be computed by the Paths returned from
5405 * query_planner().
5406 */
5407static PathTarget *
5408make_group_input_target(PlannerInfo *root, PathTarget *final_target)
5409{
5410 Query *parse = root->parse;
5411 PathTarget *input_target;
5412 List *non_group_cols;
5413 List *non_group_vars;
5414 int i;
5415 ListCell *lc;
5416
5417 /*
5418 * We must build a target containing all grouping columns, plus any other
5419 * Vars mentioned in the query's targetlist and HAVING qual.
5420 */
5421 input_target = create_empty_pathtarget();
5422 non_group_cols = NIL;
5423
5424 i = 0;
5425 foreach(lc, final_target->exprs)
5426 {
5427 Expr *expr = (Expr *) lfirst(lc);
5428 Index sgref = get_pathtarget_sortgroupref(final_target, i);
5429
5430 if (sgref && root->processed_groupClause &&
5431 get_sortgroupref_clause_noerr(sgref,
5432 root->processed_groupClause) != NULL)
5433 {
5434 /*
5435 * It's a grouping column, so add it to the input target as-is.
5436 *
5437 * Note that the target is logically below the grouping step. So
5438 * with grouping sets we need to remove the RT index of the
5439 * grouping step if there is any from the target expression.
5440 */
5441 if (parse->hasGroupRTE && parse->groupingSets != NIL)
5442 {
5443 Assert(root->group_rtindex > 0);
5444 expr = (Expr *)
5445 remove_nulling_relids((Node *) expr,
5446 bms_make_singleton(root->group_rtindex),
5447 NULL);
5448 }
5449 add_column_to_pathtarget(input_target, expr, sgref);
5450 }
5451 else
5452 {
5453 /*
5454 * Non-grouping column, so just remember the expression for later
5455 * call to pull_var_clause.
5456 */
5457 non_group_cols = lappend(non_group_cols, expr);
5458 }
5459
5460 i++;
5461 }
5462
5463 /*
5464 * If there's a HAVING clause, we'll need the Vars it uses, too.
5465 */
5466 if (parse->havingQual)
5467 non_group_cols = lappend(non_group_cols, parse->havingQual);
5468
5469 /*
5470 * Pull out all the Vars mentioned in non-group cols (plus HAVING), and
5471 * add them to the input target if not already present. (A Var used
5472 * directly as a GROUP BY item will be present already.) Note this
5473 * includes Vars used in resjunk items, so we are covering the needs of
5474 * ORDER BY and window specifications. Vars used within Aggrefs and
5475 * WindowFuncs will be pulled out here, too.
5476 *
5477 * Note that the target is logically below the grouping step. So with
5478 * grouping sets we need to remove the RT index of the grouping step if
5479 * there is any from the non-group Vars.
5480 */
5481 non_group_vars = pull_var_clause((Node *) non_group_cols,
5482 PVC_RECURSE_AGGREGATES |
5483 PVC_RECURSE_WINDOWFUNCS |
5484 PVC_INCLUDE_PLACEHOLDERS);
5485 if (parse->hasGroupRTE && parse->groupingSets != NIL)
5486 {
5487 Assert(root->group_rtindex > 0);
5488 non_group_vars = (List *)
5489 remove_nulling_relids((Node *) non_group_vars,
5490 bms_make_singleton(root->group_rtindex),
5491 NULL);
5492 }
5493 add_new_columns_to_pathtarget(input_target, non_group_vars);
5494
5495 /* clean up cruft */
5496 list_free(non_group_vars);
5497 list_free(non_group_cols);
5498
5499 /* XXX this causes some redundant cost calculation ... */
5500 return set_pathtarget_cost_width(root, input_target);
5501}
5502
5503/*
5504 * make_partial_grouping_target
5505 * Generate appropriate PathTarget for output of partial aggregate
5506 * (or partial grouping, if there are no aggregates) nodes.
5507 *
5508 * A partial aggregation node needs to emit all the same aggregates that
5509 * a regular aggregation node would, plus any aggregates used in HAVING;
5510 * except that the Aggref nodes should be marked as partial aggregates.
5511 *
5512 * In addition, we'd better emit any Vars and PlaceHolderVars that are
5513 * used outside of Aggrefs in the aggregation tlist and HAVING. (Presumably,
5514 * these would be Vars that are grouped by or used in grouping expressions.)
5515 *
5516 * grouping_target is the tlist to be emitted by the topmost aggregation step.
5517 * havingQual represents the HAVING clause.
5518 */
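/*
 * Illustrative sketch: for a query roughly like
 *   SELECT a, SUM(b) FROM t GROUP BY a HAVING AVG(c) > 0
 * the partial target would contain a, SUM(b) and AVG(c), with both Aggrefs
 * switched into partial (serialized) mode below; the Finalize step above
 * combines the per-worker transition states and applies the HAVING clause.
 */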
5519static PathTarget *
5520make_partial_grouping_target(PlannerInfo *root,
5521 PathTarget *grouping_target,
5522 Node *havingQual)
5523{
5524 PathTarget *partial_target;
5525 List *non_group_cols;
5526 List *non_group_exprs;
5527 int i;
5528 ListCell *lc;
5529
5530 partial_target = create_empty_pathtarget();
5531 non_group_cols = NIL;
5532
5533 i = 0;
5534 foreach(lc, grouping_target->exprs)
5535 {
5536 Expr *expr = (Expr *) lfirst(lc);
5537 Index sgref = get_pathtarget_sortgroupref(grouping_target, i);
5538
5539 if (sgref && root->processed_groupClause &&
5540 get_sortgroupref_clause_noerr(sgref,
5541 root->processed_groupClause) != NULL)
5542 {
5543 /*
5544 * It's a grouping column, so add it to the partial_target as-is.
5545 * (This allows the upper agg step to repeat the grouping calcs.)
5546 */
5547 add_column_to_pathtarget(partial_target, expr, sgref);
5548 }
5549 else
5550 {
5551 /*
5552 * Non-grouping column, so just remember the expression for later
5553 * call to pull_var_clause.
5554 */
5555 non_group_cols = lappend(non_group_cols, expr);
5556 }
5557
5558 i++;
5559 }
5560
5561 /*
5562 * If there's a HAVING clause, we'll need the Vars/Aggrefs it uses, too.
5563 */
5564 if (havingQual)
5565 non_group_cols = lappend(non_group_cols, havingQual);
5566
5567 /*
5568 * Pull out all the Vars, PlaceHolderVars, and Aggrefs mentioned in
5569 * non-group cols (plus HAVING), and add them to the partial_target if not
5570 * already present. (An expression used directly as a GROUP BY item will
5571 * be present already.) Note this includes Vars used in resjunk items, so
5572 * we are covering the needs of ORDER BY and window specifications.
5573 */
5574 non_group_exprs = pull_var_clause((Node *) non_group_cols,
5575 PVC_INCLUDE_AGGREGATES |
5576 PVC_RECURSE_WINDOWFUNCS |
5577 PVC_INCLUDE_PLACEHOLDERS);
5578
5579 add_new_columns_to_pathtarget(partial_target, non_group_exprs);
5580
5581 /*
5582 * Adjust Aggrefs to put them in partial mode. At this point all Aggrefs
5583 * are at the top level of the target list, so we can just scan the list
5584 * rather than recursing through the expression trees.
5585 */
5586 foreach(lc, partial_target->exprs)
5587 {
5588 Aggref *aggref = (Aggref *) lfirst(lc);
5589
5590 if (IsA(aggref, Aggref))
5591 {
5592 Aggref *newaggref;
5593
5594 /*
5595 * We shouldn't need to copy the substructure of the Aggref node,
5596 * but flat-copy the node itself to avoid damaging other trees.
5597 */
5598 newaggref = makeNode(Aggref);
5599 memcpy(newaggref, aggref, sizeof(Aggref));
5600
5601 /* For now, assume serialization is required */
5602 mark_partial_aggref(newaggref, AGGSPLIT_INITIAL_SERIAL);
5603
5604 lfirst(lc) = newaggref;
5605 }
5606 }
5607
5608 /* clean up cruft */
5609 list_free(non_group_exprs);
5610 list_free(non_group_cols);
5611
5612 /* XXX this causes some redundant cost calculation ... */
5613 return set_pathtarget_cost_width(root, partial_target);
5614}
5615
5616/*
5617 * mark_partial_aggref
5618 * Adjust an Aggref to make it represent a partial-aggregation step.
5619 *
5620 * The Aggref node is modified in-place; caller must do any copying required.
5621 */
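/*
 * For example (illustrative): an aggregate such as avg(numeric) has an
 * INTERNAL transition type, so a partial Aggref that skips the final
 * function and serializes its state is relabelled to return BYTEA, while an
 * aggregate whose transition type is an ordinary SQL type simply returns
 * that transition type from the partial step.
 */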
5622void
5623mark_partial_aggref(Aggref *agg, AggSplit aggsplit)
5624{
5625 /* aggtranstype should be computed by this point */
5626 Assert(OidIsValid(agg->aggtranstype));
5627 /* ... but aggsplit should still be as the parser left it */
5628 Assert(agg->aggsplit == AGGSPLIT_SIMPLE);
5629
5630 /* Mark the Aggref with the intended partial-aggregation mode */
5631 agg->aggsplit = aggsplit;
5632
5633 /*
5634 * Adjust result type if needed. Normally, a partial aggregate returns
5635 * the aggregate's transition type; but if that's INTERNAL and we're
5636 * serializing, it returns BYTEA instead.
5637 */
5638 if (DO_AGGSPLIT_SKIPFINAL(aggsplit))
5639 {
5640 if (agg->aggtranstype == INTERNALOID && DO_AGGSPLIT_SERIALIZE(aggsplit))
5641 agg->aggtype = BYTEAOID;
5642 else
5643 agg->aggtype = agg->aggtranstype;
5644 }
5645}
5646
5647/*
5648 * postprocess_setop_tlist
5649 * Fix up targetlist returned by plan_set_operations().
5650 *
5651 * We need to transpose sort key info from the orig_tlist into new_tlist.
5652 * NOTE: this would not be good enough if we supported resjunk sort keys
5653 * for results of set operations --- then, we'd need to project a whole
5654 * new tlist to evaluate the resjunk columns. For now, just ereport if we
5655 * find any resjunk columns in orig_tlist.
5656 */
5657static List *
5658postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
5659{
5660 ListCell *l;
5661 ListCell *orig_tlist_item = list_head(orig_tlist);
5662
5663 foreach(l, new_tlist)
5664 {
5665 TargetEntry *new_tle = lfirst_node(TargetEntry, l);
5666 TargetEntry *orig_tle;
5667
5668 /* ignore resjunk columns in setop result */
5669 if (new_tle->resjunk)
5670 continue;
5671
5672 Assert(orig_tlist_item != NULL);
5673 orig_tle = lfirst_node(TargetEntry, orig_tlist_item);
5674 orig_tlist_item = lnext(orig_tlist, orig_tlist_item);
5675 if (orig_tle->resjunk) /* should not happen */
5676 elog(ERROR, "resjunk output columns are not implemented");
5677 Assert(new_tle->resno == orig_tle->resno);
5678 new_tle->ressortgroupref = orig_tle->ressortgroupref;
5679 }
5680 if (orig_tlist_item != NULL)
5681 elog(ERROR, "resjunk output columns are not implemented");
5682 return new_tlist;
5683}
5684
5685/*
5686 * optimize_window_clauses
5687 * Call each WindowFunc's prosupport function to see if we're able to
5688 * make any adjustments to any of the WindowClauses so that the executor
5689 * can execute the window functions in a more optimal way.
5690 *
5691 * Currently we only allow adjustments to the WindowClause's frameOptions. We
5692 * may allow more things to be done here in the future.
5693 */
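/*
 * Illustrative example: row_number() produces the same result whatever the
 * window frame is, so its prosupport function may request a cheaper set of
 * frameOptions; if that change makes two WindowClauses identical apart from
 * their names, the code below merges them into a single clause.
 */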
5694static void
5695optimize_window_clauses(PlannerInfo *root, WindowFuncLists *wflists)
5696{
5697 List *windowClause = root->parse->windowClause;
5698 ListCell *lc;
5699
5700 foreach(lc, windowClause)
5701 {
5702 WindowClause *wc = lfirst_node(WindowClause, lc);
5703 ListCell *lc2;
5704 int optimizedFrameOptions = 0;
5705
5706 Assert(wc->winref <= wflists->maxWinRef);
5707
5708 /* skip any WindowClauses that have no WindowFuncs */
5709 if (wflists->windowFuncs[wc->winref] == NIL)
5710 continue;
5711
5712 foreach(lc2, wflists->windowFuncs[wc->winref])
5713 {
5714 SupportRequestOptimizeWindowClause req;
5715 SupportRequestOptimizeWindowClause *res;
5716 WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
5717 Oid prosupport;
5718
5719 prosupport = get_func_support(wfunc->winfnoid);
5720
5721 /* Check if there's a support function for 'wfunc' */
5722 if (!OidIsValid(prosupport))
5723 break; /* can't optimize this WindowClause */
5724
5725 req.type = T_SupportRequestOptimizeWindowClause;
5726 req.window_clause = wc;
5727 req.window_func = wfunc;
5728 req.frameOptions = wc->frameOptions;
5729
5730 /* call the support function */
5731 res = (SupportRequestOptimizeWindowClause *)
5732 DatumGetPointer(OidFunctionCall1(prosupport,
5733 PointerGetDatum(&req)));
5734
5735 /*
5736 * Skip to next WindowClause if the support function does not
5737 * support this request type.
5738 */
5739 if (res == NULL)
5740 break;
5741
5742 /*
5743 * Save these frameOptions for the first WindowFunc for this
5744 * WindowClause.
5745 */
5746 if (foreach_current_index(lc2) == 0)
5747 optimizedFrameOptions = res->frameOptions;
5748
5749 /*
5750 * On subsequent WindowFuncs, if the frameOptions are not the same
5751 * then we're unable to optimize the frameOptions for this
5752 * WindowClause.
5753 */
5754 else if (optimizedFrameOptions != res->frameOptions)
5755 break; /* skip to the next WindowClause, if any */
5756 }
5757
5758 /* adjust the frameOptions if all WindowFunc's agree that it's ok */
5759 if (lc2 == NULL && wc->frameOptions != optimizedFrameOptions)
5760 {
5761 ListCell *lc3;
5762
5763 /* apply the new frame options */
5764 wc->frameOptions = optimizedFrameOptions;
5765
5766 /*
5767 * We now check to see if changing the frameOptions has caused
5768 * this WindowClause to be a duplicate of some other WindowClause.
5769 * This can only happen if we have multiple WindowClauses, so
5770 * don't bother if there's only 1.
5771 */
5772 if (list_length(windowClause) == 1)
5773 continue;
5774
5775 /*
5776 * Do the duplicate check and reuse the existing WindowClause if
5777 * we find a duplicate.
5778 */
5779 foreach(lc3, windowClause)
5780 {
5781 WindowClause *existing_wc = lfirst_node(WindowClause, lc3);
5782
5783 /* skip over the WindowClause we're currently editing */
5784 if (existing_wc == wc)
5785 continue;
5786
5787 /*
5788 * Perform the same duplicate check that is done in
5789 * transformWindowFuncCall.
5790 */
5791 if (equal(wc->partitionClause, existing_wc->partitionClause) &&
5792 equal(wc->orderClause, existing_wc->orderClause) &&
5793 wc->frameOptions == existing_wc->frameOptions &&
5794 equal(wc->startOffset, existing_wc->startOffset) &&
5795 equal(wc->endOffset, existing_wc->endOffset))
5796 {
5797 ListCell *lc4;
5798
5799 /*
5800 * Now move each WindowFunc in 'wc' into 'existing_wc'.
5801 * This requires adjusting each WindowFunc's winref and
5802 * moving the WindowFuncs in 'wc' to the list of
5803 * WindowFuncs in 'existing_wc'.
5804 */
5805 foreach(lc4, wflists->windowFuncs[wc->winref])
5806 {
5807 WindowFunc *wfunc = lfirst_node(WindowFunc, lc4);
5808
5809 wfunc->winref = existing_wc->winref;
5810 }
5811
5812 /* move list items */
5813 wflists->windowFuncs[existing_wc->winref] = list_concat(wflists->windowFuncs[existing_wc->winref],
5814 wflists->windowFuncs[wc->winref]);
5815 wflists->windowFuncs[wc->winref] = NIL;
5816
5817 /*
5818 * transformWindowFuncCall() should have made sure there
5819 * are no other duplicates, so we needn't bother looking
5820 * any further.
5821 */
5822 break;
5823 }
5824 }
5825 }
5826 }
5827}
5828
5829/*
5830 * select_active_windows
5831 * Create a list of the "active" window clauses (ie, those referenced
5832 * by non-deleted WindowFuncs) in the order they are to be executed.
5833 */
5834static List *
5835select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
5836{
5837 List *windowClause = root->parse->windowClause;
5838 List *result = NIL;
5839 ListCell *lc;
5840 int nActive = 0;
5841 WindowClauseSortData *actives = palloc(sizeof(WindowClauseSortData)
5842 * list_length(windowClause));
5843
5844 /* First, construct an array of the active windows */
5845 foreach(lc, windowClause)
5846 {
5847 WindowClause *wc = lfirst_node(WindowClause, lc);
5848
5849 /* It's only active if wflists shows some related WindowFuncs */
5850 Assert(wc->winref <= wflists->maxWinRef);
5851 if (wflists->windowFuncs[wc->winref] == NIL)
5852 continue;
5853
5854 actives[nActive].wc = wc; /* original clause */
5855
5856 /*
5857 * For sorting, we want the list of partition keys followed by the
5858 * list of sort keys. But pathkeys construction will remove duplicates
5859 * between the two, so we can as well (even though we can't detect all
5860 * of the duplicates, since some may come from ECs - that might mean
5861 * we miss optimization chances here). We must, however, ensure that
5862 * the order of entries is preserved with respect to the ones we do
5863 * keep.
5864 *
5865 * partitionClause and orderClause had their own duplicates removed in
5866 * parse analysis, so we're only concerned here with removing
5867 * orderClause entries that also appear in partitionClause.
5868 */
5869 actives[nActive].uniqueOrder =
5870 list_concat_unique(list_copy(wc->partitionClause),
5871 wc->orderClause);
5872 nActive++;
5873 }
5874
5875 /*
5876 * Sort active windows by their partitioning/ordering clauses, ignoring
5877 * any framing clauses, so that the windows that need the same sorting are
5878 * adjacent in the list. When we come to generate paths, this will avoid
5879 * inserting additional Sort nodes.
5880 *
5881 * This is how we implement a specific requirement from the SQL standard,
5882 * which says that when two or more windows are order-equivalent (i.e.
5883 * have matching partition and order clauses, even if their names or
5884 * framing clauses differ), then all peer rows must be presented in the
5885 * same order in all of them. If we allowed multiple sort nodes for such
5886 * cases, we'd risk having the peer rows end up in different orders in
5887 * equivalent windows due to sort instability. (See General Rule 4 of
5888 * <window clause> in SQL2008 - SQL2016.)
5889 *
5890 * Additionally, if the entire list of clauses of one window is a prefix
5891 * of another, put first the window with stronger sorting requirements.
5892 * This way we will first sort for stronger window, and won't have to sort
5893 * again for the weaker one.
5894 */
5895 qsort(actives, nActive, sizeof(WindowClauseSortData), common_prefix_cmp);
5896
5897 /* build ordered list of the original WindowClause nodes */
5898 for (int i = 0; i < nActive; i++)
5899 result = lappend(result, actives[i].wc);
5900
5901 pfree(actives);
5902
5903 return result;
5904}
5905
5906/*
5907 * common_prefix_cmp
5908 * QSort comparison function for WindowClauseSortData
5909 *
5910 * Sort the windows by the required sorting clauses. First, compare the sort
5911 * clauses themselves. Second, if one window's clauses are a prefix of another
5912 * one's clauses, put the window with more sort clauses first.
5913 *
5914 * We purposefully sort by the highest tleSortGroupRef first. Since
5915 * tleSortGroupRefs are assigned for the query's DISTINCT and ORDER BY first
5916 * and because here we sort the lowest tleSortGroupRefs last, if a
5917 * WindowClause is sharing a tleSortGroupRef with the query's DISTINCT or
5918 * ORDER BY clause, this makes it more likely that the final WindowAgg will
5919 * provide presorted input for the query's DISTINCT or ORDER BY clause, thus
5920 * reducing the total number of sorts required for the query.
5921 */
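/*
 * Illustrative example: given w1 AS (PARTITION BY a) and
 * w2 AS (PARTITION BY a ORDER BY b), w2 sorts ahead of w1 because its clause
 * list is longer; the sort produced for w2 then also satisfies w1, so no
 * additional sort is needed for it.
 */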
5922static int
5923common_prefix_cmp(const void *a, const void *b)
5924{
5925 const WindowClauseSortData *wcsa = a;
5926 const WindowClauseSortData *wcsb = b;
5927 ListCell *item_a;
5928 ListCell *item_b;
5929
5930 forboth(item_a, wcsa->uniqueOrder, item_b, wcsb->uniqueOrder)
5931 {
5932 SortGroupClause *sca = lfirst_node(SortGroupClause, item_a);
5933 SortGroupClause *scb = lfirst_node(SortGroupClause, item_b);
5934
5935 if (sca->tleSortGroupRef > scb->tleSortGroupRef)
5936 return -1;
5937 else if (sca->tleSortGroupRef < scb->tleSortGroupRef)
5938 return 1;
5939 else if (sca->sortop > scb->sortop)
5940 return -1;
5941 else if (sca->sortop < scb->sortop)
5942 return 1;
5943 else if (sca->nulls_first && !scb->nulls_first)
5944 return -1;
5945 else if (!sca->nulls_first && scb->nulls_first)
5946 return 1;
5947 /* no need to compare eqop, since it is fully determined by sortop */
5948 }
5949
5950 if (list_length(wcsa->uniqueOrder) > list_length(wcsb->uniqueOrder))
5951 return -1;
5952 else if (list_length(wcsa->uniqueOrder) < list_length(wcsb->uniqueOrder))
5953 return 1;
5954
5955 return 0;
5956}
5957
5958/*
5959 * make_window_input_target
5960 * Generate appropriate PathTarget for initial input to WindowAgg nodes.
5961 *
5962 * When the query has window functions, this function computes the desired
5963 * target to be computed by the node just below the first WindowAgg.
5964 * This tlist must contain all values needed to evaluate the window functions,
5965 * compute the final target list, and perform any required final sort step.
5966 * If multiple WindowAggs are needed, each intermediate one adds its window
5967 * function results onto this base tlist; only the topmost WindowAgg computes
5968 * the actual desired target list.
5969 *
5970 * This function is much like make_group_input_target, though not quite enough
5971 * like it to share code. As in that function, we flatten most expressions
5972 * into their component variables. But we do not want to flatten window
5973 * PARTITION BY/ORDER BY clauses, since that might result in multiple
5974 * evaluations of them, which would be bad (possibly even resulting in
5975 * inconsistent answers, if they contain volatile functions).
5976 * Also, we must not flatten GROUP BY clauses that were left unflattened by
5977 * make_group_input_target, because we may no longer have access to the
5978 * individual Vars in them.
5979 *
5980 * Another key difference from make_group_input_target is that we don't
5981 * flatten Aggref expressions, since those are to be computed below the
5982 * window functions and just referenced like Vars above that.
5983 *
5984 * 'final_target' is the query's final target list (in PathTarget form)
5985 * 'activeWindows' is the list of active windows previously identified by
5986 * select_active_windows.
5987 *
5988 * The result is the PathTarget to be computed by the plan node immediately
5989 * below the first WindowAgg node.
5990 */
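/*
 * Illustrative sketch: for a query roughly like
 *   SELECT f(x), SUM(y) OVER (PARTITION BY g(z)) FROM t
 * the input target would be approximately
 *   g(z), x, y
 * keeping the PARTITION BY expression g(z) intact while flattening f(x) to
 * just x; f(x) itself is evaluated above the WindowAgg.
 */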
5991static PathTarget *
5992make_window_input_target(PlannerInfo *root,
5993 PathTarget *final_target,
5994 List *activeWindows)
5995{
5996 PathTarget *input_target;
5997 Bitmapset *sgrefs;
5998 List *flattenable_cols;
5999 List *flattenable_vars;
6000 int i;
6001 ListCell *lc;
6002
6003 Assert(root->parse->hasWindowFuncs);
6004
6005 /*
6006 * Collect the sortgroupref numbers of window PARTITION/ORDER BY clauses
6007 * into a bitmapset for convenient reference below.
6008 */
6009 sgrefs = NULL;
6010 foreach(lc, activeWindows)
6011 {
6012 WindowClause *wc = lfirst_node(WindowClause, lc);
6013 ListCell *lc2;
6014
6015 foreach(lc2, wc->partitionClause)
6016 {
6017 SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
6018
6019 sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6020 }
6021 foreach(lc2, wc->orderClause)
6022 {
6023 SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
6024
6025 sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6026 }
6027 }
6028
6029 /* Add in sortgroupref numbers of GROUP BY clauses, too */
6030 foreach(lc, root->processed_groupClause)
6031 {
6032 SortGroupClause *grpcl = lfirst_node(SortGroupClause, lc);
6033
6034 sgrefs = bms_add_member(sgrefs, grpcl->tleSortGroupRef);
6035 }
6036
6037 /*
6038 * Construct a target containing all the non-flattenable targetlist items,
6039 * and save aside the others for a moment.
6040 */
6041 input_target = create_empty_pathtarget();
6042 flattenable_cols = NIL;
6043
6044 i = 0;
6045 foreach(lc, final_target->exprs)
6046 {
6047 Expr *expr = (Expr *) lfirst(lc);
6048 Index sgref = get_pathtarget_sortgroupref(final_target, i);
6049
6050 /*
6051 * Don't want to deconstruct window clauses or GROUP BY items. (Note
6052 * that such items can't contain window functions, so it's okay to
6053 * compute them below the WindowAgg nodes.)
6054 */
6055 if (sgref != 0 && bms_is_member(sgref, sgrefs))
6056 {
6057 /*
6058 * Don't want to deconstruct this value, so add it to the input
6059 * target as-is.
6060 */
6061 add_column_to_pathtarget(input_target, expr, sgref);
6062 }
6063 else
6064 {
6065 /*
6066 * Column is to be flattened, so just remember the expression for
6067 * later call to pull_var_clause.
6068 */
6069 flattenable_cols = lappend(flattenable_cols, expr);
6070 }
6071
6072 i++;
6073 }
6074
6075 /*
6076 * Pull out all the Vars and Aggrefs mentioned in flattenable columns, and
6077 * add them to the input target if not already present. (Some might be
6078 * there already because they're used directly as window/group clauses.)
6079 *
6080 * Note: it's essential to use PVC_INCLUDE_AGGREGATES here, so that any
6081 * Aggrefs are placed in the Agg node's tlist and not left to be computed
6082 * at higher levels. On the other hand, we should recurse into
6083 * WindowFuncs to make sure their input expressions are available.
6084 */
6085 flattenable_vars = pull_var_clause((Node *) flattenable_cols,
6086 PVC_INCLUDE_AGGREGATES |
6087 PVC_RECURSE_WINDOWFUNCS |
6088 PVC_INCLUDE_PLACEHOLDERS);
6089 add_new_columns_to_pathtarget(input_target, flattenable_vars);
6090
6091 /* clean up cruft */
6092 list_free(flattenable_vars);
6093 list_free(flattenable_cols);
6094
6095 /* XXX this causes some redundant cost calculation ... */
6096 return set_pathtarget_cost_width(root, input_target);
6097}
6098
6099/*
6100 * make_pathkeys_for_window
6101 * Create a pathkeys list describing the required input ordering
6102 * for the given WindowClause.
6103 *
6104 * Modifies wc's partitionClause to remove any clauses which are deemed
6105 * redundant by the pathkey logic.
6106 *
6107 * The required ordering is first the PARTITION keys, then the ORDER keys.
6108 * In the future we might try to implement windowing using hashing, in which
6109 * case the ordering could be relaxed, but for now we always sort.
6110 */
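/*
 * For instance (illustrative): if the query also has WHERE a = 1, then a
 * PARTITION BY a clause yields a redundant pathkey (a is constant within the
 * relation), so it is removed from wc->partitionClause and no sort on a is
 * requested.
 */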
6111static List *
6112make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
6113 List *tlist)
6114{
6115 List *window_pathkeys = NIL;
6116
6117 /* Throw error if can't sort */
6118 if (!grouping_is_sortable(wc->partitionClause))
6119 ereport(ERROR,
6120 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6121 errmsg("could not implement window PARTITION BY"),
6122 errdetail("Window partitioning columns must be of sortable datatypes.")));
6123 if (!grouping_is_sortable(wc->orderClause))
6124 ereport(ERROR,
6125 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6126 errmsg("could not implement window ORDER BY"),
6127 errdetail("Window ordering columns must be of sortable datatypes.")));
6128
6129 /*
6130 * First fetch the pathkeys for the PARTITION BY clause. We can safely
6131 * remove any clauses from the wc->partitionClause for redundant pathkeys.
6132 */
6133 if (wc->partitionClause != NIL)
6134 {
6135 bool sortable;
6136
6137 window_pathkeys = make_pathkeys_for_sortclauses_extended(root,
6138 &wc->partitionClause,
6139 tlist,
6140 true,
6141 false,
6142 &sortable,
6143 false);
6144
6145 Assert(sortable);
6146 }
6147
6148 /*
6149 * In principle, we could also consider removing redundant ORDER BY items
6150 * too as doing so does not alter the result of peer row checks done by
6151 * the executor. However, we must *not* remove the ordering column for
6152 * RANGE OFFSET cases, as the executor needs that for in_range tests even
6153 * if it's known to be equal to some partitioning column.
6154 */
6155 if (wc->orderClause != NIL)
6156 {
6157 List *orderby_pathkeys;
6158
6159 orderby_pathkeys = make_pathkeys_for_sortclauses(root,
6160 wc->orderClause,
6161 tlist);
6162
6163 /* Okay, make the combined pathkeys */
6164 if (window_pathkeys != NIL)
6165 window_pathkeys = append_pathkeys(window_pathkeys, orderby_pathkeys);
6166 else
6167 window_pathkeys = orderby_pathkeys;
6168 }
6169
6170 return window_pathkeys;
6171}
6172
6173/*
6174 * make_sort_input_target
6175 * Generate appropriate PathTarget for initial input to Sort step.
6176 *
6177 * If the query has ORDER BY, this function chooses the target to be computed
6178 * by the node just below the Sort (and DISTINCT, if any, since Unique can't
6179 * project) steps. This might or might not be identical to the query's final
6180 * output target.
6181 *
6182 * The main argument for keeping the sort-input tlist the same as the final
6183 * is that we avoid a separate projection node (which will be needed if
6184 * they're different, because Sort can't project). However, there are also
6185 * advantages to postponing tlist evaluation till after the Sort: it ensures
6186 * a consistent order of evaluation for any volatile functions in the tlist,
6187 * and if there's also a LIMIT, we can stop the query without ever computing
6188 * tlist functions for later rows, which is beneficial for both volatile and
6189 * expensive functions.
6190 *
6191 * Our current policy is to postpone volatile expressions till after the sort
6192 * unconditionally (assuming that that's possible, ie they are in plain tlist
6193 * columns and not ORDER BY/GROUP BY/DISTINCT columns). We also prefer to
6194 * postpone set-returning expressions, because running them beforehand would
6195 * bloat the sort dataset, and because it might cause unexpected output order
6196 * if the sort isn't stable. However there's a constraint on that: all SRFs
6197 * in the tlist should be evaluated at the same plan step, so that they can
6198 * run in sync in nodeProjectSet. So if any SRFs are in sort columns, we
6199 * mustn't postpone any SRFs. (Note that in principle that policy should
6200 * probably get applied to the group/window input targetlists too, but we
6201 * have not done that historically.) Lastly, expensive expressions are
6202 * postponed if there is a LIMIT, or if root->tuple_fraction shows that
6203 * partial evaluation of the query is possible (if neither is true, we expect
6204 * to have to evaluate the expressions for every row anyway), or if there are
6205 * any volatile or set-returning expressions (since once we've put in a
6206 * projection at all, it won't cost any more to postpone more stuff).
6207 *
6208 * Another issue that could potentially be considered here is that
6209 * evaluating tlist expressions could result in data that's either wider
6210 * or narrower than the input Vars, thus changing the volume of data that
6211 * has to go through the Sort. However, we usually have only a very bad
6212 * idea of the output width of any expression more complex than a Var,
6213 * so for now it seems too risky to try to optimize on that basis.
6214 *
6215 * Note that if we do produce a modified sort-input target, and then the
6216 * query ends up not using an explicit Sort, no particular harm is done:
6217 * we'll initially use the modified target for the preceding path nodes,
6218 * but then change them to the final target with apply_projection_to_path.
6219 * Moreover, in such a case the guarantees about evaluation order of
6220 * volatile functions still hold, since the rows are sorted already.
6221 *
6222 * This function has some things in common with make_group_input_target and
6223 * make_window_input_target, though the detailed rules for what to do are
6224 * different. We never flatten/postpone any grouping or ordering columns;
6225 * those are needed before the sort. If we do flatten a particular
6226 * expression, we leave Aggref and WindowFunc nodes alone, since those were
6227 * computed earlier.
6228 *
6229 * 'final_target' is the query's final target list (in PathTarget form)
6230 * 'have_postponed_srfs' is an output argument, see below
6231 *
6232 * The result is the PathTarget to be computed by the plan node immediately
6233 * below the Sort step (and the Distinct step, if any). This will be
6234 * exactly final_target if we decide a projection step wouldn't be helpful.
6235 *
6236 * In addition, *have_postponed_srfs is set to true if we choose to postpone
6237 * any set-returning functions to after the Sort.
6238 */
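/*
 * Illustrative sketch: for a query roughly like
 *   SELECT x, random() FROM t ORDER BY x LIMIT 10
 * the volatile random() call is postponed, so the sort-input target is just
 * {x}; random() is then computed in a projection above the Sort, and only
 * for the ten rows that survive the LIMIT.
 */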
6239static PathTarget *
6240make_sort_input_target(PlannerInfo *root,
6241 PathTarget *final_target,
6242 bool *have_postponed_srfs)
6243{
6244 Query *parse = root->parse;
6245 PathTarget *input_target;
6246 int ncols;
6247 bool *col_is_srf;
6248 bool *postpone_col;
6249 bool have_srf;
6250 bool have_volatile;
6251 bool have_expensive;
6252 bool have_srf_sortcols;
6253 bool postpone_srfs;
6254 List *postponable_cols;
6255 List *postponable_vars;
6256 int i;
6257 ListCell *lc;
6258
6259 /* Shouldn't get here unless query has ORDER BY */
6260 Assert(parse->sortClause);
6261
6262 *have_postponed_srfs = false; /* default result */
6263
6264 /* Inspect tlist and collect per-column information */
6265 ncols = list_length(final_target->exprs);
6266 col_is_srf = (bool *) palloc0(ncols * sizeof(bool));
6267 postpone_col = (bool *) palloc0(ncols * sizeof(bool));
6268 have_srf = have_volatile = have_expensive = have_srf_sortcols = false;
6269
6270 i = 0;
6271 foreach(lc, final_target->exprs)
6272 {
6273 Expr *expr = (Expr *) lfirst(lc);
6274
6275 /*
6276 * If the column has a sortgroupref, assume it has to be evaluated
6277 * before sorting. Generally such columns would be ORDER BY, GROUP
6278 * BY, etc targets. One exception is columns that were removed from
6279 * GROUP BY by remove_useless_groupby_columns() ... but those would
6280 * only be Vars anyway. There don't seem to be any cases where it
6281 * would be worth the trouble to double-check.
6282 */
6283 if (get_pathtarget_sortgroupref(final_target, i) == 0)
6284 {
6285 /*
6286 * Check for SRF or volatile functions. Check the SRF case first
6287 * because we must know whether we have any postponed SRFs.
6288 */
6289 if (parse->hasTargetSRFs &&
6290 expression_returns_set((Node *) expr))
6291 {
6292 /* We'll decide below whether these are postponable */
6293 col_is_srf[i] = true;
6294 have_srf = true;
6295 }
6296 else if (contain_volatile_functions((Node *) expr))
6297 {
6298 /* Unconditionally postpone */
6299 postpone_col[i] = true;
6300 have_volatile = true;
6301 }
6302 else
6303 {
6304 /*
6305 * Else check the cost. XXX it's annoying to have to do this
6306 * when set_pathtarget_cost_width() just did it. Refactor to
6307 * allow sharing the work?
6308 */
6309 QualCost cost;
6310
6311 cost_qual_eval_node(&cost, (Node *) expr, root);
6312
6313 /*
6314 * We arbitrarily define "expensive" as "more than 10X
6315 * cpu_operator_cost". Note this will take in any PL function
6316 * with default cost.
6317 */
6318 if (cost.per_tuple > 10 * cpu_operator_cost)
6319 {
6320 postpone_col[i] = true;
6321 have_expensive = true;
6322 }
6323 }
6324 }
6325 else
6326 {
6327 /* For sortgroupref cols, just check if any contain SRFs */
6328 if (!have_srf_sortcols &&
6329 parse->hasTargetSRFs &&
6330 expression_returns_set((Node *) expr))
6331 have_srf_sortcols = true;
6332 }
6333
6334 i++;
6335 }
6336
6337 /*
6338 * We can postpone SRFs if we have some but none are in sortgroupref cols.
6339 */
6340 postpone_srfs = (have_srf && !have_srf_sortcols);
6341
6342 /*
6343 * If we don't need a post-sort projection, just return final_target.
6344 */
6345 if (!(postpone_srfs || have_volatile ||
6346 (have_expensive &&
6347 (parse->limitCount || root->tuple_fraction > 0))))
6348 return final_target;
6349
6350 /*
6351 * Report whether the post-sort projection will contain set-returning
6352 * functions. This is important because it affects whether the Sort can
6353 * rely on the query's LIMIT (if any) to bound the number of rows it needs
6354 * to return.
6355 */
6356 *have_postponed_srfs = postpone_srfs;
6357
6358 /*
6359 * Construct the sort-input target, taking all non-postponable columns and
6360 * then adding Vars, PlaceHolderVars, Aggrefs, and WindowFuncs found in
6361 * the postponable ones.
6362 */
6363 input_target = create_empty_pathtarget();
6364 postponable_cols = NIL;
6365
6366 i = 0;
6367 foreach(lc, final_target->exprs)
6368 {
6369 Expr *expr = (Expr *) lfirst(lc);
6370
6371 if (postpone_col[i] || (postpone_srfs && col_is_srf[i]))
6372 postponable_cols = lappend(postponable_cols, expr);
6373 else
6374 add_column_to_pathtarget(input_target, expr,
6375 get_pathtarget_sortgroupref(final_target, i));
6376
6377 i++;
6378 }
6379
6380 /*
6381 * Pull out all the Vars, Aggrefs, and WindowFuncs mentioned in
6382 * postponable columns, and add them to the sort-input target if not
6383 * already present. (Some might be there already.) We mustn't
6384 * deconstruct Aggrefs or WindowFuncs here, since the projection node
6385 * would be unable to recompute them.
6386 */
6387 postponable_vars = pull_var_clause((Node *) postponable_cols,
6388 PVC_INCLUDE_AGGREGATES |
6389 PVC_INCLUDE_WINDOWFUNCS |
6390 PVC_INCLUDE_PLACEHOLDERS);
6391 add_new_columns_to_pathtarget(input_target, postponable_vars);
6392
6393 /* clean up cruft */
6394 list_free(postponable_vars);
6395 list_free(postponable_cols);
6396
6397 /* XXX this represents even more redundant cost calculation ... */
6398 return set_pathtarget_cost_width(root, input_target);
6399}
6400
6401/*
6402 * get_cheapest_fractional_path
6403 * Find the cheapest path for retrieving a specified fraction of all
6404 * the tuples expected to be returned by the given relation.
6405 *
6406 * We interpret tuple_fraction the same way as grouping_planner.
6407 *
6408 * We assume set_cheapest() has been run on the given rel.
6409 */
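/*
 * Worked example (illustrative): tuple_fraction = 0.01 asks for about 1% of
 * the rows, while tuple_fraction = 100 means "about 100 rows"; if the
 * cheapest-total path is estimated at 10000 rows, the latter is converted
 * below to the fraction 100/10000 = 0.01 before paths are compared.
 */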
6410Path *
6411get_cheapest_fractional_path(RelOptInfo *rel, double tuple_fraction)
6412{
6413 Path *best_path = rel->cheapest_total_path;
6414 ListCell *l;
6415
6416 /* If all tuples will be retrieved, just return the cheapest-total path */
6417 if (tuple_fraction <= 0.0)
6418 return best_path;
6419
6420 /* Convert absolute # of tuples to a fraction; no need to clamp to 0..1 */
6421 if (tuple_fraction >= 1.0 && best_path->rows > 0)
6422 tuple_fraction /= best_path->rows;
6423
6424 foreach(l, rel->pathlist)
6425 {
6426 Path *path = (Path *) lfirst(l);
6427
6428 if (path == rel->cheapest_total_path ||
6429 compare_fractional_path_costs(best_path, path, tuple_fraction) <= 0)
6430 continue;
6431
6432 best_path = path;
6433 }
6434
6435 return best_path;
6436}
6437
6438/*
6439 * adjust_paths_for_srfs
6440 * Fix up the Paths of the given upperrel to handle tSRFs properly.
6441 *
6442 * The executor can only handle set-returning functions that appear at the
6443 * top level of the targetlist of a ProjectSet plan node. If we have any SRFs
6444 * that are not at top level, we need to split up the evaluation into multiple
6445 * plan levels in which each level satisfies this constraint. This function
6446 * modifies each Path of an upperrel that (might) compute any SRFs in its
6447 * output tlist to insert appropriate projection steps.
6448 *
6449 * The given targets and targets_contain_srfs lists are from
6450 * split_pathtarget_at_srfs(). We assume the existing Paths emit the first
6451 * target in targets.
6452 */
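/*
 * Illustrative sketch: for a target such as abs(generate_series(-3, 3)), the
 * split produces one level evaluating generate_series(-3, 3) in a ProjectSet
 * node and a second level applying abs() in an ordinary projection above it,
 * so every SRF ends up at the top level of some tlist.
 */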
6453static void
6454adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel,
6455 List *targets, List *targets_contain_srfs)
6456{
6457 ListCell *lc;
6458
6459 Assert(list_length(targets) == list_length(targets_contain_srfs));
6460 Assert(!linitial_int(targets_contain_srfs));
6461
6462 /* If no SRFs appear at this plan level, nothing to do */
6463 if (list_length(targets) == 1)
6464 return;
6465
6466 /*
6467 * Stack SRF-evaluation nodes atop each path for the rel.
6468 *
6469 * In principle we should re-run set_cheapest() here to identify the
6470 * cheapest path, but it seems unlikely that adding the same tlist eval
6471 * costs to all the paths would change that, so we don't bother. Instead,
6472 * just assume that the cheapest-startup and cheapest-total paths remain
6473 * so. (There should be no parameterized paths anymore, so we needn't
6474 * worry about updating cheapest_parameterized_paths.)
6475 */
6476 foreach(lc, rel->pathlist)
6477 {
6478 Path *subpath = (Path *) lfirst(lc);
6479 Path *newpath = subpath;
6480 ListCell *lc1,
6481 *lc2;
6482
6483 Assert(subpath->param_info == NULL);
6484 forboth(lc1, targets, lc2, targets_contain_srfs)
6485 {
6486 PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6487 bool contains_srfs = (bool) lfirst_int(lc2);
6488
6489 /* If this level doesn't contain SRFs, do regular projection */
6490 if (contains_srfs)
6491 newpath = (Path *) create_set_projection_path(root,
6492 rel,
6493 newpath,
6494 thistarget);
6495 else
6496 newpath = (Path *) apply_projection_to_path(root,
6497 rel,
6498 newpath,
6499 thistarget);
6500 }
6501 lfirst(lc) = newpath;
6502 if (subpath == rel->cheapest_startup_path)
6503 rel->cheapest_startup_path = newpath;
6504 if (subpath == rel->cheapest_total_path)
6505 rel->cheapest_total_path = newpath;
6506 }
6507
6508 /* Likewise for partial paths, if any */
6509 foreach(lc, rel->partial_pathlist)
6510 {
6511 Path *subpath = (Path *) lfirst(lc);
6512 Path *newpath = subpath;
6513 ListCell *lc1,
6514 *lc2;
6515
6516 Assert(subpath->param_info == NULL);
6517 forboth(lc1, targets, lc2, targets_contain_srfs)
6518 {
6519 PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6520 bool contains_srfs = (bool) lfirst_int(lc2);
6521
6522 /* If this level doesn't contain SRFs, do regular projection */
6523 if (contains_srfs)
6524 newpath = (Path *) create_set_projection_path(root,
6525 rel,
6526 newpath,
6527 thistarget);
6528 else
6529 {
6530 /* avoid apply_projection_to_path, in case of multiple refs */
6531 newpath = (Path *) create_projection_path(root,
6532 rel,
6533 newpath,
6534 thistarget);
6535 }
6536 }
6537 lfirst(lc) = newpath;
6538 }
6539}
6540
6541/*
6542 * expression_planner
6543 * Perform planner's transformations on a standalone expression.
6544 *
6545 * Various utility commands need to evaluate expressions that are not part
6546 * of a plannable query. They can do so using the executor's regular
6547 * expression-execution machinery, but first the expression has to be fed
6548 * through here to transform it from parser output to something executable.
6549 *
6550 * Currently, we disallow sublinks in standalone expressions, so there's no
6551 * real "planning" involved here. (That might not always be true though.)
6552 * What we must do is run eval_const_expressions to ensure that any function
6553 * calls are converted to positional notation and function default arguments
6554 * get inserted. The fact that constant subexpressions get simplified is a
6555 * side-effect that is useful when the expression will get evaluated more than
6556 * once. Also, we must fix operator function IDs.
6557 *
6558 * This does not return any information about dependencies of the expression.
6559 * Hence callers should use the results only for the duration of the current
6560 * query. Callers that would like to cache the results for longer should use
6561 * expression_planner_with_deps, probably via the plancache.
6562 *
6563 * Note: this must not make any damaging changes to the passed-in expression
6564 * tree. (It would actually be okay to apply fix_opfuncids to it, but since
6565 * we first do an expression_tree_mutator-based walk, what is returned will
6566 * be a new node tree.) The result is constructed in the current memory
6567 * context; beware that this can leak a lot of additional stuff there, too.
6568 */
6569Expr *
6570expression_planner(Expr *expr)
6571{
6572 Node *result;
6573
6574 /*
6575 * Convert named-argument function calls, insert default arguments and
6576 * simplify constant subexprs
6577 */
6578 result = eval_const_expressions(NULL, (Node *) expr);
6579
6580 /* Fill in opfuncid values if missing */
6581 fix_opfuncids(result);
6582
6583 return (Expr *) result;
6584}
6585
6586/*
6587 * expression_planner_with_deps
6588 * Perform planner's transformations on a standalone expression,
6589 * returning expression dependency information along with the result.
6590 *
6591 * This is identical to expression_planner() except that it also returns
6592 * information about possible dependencies of the expression, ie identities of
6593 * objects whose definitions affect the result. As in a PlannedStmt, these
6594 * are expressed as a list of relation Oids and a list of PlanInvalItems.
6595 */
6596Expr *
6597expression_planner_with_deps(Expr *expr,
6598 List **relationOids,
6599 List **invalItems)
6600{
6601 Node *result;
6602 PlannerGlobal glob;
6603 PlannerInfo root;
6604
6605 /* Make up dummy planner state so we can use setrefs machinery */
6606 MemSet(&glob, 0, sizeof(glob));
6607 glob.type = T_PlannerGlobal;
6608 glob.relationOids = NIL;
6609 glob.invalItems = NIL;
6610
6611 MemSet(&root, 0, sizeof(root));
6612 root.type = T_PlannerInfo;
6613 root.glob = &glob;
6614
6615 /*
6616 * Convert named-argument function calls, insert default arguments and
6617 * simplify constant subexprs. Collect identities of inlined functions
6618 * and elided domains, too.
6619 */
6620 result = eval_const_expressions(&root, (Node *) expr);
6621
6622 /* Fill in opfuncid values if missing */
6623 fix_opfuncids(result);
6624
6625 /*
6626 * Now walk the finished expression to find anything else we ought to
6627 * record as an expression dependency.
6628 */
6629 (void) extract_query_dependencies_walker(result, &root);
6630
6631 *relationOids = glob.relationOids;
6632 *invalItems = glob.invalItems;
6633
6634 return (Expr *) result;
6635}
6636
6637
6638/*
6639 * plan_cluster_use_sort
6640 * Use the planner to decide how CLUSTER should implement sorting
6641 *
6642 * tableOid is the OID of a table to be clustered on its index indexOid
6643 * (which is already known to be a btree index). Decide whether it's
6644 * cheaper to do an indexscan or a seqscan-plus-sort to execute the CLUSTER.
6645 * Return true to use sorting, false to use an indexscan.
6646 *
6647 * Note: caller had better already hold some type of lock on the table.
6648 */
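/*
 * Roughly speaking, the comparison below pits "Seq Scan + Sort" (with any
 * index expressions re-evaluated for each tuple comparison) against a
 * full-table ordered Index Scan on the clustering index; whichever has the
 * lower estimated total cost decides how CLUSTER rewrites the table.
 */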
6649bool
6650plan_cluster_use_sort(Oid tableOid, Oid indexOid)
6651{
6652 PlannerInfo *root;
6653 Query *query;
6654 PlannerGlobal *glob;
6655 RangeTblEntry *rte;
6656 RelOptInfo *rel;
6657 IndexOptInfo *indexInfo;
6658 QualCost indexExprCost;
6659 Cost comparisonCost;
6660 Path *seqScanPath;
6661 Path seqScanAndSortPath;
6662 IndexPath *indexScanPath;
6663 ListCell *lc;
6664
6665 /* We can short-circuit the cost comparison if indexscans are disabled */
6666 if (!enable_indexscan)
6667 return true; /* use sort */
6668
6669 /* Set up mostly-dummy planner state */
6670 query = makeNode(Query);
6671 query->commandType = CMD_SELECT;
6672
6673 glob = makeNode(PlannerGlobal);
6674
6676 root->parse = query;
6677 root->glob = glob;
6678 root->query_level = 1;
6679 root->planner_cxt = CurrentMemoryContext;
6680 root->wt_param_id = -1;
6681 root->join_domains = list_make1(makeNode(JoinDomain));
6682
6683 /* Build a minimal RTE for the rel */
6684 rte = makeNode(RangeTblEntry);
6685 rte->rtekind = RTE_RELATION;
6686 rte->relid = tableOid;
6687 rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
6688 rte->rellockmode = AccessShareLock;
6689 rte->lateral = false;
6690 rte->inh = false;
6691 rte->inFromCl = true;
6692 query->rtable = list_make1(rte);
6693 addRTEPermissionInfo(&query->rteperminfos, rte);
6694
6695 /* Set up RTE/RelOptInfo arrays */
6696 setup_simple_rel_arrays(root);
6697
6698 /* Build RelOptInfo */
6699 rel = build_simple_rel(root, 1, NULL);
6700
6701 /* Locate IndexOptInfo for the target index */
6702 indexInfo = NULL;
6703 foreach(lc, rel->indexlist)
6704 {
6705 indexInfo = lfirst_node(IndexOptInfo, lc);
6706 if (indexInfo->indexoid == indexOid)
6707 break;
6708 }
6709
6710 /*
6711 * It's possible that get_relation_info did not generate an IndexOptInfo
6712 * for the desired index; this could happen if it's not yet reached its
6713 * indcheckxmin usability horizon, or if it's a system index and we're
6714 * ignoring system indexes. In such cases we should tell CLUSTER to not
6715 * trust the index contents but use seqscan-and-sort.
6716 */
6717 if (lc == NULL) /* not in the list? */
6718 return true; /* use sort */
6719
6720 /*
6721 * Rather than doing all the pushups that would be needed to use
6722 * set_baserel_size_estimates, just do a quick hack for rows and width.
6723 */
6724 rel->rows = rel->tuples;
6725 rel->reltarget->width = get_relation_data_width(tableOid, NULL);
6726
6727 root->total_table_pages = rel->pages;
6728
6729 /*
6730 * Determine eval cost of the index expressions, if any. We need to
6731 * charge twice that amount for each tuple comparison that happens during
6732 * the sort, since tuplesort.c will have to re-evaluate the index
6733 * expressions each time. (XXX that's pretty inefficient...)
6734 */
6735 cost_qual_eval(&indexExprCost, indexInfo->indexprs, root);
6736 comparisonCost = 2.0 * (indexExprCost.startup + indexExprCost.per_tuple);
6737
6738 /* Estimate the cost of seq scan + sort */
6739 seqScanPath = create_seqscan_path(root, rel, NULL, 0);
6740 cost_sort(&seqScanAndSortPath, root, NIL,
6741 seqScanPath->disabled_nodes,
6742 seqScanPath->total_cost, rel->tuples, rel->reltarget->width,
6743 comparisonCost, maintenance_work_mem, -1.0);
6744
6745 /* Estimate the cost of index scan */
6746 indexScanPath = create_index_path(root, indexInfo,
6747 NIL, NIL, NIL, NIL,
6748 ForwardScanDirection, false,
6749 NULL, 1.0, false);
6750
6751 return (seqScanAndSortPath.total_cost < indexScanPath->path.total_cost);
6752}
6753
6754/*
6755 * plan_create_index_workers
6756 * Use the planner to decide how many parallel worker processes
6757 * CREATE INDEX should request for use
6758 *
6759 * tableOid is the table on which the index is to be built. indexOid is the
6760 * OID of an index to be created or reindexed (which must be an index with
6761 * support for parallel builds - currently btree or BRIN).
6762 *
6763 * Return value is the number of parallel worker processes to request. It
6764 * may be unsafe to proceed if this is 0. Note that this does not include the
6765 * leader participating as a worker (value is always a number of parallel
6766 * worker processes).
6767 *
6768 * Note: caller had better already hold some type of lock on the table and
6769 * index.
6770 */
6771int
6772plan_create_index_workers(Oid tableOid, Oid indexOid)
6773{
6774 PlannerInfo *root;
6775 Query *query;
6776 PlannerGlobal *glob;
6777 RangeTblEntry *rte;
6778 Relation heap;
6779 Relation index;
6780 RelOptInfo *rel;
6781 int parallel_workers;
6782 BlockNumber heap_blocks;
6783 double reltuples;
6784 double allvisfrac;
6785
6786 /*
6787 * We don't allow performing parallel operation in standalone backend or
6788 * when parallelism is disabled.
6789 */
6790 if (!IsUnderPostmaster || max_parallel_maintenance_workers == 0)
6791 return 0;
6792
6793 /* Set up largely-dummy planner state */
6794 query = makeNode(Query);
6795 query->commandType = CMD_SELECT;
6796
6797 glob = makeNode(PlannerGlobal);
6798
6800 root->parse = query;
6801 root->glob = glob;
6802 root->query_level = 1;
6803 root->planner_cxt = CurrentMemoryContext;
6804 root->wt_param_id = -1;
6805 root->join_domains = list_make1(makeNode(JoinDomain));
6806
6807 /*
6808 * Build a minimal RTE.
6809 *
6810 * Mark the RTE with inh = true. This is a kludge to prevent
6811 * get_relation_info() from fetching index info, which is necessary
6812 * because it does not expect that any IndexOptInfo is currently
6813 * undergoing REINDEX.
6814 */
6815 rte = makeNode(RangeTblEntry);
6816 rte->rtekind = RTE_RELATION;
6817 rte->relid = tableOid;
6818 rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
6819 rte->rellockmode = AccessShareLock;
6820 rte->lateral = false;
6821 rte->inh = true;
6822 rte->inFromCl = true;
6823 query->rtable = list_make1(rte);
6824 addRTEPermissionInfo(&query->rteperminfos, rte);
6825
6826 /* Set up RTE/RelOptInfo arrays */
6827 setup_simple_rel_arrays(root);
6828
6829 /* Build RelOptInfo */
6830 rel = build_simple_rel(root, 1, NULL);
6831
6832 /* Rels are assumed already locked by the caller */
6833 heap = table_open(tableOid, NoLock);
6834 index = index_open(indexOid, NoLock);
6835
6836 /*
6837 * Determine if it's safe to proceed.
6838 *
6839 * Currently, parallel workers can't access the leader's temporary tables.
6840 * Furthermore, any index predicate or index expressions must be parallel
6841 * safe.
6842 */
6843 if (heap->rd_rel->relpersistence == RELPERSISTENCE_TEMP ||
6844 !is_parallel_safe(root, (Node *) RelationGetIndexExpressions(index)) ||
6845 !is_parallel_safe(root, (Node *) RelationGetIndexPredicate(index)))
6846 {
6847 parallel_workers = 0;
6848 goto done;
6849 }
6850
6851 /*
6852 * If parallel_workers storage parameter is set for the table, accept that
6853 * as the number of parallel worker processes to launch (though still cap
6854 * at max_parallel_maintenance_workers). Note that we deliberately do not
6855 * consider any other factor when parallel_workers is set. (e.g., memory
6856 * use by workers.)
6857 */
6858 if (rel->rel_parallel_workers != -1)
6859 {
6860 parallel_workers = Min(rel->rel_parallel_workers,
6861 max_parallel_maintenance_workers);
6862 goto done;
6863 }
6864
6865 /*
6866 * Estimate heap relation size ourselves, since rel->pages cannot be
6867 * trusted (heap RTE was marked as inheritance parent)
6868 */
6869 estimate_rel_size(heap, NULL, &heap_blocks, &reltuples, &allvisfrac);
6870
6871 /*
6872 * Determine number of workers to scan the heap relation using generic
6873 * model
6874 */
6875 parallel_workers = compute_parallel_worker(rel, heap_blocks, -1,
6876 max_parallel_maintenance_workers);
6877
6878 /*
6879 * Cap workers based on available maintenance_work_mem as needed.
6880 *
6881 * Note that each tuplesort participant receives an even share of the
6882 * total maintenance_work_mem budget. Aim to leave participants
6883 * (including the leader as a participant) with no less than 32MB of
6884 * memory. This leaves cases where maintenance_work_mem is set to 64MB
6885 * immediately past the threshold of being capable of launching a single
6886 * parallel worker to sort.
6887 */
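 /*
  * Worked example (illustrative): with maintenance_work_mem = 64MB
  * (65536kB), two workers would leave 65536/3 = ~21845kB per participant,
  * below the 32768kB floor, so the loop backs off to one worker; a single
  * worker leaves exactly 65536/2 = 32768kB each and is therefore allowed.
  */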
6888 while (parallel_workers > 0 &&
6889 maintenance_work_mem / (parallel_workers + 1) < 32768L)
6890 parallel_workers--;
6891
6892done:
6893 index_close(index, NoLock);
6894 table_close(heap, NoLock);
6895
6896 return parallel_workers;
6897}
6898
6899/*
6900 * add_paths_to_grouping_rel
6901 *
6902 * Add non-partial paths to grouping relation.
6903 */
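/*
 * Illustrative plan shapes considered below (a sketch): a GroupAggregate or
 * Group atop suitably sorted input, a HashAggregate atop the cheapest
 * unordered input, and Finalize GroupAggregate / Finalize HashAggregate
 * variants stacked on paths from the partially grouped relation, if any.
 */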
6904static void
6905add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
6906 RelOptInfo *grouped_rel,
6907 RelOptInfo *partially_grouped_rel,
6908 const AggClauseCosts *agg_costs,
6909 grouping_sets_data *gd, double dNumGroups,
6910 GroupPathExtraData *extra)
6911{
6912 Query *parse = root->parse;
6913 Path *cheapest_path = input_rel->cheapest_total_path;
6914 ListCell *lc;
6915 bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
6916 bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
6917 List *havingQual = (List *) extra->havingQual;
6918 AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
6919
6920 if (can_sort)
6921 {
6922 /*
6923 * Use any available suitably-sorted path as input, and also consider
6924 * sorting the cheapest-total path and incremental sort on any paths
6925 * with presorted keys.
6926 */
6927 foreach(lc, input_rel->pathlist)
6928 {
6929 ListCell *lc2;
6930 Path *path = (Path *) lfirst(lc);
6931 Path *path_save = path;
6932 List *pathkey_orderings = NIL;
6933
6934 /* generate alternative group orderings that might be useful */
6935 pathkey_orderings = get_useful_group_keys_orderings(root, path);
6936
6937 Assert(list_length(pathkey_orderings) > 0);
6938
6939 foreach(lc2, pathkey_orderings)
6940 {
6941 GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
6942
6943 /* restore the path (we replace it in the loop) */
6944 path = path_save;
6945
6946 path = make_ordered_path(root,
6947 grouped_rel,
6948 path,
6949 cheapest_path,
6950 info->pathkeys,
6951 -1.0);
6952 if (path == NULL)
6953 continue;
6954
6955 /* Now decide what to stick atop it */
6956 if (parse->groupingSets)
6957 {
6958 consider_groupingsets_paths(root, grouped_rel,
6959 path, true, can_hash,
6960 gd, agg_costs, dNumGroups);
6961 }
6962 else if (parse->hasAggs)
6963 {
6964 /*
6965 * We have aggregation, possibly with plain GROUP BY. Make
6966 * an AggPath.
6967 */
6968 add_path(grouped_rel, (Path *)
6969 create_agg_path(root,
6970 grouped_rel,
6971 path,
6972 grouped_rel->reltarget,
6973 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
6974 AGGSPLIT_SIMPLE,
6975 info->clauses,
6976 havingQual,
6977 agg_costs,
6978 dNumGroups));
6979 }
6980 else if (parse->groupClause)
6981 {
6982 /*
6983 * We have GROUP BY without aggregation or grouping sets.
6984 * Make a GroupPath.
6985 */
6986 add_path(grouped_rel, (Path *)
6987 create_group_path(root,
6988 grouped_rel,
6989 path,
6990 info->clauses,
6991 havingQual,
6992 dNumGroups));
6993 }
6994 else
6995 {
6996 /* Other cases should have been handled above */
6997 Assert(false);
6998 }
6999 }
7000 }
7001
7002 /*
7003 * Instead of operating directly on the input relation, we can
7004 * consider finalizing a partially aggregated path.
7005 */
7006 if (partially_grouped_rel != NULL)
7007 {
7008 foreach(lc, partially_grouped_rel->pathlist)
7009 {
7010 ListCell *lc2;
7011 Path *path = (Path *) lfirst(lc);
7012 Path *path_save = path;
7013 List *pathkey_orderings = NIL;
7014
7015 /* generate alternative group orderings that might be useful */
7016 pathkey_orderings = get_useful_group_keys_orderings(root, path);
7017
7018 Assert(list_length(pathkey_orderings) > 0);
7019
7020 /* process all potentially interesting grouping reorderings */
7021 foreach(lc2, pathkey_orderings)
7022 {
7023 GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7024
7025 /* restore the path (we replace it in the loop) */
7026 path = path_save;
7027
7028 path = make_ordered_path(root,
7029 grouped_rel,
7030 path,
7031 partially_grouped_rel->cheapest_total_path,
7032 info->pathkeys,
7033 -1.0);
7034
7035 if (path == NULL)
7036 continue;
7037
7038 if (parse->hasAggs)
7039 add_path(grouped_rel, (Path *)
7040 create_agg_path(root,
7041 grouped_rel,
7042 path,
7043 grouped_rel->reltarget,
7044 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7045 AGGSPLIT_FINAL_DESERIAL,
7046 info->clauses,
7047 havingQual,
7048 agg_final_costs,
7049 dNumGroups));
7050 else
7051 add_path(grouped_rel, (Path *)
7052 create_group_path(root,
7053 grouped_rel,
7054 path,
7055 info->clauses,
7056 havingQual,
7057 dNumGroups));
7058
7059 }
7060 }
7061 }
7062 }
7063
7064 if (can_hash)
7065 {
7066 if (parse->groupingSets)
7067 {
7068 /*
7069 * Try for a hash-only groupingsets path over unsorted input.
7070 */
7071 consider_groupingsets_paths(root, grouped_rel,
7072 cheapest_path, false, true,
7073 gd, agg_costs, dNumGroups);
7074 }
7075 else
7076 {
7077 /*
7078 * Generate a HashAgg Path. We just need an Agg over the
7079 * cheapest-total input path, since input order won't matter.
7080 */
7081 add_path(grouped_rel, (Path *)
7082 create_agg_path(root, grouped_rel,
7083 cheapest_path,
7084 grouped_rel->reltarget,
7085 AGG_HASHED,
7086 AGGSPLIT_SIMPLE,
7087 root->processed_groupClause,
7088 havingQual,
7089 agg_costs,
7090 dNumGroups));
7091 }
7092
7093 /*
7094 * Generate a Finalize HashAgg Path atop of the cheapest partially
7095 * grouped path, assuming there is one
7096 */
7097 if (partially_grouped_rel && partially_grouped_rel->pathlist)
7098 {
7099 Path *path = partially_grouped_rel->cheapest_total_path;
7100
7101 add_path(grouped_rel, (Path *)
7102 create_agg_path(root,
7103 grouped_rel,
7104 path,
7105 grouped_rel->reltarget,
7106 AGG_HASHED,
7107 AGGSPLIT_FINAL_DESERIAL,
7108 root->processed_groupClause,
7109 havingQual,
7110 agg_final_costs,
7111 dNumGroups));
7112 }
7113 }
7114
7115 /*
7116 * When partitionwise aggregate is used, we might have fully aggregated
7117 * paths in the partial pathlist, because add_paths_to_append_rel() will
7118 * consider a path for grouped_rel consisting of a Parallel Append of
7119 * non-partial paths from each child.
7120 */
7121 if (grouped_rel->partial_pathlist != NIL)
7122 gather_grouping_paths(root, grouped_rel);
7123}
7124
7125/*
7126 * create_partial_grouping_paths
7127 *
7128 * Create a new upper relation representing the result of partial aggregation
7129 * and populate it with appropriate paths. Note that we don't finalize the
7130 * lists of paths here, so the caller can add additional partial or non-partial
7131 * paths and must afterward call gather_grouping_paths and set_cheapest on
7132 * the returned upper relation.
7133 *
7134 * All paths for this new upper relation -- both partial and non-partial --
7135 * have been partially aggregated but require a subsequent FinalizeAggregate
7136 * step.
7137 *
7138 * NB: This function is allowed to return NULL if it determines that there is
7139 * no real need to create a new RelOptInfo.
7140 */
7141 static RelOptInfo *
7142 create_partial_grouping_paths(PlannerInfo *root,
7143 RelOptInfo *grouped_rel,
7144 RelOptInfo *input_rel,
7145 grouping_sets_data *gd,
7146 GroupPathExtraData *extra,
7147 bool force_rel_creation)
7148{
7149 Query *parse = root->parse;
7150 RelOptInfo *partially_grouped_rel;
7151 AggClauseCosts *agg_partial_costs = &extra->agg_partial_costs;
7152 AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7153 Path *cheapest_partial_path = NULL;
7154 Path *cheapest_total_path = NULL;
7155 double dNumPartialGroups = 0;
7156 double dNumPartialPartialGroups = 0;
7157 ListCell *lc;
7158 bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7159 bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7160
7161 /*
7162 * Consider whether we should generate partially aggregated non-partial
7163 * paths. We can only do this if we have a non-partial path, and only if
7164 * the parent of the input rel is performing partial partitionwise
7165 * aggregation. (Note that extra->patype is the type of partitionwise
7166 * aggregation being used at the parent level, not this level.)
7167 */
7168 if (input_rel->pathlist != NIL &&
7169 extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
7170 cheapest_total_path = input_rel->cheapest_total_path;
7171
7172 /*
7173 * If parallelism is possible for grouped_rel, then we should consider
7174 * generating partially-grouped partial paths. However, if the input rel
7175 * has no partial paths, then we can't.
7176 */
7177 if (grouped_rel->consider_parallel && input_rel->partial_pathlist != NIL)
7178 cheapest_partial_path = linitial(input_rel->partial_pathlist);
7179
7180 /*
7181 * If we can't partially aggregate partial paths, and we can't partially
7182 * aggregate non-partial paths, then don't bother creating the new
7183 * RelOptInfo at all, unless the caller specified force_rel_creation.
7184 */
7185 if (cheapest_total_path == NULL &&
7186 cheapest_partial_path == NULL &&
7187 !force_rel_creation)
7188 return NULL;
7189
7190 /*
7191 * Build a new upper relation to represent the result of partially
7192 * aggregating the rows from the input relation.
7193 */
7194 partially_grouped_rel = fetch_upper_rel(root,
7195 UPPERREL_PARTIAL_GROUP_AGG,
7196 grouped_rel->relids);
7197 partially_grouped_rel->consider_parallel =
7198 grouped_rel->consider_parallel;
7199 partially_grouped_rel->reloptkind = grouped_rel->reloptkind;
7200 partially_grouped_rel->serverid = grouped_rel->serverid;
7201 partially_grouped_rel->userid = grouped_rel->userid;
7202 partially_grouped_rel->useridiscurrent = grouped_rel->useridiscurrent;
7203 partially_grouped_rel->fdwroutine = grouped_rel->fdwroutine;
7204
7205 /*
7206 * Build target list for partial aggregate paths. These paths cannot just
7207 * emit the same tlist as regular aggregate paths, because (1) we must
7208 * include Vars and Aggrefs needed in HAVING, which might not appear in
7209 * the result tlist, and (2) the Aggrefs must be set in partial mode.
7210 */
7211 partially_grouped_rel->reltarget =
7212 make_partial_grouping_target(root, grouped_rel->reltarget,
7213 extra->havingQual);
7214
7215 if (!extra->partial_costs_set)
7216 {
7217 /*
7218 * Collect statistics about aggregates for estimating costs of
7219 * performing aggregation in parallel.
7220 */
7221 MemSet(agg_partial_costs, 0, sizeof(AggClauseCosts));
7222 MemSet(agg_final_costs, 0, sizeof(AggClauseCosts));
7223 if (parse->hasAggs)
7224 {
7225 /* partial phase */
7226 get_agg_clause_costs(root, AGGSPLIT_INITIAL_SERIAL,
7227 agg_partial_costs);
7228
7229 /* final phase */
7230 get_agg_clause_costs(root, AGGSPLIT_FINAL_DESERIAL,
7231 agg_final_costs);
7232 }
7233
7234 extra->partial_costs_set = true;
7235 }
7236
7237 /* Estimate number of partial groups. */
7238 if (cheapest_total_path != NULL)
7239 dNumPartialGroups =
7240 get_number_of_groups(root,
7241 cheapest_total_path->rows,
7242 gd,
7243 extra->targetList);
7244 if (cheapest_partial_path != NULL)
7245 dNumPartialPartialGroups =
7246 get_number_of_groups(root,
7247 cheapest_partial_path->rows,
7248 gd,
7249 extra->targetList);
7250
7251 if (can_sort && cheapest_total_path != NULL)
7252 {
7253 /* This should have been checked previously */
7254 Assert(parse->hasAggs || parse->groupClause);
7255
7256 /*
7257 * Use any available suitably-sorted path as input, and also consider
7258 * sorting the cheapest partial path.
7259 */
7260 foreach(lc, input_rel->pathlist)
7261 {
7262 ListCell *lc2;
7263 Path *path = (Path *) lfirst(lc);
7264 Path *path_save = path;
7265 List *pathkey_orderings = NIL;
7266
7267 /* generate alternative group orderings that might be useful */
7268 pathkey_orderings = get_useful_group_keys_orderings(root, path);
7269
7270 Assert(list_length(pathkey_orderings) > 0);
7271
7272 /* process all potentially interesting grouping reorderings */
7273 foreach(lc2, pathkey_orderings)
7274 {
7275 GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7276
7277 /* restore the path (we replace it in the loop) */
7278 path = path_save;
7279
7280 path = make_ordered_path(root,
7281 partially_grouped_rel,
7282 path,
7283 cheapest_total_path,
7284 info->pathkeys,
7285 -1.0);
7286
7287 if (path == NULL)
7288 continue;
7289
7290 if (parse->hasAggs)
7291 add_path(partially_grouped_rel, (Path *)
7292 create_agg_path(root,
7293 partially_grouped_rel,
7294 path,
7295 partially_grouped_rel->reltarget,
7296 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7297 AGGSPLIT_INITIAL_SERIAL,
7298 info->clauses,
7299 NIL,
7300 agg_partial_costs,
7301 dNumPartialGroups));
7302 else
7303 add_path(partially_grouped_rel, (Path *)
7304 create_group_path(root,
7305 partially_grouped_rel,
7306 path,
7307 info->clauses,
7308 NIL,
7309 dNumPartialGroups));
7310 }
7311 }
7312 }
7313
7314 if (can_sort && cheapest_partial_path != NULL)
7315 {
7316 /* Similar to above logic, but for partial paths. */
7317 foreach(lc, input_rel->partial_pathlist)
7318 {
7319 ListCell *lc2;
7320 Path *path = (Path *) lfirst(lc);
7321 Path *path_save = path;
7322 List *pathkey_orderings = NIL;
7323
7324 /* generate alternative group orderings that might be useful */
7325 pathkey_orderings = get_useful_group_keys_orderings(root, path);
7326
7327 Assert(list_length(pathkey_orderings) > 0);
7328
7329 /* process all potentially interesting grouping reorderings */
7330 foreach(lc2, pathkey_orderings)
7331 {
7332 GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7333
7334
7335 /* restore the path (we replace it in the loop) */
7336 path = path_save;
7337
7338 path = make_ordered_path(root,
7339 partially_grouped_rel,
7340 path,
7341 cheapest_partial_path,
7342 info->pathkeys,
7343 -1.0);
7344
7345 if (path == NULL)
7346 continue;
7347
7348 if (parse->hasAggs)
7349 add_partial_path(partially_grouped_rel, (Path *)
7350 create_agg_path(root,
7351 partially_grouped_rel,
7352 path,
7353 partially_grouped_rel->reltarget,
7354 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7355 AGGSPLIT_INITIAL_SERIAL,
7356 info->clauses,
7357 NIL,
7358 agg_partial_costs,
7359 dNumPartialPartialGroups));
7360 else
7361 add_partial_path(partially_grouped_rel, (Path *)
7362 create_group_path(root,
7363 partially_grouped_rel,
7364 path,
7365 info->clauses,
7366 NIL,
7367 dNumPartialPartialGroups));
7368 }
7369 }
7370 }
7371
7372 /*
7373 * Add a partially-grouped HashAgg Path where possible
7374 */
7375 if (can_hash && cheapest_total_path != NULL)
7376 {
7377 /* Checked above */
7378 Assert(parse->hasAggs || parse->groupClause);
7379
7380 add_path(partially_grouped_rel, (Path *)
7381 create_agg_path(root,
7382 partially_grouped_rel,
7383 cheapest_total_path,
7384 partially_grouped_rel->reltarget,
7385 AGG_HASHED,
7386 AGGSPLIT_INITIAL_SERIAL,
7387 root->processed_groupClause,
7388 NIL,
7389 agg_partial_costs,
7390 dNumPartialGroups));
7391 }
7392
7393 /*
7394 * Now add a partially-grouped HashAgg partial Path where possible
7395 */
7396 if (can_hash && cheapest_partial_path != NULL)
7397 {
7398 add_partial_path(partially_grouped_rel, (Path *)
7399 create_agg_path(root,
7400 partially_grouped_rel,
7401 cheapest_partial_path,
7402 partially_grouped_rel->reltarget,
7403 AGG_HASHED,
7404 AGGSPLIT_INITIAL_SERIAL,
7405 root->processed_groupClause,
7406 NIL,
7407 agg_partial_costs,
7408 dNumPartialPartialGroups));
7409 }
7410
7411 /*
7412 * If there is an FDW that's responsible for all baserels of the query,
7413 * let it consider adding partially grouped ForeignPaths.
7414 */
7415 if (partially_grouped_rel->fdwroutine &&
7416 partially_grouped_rel->fdwroutine->GetForeignUpperPaths)
7417 {
7418 FdwRoutine *fdwroutine = partially_grouped_rel->fdwroutine;
7419
7420 fdwroutine->GetForeignUpperPaths(root,
7421 UPPERREL_PARTIAL_GROUP_AGG,
7422 input_rel, partially_grouped_rel,
7423 extra);
7424 }
7425
7426 return partially_grouped_rel;
7427}
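For readers tracing the AGGSPLIT_INITIAL_SERIAL / AGGSPLIT_FINAL_DESERIAL pairing used above, here is a minimal standalone sketch (toy data and names, not planner code) of what "partially aggregated but requiring a FinalizeAggregate step" means for avg(): each producer emits a partial state, and only the finalize step combines states and computes the final value.

/* Illustrative sketch only: two-phase aggregation for avg() over invented chunks. */
#include <stdio.h>

typedef struct { double sum; long count; } AvgState;   /* partial aggregate state */

static AvgState
partial_avg(const double *vals, int n)
{
    AvgState s = {0.0, 0};

    for (int i = 0; i < n; i++)
    {
        s.sum += vals[i];
        s.count++;
    }
    return s;                           /* handed upward, not yet a final answer */
}

static double
finalize_avg(const AvgState *states, int nstates)
{
    AvgState merged = {0.0, 0};

    for (int i = 0; i < nstates; i++)
    {
        merged.sum += states[i].sum;    /* combine step */
        merged.count += states[i].count;
    }
    return merged.count ? merged.sum / merged.count : 0.0;  /* final step */
}

int
main(void)
{
    double chunk1[] = {1.0, 2.0, 3.0};
    double chunk2[] = {4.0, 5.0};
    AvgState states[2];

    states[0] = partial_avg(chunk1, 3); /* e.g. one worker or one partition */
    states[1] = partial_avg(chunk2, 2); /* another */
    printf("avg = %.2f\n", finalize_avg(states, 2));
    return 0;
}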
7428
7429/*
7430 * make_ordered_path
7431 * Return a path ordered by 'pathkeys' based on the given 'path'. May
7432 * return NULL if it doesn't make sense to generate an ordered path in
7433 * this case.
7434 */
7435 static Path *
7436 make_ordered_path(PlannerInfo *root, RelOptInfo *rel, Path *path,
7437 Path *cheapest_path, List *pathkeys, double limit_tuples)
7438{
7439 bool is_sorted;
7440 int presorted_keys;
7441
7442 is_sorted = pathkeys_count_contained_in(pathkeys,
7443 path->pathkeys,
7444 &presorted_keys);
7445
7446 if (!is_sorted)
7447 {
7448 /*
7449 * Try at least sorting the cheapest path and also try incrementally
7450 * sorting any path which is partially sorted already (no need to deal
7451 * with paths which have presorted keys when incremental sort is
7452 * disabled unless it's the cheapest input path).
7453 */
7454 if (path != cheapest_path &&
7455 (presorted_keys == 0 || !enable_incremental_sort))
7456 return NULL;
7457
7458 /*
7459 * We've no need to consider both a sort and incremental sort. We'll
7460 * just do a sort if there are no presorted keys and an incremental
7461 * sort when there are presorted keys.
7462 */
7463 if (presorted_keys == 0 || !enable_incremental_sort)
7464 path = (Path *) create_sort_path(root,
7465 rel,
7466 path,
7467 pathkeys,
7468 limit_tuples);
7469 else
7470 path = (Path *) create_incremental_sort_path(root,
7471 rel,
7472 path,
7473 pathkeys,
7474 presorted_keys,
7475 limit_tuples);
7476 }
7477
7478 return path;
7479}
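make_ordered_path() boils down to a small decision table over three inputs: whether the path is already sorted, whether it is the cheapest input, and how many leading pathkeys are presorted. A standalone sketch of that table (invented helper and labels, not the real API, which builds Sort or Incremental Sort paths instead):

/* Illustrative sketch only: the sort / incremental-sort / reject decision. */
#include <stdio.h>
#include <stdbool.h>

static const char *
ordered_path_choice(bool is_sorted, bool is_cheapest_input,
                    int presorted_keys, bool enable_incremental_sort)
{
    if (is_sorted)
        return "use path as-is";
    /* non-cheapest paths are only worth keeping if they can be sorted incrementally */
    if (!is_cheapest_input && (presorted_keys == 0 || !enable_incremental_sort))
        return "reject (NULL)";
    if (presorted_keys == 0 || !enable_incremental_sort)
        return "full Sort";
    return "Incremental Sort";
}

int
main(void)
{
    printf("%s\n", ordered_path_choice(false, true, 0, true));   /* full Sort */
    printf("%s\n", ordered_path_choice(false, false, 2, true));  /* Incremental Sort */
    printf("%s\n", ordered_path_choice(false, false, 0, true));  /* reject (NULL) */
    return 0;
}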
7480
7481/*
7482 * Generate Gather and Gather Merge paths for a grouping relation or partial
7483 * grouping relation.
7484 *
7485 * generate_useful_gather_paths does most of the work, but we also consider a
7486 * special case: we could try sorting the data by the group_pathkeys and then
7487 * applying Gather Merge.
7488 *
7489 * NB: This function shouldn't be used for anything other than a grouped or
7490 * partially grouped relation, not only because it explicitly references
7491 * group_pathkeys but also because we pass "true" as the third argument to
7492 * generate_useful_gather_paths().
7493 */
7494 static void
7495 gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
7496 {
7497 ListCell *lc;
7498 Path *cheapest_partial_path;
7499 List *groupby_pathkeys;
7500
7501 /*
7502 * This occurs after any partial aggregation has taken place, so trim off
7503 * any pathkeys added for ORDER BY / DISTINCT aggregates.
7504 */
7505 if (list_length(root->group_pathkeys) > root->num_groupby_pathkeys)
7506 groupby_pathkeys = list_copy_head(root->group_pathkeys,
7507 root->num_groupby_pathkeys);
7508 else
7509 groupby_pathkeys = root->group_pathkeys;
7510
7511 /* Try Gather for unordered paths and Gather Merge for ordered ones. */
7512 generate_useful_gather_paths(root, rel, true);
7513
7514 cheapest_partial_path = linitial(rel->partial_pathlist);
7515
7516 /* XXX Shouldn't this also consider the group-key-reordering? */
7517 foreach(lc, rel->partial_pathlist)
7518 {
7519 Path *path = (Path *) lfirst(lc);
7520 bool is_sorted;
7521 int presorted_keys;
7522 double total_groups;
7523
7524 is_sorted = pathkeys_count_contained_in(groupby_pathkeys,
7525 path->pathkeys,
7526 &presorted_keys);
7527
7528 if (is_sorted)
7529 continue;
7530
7531 /*
7532 * Try at least sorting the cheapest path and also try incrementally
7533 * sorting any path which is partially sorted already (no need to deal
7534 * with paths which have presorted keys when incremental sort is
7535 * disabled unless it's the cheapest input path).
7536 */
7537 if (path != cheapest_partial_path &&
7538 (presorted_keys == 0 || !enable_incremental_sort))
7539 continue;
7540
7541 /*
7542 * We've no need to consider both a sort and incremental sort. We'll
7543 * just do a sort if there are no presorted keys and an incremental
7544 * sort when there are presorted keys.
7545 */
7546 if (presorted_keys == 0 || !enable_incremental_sort)
7547 path = (Path *) create_sort_path(root, rel, path,
7548 groupby_pathkeys,
7549 -1.0);
7550 else
7551 path = (Path *) create_incremental_sort_path(root,
7552 rel,
7553 path,
7554 groupby_pathkeys,
7555 presorted_keys,
7556 -1.0);
7557 total_groups = compute_gather_rows(path);
7558 path = (Path *)
7559 create_gather_merge_path(root,
7560 rel,
7561 path,
7562 rel->reltarget,
7563 groupby_pathkeys,
7564 NULL,
7565 &total_groups);
7566
7567 add_path(rel, path);
7568 }
7569}
7570
7571/*
7572 * can_partial_agg
7573 *
7574 * Determines whether or not partial grouping and/or aggregation is possible.
7575 * Returns true when possible, false otherwise.
7576 */
7577 static bool
7578 can_partial_agg(PlannerInfo *root)
7579 {
7580 Query *parse = root->parse;
7581
7582 if (!parse->hasAggs && parse->groupClause == NIL)
7583 {
7584 /*
7585 * We don't know how to do parallel aggregation unless we have either
7586 * some aggregates or a grouping clause.
7587 */
7588 return false;
7589 }
7590 else if (parse->groupingSets)
7591 {
7592 /* We don't know how to do grouping sets in parallel. */
7593 return false;
7594 }
7595 else if (root->hasNonPartialAggs || root->hasNonSerialAggs)
7596 {
7597 /* Insufficient support for partial mode. */
7598 return false;
7599 }
7600
7601 /* Everything looks good. */
7602 return true;
7603}
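The three checks above can be read as a classification of query shapes. A standalone sketch over a toy query descriptor (field names and sample queries invented; in practice hasNonPartialAggs is typically set by aggregates that lack a combine function or use DISTINCT / ORDER BY arguments):

/* Illustrative sketch only: which query shapes allow partial aggregation. */
#include <stdio.h>
#include <stdbool.h>

typedef struct
{
    const char *sql;                    /* label for the demo */
    bool has_aggs;
    bool has_group_clause;
    bool has_grouping_sets;
    bool has_non_partial_or_non_serial_aggs;
} ToyQuery;

static bool
toy_can_partial_agg(const ToyQuery *q)
{
    if (!q->has_aggs && !q->has_group_clause)
        return false;                   /* nothing to aggregate in two phases */
    if (q->has_grouping_sets)
        return false;                   /* grouping sets aren't parallelized */
    if (q->has_non_partial_or_non_serial_aggs)
        return false;                   /* partial state can't be built or shipped */
    return true;
}

int
main(void)
{
    ToyQuery qs[] = {
        {"SELECT count(*) FROM t", true, false, false, false},
        {"SELECT a FROM t GROUP BY GROUPING SETS ((a), ())", false, true, true, false},
        {"SELECT a, b FROM t ORDER BY a", false, false, false, false},
    };

    for (int i = 0; i < 3; i++)
        printf("%-50s -> %s\n", qs[i].sql,
               toy_can_partial_agg(&qs[i]) ? "partial agg possible" : "no");
    return 0;
}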
7604
7605/*
7606 * apply_scanjoin_target_to_paths
7607 *
7608 * Adjust the final scan/join relation, and recursively all of its children,
7609 * to generate the final scan/join target. It would be more correct to model
7610 * this as a separate planning step with a new RelOptInfo at the toplevel and
7611 * for each child relation, but doing it this way is noticeably cheaper.
7612 * Maybe that problem can be solved at some point, but for now we do this.
7613 *
7614 * If tlist_same_exprs is true, then the scan/join target to be applied has
7615 * the same expressions as the existing reltarget, so we need only insert the
7616 * appropriate sortgroupref information. By avoiding the creation of
7617 * projection paths we save effort both immediately and at plan creation time.
7618 */
7619 static void
7620 apply_scanjoin_target_to_paths(PlannerInfo *root,
7621 RelOptInfo *rel,
7622 List *scanjoin_targets,
7623 List *scanjoin_targets_contain_srfs,
7624 bool scanjoin_target_parallel_safe,
7625 bool tlist_same_exprs)
7626{
7627 bool rel_is_partitioned = IS_PARTITIONED_REL(rel);
7628 PathTarget *scanjoin_target;
7629 ListCell *lc;
7630
7631 /* This recurses, so be paranoid. */
7632 check_stack_depth();
7633
7634 /*
7635 * If the rel is partitioned, we want to drop its existing paths and
7636 * generate new ones. This function would still be correct if we kept the
7637 * existing paths: we'd modify them to generate the correct target above
7638 * the partitioning Append, and then they'd compete on cost with paths
7639 * generating the target below the Append. However, in our current cost
7640 * model the latter way is always the same or cheaper cost, so modifying
7641 * the existing paths would just be useless work. Moreover, when the cost
7642 * is the same, varying roundoff errors might sometimes allow an existing
7643 * path to be picked, resulting in undesirable cross-platform plan
7644 * variations. So we drop old paths and thereby force the work to be done
7645 * below the Append, except in the case of a non-parallel-safe target.
7646 *
7647 * Some care is needed, because we have to allow
7648 * generate_useful_gather_paths to see the old partial paths in the next
7649 * stanza. Hence, zap the main pathlist here, then allow
7650 * generate_useful_gather_paths to add path(s) to the main list, and
7651 * finally zap the partial pathlist.
7652 */
7653 if (rel_is_partitioned)
7654 rel->pathlist = NIL;
7655
7656 /*
7657 * If the scan/join target is not parallel-safe, partial paths cannot
7658 * generate it.
7659 */
7660 if (!scanjoin_target_parallel_safe)
7661 {
7662 /*
7663 * Since we can't generate the final scan/join target in parallel
7664 * workers, this is our last opportunity to use any partial paths that
7665 * exist; so build Gather path(s) that use them and emit whatever the
7666 * current reltarget is. We don't do this in the case where the
7667 * target is parallel-safe, since we will be able to generate superior
7668 * paths by doing it after the final scan/join target has been
7669 * applied.
7670 */
7671 generate_useful_gather_paths(root, rel, false);
7672
7673 /* Can't use parallel query above this level. */
7674 rel->partial_pathlist = NIL;
7675 rel->consider_parallel = false;
7676 }
7677
7678 /* Finish dropping old paths for a partitioned rel, per comment above */
7679 if (rel_is_partitioned)
7680 rel->partial_pathlist = NIL;
7681
7682 /* Extract SRF-free scan/join target. */
7683 scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
7684
7685 /*
7686 * Apply the SRF-free scan/join target to each existing path.
7687 *
7688 * If the tlist exprs are the same, we can just inject the sortgroupref
7689 * information into the existing pathtargets. Otherwise, replace each
7690 * path with a projection path that generates the SRF-free scan/join
7691 * target. This can't change the ordering of paths within rel->pathlist,
7692 * so we just modify the list in place.
7693 */
7694 foreach(lc, rel->pathlist)
7695 {
7696 Path *subpath = (Path *) lfirst(lc);
7697
7698 /* Shouldn't have any parameterized paths anymore */
7699 Assert(subpath->param_info == NULL);
7700
7701 if (tlist_same_exprs)
7702 subpath->pathtarget->sortgrouprefs =
7703 scanjoin_target->sortgrouprefs;
7704 else
7705 {
7706 Path *newpath;
7707
7708 newpath = (Path *) create_projection_path(root, rel, subpath,
7709 scanjoin_target);
7710 lfirst(lc) = newpath;
7711 }
7712 }
7713
7714 /* Likewise adjust the targets for any partial paths. */
7715 foreach(lc, rel->partial_pathlist)
7716 {
7717 Path *subpath = (Path *) lfirst(lc);
7718
7719 /* Shouldn't have any parameterized paths anymore */
7720 Assert(subpath->param_info == NULL);
7721
7722 if (tlist_same_exprs)
7723 subpath->pathtarget->sortgrouprefs =
7724 scanjoin_target->sortgrouprefs;
7725 else
7726 {
7727 Path *newpath;
7728
7729 newpath = (Path *) create_projection_path(root, rel, subpath,
7730 scanjoin_target);
7731 lfirst(lc) = newpath;
7732 }
7733 }
7734
7735 /*
7736 * Now, if final scan/join target contains SRFs, insert ProjectSetPath(s)
7737 * atop each existing path. (Note that this function doesn't look at the
7738 * cheapest-path fields, which is a good thing because they're bogus right
7739 * now.)
7740 */
7741 if (root->parse->hasTargetSRFs)
7742 adjust_paths_for_srfs(root, rel,
7743 scanjoin_targets,
7744 scanjoin_targets_contain_srfs);
7745
7746 /*
7747 * Update the rel's target to be the final (with SRFs) scan/join target.
7748 * This now matches the actual output of all the paths, and we might get
7749 * confused in createplan.c if they don't agree. We must do this now so
7750 * that any append paths made in the next part will use the correct
7751 * pathtarget (cf. create_append_path).
7752 *
7753 * Note that this is also necessary if GetForeignUpperPaths() gets called
7754 * on the final scan/join relation or on any of its children, since the
7755 * FDW might look at the rel's target to create ForeignPaths.
7756 */
7757 rel->reltarget = llast_node(PathTarget, scanjoin_targets);
7758
7759 /*
7760 * If the relation is partitioned, recursively apply the scan/join target
7761 * to all partitions, and generate brand-new Append paths in which the
7762 * scan/join target is computed below the Append rather than above it.
7763 * Since Append is not projection-capable, that might save a separate
7764 * Result node, and it also is important for partitionwise aggregate.
7765 */
7766 if (rel_is_partitioned)
7767 {
7768 List *live_children = NIL;
7769 int i;
7770
7771 /* Adjust each partition. */
7772 i = -1;
7773 while ((i = bms_next_member(rel->live_parts, i)) >= 0)
7774 {
7775 RelOptInfo *child_rel = rel->part_rels[i];
7776 AppendRelInfo **appinfos;
7777 int nappinfos;
7778 List *child_scanjoin_targets = NIL;
7779
7780 Assert(child_rel != NULL);
7781
7782 /* Dummy children can be ignored. */
7783 if (IS_DUMMY_REL(child_rel))
7784 continue;
7785
7786 /* Translate scan/join targets for this child. */
7787 appinfos = find_appinfos_by_relids(root, child_rel->relids,
7788 &nappinfos);
7789 foreach(lc, scanjoin_targets)
7790 {
7791 PathTarget *target = lfirst_node(PathTarget, lc);
7792
7793 target = copy_pathtarget(target);
7794 target->exprs = (List *)
7795 adjust_appendrel_attrs(root,
7796 (Node *) target->exprs,
7797 nappinfos, appinfos);
7798 child_scanjoin_targets = lappend(child_scanjoin_targets,
7799 target);
7800 }
7801 pfree(appinfos);
7802
7803 /* Recursion does the real work. */
7804 apply_scanjoin_target_to_paths(root, child_rel,
7805 child_scanjoin_targets,
7806 scanjoin_targets_contain_srfs,
7807 scanjoin_target_parallel_safe,
7808 tlist_same_exprs);
7809
7810 /* Save non-dummy children for Append paths. */
7811 if (!IS_DUMMY_REL(child_rel))
7812 live_children = lappend(live_children, child_rel);
7813 }
7814
7815 /* Build new paths for this relation by appending child paths. */
7816 add_paths_to_append_rel(root, rel, live_children);
7817 }
7818
7819 /*
7820 * Consider generating Gather or Gather Merge paths. We must only do this
7821 * if the relation is parallel safe, and we don't do it for child rels to
7822 * avoid creating multiple Gather nodes within the same plan. We must do
7823 * this after all paths have been generated and before set_cheapest, since
7824 * one of the generated paths may turn out to be the cheapest one.
7825 */
7826 if (rel->consider_parallel && !IS_OTHER_REL(rel))
7827 generate_useful_gather_paths(root, rel, false);
7828
7829 /*
7830 * Reassess which paths are the cheapest, now that we've potentially added
7831 * new Gather (or Gather Merge) and/or Append (or MergeAppend) paths to
7832 * this relation.
7833 */
7834 set_cheapest(rel);
7835}
7836
7837/*
7838 * create_partitionwise_grouping_paths
7839 *
7840 * If the partition keys of the input relation are part of the GROUP BY clause, all
7841 * the rows belonging to a given group come from a single partition. This
7842 * allows aggregation/grouping over a partitioned relation to be broken down
7843 * into aggregation/grouping on each partition. This should be no worse, and
7844 * often better, than the normal approach.
7845 *
7846 * However, if the GROUP BY clause does not contain all the partition keys,
7847 * rows from a given group may be spread across multiple partitions. In that
7848 * case, we perform partial aggregation for each group, append the results,
7849 * and then finalize aggregation. This is less certain to win than the
7850 * previous case. It may win if the PartialAggregate stage greatly reduces
7851 * the number of groups, because fewer rows will pass through the Append node.
7852 * It may lose if we have lots of small groups.
7853 */
7854 static void
7855 create_partitionwise_grouping_paths(PlannerInfo *root,
7856 RelOptInfo *input_rel,
7857 RelOptInfo *grouped_rel,
7858 RelOptInfo *partially_grouped_rel,
7859 const AggClauseCosts *agg_costs,
7860 grouping_sets_data *gd,
7861 PartitionwiseAggregateType patype,
7862 GroupPathExtraData *extra)
7863{
7864 List *grouped_live_children = NIL;
7865 List *partially_grouped_live_children = NIL;
7866 PathTarget *target = grouped_rel->reltarget;
7867 bool partial_grouping_valid = true;
7868 int i;
7869
7870 Assert(patype != PARTITIONWISE_AGGREGATE_NONE);
7871 Assert(patype != PARTITIONWISE_AGGREGATE_PARTIAL ||
7872 partially_grouped_rel != NULL);
7873
7874 /* Add paths for partitionwise aggregation/grouping. */
7875 i = -1;
7876 while ((i = bms_next_member(input_rel->live_parts, i)) >= 0)
7877 {
7878 RelOptInfo *child_input_rel = input_rel->part_rels[i];
7879 PathTarget *child_target;
7880 AppendRelInfo **appinfos;
7881 int nappinfos;
7882 GroupPathExtraData child_extra;
7883 RelOptInfo *child_grouped_rel;
7884 RelOptInfo *child_partially_grouped_rel;
7885
7886 Assert(child_input_rel != NULL);
7887
7888 /* Dummy children can be ignored. */
7889 if (IS_DUMMY_REL(child_input_rel))
7890 continue;
7891
7892 child_target = copy_pathtarget(target);
7893
7894 /*
7895 * Copy the given "extra" structure as is and then override the
7896 * members specific to this child.
7897 */
7898 memcpy(&child_extra, extra, sizeof(child_extra));
7899
7900 appinfos = find_appinfos_by_relids(root, child_input_rel->relids,
7901 &nappinfos);
7902
7903 child_target->exprs = (List *)
7904 adjust_appendrel_attrs(root,
7905 (Node *) target->exprs,
7906 nappinfos, appinfos);
7907
7908 /* Translate havingQual and targetList. */
7909 child_extra.havingQual = (Node *)
7910 adjust_appendrel_attrs(root,
7911 extra->havingQual,
7912 nappinfos, appinfos);
7913 child_extra.targetList = (List *)
7914 adjust_appendrel_attrs(root,
7915 (Node *) extra->targetList,
7916 nappinfos, appinfos);
7917
7918 /*
7919 * extra->patype was the value computed for our parent rel; patype is
7920 * the value for this relation. For the child, our value is its
7921 * parent rel's value.
7922 */
7923 child_extra.patype = patype;
7924
7925 /*
7926 * Create grouping relation to hold fully aggregated grouping and/or
7927 * aggregation paths for the child.
7928 */
7929 child_grouped_rel = make_grouping_rel(root, child_input_rel,
7930 child_target,
7931 extra->target_parallel_safe,
7932 child_extra.havingQual);
7933
7934 /* Create grouping paths for this child relation. */
7935 create_ordinary_grouping_paths(root, child_input_rel,
7936 child_grouped_rel,
7937 agg_costs, gd, &child_extra,
7938 &child_partially_grouped_rel);
7939
7940 if (child_partially_grouped_rel)
7941 {
7942 partially_grouped_live_children =
7943 lappend(partially_grouped_live_children,
7944 child_partially_grouped_rel);
7945 }
7946 else
7947 partial_grouping_valid = false;
7948
7949 if (patype == PARTITIONWISE_AGGREGATE_FULL)
7950 {
7951 set_cheapest(child_grouped_rel);
7952 grouped_live_children = lappend(grouped_live_children,
7953 child_grouped_rel);
7954 }
7955
7956 pfree(appinfos);
7957 }
7958
7959 /*
7960 * Try to create append paths for partially grouped children. For full
7961 * partitionwise aggregation, we might have paths in the partial_pathlist
7962 * if parallel aggregation is possible. For partial partitionwise
7963 * aggregation, we may have paths in both pathlist and partial_pathlist.
7964 *
7965 * NB: We must have a partially grouped path for every child in order to
7966 * generate a partially grouped path for this relation.
7967 */
7968 if (partially_grouped_rel && partial_grouping_valid)
7969 {
7970 Assert(partially_grouped_live_children != NIL);
7971
7972 add_paths_to_append_rel(root, partially_grouped_rel,
7973 partially_grouped_live_children);
7974
7975 /*
7976 * We need to call set_cheapest, since the finalization step will use the
7977 * cheapest path from the rel.
7978 */
7979 if (partially_grouped_rel->pathlist)
7980 set_cheapest(partially_grouped_rel);
7981 }
7982
7983 /* If possible, create append paths for fully grouped children. */
7984 if (patype == PARTITIONWISE_AGGREGATE_FULL)
7985 {
7986 Assert(grouped_live_children != NIL);
7987
7988 add_paths_to_append_rel(root, grouped_rel, grouped_live_children);
7989 }
7990}
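The distinction drawn in the header comment, full versus partial partitionwise aggregation, can be seen in a standalone sketch (partition layout and data invented, not planner structures). Here group 1 spans both partitions, so each per-partition result is only a partial state and must be merged after the Append; if the GROUP BY key were the partition key, each per-partition result would already be final and could simply be appended.

/* Illustrative sketch only: per-partition (partial) sums merged by a finalize step. */
#include <stdio.h>

#define NGROUPS 3

typedef struct { int group; double value; } Row;

/* sum per group over one partition; result indexed by group number */
static void
partition_sums(const Row *rows, int nrows, double *sums)
{
    for (int i = 0; i < nrows; i++)
        sums[rows[i].group] += rows[i].value;
}

int
main(void)
{
    /* two partitions; group 1 happens to have rows in both of them */
    Row part0[] = {{0, 10.0}, {1, 1.0}};
    Row part1[] = {{1, 2.0}, {2, 5.0}};

    double partial0[NGROUPS] = {0}, partial1[NGROUPS] = {0}, total[NGROUPS] = {0};

    partition_sums(part0, 2, partial0);     /* per-partition aggregation */
    partition_sums(part1, 2, partial1);

    /* finalize: merge the partial states appended from all partitions */
    for (int g = 0; g < NGROUPS; g++)
        total[g] = partial0[g] + partial1[g];

    for (int g = 0; g < NGROUPS; g++)
        printf("group %d: sum %.1f\n", g, total[g]);
    return 0;
}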
7991
7992/*
7993 * group_by_has_partkey
7994 *
7995 * Returns true if all the partition keys of the given relation are part of
7996 * the GROUP BY clauses, including having matching collation, false otherwise.
7997 */
7998 static bool
7999 group_by_has_partkey(RelOptInfo *input_rel,
8000 List *targetList,
8001 List *groupClause)
8002{
8003 List *groupexprs = get_sortgrouplist_exprs(groupClause, targetList);
8004 int cnt = 0;
8005 int partnatts;
8006
8007 /* Input relation should be partitioned. */
8008 Assert(input_rel->part_scheme);
8009
8010 /* Rule out early, if there are no partition keys present. */
8011 if (!input_rel->partexprs)
8012 return false;
8013
8014 partnatts = input_rel->part_scheme->partnatts;
8015
8016 for (cnt = 0; cnt < partnatts; cnt++)
8017 {
8018 List *partexprs = input_rel->partexprs[cnt];
8019 ListCell *lc;
8020 bool found = false;
8021
8022 foreach(lc, partexprs)
8023 {
8024 ListCell *lg;
8025 Expr *partexpr = lfirst(lc);
8026 Oid partcoll = input_rel->part_scheme->partcollation[cnt];
8027
8028 foreach(lg, groupexprs)
8029 {
8030 Expr *groupexpr = lfirst(lg);
8031 Oid groupcoll = exprCollation((Node *) groupexpr);
8032
8033 /*
8034 * Note: we can assume there is at most one RelabelType node;
8035 * eval_const_expressions() will have simplified if more than
8036 * one.
8037 */
8038 if (IsA(groupexpr, RelabelType))
8039 groupexpr = ((RelabelType *) groupexpr)->arg;
8040
8041 if (equal(groupexpr, partexpr))
8042 {
8043 /*
8044 * Reject a match if the grouping collation does not match
8045 * the partitioning collation.
8046 */
8047 if (OidIsValid(partcoll) && OidIsValid(groupcoll) &&
8048 partcoll != groupcoll)
8049 return false;
8050
8051 found = true;
8052 break;
8053 }
8054 }
8055
8056 if (found)
8057 break;
8058 }
8059
8060 /*
8061 * If none of the partition key expressions match with any of the
8062 * GROUP BY expression, return false.
8063 */
8064 if (!found)
8065 return false;
8066 }
8067
8068 return true;
8069}
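The loop above pairs each partition key expression with some GROUP BY expression and additionally insists that the collations agree. A standalone sketch of the same containment test, using strings as stand-ins for expressions and collations (invented names, not planner data structures):

/* Illustrative sketch only: every partition key must be covered by the GROUP BY. */
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

typedef struct { const char *expr; const char *collation; } Key;

static bool
group_by_covers_partkeys(const Key *partkeys, int npartkeys,
                         const Key *groupkeys, int ngroupkeys)
{
    for (int i = 0; i < npartkeys; i++)
    {
        bool found = false;

        for (int j = 0; j < ngroupkeys; j++)
        {
            if (strcmp(partkeys[i].expr, groupkeys[j].expr) != 0)
                continue;
            /* an expression match only counts if the collations agree too */
            if (strcmp(partkeys[i].collation, groupkeys[j].collation) != 0)
                return false;
            found = true;
            break;
        }
        if (!found)
            return false;               /* some partition key is not grouped on */
    }
    return true;
}

int
main(void)
{
    Key partkeys[] = {{"region", "C"}};
    Key group_ok[] = {{"region", "C"}, {"product", "C"}};
    Key group_bad[] = {{"product", "C"}};

    printf("GROUP BY region, product: %s\n",
           group_by_covers_partkeys(partkeys, 1, group_ok, 2) ? "covers" : "does not cover");
    printf("GROUP BY product:         %s\n",
           group_by_covers_partkeys(partkeys, 1, group_bad, 1) ? "covers" : "does not cover");
    return 0;
}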
8070
8071/*
8072 * generate_setop_child_grouplist
8073 * Build a SortGroupClause list defining the sort/grouping properties
8074 * of the child of a set operation.
8075 *
8076 * This is similar to generate_setop_grouplist() but differs as the setop
8077 * child query's targetlist entries may already have a tleSortGroupRef
8078 * assigned for other purposes, such as GROUP BYs. Here we keep the
8079 * SortGroupClause list in the same order as 'op' groupClauses and just adjust
8080 * the tleSortGroupRef to reference the TargetEntry's 'ressortgroupref'.
8081 */
8082 static List *
8083 generate_setop_child_grouplist(SetOperationStmt *op, List *targetlist)
8084 {
8085 List *grouplist = copyObject(op->groupClauses);
8086 ListCell *lg;
8087 ListCell *lt;
8088
8089 lg = list_head(grouplist);
8090 foreach(lt, targetlist)
8091 {
8092 TargetEntry *tle = (TargetEntry *) lfirst(lt);
8093 SortGroupClause *sgc;
8094
8095 /* resjunk columns could have sortgrouprefs. Leave these alone */
8096 if (tle->resjunk)
8097 continue;
8098
8099 /* we expect every non-resjunk target to have a SortGroupClause */
8100 Assert(lg != NULL);
8101 sgc = (SortGroupClause *) lfirst(lg);
8102 lg = lnext(grouplist, lg);
8103
8104 /* assign a tleSortGroupRef, or reuse the existing one */
8105 sgc->tleSortGroupRef = assignSortGroupRef(tle, targetlist);
8106 }
8107 Assert(lg == NULL);
8108 return grouplist;
8109}
int compute_parallel_worker(RelOptInfo *rel, double heap_pages, double index_pages, int max_workers)
Definition: allpaths.c:4214
void generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
Definition: allpaths.c:3201
void add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, List *live_childrels)
Definition: allpaths.c:1314
AppendRelInfo ** find_appinfos_by_relids(PlannerInfo *root, Relids relids, int *nappinfos)
Definition: appendinfo.c:736
Node * adjust_appendrel_attrs(PlannerInfo *root, Node *node, int nappinfos, AppendRelInfo **appinfos)
Definition: appendinfo.c:200
List * adjust_inherited_attnums_multilevel(PlannerInfo *root, List *attnums, Index child_relid, Index top_parent_relid)
Definition: appendinfo.c:665
Node * adjust_appendrel_attrs_multilevel(PlannerInfo *root, Node *node, RelOptInfo *childrel, RelOptInfo *parentrel)
Definition: appendinfo.c:524
void pprint(const void *obj)
Definition: print.c:54
BipartiteMatchState * BipartiteMatch(int u_size, int v_size, short **adjacency)
void BipartiteMatchFree(BipartiteMatchState *state)
Bitmapset * bms_make_singleton(int x)
Definition: bitmapset.c:216
bool bms_equal(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:142
int bms_next_member(const Bitmapset *a, int prevbit)
Definition: bitmapset.c:1306
Bitmapset * bms_del_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:1161
Bitmapset * bms_del_member(Bitmapset *a, int x)
Definition: bitmapset.c:868
bool bms_is_subset(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:412
void bms_free(Bitmapset *a)
Definition: bitmapset.c:239
int bms_num_members(const Bitmapset *a)
Definition: bitmapset.c:751
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:510
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:815
BMS_Membership bms_membership(const Bitmapset *a)
Definition: bitmapset.c:781
bool bms_overlap_list(const Bitmapset *a, const List *b)
Definition: bitmapset.c:608
#define bms_is_empty(a)
Definition: bitmapset.h:118
@ BMS_MULTIPLE
Definition: bitmapset.h:73
uint32 BlockNumber
Definition: block.h:31
#define Min(x, y)
Definition: c.h:958
#define Max(x, y)
Definition: c.h:952
#define Assert(condition)
Definition: c.h:812
int64_t int64
Definition: c.h:482
#define FLOAT8PASSBYVAL
Definition: c.h:589
unsigned int Index
Definition: c.h:568
#define MemSet(start, val, len)
Definition: c.h:974
#define OidIsValid(objectId)
Definition: c.h:729
size_t Size
Definition: c.h:559
bool contain_agg_clause(Node *clause)
Definition: clauses.c:177
Node * estimate_expression_value(PlannerInfo *root, Node *node)
Definition: clauses.c:2394
WindowFuncLists * find_window_functions(Node *clause, Index maxWinRef)
Definition: clauses.c:227
Node * eval_const_expressions(PlannerInfo *root, Node *node)
Definition: clauses.c:2253
void convert_saop_to_hashed_saop(Node *node)
Definition: clauses.c:2286
char max_parallel_hazard(Query *parse)
Definition: clauses.c:733
bool is_parallel_safe(PlannerInfo *root, Node *node)
Definition: clauses.c:752
bool contain_subplans(Node *clause)
Definition: clauses.c:329
bool contain_volatile_functions(Node *clause)
Definition: clauses.c:537
double cpu_operator_cost
Definition: costsize.c:134
bool enable_partitionwise_aggregate
Definition: costsize.c:160
int max_parallel_workers_per_gather
Definition: costsize.c:143
double parallel_setup_cost
Definition: costsize.c:136
double parallel_tuple_cost
Definition: costsize.c:135
void cost_sort(Path *path, PlannerInfo *root, List *pathkeys, int input_disabled_nodes, Cost input_cost, double tuples, int width, Cost comparison_cost, int sort_mem, double limit_tuples)
Definition: costsize.c:2144
double compute_gather_rows(Path *path)
Definition: costsize.c:6600
void cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
Definition: costsize.c:4758
PathTarget * set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
Definition: costsize.c:6342
void cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
Definition: costsize.c:4732
bool enable_presorted_aggregate
Definition: costsize.c:164
bool enable_hashagg
Definition: costsize.c:152
int32 clamp_width_est(int64 tuple_width)
Definition: costsize.c:242
bool enable_indexscan
Definition: costsize.c:146
bool enable_incremental_sort
Definition: costsize.c:151
Plan * materialize_finished_plan(Plan *subplan)
Definition: createplan.c:6604
Plan * create_plan(PlannerInfo *root, Path *best_path)
Definition: createplan.c:340
int errdetail(const char *fmt,...)
Definition: elog.c:1203
int errcode(int sqlerrcode)
Definition: elog.c:853
int errmsg(const char *fmt,...)
Definition: elog.c:1070
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:225
#define ereport(elevel,...)
Definition: elog.h:149
bool equal(const void *a, const void *b)
Definition: equalfuncs.c:223
bool ExecSupportsBackwardScan(Plan *node)
Definition: execAmi.c:510
Datum Int64GetDatum(int64 X)
Definition: fmgr.c:1807
#define OidFunctionCall1(functionId, arg1)
Definition: fmgr.h:679
FdwRoutine * GetFdwRoutineByRelId(Oid relid)
Definition: foreign.c:419
int max_parallel_maintenance_workers
Definition: globals.c:133
bool IsUnderPostmaster
Definition: globals.c:119
int maintenance_work_mem
Definition: globals.c:132
#define IsParallelWorker()
Definition: parallel.h:60
void index_close(Relation relation, LOCKMODE lockmode)
Definition: indexam.c:177
Relation index_open(Oid relationId, LOCKMODE lockmode)
Definition: indexam.c:133
int b
Definition: isn.c:69
int a
Definition: isn.c:68
int j
Definition: isn.c:73
int i
Definition: isn.c:72
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:76
double jit_optimize_above_cost
Definition: jit.c:41
bool jit_enabled
Definition: jit.c:32
bool jit_expressions
Definition: jit.c:36
bool jit_tuple_deforming
Definition: jit.c:38
double jit_above_cost
Definition: jit.c:39
double jit_inline_above_cost
Definition: jit.c:40
#define PGJIT_OPT3
Definition: jit.h:21
#define PGJIT_NONE
Definition: jit.h:19
#define PGJIT_EXPR
Definition: jit.h:23
#define PGJIT_DEFORM
Definition: jit.h:24
#define PGJIT_INLINE
Definition: jit.h:22
#define PGJIT_PERFORM
Definition: jit.h:20
Bitmapset * DiscreteKnapsack(int max_weight, int num_items, int *item_weights, double *item_values)
Definition: knapsack.c:52
List * lappend(List *list, void *datum)
Definition: list.c:339
List * list_difference_int(const List *list1, const List *list2)
Definition: list.c:1288
List * list_concat_unique_ptr(List *list1, const List *list2)
Definition: list.c:1427
List * list_concat(List *list1, const List *list2)
Definition: list.c:561
List * list_copy(const List *oldlist)
Definition: list.c:1573
List * lappend_int(List *list, int datum)
Definition: list.c:357
List * lcons(void *datum, List *list)
Definition: list.c:495
List * list_delete_int(List *list, int datum)
Definition: list.c:891
bool list_member_ptr(const List *list, const void *datum)
Definition: list.c:682
void list_free(List *list)
Definition: list.c:1546
bool list_member_int(const List *list, int datum)
Definition: list.c:702
List * list_copy_head(const List *oldlist, int len)
Definition: list.c:1593
List * list_concat_unique(List *list1, const List *list2)
Definition: list.c:1405
#define NoLock
Definition: lockdefs.h:34
#define AccessShareLock
Definition: lockdefs.h:36
@ LockWaitBlock
Definition: lockoptions.h:39
LockClauseStrength
Definition: lockoptions.h:22
@ LCS_FORUPDATE
Definition: lockoptions.h:27
@ LCS_NONE
Definition: lockoptions.h:23
@ LCS_FORSHARE
Definition: lockoptions.h:25
@ LCS_FORKEYSHARE
Definition: lockoptions.h:24
@ LCS_FORNOKEYUPDATE
Definition: lockoptions.h:26
RegProcedure get_func_support(Oid funcid)
Definition: lsyscache.c:1858
int32 get_typavgwidth(Oid typid, int32 typmod)
Definition: lsyscache.c:2578
Datum subpath(PG_FUNCTION_ARGS)
Definition: ltree_op.c:308
Expr * make_opclause(Oid opno, Oid opresulttype, bool opretset, Expr *leftop, Expr *rightop, Oid opcollid, Oid inputcollid)
Definition: makefuncs.c:651
Const * makeConst(Oid consttype, int32 consttypmod, Oid constcollid, int constlen, Datum constvalue, bool constisnull, bool constbyval)
Definition: makefuncs.c:301
List * make_ands_implicit(Expr *clause)
Definition: makefuncs.c:760
void pfree(void *pointer)
Definition: mcxt.c:1521
void * palloc0(Size size)
Definition: mcxt.c:1347
void * palloc(Size size)
Definition: mcxt.c:1317
MemoryContext CurrentMemoryContext
Definition: mcxt.c:143
Oid exprCollation(const Node *expr)
Definition: nodeFuncs.c:816
bool expression_returns_set(Node *clause)
Definition: nodeFuncs.c:758
void fix_opfuncids(Node *node)
Definition: nodeFuncs.c:1830
size_t get_hash_memory_limit(void)
Definition: nodeHash.c:3487
#define DO_AGGSPLIT_SKIPFINAL(as)
Definition: nodes.h:386
#define IsA(nodeptr, _type_)
Definition: nodes.h:158
#define copyObject(obj)
Definition: nodes.h:224
double Cost
Definition: nodes.h:251
#define nodeTag(nodeptr)
Definition: nodes.h:133
#define IS_OUTER_JOIN(jointype)
Definition: nodes.h:338
@ CMD_MERGE
Definition: nodes.h:269
@ CMD_DELETE
Definition: nodes.h:268
@ CMD_UPDATE
Definition: nodes.h:266
@ CMD_SELECT
Definition: nodes.h:265
AggStrategy
Definition: nodes.h:353
@ AGG_SORTED
Definition: nodes.h:355
@ AGG_HASHED
Definition: nodes.h:356
@ AGG_MIXED
Definition: nodes.h:357
@ AGG_PLAIN
Definition: nodes.h:354
#define DO_AGGSPLIT_SERIALIZE(as)
Definition: nodes.h:387
AggSplit
Definition: nodes.h:375
@ AGGSPLIT_FINAL_DESERIAL
Definition: nodes.h:381
@ AGGSPLIT_SIMPLE
Definition: nodes.h:377
@ AGGSPLIT_INITIAL_SERIAL
Definition: nodes.h:379
@ LIMIT_OPTION_COUNT
Definition: nodes.h:431
#define makeNode(_type_)
Definition: nodes.h:155
#define PVC_RECURSE_AGGREGATES
Definition: optimizer.h:188
#define PVC_RECURSE_WINDOWFUNCS
Definition: optimizer.h:190
@ DEBUG_PARALLEL_REGRESS
Definition: optimizer.h:108
@ DEBUG_PARALLEL_OFF
Definition: optimizer.h:106
#define PVC_INCLUDE_WINDOWFUNCS
Definition: optimizer.h:189
#define PVC_INCLUDE_PLACEHOLDERS
Definition: optimizer.h:191
#define PVC_INCLUDE_AGGREGATES
Definition: optimizer.h:187
int assign_special_exec_param(PlannerInfo *root)
Definition: paramassign.c:664
List * expand_grouping_sets(List *groupingSets, bool groupDistinct, int limit)
Definition: parse_agg.c:1894
Index assignSortGroupRef(TargetEntry *tle, List *tlist)
RTEPermissionInfo * addRTEPermissionInfo(List **rteperminfos, RangeTblEntry *rte)
#define CURSOR_OPT_SCROLL
Definition: parsenodes.h:3308
#define CURSOR_OPT_FAST_PLAN
Definition: parsenodes.h:3314
@ RTE_JOIN
Definition: parsenodes.h:1019
@ RTE_VALUES
Definition: parsenodes.h:1022
@ RTE_SUBQUERY
Definition: parsenodes.h:1018
@ RTE_RESULT
Definition: parsenodes.h:1025
@ RTE_FUNCTION
Definition: parsenodes.h:1020
@ RTE_TABLEFUNC
Definition: parsenodes.h:1021
@ RTE_GROUP
Definition: parsenodes.h:1028
@ RTE_RELATION
Definition: parsenodes.h:1017
#define CURSOR_OPT_PARALLEL_OK
Definition: parsenodes.h:3317
void CheckSelectLocking(Query *qry, LockClauseStrength strength)
Definition: analyze.c:3330
const char * LCS_asString(LockClauseStrength strength)
Definition: analyze.c:3305
#define rt_fetch(rangetable_index, rangetable)
Definition: parsetree.h:31
void DestroyPartitionDirectory(PartitionDirectory pdir)
Definition: partdesc.c:484
List * append_pathkeys(List *target, List *source)
Definition: pathkeys.c:107
bool pathkeys_count_contained_in(List *keys1, List *keys2, int *n_common)
Definition: pathkeys.c:558
List * make_pathkeys_for_sortclauses(PlannerInfo *root, List *sortclauses, List *tlist)
Definition: pathkeys.c:1335
List * make_pathkeys_for_sortclauses_extended(PlannerInfo *root, List **sortclauses, List *tlist, bool remove_redundant, bool remove_group_rtindex, bool *sortable, bool set_ec_sortref)
Definition: pathkeys.c:1380
bool pathkeys_contained_in(List *keys1, List *keys2)
Definition: pathkeys.c:343
PathKeysComparison compare_pathkeys(List *keys1, List *keys2)
Definition: pathkeys.c:304
List * get_useful_group_keys_orderings(PlannerInfo *root, Path *path)
Definition: pathkeys.c:467
IndexPath * create_index_path(PlannerInfo *root, IndexOptInfo *index, List *indexclauses, List *indexorderbys, List *indexorderbycols, List *pathkeys, ScanDirection indexscandir, bool indexonly, Relids required_outer, double loop_count, bool partial_path)
Definition: pathnode.c:1049
ProjectSetPath * create_set_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2962
ProjectionPath * create_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2763
WindowAggPath * create_windowagg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *windowFuncs, List *runCondition, WindowClause *winclause, List *qual, bool topwindow)
Definition: pathnode.c:3577
LockRowsPath * create_lockrows_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *rowMarks, int epqParam)
Definition: pathnode.c:3813
Path * apply_projection_to_path(PlannerInfo *root, RelOptInfo *rel, Path *path, PathTarget *target)
Definition: pathnode.c:2873
Path * create_seqscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer, int parallel_workers)
Definition: pathnode.c:983
GatherMergePath * create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *pathkeys, Relids required_outer, double *rows)
Definition: pathnode.c:1962
void set_cheapest(RelOptInfo *parent_rel)
Definition: pathnode.c:269
void add_partial_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:795
LimitPath * create_limit_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, Node *limitOffset, Node *limitCount, LimitOption limitOption, int64 offset_est, int64 count_est)
Definition: pathnode.c:3979
AppendPath * create_append_path(PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *partial_subpaths, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows)
Definition: pathnode.c:1300
int compare_fractional_path_costs(Path *path1, Path *path2, double fraction)
Definition: pathnode.c:124
IncrementalSortPath * create_incremental_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, int presorted_keys, double limit_tuples)
Definition: pathnode.c:3032
GroupingSetsPath * create_groupingsets_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *having_qual, AggStrategy aggstrategy, List *rollups, const AggClauseCosts *agg_costs)
Definition: pathnode.c:3323
SortPath * create_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, double limit_tuples)
Definition: pathnode.c:3082
GroupPath * create_group_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *groupClause, List *qual, double numGroups)
Definition: pathnode.c:3127
void add_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:461
UpperUniquePath * create_upper_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, int numCols, double numGroups)
Definition: pathnode.c:3187
AggPath * create_agg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, AggStrategy aggstrategy, AggSplit aggsplit, List *groupClause, List *qual, const AggClauseCosts *aggcosts, double numGroups)
Definition: pathnode.c:3240
ModifyTablePath * create_modifytable_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, CmdType operation, bool canSetTag, Index nominalRelation, Index rootRelation, bool partColsUpdated, List *resultRelations, List *updateColnosLists, List *withCheckOptionLists, List *returningLists, List *rowMarks, OnConflictExpr *onconflict, List *mergeActionLists, List *mergeJoinConditions, int epqParam)
Definition: pathnode.c:3877
GroupResultPath * create_group_result_path(PlannerInfo *root, RelOptInfo *rel, PathTarget *target, List *havingqual)
Definition: pathnode.c:1586
PartitionwiseAggregateType
Definition: pathnodes.h:3280
@ PARTITIONWISE_AGGREGATE_PARTIAL
Definition: pathnodes.h:3283
@ PARTITIONWISE_AGGREGATE_FULL
Definition: pathnodes.h:3282
@ PARTITIONWISE_AGGREGATE_NONE
Definition: pathnodes.h:3281
#define IS_DUMMY_REL(r)
Definition: pathnodes.h:1958
#define GROUPING_CAN_USE_HASH
Definition: pathnodes.h:3265
#define get_pathtarget_sortgroupref(target, colno)
Definition: pathnodes.h:1560
#define IS_PARTITIONED_REL(rel)
Definition: pathnodes.h:1062
#define GROUPING_CAN_USE_SORT
Definition: pathnodes.h:3264
#define GROUPING_CAN_PARTIAL_AGG
Definition: pathnodes.h:3266
@ UPPERREL_GROUP_AGG
Definition: pathnodes.h:74
@ UPPERREL_FINAL
Definition: pathnodes.h:79
@ UPPERREL_DISTINCT
Definition: pathnodes.h:77
@ UPPERREL_PARTIAL_GROUP_AGG
Definition: pathnodes.h:72
@ UPPERREL_ORDERED
Definition: pathnodes.h:78
@ UPPERREL_WINDOW
Definition: pathnodes.h:75
@ UPPERREL_PARTIAL_DISTINCT
Definition: pathnodes.h:76
@ RELOPT_OTHER_UPPER_REL
Definition: pathnodes.h:832
#define IS_OTHER_REL(rel)
Definition: pathnodes.h:854
@ PATHKEYS_BETTER2
Definition: paths.h:206
@ PATHKEYS_BETTER1
Definition: paths.h:205
@ PATHKEYS_DIFFERENT
Definition: paths.h:207
@ PATHKEYS_EQUAL
Definition: paths.h:204
bool has_subclass(Oid relationId)
Definition: pg_inherits.c:355
#define lfirst(lc)
Definition: pg_list.h:172
#define lfirst_node(type, lc)
Definition: pg_list.h:176
static int list_length(const List *l)
Definition: pg_list.h:152
#define linitial_node(type, l)
Definition: pg_list.h:181
#define NIL
Definition: pg_list.h:68
#define forboth(cell1, list1, cell2, list2)
Definition: pg_list.h:518
#define foreach_current_index(var_or_cell)
Definition: pg_list.h:403
#define lfirst_int(lc)
Definition: pg_list.h:173
#define list_make1(x1)
Definition: pg_list.h:212
#define linitial_int(l)
Definition: pg_list.h:179
#define for_each_cell(cell, lst, initcell)
Definition: pg_list.h:438
#define for_each_from(cell, lst, N)
Definition: pg_list.h:414
static void * list_nth(const List *list, int n)
Definition: pg_list.h:299
#define linitial(l)
Definition: pg_list.h:178
#define foreach_node(type, var, lst)
Definition: pg_list.h:496
static ListCell * list_head(const List *l)
Definition: pg_list.h:128
#define list_nth_node(type, list, n)
Definition: pg_list.h:327
static ListCell * lnext(const List *l, const ListCell *c)
Definition: pg_list.h:343
#define list_make1_int(x1)
Definition: pg_list.h:227
static int list_cell_number(const List *l, const ListCell *c)
Definition: pg_list.h:333
#define llast_node(type, l)
Definition: pg_list.h:202
static int scale
Definition: pgbench.c:181
void preprocess_minmax_aggregates(PlannerInfo *root)
Definition: planagg.c:73
void estimate_rel_size(Relation rel, int32 *attr_widths, BlockNumber *pages, double *tuples, double *allvisfrac)
Definition: plancat.c:1067
int32 get_relation_data_width(Oid relid, int32 *attr_widths)
Definition: plancat.c:1234
RelOptInfo * query_planner(PlannerInfo *root, query_pathkeys_callback qp_callback, void *qp_extra)
Definition: planmain.c:54
#define DEFAULT_CURSOR_TUPLE_FRACTION
Definition: planmain.h:21
#define EXPRKIND_TABLEFUNC_LATERAL
Definition: planner.c:91
static RelOptInfo * create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *distinct_rel)
Definition: planner.c:4923
static List * postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
Definition: planner.c:5658
static PathTarget * make_partial_grouping_target(PlannerInfo *root, PathTarget *grouping_target, Node *havingQual)
Definition: planner.c:5520
Expr * expression_planner_with_deps(Expr *expr, List **relationOids, List **invalItems)
Definition: planner.c:6597
#define EXPRKIND_TARGET
Definition: planner.c:80
#define EXPRKIND_APPINFO
Definition: planner.c:86
static void gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
Definition: planner.c:7495
static void preprocess_rowmarks(PlannerInfo *root)
Definition: planner.c:2322
#define EXPRKIND_TABLESAMPLE
Definition: planner.c:88
PlannedStmt * planner(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams)
Definition: planner.c:284
static void create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel)
Definition: planner.c:3847
#define EXPRKIND_GROUPEXPR
Definition: planner.c:92
planner_hook_type planner_hook
Definition: planner.c:72
double cursor_tuple_fraction
Definition: planner.c:66
static bool is_degenerate_grouping(PlannerInfo *root)
Definition: planner.c:3826
bool plan_cluster_use_sort(Oid tableOid, Oid indexOid)
Definition: planner.c:6650
static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode)
Definition: planner.c:1287
int plan_create_index_workers(Oid tableOid, Oid indexOid)
Definition: planner.c:6772
#define EXPRKIND_PHV
Definition: planner.c:87
#define EXPRKIND_RTFUNC_LATERAL
Definition: planner.c:82
#define EXPRKIND_VALUES_LATERAL
Definition: planner.c:84
static void create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, GroupPathExtraData *extra, RelOptInfo **partially_grouped_rel_p)
Definition: planner.c:3911
#define EXPRKIND_LIMIT
Definition: planner.c:85
#define EXPRKIND_VALUES
Definition: planner.c:83
static bool can_partial_agg(PlannerInfo *root)
Definition: planner.c:7578
static double preprocess_limit(PlannerInfo *root, double tuple_fraction, int64 *offset_est, int64 *count_est)
Definition: planner.c:2500
Path * get_cheapest_fractional_path(RelOptInfo *rel, double tuple_fraction)
Definition: planner.c:6411
Expr * preprocess_phv_expression(PlannerInfo *root, Expr *expr)
Definition: planner.c:1331
static List * get_useful_pathkeys_for_distinct(PlannerInfo *root, List *needed_pathkeys, List *path_pathkeys)
Definition: planner.c:5103
bool parallel_leader_participation
Definition: planner.c:68
static PathTarget * make_window_input_target(PlannerInfo *root, PathTarget *final_target, List *activeWindows)
Definition: planner.c:5992
static void apply_scanjoin_target_to_paths(PlannerInfo *root, RelOptInfo *rel, List *scanjoin_targets, List *scanjoin_targets_contain_srfs, bool scanjoin_target_parallel_safe, bool tlist_same_exprs)
Definition: planner.c:7620
static RelOptInfo * create_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target)
Definition: planner.c:4670
static void optimize_window_clauses(PlannerInfo *root, WindowFuncLists *wflists)
Definition: planner.c:5695
RowMarkType select_rowmark_type(RangeTblEntry *rte, LockClauseStrength strength)
Definition: planner.c:2434
PlannerInfo * subquery_planner(PlannerGlobal *glob, Query *parse, PlannerInfo *parent_root, bool hasRecursion, double tuple_fraction, SetOperationStmt *setops)
Definition: planner.c:638
static void adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel, List *targets, List *targets_contain_srfs)
Definition: planner.c:6454
static void create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *final_distinct_rel, PathTarget *target)
Definition: planner.c:4740
#define EXPRKIND_QUAL
Definition: planner.c:79
static List * preprocess_groupclause(PlannerInfo *root, List *force)
Definition: planner.c:2751
static Node * preprocess_expression(PlannerInfo *root, Node *expr, int kind)
Definition: planner.c:1185
static Path * make_ordered_path(PlannerInfo *root, RelOptInfo *rel, Path *path, Path *cheapest_path, List *pathkeys, double limit_tuples)
Definition: planner.c:7436
static bool has_volatile_pathkey(List *keys)
Definition: planner.c:3107
static RelOptInfo * create_partial_grouping_paths(PlannerInfo *root, RelOptInfo *grouped_rel, RelOptInfo *input_rel, grouping_sets_data *gd, GroupPathExtraData *extra, bool force_rel_creation)
Definition: planner.c:7142
static PathTarget * make_sort_input_target(PlannerInfo *root, PathTarget *final_target, bool *have_postponed_srfs)
Definition: planner.c:6240
static void create_one_window_path(PlannerInfo *root, RelOptInfo *window_rel, Path *path, PathTarget *input_target, PathTarget *output_target, WindowFuncLists *wflists, List *activeWindows)
Definition: planner.c:4500
bool enable_distinct_reordering
Definition: planner.c:69
void mark_partial_aggref(Aggref *agg, AggSplit aggsplit)
Definition: planner.c:5623
static grouping_sets_data * preprocess_grouping_sets(PlannerInfo *root)
Definition: planner.c:2104
int debug_parallel_query
Definition: planner.c:67
static List * remap_to_groupclause_idx(List *groupClause, List *gsets, int *tleref_to_colnum_map)
Definition: planner.c:2285
static void adjust_group_pathkeys_for_groupagg(PlannerInfo *root)
Definition: planner.c:3152
static PathTarget * make_group_input_target(PlannerInfo *root, PathTarget *final_target)
Definition: planner.c:5408
static List * reorder_grouping_sets(List *groupingSets, List *sortclause)
Definition: planner.c:3059
static int common_prefix_cmp(const void *a, const void *b)
Definition: planner.c:5923
static void grouping_planner(PlannerInfo *root, double tuple_fraction, SetOperationStmt *setops)
Definition: planner.c:1364
static RelOptInfo * make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, Node *havingQual)
Definition: planner.c:3773
static List * generate_setop_child_grouplist(SetOperationStmt *op, List *targetlist)
Definition: planner.c:8083
static List * select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
Definition: planner.c:5835
Expr * expression_planner(Expr *expr)
Definition: planner.c:6570
bool limit_needed(Query *parse)
Definition: planner.c:2685
create_upper_paths_hook_type create_upper_paths_hook
Definition: planner.c:75
#define EXPRKIND_TABLEFUNC
Definition: planner.c:90
static void consider_groupingsets_paths(PlannerInfo *root, RelOptInfo *grouped_rel, Path *path, bool is_sorted, bool can_hash, grouping_sets_data *gd, const AggClauseCosts *agg_costs, double dNumGroups)
Definition: planner.c:4051
static List * make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc, List *tlist)
Definition: planner.c:6112
static void add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, double dNumGroups, GroupPathExtraData *extra)
Definition: planner.c:6905
static RelOptInfo * create_ordered_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, double limit_tuples)
Definition: planner.c:5188
#define EXPRKIND_RTFUNC
Definition: planner.c:81
static double get_number_of_groups(PlannerInfo *root, double path_rows, grouping_sets_data *gd, List *target_list)
Definition: planner.c:3538
static List * extract_rollup_sets(List *groupingSets)
Definition: planner.c:2847
static RelOptInfo * create_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, grouping_sets_data *gd)
Definition: planner.c:3660
static void create_partitionwise_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, PartitionwiseAggregateType patype, GroupPathExtraData *extra)
Definition: planner.c:7855
#define EXPRKIND_ARBITER_ELEM
Definition: planner.c:89
static bool group_by_has_partkey(RelOptInfo *input_rel, List *targetList, List *groupClause)
Definition: planner.c:7999
PlannedStmt * standard_planner(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams)
Definition: planner.c:297
static void standard_qp_callback(PlannerInfo *root, void *extra)
Definition: planner.c:3333
static RelOptInfo * create_window_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *input_target, PathTarget *output_target, bool output_target_parallel_safe, WindowFuncLists *wflists, List *activeWindows)
Definition: planner.c:4413
PlannedStmt *(* planner_hook_type)(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams)
Definition: planner.h:26
void(* create_upper_paths_hook_type)(PlannerInfo *root, UpperRelationKind stage, RelOptInfo *input_rel, RelOptInfo *output_rel, void *extra)
Definition: planner.h:33
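The two hook typedefs above are the planner's extension points. A minimal sketch of an extension installing planner_hook and chaining to standard_planner(), following the planner_hook_type signature shown above; the module function my_planner, the saved-hook variable prev_planner_hook, and the _PG_init boilerplate are hypothetical illustration names, not part of planner.c:

#include "postgres.h"
#include "fmgr.h"
#include "optimizer/planner.h"

PG_MODULE_MAGIC;

static planner_hook_type prev_planner_hook = NULL;

/* Hypothetical replacement planner: delegate to whichever planner was
 * installed before us, or to standard_planner(), so behaviour is unchanged. */
static PlannedStmt *
my_planner(Query *parse, const char *query_string,
           int cursorOptions, ParamListInfo boundParams)
{
	if (prev_planner_hook)
		return prev_planner_hook(parse, query_string, cursorOptions, boundParams);
	return standard_planner(parse, query_string, cursorOptions, boundParams);
}

void
_PG_init(void)
{
	/* Save any previously installed hook so it can be chained. */
	prev_planner_hook = planner_hook;
	planner_hook = my_planner;
}

create_upper_paths_hook can be installed with the same save-and-chain pattern.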
RowMarkType
Definition: plannodes.h:1326
@ ROW_MARK_COPY
Definition: plannodes.h:1332
@ ROW_MARK_REFERENCE
Definition: plannodes.h:1331
@ ROW_MARK_SHARE
Definition: plannodes.h:1329
@ ROW_MARK_EXCLUSIVE
Definition: plannodes.h:1327
@ ROW_MARK_NOKEYEXCLUSIVE
Definition: plannodes.h:1328
@ ROW_MARK_KEYSHARE
Definition: plannodes.h:1330
#define qsort(a, b, c, d)
Definition: port.h:447
#define printf(...)
Definition: port.h:244
static int64 DatumGetInt64(Datum X)
Definition: postgres.h:385
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:322
static Pointer DatumGetPointer(Datum X)
Definition: postgres.h:312
#define InvalidOid
Definition: postgres_ext.h:36
unsigned int Oid
Definition: postgres_ext.h:31
void get_agg_clause_costs(PlannerInfo *root, AggSplit aggsplit, AggClauseCosts *costs)
Definition: prepagg.c:559
void preprocess_aggrefs(PlannerInfo *root, Node *clause)
Definition: prepagg.c:110
void preprocess_function_rtes(PlannerInfo *root)
Definition: prepjointree.c:887
void flatten_simple_union_all(PlannerInfo *root)
void transform_MERGE_to_join(Query *parse)
Definition: prepjointree.c:168
void remove_useless_result_rtes(PlannerInfo *root)
void pull_up_sublinks(PlannerInfo *root)
Definition: prepjointree.c:453
void replace_empty_jointree(Query *parse)
Definition: prepjointree.c:395
void pull_up_subqueries(PlannerInfo *root)
Definition: prepjointree.c:928
Relids get_relids_in_jointree(Node *jtnode, bool include_outer_joins, bool include_inner_joins)
void reduce_outer_joins(PlannerInfo *root)
Expr * canonicalize_qual(Expr *qual, bool is_check)
Definition: prepqual.c:293
void preprocess_targetlist(PlannerInfo *root)
Definition: preptlist.c:62
RelOptInfo * plan_set_operations(PlannerInfo *root)
Definition: prepunion.c:93
List * RelationGetIndexPredicate(Relation relation)
Definition: relcache.c:5129
List * RelationGetIndexExpressions(Relation relation)
Definition: relcache.c:5016
RelOptInfo * find_base_rel(PlannerInfo *root, int relid)
Definition: relnode.c:414
void setup_simple_rel_arrays(PlannerInfo *root)
Definition: relnode.c:94
RelOptInfo * fetch_upper_rel(PlannerInfo *root, UpperRelationKind kind, Relids relids)
Definition: relnode.c:1458
RelOptInfo * build_simple_rel(PlannerInfo *root, int relid, RelOptInfo *parent)
Definition: relnode.c:192
Node * remove_nulling_relids(Node *node, const Bitmapset *removable_relids, const Bitmapset *except_relids)
@ ForwardScanDirection
Definition: sdir.h:28
double estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, List **pgset, EstimationInfo *estinfo)
Definition: selfuncs.c:3420
double estimate_hashagg_tablesize(PlannerInfo *root, Path *path, const AggClauseCosts *agg_costs, double dNumGroups)
Definition: selfuncs.c:3921
Plan * set_plan_references(PlannerInfo *root, Plan *plan)
Definition: setrefs.c:288
bool extract_query_dependencies_walker(Node *node, PlannerInfo *context)
Definition: setrefs.c:3593
void check_stack_depth(void)
Definition: stack_depth.c:95
List * aggrefs
Definition: pathnodes.h:3387
List * aggdistinct
Definition: primnodes.h:474
List * args
Definition: primnodes.h:468
List * aggorder
Definition: primnodes.h:471
GetForeignRowMarkType_function GetForeignRowMarkType
Definition: fdwapi.h:247
GetForeignUpperPaths_function GetForeignUpperPaths
Definition: fdwapi.h:226
Cardinality limit_tuples
Definition: pathnodes.h:3327
Node * quals
Definition: primnodes.h:2309
List * fromlist
Definition: primnodes.h:2308
int num_workers
Definition: plannodes.h:1144
bool invisible
Definition: plannodes.h:1147
bool single_copy
Definition: plannodes.h:1146
Plan plan
Definition: plannodes.h:1143
int rescan_param
Definition: plannodes.h:1145
PartitionwiseAggregateType patype
Definition: pathnodes.h:3311
AggClauseCosts agg_final_costs
Definition: pathnodes.h:3305
AggClauseCosts agg_partial_costs
Definition: pathnodes.h:3304
Cardinality numGroups
Definition: pathnodes.h:2285
Path path
Definition: pathnodes.h:1721
Definition: pg_list.h:54
Definition: nodes.h:129
List * exprs
Definition: pathnodes.h:1544
List * pathkeys
Definition: pathnodes.h:1677
Cardinality rows
Definition: pathnodes.h:1671
int disabled_nodes
Definition: pathnodes.h:1672
Cost total_cost
Definition: pathnodes.h:1674
LockClauseStrength strength
Definition: plannodes.h:1385
Index prti
Definition: plannodes.h:1381
RowMarkType markType
Definition: plannodes.h:1383
LockWaitPolicy waitPolicy
Definition: plannodes.h:1386
bool isParent
Definition: plannodes.h:1387
Index rowmarkId
Definition: plannodes.h:1382
int allMarkTypes
Definition: plannodes.h:1384
struct Plan * lefttree
Definition: plannodes.h:155
Cost total_cost
Definition: plannodes.h:130
struct Plan * righttree
Definition: plannodes.h:156
bool parallel_aware
Definition: plannodes.h:141
Cost startup_cost
Definition: plannodes.h:129
List * qual
Definition: plannodes.h:154
int plan_width
Definition: plannodes.h:136
bool parallel_safe
Definition: plannodes.h:142
Cardinality plan_rows
Definition: plannodes.h:135
List * targetlist
Definition: plannodes.h:153
List * initPlan
Definition: plannodes.h:157
struct Plan * planTree
Definition: plannodes.h:70
bool hasModifyingCTE
Definition: plannodes.h:58
List * appendRelations
Definition: plannodes.h:80
List * permInfos
Definition: plannodes.h:74
bool canSetTag
Definition: plannodes.h:60
List * rowMarks
Definition: plannodes.h:87
int jitFlags
Definition: plannodes.h:68
Bitmapset * rewindPlanIDs
Definition: plannodes.h:85
ParseLoc stmt_len
Definition: plannodes.h:99
bool hasReturning
Definition: plannodes.h:56
ParseLoc stmt_location
Definition: plannodes.h:98
List * invalItems
Definition: plannodes.h:91
bool transientPlan
Definition: plannodes.h:62
List * resultRelations
Definition: plannodes.h:78
List * subplans
Definition: plannodes.h:82
List * relationOids
Definition: plannodes.h:89
bool dependsOnRole
Definition: plannodes.h:64
CmdType commandType
Definition: plannodes.h:52
Node * utilityStmt
Definition: plannodes.h:95
List * rtable
Definition: plannodes.h:72
List * paramExecTypes
Definition: plannodes.h:93
bool parallelModeNeeded
Definition: plannodes.h:66
uint64 queryId
Definition: plannodes.h:54
int lastPlanNodeId
Definition: pathnodes.h:147
char maxParallelHazard
Definition: pathnodes.h:162
List * subplans
Definition: pathnodes.h:105
bool dependsOnRole
Definition: pathnodes.h:153
List * appendRelations
Definition: pathnodes.h:129
List * finalrowmarks
Definition: pathnodes.h:123
List * invalItems
Definition: pathnodes.h:135
List * relationOids
Definition: pathnodes.h:132
List * paramExecTypes
Definition: pathnodes.h:138
bool parallelModeOK
Definition: pathnodes.h:156
bool transientPlan
Definition: pathnodes.h:150
Bitmapset * rewindPlanIDs
Definition: pathnodes.h:114
List * finalrteperminfos
Definition: pathnodes.h:120
List * subpaths
Definition: pathnodes.h:108
Index lastPHId
Definition: pathnodes.h:141
Index lastRowMarkId
Definition: pathnodes.h:144
List * resultRelations
Definition: pathnodes.h:126
List * finalrtable
Definition: pathnodes.h:117
bool parallelModeNeeded
Definition: pathnodes.h:159
Index query_level
Definition: pathnodes.h:208
Cost per_tuple
Definition: pathnodes.h:48
Cost startup
Definition: pathnodes.h:47
List * rtable
Definition: parsenodes.h:170
CmdType commandType
Definition: parsenodes.h:121
TableFunc * tablefunc
Definition: parsenodes.h:1184
struct TableSampleClause * tablesample
Definition: parsenodes.h:1098
Query * subquery
Definition: parsenodes.h:1104
List * values_lists
Definition: parsenodes.h:1190
JoinType jointype
Definition: parsenodes.h:1151
List * functions
Definition: parsenodes.h:1177
RTEKind rtekind
Definition: parsenodes.h:1047
bool useridiscurrent
Definition: pathnodes.h:968
Relids relids
Definition: pathnodes.h:871
struct PathTarget * reltarget
Definition: pathnodes.h:893
Index relid
Definition: pathnodes.h:918
Cardinality tuples
Definition: pathnodes.h:949
bool consider_parallel
Definition: pathnodes.h:887
BlockNumber pages
Definition: pathnodes.h:948
List * pathlist
Definition: pathnodes.h:898
RelOptKind reloptkind
Definition: pathnodes.h:865
List * indexlist
Definition: pathnodes.h:944
struct Path * cheapest_startup_path
Definition: pathnodes.h:901
struct Path * cheapest_total_path
Definition: pathnodes.h:902
Oid userid
Definition: pathnodes.h:966
Oid serverid
Definition: pathnodes.h:964
Bitmapset * live_parts
Definition: pathnodes.h:1039
int rel_parallel_workers
Definition: pathnodes.h:956
List * partial_pathlist
Definition: pathnodes.h:900
Cardinality rows
Definition: pathnodes.h:877
Form_pg_class rd_rel
Definition: rel.h:111
Cardinality numGroups
Definition: pathnodes.h:2296
List * groupClause
Definition: pathnodes.h:2293
List * gsets_data
Definition: pathnodes.h:2295
bool hashable
Definition: pathnodes.h:2297
List * gsets
Definition: pathnodes.h:2294
bool is_hashed
Definition: pathnodes.h:2298
LockClauseStrength strength
Definition: parsenodes.h:1580
LockWaitPolicy waitPolicy
Definition: parsenodes.h:1581
Index tleSortGroupRef
Definition: parsenodes.h:1438
struct WindowClause * window_clause
Definition: supportnodes.h:339
AttrNumber resno
Definition: primnodes.h:2192
Index ressortgroupref
Definition: primnodes.h:2196
WindowClause * wc
Definition: planner.c:115
Node * startOffset
Definition: parsenodes.h:1547
List * partitionClause
Definition: parsenodes.h:1543
Node * endOffset
Definition: parsenodes.h:1548
List * orderClause
Definition: parsenodes.h:1545
List ** windowFuncs
Definition: clauses.h:23
Index maxWinRef
Definition: clauses.h:22
int numWindowFuncs
Definition: clauses.h:21
Index winref
Definition: primnodes.h:581
Oid winfnoid
Definition: primnodes.h:567
int * tleref_to_colnum_map
Definition: planner.c:106
List * rollups
Definition: planner.c:99
Bitmapset * unhashable_refs
Definition: planner.c:104
List * unsortable_sets
Definition: planner.c:105
List * hash_sets_idx
Definition: planner.c:100
double dNumHashGroups
Definition: planner.c:101
Bitmapset * unsortable_refs
Definition: planner.c:103
Definition: type.h:96
List * activeWindows
Definition: planner.c:123
grouping_sets_data * gset_data
Definition: planner.c:124
SetOperationStmt * setop
Definition: planner.c:125
Definition: regguts.h:323
Node * SS_process_sublinks(PlannerInfo *root, Node *expr, bool isQual)
Definition: subselect.c:1938
void SS_process_ctes(PlannerInfo *root)
Definition: subselect.c:878
void SS_identify_outer_params(PlannerInfo *root)
Definition: subselect.c:2091
Node * SS_replace_correlation_vars(PlannerInfo *root, Node *expr)
Definition: subselect.c:1889
void SS_finalize_plan(PlannerInfo *root, Plan *plan)
Definition: subselect.c:2273
void SS_compute_initplan_cost(List *init_plans, Cost *initplan_cost_p, bool *unsafe_initplans_p)
Definition: subselect.c:2217
void SS_charge_for_initplans(PlannerInfo *root, RelOptInfo *final_rel)
Definition: subselect.c:2153
void table_close(Relation relation, LOCKMODE lockmode)
Definition: table.c:126
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition: table.c:40
bool tlist_same_exprs(List *tlist1, List *tlist2)
Definition: tlist.c:218
SortGroupClause * get_sortgroupref_clause_noerr(Index sortref, List *clauses)
Definition: tlist.c:443
SortGroupClause * get_sortgroupref_clause(Index sortref, List *clauses)
Definition: tlist.c:422
bool grouping_is_sortable(List *groupClause)
Definition: tlist.c:540
PathTarget * copy_pathtarget(PathTarget *src)
Definition: tlist.c:657
void add_new_columns_to_pathtarget(PathTarget *target, List *exprs)
Definition: tlist.c:752
PathTarget * create_empty_pathtarget(void)
Definition: tlist.c:681
List * get_sortgrouplist_exprs(List *sgClauses, List *targetList)
Definition: tlist.c:392
void split_pathtarget_at_srfs(PlannerInfo *root, PathTarget *target, PathTarget *input_target, List **targets, List **targets_contain_srfs)
Definition: tlist.c:881
bool grouping_is_hashable(List *groupClause)
Definition: tlist.c:560
void add_column_to_pathtarget(PathTarget *target, Expr *expr, Index sortgroupref)
Definition: tlist.c:695
#define create_pathtarget(root, tlist)
Definition: tlist.h:53
Node * flatten_group_exprs(PlannerInfo *root, Query *query, Node *node)
Definition: var.c:924
Relids pull_varnos(PlannerInfo *root, Node *node)
Definition: var.c:113
List * pull_var_clause(Node *node, int flags)
Definition: var.c:609
Node * flatten_join_alias_vars(PlannerInfo *root, Query *query, Node *node)
Definition: var.c:745