PostgreSQL Source Code git master
Loading...
Searching...
No Matches
planner.c
Go to the documentation of this file.
1/*-------------------------------------------------------------------------
2 *
3 * planner.c
4 * The query optimizer external interface.
5 *
6 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 *
10 * IDENTIFICATION
11 * src/backend/optimizer/plan/planner.c
12 *
13 *-------------------------------------------------------------------------
14 */
15
16#include "postgres.h"
17
18#include <limits.h>
19#include <math.h>
20
21#include "access/genam.h"
22#include "access/parallel.h"
23#include "access/sysattr.h"
24#include "access/table.h"
26#include "catalog/pg_inherits.h"
27#include "catalog/pg_proc.h"
28#include "catalog/pg_type.h"
29#include "executor/executor.h"
30#include "foreign/fdwapi.h"
31#include "jit/jit.h"
32#include "lib/bipartite_match.h"
33#include "lib/knapsack.h"
34#include "miscadmin.h"
35#include "nodes/makefuncs.h"
36#include "nodes/nodeFuncs.h"
37#ifdef OPTIMIZER_DEBUG
38#include "nodes/print.h"
39#endif
40#include "nodes/supportnodes.h"
42#include "optimizer/clauses.h"
43#include "optimizer/cost.h"
44#include "optimizer/optimizer.h"
46#include "optimizer/pathnode.h"
47#include "optimizer/paths.h"
48#include "optimizer/plancat.h"
49#include "optimizer/planmain.h"
50#include "optimizer/planner.h"
51#include "optimizer/prep.h"
52#include "optimizer/subselect.h"
53#include "optimizer/tlist.h"
54#include "parser/analyze.h"
55#include "parser/parse_agg.h"
56#include "parser/parse_clause.h"
58#include "parser/parsetree.h"
61#include "utils/acl.h"
63#include "utils/lsyscache.h"
64#include "utils/rel.h"
65#include "utils/selfuncs.h"
66
67/* GUC parameters */
72
73/* Hook for plugins to get control in planner() */
75
76/* Hook for plugins to get control after PlannerGlobal is initialized */
78
79/* Hook for plugins to get control before PlannerGlobal is discarded */
81
82/* Hook for plugins to get control when grouping_planner() plans upper rels */
84
85
86/* Expression kind codes for preprocess_expression */
/*
 * Each code tells preprocess_expression() what kind of expression it has
 * been handed.  Visible uses in this file: EXPRKIND_QUAL for qual clauses
 * (e.g. parse->mergeJoinCondition), EXPRKIND_RTFUNC(_LATERAL) for
 * RTE_FUNCTION expressions, and EXPRKIND_VALUES(_LATERAL) for RTE_VALUES
 * lists — the _LATERAL variant is chosen when the owning RTE is marked
 * lateral (see the rtable-preprocessing loop in subquery_planner).  The
 * remaining codes are selected at call sites elided from this extraction;
 * their names indicate the construct they cover (LIMIT clauses, append-rel
 * info, PlaceHolderVars, TABLESAMPLE, ON CONFLICT arbiter elements,
 * table functions, and GROUP BY expressions).
 */
87#define EXPRKIND_QUAL 0
88#define EXPRKIND_TARGET 1
89#define EXPRKIND_RTFUNC 2
90#define EXPRKIND_RTFUNC_LATERAL 3
91#define EXPRKIND_VALUES 4
92#define EXPRKIND_VALUES_LATERAL 5
93#define EXPRKIND_LIMIT 6
94#define EXPRKIND_APPINFO 7
95#define EXPRKIND_PHV 8
96#define EXPRKIND_TABLESAMPLE 9
97#define EXPRKIND_ARBITER_ELEM 10
98#define EXPRKIND_TABLEFUNC 11
99#define EXPRKIND_TABLEFUNC_LATERAL 12
100#define EXPRKIND_GROUPEXPR 13
101
102/*
103 * Data specific to grouping sets
104 */
/* NOTE(review): the grouping_sets_data struct body was lost in this
 * extraction; the type is referenced below as the gset_data field of the
 * standard_qp_callback passthrough struct — confirm against full source. */
116
117/*
118 * Temporary structure for use during WindowClause reordering in order to be
119 * able to sort WindowClauses on partitioning/ordering prefix.
120 */
121typedef struct
122{
124 List *uniqueOrder; /* A List of unique ordering/partitioning
125 * clauses per Window */
/* NOTE(review): the remaining member(s), closing brace, and typedef name of
 * this struct are missing from this extraction — confirm against full
 * source before relying on its layout. */
127
128/* Passthrough data for standard_qp_callback */
129typedef struct
130{
131 List *activeWindows; /* active windows, if any */
132 grouping_sets_data *gset_data; /* grouping sets data, if any */
133 SetOperationStmt *setop; /* parent set operation or NULL if not a
134 * subquery belonging to a set operation */
/* NOTE(review): closing brace and typedef name elided; presumably this is
 * the struct passed as standard_qp_callback's "extra" argument — confirm. */
136
137/* Local functions */
138static Node *preprocess_expression(PlannerInfo *root, Node *expr, int kind);
139static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode);
140static void grouping_planner(PlannerInfo *root, double tuple_fraction,
143static List *remap_to_groupclause_idx(List *groupClause, List *gsets,
144 int *tleref_to_colnum_map);
146static double preprocess_limit(PlannerInfo *root,
147 double tuple_fraction,
148 int64 *offset_est, int64 *count_est);
150static List *extract_rollup_sets(List *groupingSets);
151static List *reorder_grouping_sets(List *groupingSets, List *sortclause);
152static void standard_qp_callback(PlannerInfo *root, void *extra);
154 double path_rows,
159 PathTarget *target,
160 bool target_parallel_safe,
165 RelOptInfo *grouped_rel);
167 PathTarget *target, bool target_parallel_safe,
168 Node *havingQual);
171 RelOptInfo *grouped_rel,
174 GroupPathExtraData *extra,
177 RelOptInfo *grouped_rel,
178 Path *path,
179 bool is_sorted,
180 bool can_hash,
183 double dNumGroups);
190 List *activeWindows);
193 Path *path,
197 List *activeWindows);
200 PathTarget *target);
204 PathTarget *target);
213 PathTarget *target,
214 bool target_parallel_safe,
215 double limit_tuples);
220 Node *havingQual);
225static void name_active_windows(List *activeWindows);
228 List *activeWindows);
230 List *tlist);
233 bool *have_postponed_srfs);
235 List *targets, List *targets_contain_srfs);
237 RelOptInfo *grouped_rel,
241 GroupPathExtraData *extra);
243 RelOptInfo *grouped_rel,
246 GroupPathExtraData *extra,
247 bool force_rel_creation);
249 RelOptInfo *rel,
250 Path *path,
252 List *pathkeys,
253 double limit_tuples);
255static bool can_partial_agg(PlannerInfo *root);
257 RelOptInfo *rel,
261 bool tlist_same_exprs);
264 RelOptInfo *grouped_rel,
269 GroupPathExtraData *extra);
271 List *targetList,
272 List *groupClause);
273static int common_prefix_cmp(const void *a, const void *b);
275 List *targetlist);
277 List *sortPathkeys, List *groupClause,
278 SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel);
280 List *sortPathkeys, List *groupClause,
281 SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel);
282
283
284/*****************************************************************************
285 *
286 * Query optimizer entry point
287 *
288 * Inputs:
289 * parse: an analyzed-and-rewritten query tree for an optimizable statement
290 * query_string: source text for the query tree (used for error reports)
291 * cursorOptions: bitmask of CURSOR_OPT_XXX flags, see parsenodes.h
292 * boundParams: passed-in parameter values, or NULL if none
293 * es: ExplainState if being called from EXPLAIN, else NULL
294 *
295 * The result is a PlannedStmt tree.
296 *
297 * PARAM_EXTERN Param nodes within the parse tree can be replaced by Consts
298 * using values from boundParams, if those values are marked PARAM_FLAG_CONST.
299 * Parameter values not so marked are still relied on for estimation purposes.
300 *
301 * The ExplainState pointer is not currently used by the core planner, but it
302 * is passed through to some planner hooks so that they can report information
303 * back to EXPLAIN extension hooks.
304 *
305 * To support loadable plugins that monitor or modify planner behavior,
306 * we provide a hook variable that lets a plugin get control before and
307 * after the standard planning process. The plugin would normally call
308 * standard_planner().
309 *
310 * Note to plugin authors: standard_planner() scribbles on its Query input,
311 * so you'd better copy that data structure if you want to plan more than once.
312 *
313 *****************************************************************************/
/*
 * planner
 *    Query-optimizer entry point: dispatches to planner_hook if a plugin
 *    has installed one, otherwise runs the in-core standard_planner(),
 *    then reports the resulting plan's ID and returns the result.
 *
 * NOTE(review): garbled doc-site extraction — the function's return-type
 * line and the declaration of "result" (a PlannedStmt *, judging from its
 * use below; confirm against full source) were lost.
 */
315planner(Query *parse, const char *query_string, int cursorOptions,
316 ParamListInfo boundParams, ExplainState *es)
317{
319
320 if (planner_hook)
321 result = (*planner_hook) (parse, query_string, cursorOptions,
322 boundParams, es);
323 else
324 result = standard_planner(parse, query_string, cursorOptions,
325 boundParams, es);
326
 /* record the chosen plan's ID — presumably in the pgstat subsystem,
  * per the pgstat_ prefix; confirm against pgstat documentation */
327 pgstat_report_plan_id(result->planId, false);
328
329 return result;
330}
331
/*
 * standard_planner
 *    The in-core planner implementation.  Sets up the per-invocation
 *    PlannerGlobal, assesses parallel-mode feasibility, chooses the tuple
 *    fraction to optimize for, invokes subquery_planner() on the query,
 *    optionally wraps the plan in Material (scrollable cursors) or Gather
 *    (debug_parallel_query), finalizes Params and plan references, and
 *    packages everything into a PlannedStmt.
 *
 * NOTE(review): this is a garbled documentation-site extraction; a number
 * of original source lines (local declarations, condition tails, and whole
 * statements) are missing from this view.  The code below is reproduced
 * verbatim, elisions included — each is flagged where noticed.
 */
333standard_planner(Query *parse, const char *query_string, int cursorOptions,
334 ParamListInfo boundParams, ExplainState *es)
335{
/* NOTE(review): several local declarations were elided here — at least the
 * PlannedStmt "result", the PlannerInfo "root", and whatever Path variable
 * feeds create_plan(); confirm against the full source. */
337 PlannerGlobal *glob;
338 double tuple_fraction;
342 Plan *top_plan;
343 ListCell *lp,
344 *lr,
345 *lc;
346
347 /*
348 * Set up global state for this planner invocation. This data is needed
349 * across all levels of sub-Query that might exist in the given command,
350 * so we keep it in a separate struct that's linked to by each per-Query
351 * PlannerInfo.
352 */
353 glob = makeNode(PlannerGlobal);
354
355 glob->boundParams = boundParams;
356 glob->subplans = NIL;
357 glob->subpaths = NIL;
358 glob->subroots = NIL;
359 glob->rewindPlanIDs = NULL;
360 glob->finalrtable = NIL;
361 glob->allRelids = NULL;
362 glob->prunableRelids = NULL;
363 glob->finalrteperminfos = NIL;
364 glob->finalrowmarks = NIL;
365 glob->resultRelations = NIL;
366 glob->appendRelations = NIL;
367 glob->partPruneInfos = NIL;
368 glob->relationOids = NIL;
369 glob->invalItems = NIL;
370 glob->paramExecTypes = NIL;
371 glob->lastPHId = 0;
372 glob->lastRowMarkId = 0;
373 glob->lastPlanNodeId = 0;
374 glob->transientPlan = false;
375 glob->dependsOnRole = false;
376 glob->partition_directory = NULL;
377 glob->rel_notnullatts_hash = NULL;
378
379 /*
380 * Assess whether it's feasible to use parallel mode for this query. We
381 * can't do this in a standalone backend, or if the command will try to
382 * modify any data, or if this is a cursor operation, or if GUCs are set
383 * to values that don't permit parallelism, or if parallel-unsafe
384 * functions are present in the query tree.
385 *
386 * (Note that we do allow CREATE TABLE AS, SELECT INTO, and CREATE
387 * MATERIALIZED VIEW to use parallel plans, but this is safe only because
388 * the command is writing into a completely new table which workers won't
389 * be able to see. If the workers could see the table, the fact that
390 * group locking would cause them to ignore the leader's heavyweight GIN
391 * page locks would make this unsafe. We'll have to fix that somehow if
392 * we want to allow parallel inserts in general; updates and deletes have
393 * additional problems especially around combo CIDs.)
394 *
395 * For now, we don't try to use parallel mode if we're running inside a
396 * parallel worker. We might eventually be able to relax this
397 * restriction, but for now it seems best not to have parallel workers
398 * trying to create their own parallel workers.
399 */
/* NOTE(review): part of this if-condition and the parallelModeOK
 * assignment in its then-branch were elided by the extraction. */
400 if ((cursorOptions & CURSOR_OPT_PARALLEL_OK) != 0 &&
402 parse->commandType == CMD_SELECT &&
403 !parse->hasModifyingCTE &&
406 {
407 /* all the cheap tests pass, so scan the query tree */
410 }
411 else
412 {
413 /* skip the query tree scan, just assume it's unsafe */
415 glob->parallelModeOK = false;
416 }
417
418 /*
419 * glob->parallelModeNeeded is normally set to false here and changed to
420 * true during plan creation if a Gather or Gather Merge plan is actually
421 * created (cf. create_gather_plan, create_gather_merge_plan).
422 *
423 * However, if debug_parallel_query = on or debug_parallel_query =
424 * regress, then we impose parallel mode whenever it's safe to do so, even
425 * if the final plan doesn't use parallelism. It's not safe to do so if
426 * the query contains anything parallel-unsafe; parallelModeOK will be
427 * false in that case. Note that parallelModeOK can't change after this
428 * point. Otherwise, everything in the query is either parallel-safe or
429 * parallel-restricted, and in either case it should be OK to impose
430 * parallel-mode restrictions. If that ends up breaking something, then
431 * either some function the user included in the query is incorrectly
432 * labeled as parallel-safe or parallel-restricted when in reality it's
433 * parallel-unsafe, or else the query planner itself has a bug.
434 */
/* NOTE(review): the right-hand tail of this assignment (the
 * debug_parallel_query test, per the comment above) was elided. */
435 glob->parallelModeNeeded = glob->parallelModeOK &&
437
438 /* Determine what fraction of the plan is likely to be scanned */
439 if (cursorOptions & CURSOR_OPT_FAST_PLAN)
440 {
441 /*
442 * We have no real idea how many tuples the user will ultimately FETCH
443 * from a cursor, but it is often the case that he doesn't want 'em
444 * all, or would prefer a fast-start plan anyway so that he can
445 * process some of the tuples sooner. Use a GUC parameter to decide
446 * what fraction to optimize for.
447 */
448 tuple_fraction = cursor_tuple_fraction;
449
450 /*
451 * We document cursor_tuple_fraction as simply being a fraction, which
452 * means the edge cases 0 and 1 have to be treated specially here. We
453 * convert 1 to 0 ("all the tuples") and 0 to a very small fraction.
454 */
455 if (tuple_fraction >= 1.0)
456 tuple_fraction = 0.0;
457 else if (tuple_fraction <= 0.0)
458 tuple_fraction = 1e-10;
459 }
460 else
461 {
462 /* Default assumption is we need all the tuples */
463 tuple_fraction = 0.0;
464 }
465
466 /*
467 * Compute the initial path generation strategy mask.
468 *
469 * Some strategies, such as PGS_FOREIGNJOIN, have no corresponding enable_*
470 * GUC, and so the corresponding bits are always set in the default
471 * strategy mask.
472 *
473 * It may seem surprising that enable_indexscan sets both PGS_INDEXSCAN
474 * and PGS_INDEXONLYSCAN. However, the historical behavior of this GUC
475 * corresponds to this exactly: enable_indexscan=off disables both
476 * index-scan and index-only scan paths, whereas enable_indexonlyscan=off
477 * converts the index-only scan paths that we would have considered into
478 * index scan paths.
479 */
/* NOTE(review): nearly all of the strategy-mask statements were elided;
 * only the guarding enable_* conditions survive below.  Do not read this
 * fragment as complete logic — consult the full source. */
482 if (enable_tidscan)
484 if (enable_seqscan)
493 {
495 if (enable_material)
497 }
498 if (enable_nestloop)
499 {
501 if (enable_material)
503 if (enable_memoize)
505 }
506 if (enable_hashjoin)
512
513 /* Allow plugins to take control after we've initialized "glob" */
/* NOTE(review): the guard testing planner_setup_hook for non-NULL was
 * presumably elided just above this call — confirm against full source. */
515 (*planner_setup_hook) (glob, parse, query_string, cursorOptions,
516 &tuple_fraction, es);
517
518 /* primary planning entry point (may recurse for subqueries) */
519 root = subquery_planner(glob, parse, NULL, NULL, NULL, false,
520 tuple_fraction, NULL);
521
522 /* Select best Path and turn it into a Plan */
/* NOTE(review): the statements that fetch the final upperrel, pick the
 * cheapest path, and call create_plan() to set top_plan were elided. */
525
527
528 /*
529 * If creating a plan for a scrollable cursor, make sure it can run
530 * backwards on demand. Add a Material node at the top at need.
531 */
532 if (cursorOptions & CURSOR_OPT_SCROLL)
533 {
/* NOTE(review): the materialize_finished_plan() call (or equivalent) in
 * this branch was elided. */
536 }
537
538 /*
539 * Optionally add a Gather node for testing purposes, provided this is
540 * actually a safe thing to do.
541 *
542 * We can add Gather even when top_plan has parallel-safe initPlans, but
543 * then we have to move the initPlans to the Gather node because of
544 * SS_finalize_plan's limitations. That would cause cosmetic breakage of
545 * regression tests when debug_parallel_query = regress, because initPlans
546 * that would normally appear on the top_plan move to the Gather, causing
547 * them to disappear from EXPLAIN output. That doesn't seem worth kluging
548 * EXPLAIN to hide, so skip it when debug_parallel_query = regress.
549 */
/* NOTE(review): the leading debug_parallel_query test of this condition,
 * plus the declarations of the Gather node and the initplan cost local
 * used below, were elided. */
551 top_plan->parallel_safe &&
552 (top_plan->initPlan == NIL ||
554 {
557 bool unsafe_initplans;
558
559 gather->plan.targetlist = top_plan->targetlist;
560 gather->plan.qual = NIL;
561 gather->plan.lefttree = top_plan;
562 gather->plan.righttree = NULL;
563 gather->num_workers = 1;
564 gather->single_copy = true;
566
567 /* Transfer any initPlans to the new top node */
568 gather->plan.initPlan = top_plan->initPlan;
569 top_plan->initPlan = NIL;
570
571 /*
572 * Since this Gather has no parallel-aware descendants to signal to,
573 * we don't need a rescan Param.
574 */
575 gather->rescan_param = -1;
576
577 /*
578 * Ideally we'd use cost_gather here, but setting up dummy path data
579 * to satisfy it doesn't seem much cleaner than knowing what it does.
580 */
581 gather->plan.startup_cost = top_plan->startup_cost +
583 gather->plan.total_cost = top_plan->total_cost +
585 gather->plan.plan_rows = top_plan->plan_rows;
586 gather->plan.plan_width = top_plan->plan_width;
587 gather->plan.parallel_aware = false;
588 gather->plan.parallel_safe = false;
589
590 /*
591 * Delete the initplans' cost from top_plan. We needn't add it to the
592 * Gather node, since the above coding already included it there.
593 */
594 SS_compute_initplan_cost(gather->plan.initPlan,
596 top_plan->startup_cost -= initplan_cost;
597 top_plan->total_cost -= initplan_cost;
598
599 /* use parallel mode for parallel plans. */
600 root->glob->parallelModeNeeded = true;
601
602 top_plan = &gather->plan;
603 }
604
605 /*
606 * If any Params were generated, run through the plan tree and compute
607 * each plan node's extParam/allParam sets. Ideally we'd merge this into
608 * set_plan_references' tree traversal, but for now it has to be separate
609 * because we need to visit subplans before not after main plan.
610 */
611 if (glob->paramExecTypes != NIL)
612 {
613 Assert(list_length(glob->subplans) == list_length(glob->subroots));
614 forboth(lp, glob->subplans, lr, glob->subroots)
615 {
616 Plan *subplan = (Plan *) lfirst(lp);
618
619 SS_finalize_plan(subroot, subplan);
620 }
622 }
623
624 /* final cleanup of the plan */
625 Assert(glob->finalrtable == NIL);
626 Assert(glob->finalrteperminfos == NIL);
627 Assert(glob->finalrowmarks == NIL);
628 Assert(glob->resultRelations == NIL);
629 Assert(glob->appendRelations == NIL);
631 /* ... and the subplans (both regular subplans and initplans) */
632 Assert(list_length(glob->subplans) == list_length(glob->subroots));
633 forboth(lp, glob->subplans, lr, glob->subroots)
634 {
635 Plan *subplan = (Plan *) lfirst(lp);
637
638 lfirst(lp) = set_plan_references(subroot, subplan);
639 }
640
641 /* build the PlannedStmt result */
/* NOTE(review): the makeNode(PlannedStmt) assignment to "result" was
 * elided here. */
643
644 result->commandType = parse->commandType;
645 result->queryId = parse->queryId;
646 result->planOrigin = PLAN_STMT_STANDARD;
647 result->hasReturning = (parse->returningList != NIL);
648 result->hasModifyingCTE = parse->hasModifyingCTE;
649 result->canSetTag = parse->canSetTag;
650 result->transientPlan = glob->transientPlan;
651 result->dependsOnRole = glob->dependsOnRole;
652 result->parallelModeNeeded = glob->parallelModeNeeded;
653 result->planTree = top_plan;
654 result->partPruneInfos = glob->partPruneInfos;
655 result->rtable = glob->finalrtable;
656 result->unprunableRelids = bms_difference(glob->allRelids,
657 glob->prunableRelids);
658 result->permInfos = glob->finalrteperminfos;
659 result->subrtinfos = glob->subrtinfos;
660 result->appendRelations = glob->appendRelations;
661 result->subplans = glob->subplans;
662 result->rewindPlanIDs = glob->rewindPlanIDs;
663 result->rowMarks = glob->finalrowmarks;
664
665 /*
666 * Compute resultRelationRelids and rowMarkRelids from resultRelations and
667 * rowMarks. These can be used for cheap membership checks.
668 */
669 foreach(lc, glob->resultRelations)
670 result->resultRelationRelids = bms_add_member(result->resultRelationRelids,
671 lfirst_int(lc));
672 foreach(lc, glob->finalrowmarks)
673 result->rowMarkRelids = bms_add_member(result->rowMarkRelids,
674 ((PlanRowMark *) lfirst(lc))->rti);
675
676 result->relationOids = glob->relationOids;
677 result->invalItems = glob->invalItems;
678 result->paramExecTypes = glob->paramExecTypes;
679 /* utilityStmt should be null, but we might as well copy it */
680 result->utilityStmt = parse->utilityStmt;
681 result->elidedNodes = glob->elidedNodes;
682 result->stmt_location = parse->stmt_location;
683 result->stmt_len = parse->stmt_len;
684
685 result->jitFlags = PGJIT_NONE;
686 if (jit_enabled && jit_above_cost >= 0 &&
687 top_plan->total_cost > jit_above_cost)
688 {
689 result->jitFlags |= PGJIT_PERFORM;
690
691 /*
692 * Decide how much effort should be put into generating better code.
693 */
694 if (jit_optimize_above_cost >= 0 &&
695 top_plan->total_cost > jit_optimize_above_cost)
696 result->jitFlags |= PGJIT_OPT3;
697 if (jit_inline_above_cost >= 0 &&
698 top_plan->total_cost > jit_inline_above_cost)
699 result->jitFlags |= PGJIT_INLINE;
700
701 /*
702 * Decide which operations should be JITed.
703 */
704 if (jit_expressions)
705 result->jitFlags |= PGJIT_EXPR;
/* NOTE(review): the guard for the PGJIT_DEFORM bit (presumably an
 * if-test on a jit tuple-deforming GUC) was elided just above. */
707 result->jitFlags |= PGJIT_DEFORM;
708 }
709
710 /* Allow plugins to take control before we discard "glob" */
/* NOTE(review): the non-NULL test of planner_shutdown_hook was presumably
 * elided just above this call — confirm against full source. */
712 (*planner_shutdown_hook) (glob, parse, query_string, result);
713
714 if (glob->partition_directory != NULL)
715 DestroyPartitionDirectory(glob->partition_directory);
716
717 return result;
718}
719
720
721/*--------------------
722 * subquery_planner
723 * Invokes the planner on a subquery. We recurse to here for each
724 * sub-SELECT found in the query tree.
725 *
726 * glob is the global state for the current planner run.
727 * parse is the querytree produced by the parser & rewriter.
728 * plan_name is the name to assign to this subplan (NULL at the top level).
729 * parent_root is the immediate parent Query's info (NULL at the top level).
730 * alternative_root is a previously created PlannerInfo for which this query
731 * level is an alternative implementation, or else NULL.
732 * hasRecursion is true if this is a recursive WITH query.
733 * tuple_fraction is the fraction of tuples we expect will be retrieved.
734 * tuple_fraction is interpreted as explained for grouping_planner, below.
735 * setops is used for set operation subqueries to provide the subquery with
736 * the context in which it's being used so that Paths correctly sorted for the
737 * set operation can be generated. NULL when not planning a set operation
738 * child, or when a child of a set op that isn't interested in sorted input.
739 *
740 * Basically, this routine does the stuff that should only be done once
741 * per Query object. It then calls grouping_planner. At one time,
742 * grouping_planner could be invoked recursively on the same Query object;
743 * that's not currently true, but we keep the separation between the two
744 * routines anyway, in case we need it again someday.
745 *
746 * subquery_planner will be called recursively to handle sub-Query nodes
747 * found within the query's expressions and rangetable.
748 *
749 * Returns the PlannerInfo struct ("root") that contains all data generated
750 * while planning the subquery. In particular, the Path(s) attached to
751 * the (UPPERREL_FINAL, NULL) upperrel represent our conclusions about the
752 * cheapest way(s) to implement the query. The top level will select the
753 * best Path and pass it through createplan.c to produce a finished Plan.
754 *--------------------
755 */
757subquery_planner(PlannerGlobal *glob, Query *parse, char *plan_name,
759 bool hasRecursion, double tuple_fraction,
761{
765 bool hasOuterJoins;
766 bool hasResultRTEs;
768 ListCell *l;
769
770 /* Create a PlannerInfo data structure for this subquery */
772 root->parse = parse;
773 root->glob = glob;
774 root->query_level = parent_root ? parent_root->query_level + 1 : 1;
775 root->plan_name = plan_name;
776 if (alternative_root != NULL)
777 root->alternative_plan_name = alternative_root->plan_name;
778 else
779 root->alternative_plan_name = plan_name;
780 root->parent_root = parent_root;
781 root->plan_params = NIL;
782 root->outer_params = NULL;
783 root->planner_cxt = CurrentMemoryContext;
784 root->init_plans = NIL;
785 root->cte_plan_ids = NIL;
786 root->multiexpr_params = NIL;
787 root->join_domains = NIL;
788 root->eq_classes = NIL;
789 root->ec_merging_done = false;
790 root->last_rinfo_serial = 0;
791 root->all_result_relids =
792 parse->resultRelation ? bms_make_singleton(parse->resultRelation) : NULL;
793 root->leaf_result_relids = NULL; /* we'll find out leaf-ness later */
794 root->append_rel_list = NIL;
795 root->row_identity_vars = NIL;
796 root->rowMarks = NIL;
797 memset(root->upper_rels, 0, sizeof(root->upper_rels));
798 memset(root->upper_targets, 0, sizeof(root->upper_targets));
799 root->processed_groupClause = NIL;
800 root->processed_distinctClause = NIL;
801 root->processed_tlist = NIL;
802 root->update_colnos = NIL;
803 root->grouping_map = NULL;
804 root->minmax_aggs = NIL;
805 root->qual_security_level = 0;
806 root->hasPseudoConstantQuals = false;
807 root->hasAlternativeSubPlans = false;
808 root->placeholdersFrozen = false;
809 root->hasRecursion = hasRecursion;
810 root->assumeReplanning = false;
811 if (hasRecursion)
812 root->wt_param_id = assign_special_exec_param(root);
813 else
814 root->wt_param_id = -1;
815 root->non_recursive_path = NULL;
816
817 /*
818 * Create the top-level join domain. This won't have valid contents until
819 * deconstruct_jointree fills it in, but the node needs to exist before
820 * that so we can build EquivalenceClasses referencing it.
821 */
822 root->join_domains = list_make1(makeNode(JoinDomain));
823
824 /*
825 * If there is a WITH list, process each WITH query and either convert it
826 * to RTE_SUBQUERY RTE(s) or build an initplan SubPlan structure for it.
827 */
828 if (parse->cteList)
830
831 /*
832 * If it's a MERGE command, transform the joinlist as appropriate.
833 */
835
836 /*
837 * Scan the rangetable for relation RTEs and retrieve the necessary
838 * catalog information for each relation. Using this information, clear
839 * the inh flag for any relation that has no children, collect not-null
840 * attribute numbers for any relation that has column not-null
841 * constraints, and expand virtual generated columns for any relation that
842 * contains them. Note that this step does not descend into sublinks and
843 * subqueries; if we pull up any sublinks or subqueries below, their
844 * relation RTEs are processed just before pulling them up.
845 */
847
848 /*
849 * If the FROM clause is empty, replace it with a dummy RTE_RESULT RTE, so
850 * that we don't need so many special cases to deal with that situation.
851 */
853
854 /*
855 * Look for ANY and EXISTS SubLinks in WHERE and JOIN/ON clauses, and try
856 * to transform them into joins. Note that this step does not descend
857 * into subqueries; if we pull up any subqueries below, their SubLinks are
858 * processed just before pulling them up.
859 */
860 if (parse->hasSubLinks)
862
863 /*
864 * Scan the rangetable for function RTEs, do const-simplification on them,
865 * and then inline them if possible (producing subqueries that might get
866 * pulled up next). Recursion issues here are handled in the same way as
867 * for SubLinks.
868 */
870
871 /*
872 * Check to see if any subqueries in the jointree can be merged into this
873 * query.
874 */
876
877 /*
878 * If this is a simple UNION ALL query, flatten it into an appendrel. We
879 * do this now because it requires applying pull_up_subqueries to the leaf
880 * queries of the UNION ALL, which weren't touched above because they
881 * weren't referenced by the jointree (they will be after we do this).
882 */
883 if (parse->setOperations)
885
886 /*
887 * Survey the rangetable to see what kinds of entries are present. We can
888 * skip some later processing if relevant SQL features are not used; for
889 * example if there are no JOIN RTEs we can avoid the expense of doing
890 * flatten_join_alias_vars(). This must be done after we have finished
891 * adding rangetable entries, of course. (Note: actually, processing of
892 * inherited or partitioned rels can cause RTEs for their child tables to
893 * get added later; but those must all be RTE_RELATION entries, so they
894 * don't invalidate the conclusions drawn here.)
895 */
896 root->hasJoinRTEs = false;
897 root->hasLateralRTEs = false;
898 root->group_rtindex = 0;
899 hasOuterJoins = false;
900 hasResultRTEs = false;
901 foreach(l, parse->rtable)
902 {
904
905 switch (rte->rtekind)
906 {
907 case RTE_JOIN:
908 root->hasJoinRTEs = true;
909 if (IS_OUTER_JOIN(rte->jointype))
910 hasOuterJoins = true;
911 break;
912 case RTE_RESULT:
913 hasResultRTEs = true;
914 break;
915 case RTE_GROUP:
916 Assert(parse->hasGroupRTE);
917 root->group_rtindex = list_cell_number(parse->rtable, l) + 1;
918 break;
919 default:
920 /* No work here for other RTE types */
921 break;
922 }
923
924 if (rte->lateral)
925 root->hasLateralRTEs = true;
926
927 /*
928 * We can also determine the maximum security level required for any
929 * securityQuals now. Addition of inheritance-child RTEs won't affect
930 * this, because child tables don't have their own securityQuals; see
931 * expand_single_inheritance_child().
932 */
933 if (rte->securityQuals)
934 root->qual_security_level = Max(root->qual_security_level,
935 list_length(rte->securityQuals));
936 }
937
938 /*
939 * If we have now verified that the query target relation is
940 * non-inheriting, mark it as a leaf target.
941 */
942 if (parse->resultRelation)
943 {
944 RangeTblEntry *rte = rt_fetch(parse->resultRelation, parse->rtable);
945
946 if (!rte->inh)
947 root->leaf_result_relids =
948 bms_make_singleton(parse->resultRelation);
949 }
950
951 /*
952 * This would be a convenient time to check access permissions for all
953 * relations mentioned in the query, since it would be better to fail now,
954 * before doing any detailed planning. However, for historical reasons,
955 * we leave this to be done at executor startup.
956 *
957 * Note, however, that we do need to check access permissions for any view
958 * relations mentioned in the query, in order to prevent information being
959 * leaked by selectivity estimation functions, which only check view owner
960 * permissions on underlying tables (see all_rows_selectable() and its
961 * callers). This is a little ugly, because it means that access
962 * permissions for views will be checked twice, which is another reason
963 * why it would be better to do all the ACL checks here.
964 */
965 foreach(l, parse->rtable)
966 {
968
969 if (rte->perminfoindex != 0 &&
970 rte->relkind == RELKIND_VIEW)
971 {
973 bool result;
974
975 perminfo = getRTEPermissionInfo(parse->rteperminfos, rte);
977 if (!result)
979 get_rel_name(perminfo->relid));
980 }
981 }
982
983 /*
984 * Preprocess RowMark information. We need to do this after subquery
985 * pullup, so that all base relations are present.
986 */
988
989 /*
990 * Set hasHavingQual to remember if HAVING clause is present. Needed
991 * because preprocess_expression will reduce a constant-true condition to
992 * an empty qual list ... but "HAVING TRUE" is not a semantic no-op.
993 */
994 root->hasHavingQual = (parse->havingQual != NULL);
995
996 /*
997 * Do expression preprocessing on targetlist and quals, as well as other
998 * random expressions in the querytree. Note that we do not need to
999 * handle sort/group expressions explicitly, because they are actually
1000 * part of the targetlist.
1001 */
1002 parse->targetList = (List *)
1003 preprocess_expression(root, (Node *) parse->targetList,
1005
1007 foreach(l, parse->withCheckOptions)
1008 {
1010
1011 wco->qual = preprocess_expression(root, wco->qual,
1013 if (wco->qual != NULL)
1015 }
1016 parse->withCheckOptions = newWithCheckOptions;
1017
1018 parse->returningList = (List *)
1019 preprocess_expression(root, (Node *) parse->returningList,
1021
1022 preprocess_qual_conditions(root, (Node *) parse->jointree);
1023
1024 parse->havingQual = preprocess_expression(root, parse->havingQual,
1026
1027 foreach(l, parse->windowClause)
1028 {
1030
1031 /* partitionClause/orderClause are sort/group expressions */
1036 }
1037
1038 parse->limitOffset = preprocess_expression(root, parse->limitOffset,
1040 parse->limitCount = preprocess_expression(root, parse->limitCount,
1042
1043 if (parse->onConflict)
1044 {
1045 parse->onConflict->arbiterElems = (List *)
1047 (Node *) parse->onConflict->arbiterElems,
1049 parse->onConflict->arbiterWhere =
1051 parse->onConflict->arbiterWhere,
1053 parse->onConflict->onConflictSet = (List *)
1055 (Node *) parse->onConflict->onConflictSet,
1057 parse->onConflict->onConflictWhere =
1059 parse->onConflict->onConflictWhere,
1061 /* exclRelTlist contains only Vars, so no preprocessing needed */
1062 }
1063
1064 foreach(l, parse->mergeActionList)
1065 {
1066 MergeAction *action = (MergeAction *) lfirst(l);
1067
1068 action->targetList = (List *)
1070 (Node *) action->targetList,
1072 action->qual =
1074 (Node *) action->qual,
1076 }
1077
1078 parse->mergeJoinCondition =
1079 preprocess_expression(root, parse->mergeJoinCondition, EXPRKIND_QUAL);
1080
1081 root->append_rel_list = (List *)
1082 preprocess_expression(root, (Node *) root->append_rel_list,
1084
1085 /* Also need to preprocess expressions within RTEs */
1086 foreach(l, parse->rtable)
1087 {
1089 int kind;
1090 ListCell *lcsq;
1091
1092 if (rte->rtekind == RTE_RELATION)
1093 {
1094 if (rte->tablesample)
1095 rte->tablesample = (TableSampleClause *)
1097 (Node *) rte->tablesample,
1099 }
1100 else if (rte->rtekind == RTE_SUBQUERY)
1101 {
1102 /*
1103 * We don't want to do all preprocessing yet on the subquery's
1104 * expressions, since that will happen when we plan it. But if it
1105 * contains any join aliases of our level, those have to get
1106 * expanded now, because planning of the subquery won't do it.
1107 * That's only possible if the subquery is LATERAL.
1108 */
1109 if (rte->lateral && root->hasJoinRTEs)
1110 rte->subquery = (Query *)
1112 (Node *) rte->subquery);
1113 }
1114 else if (rte->rtekind == RTE_FUNCTION)
1115 {
1116 /* Preprocess the function expression(s) fully */
1117 kind = rte->lateral ? EXPRKIND_RTFUNC_LATERAL : EXPRKIND_RTFUNC;
1118 rte->functions = (List *)
1119 preprocess_expression(root, (Node *) rte->functions, kind);
1120 }
1121 else if (rte->rtekind == RTE_TABLEFUNC)
1122 {
1123 /* Preprocess the function expression(s) fully */
1125 rte->tablefunc = (TableFunc *)
1126 preprocess_expression(root, (Node *) rte->tablefunc, kind);
1127 }
1128 else if (rte->rtekind == RTE_VALUES)
1129 {
1130 /* Preprocess the values lists fully */
1131 kind = rte->lateral ? EXPRKIND_VALUES_LATERAL : EXPRKIND_VALUES;
1132 rte->values_lists = (List *)
1133 preprocess_expression(root, (Node *) rte->values_lists, kind);
1134 }
1135 else if (rte->rtekind == RTE_GROUP)
1136 {
1137 /* Preprocess the groupexprs list fully */
1138 rte->groupexprs = (List *)
1139 preprocess_expression(root, (Node *) rte->groupexprs,
1141 }
1142
1143 /*
1144 * Process each element of the securityQuals list as if it were a
1145 * separate qual expression (as indeed it is). We need to do it this
1146 * way to get proper canonicalization of AND/OR structure. Note that
1147 * this converts each element into an implicit-AND sublist.
1148 */
1149 foreach(lcsq, rte->securityQuals)
1150 {
1152 (Node *) lfirst(lcsq),
1154 }
1155 }
1156
1157 /*
1158 * Now that we are done preprocessing expressions, and in particular done
1159 * flattening join alias variables, get rid of the joinaliasvars lists.
1160 * They no longer match what expressions in the rest of the tree look
1161 * like, because we have not preprocessed expressions in those lists (and
1162 * do not want to; for example, expanding a SubLink there would result in
1163 * a useless unreferenced subplan). Leaving them in place simply creates
1164 * a hazard for later scans of the tree. We could try to prevent that by
1165 * using QTW_IGNORE_JOINALIASES in every tree scan done after this point,
1166 * but that doesn't sound very reliable.
1167 */
1168 if (root->hasJoinRTEs)
1169 {
1170 foreach(l, parse->rtable)
1171 {
1173
1174 rte->joinaliasvars = NIL;
1175 }
1176 }
1177
1178 /*
1179 * Replace any Vars in the subquery's targetlist and havingQual that
1180 * reference GROUP outputs with the underlying grouping expressions.
1181 *
1182 * Note that we need to perform this replacement after we've preprocessed
1183 * the grouping expressions. This is to ensure that there is only one
1184 * instance of SubPlan for each SubLink contained within the grouping
1185 * expressions.
1186 */
1187 if (parse->hasGroupRTE)
1188 {
1189 parse->targetList = (List *)
1190 flatten_group_exprs(root, root->parse, (Node *) parse->targetList);
1191 parse->havingQual =
1192 flatten_group_exprs(root, root->parse, parse->havingQual);
1193 }
1194
1195 /* Constant-folding might have removed all set-returning functions */
1196 if (parse->hasTargetSRFs)
1197 parse->hasTargetSRFs = expression_returns_set((Node *) parse->targetList);
1198
1199 /*
1200 * If we have grouping sets, expand the groupingSets tree of this query to
1201 * a flat list of grouping sets. We need to do this before optimizing
1202 * HAVING, since we can't easily tell if there's an empty grouping set
1203 * until we have this representation.
1204 */
1205 if (parse->groupingSets)
1206 {
1207 parse->groupingSets =
1208 expand_grouping_sets(parse->groupingSets, parse->groupDistinct, -1);
1209 }
1210
1211 /*
1212 * In some cases we may want to transfer a HAVING clause into WHERE. We
1213 * cannot do so if the HAVING clause contains aggregates (obviously) or
1214 * volatile functions (since a HAVING clause is supposed to be executed
1215 * only once per group). We also can't do this if there are any grouping
1216 * sets and the clause references any columns that are nullable by the
1217 * grouping sets; the nulled values of those columns are not available
1218 * before the grouping step. (The test on groupClause might seem wrong,
1219 * but it's okay: it's just an optimization to avoid running pull_varnos
1220 * when there cannot be any Vars in the HAVING clause.)
1221 *
1222 * Also, it may be that the clause is so expensive to execute that we're
1223 * better off doing it only once per group, despite the loss of
1224 * selectivity. This is hard to estimate short of doing the entire
1225 * planning process twice, so we use a heuristic: clauses containing
1226 * subplans are left in HAVING. Otherwise, we move or copy the HAVING
1227 * clause into WHERE, in hopes of eliminating tuples before aggregation
1228 * instead of after.
1229 *
1230 * If the query has no empty grouping set then we can simply move such a
1231 * clause into WHERE; any group that fails the clause will not be in the
1232 * output because none of its tuples will reach the grouping or
1233 * aggregation stage. Otherwise we have to keep the clause in HAVING to
1234 * ensure that we don't emit a bogus aggregated row. But then the HAVING
1235 * clause must be degenerate (variable-free), so we can copy it into WHERE
1236 * so that query_planner() can use it in a gating Result node. (This could
1237 * be done better, but it seems not worth optimizing.)
1238 *
1239 * Note that a HAVING clause may contain expressions that are not fully
1240 * preprocessed. This can happen if these expressions are part of
1241 * grouping items. In such cases, they are replaced with GROUP Vars in
1242 * the parser and then replaced back after we're done with expression
1243 * preprocessing on havingQual. This is not an issue if the clause
1244 * remains in HAVING, because these expressions will be matched to lower
1245 * target items in setrefs.c. However, if the clause is moved or copied
1246 * into WHERE, we need to ensure that these expressions are fully
1247 * preprocessed.
1248 *
1249 * Note that both havingQual and parse->jointree->quals are in
1250 * implicitly-ANDed-list form at this point, even though they are declared
1251 * as Node *.
1252 */
1253 newHaving = NIL;
1254 foreach(l, (List *) parse->havingQual)
1255 {
1256 Node *havingclause = (Node *) lfirst(l);
1257
1261 (parse->groupClause && parse->groupingSets &&
1262 bms_is_member(root->group_rtindex, pull_varnos(root, havingclause))))
1263 {
1264 /* keep it in HAVING */
1266 }
1267 else if (parse->groupClause &&
1268 (parse->groupingSets == NIL ||
1269 (List *) linitial(parse->groupingSets) != NIL))
1270 {
1271 /* There is GROUP BY, but no empty grouping set */
1273
1274 /* Preprocess the HAVING clause fully */
1277 /* ... and move it to WHERE */
1278 parse->jointree->quals = (Node *)
1279 list_concat((List *) parse->jointree->quals,
1280 (List *) whereclause);
1281 }
1282 else
1283 {
1284 /* There is an empty grouping set (perhaps implicitly) */
1286
1287 /* Preprocess the HAVING clause fully */
1290 /* ... and put a copy in WHERE */
1291 parse->jointree->quals = (Node *)
1292 list_concat((List *) parse->jointree->quals,
1293 (List *) whereclause);
1294 /* ... and also keep it in HAVING */
1296 }
1297 }
1298 parse->havingQual = (Node *) newHaving;
1299
1300 /*
1301 * If we have any outer joins, try to reduce them to plain inner joins.
1302 * This step is most easily done after we've done expression
1303 * preprocessing.
1304 */
1305 if (hasOuterJoins)
1307
1308 /*
1309 * If we have any RTE_RESULT relations, see if they can be deleted from
1310 * the jointree. We also rely on this processing to flatten single-child
1311 * FromExprs underneath outer joins. This step is most effectively done
1312 * after we've done expression preprocessing and outer join reduction.
1313 */
1316
1317 /*
1318 * Do the main planning.
1319 */
1320 grouping_planner(root, tuple_fraction, setops);
1321
1322 /*
1323 * Capture the set of outer-level param IDs we have access to, for use in
1324 * extParam/allParam calculations later.
1325 */
1327
1328 /*
1329 * If any initPlans were created in this query level, adjust the surviving
1330 * Paths' costs and parallel-safety flags to account for them. The
1331 * initPlans won't actually get attached to the plan tree till
1332 * create_plan() runs, but we must include their effects now.
1333 */
1336
1337 /*
1338 * Make sure we've identified the cheapest Path for the final rel. (By
1339 * doing this here not in grouping_planner, we include initPlan costs in
1340 * the decision, though it's unlikely that will change anything.)
1341 */
1343
1344 return root;
1345}
1346
1347/*
1348 * preprocess_expression
1349 * Do subquery_planner's preprocessing work for an expression,
1350 * which can be a targetlist, a WHERE clause (including JOIN/ON
1351 * conditions), a HAVING clause, or a few other things.
1352 */
1353static Node *
1355{
1356 /*
1357 * Fall out quickly if expression is empty. This occurs often enough to
1358 * be worth checking. Note that null->null is the correct conversion for
1359 * implicit-AND result format, too.
1360 */
1361 if (expr == NULL)
1362 return NULL;
1363
1364 /*
1365 * If the query has any join RTEs, replace join alias variables with
1366 * base-relation variables. We must do this first, since any expressions
1367 * we may extract from the joinaliasvars lists have not been preprocessed.
1368 * For example, if we did this after sublink processing, sublinks expanded
1369 * out from join aliases would not get processed. But we can skip this in
1370 * non-lateral RTE functions, VALUES lists, and TABLESAMPLE clauses, since
1371 * they can't contain any Vars of the current query level.
1372 */
1373 if (root->hasJoinRTEs &&
1374 !(kind == EXPRKIND_RTFUNC ||
1375 kind == EXPRKIND_VALUES ||
1376 kind == EXPRKIND_TABLESAMPLE ||
1377 kind == EXPRKIND_TABLEFUNC))
1378 expr = flatten_join_alias_vars(root, root->parse, expr);
1379
1380 /*
1381 * Simplify constant expressions. For function RTEs, this was already
1382 * done by preprocess_function_rtes. (But note we must do it again for
1383 * EXPRKIND_RTFUNC_LATERAL, because those might by now contain
1384 * un-simplified subexpressions inserted by flattening of subqueries or
1385 * join alias variables.)
1386 *
1387 * Note: an essential effect of this is to convert named-argument function
1388 * calls to positional notation and insert the current actual values of
1389 * any default arguments for functions. To ensure that happens, we *must*
1390 * process all expressions here. Previous PG versions sometimes skipped
1391 * const-simplification if it didn't seem worth the trouble, but we can't
1392 * do that anymore.
1393 *
1394 * Note: this also flattens nested AND and OR expressions into N-argument
1395 * form. All processing of a qual expression after this point must be
1396 * careful to maintain AND/OR flatness --- that is, do not generate a tree
1397 * with AND directly under AND, nor OR directly under OR.
1398 */
1399 if (kind != EXPRKIND_RTFUNC)
1400 expr = eval_const_expressions(root, expr);
1401
1402 /*
1403 * If it's a qual or havingQual, canonicalize it.
1404 */
1405 if (kind == EXPRKIND_QUAL)
1406 {
1407 expr = (Node *) canonicalize_qual((Expr *) expr, false);
1408
1409#ifdef OPTIMIZER_DEBUG
1410 printf("After canonicalize_qual()\n");
1411 pprint(expr);
1412#endif
1413 }
1414
1415 /*
1416 * Check for ANY ScalarArrayOpExpr with Const arrays and set the
1417 * hashfuncid of any that might execute more quickly by using hash lookups
1418 * instead of a linear search.
1419 */
1420 if (kind == EXPRKIND_QUAL || kind == EXPRKIND_TARGET)
1421 {
1423 }
1424
1425 /* Expand SubLinks to SubPlans */
1426 if (root->parse->hasSubLinks)
1427 expr = SS_process_sublinks(root, expr, (kind == EXPRKIND_QUAL));
1428
1429 /*
1430 * XXX do not insert anything here unless you have grokked the comments in
1431 * SS_replace_correlation_vars ...
1432 */
1433
1434 /* Replace uplevel vars with Param nodes (this IS possible in VALUES) */
1435 if (root->query_level > 1)
1436 expr = SS_replace_correlation_vars(root, expr);
1437
1438 /*
1439 * If it's a qual or havingQual, convert it to implicit-AND format. (We
1440 * don't want to do this before eval_const_expressions, since the latter
1441 * would be unable to simplify a top-level AND correctly. Also,
1442 * SS_process_sublinks expects explicit-AND format.)
1443 */
1444 if (kind == EXPRKIND_QUAL)
1445 expr = (Node *) make_ands_implicit((Expr *) expr);
1446
1447 return expr;
1448}
1449
1450/*
1451 * preprocess_qual_conditions
1452 * Recursively scan the query's jointree and do subquery_planner's
1453 * preprocessing work on each qual condition found therein.
1454 */
1455static void
1457{
1458 if (jtnode == NULL)
1459 return;
1460 if (IsA(jtnode, RangeTblRef))
1461 {
1462 /* nothing to do here */
1463 }
1464 else if (IsA(jtnode, FromExpr))
1465 {
1466 FromExpr *f = (FromExpr *) jtnode;
1467 ListCell *l;
1468
1469 foreach(l, f->fromlist)
1471
1473 }
1474 else if (IsA(jtnode, JoinExpr))
1475 {
1476 JoinExpr *j = (JoinExpr *) jtnode;
1477
1480
1481 j->quals = preprocess_expression(root, j->quals, EXPRKIND_QUAL);
1482 }
1483 else
1484 elog(ERROR, "unrecognized node type: %d",
1485 (int) nodeTag(jtnode));
1486}
1487
1488/*
1489 * preprocess_phv_expression
1490 * Do preprocessing on a PlaceHolderVar expression that's been pulled up.
1491 *
1492 * If a LATERAL subquery references an output of another subquery, and that
1493 * output must be wrapped in a PlaceHolderVar because of an intermediate outer
1494 * join, then we'll push the PlaceHolderVar expression down into the subquery
1495 * and later pull it back up during find_lateral_references, which runs after
1496 * subquery_planner has preprocessed all the expressions that were in the
1497 * current query level to start with. So we need to preprocess it then.
1498 */
1499Expr *
1504
1505/*--------------------
1506 * grouping_planner
1507 * Perform planning steps related to grouping, aggregation, etc.
1508 *
1509 * This function adds all required top-level processing to the scan/join
1510 * Path(s) produced by query_planner.
1511 *
1512 * tuple_fraction is the fraction of tuples we expect will be retrieved.
1513 * tuple_fraction is interpreted as follows:
1514 * 0: expect all tuples to be retrieved (normal case)
1515 * 0 < tuple_fraction < 1: expect the given fraction of tuples available
1516 * from the plan to be retrieved
1517 * tuple_fraction >= 1: tuple_fraction is the absolute number of tuples
1518 * expected to be retrieved (ie, a LIMIT specification).
1519 * setops is used for set operation subqueries to provide the subquery with
1520 * the context in which it's being used so that Paths correctly sorted for the
1521 * set operation can be generated. NULL when not planning a set operation
1522 * child, or when a child of a set op that isn't interested in sorted input.
1523 *
1524 * Returns nothing; the useful output is in the Paths we attach to the
1525 * (UPPERREL_FINAL, NULL) upperrel in *root. In addition,
1526 * root->processed_tlist contains the final processed targetlist.
1527 *
1528 * Note that we have not done set_cheapest() on the final rel; it's convenient
1529 * to leave this to the caller.
1530 *--------------------
1531 */
1532static void
1533grouping_planner(PlannerInfo *root, double tuple_fraction,
1535{
1536 Query *parse = root->parse;
1537 int64 offset_est = 0;
1538 int64 count_est = 0;
1539 double limit_tuples = -1.0;
1540 bool have_postponed_srfs = false;
1547 FinalPathExtraData extra;
1548 ListCell *lc;
1549
1550 /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
1551 if (parse->limitCount || parse->limitOffset)
1552 {
1553 tuple_fraction = preprocess_limit(root, tuple_fraction,
1554 &offset_est, &count_est);
1555
1556 /*
1557 * If we have a known LIMIT, and don't have an unknown OFFSET, we can
1558 * estimate the effects of using a bounded sort.
1559 */
1560 if (count_est > 0 && offset_est >= 0)
1561 limit_tuples = (double) count_est + (double) offset_est;
1562 }
1563
1564 /* Make tuple_fraction accessible to lower-level routines */
1565 root->tuple_fraction = tuple_fraction;
1566
1567 if (parse->setOperations)
1568 {
1569 /*
1570 * Construct Paths for set operations. The results will not need any
1571 * work except perhaps a top-level sort and/or LIMIT. Note that any
1572 * special work for recursive unions is the responsibility of
1573 * plan_set_operations.
1574 */
1576
1577 /*
1578 * We should not need to call preprocess_targetlist, since we must be
1579 * in a SELECT query node. Instead, use the processed_tlist returned
1580 * by plan_set_operations (since this tells whether it returned any
1581 * resjunk columns!), and transfer any sort key information from the
1582 * original tlist.
1583 */
1584 Assert(parse->commandType == CMD_SELECT);
1585
1586 /* for safety, copy processed_tlist instead of modifying in-place */
1587 root->processed_tlist =
1588 postprocess_setop_tlist(copyObject(root->processed_tlist),
1589 parse->targetList);
1590
1591 /* Also extract the PathTarget form of the setop result tlist */
1592 final_target = current_rel->cheapest_total_path->pathtarget;
1593
1594 /* And check whether it's parallel safe */
1597
1598 /* The setop result tlist couldn't contain any SRFs */
1599 Assert(!parse->hasTargetSRFs);
1601
1602 /*
1603 * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have
1604 * checked already, but let's make sure).
1605 */
1606 if (parse->rowMarks)
1607 ereport(ERROR,
1609 /*------
1610 translator: %s is a SQL row locking clause such as FOR UPDATE */
1611 errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
1613 parse->rowMarks)->strength))));
1614
1615 /*
1616 * Calculate pathkeys that represent result ordering requirements
1617 */
1618 Assert(parse->distinctClause == NIL);
1619 root->sort_pathkeys = make_pathkeys_for_sortclauses(root,
1620 parse->sortClause,
1621 root->processed_tlist);
1622 }
1623 else
1624 {
1625 /* No set operations, do regular planning */
1639 bool have_grouping;
1641 List *activeWindows = NIL;
1642 grouping_sets_data *gset_data = NULL;
1644
1645 /* A recursive query should always have setOperations */
1646 Assert(!root->hasRecursion);
1647
1648 /* Preprocess grouping sets and GROUP BY clause, if any */
1649 if (parse->groupingSets)
1650 {
1651 gset_data = preprocess_grouping_sets(root);
1652 }
1653 else if (parse->groupClause)
1654 {
1655 /* Preprocess regular GROUP BY clause, if any */
1656 root->processed_groupClause = preprocess_groupclause(root, NIL);
1657 }
1658
1659 /*
1660 * Preprocess targetlist. Note that much of the remaining planning
1661 * work will be done with the PathTarget representation of tlists, but
1662 * we must also maintain the full representation of the final tlist so
1663 * that we can transfer its decoration (resnames etc) to the topmost
1664 * tlist of the finished Plan. This is kept in processed_tlist.
1665 */
1667
1668 /*
1669 * Mark all the aggregates with resolved aggtranstypes, and detect
1670 * aggregates that are duplicates or can share transition state. We
1671 * must do this before slicing and dicing the tlist into various
1672 * pathtargets, else some copies of the Aggref nodes might escape
1673 * being marked.
1674 */
1675 if (parse->hasAggs)
1676 {
1677 preprocess_aggrefs(root, (Node *) root->processed_tlist);
1678 preprocess_aggrefs(root, (Node *) parse->havingQual);
1679 }
1680
1681 /*
1682 * Locate any window functions in the tlist. (We don't need to look
1683 * anywhere else, since expressions used in ORDER BY will be in there
1684 * too.) Note that they could all have been eliminated by constant
1685 * folding, in which case we don't need to do any more work.
1686 */
1687 if (parse->hasWindowFuncs)
1688 {
1689 wflists = find_window_functions((Node *) root->processed_tlist,
1690 list_length(parse->windowClause));
1691 if (wflists->numWindowFuncs > 0)
1692 {
1693 /*
1694 * See if any modifications can be made to each WindowClause
1695 * to allow the executor to execute the WindowFuncs more
1696 * quickly.
1697 */
1699
1700 /* Extract the list of windows actually in use. */
1701 activeWindows = select_active_windows(root, wflists);
1702
1703 /* Make sure they all have names, for EXPLAIN's use. */
1704 name_active_windows(activeWindows);
1705 }
1706 else
1707 parse->hasWindowFuncs = false;
1708 }
1709
1710 /*
1711 * Preprocess MIN/MAX aggregates, if any. Note: be careful about
1712 * adding logic between here and the query_planner() call. Anything
1713 * that is needed in MIN/MAX-optimizable cases will have to be
1714 * duplicated in planagg.c.
1715 */
1716 if (parse->hasAggs)
1718
1719 /*
1720 * Figure out whether there's a hard limit on the number of rows that
1721 * query_planner's result subplan needs to return. Even if we know a
1722 * hard limit overall, it doesn't apply if the query has any
1723 * grouping/aggregation operations, or SRFs in the tlist.
1724 */
1725 if (parse->groupClause ||
1726 parse->groupingSets ||
1727 parse->distinctClause ||
1728 parse->hasAggs ||
1729 parse->hasWindowFuncs ||
1730 parse->hasTargetSRFs ||
1731 root->hasHavingQual)
1732 root->limit_tuples = -1.0;
1733 else
1734 root->limit_tuples = limit_tuples;
1735
1736 /* Set up data needed by standard_qp_callback */
1737 qp_extra.activeWindows = activeWindows;
1738 qp_extra.gset_data = gset_data;
1739
1740 /*
1741 * If we're a subquery for a set operation, store the SetOperationStmt
1742 * in qp_extra.
1743 */
1744 qp_extra.setop = setops;
1745
1746 /*
1747 * Generate the best unsorted and presorted paths for the scan/join
1748 * portion of this Query, ie the processing represented by the
1749 * FROM/WHERE clauses. (Note there may not be any presorted paths.)
1750 * We also generate (in standard_qp_callback) pathkey representations
1751 * of the query's sort clause, distinct clause, etc.
1752 */
1754
1755 /*
1756 * Convert the query's result tlist into PathTarget format.
1757 *
1758 * Note: this cannot be done before query_planner() has performed
1759 * appendrel expansion, because that might add resjunk entries to
1760 * root->processed_tlist. Waiting till afterwards is also helpful
1761 * because the target width estimates can use per-Var width numbers
1762 * that were obtained within query_planner().
1763 */
1764 final_target = create_pathtarget(root, root->processed_tlist);
1767
1768 /*
1769 * If ORDER BY was given, consider whether we should use a post-sort
1770 * projection, and compute the adjusted target for preceding steps if
1771 * so.
1772 */
1773 if (parse->sortClause)
1774 {
1780 }
1781 else
1782 {
1785 }
1786
1787 /*
1788 * If we have window functions to deal with, the output from any
1789 * grouping step needs to be what the window functions want;
1790 * otherwise, it should be sort_input_target.
1791 */
1792 if (activeWindows)
1793 {
1796 activeWindows);
1799 }
1800 else
1801 {
1804 }
1805
1806 /*
1807 * If we have grouping or aggregation to do, the topmost scan/join
1808 * plan node must emit what the grouping step wants; otherwise, it
1809 * should emit grouping_target.
1810 */
1811 have_grouping = (parse->groupClause || parse->groupingSets ||
1812 parse->hasAggs || root->hasHavingQual);
1813 if (have_grouping)
1814 {
1818 }
1819 else
1820 {
1823 }
1824
1825 /*
1826 * If there are any SRFs in the targetlist, we must separate each of
1827 * these PathTargets into SRF-computing and SRF-free targets. Replace
1828 * each of the named targets with a SRF-free version, and remember the
1829 * list of additional projection steps we need to add afterwards.
1830 */
1831 if (parse->hasTargetSRFs)
1832 {
1833 /* final_target doesn't recompute any SRFs in sort_input_target */
1839 /* likewise for sort_input_target vs. grouping_target */
1845 /* likewise for grouping_target vs. scanjoin_target */
1852 /* scanjoin_target will not have any SRFs precomputed for it */
1858 }
1859 else
1860 {
1861 /* initialize lists; for most of these, dummy values are OK */
1867 }
1868
1869 /* Apply scan/join target. */
1871 && equal(scanjoin_target->exprs, current_rel->reltarget->exprs);
1876
1877 /*
1878 * Save the various upper-rel PathTargets we just computed into
1879 * root->upper_targets[]. The core code doesn't use this, but it
1880 * provides a convenient place for extensions to get at the info. For
1881 * consistency, we save all the intermediate targets, even though some
1882 * of the corresponding upperrels might not be needed for this query.
1883 */
1884 root->upper_targets[UPPERREL_FINAL] = final_target;
1885 root->upper_targets[UPPERREL_ORDERED] = final_target;
1886 root->upper_targets[UPPERREL_DISTINCT] = sort_input_target;
1888 root->upper_targets[UPPERREL_WINDOW] = sort_input_target;
1889 root->upper_targets[UPPERREL_GROUP_AGG] = grouping_target;
1890
1891 /*
1892 * If we have grouping and/or aggregation, consider ways to implement
1893 * that. We build a new upperrel representing the output of this
1894 * phase.
1895 */
1896 if (have_grouping)
1897 {
1902 gset_data);
1903 /* Fix things up if grouping_target contains SRFs */
1904 if (parse->hasTargetSRFs)
1908 }
1909
1910 /*
1911 * If we have window functions, consider ways to implement those. We
1912 * build a new upperrel representing the output of this phase.
1913 */
1914 if (activeWindows)
1915 {
1921 wflists,
1922 activeWindows);
1923 /* Fix things up if sort_input_target contains SRFs */
1924 if (parse->hasTargetSRFs)
1928 }
1929
1930 /*
1931 * If there is a DISTINCT clause, consider ways to implement that. We
1932 * build a new upperrel representing the output of this phase.
1933 */
1934 if (parse->distinctClause)
1935 {
1939 }
1940 } /* end of if (setOperations) */
1941
1942 /*
1943 * If ORDER BY was given, consider ways to implement that, and generate a
1944 * new upperrel containing only paths that emit the correct ordering and
1945 * project the correct final_target. We can apply the original
1946 * limit_tuples limit in sort costing here, but only if there are no
1947 * postponed SRFs.
1948 */
1949 if (parse->sortClause)
1950 {
1955 have_postponed_srfs ? -1.0 :
1956 limit_tuples);
1957 /* Fix things up if final_target contains SRFs */
1958 if (parse->hasTargetSRFs)
1962 }
1963
1964 /*
1965 * Now we are prepared to build the final-output upperrel.
1966 */
1968
1969 /*
1970 * If the input rel is marked consider_parallel and there's nothing that's
1971 * not parallel-safe in the LIMIT clause, then the final_rel can be marked
1972 * consider_parallel as well. Note that if the query has rowMarks or is
1973 * not a SELECT, consider_parallel will be false for every relation in the
1974 * query.
1975 */
1976 if (current_rel->consider_parallel &&
1977 is_parallel_safe(root, parse->limitOffset) &&
1978 is_parallel_safe(root, parse->limitCount))
1979 final_rel->consider_parallel = true;
1980
1981 /*
1982 * If the current_rel belongs to a single FDW, so does the final_rel.
1983 */
1984 final_rel->serverid = current_rel->serverid;
1985 final_rel->userid = current_rel->userid;
1986 final_rel->useridiscurrent = current_rel->useridiscurrent;
1987 final_rel->fdwroutine = current_rel->fdwroutine;
1988
1989 /*
1990 * Generate paths for the final_rel. Insert all surviving paths, with
1991 * LockRows, Limit, and/or ModifyTable steps added if needed.
1992 */
1993 foreach(lc, current_rel->pathlist)
1994 {
1995 Path *path = (Path *) lfirst(lc);
1996
1997 /*
1998 * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
1999 * (Note: we intentionally test parse->rowMarks not root->rowMarks
2000 * here. If there are only non-locking rowmarks, they should be
2001 * handled by the ModifyTable node instead. However, root->rowMarks
2002 * is what goes into the LockRows node.)
2003 */
2004 if (parse->rowMarks)
2005 {
2006 path = (Path *) create_lockrows_path(root, final_rel, path,
2007 root->rowMarks,
2009 }
2010
2011 /*
2012 * If there is a LIMIT/OFFSET clause, add the LIMIT node.
2013 */
2014 if (limit_needed(parse))
2015 {
2016 path = (Path *) create_limit_path(root, final_rel, path,
2017 parse->limitOffset,
2018 parse->limitCount,
2019 parse->limitOption,
2020 offset_est, count_est);
2021 }
2022
2023 /*
2024 * If this is an INSERT/UPDATE/DELETE/MERGE, add the ModifyTable node.
2025 */
2026 if (parse->commandType != CMD_SELECT)
2027 {
2028 Index rootRelation;
2029 List *resultRelations = NIL;
2030 List *updateColnosLists = NIL;
2031 List *withCheckOptionLists = NIL;
2032 List *returningLists = NIL;
2033 List *mergeActionLists = NIL;
2034 List *mergeJoinConditions = NIL;
2035 List *rowMarks;
2036
2037 if (bms_membership(root->all_result_relids) == BMS_MULTIPLE)
2038 {
2039 /* Inherited UPDATE/DELETE/MERGE */
2041 parse->resultRelation);
2042 int resultRelation = -1;
2043
2044 /* Pass the root result rel forward to the executor. */
2045 rootRelation = parse->resultRelation;
2046
2047 /* Add only leaf children to ModifyTable. */
2048 while ((resultRelation = bms_next_member(root->leaf_result_relids,
2049 resultRelation)) >= 0)
2050 {
2052 resultRelation);
2053
2054 /*
2055 * Also exclude any leaf rels that have turned dummy since
2056 * being added to the list, for example, by being excluded
2057 * by constraint exclusion.
2058 */
2060 continue;
2061
2062 /* Build per-target-rel lists needed by ModifyTable */
2063 resultRelations = lappend_int(resultRelations,
2064 resultRelation);
2065 if (parse->commandType == CMD_UPDATE)
2066 {
2067 List *update_colnos = root->update_colnos;
2068
2070 update_colnos =
2072 update_colnos,
2073 this_result_rel->relid,
2074 top_result_rel->relid);
2075 updateColnosLists = lappend(updateColnosLists,
2076 update_colnos);
2077 }
2078 if (parse->withCheckOptions)
2079 {
2080 List *withCheckOptions = parse->withCheckOptions;
2081
2088 withCheckOptionLists = lappend(withCheckOptionLists,
2090 }
2091 if (parse->returningList)
2092 {
2093 List *returningList = parse->returningList;
2094
2096 returningList = (List *)
2098 (Node *) returningList,
2101 returningLists = lappend(returningLists,
2102 returningList);
2103 }
2104 if (parse->mergeActionList)
2105 {
2106 ListCell *l;
2107 List *mergeActionList = NIL;
2108
2109 /*
2110 * Copy MergeActions and translate stuff that
2111 * references attribute numbers.
2112 */
2113 foreach(l, parse->mergeActionList)
2114 {
2115 MergeAction *action = lfirst(l),
2116 *leaf_action = copyObject(action);
2117
2118 leaf_action->qual =
2120 (Node *) action->qual,
2123 leaf_action->targetList = (List *)
2125 (Node *) action->targetList,
2128 if (leaf_action->commandType == CMD_UPDATE)
2129 leaf_action->updateColnos =
2131 action->updateColnos,
2132 this_result_rel->relid,
2133 top_result_rel->relid);
2134 mergeActionList = lappend(mergeActionList,
2135 leaf_action);
2136 }
2137
2138 mergeActionLists = lappend(mergeActionLists,
2139 mergeActionList);
2140 }
2141 if (parse->commandType == CMD_MERGE)
2142 {
2143 Node *mergeJoinCondition = parse->mergeJoinCondition;
2144
2146 mergeJoinCondition =
2148 mergeJoinCondition,
2151 mergeJoinConditions = lappend(mergeJoinConditions,
2152 mergeJoinCondition);
2153 }
2154 }
2155
2156 if (resultRelations == NIL)
2157 {
2158 /*
2159 * We managed to exclude every child rel, so generate a
2160 * dummy one-relation plan using info for the top target
2161 * rel (even though that may not be a leaf target).
2162 * Although it's clear that no data will be updated or
2163 * deleted, we still need to have a ModifyTable node so
2164 * that any statement triggers will be executed. (This
2165 * could be cleaner if we fixed nodeModifyTable.c to allow
2166 * zero target relations, but that probably wouldn't be a
2167 * net win.)
2168 */
2169 resultRelations = list_make1_int(parse->resultRelation);
2170 if (parse->commandType == CMD_UPDATE)
2171 updateColnosLists = list_make1(root->update_colnos);
2172 if (parse->withCheckOptions)
2173 withCheckOptionLists = list_make1(parse->withCheckOptions);
2174 if (parse->returningList)
2175 returningLists = list_make1(parse->returningList);
2176 if (parse->mergeActionList)
2177 mergeActionLists = list_make1(parse->mergeActionList);
2178 if (parse->commandType == CMD_MERGE)
2179 mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2180 }
2181 }
2182 else
2183 {
2184 /* Single-relation INSERT/UPDATE/DELETE/MERGE. */
2185 rootRelation = 0; /* there's no separate root rel */
2186 resultRelations = list_make1_int(parse->resultRelation);
2187 if (parse->commandType == CMD_UPDATE)
2188 updateColnosLists = list_make1(root->update_colnos);
2189 if (parse->withCheckOptions)
2190 withCheckOptionLists = list_make1(parse->withCheckOptions);
2191 if (parse->returningList)
2192 returningLists = list_make1(parse->returningList);
2193 if (parse->mergeActionList)
2194 mergeActionLists = list_make1(parse->mergeActionList);
2195 if (parse->commandType == CMD_MERGE)
2196 mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2197 }
2198
2199 /*
2200 * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
2201 * will have dealt with fetching non-locked marked rows, else we
2202 * need to have ModifyTable do that.
2203 */
2204 if (parse->rowMarks)
2205 rowMarks = NIL;
2206 else
2207 rowMarks = root->rowMarks;
2208
2209 path = (Path *)
2211 path,
2212 parse->commandType,
2213 parse->canSetTag,
2214 parse->resultRelation,
2215 rootRelation,
2216 resultRelations,
2217 updateColnosLists,
2218 withCheckOptionLists,
2219 returningLists,
2220 rowMarks,
2221 parse->onConflict,
2222 mergeActionLists,
2223 mergeJoinConditions,
2224 parse->forPortionOf,
2226 }
2227
2228 /* And shove it into final_rel */
2229 add_path(final_rel, path);
2230 }
2231
2232 /*
2233 * Generate partial paths for final_rel, too, if outer query levels might
2234 * be able to make use of them.
2235 */
2236 if (final_rel->consider_parallel && root->query_level > 1 &&
2238 {
2239 Assert(!parse->rowMarks && parse->commandType == CMD_SELECT);
2240 foreach(lc, current_rel->partial_pathlist)
2241 {
2242 Path *partial_path = (Path *) lfirst(lc);
2243
2245 }
2246 }
2247
2249 extra.limit_tuples = limit_tuples;
2250 extra.count_est = count_est;
2251 extra.offset_est = offset_est;
2252
2253 /*
2254 * If there is an FDW that's responsible for all baserels of the query,
2255 * let it consider adding ForeignPaths.
2256 */
2257 if (final_rel->fdwroutine &&
2258 final_rel->fdwroutine->GetForeignUpperPaths)
2259 final_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_FINAL,
2261 &extra);
2262
2263 /* Let extensions possibly add some more paths */
2265 (*create_upper_paths_hook) (root, UPPERREL_FINAL,
2266 current_rel, final_rel, &extra);
2267
2268 /* Note: currently, we leave it to callers to do set_cheapest() */
2269}
2270
2271/*
2272 * Do preprocessing for groupingSets clause and related data.
2273 *
2274 * We expect that parse->groupingSets has already been expanded into a flat
2275 * list of grouping sets (that is, just integer Lists of ressortgroupref
2276 * numbers) by expand_grouping_sets(). This function handles the preliminary
2277 * steps of organizing the grouping sets into lists of rollups, and preparing
2278 * annotations which will later be filled in with size estimates.
2279 */
2280static grouping_sets_data *
2282{
2283 Query *parse = root->parse;
2284 List *sets;
2285 int maxref = 0;
2288
2289 /*
2290 * We don't currently make any attempt to optimize the groupClause when
2291 * there are grouping sets, so just duplicate it in processed_groupClause.
2292 */
2293 root->processed_groupClause = parse->groupClause;
2294
2295 /* Detect unhashable and unsortable grouping expressions */
2296 gd->any_hashable = false;
2297 gd->unhashable_refs = NULL;
2298 gd->unsortable_refs = NULL;
2299 gd->unsortable_sets = NIL;
2300
2301 if (parse->groupClause)
2302 {
2303 ListCell *lc;
2304
2305 foreach(lc, parse->groupClause)
2306 {
2308 Index ref = gc->tleSortGroupRef;
2309
2310 if (ref > maxref)
2311 maxref = ref;
2312
2313 if (!gc->hashable)
2314 gd->unhashable_refs = bms_add_member(gd->unhashable_refs, ref);
2315
2316 if (!OidIsValid(gc->sortop))
2317 gd->unsortable_refs = bms_add_member(gd->unsortable_refs, ref);
2318 }
2319 }
2320
2321 /* Allocate workspace array for remapping */
2322 gd->tleref_to_colnum_map = (int *) palloc((maxref + 1) * sizeof(int));
2323
2324 /*
2325 * If we have any unsortable sets, we must extract them before trying to
2326 * prepare rollups. Unsortable sets don't go through
2327 * reorder_grouping_sets, so we must apply the GroupingSetData annotation
2328 * here.
2329 */
2330 if (!bms_is_empty(gd->unsortable_refs))
2331 {
2333 ListCell *lc;
2334
2335 foreach(lc, parse->groupingSets)
2336 {
2337 List *gset = (List *) lfirst(lc);
2338
2339 if (bms_overlap_list(gd->unsortable_refs, gset))
2340 {
2342
2343 gs->set = gset;
2344 gd->unsortable_sets = lappend(gd->unsortable_sets, gs);
2345
2346 /*
2347 * We must enforce here that an unsortable set is hashable;
2348 * later code assumes this. Parse analysis only checks that
2349 * every individual column is either hashable or sortable.
2350 *
2351 * Note that passing this test doesn't guarantee we can
2352 * generate a plan; there might be other showstoppers.
2353 */
2354 if (bms_overlap_list(gd->unhashable_refs, gset))
2355 ereport(ERROR,
2357 errmsg("could not implement GROUP BY"),
2358 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
2359 }
2360 else
2362 }
2363
2364 if (sortable_sets)
2366 else
2367 sets = NIL;
2368 }
2369 else
2370 sets = extract_rollup_sets(parse->groupingSets);
2371
2372 foreach(lc_set, sets)
2373 {
2377
2378 /*
2379 * Reorder the current list of grouping sets into correct prefix
2380 * order. If only one aggregation pass is needed, try to make the
2381 * list match the ORDER BY clause; if more than one pass is needed, we
2382 * don't bother with that.
2383 *
2384 * Note that this reorders the sets from smallest-member-first to
2385 * largest-member-first, and applies the GroupingSetData annotations,
2386 * though the data will be filled in later.
2387 */
2389 (list_length(sets) == 1
2390 ? parse->sortClause
2391 : NIL));
2392
2393 /*
2394 * Get the initial (and therefore largest) grouping set.
2395 */
2397
2398 /*
2399 * Order the groupClause appropriately. If the first grouping set is
2400 * empty, then the groupClause must also be empty; otherwise we have
2401 * to force the groupClause to match that grouping set's order.
2402 *
2403 * (The first grouping set can be empty even though parse->groupClause
2404 * is not empty only if all non-empty grouping sets are unsortable.
2405 * The groupClauses for hashed grouping sets are built later on.)
2406 */
2407 if (gs->set)
2408 rollup->groupClause = preprocess_groupclause(root, gs->set);
2409 else
2410 rollup->groupClause = NIL;
2411
2412 /*
2413 * Is it hashable? We pretend empty sets are hashable even though we
2414 * actually force them not to be hashed later. But don't bother if
2415 * there's nothing but empty sets (since in that case we can't hash
2416 * anything).
2417 */
2418 if (gs->set &&
2419 !bms_overlap_list(gd->unhashable_refs, gs->set))
2420 {
2421 rollup->hashable = true;
2422 gd->any_hashable = true;
2423 }
2424
2425 /*
2426 * Now that we've pinned down an order for the groupClause for this
2427 * list of grouping sets, we need to remap the entries in the grouping
2428 * sets from sortgrouprefs to plain indices (0-based) into the
2429 * groupClause for this collection of grouping sets. We keep the
2430 * original form for later use, though.
2431 */
2432 rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
2434 gd->tleref_to_colnum_map);
2435 rollup->gsets_data = current_sets;
2436
2437 gd->rollups = lappend(gd->rollups, rollup);
2438 }
2439
2440 if (gd->unsortable_sets)
2441 {
2442 /*
2443 * We have not yet pinned down a groupclause for this, but we will
2444 * need index-based lists for estimation purposes. Construct
2445 * hash_sets_idx based on the entire original groupclause for now.
2446 */
2447 gd->hash_sets_idx = remap_to_groupclause_idx(parse->groupClause,
2448 gd->unsortable_sets,
2449 gd->tleref_to_colnum_map);
2450 gd->any_hashable = true;
2451 }
2452
2453 return gd;
2454}
2455
2456/*
2457 * Given a groupclause and a list of GroupingSetData, return equivalent sets
2458 * (without annotation) mapped to indexes into the given groupclause.
2459 */
2460static List *
2462 List *gsets,
2463 int *tleref_to_colnum_map)
2464{
2465 int ref = 0;
2466 List *result = NIL;
2467 ListCell *lc;
2468
2469 foreach(lc, groupClause)
2470 {
2472
2473 tleref_to_colnum_map[gc->tleSortGroupRef] = ref++;
2474 }
2475
2476 foreach(lc, gsets)
2477 {
2478 List *set = NIL;
2479 ListCell *lc2;
2481
2482 foreach(lc2, gs->set)
2483 {
2484 set = lappend_int(set, tleref_to_colnum_map[lfirst_int(lc2)]);
2485 }
2486
2487 result = lappend(result, set);
2488 }
2489
2490 return result;
2491}
2492
2493
2494/*
2495 * preprocess_rowmarks - set up PlanRowMarks if needed
2496 */
2497static void
2499{
2500 Query *parse = root->parse;
2501 Bitmapset *rels;
2502 List *prowmarks;
2503 ListCell *l;
2504 int i;
2505
2506 if (parse->rowMarks)
2507 {
2508 /*
2509 * We've got trouble if FOR [KEY] UPDATE/SHARE appears inside
2510 * grouping, since grouping renders a reference to individual tuple
2511 * CTIDs invalid. This is also checked at parse time, but that's
2512 * insufficient because of rule substitution, query pullup, etc.
2513 */
2515 parse->rowMarks)->strength);
2516 }
2517 else
2518 {
2519 /*
2520 * We only need rowmarks for UPDATE, DELETE, MERGE, or FOR [KEY]
2521 * UPDATE/SHARE.
2522 */
2523 if (parse->commandType != CMD_UPDATE &&
2524 parse->commandType != CMD_DELETE &&
2525 parse->commandType != CMD_MERGE)
2526 return;
2527 }
2528
2529 /*
2530 * We need to have rowmarks for all base relations except the target. We
2531 * make a bitmapset of all base rels and then remove the items we don't
2532 * need or have FOR [KEY] UPDATE/SHARE marks for.
2533 */
2534 rels = get_relids_in_jointree((Node *) parse->jointree, false, false);
2535 if (parse->resultRelation)
2536 rels = bms_del_member(rels, parse->resultRelation);
2537
2538 /*
2539 * Convert RowMarkClauses to PlanRowMark representation.
2540 */
2541 prowmarks = NIL;
2542 foreach(l, parse->rowMarks)
2543 {
2545 RangeTblEntry *rte = rt_fetch(rc->rti, parse->rtable);
2547
2548 /*
2549 * Currently, it is syntactically impossible to have FOR UPDATE et al
2550 * applied to an update/delete target rel. If that ever becomes
2551 * possible, we should drop the target from the PlanRowMark list.
2552 */
2553 Assert(rc->rti != parse->resultRelation);
2554
2555 /*
2556 * Ignore RowMarkClauses for subqueries; they aren't real tables and
2557 * can't support true locking. Subqueries that got flattened into the
2558 * main query should be ignored completely. Any that didn't will get
2559 * ROW_MARK_COPY items in the next loop.
2560 */
2561 if (rte->rtekind != RTE_RELATION)
2562 continue;
2563
2564 rels = bms_del_member(rels, rc->rti);
2565
2567 newrc->rti = newrc->prti = rc->rti;
2568 newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2569 newrc->markType = select_rowmark_type(rte, rc->strength);
2570 newrc->allMarkTypes = (1 << newrc->markType);
2571 newrc->strength = rc->strength;
2572 newrc->waitPolicy = rc->waitPolicy;
2573 newrc->isParent = false;
2574
2576 }
2577
2578 /*
2579 * Now, add rowmarks for any non-target, non-locked base relations.
2580 */
2581 i = 0;
2582 foreach(l, parse->rtable)
2583 {
2586
2587 i++;
2588 if (!bms_is_member(i, rels))
2589 continue;
2590
2592 newrc->rti = newrc->prti = i;
2593 newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2594 newrc->markType = select_rowmark_type(rte, LCS_NONE);
2595 newrc->allMarkTypes = (1 << newrc->markType);
2596 newrc->strength = LCS_NONE;
2597 newrc->waitPolicy = LockWaitBlock; /* doesn't matter */
2598 newrc->isParent = false;
2599
2601 }
2602
2603 root->rowMarks = prowmarks;
2604}
2605
2606/*
2607 * Select RowMarkType to use for a given table
2608 */
2611{
2612 if (rte->rtekind != RTE_RELATION)
2613 {
2614 /* If it's not a table at all, use ROW_MARK_COPY */
2615 return ROW_MARK_COPY;
2616 }
2617 else if (rte->relkind == RELKIND_FOREIGN_TABLE)
2618 {
2619 /* Let the FDW select the rowmark type, if it wants to */
2620 FdwRoutine *fdwroutine = GetFdwRoutineByRelId(rte->relid);
2621
2622 if (fdwroutine->GetForeignRowMarkType != NULL)
2623 return fdwroutine->GetForeignRowMarkType(rte, strength);
2624 /* Otherwise, use ROW_MARK_COPY by default */
2625 return ROW_MARK_COPY;
2626 }
2627 else
2628 {
2629 /* Regular table, apply the appropriate lock type */
2630 switch (strength)
2631 {
2632 case LCS_NONE:
2633
2634 /*
2635 * We don't need a tuple lock, only the ability to re-fetch
2636 * the row.
2637 */
2638 return ROW_MARK_REFERENCE;
2639 break;
2640 case LCS_FORKEYSHARE:
2641 return ROW_MARK_KEYSHARE;
2642 break;
2643 case LCS_FORSHARE:
2644 return ROW_MARK_SHARE;
2645 break;
2646 case LCS_FORNOKEYUPDATE:
2648 break;
2649 case LCS_FORUPDATE:
2650 return ROW_MARK_EXCLUSIVE;
2651 break;
2652 }
2653 elog(ERROR, "unrecognized LockClauseStrength %d", (int) strength);
2654 return ROW_MARK_EXCLUSIVE; /* keep compiler quiet */
2655 }
2656}
2657
2658/*
2659 * preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses
2660 *
2661 * We try to estimate the values of the LIMIT/OFFSET clauses, and pass the
2662 * results back in *count_est and *offset_est. These variables are set to
2663 * 0 if the corresponding clause is not present, and -1 if it's present
2664 * but we couldn't estimate the value for it. (The "0" convention is OK
2665 * for OFFSET but a little bit bogus for LIMIT: effectively we estimate
2666 * LIMIT 0 as though it were LIMIT 1. But this is in line with the planner's
2667 * usual practice of never estimating less than one row.) These values will
2668 * be passed to create_limit_path, which see if you change this code.
2669 *
2670 * The return value is the suitably adjusted tuple_fraction to use for
2671 * planning the query. This adjustment is not overridable, since it reflects
2672 * plan actions that grouping_planner() will certainly take, not assumptions
2673 * about context.
2674 */
2675static double
2676preprocess_limit(PlannerInfo *root, double tuple_fraction,
2677 int64 *offset_est, int64 *count_est)
2678{
2679 Query *parse = root->parse;
2680 Node *est;
2681 double limit_fraction;
2682
2683 /* Should not be called unless LIMIT or OFFSET */
2684 Assert(parse->limitCount || parse->limitOffset);
2685
2686 /*
2687 * Try to obtain the clause values. We use estimate_expression_value
2688 * primarily because it can sometimes do something useful with Params.
2689 */
2690 if (parse->limitCount)
2691 {
2692 est = estimate_expression_value(root, parse->limitCount);
2693 if (est && IsA(est, Const))
2694 {
2695 if (((Const *) est)->constisnull)
2696 {
2697 /* NULL indicates LIMIT ALL, ie, no limit */
2698 *count_est = 0; /* treat as not present */
2699 }
2700 else
2701 {
2702 *count_est = DatumGetInt64(((Const *) est)->constvalue);
2703 if (*count_est <= 0)
2704 *count_est = 1; /* force to at least 1 */
2705 }
2706 }
2707 else
2708 *count_est = -1; /* can't estimate */
2709 }
2710 else
2711 *count_est = 0; /* not present */
2712
2713 if (parse->limitOffset)
2714 {
2715 est = estimate_expression_value(root, parse->limitOffset);
2716 if (est && IsA(est, Const))
2717 {
2718 if (((Const *) est)->constisnull)
2719 {
2720 /* Treat NULL as no offset; the executor will too */
2721 *offset_est = 0; /* treat as not present */
2722 }
2723 else
2724 {
2725 *offset_est = DatumGetInt64(((Const *) est)->constvalue);
2726 if (*offset_est < 0)
2727 *offset_est = 0; /* treat as not present */
2728 }
2729 }
2730 else
2731 *offset_est = -1; /* can't estimate */
2732 }
2733 else
2734 *offset_est = 0; /* not present */
2735
2736 if (*count_est != 0)
2737 {
2738 /*
2739 * A LIMIT clause limits the absolute number of tuples returned.
2740 * However, if it's not a constant LIMIT then we have to guess; for
2741 * lack of a better idea, assume 10% of the plan's result is wanted.
2742 */
2743 if (*count_est < 0 || *offset_est < 0)
2744 {
2745 /* LIMIT or OFFSET is an expression ... punt ... */
2746 limit_fraction = 0.10;
2747 }
2748 else
2749 {
2750 /* LIMIT (plus OFFSET, if any) is max number of tuples needed */
2751 limit_fraction = (double) *count_est + (double) *offset_est;
2752 }
2753
2754 /*
2755 * If we have absolute limits from both caller and LIMIT, use the
2756 * smaller value; likewise if they are both fractional. If one is
2757 * fractional and the other absolute, we can't easily determine which
2758 * is smaller, but we use the heuristic that the absolute will usually
2759 * be smaller.
2760 */
2761 if (tuple_fraction >= 1.0)
2762 {
2763 if (limit_fraction >= 1.0)
2764 {
2765 /* both absolute */
2766 tuple_fraction = Min(tuple_fraction, limit_fraction);
2767 }
2768 else
2769 {
2770 /* caller absolute, limit fractional; use caller's value */
2771 }
2772 }
2773 else if (tuple_fraction > 0.0)
2774 {
2775 if (limit_fraction >= 1.0)
2776 {
2777 /* caller fractional, limit absolute; use limit */
2778 tuple_fraction = limit_fraction;
2779 }
2780 else
2781 {
2782 /* both fractional */
2783 tuple_fraction = Min(tuple_fraction, limit_fraction);
2784 }
2785 }
2786 else
2787 {
2788 /* no info from caller, just use limit */
2789 tuple_fraction = limit_fraction;
2790 }
2791 }
2792 else if (*offset_est != 0 && tuple_fraction > 0.0)
2793 {
2794 /*
2795 * We have an OFFSET but no LIMIT. This acts entirely differently
2796 * from the LIMIT case: here, we need to increase rather than decrease
2797 * the caller's tuple_fraction, because the OFFSET acts to cause more
2798 * tuples to be fetched instead of fewer. This only matters if we got
2799 * a tuple_fraction > 0, however.
2800 *
2801 * As above, use 10% if OFFSET is present but unestimatable.
2802 */
2803 if (*offset_est < 0)
2804 limit_fraction = 0.10;
2805 else
2806 limit_fraction = (double) *offset_est;
2807
2808 /*
2809 * If we have absolute counts from both caller and OFFSET, add them
2810 * together; likewise if they are both fractional. If one is
2811 * fractional and the other absolute, we want to take the larger, and
2812 * we heuristically assume that's the fractional one.
2813 */
2814 if (tuple_fraction >= 1.0)
2815 {
2816 if (limit_fraction >= 1.0)
2817 {
2818 /* both absolute, so add them together */
2819 tuple_fraction += limit_fraction;
2820 }
2821 else
2822 {
2823 /* caller absolute, limit fractional; use limit */
2824 tuple_fraction = limit_fraction;
2825 }
2826 }
2827 else
2828 {
2829 if (limit_fraction >= 1.0)
2830 {
2831 /* caller fractional, limit absolute; use caller's value */
2832 }
2833 else
2834 {
2835 /* both fractional, so add them together */
2836 tuple_fraction += limit_fraction;
2837 if (tuple_fraction >= 1.0)
2838 tuple_fraction = 0.0; /* assume fetch all */
2839 }
2840 }
2841 }
2842
2843 return tuple_fraction;
2844}
2845
2846/*
2847 * limit_needed - do we actually need a Limit plan node?
2848 *
2849 * If we have constant-zero OFFSET and constant-null LIMIT, we can skip adding
2850 * a Limit node. This is worth checking for because "OFFSET 0" is a common
2851 * locution for an optimization fence. (Because other places in the planner
2852 * merely check whether parse->limitOffset isn't NULL, it will still work as
2853 * an optimization fence --- we're just suppressing unnecessary run-time
2854 * overhead.)
2855 *
2856 * This might look like it could be merged into preprocess_limit, but there's
2857 * a key distinction: here we need hard constants in OFFSET/LIMIT, whereas
2858 * in preprocess_limit it's good enough to consider estimated values.
2859 */
2860bool
2862{
2863 Node *node;
2864
2865 node = parse->limitCount;
2866 if (node)
2867 {
2868 if (IsA(node, Const))
2869 {
2870 /* NULL indicates LIMIT ALL, ie, no limit */
2871 if (!((Const *) node)->constisnull)
2872 return true; /* LIMIT with a constant value */
2873 }
2874 else
2875 return true; /* non-constant LIMIT */
2876 }
2877
2878 node = parse->limitOffset;
2879 if (node)
2880 {
2881 if (IsA(node, Const))
2882 {
2883 /* Treat NULL as no offset; the executor would too */
2884 if (!((Const *) node)->constisnull)
2885 {
2886 int64 offset = DatumGetInt64(((Const *) node)->constvalue);
2887
2888 if (offset != 0)
2889 return true; /* OFFSET with a nonzero value */
2890 }
2891 }
2892 else
2893 return true; /* non-constant OFFSET */
2894 }
2895
2896 return false; /* don't need a Limit plan node */
2897}
2898
2899/*
2900 * preprocess_groupclause - do preparatory work on GROUP BY clause
2901 *
2902 * The idea here is to adjust the ordering of the GROUP BY elements
2903 * (which in itself is semantically insignificant) to match ORDER BY,
2904 * thereby allowing a single sort operation to both implement the ORDER BY
2905 * requirement and set up for a Unique step that implements GROUP BY.
2906 * We also consider partial match between GROUP BY and ORDER BY elements,
2907 * which could allow to implement ORDER BY using the incremental sort.
2908 *
2909 * We also consider other orderings of the GROUP BY elements, which could
2910 * match the sort ordering of other possible plans (eg an indexscan) and
2911 * thereby reduce cost. This is implemented during the generation of grouping
2912 * paths. See get_useful_group_keys_orderings() for details.
2913 *
2914 * Note: we need no comparable processing of the distinctClause because
2915 * the parser already enforced that that matches ORDER BY.
2916 *
2917 * Note: we return a fresh List, but its elements are the same
2918 * SortGroupClauses appearing in parse->groupClause. This is important
2919 * because later processing may modify the processed_groupClause list.
2920 *
2921 * For grouping sets, the order of items is instead forced to agree with that
2922 * of the grouping set (and items not in the grouping set are skipped). The
2923 * work of sorting the order of grouping set elements to match the ORDER BY if
2924 * possible is done elsewhere.
2925 */
2926static List *
2928{
2929 Query *parse = root->parse;
2931 ListCell *sl;
2932 ListCell *gl;
2933
2934 /* For grouping sets, we need to force the ordering */
2935 if (force)
2936 {
2937 foreach(sl, force)
2938 {
2941
2943 }
2944
2945 return new_groupclause;
2946 }
2947
2948 /* If no ORDER BY, nothing useful to do here */
2949 if (parse->sortClause == NIL)
2950 return list_copy(parse->groupClause);
2951
2952 /*
2953 * Scan the ORDER BY clause and construct a list of matching GROUP BY
2954 * items, but only as far as we can make a matching prefix.
2955 *
2956 * This code assumes that the sortClause contains no duplicate items.
2957 */
2958 foreach(sl, parse->sortClause)
2959 {
2961
2962 foreach(gl, parse->groupClause)
2963 {
2965
2966 if (equal(gc, sc))
2967 {
2969 break;
2970 }
2971 }
2972 if (gl == NULL)
2973 break; /* no match, so stop scanning */
2974 }
2975
2976
2977 /* If no match at all, no point in reordering GROUP BY */
2978 if (new_groupclause == NIL)
2979 return list_copy(parse->groupClause);
2980
2981 /*
2982 * Add any remaining GROUP BY items to the new list. We don't require a
2983 * complete match, because even partial match allows ORDER BY to be
2984 * implemented using incremental sort. Also, give up if there are any
2985 * non-sortable GROUP BY items, since then there's no hope anyway.
2986 */
2987 foreach(gl, parse->groupClause)
2988 {
2990
2992 continue; /* it matched an ORDER BY item */
2993 if (!OidIsValid(gc->sortop)) /* give up, GROUP BY can't be sorted */
2994 return list_copy(parse->groupClause);
2996 }
2997
2998 /* Success --- install the rearranged GROUP BY list */
3000 return new_groupclause;
3001}
3002
3003/*
3004 * Extract lists of grouping sets that can be implemented using a single
3005 * rollup-type aggregate pass each. Returns a list of lists of grouping sets.
3006 *
3007 * Input must be sorted with smallest sets first. Result has each sublist
3008 * sorted with smallest sets first.
3009 *
3010 * We want to produce the absolute minimum possible number of lists here to
3011 * avoid excess sorts. Fortunately, there is an algorithm for this; the problem
3012 * of finding the minimal partition of a partially-ordered set into chains
3013 * (which is what we need, taking the list of grouping sets as a poset ordered
3014 * by set inclusion) can be mapped to the problem of finding the maximum
3015 * cardinality matching on a bipartite graph, which is solvable in polynomial
3016 * time with a worst case of no worse than O(n^2.5) and usually much
3017 * better. Since our N is at most 4096, we don't need to consider fallbacks to
3018 * heuristic or approximate methods. (Planning time for a 12-d cube is under
3019 * half a second on my modest system even with optimization off and assertions
3020 * on.)
3021 */
3022static List *
3024{
3025 int num_sets_raw = list_length(groupingSets);
3026 int num_empty = 0;
3027 int num_sets = 0; /* distinct sets */
3028 int num_chains = 0;
3029 List *result = NIL;
3030 List **results;
3031 List **orig_sets;
3033 int *chains;
3034 short **adjacency;
3035 short *adjacency_buf;
3037 int i;
3038 int j;
3039 int j_size;
3040 ListCell *lc1 = list_head(groupingSets);
3041 ListCell *lc;
3042
3043 /*
3044 * Start by stripping out empty sets. The algorithm doesn't require this,
3045 * but the planner currently needs all empty sets to be returned in the
3046 * first list, so we strip them here and add them back after.
3047 */
3048 while (lc1 && lfirst(lc1) == NIL)
3049 {
3050 ++num_empty;
3051 lc1 = lnext(groupingSets, lc1);
3052 }
3053
3054 /* bail out now if it turns out that all we had were empty sets. */
3055 if (!lc1)
3056 return list_make1(groupingSets);
3057
3058 /*----------
3059 * We don't strictly need to remove duplicate sets here, but if we don't,
3060 * they tend to become scattered through the result, which is a bit
3061 * confusing (and irritating if we ever decide to optimize them out).
3062 * So we remove them here and add them back after.
3063 *
3064 * For each non-duplicate set, we fill in the following:
3065 *
3066 * orig_sets[i] = list of the original set lists
3067 * set_masks[i] = bitmapset for testing inclusion
3068 * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
3069 *
3070 * chains[i] will be the result group this set is assigned to.
3071 *
3072 * We index all of these from 1 rather than 0 because it is convenient
3073 * to leave 0 free for the NIL node in the graph algorithm.
3074 *----------
3075 */
3076 orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
3077 set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
3078 adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
3079 adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
3080
3081 j_size = 0;
3082 j = 0;
3083 i = 1;
3084
3085 for_each_cell(lc, groupingSets, lc1)
3086 {
3087 List *candidate = (List *) lfirst(lc);
3089 ListCell *lc2;
3090 int dup_of = 0;
3091
3092 foreach(lc2, candidate)
3093 {
3095 }
3096
3097 /* we can only be a dup if we're the same length as a previous set */
3099 {
3100 int k;
3101
3102 for (k = j; k < i; ++k)
3103 {
3105 {
3106 dup_of = k;
3107 break;
3108 }
3109 }
3110 }
3111 else if (j_size < list_length(candidate))
3112 {
3114 j = i;
3115 }
3116
3117 if (dup_of > 0)
3118 {
3121 }
3122 else
3123 {
3124 int k;
3125 int n_adj = 0;
3126
3129
3130 /* fill in adjacency list; no need to compare equal-size sets */
3131
3132 for (k = j - 1; k > 0; --k)
3133 {
3135 adjacency_buf[++n_adj] = k;
3136 }
3137
3138 if (n_adj > 0)
3139 {
3140 adjacency_buf[0] = n_adj;
3141 adjacency[i] = palloc((n_adj + 1) * sizeof(short));
3142 memcpy(adjacency[i], adjacency_buf, (n_adj + 1) * sizeof(short));
3143 }
3144 else
3145 adjacency[i] = NULL;
3146
3147 ++i;
3148 }
3149 }
3150
3151 num_sets = i - 1;
3152
3153 /*
3154 * Apply the graph matching algorithm to do the work.
3155 */
3156 state = BipartiteMatch(num_sets, num_sets, adjacency);
3157
3158 /*
3159 * Now, the state->pair* fields have the info we need to assign sets to
3160 * chains. Two sets (u,v) belong to the same chain if pair_uv[u] = v or
3161 * pair_vu[v] = u (both will be true, but we check both so that we can do
3162 * it in one pass)
3163 */
3164 chains = palloc0((num_sets + 1) * sizeof(int));
3165
3166 for (i = 1; i <= num_sets; ++i)
3167 {
3168 int u = state->pair_vu[i];
3169 int v = state->pair_uv[i];
3170
3171 if (u > 0 && u < i)
3172 chains[i] = chains[u];
3173 else if (v > 0 && v < i)
3174 chains[i] = chains[v];
3175 else
3176 chains[i] = ++num_chains;
3177 }
3178
3179 /* build result lists. */
3180 results = palloc0((num_chains + 1) * sizeof(List *));
3181
3182 for (i = 1; i <= num_sets; ++i)
3183 {
3184 int c = chains[i];
3185
3186 Assert(c > 0);
3187
3188 results[c] = list_concat(results[c], orig_sets[i]);
3189 }
3190
3191 /* push any empty sets back on the first list. */
3192 while (num_empty-- > 0)
3193 results[1] = lcons(NIL, results[1]);
3194
3195 /* make result list */
3196 for (i = 1; i <= num_chains; ++i)
3197 result = lappend(result, results[i]);
3198
3199 /*
3200 * Free all the things.
3201 *
3202 * (This is over-fussy for small sets but for large sets we could have
3203 * tied up a nontrivial amount of memory.)
3204 */
3206 pfree(results);
3207 pfree(chains);
3208 for (i = 1; i <= num_sets; ++i)
3209 if (adjacency[i])
3210 pfree(adjacency[i]);
3211 pfree(adjacency);
3214 for (i = 1; i <= num_sets; ++i)
3217
3218 return result;
3219}
3220
3221/*
3222 * Reorder the elements of a list of grouping sets such that they have correct
3223 * prefix relationships. Also inserts the GroupingSetData annotations.
3224 *
3225 * The input must be ordered with smallest sets first; the result is returned
3226 * with largest sets first. Note that the result shares no list substructure
3227 * with the input, so it's safe for the caller to modify it later.
3228 *
3229 * If we're passed in a sortclause, we follow its order of columns to the
3230 * extent possible, to minimize the chance that we add unnecessary sorts.
3231 * (We're trying here to ensure that GROUPING SETS ((a,b,c),(c)) ORDER BY c,b,a
3232 * gets implemented in one pass.)
3233 */
3234static List *
3236{
3237 ListCell *lc;
3238 List *previous = NIL;
3239 List *result = NIL;
3240
3241 foreach(lc, groupingSets)
3242 {
3243 List *candidate = (List *) lfirst(lc);
3246
3247 while (list_length(sortclause) > list_length(previous) &&
3248 new_elems != NIL)
3249 {
3251 int ref = sc->tleSortGroupRef;
3252
3254 {
3255 previous = lappend_int(previous, ref);
3257 }
3258 else
3259 {
3260 /* diverged from the sortclause; give up on it */
3261 sortclause = NIL;
3262 break;
3263 }
3264 }
3265
3266 previous = list_concat(previous, new_elems);
3267
3268 gs->set = list_copy(previous);
3269 result = lcons(gs, result);
3270 }
3271
3272 list_free(previous);
3273
3274 return result;
3275}
3276
3277/*
3278 * has_volatile_pathkey
3279 * Returns true if any PathKey in 'keys' has an EquivalenceClass
3280 * containing a volatile function. Otherwise returns false.
3281 */
3282static bool
3284{
3285 ListCell *lc;
3286
3287 foreach(lc, keys)
3288 {
3290
3291 if (pathkey->pk_eclass->ec_has_volatile)
3292 return true;
3293 }
3294
3295 return false;
3296}
3297
3298/*
3299 * adjust_group_pathkeys_for_groupagg
3300 * Add pathkeys to root->group_pathkeys to reflect the best set of
3301 * pre-ordered input for ordered aggregates.
3302 *
3303 * We define "best" as the pathkeys that suit the largest number of
3304 * aggregate functions. We find these by looking at the first ORDER BY /
3305 * DISTINCT aggregate and take the pathkeys for that before searching for
3306 * other aggregates that require the same or a more strict variation of the
3307 * same pathkeys. We then repeat that process for any remaining aggregates
3308 * with different pathkeys and if we find another set of pathkeys that suits a
3309 * larger number of aggregates then we select those pathkeys instead.
3310 *
3311 * When the best pathkeys are found we also mark each Aggref that can use
3312 * those pathkeys as aggpresorted = true.
3313 *
3314 * Note: When an aggregate function's ORDER BY / DISTINCT clause contains any
3315 * volatile functions, we never make use of these pathkeys. We want to ensure
3316 * that sorts using volatile functions are done independently in each Aggref
3317 * rather than once at the query level. If we were to allow this then Aggrefs
3318 * with compatible sort orders would all transition their rows in the same
3319 * order if those pathkeys were deemed to be the best pathkeys to sort on.
3320 * Whereas, if some other set of Aggref's pathkeys happened to be deemed
3321 * better pathkeys to sort on, then the volatile function Aggrefs would be
3322 * left to perform their sorts individually. To avoid this inconsistent
3323 * behavior which could make Aggref results depend on what other Aggrefs the
3324 * query contains, we always force Aggrefs with volatile functions to perform
3325 * their own sorts.
3326 */
3327static void
3329{
3330 List *grouppathkeys = root->group_pathkeys;
3334 ListCell *lc;
3335 int i;
3336
3337 /* Shouldn't be here if there are grouping sets */
3338 Assert(root->parse->groupingSets == NIL);
3339 /* Shouldn't be here unless there are some ordered aggregates */
3340 Assert(root->numOrderedAggs > 0);
3341
3342 /* Do nothing if disabled */
3344 return;
3345
3346 /*
3347 * Make a first pass over all AggInfos to collect a Bitmapset containing
3348 * the indexes of all AggInfos to be processed below.
3349 */
3351 foreach(lc, root->agginfos)
3352 {
3354 Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3355
3356 if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
3357 continue;
3358
3359 /* Skip unless there's a DISTINCT or ORDER BY clause */
3360 if (aggref->aggdistinct == NIL && aggref->aggorder == NIL)
3361 continue;
3362
3363 /* Additional safety checks are needed if there's a FILTER clause */
3364 if (aggref->aggfilter != NULL)
3365 {
3366 ListCell *lc2;
3367 bool allow_presort = true;
3368
3369 /*
3370 * When the Aggref has a FILTER clause, it's possible that the
3371 * filter removes rows that cannot be sorted because the
3372 * expression to sort by results in an error during its
3373 * evaluation. This is a problem for presorting as that happens
3374 * before the FILTER, whereas without presorting, the Aggregate
3375 * node will apply the FILTER *before* sorting. So that we never
3376 * try to sort anything that might error, here we aim to skip over
3377 * any Aggrefs with arguments with expressions which, when
3378 * evaluated, could cause an ERROR. Vars and Consts are ok. There
3379 * may be more cases that should be allowed, but more thought
3380 * needs to be given. Err on the side of caution.
3381 */
3382 foreach(lc2, aggref->args)
3383 {
3385 Expr *expr = tle->expr;
3386
3387 while (IsA(expr, RelabelType))
3388 expr = (Expr *) (castNode(RelabelType, expr))->arg;
3389
3390 /* Common case, Vars and Consts are ok */
3391 if (IsA(expr, Var) || IsA(expr, Const))
3392 continue;
3393
3394 /* Unsupported. Don't try to presort for this Aggref */
3395 allow_presort = false;
3396 break;
3397 }
3398
3399 /* Skip unsupported Aggrefs */
3400 if (!allow_presort)
3401 continue;
3402 }
3403
3406 }
3407
3408 /*
3409 * Now process all the unprocessed_aggs to find the best set of pathkeys
3410 * for the given set of aggregates.
3411 *
3412 * On the first outer loop here 'bestaggs' will be empty. We'll populate
3413 * this during the first loop using the pathkeys for the very first
3414 * AggInfo then taking any stronger pathkeys from any other AggInfos with
3415 * a more strict set of compatible pathkeys. Once the outer loop is
3416 * complete, we mark off all the aggregates with compatible pathkeys then
3417 * remove those from the unprocessed_aggs and repeat the process to try to
3418 * find another set of pathkeys that are suitable for a larger number of
3419 * aggregates. The outer loop will stop when there are not enough
3420 * unprocessed aggregates for it to be possible to find a set of pathkeys
3421 * to suit a larger number of aggregates.
3422 */
3423 bestpathkeys = NIL;
3424 bestaggs = NULL;
3426 {
3429
3430 i = -1;
3431 while ((i = bms_next_member(unprocessed_aggs, i)) >= 0)
3432 {
3433 AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3434 Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3435 List *sortlist;
3436 List *pathkeys;
3437
3438 if (aggref->aggdistinct != NIL)
3439 sortlist = aggref->aggdistinct;
3440 else
3441 sortlist = aggref->aggorder;
3442
3444 aggref->args);
3445
3446 /*
3447 * Ignore Aggrefs which have volatile functions in their ORDER BY
3448 * or DISTINCT clause.
3449 */
3450 if (has_volatile_pathkey(pathkeys))
3451 {
3453 continue;
3454 }
3455
3456 /*
3457 * When not set yet, take the pathkeys from the first unprocessed
3458 * aggregate.
3459 */
3460 if (currpathkeys == NIL)
3461 {
3462 currpathkeys = pathkeys;
3463
3464 /* include the GROUP BY pathkeys, if they exist */
3465 if (grouppathkeys != NIL)
3467 currpathkeys);
3468
3469 /* record that we found pathkeys for this aggregate */
3471 }
3472 else
3473 {
3474 /* now look for a stronger set of matching pathkeys */
3475
3476 /* include the GROUP BY pathkeys, if they exist */
3477 if (grouppathkeys != NIL)
3479 pathkeys);
3480
3481 /* are 'pathkeys' compatible or better than 'currpathkeys'? */
3482 switch (compare_pathkeys(currpathkeys, pathkeys))
3483 {
3484 case PATHKEYS_BETTER2:
3485 /* 'pathkeys' are stronger, use these ones instead */
3486 currpathkeys = pathkeys;
3488
3489 case PATHKEYS_BETTER1:
3490 /* 'pathkeys' are less strict */
3492
3493 case PATHKEYS_EQUAL:
3494 /* mark this aggregate as covered by 'currpathkeys' */
3496 break;
3497
3498 case PATHKEYS_DIFFERENT:
3499 break;
3500 }
3501 }
3502 }
3503
3504 /* remove the aggregates that we've just processed */
3506
3507 /*
3508 * If this pass included more aggregates than the previous best then
3509 * use these ones as the best set.
3510 */
3512 {
3515 }
3516 }
3517
3518 /*
3519 * If we found any ordered aggregates, update root->group_pathkeys to add
3520 * the best set of aggregate pathkeys. Note that bestpathkeys includes
3521 * the original GROUP BY pathkeys already.
3522 */
3523 if (bestpathkeys != NIL)
3524 root->group_pathkeys = bestpathkeys;
3525
3526 /*
3527 * Now that we've found the best set of aggregates we can set the
3528 * presorted flag to indicate to the executor that it needn't bother
3529 * performing a sort for these Aggrefs. We're able to do this now as
3530 * there's no chance of a Hash Aggregate plan as create_grouping_paths
3531 * will not mark the GROUP BY as GROUPING_CAN_USE_HASH due to the presence
3532 * of ordered aggregates.
3533 */
3534 i = -1;
3535 while ((i = bms_next_member(bestaggs, i)) >= 0)
3536 {
3537 AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3538
3539 foreach(lc, agginfo->aggrefs)
3540 {
3541 Aggref *aggref = lfirst_node(Aggref, lc);
3542
3543 aggref->aggpresorted = true;
3544 }
3545 }
3546}
3547
3548/*
3549 * Compute query_pathkeys and other pathkeys during plan generation
3550 */
3551static void
3553{
3554 Query *parse = root->parse;
3556 List *tlist = root->processed_tlist;
3557 List *activeWindows = qp_extra->activeWindows;
3558
3559 /*
3560 * Calculate pathkeys that represent grouping/ordering and/or ordered
3561 * aggregate requirements.
3562 */
3563 if (qp_extra->gset_data)
3564 {
3565 /*
3566 * With grouping sets, just use the first RollupData's groupClause. We
3567 * don't make any effort to optimize grouping clauses when there are
3568 * grouping sets, nor can we combine aggregate ordering keys with
3569 * grouping.
3570 */
3571 List *rollups = qp_extra->gset_data->rollups;
3572 List *groupClause = (rollups ? linitial_node(RollupData, rollups)->groupClause : NIL);
3573
3574 if (grouping_is_sortable(groupClause))
3575 {
3576 bool sortable;
3577
3578 /*
3579 * The groupClause is logically below the grouping step. So if
3580 * there is an RTE entry for the grouping step, we need to remove
3581 * its RT index from the sort expressions before we make PathKeys
3582 * for them.
3583 */
3584 root->group_pathkeys =
3586 &groupClause,
3587 tlist,
3588 false,
3589 parse->hasGroupRTE,
3590 &sortable,
3591 false);
3593 root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3594 }
3595 else
3596 {
3597 root->group_pathkeys = NIL;
3598 root->num_groupby_pathkeys = 0;
3599 }
3600 }
3601 else if (parse->groupClause || root->numOrderedAggs > 0)
3602 {
3603 /*
3604 * With a plain GROUP BY list, we can remove any grouping items that
3605 * are proven redundant by EquivalenceClass processing. For example,
3606 * we can remove y given "WHERE x = y GROUP BY x, y". These aren't
3607 * especially common cases, but they're nearly free to detect. Note
3608 * that we remove redundant items from processed_groupClause but not
3609 * the original parse->groupClause.
3610 */
3611 bool sortable;
3612
3613 /*
3614 * Convert group clauses into pathkeys. Set the ec_sortref field of
3615 * EquivalenceClass'es if it's not set yet.
3616 */
3617 root->group_pathkeys =
3619 &root->processed_groupClause,
3620 tlist,
3621 true,
3622 false,
3623 &sortable,
3624 true);
3625 if (!sortable)
3626 {
3627 /* Can't sort; no point in considering aggregate ordering either */
3628 root->group_pathkeys = NIL;
3629 root->num_groupby_pathkeys = 0;
3630 }
3631 else
3632 {
3633 root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3634 /* If we have ordered aggs, consider adding onto group_pathkeys */
3635 if (root->numOrderedAggs > 0)
3637 }
3638 }
3639 else
3640 {
3641 root->group_pathkeys = NIL;
3642 root->num_groupby_pathkeys = 0;
3643 }
3644
3645 /* We consider only the first (bottom) window in pathkeys logic */
3646 if (activeWindows != NIL)
3647 {
3648 WindowClause *wc = linitial_node(WindowClause, activeWindows);
3649
3650 root->window_pathkeys = make_pathkeys_for_window(root,
3651 wc,
3652 tlist);
3653 }
3654 else
3655 root->window_pathkeys = NIL;
3656
3657 /*
3658 * As with GROUP BY, we can discard any DISTINCT items that are proven
3659 * redundant by EquivalenceClass processing. The non-redundant list is
3660 * kept in root->processed_distinctClause, leaving the original
3661 * parse->distinctClause alone.
3662 */
3663 if (parse->distinctClause)
3664 {
3665 bool sortable;
3666
3667 /* Make a copy since pathkey processing can modify the list */
3668 root->processed_distinctClause = list_copy(parse->distinctClause);
3669 root->distinct_pathkeys =
3671 &root->processed_distinctClause,
3672 tlist,
3673 true,
3674 false,
3675 &sortable,
3676 false);
3677 if (!sortable)
3678 root->distinct_pathkeys = NIL;
3679 }
3680 else
3681 root->distinct_pathkeys = NIL;
3682
3683 root->sort_pathkeys =
3685 parse->sortClause,
3686 tlist);
3687
3688 /* setting setop_pathkeys might be useful to the union planner */
3689 if (qp_extra->setop != NULL)
3690 {
3691 List *groupClauses;
3692 bool sortable;
3693
3694 groupClauses = generate_setop_child_grouplist(qp_extra->setop, tlist);
3695
3696 root->setop_pathkeys =
3698 &groupClauses,
3699 tlist,
3700 false,
3701 false,
3702 &sortable,
3703 false);
3704 if (!sortable)
3705 root->setop_pathkeys = NIL;
3706 }
3707 else
3708 root->setop_pathkeys = NIL;
3709
3710 /*
3711 * Figure out whether we want a sorted result from query_planner.
3712 *
3713 * If we have a sortable GROUP BY clause, then we want a result sorted
3714 * properly for grouping. Otherwise, if we have window functions to
3715 * evaluate, we try to sort for the first window. Otherwise, if there's a
3716 * sortable DISTINCT clause that's more rigorous than the ORDER BY clause,
3717 * we try to produce output that's sufficiently well sorted for the
3718 * DISTINCT. Otherwise, if there is an ORDER BY clause, we want to sort
3719 * by the ORDER BY clause. Otherwise, if we're a subquery being planned
3720 * for a set operation which can benefit from presorted results and have a
3721 * sortable targetlist, we want to sort by the target list.
3722 *
3723 * Note: if we have both ORDER BY and GROUP BY, and ORDER BY is a superset
3724 * of GROUP BY, it would be tempting to request sort by ORDER BY --- but
3725 * that might just leave us failing to exploit an available sort order at
3726 * all. Needs more thought. The choice for DISTINCT versus ORDER BY is
3727 * much easier, since we know that the parser ensured that one is a
3728 * superset of the other.
3729 */
3730 if (root->group_pathkeys)
3731 root->query_pathkeys = root->group_pathkeys;
3732 else if (root->window_pathkeys)
3733 root->query_pathkeys = root->window_pathkeys;
3734 else if (list_length(root->distinct_pathkeys) >
3735 list_length(root->sort_pathkeys))
3736 root->query_pathkeys = root->distinct_pathkeys;
3737 else if (root->sort_pathkeys)
3738 root->query_pathkeys = root->sort_pathkeys;
3739 else if (root->setop_pathkeys != NIL)
3740 root->query_pathkeys = root->setop_pathkeys;
3741 else
3742 root->query_pathkeys = NIL;
3743}
3744
3745/*
3746 * Estimate number of groups produced by grouping clauses (1 if not grouping)
3747 *
3748 * path_rows: number of output rows from scan/join step
3749 * gd: grouping sets data including list of grouping sets and their clauses
3750 * target_list: target list containing group clause references
3751 *
3752 * If doing grouping sets, we also annotate the gsets data with the estimates
3753 * for each set and each individual rollup list, with a view to later
3754 * determining whether some combination of them could be hashed instead.
3755 */
3756static double
3758 double path_rows,
3761{
3762 Query *parse = root->parse;
3763 double dNumGroups;
3764
3765 if (parse->groupClause)
3766 {
3768
3769 if (parse->groupingSets)
3770 {
3771 /* Add up the estimates for each grouping set */
3772 ListCell *lc;
3773
3774 Assert(gd); /* keep Coverity happy */
3775
3776 dNumGroups = 0;
3777
3778 foreach(lc, gd->rollups)
3779 {
3781 ListCell *lc2;
3782 ListCell *lc3;
3783
3785 target_list);
3786
3787 rollup->numGroups = 0.0;
3788
3789 forboth(lc2, rollup->gsets, lc3, rollup->gsets_data)
3790 {
3791 List *gset = (List *) lfirst(lc2);
3793 double numGroups = estimate_num_groups(root,
3794 groupExprs,
3795 path_rows,
3796 &gset,
3797 NULL);
3798
3799 gs->numGroups = numGroups;
3800 rollup->numGroups += numGroups;
3801 }
3802
3803 dNumGroups += rollup->numGroups;
3804 }
3805
3806 if (gd->hash_sets_idx)
3807 {
3808 ListCell *lc2;
3809
3810 gd->dNumHashGroups = 0;
3811
3813 target_list);
3814
3815 forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets)
3816 {
3817 List *gset = (List *) lfirst(lc);
3819 double numGroups = estimate_num_groups(root,
3820 groupExprs,
3821 path_rows,
3822 &gset,
3823 NULL);
3824
3825 gs->numGroups = numGroups;
3826 gd->dNumHashGroups += numGroups;
3827 }
3828
3829 dNumGroups += gd->dNumHashGroups;
3830 }
3831 }
3832 else
3833 {
3834 /* Plain GROUP BY -- estimate based on optimized groupClause */
3835 groupExprs = get_sortgrouplist_exprs(root->processed_groupClause,
3836 target_list);
3837
3839 NULL, NULL);
3840 }
3841 }
3842 else if (parse->groupingSets)
3843 {
3844 /* Empty grouping sets ... one result row for each one */
3845 dNumGroups = list_length(parse->groupingSets);
3846 }
3847 else if (parse->hasAggs || root->hasHavingQual)
3848 {
3849 /* Plain aggregation, one result row */
3850 dNumGroups = 1;
3851 }
3852 else
3853 {
3854 /* Not grouping */
3855 dNumGroups = 1;
3856 }
3857
3858 return dNumGroups;
3859}
3860
3861/*
3862 * create_grouping_paths
3863 *
3864 * Build a new upperrel containing Paths for grouping and/or aggregation.
3865 * Along the way, we also build an upperrel for Paths which are partially
3866 * grouped and/or aggregated. A partially grouped and/or aggregated path
3867 * needs a FinalizeAggregate node to complete the aggregation. Currently,
3868 * the only partially grouped paths we build are also partial paths; that
3869 * is, they need a Gather and then a FinalizeAggregate.
3870 *
3871 * input_rel: contains the source-data Paths
3872 * target: the pathtarget for the result Paths to compute
3873 * gd: grouping sets data including list of grouping sets and their clauses
3874 *
3875 * Note: all Paths in input_rel are expected to return the target computed
3876 * by make_group_input_target.
3877 */
3878static RelOptInfo *
3881 PathTarget *target,
3882 bool target_parallel_safe,
3884{
3885 Query *parse = root->parse;
3886 RelOptInfo *grouped_rel;
3889
3890 MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
3892
3893 /*
3894 * Create grouping relation to hold fully aggregated grouping and/or
3895 * aggregation paths.
3896 */
3897 grouped_rel = make_grouping_rel(root, input_rel, target,
3898 target_parallel_safe, parse->havingQual);
3899
3900 /*
3901 * Create either paths for a degenerate grouping or paths for ordinary
3902 * grouping, as appropriate.
3903 */
3906 else
3907 {
3908 int flags = 0;
3909 GroupPathExtraData extra;
3910
3911 /*
3912 * Determine whether it's possible to perform sort-based
3913 * implementations of grouping. (Note that if processed_groupClause
3914 * is empty, grouping_is_sortable() is trivially true, and all the
3915 * pathkeys_contained_in() tests will succeed too, so that we'll
3916 * consider every surviving input path.)
3917 *
3918 * If we have grouping sets, we might be able to sort some but not all
3919 * of them; in this case, we need can_sort to be true as long as we
3920 * must consider any sorted-input plan.
3921 */
3922 if ((gd && gd->rollups != NIL)
3923 || grouping_is_sortable(root->processed_groupClause))
3924 flags |= GROUPING_CAN_USE_SORT;
3925
3926 /*
3927 * Determine whether we should consider hash-based implementations of
3928 * grouping.
3929 *
3930 * Hashed aggregation only applies if we're grouping. If we have
3931 * grouping sets, some groups might be hashable but others not; in
3932 * this case we set can_hash true as long as there is nothing globally
3933 * preventing us from hashing (and we should therefore consider plans
3934 * with hashes).
3935 *
3936 * Executor doesn't support hashed aggregation with DISTINCT or ORDER
3937 * BY aggregates. (Doing so would imply storing *all* the input
3938 * values in the hash table, and/or running many sorts in parallel,
3939 * either of which seems like a certain loser.) We similarly don't
3940 * support ordered-set aggregates in hashed aggregation, but that case
3941 * is also included in the numOrderedAggs count.
3942 *
3943 * Note: grouping_is_hashable() is much more expensive to check than
3944 * the other gating conditions, so we want to do it last.
3945 */
3946 if ((parse->groupClause != NIL &&
3947 root->numOrderedAggs == 0 &&
3948 (gd ? gd->any_hashable : grouping_is_hashable(root->processed_groupClause))))
3949 flags |= GROUPING_CAN_USE_HASH;
3950
3951 /*
3952 * Determine whether partial aggregation is possible.
3953 */
3954 if (can_partial_agg(root))
3955 flags |= GROUPING_CAN_PARTIAL_AGG;
3956
3957 extra.flags = flags;
3958 extra.target_parallel_safe = target_parallel_safe;
3959 extra.havingQual = parse->havingQual;
3960 extra.targetList = parse->targetList;
3961 extra.partial_costs_set = false;
3962
3963 /*
3964 * Determine whether partitionwise aggregation is in theory possible.
3965 * It can be disabled by the user, and for now, we don't try to
3966 * support grouping sets. create_ordinary_grouping_paths() will check
3967 * additional conditions, such as whether input_rel is partitioned.
3968 */
3969 if (enable_partitionwise_aggregate && !parse->groupingSets)
3971 else
3973
3975 &agg_costs, gd, &extra,
3977 }
3978
3979 set_cheapest(grouped_rel);
3980 return grouped_rel;
3981}
3982
3983/*
3984 * make_grouping_rel
3985 *
3986 * Create a new grouping rel and set basic properties.
3987 *
3988 * input_rel represents the underlying scan/join relation.
3989 * target is the output expected from the grouping relation.
3990 */
3991static RelOptInfo *
3993 PathTarget *target, bool target_parallel_safe,
3994 Node *havingQual)
3995{
3996 RelOptInfo *grouped_rel;
3997
3999 {
4001 input_rel->relids);
4002 grouped_rel->reloptkind = RELOPT_OTHER_UPPER_REL;
4003 }
4004 else
4005 {
4006 /*
4007 * By tradition, the relids set for the main grouping relation is
4008 * NULL. (This could be changed, but might require adjustments
4009 * elsewhere.)
4010 */
4012 }
4013
4014 /* Set target. */
4015 grouped_rel->reltarget = target;
4016
4017 /*
4018 * If the input relation is not parallel-safe, then the grouped relation
4019 * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
4020 * target list and HAVING quals are parallel-safe.
4021 */
4022 if (input_rel->consider_parallel && target_parallel_safe &&
4023 is_parallel_safe(root, havingQual))
4024 grouped_rel->consider_parallel = true;
4025
4026 /* Assume that the same path generation strategies are allowed */
4027 grouped_rel->pgs_mask = input_rel->pgs_mask;
4028
4029 /*
4030 * If the input rel belongs to a single FDW, so does the grouped rel.
4031 */
4032 grouped_rel->serverid = input_rel->serverid;
4033 grouped_rel->userid = input_rel->userid;
4034 grouped_rel->useridiscurrent = input_rel->useridiscurrent;
4035 grouped_rel->fdwroutine = input_rel->fdwroutine;
4036
4037 return grouped_rel;
4038}
4039
4040/*
4041 * is_degenerate_grouping
4042 *
4043 * A degenerate grouping is one in which the query has a HAVING qual and/or
4044 * grouping sets, but no aggregates and no GROUP BY (which implies that the
4045 * grouping sets are all empty).
4046 */
4047static bool
4049{
4050 Query *parse = root->parse;
4051
4052 return (root->hasHavingQual || parse->groupingSets) &&
4053 !parse->hasAggs && parse->groupClause == NIL;
4054}
4055
4056/*
4057 * create_degenerate_grouping_paths
4058 *
4059 * When the grouping is degenerate (see is_degenerate_grouping), we are
4060 * supposed to emit either zero or one row for each grouping set depending on
4061 * whether HAVING succeeds. Furthermore, there cannot be any variables in
4062 * either HAVING or the targetlist, so we actually do not need the FROM table
4063 * at all! We can just throw away the plan-so-far and generate a Result node.
4064 * This is a sufficiently unusual corner case that it's not worth contorting
4065 * the structure of this module to avoid having to generate the earlier paths
4066 * in the first place.
4067 */
4068static void
4070 RelOptInfo *grouped_rel)
4071{
4072 Query *parse = root->parse;
4073 int nrows;
4074 Path *path;
4075
4076 nrows = list_length(parse->groupingSets);
4077 if (nrows > 1)
4078 {
4079 /*
4080 * Doesn't seem worthwhile writing code to cons up a generate_series
4081 * or a values scan to emit multiple rows. Instead just make N clones
4082 * and append them. (With a volatile HAVING clause, this means you
4083 * might get between 0 and N output rows. Offhand I think that's
4084 * desired.)
4085 */
4086 AppendPathInput append = {0};
4087
4088 while (--nrows >= 0)
4089 {
4090 path = (Path *)
4091 create_group_result_path(root, grouped_rel,
4092 grouped_rel->reltarget,
4093 (List *) parse->havingQual);
4094 append.subpaths = lappend(append.subpaths, path);
4095 }
4096 path = (Path *)
4098 grouped_rel,
4099 append,
4100 NIL,
4101 NULL,
4102 0,
4103 false,
4104 -1);
4105 }
4106 else
4107 {
4108 /* No grouping sets, or just one, so one output row */
4109 path = (Path *)
4110 create_group_result_path(root, grouped_rel,
4111 grouped_rel->reltarget,
4112 (List *) parse->havingQual);
4113 }
4114
4115 add_path(grouped_rel, path);
4116}
4117
4118/*
4119 * create_ordinary_grouping_paths
4120 *
4121 * Create grouping paths for the ordinary (that is, non-degenerate) case.
4122 *
4123 * We need to consider sorted and hashed aggregation in the same function,
4124 * because otherwise (1) it would be harder to throw an appropriate error
4125 * message if neither way works, and (2) we should not allow hashtable size
4126 * considerations to dissuade us from using hashing if sorting is not possible.
4127 *
4128 * *partially_grouped_rel_p will be set to the partially grouped rel which this
4129 * function creates, or to NULL if it doesn't create one.
4130 */
4131static void
4133 RelOptInfo *grouped_rel,
4136 GroupPathExtraData *extra,
4138{
4141
4142 /*
4143 * If this is the topmost grouping relation or if the parent relation is
4144 * doing some form of partitionwise aggregation, then we may be able to do
4145 * it at this level also. However, if the input relation is not
4146 * partitioned, partitionwise aggregate is impossible.
4147 */
4148 if (extra->patype != PARTITIONWISE_AGGREGATE_NONE &&
4150 {
4151 /*
4152 * If this is the topmost relation or if the parent relation is doing
4153 * full partitionwise aggregation, then we can do full partitionwise
4154 * aggregation provided that the GROUP BY clause contains all of the
4155 * partitioning columns at this level and the collation used by GROUP
4156 * BY matches the partitioning collation. Otherwise, we can do at
4157 * most partial partitionwise aggregation. But if partial aggregation
4158 * is not supported in general then we can't use it for partitionwise
4159 * aggregation either.
4160 *
4161 * Check parse->groupClause not processed_groupClause, because it's
4162 * okay if some of the partitioning columns were proved redundant.
4163 */
4164 if (extra->patype == PARTITIONWISE_AGGREGATE_FULL &&
4166 root->parse->groupClause))
4168 else if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4170 else
4172 }
4173
4174 /*
4175 * Before generating paths for grouped_rel, we first generate any possible
4176 * partially grouped paths; that way, later code can easily consider both
4177 * parallel and non-parallel approaches to grouping.
4178 */
4179 if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4180 {
4181 bool force_rel_creation;
4182
4183 /*
4184 * If we're doing partitionwise aggregation at this level, force
4185 * creation of a partially_grouped_rel so we can add partitionwise
4186 * paths to it.
4187 */
4189
4192 grouped_rel,
4193 input_rel,
4194 gd,
4195 extra,
4197 }
4198
4199 /* Set out parameter. */
4201
4202 /* Apply partitionwise aggregation technique, if possible. */
4203 if (patype != PARTITIONWISE_AGGREGATE_NONE)
4206 gd, patype, extra);
4207
4208 /* If we are doing partial aggregation only, return. */
4210 {
4212
4213 if (partially_grouped_rel->pathlist)
4215
4216 return;
4217 }
4218
4219 /* Gather any partially grouped partial paths. */
4220 if (partially_grouped_rel && partially_grouped_rel->partial_pathlist)
4222
4223 /* Now choose the best path(s) for partially_grouped_rel. */
4226
4227 /* Build final grouping paths */
4230 extra);
4231
4232 /* Give a helpful error if we failed to find any implementation */
4233 if (grouped_rel->pathlist == NIL)
4234 ereport(ERROR,
4236 errmsg("could not implement GROUP BY"),
4237 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4238
4239 /*
4240 * If there is an FDW that's responsible for all baserels of the query,
4241 * let it consider adding ForeignPaths.
4242 */
4243 if (grouped_rel->fdwroutine &&
4244 grouped_rel->fdwroutine->GetForeignUpperPaths)
4245 grouped_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_GROUP_AGG,
4246 input_rel, grouped_rel,
4247 extra);
4248
4249 /* Let extensions possibly add some more paths */
4251 (*create_upper_paths_hook) (root, UPPERREL_GROUP_AGG,
4252 input_rel, grouped_rel,
4253 extra);
4254}
4255
4256/*
4257 * For a given input path, consider the possible ways of doing grouping sets on
4258 * it, by combinations of hashing and sorting. This can be called multiple
4259 * times, so it's important that it not scribble on input. No result is
4260 * returned, but any generated paths are added to grouped_rel.
4261 */
4262static void
4264 RelOptInfo *grouped_rel,
4265 Path *path,
4266 bool is_sorted,
4267 bool can_hash,
4270 double dNumGroups)
4271{
4272 Query *parse = root->parse;
4273 Size hash_mem_limit = get_hash_memory_limit();
4274
4275 /*
4276 * If we're not being offered sorted input, then only consider plans that
4277 * can be done entirely by hashing.
4278 *
4279 * We can hash everything if it looks like it'll fit in hash_mem. But if
4280 * the input is actually sorted despite not being advertised as such, we
4281 * prefer to make use of that in order to use less memory.
4282 *
4283 * If none of the grouping sets are sortable, then ignore the hash_mem
4284 * limit and generate a path anyway, since otherwise we'll just fail.
4285 */
4286 if (!is_sorted)
4287 {
4288 List *new_rollups = NIL;
4290 List *sets_data;
4292 List *empty_sets = NIL;
4293 ListCell *lc;
4294 ListCell *l_start = list_head(gd->rollups);
4296 double hashsize;
4297 double exclude_groups = 0.0;
4298
4300
4301 /*
4302 * If the input is coincidentally sorted usefully (which can happen
4303 * even if is_sorted is false, since that only means that our caller
4304 * has set up the sorting for us), then save some hashtable space by
4305 * making use of that. But we need to watch out for degenerate cases:
4306 *
4307 * 1) If there are any empty grouping sets, then group_pathkeys might
4308 * be NIL if all non-empty grouping sets are unsortable. In this case,
4309 * there will be a rollup containing only empty groups, and the
4310 * pathkeys_contained_in test is vacuously true; this is ok.
4311 *
4312 * XXX: the above relies on the fact that group_pathkeys is generated
4313 * from the first rollup. If we add the ability to consider multiple
4314 * sort orders for grouping input, this assumption might fail.
4315 *
4316 * 2) If there are no empty sets and only unsortable sets, then the
4317 * rollups list will be empty (and thus l_start == NULL), and
4318 * group_pathkeys will be NIL; we must ensure that the vacuously-true
4319 * pathkeys_contained_in test doesn't cause us to crash.
4320 */
4321 if (l_start != NULL &&
4322 pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
4323 {
4325 exclude_groups = unhashed_rollup->numGroups;
4326 l_start = lnext(gd->rollups, l_start);
4327 }
4328
4330 path,
4331 agg_costs,
4333
4334 /*
4335 * gd->rollups is empty if we have only unsortable columns to work
4336 * with. Override hash_mem in that case; otherwise, we'll rely on the
4337 * sorted-input case to generate usable mixed paths.
4338 */
4339 if (hashsize > hash_mem_limit && gd->rollups)
4340 return; /* nope, won't fit */
4341
4342 /*
4343 * We need to burst the existing rollups list into individual grouping
4344 * sets and recompute a groupClause for each set.
4345 */
4346 sets_data = list_copy(gd->unsortable_sets);
4347
4348 for_each_cell(lc, gd->rollups, l_start)
4349 {
4351
4352 /*
4353 * If we find an unhashable rollup that's not been skipped by the
4354 * "actually sorted" check above, we can't cope; we'd need sorted
4355 * input (with a different sort order) but we can't get that here.
4356 * So bail out; we'll get a valid path from the is_sorted case
4357 * instead.
4358 *
4359 * The mere presence of empty grouping sets doesn't make a rollup
4360 * unhashable (see preprocess_grouping_sets), we handle those
4361 * specially below.
4362 */
4363 if (!rollup->hashable)
4364 return;
4365
4366 sets_data = list_concat(sets_data, rollup->gsets_data);
4367 }
4368 foreach(lc, sets_data)
4369 {
4371 List *gset = gs->set;
4373
4374 if (gset == NIL)
4375 {
4376 /* Empty grouping sets can't be hashed. */
4379 }
4380 else
4381 {
4383
4384 rollup->groupClause = preprocess_groupclause(root, gset);
4385 rollup->gsets_data = list_make1(gs);
4386 rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4387 rollup->gsets_data,
4388 gd->tleref_to_colnum_map);
4389 rollup->numGroups = gs->numGroups;
4390 rollup->hashable = true;
4391 rollup->is_hashed = true;
4393 }
4394 }
4395
4396 /*
4397 * If we didn't find anything nonempty to hash, then bail. We'll
4398 * generate a path from the is_sorted case.
4399 */
4400 if (new_rollups == NIL)
4401 return;
4402
4403 /*
4404 * If there were empty grouping sets they should have been in the
4405 * first rollup.
4406 */
4408
4409 if (unhashed_rollup)
4410 {
4412 strat = AGG_MIXED;
4413 }
4414 else if (empty_sets)
4415 {
4417
4418 rollup->groupClause = NIL;
4419 rollup->gsets_data = empty_sets_data;
4420 rollup->gsets = empty_sets;
4421 rollup->numGroups = list_length(empty_sets);
4422 rollup->hashable = false;
4423 rollup->is_hashed = false;
4425 strat = AGG_MIXED;
4426 }
4427
4428 add_path(grouped_rel, (Path *)
4430 grouped_rel,
4431 path,
4432 (List *) parse->havingQual,
4433 strat,
4435 agg_costs));
4436 return;
4437 }
4438
4439 /*
4440 * If we have sorted input but nothing we can do with it, bail.
4441 */
4442 if (gd->rollups == NIL)
4443 return;
4444
4445 /*
4446 * Given sorted input, we try and make two paths: one sorted and one mixed
4447 * sort/hash. (We need to try both because hashagg might be disabled, or
4448 * some columns might not be sortable.)
4449 *
4450 * can_hash is passed in as false if some obstacle elsewhere (such as
4451 * ordered aggs) means that we shouldn't consider hashing at all.
4452 */
4453 if (can_hash && gd->any_hashable)
4454 {
4455 List *rollups = NIL;
4456 List *hash_sets = list_copy(gd->unsortable_sets);
4457 double availspace = hash_mem_limit;
4458 ListCell *lc;
4459
4460 /*
4461 * Account first for space needed for groups we can't sort at all.
4462 */
4464 path,
4465 agg_costs,
4466 gd->dNumHashGroups);
4467
4468 if (availspace > 0 && list_length(gd->rollups) > 1)
4469 {
4470 double scale;
4471 int num_rollups = list_length(gd->rollups);
4472 int k_capacity;
4473 int *k_weights = palloc(num_rollups * sizeof(int));
4475 int i;
4476
4477 /*
4478 * We treat this as a knapsack problem: the knapsack capacity
4479 * represents hash_mem, the item weights are the estimated memory
4480 * usage of the hashtables needed to implement a single rollup,
4481 * and we really ought to use the cost saving as the item value;
4482 * however, currently the costs assigned to sort nodes don't
4483 * reflect the comparison costs well, and so we treat all items as
4484 * of equal value (each rollup we hash instead saves us one sort).
4485 *
4486 * To use the discrete knapsack, we need to scale the values to a
4487 * reasonably small bounded range. We choose to allow a 5% error
4488 * margin; we have no more than 4096 rollups in the worst possible
4489 * case, which with a 5% error margin will require a bit over 42MB
4490 * of workspace. (Anyone wanting to plan queries that complex had
4491 * better have the memory for it. In more reasonable cases, with
4492 * no more than a couple of dozen rollups, the memory usage will
4493 * be negligible.)
4494 *
4495 * k_capacity is naturally bounded, but we clamp the values for
4496 * scale and weight (below) to avoid overflows or underflows (or
4497 * uselessly trying to use a scale factor less than 1 byte).
4498 */
4499 scale = Max(availspace / (20.0 * num_rollups), 1.0);
4501
4502 /*
4503 * We leave the first rollup out of consideration since it's the
4504 * one that matches the input sort order. We assign indexes "i"
4505 * to only those entries considered for hashing; the second loop,
4506 * below, must use the same condition.
4507 */
4508 i = 0;
4509 for_each_from(lc, gd->rollups, 1)
4510 {
4512
4513 if (rollup->hashable)
4514 {
4516 path,
4517 agg_costs,
4518 rollup->numGroups);
4519
4520 /*
4521 * If sz is enormous, but hash_mem (and hence scale) is
4522 * small, avoid integer overflow here.
4523 */
4524 k_weights[i] = (int) Min(floor(sz / scale),
4525 k_capacity + 1.0);
4526 ++i;
4527 }
4528 }
4529
4530 /*
4531 * Apply knapsack algorithm; compute the set of items which
4532 * maximizes the value stored (in this case the number of sorts
4533 * saved) while keeping the total size (approximately) within
4534 * capacity.
4535 */
4536 if (i > 0)
4538
4540 {
4541 rollups = list_make1(linitial(gd->rollups));
4542
4543 i = 0;
4544 for_each_from(lc, gd->rollups, 1)
4545 {
4547
4548 if (rollup->hashable)
4549 {
4552 rollup->gsets_data);
4553 else
4554 rollups = lappend(rollups, rollup);
4555 ++i;
4556 }
4557 else
4558 rollups = lappend(rollups, rollup);
4559 }
4560 }
4561 }
4562
4563 if (!rollups && hash_sets)
4564 rollups = list_copy(gd->rollups);
4565
4566 foreach(lc, hash_sets)
4567 {
4570
4571 Assert(gs->set != NIL);
4572
4573 rollup->groupClause = preprocess_groupclause(root, gs->set);
4574 rollup->gsets_data = list_make1(gs);
4575 rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4576 rollup->gsets_data,
4577 gd->tleref_to_colnum_map);
4578 rollup->numGroups = gs->numGroups;
4579 rollup->hashable = true;
4580 rollup->is_hashed = true;
4581 rollups = lcons(rollup, rollups);
4582 }
4583
4584 if (rollups)
4585 {
4586 add_path(grouped_rel, (Path *)
4588 grouped_rel,
4589 path,
4590 (List *) parse->havingQual,
4591 AGG_MIXED,
4592 rollups,
4593 agg_costs));
4594 }
4595 }
4596
4597 /*
4598 * Now try the simple sorted case.
4599 */
4600 if (!gd->unsortable_sets)
4601 add_path(grouped_rel, (Path *)
4603 grouped_rel,
4604 path,
4605 (List *) parse->havingQual,
4606 AGG_SORTED,
4607 gd->rollups,
4608 agg_costs));
4609}
4610
4611/*
4612 * create_window_paths
4613 *
4614 * Build a new upperrel containing Paths for window-function evaluation.
4615 *
4616 * input_rel: contains the source-data Paths
4617 * input_target: result of make_window_input_target
4618 * output_target: what the topmost WindowAggPath should return
4619 * wflists: result of find_window_functions
4620 * activeWindows: result of select_active_windows
4621 *
4622 * Note: all Paths in input_rel are expected to return input_target.
4623 */
4624static RelOptInfo *
4631 List *activeWindows)
4632{
 /*
  * NOTE(review): this excerpt is an HTML code-browser export; lines whose
  * identifiers were hyperlinks (the function-name/parameter lines, the
  * declaration of window_rel, and several call sites) were dropped by the
  * extraction.  Surviving code is left byte-identical; restore from the
  * upstream file before compiling.
  */
4634 ListCell *lc;
4635
4636 /* For now, do all work in the (WINDOW, NULL) upperrel */
4638
4639 /*
4640 * If the input relation is not parallel-safe, then the window relation
4641 * can't be parallel-safe, either. Otherwise, we need to examine the
4642 * target list and active windows for non-parallel-safe constructs.
4643 */
4644 if (input_rel->consider_parallel && output_target_parallel_safe &&
4645 is_parallel_safe(root, (Node *) activeWindows))
4646 window_rel->consider_parallel = true;
4647
4648 /*
4649 * If the input rel belongs to a single FDW, so does the window rel.
4650 */
4651 window_rel->serverid = input_rel->serverid;
4652 window_rel->userid = input_rel->userid;
4653 window_rel->useridiscurrent = input_rel->useridiscurrent;
4654 window_rel->fdwroutine = input_rel->fdwroutine;
4655
4656 /*
4657 * Consider computing window functions starting from the existing
4658 * cheapest-total path (which will likely require a sort) as well as any
4659 * existing paths that satisfy or partially satisfy root->window_pathkeys.
4660 */
4661 foreach(lc, input_rel->pathlist)
4662 {
4663 Path *path = (Path *) lfirst(lc);
4664 int presorted_keys;
 /*
  * NOTE(review): presorted_keys is only assigned by the
  * pathkeys_count_contained_in() call below; the third disjunct reads
  * it afterwards, relying on short-circuit evaluation order of ||.
  */
4665
4666 if (path == input_rel->cheapest_total_path ||
4667 pathkeys_count_contained_in(root->window_pathkeys, path->pathkeys,
4668 &presorted_keys) ||
4669 presorted_keys > 0)
4671 window_rel,
4672 path,
4675 wflists,
4676 activeWindows);
4677 }
4678
4679 /*
4680 * If there is an FDW that's responsible for all baserels of the query,
4681 * let it consider adding ForeignPaths.
4682 */
4683 if (window_rel->fdwroutine &&
4684 window_rel->fdwroutine->GetForeignUpperPaths)
4685 window_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_WINDOW,
4687 NULL);
4688
4689 /* Let extensions possibly add some more paths */
4691 (*create_upper_paths_hook) (root, UPPERREL_WINDOW,
4693
4694 /* Now choose the best path(s) */
4696
4697 return window_rel;
4698}
4699
4700/*
4701 * Stack window-function implementation steps atop the given Path, and
4702 * add the result to window_rel.
4703 *
4704 * window_rel: upperrel to contain result
4705 * path: input Path to use (must return input_target)
4706 * input_target: result of make_window_input_target
4707 * output_target: what the topmost WindowAggPath should return
4708 * wflists: result of find_window_functions
4709 * activeWindows: result of select_active_windows
4710 */
4711static void
4714 Path *path,
4718 List *activeWindows)
4719{
 /*
  * NOTE(review): HTML-export artifact — lines carrying hyperlinked
  * identifiers (function name, window_rel/targets parameters, wc/wfunc
  * declarations, the create_sort_path/create_windowagg_path call names)
  * were dropped.  Code left byte-identical to the extraction.
  */
4721 ListCell *l;
4722 List *topqual = NIL;
4723
4724 /*
4725 * Since each window clause could require a different sort order, we stack
4726 * up a WindowAgg node for each clause, with sort steps between them as
4727 * needed. (We assume that select_active_windows chose a good order for
4728 * executing the clauses in.)
4729 *
4730 * input_target should contain all Vars and Aggs needed for the result.
4731 * (In some cases we wouldn't need to propagate all of these all the way
4732 * to the top, since they might only be needed as inputs to WindowFuncs.
4733 * It's probably not worth trying to optimize that though.) It must also
4734 * contain all window partitioning and sorting expressions, to ensure
4735 * they're computed only once at the bottom of the stack (that's critical
4736 * for volatile functions). As we climb up the stack, we'll add outputs
4737 * for the WindowFuncs computed at each level.
4738 */
4740
4741 foreach(l, activeWindows)
4742 {
4744 List *window_pathkeys;
4745 List *runcondition = NIL;
4746 int presorted_keys;
4747 bool is_sorted;
4748 bool topwindow;
4749 ListCell *lc2;
4750
4751 window_pathkeys = make_pathkeys_for_window(root,
4752 wc,
4753 root->processed_tlist);
4754
4755 is_sorted = pathkeys_count_contained_in(window_pathkeys,
4756 path->pathkeys,
4757 &presorted_keys);
4758
4759 /* Sort if necessary */
4760 if (!is_sorted)
4761 {
4762 /*
4763 * No presorted keys or incremental sort disabled, just perform a
4764 * complete sort.
4765 */
4766 if (presorted_keys == 0 || !enable_incremental_sort)
4768 path,
4769 window_pathkeys,
4770 -1.0);
4771 else
4772 {
4773 /*
4774 * Since we have presorted keys and incremental sort is
4775 * enabled, just use incremental sort.
4776 */
4778 window_rel,
4779 path,
4780 window_pathkeys,
4781 presorted_keys,
4782 -1.0);
4783 }
4784 }
4785
4786 if (lnext(activeWindows, l))
4787 {
4788 /*
4789 * Add the current WindowFuncs to the output target for this
4790 * intermediate WindowAggPath. We must copy window_target to
4791 * avoid changing the previous path's target.
4792 *
4793 * Note: a WindowFunc adds nothing to the target's eval costs; but
4794 * we do need to account for the increase in tlist width.
4795 */
4797
4799 foreach(lc2, wflists->windowFuncs[wc->winref])
4800 {
4802
4804 tuple_width += get_typavgwidth(wfunc->wintype, -1);
4805 }
4807 }
4808 else
4809 {
4810 /* Install the goal target in the topmost WindowAgg */
4812 }
4813
4814 /* mark the final item in the list as the top-level window */
4815 topwindow = foreach_current_index(l) == list_length(activeWindows) - 1;
4816
4817 /*
4818 * Collect the WindowFuncRunConditions from each WindowFunc and
4819 * convert them into OpExprs
4820 */
4821 foreach(lc2, wflists->windowFuncs[wc->winref])
4822 {
4823 ListCell *lc3;
4825
4826 foreach(lc3, wfunc->runCondition)
4827 {
4830 Expr *opexpr;
4831 Expr *leftop;
4832 Expr *rightop;
4833
 /*
  * Put the WindowFunc on whichever side of the operator the
  * run condition recorded it on (wfunc_left), with the
  * comparison argument on the other side.
  */
4834 if (wfuncrc->wfunc_left)
4835 {
4836 leftop = (Expr *) copyObject(wfunc);
4837 rightop = copyObject(wfuncrc->arg);
4838 }
4839 else
4840 {
4841 leftop = copyObject(wfuncrc->arg);
4842 rightop = (Expr *) copyObject(wfunc);
4843 }
4844
 /*
  * Boolean-result, non-set-returning operator; no output
  * collation (InvalidOid), input collation taken from the
  * run condition.
  */
4845 opexpr = make_opclause(wfuncrc->opno,
4846 BOOLOID,
4847 false,
4848 leftop,
4849 rightop,
4850 InvalidOid,
4851 wfuncrc->inputcollid);
4852
4853 runcondition = lappend(runcondition, opexpr);
4854
4855 if (!topwindow)
4856 topqual = lappend(topqual, opexpr);
4857 }
4858 }
4859
4860 path = (Path *)
4862 wflists->windowFuncs[wc->winref],
4863 runcondition, wc,
4864 topwindow ? topqual : NIL, topwindow);
4865 }
4866
4867 add_path(window_rel, path);
4868}
4869
4870/*
4871 * create_distinct_paths
4872 *
4873 * Build a new upperrel containing Paths for SELECT DISTINCT evaluation.
4874 *
4875 * input_rel: contains the source-data Paths
4876 * target: the pathtarget for the result Paths to compute
4877 *
4878 * Note: input paths should already compute the desired pathtarget, since
4879 * Sort/Unique won't project anything.
4880 */
4881static RelOptInfo *
4883 PathTarget *target)
4884{
 /*
  * NOTE(review): HTML-export artifact — the function-name line, the
  * distinct_rel declaration/fetch_upper_rel call, and the calls to
  * create_final_distinct_paths / create_partial_distinct_paths /
  * set_cheapest were dropped by the extraction.  Code left byte-identical.
  */
4886
4887 /* For now, do all work in the (DISTINCT, NULL) upperrel */
4889
4890 /*
4891 * We don't compute anything at this level, so distinct_rel will be
4892 * parallel-safe if the input rel is parallel-safe. In particular, if
4893 * there is a DISTINCT ON (...) clause, any path for the input_rel will
4894 * output those expressions, and will not be parallel-safe unless those
4895 * expressions are parallel-safe.
4896 */
4897 distinct_rel->consider_parallel = input_rel->consider_parallel;
4898
4899 /*
4900 * If the input rel belongs to a single FDW, so does the distinct_rel.
4901 */
4902 distinct_rel->serverid = input_rel->serverid;
4903 distinct_rel->userid = input_rel->userid;
4904 distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4905 distinct_rel->fdwroutine = input_rel->fdwroutine;
4906
4907 /* build distinct paths based on input_rel's pathlist */
4909
4910 /* now build distinct paths based on input_rel's partial_pathlist */
4912
4913 /* Give a helpful error if we failed to create any paths */
4914 if (distinct_rel->pathlist == NIL)
4915 ereport(ERROR,
4917 errmsg("could not implement DISTINCT"),
4918 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4919
4920 /*
4921 * If there is an FDW that's responsible for all baserels of the query,
4922 * let it consider adding ForeignPaths.
4923 */
4924 if (distinct_rel->fdwroutine &&
4925 distinct_rel->fdwroutine->GetForeignUpperPaths)
4926 distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4928 input_rel,
4930 NULL);
4931
4932 /* Let extensions possibly add some more paths */
4934 (*create_upper_paths_hook) (root, UPPERREL_DISTINCT, input_rel,
4936
4937 /* Now choose the best path(s) */
4939
4940 return distinct_rel;
4941}
4942
4943/*
4944 * create_partial_distinct_paths
4945 *
4946 * Process 'input_rel' partial paths and add unique/aggregate paths to the
4947 * UPPERREL_PARTIAL_DISTINCT rel. For paths created, add Gather/GatherMerge
4948 * paths on top and add a final unique/aggregate path to remove any duplicate
4949 * produced from combining rows from parallel workers.
4950 */
4951static void
4954 PathTarget *target)
4955{
 /*
  * NOTE(review): HTML-export artifact — declarations of
  * partial_distinct_rel, cheapest_partial_path, distinctExprs, and the
  * path-construction call names (create_limit_path, create_upper_unique
  * variants, create_agg_path, gather-path generation) were dropped by the
  * extraction.  Code left byte-identical.
  */
4957 Query *parse;
4959 double numDistinctRows;
4961 ListCell *lc;
4962
4963 /* nothing to do when there are no partial paths in the input rel */
4964 if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
4965 return;
4966
4967 parse = root->parse;
4968
4969 /* can't do parallel DISTINCT ON */
4970 if (parse->hasDistinctOn)
4971 return;
4972
4974 NULL);
4975 partial_distinct_rel->reltarget = target;
4976 partial_distinct_rel->consider_parallel = input_rel->consider_parallel;
4977
4978 /*
4979 * If input_rel belongs to a single FDW, so does the partial_distinct_rel.
4980 */
4981 partial_distinct_rel->serverid = input_rel->serverid;
4982 partial_distinct_rel->userid = input_rel->userid;
4983 partial_distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4984 partial_distinct_rel->fdwroutine = input_rel->fdwroutine;
4985
4986 cheapest_partial_path = linitial(input_rel->partial_pathlist);
4987
4988 distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
4989 parse->targetList);
4990
4991 /* estimate how many distinct rows we'll get from each worker */
4994 NULL, NULL);
4995
4996 /*
4997 * Try sorting the cheapest path and incrementally sorting any paths with
4998 * presorted keys and put a unique paths atop of those. We'll also
4999 * attempt to reorder the required pathkeys to match the input path's
5000 * pathkeys as much as possible, in hopes of avoiding a possible need to
5001 * re-sort.
5002 */
5003 if (grouping_is_sortable(root->processed_distinctClause))
5004 {
5005 foreach(lc, input_rel->partial_pathlist)
5006 {
5007 Path *input_path = (Path *) lfirst(lc);
5010
5013 root->distinct_pathkeys,
5014 input_path->pathkeys);
5016
5018 {
5021 input_path,
5024 -1.0);
5025
5026 if (sorted_path == NULL)
5027 continue;
5028
5029 /*
5030 * An empty distinct_pathkeys means all tuples have the same
5031 * value for the DISTINCT clause. See
5032 * create_final_distinct_paths()
5033 */
5034 if (root->distinct_pathkeys == NIL)
5035 {
5036 Node *limitCount;
5037
 /* Constant int64 count of 1 (the 'false' is constisnull). */
5038 limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
5039 sizeof(int64),
5040 Int64GetDatum(1), false,
5041 true);
5042
5043 /*
5044 * Apply a LimitPath onto the partial path to restrict the
5045 * tuples from each worker to 1.
5046 * create_final_distinct_paths will need to apply an
5047 * additional LimitPath to restrict this to a single row
5048 * after the Gather node. If the query already has a
5049 * LIMIT clause, then we could end up with three Limit
5050 * nodes in the final plan. Consolidating the top two of
5051 * these could be done, but does not seem worth troubling
5052 * over.
5053 */
5057 NULL,
5058 limitCount,
5060 0, 1));
5061 }
5062 else
5063 {
5067 list_length(root->distinct_pathkeys),
5069 }
5070 }
5071 }
5072 }
5073
5074 /*
5075 * Now try hash aggregate paths, if enabled and hashing is possible. Since
5076 * we're not on the hook to ensure we do our best to create at least one
5077 * path here, we treat enable_hashagg as a hard off-switch rather than the
5078 * slightly softer variant in create_final_distinct_paths.
5079 */
5080 if (enable_hashagg && grouping_is_hashable(root->processed_distinctClause))
5081 {
5086 cheapest_partial_path->pathtarget,
5087 AGG_HASHED,
5089 root->processed_distinctClause,
5090 NIL,
5091 NULL,
5093 }
5094
5095 /*
5096 * If there is an FDW that's responsible for all baserels of the query,
5097 * let it consider adding ForeignPaths.
5098 */
5099 if (partial_distinct_rel->fdwroutine &&
5100 partial_distinct_rel->fdwroutine->GetForeignUpperPaths)
5101 partial_distinct_rel->fdwroutine->GetForeignUpperPaths(root,
5103 input_rel,
5105 NULL);
5106
5107 /* Let extensions possibly add some more partial paths */
5109 (*create_upper_paths_hook) (root, UPPERREL_PARTIAL_DISTINCT,
5111
5112 if (partial_distinct_rel->partial_pathlist != NIL)
5113 {
5116
5117 /*
5118 * Finally, create paths to distinctify the final result. This step
5119 * is needed to remove any duplicates due to combining rows from
5120 * parallel workers.
5121 */
5124 }
5125}
5126
5127/*
5128 * create_final_distinct_paths
5129 * Create distinct paths in 'distinct_rel' based on 'input_rel' pathlist
5130 *
5131 * input_rel: contains the source-data paths
5132 * distinct_rel: destination relation for storing created paths
5133 */
5134static RelOptInfo *
5137{
 /*
  * NOTE(review): HTML-export artifact — the function-name/parameter lines
  * and several declaration/call lines (distinctExprs, numDistinctRows
  * assignments, sorted_path construction, create_agg_path) were dropped
  * by the extraction.  Code left byte-identical.
  */
5138 Query *parse = root->parse;
5139 Path *cheapest_input_path = input_rel->cheapest_total_path;
5140 double numDistinctRows;
5141 bool allow_hash;
5142
5143 /* Estimate number of distinct rows there will be */
5144 if (parse->groupClause || parse->groupingSets || parse->hasAggs ||
5145 root->hasHavingQual)
5146 {
5147 /*
5148 * If there was grouping or aggregation, use the number of input rows
5149 * as the estimated number of DISTINCT rows (ie, assume the input is
5150 * already mostly unique).
5151 */
5153 }
5154 else
5155 {
5156 /*
5157 * Otherwise, the UNIQUE filter has effects comparable to GROUP BY.
5158 */
5160
5161 distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
5162 parse->targetList);
5164 cheapest_input_path->rows,
5165 NULL, NULL);
5166 }
5167
5168 /*
5169 * Consider sort-based implementations of DISTINCT, if possible.
5170 */
5171 if (grouping_is_sortable(root->processed_distinctClause))
5172 {
5173 /*
5174 * Firstly, if we have any adequately-presorted paths, just stick a
5175 * Unique node on those. We also, consider doing an explicit sort of
5176 * the cheapest input path and Unique'ing that. If any paths have
5177 * presorted keys then we'll create an incremental sort atop of those
5178 * before adding a unique node on the top. We'll also attempt to
5179 * reorder the required pathkeys to match the input path's pathkeys as
5180 * much as possible, in hopes of avoiding a possible need to re-sort.
5181 *
5182 * When we have DISTINCT ON, we must sort by the more rigorous of
5183 * DISTINCT and ORDER BY, else it won't have the desired behavior.
5184 * Also, if we do have to do an explicit sort, we might as well use
5185 * the more rigorous ordering to avoid a second sort later. (Note
5186 * that the parser will have ensured that one clause is a prefix of
5187 * the other.)
5188 */
5190 ListCell *lc;
 /*
  * When all DISTINCT pathkeys were found redundant we'll only need one
  * tuple, so the sort can be bounded at 1; otherwise unbounded (-1).
  */
5191 double limittuples = root->distinct_pathkeys == NIL ? 1.0 : -1.0;
5192
5193 if (parse->hasDistinctOn &&
5194 list_length(root->distinct_pathkeys) <
5195 list_length(root->sort_pathkeys))
5196 needed_pathkeys = root->sort_pathkeys;
5197 else
5198 needed_pathkeys = root->distinct_pathkeys;
5199
5200 foreach(lc, input_rel->pathlist)
5201 {
5202 Path *input_path = (Path *) lfirst(lc);
5205
5209 input_path->pathkeys);
5211
5213 {
5216 input_path,
5219 limittuples);
5220
5221 if (sorted_path == NULL)
5222 continue;
5223
5224 /*
5225 * distinct_pathkeys may have become empty if all of the
5226 * pathkeys were determined to be redundant. If all of the
5227 * pathkeys are redundant then each DISTINCT target must only
5228 * allow a single value, therefore all resulting tuples must
5229 * be identical (or at least indistinguishable by an equality
5230 * check). We can uniquify these tuples simply by just taking
5231 * the first tuple. All we do here is add a path to do "LIMIT
5232 * 1" atop of 'sorted_path'. When doing a DISTINCT ON we may
5233 * still have a non-NIL sort_pathkeys list, so we must still
5234 * only do this with paths which are correctly sorted by
5235 * sort_pathkeys.
5236 */
5237 if (root->distinct_pathkeys == NIL)
5238 {
5239 Node *limitCount;
5240
 /* Constant int64 count of 1 (the 'false' is constisnull). */
5241 limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
5242 sizeof(int64),
5243 Int64GetDatum(1), false,
5244 true);
5245
5246 /*
5247 * If the query already has a LIMIT clause, then we could
5248 * end up with a duplicate LimitPath in the final plan.
5249 * That does not seem worth troubling over too much.
5250 */
5253 NULL, limitCount,
5254 LIMIT_OPTION_COUNT, 0, 1));
5255 }
5256 else
5257 {
5261 list_length(root->distinct_pathkeys),
5263 }
5264 }
5265 }
5266 }
5267
5268 /*
5269 * Consider hash-based implementations of DISTINCT, if possible.
5270 *
5271 * If we were not able to make any other types of path, we *must* hash or
5272 * die trying. If we do have other choices, there are two things that
5273 * should prevent selection of hashing: if the query uses DISTINCT ON
5274 * (because it won't really have the expected behavior if we hash), or if
5275 * enable_hashagg is off.
5276 *
5277 * Note: grouping_is_hashable() is much more expensive to check than the
5278 * other gating conditions, so we want to do it last.
5279 */
5280 if (distinct_rel->pathlist == NIL)
5281 allow_hash = true; /* we have no alternatives */
5282 else if (parse->hasDistinctOn || !enable_hashagg)
5283 allow_hash = false; /* policy-based decision not to hash */
5284 else
5285 allow_hash = true; /* default */
5286
5287 if (allow_hash && grouping_is_hashable(root->processed_distinctClause))
5288 {
5289 /* Generate hashed aggregate path --- no sort needed */
5294 cheapest_input_path->pathtarget,
5295 AGG_HASHED,
5297 root->processed_distinctClause,
5298 NIL,
5299 NULL,
5301 }
5302
5303 return distinct_rel;
5304}
5305
5306/*
5307 * get_useful_pathkeys_for_distinct
5308 * Get useful orderings of pathkeys for distinctClause by reordering
5309 * 'needed_pathkeys' to match the given 'path_pathkeys' as much as possible.
5310 *
5311 * This returns a list of pathkeys that can be useful for DISTINCT or DISTINCT
5312 * ON clause. For convenience, it always includes the given 'needed_pathkeys'.
5313 */
5314static List *
5317{
 /*
  * NOTE(review): HTML-export artifact — the parameter lines and the
  * declarations of useful_pathkeys_list / useful_pathkeys, the foreach
  * header over path_pathkeys, and several list-manipulation call lines
  * were dropped by the extraction.  Code left byte-identical; the control
  * flow below (early returns, prefix scan, append of remainder) matches
  * the behavior described in the function's header comment.
  */
5320
5321 /* always include the given 'needed_pathkeys' */
5324
5326 return useful_pathkeys_list;
5327
5328 /*
5329 * Scan the given 'path_pathkeys' and construct a list of PathKey nodes
5330 * that match 'needed_pathkeys', but only up to the longest matching
5331 * prefix.
5332 *
5333 * When we have DISTINCT ON, we must ensure that the resulting pathkey
5334 * list matches initial distinctClause pathkeys; otherwise, it won't have
5335 * the desired behavior.
5336 */
5338 {
5339 /*
5340 * The PathKey nodes are canonical, so they can be checked for
5341 * equality by simple pointer comparison.
5342 */
5344 break;
5345 if (root->parse->hasDistinctOn &&
5346 !list_member_ptr(root->distinct_pathkeys, pathkey))
5347 break;
5348
5350 }
5351
5352 /* If no match at all, no point in reordering needed_pathkeys */
5353 if (useful_pathkeys == NIL)
5354 return useful_pathkeys_list;
5355
5356 /*
5357 * If not full match, the resulting pathkey list is not useful without
5358 * incremental sort.
5359 */
5362 return useful_pathkeys_list;
5363
5364 /* Append the remaining PathKey nodes in needed_pathkeys */
5367
5368 /*
5369 * If the resulting pathkey list is the same as the 'needed_pathkeys',
5370 * just drop it.
5371 */
5374 return useful_pathkeys_list;
5375
5378
5379 return useful_pathkeys_list;
5380}
5381
5382/*
5383 * create_ordered_paths
5384 *
5385 * Build a new upperrel containing Paths for ORDER BY evaluation.
5386 *
5387 * All paths in the result must satisfy the ORDER BY ordering.
5388 * The only new paths we need consider are an explicit full sort
5389 * and incremental sort on the cheapest-total existing path.
5390 *
5391 * input_rel: contains the source-data Paths
5392 * target: the output tlist the result Paths must emit
5393 * limit_tuples: estimated bound on the number of output tuples,
5394 * or -1 if no LIMIT or couldn't estimate
5395 *
5396 * XXX This only looks at sort_pathkeys. I wonder if it needs to look at the
5397 * other pathkeys (grouping, ...) like generate_useful_gather_paths.
5398 */
5399static RelOptInfo *
5402 PathTarget *target,
5403 bool target_parallel_safe,
5404 double limit_tuples)
5405{
 /*
  * NOTE(review): HTML-export artifact — the function-name line, the
  * ordered_rel declaration/fetch_upper_rel call, sorted_path
  * declarations, and the sort/incremental-sort/gather-merge constructor
  * names were dropped by the extraction.  Code left byte-identical.
  */
5406 Path *cheapest_input_path = input_rel->cheapest_total_path;
5408 ListCell *lc;
5409
5410 /* For now, do all work in the (ORDERED, NULL) upperrel */
5412
5413 /*
5414 * If the input relation is not parallel-safe, then the ordered relation
5415 * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
5416 * target list is parallel-safe.
5417 */
5418 if (input_rel->consider_parallel && target_parallel_safe)
5419 ordered_rel->consider_parallel = true;
5420
5421 /* Assume that the same path generation strategies are allowed. */
5422 ordered_rel->pgs_mask = input_rel->pgs_mask;
5423
5424 /*
5425 * If the input rel belongs to a single FDW, so does the ordered_rel.
5426 */
5427 ordered_rel->serverid = input_rel->serverid;
5428 ordered_rel->userid = input_rel->userid;
5429 ordered_rel->useridiscurrent = input_rel->useridiscurrent;
5430 ordered_rel->fdwroutine = input_rel->fdwroutine;
5431
5432 foreach(lc, input_rel->pathlist)
5433 {
5434 Path *input_path = (Path *) lfirst(lc);
5436 bool is_sorted;
5437 int presorted_keys;
5438
5440 input_path->pathkeys, &presorted_keys);
5441
5442 if (is_sorted)
5444 else
5445 {
5446 /*
5447 * Try at least sorting the cheapest path and also try
5448 * incrementally sorting any path which is partially sorted
5449 * already (no need to deal with paths which have presorted keys
5450 * when incremental sort is disabled unless it's the cheapest
5451 * input path).
5452 */
5454 (presorted_keys == 0 || !enable_incremental_sort))
5455 continue;
5456
5457 /*
5458 * We've no need to consider both a sort and incremental sort.
5459 * We'll just do a sort if there are no presorted keys and an
5460 * incremental sort when there are presorted keys.
5461 */
5462 if (presorted_keys == 0 || !enable_incremental_sort)
5465 input_path,
5466 root->sort_pathkeys,
5467 limit_tuples);
5468 else
5471 input_path,
5472 root->sort_pathkeys,
5473 presorted_keys,
5474 limit_tuples);
5475 }
5476
5477 /*
5478 * If the pathtarget of the result path has different expressions from
5479 * the target to be applied, a projection step is needed.
5480 */
5481 if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5483 sorted_path, target);
5484
5486 }
5487
5488 /*
5489 * generate_gather_paths() will have already generated a simple Gather
5490 * path for the best parallel path, if any, and the loop above will have
5491 * considered sorting it. Similarly, generate_gather_paths() will also
5492 * have generated order-preserving Gather Merge plans which can be used
5493 * without sorting if they happen to match the sort_pathkeys, and the loop
5494 * above will have handled those as well. However, there's one more
5495 * possibility: it may make sense to sort the cheapest partial path or
5496 * incrementally sort any partial path that is partially sorted according
5497 * to the required output order and then use Gather Merge.
5498 */
5499 if (ordered_rel->consider_parallel && root->sort_pathkeys != NIL &&
5500 input_rel->partial_pathlist != NIL)
5501 {
5503
5504 cheapest_partial_path = linitial(input_rel->partial_pathlist);
5505
5506 foreach(lc, input_rel->partial_pathlist)
5507 {
5508 Path *input_path = (Path *) lfirst(lc);
5510 bool is_sorted;
5511 int presorted_keys;
5512 double total_groups;
5513
5515 input_path->pathkeys,
5516 &presorted_keys);
5517
5518 if (is_sorted)
5519 continue;
5520
5521 /*
5522 * Try at least sorting the cheapest path and also try
5523 * incrementally sorting any path which is partially sorted
5524 * already (no need to deal with paths which have presorted keys
5525 * when incremental sort is disabled unless it's the cheapest
5526 * partial path).
5527 */
5529 (presorted_keys == 0 || !enable_incremental_sort))
5530 continue;
5531
5532 /*
5533 * We've no need to consider both a sort and incremental sort.
5534 * We'll just do a sort if there are no presorted keys and an
5535 * incremental sort when there are presorted keys.
5536 */
5537 if (presorted_keys == 0 || !enable_incremental_sort)
5540 input_path,
5541 root->sort_pathkeys,
5542 limit_tuples);
5543 else
5546 input_path,
5547 root->sort_pathkeys,
5548 presorted_keys,
5549 limit_tuples);
5551 sorted_path = (Path *)
5554 sorted_path->pathtarget,
5555 root->sort_pathkeys, NULL,
5556 &total_groups);
5557
5558 /*
5559 * If the pathtarget of the result path has different expressions
5560 * from the target to be applied, a projection step is needed.
5561 */
5562 if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5564 sorted_path, target);
5565
5567 }
5568 }
5569
5570 /*
5571 * If there is an FDW that's responsible for all baserels of the query,
5572 * let it consider adding ForeignPaths.
5573 */
5574 if (ordered_rel->fdwroutine &&
5575 ordered_rel->fdwroutine->GetForeignUpperPaths)
5576 ordered_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_ORDERED,
5578 NULL);
5579
5580 /* Let extensions possibly add some more paths */
5582 (*create_upper_paths_hook) (root, UPPERREL_ORDERED,
5584
5585 /*
5586 * No need to bother with set_cheapest here; grouping_planner does not
5587 * need us to do it.
5588 */
5589 Assert(ordered_rel->pathlist != NIL);
5590
5591 return ordered_rel;
5592}
5593
5594
5595/*
5596 * make_group_input_target
5597 * Generate appropriate PathTarget for initial input to grouping nodes.
5598 *
5599 * If there is grouping or aggregation, the scan/join subplan cannot emit
5600 * the query's final targetlist; for example, it certainly can't emit any
5601 * aggregate function calls. This routine generates the correct target
5602 * for the scan/join subplan.
5603 *
5604 * The query target list passed from the parser already contains entries
5605 * for all ORDER BY and GROUP BY expressions, but it will not have entries
5606 * for variables used only in HAVING clauses; so we need to add those
5607 * variables to the subplan target list. Also, we flatten all expressions
5608 * except GROUP BY items into their component variables; other expressions
5609 * will be computed by the upper plan nodes rather than by the subplan.
5610 * For example, given a query like
5611 * SELECT a+b,SUM(c+d) FROM table GROUP BY a+b;
5612 * we want to pass this targetlist to the subplan:
5613 * a+b,c,d
5614 * where the a+b target will be used by the Sort/Group steps, and the
5615 * other targets will be used for computing the final results.
5616 *
5617 * 'final_target' is the query's final target list (in PathTarget form)
5618 *
5619 * The result is the PathTarget to be computed by the Paths returned from
5620 * query_planner().
5621 */
5622static PathTarget *
5624{
5625 Query *parse = root->parse;
5629 int i;
5630 ListCell *lc;
5631
5632 /*
5633 * We must build a target containing all grouping columns, plus any other
5634 * Vars mentioned in the query's targetlist and HAVING qual.
5635 */
5638
 /*
  * NOTE(review): this listing is missing several lines here (presumably the
  * declarations/initialization of input_target, non_group_cols, and the
  * per-column sgref lookup) -- confirm against the upstream source before
  * relying on the exact control flow.
  */
5639 i = 0;
5640 foreach(lc, final_target->exprs)
5641 {
5642 Expr *expr = (Expr *) lfirst(lc);
5644
5645 if (sgref && root->processed_groupClause &&
5647 root->processed_groupClause) != NULL)
5648 {
5649 /*
5650 * It's a grouping column, so add it to the input target as-is.
5651 *
5652 * Note that the target is logically below the grouping step. So
5653 * with grouping sets we need to remove the RT index of the
5654 * grouping step if there is any from the target expression.
5655 */
5656 if (parse->hasGroupRTE && parse->groupingSets != NIL)
5657 {
5658 Assert(root->group_rtindex > 0);
5659 expr = (Expr *)
5660 remove_nulling_relids((Node *) expr,
5661 bms_make_singleton(root->group_rtindex),
5662 NULL);
5663 }
5665 }
5666 else
5667 {
5668 /*
5669 * Non-grouping column, so just remember the expression for later
5670 * call to pull_var_clause.
5671 */
5673 }
5674
 /* i is presumably the output-column index used for the sgref lookup */
5675 i++;
5676 }
5677
5678 /*
5679 * If there's a HAVING clause, we'll need the Vars it uses, too.
5680 */
5681 if (parse->havingQual)
5683
5684 /*
5685 * Pull out all the Vars mentioned in non-group cols (plus HAVING), and
5686 * add them to the input target if not already present. (A Var used
5687 * directly as a GROUP BY item will be present already.) Note this
5688 * includes Vars used in resjunk items, so we are covering the needs of
5689 * ORDER BY and window specifications. Vars used within Aggrefs and
5690 * WindowFuncs will be pulled out here, too.
5691 *
5692 * Note that the target is logically below the grouping step. So with
5693 * grouping sets we need to remove the RT index of the grouping step if
5694 * there is any from the non-group Vars.
5695 */
5700 if (parse->hasGroupRTE && parse->groupingSets != NIL)
5701 {
5702 Assert(root->group_rtindex > 0);
5703 non_group_vars = (List *)
5705 bms_make_singleton(root->group_rtindex),
5706 NULL);
5707 }
5709
5710 /* clean up cruft */
5713
5714 /* XXX this causes some redundant cost calculation ... */
5716}
5717
5718/*
5719 * make_partial_grouping_target
5720 * Generate appropriate PathTarget for output of partial aggregate
5721 * (or partial grouping, if there are no aggregates) nodes.
5722 *
5723 * A partial aggregation node needs to emit all the same aggregates that
5724 * a regular aggregation node would, plus any aggregates used in HAVING;
5725 * except that the Aggref nodes should be marked as partial aggregates.
5726 *
5727 * In addition, we'd better emit any Vars and PlaceHolderVars that are
5728 * used outside of Aggrefs in the aggregation tlist and HAVING. (Presumably,
5729 * these would be Vars that are grouped by or used in grouping expressions.)
5730 *
5731 * grouping_target is the tlist to be emitted by the topmost aggregation step.
5732 * havingQual represents the HAVING clause.
5733 */
5734static PathTarget *
5737 Node *havingQual)
5738{
5742 int i;
5743 ListCell *lc;
5744
 /*
  * NOTE(review): the declarations of partial_target, non_group_cols and the
  * sgref lookup appear to be missing from this listing (extraction loss) --
  * confirm against the upstream source.
  */
5747
5748 i = 0;
5749 foreach(lc, grouping_target->exprs)
5750 {
5751 Expr *expr = (Expr *) lfirst(lc);
5753
5754 if (sgref && root->processed_groupClause &&
5756 root->processed_groupClause) != NULL)
5757 {
5758 /*
5759 * It's a grouping column, so add it to the partial_target as-is.
5760 * (This allows the upper agg step to repeat the grouping calcs.)
5761 */
5763 }
5764 else
5765 {
5766 /*
5767 * Non-grouping column, so just remember the expression for later
5768 * call to pull_var_clause.
5769 */
5771 }
5772
5773 i++;
5774 }
5775
5776 /*
5777 * If there's a HAVING clause, we'll need the Vars/Aggrefs it uses, too.
5778 */
5779 if (havingQual)
5780 non_group_cols = lappend(non_group_cols, havingQual);
5781
5782 /*
5783 * Pull out all the Vars, PlaceHolderVars, and Aggrefs mentioned in
5784 * non-group cols (plus HAVING), and add them to the partial_target if not
5785 * already present. (An expression used directly as a GROUP BY item will
5786 * be present already.) Note this includes Vars used in resjunk items, so
5787 * we are covering the needs of ORDER BY and window specifications.
5788 */
5793
5795
5796 /*
5797 * Adjust Aggrefs to put them in partial mode. At this point all Aggrefs
5798 * are at the top level of the target list, so we can just scan the list
5799 * rather than recursing through the expression trees.
5800 */
5801 foreach(lc, partial_target->exprs)
5802 {
5803 Aggref *aggref = (Aggref *) lfirst(lc);
5804
 /* grouping-column entries are not Aggrefs; the IsA test skips them */
5805 if (IsA(aggref, Aggref))
5806 {
5808
5809 /*
5810 * We shouldn't need to copy the substructure of the Aggref node,
5811 * but flat-copy the node itself to avoid damaging other trees.
5812 */
5814 memcpy(newaggref, aggref, sizeof(Aggref));
5815
5816 /* For now, assume serialization is required */
5818
5819 lfirst(lc) = newaggref;
5820 }
5821 }
5822
5823 /* clean up cruft */
5826
5827 /* XXX this causes some redundant cost calculation ... */
5829}
5830
5831/*
5832 * mark_partial_aggref
5833 * Adjust an Aggref to make it represent a partial-aggregation step.
5834 *
5835 * The Aggref node is modified in-place; caller must do any copying required.
5836 */
5837void
5839{
5840 /* aggtranstype should be computed by this point */
5841 Assert(OidIsValid(agg->aggtranstype));
5842 /* ... but aggsplit should still be as the parser left it */
5843 Assert(agg->aggsplit == AGGSPLIT_SIMPLE);
5844
5845 /* Mark the Aggref with the intended partial-aggregation mode */
5846 agg->aggsplit = aggsplit;
5847
5848 /*
5849 * Adjust result type if needed. Normally, a partial aggregate returns
5850 * the aggregate's transition type; but if that's INTERNAL and we're
5851 * serializing, it returns BYTEA instead.
5852 */
5853 if (DO_AGGSPLIT_SKIPFINAL(aggsplit))
5854 {
5855 if (agg->aggtranstype == INTERNALOID && DO_AGGSPLIT_SERIALIZE(aggsplit))
5856 agg->aggtype = BYTEAOID;
5857 else
5858 agg->aggtype = agg->aggtranstype;
5859 }
5860}
5861
5862/*
5863 * postprocess_setop_tlist
5864 * Fix up targetlist returned by plan_set_operations().
5865 *
5866 * We need to transpose sort key info from the orig_tlist into new_tlist.
5867 * NOTE: this would not be good enough if we supported resjunk sort keys
5868 * for results of set operations --- then, we'd need to project a whole
5869 * new tlist to evaluate the resjunk columns. For now, just ereport if we
5870 * find any resjunk columns in orig_tlist.
5871 */
5872static List *
5874{
5875 ListCell *l;
5877
5878 foreach(l, new_tlist)
5879 {
5882
5883 /* ignore resjunk columns in setop result */
5884 if (new_tle->resjunk)
5885 continue;
5886
5890 if (orig_tle->resjunk) /* should not happen */
5891 elog(ERROR, "resjunk output columns are not implemented");
5892 Assert(new_tle->resno == orig_tle->resno);
5893 new_tle->ressortgroupref = orig_tle->ressortgroupref;
5894 }
5895 if (orig_tlist_item != NULL)
5896 elog(ERROR, "resjunk output columns are not implemented");
5897 return new_tlist;
5898}
5899
5900/*
5901 * optimize_window_clauses
5902 * Call each WindowFunc's prosupport function to see if we're able to
5903 * make any adjustments to any of the WindowClause's so that the executor
5904 * can execute the window functions in a more optimal way.
5905 *
5906 * Currently we only allow adjustments to the WindowClause's frameOptions. We
5907 * may allow more things to be done here in the future.
5908 */
5909static void
5911{
5912 List *windowClause = root->parse->windowClause;
5913 ListCell *lc;
5914
 /*
  * NOTE(review): this listing has lost several lines below (the per-clause
  * wc fetch, the wfunc/prosupport lookup, the request's node-type tag, the
  * assignment of res, and the line applying the new frameOptions) --
  * confirm exact statements against the upstream source.
  */
5915 foreach(lc, windowClause)
5916 {
5918 ListCell *lc2;
5919 int optimizedFrameOptions = 0;
5920
5921 Assert(wc->winref <= wflists->maxWinRef);
5922
5923 /* skip any WindowClauses that have no WindowFuncs */
5924 if (wflists->windowFuncs[wc->winref] == NIL)
5925 continue;
5926
5927 foreach(lc2, wflists->windowFuncs[wc->winref])
5928 {
5933
5935
5936 /* Check if there's a support function for 'wfunc' */
5937 if (!OidIsValid(prosupport))
5938 break; /* can't optimize this WindowClause */
5939
5941 req.window_clause = wc;
5942 req.window_func = wfunc;
5943 req.frameOptions = wc->frameOptions;
5944
5945 /* call the support function */
5948 PointerGetDatum(&req)));
5949
5950 /*
5951 * Skip to next WindowClause if the support function does not
5952 * support this request type.
5953 */
5954 if (res == NULL)
5955 break;
5956
5957 /*
5958 * Save these frameOptions for the first WindowFunc for this
5959 * WindowClause.
5960 */
5961 if (foreach_current_index(lc2) == 0)
5963
5964 /*
5965 * On subsequent WindowFuncs, if the frameOptions are not the same
5966 * then we're unable to optimize the frameOptions for this
5967 * WindowClause.
5968 */
5969 else if (optimizedFrameOptions != res->frameOptions)
5970 break; /* skip to the next WindowClause, if any */
5971 }
5972
 /* lc2 == NULL means the inner loop ran to completion without a break */
5973 /* adjust the frameOptions if all WindowFunc's agree that it's ok */
5974 if (lc2 == NULL && wc->frameOptions != optimizedFrameOptions)
5975 {
5976 ListCell *lc3;
5977
5978 /* apply the new frame options */
5980
5981 /*
5982 * We now check to see if changing the frameOptions has caused
5983 * this WindowClause to be a duplicate of some other WindowClause.
5984 * This can only happen if we have multiple WindowClauses, so
5985 * don't bother if there's only 1.
5986 */
5987 if (list_length(windowClause) == 1)
5988 continue;
5989
5990 /*
5991 * Do the duplicate check and reuse the existing WindowClause if
5992 * we find a duplicate.
5993 */
5994 foreach(lc3, windowClause)
5995 {
5997
5998 /* skip over the WindowClause we're currently editing */
5999 if (existing_wc == wc)
6000 continue;
6001
6002 /*
6003 * Perform the same duplicate check that is done in
6004 * transformWindowFuncCall.
6005 */
6006 if (equal(wc->partitionClause, existing_wc->partitionClause) &&
6007 equal(wc->orderClause, existing_wc->orderClause) &&
6008 wc->frameOptions == existing_wc->frameOptions &&
6009 equal(wc->startOffset, existing_wc->startOffset) &&
6010 equal(wc->endOffset, existing_wc->endOffset))
6011 {
6012 ListCell *lc4;
6013
6014 /*
6015 * Now move each WindowFunc in 'wc' into 'existing_wc'.
6016 * This required adjusting each WindowFunc's winref and
6017 * moving the WindowFuncs in 'wc' to the list of
6018 * WindowFuncs in 'existing_wc'.
6019 */
6020 foreach(lc4, wflists->windowFuncs[wc->winref])
6021 {
6023
6024 wfunc->winref = existing_wc->winref;
6025 }
6026
6027 /* move list items */
6028 wflists->windowFuncs[existing_wc->winref] = list_concat(wflists->windowFuncs[existing_wc->winref],
6029 wflists->windowFuncs[wc->winref]);
6030 wflists->windowFuncs[wc->winref] = NIL;
6031
6032 /*
6033 * transformWindowFuncCall() should have made sure there
6034 * are no other duplicates, so we needn't bother looking
6035 * any further.
6036 */
6037 break;
6038 }
6039 }
6040 }
6041 }
6042}
6043
6044/*
6045 * select_active_windows
6046 * Create a list of the "active" window clauses (ie, those referenced
6047 * by non-deleted WindowFuncs) in the order they are to be executed.
6048 */
6049static List *
6051{
6052 List *windowClause = root->parse->windowClause;
6053 List *result = NIL;
6054 ListCell *lc;
6055 int nActive = 0;
 /*
  * NOTE(review): the allocation of the 'actives' array (presumably a
  * palloc of WindowClauseSortData sized by list_length(windowClause)) and
  * the per-iteration wc fetch are missing from this listing -- confirm
  * against upstream.
  */
6057 list_length(windowClause));
6058
6059 /* First, construct an array of the active windows */
6060 foreach(lc, windowClause)
6061 {
6063
6064 /* It's only active if wflists shows some related WindowFuncs */
6065 Assert(wc->winref <= wflists->maxWinRef);
6066 if (wflists->windowFuncs[wc->winref] == NIL)
6067 continue;
6068
6069 actives[nActive].wc = wc; /* original clause */
6070
6071 /*
6072 * For sorting, we want the list of partition keys followed by the
6073 * list of sort keys. But pathkeys construction will remove duplicates
6074 * between the two, so we can as well (even though we can't detect all
6075 * of the duplicates, since some may come from ECs - that might mean
6076 * we miss optimization chances here). We must, however, ensure that
6077 * the order of entries is preserved with respect to the ones we do
6078 * keep.
6079 *
6080 * partitionClause and orderClause had their own duplicates removed in
6081 * parse analysis, so we're only concerned here with removing
6082 * orderClause entries that also appear in partitionClause.
6083 */
6084 actives[nActive].uniqueOrder =
6086 wc->orderClause);
6087 nActive++;
6088 }
6089
6090 /*
6091 * Sort active windows by their partitioning/ordering clauses, ignoring
6092 * any framing clauses, so that the windows that need the same sorting are
6093 * adjacent in the list. When we come to generate paths, this will avoid
6094 * inserting additional Sort nodes.
6095 *
6096 * This is how we implement a specific requirement from the SQL standard,
6097 * which says that when two or more windows are order-equivalent (i.e.
6098 * have matching partition and order clauses, even if their names or
6099 * framing clauses differ), then all peer rows must be presented in the
6100 * same order in all of them. If we allowed multiple sort nodes for such
6101 * cases, we'd risk having the peer rows end up in different orders in
6102 * equivalent windows due to sort instability. (See General Rule 4 of
6103 * <window clause> in SQL2008 - SQL2016.)
6104 *
6105 * Additionally, if the entire list of clauses of one window is a prefix
6106 * of another, put first the window with stronger sorting requirements.
6107 * This way we will first sort for stronger window, and won't have to sort
6108 * again for the weaker one.
6109 */
 /* the qsort call (using common_prefix_cmp) is missing from this listing */
6111
6112 /* build ordered list of the original WindowClause nodes */
6113 for (int i = 0; i < nActive; i++)
6114 result = lappend(result, actives[i].wc);
6115
6116 pfree(actives);
6117
6118 return result;
6119}
6120
6121/*
6122 * name_active_windows
6123 * Ensure all active windows have unique names.
6124 *
6125 * The parser will have checked that user-assigned window names are unique
6126 * within the Query. Here we assign made-up names to any unnamed
6127 * WindowClauses for the benefit of EXPLAIN. (We don't want to do this
6128 * at parse time, because it'd mess up decompilation of views.)
6129 *
6130 * activeWindows: result of select_active_windows
6131 */
6132static void
6134{
6135 int next_n = 1;
6136 char newname[16];
6137 ListCell *lc;
6138
6139 foreach(lc, activeWindows)
6140 {
6142
6143 /* Nothing to do if it has a name already. */
6144 if (wc->name)
6145 continue;
6146
6147 /* Select a name not currently present in the list. */
6148 for (;;)
6149 {
6150 ListCell *lc2;
6151
6152 snprintf(newname, sizeof(newname), "w%d", next_n++);
6153 foreach(lc2, activeWindows)
6154 {
6156
6157 if (wc2->name && strcmp(wc2->name, newname) == 0)
6158 break; /* matched */
6159 }
6160 if (lc2 == NULL)
6161 break; /* reached the end with no match */
6162 }
6163 wc->name = pstrdup(newname);
6164 }
6165}
6166
6167/*
6168 * common_prefix_cmp
6169 * QSort comparison function for WindowClauseSortData
6170 *
6171 * Sort the windows by the required sorting clauses. First, compare the sort
6172 * clauses themselves. Second, if one window's clauses are a prefix of another
6173 * one's clauses, put the window with more sort clauses first.
6174 *
6175 * We purposefully sort by the highest tleSortGroupRef first. Since
6176 * tleSortGroupRefs are assigned for the query's DISTINCT and ORDER BY first
6177 * and because here we sort the lowest tleSortGroupRefs last, if a
6178 * WindowClause is sharing a tleSortGroupRef with the query's DISTINCT or
6179 * ORDER BY clause, this makes it more likely that the final WindowAgg will
6180 * provide presorted input for the query's DISTINCT or ORDER BY clause, thus
6181 * reducing the total number of sorts required for the query.
6182 */
6183static int
6184common_prefix_cmp(const void *a, const void *b)
6185{
6186 const WindowClauseSortData *wcsa = a;
6187 const WindowClauseSortData *wcsb = b;
6190
6191 forboth(item_a, wcsa->uniqueOrder, item_b, wcsb->uniqueOrder)
6192 {
6195
6196 if (sca->tleSortGroupRef > scb->tleSortGroupRef)
6197 return -1;
6198 else if (sca->tleSortGroupRef < scb->tleSortGroupRef)
6199 return 1;
6200 else if (sca->sortop > scb->sortop)
6201 return -1;
6202 else if (sca->sortop < scb->sortop)
6203 return 1;
6204 else if (sca->nulls_first && !scb->nulls_first)
6205 return -1;
6206 else if (!sca->nulls_first && scb->nulls_first)
6207 return 1;
6208 /* no need to compare eqop, since it is fully determined by sortop */
6209 }
6210
6211 if (list_length(wcsa->uniqueOrder) > list_length(wcsb->uniqueOrder))
6212 return -1;
6213 else if (list_length(wcsa->uniqueOrder) < list_length(wcsb->uniqueOrder))
6214 return 1;
6215
6216 return 0;
6217}
6218
6219/*
6220 * make_window_input_target
6221 * Generate appropriate PathTarget for initial input to WindowAgg nodes.
6222 *
6223 * When the query has window functions, this function computes the desired
6224 * target to be computed by the node just below the first WindowAgg.
6225 * This tlist must contain all values needed to evaluate the window functions,
6226 * compute the final target list, and perform any required final sort step.
6227 * If multiple WindowAggs are needed, each intermediate one adds its window
6228 * function results onto this base tlist; only the topmost WindowAgg computes
6229 * the actual desired target list.
6230 *
6231 * This function is much like make_group_input_target, though not quite enough
6232 * like it to share code. As in that function, we flatten most expressions
6233 * into their component variables. But we do not want to flatten window
6234 * PARTITION BY/ORDER BY clauses, since that might result in multiple
6235 * evaluations of them, which would be bad (possibly even resulting in
6236 * inconsistent answers, if they contain volatile functions).
6237 * Also, we must not flatten GROUP BY clauses that were left unflattened by
6238 * make_group_input_target, because we may no longer have access to the
6239 * individual Vars in them.
6240 *
6241 * Another key difference from make_group_input_target is that we don't
6242 * flatten Aggref expressions, since those are to be computed below the
6243 * window functions and just referenced like Vars above that.
6244 *
6245 * 'final_target' is the query's final target list (in PathTarget form)
6246 * 'activeWindows' is the list of active windows previously identified by
6247 * select_active_windows.
6248 *
6249 * The result is the PathTarget to be computed by the plan node immediately
6250 * below the first WindowAgg node.
6251 */
6252static PathTarget *
6255 List *activeWindows)
6256{
 /*
  * NOTE(review): the declarations of input_target, sgrefs, flattenable_cols
  * and the per-loop wc/sortcl/grpcl fetches are missing from this listing
  * (extraction loss) -- confirm against upstream.
  */
6261 int i;
6262 ListCell *lc;
6263
6264 Assert(root->parse->hasWindowFuncs);
6265
6266 /*
6267 * Collect the sortgroupref numbers of window PARTITION/ORDER BY clauses
6268 * into a bitmapset for convenient reference below.
6269 */
6270 sgrefs = NULL;
6271 foreach(lc, activeWindows)
6272 {
6274 ListCell *lc2;
6275
6276 foreach(lc2, wc->partitionClause)
6277 {
6279
6280 sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6281 }
6282 foreach(lc2, wc->orderClause)
6283 {
6285
6286 sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6287 }
6288 }
6289
6290 /* Add in sortgroupref numbers of GROUP BY clauses, too */
6291 foreach(lc, root->processed_groupClause)
6292 {
6294
6295 sgrefs = bms_add_member(sgrefs, grpcl->tleSortGroupRef);
6296 }
6297
6298 /*
6299 * Construct a target containing all the non-flattenable targetlist items,
6300 * and save aside the others for a moment.
6301 */
6304
6305 i = 0;
6306 foreach(lc, final_target->exprs)
6307 {
6308 Expr *expr = (Expr *) lfirst(lc);
6310
6311 /*
6312 * Don't want to deconstruct window clauses or GROUP BY items. (Note
6313 * that such items can't contain window functions, so it's okay to
6314 * compute them below the WindowAgg nodes.)
6315 */
6316 if (sgref != 0 && bms_is_member(sgref, sgrefs))
6317 {
6318 /*
6319 * Don't want to deconstruct this value, so add it to the input
6320 * target as-is.
6321 */
6323 }
6324 else
6325 {
6326 /*
6327 * Column is to be flattened, so just remember the expression for
6328 * later call to pull_var_clause.
6329 */
6331 }
6332
6333 i++;
6334 }
6335
6336 /*
6337 * Pull out all the Vars and Aggrefs mentioned in flattenable columns, and
6338 * add them to the input target if not already present. (Some might be
6339 * there already because they're used directly as window/group clauses.)
6340 *
6341 * Note: it's essential to use PVC_INCLUDE_AGGREGATES here, so that any
6342 * Aggrefs are placed in the Agg node's tlist and not left to be computed
6343 * at higher levels. On the other hand, we should recurse into
6344 * WindowFuncs to make sure their input expressions are available.
6345 */
6351
6352 /* clean up cruft */
6355
6356 /* XXX this causes some redundant cost calculation ... */
6358}
6359
6360/*
6361 * make_pathkeys_for_window
6362 * Create a pathkeys list describing the required input ordering
6363 * for the given WindowClause.
6364 *
6365 * Modifies wc's partitionClause to remove any clauses which are deemed
6366 * redundant by the pathkey logic.
6367 *
6368 * The required ordering is first the PARTITION keys, then the ORDER keys.
6369 * In the future we might try to implement windowing using hashing, in which
6370 * case the ordering could be relaxed, but for now we always sort.
6371 */
6372static List *
6374 List *tlist)
6375{
6376 List *window_pathkeys = NIL;
6377
 /*
  * NOTE(review): the grouping_is_sortable() tests guarding these two
  * ereports, and the make_pathkeys_for_sortclauses* calls below, are
  * missing from this listing -- confirm against upstream.
  */
6378 /* Throw error if can't sort */
6380 ereport(ERROR,
6382 errmsg("could not implement window PARTITION BY"),
6383 errdetail("Window partitioning columns must be of sortable datatypes.")));
6385 ereport(ERROR,
6387 errmsg("could not implement window ORDER BY"),
6388 errdetail("Window ordering columns must be of sortable datatypes.")));
6389
6390 /*
6391 * First fetch the pathkeys for the PARTITION BY clause. We can safely
6392 * remove any clauses from the wc->partitionClause for redundant pathkeys.
6393 */
6394 if (wc->partitionClause != NIL)
6395 {
6396 bool sortable;
6397
6399 &wc->partitionClause,
6400 tlist,
6401 true,
6402 false,
6403 &sortable,
6404 false);
6405
6407 }
6408
6409 /*
6410 * In principle, we could also consider removing redundant ORDER BY items
6411 * too as doing so does not alter the result of peer row checks done by
6412 * the executor. However, we must *not* remove the ordering column for
6413 * RANGE OFFSET cases, as the executor needs that for in_range tests even
6414 * if it's known to be equal to some partitioning column.
6415 */
6416 if (wc->orderClause != NIL)
6417 {
6419
6421 wc->orderClause,
6422 tlist);
6423
6424 /* Okay, make the combined pathkeys */
6425 if (window_pathkeys != NIL)
6426 window_pathkeys = append_pathkeys(window_pathkeys, orderby_pathkeys)
6427 else
6428 window_pathkeys = orderby_pathkeys;
6429 }
6430
6431 return window_pathkeys;
6432}
6433
6434/*
6435 * make_sort_input_target
6436 * Generate appropriate PathTarget for initial input to Sort step.
6437 *
6438 * If the query has ORDER BY, this function chooses the target to be computed
6439 * by the node just below the Sort (and DISTINCT, if any, since Unique can't
6440 * project) steps. This might or might not be identical to the query's final
6441 * output target.
6442 *
6443 * The main argument for keeping the sort-input tlist the same as the final
6444 * is that we avoid a separate projection node (which will be needed if
6445 * they're different, because Sort can't project). However, there are also
6446 * advantages to postponing tlist evaluation till after the Sort: it ensures
6447 * a consistent order of evaluation for any volatile functions in the tlist,
6448 * and if there's also a LIMIT, we can stop the query without ever computing
6449 * tlist functions for later rows, which is beneficial for both volatile and
6450 * expensive functions.
6451 *
6452 * Our current policy is to postpone volatile expressions till after the sort
6453 * unconditionally (assuming that that's possible, ie they are in plain tlist
6454 * columns and not ORDER BY/GROUP BY/DISTINCT columns). We also prefer to
6455 * postpone set-returning expressions, because running them beforehand would
6456 * bloat the sort dataset, and because it might cause unexpected output order
6457 * if the sort isn't stable. However there's a constraint on that: all SRFs
6458 * in the tlist should be evaluated at the same plan step, so that they can
6459 * run in sync in nodeProjectSet. So if any SRFs are in sort columns, we
6460 * mustn't postpone any SRFs. (Note that in principle that policy should
6461 * probably get applied to the group/window input targetlists too, but we
6462 * have not done that historically.) Lastly, expensive expressions are
6463 * postponed if there is a LIMIT, or if root->tuple_fraction shows that
6464 * partial evaluation of the query is possible (if neither is true, we expect
6465 * to have to evaluate the expressions for every row anyway), or if there are
6466 * any volatile or set-returning expressions (since once we've put in a
6467 * projection at all, it won't cost any more to postpone more stuff).
6468 *
6469 * Another issue that could potentially be considered here is that
6470 * evaluating tlist expressions could result in data that's either wider
6471 * or narrower than the input Vars, thus changing the volume of data that
6472 * has to go through the Sort. However, we usually have only a very bad
6473 * idea of the output width of any expression more complex than a Var,
6474 * so for now it seems too risky to try to optimize on that basis.
6475 *
6476 * Note that if we do produce a modified sort-input target, and then the
6477 * query ends up not using an explicit Sort, no particular harm is done:
6478 * we'll initially use the modified target for the preceding path nodes,
6479 * but then change them to the final target with apply_projection_to_path.
6480 * Moreover, in such a case the guarantees about evaluation order of
6481 * volatile functions still hold, since the rows are sorted already.
6482 *
6483 * This function has some things in common with make_group_input_target and
6484 * make_window_input_target, though the detailed rules for what to do are
6485 * different. We never flatten/postpone any grouping or ordering columns;
6486 * those are needed before the sort. If we do flatten a particular
6487 * expression, we leave Aggref and WindowFunc nodes alone, since those were
6488 * computed earlier.
6489 *
6490 * 'final_target' is the query's final target list (in PathTarget form)
6491 * 'have_postponed_srfs' is an output argument, see below
6492 *
6493 * The result is the PathTarget to be computed by the plan node immediately
6494 * below the Sort step (and the Distinct step, if any). This will be
6495 * exactly final_target if we decide a projection step wouldn't be helpful.
6496 *
6497 * In addition, *have_postponed_srfs is set to true if we choose to postpone
6498 * any set-returning functions to after the Sort.
6499 */
6500static PathTarget *
6503 bool *have_postponed_srfs)
6504{
6505 Query *parse = root->parse;
6507 int ncols;
6508 bool *col_is_srf;
6509 bool *postpone_col;
6510 bool have_srf;
6511 bool have_volatile;
6512 bool have_expensive;
6513 bool have_srf_sortcols;
6514 bool postpone_srfs;
6517 int i;
6518 ListCell *lc;
6519
6520 /* Shouldn't get here unless query has ORDER BY */
6521 Assert(parse->sortClause);
6522
6523 *have_postponed_srfs = false; /* default result */
6524
 /*
  * NOTE(review): the initializations of the have_* flags to false are
  * missing from this listing -- they must precede the loop below, else the
  * flags would be read uninitialized; confirm against upstream.
  */
6525 /* Inspect tlist and collect per-column information */
6526 ncols = list_length(final_target->exprs);
6527 col_is_srf = (bool *) palloc0(ncols * sizeof(bool));
6528 postpone_col = (bool *) palloc0(ncols * sizeof(bool));
6530
6531 i = 0;
6532 foreach(lc, final_target->exprs)
6533 {
6534 Expr *expr = (Expr *) lfirst(lc);
6535
6536 /*
6537 * If the column has a sortgroupref, assume it has to be evaluated
6538 * before sorting. Generally such columns would be ORDER BY, GROUP
6539 * BY, etc targets. One exception is columns that were removed from
6540 * GROUP BY by remove_useless_groupby_columns() ... but those would
6541 * only be Vars anyway. There don't seem to be any cases where it
6542 * would be worth the trouble to double-check.
6543 */
6545 {
6546 /*
6547 * Check for SRF or volatile functions. Check the SRF case first
6548 * because we must know whether we have any postponed SRFs.
6549 */
6550 if (parse->hasTargetSRFs &&
6551 expression_returns_set((Node *) expr))
6552 {
6553 /* We'll decide below whether these are postponable */
6554 col_is_srf[i] = true;
6555 have_srf = true;
6556 }
6557 else if (contain_volatile_functions((Node *) expr))
6558 {
6559 /* Unconditionally postpone */
6560 postpone_col[i] = true;
6561 have_volatile = true;
6562 }
6563 else
6564 {
6565 /*
6566 * Else check the cost. XXX it's annoying to have to do this
6567 * when set_pathtarget_cost_width() just did it. Refactor to
6568 * allow sharing the work?
6569 */
6570 QualCost cost;
6571
6572 cost_qual_eval_node(&cost, (Node *) expr, root);
6573
6574 /*
6575 * We arbitrarily define "expensive" as "more than 10X
6576 * cpu_operator_cost". Note this will take in any PL function
6577 * with default cost.
6578 */
6579 if (cost.per_tuple > 10 * cpu_operator_cost)
6580 {
6581 postpone_col[i] = true;
6582 have_expensive = true;
6583 }
6584 }
6585 }
6586 else
6587 {
6588 /* For sortgroupref cols, just check if any contain SRFs */
6589 if (!have_srf_sortcols &&
6590 parse->hasTargetSRFs &&
6591 expression_returns_set((Node *) expr))
6592 have_srf_sortcols = true;
6593 }
6594
6595 i++;
6596 }
6597
6598 /*
6599 * We can postpone SRFs if we have some but none are in sortgroupref cols.
6600 */
6602
6603 /*
6604 * If we don't need a post-sort projection, just return final_target.
6605 */
6606 if (!(postpone_srfs || have_volatile ||
6607 (have_expensive &&
6608 (parse->limitCount || root->tuple_fraction > 0))))
6609 return final_target;
6610
6611 /*
6612 * Report whether the post-sort projection will contain set-returning
6613 * functions. This is important because it affects whether the Sort can
6614 * rely on the query's LIMIT (if any) to bound the number of rows it needs
6615 * to return.
6616 */
6618
6619 /*
6620 * Construct the sort-input target, taking all non-postponable columns and
6621 * then adding Vars, PlaceHolderVars, Aggrefs, and WindowFuncs found in
6622 * the postponable ones.
6623 */
6626
6627 i = 0;
6628 foreach(lc, final_target->exprs)
6629 {
6630 Expr *expr = (Expr *) lfirst(lc);
6631
 /* lines adding expr to input_target or postponable_cols are missing here */
6632 if (postpone_col[i] || (postpone_srfs && col_is_srf[i]))
6634 else
6637
6638 i++;
6639 }
6640
6641 /*
6642 * Pull out all the Vars, Aggrefs, and WindowFuncs mentioned in
6643 * postponable columns, and add them to the sort-input target if not
6644 * already present. (Some might be there already.) We mustn't
6645 * deconstruct Aggrefs or WindowFuncs here, since the projection node
6646 * would be unable to recompute them.
6647 */
6653
6654 /* clean up cruft */
6657
6658 /* XXX this represents even more redundant cost calculation ... */
6660}
6661
6662/*
6663 * get_cheapest_fractional_path
6664 * Find the cheapest path for retrieving a specified fraction of all
6665 * the tuples expected to be returned by the given relation.
6666 *
6667 * Do not consider parameterized paths. If the caller needs a path for upper
6668 * rel, it can't have parameterized paths. If the caller needs an append
6669 * subpath, it could become limited by the treatment of similar
6670 * parameterization of all the subpaths.
6671 *
6672 * We interpret tuple_fraction the same way as grouping_planner.
6673 *
6674 * We assume set_cheapest() has been run on the given rel.
6675 */
6676Path *
6677get_cheapest_fractional_path(RelOptInfo *rel, double tuple_fraction)
6678{
6680 ListCell *l;
6681
6682 /* If all tuples will be retrieved, just return the cheapest-total path */
6683 if (tuple_fraction <= 0.0)
6684 return best_path;
6685
6686 /* Convert absolute # of tuples to a fraction; no need to clamp to 0..1 */
6687 if (tuple_fraction >= 1.0 && best_path->rows > 0)
6688 tuple_fraction /= best_path->rows;
6689
6690 foreach(l, rel->pathlist)
6691 {
6692 Path *path = (Path *) lfirst(l);
6693
6694 if (path->param_info)
6695 continue;
6696
6697 if (path == rel->cheapest_total_path ||
6698 compare_fractional_path_costs(best_path, path, tuple_fraction) <= 0)
6699 continue;
6700
6701 best_path = path;
6702 }
6703
6704 return best_path;
6705}
6706
6707/*
6708 * adjust_paths_for_srfs
6709 * Fix up the Paths of the given upperrel to handle tSRFs properly.
6710 *
6711 * The executor can only handle set-returning functions that appear at the
6712 * top level of the targetlist of a ProjectSet plan node. If we have any SRFs
6713 * that are not at top level, we need to split up the evaluation into multiple
6714 * plan levels in which each level satisfies this constraint. This function
6715 * modifies each Path of an upperrel that (might) compute any SRFs in its
6716 * output tlist to insert appropriate projection steps.
6717 *
6718 * The given targets and targets_contain_srfs lists are from
6719 * split_pathtarget_at_srfs(). We assume the existing Paths emit the first
6720 * target in targets.
6721 */
6722static void
6724 List *targets, List *targets_contain_srfs)
6725{
6726 ListCell *lc;
6727
6730
6731 /* If no SRFs appear at this plan level, nothing to do */
6732 if (list_length(targets) == 1)
6733 return;
6734
6735 /*
6736 * Stack SRF-evaluation nodes atop each path for the rel.
6737 *
6738 * In principle we should re-run set_cheapest() here to identify the
6739 * cheapest path, but it seems unlikely that adding the same tlist eval
6740 * costs to all the paths would change that, so we don't bother. Instead,
6741 * just assume that the cheapest-startup and cheapest-total paths remain
6742 * so. (There should be no parameterized paths anymore, so we needn't
6743 * worry about updating cheapest_parameterized_paths.)
6744 */
6745 foreach(lc, rel->pathlist)
6746 {
6747 Path *subpath = (Path *) lfirst(lc);
6748 Path *newpath = subpath;
6749 ListCell *lc1,
6750 *lc2;
6751
6752 Assert(subpath->param_info == NULL);
6754 {
6756 bool contains_srfs = (bool) lfirst_int(lc2);
6757
6758 /* If this level doesn't contain SRFs, do regular projection */
6759 if (contains_srfs)
6761 rel,
6762 newpath,
6763 thistarget);
6764 else
6766 rel,
6767 newpath,
6768 thistarget);
6769 }
6770 lfirst(lc) = newpath;
6771 if (subpath == rel->cheapest_startup_path)
6773 if (subpath == rel->cheapest_total_path)
6775 }
6776
6777 /* Likewise for partial paths, if any */
6778 foreach(lc, rel->partial_pathlist)
6779 {
6780 Path *subpath = (Path *) lfirst(lc);
6781 Path *newpath = subpath;
6782 ListCell *lc1,
6783 *lc2;
6784
6785 Assert(subpath->param_info == NULL);
6787 {
6789 bool contains_srfs = (bool) lfirst_int(lc2);
6790
6791 /* If this level doesn't contain SRFs, do regular projection */
6792 if (contains_srfs)
6794 rel,
6795 newpath,
6796 thistarget);
6797 else
6798 {
6799 /* avoid apply_projection_to_path, in case of multiple refs */
6801 rel,
6802 newpath,
6803 thistarget);
6804 }
6805 }
6806 lfirst(lc) = newpath;
6807 }
6808}
6809
6810/*
6811 * expression_planner
6812 * Perform planner's transformations on a standalone expression.
6813 *
6814 * Various utility commands need to evaluate expressions that are not part
6815 * of a plannable query. They can do so using the executor's regular
6816 * expression-execution machinery, but first the expression has to be fed
6817 * through here to transform it from parser output to something executable.
6818 *
6819 * Currently, we disallow sublinks in standalone expressions, so there's no
6820 * real "planning" involved here. (That might not always be true though.)
6821 * What we must do is run eval_const_expressions to ensure that any function
6822 * calls are converted to positional notation and function default arguments
6823 * get inserted. The fact that constant subexpressions get simplified is a
6824 * side-effect that is useful when the expression will get evaluated more than
6825 * once. Also, we must fix operator function IDs.
6826 *
6827 * This does not return any information about dependencies of the expression.
6828 * Hence callers should use the results only for the duration of the current
6829 * query. Callers that would like to cache the results for longer should use
6830 * expression_planner_with_deps, probably via the plancache.
6831 *
6832 * Note: this must not make any damaging changes to the passed-in expression
6833 * tree. (It would actually be okay to apply fix_opfuncids to it, but since
6834 * we first do an expression_tree_mutator-based walk, what is returned will
6835 * be a new node tree.) The result is constructed in the current memory
6836 * context; beware that this can leak a lot of additional stuff there, too.
6837 */
6838Expr *
6840{
6841 Node *result;
6842
6843 /*
6844 * Convert named-argument function calls, insert default arguments and
6845 * simplify constant subexprs
6846 */
6848
6849 /* Fill in opfuncid values if missing */
6851
6852 return (Expr *) result;
6853}
6854
6855/*
6856 * expression_planner_with_deps
6857 * Perform planner's transformations on a standalone expression,
6858 * returning expression dependency information along with the result.
6859 *
6860 * This is identical to expression_planner() except that it also returns
6861 * information about possible dependencies of the expression, ie identities of
6862 * objects whose definitions affect the result. As in a PlannedStmt, these
6863 * are expressed as a list of relation Oids and a list of PlanInvalItems.
6864 */
6865Expr *
6867 List **relationOids,
6868 List **invalItems)
6869{
6870 Node *result;
6871 PlannerGlobal glob;
6873
6874 /* Make up dummy planner state so we can use setrefs machinery */
6875 MemSet(&glob, 0, sizeof(glob));
6876 glob.type = T_PlannerGlobal;
6877 glob.relationOids = NIL;
6878 glob.invalItems = NIL;
6879
6880 MemSet(&root, 0, sizeof(root));
6881 root.type = T_PlannerInfo;
6882 root.glob = &glob;
6883
6884 /*
6885 * Convert named-argument function calls, insert default arguments and
6886 * simplify constant subexprs. Collect identities of inlined functions
6887 * and elided domains, too.
6888 */
6889 result = eval_const_expressions(&root, (Node *) expr);
6890
6891 /* Fill in opfuncid values if missing */
6893
6894 /*
6895 * Now walk the finished expression to find anything else we ought to
6896 * record as an expression dependency.
6897 */
6899
6900 *relationOids = glob.relationOids;
6901 *invalItems = glob.invalItems;
6902
6903 return (Expr *) result;
6904}
6905
6906
6907/*
6908 * plan_cluster_use_sort
6909 * Use the planner to decide how CLUSTER should implement sorting
6910 *
6911 * tableOid is the OID of a table to be clustered on its index indexOid
6912 * (which is already known to be a btree index). Decide whether it's
6913 * cheaper to do an indexscan or a seqscan-plus-sort to execute the CLUSTER.
6914 * Return true to use sorting, false to use an indexscan.
6915 *
6916 * Note: caller had better already hold some type of lock on the table.
6917 */
6918bool
6919plan_cluster_use_sort(Oid tableOid, Oid indexOid)
6920{
6922 Query *query;
6923 PlannerGlobal *glob;
6925 RelOptInfo *rel;
6926 IndexOptInfo *indexInfo;
6932 ListCell *lc;
6933
6934 /* We can short-circuit the cost comparison if indexscans are disabled */
6935 if (!enable_indexscan)
6936 return true; /* use sort */
6937
6938 /* Set up mostly-dummy planner state */
6939 query = makeNode(Query);
6940 query->commandType = CMD_SELECT;
6941
6942 glob = makeNode(PlannerGlobal);
6943
6945 root->parse = query;
6946 root->glob = glob;
6947 root->query_level = 1;
6948 root->planner_cxt = CurrentMemoryContext;
6949 root->wt_param_id = -1;
6950 root->join_domains = list_make1(makeNode(JoinDomain));
6951
6952 /* Build a minimal RTE for the rel */
6954 rte->rtekind = RTE_RELATION;
6955 rte->relid = tableOid;
6956 rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
6957 rte->rellockmode = AccessShareLock;
6958 rte->lateral = false;
6959 rte->inh = false;
6960 rte->inFromCl = true;
6961 query->rtable = list_make1(rte);
6962 addRTEPermissionInfo(&query->rteperminfos, rte);
6963
6964 /* Set up RTE/RelOptInfo arrays */
6966
6967 /* Build RelOptInfo */
6968 rel = build_simple_rel(root, 1, NULL);
6969
6970 /* Locate IndexOptInfo for the target index */
6971 indexInfo = NULL;
6972 foreach(lc, rel->indexlist)
6973 {
6974 indexInfo = lfirst_node(IndexOptInfo, lc);
6975 if (indexInfo->indexoid == indexOid)
6976 break;
6977 }
6978
6979 /*
6980 * It's possible that get_relation_info did not generate an IndexOptInfo
6981 * for the desired index; this could happen if it's not yet reached its
6982 * indcheckxmin usability horizon, or if it's a system index and we're
6983 * ignoring system indexes. In such cases we should tell CLUSTER to not
6984 * trust the index contents but use seqscan-and-sort.
6985 */
6986 if (lc == NULL) /* not in the list? */
6987 return true; /* use sort */
6988
6989 /*
6990 * Rather than doing all the pushups that would be needed to use
6991 * set_baserel_size_estimates, just do a quick hack for rows and width.
6992 */
6993 rel->rows = rel->tuples;
6994 rel->reltarget->width = get_relation_data_width(tableOid, NULL);
6995
6996 root->total_table_pages = rel->pages;
6997
6998 /*
6999 * Determine eval cost of the index expressions, if any. We need to
7000 * charge twice that amount for each tuple comparison that happens during
7001 * the sort, since tuplesort.c will have to re-evaluate the index
7002 * expressions each time. (XXX that's pretty inefficient...)
7003 */
7004 cost_qual_eval(&indexExprCost, indexInfo->indexprs, root);
7005 comparisonCost = 2.0 * (indexExprCost.startup + indexExprCost.per_tuple);
7006
7007 /* Estimate the cost of seq scan + sort */
7010 seqScanPath->disabled_nodes,
7011 seqScanPath->total_cost, rel->tuples, rel->reltarget->width,
7013
7014 /* Estimate the cost of index scan */
7016 NIL, NIL, NIL, NIL,
7017 ForwardScanDirection, false,
7018 NULL, 1.0, false);
7019
7020 return (seqScanAndSortPath.total_cost < indexScanPath->path.total_cost);
7021}
7022
7023/*
7024 * plan_create_index_workers
7025 * Use the planner to decide how many parallel worker processes
7026 * CREATE INDEX should request for use
7027 *
7028 * tableOid is the table on which the index is to be built. indexOid is the
7029 * OID of an index to be created or reindexed (which must be an index with
7030 * support for parallel builds - currently btree, GIN, or BRIN).
7031 *
7032 * Return value is the number of parallel worker processes to request. It
7033 * may be unsafe to proceed if this is 0. Note that this does not include the
7034 * leader participating as a worker (value is always a number of parallel
7035 * worker processes).
7036 *
7037 * Note: caller had better already hold some type of lock on the table and
7038 * index.
7039 */
7040int
7042{
7044 Query *query;
7045 PlannerGlobal *glob;
7047 Relation heap;
7049 RelOptInfo *rel;
7050 int parallel_workers;
7052 double reltuples;
7053 double allvisfrac;
7054
7055 /*
7056 * We don't allow performing parallel operation in standalone backend or
7057 * when parallelism is disabled.
7058 */
7060 return 0;
7061
7062 /* Set up largely-dummy planner state */
7063 query = makeNode(Query);
7064 query->commandType = CMD_SELECT;
7065
7066 glob = makeNode(PlannerGlobal);
7067
7069 root->parse = query;
7070 root->glob = glob;
7071 root->query_level = 1;
7072 root->planner_cxt = CurrentMemoryContext;
7073 root->wt_param_id = -1;
7074 root->join_domains = list_make1(makeNode(JoinDomain));
7075
7076 /*
7077 * Build a minimal RTE.
7078 *
7079 * Mark the RTE with inh = true. This is a kludge to prevent
7080 * get_relation_info() from fetching index info, which is necessary
7081 * because it does not expect that any IndexOptInfo is currently
7082 * undergoing REINDEX.
7083 */
7085 rte->rtekind = RTE_RELATION;
7086 rte->relid = tableOid;
7087 rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
7088 rte->rellockmode = AccessShareLock;
7089 rte->lateral = false;
7090 rte->inh = true;
7091 rte->inFromCl = true;
7092 query->rtable = list_make1(rte);
7093 addRTEPermissionInfo(&query->rteperminfos, rte);
7094
7095 /* Set up RTE/RelOptInfo arrays */
7097
7098 /* Build RelOptInfo */
7099 rel = build_simple_rel(root, 1, NULL);
7100
7101 /* Rels are assumed already locked by the caller */
7102 heap = table_open(tableOid, NoLock);
7103 index = index_open(indexOid, NoLock);
7104
7105 /*
7106 * Determine if it's safe to proceed.
7107 *
7108 * Currently, parallel workers can't access the leader's temporary tables.
7109 * Furthermore, any index predicate or index expressions must be parallel
7110 * safe.
7111 */
7112 if (heap->rd_rel->relpersistence == RELPERSISTENCE_TEMP ||
7115 {
7116 parallel_workers = 0;
7117 goto done;
7118 }
7119
7120 /*
7121 * If parallel_workers storage parameter is set for the table, accept that
7122 * as the number of parallel worker processes to launch (though still cap
7123 * at max_parallel_maintenance_workers). Note that we deliberately do not
7124 * consider any other factor when parallel_workers is set. (e.g., memory
7125 * use by workers.)
7126 */
7127 if (rel->rel_parallel_workers != -1)
7128 {
7129 parallel_workers = Min(rel->rel_parallel_workers,
7131 goto done;
7132 }
7133
7134 /*
7135 * Estimate heap relation size ourselves, since rel->pages cannot be
7136 * trusted (heap RTE was marked as inheritance parent)
7137 */
7138 estimate_rel_size(heap, NULL, &heap_blocks, &reltuples, &allvisfrac);
7139
7140 /*
7141 * Determine number of workers to scan the heap relation using generic
7142 * model
7143 */
7144 parallel_workers = compute_parallel_worker(rel, heap_blocks, -1,
7146
7147 /*
7148 * Cap workers based on available maintenance_work_mem as needed.
7149 *
7150 * Note that each tuplesort participant receives an even share of the
7151 * total maintenance_work_mem budget. Aim to leave participants
7152 * (including the leader as a participant) with no less than 32MB of
7153 * memory. This leaves cases where maintenance_work_mem is set to 64MB
7154 * immediately past the threshold of being capable of launching a single
7155 * parallel worker to sort.
7156 */
7157 while (parallel_workers > 0 &&
7158 maintenance_work_mem / (parallel_workers + 1) < 32 * 1024)
7159 parallel_workers--;
7160
7161done:
7163 table_close(heap, NoLock);
7164
7165 return parallel_workers;
7166}
7167
7168/*
7169 * add_paths_to_grouping_rel
7170 *
7171 * Add non-partial paths to grouping relation.
7172 */
/*
 * NOTE(review): this text was extracted from a doxygen render; every gap in
 * the embedded line numbers below is a source line the extraction dropped
 * (signature continuation lines, local declarations, and the first lines of
 * several multi-line calls such as get_number_of_groups(), create_agg_path(),
 * and create_group_path()).  Restore from the upstream file before compiling;
 * code left byte-identical here, only comments added.
 */
7173static void
7175 RelOptInfo *grouped_rel,
7179 GroupPathExtraData *extra)
7180{
7181 Query *parse = root->parse;
7182 Path *cheapest_path = input_rel->cheapest_total_path;
7184 ListCell *lc;
7185 bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7186 bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7187 List *havingQual = (List *) extra->havingQual;
7188 AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7189 double dNumGroups = 0;
7190 double dNumFinalGroups = 0;
7191
7192 /*
7193 * Estimate number of groups for non-split aggregation.
7194 */
7196 cheapest_path->rows,
7197 gd,
7198 extra->targetList);
7199
7201 {
7203 partially_grouped_rel->cheapest_total_path;
7204
7205 /*
7206 * Estimate number of groups for final phase of partial aggregation.
7207 */
7211 gd,
7212 extra->targetList);
7213 }
7214
7215 if (can_sort)
7216 {
7217 /*
7218 * Use any available suitably-sorted path as input, and also consider
7219 * sorting the cheapest-total path and incremental sort on any paths
7220 * with presorted keys.
7221 */
7222 foreach(lc, input_rel->pathlist)
7223 {
7224 ListCell *lc2;
7225 Path *path = (Path *) lfirst(lc);
7226 Path *path_save = path;
7228
7229 /* generate alternative group orderings that might be useful */
7231
7233
7234 foreach(lc2, pathkey_orderings)
7235 {
7237
7238 /* restore the path (we replace it in the loop) */
7239 path = path_save;
7240
7241 path = make_ordered_path(root,
7242 grouped_rel,
7243 path,
7245 info->pathkeys,
7246 -1.0);
7247 if (path == NULL)
7248 continue;
7249
7250 /* Now decide what to stick atop it */
7251 if (parse->groupingSets)
7252 {
7253 consider_groupingsets_paths(root, grouped_rel,
7254 path, true, can_hash,
7256 }
7257 else if (parse->hasAggs)
7258 {
7259 /*
7260 * We have aggregation, possibly with plain GROUP BY. Make
7261 * an AggPath.
7262 */
7263 add_path(grouped_rel, (Path *)
7265 grouped_rel,
7266 path,
7267 grouped_rel->reltarget,
7268 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7270 info->clauses,
7271 havingQual,
7272 agg_costs,
7273 dNumGroups));
7274 }
7275 else if (parse->groupClause)
7276 {
7277 /*
7278 * We have GROUP BY without aggregation or grouping sets.
7279 * Make a GroupPath.
7280 */
7281 add_path(grouped_rel, (Path *)
7283 grouped_rel,
7284 path,
7285 info->clauses,
7286 havingQual,
7287 dNumGroups));
7288 }
7289 else
7290 {
7291 /* Other cases should have been handled above */
7292 Assert(false);
7293 }
7294 }
7295 }
7296
7297 /*
7298 * Instead of operating directly on the input relation, we can
7299 * consider finalizing a partially aggregated path.
7300 */
/* NOTE(review): guard presumably tests partially_grouped_rel != NULL — confirm against upstream */
7302 {
7303 foreach(lc, partially_grouped_rel->pathlist)
7304 {
7305 ListCell *lc2;
7306 Path *path = (Path *) lfirst(lc);
7307 Path *path_save = path;
7309
7310 /* generate alternative group orderings that might be useful */
7312
7314
7315 /* process all potentially interesting grouping reorderings */
7316 foreach(lc2, pathkey_orderings)
7317 {
7319
7320 /* restore the path (we replace it in the loop) */
7321 path = path_save;
7322
7323 path = make_ordered_path(root,
7324 grouped_rel,
7325 path,
7327 info->pathkeys,
7328 -1.0);
7329
7330 if (path == NULL)
7331 continue;
7332
7333 if (parse->hasAggs)
7334 add_path(grouped_rel, (Path *)
7336 grouped_rel,
7337 path,
7338 grouped_rel->reltarget,
7339 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7341 info->clauses,
7342 havingQual,
7343 agg_final_costs,
7345 else
7346 add_path(grouped_rel, (Path *)
7348 grouped_rel,
7349 path,
7350 info->clauses,
7351 havingQual,
7353
7354 }
7355 }
7356 }
7357 }
7358
7359 if (can_hash)
7360 {
7361 if (parse->groupingSets)
7362 {
7363 /*
7364 * Try for a hash-only groupingsets path over unsorted input.
7365 */
7366 consider_groupingsets_paths(root, grouped_rel,
7367 cheapest_path, false, true,
7369 }
7370 else
7371 {
7372 /*
7373 * Generate a HashAgg Path. We just need an Agg over the
7374 * cheapest-total input path, since input order won't matter.
7375 */
7376 add_path(grouped_rel, (Path *)
7377 create_agg_path(root, grouped_rel,
7379 grouped_rel->reltarget,
7380 AGG_HASHED,
7382 root->processed_groupClause,
7383 havingQual,
7384 agg_costs,
7385 dNumGroups));
7386 }
7387
7388 /*
7389 * Generate a Finalize HashAgg Path atop of the cheapest partially
7390 * grouped path, assuming there is one
7391 */
7393 {
7394 add_path(grouped_rel, (Path *)
7396 grouped_rel,
7398 grouped_rel->reltarget,
7399 AGG_HASHED,
7401 root->processed_groupClause,
7402 havingQual,
7403 agg_final_costs,
7405 }
7406 }
7407
7408 /*
7409 * When partitionwise aggregate is used, we might have fully aggregated
7410 * paths in the partial pathlist, because add_paths_to_append_rel() will
7411 * consider a path for grouped_rel consisting of a Parallel Append of
7412 * non-partial paths from each child.
7413 */
7414 if (grouped_rel->partial_pathlist != NIL)
7415 gather_grouping_paths(root, grouped_rel);
7416}
7417
7418/*
7419 * create_partial_grouping_paths
7420 *
7421 * Create a new upper relation representing the result of partial aggregation
7422 * and populate it with appropriate paths. Note that we don't finalize the
7423 * lists of paths here, so the caller can add additional partial or non-partial
7424 * paths and must afterward call gather_grouping_paths and set_cheapest on
7425 * the returned upper relation.
7426 *
7427 * All paths for this new upper relation -- both partial and non-partial --
7428 * have been partially aggregated but require a subsequent FinalizeAggregate
7429 * step.
7430 *
7431 * NB: This function is allowed to return NULL if it determines that there is
7432 * no real need to create a new RelOptInfo.
7433 */
/*
 * NOTE(review): doxygen-extracted text; gaps in the embedded line numbers are
 * dropped source lines (the signature continuation, the eager_agg_rel and
 * cheapest_partial_path declarations, fetch_upper_rel()/make_partial_grouping_target()
 * calls, get_agg_clause_costs() calls, and the first lines of several
 * add_path()/add_partial_path()/create_agg_path() calls).  Restore from the
 * upstream file before compiling; code left byte-identical, only comments added.
 */
7434static RelOptInfo *
7436 RelOptInfo *grouped_rel,
7439 GroupPathExtraData *extra,
7440 bool force_rel_creation)
7441{
7442 Query *parse = root->parse;
7445 AggClauseCosts *agg_partial_costs = &extra->agg_partial_costs;
7446 AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7448 Path *cheapest_total_path = NULL;
7449 double dNumPartialGroups = 0;
7450 double dNumPartialPartialGroups = 0;
7451 ListCell *lc;
7452 bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7453 bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7454
7455 /*
7456 * Check whether any partially aggregated paths have been generated
7457 * through eager aggregation.
7458 */
7459 if (input_rel->grouped_rel &&
7460 !IS_DUMMY_REL(input_rel->grouped_rel) &&
7461 input_rel->grouped_rel->pathlist != NIL)
7462 eager_agg_rel = input_rel->grouped_rel;
7463
7464 /*
7465 * Consider whether we should generate partially aggregated non-partial
7466 * paths. We can only do this if we have a non-partial path, and only if
7467 * the parent of the input rel is performing partial partitionwise
7468 * aggregation. (Note that extra->patype is the type of partitionwise
7469 * aggregation being used at the parent level, not this level.)
7470 */
7471 if (input_rel->pathlist != NIL &&
7473 cheapest_total_path = input_rel->cheapest_total_path;
7474
7475 /*
7476 * If parallelism is possible for grouped_rel, then we should consider
7477 * generating partially-grouped partial paths. However, if the input rel
7478 * has no partial paths, then we can't.
7479 */
7480 if (grouped_rel->consider_parallel && input_rel->partial_pathlist != NIL)
7481 cheapest_partial_path = linitial(input_rel->partial_pathlist);
7482
7483 /*
7484 * If we can't partially aggregate partial paths, and we can't partially
7485 * aggregate non-partial paths, and no partially aggregated paths were
7486 * generated by eager aggregation, then don't bother creating the new
7487 * RelOptInfo at all, unless the caller specified force_rel_creation.
7488 */
7489 if (cheapest_total_path == NULL &&
7491 eager_agg_rel == NULL &&
7493 return NULL;
7494
7495 /*
7496 * Build a new upper relation to represent the result of partially
7497 * aggregating the rows from the input relation.
7498 */
7501 grouped_rel->relids);
7502 partially_grouped_rel->consider_parallel =
7503 grouped_rel->consider_parallel;
7504 partially_grouped_rel->pgs_mask = grouped_rel->pgs_mask;
7505 partially_grouped_rel->reloptkind = grouped_rel->reloptkind;
7506 partially_grouped_rel->serverid = grouped_rel->serverid;
7507 partially_grouped_rel->userid = grouped_rel->userid;
7508 partially_grouped_rel->useridiscurrent = grouped_rel->useridiscurrent;
7509 partially_grouped_rel->fdwroutine = grouped_rel->fdwroutine;
7510
7511 /*
7512 * Build target list for partial aggregate paths. These paths cannot just
7513 * emit the same tlist as regular aggregate paths, because (1) we must
7514 * include Vars and Aggrefs needed in HAVING, which might not appear in
7515 * the result tlist, and (2) the Aggrefs must be set in partial mode.
7516 */
7519 extra->havingQual);
7520
7521 if (!extra->partial_costs_set)
7522 {
7523 /*
7524 * Collect statistics about aggregates for estimating costs of
7525 * performing aggregation in parallel.
7526 */
7527 MemSet(agg_partial_costs, 0, sizeof(AggClauseCosts));
7528 MemSet(agg_final_costs, 0, sizeof(AggClauseCosts));
7529 if (parse->hasAggs)
7530 {
7531 /* partial phase */
7533 agg_partial_costs);
7534
7535 /* final phase */
7537 agg_final_costs);
7538 }
7539
7540 extra->partial_costs_set = true;
7541 }
7542
7543 /* Estimate number of partial groups. */
7544 if (cheapest_total_path != NULL)
7547 cheapest_total_path->rows,
7548 gd,
7549 extra->targetList);
7554 gd,
7555 extra->targetList);
7556
7557 if (can_sort && cheapest_total_path != NULL)
7558 {
7559 /* This should have been checked previously */
7560 Assert(parse->hasAggs || parse->groupClause);
7561
7562 /*
7563 * Use any available suitably-sorted path as input, and also consider
7564 * sorting the cheapest partial path.
7565 */
7566 foreach(lc, input_rel->pathlist)
7567 {
7568 ListCell *lc2;
7569 Path *path = (Path *) lfirst(lc);
7570 Path *path_save = path;
7572
7573 /* generate alternative group orderings that might be useful */
7575
7577
7578 /* process all potentially interesting grouping reorderings */
7579 foreach(lc2, pathkey_orderings)
7580 {
7582
7583 /* restore the path (we replace it in the loop) */
7584 path = path_save;
7585
7586 path = make_ordered_path(root,
7588 path,
7589 cheapest_total_path,
7590 info->pathkeys,
7591 -1.0);
7592
7593 if (path == NULL)
7594 continue;
7595
7596 if (parse->hasAggs)
7600 path,
7601 partially_grouped_rel->reltarget,
7602 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7604 info->clauses,
7605 NIL,
7606 agg_partial_costs,
7608 else
7612 path,
7613 info->clauses,
7614 NIL,
7616 }
7617 }
7618 }
7619
7621 {
7622 /* Similar to above logic, but for partial paths. */
7623 foreach(lc, input_rel->partial_pathlist)
7624 {
7625 ListCell *lc2;
7626 Path *path = (Path *) lfirst(lc);
7627 Path *path_save = path;
7629
7630 /* generate alternative group orderings that might be useful */
7632
7634
7635 /* process all potentially interesting grouping reorderings */
7636 foreach(lc2, pathkey_orderings)
7637 {
7639
7640
7641 /* restore the path (we replace it in the loop) */
7642 path = path_save;
7643
7644 path = make_ordered_path(root,
7646 path,
7648 info->pathkeys,
7649 -1.0);
7650
7651 if (path == NULL)
7652 continue;
7653
7654 if (parse->hasAggs)
7658 path,
7659 partially_grouped_rel->reltarget,
7660 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7662 info->clauses,
7663 NIL,
7664 agg_partial_costs,
7666 else
7670 path,
7671 info->clauses,
7672 NIL,
7674 }
7675 }
7676 }
7677
7678 /*
7679 * Add a partially-grouped HashAgg Path where possible
7680 */
7681 if (can_hash && cheapest_total_path != NULL)
7682 {
7683 /* Checked above */
7684 Assert(parse->hasAggs || parse->groupClause);
7685
7689 cheapest_total_path,
7690 partially_grouped_rel->reltarget,
7691 AGG_HASHED,
7693 root->processed_groupClause,
7694 NIL,
7695 agg_partial_costs,
7697 }
7698
7699 /*
7700 * Now add a partially-grouped HashAgg partial Path where possible
7701 */
7703 {
7708 partially_grouped_rel->reltarget,
7709 AGG_HASHED,
7711 root->processed_groupClause,
7712 NIL,
7713 agg_partial_costs,
7715 }
7716
7717 /*
7718 * Add any partially aggregated paths generated by eager aggregation to
7719 * the new upper relation after applying projection steps as needed.
7720 */
7721 if (eager_agg_rel)
7722 {
7723 /* Add the paths */
7724 foreach(lc, eager_agg_rel->pathlist)
7725 {
7726 Path *path = (Path *) lfirst(lc);
7727
7728 /* Shouldn't have any parameterized paths anymore */
7729 Assert(path->param_info == NULL);
7730
7731 path = (Path *) create_projection_path(root,
7733 path,
7734 partially_grouped_rel->reltarget);
7735
7737 }
7738
7739 /*
7740 * Likewise add the partial paths, but only if parallelism is possible
7741 * for partially_grouped_rel.
7742 */
7743 if (partially_grouped_rel->consider_parallel)
7744 {
7745 foreach(lc, eager_agg_rel->partial_pathlist)
7746 {
7747 Path *path = (Path *) lfirst(lc);
7748
7749 /* Shouldn't have any parameterized paths anymore */
7750 Assert(path->param_info == NULL);
7751
7752 path = (Path *) create_projection_path(root,
7754 path,
7755 partially_grouped_rel->reltarget);
7756
7758 }
7759 }
7760 }
7761
7762 /*
7763 * If there is an FDW that's responsible for all baserels of the query,
7764 * let it consider adding partially grouped ForeignPaths.
7765 */
7766 if (partially_grouped_rel->fdwroutine &&
7767 partially_grouped_rel->fdwroutine->GetForeignUpperPaths)
7768 {
7769 FdwRoutine *fdwroutine = partially_grouped_rel->fdwroutine;
7770
7771 fdwroutine->GetForeignUpperPaths(root,
7774 extra);
7775 }
7776
7777 return partially_grouped_rel;
7778}
7779
7780/*
7781 * make_ordered_path
7782 * Return a path ordered by 'pathkeys' based on the given 'path'. May
7783 * return NULL if it doesn't make sense to generate an ordered path in
7784 * this case.
7785 */
7786static Path *
7788 Path *cheapest_path, List *pathkeys, double limit_tuples)
7789{
7790 bool is_sorted;
7791 int presorted_keys;
7792
7794 path->pathkeys,
7795 &presorted_keys);
7796
7797 if (!is_sorted)
7798 {
7799 /*
7800 * Try at least sorting the cheapest path and also try incrementally
7801 * sorting any path which is partially sorted already (no need to deal
7802 * with paths which have presorted keys when incremental sort is
7803 * disabled unless it's the cheapest input path).
7804 */
7805 if (path != cheapest_path &&
7806 (presorted_keys == 0 || !enable_incremental_sort))
7807 return NULL;
7808
7809 /*
7810 * We've no need to consider both a sort and incremental sort. We'll
7811 * just do a sort if there are no presorted keys and an incremental
7812 * sort when there are presorted keys.
7813 */
7814 if (presorted_keys == 0 || !enable_incremental_sort)
7815 path = (Path *) create_sort_path(root,
7816 rel,
7817 path,
7818 pathkeys,
7819 limit_tuples);
7820 else
7822 rel,
7823 path,
7824 pathkeys,
7825 presorted_keys,
7826 limit_tuples);
7827 }
7828
7829 return path;
7830}
7831
7832/*
7833 * Generate Gather and Gather Merge paths for a grouping relation or partial
7834 * grouping relation.
7835 *
7836 * generate_useful_gather_paths does most of the work, but we also consider a
7837 * special case: we could try sorting the data by the group_pathkeys and then
7838 * applying Gather Merge.
7839 *
7840 * NB: This function shouldn't be used for anything other than a grouped or
7841 * partially grouped relation not only because of the fact that it explicitly
7842 * references group_pathkeys but we pass "true" as the third argument to
7843 * generate_useful_gather_paths().
7844 */
7845static void
7847{
7848 ListCell *lc;
7851
7852 /*
7853 * This occurs after any partial aggregation has taken place, so trim off
7854 * any pathkeys added for ORDER BY / DISTINCT aggregates.
7855 */
7856 if (list_length(root->group_pathkeys) > root->num_groupby_pathkeys)
7857 groupby_pathkeys = list_copy_head(root->group_pathkeys,
7858 root->num_groupby_pathkeys);
7859 else
7860 groupby_pathkeys = root->group_pathkeys;
7861
7862 /* Try Gather for unordered paths and Gather Merge for ordered ones. */
7864
7866
7867 /* XXX Shouldn't this also consider the group-key-reordering? */
7868 foreach(lc, rel->partial_pathlist)
7869 {
7870 Path *path = (Path *) lfirst(lc);
7871 bool is_sorted;
7872 int presorted_keys;
7873 double total_groups;
7874
7876 path->pathkeys,
7877 &presorted_keys);
7878
7879 if (is_sorted)
7880 continue;
7881
7882 /*
7883 * Try at least sorting the cheapest path and also try incrementally
7884 * sorting any path which is partially sorted already (no need to deal
7885 * with paths which have presorted keys when incremental sort is
7886 * disabled unless it's the cheapest input path).
7887 */
7888 if (path != cheapest_partial_path &&
7889 (presorted_keys == 0 || !enable_incremental_sort))
7890 continue;
7891
7892 /*
7893 * We've no need to consider both a sort and incremental sort. We'll
7894 * just do a sort if there are no presorted keys and an incremental
7895 * sort when there are presorted keys.
7896 */
7897 if (presorted_keys == 0 || !enable_incremental_sort)
7898 path = (Path *) create_sort_path(root, rel, path,
7900 -1.0);
7901 else
7903 rel,
7904 path,
7906 presorted_keys,
7907 -1.0);
7909 path = (Path *)
7911 rel,
7912 path,
7913 rel->reltarget,
7915 NULL,
7916 &total_groups);
7917
7918 add_path(rel, path);
7919 }
7920}
7921
7922/*
7923 * can_partial_agg
7924 *
7925 * Determines whether or not partial grouping and/or aggregation is possible.
7926 * Returns true when possible, false otherwise.
7927 */
7928static bool
7930{
7931 Query *parse = root->parse;
7932
7933 if (!parse->hasAggs && parse->groupClause == NIL)
7934 {
7935 /*
7936 * We don't know how to do parallel aggregation unless we have either
7937 * some aggregates or a grouping clause.
7938 */
7939 return false;
7940 }
7941 else if (parse->groupingSets)
7942 {
7943 /* We don't know how to do grouping sets in parallel. */
7944 return false;
7945 }
7946 else if (root->hasNonPartialAggs || root->hasNonSerialAggs)
7947 {
7948 /* Insufficient support for partial mode. */
7949 return false;
7950 }
7951
7952 /* Everything looks good. */
7953 return true;
7954}
7955
7956/*
7957 * apply_scanjoin_target_to_paths
7958 *
7959 * Adjust the final scan/join relation, and recursively all of its children,
7960 * to generate the final scan/join target. It would be more correct to model
7961 * this as a separate planning step with a new RelOptInfo at the toplevel and
7962 * for each child relation, but doing it this way is noticeably cheaper.
7963 * Maybe that problem can be solved at some point, but for now we do this.
7964 *
7965 * If tlist_same_exprs is true, then the scan/join target to be applied has
7966 * the same expressions as the existing reltarget, so we need only insert the
7967 * appropriate sortgroupref information. By avoiding the creation of
7968 * projection paths we save effort both immediately and at plan creation time.
7969 */
7970static void
7972 RelOptInfo *rel,
7976 bool tlist_same_exprs)
7977{
7980 ListCell *lc;
7981
7982 /* This recurses, so be paranoid. */
7984
7985 /*
7986 * If the rel only has Append and MergeAppend paths, we want to drop its
7987 * existing paths and generate new ones. This function would still be
7988 * correct if we kept the existing paths: we'd modify them to generate the
7989 * correct target above the partitioning Append, and then they'd compete
7990 * on cost with paths generating the target below the Append. However, in
7991 * our current cost model the latter way is always the same or cheaper
7992 * cost, so modifying the existing paths would just be useless work.
7993 * Moreover, when the cost is the same, varying roundoff errors might
7994 * sometimes allow an existing path to be picked, resulting in undesirable
7995 * cross-platform plan variations. So we drop old paths and thereby force
7996 * the work to be done below the Append.
7997 *
7998 * However, there are several cases when this optimization is not safe. If
7999 * the rel isn't partitioned, then none of the paths will be Append or
8000 * MergeAppend paths, so we should definitely not do this. If it is
8001 * partitioned but is a joinrel, it may have Append and MergeAppend paths,
8002 * but it can also have join paths that we can't afford to discard.
8003 *
8004 * Some care is needed, because we have to allow
8005 * generate_useful_gather_paths to see the old partial paths in the next
8006 * stanza. Hence, zap the main pathlist here, then allow
8007 * generate_useful_gather_paths to add path(s) to the main list, and
8008 * finally zap the partial pathlist.
8009 */
8011 rel->pathlist = NIL;
8012
8013 /*
8014 * If the scan/join target is not parallel-safe, partial paths cannot
8015 * generate it.
8016 */
8018 {
8019 /*
8020 * Since we can't generate the final scan/join target in parallel
8021 * workers, this is our last opportunity to use any partial paths that
8022 * exist; so build Gather path(s) that use them and emit whatever the
8023 * current reltarget is. We don't do this in the case where the
8024 * target is parallel-safe, since we will be able to generate superior
8025 * paths by doing it after the final scan/join target has been
8026 * applied.
8027 */
8029
8030 /* Can't use parallel query above this level. */
8031 rel->partial_pathlist = NIL;
8032 rel->consider_parallel = false;
8033 }
8034
8035 /* Finish dropping old paths for a partitioned rel, per comment above */
8037 rel->partial_pathlist = NIL;
8038
8039 /* Extract SRF-free scan/join target. */
8041
8042 /*
8043 * Apply the SRF-free scan/join target to each existing path.
8044 *
8045 * If the tlist exprs are the same, we can just inject the sortgroupref
8046 * information into the existing pathtargets. Otherwise, replace each
8047 * path with a projection path that generates the SRF-free scan/join
8048 * target. This can't change the ordering of paths within rel->pathlist,
8049 * so we just modify the list in place.
8050 */
8051 foreach(lc, rel->pathlist)
8052 {
8053 Path *subpath = (Path *) lfirst(lc);
8054
8055 /* Shouldn't have any parameterized paths anymore */
8056 Assert(subpath->param_info == NULL);
8057
8058 if (tlist_same_exprs)
8059 subpath->pathtarget->sortgrouprefs =
8060 scanjoin_target->sortgrouprefs;
8061 else
8062 {
8063 Path *newpath;
8064
8067 lfirst(lc) = newpath;
8068 }
8069 }
8070
8071 /* Likewise adjust the targets for any partial paths. */
8072 foreach(lc, rel->partial_pathlist)
8073 {
8074 Path *subpath = (Path *) lfirst(lc);
8075
8076 /* Shouldn't have any parameterized paths anymore */
8077 Assert(subpath->param_info == NULL);
8078
8079 if (tlist_same_exprs)
8080 subpath->pathtarget->sortgrouprefs =
8081 scanjoin_target->sortgrouprefs;
8082 else
8083 {
8084 Path *newpath;
8085
8088 lfirst(lc) = newpath;
8089 }
8090 }
8091
8092 /*
8093 * Now, if final scan/join target contains SRFs, insert ProjectSetPath(s)
8094 * atop each existing path. (Note that this function doesn't look at the
8095 * cheapest-path fields, which is a good thing because they're bogus right
8096 * now.)
8097 */
8098 if (root->parse->hasTargetSRFs)
8102
8103 /*
8104 * Update the rel's target to be the final (with SRFs) scan/join target.
8105 * This now matches the actual output of all the paths, and we might get
8106 * confused in createplan.c if they don't agree. We must do this now so
8107 * that any append paths made in the next part will use the correct
8108 * pathtarget (cf. create_append_path).
8109 *
8110 * Note that this is also necessary if GetForeignUpperPaths() gets called
8111 * on the final scan/join relation or on any of its children, since the
8112 * FDW might look at the rel's target to create ForeignPaths.
8113 */
8115
8116 /*
8117 * If the relation is partitioned, recursively apply the scan/join target
8118 * to all partitions, and generate brand-new Append paths in which the
8119 * scan/join target is computed below the Append rather than above it.
8120 * Since Append is not projection-capable, that might save a separate
8121 * Result node, and it also is important for partitionwise aggregate.
8122 */
8124 {
8126 int i;
8127
8128 /* Adjust each partition. */
8129 i = -1;
8130 while ((i = bms_next_member(rel->live_parts, i)) >= 0)
8131 {
8132 RelOptInfo *child_rel = rel->part_rels[i];
8133 AppendRelInfo **appinfos;
8134 int nappinfos;
8136
8137 Assert(child_rel != NULL);
8138
8139 /* Dummy children can be ignored. */
8141 continue;
8142
8143 /* Translate scan/join targets for this child. */
8144 appinfos = find_appinfos_by_relids(root, child_rel->relids,
8145 &nappinfos);
8146 foreach(lc, scanjoin_targets)
8147 {
8148 PathTarget *target = lfirst_node(PathTarget, lc);
8149
8150 target = copy_pathtarget(target);
8151 target->exprs = (List *)
8153 (Node *) target->exprs,
8154 nappinfos, appinfos);
8156 target);
8157 }
8158 pfree(appinfos);
8159
8160 /* Recursion does the real work. */
8166
8167 /* Save non-dummy children for Append paths. */
8168 if (!IS_DUMMY_REL(child_rel))
8170 }
8171
8172 /* Build new paths for this relation by appending child paths. */
8174 }
8175
8176 /*
8177 * Consider generating Gather or Gather Merge paths. We must only do this
8178 * if the relation is parallel safe, and we don't do it for child rels to
8179 * avoid creating multiple Gather nodes within the same plan. We must do
8180 * this after all paths have been generated and before set_cheapest, since
8181 * one of the generated paths may turn out to be the cheapest one.
8182 */
8183 if (rel->consider_parallel && !IS_OTHER_REL(rel))
8185
8186 /*
8187 * Reassess which paths are the cheapest, now that we've potentially added
8188 * new Gather (or Gather Merge) and/or Append (or MergeAppend) paths to
8189 * this relation.
8190 */
8191 set_cheapest(rel);
8192}
8193
8194/*
8195 * create_partitionwise_grouping_paths
8196 *
8197 * If the partition keys of input relation are part of the GROUP BY clause, all
8198 * the rows belonging to a given group come from a single partition. This
8199 * allows aggregation/grouping over a partitioned relation to be broken down
8200 * into aggregation/grouping on each partition. This should be no worse, and
8201 * often better, than the normal approach.
8202 *
8203 * However, if the GROUP BY clause does not contain all the partition keys,
8204 * rows from a given group may be spread across multiple partitions. In that
8205 * case, we perform partial aggregation for each group, append the results,
8206 * and then finalize aggregation. This is less certain to win than the
8207 * previous case. It may win if the PartialAggregate stage greatly reduces
8208 * the number of groups, because fewer rows will pass through the Append node.
8209 * It may lose if we have lots of small groups.
8210 */
8211static void
8214 RelOptInfo *grouped_rel,
8219 GroupPathExtraData *extra)
8220{
8223 PathTarget *target = grouped_rel->reltarget;
8224 bool partial_grouping_valid = true;
8225 int i;
8226
8230
8231 /* Add paths for partitionwise aggregation/grouping. */
8232 i = -1;
8233 while ((i = bms_next_member(input_rel->live_parts, i)) >= 0)
8234 {
8235 RelOptInfo *child_input_rel = input_rel->part_rels[i];
8237 AppendRelInfo **appinfos;
8238 int nappinfos;
8242
8244
8245 /* Dummy children can be ignored. */
8247 continue;
8248
8249 child_target = copy_pathtarget(target);
8250
8251 /*
8252 * Copy the given "extra" structure as is and then override the
8253 * members specific to this child.
8254 */
8255 memcpy(&child_extra, extra, sizeof(child_extra));
8256
8257 appinfos = find_appinfos_by_relids(root, child_input_rel->relids,
8258 &nappinfos);
8259
8260 child_target->exprs = (List *)
8262 (Node *) target->exprs,
8263 nappinfos, appinfos);
8264
8265 /* Translate havingQual and targetList. */
8266 child_extra.havingQual = (Node *)
8268 extra->havingQual,
8269 nappinfos, appinfos);
8270 child_extra.targetList = (List *)
8272 (Node *) extra->targetList,
8273 nappinfos, appinfos);
8274
8275 /*
8276 * extra->patype was the value computed for our parent rel; patype is
8277 * the value for this relation. For the child, our value is its
8278 * parent rel's value.
8279 */
8280 child_extra.patype = patype;
8281
8282 /*
8283 * Create grouping relation to hold fully aggregated grouping and/or
8284 * aggregation paths for the child.
8285 */
8288 extra->target_parallel_safe,
8289 child_extra.havingQual);
8290
8291 /* Create grouping paths for this child relation. */
8296
8298 {
8302 }
8303 else
8304 partial_grouping_valid = false;
8305
8306 if (patype == PARTITIONWISE_AGGREGATE_FULL)
8307 {
8311 }
8312
8313 pfree(appinfos);
8314 }
8315
8316 /*
8317 * Try to create append paths for partially grouped children. For full
8318 * partitionwise aggregation, we might have paths in the partial_pathlist
8319 * if parallel aggregation is possible. For partial partitionwise
8320 * aggregation, we may have paths in both pathlist and partial_pathlist.
8321 *
8322 * NB: We must have a partially grouped path for every child in order to
8323 * generate a partially grouped path for this relation.
8324 */
8326 {
8328
8331 }
8332
8333 /* If possible, create append paths for fully grouped children. */
8334 if (patype == PARTITIONWISE_AGGREGATE_FULL)
8335 {
8337
8339 }
8340}
8341
8342/*
8343 * group_by_has_partkey
8344 *
8345 * Returns true if all the partition keys of the given relation are part of
8346 * the GROUP BY clauses, including having matching collation, false otherwise.
8347 */
8348static bool
8350 List *targetList,
8351 List *groupClause)
8352{
8353 List *groupexprs = get_sortgrouplist_exprs(groupClause, targetList);
8354 int cnt = 0;
8355 int partnatts;
8356
8357 /* Input relation should be partitioned. */
8358 Assert(input_rel->part_scheme);
8359
8360 /* Rule out early, if there are no partition keys present. */
8361 if (!input_rel->partexprs)
8362 return false;
8363
8364 partnatts = input_rel->part_scheme->partnatts;
8365
8366 for (cnt = 0; cnt < partnatts; cnt++)
8367 {
8368 List *partexprs = input_rel->partexprs[cnt];
8369 ListCell *lc;
8370 bool found = false;
8371
8372 foreach(lc, partexprs)
8373 {
8374 ListCell *lg;
8375 Expr *partexpr = lfirst(lc);
8376 Oid partcoll = input_rel->part_scheme->partcollation[cnt];
8377
8378 foreach(lg, groupexprs)
8379 {
8380 Expr *groupexpr = lfirst(lg);
8382
8383 /*
8384 * Note: we can assume there is at most one RelabelType node;
8385 * eval_const_expressions() will have simplified if more than
8386 * one.
8387 */
8389 groupexpr = ((RelabelType *) groupexpr)->arg;
8390
8391 if (equal(groupexpr, partexpr))
8392 {
8393 /*
8394 * Reject a match if the grouping collation does not match
8395 * the partitioning collation.
8396 */
8399 return false;
8400
8401 found = true;
8402 break;
8403 }
8404 }
8405
8406 if (found)
8407 break;
8408 }
8409
8410 /*
8411 * If none of the partition key expressions match with any of the
8412 * GROUP BY expression, return false.
8413 */
8414 if (!found)
8415 return false;
8416 }
8417
8418 return true;
8419}
8420
8421/*
8422 * generate_setop_child_grouplist
8423 * Build a SortGroupClause list defining the sort/grouping properties
8424 * of the child of a set operation.
8425 *
8426 * This is similar to generate_setop_grouplist() but differs as the setop
8427 * child query's targetlist entries may already have a tleSortGroupRef
8428 * assigned for other purposes, such as GROUP BYs. Here we keep the
8429 * SortGroupClause list in the same order as 'op' groupClauses and just adjust
8430 * the tleSortGroupRef to reference the TargetEntry's 'ressortgroupref'. If
8431 * any of the columns in the targetlist don't match to the setop's colTypes
8432 * then we return an empty list. This may leave some TLEs with unreferenced
8433 * ressortgroupref markings, but that's harmless.
8434 */
8435static List *
8437{
8438 List *grouplist = copyObject(op->groupClauses);
8439 ListCell *lg;
8440 ListCell *lt;
8441 ListCell *ct;
8442
8444 ct = list_head(op->colTypes);
8445 foreach(lt, targetlist)
8446 {
8447 TargetEntry *tle = (TargetEntry *) lfirst(lt);
8449 Oid coltype;
8450
8451 /* resjunk columns could have sortgrouprefs. Leave these alone */
8452 if (tle->resjunk)
8453 continue;
8454
8455 /*
8456 * We expect every non-resjunk target to have a SortGroupClause and
8457 * colTypes.
8458 */
8459 Assert(lg != NULL);
8460 Assert(ct != NULL);
8462 coltype = lfirst_oid(ct);
8463
8464 /* reject if target type isn't the same as the setop target type */
8465 if (coltype != exprType((Node *) tle->expr))
8466 return NIL;
8467
8468 lg = lnext(grouplist, lg);
8469 ct = lnext(op->colTypes, ct);
8470
8471 /* assign a tleSortGroupRef, or reuse the existing one */
8472 sgc->tleSortGroupRef = assignSortGroupRef(tle, targetlist);
8473 }
8474
8475 Assert(lg == NULL);
8476 Assert(ct == NULL);
8477
8478 return grouplist;
8479}
8480
8481/*
 8482 * create_unique_paths
 8483 * Build a new RelOptInfo containing Paths that represent elimination of
 8484 * distinct rows from the input data. Distinct-ness is defined according to
 8485 * the needs of the semijoin represented by sjinfo. If it is not possible
 8486 * to identify how to make the data unique, NULL is returned.
 8487 *
 8488 * If used at all, this is likely to be called repeatedly on the same rel,
 8489 * so we cache the result.
 8490 */
 8491RelOptInfo *
 8493{
/*
 * NOTE(review): this chunk was recovered from an HTML rendering of the
 * source; doxygen line numbers are embedded in the text and hyperlinked
 * lines (e.g. the parameter list at original line 8492 and the sortPathkeys
 * declaration at 8495) were dropped.  Code text below is preserved verbatim;
 * restore the missing lines from upstream planner.c before compiling.
 */
 8494 RelOptInfo *unique_rel;
 8496 List *groupClause = NIL;
 8497 MemoryContext oldcontext;
 8498
 8499 /* Caller made a mistake if SpecialJoinInfo is the wrong one */
 8500 Assert(sjinfo->jointype == JOIN_SEMI);
 8501 Assert(bms_equal(rel->relids, sjinfo->syn_righthand));
 8502
 8503 /* If result already cached, return it */
 8504 if (rel->unique_rel)
 8505 return rel->unique_rel;
 8506
 8507 /* If it's not possible to unique-ify, return NULL */
 8508 if (!(sjinfo->semi_can_btree || sjinfo->semi_can_hash))
 8509 return NULL;
 8510
 8511 /*
 8512 * Punt if this is a child relation and we failed to build a unique-ified
 8513 * relation for its parent. This can happen if all the RHS columns were
 8514 * found to be equated to constants when unique-ifying the parent table,
 8515 * leaving no columns to unique-ify.
 8516 */
 8517 if (IS_OTHER_REL(rel) && rel->top_parent->unique_rel == NULL)
 8518 return NULL;
 8519
 8520 /*
 8521 * When called during GEQO join planning, we are in a short-lived memory
 8522 * context. We must make sure that the unique rel and any subsidiary data
 8523 * structures created for a baserel survive the GEQO cycle, else the
 8524 * baserel is trashed for future GEQO cycles. On the other hand, when we
 8525 * are creating those for a joinrel during GEQO, we don't want them to
 8526 * clutter the main planning context. Upshot is that the best solution is
 8527 * to explicitly allocate memory in the same context the given RelOptInfo
 8528 * is in.
 8529 */
/* NOTE(review): the MemoryContextSwitchTo() into rel's context was on the dropped line here — verify against upstream. */
 8531
 8532 unique_rel = makeNode(RelOptInfo);
 8533 memcpy(unique_rel, rel, sizeof(RelOptInfo));
 8534
 8535 /*
 8536 * clear path info
 8537 */
 8538 unique_rel->pathlist = NIL;
 8539 unique_rel->ppilist = NIL;
 8540 unique_rel->partial_pathlist = NIL;
 8541 unique_rel->cheapest_startup_path = NULL;
 8542 unique_rel->cheapest_total_path = NULL;
 8543 unique_rel->cheapest_parameterized_paths = NIL;
 8544
 8545 /*
 8546 * Build the target list for the unique rel. We also build the pathkeys
 8547 * that represent the ordering requirements for the sort-based
 8548 * implementation, and the list of SortGroupClause nodes that represent
 8549 * the columns to be grouped on for the hash-based implementation.
 8550 *
 8551 * For a child rel, we can construct these fields from those of its
 8552 * parent.
 8553 */
 8554 if (IS_OTHER_REL(rel))
 8555 {
 8558
 8559 parent_unique_target = rel->top_parent->unique_rel->reltarget;
 8561
 8562
 8563 /* Translate the target expressions */
 8564 child_unique_target->exprs = (List *)
 8566 (Node *) parent_unique_target->exprs,
 8567 rel,
 8568 rel->top_parent);
 8569
 8570 unique_rel->reltarget = child_unique_target;
 8571
 8572 sortPathkeys = rel->top_parent->unique_pathkeys;
 8573 groupClause = rel->top_parent->unique_groupclause;
 8574 }
 8575 else
 8576 {
 8577 List *newtlist;
 8578 int nextresno;
 8579 List *sortList = NIL;
 8580 ListCell *lc1;
 8581 ListCell *lc2;
 8582
 8583 /*
 8584 * The values we are supposed to unique-ify may be expressions in the
 8585 * variables of the input rel's targetlist. We have to add any such
 8586 * expressions to the unique rel's targetlist.
 8587 *
 8588 * To complicate matters, some of the values to be unique-ified may be
 8589 * known redundant by the EquivalenceClass machinery (e.g., because
 8590 * they have been equated to constants). There is no need to compare
 8591 * such values during unique-ification, and indeed we had better not
 8592 * try because the Vars involved may not have propagated as high as
 8593 * the semijoin's level. We use make_pathkeys_for_sortclauses to
 8594 * detect such cases, which is a tad inefficient but it doesn't seem
 8595 * worth building specialized infrastructure for this.
 8596 */
/* NOTE(review): the initialization of newtlist/nextresno was on dropped lines here. */
 8599
/* Walk each RHS expression paired with its semijoin equality operator; the in_oper declaration was on a dropped line. */
 8600 forboth(lc1, sjinfo->semi_rhs_exprs, lc2, sjinfo->semi_operators)
 8601 {
 8602 Expr *uniqexpr = lfirst(lc1);
 8604 Oid sortop;
 8606 bool made_tle = false;
 8608
 8609 if (!tle)
 8610 {
 8612 nextresno,
 8613 NULL,
 8614 false);
 8616 nextresno++;
 8617 made_tle = true;
 8618 }
 8619
 8620 /*
 8621 * Try to build an ORDER BY list to sort the input compatibly. We
 8622 * do this for each sortable clause even when the clauses are not
 8623 * all sortable, so that we can detect clauses that are redundant
 8624 * according to the pathkey machinery.
 8625 */
 8627 if (OidIsValid(sortop))
 8628 {
 8629 Oid eqop;
 8631
 8632 /*
 8633 * The Unique node will need equality operators. Normally
 8634 * these are the same as the IN clause operators, but if those
 8635 * are cross-type operators then the equality operators are
 8636 * the ones for the IN clause operators' RHS datatype.
 8637 */
 8638 eqop = get_equality_op_for_ordering_op(sortop, NULL);
 8639 if (!OidIsValid(eqop)) /* shouldn't happen */
 8640 elog(ERROR, "could not find equality operator for ordering operator %u",
 8641 sortop);
 8642
 8644 sortcl->tleSortGroupRef = assignSortGroupRef(tle, newtlist);
 8645 sortcl->eqop = eqop;
 8646 sortcl->sortop = sortop;
 8647 sortcl->reverse_sort = false;
 8648 sortcl->nulls_first = false;
 8649 sortcl->hashable = false; /* no need to make this accurate */
 8651
 8652 /*
 8653 * At each step, convert the SortGroupClause list to pathkey
 8654 * form. If the just-added SortGroupClause is redundant, the
 8655 * result will be shorter than the SortGroupClause list.
 8656 */
 8658 newtlist);
 8660 {
 8661 /* Drop the redundant SortGroupClause */
 8664 /* Undo tlist addition, if we made one */
 8665 if (made_tle)
 8666 {
 8668 nextresno--;
 8669 }
 8670 /* We need not consider this clause for hashing, either */
 8671 continue;
 8672 }
 8673 }
 8674 else if (sjinfo->semi_can_btree) /* shouldn't happen */
 8675 elog(ERROR, "could not find ordering operator for equality operator %u",
 8676 in_oper);
 8677
 8678 if (sjinfo->semi_can_hash)
 8679 {
 8680 /* Create a GROUP BY list for the Agg node to use */
 8681 Oid eq_oper;
 8683
 8684 /*
 8685 * Get the hashable equality operators for the Agg node to
 8686 * use. Normally these are the same as the IN clause
 8687 * operators, but if those are cross-type operators then the
 8688 * equality operators are the ones for the IN clause
 8689 * operators' RHS datatype.
 8690 */
 8692 elog(ERROR, "could not find compatible hash operator for operator %u",
 8693 in_oper);
 8694
 8696 groupcl->tleSortGroupRef = assignSortGroupRef(tle, newtlist);
 8697 groupcl->eqop = eq_oper;
 8698 groupcl->sortop = sortop;
 8699 groupcl->reverse_sort = false;
 8700 groupcl->nulls_first = false;
 8701 groupcl->hashable = true;
 8702 groupClause = lappend(groupClause, groupcl);
 8703 }
 8704 }
 8705
 8706 /*
 8707 * Done building the sortPathkeys and groupClause. But the
 8708 * sortPathkeys are bogus if not all the clauses were sortable.
 8709 */
 8710 if (!sjinfo->semi_can_btree)
 8711 sortPathkeys = NIL;
 8712
 8713 /*
 8714 * It can happen that all the RHS columns are equated to constants.
 8715 * We'd have to do something special to unique-ify in that case, and
 8716 * it's such an unlikely-in-the-real-world case that it's not worth
 8717 * the effort. So just punt if we found no columns to unique-ify.
 8718 */
 8719 if (sortPathkeys == NIL && groupClause == NIL)
 8720 {
 8721 MemoryContextSwitchTo(oldcontext);
 8722 return NULL;
 8723 }
 8724
 8725 /* Convert the required targetlist back to PathTarget form */
 8726 unique_rel->reltarget = create_pathtarget(root, newtlist);
 8727 }
 8728
 8729 /* build unique paths based on input rel's pathlist */
 8730 create_final_unique_paths(root, rel, sortPathkeys, groupClause,
 8731 sjinfo, unique_rel);
 8732
 8733 /* build unique paths based on input rel's partial_pathlist */
 8735 sjinfo, unique_rel);
 8736
 8737 /* Now choose the best path(s) */
 8738 set_cheapest(unique_rel);
 8739
 8740 /*
 8741 * There shouldn't be any partial paths for the unique relation;
 8742 * otherwise, we won't be able to properly guarantee uniqueness.
 8743 */
 8744 Assert(unique_rel->partial_pathlist == NIL);
 8745
 8746 /* Cache the result */
 8747 rel->unique_rel = unique_rel;
/* NOTE(review): the dropped line here presumably caches sortPathkeys into rel->unique_pathkeys — verify against upstream. */
 8749 rel->unique_groupclause = groupClause;
 8750
 8751 MemoryContextSwitchTo(oldcontext);
 8752
 8753 return unique_rel;
 8754}
8755
8756/*
 8757 * create_final_unique_paths
 8758 * Create unique paths in 'unique_rel' based on 'input_rel' pathlist
 8759 */
 8760static void
 8762 List *sortPathkeys, List *groupClause,
 8763 SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel)
 8764{
/*
 * NOTE(review): recovered from an HTML rendering; hyperlinked source lines
 * were dropped (e.g. the first parameter-list line, presumably
 * "create_final_unique_paths(PlannerInfo *root, RelOptInfo *input_rel,",
 * and several call lines below).  Code text is preserved verbatim.
 */
 8765 Path *cheapest_input_path = input_rel->cheapest_total_path;
 8766
 8767 /* Estimate number of output rows */
 8768 unique_rel->rows = estimate_num_groups(root,
 8769 sjinfo->semi_rhs_exprs,
 8770 cheapest_input_path->rows,
 8771 NULL,
 8772 NULL);
 8773
 8774 /* Consider sort-based implementations, if possible. */
 8775 if (sjinfo->semi_can_btree)
 8776 {
 8777 ListCell *lc;
 8778
 8779 /*
 8780 * Use any available suitably-sorted path as input, and also consider
 8781 * sorting the cheapest-total path and incremental sort on any paths
 8782 * with presorted keys.
 8783 *
 8784 * To save planning time, we ignore parameterized input paths unless
 8785 * they are the cheapest-total path.
 8786 */
 8787 foreach(lc, input_rel->pathlist)
 8788 {
 8789 Path *input_path = (Path *) lfirst(lc);
 8790 Path *path;
 8791 bool is_sorted;
 8792 int presorted_keys;
 8793
 8794 /*
 8795 * Ignore parameterized paths that are not the cheapest-total
 8796 * path.
 8797 */
 8798 if (input_path->param_info &&
/* NOTE(review): dropped condition line — presumably "input_path != cheapest_input_path)" — verify. */
 8800 continue;
 8801
/* NOTE(review): dropped line — presumably "is_sorted = pathkeys_count_contained_in(sortPathkeys," — verify. */
 8803 input_path->pathkeys,
 8804 &presorted_keys);
 8805
 8806 /*
 8807 * Ignore paths that are not suitably or partially sorted, unless
 8808 * they are the cheapest total path (no need to deal with paths
 8809 * which have presorted keys when incremental sort is disabled).
 8810 */
 8812 (presorted_keys == 0 || !enable_incremental_sort))
 8813 continue;
 8814
 8815 /*
 8816 * Make a separate ProjectionPath in case we need a Result node.
 8817 */
 8818 path = (Path *) create_projection_path(root,
 8819 unique_rel,
 8820 input_path,
 8821 unique_rel->reltarget);
 8822
 8823 if (!is_sorted)
 8824 {
 8825 /*
 8826 * We've no need to consider both a sort and incremental sort.
 8827 * We'll just do a sort if there are no presorted keys and an
 8828 * incremental sort when there are presorted keys.
 8829 */
 8830 if (presorted_keys == 0 || !enable_incremental_sort)
 8831 path = (Path *) create_sort_path(root,
 8832 unique_rel,
 8833 path,
 8835 -1.0);
 8836 else
 8838 unique_rel,
 8839 path,
 8841 presorted_keys,
 8842 -1.0);
 8843 }
 8844
 8845 path = (Path *) create_unique_path(root, unique_rel, path,
 8847 unique_rel->rows);
 8848
 8849 add_path(unique_rel, path);
 8850 }
 8851 }
 8852
 8853 /* Consider hash-based implementation, if possible. */
 8854 if (sjinfo->semi_can_hash)
 8855 {
 8856 Path *path;
 8857
 8858 /*
 8859 * Make a separate ProjectionPath in case we need a Result node.
 8860 */
 8861 path = (Path *) create_projection_path(root,
 8862 unique_rel,
 8864 unique_rel->reltarget);
 8865
 8866 path = (Path *) create_agg_path(root,
 8867 unique_rel,
 8868 path,
 8869 cheapest_input_path->pathtarget,
 8870 AGG_HASHED,
 8872 groupClause,
 8873 NIL,
 8874 NULL,
 8875 unique_rel->rows);
 8876
 8877 add_path(unique_rel, path);
 8878 }
 8879}
8880
8881/*
 8882 * create_partial_unique_paths
 8883 * Create unique paths in 'unique_rel' based on 'input_rel' partial_pathlist
 8884 */
 8885static void
 8887 List *sortPathkeys, List *groupClause,
 8888 SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel)
 8889{
/*
 * NOTE(review): recovered from an HTML rendering; hyperlinked source lines
 * were dropped (the first parameter-list line and the declarations of
 * partial_unique_rel / cheapest_partial_path, among others).  Code text is
 * preserved verbatim; restore missing lines from upstream planner.c.
 */
 8892
 8893 /* nothing to do when there are no partial paths in the input rel */
 8894 if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
 8895 return;
 8896
 8897 /*
 8898 * nothing to do if there's anything in the targetlist that's
 8899 * parallel-restricted.
 8900 */
 8901 if (!is_parallel_safe(root, (Node *) unique_rel->reltarget->exprs))
 8902 return;
 8903
 8904 cheapest_partial_path = linitial(input_rel->partial_pathlist);
 8905
/* NOTE(review): dropped lines here presumably allocate partial_unique_rel and memcpy the input rel into it, mirroring create_unique_paths() — verify. */
 8908
 8909 /*
 8910 * clear path info
 8911 */
 8912 partial_unique_rel->pathlist = NIL;
 8913 partial_unique_rel->ppilist = NIL;
 8914 partial_unique_rel->partial_pathlist = NIL;
 8915 partial_unique_rel->cheapest_startup_path = NULL;
 8916 partial_unique_rel->cheapest_total_path = NULL;
 8917 partial_unique_rel->cheapest_parameterized_paths = NIL;
 8918
 8919 /* Estimate number of output rows */
 8921 sjinfo->semi_rhs_exprs,
 8923 NULL,
 8924 NULL);
 8925 partial_unique_rel->reltarget = unique_rel->reltarget;
 8926
 8927 /* Consider sort-based implementations, if possible. */
 8928 if (sjinfo->semi_can_btree)
 8929 {
 8930 ListCell *lc;
 8931
 8932 /*
 8933 * Use any available suitably-sorted path as input, and also consider
 8934 * sorting the cheapest partial path and incremental sort on any paths
 8935 * with presorted keys.
 8936 */
 8937 foreach(lc, input_rel->partial_pathlist)
 8938 {
 8939 Path *input_path = (Path *) lfirst(lc);
 8940 Path *path;
 8941 bool is_sorted;
 8942 int presorted_keys;
 8943
/* NOTE(review): dropped line — presumably "is_sorted = pathkeys_count_contained_in(sortPathkeys," — verify. */
 8945 input_path->pathkeys,
 8946 &presorted_keys);
 8947
 8948 /*
 8949 * Ignore paths that are not suitably or partially sorted, unless
 8950 * they are the cheapest partial path (no need to deal with paths
 8951 * which have presorted keys when incremental sort is disabled).
 8952 */
 8954 (presorted_keys == 0 || !enable_incremental_sort))
 8955 continue;
 8956
 8957 /*
 8958 * Make a separate ProjectionPath in case we need a Result node.
 8959 */
 8960 path = (Path *) create_projection_path(root,
 8962 input_path,
 8963 partial_unique_rel->reltarget);
 8964
 8965 if (!is_sorted)
 8966 {
 8967 /*
 8968 * We've no need to consider both a sort and incremental sort.
 8969 * We'll just do a sort if there are no presorted keys and an
 8970 * incremental sort when there are presorted keys.
 8971 */
 8972 if (presorted_keys == 0 || !enable_incremental_sort)
 8973 path = (Path *) create_sort_path(root,
 8975 path,
 8977 -1.0);
 8978 else
 8981 path,
 8983 presorted_keys,
 8984 -1.0);
 8985 }
 8986
/* NOTE(review): dropped lines — presumably a create_unique_path() call followed by add_partial_path(partial_unique_rel, path) — verify. */
 8990
 8992 }
 8993 }
 8994
 8995 /* Consider hash-based implementation, if possible. */
 8996 if (sjinfo->semi_can_hash)
 8997 {
 8998 Path *path;
 8999
 9000 /*
 9001 * Make a separate ProjectionPath in case we need a Result node.
 9002 */
 9003 path = (Path *) create_projection_path(root,
 9006 partial_unique_rel->reltarget);
 9007
 9008 path = (Path *) create_agg_path(root,
 9010 path,
 9011 cheapest_partial_path->pathtarget,
 9012 AGG_HASHED,
 9014 groupClause,
 9015 NIL,
 9016 NULL,
 9017 partial_unique_rel->rows);
 9018
 9020 }
 9021
 9022 if (partial_unique_rel->partial_pathlist != NIL)
 9023 {
/* NOTE(review): dropped lines — presumably generate_useful_gather_paths() on partial_unique_rel followed by set_cheapest() — verify. */
 9026
 9027 /*
 9028 * Finally, create paths to unique-ify the final result. This step is
 9029 * needed to remove any duplicates due to combining rows from parallel
 9030 * workers.
 9031 */
 9033 sortPathkeys, groupClause,
 9034 sjinfo, unique_rel);
 9035 }
 9036}
9037
9038/*
 9039 * Choose a unique name for some subroot.
 9040 *
 9041 * Modifies glob->subplanNames to track names already used.
 9042 */
 9043char *
 9045{
/*
 * NOTE(review): the function's signature line was lost in extraction; from
 * the body it presumably takes (PlannerGlobal *glob, const char *name,
 * bool always_number) — confirm the exact name and parameter list against
 * upstream planner.c.
 */
 9046 unsigned n;
 9047
 9048 /*
 9049 * If a numeric suffix is not required, then search the list of
 9050 * previously-assigned names for a match. If none is found, then we can
 9051 * use the provided name without modification.
 9052 */
 9053 if (!always_number)
 9054 {
 9055 bool found = false;
 9056
 9057 foreach_ptr(char, subplan_name, glob->subplanNames)
 9058 {
 9059 if (strcmp(subplan_name, name) == 0)
 9060 {
 9061 found = true;
 9062 break;
 9063 }
 9064 }
 9065
 9066 if (!found)
 9067 {
 9068 /* pstrdup here is just to avoid cast-away-const */
 9069 char *chosen_name = pstrdup(name);
 9070
 9071 glob->subplanNames = lappend(glob->subplanNames, chosen_name);
 9072 return chosen_name;
 9073 }
 9074 }
 9075
 9076 /*
 9077 * If a numeric suffix is required or if the un-suffixed name is already
 9078 * in use, then loop until we find a positive integer that produces a
 9079 * novel name.
 9080 */
 9081 for (n = 1; true; ++n)
 9082 {
 9083 char *proposed_name = psprintf("%s_%u", name, n);
 9084 bool found = false;
 9085
 9086 foreach_ptr(char, subplan_name, glob->subplanNames)
 9087 {
/* NOTE(review): dropped condition line — presumably "if (strcmp(subplan_name, proposed_name) == 0)" — verify. */
 9089 {
 9090 found = true;
 9091 break;
 9092 }
 9093 }
 9094
 9095 if (!found)
 9096 {
 9097 glob->subplanNames = lappend(glob->subplanNames, proposed_name);
 9098 return proposed_name;
 9099 }
 9100
/* NOTE(review): dropped line — presumably "pfree(proposed_name);" to discard the rejected candidate — verify. */
 9102 }
 9103}
@ ACLCHECK_NO_PRIV
Definition acl.h:185
void aclcheck_error(AclResult aclerr, ObjectType objtype, const char *objectname)
Definition aclchk.c:2672
int compute_parallel_worker(RelOptInfo *rel, double heap_pages, double index_pages, int max_workers)
Definition allpaths.c:4794
void generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
Definition allpaths.c:3388
void add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, List *live_childrels)
Definition allpaths.c:1416
AppendRelInfo ** find_appinfos_by_relids(PlannerInfo *root, Relids relids, int *nappinfos)
Definition appendinfo.c:809
Node * adjust_appendrel_attrs(PlannerInfo *root, Node *node, int nappinfos, AppendRelInfo **appinfos)
Definition appendinfo.c:201
List * adjust_inherited_attnums_multilevel(PlannerInfo *root, List *attnums, Index child_relid, Index top_parent_relid)
Definition appendinfo.c:738
Node * adjust_appendrel_attrs_multilevel(PlannerInfo *root, Node *node, RelOptInfo *childrel, RelOptInfo *parentrel)
Definition appendinfo.c:597
void pprint(const void *obj)
Definition print.c:54
void pgstat_report_plan_id(int64 plan_id, bool force)
BipartiteMatchState * BipartiteMatch(int u_size, int v_size, short **adjacency)
void BipartiteMatchFree(BipartiteMatchState *state)
Bitmapset * bms_difference(const Bitmapset *a, const Bitmapset *b)
Definition bitmapset.c:346
Bitmapset * bms_make_singleton(int x)
Definition bitmapset.c:216
bool bms_equal(const Bitmapset *a, const Bitmapset *b)
Definition bitmapset.c:142
int bms_next_member(const Bitmapset *a, int prevbit)
Definition bitmapset.c:1290
Bitmapset * bms_del_members(Bitmapset *a, const Bitmapset *b)
Definition bitmapset.c:1145
Bitmapset * bms_del_member(Bitmapset *a, int x)
Definition bitmapset.c:852
bool bms_is_subset(const Bitmapset *a, const Bitmapset *b)
Definition bitmapset.c:412
void bms_free(Bitmapset *a)
Definition bitmapset.c:239
int bms_num_members(const Bitmapset *a)
Definition bitmapset.c:744
bool bms_is_member(int x, const Bitmapset *a)
Definition bitmapset.c:510
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition bitmapset.c:799
BMS_Membership bms_membership(const Bitmapset *a)
Definition bitmapset.c:765
bool bms_overlap_list(const Bitmapset *a, const List *b)
Definition bitmapset.c:601
#define bms_is_empty(a)
Definition bitmapset.h:118
@ BMS_MULTIPLE
Definition bitmapset.h:73
uint32 BlockNumber
Definition block.h:31
#define Min(x, y)
Definition c.h:1091
#define Max(x, y)
Definition c.h:1085
#define Assert(condition)
Definition c.h:943
int64_t int64
Definition c.h:621
unsigned int Index
Definition c.h:698
#define pg_fallthrough
Definition c.h:161
#define MemSet(start, val, len)
Definition c.h:1107
#define OidIsValid(objectId)
Definition c.h:858
size_t Size
Definition c.h:689
uint32 result
memcpy(sums, checksumBaseOffsets, sizeof(checksumBaseOffsets))
bool contain_agg_clause(Node *clause)
Definition clauses.c:192
Node * estimate_expression_value(PlannerInfo *root, Node *node)
Definition clauses.c:2639
WindowFuncLists * find_window_functions(Node *clause, Index maxWinRef)
Definition clauses.c:242
Node * eval_const_expressions(PlannerInfo *root, Node *node)
Definition clauses.c:2498
void convert_saop_to_hashed_saop(Node *node)
Definition clauses.c:2531
char max_parallel_hazard(Query *parse)
Definition clauses.c:745
bool is_parallel_safe(PlannerInfo *root, Node *node)
Definition clauses.c:764
bool contain_subplans(Node *clause)
Definition clauses.c:341
bool contain_volatile_functions(Node *clause)
Definition clauses.c:549
double cpu_operator_cost
Definition costsize.c:135
bool enable_partitionwise_aggregate
Definition costsize.c:161
bool enable_seqscan
Definition costsize.c:146
int max_parallel_workers_per_gather
Definition costsize.c:144
bool enable_memoize
Definition costsize.c:156
double parallel_setup_cost
Definition costsize.c:137
bool enable_gathermerge
Definition costsize.c:159
double parallel_tuple_cost
Definition costsize.c:136
void cost_sort(Path *path, PlannerInfo *root, List *pathkeys, int input_disabled_nodes, Cost input_cost, double tuples, int width, Cost comparison_cost, int sort_mem, double limit_tuples)
Definition costsize.c:2201
bool enable_indexonlyscan
Definition costsize.c:148
bool enable_tidscan
Definition costsize.c:150
bool enable_material
Definition costsize.c:155
bool enable_hashjoin
Definition costsize.c:158
bool enable_mergejoin
Definition costsize.c:157
double compute_gather_rows(Path *path)
Definition costsize.c:6769
void cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
Definition costsize.c:4926
PathTarget * set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
Definition costsize.c:6511
void cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
Definition costsize.c:4900
bool enable_presorted_aggregate
Definition costsize.c:165
bool enable_partitionwise_join
Definition costsize.c:160
bool enable_nestloop
Definition costsize.c:154
bool enable_bitmapscan
Definition costsize.c:149
bool enable_hashagg
Definition costsize.c:153
int32 clamp_width_est(int64 tuple_width)
Definition costsize.c:243
bool enable_indexscan
Definition costsize.c:147
bool enable_incremental_sort
Definition costsize.c:152
Plan * materialize_finished_plan(Plan *subplan)
Plan * create_plan(PlannerInfo *root, Path *best_path)
Definition createplan.c:339
Datum arg
Definition elog.c:1322
int errcode(int sqlerrcode)
Definition elog.c:874
int errdetail(const char *fmt,...) pg_attribute_printf(1
#define ERROR
Definition elog.h:40
#define elog(elevel,...)
Definition elog.h:228
#define ereport(elevel,...)
Definition elog.h:152
bool equal(const void *a, const void *b)
Definition equalfuncs.c:223
bool ExecSupportsBackwardScan(Plan *node)
Definition execAmi.c:512
bool ExecCheckOneRelPerms(RTEPermissionInfo *perminfo)
Definition execMain.c:657
#define palloc_array(type, count)
Definition fe_memutils.h:76
#define palloc0_object(type)
Definition fe_memutils.h:75
#define OidFunctionCall1(functionId, arg1)
Definition fmgr.h:722
FdwRoutine * GetFdwRoutineByRelId(Oid relid)
Definition foreign.c:451
int max_parallel_maintenance_workers
Definition globals.c:136
bool IsUnderPostmaster
Definition globals.c:122
int maintenance_work_mem
Definition globals.c:135
#define IsParallelWorker()
Definition parallel.h:62
void parse(int)
Definition parse.c:49
void index_close(Relation relation, LOCKMODE lockmode)
Definition indexam.c:178
Relation index_open(Oid relationId, LOCKMODE lockmode)
Definition indexam.c:134
int b
Definition isn.c:74
int a
Definition isn.c:73
int j
Definition isn.c:78
int i
Definition isn.c:77
double jit_optimize_above_cost
Definition jit.c:42
bool jit_enabled
Definition jit.c:33
bool jit_expressions
Definition jit.c:37
bool jit_tuple_deforming
Definition jit.c:39
double jit_above_cost
Definition jit.c:40
double jit_inline_above_cost
Definition jit.c:41
#define PGJIT_OPT3
Definition jit.h:21
#define PGJIT_NONE
Definition jit.h:19
#define PGJIT_EXPR
Definition jit.h:23
#define PGJIT_DEFORM
Definition jit.h:24
#define PGJIT_INLINE
Definition jit.h:22
#define PGJIT_PERFORM
Definition jit.h:20
Bitmapset * DiscreteKnapsack(int max_weight, int num_items, int *item_weights, double *item_values)
Definition knapsack.c:51
List * lappend(List *list, void *datum)
Definition list.c:339
List * list_difference_int(const List *list1, const List *list2)
Definition list.c:1288
List * list_concat_unique_ptr(List *list1, const List *list2)
Definition list.c:1427
List * list_concat(List *list1, const List *list2)
Definition list.c:561
List * list_copy(const List *oldlist)
Definition list.c:1573
List * lappend_int(List *list, int datum)
Definition list.c:357
List * lcons(void *datum, List *list)
Definition list.c:495
List * list_delete_int(List *list, int datum)
Definition list.c:891
List * list_delete_last(List *list)
Definition list.c:957
bool list_member_ptr(const List *list, const void *datum)
Definition list.c:682
void list_free(List *list)
Definition list.c:1546
bool list_member_int(const List *list, int datum)
Definition list.c:702
List * list_copy_head(const List *oldlist, int len)
Definition list.c:1593
List * list_concat_unique(List *list1, const List *list2)
Definition list.c:1405
#define NoLock
Definition lockdefs.h:34
#define AccessShareLock
Definition lockdefs.h:36
@ LockWaitBlock
Definition lockoptions.h:40
LockClauseStrength
Definition lockoptions.h:22
@ LCS_FORUPDATE
Definition lockoptions.h:28
@ LCS_NONE
Definition lockoptions.h:23
@ LCS_FORSHARE
Definition lockoptions.h:26
@ LCS_FORKEYSHARE
Definition lockoptions.h:25
@ LCS_FORNOKEYUPDATE
Definition lockoptions.h:27
char * get_rel_name(Oid relid)
Definition lsyscache.c:2148
bool get_compatible_hash_operators(Oid opno, Oid *lhs_opno, Oid *rhs_opno)
Definition lsyscache.c:477
RegProcedure get_func_support(Oid funcid)
Definition lsyscache.c:2078
Oid get_equality_op_for_ordering_op(Oid opno, bool *reverse)
Definition lsyscache.c:326
Oid get_ordering_op_for_equality_op(Oid opno, bool use_lhs_type)
Definition lsyscache.c:364
int32 get_typavgwidth(Oid typid, int32 typmod)
Definition lsyscache.c:2800
Datum subpath(PG_FUNCTION_ARGS)
Definition ltree_op.c:311
TargetEntry * makeTargetEntry(Expr *expr, AttrNumber resno, char *resname, bool resjunk)
Definition makefuncs.c:289
Expr * make_opclause(Oid opno, Oid opresulttype, bool opretset, Expr *leftop, Expr *rightop, Oid opcollid, Oid inputcollid)
Definition makefuncs.c:701
Const * makeConst(Oid consttype, int32 consttypmod, Oid constcollid, int constlen, Datum constvalue, bool constisnull, bool constbyval)
Definition makefuncs.c:350
List * make_ands_implicit(Expr *clause)
Definition makefuncs.c:810
char * pstrdup(const char *in)
Definition mcxt.c:1781
void pfree(void *pointer)
Definition mcxt.c:1616
void * palloc0(Size size)
Definition mcxt.c:1417
void * palloc(Size size)
Definition mcxt.c:1387
MemoryContext CurrentMemoryContext
Definition mcxt.c:160
MemoryContext GetMemoryChunkContext(void *pointer)
Definition mcxt.c:756
Oid exprType(const Node *expr)
Definition nodeFuncs.c:42
Oid exprCollation(const Node *expr)
Definition nodeFuncs.c:826
bool expression_returns_set(Node *clause)
Definition nodeFuncs.c:768
void fix_opfuncids(Node *node)
Definition nodeFuncs.c:1848
size_t get_hash_memory_limit(void)
Definition nodeHash.c:3680
#define DO_AGGSPLIT_SKIPFINAL(as)
Definition nodes.h:396
#define IsA(nodeptr, _type_)
Definition nodes.h:164
#define copyObject(obj)
Definition nodes.h:232
double Cost
Definition nodes.h:261
#define nodeTag(nodeptr)
Definition nodes.h:139
#define IS_OUTER_JOIN(jointype)
Definition nodes.h:348
@ CMD_MERGE
Definition nodes.h:279
@ CMD_DELETE
Definition nodes.h:278
@ CMD_UPDATE
Definition nodes.h:276
@ CMD_SELECT
Definition nodes.h:275
AggStrategy
Definition nodes.h:363
@ AGG_SORTED
Definition nodes.h:365
@ AGG_HASHED
Definition nodes.h:366
@ AGG_MIXED
Definition nodes.h:367
@ AGG_PLAIN
Definition nodes.h:364
#define DO_AGGSPLIT_SERIALIZE(as)
Definition nodes.h:397
AggSplit
Definition nodes.h:385
@ AGGSPLIT_FINAL_DESERIAL
Definition nodes.h:391
@ AGGSPLIT_SIMPLE
Definition nodes.h:387
@ AGGSPLIT_INITIAL_SERIAL
Definition nodes.h:389
@ LIMIT_OPTION_COUNT
Definition nodes.h:442
#define makeNode(_type_)
Definition nodes.h:161
#define castNode(_type_, nodeptr)
Definition nodes.h:182
@ JOIN_SEMI
Definition nodes.h:317
static char * errmsg
#define PVC_RECURSE_AGGREGATES
Definition optimizer.h:198
#define PVC_RECURSE_WINDOWFUNCS
Definition optimizer.h:200
@ DEBUG_PARALLEL_REGRESS
Definition optimizer.h:98
@ DEBUG_PARALLEL_OFF
Definition optimizer.h:96
#define PVC_INCLUDE_WINDOWFUNCS
Definition optimizer.h:199
#define PVC_INCLUDE_PLACEHOLDERS
Definition optimizer.h:201
#define PVC_INCLUDE_AGGREGATES
Definition optimizer.h:197
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition palloc.h:124
int assign_special_exec_param(PlannerInfo *root)
List * expand_grouping_sets(List *groupingSets, bool groupDistinct, int limit)
Definition parse_agg.c:2019
Index assignSortGroupRef(TargetEntry *tle, List *tlist)
RTEPermissionInfo * getRTEPermissionInfo(List *rteperminfos, RangeTblEntry *rte)
RTEPermissionInfo * addRTEPermissionInfo(List **rteperminfos, RangeTblEntry *rte)
#define CURSOR_OPT_SCROLL
#define CURSOR_OPT_FAST_PLAN
@ RTE_JOIN
@ RTE_VALUES
@ RTE_SUBQUERY
@ RTE_RESULT
@ RTE_FUNCTION
@ RTE_TABLEFUNC
@ RTE_GROUP
@ RTE_RELATION
@ OBJECT_VIEW
#define CURSOR_OPT_PARALLEL_OK
void CheckSelectLocking(Query *qry, LockClauseStrength strength)
Definition analyze.c:3729
const char * LCS_asString(LockClauseStrength strength)
Definition analyze.c:3704
#define rt_fetch(rangetable_index, rangetable)
Definition parsetree.h:31
void DestroyPartitionDirectory(PartitionDirectory pdir)
Definition partdesc.c:484
List * append_pathkeys(List *target, List *source)
Definition pathkeys.c:107
bool pathkeys_count_contained_in(List *keys1, List *keys2, int *n_common)
Definition pathkeys.c:558
List * make_pathkeys_for_sortclauses(PlannerInfo *root, List *sortclauses, List *tlist)
Definition pathkeys.c:1336
List * make_pathkeys_for_sortclauses_extended(PlannerInfo *root, List **sortclauses, List *tlist, bool remove_redundant, bool remove_group_rtindex, bool *sortable, bool set_ec_sortref)
Definition pathkeys.c:1381
bool pathkeys_contained_in(List *keys1, List *keys2)
Definition pathkeys.c:343
PathKeysComparison compare_pathkeys(List *keys1, List *keys2)
Definition pathkeys.c:304
List * get_useful_group_keys_orderings(PlannerInfo *root, Path *path)
Definition pathkeys.c:467
IndexPath * create_index_path(PlannerInfo *root, IndexOptInfo *index, List *indexclauses, List *indexorderbys, List *indexorderbycols, List *pathkeys, ScanDirection indexscandir, bool indexonly, Relids required_outer, double loop_count, bool partial_path)
Definition pathnode.c:1092
ProjectSetPath * create_set_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition pathnode.c:2785
ProjectionPath * create_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition pathnode.c:2587
WindowAggPath * create_windowagg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *windowFuncs, List *runCondition, WindowClause *winclause, List *qual, bool topwindow)
Definition pathnode.c:3393
LockRowsPath * create_lockrows_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *rowMarks, int epqParam)
Definition pathnode.c:3630
Path * apply_projection_to_path(PlannerInfo *root, RelOptInfo *rel, Path *path, PathTarget *target)
Definition pathnode.c:2696
Path * create_seqscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer, int parallel_workers)
Definition pathnode.c:1026
GatherMergePath * create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *pathkeys, Relids required_outer, double *rows)
Definition pathnode.c:1813
void set_cheapest(RelOptInfo *parent_rel)
Definition pathnode.c:268
void add_partial_path(RelOptInfo *parent_rel, Path *new_path)
Definition pathnode.c:793
LimitPath * create_limit_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, Node *limitOffset, Node *limitCount, LimitOption limitOption, int64 offset_est, int64 count_est)
Definition pathnode.c:3793
ModifyTablePath * create_modifytable_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, CmdType operation, bool canSetTag, Index nominalRelation, Index rootRelation, List *resultRelations, List *updateColnosLists, List *withCheckOptionLists, List *returningLists, List *rowMarks, OnConflictExpr *onconflict, List *mergeActionLists, List *mergeJoinConditions, ForPortionOfExpr *forPortionOf, int epqParam)
Definition pathnode.c:3692
int compare_fractional_path_costs(Path *path1, Path *path2, double fraction)
Definition pathnode.c:123
IncrementalSortPath * create_incremental_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, int presorted_keys, double limit_tuples)
Definition pathnode.c:2855
GroupingSetsPath * create_groupingsets_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *having_qual, AggStrategy aggstrategy, List *rollups, const AggClauseCosts *agg_costs)
Definition pathnode.c:3139
SortPath * create_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, double limit_tuples)
Definition pathnode.c:2904
GroupPath * create_group_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *groupClause, List *qual, double numGroups)
Definition pathnode.c:2948
void add_path(RelOptInfo *parent_rel, Path *new_path)
Definition pathnode.c:459
AppendPath * create_append_path(PlannerInfo *root, RelOptInfo *rel, AppendPathInput input, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows)
Definition pathnode.c:1352
UniquePath * create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, int numCols, double numGroups)
Definition pathnode.c:3005
AggPath * create_agg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, AggStrategy aggstrategy, AggSplit aggsplit, List *groupClause, List *qual, const AggClauseCosts *aggcosts, double numGroups)
Definition pathnode.c:3057
GroupResultPath * create_group_result_path(PlannerInfo *root, RelOptInfo *rel, PathTarget *target, List *havingqual)
Definition pathnode.c:1664
#define PGS_NESTLOOP_MEMOIZE
Definition pathnodes.h:76
#define PGS_TIDSCAN
Definition pathnodes.h:70
#define PGS_FOREIGNJOIN
Definition pathnodes.h:71
#define PGS_APPEND
Definition pathnodes.h:78
#define PGS_MERGE_APPEND
Definition pathnodes.h:79
PartitionwiseAggregateType
Definition pathnodes.h:3646
@ PARTITIONWISE_AGGREGATE_PARTIAL
Definition pathnodes.h:3649
@ PARTITIONWISE_AGGREGATE_FULL
Definition pathnodes.h:3648
@ PARTITIONWISE_AGGREGATE_NONE
Definition pathnodes.h:3647
#define IS_SIMPLE_REL(rel)
Definition pathnodes.h:989
#define IS_DUMMY_REL(r)
Definition pathnodes.h:2299
#define PGS_SEQSCAN
Definition pathnodes.h:66
#define PGS_CONSIDER_INDEXONLY
Definition pathnodes.h:82
#define PGS_NESTLOOP_MATERIALIZE
Definition pathnodes.h:75
#define PGS_MERGEJOIN_PLAIN
Definition pathnodes.h:72
#define GROUPING_CAN_USE_HASH
Definition pathnodes.h:3631
#define PGS_MERGEJOIN_MATERIALIZE
Definition pathnodes.h:73
#define PGS_HASHJOIN
Definition pathnodes.h:77
#define get_pathtarget_sortgroupref(target, colno)
Definition pathnodes.h:1894
#define IS_PARTITIONED_REL(rel)
Definition pathnodes.h:1231
#define PGS_CONSIDER_NONPARTIAL
Definition pathnodes.h:84
#define PGS_BITMAPSCAN
Definition pathnodes.h:69
#define PGS_GATHER
Definition pathnodes.h:80
#define GROUPING_CAN_USE_SORT
Definition pathnodes.h:3630
#define GROUPING_CAN_PARTIAL_AGG
Definition pathnodes.h:3632
#define PGS_CONSIDER_PARTITIONWISE
Definition pathnodes.h:83
#define PGS_GATHER_MERGE
Definition pathnodes.h:81
@ UPPERREL_GROUP_AGG
Definition pathnodes.h:147
@ UPPERREL_FINAL
Definition pathnodes.h:152
@ UPPERREL_DISTINCT
Definition pathnodes.h:150
@ UPPERREL_PARTIAL_GROUP_AGG
Definition pathnodes.h:145
@ UPPERREL_ORDERED
Definition pathnodes.h:151
@ UPPERREL_WINDOW
Definition pathnodes.h:148
@ UPPERREL_PARTIAL_DISTINCT
Definition pathnodes.h:149
@ RELOPT_OTHER_UPPER_REL
Definition pathnodes.h:982
#define IS_OTHER_REL(rel)
Definition pathnodes.h:1004
#define PGS_INDEXONLYSCAN
Definition pathnodes.h:68
#define PGS_INDEXSCAN
Definition pathnodes.h:67
#define PGS_NESTLOOP_PLAIN
Definition pathnodes.h:74
@ PATHKEYS_BETTER2
Definition paths.h:221
@ PATHKEYS_BETTER1
Definition paths.h:220
@ PATHKEYS_DIFFERENT
Definition paths.h:222
@ PATHKEYS_EQUAL
Definition paths.h:219
#define lfirst(lc)
Definition pg_list.h:172
#define lfirst_node(type, lc)
Definition pg_list.h:176
static int list_length(const List *l)
Definition pg_list.h:152
#define linitial_node(type, l)
Definition pg_list.h:181
#define NIL
Definition pg_list.h:68
#define forboth(cell1, list1, cell2, list2)
Definition pg_list.h:550
#define foreach_current_index(var_or_cell)
Definition pg_list.h:435
#define lfirst_int(lc)
Definition pg_list.h:173
#define list_make1(x1)
Definition pg_list.h:244
#define linitial_int(l)
Definition pg_list.h:179
#define foreach_ptr(type, var, lst)
Definition pg_list.h:501
#define for_each_cell(cell, lst, initcell)
Definition pg_list.h:470
#define for_each_from(cell, lst, N)
Definition pg_list.h:446
static void * list_nth(const List *list, int n)
Definition pg_list.h:331
#define linitial(l)
Definition pg_list.h:178
#define foreach_node(type, var, lst)
Definition pg_list.h:528
static ListCell * list_head(const List *l)
Definition pg_list.h:128
#define list_nth_node(type, list, n)
Definition pg_list.h:359
static ListCell * lnext(const List *l, const ListCell *c)
Definition pg_list.h:375
#define list_make1_int(x1)
Definition pg_list.h:259
#define lfirst_oid(lc)
Definition pg_list.h:174
static int list_cell_number(const List *l, const ListCell *c)
Definition pg_list.h:365
#define llast_node(type, l)
Definition pg_list.h:202
static int scale
Definition pgbench.c:182
void preprocess_minmax_aggregates(PlannerInfo *root)
Definition planagg.c:74
void estimate_rel_size(Relation rel, int32 *attr_widths, BlockNumber *pages, double *tuples, double *allvisfrac)
Definition plancat.c:1305
int32 get_relation_data_width(Oid relid, int32 *attr_widths)
Definition plancat.c:1472
RelOptInfo * query_planner(PlannerInfo *root, query_pathkeys_callback qp_callback, void *qp_extra)
Definition planmain.c:54
#define DEFAULT_CURSOR_TUPLE_FRACTION
Definition planmain.h:21
#define EXPRKIND_TABLEFUNC_LATERAL
Definition planner.c:99
static RelOptInfo * create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *distinct_rel)
Definition planner.c:5135
static List * postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
Definition planner.c:5873
static PathTarget * make_partial_grouping_target(PlannerInfo *root, PathTarget *grouping_target, Node *havingQual)
Definition planner.c:5735
Expr * expression_planner_with_deps(Expr *expr, List **relationOids, List **invalItems)
Definition planner.c:6866
#define EXPRKIND_TARGET
Definition planner.c:88
#define EXPRKIND_APPINFO
Definition planner.c:94
static void gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
Definition planner.c:7846
static void preprocess_rowmarks(PlannerInfo *root)
Definition planner.c:2498
#define EXPRKIND_TABLESAMPLE
Definition planner.c:96
PlannerInfo * subquery_planner(PlannerGlobal *glob, Query *parse, char *plan_name, PlannerInfo *parent_root, PlannerInfo *alternative_root, bool hasRecursion, double tuple_fraction, SetOperationStmt *setops)
Definition planner.c:757
static void add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, GroupPathExtraData *extra)
Definition planner.c:7174
PlannedStmt * planner(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams, ExplainState *es)
Definition planner.c:315
static void create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel)
Definition planner.c:4069
char * choose_plan_name(PlannerGlobal *glob, const char *name, bool always_number)
Definition planner.c:9044
#define EXPRKIND_GROUPEXPR
Definition planner.c:100
planner_hook_type planner_hook
Definition planner.c:74
double cursor_tuple_fraction
Definition planner.c:68
static bool is_degenerate_grouping(PlannerInfo *root)
Definition planner.c:4048
planner_shutdown_hook_type planner_shutdown_hook
Definition planner.c:80
bool plan_cluster_use_sort(Oid tableOid, Oid indexOid)
Definition planner.c:6919
static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode)
Definition planner.c:1456
int plan_create_index_workers(Oid tableOid, Oid indexOid)
Definition planner.c:7041
#define EXPRKIND_PHV
Definition planner.c:95
#define EXPRKIND_RTFUNC_LATERAL
Definition planner.c:90
#define EXPRKIND_VALUES_LATERAL
Definition planner.c:92
static void create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, GroupPathExtraData *extra, RelOptInfo **partially_grouped_rel_p)
Definition planner.c:4132
RelOptInfo * create_unique_paths(PlannerInfo *root, RelOptInfo *rel, SpecialJoinInfo *sjinfo)
Definition planner.c:8492
#define EXPRKIND_LIMIT
Definition planner.c:93
#define EXPRKIND_VALUES
Definition planner.c:91
static bool can_partial_agg(PlannerInfo *root)
Definition planner.c:7929
static double preprocess_limit(PlannerInfo *root, double tuple_fraction, int64 *offset_est, int64 *count_est)
Definition planner.c:2676
Path * get_cheapest_fractional_path(RelOptInfo *rel, double tuple_fraction)
Definition planner.c:6677
Expr * preprocess_phv_expression(PlannerInfo *root, Expr *expr)
Definition planner.c:1500
static List * get_useful_pathkeys_for_distinct(PlannerInfo *root, List *needed_pathkeys, List *path_pathkeys)
Definition planner.c:5315
planner_setup_hook_type planner_setup_hook
Definition planner.c:77
bool parallel_leader_participation
Definition planner.c:70
static PathTarget * make_window_input_target(PlannerInfo *root, PathTarget *final_target, List *activeWindows)
Definition planner.c:6253
static void apply_scanjoin_target_to_paths(PlannerInfo *root, RelOptInfo *rel, List *scanjoin_targets, List *scanjoin_targets_contain_srfs, bool scanjoin_target_parallel_safe, bool tlist_same_exprs)
Definition planner.c:7971
static RelOptInfo * create_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target)
Definition planner.c:4882
static void optimize_window_clauses(PlannerInfo *root, WindowFuncLists *wflists)
Definition planner.c:5910
RowMarkType select_rowmark_type(RangeTblEntry *rte, LockClauseStrength strength)
Definition planner.c:2610
static void adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel, List *targets, List *targets_contain_srfs)
Definition planner.c:6723
static void create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *final_distinct_rel, PathTarget *target)
Definition planner.c:4952
#define EXPRKIND_QUAL
Definition planner.c:87
static List * preprocess_groupclause(PlannerInfo *root, List *force)
Definition planner.c:2927
static Node * preprocess_expression(PlannerInfo *root, Node *expr, int kind)
Definition planner.c:1354
static Path * make_ordered_path(PlannerInfo *root, RelOptInfo *rel, Path *path, Path *cheapest_path, List *pathkeys, double limit_tuples)
Definition planner.c:7787
static bool has_volatile_pathkey(List *keys)
Definition planner.c:3283
static RelOptInfo * create_partial_grouping_paths(PlannerInfo *root, RelOptInfo *grouped_rel, RelOptInfo *input_rel, grouping_sets_data *gd, GroupPathExtraData *extra, bool force_rel_creation)
Definition planner.c:7435
static void name_active_windows(List *activeWindows)
Definition planner.c:6133
static void create_final_unique_paths(PlannerInfo *root, RelOptInfo *input_rel, List *sortPathkeys, List *groupClause, SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel)
Definition planner.c:8761
static PathTarget * make_sort_input_target(PlannerInfo *root, PathTarget *final_target, bool *have_postponed_srfs)
Definition planner.c:6501
static void create_one_window_path(PlannerInfo *root, RelOptInfo *window_rel, Path *path, PathTarget *input_target, PathTarget *output_target, WindowFuncLists *wflists, List *activeWindows)
Definition planner.c:4712
bool enable_distinct_reordering
Definition planner.c:71
void mark_partial_aggref(Aggref *agg, AggSplit aggsplit)
Definition planner.c:5838
static grouping_sets_data * preprocess_grouping_sets(PlannerInfo *root)
Definition planner.c:2281
int debug_parallel_query
Definition planner.c:69
static List * remap_to_groupclause_idx(List *groupClause, List *gsets, int *tleref_to_colnum_map)
Definition planner.c:2461
static void adjust_group_pathkeys_for_groupagg(PlannerInfo *root)
Definition planner.c:3328
static PathTarget * make_group_input_target(PlannerInfo *root, PathTarget *final_target)
Definition planner.c:5623
static List * reorder_grouping_sets(List *groupingSets, List *sortclause)
Definition planner.c:3235
static int common_prefix_cmp(const void *a, const void *b)
Definition planner.c:6184
static void grouping_planner(PlannerInfo *root, double tuple_fraction, SetOperationStmt *setops)
Definition planner.c:1533
static RelOptInfo * make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, Node *havingQual)
Definition planner.c:3992
static List * generate_setop_child_grouplist(SetOperationStmt *op, List *targetlist)
Definition planner.c:8436
PlannedStmt * standard_planner(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams, ExplainState *es)
Definition planner.c:333
static List * select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
Definition planner.c:6050
Expr * expression_planner(Expr *expr)
Definition planner.c:6839
static void create_partial_unique_paths(PlannerInfo *root, RelOptInfo *input_rel, List *sortPathkeys, List *groupClause, SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel)
Definition planner.c:8886
bool limit_needed(Query *parse)
Definition planner.c:2861
create_upper_paths_hook_type create_upper_paths_hook
Definition planner.c:83
#define EXPRKIND_TABLEFUNC
Definition planner.c:98
static void consider_groupingsets_paths(PlannerInfo *root, RelOptInfo *grouped_rel, Path *path, bool is_sorted, bool can_hash, grouping_sets_data *gd, const AggClauseCosts *agg_costs, double dNumGroups)
Definition planner.c:4263
static List * make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc, List *tlist)
Definition planner.c:6373
static RelOptInfo * create_ordered_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, double limit_tuples)
Definition planner.c:5400
#define EXPRKIND_RTFUNC
Definition planner.c:89
static double get_number_of_groups(PlannerInfo *root, double path_rows, grouping_sets_data *gd, List *target_list)
Definition planner.c:3757
static List * extract_rollup_sets(List *groupingSets)
Definition planner.c:3023
static RelOptInfo * create_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, grouping_sets_data *gd)
Definition planner.c:3879
static void create_partitionwise_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, PartitionwiseAggregateType patype, GroupPathExtraData *extra)
Definition planner.c:8212
#define EXPRKIND_ARBITER_ELEM
Definition planner.c:97
static bool group_by_has_partkey(RelOptInfo *input_rel, List *targetList, List *groupClause)
Definition planner.c:8349
static void standard_qp_callback(PlannerInfo *root, void *extra)
Definition planner.c:3552
static RelOptInfo * create_window_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *input_target, PathTarget *output_target, bool output_target_parallel_safe, WindowFuncLists *wflists, List *activeWindows)
Definition planner.c:4625
PlannedStmt *(* planner_hook_type)(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams, ExplainState *es)
Definition planner.h:28
void(* planner_setup_hook_type)(PlannerGlobal *glob, Query *parse, const char *query_string, int cursorOptions, double *tuple_fraction, ExplainState *es)
Definition planner.h:36
void(* create_upper_paths_hook_type)(PlannerInfo *root, UpperRelationKind stage, RelOptInfo *input_rel, RelOptInfo *output_rel, void *extra)
Definition planner.h:50
void(* planner_shutdown_hook_type)(PlannerGlobal *glob, Query *parse, const char *query_string, PlannedStmt *pstmt)
Definition planner.h:44
@ PLAN_STMT_STANDARD
Definition plannodes.h:39
RowMarkType
Definition plannodes.h:1556
@ ROW_MARK_COPY
Definition plannodes.h:1562
@ ROW_MARK_REFERENCE
Definition plannodes.h:1561
@ ROW_MARK_SHARE
Definition plannodes.h:1559
@ ROW_MARK_EXCLUSIVE
Definition plannodes.h:1557
@ ROW_MARK_NOKEYEXCLUSIVE
Definition plannodes.h:1558
@ ROW_MARK_KEYSHARE
Definition plannodes.h:1560
#define snprintf
Definition port.h:260
#define qsort(a, b, c, d)
Definition port.h:495
#define printf(...)
Definition port.h:266
static Datum Int64GetDatum(int64 X)
Definition postgres.h:413
static int64 DatumGetInt64(Datum X)
Definition postgres.h:403
static Datum PointerGetDatum(const void *X)
Definition postgres.h:342
static Pointer DatumGetPointer(Datum X)
Definition postgres.h:332
#define InvalidOid
unsigned int Oid
void get_agg_clause_costs(PlannerInfo *root, AggSplit aggsplit, AggClauseCosts *costs)
Definition prepagg.c:559
void preprocess_aggrefs(PlannerInfo *root, Node *clause)
Definition prepagg.c:110
void preprocess_function_rtes(PlannerInfo *root)
void flatten_simple_union_all(PlannerInfo *root)
void transform_MERGE_to_join(Query *parse)
void remove_useless_result_rtes(PlannerInfo *root)
void pull_up_sublinks(PlannerInfo *root)
void replace_empty_jointree(Query *parse)
void pull_up_subqueries(PlannerInfo *root)
Relids get_relids_in_jointree(Node *jtnode, bool include_outer_joins, bool include_inner_joins)
Query * preprocess_relation_rtes(PlannerInfo *root)
void reduce_outer_joins(PlannerInfo *root)
Expr * canonicalize_qual(Expr *qual, bool is_check)
Definition prepqual.c:293
char * c
e
static int fb(int x)
void preprocess_targetlist(PlannerInfo *root)
Definition preptlist.c:66
RelOptInfo * plan_set_operations(PlannerInfo *root)
Definition prepunion.c:98
char * psprintf(const char *fmt,...)
Definition psprintf.c:43
tree ctl root
Definition radixtree.h:1857
List * RelationGetIndexPredicate(Relation relation)
Definition relcache.c:5201
List * RelationGetIndexExpressions(Relation relation)
Definition relcache.c:5088
RelOptInfo * find_base_rel(PlannerInfo *root, int relid)
Definition relnode.c:544
void setup_simple_rel_arrays(PlannerInfo *root)
Definition relnode.c:114
RelOptInfo * fetch_upper_rel(PlannerInfo *root, UpperRelationKind kind, Relids relids)
Definition relnode.c:1617
RelOptInfo * build_simple_rel(PlannerInfo *root, int relid, RelOptInfo *parent)
Definition relnode.c:212
Node * remove_nulling_relids(Node *node, const Bitmapset *removable_relids, const Bitmapset *except_relids)
@ ForwardScanDirection
Definition sdir.h:28
double estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, List **pgset, EstimationInfo *estinfo)
Definition selfuncs.c:3800
double estimate_hashagg_tablesize(PlannerInfo *root, Path *path, const AggClauseCosts *agg_costs, double dNumGroups)
Definition selfuncs.c:4526
Plan * set_plan_references(PlannerInfo *root, Plan *plan)
Definition setrefs.c:291
bool extract_query_dependencies_walker(Node *node, PlannerInfo *context)
Definition setrefs.c:3749
void check_stack_depth(void)
Definition stack_depth.c:95
List * aggdistinct
Definition primnodes.h:494
List * args
Definition primnodes.h:488
Expr * aggfilter
Definition primnodes.h:497
List * aggorder
Definition primnodes.h:491
GetForeignRowMarkType_function GetForeignRowMarkType
Definition fdwapi.h:251
GetForeignUpperPaths_function GetForeignUpperPaths
Definition fdwapi.h:230
Cardinality limit_tuples
Definition pathnodes.h:3693
Node * quals
Definition primnodes.h:2385
List * fromlist
Definition primnodes.h:2384
PartitionwiseAggregateType patype
Definition pathnodes.h:3677
AggClauseCosts agg_final_costs
Definition pathnodes.h:3671
AggClauseCosts agg_partial_costs
Definition pathnodes.h:3670
Definition pg_list.h:54
Definition nodes.h:135
List * exprs
Definition pathnodes.h:1878
List * pathkeys
Definition pathnodes.h:2011
Cardinality rows
Definition pathnodes.h:2005
Bitmapset * prunableRelids
Definition pathnodes.h:206
char maxParallelHazard
Definition pathnodes.h:260
List * subplans
Definition pathnodes.h:178
bool dependsOnRole
Definition pathnodes.h:251
Bitmapset * allRelids
Definition pathnodes.h:199
List * subrtinfos
Definition pathnodes.h:212
List * appendRelations
Definition pathnodes.h:221
List * finalrowmarks
Definition pathnodes.h:215
List * invalItems
Definition pathnodes.h:230
List * relationOids
Definition pathnodes.h:227
List * paramExecTypes
Definition pathnodes.h:233
bool parallelModeOK
Definition pathnodes.h:254
bool transientPlan
Definition pathnodes.h:248
Bitmapset * rewindPlanIDs
Definition pathnodes.h:190
List * finalrteperminfos
Definition pathnodes.h:209
List * subpaths
Definition pathnodes.h:181
Index lastRowMarkId
Definition pathnodes.h:242
List * resultRelations
Definition pathnodes.h:218
List * partPruneInfos
Definition pathnodes.h:224
List * finalrtable
Definition pathnodes.h:193
uint64 default_pgs_mask
Definition pathnodes.h:263
List * elidedNodes
Definition pathnodes.h:236
bool parallelModeNeeded
Definition pathnodes.h:257
Cost per_tuple
Definition pathnodes.h:121
List * rtable
Definition parsenodes.h:178
CmdType commandType
Definition parsenodes.h:121
List * ppilist
Definition pathnodes.h:1051
bool useridiscurrent
Definition pathnodes.h:1115
Relids relids
Definition pathnodes.h:1021
struct PathTarget * reltarget
Definition pathnodes.h:1045
uint64 pgs_mask
Definition pathnodes.h:1039
List * unique_pathkeys
Definition pathnodes.h:1134
Cardinality tuples
Definition pathnodes.h:1096
bool consider_parallel
Definition pathnodes.h:1037
BlockNumber pages
Definition pathnodes.h:1095
List * cheapest_parameterized_paths
Definition pathnodes.h:1055
List * pathlist
Definition pathnodes.h:1050
RelOptKind reloptkind
Definition pathnodes.h:1015
List * indexlist
Definition pathnodes.h:1091
struct Path * cheapest_startup_path
Definition pathnodes.h:1053
struct Path * cheapest_total_path
Definition pathnodes.h:1054
List * unique_groupclause
Definition pathnodes.h:1136
Bitmapset * live_parts
Definition pathnodes.h:1204
int rel_parallel_workers
Definition pathnodes.h:1103
List * partial_pathlist
Definition pathnodes.h:1052
struct RelOptInfo * unique_rel
Definition pathnodes.h:1132
Cardinality rows
Definition pathnodes.h:1027
Form_pg_class rd_rel
Definition rel.h:111
LockClauseStrength strength
LockWaitPolicy waitPolicy
List * semi_rhs_exprs
Definition pathnodes.h:3241
JoinType jointype
Definition pathnodes.h:3230
Relids syn_righthand
Definition pathnodes.h:3229
List * semi_operators
Definition pathnodes.h:3240
WindowClause * wc
Definition planner.c:123
Node * startOffset
List * partitionClause
Node * endOffset
List * orderClause
Index winref
Definition primnodes.h:612
int * tleref_to_colnum_map
Definition planner.c:114
Bitmapset * unhashable_refs
Definition planner.c:112
List * unsortable_sets
Definition planner.c:113
List * hash_sets_idx
Definition planner.c:108
double dNumHashGroups
Definition planner.c:109
Bitmapset * unsortable_refs
Definition planner.c:111
Definition type.h:96
List * activeWindows
Definition planner.c:131
grouping_sets_data * gset_data
Definition planner.c:132
SetOperationStmt * setop
Definition planner.c:133
Node * SS_process_sublinks(PlannerInfo *root, Node *expr, bool isQual)
Definition subselect.c:2207
void SS_process_ctes(PlannerInfo *root)
Definition subselect.c:884
void SS_identify_outer_params(PlannerInfo *root)
Definition subselect.c:2365
Node * SS_replace_correlation_vars(PlannerInfo *root, Node *expr)
Definition subselect.c:2152
void SS_finalize_plan(PlannerInfo *root, Plan *plan)
Definition subselect.c:2549
void SS_compute_initplan_cost(List *init_plans, Cost *initplan_cost_p, bool *unsafe_initplans_p)
Definition subselect.c:2493
void SS_charge_for_initplans(PlannerInfo *root, RelOptInfo *final_rel)
Definition subselect.c:2429
void table_close(Relation relation, LOCKMODE lockmode)
Definition table.c:126
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition table.c:40
void split_pathtarget_at_srfs_grouping(PlannerInfo *root, PathTarget *target, PathTarget *input_target, List **targets, List **targets_contain_srfs)
Definition tlist.c:868
TargetEntry * tlist_member(Expr *node, List *targetlist)
Definition tlist.c:88
bool tlist_same_exprs(List *tlist1, List *tlist2)
Definition tlist.c:227
SortGroupClause * get_sortgroupref_clause_noerr(Index sortref, List *clauses)
Definition tlist.c:452
SortGroupClause * get_sortgroupref_clause(Index sortref, List *clauses)
Definition tlist.c:431
bool grouping_is_sortable(List *groupClause)
Definition tlist.c:549
List * make_tlist_from_pathtarget(PathTarget *target)
Definition tlist.c:633
PathTarget * copy_pathtarget(PathTarget *src)
Definition tlist.c:666
void add_new_columns_to_pathtarget(PathTarget *target, List *exprs)
Definition tlist.c:761
PathTarget * create_empty_pathtarget(void)
Definition tlist.c:690
List * get_sortgrouplist_exprs(List *sgClauses, List *targetList)
Definition tlist.c:401
void split_pathtarget_at_srfs(PlannerInfo *root, PathTarget *target, PathTarget *input_target, List **targets, List **targets_contain_srfs)
Definition tlist.c:845
bool grouping_is_hashable(List *groupClause)
Definition tlist.c:569
void add_column_to_pathtarget(PathTarget *target, Expr *expr, Index sortgroupref)
Definition tlist.c:704
#define create_pathtarget(root, tlist)
Definition tlist.h:58
Node * flatten_group_exprs(PlannerInfo *root, Query *query, Node *node)
Definition var.c:999
Relids pull_varnos(PlannerInfo *root, Node *node)
Definition var.c:114
List * pull_var_clause(Node *node, int flags)
Definition var.c:653
Node * flatten_join_alias_vars(PlannerInfo *root, Query *query, Node *node)
Definition var.c:781
const char * name