PostgreSQL Source Code (git master)
nodeGatherMerge.h File Reference
#include "nodes/execnodes.h"
[Graphs omitted: the include dependency graph for nodeGatherMerge.h, and the graph of files that directly or indirectly include this file.]


Functions

GatherMergeState * ExecInitGatherMerge (GatherMerge *node, EState *estate, int eflags)
void ExecEndGatherMerge (GatherMergeState *node)
void ExecReScanGatherMerge (GatherMergeState *node)
void ExecShutdownGatherMerge (GatherMergeState *node)
 

Function Documentation
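
These entry points are not normally called directly: the dispatchers in execProcnode.c (ExecInitNode(), ExecEndNode(), ExecShutdownNode()) and execAmi.c (ExecReScan()) route to them based on the plan node tag, as the "Referenced by" notes below show. As a rough orientation, the sketch below (illustrative names only, not part of this header) shows the lifecycle a single Gather Merge node goes through:

#include "executor/executor.h"
#include "executor/nodeGatherMerge.h"

/* Illustrative only: drive a single GatherMerge node through its lifecycle. */
static void
run_gather_merge(GatherMerge *gm_plan, EState *estate, int eflags)
{
    /* normally reached via ExecInitNode() on the whole plan tree */
    GatherMergeState *gms = ExecInitGatherMerge(gm_plan, estate, eflags);

    for (;;)
    {
        /* dispatches to ExecGatherMerge through gms->ps.ExecProcNode */
        TupleTableSlot *slot = ExecProcNode(&gms->ps);

        if (TupIsNull(slot))
            break;
        /* ... consume one tuple in merged sort order ... */
    }

    ExecShutdownGatherMerge(gms);   /* stop workers, release parallel context */
    ExecEndGatherMerge(gms);        /* recurse into the outer child */
}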

◆ ExecEndGatherMerge()

void ExecEndGatherMerge ( GatherMergeState * node )

Definition at line 289 of file nodeGatherMerge.c.

{
    ExecEndNode(outerPlanState(node));  /* let children clean up first */
    ExecShutdownGatherMerge(node);
}

References ExecEndNode(), ExecShutdownGatherMerge(), and outerPlanState.

Referenced by ExecEndNode().

◆ ExecInitGatherMerge()

GatherMergeState * ExecInitGatherMerge ( GatherMerge * node,
                                         EState * estate,
                                         int eflags
                                       )

Definition at line 72 of file nodeGatherMerge.c.

{
    GatherMergeState *gm_state;
    Plan       *outerNode;
    TupleDesc   tupDesc;

    /* Gather merge node doesn't have innerPlan node. */
    Assert(innerPlan(node) == NULL);

    /*
     * create state structure
     */
    gm_state = makeNode(GatherMergeState);
    gm_state->ps.plan = (Plan *) node;
    gm_state->ps.state = estate;
    gm_state->ps.ExecProcNode = ExecGatherMerge;

    gm_state->initialized = false;
    gm_state->gm_initialized = false;
    gm_state->tuples_needed = -1;

    /*
     * Miscellaneous initialization
     *
     * create expression context for node
     */
    ExecAssignExprContext(estate, &gm_state->ps);

    /*
     * GatherMerge doesn't support checking a qual (it's always more efficient
     * to do it in the child node).
     */
    Assert(!node->plan.qual);

    /*
     * now initialize outer plan
     */
    outerNode = outerPlan(node);
    outerPlanState(gm_state) = ExecInitNode(outerNode, estate, eflags);

    /*
     * Leader may access ExecProcNode result directly (if
     * need_to_scan_locally), or from workers via tuple queue.  So we can't
     * trivially rely on the slot type being fixed for expressions evaluated
     * within this node.
     */
    gm_state->ps.outeropsset = true;
    gm_state->ps.outeropsfixed = false;

    /*
     * Store the tuple descriptor into gather merge state, so we can use it
     * while initializing the gather merge slots.
     */
    tupDesc = ExecGetResultType(outerPlanState(gm_state));
    gm_state->tupDesc = tupDesc;

    /*
     * Initialize result type and projection.
     */
    ExecInitResultTypeTL(&gm_state->ps);
    ExecConditionalAssignProjectionInfo(&gm_state->ps, tupDesc, OUTER_VAR);

    /*
     * Without projections result slot type is not trivially known, see
     * comment above.
     */
    if (gm_state->ps.ps_ProjInfo == NULL)
    {
        gm_state->ps.resultopsset = true;
        gm_state->ps.resultopsfixed = false;
    }

    /*
     * initialize sort-key information
     */
    if (node->numCols)
    {
        int         i;

        gm_state->gm_nkeys = node->numCols;
        gm_state->gm_sortkeys =
            palloc0(sizeof(SortSupportData) * node->numCols);

        for (i = 0; i < node->numCols; i++)
        {
            SortSupport sortKey = gm_state->gm_sortkeys + i;

            sortKey->ssup_cxt = CurrentMemoryContext;
            sortKey->ssup_collation = node->collations[i];
            sortKey->ssup_nulls_first = node->nullsFirst[i];
            sortKey->ssup_attno = node->sortColIdx[i];

            /*
             * We don't perform abbreviated key conversion here, for the same
             * reasons that it isn't used in MergeAppend
             */
            sortKey->abbreviate = false;

            PrepareSortSupportFromOrderingOp(node->sortOperators[i], sortKey);
        }
    }

    /* Now allocate the workspace for gather merge */
    gather_merge_setup(gm_state);

    return gm_state;
}

References SortSupportData::abbreviate, Assert(), CurrentMemoryContext, ExecAssignExprContext(), ExecConditionalAssignProjectionInfo(), ExecGatherMerge(), ExecGetResultType(), ExecInitNode(), ExecInitResultTypeTL(), PlanState::ExecProcNode, gather_merge_setup(), GatherMergeState::gm_initialized, GatherMergeState::gm_nkeys, GatherMergeState::gm_sortkeys, i, GatherMergeState::initialized, innerPlan, makeNode, GatherMerge::numCols, OUTER_VAR, PlanState::outeropsfixed, PlanState::outeropsset, outerPlan, outerPlanState, palloc0(), PlanState::plan, GatherMerge::plan, PrepareSortSupportFromOrderingOp(), GatherMergeState::ps, PlanState::ps_ProjInfo, Plan::qual, PlanState::resultopsfixed, PlanState::resultopsset, SortSupportData::ssup_attno, SortSupportData::ssup_collation, SortSupportData::ssup_cxt, SortSupportData::ssup_nulls_first, PlanState::state, GatherMergeState::tupDesc, and GatherMergeState::tuples_needed.

Referenced by ExecInitNode().
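
The SortSupport array built above is what the merge step later uses to keep tuples from the leader and the workers in sort order. The comparison follows the standard SortSupport pattern; the helper below is a hedged reconstruction of that pattern, not the actual nodeGatherMerge.c code:

#include "postgres.h"
#include "executor/tuptable.h"
#include "nodes/execnodes.h"
#include "utils/sortsupport.h"

/*
 * Hypothetical helper (not the one in nodeGatherMerge.c): compare two slots
 * using the SortSupport array built in ExecInitGatherMerge().
 */
static int
compare_slots_by_sortkeys(GatherMergeState *gm_state,
                          TupleTableSlot *s1, TupleTableSlot *s2)
{
    for (int nkey = 0; nkey < gm_state->gm_nkeys; nkey++)
    {
        SortSupport sortKey = gm_state->gm_sortkeys + nkey;
        AttrNumber  attno = sortKey->ssup_attno;
        Datum       datum1,
                    datum2;
        bool        isNull1,
                    isNull2;
        int         compare;

        datum1 = slot_getattr(s1, attno, &isNull1);
        datum2 = slot_getattr(s2, attno, &isNull2);

        compare = ApplySortComparator(datum1, isNull1,
                                      datum2, isNull2,
                                      sortKey);
        if (compare != 0)
            return compare;
    }
    return 0;                   /* all sort keys equal */
}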

◆ ExecReScanGatherMerge()

void ExecReScanGatherMerge ( GatherMergeState * node )

Definition at line 339 of file nodeGatherMerge.c.

{
    GatherMerge *gm = (GatherMerge *) node->ps.plan;
    PlanState  *outerPlan = outerPlanState(node);

    /* Make sure any existing workers are gracefully shut down */
    ExecShutdownGatherMergeWorkers(node);

    /* Free any unused tuples, so we don't leak memory across rescans */
    gather_merge_clear_tuples(node);

    /* Mark node so that shared state will be rebuilt at next call */
    node->initialized = false;
    node->gm_initialized = false;

    /*
     * Set child node's chgParam to tell it that the next scan might deliver a
     * different set of rows within the leader process.  (The overall rowset
     * shouldn't change, but the leader process's subset might; hence nodes
     * between here and the parallel table scan node mustn't optimize on the
     * assumption of an unchanging rowset.)
     */
    if (gm->rescan_param >= 0)
        outerPlan->chgParam = bms_add_member(outerPlan->chgParam,
                                             gm->rescan_param);

    /*
     * If chgParam of subnode is not null then plan will be re-scanned by
     * first ExecProcNode.  Note: because this does nothing if we have a
     * rescan_param, it's currently guaranteed that parallel-aware child nodes
     * will not see a ReScan call until after they get a ReInitializeDSM call.
     * That ordering might not be something to rely on, though.  A good rule
     * of thumb is that ReInitializeDSM should reset only shared state, ReScan
     * should reset only local state, and anything that depends on both of
     * those steps being finished must wait until the first ExecProcNode call.
     */
    if (outerPlan->chgParam == NULL)
        ExecReScan(outerPlan);
}

References bms_add_member(), ExecReScan(), ExecShutdownGatherMergeWorkers(), gather_merge_clear_tuples(), GatherMergeState::gm_initialized, GatherMergeState::initialized, outerPlan, outerPlanState, PlanState::plan, GatherMergeState::ps, and GatherMerge::rescan_param.

Referenced by ExecReScan().
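
For context on the chgParam machinery discussed in the comments: a parent node that has changed an executor parameter marks the dependent subtree's chgParam and requests a rescan, and ExecReScan() in execAmi.c dispatches to ExecReScanGatherMerge() for a GatherMergeState. A minimal sketch of that caller-side pattern (the helper name is hypothetical):

#include "executor/executor.h"
#include "nodes/bitmapset.h"

/*
 * Illustrative only: a parent that changed executor parameter `paramno`
 * marks the GatherMerge subtree and requests a rescan; ExecReScan() then
 * routes to ExecReScanGatherMerge().
 */
static void
rescan_gather_merge_subtree(GatherMergeState *gms, int paramno)
{
    gms->ps.chgParam = bms_add_member(gms->ps.chgParam, paramno);
    ExecReScan(&gms->ps);
}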

◆ ExecShutdownGatherMerge()

void ExecShutdownGatherMerge ( GatherMergeState * node )

Definition at line 302 of file nodeGatherMerge.c.

{
    ExecShutdownGatherMergeWorkers(node);

    /* Now destroy the parallel context. */
    if (node->pei != NULL)
    {
        ExecParallelCleanup(node->pei);
        node->pei = NULL;
    }
}

References ExecParallelCleanup(), ExecShutdownGatherMergeWorkers(), and GatherMergeState::pei.

Referenced by ExecEndGatherMerge(), and ExecShutdownNode_walker().
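
As the cross-references show, this function is reached both from normal node teardown (ExecEndGatherMerge()) and from early shutdown via ExecShutdownNode(). A minimal sketch of the early-shutdown path, assuming a caller such as a satisfied LIMIT node:

#include "executor/executor.h"

/*
 * Illustrative only: release parallel resources under a subtree before the
 * plan is torn down, e.g. once a LIMIT above the Gather Merge is satisfied.
 * ExecShutdownNode() walks the planstate tree and calls
 * ExecShutdownGatherMerge() for GatherMergeState nodes.
 */
static void
shutdown_subtree_early(PlanState *subtree)
{
    ExecShutdownNode(subtree);
}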