PostgreSQL Source Code  git master
nodeGather.c File Reference
#include "postgres.h"
#include "access/relscan.h"
#include "access/xact.h"
#include "executor/execdebug.h"
#include "executor/execParallel.h"
#include "executor/nodeGather.h"
#include "executor/nodeSubplan.h"
#include "executor/tqueue.h"
#include "miscadmin.h"
#include "optimizer/optimizer.h"
#include "pgstat.h"
#include "utils/memutils.h"
#include "utils/rel.h"
Include dependency graph for nodeGather.c:

Go to the source code of this file.

Functions

static TupleTableSlot * ExecGather (PlanState *pstate)
 
static TupleTableSlot * gather_getnext (GatherState *gatherstate)
 
static MinimalTuple gather_readnext (GatherState *gatherstate)
 
static void ExecShutdownGatherWorkers (GatherState *node)
 
GatherState * ExecInitGather (Gather *node, EState *estate, int eflags)
 
void ExecEndGather (GatherState *node)
 
void ExecShutdownGather (GatherState *node)
 
void ExecReScanGather (GatherState *node)
 

Function Documentation

◆ ExecEndGather()

void ExecEndGather ( GatherState * node)

Definition at line 249 of file nodeGather.c.

250 {
251  ExecEndNode(outerPlanState(node)); /* let children clean up first */
252  ExecShutdownGather(node);
253  ExecFreeExprContext(&node->ps);
254  if (node->ps.ps_ResultTupleSlot)
255  ExecClearTuple(node->ps.ps_ResultTupleSlot);
256 }
void ExecEndNode(PlanState *node)
Definition: execProcnode.c:557
void ExecFreeExprContext(PlanState *planstate)
Definition: execUtils.c:658
#define outerPlanState(node)
Definition: execnodes.h:1133
void ExecShutdownGather(GatherState *node)
Definition: nodeGather.c:419
PlanState ps
Definition: execnodes.h:2586
TupleTableSlot * ps_ResultTupleSlot
Definition: execnodes.h:1075
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition: tuptable.h:432

References ExecClearTuple(), ExecEndNode(), ExecFreeExprContext(), ExecShutdownGather(), outerPlanState, GatherState::ps, and PlanState::ps_ResultTupleSlot.

Referenced by ExecEndNode().

◆ ExecGather()

static TupleTableSlot * ExecGather ( PlanState * pstate)
static

Definition at line 142 of file nodeGather.c.

143 {
144  GatherState *node = castNode(GatherState, pstate);
145  TupleTableSlot *slot;
146  ExprContext *econtext;
147 
148  CHECK_FOR_INTERRUPTS();
149 
150  /*
151  * Initialize the parallel context and workers on first execution. We do
152  * this on first execution rather than during node initialization, as it
153  * needs to allocate a large dynamic segment, so it is better to do it
154  * only if it is really needed.
155  */
156  if (!node->initialized)
157  {
158  EState *estate = node->ps.state;
159  Gather *gather = (Gather *) node->ps.plan;
160 
161  /*
162  * Sometimes we might have to run without parallelism; but if parallel
163  * mode is active then we can try to fire up some workers.
164  */
165  if (gather->num_workers > 0 && estate->es_use_parallel_mode)
166  {
167  ParallelContext *pcxt;
168 
169  /* Initialize, or re-initialize, shared state needed by workers. */
170  if (!node->pei)
171  node->pei = ExecInitParallelPlan(outerPlanState(node),
172  estate,
173  gather->initParam,
174  gather->num_workers,
175  node->tuples_needed);
176  else
177  ExecParallelReinitialize(outerPlanState(node),
178  node->pei,
179  gather->initParam);
180 
181  /*
182  * Register backend workers. We might not get as many as we
183  * requested, or indeed any at all.
184  */
185  pcxt = node->pei->pcxt;
186  LaunchParallelWorkers(pcxt);
187  /* We save # workers launched for the benefit of EXPLAIN */
188  node->nworkers_launched = pcxt->nworkers_launched;
189 
190  /* Set up tuple queue readers to read the results. */
191  if (pcxt->nworkers_launched > 0)
192  {
193  ExecParallelCreateReaders(node->pei);
194  /* Make a working array showing the active readers */
195  node->nreaders = pcxt->nworkers_launched;
196  node->reader = (TupleQueueReader **)
197  palloc(node->nreaders * sizeof(TupleQueueReader *));
198  memcpy(node->reader, node->pei->reader,
199  node->nreaders * sizeof(TupleQueueReader *));
200  }
201  else
202  {
203  /* No workers? Then never mind. */
204  node->nreaders = 0;
205  node->reader = NULL;
206  }
207  node->nextreader = 0;
208  }
209 
210  /* Run plan locally if no workers or enabled and not single-copy. */
211  node->need_to_scan_locally = (node->nreaders == 0)
212  || (!gather->single_copy && parallel_leader_participation);
213  node->initialized = true;
214  }
215 
216  /*
217  * Reset per-tuple memory context to free any expression evaluation
218  * storage allocated in the previous tuple cycle.
219  */
220  econtext = node->ps.ps_ExprContext;
221  ResetExprContext(econtext);
222 
223  /*
224  * Get next tuple, either from one of our workers, or by running the plan
225  * ourselves.
226  */
227  slot = gather_getnext(node);
228  if (TupIsNull(slot))
229  return NULL;
230 
231  /* If no projection is required, we're done. */
232  if (node->ps.ps_ProjInfo == NULL)
233  return slot;
234 
235  /*
236  * Form the result tuple using ExecProject(), and return it.
237  */
238  econtext->ecxt_outertuple = slot;
239  return ExecProject(node->ps.ps_ProjInfo);
240 }
void LaunchParallelWorkers(ParallelContext *pcxt)
Definition: parallel.c:551
ParallelExecutorInfo * ExecInitParallelPlan(PlanState *planstate, EState *estate, Bitmapset *sendParams, int nworkers, int64 tuples_needed)
Definition: execParallel.c:589
void ExecParallelReinitialize(PlanState *planstate, ParallelExecutorInfo *pei, Bitmapset *sendParams)
Definition: execParallel.c:899
void ExecParallelCreateReaders(ParallelExecutorInfo *pei)
Definition: execParallel.c:873
static TupleTableSlot * ExecProject(ProjectionInfo *projInfo)
Definition: executor.h:375
#define ResetExprContext(econtext)
Definition: executor.h:543
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:77
void * palloc(Size size)
Definition: mcxt.c:1226
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:121
static TupleTableSlot * gather_getnext(GatherState *gatherstate)
Definition: nodeGather.c:264
#define castNode(_type_, nodeptr)
Definition: nodes.h:197
bool parallel_leader_participation
Definition: planner.c:74
bool es_use_parallel_mode
Definition: execnodes.h:693
TupleTableSlot * ecxt_outertuple
Definition: execnodes.h:253
bool initialized
Definition: execnodes.h:2587
struct ParallelExecutorInfo * pei
Definition: execnodes.h:2592
int nextreader
Definition: execnodes.h:2596
int nworkers_launched
Definition: execnodes.h:2594
struct TupleQueueReader ** reader
Definition: execnodes.h:2597
int64 tuples_needed
Definition: execnodes.h:2589
bool need_to_scan_locally
Definition: execnodes.h:2588
int num_workers
Definition: plannodes.h:1141
Bitmapset * initParam
Definition: plannodes.h:1145
bool single_copy
Definition: plannodes.h:1143
int nworkers_launched
Definition: parallel.h:38
ParallelContext * pcxt
Definition: execParallel.h:27
struct TupleQueueReader ** reader
Definition: execParallel.h:37
Plan * plan
Definition: execnodes.h:1037
EState * state
Definition: execnodes.h:1039
ExprContext * ps_ExprContext
Definition: execnodes.h:1076
ProjectionInfo * ps_ProjInfo
Definition: execnodes.h:1077
#define TupIsNull(slot)
Definition: tuptable.h:299

References castNode, CHECK_FOR_INTERRUPTS, ExprContext::ecxt_outertuple, EState::es_use_parallel_mode, ExecInitParallelPlan(), ExecParallelCreateReaders(), ExecParallelReinitialize(), ExecProject(), gather_getnext(), if(), GatherState::initialized, Gather::initParam, LaunchParallelWorkers(), GatherState::need_to_scan_locally, GatherState::nextreader, GatherState::nreaders, Gather::num_workers, ParallelContext::nworkers_launched, GatherState::nworkers_launched, outerPlanState, palloc(), parallel_leader_participation, ParallelExecutorInfo::pcxt, GatherState::pei, PlanState::plan, GatherState::ps, PlanState::ps_ExprContext, PlanState::ps_ProjInfo, ParallelExecutorInfo::reader, GatherState::reader, ResetExprContext, Gather::single_copy, PlanState::state, TupIsNull, and GatherState::tuples_needed.

Referenced by ExecInitGather().
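
ExecGather() is never called directly; it runs through the ExecProcNode hook that ExecInitGather() installs. As a rough orientation, the sketch below (not part of nodeGather.c; the driver function and its loop are illustrative assumptions) shows the order in which the entry points documented on this page are normally exercised:

/*
 * Simplified, hypothetical driver showing the Gather life cycle.
 * ExecGather is reached indirectly through the ExecProcNode hook
 * that ExecInitGather sets up.
 */
static void
drive_gather(Gather *plan, EState *estate, int eflags)
{
    GatherState *gstate = ExecInitGather(plan, estate, eflags);

    for (;;)
    {
        /* each call lands in ExecGather via gstate->ps.ExecProcNode */
        TupleTableSlot *slot = ExecProcNode(&gstate->ps);

        if (TupIsNull(slot))
            break;              /* workers and local scan are exhausted */

        /* ... hand the slot to the parent node or DestReceiver ... */
    }

    ExecShutdownGather(gstate); /* stop workers, release the DSM segment */
    ExecEndGather(gstate);      /* free remaining per-node executor state */
}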

◆ ExecInitGather()

GatherState * ExecInitGather ( Gather * node,
EState * estate,
int  eflags 
)

Definition at line 58 of file nodeGather.c.

59 {
60  GatherState *gatherstate;
61  Plan *outerNode;
62  TupleDesc tupDesc;
63 
64  /* Gather node doesn't have innerPlan node. */
65  Assert(innerPlan(node) == NULL);
66 
67  /*
68  * create state structure
69  */
70  gatherstate = makeNode(GatherState);
71  gatherstate->ps.plan = (Plan *) node;
72  gatherstate->ps.state = estate;
73  gatherstate->ps.ExecProcNode = ExecGather;
74 
75  gatherstate->initialized = false;
76  gatherstate->need_to_scan_locally =
77  !node->single_copy && parallel_leader_participation;
78  gatherstate->tuples_needed = -1;
79 
80  /*
81  * Miscellaneous initialization
82  *
83  * create expression context for node
84  */
85  ExecAssignExprContext(estate, &gatherstate->ps);
86 
87  /*
88  * now initialize outer plan
89  */
90  outerNode = outerPlan(node);
91  outerPlanState(gatherstate) = ExecInitNode(outerNode, estate, eflags);
92  tupDesc = ExecGetResultType(outerPlanState(gatherstate));
93 
94  /*
95  * Leader may access ExecProcNode result directly (if
96  * need_to_scan_locally), or from workers via tuple queue. So we can't
97  * trivially rely on the slot type being fixed for expressions evaluated
98  * within this node.
99  */
100  gatherstate->ps.outeropsset = true;
101  gatherstate->ps.outeropsfixed = false;
102 
103  /*
104  * Initialize result type and projection.
105  */
106  ExecInitResultTypeTL(&gatherstate->ps);
107  ExecConditionalAssignProjectionInfo(&gatherstate->ps, tupDesc, OUTER_VAR);
108 
109  /*
110  * Without projections result slot type is not trivially known, see
111  * comment above.
112  */
113  if (gatherstate->ps.ps_ProjInfo == NULL)
114  {
115  gatherstate->ps.resultopsset = true;
116  gatherstate->ps.resultopsfixed = false;
117  }
118 
119  /*
120  * Initialize funnel slot to same tuple descriptor as outer plan.
121  */
122  gatherstate->funnel_slot = ExecInitExtraTupleSlot(estate, tupDesc,
123  &TTSOpsMinimalTuple);
124 
125  /*
126  * Gather doesn't support checking a qual (it's always more efficient to
127  * do it in the child node).
128  */
129  Assert(!node->plan.qual);
130 
131  return gatherstate;
132 }
PlanState * ExecInitNode(Plan *node, EState *estate, int eflags)
Definition: execProcnode.c:142
void ExecInitResultTypeTL(PlanState *planstate)
Definition: execTuples.c:1756
TupleTableSlot * ExecInitExtraTupleSlot(EState *estate, TupleDesc tupledesc, const TupleTableSlotOps *tts_ops)
Definition: execTuples.c:1832
const TupleTableSlotOps TTSOpsMinimalTuple
Definition: execTuples.c:85
TupleDesc ExecGetResultType(PlanState *planstate)
Definition: execUtils.c:498
void ExecAssignExprContext(EState *estate, PlanState *planstate)
Definition: execUtils.c:488
void ExecConditionalAssignProjectionInfo(PlanState *planstate, TupleDesc inputDesc, int varno)
Definition: execUtils.c:563
Assert(fmt[strlen(fmt) - 1] !='\n')
static TupleTableSlot * ExecGather(PlanState *pstate)
Definition: nodeGather.c:142
#define makeNode(_type_)
Definition: nodes.h:176
#define innerPlan(node)
Definition: plannodes.h:182
#define outerPlan(node)
Definition: plannodes.h:183
#define OUTER_VAR
Definition: primnodes.h:215
TupleTableSlot * funnel_slot
Definition: execnodes.h:2591
Plan plan
Definition: plannodes.h:1140
bool outeropsset
Definition: execnodes.h:1120
bool resultopsset
Definition: execnodes.h:1122
bool outeropsfixed
Definition: execnodes.h:1116
bool resultopsfixed
Definition: execnodes.h:1118
ExecProcNodeMtd ExecProcNode
Definition: execnodes.h:1043
List * qual
Definition: plannodes.h:154

References Assert(), ExecAssignExprContext(), ExecConditionalAssignProjectionInfo(), ExecGather(), ExecGetResultType(), ExecInitExtraTupleSlot(), ExecInitNode(), ExecInitResultTypeTL(), PlanState::ExecProcNode, GatherState::funnel_slot, GatherState::initialized, innerPlan, makeNode, GatherState::need_to_scan_locally, OUTER_VAR, PlanState::outeropsfixed, PlanState::outeropsset, outerPlan, outerPlanState, parallel_leader_participation, PlanState::plan, Gather::plan, GatherState::ps, PlanState::ps_ProjInfo, Plan::qual, PlanState::resultopsfixed, PlanState::resultopsset, Gather::single_copy, PlanState::state, TTSOpsMinimalTuple, and GatherState::tuples_needed.

Referenced by ExecInitNode().
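
ExecInitGather() itself is reached from the node-type dispatch in ExecInitNode() (execProcnode.c). The fragment below is a heavily reduced sketch of that switch, shown only for orientation (just the Gather arm; the real function handles every plan node type plus additional setup):

/* Reduced sketch of the dispatch in ExecInitNode (execProcnode.c). */
switch (nodeTag(node))
{
    case T_Gather:
        result = (PlanState *) ExecInitGather((Gather *) node,
                                              estate, eflags);
        break;

        /* ... one case per plan node type ... */

    default:
        elog(ERROR, "unrecognized node type: %d", (int) nodeTag(node));
        result = NULL;
        break;
}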

◆ ExecReScanGather()

void ExecReScanGather ( GatherState * node)

Definition at line 443 of file nodeGather.c.

444 {
445  Gather *gather = (Gather *) node->ps.plan;
446  PlanState *outerPlan = outerPlanState(node);
447 
448  /* Make sure any existing workers are gracefully shut down */
449  ExecShutdownGatherWorkers(node);
450 
451  /* Mark node so that shared state will be rebuilt at next call */
452  node->initialized = false;
453 
454  /*
455  * Set child node's chgParam to tell it that the next scan might deliver a
456  * different set of rows within the leader process. (The overall rowset
457  * shouldn't change, but the leader process's subset might; hence nodes
458  * between here and the parallel table scan node mustn't optimize on the
459  * assumption of an unchanging rowset.)
460  */
461  if (gather->rescan_param >= 0)
462  outerPlan->chgParam = bms_add_member(outerPlan->chgParam,
463  gather->rescan_param);
464 
465  /*
466  * If chgParam of subnode is not null then plan will be re-scanned by
467  * first ExecProcNode. Note: because this does nothing if we have a
468  * rescan_param, it's currently guaranteed that parallel-aware child nodes
469  * will not see a ReScan call until after they get a ReInitializeDSM call.
470  * That ordering might not be something to rely on, though. A good rule
471  * of thumb is that ReInitializeDSM should reset only shared state, ReScan
472  * should reset only local state, and anything that depends on both of
473  * those steps being finished must wait until the first ExecProcNode call.
474  */
475  if (outerPlan->chgParam == NULL)
476  ExecReScan(outerPlan);
477 }
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:753
void ExecReScan(PlanState *node)
Definition: execAmi.c:78
static void ExecShutdownGatherWorkers(GatherState *node)
Definition: nodeGather.c:401
int rescan_param
Definition: plannodes.h:1142

References bms_add_member(), ExecReScan(), ExecShutdownGatherWorkers(), GatherState::initialized, outerPlan, outerPlanState, PlanState::plan, GatherState::ps, and Gather::rescan_param.

Referenced by ExecReScan().

◆ ExecShutdownGather()

void ExecShutdownGather ( GatherState * node)

Definition at line 419 of file nodeGather.c.

420 {
421  ExecShutdownGatherWorkers(node);
422 
423  /* Now destroy the parallel context. */
424  if (node->pei != NULL)
425  {
426  ExecParallelCleanup(node->pei);
427  node->pei = NULL;
428  }
429 }
void ExecParallelCleanup(ParallelExecutorInfo *pei)

References ExecParallelCleanup(), ExecShutdownGatherWorkers(), and GatherState::pei.

Referenced by ExecEndGather(), and ExecShutdownNode_walker().
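
Besides the call from ExecEndGather(), this function is reached through ExecShutdownNode() when the caller already knows it will fetch no more tuples, for example once a LIMIT above the Gather is satisfied. That lets workers and the DSM segment be released early while the node's state stays around for EXPLAIN instrumentation. A minimal caller-side sketch (the condition is illustrative, not a real variable):

/* Hypothetical early-shutdown caller. */
if (no_more_tuples_needed)
    ExecShutdownNode(&gatherstate->ps);     /* eventually reaches ExecShutdownGather */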

◆ ExecShutdownGatherWorkers()

static void ExecShutdownGatherWorkers ( GatherState * node)
static

Definition at line 401 of file nodeGather.c.

402 {
403  if (node->pei != NULL)
404  ExecParallelFinish(node->pei);
405 
406  /* Flush local copy of reader array */
407  if (node->reader)
408  pfree(node->reader);
409  node->reader = NULL;
410 }
void ExecParallelFinish(ParallelExecutorInfo *pei)
void pfree(void *pointer)
Definition: mcxt.c:1456

References ExecParallelFinish(), GatherState::pei, pfree(), and GatherState::reader.

Referenced by ExecReScanGather(), ExecShutdownGather(), and gather_readnext().

◆ gather_getnext()

static TupleTableSlot * gather_getnext ( GatherState * gatherstate)
static

Definition at line 264 of file nodeGather.c.

265 {
266  PlanState *outerPlan = outerPlanState(gatherstate);
267  TupleTableSlot *outerTupleSlot;
268  TupleTableSlot *fslot = gatherstate->funnel_slot;
269  MinimalTuple tup;
270 
271  while (gatherstate->nreaders > 0 || gatherstate->need_to_scan_locally)
272  {
273  CHECK_FOR_INTERRUPTS();
274 
275  if (gatherstate->nreaders > 0)
276  {
277  tup = gather_readnext(gatherstate);
278 
279  if (HeapTupleIsValid(tup))
280  {
281  ExecStoreMinimalTuple(tup, /* tuple to store */
282  fslot, /* slot to store the tuple */
283  false); /* don't pfree tuple */
284  return fslot;
285  }
286  }
287 
288  if (gatherstate->need_to_scan_locally)
289  {
290  EState *estate = gatherstate->ps.state;
291 
292  /* Install our DSA area while executing the plan. */
293  estate->es_query_dsa =
294  gatherstate->pei ? gatherstate->pei->area : NULL;
295  outerTupleSlot = ExecProcNode(outerPlan);
296  estate->es_query_dsa = NULL;
297 
298  if (!TupIsNull(outerTupleSlot))
299  return outerTupleSlot;
300 
301  gatherstate->need_to_scan_locally = false;
302  }
303  }
304 
305  return ExecClearTuple(fslot);
306 }
TupleTableSlot * ExecStoreMinimalTuple(MinimalTuple mtup, TupleTableSlot *slot, bool shouldFree)
Definition: execTuples.c:1447
static TupleTableSlot * ExecProcNode(PlanState *node)
Definition: executor.h:268
#define HeapTupleIsValid(tuple)
Definition: htup.h:78
static MinimalTuple gather_readnext(GatherState *gatherstate)
Definition: nodeGather.c:312
struct dsa_area * es_query_dsa
Definition: execnodes.h:696

References ParallelExecutorInfo::area, CHECK_FOR_INTERRUPTS, EState::es_query_dsa, ExecClearTuple(), ExecProcNode(), ExecStoreMinimalTuple(), GatherState::funnel_slot, gather_readnext(), HeapTupleIsValid, GatherState::need_to_scan_locally, GatherState::nreaders, outerPlan, outerPlanState, GatherState::pei, GatherState::ps, PlanState::state, and TupIsNull.

Referenced by ExecGather().

◆ gather_readnext()

static MinimalTuple gather_readnext ( GatherState * gatherstate)
static

Definition at line 312 of file nodeGather.c.

313 {
314  int nvisited = 0;
315 
316  for (;;)
317  {
318  TupleQueueReader *reader;
319  MinimalTuple tup;
320  bool readerdone;
321 
322  /* Check for async events, particularly messages from workers. */
323  CHECK_FOR_INTERRUPTS();
324 
325  /*
326  * Attempt to read a tuple, but don't block if none is available.
327  *
328  * Note that TupleQueueReaderNext will just return NULL for a worker
329  * which fails to initialize. We'll treat that worker as having
330  * produced no tuples; WaitForParallelWorkersToFinish will error out
331  * when we get there.
332  */
333  Assert(gatherstate->nextreader < gatherstate->nreaders);
334  reader = gatherstate->reader[gatherstate->nextreader];
335  tup = TupleQueueReaderNext(reader, true, &readerdone);
336 
337  /*
338  * If this reader is done, remove it from our working array of active
339  * readers. If all readers are done, we're outta here.
340  */
341  if (readerdone)
342  {
343  Assert(!tup);
344  --gatherstate->nreaders;
345  if (gatherstate->nreaders == 0)
346  {
347  ExecShutdownGatherWorkers(gatherstate);
348  return NULL;
349  }
350  memmove(&gatherstate->reader[gatherstate->nextreader],
351  &gatherstate->reader[gatherstate->nextreader + 1],
352  sizeof(TupleQueueReader *)
353  * (gatherstate->nreaders - gatherstate->nextreader));
354  if (gatherstate->nextreader >= gatherstate->nreaders)
355  gatherstate->nextreader = 0;
356  continue;
357  }
358 
359  /* If we got a tuple, return it. */
360  if (tup)
361  return tup;
362 
363  /*
364  * Advance nextreader pointer in round-robin fashion. Note that we
365  * only reach this code if we weren't able to get a tuple from the
366  * current worker. We used to advance the nextreader pointer after
367  * every tuple, but it turns out to be much more efficient to keep
368  * reading from the same queue until that would require blocking.
369  */
370  gatherstate->nextreader++;
371  if (gatherstate->nextreader >= gatherstate->nreaders)
372  gatherstate->nextreader = 0;
373 
374  /* Have we visited every (surviving) TupleQueueReader? */
375  nvisited++;
376  if (nvisited >= gatherstate->nreaders)
377  {
378  /*
379  * If (still) running plan locally, return NULL so caller can
380  * generate another tuple from the local copy of the plan.
381  */
382  if (gatherstate->need_to_scan_locally)
383  return NULL;
384 
385  /* Nothing to do except wait for developments. */
386  (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
387  WAIT_EVENT_EXECUTE_GATHER);
388  ResetLatch(MyLatch);
389  nvisited = 0;
390  }
391  }
392 }
struct Latch * MyLatch
Definition: globals.c:58
void ResetLatch(Latch *latch)
Definition: latch.c:697
int WaitLatch(Latch *latch, int wakeEvents, long timeout, uint32 wait_event_info)
Definition: latch.c:490
#define WL_EXIT_ON_PM_DEATH
Definition: latch.h:130
#define WL_LATCH_SET
Definition: latch.h:125
MinimalTuple TupleQueueReaderNext(TupleQueueReader *reader, bool nowait, bool *done)
Definition: tqueue.c:176

References Assert(), CHECK_FOR_INTERRUPTS, ExecShutdownGatherWorkers(), MyLatch, GatherState::need_to_scan_locally, GatherState::nextreader, GatherState::nreaders, GatherState::reader, ResetLatch(), TupleQueueReaderNext(), WaitLatch(), WL_EXIT_ON_PM_DEATH, and WL_LATCH_SET.

Referenced by gather_getnext().
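
The queues drained by this function are filled on the worker side through the tqueue DestReceiver: each parallel worker runs its portion of the plan and pushes every result tuple into its shm_mq, which TupleQueueReaderNext() then pops in the leader. A rough, illustrative sketch of that producer half (the variable mqh and the surrounding setup are assumptions; the real wiring lives in execParallel.c and tqueue.c):

/* Worker side, simplified: route executor output into the shared-memory
 * queue that the leader's TupleQueueReader drains. */
DestReceiver *dest = CreateTupleQueueDestReceiver(mqh);    /* mqh: shm_mq_handle * */

/* ... run the worker's plan fragment, sending each tuple to dest ... */

dest->rDestroy(dest);   /* detach the queue; the leader then sees readerdone */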