PostgreSQL Source Code  git master
nodeAppend.h File Reference
#include "access/parallel.h"
#include "nodes/execnodes.h"
Include dependency graph for nodeAppend.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Functions

AppendState *ExecInitAppend (Append *node, EState *estate, int eflags)
 
void ExecEndAppend (AppendState *node)
 
void ExecReScanAppend (AppendState *node)
 
void ExecAppendEstimate (AppendState *node, ParallelContext *pcxt)
 
void ExecAppendInitializeDSM (AppendState *node, ParallelContext *pcxt)
 
void ExecAppendReInitializeDSM (AppendState *node, ParallelContext *pcxt)
 
void ExecAppendInitializeWorker (AppendState *node, ParallelWorkerContext *pwcxt)
 
void ExecAsyncAppendResponse (AsyncRequest *areq)
 

Function Documentation

◆ ExecAppendEstimate()

void ExecAppendEstimate ( AppendState *node,
ParallelContext *pcxt 
)

Definition at line 495 of file nodeAppend.c.

References add_size(), AppendState::as_nplans, ParallelContext::estimator, offsetof, ParallelAppendState::pa_finished, AppendState::pstate_len, shm_toc_estimate_chunk, and shm_toc_estimate_keys.

Referenced by ExecParallelEstimate().

497 {
498  node->pstate_len =
499  add_size(offsetof(ParallelAppendState, pa_finished),
500  sizeof(bool) * node->as_nplans);
501 
503  shm_toc_estimate_keys(&pcxt->estimator, 1);
504 }
Size pstate_len
Definition: execnodes.h:1272
shm_toc_estimator estimator
Definition: parallel.h:42
#define shm_toc_estimate_chunk(e, sz)
Definition: shm_toc.h:51
Size add_size(Size s1, Size s2)
Definition: shmem.c:502
#define shm_toc_estimate_keys(e, cnt)
Definition: shm_toc.h:53
#define offsetof(type, field)
Definition: c.h:727

◆ ExecAppendInitializeDSM()

void ExecAppendInitializeDSM ( AppendState *node,
ParallelContext *pcxt 
)

Definition at line 514 of file nodeAppend.c.

References AppendState::as_pstate, AppendState::choose_next_subplan, choose_next_subplan_for_leader(), LWLockInitialize(), LWTRANCHE_PARALLEL_APPEND, ParallelAppendState::pa_lock, PlanState::plan, Plan::plan_node_id, AppendState::ps, AppendState::pstate_len, shm_toc_allocate(), shm_toc_insert(), and ParallelContext::toc.

Referenced by ExecParallelInitializeDSM().

516 {
517  ParallelAppendState *pstate;
518 
519  pstate = shm_toc_allocate(pcxt->toc, node->pstate_len);
520  memset(pstate, 0, node->pstate_len);
522  shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id, pstate);
523 
524  node->as_pstate = pstate;
526 }
Size pstate_len
Definition: execnodes.h:1272
int plan_node_id
Definition: plannodes.h:140
PlanState ps
Definition: execnodes.h:1253
ParallelAppendState * as_pstate
Definition: execnodes.h:1271
bool(* choose_next_subplan)(AppendState *)
Definition: execnodes.h:1276
void LWLockInitialize(LWLock *lock, int tranche_id)
Definition: lwlock.c:736
Plan * plan
Definition: execnodes.h:966
static bool choose_next_subplan_for_leader(AppendState *node)
Definition: nodeAppend.c:628
void * shm_toc_allocate(shm_toc *toc, Size nbytes)
Definition: shm_toc.c:88
void shm_toc_insert(shm_toc *toc, uint64 key, void *address)
Definition: shm_toc.c:171
shm_toc * toc
Definition: parallel.h:45

◆ ExecAppendInitializeWorker()

void ExecAppendInitializeWorker ( AppendState *node,
ParallelWorkerContext *pwcxt 
)

Definition at line 551 of file nodeAppend.c.

References AppendState::as_pstate, AppendState::choose_next_subplan, choose_next_subplan_for_worker(), PlanState::plan, Plan::plan_node_id, AppendState::ps, shm_toc_lookup(), and ParallelWorkerContext::toc.

Referenced by ExecParallelInitializeWorker().

552 {
553  node->as_pstate = shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false);
555 }
int plan_node_id
Definition: plannodes.h:140
PlanState ps
Definition: execnodes.h:1253
ParallelAppendState * as_pstate
Definition: execnodes.h:1271
bool(* choose_next_subplan)(AppendState *)
Definition: execnodes.h:1276
static bool choose_next_subplan_for_worker(AppendState *node)
Definition: nodeAppend.c:709
Plan * plan
Definition: execnodes.h:966
void * shm_toc_lookup(shm_toc *toc, uint64 key, bool noError)
Definition: shm_toc.c:232

◆ ExecAppendReInitializeDSM()

void ExecAppendReInitializeDSM ( AppendState *node,
ParallelContext *pcxt 
)

Definition at line 535 of file nodeAppend.c.

References AppendState::as_nplans, AppendState::as_pstate, ParallelAppendState::pa_finished, and ParallelAppendState::pa_next_plan.

Referenced by ExecParallelReInitializeDSM().

536 {
537  ParallelAppendState *pstate = node->as_pstate;
538 
539  pstate->pa_next_plan = 0;
540  memset(pstate->pa_finished, 0, sizeof(bool) * node->as_nplans);
541 }
ParallelAppendState * as_pstate
Definition: execnodes.h:1271
bool pa_finished[FLEXIBLE_ARRAY_MEMBER]
Definition: nodeAppend.c:80

◆ ExecAsyncAppendResponse()

void ExecAsyncAppendResponse ( AsyncRequest *areq)

Definition at line 1108 of file nodeAppend.c.

References AppendState::as_asyncresults, AppendState::as_nasyncplans, AppendState::as_nasyncremain, AppendState::as_nasyncresults, AppendState::as_needrequest, Assert, bms_add_member(), AsyncRequest::callback_pending, IsA, AsyncRequest::request_complete, AsyncRequest::request_index, AsyncRequest::requestor, AsyncRequest::result, and TupIsNull.

Referenced by ExecAsyncResponse().

1109 {
1110  AppendState *node = (AppendState *) areq->requestor;
1111  TupleTableSlot *slot = areq->result;
1112 
1113  /* The result should be a TupleTableSlot or NULL. */
1114  Assert(slot == NULL || IsA(slot, TupleTableSlot));
1115 
1116  /* Nothing to do if the request is pending. */
1117  if (!areq->request_complete)
1118  {
1119  /* The request would have been pending for a callback. */
1120  Assert(areq->callback_pending);
1121  return;
1122  }
1123 
1124  /* If the result is NULL or an empty slot, there's nothing more to do. */
1125  if (TupIsNull(slot))
1126  {
1127  /* The ending subplan wouldn't have been pending for a callback. */
1128  Assert(!areq->callback_pending);
1129  --node->as_nasyncremain;
1130  return;
1131  }
1132 
1133  /* Save result so we can return it. */
1134  Assert(node->as_nasyncresults < node->as_nasyncplans);
1135  node->as_asyncresults[node->as_nasyncresults++] = slot;
1136 
1137  /*
1138  * Mark the subplan that returned a result as ready for a new request. We
1139  * don't launch another one here immediately because it might complete.
1140  */
1142  areq->request_index);
1143 }
#define IsA(nodeptr, _type_)
Definition: nodes.h:587
int as_nasyncremain
Definition: execnodes.h:1265
int as_nasyncplans
Definition: execnodes.h:1259
struct PlanState * requestor
Definition: execnodes.h:537
int as_nasyncresults
Definition: execnodes.h:1262
#define TupIsNull(slot)
Definition: tuptable.h:292
TupleTableSlot * result
Definition: execnodes.h:542
bool callback_pending
Definition: execnodes.h:540
#define Assert(condition)
Definition: c.h:804
int request_index
Definition: execnodes.h:539
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:736
Bitmapset * as_needrequest
Definition: execnodes.h:1266
bool request_complete
Definition: execnodes.h:541
TupleTableSlot ** as_asyncresults
Definition: execnodes.h:1261

◆ ExecEndAppend()

void ExecEndAppend ( AppendState *node)

Definition at line 395 of file nodeAppend.c.

References AppendState::appendplans, AppendState::as_nplans, ExecEndNode(), and i.

Referenced by ExecEndNode().

396 {
397  PlanState **appendplans;
398  int nplans;
399  int i;
400 
401  /*
402  * get information from the node
403  */
404  appendplans = node->appendplans;
405  nplans = node->as_nplans;
406 
407  /*
408  * shut down each of the subscans
409  */
410  for (i = 0; i < nplans; i++)
411  ExecEndNode(appendplans[i]);
412 }
void ExecEndNode(PlanState *node)
Definition: execProcnode.c:556
PlanState ** appendplans
Definition: execnodes.h:1254
int i

◆ ExecInitAppend()

AppendState * ExecInitAppend ( Append *node,
EState *estate,
int  eflags 
)

Definition at line 109 of file nodeAppend.c.

References Append::appendplans, AppendState::appendplans, AppendState::as_asyncplans, AppendState::as_asyncrequests, AppendState::as_asyncresults, AppendState::as_begun, AppendState::as_eventset, AppendState::as_first_partial_plan, AppendState::as_nasyncplans, AppendState::as_nasyncremain, AppendState::as_nasyncresults, AppendState::as_needrequest, AppendState::as_nplans, AppendState::as_prune_state, AppendState::as_syncdone, AppendState::as_valid_asyncplans, AppendState::as_valid_subplans, AppendState::as_whichplan, Assert, Plan::async_capable, bms_add_member(), bms_add_range(), bms_next_member(), bms_num_members(), AsyncRequest::callback_pending, AppendState::choose_next_subplan, choose_next_subplan_locally(), classify_matching_subplans(), PartitionPruneState::do_exec_prune, PartitionPruneState::do_initial_prune, EState::es_epq_active, EXEC_FLAG_MARK, ExecAppend(), ExecAssignExprContext(), ExecCreatePartitionPruneState(), ExecFindInitialMatchingSubPlans(), ExecInitNode(), ExecInitResultTupleSlotTL(), PlanState::ExecProcNode, Append::first_partial_plan, i, INVALID_SUBPLAN_INDEX, list_length(), list_nth(), makeNode, palloc(), palloc0(), Append::part_prune_info, PlanState::plan, AppendState::ps, PlanState::ps_ProjInfo, AsyncRequest::request_complete, AsyncRequest::request_index, AsyncRequest::requestee, AsyncRequest::requestor, AsyncRequest::result, PlanState::resultopsfixed, PlanState::resultopsset, PlanState::state, and TTSOpsVirtual.

Referenced by ExecInitNode().

110 {
111  AppendState *appendstate = makeNode(AppendState);
112  PlanState **appendplanstates;
113  Bitmapset *validsubplans;
114  Bitmapset *asyncplans;
115  int nplans;
116  int nasyncplans;
117  int firstvalid;
118  int i,
119  j;
120 
121  /* check for unsupported flags */
122  Assert(!(eflags & EXEC_FLAG_MARK));
123 
124  /*
125  * create new AppendState for our append node
126  */
127  appendstate->ps.plan = (Plan *) node;
128  appendstate->ps.state = estate;
129  appendstate->ps.ExecProcNode = ExecAppend;
130 
131  /* Let choose_next_subplan_* function handle setting the first subplan */
132  appendstate->as_whichplan = INVALID_SUBPLAN_INDEX;
133  appendstate->as_syncdone = false;
134  appendstate->as_begun = false;
135 
136  /* If run-time partition pruning is enabled, then set that up now */
137  if (node->part_prune_info != NULL)
138  {
139  PartitionPruneState *prunestate;
140 
141  /* We may need an expression context to evaluate partition exprs */
142  ExecAssignExprContext(estate, &appendstate->ps);
143 
144  /* Create the working data structure for pruning. */
145  prunestate = ExecCreatePartitionPruneState(&appendstate->ps,
146  node->part_prune_info);
147  appendstate->as_prune_state = prunestate;
148 
149  /* Perform an initial partition prune, if required. */
150  if (prunestate->do_initial_prune)
151  {
152  /* Determine which subplans survive initial pruning */
153  validsubplans = ExecFindInitialMatchingSubPlans(prunestate,
154  list_length(node->appendplans));
155 
156  nplans = bms_num_members(validsubplans);
157  }
158  else
159  {
160  /* We'll need to initialize all subplans */
161  nplans = list_length(node->appendplans);
162  Assert(nplans > 0);
163  validsubplans = bms_add_range(NULL, 0, nplans - 1);
164  }
165 
166  /*
167  * When no run-time pruning is required and there's at least one
168  * subplan, we can fill as_valid_subplans immediately, preventing
169  * later calls to ExecFindMatchingSubPlans.
170  */
171  if (!prunestate->do_exec_prune && nplans > 0)
172  appendstate->as_valid_subplans = bms_add_range(NULL, 0, nplans - 1);
173  }
174  else
175  {
176  nplans = list_length(node->appendplans);
177 
178  /*
179  * When run-time partition pruning is not enabled we can just mark all
180  * subplans as valid; they must also all be initialized.
181  */
182  Assert(nplans > 0);
183  appendstate->as_valid_subplans = validsubplans =
184  bms_add_range(NULL, 0, nplans - 1);
185  appendstate->as_prune_state = NULL;
186  }
187 
188  /*
189  * Initialize result tuple type and slot.
190  */
191  ExecInitResultTupleSlotTL(&appendstate->ps, &TTSOpsVirtual);
192 
193  /* node returns slots from each of its subnodes, therefore not fixed */
194  appendstate->ps.resultopsset = true;
195  appendstate->ps.resultopsfixed = false;
196 
197  appendplanstates = (PlanState **) palloc(nplans *
198  sizeof(PlanState *));
199 
200  /*
201  * call ExecInitNode on each of the valid plans to be executed and save
202  * the results into the appendplanstates array.
203  *
204  * While at it, find out the first valid partial plan.
205  */
206  j = 0;
207  asyncplans = NULL;
208  nasyncplans = 0;
209  firstvalid = nplans;
210  i = -1;
211  while ((i = bms_next_member(validsubplans, i)) >= 0)
212  {
213  Plan *initNode = (Plan *) list_nth(node->appendplans, i);
214 
215  /*
216  * Record async subplans. When executing EvalPlanQual, we treat them
217  * as sync ones; don't do this when initializing an EvalPlanQual plan
218  * tree.
219  */
220  if (initNode->async_capable && estate->es_epq_active == NULL)
221  {
222  asyncplans = bms_add_member(asyncplans, j);
223  nasyncplans++;
224  }
225 
226  /*
227  * Record the lowest appendplans index which is a valid partial plan.
228  */
229  if (i >= node->first_partial_plan && j < firstvalid)
230  firstvalid = j;
231 
232  appendplanstates[j++] = ExecInitNode(initNode, estate, eflags);
233  }
234 
235  appendstate->as_first_partial_plan = firstvalid;
236  appendstate->appendplans = appendplanstates;
237  appendstate->as_nplans = nplans;
238 
239  /* Initialize async state */
240  appendstate->as_asyncplans = asyncplans;
241  appendstate->as_nasyncplans = nasyncplans;
242  appendstate->as_asyncrequests = NULL;
243  appendstate->as_asyncresults = NULL;
244  appendstate->as_nasyncresults = 0;
245  appendstate->as_nasyncremain = 0;
246  appendstate->as_needrequest = NULL;
247  appendstate->as_eventset = NULL;
248  appendstate->as_valid_asyncplans = NULL;
249 
250  if (nasyncplans > 0)
251  {
252  appendstate->as_asyncrequests = (AsyncRequest **)
253  palloc0(nplans * sizeof(AsyncRequest *));
254 
255  i = -1;
256  while ((i = bms_next_member(asyncplans, i)) >= 0)
257  {
258  AsyncRequest *areq;
259 
260  areq = palloc(sizeof(AsyncRequest));
261  areq->requestor = (PlanState *) appendstate;
262  areq->requestee = appendplanstates[i];
263  areq->request_index = i;
264  areq->callback_pending = false;
265  areq->request_complete = false;
266  areq->result = NULL;
267 
268  appendstate->as_asyncrequests[i] = areq;
269  }
270 
271  appendstate->as_asyncresults = (TupleTableSlot **)
272  palloc0(nasyncplans * sizeof(TupleTableSlot *));
273 
274  if (appendstate->as_valid_subplans != NULL)
275  classify_matching_subplans(appendstate);
276  }
277 
278  /*
279  * Miscellaneous initialization
280  */
281 
282  appendstate->ps.ps_ProjInfo = NULL;
283 
284  /* For parallel query, this will be overridden later. */
286 
287  return appendstate;
288 }
static void classify_matching_subplans(AppendState *node)
Definition: nodeAppend.c:1154
static TupleTableSlot * ExecAppend(PlanState *pstate)
Definition: nodeAppend.c:297
int as_nasyncremain
Definition: execnodes.h:1265
int as_nasyncplans
Definition: execnodes.h:1259
ProjectionInfo * ps_ProjInfo
Definition: execnodes.h:1006
struct PartitionPruneState * as_prune_state
Definition: execnodes.h:1273
Bitmapset * as_asyncplans
Definition: execnodes.h:1258
Bitmapset * as_valid_asyncplans
Definition: execnodes.h:1275
struct PlanState * requestor
Definition: execnodes.h:537
static bool choose_next_subplan_locally(AppendState *node)
Definition: nodeAppend.c:565
int bms_next_member(const Bitmapset *a, int prevbit)
Definition: bitmapset.c:1043
const TupleTableSlotOps TTSOpsVirtual
Definition: execTuples.c:83
EState * state
Definition: execnodes.h:968
int as_nasyncresults
Definition: execnodes.h:1262
struct EPQState * es_epq_active
Definition: execnodes.h:629
PlanState ps
Definition: execnodes.h:1253
bool as_begun
Definition: execnodes.h:1257
List * appendplans
Definition: plannodes.h:253
bool async_capable
Definition: plannodes.h:135
struct WaitEventSet * as_eventset
Definition: execnodes.h:1267
bool as_syncdone
Definition: execnodes.h:1263
Bitmapset * bms_add_range(Bitmapset *a, int lower, int upper)
Definition: bitmapset.c:834
struct PlanState * requestee
Definition: execnodes.h:538
static void * list_nth(const List *list, int n)
Definition: pg_list.h:278
int first_partial_plan
Definition: plannodes.h:260
bool(* choose_next_subplan)(AppendState *)
Definition: execnodes.h:1276
int bms_num_members(const Bitmapset *a)
Definition: bitmapset.c:646
Bitmapset * ExecFindInitialMatchingSubPlans(PartitionPruneState *prunestate, int nsubplans)
int as_first_partial_plan
Definition: execnodes.h:1269
bool resultopsset
Definition: execnodes.h:1051
#define INVALID_SUBPLAN_INDEX
Definition: nodeAppend.c:83
TupleTableSlot * result
Definition: execnodes.h:542
void * palloc0(Size size)
Definition: mcxt.c:1093
ExecProcNodeMtd ExecProcNode
Definition: execnodes.h:972
Bitmapset * as_valid_subplans
Definition: execnodes.h:1274
Plan * plan
Definition: execnodes.h:966
bool callback_pending
Definition: execnodes.h:540
#define makeNode(_type_)
Definition: nodes.h:584
struct PartitionPruneInfo * part_prune_info
Definition: plannodes.h:263
#define Assert(condition)
Definition: c.h:804
#define EXEC_FLAG_MARK
Definition: executor.h:59
int as_whichplan
Definition: execnodes.h:1256
void ExecAssignExprContext(EState *estate, PlanState *planstate)
Definition: execUtils.c:480
int request_index
Definition: execnodes.h:539
static int list_length(const List *l)
Definition: pg_list.h:149
void ExecInitResultTupleSlotTL(PlanState *planstate, const TupleTableSlotOps *tts_ops)
Definition: execTuples.c:1799
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:736
bool resultopsfixed
Definition: execnodes.h:1047
AsyncRequest ** as_asyncrequests
Definition: execnodes.h:1260
void * palloc(Size size)
Definition: mcxt.c:1062
PlanState ** appendplans
Definition: execnodes.h:1254
int i
Bitmapset * as_needrequest
Definition: execnodes.h:1266
PartitionPruneState * ExecCreatePartitionPruneState(PlanState *planstate, PartitionPruneInfo *partitionpruneinfo)
PlanState * ExecInitNode(Plan *node, EState *estate, int eflags)
Definition: execProcnode.c:141
bool request_complete
Definition: execnodes.h:541
TupleTableSlot ** as_asyncresults
Definition: execnodes.h:1261

◆ ExecReScanAppend()

void ExecReScanAppend ( AppendState *node)

Definition at line 415 of file nodeAppend.c.

References AppendState::appendplans, AppendState::as_asyncplans, AppendState::as_asyncrequests, AppendState::as_begun, AppendState::as_nasyncplans, AppendState::as_nasyncremain, AppendState::as_nasyncresults, AppendState::as_needrequest, AppendState::as_nplans, AppendState::as_prune_state, AppendState::as_syncdone, AppendState::as_valid_asyncplans, AppendState::as_valid_subplans, AppendState::as_whichplan, bms_free(), bms_next_member(), bms_overlap(), AsyncRequest::callback_pending, PlanState::chgParam, PartitionPruneState::execparamids, ExecReScan(), i, INVALID_SUBPLAN_INDEX, AppendState::ps, AsyncRequest::request_complete, AsyncRequest::result, and UpdateChangedParamSet().

Referenced by ExecReScan().

416 {
417  int nasyncplans = node->as_nasyncplans;
418  int i;
419 
420  /*
421  * If any PARAM_EXEC Params used in pruning expressions have changed, then
422  * we'd better unset the valid subplans so that they are reselected for
423  * the new parameter values.
424  */
425  if (node->as_prune_state &&
426  bms_overlap(node->ps.chgParam,
428  {
430  node->as_valid_subplans = NULL;
431  if (nasyncplans > 0)
432  {
434  node->as_valid_asyncplans = NULL;
435  }
436  }
437 
438  for (i = 0; i < node->as_nplans; i++)
439  {
440  PlanState *subnode = node->appendplans[i];
441 
442  /*
443  * ExecReScan doesn't know about my subplans, so I have to do
444  * changed-parameter signaling myself.
445  */
446  if (node->ps.chgParam != NULL)
447  UpdateChangedParamSet(subnode, node->ps.chgParam);
448 
449  /*
450  * If chgParam of subnode is not null then plan will be re-scanned by
451  * first ExecProcNode or by first ExecAsyncRequest.
452  */
453  if (subnode->chgParam == NULL)
454  ExecReScan(subnode);
455  }
456 
457  /* Reset async state */
458  if (nasyncplans > 0)
459  {
460  i = -1;
461  while ((i = bms_next_member(node->as_asyncplans, i)) >= 0)
462  {
463  AsyncRequest *areq = node->as_asyncrequests[i];
464 
465  areq->callback_pending = false;
466  areq->request_complete = false;
467  areq->result = NULL;
468  }
469 
470  node->as_nasyncresults = 0;
471  node->as_nasyncremain = 0;
472  bms_free(node->as_needrequest);
473  node->as_needrequest = NULL;
474  }
475 
476  /* Let choose_next_subplan_* function handle setting the first subplan */
478  node->as_syncdone = false;
479  node->as_begun = false;
480 }
int as_nasyncremain
Definition: execnodes.h:1265
int as_nasyncplans
Definition: execnodes.h:1259
struct PartitionPruneState * as_prune_state
Definition: execnodes.h:1273
Bitmapset * as_asyncplans
Definition: execnodes.h:1258
Bitmapset * as_valid_asyncplans
Definition: execnodes.h:1275
void ExecReScan(PlanState *node)
Definition: execAmi.c:78
int bms_next_member(const Bitmapset *a, int prevbit)
Definition: bitmapset.c:1043
int as_nasyncresults
Definition: execnodes.h:1262
PlanState ps
Definition: execnodes.h:1253
bool as_begun
Definition: execnodes.h:1257
bool as_syncdone
Definition: execnodes.h:1263
Bitmapset * execparamids
Bitmapset * chgParam
Definition: execnodes.h:998
#define INVALID_SUBPLAN_INDEX
Definition: nodeAppend.c:83
TupleTableSlot * result
Definition: execnodes.h:542
Bitmapset * as_valid_subplans
Definition: execnodes.h:1274
void UpdateChangedParamSet(PlanState *node, Bitmapset *newchg)
Definition: execUtils.c:864
bool callback_pending
Definition: execnodes.h:540
void bms_free(Bitmapset *a)
Definition: bitmapset.c:208
int as_whichplan
Definition: execnodes.h:1256
bool bms_overlap(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:494
AsyncRequest ** as_asyncrequests
Definition: execnodes.h:1260
PlanState ** appendplans
Definition: execnodes.h:1254
int i
Bitmapset * as_needrequest
Definition: execnodes.h:1266
bool request_complete
Definition: execnodes.h:541