PostgreSQL Source Code git master
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Pages
nodeAppend.c File Reference
#include "postgres.h"
#include "executor/execAsync.h"
#include "executor/execPartition.h"
#include "executor/executor.h"
#include "executor/nodeAppend.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "storage/latch.h"
Include dependency graph for nodeAppend.c:

Go to the source code of this file.

Data Structures

struct  ParallelAppendState
 

Macros

#define INVALID_SUBPLAN_INDEX   -1
 
#define EVENT_BUFFER_SIZE   16
 

Functions

static TupleTableSlot * ExecAppend (PlanState *pstate)
 
static bool choose_next_subplan_locally (AppendState *node)
 
static bool choose_next_subplan_for_leader (AppendState *node)
 
static bool choose_next_subplan_for_worker (AppendState *node)
 
static void mark_invalid_subplans_as_finished (AppendState *node)
 
static void ExecAppendAsyncBegin (AppendState *node)
 
static bool ExecAppendAsyncGetNext (AppendState *node, TupleTableSlot **result)
 
static bool ExecAppendAsyncRequest (AppendState *node, TupleTableSlot **result)
 
static void ExecAppendAsyncEventWait (AppendState *node)
 
static void classify_matching_subplans (AppendState *node)
 
AppendState * ExecInitAppend (Append *node, EState *estate, int eflags)
 
void ExecEndAppend (AppendState *node)
 
void ExecReScanAppend (AppendState *node)
 
void ExecAppendEstimate (AppendState *node, ParallelContext *pcxt)
 
void ExecAppendInitializeDSM (AppendState *node, ParallelContext *pcxt)
 
void ExecAppendReInitializeDSM (AppendState *node, ParallelContext *pcxt)
 
void ExecAppendInitializeWorker (AppendState *node, ParallelWorkerContext *pwcxt)
 
void ExecAsyncAppendResponse (AsyncRequest *areq)
 

Macro Definition Documentation

◆ EVENT_BUFFER_SIZE

#define EVENT_BUFFER_SIZE   16

Definition at line 84 of file nodeAppend.c.

◆ INVALID_SUBPLAN_INDEX

#define INVALID_SUBPLAN_INDEX   -1

Definition at line 83 of file nodeAppend.c.

Function Documentation

◆ choose_next_subplan_for_leader()

static bool choose_next_subplan_for_leader ( AppendState *node)
static

Definition at line 634 of file nodeAppend.c.

635{
636 ParallelAppendState *pstate = node->as_pstate;
637
638 /* Backward scan is not supported by parallel-aware plans */
640
641 /* We should never be called when there are no subplans */
642 Assert(node->as_nplans > 0);
643
645
647 {
648 /* Mark just-completed subplan as finished. */
649 node->as_pstate->pa_finished[node->as_whichplan] = true;
650 }
651 else
652 {
653 /* Start with last subplan. */
654 node->as_whichplan = node->as_nplans - 1;
655
656 /*
657 * If we've yet to determine the valid subplans then do so now. If
658 * run-time pruning is disabled then the valid subplans will always be
659 * set to all subplans.
660 */
662 {
663 node->as_valid_subplans =
665 node->as_valid_subplans_identified = true;
666
667 /*
668 * Mark each invalid plan as finished to allow the loop below to
669 * select the first valid subplan.
670 */
672 }
673 }
674
675 /* Loop until we find a subplan to execute. */
676 while (pstate->pa_finished[node->as_whichplan])
677 {
678 if (node->as_whichplan == 0)
679 {
682 LWLockRelease(&pstate->pa_lock);
683 return false;
684 }
685
686 /*
687 * We needn't pay attention to as_valid_subplans here as all invalid
688 * plans have been marked as finished.
689 */
690 node->as_whichplan--;
691 }
692
693 /* If non-partial, immediately mark as finished. */
694 if (node->as_whichplan < node->as_first_partial_plan)
695 node->as_pstate->pa_finished[node->as_whichplan] = true;
696
697 LWLockRelease(&pstate->pa_lock);
698
699 return true;
700}
#define Assert(condition)
Definition: c.h:812
Bitmapset * ExecFindMatchingSubPlans(PartitionPruneState *prunestate, bool initial_prune)
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1168
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1781
@ LW_EXCLUSIVE
Definition: lwlock.h:114
static void mark_invalid_subplans_as_finished(AppendState *node)
Definition: nodeAppend.c:842
#define INVALID_SUBPLAN_INDEX
Definition: nodeAppend.c:83
#define ScanDirectionIsForward(direction)
Definition: sdir.h:64
struct PartitionPruneState * as_prune_state
Definition: execnodes.h:1468
int as_whichplan
Definition: execnodes.h:1451
int as_first_partial_plan
Definition: execnodes.h:1464
PlanState ps
Definition: execnodes.h:1448
ParallelAppendState * as_pstate
Definition: execnodes.h:1466
Bitmapset * as_valid_subplans
Definition: execnodes.h:1470
bool as_valid_subplans_identified
Definition: execnodes.h:1469
ScanDirection es_direction
Definition: execnodes.h:631
bool pa_finished[FLEXIBLE_ARRAY_MEMBER]
Definition: nodeAppend.c:80
EState * state
Definition: execnodes.h:1128

References AppendState::as_first_partial_plan, AppendState::as_nplans, AppendState::as_prune_state, AppendState::as_pstate, AppendState::as_valid_subplans, AppendState::as_valid_subplans_identified, AppendState::as_whichplan, Assert, EState::es_direction, ExecFindMatchingSubPlans(), INVALID_SUBPLAN_INDEX, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), mark_invalid_subplans_as_finished(), ParallelAppendState::pa_finished, ParallelAppendState::pa_lock, ParallelAppendState::pa_next_plan, AppendState::ps, ScanDirectionIsForward, and PlanState::state.

Referenced by ExecAppendInitializeDSM().

◆ choose_next_subplan_for_worker()

static bool choose_next_subplan_for_worker ( AppendState *node)
static

Definition at line 716 of file nodeAppend.c.

717{
718 ParallelAppendState *pstate = node->as_pstate;
719
720 /* Backward scan is not supported by parallel-aware plans */
722
723 /* We should never be called when there are no subplans */
724 Assert(node->as_nplans > 0);
725
727
728 /* Mark just-completed subplan as finished. */
730 node->as_pstate->pa_finished[node->as_whichplan] = true;
731
732 /*
733 * If we've yet to determine the valid subplans then do so now. If
734 * run-time pruning is disabled then the valid subplans will always be set
735 * to all subplans.
736 */
737 else if (!node->as_valid_subplans_identified)
738 {
739 node->as_valid_subplans =
741 node->as_valid_subplans_identified = true;
742
744 }
745
746 /* If all the plans are already done, we have nothing to do */
747 if (pstate->pa_next_plan == INVALID_SUBPLAN_INDEX)
748 {
749 LWLockRelease(&pstate->pa_lock);
750 return false;
751 }
752
753 /* Save the plan from which we are starting the search. */
754 node->as_whichplan = pstate->pa_next_plan;
755
756 /* Loop until we find a valid subplan to execute. */
757 while (pstate->pa_finished[pstate->pa_next_plan])
758 {
759 int nextplan;
760
761 nextplan = bms_next_member(node->as_valid_subplans,
762 pstate->pa_next_plan);
763 if (nextplan >= 0)
764 {
765 /* Advance to the next valid plan. */
766 pstate->pa_next_plan = nextplan;
767 }
768 else if (node->as_whichplan > node->as_first_partial_plan)
769 {
770 /*
771 * Try looping back to the first valid partial plan, if there is
772 * one. If there isn't, arrange to bail out below.
773 */
774 nextplan = bms_next_member(node->as_valid_subplans,
775 node->as_first_partial_plan - 1);
776 pstate->pa_next_plan =
777 nextplan < 0 ? node->as_whichplan : nextplan;
778 }
779 else
780 {
781 /*
782 * At last plan, and either there are no partial plans or we've
783 * tried them all. Arrange to bail out.
784 */
785 pstate->pa_next_plan = node->as_whichplan;
786 }
787
788 if (pstate->pa_next_plan == node->as_whichplan)
789 {
790 /* We've tried everything! */
792 LWLockRelease(&pstate->pa_lock);
793 return false;
794 }
795 }
796
797 /* Pick the plan we found, and advance pa_next_plan one more time. */
798 node->as_whichplan = pstate->pa_next_plan;
800 pstate->pa_next_plan);
801
802 /*
803 * If there are no more valid plans then try setting the next plan to the
804 * first valid partial plan.
805 */
806 if (pstate->pa_next_plan < 0)
807 {
808 int nextplan = bms_next_member(node->as_valid_subplans,
809 node->as_first_partial_plan - 1);
810
811 if (nextplan >= 0)
812 pstate->pa_next_plan = nextplan;
813 else
814 {
815 /*
816 * There are no valid partial plans, and we already chose the last
817 * non-partial plan; so flag that there's nothing more for our
818 * fellow workers to do.
819 */
821 }
822 }
823
824 /* If non-partial, immediately mark as finished. */
825 if (node->as_whichplan < node->as_first_partial_plan)
826 node->as_pstate->pa_finished[node->as_whichplan] = true;
827
828 LWLockRelease(&pstate->pa_lock);
829
830 return true;
831}
int bms_next_member(const Bitmapset *a, int prevbit)
Definition: bitmapset.c:1306

References AppendState::as_first_partial_plan, AppendState::as_nplans, AppendState::as_prune_state, AppendState::as_pstate, AppendState::as_valid_subplans, AppendState::as_valid_subplans_identified, AppendState::as_whichplan, Assert, bms_next_member(), EState::es_direction, ExecFindMatchingSubPlans(), INVALID_SUBPLAN_INDEX, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), mark_invalid_subplans_as_finished(), ParallelAppendState::pa_finished, ParallelAppendState::pa_lock, ParallelAppendState::pa_next_plan, AppendState::ps, ScanDirectionIsForward, and PlanState::state.

Referenced by ExecAppendInitializeWorker().

◆ choose_next_subplan_locally()

static bool choose_next_subplan_locally ( AppendState *node)
static

Definition at line 568 of file nodeAppend.c.

569{
570 int whichplan = node->as_whichplan;
571 int nextplan;
572
573 /* We should never be called when there are no subplans */
574 Assert(node->as_nplans > 0);
575
576 /* Nothing to do if syncdone */
577 if (node->as_syncdone)
578 return false;
579
580 /*
581 * If first call then have the bms member function choose the first valid
582 * sync subplan by initializing whichplan to -1. If there happen to be no
583 * valid sync subplans then the bms member function will handle that by
584 * returning a negative number which will allow us to exit returning a
585 * false value.
586 */
587 if (whichplan == INVALID_SUBPLAN_INDEX)
588 {
589 if (node->as_nasyncplans > 0)
590 {
591 /* We'd have filled as_valid_subplans already */
593 }
594 else if (!node->as_valid_subplans_identified)
595 {
596 node->as_valid_subplans =
598 node->as_valid_subplans_identified = true;
599 }
600
601 whichplan = -1;
602 }
603
604 /* Ensure whichplan is within the expected range */
605 Assert(whichplan >= -1 && whichplan <= node->as_nplans);
606
608 nextplan = bms_next_member(node->as_valid_subplans, whichplan);
609 else
610 nextplan = bms_prev_member(node->as_valid_subplans, whichplan);
611
612 if (nextplan < 0)
613 {
614 /* Set as_syncdone if in async mode */
615 if (node->as_nasyncplans > 0)
616 node->as_syncdone = true;
617 return false;
618 }
619
620 node->as_whichplan = nextplan;
621
622 return true;
623}
int bms_prev_member(const Bitmapset *a, int prevbit)
Definition: bitmapset.c:1367
bool as_syncdone
Definition: execnodes.h:1458
int as_nasyncplans
Definition: execnodes.h:1454

References AppendState::as_nasyncplans, AppendState::as_nplans, AppendState::as_prune_state, AppendState::as_syncdone, AppendState::as_valid_subplans, AppendState::as_valid_subplans_identified, AppendState::as_whichplan, Assert, bms_next_member(), bms_prev_member(), EState::es_direction, ExecFindMatchingSubPlans(), INVALID_SUBPLAN_INDEX, AppendState::ps, ScanDirectionIsForward, and PlanState::state.

Referenced by ExecInitAppend().

◆ classify_matching_subplans()

static void classify_matching_subplans ( AppendState *node)
static

Definition at line 1165 of file nodeAppend.c.

1166{
1167 Bitmapset *valid_asyncplans;
1168
1170 Assert(node->as_valid_asyncplans == NULL);
1171
1172 /* Nothing to do if there are no valid subplans. */
1174 {
1175 node->as_syncdone = true;
1176 node->as_nasyncremain = 0;
1177 return;
1178 }
1179
1180 /* Nothing to do if there are no valid async subplans. */
1181 if (!bms_overlap(node->as_valid_subplans, node->as_asyncplans))
1182 {
1183 node->as_nasyncremain = 0;
1184 return;
1185 }
1186
1187 /* Get valid async subplans. */
1188 valid_asyncplans = bms_intersect(node->as_asyncplans,
1189 node->as_valid_subplans);
1190
1191 /* Adjust the valid subplans to contain sync subplans only. */
1193 valid_asyncplans);
1194
1195 /* Save valid async subplans. */
1196 node->as_valid_asyncplans = valid_asyncplans;
1197}
Bitmapset * bms_intersect(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:292
Bitmapset * bms_del_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:1161
bool bms_overlap(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:582
#define bms_is_empty(a)
Definition: bitmapset.h:118
Bitmapset * as_valid_asyncplans
Definition: execnodes.h:1471
Bitmapset * as_asyncplans
Definition: execnodes.h:1453
int as_nasyncremain
Definition: execnodes.h:1460

References AppendState::as_asyncplans, AppendState::as_nasyncremain, AppendState::as_syncdone, AppendState::as_valid_asyncplans, AppendState::as_valid_subplans, AppendState::as_valid_subplans_identified, Assert, bms_del_members(), bms_intersect(), bms_is_empty, and bms_overlap().

Referenced by ExecAppendAsyncBegin(), and ExecInitAppend().

◆ ExecAppend()

static TupleTableSlot * ExecAppend ( PlanState *pstate)
static

Definition at line 302 of file nodeAppend.c.

303{
304 AppendState *node = castNode(AppendState, pstate);
305 TupleTableSlot *result;
306
307 /*
308 * If this is the first call after Init or ReScan, we need to do the
309 * initialization work.
310 */
311 if (!node->as_begun)
312 {
314 Assert(!node->as_syncdone);
315
316 /* Nothing to do if there are no subplans */
317 if (node->as_nplans == 0)
319
320 /* If there are any async subplans, begin executing them. */
321 if (node->as_nasyncplans > 0)
323
324 /*
325 * If no sync subplan has been chosen, we must choose one before
326 * proceeding.
327 */
328 if (!node->choose_next_subplan(node) && node->as_nasyncremain == 0)
330
331 Assert(node->as_syncdone ||
332 (node->as_whichplan >= 0 &&
333 node->as_whichplan < node->as_nplans));
334
335 /* And we're initialized. */
336 node->as_begun = true;
337 }
338
339 for (;;)
340 {
341 PlanState *subnode;
342
344
345 /*
346 * try to get a tuple from an async subplan if any
347 */
348 if (node->as_syncdone || !bms_is_empty(node->as_needrequest))
349 {
350 if (ExecAppendAsyncGetNext(node, &result))
351 return result;
352 Assert(!node->as_syncdone);
354 }
355
356 /*
357 * figure out which sync subplan we are currently processing
358 */
359 Assert(node->as_whichplan >= 0 && node->as_whichplan < node->as_nplans);
360 subnode = node->appendplans[node->as_whichplan];
361
362 /*
363 * get a tuple from the subplan
364 */
365 result = ExecProcNode(subnode);
366
367 if (!TupIsNull(result))
368 {
369 /*
370 * If the subplan gave us something then return it as-is. We do
371 * NOT make use of the result slot that was set up in
372 * ExecInitAppend; there's no need for it.
373 */
374 return result;
375 }
376
377 /*
378 * wait or poll for async events if any. We do this before checking
379 * for the end of iteration, because it might drain the remaining
380 * async subplans.
381 */
382 if (node->as_nasyncremain > 0)
384
385 /* choose new sync subplan; if no sync/async subplans, we're done */
386 if (!node->choose_next_subplan(node) && node->as_nasyncremain == 0)
388 }
389}
static TupleTableSlot * ExecProcNode(PlanState *node)
Definition: executor.h:267
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:122
static void ExecAppendAsyncBegin(AppendState *node)
Definition: nodeAppend.c:876
static void ExecAppendAsyncEventWait(AppendState *node)
Definition: nodeAppend.c:1031
static bool ExecAppendAsyncGetNext(AppendState *node, TupleTableSlot **result)
Definition: nodeAppend.c:928
#define castNode(_type_, nodeptr)
Definition: nodes.h:176
Bitmapset * as_needrequest
Definition: execnodes.h:1461
bool as_begun
Definition: execnodes.h:1452
bool(* choose_next_subplan)(AppendState *)
Definition: execnodes.h:1472
PlanState ** appendplans
Definition: execnodes.h:1449
TupleTableSlot * ps_ResultTupleSlot
Definition: execnodes.h:1164
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition: tuptable.h:454
#define TupIsNull(slot)
Definition: tuptable.h:306

References AppendState::appendplans, AppendState::as_begun, AppendState::as_nasyncplans, AppendState::as_nasyncremain, AppendState::as_needrequest, AppendState::as_nplans, AppendState::as_syncdone, AppendState::as_whichplan, Assert, bms_is_empty, castNode, CHECK_FOR_INTERRUPTS, AppendState::choose_next_subplan, ExecAppendAsyncBegin(), ExecAppendAsyncEventWait(), ExecAppendAsyncGetNext(), ExecClearTuple(), ExecProcNode(), INVALID_SUBPLAN_INDEX, AppendState::ps, PlanState::ps_ResultTupleSlot, and TupIsNull.

Referenced by ExecInitAppend().

◆ ExecAppendAsyncBegin()

static void ExecAppendAsyncBegin ( AppendState *node)
static

Definition at line 876 of file nodeAppend.c.

877{
878 int i;
879
880 /* Backward scan is not supported by async-aware Appends. */
882
883 /* We should never be called when there are no subplans */
884 Assert(node->as_nplans > 0);
885
886 /* We should never be called when there are no async subplans. */
887 Assert(node->as_nasyncplans > 0);
888
889 /* If we've yet to determine the valid subplans then do so now. */
891 {
892 node->as_valid_subplans =
894 node->as_valid_subplans_identified = true;
895
897 }
898
899 /* Initialize state variables. */
902
903 /* Nothing to do if there are no valid async subplans. */
904 if (node->as_nasyncremain == 0)
905 return;
906
907 /* Make a request for each of the valid async subplans. */
908 i = -1;
909 while ((i = bms_next_member(node->as_valid_asyncplans, i)) >= 0)
910 {
911 AsyncRequest *areq = node->as_asyncrequests[i];
912
913 Assert(areq->request_index == i);
914 Assert(!areq->callback_pending);
915
916 /* Do the actual work. */
917 ExecAsyncRequest(areq);
918 }
919}
int bms_num_members(const Bitmapset *a)
Definition: bitmapset.c:751
void ExecAsyncRequest(AsyncRequest *areq)
Definition: execAsync.c:26
int i
Definition: isn.c:72
static void classify_matching_subplans(AppendState *node)
Definition: nodeAppend.c:1165
AsyncRequest ** as_asyncrequests
Definition: execnodes.h:1455
int request_index
Definition: execnodes.h:613
bool callback_pending
Definition: execnodes.h:614

References AppendState::as_asyncrequests, AppendState::as_nasyncplans, AppendState::as_nasyncremain, AppendState::as_nplans, AppendState::as_prune_state, AppendState::as_syncdone, AppendState::as_valid_asyncplans, AppendState::as_valid_subplans, AppendState::as_valid_subplans_identified, Assert, bms_is_empty, bms_next_member(), bms_num_members(), AsyncRequest::callback_pending, classify_matching_subplans(), EState::es_direction, ExecAsyncRequest(), ExecFindMatchingSubPlans(), i, AppendState::ps, AsyncRequest::request_index, ScanDirectionIsForward, and PlanState::state.

Referenced by ExecAppend().

◆ ExecAppendAsyncEventWait()

static void ExecAppendAsyncEventWait ( AppendState *node)
static

Definition at line 1031 of file nodeAppend.c.

1032{
1033 int nevents = node->as_nasyncplans + 1;
1034 long timeout = node->as_syncdone ? -1 : 0;
1035 WaitEvent occurred_event[EVENT_BUFFER_SIZE];
1036 int noccurred;
1037 int i;
1038
1039 /* We should never be called when there are no valid async subplans. */
1040 Assert(node->as_nasyncremain > 0);
1041
1042 Assert(node->as_eventset == NULL);
1045 NULL, NULL);
1046
1047 /* Give each waiting subplan a chance to add an event. */
1048 i = -1;
1049 while ((i = bms_next_member(node->as_asyncplans, i)) >= 0)
1050 {
1051 AsyncRequest *areq = node->as_asyncrequests[i];
1052
1053 if (areq->callback_pending)
1055 }
1056
1057 /*
1058 * No need for further processing if there are no configured events other
1059 * than the postmaster death event.
1060 */
1062 {
1064 node->as_eventset = NULL;
1065 return;
1066 }
1067
1068 /* Return at most EVENT_BUFFER_SIZE events in one call. */
1069 if (nevents > EVENT_BUFFER_SIZE)
1070 nevents = EVENT_BUFFER_SIZE;
1071
1072 /*
1073 * If the timeout is -1, wait until at least one event occurs. If the
1074 * timeout is 0, poll for events, but do not wait at all.
1075 */
1076 noccurred = WaitEventSetWait(node->as_eventset, timeout, occurred_event,
1077 nevents, WAIT_EVENT_APPEND_READY);
1079 node->as_eventset = NULL;
1080 if (noccurred == 0)
1081 return;
1082
1083 /* Deliver notifications. */
1084 for (i = 0; i < noccurred; i++)
1085 {
1086 WaitEvent *w = &occurred_event[i];
1087
1088 /*
1089 * Each waiting subplan should have registered its wait event with
1090 * user_data pointing back to its AsyncRequest.
1091 */
1092 if ((w->events & WL_SOCKET_READABLE) != 0)
1093 {
1094 AsyncRequest *areq = (AsyncRequest *) w->user_data;
1095
1096 if (areq->callback_pending)
1097 {
1098 /*
1099 * Mark it as no longer needing a callback. We must do this
1100 * before dispatching the callback in case the callback resets
1101 * the flag.
1102 */
1103 areq->callback_pending = false;
1104
1105 /* Do the actual work. */
1106 ExecAsyncNotify(areq);
1107 }
1108 }
1109 }
1110}
void ExecAsyncConfigureWait(AsyncRequest *areq)
Definition: execAsync.c:62
void ExecAsyncNotify(AsyncRequest *areq)
Definition: execAsync.c:88
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:76
int GetNumRegisteredWaitEvents(WaitEventSet *set)
Definition: latch.c:2256
int AddWaitEventToSet(WaitEventSet *set, uint32 events, pgsocket fd, Latch *latch, void *user_data)
Definition: latch.c:957
int WaitEventSetWait(WaitEventSet *set, long timeout, WaitEvent *occurred_events, int nevents, uint32 wait_event_info)
Definition: latch.c:1418
void FreeWaitEventSet(WaitEventSet *set)
Definition: latch.c:868
WaitEventSet * CreateWaitEventSet(ResourceOwner resowner, int nevents)
Definition: latch.c:751
#define WL_SOCKET_READABLE
Definition: latch.h:128
#define WL_EXIT_ON_PM_DEATH
Definition: latch.h:132
#define EVENT_BUFFER_SIZE
Definition: nodeAppend.c:84
#define PGINVALID_SOCKET
Definition: port.h:31
ResourceOwner CurrentResourceOwner
Definition: resowner.c:165
struct WaitEventSet * as_eventset
Definition: execnodes.h:1462
void * user_data
Definition: latch.h:157
uint32 events
Definition: latch.h:155

References AddWaitEventToSet(), AppendState::as_asyncplans, AppendState::as_asyncrequests, AppendState::as_eventset, AppendState::as_nasyncplans, AppendState::as_nasyncremain, AppendState::as_syncdone, Assert, bms_next_member(), AsyncRequest::callback_pending, CreateWaitEventSet(), CurrentResourceOwner, EVENT_BUFFER_SIZE, WaitEvent::events, ExecAsyncConfigureWait(), ExecAsyncNotify(), FreeWaitEventSet(), GetNumRegisteredWaitEvents(), i, if(), PGINVALID_SOCKET, WaitEvent::user_data, WaitEventSetWait(), WL_EXIT_ON_PM_DEATH, and WL_SOCKET_READABLE.

Referenced by ExecAppend(), and ExecAppendAsyncGetNext().

◆ ExecAppendAsyncGetNext()

static bool ExecAppendAsyncGetNext ( AppendState *node,
TupleTableSlot **  result 
)
static

Definition at line 928 of file nodeAppend.c.

929{
930 *result = NULL;
931
932 /* We should never be called when there are no valid async subplans. */
933 Assert(node->as_nasyncremain > 0);
934
935 /* Request a tuple asynchronously. */
936 if (ExecAppendAsyncRequest(node, result))
937 return true;
938
939 while (node->as_nasyncremain > 0)
940 {
942
943 /* Wait or poll for async events. */
945
946 /* Request a tuple asynchronously. */
947 if (ExecAppendAsyncRequest(node, result))
948 return true;
949
950 /* Break from loop if there's any sync subplan that isn't complete. */
951 if (!node->as_syncdone)
952 break;
953 }
954
955 /*
956 * If all sync subplans are complete, we're totally done scanning the
957 * given node. Otherwise, we're done with the asynchronous stuff but must
958 * continue scanning the sync subplans.
959 */
960 if (node->as_syncdone)
961 {
962 Assert(node->as_nasyncremain == 0);
963 *result = ExecClearTuple(node->ps.ps_ResultTupleSlot);
964 return true;
965 }
966
967 return false;
968}
static bool ExecAppendAsyncRequest(AppendState *node, TupleTableSlot **result)
Definition: nodeAppend.c:977

References AppendState::as_nasyncremain, AppendState::as_syncdone, Assert, CHECK_FOR_INTERRUPTS, ExecAppendAsyncEventWait(), ExecAppendAsyncRequest(), ExecClearTuple(), AppendState::ps, and PlanState::ps_ResultTupleSlot.

Referenced by ExecAppend().

◆ ExecAppendAsyncRequest()

static bool ExecAppendAsyncRequest ( AppendState *node,
TupleTableSlot **  result 
)
static

Definition at line 977 of file nodeAppend.c.

978{
979 Bitmapset *needrequest;
980 int i;
981
982 /* Nothing to do if there are no async subplans needing a new request. */
983 if (bms_is_empty(node->as_needrequest))
984 {
985 Assert(node->as_nasyncresults == 0);
986 return false;
987 }
988
989 /*
990 * If there are any asynchronously-generated results that have not yet
991 * been returned, we have nothing to do; just return one of them.
992 */
993 if (node->as_nasyncresults > 0)
994 {
995 --node->as_nasyncresults;
996 *result = node->as_asyncresults[node->as_nasyncresults];
997 return true;
998 }
999
1000 /* Make a new request for each of the async subplans that need it. */
1001 needrequest = node->as_needrequest;
1002 node->as_needrequest = NULL;
1003 i = -1;
1004 while ((i = bms_next_member(needrequest, i)) >= 0)
1005 {
1006 AsyncRequest *areq = node->as_asyncrequests[i];
1007
1008 /* Do the actual work. */
1009 ExecAsyncRequest(areq);
1010 }
1011 bms_free(needrequest);
1012
1013 /* Return one of the asynchronously-generated results if any. */
1014 if (node->as_nasyncresults > 0)
1015 {
1016 --node->as_nasyncresults;
1017 *result = node->as_asyncresults[node->as_nasyncresults];
1018 return true;
1019 }
1020
1021 return false;
1022}
void bms_free(Bitmapset *a)
Definition: bitmapset.c:239
int as_nasyncresults
Definition: execnodes.h:1457
TupleTableSlot ** as_asyncresults
Definition: execnodes.h:1456

References AppendState::as_asyncrequests, AppendState::as_asyncresults, AppendState::as_nasyncresults, AppendState::as_needrequest, Assert, bms_free(), bms_is_empty, bms_next_member(), ExecAsyncRequest(), and i.

Referenced by ExecAppendAsyncGetNext().

◆ ExecAppendEstimate()

void ExecAppendEstimate ( AppendState *node,
ParallelContext *pcxt 
)

Definition at line 498 of file nodeAppend.c.

500{
501 node->pstate_len =
502 add_size(offsetof(ParallelAppendState, pa_finished),
503 sizeof(bool) * node->as_nplans);
504
507}
#define shm_toc_estimate_chunk(e, sz)
Definition: shm_toc.h:51
#define shm_toc_estimate_keys(e, cnt)
Definition: shm_toc.h:53
Size add_size(Size s1, Size s2)
Definition: shmem.c:488
Size pstate_len
Definition: execnodes.h:1467
shm_toc_estimator estimator
Definition: parallel.h:41

References add_size(), AppendState::as_nplans, ParallelContext::estimator, AppendState::pstate_len, shm_toc_estimate_chunk, and shm_toc_estimate_keys.

Referenced by ExecParallelEstimate().

◆ ExecAppendInitializeDSM()

void ExecAppendInitializeDSM ( AppendState *node,
ParallelContext *pcxt 
)

Definition at line 517 of file nodeAppend.c.

519{
520 ParallelAppendState *pstate;
521
522 pstate = shm_toc_allocate(pcxt->toc, node->pstate_len);
523 memset(pstate, 0, node->pstate_len);
525 shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id, pstate);
526
527 node->as_pstate = pstate;
529}
void LWLockInitialize(LWLock *lock, int tranche_id)
Definition: lwlock.c:707
@ LWTRANCHE_PARALLEL_APPEND
Definition: lwlock.h:201
static bool choose_next_subplan_for_leader(AppendState *node)
Definition: nodeAppend.c:634
void * shm_toc_allocate(shm_toc *toc, Size nbytes)
Definition: shm_toc.c:88
void shm_toc_insert(shm_toc *toc, uint64 key, void *address)
Definition: shm_toc.c:171
shm_toc * toc
Definition: parallel.h:44
Plan * plan
Definition: execnodes.h:1126
int plan_node_id
Definition: plannodes.h:152

References AppendState::as_pstate, AppendState::choose_next_subplan, choose_next_subplan_for_leader(), LWLockInitialize(), LWTRANCHE_PARALLEL_APPEND, ParallelAppendState::pa_lock, PlanState::plan, Plan::plan_node_id, AppendState::ps, AppendState::pstate_len, shm_toc_allocate(), shm_toc_insert(), and ParallelContext::toc.

Referenced by ExecParallelInitializeDSM().

◆ ExecAppendInitializeWorker()

void ExecAppendInitializeWorker ( AppendState *node,
ParallelWorkerContext *pwcxt 
)

Definition at line 554 of file nodeAppend.c.

555{
556 node->as_pstate = shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false);
558}
static bool choose_next_subplan_for_worker(AppendState *node)
Definition: nodeAppend.c:716
void * shm_toc_lookup(shm_toc *toc, uint64 key, bool noError)
Definition: shm_toc.c:232

References AppendState::as_pstate, AppendState::choose_next_subplan, choose_next_subplan_for_worker(), PlanState::plan, Plan::plan_node_id, AppendState::ps, shm_toc_lookup(), and ParallelWorkerContext::toc.

Referenced by ExecParallelInitializeWorker().

◆ ExecAppendReInitializeDSM()

void ExecAppendReInitializeDSM ( AppendState *node,
ParallelContext *pcxt 
)

Definition at line 538 of file nodeAppend.c.

539{
540 ParallelAppendState *pstate = node->as_pstate;
541
542 pstate->pa_next_plan = 0;
543 memset(pstate->pa_finished, 0, sizeof(bool) * node->as_nplans);
544}

References AppendState::as_nplans, AppendState::as_pstate, ParallelAppendState::pa_finished, and ParallelAppendState::pa_next_plan.

Referenced by ExecParallelReInitializeDSM().

◆ ExecAsyncAppendResponse()

void ExecAsyncAppendResponse ( AsyncRequest *areq)

Definition at line 1119 of file nodeAppend.c.

1120{
1121 AppendState *node = (AppendState *) areq->requestor;
1122 TupleTableSlot *slot = areq->result;
1123
1124 /* The result should be a TupleTableSlot or NULL. */
1125 Assert(slot == NULL || IsA(slot, TupleTableSlot));
1126
1127 /* Nothing to do if the request is pending. */
1128 if (!areq->request_complete)
1129 {
1130 /* The request would have been pending for a callback. */
1131 Assert(areq->callback_pending);
1132 return;
1133 }
1134
1135 /* If the result is NULL or an empty slot, there's nothing more to do. */
1136 if (TupIsNull(slot))
1137 {
1138 /* The ending subplan wouldn't have been pending for a callback. */
1139 Assert(!areq->callback_pending);
1140 --node->as_nasyncremain;
1141 return;
1142 }
1143
1144 /* Save result so we can return it. */
1145 Assert(node->as_nasyncresults < node->as_nasyncplans);
1146 node->as_asyncresults[node->as_nasyncresults++] = slot;
1147
1148 /*
1149 * Mark the subplan that returned a result as ready for a new request. We
1150 * don't launch another one here immediately because it might complete.
1151 */
1153 areq->request_index);
1154}
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:815
#define IsA(nodeptr, _type_)
Definition: nodes.h:158
struct PlanState * requestor
Definition: execnodes.h:611
TupleTableSlot * result
Definition: execnodes.h:616
bool request_complete
Definition: execnodes.h:615

References AppendState::as_asyncresults, AppendState::as_nasyncplans, AppendState::as_nasyncremain, AppendState::as_nasyncresults, AppendState::as_needrequest, Assert, bms_add_member(), AsyncRequest::callback_pending, IsA, AsyncRequest::request_complete, AsyncRequest::request_index, AsyncRequest::requestor, AsyncRequest::result, and TupIsNull.

Referenced by ExecAsyncResponse().

◆ ExecEndAppend()

void ExecEndAppend ( AppendState *node)

Definition at line 400 of file nodeAppend.c.

401{
402 PlanState **appendplans;
403 int nplans;
404 int i;
405
406 /*
407 * get information from the node
408 */
409 appendplans = node->appendplans;
410 nplans = node->as_nplans;
411
412 /*
413 * shut down each of the subscans
414 */
415 for (i = 0; i < nplans; i++)
416 ExecEndNode(appendplans[i]);
417}
void ExecEndNode(PlanState *node)
Definition: execProcnode.c:562

References AppendState::appendplans, AppendState::as_nplans, ExecEndNode(), and i.

Referenced by ExecEndNode().

◆ ExecInitAppend()

/* ----------------------------------------------------------------
 *		ExecInitAppend
 *
 *		Begin all of the subscans of the append node.
 *
 *		(This is potentially wasteful, since the entire result of the
 *		append node may not be scanned, but this way all of the
 *		structures get allocated in the executor's top level memory
 *		block instead of that of the call to ExecAppend.)
 * ----------------------------------------------------------------
 */
AppendState *
ExecInitAppend(Append *node, EState *estate, int eflags)
{
	AppendState *appendstate = makeNode(AppendState);
	PlanState **appendplanstates;
	const TupleTableSlotOps *appendops;
	Bitmapset  *validsubplans;
	Bitmapset  *asyncplans;
	int			nplans;
	int			nasyncplans;
	int			firstvalid;
	int			i,
				j;

	/* check for unsupported flags */
	Assert(!(eflags & EXEC_FLAG_MARK));

	/*
	 * create new AppendState for our append node
	 */
	appendstate->ps.plan = (Plan *) node;
	appendstate->ps.state = estate;
	appendstate->ps.ExecProcNode = ExecAppend;

	/* Let choose_next_subplan_* function handle setting the first subplan */
	appendstate->as_whichplan = INVALID_SUBPLAN_INDEX;
	appendstate->as_syncdone = false;
	appendstate->as_begun = false;

	/* If run-time partition pruning is enabled, then set that up now */
	if (node->part_prune_info != NULL)
	{
		PartitionPruneState *prunestate;

		/*
		 * Set up pruning data structure.  This also initializes the set of
		 * subplans to initialize (validsubplans) by taking into account the
		 * result of performing initial pruning if any.
		 */
		prunestate = ExecInitPartitionPruning(&appendstate->ps,
											  list_length(node->appendplans),
											  node->part_prune_info,
											  &validsubplans);
		appendstate->as_prune_state = prunestate;
		nplans = bms_num_members(validsubplans);

		/*
		 * When no run-time pruning is required and there's at least one
		 * subplan, we can fill as_valid_subplans immediately, preventing
		 * later calls to ExecFindMatchingSubPlans.
		 */
		if (!prunestate->do_exec_prune && nplans > 0)
		{
			appendstate->as_valid_subplans = bms_add_range(NULL, 0, nplans - 1);
			appendstate->as_valid_subplans_identified = true;
		}
	}
	else
	{
		nplans = list_length(node->appendplans);

		/*
		 * When run-time partition pruning is not enabled we can just mark all
		 * subplans as valid; they must also all be initialized.
		 */
		Assert(nplans > 0);
		appendstate->as_valid_subplans = validsubplans =
			bms_add_range(NULL, 0, nplans - 1);
		appendstate->as_valid_subplans_identified = true;
		appendstate->as_prune_state = NULL;
	}

	appendplanstates = (PlanState **) palloc(nplans *
											 sizeof(PlanState *));

	/*
	 * call ExecInitNode on each of the valid plans to be executed and save
	 * the results into the appendplanstates array.
	 *
	 * While at it, find out the first valid partial plan.
	 */
	j = 0;
	asyncplans = NULL;
	nasyncplans = 0;
	firstvalid = nplans;
	i = -1;
	while ((i = bms_next_member(validsubplans, i)) >= 0)
	{
		Plan	   *initNode = (Plan *) list_nth(node->appendplans, i);

		/*
		 * Record async subplans.  When executing EvalPlanQual, we treat them
		 * as sync ones; don't do this when initializing an EvalPlanQual plan
		 * tree.
		 */
		if (initNode->async_capable && estate->es_epq_active == NULL)
		{
			asyncplans = bms_add_member(asyncplans, j);
			nasyncplans++;
		}

		/*
		 * Record the lowest appendplans index which is a valid partial plan.
		 */
		if (i >= node->first_partial_plan && j < firstvalid)
			firstvalid = j;

		appendplanstates[j++] = ExecInitNode(initNode, estate, eflags);
	}

	appendstate->as_first_partial_plan = firstvalid;
	appendstate->appendplans = appendplanstates;
	appendstate->as_nplans = nplans;

	/*
	 * Initialize Append's result tuple type and slot.  If the child plans all
	 * produce the same fixed slot type, we can use that slot type; otherwise
	 * make a virtual slot.  (Note that the result slot itself is used only to
	 * return a null tuple at end of execution; real tuples are returned to
	 * the caller in the children's own result slots.  What we are doing here
	 * is allowing the parent plan node to optimize if the Append will return
	 * only one kind of slot.)
	 */
	appendops = ExecGetCommonSlotOps(appendplanstates, j);
	if (appendops != NULL)
	{
		ExecInitResultTupleSlotTL(&appendstate->ps, appendops);
	}
	else
	{
		/* NOTE(review): restoring line dropped by extraction (source l.238) */
		ExecInitResultTupleSlotTL(&appendstate->ps, &TTSOpsVirtual);
		/* show that the output slot type is not fixed */
		appendstate->ps.resultopsset = true;
		appendstate->ps.resultopsfixed = false;
	}

	/* Initialize async state */
	appendstate->as_asyncplans = asyncplans;
	appendstate->as_nasyncplans = nasyncplans;
	appendstate->as_asyncrequests = NULL;
	appendstate->as_asyncresults = NULL;
	appendstate->as_nasyncresults = 0;
	appendstate->as_nasyncremain = 0;
	appendstate->as_needrequest = NULL;
	appendstate->as_eventset = NULL;
	appendstate->as_valid_asyncplans = NULL;

	if (nasyncplans > 0)
	{
		appendstate->as_asyncrequests = (AsyncRequest **)
			palloc0(nplans * sizeof(AsyncRequest *));

		i = -1;
		while ((i = bms_next_member(asyncplans, i)) >= 0)
		{
			AsyncRequest *areq;

			areq = palloc(sizeof(AsyncRequest));
			areq->requestor = (PlanState *) appendstate;
			areq->requestee = appendplanstates[i];
			areq->request_index = i;
			areq->callback_pending = false;
			areq->request_complete = false;
			areq->result = NULL;

			appendstate->as_asyncrequests[i] = areq;
		}

		appendstate->as_asyncresults = (TupleTableSlot **)
			palloc0(nasyncplans * sizeof(TupleTableSlot *));

		if (appendstate->as_valid_subplans_identified)
			classify_matching_subplans(appendstate);
	}

	/*
	 * Miscellaneous initialization
	 */

	appendstate->ps.ps_ProjInfo = NULL;

	/* For parallel query, this will be overridden later. */
	/* NOTE(review): restoring line dropped by extraction (source l.290) */
	appendstate->choose_next_subplan = choose_next_subplan_locally;

	return appendstate;
}
Bitmapset * bms_add_range(Bitmapset *a, int lower, int upper)
Definition: bitmapset.c:1019
PartitionPruneState * ExecInitPartitionPruning(PlanState *planstate, int n_total_subplans, PartitionPruneInfo *pruneinfo, Bitmapset **initially_valid_subplans)
PlanState * ExecInitNode(Plan *node, EState *estate, int eflags)
Definition: execProcnode.c:142
const TupleTableSlotOps TTSOpsVirtual
Definition: execTuples.c:84
void ExecInitResultTupleSlotTL(PlanState *planstate, const TupleTableSlotOps *tts_ops)
Definition: execTuples.c:1986
const TupleTableSlotOps * ExecGetCommonSlotOps(PlanState **planstates, int nplans)
Definition: execUtils.c:536
#define EXEC_FLAG_MARK
Definition: executor.h:69
int j
Definition: isn.c:73
void * palloc0(Size size)
Definition: mcxt.c:1347
void * palloc(Size size)
Definition: mcxt.c:1317
static TupleTableSlot * ExecAppend(PlanState *pstate)
Definition: nodeAppend.c:302
static bool choose_next_subplan_locally(AppendState *node)
Definition: nodeAppend.c:568
#define makeNode(_type_)
Definition: nodes.h:155
static int list_length(const List *l)
Definition: pg_list.h:152
static void * list_nth(const List *list, int n)
Definition: pg_list.h:299
int first_partial_plan
Definition: plannodes.h:277
struct PartitionPruneInfo * part_prune_info
Definition: plannodes.h:280
List * appendplans
Definition: plannodes.h:270
struct PlanState * requestee
Definition: execnodes.h:612
struct EPQState * es_epq_active
Definition: execnodes.h:707
bool resultopsset
Definition: execnodes.h:1211
ProjectionInfo * ps_ProjInfo
Definition: execnodes.h:1166
bool resultopsfixed
Definition: execnodes.h:1207
ExecProcNodeMtd ExecProcNode
Definition: execnodes.h:1132
bool async_capable
Definition: plannodes.h:147

References AppendState::appendplans, Append::appendplans, AppendState::as_asyncplans, AppendState::as_asyncrequests, AppendState::as_asyncresults, AppendState::as_begun, AppendState::as_eventset, AppendState::as_first_partial_plan, AppendState::as_nasyncplans, AppendState::as_nasyncremain, AppendState::as_nasyncresults, AppendState::as_needrequest, AppendState::as_nplans, AppendState::as_prune_state, AppendState::as_syncdone, AppendState::as_valid_asyncplans, AppendState::as_valid_subplans, AppendState::as_valid_subplans_identified, AppendState::as_whichplan, Assert, Plan::async_capable, bms_add_member(), bms_add_range(), bms_next_member(), bms_num_members(), AsyncRequest::callback_pending, AppendState::choose_next_subplan, choose_next_subplan_locally(), classify_matching_subplans(), PartitionPruneState::do_exec_prune, EState::es_epq_active, EXEC_FLAG_MARK, ExecAppend(), ExecGetCommonSlotOps(), ExecInitNode(), ExecInitPartitionPruning(), ExecInitResultTupleSlotTL(), PlanState::ExecProcNode, Append::first_partial_plan, i, INVALID_SUBPLAN_INDEX, j, list_length(), list_nth(), makeNode, palloc(), palloc0(), Append::part_prune_info, PlanState::plan, AppendState::ps, PlanState::ps_ProjInfo, AsyncRequest::request_complete, AsyncRequest::request_index, AsyncRequest::requestee, AsyncRequest::requestor, AsyncRequest::result, PlanState::resultopsfixed, PlanState::resultopsset, PlanState::state, and TTSOpsVirtual.

Referenced by ExecInitNode().

◆ ExecReScanAppend()

/* ----------------------------------------------------------------
 *		ExecReScanAppend
 *
 *		Prepares the Append node to be rescanned: invalidates the set of
 *		valid subplans if pruning parameters changed, propagates
 *		changed-parameter signaling to each child, and resets both the
 *		synchronous and asynchronous execution state.
 * ----------------------------------------------------------------
 */
void
ExecReScanAppend(AppendState *node)
{
	int			nasyncplans = node->as_nasyncplans;
	int			i;

	/*
	 * If any PARAM_EXEC Params used in pruning expressions have changed, then
	 * we'd better unset the valid subplans so that they are reselected for
	 * the new parameter values.
	 */
	if (node->as_prune_state &&
		bms_overlap(node->ps.chgParam,
					node->as_prune_state->execparamids))
	{
		node->as_valid_subplans_identified = false;
		/* NOTE(review): restoring bms_free() calls dropped by extraction */
		bms_free(node->as_valid_subplans);
		node->as_valid_subplans = NULL;
		bms_free(node->as_valid_asyncplans);
		node->as_valid_asyncplans = NULL;
	}

	for (i = 0; i < node->as_nplans; i++)
	{
		PlanState  *subnode = node->appendplans[i];

		/*
		 * ExecReScan doesn't know about my subplans, so I have to do
		 * changed-parameter signaling myself.
		 */
		if (node->ps.chgParam != NULL)
			UpdateChangedParamSet(subnode, node->ps.chgParam);

		/*
		 * If chgParam of subnode is not null then plan will be re-scanned by
		 * first ExecProcNode or by first ExecAsyncRequest.
		 */
		if (subnode->chgParam == NULL)
			ExecReScan(subnode);
	}

	/* Reset async state */
	if (nasyncplans > 0)
	{
		i = -1;
		while ((i = bms_next_member(node->as_asyncplans, i)) >= 0)
		{
			AsyncRequest *areq = node->as_asyncrequests[i];

			areq->callback_pending = false;
			areq->request_complete = false;
			areq->result = NULL;
		}

		node->as_nasyncresults = 0;
		node->as_nasyncremain = 0;
		/* NOTE(review): restoring line dropped by extraction (source l.475) */
		bms_free(node->as_needrequest);
		node->as_needrequest = NULL;
	}

	/* Let choose_next_subplan_* function handle setting the first subplan */
	/* NOTE(review): restoring line dropped by extraction (source l.480) */
	node->as_whichplan = INVALID_SUBPLAN_INDEX;
	node->as_syncdone = false;
	node->as_begun = false;
}
void ExecReScan(PlanState *node)
Definition: execAmi.c:76
void UpdateChangedParamSet(PlanState *node, Bitmapset *newchg)
Definition: execUtils.c:889
Bitmapset * execparamids
Bitmapset * chgParam
Definition: execnodes.h:1158

References AppendState::appendplans, AppendState::as_asyncplans, AppendState::as_asyncrequests, AppendState::as_begun, AppendState::as_nasyncplans, AppendState::as_nasyncremain, AppendState::as_nasyncresults, AppendState::as_needrequest, AppendState::as_nplans, AppendState::as_prune_state, AppendState::as_syncdone, AppendState::as_valid_asyncplans, AppendState::as_valid_subplans, AppendState::as_valid_subplans_identified, AppendState::as_whichplan, bms_free(), bms_next_member(), bms_overlap(), AsyncRequest::callback_pending, PlanState::chgParam, PartitionPruneState::execparamids, ExecReScan(), i, INVALID_SUBPLAN_INDEX, AppendState::ps, AsyncRequest::request_complete, AsyncRequest::result, and UpdateChangedParamSet().

Referenced by ExecReScan().

◆ mark_invalid_subplans_as_finished()

/*
 * mark_invalid_subplans_as_finished
 *		Marks the ParallelAppendState's pa_finished as true for each invalid
 *		subplan.
 *
 * This function should only be called for parallel Append with run-time
 * pruning enabled.
 */
static void
mark_invalid_subplans_as_finished(AppendState *node)
{
	/* Only valid to call this while in parallel Append mode */
	Assert(node->as_pstate);

	/* Shouldn't have been called when run-time pruning is not enabled */
	Assert(node->as_prune_state);

	/* Nothing to do if all plans are valid */
	if (bms_num_members(node->as_valid_subplans) == node->as_nplans)
		return;

	/* Mark all non-valid plans as finished */
	for (int k = 0; k < node->as_nplans; k++)
	{
		if (!bms_is_member(k, node->as_valid_subplans))
			node->as_pstate->pa_finished[k] = true;
	}
}
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:510

References AppendState::as_nplans, AppendState::as_prune_state, AppendState::as_pstate, AppendState::as_valid_subplans, Assert, bms_is_member(), bms_num_members(), i, and ParallelAppendState::pa_finished.

Referenced by choose_next_subplan_for_leader(), and choose_next_subplan_for_worker().