PostgreSQL Source Code  git master
execParallel.h File Reference
#include "access/parallel.h"
#include "nodes/execnodes.h"
#include "nodes/parsenodes.h"
#include "nodes/plannodes.h"
#include "utils/dsa.h"
Include dependency graph for execParallel.h: (graph not shown)
Graph of files that directly or indirectly include this file: (graph not shown)

Go to the source code of this file.

Data Structures

struct  ParallelExecutorInfo
 

Typedefs

typedef struct SharedExecutorInstrumentation SharedExecutorInstrumentation
 
typedef struct ParallelExecutorInfo ParallelExecutorInfo
 

Functions

ParallelExecutorInfo * ExecInitParallelPlan (PlanState *planstate, EState *estate, Bitmapset *sendParam, int nworkers, int64 tuples_needed)
 
void ExecParallelCreateReaders (ParallelExecutorInfo *pei)
 
void ExecParallelFinish (ParallelExecutorInfo *pei)
 
void ExecParallelCleanup (ParallelExecutorInfo *pei)
 
void ExecParallelReinitialize (PlanState *planstate, ParallelExecutorInfo *pei, Bitmapset *sendParam)
 
void ParallelQueryMain (dsm_segment *seg, shm_toc *toc)
 

Typedef Documentation

◆ ParallelExecutorInfo

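The members of this struct can be inferred from the fields referenced throughout this page. A rough sketch for orientation only (field order and comments are approximate; see execParallel.h for the authoritative definition):

typedef struct ParallelExecutorInfo
{
    PlanState  *planstate;       /* plan subtree being run in parallel */
    ParallelContext *pcxt;       /* parallel context we're using */
    BufferUsage *buffer_usage;   /* per-worker BufferUsage array in the DSM */
    SharedExecutorInstrumentation *instrumentation;  /* optional */
    dsa_area   *area;            /* DSA area usable by leader and workers */
    dsa_pointer param_exec;      /* serialized PARAM_EXEC parameter values */
    bool        finished;        /* set true by ExecParallelFinish */
    shm_mq_handle **tqueue;      /* tuple queues the workers write into */
    struct TupleQueueReader **reader;  /* leader-side readers for those queues */
} ParallelExecutorInfo;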
◆ SharedExecutorInstrumentation

Function Documentation

◆ ExecInitParallelPlan()

ParallelExecutorInfo* ExecInitParallelPlan ( PlanState *  planstate,
EState *  estate,
Bitmapset *  sendParam,
int  nworkers,
int64  tuples_needed 
)

Definition at line 550 of file execParallel.c.

References ParallelExecutorInfo::area, bms_is_empty(), ParallelExecutorInfo::buffer_usage, CreateParallelContext(), dsa_create_in_place(), dsa_minimum_size(), FixedParallelExecutorState::eflags, elog, ERROR, EState::es_instrument, EState::es_param_list_info, EState::es_query_dsa, EState::es_sourceText, EState::es_top_eflags, EstimateParamListSpace(), ParallelContext::estimator, ExecEvalParamExecParams(), ExecParallelEstimate(), ExecParallelInitializeDSM(), ExecParallelSetupTupleQueues(), ExecSerializePlan(), ParallelExecutorInfo::finished, GetInstrumentationArray, i, InitializeParallelDSM(), InstrInit(), SharedExecutorInstrumentation::instrument_offset, SharedExecutorInstrumentation::instrument_options, ParallelExecutorInfo::instrumentation, ExecParallelInitializeDSMContext::instrumentation, InvalidDsaPointer, LWTRANCHE_PARALLEL_QUERY_DSA, MAXALIGN, mul_size(), ExecParallelEstimateContext::nnodes, ExecParallelInitializeDSMContext::nnodes, SharedExecutorInstrumentation::num_plan_nodes, SharedExecutorInstrumentation::num_workers, ParallelContext::nworkers, offsetof, palloc0(), PARALLEL_KEY_BUFFER_USAGE, PARALLEL_KEY_DSA, PARALLEL_KEY_EXECUTOR_FIXED, PARALLEL_KEY_INSTRUMENTATION, PARALLEL_KEY_PARAMLISTINFO, PARALLEL_KEY_PLANNEDSTMT, PARALLEL_KEY_QUERY_TEXT, PARALLEL_TUPLE_QUEUE_SIZE, ParallelExecutorInfo::param_exec, FixedParallelExecutorState::param_exec, ParallelExecutorInfo::pcxt, ExecParallelEstimateContext::pcxt, ExecParallelInitializeDSMContext::pcxt, PlanState::plan, ParallelExecutorInfo::planstate, ParallelExecutorInfo::reader, ParallelContext::seg, SerializeParamExecParams(), SerializeParamList(), shm_toc_allocate(), shm_toc_estimate_chunk, shm_toc_estimate_keys, shm_toc_insert(), ParallelContext::toc, ParallelExecutorInfo::tqueue, and FixedParallelExecutorState::tuples_needed.

Referenced by ExecGather(), and ExecGatherMerge().

553 {
554  ParallelExecutorInfo *pei;
555  ParallelContext *pcxt;
556  ExecParallelEstimateContext e;
557  ExecParallelInitializeDSMContext d;
558  FixedParallelExecutorState *fpes;
559  char *pstmt_data;
560  char *pstmt_space;
561  char *paramlistinfo_space;
562  BufferUsage *bufusage_space;
563  SharedExecutorInstrumentation *instrumentation = NULL;
564  int pstmt_len;
565  int paramlistinfo_len;
566  int instrumentation_len = 0;
567  int instrument_offset = 0;
568  Size dsa_minsize = dsa_minimum_size();
569  char *query_string;
570  int query_len;
571 
572  /* Force parameters we're going to pass to workers to be evaluated. */
573  ExecEvalParamExecParams(sendParams, estate);
574 
575  /* Allocate object for return value. */
576  pei = palloc0(sizeof(ParallelExecutorInfo));
577  pei->finished = false;
578  pei->planstate = planstate;
579 
580  /* Fix up and serialize plan to be sent to workers. */
581  pstmt_data = ExecSerializePlan(planstate->plan, estate);
582 
583  /* Create a parallel context. */
584  pcxt = CreateParallelContext("postgres", "ParallelQueryMain", nworkers);
585  pei->pcxt = pcxt;
586 
587  /*
588  * Before telling the parallel context to create a dynamic shared memory
589  * segment, we need to figure out how big it should be. Estimate space
590  * for the various things we need to store.
591  */
592 
593  /* Estimate space for fixed-size state. */
594  shm_toc_estimate_chunk(&pcxt->estimator,
595  sizeof(FixedParallelExecutorState));
596  shm_toc_estimate_keys(&pcxt->estimator, 1);
597 
598  /* Estimate space for query text. */
599  query_len = strlen(estate->es_sourceText);
600  shm_toc_estimate_chunk(&pcxt->estimator, query_len);
601  shm_toc_estimate_keys(&pcxt->estimator, 1);
602 
603  /* Estimate space for serialized PlannedStmt. */
604  pstmt_len = strlen(pstmt_data) + 1;
605  shm_toc_estimate_chunk(&pcxt->estimator, pstmt_len);
606  shm_toc_estimate_keys(&pcxt->estimator, 1);
607 
608  /* Estimate space for serialized ParamListInfo. */
609  paramlistinfo_len = EstimateParamListSpace(estate->es_param_list_info);
610  shm_toc_estimate_chunk(&pcxt->estimator, paramlistinfo_len);
611  shm_toc_estimate_keys(&pcxt->estimator, 1);
612 
613  /*
614  * Estimate space for BufferUsage.
615  *
616  * If EXPLAIN is not in use and there are no extensions loaded that care,
617  * we could skip this. But we have no way of knowing whether anyone's
618  * looking at pgBufferUsage, so do it unconditionally.
619  */
620  shm_toc_estimate_chunk(&pcxt->estimator,
621  mul_size(sizeof(BufferUsage), pcxt->nworkers));
622  shm_toc_estimate_keys(&pcxt->estimator, 1);
623 
624  /* Estimate space for tuple queues. */
625  shm_toc_estimate_chunk(&pcxt->estimator,
626  mul_size(PARALLEL_TUPLE_QUEUE_SIZE, pcxt->nworkers));
627  shm_toc_estimate_keys(&pcxt->estimator, 1);
628 
629  /*
630  * Give parallel-aware nodes a chance to add to the estimates, and get a
631  * count of how many PlanState nodes there are.
632  */
633  e.pcxt = pcxt;
634  e.nnodes = 0;
635  ExecParallelEstimate(planstate, &e);
636 
637  /* Estimate space for instrumentation, if required. */
638  if (estate->es_instrument)
639  {
640  instrumentation_len =
641  offsetof(SharedExecutorInstrumentation, plan_node_id) +
642  sizeof(int) * e.nnodes;
643  instrumentation_len = MAXALIGN(instrumentation_len);
644  instrument_offset = instrumentation_len;
645  instrumentation_len +=
646  mul_size(sizeof(Instrumentation),
647  mul_size(e.nnodes, nworkers));
648  shm_toc_estimate_chunk(&pcxt->estimator, instrumentation_len);
649  shm_toc_estimate_keys(&pcxt->estimator, 1);
650  }
651 
652  /* Estimate space for DSA area. */
653  shm_toc_estimate_chunk(&pcxt->estimator, dsa_minsize);
654  shm_toc_estimate_keys(&pcxt->estimator, 1);
655 
656  /* Everyone's had a chance to ask for space, so now create the DSM. */
657  InitializeParallelDSM(pcxt);
658 
659  /*
660  * OK, now we have a dynamic shared memory segment, and it should be big
661  * enough to store all of the data we estimated we would want to put into
662  * it, plus whatever general stuff (not specifically executor-related) the
663  * ParallelContext itself needs to store there. None of the space we
664  * asked for has been allocated or initialized yet, though, so do that.
665  */
666 
667  /* Store fixed-size state. */
668  fpes = shm_toc_allocate(pcxt->toc, sizeof(FixedParallelExecutorState));
669  fpes->tuples_needed = tuples_needed;
670  fpes->param_exec = InvalidDsaPointer;
671  fpes->eflags = estate->es_top_eflags;
672  shm_toc_insert(pcxt->toc, PARALLEL_KEY_EXECUTOR_FIXED, fpes);
673 
674  /* Store query string */
675  query_string = shm_toc_allocate(pcxt->toc, query_len);
676  memcpy(query_string, estate->es_sourceText, query_len);
677  shm_toc_insert(pcxt->toc, PARALLEL_KEY_QUERY_TEXT, query_string);
678 
679  /* Store serialized PlannedStmt. */
680  pstmt_space = shm_toc_allocate(pcxt->toc, pstmt_len);
681  memcpy(pstmt_space, pstmt_data, pstmt_len);
682  shm_toc_insert(pcxt->toc, PARALLEL_KEY_PLANNEDSTMT, pstmt_space);
683 
684  /* Store serialized ParamListInfo. */
685  paramlistinfo_space = shm_toc_allocate(pcxt->toc, paramlistinfo_len);
686  shm_toc_insert(pcxt->toc, PARALLEL_KEY_PARAMLISTINFO, paramlistinfo_space);
687  SerializeParamList(estate->es_param_list_info, &paramlistinfo_space);
688 
689  /* Allocate space for each worker's BufferUsage; no need to initialize. */
690  bufusage_space = shm_toc_allocate(pcxt->toc,
691  mul_size(sizeof(BufferUsage), pcxt->nworkers));
692  shm_toc_insert(pcxt->toc, PARALLEL_KEY_BUFFER_USAGE, bufusage_space);
693  pei->buffer_usage = bufusage_space;
694 
695  /* Set up the tuple queues that the workers will write into. */
696  pei->tqueue = ExecParallelSetupTupleQueues(pcxt, false);
697 
698  /* We don't need the TupleQueueReaders yet, though. */
699  pei->reader = NULL;
700 
701  /*
702  * If instrumentation options were supplied, allocate space for the data.
703  * It only gets partially initialized here; the rest happens during
704  * ExecParallelInitializeDSM.
705  */
706  if (estate->es_instrument)
707  {
708  Instrumentation *instrument;
709  int i;
710 
711  instrumentation = shm_toc_allocate(pcxt->toc, instrumentation_len);
712  instrumentation->instrument_options = estate->es_instrument;
713  instrumentation->instrument_offset = instrument_offset;
714  instrumentation->num_workers = nworkers;
715  instrumentation->num_plan_nodes = e.nnodes;
716  instrument = GetInstrumentationArray(instrumentation);
717  for (i = 0; i < nworkers * e.nnodes; ++i)
718  InstrInit(&instrument[i], estate->es_instrument);
719  shm_toc_insert(pcxt->toc, PARALLEL_KEY_INSTRUMENTATION,
720  instrumentation);
721  pei->instrumentation = instrumentation;
722  }
723 
724  /*
725  * Create a DSA area that can be used by the leader and all workers.
726  * (However, if we failed to create a DSM and are using private memory
727  * instead, then skip this.)
728  */
729  if (pcxt->seg != NULL)
730  {
731  char *area_space;
732 
733  area_space = shm_toc_allocate(pcxt->toc, dsa_minsize);
734  shm_toc_insert(pcxt->toc, PARALLEL_KEY_DSA, area_space);
735  pei->area = dsa_create_in_place(area_space, dsa_minsize,
736  LWTRANCHE_PARALLEL_QUERY_DSA,
737  pcxt->seg);
738 
739  /*
740  * Make the area available to executor nodes running in the leader.
741  * See also ParallelQueryMain which makes it available to workers.
742  */
743  estate->es_query_dsa = pei->area;
744 
745  /*
746  * Serialize parameters, if any, using DSA storage. We don't dare use
747  * the main parallel query DSM for this because we might relaunch
748  * workers after the values have changed (and thus the amount of
749  * storage required has changed).
750  */
751  if (!bms_is_empty(sendParams))
752  {
753  pei->param_exec = SerializeParamExecParams(estate, sendParams);
754  fpes->param_exec = pei->param_exec;
755  }
756  }
757 
758  /*
759  * Give parallel-aware nodes a chance to initialize their shared data.
760  * This also initializes the elements of instrumentation->ps_instrument,
761  * if it exists.
762  */
763  d.pcxt = pcxt;
764  d.instrumentation = instrumentation;
765  d.nnodes = 0;
766  ExecParallelInitializeDSM(planstate, &d);
767 
768  /*
769  * Make sure that the world hasn't shifted under our feet. This could
770  * probably just be an Assert(), but let's be conservative for now.
771  */
772  if (e.nnodes != d.nnodes)
773  elog(ERROR, "inconsistent count of PlanState nodes");
774 
775  /* OK, we're ready to rock and roll. */
776  return pei;
777 }
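A minimal leader-side sketch of how a Gather-like caller (see the "Referenced by" note above) drives this function; the node, estate, and nworkers variables and the NULL parameter set are illustrative assumptions, not code copied from ExecGather():

/* Illustrative only: set up parallel execution, launch workers, attach readers. */
ParallelExecutorInfo *pei;

pei = ExecInitParallelPlan(outerPlanState(node),  /* plan subtree to run */
                           estate,
                           NULL,        /* no PARAM_EXEC values to send */
                           nworkers,
                           -1);         /* no tuple bound */
LaunchParallelWorkers(pei->pcxt);
if (pei->pcxt->nworkers_launched > 0)
    ExecParallelCreateReaders(pei);

In the real callers the Bitmapset of parameters comes from the Gather/GatherMerge plan node, and tuples_needed typically reflects any tuple bound pushed down via ExecSetTupleBound().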

◆ ExecParallelCleanup()

void ExecParallelCleanup ( ParallelExecutorInfo *  pei )

Definition at line 1064 of file execParallel.c.

References ParallelExecutorInfo::area, DestroyParallelContext(), dsa_detach(), dsa_free(), DsaPointerIsValid, InvalidDsaPointer, ParallelExecutorInfo::param_exec, ParallelExecutorInfo::pcxt, and pfree().

Referenced by ExecShutdownGather(), and ExecShutdownGatherMerge().

1065 {
1066  /* Free any serialized parameters. */
1067  if (DsaPointerIsValid(pei->param_exec))
1068  {
1069  dsa_free(pei->area, pei->param_exec);
1070  pei->param_exec = InvalidDsaPointer;
1071  }
1072  if (pei->area != NULL)
1073  {
1074  dsa_detach(pei->area);
1075  pei->area = NULL;
1076  }
1077  if (pei->pcxt != NULL)
1078  {
1079  DestroyParallelContext(pei->pcxt);
1080  pei->pcxt = NULL;
1081  }
1082  pfree(pei);
1083 }

◆ ExecParallelCreateReaders()

void ExecParallelCreateReaders ( ParallelExecutorInfo *  pei )

Definition at line 786 of file execParallel.c.

References Assert, ParallelWorkerInfo::bgwhandle, CreateTupleQueueReader(), i, ParallelContext::nworkers_launched, palloc(), ParallelExecutorInfo::pcxt, ParallelExecutorInfo::reader, shm_mq_set_handle(), ParallelExecutorInfo::tqueue, and ParallelContext::worker.

Referenced by ExecGather(), and ExecGatherMerge().

787 {
788  int nworkers = pei->pcxt->nworkers_launched;
789  int i;
790 
791  Assert(pei->reader == NULL);
792 
793  if (nworkers > 0)
794  {
795  pei->reader = (TupleQueueReader **)
796  palloc(nworkers * sizeof(TupleQueueReader *));
797 
798  for (i = 0; i < nworkers; i++)
799  {
800  shm_mq_set_handle(pei->tqueue[i],
801  pei->pcxt->worker[i].bgwhandle);
802  pei->reader[i] = CreateTupleQueueReader(pei->tqueue[i]);
803  }
804  }
805 }
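Once the readers exist, the leader can poll each worker's queue. A hedged sketch, assuming the TupleQueueReaderNext() interface from tqueue.h and a hypothetical consume_tuple() handler:

/* Illustrative only: try to drain one tuple from each per-worker queue. */
int   i;

for (i = 0; i < pei->pcxt->nworkers_launched; i++)
{
    bool      done = false;
    HeapTuple tup;

    tup = TupleQueueReaderNext(pei->reader[i], true /* nowait */, &done);
    if (tup != NULL)
        consume_tuple(tup);   /* hypothetical per-tuple callback */
}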

◆ ExecParallelFinish()

void ExecParallelFinish ( ParallelExecutorInfo *  pei )

Definition at line 1006 of file execParallel.c.

References ParallelExecutorInfo::buffer_usage, DestroyTupleQueueReader(), ExecParallelRetrieveInstrumentation(), ParallelExecutorInfo::finished, i, InstrAccumParallelQuery(), ParallelExecutorInfo::instrumentation, ParallelContext::nworkers_launched, ParallelExecutorInfo::pcxt, pfree(), ParallelExecutorInfo::planstate, ParallelExecutorInfo::reader, shm_mq_detach(), ParallelExecutorInfo::tqueue, and WaitForParallelWorkersToFinish().

Referenced by ExecShutdownGatherMergeWorkers(), and ExecShutdownGatherWorkers().

1007 {
1008  int nworkers = pei->pcxt->nworkers_launched;
1009  int i;
1010 
1011  /* Make this be a no-op if called twice in a row. */
1012  if (pei->finished)
1013  return;
1014 
1015  /*
1016  * Detach from tuple queues ASAP, so that any still-active workers will
1017  * notice that no further results are wanted.
1018  */
1019  if (pei->tqueue != NULL)
1020  {
1021  for (i = 0; i < nworkers; i++)
1022  shm_mq_detach(pei->tqueue[i]);
1023  pfree(pei->tqueue);
1024  pei->tqueue = NULL;
1025  }
1026 
1027  /*
1028  * While we're waiting for the workers to finish, let's get rid of the
1029  * tuple queue readers. (Any other local cleanup could be done here too.)
1030  */
1031  if (pei->reader != NULL)
1032  {
1033  for (i = 0; i < nworkers; i++)
1034  DestroyTupleQueueReader(pei->reader[i]);
1035  pfree(pei->reader);
1036  pei->reader = NULL;
1037  }
1038 
1039  /* Now wait for the workers to finish. */
1040  WaitForParallelWorkersToFinish(pei->pcxt);
1041 
1042  /*
1043  * Next, accumulate buffer usage. (This must wait for the workers to
1044  * finish, or we might get incomplete data.)
1045  */
1046  for (i = 0; i < nworkers; i++)
1047  InstrAccumParallelQuery(&pei->buffer_usage[i]);
1048 
1049  /* Finally, accumulate instrumentation, if any. */
1050  if (pei->instrumentation)
1051  ExecParallelRetrieveInstrumentation(pei->planstate,
1052  pei->instrumentation);
1053 
1054  pei->finished = true;
1055 }
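The Gather shutdown paths call this before releasing resources. A hedged sketch of the typical leader-side ordering:

/* Illustrative only: finish first, then clean up. */
if (pei != NULL)
{
    ExecParallelFinish(pei);   /* detach queues, wait for workers, accumulate stats */
    ExecParallelCleanup(pei);  /* free DSA and parameters, destroy context, pfree pei */
    pei = NULL;
}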

◆ ExecParallelReinitialize()

void ExecParallelReinitialize ( PlanState *  planstate,
ParallelExecutorInfo *  pei,
Bitmapset *  sendParam 
)

Definition at line 812 of file execParallel.c.

References Assert, bms_is_empty(), dsa_free(), DsaPointerIsValid, EState::es_instrument, EState::es_query_dsa, ExecEvalParamExecParams(), ExecParallelReInitializeDSM(), ExecParallelSetupTupleQueues(), ParallelExecutorInfo::finished, GetInstrumentationArray, i, InstrInit(), ParallelExecutorInfo::instrumentation, InvalidDsaPointer, SharedExecutorInstrumentation::num_plan_nodes, SharedExecutorInstrumentation::num_workers, PARALLEL_KEY_EXECUTOR_FIXED, ParallelExecutorInfo::param_exec, FixedParallelExecutorState::param_exec, ParallelExecutorInfo::pcxt, ParallelExecutorInfo::planstate, ParallelExecutorInfo::reader, ReinitializeParallelDSM(), SerializeParamExecParams(), shm_toc_lookup(), PlanState::state, ParallelContext::toc, and ParallelExecutorInfo::tqueue.

Referenced by ExecGather(), and ExecGatherMerge().

815 {
816  EState *estate = planstate->state;
817  FixedParallelExecutorState *fpes;
818 
819  /* Old workers must already be shut down */
820  Assert(pei->finished);
821 
822  /* Clear the instrumentation space from the last round. */
823  if (pei->instrumentation)
824  {
825  Instrumentation *instrument;
826  SharedExecutorInstrumentation *sh_instr;
827  int i;
828 
829  sh_instr = pei->instrumentation;
830  instrument = GetInstrumentationArray(sh_instr);
831  for (i = 0; i < sh_instr->num_workers * sh_instr->num_plan_nodes; ++i)
832  InstrInit(&instrument[i], pei->planstate->state->es_instrument);
833  }
834 
835  /* Force parameters we're going to pass to workers to be evaluated. */
836  ExecEvalParamExecParams(sendParams, estate);
837 
838  ReinitializeParallelDSM(pei->pcxt);
839  pei->tqueue = ExecParallelSetupTupleQueues(pei->pcxt, true);
840  pei->reader = NULL;
841  pei->finished = false;
842 
843  fpes = shm_toc_lookup(pei->pcxt->toc, PARALLEL_KEY_EXECUTOR_FIXED, false);
844 
845  /* Free any serialized parameters from the last round. */
846  if (DsaPointerIsValid(fpes->param_exec))
847  {
848  dsa_free(estate->es_query_dsa, fpes->param_exec);
849  fpes->param_exec = InvalidDsaPointer;
850  }
851 
852  /* Serialize current parameter values if required. */
853  if (!bms_is_empty(sendParams))
854  {
855  pei->param_exec = SerializeParamExecParams(estate, sendParams);
856  fpes->param_exec = pei->param_exec;
857  }
858 
859  /* Traverse plan tree and let each child node reset associated state. */
860  ExecParallelReInitializeDSM(planstate, pei->pcxt);
861 }
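For a rescan, a Gather-like caller finishes the previous worker set and then reinitializes before relaunching. A hedged sketch (the NULL parameter set is an illustrative assumption):

/* Illustrative only: reuse an existing ParallelExecutorInfo for a rescan. */
ExecParallelFinish(pei);       /* old workers must already be shut down */
ExecParallelReinitialize(outerPlanState(node), pei, NULL);
LaunchParallelWorkers(pei->pcxt);
if (pei->pcxt->nworkers_launched > 0)
    ExecParallelCreateReaders(pei);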

◆ ParallelQueryMain()

void ParallelQueryMain ( dsm_segment *  seg,
shm_toc *  toc 
)

Definition at line 1260 of file execParallel.c.

References debug_query_string, dsa_attach_in_place(), dsa_detach(), dsa_get_address(), DsaPointerIsValid, FixedParallelExecutorState::eflags, EState::es_query_dsa, QueryDesc::estate, ExecParallelGetQueryDesc(), ExecParallelGetReceiver(), ExecParallelInitializeWorker(), ExecParallelReportInstrumentation(), ExecSetTupleBound(), ExecutorEnd(), ExecutorFinish(), ExecutorRun(), ExecutorStart(), ForwardScanDirection, FreeQueryDesc(), InstrEndParallelQuery(), InstrStartParallelQuery(), SharedExecutorInstrumentation::instrument_options, PARALLEL_KEY_BUFFER_USAGE, PARALLEL_KEY_DSA, PARALLEL_KEY_EXECUTOR_FIXED, PARALLEL_KEY_INSTRUMENTATION, ParallelWorkerNumber, FixedParallelExecutorState::param_exec, pgstat_report_activity(), QueryDesc::planstate, _DestReceiver::rDestroy, RestoreParamExecParams(), ParallelWorkerContext::seg, shm_toc_lookup(), QueryDesc::sourceText, PlanState::state, STATE_RUNNING, ParallelWorkerContext::toc, and FixedParallelExecutorState::tuples_needed.

1261 {
1262  FixedParallelExecutorState *fpes;
1263  BufferUsage *buffer_usage;
1264  DestReceiver *receiver;
1265  QueryDesc *queryDesc;
1266  SharedExecutorInstrumentation *instrumentation;
1267  int instrument_options = 0;
1268  void *area_space;
1269  dsa_area *area;
1270  ParallelWorkerContext pwcxt;
1271 
1272  /* Get fixed-size state. */
1273  fpes = shm_toc_lookup(toc, PARALLEL_KEY_EXECUTOR_FIXED, false);
1274 
1275  /* Set up DestReceiver, SharedExecutorInstrumentation, and QueryDesc. */
1276  receiver = ExecParallelGetReceiver(seg, toc);
1277  instrumentation = shm_toc_lookup(toc, PARALLEL_KEY_INSTRUMENTATION, true);
1278  if (instrumentation != NULL)
1279  instrument_options = instrumentation->instrument_options;
1280  queryDesc = ExecParallelGetQueryDesc(toc, receiver, instrument_options);
1281 
1282  /* Setting debug_query_string for individual workers */
1283  debug_query_string = queryDesc->sourceText;
1284 
1285  /* Report workers' query for monitoring purposes */
1286  pgstat_report_activity(STATE_RUNNING, debug_query_string);
1287 
1288  /* Prepare to track buffer usage during query execution. */
1289  InstrStartParallelQuery();
1290 
1291  /* Attach to the dynamic shared memory area. */
1292  area_space = shm_toc_lookup(toc, PARALLEL_KEY_DSA, false);
1293  area = dsa_attach_in_place(area_space, seg);
1294 
1295  /* Start up the executor */
1296  ExecutorStart(queryDesc, fpes->eflags);
1297 
1298  /* Special executor initialization steps for parallel workers */
1299  queryDesc->planstate->state->es_query_dsa = area;
1300  if (DsaPointerIsValid(fpes->param_exec))
1301  {
1302  char *paramexec_space;
1303 
1304  paramexec_space = dsa_get_address(area, fpes->param_exec);
1305  RestoreParamExecParams(paramexec_space, queryDesc->estate);
1306 
1307  }
1308  pwcxt.toc = toc;
1309  pwcxt.seg = seg;
1310  ExecParallelInitializeWorker(queryDesc->planstate, &pwcxt);
1311 
1312  /* Pass down any tuple bound */
1313  ExecSetTupleBound(fpes->tuples_needed, queryDesc->planstate);
1314 
1315  /*
1316  * Run the plan. If we specified a tuple bound, be careful not to demand
1317  * more tuples than that.
1318  */
1319  ExecutorRun(queryDesc,
1320  ForwardScanDirection,
1321  fpes->tuples_needed < 0 ? (int64) 0 : fpes->tuples_needed,
1322  true);
1323 
1324  /* Shut down the executor */
1325  ExecutorFinish(queryDesc);
1326 
1327  /* Report buffer usage during parallel execution. */
1328  buffer_usage = shm_toc_lookup(toc, PARALLEL_KEY_BUFFER_USAGE, false);
1329  InstrEndParallelQuery(&buffer_usage[ParallelWorkerNumber]);
1330 
1331  /* Report instrumentation data if any instrumentation options are set. */
1332  if (instrumentation != NULL)
1333  ExecParallelReportInstrumentation(queryDesc->planstate,
1334  instrumentation);
1335 
1336  /* Must do this after capturing instrumentation. */
1337  ExecutorEnd(queryDesc);
1338 
1339  /* Cleanup. */
1340  dsa_detach(area);
1341  FreeQueryDesc(queryDesc);
1342  receiver->rDestroy(receiver);
1343 }
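This is the worker entry point named in ExecInitParallelPlan()'s call to CreateParallelContext("postgres", "ParallelQueryMain", nworkers): each background worker launched for a parallel query looks up that library/function pair and invokes this function with its dynamic shared memory segment and table of contents. Executor code never calls it directly.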