1/*-------------------------------------------------------------------------
2 *
3 * execParallel.c
4 * Support routines for parallel execution.
5 *
6 * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 * This file contains routines that are intended to support setting up,
10 * using, and tearing down a ParallelContext from within the PostgreSQL
11 * executor. The ParallelContext machinery will handle starting the
12 * workers and ensuring that their state generally matches that of the
13 * leader; see src/backend/access/transam/README.parallel for details.
14 * However, we must save and restore relevant executor state, such as
15 * any ParamListInfo associated with the query, buffer/WAL usage info, and
16 * the actual plan to be passed down to the worker.
17 *
18 * IDENTIFICATION
19 * src/backend/executor/execParallel.c
20 *
21 *-------------------------------------------------------------------------
22 */
23
24#include "postgres.h"
25
26#include "executor/execParallel.h"
27#include "executor/executor.h"
28#include "executor/nodeAgg.h"
29#include "executor/nodeAppend.h"
30#include "executor/nodeBitmapHeapscan.h"
31#include "executor/nodeCustom.h"
32#include "executor/nodeForeignscan.h"
33#include "executor/nodeHash.h"
34#include "executor/nodeHashjoin.h"
35#include "executor/nodeIncrementalSort.h"
36#include "executor/nodeIndexonlyscan.h"
37#include "executor/nodeIndexscan.h"
38#include "executor/nodeMemoize.h"
39#include "executor/nodeSeqscan.h"
40#include "executor/nodeSort.h"
41#include "executor/nodeSubplan.h"
42#include "executor/tqueue.h"
43#include "jit/jit.h"
44#include "nodes/nodeFuncs.h"
45#include "pgstat.h"
46#include "tcop/tcopprot.h"
47#include "utils/datum.h"
48#include "utils/dsa.h"
49#include "utils/lsyscache.h"
50#include "utils/snapmgr.h"
51
52/*
53 * Magic numbers for parallel executor communication. We use constants
54 * greater than any 32-bit integer here so that values < 2^32 can be used
55 * by individual parallel nodes to store their own state.
56 */
57#define PARALLEL_KEY_EXECUTOR_FIXED UINT64CONST(0xE000000000000001)
58#define PARALLEL_KEY_PLANNEDSTMT UINT64CONST(0xE000000000000002)
59#define PARALLEL_KEY_PARAMLISTINFO UINT64CONST(0xE000000000000003)
60#define PARALLEL_KEY_BUFFER_USAGE UINT64CONST(0xE000000000000004)
61#define PARALLEL_KEY_TUPLE_QUEUE UINT64CONST(0xE000000000000005)
62#define PARALLEL_KEY_INSTRUMENTATION UINT64CONST(0xE000000000000006)
63#define PARALLEL_KEY_DSA UINT64CONST(0xE000000000000007)
64#define PARALLEL_KEY_QUERY_TEXT UINT64CONST(0xE000000000000008)
65#define PARALLEL_KEY_JIT_INSTRUMENTATION UINT64CONST(0xE000000000000009)
66#define PARALLEL_KEY_WAL_USAGE UINT64CONST(0xE00000000000000A)
67
68#define PARALLEL_TUPLE_QUEUE_SIZE 65536
69
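/*
 * Illustrative sketch: how a parallel-aware node typically keys its own
 * shared state.  Because plan_node_id values are always below 2^32, they
 * cannot collide with the PARALLEL_KEY_* constants above.  The
 * "SharedFooState" type below is hypothetical; the shm_toc calls are the
 * real API.
 */
#if 0
	/* in ExecFooEstimate(): reserve space and a key */
	shm_toc_estimate_chunk(&pcxt->estimator, sizeof(SharedFooState));
	shm_toc_estimate_keys(&pcxt->estimator, 1);

	/* in ExecFooInitializeDSM(): allocate and publish the shared state */
	shared = shm_toc_allocate(pcxt->toc, sizeof(SharedFooState));
	shm_toc_insert(pcxt->toc, planstate->plan->plan_node_id, shared);

	/* in ExecFooInitializeWorker(): find it again from the worker */
	shared = shm_toc_lookup(pwcxt->toc, planstate->plan->plan_node_id, false);
#endif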
70/*
71 * Fixed-size random stuff that we need to pass to parallel workers.
72 */
73typedef struct FixedParallelExecutorState
74{
75 int64 tuples_needed; /* tuple bound, see ExecSetTupleBound */
76 dsa_pointer param_exec;
77 int eflags;
78 int jit_flags;
79} FixedParallelExecutorState;
80
81/*
82 * DSM structure for accumulating per-PlanState instrumentation.
83 *
84 * instrument_options: Same meaning here as in instrument.c.
85 *
86 * instrument_offset: Offset, relative to the start of this structure,
87 * of the first Instrumentation object. This will depend on the length of
88 * the plan_node_id array.
89 *
90 * num_workers: Number of workers.
91 *
92 * num_plan_nodes: Number of plan nodes.
93 *
94 * plan_node_id: Array of plan nodes for which we are gathering instrumentation
95 * from parallel workers. The length of this array is given by num_plan_nodes.
96 */
97struct SharedExecutorInstrumentation
98{
99 int instrument_options;
100 int instrument_offset;
101 int num_workers;
102 int num_plan_nodes;
103 int plan_node_id[FLEXIBLE_ARRAY_MEMBER];
104 /* array of num_plan_nodes * num_workers Instrumentation objects follows */
105};
106#define GetInstrumentationArray(sei) \
107 (AssertVariableIsOfTypeMacro(sei, SharedExecutorInstrumentation *), \
108 (Instrumentation *) (((char *) sei) + sei->instrument_offset))
109
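/*
 * Illustrative sketch: given a SharedExecutorInstrumentation pointer "sei",
 * the slot for the i'th tracked plan node and worker number n lives at
 * index i * num_workers + n, which is how the retrieve/report routines
 * below address the array.
 */
#if 0
	Instrumentation *slots = GetInstrumentationArray(sei);
	Instrumentation *slot = &slots[i * sei->num_workers + n];
#endif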
110/* Context object for ExecParallelEstimate. */
111typedef struct ExecParallelEstimateContext
112{
113 ParallelContext *pcxt;
114 int nnodes;
115} ExecParallelEstimateContext;
116
117/* Context object for ExecParallelInitializeDSM. */
118typedef struct ExecParallelInitializeDSMContext
119{
120 ParallelContext *pcxt;
121 SharedExecutorInstrumentation *instrumentation;
122 int nnodes;
123} ExecParallelInitializeDSMContext;
124
125/* Helper functions that run in the parallel leader. */
126static char *ExecSerializePlan(Plan *plan, EState *estate);
127static bool ExecParallelEstimate(PlanState *planstate,
128 ExecParallelEstimateContext *e);
129static bool ExecParallelInitializeDSM(PlanState *planstate,
130 ExecParallelInitializeDSMContext *d);
131static shm_mq_handle **ExecParallelSetupTupleQueues(ParallelContext *pcxt,
132 bool reinitialize);
133static bool ExecParallelReInitializeDSM(PlanState *planstate,
134 ParallelContext *pcxt);
135static bool ExecParallelRetrieveInstrumentation(PlanState *planstate,
136 SharedExecutorInstrumentation *instrumentation);
137
138/* Helper function that runs in the parallel worker. */
139static DestReceiver *ExecParallelGetReceiver(dsm_segment *seg, shm_toc *toc);
140
141/*
142 * Create a serialized representation of the plan to be sent to each worker.
143 */
144static char *
145ExecSerializePlan(Plan *plan, EState *estate)
146{
147 PlannedStmt *pstmt;
148 ListCell *lc;
149
150 /* We can't scribble on the original plan, so make a copy. */
151 plan = copyObject(plan);
152
153 /*
154 * The worker will start its own copy of the executor, and that copy will
155 * insert a junk filter if the toplevel node has any resjunk entries. We
156 * don't want that to happen, because while resjunk columns shouldn't be
157 * sent back to the user, here the tuples are coming back to another
158 * backend which may very well need them. So mutate the target list
159 * accordingly. This is sort of a hack; there might be better ways to do
160 * this...
161 */
162 foreach(lc, plan->targetlist)
163 {
164 TargetEntry *tle = lfirst_node(TargetEntry, lc);
165
166 tle->resjunk = false;
167 }
168
169 /*
170 * Create a dummy PlannedStmt. Most of the fields don't need to be valid
171 * for our purposes, but the worker will need at least a minimal
172 * PlannedStmt to start the executor.
173 */
174 pstmt = makeNode(PlannedStmt);
175 pstmt->commandType = CMD_SELECT;
176 pstmt->queryId = pgstat_get_my_query_id();
177 pstmt->hasReturning = false;
178 pstmt->hasModifyingCTE = false;
179 pstmt->canSetTag = true;
180 pstmt->transientPlan = false;
181 pstmt->dependsOnRole = false;
182 pstmt->parallelModeNeeded = false;
183 pstmt->planTree = plan;
184 pstmt->rtable = estate->es_range_table;
185 pstmt->permInfos = estate->es_rteperminfos;
186 pstmt->resultRelations = NIL;
187 pstmt->appendRelations = NIL;
188
189 /*
190 * Transfer only parallel-safe subplans, leaving a NULL "hole" in the list
191 * for unsafe ones (so that the list indexes of the safe ones are
192 * preserved). This positively ensures that the worker won't try to run,
193 * or even do ExecInitNode on, an unsafe subplan. That's important to
194 * protect, eg, non-parallel-aware FDWs from getting into trouble.
195 */
196 pstmt->subplans = NIL;
197 foreach(lc, estate->es_plannedstmt->subplans)
198 {
199 Plan *subplan = (Plan *) lfirst(lc);
200
201 if (subplan && !subplan->parallel_safe)
202 subplan = NULL;
203 pstmt->subplans = lappend(pstmt->subplans, subplan);
204 }
205
206 pstmt->rewindPlanIDs = NULL;
207 pstmt->rowMarks = NIL;
208 pstmt->relationOids = NIL;
209 pstmt->invalItems = NIL; /* workers can't replan anyway... */
210 pstmt->paramExecTypes = estate->es_plannedstmt->paramExecTypes;
211 pstmt->utilityStmt = NULL;
212 pstmt->stmt_location = -1;
213 pstmt->stmt_len = -1;
214
215 /* Return serialized copy of our dummy PlannedStmt. */
216 return nodeToString(pstmt);
217}
218
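/*
 * Illustrative sketch: the string produced here travels through the DSM and
 * is turned back into a node tree in the worker, as ExecParallelGetQueryDesc()
 * does further down.
 */
#if 0
	char	   *pstmt_data = ExecSerializePlan(planstate->plan, estate);
	PlannedStmt *pstmt = (PlannedStmt *) stringToNode(pstmt_data);
#endif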
219/*
220 * Parallel-aware plan nodes (and occasionally others) may need some state
221 * which is shared across all parallel workers. Before we size the DSM, give
222 * them a chance to call shm_toc_estimate_chunk or shm_toc_estimate_keys on
223 * &pcxt->estimator.
224 *
225 * While we're at it, count the number of PlanState nodes in the tree, so
226 * we know how many Instrumentation structures we need.
227 */
228static bool
229ExecParallelEstimate(PlanState *planstate, ExecParallelEstimateContext *e)
230{
231 if (planstate == NULL)
232 return false;
233
234 /* Count this node. */
235 e->nnodes++;
236
237 switch (nodeTag(planstate))
238 {
239 case T_SeqScanState:
240 if (planstate->plan->parallel_aware)
241 ExecSeqScanEstimate((SeqScanState *) planstate,
242 e->pcxt);
243 break;
244 case T_IndexScanState:
245 if (planstate->plan->parallel_aware)
246 ExecIndexScanEstimate((IndexScanState *) planstate,
247 e->pcxt);
248 break;
249 case T_IndexOnlyScanState:
250 if (planstate->plan->parallel_aware)
251 ExecIndexOnlyScanEstimate((IndexOnlyScanState *) planstate,
252 e->pcxt);
253 break;
254 case T_ForeignScanState:
255 if (planstate->plan->parallel_aware)
256 ExecForeignScanEstimate((ForeignScanState *) planstate,
257 e->pcxt);
258 break;
259 case T_AppendState:
260 if (planstate->plan->parallel_aware)
261 ExecAppendEstimate((AppendState *) planstate,
262 e->pcxt);
263 break;
264 case T_CustomScanState:
265 if (planstate->plan->parallel_aware)
266 ExecCustomScanEstimate((CustomScanState *) planstate,
267 e->pcxt);
268 break;
269 case T_BitmapHeapScanState:
270 if (planstate->plan->parallel_aware)
271 ExecBitmapHeapEstimate((BitmapHeapScanState *) planstate,
272 e->pcxt);
273 break;
274 case T_HashJoinState:
275 if (planstate->plan->parallel_aware)
276 ExecHashJoinEstimate((HashJoinState *) planstate,
277 e->pcxt);
278 break;
279 case T_HashState:
280 /* even when not parallel-aware, for EXPLAIN ANALYZE */
281 ExecHashEstimate((HashState *) planstate, e->pcxt);
282 break;
283 case T_SortState:
284 /* even when not parallel-aware, for EXPLAIN ANALYZE */
285 ExecSortEstimate((SortState *) planstate, e->pcxt);
286 break;
287 case T_IncrementalSortState:
288 /* even when not parallel-aware, for EXPLAIN ANALYZE */
289 ExecIncrementalSortEstimate((IncrementalSortState *) planstate, e->pcxt);
290 break;
291 case T_AggState:
292 /* even when not parallel-aware, for EXPLAIN ANALYZE */
293 ExecAggEstimate((AggState *) planstate, e->pcxt);
294 break;
295 case T_MemoizeState:
296 /* even when not parallel-aware, for EXPLAIN ANALYZE */
297 ExecMemoizeEstimate((MemoizeState *) planstate, e->pcxt);
298 break;
299 default:
300 break;
301 }
302
303 return planstate_tree_walker(planstate, ExecParallelEstimate, e);
304}
305
306/*
307 * Estimate the amount of space required to serialize the indicated parameters.
308 */
309static Size
310EstimateParamExecSpace(EState *estate, Bitmapset *params)
311{
312 int paramid;
313 Size sz = sizeof(int);
314
315 paramid = -1;
316 while ((paramid = bms_next_member(params, paramid)) >= 0)
317 {
318 Oid typeOid;
319 int16 typLen;
320 bool typByVal;
321 ParamExecData *prm;
322
323 prm = &(estate->es_param_exec_vals[paramid]);
324 typeOid = list_nth_oid(estate->es_plannedstmt->paramExecTypes,
325 paramid);
326
327 sz = add_size(sz, sizeof(int)); /* space for paramid */
328
329 /* space for datum/isnull */
330 if (OidIsValid(typeOid))
331 get_typlenbyval(typeOid, &typLen, &typByVal);
332 else
333 {
334 /* If no type OID, assume by-value, like copyParamList does. */
335 typLen = sizeof(Datum);
336 typByVal = true;
337 }
338 sz = add_size(sz,
339 datumEstimateSpace(prm->value, prm->isnull,
340 typByVal, typLen));
341 }
342 return sz;
343}
344
345/*
346 * Serialize specified PARAM_EXEC parameters.
347 *
348 * We write the number of parameters first, as a 4-byte integer, and then
349 * write details for each parameter in turn. The details for each parameter
350 * consist of a 4-byte paramid (location of param in execution time internal
351 * parameter array) and then the datum as serialized by datumSerialize().
352 */
353static dsa_pointer
354SerializeParamExecParams(EState *estate, Bitmapset *params, dsa_area *area)
355{
356 Size size;
357 int nparams;
358 int paramid;
359 ParamExecData *prm;
360 dsa_pointer handle;
361 char *start_address;
362
363 /* Allocate enough space for the current parameter values. */
364 size = EstimateParamExecSpace(estate, params);
365 handle = dsa_allocate(area, size);
366 start_address = dsa_get_address(area, handle);
367
368 /* First write the number of parameters as a 4-byte integer. */
369 nparams = bms_num_members(params);
370 memcpy(start_address, &nparams, sizeof(int));
371 start_address += sizeof(int);
372
373 /* Write details for each parameter in turn. */
374 paramid = -1;
375 while ((paramid = bms_next_member(params, paramid)) >= 0)
376 {
377 Oid typeOid;
378 int16 typLen;
379 bool typByVal;
380
381 prm = &(estate->es_param_exec_vals[paramid]);
382 typeOid = list_nth_oid(estate->es_plannedstmt->paramExecTypes,
383 paramid);
384
385 /* Write paramid. */
386 memcpy(start_address, &paramid, sizeof(int));
387 start_address += sizeof(int);
388
389 /* Write datum/isnull */
390 if (OidIsValid(typeOid))
391 get_typlenbyval(typeOid, &typLen, &typByVal);
392 else
393 {
394 /* If no type OID, assume by-value, like copyParamList does. */
395 typLen = sizeof(Datum);
396 typByVal = true;
397 }
398 datumSerialize(prm->value, prm->isnull, typByVal, typLen,
399 &start_address);
400 }
401
402 return handle;
403}
404
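/*
 * Illustrative example of the layout produced above: for two parameters,
 * $0 (an int4 with value 42) and $3 (a NULL), the DSA chunk contains, in
 * order: the int 2 (number of parameters), then the int 0 followed by the
 * datumSerialize() output for 42, then the int 3 followed by the
 * datumSerialize() output for the NULL.
 */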
405/*
406 * Restore specified PARAM_EXEC parameters.
407 */
408static void
409RestoreParamExecParams(char *start_address, EState *estate)
410{
411 int nparams;
412 int i;
413 int paramid;
414
415 memcpy(&nparams, start_address, sizeof(int));
416 start_address += sizeof(int);
417
418 for (i = 0; i < nparams; i++)
419 {
420 ParamExecData *prm;
421
422 /* Read paramid */
423 memcpy(&paramid, start_address, sizeof(int));
424 start_address += sizeof(int);
425 prm = &(estate->es_param_exec_vals[paramid]);
426
427 /* Read datum/isnull. */
428 prm->value = datumRestore(&start_address, &prm->isnull);
429 prm->execPlan = NULL;
430 }
431}
432
433/*
434 * Initialize the dynamic shared memory segment that will be used to control
435 * parallel execution.
436 */
437static bool
438ExecParallelInitializeDSM(PlanState *planstate,
439 ExecParallelInitializeDSMContext *d)
440{
441 if (planstate == NULL)
442 return false;
443
444 /* If instrumentation is enabled, initialize slot for this node. */
445 if (d->instrumentation != NULL)
446 d->instrumentation->plan_node_id[d->nnodes] =
447 planstate->plan->plan_node_id;
448
449 /* Count this node. */
450 d->nnodes++;
451
452 /*
453 * Call initializers for DSM-using plan nodes.
454 *
455 * Most plan nodes won't do anything here, but plan nodes that allocated
456 * DSM may need to initialize shared state in the DSM before parallel
457 * workers are launched. They can allocate the space they previously
458 * estimated using shm_toc_allocate, and add the keys they previously
459 * estimated using shm_toc_insert, in each case targeting pcxt->toc.
460 */
461 switch (nodeTag(planstate))
462 {
463 case T_SeqScanState:
464 if (planstate->plan->parallel_aware)
465 ExecSeqScanInitializeDSM((SeqScanState *) planstate,
466 d->pcxt);
467 break;
468 case T_IndexScanState:
469 if (planstate->plan->parallel_aware)
470 ExecIndexScanInitializeDSM((IndexScanState *) planstate,
471 d->pcxt);
472 break;
473 case T_IndexOnlyScanState:
474 if (planstate->plan->parallel_aware)
475 ExecIndexOnlyScanInitializeDSM((IndexOnlyScanState *) planstate,
476 d->pcxt);
477 break;
478 case T_ForeignScanState:
479 if (planstate->plan->parallel_aware)
480 ExecForeignScanInitializeDSM((ForeignScanState *) planstate,
481 d->pcxt);
482 break;
483 case T_AppendState:
484 if (planstate->plan->parallel_aware)
485 ExecAppendInitializeDSM((AppendState *) planstate,
486 d->pcxt);
487 break;
488 case T_CustomScanState:
489 if (planstate->plan->parallel_aware)
490 ExecCustomScanInitializeDSM((CustomScanState *) planstate,
491 d->pcxt);
492 break;
493 case T_BitmapHeapScanState:
494 if (planstate->plan->parallel_aware)
495 ExecBitmapHeapInitializeDSM((BitmapHeapScanState *) planstate,
496 d->pcxt);
497 break;
498 case T_HashJoinState:
499 if (planstate->plan->parallel_aware)
500 ExecHashJoinInitializeDSM((HashJoinState *) planstate,
501 d->pcxt);
502 break;
503 case T_HashState:
504 /* even when not parallel-aware, for EXPLAIN ANALYZE */
505 ExecHashInitializeDSM((HashState *) planstate, d->pcxt);
506 break;
507 case T_SortState:
508 /* even when not parallel-aware, for EXPLAIN ANALYZE */
509 ExecSortInitializeDSM((SortState *) planstate, d->pcxt);
510 break;
511 case T_IncrementalSortState:
512 /* even when not parallel-aware, for EXPLAIN ANALYZE */
513 ExecIncrementalSortInitializeDSM((IncrementalSortState *) planstate, d->pcxt);
514 break;
515 case T_AggState:
516 /* even when not parallel-aware, for EXPLAIN ANALYZE */
517 ExecAggInitializeDSM((AggState *) planstate, d->pcxt);
518 break;
519 case T_MemoizeState:
520 /* even when not parallel-aware, for EXPLAIN ANALYZE */
521 ExecMemoizeInitializeDSM((MemoizeState *) planstate, d->pcxt);
522 break;
523 default:
524 break;
525 }
526
527 return planstate_tree_walker(planstate, ExecParallelInitializeDSM, d);
528}
529
530/*
531 * Set up the response queues that parallel workers will use to return
532 * tuples to the leader backend.
533 */
534static shm_mq_handle **
535ExecParallelSetupTupleQueues(ParallelContext *pcxt, bool reinitialize)
536{
537 shm_mq_handle **responseq;
538 char *tqueuespace;
539 int i;
540
541 /* Skip this if no workers. */
542 if (pcxt->nworkers == 0)
543 return NULL;
544
545 /* Allocate memory for shared memory queue handles. */
546 responseq = (shm_mq_handle **)
547 palloc(pcxt->nworkers * sizeof(shm_mq_handle *));
548
549 /*
550 * If not reinitializing, allocate space from the DSM for the queues;
551 * otherwise, find the already allocated space.
552 */
553 if (!reinitialize)
554 tqueuespace =
555 shm_toc_allocate(pcxt->toc,
556 mul_size(PARALLEL_TUPLE_QUEUE_SIZE,
557 pcxt->nworkers));
558 else
559 tqueuespace = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_TUPLE_QUEUE, false);
560
561 /* Create the queues, and become the receiver for each. */
562 for (i = 0; i < pcxt->nworkers; ++i)
563 {
564 shm_mq *mq;
565
566 mq = shm_mq_create(tqueuespace +
567 ((Size) i) * PARALLEL_TUPLE_QUEUE_SIZE,
568 (Size) PARALLEL_TUPLE_QUEUE_SIZE);
569
570 shm_mq_set_receiver(mq, MyProc);
571 responseq[i] = shm_mq_attach(mq, pcxt->seg, NULL);
572 }
573
574 /* Add array of queues to shm_toc, so others can find it. */
575 if (!reinitialize)
576 shm_toc_insert(pcxt->toc, PARALLEL_KEY_TUPLE_QUEUE, tqueuespace);
577
578 /* Return array of handles. */
579 return responseq;
580}
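/*
 * Illustrative sketch: worker N's queue lives at a fixed offset within the
 * chunk allocated above, which is how ExecParallelGetReceiver() locates the
 * queue that a given worker should write to.
 */
#if 0
	shm_mq *my_mq = (shm_mq *) (tqueuespace +
								ParallelWorkerNumber * PARALLEL_TUPLE_QUEUE_SIZE);
#endif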
581
582/*
583 * Sets up the required infrastructure for backend workers to perform
584 * execution and return results to the main backend.
585 */
586ParallelExecutorInfo *
587ExecInitParallelPlan(PlanState *planstate, EState *estate,
588 Bitmapset *sendParams, int nworkers,
589 int64 tuples_needed)
590{
591 ParallelExecutorInfo *pei;
592 ParallelContext *pcxt;
593 ExecParallelEstimateContext e;
594 ExecParallelInitializeDSMContext d;
595 FixedParallelExecutorState *fpes;
596 char *pstmt_data;
597 char *pstmt_space;
598 char *paramlistinfo_space;
599 BufferUsage *bufusage_space;
600 WalUsage *walusage_space;
601 SharedExecutorInstrumentation *instrumentation = NULL;
602 SharedJitInstrumentation *jit_instrumentation = NULL;
603 int pstmt_len;
604 int paramlistinfo_len;
605 int instrumentation_len = 0;
606 int jit_instrumentation_len = 0;
607 int instrument_offset = 0;
608 Size dsa_minsize = dsa_minimum_size();
609 char *query_string;
610 int query_len;
611
612 /*
613 * Force any initplan outputs that we're going to pass to workers to be
614 * evaluated, if they weren't already.
615 *
616 * For simplicity, we use the EState's per-output-tuple ExprContext here.
617 * That risks intra-query memory leakage, since we might pass through here
618 * many times before that ExprContext gets reset; but ExecSetParamPlan
619 * doesn't normally leak any memory in the context (see its comments), so
620 * it doesn't seem worth complicating this function's API to pass it a
621 * shorter-lived ExprContext. This might need to change someday.
622 */
623 ExecSetParamPlanMulti(sendParams, GetPerTupleExprContext(estate));
624
625 /* Allocate object for return value. */
626 pei = palloc0(sizeof(ParallelExecutorInfo));
627 pei->finished = false;
628 pei->planstate = planstate;
629
630 /* Fix up and serialize plan to be sent to workers. */
631 pstmt_data = ExecSerializePlan(planstate->plan, estate);
632
633 /* Create a parallel context. */
634 pcxt = CreateParallelContext("postgres", "ParallelQueryMain", nworkers);
635 pei->pcxt = pcxt;
636
637 /*
638 * Before telling the parallel context to create a dynamic shared memory
639 * segment, we need to figure out how big it should be. Estimate space
640 * for the various things we need to store.
641 */
642
643 /* Estimate space for fixed-size state. */
644 shm_toc_estimate_chunk(&pcxt->estimator,
645 sizeof(FixedParallelExecutorState));
646 shm_toc_estimate_keys(&pcxt->estimator, 1);
647
648 /* Estimate space for query text. */
649 query_len = strlen(estate->es_sourceText);
650 shm_toc_estimate_chunk(&pcxt->estimator, query_len + 1);
651 shm_toc_estimate_keys(&pcxt->estimator, 1);
652
653 /* Estimate space for serialized PlannedStmt. */
654 pstmt_len = strlen(pstmt_data) + 1;
655 shm_toc_estimate_chunk(&pcxt->estimator, pstmt_len);
656 shm_toc_estimate_keys(&pcxt->estimator, 1);
657
658 /* Estimate space for serialized ParamListInfo. */
659 paramlistinfo_len = EstimateParamListSpace(estate->es_param_list_info);
660 shm_toc_estimate_chunk(&pcxt->estimator, paramlistinfo_len);
661 shm_toc_estimate_keys(&pcxt->estimator, 1);
662
663 /*
664 * Estimate space for BufferUsage.
665 *
666 * If EXPLAIN is not in use and there are no extensions loaded that care,
667 * we could skip this. But we have no way of knowing whether anyone's
668 * looking at pgBufferUsage, so do it unconditionally.
669 */
670 shm_toc_estimate_chunk(&pcxt->estimator,
671 mul_size(sizeof(BufferUsage), pcxt->nworkers));
672 shm_toc_estimate_keys(&pcxt->estimator, 1);
673
674 /*
675 * Same thing for WalUsage.
676 */
677 shm_toc_estimate_chunk(&pcxt->estimator,
678 mul_size(sizeof(WalUsage), pcxt->nworkers));
679 shm_toc_estimate_keys(&pcxt->estimator, 1);
680
681 /* Estimate space for tuple queues. */
682 shm_toc_estimate_chunk(&pcxt->estimator,
683 mul_size(PARALLEL_TUPLE_QUEUE_SIZE, pcxt->nworkers));
684 shm_toc_estimate_keys(&pcxt->estimator, 1);
685
686 /*
687 * Give parallel-aware nodes a chance to add to the estimates, and get a
688 * count of how many PlanState nodes there are.
689 */
690 e.pcxt = pcxt;
691 e.nnodes = 0;
692 ExecParallelEstimate(planstate, &e);
693
694 /* Estimate space for instrumentation, if required. */
695 if (estate->es_instrument)
696 {
697 instrumentation_len =
698 offsetof(SharedExecutorInstrumentation, plan_node_id) +
699 sizeof(int) * e.nnodes;
700 instrumentation_len = MAXALIGN(instrumentation_len);
701 instrument_offset = instrumentation_len;
702 instrumentation_len +=
703 mul_size(sizeof(Instrumentation),
704 mul_size(e.nnodes, nworkers));
705 shm_toc_estimate_chunk(&pcxt->estimator, instrumentation_len);
706 shm_toc_estimate_keys(&pcxt->estimator, 1);
707
708 /* Estimate space for JIT instrumentation, if required. */
709 if (estate->es_jit_flags != PGJIT_NONE)
710 {
711 jit_instrumentation_len =
712 offsetof(SharedJitInstrumentation, jit_instr) +
713 sizeof(JitInstrumentation) * nworkers;
714 shm_toc_estimate_chunk(&pcxt->estimator, jit_instrumentation_len);
715 shm_toc_estimate_keys(&pcxt->estimator, 1);
716 }
717 }
718
719 /* Estimate space for DSA area. */
720 shm_toc_estimate_chunk(&pcxt->estimator, dsa_minsize);
721 shm_toc_estimate_keys(&pcxt->estimator, 1);
722
723 /*
724 * InitializeParallelDSM() passes the active snapshot to the parallel
725 * worker, which uses it to set es_snapshot. Make sure we don't set
726 * es_snapshot differently in the child.
727 */
728 Assert(GetActiveSnapshot() == estate->es_snapshot);
729
730 /* Everyone's had a chance to ask for space, so now create the DSM. */
731 InitializeParallelDSM(pcxt);
732
733 /*
734 * OK, now we have a dynamic shared memory segment, and it should be big
735 * enough to store all of the data we estimated we would want to put into
736 * it, plus whatever general stuff (not specifically executor-related) the
737 * ParallelContext itself needs to store there. None of the space we
738 * asked for has been allocated or initialized yet, though, so do that.
739 */
740
741 /* Store fixed-size state. */
742 fpes = shm_toc_allocate(pcxt->toc, sizeof(FixedParallelExecutorState));
743 fpes->tuples_needed = tuples_needed;
744 fpes->param_exec = InvalidDsaPointer;
745 fpes->eflags = estate->es_top_eflags;
746 fpes->jit_flags = estate->es_jit_flags;
747 shm_toc_insert(pcxt->toc, PARALLEL_KEY_EXECUTOR_FIXED, fpes);
748
749 /* Store query string */
750 query_string = shm_toc_allocate(pcxt->toc, query_len + 1);
751 memcpy(query_string, estate->es_sourceText, query_len + 1);
752 shm_toc_insert(pcxt->toc, PARALLEL_KEY_QUERY_TEXT, query_string);
753
754 /* Store serialized PlannedStmt. */
755 pstmt_space = shm_toc_allocate(pcxt->toc, pstmt_len);
756 memcpy(pstmt_space, pstmt_data, pstmt_len);
757 shm_toc_insert(pcxt->toc, PARALLEL_KEY_PLANNEDSTMT, pstmt_space);
758
759 /* Store serialized ParamListInfo. */
760 paramlistinfo_space = shm_toc_allocate(pcxt->toc, paramlistinfo_len);
761 shm_toc_insert(pcxt->toc, PARALLEL_KEY_PARAMLISTINFO, paramlistinfo_space);
762 SerializeParamList(estate->es_param_list_info, &paramlistinfo_space);
763
764 /* Allocate space for each worker's BufferUsage; no need to initialize. */
765 bufusage_space = shm_toc_allocate(pcxt->toc,
766 mul_size(sizeof(BufferUsage), pcxt->nworkers));
767 shm_toc_insert(pcxt->toc, PARALLEL_KEY_BUFFER_USAGE, bufusage_space);
768 pei->buffer_usage = bufusage_space;
769
770 /* Same for WalUsage. */
771 walusage_space = shm_toc_allocate(pcxt->toc,
772 mul_size(sizeof(WalUsage), pcxt->nworkers));
773 shm_toc_insert(pcxt->toc, PARALLEL_KEY_WAL_USAGE, walusage_space);
774 pei->wal_usage = walusage_space;
775
776 /* Set up the tuple queues that the workers will write into. */
777 pei->tqueue = ExecParallelSetupTupleQueues(pcxt, false);
778
779 /* We don't need the TupleQueueReaders yet, though. */
780 pei->reader = NULL;
781
782 /*
783 * If instrumentation options were supplied, allocate space for the data.
784 * It only gets partially initialized here; the rest happens during
785 * ExecParallelInitializeDSM.
786 */
787 if (estate->es_instrument)
788 {
789 Instrumentation *instrument;
790 int i;
791
792 instrumentation = shm_toc_allocate(pcxt->toc, instrumentation_len);
793 instrumentation->instrument_options = estate->es_instrument;
794 instrumentation->instrument_offset = instrument_offset;
795 instrumentation->num_workers = nworkers;
796 instrumentation->num_plan_nodes = e.nnodes;
797 instrument = GetInstrumentationArray(instrumentation);
798 for (i = 0; i < nworkers * e.nnodes; ++i)
799 InstrInit(&instrument[i], estate->es_instrument);
800 shm_toc_insert(pcxt->toc, PARALLEL_KEY_INSTRUMENTATION,
801 instrumentation);
802 pei->instrumentation = instrumentation;
803
804 if (estate->es_jit_flags != PGJIT_NONE)
805 {
806 jit_instrumentation = shm_toc_allocate(pcxt->toc,
807 jit_instrumentation_len);
808 jit_instrumentation->num_workers = nworkers;
809 memset(jit_instrumentation->jit_instr, 0,
810 sizeof(JitInstrumentation) * nworkers);
811 shm_toc_insert(pcxt->toc, PARALLEL_KEY_JIT_INSTRUMENTATION,
812 jit_instrumentation);
813 pei->jit_instrumentation = jit_instrumentation;
814 }
815 }
816
817 /*
818 * Create a DSA area that can be used by the leader and all workers.
819 * (However, if we failed to create a DSM and are using private memory
820 * instead, then skip this.)
821 */
822 if (pcxt->seg != NULL)
823 {
824 char *area_space;
825
826 area_space = shm_toc_allocate(pcxt->toc, dsa_minsize);
827 shm_toc_insert(pcxt->toc, PARALLEL_KEY_DSA, area_space);
828 pei->area = dsa_create_in_place(area_space, dsa_minsize,
829 LWTRANCHE_PARALLEL_QUERY_DSA,
830 pcxt->seg);
831
832 /*
833 * Serialize parameters, if any, using DSA storage. We don't dare use
834 * the main parallel query DSM for this because we might relaunch
835 * workers after the values have changed (and thus the amount of
836 * storage required has changed).
837 */
838 if (!bms_is_empty(sendParams))
839 {
840 pei->param_exec = SerializeParamExecParams(estate, sendParams,
841 pei->area);
842 fpes->param_exec = pei->param_exec;
843 }
844 }
845
846 /*
847 * Give parallel-aware nodes a chance to initialize their shared data.
848 * This also initializes the elements of instrumentation->ps_instrument,
849 * if it exists.
850 */
851 d.pcxt = pcxt;
852 d.instrumentation = instrumentation;
853 d.nnodes = 0;
854
855 /* Install our DSA area while initializing the plan. */
856 estate->es_query_dsa = pei->area;
857 ExecParallelInitializeDSM(planstate, &d);
858 estate->es_query_dsa = NULL;
859
860 /*
861 * Make sure that the world hasn't shifted under our feet. This could
862 * probably just be an Assert(), but let's be conservative for now.
863 */
864 if (e.nnodes != d.nnodes)
865 elog(ERROR, "inconsistent count of PlanState nodes");
866
867 /* OK, we're ready to rock and roll. */
868 return pei;
869}
870
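/*
 * Illustrative sketch of the leader-side life cycle, roughly as nodeGather.c
 * drives it; error handling and the tuple-reading loop are omitted, and the
 * local variable names are hypothetical.
 */
#if 0
	pei = ExecInitParallelPlan(outerPlanState(node), estate,
							   gather->initParam, gather->num_workers,
							   tuples_needed);
	LaunchParallelWorkers(pei->pcxt);
	ExecParallelCreateReaders(pei);
	/* ... read tuples from pei->reader[] until all queues are detached ... */
	ExecParallelFinish(pei);
	ExecParallelCleanup(pei);
#endif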
871/*
872 * Set up tuple queue readers to read the results of a parallel subplan.
873 *
874 * This is separate from ExecInitParallelPlan() because we can launch the
875 * worker processes and let them start doing something before we do this.
876 */
877void
878ExecParallelCreateReaders(ParallelExecutorInfo *pei)
879{
880 int nworkers = pei->pcxt->nworkers_launched;
881 int i;
882
883 Assert(pei->reader == NULL);
884
885 if (nworkers > 0)
886 {
887 pei->reader = (TupleQueueReader **)
888 palloc(nworkers * sizeof(TupleQueueReader *));
889
890 for (i = 0; i < nworkers; i++)
891 {
892 shm_mq_set_handle(pei->tqueue[i],
893 pei->pcxt->worker[i].bgwhandle);
894 pei->reader[i] = CreateTupleQueueReader(pei->tqueue[i]);
895 }
896 }
897}
898
899/*
900 * Re-initialize the parallel executor shared memory state before launching
901 * a fresh batch of workers.
902 */
903void
904ExecParallelReinitialize(PlanState *planstate,
905 ParallelExecutorInfo *pei,
906 Bitmapset *sendParams)
907{
908 EState *estate = planstate->state;
909 FixedParallelExecutorState *fpes;
910
911 /* Old workers must already be shut down */
912 Assert(pei->finished);
913
914 /*
915 * Force any initplan outputs that we're going to pass to workers to be
916 * evaluated, if they weren't already (see comments in
917 * ExecInitParallelPlan).
918 */
919 ExecSetParamPlanMulti(sendParams, GetPerTupleExprContext(estate));
920
921 ReinitializeParallelDSM(pei->pcxt);
922 pei->tqueue = ExecParallelSetupTupleQueues(pei->pcxt, true);
923 pei->reader = NULL;
924 pei->finished = false;
925
926 fpes = shm_toc_lookup(pei->pcxt->toc, PARALLEL_KEY_EXECUTOR_FIXED, false);
927
928 /* Free any serialized parameters from the last round. */
929 if (DsaPointerIsValid(fpes->param_exec))
930 {
931 dsa_free(pei->area, fpes->param_exec);
932 fpes->param_exec = InvalidDsaPointer;
933 }
934
935 /* Serialize current parameter values if required. */
936 if (!bms_is_empty(sendParams))
937 {
938 pei->param_exec = SerializeParamExecParams(estate, sendParams,
939 pei->area);
940 fpes->param_exec = pei->param_exec;
941 }
942
943 /* Traverse plan tree and let each child node reset associated state. */
944 estate->es_query_dsa = pei->area;
945 ExecParallelReInitializeDSM(planstate, pei->pcxt);
946 estate->es_query_dsa = NULL;
947}
948
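/*
 * Illustrative sketch: on a rescan, a caller such as nodeGather.c reuses the
 * existing ParallelExecutorInfo by going through this function instead of
 * ExecInitParallelPlan() before launching a fresh batch of workers.
 */
#if 0
	ExecParallelReinitialize(outerPlanState(node), node->pei, gather->initParam);
	LaunchParallelWorkers(node->pei->pcxt);
	ExecParallelCreateReaders(node->pei);
#endif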
949/*
950 * Traverse plan tree to reinitialize per-node dynamic shared memory state
951 */
952static bool
954 ParallelContext *pcxt)
955{
956 if (planstate == NULL)
957 return false;
958
959 /*
960 * Call reinitializers for DSM-using plan nodes.
961 */
962 switch (nodeTag(planstate))
963 {
964 case T_SeqScanState:
965 if (planstate->plan->parallel_aware)
966 ExecSeqScanReInitializeDSM((SeqScanState *) planstate,
967 pcxt);
968 break;
969 case T_IndexScanState:
970 if (planstate->plan->parallel_aware)
971 ExecIndexScanReInitializeDSM((IndexScanState *) planstate,
972 pcxt);
973 break;
974 case T_IndexOnlyScanState:
975 if (planstate->plan->parallel_aware)
976 ExecIndexOnlyScanReInitializeDSM((IndexOnlyScanState *) planstate,
977 pcxt);
978 break;
979 case T_ForeignScanState:
980 if (planstate->plan->parallel_aware)
981 ExecForeignScanReInitializeDSM((ForeignScanState *) planstate,
982 pcxt);
983 break;
984 case T_AppendState:
985 if (planstate->plan->parallel_aware)
986 ExecAppendReInitializeDSM((AppendState *) planstate, pcxt);
987 break;
988 case T_CustomScanState:
989 if (planstate->plan->parallel_aware)
990 ExecCustomScanReInitializeDSM((CustomScanState *) planstate,
991 pcxt);
992 break;
993 case T_BitmapHeapScanState:
994 if (planstate->plan->parallel_aware)
995 ExecBitmapHeapReInitializeDSM((BitmapHeapScanState *) planstate,
996 pcxt);
997 break;
998 case T_HashJoinState:
999 if (planstate->plan->parallel_aware)
1000 ExecHashJoinReInitializeDSM((HashJoinState *) planstate,
1001 pcxt);
1002 break;
1003 case T_HashState:
1004 case T_SortState:
1005 case T_IncrementalSortState:
1006 case T_MemoizeState:
1007 /* these nodes have DSM state, but no reinitialization is required */
1008 break;
1009
1010 default:
1011 break;
1012 }
1013
1014 return planstate_tree_walker(planstate, ExecParallelReInitializeDSM, pcxt);
1015}
1016
1017/*
1018 * Copy instrumentation information about this node and its descendants from
1019 * dynamic shared memory.
1020 */
1021static bool
1022ExecParallelRetrieveInstrumentation(PlanState *planstate,
1023 SharedExecutorInstrumentation *instrumentation)
1024{
1025 Instrumentation *instrument;
1026 int i;
1027 int n;
1028 int ibytes;
1029 int plan_node_id = planstate->plan->plan_node_id;
1030 MemoryContext oldcontext;
1031
1032 /* Find the instrumentation for this node. */
1033 for (i = 0; i < instrumentation->num_plan_nodes; ++i)
1034 if (instrumentation->plan_node_id[i] == plan_node_id)
1035 break;
1036 if (i >= instrumentation->num_plan_nodes)
1037 elog(ERROR, "plan node %d not found", plan_node_id);
1038
1039 /* Accumulate the statistics from all workers. */
1040 instrument = GetInstrumentationArray(instrumentation);
1041 instrument += i * instrumentation->num_workers;
1042 for (n = 0; n < instrumentation->num_workers; ++n)
1043 InstrAggNode(planstate->instrument, &instrument[n]);
1044
1045 /*
1046 * Also store the per-worker detail.
1047 *
1048 * Worker instrumentation should be allocated in the same context as the
1049 * regular instrumentation information, which is the per-query context.
1050 * Switch into per-query memory context.
1051 */
1052 oldcontext = MemoryContextSwitchTo(planstate->state->es_query_cxt);
1053 ibytes = mul_size(instrumentation->num_workers, sizeof(Instrumentation));
1054 planstate->worker_instrument =
1055 palloc(ibytes + offsetof(WorkerInstrumentation, instrument));
1056 MemoryContextSwitchTo(oldcontext);
1057
1058 planstate->worker_instrument->num_workers = instrumentation->num_workers;
1059 memcpy(&planstate->worker_instrument->instrument, instrument, ibytes);
1060
1061 /* Perform any node-type-specific work that needs to be done. */
1062 switch (nodeTag(planstate))
1063 {
1064 case T_SortState:
1065 ExecSortRetrieveInstrumentation((SortState *) planstate);
1066 break;
1067 case T_IncrementalSortState:
1068 ExecIncrementalSortRetrieveInstrumentation((IncrementalSortState *) planstate);
1069 break;
1070 case T_HashState:
1071 ExecHashRetrieveInstrumentation((HashState *) planstate);
1072 break;
1073 case T_AggState:
1074 ExecAggRetrieveInstrumentation((AggState *) planstate);
1075 break;
1076 case T_MemoizeState:
1077 ExecMemoizeRetrieveInstrumentation((MemoizeState *) planstate);
1078 break;
1079 case T_BitmapHeapScanState:
1080 ExecBitmapHeapRetrieveInstrumentation((BitmapHeapScanState *) planstate);
1081 break;
1082 default:
1083 break;
1084 }
1085
1086 return planstate_tree_walker(planstate, ExecParallelRetrieveInstrumentation,
1087 instrumentation);
1088}
1089
1090/*
1091 * Add up the workers' JIT instrumentation from dynamic shared memory.
1092 */
1093static void
1094ExecParallelRetrieveJitInstrumentation(PlanState *planstate,
1095 SharedJitInstrumentation *shared_jit)
1096{
1097 JitInstrumentation *combined;
1098 int ibytes;
1099
1100 int n;
1101
1102 /*
1103 * Accumulate worker JIT instrumentation into the combined JIT
1104 * instrumentation, allocating it if required.
1105 */
1106 if (!planstate->state->es_jit_worker_instr)
1107 planstate->state->es_jit_worker_instr =
1108 MemoryContextAllocZero(planstate->state->es_query_cxt, sizeof(JitInstrumentation));
1109 combined = planstate->state->es_jit_worker_instr;
1110
1111 /* Accumulate all the workers' instrumentations. */
1112 for (n = 0; n < shared_jit->num_workers; ++n)
1113 InstrJitAgg(combined, &shared_jit->jit_instr[n]);
1114
1115 /*
1116 * Store the per-worker detail.
1117 *
1118 * Similar to ExecParallelRetrieveInstrumentation(), allocate the
1119 * instrumentation in per-query context.
1120 */
1121 ibytes = offsetof(SharedJitInstrumentation, jit_instr)
1122 + mul_size(shared_jit->num_workers, sizeof(JitInstrumentation));
1123 planstate->worker_jit_instrument =
1124 MemoryContextAlloc(planstate->state->es_query_cxt, ibytes);
1125
1126 memcpy(planstate->worker_jit_instrument, shared_jit, ibytes);
1127}
1128
1129/*
1130 * Finish parallel execution. We wait for parallel workers to finish, and
1131 * accumulate their buffer/WAL usage.
1132 */
1133void
1134ExecParallelFinish(ParallelExecutorInfo *pei)
1135{
1136 int nworkers = pei->pcxt->nworkers_launched;
1137 int i;
1138
1139 /* Make this be a no-op if called twice in a row. */
1140 if (pei->finished)
1141 return;
1142
1143 /*
1144 * Detach from tuple queues ASAP, so that any still-active workers will
1145 * notice that no further results are wanted.
1146 */
1147 if (pei->tqueue != NULL)
1148 {
1149 for (i = 0; i < nworkers; i++)
1150 shm_mq_detach(pei->tqueue[i]);
1151 pfree(pei->tqueue);
1152 pei->tqueue = NULL;
1153 }
1154
1155 /*
1156 * While we're waiting for the workers to finish, let's get rid of the
1157 * tuple queue readers. (Any other local cleanup could be done here too.)
1158 */
1159 if (pei->reader != NULL)
1160 {
1161 for (i = 0; i < nworkers; i++)
1162 DestroyTupleQueueReader(pei->reader[i]);
1163 pfree(pei->reader);
1164 pei->reader = NULL;
1165 }
1166
1167 /* Now wait for the workers to finish. */
1168 WaitForParallelWorkersToFinish(pei->pcxt);
1169
1170 /*
1171 * Next, accumulate buffer/WAL usage. (This must wait for the workers to
1172 * finish, or we might get incomplete data.)
1173 */
1174 for (i = 0; i < nworkers; i++)
1175 InstrAccumParallelQuery(&pei->buffer_usage[i], &pei->wal_usage[i]);
1176
1177 pei->finished = true;
1178}
1179
1180/*
1181 * Accumulate instrumentation, and then clean up whatever ParallelExecutorInfo
1182 * resources still exist after ExecParallelFinish. We separate these
1183 * routines because someone might want to examine the contents of the DSM
1184 * after ExecParallelFinish and before calling this routine.
1185 */
1186void
1187ExecParallelCleanup(ParallelExecutorInfo *pei)
1188{
1189 /* Accumulate instrumentation, if any. */
1190 if (pei->instrumentation)
1191 ExecParallelRetrieveInstrumentation(pei->planstate,
1192 pei->instrumentation);
1193
1194 /* Accumulate JIT instrumentation, if any. */
1195 if (pei->jit_instrumentation)
1196 ExecParallelRetrieveJitInstrumentation(pei->planstate,
1197 pei->jit_instrumentation);
1198
1199 /* Free any serialized parameters. */
1200 if (DsaPointerIsValid(pei->param_exec))
1201 {
1202 dsa_free(pei->area, pei->param_exec);
1203 pei->param_exec = InvalidDsaPointer;
1204 }
1205 if (pei->area != NULL)
1206 {
1207 dsa_detach(pei->area);
1208 pei->area = NULL;
1209 }
1210 if (pei->pcxt != NULL)
1211 {
1212 DestroyParallelContext(pei->pcxt);
1213 pei->pcxt = NULL;
1214 }
1215 pfree(pei);
1216}
1217
1218/*
1219 * Create a DestReceiver to write tuples we produce to the shm_mq designated
1220 * for that purpose.
1221 */
1222static DestReceiver *
1223ExecParallelGetReceiver(dsm_segment *seg, shm_toc *toc)
1224{
1225 char *mqspace;
1226 shm_mq *mq;
1227
1228 mqspace = shm_toc_lookup(toc, PARALLEL_KEY_TUPLE_QUEUE, false);
1229 mqspace += ParallelWorkerNumber * PARALLEL_TUPLE_QUEUE_SIZE;
1230 mq = (shm_mq *) mqspace;
1231 shm_mq_set_sender(mq, MyProc);
1232 return CreateTupleQueueDestReceiver(shm_mq_attach(mq, seg, NULL));
1233}
1234
1235/*
1236 * Create a QueryDesc for the PlannedStmt we are to execute, and return it.
1237 */
1238static QueryDesc *
1239ExecParallelGetQueryDesc(shm_toc *toc, DestReceiver *receiver,
1240 int instrument_options)
1241{
1242 char *pstmtspace;
1243 char *paramspace;
1244 PlannedStmt *pstmt;
1245 ParamListInfo paramLI;
1246 char *queryString;
1247
1248 /* Get the query string from shared memory */
1249 queryString = shm_toc_lookup(toc, PARALLEL_KEY_QUERY_TEXT, false);
1250
1251 /* Reconstruct leader-supplied PlannedStmt. */
1252 pstmtspace = shm_toc_lookup(toc, PARALLEL_KEY_PLANNEDSTMT, false);
1253 pstmt = (PlannedStmt *) stringToNode(pstmtspace);
1254
1255 /* Reconstruct ParamListInfo. */
1256 paramspace = shm_toc_lookup(toc, PARALLEL_KEY_PARAMLISTINFO, false);
1257 paramLI = RestoreParamList(&paramspace);
1258
1259 /* Create a QueryDesc for the query. */
1260 return CreateQueryDesc(pstmt,
1261 queryString,
1262 GetActiveSnapshot(), InvalidSnapshot,
1263 receiver, paramLI, NULL, instrument_options);
1264}
1265
1266/*
1267 * Copy instrumentation information from this node and its descendants into
1268 * dynamic shared memory, so that the parallel leader can retrieve it.
1269 */
1270static bool
1271ExecParallelReportInstrumentation(PlanState *planstate,
1272 SharedExecutorInstrumentation *instrumentation)
1273{
1274 int i;
1275 int plan_node_id = planstate->plan->plan_node_id;
1276 Instrumentation *instrument;
1277
1278 InstrEndLoop(planstate->instrument);
1279
1280 /*
1281 * If we shuffled the plan_node_id values in ps_instrument into sorted
1282 * order, we could use binary search here. This might matter someday if
1283 * we're pushing down sufficiently large plan trees. For now, do it the
1284 * slow, dumb way.
1285 */
1286 for (i = 0; i < instrumentation->num_plan_nodes; ++i)
1287 if (instrumentation->plan_node_id[i] == plan_node_id)
1288 break;
1289 if (i >= instrumentation->num_plan_nodes)
1290 elog(ERROR, "plan node %d not found", plan_node_id);
1291
1292 /*
1293 * Add our statistics to the per-node, per-worker totals. It's possible
1294 * that this could happen more than once if we relaunched workers.
1295 */
1296 instrument = GetInstrumentationArray(instrumentation);
1297 instrument += i * instrumentation->num_workers;
1298 Assert(IsParallelWorker());
1299 Assert(ParallelWorkerNumber < instrumentation->num_workers);
1300 InstrAggNode(&instrument[ParallelWorkerNumber], planstate->instrument);
1301
1302 return planstate_tree_walker(planstate, ExecParallelReportInstrumentation,
1303 instrumentation);
1304}
1305
1306/*
1307 * Initialize the PlanState and its descendants with the information
1308 * retrieved from shared memory. This has to be done once the PlanState
1309 * is allocated and initialized by executor; that is, after ExecutorStart().
1310 */
1311static bool
1312ExecParallelInitializeWorker(PlanState *planstate, ParallelWorkerContext *pwcxt)
1313{
1314 if (planstate == NULL)
1315 return false;
1316
1317 switch (nodeTag(planstate))
1318 {
1319 case T_SeqScanState:
1320 if (planstate->plan->parallel_aware)
1321 ExecSeqScanInitializeWorker((SeqScanState *) planstate, pwcxt);
1322 break;
1323 case T_IndexScanState:
1324 if (planstate->plan->parallel_aware)
1325 ExecIndexScanInitializeWorker((IndexScanState *) planstate,
1326 pwcxt);
1327 break;
1328 case T_IndexOnlyScanState:
1329 if (planstate->plan->parallel_aware)
1330 ExecIndexOnlyScanInitializeWorker((IndexOnlyScanState *) planstate,
1331 pwcxt);
1332 break;
1333 case T_ForeignScanState:
1334 if (planstate->plan->parallel_aware)
1335 ExecForeignScanInitializeWorker((ForeignScanState *) planstate,
1336 pwcxt);
1337 break;
1338 case T_AppendState:
1339 if (planstate->plan->parallel_aware)
1340 ExecAppendInitializeWorker((AppendState *) planstate, pwcxt);
1341 break;
1342 case T_CustomScanState:
1343 if (planstate->plan->parallel_aware)
1344 ExecCustomScanInitializeWorker((CustomScanState *) planstate,
1345 pwcxt);
1346 break;
1347 case T_BitmapHeapScanState:
1348 if (planstate->plan->parallel_aware)
1349 ExecBitmapHeapInitializeWorker((BitmapHeapScanState *) planstate,
1350 pwcxt);
1351 break;
1352 case T_HashJoinState:
1353 if (planstate->plan->parallel_aware)
1354 ExecHashJoinInitializeWorker((HashJoinState *) planstate,
1355 pwcxt);
1356 break;
1357 case T_HashState:
1358 /* even when not parallel-aware, for EXPLAIN ANALYZE */
1359 ExecHashInitializeWorker((HashState *) planstate, pwcxt);
1360 break;
1361 case T_SortState:
1362 /* even when not parallel-aware, for EXPLAIN ANALYZE */
1363 ExecSortInitializeWorker((SortState *) planstate, pwcxt);
1364 break;
1365 case T_IncrementalSortState:
1366 /* even when not parallel-aware, for EXPLAIN ANALYZE */
1367 ExecIncrementalSortInitializeWorker((IncrementalSortState *) planstate,
1368 pwcxt);
1369 break;
1370 case T_AggState:
1371 /* even when not parallel-aware, for EXPLAIN ANALYZE */
1372 ExecAggInitializeWorker((AggState *) planstate, pwcxt);
1373 break;
1374 case T_MemoizeState:
1375 /* even when not parallel-aware, for EXPLAIN ANALYZE */
1376 ExecMemoizeInitializeWorker((MemoizeState *) planstate, pwcxt);
1377 break;
1378 default:
1379 break;
1380 }
1381
1382 return planstate_tree_walker(planstate, ExecParallelInitializeWorker,
1383 pwcxt);
1384}
1385
1386/*
1387 * Main entrypoint for parallel query worker processes.
1388 *
1389 * We reach this function from ParallelWorkerMain, so the setup necessary to
1390 * create a sensible parallel environment has already been done;
1391 * ParallelWorkerMain worries about stuff like the transaction state, combo
1392 * CID mappings, and GUC values, so we don't need to deal with any of that
1393 * here.
1394 *
1395 * Our job is to deal with concerns specific to the executor. The parallel
1396 * group leader will have stored a serialized PlannedStmt, and it's our job
1397 * to execute that plan and write the resulting tuples to the appropriate
1398 * tuple queue. Various bits of supporting information that we need in order
1399 * to do this are also stored in the dsm_segment and can be accessed through
1400 * the shm_toc.
1401 */
1402void
1403ParallelQueryMain(dsm_segment *seg, shm_toc *toc)
1404{
1405 FixedParallelExecutorState *fpes;
1406 BufferUsage *buffer_usage;
1407 WalUsage *wal_usage;
1408 DestReceiver *receiver;
1409 QueryDesc *queryDesc;
1410 SharedExecutorInstrumentation *instrumentation;
1411 SharedJitInstrumentation *jit_instrumentation;
1412 int instrument_options = 0;
1413 void *area_space;
1414 dsa_area *area;
1415 ParallelWorkerContext pwcxt;
1416
1417 /* Get fixed-size state. */
1418 fpes = shm_toc_lookup(toc, PARALLEL_KEY_EXECUTOR_FIXED, false);
1419
1420 /* Set up DestReceiver, SharedExecutorInstrumentation, and QueryDesc. */
1421 receiver = ExecParallelGetReceiver(seg, toc);
1422 instrumentation = shm_toc_lookup(toc, PARALLEL_KEY_INSTRUMENTATION, true);
1423 if (instrumentation != NULL)
1424 instrument_options = instrumentation->instrument_options;
1425 jit_instrumentation = shm_toc_lookup(toc, PARALLEL_KEY_JIT_INSTRUMENTATION,
1426 true);
1427 queryDesc = ExecParallelGetQueryDesc(toc, receiver, instrument_options);
1428
1429 /* Setting debug_query_string for individual workers */
1430 debug_query_string = queryDesc->sourceText;
1431
1432 /* Report workers' query for monitoring purposes */
1433 pgstat_report_activity(STATE_RUNNING, debug_query_string);
1434
1435 /* Attach to the dynamic shared memory area. */
1436 area_space = shm_toc_lookup(toc, PARALLEL_KEY_DSA, false);
1437 area = dsa_attach_in_place(area_space, seg);
1438
1439 /* Start up the executor */
1440 queryDesc->plannedstmt->jitFlags = fpes->jit_flags;
1441 ExecutorStart(queryDesc, fpes->eflags);
1442
1443 /* Special executor initialization steps for parallel workers */
1444 queryDesc->planstate->state->es_query_dsa = area;
1445 if (DsaPointerIsValid(fpes->param_exec))
1446 {
1447 char *paramexec_space;
1448
1449 paramexec_space = dsa_get_address(area, fpes->param_exec);
1450 RestoreParamExecParams(paramexec_space, queryDesc->estate);
1451 }
1452 pwcxt.toc = toc;
1453 pwcxt.seg = seg;
1454 ExecParallelInitializeWorker(queryDesc->planstate, &pwcxt);
1455
1456 /* Pass down any tuple bound */
1457 ExecSetTupleBound(fpes->tuples_needed, queryDesc->planstate);
1458
1459 /*
1460 * Prepare to track buffer/WAL usage during query execution.
1461 *
1462 * We do this after starting up the executor to match what happens in the
1463 * leader, which also doesn't count buffer accesses and WAL activity that
1464 * occur during executor startup.
1465 */
1466 InstrStartParallelQuery();
1467
1468 /*
1469 * Run the plan. If we specified a tuple bound, be careful not to demand
1470 * more tuples than that.
1471 */
1472 ExecutorRun(queryDesc,
1473 ForwardScanDirection,
1474 fpes->tuples_needed < 0 ? (int64) 0 : fpes->tuples_needed);
1475
1476 /* Shut down the executor */
1477 ExecutorFinish(queryDesc);
1478
1479 /* Report buffer/WAL usage during parallel execution. */
1480 buffer_usage = shm_toc_lookup(toc, PARALLEL_KEY_BUFFER_USAGE, false);
1481 wal_usage = shm_toc_lookup(toc, PARALLEL_KEY_WAL_USAGE, false);
1482 InstrEndParallelQuery(&buffer_usage[ParallelWorkerNumber],
1483 &wal_usage[ParallelWorkerNumber]);
1484
1485 /* Report instrumentation data if any instrumentation options are set. */
1486 if (instrumentation != NULL)
1487 ExecParallelReportInstrumentation(queryDesc->planstate,
1488 instrumentation);
1489
1490 /* Report JIT instrumentation data if any */
1491 if (queryDesc->estate->es_jit && jit_instrumentation != NULL)
1492 {
1493 Assert(ParallelWorkerNumber < jit_instrumentation->num_workers);
1494 jit_instrumentation->jit_instr[ParallelWorkerNumber] =
1495 queryDesc->estate->es_jit->instr;
1496 }
1497
1498 /* Must do this after capturing instrumentation. */
1499 ExecutorEnd(queryDesc);
1500
1501 /* Cleanup. */
1502 dsa_detach(area);
1503 FreeQueryDesc(queryDesc);
1504 receiver->rDestroy(receiver);
1505}