parallel.c
1/*-------------------------------------------------------------------------
2 *
3 * parallel.c
4 * Infrastructure for launching parallel workers
5 *
6 * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 * IDENTIFICATION
10 * src/backend/access/transam/parallel.c
11 *
12 *-------------------------------------------------------------------------
13 */
14
15#include "postgres.h"
16
17#include "access/brin.h"
18#include "access/nbtree.h"
19#include "access/parallel.h"
20#include "access/session.h"
21#include "access/xact.h"
22#include "access/xlog.h"
23#include "catalog/index.h"
24#include "catalog/namespace.h"
25#include "catalog/pg_enum.h"
26#include "catalog/storage.h"
27#include "commands/async.h"
28#include "commands/vacuum.h"
29#include "executor/execParallel.h"
30#include "libpq/libpq.h"
31#include "libpq/pqformat.h"
32#include "libpq/pqmq.h"
33#include "miscadmin.h"
34#include "optimizer/optimizer.h"
35#include "pgstat.h"
36#include "storage/ipc.h"
37#include "storage/predicate.h"
38#include "storage/spin.h"
39#include "tcop/tcopprot.h"
40#include "utils/combocid.h"
41#include "utils/guc.h"
42#include "utils/inval.h"
43#include "utils/memutils.h"
44#include "utils/relmapper.h"
45#include "utils/snapmgr.h"
46
47/*
48 * We don't want to waste a lot of memory on an error queue which, most of
49 * the time, will process only a handful of small messages. However, it is
50 * desirable to make it large enough that a typical ErrorResponse can be sent
51 * without blocking. That way, a worker that errors out can write the whole
52 * message into the queue and terminate without waiting for the user backend.
53 */
54#define PARALLEL_ERROR_QUEUE_SIZE 16384
55
56/* Magic number for parallel context TOC. */
57#define PARALLEL_MAGIC 0x50477c7c
58
59/*
60 * Magic numbers for per-context parallel state sharing. Higher-level code
61 * should use smaller values, leaving these very large ones for use by this
62 * module.
63 */
64#define PARALLEL_KEY_FIXED UINT64CONST(0xFFFFFFFFFFFF0001)
65#define PARALLEL_KEY_ERROR_QUEUE UINT64CONST(0xFFFFFFFFFFFF0002)
66#define PARALLEL_KEY_LIBRARY UINT64CONST(0xFFFFFFFFFFFF0003)
67#define PARALLEL_KEY_GUC UINT64CONST(0xFFFFFFFFFFFF0004)
68#define PARALLEL_KEY_COMBO_CID UINT64CONST(0xFFFFFFFFFFFF0005)
69#define PARALLEL_KEY_TRANSACTION_SNAPSHOT UINT64CONST(0xFFFFFFFFFFFF0006)
70#define PARALLEL_KEY_ACTIVE_SNAPSHOT UINT64CONST(0xFFFFFFFFFFFF0007)
71#define PARALLEL_KEY_TRANSACTION_STATE UINT64CONST(0xFFFFFFFFFFFF0008)
72#define PARALLEL_KEY_ENTRYPOINT UINT64CONST(0xFFFFFFFFFFFF0009)
73#define PARALLEL_KEY_SESSION_DSM UINT64CONST(0xFFFFFFFFFFFF000A)
74#define PARALLEL_KEY_PENDING_SYNCS UINT64CONST(0xFFFFFFFFFFFF000B)
75#define PARALLEL_KEY_REINDEX_STATE UINT64CONST(0xFFFFFFFFFFFF000C)
76#define PARALLEL_KEY_RELMAPPER_STATE UINT64CONST(0xFFFFFFFFFFFF000D)
77#define PARALLEL_KEY_UNCOMMITTEDENUMS UINT64CONST(0xFFFFFFFFFFFF000E)
78#define PARALLEL_KEY_CLIENTCONNINFO UINT64CONST(0xFFFFFFFFFFFF000F)
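/*
 * [Editorial sketch, not in the original file] Higher-level users of this
 * module choose their own, smaller TOC keys for the state they ship to
 * workers. A hypothetical extension might reserve, for example:
 *
 *     #define PARALLEL_KEY_MY_STATE    UINT64CONST(0xE000000000000001)
 *
 * keeping clear of the very large 0xFFFFFFFFFFFFxxxx values above, which
 * belong to parallel.c itself.
 */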
79
80/* Fixed-size parallel state. */
81typedef struct FixedParallelState
82{
83 /* Fixed-size state that workers must restore. */
84 Oid database_id;
85 Oid authenticated_user_id;
86 Oid session_user_id;
87 Oid outer_user_id;
88 Oid current_user_id;
89 Oid temp_namespace_id;
90 Oid temp_toast_namespace_id;
91 int sec_context;
92 bool session_user_is_superuser;
93 bool role_is_superuser;
94 PGPROC *parallel_leader_pgproc;
95 pid_t parallel_leader_pid;
96 ProcNumber parallel_leader_proc_number;
97 TimestampTz xact_ts;
98 TimestampTz stmt_ts;
99 SerializableXactHandle serializable_xact_handle;
100
101 /* Mutex protects remaining fields. */
102 slock_t mutex;
103
104 /* Maximum XactLastRecEnd of any worker. */
105 XLogRecPtr last_xlog_end;
106} FixedParallelState;
107
108/*
109 * Our parallel worker number. We initialize this to -1, meaning that we are
110 * not a parallel worker. In parallel workers, it will be set to a value >= 0
111 * and < the number of workers before any user code is invoked; each parallel
112 * worker will get a different parallel worker number.
113 */
114int ParallelWorkerNumber = -1;
115
116/* Is there a parallel message pending which we need to receive? */
117volatile sig_atomic_t ParallelMessagePending = false;
118
119/* Are we initializing a parallel worker? */
120bool InitializingParallelWorker = false;
121
122/* Pointer to our fixed parallel state. */
123static FixedParallelState *MyFixedParallelState;
124
125/* List of active parallel contexts. */
126static dlist_head pcxt_list = DLIST_STATIC_INIT(pcxt_list);
127
128/* Backend-local copy of data from FixedParallelState. */
129static pid_t ParallelLeaderPid;
130
131/*
132 * List of internal parallel worker entry points. We need this for
133 * reasons explained in LookupParallelWorkerFunction(), below.
134 */
135static const struct
136{
137 const char *fn_name;
138 parallel_worker_main_type fn_addr;
139} InternalParallelWorkers[] =
140
141{
142 {
143 "ParallelQueryMain", ParallelQueryMain
144 },
145 {
146 "_bt_parallel_build_main", _bt_parallel_build_main
147 },
148 {
149 "_brin_parallel_build_main", _brin_parallel_build_main
150 },
151 {
152 "parallel_vacuum_main", parallel_vacuum_main
153 }
154};
155
156/* Private functions. */
157static void HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg);
158static void WaitForParallelWorkersToExit(ParallelContext *pcxt);
159static parallel_worker_main_type LookupParallelWorkerFunction(const char *libraryname, const char *funcname);
160static void ParallelWorkerShutdown(int code, Datum arg);
161
162
163/*
164 * Establish a new parallel context. This should be done after entering
165 * parallel mode, and (unless there is an error) the context should be
166 * destroyed before exiting the current subtransaction.
167 */
168ParallelContext *
169CreateParallelContext(const char *library_name, const char *function_name,
170 int nworkers)
171{
172 MemoryContext oldcontext;
173 ParallelContext *pcxt;
174
175 /* It is unsafe to create a parallel context if not in parallel mode. */
176 Assert(IsInParallelMode());
177
178 /* Number of workers should be non-negative. */
179 Assert(nworkers >= 0);
180
181 /* We might be running in a short-lived memory context. */
182 oldcontext = MemoryContextSwitchTo(TopTransactionContext);
183
184 /* Initialize a new ParallelContext. */
185 pcxt = palloc0(sizeof(ParallelContext));
186 pcxt->subid = GetCurrentSubTransactionId();
187 pcxt->nworkers = nworkers;
188 pcxt->nworkers_to_launch = nworkers;
189 pcxt->library_name = pstrdup(library_name);
190 pcxt->function_name = pstrdup(function_name);
191 pcxt->error_context_stack = error_context_stack;
192 shm_toc_initialize_estimator(&pcxt->estimator);
193 dlist_push_head(&pcxt_list, &pcxt->node);
194
195 /* Restore previous memory context. */
196 MemoryContextSwitchTo(oldcontext);
197
198 return pcxt;
199}
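/*
 * [Editorial sketch, not in the original file] A minimal caller, assuming a
 * hypothetical entry point "mymodule_main" exported by "mymodule" and a
 * module-private TOC key MY_KEY:
 *
 *     EnterParallelMode();
 *     pcxt = CreateParallelContext("mymodule", "mymodule_main", 2);
 *     shm_toc_estimate_chunk(&pcxt->estimator, size);
 *     shm_toc_estimate_keys(&pcxt->estimator, 1);
 *     InitializeParallelDSM(pcxt);
 *     space = shm_toc_allocate(pcxt->toc, size);
 *     shm_toc_insert(pcxt->toc, MY_KEY, space);
 *     LaunchParallelWorkers(pcxt);
 *     ... let the workers do their work ...
 *     WaitForParallelWorkersToFinish(pcxt);
 *     DestroyParallelContext(pcxt);
 *     ExitParallelMode();
 */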
200
201/*
202 * Establish the dynamic shared memory segment for a parallel context and
203 * copy state and other bookkeeping information that will be needed by
204 * parallel workers into it.
205 */
206void
207InitializeParallelDSM(ParallelContext *pcxt)
208{
209 MemoryContext oldcontext;
210 Size library_len = 0;
211 Size guc_len = 0;
212 Size combocidlen = 0;
213 Size tsnaplen = 0;
214 Size asnaplen = 0;
215 Size tstatelen = 0;
216 Size pendingsyncslen = 0;
217 Size reindexlen = 0;
218 Size relmapperlen = 0;
219 Size uncommittedenumslen = 0;
220 Size clientconninfolen = 0;
221 Size segsize = 0;
222 int i;
223 FixedParallelState *fps;
224 dsm_handle session_dsm_handle = DSM_HANDLE_INVALID;
225 Snapshot transaction_snapshot = GetTransactionSnapshot();
226 Snapshot active_snapshot = GetActiveSnapshot();
227
228 /* We might be running in a very short-lived memory context. */
229 oldcontext = MemoryContextSwitchTo(TopTransactionContext);
230
231 /* Allow space to store the fixed-size parallel state. */
232 shm_toc_estimate_chunk(&pcxt->estimator, sizeof(FixedParallelState));
233 shm_toc_estimate_keys(&pcxt->estimator, 1);
234
235 /*
236 * If we manage to reach here while non-interruptible, it's unsafe to
237 * launch any workers: we would fail to process interrupts sent by them.
238 * We can deal with that edge case by pretending no workers were
239 * requested.
240 */
241 if (!INTERRUPTS_CAN_BE_PROCESSED())
242 pcxt->nworkers = 0;
243
244 /*
245 * Normally, the user will have requested at least one worker process, but
246 * if by chance they have not, we can skip a bunch of things here.
247 */
248 if (pcxt->nworkers > 0)
249 {
250 /* Get (or create) the per-session DSM segment's handle. */
251 session_dsm_handle = GetSessionDsmHandle();
252
253 /*
254 * If we weren't able to create a per-session DSM segment, then we can
255 * continue but we can't safely launch any workers because their
256 * record typmods would be incompatible so they couldn't exchange
257 * tuples.
258 */
259 if (session_dsm_handle == DSM_HANDLE_INVALID)
260 pcxt->nworkers = 0;
261 }
262
263 if (pcxt->nworkers > 0)
264 {
265 /* Estimate space for various kinds of state sharing. */
266 library_len = EstimateLibraryStateSpace();
267 shm_toc_estimate_chunk(&pcxt->estimator, library_len);
268 guc_len = EstimateGUCStateSpace();
269 shm_toc_estimate_chunk(&pcxt->estimator, guc_len);
270 combocidlen = EstimateComboCIDStateSpace();
271 shm_toc_estimate_chunk(&pcxt->estimator, combocidlen);
272 if (IsolationUsesXactSnapshot())
273 {
274 tsnaplen = EstimateSnapshotSpace(transaction_snapshot);
275 shm_toc_estimate_chunk(&pcxt->estimator, tsnaplen);
276 }
277 asnaplen = EstimateSnapshotSpace(active_snapshot);
278 shm_toc_estimate_chunk(&pcxt->estimator, asnaplen);
279 tstatelen = EstimateTransactionStateSpace();
280 shm_toc_estimate_chunk(&pcxt->estimator, tstatelen);
281 shm_toc_estimate_chunk(&pcxt->estimator, sizeof(dsm_handle));
282 pendingsyncslen = EstimatePendingSyncsSpace();
283 shm_toc_estimate_chunk(&pcxt->estimator, pendingsyncslen);
284 reindexlen = EstimateReindexStateSpace();
285 shm_toc_estimate_chunk(&pcxt->estimator, reindexlen);
286 relmapperlen = EstimateRelationMapSpace();
287 shm_toc_estimate_chunk(&pcxt->estimator, relmapperlen);
288 uncommittedenumslen = EstimateUncommittedEnumsSpace();
289 shm_toc_estimate_chunk(&pcxt->estimator, uncommittedenumslen);
290 clientconninfolen = EstimateClientConnectionInfoSpace();
291 shm_toc_estimate_chunk(&pcxt->estimator, clientconninfolen);
292 /* If you add more chunks here, you probably need to add keys. */
293 shm_toc_estimate_keys(&pcxt->estimator, 12);
294
295 /* Estimate space needed for error queues. */
296 StaticAssertStmt(BUFFERALIGN(PARALLEL_ERROR_QUEUE_SIZE) ==
297 PARALLEL_ERROR_QUEUE_SIZE,
298 "parallel error queue size not buffer-aligned");
299 shm_toc_estimate_chunk(&pcxt->estimator,
300 mul_size(PARALLEL_ERROR_QUEUE_SIZE,
301 pcxt->nworkers));
302 shm_toc_estimate_keys(&pcxt->estimator, 1);
303
304 /* Estimate how much we'll need for the entrypoint info. */
305 shm_toc_estimate_chunk(&pcxt->estimator, strlen(pcxt->library_name) +
306 strlen(pcxt->function_name) + 2);
307 shm_toc_estimate_keys(&pcxt->estimator, 1);
308 }
309
310 /*
311 * Create DSM and initialize with new table of contents. But if the user
312 * didn't request any workers, then don't bother creating a dynamic shared
313 * memory segment; instead, just use backend-private memory.
314 *
315 * Also, if we can't create a dynamic shared memory segment because the
316 * maximum number of segments have already been created, then fall back to
317 * backend-private memory, and plan not to use any workers. We hope this
318 * won't happen very often, but it's better to abandon the use of
319 * parallelism than to fail outright.
320 */
321 segsize = shm_toc_estimate(&pcxt->estimator);
322 if (pcxt->nworkers > 0)
323 pcxt->seg = dsm_create(segsize, DSM_CREATE_NULL_IF_MAXSEGMENTS);
324 if (pcxt->seg != NULL)
325 pcxt->toc = shm_toc_create(PARALLEL_MAGIC,
326 dsm_segment_address(pcxt->seg),
327 segsize);
328 else
329 {
330 pcxt->nworkers = 0;
331 pcxt->private_memory = MemoryContextAlloc(TopMemoryContext, segsize);
332 pcxt->toc = shm_toc_create(PARALLEL_MAGIC, pcxt->private_memory,
333 segsize);
334 }
335
336 /* Initialize fixed-size state in shared memory. */
337 fps = (FixedParallelState *)
338 shm_toc_allocate(pcxt->toc, sizeof(FixedParallelState));
339 fps->database_id = MyDatabaseId;
340 fps->authenticated_user_id = GetAuthenticatedUserId();
341 fps->session_user_id = GetSessionUserId();
342 fps->outer_user_id = GetCurrentRoleId();
343 GetUserIdAndSecContext(&fps->current_user_id, &fps->sec_context);
344 fps->session_user_is_superuser = GetSessionUserIsSuperuser();
345 fps->role_is_superuser = current_role_is_superuser;
346 GetTempNamespaceState(&fps->temp_namespace_id,
347 &fps->temp_toast_namespace_id);
348 fps->parallel_leader_pgproc = MyProc;
349 fps->parallel_leader_pid = MyProcPid;
350 fps->parallel_leader_proc_number = MyProcNumber;
351 fps->xact_ts = GetCurrentTransactionStartTimestamp();
352 fps->stmt_ts = GetCurrentStatementStartTimestamp();
353 fps->serializable_xact_handle = ShareSerializableXact();
354 SpinLockInit(&fps->mutex);
355 fps->last_xlog_end = 0;
356 shm_toc_insert(pcxt->toc, PARALLEL_KEY_FIXED, fps);
357
358 /* We can skip the rest of this if we're not budgeting for any workers. */
359 if (pcxt->nworkers > 0)
360 {
361 char *libraryspace;
362 char *gucspace;
363 char *combocidspace;
364 char *tsnapspace;
365 char *asnapspace;
366 char *tstatespace;
367 char *pendingsyncsspace;
368 char *reindexspace;
369 char *relmapperspace;
370 char *error_queue_space;
371 char *session_dsm_handle_space;
372 char *entrypointstate;
373 char *uncommittedenumsspace;
374 char *clientconninfospace;
375 Size lnamelen;
376
377 /* Serialize shared libraries we have loaded. */
378 libraryspace = shm_toc_allocate(pcxt->toc, library_len);
379 SerializeLibraryState(library_len, libraryspace);
380 shm_toc_insert(pcxt->toc, PARALLEL_KEY_LIBRARY, libraryspace);
381
382 /* Serialize GUC settings. */
383 gucspace = shm_toc_allocate(pcxt->toc, guc_len);
384 SerializeGUCState(guc_len, gucspace);
385 shm_toc_insert(pcxt->toc, PARALLEL_KEY_GUC, gucspace);
386
387 /* Serialize combo CID state. */
388 combocidspace = shm_toc_allocate(pcxt->toc, combocidlen);
389 SerializeComboCIDState(combocidlen, combocidspace);
390 shm_toc_insert(pcxt->toc, PARALLEL_KEY_COMBO_CID, combocidspace);
391
392 /*
393 * Serialize the transaction snapshot if the transaction isolation
394 * level uses a transaction snapshot.
395 */
396 if (IsolationUsesXactSnapshot())
397 {
398 tsnapspace = shm_toc_allocate(pcxt->toc, tsnaplen);
399 SerializeSnapshot(transaction_snapshot, tsnapspace);
400 shm_toc_insert(pcxt->toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT,
401 tsnapspace);
402 }
403
404 /* Serialize the active snapshot. */
405 asnapspace = shm_toc_allocate(pcxt->toc, asnaplen);
406 SerializeSnapshot(active_snapshot, asnapspace);
407 shm_toc_insert(pcxt->toc, PARALLEL_KEY_ACTIVE_SNAPSHOT, asnapspace);
408
409 /* Provide the handle for per-session segment. */
410 session_dsm_handle_space = shm_toc_allocate(pcxt->toc,
411 sizeof(dsm_handle));
412 *(dsm_handle *) session_dsm_handle_space = session_dsm_handle;
413 shm_toc_insert(pcxt->toc, PARALLEL_KEY_SESSION_DSM,
414 session_dsm_handle_space);
415
416 /* Serialize transaction state. */
417 tstatespace = shm_toc_allocate(pcxt->toc, tstatelen);
418 SerializeTransactionState(tstatelen, tstatespace);
419 shm_toc_insert(pcxt->toc, PARALLEL_KEY_TRANSACTION_STATE, tstatespace);
420
421 /* Serialize pending syncs. */
422 pendingsyncsspace = shm_toc_allocate(pcxt->toc, pendingsyncslen);
423 SerializePendingSyncs(pendingsyncslen, pendingsyncsspace);
424 shm_toc_insert(pcxt->toc, PARALLEL_KEY_PENDING_SYNCS,
425 pendingsyncsspace);
426
427 /* Serialize reindex state. */
428 reindexspace = shm_toc_allocate(pcxt->toc, reindexlen);
429 SerializeReindexState(reindexlen, reindexspace);
430 shm_toc_insert(pcxt->toc, PARALLEL_KEY_REINDEX_STATE, reindexspace);
431
432 /* Serialize relmapper state. */
433 relmapperspace = shm_toc_allocate(pcxt->toc, relmapperlen);
434 SerializeRelationMap(relmapperlen, relmapperspace);
435 shm_toc_insert(pcxt->toc, PARALLEL_KEY_RELMAPPER_STATE,
436 relmapperspace);
437
438 /* Serialize uncommitted enum state. */
439 uncommittedenumsspace = shm_toc_allocate(pcxt->toc,
440 uncommittedenumslen);
441 SerializeUncommittedEnums(uncommittedenumsspace, uncommittedenumslen);
442 shm_toc_insert(pcxt->toc, PARALLEL_KEY_UNCOMMITTEDENUMS,
443 uncommittedenumsspace);
444
445 /* Serialize our ClientConnectionInfo. */
446 clientconninfospace = shm_toc_allocate(pcxt->toc, clientconninfolen);
447 SerializeClientConnectionInfo(clientconninfolen, clientconninfospace);
448 shm_toc_insert(pcxt->toc, PARALLEL_KEY_CLIENTCONNINFO,
449 clientconninfospace);
450
451 /* Allocate space for worker information. */
452 pcxt->worker = palloc0(sizeof(ParallelWorkerInfo) * pcxt->nworkers);
453
454 /*
455 * Establish error queues in dynamic shared memory.
456 *
457 * These queues should be used only for transmitting ErrorResponse,
458 * NoticeResponse, and NotifyResponse protocol messages. Tuple data
459 * should be transmitted via separate (possibly larger?) queues.
460 */
461 error_queue_space =
462 shm_toc_allocate(pcxt->toc,
463 mul_size(PARALLEL_ERROR_QUEUE_SIZE,
464 pcxt->nworkers));
465 for (i = 0; i < pcxt->nworkers; ++i)
466 {
467 char *start;
468 shm_mq *mq;
469
470 start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
471 mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
472 shm_mq_set_receiver(mq, MyProc);
473 pcxt->worker[i].error_mqh = shm_mq_attach(mq, pcxt->seg, NULL);
474 }
475 shm_toc_insert(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE, error_queue_space);
476
477 /*
478 * Serialize entrypoint information. It's unsafe to pass function
479 * pointers across processes, as the function pointer may be different
480 * in each process in EXEC_BACKEND builds, so we always pass library
481 * and function name. (We use library name "postgres" for functions
482 * in the core backend.)
483 */
484 lnamelen = strlen(pcxt->library_name);
485 entrypointstate = shm_toc_allocate(pcxt->toc, lnamelen +
486 strlen(pcxt->function_name) + 2);
487 strcpy(entrypointstate, pcxt->library_name);
488 strcpy(entrypointstate + lnamelen + 1, pcxt->function_name);
489 shm_toc_insert(pcxt->toc, PARALLEL_KEY_ENTRYPOINT, entrypointstate);
490 }
491
492 /* Update nworkers_to_launch, in case we changed nworkers above. */
493 pcxt->nworkers_to_launch = pcxt->nworkers;
494
495 /* Restore previous memory context. */
496 MemoryContextSwitchTo(oldcontext);
497}
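/*
 * [Editorial sketch, not in the original file] The worker-side mirror of the
 * serialization above is a plain shm_toc_lookup() from the entry point,
 * e.g. with the hypothetical MY_KEY from the earlier sketch:
 *
 *     static void
 *     mymodule_main(dsm_segment *seg, shm_toc *toc)
 *     {
 *         char *space = shm_toc_lookup(toc, MY_KEY, false);
 *         ...
 *     }
 */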
498
499/*
500 * Reinitialize the dynamic shared memory segment for a parallel context so
501 * that we can launch workers for it again.
502 */
503void
504ReinitializeParallelDSM(ParallelContext *pcxt)
505{
507
508 /* Wait for any old workers to exit. */
509 if (pcxt->nworkers_launched > 0)
510 {
511 WaitForParallelWorkersToFinish(pcxt);
512 WaitForParallelWorkersToExit(pcxt);
513 pcxt->nworkers_launched = 0;
514 if (pcxt->known_attached_workers)
515 {
516 pfree(pcxt->known_attached_workers);
517 pcxt->known_attached_workers = NULL;
518 pcxt->nknown_attached_workers = 0;
519 }
520 }
521
522 /* Reset a few bits of fixed parallel state to a clean state. */
523 fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED, false);
524 fps->last_xlog_end = 0;
525
526 /* Recreate error queues (if they exist). */
527 if (pcxt->nworkers > 0)
528 {
529 char *error_queue_space;
530 int i;
531
532 error_queue_space =
533 shm_toc_lookup(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE, false);
534 for (i = 0; i < pcxt->nworkers; ++i)
535 {
536 char *start;
537 shm_mq *mq;
538
539 start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
540 mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
541 shm_mq_set_receiver(mq, MyProc);
542 pcxt->worker[i].error_mqh = shm_mq_attach(mq, pcxt->seg, NULL);
543 }
544 }
545}
546
547/*
548 * Reinitialize parallel workers for a parallel context so that we can
549 * launch a different number of workers. This is required for cases where
550 * we need to reuse the same DSM segment, but the number of workers can
551 * vary from run-to-run.
552 */
553void
554ReinitializeParallelWorkers(ParallelContext *pcxt, int nworkers_to_launch)
555{
556 /*
557 * The number of workers that need to be launched must not exceed the
558 * number of workers with which the parallel context is initialized. But
559 * the caller might not know that InitializeParallelDSM reduced nworkers,
560 * so just silently trim the request.
561 */
562 pcxt->nworkers_to_launch = Min(pcxt->nworkers, nworkers_to_launch);
563}
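/*
 * [Editorial sketch, not in the original file] Reusing one DSM segment
 * across several runs (as parallel rescans do) combines the functions
 * above:
 *
 *     WaitForParallelWorkersToFinish(pcxt);
 *     ReinitializeParallelDSM(pcxt);
 *     ReinitializeParallelWorkers(pcxt, new_nworkers);    (optional)
 *     LaunchParallelWorkers(pcxt);
 */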
564
565/*
566 * Launch parallel workers.
567 */
568void
569LaunchParallelWorkers(ParallelContext *pcxt)
570{
571 MemoryContext oldcontext;
572 BackgroundWorker worker;
573 int i;
574 bool any_registrations_failed = false;
575
576 /* Skip this if we have no workers. */
577 if (pcxt->nworkers == 0 || pcxt->nworkers_to_launch == 0)
578 return;
579
580 /* We need to be a lock group leader. */
581 BecomeLockGroupLeader();
582
583 /* If we do have workers, we'd better have a DSM segment. */
584 Assert(pcxt->seg != NULL);
585
586 /* We might be running in a short-lived memory context. */
587 oldcontext = MemoryContextSwitchTo(TopTransactionContext);
588
589 /* Configure a worker. */
590 memset(&worker, 0, sizeof(worker));
591 snprintf(worker.bgw_name, BGW_MAXLEN, "parallel worker for PID %d",
592 MyProcPid);
593 snprintf(worker.bgw_type, BGW_MAXLEN, "parallel worker");
594 worker.bgw_flags =
595 BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION
596 | BGWORKER_CLASS_PARALLEL;
597 worker.bgw_start_time = BgWorkerStart_ConsistentState;
598 worker.bgw_restart_time = BGW_NEVER_RESTART;
599 sprintf(worker.bgw_library_name, "postgres");
600 sprintf(worker.bgw_function_name, "ParallelWorkerMain");
601 worker.bgw_main_arg = UInt32GetDatum(dsm_segment_handle(pcxt->seg));
602 worker.bgw_notify_pid = MyProcPid;
603
604 /*
605 * Start workers.
606 *
607 * The caller must be able to tolerate ending up with fewer workers than
608 * expected, so there is no need to throw an error here if registration
609 * fails. It wouldn't help much anyway, because registering the worker in
610 * no way guarantees that it will start up and initialize successfully.
611 */
612 for (i = 0; i < pcxt->nworkers_to_launch; ++i)
613 {
614 memcpy(worker.bgw_extra, &i, sizeof(int));
615 if (!any_registrations_failed &&
616 RegisterDynamicBackgroundWorker(&worker,
617 &pcxt->worker[i].bgwhandle))
618 {
619 shm_mq_set_handle(pcxt->worker[i].error_mqh,
620 pcxt->worker[i].bgwhandle);
621 pcxt->nworkers_launched++;
622 }
623 else
624 {
625 /*
626 * If we weren't able to register the worker, then we've bumped up
627 * against the max_worker_processes limit, and future
628 * registrations will probably fail too, so arrange to skip them.
629 * But we still have to execute this code for the remaining slots
630 * to make sure that we forget about the error queues we budgeted
631 * for those workers. Otherwise, we'll wait for them to start,
632 * but they never will.
633 */
634 any_registrations_failed = true;
635 pcxt->worker[i].bgwhandle = NULL;
636 shm_mq_detach(pcxt->worker[i].error_mqh);
637 pcxt->worker[i].error_mqh = NULL;
638 }
639 }
640
641 /*
642 * Now that nworkers_launched has taken its final value, we can initialize
643 * known_attached_workers.
644 */
645 if (pcxt->nworkers_launched > 0)
646 {
647 pcxt->known_attached_workers =
648 palloc0(sizeof(bool) * pcxt->nworkers_launched);
649 pcxt->nknown_attached_workers = 0;
650 }
651
652 /* Restore previous memory context. */
653 MemoryContextSwitchTo(oldcontext);
654}
655
656/*
657 * Wait for all workers to attach to their error queues, and throw an error if
658 * any worker fails to do this.
659 *
660 * Callers can assume that if this function returns successfully, then the
661 * number of workers given by pcxt->nworkers_launched have initialized and
662 * attached to their error queues. Whether or not these workers are guaranteed
663 * to still be running depends on what code the caller asked them to run;
664 * this function does not guarantee that they have not exited. However, it
665 * does guarantee that any workers which exited must have done so cleanly and
666 * after successfully performing the work with which they were tasked.
667 *
668 * If this function is not called, then some of the workers that were launched
669 * may not have been started due to a fork() failure, or may have exited during
670 * early startup prior to attaching to the error queue, so nworkers_launched
671 * cannot be viewed as completely reliable. It will never be less than the
672 * number of workers which actually started, but it might be more. Any workers
673 * that failed to start will still be discovered by
674 * WaitForParallelWorkersToFinish and an error will be thrown at that time,
675 * provided that function is eventually reached.
676 *
677 * In general, the leader process should do as much work as possible before
678 * calling this function. fork() failures and other early-startup failures
679 * are very uncommon, and having the leader sit idle when it could be doing
680 * useful work is undesirable. However, if the leader needs to wait for
681 * all of its workers or for a specific worker, it may want to call this
682 * function before doing so. If not, it must make some other provision for
683 * the failure-to-start case, lest it wait forever. On the other hand, a
684 * leader which never waits for a worker that might not be started yet, or
685 * at least never does so prior to WaitForParallelWorkersToFinish(), need not
686 * call this function at all.
687 */
688void
689WaitForParallelWorkersToAttach(ParallelContext *pcxt)
690{
691 int i;
692
693 /* Skip this if we have no launched workers. */
694 if (pcxt->nworkers_launched == 0)
695 return;
696
697 for (;;)
698 {
699 /*
700 * This will process any parallel messages that are pending and it may
701 * also throw an error propagated from a worker.
702 */
703 CHECK_FOR_INTERRUPTS();
704
705 for (i = 0; i < pcxt->nworkers_launched; ++i)
706 {
707 BgwHandleStatus status;
708 shm_mq *mq;
709 int rc;
710 pid_t pid;
711
712 if (pcxt->known_attached_workers[i])
713 continue;
714
715 /*
716 * If error_mqh is NULL, then the worker has already exited
717 * cleanly.
718 */
719 if (pcxt->worker[i].error_mqh == NULL)
720 {
721 pcxt->known_attached_workers[i] = true;
722 ++pcxt->nknown_attached_workers;
723 continue;
724 }
725
726 status = GetBackgroundWorkerPid(pcxt->worker[i].bgwhandle, &pid);
727 if (status == BGWH_STARTED)
728 {
729 /* Has the worker attached to the error queue? */
730 mq = shm_mq_get_queue(pcxt->worker[i].error_mqh);
731 if (shm_mq_get_sender(mq) != NULL)
732 {
733 /* Yes, so it is known to be attached. */
734 pcxt->known_attached_workers[i] = true;
735 ++pcxt->nknown_attached_workers;
736 }
737 }
738 else if (status == BGWH_STOPPED)
739 {
740 /*
741 * If the worker stopped without attaching to the error queue,
742 * throw an error.
743 */
744 mq = shm_mq_get_queue(pcxt->worker[i].error_mqh);
745 if (shm_mq_get_sender(mq) == NULL)
746 ereport(ERROR,
747 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
748 errmsg("parallel worker failed to initialize"),
749 errhint("More details may be available in the server log.")));
750
751 pcxt->known_attached_workers[i] = true;
752 ++pcxt->nknown_attached_workers;
753 }
754 else
755 {
756 /*
757 * Worker not yet started, so we must wait. The postmaster
758 * will notify us if the worker's state changes. Our latch
759 * might also get set for some other reason, but if so we'll
760 * just end up waiting for the same worker again.
761 */
762 rc = WaitLatch(MyLatch,
763 WL_LATCH_SET | WL_EXIT_ON_PM_DEATH,
764 -1, WAIT_EVENT_BGWORKER_STARTUP);
765
766 if (rc & WL_LATCH_SET)
767 ResetLatch(MyLatch);
768 }
769 }
770
771 /* If all workers are known to have started, we're done. */
772 if (pcxt->nknown_attached_workers >= pcxt->nworkers_launched)
773 {
774 Assert(pcxt->nknown_attached_workers == pcxt->nworkers_launched);
775 break;
776 }
777 }
778}
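/*
 * [Editorial sketch, not in the original file] Per the comment above, a
 * leader that will block waiting on its workers should do something like:
 *
 *     LaunchParallelWorkers(pcxt);
 *     ... leader-side work that doesn't depend on the workers ...
 *     WaitForParallelWorkersToAttach(pcxt);
 *     ... now it is safe to wait for a specific worker ...
 */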
779
780/*
781 * Wait for all workers to finish computing.
782 *
783 * Even if the parallel operation seems to have completed successfully, it's
784 * important to call this function afterwards. We must not miss any errors
785 * the workers may have thrown during the parallel operation, or any that they
786 * may yet throw while shutting down.
787 *
788 * Also, we want to update our notion of XactLastRecEnd based on worker
789 * feedback.
790 */
791void
792WaitForParallelWorkersToFinish(ParallelContext *pcxt)
793{
794 for (;;)
795 {
796 bool anyone_alive = false;
797 int nfinished = 0;
798 int i;
799
800 /*
801 * This will process any parallel messages that are pending, which may
802 * change the outcome of the loop that follows. It may also throw an
803 * error propagated from a worker.
804 */
805 CHECK_FOR_INTERRUPTS();
806
807 for (i = 0; i < pcxt->nworkers_launched; ++i)
808 {
809 /*
810 * If error_mqh is NULL, then the worker has already exited
811 * cleanly. If we have received a message through error_mqh from
812 * the worker, we know it started up cleanly, and therefore we're
813 * certain to be notified when it exits.
814 */
815 if (pcxt->worker[i].error_mqh == NULL)
816 ++nfinished;
817 else if (pcxt->known_attached_workers[i])
818 {
819 anyone_alive = true;
820 break;
821 }
822 }
823
824 if (!anyone_alive)
825 {
826 /* If all workers are known to have finished, we're done. */
827 if (nfinished >= pcxt->nworkers_launched)
828 {
829 Assert(nfinished == pcxt->nworkers_launched);
830 break;
831 }
832
833 /*
834 * We didn't detect any living workers, but not all workers are
835 * known to have exited cleanly. Either not all workers have
836 * launched yet, or maybe some of them failed to start or
837 * terminated abnormally.
838 */
839 for (i = 0; i < pcxt->nworkers_launched; ++i)
840 {
841 pid_t pid;
842 shm_mq *mq;
843
844 /*
845 * If the worker is BGWH_NOT_YET_STARTED or BGWH_STARTED, we
846 * should just keep waiting. If it is BGWH_STOPPED, then
847 * further investigation is needed.
848 */
849 if (pcxt->worker[i].error_mqh == NULL ||
850 pcxt->worker[i].bgwhandle == NULL ||
851 GetBackgroundWorkerPid(pcxt->worker[i].bgwhandle,
852 &pid) != BGWH_STOPPED)
853 continue;
854
855 /*
856 * Check whether the worker ended up stopped without ever
857 * attaching to the error queue. If so, the postmaster was
858 * unable to fork the worker or it exited without initializing
859 * properly. We must throw an error, since the caller may
860 * have been expecting the worker to do some work before
861 * exiting.
862 */
863 mq = shm_mq_get_queue(pcxt->worker[i].error_mqh);
864 if (shm_mq_get_sender(mq) == NULL)
865 ereport(ERROR,
866 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
867 errmsg("parallel worker failed to initialize"),
868 errhint("More details may be available in the server log.")));
869
870 /*
871 * The worker is stopped, but is attached to the error queue.
872 * Unless there's a bug somewhere, this will only happen when
873 * the worker writes messages and terminates after the
874 * CHECK_FOR_INTERRUPTS() near the top of this function and
875 * before the call to GetBackgroundWorkerPid(). In that case,
876 * our latch should have been set as well and the right things
877 * will happen on the next pass through the loop.
878 */
879 }
880 }
881
882 (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, -1,
883 WAIT_EVENT_PARALLEL_FINISH);
884 ResetLatch(MyLatch);
885 }
886
887 if (pcxt->toc != NULL)
888 {
889 FixedParallelState *fps;
890
891 fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED, false);
892 if (fps->last_xlog_end > XactLastRecEnd)
893 XactLastRecEnd = fps->last_xlog_end;
894 }
895}
896
897/*
898 * Wait for all workers to exit.
899 *
900 * This function ensures that workers have been completely shutdown. The
901 * difference between WaitForParallelWorkersToFinish and this function is
902 * that the former just ensures that the last message sent by a worker backend
903 * is received by the leader backend, whereas this one ensures complete shutdown.
904 */
905static void
906WaitForParallelWorkersToExit(ParallelContext *pcxt)
907{
908 int i;
909
910 /* Wait until the workers actually die. */
911 for (i = 0; i < pcxt->nworkers_launched; ++i)
912 {
913 BgwHandleStatus status;
914
915 if (pcxt->worker == NULL || pcxt->worker[i].bgwhandle == NULL)
916 continue;
917
918 status = WaitForBackgroundWorkerShutdown(pcxt->worker[i].bgwhandle);
919
920 /*
921 * If the postmaster kicked the bucket, we have no chance of cleaning
922 * up safely -- we won't be able to tell when our workers are actually
923 * dead. This doesn't necessitate a PANIC since they will all abort
924 * eventually, but we can't safely continue this session.
925 */
926 if (status == BGWH_POSTMASTER_DIED)
927 ereport(FATAL,
928 (errcode(ERRCODE_ADMIN_SHUTDOWN),
929 errmsg("postmaster exited during a parallel transaction")));
930
931 /* Release memory. */
932 pfree(pcxt->worker[i].bgwhandle);
933 pcxt->worker[i].bgwhandle = NULL;
934 }
935}
936
937/*
938 * Destroy a parallel context.
939 *
940 * If expecting a clean exit, you should use WaitForParallelWorkersToFinish()
941 * first, before calling this function. When this function is invoked, any
942 * remaining workers are forcibly killed; the dynamic shared memory segment
943 * is unmapped; and we then wait (uninterruptibly) for the workers to exit.
944 */
945void
946DestroyParallelContext(ParallelContext *pcxt)
947{
948 int i;
949
950 /*
951 * Be careful about order of operations here! We remove the parallel
952 * context from the list before we do anything else; otherwise, if an
953 * error occurs during a subsequent step, we might try to nuke it again
954 * from AtEOXact_Parallel or AtEOSubXact_Parallel.
955 */
956 dlist_delete(&pcxt->node);
957
958 /* Kill each worker in turn, and forget their error queues. */
959 if (pcxt->worker != NULL)
960 {
961 for (i = 0; i < pcxt->nworkers_launched; ++i)
962 {
963 if (pcxt->worker[i].error_mqh != NULL)
964 {
965 TerminateBackgroundWorker(pcxt->worker[i].bgwhandle);
966
967 shm_mq_detach(pcxt->worker[i].error_mqh);
968 pcxt->worker[i].error_mqh = NULL;
969 }
970 }
971 }
972
973 /*
974 * If we have allocated a shared memory segment, detach it. This will
975 * implicitly detach the error queues, and any other shared memory queues,
976 * stored there.
977 */
978 if (pcxt->seg != NULL)
979 {
980 dsm_detach(pcxt->seg);
981 pcxt->seg = NULL;
982 }
983
984 /*
985 * If this parallel context is actually in backend-private memory rather
986 * than shared memory, free that memory instead.
987 */
988 if (pcxt->private_memory != NULL)
989 {
990 pfree(pcxt->private_memory);
991 pcxt->private_memory = NULL;
992 }
993
994 /*
995 * We can't finish transaction commit or abort until all of the workers
996 * have exited. This means, in particular, that we can't respond to
997 * interrupts at this stage.
998 */
999 HOLD_INTERRUPTS();
1000 WaitForParallelWorkersToExit(pcxt);
1001 RESUME_INTERRUPTS();
1002
1003 /* Free the worker array itself. */
1004 if (pcxt->worker != NULL)
1005 {
1006 pfree(pcxt->worker);
1007 pcxt->worker = NULL;
1008 }
1009
1010 /* Free memory. */
1011 pfree(pcxt->library_name);
1012 pfree(pcxt->function_name);
1013 pfree(pcxt);
1014}
1015
1016/*
1017 * Are there any parallel contexts currently active?
1018 */
1019bool
1020ParallelContextActive(void)
1021{
1022 return !dlist_is_empty(&pcxt_list);
1023}
1024
1025/*
1026 * Handle receipt of an interrupt indicating a parallel worker message.
1027 *
1028 * Note: this is called within a signal handler! All we can do is set
1029 * a flag that will cause the next CHECK_FOR_INTERRUPTS() to invoke
1030 * HandleParallelMessages().
1031 */
1032void
1033HandleParallelMessageInterrupt(void)
1034{
1035 InterruptPending = true;
1036 ParallelMessagePending = true;
1037 SetLatch(MyLatch);
1038}
1039
1040/*
1041 * Handle any queued protocol messages received from parallel workers.
1042 */
1043void
1044HandleParallelMessages(void)
1045{
1046 dlist_iter iter;
1047 MemoryContext oldcontext;
1048
1049 static MemoryContext hpm_context = NULL;
1050
1051 /*
1052 * This is invoked from ProcessInterrupts(), and since some of the
1053 * functions it calls contain CHECK_FOR_INTERRUPTS(), there is a potential
1054 * for recursive calls if more signals are received while this runs. It's
1055 * unclear that recursive entry would be safe, and it doesn't seem useful
1056 * even if it is safe, so let's block interrupts until done.
1057 */
1058 HOLD_INTERRUPTS();
1059
1060 /*
1061 * Moreover, CurrentMemoryContext might be pointing almost anywhere. We
1062 * don't want to risk leaking data into long-lived contexts, so let's do
1063 * our work here in a private context that we can reset on each use.
1064 */
1065 if (hpm_context == NULL) /* first time through? */
1066 hpm_context = AllocSetContextCreate(TopMemoryContext,
1067 "HandleParallelMessages",
1069 else
1070 MemoryContextReset(hpm_context);
1071
1072 oldcontext = MemoryContextSwitchTo(hpm_context);
1073
1074 /* OK to process messages. Reset the flag saying there are more to do. */
1075 ParallelMessagePending = false;
1076
1077 dlist_foreach(iter, &pcxt_list)
1078 {
1079 ParallelContext *pcxt;
1080 int i;
1081
1082 pcxt = dlist_container(ParallelContext, node, iter.cur);
1083 if (pcxt->worker == NULL)
1084 continue;
1085
1086 for (i = 0; i < pcxt->nworkers_launched; ++i)
1087 {
1088 /*
1089 * Read as many messages as we can from each worker, but stop when
1090 * either (1) the worker's error queue goes away, which can happen
1091 * if we receive a Terminate message from the worker; or (2) no
1092 * more messages can be read from the worker without blocking.
1093 */
1094 while (pcxt->worker[i].error_mqh != NULL)
1095 {
1096 shm_mq_result res;
1097 Size nbytes;
1098 void *data;
1099
1100 res = shm_mq_receive(pcxt->worker[i].error_mqh, &nbytes,
1101 &data, true);
1102 if (res == SHM_MQ_WOULD_BLOCK)
1103 break;
1104 else if (res == SHM_MQ_SUCCESS)
1105 {
1106 StringInfoData msg;
1107
1108 initStringInfo(&msg);
1109 appendBinaryStringInfo(&msg, data, nbytes);
1110 HandleParallelMessage(pcxt, i, &msg);
1111 pfree(msg.data);
1112 }
1113 else
1114 ereport(ERROR,
1115 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1116 errmsg("lost connection to parallel worker")));
1117 }
1118 }
1119 }
1120
1121 MemoryContextSwitchTo(oldcontext);
1122
1123 /* Might as well clear the context on our way out */
1124 MemoryContextReset(hpm_context);
1125
1126 RESUME_INTERRUPTS();
1127}
1128
1129/*
1130 * Handle a single protocol message received from a single parallel worker.
1131 */
1132static void
1133HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg)
1134{
1135 char msgtype;
1136
1137 if (pcxt->known_attached_workers != NULL &&
1138 !pcxt->known_attached_workers[i])
1139 {
1140 pcxt->known_attached_workers[i] = true;
1141 pcxt->nknown_attached_workers++;
1142 }
1143
1144 msgtype = pq_getmsgbyte(msg);
1145
1146 switch (msgtype)
1147 {
1148 case PqMsg_ErrorResponse:
1149 case PqMsg_NoticeResponse:
1150 {
1151 ErrorData edata;
1152 ErrorContextCallback *save_error_context_stack;
1153
1154 /* Parse ErrorResponse or NoticeResponse. */
1155 pq_parse_errornotice(msg, &edata);
1156
1157 /* Death of a worker isn't enough justification for suicide. */
1158 edata.elevel = Min(edata.elevel, ERROR);
1159
1160 /*
1161 * If desired, add a context line to show that this is a
1162 * message propagated from a parallel worker. Otherwise, it
1163 * can sometimes be confusing to understand what actually
1164 * happened. (We don't do this in DEBUG_PARALLEL_REGRESS mode
1165 * because it causes test-result instability depending on
1166 * whether a parallel worker is actually used or not.)
1167 */
1169 {
1170 if (edata.context)
1171 edata.context = psprintf("%s\n%s", edata.context,
1172 _("parallel worker"));
1173 else
1174 edata.context = pstrdup(_("parallel worker"));
1175 }
1176
1177 /*
1178 * Context beyond that should use the error context callbacks
1179 * that were in effect when the ParallelContext was created,
1180 * not the current ones.
1181 */
1182 save_error_context_stack = error_context_stack;
1183 error_context_stack = pcxt->error_context_stack;
1184
1185 /* Rethrow error or print notice. */
1186 ThrowErrorData(&edata);
1187
1188 /* Not an error, so restore previous context stack. */
1189 error_context_stack = save_error_context_stack;
1190
1191 break;
1192 }
1193
1194 case PqMsg_NotificationResponse:
1195 {
1196 /* Propagate NotifyResponse. */
1197 int32 pid;
1198 const char *channel;
1199 const char *payload;
1200
1201 pid = pq_getmsgint(msg, 4);
1202 channel = pq_getmsgrawstring(msg);
1203 payload = pq_getmsgrawstring(msg);
1204 pq_endmessage(msg);
1205
1206 NotifyMyFrontEnd(channel, payload, pid);
1207
1208 break;
1209 }
1210
1211 case 'P': /* Parallel progress reporting */
1212 {
1213 /*
1214 * Only incremental progress reporting is currently supported.
1215 * However, it's possible to add more fields to the message to
1216 * allow for handling of other backend progress APIs.
1217 */
1218 int index = pq_getmsgint(msg, 4);
1219 int64 incr = pq_getmsgint64(msg);
1220
1221 pq_getmsgend(msg);
1222
1223 pgstat_progress_incr_param(index, incr);
1224
1225 break;
1226 }
1227
1228 case PqMsg_Terminate:
1229 {
1230 shm_mq_detach(pcxt->worker[i].error_mqh);
1231 pcxt->worker[i].error_mqh = NULL;
1232 break;
1233 }
1234
1235 default:
1236 {
1237 elog(ERROR, "unrecognized message type received from parallel worker: %c (message length %d bytes)",
1238 msgtype, msg->len);
1239 }
1240 }
1241}
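/*
 * [Editorial note, not in the original file] The sender side of the 'P'
 * (progress) case above lives in backend_progress.c; a worker builds the
 * same two-field payload roughly like this:
 *
 *     pq_beginmessage(&msgbuf, 'P');
 *     pq_sendint32(&msgbuf, index);
 *     pq_sendint64(&msgbuf, incr);
 *     pq_endmessage(&msgbuf);
 */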
1242
1243/*
1244 * End-of-subtransaction cleanup for parallel contexts.
1245 *
1246 * Here we remove only parallel contexts initiated within the current
1247 * subtransaction.
1248 */
1249void
1250AtEOSubXact_Parallel(bool isCommit, SubTransactionId mySubId)
1251{
1252 while (!dlist_is_empty(&pcxt_list))
1253 {
1254 ParallelContext *pcxt;
1255
1256 pcxt = dlist_head_element(ParallelContext, node, &pcxt_list);
1257 if (pcxt->subid != mySubId)
1258 break;
1259 if (isCommit)
1260 elog(WARNING, "leaked parallel context");
1261 DestroyParallelContext(pcxt);
1262 }
1263}
1264
1265/*
1266 * End-of-transaction cleanup for parallel contexts.
1267 *
1268 * We nuke all remaining parallel contexts.
1269 */
1270void
1271AtEOXact_Parallel(bool isCommit)
1272{
1273 while (!dlist_is_empty(&pcxt_list))
1274 {
1275 ParallelContext *pcxt;
1276
1277 pcxt = dlist_head_element(ParallelContext, node, &pcxt_list);
1278 if (isCommit)
1279 elog(WARNING, "leaked parallel context");
1280 DestroyParallelContext(pcxt);
1281 }
1282}
1283
1284/*
1285 * Main entrypoint for parallel workers.
1286 */
1287void
1288ParallelWorkerMain(Datum main_arg)
1289{
1290 dsm_segment *seg;
1291 shm_toc *toc;
1292 FixedParallelState *fps;
1293 char *error_queue_space;
1294 shm_mq *mq;
1295 shm_mq_handle *mqh;
1296 char *libraryspace;
1297 char *entrypointstate;
1298 char *library_name;
1299 char *function_name;
1300 parallel_worker_main_type entrypt;
1301 char *gucspace;
1302 char *combocidspace;
1303 char *tsnapspace;
1304 char *asnapspace;
1305 char *tstatespace;
1306 char *pendingsyncsspace;
1307 char *reindexspace;
1308 char *relmapperspace;
1309 char *uncommittedenumsspace;
1310 char *clientconninfospace;
1311 char *session_dsm_handle_space;
1312 Snapshot tsnapshot;
1313 Snapshot asnapshot;
1314
1315 /* Set flag to indicate that we're initializing a parallel worker. */
1316 InitializingParallelWorker = true;
1317
1318 /* Establish signal handlers. */
1319 pqsignal(SIGTERM, die);
1320 BackgroundWorkerUnblockSignals();
1321
1322 /* Determine and set our parallel worker number. */
1323 Assert(ParallelWorkerNumber == -1);
1324 memcpy(&ParallelWorkerNumber, MyBgworkerEntry->bgw_extra, sizeof(int));
1325
1326 /* Set up a memory context to work in, just for cleanliness. */
1327 CurrentMemoryContext = AllocSetContextCreate(TopMemoryContext,
1328 "Parallel worker",
1330
1331 /*
1332 * Attach to the dynamic shared memory segment for the parallel query, and
1333 * find its table of contents.
1334 *
1335 * Note: at this point, we have not created any ResourceOwner in this
1336 * process. This will result in our DSM mapping surviving until process
1337 * exit, which is fine. If there were a ResourceOwner, it would acquire
1338 * ownership of the mapping, but we have no need for that.
1339 */
1340 seg = dsm_attach(DatumGetUInt32(main_arg));
1341 if (seg == NULL)
1342 ereport(ERROR,
1343 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1344 errmsg("could not map dynamic shared memory segment")));
1345 toc = shm_toc_attach(PARALLEL_MAGIC, dsm_segment_address(seg));
1346 if (toc == NULL)
1347 ereport(ERROR,
1348 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1349 errmsg("invalid magic number in dynamic shared memory segment")));
1350
1351 /* Look up fixed parallel state. */
1352 fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED, false);
1353 MyFixedParallelState = fps;
1354
1355 /* Arrange to signal the leader if we exit. */
1356 ParallelLeaderPid = fps->parallel_leader_pid;
1357 ParallelLeaderProcNumber = fps->parallel_leader_proc_number;
1358 before_shmem_exit(ParallelWorkerShutdown, PointerGetDatum(seg));
1359
1360 /*
1361 * Now we can find and attach to the error queue provided for us. That's
1362 * good, because until we do that, any errors that happen here will not be
1363 * reported back to the process that requested that this worker be
1364 * launched.
1365 */
1366 error_queue_space = shm_toc_lookup(toc, PARALLEL_KEY_ERROR_QUEUE, false);
1367 mq = (shm_mq *) (error_queue_space +
1368 ParallelWorkerNumber * PARALLEL_ERROR_QUEUE_SIZE);
1369 shm_mq_set_sender(mq, MyProc);
1370 mqh = shm_mq_attach(mq, seg, NULL);
1371 pq_redirect_to_shm_mq(seg, mqh);
1372 pq_set_parallel_leader(fps->parallel_leader_pid,
1373 fps->parallel_leader_proc_number);
1374
1375 /*
1376 * Hooray! Primary initialization is complete. Now, we need to set up our
1377 * backend-local state to match the original backend.
1378 */
1379
1380 /*
1381 * Join locking group. We must do this before anything that could try to
1382 * acquire a heavyweight lock, because any heavyweight locks acquired to
1383 * this point could block either directly against the parallel group
1384 * leader or against some process which in turn waits for a lock that
1385 * conflicts with the parallel group leader, causing an undetected
1386 * deadlock. (If we can't join the lock group, the leader has gone away,
1387 * so just exit quietly.)
1388 */
1389 if (!BecomeLockGroupMember(fps->parallel_leader_pgproc,
1390 fps->parallel_leader_pid))
1391 return;
1392
1393 /*
1394 * Restore transaction and statement start-time timestamps. This must
1395 * happen before anything that would start a transaction, else asserts in
1396 * xact.c will fire.
1397 */
1398 SetParallelStartTimestamps(fps->xact_ts, fps->stmt_ts);
1399
1400 /*
1401 * Identify the entry point to be called. In theory this could result in
1402 * loading an additional library, though most likely the entry point is in
1403 * the core backend or in a library we just loaded.
1404 */
1405 entrypointstate = shm_toc_lookup(toc, PARALLEL_KEY_ENTRYPOINT, false);
1406 library_name = entrypointstate;
1407 function_name = entrypointstate + strlen(library_name) + 1;
1408
1409 entrypt = LookupParallelWorkerFunction(library_name, function_name);
1410
1411 /*
1412 * Restore current session authorization and role id. No verification
1413 * happens here, we just blindly adopt the leader's state. Note that this
1414 * has to happen before InitPostgres, since InitializeSessionUserId will
1415 * not set these variables.
1416 */
1417 SetAuthenticatedUserId(fps->authenticated_user_id);
1418 SetSessionAuthorization(fps->session_user_id,
1419 fps->session_user_is_superuser);
1420 SetCurrentRoleId(fps->outer_user_id, fps->role_is_superuser);
1421
1422 /*
1423 * Restore database connection. We skip connection authorization checks,
1424 * reasoning that (a) the leader checked these things when it started, and
1425 * (b) we do not want parallel mode to cause these failures, because that
1426 * would make use of parallel query plans not transparent to applications.
1427 */
1428 BackgroundWorkerInitializeConnectionByOid(fps->database_id,
1429 fps->authenticated_user_id,
1430 BGWORKER_BYPASS_ALLOWCONN |
1431 BGWORKER_BYPASS_ROLELOGINCHECK);
1432
1433 /*
1434 * Set the client encoding to the database encoding, since that is what
1435 * the leader will expect. (We're cheating a bit by not calling
1436 * PrepareClientEncoding first. It's okay because this call will always
1437 * result in installing a no-op conversion. No error should be possible,
1438 * but check anyway.)
1439 */
1440 if (SetClientEncoding(GetDatabaseEncoding()) < 0)
1441 elog(ERROR, "SetClientEncoding(%d) failed", GetDatabaseEncoding());
1442
1443 /*
1444 * Load libraries that were loaded by original backend. We want to do
1445 * this before restoring GUCs, because the libraries might define custom
1446 * variables.
1447 */
1448 libraryspace = shm_toc_lookup(toc, PARALLEL_KEY_LIBRARY, false);
1449 StartTransactionCommand();
1450 RestoreLibraryState(libraryspace);
1451 CommitTransactionCommand();
1452
1453 /* Crank up a transaction state appropriate to a parallel worker. */
1454 tstatespace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_STATE, false);
1455 StartParallelWorkerTransaction(tstatespace);
1456
1457 /*
1458 * Restore state that affects catalog access. Ideally we'd do this even
1459 * before calling InitPostgres, but that has order-of-initialization
1460 * problems, and also the relmapper would get confused during the
1461 * CommitTransactionCommand call above.
1462 */
1463 pendingsyncsspace = shm_toc_lookup(toc, PARALLEL_KEY_PENDING_SYNCS,
1464 false);
1465 RestorePendingSyncs(pendingsyncsspace);
1466 relmapperspace = shm_toc_lookup(toc, PARALLEL_KEY_RELMAPPER_STATE, false);
1467 RestoreRelationMap(relmapperspace);
1468 reindexspace = shm_toc_lookup(toc, PARALLEL_KEY_REINDEX_STATE, false);
1469 RestoreReindexState(reindexspace);
1470 combocidspace = shm_toc_lookup(toc, PARALLEL_KEY_COMBO_CID, false);
1471 RestoreComboCIDState(combocidspace);
1472
1473 /* Attach to the per-session DSM segment and contained objects. */
1474 session_dsm_handle_space =
1475 shm_toc_lookup(toc, PARALLEL_KEY_SESSION_DSM, false);
1476 AttachSession(*(dsm_handle *) session_dsm_handle_space);
1477
1478 /*
1479 * If the transaction isolation level is REPEATABLE READ or SERIALIZABLE,
1480 * the leader has serialized the transaction snapshot and we must restore
1481 * it. At lower isolation levels, there is no transaction-lifetime
1482 * snapshot, but we need TransactionXmin to get set to a value which is
1483 * less than or equal to the xmin of every snapshot that will be used by
1484 * this worker. The easiest way to accomplish that is to install the
1485 * active snapshot as the transaction snapshot. Code running in this
1486 * parallel worker might take new snapshots via GetTransactionSnapshot()
1487 * or GetLatestSnapshot(), but it shouldn't have any way of acquiring a
1488 * snapshot older than the active snapshot.
1489 */
1490 asnapspace = shm_toc_lookup(toc, PARALLEL_KEY_ACTIVE_SNAPSHOT, false);
1491 tsnapspace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT, true);
1492 asnapshot = RestoreSnapshot(asnapspace);
1493 tsnapshot = tsnapspace ? RestoreSnapshot(tsnapspace) : asnapshot;
1494 RestoreTransactionSnapshot(tsnapshot,
1495 fps->parallel_leader_pgproc);
1496 PushActiveSnapshot(asnapshot);
1497
1498 /*
1499 * We've changed which tuples we can see, and must therefore invalidate
1500 * system caches.
1501 */
1502 InvalidateSystemCaches();
1503
1504 /*
1505 * Restore GUC values from launching backend. We can't do this earlier,
1506 * because GUC check hooks that do catalog lookups need to see the same
1507 * database state as the leader. Also, the check hooks for
1508 * session_authorization and role assume we already set the correct role
1509 * OIDs.
1510 */
1511 gucspace = shm_toc_lookup(toc, PARALLEL_KEY_GUC, false);
1512 RestoreGUCState(gucspace);
1513
1514 /*
1515 * Restore current user ID and security context. No verification happens
1516 * here, we just blindly adopt the leader's state. We can't do this till
1517 * after restoring GUCs, else we'll get complaints about restoring
1518 * session_authorization and role. (In effect, we're assuming that all
1519 * the restored values are okay to set, even if we are now inside a
1520 * restricted context.)
1521 */
1522 SetUserIdAndSecContext(fps->current_user_id, fps->sec_context);
1523
1524 /* Restore temp-namespace state to ensure search path matches leader's. */
1525 SetTempNamespaceState(fps->temp_namespace_id,
1526 fps->temp_toast_namespace_id);
1527
1528 /* Restore uncommitted enums. */
1529 uncommittedenumsspace = shm_toc_lookup(toc, PARALLEL_KEY_UNCOMMITTEDENUMS,
1530 false);
1531 RestoreUncommittedEnums(uncommittedenumsspace);
1532
1533 /* Restore the ClientConnectionInfo. */
1534 clientconninfospace = shm_toc_lookup(toc, PARALLEL_KEY_CLIENTCONNINFO,
1535 false);
1536 RestoreClientConnectionInfo(clientconninfospace);
1537
1538 /*
1539 * Initialize SystemUser now that MyClientConnectionInfo is restored. Also
1540 * ensure that auth_method is actually valid, aka authn_id is not NULL.
1541 */
1542 if (MyClientConnectionInfo.authn_id)
1543 InitializeSystemUser(MyClientConnectionInfo.authn_id,
1544 hba_authname(MyClientConnectionInfo.auth_method));
1545
1546 /* Attach to the leader's serializable transaction, if SERIALIZABLE. */
1547 AttachSerializableXact(fps->serializable_xact_handle);
1548
1549 /*
1550 * We've initialized all of our state now; nothing should change
1551 * hereafter.
1552 */
1553 InitializingParallelWorker = false;
1554 EnterParallelMode();
1555
1556 /*
1557 * Time to do the real work: invoke the caller-supplied code.
1558 */
1559 entrypt(seg, toc);
1560
1561 /* Must exit parallel mode to pop active snapshot. */
1562 ExitParallelMode();
1563
1564 /* Must pop active snapshot so snapmgr.c doesn't complain. */
1565 PopActiveSnapshot();
1566
1567 /* Shut down the parallel-worker transaction. */
1568 EndParallelWorkerTransaction();
1569
1570 /* Detach from the per-session DSM segment. */
1571 DetachSession();
1572
1573 /* Report success. */
1574 pq_putmessage(PqMsg_Terminate, NULL, 0);
1575}
1576
1577/*
1578 * Update shared memory with the ending location of the last WAL record we
1579 * wrote, if it's greater than the value already stored there.
1580 */
1581void
1582ParallelWorkerReportLastRecEnd(XLogRecPtr last_xlog_end)
1583{
1585
1586 Assert(fps != NULL);
1587 SpinLockAcquire(&fps->mutex);
1588 if (fps->last_xlog_end < last_xlog_end)
1589 fps->last_xlog_end = last_xlog_end;
1590 SpinLockRelease(&fps->mutex);
1591}
1592
1593/*
1594 * Make sure the leader tries to read from our error queue one more time.
1595 * This guards against the case where we exit uncleanly without sending an
1596 * ErrorResponse to the leader, for example because some code calls proc_exit
1597 * directly.
1598 *
1599 * Also explicitly detach from dsm segment so that subsystems using
1600 * on_dsm_detach() have a chance to send stats before the stats subsystem is
1601 * shut down as part of a before_shmem_exit() hook.
1602 *
1603 * One might think this could instead be solved by carefully ordering the
1604 * attaching to dsm segments, so that the pgstats segments get detached from
1605 * later than the parallel query one. That turns out to not work because the
1606 * stats hash might need to grow which can cause new segments to be allocated,
1607 * which then will be detached from earlier.
1608 */
1609static void
1610ParallelWorkerShutdown(int code, Datum arg)
1611{
1615
1616 dsm_detach((dsm_segment *) DatumGetPointer(arg));
1617}
1618
1619/*
1620 * Look up (and possibly load) a parallel worker entry point function.
1621 *
1622 * For functions contained in the core code, we use library name "postgres"
1623 * and consult the InternalParallelWorkers array. External functions are
1624 * looked up, and loaded if necessary, using load_external_function().
1625 *
1626 * The point of this is to pass function names as strings across process
1627 * boundaries. We can't pass actual function addresses because of the
1628 * possibility that the function has been loaded at a different address
1629 * in a different process. This is obviously a hazard for functions in
1630 * loadable libraries, but it can happen even for functions in the core code
1631 * on platforms using EXEC_BACKEND (e.g., Windows).
1632 *
1633 * At some point it might be worthwhile to get rid of InternalParallelWorkers[]
1634 * in favor of applying load_external_function() for core functions too;
1635 * but that raises portability issues that are not worth addressing now.
1636 */
1637static parallel_worker_main_type
1638LookupParallelWorkerFunction(const char *libraryname, const char *funcname)
1639{
1640 /*
1641 * If the function is to be loaded from postgres itself, search the
1642 * InternalParallelWorkers array.
1643 */
1644 if (strcmp(libraryname, "postgres") == 0)
1645 {
1646 int i;
1647
1648 for (i = 0; i < lengthof(InternalParallelWorkers); i++)
1649 {
1650 if (strcmp(InternalParallelWorkers[i].fn_name, funcname) == 0)
1651 return InternalParallelWorkers[i].fn_addr;
1652 }
1653
1654 /* We can only reach this by programming error. */
1655 elog(ERROR, "internal function \"%s\" not found", funcname);
1656 }
1657
1658 /* Otherwise load from external library. */
1659 return (parallel_worker_main_type)
1660 load_external_function(libraryname, funcname, true, NULL);
1661}
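/*
 * [Editorial note, not in the original file] For an extension, the library
 * name passed to CreateParallelContext() is resolved here through
 * load_external_function(), so the entry point must be an exported symbol,
 * e.g. (hypothetical names):
 *
 *     PGDLLEXPORT void myextension_main(dsm_segment *seg, shm_toc *toc);
 *     ...
 *     pcxt = CreateParallelContext("myextension", "myextension_main", nworkers);
 */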
void NotifyMyFrontEnd(const char *channel, const char *payload, int32 srcPid)
Definition: async.c:2224
static parallel_worker_main_type LookupParallelWorkerFunction(const char *libraryname, const char *funcname)
Definition: parallel.c:1638
#define PARALLEL_KEY_TRANSACTION_STATE
Definition: parallel.c:71
int ParallelWorkerNumber
Definition: parallel.c:114
void HandleParallelMessageInterrupt(void)
Definition: parallel.c:1033
struct FixedParallelState FixedParallelState
bool InitializingParallelWorker
Definition: parallel.c:120
#define PARALLEL_KEY_GUC
Definition: parallel.c:67
parallel_worker_main_type fn_addr
Definition: parallel.c:138
#define PARALLEL_KEY_UNCOMMITTEDENUMS
Definition: parallel.c:77
#define PARALLEL_KEY_TRANSACTION_SNAPSHOT
Definition: parallel.c:69
void InitializeParallelDSM(ParallelContext *pcxt)
Definition: parallel.c:207
#define PARALLEL_KEY_CLIENTCONNINFO
Definition: parallel.c:78
static FixedParallelState * MyFixedParallelState
Definition: parallel.c:123
#define PARALLEL_KEY_PENDING_SYNCS
Definition: parallel.c:74
void WaitForParallelWorkersToFinish(ParallelContext *pcxt)
Definition: parallel.c:792
void LaunchParallelWorkers(ParallelContext *pcxt)
Definition: parallel.c:569
void ReinitializeParallelDSM(ParallelContext *pcxt)
Definition: parallel.c:504
void HandleParallelMessages(void)
Definition: parallel.c:1044
void DestroyParallelContext(ParallelContext *pcxt)
Definition: parallel.c:946
#define PARALLEL_KEY_ACTIVE_SNAPSHOT
Definition: parallel.c:70
void ParallelWorkerReportLastRecEnd(XLogRecPtr last_xlog_end)
Definition: parallel.c:1582
#define PARALLEL_KEY_ERROR_QUEUE
Definition: parallel.c:65
#define PARALLEL_KEY_SESSION_DSM
Definition: parallel.c:73
static void HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg)
Definition: parallel.c:1133
ParallelContext * CreateParallelContext(const char *library_name, const char *function_name, int nworkers)
Definition: parallel.c:169
#define PARALLEL_MAGIC
Definition: parallel.c:57
bool ParallelContextActive(void)
Definition: parallel.c:1020
void ParallelWorkerMain(Datum main_arg)
Definition: parallel.c:1288
static void WaitForParallelWorkersToExit(ParallelContext *pcxt)
Definition: parallel.c:906
static pid_t ParallelLeaderPid
Definition: parallel.c:129
#define PARALLEL_KEY_REINDEX_STATE
Definition: parallel.c:75
#define PARALLEL_KEY_LIBRARY
Definition: parallel.c:66
static void ParallelWorkerShutdown(int code, Datum arg)
Definition: parallel.c:1610
static dlist_head pcxt_list
Definition: parallel.c:126
const char * fn_name
Definition: parallel.c:137
#define PARALLEL_KEY_FIXED
Definition: parallel.c:64
#define PARALLEL_KEY_ENTRYPOINT
Definition: parallel.c:72
volatile sig_atomic_t ParallelMessagePending
Definition: parallel.c:117
void ReinitializeParallelWorkers(ParallelContext *pcxt, int nworkers_to_launch)
Definition: parallel.c:554
#define PARALLEL_KEY_COMBO_CID
Definition: parallel.c:68
static const struct @16 InternalParallelWorkers[]
void WaitForParallelWorkersToAttach(ParallelContext *pcxt)
Definition: parallel.c:689
#define PARALLEL_ERROR_QUEUE_SIZE
Definition: parallel.c:54
void AtEOSubXact_Parallel(bool isCommit, SubTransactionId mySubId)
Definition: parallel.c:1250
void AtEOXact_Parallel(bool isCommit)
Definition: parallel.c:1271
#define PARALLEL_KEY_RELMAPPER_STATE
Definition: parallel.c:76
void pgstat_progress_incr_param(int index, int64 incr)
void TerminateBackgroundWorker(BackgroundWorkerHandle *handle)
Definition: bgworker.c:1296
BgwHandleStatus WaitForBackgroundWorkerShutdown(BackgroundWorkerHandle *handle)
Definition: bgworker.c:1257
void BackgroundWorkerUnblockSignals(void)
Definition: bgworker.c:926
void BackgroundWorkerInitializeConnectionByOid(Oid dboid, Oid useroid, uint32 flags)
Definition: bgworker.c:886
BgwHandleStatus GetBackgroundWorkerPid(BackgroundWorkerHandle *handle, pid_t *pidp)
Definition: bgworker.c:1157
bool RegisterDynamicBackgroundWorker(BackgroundWorker *worker, BackgroundWorkerHandle **handle)
Definition: bgworker.c:1045
#define BGW_NEVER_RESTART
Definition: bgworker.h:85
#define BGWORKER_BYPASS_ROLELOGINCHECK
Definition: bgworker.h:157
#define BGWORKER_CLASS_PARALLEL
Definition: bgworker.h:68
BgwHandleStatus
Definition: bgworker.h:104
@ BGWH_POSTMASTER_DIED
Definition: bgworker.h:108
@ BGWH_STARTED
Definition: bgworker.h:105
@ BGWH_STOPPED
Definition: bgworker.h:107
@ BgWorkerStart_ConsistentState
Definition: bgworker.h:80
#define BGWORKER_BACKEND_DATABASE_CONNECTION
Definition: bgworker.h:60
#define BGWORKER_BYPASS_ALLOWCONN
Definition: bgworker.h:156
#define BGWORKER_SHMEM_ACCESS
Definition: bgworker.h:53
#define BGW_MAXLEN
Definition: bgworker.h:86
void _brin_parallel_build_main(dsm_segment *seg, shm_toc *toc)
Definition: brin.c:2855
#define Min(x, y)
Definition: c.h:958
uint32 SubTransactionId
Definition: c.h:610
#define BUFFERALIGN(LEN)
Definition: c.h:767
#define Assert(condition)
Definition: c.h:812
int64_t int64
Definition: c.h:482
int32_t int32
Definition: c.h:481
#define lengthof(array)
Definition: c.h:742
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:892
size_t Size
Definition: c.h:559
void RestoreComboCIDState(char *comboCIDstate)
Definition: combocid.c:342
void SerializeComboCIDState(Size maxsize, char *start_address)
Definition: combocid.c:316
Size EstimateComboCIDStateSpace(void)
Definition: combocid.c:297
int64 TimestampTz
Definition: timestamp.h:39
void RestoreLibraryState(char *start_address)
Definition: dfmgr.c:670
void SerializeLibraryState(Size maxsize, char *start_address)
Definition: dfmgr.c:648
Size EstimateLibraryStateSpace(void)
Definition: dfmgr.c:631
void * load_external_function(const char *filename, const char *funcname, bool signalNotFound, void **filehandle)
Definition: dfmgr.c:95
dsm_handle dsm_segment_handle(dsm_segment *seg)
Definition: dsm.c:1123
void dsm_detach(dsm_segment *seg)
Definition: dsm.c:803
void * dsm_segment_address(dsm_segment *seg)
Definition: dsm.c:1095
dsm_segment * dsm_create(Size size, int flags)
Definition: dsm.c:516
dsm_segment * dsm_attach(dsm_handle h)
Definition: dsm.c:665
#define DSM_CREATE_NULL_IF_MAXSEGMENTS
Definition: dsm.h:20
uint32 dsm_handle
Definition: dsm_impl.h:55
#define DSM_HANDLE_INVALID
Definition: dsm_impl.h:58
ErrorContextCallback * error_context_stack
Definition: elog.c:94
int errhint(const char *fmt,...)
Definition: elog.c:1317
void ThrowErrorData(ErrorData *edata)
Definition: elog.c:1895
int errcode(int sqlerrcode)
Definition: elog.c:853
int errmsg(const char *fmt,...)
Definition: elog.c:1070
#define _(x)
Definition: elog.c:90
#define FATAL
Definition: elog.h:41
#define WARNING
Definition: elog.h:36
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:225
#define ereport(elevel,...)
Definition: elog.h:149
void ParallelQueryMain(dsm_segment *seg, shm_toc *toc)
ProcNumber ParallelLeaderProcNumber
Definition: globals.c:91
volatile sig_atomic_t InterruptPending
Definition: globals.c:31
int MyProcPid
Definition: globals.c:46
ProcNumber MyProcNumber
Definition: globals.c:89
struct Latch * MyLatch
Definition: globals.c:62
Oid MyDatabaseId
Definition: globals.c:93
void RestoreGUCState(void *gucstate)
Definition: guc.c:6193
void SerializeGUCState(Size maxsize, char *start_address)
Definition: guc.c:6101
Size EstimateGUCStateSpace(void)
Definition: guc.c:5948
bool current_role_is_superuser
Definition: guc_tables.c:519
return str start
const char * hba_authname(UserAuth auth_method)
Definition: hba.c:3065
#define dlist_foreach(iter, lhead)
Definition: ilist.h:623
#define dlist_head_element(type, membername, lhead)
Definition: ilist.h:603
static void dlist_delete(dlist_node *node)
Definition: ilist.h:405
static void dlist_push_head(dlist_head *head, dlist_node *node)
Definition: ilist.h:347
static bool dlist_is_empty(const dlist_head *head)
Definition: ilist.h:336
#define DLIST_STATIC_INIT(name)
Definition: ilist.h:281
#define dlist_container(type, membername, ptr)
Definition: ilist.h:593
void(* parallel_worker_main_type)(dsm_segment *seg, shm_toc *toc)
Definition: parallel.h:23
#define funcname
Definition: indent_codes.h:69
void SerializeReindexState(Size maxsize, char *start_address)
Definition: index.c:4223
void RestoreReindexState(const void *reindexstate)
Definition: index.c:4241
Size EstimateReindexStateSpace(void)
Definition: index.c:4212
void InvalidateSystemCaches(void)
Definition: inval.c:849
void before_shmem_exit(pg_on_exit_callback function, Datum arg)
Definition: ipc.c:337
int i
Definition: isn.c:72
void SetLatch(Latch *latch)
Definition: latch.c:632
void ResetLatch(Latch *latch)
Definition: latch.c:724
int WaitLatch(Latch *latch, int wakeEvents, long timeout, uint32 wait_event_info)
Definition: latch.c:517
#define WL_EXIT_ON_PM_DEATH
Definition: latch.h:132
#define WL_LATCH_SET
Definition: latch.h:127
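The latch entries above support the standard wait loop: sleep on MyLatch, reset after waking, and re-test the condition so a wakeup racing with the test is not lost. A sketch, with work_is_ready() a hypothetical predicate and the wait event borrowed from the worker-wait path:

for (;;)
{
    CHECK_FOR_INTERRUPTS();

    if (work_is_ready())
        break;

    (void) WaitLatch(MyLatch,
                     WL_LATCH_SET | WL_EXIT_ON_PM_DEATH,
                     -1L,                       /* no timeout */
                     WAIT_EVENT_PARALLEL_FINISH);
    ResetLatch(MyLatch);
}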
#define pq_putmessage(msgtype, s, len)
Definition: libpq.h:49
int GetDatabaseEncoding(void)
Definition: mbutils.c:1261
int SetClientEncoding(int encoding)
Definition: mbutils.c:208
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:1181
void MemoryContextReset(MemoryContext context)
Definition: mcxt.c:383
MemoryContext TopTransactionContext
Definition: mcxt.c:154
char * pstrdup(const char *in)
Definition: mcxt.c:1696
void pfree(void *pointer)
Definition: mcxt.c:1521
void * palloc0(Size size)
Definition: mcxt.c:1347
MemoryContext TopMemoryContext
Definition: mcxt.c:149
MemoryContext CurrentMemoryContext
Definition: mcxt.c:143
#define AllocSetContextCreate
Definition: memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:160
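The mcxt.c and memutils.h entries above are the allocator API. A private context makes cleanup cheap: one reset releases everything allocated in it. A short sketch:

MemoryContext mycxt = AllocSetContextCreate(TopMemoryContext,
                                            "example context",
                                            ALLOCSET_DEFAULT_SIZES);
MemoryContext oldcxt = MemoryContextSwitchTo(mycxt);

char   *copy = pstrdup("per-context copy");     /* allocated in mycxt */
void   *buf = palloc0(128);                     /* zero-filled, in mycxt */

MemoryContextSwitchTo(oldcxt);

pfree(copy);                    /* chunks can be freed individually... */
MemoryContextReset(mycxt);      /* ...or the whole context emptied at once */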
#define RESUME_INTERRUPTS()
Definition: miscadmin.h:135
#define INTERRUPTS_CAN_BE_PROCESSED()
Definition: miscadmin.h:129
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:122
#define HOLD_INTERRUPTS()
Definition: miscadmin.h:133
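The four interrupt macros above implement deferred interrupt handling. The usual discipline, with update_shared_state() standing in for any non-interruptible step, looks like:

HOLD_INTERRUPTS();
update_shared_state();          /* must not be interrupted part-way */
RESUME_INTERRUPTS();

Assert(INTERRUPTS_CAN_BE_PROCESSED());
CHECK_FOR_INTERRUPTS();         /* service anything that arrived meanwhile */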
void SerializeClientConnectionInfo(Size maxsize, char *start_address)
Definition: miscinit.c:1099
void InitializeSystemUser(const char *authn_id, const char *auth_method)
Definition: miscinit.c:922
void GetUserIdAndSecContext(Oid *userid, int *sec_context)
Definition: miscinit.c:660
void SetSessionAuthorization(Oid userid, bool is_superuser)
Definition: miscinit.c:968
bool GetSessionUserIsSuperuser(void)
Definition: miscinit.c:563
Size EstimateClientConnectionInfoSpace(void)
Definition: miscinit.c:1083
Oid GetSessionUserId(void)
Definition: miscinit.c:556
void SetCurrentRoleId(Oid roleid, bool is_superuser)
Definition: miscinit.c:1004
Oid GetAuthenticatedUserId(void)
Definition: miscinit.c:593
ClientConnectionInfo MyClientConnectionInfo
Definition: miscinit.c:1066
void RestoreClientConnectionInfo(char *conninfo)
Definition: miscinit.c:1131
void SetAuthenticatedUserId(Oid userid)
Definition: miscinit.c:600
Oid GetCurrentRoleId(void)
Definition: miscinit.c:983
void SetUserIdAndSecContext(Oid userid, int sec_context)
Definition: miscinit.c:667
void GetTempNamespaceState(Oid *tempNamespaceId, Oid *tempToastNamespaceId)
Definition: namespace.c:3805
void SetTempNamespaceState(Oid tempNamespaceId, Oid tempToastNamespaceId)
Definition: namespace.c:3821
void _bt_parallel_build_main(dsm_segment *seg, shm_toc *toc)
Definition: nbtsort.c:1743
@ DEBUG_PARALLEL_REGRESS
Definition: optimizer.h:108
void * arg
const void * data
void RestoreUncommittedEnums(void *space)
Definition: pg_enum.c:873
Size EstimateUncommittedEnumsSpace(void)
Definition: pg_enum.c:813
void SerializeUncommittedEnums(void *space, Size size)
Definition: pg_enum.c:827
#define die(msg)
int debug_parallel_query
Definition: planner.c:67
#define sprintf
Definition: port.h:240
pqsigfunc pqsignal(int signo, pqsigfunc func)
#define snprintf
Definition: port.h:238
static uint32 DatumGetUInt32(Datum X)
Definition: postgres.h:222
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:322
uintptr_t Datum
Definition: postgres.h:64
static Pointer DatumGetPointer(Datum X)
Definition: postgres.h:312
static Datum UInt32GetDatum(uint32 X)
Definition: postgres.h:232
unsigned int Oid
Definition: postgres_ext.h:31
BackgroundWorker * MyBgworkerEntry
Definition: postmaster.c:192
unsigned int pq_getmsgint(StringInfo msg, int b)
Definition: pqformat.c:415
void pq_getmsgend(StringInfo msg)
Definition: pqformat.c:635
void pq_endmessage(StringInfo buf)
Definition: pqformat.c:296
int pq_getmsgbyte(StringInfo msg)
Definition: pqformat.c:399
const char * pq_getmsgrawstring(StringInfo msg)
Definition: pqformat.c:608
int64 pq_getmsgint64(StringInfo msg)
Definition: pqformat.c:453
void pq_set_parallel_leader(pid_t pid, ProcNumber procNumber)
Definition: pqmq.c:78
void pq_parse_errornotice(StringInfo msg, ErrorData *edata)
Definition: pqmq.c:216
void pq_redirect_to_shm_mq(dsm_segment *seg, shm_mq_handle *mqh)
Definition: pqmq.c:53
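The pqmq.c entries above are the worker half of the error plumbing: libpq output is redirected into a shm_mq so that ereport() traffic reaches the leader. A sketch in the shape of the worker setup in this file, assuming mq points at this worker's slot of the error-queue space and fps at the FixedParallelState:

shm_mq_set_sender(mq, MyProc);

shm_mq_handle *mqh = shm_mq_attach(mq, seg, NULL);

pq_redirect_to_shm_mq(seg, mqh);        /* ereport() output now goes here */
pq_set_parallel_leader(fps->parallel_leader_pid,
                       fps->parallel_leader_proc_number);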
void AttachSerializableXact(SerializableXactHandle handle)
Definition: predicate.c:5045
SerializableXactHandle ShareSerializableXact(void)
Definition: predicate.c:5036
void * SerializableXactHandle
Definition: predicate.h:34
int ProcNumber
Definition: procnumber.h:24
int SendProcSignal(pid_t pid, ProcSignalReason reason, ProcNumber procNumber)
Definition: procsignal.c:281
@ PROCSIG_PARALLEL_MESSAGE
Definition: procsignal.h:34
#define PqMsg_NotificationResponse
Definition: protocol.h:41
#define PqMsg_ErrorResponse
Definition: protocol.h:44
#define PqMsg_NoticeResponse
Definition: protocol.h:49
#define PqMsg_Terminate
Definition: protocol.h:28
char * psprintf(const char *fmt,...)
Definition: psprintf.c:43
Size EstimateRelationMapSpace(void)
Definition: relmapper.c:713
void SerializeRelationMap(Size maxSize, char *startAddress)
Definition: relmapper.c:724
void RestoreRelationMap(char *startAddress)
Definition: relmapper.c:741
void DetachSession(void)
Definition: session.c:201
void AttachSession(dsm_handle handle)
Definition: session.c:155
dsm_handle GetSessionDsmHandle(void)
Definition: session.c:70
shm_mq * shm_mq_get_queue(shm_mq_handle *mqh)
Definition: shm_mq.c:905
void shm_mq_set_sender(shm_mq *mq, PGPROC *proc)
Definition: shm_mq.c:224
shm_mq * shm_mq_create(void *address, Size size)
Definition: shm_mq.c:177
void shm_mq_set_handle(shm_mq_handle *mqh, BackgroundWorkerHandle *handle)
Definition: shm_mq.c:319
PGPROC * shm_mq_get_sender(shm_mq *mq)
Definition: shm_mq.c:257
void shm_mq_detach(shm_mq_handle *mqh)
Definition: shm_mq.c:843
void shm_mq_set_receiver(shm_mq *mq, PGPROC *proc)
Definition: shm_mq.c:206
shm_mq_result shm_mq_receive(shm_mq_handle *mqh, Size *nbytesp, void **datap, bool nowait)
Definition: shm_mq.c:572
shm_mq_handle * shm_mq_attach(shm_mq *mq, dsm_segment *seg, BackgroundWorkerHandle *handle)
Definition: shm_mq.c:290
shm_mq_result
Definition: shm_mq.h:37
@ SHM_MQ_SUCCESS
Definition: shm_mq.h:38
@ SHM_MQ_WOULD_BLOCK
Definition: shm_mq.h:39
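The shm_mq.c entries above form the queue lifecycle: create in shared memory, pin a sender and a receiver, attach for a handle, then transfer messages. A sketch of the receiving (leader) side, assuming space is a queue-sized chunk of the DSM segment:

shm_mq *mq = shm_mq_create(space, (Size) PARALLEL_ERROR_QUEUE_SIZE);

shm_mq_set_receiver(mq, MyProc);

shm_mq_handle *mqh = shm_mq_attach(mq, seg, NULL);

Size        nbytes;
void       *data;
shm_mq_result res = shm_mq_receive(mqh, &nbytes, &data, true /* nowait */);

if (res == SHM_MQ_SUCCESS)
{
    /* data points to an nbytes-long message inside the queue */
}
else if (res == SHM_MQ_WOULD_BLOCK)
{
    /* nothing queued yet: wait on MyLatch and retry */
}

shm_mq_detach(mqh);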
void * shm_toc_allocate(shm_toc *toc, Size nbytes)
Definition: shm_toc.c:88
Size shm_toc_estimate(shm_toc_estimator *e)
Definition: shm_toc.c:263
shm_toc * shm_toc_create(uint64 magic, void *address, Size nbytes)
Definition: shm_toc.c:40
void shm_toc_insert(shm_toc *toc, uint64 key, void *address)
Definition: shm_toc.c:171
void * shm_toc_lookup(shm_toc *toc, uint64 key, bool noError)
Definition: shm_toc.c:232
shm_toc * shm_toc_attach(uint64 magic, void *address)
Definition: shm_toc.c:64
#define shm_toc_estimate_chunk(e, sz)
Definition: shm_toc.h:51
#define shm_toc_initialize_estimator(e)
Definition: shm_toc.h:49
#define shm_toc_estimate_keys(e, cnt)
Definition: shm_toc.h:53
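The shm_toc entries above are always used in the same sequence: estimate, create, allocate and insert keyed chunks, then look them up after attaching. A sketch reusing the magic number and a key defined earlier in this file:

shm_toc_estimator e;
Size        segsize;

shm_toc_initialize_estimator(&e);
shm_toc_estimate_chunk(&e, PARALLEL_ERROR_QUEUE_SIZE);
shm_toc_estimate_keys(&e, 1);
segsize = shm_toc_estimate(&e);

dsm_segment *seg = dsm_create(segsize, 0);
shm_toc    *toc = shm_toc_create(PARALLEL_MAGIC,
                                 dsm_segment_address(seg), segsize);

void *chunk = shm_toc_allocate(toc, PARALLEL_ERROR_QUEUE_SIZE);

shm_toc_insert(toc, PARALLEL_KEY_ERROR_QUEUE, chunk);

/* ...an attaching process finds the same chunk by key... */
shm_toc    *wtoc = shm_toc_attach(PARALLEL_MAGIC, dsm_segment_address(seg));
void       *found = shm_toc_lookup(wtoc, PARALLEL_KEY_ERROR_QUEUE, false);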
Size mul_size(Size s1, Size s2)
Definition: shmem.c:505
void SerializeSnapshot(Snapshot snapshot, char *start_address)
Definition: snapmgr.c:1664
Snapshot GetTransactionSnapshot(void)
Definition: snapmgr.c:212
void PushActiveSnapshot(Snapshot snapshot)
Definition: snapmgr.c:610
Snapshot RestoreSnapshot(char *start_address)
Definition: snapmgr.c:1721
void RestoreTransactionSnapshot(Snapshot snapshot, void *source_pgproc)
Definition: snapmgr.c:1784
void PopActiveSnapshot(void)
Definition: snapmgr.c:703
Size EstimateSnapshotSpace(Snapshot snapshot)
Definition: snapmgr.c:1640
Snapshot GetActiveSnapshot(void)
Definition: snapmgr.c:728
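The snapmgr.c entries above let the leader hand its snapshot to workers so every participant sees identical data. A sketch of the active-snapshot round trip, with pcxt->toc and toc as before:

Snapshot    asnap = GetActiveSnapshot();
Size        len = EstimateSnapshotSpace(asnap);
char       *asnapspace = shm_toc_allocate(pcxt->toc, len);

SerializeSnapshot(asnap, asnapspace);
shm_toc_insert(pcxt->toc, PARALLEL_KEY_ACTIVE_SNAPSHOT, asnapspace);

/* ...in the worker... */
asnapspace = shm_toc_lookup(toc, PARALLEL_KEY_ACTIVE_SNAPSHOT, false);
PushActiveSnapshot(RestoreSnapshot(asnapspace));

/* ...run the parallel work under that snapshot, then... */
PopActiveSnapshot();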
#define SpinLockInit(lock)
Definition: spin.h:57
#define SpinLockRelease(lock)
Definition: spin.h:61
#define SpinLockAcquire(lock)
Definition: spin.h:59
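The spin.h macros above guard only very short critical sections; FixedParallelState's mutex protects last_xlog_end exactly this way. A sketch of a worker folding its WAL end position into the shared maximum (fps points at the fixed state, whose mutex the leader initialized once with SpinLockInit):

SpinLockAcquire(&fps->mutex);
if (fps->last_xlog_end < XactLastRecEnd)
    fps->last_xlog_end = XactLastRecEnd;    /* keep the max over all workers */
SpinLockRelease(&fps->mutex);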
PGPROC * MyProc
Definition: proc.c:66
bool BecomeLockGroupMember(PGPROC *leader, int pid)
Definition: proc.c:1953
void BecomeLockGroupLeader(void)
Definition: proc.c:1923
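The proc.c entries above set up the lock group that keeps leader and workers from deadlocking against each other: the leader declares itself group leader before launching anyone, and each worker joins using the leader's PGPROC and pid from the fixed state. A sketch of both sides:

BecomeLockGroupLeader();            /* leader, before registering workers */

/* ...worker side... */
if (!BecomeLockGroupMember(fps->parallel_leader_pgproc,
                           fps->parallel_leader_pid))
    return;                         /* leader already exited; bail quietly */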
void SerializePendingSyncs(Size maxSize, char *startAddress)
Definition: storage.c:584
Size EstimatePendingSyncsSpace(void)
Definition: storage.c:571
void RestorePendingSyncs(char *startAddress)
Definition: storage.c:635
void appendBinaryStringInfo(StringInfo str, const void *data, int datalen)
Definition: stringinfo.c:230
void initStringInfo(StringInfo str)
Definition: stringinfo.c:56
char bgw_function_name[BGW_MAXLEN]
Definition: bgworker.h:97
Datum bgw_main_arg
Definition: bgworker.h:98
char bgw_name[BGW_MAXLEN]
Definition: bgworker.h:91
int bgw_restart_time
Definition: bgworker.h:95
char bgw_type[BGW_MAXLEN]
Definition: bgworker.h:92
BgWorkerStartTime bgw_start_time
Definition: bgworker.h:94
char bgw_extra[BGW_EXTRALEN]
Definition: bgworker.h:99
pid_t bgw_notify_pid
Definition: bgworker.h:100
char bgw_library_name[MAXPGPATH]
Definition: bgworker.h:96
const char * authn_id
Definition: libpq-be.h:103
UserAuth auth_method
Definition: libpq-be.h:109
char * context
Definition: elog.h:444
int elevel
Definition: elog.h:429
Oid temp_toast_namespace_id
Definition: parallel.c:90
XLogRecPtr last_xlog_end
Definition: parallel.c:105
bool role_is_superuser
Definition: parallel.c:93
TimestampTz stmt_ts
Definition: parallel.c:98
SerializableXactHandle serializable_xact_handle
Definition: parallel.c:99
TimestampTz xact_ts
Definition: parallel.c:97
PGPROC * parallel_leader_pgproc
Definition: parallel.c:94
bool session_user_is_superuser
Definition: parallel.c:92
pid_t parallel_leader_pid
Definition: parallel.c:95
Oid authenticated_user_id
Definition: parallel.c:85
ProcNumber parallel_leader_proc_number
Definition: parallel.c:96
Definition: proc.h:162
char * library_name
Definition: parallel.h:38
dsm_segment * seg
Definition: parallel.h:42
bool * known_attached_workers
Definition: parallel.h:47
ErrorContextCallback * error_context_stack
Definition: parallel.h:40
SubTransactionId subid
Definition: parallel.h:34
shm_toc_estimator estimator
Definition: parallel.h:41
int nknown_attached_workers
Definition: parallel.h:46
ParallelWorkerInfo * worker
Definition: parallel.h:45
shm_toc * toc
Definition: parallel.h:44
dlist_node node
Definition: parallel.h:33
void * private_memory
Definition: parallel.h:43
int nworkers_launched
Definition: parallel.h:37
int nworkers_to_launch
Definition: parallel.h:36
char * function_name
Definition: parallel.h:39
BackgroundWorkerHandle * bgwhandle
Definition: parallel.h:27
shm_mq_handle * error_mqh
Definition: parallel.h:28
dlist_node * cur
Definition: ilist.h:179
Definition: type.h:96
Definition: shm_mq.c:72
void parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
void SerializeTransactionState(Size maxsize, char *start_address)
Definition: xact.c:5528
void ExitParallelMode(void)
Definition: xact.c:1063
SubTransactionId GetCurrentSubTransactionId(void)
Definition: xact.c:790
void EnterParallelMode(void)
Definition: xact.c:1050
Size EstimateTransactionStateSpace(void)
Definition: xact.c:5500
void StartTransactionCommand(void)
Definition: xact.c:3051
void StartParallelWorkerTransaction(char *tstatespace)
Definition: xact.c:5599
void SetParallelStartTimestamps(TimestampTz xact_ts, TimestampTz stmt_ts)
Definition: xact.c:858
bool IsInParallelMode(void)
Definition: xact.c:1088
TimestampTz GetCurrentStatementStartTimestamp(void)
Definition: xact.c:878
TimestampTz GetCurrentTransactionStartTimestamp(void)
Definition: xact.c:869
void EndParallelWorkerTransaction(void)
Definition: xact.c:5624
void CommitTransactionCommand(void)
Definition: xact.c:3149
#define IsolationUsesXactSnapshot()
Definition: xact.h:51
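The xact.c entries above carry the leader's transaction into each worker. A sketch of the two sides in order, assuming tstatespace is a chunk of the DSM segment:

/* Leader: forbid unsafe state changes, then capture transaction state. */
EnterParallelMode();

Size    tstatelen = EstimateTransactionStateSpace();
char   *tstatespace = shm_toc_allocate(pcxt->toc, tstatelen);

SerializeTransactionState(tstatelen, tstatespace);

/* Worker: enter a pseudo-transaction mirroring the leader's. */
StartParallelWorkerTransaction(tstatespace);
Assert(IsInParallelMode());
/* ...do the work... */
EndParallelWorkerTransaction();

/* Leader, once all workers have finished: */
ExitParallelMode();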
XLogRecPtr XactLastRecEnd
Definition: xlog.c:254
uint64 XLogRecPtr
Definition: xlogdefs.h:21