1 /*-------------------------------------------------------------------------
2  *
3  * parallel.c
4  * Infrastructure for launching parallel workers
5  *
6  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  * IDENTIFICATION
10  * src/backend/access/transam/parallel.c
11  *
12  *-------------------------------------------------------------------------
13  */
14 
15 #include "postgres.h"
16 
17 #include "access/heapam.h"
18 #include "access/nbtree.h"
19 #include "access/parallel.h"
20 #include "access/session.h"
21 #include "access/xact.h"
22 #include "access/xlog.h"
23 #include "catalog/index.h"
24 #include "catalog/namespace.h"
25 #include "catalog/pg_enum.h"
26 #include "catalog/storage.h"
27 #include "commands/async.h"
28 #include "executor/execParallel.h"
29 #include "libpq/libpq.h"
30 #include "libpq/pqformat.h"
31 #include "libpq/pqmq.h"
32 #include "miscadmin.h"
33 #include "optimizer/optimizer.h"
34 #include "pgstat.h"
35 #include "storage/ipc.h"
36 #include "storage/predicate.h"
37 #include "storage/sinval.h"
38 #include "storage/spin.h"
39 #include "tcop/tcopprot.h"
40 #include "utils/combocid.h"
41 #include "utils/guc.h"
42 #include "utils/inval.h"
43 #include "utils/memutils.h"
44 #include "utils/relmapper.h"
45 #include "utils/snapmgr.h"
46 #include "utils/typcache.h"
47 
48 /*
49  * We don't want to waste a lot of memory on an error queue which, most of
50  * the time, will process only a handful of small messages. However, it is
51  * desirable to make it large enough that a typical ErrorResponse can be sent
52  * without blocking. That way, a worker that errors out can write the whole
53  * message into the queue and terminate without waiting for the user backend.
54  */
55 #define PARALLEL_ERROR_QUEUE_SIZE 16384
56 
57 /* Magic number for parallel context TOC. */
58 #define PARALLEL_MAGIC 0x50477c7c
59 
60 /*
61  * Magic numbers for per-context parallel state sharing. Higher-level code
62  * should use smaller values, leaving these very large ones for use by this
63  * module.
64  */
65 #define PARALLEL_KEY_FIXED UINT64CONST(0xFFFFFFFFFFFF0001)
66 #define PARALLEL_KEY_ERROR_QUEUE UINT64CONST(0xFFFFFFFFFFFF0002)
67 #define PARALLEL_KEY_LIBRARY UINT64CONST(0xFFFFFFFFFFFF0003)
68 #define PARALLEL_KEY_GUC UINT64CONST(0xFFFFFFFFFFFF0004)
69 #define PARALLEL_KEY_COMBO_CID UINT64CONST(0xFFFFFFFFFFFF0005)
70 #define PARALLEL_KEY_TRANSACTION_SNAPSHOT UINT64CONST(0xFFFFFFFFFFFF0006)
71 #define PARALLEL_KEY_ACTIVE_SNAPSHOT UINT64CONST(0xFFFFFFFFFFFF0007)
72 #define PARALLEL_KEY_TRANSACTION_STATE UINT64CONST(0xFFFFFFFFFFFF0008)
73 #define PARALLEL_KEY_ENTRYPOINT UINT64CONST(0xFFFFFFFFFFFF0009)
74 #define PARALLEL_KEY_SESSION_DSM UINT64CONST(0xFFFFFFFFFFFF000A)
75 #define PARALLEL_KEY_PENDING_SYNCS UINT64CONST(0xFFFFFFFFFFFF000B)
76 #define PARALLEL_KEY_REINDEX_STATE UINT64CONST(0xFFFFFFFFFFFF000C)
77 #define PARALLEL_KEY_RELMAPPER_STATE UINT64CONST(0xFFFFFFFFFFFF000D)
78 #define PARALLEL_KEY_UNCOMMITTEDENUMS UINT64CONST(0xFFFFFFFFFFFF000E)
79 
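/*
 * Illustrative sketch (not part of this file): a higher-level caller picks
 * its own small keys for the per-context data it stores, for example a
 * hypothetical module might use
 *
 *		#define MYCODE_KEY_INPUT_DATA	UINT64CONST(0x0000000000000001)
 *		#define MYCODE_KEY_RESULTS		UINT64CONST(0x0000000000000002)
 *
 * Such values cannot collide with the very large PARALLEL_KEY_* constants
 * above, which this module reserves for itself.
 */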
80 /* Fixed-size parallel state. */
81 typedef struct FixedParallelState
82 {
83  /* Fixed-size state that workers must restore. */
84  Oid database_id;
85  Oid authenticated_user_id;
86  Oid current_user_id;
87  Oid outer_user_id;
88  Oid temp_namespace_id;
89  Oid temp_toast_namespace_id;
90  int sec_context;
91  bool is_superuser;
92  PGPROC *parallel_leader_pgproc;
93  pid_t parallel_leader_pid;
94  BackendId parallel_leader_backend_id;
95  TimestampTz xact_ts;
96  TimestampTz stmt_ts;
97  SerializableXactHandle serializable_xact_handle;
98 
99  /* Mutex protects remaining fields. */
100  slock_t mutex;
101 
102  /* Maximum XactLastRecEnd of any worker. */
103  XLogRecPtr last_xlog_end;
104 } FixedParallelState;
105 
106 /*
107  * Our parallel worker number. We initialize this to -1, meaning that we are
108  * not a parallel worker. In parallel workers, it will be set to a value >= 0
109  * and < the number of workers before any user code is invoked; each parallel
110  * worker will get a different parallel worker number.
111  */
112 int ParallelWorkerNumber = -1;
113 
114 /* Is there a parallel message pending which we need to receive? */
115 volatile bool ParallelMessagePending = false;
116 
117 /* Are we initializing a parallel worker? */
118 bool InitializingParallelWorker = false;
119 
120 /* Pointer to our fixed parallel state. */
121 static FixedParallelState *MyFixedParallelState;
122 
123 /* List of active parallel contexts. */
124 static dlist_head pcxt_list = DLIST_STATIC_INIT(pcxt_list);
125 
126 /* Backend-local copy of data from FixedParallelState. */
127 static pid_t ParallelLeaderPid;
128 
129 /*
130  * List of internal parallel worker entry points. We need this for
131  * reasons explained in LookupParallelWorkerFunction(), below.
132  */
133 static const struct
134 {
135  const char *fn_name;
136  parallel_worker_main_type fn_addr;
137 } InternalParallelWorkers[] =
138 
139 {
140  {
141  "ParallelQueryMain", ParallelQueryMain
142  },
143  {
144  "_bt_parallel_build_main", _bt_parallel_build_main
145  },
146  {
147  "parallel_vacuum_main", parallel_vacuum_main
148  }
149 };
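/*
 * Illustrative sketch (not part of this file): a new in-core entry point
 * would be made launchable by appending one more element to the array
 * above, e.g.
 *
 *		{
 *			"my_new_parallel_main", my_new_parallel_main
 *		}
 *
 * (my_new_parallel_main is a hypothetical name.) Entry points that live in
 * loadable modules need no entry in this array; LookupParallelWorkerFunction()
 * resolves them with load_external_function() instead.
 */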
150 
151 /* Private functions. */
152 static void HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg);
153 static void WaitForParallelWorkersToExit(ParallelContext *pcxt);
154 static parallel_worker_main_type LookupParallelWorkerFunction(const char *libraryname, const char *funcname);
155 static void ParallelWorkerShutdown(int code, Datum arg);
156 
157 
158 /*
159  * Establish a new parallel context. This should be done after entering
160  * parallel mode, and (unless there is an error) the context should be
161  * destroyed before exiting the current subtransaction.
162  */
163 ParallelContext *
164 CreateParallelContext(const char *library_name, const char *function_name,
165  int nworkers)
166 {
167  MemoryContext oldcontext;
168  ParallelContext *pcxt;
169 
170  /* It is unsafe to create a parallel context if not in parallel mode. */
171  Assert(IsInParallelMode());
172 
173  /* Number of workers should be non-negative. */
174  Assert(nworkers >= 0);
175 
176  /* We might be running in a short-lived memory context. */
177  oldcontext = MemoryContextSwitchTo(TopTransactionContext);
178 
179  /* Initialize a new ParallelContext. */
180  pcxt = palloc0(sizeof(ParallelContext));
181  pcxt->subid = GetCurrentSubTransactionId();
182  pcxt->nworkers = nworkers;
183  pcxt->nworkers_to_launch = nworkers;
184  pcxt->library_name = pstrdup(library_name);
185  pcxt->function_name = pstrdup(function_name);
186  pcxt->error_context_stack = error_context_stack;
187  shm_toc_initialize_estimator(&pcxt->estimator);
188  dlist_push_head(&pcxt_list, &pcxt->node);
189 
190  /* Restore previous memory context. */
191  MemoryContextSwitchTo(oldcontext);
192 
193  return pcxt;
194 }
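/*
 * Illustrative sketch (not part of this file): the overall life cycle of a
 * parallel operation, following the pattern documented in
 * src/backend/access/transam/README.parallel:
 *
 *		EnterParallelMode();
 *		pcxt = CreateParallelContext("postgres", "ParallelQueryMain", 2);
 *		shm_toc_estimate_chunk(&pcxt->estimator, size);
 *		shm_toc_estimate_keys(&pcxt->estimator, 1);
 *		InitializeParallelDSM(pcxt);
 *		space = shm_toc_allocate(pcxt->toc, size);
 *		shm_toc_insert(pcxt->toc, key, space);
 *		LaunchParallelWorkers(pcxt);
 *		... do parallel stuff ...
 *		WaitForParallelWorkersToFinish(pcxt);
 *		... read final results from DSM ...
 *		DestroyParallelContext(pcxt);
 *		ExitParallelMode();
 */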
195 
196 /*
197  * Establish the dynamic shared memory segment for a parallel context and
198  * copy state and other bookkeeping information that will be needed by
199  * parallel workers into it.
200  */
201 void
202 InitializeParallelDSM(ParallelContext *pcxt)
203 {
204  MemoryContext oldcontext;
205  Size library_len = 0;
206  Size guc_len = 0;
207  Size combocidlen = 0;
208  Size tsnaplen = 0;
209  Size asnaplen = 0;
210  Size tstatelen = 0;
211  Size pendingsyncslen = 0;
212  Size reindexlen = 0;
213  Size relmapperlen = 0;
214  Size uncommittedenumslen = 0;
215  Size segsize = 0;
216  int i;
217  FixedParallelState *fps;
218  dsm_handle session_dsm_handle = DSM_HANDLE_INVALID;
219  Snapshot transaction_snapshot = GetTransactionSnapshot();
220  Snapshot active_snapshot = GetActiveSnapshot();
221 
222  /* We might be running in a very short-lived memory context. */
223  oldcontext = MemoryContextSwitchTo(TopTransactionContext);
224 
225  /* Allow space to store the fixed-size parallel state. */
226  shm_toc_estimate_chunk(&pcxt->estimator, sizeof(FixedParallelState));
227  shm_toc_estimate_keys(&pcxt->estimator, 1);
228 
229  /*
230  * Normally, the user will have requested at least one worker process, but
231  * if by chance they have not, we can skip a bunch of things here.
232  */
233  if (pcxt->nworkers > 0)
234  {
235  /* Get (or create) the per-session DSM segment's handle. */
236  session_dsm_handle = GetSessionDsmHandle();
237 
238  /*
239  * If we weren't able to create a per-session DSM segment, then we can
240  * continue but we can't safely launch any workers because their
241  * record typmods would be incompatible so they couldn't exchange
242  * tuples.
243  */
244  if (session_dsm_handle == DSM_HANDLE_INVALID)
245  pcxt->nworkers = 0;
246  }
247 
248  if (pcxt->nworkers > 0)
249  {
250  /* Estimate space for various kinds of state sharing. */
251  library_len = EstimateLibraryStateSpace();
252  shm_toc_estimate_chunk(&pcxt->estimator, library_len);
253  guc_len = EstimateGUCStateSpace();
254  shm_toc_estimate_chunk(&pcxt->estimator, guc_len);
255  combocidlen = EstimateComboCIDStateSpace();
256  shm_toc_estimate_chunk(&pcxt->estimator, combocidlen);
257  if (IsolationUsesXactSnapshot())
258  {
259  tsnaplen = EstimateSnapshotSpace(transaction_snapshot);
260  shm_toc_estimate_chunk(&pcxt->estimator, tsnaplen);
261  }
262  asnaplen = EstimateSnapshotSpace(active_snapshot);
263  shm_toc_estimate_chunk(&pcxt->estimator, asnaplen);
264  tstatelen = EstimateTransactionStateSpace();
265  shm_toc_estimate_chunk(&pcxt->estimator, tstatelen);
266  shm_toc_estimate_chunk(&pcxt->estimator, sizeof(dsm_handle));
267  pendingsyncslen = EstimatePendingSyncsSpace();
268  shm_toc_estimate_chunk(&pcxt->estimator, pendingsyncslen);
269  reindexlen = EstimateReindexStateSpace();
270  shm_toc_estimate_chunk(&pcxt->estimator, reindexlen);
271  relmapperlen = EstimateRelationMapSpace();
272  shm_toc_estimate_chunk(&pcxt->estimator, relmapperlen);
273  uncommittedenumslen = EstimateUncommittedEnumsSpace();
274  shm_toc_estimate_chunk(&pcxt->estimator, uncommittedenumslen);
275  /* If you add more chunks here, you probably need to add keys. */
276  shm_toc_estimate_keys(&pcxt->estimator, 11);
277 
278  /* Estimate space needed for error queues. */
279  StaticAssertStmt(BUFFERALIGN(PARALLEL_ERROR_QUEUE_SIZE) ==
280  PARALLEL_ERROR_QUEUE_SIZE,
281  "parallel error queue size not buffer-aligned");
282  shm_toc_estimate_chunk(&pcxt->estimator,
283  mul_size(PARALLEL_ERROR_QUEUE_SIZE,
284  pcxt->nworkers));
285  shm_toc_estimate_keys(&pcxt->estimator, 1);
286 
287  /* Estimate how much we'll need for the entrypoint info. */
288  shm_toc_estimate_chunk(&pcxt->estimator, strlen(pcxt->library_name) +
289  strlen(pcxt->function_name) + 2);
290  shm_toc_estimate_keys(&pcxt->estimator, 1);
291  }
292 
293  /*
294  * Create DSM and initialize with new table of contents. But if the user
295  * didn't request any workers, then don't bother creating a dynamic shared
296  * memory segment; instead, just use backend-private memory.
297  *
298  * Also, if we can't create a dynamic shared memory segment because the
299  * maximum number of segments have already been created, then fall back to
300  * backend-private memory, and plan not to use any workers. We hope this
301  * won't happen very often, but it's better to abandon the use of
302  * parallelism than to fail outright.
303  */
304  segsize = shm_toc_estimate(&pcxt->estimator);
305  if (pcxt->nworkers > 0)
306  pcxt->seg = dsm_create(segsize, DSM_CREATE_NULL_IF_MAXSEGMENTS);
307  if (pcxt->seg != NULL)
308  pcxt->toc = shm_toc_create(PARALLEL_MAGIC,
309  dsm_segment_address(pcxt->seg),
310  segsize);
311  else
312  {
313  pcxt->nworkers = 0;
314  pcxt->private_memory = MemoryContextAlloc(TopMemoryContext, segsize);
315  pcxt->toc = shm_toc_create(PARALLEL_MAGIC, pcxt->private_memory,
316  segsize);
317  }
318 
319  /* Initialize fixed-size state in shared memory. */
320  fps = (FixedParallelState *)
321  shm_toc_allocate(pcxt->toc, sizeof(FixedParallelState));
322  fps->database_id = MyDatabaseId;
323  fps->authenticated_user_id = GetAuthenticatedUserId();
324  fps->outer_user_id = GetCurrentRoleId();
325  fps->is_superuser = session_auth_is_superuser;
326  GetUserIdAndSecContext(&fps->current_user_id, &fps->sec_context);
327  GetTempNamespaceState(&fps->temp_namespace_id,
328  &fps->temp_toast_namespace_id);
329  fps->parallel_leader_pgproc = MyProc;
330  fps->parallel_leader_pid = MyProcPid;
331  fps->parallel_leader_backend_id = MyBackendId;
332  fps->xact_ts = GetCurrentTransactionStartTimestamp();
333  fps->stmt_ts = GetCurrentStatementStartTimestamp();
334  fps->serializable_xact_handle = ShareSerializableXact();
335  SpinLockInit(&fps->mutex);
336  fps->last_xlog_end = 0;
337  shm_toc_insert(pcxt->toc, PARALLEL_KEY_FIXED, fps);
338 
339  /* We can skip the rest of this if we're not budgeting for any workers. */
340  if (pcxt->nworkers > 0)
341  {
342  char *libraryspace;
343  char *gucspace;
344  char *combocidspace;
345  char *tsnapspace;
346  char *asnapspace;
347  char *tstatespace;
348  char *pendingsyncsspace;
349  char *reindexspace;
350  char *relmapperspace;
351  char *error_queue_space;
352  char *session_dsm_handle_space;
353  char *entrypointstate;
354  char *uncommittedenumsspace;
355  Size lnamelen;
356 
357  /* Serialize shared libraries we have loaded. */
358  libraryspace = shm_toc_allocate(pcxt->toc, library_len);
359  SerializeLibraryState(library_len, libraryspace);
360  shm_toc_insert(pcxt->toc, PARALLEL_KEY_LIBRARY, libraryspace);
361 
362  /* Serialize GUC settings. */
363  gucspace = shm_toc_allocate(pcxt->toc, guc_len);
364  SerializeGUCState(guc_len, gucspace);
365  shm_toc_insert(pcxt->toc, PARALLEL_KEY_GUC, gucspace);
366 
367  /* Serialize combo CID state. */
368  combocidspace = shm_toc_allocate(pcxt->toc, combocidlen);
369  SerializeComboCIDState(combocidlen, combocidspace);
370  shm_toc_insert(pcxt->toc, PARALLEL_KEY_COMBO_CID, combocidspace);
371 
372  /*
373  * Serialize the transaction snapshot if the transaction
374  * isolation-level uses a transaction snapshot.
375  */
376  if (IsolationUsesXactSnapshot())
377  {
378  tsnapspace = shm_toc_allocate(pcxt->toc, tsnaplen);
379  SerializeSnapshot(transaction_snapshot, tsnapspace);
380  shm_toc_insert(pcxt->toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT,
381  tsnapspace);
382  }
383 
384  /* Serialize the active snapshot. */
385  asnapspace = shm_toc_allocate(pcxt->toc, asnaplen);
386  SerializeSnapshot(active_snapshot, asnapspace);
387  shm_toc_insert(pcxt->toc, PARALLEL_KEY_ACTIVE_SNAPSHOT, asnapspace);
388 
389  /* Provide the handle for per-session segment. */
390  session_dsm_handle_space = shm_toc_allocate(pcxt->toc,
391  sizeof(dsm_handle));
392  *(dsm_handle *) session_dsm_handle_space = session_dsm_handle;
393  shm_toc_insert(pcxt->toc, PARALLEL_KEY_SESSION_DSM,
394  session_dsm_handle_space);
395 
396  /* Serialize transaction state. */
397  tstatespace = shm_toc_allocate(pcxt->toc, tstatelen);
398  SerializeTransactionState(tstatelen, tstatespace);
399  shm_toc_insert(pcxt->toc, PARALLEL_KEY_TRANSACTION_STATE, tstatespace);
400 
401  /* Serialize pending syncs. */
402  pendingsyncsspace = shm_toc_allocate(pcxt->toc, pendingsyncslen);
403  SerializePendingSyncs(pendingsyncslen, pendingsyncsspace);
404  shm_toc_insert(pcxt->toc, PARALLEL_KEY_PENDING_SYNCS,
405  pendingsyncsspace);
406 
407  /* Serialize reindex state. */
408  reindexspace = shm_toc_allocate(pcxt->toc, reindexlen);
409  SerializeReindexState(reindexlen, reindexspace);
410  shm_toc_insert(pcxt->toc, PARALLEL_KEY_REINDEX_STATE, reindexspace);
411 
412  /* Serialize relmapper state. */
413  relmapperspace = shm_toc_allocate(pcxt->toc, relmapperlen);
414  SerializeRelationMap(relmapperlen, relmapperspace);
415  shm_toc_insert(pcxt->toc, PARALLEL_KEY_RELMAPPER_STATE,
416  relmapperspace);
417 
418  /* Serialize uncommitted enum state. */
419  uncommittedenumsspace = shm_toc_allocate(pcxt->toc,
420  uncommittedenumslen);
421  SerializeUncommittedEnums(uncommittedenumsspace, uncommittedenumslen);
422  shm_toc_insert(pcxt->toc, PARALLEL_KEY_UNCOMMITTEDENUMS,
423  uncommittedenumsspace);
424 
425  /* Allocate space for worker information. */
426  pcxt->worker = palloc0(sizeof(ParallelWorkerInfo) * pcxt->nworkers);
427 
428  /*
429  * Establish error queues in dynamic shared memory.
430  *
431  * These queues should be used only for transmitting ErrorResponse,
432  * NoticeResponse, and NotifyResponse protocol messages. Tuple data
433  * should be transmitted via separate (possibly larger?) queues.
434  */
435  error_queue_space =
436  shm_toc_allocate(pcxt->toc,
437  mul_size(PARALLEL_ERROR_QUEUE_SIZE,
438  pcxt->nworkers));
439  for (i = 0; i < pcxt->nworkers; ++i)
440  {
441  char *start;
442  shm_mq *mq;
443 
444  start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
445  mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
446  shm_mq_set_receiver(mq, MyProc);
447  pcxt->worker[i].error_mqh = shm_mq_attach(mq, pcxt->seg, NULL);
448  }
449  shm_toc_insert(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE, error_queue_space);
450 
451  /*
452  * Serialize entrypoint information. It's unsafe to pass function
453  * pointers across processes, as the function pointer may be different
454  * in each process in EXEC_BACKEND builds, so we always pass library
455  * and function name. (We use library name "postgres" for functions
456  * in the core backend.)
457  */
458  lnamelen = strlen(pcxt->library_name);
459  entrypointstate = shm_toc_allocate(pcxt->toc, lnamelen +
460  strlen(pcxt->function_name) + 2);
461  strcpy(entrypointstate, pcxt->library_name);
462  strcpy(entrypointstate + lnamelen + 1, pcxt->function_name);
463  shm_toc_insert(pcxt->toc, PARALLEL_KEY_ENTRYPOINT, entrypointstate);
464  }
465 
466  /* Restore previous memory context. */
467  MemoryContextSwitchTo(oldcontext);
468 }
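/*
 * Illustrative sketch (not part of this file): a caller that wants to ship
 * its own state to workers estimates space before calling
 * InitializeParallelDSM() and stores the data afterwards, under an assumed
 * key MYCODE_KEY_INPUT_DATA:
 *
 *		shm_toc_estimate_chunk(&pcxt->estimator, len);
 *		shm_toc_estimate_keys(&pcxt->estimator, 1);
 *		InitializeParallelDSM(pcxt);
 *		space = shm_toc_allocate(pcxt->toc, len);
 *		memcpy(space, mydata, len);
 *		shm_toc_insert(pcxt->toc, MYCODE_KEY_INPUT_DATA, space);
 *
 * A worker then retrieves the same chunk with
 * shm_toc_lookup(toc, MYCODE_KEY_INPUT_DATA, false).
 */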
469 
470 /*
471  * Reinitialize the dynamic shared memory segment for a parallel context such
472  * that we could launch workers for it again.
473  */
474 void
475 ReinitializeParallelDSM(ParallelContext *pcxt)
476 {
477  FixedParallelState *fps;
478 
479  /* Wait for any old workers to exit. */
480  if (pcxt->nworkers_launched > 0)
481  {
482  WaitForParallelWorkersToFinish(pcxt);
483  WaitForParallelWorkersToExit(pcxt);
484  pcxt->nworkers_launched = 0;
485  if (pcxt->known_attached_workers)
486  {
487  pfree(pcxt->known_attached_workers);
488  pcxt->known_attached_workers = NULL;
489  pcxt->nknown_attached_workers = 0;
490  }
491  }
492 
493  /* Reset a few bits of fixed parallel state to a clean state. */
494  fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED, false);
495  fps->last_xlog_end = 0;
496 
497  /* Recreate error queues (if they exist). */
498  if (pcxt->nworkers > 0)
499  {
500  char *error_queue_space;
501  int i;
502 
503  error_queue_space =
504  shm_toc_lookup(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE, false);
505  for (i = 0; i < pcxt->nworkers; ++i)
506  {
507  char *start;
508  shm_mq *mq;
509 
510  start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
511  mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
512  shm_mq_set_receiver(mq, MyProc);
513  pcxt->worker[i].error_mqh = shm_mq_attach(mq, pcxt->seg, NULL);
514  }
515  }
516 }
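/*
 * Illustrative sketch (not part of this file): rerunning a parallel
 * operation while keeping the same DSM segment, as a rescan might:
 *
 *		ReinitializeParallelDSM(pcxt);
 *		ReinitializeParallelWorkers(pcxt, new_nworkers);
 *		LaunchParallelWorkers(pcxt);
 *		...
 *		WaitForParallelWorkersToFinish(pcxt);
 *
 * The ReinitializeParallelWorkers() step (below) is needed only when the
 * number of workers to launch differs from run to run.
 */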
517 
518 /*
519  * Reinitialize parallel workers for a parallel context such that we could
520  * launch a different number of workers. This is required for cases where
521  * we need to reuse the same DSM segment, but the number of workers can
522  * vary from run-to-run.
523  */
524 void
525 ReinitializeParallelWorkers(ParallelContext *pcxt, int nworkers_to_launch)
526 {
527  /*
528  * The number of workers that need to be launched must not exceed the
529  * number of workers with which the parallel context is initialized.
530  */
531  Assert(pcxt->nworkers >= nworkers_to_launch);
532  pcxt->nworkers_to_launch = nworkers_to_launch;
533 }
534 
535 /*
536  * Launch parallel workers.
537  */
538 void
539 LaunchParallelWorkers(ParallelContext *pcxt)
540 {
541  MemoryContext oldcontext;
542  BackgroundWorker worker;
543  int i;
544  bool any_registrations_failed = false;
545 
546  /* Skip this if we have no workers. */
547  if (pcxt->nworkers == 0 || pcxt->nworkers_to_launch == 0)
548  return;
549 
550  /* We need to be a lock group leader. */
551  BecomeLockGroupLeader();
552 
553  /* If we do have workers, we'd better have a DSM segment. */
554  Assert(pcxt->seg != NULL);
555 
556  /* We might be running in a short-lived memory context. */
557  oldcontext = MemoryContextSwitchTo(TopTransactionContext);
558 
559  /* Configure a worker. */
560  memset(&worker, 0, sizeof(worker));
561  snprintf(worker.bgw_name, BGW_MAXLEN, "parallel worker for PID %d",
562  MyProcPid);
563  snprintf(worker.bgw_type, BGW_MAXLEN, "parallel worker");
564  worker.bgw_flags =
565  BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION
566  | BGWORKER_CLASS_PARALLEL;
567  worker.bgw_start_time = BgWorkerStart_ConsistentState;
568  worker.bgw_restart_time = BGW_NEVER_RESTART;
569  sprintf(worker.bgw_library_name, "postgres");
570  sprintf(worker.bgw_function_name, "ParallelWorkerMain");
571  worker.bgw_main_arg = UInt32GetDatum(dsm_segment_handle(pcxt->seg));
572  worker.bgw_notify_pid = MyProcPid;
573 
574  /*
575  * Start workers.
576  *
577  * The caller must be able to tolerate ending up with fewer workers than
578  * expected, so there is no need to throw an error here if registration
579  * fails. It wouldn't help much anyway, because registering the worker in
580  * no way guarantees that it will start up and initialize successfully.
581  */
582  for (i = 0; i < pcxt->nworkers_to_launch; ++i)
583  {
584  memcpy(worker.bgw_extra, &i, sizeof(int));
585  if (!any_registrations_failed &&
586  RegisterDynamicBackgroundWorker(&worker,
587  &pcxt->worker[i].bgwhandle))
588  {
589  shm_mq_set_handle(pcxt->worker[i].error_mqh,
590  pcxt->worker[i].bgwhandle);
591  pcxt->nworkers_launched++;
592  }
593  else
594  {
595  /*
596  * If we weren't able to register the worker, then we've bumped up
597  * against the max_worker_processes limit, and future
598  * registrations will probably fail too, so arrange to skip them.
599  * But we still have to execute this code for the remaining slots
600  * to make sure that we forget about the error queues we budgeted
601  * for those workers. Otherwise, we'll wait for them to start,
602  * but they never will.
603  */
604  any_registrations_failed = true;
605  pcxt->worker[i].bgwhandle = NULL;
606  shm_mq_detach(pcxt->worker[i].error_mqh);
607  pcxt->worker[i].error_mqh = NULL;
608  }
609  }
610 
611  /*
612  * Now that nworkers_launched has taken its final value, we can initialize
613  * known_attached_workers.
614  */
615  if (pcxt->nworkers_launched > 0)
616  {
617  pcxt->known_attached_workers =
618  palloc0(sizeof(bool) * pcxt->nworkers_launched);
619  pcxt->nknown_attached_workers = 0;
620  }
621 
622  /* Restore previous memory context. */
623  MemoryContextSwitchTo(oldcontext);
624 }
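/*
 * Illustrative sketch (not part of this file): callers must tolerate
 * getting fewer workers than they asked for, including none at all:
 *
 *		LaunchParallelWorkers(pcxt);
 *		if (pcxt->nworkers_launched == 0)
 *			... do all of the work in the leader instead ...
 *
 * This is why registration failures above are deliberately not errors.
 */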
625 
626 /*
627  * Wait for all workers to attach to their error queues, and throw an error if
628  * any worker fails to do this.
629  *
630  * Callers can assume that if this function returns successfully, then the
631  * number of workers given by pcxt->nworkers_launched have initialized and
632  * attached to their error queues. Whether or not these workers are guaranteed
633  * to still be running depends on what code the caller asked them to run;
634  * this function does not guarantee that they have not exited. However, it
635  * does guarantee that any workers which exited must have done so cleanly and
636  * after successfully performing the work with which they were tasked.
637  *
638  * If this function is not called, then some of the workers that were launched
639  * may not have been started due to a fork() failure, or may have exited during
640  * early startup prior to attaching to the error queue, so nworkers_launched
641  * cannot be viewed as completely reliable. It will never be less than the
642  * number of workers which actually started, but it might be more. Any workers
643  * that failed to start will still be discovered by
644  * WaitForParallelWorkersToFinish and an error will be thrown at that time,
645  * provided that function is eventually reached.
646  *
647  * In general, the leader process should do as much work as possible before
648  * calling this function. fork() failures and other early-startup failures
649  * are very uncommon, and having the leader sit idle when it could be doing
650  * useful work is undesirable. However, if the leader needs to wait for
651  * all of its workers or for a specific worker, it may want to call this
652  * function before doing so. If not, it must make some other provision for
653  * the failure-to-start case, lest it wait forever. On the other hand, a
654  * leader which never waits for a worker that might not be started yet, or
655  * at least never does so prior to WaitForParallelWorkersToFinish(), need not
656  * call this function at all.
657  */
658 void
659 WaitForParallelWorkersToAttach(ParallelContext *pcxt)
660 {
661  int i;
662 
663  /* Skip this if we have no launched workers. */
664  if (pcxt->nworkers_launched == 0)
665  return;
666 
667  for (;;)
668  {
669  /*
670  * This will process any parallel messages that are pending and it may
671  * also throw an error propagated from a worker.
672  */
673  CHECK_FOR_INTERRUPTS();
674 
675  for (i = 0; i < pcxt->nworkers_launched; ++i)
676  {
677  BgwHandleStatus status;
678  shm_mq *mq;
679  int rc;
680  pid_t pid;
681 
682  if (pcxt->known_attached_workers[i])
683  continue;
684 
685  /*
686  * If error_mqh is NULL, then the worker has already exited
687  * cleanly.
688  */
689  if (pcxt->worker[i].error_mqh == NULL)
690  {
691  pcxt->known_attached_workers[i] = true;
692  ++pcxt->nknown_attached_workers;
693  continue;
694  }
695 
696  status = GetBackgroundWorkerPid(pcxt->worker[i].bgwhandle, &pid);
697  if (status == BGWH_STARTED)
698  {
699  /* Has the worker attached to the error queue? */
700  mq = shm_mq_get_queue(pcxt->worker[i].error_mqh);
701  if (shm_mq_get_sender(mq) != NULL)
702  {
703  /* Yes, so it is known to be attached. */
704  pcxt->known_attached_workers[i] = true;
705  ++pcxt->nknown_attached_workers;
706  }
707  }
708  else if (status == BGWH_STOPPED)
709  {
710  /*
711  * If the worker stopped without attaching to the error queue,
712  * throw an error.
713  */
714  mq = shm_mq_get_queue(pcxt->worker[i].error_mqh);
715  if (shm_mq_get_sender(mq) == NULL)
716  ereport(ERROR,
717  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
718  errmsg("parallel worker failed to initialize"),
719  errhint("More details may be available in the server log.")));
720 
721  pcxt->known_attached_workers[i] = true;
722  ++pcxt->nknown_attached_workers;
723  }
724  else
725  {
726  /*
727  * Worker not yet started, so we must wait. The postmaster
728  * will notify us if the worker's state changes. Our latch
729  * might also get set for some other reason, but if so we'll
730  * just end up waiting for the same worker again.
731  */
732  rc = WaitLatch(MyLatch,
733  WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, -1,
734  WAIT_EVENT_BGWORKER_STARTUP);
735 
736  if (rc & WL_LATCH_SET)
737  ResetLatch(MyLatch);
738  }
739  }
740 
741  /* If all workers are known to have started, we're done. */
742  if (pcxt->nknown_attached_workers >= pcxt->nworkers_launched)
743  {
744  Assert(pcxt->nknown_attached_workers == pcxt->nworkers_launched);
745  break;
746  }
747  }
748 }
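/*
 * Illustrative sketch (not part of this file): a leader that intends to
 * block waiting for worker progress guards against workers that never
 * started like this:
 *
 *		LaunchParallelWorkers(pcxt);
 *		if (pcxt->nworkers_launched > 0)
 *		{
 *			WaitForParallelWorkersToAttach(pcxt);
 *			... now it is safe to wait on per-worker state ...
 *		}
 *
 * The parallel B-tree build in nbtsort.c follows this pattern.
 */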
749 
750 /*
751  * Wait for all workers to finish computing.
752  *
753  * Even if the parallel operation seems to have completed successfully, it's
754  * important to call this function afterwards. We must not miss any errors
755  * the workers may have thrown during the parallel operation, or any that they
756  * may yet throw while shutting down.
757  *
758  * Also, we want to update our notion of XactLastRecEnd based on worker
759  * feedback.
760  */
761 void
762 WaitForParallelWorkersToFinish(ParallelContext *pcxt)
763 {
764  for (;;)
765  {
766  bool anyone_alive = false;
767  int nfinished = 0;
768  int i;
769 
770  /*
771  * This will process any parallel messages that are pending, which may
772  * change the outcome of the loop that follows. It may also throw an
773  * error propagated from a worker.
774  */
775  CHECK_FOR_INTERRUPTS();
776 
777  for (i = 0; i < pcxt->nworkers_launched; ++i)
778  {
779  /*
780  * If error_mqh is NULL, then the worker has already exited
781  * cleanly. If we have received a message through error_mqh from
782  * the worker, we know it started up cleanly, and therefore we're
783  * certain to be notified when it exits.
784  */
785  if (pcxt->worker[i].error_mqh == NULL)
786  ++nfinished;
787  else if (pcxt->known_attached_workers[i])
788  {
789  anyone_alive = true;
790  break;
791  }
792  }
793 
794  if (!anyone_alive)
795  {
796  /* If all workers are known to have finished, we're done. */
797  if (nfinished >= pcxt->nworkers_launched)
798  {
799  Assert(nfinished == pcxt->nworkers_launched);
800  break;
801  }
802 
803  /*
804  * We didn't detect any living workers, but not all workers are
805  * known to have exited cleanly. Either not all workers have
806  * launched yet, or maybe some of them failed to start or
807  * terminated abnormally.
808  */
809  for (i = 0; i < pcxt->nworkers_launched; ++i)
810  {
811  pid_t pid;
812  shm_mq *mq;
813 
814  /*
815  * If the worker is BGWH_NOT_YET_STARTED or BGWH_STARTED, we
816  * should just keep waiting. If it is BGWH_STOPPED, then
817  * further investigation is needed.
818  */
819  if (pcxt->worker[i].error_mqh == NULL ||
820  pcxt->worker[i].bgwhandle == NULL ||
821  GetBackgroundWorkerPid(pcxt->worker[i].bgwhandle,
822  &pid) != BGWH_STOPPED)
823  continue;
824 
825  /*
826  * Check whether the worker ended up stopped without ever
827  * attaching to the error queue. If so, the postmaster was
828  * unable to fork the worker or it exited without initializing
829  * properly. We must throw an error, since the caller may
830  * have been expecting the worker to do some work before
831  * exiting.
832  */
833  mq = shm_mq_get_queue(pcxt->worker[i].error_mqh);
834  if (shm_mq_get_sender(mq) == NULL)
835  ereport(ERROR,
836  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
837  errmsg("parallel worker failed to initialize"),
838  errhint("More details may be available in the server log.")));
839 
840  /*
841  * The worker is stopped, but is attached to the error queue.
842  * Unless there's a bug somewhere, this will only happen when
843  * the worker writes messages and terminates after the
844  * CHECK_FOR_INTERRUPTS() near the top of this function and
845  * before the call to GetBackgroundWorkerPid(). In that case,
846  * our latch should have been set as well and the right things
847  * will happen on the next pass through the loop.
848  */
849  }
850  }
851 
852  (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, -1,
853  WAIT_EVENT_PARALLEL_FINISH);
854  ResetLatch(MyLatch);
855  }
856 
857  if (pcxt->toc != NULL)
858  {
859  FixedParallelState *fps;
860 
861  fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED, false);
862  if (fps->last_xlog_end > XactLastRecEnd)
863  XactLastRecEnd = fps->last_xlog_end;
864  }
865 }
866 
867 /*
868  * Wait for all workers to exit.
869  *
870  * This function ensures that workers have been completely shut down. The
871  * difference between WaitForParallelWorkersToFinish and this function is
872  * that the former just ensures that the last message sent by a worker backend
873  * is received by the leader backend, whereas this one ensures complete shutdown.
874  */
875 static void
876 WaitForParallelWorkersToExit(ParallelContext *pcxt)
877 {
878  int i;
879 
880  /* Wait until the workers actually die. */
881  for (i = 0; i < pcxt->nworkers_launched; ++i)
882  {
883  BgwHandleStatus status;
884 
885  if (pcxt->worker == NULL || pcxt->worker[i].bgwhandle == NULL)
886  continue;
887 
888  status = WaitForBackgroundWorkerShutdown(pcxt->worker[i].bgwhandle);
889 
890  /*
891  * If the postmaster kicked the bucket, we have no chance of cleaning
892  * up safely -- we won't be able to tell when our workers are actually
893  * dead. This doesn't necessitate a PANIC since they will all abort
894  * eventually, but we can't safely continue this session.
895  */
896  if (status == BGWH_POSTMASTER_DIED)
897  ereport(FATAL,
898  (errcode(ERRCODE_ADMIN_SHUTDOWN),
899  errmsg("postmaster exited during a parallel transaction")));
900 
901  /* Release memory. */
902  pfree(pcxt->worker[i].bgwhandle);
903  pcxt->worker[i].bgwhandle = NULL;
904  }
905 }
906 
907 /*
908  * Destroy a parallel context.
909  *
910  * If expecting a clean exit, you should use WaitForParallelWorkersToFinish()
911  * first, before calling this function. When this function is invoked, any
912  * remaining workers are forcibly killed; the dynamic shared memory segment
913  * is unmapped; and we then wait (uninterruptibly) for the workers to exit.
914  */
915 void
916 DestroyParallelContext(ParallelContext *pcxt)
917 {
918  int i;
919 
920  /*
921  * Be careful about order of operations here! We remove the parallel
922  * context from the list before we do anything else; otherwise, if an
923  * error occurs during a subsequent step, we might try to nuke it again
924  * from AtEOXact_Parallel or AtEOSubXact_Parallel.
925  */
926  dlist_delete(&pcxt->node);
927 
928  /* Kill each worker in turn, and forget their error queues. */
929  if (pcxt->worker != NULL)
930  {
931  for (i = 0; i < pcxt->nworkers_launched; ++i)
932  {
933  if (pcxt->worker[i].error_mqh != NULL)
934  {
935  TerminateBackgroundWorker(pcxt->worker[i].bgwhandle);
936 
937  shm_mq_detach(pcxt->worker[i].error_mqh);
938  pcxt->worker[i].error_mqh = NULL;
939  }
940  }
941  }
942 
943  /*
944  * If we have allocated a shared memory segment, detach it. This will
945  * implicitly detach the error queues and any other shared memory queues
946  * stored there.
947  */
948  if (pcxt->seg != NULL)
949  {
950  dsm_detach(pcxt->seg);
951  pcxt->seg = NULL;
952  }
953 
954  /*
955  * If this parallel context is actually in backend-private memory rather
956  * than shared memory, free that memory instead.
957  */
958  if (pcxt->private_memory != NULL)
959  {
960  pfree(pcxt->private_memory);
961  pcxt->private_memory = NULL;
962  }
963 
964  /*
965  * We can't finish transaction commit or abort until all of the workers
966  * have exited. This means, in particular, that we can't respond to
967  * interrupts at this stage.
968  */
969  HOLD_INTERRUPTS();
970  WaitForParallelWorkersToExit(pcxt);
971  RESUME_INTERRUPTS();
972 
973  /* Free the worker array itself. */
974  if (pcxt->worker != NULL)
975  {
976  pfree(pcxt->worker);
977  pcxt->worker = NULL;
978  }
979 
980  /* Free memory. */
981  pfree(pcxt->library_name);
982  pfree(pcxt->function_name);
983  pfree(pcxt);
984 }
985 
986 /*
987  * Are there any parallel contexts currently active?
988  */
989 bool
990 ParallelContextActive(void)
991 {
992  return !dlist_is_empty(&pcxt_list);
993 }
994 
995 /*
996  * Handle receipt of an interrupt indicating a parallel worker message.
997  *
998  * Note: this is called within a signal handler! All we can do is set
999  * a flag that will cause the next CHECK_FOR_INTERRUPTS() to invoke
1000  * HandleParallelMessages().
1001  */
1002 void
1003 HandleParallelMessageInterrupt(void)
1004 {
1005  InterruptPending = true;
1006  ParallelMessagePending = true;
1007  SetLatch(MyLatch);
1008 }
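/*
 * Illustrative sketch (not part of this file): the delivery path of a
 * worker message. The worker signals PROCSIG_PARALLEL_MESSAGE; the signal
 * handler calls HandleParallelMessageInterrupt(); and the leader's next
 * CHECK_FOR_INTERRUPTS() reaches ProcessInterrupts(), which effectively
 * does
 *
 *		if (ParallelMessagePending)
 *			HandleParallelMessages();
 */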
1009 
1010 /*
1011  * Handle any queued protocol messages received from parallel workers.
1012  */
1013 void
1014 HandleParallelMessages(void)
1015 {
1016  dlist_iter iter;
1017  MemoryContext oldcontext;
1018 
1019  static MemoryContext hpm_context = NULL;
1020 
1021  /*
1022  * This is invoked from ProcessInterrupts(), and since some of the
1023  * functions it calls contain CHECK_FOR_INTERRUPTS(), there is a potential
1024  * for recursive calls if more signals are received while this runs. It's
1025  * unclear that recursive entry would be safe, and it doesn't seem useful
1026  * even if it is safe, so let's block interrupts until done.
1027  */
1028  HOLD_INTERRUPTS();
1029 
1030  /*
1031  * Moreover, CurrentMemoryContext might be pointing almost anywhere. We
1032  * don't want to risk leaking data into long-lived contexts, so let's do
1033  * our work here in a private context that we can reset on each use.
1034  */
1035  if (hpm_context == NULL) /* first time through? */
1036  hpm_context = AllocSetContextCreate(TopMemoryContext,
1037  "HandleParallelMessages",
1038  ALLOCSET_DEFAULT_SIZES);
1039  else
1040  MemoryContextReset(hpm_context);
1041 
1042  oldcontext = MemoryContextSwitchTo(hpm_context);
1043 
1044  /* OK to process messages. Reset the flag saying there are more to do. */
1045  ParallelMessagePending = false;
1046 
1047  dlist_foreach(iter, &pcxt_list)
1048  {
1049  ParallelContext *pcxt;
1050  int i;
1051 
1052  pcxt = dlist_container(ParallelContext, node, iter.cur);
1053  if (pcxt->worker == NULL)
1054  continue;
1055 
1056  for (i = 0; i < pcxt->nworkers_launched; ++i)
1057  {
1058  /*
1059  * Read as many messages as we can from each worker, but stop when
1060  * either (1) the worker's error queue goes away, which can happen
1061  * if we receive a Terminate message from the worker; or (2) no
1062  * more messages can be read from the worker without blocking.
1063  */
1064  while (pcxt->worker[i].error_mqh != NULL)
1065  {
1066  shm_mq_result res;
1067  Size nbytes;
1068  void *data;
1069 
1070  res = shm_mq_receive(pcxt->worker[i].error_mqh, &nbytes,
1071  &data, true);
1072  if (res == SHM_MQ_WOULD_BLOCK)
1073  break;
1074  else if (res == SHM_MQ_SUCCESS)
1075  {
1076  StringInfoData msg;
1077 
1078  initStringInfo(&msg);
1079  appendBinaryStringInfo(&msg, data, nbytes);
1080  HandleParallelMessage(pcxt, i, &msg);
1081  pfree(msg.data);
1082  }
1083  else
1084  ereport(ERROR,
1085  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1086  errmsg("lost connection to parallel worker")));
1087  }
1088  }
1089  }
1090 
1091  MemoryContextSwitchTo(oldcontext);
1092 
1093  /* Might as well clear the context on our way out */
1094  MemoryContextReset(hpm_context);
1095 
1096  RESUME_INTERRUPTS();
1097 }
1098 
1099 /*
1100  * Handle a single protocol message received from a single parallel worker.
1101  */
1102 static void
1103 HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg)
1104 {
1105  char msgtype;
1106 
1107  if (pcxt->known_attached_workers != NULL &&
1108  !pcxt->known_attached_workers[i])
1109  {
1110  pcxt->known_attached_workers[i] = true;
1111  pcxt->nknown_attached_workers++;
1112  }
1113 
1114  msgtype = pq_getmsgbyte(msg);
1115 
1116  switch (msgtype)
1117  {
1118  case 'K': /* BackendKeyData */
1119  {
1120  int32 pid = pq_getmsgint(msg, 4);
1121 
1122  (void) pq_getmsgint(msg, 4); /* discard cancel key */
1123  (void) pq_getmsgend(msg);
1124  pcxt->worker[i].pid = pid;
1125  break;
1126  }
1127 
1128  case 'E': /* ErrorResponse */
1129  case 'N': /* NoticeResponse */
1130  {
1131  ErrorData edata;
1132  ErrorContextCallback *save_error_context_stack;
1133 
1134  /* Parse ErrorResponse or NoticeResponse. */
1135  pq_parse_errornotice(msg, &edata);
1136 
1137  /* Death of a worker isn't enough justification for suicide. */
1138  edata.elevel = Min(edata.elevel, ERROR);
1139 
1140  /*
1141  * If desired, add a context line to show that this is a
1142  * message propagated from a parallel worker. Otherwise, it
1143  * can sometimes be confusing to understand what actually
1144  * happened. (We don't do this in FORCE_PARALLEL_REGRESS mode
1145  * because it causes test-result instability depending on
1146  * whether a parallel worker is actually used or not.)
1147  */
1148  if (force_parallel_mode != FORCE_PARALLEL_REGRESS)
1149  {
1150  if (edata.context)
1151  edata.context = psprintf("%s\n%s", edata.context,
1152  _("parallel worker"));
1153  else
1154  edata.context = pstrdup(_("parallel worker"));
1155  }
1156 
1157  /*
1158  * Context beyond that should use the error context callbacks
1159  * that were in effect when the ParallelContext was created,
1160  * not the current ones.
1161  */
1162  save_error_context_stack = error_context_stack;
1163  error_context_stack = pcxt->error_context_stack;
1164 
1165  /* Rethrow error or print notice. */
1166  ThrowErrorData(&edata);
1167 
1168  /* Not an error, so restore previous context stack. */
1169  error_context_stack = save_error_context_stack;
1170 
1171  break;
1172  }
1173 
1174  case 'A': /* NotifyResponse */
1175  {
1176  /* Propagate NotifyResponse. */
1177  int32 pid;
1178  const char *channel;
1179  const char *payload;
1180 
1181  pid = pq_getmsgint(msg, 4);
1182  channel = pq_getmsgrawstring(msg);
1183  payload = pq_getmsgrawstring(msg);
1184  pq_endmessage(msg);
1185 
1186  NotifyMyFrontEnd(channel, payload, pid);
1187 
1188  break;
1189  }
1190 
1191  case 'X': /* Terminate, indicating clean exit */
1192  {
1193  shm_mq_detach(pcxt->worker[i].error_mqh);
1194  pcxt->worker[i].error_mqh = NULL;
1195  break;
1196  }
1197 
1198  default:
1199  {
1200  elog(ERROR, "unrecognized message type received from parallel worker: %c (message length %d bytes)",
1201  msgtype, msg->len);
1202  }
1203  }
1204 }
1205 
1206 /*
1207  * End-of-subtransaction cleanup for parallel contexts.
1208  *
1209  * Currently, it's forbidden to enter or leave a subtransaction while
1210  * parallel mode is in effect, so we could just blow away everything. But
1211  * we may want to relax that restriction in the future, so this code
1212  * contemplates that there may be multiple subtransaction IDs in pcxt_list.
1213  */
1214 void
1215 AtEOSubXact_Parallel(bool isCommit, SubTransactionId mySubId)
1216 {
1217  while (!dlist_is_empty(&pcxt_list))
1218  {
1219  ParallelContext *pcxt;
1220 
1221  pcxt = dlist_head_element(ParallelContext, node, &pcxt_list);
1222  if (pcxt->subid != mySubId)
1223  break;
1224  if (isCommit)
1225  elog(WARNING, "leaked parallel context");
1226  DestroyParallelContext(pcxt);
1227  }
1228 }
1229 
1230 /*
1231  * End-of-transaction cleanup for parallel contexts.
1232  */
1233 void
1234 AtEOXact_Parallel(bool isCommit)
1235 {
1236  while (!dlist_is_empty(&pcxt_list))
1237  {
1238  ParallelContext *pcxt;
1239 
1240  pcxt = dlist_head_element(ParallelContext, node, &pcxt_list);
1241  if (isCommit)
1242  elog(WARNING, "leaked parallel context");
1243  DestroyParallelContext(pcxt);
1244  }
1245 }
1246 
1247 /*
1248  * Main entrypoint for parallel workers.
1249  */
1250 void
1251 ParallelWorkerMain(Datum main_arg)
1252 {
1253  dsm_segment *seg;
1254  shm_toc *toc;
1255  FixedParallelState *fps;
1256  char *error_queue_space;
1257  shm_mq *mq;
1258  shm_mq_handle *mqh;
1259  char *libraryspace;
1260  char *entrypointstate;
1261  char *library_name;
1262  char *function_name;
1263  parallel_worker_main_type entrypt;
1264  char *gucspace;
1265  char *combocidspace;
1266  char *tsnapspace;
1267  char *asnapspace;
1268  char *tstatespace;
1269  char *pendingsyncsspace;
1270  char *reindexspace;
1271  char *relmapperspace;
1272  char *uncommittedenumsspace;
1273  StringInfoData msgbuf;
1274  char *session_dsm_handle_space;
1275  Snapshot tsnapshot;
1276  Snapshot asnapshot;
1277 
1278  /* Set flag to indicate that we're initializing a parallel worker. */
1279  InitializingParallelWorker = true;
1280 
1281  /* Establish signal handlers. */
1282  pqsignal(SIGTERM, die);
1283  BackgroundWorkerUnblockSignals();
1284 
1285  /* Determine and set our parallel worker number. */
1286  Assert(ParallelWorkerNumber == -1);
1287  memcpy(&ParallelWorkerNumber, MyBgworkerEntry->bgw_extra, sizeof(int));
1288 
1289  /* Set up a memory context to work in, just for cleanliness. */
1290  CurrentMemoryContext = AllocSetContextCreate(TopMemoryContext,
1291  "Parallel worker",
1292  ALLOCSET_DEFAULT_SIZES);
1293 
1294  /*
1295  * Attach to the dynamic shared memory segment for the parallel query, and
1296  * find its table of contents.
1297  *
1298  * Note: at this point, we have not created any ResourceOwner in this
1299  * process. This will result in our DSM mapping surviving until process
1300  * exit, which is fine. If there were a ResourceOwner, it would acquire
1301  * ownership of the mapping, but we have no need for that.
1302  */
1303  seg = dsm_attach(DatumGetUInt32(main_arg));
1304  if (seg == NULL)
1305  ereport(ERROR,
1306  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1307  errmsg("could not map dynamic shared memory segment")));
1308  toc = shm_toc_attach(PARALLEL_MAGIC, dsm_segment_address(seg));
1309  if (toc == NULL)
1310  ereport(ERROR,
1311  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1312  errmsg("invalid magic number in dynamic shared memory segment")));
1313 
1314  /* Look up fixed parallel state. */
1315  fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED, false);
1316  MyFixedParallelState = fps;
1317 
1318  /* Arrange to signal the leader if we exit. */
1319  ParallelLeaderPid = fps->parallel_leader_pid;
1320  ParallelLeaderBackendId = fps->parallel_leader_backend_id;
1321  before_shmem_exit(ParallelWorkerShutdown, PointerGetDatum(seg));
1322 
1323  /*
1324  * Now we can find and attach to the error queue provided for us. That's
1325  * good, because until we do that, any errors that happen here will not be
1326  * reported back to the process that requested that this worker be
1327  * launched.
1328  */
1329  error_queue_space = shm_toc_lookup(toc, PARALLEL_KEY_ERROR_QUEUE, false);
1330  mq = (shm_mq *) (error_queue_space +
1331  ParallelWorkerNumber * PARALLEL_ERROR_QUEUE_SIZE);
1332  shm_mq_set_sender(mq, MyProc);
1333  mqh = shm_mq_attach(mq, seg, NULL);
1334  pq_redirect_to_shm_mq(seg, mqh);
1335  pq_set_parallel_leader(fps->parallel_leader_pid,
1336  fps->parallel_leader_backend_id);
1337 
1338  /*
1339  * Send a BackendKeyData message to the process that initiated parallelism
1340  * so that it has access to our PID before it receives any other messages
1341  * from us. Our cancel key is sent, too, since that's the way the
1342  * protocol message is defined, but it won't actually be used for anything
1343  * in this case.
1344  */
1345  pq_beginmessage(&msgbuf, 'K');
1346  pq_sendint32(&msgbuf, (int32) MyProcPid);
1347  pq_sendint32(&msgbuf, (int32) MyCancelKey);
1348  pq_endmessage(&msgbuf);
1349 
1350  /*
1351  * Hooray! Primary initialization is complete. Now, we need to set up our
1352  * backend-local state to match the original backend.
1353  */
1354 
1355  /*
1356  * Join locking group. We must do this before anything that could try to
1357  * acquire a heavyweight lock, because any heavyweight locks acquired to
1358  * this point could block either directly against the parallel group
1359  * leader or against some process which in turn waits for a lock that
1360  * conflicts with the parallel group leader, causing an undetected
1361  * deadlock. (If we can't join the lock group, the leader has gone away,
1362  * so just exit quietly.)
1363  */
1364  if (!BecomeLockGroupMember(fps->parallel_leader_pgproc,
1365  fps->parallel_leader_pid))
1366  return;
1367 
1368  /*
1369  * Restore transaction and statement start-time timestamps. This must
1370  * happen before anything that would start a transaction, else asserts in
1371  * xact.c will fire.
1372  */
1373  SetParallelStartTimestamps(fps->xact_ts, fps->stmt_ts);
1374 
1375  /*
1376  * Identify the entry point to be called. In theory this could result in
1377  * loading an additional library, though most likely the entry point is in
1378  * the core backend or in a library we just loaded.
1379  */
1380  entrypointstate = shm_toc_lookup(toc, PARALLEL_KEY_ENTRYPOINT, false);
1381  library_name = entrypointstate;
1382  function_name = entrypointstate + strlen(library_name) + 1;
1383 
1384  entrypt = LookupParallelWorkerFunction(library_name, function_name);
1385 
1386  /* Restore database connection. */
1387  BackgroundWorkerInitializeConnectionByOid(fps->database_id,
1388  fps->authenticated_user_id,
1389  0);
1390 
1391  /*
1392  * Set the client encoding to the database encoding, since that is what
1393  * the leader will expect.
1394  */
1395  SetClientEncoding(GetDatabaseEncoding());
1396 
1397  /*
1398  * Load libraries that were loaded by original backend. We want to do
1399  * this before restoring GUCs, because the libraries might define custom
1400  * variables.
1401  */
1402  libraryspace = shm_toc_lookup(toc, PARALLEL_KEY_LIBRARY, false);
1403  StartTransactionCommand();
1404  RestoreLibraryState(libraryspace);
1405 
1406  /* Restore GUC values from launching backend. */
1407  gucspace = shm_toc_lookup(toc, PARALLEL_KEY_GUC, false);
1408  RestoreGUCState(gucspace);
1409  CommitTransactionCommand();
1410 
1411  /* Crank up a transaction state appropriate to a parallel worker. */
1412  tstatespace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_STATE, false);
1413  StartParallelWorkerTransaction(tstatespace);
1414 
1415  /* Restore combo CID state. */
1416  combocidspace = shm_toc_lookup(toc, PARALLEL_KEY_COMBO_CID, false);
1417  RestoreComboCIDState(combocidspace);
1418 
1419  /* Attach to the per-session DSM segment and contained objects. */
1420  session_dsm_handle_space =
1421  shm_toc_lookup(toc, PARALLEL_KEY_SESSION_DSM, false);
1422  AttachSession(*(dsm_handle *) session_dsm_handle_space);
1423 
1424  /*
1425  * If the transaction isolation level is REPEATABLE READ or SERIALIZABLE,
1426  * the leader has serialized the transaction snapshot and we must restore
1427  * it. At lower isolation levels, there is no transaction-lifetime
1428  * snapshot, but we need TransactionXmin to get set to a value which is
1429  * less than or equal to the xmin of every snapshot that will be used by
1430  * this worker. The easiest way to accomplish that is to install the
1431  * active snapshot as the transaction snapshot. Code running in this
1432  * parallel worker might take new snapshots via GetTransactionSnapshot()
1433  * or GetLatestSnapshot(), but it shouldn't have any way of acquiring a
1434  * snapshot older than the active snapshot.
1435  */
1436  asnapspace = shm_toc_lookup(toc, PARALLEL_KEY_ACTIVE_SNAPSHOT, false);
1437  tsnapspace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT, true);
1438  asnapshot = RestoreSnapshot(asnapspace);
1439  tsnapshot = tsnapspace ? RestoreSnapshot(tsnapspace) : asnapshot;
1440  RestoreTransactionSnapshot(tsnapshot,
1441  fps->parallel_leader_pgproc);
1442  PushActiveSnapshot(asnapshot);
1443 
1444  /*
1445  * We've changed which tuples we can see, and must therefore invalidate
1446  * system caches.
1447  */
1448  InvalidateSystemCaches();
1449 
1450  /*
1451  * Restore current role id. Skip verifying whether session user is
1452  * allowed to become this role and blindly restore the leader's state for
1453  * current role.
1454  */
1455  SetCurrentRoleId(fps->outer_user_id, fps->is_superuser);
1456 
1457  /* Restore user ID and security context. */
1458  SetUserIdAndSecContext(fps->current_user_id, fps->sec_context);
1459 
1460  /* Restore temp-namespace state to ensure search path matches leader's. */
1461  SetTempNamespaceState(fps->temp_namespace_id,
1462  fps->temp_toast_namespace_id);
1463 
1464  /* Restore pending syncs. */
1465  pendingsyncsspace = shm_toc_lookup(toc, PARALLEL_KEY_PENDING_SYNCS,
1466  false);
1467  RestorePendingSyncs(pendingsyncsspace);
1468 
1469  /* Restore reindex state. */
1470  reindexspace = shm_toc_lookup(toc, PARALLEL_KEY_REINDEX_STATE, false);
1471  RestoreReindexState(reindexspace);
1472 
1473  /* Restore relmapper state. */
1474  relmapperspace = shm_toc_lookup(toc, PARALLEL_KEY_RELMAPPER_STATE, false);
1475  RestoreRelationMap(relmapperspace);
1476 
1477  /* Restore uncommitted enums. */
1478  uncommittedenumsspace = shm_toc_lookup(toc, PARALLEL_KEY_UNCOMMITTEDENUMS,
1479  false);
1480  RestoreUncommittedEnums(uncommittedenumsspace);
1481 
1482  /* Attach to the leader's serializable transaction, if SERIALIZABLE. */
1483  AttachSerializableXact(fps->serializable_xact_handle);
1484 
1485  /*
1486  * We've initialized all of our state now; nothing should change
1487  * hereafter.
1488  */
1489  InitializingParallelWorker = false;
1490  EnterParallelMode();
1491 
1492  /*
1493  * Time to do the real work: invoke the caller-supplied code.
1494  */
1495  entrypt(seg, toc);
1496 
1497  /* Must exit parallel mode to pop active snapshot. */
1498  ExitParallelMode();
1499 
1500  /* Must pop active snapshot so snapmgr.c doesn't complain. */
1501  PopActiveSnapshot();
1502 
1503  /* Shut down the parallel-worker transaction. */
1504  EndParallelWorkerTransaction();
1505 
1506  /* Detach from the per-session DSM segment. */
1507  DetachSession();
1508 
1509  /* Report success. */
1510  pq_putmessage('X', NULL, 0);
1511 }
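/*
 * Illustrative sketch (not part of this file): a caller-supplied entry
 * point has the parallel_worker_main_type shape. By the time it runs,
 * ParallelWorkerMain() has already restored libraries, GUCs, snapshots,
 * and transaction state:
 *
 *		void
 *		my_parallel_main(dsm_segment *seg, shm_toc *toc)
 *		{
 *			void *space = shm_toc_lookup(toc, MYCODE_KEY_INPUT_DATA, false);
 *
 *			... divide up the work using ParallelWorkerNumber ...
 *		}
 *
 * (my_parallel_main and MYCODE_KEY_INPUT_DATA are hypothetical names.)
 */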
1512 
1513 /*
1514  * Update shared memory with the ending location of the last WAL record we
1515  * wrote, if it's greater than the value already stored there.
1516  */
1517 void
1518 ParallelWorkerReportLastRecEnd(XLogRecPtr last_xlog_end)
1519 {
1520  FixedParallelState *fps = MyFixedParallelState;
1521 
1522  Assert(fps != NULL);
1523  SpinLockAcquire(&fps->mutex);
1524  if (fps->last_xlog_end < last_xlog_end)
1525  fps->last_xlog_end = last_xlog_end;
1526  SpinLockRelease(&fps->mutex);
1527 }
1528 
1529 /*
1530  * Make sure the leader tries to read from our error queue one more time.
1531  * This guards against the case where we exit uncleanly without sending an
1532  * ErrorResponse to the leader, for example because some code calls proc_exit
1533  * directly.
1534  *
1535  * Also explicitly detach from dsm segment so that subsystems using
1536  * on_dsm_detach() have a chance to send stats before the stats subsystem is
1537  * shut down as part of a before_shmem_exit() hook.
1538  *
1539  * One might think this could instead be solved by carefully ordering the
1540  * attaching to dsm segments, so that the pgstats segments get detached from
1541  * later than the parallel query one. That turns out to not work because the
1542  * stats hash might need to grow which can cause new segments to be allocated,
1543  * which then will be detached from earlier.
1544  */
1545 static void
1546 ParallelWorkerShutdown(int code, Datum arg)
1547 {
1548  SendProcSignal(ParallelLeaderPid,
1549  PROCSIG_PARALLEL_MESSAGE,
1550  ParallelLeaderBackendId);
1551 
1552  dsm_detach((dsm_segment *) DatumGetPointer(arg));
1553 }
1554 
1555 /*
1556  * Look up (and possibly load) a parallel worker entry point function.
1557  *
1558  * For functions contained in the core code, we use library name "postgres"
1559  * and consult the InternalParallelWorkers array. External functions are
1560  * looked up, and loaded if necessary, using load_external_function().
1561  *
1562  * The point of this is to pass function names as strings across process
1563  * boundaries. We can't pass actual function addresses because of the
1564  * possibility that the function has been loaded at a different address
1565  * in a different process. This is obviously a hazard for functions in
1566  * loadable libraries, but it can happen even for functions in the core code
1567  * on platforms using EXEC_BACKEND (e.g., Windows).
1568  *
1569  * At some point it might be worthwhile to get rid of InternalParallelWorkers[]
1570  * in favor of applying load_external_function() for core functions too;
1571  * but that raises portability issues that are not worth addressing now.
1572  */
1573 static parallel_worker_main_type
1574 LookupParallelWorkerFunction(const char *libraryname, const char *funcname)
1575 {
1576  /*
1577  * If the function is to be loaded from postgres itself, search the
1578  * InternalParallelWorkers array.
1579  */
1580  if (strcmp(libraryname, "postgres") == 0)
1581  {
1582  int i;
1583 
1584  for (i = 0; i < lengthof(InternalParallelWorkers); i++)
1585  {
1586  if (strcmp(InternalParallelWorkers[i].fn_name, funcname) == 0)
1587  return InternalParallelWorkers[i].fn_addr;
1588  }
1589 
1590  /* We can only reach this by programming error. */
1591  elog(ERROR, "internal function \"%s\" not found", funcname);
1592  }
1593 
1594  /* Otherwise load from external library. */
1595  return (parallel_worker_main_type)
1596  load_external_function(libraryname, funcname, true, NULL);
1597 }
static pid_t ParallelLeaderPid
Definition: parallel.c:127
char bgw_extra[BGW_EXTRALEN]
Definition: bgworker.h:99
#define DatumGetUInt32(X)
Definition: postgres.h:530
int slock_t
Definition: s_lock.h:958
#define PARALLEL_ERROR_QUEUE_SIZE
Definition: parallel.c:55
#define AllocSetContextCreate
Definition: memutils.h:173
shm_toc * shm_toc_create(uint64 magic, void *address, Size nbytes)
Definition: shm_toc.c:40
int MyProcPid
Definition: globals.c:43
int errhint(const char *fmt,...)
Definition: elog.c:1156
BackendId MyBackendId
Definition: globals.c:84
Snapshot RestoreSnapshot(char *start_address)
Definition: snapmgr.c:2176
MemoryContext TopTransactionContext
Definition: mcxt.c:53
uint32 dsm_handle
Definition: dsm_impl.h:55
ParallelContext * CreateParallelContext(const char *library_name, const char *function_name, int nworkers)
Definition: parallel.c:164
void SerializeUncommittedEnums(void *space, Size size)
Definition: pg_enum.c:709
void SetUserIdAndSecContext(Oid userid, int sec_context)
Definition: miscinit.c:607
static void dlist_push_head(dlist_head *head, dlist_node *node)
Definition: ilist.h:300
PGPROC * parallel_leader_pgproc
Definition: parallel.c:92
XLogRecPtr XactLastRecEnd
Definition: xlog.c:354
void AttachSerializableXact(SerializableXactHandle handle)
Definition: predicate.c:5195
void shm_mq_detach(shm_mq_handle *mqh)
Definition: shm_mq.c:842
PGPROC * MyProc
Definition: proc.c:68
int64 TimestampTz
Definition: timestamp.h:39
#define PointerGetDatum(X)
Definition: postgres.h:600
dsm_segment * seg
Definition: parallel.h:43
static void WaitForParallelWorkersToExit(ParallelContext *pcxt)
Definition: parallel.c:876
#define dlist_foreach(iter, lhead)
Definition: ilist.h:526
void SerializeReindexState(Size maxsize, char *start_address)
Definition: index.c:4082
char * pstrdup(const char *in)
Definition: mcxt.c:1299
void CommitTransactionCommand(void)
Definition: xact.c:2959
shm_toc_estimator estimator
Definition: parallel.h:42
char * psprintf(const char *fmt,...)
Definition: psprintf.c:46
void EndParallelWorkerTransaction(void)
Definition: xact.c:5354
#define SpinLockInit(lock)
Definition: spin.h:60
void GetTempNamespaceState(Oid *tempNamespaceId, Oid *tempToastNamespaceId)
Definition: namespace.c:3358
dsm_segment * dsm_attach(dsm_handle h)
Definition: dsm.c:631
void _bt_parallel_build_main(dsm_segment *seg, shm_toc *toc)
Definition: nbtsort.c:1790
#define Min(x, y)
Definition: c.h:986
PGPROC * shm_mq_get_sender(shm_mq *mq)
Definition: shm_mq.c:258
Oid authenticated_user_id
Definition: parallel.c:85
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:109
#define IsolationUsesXactSnapshot()
Definition: xact.h:51
Snapshot GetActiveSnapshot(void)
Definition: snapmgr.c:801
dsm_handle dsm_segment_handle(dsm_segment *seg)
Definition: dsm.c:1087
int bgw_restart_time
Definition: bgworker.h:95
int errcode(int sqlerrcode)
Definition: elog.c:698
Oid temp_toast_namespace_id
Definition: parallel.c:89
#define BGWORKER_CLASS_PARALLEL
Definition: bgworker.h:68
void DetachSession(void)
Definition: session.c:201
BackgroundWorker * MyBgworkerEntry
Definition: postmaster.c:195
void MemoryContextReset(MemoryContext context)
Definition: mcxt.c:143
bool BecomeLockGroupMember(PGPROC *leader, int pid)
Definition: proc.c:1977
void PopActiveSnapshot(void)
Definition: snapmgr.c:774
int nknown_attached_workers
Definition: parallel.h:47
uint32 SubTransactionId
Definition: c.h:591
#define PARALLEL_KEY_RELMAPPER_STATE
Definition: parallel.c:77
Size shm_toc_estimate(shm_toc_estimator *e)
Definition: shm_toc.c:263
#define lengthof(array)
Definition: c.h:734
void RestoreTransactionSnapshot(Snapshot snapshot, void *source_pgproc)
Definition: snapmgr.c:2241
void SerializeTransactionState(Size maxsize, char *start_address)
Definition: xact.c:5259
parallel_worker_main_type fn_addr
Definition: parallel.c:136
const char * pq_getmsgrawstring(StringInfo msg)
Definition: pqformat.c:610
unsigned int Oid
Definition: postgres_ext.h:31
#define shm_toc_estimate_chunk(e, sz)
Definition: shm_toc.h:51
void SetLatch(Latch *latch)
Definition: latch.c:567
#define BGWORKER_SHMEM_ACCESS
Definition: bgworker.h:53
Snapshot GetTransactionSnapshot(void)
Definition: snapmgr.c:250
void InvalidateSystemCaches(void)
Definition: inval.c:701
int nworkers_to_launch
Definition: parallel.h:37
char bgw_function_name[BGW_MAXLEN]
Definition: bgworker.h:97
void ReinitializeParallelWorkers(ParallelContext *pcxt, int nworkers_to_launch)
Definition: parallel.c:525
void ResetLatch(Latch *latch)
Definition: latch.c:660
void pq_beginmessage(StringInfo buf, char msgtype)
Definition: pqformat.c:87
void RestoreComboCIDState(char *comboCIDstate)
Definition: combocid.c:342
signed int int32
Definition: c.h:429
int WaitLatch(Latch *latch, int wakeEvents, long timeout, uint32 wait_event_info)
Definition: latch.c:452
SubTransactionId subid
Definition: parallel.h:35
Oid GetCurrentRoleId(void)
Definition: miscinit.c:854
#define RESUME_INTERRUPTS()
Definition: miscadmin.h:133
ErrorContextCallback * error_context_stack
Definition: elog.c:93
void RestorePendingSyncs(char *startAddress)
Definition: storage.c:591
SerializableXactHandle serializable_xact_handle
Definition: parallel.c:97
volatile bool ParallelMessagePending
Definition: parallel.c:115
static void pq_sendint32(StringInfo buf, uint32 i)
Definition: pqformat.h:145
#define DSM_HANDLE_INVALID
Definition: dsm.h:23
#define sprintf
Definition: port.h:219
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:918
void WaitForParallelWorkersToFinish(ParallelContext *pcxt)
Definition: parallel.c:762
#define SpinLockAcquire(lock)
Definition: spin.h:62
void DestroyParallelContext(ParallelContext *pcxt)
Definition: parallel.c:916
int SendProcSignal(pid_t pid, ProcSignalReason reason, BackendId backendId)
Definition: procsignal.c:261
#define dlist_container(type, membername, ptr)
Definition: ilist.h:496
ParallelWorkerInfo * worker
Definition: parallel.h:46
Datum bgw_main_arg
Definition: bgworker.h:98
void pfree(void *pointer)
Definition: mcxt.c:1169
bool IsInParallelMode(void)
Definition: xact.c:1013
void SerializeLibraryState(Size maxsize, char *start_address)
Definition: dfmgr.c:726
#define ERROR
Definition: elog.h:46
BgwHandleStatus WaitForBackgroundWorkerShutdown(BackgroundWorkerHandle *handle)
Definition: bgworker.c:1168
void ParallelQueryMain(dsm_segment *seg, shm_toc *toc)
Oid GetAuthenticatedUserId(void)
Definition: miscinit.c:554
#define PARALLEL_KEY_TRANSACTION_SNAPSHOT
Definition: parallel.c:70
char * function_name
Definition: parallel.h:40
void SerializeSnapshot(Snapshot snapshot, char *start_address)
Definition: snapmgr.c:2117
int32 MyCancelKey
Definition: globals.c:47
#define PARALLEL_KEY_UNCOMMITTEDENUMS
Definition: parallel.c:78
void pq_parse_errornotice(StringInfo msg, ErrorData *edata)
Definition: pqmq.c:204
#define FATAL
Definition: elog.h:49
shm_mq * shm_mq_create(void *address, Size size)
Definition: shm_mq.c:178
void ExitParallelMode(void)
Definition: xact.c:993
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:195
#define PARALLEL_KEY_FIXED
Definition: parallel.c:65
void HandleParallelMessages(void)
Definition: parallel.c:1014
#define PARALLEL_KEY_ERROR_QUEUE
Definition: parallel.c:66
void SetTempNamespaceState(Oid tempNamespaceId, Oid tempToastNamespaceId)
Definition: namespace.c:3374
static void HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg)
Definition: parallel.c:1103
#define DSM_CREATE_NULL_IF_MAXSEGMENTS
Definition: dsm.h:20
void PushActiveSnapshot(Snapshot snap)
Definition: snapmgr.c:680
void GetUserIdAndSecContext(Oid *userid, int *sec_context)
Definition: miscinit.c:600
shm_mq_handle * error_mqh
Definition: parallel.h:28
#define PARALLEL_KEY_GUC
Definition: parallel.c:68
void RestoreUncommittedEnums(void *space)
Definition: pg_enum.c:741
int SetClientEncoding(int encoding)
Definition: mbutils.c:208
int ParallelWorkerNumber
Definition: parallel.c:112
void before_shmem_exit(pg_on_exit_callback function, Datum arg)
Definition: ipc.c:333
BackgroundWorkerHandle * bgwhandle
Definition: parallel.h:27
void SerializeRelationMap(Size maxSize, char *startAddress)
Definition: relmapper.c:657
Size EstimateGUCStateSpace(void)
Definition: guc.c:10621
void AttachSession(dsm_handle handle)
Definition: session.c:155
#define BGW_NEVER_RESTART
Definition: bgworker.h:85
#define shm_toc_initialize_estimator(e)
Definition: shm_toc.h:49
Size EstimateComboCIDStateSpace(void)
Definition: combocid.c:297
#define UInt32GetDatum(X)
Definition: postgres.h:537
MemoryContext CurrentMemoryContext
Definition: mcxt.c:42
static void ParallelWorkerShutdown(int code, Datum arg)
Definition: parallel.c:1546
static void dlist_delete(dlist_node *node)
Definition: ilist.h:358
int nworkers_launched
Definition: parallel.h:38
XLogRecPtr last_xlog_end
Definition: parallel.c:103
BgwHandleStatus
Definition: bgworker.h:103
void LaunchParallelWorkers(ParallelContext *pcxt)
Definition: parallel.c:539
Size EstimateReindexStateSpace(void)
Definition: index.c:4071
void shm_mq_set_sender(shm_mq *mq, PGPROC *proc)
Definition: shm_mq.c:225
void BecomeLockGroupLeader(void)
Definition: proc.c:1947
MemoryContext TopMemoryContext
Definition: mcxt.c:48
void * load_external_function(const char *filename, const char *funcname, bool signalNotFound, void **filehandle)
Definition: dfmgr.c:107
TimestampTz GetCurrentTransactionStartTimestamp(void)
Definition: xact.c:799
void ThrowErrorData(ErrorData *edata)
Definition: elog.c:1679
void initStringInfo(StringInfo str)
Definition: stringinfo.c:59
#define DLIST_STATIC_INIT(name)
Definition: ilist.h:248
#define WARNING
Definition: elog.h:40
void InitializeParallelDSM(ParallelContext *pcxt)
Definition: parallel.c:202
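
The entry points indexed here combine into a fixed leader-side lifecycle around InitializeParallelDSM(). A minimal sketch, assuming a hypothetical my_extension library exporting an entrypoint my_worker_main, and a caller-supplied nworkers (CreateParallelContext is declared in access/parallel.h):

    ParallelContext *pcxt;

    EnterParallelMode();
    pcxt = CreateParallelContext("my_extension", "my_worker_main", nworkers);
    /* shm_toc_estimate_chunk()/shm_toc_estimate_keys() calls go here */
    InitializeParallelDSM(pcxt);    /* creates the DSM segment, serializes state */
    /* shm_toc_allocate()/shm_toc_insert() calls go here */
    LaunchParallelWorkers(pcxt);
    /* ... leader performs its own share of the work ... */
    WaitForParallelWorkersToFinish(pcxt);
    DestroyParallelContext(pcxt);
    ExitParallelMode();
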
Size EstimateUncommittedEnumsSpace(void)
Definition: pg_enum.c:695
int elevel
Definition: elog.h:371
bool * known_attached_workers
Definition: parallel.h:48
bool ParallelContextActive(void)
Definition: parallel.c:990
#define SpinLockRelease(lock)
Definition: spin.h:64
#define dlist_head_element(type, membername, lhead)
Definition: ilist.h:506
Size EstimateSnapshotSpace(Snapshot snap)
Definition: snapmgr.c:2093
Size mul_size(Size s1, Size s2)
Definition: shmem.c:519
void * palloc0(Size size)
Definition: mcxt.c:1093
static parallel_worker_main_type LookupParallelWorkerFunction(const char *libraryname, const char *funcname)
Definition: parallel.c:1574
void RestoreLibraryState(char *start_address)
Definition: dfmgr.c:748
uintptr_t Datum
Definition: postgres.h:411
dsm_segment * dsm_create(Size size, int flags)
Definition: dsm.c:487
shm_toc * shm_toc_attach(uint64 magic, void *address)
Definition: shm_toc.c:64
Size EstimatePendingSyncsSpace(void)
Definition: storage.c:527
int GetDatabaseEncoding(void)
Definition: mbutils.c:1210
int BackendId
Definition: backendid.h:21
void SerializePendingSyncs(Size maxSize, char *startAddress)
Definition: storage.c:540
Oid MyDatabaseId
Definition: globals.c:88
Size EstimateLibraryStateSpace(void)
Definition: dfmgr.c:709
void ReinitializeParallelDSM(ParallelContext *pcxt)
Definition: parallel.c:475
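
ReinitializeParallelDSM() and ReinitializeParallelWorkers() let one ParallelContext drive several successive launches. A hedged sketch of the reuse pattern, where new_nworkers is a placeholder and may not exceed the count originally passed to CreateParallelContext():

    ReinitializeParallelDSM(pcxt);                  /* reset queues and shared state */
    ReinitializeParallelWorkers(pcxt, new_nworkers);
    LaunchParallelWorkers(pcxt);
    WaitForParallelWorkersToFinish(pcxt);
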
void shm_mq_set_handle(shm_mq_handle *mqh, BackgroundWorkerHandle *handle)
Definition: shm_mq.c:320
dlist_node * cur
Definition: ilist.h:161
void BackgroundWorkerInitializeConnectionByOid(Oid dboid, Oid useroid, uint32 flags)
Definition: postmaster.c:5752
#define PARALLEL_MAGIC
Definition: parallel.c:58
void pq_redirect_to_shm_mq(dsm_segment *seg, shm_mq_handle *mqh)
Definition: pqmq.c:51
void * SerializableXactHandle
Definition: predicate.h:37
void ParallelWorkerReportLastRecEnd(XLogRecPtr last_xlog_end)
Definition: parallel.c:1518
void TerminateBackgroundWorker(BackgroundWorkerHandle *handle)
Definition: bgworker.c:1207
#define ereport(elevel,...)
Definition: elog.h:157
pqsigfunc pqsignal(int signum, pqsigfunc handler)
Definition: signal.c:170
int pq_getmsgbyte(StringInfo msg)
Definition: pqformat.c:401
shm_mq_result
Definition: shm_mq.h:36
TimestampTz xact_ts
Definition: parallel.c:95
BackendId parallel_leader_backend_id
Definition: parallel.c:94
char * library_name
Definition: parallel.h:39
BackendId ParallelLeaderBackendId
Definition: globals.c:86
int force_parallel_mode
Definition: planner.c:70
void * dsm_segment_address(dsm_segment *seg)
Definition: dsm.c:1059
uint64 XLogRecPtr
Definition: xlogdefs.h:21
char bgw_name[BGW_MAXLEN]
Definition: bgworker.h:91
#define Assert(condition)
Definition: c.h:804
void StartParallelWorkerTransaction(char *tstatespace)
Definition: xact.c:5329
#define BGWORKER_BACKEND_DATABASE_CONNECTION
Definition: bgworker.h:60
SubTransactionId GetCurrentSubTransactionId(void)
Definition: xact.c:724
Size EstimateTransactionStateSpace(void)
Definition: xact.c:5231
void StartTransactionCommand(void)
Definition: xact.c:2858
#define PARALLEL_KEY_REINDEX_STATE
Definition: parallel.c:76
const char * fn_name
Definition: parallel.c:135
static bool dlist_is_empty(dlist_head *head)
Definition: ilist.h:289
#define BGW_MAXLEN
Definition: bgworker.h:86
size_t Size
Definition: c.h:540
BgWorkerStartTime bgw_start_time
Definition: bgworker.h:94
dlist_node node
Definition: parallel.h:34
void parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
Definition: vacuumlazy.c:4137
#define shm_toc_estimate_keys(e, cnt)
Definition: shm_toc.h:53
shm_mq * shm_mq_get_queue(shm_mq_handle *mqh)
Definition: shm_mq.c:904
bool RegisterDynamicBackgroundWorker(BackgroundWorker *worker, BackgroundWorkerHandle **handle)
Definition: bgworker.c:956
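
Parallel workers are launched through this dynamic background-worker machinery. A minimal registration sketch, with my_extension/my_worker_main as placeholder names; BgWorkerStart_ConsistentState is one of the standard bgw_start_time values from bgworker.h:

    BackgroundWorker worker;
    BackgroundWorkerHandle *handle;

    memset(&worker, 0, sizeof(worker));
    worker.bgw_flags = BGWORKER_SHMEM_ACCESS |
                       BGWORKER_BACKEND_DATABASE_CONNECTION;
    worker.bgw_start_time = BgWorkerStart_ConsistentState;
    worker.bgw_restart_time = BGW_NEVER_RESTART;
    snprintf(worker.bgw_name, BGW_MAXLEN, "my worker");
    snprintf(worker.bgw_type, BGW_MAXLEN, "my worker");
    snprintf(worker.bgw_library_name, BGW_MAXLEN, "my_extension");
    snprintf(worker.bgw_function_name, BGW_MAXLEN, "my_worker_main");
    worker.bgw_main_arg = (Datum) 0;
    worker.bgw_notify_pid = MyProcPid;      /* notify the registering backend */

    if (!RegisterDynamicBackgroundWorker(&worker, &handle))
        ereport(WARNING,
                (errmsg("could not register background worker")));
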
void EnterParallelMode(void)
Definition: xact.c:980
volatile sig_atomic_t InterruptPending
Definition: globals.c:30
Size EstimateRelationMapSpace(void)
Definition: relmapper.c:646
void * shm_toc_allocate(shm_toc *toc, Size nbytes)
Definition: shm_toc.c:88
char * context
Definition: elog.h:386
shm_mq_handle * shm_mq_attach(shm_mq *mq, dsm_segment *seg, BackgroundWorkerHandle *handle)
Definition: shm_mq.c:291
#define DatumGetPointer(X)
Definition: postgres.h:593
ErrorContextCallback * error_context_stack
Definition: parallel.h:41
static const struct @14 InternalParallelWorkers[]
void SetParallelStartTimestamps(TimestampTz xact_ts, TimestampTz stmt_ts)
Definition: xact.c:788
#define PARALLEL_KEY_TRANSACTION_STATE
Definition: parallel.c:72
char bgw_type[BGW_MAXLEN]
Definition: bgworker.h:92
void dsm_detach(dsm_segment *seg)
Definition: dsm.c:769
void shm_toc_insert(shm_toc *toc, uint64 key, void *address)
Definition: shm_toc.c:171
void RestoreReindexState(void *reindexstate)
Definition: index.c:4100
int errmsg(const char *fmt,...)
Definition: elog.c:909
void(* parallel_worker_main_type)(dsm_segment *seg, shm_toc *toc)
Definition: parallel.h:23
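
Every entrypoint resolved through bgw_library_name/bgw_function_name must match this signature. A skeleton, with MY_KEY and MySharedState as hypothetical names:

    void
    my_worker_main(dsm_segment *seg, shm_toc *toc)
    {
        MySharedState *state = shm_toc_lookup(toc, MY_KEY, false);

        /* ... perform this worker's share of the work ... */
    }
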
void pq_endmessage(StringInfo buf)
Definition: pqformat.c:298
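
pq_beginmessage(), the pq_send* helpers and pq_endmessage() build and emit one protocol message; in a worker the same calls apply after pq_redirect_to_shm_mq() has pointed the output at a shared-memory queue. A toy sketch, where 'X' and the payload are placeholders:

    StringInfoData msgbuf;

    pq_beginmessage(&msgbuf, 'X');      /* placeholder message type byte */
    pq_sendint32(&msgbuf, 42);          /* placeholder payload */
    pq_endmessage(&msgbuf);             /* transmits and frees the buffer */
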
void ParallelWorkerMain(Datum main_arg)
Definition: parallel.c:1251
pid_t bgw_notify_pid
Definition: bgworker.h:100
static FixedParallelState * MyFixedParallelState
Definition: parallel.c:121
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:863
void SetCurrentRoleId(Oid roleid, bool is_superuser)
Definition: miscinit.c:875
#define HOLD_INTERRUPTS()
Definition: miscadmin.h:131
#define elog(elevel,...)
Definition: elog.h:232
bool InitializingParallelWorker
Definition: parallel.c:118
int i
TimestampTz stmt_ts
Definition: parallel.c:96
void RestoreRelationMap(char *startAddress)
Definition: relmapper.c:674
#define PARALLEL_KEY_SESSION_DSM
Definition: parallel.c:74
#define BUFFERALIGN(LEN)
Definition: c.h:759
void pq_set_parallel_leader(pid_t pid, BackendId backend_id)
Definition: pqmq.c:76
void * arg
struct Latch * MyLatch
Definition: globals.c:57
void HandleParallelMessageInterrupt(void)
Definition: parallel.c:1003
unsigned int pq_getmsgint(StringInfo msg, int b)
Definition: pqformat.c:417
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:120
pid_t parallel_leader_pid
Definition: parallel.c:93
void shm_mq_set_receiver(shm_mq *mq, PGPROC *proc)
Definition: shm_mq.c:207
static dlist_head pcxt_list
Definition: parallel.c:124
void pq_getmsgend(StringInfo msg)
Definition: pqformat.c:637
#define pq_putmessage(msgtype, s, len)
Definition: libpq.h:49
static void status(const char *fmt,...) pg_attribute_printf(1, 2)
Definition: pg_regress.c:229
struct FixedParallelState FixedParallelState
dsm_handle GetSessionDsmHandle(void)
Definition: session.c:70
shm_mq_result shm_mq_receive(shm_mq_handle *mqh, Size *nbytesp, void **datap, bool nowait)
Definition: shm_mq.c:574
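
These shm_mq calls are how the per-worker error queues are wired up: the leader creates each queue inside the DSM segment and attaches as receiver, while the worker attaches as sender. A hedged leader-side sketch, assuming queue_space was previously carved out of the segment with shm_toc_allocate():

    shm_mq     *mq;
    shm_mq_handle *mqh;
    shm_mq_result res;
    Size        nbytes;
    void       *data;

    mq = shm_mq_create(queue_space, (Size) PARALLEL_ERROR_QUEUE_SIZE);
    shm_mq_set_receiver(mq, MyProc);
    mqh = shm_mq_attach(mq, pcxt->seg, NULL);   /* handle set later, if any */

    res = shm_mq_receive(mqh, &nbytes, &data, true);    /* nowait poll */
    if (res == SHM_MQ_SUCCESS)
    {
        /* a complete nbytes-long message is now at *data */
    }
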
char bgw_library_name[BGW_MAXLEN]
Definition: bgworker.h:96
void WaitForParallelWorkersToAttach(ParallelContext *pcxt)
Definition: parallel.c:659
#define PARALLEL_KEY_PENDING_SYNCS
Definition: parallel.c:75
bool session_auth_is_superuser
Definition: guc.c:594
SerializableXactHandle ShareSerializableXact(void)
Definition: predicate.c:5186
void AtEOXact_Parallel(bool isCommit)
Definition: parallel.c:1234
#define PARALLEL_KEY_ENTRYPOINT
Definition: parallel.c:73
#define snprintf
Definition: port.h:217
#define PARALLEL_KEY_COMBO_CID
Definition: parallel.c:69
#define WL_LATCH_SET
Definition: latch.h:125
#define _(x)
Definition: elog.c:89
void AtEOSubXact_Parallel(bool isCommit, SubTransactionId mySubId)
Definition: parallel.c:1215
void SerializeGUCState(Size maxsize, char *start_address)
Definition: guc.c:10764
void appendBinaryStringInfo(StringInfo str, const char *data, int datalen)
Definition: stringinfo.c:227
#define PARALLEL_KEY_ACTIVE_SNAPSHOT
Definition: parallel.c:71
void * shm_toc_lookup(shm_toc *toc, uint64 key, bool noError)
Definition: shm_toc.c:232
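
The shm_toc estimator and allocator calls above follow a fixed two-phase pattern around InitializeParallelDSM(). A sketch with hypothetical MY_KEY and MySharedState names:

    MySharedState *state;

    /* Phase 1, before InitializeParallelDSM(): reserve space and a key. */
    shm_toc_estimate_chunk(&pcxt->estimator, sizeof(MySharedState));
    shm_toc_estimate_keys(&pcxt->estimator, 1);

    /* Phase 2, after InitializeParallelDSM(): allocate and publish. */
    state = shm_toc_allocate(pcxt->toc, sizeof(MySharedState));
    shm_toc_insert(pcxt->toc, MY_KEY, state);

    /* Workers retrieve it from the TOC passed to their entrypoint. */
    state = shm_toc_lookup(toc, MY_KEY, false);     /* false => error if missing */
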
#define die(msg)
Definition: pg_test_fsync.c:97
void SerializeComboCIDState(Size maxsize, char *start_address)
Definition: combocid.c:316
#define PARALLEL_KEY_LIBRARY
Definition: parallel.c:67
BgwHandleStatus GetBackgroundWorkerPid(BackgroundWorkerHandle *handle, pid_t *pidp)
Definition: bgworker.c:1068
void RestoreGUCState(void *gucstate)
Definition: guc.c:10850
shm_toc * toc
Definition: parallel.h:45
#define WL_EXIT_ON_PM_DEATH
Definition: latch.h:130
void NotifyMyFrontEnd(const char *channel, const char *payload, int32 srcPid)
Definition: async.c:2278
TimestampTz GetCurrentStatementStartTimestamp(void)
Definition: xact.c:808
void * private_memory
Definition: parallel.h:44
void BackgroundWorkerUnblockSignals(void)
Definition: postmaster.c:5781