parallel.c
1 /*-------------------------------------------------------------------------
2  *
3  * parallel.c
4  * Infrastructure for launching parallel workers
5  *
6  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  * IDENTIFICATION
10  * src/backend/access/transam/parallel.c
11  *
12  *-------------------------------------------------------------------------
13  */
14 
15 #include "postgres.h"
16 
17 #include "access/parallel.h"
18 #include "access/xact.h"
19 #include "access/xlog.h"
20 #include "catalog/namespace.h"
21 #include "commands/async.h"
22 #include "libpq/libpq.h"
23 #include "libpq/pqformat.h"
24 #include "libpq/pqmq.h"
25 #include "miscadmin.h"
26 #include "optimizer/planmain.h"
27 #include "pgstat.h"
28 #include "storage/ipc.h"
29 #include "storage/sinval.h"
30 #include "storage/spin.h"
31 #include "tcop/tcopprot.h"
32 #include "utils/combocid.h"
33 #include "utils/guc.h"
34 #include "utils/inval.h"
35 #include "utils/memutils.h"
36 #include "utils/resowner.h"
37 #include "utils/snapmgr.h"
38 
39 
40 /*
41  * We don't want to waste a lot of memory on an error queue which, most of
42  * the time, will process only a handful of small messages. However, it is
43  * desirable to make it large enough that a typical ErrorResponse can be sent
44  * without blocking. That way, a worker that errors out can write the whole
45  * message into the queue and terminate without waiting for the user backend.
46  */
47 #define PARALLEL_ERROR_QUEUE_SIZE 16384
48 
49 /* Magic number for parallel context TOC. */
50 #define PARALLEL_MAGIC 0x50477c7c
51 
52 /*
53  * Magic numbers for parallel state sharing. Higher-level code should use
54  * smaller values, leaving these very large ones for use by this module.
55  */
56 #define PARALLEL_KEY_FIXED UINT64CONST(0xFFFFFFFFFFFF0001)
57 #define PARALLEL_KEY_ERROR_QUEUE UINT64CONST(0xFFFFFFFFFFFF0002)
58 #define PARALLEL_KEY_LIBRARY UINT64CONST(0xFFFFFFFFFFFF0003)
59 #define PARALLEL_KEY_GUC UINT64CONST(0xFFFFFFFFFFFF0004)
60 #define PARALLEL_KEY_COMBO_CID UINT64CONST(0xFFFFFFFFFFFF0005)
61 #define PARALLEL_KEY_TRANSACTION_SNAPSHOT UINT64CONST(0xFFFFFFFFFFFF0006)
62 #define PARALLEL_KEY_ACTIVE_SNAPSHOT UINT64CONST(0xFFFFFFFFFFFF0007)
63 #define PARALLEL_KEY_TRANSACTION_STATE UINT64CONST(0xFFFFFFFFFFFF0008)
64 #define PARALLEL_KEY_EXTENSION_TRAMPOLINE UINT64CONST(0xFFFFFFFFFFFF0009)
65 
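/*
 * Illustrative sketch (not part of this file): code layered on top of this
 * module keys its own TOC entries with small values, so they can never
 * collide with the reserved keys above.  MYCODE_KEY_INPUT and input are
 * hypothetical names.
 *
 *	#define MYCODE_KEY_INPUT	UINT64CONST(0x0000000000000001)
 *
 *	shm_toc_insert(pcxt->toc, MYCODE_KEY_INPUT, input);	// in the leader
 *	input = shm_toc_lookup(toc, MYCODE_KEY_INPUT);		// in a worker
 */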
66 /* Fixed-size parallel state. */
67 typedef struct FixedParallelState
68 {
69  /* Fixed-size state that workers must restore. */
70  Oid database_id;
71  Oid authenticated_user_id;
72  Oid current_user_id;
73  Oid temp_namespace_id;
74  Oid temp_toast_namespace_id;
75  int sec_context;
76  PGPROC *parallel_master_pgproc;
77  pid_t parallel_master_pid;
78  BackendId parallel_master_backend_id;
79 
80  /* Entrypoint for parallel workers. */
81  parallel_worker_main_type entrypoint;
82 
83  /* Mutex protects remaining fields. */
84  slock_t mutex;
85 
86  /* Maximum XactLastRecEnd of any worker. */
87  XLogRecPtr last_xlog_end;
88 } FixedParallelState;
89 
90 /*
91  * Our parallel worker number. We initialize this to -1, meaning that we are
92  * not a parallel worker. In parallel workers, it will be set to a value >= 0
93  * and < the number of workers before any user code is invoked; each parallel
94  * worker will get a different parallel worker number.
95  */
96 int ParallelWorkerNumber = -1;
97 
98 /* Is there a parallel message pending which we need to receive? */
99 volatile bool ParallelMessagePending = false;
100 
101 /* Are we initializing a parallel worker? */
102 bool InitializingParallelWorker = false;
103 
104 /* Pointer to our fixed parallel state. */
105 static FixedParallelState *MyFixedParallelState;
106 
107 /* List of active parallel contexts. */
108 static dlist_head pcxt_list = DLIST_STATIC_INIT(pcxt_list);
109 
110 /* Private functions. */
111 static void HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg);
112 static void ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc);
113 static void ParallelWorkerMain(Datum main_arg);
114 static void WaitForParallelWorkersToExit(ParallelContext *pcxt);
115 
116 
117 /*
118  * Establish a new parallel context. This should be done after entering
119  * parallel mode, and (unless there is an error) the context should be
120  * destroyed before exiting the current subtransaction.
121  */
122 ParallelContext *
123 CreateParallelContext(parallel_worker_main_type entrypoint, int nworkers)
124 {
125  MemoryContext oldcontext;
126  ParallelContext *pcxt;
127 
128  /* It is unsafe to create a parallel context if not in parallel mode. */
129  Assert(IsInParallelMode());
130 
131  /* Number of workers should be non-negative. */
132  Assert(nworkers >= 0);
133 
134  /*
135  * If dynamic shared memory is not available, we won't be able to use
136  * background workers.
137  */
138  if (dynamic_shared_memory_type == DSM_IMPL_NONE)
139  nworkers = 0;
140 
141  /*
142  * If we are running under serializable isolation, we can't use parallel
143  * workers, at least not until somebody enhances that mechanism to be
144  * parallel-aware.
145  */
146  if (IsolationIsSerializable())
147  nworkers = 0;
148 
149  /* We might be running in a short-lived memory context. */
150  oldcontext = MemoryContextSwitchTo(TopTransactionContext);
151 
152  /* Initialize a new ParallelContext. */
153  pcxt = palloc0(sizeof(ParallelContext));
154  pcxt->subid = GetCurrentSubTransactionId();
155  pcxt->nworkers = nworkers;
156  pcxt->entrypoint = entrypoint;
157  pcxt->error_context_stack = error_context_stack;
158  shm_toc_initialize_estimator(&pcxt->estimator);
159  dlist_push_head(&pcxt_list, &pcxt->node);
160 
161  /* Restore previous memory context. */
162  MemoryContextSwitchTo(oldcontext);
163 
164  return pcxt;
165 }
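/*
 * Illustrative sketch (not part of this file): the typical leader-side call
 * sequence around a ParallelContext.  "do_parallel_thing" is a hypothetical
 * caller-supplied entrypoint.
 *
 *	EnterParallelMode();
 *	pcxt = CreateParallelContext(do_parallel_thing, nworkers);
 *	... shm_toc_estimate_chunk()/shm_toc_estimate_keys() for caller state ...
 *	InitializeParallelDSM(pcxt);
 *	... shm_toc_allocate()/shm_toc_insert() the caller state ...
 *	LaunchParallelWorkers(pcxt);
 *	... leader participates in the work, if it wishes ...
 *	WaitForParallelWorkersToFinish(pcxt);
 *	... read back any results the workers left in the segment ...
 *	DestroyParallelContext(pcxt);
 *	ExitParallelMode();
 */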
166 
167 /*
168  * Establish a new parallel context that calls a function provided by an
169  * extension. This works around the fact that the library might get mapped
170  * at a different address in each backend.
171  */
172 ParallelContext *
173 CreateParallelContextForExternalFunction(char *library_name,
174  char *function_name,
175  int nworkers)
176 {
177  MemoryContext oldcontext;
178  ParallelContext *pcxt;
179 
180  /* We might be running in a very short-lived memory context. */
181  oldcontext = MemoryContextSwitchTo(TopTransactionContext);
182 
183  /* Create the context. */
184  pcxt = CreateParallelContext(ParallelExtensionTrampoline, nworkers);
185  pcxt->library_name = pstrdup(library_name);
186  pcxt->function_name = pstrdup(function_name);
187 
188  /* Restore previous memory context. */
189  MemoryContextSwitchTo(oldcontext);
190 
191  return pcxt;
192 }
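/*
 * Illustrative sketch (not part of this file): an extension would use the
 * variant above so that each worker resolves the entrypoint by name after
 * loading the library.  "my_extension" and "my_worker_main" are hypothetical.
 *
 *	pcxt = CreateParallelContextForExternalFunction("my_extension",
 *													"my_worker_main",
 *													nworkers);
 *
 * The named function must have the parallel_worker_main_type signature:
 *
 *	void my_worker_main(dsm_segment *seg, shm_toc *toc);
 */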
193 
194 /*
195  * Establish the dynamic shared memory segment for a parallel context and
196  * copy state and other bookkeeping information that will be needed by
197  * parallel workers into it.
198  */
199 void
200 InitializeParallelDSM(ParallelContext *pcxt)
201 {
202  MemoryContext oldcontext;
203  Size library_len = 0;
204  Size guc_len = 0;
205  Size combocidlen = 0;
206  Size tsnaplen = 0;
207  Size asnaplen = 0;
208  Size tstatelen = 0;
209  Size segsize = 0;
210  int i;
211  FixedParallelState *fps;
212  Snapshot transaction_snapshot = GetTransactionSnapshot();
213  Snapshot active_snapshot = GetActiveSnapshot();
214 
215  /* We might be running in a very short-lived memory context. */
216  oldcontext = MemoryContextSwitchTo(TopTransactionContext);
217 
218  /* Allow space to store the fixed-size parallel state. */
219  shm_toc_estimate_chunk(&pcxt->estimator, sizeof(FixedParallelState));
220  shm_toc_estimate_keys(&pcxt->estimator, 1);
221 
222  /*
223  * Normally, the user will have requested at least one worker process, but
224  * if by chance they have not, we can skip a bunch of things here.
225  */
226  if (pcxt->nworkers > 0)
227  {
228  /* Estimate space for various kinds of state sharing. */
229  library_len = EstimateLibraryStateSpace();
230  shm_toc_estimate_chunk(&pcxt->estimator, library_len);
231  guc_len = EstimateGUCStateSpace();
232  shm_toc_estimate_chunk(&pcxt->estimator, guc_len);
233  combocidlen = EstimateComboCIDStateSpace();
234  shm_toc_estimate_chunk(&pcxt->estimator, combocidlen);
235  tsnaplen = EstimateSnapshotSpace(transaction_snapshot);
236  shm_toc_estimate_chunk(&pcxt->estimator, tsnaplen);
237  asnaplen = EstimateSnapshotSpace(active_snapshot);
238  shm_toc_estimate_chunk(&pcxt->estimator, asnaplen);
239  tstatelen = EstimateTransactionStateSpace();
240  shm_toc_estimate_chunk(&pcxt->estimator, tstatelen);
241  /* If you add more chunks here, you probably need to add keys. */
242  shm_toc_estimate_keys(&pcxt->estimator, 6);
243 
244  /* Estimate space need for error queues. */
245  StaticAssertStmt(BUFFERALIGN(PARALLEL_ERROR_QUEUE_SIZE) ==
246  PARALLEL_ERROR_QUEUE_SIZE,
247  "parallel error queue size not buffer-aligned");
248  shm_toc_estimate_chunk(&pcxt->estimator,
249  mul_size(PARALLEL_ERROR_QUEUE_SIZE,
250  pcxt->nworkers));
251  shm_toc_estimate_keys(&pcxt->estimator, 1);
252 
253  /* Estimate how much we'll need for extension entrypoint info. */
254  if (pcxt->library_name != NULL)
255  {
257  Assert(pcxt->function_name != NULL);
258  shm_toc_estimate_chunk(&pcxt->estimator, strlen(pcxt->library_name)
259  + strlen(pcxt->function_name) + 2);
260  shm_toc_estimate_keys(&pcxt->estimator, 1);
261  }
262  }
263 
264  /*
265  * Create DSM and initialize with new table of contents. But if the user
266  * didn't request any workers, then don't bother creating a dynamic shared
267  * memory segment; instead, just use backend-private memory.
268  *
269  * Also, if we can't create a dynamic shared memory segment because the
270  * maximum number of segments have already been created, then fall back to
271  * backend-private memory, and plan not to use any workers. We hope this
272  * won't happen very often, but it's better to abandon the use of
273  * parallelism than to fail outright.
274  */
275  segsize = shm_toc_estimate(&pcxt->estimator);
276  if (pcxt->nworkers > 0)
277  pcxt->seg = dsm_create(segsize, DSM_CREATE_NULL_IF_MAXSEGMENTS);
278  if (pcxt->seg != NULL)
279  pcxt->toc = shm_toc_create(PARALLEL_MAGIC,
280  dsm_segment_address(pcxt->seg),
281  segsize);
282  else
283  {
284  pcxt->nworkers = 0;
285  pcxt->private_memory = MemoryContextAlloc(TopMemoryContext, segsize);
286  pcxt->toc = shm_toc_create(PARALLEL_MAGIC, pcxt->private_memory,
287  segsize);
288  }
289 
290  /* Initialize fixed-size state in shared memory. */
291  fps = (FixedParallelState *)
292  shm_toc_allocate(pcxt->toc, sizeof(FixedParallelState));
293  fps->database_id = MyDatabaseId;
294  fps->authenticated_user_id = GetAuthenticatedUserId();
295  GetUserIdAndSecContext(&fps->current_user_id, &fps->sec_context);
296  GetTempNamespaceState(&fps->temp_namespace_id,
297  &fps->temp_toast_namespace_id);
298  fps->parallel_master_pgproc = MyProc;
299  fps->parallel_master_pid = MyProcPid;
300  fps->parallel_master_backend_id = MyBackendId;
301  fps->entrypoint = pcxt->entrypoint;
302  SpinLockInit(&fps->mutex);
303  fps->last_xlog_end = 0;
304  shm_toc_insert(pcxt->toc, PARALLEL_KEY_FIXED, fps);
305 
306  /* We can skip the rest of this if we're not budgeting for any workers. */
307  if (pcxt->nworkers > 0)
308  {
309  char *libraryspace;
310  char *gucspace;
311  char *combocidspace;
312  char *tsnapspace;
313  char *asnapspace;
314  char *tstatespace;
315  char *error_queue_space;
316 
317  /* Serialize shared libraries we have loaded. */
318  libraryspace = shm_toc_allocate(pcxt->toc, library_len);
319  SerializeLibraryState(library_len, libraryspace);
320  shm_toc_insert(pcxt->toc, PARALLEL_KEY_LIBRARY, libraryspace);
321 
322  /* Serialize GUC settings. */
323  gucspace = shm_toc_allocate(pcxt->toc, guc_len);
324  SerializeGUCState(guc_len, gucspace);
325  shm_toc_insert(pcxt->toc, PARALLEL_KEY_GUC, gucspace);
326 
327  /* Serialize combo CID state. */
328  combocidspace = shm_toc_allocate(pcxt->toc, combocidlen);
329  SerializeComboCIDState(combocidlen, combocidspace);
330  shm_toc_insert(pcxt->toc, PARALLEL_KEY_COMBO_CID, combocidspace);
331 
332  /* Serialize transaction snapshot and active snapshot. */
333  tsnapspace = shm_toc_allocate(pcxt->toc, tsnaplen);
334  SerializeSnapshot(transaction_snapshot, tsnapspace);
335  shm_toc_insert(pcxt->toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT,
336  tsnapspace);
337  asnapspace = shm_toc_allocate(pcxt->toc, asnaplen);
338  SerializeSnapshot(active_snapshot, asnapspace);
339  shm_toc_insert(pcxt->toc, PARALLEL_KEY_ACTIVE_SNAPSHOT, asnapspace);
340 
341  /* Serialize transaction state. */
342  tstatespace = shm_toc_allocate(pcxt->toc, tstatelen);
343  SerializeTransactionState(tstatelen, tstatespace);
344  shm_toc_insert(pcxt->toc, PARALLEL_KEY_TRANSACTION_STATE, tstatespace);
345 
346  /* Allocate space for worker information. */
347  pcxt->worker = palloc0(sizeof(ParallelWorkerInfo) * pcxt->nworkers);
348 
349  /*
350  * Establish error queues in dynamic shared memory.
351  *
352  * These queues should be used only for transmitting ErrorResponse,
353  * NoticeResponse, and NotifyResponse protocol messages. Tuple data
354  * should be transmitted via separate (possibly larger?) queues.
355  */
356  error_queue_space =
357  shm_toc_allocate(pcxt->toc,
358  mul_size(PARALLEL_ERROR_QUEUE_SIZE,
359  pcxt->nworkers));
360  for (i = 0; i < pcxt->nworkers; ++i)
361  {
362  char *start;
363  shm_mq *mq;
364 
365  start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
366  mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
367  shm_mq_set_receiver(mq, MyProc);
368  pcxt->worker[i].error_mqh = shm_mq_attach(mq, pcxt->seg, NULL);
369  }
370  shm_toc_insert(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE, error_queue_space);
371 
372  /* Serialize extension entrypoint information. */
373  if (pcxt->library_name != NULL)
374  {
375  Size lnamelen = strlen(pcxt->library_name);
376  char *extensionstate;
377 
378  extensionstate = shm_toc_allocate(pcxt->toc, lnamelen
379  + strlen(pcxt->function_name) + 2);
380  strcpy(extensionstate, pcxt->library_name);
381  strcpy(extensionstate + lnamelen + 1, pcxt->function_name);
382  shm_toc_insert(pcxt->toc, PARALLEL_KEY_EXTENSION_TRAMPOLINE,
383  extensionstate);
384  }
385  }
386 
387  /* Restore previous memory context. */
388  MemoryContextSwitchTo(oldcontext);
389 }
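/*
 * Illustrative sketch (not part of this file): a caller's own chunks follow
 * the same two-phase pattern as the bookkeeping above -- estimate before
 * InitializeParallelDSM() sizes the segment, allocate and insert afterwards.
 * MYCODE_KEY_INPUT, input_size, input_data, and space are hypothetical.
 *
 *	pcxt = CreateParallelContext(do_parallel_thing, nworkers);
 *	shm_toc_estimate_chunk(&pcxt->estimator, input_size);
 *	shm_toc_estimate_keys(&pcxt->estimator, 1);
 *	InitializeParallelDSM(pcxt);
 *	space = shm_toc_allocate(pcxt->toc, input_size);
 *	memcpy(space, input_data, input_size);
 *	shm_toc_insert(pcxt->toc, MYCODE_KEY_INPUT, space);
 */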
390 
391 /*
392  * Reinitialize the dynamic shared memory segment for a parallel context such
393  * that we could launch workers for it again.
394  */
395 void
396 ReinitializeParallelDSM(ParallelContext *pcxt)
397 {
398  FixedParallelState *fps;
399  char *error_queue_space;
400  int i;
401 
402  /* Wait for any old workers to exit. */
403  if (pcxt->nworkers_launched > 0)
404  {
405  WaitForParallelWorkersToFinish(pcxt);
406  WaitForParallelWorkersToExit(pcxt);
407  pcxt->nworkers_launched = 0;
408  }
409 
410  /* Reset a few bits of fixed parallel state to a clean state. */
411  fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED);
412  fps->last_xlog_end = 0;
413 
414  /* Recreate error queues. */
415  error_queue_space =
416  shm_toc_lookup(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE);
417  for (i = 0; i < pcxt->nworkers; ++i)
418  {
419  char *start;
420  shm_mq *mq;
421 
422  start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
423  mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
424  shm_mq_set_receiver(mq, MyProc);
425  pcxt->worker[i].error_mqh = shm_mq_attach(mq, pcxt->seg, NULL);
426  }
427 }
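/*
 * Illustrative sketch (not part of this file): re-using one ParallelContext
 * for several passes (for example, rescanning a parallel query) repeats only
 * the launch/wait steps; the segment is built once.  npasses is hypothetical.
 *
 *	for (pass = 0; pass < npasses; pass++)
 *	{
 *		if (pass > 0)
 *			ReinitializeParallelDSM(pcxt);	// waits for old workers, resets queues
 *		LaunchParallelWorkers(pcxt);
 *		... cooperate with the workers ...
 *		WaitForParallelWorkersToFinish(pcxt);
 *	}
 *	DestroyParallelContext(pcxt);
 */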
428 
429 /*
430  * Launch parallel workers.
431  */
432 void
433 LaunchParallelWorkers(ParallelContext *pcxt)
434 {
435  MemoryContext oldcontext;
436  BackgroundWorker worker;
437  int i;
438  bool any_registrations_failed = false;
439 
440  /* Skip this if we have no workers. */
441  if (pcxt->nworkers == 0)
442  return;
443 
444  /* We need to be a lock group leader. */
445  BecomeLockGroupLeader();
446 
447  /* If we do have workers, we'd better have a DSM segment. */
448  Assert(pcxt->seg != NULL);
449 
450  /* We might be running in a short-lived memory context. */
451  oldcontext = MemoryContextSwitchTo(TopTransactionContext);
452 
453  /* Configure a worker. */
454  snprintf(worker.bgw_name, BGW_MAXLEN, "parallel worker for PID %d",
455  MyProcPid);
456  worker.bgw_flags =
457  BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION
458  | BGWORKER_CLASS_PARALLEL;
459  worker.bgw_start_time = BgWorkerStart_ConsistentState;
460  worker.bgw_restart_time = BGW_NEVER_RESTART;
461  worker.bgw_main = ParallelWorkerMain;
462  worker.bgw_main_arg = UInt32GetDatum(dsm_segment_handle(pcxt->seg));
463  worker.bgw_notify_pid = MyProcPid;
464  memset(&worker.bgw_extra, 0, BGW_EXTRALEN);
465 
466  /*
467  * Start workers.
468  *
469  * The caller must be able to tolerate ending up with fewer workers than
470  * expected, so there is no need to throw an error here if registration
471  * fails. It wouldn't help much anyway, because registering the worker in
472  * no way guarantees that it will start up and initialize successfully.
473  */
474  for (i = 0; i < pcxt->nworkers; ++i)
475  {
476  memcpy(worker.bgw_extra, &i, sizeof(int));
477  if (!any_registrations_failed &&
478  RegisterDynamicBackgroundWorker(&worker,
479  &pcxt->worker[i].bgwhandle))
480  {
481  shm_mq_set_handle(pcxt->worker[i].error_mqh,
482  pcxt->worker[i].bgwhandle);
483  pcxt->nworkers_launched++;
484  }
485  else
486  {
487  /*
488  * If we weren't able to register the worker, then we've bumped up
489  * against the max_worker_processes limit, and future
490  * registrations will probably fail too, so arrange to skip them.
491  * But we still have to execute this code for the remaining slots
492  * to make sure that we forget about the error queues we budgeted
493  * for those workers. Otherwise, we'll wait for them to start,
494  * but they never will.
495  */
496  any_registrations_failed = true;
497  pcxt->worker[i].bgwhandle = NULL;
498  pfree(pcxt->worker[i].error_mqh);
499  pcxt->worker[i].error_mqh = NULL;
500  }
501  }
502 
503  /* Restore previous memory context. */
504  MemoryContextSwitchTo(oldcontext);
505 }
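/*
 * Illustrative sketch (not part of this file): because registration can fail,
 * callers inspect nworkers_launched afterwards and must be prepared to do all
 * of the work themselves.  The helper functions named here are hypothetical.
 *
 *	LaunchParallelWorkers(pcxt);
 *	if (pcxt->nworkers_launched == 0)
 *		do_the_work_locally();		// fall back to a serial plan of attack
 *	else
 *		cooperate_with_workers();	// the leader can still help out
 */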
506 
507 /*
508  * Wait for all workers to finish computing.
509  *
510  * Even if the parallel operation seems to have completed successfully, it's
511  * important to call this function afterwards. We must not miss any errors
512  * the workers may have thrown during the parallel operation, or any that they
513  * may yet throw while shutting down.
514  *
515  * Also, we want to update our notion of XactLastRecEnd based on worker
516  * feedback.
517  */
518 void
519 WaitForParallelWorkersToFinish(ParallelContext *pcxt)
520 {
521  for (;;)
522  {
523  bool anyone_alive = false;
524  int i;
525 
526  /*
527  * This will process any parallel messages that are pending, which may
528  * change the outcome of the loop that follows. It may also throw an
529  * error propagated from a worker.
530  */
531  CHECK_FOR_INTERRUPTS();
532 
533  for (i = 0; i < pcxt->nworkers_launched; ++i)
534  {
535  if (pcxt->worker[i].error_mqh != NULL)
536  {
537  anyone_alive = true;
538  break;
539  }
540  }
541 
542  if (!anyone_alive)
543  break;
544 
545  WaitLatch(&MyProc->procLatch, WL_LATCH_SET, -1,
546  WAIT_EVENT_PARALLEL_FINISH);
547  ResetLatch(&MyProc->procLatch);
548  }
549 
550  if (pcxt->toc != NULL)
551  {
552  FixedParallelState *fps;
553 
554  fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED);
555  if (fps->last_xlog_end > XactLastRecEnd)
556  XactLastRecEnd = fps->last_xlog_end;
557  }
558 }
559 
560 /*
561  * Wait for all workers to exit.
562  *
563  * This function ensures that the workers have completely shut down. The
564  * difference between WaitForParallelWorkersToFinish and this function is
565  * that the former only ensures that the last message sent by a worker has
566  * been received by the master, whereas this waits for the actual shutdown.
567  */
568 static void
569 WaitForParallelWorkersToExit(ParallelContext *pcxt)
570 {
571  int i;
572 
573  /* Wait until the workers actually die. */
574  for (i = 0; i < pcxt->nworkers_launched; ++i)
575  {
576  BgwHandleStatus status;
577 
578  if (pcxt->worker == NULL || pcxt->worker[i].bgwhandle == NULL)
579  continue;
580 
581  status = WaitForBackgroundWorkerShutdown(pcxt->worker[i].bgwhandle);
582 
583  /*
584  * If the postmaster kicked the bucket, we have no chance of cleaning
585  * up safely -- we won't be able to tell when our workers are actually
586  * dead. This doesn't necessitate a PANIC since they will all abort
587  * eventually, but we can't safely continue this session.
588  */
589  if (status == BGWH_POSTMASTER_DIED)
590  ereport(FATAL,
591  (errcode(ERRCODE_ADMIN_SHUTDOWN),
592  errmsg("postmaster exited during a parallel transaction")));
593 
594  /* Release memory. */
595  pfree(pcxt->worker[i].bgwhandle);
596  pcxt->worker[i].bgwhandle = NULL;
597  }
598 }
599 
600 /*
601  * Destroy a parallel context.
602  *
603  * If expecting a clean exit, you should use WaitForParallelWorkersToFinish()
604  * first, before calling this function. When this function is invoked, any
605  * remaining workers are forcibly killed; the dynamic shared memory segment
606  * is unmapped; and we then wait (uninterruptibly) for the workers to exit.
607  */
608 void
609 DestroyParallelContext(ParallelContext *pcxt)
610 {
611  int i;
612 
613  /*
614  * Be careful about order of operations here! We remove the parallel
615  * context from the list before we do anything else; otherwise, if an
616  * error occurs during a subsequent step, we might try to nuke it again
617  * from AtEOXact_Parallel or AtEOSubXact_Parallel.
618  */
619  dlist_delete(&pcxt->node);
620 
621  /* Kill each worker in turn, and forget their error queues. */
622  if (pcxt->worker != NULL)
623  {
624  for (i = 0; i < pcxt->nworkers_launched; ++i)
625  {
626  if (pcxt->worker[i].error_mqh != NULL)
627  {
628  TerminateBackgroundWorker(pcxt->worker[i].bgwhandle);
629 
630  pfree(pcxt->worker[i].error_mqh);
631  pcxt->worker[i].error_mqh = NULL;
632  }
633  }
634  }
635 
636  /*
637  * If we have allocated a shared memory segment, detach it. This will
638  * implicitly detach the error queues, and any other shared memory queues,
639  * stored there.
640  */
641  if (pcxt->seg != NULL)
642  {
643  dsm_detach(pcxt->seg);
644  pcxt->seg = NULL;
645  }
646 
647  /*
648  * If this parallel context is actually in backend-private memory rather
649  * than shared memory, free that memory instead.
650  */
651  if (pcxt->private_memory != NULL)
652  {
653  pfree(pcxt->private_memory);
654  pcxt->private_memory = NULL;
655  }
656 
657  /*
658  * We can't finish transaction commit or abort until all of the workers
659  * have exited. This means, in particular, that we can't respond to
660  * interrupts at this stage.
661  */
662  HOLD_INTERRUPTS();
663  WaitForParallelWorkersToExit(pcxt);
664  RESUME_INTERRUPTS();
665 
666  /* Free the worker array itself. */
667  if (pcxt->worker != NULL)
668  {
669  pfree(pcxt->worker);
670  pcxt->worker = NULL;
671  }
672 
673  /* Free memory. */
674  pfree(pcxt);
675 }
676 
677 /*
678  * Are there any parallel contexts currently active?
679  */
680 bool
681 ParallelContextActive(void)
682 {
683  return !dlist_is_empty(&pcxt_list);
684 }
685 
686 /*
687  * Handle receipt of an interrupt indicating a parallel worker message.
688  *
689  * Note: this is called within a signal handler! All we can do is set
690  * a flag that will cause the next CHECK_FOR_INTERRUPTS() to invoke
691  * HandleParallelMessages().
692  */
693 void
694 HandleParallelMessageInterrupt(void)
695 {
696  InterruptPending = true;
697  ParallelMessagePending = true;
698  SetLatch(MyLatch);
699 }
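/*
 * Illustrative sketch (not part of this file): the flags set above are
 * serviced later, outside the signal handler, when ProcessInterrupts() runs
 * as part of CHECK_FOR_INTERRUPTS().  Conceptually that amounts to:
 *
 *	if (ParallelMessagePending)
 *		HandleParallelMessages();
 */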
700 
701 /*
702  * Handle any queued protocol messages received from parallel workers.
703  */
704 void
705 HandleParallelMessages(void)
706 {
707  dlist_iter iter;
708  MemoryContext oldcontext;
709 
710  static MemoryContext hpm_context = NULL;
711 
712  /*
713  * This is invoked from ProcessInterrupts(), and since some of the
714  * functions it calls contain CHECK_FOR_INTERRUPTS(), there is a potential
715  * for recursive calls if more signals are received while this runs. It's
716  * unclear that recursive entry would be safe, and it doesn't seem useful
717  * even if it is safe, so let's block interrupts until done.
718  */
719  HOLD_INTERRUPTS();
720 
721  /*
722  * Moreover, CurrentMemoryContext might be pointing almost anywhere. We
723  * don't want to risk leaking data into long-lived contexts, so let's do
724  * our work here in a private context that we can reset on each use.
725  */
726  if (hpm_context == NULL) /* first time through? */
727  hpm_context = AllocSetContextCreate(TopMemoryContext,
728  "HandleParallelMessages",
729  ALLOCSET_DEFAULT_SIZES);
730  else
731  MemoryContextReset(hpm_context);
732 
733  oldcontext = MemoryContextSwitchTo(hpm_context);
734 
735  /* OK to process messages. Reset the flag saying there are more to do. */
736  ParallelMessagePending = false;
737 
738  dlist_foreach(iter, &pcxt_list)
739  {
740  ParallelContext *pcxt;
741  int i;
742 
743  pcxt = dlist_container(ParallelContext, node, iter.cur);
744  if (pcxt->worker == NULL)
745  continue;
746 
747  for (i = 0; i < pcxt->nworkers_launched; ++i)
748  {
749  /*
750  * Read as many messages as we can from each worker, but stop when
751  * either (1) the worker's error queue goes away, which can happen
752  * if we receive a Terminate message from the worker; or (2) no
753  * more messages can be read from the worker without blocking.
754  */
755  while (pcxt->worker[i].error_mqh != NULL)
756  {
757  shm_mq_result res;
758  Size nbytes;
759  void *data;
760 
761  res = shm_mq_receive(pcxt->worker[i].error_mqh, &nbytes,
762  &data, true);
763  if (res == SHM_MQ_WOULD_BLOCK)
764  break;
765  else if (res == SHM_MQ_SUCCESS)
766  {
767  StringInfoData msg;
768 
769  initStringInfo(&msg);
770  appendBinaryStringInfo(&msg, data, nbytes);
771  HandleParallelMessage(pcxt, i, &msg);
772  pfree(msg.data);
773  }
774  else
775  ereport(ERROR,
776  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
777  errmsg("lost connection to parallel worker")));
778  }
779  }
780  }
781 
782  MemoryContextSwitchTo(oldcontext);
783 
784  /* Might as well clear the context on our way out */
785  MemoryContextReset(hpm_context);
786 
787  RESUME_INTERRUPTS();
788 }
789 
790 /*
791  * Handle a single protocol message received from a single parallel worker.
792  */
793 static void
794 HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg)
795 {
796  char msgtype;
797 
798  msgtype = pq_getmsgbyte(msg);
799 
800  switch (msgtype)
801  {
802  case 'K': /* BackendKeyData */
803  {
804  int32 pid = pq_getmsgint(msg, 4);
805 
806  (void) pq_getmsgint(msg, 4); /* discard cancel key */
807  (void) pq_getmsgend(msg);
808  pcxt->worker[i].pid = pid;
809  break;
810  }
811 
812  case 'E': /* ErrorResponse */
813  case 'N': /* NoticeResponse */
814  {
815  ErrorData edata;
816  ErrorContextCallback *save_error_context_stack;
817 
818  /* Parse ErrorResponse or NoticeResponse. */
819  pq_parse_errornotice(msg, &edata);
820 
821  /* Death of a worker isn't enough justification for suicide. */
822  edata.elevel = Min(edata.elevel, ERROR);
823 
824  /*
825  * If desired, add a context line to show that this is a
826  * message propagated from a parallel worker. Otherwise, it
827  * can sometimes be confusing to understand what actually
828  * happened. (We don't do this in FORCE_PARALLEL_REGRESS mode
829  * because it causes test-result instability depending on
830  * whether a parallel worker is actually used or not.)
831  */
832  if (force_parallel_mode != FORCE_PARALLEL_REGRESS)
833  {
834  if (edata.context)
835  edata.context = psprintf("%s\n%s", edata.context,
836  _("parallel worker"));
837  else
838  edata.context = pstrdup(_("parallel worker"));
839  }
840 
841  /*
842  * Context beyond that should use the error context callbacks
843  * that were in effect when the ParallelContext was created,
844  * not the current ones.
845  */
846  save_error_context_stack = error_context_stack;
847  error_context_stack = pcxt->error_context_stack;
848 
849  /* Rethrow error or print notice. */
850  ThrowErrorData(&edata);
851 
852  /* Not an error, so restore previous context stack. */
853  error_context_stack = save_error_context_stack;
854 
855  break;
856  }
857 
858  case 'A': /* NotifyResponse */
859  {
860  /* Propagate NotifyResponse. */
861  int32 pid;
862  const char *channel;
863  const char *payload;
864 
865  pid = pq_getmsgint(msg, 4);
866  channel = pq_getmsgrawstring(msg);
867  payload = pq_getmsgrawstring(msg);
868  pq_endmessage(msg);
869 
870  NotifyMyFrontEnd(channel, payload, pid);
871 
872  break;
873  }
874 
875  case 'X': /* Terminate, indicating clean exit */
876  {
877  pfree(pcxt->worker[i].error_mqh);
878  pcxt->worker[i].error_mqh = NULL;
879  break;
880  }
881 
882  default:
883  {
884  elog(ERROR, "unrecognized message type received from parallel worker: %c (message length %d bytes)",
885  msgtype, msg->len);
886  }
887  }
888 }
889 
890 /*
891  * End-of-subtransaction cleanup for parallel contexts.
892  *
893  * Currently, it's forbidden to enter or leave a subtransaction while
894  * parallel mode is in effect, so we could just blow away everything. But
895  * we may want to relax that restriction in the future, so this code
896  * contemplates that there may be multiple subtransaction IDs in pcxt_list.
897  */
898 void
899 AtEOSubXact_Parallel(bool isCommit, SubTransactionId mySubId)
900 {
901  while (!dlist_is_empty(&pcxt_list))
902  {
903  ParallelContext *pcxt;
904 
905  pcxt = dlist_head_element(ParallelContext, node, &pcxt_list);
906  if (pcxt->subid != mySubId)
907  break;
908  if (isCommit)
909  elog(WARNING, "leaked parallel context");
910  DestroyParallelContext(pcxt);
911  }
912 }
913 
914 /*
915  * End-of-transaction cleanup for parallel contexts.
916  */
917 void
918 AtEOXact_Parallel(bool isCommit)
919 {
920  while (!dlist_is_empty(&pcxt_list))
921  {
922  ParallelContext *pcxt;
923 
924  pcxt = dlist_head_element(ParallelContext, node, &pcxt_list);
925  if (isCommit)
926  elog(WARNING, "leaked parallel context");
927  DestroyParallelContext(pcxt);
928  }
929 }
930 
931 /*
932  * Main entrypoint for parallel workers.
933  */
934 static void
935 ParallelWorkerMain(Datum main_arg)
936 {
937  dsm_segment *seg;
938  shm_toc *toc;
939  FixedParallelState *fps;
940  char *error_queue_space;
941  shm_mq *mq;
942  shm_mq_handle *mqh;
943  char *libraryspace;
944  char *gucspace;
945  char *combocidspace;
946  char *tsnapspace;
947  char *asnapspace;
948  char *tstatespace;
949  StringInfoData msgbuf;
950 
951  /* Set flag to indicate that we're initializing a parallel worker. */
952  InitializingParallelWorker = true;
953 
954  /* Establish signal handlers. */
955  pqsignal(SIGTERM, die);
956  BackgroundWorkerUnblockSignals();
957 
958  /* Determine and set our parallel worker number. */
959  Assert(ParallelWorkerNumber == -1);
960  memcpy(&ParallelWorkerNumber, MyBgworkerEntry->bgw_extra, sizeof(int));
961 
962  /* Set up a memory context and resource owner. */
963  Assert(CurrentResourceOwner == NULL);
964  CurrentResourceOwner = ResourceOwnerCreate(NULL, "parallel toplevel");
965  CurrentMemoryContext = AllocSetContextCreate(TopMemoryContext,
966  "Parallel worker",
967  ALLOCSET_DEFAULT_SIZES);
968 
969  /*
970  * Now that we have a resource owner, we can attach to the dynamic shared
971  * memory segment and read the table of contents.
972  */
973  seg = dsm_attach(DatumGetUInt32(main_arg));
974  if (seg == NULL)
975  ereport(ERROR,
976  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
977  errmsg("could not map dynamic shared memory segment")));
978  toc = shm_toc_attach(PARALLEL_MAGIC, dsm_segment_address(seg));
979  if (toc == NULL)
980  ereport(ERROR,
981  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
982  errmsg("invalid magic number in dynamic shared memory segment")));
983 
984  /* Look up fixed parallel state. */
985  fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED);
986  Assert(fps != NULL);
987  MyFixedParallelState = fps;
988 
989  /*
990  * Now that we have a worker number, we can find and attach to the error
991  * queue provided for us. That's good, because until we do that, any
992  * errors that happen here will not be reported back to the process that
993  * requested that this worker be launched.
994  */
995  error_queue_space = shm_toc_lookup(toc, PARALLEL_KEY_ERROR_QUEUE);
996  mq = (shm_mq *) (error_queue_space +
997  ParallelWorkerNumber * PARALLEL_ERROR_QUEUE_SIZE);
998  shm_mq_set_sender(mq, MyProc);
999  mqh = shm_mq_attach(mq, seg, NULL);
1000  pq_redirect_to_shm_mq(seg, mqh);
1001  pq_set_parallel_master(fps->parallel_master_pid,
1002  fps->parallel_master_backend_id);
1003 
1004  /*
1005  * Send a BackendKeyData message to the process that initiated parallelism
1006  * so that it has access to our PID before it receives any other messages
1007  * from us. Our cancel key is sent, too, since that's the way the
1008  * protocol message is defined, but it won't actually be used for anything
1009  * in this case.
1010  */
1011  pq_beginmessage(&msgbuf, 'K');
1012  pq_sendint(&msgbuf, (int32) MyProcPid, sizeof(int32));
1013  pq_sendint(&msgbuf, (int32) MyCancelKey, sizeof(int32));
1014  pq_endmessage(&msgbuf);
1015 
1016  /*
1017  * Hooray! Primary initialization is complete. Now, we need to set up our
1018  * backend-local state to match the original backend.
1019  */
1020 
1021  /*
1022  * Join locking group. We must do this before anything that could try to
1023  * acquire a heavyweight lock, because any heavyweight locks acquired to
1024  * this point could block either directly against the parallel group
1025  * leader or against some process which in turn waits for a lock that
1026  * conflicts with the parallel group leader, causing an undetected
1027  * deadlock. (If we can't join the lock group, the leader has gone away,
1028  * so just exit quietly.)
1029  */
1030  if (!BecomeLockGroupMember(fps->parallel_master_pgproc,
1031  fps->parallel_master_pid))
1032  return;
1033 
1034  /*
1035  * Load libraries that were loaded by original backend. We want to do
1036  * this before restoring GUCs, because the libraries might define custom
1037  * variables.
1038  */
1039  libraryspace = shm_toc_lookup(toc, PARALLEL_KEY_LIBRARY);
1040  Assert(libraryspace != NULL);
1041  RestoreLibraryState(libraryspace);
1042 
1043  /* Restore database connection. */
1044  BackgroundWorkerInitializeConnectionByOid(fps->database_id,
1045  fps->authenticated_user_id);
1046 
1047  /*
1048  * Set the client encoding to the database encoding, since that is what
1049  * the leader will expect.
1050  */
1051  SetClientEncoding(GetDatabaseEncoding());
1052 
1053  /* Restore GUC values from launching backend. */
1054  gucspace = shm_toc_lookup(toc, PARALLEL_KEY_GUC);
1055  Assert(gucspace != NULL);
1056  StartTransactionCommand();
1057  RestoreGUCState(gucspace);
1058  CommitTransactionCommand();
1059 
1060  /* Crank up a transaction state appropriate to a parallel worker. */
1061  tstatespace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_STATE);
1062  StartParallelWorkerTransaction(tstatespace);
1063 
1064  /* Restore combo CID state. */
1065  combocidspace = shm_toc_lookup(toc, PARALLEL_KEY_COMBO_CID);
1066  Assert(combocidspace != NULL);
1067  RestoreComboCIDState(combocidspace);
1068 
1069  /* Restore transaction snapshot. */
1070  tsnapspace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT);
1071  Assert(tsnapspace != NULL);
1072  RestoreTransactionSnapshot(RestoreSnapshot(tsnapspace),
1073  fps->parallel_master_pgproc);
1074 
1075  /* Restore active snapshot. */
1076  asnapspace = shm_toc_lookup(toc, PARALLEL_KEY_ACTIVE_SNAPSHOT);
1077  Assert(asnapspace != NULL);
1078  PushActiveSnapshot(RestoreSnapshot(asnapspace));
1079 
1080  /*
1081  * We've changed which tuples we can see, and must therefore invalidate
1082  * system caches.
1083  */
1084  InvalidateSystemCaches();
1085 
1086  /* Restore user ID and security context. */
1087  SetUserIdAndSecContext(fps->current_user_id, fps->sec_context);
1088 
1089  /* Restore temp-namespace state to ensure search path matches leader's. */
1090  SetTempNamespaceState(fps->temp_namespace_id,
1091  fps->temp_toast_namespace_id);
1092 
1093  /* Set ParallelMasterBackendId so we know how to address temp relations. */
1094  ParallelMasterBackendId = fps->parallel_master_backend_id;
1095 
1096  /*
1097  * We've initialized all of our state now; nothing should change
1098  * hereafter.
1099  */
1100  InitializingParallelWorker = false;
1101  EnterParallelMode();
1102 
1103  /*
1104  * Time to do the real work: invoke the caller-supplied code.
1105  *
1106  * If you get a crash at this line, see the comments for
1107  * ParallelExtensionTrampoline.
1108  */
1109  fps->entrypoint(seg, toc);
1110 
1111  /* Must exit parallel mode to pop active snapshot. */
1112  ExitParallelMode();
1113 
1114  /* Must pop active snapshot so resowner.c doesn't complain. */
1115  PopActiveSnapshot();
1116 
1117  /* Shut down the parallel-worker transaction. */
1118  EndParallelWorkerTransaction();
1119 
1120  /* Report success. */
1121  pq_putmessage('X', NULL, 0);
1122 }
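/*
 * Illustrative sketch (not part of this file): the entrypoint invoked above
 * runs inside an active parallel-worker transaction with the leader's GUCs,
 * snapshots, and combo CIDs already restored, so it only has to fetch its
 * shared state and do its share of the work.  MYCODE_KEY_INPUT, MyInputData,
 * and process_chunk are hypothetical names.
 *
 *	static void
 *	do_parallel_thing(dsm_segment *seg, shm_toc *toc)
 *	{
 *		MyInputData *input = shm_toc_lookup(toc, MYCODE_KEY_INPUT);
 *
 *		// Divide the work using this worker's number, 0 .. nworkers-1.
 *		process_chunk(input, ParallelWorkerNumber);
 *	}
 */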
1123 
1124 /*
1125  * It's unsafe for the entrypoint invoked by ParallelWorkerMain to be a
1126  * function living in a dynamically loaded module, because the module might
1127  * not be loaded in every process, or might be loaded but not at the same
1128  * address. To work around that problem, CreateParallelContextForExtension()
1129  * arranges to call this function rather than calling the extension-provided
1130  * function directly; and this function then looks up the real entrypoint and
1131  * calls it.
1132  */
1133 static void
1134 ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc)
1135 {
1136  char *extensionstate;
1137  char *library_name;
1138  char *function_name;
1139  parallel_worker_main_type entrypt;
1140 
1141  extensionstate = shm_toc_lookup(toc, PARALLEL_KEY_EXTENSION_TRAMPOLINE);
1142  Assert(extensionstate != NULL);
1143  library_name = extensionstate;
1144  function_name = extensionstate + strlen(library_name) + 1;
1145 
1146  entrypt = (parallel_worker_main_type)
1147  load_external_function(library_name, function_name, true, NULL);
1148  entrypt(seg, toc);
1149 }
1150 
1151 /*
1152  * Update shared memory with the ending location of the last WAL record we
1153  * wrote, if it's greater than the value already stored there.
1154  */
1155 void
1156 ParallelWorkerReportLastRecEnd(XLogRecPtr last_xlog_end)
1157 {
1158  FixedParallelState *fps = MyFixedParallelState;
1159 
1160  Assert(fps != NULL);
1161  SpinLockAcquire(&fps->mutex);
1162  if (fps->last_xlog_end < last_xlog_end)
1163  fps->last_xlog_end = last_xlog_end;
1164  SpinLockRelease(&fps->mutex);
1165 }