PostgreSQL Source Code  git master
parallel.h File Reference
#include "access/xlogdefs.h"
#include "lib/ilist.h"
#include "postmaster/bgworker.h"
#include "storage/shm_mq.h"
#include "storage/shm_toc.h"
Include dependency graph for parallel.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Data Structures

struct  ParallelWorkerInfo
 
struct  ParallelContext
 
struct  ParallelWorkerContext
 

Macros

#define IsParallelWorker()   (ParallelWorkerNumber >= 0)
 

Typedefs

typedef void(* parallel_worker_main_type) (dsm_segment *seg, shm_toc *toc)
 
typedef struct ParallelWorkerInfo ParallelWorkerInfo
 
typedef struct ParallelContext ParallelContext
 
typedef struct ParallelWorkerContext ParallelWorkerContext
 

Functions

ParallelContext * CreateParallelContext (const char *library_name, const char *function_name, int nworkers)
 
void InitializeParallelDSM (ParallelContext *pcxt)
 
void ReinitializeParallelDSM (ParallelContext *pcxt)
 
void ReinitializeParallelWorkers (ParallelContext *pcxt, int nworkers_to_launch)
 
void LaunchParallelWorkers (ParallelContext *pcxt)
 
void WaitForParallelWorkersToAttach (ParallelContext *pcxt)
 
void WaitForParallelWorkersToFinish (ParallelContext *pcxt)
 
void DestroyParallelContext (ParallelContext *pcxt)
 
bool ParallelContextActive (void)
 
void HandleParallelMessageInterrupt (void)
 
void HandleParallelMessages (void)
 
void AtEOXact_Parallel (bool isCommit)
 
void AtEOSubXact_Parallel (bool isCommit, SubTransactionId mySubId)
 
void ParallelWorkerReportLastRecEnd (XLogRecPtr last_xlog_end)
 
void ParallelWorkerMain (Datum main_arg)
 

Variables

PGDLLIMPORT volatile sig_atomic_t ParallelMessagePending
 
PGDLLIMPORT int ParallelWorkerNumber
 
PGDLLIMPORT bool InitializingParallelWorker
 

Macro Definition Documentation

◆ IsParallelWorker

#define IsParallelWorker ( )    (ParallelWorkerNumber >= 0)

Definition at line 60 of file parallel.h.

Typedef Documentation

◆ parallel_worker_main_type

typedef void(* parallel_worker_main_type) (dsm_segment *seg, shm_toc *toc)

Definition at line 23 of file parallel.h.

◆ ParallelContext

◆ ParallelWorkerContext

◆ ParallelWorkerInfo

Function Documentation

◆ AtEOSubXact_Parallel()

void AtEOSubXact_Parallel ( bool  isCommit,
SubTransactionId  mySubId 
)

Definition at line 1250 of file parallel.c.

1251 {
1252  while (!dlist_is_empty(&pcxt_list))
1253  {
1254  ParallelContext *pcxt;
1255 
1257  if (pcxt->subid != mySubId)
1258  break;
1259  if (isCommit)
1260  elog(WARNING, "leaked parallel context");
1261  DestroyParallelContext(pcxt);
1262  }
1263 }
void DestroyParallelContext(ParallelContext *pcxt)
Definition: parallel.c:946
static dlist_head pcxt_list
Definition: parallel.c:126
#define WARNING
Definition: elog.h:36
#define elog(elevel,...)
Definition: elog.h:225
#define dlist_head_element(type, membername, lhead)
Definition: ilist.h:603
static bool dlist_is_empty(const dlist_head *head)
Definition: ilist.h:336
SubTransactionId subid
Definition: parallel.h:34

References DestroyParallelContext(), dlist_head_element, dlist_is_empty(), elog, pcxt_list, ParallelContext::subid, and WARNING.

Referenced by AbortSubTransaction(), and CommitSubTransaction().

◆ AtEOXact_Parallel()

void AtEOXact_Parallel ( bool  isCommit)

Definition at line 1271 of file parallel.c.

1272 {
1273  while (!dlist_is_empty(&pcxt_list))
1274  {
1275  ParallelContext *pcxt;
1276 
1278  if (isCommit)
1279  elog(WARNING, "leaked parallel context");
1280  DestroyParallelContext(pcxt);
1281  }
1282 }

References DestroyParallelContext(), dlist_head_element, dlist_is_empty(), elog, pcxt_list, and WARNING.

Referenced by AbortTransaction(), and CommitTransaction().

◆ CreateParallelContext()

ParallelContext* CreateParallelContext ( const char *  library_name,
const char *  function_name,
int  nworkers 
)

Definition at line 169 of file parallel.c.

171 {
172  MemoryContext oldcontext;
173  ParallelContext *pcxt;
174 
175  /* It is unsafe to create a parallel context if not in parallel mode. */
177 
178  /* Number of workers should be non-negative. */
179  Assert(nworkers >= 0);
180 
181  /* We might be running in a short-lived memory context. */
183 
184  /* Initialize a new ParallelContext. */
185  pcxt = palloc0(sizeof(ParallelContext));
187  pcxt->nworkers = nworkers;
188  pcxt->nworkers_to_launch = nworkers;
189  pcxt->library_name = pstrdup(library_name);
190  pcxt->function_name = pstrdup(function_name);
193  dlist_push_head(&pcxt_list, &pcxt->node);
194 
195  /* Restore previous memory context. */
196  MemoryContextSwitchTo(oldcontext);
197 
198  return pcxt;
199 }
#define Assert(condition)
Definition: c.h:837
ErrorContextCallback * error_context_stack
Definition: elog.c:94
static void dlist_push_head(dlist_head *head, dlist_node *node)
Definition: ilist.h:347
MemoryContext TopTransactionContext
Definition: mcxt.c:154
char * pstrdup(const char *in)
Definition: mcxt.c:1696
void * palloc0(Size size)
Definition: mcxt.c:1347
MemoryContextSwitchTo(old_ctx)
#define shm_toc_initialize_estimator(e)
Definition: shm_toc.h:49
char * library_name
Definition: parallel.h:38
ErrorContextCallback * error_context_stack
Definition: parallel.h:40
shm_toc_estimator estimator
Definition: parallel.h:41
dlist_node node
Definition: parallel.h:33
int nworkers_to_launch
Definition: parallel.h:36
char * function_name
Definition: parallel.h:39
SubTransactionId GetCurrentSubTransactionId(void)
Definition: xact.c:790
bool IsInParallelMode(void)
Definition: xact.c:1088

References Assert, dlist_push_head(), error_context_stack, ParallelContext::error_context_stack, ParallelContext::estimator, ParallelContext::function_name, GetCurrentSubTransactionId(), IsInParallelMode(), ParallelContext::library_name, MemoryContextSwitchTo(), ParallelContext::node, ParallelContext::nworkers, ParallelContext::nworkers_to_launch, palloc0(), pcxt_list, pstrdup(), shm_toc_initialize_estimator, ParallelContext::subid, and TopTransactionContext.

Referenced by _brin_begin_parallel(), _bt_begin_parallel(), ExecInitParallelPlan(), and parallel_vacuum_init().

◆ DestroyParallelContext()

void DestroyParallelContext ( ParallelContext *  pcxt)

Definition at line 946 of file parallel.c.

947 {
948  int i;
949 
950  /*
951  * Be careful about order of operations here! We remove the parallel
952  * context from the list before we do anything else; otherwise, if an
953  * error occurs during a subsequent step, we might try to nuke it again
954  * from AtEOXact_Parallel or AtEOSubXact_Parallel.
955  */
956  dlist_delete(&pcxt->node);
957 
958  /* Kill each worker in turn, and forget their error queues. */
959  if (pcxt->worker != NULL)
960  {
961  for (i = 0; i < pcxt->nworkers_launched; ++i)
962  {
963  if (pcxt->worker[i].error_mqh != NULL)
964  {
966 
968  pcxt->worker[i].error_mqh = NULL;
969  }
970  }
971  }
972 
973  /*
974  * If we have allocated a shared memory segment, detach it. This will
975  * implicitly detach the error queues, and any other shared memory queues,
976  * stored there.
977  */
978  if (pcxt->seg != NULL)
979  {
980  dsm_detach(pcxt->seg);
981  pcxt->seg = NULL;
982  }
983 
984  /*
985  * If this parallel context is actually in backend-private memory rather
986  * than shared memory, free that memory instead.
987  */
988  if (pcxt->private_memory != NULL)
989  {
990  pfree(pcxt->private_memory);
991  pcxt->private_memory = NULL;
992  }
993 
994  /*
995  * We can't finish transaction commit or abort until all of the workers
996  * have exited. This means, in particular, that we can't respond to
997  * interrupts at this stage.
998  */
999  HOLD_INTERRUPTS();
1002 
1003  /* Free the worker array itself. */
1004  if (pcxt->worker != NULL)
1005  {
1006  pfree(pcxt->worker);
1007  pcxt->worker = NULL;
1008  }
1009 
1010  /* Free memory. */
1011  pfree(pcxt->library_name);
1012  pfree(pcxt->function_name);
1013  pfree(pcxt);
1014 }
static void WaitForParallelWorkersToExit(ParallelContext *pcxt)
Definition: parallel.c:906
void TerminateBackgroundWorker(BackgroundWorkerHandle *handle)
Definition: bgworker.c:1296
void dsm_detach(dsm_segment *seg)
Definition: dsm.c:803
static void dlist_delete(dlist_node *node)
Definition: ilist.h:405
int i
Definition: isn.c:72
void pfree(void *pointer)
Definition: mcxt.c:1521
#define RESUME_INTERRUPTS()
Definition: miscadmin.h:135
#define HOLD_INTERRUPTS()
Definition: miscadmin.h:133
void shm_mq_detach(shm_mq_handle *mqh)
Definition: shm_mq.c:843
dsm_segment * seg
Definition: parallel.h:42
ParallelWorkerInfo * worker
Definition: parallel.h:45
void * private_memory
Definition: parallel.h:43
int nworkers_launched
Definition: parallel.h:37
BackgroundWorkerHandle * bgwhandle
Definition: parallel.h:27
shm_mq_handle * error_mqh
Definition: parallel.h:28

References ParallelWorkerInfo::bgwhandle, dlist_delete(), dsm_detach(), ParallelWorkerInfo::error_mqh, ParallelContext::function_name, HOLD_INTERRUPTS, i, ParallelContext::library_name, ParallelContext::node, ParallelContext::nworkers_launched, pfree(), ParallelContext::private_memory, RESUME_INTERRUPTS, ParallelContext::seg, shm_mq_detach(), TerminateBackgroundWorker(), WaitForParallelWorkersToExit(), and ParallelContext::worker.

Referenced by _brin_begin_parallel(), _brin_end_parallel(), _bt_begin_parallel(), _bt_end_parallel(), AtEOSubXact_Parallel(), AtEOXact_Parallel(), ExecParallelCleanup(), and parallel_vacuum_end().

◆ HandleParallelMessageInterrupt()

void HandleParallelMessageInterrupt ( void  )

Definition at line 1033 of file parallel.c.

1034 {
1035  InterruptPending = true;
1036  ParallelMessagePending = true;
1037  SetLatch(MyLatch);
1038 }
volatile sig_atomic_t ParallelMessagePending
Definition: parallel.c:117
volatile sig_atomic_t InterruptPending
Definition: globals.c:31
struct Latch * MyLatch
Definition: globals.c:62
void SetLatch(Latch *latch)
Definition: latch.c:632

References InterruptPending, MyLatch, ParallelMessagePending, and SetLatch().

Referenced by procsignal_sigusr1_handler().

◆ HandleParallelMessages()

void HandleParallelMessages ( void  )

Definition at line 1044 of file parallel.c.

1045 {
1046  dlist_iter iter;
1047  MemoryContext oldcontext;
1048 
1049  static MemoryContext hpm_context = NULL;
1050 
1051  /*
1052  * This is invoked from ProcessInterrupts(), and since some of the
1053  * functions it calls contain CHECK_FOR_INTERRUPTS(), there is a potential
1054  * for recursive calls if more signals are received while this runs. It's
1055  * unclear that recursive entry would be safe, and it doesn't seem useful
1056  * even if it is safe, so let's block interrupts until done.
1057  */
1058  HOLD_INTERRUPTS();
1059 
1060  /*
1061  * Moreover, CurrentMemoryContext might be pointing almost anywhere. We
1062  * don't want to risk leaking data into long-lived contexts, so let's do
1063  * our work here in a private context that we can reset on each use.
1064  */
1065  if (hpm_context == NULL) /* first time through? */
1067  "HandleParallelMessages",
1069  else
1070  MemoryContextReset(hpm_context);
1071 
1072  oldcontext = MemoryContextSwitchTo(hpm_context);
1073 
1074  /* OK to process messages. Reset the flag saying there are more to do. */
1075  ParallelMessagePending = false;
1076 
1077  dlist_foreach(iter, &pcxt_list)
1078  {
1079  ParallelContext *pcxt;
1080  int i;
1081 
1082  pcxt = dlist_container(ParallelContext, node, iter.cur);
1083  if (pcxt->worker == NULL)
1084  continue;
1085 
1086  for (i = 0; i < pcxt->nworkers_launched; ++i)
1087  {
1088  /*
1089  * Read as many messages as we can from each worker, but stop when
1090  * either (1) the worker's error queue goes away, which can happen
1091  * if we receive a Terminate message from the worker; or (2) no
1092  * more messages can be read from the worker without blocking.
1093  */
1094  while (pcxt->worker[i].error_mqh != NULL)
1095  {
1097  Size nbytes;
1098  void *data;
1099 
1100  res = shm_mq_receive(pcxt->worker[i].error_mqh, &nbytes,
1101  &data, true);
1102  if (res == SHM_MQ_WOULD_BLOCK)
1103  break;
1104  else if (res == SHM_MQ_SUCCESS)
1105  {
1106  StringInfoData msg;
1107 
1108  initStringInfo(&msg);
1109  appendBinaryStringInfo(&msg, data, nbytes);
1110  HandleParallelMessage(pcxt, i, &msg);
1111  pfree(msg.data);
1112  }
1113  else
1114  ereport(ERROR,
1115  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1116  errmsg("lost connection to parallel worker")));
1117  }
1118  }
1119  }
1120 
1121  MemoryContextSwitchTo(oldcontext);
1122 
1123  /* Might as well clear the context on our way out */
1124  MemoryContextReset(hpm_context);
1125 
1127 }
static void HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg)
Definition: parallel.c:1133
size_t Size
Definition: c.h:584
int errcode(int sqlerrcode)
Definition: elog.c:853
int errmsg(const char *fmt,...)
Definition: elog.c:1070
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
#define dlist_foreach(iter, lhead)
Definition: ilist.h:623
#define dlist_container(type, membername, ptr)
Definition: ilist.h:593
void MemoryContextReset(MemoryContext context)
Definition: mcxt.c:383
MemoryContext TopMemoryContext
Definition: mcxt.c:149
#define AllocSetContextCreate
Definition: memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:160
const void * data
shm_mq_result shm_mq_receive(shm_mq_handle *mqh, Size *nbytesp, void **datap, bool nowait)
Definition: shm_mq.c:572
shm_mq_result
Definition: shm_mq.h:37
@ SHM_MQ_SUCCESS
Definition: shm_mq.h:38
@ SHM_MQ_WOULD_BLOCK
Definition: shm_mq.h:39
void appendBinaryStringInfo(StringInfo str, const void *data, int datalen)
Definition: stringinfo.c:230
void initStringInfo(StringInfo str)
Definition: stringinfo.c:56
dlist_node * cur
Definition: ilist.h:179

References ALLOCSET_DEFAULT_SIZES, AllocSetContextCreate, appendBinaryStringInfo(), dlist_iter::cur, StringInfoData::data, data, dlist_container, dlist_foreach, ereport, errcode(), errmsg(), ERROR, ParallelWorkerInfo::error_mqh, HandleParallelMessage(), HOLD_INTERRUPTS, i, initStringInfo(), MemoryContextReset(), MemoryContextSwitchTo(), ParallelContext::nworkers_launched, ParallelMessagePending, pcxt_list, pfree(), res, RESUME_INTERRUPTS, shm_mq_receive(), SHM_MQ_SUCCESS, SHM_MQ_WOULD_BLOCK, TopMemoryContext, and ParallelContext::worker.

Referenced by ProcessInterrupts().

◆ InitializeParallelDSM()

void InitializeParallelDSM ( ParallelContext *  pcxt)

Definition at line 207 of file parallel.c.

208 {
209  MemoryContext oldcontext;
210  Size library_len = 0;
211  Size guc_len = 0;
212  Size combocidlen = 0;
213  Size tsnaplen = 0;
214  Size asnaplen = 0;
215  Size tstatelen = 0;
216  Size pendingsyncslen = 0;
217  Size reindexlen = 0;
218  Size relmapperlen = 0;
219  Size uncommittedenumslen = 0;
220  Size clientconninfolen = 0;
221  Size segsize = 0;
222  int i;
223  FixedParallelState *fps;
224  dsm_handle session_dsm_handle = DSM_HANDLE_INVALID;
225  Snapshot transaction_snapshot = GetTransactionSnapshot();
226  Snapshot active_snapshot = GetActiveSnapshot();
227 
228  /* We might be running in a very short-lived memory context. */
230 
231  /* Allow space to store the fixed-size parallel state. */
233  shm_toc_estimate_keys(&pcxt->estimator, 1);
234 
235  /*
236  * If we manage to reach here while non-interruptible, it's unsafe to
237  * launch any workers: we would fail to process interrupts sent by them.
238  * We can deal with that edge case by pretending no workers were
239  * requested.
240  */
242  pcxt->nworkers = 0;
243 
244  /*
245  * Normally, the user will have requested at least one worker process, but
246  * if by chance they have not, we can skip a bunch of things here.
247  */
248  if (pcxt->nworkers > 0)
249  {
250  /* Get (or create) the per-session DSM segment's handle. */
251  session_dsm_handle = GetSessionDsmHandle();
252 
253  /*
254  * If we weren't able to create a per-session DSM segment, then we can
255  * continue but we can't safely launch any workers because their
256  * record typmods would be incompatible so they couldn't exchange
257  * tuples.
258  */
259  if (session_dsm_handle == DSM_HANDLE_INVALID)
260  pcxt->nworkers = 0;
261  }
262 
263  if (pcxt->nworkers > 0)
264  {
265  /* Estimate space for various kinds of state sharing. */
266  library_len = EstimateLibraryStateSpace();
267  shm_toc_estimate_chunk(&pcxt->estimator, library_len);
268  guc_len = EstimateGUCStateSpace();
269  shm_toc_estimate_chunk(&pcxt->estimator, guc_len);
270  combocidlen = EstimateComboCIDStateSpace();
271  shm_toc_estimate_chunk(&pcxt->estimator, combocidlen);
273  {
274  tsnaplen = EstimateSnapshotSpace(transaction_snapshot);
275  shm_toc_estimate_chunk(&pcxt->estimator, tsnaplen);
276  }
277  asnaplen = EstimateSnapshotSpace(active_snapshot);
278  shm_toc_estimate_chunk(&pcxt->estimator, asnaplen);
279  tstatelen = EstimateTransactionStateSpace();
280  shm_toc_estimate_chunk(&pcxt->estimator, tstatelen);
282  pendingsyncslen = EstimatePendingSyncsSpace();
283  shm_toc_estimate_chunk(&pcxt->estimator, pendingsyncslen);
284  reindexlen = EstimateReindexStateSpace();
285  shm_toc_estimate_chunk(&pcxt->estimator, reindexlen);
286  relmapperlen = EstimateRelationMapSpace();
287  shm_toc_estimate_chunk(&pcxt->estimator, relmapperlen);
288  uncommittedenumslen = EstimateUncommittedEnumsSpace();
289  shm_toc_estimate_chunk(&pcxt->estimator, uncommittedenumslen);
290  clientconninfolen = EstimateClientConnectionInfoSpace();
291  shm_toc_estimate_chunk(&pcxt->estimator, clientconninfolen);
292  /* If you add more chunks here, you probably need to add keys. */
293  shm_toc_estimate_keys(&pcxt->estimator, 12);
294 
295  /* Estimate space need for error queues. */
298  "parallel error queue size not buffer-aligned");
301  pcxt->nworkers));
302  shm_toc_estimate_keys(&pcxt->estimator, 1);
303 
304  /* Estimate how much we'll need for the entrypoint info. */
305  shm_toc_estimate_chunk(&pcxt->estimator, strlen(pcxt->library_name) +
306  strlen(pcxt->function_name) + 2);
307  shm_toc_estimate_keys(&pcxt->estimator, 1);
308  }
309 
310  /*
311  * Create DSM and initialize with new table of contents. But if the user
312  * didn't request any workers, then don't bother creating a dynamic shared
313  * memory segment; instead, just use backend-private memory.
314  *
315  * Also, if we can't create a dynamic shared memory segment because the
316  * maximum number of segments have already been created, then fall back to
317  * backend-private memory, and plan not to use any workers. We hope this
318  * won't happen very often, but it's better to abandon the use of
319  * parallelism than to fail outright.
320  */
321  segsize = shm_toc_estimate(&pcxt->estimator);
322  if (pcxt->nworkers > 0)
324  if (pcxt->seg != NULL)
326  dsm_segment_address(pcxt->seg),
327  segsize);
328  else
329  {
330  pcxt->nworkers = 0;
333  segsize);
334  }
335 
336  /* Initialize fixed-size state in shared memory. */
337  fps = (FixedParallelState *)
338  shm_toc_allocate(pcxt->toc, sizeof(FixedParallelState));
339  fps->database_id = MyDatabaseId;
354  SpinLockInit(&fps->mutex);
355  fps->last_xlog_end = 0;
357 
358  /* We can skip the rest of this if we're not budgeting for any workers. */
359  if (pcxt->nworkers > 0)
360  {
361  char *libraryspace;
362  char *gucspace;
363  char *combocidspace;
364  char *tsnapspace;
365  char *asnapspace;
366  char *tstatespace;
367  char *pendingsyncsspace;
368  char *reindexspace;
369  char *relmapperspace;
370  char *error_queue_space;
371  char *session_dsm_handle_space;
372  char *entrypointstate;
373  char *uncommittedenumsspace;
374  char *clientconninfospace;
375  Size lnamelen;
376 
377  /* Serialize shared libraries we have loaded. */
378  libraryspace = shm_toc_allocate(pcxt->toc, library_len);
379  SerializeLibraryState(library_len, libraryspace);
380  shm_toc_insert(pcxt->toc, PARALLEL_KEY_LIBRARY, libraryspace);
381 
382  /* Serialize GUC settings. */
383  gucspace = shm_toc_allocate(pcxt->toc, guc_len);
384  SerializeGUCState(guc_len, gucspace);
385  shm_toc_insert(pcxt->toc, PARALLEL_KEY_GUC, gucspace);
386 
387  /* Serialize combo CID state. */
388  combocidspace = shm_toc_allocate(pcxt->toc, combocidlen);
389  SerializeComboCIDState(combocidlen, combocidspace);
390  shm_toc_insert(pcxt->toc, PARALLEL_KEY_COMBO_CID, combocidspace);
391 
392  /*
393  * Serialize the transaction snapshot if the transaction isolation
394  * level uses a transaction snapshot.
395  */
397  {
398  tsnapspace = shm_toc_allocate(pcxt->toc, tsnaplen);
399  SerializeSnapshot(transaction_snapshot, tsnapspace);
401  tsnapspace);
402  }
403 
404  /* Serialize the active snapshot. */
405  asnapspace = shm_toc_allocate(pcxt->toc, asnaplen);
406  SerializeSnapshot(active_snapshot, asnapspace);
407  shm_toc_insert(pcxt->toc, PARALLEL_KEY_ACTIVE_SNAPSHOT, asnapspace);
408 
409  /* Provide the handle for per-session segment. */
410  session_dsm_handle_space = shm_toc_allocate(pcxt->toc,
411  sizeof(dsm_handle));
412  *(dsm_handle *) session_dsm_handle_space = session_dsm_handle;
414  session_dsm_handle_space);
415 
416  /* Serialize transaction state. */
417  tstatespace = shm_toc_allocate(pcxt->toc, tstatelen);
418  SerializeTransactionState(tstatelen, tstatespace);
420 
421  /* Serialize pending syncs. */
422  pendingsyncsspace = shm_toc_allocate(pcxt->toc, pendingsyncslen);
423  SerializePendingSyncs(pendingsyncslen, pendingsyncsspace);
425  pendingsyncsspace);
426 
427  /* Serialize reindex state. */
428  reindexspace = shm_toc_allocate(pcxt->toc, reindexlen);
429  SerializeReindexState(reindexlen, reindexspace);
430  shm_toc_insert(pcxt->toc, PARALLEL_KEY_REINDEX_STATE, reindexspace);
431 
432  /* Serialize relmapper state. */
433  relmapperspace = shm_toc_allocate(pcxt->toc, relmapperlen);
434  SerializeRelationMap(relmapperlen, relmapperspace);
436  relmapperspace);
437 
438  /* Serialize uncommitted enum state. */
439  uncommittedenumsspace = shm_toc_allocate(pcxt->toc,
440  uncommittedenumslen);
441  SerializeUncommittedEnums(uncommittedenumsspace, uncommittedenumslen);
443  uncommittedenumsspace);
444 
445  /* Serialize our ClientConnectionInfo. */
446  clientconninfospace = shm_toc_allocate(pcxt->toc, clientconninfolen);
447  SerializeClientConnectionInfo(clientconninfolen, clientconninfospace);
449  clientconninfospace);
450 
451  /* Allocate space for worker information. */
452  pcxt->worker = palloc0(sizeof(ParallelWorkerInfo) * pcxt->nworkers);
453 
454  /*
455  * Establish error queues in dynamic shared memory.
456  *
457  * These queues should be used only for transmitting ErrorResponse,
458  * NoticeResponse, and NotifyResponse protocol messages. Tuple data
459  * should be transmitted via separate (possibly larger?) queues.
460  */
461  error_queue_space =
462  shm_toc_allocate(pcxt->toc,
464  pcxt->nworkers));
465  for (i = 0; i < pcxt->nworkers; ++i)
466  {
467  char *start;
468  shm_mq *mq;
469 
470  start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
473  pcxt->worker[i].error_mqh = shm_mq_attach(mq, pcxt->seg, NULL);
474  }
475  shm_toc_insert(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE, error_queue_space);
476 
477  /*
478  * Serialize entrypoint information. It's unsafe to pass function
479  * pointers across processes, as the function pointer may be different
480  * in each process in EXEC_BACKEND builds, so we always pass library
481  * and function name. (We use library name "postgres" for functions
482  * in the core backend.)
483  */
484  lnamelen = strlen(pcxt->library_name);
485  entrypointstate = shm_toc_allocate(pcxt->toc, lnamelen +
486  strlen(pcxt->function_name) + 2);
487  strcpy(entrypointstate, pcxt->library_name);
488  strcpy(entrypointstate + lnamelen + 1, pcxt->function_name);
489  shm_toc_insert(pcxt->toc, PARALLEL_KEY_ENTRYPOINT, entrypointstate);
490  }
491 
492  /* Update nworkers_to_launch, in case we changed nworkers above. */
493  pcxt->nworkers_to_launch = pcxt->nworkers;
494 
495  /* Restore previous memory context. */
496  MemoryContextSwitchTo(oldcontext);
497 }
#define PARALLEL_KEY_TRANSACTION_STATE
Definition: parallel.c:71
#define PARALLEL_KEY_GUC
Definition: parallel.c:67
#define PARALLEL_KEY_UNCOMMITTEDENUMS
Definition: parallel.c:77
#define PARALLEL_KEY_TRANSACTION_SNAPSHOT
Definition: parallel.c:69
#define PARALLEL_KEY_CLIENTCONNINFO
Definition: parallel.c:78
#define PARALLEL_KEY_PENDING_SYNCS
Definition: parallel.c:74
#define PARALLEL_KEY_ACTIVE_SNAPSHOT
Definition: parallel.c:70
#define PARALLEL_KEY_ERROR_QUEUE
Definition: parallel.c:65
#define PARALLEL_KEY_SESSION_DSM
Definition: parallel.c:73
#define PARALLEL_MAGIC
Definition: parallel.c:57
#define PARALLEL_KEY_REINDEX_STATE
Definition: parallel.c:75
#define PARALLEL_KEY_LIBRARY
Definition: parallel.c:66
#define PARALLEL_KEY_FIXED
Definition: parallel.c:64
#define PARALLEL_KEY_ENTRYPOINT
Definition: parallel.c:72
#define PARALLEL_KEY_COMBO_CID
Definition: parallel.c:68
#define PARALLEL_ERROR_QUEUE_SIZE
Definition: parallel.c:54
#define PARALLEL_KEY_RELMAPPER_STATE
Definition: parallel.c:76
#define BUFFERALIGN(LEN)
Definition: c.h:792
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:917
void SerializeComboCIDState(Size maxsize, char *start_address)
Definition: combocid.c:316
Size EstimateComboCIDStateSpace(void)
Definition: combocid.c:297
void SerializeLibraryState(Size maxsize, char *start_address)
Definition: dfmgr.c:648
Size EstimateLibraryStateSpace(void)
Definition: dfmgr.c:631
void * dsm_segment_address(dsm_segment *seg)
Definition: dsm.c:1095
dsm_segment * dsm_create(Size size, int flags)
Definition: dsm.c:516
#define DSM_CREATE_NULL_IF_MAXSEGMENTS
Definition: dsm.h:20
uint32 dsm_handle
Definition: dsm_impl.h:55
#define DSM_HANDLE_INVALID
Definition: dsm_impl.h:58
int MyProcPid
Definition: globals.c:46
ProcNumber MyProcNumber
Definition: globals.c:89
Oid MyDatabaseId
Definition: globals.c:93
void SerializeGUCState(Size maxsize, char *start_address)
Definition: guc.c:6101
Size EstimateGUCStateSpace(void)
Definition: guc.c:5948
bool current_role_is_superuser
Definition: guc_tables.c:519
return str start
void SerializeReindexState(Size maxsize, char *start_address)
Definition: index.c:4222
Size EstimateReindexStateSpace(void)
Definition: index.c:4211
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:1181
#define INTERRUPTS_CAN_BE_PROCESSED()
Definition: miscadmin.h:129
void SerializeClientConnectionInfo(Size maxsize, char *start_address)
Definition: miscinit.c:1104
void GetUserIdAndSecContext(Oid *userid, int *sec_context)
Definition: miscinit.c:667
bool GetSessionUserIsSuperuser(void)
Definition: miscinit.c:570
Size EstimateClientConnectionInfoSpace(void)
Definition: miscinit.c:1088
Oid GetSessionUserId(void)
Definition: miscinit.c:563
Oid GetAuthenticatedUserId(void)
Definition: miscinit.c:600
Oid GetCurrentRoleId(void)
Definition: miscinit.c:988
void GetTempNamespaceState(Oid *tempNamespaceId, Oid *tempToastNamespaceId)
Definition: namespace.c:3805
Size EstimateUncommittedEnumsSpace(void)
Definition: pg_enum.c:813
void SerializeUncommittedEnums(void *space, Size size)
Definition: pg_enum.c:827
SerializableXactHandle ShareSerializableXact(void)
Definition: predicate.c:5036
Size EstimateRelationMapSpace(void)
Definition: relmapper.c:713
void SerializeRelationMap(Size maxSize, char *startAddress)
Definition: relmapper.c:724
dsm_handle GetSessionDsmHandle(void)
Definition: session.c:70
shm_mq_handle * shm_mq_attach(shm_mq *mq, dsm_segment *seg, BackgroundWorkerHandle *handle)
Definition: shm_mq.c:290
shm_mq * shm_mq_create(void *address, Size size)
Definition: shm_mq.c:177
void shm_mq_set_receiver(shm_mq *mq, PGPROC *proc)
Definition: shm_mq.c:206
shm_toc * shm_toc_create(uint64 magic, void *address, Size nbytes)
Definition: shm_toc.c:40
Size shm_toc_estimate(shm_toc_estimator *e)
Definition: shm_toc.c:263
void shm_toc_insert(shm_toc *toc, uint64 key, void *address)
Definition: shm_toc.c:171
void * shm_toc_allocate(shm_toc *toc, Size nbytes)
Definition: shm_toc.c:88
#define shm_toc_estimate_chunk(e, sz)
Definition: shm_toc.h:51
#define shm_toc_estimate_keys(e, cnt)
Definition: shm_toc.h:53
Size mul_size(Size s1, Size s2)
Definition: shmem.c:505
void SerializeSnapshot(Snapshot snapshot, char *start_address)
Definition: snapmgr.c:1716
Snapshot GetTransactionSnapshot(void)
Definition: snapmgr.c:216
Size EstimateSnapshotSpace(Snapshot snapshot)
Definition: snapmgr.c:1692
Snapshot GetActiveSnapshot(void)
Definition: snapmgr.c:770
#define SpinLockInit(lock)
Definition: spin.h:57
PGPROC * MyProc
Definition: proc.c:66
void SerializePendingSyncs(Size maxSize, char *startAddress)
Definition: storage.c:572
Size EstimatePendingSyncsSpace(void)
Definition: storage.c:559
Oid temp_toast_namespace_id
Definition: parallel.c:90
XLogRecPtr last_xlog_end
Definition: parallel.c:105
bool role_is_superuser
Definition: parallel.c:93
TimestampTz stmt_ts
Definition: parallel.c:98
SerializableXactHandle serializable_xact_handle
Definition: parallel.c:99
TimestampTz xact_ts
Definition: parallel.c:97
PGPROC * parallel_leader_pgproc
Definition: parallel.c:94
bool session_user_is_superuser
Definition: parallel.c:92
pid_t parallel_leader_pid
Definition: parallel.c:95
Oid authenticated_user_id
Definition: parallel.c:85
ProcNumber parallel_leader_proc_number
Definition: parallel.c:96
shm_toc * toc
Definition: parallel.h:44
Definition: shm_mq.c:72
void SerializeTransactionState(Size maxsize, char *start_address)
Definition: xact.c:5528
Size EstimateTransactionStateSpace(void)
Definition: xact.c:5500
TimestampTz GetCurrentStatementStartTimestamp(void)
Definition: xact.c:878
TimestampTz GetCurrentTransactionStartTimestamp(void)
Definition: xact.c:869
#define IsolationUsesXactSnapshot()
Definition: xact.h:51

References FixedParallelState::authenticated_user_id, BUFFERALIGN, current_role_is_superuser, FixedParallelState::current_user_id, FixedParallelState::database_id, dsm_create(), DSM_CREATE_NULL_IF_MAXSEGMENTS, DSM_HANDLE_INVALID, dsm_segment_address(), ParallelWorkerInfo::error_mqh, EstimateClientConnectionInfoSpace(), EstimateComboCIDStateSpace(), EstimateGUCStateSpace(), EstimateLibraryStateSpace(), EstimatePendingSyncsSpace(), EstimateReindexStateSpace(), EstimateRelationMapSpace(), EstimateSnapshotSpace(), EstimateTransactionStateSpace(), EstimateUncommittedEnumsSpace(), ParallelContext::estimator, ParallelContext::function_name, GetActiveSnapshot(), GetAuthenticatedUserId(), GetCurrentRoleId(), GetCurrentStatementStartTimestamp(), GetCurrentTransactionStartTimestamp(), GetSessionDsmHandle(), GetSessionUserId(), GetSessionUserIsSuperuser(), GetTempNamespaceState(), GetTransactionSnapshot(), GetUserIdAndSecContext(), i, INTERRUPTS_CAN_BE_PROCESSED, IsolationUsesXactSnapshot, FixedParallelState::last_xlog_end, ParallelContext::library_name, MemoryContextAlloc(), MemoryContextSwitchTo(), mul_size(), FixedParallelState::mutex, MyDatabaseId, MyProc, MyProcNumber, MyProcPid, ParallelContext::nworkers, ParallelContext::nworkers_to_launch, FixedParallelState::outer_user_id, palloc0(), PARALLEL_ERROR_QUEUE_SIZE, PARALLEL_KEY_ACTIVE_SNAPSHOT, PARALLEL_KEY_CLIENTCONNINFO, PARALLEL_KEY_COMBO_CID, PARALLEL_KEY_ENTRYPOINT, PARALLEL_KEY_ERROR_QUEUE, PARALLEL_KEY_FIXED, PARALLEL_KEY_GUC, PARALLEL_KEY_LIBRARY, PARALLEL_KEY_PENDING_SYNCS, PARALLEL_KEY_REINDEX_STATE, PARALLEL_KEY_RELMAPPER_STATE, PARALLEL_KEY_SESSION_DSM, PARALLEL_KEY_TRANSACTION_SNAPSHOT, PARALLEL_KEY_TRANSACTION_STATE, PARALLEL_KEY_UNCOMMITTEDENUMS, FixedParallelState::parallel_leader_pgproc, FixedParallelState::parallel_leader_pid, FixedParallelState::parallel_leader_proc_number, PARALLEL_MAGIC, ParallelContext::private_memory, FixedParallelState::role_is_superuser, FixedParallelState::sec_context, 
ParallelContext::seg, FixedParallelState::serializable_xact_handle, SerializeClientConnectionInfo(), SerializeComboCIDState(), SerializeGUCState(), SerializeLibraryState(), SerializePendingSyncs(), SerializeReindexState(), SerializeRelationMap(), SerializeSnapshot(), SerializeTransactionState(), SerializeUncommittedEnums(), FixedParallelState::session_user_id, FixedParallelState::session_user_is_superuser, ShareSerializableXact(), shm_mq_attach(), shm_mq_create(), shm_mq_set_receiver(), shm_toc_allocate(), shm_toc_create(), shm_toc_estimate(), shm_toc_estimate_chunk, shm_toc_estimate_keys, shm_toc_insert(), SpinLockInit, start, StaticAssertStmt, FixedParallelState::stmt_ts, FixedParallelState::temp_namespace_id, FixedParallelState::temp_toast_namespace_id, ParallelContext::toc, TopMemoryContext, TopTransactionContext, ParallelContext::worker, and FixedParallelState::xact_ts.

Referenced by _brin_begin_parallel(), _bt_begin_parallel(), ExecInitParallelPlan(), and parallel_vacuum_init().

◆ LaunchParallelWorkers()

void LaunchParallelWorkers ( ParallelContext pcxt)

Definition at line 569 of file parallel.c.

570 {
571  MemoryContext oldcontext;
572  BackgroundWorker worker;
573  int i;
574  bool any_registrations_failed = false;
575 
576  /* Skip this if we have no workers. */
577  if (pcxt->nworkers == 0 || pcxt->nworkers_to_launch == 0)
578  return;
579 
580  /* We need to be a lock group leader. */
581  BecomeLockGroupLeader();
582 
583  /* If we do have workers, we'd better have a DSM segment. */
584  Assert(pcxt->seg != NULL);
585 
586  /* We might be running in a short-lived memory context. */
587  oldcontext = MemoryContextSwitchTo(TopTransactionContext);
588 
589  /* Configure a worker. */
590  memset(&worker, 0, sizeof(worker));
591  snprintf(worker.bgw_name, BGW_MAXLEN, "parallel worker for PID %d",
592  MyProcPid);
593  snprintf(worker.bgw_type, BGW_MAXLEN, "parallel worker");
594  worker.bgw_flags =
595  BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION |
596  BGWORKER_CLASS_PARALLEL;
597  worker.bgw_start_time = BgWorkerStart_ConsistentState;
598  worker.bgw_restart_time = BGW_NEVER_RESTART;
599  sprintf(worker.bgw_library_name, "postgres");
600  sprintf(worker.bgw_function_name, "ParallelWorkerMain");
601  worker.bgw_main_arg = UInt32GetDatum(dsm_segment_handle(pcxt->seg));
602  worker.bgw_notify_pid = MyProcPid;
603 
604  /*
605  * Start workers.
606  *
607  * The caller must be able to tolerate ending up with fewer workers than
608  * expected, so there is no need to throw an error here if registration
609  * fails. It wouldn't help much anyway, because registering the worker in
610  * no way guarantees that it will start up and initialize successfully.
611  */
612  for (i = 0; i < pcxt->nworkers_to_launch; ++i)
613  {
614  memcpy(worker.bgw_extra, &i, sizeof(int));
615  if (!any_registrations_failed &&
616  RegisterDynamicBackgroundWorker(&worker,
617  &pcxt->worker[i].bgwhandle))
618  {
619  shm_mq_set_handle(pcxt->worker[i].error_mqh,
620  pcxt->worker[i].bgwhandle);
621  pcxt->nworkers_launched++;
622  }
623  else
624  {
625  /*
626  * If we weren't able to register the worker, then we've bumped up
627  * against the max_worker_processes limit, and future
628  * registrations will probably fail too, so arrange to skip them.
629  * But we still have to execute this code for the remaining slots
630  * to make sure that we forget about the error queues we budgeted
631  * for those workers. Otherwise, we'll wait for them to start,
632  * but they never will.
633  */
634  any_registrations_failed = true;
635  pcxt->worker[i].bgwhandle = NULL;
636  shm_mq_detach(pcxt->worker[i].error_mqh);
637  pcxt->worker[i].error_mqh = NULL;
638  }
639  }
640 
641  /*
642  * Now that nworkers_launched has taken its final value, we can initialize
643  * known_attached_workers.
644  */
645  if (pcxt->nworkers_launched > 0)
646  {
647  pcxt->known_attached_workers =
648  palloc0(sizeof(bool) * pcxt->nworkers_launched);
649  pcxt->nknown_attached_workers = 0;
650  }
651 
652  /* Restore previous memory context. */
653  MemoryContextSwitchTo(oldcontext);
654 }
bool RegisterDynamicBackgroundWorker(BackgroundWorker *worker, BackgroundWorkerHandle **handle)
Definition: bgworker.c:1045
#define BGW_NEVER_RESTART
Definition: bgworker.h:85
#define BGWORKER_CLASS_PARALLEL
Definition: bgworker.h:68
@ BgWorkerStart_ConsistentState
Definition: bgworker.h:80
#define BGWORKER_BACKEND_DATABASE_CONNECTION
Definition: bgworker.h:60
#define BGWORKER_SHMEM_ACCESS
Definition: bgworker.h:53
#define BGW_MAXLEN
Definition: bgworker.h:86
dsm_handle dsm_segment_handle(dsm_segment *seg)
Definition: dsm.c:1123
#define sprintf
Definition: port.h:240
#define snprintf
Definition: port.h:238
static Datum UInt32GetDatum(uint32 X)
Definition: postgres.h:232
void shm_mq_set_handle(shm_mq_handle *mqh, BackgroundWorkerHandle *handle)
Definition: shm_mq.c:319
void BecomeLockGroupLeader(void)
Definition: proc.c:1918
char bgw_function_name[BGW_MAXLEN]
Definition: bgworker.h:97
Datum bgw_main_arg
Definition: bgworker.h:98
char bgw_name[BGW_MAXLEN]
Definition: bgworker.h:91
int bgw_restart_time
Definition: bgworker.h:95
char bgw_type[BGW_MAXLEN]
Definition: bgworker.h:92
BgWorkerStartTime bgw_start_time
Definition: bgworker.h:94
char bgw_extra[BGW_EXTRALEN]
Definition: bgworker.h:99
pid_t bgw_notify_pid
Definition: bgworker.h:100
char bgw_library_name[MAXPGPATH]
Definition: bgworker.h:96
bool * known_attached_workers
Definition: parallel.h:47
int nknown_attached_workers
Definition: parallel.h:46

References Assert, BecomeLockGroupLeader(), BackgroundWorker::bgw_extra, BackgroundWorker::bgw_flags, BackgroundWorker::bgw_function_name, BackgroundWorker::bgw_library_name, BackgroundWorker::bgw_main_arg, BGW_MAXLEN, BackgroundWorker::bgw_name, BGW_NEVER_RESTART, BackgroundWorker::bgw_notify_pid, BackgroundWorker::bgw_restart_time, BackgroundWorker::bgw_start_time, BackgroundWorker::bgw_type, ParallelWorkerInfo::bgwhandle, BGWORKER_BACKEND_DATABASE_CONNECTION, BGWORKER_CLASS_PARALLEL, BGWORKER_SHMEM_ACCESS, BgWorkerStart_ConsistentState, dsm_segment_handle(), ParallelWorkerInfo::error_mqh, i, ParallelContext::known_attached_workers, MemoryContextSwitchTo(), MyProcPid, ParallelContext::nknown_attached_workers, ParallelContext::nworkers, ParallelContext::nworkers_launched, ParallelContext::nworkers_to_launch, palloc0(), RegisterDynamicBackgroundWorker(), ParallelContext::seg, shm_mq_detach(), shm_mq_set_handle(), snprintf, sprintf, TopTransactionContext, UInt32GetDatum(), and ParallelContext::worker.

Referenced by _brin_begin_parallel(), _bt_begin_parallel(), ExecGather(), ExecGatherMerge(), and parallel_vacuum_process_all_indexes().

◆ ParallelContextActive()

bool ParallelContextActive ( void  )

Definition at line 1020 of file parallel.c.

1021 {
1022  return !dlist_is_empty(&pcxt_list);
1023 }

References dlist_is_empty(), and pcxt_list.

Referenced by AtPrepare_PredicateLocks(), ExitParallelMode(), and ReleasePredicateLocks().

◆ ParallelWorkerMain()

void ParallelWorkerMain ( Datum  main_arg)

Definition at line 1288 of file parallel.c.

1289 {
1290  dsm_segment *seg;
1291  shm_toc *toc;
1292  FixedParallelState *fps;
1293  char *error_queue_space;
1294  shm_mq *mq;
1295  shm_mq_handle *mqh;
1296  char *libraryspace;
1297  char *entrypointstate;
1298  char *library_name;
1299  char *function_name;
1300  parallel_worker_main_type entrypt;
1301  char *gucspace;
1302  char *combocidspace;
1303  char *tsnapspace;
1304  char *asnapspace;
1305  char *tstatespace;
1306  char *pendingsyncsspace;
1307  char *reindexspace;
1308  char *relmapperspace;
1309  char *uncommittedenumsspace;
1310  char *clientconninfospace;
1311  char *session_dsm_handle_space;
1312  Snapshot tsnapshot;
1313  Snapshot asnapshot;
1314 
1315  /* Set flag to indicate that we're initializing a parallel worker. */
1316  InitializingParallelWorker = true;
1317 
1318  /* Establish signal handlers. */
1319  pqsignal(SIGTERM, die);
1320  BackgroundWorkerUnblockSignals();
1321 
1322  /* Determine and set our parallel worker number. */
1323  Assert(ParallelWorkerNumber == -1);
1324  memcpy(&ParallelWorkerNumber, MyBgworkerEntry->bgw_extra, sizeof(int));
1325 
1326  /* Set up a memory context to work in, just for cleanliness. */
1327  CurrentMemoryContext = AllocSetContextCreate(TopMemoryContext,
1328  "Parallel worker",
1329  ALLOCSET_DEFAULT_SIZES);
1330 
1331  /*
1332  * Attach to the dynamic shared memory segment for the parallel query, and
1333  * find its table of contents.
1334  *
1335  * Note: at this point, we have not created any ResourceOwner in this
1336  * process. This will result in our DSM mapping surviving until process
1337  * exit, which is fine. If there were a ResourceOwner, it would acquire
1338  * ownership of the mapping, but we have no need for that.
1339  */
1340  seg = dsm_attach(DatumGetUInt32(main_arg));
1341  if (seg == NULL)
1342  ereport(ERROR,
1343  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1344  errmsg("could not map dynamic shared memory segment")));
1345  toc = shm_toc_attach(PARALLEL_MAGIC, dsm_segment_address(seg));
1346  if (toc == NULL)
1347  ereport(ERROR,
1348  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1349  errmsg("invalid magic number in dynamic shared memory segment")));
1350 
1351  /* Look up fixed parallel state. */
1352  fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED, false);
1353  MyFixedParallelState = fps;
1354 
1355  /* Arrange to signal the leader if we exit. */
1356  ParallelLeaderPid = fps->parallel_leader_pid;
1357  ParallelLeaderProcNumber = fps->parallel_leader_proc_number;
1358  before_shmem_exit(ParallelWorkerShutdown, PointerGetDatum(seg));
1359 
1360  /*
1361  * Now we can find and attach to the error queue provided for us. That's
1362  * good, because until we do that, any errors that happen here will not be
1363  * reported back to the process that requested that this worker be
1364  * launched.
1365  */
1366  error_queue_space = shm_toc_lookup(toc, PARALLEL_KEY_ERROR_QUEUE, false);
1367  mq = (shm_mq *) (error_queue_space +
1368  ParallelWorkerNumber * PARALLEL_ERROR_QUEUE_SIZE);
1369  shm_mq_set_sender(mq, MyProc);
1370  mqh = shm_mq_attach(mq, seg, NULL);
1371  pq_redirect_to_shm_mq(seg, mqh);
1372  pq_set_parallel_leader(fps->parallel_leader_pid,
1373  fps->parallel_leader_proc_number);
1374 
1375  /*
1376  * Hooray! Primary initialization is complete. Now, we need to set up our
1377  * backend-local state to match the original backend.
1378  */
1379 
1380  /*
1381  * Join locking group. We must do this before anything that could try to
1382  * acquire a heavyweight lock, because any heavyweight locks acquired to
1383  * this point could block either directly against the parallel group
1384  * leader or against some process which in turn waits for a lock that
1385  * conflicts with the parallel group leader, causing an undetected
1386  * deadlock. (If we can't join the lock group, the leader has gone away,
1387  * so just exit quietly.)
1388  */
1389  if (!BecomeLockGroupMember(fps->parallel_leader_pgproc,
1390  fps->parallel_leader_pid))
1391  return;
1392 
1393  /*
1394  * Restore transaction and statement start-time timestamps. This must
1395  * happen before anything that would start a transaction, else asserts in
1396  * xact.c will fire.
1397  */
1398  SetParallelStartTimestamps(fps->xact_ts, fps->stmt_ts);
1399 
1400  /*
1401  * Identify the entry point to be called. In theory this could result in
1402  * loading an additional library, though most likely the entry point is in
1403  * the core backend or in a library we just loaded.
1404  */
1405  entrypointstate = shm_toc_lookup(toc, PARALLEL_KEY_ENTRYPOINT, false);
1406  library_name = entrypointstate;
1407  function_name = entrypointstate + strlen(library_name) + 1;
1408 
1409  entrypt = LookupParallelWorkerFunction(library_name, function_name);
1410 
1411  /*
1412  * Restore current session authorization and role id. No verification
1413  * happens here, we just blindly adopt the leader's state. Note that this
1414  * has to happen before InitPostgres, since InitializeSessionUserId will
1415  * not set these variables.
1416  */
1417  SetAuthenticatedUserId(fps->authenticated_user_id);
1418  SetSessionAuthorization(fps->session_user_id,
1419  fps->session_user_is_superuser);
1420  SetCurrentRoleId(fps->outer_user_id, fps->role_is_superuser);
1421 
1422  /* Restore database connection. */
1423  BackgroundWorkerInitializeConnectionByOid(fps->database_id,
1424  fps->authenticated_user_id,
1425  0);
1426 
1427  /*
1428  * Set the client encoding to the database encoding, since that is what
1429  * the leader will expect. (We're cheating a bit by not calling
1430  * PrepareClientEncoding first. It's okay because this call will always
1431  * result in installing a no-op conversion. No error should be possible,
1432  * but check anyway.)
1433  */
1434  if (SetClientEncoding(GetDatabaseEncoding()) < 0)
1435  elog(ERROR, "SetClientEncoding(%d) failed", GetDatabaseEncoding());
1436 
1437  /*
1438  * Load libraries that were loaded by original backend. We want to do
1439  * this before restoring GUCs, because the libraries might define custom
1440  * variables.
1441  */
1442  libraryspace = shm_toc_lookup(toc, PARALLEL_KEY_LIBRARY, false);
1443  StartTransactionCommand();
1444  RestoreLibraryState(libraryspace);
1445  CommitTransactionCommand();
1446 
1447  /* Crank up a transaction state appropriate to a parallel worker. */
1448  tstatespace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_STATE, false);
1449  StartParallelWorkerTransaction(tstatespace);
1450 
1451  /*
1452  * Restore state that affects catalog access. Ideally we'd do this even
1453  * before calling InitPostgres, but that has order-of-initialization
1454  * problems, and also the relmapper would get confused during the
1455  * CommitTransactionCommand call above.
1456  */
1457  pendingsyncsspace = shm_toc_lookup(toc, PARALLEL_KEY_PENDING_SYNCS,
1458  false);
1459  RestorePendingSyncs(pendingsyncsspace);
1460  relmapperspace = shm_toc_lookup(toc, PARALLEL_KEY_RELMAPPER_STATE, false);
1461  RestoreRelationMap(relmapperspace);
1462  reindexspace = shm_toc_lookup(toc, PARALLEL_KEY_REINDEX_STATE, false);
1463  RestoreReindexState(reindexspace);
1464  combocidspace = shm_toc_lookup(toc, PARALLEL_KEY_COMBO_CID, false);
1465  RestoreComboCIDState(combocidspace);
1466 
1467  /* Attach to the per-session DSM segment and contained objects. */
1468  session_dsm_handle_space =
1469  shm_toc_lookup(toc, PARALLEL_KEY_SESSION_DSM, false);
1470  AttachSession(*(dsm_handle *) session_dsm_handle_space);
1471 
1472  /*
1473  * If the transaction isolation level is REPEATABLE READ or SERIALIZABLE,
1474  * the leader has serialized the transaction snapshot and we must restore
1475  * it. At lower isolation levels, there is no transaction-lifetime
1476  * snapshot, but we need TransactionXmin to get set to a value which is
1477  * less than or equal to the xmin of every snapshot that will be used by
1478  * this worker. The easiest way to accomplish that is to install the
1479  * active snapshot as the transaction snapshot. Code running in this
1480  * parallel worker might take new snapshots via GetTransactionSnapshot()
1481  * or GetLatestSnapshot(), but it shouldn't have any way of acquiring a
1482  * snapshot older than the active snapshot.
1483  */
1484  asnapspace = shm_toc_lookup(toc, PARALLEL_KEY_ACTIVE_SNAPSHOT, false);
1485  tsnapspace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT, true);
1486  asnapshot = RestoreSnapshot(asnapspace);
1487  tsnapshot = tsnapspace ? RestoreSnapshot(tsnapspace) : asnapshot;
1488  RestoreTransactionSnapshot(tsnapshot,
1489  fps->parallel_leader_pgproc);
1490  PushActiveSnapshot(asnapshot);
1491 
1492  /*
1493  * We've changed which tuples we can see, and must therefore invalidate
1494  * system caches.
1495  */
1496  InvalidateSystemCaches();
1497 
1498  /*
1499  * Restore GUC values from launching backend. We can't do this earlier,
1500  * because GUC check hooks that do catalog lookups need to see the same
1501  * database state as the leader. Also, the check hooks for
1502  * session_authorization and role assume we already set the correct role
1503  * OIDs.
1504  */
1505  gucspace = shm_toc_lookup(toc, PARALLEL_KEY_GUC, false);
1506  RestoreGUCState(gucspace);
1507 
1508  /*
1509  * Restore current user ID and security context. No verification happens
1510  * here, we just blindly adopt the leader's state. We can't do this till
1511  * after restoring GUCs, else we'll get complaints about restoring
1512  * session_authorization and role. (In effect, we're assuming that all
1513  * the restored values are okay to set, even if we are now inside a
1514  * restricted context.)
1515  */
1516  SetUserIdAndSecContext(fps->current_user_id, fps->sec_context);
1517 
1518  /* Restore temp-namespace state to ensure search path matches leader's. */
1519  SetTempNamespaceState(fps->temp_namespace_id,
1520  fps->temp_toast_namespace_id);
1521 
1522  /* Restore uncommitted enums. */
1523  uncommittedenumsspace = shm_toc_lookup(toc, PARALLEL_KEY_UNCOMMITTEDENUMS,
1524  false);
1525  RestoreUncommittedEnums(uncommittedenumsspace);
1526 
1527  /* Restore the ClientConnectionInfo. */
1528  clientconninfospace = shm_toc_lookup(toc, PARALLEL_KEY_CLIENTCONNINFO,
1529  false);
1530  RestoreClientConnectionInfo(clientconninfospace);
1531 
1532  /*
1533  * Initialize SystemUser now that MyClientConnectionInfo is restored. Also
1534  * ensure that auth_method is actually valid, aka authn_id is not NULL.
1535  */
1536  if (MyClientConnectionInfo.authn_id)
1537  InitializeSystemUser(MyClientConnectionInfo.authn_id,
1538  hba_authname(MyClientConnectionInfo.auth_method));
1539 
1540  /* Attach to the leader's serializable transaction, if SERIALIZABLE. */
1541  AttachSerializableXact(fps->serializable_xact_handle);
1542 
1543  /*
1544  * We've initialized all of our state now; nothing should change
1545  * hereafter.
1546  */
1547  InitializingParallelWorker = false;
1548  EnterParallelMode();
1549 
1550  /*
1551  * Time to do the real work: invoke the caller-supplied code.
1552  */
1553  entrypt(seg, toc);
1554 
1555  /* Must exit parallel mode to pop active snapshot. */
1556  ExitParallelMode();
1557 
1558  /* Must pop active snapshot so snapmgr.c doesn't complain. */
1559  PopActiveSnapshot();
1560 
1561  /* Shut down the parallel-worker transaction. */
1562  EndParallelWorkerTransaction();
1563 
1564  /* Detach from the per-session DSM segment. */
1565  DetachSession();
1566 
1567  /* Report success. */
1568  pq_putmessage(PqMsg_Terminate, NULL, 0);
1569 }
static parallel_worker_main_type LookupParallelWorkerFunction(const char *libraryname, const char *funcname)
Definition: parallel.c:1632
int ParallelWorkerNumber
Definition: parallel.c:114
bool InitializingParallelWorker
Definition: parallel.c:120
static FixedParallelState * MyFixedParallelState
Definition: parallel.c:123
static pid_t ParallelLeaderPid
Definition: parallel.c:129
static void ParallelWorkerShutdown(int code, Datum arg)
Definition: parallel.c:1604
void BackgroundWorkerUnblockSignals(void)
Definition: bgworker.c:926
void BackgroundWorkerInitializeConnectionByOid(Oid dboid, Oid useroid, uint32 flags)
Definition: bgworker.c:886
void RestoreComboCIDState(char *comboCIDstate)
Definition: combocid.c:342
void RestoreLibraryState(char *start_address)
Definition: dfmgr.c:670
dsm_segment * dsm_attach(dsm_handle h)
Definition: dsm.c:665
ProcNumber ParallelLeaderProcNumber
Definition: globals.c:91
void RestoreGUCState(void *gucstate)
Definition: guc.c:6193
const char * hba_authname(UserAuth auth_method)
Definition: hba.c:3065
void(* parallel_worker_main_type)(dsm_segment *seg, shm_toc *toc)
Definition: parallel.h:23
void RestoreReindexState(const void *reindexstate)
Definition: index.c:4240
void InvalidateSystemCaches(void)
Definition: inval.c:849
void before_shmem_exit(pg_on_exit_callback function, Datum arg)
Definition: ipc.c:337
#define pq_putmessage(msgtype, s, len)
Definition: libpq.h:49
int GetDatabaseEncoding(void)
Definition: mbutils.c:1261
int SetClientEncoding(int encoding)
Definition: mbutils.c:208
MemoryContext CurrentMemoryContext
Definition: mcxt.c:143
void InitializeSystemUser(const char *authn_id, const char *auth_method)
Definition: miscinit.c:927
void SetSessionAuthorization(Oid userid, bool is_superuser)
Definition: miscinit.c:973
void SetCurrentRoleId(Oid roleid, bool is_superuser)
Definition: miscinit.c:1009
ClientConnectionInfo MyClientConnectionInfo
Definition: miscinit.c:1071
void RestoreClientConnectionInfo(char *conninfo)
Definition: miscinit.c:1136
void SetAuthenticatedUserId(Oid userid)
Definition: miscinit.c:607
void SetUserIdAndSecContext(Oid userid, int sec_context)
Definition: miscinit.c:674
void SetTempNamespaceState(Oid tempNamespaceId, Oid tempToastNamespaceId)
Definition: namespace.c:3821
void RestoreUncommittedEnums(void *space)
Definition: pg_enum.c:873
#define die(msg)
pqsigfunc pqsignal(int signo, pqsigfunc func)
static uint32 DatumGetUInt32(Datum X)
Definition: postgres.h:222
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:322
BackgroundWorker * MyBgworkerEntry
Definition: postmaster.c:192
void pq_set_parallel_leader(pid_t pid, ProcNumber procNumber)
Definition: pqmq.c:78
void pq_redirect_to_shm_mq(dsm_segment *seg, shm_mq_handle *mqh)
Definition: pqmq.c:53
void AttachSerializableXact(SerializableXactHandle handle)
Definition: predicate.c:5045
#define PqMsg_Terminate
Definition: protocol.h:28
void RestoreRelationMap(char *startAddress)
Definition: relmapper.c:741
void DetachSession(void)
Definition: session.c:201
void AttachSession(dsm_handle handle)
Definition: session.c:155
void shm_mq_set_sender(shm_mq *mq, PGPROC *proc)
Definition: shm_mq.c:224
shm_toc * shm_toc_attach(uint64 magic, void *address)
Definition: shm_toc.c:64
void * shm_toc_lookup(shm_toc *toc, uint64 key, bool noError)
Definition: shm_toc.c:232
void PushActiveSnapshot(Snapshot snapshot)
Definition: snapmgr.c:648
Snapshot RestoreSnapshot(char *start_address)
Definition: snapmgr.c:1775
void RestoreTransactionSnapshot(Snapshot snapshot, void *source_pgproc)
Definition: snapmgr.c:1840
void PopActiveSnapshot(void)
Definition: snapmgr.c:743
bool BecomeLockGroupMember(PGPROC *leader, int pid)
Definition: proc.c:1948
void RestorePendingSyncs(char *startAddress)
Definition: storage.c:623
const char * authn_id
Definition: libpq-be.h:103
UserAuth auth_method
Definition: libpq-be.h:109
void ExitParallelMode(void)
Definition: xact.c:1063
void EnterParallelMode(void)
Definition: xact.c:1050
void StartTransactionCommand(void)
Definition: xact.c:3051
void StartParallelWorkerTransaction(char *tstatespace)
Definition: xact.c:5599
void SetParallelStartTimestamps(TimestampTz xact_ts, TimestampTz stmt_ts)
Definition: xact.c:858
void EndParallelWorkerTransaction(void)
Definition: xact.c:5624
void CommitTransactionCommand(void)
Definition: xact.c:3149

References ALLOCSET_DEFAULT_SIZES, AllocSetContextCreate, Assert, AttachSerializableXact(), AttachSession(), ClientConnectionInfo::auth_method, FixedParallelState::authenticated_user_id, ClientConnectionInfo::authn_id, BackgroundWorkerInitializeConnectionByOid(), BackgroundWorkerUnblockSignals(), BecomeLockGroupMember(), before_shmem_exit(), BackgroundWorker::bgw_extra, CommitTransactionCommand(), FixedParallelState::current_user_id, CurrentMemoryContext, FixedParallelState::database_id, DatumGetUInt32(), DetachSession(), die, dsm_attach(), dsm_segment_address(), elog, EndParallelWorkerTransaction(), EnterParallelMode(), ereport, errcode(), errmsg(), ERROR, ExitParallelMode(), GetDatabaseEncoding(), hba_authname(), InitializeSystemUser(), InitializingParallelWorker, InvalidateSystemCaches(), LookupParallelWorkerFunction(), MyBgworkerEntry, MyClientConnectionInfo, MyFixedParallelState, MyProc, FixedParallelState::outer_user_id, PARALLEL_ERROR_QUEUE_SIZE, PARALLEL_KEY_ACTIVE_SNAPSHOT, PARALLEL_KEY_CLIENTCONNINFO, PARALLEL_KEY_COMBO_CID, PARALLEL_KEY_ENTRYPOINT, PARALLEL_KEY_ERROR_QUEUE, PARALLEL_KEY_FIXED, PARALLEL_KEY_GUC, PARALLEL_KEY_LIBRARY, PARALLEL_KEY_PENDING_SYNCS, PARALLEL_KEY_REINDEX_STATE, PARALLEL_KEY_RELMAPPER_STATE, PARALLEL_KEY_SESSION_DSM, PARALLEL_KEY_TRANSACTION_SNAPSHOT, PARALLEL_KEY_TRANSACTION_STATE, PARALLEL_KEY_UNCOMMITTEDENUMS, FixedParallelState::parallel_leader_pgproc, FixedParallelState::parallel_leader_pid, FixedParallelState::parallel_leader_proc_number, PARALLEL_MAGIC, ParallelLeaderPid, ParallelLeaderProcNumber, ParallelWorkerNumber, ParallelWorkerShutdown(), PointerGetDatum(), PopActiveSnapshot(), pq_putmessage, pq_redirect_to_shm_mq(), pq_set_parallel_leader(), PqMsg_Terminate, pqsignal(), PushActiveSnapshot(), RestoreClientConnectionInfo(), RestoreComboCIDState(), RestoreGUCState(), RestoreLibraryState(), RestorePendingSyncs(), RestoreReindexState(), RestoreRelationMap(), RestoreSnapshot(), RestoreTransactionSnapshot(), 
RestoreUncommittedEnums(), FixedParallelState::role_is_superuser, FixedParallelState::sec_context, FixedParallelState::serializable_xact_handle, FixedParallelState::session_user_id, FixedParallelState::session_user_is_superuser, SetAuthenticatedUserId(), SetClientEncoding(), SetCurrentRoleId(), SetParallelStartTimestamps(), SetSessionAuthorization(), SetTempNamespaceState(), SetUserIdAndSecContext(), shm_mq_attach(), shm_mq_set_sender(), shm_toc_attach(), shm_toc_lookup(), StartParallelWorkerTransaction(), StartTransactionCommand(), FixedParallelState::stmt_ts, FixedParallelState::temp_namespace_id, FixedParallelState::temp_toast_namespace_id, TopMemoryContext, and FixedParallelState::xact_ts.

◆ ParallelWorkerReportLastRecEnd()

void ParallelWorkerReportLastRecEnd ( XLogRecPtr  last_xlog_end)

Definition at line 1576 of file parallel.c.

1577 {
1578  FixedParallelState *fps = MyFixedParallelState;
1579 
1580  Assert(fps != NULL);
1581  SpinLockAcquire(&fps->mutex);
1582  if (fps->last_xlog_end < last_xlog_end)
1583  fps->last_xlog_end = last_xlog_end;
1584  SpinLockRelease(&fps->mutex);
1585 }
#define SpinLockRelease(lock)
Definition: spin.h:61
#define SpinLockAcquire(lock)
Definition: spin.h:59

References Assert, FixedParallelState::last_xlog_end, FixedParallelState::mutex, MyFixedParallelState, SpinLockAcquire, and SpinLockRelease.

Referenced by CommitTransaction().

◆ ReinitializeParallelDSM()

void ReinitializeParallelDSM ( ParallelContext pcxt)

Definition at line 504 of file parallel.c.

505 {
506  FixedParallelState *fps;
507 
508  /* Wait for any old workers to exit. */
509  if (pcxt->nworkers_launched > 0)
510  {
511  WaitForParallelWorkersToFinish(pcxt);
512  WaitForParallelWorkersToExit(pcxt);
513  pcxt->nworkers_launched = 0;
514  if (pcxt->known_attached_workers)
515  {
516  pfree(pcxt->known_attached_workers);
517  pcxt->known_attached_workers = NULL;
518  pcxt->nknown_attached_workers = 0;
519  }
520  }
521 
522  /* Reset a few bits of fixed parallel state to a clean state. */
523  fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED, false);
524  fps->last_xlog_end = 0;
525 
526  /* Recreate error queues (if they exist). */
527  if (pcxt->nworkers > 0)
528  {
529  char *error_queue_space;
530  int i;
531 
532  error_queue_space =
533  shm_toc_lookup(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE, false);
534  for (i = 0; i < pcxt->nworkers; ++i)
535  {
536  char *start;
537  shm_mq *mq;
538 
539  start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
540  mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
541  shm_mq_set_receiver(mq, MyProc);
542  pcxt->worker[i].error_mqh = shm_mq_attach(mq, pcxt->seg, NULL);
543  }
544  }
545 }
void WaitForParallelWorkersToFinish(ParallelContext *pcxt)
Definition: parallel.c:792

References ParallelWorkerInfo::error_mqh, i, ParallelContext::known_attached_workers, FixedParallelState::last_xlog_end, MyProc, ParallelContext::nknown_attached_workers, ParallelContext::nworkers, ParallelContext::nworkers_launched, PARALLEL_ERROR_QUEUE_SIZE, PARALLEL_KEY_ERROR_QUEUE, PARALLEL_KEY_FIXED, pfree(), ParallelContext::seg, shm_mq_attach(), shm_mq_create(), shm_mq_set_receiver(), shm_toc_lookup(), start, ParallelContext::toc, WaitForParallelWorkersToExit(), WaitForParallelWorkersToFinish(), and ParallelContext::worker.

Referenced by ExecParallelReinitialize(), and parallel_vacuum_process_all_indexes().

◆ ReinitializeParallelWorkers()

void ReinitializeParallelWorkers ( ParallelContext pcxt,
int  nworkers_to_launch 
)

Definition at line 554 of file parallel.c.

555 {
556  /*
557  * The number of workers that need to be launched must be less than the
558  * number of workers with which the parallel context is initialized. But
559  * the caller might not know that InitializeParallelDSM reduced nworkers,
560  * so just silently trim the request.
561  */
562  pcxt->nworkers_to_launch = Min(pcxt->nworkers, nworkers_to_launch);
563 }
#define Min(x, y)
Definition: c.h:983

References Min, ParallelContext::nworkers, and ParallelContext::nworkers_to_launch.

Referenced by parallel_vacuum_process_all_indexes().

◆ WaitForParallelWorkersToAttach()

void WaitForParallelWorkersToAttach ( ParallelContext pcxt)

Definition at line 689 of file parallel.c.

690 {
691  int i;
692 
693  /* Skip this if we have no launched workers. */
694  if (pcxt->nworkers_launched == 0)
695  return;
696 
697  for (;;)
698  {
699  /*
700  * This will process any parallel messages that are pending and it may
701  * also throw an error propagated from a worker.
702  */
703  CHECK_FOR_INTERRUPTS();
704 
705  for (i = 0; i < pcxt->nworkers_launched; ++i)
706  {
707  BgwHandleStatus status;
708  shm_mq *mq;
709  int rc;
710  pid_t pid;
711 
712  if (pcxt->known_attached_workers[i])
713  continue;
714 
715  /*
716  * If error_mqh is NULL, then the worker has already exited
717  * cleanly.
718  */
719  if (pcxt->worker[i].error_mqh == NULL)
720  {
721  pcxt->known_attached_workers[i] = true;
722  ++pcxt->nknown_attached_workers;
723  continue;
724  }
725 
726  status = GetBackgroundWorkerPid(pcxt->worker[i].bgwhandle, &pid);
727  if (status == BGWH_STARTED)
728  {
729  /* Has the worker attached to the error queue? */
730  mq = shm_mq_get_queue(pcxt->worker[i].error_mqh);
731  if (shm_mq_get_sender(mq) != NULL)
732  {
733  /* Yes, so it is known to be attached. */
734  pcxt->known_attached_workers[i] = true;
735  ++pcxt->nknown_attached_workers;
736  }
737  }
738  else if (status == BGWH_STOPPED)
739  {
740  /*
741  * If the worker stopped without attaching to the error queue,
742  * throw an error.
743  */
744  mq = shm_mq_get_queue(pcxt->worker[i].error_mqh);
745  if (shm_mq_get_sender(mq) == NULL)
746  ereport(ERROR,
747  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
748  errmsg("parallel worker failed to initialize"),
749  errhint("More details may be available in the server log.")));
750 
751  pcxt->known_attached_workers[i] = true;
752  ++pcxt->nknown_attached_workers;
753  }
754  else
755  {
756  /*
757  * Worker not yet started, so we must wait. The postmaster
758  * will notify us if the worker's state changes. Our latch
759  * might also get set for some other reason, but if so we'll
760  * just end up waiting for the same worker again.
761  */
762  rc = WaitLatch(MyLatch,
763  WL_LATCH_SET | WL_EXIT_ON_PM_DEATH,
764  -1, WAIT_EVENT_BGWORKER_STARTUP);
765 
766  if (rc & WL_LATCH_SET)
767  ResetLatch(MyLatch);
768  }
769  }
770 
771  /* If all workers are known to have started, we're done. */
772  if (pcxt->nknown_attached_workers >= pcxt->nworkers_launched)
773  {
774  Assert(pcxt->nknown_attached_workers == pcxt->nworkers_launched);
775  break;
776  }
777  }
778 }
BgwHandleStatus GetBackgroundWorkerPid(BackgroundWorkerHandle *handle, pid_t *pidp)
Definition: bgworker.c:1157
BgwHandleStatus
Definition: bgworker.h:104
@ BGWH_STARTED
Definition: bgworker.h:105
@ BGWH_STOPPED
Definition: bgworker.h:107
int errhint(const char *fmt,...)
Definition: elog.c:1317
void ResetLatch(Latch *latch)
Definition: latch.c:724
int WaitLatch(Latch *latch, int wakeEvents, long timeout, uint32 wait_event_info)
Definition: latch.c:517
#define WL_EXIT_ON_PM_DEATH
Definition: latch.h:132
#define WL_LATCH_SET
Definition: latch.h:127
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:122
shm_mq * shm_mq_get_queue(shm_mq_handle *mqh)
Definition: shm_mq.c:905
PGPROC * shm_mq_get_sender(shm_mq *mq)
Definition: shm_mq.c:257

References Assert, BGWH_STARTED, BGWH_STOPPED, ParallelWorkerInfo::bgwhandle, CHECK_FOR_INTERRUPTS, ereport, errcode(), errhint(), errmsg(), ERROR, ParallelWorkerInfo::error_mqh, GetBackgroundWorkerPid(), i, ParallelContext::known_attached_workers, MyLatch, ParallelContext::nknown_attached_workers, ParallelContext::nworkers_launched, ResetLatch(), shm_mq_get_queue(), shm_mq_get_sender(), WaitLatch(), WL_EXIT_ON_PM_DEATH, WL_LATCH_SET, and ParallelContext::worker.

Referenced by _brin_begin_parallel(), and _bt_begin_parallel().

◆ WaitForParallelWorkersToFinish()

void WaitForParallelWorkersToFinish ( ParallelContext * pcxt)

Definition at line 792 of file parallel.c.

793 {
794  for (;;)
795  {
796  bool anyone_alive = false;
797  int nfinished = 0;
798  int i;
799 
800  /*
801  * This will process any parallel messages that are pending, which may
802  * change the outcome of the loop that follows. It may also throw an
803  * error propagated from a worker.
804  */
 805  CHECK_FOR_INTERRUPTS();
 806 
807  for (i = 0; i < pcxt->nworkers_launched; ++i)
808  {
809  /*
810  * If error_mqh is NULL, then the worker has already exited
811  * cleanly. If we have received a message through error_mqh from
812  * the worker, we know it started up cleanly, and therefore we're
813  * certain to be notified when it exits.
814  */
815  if (pcxt->worker[i].error_mqh == NULL)
816  ++nfinished;
817  else if (pcxt->known_attached_workers[i])
818  {
819  anyone_alive = true;
820  break;
821  }
822  }
823 
824  if (!anyone_alive)
825  {
826  /* If all workers are known to have finished, we're done. */
827  if (nfinished >= pcxt->nworkers_launched)
828  {
829  Assert(nfinished == pcxt->nworkers_launched);
830  break;
831  }
832 
833  /*
834  * We didn't detect any living workers, but not all workers are
835  * known to have exited cleanly. Either not all workers have
836  * launched yet, or maybe some of them failed to start or
837  * terminated abnormally.
838  */
839  for (i = 0; i < pcxt->nworkers_launched; ++i)
840  {
841  pid_t pid;
842  shm_mq *mq;
843 
844  /*
845  * If the worker is BGWH_NOT_YET_STARTED or BGWH_STARTED, we
846  * should just keep waiting. If it is BGWH_STOPPED, then
847  * further investigation is needed.
848  */
849  if (pcxt->worker[i].error_mqh == NULL ||
850  pcxt->worker[i].bgwhandle == NULL ||
 851  GetBackgroundWorkerPid(pcxt->worker[i].bgwhandle,
 852  &pid) != BGWH_STOPPED)
 853  continue;
854 
855  /*
856  * Check whether the worker ended up stopped without ever
857  * attaching to the error queue. If so, the postmaster was
858  * unable to fork the worker or it exited without initializing
859  * properly. We must throw an error, since the caller may
860  * have been expecting the worker to do some work before
861  * exiting.
862  */
863  mq = shm_mq_get_queue(pcxt->worker[i].error_mqh);
864  if (shm_mq_get_sender(mq) == NULL)
865  ereport(ERROR,
866  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
867  errmsg("parallel worker failed to initialize"),
868  errhint("More details may be available in the server log.")));
869 
870  /*
871  * The worker is stopped, but is attached to the error queue.
872  * Unless there's a bug somewhere, this will only happen when
873  * the worker writes messages and terminates after the
874  * CHECK_FOR_INTERRUPTS() near the top of this function and
875  * before the call to GetBackgroundWorkerPid(). In that case,
 876  * our latch should have been set as well and the right things
877  * will happen on the next pass through the loop.
878  */
879  }
880  }
881 
 882  (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, -1,
 883  WAIT_EVENT_PARALLEL_FINISH);
 884  ResetLatch(MyLatch);
 885  }
886 
887  if (pcxt->toc != NULL)
888  {
889  FixedParallelState *fps;
890 
891  fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED, false);
892  if (fps->last_xlog_end > XactLastRecEnd)
 893  XactLastRecEnd = fps->last_xlog_end;
 894  }
895 }
XLogRecPtr XactLastRecEnd
Definition: xlog.c:254

References Assert, BGWH_STOPPED, ParallelWorkerInfo::bgwhandle, CHECK_FOR_INTERRUPTS, ereport, errcode(), errhint(), errmsg(), ERROR, ParallelWorkerInfo::error_mqh, GetBackgroundWorkerPid(), i, ParallelContext::known_attached_workers, FixedParallelState::last_xlog_end, MyLatch, ParallelContext::nworkers_launched, PARALLEL_KEY_FIXED, ResetLatch(), shm_mq_get_queue(), shm_mq_get_sender(), shm_toc_lookup(), ParallelContext::toc, WaitLatch(), WL_EXIT_ON_PM_DEATH, WL_LATCH_SET, ParallelContext::worker, and XactLastRecEnd.

Referenced by _brin_end_parallel(), _bt_end_parallel(), ExecParallelFinish(), parallel_vacuum_process_all_indexes(), and ReinitializeParallelDSM().

Variable Documentation

◆ InitializingParallelWorker

◆ ParallelMessagePending

extern PGDLLIMPORT volatile sig_atomic_t ParallelMessagePending

◆ ParallelWorkerNumber