PostgreSQL Source Code  git master
setup.c
/*--------------------------------------------------------------------------
 *
 * setup.c
 *     Code to set up a dynamic shared memory segment and a specified
 *     number of background workers for shared memory message queue
 *     testing.
 *
 * Copyright (c) 2013-2023, PostgreSQL Global Development Group
 *
 * IDENTIFICATION
 *     src/test/modules/test_shm_mq/setup.c
 *
 * -------------------------------------------------------------------------
 */

#include "postgres.h"

#include "miscadmin.h"
#include "pgstat.h"
#include "postmaster/bgworker.h"
#include "storage/procsignal.h"
#include "storage/shm_toc.h"
#include "test_shm_mq.h"
#include "utils/memutils.h"

typedef struct
{
    int         nworkers;
    BackgroundWorkerHandle *handle[FLEXIBLE_ARRAY_MEMBER];
} worker_state;

static void setup_dynamic_shared_memory(int64 queue_size, int nworkers,
                                        dsm_segment **segp,
                                        test_shm_mq_header **hdrp,
                                        shm_mq **outp, shm_mq **inp);
static worker_state *setup_background_workers(int nworkers,
                                              dsm_segment *seg);
static void cleanup_background_workers(dsm_segment *seg, Datum arg);
static void wait_for_workers_to_become_ready(worker_state *wstate,
                                             volatile test_shm_mq_header *hdr);
static bool check_worker_status(worker_state *wstate);

/*
 * Set up a dynamic shared memory segment and zero or more background workers
 * for a test run.
 */
void
test_shm_mq_setup(int64 queue_size, int32 nworkers, dsm_segment **segp,
                  shm_mq_handle **output, shm_mq_handle **input)
{
    dsm_segment *seg;
    test_shm_mq_header *hdr;
    shm_mq     *outq = NULL;    /* placate compiler */
    shm_mq     *inq = NULL;     /* placate compiler */
    worker_state *wstate;

    /* Set up a dynamic shared memory segment. */
    setup_dynamic_shared_memory(queue_size, nworkers, &seg, &hdr, &outq, &inq);
    *segp = seg;

    /* Register background workers. */
    wstate = setup_background_workers(nworkers, seg);

    /* Attach the queues. */
    *output = shm_mq_attach(outq, seg, wstate->handle[0]);
    *input = shm_mq_attach(inq, seg, wstate->handle[nworkers - 1]);

    /* Wait for workers to become ready. */
    wait_for_workers_to_become_ready(wstate, hdr);

    /*
     * Once we reach this point, all workers are ready.  We no longer need to
     * kill them if we die; they'll die on their own as the message queues
     * shut down.
     */
    cancel_on_dsm_detach(seg, cleanup_background_workers,
                         PointerGetDatum(wstate));
    pfree(wstate);
}
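
/*
 * Illustrative sketch (not part of this file): roughly how a caller, such as
 * this module's SQL-callable test function in test.c, might drive the setup
 * code above.  The message contents and error handling here are assumptions
 * for illustration, not the module's actual test logic.
 */
#ifdef NOT_USED
static void
example_caller(void)
{
    dsm_segment *seg;
    shm_mq_handle *outqh;
    shm_mq_handle *inqh;
    shm_mq_result res;
    Size        len;
    void       *data;

    /* Create the segment and queues, start 3 workers, wait for readiness. */
    test_shm_mq_setup(16384, 3, &seg, &outqh, &inqh);

    /* Send one message into the first queue... */
    res = shm_mq_send(outqh, 5, "hello", false, true);
    if (res != SHM_MQ_SUCCESS)
        elog(ERROR, "could not send message");

    /* ...and read it back from the last queue after the workers relay it. */
    res = shm_mq_receive(inqh, &len, &data, false);
    if (res != SHM_MQ_SUCCESS)
        elog(ERROR, "could not receive message");

    /* Detaching the segment shuts down the queues, and thus the workers. */
    dsm_detach(seg);
}
#endif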

/*
 * Set up a dynamic shared memory segment.
 *
 * We set up a small control region that contains only a test_shm_mq_header,
 * plus one region per message queue.  There are as many message queues as
 * the number of workers, plus one.
 */
static void
setup_dynamic_shared_memory(int64 queue_size, int nworkers,
                            dsm_segment **segp, test_shm_mq_header **hdrp,
                            shm_mq **outp, shm_mq **inp)
{
    shm_toc_estimator e;
    int         i;
    Size        segsize;
    dsm_segment *seg;
    shm_toc    *toc;
    test_shm_mq_header *hdr;

    /* Ensure a valid queue size. */
    if (queue_size < 0 || ((uint64) queue_size) < shm_mq_minimum_size)
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("queue size must be at least %zu bytes",
                        shm_mq_minimum_size)));
    if (queue_size != ((Size) queue_size))
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("queue size overflows size_t")));

    /*
     * Estimate how much shared memory we need.
     *
     * Because the TOC machinery may choose to insert padding of oddly-sized
     * requests, we must estimate each chunk separately.
     *
     * We need one key to register the location of the header, and we need
     * nworkers + 1 keys to track the locations of the message queues.
     */
    shm_toc_initialize_estimator(&e);
    shm_toc_estimate_chunk(&e, sizeof(test_shm_mq_header));
    for (i = 0; i <= nworkers; ++i)
        shm_toc_estimate_chunk(&e, (Size) queue_size);
    shm_toc_estimate_keys(&e, 2 + nworkers);
    segsize = shm_toc_estimate(&e);

    /* Create the shared memory segment and establish a table of contents. */
    seg = dsm_create(shm_toc_estimate(&e), 0);
    toc = shm_toc_create(PG_TEST_SHM_MQ_MAGIC, dsm_segment_address(seg),
                         segsize);

    /* Set up the header region. */
    hdr = shm_toc_allocate(toc, sizeof(test_shm_mq_header));
    SpinLockInit(&hdr->mutex);
    hdr->workers_total = nworkers;
    hdr->workers_attached = 0;
    hdr->workers_ready = 0;
    shm_toc_insert(toc, 0, hdr);

    /* Set up one message queue per worker, plus one. */
    for (i = 0; i <= nworkers; ++i)
    {
        shm_mq     *mq;

        mq = shm_mq_create(shm_toc_allocate(toc, (Size) queue_size),
                           (Size) queue_size);
        shm_toc_insert(toc, i + 1, mq);

        if (i == 0)
        {
            /* We send messages to the first queue. */
            shm_mq_set_sender(mq, MyProc);
            *outp = mq;
        }
        if (i == nworkers)
        {
            /* We receive messages from the last queue. */
            shm_mq_set_receiver(mq, MyProc);
            *inp = mq;
        }
    }

    /* Return results to caller. */
    *segp = seg;
    *hdrp = hdr;
}
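
/*
 * Added note (not in the original file): with nworkers = 2, the code above
 * produces a table of contents like
 *
 *     key 0 -> test_shm_mq_header (mutex, workers_total, workers_attached,
 *              workers_ready)
 *     key 1 -> queue 0: written by this backend, read by a worker
 *     key 2 -> queue 1: relayed from queue 0 to queue 2 by the workers
 *     key 3 -> queue 2: written by a worker, read by this backend
 *
 * so a message sent on the first queue is copied from queue to queue by the
 * workers and arrives back at the sending backend on the last queue.
 */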

/*
 * Register background workers.
 */
static worker_state *
setup_background_workers(int nworkers, dsm_segment *seg)
{
    MemoryContext oldcontext;
    BackgroundWorker worker;
    worker_state *wstate;
    int         i;

    /*
     * We need the worker_state object and the background worker handles to
     * which it points to be allocated in CurTransactionContext rather than
     * ExprContext; otherwise, they'll be destroyed before the on_dsm_detach
     * hooks run.
     */
    oldcontext = MemoryContextSwitchTo(CurTransactionContext);

    /* Create worker state object. */
    wstate = MemoryContextAlloc(TopTransactionContext,
                                offsetof(worker_state, handle) +
                                sizeof(BackgroundWorkerHandle *) * nworkers);
    wstate->nworkers = 0;

    /*
     * Arrange to kill all the workers if we abort before all workers are
     * finished hooking themselves up to the dynamic shared memory segment.
     *
     * If we die after all the workers have finished hooking themselves up to
     * the dynamic shared memory segment, we'll mark the two queues to which
     * we're directly connected as detached, and the worker(s) connected to
     * those queues will exit, marking any other queues to which they are
     * connected as detached.  This will cause any as-yet-unaware workers
     * connected to those queues to exit in their turn, and so on, until
     * everybody exits.
     *
     * But suppose the workers which are supposed to connect to the queues to
     * which we're directly attached exit due to some error before they
     * actually attach the queues.  The remaining workers will have no way of
     * knowing this.  From their perspective, they're still waiting for those
     * workers to start, when in fact they've already died.
     */
    on_dsm_detach(seg, cleanup_background_workers,
                  PointerGetDatum(wstate));

    /* Configure a worker. */
    memset(&worker, 0, sizeof(worker));
    worker.bgw_flags = BGWORKER_SHMEM_ACCESS;
    worker.bgw_start_time = BgWorkerStart_ConsistentState;
    worker.bgw_restart_time = BGW_NEVER_RESTART;
    sprintf(worker.bgw_library_name, "test_shm_mq");
    sprintf(worker.bgw_function_name, "test_shm_mq_main");
    snprintf(worker.bgw_type, BGW_MAXLEN, "test_shm_mq");
    worker.bgw_main_arg = UInt32GetDatum(dsm_segment_handle(seg));
    /* set bgw_notify_pid, so we can detect if the worker stops */
    worker.bgw_notify_pid = MyProcPid;

    /* Register the workers. */
    for (i = 0; i < nworkers; ++i)
    {
        if (!RegisterDynamicBackgroundWorker(&worker, &wstate->handle[i]))
            ereport(ERROR,
                    (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
                     errmsg("could not register background process"),
                     errhint("You may need to increase max_worker_processes.")));
        ++wstate->nworkers;
    }

    /* All done. */
    MemoryContextSwitchTo(oldcontext);
    return wstate;
}
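
/*
 * Illustrative sketch (not part of this file): the worker entrypoint named
 * above, test_shm_mq_main() in worker.c, begins roughly like this, using
 * bgw_main_arg to locate and attach the segment created by the code above.
 * This is paraphrased from memory and may not match worker.c exactly.
 */
#ifdef NOT_USED
void
example_worker_main(Datum main_arg)
{
    dsm_segment *seg;
    shm_toc    *toc;
    volatile test_shm_mq_header *hdr;

    /* Attach to the segment whose handle was passed as bgw_main_arg. */
    seg = dsm_attach(DatumGetUInt32(main_arg));
    if (seg == NULL)
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                 errmsg("unable to map dynamic shared memory segment")));

    /* Find the table of contents, then the header inserted at key 0. */
    toc = shm_toc_attach(PG_TEST_SHM_MQ_MAGIC, dsm_segment_address(seg));
    if (toc == NULL)
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                 errmsg("bad magic number in dynamic shared memory segment")));
    hdr = shm_toc_lookup(toc, 0, false);

    /* ... then attach the queues, signal readiness, and relay messages ... */
}
#endif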

static void
cleanup_background_workers(dsm_segment *seg, Datum arg)
{
    worker_state *wstate = (worker_state *) DatumGetPointer(arg);

    while (wstate->nworkers > 0)
    {
        --wstate->nworkers;
        TerminateBackgroundWorker(wstate->handle[wstate->nworkers]);
    }
}

static void
wait_for_workers_to_become_ready(worker_state *wstate,
                                 volatile test_shm_mq_header *hdr)
{
    bool        result = false;

    for (;;)
    {
        int         workers_ready;

        /* If all the workers are ready, we have succeeded. */
        SpinLockAcquire(&hdr->mutex);
        workers_ready = hdr->workers_ready;
        SpinLockRelease(&hdr->mutex);
        if (workers_ready >= wstate->nworkers)
        {
            result = true;
            break;
        }

        /* If any workers (or the postmaster) have died, we have failed. */
        if (!check_worker_status(wstate))
        {
            result = false;
            break;
        }

        /* Wait to be signaled. */
        (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
                         PG_WAIT_EXTENSION);

        /* Reset the latch so we don't spin. */
        ResetLatch(MyLatch);

        /* An interrupt may have occurred while we were waiting. */
        CHECK_FOR_INTERRUPTS();
    }

    if (!result)
        ereport(ERROR,
                (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
                 errmsg("one or more background workers failed to start")));
}
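
/*
 * Illustrative sketch (not part of this file): the counterpart in worker.c
 * that wakes the wait loop above.  Once a worker has attached its queues, it
 * bumps workers_ready under the same spinlock and sets the latch of the
 * backend that registered it (found via bgw_notify_pid, which the code above
 * set to MyProcPid).  Paraphrased from memory; may not match worker.c
 * exactly, and BackendPidGetProc() requires storage/procarray.h.
 */
#ifdef NOT_USED
static void
example_signal_ready(volatile test_shm_mq_header *hdr)
{
    PGPROC     *registrant;

    SpinLockAcquire(&hdr->mutex);
    ++hdr->workers_ready;
    SpinLockRelease(&hdr->mutex);

    /* Wake the registering backend so it rechecks workers_ready. */
    registrant = BackendPidGetProc(MyBgworkerEntry->bgw_notify_pid);
    if (registrant != NULL)
        SetLatch(&registrant->procLatch);
}
#endif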

static bool
check_worker_status(worker_state *wstate)
{
    int         n;

    /* If any workers (or the postmaster) have died, we have failed. */
    for (n = 0; n < wstate->nworkers; ++n)
    {
        BgwHandleStatus status;
        pid_t       pid;

        status = GetBackgroundWorkerPid(wstate->handle[n], &pid);
        if (status == BGWH_STOPPED || status == BGWH_POSTMASTER_DIED)
            return false;
    }

    /* Otherwise, things still look OK. */
    return true;
}