/*--------------------------------------------------------------------------
 *
 * setup.c
 *		Code to set up a dynamic shared memory segment and a specified
 *		number of background workers for shared memory message queue
 *		testing.
 *
 * Copyright (c) 2013-2025, PostgreSQL Global Development Group
 *
 * IDENTIFICATION
 *		src/test/modules/test_shm_mq/setup.c
 *
 * -------------------------------------------------------------------------
 */

#include "postgres.h"

#include "miscadmin.h"
#include "pgstat.h"
#include "postmaster/bgworker.h"
#include "storage/shm_toc.h"
#include "test_shm_mq.h"
#include "utils/memutils.h"

typedef struct
{
	int			nworkers;
	BackgroundWorkerHandle *handle[FLEXIBLE_ARRAY_MEMBER];
} worker_state;

static void setup_dynamic_shared_memory(int64 queue_size, int nworkers,
										dsm_segment **segp,
										test_shm_mq_header **hdrp,
										shm_mq **outp, shm_mq **inp);
static worker_state *setup_background_workers(int nworkers,
											  dsm_segment *seg);
static void cleanup_background_workers(dsm_segment *seg, Datum arg);
static void wait_for_workers_to_become_ready(worker_state *wstate,
											 volatile test_shm_mq_header *hdr);
static bool check_worker_status(worker_state *wstate);

/* custom wait event value; cached after the first fetch from shared memory */
static uint32 we_bgworker_startup = 0;

/*
 * Set up a dynamic shared memory segment and zero or more background workers
 * for a test run.
 */
void
test_shm_mq_setup(int64 queue_size, int32 nworkers, dsm_segment **segp,
				  shm_mq_handle **output, shm_mq_handle **input)
{
	dsm_segment *seg;
	test_shm_mq_header *hdr;
	shm_mq	   *outq = NULL;	/* placate compiler */
	shm_mq	   *inq = NULL;		/* placate compiler */
	worker_state *wstate;

	/* Set up a dynamic shared memory segment. */
	setup_dynamic_shared_memory(queue_size, nworkers, &seg, &hdr, &outq, &inq);
	*segp = seg;

	/* Register background workers. */
	wstate = setup_background_workers(nworkers, seg);

	/* Attach the queues. */
	*output = shm_mq_attach(outq, seg, wstate->handle[0]);
	*input = shm_mq_attach(inq, seg, wstate->handle[nworkers - 1]);

	/* Wait for workers to become ready. */
	wait_for_workers_to_become_ready(wstate, hdr);

	/*
	 * Once we reach this point, all workers are ready.  We no longer need to
	 * kill them if we die; they'll die on their own as the message queues
	 * shut down.
	 */
	cancel_on_dsm_detach(seg, cleanup_background_workers,
						 PointerGetDatum(wstate));
	pfree(wstate);
}
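
/*
 * Usage sketch (an illustration modeled on this module's test.c; the
 * variable names are the caller's, not defined in this file):
 *
 *		dsm_segment *seg;
 *		shm_mq_handle *outqh;
 *		shm_mq_handle *inqh;
 *
 *		test_shm_mq_setup(queue_size, nworkers, &seg, &outqh, &inqh);
 *		... exchange messages via shm_mq_send() and shm_mq_receive() ...
 *		dsm_detach(seg);
 */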

/*
 * Set up a dynamic shared memory segment.
 *
 * We set up a small control region that contains only a test_shm_mq_header,
 * plus one region per message queue.  There are as many message queues as
 * the number of workers, plus one.
 */
static void
setup_dynamic_shared_memory(int64 queue_size, int nworkers,
							dsm_segment **segp, test_shm_mq_header **hdrp,
							shm_mq **outp, shm_mq **inp)
{
	shm_toc_estimator e;
	int			i;
	Size		segsize;
	dsm_segment *seg;
	shm_toc    *toc;
	test_shm_mq_header *hdr;

	/* Ensure a valid queue size. */
	if (queue_size < 0 || ((uint64) queue_size) < shm_mq_minimum_size)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("queue size must be at least %zu bytes",
						shm_mq_minimum_size)));
	if (queue_size != ((Size) queue_size))
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("queue size overflows size_t")));

	/*
	 * Estimate how much shared memory we need.
	 *
	 * Because the TOC machinery may choose to insert padding after
	 * oddly-sized requests, we must estimate each chunk separately.
	 *
	 * We need one key to register the location of the header, and we need
	 * nworkers + 1 keys to track the locations of the message queues.
	 */
	shm_toc_initialize_estimator(&e);
	shm_toc_estimate_chunk(&e, sizeof(test_shm_mq_header));
	for (i = 0; i <= nworkers; ++i)
		shm_toc_estimate_chunk(&e, (Size) queue_size);
	shm_toc_estimate_keys(&e, 2 + nworkers);
	segsize = shm_toc_estimate(&e);
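
	/*
	 * Illustrative arithmetic (numbers not from the original source): with
	 * nworkers = 3 and queue_size = 8192, we estimate one header-sized
	 * chunk, four 8192-byte queue chunks, and five TOC keys;
	 * shm_toc_estimate() then adds the TOC's own bookkeeping overhead and
	 * alignment padding on top.
	 */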

	/* Create the shared memory segment and establish a table of contents. */
	seg = dsm_create(shm_toc_estimate(&e), 0);
	toc = shm_toc_create(PG_TEST_SHM_MQ_MAGIC, dsm_segment_address(seg),
						 segsize);

	/* Set up the header region. */
	hdr = shm_toc_allocate(toc, sizeof(test_shm_mq_header));
	SpinLockInit(&hdr->mutex);
	hdr->workers_total = nworkers;
	hdr->workers_attached = 0;
	hdr->workers_ready = 0;
	shm_toc_insert(toc, 0, hdr);

	/* Set up one message queue per worker, plus one. */
	for (i = 0; i <= nworkers; ++i)
	{
		shm_mq	   *mq;

		mq = shm_mq_create(shm_toc_allocate(toc, (Size) queue_size),
						   (Size) queue_size);
		shm_toc_insert(toc, i + 1, mq);

		if (i == 0)
		{
			/* We send messages to the first queue. */
			shm_mq_set_sender(mq, MyProc);
			*outp = mq;
		}
		if (i == nworkers)
		{
			/* We receive messages from the last queue. */
			shm_mq_set_receiver(mq, MyProc);
			*inp = mq;
		}
	}

	/* Return results to caller. */
	*segp = seg;
	*hdrp = hdr;
}
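
/*
 * Resulting table-of-contents layout (as established above): key 0 holds
 * the test_shm_mq_header, and keys 1 .. nworkers + 1 hold the message
 * queues.  This backend sends on the queue at key 1 and receives on the
 * queue at key nworkers + 1; the workers relay messages through the queues
 * in between (see worker.c), forming a pipeline.
 */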

/*
 * Register background workers.
 */
static worker_state *
setup_background_workers(int nworkers, dsm_segment *seg)
{
	MemoryContext oldcontext;
	BackgroundWorker worker;
	worker_state *wstate;
	int			i;

	/*
	 * We need the worker_state object and the background worker handles to
	 * which it points to be allocated in CurTransactionContext rather than
	 * ExprContext; otherwise, they'll be destroyed before the on_dsm_detach
	 * hooks run.
	 */
	oldcontext = MemoryContextSwitchTo(CurTransactionContext);

	/* Create worker state object. */
	wstate = MemoryContextAlloc(TopTransactionContext,
								offsetof(worker_state, handle) +
								sizeof(BackgroundWorkerHandle *) * nworkers);
	wstate->nworkers = 0;

	/*
	 * Arrange to kill all the workers if we abort before all workers are
	 * finished hooking themselves up to the dynamic shared memory segment.
	 *
	 * If we die after all the workers have finished hooking themselves up to
	 * the dynamic shared memory segment, we'll mark the two queues to which
	 * we're directly connected as detached, and the worker(s) connected to
	 * those queues will exit, marking any other queues to which they are
	 * connected as detached.  This will cause any as-yet-unaware workers
	 * connected to those queues to exit in their turn, and so on, until
	 * everybody exits.
	 *
	 * But suppose the workers which are supposed to connect to the queues to
	 * which we're directly attached exit due to some error before they
	 * actually attach the queues.  The remaining workers will have no way of
	 * knowing this.  From their perspective, they're still waiting for those
	 * workers to start, when in fact they've already died.
	 */
	on_dsm_detach(seg, cleanup_background_workers,
				  PointerGetDatum(wstate));

	/* Configure a worker. */
	memset(&worker, 0, sizeof(worker));
	worker.bgw_flags = BGWORKER_SHMEM_ACCESS;
	worker.bgw_start_time = BgWorkerStart_ConsistentState;
	worker.bgw_restart_time = BGW_NEVER_RESTART;
	sprintf(worker.bgw_library_name, "test_shm_mq");
	sprintf(worker.bgw_function_name, "test_shm_mq_main");
	snprintf(worker.bgw_type, BGW_MAXLEN, "test_shm_mq");
	worker.bgw_main_arg = UInt32GetDatum(dsm_segment_handle(seg));
	/* set bgw_notify_pid, so we can detect if the worker stops */
	worker.bgw_notify_pid = MyProcPid;

	/* Register the workers. */
	for (i = 0; i < nworkers; ++i)
	{
		if (!RegisterDynamicBackgroundWorker(&worker, &wstate->handle[i]))
			ereport(ERROR,
					(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
					 errmsg("could not register background process"),
					 errhint("You may need to increase \"max_worker_processes\".")));
		++wstate->nworkers;
	}

	/* All done. */
	MemoryContextSwitchTo(oldcontext);
	return wstate;
}
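
/*
 * Each worker receives the segment handle through bgw_main_arg; the entry
 * point named above, test_shm_mq_main in worker.c, uses that handle to
 * dsm_attach() to the segment and locate the header and queues via the TOC.
 */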

static void
cleanup_background_workers(dsm_segment *seg, Datum arg)
{
	worker_state *wstate = (worker_state *) DatumGetPointer(arg);

	while (wstate->nworkers > 0)
	{
		--wstate->nworkers;
		TerminateBackgroundWorker(wstate->handle[wstate->nworkers]);
	}
}
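
/*
 * Note that TerminateBackgroundWorker() merely asks the postmaster to
 * signal the worker; it does not wait for the worker to exit, so this
 * on_dsm_detach callback stays cheap during abort processing.
 */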

static void
wait_for_workers_to_become_ready(worker_state *wstate,
								 volatile test_shm_mq_header *hdr)
{
	bool		result = false;

	for (;;)
	{
		int			workers_ready;

		/* If all the workers are ready, we have succeeded. */
		SpinLockAcquire(&hdr->mutex);
		workers_ready = hdr->workers_ready;
		SpinLockRelease(&hdr->mutex);
		if (workers_ready >= wstate->nworkers)
		{
			result = true;
			break;
		}

		/* If any workers (or the postmaster) have died, we have failed. */
		if (!check_worker_status(wstate))
		{
			result = false;
			break;
		}

		/* On first use, allocate the custom wait event. */
		if (we_bgworker_startup == 0)
			we_bgworker_startup = WaitEventExtensionNew("TestShmMqBgWorkerStartup");

		/* Wait to be signaled. */
		(void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
						 we_bgworker_startup);

		/* Reset the latch so we don't spin. */
		ResetLatch(MyLatch);

		/* An interrupt may have occurred while we were waiting. */
		CHECK_FOR_INTERRUPTS();
	}

	if (!result)
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
				 errmsg("one or more background workers failed to start")));
}
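
/*
 * The matching hand-shake lives in worker.c: once a worker has attached to
 * its queues, it increments hdr->workers_ready under the spinlock and sets
 * this backend's latch (located via bgw_notify_pid), waking the WaitLatch()
 * call above.
 */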

static bool
check_worker_status(worker_state *wstate)
{
	int			n;

	/* If any workers (or the postmaster) have died, we have failed. */
	for (n = 0; n < wstate->nworkers; ++n)
	{
		BgwHandleStatus status;
		pid_t		pid;

		status = GetBackgroundWorkerPid(wstate->handle[n], &pid);
		if (status == BGWH_STOPPED || status == BGWH_POSTMASTER_DIED)
			return false;
	}

	/* Otherwise, things still look OK. */
	return true;
}
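
/*
 * Only BGWH_STOPPED and BGWH_POSTMASTER_DIED are treated as failures here;
 * BGWH_NOT_YET_STARTED and BGWH_STARTED both count as healthy, since actual
 * readiness is tracked separately through hdr->workers_ready.
 */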