PostgreSQL Source Code  git master
setup.c File Reference
#include "postgres.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "postmaster/bgworker.h"
#include "storage/shm_toc.h"
#include "test_shm_mq.h"
#include "utils/memutils.h"
Include dependency graph for setup.c:

Go to the source code of this file.

Data Structures

struct  worker_state
 

Functions

static void setup_dynamic_shared_memory (int64 queue_size, int nworkers, dsm_segment **segp, test_shm_mq_header **hdrp, shm_mq **outp, shm_mq **inp)
 
static worker_state * setup_background_workers (int nworkers, dsm_segment *seg)
 
static void cleanup_background_workers (dsm_segment *seg, Datum arg)
 
static void wait_for_workers_to_become_ready (worker_state *wstate, volatile test_shm_mq_header *hdr)
 
static bool check_worker_status (worker_state *wstate)
 
void test_shm_mq_setup (int64 queue_size, int32 nworkers, dsm_segment **segp, shm_mq_handle **output, shm_mq_handle **input)
 

Variables

static uint32 we_bgworker_startup = 0
 

Function Documentation

◆ check_worker_status()

static bool check_worker_status ( worker_state * wstate)
static

Definition at line 305 of file setup.c.

306 {
307  int n;
308 
309  /* If any workers (or the postmaster) have died, we have failed. */
310  for (n = 0; n < wstate->nworkers; ++n)
311  {
312  BgwHandleStatus status;
313  pid_t pid;
314 
315  status = GetBackgroundWorkerPid(wstate->handle[n], &pid);
316  if (status == BGWH_STOPPED || status == BGWH_POSTMASTER_DIED)
317  return false;
318  }
319 
320  /* Otherwise, things still look OK. */
321  return true;
322 }
BgwHandleStatus GetBackgroundWorkerPid(BackgroundWorkerHandle *handle, pid_t *pidp)
Definition: bgworker.c:1157
BgwHandleStatus
Definition: bgworker.h:104
@ BGWH_POSTMASTER_DIED
Definition: bgworker.h:108
@ BGWH_STOPPED
Definition: bgworker.h:107
int nworkers
Definition: setup.c:27
BackgroundWorkerHandle * handle[FLEXIBLE_ARRAY_MEMBER]
Definition: setup.c:28

References BGWH_POSTMASTER_DIED, BGWH_STOPPED, GetBackgroundWorkerPid(), worker_state::handle, and worker_state::nworkers.

Referenced by wait_for_workers_to_become_ready().

◆ cleanup_background_workers()

static void cleanup_background_workers ( dsm_segment * seg,
Datum  arg 
)
static

Definition at line 245 of file setup.c.

246 {
247  worker_state *wstate = (worker_state *) DatumGetPointer(arg);
248 
249  while (wstate->nworkers > 0)
250  {
251  --wstate->nworkers;
252  TerminateBackgroundWorker(wstate->handle[wstate->nworkers]);
253  }
254 }
void TerminateBackgroundWorker(BackgroundWorkerHandle *handle)
Definition: bgworker.c:1296
void * arg
static Pointer DatumGetPointer(Datum X)
Definition: postgres.h:312

References arg, DatumGetPointer(), worker_state::handle, worker_state::nworkers, and TerminateBackgroundWorker().

Referenced by setup_background_workers(), and test_shm_mq_setup().

◆ setup_background_workers()

static worker_state * setup_background_workers ( int  nworkers,
dsm_segment seg 
)
static

Definition at line 174 of file setup.c.

175 {
176  MemoryContext oldcontext;
177  BackgroundWorker worker;
178  worker_state *wstate;
179  int i;
180 
181  /*
182  * We need the worker_state object and the background worker handles to
183  * which it points to be allocated in CurTransactionContext rather than
184  * ExprContext; otherwise, they'll be destroyed before the on_dsm_detach
185  * hooks run.
186  */
187  oldcontext = MemoryContextSwitchTo(CurTransactionContext);
188 
189  /* Create worker state object. */
190  wstate = MemoryContextAlloc(TopTransactionContext,
191  offsetof(worker_state, handle) +
192  sizeof(BackgroundWorkerHandle *) * nworkers);
193  wstate->nworkers = 0;
194 
195  /*
196  * Arrange to kill all the workers if we abort before all workers are
197  * finished hooking themselves up to the dynamic shared memory segment.
198  *
199  * If we die after all the workers have finished hooking themselves up to
200  * the dynamic shared memory segment, we'll mark the two queues to which
201  * we're directly connected as detached, and the worker(s) connected to
202  * those queues will exit, marking any other queues to which they are
203  * connected as detached. This will cause any as-yet-unaware workers
204  * connected to those queues to exit in their turn, and so on, until
205  * everybody exits.
206  *
207  * But suppose the workers which are supposed to connect to the queues to
208  * which we're directly attached exit due to some error before they
209  * actually attach the queues. The remaining workers will have no way of
210  * knowing this. From their perspective, they're still waiting for those
211  * workers to start, when in fact they've already died.
212  */
213  on_dsm_detach(seg, cleanup_background_workers,
214  PointerGetDatum(wstate));
215 
216  /* Configure a worker. */
217  memset(&worker, 0, sizeof(worker));
218  worker.bgw_flags = BGWORKER_SHMEM_ACCESS;
219  worker.bgw_start_time = BgWorkerStart_ConsistentState;
220  worker.bgw_restart_time = BGW_NEVER_RESTART;
221  sprintf(worker.bgw_library_name, "test_shm_mq");
222  sprintf(worker.bgw_function_name, "test_shm_mq_main");
223  snprintf(worker.bgw_type, BGW_MAXLEN, "test_shm_mq");
224  worker.bgw_main_arg = UInt32GetDatum(dsm_segment_handle(seg));
225  /* set bgw_notify_pid, so we can detect if the worker stops */
226  worker.bgw_notify_pid = MyProcPid;
227 
228  /* Register the workers. */
229  for (i = 0; i < nworkers; ++i)
230  {
231  if (!RegisterDynamicBackgroundWorker(&worker, &wstate->handle[i]))
232  ereport(ERROR,
233  (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
234  errmsg("could not register background process"),
235  errhint("You may need to increase \"max_worker_processes\".")));
236  ++wstate->nworkers;
237  }
238 
239  /* All done. */
240  MemoryContextSwitchTo(oldcontext);
241  return wstate;
242 }
bool RegisterDynamicBackgroundWorker(BackgroundWorker *worker, BackgroundWorkerHandle **handle)
Definition: bgworker.c:1045
#define BGW_NEVER_RESTART
Definition: bgworker.h:85
@ BgWorkerStart_ConsistentState
Definition: bgworker.h:80
#define BGWORKER_SHMEM_ACCESS
Definition: bgworker.h:53
#define BGW_MAXLEN
Definition: bgworker.h:86
dsm_handle dsm_segment_handle(dsm_segment *seg)
Definition: dsm.c:1123
void on_dsm_detach(dsm_segment *seg, on_dsm_detach_callback function, Datum arg)
Definition: dsm.c:1132
int errhint(const char *fmt,...)
Definition: elog.c:1317
int errcode(int sqlerrcode)
Definition: elog.c:853
int errmsg(const char *fmt,...)
Definition: elog.c:1070
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
int MyProcPid
Definition: globals.c:46
int i
Definition: isn.c:72
MemoryContext TopTransactionContext
Definition: mcxt.c:154
MemoryContext CurTransactionContext
Definition: mcxt.c:155
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:1181
#define sprintf
Definition: port.h:240
#define snprintf
Definition: port.h:238
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:322
static Datum UInt32GetDatum(uint32 X)
Definition: postgres.h:232
MemoryContextSwitchTo(old_ctx)
static void cleanup_background_workers(dsm_segment *seg, Datum arg)
Definition: setup.c:245
char bgw_function_name[BGW_MAXLEN]
Definition: bgworker.h:97
Datum bgw_main_arg
Definition: bgworker.h:98
int bgw_restart_time
Definition: bgworker.h:95
char bgw_type[BGW_MAXLEN]
Definition: bgworker.h:92
BgWorkerStartTime bgw_start_time
Definition: bgworker.h:94
pid_t bgw_notify_pid
Definition: bgworker.h:100
char bgw_library_name[MAXPGPATH]
Definition: bgworker.h:96

References BackgroundWorker::bgw_flags, BackgroundWorker::bgw_function_name, BackgroundWorker::bgw_library_name, BackgroundWorker::bgw_main_arg, BGW_MAXLEN, BGW_NEVER_RESTART, BackgroundWorker::bgw_notify_pid, BackgroundWorker::bgw_restart_time, BackgroundWorker::bgw_start_time, BackgroundWorker::bgw_type, BGWORKER_SHMEM_ACCESS, BgWorkerStart_ConsistentState, cleanup_background_workers(), CurTransactionContext, dsm_segment_handle(), ereport, errcode(), errhint(), errmsg(), ERROR, worker_state::handle, i, MemoryContextAlloc(), MemoryContextSwitchTo(), MyProcPid, worker_state::nworkers, on_dsm_detach(), PointerGetDatum(), RegisterDynamicBackgroundWorker(), snprintf, sprintf, TopTransactionContext, and UInt32GetDatum().

Referenced by test_shm_mq_setup().

◆ setup_dynamic_shared_memory()

static void setup_dynamic_shared_memory ( int64  queue_size,
int  nworkers,
dsm_segment **  segp,
test_shm_mq_header **  hdrp,
shm_mq **  outp,
shm_mq **  inp 
)
static

Definition at line 91 of file setup.c.

94 {
95  shm_toc_estimator e;
96  int i;
97  Size segsize;
98  dsm_segment *seg;
99  shm_toc *toc;
100  test_shm_mq_header *hdr;
101 
102  /* Ensure a valid queue size. */
103  if (queue_size < 0 || ((uint64) queue_size) < shm_mq_minimum_size)
104  ereport(ERROR,
105  (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
106  errmsg("queue size must be at least %zu bytes",
107  shm_mq_minimum_size)));
108  if (queue_size != ((Size) queue_size))
109  ereport(ERROR,
110  (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
111  errmsg("queue size overflows size_t")));
112 
113  /*
114  * Estimate how much shared memory we need.
115  *
116  * Because the TOC machinery may choose to insert padding of oddly-sized
117  * requests, we must estimate each chunk separately.
118  *
119  * We need one key to register the location of the header, and we need
120  * nworkers + 1 keys to track the locations of the message queues.
121  */
122  shm_toc_initialize_estimator(&e);
123  shm_toc_estimate_chunk(&e, sizeof(test_shm_mq_header));
124  for (i = 0; i <= nworkers; ++i)
125  shm_toc_estimate_chunk(&e, (Size) queue_size);
126  shm_toc_estimate_keys(&e, 2 + nworkers);
127  segsize = shm_toc_estimate(&e);
128 
129  /* Create the shared memory segment and establish a table of contents. */
130  seg = dsm_create(shm_toc_estimate(&e), 0);
131  toc = shm_toc_create(PG_TEST_SHM_MQ_MAGIC, dsm_segment_address(seg),
132  segsize);
133 
134  /* Set up the header region. */
135  hdr = shm_toc_allocate(toc, sizeof(test_shm_mq_header));
136  SpinLockInit(&hdr->mutex);
137  hdr->workers_total = nworkers;
138  hdr->workers_attached = 0;
139  hdr->workers_ready = 0;
140  shm_toc_insert(toc, 0, hdr);
141 
142  /* Set up one message queue per worker, plus one. */
143  for (i = 0; i <= nworkers; ++i)
144  {
145  shm_mq *mq;
146 
147  mq = shm_mq_create(shm_toc_allocate(toc, (Size) queue_size),
148  (Size) queue_size);
149  shm_toc_insert(toc, i + 1, mq);
150 
151  if (i == 0)
152  {
153  /* We send messages to the first queue. */
154  shm_mq_set_sender(mq, MyProc);
155  *outp = mq;
156  }
157  if (i == nworkers)
158  {
159  /* We receive messages from the last queue. */
160  shm_mq_set_receiver(mq, MyProc);
161  *inp = mq;
162  }
163  }
164 
165  /* Return results to caller. */
166  *segp = seg;
167  *hdrp = hdr;
168 }
uint64_t uint64
Definition: c.h:486
size_t Size
Definition: c.h:559
void * dsm_segment_address(dsm_segment *seg)
Definition: dsm.c:1095
dsm_segment * dsm_create(Size size, int flags)
Definition: dsm.c:516
e
Definition: preproc-init.c:82
void shm_mq_set_sender(shm_mq *mq, PGPROC *proc)
Definition: shm_mq.c:224
shm_mq * shm_mq_create(void *address, Size size)
Definition: shm_mq.c:177
void shm_mq_set_receiver(shm_mq *mq, PGPROC *proc)
Definition: shm_mq.c:206
const Size shm_mq_minimum_size
Definition: shm_mq.c:168
shm_toc * shm_toc_create(uint64 magic, void *address, Size nbytes)
Definition: shm_toc.c:40
Size shm_toc_estimate(shm_toc_estimator *e)
Definition: shm_toc.c:263
void shm_toc_insert(shm_toc *toc, uint64 key, void *address)
Definition: shm_toc.c:171
void * shm_toc_allocate(shm_toc *toc, Size nbytes)
Definition: shm_toc.c:88
#define shm_toc_estimate_chunk(e, sz)
Definition: shm_toc.h:51
#define shm_toc_initialize_estimator(e)
Definition: shm_toc.h:49
#define shm_toc_estimate_keys(e, cnt)
Definition: shm_toc.h:53
#define SpinLockInit(lock)
Definition: spin.h:57
PGPROC * MyProc
Definition: proc.c:66
Definition: shm_mq.c:72
#define PG_TEST_SHM_MQ_MAGIC
Definition: test_shm_mq.h:22

References dsm_create(), dsm_segment_address(), ereport, errcode(), errmsg(), ERROR, i, test_shm_mq_header::mutex, MyProc, PG_TEST_SHM_MQ_MAGIC, shm_mq_create(), shm_mq_minimum_size, shm_mq_set_receiver(), shm_mq_set_sender(), shm_toc_allocate(), shm_toc_create(), shm_toc_estimate(), shm_toc_estimate_chunk, shm_toc_estimate_keys, shm_toc_initialize_estimator, shm_toc_insert(), SpinLockInit, test_shm_mq_header::workers_attached, test_shm_mq_header::workers_ready, and test_shm_mq_header::workers_total.

Referenced by test_shm_mq_setup().

◆ test_shm_mq_setup()

void test_shm_mq_setup ( int64  queue_size,
int32  nworkers,
dsm_segment **  segp,
shm_mq_handle **  output,
shm_mq_handle **  input 
)

Definition at line 50 of file setup.c.

52 {
53  dsm_segment *seg;
54  test_shm_mq_header *hdr;
55  shm_mq *outq = NULL; /* placate compiler */
56  shm_mq *inq = NULL; /* placate compiler */
57  worker_state *wstate;
58 
59  /* Set up a dynamic shared memory segment. */
60  setup_dynamic_shared_memory(queue_size, nworkers, &seg, &hdr, &outq, &inq);
61  *segp = seg;
62 
63  /* Register background workers. */
64  wstate = setup_background_workers(nworkers, seg);
65 
66  /* Attach the queues. */
67  *output = shm_mq_attach(outq, seg, wstate->handle[0]);
68  *input = shm_mq_attach(inq, seg, wstate->handle[nworkers - 1]);
69 
70  /* Wait for workers to become ready. */
71  wait_for_workers_to_become_ready(wstate, hdr);
72 
73  /*
74  * Once we reach this point, all workers are ready. We no longer need to
75  * kill them if we die; they'll die on their own as the message queues
76  * shut down.
77  */
78  cancel_on_dsm_detach(seg, cleanup_background_workers,
79  PointerGetDatum(wstate));
80  pfree(wstate);
81 }
void cancel_on_dsm_detach(dsm_segment *seg, on_dsm_detach_callback function, Datum arg)
Definition: dsm.c:1147
FILE * input
FILE * output
void pfree(void *pointer)
Definition: mcxt.c:1521
static worker_state * setup_background_workers(int nworkers, dsm_segment *seg)
Definition: setup.c:174
static void wait_for_workers_to_become_ready(worker_state *wstate, volatile test_shm_mq_header *hdr)
Definition: setup.c:257
static void setup_dynamic_shared_memory(int64 queue_size, int nworkers, dsm_segment **segp, test_shm_mq_header **hdrp, shm_mq **outp, shm_mq **inp)
Definition: setup.c:91
shm_mq_handle * shm_mq_attach(shm_mq *mq, dsm_segment *seg, BackgroundWorkerHandle *handle)
Definition: shm_mq.c:290

References cancel_on_dsm_detach(), cleanup_background_workers(), worker_state::handle, input, output, pfree(), PointerGetDatum(), setup_background_workers(), setup_dynamic_shared_memory(), shm_mq_attach(), and wait_for_workers_to_become_ready().

Referenced by test_shm_mq(), and test_shm_mq_pipelined().

◆ wait_for_workers_to_become_ready()

static void wait_for_workers_to_become_ready ( worker_state * wstate,
volatile test_shm_mq_header * hdr 
)
static

Definition at line 257 of file setup.c.

259 {
260  bool result = false;
261 
262  for (;;)
263  {
264  int workers_ready;
265 
266  /* If all the workers are ready, we have succeeded. */
267  SpinLockAcquire(&hdr->mutex);
268  workers_ready = hdr->workers_ready;
269  SpinLockRelease(&hdr->mutex);
270  if (workers_ready >= wstate->nworkers)
271  {
272  result = true;
273  break;
274  }
275 
276  /* If any workers (or the postmaster) have died, we have failed. */
277  if (!check_worker_status(wstate))
278  {
279  result = false;
280  break;
281  }
282 
283  /* first time, allocate or get the custom wait event */
284  if (we_bgworker_startup == 0)
285  we_bgworker_startup = WaitEventExtensionNew("TestShmMqBgWorkerStartup");
286 
287  /* Wait to be signaled. */
288  (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
289  we_bgworker_startup);
290 
291  /* Reset the latch so we don't spin. */
292  ResetLatch(MyLatch);
293 
294  /* An interrupt may have occurred while we were waiting. */
295  CHECK_FOR_INTERRUPTS();
296  }
297 
298  if (!result)
299  ereport(ERROR,
300  (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
301  errmsg("one or more background workers failed to start")));
302 }
struct Latch * MyLatch
Definition: globals.c:62
void ResetLatch(Latch *latch)
Definition: latch.c:724
int WaitLatch(Latch *latch, int wakeEvents, long timeout, uint32 wait_event_info)
Definition: latch.c:517
#define WL_EXIT_ON_PM_DEATH
Definition: latch.h:132
#define WL_LATCH_SET
Definition: latch.h:127
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:122
static uint32 we_bgworker_startup
Definition: setup.c:43
static bool check_worker_status(worker_state *wstate)
Definition: setup.c:305
#define SpinLockRelease(lock)
Definition: spin.h:61
#define SpinLockAcquire(lock)
Definition: spin.h:59
uint32 WaitEventExtensionNew(const char *wait_event_name)
Definition: wait_event.c:163

References CHECK_FOR_INTERRUPTS, check_worker_status(), ereport, errcode(), errmsg(), ERROR, test_shm_mq_header::mutex, MyLatch, worker_state::nworkers, ResetLatch(), SpinLockAcquire, SpinLockRelease, WaitEventExtensionNew(), WaitLatch(), we_bgworker_startup, WL_EXIT_ON_PM_DEATH, WL_LATCH_SET, and test_shm_mq_header::workers_ready.

Referenced by test_shm_mq_setup().

Variable Documentation

◆ we_bgworker_startup

uint32 we_bgworker_startup = 0
static

Definition at line 43 of file setup.c.

Referenced by wait_for_workers_to_become_ready().