PostgreSQL Source Code (git master)
test_shm_mq.h File Reference
#include "storage/dsm.h"
#include "storage/shm_mq.h"
#include "storage/spin.h"
The include dependency graphs (files this header includes, and files that directly or indirectly include it) are not reproduced here.

Go to the source code of this file.

Data Structures

struct  test_shm_mq_header
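The header itself is not reproduced on this page; the fields referenced by the setup and worker code below suggest roughly the following control structure (a sketch inferred from those references, with field types assumed):

	typedef struct
	{
		slock_t		mutex;				/* protects the counters below */
		int			workers_total;		/* number of workers the registrant launched */
		int			workers_attached;	/* workers that have attached to the segment */
		int			workers_ready;		/* workers that have finished initialization */
	} test_shm_mq_header;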
 

Macros

#define PG_TEST_SHM_MQ_MAGIC   0x79fb2447
 

Functions

void test_shm_mq_setup (int64 queue_size, int32 nworkers, dsm_segment **seg, shm_mq_handle **output, shm_mq_handle **input)
 
void test_shm_mq_main (Datum) pg_attribute_noreturn()
 

Macro Definition Documentation

◆ PG_TEST_SHM_MQ_MAGIC

#define PG_TEST_SHM_MQ_MAGIC   0x79fb2447

Definition at line 22 of file test_shm_mq.h.

Referenced by setup_dynamic_shared_memory(), and test_shm_mq_main().
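The magic number ties the creating backend and the attaching workers to the same table-of-contents layout: setup_dynamic_shared_memory() stamps the segment with it, and test_shm_mq_main() verifies it when attaching. A minimal sketch of that pairing (seg and segsize are assumed to be in scope; error handling on the creator side trimmed):

	/* Creator side, roughly as in setup_dynamic_shared_memory(): stamp the new segment. */
	shm_toc *toc = shm_toc_create(PG_TEST_SHM_MQ_MAGIC,
								  dsm_segment_address(seg), segsize);

	/* Worker side, as in test_shm_mq_main(): reject segments with the wrong magic. */
	toc = shm_toc_attach(PG_TEST_SHM_MQ_MAGIC, dsm_segment_address(seg));
	if (toc == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("bad magic number in dynamic shared memory segment")));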

Function Documentation

◆ test_shm_mq_main()

void test_shm_mq_main ( Datum  )

Definition at line 47 of file worker.c.

References attach_to_queues(), BackendPidGetProc(), BackgroundWorkerUnblockSignals(), BackgroundWorker::bgw_notify_pid, copy_messages(), DatumGetInt32, DEBUG1, die, dsm_attach(), dsm_detach(), dsm_segment_address(), elog, ereport, errcode(), errmsg(), ERROR, test_shm_mq_header::mutex, MyBgworkerEntry, PG_TEST_SHM_MQ_MAGIC, pqsignal(), proc_exit(), PGPROC::procLatch, SetLatch(), shm_toc_attach(), shm_toc_lookup(), SpinLockAcquire, SpinLockRelease, test_shm_mq_header::workers_attached, test_shm_mq_header::workers_ready, and test_shm_mq_header::workers_total.

void
test_shm_mq_main(Datum main_arg)
{
	dsm_segment *seg;
	shm_toc    *toc;
	shm_mq_handle *inqh;
	shm_mq_handle *outqh;
	volatile test_shm_mq_header *hdr;
	int			myworkernumber;
	PGPROC	   *registrant;

	/*
	 * Establish signal handlers.
	 *
	 * We want CHECK_FOR_INTERRUPTS() to kill off this worker process just as
	 * it would a normal user backend.  To make that happen, we use die().
	 */
	pqsignal(SIGTERM, die);
	BackgroundWorkerUnblockSignals();

	/*
	 * Connect to the dynamic shared memory segment.
	 *
	 * The backend that registered this worker passed us the ID of a shared
	 * memory segment to which we must attach for further instructions.  Once
	 * we've mapped the segment in our address space, attach to the table of
	 * contents so we can locate the various data structures we'll need to
	 * find within the segment.
	 *
	 * Note: at this point, we have not created any ResourceOwner in this
	 * process.  This will result in our DSM mapping surviving until process
	 * exit, which is fine.  If there were a ResourceOwner, it would acquire
	 * ownership of the mapping, but we have no need for that.
	 */
	seg = dsm_attach(DatumGetInt32(main_arg));
	if (seg == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("unable to map dynamic shared memory segment")));
	toc = shm_toc_attach(PG_TEST_SHM_MQ_MAGIC, dsm_segment_address(seg));
	if (toc == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("bad magic number in dynamic shared memory segment")));

	/*
	 * Acquire a worker number.
	 *
	 * By convention, the process registering this background worker should
	 * have stored the control structure at key 0.  We look up that key to
	 * find it.  Our worker number gives our identity: there may be just one
	 * worker involved in this parallel operation, or there may be many.
	 */
	hdr = shm_toc_lookup(toc, 0, false);
	SpinLockAcquire(&hdr->mutex);
	myworkernumber = ++hdr->workers_attached;
	SpinLockRelease(&hdr->mutex);
	if (myworkernumber > hdr->workers_total)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("too many message queue testing workers already")));

	/*
	 * Attach to the appropriate message queues.
	 */
	attach_to_queues(seg, toc, myworkernumber, &inqh, &outqh);

	/*
	 * Indicate that we're fully initialized and ready to begin the main part
	 * of the parallel operation.
	 *
	 * Once we signal that we're ready, the user backend is entitled to
	 * assume that our on_dsm_detach callbacks will fire before we disconnect
	 * from the shared memory segment and exit.  Generally, that means we
	 * must have attached to all relevant dynamic shared memory data
	 * structures by now.
	 */
	SpinLockAcquire(&hdr->mutex);
	++hdr->workers_ready;
	SpinLockRelease(&hdr->mutex);
	registrant = BackendPidGetProc(MyBgworkerEntry->bgw_notify_pid);
	if (registrant == NULL)
	{
		elog(DEBUG1, "registrant backend has exited prematurely");
		proc_exit(1);
	}
	SetLatch(&registrant->procLatch);

	/* Do the work. */
	copy_messages(inqh, outqh);

	/*
	 * We're done.  For cleanliness, explicitly detach from the shared memory
	 * segment (that would happen anyway during process exit, though).
	 */
	dsm_detach(seg);
	proc_exit(1);
}
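test_shm_mq_main() is not called directly. setup_background_workers() (in setup.c) registers it as a dynamic background worker, passing the DSM segment handle as bgw_main_arg so the dsm_attach() call above can find the segment, and setting bgw_notify_pid so the worker can later SetLatch() the registrant. The following is a condensed sketch of such a registration, not a verbatim copy of setup.c; the library name, worker name, and error message are illustrative:

	BackgroundWorker worker;
	BackgroundWorkerHandle *handle;

	memset(&worker, 0, sizeof(worker));
	worker.bgw_flags = BGWORKER_SHMEM_ACCESS;
	worker.bgw_start_time = BgWorkerStart_ConsistentState;
	worker.bgw_restart_time = BGW_NEVER_RESTART;
	sprintf(worker.bgw_library_name, "test_shm_mq");	/* assumed module name */
	sprintf(worker.bgw_function_name, "test_shm_mq_main");
	snprintf(worker.bgw_name, BGW_MAXLEN, "test_shm_mq worker");
	worker.bgw_main_arg = UInt32GetDatum(dsm_segment_handle(seg));
	/* Requesting notifications lets the worker find us via BackendPidGetProc(). */
	worker.bgw_notify_pid = MyProcPid;

	if (!RegisterDynamicBackgroundWorker(&worker, &handle))
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
				 errmsg("could not register background process")));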

◆ test_shm_mq_setup()

void test_shm_mq_setup ( int64  queue_size,
int32  nworkers,
dsm_segment **  seg,
shm_mq_handle **  output,
shm_mq_handle **  input 
)

Definition at line 48 of file setup.c.

References cancel_on_dsm_detach(), cleanup_background_workers(), worker_state::handle, pfree(), PointerGetDatum, setup_background_workers(), setup_dynamic_shared_memory(), shm_mq_attach(), and wait_for_workers_to_become_ready().

Referenced by test_shm_mq(), and test_shm_mq_pipelined().

void
test_shm_mq_setup(int64 queue_size, int32 nworkers, dsm_segment **segp,
				  shm_mq_handle **output, shm_mq_handle **input)
{
	dsm_segment *seg;
	test_shm_mq_header *hdr;
	shm_mq	   *outq = NULL;	/* placate compiler */
	shm_mq	   *inq = NULL;		/* placate compiler */
	worker_state *wstate;

	/* Set up a dynamic shared memory segment. */
	setup_dynamic_shared_memory(queue_size, nworkers, &seg, &hdr, &outq, &inq);
	*segp = seg;

	/* Register background workers. */
	wstate = setup_background_workers(nworkers, seg);

	/* Attach the queues. */
	*output = shm_mq_attach(outq, seg, wstate->handle[0]);
	*input = shm_mq_attach(inq, seg, wstate->handle[nworkers - 1]);

	/* Wait for workers to become ready. */
	wait_for_workers_to_become_ready(wstate, hdr);

	/*
	 * Once we reach this point, all workers are ready.  We no longer need to
	 * kill them if we die; they'll die on their own as the message queues
	 * shut down.
	 */
	cancel_on_dsm_detach(seg, cleanup_background_workers,
						 PointerGetDatum(wstate));
	pfree(wstate);
}
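test_shm_mq() and test_shm_mq_pipelined() in test.c are the SQL-callable consumers of this function: they build the ring of queues and workers, write messages into *output, and read them back from *input. The loop below is a compressed sketch of the non-pipelined case, not verbatim test.c; queue_size, nworkers, loop_count, message_size, and message_contents stand in for the SQL-level arguments, and shm_mq_send() gained an additional force_flush argument in later PostgreSQL releases, so the exact call depends on the branch:

	dsm_segment *seg;
	shm_mq_handle *outqh;
	shm_mq_handle *inqh;
	shm_mq_result res;
	Size		len;
	void	   *data;
	int64		i;

	/* Create the segment, start the workers, and attach both end queues. */
	test_shm_mq_setup(queue_size, nworkers, &seg, &outqh, &inqh);

	for (i = 0; i < loop_count; ++i)
	{
		/* Push the test message into the first queue... */
		res = shm_mq_send(outqh, message_size, message_contents, false);
		if (res != SHM_MQ_SUCCESS)
			ereport(ERROR, (errmsg("could not send message")));

		/* ...and wait for it to come back around the ring of workers. */
		res = shm_mq_receive(inqh, &len, &data, false);
		if (res != SHM_MQ_SUCCESS)
			ereport(ERROR, (errmsg("could not receive message")));
	}

	/* Detaching the segment also lets the workers shut down. */
	dsm_detach(seg);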