PostgreSQL Source Code (git master)
worker.c
1 /*--------------------------------------------------------------------------
2  *
3  * worker.c
4  * Code for sample worker making use of shared memory message queues.
5  * Our test worker simply reads messages from one message queue and
6  * writes them back out to another message queue. In a real
7  * application, you'd presumably want the worker to do some more
8  * complex calculation rather than simply returning the input,
9  * but it should be possible to use much of the control logic just
10  * as presented here.
11  *
12  * Copyright (c) 2013-2019, PostgreSQL Global Development Group
13  *
14  * IDENTIFICATION
15  * src/test/modules/test_shm_mq/worker.c
16  *
17  * -------------------------------------------------------------------------
18  */
19 
20 #include "postgres.h"
21 
22 #include "miscadmin.h"
23 #include "storage/ipc.h"
24 #include "storage/procarray.h"
25 #include "storage/shm_mq.h"
26 #include "storage/shm_toc.h"
27 
28 #include "test_shm_mq.h"
29 
30 static void handle_sigterm(SIGNAL_ARGS);
31 static void attach_to_queues(dsm_segment *seg, shm_toc *toc,
32  int myworkernumber, shm_mq_handle **inqhp,
33  shm_mq_handle **outqhp);
34 static void copy_messages(shm_mq_handle *inqh, shm_mq_handle *outqh);
35 
36 /*
37  * Background worker entrypoint.
38  *
39  * This is intended to demonstrate how a background worker can be used to
40  * facilitate a parallel computation. Most of the logic here is fairly
41  * boilerplate stuff, designed to attach to the shared memory segment,
42  * notify the user backend that we're alive, and so on. The
43  * application-specific bits of logic that you'd replace for your own worker
44  * are attach_to_queues() and copy_messages().
45  */
46 void
47 test_shm_mq_main(Datum main_arg)
48 {
49  dsm_segment *seg;
50  shm_toc *toc;
51  shm_mq_handle *inqh;
52  shm_mq_handle *outqh;
53  volatile test_shm_mq_header *hdr;
54  int myworkernumber;
55  PGPROC *registrant;
56 
57  /*
58  * Establish signal handlers.
59  *
60  * We want CHECK_FOR_INTERRUPTS() to kill off this worker process just as
61  * it would a normal user backend. To make that happen, we establish a
62  * signal handler that is a stripped-down version of die().
63  */
64  pqsignal(SIGTERM, handle_sigterm);
65  BackgroundWorkerUnblockSignals();
66 
67  /*
68  * Connect to the dynamic shared memory segment.
69  *
70  * The backend that registered this worker passed us the ID of a shared
71  * memory segment to which we must attach for further instructions. Once
72  * we've mapped the segment in our address space, attach to the table of
73  * contents so we can locate the various data structures we'll need to
74  * find within the segment.
75  *
76  * Note: at this point, we have not created any ResourceOwner in this
77  * process. This will result in our DSM mapping surviving until process
78  * exit, which is fine. If there were a ResourceOwner, it would acquire
79  * ownership of the mapping, but we have no need for that.
80  */
81  seg = dsm_attach(DatumGetInt32(main_arg));
82  if (seg == NULL)
83  ereport(ERROR,
84  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
85  errmsg("unable to map dynamic shared memory segment")));
86  toc = shm_toc_attach(PG_TEST_SHM_MQ_MAGIC, dsm_segment_address(seg));
87  if (toc == NULL)
88  ereport(ERROR,
89  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
90  errmsg("bad magic number in dynamic shared memory segment")));
91 
92  /*
93  * Acquire a worker number.
94  *
95  * By convention, the process registering this background worker should
96  * have stored the control structure at key 0. We look up that key to
97  * find it. Our worker number gives our identity: there may be just one
98  * worker involved in this parallel operation, or there may be many.
99  */
100  hdr = shm_toc_lookup(toc, 0, false);
101  SpinLockAcquire(&hdr->mutex);
102  myworkernumber = ++hdr->workers_attached;
103  SpinLockRelease(&hdr->mutex);
104  if (myworkernumber > hdr->workers_total)
105  ereport(ERROR,
106  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
107  errmsg("too many message queue testing workers already")));
108 
109  /*
110  * Attach to the appropriate message queues.
111  */
112  attach_to_queues(seg, toc, myworkernumber, &inqh, &outqh);
113 
114  /*
115  * Indicate that we're fully initialized and ready to begin the main part
116  * of the parallel operation.
117  *
118  * Once we signal that we're ready, the user backend is entitled to assume
119  * that our on_dsm_detach callbacks will fire before we disconnect from
120  * the shared memory segment and exit. Generally, that means we must have
121  * attached to all relevant dynamic shared memory data structures by now.
122  */
123  SpinLockAcquire(&hdr->mutex);
124  ++hdr->workers_ready;
125  SpinLockRelease(&hdr->mutex);
126  registrant = BackendPidGetProc(MyBgworkerEntry->bgw_notify_pid);
127  if (registrant == NULL)
128  {
129  elog(DEBUG1, "registrant backend has exited prematurely");
130  proc_exit(1);
131  }
132  SetLatch(&registrant->procLatch);
133 
134  /* Do the work. */
135  copy_messages(inqh, outqh);
136 
137  /*
138  * We're done. For cleanliness, explicitly detach from the shared memory
139  * segment (that would happen anyway during process exit, though).
140  */
141  dsm_detach(seg);
142  proc_exit(1);
143 }
144 
145 /*
146  * Attach to shared memory message queues.
147  *
148  * We use our worker number to determine to which queue we should attach.
149  * The queues are registered at keys 1..<number-of-workers>. The user backend
150  * writes to queue #1 and reads from queue #<number-of-workers>; each worker
151  * reads from the queue whose number is equal to its worker number and writes
152  * to the next higher-numbered queue.
153  */
154 static void
155 attach_to_queues(dsm_segment *seg, shm_toc *toc, int myworkernumber,
156  shm_mq_handle **inqhp, shm_mq_handle **outqhp)
157 {
158  shm_mq *inq;
159  shm_mq *outq;
160 
161  inq = shm_toc_lookup(toc, myworkernumber, false);
162  shm_mq_set_receiver(inq, MyProc);
163  *inqhp = shm_mq_attach(inq, seg, NULL);
164  outq = shm_toc_lookup(toc, myworkernumber + 1, false);
165  shm_mq_set_sender(outq, MyProc);
166  *outqhp = shm_mq_attach(outq, seg, NULL);
167 }
168 
169 /*
170  * Loop, receiving and sending messages, until the connection is broken.
171  *
172  * This is the "real work" performed by this worker process. Everything that
173  * happens before this is initialization of one form or another, and everything
174  * after this point is cleanup.
175  */
176 static void
177 copy_messages(shm_mq_handle *inqh, shm_mq_handle *outqh)
178 {
179  Size len;
180  void *data;
181  shm_mq_result res;
182 
183  for (;;)
184  {
185  /* Notice any interrupts that have occurred. */
186  CHECK_FOR_INTERRUPTS();
187 
188  /* Receive a message. */
189  res = shm_mq_receive(inqh, &len, &data, false);
190  if (res != SHM_MQ_SUCCESS)
191  break;
192 
193  /* Send it back out. */
194  res = shm_mq_send(outqh, len, data, false);
195  if (res != SHM_MQ_SUCCESS)
196  break;
197  }
198 }
199 
200 /*
201  * When we receive a SIGTERM, we set InterruptPending and ProcDiePending just
202  * like a normal backend. The next CHECK_FOR_INTERRUPTS() will do the right
203  * thing.
204  */
205 static void
206 handle_sigterm(SIGNAL_ARGS)
207 {
208  int save_errno = errno;
209 
210  SetLatch(MyLatch);
211 
212  if (!proc_exit_inprogress)
213  {
214  InterruptPending = true;
215  ProcDiePending = true;
216  }
217 
218  errno = save_errno;
219 }
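
A note for readers of this listing: the control structure that test_shm_mq_main() looks up at table-of-contents key 0 is declared in test_shm_mq.h, which is not shown on this page. Judging only from the fields the code above uses (mutex, workers_total, workers_attached, workers_ready), it is shaped roughly like the following sketch; treat it as an illustration rather than the authoritative definition.

#include "postgres.h"

#include "storage/spin.h"

/* Rough sketch of the control structure worker.c reads at TOC key 0;
 * the authoritative definition lives in test_shm_mq.h. */
typedef struct
{
	slock_t		mutex;				/* protects the counters below */
	int			workers_total;		/* number of workers expected */
	int			workers_attached;	/* workers that acquired a worker number */
	int			workers_ready;		/* workers done with initialization */
} test_shm_mq_header;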
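
The comments in test_shm_mq_main() and attach_to_queues() refer to work done by "the backend that registered this worker": it stores the header at key 0 and one message queue per worker plus one, at keys 1 through nworkers + 1. The code below is a minimal, hedged sketch of that setup side, assuming the header layout sketched above; setup_segment() is a hypothetical helper name, while the DSM, shm_toc, shm_mq, and spinlock calls are the standard server APIs.

#include "postgres.h"

#include "storage/dsm.h"
#include "storage/proc.h"
#include "storage/shm_mq.h"
#include "storage/shm_toc.h"
#include "storage/spin.h"

#include "test_shm_mq.h"

/* Hypothetical helper: create the segment, TOC, header, and queues that
 * worker.c expects to find. */
static dsm_segment *
setup_segment(int nworkers, Size queue_size,
			  shm_mq_handle **outqhp, shm_mq_handle **inqhp)
{
	shm_toc_estimator e;
	Size		segsize;
	dsm_segment *seg;
	shm_toc    *toc;
	test_shm_mq_header *hdr;
	int			i;

	/* Estimate space: the header plus nworkers + 1 message queues. */
	shm_toc_initialize_estimator(&e);
	shm_toc_estimate_chunk(&e, sizeof(test_shm_mq_header));
	for (i = 0; i <= nworkers; ++i)
		shm_toc_estimate_chunk(&e, queue_size);
	shm_toc_estimate_keys(&e, 2 + nworkers);
	segsize = shm_toc_estimate(&e);

	/* Create the segment and its table of contents. */
	seg = dsm_create(segsize, 0);
	toc = shm_toc_create(PG_TEST_SHM_MQ_MAGIC, dsm_segment_address(seg),
						 segsize);

	/* Control structure at key 0, as worker.c expects. */
	hdr = shm_toc_allocate(toc, sizeof(test_shm_mq_header));
	SpinLockInit(&hdr->mutex);
	hdr->workers_total = nworkers;
	hdr->workers_attached = 0;
	hdr->workers_ready = 0;
	shm_toc_insert(toc, 0, hdr);

	/* Queues at keys 1..nworkers + 1: we send into the first queue and
	 * receive from the last one; each worker bridges an adjacent pair. */
	for (i = 0; i <= nworkers; ++i)
	{
		shm_mq	   *mq = shm_mq_create(shm_toc_allocate(toc, queue_size),
									   queue_size);

		shm_toc_insert(toc, i + 1, mq);
		if (i == 0)
		{
			shm_mq_set_sender(mq, MyProc);
			*outqhp = shm_mq_attach(mq, seg, NULL);
		}
		if (i == nworkers)
		{
			shm_mq_set_receiver(mq, MyProc);
			*inqhp = shm_mq_attach(mq, seg, NULL);
		}
	}

	return seg;
}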
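
Finally, the worker recovers the segment handle from main_arg and locates its registrant through MyBgworkerEntry->bgw_notify_pid, which implies a registration step along these lines; register_one_worker() is likewise a hypothetical helper, and the bgw_* settings shown are reasonable defaults rather than this module's exact choices.

#include "postgres.h"

#include "miscadmin.h"
#include "postmaster/bgworker.h"
#include "storage/dsm.h"

/* Hypothetical helper: launch one background worker running
 * test_shm_mq_main() against the given segment. */
static void
register_one_worker(dsm_segment *seg)
{
	BackgroundWorker worker;
	BackgroundWorkerHandle *handle;

	memset(&worker, 0, sizeof(worker));
	worker.bgw_flags = BGWORKER_SHMEM_ACCESS;
	worker.bgw_start_time = BgWorkerStart_ConsistentState;
	worker.bgw_restart_time = BGW_NEVER_RESTART;
	snprintf(worker.bgw_library_name, sizeof(worker.bgw_library_name),
			 "test_shm_mq");
	snprintf(worker.bgw_function_name, sizeof(worker.bgw_function_name),
			 "test_shm_mq_main");
	snprintf(worker.bgw_name, sizeof(worker.bgw_name),
			 "test_shm_mq example worker");

	/* The worker recovers the segment via DatumGetInt32(main_arg). */
	worker.bgw_main_arg = UInt32GetDatum(dsm_segment_handle(seg));

	/* Lets the worker locate us through BackendPidGetProc(). */
	worker.bgw_notify_pid = MyProcPid;

	if (!RegisterDynamicBackgroundWorker(&worker, &handle))
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
				 errmsg("could not register background worker")));
}

With the segment set up and the workers registered, the registrant typically waits on its latch until hdr->workers_ready reaches hdr->workers_total, then streams messages into queue #1 and reads the results back from the last queue.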