PostgreSQL Source Code  git master
procsignal.c
Go to the documentation of this file.
/*-------------------------------------------------------------------------
 *
 * procsignal.c
 *	  Routines for interprocess signaling
 *
 *
 * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/storage/ipc/procsignal.c
 *
 *-------------------------------------------------------------------------
 */
15 #include "postgres.h"
16 
17 #include <signal.h>
18 #include <unistd.h>
19 
20 #include "access/parallel.h"
21 #include "port/pg_bitutils.h"
22 #include "commands/async.h"
23 #include "miscadmin.h"
24 #include "pgstat.h"
25 #include "replication/walsender.h"
26 #include "storage/ipc.h"
27 #include "storage/latch.h"
28 #include "storage/proc.h"
29 #include "storage/shmem.h"
30 #include "storage/sinval.h"
31 #include "tcop/tcopprot.h"
32 
33 /*
34  * The SIGUSR1 signal is multiplexed to support signaling multiple event
35  * types. The specific reason is communicated via flags in shared memory.
36  * We keep a boolean flag for each possible "reason", so that different
37  * reasons can be signaled to a process concurrently. (However, if the same
38  * reason is signaled more than once nearly simultaneously, the process may
39  * observe it only once.)
40  *
41  * Each process that wants to receive signals registers its process ID
42  * in the ProcSignalSlots array. The array is indexed by backend ID to make
43  * slot allocation simple, and to avoid having to search the array when you
44  * know the backend ID of the process you're signaling. (We do support
45  * signaling without backend ID, but it's a bit less efficient.)
46  *
47  * The flags are actually declared as "volatile sig_atomic_t" for maximum
48  * portability. This should ensure that loads and stores of the flag
49  * values are atomic, allowing us to dispense with any explicit locking.
50  *
51  * pss_signalFlags are intended to be set in cases where we don't need to
52  * keep track of whether or not the target process has handled the signal,
53  * but sometimes we need confirmation, as when making a global state change
54  * that cannot be considered complete until all backends have taken notice
55  * of it. For such use cases, we set a bit in pss_barrierCheckMask and then
56  * increment the current "barrier generation"; when the new barrier generation
57  * (or greater) appears in the pss_barrierGeneration flag of every process,
58  * we know that the message has been received everywhere.
59  */
60 typedef struct
61 {
62  pid_t pss_pid;
63  sig_atomic_t pss_signalFlags[NUM_PROCSIGNALS];
67 
68 /*
69  * Information that is global to the entire ProcSignal system can be stored
70  * here.
71  *
72  * psh_barrierGeneration is the highest barrier generation in existence.
73  */
74 typedef struct
75 {
79 
80 /*
81  * We reserve a slot for each possible BackendId, plus one for each
82  * possible auxiliary process type. (This scheme assumes there is not
83  * more than one of any auxiliary process type at a time.)
84  */
85 #define NumProcSignalSlots (MaxBackends + NUM_AUXPROCTYPES)
86 
87 /* Check whether the relevant type bit is set in the flags. */
88 #define BARRIER_SHOULD_CHECK(flags, type) \
89  (((flags) & (((uint32) 1) << (uint32) (type))) != 0)
90 
91 /* Clear the relevant type bit from the flags. */
92 #define BARRIER_CLEAR_BIT(flags, type) \
93  ((flags) &= ~(((uint32) 1) << (uint32) (type)))
94 
96 static volatile ProcSignalSlot *MyProcSignalSlot = NULL;
97 
98 static bool CheckProcSignal(ProcSignalReason reason);
99 static void CleanupProcSignalState(int status, Datum arg);
100 static void ResetProcSignalBarrierBits(uint32 flags);
101 static bool ProcessBarrierPlaceholder(void);
102 
103 /*
104  * ProcSignalShmemSize
105  * Compute space needed for procsignal's shared memory
106  */
107 Size
109 {
110  Size size;
111 
112  size = mul_size(NumProcSignalSlots, sizeof(ProcSignalSlot));
113  size = add_size(size, offsetof(ProcSignalHeader, psh_slot));
114  return size;
115 }
116 
117 /*
118  * ProcSignalShmemInit
119  * Allocate and initialize procsignal's shared memory
120  */
121 void
123 {
124  Size size = ProcSignalShmemSize();
125  bool found;
126 
127  ProcSignal = (ProcSignalHeader *)
128  ShmemInitStruct("ProcSignal", size, &found);
129 
130  /* If we're first, initialize. */
131  if (!found)
132  {
133  int i;
134 
135  pg_atomic_init_u64(&ProcSignal->psh_barrierGeneration, 0);
136 
137  for (i = 0; i < NumProcSignalSlots; ++i)
138  {
139  ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
140 
141  slot->pss_pid = 0;
142  MemSet(slot->pss_signalFlags, 0, sizeof(slot->pss_signalFlags));
145  }
146  }
147 }
148 
149 /*
150  * ProcSignalInit
151  * Register the current process in the procsignal array
152  *
153  * The passed index should be my BackendId if the process has one,
154  * or MaxBackends + aux process type if not.
155  */
156 void
157 ProcSignalInit(int pss_idx)
158 {
159  volatile ProcSignalSlot *slot;
160  uint64 barrier_generation;
161 
162  Assert(pss_idx >= 1 && pss_idx <= NumProcSignalSlots);
163 
164  slot = &ProcSignal->psh_slot[pss_idx - 1];
165 
166  /* sanity check */
167  if (slot->pss_pid != 0)
168  elog(LOG, "process %d taking over ProcSignal slot %d, but it's not empty",
169  MyProcPid, pss_idx);
170 
171  /* Clear out any leftover signal reasons */
172  MemSet(slot->pss_signalFlags, 0, NUM_PROCSIGNALS * sizeof(sig_atomic_t));
173 
174  /*
175  * Initialize barrier state. Since we're a brand-new process, there
176  * shouldn't be any leftover backend-private state that needs to be
177  * updated. Therefore, we can broadcast the latest barrier generation and
178  * disregard any previously-set check bits.
179  *
180  * NB: This only works if this initialization happens early enough in the
181  * startup sequence that we haven't yet cached any state that might need
182  * to be invalidated. That's also why we have a memory barrier here, to be
183  * sure that any later reads of memory happen strictly after this.
184  */
186  barrier_generation =
188  pg_atomic_write_u64(&slot->pss_barrierGeneration, barrier_generation);
190 
191  /* Mark slot with my PID */
192  slot->pss_pid = MyProcPid;
193 
194  /* Remember slot location for CheckProcSignal */
195  MyProcSignalSlot = slot;
196 
197  /* Set up to release the slot on process exit */
199 }
200 
201 /*
202  * CleanupProcSignalState
203  * Remove current process from ProcSignal mechanism
204  *
205  * This function is called via on_shmem_exit() during backend shutdown.
206  */
207 static void
209 {
210  int pss_idx = DatumGetInt32(arg);
211  volatile ProcSignalSlot *slot;
212 
213  slot = &ProcSignal->psh_slot[pss_idx - 1];
214  Assert(slot == MyProcSignalSlot);
215 
216  /*
217  * Clear MyProcSignalSlot, so that a SIGUSR1 received after this point
218  * won't try to access it after it's no longer ours (and perhaps even
219  * after we've unmapped the shared memory segment).
220  */
221  MyProcSignalSlot = NULL;
222 
223  /* sanity check */
224  if (slot->pss_pid != MyProcPid)
225  {
226  /*
227  * don't ERROR here. We're exiting anyway, and don't want to get into
228  * infinite loop trying to exit
229  */
230  elog(LOG, "process %d releasing ProcSignal slot %d, but it contains %d",
231  MyProcPid, pss_idx, (int) slot->pss_pid);
232  return; /* XXX better to zero the slot anyway? */
233  }
234 
235  /*
236  * Make this slot look like it's absorbed all possible barriers, so that
237  * no barrier waits block on it.
238  */
240 
241  slot->pss_pid = 0;
242 }
243 
244 /*
245  * SendProcSignal
246  * Send a signal to a Postgres process
247  *
248  * Providing backendId is optional, but it will speed up the operation.
249  *
250  * On success (a signal was sent), zero is returned.
251  * On error, -1 is returned, and errno is set (typically to ESRCH or EPERM).
252  *
253  * Not to be confused with ProcSendSignal
254  */
255 int
256 SendProcSignal(pid_t pid, ProcSignalReason reason, BackendId backendId)
257 {
258  volatile ProcSignalSlot *slot;
259 
260  if (backendId != InvalidBackendId)
261  {
262  slot = &ProcSignal->psh_slot[backendId - 1];
263 
264  /*
265  * Note: Since there's no locking, it's possible that the target
266  * process detaches from shared memory and exits right after this
267  * test, before we set the flag and send signal. And the signal slot
268  * might even be recycled by a new process, so it's remotely possible
269  * that we set a flag for a wrong process. That's OK, all the signals
270  * are such that no harm is done if they're mistakenly fired.
271  */
272  if (slot->pss_pid == pid)
273  {
274  /* Atomically set the proper flag */
275  slot->pss_signalFlags[reason] = true;
276  /* Send signal */
277  return kill(pid, SIGUSR1);
278  }
279  }
280  else
281  {
282  /*
283  * BackendId not provided, so search the array using pid. We search
284  * the array back to front so as to reduce search overhead. Passing
285  * InvalidBackendId means that the target is most likely an auxiliary
286  * process, which will have a slot near the end of the array.
287  */
288  int i;
289 
290  for (i = NumProcSignalSlots - 1; i >= 0; i--)
291  {
292  slot = &ProcSignal->psh_slot[i];
293 
294  if (slot->pss_pid == pid)
295  {
296  /* the above note about race conditions applies here too */
297 
298  /* Atomically set the proper flag */
299  slot->pss_signalFlags[reason] = true;
300  /* Send signal */
301  return kill(pid, SIGUSR1);
302  }
303  }
304  }
305 
306  errno = ESRCH;
307  return -1;
308 }
309 
310 /*
311  * EmitProcSignalBarrier
312  * Send a signal to every Postgres process
313  *
314  * The return value of this function is the barrier "generation" created
315  * by this operation. This value can be passed to WaitForProcSignalBarrier
316  * to wait until it is known that every participant in the ProcSignal
317  * mechanism has absorbed the signal (or started afterwards).
318  *
319  * Note that it would be a bad idea to use this for anything that happens
320  * frequently, as interrupting every backend could cause a noticeable
321  * performance hit.
322  *
323  * Callers are entitled to assume that this function will not throw ERROR
324  * or FATAL.
325  */
326 uint64
328 {
329  uint32 flagbit = 1 << (uint32) type;
330  uint64 generation;
331 
332  /*
333  * Set all the flags.
334  *
335  * Note that pg_atomic_fetch_or_u32 has full barrier semantics, so this is
336  * totally ordered with respect to anything the caller did before, and
337  * anything that we do afterwards. (This is also true of the later call to
338  * pg_atomic_add_fetch_u64.)
339  */
340  for (int i = 0; i < NumProcSignalSlots; i++)
341  {
342  volatile ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
343 
345  }
346 
347  /*
348  * Increment the generation counter.
349  */
350  generation =
352 
353  /*
354  * Signal all the processes, so that they update their advertised barrier
355  * generation.
356  *
357  * Concurrency is not a problem here. Backends that have exited don't
358  * matter, and new backends that have joined since we entered this
359  * function must already have current state, since the caller is
360  * responsible for making sure that the relevant state is entirely visible
361  * before calling this function in the first place. We still have to wake
362  * them up - because we can't distinguish between such backends and older
363  * backends that need to update state - but they won't actually need to
364  * change any state.
365  */
366  for (int i = NumProcSignalSlots - 1; i >= 0; i--)
367  {
368  volatile ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
369  pid_t pid = slot->pss_pid;
370 
371  if (pid != 0)
372  {
373  /* see SendProcSignal for details */
374  slot->pss_signalFlags[PROCSIG_BARRIER] = true;
375  kill(pid, SIGUSR1);
376  }
377  }
378 
379  return generation;
380 }
381 
382 /*
383  * WaitForProcSignalBarrier - wait until it is guaranteed that all changes
384  * requested by a specific call to EmitProcSignalBarrier() have taken effect.
385  *
386  * We expect that the barrier will normally be absorbed very quickly by other
387  * backends, so we start by waiting just 1/8 of a second and then back off
388  * by a factor of two every time we time out, to a maximum wait time of
389  * 1 second.
390  */
391 void
392 WaitForProcSignalBarrier(uint64 generation)
393 {
394  long timeout = 125L;
395 
396  Assert(generation <= pg_atomic_read_u64(&ProcSignal->psh_barrierGeneration));
397 
398  for (int i = NumProcSignalSlots - 1; i >= 0; i--)
399  {
400  volatile ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
401  uint64 oldval;
402 
403  /*
404  * It's important that we check only pss_barrierGeneration here and
405  * not pss_barrierCheckMask. Bits in pss_barrierCheckMask get cleared
406  * before the barrier is actually absorbed, but pss_barrierGeneration
407  * is updated only afterward.
408  */
409  oldval = pg_atomic_read_u64(&slot->pss_barrierGeneration);
410  while (oldval < generation)
411  {
412  int events;
413 
415 
416  events =
421 
422  oldval = pg_atomic_read_u64(&slot->pss_barrierGeneration);
423  if (events & WL_TIMEOUT)
424  timeout = Min(timeout * 2, 1000L);
425  }
426  }
427 
428  /*
429  * The caller is probably calling this function because it wants to read
430  * the shared state or perform further writes to shared state once all
431  * backends are known to have absorbed the barrier. However, the read of
432  * pss_barrierGeneration was performed unlocked; insert a memory barrier
433  * to separate it from whatever follows.
434  */
436 }
437 
438 /*
439  * Handle receipt of an interrupt indicating a global barrier event.
440  *
441  * All the actual work is deferred to ProcessProcSignalBarrier(), because we
442  * cannot safely access the barrier generation inside the signal handler as
443  * 64bit atomics might use spinlock based emulation, even for reads. As this
444  * routine only gets called when PROCSIG_BARRIER is sent that won't cause a
445  * lot of unnecessary work.
446  */
447 static void
449 {
450  InterruptPending = true;
452  /* latch will be set by procsignal_sigusr1_handler */
453 }
454 
455 /*
456  * Perform global barrier related interrupt checking.
457  *
458  * Any backend that participates in ProcSignal signaling must arrange to
459  * call this function periodically. It is called from CHECK_FOR_INTERRUPTS(),
460  * which is enough for normal backends, but not necessarily for all types of
461  * background processes.
462  */
463 void
465 {
466  uint64 local_gen;
467  uint64 shared_gen;
468  volatile uint32 flags;
469 
470  Assert(MyProcSignalSlot);
471 
472  /* Exit quickly if there's no work to do. */
474  return;
475  ProcSignalBarrierPending = false;
476 
477  /*
478  * It's not unlikely to process multiple barriers at once, before the
479  * signals for all the barriers have arrived. To avoid unnecessary work in
480  * response to subsequent signals, exit early if we already have processed
481  * all of them.
482  */
483  local_gen = pg_atomic_read_u64(&MyProcSignalSlot->pss_barrierGeneration);
484  shared_gen = pg_atomic_read_u64(&ProcSignal->psh_barrierGeneration);
485 
486  Assert(local_gen <= shared_gen);
487 
488  if (local_gen == shared_gen)
489  return;
490 
491  /*
492  * Get and clear the flags that are set for this backend. Note that
493  * pg_atomic_exchange_u32 is a full barrier, so we're guaranteed that the
494  * read of the barrier generation above happens before we atomically
495  * extract the flags, and that any subsequent state changes happen
496  * afterward.
497  *
498  * NB: In order to avoid race conditions, we must zero pss_barrierCheckMask
499  * first and only afterwards try to do barrier processing. If we did it
500  * in the other order, someone could send us another barrier of some
501  * type right after we called the barrier-processing function but before
502  * we cleared the bit. We would have no way of knowing that the bit needs
503  * to stay set in that case, so the need to call the barrier-processing
504  * function again would just get forgotten. So instead, we tentatively
505  * clear all the bits and then put back any for which we don't manage
506  * to successfully absorb the barrier.
507  */
508  flags = pg_atomic_exchange_u32(&MyProcSignalSlot->pss_barrierCheckMask, 0);
509 
510  /*
511  * If there are no flags set, then we can skip doing any real work.
512  * Otherwise, establish a PG_TRY block, so that we don't lose track of
513  * which types of barrier processing are needed if an ERROR occurs.
514  */
515  if (flags != 0)
516  {
517  bool success = true;
518 
519  PG_TRY();
520  {
521  /*
522  * Process each type of barrier. The barrier-processing functions
523  * should normally return true, but may return false if the barrier
524  * can't be absorbed at the current time. This should be rare,
525  * because it's pretty expensive. Every single
526  * CHECK_FOR_INTERRUPTS() will return here until we manage to
527  * absorb the barrier, and that cost will add up in a hurry.
528  *
529  * NB: It ought to be OK to call the barrier-processing functions
530  * unconditionally, but it's more efficient to call only the ones
531  * that might need us to do something based on the flags.
532  */
533  while (flags != 0)
534  {
536  bool processed = true;
537 
539  switch (type)
540  {
542  processed = ProcessBarrierPlaceholder();
543  break;
544  }
545 
546  /*
547  * To avoid an infinite loop, we must always unset the bit
548  * in flags.
549  */
550  BARRIER_CLEAR_BIT(flags, type);
551 
552  /*
553  * If we failed to process the barrier, reset the shared bit
554  * so we try again later, and set a flag so that we don't bump
555  * our generation.
556  */
557  if (!processed)
558  {
559  ResetProcSignalBarrierBits(((uint32) 1) << type);
560  success = false;
561  }
562  }
563  }
564  PG_CATCH();
565  {
566  /*
567  * If an ERROR occurred, we'll need to try again later to handle
568  * that barrier type and any others that haven't been handled yet
569  * or weren't successfully absorbed.
570  */
572  PG_RE_THROW();
573  }
574  PG_END_TRY();
575 
576  /*
577  * If some barrier types were not successfully absorbed, we will have
578  * to try again later.
579  */
580  if (!success)
581  return;
582  }
583 
584  /*
585  * State changes related to all types of barriers that might have been
586  * emitted have now been handled, so we can update our notion of the
587  * generation to the one we observed before beginning the updates. If
588  * things have changed further, it'll get fixed up when this function is
589  * next called.
590  */
591  pg_atomic_write_u64(&MyProcSignalSlot->pss_barrierGeneration, shared_gen);
592 }
593 
594 /*
595  * If it turns out that we couldn't absorb one or more barrier types, either
596  * because the barrier-processing functions returned false or due to an error,
597  * arrange for processing to be retried later.
598  */
599 static void
601 {
602  pg_atomic_fetch_or_u32(&MyProcSignalSlot->pss_barrierCheckMask, flags);
604  InterruptPending = true;
605 }
606 
607 static bool
609 {
610  /*
611  * XXX. This is just a placeholder until the first real user of this
612  * machinery gets committed. Rename PROCSIGNAL_BARRIER_PLACEHOLDER to
613  * PROCSIGNAL_BARRIER_SOMETHING_ELSE where SOMETHING_ELSE is something
614  * appropriately descriptive. Get rid of this function and instead have
615  * ProcessBarrierSomethingElse. Most likely, that function should live in
616  * the file pertaining to that subsystem, rather than here.
617  *
618  * The return value should be 'true' if the barrier was successfully
619  * absorbed and 'false' if not. Note that returning 'false' can lead to
620  * very frequent retries, so try hard to make that an uncommon case.
621  */
622  return true;
623 }
624 
625 /*
626  * CheckProcSignal - check to see if a particular reason has been
627  * signaled, and clear the signal flag. Should be called after receiving
628  * SIGUSR1.
629  */
630 static bool
632 {
633  volatile ProcSignalSlot *slot = MyProcSignalSlot;
634 
635  if (slot != NULL)
636  {
637  /* Careful here --- don't clear flag if we haven't seen it set */
638  if (slot->pss_signalFlags[reason])
639  {
640  slot->pss_signalFlags[reason] = false;
641  return true;
642  }
643  }
644 
645  return false;
646 }
647 
648 /*
649  * procsignal_sigusr1_handler - handle SIGUSR1 signal.
650  */
651 void
653 {
654  int save_errno = errno;
655 
658 
661 
664 
667 
670 
673 
676 
679 
682 
685 
688 
689  SetLatch(MyLatch);
690 
692 
693  errno = save_errno;
694 }
uint64 EmitProcSignalBarrier(ProcSignalBarrierType type)
Definition: procsignal.c:327
void RecoveryConflictInterrupt(ProcSignalReason reason)
Definition: postgres.c:2930
int MyProcPid
Definition: globals.c:41
#define PG_UINT64_MAX
Definition: c.h:516
static void HandleProcSignalBarrierInterrupt(void)
Definition: procsignal.c:448
#define WL_TIMEOUT
Definition: latch.h:127
#define DatumGetInt32(X)
Definition: postgres.h:472
#define SIGUSR1
Definition: win32_port.h:171
pg_atomic_uint64 psh_barrierGeneration
Definition: procsignal.c:76
#define Min(x, y)
Definition: c.h:974
void ProcessProcSignalBarrier(void)
Definition: procsignal.c:464
#define FLEXIBLE_ARRAY_MEMBER
Definition: c.h:338
static bool ProcessBarrierPlaceholder(void)
Definition: procsignal.c:608
#define MemSet(start, val, len)
Definition: c.h:996
#define kill(pid, sig)
Definition: win32_port.h:454
#define LOG
Definition: elog.h:26
void SetLatch(Latch *latch)
Definition: latch.c:505
pg_atomic_uint64 pss_barrierGeneration
Definition: procsignal.c:64
void ResetLatch(Latch *latch)
Definition: latch.c:588
static void pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
Definition: atomics.h:438
int WaitLatch(Latch *latch, int wakeEvents, long timeout, uint32 wait_event_info)
Definition: latch.c:390
ProcSignalSlot psh_slot[FLEXIBLE_ARRAY_MEMBER]
Definition: procsignal.c:77
static uint32 pg_atomic_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 newval)
Definition: atomics.h:292
static void CleanupProcSignalState(int status, Datum arg)
Definition: procsignal.c:208
void HandleWalSndInitStopping(void)
Definition: walsender.c:3011
int SendProcSignal(pid_t pid, ProcSignalReason reason, BackendId backendId)
Definition: procsignal.c:256
static void pg_atomic_init_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
Definition: atomics.h:415
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:396
void on_shmem_exit(pg_on_exit_callback function, Datum arg)
Definition: ipc.c:361
pg_atomic_uint32 pss_barrierCheckMask
Definition: procsignal.c:65
unsigned int uint32
Definition: c.h:429
Size ProcSignalShmemSize(void)
Definition: procsignal.c:108
Size mul_size(Size s1, Size s2)
Definition: shmem.c:519
#define InvalidBackendId
Definition: backendid.h:23
uintptr_t Datum
Definition: postgres.h:367
Size add_size(Size s1, Size s2)
Definition: shmem.c:502
int BackendId
Definition: backendid.h:21
static ProcSignalHeader * ProcSignal
Definition: procsignal.c:95
static bool CheckProcSignal(ProcSignalReason reason)
Definition: procsignal.c:631
#define BARRIER_CLEAR_BIT(flags, type)
Definition: procsignal.c:92
#define pg_memory_barrier()
Definition: atomics.h:145
#define PG_CATCH()
Definition: elog.h:319
#define SIGNAL_ARGS
Definition: c.h:1321
#define Assert(condition)
Definition: c.h:792
volatile sig_atomic_t ProcSignalBarrierPending
Definition: globals.c:36
size_t Size
Definition: c.h:528
static void ResetProcSignalBarrierBits(uint32 flags)
Definition: procsignal.c:600
volatile sig_atomic_t InterruptPending
Definition: globals.c:30
#define PG_RE_THROW()
Definition: elog.h:350
static uint64 pg_atomic_add_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
Definition: atomics.h:504
ProcSignalBarrierType
Definition: procsignal.h:49
void ProcSignalInit(int pss_idx)
Definition: procsignal.c:157
#define Int32GetDatum(X)
Definition: postgres.h:479
static uint64 pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
Definition: atomics.h:429
void HandleNotifyInterrupt(void)
Definition: async.c:1889
#define elog(elevel,...)
Definition: elog.h:228
int i
ProcSignalReason
Definition: procsignal.h:30
sig_atomic_t pss_signalFlags[NUM_PROCSIGNALS]
Definition: procsignal.c:63
void * arg
void ProcSignalShmemInit(void)
Definition: procsignal.c:122
static volatile ProcSignalSlot * MyProcSignalSlot
Definition: procsignal.c:96
#define NumProcSignalSlots
Definition: procsignal.c:85
struct Latch * MyLatch
Definition: globals.c:55
static uint32 pg_atomic_fetch_or_u32(volatile pg_atomic_uint32 *ptr, uint32 or_)
Definition: atomics.h:372
void HandleParallelMessageInterrupt(void)
Definition: parallel.c:992
void WaitForProcSignalBarrier(uint64 generation)
Definition: procsignal.c:392
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:100
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:223
void latch_sigusr1_handler(void)
Definition: latch.c:1944
static void pg_atomic_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:258
void procsignal_sigusr1_handler(SIGNAL_ARGS)
Definition: procsignal.c:652
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:227
#define PG_TRY()
Definition: elog.h:309
static bool success
Definition: initdb.c:163
#define WL_LATCH_SET
Definition: latch.h:124
void HandleCatchupInterrupt(void)
Definition: sinval.c:156
#define PG_END_TRY()
Definition: elog.h:334
#define offsetof(type, field)
Definition: c.h:715
#define WL_EXIT_ON_PM_DEATH
Definition: latch.h:129
static int pg_rightmost_one_pos32(uint32 word)
Definition: pg_bitutils.h:85