async.c (PostgreSQL source code, git master)
1 /*-------------------------------------------------------------------------
2  *
3  * async.c
4  * Asynchronous notification: NOTIFY, LISTEN, UNLISTEN
5  *
6  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  * IDENTIFICATION
10  * src/backend/commands/async.c
11  *
12  *-------------------------------------------------------------------------
13  */
14 
15 /*-------------------------------------------------------------------------
16  * Async Notification Model as of 9.0:
17  *
18  * 1. Multiple backends on same machine. Multiple backends listening on
19  * several channels. (Channels are also called "conditions" in other
20  * parts of the code.)
21  *
22  * 2. There is one central queue in disk-based storage (directory pg_notify/),
23  * with actively-used pages mapped into shared memory by the slru.c module.
24  * All notification messages are placed in the queue and later read out
25  * by listening backends.
26  *
27  * There is no central knowledge of which backend listens on which channel;
28  * every backend has its own list of interesting channels.
29  *
30  * Although there is only one queue, notifications are treated as being
31  * database-local; this is done by including the sender's database OID
32  * in each notification message. Listening backends ignore messages
33  * that don't match their database OID. This is important because it
34  * ensures senders and receivers have the same database encoding and won't
35  * misinterpret non-ASCII text in the channel name or payload string.
36  *
37  * Since notifications are not expected to survive database crashes,
38  * we can simply clean out the pg_notify data at any reboot, and there
39  * is no need for WAL support or fsync'ing.
40  *
41  * 3. Every backend that is listening on at least one channel registers by
42  * entering its PID into the array in AsyncQueueControl. It then scans all
43  * incoming notifications in the central queue and first compares the
44  * database OID of the notification with its own database OID and then
45  * compares the notified channel with the list of channels that it listens
46  * to. In case there is a match it delivers the notification event to its
47  * frontend. Non-matching events are simply skipped.
48  *
49  * 4. The NOTIFY statement (routine Async_Notify) stores the notification in
50  * a backend-local list which will not be processed until transaction end.
51  *
52  * Duplicate notifications from the same transaction are sent out as one
53  * notification only. This is done to save work when for example a trigger
54  * on a 2 million row table fires a notification for each row that has been
55  * changed. If the application needs to receive every single notification
56  * that has been sent, it can easily add some unique string into the extra
57  * payload parameter.
58  *
59  * When the transaction is ready to commit, PreCommit_Notify() adds the
60  * pending notifications to the head of the queue. The head pointer of the
61  * queue always points to the next free position and a position is just a
62  * page number and the offset in that page. This is done before marking the
63  * transaction as committed in clog. If we run into problems writing the
64  * notifications, we can still call elog(ERROR, ...) and the transaction
65  * will roll back.
66  *
67  * Once we have put all of the notifications into the queue, we return to
68  * CommitTransaction() which will then do the actual transaction commit.
69  *
70  * After commit we are called another time (AtCommit_Notify()). Here we
71  * make the actual updates to the effective listen state (listenChannels).
72  *
73  * Finally, after we are out of the transaction altogether, we check if
74  * we need to signal listening backends. In SignalBackends() we scan the
75  * list of listening backends and send a PROCSIG_NOTIFY_INTERRUPT signal
76  * to every listening backend (we don't know which backend is listening on
77  * which channel so we must signal them all). We can exclude backends that
78  * are already up to date, though, and we can also exclude backends that
79  * are in other databases (unless they are way behind and should be kicked
80  * to make them advance their pointers). We don't bother with a
81  * self-signal either, but just process the queue directly.
82  *
83  * 5. Upon receipt of a PROCSIG_NOTIFY_INTERRUPT signal, the signal handler
84  * sets the process's latch, which triggers the event to be processed
85  * immediately if this backend is idle (i.e., it is waiting for a frontend
86  * command and is not within a transaction block. C.f.
87  * ProcessClientReadInterrupt()). Otherwise the handler may only set a
88  * flag, which will cause the processing to occur just before we next go
89  * idle.
90  *
91  * Inbound-notify processing consists of reading all of the notifications
92  * that have arrived since scanning last time. We read every notification
93  * until we reach either a notification from an uncommitted transaction or
94  * the head pointer's position.
95  *
96  * 6. To avoid SLRU wraparound and limit disk space consumption, the tail
97  * pointer needs to be advanced so that old pages can be truncated.
98  * This is relatively expensive (notably, it requires an exclusive lock),
99  * so we don't want to do it often. We make sending backends do this work
100  * if they advanced the queue head into a new page, but only once every
101  * QUEUE_CLEANUP_DELAY pages.
102  *
103  * An application that listens on the same channel it notifies will get
104  * NOTIFY messages for its own NOTIFYs. These can be ignored, if not useful,
105  * by comparing be_pid in the NOTIFY message to the application's own backend's
106  * PID. (As of FE/BE protocol 2.0, the backend's PID is provided to the
107  * frontend during startup.) The above design guarantees that notifies from
108  * other backends will never be missed by ignoring self-notifies.
109  *
110  * The amount of shared memory used for notify management (NUM_NOTIFY_BUFFERS)
111  * can be varied without affecting anything but performance. The maximum
112  * amount of notification data that can be queued at one time is determined
113  * by slru.c's wraparound limit; see QUEUE_MAX_PAGE below.
114  *-------------------------------------------------------------------------
115  */
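/*
 * Illustrative sketch of the SQL-level flow the model above implements
 * (channel name and payload here are arbitrary examples):
 *
 *		Session 1:  LISTEN jobs;
 *		Session 2:  NOTIFY jobs, 'id=42';	-- queued locally until commit
 *		Session 2:  COMMIT;					-- entry written to pg_notify queue
 *		Session 1:  is delivered the event (channel, payload, sender's PID)
 *					the next time it is idle between client commands.
 */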
116 
117 #include "postgres.h"
118 
119 #include <limits.h>
120 #include <unistd.h>
121 #include <signal.h>
122 
123 #include "access/parallel.h"
124 #include "access/slru.h"
125 #include "access/transam.h"
126 #include "access/xact.h"
127 #include "catalog/pg_database.h"
128 #include "commands/async.h"
129 #include "common/hashfn.h"
130 #include "funcapi.h"
131 #include "libpq/libpq.h"
132 #include "libpq/pqformat.h"
133 #include "miscadmin.h"
134 #include "storage/ipc.h"
135 #include "storage/lmgr.h"
136 #include "storage/proc.h"
137 #include "storage/procarray.h"
138 #include "storage/procsignal.h"
139 #include "storage/sinval.h"
140 #include "tcop/tcopprot.h"
141 #include "utils/builtins.h"
142 #include "utils/memutils.h"
143 #include "utils/ps_status.h"
144 #include "utils/snapmgr.h"
145 #include "utils/timestamp.h"
146 
147 
148 /*
149  * Maximum size of a NOTIFY payload, including terminating NULL. This
150  * must be kept small enough so that a notification message fits on one
151  * SLRU page. The magic fudge factor here is noncritical as long as it's
152  * more than AsyncQueueEntryEmptySize --- we make it significantly bigger
153  * than that, so changes in that data structure won't affect user-visible
154  * restrictions.
155  */
156 #define NOTIFY_PAYLOAD_MAX_LENGTH (BLCKSZ - NAMEDATALEN - 128)
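/*
 * With the default BLCKSZ of 8192 and NAMEDATALEN of 64 this evaluates to
 * 8000 bytes, matching the documented payload limit for NOTIFY.
 */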
157 
158 /*
159  * Struct representing an entry in the global notify queue
160  *
161  * This struct declaration has the maximal length, but in a real queue entry
162  * the data area is only big enough for the actual channel and payload strings
163  * (each null-terminated). AsyncQueueEntryEmptySize is the minimum possible
164  * entry size, if both channel and payload strings are empty (but note it
165  * doesn't include alignment padding).
166  *
167  * The "length" field should always be rounded up to the next QUEUEALIGN
168  * multiple so that all fields are properly aligned.
169  */
170 typedef struct AsyncQueueEntry
171 {
172  int length; /* total allocated length of entry */
173  Oid dboid; /* sender's database OID */
174  TransactionId xid; /* sender's XID */
175  int32 srcPid; /* sender's PID */
176  char data[NAMEDATALEN + NOTIFY_PAYLOAD_MAX_LENGTH];
177 } AsyncQueueEntry;
178 
179 /* Currently, no field of AsyncQueueEntry requires more than int alignment */
180 #define QUEUEALIGN(len) INTALIGN(len)
181 
182 #define AsyncQueueEntryEmptySize (offsetof(AsyncQueueEntry, data) + 2)
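/*
 * Worked sizing example (assuming the typical case where
 * offsetof(AsyncQueueEntry, data) is 16): a NOTIFY on channel "foo" with
 * payload "bar" needs 16 + 3 + 1 + 3 + 1 = 24 bytes including the two
 * terminators, and QUEUEALIGN(24) leaves the entry length at 24.
 */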
183 
184 /*
185  * Struct describing a queue position, and assorted macros for working with it
186  */
187 typedef struct QueuePosition
188 {
189  int page; /* SLRU page number */
190  int offset; /* byte offset within page */
191 } QueuePosition;
192 
193 #define QUEUE_POS_PAGE(x) ((x).page)
194 #define QUEUE_POS_OFFSET(x) ((x).offset)
195 
196 #define SET_QUEUE_POS(x,y,z) \
197  do { \
198  (x).page = (y); \
199  (x).offset = (z); \
200  } while (0)
201 
202 #define QUEUE_POS_EQUAL(x,y) \
203  ((x).page == (y).page && (x).offset == (y).offset)
204 
205 #define QUEUE_POS_IS_ZERO(x) \
206  ((x).page == 0 && (x).offset == 0)
207 
208 /* choose logically smaller QueuePosition */
209 #define QUEUE_POS_MIN(x,y) \
210  (asyncQueuePagePrecedes((x).page, (y).page) ? (x) : \
211  (x).page != (y).page ? (y) : \
212  (x).offset < (y).offset ? (x) : (y))
213 
214 /* choose logically larger QueuePosition */
215 #define QUEUE_POS_MAX(x,y) \
216  (asyncQueuePagePrecedes((x).page, (y).page) ? (y) : \
217  (x).page != (y).page ? (x) : \
218  (x).offset > (y).offset ? (x) : (y))
219 
220 /*
221  * Parameter determining how often we try to advance the tail pointer:
222  * we do that after every QUEUE_CLEANUP_DELAY pages of NOTIFY data. This is
223  * also the distance by which a backend in another database needs to be
224  * behind before we'll decide we need to wake it up to advance its pointer.
225  *
226  * Resist the temptation to make this really large. While that would save
227  * work in some places, it would add cost in others. In particular, this
228  * should likely be less than NUM_NOTIFY_BUFFERS, to ensure that backends
229  * catch up before the pages they'll need to read fall out of SLRU cache.
230  */
231 #define QUEUE_CLEANUP_DELAY 4
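/*
 * With the default BLCKSZ of 8192, a delay of 4 pages means a sending
 * backend attempts to advance the tail roughly once per 32kB of queued
 * notification data.
 */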
232 
233 /*
234  * Struct describing a listening backend's status
235  */
236 typedef struct QueueBackendStatus
237 {
238  int32 pid; /* either a PID or InvalidPid */
239  Oid dboid; /* backend's database OID, or InvalidOid */
240  BackendId nextListener; /* id of next listener, or InvalidBackendId */
241  QueuePosition pos; /* backend has read queue up to here */
242 } QueueBackendStatus;
243 
244 /*
245  * Shared memory state for LISTEN/NOTIFY (excluding its SLRU stuff)
246  *
247  * The AsyncQueueControl structure is protected by the NotifyQueueLock and
248  * NotifyQueueTailLock.
249  *
250  * When holding NotifyQueueLock in SHARED mode, backends may only inspect
251  * their own entries as well as the head and tail pointers. Consequently we
252  * can allow a backend to update its own record while holding only SHARED lock
253  * (since no other backend will inspect it).
254  *
255  * When holding NotifyQueueLock in EXCLUSIVE mode, backends can inspect the
256  * entries of other backends and also change the head pointer. When holding
257  * both NotifyQueueLock and NotifyQueueTailLock in EXCLUSIVE mode, backends
258  * can change the tail pointers.
259  *
260  * NotifySLRULock is used as the control lock for the pg_notify SLRU buffers.
261  * In order to avoid deadlocks, whenever we need multiple locks, we first get
262  * NotifyQueueTailLock, then NotifyQueueLock, and lastly NotifySLRULock.
263  *
264  * Each backend uses the backend[] array entry with index equal to its
265  * BackendId (which can range from 1 to MaxBackends). We rely on this to make
266  * SendProcSignal fast.
267  *
268  * The backend[] array entries for actively-listening backends are threaded
269  * together using firstListener and the nextListener links, so that we can
270  * scan them without having to iterate over inactive entries. We keep this
271  * list in order by BackendId so that the scan is cache-friendly when there
272  * are many active entries.
273  */
274 typedef struct AsyncQueueControl
275 {
276  QueuePosition head; /* head points to the next free location */
277  QueuePosition tail; /* tail must be <= the queue position of every
278  * listening backend */
279  int stopPage; /* oldest unrecycled page; must be <=
280  * tail.page */
281  BackendId firstListener; /* id of first listener, or InvalidBackendId */
282  TimestampTz lastQueueFillWarn; /* time of last queue-full msg */
283  QueueBackendStatus backend[FLEXIBLE_ARRAY_MEMBER];
284  /* backend[0] is not used; used entries are from [1] to [MaxBackends] */
285 } AsyncQueueControl;
286 
287 static AsyncQueueControl *asyncQueueControl;
288 
289 #define QUEUE_HEAD (asyncQueueControl->head)
290 #define QUEUE_TAIL (asyncQueueControl->tail)
291 #define QUEUE_STOP_PAGE (asyncQueueControl->stopPage)
292 #define QUEUE_FIRST_LISTENER (asyncQueueControl->firstListener)
293 #define QUEUE_BACKEND_PID(i) (asyncQueueControl->backend[i].pid)
294 #define QUEUE_BACKEND_DBOID(i) (asyncQueueControl->backend[i].dboid)
295 #define QUEUE_NEXT_LISTENER(i) (asyncQueueControl->backend[i].nextListener)
296 #define QUEUE_BACKEND_POS(i) (asyncQueueControl->backend[i].pos)
297 
298 /*
299  * The SLRU buffer area through which we access the notification queue
300  */
301 static SlruCtlData NotifyCtlData;
302 
303 #define NotifyCtl (&NotifyCtlData)
304 #define QUEUE_PAGESIZE BLCKSZ
305 #define QUEUE_FULL_WARN_INTERVAL 5000 /* warn at most once every 5s */
306 
307 /*
308  * Use segments 0000 through FFFF. Each contains SLRU_PAGES_PER_SEGMENT pages
309  * which gives us the pages from 0 to SLRU_PAGES_PER_SEGMENT * 0x10000 - 1.
310  * We could use as many segments as SlruScanDirectory() allows, but this gives
311  * us so much space already that it doesn't seem worth the trouble.
312  *
313  * The most data we can have in the queue at a time is QUEUE_MAX_PAGE/2
314  * pages, because more than that would confuse slru.c into thinking there
315  * was a wraparound condition. With the default BLCKSZ this means there
316  * can be up to 8GB of queued-and-not-read data.
317  *
318  * Note: it's possible to redefine QUEUE_MAX_PAGE with a smaller multiple of
319  * SLRU_PAGES_PER_SEGMENT, for easier testing of queue-full behaviour.
320  */
321 #define QUEUE_MAX_PAGE (SLRU_PAGES_PER_SEGMENT * 0x10000 - 1)
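/*
 * Worked arithmetic for the 8GB figure: with SLRU_PAGES_PER_SEGMENT at its
 * stock value of 32, QUEUE_MAX_PAGE + 1 = 32 * 0x10000 = 2097152 pages.
 * Half of that is 1048576 pages of BLCKSZ (8192) bytes each, i.e. 8GB of
 * queued-and-not-read data.
 */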
322 
323 /*
324  * listenChannels identifies the channels we are actually listening to
325  * (ie, have committed a LISTEN on). It is a simple list of channel names,
326  * allocated in TopMemoryContext.
327  */
328 static List *listenChannels = NIL; /* list of C strings */
329 
330 /*
331  * State for pending LISTEN/UNLISTEN actions consists of an ordered list of
332  * all actions requested in the current transaction. As explained above,
333  * we don't actually change listenChannels until we reach transaction commit.
334  *
335  * The list is kept in CurTransactionContext. In subtransactions, each
336  * subtransaction has its own list in its own CurTransactionContext, but
337  * successful subtransactions attach their lists to their parent's list.
338  * Failed subtransactions simply discard their lists.
339  */
340 typedef enum
341 {
342  LISTEN_LISTEN,
343  LISTEN_UNLISTEN,
344  LISTEN_UNLISTEN_ALL
345 } ListenActionKind;
346 
347 typedef struct
348 {
349  ListenActionKind action;
350  char channel[FLEXIBLE_ARRAY_MEMBER]; /* nul-terminated string */
351 } ListenAction;
352 
353 typedef struct ActionList
354 {
355  int nestingLevel; /* current transaction nesting depth */
356  List *actions; /* list of ListenAction structs */
357  struct ActionList *upper; /* details for upper transaction levels */
358 } ActionList;
359 
360 static ActionList *pendingActions = NULL;
361 
362 /*
363  * State for outbound notifies consists of a list of all channels+payloads
364  * NOTIFYed in the current transaction. We do not actually perform a NOTIFY
365  * until and unless the transaction commits. pendingNotifies is NULL if no
366  * NOTIFYs have been done in the current (sub) transaction.
367  *
368  * We discard duplicate notify events issued in the same transaction.
369  * Hence, in addition to the list proper (which we need to track the order
370  * of the events, since we guarantee to deliver them in order), we build a
371  * hash table which we can probe to detect duplicates. Since building the
372  * hash table is somewhat expensive, we do so only once we have at least
373  * MIN_HASHABLE_NOTIFIES events queued in the current (sub) transaction;
374  * before that we just scan the events linearly.
375  *
376  * The list is kept in CurTransactionContext. In subtransactions, each
377  * subtransaction has its own list in its own CurTransactionContext, but
378  * successful subtransactions add their entries to their parent's list.
379  * Failed subtransactions simply discard their lists. Since these lists
380  * are independent, there may be notify events in a subtransaction's list
381  * that duplicate events in some ancestor (sub) transaction; we get rid of
382  * the dups when merging the subtransaction's list into its parent's.
383  *
384  * Note: the action and notify lists do not interact within a transaction.
385  * In particular, if a transaction does NOTIFY and then LISTEN on the same
386  * condition name, it will get a self-notify at commit. This is a bit odd
387  * but is consistent with our historical behavior.
388  */
389 typedef struct Notification
390 {
391  uint16 channel_len; /* length of channel-name string */
392  uint16 payload_len; /* length of payload string */
393  /* null-terminated channel name, then null-terminated payload follow */
394  char data[FLEXIBLE_ARRAY_MEMBER];
395 } Notification;
396 
397 typedef struct NotificationList
398 {
399  int nestingLevel; /* current transaction nesting depth */
400  List *events; /* list of Notification structs */
401  HTAB *hashtab; /* hash of NotificationHash structs, or NULL */
402  struct NotificationList *upper; /* details for upper transaction levels */
403 } NotificationList;
404 
405 #define MIN_HASHABLE_NOTIFIES 16 /* threshold to build hashtab */
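/*
 * Example of the duplicate-elimination contract described above: a trigger
 * that fires NOTIFY jobs, 'changed' once per row of a large UPDATE yields a
 * single event for the transaction, while giving each row a distinct
 * payload (say, its primary key) queues one event per row.
 */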
406 
407 typedef struct NotificationHash
408 {
409  Notification *event; /* => the actual Notification struct */
410 } NotificationHash;
411 
412 static NotificationList *pendingNotifies = NULL;
413 
414 /*
415  * Inbound notifications are initially processed by HandleNotifyInterrupt(),
416  * called from inside a signal handler. That just sets the
417  * notifyInterruptPending flag and sets the process
418  * latch. ProcessNotifyInterrupt() will then be called whenever it's safe to
419  * actually deal with the interrupt.
420  */
421 volatile sig_atomic_t notifyInterruptPending = false;
422 
423 /* True if we've registered an on_shmem_exit cleanup */
424 static bool unlistenExitRegistered = false;
425 
426 /* True if we're currently registered as a listener in asyncQueueControl */
427 static bool amRegisteredListener = false;
428 
429 /* has this backend sent notifications in the current transaction? */
430 static bool backendHasSentNotifications = false;
431 
432 /* have we advanced to a page that's a multiple of QUEUE_CLEANUP_DELAY? */
433 static bool backendTryAdvanceTail = false;
434 
435 /* GUC parameter */
436 bool Trace_notify = false;
437 
438 /* local function prototypes */
439 static int asyncQueuePageDiff(int p, int q);
440 static bool asyncQueuePagePrecedes(int p, int q);
441 static void queue_listen(ListenActionKind action, const char *channel);
442 static void Async_UnlistenOnExit(int code, Datum arg);
443 static void Exec_ListenPreCommit(void);
444 static void Exec_ListenCommit(const char *channel);
445 static void Exec_UnlistenCommit(const char *channel);
446 static void Exec_UnlistenAllCommit(void);
447 static bool IsListeningOn(const char *channel);
448 static void asyncQueueUnregister(void);
449 static bool asyncQueueIsFull(void);
450 static bool asyncQueueAdvance(volatile QueuePosition *position, int entryLength);
451 static void asyncQueueNotificationToEntry(Notification *n, AsyncQueueEntry *qe);
452 static ListCell *asyncQueueAddEntries(ListCell *nextNotify);
453 static double asyncQueueUsage(void);
454 static void asyncQueueFillWarning(void);
455 static void SignalBackends(void);
456 static void asyncQueueReadAllNotifications(void);
457 static bool asyncQueueProcessPageEntries(volatile QueuePosition *current,
458  QueuePosition stop,
459  char *page_buffer,
460  Snapshot snapshot);
461 static void asyncQueueAdvanceTail(void);
462 static void ProcessIncomingNotify(void);
463 static bool AsyncExistsPendingNotify(Notification *n);
464 static void AddEventToPendingNotifies(Notification *n);
465 static uint32 notification_hash(const void *key, Size keysize);
466 static int notification_match(const void *key1, const void *key2, Size keysize);
467 static void ClearPendingActionsAndNotifies(void);
468 
469 /*
470  * Compute the difference between two queue page numbers (i.e., p - q),
471  * accounting for wraparound.
472  */
473 static int
474 asyncQueuePageDiff(int p, int q)
475 {
476  int diff;
477 
478  /*
479  * We have to compare modulo (QUEUE_MAX_PAGE+1)/2. Both inputs should be
480  * in the range 0..QUEUE_MAX_PAGE.
481  */
482  Assert(p >= 0 && p <= QUEUE_MAX_PAGE);
483  Assert(q >= 0 && q <= QUEUE_MAX_PAGE);
484 
485  diff = p - q;
486  if (diff >= ((QUEUE_MAX_PAGE + 1) / 2))
487  diff -= QUEUE_MAX_PAGE + 1;
488  else if (diff < -((QUEUE_MAX_PAGE + 1) / 2))
489  diff += QUEUE_MAX_PAGE + 1;
490  return diff;
491 }
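/*
 * Worked wraparound example: if QUEUE_MAX_PAGE were redefined to 7 for
 * testing, comparisons are modulo 8 and the half-range is 4.  Then
 * asyncQueuePageDiff(1, 6) computes 1 - 6 = -5, which is below -4, so 8 is
 * added and the result is +3: page 1 logically follows page 6, as expected
 * once the head has wrapped past page 7.
 */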
492 
493 /*
494  * Is p < q, accounting for wraparound?
495  *
496  * Since asyncQueueIsFull() blocks creation of a page that could precede any
497  * extant page, we need not assess entries within a page.
498  */
499 static bool
500 asyncQueuePagePrecedes(int p, int q)
501 {
502  return asyncQueuePageDiff(p, q) < 0;
503 }
504 
505 /*
506  * Report space needed for our shared memory area
507  */
508 Size
509 AsyncShmemSize(void)
510 {
511  Size size;
512 
513  /* This had better match AsyncShmemInit */
514  size = mul_size(MaxBackends + 1, sizeof(QueueBackendStatus));
515  size = add_size(size, offsetof(AsyncQueueControl, backend));
516 
518 
519  return size;
520 }
521 
522 /*
523  * Initialize our shared memory area
524  */
525 void
526 AsyncShmemInit(void)
527 {
528  bool found;
529  Size size;
530 
531  /*
532  * Create or attach to the AsyncQueueControl structure.
533  *
534  * The used entries in the backend[] array run from 1 to MaxBackends; the
535  * zero'th entry is unused but must be allocated.
536  */
537  size = mul_size(MaxBackends + 1, sizeof(QueueBackendStatus));
538  size = add_size(size, offsetof(AsyncQueueControl, backend));
539 
540  asyncQueueControl = (AsyncQueueControl *)
541  ShmemInitStruct("Async Queue Control", size, &found);
542 
543  if (!found)
544  {
545  /* First time through, so initialize it */
546  SET_QUEUE_POS(QUEUE_HEAD, 0, 0);
547  SET_QUEUE_POS(QUEUE_TAIL, 0, 0);
548  QUEUE_STOP_PAGE = 0;
549  QUEUE_FIRST_LISTENER = InvalidBackendId;
550  asyncQueueControl->lastQueueFillWarn = 0;
551  /* zero'th entry won't be used, but let's initialize it anyway */
552  for (int i = 0; i <= MaxBackends; i++)
553  {
554  QUEUE_BACKEND_PID(i) = InvalidPid;
555  QUEUE_BACKEND_DBOID(i) = InvalidOid;
556  QUEUE_NEXT_LISTENER(i) = InvalidBackendId;
557  SET_QUEUE_POS(QUEUE_BACKEND_POS(i), 0, 0);
558  }
559  }
560 
561  /*
562  * Set up SLRU management of the pg_notify data.
563  */
564  NotifyCtl->PagePrecedes = asyncQueuePagePrecedes;
565  SimpleLruInit(NotifyCtl, "Notify", NUM_NOTIFY_BUFFERS, 0,
566  NotifySLRULock, "pg_notify", LWTRANCHE_NOTIFY_BUFFER,
567  SYNC_HANDLER_NONE);
568 
569  if (!found)
570  {
571  /*
572  * During start or reboot, clean out the pg_notify directory.
573  */
574  (void) SlruScanDirectory(NotifyCtl, SlruScanDirCbDeleteAll, NULL);
575  }
576 }
577 
578 
579 /*
580  * pg_notify -
581  * SQL function to send a notification event
582  */
583 Datum
584 pg_notify(PG_FUNCTION_ARGS)
585 {
586  const char *channel;
587  const char *payload;
588 
589  if (PG_ARGISNULL(0))
590  channel = "";
591  else
592  channel = text_to_cstring(PG_GETARG_TEXT_PP(0));
593 
594  if (PG_ARGISNULL(1))
595  payload = "";
596  else
597  payload = text_to_cstring(PG_GETARG_TEXT_PP(1));
598 
599  /* For NOTIFY as a statement, this is checked in ProcessUtility */
600  PreventCommandDuringRecovery("NOTIFY");
601 
602  Async_Notify(channel, payload);
603 
604  PG_RETURN_VOID();
605 }
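/*
 * Usage sketch: SELECT pg_notify('jobs', 'id=42'); behaves like
 * NOTIFY jobs, 'id=42', but since it is an ordinary function call the
 * channel and payload may be non-constant expressions.
 */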
606 
607 
608 /*
609  * Async_Notify
610  *
611  * This is executed by the SQL notify command.
612  *
613  * Adds the message to the list of pending notifies.
614  * Actual notification happens during transaction commit.
615  * ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
616  */
617 void
618 Async_Notify(const char *channel, const char *payload)
619 {
620  int my_level = GetCurrentTransactionNestLevel();
621  size_t channel_len;
622  size_t payload_len;
623  Notification *n;
624  MemoryContext oldcontext;
625 
626  if (IsParallelWorker())
627  elog(ERROR, "cannot send notifications from a parallel worker");
628 
629  if (Trace_notify)
630  elog(DEBUG1, "Async_Notify(%s)", channel);
631 
632  channel_len = channel ? strlen(channel) : 0;
633  payload_len = payload ? strlen(payload) : 0;
634 
635  /* a channel name must be specified */
636  if (channel_len == 0)
637  ereport(ERROR,
638  (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
639  errmsg("channel name cannot be empty")));
640 
641  /* enforce length limits */
642  if (channel_len >= NAMEDATALEN)
643  ereport(ERROR,
644  (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
645  errmsg("channel name too long")));
646 
647  if (payload_len >= NOTIFY_PAYLOAD_MAX_LENGTH)
648  ereport(ERROR,
649  (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
650  errmsg("payload string too long")));
651 
652  /*
653  * We must construct the Notification entry, even if we end up not using
654  * it, in order to compare it cheaply to existing list entries.
655  *
656  * The notification list needs to live until end of transaction, so store
657  * it in the transaction context.
658  */
659  oldcontext = MemoryContextSwitchTo(CurTransactionContext);
660 
661  n = (Notification *) palloc(offsetof(Notification, data) +
662  channel_len + payload_len + 2);
663  n->channel_len = channel_len;
664  n->payload_len = payload_len;
665  strcpy(n->data, channel);
666  if (payload)
667  strcpy(n->data + channel_len + 1, payload);
668  else
669  n->data[channel_len + 1] = '\0';
670 
671  if (pendingNotifies == NULL || my_level > pendingNotifies->nestingLevel)
672  {
673  NotificationList *notifies;
674 
675  /*
676  * First notify event in current (sub)xact. Note that we allocate the
677  * NotificationList in TopTransactionContext; the nestingLevel might
678  * get changed later by AtSubCommit_Notify.
679  */
680  notifies = (NotificationList *)
681  MemoryContextAlloc(TopTransactionContext,
682  sizeof(NotificationList));
683  notifies->nestingLevel = my_level;
684  notifies->events = list_make1(n);
685  /* We certainly don't need a hashtable yet */
686  notifies->hashtab = NULL;
687  notifies->upper = pendingNotifies;
688  pendingNotifies = notifies;
689  }
690  else
691  {
692  /* Now check for duplicates */
693  if (AsyncExistsPendingNotify(n))
694  {
695  /* It's a dup, so forget it */
696  pfree(n);
697  MemoryContextSwitchTo(oldcontext);
698  return;
699  }
700 
701  /* Append more events to existing list */
702  AddEventToPendingNotifies(n);
703  }
704 
705  MemoryContextSwitchTo(oldcontext);
706 }
707 
708 /*
709  * queue_listen
710  * Common code for listen, unlisten, unlisten all commands.
711  *
712  * Adds the request to the list of pending actions.
713  * Actual update of the listenChannels list happens during transaction
714  * commit.
715  */
716 static void
717 queue_listen(ListenActionKind action, const char *channel)
718 {
719  MemoryContext oldcontext;
720  ListenAction *actrec;
721  int my_level = GetCurrentTransactionNestLevel();
722 
723  /*
724  * Unlike Async_Notify, we don't try to collapse out duplicates. It would
725  * be too complicated to ensure we get the right interactions of
726  * conflicting LISTEN/UNLISTEN/UNLISTEN_ALL, and it's unlikely that there
727  * would be any performance benefit anyway in sane applications.
728  */
729  oldcontext = MemoryContextSwitchTo(CurTransactionContext);
730 
731  /* space for terminating null is included in sizeof(ListenAction) */
732  actrec = (ListenAction *) palloc(offsetof(ListenAction, channel) +
733  strlen(channel) + 1);
734  actrec->action = action;
735  strcpy(actrec->channel, channel);
736 
737  if (pendingActions == NULL || my_level > pendingActions->nestingLevel)
738  {
739  ActionList *actions;
740 
741  /*
742  * First action in current sub(xact). Note that we allocate the
743  * ActionList in TopTransactionContext; the nestingLevel might get
744  * changed later by AtSubCommit_Notify.
745  */
746  actions = (ActionList *)
747  MemoryContextAlloc(TopTransactionContext, sizeof(ActionList));
748  actions->nestingLevel = my_level;
749  actions->actions = list_make1(actrec);
750  actions->upper = pendingActions;
751  pendingActions = actions;
752  }
753  else
754  pendingActions->actions = lappend(pendingActions->actions, actrec);
755 
756  MemoryContextSwitchTo(oldcontext);
757 }
758 
759 /*
760  * Async_Listen
761  *
762  * This is executed by the SQL listen command.
763  */
764 void
765 Async_Listen(const char *channel)
766 {
767  if (Trace_notify)
768  elog(DEBUG1, "Async_Listen(%s,%d)", channel, MyProcPid);
769 
770  queue_listen(LISTEN_LISTEN, channel);
771 }
772 
773 /*
774  * Async_Unlisten
775  *
776  * This is executed by the SQL unlisten command.
777  */
778 void
779 Async_Unlisten(const char *channel)
780 {
781  if (Trace_notify)
782  elog(DEBUG1, "Async_Unlisten(%s,%d)", channel, MyProcPid);
783 
784  /* If we couldn't possibly be listening, no need to queue anything */
785  if (pendingActions == NULL && !unlistenExitRegistered)
786  return;
787 
788  queue_listen(LISTEN_UNLISTEN, channel);
789 }
790 
791 /*
792  * Async_UnlistenAll
793  *
794  * This is invoked by UNLISTEN * command, and also at backend exit.
795  */
796 void
797 Async_UnlistenAll(void)
798 {
799  if (Trace_notify)
800  elog(DEBUG1, "Async_UnlistenAll(%d)", MyProcPid);
801 
802  /* If we couldn't possibly be listening, no need to queue anything */
803  if (pendingActions == NULL && !unlistenExitRegistered)
804  return;
805 
806  queue_listen(LISTEN_UNLISTEN_ALL, "");
807 }
808 
809 /*
810  * SQL function: return a set of the channel names this backend is actively
811  * listening to.
812  *
813  * Note: this coding relies on the fact that the listenChannels list cannot
814  * change within a transaction.
815  */
816 Datum
817 pg_listening_channels(PG_FUNCTION_ARGS)
818 {
819  FuncCallContext *funcctx;
820 
821  /* stuff done only on the first call of the function */
822  if (SRF_IS_FIRSTCALL())
823  {
824  /* create a function context for cross-call persistence */
825  funcctx = SRF_FIRSTCALL_INIT();
826  }
827 
828  /* stuff done on every call of the function */
829  funcctx = SRF_PERCALL_SETUP();
830 
831  if (funcctx->call_cntr < list_length(listenChannels))
832  {
833  char *channel = (char *) list_nth(listenChannels,
834  funcctx->call_cntr);
835 
836  SRF_RETURN_NEXT(funcctx, CStringGetTextDatum(channel));
837  }
838 
839  SRF_RETURN_DONE(funcctx);
840 }
841 
842 /*
843  * Async_UnlistenOnExit
844  *
845  * This is executed at backend exit if we have done any LISTENs in this
846  * backend. It might not be necessary anymore, if the user UNLISTENed
847  * everything, but we don't try to detect that case.
848  */
849 static void
850 Async_UnlistenOnExit(int code, Datum arg)
851 {
852  Exec_UnlistenAllCommit();
853  asyncQueueUnregister();
854 }
855 
856 /*
857  * AtPrepare_Notify
858  *
859  * This is called at the prepare phase of a two-phase
860  * transaction. Save the state for possible commit later.
861  */
862 void
863 AtPrepare_Notify(void)
864 {
865  /* It's not allowed to have any pending LISTEN/UNLISTEN/NOTIFY actions */
866  if (pendingActions || pendingNotifies)
867  ereport(ERROR,
868  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
869  errmsg("cannot PREPARE a transaction that has executed LISTEN, UNLISTEN, or NOTIFY")));
870 }
871 
872 /*
873  * PreCommit_Notify
874  *
875  * This is called at transaction commit, before actually committing to
876  * clog.
877  *
878  * If there are pending LISTEN actions, make sure we are listed in the
879  * shared-memory listener array. This must happen before commit to
880  * ensure we don't miss any notifies from transactions that commit
881  * just after ours.
882  *
883  * If there are outbound notify requests in the pendingNotifies list,
884  * add them to the global queue. We do that before commit so that
885  * we can still throw error if we run out of queue space.
886  */
887 void
888 PreCommit_Notify(void)
889 {
890  ListCell *p;
891 
892  if (!pendingActions && !pendingNotifies)
893  return; /* no relevant statements in this xact */
894 
895  if (Trace_notify)
896  elog(DEBUG1, "PreCommit_Notify");
897 
898  /* Preflight for any pending listen/unlisten actions */
899  if (pendingActions != NULL)
900  {
901  foreach(p, pendingActions->actions)
902  {
903  ListenAction *actrec = (ListenAction *) lfirst(p);
904 
905  switch (actrec->action)
906  {
907  case LISTEN_LISTEN:
908  Exec_ListenPreCommit();
909  break;
910  case LISTEN_UNLISTEN:
911  /* there is no Exec_UnlistenPreCommit() */
912  break;
913  case LISTEN_UNLISTEN_ALL:
914  /* there is no Exec_UnlistenAllPreCommit() */
915  break;
916  }
917  }
918  }
919 
920  /* Queue any pending notifies (must happen after the above) */
921  if (pendingNotifies)
922  {
923  ListCell *nextNotify;
924 
925  /*
926  * Make sure that we have an XID assigned to the current transaction.
927  * GetCurrentTransactionId is cheap if we already have an XID, but not
928  * so cheap if we don't, and we'd prefer not to do that work while
929  * holding NotifyQueueLock.
930  */
931  (void) GetCurrentTransactionId();
932 
933  /*
934  * Serialize writers by acquiring a special lock that we hold till
935  * after commit. This ensures that queue entries appear in commit
936  * order, and in particular that there are never uncommitted queue
937  * entries ahead of committed ones, so an uncommitted transaction
938  * can't block delivery of deliverable notifications.
939  *
940  * We use a heavyweight lock so that it'll automatically be released
941  * after either commit or abort. This also allows deadlocks to be
942  * detected, though really a deadlock shouldn't be possible here.
943  *
944  * The lock is on "database 0", which is pretty ugly but it doesn't
945  * seem worth inventing a special locktag category just for this.
946  * (Historical note: before PG 9.0, a similar lock on "database 0" was
947  * used by the flatfiles mechanism.)
948  */
949  LockSharedObject(DatabaseRelationId, InvalidOid, 0,
950  AccessExclusiveLock);
951 
952  /* Now push the notifications into the queue */
953  backendHasSentNotifications = true;
954 
955  nextNotify = list_head(pendingNotifies->events);
956  while (nextNotify != NULL)
957  {
958  /*
959  * Add the pending notifications to the queue. We acquire and
960  * release NotifyQueueLock once per page, which might be overkill
961  * but it does allow readers to get in while we're doing this.
962  *
963  * A full queue is very uncommon and should really not happen,
964  * given that we have so much space available in the SLRU pages.
965  * Nevertheless we need to deal with this possibility. Note that
966  * when we get here we are in the process of committing our
967  * transaction, but we have not yet committed to clog, so at this
968  * point in time we can still roll the transaction back.
969  */
970  LWLockAcquire(NotifyQueueLock, LW_EXCLUSIVE);
971  asyncQueueFillWarning();
972  if (asyncQueueIsFull())
973  ereport(ERROR,
974  (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
975  errmsg("too many notifications in the NOTIFY queue")));
976  nextNotify = asyncQueueAddEntries(nextNotify);
977  LWLockRelease(NotifyQueueLock);
978  }
979  }
980 }
981 
982 /*
983  * AtCommit_Notify
984  *
985  * This is called at transaction commit, after committing to clog.
986  *
987  * Update listenChannels and clear transaction-local state.
988  */
989 void
990 AtCommit_Notify(void)
991 {
992  ListCell *p;
993 
994  /*
995  * Allow transactions that have not executed LISTEN/UNLISTEN/NOTIFY to
996  * return as soon as possible
997  */
998  if (!pendingActions && !pendingNotifies)
999  return;
1000 
1001  if (Trace_notify)
1002  elog(DEBUG1, "AtCommit_Notify");
1003 
1004  /* Perform any pending listen/unlisten actions */
1005  if (pendingActions != NULL)
1006  {
1007  foreach(p, pendingActions->actions)
1008  {
1009  ListenAction *actrec = (ListenAction *) lfirst(p);
1010 
1011  switch (actrec->action)
1012  {
1013  case LISTEN_LISTEN:
1014  Exec_ListenCommit(actrec->channel);
1015  break;
1016  case LISTEN_UNLISTEN:
1017  Exec_UnlistenCommit(actrec->channel);
1018  break;
1019  case LISTEN_UNLISTEN_ALL:
1020  Exec_UnlistenAllCommit();
1021  break;
1022  }
1023  }
1024  }
1025 
1026  /* If no longer listening to anything, get out of listener array */
1027  if (amRegisteredListener && listenChannels == NIL)
1028  asyncQueueUnregister();
1029 
1030  /* And clean up */
1031  ClearPendingActionsAndNotifies();
1032 }
1033 
1034 /*
1035  * Exec_ListenPreCommit --- subroutine for PreCommit_Notify
1036  *
1037  * This function must make sure we are ready to catch any incoming messages.
1038  */
1039 static void
1040 Exec_ListenPreCommit(void)
1041 {
1042  QueuePosition head;
1043  QueuePosition max;
1044  BackendId prevListener;
1045 
1046  /*
1047  * Nothing to do if we are already listening to something, nor if we
1048  * already ran this routine in this transaction.
1049  */
1050  if (amRegisteredListener)
1051  return;
1052 
1053  if (Trace_notify)
1054  elog(DEBUG1, "Exec_ListenPreCommit(%d)", MyProcPid);
1055 
1056  /*
1057  * Before registering, make sure we will unlisten before dying. (Note:
1058  * this action does not get undone if we abort later.)
1059  */
1060  if (!unlistenExitRegistered)
1061  {
1062  before_shmem_exit(Async_UnlistenOnExit, 0);
1063  unlistenExitRegistered = true;
1064  }
1065 
1066  /*
1067  * This is our first LISTEN, so establish our pointer.
1068  *
1069  * We set our pointer to the global tail pointer and then move it forward
1070  * over already-committed notifications. This ensures we cannot miss any
1071  * not-yet-committed notifications. We might get a few more but that
1072  * doesn't hurt.
1073  *
1074  * In some scenarios there might be a lot of committed notifications that
1075  * have not yet been pruned away (because some backend is being lazy about
1076  * reading them). To reduce our startup time, we can look at other
1077  * backends and adopt the maximum "pos" pointer of any backend that's in
1078  * our database; any notifications it's already advanced over are surely
1079  * committed and need not be re-examined by us. (We must consider only
1080  * backends connected to our DB, because others will not have bothered to
1081  * check committed-ness of notifications in our DB.)
1082  *
1083  * We need exclusive lock here so we can look at other backends' entries
1084  * and manipulate the list links.
1085  */
1086  LWLockAcquire(NotifyQueueLock, LW_EXCLUSIVE);
1087  head = QUEUE_HEAD;
1088  max = QUEUE_TAIL;
1089  prevListener = InvalidBackendId;
1090  for (BackendId i = QUEUE_FIRST_LISTENER; i > 0; i = QUEUE_NEXT_LISTENER(i))
1091  {
1092  if (QUEUE_BACKEND_DBOID(i) == MyDatabaseId)
1093  max = QUEUE_POS_MAX(max, QUEUE_BACKEND_POS(i));
1094  /* Also find last listening backend before this one */
1095  if (i < MyBackendId)
1096  prevListener = i;
1097  }
1098  QUEUE_BACKEND_POS(MyBackendId) = max;
1099  QUEUE_BACKEND_PID(MyBackendId) = MyProcPid;
1100  QUEUE_BACKEND_DBOID(MyBackendId) = MyDatabaseId;
1101  /* Insert backend into list of listeners at correct position */
1102  if (prevListener > 0)
1103  {
1104  QUEUE_NEXT_LISTENER(MyBackendId) = QUEUE_NEXT_LISTENER(prevListener);
1105  QUEUE_NEXT_LISTENER(prevListener) = MyBackendId;
1106  }
1107  else
1108  {
1109  QUEUE_NEXT_LISTENER(MyBackendId) = QUEUE_FIRST_LISTENER;
1110  QUEUE_FIRST_LISTENER = MyBackendId;
1111  }
1112  LWLockRelease(NotifyQueueLock);
1113 
1114  /* Now we are listed in the global array, so remember we're listening */
1115  amRegisteredListener = true;
1116 
1117  /*
1118  * Try to move our pointer forward as far as possible. This will skip
1119  * over already-committed notifications, which we want to do because they
1120  * might be quite stale. Note that we are not yet listening on anything,
1121  * so we won't deliver such notifications to our frontend. Also, although
1122  * our transaction might have executed NOTIFY, those message(s) aren't
1123  * queued yet so we won't skip them here.
1124  */
1125  if (!QUEUE_POS_EQUAL(max, head))
1126  asyncQueueReadAllNotifications();
1127 }
1128 
1129 /*
1130  * Exec_ListenCommit --- subroutine for AtCommit_Notify
1131  *
1132  * Add the channel to the list of channels we are listening on.
1133  */
1134 static void
1135 Exec_ListenCommit(const char *channel)
1136 {
1137  MemoryContext oldcontext;
1138 
1139  /* Do nothing if we are already listening on this channel */
1140  if (IsListeningOn(channel))
1141  return;
1142 
1143  /*
1144  * Add the new channel name to listenChannels.
1145  *
1146  * XXX It is theoretically possible to get an out-of-memory failure here,
1147  * which would be bad because we already committed. For the moment it
1148  * doesn't seem worth trying to guard against that, but maybe improve this
1149  * later.
1150  */
1151  oldcontext = MemoryContextSwitchTo(TopMemoryContext);
1152  listenChannels = lappend(listenChannels, pstrdup(channel));
1153  MemoryContextSwitchTo(oldcontext);
1154 }
1155 
1156 /*
1157  * Exec_UnlistenCommit --- subroutine for AtCommit_Notify
1158  *
1159  * Remove the specified channel name from listenChannels.
1160  */
1161 static void
1162 Exec_UnlistenCommit(const char *channel)
1163 {
1164  ListCell *q;
1165 
1166  if (Trace_notify)
1167  elog(DEBUG1, "Exec_UnlistenCommit(%s,%d)", channel, MyProcPid);
1168 
1169  foreach(q, listenChannels)
1170  {
1171  char *lchan = (char *) lfirst(q);
1172 
1173  if (strcmp(lchan, channel) == 0)
1174  {
1175  listenChannels = foreach_delete_current(listenChannels, q);
1176  pfree(lchan);
1177  break;
1178  }
1179  }
1180 
1181  /*
1182  * We do not complain about unlistening something not being listened;
1183  * should we?
1184  */
1185 }
1186 
1187 /*
1188  * Exec_UnlistenAllCommit --- subroutine for AtCommit_Notify
1189  *
1190  * Unlisten on all channels for this backend.
1191  */
1192 static void
1193 Exec_UnlistenAllCommit(void)
1194 {
1195  if (Trace_notify)
1196  elog(DEBUG1, "Exec_UnlistenAllCommit(%d)", MyProcPid);
1197 
1198  list_free_deep(listenChannels);
1199  listenChannels = NIL;
1200 }
1201 
1202 /*
1203  * ProcessCompletedNotifies --- send out signals and self-notifies
1204  *
1205  * This is called from postgres.c just before going idle at the completion
1206  * of a transaction. If we issued any notifications in the just-completed
1207  * transaction, send signals to other backends to process them, and also
1208  * process the queue ourselves to send messages to our own frontend.
1209  * Also, if we filled enough queue pages with new notifies, try to advance
1210  * the queue tail pointer.
1211  *
1212  * The reason that this is not done in AtCommit_Notify is that there is
1213  * a nonzero chance of errors here (for example, encoding conversion errors
1214  * while trying to format messages to our frontend). An error during
1215  * AtCommit_Notify would be a PANIC condition. The timing is also arranged
1216  * to ensure that a transaction's self-notifies are delivered to the frontend
1217  * before it gets the terminating ReadyForQuery message.
1218  *
1219  * Note that we send signals and process the queue even if the transaction
1220  * eventually aborted. This is because we need to clean out whatever got
1221  * added to the queue.
1222  *
1223  * NOTE: we are outside of any transaction here.
1224  */
1225 void
1226 ProcessCompletedNotifies(void)
1227 {
1228  MemoryContext caller_context;
1229 
1230  /* Nothing to do if we didn't send any notifications */
1231  if (!backendHasSentNotifications)
1232  return;
1233 
1234  /*
1235  * We reset the flag immediately; otherwise, if any sort of error occurs
1236  * below, we'd be locked up in an infinite loop, because control will come
1237  * right back here after error cleanup.
1238  */
1239  backendHasSentNotifications = false;
1240 
1241  /*
1242  * We must preserve the caller's memory context (probably MessageContext)
1243  * across the transaction we do here.
1244  */
1245  caller_context = CurrentMemoryContext;
1246 
1247  if (Trace_notify)
1248  elog(DEBUG1, "ProcessCompletedNotifies");
1249 
1250  /*
1251  * We must run asyncQueueReadAllNotifications inside a transaction, else
1252  * bad things happen if it gets an error.
1253  */
1254  StartTransactionCommand();
1255 
1256  /* Send signals to other backends */
1257  SignalBackends();
1258 
1259  if (listenChannels != NIL)
1260  {
1261  /* Read the queue ourselves, and send relevant stuff to the frontend */
1262  asyncQueueReadAllNotifications();
1263  }
1264 
1265  /*
1266  * If it's time to try to advance the global tail pointer, do that.
1267  */
1268  if (backendTryAdvanceTail)
1269  {
1270  backendTryAdvanceTail = false;
1271  asyncQueueAdvanceTail();
1272  }
1273 
1274  CommitTransactionCommand();
1275 
1276  MemoryContextSwitchTo(caller_context);
1277 
1278  /* We don't need pq_flush() here since postgres.c will do one shortly */
1279 }
1280 
1281 /*
1282  * Test whether we are actively listening on the given channel name.
1283  *
1284  * Note: this function is executed for every notification found in the queue.
1285  * Perhaps it is worth further optimization, eg convert the list to a sorted
1286  * array so we can binary-search it. In practice the list is likely to be
1287  * fairly short, though.
1288  */
1289 static bool
1290 IsListeningOn(const char *channel)
1291 {
1292  ListCell *p;
1293 
1294  foreach(p, listenChannels)
1295  {
1296  char *lchan = (char *) lfirst(p);
1297 
1298  if (strcmp(lchan, channel) == 0)
1299  return true;
1300  }
1301  return false;
1302 }
1303 
1304 /*
1305  * Remove our entry from the listeners array when we are no longer listening
1306  * on any channel. NB: must not fail if we're already not listening.
1307  */
1308 static void
1309 asyncQueueUnregister(void)
1310 {
1311  Assert(listenChannels == NIL); /* else caller error */
1312 
1313  if (!amRegisteredListener) /* nothing to do */
1314  return;
1315 
1316  /*
1317  * Need exclusive lock here to manipulate list links.
1318  */
1319  LWLockAcquire(NotifyQueueLock, LW_EXCLUSIVE);
1320  /* Mark our entry as invalid */
1321  QUEUE_BACKEND_PID(MyBackendId) = InvalidPid;
1322  QUEUE_BACKEND_DBOID(MyBackendId) = InvalidOid;
1323  /* and remove it from the list */
1324  if (QUEUE_FIRST_LISTENER == MyBackendId)
1325  QUEUE_FIRST_LISTENER = QUEUE_NEXT_LISTENER(MyBackendId);
1326  else
1327  {
1328  for (BackendId i = QUEUE_FIRST_LISTENER; i > 0; i = QUEUE_NEXT_LISTENER(i))
1329  {
1330  if (QUEUE_NEXT_LISTENER(i) == MyBackendId)
1331  {
1332  QUEUE_NEXT_LISTENER(i) = QUEUE_NEXT_LISTENER(MyBackendId);
1333  break;
1334  }
1335  }
1336  }
1337  QUEUE_NEXT_LISTENER(MyBackendId) = InvalidBackendId;
1338  LWLockRelease(NotifyQueueLock);
1339 
1340  /* mark ourselves as no longer listed in the global array */
1341  amRegisteredListener = false;
1342 }
1343 
1344 /*
1345  * Test whether there is room to insert more notification messages.
1346  *
1347  * Caller must hold at least shared NotifyQueueLock.
1348  */
1349 static bool
1350 asyncQueueIsFull(void)
1351 {
1352  int nexthead;
1353  int boundary;
1354 
1355  /*
1356  * The queue is full if creating a new head page would create a page that
1357  * logically precedes the current global tail pointer, ie, the head
1358  * pointer would wrap around compared to the tail. We cannot create such
1359  * a head page for fear of confusing slru.c. For safety we round the tail
1360  * pointer back to a segment boundary (truncation logic in
1361  * asyncQueueAdvanceTail does not do this, so doing it here is optional).
1362  *
1363  * Note that this test is *not* dependent on how much space there is on
1364  * the current head page. This is necessary because asyncQueueAddEntries
1365  * might try to create the next head page in any case.
1366  */
1367  nexthead = QUEUE_POS_PAGE(QUEUE_HEAD) + 1;
1368  if (nexthead > QUEUE_MAX_PAGE)
1369  nexthead = 0; /* wrap around */
1370  boundary = QUEUE_STOP_PAGE;
1371  boundary -= boundary % SLRU_PAGES_PER_SEGMENT;
1372  return asyncQueuePagePrecedes(nexthead, boundary);
1373 }
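/*
 * Worked example of this test, assuming QUEUE_MAX_PAGE were redefined to
 * 255 (eight 32-page segments) for testing: with the head on page 250 and
 * the stop page at 10, nexthead is 251 and the boundary rounds down to
 * page 0.  asyncQueuePageDiff(251, 0) = 251 - 256 = -5, so nexthead
 * logically precedes the boundary and the queue reports full.
 */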
1374 
1375 /*
1376  * Advance the QueuePosition to the next entry, assuming that the current
1377  * entry is of length entryLength. If we jump to a new page the function
1378  * returns true, else false.
1379  */
1380 static bool
1381 asyncQueueAdvance(volatile QueuePosition *position, int entryLength)
1382 {
1383  int pageno = QUEUE_POS_PAGE(*position);
1384  int offset = QUEUE_POS_OFFSET(*position);
1385  bool pageJump = false;
1386 
1387  /*
1388  * Move to the next writing position: First jump over what we have just
1389  * written or read.
1390  */
1391  offset += entryLength;
1392  Assert(offset <= QUEUE_PAGESIZE);
1393 
1394  /*
1395  * In a second step check if another entry can possibly be written to the
1396  * page. If so, stay here, we have reached the next position. If not, then
1397  * we need to move on to the next page.
1398  */
1399  if (offset + QUEUEALIGN(AsyncQueueEntryEmptySize) > QUEUE_PAGESIZE)
1400  {
1401  pageno++;
1402  if (pageno > QUEUE_MAX_PAGE)
1403  pageno = 0; /* wrap around */
1404  offset = 0;
1405  pageJump = true;
1406  }
1407 
1408  SET_QUEUE_POS(*position, pageno, offset);
1409  return pageJump;
1410 }
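/*
 * Worked example (assuming QUEUEALIGN(AsyncQueueEntryEmptySize) is 20 and
 * QUEUE_PAGESIZE is 8192): advancing over a 40-byte entry at position
 * (3, 8140) moves the offset to 8180; since 8180 + 20 > 8192 no further
 * entry can fit, so the position jumps to (4, 0) and true is returned.
 */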
1411 
1412 /*
1413  * Fill the AsyncQueueEntry at *qe with an outbound notification message.
1414  */
1415 static void
1416 asyncQueueNotificationToEntry(Notification *n, AsyncQueueEntry *qe)
1417 {
1418  size_t channellen = n->channel_len;
1419  size_t payloadlen = n->payload_len;
1420  int entryLength;
1421 
1422  Assert(channellen < NAMEDATALEN);
1423  Assert(payloadlen < NOTIFY_PAYLOAD_MAX_LENGTH);
1424 
1425  /* The terminators are already included in AsyncQueueEntryEmptySize */
1426  entryLength = AsyncQueueEntryEmptySize + payloadlen + channellen;
1427  entryLength = QUEUEALIGN(entryLength);
1428  qe->length = entryLength;
1429  qe->dboid = MyDatabaseId;
1430  qe->xid = GetCurrentTransactionId();
1431  qe->srcPid = MyProcPid;
1432  memcpy(qe->data, n->data, channellen + payloadlen + 2);
1433 }
1434 
1435 /*
1436  * Add pending notifications to the queue.
1437  *
1438  * We go page by page here, i.e. we stop once we have to go to a new page but
1439  * we will be called again and then fill that next page. If an entry does not
1440  * fit into the current page, we write a dummy entry with an InvalidOid as the
1441  * database OID in order to fill the page. So every page is always used up to
1442  * the last byte which simplifies reading the page later.
1443  *
1444  * We are passed the list cell (in pendingNotifies->events) containing the next
1445  * notification to write and return the first still-unwritten cell back.
1446  * Eventually we will return NULL indicating all is done.
1447  *
1448  * We are holding NotifyQueueLock already from the caller and grab
1449  * NotifySLRULock locally in this function.
1450  */
1451 static ListCell *
1452 asyncQueueAddEntries(ListCell *nextNotify)
1453 {
1454  AsyncQueueEntry qe;
1455  QueuePosition queue_head;
1456  int pageno;
1457  int offset;
1458  int slotno;
1459 
1460  /* We hold both NotifyQueueLock and NotifySLRULock during this operation */
1461  LWLockAcquire(NotifySLRULock, LW_EXCLUSIVE);
1462 
1463  /*
1464  * We work with a local copy of QUEUE_HEAD, which we write back to shared
1465  * memory upon exiting. The reason for this is that if we have to advance
1466  * to a new page, SimpleLruZeroPage might fail (out of disk space, for
1467  * instance), and we must not advance QUEUE_HEAD if it does. (Otherwise,
1468  * subsequent insertions would try to put entries into a page that slru.c
1469  * thinks doesn't exist yet.) So, use a local position variable. Note
1470  * that if we do fail, any already-inserted queue entries are forgotten;
1471  * this is okay, since they'd be useless anyway after our transaction
1472  * rolls back.
1473  */
1474  queue_head = QUEUE_HEAD;
1475 
1476  /*
1477  * If this is the first write since the postmaster started, we need to
1478  * initialize the first page of the async SLRU. Otherwise, the current
1479  * page should be initialized already, so just fetch it.
1480  *
1481  * (We could also take the first path when the SLRU position has just
1482  * wrapped around, but re-zeroing the page is harmless in that case.)
1483  */
1484  pageno = QUEUE_POS_PAGE(queue_head);
1485  if (QUEUE_POS_IS_ZERO(queue_head))
1486  slotno = SimpleLruZeroPage(NotifyCtl, pageno);
1487  else
1488  slotno = SimpleLruReadPage(NotifyCtl, pageno, true,
1489  InvalidTransactionId);
1490 
1491  /* Note we mark the page dirty before writing in it */
1492  NotifyCtl->shared->page_dirty[slotno] = true;
1493 
1494  while (nextNotify != NULL)
1495  {
1496  Notification *n = (Notification *) lfirst(nextNotify);
1497 
1498  /* Construct a valid queue entry in local variable qe */
1499  asyncQueueNotificationToEntry(n, &qe);
1500 
1501  offset = QUEUE_POS_OFFSET(queue_head);
1502 
1503  /* Check whether the entry really fits on the current page */
1504  if (offset + qe.length <= QUEUE_PAGESIZE)
1505  {
1506  /* OK, so advance nextNotify past this item */
1507  nextNotify = lnext(pendingNotifies->events, nextNotify);
1508  }
1509  else
1510  {
1511  /*
1512  * Write a dummy entry to fill up the page. Actually readers will
1513  * only check dboid and since it won't match any reader's database
1514  * OID, they will ignore this entry and move on.
1515  */
1516  qe.length = QUEUE_PAGESIZE - offset;
1517  qe.dboid = InvalidOid;
1518  qe.data[0] = '\0'; /* empty channel */
1519  qe.data[1] = '\0'; /* empty payload */
1520  }
1521 
1522  /* Now copy qe into the shared buffer page */
1523  memcpy(NotifyCtl->shared->page_buffer[slotno] + offset,
1524  &qe,
1525  qe.length);
1526 
1527  /* Advance queue_head appropriately, and detect if page is full */
1528  if (asyncQueueAdvance(&(queue_head), qe.length))
1529  {
1530  /*
1531  * Page is full, so we're done here, but first fill the next page
1532  * with zeroes. The reason to do this is to ensure that slru.c's
1533  * idea of the head page is always the same as ours, which avoids
1534  * boundary problems in SimpleLruTruncate. The test in
1535  * asyncQueueIsFull() ensured that there is room to create this
1536  * page without overrunning the queue.
1537  */
1538  slotno = SimpleLruZeroPage(NotifyCtl, QUEUE_POS_PAGE(queue_head));
1539 
1540  /*
1541  * If the new page address is a multiple of QUEUE_CLEANUP_DELAY,
1542  * set flag to remember that we should try to advance the tail
1543  * pointer (we don't want to actually do that right here).
1544  */
1545  if (QUEUE_POS_PAGE(queue_head) % QUEUE_CLEANUP_DELAY == 0)
1546  backendTryAdvanceTail = true;
1547 
1548  /* And exit the loop */
1549  break;
1550  }
1551  }
1552 
1553  /* Success, so update the global QUEUE_HEAD */
1554  QUEUE_HEAD = queue_head;
1555 
1556  LWLockRelease(NotifySLRULock);
1557 
1558  return nextNotify;
1559 }
1560 
1561 /*
1562  * SQL function to return the fraction of the notification queue currently
1563  * occupied.
1564  */
1565 Datum
1566 pg_notification_queue_usage(PG_FUNCTION_ARGS)
1567 {
1568  double usage;
1569 
1570  /* Advance the queue tail so we don't report a too-large result */
1571  asyncQueueAdvanceTail();
1572 
1573  LWLockAcquire(NotifyQueueLock, LW_SHARED);
1574  usage = asyncQueueUsage();
1575  LWLockRelease(NotifyQueueLock);
1576 
1577  PG_RETURN_FLOAT8(usage);
1578 }
1579 
1580 /*
1581  * Return the fraction of the queue that is currently occupied.
1582  *
1583  * The caller must hold NotifyQueueLock in (at least) shared mode.
1584  *
1585  * Note: we measure the distance to the logical tail page, not the physical
1586  * tail page. In some sense that's wrong, but the relative position of the
1587  * physical tail is affected by details such as SLRU segment boundaries,
1588  * so that a result based on that is unpleasantly unstable.
1589  */
1590 static double
1591 asyncQueueUsage(void)
1592 {
1593  int headPage = QUEUE_POS_PAGE(QUEUE_HEAD);
1594  int tailPage = QUEUE_POS_PAGE(QUEUE_TAIL);
1595  int occupied;
1596 
1597  occupied = headPage - tailPage;
1598 
1599  if (occupied == 0)
1600  return (double) 0; /* fast exit for common case */
1601 
1602  if (occupied < 0)
1603  {
1604  /* head has wrapped around, tail not yet */
1605  occupied += QUEUE_MAX_PAGE + 1;
1606  }
1607 
1608  return (double) occupied / (double) ((QUEUE_MAX_PAGE + 1) / 2);
1609 }
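/*
 * Example with the default QUEUE_MAX_PAGE (2097151): a head on page
 * 1050000 and a tail on page 1576 give occupied = 1048424 pages, so the
 * result is 1048424 / 1048576, just short of 1.0 (queue nearly full).
 */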
1610 
1611 /*
1612  * Check whether the queue is at least half full, and emit a warning if so.
1613  *
1614  * This is unlikely given the size of the queue, but possible.
1615  * The warnings show up at most once every QUEUE_FULL_WARN_INTERVAL.
1616  *
1617  * Caller must hold exclusive NotifyQueueLock.
1618  */
1619 static void
1620 asyncQueueFillWarning(void)
1621 {
1622  double fillDegree;
1623  TimestampTz t;
1624 
1625  fillDegree = asyncQueueUsage();
1626  if (fillDegree < 0.5)
1627  return;
1628 
1629  t = GetCurrentTimestamp();
1630 
1631  if (TimestampDifferenceExceeds(asyncQueueControl->lastQueueFillWarn,
1632  t, QUEUE_FULL_WARN_INTERVAL))
1633  {
1634  QueuePosition min = QUEUE_HEAD;
1635  int32 minPid = InvalidPid;
1636 
1637  for (BackendId i = QUEUE_FIRST_LISTENER; i > 0; i = QUEUE_NEXT_LISTENER(i))
1638  {
1639  Assert(QUEUE_BACKEND_PID(i) != InvalidPid);
1640  min = QUEUE_POS_MIN(min, QUEUE_BACKEND_POS(i));
1641  if (QUEUE_POS_EQUAL(min, QUEUE_BACKEND_POS(i)))
1642  minPid = QUEUE_BACKEND_PID(i);
1643  }
1644 
1645  ereport(WARNING,
1646  (errmsg("NOTIFY queue is %.0f%% full", fillDegree * 100),
1647  (minPid != InvalidPid ?
1648  errdetail("The server process with PID %d is among those with the oldest transactions.", minPid)
1649  : 0),
1650  (minPid != InvalidPid ?
1651  errhint("The NOTIFY queue cannot be emptied until that process ends its current transaction.")
1652  : 0)));
1653 
1654  asyncQueueControl->lastQueueFillWarn = t;
1655  }
1656 }
1657 
1658 /*
1659  * Send signals to listening backends.
1660  *
1661  * We never signal our own process; that should be handled by our caller.
1662  *
1663  * Normally we signal only backends in our own database, since only those
1664  * backends could be interested in notifies we send. However, if there's
1665  * notify traffic in our database but no traffic in another database that
1666  * does have listener(s), those listeners will fall further and further
1667  * behind. Waken them anyway if they're far enough behind, so that they'll
1668  * advance their queue position pointers, allowing the global tail to advance.
1669  *
1670  * Since we know the BackendId and the Pid the signaling is quite cheap.
1671  */
1672 static void
1673 SignalBackends(void)
1674 {
1675  int32 *pids;
1676  BackendId *ids;
1677  int count;
1678 
1679  /*
1680  * Identify backends that we need to signal. We don't want to send
1681  * signals while holding the NotifyQueueLock, so this loop just builds a
1682  * list of target PIDs.
1683  *
1684  * XXX in principle these pallocs could fail, which would be bad. Maybe
1685  * preallocate the arrays? But in practice this is only run in trivial
1686  * transactions, so there should surely be space available.
1687  */
1688  pids = (int32 *) palloc(MaxBackends * sizeof(int32));
1689  ids = (BackendId *) palloc(MaxBackends * sizeof(BackendId));
1690  count = 0;
1691 
1692  LWLockAcquire(NotifyQueueLock, LW_EXCLUSIVE);
1693  for (BackendId i = QUEUE_FIRST_LISTENER; i > 0; i = QUEUE_NEXT_LISTENER(i))
1694  {
1695  int32 pid = QUEUE_BACKEND_PID(i);
1696  QueuePosition pos;
1697 
1698  Assert(pid != InvalidPid);
1699  if (pid == MyProcPid)
1700  continue; /* never signal self */
1701  pos = QUEUE_BACKEND_POS(i);
1703  {
1704  /*
1705  * Always signal listeners in our own database, unless they're
1706  * already caught up (unlikely, but possible).
1707  */
1708  if (QUEUE_POS_EQUAL(pos, QUEUE_HEAD))
1709  continue;
1710  }
1711  else
1712  {
1713  /*
1714  * Listeners in other databases should be signaled only if they
1715  * are far behind.
1716  */
1717  if (asyncQueuePageDiff(QUEUE_POS_PAGE(QUEUE_HEAD),
1718  QUEUE_POS_PAGE(pos)) < QUEUE_CLEANUP_DELAY)
1719  continue;
1720  }
1721  /* OK, need to signal this one */
1722  pids[count] = pid;
1723  ids[count] = i;
1724  count++;
1725  }
1726  LWLockRelease(NotifyQueueLock);
1727 
1728  /* Now send signals */
1729  for (int i = 0; i < count; i++)
1730  {
1731  int32 pid = pids[i];
1732 
1733  /*
1734  * Note: assuming things aren't broken, a signal failure here could
1735  * only occur if the target backend exited since we released
1736  * NotifyQueueLock; which is unlikely but certainly possible. So we
1737  * just log a low-level debug message if it happens.
1738  */
1739  if (SendProcSignal(pid, PROCSIG_NOTIFY_INTERRUPT, ids[i]) < 0)
1740  elog(DEBUG3, "could not signal backend with PID %d: %m", pid);
1741  }
1742 
1743  pfree(pids);
1744  pfree(ids);
1745 }
1746 
1747 /*
1748  * AtAbort_Notify
1749  *
1750  * This is called at transaction abort.
1751  *
1752  * Gets rid of pending actions and outbound notifies that we would have
1753  * executed if the transaction got committed.
1754  */
1755 void
1756 AtAbort_Notify(void)
1757 {
1758  /*
1759  * If we LISTEN but then roll back the transaction after PreCommit_Notify,
1760  * we have registered as a listener but have not made any entry in
1761  * listenChannels. In that case, deregister again.
1762  */
1763  if (amRegisteredListener && listenChannels == NIL)
1764  asyncQueueUnregister();
1765 
1766  /* And clean up */
1767  ClearPendingActionsAndNotifies();
1768 }
1769 
1770 /*
1771  * AtSubCommit_Notify() --- Take care of subtransaction commit.
1772  *
1773  * Reassign all items in the pending lists to the parent transaction.
1774  */
1775 void
1776 AtSubCommit_Notify(void)
1777 {
1778  int my_level = GetCurrentTransactionNestLevel();
1779 
1780  /* If there are actions at our nesting level, we must reparent them. */
1781  if (pendingActions != NULL &&
1782  pendingActions->nestingLevel >= my_level)
1783  {
1784  if (pendingActions->upper == NULL ||
1785  pendingActions->upper->nestingLevel < my_level - 1)
1786  {
1787  /* nothing to merge; give the whole thing to the parent */
1788  --pendingActions->nestingLevel;
1789  }
1790  else
1791  {
1792  ActionList *childPendingActions = pendingActions;
1793 
1794  pendingActions = pendingActions->upper;
1795 
1796  /*
1797  * Mustn't try to eliminate duplicates here --- see queue_listen()
1798  */
1799  pendingActions->actions =
1800  list_concat(pendingActions->actions,
1801  childPendingActions->actions);
1802  pfree(childPendingActions);
1803  }
1804  }
1805 
1806  /* If there are notifies at our nesting level, we must reparent them. */
1807  if (pendingNotifies != NULL &&
1808  pendingNotifies->nestingLevel >= my_level)
1809  {
1810  Assert(pendingNotifies->nestingLevel == my_level);
1811 
1812  if (pendingNotifies->upper == NULL ||
1813  pendingNotifies->upper->nestingLevel < my_level - 1)
1814  {
1815  /* nothing to merge; give the whole thing to the parent */
1816  --pendingNotifies->nestingLevel;
1817  }
1818  else
1819  {
1820  /*
1821  * Formerly, we didn't bother to eliminate duplicates here, but
1822  * now we must, else we fall foul of "Assert(!found)", either here
1823  * or during a later attempt to build the parent-level hashtable.
1824  */
1825  NotificationList *childPendingNotifies = pendingNotifies;
1826  ListCell *l;
1827 
1828  pendingNotifies = pendingNotifies->upper;
1829  /* Insert all the subxact's events into parent, except for dups */
1830  foreach(l, childPendingNotifies->events)
1831  {
1832  Notification *childn = (Notification *) lfirst(l);
1833 
1834  if (!AsyncExistsPendingNotify(childn))
1835  AddEventToPendingNotifies(childn);
1836  }
1837  pfree(childPendingNotifies);
1838  }
1839  }
1840 }
1841 
1842 /*
1843  * AtSubAbort_Notify() --- Take care of subtransaction abort.
1844  */
1845 void
1846 AtSubAbort_Notify(void)
1847 {
1848  int my_level = GetCurrentTransactionNestLevel();
1849 
1850  /*
1851  * All we have to do is pop the stack --- the actions/notifies made in
1852  * this subxact are no longer interesting, and the space will be freed
1853  * when CurTransactionContext is recycled. We still have to free the
1854  * ActionList and NotificationList objects themselves, though, because
1855  * those are allocated in TopTransactionContext.
1856  *
1857  * Note that there might be no entries at all, or no entries for the
1858  * current subtransaction level, either because none were ever created, or
1859  * because we reentered this routine due to trouble during subxact abort.
1860  */
1861  while (pendingActions != NULL &&
1862  pendingActions->nestingLevel >= my_level)
1863  {
1864  ActionList *childPendingActions = pendingActions;
1865 
1866  pendingActions = pendingActions->upper;
1867  pfree(childPendingActions);
1868  }
1869 
1870  while (pendingNotifies != NULL &&
1871  pendingNotifies->nestingLevel >= my_level)
1872  {
1873  NotificationList *childPendingNotifies = pendingNotifies;
1874 
1875  pendingNotifies = pendingNotifies->upper;
1876  pfree(childPendingNotifies);
1877  }
1878 }
1879 
1880 /*
1881  * HandleNotifyInterrupt
1882  *
1883  * Signal handler portion of interrupt handling. Let the backend know
1884  * that there's a pending notify interrupt. If we're currently reading
1885  * from the client, this will interrupt the read and
1886  * ProcessClientReadInterrupt() will call ProcessNotifyInterrupt().
1887  */
1888 void
1889 HandleNotifyInterrupt(void)
1890 {
1891  /*
1892  * Note: this is called by a SIGNAL HANDLER. You must be very wary what
1893  * you do here.
1894  */
1895 
1896  /* signal that work needs to be done */
1897  notifyInterruptPending = true;
1898 
1899  /* make sure the event is processed in due course */
1900  SetLatch(MyLatch);
1901 }
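
The handler is this small because it runs asynchronously: all it may safely do is set a sig_atomic_t flag and poke the latch, deferring the real work to ProcessNotifyInterrupt(). Outside PostgreSQL, the same idea is the classic self-pipe trick; the following rough analogy is not PostgreSQL code (latch.c is considerably more elaborate), just a sketch of the pattern:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t pending = 0;	/* like notifyInterruptPending */
static int	selfpipe[2];					/* stands in for MyLatch */

static void
handler(int signo)
{
	/* async-signal-safe work only: set a flag and poke the pipe */
	pending = 1;
	(void) write(selfpipe[1], "x", 1);		/* like SetLatch(MyLatch) */
}

int
main(void)
{
	char		c;

	if (pipe(selfpipe) != 0)
		return 1;
	signal(SIGUSR1, handler);
	raise(SIGUSR1);

	/* main loop: wake on the pipe, then do the deferred work here, the
	 * analogue of ProcessNotifyInterrupt() */
	if (read(selfpipe[0], &c, 1) == 1 && pending)
	{
		pending = 0;
		printf("processing deferred interrupt\n");
	}
	return 0;
}
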
1902 
1903 /*
1904  * ProcessNotifyInterrupt
1905  *
1906  * This is called if we see notifyInterruptPending set, just before
1907  * transmitting ReadyForQuery at the end of a frontend command, and
1908  * also if a notify signal occurs while reading from the frontend.
1909  * HandleNotifyInterrupt() will cause the read to be interrupted
1910  * via the process's latch, and this routine will get called.
1911  * If we are truly idle (ie, *not* inside a transaction block),
1912  * process the incoming notifies.
1913  */
1914 void
1915 ProcessNotifyInterrupt(void)
1916 {
1917  if (IsTransactionOrTransactionBlock())
1918  return; /* not really idle */
1919 
1920  while (notifyInterruptPending)
1921  ProcessIncomingNotify();
1922 }
1923 
1924 
1925 /*
1926  * Read all pending notifications from the queue, and deliver appropriate
1927  * ones to my frontend. Stop when we reach queue head or an uncommitted
1928  * notification.
1929  */
1930 static void
1931 asyncQueueReadAllNotifications(void)
1932 {
1933  volatile QueuePosition pos;
1934  QueuePosition head;
1935  Snapshot snapshot;
1936 
1937  /* page_buffer must be adequately aligned, so use a union */
1938  union
1939  {
1940  char buf[QUEUE_PAGESIZE];
1941  AsyncQueueEntry align;
1942  } page_buffer;
1943 
1944  /* Fetch current state */
1945  LWLockAcquire(NotifyQueueLock, LW_SHARED);
1946  /* Assert checks that we have a valid state entry */
1947  Assert(MyProcPid == QUEUE_BACKEND_PID(MyBackendId));
1948  pos = QUEUE_BACKEND_POS(MyBackendId);
1949  head = QUEUE_HEAD;
1950  LWLockRelease(NotifyQueueLock);
1951 
1952  if (QUEUE_POS_EQUAL(pos, head))
1953  {
1954  /* Nothing to do, we have read all notifications already. */
1955  return;
1956  }
1957 
1958  /*----------
1959  * Get snapshot we'll use to decide which xacts are still in progress.
1960  * This is trickier than it might seem, because of race conditions.
1961  * Consider the following example:
1962  *
1963  * Backend 1: Backend 2:
1964  *
1965  * transaction starts
1966  * UPDATE foo SET ...;
1967  * NOTIFY foo;
1968  * commit starts
1969  * queue the notify message
1970  * transaction starts
1971  * LISTEN foo; -- first LISTEN in session
1972  * SELECT * FROM foo WHERE ...;
1973  * commit to clog
1974  * commit starts
1975  * add backend 2 to array of listeners
1976  * advance to queue head (this code)
1977  * commit to clog
1978  *
1979  * Transaction 2's SELECT has not seen the UPDATE's effects, since that
1980  * wasn't committed yet. Ideally we'd ensure that client 2 would
1981  * eventually get transaction 1's notify message, but there's no way
1982  * to do that; until we're in the listener array, there's no guarantee
1983  * that the notify message doesn't get removed from the queue.
1984  *
1985  * Therefore the coding technique transaction 2 is using is unsafe:
1986  * applications must commit a LISTEN before inspecting database state,
1987  * if they want to ensure they will see notifications about subsequent
1988  * changes to that state.
1989  *
1990  * What we do guarantee is that we'll see all notifications from
1991  * transactions committing after the snapshot we take here.
1992  * Exec_ListenPreCommit has already added us to the listener array,
1993  * so no not-yet-committed messages can be removed from the queue
1994  * before we see them.
1995  *----------
1996  */
1997  snapshot = RegisterSnapshot(GetLatestSnapshot());
1998 
1999  /*
2000  * It is possible that we fail while trying to send a message to our
2001  * frontend (for example, because of encoding conversion failure). If
2002  * that happens it is critical that we not try to send the same message
2003  * over and over again. Therefore, we place a PG_TRY block here that will
2004  * forcibly advance our queue position before we lose control to an error.
2005  * (We could alternatively retake NotifyQueueLock and move the position
2006  * before handling each individual message, but that seems like too much
2007  * lock traffic.)
2008  */
2009  PG_TRY();
2010  {
2011  bool reachedStop;
2012 
2013  do
2014  {
2015  int curpage = QUEUE_POS_PAGE(pos);
2016  int curoffset = QUEUE_POS_OFFSET(pos);
2017  int slotno;
2018  int copysize;
2019 
2020  /*
2021  * We copy the data from SLRU into a local buffer, so as to avoid
2022  * holding the NotifySLRULock while we are examining the entries
2023  * and possibly transmitting them to our frontend. Copy only the
2024  * part of the page we will actually inspect.
2025  */
2026  slotno = SimpleLruReadPage_ReadOnly(NotifyCtl, curpage,
2027  InvalidTransactionId);
2028  if (curpage == QUEUE_POS_PAGE(head))
2029  {
2030  /* we only want to read as far as head */
2031  copysize = QUEUE_POS_OFFSET(head) - curoffset;
2032  if (copysize < 0)
2033  copysize = 0; /* just for safety */
2034  }
2035  else
2036  {
2037  /* fetch all the rest of the page */
2038  copysize = QUEUE_PAGESIZE - curoffset;
2039  }
2040  memcpy(page_buffer.buf + curoffset,
2041  NotifyCtl->shared->page_buffer[slotno] + curoffset,
2042  copysize);
2043  /* Release lock that we got from SimpleLruReadPage_ReadOnly() */
2044  LWLockRelease(NotifySLRULock);
2045 
2046  /*
2047  * Process messages up to the stop position, end of page, or an
2048  * uncommitted message.
2049  *
2050  * Our stop position is what we found to be the head's position
2051  * when we entered this function. It might have changed already.
2052  * But if it has, we will receive (or have already received and
2053  * queued) another signal and come here again.
2054  *
2055  * We are not holding NotifyQueueLock here! The queue can only
2056  * extend beyond the head pointer (see above) and we leave our
2057  * backend's pointer where it is so nobody will truncate or
2058  * rewrite pages under us. Especially we don't want to hold a lock
2059  * while sending the notifications to the frontend.
2060  */
2061  reachedStop = asyncQueueProcessPageEntries(&pos, head,
2062  page_buffer.buf,
2063  snapshot);
2064  } while (!reachedStop);
2065  }
2066  PG_FINALLY();
2067  {
2068  /* Update shared state */
2069  LWLockAcquire(NotifyQueueLock, LW_SHARED);
2070  QUEUE_BACKEND_POS(MyBackendId) = pos;
2071  LWLockRelease(NotifyQueueLock);
2072  }
2073  PG_END_TRY();
2074 
2075  /* Done with snapshot */
2076  UnregisterSnapshot(snapshot);
2077 }
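
The race spelled out in the comment above has a concrete consequence for applications: commit the LISTEN before inspecting the state whose changes you want to hear about. A libpq sketch of the safe ordering; the table and channel names are made up for illustration:

#include <libpq-fe.h>

int
main(void)
{
	PGconn	   *conn = PQconnectdb("dbname=postgres");	/* illustrative */

	if (PQstatus(conn) != CONNECTION_OK)
		return 1;

	/*
	 * Safe ordering: the LISTEN is sent outside any transaction block, so
	 * it commits immediately and this backend joins the listener array
	 * before the inspection below.  Changes committed after this point
	 * will therefore also be signaled to us.
	 */
	PQclear(PQexec(conn, "LISTEN foo"));
	PQclear(PQexec(conn, "SELECT * FROM foo"));

	/*
	 * Unsafe ordering: LISTEN and SELECT inside one not-yet-committed
	 * transaction block, as "Backend 2" does in the example above.
	 */
	PQfinish(conn);
	return 0;
}
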
2078 
2079 /*
2080  * Fetch notifications from the shared queue, beginning at position current,
2081  * and deliver relevant ones to my frontend.
2082  *
2083  * The current page must have been fetched into page_buffer from shared
2084  * memory. (We could access the page right in shared memory, but that
2085  * would imply holding the NotifySLRULock throughout this routine.)
2086  *
2087  * We stop if we reach the "stop" position, or reach a notification from an
2088  * uncommitted transaction, or reach the end of the page.
2089  *
2090  * The function returns true once we have reached the stop position or an
2091  * uncommitted notification, and false if we have finished with the page.
2092  * In other words: once it returns true there is no need to look further.
2093  * The QueuePosition *current is advanced past all processed messages.
2094  */
2095 static bool
2096 asyncQueueProcessPageEntries(volatile QueuePosition *current,
2097  QueuePosition stop,
2098  char *page_buffer,
2099  Snapshot snapshot)
2100 {
2101  bool reachedStop = false;
2102  bool reachedEndOfPage;
2103  AsyncQueueEntry *qe;
2104 
2105  do
2106  {
2107  QueuePosition thisentry = *current;
2108 
2109  if (QUEUE_POS_EQUAL(thisentry, stop))
2110  break;
2111 
2112  qe = (AsyncQueueEntry *) (page_buffer + QUEUE_POS_OFFSET(thisentry));
2113 
2114  /*
2115  * Advance *current over this message, possibly to the next page. As
2116  * noted in the comments for asyncQueueReadAllNotifications, we must
2117  * do this before possibly failing while processing the message.
2118  */
2119  reachedEndOfPage = asyncQueueAdvance(current, qe->length);
2120 
2121  /* Ignore messages destined for other databases */
2122  if (qe->dboid == MyDatabaseId)
2123  {
2124  if (XidInMVCCSnapshot(qe->xid, snapshot))
2125  {
2126  /*
2127  * The source transaction is still in progress, so we can't
2128  * process this message yet. Break out of the loop, but first
2129  * back up *current so we will reprocess the message next
2130  * time. (Note: it is unlikely but not impossible for
2131  * TransactionIdDidCommit to fail, so we can't really avoid
2132  * this advance-then-back-up behavior when dealing with an
2133  * uncommitted message.)
2134  *
2135  * Note that we must test XidInMVCCSnapshot before we test
2136  * TransactionIdDidCommit, else we might return a message from
2137  * a transaction that is not yet visible to snapshots; compare
2138  * the comments at the head of heapam_visibility.c.
2139  *
2140  * Also, while our own xact won't be listed in the snapshot,
2141  * we need not check for TransactionIdIsCurrentTransactionId
2142  * because our transaction cannot (yet) have queued any
2143  * messages.
2144  */
2145  *current = thisentry;
2146  reachedStop = true;
2147  break;
2148  }
2149  else if (TransactionIdDidCommit(qe->xid))
2150  {
2151  /* qe->data is the null-terminated channel name */
2152  char *channel = qe->data;
2153 
2154  if (IsListeningOn(channel))
2155  {
2156  /* payload follows channel name */
2157  char *payload = qe->data + strlen(channel) + 1;
2158 
2159  NotifyMyFrontEnd(channel, payload, qe->srcPid);
2160  }
2161  }
2162  else
2163  {
2164  /*
2165  * The source transaction aborted or crashed, so we just
2166  * ignore its notifications.
2167  */
2168  }
2169  }
2170 
2171  /* Loop back if we're not at end of page */
2172  } while (!reachedEndOfPage);
2173 
2174  if (QUEUE_POS_EQUAL(*current, stop))
2175  reachedStop = true;
2176 
2177  return reachedStop;
2178 }
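
The channel/payload split above relies on the AsyncQueueEntry layout: data holds the NUL-terminated channel name immediately followed by the NUL-terminated payload, which is also why length comparisons elsewhere in this file use channel_len + payload_len + 2. A small standalone sketch of packing and unpacking that layout:

#include <stdio.h>
#include <string.h>

int
main(void)
{
	char		data[64];
	const char *channel = "foo";	/* illustrative values */
	const char *payload = "hello";

	/* pack: channel and payload back to back, each NUL-terminated */
	strcpy(data, channel);
	strcpy(data + strlen(channel) + 1, payload);

	/* unpack, mirroring asyncQueueProcessPageEntries() */
	const char *ch = data;
	const char *pl = data + strlen(ch) + 1;

	printf("channel=%s payload=%s\n", ch, pl);
	return 0;
}
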
2179 
2180 /*
2181  * Advance the shared queue tail variable to the minimum of all the
2182  * per-backend tail pointers. Truncate pg_notify space if possible.
2183  */
2184 static void
2185 asyncQueueAdvanceTail(void)
2186 {
2187  QueuePosition min;
2188  int oldtailpage;
2189  int newtailpage;
2190  int boundary;
2191 
2192  /* Restrict task to one backend per cluster; see SimpleLruTruncate(). */
2193  LWLockAcquire(NotifyQueueTailLock, LW_EXCLUSIVE);
2194 
2195  /*
2196  * Compute the new tail. Pre-v13, it's essential that QUEUE_TAIL be exact
2197  * (ie, exactly match at least one backend's queue position), so it must
2198  * be updated atomically with the actual computation. Since v13, we could
2199  * get away with not doing it like that, but it seems prudent to keep it
2200  * so.
2201  *
2202  * Also, because incoming backends will scan forward from QUEUE_TAIL, that
2203  * must be advanced before we can truncate any data. Thus, QUEUE_TAIL is
2204  * the logical tail, while QUEUE_STOP_PAGE is the physical tail, or oldest
2205  * un-truncated page. When QUEUE_STOP_PAGE != QUEUE_POS_PAGE(QUEUE_TAIL),
2206  * there are pages we can truncate but haven't yet finished doing so.
2207  *
2208  * For concurrency's sake, we don't want to hold NotifyQueueLock while
2209  * performing SimpleLruTruncate. This is OK because no backend will try
2210  * to access the pages we are in the midst of truncating.
2211  */
2212  LWLockAcquire(NotifyQueueLock, LW_EXCLUSIVE);
2213  min = QUEUE_HEAD;
2214  for (BackendId i = QUEUE_FIRST_LISTENER; i > 0; i = QUEUE_NEXT_LISTENER(i))
2215  {
2216  Assert(QUEUE_BACKEND_PID(i) != InvalidPid);
2217  min = QUEUE_POS_MIN(min, QUEUE_BACKEND_POS(i));
2218  }
2219  QUEUE_TAIL = min;
2220  oldtailpage = QUEUE_STOP_PAGE;
2221  LWLockRelease(NotifyQueueLock);
2222 
2223  /*
2224  * We can truncate something if the global tail advanced across an SLRU
2225  * segment boundary.
2226  *
2227  * XXX it might be better to truncate only once every several segments, to
2228  * reduce the number of directory scans.
2229  */
2230  newtailpage = QUEUE_POS_PAGE(min);
2231  boundary = newtailpage - (newtailpage % SLRU_PAGES_PER_SEGMENT);
2232  if (asyncQueuePagePrecedes(oldtailpage, boundary))
2233  {
2234  /*
2235  * SimpleLruTruncate() will ask for NotifySLRULock but will also
2236  * release the lock again.
2237  */
2238  SimpleLruTruncate(NotifyCtl, newtailpage);
2239 
2240  /*
2241  * Update QUEUE_STOP_PAGE. This changes asyncQueueIsFull()'s verdict
2242  * for the segment immediately prior to the old tail, allowing fresh
2243  * data into that segment.
2244  */
2245  LWLockAcquire(NotifyQueueLock, LW_EXCLUSIVE);
2246  QUEUE_STOP_PAGE = newtailpage;
2247  LWLockRelease(NotifyQueueLock);
2248  }
2249 
2250  LWLockRelease(NotifyQueueTailLock);
2251 }
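
To see what the boundary computation does, here is a worked example using the real SLRU_PAGES_PER_SEGMENT of 32. The page numbers are made up, and the sketch uses a plain < where the code above uses the wraparound-aware asyncQueuePagePrecedes():

#include <stdio.h>

#define SLRU_PAGES_PER_SEGMENT 32	/* from slru.h */

int
main(void)
{
	int			oldtailpage = 40;	/* hypothetical physical tail */
	int			newtailpage = 70;	/* hypothetical logical tail */
	int			boundary = newtailpage - (newtailpage % SLRU_PAGES_PER_SEGMENT);

	/* boundary = 64: the segment containing page 70 starts at page 64 */
	printf("boundary = %d\n", boundary);

	/*
	 * The old tail's segment (pages 32..63) lies wholly before the
	 * boundary, so SimpleLruTruncate() would have whole segments to drop.
	 */
	printf("truncate? %s\n", oldtailpage < boundary ? "yes" : "no");
	return 0;
}
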
2252 
2253 /*
2254  * ProcessIncomingNotify
2255  *
2256  * Deal with arriving NOTIFYs from other backends as soon as it's safe to
2257  * do so. This used to be called from the PROCSIG_NOTIFY_INTERRUPT
2258  * signal handler, but isn't anymore.
2259  *
2260  * Scan the queue for arriving notifications and report them to my front
2261  * end.
2262  *
2263  * NOTE: since we are outside any transaction, we must create our own.
2264  */
2265 static void
2266 ProcessIncomingNotify(void)
2267 {
2268  /* We *must* reset the flag */
2269  notifyInterruptPending = false;
2270 
2271  /* Do nothing else if we aren't actively listening */
2272  if (listenChannels == NIL)
2273  return;
2274 
2275  if (Trace_notify)
2276  elog(DEBUG1, "ProcessIncomingNotify");
2277 
2278  set_ps_display("notify interrupt");
2279 
2280  /*
2281  * We must run asyncQueueReadAllNotifications inside a transaction, else
2282  * bad things happen if it gets an error.
2283  */
2284  StartTransactionCommand();
2285 
2286  asyncQueueReadAllNotifications();
2287 
2288  CommitTransactionCommand();
2289 
2290  /*
2291  * Must flush the notify messages to ensure frontend gets them promptly.
2292  */
2293  pq_flush();
2294 
2295  set_ps_display("idle");
2296 
2297  if (Trace_notify)
2298  elog(DEBUG1, "ProcessIncomingNotify: done");
2299 }
2300 
2301 /*
2302  * Send NOTIFY message to my front end.
2303  */
2304 void
2305 NotifyMyFrontEnd(const char *channel, const char *payload, int32 srcPid)
2306 {
2307  if (whereToSendOutput == DestRemote)
2308  {
2309  StringInfoData buf;
2310 
2311  pq_beginmessage(&buf, 'A');
2312  pq_sendint32(&buf, srcPid);
2313  pq_sendstring(&buf, channel);
2314  pq_sendstring(&buf, payload);
2315  pq_endmessage(&buf);
2316 
2317  /*
2318  * NOTE: we do not do pq_flush() here. For a self-notify, it will
2319  * happen at the end of the transaction, and for incoming notifies
2320  * ProcessIncomingNotify will do it after finding all the notifies.
2321  */
2322  }
2323  else
2324  elog(INFO, "NOTIFY for \"%s\" payload \"%s\"", channel, payload);
2325 }
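
On the wire this is the protocol's NotificationResponse message (type byte 'A'); libpq decodes it into a PGnotify whose relname, extra, and be_pid fields carry the channel, payload, and srcPid sent here. A minimal receive loop (connection string and channel name illustrative):

#include <stdio.h>
#include <sys/select.h>
#include <libpq-fe.h>

int
main(void)
{
	PGconn	   *conn = PQconnectdb("dbname=postgres");	/* illustrative */

	if (PQstatus(conn) != CONNECTION_OK)
		return 1;
	PQclear(PQexec(conn, "LISTEN foo"));

	for (;;)
	{
		int			sock = PQsocket(conn);
		fd_set		mask;

		FD_ZERO(&mask);
		FD_SET(sock, &mask);
		if (select(sock + 1, &mask, NULL, NULL, NULL) < 0)
			break;				/* wait for traffic on the connection */

		PQconsumeInput(conn);

		PGnotify   *n;

		while ((n = PQnotifies(conn)) != NULL)
		{
			printf("NOTIFY \"%s\" payload \"%s\" from PID %d\n",
				   n->relname, n->extra, n->be_pid);
			PQfreemem(n);
		}
	}
	PQfinish(conn);
	return 0;
}
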
2326 
2327 /* Does pendingNotifies include a match for the given event? */
2328 static bool
2329 AsyncExistsPendingNotify(Notification *n)
2330 {
2331  if (pendingNotifies == NULL)
2332  return false;
2333 
2334  if (pendingNotifies->hashtab != NULL)
2335  {
2336  /* Use the hash table to probe for a match */
2337  if (hash_search(pendingNotifies->hashtab,
2338  &n,
2339  HASH_FIND,
2340  NULL))
2341  return true;
2342  }
2343  else
2344  {
2345  /* Must scan the event list */
2346  ListCell *l;
2347 
2348  foreach(l, pendingNotifies->events)
2349  {
2350  Notification *oldn = (Notification *) lfirst(l);
2351 
2352  if (n->channel_len == oldn->channel_len &&
2353  n->payload_len == oldn->payload_len &&
2354  memcmp(n->data, oldn->data,
2355  n->channel_len + n->payload_len + 2) == 0)
2356  return true;
2357  }
2358  }
2359 
2360  return false;
2361 }
2362 
2363 /*
2364  * Add a notification event to a pre-existing pendingNotifies list.
2365  *
2366  * Because pendingNotifies->events is already nonempty, this works
2367  * correctly no matter what CurrentMemoryContext is.
2368  */
2369 static void
2370 AddEventToPendingNotifies(Notification *n)
2371 {
2372  Assert(pendingNotifies->events != NIL);
2373 
2374  /* Create the hash table if it's time to */
2375  if (list_length(pendingNotifies->events) >= MIN_HASHABLE_NOTIFIES &&
2376  pendingNotifies->hashtab == NULL)
2377  {
2378  HASHCTL hash_ctl;
2379  ListCell *l;
2380 
2381  /* Create the hash table */
2382  hash_ctl.keysize = sizeof(Notification *);
2383  hash_ctl.entrysize = sizeof(NotificationHash);
2384  hash_ctl.hash = notification_hash;
2385  hash_ctl.match = notification_match;
2386  hash_ctl.hcxt = CurTransactionContext;
2387  pendingNotifies->hashtab =
2388  hash_create("Pending Notifies",
2389  256L,
2390  &hash_ctl,
2391  HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
2392 
2393  /* Insert all the already-existing events */
2394  foreach(l, pendingNotifies->events)
2395  {
2396  Notification *oldn = (Notification *) lfirst(l);
2397  NotificationHash *hentry;
2398  bool found;
2399 
2400  hentry = (NotificationHash *) hash_search(pendingNotifies->hashtab,
2401  &oldn,
2402  HASH_ENTER,
2403  &found);
2404  Assert(!found);
2405  hentry->event = oldn;
2406  }
2407  }
2408 
2409  /* Add new event to the list, in order */
2410  pendingNotifies->events = lappend(pendingNotifies->events, n);
2411 
2412  /* Add event to the hash table if needed */
2413  if (pendingNotifies->hashtab != NULL)
2414  {
2415  NotificationHash *hentry;
2416  bool found;
2417 
2418  hentry = (NotificationHash *) hash_search(pendingNotifies->hashtab,
2419  &n,
2420  HASH_ENTER,
2421  &found);
2422  Assert(!found);
2423  hentry->event = n;
2424  }
2425 }
2426 
2427 /*
2428  * notification_hash: hash function for notification hash table
2429  *
2430  * The hash "keys" are pointers to Notification structs.
2431  */
2432 static uint32
2433 notification_hash(const void *key, Size keysize)
2434 {
2435  const Notification *k = *(const Notification *const *) key;
2436 
2437  Assert(keysize == sizeof(Notification *));
2438  /* We don't bother to include the payload's trailing null in the hash */
2439  return DatumGetUInt32(hash_any((const unsigned char *) k->data,
2440  k->channel_len + k->payload_len + 1));
2441 }
2442 
2443 /*
2444  * notification_match: match function to use with notification_hash
2445  */
2446 static int
2447 notification_match(const void *key1, const void *key2, Size keysize)
2448 {
2449  const Notification *k1 = *(const Notification *const *) key1;
2450  const Notification *k2 = *(const Notification *const *) key2;
2451 
2452  Assert(keysize == sizeof(Notification *));
2453  if (k1->channel_len == k2->channel_len &&
2454  k1->payload_len == k2->payload_len &&
2455  memcmp(k1->data, k2->data,
2456  k1->channel_len + k1->payload_len + 2) == 0)
2457  return 0; /* equal */
2458  return 1; /* not equal */
2459 }
2460 
2461 /* Clear the pendingActions and pendingNotifies lists. */
2462 static void
2463 ClearPendingActionsAndNotifies(void)
2464 {
2465  /*
2466  * Everything's allocated in either TopTransactionContext or the context
2467  * for the subtransaction to which it corresponds. So, there's nothing to
2468  * do here except reset the pointers; the space will be reclaimed when the
2469  * contexts are deleted.
2470  */
2471  pendingActions = NULL;
2472  pendingNotifies = NULL;
2473 }