PostgreSQL Source Code  git master
launcher.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  * launcher.c
3  * PostgreSQL logical replication worker launcher process
4  *
5  * Copyright (c) 2016-2023, PostgreSQL Global Development Group
6  *
7  * IDENTIFICATION
8  * src/backend/replication/logical/launcher.c
9  *
10  * NOTES
11  * This module contains the logical replication worker launcher which
12  * uses the background worker infrastructure to start the logical
13  * replication workers for every enabled subscription.
14  *
15  *-------------------------------------------------------------------------
16  */
17 
18 #include "postgres.h"
19 
20 #include "access/heapam.h"
21 #include "access/htup.h"
22 #include "access/htup_details.h"
23 #include "access/tableam.h"
24 #include "access/xact.h"
27 #include "funcapi.h"
28 #include "lib/dshash.h"
29 #include "libpq/pqsignal.h"
30 #include "miscadmin.h"
31 #include "pgstat.h"
32 #include "postmaster/bgworker.h"
34 #include "postmaster/interrupt.h"
35 #include "postmaster/postmaster.h"
38 #include "replication/slot.h"
41 #include "storage/ipc.h"
42 #include "storage/proc.h"
43 #include "storage/procarray.h"
44 #include "storage/procsignal.h"
45 #include "tcop/tcopprot.h"
46 #include "utils/builtins.h"
47 #include "utils/memutils.h"
48 #include "utils/pg_lsn.h"
49 #include "utils/ps_status.h"
50 #include "utils/snapmgr.h"
51 #include "utils/timeout.h"
52 
53 /* max sleep time between cycles (3min) */
54 #define DEFAULT_NAPTIME_PER_CYCLE 180000L
55 
56 /* GUC variables */
60 
62 
63 typedef struct LogicalRepCtxStruct
64 {
65  /* Supervisor process. */
66  pid_t launcher_pid;
67 
68  /* Hash table holding last start times of subscriptions' apply workers. */
71 
72  /* Background workers. */
75 
77 
78 /* an entry in the last-start-times shared hash table */
80 {
81  Oid subid; /* OID of logrep subscription (hash key) */
82  TimestampTz last_start_time; /* last time its apply worker was started */
84 
85 /* parameters for the last-start-times shared hash table */
86 static const dshash_parameters dsh_params = {
87  sizeof(Oid),
92 };
93 
96 
97 static bool on_commit_launcher_wakeup = false;
98 
99 
100 static void ApplyLauncherWakeup(void);
101 static void logicalrep_launcher_onexit(int code, Datum arg);
102 static void logicalrep_worker_onexit(int code, Datum arg);
103 static void logicalrep_worker_detach(void);
104 static void logicalrep_worker_cleanup(LogicalRepWorker *worker);
105 static int logicalrep_pa_worker_count(Oid subid);
106 static void logicalrep_launcher_attach_dshmem(void);
109 
110 
111 /*
112  * Load the list of subscriptions.
113  *
114  * Only the fields interesting for worker start/stop functions are filled for
115  * each subscription.
116  */
117 static List *
119 {
120  List *res = NIL;
121  Relation rel;
122  TableScanDesc scan;
123  HeapTuple tup;
124  MemoryContext resultcxt;
125 
126  /* This is the context that we will allocate our output data in */
127  resultcxt = CurrentMemoryContext;
128 
129  /*
130  * Start a transaction so we can access pg_database, and get a snapshot.
131  * We don't have a use for the snapshot itself, but we're interested in
132  * the secondary effect that it sets RecentGlobalXmin. (This is critical
133  * for anything that reads heap pages, because HOT may decide to prune
134  * them even if the process doesn't attempt to modify any tuples.)
135  *
136  * FIXME: This comment is inaccurate / the code buggy. A snapshot that is
137  * not pushed/active does not reliably prevent HOT pruning (->xmin could
138  * e.g. be cleared when cache invalidations are processed).
139  */
141  (void) GetTransactionSnapshot();
142 
143  rel = table_open(SubscriptionRelationId, AccessShareLock);
144  scan = table_beginscan_catalog(rel, 0, NULL);
145 
147  {
149  Subscription *sub;
150  MemoryContext oldcxt;
151 
152  /*
153  * Allocate our results in the caller's context, not the
154  * transaction's. We do this inside the loop, and restore the original
155  * context at the end, so that leaky things like heap_getnext() are
156  * not called in a potentially long-lived context.
157  */
158  oldcxt = MemoryContextSwitchTo(resultcxt);
159 
160  sub = (Subscription *) palloc0(sizeof(Subscription));
161  sub->oid = subform->oid;
162  sub->dbid = subform->subdbid;
163  sub->owner = subform->subowner;
164  sub->enabled = subform->subenabled;
165  sub->name = pstrdup(NameStr(subform->subname));
166  /* We don't fill fields we are not interested in. */
167 
168  res = lappend(res, sub);
169  MemoryContextSwitchTo(oldcxt);
170  }
171 
172  table_endscan(scan);
174 
176 
177  return res;
178 }
179 
180 /*
181  * Wait for a background worker to start up and attach to the shmem context.
182  *
183  * This is only needed for cleaning up the shared memory in case the worker
184  * fails to attach.
185  *
186  * Returns whether the attach was successful.
187  */
188 static bool
190  uint16 generation,
191  BackgroundWorkerHandle *handle)
192 {
194  int rc;
195 
196  for (;;)
197  {
198  pid_t pid;
199 
201 
202  LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
203 
204  /* Worker either died or has started. Return false if died. */
205  if (!worker->in_use || worker->proc)
206  {
207  LWLockRelease(LogicalRepWorkerLock);
208  return worker->in_use;
209  }
210 
211  LWLockRelease(LogicalRepWorkerLock);
212 
213  /* Check if worker has died before attaching, and clean up after it. */
214  status = GetBackgroundWorkerPid(handle, &pid);
215 
216  if (status == BGWH_STOPPED)
217  {
218  LWLockAcquire(LogicalRepWorkerLock, LW_EXCLUSIVE);
219  /* Ensure that this was indeed the worker we waited for. */
220  if (generation == worker->generation)
222  LWLockRelease(LogicalRepWorkerLock);
223  return false;
224  }
225 
226  /*
227  * We need timeout because we generally don't get notified via latch
228  * about the worker attach. But we don't expect to have to wait long.
229  */
230  rc = WaitLatch(MyLatch,
233 
234  if (rc & WL_LATCH_SET)
235  {
238  }
239  }
240 }
241 
242 /*
243  * Walks the workers array and searches for one that matches given
244  * subscription id and relid.
245  *
246  * We are only interested in the leader apply worker or table sync worker.
247  */
249 logicalrep_worker_find(Oid subid, Oid relid, bool only_running)
250 {
251  int i;
252  LogicalRepWorker *res = NULL;
253 
254  Assert(LWLockHeldByMe(LogicalRepWorkerLock));
255 
256  /* Search for attached worker for a given subscription id. */
257  for (i = 0; i < max_logical_replication_workers; i++)
258  {
260 
261  /* Skip parallel apply workers. */
262  if (isParallelApplyWorker(w))
263  continue;
264 
265  if (w->in_use && w->subid == subid && w->relid == relid &&
266  (!only_running || w->proc))
267  {
268  res = w;
269  break;
270  }
271  }
272 
273  return res;
274 }
275 
276 /*
277  * Similar to logicalrep_worker_find(), but returns a list of all workers for
278  * the subscription, instead of just one.
279  */
280 List *
281 logicalrep_workers_find(Oid subid, bool only_running)
282 {
283  int i;
284  List *res = NIL;
285 
286  Assert(LWLockHeldByMe(LogicalRepWorkerLock));
287 
288  /* Search for attached worker for a given subscription id. */
289  for (i = 0; i < max_logical_replication_workers; i++)
290  {
292 
293  if (w->in_use && w->subid == subid && (!only_running || w->proc))
294  res = lappend(res, w);
295  }
296 
297  return res;
298 }
299 
300 /*
301  * Start new logical replication background worker, if possible.
302  *
303  * Returns true on success, false on failure.
304  */
305 bool
306 logicalrep_worker_launch(Oid dbid, Oid subid, const char *subname, Oid userid,
307  Oid relid, dsm_handle subworker_dsm)
308 {
309  BackgroundWorker bgw;
310  BackgroundWorkerHandle *bgw_handle;
311  uint16 generation;
312  int i;
313  int slot = 0;
314  LogicalRepWorker *worker = NULL;
315  int nsyncworkers;
316  int nparallelapplyworkers;
318  bool is_parallel_apply_worker = (subworker_dsm != DSM_HANDLE_INVALID);
319 
320  /* Sanity check - tablesync worker cannot be a subworker */
321  Assert(!(is_parallel_apply_worker && OidIsValid(relid)));
322 
323  ereport(DEBUG1,
324  (errmsg_internal("starting logical replication worker for subscription \"%s\"",
325  subname)));
326 
327  /* Report this after the initial starting message for consistency. */
328  if (max_replication_slots == 0)
329  ereport(ERROR,
330  (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
331  errmsg("cannot start logical replication workers when max_replication_slots = 0")));
332 
333  /*
334  * We need to do the modification of the shared memory under lock so that
335  * we have consistent view.
336  */
337  LWLockAcquire(LogicalRepWorkerLock, LW_EXCLUSIVE);
338 
339 retry:
340  /* Find unused worker slot. */
341  for (i = 0; i < max_logical_replication_workers; i++)
342  {
344 
345  if (!w->in_use)
346  {
347  worker = w;
348  slot = i;
349  break;
350  }
351  }
352 
353  nsyncworkers = logicalrep_sync_worker_count(subid);
354 
356 
357  /*
358  * If we didn't find a free slot, try to do garbage collection. The
359  * reason we do this is because if some worker failed to start up and its
360  * parent has crashed while waiting, the in_use state was never cleared.
361  */
362  if (worker == NULL || nsyncworkers >= max_sync_workers_per_subscription)
363  {
364  bool did_cleanup = false;
365 
366  for (i = 0; i < max_logical_replication_workers; i++)
367  {
369 
370  /*
371  * If the worker was marked in use but didn't manage to attach in
372  * time, clean it up.
373  */
374  if (w->in_use && !w->proc &&
377  {
378  elog(WARNING,
379  "logical replication worker for subscription %u took too long to start; canceled",
380  w->subid);
381 
383  did_cleanup = true;
384  }
385  }
386 
387  if (did_cleanup)
388  goto retry;
389  }
390 
391  /*
392  * We don't allow to invoke more sync workers once we have reached the
393  * sync worker limit per subscription. So, just return silently as we
394  * might get here because of an otherwise harmless race condition.
395  */
396  if (OidIsValid(relid) && nsyncworkers >= max_sync_workers_per_subscription)
397  {
398  LWLockRelease(LogicalRepWorkerLock);
399  return false;
400  }
401 
402  nparallelapplyworkers = logicalrep_pa_worker_count(subid);
403 
404  /*
405  * Return false if the number of parallel apply workers reached the limit
406  * per subscription.
407  */
408  if (is_parallel_apply_worker &&
409  nparallelapplyworkers >= max_parallel_apply_workers_per_subscription)
410  {
411  LWLockRelease(LogicalRepWorkerLock);
412  return false;
413  }
414 
415  /*
416  * However if there are no more free worker slots, inform user about it
417  * before exiting.
418  */
419  if (worker == NULL)
420  {
421  LWLockRelease(LogicalRepWorkerLock);
423  (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
424  errmsg("out of logical replication worker slots"),
425  errhint("You might need to increase max_logical_replication_workers.")));
426  return false;
427  }
428 
429  /* Prepare the worker slot. */
430  worker->launch_time = now;
431  worker->in_use = true;
432  worker->generation++;
433  worker->proc = NULL;
434  worker->dbid = dbid;
435  worker->userid = userid;
436  worker->subid = subid;
437  worker->relid = relid;
438  worker->relstate = SUBREL_STATE_UNKNOWN;
440  worker->stream_fileset = NULL;
441  worker->leader_pid = is_parallel_apply_worker ? MyProcPid : InvalidPid;
442  worker->parallel_apply = is_parallel_apply_worker;
443  worker->last_lsn = InvalidXLogRecPtr;
446  worker->reply_lsn = InvalidXLogRecPtr;
447  TIMESTAMP_NOBEGIN(worker->reply_time);
448 
449  /* Before releasing lock, remember generation for future identification. */
450  generation = worker->generation;
451 
452  LWLockRelease(LogicalRepWorkerLock);
453 
454  /* Register the new dynamic worker. */
455  memset(&bgw, 0, sizeof(bgw));
459  snprintf(bgw.bgw_library_name, BGW_MAXLEN, "postgres");
460 
461  if (is_parallel_apply_worker)
462  snprintf(bgw.bgw_function_name, BGW_MAXLEN, "ParallelApplyWorkerMain");
463  else
464  snprintf(bgw.bgw_function_name, BGW_MAXLEN, "ApplyWorkerMain");
465 
466  if (OidIsValid(relid))
468  "logical replication worker for subscription %u sync %u", subid, relid);
469  else if (is_parallel_apply_worker)
471  "logical replication parallel apply worker for subscription %u", subid);
472  else
474  "logical replication apply worker for subscription %u", subid);
475 
476  if (is_parallel_apply_worker)
477  snprintf(bgw.bgw_type, BGW_MAXLEN, "logical replication parallel worker");
478  else
479  snprintf(bgw.bgw_type, BGW_MAXLEN, "logical replication worker");
480 
483  bgw.bgw_main_arg = Int32GetDatum(slot);
484 
485  if (is_parallel_apply_worker)
486  memcpy(bgw.bgw_extra, &subworker_dsm, sizeof(dsm_handle));
487 
488  if (!RegisterDynamicBackgroundWorker(&bgw, &bgw_handle))
489  {
490  /* Failed to start worker, so clean up the worker slot. */
491  LWLockAcquire(LogicalRepWorkerLock, LW_EXCLUSIVE);
492  Assert(generation == worker->generation);
494  LWLockRelease(LogicalRepWorkerLock);
495 
497  (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
498  errmsg("out of background worker slots"),
499  errhint("You might need to increase max_worker_processes.")));
500  return false;
501  }
502 
503  /* Now wait until it attaches. */
504  return WaitForReplicationWorkerAttach(worker, generation, bgw_handle);
505 }
506 
507 /*
508  * Internal function to stop the worker and wait until it detaches from the
509  * slot.
510  */
511 static void
513 {
514  uint16 generation;
515 
516  Assert(LWLockHeldByMeInMode(LogicalRepWorkerLock, LW_SHARED));
517 
518  /*
519  * Remember which generation was our worker so we can check if what we see
520  * is still the same one.
521  */
522  generation = worker->generation;
523 
524  /*
525  * If we found a worker but it does not have proc set then it is still
526  * starting up; wait for it to finish starting and then kill it.
527  */
528  while (worker->in_use && !worker->proc)
529  {
530  int rc;
531 
532  LWLockRelease(LogicalRepWorkerLock);
533 
534  /* Wait a bit --- we don't expect to have to wait long. */
535  rc = WaitLatch(MyLatch,
538 
539  if (rc & WL_LATCH_SET)
540  {
543  }
544 
545  /* Recheck worker status. */
546  LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
547 
548  /*
549  * Check whether the worker slot is no longer used, which would mean
550  * that the worker has exited, or whether the worker generation is
551  * different, meaning that a different worker has taken the slot.
552  */
553  if (!worker->in_use || worker->generation != generation)
554  return;
555 
556  /* Worker has assigned proc, so it has started. */
557  if (worker->proc)
558  break;
559  }
560 
561  /* Now terminate the worker ... */
562  kill(worker->proc->pid, signo);
563 
564  /* ... and wait for it to die. */
565  for (;;)
566  {
567  int rc;
568 
569  /* is it gone? */
570  if (!worker->proc || worker->generation != generation)
571  break;
572 
573  LWLockRelease(LogicalRepWorkerLock);
574 
575  /* Wait a bit --- we don't expect to have to wait long. */
576  rc = WaitLatch(MyLatch,
579 
580  if (rc & WL_LATCH_SET)
581  {
584  }
585 
586  LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
587  }
588 }
589 
590 /*
591  * Stop the logical replication worker for subid/relid, if any.
592  */
593 void
595 {
596  LogicalRepWorker *worker;
597 
598  LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
599 
600  worker = logicalrep_worker_find(subid, relid, false);
601 
602  if (worker)
603  {
604  Assert(!isParallelApplyWorker(worker));
605  logicalrep_worker_stop_internal(worker, SIGTERM);
606  }
607 
608  LWLockRelease(LogicalRepWorkerLock);
609 }
610 
611 /*
612  * Stop the logical replication parallel apply worker corresponding to the
613  * input slot number.
614  *
615  * Note that the function sends SIGINT instead of SIGTERM to the parallel apply
616  * worker so that the worker exits cleanly.
617  */
618 void
619 logicalrep_pa_worker_stop(int slot_no, uint16 generation)
620 {
621  LogicalRepWorker *worker;
622 
623  Assert(slot_no >= 0 && slot_no < max_logical_replication_workers);
624 
625  LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
626 
627  worker = &LogicalRepCtx->workers[slot_no];
628  Assert(isParallelApplyWorker(worker));
629 
630  /*
631  * Only stop the worker if the generation matches and the worker is alive.
632  */
633  if (worker->generation == generation && worker->proc)
634  logicalrep_worker_stop_internal(worker, SIGINT);
635 
636  LWLockRelease(LogicalRepWorkerLock);
637 }
638 
639 /*
640  * Wake up (using latch) any logical replication worker for specified sub/rel.
641  */
642 void
644 {
645  LogicalRepWorker *worker;
646 
647  LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
648 
649  worker = logicalrep_worker_find(subid, relid, true);
650 
651  if (worker)
653 
654  LWLockRelease(LogicalRepWorkerLock);
655 }
656 
657 /*
658  * Wake up (using latch) the specified logical replication worker.
659  *
660  * Caller must hold lock, else worker->proc could change under us.
661  */
662 void
664 {
665  Assert(LWLockHeldByMe(LogicalRepWorkerLock));
666 
667  SetLatch(&worker->proc->procLatch);
668 }
669 
670 /*
671  * Attach to a slot.
672  */
673 void
675 {
676  /* Block concurrent access. */
677  LWLockAcquire(LogicalRepWorkerLock, LW_EXCLUSIVE);
678 
679  Assert(slot >= 0 && slot < max_logical_replication_workers);
681 
683  {
684  LWLockRelease(LogicalRepWorkerLock);
685  ereport(ERROR,
686  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
687  errmsg("logical replication worker slot %d is empty, cannot attach",
688  slot)));
689  }
690 
692  {
693  LWLockRelease(LogicalRepWorkerLock);
694  ereport(ERROR,
695  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
696  errmsg("logical replication worker slot %d is already used by "
697  "another worker, cannot attach", slot)));
698  }
699 
702 
703  LWLockRelease(LogicalRepWorkerLock);
704 }
705 
706 /*
707  * Stop the parallel apply workers if any, and detach the leader apply worker
708  * (cleans up the worker info).
709  */
710 static void
712 {
713  /* Stop the parallel apply workers. */
715  {
716  List *workers;
717  ListCell *lc;
718 
719  /*
720  * Detach from the error_mq_handle for all parallel apply workers
721  * before terminating them. This prevents the leader apply worker from
722  * receiving the worker termination message and sending it to logs
723  * when the same is already done by the parallel worker.
724  */
726 
727  LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
728 
730  foreach(lc, workers)
731  {
733 
734  if (isParallelApplyWorker(w))
736  }
737 
738  LWLockRelease(LogicalRepWorkerLock);
739  }
740 
741  /* Block concurrent access. */
742  LWLockAcquire(LogicalRepWorkerLock, LW_EXCLUSIVE);
743 
745 
746  LWLockRelease(LogicalRepWorkerLock);
747 }
748 
749 /*
750  * Clean up worker info.
751  */
752 static void
754 {
755  Assert(LWLockHeldByMeInMode(LogicalRepWorkerLock, LW_EXCLUSIVE));
756 
757  worker->in_use = false;
758  worker->proc = NULL;
759  worker->dbid = InvalidOid;
760  worker->userid = InvalidOid;
761  worker->subid = InvalidOid;
762  worker->relid = InvalidOid;
763  worker->leader_pid = InvalidPid;
764  worker->parallel_apply = false;
765 }
766 
767 /*
768  * Cleanup function for logical replication launcher.
769  *
770  * Called on logical replication launcher exit.
771  */
772 static void
774 {
776 }
777 
778 /*
779  * Cleanup function.
780  *
781  * Called on logical replication worker exit.
782  */
783 static void
785 {
786  /* Disconnect gracefully from the remote side. */
789 
791 
792  /* Cleanup fileset used for streaming transactions. */
793  if (MyLogicalRepWorker->stream_fileset != NULL)
795 
796  /*
797  * Session level locks may be acquired outside of a transaction in
798  * parallel apply mode and will not be released when the worker
799  * terminates, so manually release all locks before the worker exits.
800  */
802 
804 }
805 
806 /*
807  * Count the number of registered (not necessarily running) sync workers
808  * for a subscription.
809  */
810 int
812 {
813  int i;
814  int res = 0;
815 
816  Assert(LWLockHeldByMe(LogicalRepWorkerLock));
817 
818  /* Search for attached worker for a given subscription id. */
819  for (i = 0; i < max_logical_replication_workers; i++)
820  {
822 
823  if (w->subid == subid && OidIsValid(w->relid))
824  res++;
825  }
826 
827  return res;
828 }
829 
830 /*
831  * Count the number of registered (but not necessarily running) parallel apply
832  * workers for a subscription.
833  */
834 static int
836 {
837  int i;
838  int res = 0;
839 
840  Assert(LWLockHeldByMe(LogicalRepWorkerLock));
841 
842  /*
843  * Scan all attached parallel apply workers, only counting those which
844  * have the given subscription id.
845  */
846  for (i = 0; i < max_logical_replication_workers; i++)
847  {
849 
850  if (w->subid == subid && isParallelApplyWorker(w))
851  res++;
852  }
853 
854  return res;
855 }
856 
857 /*
858  * ApplyLauncherShmemSize
859  * Compute space needed for replication launcher shared memory
860  */
861 Size
863 {
864  Size size;
865 
866  /*
867  * Need the fixed struct and the array of LogicalRepWorker.
868  */
869  size = sizeof(LogicalRepCtxStruct);
870  size = MAXALIGN(size);
872  sizeof(LogicalRepWorker)));
873  return size;
874 }
875 
876 /*
877  * ApplyLauncherRegister
878  * Register a background worker running the logical replication launcher.
879  */
880 void
882 {
883  BackgroundWorker bgw;
884 
886  return;
887 
888  memset(&bgw, 0, sizeof(bgw));
892  snprintf(bgw.bgw_library_name, BGW_MAXLEN, "postgres");
893  snprintf(bgw.bgw_function_name, BGW_MAXLEN, "ApplyLauncherMain");
895  "logical replication launcher");
897  "logical replication launcher");
898  bgw.bgw_restart_time = 5;
899  bgw.bgw_notify_pid = 0;
900  bgw.bgw_main_arg = (Datum) 0;
901 
903 }
904 
905 /*
906  * ApplyLauncherShmemInit
907  * Allocate and initialize replication launcher shared memory
908  */
909 void
911 {
912  bool found;
913 
915  ShmemInitStruct("Logical Replication Launcher Data",
917  &found);
918 
919  if (!found)
920  {
921  int slot;
922 
924 
927 
928  /* Initialize memory and spin locks for each worker slot. */
929  for (slot = 0; slot < max_logical_replication_workers; slot++)
930  {
931  LogicalRepWorker *worker = &LogicalRepCtx->workers[slot];
932 
933  memset(worker, 0, sizeof(LogicalRepWorker));
934  SpinLockInit(&worker->relmutex);
935  }
936  }
937 }
938 
939 /*
940  * Initialize or attach to the dynamic shared hash table that stores the
941  * last-start times, if not already done.
942  * This must be called before accessing the table.
943  */
944 static void
946 {
947  MemoryContext oldcontext;
948 
949  /* Quick exit if we already did this. */
951  last_start_times != NULL)
952  return;
953 
954  /* Otherwise, use a lock to ensure only one process creates the table. */
955  LWLockAcquire(LogicalRepWorkerLock, LW_EXCLUSIVE);
956 
957  /* Be sure any local memory allocated by DSA routines is persistent. */
959 
961  {
962  /* Initialize dynamic shared hash table for last-start times. */
967 
968  /* Store handles in shared memory for other backends to use. */
971  }
972  else if (!last_start_times)
973  {
974  /* Attach to existing dynamic shared hash table. */
979  }
980 
981  MemoryContextSwitchTo(oldcontext);
982  LWLockRelease(LogicalRepWorkerLock);
983 }
984 
985 /*
986  * Set the last-start time for the subscription.
987  */
988 static void
990 {
992  bool found;
993 
995 
996  entry = dshash_find_or_insert(last_start_times, &subid, &found);
997  entry->last_start_time = start_time;
999 }
1000 
1001 /*
1002  * Return the last-start time for the subscription, or 0 if there isn't one.
1003  */
1004 static TimestampTz
1006 {
1008  TimestampTz ret;
1009 
1011 
1012  entry = dshash_find(last_start_times, &subid, false);
1013  if (entry == NULL)
1014  return 0;
1015 
1016  ret = entry->last_start_time;
1018 
1019  return ret;
1020 }
1021 
1022 /*
1023  * Remove the last-start-time entry for the subscription, if one exists.
1024  *
1025  * This has two use-cases: to remove the entry related to a subscription
1026  * that's been deleted or disabled (just to avoid leaking shared memory),
1027  * and to allow immediate restart of an apply worker that has exited
1028  * due to subscription parameter changes.
1029  */
1030 void
1032 {
1034 
1035  (void) dshash_delete_key(last_start_times, &subid);
1036 }
1037 
1038 /*
1039  * Wakeup the launcher on commit if requested.
1040  */
1041 void
1043 {
1044  if (isCommit)
1045  {
1048  }
1049 
1050  on_commit_launcher_wakeup = false;
1051 }
1052 
1053 /*
1054  * Request wakeup of the launcher on commit of the transaction.
1055  *
1056  * This is used to send launcher signal to stop sleeping and process the
1057  * subscriptions when current transaction commits. Should be used when new
1058  * tuple was added to the pg_subscription catalog.
1059 */
1060 void
1062 {
1065 }
1066 
1067 static void
1069 {
1070  if (LogicalRepCtx->launcher_pid != 0)
1072 }
1073 
1074 /*
1075  * Main loop for the apply launcher process.
1076  */
1077 void
1079 {
1080  ereport(DEBUG1,
1081  (errmsg_internal("logical replication launcher started")));
1082 
1084 
1087 
1088  /* Establish signal handlers. */
1090  pqsignal(SIGTERM, die);
1092 
1093  /*
1094  * Establish connection to nailed catalogs (we only ever access
1095  * pg_subscription).
1096  */
1097  BackgroundWorkerInitializeConnection(NULL, NULL, 0);
1098 
1099  /* Enter main loop */
1100  for (;;)
1101  {
1102  int rc;
1103  List *sublist;
1104  ListCell *lc;
1105  MemoryContext subctx;
1106  MemoryContext oldctx;
1107  long wait_time = DEFAULT_NAPTIME_PER_CYCLE;
1108 
1110 
1111  /* Use temporary context to avoid leaking memory across cycles. */
1113  "Logical Replication Launcher sublist",
1115  oldctx = MemoryContextSwitchTo(subctx);
1116 
1117  /* Start any missing workers for enabled subscriptions. */
1118  sublist = get_subscription_list();
1119  foreach(lc, sublist)
1120  {
1121  Subscription *sub = (Subscription *) lfirst(lc);
1122  LogicalRepWorker *w;
1123  TimestampTz last_start;
1124  TimestampTz now;
1125  long elapsed;
1126 
1127  if (!sub->enabled)
1128  continue;
1129 
1130  LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
1131  w = logicalrep_worker_find(sub->oid, InvalidOid, false);
1132  LWLockRelease(LogicalRepWorkerLock);
1133 
1134  if (w != NULL)
1135  continue; /* worker is running already */
1136 
1137  /*
1138  * If the worker is eligible to start now, launch it. Otherwise,
1139  * adjust wait_time so that we'll wake up as soon as it can be
1140  * started.
1141  *
1142  * Each subscription's apply worker can only be restarted once per
1143  * wal_retrieve_retry_interval, so that errors do not cause us to
1144  * repeatedly restart the worker as fast as possible. In cases
1145  * where a restart is expected (e.g., subscription parameter
1146  * changes), another process should remove the last-start entry
1147  * for the subscription so that the worker can be restarted
1148  * without waiting for wal_retrieve_retry_interval to elapse.
1149  */
1150  last_start = ApplyLauncherGetWorkerStartTime(sub->oid);
1152  if (last_start == 0 ||
1154  {
1156  logicalrep_worker_launch(sub->dbid, sub->oid, sub->name,
1157  sub->owner, InvalidOid,
1159  }
1160  else
1161  {
1162  wait_time = Min(wait_time,
1163  wal_retrieve_retry_interval - elapsed);
1164  }
1165  }
1166 
1167  /* Switch back to original memory context. */
1168  MemoryContextSwitchTo(oldctx);
1169  /* Clean the temporary memory. */
1170  MemoryContextDelete(subctx);
1171 
1172  /* Wait for more work. */
1173  rc = WaitLatch(MyLatch,
1175  wait_time,
1177 
1178  if (rc & WL_LATCH_SET)
1179  {
1182  }
1183 
1184  if (ConfigReloadPending)
1185  {
1186  ConfigReloadPending = false;
1188  }
1189  }
1190 
1191  /* Not reachable */
1192 }
1193 
1194 /*
1195  * Is current process the logical replication launcher?
1196  */
1197 bool
1199 {
1201 }
1202 
1203 /*
1204  * Return the pid of the leader apply worker if the given pid is the pid of a
1205  * parallel apply worker, otherwise, return InvalidPid.
1206  */
1207 pid_t
1209 {
1210  int leader_pid = InvalidPid;
1211  int i;
1212 
1213  LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
1214 
1215  for (i = 0; i < max_logical_replication_workers; i++)
1216  {
1218 
1219  if (isParallelApplyWorker(w) && w->proc && pid == w->proc->pid)
1220  {
1221  leader_pid = w->leader_pid;
1222  break;
1223  }
1224  }
1225 
1226  LWLockRelease(LogicalRepWorkerLock);
1227 
1228  return leader_pid;
1229 }
1230 
1231 /*
1232  * Returns state of the subscriptions.
1233  */
1234 Datum
1236 {
1237 #define PG_STAT_GET_SUBSCRIPTION_COLS 9
1238  Oid subid = PG_ARGISNULL(0) ? InvalidOid : PG_GETARG_OID(0);
1239  int i;
1240  ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
1241 
1242  InitMaterializedSRF(fcinfo, 0);
1243 
1244  /* Make sure we get consistent view of the workers. */
1245  LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
1246 
1247  for (i = 0; i < max_logical_replication_workers; i++)
1248  {
1249  /* for each row */
1251  bool nulls[PG_STAT_GET_SUBSCRIPTION_COLS] = {0};
1252  int worker_pid;
1253  LogicalRepWorker worker;
1254 
1255  memcpy(&worker, &LogicalRepCtx->workers[i],
1256  sizeof(LogicalRepWorker));
1257  if (!worker.proc || !IsBackendPid(worker.proc->pid))
1258  continue;
1259 
1260  if (OidIsValid(subid) && worker.subid != subid)
1261  continue;
1262 
1263  worker_pid = worker.proc->pid;
1264 
1265  values[0] = ObjectIdGetDatum(worker.subid);
1266  if (OidIsValid(worker.relid))
1267  values[1] = ObjectIdGetDatum(worker.relid);
1268  else
1269  nulls[1] = true;
1270  values[2] = Int32GetDatum(worker_pid);
1271 
1272  if (isParallelApplyWorker(&worker))
1273  values[3] = Int32GetDatum(worker.leader_pid);
1274  else
1275  nulls[3] = true;
1276 
1277  if (XLogRecPtrIsInvalid(worker.last_lsn))
1278  nulls[4] = true;
1279  else
1280  values[4] = LSNGetDatum(worker.last_lsn);
1281  if (worker.last_send_time == 0)
1282  nulls[5] = true;
1283  else
1285  if (worker.last_recv_time == 0)
1286  nulls[6] = true;
1287  else
1289  if (XLogRecPtrIsInvalid(worker.reply_lsn))
1290  nulls[7] = true;
1291  else
1292  values[7] = LSNGetDatum(worker.reply_lsn);
1293  if (worker.reply_time == 0)
1294  nulls[8] = true;
1295  else
1296  values[8] = TimestampTzGetDatum(worker.reply_time);
1297 
1298  tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc,
1299  values, nulls);
1300 
1301  /*
1302  * If only a single subscription was requested, and we found it,
1303  * break.
1304  */
1305  if (OidIsValid(subid))
1306  break;
1307  }
1308 
1309  LWLockRelease(LogicalRepWorkerLock);
1310 
1311  return (Datum) 0;
1312 }
void pa_detach_all_error_mq(void)
WalReceiverConn * LogRepWorkerWalRcvConn
Definition: worker.c:312
long TimestampDifferenceMilliseconds(TimestampTz start_time, TimestampTz stop_time)
Definition: timestamp.c:1703
bool TimestampDifferenceExceeds(TimestampTz start_time, TimestampTz stop_time, int msec)
Definition: timestamp.c:1727
TimestampTz GetCurrentTimestamp(void)
Definition: timestamp.c:1582
Datum now(PG_FUNCTION_ARGS)
Definition: timestamp.c:1546
void RegisterBackgroundWorker(BackgroundWorker *worker)
Definition: bgworker.c:875
BgwHandleStatus GetBackgroundWorkerPid(BackgroundWorkerHandle *handle, pid_t *pidp)
Definition: bgworker.c:1071
bool RegisterDynamicBackgroundWorker(BackgroundWorker *worker, BackgroundWorkerHandle **handle)
Definition: bgworker.c:959
#define BGW_NEVER_RESTART
Definition: bgworker.h:85
BgwHandleStatus
Definition: bgworker.h:104
@ BGWH_STOPPED
Definition: bgworker.h:107
@ BgWorkerStart_RecoveryFinished
Definition: bgworker.h:81
#define BGWORKER_BACKEND_DATABASE_CONNECTION
Definition: bgworker.h:60
#define BGWORKER_SHMEM_ACCESS
Definition: bgworker.h:53
#define BGW_MAXLEN
Definition: bgworker.h:86
static Datum values[MAXATTR]
Definition: bootstrap.c:156
#define NameStr(name)
Definition: c.h:730
unsigned short uint16
Definition: c.h:489
#define Min(x, y)
Definition: c.h:988
#define MAXALIGN(LEN)
Definition: c.h:795
#define FLEXIBLE_ARRAY_MEMBER
Definition: c.h:382
#define OidIsValid(objectId)
Definition: c.h:759
size_t Size
Definition: c.h:589
int64 TimestampTz
Definition: timestamp.h:39
#define TIMESTAMP_NOBEGIN(j)
Definition: timestamp.h:158
dsa_area * dsa_attach(dsa_handle handle)
Definition: dsa.c:518
dsa_area * dsa_create(int tranche_id)
Definition: dsa.c:432
void dsa_pin_mapping(dsa_area *area)
Definition: dsa.c:643
dsa_handle dsa_get_handle(dsa_area *area)
Definition: dsa.c:506
void dsa_pin(dsa_area *area)
Definition: dsa.c:977
dsm_handle dsa_handle
Definition: dsa.h:100
#define DSA_HANDLE_INVALID
Definition: dsa.h:103
bool dshash_delete_key(dshash_table *hash_table, const void *key)
Definition: dshash.c:503
void dshash_release_lock(dshash_table *hash_table, void *entry)
Definition: dshash.c:558
void * dshash_find(dshash_table *hash_table, const void *key, bool exclusive)
Definition: dshash.c:390
dshash_table_handle dshash_get_hash_table_handle(dshash_table *hash_table)
Definition: dshash.c:367
dshash_hash dshash_memhash(const void *v, size_t size, void *arg)
Definition: dshash.c:581
void * dshash_find_or_insert(dshash_table *hash_table, const void *key, bool *found)
Definition: dshash.c:433
dshash_table * dshash_attach(dsa_area *area, const dshash_parameters *params, dshash_table_handle handle, void *arg)
Definition: dshash.c:270
int dshash_memcmp(const void *a, const void *b, size_t size, void *arg)
Definition: dshash.c:572
dshash_table * dshash_create(dsa_area *area, const dshash_parameters *params, void *arg)
Definition: dshash.c:206
#define DSHASH_HANDLE_INVALID
Definition: dshash.h:27
dsa_pointer dshash_table_handle
Definition: dshash.h:24
uint32 dsm_handle
Definition: dsm_impl.h:55
#define DSM_HANDLE_INVALID
Definition: dsm_impl.h:58
int errmsg_internal(const char *fmt,...)
Definition: elog.c:1156
int errhint(const char *fmt,...)
Definition: elog.c:1316
int errcode(int sqlerrcode)
Definition: elog.c:858
int errmsg(const char *fmt,...)
Definition: elog.c:1069
#define WARNING
Definition: elog.h:36
#define DEBUG1
Definition: elog.h:30
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
void FileSetDeleteAll(FileSet *fileset)
Definition: fileset.c:152
#define PG_GETARG_OID(n)
Definition: fmgr.h:275
#define PG_ARGISNULL(n)
Definition: fmgr.h:209
#define PG_FUNCTION_ARGS
Definition: fmgr.h:193
void InitMaterializedSRF(FunctionCallInfo fcinfo, bits32 flags)
Definition: funcapi.c:76
int MyProcPid
Definition: globals.c:44
struct Latch * MyLatch
Definition: globals.c:58
@ PGC_SIGHUP
Definition: guc.h:71
void ProcessConfigFile(GucContext context)
HeapTuple heap_getnext(TableScanDesc sscan, ScanDirection direction)
Definition: heapam.c:1093
#define HeapTupleIsValid(tuple)
Definition: htup.h:78
#define GETSTRUCT(TUP)
Definition: htup_details.h:653
volatile sig_atomic_t ConfigReloadPending
Definition: interrupt.c:27
void SignalHandlerForConfigReload(SIGNAL_ARGS)
Definition: interrupt.c:61
void before_shmem_exit(pg_on_exit_callback function, Datum arg)
Definition: ipc.c:333
int i
Definition: isn.c:73
void SetLatch(Latch *latch)
Definition: latch.c:607
void ResetLatch(Latch *latch)
Definition: latch.c:699
int WaitLatch(Latch *latch, int wakeEvents, long timeout, uint32 wait_event_info)
Definition: latch.c:492
#define WL_TIMEOUT
Definition: latch.h:128
#define WL_EXIT_ON_PM_DEATH
Definition: latch.h:130
#define WL_LATCH_SET
Definition: latch.h:125
Datum pg_stat_get_subscription(PG_FUNCTION_ARGS)
Definition: launcher.c:1235
void logicalrep_pa_worker_stop(int slot_no, uint16 generation)
Definition: launcher.c:619
#define DEFAULT_NAPTIME_PER_CYCLE
Definition: launcher.c:54
LogicalRepWorker * logicalrep_worker_find(Oid subid, Oid relid, bool only_running)
Definition: launcher.c:249
void AtEOXact_ApplyLauncher(bool isCommit)
Definition: launcher.c:1042
void logicalrep_worker_wakeup_ptr(LogicalRepWorker *worker)
Definition: launcher.c:663
Size ApplyLauncherShmemSize(void)
Definition: launcher.c:862
bool IsLogicalLauncher(void)
Definition: launcher.c:1198
void logicalrep_worker_attach(int slot)
Definition: launcher.c:674
static void ApplyLauncherSetWorkerStartTime(Oid subid, TimestampTz start_time)
Definition: launcher.c:989
static List * get_subscription_list(void)
Definition: launcher.c:118
bool logicalrep_worker_launch(Oid dbid, Oid subid, const char *subname, Oid userid, Oid relid, dsm_handle subworker_dsm)
Definition: launcher.c:306
static void logicalrep_launcher_onexit(int code, Datum arg)
Definition: launcher.c:773
static dsa_area * last_start_times_dsa
Definition: launcher.c:94
void ApplyLauncherMain(Datum main_arg)
Definition: launcher.c:1078
#define PG_STAT_GET_SUBSCRIPTION_COLS
int max_logical_replication_workers
Definition: launcher.c:57
List * logicalrep_workers_find(Oid subid, bool only_running)
Definition: launcher.c:281
static int logicalrep_pa_worker_count(Oid subid)
Definition: launcher.c:835
static bool on_commit_launcher_wakeup
Definition: launcher.c:97
struct LogicalRepCtxStruct LogicalRepCtxStruct
static TimestampTz ApplyLauncherGetWorkerStartTime(Oid subid)
Definition: launcher.c:1005
void logicalrep_worker_wakeup(Oid subid, Oid relid)
Definition: launcher.c:643
void ApplyLauncherShmemInit(void)
Definition: launcher.c:910
static void logicalrep_worker_stop_internal(LogicalRepWorker *worker, int signo)
Definition: launcher.c:512
static dshash_table * last_start_times
Definition: launcher.c:95
void logicalrep_worker_stop(Oid subid, Oid relid)
Definition: launcher.c:594
LogicalRepWorker * MyLogicalRepWorker
Definition: launcher.c:61
void ApplyLauncherWakeupAtCommit(void)
Definition: launcher.c:1061
static const dshash_parameters dsh_params
Definition: launcher.c:86
static LogicalRepCtxStruct * LogicalRepCtx
Definition: launcher.c:76
static void logicalrep_worker_onexit(int code, Datum arg)
Definition: launcher.c:784
pid_t GetLeaderApplyWorkerPid(pid_t pid)
Definition: launcher.c:1208
int max_sync_workers_per_subscription
Definition: launcher.c:58
static void logicalrep_worker_detach(void)
Definition: launcher.c:711
static bool WaitForReplicationWorkerAttach(LogicalRepWorker *worker, uint16 generation, BackgroundWorkerHandle *handle)
Definition: launcher.c:189
int logicalrep_sync_worker_count(Oid subid)
Definition: launcher.c:811
void ApplyLauncherForgetWorkerStartTime(Oid subid)
Definition: launcher.c:1031
void ApplyLauncherRegister(void)
Definition: launcher.c:881
struct LauncherLastStartTimesEntry LauncherLastStartTimesEntry
static void ApplyLauncherWakeup(void)
Definition: launcher.c:1068
static void logicalrep_launcher_attach_dshmem(void)
Definition: launcher.c:945
int max_parallel_apply_workers_per_subscription
Definition: launcher.c:59
static void logicalrep_worker_cleanup(LogicalRepWorker *worker)
Definition: launcher.c:753
Assert(fmt[strlen(fmt) - 1] !='\n')
List * lappend(List *list, void *datum)
Definition: list.c:338
void LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
Definition: lock.c:2154
#define DEFAULT_LOCKMETHOD
Definition: lock.h:125
#define AccessShareLock
Definition: lockdefs.h:36
bool LWLockHeldByMe(LWLock *lock)
Definition: lwlock.c:1919
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1195
bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1963
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1803
@ LWTRANCHE_LAUNCHER_HASH
Definition: lwlock.h:208
@ LWTRANCHE_LAUNCHER_DSA
Definition: lwlock.h:207
@ LW_SHARED
Definition: lwlock.h:116
@ LW_EXCLUSIVE
Definition: lwlock.h:115
char * pstrdup(const char *in)
Definition: mcxt.c:1624
MemoryContext TopMemoryContext
Definition: mcxt.c:141
void * palloc0(Size size)
Definition: mcxt.c:1241
MemoryContext CurrentMemoryContext
Definition: mcxt.c:135
void MemoryContextDelete(MemoryContext context)
Definition: mcxt.c:387
#define AllocSetContextCreate
Definition: memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:153
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:121
#define InvalidPid
Definition: miscadmin.h:32
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:138
void * arg
static time_t start_time
Definition: pg_ctl.c:94
#define lfirst(lc)
Definition: pg_list.h:172
#define NIL
Definition: pg_list.h:68
static Datum LSNGetDatum(XLogRecPtr X)
Definition: pg_lsn.h:28
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:224
NameData subname
FormData_pg_subscription * Form_pg_subscription
#define die(msg)
Definition: pg_test_fsync.c:95
pqsigfunc pqsignal(int signo, pqsigfunc func)
#define snprintf
Definition: port.h:238
uintptr_t Datum
Definition: postgres.h:64
static Datum ObjectIdGetDatum(Oid X)
Definition: postgres.h:252
static Datum Int32GetDatum(int32 X)
Definition: postgres.h:212
#define InvalidOid
Definition: postgres_ext.h:36
unsigned int Oid
Definition: postgres_ext.h:31
void BackgroundWorkerInitializeConnection(const char *dbname, const char *username, uint32 flags)
Definition: postmaster.c:5600
void BackgroundWorkerUnblockSignals(void)
Definition: postmaster.c:5660
bool IsBackendPid(int pid)
Definition: procarray.c:3280
@ ForwardScanDirection
Definition: sdir.h:28
Size add_size(Size s1, Size s2)
Definition: shmem.c:502
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:396
Size mul_size(Size s1, Size s2)
Definition: shmem.c:519
int max_replication_slots
Definition: slot.c:101
Snapshot GetTransactionSnapshot(void)
Definition: snapmgr.c:251
#define SpinLockInit(lock)
Definition: spin.h:60
PGPROC * MyProc
Definition: proc.c:66
char bgw_function_name[BGW_MAXLEN]
Definition: bgworker.h:97
Datum bgw_main_arg
Definition: bgworker.h:98
char bgw_name[BGW_MAXLEN]
Definition: bgworker.h:91
int bgw_restart_time
Definition: bgworker.h:95
char bgw_type[BGW_MAXLEN]
Definition: bgworker.h:92
BgWorkerStartTime bgw_start_time
Definition: bgworker.h:94
char bgw_extra[BGW_EXTRALEN]
Definition: bgworker.h:99
pid_t bgw_notify_pid
Definition: bgworker.h:100
char bgw_library_name[BGW_MAXLEN]
Definition: bgworker.h:96
TimestampTz last_start_time
Definition: launcher.c:82
Definition: pg_list.h:54
dsa_handle last_start_dsa
Definition: launcher.c:69
dshash_table_handle last_start_dsh
Definition: launcher.c:70
LogicalRepWorker workers[FLEXIBLE_ARRAY_MEMBER]
Definition: launcher.c:73
XLogRecPtr relstate_lsn
TimestampTz last_recv_time
TimestampTz launch_time
TimestampTz reply_time
FileSet * stream_fileset
XLogRecPtr reply_lsn
XLogRecPtr last_lsn
TimestampTz last_send_time
int pid
Definition: proc.h:186
Latch procLatch
Definition: proc.h:170
TupleDesc setDesc
Definition: execnodes.h:332
Tuplestorestate * setResult
Definition: execnodes.h:331
Definition: dsa.c:367
void table_close(Relation relation, LOCKMODE lockmode)
Definition: table.c:126
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition: table.c:40
TableScanDesc table_beginscan_catalog(Relation relation, int nkeys, struct ScanKeyData *key)
Definition: tableam.c:112
static void table_endscan(TableScanDesc scan)
Definition: tableam.h:993
void tuplestore_putvalues(Tuplestorestate *state, TupleDesc tdesc, Datum *values, bool *isnull)
Definition: tuplestore.c:750
static Datum TimestampTzGetDatum(TimestampTz X)
Definition: timestamp.h:52
@ WAIT_EVENT_LOGICAL_LAUNCHER_MAIN
Definition: wait_event.h:44
@ WAIT_EVENT_BGWORKER_STARTUP
Definition: wait_event.h:88
@ WAIT_EVENT_BGWORKER_SHUTDOWN
Definition: wait_event.h:87
int wal_receiver_timeout
Definition: walreceiver.c:91
#define walrcv_disconnect(conn)
Definition: walreceiver.h:436
#define SIGHUP
Definition: win32_port.h:176
#define kill(pid, sig)
Definition: win32_port.h:489
#define SIGUSR1
Definition: win32_port.h:188
#define isParallelApplyWorker(worker)
static bool am_leader_apply_worker(void)
void StartTransactionCommand(void)
Definition: xact.c:2944
void CommitTransactionCommand(void)
Definition: xact.c:3041
int wal_retrieve_retry_interval
Definition: xlog.c:137
#define XLogRecPtrIsInvalid(r)
Definition: xlogdefs.h:29
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28