PostgreSQL Source Code git master
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Pages
launcher.c
Go to the documentation of this file.
1/*-------------------------------------------------------------------------
2 * launcher.c
3 * PostgreSQL logical replication worker launcher process
4 *
5 * Copyright (c) 2016-2024, PostgreSQL Global Development Group
6 *
7 * IDENTIFICATION
8 * src/backend/replication/logical/launcher.c
9 *
10 * NOTES
11 * This module contains the logical replication worker launcher which
12 * uses the background worker infrastructure to start the logical
13 * replication workers for every enabled subscription.
14 *
15 *-------------------------------------------------------------------------
16 */
17
#include "postgres.h"

#include "access/heapam.h"
#include "access/htup.h"
#include "access/htup_details.h"
#include "access/tableam.h"
#include "access/xact.h"
#include "catalog/pg_subscription.h"
#include "catalog/pg_subscription_rel.h"
#include "funcapi.h"
#include "lib/dshash.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "postmaster/bgworker.h"
#include "postmaster/interrupt.h"
#include "replication/logicallauncher.h"
#include "replication/slot.h"
#include "replication/walreceiver.h"
#include "replication/worker_internal.h"
#include "storage/ipc.h"
#include "storage/proc.h"
#include "storage/procarray.h"
#include "tcop/tcopprot.h"
#include "utils/builtins.h"
#include "utils/memutils.h"
#include "utils/pg_lsn.h"
#include "utils/snapmgr.h"
45
46/* max sleep time between cycles (3min) */
47#define DEFAULT_NAPTIME_PER_CYCLE 180000L
48
49/* GUC variables */
53
55
56typedef struct LogicalRepCtxStruct
57{
58 /* Supervisor process. */
60
61 /* Hash table holding last start times of subscriptions' apply workers. */
64
65 /* Background workers. */
68
70
71/* an entry in the last-start-times shared hash table */
73{
74 Oid subid; /* OID of logrep subscription (hash key) */
75 TimestampTz last_start_time; /* last time its apply worker was started */
77
78/* parameters for the last-start-times shared hash table */
80 sizeof(Oid),
86};
87
90
91static bool on_commit_launcher_wakeup = false;
92
93
94static void ApplyLauncherWakeup(void);
95static void logicalrep_launcher_onexit(int code, Datum arg);
96static void logicalrep_worker_onexit(int code, Datum arg);
97static void logicalrep_worker_detach(void);
99static int logicalrep_pa_worker_count(Oid subid);
100static void logicalrep_launcher_attach_dshmem(void);
103
104
105/*
106 * Load the list of subscriptions.
107 *
108 * Only the fields interesting for worker start/stop functions are filled for
109 * each subscription.
110 */
111static List *
113{
114 List *res = NIL;
115 Relation rel;
116 TableScanDesc scan;
117 HeapTuple tup;
118 MemoryContext resultcxt;
119
120 /* This is the context that we will allocate our output data in */
121 resultcxt = CurrentMemoryContext;
122
123 /*
124 * Start a transaction so we can access pg_subscription.
125 */
127
128 rel = table_open(SubscriptionRelationId, AccessShareLock);
129 scan = table_beginscan_catalog(rel, 0, NULL);
130
132 {
134 Subscription *sub;
135 MemoryContext oldcxt;
136
137 /*
138 * Allocate our results in the caller's context, not the
139 * transaction's. We do this inside the loop, and restore the original
140 * context at the end, so that leaky things like heap_getnext() are
141 * not called in a potentially long-lived context.
142 */
143 oldcxt = MemoryContextSwitchTo(resultcxt);
144
145 sub = (Subscription *) palloc0(sizeof(Subscription));
146 sub->oid = subform->oid;
147 sub->dbid = subform->subdbid;
148 sub->owner = subform->subowner;
149 sub->enabled = subform->subenabled;
150 sub->name = pstrdup(NameStr(subform->subname));
151 /* We don't fill fields we are not interested in. */
152
153 res = lappend(res, sub);
154 MemoryContextSwitchTo(oldcxt);
155 }
156
157 table_endscan(scan);
159
161
162 return res;
163}
164
165/*
166 * Wait for a background worker to start up and attach to the shmem context.
167 *
168 * This is only needed for cleaning up the shared memory in case the worker
169 * fails to attach.
170 *
171 * Returns whether the attach was successful.
172 */
173static bool
175 uint16 generation,
177{
178 BgwHandleStatus status;
179 int rc;
180
181 for (;;)
182 {
183 pid_t pid;
184
186
187 LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
188
189 /* Worker either died or has started. Return false if died. */
190 if (!worker->in_use || worker->proc)
191 {
192 LWLockRelease(LogicalRepWorkerLock);
193 return worker->in_use;
194 }
195
196 LWLockRelease(LogicalRepWorkerLock);
197
198 /* Check if worker has died before attaching, and clean up after it. */
199 status = GetBackgroundWorkerPid(handle, &pid);
200
201 if (status == BGWH_STOPPED)
202 {
203 LWLockAcquire(LogicalRepWorkerLock, LW_EXCLUSIVE);
204 /* Ensure that this was indeed the worker we waited for. */
205 if (generation == worker->generation)
207 LWLockRelease(LogicalRepWorkerLock);
208 return false;
209 }
210
211 /*
212 * We need timeout because we generally don't get notified via latch
213 * about the worker attach. But we don't expect to have to wait long.
214 */
215 rc = WaitLatch(MyLatch,
217 10L, WAIT_EVENT_BGWORKER_STARTUP);
218
219 if (rc & WL_LATCH_SET)
220 {
223 }
224 }
225}
226
227/*
228 * Walks the workers array and searches for one that matches given
229 * subscription id and relid.
230 *
231 * We are only interested in the leader apply worker or table sync worker.
232 */
234logicalrep_worker_find(Oid subid, Oid relid, bool only_running)
235{
236 int i;
237 LogicalRepWorker *res = NULL;
238
239 Assert(LWLockHeldByMe(LogicalRepWorkerLock));
240
241 /* Search for attached worker for a given subscription id. */
242 for (i = 0; i < max_logical_replication_workers; i++)
243 {
245
246 /* Skip parallel apply workers. */
248 continue;
249
250 if (w->in_use && w->subid == subid && w->relid == relid &&
251 (!only_running || w->proc))
252 {
253 res = w;
254 break;
255 }
256 }
257
258 return res;
259}
260
261/*
262 * Similar to logicalrep_worker_find(), but returns a list of all workers for
263 * the subscription, instead of just one.
264 */
265List *
266logicalrep_workers_find(Oid subid, bool only_running, bool acquire_lock)
267{
268 int i;
269 List *res = NIL;
270
271 if (acquire_lock)
272 LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
273
274 Assert(LWLockHeldByMe(LogicalRepWorkerLock));
275
276 /* Search for attached worker for a given subscription id. */
277 for (i = 0; i < max_logical_replication_workers; i++)
278 {
280
281 if (w->in_use && w->subid == subid && (!only_running || w->proc))
282 res = lappend(res, w);
283 }
284
285 if (acquire_lock)
286 LWLockRelease(LogicalRepWorkerLock);
287
288 return res;
289}
290
291/*
292 * Start new logical replication background worker, if possible.
293 *
294 * Returns true on success, false on failure.
295 */
296bool
298 Oid dbid, Oid subid, const char *subname, Oid userid,
299 Oid relid, dsm_handle subworker_dsm)
300{
302 BackgroundWorkerHandle *bgw_handle;
303 uint16 generation;
304 int i;
305 int slot = 0;
306 LogicalRepWorker *worker = NULL;
307 int nsyncworkers;
308 int nparallelapplyworkers;
310 bool is_tablesync_worker = (wtype == WORKERTYPE_TABLESYNC);
311 bool is_parallel_apply_worker = (wtype == WORKERTYPE_PARALLEL_APPLY);
312
313 /*----------
314 * Sanity checks:
315 * - must be valid worker type
316 * - tablesync workers are only ones to have relid
317 * - parallel apply worker is the only kind of subworker
318 */
319 Assert(wtype != WORKERTYPE_UNKNOWN);
320 Assert(is_tablesync_worker == OidIsValid(relid));
321 Assert(is_parallel_apply_worker == (subworker_dsm != DSM_HANDLE_INVALID));
322
324 (errmsg_internal("starting logical replication worker for subscription \"%s\"",
325 subname)));
326
327 /* Report this after the initial starting message for consistency. */
328 if (max_replication_slots == 0)
330 (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
331 errmsg("cannot start logical replication workers when \"max_replication_slots\"=0")));
332
333 /*
334 * We need to do the modification of the shared memory under lock so that
335 * we have consistent view.
336 */
337 LWLockAcquire(LogicalRepWorkerLock, LW_EXCLUSIVE);
338
339retry:
340 /* Find unused worker slot. */
341 for (i = 0; i < max_logical_replication_workers; i++)
342 {
344
345 if (!w->in_use)
346 {
347 worker = w;
348 slot = i;
349 break;
350 }
351 }
352
353 nsyncworkers = logicalrep_sync_worker_count(subid);
354
356
357 /*
358 * If we didn't find a free slot, try to do garbage collection. The
359 * reason we do this is because if some worker failed to start up and its
360 * parent has crashed while waiting, the in_use state was never cleared.
361 */
362 if (worker == NULL || nsyncworkers >= max_sync_workers_per_subscription)
363 {
364 bool did_cleanup = false;
365
366 for (i = 0; i < max_logical_replication_workers; i++)
367 {
369
370 /*
371 * If the worker was marked in use but didn't manage to attach in
372 * time, clean it up.
373 */
374 if (w->in_use && !w->proc &&
377 {
379 "logical replication worker for subscription %u took too long to start; canceled",
380 w->subid);
381
383 did_cleanup = true;
384 }
385 }
386
387 if (did_cleanup)
388 goto retry;
389 }
390
391 /*
392 * We don't allow to invoke more sync workers once we have reached the
393 * sync worker limit per subscription. So, just return silently as we
394 * might get here because of an otherwise harmless race condition.
395 */
396 if (is_tablesync_worker && nsyncworkers >= max_sync_workers_per_subscription)
397 {
398 LWLockRelease(LogicalRepWorkerLock);
399 return false;
400 }
401
402 nparallelapplyworkers = logicalrep_pa_worker_count(subid);
403
404 /*
405 * Return false if the number of parallel apply workers reached the limit
406 * per subscription.
407 */
408 if (is_parallel_apply_worker &&
409 nparallelapplyworkers >= max_parallel_apply_workers_per_subscription)
410 {
411 LWLockRelease(LogicalRepWorkerLock);
412 return false;
413 }
414
415 /*
416 * However if there are no more free worker slots, inform user about it
417 * before exiting.
418 */
419 if (worker == NULL)
420 {
421 LWLockRelease(LogicalRepWorkerLock);
423 (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
424 errmsg("out of logical replication worker slots"),
425 errhint("You might need to increase \"%s\".", "max_logical_replication_workers")));
426 return false;
427 }
428
429 /* Prepare the worker slot. */
430 worker->type = wtype;
431 worker->launch_time = now;
432 worker->in_use = true;
433 worker->generation++;
434 worker->proc = NULL;
435 worker->dbid = dbid;
436 worker->userid = userid;
437 worker->subid = subid;
438 worker->relid = relid;
439 worker->relstate = SUBREL_STATE_UNKNOWN;
441 worker->stream_fileset = NULL;
442 worker->leader_pid = is_parallel_apply_worker ? MyProcPid : InvalidPid;
443 worker->parallel_apply = is_parallel_apply_worker;
444 worker->last_lsn = InvalidXLogRecPtr;
449
450 /* Before releasing lock, remember generation for future identification. */
451 generation = worker->generation;
452
453 LWLockRelease(LogicalRepWorkerLock);
454
455 /* Register the new dynamic worker. */
456 memset(&bgw, 0, sizeof(bgw));
460 snprintf(bgw.bgw_library_name, MAXPGPATH, "postgres");
461
462 switch (worker->type)
463 {
464 case WORKERTYPE_APPLY:
465 snprintf(bgw.bgw_function_name, BGW_MAXLEN, "ApplyWorkerMain");
467 "logical replication apply worker for subscription %u",
468 subid);
469 snprintf(bgw.bgw_type, BGW_MAXLEN, "logical replication apply worker");
470 break;
471
473 snprintf(bgw.bgw_function_name, BGW_MAXLEN, "ParallelApplyWorkerMain");
475 "logical replication parallel apply worker for subscription %u",
476 subid);
477 snprintf(bgw.bgw_type, BGW_MAXLEN, "logical replication parallel worker");
478
479 memcpy(bgw.bgw_extra, &subworker_dsm, sizeof(dsm_handle));
480 break;
481
483 snprintf(bgw.bgw_function_name, BGW_MAXLEN, "TablesyncWorkerMain");
485 "logical replication tablesync worker for subscription %u sync %u",
486 subid,
487 relid);
488 snprintf(bgw.bgw_type, BGW_MAXLEN, "logical replication tablesync worker");
489 break;
490
492 /* Should never happen. */
493 elog(ERROR, "unknown worker type");
494 }
495
498 bgw.bgw_main_arg = Int32GetDatum(slot);
499
500 if (!RegisterDynamicBackgroundWorker(&bgw, &bgw_handle))
501 {
502 /* Failed to start worker, so clean up the worker slot. */
503 LWLockAcquire(LogicalRepWorkerLock, LW_EXCLUSIVE);
504 Assert(generation == worker->generation);
506 LWLockRelease(LogicalRepWorkerLock);
507
509 (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
510 errmsg("out of background worker slots"),
511 errhint("You might need to increase \"%s\".", "max_worker_processes")));
512 return false;
513 }
514
515 /* Now wait until it attaches. */
516 return WaitForReplicationWorkerAttach(worker, generation, bgw_handle);
517}
518
519/*
520 * Internal function to stop the worker and wait until it detaches from the
521 * slot.
522 */
523static void
525{
526 uint16 generation;
527
528 Assert(LWLockHeldByMeInMode(LogicalRepWorkerLock, LW_SHARED));
529
530 /*
531 * Remember which generation was our worker so we can check if what we see
532 * is still the same one.
533 */
534 generation = worker->generation;
535
536 /*
537 * If we found a worker but it does not have proc set then it is still
538 * starting up; wait for it to finish starting and then kill it.
539 */
540 while (worker->in_use && !worker->proc)
541 {
542 int rc;
543
544 LWLockRelease(LogicalRepWorkerLock);
545
546 /* Wait a bit --- we don't expect to have to wait long. */
547 rc = WaitLatch(MyLatch,
549 10L, WAIT_EVENT_BGWORKER_STARTUP);
550
551 if (rc & WL_LATCH_SET)
552 {
555 }
556
557 /* Recheck worker status. */
558 LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
559
560 /*
561 * Check whether the worker slot is no longer used, which would mean
562 * that the worker has exited, or whether the worker generation is
563 * different, meaning that a different worker has taken the slot.
564 */
565 if (!worker->in_use || worker->generation != generation)
566 return;
567
568 /* Worker has assigned proc, so it has started. */
569 if (worker->proc)
570 break;
571 }
572
573 /* Now terminate the worker ... */
574 kill(worker->proc->pid, signo);
575
576 /* ... and wait for it to die. */
577 for (;;)
578 {
579 int rc;
580
581 /* is it gone? */
582 if (!worker->proc || worker->generation != generation)
583 break;
584
585 LWLockRelease(LogicalRepWorkerLock);
586
587 /* Wait a bit --- we don't expect to have to wait long. */
588 rc = WaitLatch(MyLatch,
590 10L, WAIT_EVENT_BGWORKER_SHUTDOWN);
591
592 if (rc & WL_LATCH_SET)
593 {
596 }
597
598 LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
599 }
600}
601
602/*
603 * Stop the logical replication worker for subid/relid, if any.
604 */
605void
607{
608 LogicalRepWorker *worker;
609
610 LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
611
612 worker = logicalrep_worker_find(subid, relid, false);
613
614 if (worker)
615 {
617 logicalrep_worker_stop_internal(worker, SIGTERM);
618 }
619
620 LWLockRelease(LogicalRepWorkerLock);
621}
622
623/*
624 * Stop the given logical replication parallel apply worker.
625 *
626 * Node that the function sends SIGINT instead of SIGTERM to the parallel apply
627 * worker so that the worker exits cleanly.
628 */
629void
631{
632 int slot_no;
633 uint16 generation;
634 LogicalRepWorker *worker;
635
636 SpinLockAcquire(&winfo->shared->mutex);
637 generation = winfo->shared->logicalrep_worker_generation;
638 slot_no = winfo->shared->logicalrep_worker_slot_no;
639 SpinLockRelease(&winfo->shared->mutex);
640
641 Assert(slot_no >= 0 && slot_no < max_logical_replication_workers);
642
643 /*
644 * Detach from the error_mq_handle for the parallel apply worker before
645 * stopping it. This prevents the leader apply worker from trying to
646 * receive the message from the error queue that might already be detached
647 * by the parallel apply worker.
648 */
649 if (winfo->error_mq_handle)
650 {
652 winfo->error_mq_handle = NULL;
653 }
654
655 LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
656
657 worker = &LogicalRepCtx->workers[slot_no];
659
660 /*
661 * Only stop the worker if the generation matches and the worker is alive.
662 */
663 if (worker->generation == generation && worker->proc)
664 logicalrep_worker_stop_internal(worker, SIGINT);
665
666 LWLockRelease(LogicalRepWorkerLock);
667}
668
669/*
670 * Wake up (using latch) any logical replication worker for specified sub/rel.
671 */
672void
674{
675 LogicalRepWorker *worker;
676
677 LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
678
679 worker = logicalrep_worker_find(subid, relid, true);
680
681 if (worker)
683
684 LWLockRelease(LogicalRepWorkerLock);
685}
686
687/*
688 * Wake up (using latch) the specified logical replication worker.
689 *
690 * Caller must hold lock, else worker->proc could change under us.
691 */
692void
694{
695 Assert(LWLockHeldByMe(LogicalRepWorkerLock));
696
697 SetLatch(&worker->proc->procLatch);
698}
699
700/*
701 * Attach to a slot.
702 */
703void
705{
706 /* Block concurrent access. */
707 LWLockAcquire(LogicalRepWorkerLock, LW_EXCLUSIVE);
708
709 Assert(slot >= 0 && slot < max_logical_replication_workers);
711
713 {
714 LWLockRelease(LogicalRepWorkerLock);
716 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
717 errmsg("logical replication worker slot %d is empty, cannot attach",
718 slot)));
719 }
720
722 {
723 LWLockRelease(LogicalRepWorkerLock);
725 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
726 errmsg("logical replication worker slot %d is already used by "
727 "another worker, cannot attach", slot)));
728 }
729
732
733 LWLockRelease(LogicalRepWorkerLock);
734}
735
736/*
737 * Stop the parallel apply workers if any, and detach the leader apply worker
738 * (cleans up the worker info).
739 */
740static void
742{
743 /* Stop the parallel apply workers. */
745 {
746 List *workers;
747 ListCell *lc;
748
749 /*
750 * Detach from the error_mq_handle for all parallel apply workers
751 * before terminating them. This prevents the leader apply worker from
752 * receiving the worker termination message and sending it to logs
753 * when the same is already done by the parallel worker.
754 */
756
757 LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
758
759 workers = logicalrep_workers_find(MyLogicalRepWorker->subid, true, false);
760 foreach(lc, workers)
761 {
763
766 }
767
768 LWLockRelease(LogicalRepWorkerLock);
769 }
770
771 /* Block concurrent access. */
772 LWLockAcquire(LogicalRepWorkerLock, LW_EXCLUSIVE);
773
775
776 LWLockRelease(LogicalRepWorkerLock);
777}
778
779/*
780 * Clean up worker info.
781 */
782static void
784{
785 Assert(LWLockHeldByMeInMode(LogicalRepWorkerLock, LW_EXCLUSIVE));
786
787 worker->type = WORKERTYPE_UNKNOWN;
788 worker->in_use = false;
789 worker->proc = NULL;
790 worker->dbid = InvalidOid;
791 worker->userid = InvalidOid;
792 worker->subid = InvalidOid;
793 worker->relid = InvalidOid;
794 worker->leader_pid = InvalidPid;
795 worker->parallel_apply = false;
796}
797
798/*
799 * Cleanup function for logical replication launcher.
800 *
801 * Called on logical replication launcher exit.
802 */
803static void
805{
807}
808
809/*
810 * Cleanup function.
811 *
812 * Called on logical replication worker exit.
813 */
814static void
816{
817 /* Disconnect gracefully from the remote side. */
820
822
823 /* Cleanup fileset used for streaming transactions. */
826
827 /*
828 * Session level locks may be acquired outside of a transaction in
829 * parallel apply mode and will not be released when the worker
830 * terminates, so manually release all locks before the worker exits.
831 *
832 * The locks will be acquired once the worker is initialized.
833 */
836
838}
839
840/*
841 * Count the number of registered (not necessarily running) sync workers
842 * for a subscription.
843 */
844int
846{
847 int i;
848 int res = 0;
849
850 Assert(LWLockHeldByMe(LogicalRepWorkerLock));
851
852 /* Search for attached worker for a given subscription id. */
853 for (i = 0; i < max_logical_replication_workers; i++)
854 {
856
857 if (isTablesyncWorker(w) && w->subid == subid)
858 res++;
859 }
860
861 return res;
862}
863
864/*
865 * Count the number of registered (but not necessarily running) parallel apply
866 * workers for a subscription.
867 */
868static int
870{
871 int i;
872 int res = 0;
873
874 Assert(LWLockHeldByMe(LogicalRepWorkerLock));
875
876 /*
877 * Scan all attached parallel apply workers, only counting those which
878 * have the given subscription id.
879 */
880 for (i = 0; i < max_logical_replication_workers; i++)
881 {
883
884 if (isParallelApplyWorker(w) && w->subid == subid)
885 res++;
886 }
887
888 return res;
889}
890
891/*
892 * ApplyLauncherShmemSize
893 * Compute space needed for replication launcher shared memory
894 */
895Size
897{
898 Size size;
899
900 /*
901 * Need the fixed struct and the array of LogicalRepWorker.
902 */
903 size = sizeof(LogicalRepCtxStruct);
904 size = MAXALIGN(size);
906 sizeof(LogicalRepWorker)));
907 return size;
908}
909
910/*
911 * ApplyLauncherRegister
912 * Register a background worker running the logical replication launcher.
913 */
914void
916{
918
919 /*
920 * The logical replication launcher is disabled during binary upgrades, to
921 * prevent logical replication workers from running on the source cluster.
922 * That could cause replication origins to move forward after having been
923 * copied to the target cluster, potentially creating conflicts with the
924 * copied data files.
925 */
927 return;
928
929 memset(&bgw, 0, sizeof(bgw));
933 snprintf(bgw.bgw_library_name, MAXPGPATH, "postgres");
934 snprintf(bgw.bgw_function_name, BGW_MAXLEN, "ApplyLauncherMain");
936 "logical replication launcher");
938 "logical replication launcher");
939 bgw.bgw_restart_time = 5;
940 bgw.bgw_notify_pid = 0;
941 bgw.bgw_main_arg = (Datum) 0;
942
944}
945
946/*
947 * ApplyLauncherShmemInit
948 * Allocate and initialize replication launcher shared memory
949 */
950void
952{
953 bool found;
954
956 ShmemInitStruct("Logical Replication Launcher Data",
958 &found);
959
960 if (!found)
961 {
962 int slot;
963
965
968
969 /* Initialize memory and spin locks for each worker slot. */
970 for (slot = 0; slot < max_logical_replication_workers; slot++)
971 {
972 LogicalRepWorker *worker = &LogicalRepCtx->workers[slot];
973
974 memset(worker, 0, sizeof(LogicalRepWorker));
975 SpinLockInit(&worker->relmutex);
976 }
977 }
978}
979
980/*
981 * Initialize or attach to the dynamic shared hash table that stores the
982 * last-start times, if not already done.
983 * This must be called before accessing the table.
984 */
985static void
987{
988 MemoryContext oldcontext;
989
990 /* Quick exit if we already did this. */
992 last_start_times != NULL)
993 return;
994
995 /* Otherwise, use a lock to ensure only one process creates the table. */
996 LWLockAcquire(LogicalRepWorkerLock, LW_EXCLUSIVE);
997
998 /* Be sure any local memory allocated by DSA routines is persistent. */
1000
1002 {
1003 /* Initialize dynamic shared hash table for last-start times. */
1008
1009 /* Store handles in shared memory for other backends to use. */
1012 }
1013 else if (!last_start_times)
1014 {
1015 /* Attach to existing dynamic shared hash table. */
1020 }
1021
1022 MemoryContextSwitchTo(oldcontext);
1023 LWLockRelease(LogicalRepWorkerLock);
1024}
1025
1026/*
1027 * Set the last-start time for the subscription.
1028 */
1029static void
1031{
1033 bool found;
1034
1036
1037 entry = dshash_find_or_insert(last_start_times, &subid, &found);
1038 entry->last_start_time = start_time;
1040}
1041
1042/*
1043 * Return the last-start time for the subscription, or 0 if there isn't one.
1044 */
1045static TimestampTz
1047{
1049 TimestampTz ret;
1050
1052
1053 entry = dshash_find(last_start_times, &subid, false);
1054 if (entry == NULL)
1055 return 0;
1056
1057 ret = entry->last_start_time;
1059
1060 return ret;
1061}
1062
1063/*
1064 * Remove the last-start-time entry for the subscription, if one exists.
1065 *
1066 * This has two use-cases: to remove the entry related to a subscription
1067 * that's been deleted or disabled (just to avoid leaking shared memory),
1068 * and to allow immediate restart of an apply worker that has exited
1069 * due to subscription parameter changes.
1070 */
1071void
1073{
1075
1076 (void) dshash_delete_key(last_start_times, &subid);
1077}
1078
1079/*
1080 * Wakeup the launcher on commit if requested.
1081 */
1082void
1084{
1085 if (isCommit)
1086 {
1089 }
1090
1092}
1093
1094/*
1095 * Request wakeup of the launcher on commit of the transaction.
1096 *
1097 * This is used to send launcher signal to stop sleeping and process the
1098 * subscriptions when current transaction commits. Should be used when new
1099 * tuple was added to the pg_subscription catalog.
1100*/
1101void
1103{
1106}
1107
1108static void
1110{
1111 if (LogicalRepCtx->launcher_pid != 0)
1113}
1114
1115/*
1116 * Main loop for the apply launcher process.
1117 */
1118void
1120{
1122 (errmsg_internal("logical replication launcher started")));
1123
1125
1128
1129 /* Establish signal handlers. */
1131 pqsignal(SIGTERM, die);
1133
1134 /*
1135 * Establish connection to nailed catalogs (we only ever access
1136 * pg_subscription).
1137 */
1139
1140 /* Enter main loop */
1141 for (;;)
1142 {
1143 int rc;
1144 List *sublist;
1145 ListCell *lc;
1146 MemoryContext subctx;
1147 MemoryContext oldctx;
1148 long wait_time = DEFAULT_NAPTIME_PER_CYCLE;
1149
1151
1152 /* Use temporary context to avoid leaking memory across cycles. */
1154 "Logical Replication Launcher sublist",
1156 oldctx = MemoryContextSwitchTo(subctx);
1157
1158 /* Start any missing workers for enabled subscriptions. */
1159 sublist = get_subscription_list();
1160 foreach(lc, sublist)
1161 {
1162 Subscription *sub = (Subscription *) lfirst(lc);
1164 TimestampTz last_start;
1166 long elapsed;
1167
1168 if (!sub->enabled)
1169 continue;
1170
1171 LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
1172 w = logicalrep_worker_find(sub->oid, InvalidOid, false);
1173 LWLockRelease(LogicalRepWorkerLock);
1174
1175 if (w != NULL)
1176 continue; /* worker is running already */
1177
1178 /*
1179 * If the worker is eligible to start now, launch it. Otherwise,
1180 * adjust wait_time so that we'll wake up as soon as it can be
1181 * started.
1182 *
1183 * Each subscription's apply worker can only be restarted once per
1184 * wal_retrieve_retry_interval, so that errors do not cause us to
1185 * repeatedly restart the worker as fast as possible. In cases
1186 * where a restart is expected (e.g., subscription parameter
1187 * changes), another process should remove the last-start entry
1188 * for the subscription so that the worker can be restarted
1189 * without waiting for wal_retrieve_retry_interval to elapse.
1190 */
1191 last_start = ApplyLauncherGetWorkerStartTime(sub->oid);
1193 if (last_start == 0 ||
1195 {
1198 sub->dbid, sub->oid, sub->name,
1199 sub->owner, InvalidOid,
1201 }
1202 else
1203 {
1204 wait_time = Min(wait_time,
1205 wal_retrieve_retry_interval - elapsed);
1206 }
1207 }
1208
1209 /* Switch back to original memory context. */
1210 MemoryContextSwitchTo(oldctx);
1211 /* Clean the temporary memory. */
1212 MemoryContextDelete(subctx);
1213
1214 /* Wait for more work. */
1215 rc = WaitLatch(MyLatch,
1217 wait_time,
1218 WAIT_EVENT_LOGICAL_LAUNCHER_MAIN);
1219
1220 if (rc & WL_LATCH_SET)
1221 {
1224 }
1225
1227 {
1228 ConfigReloadPending = false;
1230 }
1231 }
1232
1233 /* Not reachable */
1234}
1235
1236/*
1237 * Is current process the logical replication launcher?
1238 */
1239bool
1241{
1243}
1244
1245/*
1246 * Return the pid of the leader apply worker if the given pid is the pid of a
1247 * parallel apply worker, otherwise, return InvalidPid.
1248 */
1249pid_t
1251{
1252 int leader_pid = InvalidPid;
1253 int i;
1254
1255 LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
1256
1257 for (i = 0; i < max_logical_replication_workers; i++)
1258 {
1260
1261 if (isParallelApplyWorker(w) && w->proc && pid == w->proc->pid)
1262 {
1263 leader_pid = w->leader_pid;
1264 break;
1265 }
1266 }
1267
1268 LWLockRelease(LogicalRepWorkerLock);
1269
1270 return leader_pid;
1271}
1272
1273/*
1274 * Returns state of the subscriptions.
1275 */
1276Datum
1278{
1279#define PG_STAT_GET_SUBSCRIPTION_COLS 10
1280 Oid subid = PG_ARGISNULL(0) ? InvalidOid : PG_GETARG_OID(0);
1281 int i;
1282 ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
1283
1284 InitMaterializedSRF(fcinfo, 0);
1285
1286 /* Make sure we get consistent view of the workers. */
1287 LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
1288
1289 for (i = 0; i < max_logical_replication_workers; i++)
1290 {
1291 /* for each row */
1293 bool nulls[PG_STAT_GET_SUBSCRIPTION_COLS] = {0};
1294 int worker_pid;
1295 LogicalRepWorker worker;
1296
1297 memcpy(&worker, &LogicalRepCtx->workers[i],
1298 sizeof(LogicalRepWorker));
1299 if (!worker.proc || !IsBackendPid(worker.proc->pid))
1300 continue;
1301
1302 if (OidIsValid(subid) && worker.subid != subid)
1303 continue;
1304
1305 worker_pid = worker.proc->pid;
1306
1307 values[0] = ObjectIdGetDatum(worker.subid);
1308 if (isTablesyncWorker(&worker))
1309 values[1] = ObjectIdGetDatum(worker.relid);
1310 else
1311 nulls[1] = true;
1312 values[2] = Int32GetDatum(worker_pid);
1313
1314 if (isParallelApplyWorker(&worker))
1315 values[3] = Int32GetDatum(worker.leader_pid);
1316 else
1317 nulls[3] = true;
1318
1319 if (XLogRecPtrIsInvalid(worker.last_lsn))
1320 nulls[4] = true;
1321 else
1322 values[4] = LSNGetDatum(worker.last_lsn);
1323 if (worker.last_send_time == 0)
1324 nulls[5] = true;
1325 else
1327 if (worker.last_recv_time == 0)
1328 nulls[6] = true;
1329 else
1331 if (XLogRecPtrIsInvalid(worker.reply_lsn))
1332 nulls[7] = true;
1333 else
1334 values[7] = LSNGetDatum(worker.reply_lsn);
1335 if (worker.reply_time == 0)
1336 nulls[8] = true;
1337 else
1339
1340 switch (worker.type)
1341 {
1342 case WORKERTYPE_APPLY:
1343 values[9] = CStringGetTextDatum("apply");
1344 break;
1346 values[9] = CStringGetTextDatum("parallel apply");
1347 break;
1349 values[9] = CStringGetTextDatum("table synchronization");
1350 break;
1351 case WORKERTYPE_UNKNOWN:
1352 /* Should never happen. */
1353 elog(ERROR, "unknown worker type");
1354 }
1355
1356 tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc,
1357 values, nulls);
1358
1359 /*
1360 * If only a single subscription was requested, and we found it,
1361 * break.
1362 */
1363 if (OidIsValid(subid))
1364 break;
1365 }
1366
1367 LWLockRelease(LogicalRepWorkerLock);
1368
1369 return (Datum) 0;
1370}
void pa_detach_all_error_mq(void)
bool InitializingApplyWorker
Definition: worker.c:319
WalReceiverConn * LogRepWorkerWalRcvConn
Definition: worker.c:297
long TimestampDifferenceMilliseconds(TimestampTz start_time, TimestampTz stop_time)
Definition: timestamp.c:1756
bool TimestampDifferenceExceeds(TimestampTz start_time, TimestampTz stop_time, int msec)
Definition: timestamp.c:1780
TimestampTz GetCurrentTimestamp(void)
Definition: timestamp.c:1644
Datum now(PG_FUNCTION_ARGS)
Definition: timestamp.c:1608
void RegisterBackgroundWorker(BackgroundWorker *worker)
Definition: bgworker.c:939
void BackgroundWorkerInitializeConnection(const char *dbname, const char *username, uint32 flags)
Definition: bgworker.c:852
void BackgroundWorkerUnblockSignals(void)
Definition: bgworker.c:926
BgwHandleStatus GetBackgroundWorkerPid(BackgroundWorkerHandle *handle, pid_t *pidp)
Definition: bgworker.c:1157
bool RegisterDynamicBackgroundWorker(BackgroundWorker *worker, BackgroundWorkerHandle **handle)
Definition: bgworker.c:1045
#define BGW_NEVER_RESTART
Definition: bgworker.h:85
BgwHandleStatus
Definition: bgworker.h:104
@ BGWH_STOPPED
Definition: bgworker.h:107
@ BgWorkerStart_RecoveryFinished
Definition: bgworker.h:81
#define BGWORKER_BACKEND_DATABASE_CONNECTION
Definition: bgworker.h:60
#define BGWORKER_SHMEM_ACCESS
Definition: bgworker.h:53
#define BGW_MAXLEN
Definition: bgworker.h:86
static Datum values[MAXATTR]
Definition: bootstrap.c:151
#define CStringGetTextDatum(s)
Definition: builtins.h:97
#define NameStr(name)
Definition: c.h:700
#define Min(x, y)
Definition: c.h:958
#define MAXALIGN(LEN)
Definition: c.h:765
#define Assert(condition)
Definition: c.h:812
#define FLEXIBLE_ARRAY_MEMBER
Definition: c.h:417
uint16_t uint16
Definition: c.h:484
#define OidIsValid(objectId)
Definition: c.h:729
size_t Size
Definition: c.h:559
int64 TimestampTz
Definition: timestamp.h:39
#define TIMESTAMP_NOBEGIN(j)
Definition: timestamp.h:159
dsa_area * dsa_attach(dsa_handle handle)
Definition: dsa.c:510
void dsa_pin_mapping(dsa_area *area)
Definition: dsa.c:635
dsa_handle dsa_get_handle(dsa_area *area)
Definition: dsa.c:498
void dsa_pin(dsa_area *area)
Definition: dsa.c:975
dsm_handle dsa_handle
Definition: dsa.h:136
#define DSA_HANDLE_INVALID
Definition: dsa.h:139
#define dsa_create(tranch_id)
Definition: dsa.h:117
bool dshash_delete_key(dshash_table *hash_table, const void *key)
Definition: dshash.c:503
void dshash_memcpy(void *dest, const void *src, size_t size, void *arg)
Definition: dshash.c:590
void dshash_release_lock(dshash_table *hash_table, void *entry)
Definition: dshash.c:558
void * dshash_find(dshash_table *hash_table, const void *key, bool exclusive)
Definition: dshash.c:390
dshash_table_handle dshash_get_hash_table_handle(dshash_table *hash_table)
Definition: dshash.c:367
dshash_table * dshash_attach(dsa_area *area, const dshash_parameters *params, dshash_table_handle handle, void *arg)
Definition: dshash.c:270
void * dshash_find_or_insert(dshash_table *hash_table, const void *key, bool *found)
Definition: dshash.c:433
dshash_hash dshash_memhash(const void *v, size_t size, void *arg)
Definition: dshash.c:581
dshash_table * dshash_create(dsa_area *area, const dshash_parameters *params, void *arg)
Definition: dshash.c:206
int dshash_memcmp(const void *a, const void *b, size_t size, void *arg)
Definition: dshash.c:572
#define DSHASH_HANDLE_INVALID
Definition: dshash.h:27
dsa_pointer dshash_table_handle
Definition: dshash.h:24
uint32 dsm_handle
Definition: dsm_impl.h:55
#define DSM_HANDLE_INVALID
Definition: dsm_impl.h:58
int errmsg_internal(const char *fmt,...)
Definition: elog.c:1157
int errhint(const char *fmt,...)
Definition: elog.c:1317
int errcode(int sqlerrcode)
Definition: elog.c:853
int errmsg(const char *fmt,...)
Definition: elog.c:1070
#define WARNING
Definition: elog.h:36
#define DEBUG1
Definition: elog.h:30
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:225
#define ereport(elevel,...)
Definition: elog.h:149
void FileSetDeleteAll(FileSet *fileset)
Definition: fileset.c:150
#define PG_GETARG_OID(n)
Definition: fmgr.h:275
#define PG_ARGISNULL(n)
Definition: fmgr.h:209
#define PG_FUNCTION_ARGS
Definition: fmgr.h:193
void InitMaterializedSRF(FunctionCallInfo fcinfo, bits32 flags)
Definition: funcapi.c:76
bool IsBinaryUpgrade
Definition: globals.c:120
int MyProcPid
Definition: globals.c:46
struct Latch * MyLatch
Definition: globals.c:62
void ProcessConfigFile(GucContext context)
Definition: guc-file.l:120
@ PGC_SIGHUP
Definition: guc.h:71
HeapTuple heap_getnext(TableScanDesc sscan, ScanDirection direction)
Definition: heapam.c:1246
#define HeapTupleIsValid(tuple)
Definition: htup.h:78
#define GETSTRUCT(TUP)
Definition: htup_details.h:653
volatile sig_atomic_t ConfigReloadPending
Definition: interrupt.c:27
void SignalHandlerForConfigReload(SIGNAL_ARGS)
Definition: interrupt.c:61
void before_shmem_exit(pg_on_exit_callback function, Datum arg)
Definition: ipc.c:337
int i
Definition: isn.c:72
void SetLatch(Latch *latch)
Definition: latch.c:632
void ResetLatch(Latch *latch)
Definition: latch.c:724
int WaitLatch(Latch *latch, int wakeEvents, long timeout, uint32 wait_event_info)
Definition: latch.c:517
#define WL_TIMEOUT
Definition: latch.h:130
#define WL_EXIT_ON_PM_DEATH
Definition: latch.h:132
#define WL_LATCH_SET
Definition: latch.h:127
Datum pg_stat_get_subscription(PG_FUNCTION_ARGS)
Definition: launcher.c:1277
bool logicalrep_worker_launch(LogicalRepWorkerType wtype, Oid dbid, Oid subid, const char *subname, Oid userid, Oid relid, dsm_handle subworker_dsm)
Definition: launcher.c:297
#define DEFAULT_NAPTIME_PER_CYCLE
Definition: launcher.c:47
List * logicalrep_workers_find(Oid subid, bool only_running, bool acquire_lock)
Definition: launcher.c:266
void AtEOXact_ApplyLauncher(bool isCommit)
Definition: launcher.c:1083
void logicalrep_worker_wakeup_ptr(LogicalRepWorker *worker)
Definition: launcher.c:693
Size ApplyLauncherShmemSize(void)
Definition: launcher.c:896
bool IsLogicalLauncher(void)
Definition: launcher.c:1240
void logicalrep_worker_attach(int slot)
Definition: launcher.c:704
static void ApplyLauncherSetWorkerStartTime(Oid subid, TimestampTz start_time)
Definition: launcher.c:1030
static void logicalrep_launcher_onexit(int code, Datum arg)
Definition: launcher.c:804
static dsa_area * last_start_times_dsa
Definition: launcher.c:88
void ApplyLauncherMain(Datum main_arg)
Definition: launcher.c:1119
#define PG_STAT_GET_SUBSCRIPTION_COLS
int max_logical_replication_workers
Definition: launcher.c:50
void logicalrep_pa_worker_stop(ParallelApplyWorkerInfo *winfo)
Definition: launcher.c:630
static int logicalrep_pa_worker_count(Oid subid)
Definition: launcher.c:869
LogicalRepWorker * logicalrep_worker_find(Oid subid, Oid relid, bool only_running)
Definition: launcher.c:234
static bool on_commit_launcher_wakeup
Definition: launcher.c:91
struct LogicalRepCtxStruct LogicalRepCtxStruct
static TimestampTz ApplyLauncherGetWorkerStartTime(Oid subid)
Definition: launcher.c:1046
void logicalrep_worker_wakeup(Oid subid, Oid relid)
Definition: launcher.c:673
void ApplyLauncherShmemInit(void)
Definition: launcher.c:951
static void logicalrep_worker_stop_internal(LogicalRepWorker *worker, int signo)
Definition: launcher.c:524
static dshash_table * last_start_times
Definition: launcher.c:89
void logicalrep_worker_stop(Oid subid, Oid relid)
Definition: launcher.c:606
LogicalRepWorker * MyLogicalRepWorker
Definition: launcher.c:54
void ApplyLauncherWakeupAtCommit(void)
Definition: launcher.c:1102
static const dshash_parameters dsh_params
Definition: launcher.c:79
static LogicalRepCtxStruct * LogicalRepCtx
Definition: launcher.c:69
static void logicalrep_worker_onexit(int code, Datum arg)
Definition: launcher.c:815
pid_t GetLeaderApplyWorkerPid(pid_t pid)
Definition: launcher.c:1250
int max_sync_workers_per_subscription
Definition: launcher.c:51
static void logicalrep_worker_detach(void)
Definition: launcher.c:741
static bool WaitForReplicationWorkerAttach(LogicalRepWorker *worker, uint16 generation, BackgroundWorkerHandle *handle)
Definition: launcher.c:174
int logicalrep_sync_worker_count(Oid subid)
Definition: launcher.c:845
void ApplyLauncherForgetWorkerStartTime(Oid subid)
Definition: launcher.c:1072
void ApplyLauncherRegister(void)
Definition: launcher.c:915
struct LauncherLastStartTimesEntry LauncherLastStartTimesEntry
static void ApplyLauncherWakeup(void)
Definition: launcher.c:1109
static void logicalrep_launcher_attach_dshmem(void)
Definition: launcher.c:986
static List * get_subscription_list(void)
Definition: launcher.c:112
int max_parallel_apply_workers_per_subscription
Definition: launcher.c:52
static void logicalrep_worker_cleanup(LogicalRepWorker *worker)
Definition: launcher.c:783
List * lappend(List *list, void *datum)
Definition: list.c:339
void LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
Definition: lock.c:2216
#define DEFAULT_LOCKMETHOD
Definition: lock.h:125
#define AccessShareLock
Definition: lockdefs.h:36
bool LWLockHeldByMe(LWLock *lock)
Definition: lwlock.c:1893
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1168
bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1937
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1781
@ LWTRANCHE_LAUNCHER_HASH
Definition: lwlock.h:207
@ LWTRANCHE_LAUNCHER_DSA
Definition: lwlock.h:206
@ LW_SHARED
Definition: lwlock.h:115
@ LW_EXCLUSIVE
Definition: lwlock.h:114
char * pstrdup(const char *in)
Definition: mcxt.c:1696
void * palloc0(Size size)
Definition: mcxt.c:1347
MemoryContext TopMemoryContext
Definition: mcxt.c:149
MemoryContext CurrentMemoryContext
Definition: mcxt.c:143
void MemoryContextDelete(MemoryContext context)
Definition: mcxt.c:454
#define AllocSetContextCreate
Definition: memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:160
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:122
#define InvalidPid
Definition: miscadmin.h:32
void * arg
#define MAXPGPATH
static time_t start_time
Definition: pg_ctl.c:95
#define lfirst(lc)
Definition: pg_list.h:172
#define NIL
Definition: pg_list.h:68
static Datum LSNGetDatum(XLogRecPtr X)
Definition: pg_lsn.h:28
NameData subname
FormData_pg_subscription * Form_pg_subscription
#define die(msg)
pqsigfunc pqsignal(int signo, pqsigfunc func)
#define snprintf
Definition: port.h:238
uintptr_t Datum
Definition: postgres.h:64
static Datum ObjectIdGetDatum(Oid X)
Definition: postgres.h:252
static Datum Int32GetDatum(int32 X)
Definition: postgres.h:212
#define InvalidOid
Definition: postgres_ext.h:36
unsigned int Oid
Definition: postgres_ext.h:31
bool IsBackendPid(int pid)
Definition: procarray.c:3291
MemoryContextSwitchTo(old_ctx)
@ ForwardScanDirection
Definition: sdir.h:28
void shm_mq_detach(shm_mq_handle *mqh)
Definition: shm_mq.c:843
Size add_size(Size s1, Size s2)
Definition: shmem.c:488
Size mul_size(Size s1, Size s2)
Definition: shmem.c:505
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:382
static pg_noinline void Size size
Definition: slab.c:607
int max_replication_slots
Definition: slot.c:141
#define SpinLockInit(lock)
Definition: spin.h:57
#define SpinLockRelease(lock)
Definition: spin.h:61
#define SpinLockAcquire(lock)
Definition: spin.h:59
PGPROC * MyProc
Definition: proc.c:66
char bgw_function_name[BGW_MAXLEN]
Definition: bgworker.h:97
Datum bgw_main_arg
Definition: bgworker.h:98
char bgw_name[BGW_MAXLEN]
Definition: bgworker.h:91
int bgw_restart_time
Definition: bgworker.h:95
char bgw_type[BGW_MAXLEN]
Definition: bgworker.h:92
BgWorkerStartTime bgw_start_time
Definition: bgworker.h:94
char bgw_extra[BGW_EXTRALEN]
Definition: bgworker.h:99
pid_t bgw_notify_pid
Definition: bgworker.h:100
char bgw_library_name[MAXPGPATH]
Definition: bgworker.h:96
TimestampTz last_start_time
Definition: launcher.c:75
Definition: pg_list.h:54
dsa_handle last_start_dsa
Definition: launcher.c:62
dshash_table_handle last_start_dsh
Definition: launcher.c:63
LogicalRepWorker workers[FLEXIBLE_ARRAY_MEMBER]
Definition: launcher.c:66
XLogRecPtr relstate_lsn
TimestampTz last_recv_time
LogicalRepWorkerType type
TimestampTz launch_time
TimestampTz reply_time
FileSet * stream_fileset
XLogRecPtr reply_lsn
XLogRecPtr last_lsn
TimestampTz last_send_time
int pid
Definition: proc.h:182
Latch procLatch
Definition: proc.h:169
shm_mq_handle * error_mq_handle
ParallelApplyWorkerShared * shared
TupleDesc setDesc
Definition: execnodes.h:343
Tuplestorestate * setResult
Definition: execnodes.h:342
Definition: dsa.c:348
void table_close(Relation relation, LOCKMODE lockmode)
Definition: table.c:126
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition: table.c:40
TableScanDesc table_beginscan_catalog(Relation relation, int nkeys, struct ScanKeyData *key)
Definition: tableam.c:112
static void table_endscan(TableScanDesc scan)
Definition: tableam.h:1024
void tuplestore_putvalues(Tuplestorestate *state, TupleDesc tdesc, const Datum *values, const bool *isnull)
Definition: tuplestore.c:784
static Datum TimestampTzGetDatum(TimestampTz X)
Definition: timestamp.h:52
int wal_receiver_timeout
Definition: walreceiver.c:88
#define walrcv_disconnect(conn)
Definition: walreceiver.h:467
#define SIGHUP
Definition: win32_port.h:168
#define kill(pid, sig)
Definition: win32_port.h:503
#define SIGUSR1
Definition: win32_port.h:180
#define isParallelApplyWorker(worker)
LogicalRepWorkerType
@ WORKERTYPE_TABLESYNC
@ WORKERTYPE_UNKNOWN
@ WORKERTYPE_PARALLEL_APPLY
@ WORKERTYPE_APPLY
#define isTablesyncWorker(worker)
static bool am_leader_apply_worker(void)
void StartTransactionCommand(void)
Definition: xact.c:3051
void CommitTransactionCommand(void)
Definition: xact.c:3149
int wal_retrieve_retry_interval
Definition: xlog.c:134
#define XLogRecPtrIsInvalid(r)
Definition: xlogdefs.h:29
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28