PostgreSQL Source Code git master
Loading...
Searching...
No Matches
launcher.c
Go to the documentation of this file.
1/*-------------------------------------------------------------------------
2 * launcher.c
3 * PostgreSQL logical replication worker launcher process
4 *
5 * Copyright (c) 2016-2026, PostgreSQL Global Development Group
6 *
7 * IDENTIFICATION
8 * src/backend/replication/logical/launcher.c
9 *
10 * NOTES
11 * This module contains the logical replication worker launcher which
12 * uses the background worker infrastructure to start the logical
13 * replication workers for every enabled subscription.
14 *
15 *-------------------------------------------------------------------------
16 */
17
18#include "postgres.h"
19
20#include "access/heapam.h"
21#include "access/htup.h"
22#include "access/htup_details.h"
23#include "access/tableam.h"
24#include "access/xact.h"
27#include "funcapi.h"
28#include "lib/dshash.h"
29#include "miscadmin.h"
30#include "pgstat.h"
31#include "postmaster/bgworker.h"
34#include "replication/origin.h"
35#include "replication/slot.h"
38#include "storage/ipc.h"
39#include "storage/proc.h"
40#include "storage/procarray.h"
41#include "tcop/tcopprot.h"
42#include "utils/builtins.h"
43#include "utils/memutils.h"
44#include "utils/pg_lsn.h"
45#include "utils/snapmgr.h"
46#include "utils/syscache.h"
47#include "utils/wait_event.h"
48
49/* max sleep time between cycles (3min) */
50#define DEFAULT_NAPTIME_PER_CYCLE 180000L
51
52/* GUC variables */
56
58
59typedef struct LogicalRepCtxStruct
60{
61 /* Supervisor process. */
63
64 /* Hash table holding last start times of subscriptions' apply workers. */
67
68 /* Background workers. */
71
73
74/* an entry in the last-start-times shared hash table */
76{
77 Oid subid; /* OID of logrep subscription (hash key) */
78 TimestampTz last_start_time; /* last time its apply worker was started */
80
81/* parameters for the last-start-times shared hash table */
90
93
94static bool on_commit_launcher_wakeup = false;
95
96
97static void logicalrep_launcher_onexit(int code, Datum arg);
98static void logicalrep_worker_onexit(int code, Datum arg);
99static void logicalrep_worker_detach(void);
101static int logicalrep_pa_worker_count(Oid subid);
102static void logicalrep_launcher_attach_dshmem(void);
106static bool acquire_conflict_slot_if_exists(void);
108static void init_conflict_slot_xmin(void);
109
110
111/*
112 * Load the list of subscriptions.
113 *
114 * Only the fields interesting for worker start/stop functions are filled for
115 * each subscription.
116 */
117static List *
119{
120 List *res = NIL;
121 Relation rel;
122 TableScanDesc scan;
125
126 /* This is the context that we will allocate our output data in */
128
129 /*
130 * Start a transaction so we can access pg_subscription.
131 */
133
135 scan = table_beginscan_catalog(rel, 0, NULL);
136
138 {
140 Subscription *sub;
142
143 /*
144 * Allocate our results in the caller's context, not the
145 * transaction's. We do this inside the loop, and restore the original
146 * context at the end, so that leaky things like heap_getnext() are
147 * not called in a potentially long-lived context.
148 */
150
152 sub->oid = subform->oid;
153 sub->dbid = subform->subdbid;
154 sub->owner = subform->subowner;
155 sub->enabled = subform->subenabled;
156 sub->name = pstrdup(NameStr(subform->subname));
157 sub->retaindeadtuples = subform->subretaindeadtuples;
158 sub->retentionactive = subform->subretentionactive;
159 /* We don't fill fields we are not interested in. */
160
161 res = lappend(res, sub);
163 }
164
165 table_endscan(scan);
167
169
170 return res;
171}
172
173/*
174 * Wait for a background worker to start up and attach to the shmem context.
175 *
176 * This is only needed for cleaning up the shared memory in case the worker
177 * fails to attach.
178 *
179 * Returns whether the attach was successful.
180 */
/*
 * NOTE(review): this rendering elides several source lines (the latch reset,
 * the LogicalRepWorkerLock acquire/release around the shared-slot checks, and
 * the cleanup call in the BGWH_STOPPED branch) — confirm against the
 * repository copy of launcher.c before relying on details below.
 */
181static bool
 183 uint16 generation,
 185{
 186 bool result = false;
 187 bool dropped_latch = false;
 188
 /* Poll until the worker either attaches (proc set) or dies. */
 189 for (;;)
 190 {
 191 BgwHandleStatus status;
 192 pid_t pid;
 193 int rc;
 194
 196
 198
 199 /* Worker either died or has started. Return false if died. */
 200 if (!worker->in_use || worker->proc)
 201 {
 202 result = worker->in_use;
 204 break;
 205 }
 206
 208
 209 /* Check if worker has died before attaching, and clean up after it. */
 210 status = GetBackgroundWorkerPid(handle, &pid);
 211
 212 if (status == BGWH_STOPPED)
 213 {
 215 /* Ensure that this was indeed the worker we waited for. */
 216 if (generation == worker->generation)
 219 break; /* result is already false */
 220 }
 221
 222 /*
 223 * We need timeout because we generally don't get notified via latch
 224 * about the worker attach. But we don't expect to have to wait long.
 225 */
 226 rc = WaitLatch(MyLatch,
 229
 230 if (rc & WL_LATCH_SET)
 231 {
 /* Remember we consumed a latch event so we can re-set it below. */
 234 dropped_latch = true;
 235 }
 236 }
 237
 238 /*
 239 * If we had to clear a latch event in order to wait, be sure to restore
 240 * it before exiting. Otherwise caller may miss events.
 241 */
 242 if (dropped_latch)
 244
 245 return result;
 246}
247
248/*
249 * Walks the workers array and searches for one that matches given worker type,
250 * subscription id, and relation id.
251 *
252 * For both apply workers and sequencesync workers, the relid should be set to
253 * InvalidOid, as these workers handle changes across all tables and sequences
254 * respectively, rather than targeting a specific relation. For tablesync
255 * workers, the relid should be set to the OID of the relation being
256 * synchronized.
257 */
260 bool only_running)
261{
262 int i;
263 LogicalRepWorker *res = NULL;
264
265 /* relid must be valid only for table sync workers */
268
269 /* Search for an attached worker that matches the specified criteria. */
270 for (i = 0; i < max_logical_replication_workers; i++)
271 {
273
274 /* Skip parallel apply workers. */
276 continue;
277
278 if (w->in_use && w->subid == subid && w->relid == relid &&
279 w->type == wtype && (!only_running || w->proc))
280 {
281 res = w;
282 break;
283 }
284 }
285
286 return res;
287}
288
289/*
290 * Similar to logicalrep_worker_find(), but returns a list of all workers for
291 * the subscription, instead of just one.
292 */
293List *
295{
296 int i;
297 List *res = NIL;
298
299 if (acquire_lock)
301
303
304 /* Search for attached worker for a given subscription id. */
305 for (i = 0; i < max_logical_replication_workers; i++)
306 {
308
309 if (w->in_use && w->subid == subid && (!only_running || w->proc))
310 res = lappend(res, w);
311 }
312
313 if (acquire_lock)
315
316 return res;
317}
318
319/*
320 * Start new logical replication background worker, if possible.
321 *
322 * Returns true on success, false on failure.
323 */
324bool
326 Oid dbid, Oid subid, const char *subname, Oid userid,
329{
332 uint16 generation;
333 int i;
334 int slot = 0;
335 LogicalRepWorker *worker = NULL;
336 int nsyncworkers;
342
343 /*----------
344 * Sanity checks:
345 * - must be valid worker type
346 * - tablesync workers are only ones to have relid
347 * - parallel apply worker is the only kind of subworker
348 * - The replication slot used in conflict detection is created when
349 * retain_dead_tuples is enabled
350 */
355
357 (errmsg_internal("starting logical replication worker for subscription \"%s\"",
358 subname)));
359
360 /* Report this after the initial starting message for consistency. */
364 errmsg("cannot start logical replication workers when \"max_active_replication_origins\" is 0")));
365
366 /*
367 * We need to do the modification of the shared memory under lock so that
368 * we have consistent view.
369 */
371
372retry:
373 /* Find unused worker slot. */
374 for (i = 0; i < max_logical_replication_workers; i++)
375 {
377
378 if (!w->in_use)
379 {
380 worker = w;
381 slot = i;
382 break;
383 }
384 }
385
387
389
390 /*
391 * If we didn't find a free slot, try to do garbage collection. The
392 * reason we do this is because if some worker failed to start up and its
393 * parent has crashed while waiting, the in_use state was never cleared.
394 */
396 {
397 bool did_cleanup = false;
398
399 for (i = 0; i < max_logical_replication_workers; i++)
400 {
402
403 /*
404 * If the worker was marked in use but didn't manage to attach in
405 * time, clean it up.
406 */
407 if (w->in_use && !w->proc &&
410 {
412 "logical replication worker for subscription %u took too long to start; canceled",
413 w->subid);
414
416 did_cleanup = true;
417 }
418 }
419
420 if (did_cleanup)
421 goto retry;
422 }
423
424 /*
425 * We don't allow to invoke more sync workers once we have reached the
426 * sync worker limit per subscription. So, just return silently as we
427 * might get here because of an otherwise harmless race condition.
428 */
431 {
433 return false;
434 }
435
437
438 /*
439 * Return false if the number of parallel apply workers reached the limit
440 * per subscription.
441 */
444 {
446 return false;
447 }
448
449 /*
450 * However if there are no more free worker slots, inform user about it
451 * before exiting.
452 */
453 if (worker == NULL)
454 {
458 errmsg("out of logical replication worker slots"),
459 errhint("You might need to increase \"%s\".", "max_logical_replication_workers")));
460 return false;
461 }
462
463 /* Prepare the worker slot. */
464 worker->type = wtype;
465 worker->launch_time = now;
466 worker->in_use = true;
467 worker->generation++;
468 worker->proc = NULL;
469 worker->dbid = dbid;
470 worker->userid = userid;
471 worker->subid = subid;
472 worker->relid = relid;
475 worker->stream_fileset = NULL;
481 worker->last_lsn = InvalidXLogRecPtr;
486 worker->last_seqsync_start_time = 0;
487
488 /* Before releasing lock, remember generation for future identification. */
489 generation = worker->generation;
490
492
493 /* Register the new dynamic worker. */
494 memset(&bgw, 0, sizeof(bgw));
495 bgw.bgw_flags = BGWORKER_SHMEM_ACCESS |
497 bgw.bgw_start_time = BgWorkerStart_RecoveryFinished;
498 snprintf(bgw.bgw_library_name, MAXPGPATH, "postgres");
499
500 switch (worker->type)
501 {
502 case WORKERTYPE_APPLY:
503 snprintf(bgw.bgw_function_name, BGW_MAXLEN, "ApplyWorkerMain");
504 snprintf(bgw.bgw_name, BGW_MAXLEN,
505 "logical replication apply worker for subscription %u",
506 subid);
507 snprintf(bgw.bgw_type, BGW_MAXLEN, "logical replication apply worker");
508 break;
509
511 snprintf(bgw.bgw_function_name, BGW_MAXLEN, "ParallelApplyWorkerMain");
512 snprintf(bgw.bgw_name, BGW_MAXLEN,
513 "logical replication parallel apply worker for subscription %u",
514 subid);
515 snprintf(bgw.bgw_type, BGW_MAXLEN, "logical replication parallel worker");
516
517 memcpy(bgw.bgw_extra, &subworker_dsm, sizeof(dsm_handle));
518 break;
519
521 snprintf(bgw.bgw_function_name, BGW_MAXLEN, "SequenceSyncWorkerMain");
522 snprintf(bgw.bgw_name, BGW_MAXLEN,
523 "logical replication sequencesync worker for subscription %u",
524 subid);
525 snprintf(bgw.bgw_type, BGW_MAXLEN, "logical replication sequencesync worker");
526 break;
527
529 snprintf(bgw.bgw_function_name, BGW_MAXLEN, "TableSyncWorkerMain");
530 snprintf(bgw.bgw_name, BGW_MAXLEN,
531 "logical replication tablesync worker for subscription %u sync %u",
532 subid,
533 relid);
534 snprintf(bgw.bgw_type, BGW_MAXLEN, "logical replication tablesync worker");
535 break;
536
538 /* Should never happen. */
539 elog(ERROR, "unknown worker type");
540 }
541
542 bgw.bgw_restart_time = BGW_NEVER_RESTART;
543 bgw.bgw_notify_pid = MyProcPid;
544 bgw.bgw_main_arg = Int32GetDatum(slot);
545
547 {
548 /* Failed to start worker, so clean up the worker slot. */
550 Assert(generation == worker->generation);
553
556 errmsg("out of background worker slots"),
557 errhint("You might need to increase \"%s\".", "max_worker_processes")));
558 return false;
559 }
560
561 /* Now wait until it attaches. */
562 return WaitForReplicationWorkerAttach(worker, generation, bgw_handle);
563}
564
565/*
566 * Internal function to stop the worker and wait until it detaches from the
567 * slot.
568 */
/*
 * NOTE(review): the elided lines here include latch handling and lock
 * assertions not visible in this rendering; presumably the caller holds the
 * worker-array lock while this runs — verify against the repository copy.
 */
 569static void
 571{
 572 uint16 generation;
 573
 575
 576 /*
 577 * Remember which generation was our worker so we can check if what we see
 578 * is still the same one.
 579 */
 580 generation = worker->generation;
 581
 582 /*
 583 * If we found a worker but it does not have proc set then it is still
 584 * starting up; wait for it to finish starting and then kill it.
 585 */
 586 while (worker->in_use && !worker->proc)
 587 {
 588 int rc;
 589
 591
 592 /* Wait a bit --- we don't expect to have to wait long. */
 593 rc = WaitLatch(MyLatch,
 596
 597 if (rc & WL_LATCH_SET)
 598 {
 601 }
 602
 603 /* Recheck worker status. */
 605
 606 /*
 607 * Check whether the worker slot is no longer used, which would mean
 608 * that the worker has exited, or whether the worker generation is
 609 * different, meaning that a different worker has taken the slot.
 610 */
 611 if (!worker->in_use || worker->generation != generation)
 612 return;
 613
 614 /* Worker has assigned proc, so it has started. */
 615 if (worker->proc)
 616 break;
 617 }
 618
 /* Send the caller-specified signal (e.g. SIGTERM or SIGUSR2). */
 619 /* Now terminate the worker ... */
 620 kill(worker->proc->pid, signo);
 621
 622 /* ... and wait for it to die. */
 623 for (;;)
 624 {
 625 int rc;
 626
 /* Slot freed or reused by a new generation means the worker is gone. */
 627 /* is it gone? */
 628 if (!worker->proc || worker->generation != generation)
 629 break;
 630
 632
 633 /* Wait a bit --- we don't expect to have to wait long. */
 634 rc = WaitLatch(MyLatch,
 637
 638 if (rc & WL_LATCH_SET)
 639 {
 642 }
 643
 645 }
 646}
647
648/*
649 * Stop the logical replication worker that matches the specified worker type,
650 * subscription id, and relation id.
651 */
652void
654{
655 LogicalRepWorker *worker;
656
657 /* relid must be valid only for table sync workers */
659
661
662 worker = logicalrep_worker_find(wtype, subid, relid, false);
663
664 if (worker)
665 {
668 }
669
671}
672
673/*
674 * Stop the given logical replication parallel apply worker.
675 *
676 * Node that the function sends SIGUSR2 instead of SIGTERM to the parallel apply
677 * worker so that the worker exits cleanly.
678 */
679void
681{
682 int slot_no;
683 uint16 generation;
684 LogicalRepWorker *worker;
685
686 SpinLockAcquire(&winfo->shared->mutex);
687 generation = winfo->shared->logicalrep_worker_generation;
689 SpinLockRelease(&winfo->shared->mutex);
690
692
693 /*
694 * Detach from the error_mq_handle for the parallel apply worker before
695 * stopping it. This prevents the leader apply worker from trying to
696 * receive the message from the error queue that might already be detached
697 * by the parallel apply worker.
698 */
699 if (winfo->error_mq_handle)
700 {
702 winfo->error_mq_handle = NULL;
703 }
704
706
707 worker = &LogicalRepCtx->workers[slot_no];
709
710 /*
711 * Only stop the worker if the generation matches and the worker is alive.
712 */
713 if (worker->generation == generation && worker->proc)
715
717}
718
719/*
720 * Wake up (using latch) any logical replication worker that matches the
721 * specified worker type, subscription id, and relation id.
722 */
723void
725{
726 LogicalRepWorker *worker;
727
728 /* relid must be valid only for table sync workers */
730
732
733 worker = logicalrep_worker_find(wtype, subid, relid, true);
734
735 if (worker)
737
739}
740
741/*
742 * Wake up (using latch) the specified logical replication worker.
743 *
744 * Caller must hold lock, else worker->proc could change under us.
745 */
746void
753
754/*
755 * Attach to a slot.
756 */
757void
759{
760 /* Block concurrent access. */
762
763 Assert(slot >= 0 && slot < max_logical_replication_workers);
765
767 {
771 errmsg("logical replication worker slot %d is empty, cannot attach",
772 slot)));
773 }
774
776 {
780 errmsg("logical replication worker slot %d is already used by "
781 "another worker, cannot attach", slot)));
782 }
783
786
788}
789
790/*
791 * Stop the parallel apply workers if any, and detach the leader apply worker
792 * (cleans up the worker info).
793 */
794static void
796{
797 /* Stop the parallel apply workers. */
799 {
800 List *workers;
801 ListCell *lc;
802
803 /*
804 * Detach from the error_mq_handle for all parallel apply workers
805 * before terminating them. This prevents the leader apply worker from
806 * receiving the worker termination message and sending it to logs
807 * when the same is already done by the parallel worker.
808 */
810
812
813 workers = logicalrep_workers_find(MyLogicalRepWorker->subid, true, false);
814 foreach(lc, workers)
815 {
817
820 }
821
823
824 list_free(workers);
825 }
826
827 /* Block concurrent access. */
829
831
833}
834
835/*
836 * Clean up worker info.
837 */
 838static void
 840{
 842
 /*
  * Reset every identifying field of the shared slot so it reads as free;
  * clearing in_use is what makes the slot available for reuse by
  * logicalrep_worker_launch()'s free-slot scan.
  */
 843 worker->type = WORKERTYPE_UNKNOWN;
 844 worker->in_use = false;
 845 worker->proc = NULL;
 846 worker->dbid = InvalidOid;
 847 worker->userid = InvalidOid;
 848 worker->subid = InvalidOid;
 849 worker->relid = InvalidOid;
 850 worker->leader_pid = InvalidPid;
 851 worker->parallel_apply = false;
 852}
853
854/*
855 * Cleanup function for logical replication launcher.
856 *
857 * Called on logical replication launcher exit.
858 */
859static void
864
865/*
866 * Reset the last_seqsync_start_time of the sequencesync worker in the
867 * subscription's apply worker.
868 *
869 * Note that this value is not stored in the sequencesync worker, because that
870 * has finished already and is about to exit.
871 */
872void
874{
875 LogicalRepWorker *worker;
876
877 /*
878 * The apply worker can't access last_seqsync_start_time concurrently, so
879 * it is okay to use SHARED lock here. See ProcessSequencesForSync().
880 */
882
885 true);
886 if (worker)
887 worker->last_seqsync_start_time = 0;
888
890}
891
892/*
893 * Cleanup function.
894 *
895 * Called on logical replication worker exit.
896 */
897static void
899{
900 /* Disconnect gracefully from the remote side. */
903
905
906 /* Cleanup fileset used for streaming transactions. */
909
910 /*
911 * Session level locks may be acquired outside of a transaction in
912 * parallel apply mode and will not be released when the worker
913 * terminates, so manually release all locks before the worker exits.
914 *
915 * The locks will be acquired once the worker is initialized.
916 */
919
921}
922
923/*
924 * Count the number of registered (not necessarily running) sync workers
925 * for a subscription.
926 */
 927int
 929{
 930 int i;
 931 int res = 0;
 932
 934
 /*
  * Both tablesync and sequencesync workers count against the per-
  * subscription sync-worker limit, hence the two type checks below.
  * NOTE(review): an elided line (934) presumably asserts the caller holds
  * the worker-array lock — confirm in the repository copy.
  */
 935 /* Search for attached worker for a given subscription id. */
 936 for (i = 0; i < max_logical_replication_workers; i++)
 937 {
 939
 940 if (w->subid == subid && (isTableSyncWorker(w) || isSequenceSyncWorker(w)))
 941 res++;
 942 }
 943
 944 return res;
 945}
946
947/*
948 * Count the number of registered (but not necessarily running) parallel apply
949 * workers for a subscription.
950 */
 951static int
 953{
 954 int i;
 955 int res = 0;
 956
 958
 959 /*
 960 * Scan all attached parallel apply workers, only counting those which
 961 * have the given subscription id.
 962 */
 963 for (i = 0; i < max_logical_replication_workers; i++)
 964 {
 966
 /* Registered (not necessarily running) parallel apply workers count. */
 967 if (isParallelApplyWorker(w) && w->subid == subid)
 968 res++;
 969 }
 970
 971 return res;
 972}
973
974/*
975 * ApplyLauncherShmemSize
976 * Compute space needed for replication launcher shared memory
977 */
 978Size
 980{
 981 Size size;
 982
 983 /*
 984 * Need the fixed struct and the array of LogicalRepWorker.
 985 */
 986 size = sizeof(LogicalRepCtxStruct);
 987 size = MAXALIGN(size);
 /*
  * NOTE(review): the start of the add_size(...mul_size(...)) expression is
  * on an elided line (988); only its tail is visible here. The intent is
  * fixed struct + max_logical_replication_workers worker slots.
  */
 989 sizeof(LogicalRepWorker)));
 990 return size;
 991}
992
993/*
994 * ApplyLauncherRegister
995 * Register a background worker running the logical replication launcher.
996 */
 997void
 999{
1001
1002 /*
1003 * The logical replication launcher is disabled during binary upgrades, to
1004 * prevent logical replication workers from running on the source cluster.
1005 * That could cause replication origins to move forward after having been
1006 * copied to the target cluster, potentially creating conflicts with the
1007 * copied data files.
1008 */
 /* NOTE(review): the condition (elided line 1009) presumably tests
  * IsBinaryUpgrade, per the comment above — confirm in the repository. */
1010 return;
1011
 /* Fill in a BackgroundWorker request; launcher starts once recovery ends. */
1012 memset(&bgw, 0, sizeof(bgw));
1013 bgw.bgw_flags = BGWORKER_SHMEM_ACCESS |
1015 bgw.bgw_start_time = BgWorkerStart_RecoveryFinished;
1016 snprintf(bgw.bgw_library_name, MAXPGPATH, "postgres");
1017 snprintf(bgw.bgw_function_name, BGW_MAXLEN, "ApplyLauncherMain");
1018 snprintf(bgw.bgw_name, BGW_MAXLEN,
1019 "logical replication launcher");
1020 snprintf(bgw.bgw_type, BGW_MAXLEN,
1021 "logical replication launcher");
 /* Restart policy: per the bgworker API, restart 5 seconds after a crash. */
1022 bgw.bgw_restart_time = 5;
1023 bgw.bgw_notify_pid = 0;
1024 bgw.bgw_main_arg = (Datum) 0;
1025
1027}
1028
1029/*
1030 * ApplyLauncherShmemInit
1031 * Allocate and initialize replication launcher shared memory
1032 */
1033void
1035{
1036 bool found;
1037
1039 ShmemInitStruct("Logical Replication Launcher Data",
1041 &found);
1042
 /* First backend to attach initializes the shared area; others skip. */
1043 if (!found)
1044 {
1045 int slot;
1046
1048
1051
1052 /* Initialize memory and spin locks for each worker slot. */
1053 for (slot = 0; slot < max_logical_replication_workers; slot++)
1054 {
1055 LogicalRepWorker *worker = &LogicalRepCtx->workers[slot];
1056
1057 memset(worker, 0, sizeof(LogicalRepWorker));
1058 SpinLockInit(&worker->relmutex);
1059 }
1060 }
1061}
1062
1063/*
1064 * Initialize or attach to the dynamic shared hash table that stores the
1065 * last-start times, if not already done.
1066 * This must be called before accessing the table.
1067 */
1068static void
1070{
1071 MemoryContext oldcontext;
1072
1073 /* Quick exit if we already did this. */
1076 return;
1077
1078 /* Otherwise, use a lock to ensure only one process creates the table. */
1080
1081 /* Be sure any local memory allocated by DSA routines is persistent. */
1083
1085 {
1086 /* Initialize dynamic shared hash table for last-start times. */
1091
1092 /* Store handles in shared memory for other backends to use. */
1095 }
1096 else if (!last_start_times)
1097 {
1098 /* Attach to existing dynamic shared hash table. */
1103 }
1104
1105 MemoryContextSwitchTo(oldcontext);
1107}
1108
1109/*
1110 * Set the last-start time for the subscription.
1111 */
1112static void
1124
1125/*
1126 * Return the last-start time for the subscription, or 0 if there isn't one.
1127 */
1128static TimestampTz
1130{
1132 TimestampTz ret;
1133
1135
1136 entry = dshash_find(last_start_times, &subid, false);
1137 if (entry == NULL)
1138 return 0;
1139
1140 ret = entry->last_start_time;
1142
1143 return ret;
1144}
1145
1146/*
1147 * Remove the last-start-time entry for the subscription, if one exists.
1148 *
1149 * This has two use-cases: to remove the entry related to a subscription
1150 * that's been deleted or disabled (just to avoid leaking shared memory),
1151 * and to allow immediate restart of an apply worker that has exited
1152 * due to subscription parameter changes.
1153 */
1154void
1161
1162/*
1163 * Wakeup the launcher on commit if requested.
1164 */
1165void
1167{
1168 if (isCommit)
1169 {
1172 }
1173
1175}
1176
1177/*
1178 * Request wakeup of the launcher on commit of the transaction.
1179 *
1180 * This is used to send launcher signal to stop sleeping and process the
1181 * subscriptions when current transaction commits. Should be used when new
1182 * tuple was added to the pg_subscription catalog.
1183*/
1184void
1190
1191/*
1192 * Wakeup the launcher immediately.
1193 */
1194void
1200
1201/*
1202 * Main loop for the apply launcher process.
1203 */
1204void
1206{
1208 (errmsg_internal("logical replication launcher started")));
1209
1211
1214
1215 /* Establish signal handlers. */
1218
1219 /*
1220 * Establish connection to nailed catalogs (we only ever access
1221 * pg_subscription).
1222 */
1224
1225 /*
1226 * Acquire the conflict detection slot at startup to ensure it can be
1227 * dropped if no longer needed after a restart.
1228 */
1230
1231 /* Enter main loop */
1232 for (;;)
1233 {
1234 int rc;
1235 List *sublist;
1236 ListCell *lc;
1239 long wait_time = DEFAULT_NAPTIME_PER_CYCLE;
1240 bool can_update_xmin = true;
1241 bool retain_dead_tuples = false;
1243
1245
1246 /* Use temporary context to avoid leaking memory across cycles. */
1248 "Logical Replication Launcher sublist",
1251
1252 /*
1253 * Start any missing workers for enabled subscriptions.
1254 *
1255 * Also, during the iteration through all subscriptions, we compute
1256 * the minimum XID required to protect deleted tuples for conflict
1257 * detection if one of the subscription enables retain_dead_tuples
1258 * option.
1259 */
1261 foreach(lc, sublist)
1262 {
1263 Subscription *sub = (Subscription *) lfirst(lc);
1267 long elapsed;
1268
1269 if (sub->retaindeadtuples)
1270 {
1271 retain_dead_tuples = true;
1272
1273 /*
1274 * Create a replication slot to retain information necessary
1275 * for conflict detection such as dead tuples, commit
1276 * timestamps, and origins.
1277 *
1278 * The slot is created before starting the apply worker to
1279 * prevent it from unnecessarily maintaining its
1280 * oldest_nonremovable_xid.
1281 *
1282 * The slot is created even for a disabled subscription to
1283 * ensure that conflict-related information is available when
1284 * applying remote changes that occurred before the
1285 * subscription was enabled.
1286 */
1288
1289 if (sub->retentionactive)
1290 {
1291 /*
1292 * Can't advance xmin of the slot unless all the
1293 * subscriptions actively retaining dead tuples are
1294 * enabled. This is required to ensure that we don't
1295 * advance the xmin of CONFLICT_DETECTION_SLOT if one of
1296 * the subscriptions is not enabled. Otherwise, we won't
1297 * be able to detect conflicts reliably for such a
1298 * subscription even though it has set the
1299 * retain_dead_tuples option.
1300 */
1301 can_update_xmin &= sub->enabled;
1302
1303 /*
1304 * Initialize the slot once the subscription activates
1305 * retention.
1306 */
1309 }
1310 }
1311
1312 if (!sub->enabled)
1313 continue;
1314
1317 false);
1318
1319 if (w != NULL)
1320 {
1321 /*
1322 * Compute the minimum xmin required to protect dead tuples
1323 * required for conflict detection among all running apply
1324 * workers. This computation is performed while holding
1325 * LogicalRepWorkerLock to prevent accessing invalid worker
1326 * data, in scenarios where a worker might exit and reset its
1327 * state concurrently.
1328 */
1329 if (sub->retaindeadtuples &&
1330 sub->retentionactive &&
1333
1335
1336 /* worker is running already */
1337 continue;
1338 }
1339
1341
1342 /*
1343 * Can't advance xmin of the slot unless all the workers
1344 * corresponding to subscriptions actively retaining dead tuples
1345 * are running, disabling the further computation of the minimum
1346 * nonremovable xid.
1347 */
1348 if (sub->retaindeadtuples && sub->retentionactive)
1349 can_update_xmin = false;
1350
1351 /*
1352 * If the worker is eligible to start now, launch it. Otherwise,
1353 * adjust wait_time so that we'll wake up as soon as it can be
1354 * started.
1355 *
1356 * Each subscription's apply worker can only be restarted once per
1357 * wal_retrieve_retry_interval, so that errors do not cause us to
1358 * repeatedly restart the worker as fast as possible. In cases
1359 * where a restart is expected (e.g., subscription parameter
1360 * changes), another process should remove the last-start entry
1361 * for the subscription so that the worker can be restarted
1362 * without waiting for wal_retrieve_retry_interval to elapse.
1363 */
1366 if (last_start == 0 ||
1368 {
1371 sub->dbid, sub->oid, sub->name,
1372 sub->owner, InvalidOid,
1374 sub->retaindeadtuples &&
1375 sub->retentionactive))
1376 {
1377 /*
1378 * We get here either if we failed to launch a worker
1379 * (perhaps for resource-exhaustion reasons) or if we
1380 * launched one but it immediately quit. Either way, it
1381 * seems appropriate to try again after
1382 * wal_retrieve_retry_interval.
1383 */
1384 wait_time = Min(wait_time,
1386 }
1387 }
1388 else
1389 {
1390 wait_time = Min(wait_time,
1392 }
1393 }
1394
1395 /*
1396 * Drop the CONFLICT_DETECTION_SLOT slot if there is no subscription
1397 * that requires us to retain dead tuples. Otherwise, if required,
1398 * advance the slot's xmin to protect dead tuples required for the
1399 * conflict detection.
1400 *
1401 * Additionally, if all apply workers for subscriptions with
1402 * retain_dead_tuples enabled have requested to stop retention, the
1403 * slot's xmin will be set to InvalidTransactionId allowing the
1404 * removal of dead tuples.
1405 */
1407 {
1408 if (!retain_dead_tuples)
1410 else if (can_update_xmin)
1412 }
1413
1414 /* Switch back to original memory context. */
1416 /* Clean the temporary memory. */
1418
1419 /* Wait for more work. */
1420 rc = WaitLatch(MyLatch,
1422 wait_time,
1424
1425 if (rc & WL_LATCH_SET)
1426 {
1429 }
1430
1432 {
1433 ConfigReloadPending = false;
1435 }
1436 }
1437
1438 /* Not reachable */
1439}
1440
1441/*
1442 * Determine the minimum non-removable transaction ID across all apply workers
1443 * for subscriptions that have retain_dead_tuples enabled. Store the result
1444 * in *xmin.
1445 */
/*
 * NOTE(review): lines reading worker->oldest_nonremovable_xid into a local
 * (under the spinlock) and the retention re-check condition are elided in
 * this rendering — confirm field names against the repository copy.
 */
1446static void
1448{
1450
1451 Assert(worker != NULL);
1452
1453 /*
1454 * The replication slot for conflict detection must be created before the
1455 * worker starts.
1456 */
1458
 /* Read the worker's advertised xid under its spinlock for a stable value. */
1459 SpinLockAcquire(&worker->relmutex);
1461 SpinLockRelease(&worker->relmutex);
1462
1463 /*
1464 * Return if the apply worker has stopped retention concurrently.
1465 *
1466 * Although this function is invoked only when retentionactive is true,
1467 * the apply worker might stop retention after the launcher fetches the
1468 * retentionactive flag.
1469 */
1471 return;
1472
 /* Fold this worker's value into the running minimum across all workers. */
1473 if (!TransactionIdIsValid(*xmin) ||
1475 *xmin = nonremovable_xid;
1476}
1477
1478/*
1479 * Acquire the replication slot used to retain information for conflict
1480 * detection, if it exists.
1481 *
1482 * Return true if successfully acquired, otherwise return false.
1483 */
/*
 * NOTE(review): the existence test (elided line 1487) and the acquire call
 * (elided line 1490) are not visible in this rendering; the visible control
 * flow is: return false when the slot does not exist, otherwise acquire it
 * and return true — confirm the exact calls in the repository copy.
 */
1484static bool
1486{
1488 return false;
1490
1491 return true;
1492}
1493
1494/*
1495 * Update the xmin the replication slot used to retain information required
1496 * for conflict detection.
1497 */
1498static void
1500{
1504
1505 /* Return if the xmin value of the slot cannot be updated */
1507 return;
1508
1513
1514 elog(DEBUG1, "updated xmin: %u", MyReplicationSlot->data.xmin);
1515
1518
1519 /*
1520 * Like PhysicalConfirmReceivedLocation(), do not save slot information
1521 * each time. This is acceptable because all concurrent transactions on
1522 * the publisher that require the data preceding the slot's xmin should
1523 * have already been applied and flushed on the subscriber before the xmin
1524 * is advanced. So, even if the slot's xmin regresses after a restart, it
1525 * will be advanced again in the next cycle. Therefore, no data required
1526 * for conflict detection will be prematurely removed.
1527 */
1528 return;
1529}
1530
1531/*
1532 * Initialize the xmin for the conflict detection slot.
1533 */
1534static void
1562
1563/*
1564 * Create and acquire the replication slot used to retain information for
1565 * conflict detection, if not yet.
1566 */
1567void
1569{
1570 /* Exit early, if the replication slot is already created and acquired */
1572 return;
1573
1574 ereport(LOG,
1575 errmsg("creating replication conflict detection slot"));
1576
1578 false, false);
1579
1581}
1582
1583/*
1584 * Is current process the logical replication launcher?
1585 */
/*
 * NOTE(review): the body (elided line 1589) presumably compares this
 * process against the launcher PID recorded in shared memory — confirm in
 * the repository copy.
 */
1586bool
1588{
1590}
1591
1592/*
1593 * Return the pid of the leader apply worker if the given pid is the pid of a
1594 * parallel apply worker, otherwise, return InvalidPid.
1595 */
1596pid_t
1598{
1599 int leader_pid = InvalidPid;
1600 int i;
1601
1603
 /* Linear scan of the shared worker array for a live parallel apply
  * worker whose PID matches; its slot records the leader's PID. */
1604 for (i = 0; i < max_logical_replication_workers; i++)
1605 {
1607
1608 if (isParallelApplyWorker(w) && w->proc && pid == w->proc->pid)
1609 {
1610 leader_pid = w->leader_pid;
1611 break;
1612 }
1613 }
1614
1616
 /* InvalidPid when the given pid is not a parallel apply worker. */
1617 return leader_pid;
1618}
1619
1620/*
1621 * Returns state of the subscriptions.
1622 */
1624Datum
1625{
1626#define PG_STAT_GET_SUBSCRIPTION_COLS 10
1627 Oid subid = PG_ARGISNULL(0) ? InvalidOid : PG_GETARG_OID(0);
1628 int i;
1629 ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
1630
1631 InitMaterializedSRF(fcinfo, 0);
1632
1633 /* Make sure we get consistent view of the workers. */
1635
1636 for (i = 0; i < max_logical_replication_workers; i++)
1637 {
1638 /* for each row */
1640 bool nulls[PG_STAT_GET_SUBSCRIPTION_COLS] = {0};
1641 int worker_pid;
1642 LogicalRepWorker worker;
1643
 /* Copy the slot out of shared memory so later reads are stable. */
1644 memcpy(&worker, &LogicalRepCtx->workers[i],
1645 sizeof(LogicalRepWorker));
 /* Skip slots with no live backend attached. */
1646 if (!worker.proc || !IsBackendPid(worker.proc->pid))
1647 continue;
1648
1649 if (OidIsValid(subid) && worker.subid != subid)
1650 continue;
1651
1652 worker_pid = worker.proc->pid;
1653
1654 values[0] = ObjectIdGetDatum(worker.subid);
 /* relid column is only meaningful for tablesync workers. */
1655 if (isTableSyncWorker(&worker))
1656 values[1] = ObjectIdGetDatum(worker.relid);
1657 else
1658 nulls[1] = true;
1660
 /* leader_pid column is only meaningful for parallel apply workers. */
1661 if (isParallelApplyWorker(&worker))
1662 values[3] = Int32GetDatum(worker.leader_pid);
1663 else
1664 nulls[3] = true;
1665
 /* LSN/timestamp columns: emit NULL when the worker has no value yet. */
1666 if (!XLogRecPtrIsValid(worker.last_lsn))
1667 nulls[4] = true;
1668 else
1669 values[4] = LSNGetDatum(worker.last_lsn);
1670 if (worker.last_send_time == 0)
1671 nulls[5] = true;
1672 else
1674 if (worker.last_recv_time == 0)
1675 nulls[6] = true;
1676 else
1678 if (!XLogRecPtrIsValid(worker.reply_lsn))
1679 nulls[7] = true;
1680 else
1681 values[7] = LSNGetDatum(worker.reply_lsn);
1682 if (worker.reply_time == 0)
1683 nulls[8] = true;
1684 else
1686
 /* Column 10: human-readable worker type. */
1687 switch (worker.type)
1688 {
1689 case WORKERTYPE_APPLY:
1690 values[9] = CStringGetTextDatum("apply");
1691 break;
1693 values[9] = CStringGetTextDatum("parallel apply");
1694 break;
1696 values[9] = CStringGetTextDatum("sequence synchronization");
1697 break;
1699 values[9] = CStringGetTextDatum("table synchronization");
1700 break;
1701 case WORKERTYPE_UNKNOWN:
1702 /* Should never happen. */
1703 elog(ERROR, "unknown worker type");
1704 }
1705
1706 tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc,
1707 values, nulls);
1708
1709 /*
1710 * If only a single subscription was requested, and we found it,
1711 * break.
1712 */
1713 if (OidIsValid(subid))
1714 break;
1715 }
1716
1718
1719 return (Datum) 0;
1720}
void pa_detach_all_error_mq(void)
bool InitializingApplyWorker
Definition worker.c:503
WalReceiverConn * LogRepWorkerWalRcvConn
Definition worker.c:481
long TimestampDifferenceMilliseconds(TimestampTz start_time, TimestampTz stop_time)
Definition timestamp.c:1751
bool TimestampDifferenceExceeds(TimestampTz start_time, TimestampTz stop_time, int msec)
Definition timestamp.c:1775
TimestampTz GetCurrentTimestamp(void)
Definition timestamp.c:1639
Datum now(PG_FUNCTION_ARGS)
Definition timestamp.c:1603
void RegisterBackgroundWorker(BackgroundWorker *worker)
Definition bgworker.c:947
void BackgroundWorkerInitializeConnection(const char *dbname, const char *username, uint32 flags)
Definition bgworker.c:860
void BackgroundWorkerUnblockSignals(void)
Definition bgworker.c:934
BgwHandleStatus GetBackgroundWorkerPid(BackgroundWorkerHandle *handle, pid_t *pidp)
Definition bgworker.c:1165
bool RegisterDynamicBackgroundWorker(BackgroundWorker *worker, BackgroundWorkerHandle **handle)
Definition bgworker.c:1053
#define BGW_NEVER_RESTART
Definition bgworker.h:92
BgwHandleStatus
Definition bgworker.h:111
@ BGWH_STOPPED
Definition bgworker.h:114
@ BgWorkerStart_RecoveryFinished
Definition bgworker.h:88
#define BGWORKER_BACKEND_DATABASE_CONNECTION
Definition bgworker.h:60
#define BGWORKER_SHMEM_ACCESS
Definition bgworker.h:53
#define BGW_MAXLEN
Definition bgworker.h:93
static Datum values[MAXATTR]
Definition bootstrap.c:188
#define CStringGetTextDatum(s)
Definition builtins.h:98
#define NameStr(name)
Definition c.h:837
#define Min(x, y)
Definition c.h:1093
#define MAXALIGN(LEN)
Definition c.h:898
#define Assert(condition)
Definition c.h:945
#define FLEXIBLE_ARRAY_MEMBER
Definition c.h:552
uint16_t uint16
Definition c.h:617
uint32 TransactionId
Definition c.h:738
#define OidIsValid(objectId)
Definition c.h:860
size_t Size
Definition c.h:691
int64 TimestampTz
Definition timestamp.h:39
#define TIMESTAMP_NOBEGIN(j)
Definition timestamp.h:159
dsa_area * dsa_attach(dsa_handle handle)
Definition dsa.c:510
void dsa_pin_mapping(dsa_area *area)
Definition dsa.c:650
dsa_handle dsa_get_handle(dsa_area *area)
Definition dsa.c:498
void dsa_pin(dsa_area *area)
Definition dsa.c:990
#define dsa_create(tranche_id)
Definition dsa.h:117
dsm_handle dsa_handle
Definition dsa.h:136
#define DSA_HANDLE_INVALID
Definition dsa.h:139
bool dshash_delete_key(dshash_table *hash_table, const void *key)
Definition dshash.c:524
void dshash_memcpy(void *dest, const void *src, size_t size, void *arg)
Definition dshash.c:611
void dshash_release_lock(dshash_table *hash_table, void *entry)
Definition dshash.c:579
void * dshash_find(dshash_table *hash_table, const void *key, bool exclusive)
Definition dshash.c:394
dshash_table_handle dshash_get_hash_table_handle(dshash_table *hash_table)
Definition dshash.c:371
dshash_table * dshash_attach(dsa_area *area, const dshash_parameters *params, dshash_table_handle handle, void *arg)
Definition dshash.c:274
dshash_hash dshash_memhash(const void *v, size_t size, void *arg)
Definition dshash.c:602
dshash_table * dshash_create(dsa_area *area, const dshash_parameters *params, void *arg)
Definition dshash.c:210
int dshash_memcmp(const void *a, const void *b, size_t size, void *arg)
Definition dshash.c:593
#define DSHASH_HANDLE_INVALID
Definition dshash.h:27
dsa_pointer dshash_table_handle
Definition dshash.h:24
#define dshash_find_or_insert(hash_table, key, found)
Definition dshash.h:109
uint32 dsm_handle
Definition dsm_impl.h:55
#define DSM_HANDLE_INVALID
Definition dsm_impl.h:58
Datum arg
Definition elog.c:1322
int errcode(int sqlerrcode)
Definition elog.c:874
#define LOG
Definition elog.h:31
int errhint(const char *fmt,...) pg_attribute_printf(1
int int errmsg_internal(const char *fmt,...) pg_attribute_printf(1
#define WARNING
Definition elog.h:36
#define DEBUG1
Definition elog.h:30
#define ERROR
Definition elog.h:39
#define elog(elevel,...)
Definition elog.h:226
#define ereport(elevel,...)
Definition elog.h:150
#define palloc0_object(type)
Definition fe_memutils.h:75
void FileSetDeleteAll(FileSet *fileset)
Definition fileset.c:151
#define PG_GETARG_OID(n)
Definition fmgr.h:275
#define PG_ARGISNULL(n)
Definition fmgr.h:209
#define PG_FUNCTION_ARGS
Definition fmgr.h:193
void InitMaterializedSRF(FunctionCallInfo fcinfo, bits32 flags)
Definition funcapi.c:76
bool IsBinaryUpgrade
Definition globals.c:121
int MyProcPid
Definition globals.c:47
struct Latch * MyLatch
Definition globals.c:63
void ProcessConfigFile(GucContext context)
Definition guc-file.l:120
@ PGC_SIGHUP
Definition guc.h:75
HeapTuple heap_getnext(TableScanDesc sscan, ScanDirection direction)
Definition heapam.c:1420
#define HeapTupleIsValid(tuple)
Definition htup.h:78
static void * GETSTRUCT(const HeapTupleData *tuple)
volatile sig_atomic_t ConfigReloadPending
Definition interrupt.c:27
void SignalHandlerForConfigReload(SIGNAL_ARGS)
Definition interrupt.c:61
void before_shmem_exit(pg_on_exit_callback function, Datum arg)
Definition ipc.c:344
int i
Definition isn.c:77
void SetLatch(Latch *latch)
Definition latch.c:290
void ResetLatch(Latch *latch)
Definition latch.c:374
int WaitLatch(Latch *latch, int wakeEvents, long timeout, uint32 wait_event_info)
Definition latch.c:172
Datum pg_stat_get_subscription(PG_FUNCTION_ARGS)
Definition launcher.c:1624
#define DEFAULT_NAPTIME_PER_CYCLE
Definition launcher.c:50
List * logicalrep_workers_find(Oid subid, bool only_running, bool acquire_lock)
Definition launcher.c:294
void AtEOXact_ApplyLauncher(bool isCommit)
Definition launcher.c:1166
void logicalrep_worker_wakeup_ptr(LogicalRepWorker *worker)
Definition launcher.c:747
Size ApplyLauncherShmemSize(void)
Definition launcher.c:979
bool logicalrep_worker_launch(LogicalRepWorkerType wtype, Oid dbid, Oid subid, const char *subname, Oid userid, Oid relid, dsm_handle subworker_dsm, bool retain_dead_tuples)
Definition launcher.c:325
bool IsLogicalLauncher(void)
Definition launcher.c:1587
void logicalrep_worker_attach(int slot)
Definition launcher.c:758
void ApplyLauncherWakeup(void)
Definition launcher.c:1195
static void ApplyLauncherSetWorkerStartTime(Oid subid, TimestampTz start_time)
Definition launcher.c:1113
LogicalRepWorker * logicalrep_worker_find(LogicalRepWorkerType wtype, Oid subid, Oid relid, bool only_running)
Definition launcher.c:259
void logicalrep_worker_wakeup(LogicalRepWorkerType wtype, Oid subid, Oid relid)
Definition launcher.c:724
static void update_conflict_slot_xmin(TransactionId new_xmin)
Definition launcher.c:1499
static void compute_min_nonremovable_xid(LogicalRepWorker *worker, TransactionId *xmin)
Definition launcher.c:1447
static void logicalrep_launcher_onexit(int code, Datum arg)
Definition launcher.c:860
static dsa_area * last_start_times_dsa
Definition launcher.c:91
void ApplyLauncherMain(Datum main_arg)
Definition launcher.c:1205
void CreateConflictDetectionSlot(void)
Definition launcher.c:1568
#define PG_STAT_GET_SUBSCRIPTION_COLS
int max_logical_replication_workers
Definition launcher.c:53
static void init_conflict_slot_xmin(void)
Definition launcher.c:1535
void logicalrep_pa_worker_stop(ParallelApplyWorkerInfo *winfo)
Definition launcher.c:680
static int logicalrep_pa_worker_count(Oid subid)
Definition launcher.c:952
static bool on_commit_launcher_wakeup
Definition launcher.c:94
void logicalrep_reset_seqsync_start_time(void)
Definition launcher.c:873
static TimestampTz ApplyLauncherGetWorkerStartTime(Oid subid)
Definition launcher.c:1129
void ApplyLauncherShmemInit(void)
Definition launcher.c:1034
static void logicalrep_worker_stop_internal(LogicalRepWorker *worker, int signo)
Definition launcher.c:570
static dshash_table * last_start_times
Definition launcher.c:92
LogicalRepWorker * MyLogicalRepWorker
Definition launcher.c:57
void ApplyLauncherWakeupAtCommit(void)
Definition launcher.c:1185
static const dshash_parameters dsh_params
Definition launcher.c:82
void logicalrep_worker_stop(LogicalRepWorkerType wtype, Oid subid, Oid relid)
Definition launcher.c:653
static LogicalRepCtxStruct * LogicalRepCtx
Definition launcher.c:72
static void logicalrep_worker_onexit(int code, Datum arg)
Definition launcher.c:898
pid_t GetLeaderApplyWorkerPid(pid_t pid)
Definition launcher.c:1597
int max_sync_workers_per_subscription
Definition launcher.c:54
static void logicalrep_worker_detach(void)
Definition launcher.c:795
static bool WaitForReplicationWorkerAttach(LogicalRepWorker *worker, uint16 generation, BackgroundWorkerHandle *handle)
Definition launcher.c:182
int logicalrep_sync_worker_count(Oid subid)
Definition launcher.c:928
void ApplyLauncherForgetWorkerStartTime(Oid subid)
Definition launcher.c:1155
static bool acquire_conflict_slot_if_exists(void)
Definition launcher.c:1485
void ApplyLauncherRegister(void)
Definition launcher.c:998
static void logicalrep_launcher_attach_dshmem(void)
Definition launcher.c:1069
static List * get_subscription_list(void)
Definition launcher.c:118
int max_parallel_apply_workers_per_subscription
Definition launcher.c:55
static void logicalrep_worker_cleanup(LogicalRepWorker *worker)
Definition launcher.c:839
List * lappend(List *list, void *datum)
Definition list.c:339
void list_free(List *list)
Definition list.c:1546
void LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
Definition lock.c:2319
#define AccessShareLock
Definition lockdefs.h:36
#define DEFAULT_LOCKMETHOD
Definition locktag.h:25
bool LWLockHeldByMe(LWLock *lock)
Definition lwlock.c:1888
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition lwlock.c:1153
bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode)
Definition lwlock.c:1932
void LWLockRelease(LWLock *lock)
Definition lwlock.c:1770
@ LW_SHARED
Definition lwlock.h:105
@ LW_EXCLUSIVE
Definition lwlock.h:104
char * pstrdup(const char *in)
Definition mcxt.c:1781
MemoryContext TopMemoryContext
Definition mcxt.c:166
MemoryContext CurrentMemoryContext
Definition mcxt.c:160
void MemoryContextDelete(MemoryContext context)
Definition mcxt.c:472
#define AllocSetContextCreate
Definition memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition memutils.h:160
#define CHECK_FOR_INTERRUPTS()
Definition miscadmin.h:123
#define InvalidPid
Definition miscadmin.h:32
static char * errmsg
int max_active_replication_origins
Definition origin.c:105
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition palloc.h:124
#define MAXPGPATH
static time_t start_time
Definition pg_ctl.c:96
#define lfirst(lc)
Definition pg_list.h:172
#define NIL
Definition pg_list.h:68
static Datum LSNGetDatum(XLogRecPtr X)
Definition pg_lsn.h:31
NameData subname
END_CATALOG_STRUCT typedef FormData_pg_subscription * Form_pg_subscription
#define pqsignal
Definition port.h:547
#define snprintf
Definition port.h:260
static Datum ObjectIdGetDatum(Oid X)
Definition postgres.h:252
uint64_t Datum
Definition postgres.h:70
static Datum Int32GetDatum(int32 X)
Definition postgres.h:212
#define InvalidOid
unsigned int Oid
static int fb(int x)
TransactionId GetOldestSafeDecodingTransactionId(bool catalogOnly)
Definition procarray.c:2906
bool IsBackendPid(int pid)
Definition procarray.c:3251
@ ForwardScanDirection
Definition sdir.h:28
void shm_mq_detach(shm_mq_handle *mqh)
Definition shm_mq.c:845
Size add_size(Size s1, Size s2)
Definition shmem.c:455
Size mul_size(Size s1, Size s2)
Definition shmem.c:470
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition shmem.c:380
void ReplicationSlotAcquire(const char *name, bool nowait, bool error_if_invalid)
Definition slot.c:622
void ReplicationSlotCreate(const char *name, bool db_specific, ReplicationSlotPersistency persistency, bool two_phase, bool failover, bool synced)
Definition slot.c:380
void ReplicationSlotDropAcquired(void)
Definition slot.c:1035
void ReplicationSlotMarkDirty(void)
Definition slot.c:1177
void ReplicationSlotsComputeRequiredXmin(bool already_locked)
Definition slot.c:1219
ReplicationSlot * MyReplicationSlot
Definition slot.c:149
void ReplicationSlotSave(void)
Definition slot.c:1159
ReplicationSlot * SearchNamedReplicationSlot(const char *name, bool need_lock)
Definition slot.c:542
#define CONFLICT_DETECTION_SLOT
Definition slot.h:28
@ RS_PERSISTENT
Definition slot.h:45
static void SpinLockRelease(volatile slock_t *lock)
Definition spin.h:62
static void SpinLockAcquire(volatile slock_t *lock)
Definition spin.h:56
static void SpinLockInit(volatile slock_t *lock)
Definition spin.h:50
PGPROC * MyProc
Definition proc.c:69
TimestampTz last_start_time
Definition launcher.c:78
Definition pg_list.h:54
dsa_handle last_start_dsa
Definition launcher.c:65
dshash_table_handle last_start_dsh
Definition launcher.c:66
LogicalRepWorker workers[FLEXIBLE_ARRAY_MEMBER]
Definition launcher.c:69
XLogRecPtr relstate_lsn
TimestampTz last_recv_time
TimestampTz last_seqsync_start_time
LogicalRepWorkerType type
TimestampTz launch_time
TimestampTz reply_time
FileSet * stream_fileset
TransactionId oldest_nonremovable_xid
TimestampTz last_send_time
int pid
Definition proc.h:196
Latch procLatch
Definition proc.h:255
shm_mq_handle * error_mq_handle
ParallelApplyWorkerShared * shared
slock_t mutex
Definition slot.h:183
TransactionId effective_xmin
Definition slot.h:209
ReplicationSlotPersistentData data
Definition slot.h:213
void table_close(Relation relation, LOCKMODE lockmode)
Definition table.c:126
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition table.c:40
TableScanDesc table_beginscan_catalog(Relation relation, int nkeys, ScanKeyData *key)
Definition tableam.c:113
static void table_endscan(TableScanDesc scan)
Definition tableam.h:1004
#define InvalidTransactionId
Definition transam.h:31
static bool TransactionIdPrecedesOrEquals(TransactionId id1, TransactionId id2)
Definition transam.h:282
#define TransactionIdEquals(id1, id2)
Definition transam.h:43
#define TransactionIdIsValid(xid)
Definition transam.h:41
static bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition transam.h:263
void tuplestore_putvalues(Tuplestorestate *state, TupleDesc tdesc, const Datum *values, const bool *isnull)
Definition tuplestore.c:785
static Datum TimestampTzGetDatum(TimestampTz X)
Definition timestamp.h:52
#define WL_TIMEOUT
#define WL_EXIT_ON_PM_DEATH
#define WL_LATCH_SET
int wal_receiver_timeout
Definition walreceiver.c:91
#define walrcv_disconnect(conn)
#define SIGHUP
Definition win32_port.h:158
#define kill(pid, sig)
Definition win32_port.h:490
#define SIGUSR1
Definition win32_port.h:170
#define SIGUSR2
Definition win32_port.h:171
#define isParallelApplyWorker(worker)
#define isSequenceSyncWorker(worker)
LogicalRepWorkerType
@ WORKERTYPE_TABLESYNC
@ WORKERTYPE_UNKNOWN
@ WORKERTYPE_SEQUENCESYNC
@ WORKERTYPE_PARALLEL_APPLY
@ WORKERTYPE_APPLY
static bool am_leader_apply_worker(void)
#define isTableSyncWorker(worker)
void StartTransactionCommand(void)
Definition xact.c:3081
void CommitTransactionCommand(void)
Definition xact.c:3179
int wal_retrieve_retry_interval
Definition xlog.c:138
#define XLogRecPtrIsValid(r)
Definition xlogdefs.h:29
#define InvalidXLogRecPtr
Definition xlogdefs.h:28