PostgreSQL Source Code git master
Loading...
Searching...
No Matches
launcher.c
Go to the documentation of this file.
1/*-------------------------------------------------------------------------
2 * launcher.c
3 * PostgreSQL logical replication worker launcher process
4 *
5 * Copyright (c) 2016-2026, PostgreSQL Global Development Group
6 *
7 * IDENTIFICATION
8 * src/backend/replication/logical/launcher.c
9 *
10 * NOTES
11 * This module contains the logical replication worker launcher which
12 * uses the background worker infrastructure to start the logical
13 * replication workers for every enabled subscription.
14 *
15 *-------------------------------------------------------------------------
16 */
17
18#include "postgres.h"
19
20#include "access/heapam.h"
21#include "access/htup.h"
22#include "access/htup_details.h"
23#include "access/tableam.h"
24#include "access/xact.h"
27#include "funcapi.h"
28#include "lib/dshash.h"
29#include "miscadmin.h"
30#include "pgstat.h"
31#include "postmaster/bgworker.h"
34#include "replication/origin.h"
35#include "replication/slot.h"
38#include "storage/ipc.h"
39#include "storage/proc.h"
40#include "storage/procarray.h"
41#include "storage/subsystems.h"
42#include "tcop/tcopprot.h"
43#include "utils/builtins.h"
44#include "utils/memutils.h"
45#include "utils/pg_lsn.h"
46#include "utils/snapmgr.h"
47#include "utils/syscache.h"
48#include "utils/wait_event.h"
49
50/* max sleep time between cycles (3min) */
51#define DEFAULT_NAPTIME_PER_CYCLE 180000L
52
53/* GUC variables */
57
59
60typedef struct LogicalRepCtxStruct
61{
62 /* Supervisor process. */
64
65 /* Hash table holding last start times of subscriptions' apply workers. */
68
69 /* Background workers. */
72
74
75static void ApplyLauncherShmemRequest(void *arg);
76static void ApplyLauncherShmemInit(void *arg);
77
82
83/* an entry in the last-start-times shared hash table */
85{
86 Oid subid; /* OID of logrep subscription (hash key) */
87 TimestampTz last_start_time; /* last time its apply worker was started */
89
90/* parameters for the last-start-times shared hash table */
99
102
103static bool on_commit_launcher_wakeup = false;
104
105
106static void logicalrep_launcher_onexit(int code, Datum arg);
107static void logicalrep_worker_onexit(int code, Datum arg);
108static void logicalrep_worker_detach(void);
110static int logicalrep_pa_worker_count(Oid subid);
111static void logicalrep_launcher_attach_dshmem(void);
115static bool acquire_conflict_slot_if_exists(void);
117static void init_conflict_slot_xmin(void);
118
119
120/*
121 * Load the list of subscriptions.
122 *
123 * Only the fields interesting for worker start/stop functions are filled for
124 * each subscription.
125 */
/* NOTE(review): extracted listing — lines are missing where the embedded numbering skips; compare with upstream before relying on control flow. */
126static List *
128{
129 List *res = NIL;
130 Relation rel;
131 TableScanDesc scan;
134
135 /* This is the context that we will allocate our output data in */
137
138 /*
139 * Start a transaction so we can access pg_subscription.
140 */
142
144 scan = table_beginscan_catalog(rel, 0, NULL);
145
147 {
149 Subscription *sub;
151
152 /*
153 * Allocate our results in the caller's context, not the
154 * transaction's. We do this inside the loop, and restore the original
155 * context at the end, so that leaky things like heap_getnext() are
156 * not called in a potentially long-lived context.
157 */
159
161 sub->oid = subform->oid;
162 sub->dbid = subform->subdbid;
163 sub->owner = subform->subowner;
164 sub->enabled = subform->subenabled;
165 sub->name = pstrdup(NameStr(subform->subname));
166 sub->retaindeadtuples = subform->subretaindeadtuples;
167 sub->retentionactive = subform->subretentionactive;
168 /* We don't fill fields we are not interested in. */
169
170 res = lappend(res, sub);
172 }
173
174 table_endscan(scan);
176
178
179 return res;
180}
181
182/*
183 * Wait for a background worker to start up and attach to the shmem context.
184 *
185 * This is only needed for cleaning up the shared memory in case the worker
186 * fails to attach.
187 *
188 * Returns whether the attach was successful.
189 */
/* NOTE(review): extracted listing — lines are missing where the embedded numbering skips; compare with upstream before relying on control flow. */
190static bool
192 uint16 generation,
194{
195 bool result = false;
196 bool dropped_latch = false;
197
198 for (;;)
199 {
200 BgwHandleStatus status;
201 pid_t pid;
202 int rc;
203
205
207
208 /* Worker either died or has started. Return false if died. */
209 if (!worker->in_use || worker->proc)
210 {
211 result = worker->in_use;
213 break;
214 }
215
217
218 /* Check if worker has died before attaching, and clean up after it. */
219 status = GetBackgroundWorkerPid(handle, &pid);
220
221 if (status == BGWH_STOPPED)
222 {
224 /* Ensure that this was indeed the worker we waited for. */
225 if (generation == worker->generation)
228 break; /* result is already false */
229 }
230
231 /*
232 * We need timeout because we generally don't get notified via latch
233 * about the worker attach. But we don't expect to have to wait long.
234 */
235 rc = WaitLatch(MyLatch,
238
239 if (rc & WL_LATCH_SET)
240 {
243 dropped_latch = true;
244 }
245 }
246
247 /*
248 * If we had to clear a latch event in order to wait, be sure to restore
249 * it before exiting. Otherwise caller may miss events.
250 */
251 if (dropped_latch)
253
254 return result;
255}
256
257/*
258 * Walks the workers array and searches for one that matches given worker type,
259 * subscription id, and relation id.
260 *
261 * For both apply workers and sequencesync workers, the relid should be set to
262 * InvalidOid, as these workers handle changes across all tables and sequences
263 * respectively, rather than targeting a specific relation. For tablesync
264 * workers, the relid should be set to the OID of the relation being
265 * synchronized.
266 */
/* NOTE(review): extracted listing — the function signature and some body lines are missing (see number skips); compare with upstream before relying on it. */
269 bool only_running)
270{
271 int i;
272 LogicalRepWorker *res = NULL;
273
274 /* relid must be valid only for table sync workers */
277
278 /* Search for an attached worker that matches the specified criteria. */
279 for (i = 0; i < max_logical_replication_workers; i++)
280 {
282
283 /* Skip parallel apply workers. */
285 continue;
286
287 if (w->in_use && w->subid == subid && w->relid == relid &&
288 w->type == wtype && (!only_running || w->proc))
289 {
290 res = w;
291 break;
292 }
293 }
294
295 return res;
296}
297
298/*
299 * Similar to logicalrep_worker_find(), but returns a list of all workers for
300 * the subscription, instead of just one.
301 */
/* NOTE(review): extracted listing — signature and lock acquire/release lines are missing (see number skips); compare with upstream before relying on it. */
302List *
304{
305 int i;
306 List *res = NIL;
307
308 if (acquire_lock)
310
312
313 /* Search for attached worker for a given subscription id. */
314 for (i = 0; i < max_logical_replication_workers; i++)
315 {
317
318 if (w->in_use && w->subid == subid && (!only_running || w->proc))
319 res = lappend(res, w);
320 }
321
322 if (acquire_lock)
324
325 return res;
326}
327
328/*
329 * Start new logical replication background worker, if possible.
330 *
331 * Returns true on success, false on failure.
332 */
/* NOTE(review): extracted listing — lines are missing where the embedded numbering skips; compare with upstream before relying on control flow. */
333bool
335 Oid dbid, Oid subid, const char *subname, Oid userid,
338{
341 uint16 generation;
342 int i;
343 int slot = 0;
344 LogicalRepWorker *worker = NULL;
345 int nsyncworkers;
351
352 /*----------
353 * Sanity checks:
354 * - must be valid worker type
355 * - tablesync workers are only ones to have relid
356 * - parallel apply worker is the only kind of subworker
357 * - The replication slot used in conflict detection is created when
358 * retain_dead_tuples is enabled
359 */
364
366 (errmsg_internal("starting logical replication worker for subscription \"%s\"",
367 subname)));
368
369 /* Report this after the initial starting message for consistency. */
373 errmsg("cannot start logical replication workers when \"max_active_replication_origins\" is 0")));
374
375 /*
376 * We need to do the modification of the shared memory under lock so that
377 * we have consistent view.
378 */
380
381retry:
382 /* Find unused worker slot. */
383 for (i = 0; i < max_logical_replication_workers; i++)
384 {
386
387 if (!w->in_use)
388 {
389 worker = w;
390 slot = i;
391 break;
392 }
393 }
394
396
398
399 /*
400 * If we didn't find a free slot, try to do garbage collection. The
401 * reason we do this is because if some worker failed to start up and its
402 * parent has crashed while waiting, the in_use state was never cleared.
403 */
405 {
406 bool did_cleanup = false;
407
408 for (i = 0; i < max_logical_replication_workers; i++)
409 {
411
412 /*
413 * If the worker was marked in use but didn't manage to attach in
414 * time, clean it up.
415 */
416 if (w->in_use && !w->proc &&
419 {
421 "logical replication worker for subscription %u took too long to start; canceled",
422 w->subid);
423
425 did_cleanup = true;
426 }
427 }
428
429 if (did_cleanup)
430 goto retry;
431 }
432
433 /*
434 * We don't allow to invoke more sync workers once we have reached the
435 * sync worker limit per subscription. So, just return silently as we
436 * might get here because of an otherwise harmless race condition.
437 */
440 {
442 return false;
443 }
444
446
447 /*
448 * Return false if the number of parallel apply workers reached the limit
449 * per subscription.
450 */
453 {
455 return false;
456 }
457
458 /*
459 * However if there are no more free worker slots, inform user about it
460 * before exiting.
461 */
462 if (worker == NULL)
463 {
467 errmsg("out of logical replication worker slots"),
468 errhint("You might need to increase \"%s\".", "max_logical_replication_workers")));
469 return false;
470 }
471
472 /* Prepare the worker slot. */
473 worker->type = wtype;
474 worker->launch_time = now;
475 worker->in_use = true;
476 worker->generation++;
477 worker->proc = NULL;
478 worker->dbid = dbid;
479 worker->userid = userid;
480 worker->subid = subid;
481 worker->relid = relid;
484 worker->stream_fileset = NULL;
490 worker->last_lsn = InvalidXLogRecPtr;
495 worker->last_seqsync_start_time = 0;
496
497 /* Before releasing lock, remember generation for future identification. */
498 generation = worker->generation;
499
501
502 /* Register the new dynamic worker. */
503 memset(&bgw, 0, sizeof(bgw));
504 bgw.bgw_flags = BGWORKER_SHMEM_ACCESS |
506 bgw.bgw_start_time = BgWorkerStart_RecoveryFinished;
507 snprintf(bgw.bgw_library_name, MAXPGPATH, "postgres");
508
509 switch (worker->type)
510 {
511 case WORKERTYPE_APPLY:
512 snprintf(bgw.bgw_function_name, BGW_MAXLEN, "ApplyWorkerMain");
513 snprintf(bgw.bgw_name, BGW_MAXLEN,
514 "logical replication apply worker for subscription %u",
515 subid);
516 snprintf(bgw.bgw_type, BGW_MAXLEN, "logical replication apply worker");
517 break;
518
520 snprintf(bgw.bgw_function_name, BGW_MAXLEN, "ParallelApplyWorkerMain");
521 snprintf(bgw.bgw_name, BGW_MAXLEN,
522 "logical replication parallel apply worker for subscription %u",
523 subid);
524 snprintf(bgw.bgw_type, BGW_MAXLEN, "logical replication parallel worker");
525
526 memcpy(bgw.bgw_extra, &subworker_dsm, sizeof(dsm_handle));
527 break;
528
530 snprintf(bgw.bgw_function_name, BGW_MAXLEN, "SequenceSyncWorkerMain");
531 snprintf(bgw.bgw_name, BGW_MAXLEN,
532 "logical replication sequencesync worker for subscription %u",
533 subid);
534 snprintf(bgw.bgw_type, BGW_MAXLEN, "logical replication sequencesync worker");
535 break;
536
538 snprintf(bgw.bgw_function_name, BGW_MAXLEN, "TableSyncWorkerMain");
539 snprintf(bgw.bgw_name, BGW_MAXLEN,
540 "logical replication tablesync worker for subscription %u sync %u",
541 subid,
542 relid);
543 snprintf(bgw.bgw_type, BGW_MAXLEN, "logical replication tablesync worker");
544 break;
545
547 /* Should never happen. */
548 elog(ERROR, "unknown worker type");
549 }
550
551 bgw.bgw_restart_time = BGW_NEVER_RESTART;
552 bgw.bgw_notify_pid = MyProcPid;
553 bgw.bgw_main_arg = Int32GetDatum(slot);
554
556 {
557 /* Failed to start worker, so clean up the worker slot. */
559 Assert(generation == worker->generation);
562
565 errmsg("out of background worker slots"),
566 errhint("You might need to increase \"%s\".", "max_worker_processes")));
567 return false;
568 }
569
570 /* Now wait until it attaches. */
571 return WaitForReplicationWorkerAttach(worker, generation, bgw_handle);
572}
573
574/*
575 * Internal function to stop the worker and wait until it detaches from the
576 * slot.
577 */
/* NOTE(review): extracted listing — lines are missing where the embedded numbering skips; compare with upstream before relying on control flow. */
578static void
580{
581 uint16 generation;
582
584
585 /*
586 * Remember which generation was our worker so we can check if what we see
587 * is still the same one.
588 */
589 generation = worker->generation;
590
591 /*
592 * If we found a worker but it does not have proc set then it is still
593 * starting up; wait for it to finish starting and then kill it.
594 */
595 while (worker->in_use && !worker->proc)
596 {
597 int rc;
598
600
601 /* Wait a bit --- we don't expect to have to wait long. */
602 rc = WaitLatch(MyLatch,
605
606 if (rc & WL_LATCH_SET)
607 {
610 }
611
612 /* Recheck worker status. */
614
615 /*
616 * Check whether the worker slot is no longer used, which would mean
617 * that the worker has exited, or whether the worker generation is
618 * different, meaning that a different worker has taken the slot.
619 */
620 if (!worker->in_use || worker->generation != generation)
621 return;
622
623 /* Worker has assigned proc, so it has started. */
624 if (worker->proc)
625 break;
626 }
627
628 /* Now terminate the worker ... */
629 kill(worker->proc->pid, signo);
630
631 /* ... and wait for it to die. */
632 for (;;)
633 {
634 int rc;
635
636 /* is it gone? */
637 if (!worker->proc || worker->generation != generation)
638 break;
639
641
642 /* Wait a bit --- we don't expect to have to wait long. */
643 rc = WaitLatch(MyLatch,
646
647 if (rc & WL_LATCH_SET)
648 {
651 }
652
654 }
655}
656
657/*
658 * Stop the logical replication worker that matches the specified worker type,
659 * subscription id, and relation id.
660 */
/* NOTE(review): extracted listing — the Assert and lock acquire/release lines are missing here (see number skips). */
661void
663{
664 LogicalRepWorker *worker;
665
666 /* relid must be valid only for table sync workers */
668
670
671 worker = logicalrep_worker_find(wtype, subid, relid, false);
672
673 if (worker)
674 {
677 }
678
680}
681
682/*
683 * Stop the given logical replication parallel apply worker.
684 *
685 * Note that the function sends SIGUSR2 instead of SIGTERM to the parallel apply
686 * worker so that the worker exits cleanly.
687 */
/* NOTE(review): extracted listing — lines are missing where the embedded numbering skips (e.g. the slot_no read under the mutex). */
688void
690{
691 int slot_no;
692 uint16 generation;
693 LogicalRepWorker *worker;
694
695 SpinLockAcquire(&winfo->shared->mutex);
696 generation = winfo->shared->logicalrep_worker_generation;
698 SpinLockRelease(&winfo->shared->mutex);
699
701
702 /*
703 * Detach from the error_mq_handle for the parallel apply worker before
704 * stopping it. This prevents the leader apply worker from trying to
705 * receive the message from the error queue that might already be detached
706 * by the parallel apply worker.
707 */
708 if (winfo->error_mq_handle)
709 {
711 winfo->error_mq_handle = NULL;
712 }
713
715
716 worker = &LogicalRepCtx->workers[slot_no];
718
719 /*
720 * Only stop the worker if the generation matches and the worker is alive.
721 */
722 if (worker->generation == generation && worker->proc)
724
726}
727
728/*
729 * Wake up (using latch) any logical replication worker that matches the
730 * specified worker type, subscription id, and relation id.
731 */
/* NOTE(review): extracted listing — Assert, lock calls and the wakeup call itself are missing here (see number skips). */
732void
734{
735 LogicalRepWorker *worker;
736
737 /* relid must be valid only for table sync workers */
739
741
742 worker = logicalrep_worker_find(wtype, subid, relid, true);
743
744 if (worker)
746
748}
749
750/*
751 * Wake up (using latch) the specified logical replication worker.
752 *
753 * Caller must hold lock, else worker->proc could change under us.
754 */
755void
762
763/*
764 * Attach to a slot.
765 */
/* NOTE(review): extracted listing — lines are missing where the embedded numbering skips (signature, ereport heads, assignments). */
766void
768{
769 /* Block concurrent access. */
771
772 Assert(slot >= 0 && slot < max_logical_replication_workers);
774
776 {
780 errmsg("logical replication worker slot %d is empty, cannot attach",
781 slot)));
782 }
783
785 {
789 errmsg("logical replication worker slot %d is already used by "
790 "another worker, cannot attach", slot)));
791 }
792
795
797}
798
799/*
800 * Stop the parallel apply workers if any, and detach the leader apply worker
801 * (cleans up the worker info).
802 */
/* NOTE(review): extracted listing — lines are missing where the embedded numbering skips; compare with upstream before relying on control flow. */
803static void
805{
806 /* Stop the parallel apply workers. */
808 {
809 List *workers;
810 ListCell *lc;
811
812 /*
813 * Detach from the error_mq_handle for all parallel apply workers
814 * before terminating them. This prevents the leader apply worker from
815 * receiving the worker termination message and sending it to logs
816 * when the same is already done by the parallel worker.
817 */
819
821
822 workers = logicalrep_workers_find(MyLogicalRepWorker->subid, true, false);
823 foreach(lc, workers)
824 {
826
829 }
830
832
833 list_free(workers);
834 }
835
836 /* Block concurrent access. */
838
840
842}
843
844/*
845 * Clean up worker info.
846 */
/* NOTE(review): extracted listing — the signature and an Assert line are missing here (see number skips). Resets the shared slot to its unused state. */
847static void
849{
851
852 worker->type = WORKERTYPE_UNKNOWN;
853 worker->in_use = false;
854 worker->proc = NULL;
855 worker->dbid = InvalidOid;
856 worker->userid = InvalidOid;
857 worker->subid = InvalidOid;
858 worker->relid = InvalidOid;
859 worker->leader_pid = InvalidPid;
860 worker->parallel_apply = false;
861}
862
863/*
864 * Cleanup function for logical replication launcher.
865 *
866 * Called on logical replication launcher exit.
867 */
868static void
873
874/*
875 * Reset the last_seqsync_start_time of the sequencesync worker in the
876 * subscription's apply worker.
877 *
878 * Note that this value is not stored in the sequencesync worker, because that
879 * has finished already and is about to exit.
880 */
/* NOTE(review): extracted listing — the signature, lock calls and the worker-find call head are missing here (see number skips). */
881void
883{
884 LogicalRepWorker *worker;
885
886 /*
887 * The apply worker can't access last_seqsync_start_time concurrently, so
888 * it is okay to use SHARED lock here. See ProcessSequencesForSync().
889 */
891
894 true);
895 if (worker)
896 worker->last_seqsync_start_time = 0;
897
899}
900
901/*
902 * Cleanup function.
903 *
904 * Called on logical replication worker exit.
905 */
/* NOTE(review): extracted listing — the disconnect, detach, fileset-cleanup and lock-release calls are missing here (see number skips). */
906static void
908{
909 /* Disconnect gracefully from the remote side. */
912
914
915 /* Cleanup fileset used for streaming transactions. */
918
919 /*
920 * Session level locks may be acquired outside of a transaction in
921 * parallel apply mode and will not be released when the worker
922 * terminates, so manually release all locks before the worker exits.
923 *
924 * The locks will be acquired once the worker is initialized.
925 */
928
930}
931
932/*
933 * Count the number of registered (not necessarily running) sync workers
934 * for a subscription.
935 */
/* NOTE(review): extracted listing — the signature, an Assert and the per-slot declaration are missing here (see number skips). */
936int
938{
939 int i;
940 int res = 0;
941
943
944 /* Search for attached worker for a given subscription id. */
945 for (i = 0; i < max_logical_replication_workers; i++)
946 {
948
949 if (w->subid == subid && (isTableSyncWorker(w) || isSequenceSyncWorker(w)))
950 res++;
951 }
952
953 return res;
954}
955
956/*
957 * Count the number of registered (but not necessarily running) parallel apply
958 * workers for a subscription.
959 */
/* NOTE(review): extracted listing — the signature, an Assert and the per-slot declaration are missing here (see number skips). */
960static int
962{
963 int i;
964 int res = 0;
965
967
968 /*
969 * Scan all attached parallel apply workers, only counting those which
970 * have the given subscription id.
971 */
972 for (i = 0; i < max_logical_replication_workers; i++)
973 {
975
976 if (isParallelApplyWorker(w) && w->subid == subid)
977 res++;
978 }
979
980 return res;
981}
982
983/*
984 * ApplyLauncherShmemRequest
985 * Register shared memory space needed for replication launcher
986 */
/* NOTE(review): extracted listing — the signature and the add_size/mul_size line head are missing here (see number skips). */
987static void
989{
990 Size size;
991
992 /*
993 * Need the fixed struct and the array of LogicalRepWorker.
994 */
995 size = sizeof(LogicalRepCtxStruct);
996 size = MAXALIGN(size);
998 sizeof(LogicalRepWorker)));
999 ShmemRequestStruct(.name = "Logical Replication Launcher Data",
1000 .size = size,
1001 .ptr = (void **) &LogicalRepCtx,
1002 );
1003}
1004
1005/*
1006 * ApplyLauncherRegister
1007 * Register a background worker running the logical replication launcher.
1008 */
/* NOTE(review): extracted listing — the signature, the BackgroundWorker declaration and the final RegisterBackgroundWorker call are missing here (see number skips). */
1009void
1011{
1013
1014 /*
1015 * The logical replication launcher is disabled during binary upgrades, to
1016 * prevent logical replication workers from running on the source cluster.
1017 * That could cause replication origins to move forward after having been
1018 * copied to the target cluster, potentially creating conflicts with the
1019 * copied data files.
1020 */
1022 return;
1023
1024 memset(&bgw, 0, sizeof(bgw));
1025 bgw.bgw_flags = BGWORKER_SHMEM_ACCESS |
1027 bgw.bgw_start_time = BgWorkerStart_RecoveryFinished;
1028 snprintf(bgw.bgw_library_name, MAXPGPATH, "postgres");
1029 snprintf(bgw.bgw_function_name, BGW_MAXLEN, "ApplyLauncherMain");
1030 snprintf(bgw.bgw_name, BGW_MAXLEN,
1031 "logical replication launcher");
1032 snprintf(bgw.bgw_type, BGW_MAXLEN,
1033 "logical replication launcher");
1034 bgw.bgw_restart_time = 5;
1035 bgw.bgw_notify_pid = 0;
1036 bgw.bgw_main_arg = (Datum) 0;
1037
1039}
1040
1041/*
1042 * ApplyLauncherShmemInit
1043 * Initialize replication launcher shared memory
1044 */
/* NOTE(review): extracted listing — the signature and the initial memset of the fixed struct are missing here (see number skips). */
1045static void
1047{
1048 int slot;
1049
1052
1053 /* Initialize memory and spin locks for each worker slot. */
1054 for (slot = 0; slot < max_logical_replication_workers; slot++)
1055 {
1056 LogicalRepWorker *worker = &LogicalRepCtx->workers[slot];
1057
1058 memset(worker, 0, sizeof(LogicalRepWorker));
1059 SpinLockInit(&worker->relmutex);
1060 }
1061}
1062
1063/*
1064 * Initialize or attach to the dynamic shared hash table that stores the
1065 * last-start times, if not already done.
1066 * This must be called before accessing the table.
1067 */
/* NOTE(review): extracted listing — the dsa/dshash create and attach call lines are missing here (see number skips). */
1068static void
1070{
1071 MemoryContext oldcontext;
1072
1073 /* Quick exit if we already did this. */
1076 return;
1077
1078 /* Otherwise, use a lock to ensure only one process creates the table. */
1080
1081 /* Be sure any local memory allocated by DSA routines is persistent. */
1083
1085 {
1086 /* Initialize dynamic shared hash table for last-start times. */
1091
1092 /* Store handles in shared memory for other backends to use. */
1095 }
1096 else if (!last_start_times)
1097 {
1098 /* Attach to existing dynamic shared hash table. */
1103 }
1104
1105 MemoryContextSwitchTo(oldcontext);
1107}
1108
1109/*
1110 * Set the last-start time for the subscription.
1111 */
1112static void
1124
1125/*
1126 * Return the last-start time for the subscription, or 0 if there isn't one.
1127 */
/* NOTE(review): extracted listing — the signature, the entry declaration and the dshash_release_lock call are missing here (see number skips). */
1128static TimestampTz
1130{
1132 TimestampTz ret;
1133
1135
1136 entry = dshash_find(last_start_times, &subid, false);
1137 if (entry == NULL)
1138 return 0;
1139
1140 ret = entry->last_start_time;
1142
1143 return ret;
1144}
1145
1146/*
1147 * Remove the last-start-time entry for the subscription, if one exists.
1148 *
1149 * This has two use-cases: to remove the entry related to a subscription
1150 * that's been deleted or disabled (just to avoid leaking shared memory),
1151 * and to allow immediate restart of an apply worker that has exited
1152 * due to subscription parameter changes.
1153 */
1154void
1161
1162/*
1163 * Wakeup the launcher on commit if requested.
1164 */
/* NOTE(review): extracted listing — the signature, the conditional wakeup call and the flag reset are missing here (see number skips). */
1165void
1167{
1168 if (isCommit)
1169 {
1172 }
1173
1175}
1176
1177/*
1178 * Request wakeup of the launcher on commit of the transaction.
1179 *
1180 * This is used to send launcher signal to stop sleeping and process the
1181 * subscriptions when current transaction commits. Should be used when new
1182 * tuple was added to the pg_subscription catalog.
1183*/
1184void
1190
1191/*
1192 * Wakeup the launcher immediately.
1193 */
1194void
1200
1201/*
1202 * Main loop for the apply launcher process.
1203 */
/* NOTE(review): extracted listing — lines are missing where the embedded numbering skips; compare with upstream before relying on control flow. */
1204void
1206{
1208 (errmsg_internal("logical replication launcher started")));
1209
1211
1214
1215 /* Establish signal handlers. */
1218
1219 /*
1220 * Establish connection to nailed catalogs (we only ever access
1221 * pg_subscription).
1222 */
1224
1225 /*
1226 * Acquire the conflict detection slot at startup to ensure it can be
1227 * dropped if no longer needed after a restart.
1228 */
1230
1231 /* Enter main loop */
1232 for (;;)
1233 {
1234 int rc;
1235 List *sublist;
1236 ListCell *lc;
1239 long wait_time = DEFAULT_NAPTIME_PER_CYCLE;
1240 bool can_update_xmin = true;
1241 bool retain_dead_tuples = false;
1243
1245
1246 /* Use temporary context to avoid leaking memory across cycles. */
1248 "Logical Replication Launcher sublist",
1251
1252 /*
1253 * Start any missing workers for enabled subscriptions.
1254 *
1255 * Also, during the iteration through all subscriptions, we compute
1256 * the minimum XID required to protect deleted tuples for conflict
1257 * detection if one of the subscription enables retain_dead_tuples
1258 * option.
1259 */
1261 foreach(lc, sublist)
1262 {
1263 Subscription *sub = (Subscription *) lfirst(lc);
1267 long elapsed;
1268
1269 if (sub->retaindeadtuples)
1270 {
1271 retain_dead_tuples = true;
1272
1273 /*
1274 * Create a replication slot to retain information necessary
1275 * for conflict detection such as dead tuples, commit
1276 * timestamps, and origins.
1277 *
1278 * The slot is created before starting the apply worker to
1279 * prevent it from unnecessarily maintaining its
1280 * oldest_nonremovable_xid.
1281 *
1282 * The slot is created even for a disabled subscription to
1283 * ensure that conflict-related information is available when
1284 * applying remote changes that occurred before the
1285 * subscription was enabled.
1286 */
1288
1289 if (sub->retentionactive)
1290 {
1291 /*
1292 * Can't advance xmin of the slot unless all the
1293 * subscriptions actively retaining dead tuples are
1294 * enabled. This is required to ensure that we don't
1295 * advance the xmin of CONFLICT_DETECTION_SLOT if one of
1296 * the subscriptions is not enabled. Otherwise, we won't
1297 * be able to detect conflicts reliably for such a
1298 * subscription even though it has set the
1299 * retain_dead_tuples option.
1300 */
1301 can_update_xmin &= sub->enabled;
1302
1303 /*
1304 * Initialize the slot once the subscription activates
1305 * retention.
1306 */
1309 }
1310 }
1311
1312 if (!sub->enabled)
1313 continue;
1314
1317 false);
1318
1319 if (w != NULL)
1320 {
1321 /*
1322 * Compute the minimum xmin required to protect dead tuples
1323 * required for conflict detection among all running apply
1324 * workers. This computation is performed while holding
1325 * LogicalRepWorkerLock to prevent accessing invalid worker
1326 * data, in scenarios where a worker might exit and reset its
1327 * state concurrently.
1328 */
1329 if (sub->retaindeadtuples &&
1330 sub->retentionactive &&
1333
1335
1336 /* worker is running already */
1337 continue;
1338 }
1339
1341
1342 /*
1343 * Can't advance xmin of the slot unless all the workers
1344 * corresponding to subscriptions actively retaining dead tuples
1345 * are running, disabling the further computation of the minimum
1346 * nonremovable xid.
1347 */
1348 if (sub->retaindeadtuples && sub->retentionactive)
1349 can_update_xmin = false;
1350
1351 /*
1352 * If the worker is eligible to start now, launch it. Otherwise,
1353 * adjust wait_time so that we'll wake up as soon as it can be
1354 * started.
1355 *
1356 * Each subscription's apply worker can only be restarted once per
1357 * wal_retrieve_retry_interval, so that errors do not cause us to
1358 * repeatedly restart the worker as fast as possible. In cases
1359 * where a restart is expected (e.g., subscription parameter
1360 * changes), another process should remove the last-start entry
1361 * for the subscription so that the worker can be restarted
1362 * without waiting for wal_retrieve_retry_interval to elapse.
1363 */
1366 if (last_start == 0 ||
1368 {
1371 sub->dbid, sub->oid, sub->name,
1372 sub->owner, InvalidOid,
1374 sub->retaindeadtuples &&
1375 sub->retentionactive))
1376 {
1377 /*
1378 * We get here either if we failed to launch a worker
1379 * (perhaps for resource-exhaustion reasons) or if we
1380 * launched one but it immediately quit. Either way, it
1381 * seems appropriate to try again after
1382 * wal_retrieve_retry_interval.
1383 */
1384 wait_time = Min(wait_time,
1386 }
1387 }
1388 else
1389 {
1390 wait_time = Min(wait_time,
1392 }
1393 }
1394
1395 /*
1396 * Drop the CONFLICT_DETECTION_SLOT slot if there is no subscription
1397 * that requires us to retain dead tuples. Otherwise, if required,
1398 * advance the slot's xmin to protect dead tuples required for the
1399 * conflict detection.
1400 *
1401 * Additionally, if all apply workers for subscriptions with
1402 * retain_dead_tuples enabled have requested to stop retention, the
1403 * slot's xmin will be set to InvalidTransactionId allowing the
1404 * removal of dead tuples.
1405 */
1407 {
1408 if (!retain_dead_tuples)
1410 else if (can_update_xmin)
1412 }
1413
1414 /* Switch back to original memory context. */
1416 /* Clean the temporary memory. */
1418
1419 /* Wait for more work. */
1420 rc = WaitLatch(MyLatch,
1422 wait_time,
1424
1425 if (rc & WL_LATCH_SET)
1426 {
1429 }
1430
1432 {
1433 ConfigReloadPending = false;
1435 }
1436 }
1437
1438 /* Not reachable */
1439}
1440
1441/*
1442 * Determine the minimum non-removable transaction ID across all apply workers
1443 * for subscriptions that have retain_dead_tuples enabled. Store the result
1444 * in *xmin.
1445 */
/* NOTE(review): extracted listing — the signature, local declarations and some guard lines are missing here (see number skips). */
1446static void
1448{
1450
1451 Assert(worker != NULL);
1452
1453 /*
1454 * The replication slot for conflict detection must be created before the
1455 * worker starts.
1456 */
1458
1459 SpinLockAcquire(&worker->relmutex);
1461 SpinLockRelease(&worker->relmutex);
1462
1463 /*
1464 * Return if the apply worker has stopped retention concurrently.
1465 *
1466 * Although this function is invoked only when retentionactive is true,
1467 * the apply worker might stop retention after the launcher fetches the
1468 * retentionactive flag.
1469 */
1471 return;
1472
1473 if (!TransactionIdIsValid(*xmin) ||
1475 *xmin = nonremovable_xid;
1476}
1477
1478/*
1479 * Acquire the replication slot used to retain information for conflict
1480 * detection, if it exists.
1481 *
1482 * Return true if successfully acquired, otherwise return false.
1483 */
/* NOTE(review): extracted listing — the signature, the existence check and the acquire call are missing here (see number skips). */
1484static bool
1486{
1488 return false;
1489
1491 return true;
1492}
1493
1494/*
1495 * Update the xmin of the replication slot used to retain information required
1496 * for conflict detection.
1497 */
/* NOTE(review): extracted listing — the signature, Asserts and the spinlocked slot-field updates are missing here (see number skips). */
1498static void
1500{
1504
1505 /* Return if the xmin value of the slot cannot be updated */
1507 return;
1508
1513
1514 elog(DEBUG1, "updated xmin: %u", MyReplicationSlot->data.xmin);
1515
1518
1519 /*
1520 * Like PhysicalConfirmReceivedLocation(), do not save slot information
1521 * each time. This is acceptable because all concurrent transactions on
1522 * the publisher that require the data preceding the slot's xmin should
1523 * have already been applied and flushed on the subscriber before the xmin
1524 * is advanced. So, even if the slot's xmin regresses after a restart, it
1525 * will be advanced again in the next cycle. Therefore, no data required
1526 * for conflict detection will be prematurely removed.
1527 */
1528 return;
1529}
1530
1531/*
1532 * Initialize the xmin for the conflict detection slot.
1533 */
1534static void
1562
1563/*
1564 * Create and acquire the replication slot used to retain information for
1565 * conflict detection, if not yet.
1566 */
/* NOTE(review): extracted listing — the signature, the early-exit check and the slot-create call head are missing here (see number skips). */
1567void
1569{
1570 /* Exit early, if the replication slot is already created and acquired */
1572 return;
1573
1574 ereport(LOG,
1575 errmsg("creating replication conflict detection slot"));
1576
1578 false, false, false);
1579
1581}
1582
1583/*
1584 * Is current process the logical replication launcher?
1585 */
/* NOTE(review): extracted listing — the signature and the return expression are missing here (see number skips). */
1586bool
1588{
1590}
1591
1592/*
1593 * Return the pid of the leader apply worker if the given pid is the pid of a
1594 * parallel apply worker, otherwise, return InvalidPid.
1595 */
/* NOTE(review): extracted listing — the signature, lock calls and the per-slot declaration are missing here (see number skips). */
1596pid_t
1598{
1599 int leader_pid = InvalidPid;
1600 int i;
1601
1603
1604 for (i = 0; i < max_logical_replication_workers; i++)
1605 {
1607
1608 if (isParallelApplyWorker(w) && w->proc && pid == w->proc->pid)
1609 {
1610 leader_pid = w->leader_pid;
1611 break;
1612 }
1613 }
1614
1616
1617 return leader_pid;
1618}
1619
1620/*
1621 * Returns state of the subscriptions.
1622 */
/* NOTE(review): extracted listing — lines are missing where the embedded numbering skips (signature, values[] declaration, lock calls, some case labels); compare with upstream before relying on control flow. */
1623Datum
1625{
1626#define PG_STAT_GET_SUBSCRIPTION_COLS 10
1627 Oid subid = PG_ARGISNULL(0) ? InvalidOid : PG_GETARG_OID(0);
1628 int i;
1629 ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
1630
1631 InitMaterializedSRF(fcinfo, 0);
1632
1633 /* Make sure we get consistent view of the workers. */
1635
1636 for (i = 0; i < max_logical_replication_workers; i++)
1637 {
1638 /* for each row */
1640 bool nulls[PG_STAT_GET_SUBSCRIPTION_COLS] = {0};
1641 int worker_pid;
1642 LogicalRepWorker worker;
1643
1644 memcpy(&worker, &LogicalRepCtx->workers[i],
1645 sizeof(LogicalRepWorker));
1646 if (!worker.proc || !IsBackendPid(worker.proc->pid))
1647 continue;
1648
1649 if (OidIsValid(subid) && worker.subid != subid)
1650 continue;
1651
1652 worker_pid = worker.proc->pid;
1653
1654 values[0] = ObjectIdGetDatum(worker.subid);
1655 if (isTableSyncWorker(&worker))
1656 values[1] = ObjectIdGetDatum(worker.relid);
1657 else
1658 nulls[1] = true;
1659 values[2] = Int32GetDatum(worker_pid);
1660
1661 if (isParallelApplyWorker(&worker))
1662 values[3] = Int32GetDatum(worker.leader_pid);
1663 else
1664 nulls[3] = true;
1665
1666 if (!XLogRecPtrIsValid(worker.last_lsn))
1667 nulls[4] = true;
1668 else
1669 values[4] = LSNGetDatum(worker.last_lsn);
1670 if (worker.last_send_time == 0)
1671 nulls[5] = true;
1672 else
1674 if (worker.last_recv_time == 0)
1675 nulls[6] = true;
1676 else
1678 if (!XLogRecPtrIsValid(worker.reply_lsn))
1679 nulls[7] = true;
1680 else
1681 values[7] = LSNGetDatum(worker.reply_lsn);
1682 if (worker.reply_time == 0)
1683 nulls[8] = true;
1684 else
1686
1687 switch (worker.type)
1688 {
1689 case WORKERTYPE_APPLY:
1690 values[9] = CStringGetTextDatum("apply");
1691 break;
1693 values[9] = CStringGetTextDatum("parallel apply");
1694 break;
1696 values[9] = CStringGetTextDatum("sequence synchronization");
1697 break;
1699 values[9] = CStringGetTextDatum("table synchronization");
1700 break;
1701 case WORKERTYPE_UNKNOWN:
1702 /* Should never happen. */
1703 elog(ERROR, "unknown worker type");
1704 }
1705
1706 tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc,
1707 values, nulls);
1708
1709 /*
1710 * If only a single subscription was requested, and we found it,
1711 * break.
1712 */
1713 if (OidIsValid(subid))
1714 break;
1715 }
1716
1718
1719 return (Datum) 0;
1720}
void pa_detach_all_error_mq(void)
bool InitializingApplyWorker
Definition worker.c:504
WalReceiverConn * LogRepWorkerWalRcvConn
Definition worker.c:482
long TimestampDifferenceMilliseconds(TimestampTz start_time, TimestampTz stop_time)
Definition timestamp.c:1751
bool TimestampDifferenceExceeds(TimestampTz start_time, TimestampTz stop_time, int msec)
Definition timestamp.c:1775
TimestampTz GetCurrentTimestamp(void)
Definition timestamp.c:1639
Datum now(PG_FUNCTION_ARGS)
Definition timestamp.c:1603
void RegisterBackgroundWorker(BackgroundWorker *worker)
Definition bgworker.c:962
void BackgroundWorkerInitializeConnection(const char *dbname, const char *username, uint32 flags)
Definition bgworker.c:875
void BackgroundWorkerUnblockSignals(void)
Definition bgworker.c:949
BgwHandleStatus GetBackgroundWorkerPid(BackgroundWorkerHandle *handle, pid_t *pidp)
Definition bgworker.c:1180
bool RegisterDynamicBackgroundWorker(BackgroundWorker *worker, BackgroundWorkerHandle **handle)
Definition bgworker.c:1068
#define BGW_NEVER_RESTART
Definition bgworker.h:92
BgwHandleStatus
Definition bgworker.h:111
@ BGWH_STOPPED
Definition bgworker.h:114
@ BgWorkerStart_RecoveryFinished
Definition bgworker.h:88
#define BGWORKER_BACKEND_DATABASE_CONNECTION
Definition bgworker.h:60
#define BGWORKER_SHMEM_ACCESS
Definition bgworker.h:53
#define BGW_MAXLEN
Definition bgworker.h:93
static Datum values[MAXATTR]
Definition bootstrap.c:190
#define CStringGetTextDatum(s)
Definition builtins.h:98
#define NameStr(name)
Definition c.h:835
#define Min(x, y)
Definition c.h:1091
#define MAXALIGN(LEN)
Definition c.h:896
#define Assert(condition)
Definition c.h:943
#define FLEXIBLE_ARRAY_MEMBER
Definition c.h:558
uint16_t uint16
Definition c.h:623
uint32 TransactionId
Definition c.h:736
#define OidIsValid(objectId)
Definition c.h:858
size_t Size
Definition c.h:689
uint32 result
memcpy(sums, checksumBaseOffsets, sizeof(checksumBaseOffsets))
int64 TimestampTz
Definition timestamp.h:39
#define TIMESTAMP_NOBEGIN(j)
Definition timestamp.h:159
dsa_area * dsa_attach(dsa_handle handle)
Definition dsa.c:510
void dsa_pin_mapping(dsa_area *area)
Definition dsa.c:650
dsa_handle dsa_get_handle(dsa_area *area)
Definition dsa.c:498
void dsa_pin(dsa_area *area)
Definition dsa.c:990
#define dsa_create(tranche_id)
Definition dsa.h:117
dsm_handle dsa_handle
Definition dsa.h:136
#define DSA_HANDLE_INVALID
Definition dsa.h:139
bool dshash_delete_key(dshash_table *hash_table, const void *key)
Definition dshash.c:524
void dshash_memcpy(void *dest, const void *src, size_t size, void *arg)
Definition dshash.c:611
void dshash_release_lock(dshash_table *hash_table, void *entry)
Definition dshash.c:579
void * dshash_find(dshash_table *hash_table, const void *key, bool exclusive)
Definition dshash.c:394
dshash_table_handle dshash_get_hash_table_handle(dshash_table *hash_table)
Definition dshash.c:371
dshash_table * dshash_attach(dsa_area *area, const dshash_parameters *params, dshash_table_handle handle, void *arg)
Definition dshash.c:274
dshash_hash dshash_memhash(const void *v, size_t size, void *arg)
Definition dshash.c:602
dshash_table * dshash_create(dsa_area *area, const dshash_parameters *params, void *arg)
Definition dshash.c:210
int dshash_memcmp(const void *a, const void *b, size_t size, void *arg)
Definition dshash.c:593
#define DSHASH_HANDLE_INVALID
Definition dshash.h:27
dsa_pointer dshash_table_handle
Definition dshash.h:24
#define dshash_find_or_insert(hash_table, key, found)
Definition dshash.h:109
uint32 dsm_handle
Definition dsm_impl.h:55
#define DSM_HANDLE_INVALID
Definition dsm_impl.h:58
Datum arg
Definition elog.c:1323
int errcode(int sqlerrcode)
Definition elog.c:875
#define LOG
Definition elog.h:32
int errhint(const char *fmt,...) pg_attribute_printf(1
int int errmsg_internal(const char *fmt,...) pg_attribute_printf(1
#define WARNING
Definition elog.h:37
#define DEBUG1
Definition elog.h:31
#define ERROR
Definition elog.h:40
#define elog(elevel,...)
Definition elog.h:228
#define ereport(elevel,...)
Definition elog.h:152
#define palloc0_object(type)
Definition fe_memutils.h:75
void FileSetDeleteAll(FileSet *fileset)
Definition fileset.c:151
#define PG_GETARG_OID(n)
Definition fmgr.h:275
#define PG_ARGISNULL(n)
Definition fmgr.h:209
#define PG_FUNCTION_ARGS
Definition fmgr.h:193
void InitMaterializedSRF(FunctionCallInfo fcinfo, uint32 flags)
Definition funcapi.c:76
bool IsBinaryUpgrade
Definition globals.c:123
int MyProcPid
Definition globals.c:49
struct Latch * MyLatch
Definition globals.c:65
void ProcessConfigFile(GucContext context)
Definition guc-file.l:120
@ PGC_SIGHUP
Definition guc.h:75
HeapTuple heap_getnext(TableScanDesc sscan, ScanDirection direction)
Definition heapam.c:1435
#define HeapTupleIsValid(tuple)
Definition htup.h:78
static void * GETSTRUCT(const HeapTupleData *tuple)
volatile sig_atomic_t ConfigReloadPending
Definition interrupt.c:27
void SignalHandlerForConfigReload(SIGNAL_ARGS)
Definition interrupt.c:61
void before_shmem_exit(pg_on_exit_callback function, Datum arg)
Definition ipc.c:344
int i
Definition isn.c:77
void SetLatch(Latch *latch)
Definition latch.c:290
void ResetLatch(Latch *latch)
Definition latch.c:374
int WaitLatch(Latch *latch, int wakeEvents, long timeout, uint32 wait_event_info)
Definition latch.c:172
Datum pg_stat_get_subscription(PG_FUNCTION_ARGS)
Definition launcher.c:1624
#define DEFAULT_NAPTIME_PER_CYCLE
Definition launcher.c:51
List * logicalrep_workers_find(Oid subid, bool only_running, bool acquire_lock)
Definition launcher.c:303
void AtEOXact_ApplyLauncher(bool isCommit)
Definition launcher.c:1166
void logicalrep_worker_wakeup_ptr(LogicalRepWorker *worker)
Definition launcher.c:756
bool logicalrep_worker_launch(LogicalRepWorkerType wtype, Oid dbid, Oid subid, const char *subname, Oid userid, Oid relid, dsm_handle subworker_dsm, bool retain_dead_tuples)
Definition launcher.c:334
bool IsLogicalLauncher(void)
Definition launcher.c:1587
void logicalrep_worker_attach(int slot)
Definition launcher.c:767
void ApplyLauncherWakeup(void)
Definition launcher.c:1195
static void ApplyLauncherSetWorkerStartTime(Oid subid, TimestampTz start_time)
Definition launcher.c:1113
LogicalRepWorker * logicalrep_worker_find(LogicalRepWorkerType wtype, Oid subid, Oid relid, bool only_running)
Definition launcher.c:268
static void ApplyLauncherShmemInit(void *arg)
Definition launcher.c:1046
void logicalrep_worker_wakeup(LogicalRepWorkerType wtype, Oid subid, Oid relid)
Definition launcher.c:733
static void update_conflict_slot_xmin(TransactionId new_xmin)
Definition launcher.c:1499
static void compute_min_nonremovable_xid(LogicalRepWorker *worker, TransactionId *xmin)
Definition launcher.c:1447
static void logicalrep_launcher_onexit(int code, Datum arg)
Definition launcher.c:869
static dsa_area * last_start_times_dsa
Definition launcher.c:100
void ApplyLauncherMain(Datum main_arg)
Definition launcher.c:1205
void CreateConflictDetectionSlot(void)
Definition launcher.c:1568
const ShmemCallbacks ApplyLauncherShmemCallbacks
Definition launcher.c:78
#define PG_STAT_GET_SUBSCRIPTION_COLS
int max_logical_replication_workers
Definition launcher.c:54
static void init_conflict_slot_xmin(void)
Definition launcher.c:1535
void logicalrep_pa_worker_stop(ParallelApplyWorkerInfo *winfo)
Definition launcher.c:689
static int logicalrep_pa_worker_count(Oid subid)
Definition launcher.c:961
static bool on_commit_launcher_wakeup
Definition launcher.c:103
void logicalrep_reset_seqsync_start_time(void)
Definition launcher.c:882
static TimestampTz ApplyLauncherGetWorkerStartTime(Oid subid)
Definition launcher.c:1129
static void logicalrep_worker_stop_internal(LogicalRepWorker *worker, int signo)
Definition launcher.c:579
static dshash_table * last_start_times
Definition launcher.c:101
LogicalRepWorker * MyLogicalRepWorker
Definition launcher.c:58
void ApplyLauncherWakeupAtCommit(void)
Definition launcher.c:1185
static const dshash_parameters dsh_params
Definition launcher.c:91
void logicalrep_worker_stop(LogicalRepWorkerType wtype, Oid subid, Oid relid)
Definition launcher.c:662
static void ApplyLauncherShmemRequest(void *arg)
Definition launcher.c:988
static LogicalRepCtxStruct * LogicalRepCtx
Definition launcher.c:73
static void logicalrep_worker_onexit(int code, Datum arg)
Definition launcher.c:907
pid_t GetLeaderApplyWorkerPid(pid_t pid)
Definition launcher.c:1597
int max_sync_workers_per_subscription
Definition launcher.c:55
static void logicalrep_worker_detach(void)
Definition launcher.c:804
static bool WaitForReplicationWorkerAttach(LogicalRepWorker *worker, uint16 generation, BackgroundWorkerHandle *handle)
Definition launcher.c:191
int logicalrep_sync_worker_count(Oid subid)
Definition launcher.c:937
void ApplyLauncherForgetWorkerStartTime(Oid subid)
Definition launcher.c:1155
static bool acquire_conflict_slot_if_exists(void)
Definition launcher.c:1485
void ApplyLauncherRegister(void)
Definition launcher.c:1010
static void logicalrep_launcher_attach_dshmem(void)
Definition launcher.c:1069
static List * get_subscription_list(void)
Definition launcher.c:127
int max_parallel_apply_workers_per_subscription
Definition launcher.c:56
static void logicalrep_worker_cleanup(LogicalRepWorker *worker)
Definition launcher.c:848
List * lappend(List *list, void *datum)
Definition list.c:339
void list_free(List *list)
Definition list.c:1546
void LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
Definition lock.c:2316
#define AccessShareLock
Definition lockdefs.h:36
#define DEFAULT_LOCKMETHOD
Definition locktag.h:25
bool LWLockHeldByMe(LWLock *lock)
Definition lwlock.c:1885
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition lwlock.c:1150
bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode)
Definition lwlock.c:1929
void LWLockRelease(LWLock *lock)
Definition lwlock.c:1767
@ LW_SHARED
Definition lwlock.h:105
@ LW_EXCLUSIVE
Definition lwlock.h:104
char * pstrdup(const char *in)
Definition mcxt.c:1781
MemoryContext TopMemoryContext
Definition mcxt.c:166
MemoryContext CurrentMemoryContext
Definition mcxt.c:160
void MemoryContextDelete(MemoryContext context)
Definition mcxt.c:472
#define AllocSetContextCreate
Definition memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition memutils.h:160
#define CHECK_FOR_INTERRUPTS()
Definition miscadmin.h:125
#define InvalidPid
Definition miscadmin.h:32
static char * errmsg
int max_active_replication_origins
Definition origin.c:106
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition palloc.h:124
#define MAXPGPATH
static time_t start_time
Definition pg_ctl.c:96
#define lfirst(lc)
Definition pg_list.h:172
#define NIL
Definition pg_list.h:68
static Datum LSNGetDatum(XLogRecPtr X)
Definition pg_lsn.h:31
NameData subname
END_CATALOG_STRUCT typedef FormData_pg_subscription * Form_pg_subscription
#define pqsignal
Definition port.h:547
#define snprintf
Definition port.h:260
static Datum ObjectIdGetDatum(Oid X)
Definition postgres.h:252
uint64_t Datum
Definition postgres.h:70
static Datum Int32GetDatum(int32 X)
Definition postgres.h:212
#define InvalidOid
unsigned int Oid
static int fb(int x)
TransactionId GetOldestSafeDecodingTransactionId(bool catalogOnly)
Definition procarray.c:2919
bool IsBackendPid(int pid)
Definition procarray.c:3264
@ ForwardScanDirection
Definition sdir.h:28
void shm_mq_detach(shm_mq_handle *mqh)
Definition shm_mq.c:845
Size add_size(Size s1, Size s2)
Definition shmem.c:1048
Size mul_size(Size s1, Size s2)
Definition shmem.c:1063
#define ShmemRequestStruct(...)
Definition shmem.h:176
void ReplicationSlotAcquire(const char *name, bool nowait, bool error_if_invalid)
Definition slot.c:629
void ReplicationSlotDropAcquired(void)
Definition slot.c:1042
void ReplicationSlotMarkDirty(void)
Definition slot.c:1184
void ReplicationSlotCreate(const char *name, bool db_specific, ReplicationSlotPersistency persistency, bool two_phase, bool repack, bool failover, bool synced)
Definition slot.c:378
void ReplicationSlotsComputeRequiredXmin(bool already_locked)
Definition slot.c:1226
ReplicationSlot * MyReplicationSlot
Definition slot.c:158
void ReplicationSlotSave(void)
Definition slot.c:1166
ReplicationSlot * SearchNamedReplicationSlot(const char *name, bool need_lock)
Definition slot.c:548
#define CONFLICT_DETECTION_SLOT
Definition slot.h:28
@ RS_PERSISTENT
Definition slot.h:45
static void SpinLockRelease(volatile slock_t *lock)
Definition spin.h:62
static void SpinLockAcquire(volatile slock_t *lock)
Definition spin.h:56
static void SpinLockInit(volatile slock_t *lock)
Definition spin.h:50
PGPROC * MyProc
Definition proc.c:71
TimestampTz last_start_time
Definition launcher.c:87
Definition pg_list.h:54
dsa_handle last_start_dsa
Definition launcher.c:66
dshash_table_handle last_start_dsh
Definition launcher.c:67
LogicalRepWorker workers[FLEXIBLE_ARRAY_MEMBER]
Definition launcher.c:70
XLogRecPtr relstate_lsn
TimestampTz last_recv_time
TimestampTz last_seqsync_start_time
LogicalRepWorkerType type
TimestampTz launch_time
TimestampTz reply_time
FileSet * stream_fileset
TransactionId oldest_nonremovable_xid
TimestampTz last_send_time
int pid
Definition proc.h:197
Latch procLatch
Definition proc.h:256
shm_mq_handle * error_mq_handle
ParallelApplyWorkerShared * shared
slock_t mutex
Definition slot.h:183
TransactionId effective_xmin
Definition slot.h:209
ReplicationSlotPersistentData data
Definition slot.h:213
ShmemRequestCallback request_fn
Definition shmem.h:133
void table_close(Relation relation, LOCKMODE lockmode)
Definition table.c:126
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition table.c:40
TableScanDesc table_beginscan_catalog(Relation relation, int nkeys, ScanKeyData *key)
Definition tableam.c:113
static void table_endscan(TableScanDesc scan)
Definition tableam.h:1061
#define InvalidTransactionId
Definition transam.h:31
static bool TransactionIdPrecedesOrEquals(TransactionId id1, TransactionId id2)
Definition transam.h:282
#define TransactionIdEquals(id1, id2)
Definition transam.h:43
#define TransactionIdIsValid(xid)
Definition transam.h:41
static bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition transam.h:263
void tuplestore_putvalues(Tuplestorestate *state, TupleDesc tdesc, const Datum *values, const bool *isnull)
Definition tuplestore.c:785
static Datum TimestampTzGetDatum(TimestampTz X)
Definition timestamp.h:52
const char * name
#define WL_TIMEOUT
#define WL_EXIT_ON_PM_DEATH
#define WL_LATCH_SET
int wal_receiver_timeout
Definition walreceiver.c:91
#define walrcv_disconnect(conn)
#define SIGHUP
Definition win32_port.h:158
#define kill(pid, sig)
Definition win32_port.h:490
#define SIGUSR1
Definition win32_port.h:170
#define SIGUSR2
Definition win32_port.h:171
#define isParallelApplyWorker(worker)
#define isSequenceSyncWorker(worker)
LogicalRepWorkerType
@ WORKERTYPE_TABLESYNC
@ WORKERTYPE_UNKNOWN
@ WORKERTYPE_SEQUENCESYNC
@ WORKERTYPE_PARALLEL_APPLY
@ WORKERTYPE_APPLY
static bool am_leader_apply_worker(void)
#define isTableSyncWorker(worker)
void StartTransactionCommand(void)
Definition xact.c:3109
void CommitTransactionCommand(void)
Definition xact.c:3207
int wal_retrieve_retry_interval
Definition xlog.c:141
#define XLogRecPtrIsValid(r)
Definition xlogdefs.h:29
#define InvalidXLogRecPtr
Definition xlogdefs.h:28