PostgreSQL Source Code  git master
launcher.c File Reference
Include dependency graph for launcher.c:

Go to the source code of this file.

Data Structures

struct  LogicalRepCtxStruct
 
struct  LogicalRepWorkerId
 
struct  StopWorkersData
 

Macros

#define DEFAULT_NAPTIME_PER_CYCLE   180000L
 
#define PG_STAT_GET_SUBSCRIPTION_COLS   8
 

Typedefs

typedef struct LogicalRepCtxStruct LogicalRepCtxStruct
 
typedef struct LogicalRepWorkerId LogicalRepWorkerId
 
typedef struct StopWorkersData StopWorkersData
 

Functions

static void ApplyLauncherWakeup (void)
 
static void logicalrep_launcher_onexit (int code, Datum arg)
 
static void logicalrep_worker_onexit (int code, Datum arg)
 
static void logicalrep_worker_detach (void)
 
static void logicalrep_worker_cleanup (LogicalRepWorker *worker)
 
Datum pg_stat_get_subscription (PG_FUNCTION_ARGS)
 
static List * get_subscription_list (void)
 
static void WaitForReplicationWorkerAttach (LogicalRepWorker *worker, uint16 generation, BackgroundWorkerHandle *handle)
 
LogicalRepWorker * logicalrep_worker_find (Oid subid, Oid relid, bool only_running)
 
List * logicalrep_workers_find (Oid subid, bool only_running)
 
void logicalrep_worker_launch (Oid dbid, Oid subid, const char *subname, Oid userid, Oid relid)
 
void logicalrep_worker_stop (Oid subid, Oid relid)
 
void logicalrep_worker_stop_at_commit (Oid subid, Oid relid)
 
void logicalrep_worker_wakeup (Oid subid, Oid relid)
 
void logicalrep_worker_wakeup_ptr (LogicalRepWorker *worker)
 
void logicalrep_worker_attach (int slot)
 
int logicalrep_sync_worker_count (Oid subid)
 
Size ApplyLauncherShmemSize (void)
 
void ApplyLauncherRegister (void)
 
void ApplyLauncherShmemInit (void)
 
bool XactManipulatesLogicalReplicationWorkers (void)
 
void AtEOXact_ApplyLauncher (bool isCommit)
 
void AtEOSubXact_ApplyLauncher (bool isCommit, int nestDepth)
 
void ApplyLauncherWakeupAtCommit (void)
 
void ApplyLauncherMain (Datum main_arg)
 
bool IsLogicalLauncher (void)
 

Variables

int max_logical_replication_workers = 4
 
int max_sync_workers_per_subscription = 2
 
LogicalRepWorker * MyLogicalRepWorker = NULL
 
LogicalRepCtxStruct * LogicalRepCtx
 
static StopWorkersData * on_commit_stop_workers = NULL
 
static bool on_commit_launcher_wakeup = false
 

Macro Definition Documentation

◆ DEFAULT_NAPTIME_PER_CYCLE

#define DEFAULT_NAPTIME_PER_CYCLE   180000L

Definition at line 52 of file launcher.c.

Referenced by ApplyLauncherMain().

◆ PG_STAT_GET_SUBSCRIPTION_COLS

#define PG_STAT_GET_SUBSCRIPTION_COLS   8

Definition at line 1077 of file launcher.c.

Referenced by pg_stat_get_subscription().

Typedef Documentation

◆ LogicalRepCtxStruct

◆ LogicalRepWorkerId

◆ StopWorkersData

Function Documentation

◆ ApplyLauncherMain()

void ApplyLauncherMain ( Datum  main_arg)

Definition at line 950 of file launcher.c.

References ALLOCSET_DEFAULT_SIZES, AllocSetContextCreate, Assert, BackgroundWorkerInitializeConnection(), BackgroundWorkerUnblockSignals(), before_shmem_exit(), CHECK_FOR_INTERRUPTS, ConfigReloadPending, Subscription::dbid, DEBUG1, DEFAULT_NAPTIME_PER_CYCLE, die, Subscription::enabled, ereport, errmsg(), get_subscription_list(), GetCurrentTimestamp(), InvalidOid, LogicalRepCtxStruct::launcher_pid, lfirst, logicalrep_launcher_onexit(), logicalrep_worker_find(), logicalrep_worker_launch(), LW_SHARED, LWLockAcquire(), LWLockRelease(), MemoryContextDelete(), MemoryContextSwitchTo(), MyLatch, MyProcPid, Subscription::name, now(), Subscription::oid, Subscription::owner, PGC_SIGHUP, pqsignal(), ProcessConfigFile(), ResetLatch(), SIGHUP, SignalHandlerForConfigReload(), TimestampDifferenceExceeds(), TopMemoryContext, WAIT_EVENT_LOGICAL_LAUNCHER_MAIN, WaitLatch(), wal_retrieve_retry_interval, WL_EXIT_ON_PM_DEATH, WL_LATCH_SET, and WL_TIMEOUT.

{
    TimestampTz last_start_time = 0;

    ereport(DEBUG1,
            (errmsg("logical replication launcher started")));

    before_shmem_exit(logicalrep_launcher_onexit, (Datum) 0);

    Assert(LogicalRepCtx->launcher_pid == 0);
    LogicalRepCtx->launcher_pid = MyProcPid;

    /* Establish signal handlers. */
    pqsignal(SIGHUP, SignalHandlerForConfigReload);
    pqsignal(SIGTERM, die);
    BackgroundWorkerUnblockSignals();

    /*
     * Establish connection to nailed catalogs (we only ever access
     * pg_subscription).
     */
    BackgroundWorkerInitializeConnection(NULL, NULL, 0);

    /* Enter main loop */
    for (;;)
    {
        int         rc;
        List       *sublist;
        ListCell   *lc;
        MemoryContext subctx;
        MemoryContext oldctx;
        TimestampTz now;
        long        wait_time = DEFAULT_NAPTIME_PER_CYCLE;

        CHECK_FOR_INTERRUPTS();

        now = GetCurrentTimestamp();

        /* Limit the start retry to once a wal_retrieve_retry_interval */
        if (TimestampDifferenceExceeds(last_start_time, now,
                                       wal_retrieve_retry_interval))
        {
            /* Use temporary context for the database list and worker info. */
            subctx = AllocSetContextCreate(TopMemoryContext,
                                           "Logical Replication Launcher sublist",
                                           ALLOCSET_DEFAULT_SIZES);
            oldctx = MemoryContextSwitchTo(subctx);

            /* search for subscriptions to start or stop. */
            sublist = get_subscription_list();

            /* Start the missing workers for enabled subscriptions. */
            foreach(lc, sublist)
            {
                Subscription *sub = (Subscription *) lfirst(lc);
                LogicalRepWorker *w;

                if (!sub->enabled)
                    continue;

                LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
                w = logicalrep_worker_find(sub->oid, InvalidOid, false);
                LWLockRelease(LogicalRepWorkerLock);

                if (w == NULL)
                {
                    last_start_time = now;
                    wait_time = wal_retrieve_retry_interval;

                    logicalrep_worker_launch(sub->dbid, sub->oid, sub->name,
                                             sub->owner, InvalidOid);
                }
            }

            /* Switch back to original memory context. */
            MemoryContextSwitchTo(oldctx);
            /* Clean the temporary memory. */
            MemoryContextDelete(subctx);
        }
        else
        {
            /*
             * The wait in previous cycle was interrupted in less than
             * wal_retrieve_retry_interval since last worker was started, this
             * usually means crash of the worker, so we should retry in
             * wal_retrieve_retry_interval again.
             */
            wait_time = wal_retrieve_retry_interval;
        }

        /* Wait for more work. */
        rc = WaitLatch(MyLatch,
                       WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
                       wait_time,
                       WAIT_EVENT_LOGICAL_LAUNCHER_MAIN);

        if (rc & WL_LATCH_SET)
        {
            ResetLatch(MyLatch);
            CHECK_FOR_INTERRUPTS();
        }

        if (ConfigReloadPending)
        {
            ConfigReloadPending = false;
            ProcessConfigFile(PGC_SIGHUP);
        }
    }

    /* Not reachable */
}
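
The main loop throttles worker (re)starts: a given subscription's apply worker is launched at most once per wal_retrieve_retry_interval, and the nap time between iterations is shortened while the throttle is in effect. A minimal sketch of that decision, using the same variables as the loop above (the helper name next_wait_time is hypothetical, not part of launcher.c):

/*
 * Sketch of the restart-throttling test performed by the loop above.  If the
 * interval has elapsed the launcher may start workers and nap the full
 * DEFAULT_NAPTIME_PER_CYCLE; otherwise it sleeps only until the throttle
 * window ends.
 */
static long
next_wait_time(TimestampTz last_start_time, TimestampTz now)
{
    if (TimestampDifferenceExceeds(last_start_time, now,
                                   wal_retrieve_retry_interval))
        return DEFAULT_NAPTIME_PER_CYCLE;   /* free to start workers again */

    return wal_retrieve_retry_interval;     /* throttled; retry sooner */
}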

◆ ApplyLauncherRegister()

void ApplyLauncherRegister ( void  )

Definition at line 767 of file launcher.c.

References BackgroundWorker::bgw_flags, BackgroundWorker::bgw_function_name, BackgroundWorker::bgw_library_name, BackgroundWorker::bgw_main_arg, BGW_MAXLEN, BackgroundWorker::bgw_name, BackgroundWorker::bgw_notify_pid, BackgroundWorker::bgw_restart_time, BackgroundWorker::bgw_start_time, BackgroundWorker::bgw_type, BGWORKER_BACKEND_DATABASE_CONNECTION, BGWORKER_SHMEM_ACCESS, BgWorkerStart_RecoveryFinished, max_logical_replication_workers, RegisterBackgroundWorker(), and snprintf.

Referenced by PostmasterMain().

{
    BackgroundWorker bgw;

    if (max_logical_replication_workers == 0)
        return;

    memset(&bgw, 0, sizeof(bgw));
    bgw.bgw_flags = BGWORKER_SHMEM_ACCESS |
        BGWORKER_BACKEND_DATABASE_CONNECTION;
    bgw.bgw_start_time = BgWorkerStart_RecoveryFinished;
    snprintf(bgw.bgw_library_name, BGW_MAXLEN, "postgres");
    snprintf(bgw.bgw_function_name, BGW_MAXLEN, "ApplyLauncherMain");
    snprintf(bgw.bgw_name, BGW_MAXLEN,
             "logical replication launcher");
    snprintf(bgw.bgw_type, BGW_MAXLEN,
             "logical replication launcher");
    bgw.bgw_restart_time = 5;
    bgw.bgw_notify_pid = 0;
    bgw.bgw_main_arg = (Datum) 0;

    RegisterBackgroundWorker(&bgw);
}
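
ApplyLauncherRegister() is called from PostmasterMain() to register the launcher as a static background worker. An extension can register its own worker at load time with the same BackgroundWorker fields; a minimal sketch, where "my_extension" and "my_worker_main" are hypothetical names, not part of launcher.c:

/*
 * Sketch: registering a static background worker from an extension's
 * _PG_init(), mirroring the fields ApplyLauncherRegister() fills in.
 */
#include "postgres.h"
#include "postmaster/bgworker.h"

void
_PG_init(void)
{
    BackgroundWorker bgw;

    memset(&bgw, 0, sizeof(bgw));
    bgw.bgw_flags = BGWORKER_SHMEM_ACCESS |
        BGWORKER_BACKEND_DATABASE_CONNECTION;
    bgw.bgw_start_time = BgWorkerStart_RecoveryFinished;
    snprintf(bgw.bgw_library_name, BGW_MAXLEN, "my_extension");
    snprintf(bgw.bgw_function_name, BGW_MAXLEN, "my_worker_main");
    snprintf(bgw.bgw_name, BGW_MAXLEN, "my extension worker");
    snprintf(bgw.bgw_type, BGW_MAXLEN, "my extension worker");
    bgw.bgw_restart_time = 5;       /* restart after 5 seconds on crash */
    bgw.bgw_notify_pid = 0;         /* no backend waits for startup */
    bgw.bgw_main_arg = (Datum) 0;

    RegisterBackgroundWorker(&bgw);
}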

◆ ApplyLauncherShmemInit()

void ApplyLauncherShmemInit ( void  )

Definition at line 796 of file launcher.c.

References ApplyLauncherShmemSize(), max_logical_replication_workers, LogicalRepWorker::relmutex, ShmemInitStruct(), SpinLockInit, and LogicalRepCtxStruct::workers.

Referenced by CreateSharedMemoryAndSemaphores().

{
    bool        found;

    LogicalRepCtx = (LogicalRepCtxStruct *)
        ShmemInitStruct("Logical Replication Launcher Data",
                        ApplyLauncherShmemSize(),
                        &found);

    if (!found)
    {
        int         slot;

        memset(LogicalRepCtx, 0, ApplyLauncherShmemSize());

        /* Initialize memory and spin locks for each worker slot. */
        for (slot = 0; slot < max_logical_replication_workers; slot++)
        {
            LogicalRepWorker *worker = &LogicalRepCtx->workers[slot];

            memset(worker, 0, sizeof(LogicalRepWorker));
            SpinLockInit(&worker->relmutex);
        }
    }
}

◆ ApplyLauncherShmemSize()

Size ApplyLauncherShmemSize ( void  )

Definition at line 748 of file launcher.c.

References add_size(), max_logical_replication_workers, MAXALIGN, and mul_size().

Referenced by ApplyLauncherShmemInit(), and CreateSharedMemoryAndSemaphores().

{
    Size        size;

    /*
     * Need the fixed struct and the array of LogicalRepWorker.
     */
    size = sizeof(LogicalRepCtxStruct);
    size = MAXALIGN(size);
    size = add_size(size, mul_size(max_logical_replication_workers,
                                   sizeof(LogicalRepWorker)));
    return size;
}
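
The sizing pattern here is the usual one for shared-memory structs: MAXALIGN the fixed header, then add an overflow-checked array of per-slot entries via mul_size()/add_size(). A sketch of the same pattern for a hypothetical module (MyModuleState, MySlot, and my_max_slots are assumptions, not part of launcher.c):

/*
 * Sketch: size a fixed struct followed by an array of slots, with
 * overflow-checked arithmetic, as ApplyLauncherShmemSize() does.
 */
static Size
MyModuleShmemSize(void)
{
    Size        size;

    size = MAXALIGN(sizeof(MyModuleState));             /* fixed part */
    size = add_size(size, mul_size(my_max_slots,
                                   sizeof(MySlot)));    /* per-slot array */
    return size;
}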

◆ ApplyLauncherWakeup()

static void ApplyLauncherWakeup ( void  )
static

Definition at line 940 of file launcher.c.

References kill, LogicalRepCtxStruct::launcher_pid, and SIGUSR1.

Referenced by AtEOXact_ApplyLauncher(), and logicalrep_worker_onexit().

{
    if (LogicalRepCtx->launcher_pid != 0)
        kill(LogicalRepCtx->launcher_pid, SIGUSR1);
}

◆ ApplyLauncherWakeupAtCommit()

void ApplyLauncherWakeupAtCommit ( void  )

Definition at line 933 of file launcher.c.

References on_commit_launcher_wakeup.

Referenced by AlterSubscription(), and CreateSubscription().

{
    if (!on_commit_launcher_wakeup)
        on_commit_launcher_wakeup = true;
}

◆ AtEOSubXact_ApplyLauncher()

void AtEOSubXact_ApplyLauncher ( bool  isCommit,
int  nestDepth 
)

Definition at line 878 of file launcher.c.

References Assert, list_concat(), list_free_deep(), StopWorkersData::nestDepth, StopWorkersData::parent, pfree(), and StopWorkersData::workers.

Referenced by AbortSubTransaction(), and CommitSubTransaction().

{
    StopWorkersData *parent;

    /* Exit immediately if there's no work to do at this level. */
    if (on_commit_stop_workers == NULL ||
        on_commit_stop_workers->nestDepth < nestDepth)
        return;

    Assert(on_commit_stop_workers->nestDepth == nestDepth);

    parent = on_commit_stop_workers->parent;

    if (isCommit)
    {
        /*
         * If the upper stack element is not an immediate parent
         * subtransaction, just decrement the notional nesting depth without
         * doing any real work.  Else, we need to merge the current workers
         * list into the parent.
         */
        if (!parent || parent->nestDepth < nestDepth - 1)
        {
            on_commit_stop_workers->nestDepth--;
            return;
        }

        parent->workers =
            list_concat(parent->workers, on_commit_stop_workers->workers);
    }
    else
    {
        /*
         * Abandon everything that was done at this nesting level.  Explicitly
         * free memory to avoid a transaction-lifespan leak.
         */
        list_free_deep(on_commit_stop_workers->workers);
    }

    /*
     * We have taken care of the current subtransaction workers list for both
     * abort or commit. So we are ready to pop the stack.
     */
    pfree(on_commit_stop_workers);
    on_commit_stop_workers = parent;
}

◆ AtEOXact_ApplyLauncher()

void AtEOXact_ApplyLauncher ( bool  isCommit)

Definition at line 836 of file launcher.c.

References ApplyLauncherWakeup(), Assert, lfirst, logicalrep_worker_stop(), StopWorkersData::nestDepth, on_commit_launcher_wakeup, StopWorkersData::parent, LogicalRepWorkerId::relid, LogicalRepWorkerId::subid, LogicalRepCtxStruct::workers, and StopWorkersData::workers.

Referenced by AbortTransaction(), and CommitTransaction().

{
    Assert(on_commit_stop_workers == NULL ||
           (on_commit_stop_workers->nestDepth == 1 &&
            on_commit_stop_workers->parent == NULL));

    if (isCommit)
    {
        ListCell   *lc;

        if (on_commit_stop_workers != NULL)
        {
            List       *workers = on_commit_stop_workers->workers;

            foreach(lc, workers)
            {
                LogicalRepWorkerId *wid = lfirst(lc);

                logicalrep_worker_stop(wid->subid, wid->relid);
            }
        }

        if (on_commit_launcher_wakeup)
            ApplyLauncherWakeup();
    }

    /*
     * No need to pfree on_commit_stop_workers.  It was allocated in
     * transaction memory context, which is going to be cleaned soon.
     */
    on_commit_stop_workers = NULL;
    on_commit_launcher_wakeup = false;
}

◆ get_subscription_list()

static List* get_subscription_list ( void  )
static

Definition at line 108 of file launcher.c.

References AccessShareLock, CommitTransactionCommand(), CurrentMemoryContext, Subscription::dbid, Subscription::enabled, ForwardScanDirection, GETSTRUCT, GetTransactionSnapshot(), heap_getnext(), HeapTupleIsValid, lappend(), MemoryContextSwitchTo(), Subscription::name, NameStr, NIL, Subscription::oid, Subscription::owner, palloc0(), pstrdup(), StartTransactionCommand(), table_beginscan_catalog(), table_close(), table_endscan(), and table_open().

Referenced by ApplyLauncherMain().

{
    List       *res = NIL;
    Relation    rel;
    TableScanDesc scan;
    HeapTuple   tup;
    MemoryContext resultcxt;

    /* This is the context that we will allocate our output data in */
    resultcxt = CurrentMemoryContext;

    /*
     * Start a transaction so we can access pg_database, and get a snapshot.
     * We don't have a use for the snapshot itself, but we're interested in
     * the secondary effect that it sets RecentGlobalXmin.  (This is critical
     * for anything that reads heap pages, because HOT may decide to prune
     * them even if the process doesn't attempt to modify any tuples.)
     *
     * FIXME: This comment is inaccurate / the code buggy. A snapshot that is
     * not pushed/active does not reliably prevent HOT pruning (->xmin could
     * e.g. be cleared when cache invalidations are processed).
     */
    StartTransactionCommand();
    (void) GetTransactionSnapshot();

    rel = table_open(SubscriptionRelationId, AccessShareLock);
    scan = table_beginscan_catalog(rel, 0, NULL);

    while (HeapTupleIsValid(tup = heap_getnext(scan, ForwardScanDirection)))
    {
        Form_pg_subscription subform = (Form_pg_subscription) GETSTRUCT(tup);
        Subscription *sub;
        MemoryContext oldcxt;

        /*
         * Allocate our results in the caller's context, not the
         * transaction's. We do this inside the loop, and restore the original
         * context at the end, so that leaky things like heap_getnext() are
         * not called in a potentially long-lived context.
         */
        oldcxt = MemoryContextSwitchTo(resultcxt);

        sub = (Subscription *) palloc0(sizeof(Subscription));
        sub->oid = subform->oid;
        sub->dbid = subform->subdbid;
        sub->owner = subform->subowner;
        sub->enabled = subform->subenabled;
        sub->name = pstrdup(NameStr(subform->subname));
        /* We don't fill fields we are not interested in. */

        res = lappend(res, sub);
        MemoryContextSwitchTo(oldcxt);
    }

    table_endscan(scan);
    table_close(rel, AccessShareLock);

    CommitTransactionCommand();

    return res;
}

◆ IsLogicalLauncher()

bool IsLogicalLauncher ( void  )

Definition at line 1066 of file launcher.c.

References LogicalRepCtxStruct::launcher_pid, and MyProcPid.

Referenced by ProcessInterrupts().

{
    return LogicalRepCtx->launcher_pid == MyProcPid;
}

◆ logicalrep_launcher_onexit()

static void logicalrep_launcher_onexit ( int  code,
Datum  arg 
)
static

Definition at line 697 of file launcher.c.

References LogicalRepCtxStruct::launcher_pid.

Referenced by ApplyLauncherMain().

{
    LogicalRepCtx->launcher_pid = 0;
}

◆ logicalrep_sync_worker_count()

int logicalrep_sync_worker_count ( Oid  subid)

Definition at line 724 of file launcher.c.

References Assert, i, LWLockHeldByMe(), max_logical_replication_workers, OidIsValid, LogicalRepWorker::relid, LogicalRepWorker::subid, and LogicalRepCtxStruct::workers.

Referenced by logicalrep_worker_launch(), and process_syncing_tables_for_apply().

{
    int         i;
    int         res = 0;

    Assert(LWLockHeldByMe(LogicalRepWorkerLock));

    /* Search for attached worker for a given subscription id. */
    for (i = 0; i < max_logical_replication_workers; i++)
    {
        LogicalRepWorker *w = &LogicalRepCtx->workers[i];

        if (w->subid == subid && OidIsValid(w->relid))
            res++;
    }

    return res;
}

◆ logicalrep_worker_attach()

void logicalrep_worker_attach ( int  slot)

Definition at line 629 of file launcher.c.

References Assert, before_shmem_exit(), ereport, errcode(), errmsg(), ERROR, LogicalRepWorker::in_use, logicalrep_worker_onexit(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), max_logical_replication_workers, MyProc, LogicalRepWorker::proc, and LogicalRepCtxStruct::workers.

Referenced by ApplyWorkerMain().

{
    /* Block concurrent access. */
    LWLockAcquire(LogicalRepWorkerLock, LW_EXCLUSIVE);

    Assert(slot >= 0 && slot < max_logical_replication_workers);
    MyLogicalRepWorker = &LogicalRepCtx->workers[slot];

    if (!MyLogicalRepWorker->in_use)
    {
        LWLockRelease(LogicalRepWorkerLock);
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                 errmsg("logical replication worker slot %d is empty, cannot attach",
                        slot)));
    }

    if (MyLogicalRepWorker->proc)
    {
        LWLockRelease(LogicalRepWorkerLock);
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                 errmsg("logical replication worker slot %d is already used by "
                        "another worker, cannot attach", slot)));
    }

    MyLogicalRepWorker->proc = MyProc;
    before_shmem_exit(logicalrep_worker_onexit, (Datum) 0);

    LWLockRelease(LogicalRepWorkerLock);
}

◆ logicalrep_worker_cleanup()

static void logicalrep_worker_cleanup ( LogicalRepWorker *  worker)
static

Definition at line 679 of file launcher.c.

References Assert, LogicalRepWorker::dbid, LogicalRepWorker::in_use, InvalidOid, LW_EXCLUSIVE, LWLockHeldByMeInMode(), LogicalRepWorker::proc, LogicalRepWorker::relid, LogicalRepWorker::subid, and LogicalRepWorker::userid.

Referenced by logicalrep_worker_detach(), logicalrep_worker_launch(), and WaitForReplicationWorkerAttach().

{
    Assert(LWLockHeldByMeInMode(LogicalRepWorkerLock, LW_EXCLUSIVE));

    worker->in_use = false;
    worker->proc = NULL;
    worker->dbid = InvalidOid;
    worker->userid = InvalidOid;
    worker->subid = InvalidOid;
    worker->relid = InvalidOid;
}

◆ logicalrep_worker_detach()

static void logicalrep_worker_detach ( void  )
static

Definition at line 665 of file launcher.c.

References logicalrep_worker_cleanup(), LW_EXCLUSIVE, LWLockAcquire(), and LWLockRelease().

Referenced by logicalrep_worker_onexit().

{
    /* Block concurrent access. */
    LWLockAcquire(LogicalRepWorkerLock, LW_EXCLUSIVE);

    logicalrep_worker_cleanup(MyLogicalRepWorker);

    LWLockRelease(LogicalRepWorkerLock);
}

◆ logicalrep_worker_find()

LogicalRepWorker* logicalrep_worker_find ( Oid  subid,
Oid  relid,
bool  only_running 
)

Definition at line 235 of file launcher.c.

References Assert, i, LogicalRepWorker::in_use, LWLockHeldByMe(), max_logical_replication_workers, LogicalRepWorker::proc, LogicalRepWorker::relid, LogicalRepWorker::subid, and LogicalRepCtxStruct::workers.

Referenced by ApplyLauncherMain(), logicalrep_worker_stop(), logicalrep_worker_wakeup(), process_syncing_tables_for_apply(), wait_for_relation_state_change(), and wait_for_worker_state_change().

{
    int         i;
    LogicalRepWorker *res = NULL;

    Assert(LWLockHeldByMe(LogicalRepWorkerLock));

    /* Search for attached worker for a given subscription id. */
    for (i = 0; i < max_logical_replication_workers; i++)
    {
        LogicalRepWorker *w = &LogicalRepCtx->workers[i];

        if (w->in_use && w->subid == subid && w->relid == relid &&
            (!only_running || w->proc))
        {
            res = w;
            break;
        }
    }

    return res;
}
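
As the Assert above shows, callers must hold LogicalRepWorkerLock across the lookup, and the returned pointer is only meaningful while that lock is held. A minimal sketch of the calling convention used by ApplyLauncherMain() and logicalrep_worker_stop(); the helper name subscription_has_apply_worker is hypothetical:

/*
 * Sketch: check whether a subscription's apply worker slot exists.  Passing
 * InvalidOid as relid selects the main apply worker rather than a tablesync
 * worker.
 */
static bool
subscription_has_apply_worker(Oid subid)
{
    LogicalRepWorker *w;
    bool        found;

    LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
    w = logicalrep_worker_find(subid, InvalidOid, false);
    found = (w != NULL);            /* only inspect w while the lock is held */
    LWLockRelease(LogicalRepWorkerLock);

    return found;
}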

◆ logicalrep_worker_launch()

void logicalrep_worker_launch ( Oid  dbid,
Oid  subid,
const char *  subname,
Oid  userid,
Oid  relid 
)

Definition at line 286 of file launcher.c.

References Assert, BackgroundWorker::bgw_flags, BackgroundWorker::bgw_function_name, BackgroundWorker::bgw_library_name, BackgroundWorker::bgw_main_arg, BGW_MAXLEN, BackgroundWorker::bgw_name, BGW_NEVER_RESTART, BackgroundWorker::bgw_notify_pid, BackgroundWorker::bgw_restart_time, BackgroundWorker::bgw_start_time, BackgroundWorker::bgw_type, BGWORKER_BACKEND_DATABASE_CONNECTION, BGWORKER_SHMEM_ACCESS, BgWorkerStart_RecoveryFinished, LogicalRepWorker::dbid, DEBUG1, elog, ereport, errcode(), errhint(), errmsg(), ERROR, LogicalRepWorker::generation, GetCurrentTimestamp(), i, LogicalRepWorker::in_use, Int32GetDatum, InvalidXLogRecPtr, LogicalRepWorker::last_lsn, LogicalRepWorker::last_recv_time, LogicalRepWorker::last_send_time, LogicalRepWorker::launch_time, logicalrep_sync_worker_count(), logicalrep_worker_cleanup(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), max_logical_replication_workers, max_replication_slots, max_sync_workers_per_subscription, MyProcPid, now(), OidIsValid, LogicalRepWorker::proc, RegisterDynamicBackgroundWorker(), LogicalRepWorker::relid, LogicalRepWorker::relstate, LogicalRepWorker::relstate_lsn, LogicalRepWorker::reply_lsn, LogicalRepWorker::reply_time, snprintf, LogicalRepWorker::subid, TIMESTAMP_NOBEGIN, TimestampDifferenceExceeds(), LogicalRepWorker::userid, WaitForReplicationWorkerAttach(), wal_receiver_timeout, WARNING, and LogicalRepCtxStruct::workers.

Referenced by ApplyLauncherMain(), and process_syncing_tables_for_apply().

{
    BackgroundWorker bgw;
    BackgroundWorkerHandle *bgw_handle;
    uint16      generation;
    int         i;
    int         slot = 0;
    LogicalRepWorker *worker = NULL;
    int         nsyncworkers;
    TimestampTz now;

    ereport(DEBUG1,
            (errmsg("starting logical replication worker for subscription \"%s\"",
                    subname)));

    /* Report this after the initial starting message for consistency. */
    if (max_replication_slots == 0)
        ereport(ERROR,
                (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
                 errmsg("cannot start logical replication workers when max_replication_slots = 0")));

    /*
     * We need to do the modification of the shared memory under lock so that
     * we have consistent view.
     */
    LWLockAcquire(LogicalRepWorkerLock, LW_EXCLUSIVE);

retry:
    /* Find unused worker slot. */
    for (i = 0; i < max_logical_replication_workers; i++)
    {
        LogicalRepWorker *w = &LogicalRepCtx->workers[i];

        if (!w->in_use)
        {
            worker = w;
            slot = i;
            break;
        }
    }

    nsyncworkers = logicalrep_sync_worker_count(subid);

    now = GetCurrentTimestamp();

    /*
     * If we didn't find a free slot, try to do garbage collection.  The
     * reason we do this is because if some worker failed to start up and its
     * parent has crashed while waiting, the in_use state was never cleared.
     */
    if (worker == NULL || nsyncworkers >= max_sync_workers_per_subscription)
    {
        bool        did_cleanup = false;

        for (i = 0; i < max_logical_replication_workers; i++)
        {
            LogicalRepWorker *w = &LogicalRepCtx->workers[i];

            /*
             * If the worker was marked in use but didn't manage to attach in
             * time, clean it up.
             */
            if (w->in_use && !w->proc &&
                TimestampDifferenceExceeds(w->launch_time, now,
                                           wal_receiver_timeout))
            {
                elog(WARNING,
                     "logical replication worker for subscription %u took too long to start; canceled",
                     w->subid);

                logicalrep_worker_cleanup(w);
                did_cleanup = true;
            }
        }

        if (did_cleanup)
            goto retry;
    }

    /*
     * If we reached the sync worker limit per subscription, just exit
     * silently as we might get here because of an otherwise harmless race
     * condition.
     */
    if (nsyncworkers >= max_sync_workers_per_subscription)
    {
        LWLockRelease(LogicalRepWorkerLock);
        return;
    }

    /*
     * However if there are no more free worker slots, inform user about it
     * before exiting.
     */
    if (worker == NULL)
    {
        LWLockRelease(LogicalRepWorkerLock);
        ereport(WARNING,
                (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
                 errmsg("out of logical replication worker slots"),
                 errhint("You might need to increase max_logical_replication_workers.")));
        return;
    }

    /* Prepare the worker slot. */
    worker->launch_time = now;
    worker->in_use = true;
    worker->generation++;
    worker->proc = NULL;
    worker->dbid = dbid;
    worker->userid = userid;
    worker->subid = subid;
    worker->relid = relid;
    worker->relstate = SUBREL_STATE_UNKNOWN;
    worker->relstate_lsn = InvalidXLogRecPtr;
    worker->last_lsn = InvalidXLogRecPtr;
    TIMESTAMP_NOBEGIN(worker->last_send_time);
    TIMESTAMP_NOBEGIN(worker->last_recv_time);
    worker->reply_lsn = InvalidXLogRecPtr;
    TIMESTAMP_NOBEGIN(worker->reply_time);

    /* Before releasing lock, remember generation for future identification. */
    generation = worker->generation;

    LWLockRelease(LogicalRepWorkerLock);

    /* Register the new dynamic worker. */
    memset(&bgw, 0, sizeof(bgw));
    bgw.bgw_flags = BGWORKER_SHMEM_ACCESS |
        BGWORKER_BACKEND_DATABASE_CONNECTION;
    bgw.bgw_start_time = BgWorkerStart_RecoveryFinished;
    snprintf(bgw.bgw_library_name, BGW_MAXLEN, "postgres");
    snprintf(bgw.bgw_function_name, BGW_MAXLEN, "ApplyWorkerMain");
    if (OidIsValid(relid))
        snprintf(bgw.bgw_name, BGW_MAXLEN,
                 "logical replication worker for subscription %u sync %u", subid, relid);
    else
        snprintf(bgw.bgw_name, BGW_MAXLEN,
                 "logical replication worker for subscription %u", subid);
    snprintf(bgw.bgw_type, BGW_MAXLEN, "logical replication worker");

    bgw.bgw_restart_time = BGW_NEVER_RESTART;
    bgw.bgw_notify_pid = MyProcPid;
    bgw.bgw_main_arg = Int32GetDatum(slot);

    if (!RegisterDynamicBackgroundWorker(&bgw, &bgw_handle))
    {
        /* Failed to start worker, so clean up the worker slot. */
        LWLockAcquire(LogicalRepWorkerLock, LW_EXCLUSIVE);
        Assert(generation == worker->generation);
        logicalrep_worker_cleanup(worker);
        LWLockRelease(LogicalRepWorkerLock);

        ereport(WARNING,
                (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
                 errmsg("out of background worker slots"),
                 errhint("You might need to increase max_worker_processes.")));
        return;
    }

    /* Now wait until it attaches. */
    WaitForReplicationWorkerAttach(worker, generation, bgw_handle);
}
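
The per-slot generation counter, bumped here before the lock is released, is what later code uses to confirm that a slot still describes the worker it launched rather than a reuse of the same slot. A sketch of that recheck, in the spirit of WaitForReplicationWorkerAttach() and the RegisterDynamicBackgroundWorker() failure path above (the helper name slot_still_ours is hypothetical):

/*
 * Sketch: after re-acquiring the lock, the slot only still belongs to "our"
 * worker if it is in use and its generation matches the value remembered at
 * launch time.
 */
static bool
slot_still_ours(LogicalRepWorker *worker, uint16 generation)
{
    bool        ours;

    LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
    ours = worker->in_use && worker->generation == generation;
    LWLockRelease(LogicalRepWorkerLock);

    return ours;
}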

◆ logicalrep_worker_onexit()

static void logicalrep_worker_onexit ( int  code,
Datum  arg 
)
static

Definition at line 708 of file launcher.c.

References ApplyLauncherWakeup(), logicalrep_worker_detach(), walrcv_disconnect, and wrconn.

Referenced by logicalrep_worker_attach().

{
    /* Disconnect gracefully from the remote side. */
    if (wrconn)
        walrcv_disconnect(wrconn);

    logicalrep_worker_detach();

    ApplyLauncherWakeup();
}

◆ logicalrep_worker_stop()

void logicalrep_worker_stop ( Oid  subid,
Oid  relid 
)

Definition at line 456 of file launcher.c.

References CHECK_FOR_INTERRUPTS, LogicalRepWorker::generation, LogicalRepWorker::in_use, kill, logicalrep_worker_find(), LW_SHARED, LWLockAcquire(), LWLockRelease(), MyLatch, PGPROC::pid, LogicalRepWorker::proc, ResetLatch(), WAIT_EVENT_BGWORKER_SHUTDOWN, WAIT_EVENT_BGWORKER_STARTUP, WaitLatch(), WL_EXIT_ON_PM_DEATH, WL_LATCH_SET, and WL_TIMEOUT.

Referenced by AtEOXact_ApplyLauncher(), and DropSubscription().

{
    LogicalRepWorker *worker;
    uint16      generation;

    LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);

    worker = logicalrep_worker_find(subid, relid, false);

    /* No worker, nothing to do. */
    if (!worker)
    {
        LWLockRelease(LogicalRepWorkerLock);
        return;
    }

    /*
     * Remember which generation was our worker so we can check if what we see
     * is still the same one.
     */
    generation = worker->generation;

    /*
     * If we found a worker but it does not have proc set then it is still
     * starting up; wait for it to finish starting and then kill it.
     */
    while (worker->in_use && !worker->proc)
    {
        int         rc;

        LWLockRelease(LogicalRepWorkerLock);

        /* Wait a bit --- we don't expect to have to wait long. */
        rc = WaitLatch(MyLatch,
                       WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
                       10L, WAIT_EVENT_BGWORKER_STARTUP);

        if (rc & WL_LATCH_SET)
        {
            ResetLatch(MyLatch);
            CHECK_FOR_INTERRUPTS();
        }

        /* Recheck worker status. */
        LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);

        /*
         * Check whether the worker slot is no longer used, which would mean
         * that the worker has exited, or whether the worker generation is
         * different, meaning that a different worker has taken the slot.
         */
        if (!worker->in_use || worker->generation != generation)
        {
            LWLockRelease(LogicalRepWorkerLock);
            return;
        }

        /* Worker has assigned proc, so it has started. */
        if (worker->proc)
            break;
    }

    /* Now terminate the worker ... */
    kill(worker->proc->pid, SIGTERM);

    /* ... and wait for it to die. */
    for (;;)
    {
        int         rc;

        /* is it gone? */
        if (!worker->proc || worker->generation != generation)
            break;

        LWLockRelease(LogicalRepWorkerLock);

        /* Wait a bit --- we don't expect to have to wait long. */
        rc = WaitLatch(MyLatch,
                       WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
                       10L, WAIT_EVENT_BGWORKER_SHUTDOWN);

        if (rc & WL_LATCH_SET)
        {
            ResetLatch(MyLatch);
            CHECK_FOR_INTERRUPTS();
        }

        LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
    }

    LWLockRelease(LogicalRepWorkerLock);
}

◆ logicalrep_worker_stop_at_commit()

void logicalrep_worker_stop_at_commit ( Oid  subid,
Oid  relid 
)

Definition at line 553 of file launcher.c.

References Assert, GetCurrentTransactionNestLevel(), lappend(), MemoryContextSwitchTo(), StopWorkersData::nestDepth, NIL, on_commit_stop_workers, palloc(), StopWorkersData::parent, LogicalRepWorkerId::relid, LogicalRepWorkerId::subid, TopTransactionContext, and StopWorkersData::workers.

Referenced by AlterSubscription_refresh().

{
    int         nestDepth = GetCurrentTransactionNestLevel();
    LogicalRepWorkerId *wid;
    MemoryContext oldctx;

    /* Make sure we store the info in context that survives until commit. */
    oldctx = MemoryContextSwitchTo(TopTransactionContext);

    /* Check that previous transactions were properly cleaned up. */
    Assert(on_commit_stop_workers == NULL ||
           nestDepth >= on_commit_stop_workers->nestDepth);

    /*
     * Push a new stack element if we don't already have one for the current
     * nestDepth.
     */
    if (on_commit_stop_workers == NULL ||
        nestDepth > on_commit_stop_workers->nestDepth)
    {
        StopWorkersData *newdata = palloc(sizeof(StopWorkersData));

        newdata->nestDepth = nestDepth;
        newdata->workers = NIL;
        newdata->parent = on_commit_stop_workers;
        on_commit_stop_workers = newdata;
    }

    /*
     * Finally add a new worker into the worker list of the current
     * subtransaction.
     */
    wid = palloc(sizeof(LogicalRepWorkerId));
    wid->subid = subid;
    wid->relid = relid;
    on_commit_stop_workers->workers =
        lappend(on_commit_stop_workers->workers, wid);

    MemoryContextSwitchTo(oldctx);
}
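
DDL paths such as ALTER SUBSCRIPTION ... REFRESH defer the actual worker termination to transaction end: this function only records the worker id on on_commit_stop_workers, and AtEOXact_ApplyLauncher(true) later performs the logicalrep_worker_stop() calls at commit (an abort simply discards the list). A hedged sketch of such a caller; drop_relation_from_subscription is a hypothetical name, the real caller being AlterSubscription_refresh():

/*
 * Sketch: a command removes a relation from a subscription and queues the
 * corresponding tablesync worker for termination at COMMIT rather than
 * stopping it immediately inside the command.
 */
static void
drop_relation_from_subscription(Oid subid, Oid relid)
{
    /* ... update pg_subscription_rel here ... */

    /* Stop happens in AtEOXact_ApplyLauncher(true) when we commit. */
    logicalrep_worker_stop_at_commit(subid, relid);
}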

◆ logicalrep_worker_wakeup()

void logicalrep_worker_wakeup ( Oid  subid,
Oid  relid 
)

Definition at line 598 of file launcher.c.

References logicalrep_worker_find(), logicalrep_worker_wakeup_ptr(), LW_SHARED, LWLockAcquire(), and LWLockRelease().

Referenced by pg_attribute_noreturn().

{
    LogicalRepWorker *worker;

    LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);

    worker = logicalrep_worker_find(subid, relid, true);

    if (worker)
        logicalrep_worker_wakeup_ptr(worker);

    LWLockRelease(LogicalRepWorkerLock);
}

◆ logicalrep_worker_wakeup_ptr()

void logicalrep_worker_wakeup_ptr ( LogicalRepWorker *  worker)

Definition at line 618 of file launcher.c.

References Assert, LWLockHeldByMe(), LogicalRepWorker::proc, PGPROC::procLatch, and SetLatch().

Referenced by logicalrep_worker_wakeup(), process_syncing_tables_for_apply(), and wait_for_worker_state_change().

{
    Assert(LWLockHeldByMe(LogicalRepWorkerLock));

    SetLatch(&worker->proc->procLatch);
}

◆ logicalrep_workers_find()

List* logicalrep_workers_find ( Oid  subid,
bool  only_running 
)

Definition at line 263 of file launcher.c.

References Assert, i, LogicalRepWorker::in_use, lappend(), LWLockHeldByMe(), max_logical_replication_workers, NIL, LogicalRepWorker::proc, LogicalRepWorker::subid, and LogicalRepCtxStruct::workers.

Referenced by DropSubscription().

{
    int         i;
    List       *res = NIL;

    Assert(LWLockHeldByMe(LogicalRepWorkerLock));

    /* Search for attached worker for a given subscription id. */
    for (i = 0; i < max_logical_replication_workers; i++)
    {
        LogicalRepWorker *w = &LogicalRepCtx->workers[i];

        if (w->in_use && w->subid == subid && (!only_running || w->proc))
            res = lappend(res, w);
    }

    return res;
}

◆ pg_stat_get_subscription()

Datum pg_stat_get_subscription ( PG_FUNCTION_ARGS  )

Definition at line 1075 of file launcher.c.

References ReturnSetInfo::allowedModes, ReturnSetInfo::econtext, ExprContext::ecxt_per_query_memory, elog, ereport, errcode(), errmsg(), ERROR, get_call_result_type(), i, Int32GetDatum, InvalidOid, IsA, IsBackendPid(), LogicalRepWorker::last_lsn, LogicalRepWorker::last_recv_time, LogicalRepWorker::last_send_time, LSNGetDatum, LW_SHARED, LWLockAcquire(), LWLockRelease(), max_logical_replication_workers, MemoryContextSwitchTo(), MemSet, ObjectIdGetDatum, OidIsValid, PG_ARGISNULL, PG_GETARG_OID, PG_STAT_GET_SUBSCRIPTION_COLS, PGPROC::pid, LogicalRepWorker::proc, LogicalRepWorker::relid, LogicalRepWorker::reply_lsn, LogicalRepWorker::reply_time, ReturnSetInfo::returnMode, ReturnSetInfo::setDesc, ReturnSetInfo::setResult, SFRM_Materialize, LogicalRepWorker::subid, TimestampTzGetDatum, tuplestore_begin_heap(), tuplestore_donestoring, tuplestore_putvalues(), TYPEFUNC_COMPOSITE, values, work_mem, LogicalRepCtxStruct::workers, and XLogRecPtrIsInvalid.

{
#define PG_STAT_GET_SUBSCRIPTION_COLS   8
    Oid         subid = PG_ARGISNULL(0) ? InvalidOid : PG_GETARG_OID(0);
    int         i;
    ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
    TupleDesc   tupdesc;
    Tuplestorestate *tupstore;
    MemoryContext per_query_ctx;
    MemoryContext oldcontext;

    /* check to see if caller supports us returning a tuplestore */
    if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("set-valued function called in context that cannot accept a set")));
    if (!(rsinfo->allowedModes & SFRM_Materialize))
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("materialize mode required, but it is not allowed in this context")));

    /* Build a tuple descriptor for our result type */
    if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
        elog(ERROR, "return type must be a row type");

    per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
    oldcontext = MemoryContextSwitchTo(per_query_ctx);

    tupstore = tuplestore_begin_heap(true, false, work_mem);
    rsinfo->returnMode = SFRM_Materialize;
    rsinfo->setResult = tupstore;
    rsinfo->setDesc = tupdesc;

    MemoryContextSwitchTo(oldcontext);

    /* Make sure we get consistent view of the workers. */
    LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);

    for (i = 0; i < max_logical_replication_workers; i++)
    {
        /* for each row */
        Datum       values[PG_STAT_GET_SUBSCRIPTION_COLS];
        bool        nulls[PG_STAT_GET_SUBSCRIPTION_COLS];
        int         worker_pid;
        LogicalRepWorker worker;

        memcpy(&worker, &LogicalRepCtx->workers[i],
               sizeof(LogicalRepWorker));
        if (!worker.proc || !IsBackendPid(worker.proc->pid))
            continue;

        if (OidIsValid(subid) && worker.subid != subid)
            continue;

        worker_pid = worker.proc->pid;

        MemSet(values, 0, sizeof(values));
        MemSet(nulls, 0, sizeof(nulls));

        values[0] = ObjectIdGetDatum(worker.subid);
        if (OidIsValid(worker.relid))
            values[1] = ObjectIdGetDatum(worker.relid);
        else
            nulls[1] = true;
        values[2] = Int32GetDatum(worker_pid);
        if (XLogRecPtrIsInvalid(worker.last_lsn))
            nulls[3] = true;
        else
            values[3] = LSNGetDatum(worker.last_lsn);
        if (worker.last_send_time == 0)
            nulls[4] = true;
        else
            values[4] = TimestampTzGetDatum(worker.last_send_time);
        if (worker.last_recv_time == 0)
            nulls[5] = true;
        else
            values[5] = TimestampTzGetDatum(worker.last_recv_time);
        if (XLogRecPtrIsInvalid(worker.reply_lsn))
            nulls[6] = true;
        else
            values[6] = LSNGetDatum(worker.reply_lsn);
        if (worker.reply_time == 0)
            nulls[7] = true;
        else
            values[7] = TimestampTzGetDatum(worker.reply_time);

        tuplestore_putvalues(tupstore, tupdesc, values, nulls);

        /*
         * If only a single subscription was requested, and we found it,
         * break.
         */
        if (OidIsValid(subid))
            break;
    }

    LWLockRelease(LogicalRepWorkerLock);

    /* clean up and return the tuplestore */
    tuplestore_donestoring(tupstore);

    return (Datum) 0;
}

◆ WaitForReplicationWorkerAttach()

static void WaitForReplicationWorkerAttach ( LogicalRepWorker *  worker,
uint16  generation,
BackgroundWorkerHandle *  handle 
)
static

Definition at line 177 of file launcher.c.

References BGWH_STOPPED, CHECK_FOR_INTERRUPTS, LogicalRepWorker::generation, GetBackgroundWorkerPid(), LogicalRepWorker::in_use, logicalrep_worker_cleanup(), LW_EXCLUSIVE, LW_SHARED, LWLockAcquire(), LWLockRelease(), MyLatch, LogicalRepWorker::proc, ResetLatch(), status(), WAIT_EVENT_BGWORKER_STARTUP, WaitLatch(), WL_EXIT_ON_PM_DEATH, WL_LATCH_SET, and WL_TIMEOUT.

Referenced by logicalrep_worker_launch().

{
    BgwHandleStatus status;
    int         rc;

    for (;;)
    {
        pid_t       pid;

        CHECK_FOR_INTERRUPTS();

        LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);

        /* Worker either died or has started; no need to do anything. */
        if (!worker->in_use || worker->proc)
        {
            LWLockRelease(LogicalRepWorkerLock);
            return;
        }

        LWLockRelease(LogicalRepWorkerLock);

        /* Check if worker has died before attaching, and clean up after it. */
        status = GetBackgroundWorkerPid(handle, &pid);

        if (status == BGWH_STOPPED)
        {
            LWLockAcquire(LogicalRepWorkerLock, LW_EXCLUSIVE);
            /* Ensure that this was indeed the worker we waited for. */
            if (generation == worker->generation)
                logicalrep_worker_cleanup(worker);
            LWLockRelease(LogicalRepWorkerLock);
            return;
        }

        /*
         * We need timeout because we generally don't get notified via latch
         * about the worker attach.  But we don't expect to have to wait long.
         */
        rc = WaitLatch(MyLatch,
                       WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
                       10L, WAIT_EVENT_BGWORKER_STARTUP);

        if (rc & WL_LATCH_SET)
        {
            ResetLatch(MyLatch);
            CHECK_FOR_INTERRUPTS();
        }
    }
}

◆ XactManipulatesLogicalReplicationWorkers()

bool XactManipulatesLogicalReplicationWorkers ( void  )

Definition at line 827 of file launcher.c.

Referenced by PrepareTransaction().

{
    return (on_commit_stop_workers != NULL);
}

Variable Documentation

◆ LogicalRepCtx

LogicalRepCtxStruct* LogicalRepCtx

Definition at line 68 of file launcher.c.

◆ max_logical_replication_workers

int max_logical_replication_workers = 4

Definition at line 54 of file launcher.c.

◆ max_sync_workers_per_subscription

int max_sync_workers_per_subscription = 2

Definition at line 55 of file launcher.c.

Referenced by logicalrep_worker_launch(), and process_syncing_tables_for_apply().

◆ MyLogicalRepWorker

LogicalRepWorker * MyLogicalRepWorker = NULL

Definition at line 57 of file launcher.c.

◆ on_commit_launcher_wakeup

bool on_commit_launcher_wakeup = false
static

Definition at line 96 of file launcher.c.

Referenced by ApplyLauncherWakeupAtCommit(), and AtEOXact_ApplyLauncher().

◆ on_commit_stop_workers

StopWorkersData* on_commit_stop_workers = NULL
static

Definition at line 88 of file launcher.c.

Referenced by logicalrep_worker_stop_at_commit().