proc.c
1 /*-------------------------------------------------------------------------
2  *
3  * proc.c
4  * routines to manage per-process shared memory data structure
5  *
6  * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/storage/lmgr/proc.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 /*
16  * Interface (a):
17  * ProcSleep(), ProcWakeup(),
18  *
19  * Waiting for a lock causes the backend to be put to sleep. Whoever releases
20  * the lock wakes the process up again (and gives it an error code so it knows
21  * whether it was awoken on an error condition).
22  *
23  * Interface (b):
24  *
25  * ProcReleaseLocks -- frees the locks associated with current transaction
26  *
27  * ProcKill -- destroys the shared memory state (and locks)
28  * associated with the process.
29  */
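/*
 * Illustrative sketch (hypothetical, not part of proc.c): the sleep/wake
 * handshake described above, reduced to a self-contained POSIX example.  One
 * thread plays the waiting backend (sem_wait), the other plays the process
 * releasing the lock (sem_post).  Real backends sleep on a PGSemaphore/latch
 * rather than a raw sem_t; all demo_* names are invented.
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

static sem_t demo_lock_granted;

static void *
demo_waiter(void *arg)
{
	sem_wait(&demo_lock_granted);	/* "ProcSleep": block until granted */
	printf("waiter: woken up, lock granted\n");
	return NULL;
}

static void *
demo_releaser(void *arg)
{
	sleep(1);						/* pretend to hold the lock for a while */
	printf("releaser: releasing lock, waking waiter\n");
	sem_post(&demo_lock_granted);	/* "ProcWakeup": wake the sleeper */
	return NULL;
}

int
main(void)
{
	pthread_t	waiter,
				releaser;

	sem_init(&demo_lock_granted, 0, 0);
	pthread_create(&waiter, NULL, demo_waiter, NULL);
	pthread_create(&releaser, NULL, demo_releaser, NULL);
	pthread_join(waiter, NULL);
	pthread_join(releaser, NULL);
	sem_destroy(&demo_lock_granted);
	return 0;
}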
30 #include "postgres.h"
31 
32 #include <signal.h>
33 #include <unistd.h>
34 #include <sys/time.h>
35 
36 #include "access/transam.h"
37 #include "access/twophase.h"
38 #include "access/xlogutils.h"
39 #include "commands/waitlsn.h"
40 #include "miscadmin.h"
41 #include "pgstat.h"
42 #include "postmaster/autovacuum.h"
43 #include "replication/slotsync.h"
44 #include "replication/syncrep.h"
46 #include "storage/ipc.h"
47 #include "storage/lmgr.h"
48 #include "storage/pmsignal.h"
49 #include "storage/proc.h"
50 #include "storage/procarray.h"
51 #include "storage/procsignal.h"
52 #include "storage/spin.h"
53 #include "storage/standby.h"
54 #include "utils/timeout.h"
55 #include "utils/timestamp.h"
56 
57 /* GUC variables */
58 int DeadlockTimeout = 1000;
60 int LockTimeout = 0;
64 bool log_lock_waits = false;
65 
66 /* Pointer to this process's PGPROC struct, if any */
67 PGPROC *MyProc = NULL;
68 
69 /*
70  * This spinlock protects the freelist of recycled PGPROC structures.
71  * We cannot use an LWLock because the LWLock manager depends on already
72  * having a PGPROC and a wait semaphore! But these structures are touched
73  * relatively infrequently (only at backend startup or shutdown) and not for
74  * very long, so a spinlock is okay.
75  */
77 
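/*
 * Illustrative sketch (hypothetical, not from this file): the
 * spinlock-protected freelist pattern described above, using a C11
 * atomic_flag as the spinlock.  All demo_* names are invented; the real code
 * uses slock_t and the PGPROC free lists in PROC_HDR.
 */
#include <stdatomic.h>
#include <stddef.h>

typedef struct DemoProc
{
	struct DemoProc *next;
	int			id;
} DemoProc;

static atomic_flag demo_freelist_lock = ATOMIC_FLAG_INIT;
static DemoProc *demo_freelist = NULL;

static void
demo_spin_acquire(void)
{
	/* busy-wait until we observe the flag clear and set it atomically */
	while (atomic_flag_test_and_set_explicit(&demo_freelist_lock,
											 memory_order_acquire))
		;
}

static void
demo_spin_release(void)
{
	atomic_flag_clear_explicit(&demo_freelist_lock, memory_order_release);
}

/* pop one recycled entry, or NULL if the freelist is empty */
static DemoProc *
demo_alloc_proc(void)
{
	DemoProc   *proc;

	demo_spin_acquire();
	proc = demo_freelist;
	if (proc != NULL)
		demo_freelist = proc->next;
	demo_spin_release();
	return proc;
}

/* push an entry back onto the freelist at process exit */
static void
demo_free_proc(DemoProc *proc)
{
	demo_spin_acquire();
	proc->next = demo_freelist;
	demo_freelist = proc;
	demo_spin_release();
}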
78 /* Pointers to shared-memory structures */
82 
83 /* If we are waiting for a lock, this points to the associated LOCALLOCK */
84 static LOCALLOCK *lockAwaited = NULL;
85 
87 
88 /* Is a deadlock check pending? */
89 static volatile sig_atomic_t got_deadlock_timeout;
90 
91 static void RemoveProcFromArray(int code, Datum arg);
92 static void ProcKill(int code, Datum arg);
93 static void AuxiliaryProcKill(int code, Datum arg);
94 static void CheckDeadLock(void);
95 
96 
97 /*
98  * Report shared-memory space needed by InitProcGlobal.
99  */
100 Size
102 {
103  Size size = 0;
104  Size TotalProcs =
106  Size fpLockBitsSize,
107  fpRelIdSize;
108 
109  /* ProcGlobal */
110  size = add_size(size, sizeof(PROC_HDR));
111  size = add_size(size, mul_size(TotalProcs, sizeof(PGPROC)));
112  size = add_size(size, sizeof(slock_t));
113 
114  size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->xids)));
115  size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->subxidStates)));
116  size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->statusFlags)));
117 
118  /*
119  * Memory needed for PGPROC fast-path lock arrays. Make sure the sizes are
120  * nicely aligned in each backend.
121  */
122  fpLockBitsSize = MAXALIGN(FastPathLockGroupsPerBackend * sizeof(uint64));
124 
125  size = add_size(size, mul_size(TotalProcs, (fpLockBitsSize + fpRelIdSize)));
126 
127  return size;
128 }
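/*
 * Illustrative sketch (hypothetical, not part of proc.c): the
 * overflow-checked size accounting used above.  demo_add_size() and
 * demo_mul_size() are invented stand-ins for the real add_size()/mul_size()
 * helpers, which error out if the requested size overflows size_t.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static size_t
demo_add_size(size_t a, size_t b)
{
	size_t		result = a + b;

	if (result < a)				/* unsigned wraparound means overflow */
	{
		fprintf(stderr, "requested size overflows size_t\n");
		exit(1);
	}
	return result;
}

static size_t
demo_mul_size(size_t a, size_t b)
{
	if (b != 0 && a > SIZE_MAX / b)
	{
		fprintf(stderr, "requested size overflows size_t\n");
		exit(1);
	}
	return a * b;
}

int
main(void)
{
	size_t		nprocs = 1000;	/* hypothetical number of PGPROCs */
	size_t		size = 0;

	size = demo_add_size(size, 128);	/* a fixed-size header */
	size = demo_add_size(size, demo_mul_size(nprocs, 64));	/* per-proc data */
	printf("estimated shared memory: %zu bytes\n", size);
	return 0;
}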
129 
130 /*
131  * Report number of semaphores needed by InitProcGlobal.
132  */
133 int
135 {
136  /*
137  * We need a sema per backend (including autovacuum), plus one for each
138  * auxiliary process.
139  */
141 }
142 
143 /*
144  * InitProcGlobal -
145  * Initialize the global process table during postmaster or standalone
146  * backend startup.
147  *
148  * We also create all the per-process semaphores we will need to support
149  * the requested number of backends. We used to allocate semaphores
150  * only when backends were actually started up, but that is bad because
151  * it lets Postgres fail under load --- a lot of Unix systems are
152  * (mis)configured with small limits on the number of semaphores, and
153  * running out when trying to start another backend is a common failure.
154  * So, now we grab enough semaphores to support the desired max number
155  * of backends immediately at initialization --- if the sysadmin has set
156  * MaxConnections, max_worker_processes, max_wal_senders, or
157  * autovacuum_max_workers higher than his kernel will support, he'll
158  * find out sooner rather than later.
159  *
160  * Another reason for creating semaphores here is that the semaphore
161  * implementation typically requires us to create semaphores in the
162  * postmaster, not in backends.
163  *
164  * Note: this is NOT called by individual backends under a postmaster,
165  * not even in the EXEC_BACKEND case. The ProcGlobal and AuxiliaryProcs
166  * pointers must be propagated specially for EXEC_BACKEND operation.
167  */
168 void
170 {
171  PGPROC *procs;
172  int i,
173  j;
174  bool found;
176 
177  /* Used for setup of per-backend fast-path slots. */
178  char *fpPtr,
179  *fpEndPtr PG_USED_FOR_ASSERTS_ONLY;
180  Size fpLockBitsSize,
181  fpRelIdSize;
182 
183  /* Create the ProcGlobal shared structure */
184  ProcGlobal = (PROC_HDR *)
185  ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
186  Assert(!found);
187 
188  /*
189  * Initialize the data structures.
190  */
197  ProcGlobal->walwriterLatch = NULL;
201 
202  /*
203  * Create and initialize all the PGPROC structures we'll need. There are
 204  * six separate consumers: (1) normal backends, (2) autovacuum workers
 205  * and the autovacuum launcher, (3) background workers, (4) walsenders,
 206  * (5) auxiliary processes, and (6) prepared transactions. Each PGPROC structure is
207  * dedicated to exactly one of these purposes, and they do not move
208  * between groups.
209  */
210  procs = (PGPROC *) ShmemAlloc(TotalProcs * sizeof(PGPROC));
211  MemSet(procs, 0, TotalProcs * sizeof(PGPROC));
212  ProcGlobal->allProcs = procs;
213  /* XXX allProcCount isn't really all of them; it excludes prepared xacts */
215 
216  /*
217  * Allocate arrays mirroring PGPROC fields in a dense manner. See
218  * PROC_HDR.
219  *
220  * XXX: It might make sense to increase padding for these arrays, given
221  * how hotly they are accessed.
222  */
223  ProcGlobal->xids =
224  (TransactionId *) ShmemAlloc(TotalProcs * sizeof(*ProcGlobal->xids));
225  MemSet(ProcGlobal->xids, 0, TotalProcs * sizeof(*ProcGlobal->xids));
227  MemSet(ProcGlobal->subxidStates, 0, TotalProcs * sizeof(*ProcGlobal->subxidStates));
228  ProcGlobal->statusFlags = (uint8 *) ShmemAlloc(TotalProcs * sizeof(*ProcGlobal->statusFlags));
229  MemSet(ProcGlobal->statusFlags, 0, TotalProcs * sizeof(*ProcGlobal->statusFlags));
230 
231  /*
232  * Allocate arrays for fast-path locks. Those are variable-length, so
233  * can't be included in PGPROC directly. We allocate a separate piece of
234  * shared memory and then divide that between backends.
235  */
236  fpLockBitsSize = MAXALIGN(FastPathLockGroupsPerBackend * sizeof(uint64));
238 
239  fpPtr = ShmemAlloc(TotalProcs * (fpLockBitsSize + fpRelIdSize));
240  MemSet(fpPtr, 0, TotalProcs * (fpLockBitsSize + fpRelIdSize));
241 
242  /* For asserts checking we did not overflow. */
243  fpEndPtr = fpPtr + (TotalProcs * (fpLockBitsSize + fpRelIdSize));
244 
245  for (i = 0; i < TotalProcs; i++)
246  {
247  PGPROC *proc = &procs[i];
248 
249  /* Common initialization for all PGPROCs, regardless of type. */
250 
251  /*
252  * Set the fast-path lock arrays, and move the pointer. We interleave
253  * the two arrays, to (hopefully) get some locality for each backend.
254  */
255  proc->fpLockBits = (uint64 *) fpPtr;
256  fpPtr += fpLockBitsSize;
257 
258  proc->fpRelId = (Oid *) fpPtr;
259  fpPtr += fpRelIdSize;
260 
261  Assert(fpPtr <= fpEndPtr);
262 
263  /*
264  * Set up per-PGPROC semaphore, latch, and fpInfoLock. Prepared xact
265  * dummy PGPROCs don't need these though - they're never associated
266  * with a real process
267  */
269  {
270  proc->sem = PGSemaphoreCreate();
271  InitSharedLatch(&(proc->procLatch));
273  }
274 
275  /*
276  * Newly created PGPROCs for normal backends, autovacuum and bgworkers
277  * must be queued up on the appropriate free list. Because there can
278  * only ever be a small, fixed number of auxiliary processes, no free
279  * list is used in that case; InitAuxiliaryProcess() instead uses a
280  * linear search. PGPROCs for prepared transactions are added to a
281  * free list by TwoPhaseShmemInit().
282  */
283  if (i < MaxConnections)
284  {
285  /* PGPROC for normal backend, add to freeProcs list */
288  }
289  else if (i < MaxConnections + autovacuum_max_workers + 1)
290  {
291  /* PGPROC for AV launcher/worker, add to autovacFreeProcs list */
294  }
296  {
297  /* PGPROC for bgworker, add to bgworkerFreeProcs list */
300  }
301  else if (i < MaxBackends)
302  {
303  /* PGPROC for walsender, add to walsenderFreeProcs list */
306  }
307 
308  /* Initialize myProcLocks[] shared memory queues. */
309  for (j = 0; j < NUM_LOCK_PARTITIONS; j++)
310  dlist_init(&(proc->myProcLocks[j]));
311 
312  /* Initialize lockGroupMembers list. */
314 
315  /*
 316  * Initialize the atomic variables; otherwise, it won't be safe to
317  * access them for backends that aren't currently in use.
318  */
321  pg_atomic_init_u64(&(proc->waitStart), 0);
322  }
323 
324  /* Should have consumed exactly the expected amount of fast-path memory. */
325  Assert(fpPtr == fpEndPtr);
326 
327  /*
328  * Save pointers to the blocks of PGPROC structures reserved for auxiliary
329  * processes and prepared transactions.
330  */
331  AuxiliaryProcs = &procs[MaxBackends];
333 
334  /* Create ProcStructLock spinlock, too */
335  ProcStructLock = (slock_t *) ShmemAlloc(sizeof(slock_t));
337 }
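/*
 * Illustrative sketch (hypothetical, not part of proc.c): carving one big
 * allocation into interleaved per-backend arrays, as done above for the
 * fast-path lock arrays.  Plain calloc() stands in for ShmemAlloc(), and the
 * sizes, types, and demo_* names are invented for the example.
 */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define DEMO_NPROCS		8
#define DEMO_GROUPS		2		/* stand-in for FastPathLockGroupsPerBackend */
#define DEMO_SLOTS		(DEMO_GROUPS * 16)

typedef struct
{
	uint64_t   *lockBits;		/* one bitmask word per fast-path group */
	uint32_t   *relId;			/* one relation id per fast-path slot */
} DemoProc;

int
main(void)
{
	size_t		bitsSize = DEMO_GROUPS * sizeof(uint64_t);
	size_t		relIdSize = DEMO_SLOTS * sizeof(uint32_t);
	char	   *ptr;
	char	   *endPtr;
	DemoProc	procs[DEMO_NPROCS];

	/* one zeroed allocation covering every backend's arrays */
	ptr = calloc(DEMO_NPROCS, bitsSize + relIdSize);
	if (ptr == NULL)
		return 1;
	endPtr = ptr + DEMO_NPROCS * (bitsSize + relIdSize);

	/* interleave the two arrays to keep each backend's data together */
	for (int i = 0; i < DEMO_NPROCS; i++)
	{
		procs[i].lockBits = (uint64_t *) ptr;
		ptr += bitsSize;
		procs[i].relId = (uint32_t *) ptr;
		ptr += relIdSize;
		assert(ptr <= endPtr);
	}

	/* should have consumed exactly the allocation */
	assert(ptr == endPtr);
	procs[0].lockBits[0] = 1;	/* the arrays are usable now */
	return 0;
}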
338 
339 /*
340  * InitProcess -- initialize a per-process PGPROC entry for this backend
341  */
342 void
344 {
345  dlist_head *procgloballist;
346 
347  /*
348  * ProcGlobal should be set up already (if we are a backend, we inherit
349  * this by fork() or EXEC_BACKEND mechanism from the postmaster).
350  */
351  if (ProcGlobal == NULL)
352  elog(PANIC, "proc header uninitialized");
353 
354  if (MyProc != NULL)
355  elog(ERROR, "you already exist");
356 
357  /*
358  * Before we start accessing the shared memory in a serious way, mark
359  * ourselves as an active postmaster child; this is so that the postmaster
360  * can detect it if we exit without cleaning up. (XXX autovac launcher
361  * currently doesn't participate in this; it probably should.)
362  *
 363  * The slot sync worker also does not participate in it; see comments atop
364  * 'struct bkend' in postmaster.c.
365  */
369 
370  /* Decide which list should supply our PGPROC. */
372  procgloballist = &ProcGlobal->autovacFreeProcs;
373  else if (AmBackgroundWorkerProcess())
374  procgloballist = &ProcGlobal->bgworkerFreeProcs;
375  else if (AmWalSenderProcess())
376  procgloballist = &ProcGlobal->walsenderFreeProcs;
377  else
378  procgloballist = &ProcGlobal->freeProcs;
379 
380  /*
381  * Try to get a proc struct from the appropriate free list. If this
382  * fails, we must be out of PGPROC structures (not to mention semaphores).
383  *
384  * While we are holding the ProcStructLock, also copy the current shared
385  * estimate of spins_per_delay to local storage.
386  */
388 
390 
391  if (!dlist_is_empty(procgloballist))
392  {
395  }
396  else
397  {
398  /*
399  * If we reach here, all the PGPROCs are in use. This is one of the
400  * possible places to detect "too many backends", so give the standard
401  * error message. XXX do we need to give a different failure message
402  * in the autovacuum case?
403  */
405  if (AmWalSenderProcess())
406  ereport(FATAL,
407  (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
408  errmsg("number of requested standby connections exceeds \"max_wal_senders\" (currently %d)",
409  max_wal_senders)));
410  ereport(FATAL,
411  (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
412  errmsg("sorry, too many clients already")));
413  }
415 
416  /*
417  * Cross-check that the PGPROC is of the type we expect; if this were not
418  * the case, it would get returned to the wrong list.
419  */
420  Assert(MyProc->procgloballist == procgloballist);
421 
422  /*
423  * Initialize all fields of MyProc, except for those previously
424  * initialized by InitProcGlobal.
425  */
428  MyProc->fpVXIDLock = false;
432  MyProc->pid = MyProcPid;
435  /* databaseId and roleId will be filled in later */
440  MyProc->delayChkptFlags = 0;
441  MyProc->statusFlags = 0;
442  /* NB -- autovac launcher intentionally does not set IS_AUTOVACUUM */
446  MyProc->lwWaitMode = 0;
447  MyProc->waitLock = NULL;
448  MyProc->waitProcLock = NULL;
450 #ifdef USE_ASSERT_CHECKING
451  {
452  int i;
453 
454  /* Last process should have released all locks. */
455  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
457  }
458 #endif
460 
461  /* Initialize fields for sync rep */
462  MyProc->waitLSN = 0;
465 
466  /* Initialize fields for group XID clearing. */
467  MyProc->procArrayGroupMember = false;
470 
471  /* Check that group locking fields are in a proper initial state. */
472  Assert(MyProc->lockGroupLeader == NULL);
474 
475  /* Initialize wait event information. */
476  MyProc->wait_event_info = 0;
477 
478  /* Initialize fields for group transaction status update. */
479  MyProc->clogGroupMember = false;
485 
486  /*
487  * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
 488  * on it. That allows us to repoint the process latch, which so far
 489  * points to the process-local one, to the shared one.
490  */
493 
494  /* now that we have a proc, report wait events to shared memory */
496 
497  /*
498  * We might be reusing a semaphore that belonged to a failed process. So
499  * be careful and reinitialize its value here. (This is not strictly
500  * necessary anymore, but seems like a good idea for cleanliness.)
501  */
503 
504  /*
505  * Arrange to clean up at backend exit.
506  */
508 
509  /*
510  * Now that we have a PGPROC, we could try to acquire locks, so initialize
511  * local state needed for LWLocks, and the deadlock checker.
512  */
515 
516 #ifdef EXEC_BACKEND
517 
518  /*
519  * Initialize backend-local pointers to all the shared data structures.
520  * (We couldn't do this until now because it needs LWLocks.)
521  */
522  if (IsUnderPostmaster)
523  AttachSharedMemoryStructs();
524 #endif
525 }
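/*
 * Illustrative sketch (hypothetical, not part of proc.c): the "arrange to
 * clean up at backend exit" pattern used above, expressed with plain
 * atexit().  The real on_shmem_exit() mechanism likewise runs registered
 * callbacks at process shutdown; demo_* names are invented.
 */
#include <stdio.h>
#include <stdlib.h>

static void
demo_proc_kill(void)
{
	/* return the per-process entry to the freelist, release locks, etc. */
	printf("cleanup: returning proc entry to the freelist\n");
}

int
main(void)
{
	/* registered once the process owns shared resources */
	atexit(demo_proc_kill);

	printf("doing normal work\n");
	return 0;					/* demo_proc_kill() runs on exit */
}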
526 
527 /*
528  * InitProcessPhase2 -- make MyProc visible in the shared ProcArray.
529  *
530  * This is separate from InitProcess because we can't acquire LWLocks until
531  * we've created a PGPROC, but in the EXEC_BACKEND case ProcArrayAdd won't
532  * work until after we've done AttachSharedMemoryStructs.
533  */
534 void
536 {
537  Assert(MyProc != NULL);
538 
539  /*
540  * Add our PGPROC to the PGPROC array in shared memory.
541  */
543 
544  /*
545  * Arrange to clean that up at backend exit.
546  */
548 }
549 
550 /*
551  * InitAuxiliaryProcess -- create a PGPROC entry for an auxiliary process
552  *
553  * This is called by bgwriter and similar processes so that they will have a
554  * MyProc value that's real enough to let them wait for LWLocks. The PGPROC
555  * and sema that are assigned are one of the extra ones created during
556  * InitProcGlobal.
557  *
558  * Auxiliary processes are presently not expected to wait for real (lockmgr)
559  * locks, so we need not set up the deadlock checker. They are never added
560  * to the ProcArray or the sinval messaging mechanism, either. They also
561  * don't get a VXID assigned, since this is only useful when we actually
562  * hold lockmgr locks.
563  *
 564  * The startup process, however, uses locks but never waits for them in the
 565  * normal backend sense. It also takes part in sinval messaging as a
 566  * sendOnly process, so it never reads messages from the sinval queue. So
 567  * the startup process does have a VXID and does show up in pg_locks.
568  */
569 void
571 {
572  PGPROC *auxproc;
573  int proctype;
574 
575  /*
576  * ProcGlobal should be set up already (if we are a backend, we inherit
577  * this by fork() or EXEC_BACKEND mechanism from the postmaster).
578  */
579  if (ProcGlobal == NULL || AuxiliaryProcs == NULL)
580  elog(PANIC, "proc header uninitialized");
581 
582  if (MyProc != NULL)
583  elog(ERROR, "you already exist");
584 
585  /*
586  * We use the ProcStructLock to protect assignment and releasing of
587  * AuxiliaryProcs entries.
588  *
589  * While we are holding the ProcStructLock, also copy the current shared
590  * estimate of spins_per_delay to local storage.
591  */
593 
595 
596  /*
597  * Find a free auxproc ... *big* trouble if there isn't one ...
598  */
599  for (proctype = 0; proctype < NUM_AUXILIARY_PROCS; proctype++)
600  {
601  auxproc = &AuxiliaryProcs[proctype];
602  if (auxproc->pid == 0)
603  break;
604  }
605  if (proctype >= NUM_AUXILIARY_PROCS)
606  {
608  elog(FATAL, "all AuxiliaryProcs are in use");
609  }
610 
611  /* Mark auxiliary proc as in use by me */
612  /* use volatile pointer to prevent code rearrangement */
613  ((volatile PGPROC *) auxproc)->pid = MyProcPid;
614 
616 
617  MyProc = auxproc;
619 
620  /*
621  * Initialize all fields of MyProc, except for those previously
622  * initialized by InitProcGlobal.
623  */
626  MyProc->fpVXIDLock = false;
636  MyProc->delayChkptFlags = 0;
637  MyProc->statusFlags = 0;
639  MyProc->lwWaitMode = 0;
640  MyProc->waitLock = NULL;
641  MyProc->waitProcLock = NULL;
643 #ifdef USE_ASSERT_CHECKING
644  {
645  int i;
646 
647  /* Last process should have released all locks. */
648  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
650  }
651 #endif
652 
653  /*
654  * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
 655  * on it. That allows us to repoint the process latch, which so far
 656  * points to the process-local one, to the shared one.
657  */
660 
661  /* now that we have a proc, report wait events to shared memory */
663 
664  /* Check that group locking fields are in a proper initial state. */
665  Assert(MyProc->lockGroupLeader == NULL);
667 
668  /*
669  * We might be reusing a semaphore that belonged to a failed process. So
670  * be careful and reinitialize its value here. (This is not strictly
671  * necessary anymore, but seems like a good idea for cleanliness.)
672  */
674 
675  /*
676  * Arrange to clean up at process exit.
677  */
679 
680  /*
681  * Now that we have a PGPROC, we could try to acquire lightweight locks.
682  * Initialize local state needed for them. (Heavyweight locks cannot be
683  * acquired in aux processes.)
684  */
686 
687 #ifdef EXEC_BACKEND
688 
689  /*
690  * Initialize backend-local pointers to all the shared data structures.
691  * (We couldn't do this until now because it needs LWLocks.)
692  */
693  if (IsUnderPostmaster)
694  AttachSharedMemoryStructs();
695 #endif
696 }
697 
698 /*
699  * Used from bufmgr to share the value of the buffer that Startup waits on,
700  * or to reset the value to "not waiting" (-1). This allows processing
 701  * of recovery conflicts for buffer pins. The value is set before backends
 702  * look at it, so no locking is required, especially since the assignment
 703  * is an atomic integer store.
704  */
705 void
707 {
708  /* use volatile pointer to prevent code rearrangement */
709  volatile PROC_HDR *procglobal = ProcGlobal;
710 
711  procglobal->startupBufferPinWaitBufId = bufid;
712 }
713 
714 /*
715  * Used by backends when they receive a request to check for buffer pin waits.
716  */
717 int
719 {
720  /* use volatile pointer to prevent code rearrangement */
721  volatile PROC_HDR *procglobal = ProcGlobal;
722 
723  return procglobal->startupBufferPinWaitBufId;
724 }
725 
726 /*
727  * Check whether there are at least N free PGPROC objects. If false is
728  * returned, *nfree will be set to the number of free PGPROC objects.
729  * Otherwise, *nfree will be set to n.
730  *
731  * Note: this is designed on the assumption that N will generally be small.
732  */
733 bool
734 HaveNFreeProcs(int n, int *nfree)
735 {
736  dlist_iter iter;
737 
738  Assert(n > 0);
739  Assert(nfree);
740 
742 
743  *nfree = 0;
745  {
746  (*nfree)++;
747  if (*nfree == n)
748  break;
749  }
750 
752 
753  return (*nfree == n);
754 }
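/*
 * Hypothetical usage sketch (not from this file): a caller that refuses new
 * ordinary connections unless a few PGPROCs remain free.  DEMO_RESERVED_PROCS
 * is an invented constant; real callers use GUCs for this purpose.
 */
#define DEMO_RESERVED_PROCS 3

static bool
demo_can_admit_ordinary_connection(void)
{
	int			nfree;

	/* true only if at least DEMO_RESERVED_PROCS free slots remain */
	return HaveNFreeProcs(DEMO_RESERVED_PROCS, &nfree);
}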
755 
756 /*
757  * Check if the current process is awaiting a lock.
758  */
759 bool
761 {
762  if (lockAwaited == NULL)
763  return false;
764 
765  return true;
766 }
767 
768 /*
769  * Cancel any pending wait for lock, when aborting a transaction, and revert
770  * any strong lock count acquisition for a lock being acquired.
771  *
772  * (Normally, this would only happen if we accept a cancel/die
773  * interrupt while waiting; but an ereport(ERROR) before or during the lock
774  * wait is within the realm of possibility, too.)
775  */
776 void
778 {
779  LWLock *partitionLock;
780  DisableTimeoutParams timeouts[2];
781 
782  HOLD_INTERRUPTS();
783 
785 
786  /* Nothing to do if we weren't waiting for a lock */
787  if (lockAwaited == NULL)
788  {
790  return;
791  }
792 
793  /*
794  * Turn off the deadlock and lock timeout timers, if they are still
795  * running (see ProcSleep). Note we must preserve the LOCK_TIMEOUT
796  * indicator flag, since this function is executed before
797  * ProcessInterrupts when responding to SIGINT; else we'd lose the
798  * knowledge that the SIGINT came from a lock timeout and not an external
799  * source.
800  */
801  timeouts[0].id = DEADLOCK_TIMEOUT;
802  timeouts[0].keep_indicator = false;
803  timeouts[1].id = LOCK_TIMEOUT;
804  timeouts[1].keep_indicator = true;
805  disable_timeouts(timeouts, 2);
806 
807  /* Unlink myself from the wait queue, if on it (might not be anymore!) */
808  partitionLock = LockHashPartitionLock(lockAwaited->hashcode);
809  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
810 
812  {
813  /* We could not have been granted the lock yet */
815  }
816  else
817  {
818  /*
819  * Somebody kicked us off the lock queue already. Perhaps they
820  * granted us the lock, or perhaps they detected a deadlock. If they
821  * did grant us the lock, we'd better remember it in our local lock
822  * table.
823  */
826  }
827 
828  lockAwaited = NULL;
829 
830  LWLockRelease(partitionLock);
831 
833 }
834 
835 
836 /*
837  * ProcReleaseLocks() -- release locks associated with current transaction
838  * at main transaction commit or abort
839  *
840  * At main transaction commit, we release standard locks except session locks.
841  * At main transaction abort, we release all locks including session locks.
842  *
843  * Advisory locks are released only if they are transaction-level;
844  * session-level holds remain, whether this is a commit or not.
845  *
846  * At subtransaction commit, we don't release any locks (so this func is not
847  * needed at all); we will defer the releasing to the parent transaction.
848  * At subtransaction abort, we release all locks held by the subtransaction;
849  * this is implemented by retail releasing of the locks under control of
850  * the ResourceOwner mechanism.
851  */
852 void
853 ProcReleaseLocks(bool isCommit)
854 {
855  if (!MyProc)
856  return;
857  /* If waiting, get off wait queue (should only be needed after error) */
859  /* Release standard locks, including session-level if aborting */
861  /* Release transaction-level advisory locks */
863 }
864 
865 
866 /*
867  * RemoveProcFromArray() -- Remove this process from the shared ProcArray.
868  */
869 static void
871 {
872  Assert(MyProc != NULL);
874 }
875 
876 /*
877  * ProcKill() -- Destroy the per-proc data structure for
878  * this process. Release any of its held LW locks.
879  */
880 static void
881 ProcKill(int code, Datum arg)
882 {
883  PGPROC *proc;
884  dlist_head *procgloballist;
885 
886  Assert(MyProc != NULL);
887 
888  /* not safe if forked by system(), etc. */
889  if (MyProc->pid != (int) getpid())
890  elog(PANIC, "ProcKill() called in child process");
891 
892  /* Make sure we're out of the sync rep lists */
894 
895 #ifdef USE_ASSERT_CHECKING
896  {
897  int i;
898 
899  /* Last process should have released all locks. */
900  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
902  }
903 #endif
904 
905  /*
906  * Release any LW locks I am holding. There really shouldn't be any, but
907  * it's cheap to check again before we cut the knees off the LWLock
908  * facility by releasing our PGPROC ...
909  */
911 
912  /*
913  * Cleanup waiting for LSN if any.
914  */
915  WaitLSNCleanup();
916 
917  /* Cancel any pending condition variable sleep, too */
919 
920  /*
921  * Detach from any lock group of which we are a member. If the leader
922  * exits before all other group members, its PGPROC will remain allocated
923  * until the last group process exits; that process must return the
924  * leader's PGPROC to the appropriate list.
925  */
926  if (MyProc->lockGroupLeader != NULL)
927  {
928  PGPROC *leader = MyProc->lockGroupLeader;
929  LWLock *leader_lwlock = LockHashPartitionLockByProc(leader);
930 
931  LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
934  if (dlist_is_empty(&leader->lockGroupMembers))
935  {
936  leader->lockGroupLeader = NULL;
937  if (leader != MyProc)
938  {
939  procgloballist = leader->procgloballist;
940 
941  /* Leader exited first; return its PGPROC. */
943  dlist_push_head(procgloballist, &leader->links);
945  }
946  }
947  else if (leader != MyProc)
948  MyProc->lockGroupLeader = NULL;
949  LWLockRelease(leader_lwlock);
950  }
951 
952  /*
953  * Reset MyLatch to the process local one. This is so that signal
954  * handlers et al can continue using the latch after the shared latch
955  * isn't ours anymore.
956  *
957  * Similarly, stop reporting wait events to MyProc->wait_event_info.
958  *
959  * After that clear MyProc and disown the shared latch.
960  */
963 
964  proc = MyProc;
965  MyProc = NULL;
967  DisownLatch(&proc->procLatch);
968 
969  /* Mark the proc no longer in use */
970  proc->pid = 0;
973 
974  procgloballist = proc->procgloballist;
976 
977  /*
978  * If we're still a member of a locking group, that means we're a leader
979  * which has somehow exited before its children. The last remaining child
980  * will release our PGPROC. Otherwise, release it now.
981  */
982  if (proc->lockGroupLeader == NULL)
983  {
984  /* Since lockGroupLeader is NULL, lockGroupMembers should be empty. */
986 
987  /* Return PGPROC structure (and semaphore) to appropriate freelist */
988  dlist_push_tail(procgloballist, &proc->links);
989  }
990 
991  /* Update shared estimate of spins_per_delay */
993 
995 
996  /* wake autovac launcher if needed -- see comments in FreeWorkerInfo */
997  if (AutovacuumLauncherPid != 0)
999 }
1000 
1001 /*
1002  * AuxiliaryProcKill() -- Cut-down version of ProcKill for auxiliary
1003  * processes (bgwriter, etc). The PGPROC and sema are not released, only
1004  * marked as not-in-use.
1005  */
1006 static void
1008 {
1009  int proctype = DatumGetInt32(arg);
1011  PGPROC *proc;
1012 
1013  Assert(proctype >= 0 && proctype < NUM_AUXILIARY_PROCS);
1014 
1015  /* not safe if forked by system(), etc. */
1016  if (MyProc->pid != (int) getpid())
1017  elog(PANIC, "AuxiliaryProcKill() called in child process");
1018 
1019  auxproc = &AuxiliaryProcs[proctype];
1020 
1021  Assert(MyProc == auxproc);
1022 
1023  /* Release any LW locks I am holding (see notes above) */
1024  LWLockReleaseAll();
1025 
1026  /* Cancel any pending condition variable sleep, too */
1028 
1029  /* look at the equivalent ProcKill() code for comments */
1032 
1033  proc = MyProc;
1034  MyProc = NULL;
1036  DisownLatch(&proc->procLatch);
1037 
1039 
1040  /* Mark auxiliary proc no longer in use */
1041  proc->pid = 0;
1043  proc->vxid.lxid = InvalidTransactionId;
1044 
1045  /* Update shared estimate of spins_per_delay */
1047 
1049 }
1050 
1051 /*
1052  * AuxiliaryPidGetProc -- get PGPROC for an auxiliary process
1053  * given its PID
1054  *
1055  * Returns NULL if not found.
1056  */
1057 PGPROC *
1059 {
1060  PGPROC *result = NULL;
1061  int index;
1062 
1063  if (pid == 0) /* never match dummy PGPROCs */
1064  return NULL;
1065 
1066  for (index = 0; index < NUM_AUXILIARY_PROCS; index++)
1067  {
1068  PGPROC *proc = &AuxiliaryProcs[index];
1069 
1070  if (proc->pid == pid)
1071  {
1072  result = proc;
1073  break;
1074  }
1075  }
1076  return result;
1077 }
1078 
1079 
1080 /*
1081  * ProcSleep -- put a process to sleep on the specified lock
1082  *
1083  * Caller must have set MyProc->heldLocks to reflect locks already held
1084  * on the lockable object by this process (under all XIDs).
1085  *
1086  * It's not actually guaranteed that we need to wait when this function is
1087  * called, because it could be that when we try to find a position at which
 1088  * to insert ourselves into the wait queue, we discover that we must be inserted
 1089  * ahead of everyone who wants a lock that conflicts with ours. In that case,
1090  * we get the lock immediately. Because of this, it's sensible for this function
1091  * to have a dontWait argument, despite the name.
1092  *
1093  * The lock table's partition lock must be held at entry, and will be held
1094  * at exit.
1095  *
1096  * Result: PROC_WAIT_STATUS_OK if we acquired the lock, PROC_WAIT_STATUS_ERROR
1097  * if not (if dontWait = true, we would have had to wait; if dontWait = false,
1098  * this is a deadlock).
1099  *
1100  * ASSUME: that no one will fiddle with the queue until after
1101  * we release the partition lock.
1102  *
1103  * NOTES: The process queue is now a priority queue for locking.
1104  */
1106 ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable, bool dontWait)
1107 {
1108  LOCKMODE lockmode = locallock->tag.mode;
1109  LOCK *lock = locallock->lock;
1110  PROCLOCK *proclock = locallock->proclock;
1111  uint32 hashcode = locallock->hashcode;
1112  LWLock *partitionLock = LockHashPartitionLock(hashcode);
1113  dclist_head *waitQueue = &lock->waitProcs;
1114  PGPROC *insert_before = NULL;
1115  LOCKMASK myHeldLocks = MyProc->heldLocks;
1116  TimestampTz standbyWaitStart = 0;
1117  bool early_deadlock = false;
1118  bool allow_autovacuum_cancel = true;
1119  bool logged_recovery_conflict = false;
1120  ProcWaitStatus myWaitStatus;
1121  PGPROC *leader = MyProc->lockGroupLeader;
1122 
1123  /*
1124  * If group locking is in use, locks held by members of my locking group
 1125  * need to be included in myHeldLocks. This is not required for the
 1126  * relation extension lock, which does conflict among group members. However,
 1127  * including them in myHeldLocks gives group members priority to get those
 1128  * locks as compared to other backends which are also trying to acquire
 1129  * those locks. OTOH, we could avoid giving priority to group members for
 1130  * that kind of lock, but there doesn't appear to be a clear advantage in
 1131  * doing so.
1132  */
1133  if (leader != NULL)
1134  {
1135  dlist_iter iter;
1136 
1137  dlist_foreach(iter, &lock->procLocks)
1138  {
1139  PROCLOCK *otherproclock;
1140 
1141  otherproclock = dlist_container(PROCLOCK, lockLink, iter.cur);
1142 
1143  if (otherproclock->groupLeader == leader)
1144  myHeldLocks |= otherproclock->holdMask;
1145  }
1146  }
1147 
1148  /*
1149  * Determine where to add myself in the wait queue.
1150  *
1151  * Normally I should go at the end of the queue. However, if I already
1152  * hold locks that conflict with the request of any previous waiter, put
1153  * myself in the queue just in front of the first such waiter. This is not
1154  * a necessary step, since deadlock detection would move me to before that
1155  * waiter anyway; but it's relatively cheap to detect such a conflict
1156  * immediately, and avoid delaying till deadlock timeout.
1157  *
1158  * Special case: if I find I should go in front of some waiter, check to
1159  * see if I conflict with already-held locks or the requests before that
1160  * waiter. If not, then just grant myself the requested lock immediately.
1161  * This is the same as the test for immediate grant in LockAcquire, except
1162  * we are only considering the part of the wait queue before my insertion
1163  * point.
1164  */
1165  if (myHeldLocks != 0 && !dclist_is_empty(waitQueue))
1166  {
1167  LOCKMASK aheadRequests = 0;
1168  dlist_iter iter;
1169 
1170  dclist_foreach(iter, waitQueue)
1171  {
1172  PGPROC *proc = dlist_container(PGPROC, links, iter.cur);
1173 
1174  /*
1175  * If we're part of the same locking group as this waiter, its
1176  * locks neither conflict with ours nor contribute to
1177  * aheadRequests.
1178  */
1179  if (leader != NULL && leader == proc->lockGroupLeader)
1180  continue;
1181 
1182  /* Must he wait for me? */
1183  if (lockMethodTable->conflictTab[proc->waitLockMode] & myHeldLocks)
1184  {
 1185  /* Must I wait for him? */
1186  if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
1187  {
1188  /*
1189  * Yes, so we have a deadlock. Easiest way to clean up
1190  * correctly is to call RemoveFromWaitQueue(), but we
1191  * can't do that until we are *on* the wait queue. So, set
1192  * a flag to check below, and break out of loop. Also,
1193  * record deadlock info for later message.
1194  */
1195  RememberSimpleDeadLock(MyProc, lockmode, lock, proc);
1196  early_deadlock = true;
1197  break;
1198  }
1199  /* I must go before this waiter. Check special case. */
1200  if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
1201  !LockCheckConflicts(lockMethodTable, lockmode, lock,
1202  proclock))
1203  {
1204  /* Skip the wait and just grant myself the lock. */
1205  GrantLock(lock, proclock, lockmode);
1206  GrantAwaitedLock();
1207  return PROC_WAIT_STATUS_OK;
1208  }
1209 
1210  /* Put myself into wait queue before conflicting process */
1211  insert_before = proc;
1212  break;
1213  }
1214  /* Nope, so advance to next waiter */
1215  aheadRequests |= LOCKBIT_ON(proc->waitLockMode);
1216  }
1217  }
1218 
1219  /*
1220  * At this point we know that we'd really need to sleep. If we've been
1221  * commanded not to do that, bail out.
1222  */
1223  if (dontWait)
1224  return PROC_WAIT_STATUS_ERROR;
1225 
1226  /*
1227  * Insert self into queue, at the position determined above.
1228  */
1229  if (insert_before)
1230  dclist_insert_before(waitQueue, &insert_before->links, &MyProc->links);
1231  else
1232  dclist_push_tail(waitQueue, &MyProc->links);
1233 
1234  lock->waitMask |= LOCKBIT_ON(lockmode);
1235 
1236  /* Set up wait information in PGPROC object, too */
1237  MyProc->waitLock = lock;
1238  MyProc->waitProcLock = proclock;
1239  MyProc->waitLockMode = lockmode;
1240 
1242 
1243  /*
1244  * If we detected deadlock, give up without waiting. This must agree with
1245  * CheckDeadLock's recovery code.
1246  */
1247  if (early_deadlock)
1248  {
1249  RemoveFromWaitQueue(MyProc, hashcode);
1250  return PROC_WAIT_STATUS_ERROR;
1251  }
1252 
1253  /* mark that we are waiting for a lock */
1254  lockAwaited = locallock;
1255 
1256  /*
1257  * Release the lock table's partition lock.
1258  *
1259  * NOTE: this may also cause us to exit critical-section state, possibly
1260  * allowing a cancel/die interrupt to be accepted. This is OK because we
1261  * have recorded the fact that we are waiting for a lock, and so
1262  * LockErrorCleanup will clean up if cancel/die happens.
1263  */
1264  LWLockRelease(partitionLock);
1265 
1266  /*
1267  * Also, now that we will successfully clean up after an ereport, it's
1268  * safe to check to see if there's a buffer pin deadlock against the
1269  * Startup process. Of course, that's only necessary if we're doing Hot
1270  * Standby and are not the Startup process ourselves.
1271  */
1272  if (RecoveryInProgress() && !InRecovery)
1274 
1275  /* Reset deadlock_state before enabling the timeout handler */
1277  got_deadlock_timeout = false;
1278 
1279  /*
 1280  * Set a timer so we can wake up after a while and check for a deadlock. If a
1281  * deadlock is detected, the handler sets MyProc->waitStatus =
1282  * PROC_WAIT_STATUS_ERROR, allowing us to know that we must report failure
1283  * rather than success.
1284  *
1285  * By delaying the check until we've waited for a bit, we can avoid
1286  * running the rather expensive deadlock-check code in most cases.
1287  *
1288  * If LockTimeout is set, also enable the timeout for that. We can save a
1289  * few cycles by enabling both timeout sources in one call.
1290  *
 1291  * If InHotStandby, we set the lock-wait timers slightly later, for clarity
 1292  * with other code.
1293  */
1294  if (!InHotStandby)
1295  {
1296  if (LockTimeout > 0)
1297  {
1298  EnableTimeoutParams timeouts[2];
1299 
1300  timeouts[0].id = DEADLOCK_TIMEOUT;
1301  timeouts[0].type = TMPARAM_AFTER;
1302  timeouts[0].delay_ms = DeadlockTimeout;
1303  timeouts[1].id = LOCK_TIMEOUT;
1304  timeouts[1].type = TMPARAM_AFTER;
1305  timeouts[1].delay_ms = LockTimeout;
1306  enable_timeouts(timeouts, 2);
1307  }
1308  else
1310 
1311  /*
1312  * Use the current time obtained for the deadlock timeout timer as
1313  * waitStart (i.e., the time when this process started waiting for the
 1314  * lock). Since fetching the current time again would add overhead, we
 1315  * reuse the already-obtained timestamp instead.
1316  *
1317  * Note that waitStart is updated without holding the lock table's
1318  * partition lock, to avoid the overhead by additional lock
1319  * acquisition. This can cause "waitstart" in pg_locks to become NULL
1320  * for a very short period of time after the wait started even though
1321  * "granted" is false. This is OK in practice because we can assume
1322  * that users are likely to look at "waitstart" when waiting for the
1323  * lock for a long time.
1324  */
1327  }
1328  else if (log_recovery_conflict_waits)
1329  {
1330  /*
1331  * Set the wait start timestamp if logging is enabled and in hot
1332  * standby.
1333  */
1334  standbyWaitStart = GetCurrentTimestamp();
1335  }
1336 
1337  /*
 1338  * If somebody wakes us between LWLockRelease and WaitLatch, WaitLatch
 1339  * will return immediately. But a set latch does not necessarily mean that the lock
1340  * is free now, as there are many other sources for latch sets than
1341  * somebody releasing the lock.
1342  *
1343  * We process interrupts whenever the latch has been set, so cancel/die
1344  * interrupts are processed quickly. This means we must not mind losing
1345  * control to a cancel/die interrupt here. We don't, because we have no
1346  * shared-state-change work to do after being granted the lock (the
1347  * grantor did it all). We do have to worry about canceling the deadlock
1348  * timeout and updating the locallock table, but if we lose control to an
1349  * error, LockErrorCleanup will fix that up.
1350  */
1351  do
1352  {
1353  if (InHotStandby)
1354  {
1355  bool maybe_log_conflict =
1356  (standbyWaitStart != 0 && !logged_recovery_conflict);
1357 
1358  /* Set a timer and wait for that or for the lock to be granted */
1360  maybe_log_conflict);
1361 
1362  /*
1363  * Emit the log message if the startup process is waiting longer
1364  * than deadlock_timeout for recovery conflict on lock.
1365  */
1366  if (maybe_log_conflict)
1367  {
1369 
1370  if (TimestampDifferenceExceeds(standbyWaitStart, now,
1371  DeadlockTimeout))
1372  {
1373  VirtualTransactionId *vxids;
1374  int cnt;
1375 
1376  vxids = GetLockConflicts(&locallock->tag.lock,
1377  AccessExclusiveLock, &cnt);
1378 
1379  /*
1380  * Log the recovery conflict and the list of PIDs of
1381  * backends holding the conflicting lock. Note that we do
1382  * logging even if there are no such backends right now
1383  * because the startup process here has already waited
1384  * longer than deadlock_timeout.
1385  */
1387  standbyWaitStart, now,
1388  cnt > 0 ? vxids : NULL, true);
1389  logged_recovery_conflict = true;
1390  }
1391  }
1392  }
1393  else
1394  {
1396  PG_WAIT_LOCK | locallock->tag.lock.locktag_type);
1398  /* check for deadlocks first, as that's probably log-worthy */
1400  {
1401  CheckDeadLock();
1402  got_deadlock_timeout = false;
1403  }
1405  }
1406 
1407  /*
1408  * waitStatus could change from PROC_WAIT_STATUS_WAITING to something
1409  * else asynchronously. Read it just once per loop to prevent
1410  * surprising behavior (such as missing log messages).
1411  */
1412  myWaitStatus = *((volatile ProcWaitStatus *) &MyProc->waitStatus);
1413 
1414  /*
1415  * If we are not deadlocked, but are waiting on an autovacuum-induced
1416  * task, send a signal to interrupt it.
1417  */
1418  if (deadlock_state == DS_BLOCKED_BY_AUTOVACUUM && allow_autovacuum_cancel)
1419  {
1420  PGPROC *autovac = GetBlockingAutoVacuumPgproc();
1421  uint8 statusFlags;
1422  uint8 lockmethod_copy;
1423  LOCKTAG locktag_copy;
1424 
1425  /*
1426  * Grab info we need, then release lock immediately. Note this
1427  * coding means that there is a tiny chance that the process
1428  * terminates its current transaction and starts a different one
 1429  * before we have a chance to send the signal; the worst possible
1430  * consequence is that a for-wraparound vacuum is canceled. But
1431  * that could happen in any case unless we were to do kill() with
1432  * the lock held, which is much more undesirable.
1433  */
1434  LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
1435  statusFlags = ProcGlobal->statusFlags[autovac->pgxactoff];
1436  lockmethod_copy = lock->tag.locktag_lockmethodid;
1437  locktag_copy = lock->tag;
1438  LWLockRelease(ProcArrayLock);
1439 
1440  /*
1441  * Only do it if the worker is not working to protect against Xid
1442  * wraparound.
1443  */
1444  if ((statusFlags & PROC_IS_AUTOVACUUM) &&
1445  !(statusFlags & PROC_VACUUM_FOR_WRAPAROUND))
1446  {
1447  int pid = autovac->pid;
1448 
1449  /* report the case, if configured to do so */
1451  {
1452  StringInfoData locktagbuf;
1453  StringInfoData logbuf; /* errdetail for server log */
1454 
1455  initStringInfo(&locktagbuf);
1456  initStringInfo(&logbuf);
1457  DescribeLockTag(&locktagbuf, &locktag_copy);
1458  appendStringInfo(&logbuf,
1459  "Process %d waits for %s on %s.",
1460  MyProcPid,
1461  GetLockmodeName(lockmethod_copy, lockmode),
1462  locktagbuf.data);
1463 
1464  ereport(DEBUG1,
1465  (errmsg_internal("sending cancel to blocking autovacuum PID %d",
1466  pid),
1467  errdetail_log("%s", logbuf.data)));
1468 
1469  pfree(locktagbuf.data);
1470  pfree(logbuf.data);
1471  }
1472 
1473  /* send the autovacuum worker Back to Old Kent Road */
1474  if (kill(pid, SIGINT) < 0)
1475  {
1476  /*
1477  * There's a race condition here: once we release the
1478  * ProcArrayLock, it's possible for the autovac worker to
1479  * close up shop and exit before we can do the kill().
1480  * Therefore, we do not whinge about no-such-process.
1481  * Other errors such as EPERM could conceivably happen if
1482  * the kernel recycles the PID fast enough, but such cases
1483  * seem improbable enough that it's probably best to issue
1484  * a warning if we see some other errno.
1485  */
1486  if (errno != ESRCH)
1487  ereport(WARNING,
1488  (errmsg("could not send signal to process %d: %m",
1489  pid)));
1490  }
1491  }
1492 
1493  /* prevent signal from being sent again more than once */
1494  allow_autovacuum_cancel = false;
1495  }
1496 
1497  /*
1498  * If awoken after the deadlock check interrupt has run, and
1499  * log_lock_waits is on, then report about the wait.
1500  */
1502  {
1504  lock_waiters_sbuf,
1505  lock_holders_sbuf;
1506  const char *modename;
1507  long secs;
1508  int usecs;
1509  long msecs;
1510  dlist_iter proc_iter;
1511  PROCLOCK *curproclock;
1512  bool first_holder = true,
1513  first_waiter = true;
1514  int lockHoldersNum = 0;
1515 
1516  initStringInfo(&buf);
1517  initStringInfo(&lock_waiters_sbuf);
1518  initStringInfo(&lock_holders_sbuf);
1519 
1520  DescribeLockTag(&buf, &locallock->tag.lock);
1521  modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
1522  lockmode);
1525  &secs, &usecs);
1526  msecs = secs * 1000 + usecs / 1000;
1527  usecs = usecs % 1000;
1528 
1529  /*
 1530  * We loop over the lock's procLocks to gather a list of all
 1531  * holders and waiters, so that we can provide more detailed
 1532  * information for lock debugging purposes.
1533  *
1534  * lock->procLocks contains all processes which hold or wait for
1535  * this lock.
1536  */
1537 
1538  LWLockAcquire(partitionLock, LW_SHARED);
1539 
1540  dlist_foreach(proc_iter, &lock->procLocks)
1541  {
1542  curproclock =
1543  dlist_container(PROCLOCK, lockLink, proc_iter.cur);
1544 
1545  /*
1546  * we are a waiter if myProc->waitProcLock == curproclock; we
1547  * are a holder if it is NULL or something different
1548  */
1549  if (curproclock->tag.myProc->waitProcLock == curproclock)
1550  {
1551  if (first_waiter)
1552  {
1553  appendStringInfo(&lock_waiters_sbuf, "%d",
1554  curproclock->tag.myProc->pid);
1555  first_waiter = false;
1556  }
1557  else
1558  appendStringInfo(&lock_waiters_sbuf, ", %d",
1559  curproclock->tag.myProc->pid);
1560  }
1561  else
1562  {
1563  if (first_holder)
1564  {
1565  appendStringInfo(&lock_holders_sbuf, "%d",
1566  curproclock->tag.myProc->pid);
1567  first_holder = false;
1568  }
1569  else
1570  appendStringInfo(&lock_holders_sbuf, ", %d",
1571  curproclock->tag.myProc->pid);
1572 
1573  lockHoldersNum++;
1574  }
1575  }
1576 
1577  LWLockRelease(partitionLock);
1578 
1580  ereport(LOG,
1581  (errmsg("process %d avoided deadlock for %s on %s by rearranging queue order after %ld.%03d ms",
1582  MyProcPid, modename, buf.data, msecs, usecs),
1583  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1584  "Processes holding the lock: %s. Wait queue: %s.",
1585  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1586  else if (deadlock_state == DS_HARD_DEADLOCK)
1587  {
1588  /*
1589  * This message is a bit redundant with the error that will be
1590  * reported subsequently, but in some cases the error report
1591  * might not make it to the log (eg, if it's caught by an
1592  * exception handler), and we want to ensure all long-wait
1593  * events get logged.
1594  */
1595  ereport(LOG,
1596  (errmsg("process %d detected deadlock while waiting for %s on %s after %ld.%03d ms",
1597  MyProcPid, modename, buf.data, msecs, usecs),
1598  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1599  "Processes holding the lock: %s. Wait queue: %s.",
1600  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1601  }
1602 
1603  if (myWaitStatus == PROC_WAIT_STATUS_WAITING)
1604  ereport(LOG,
1605  (errmsg("process %d still waiting for %s on %s after %ld.%03d ms",
1606  MyProcPid, modename, buf.data, msecs, usecs),
1607  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1608  "Processes holding the lock: %s. Wait queue: %s.",
1609  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1610  else if (myWaitStatus == PROC_WAIT_STATUS_OK)
1611  ereport(LOG,
1612  (errmsg("process %d acquired %s on %s after %ld.%03d ms",
1613  MyProcPid, modename, buf.data, msecs, usecs)));
1614  else
1615  {
1616  Assert(myWaitStatus == PROC_WAIT_STATUS_ERROR);
1617 
1618  /*
1619  * Currently, the deadlock checker always kicks its own
1620  * process, which means that we'll only see
1621  * PROC_WAIT_STATUS_ERROR when deadlock_state ==
1622  * DS_HARD_DEADLOCK, and there's no need to print redundant
1623  * messages. But for completeness and future-proofing, print
1624  * a message if it looks like someone else kicked us off the
1625  * lock.
1626  */
1628  ereport(LOG,
1629  (errmsg("process %d failed to acquire %s on %s after %ld.%03d ms",
1630  MyProcPid, modename, buf.data, msecs, usecs),
1631  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1632  "Processes holding the lock: %s. Wait queue: %s.",
1633  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1634  }
1635 
1636  /*
1637  * At this point we might still need to wait for the lock. Reset
1638  * state so we don't print the above messages again.
1639  */
1641 
1642  pfree(buf.data);
1643  pfree(lock_holders_sbuf.data);
1644  pfree(lock_waiters_sbuf.data);
1645  }
1646  } while (myWaitStatus == PROC_WAIT_STATUS_WAITING);
1647 
1648  /*
1649  * Disable the timers, if they are still running. As in LockErrorCleanup,
1650  * we must preserve the LOCK_TIMEOUT indicator flag: if a lock timeout has
1651  * already caused QueryCancelPending to become set, we want the cancel to
1652  * be reported as a lock timeout, not a user cancel.
1653  */
1654  if (!InHotStandby)
1655  {
1656  if (LockTimeout > 0)
1657  {
1658  DisableTimeoutParams timeouts[2];
1659 
1660  timeouts[0].id = DEADLOCK_TIMEOUT;
1661  timeouts[0].keep_indicator = false;
1662  timeouts[1].id = LOCK_TIMEOUT;
1663  timeouts[1].keep_indicator = true;
1664  disable_timeouts(timeouts, 2);
1665  }
1666  else
1668  }
1669 
1670  /*
1671  * Emit the log message if recovery conflict on lock was resolved but the
1672  * startup process waited longer than deadlock_timeout for it.
1673  */
1674  if (InHotStandby && logged_recovery_conflict)
1676  standbyWaitStart, GetCurrentTimestamp(),
1677  NULL, false);
1678 
1679  /*
1680  * Re-acquire the lock table's partition lock. We have to do this to hold
1681  * off cancel/die interrupts before we can mess with lockAwaited (else we
1682  * might have a missed or duplicated locallock update).
1683  */
1684  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
1685 
1686  /*
1687  * We no longer want LockErrorCleanup to do anything.
1688  */
1689  lockAwaited = NULL;
1690 
1691  /*
1692  * If we got the lock, be sure to remember it in the locallock table.
1693  */
1695  GrantAwaitedLock();
1696 
1697  /*
1698  * We don't have to do anything else, because the awaker did all the
1699  * necessary update of the lock table and MyProc.
1700  */
1701  return MyProc->waitStatus;
1702 }
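/*
 * Illustrative sketch (hypothetical, not part of proc.c): the "wake up, then
 * re-check the condition" pattern that the wait loop above follows, expressed
 * with a plain pthread condition variable.  A wakeup only means "look again",
 * not "the lock is yours"; all demo_* names are invented.
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t demo_cond = PTHREAD_COND_INITIALIZER;
static bool demo_lock_granted = false;

static void
demo_wait_for_grant(void)
{
	pthread_mutex_lock(&demo_mutex);
	/* spurious or unrelated wakeups are fine: we re-test the predicate */
	while (!demo_lock_granted)
		pthread_cond_wait(&demo_cond, &demo_mutex);
	pthread_mutex_unlock(&demo_mutex);
}

static void
demo_grant_lock(void)
{
	pthread_mutex_lock(&demo_mutex);
	demo_lock_granted = true;
	pthread_cond_signal(&demo_cond);
	pthread_mutex_unlock(&demo_mutex);
}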
1703 
1704 
1705 /*
1706  * ProcWakeup -- wake up a process by setting its latch.
1707  *
1708  * Also remove the process from the wait queue and set its links invalid.
1709  *
1710  * The appropriate lock partition lock must be held by caller.
1711  *
1712  * XXX: presently, this code is only used for the "success" case, and only
 1713  * works correctly for that case. To clean up in the failure case, we would need
1714  * to twiddle the lock's request counts too --- see RemoveFromWaitQueue.
1715  * Hence, in practice the waitStatus parameter must be PROC_WAIT_STATUS_OK.
1716  */
1717 void
1719 {
1720  if (dlist_node_is_detached(&proc->links))
1721  return;
1722 
1724 
1725  /* Remove process from wait queue */
1727 
1728  /* Clean up process' state and pass it the ok/fail signal */
1729  proc->waitLock = NULL;
1730  proc->waitProcLock = NULL;
1731  proc->waitStatus = waitStatus;
1733 
1734  /* And awaken it */
1735  SetLatch(&proc->procLatch);
1736 }
1737 
1738 /*
1739  * ProcLockWakeup -- routine for waking up processes when a lock is
1740  * released (or a prior waiter is aborted). Scan all waiters
1741  * for lock, waken any that are no longer blocked.
1742  *
1743  * The appropriate lock partition lock must be held by caller.
1744  */
1745 void
1746 ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
1747 {
1748  dclist_head *waitQueue = &lock->waitProcs;
1749  LOCKMASK aheadRequests = 0;
1750  dlist_mutable_iter miter;
1751 
1752  if (dclist_is_empty(waitQueue))
1753  return;
1754 
1755  dclist_foreach_modify(miter, waitQueue)
1756  {
1757  PGPROC *proc = dlist_container(PGPROC, links, miter.cur);
1758  LOCKMODE lockmode = proc->waitLockMode;
1759 
1760  /*
1761  * Waken if (a) doesn't conflict with requests of earlier waiters, and
1762  * (b) doesn't conflict with already-held locks.
1763  */
1764  if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
1765  !LockCheckConflicts(lockMethodTable, lockmode, lock,
1766  proc->waitProcLock))
1767  {
1768  /* OK to waken */
1769  GrantLock(lock, proc->waitProcLock, lockmode);
1770  /* removes proc from the lock's waiting process queue */
1772  }
1773  else
1774  {
1775  /*
1776  * Lock conflicts: Don't wake, but remember requested mode for
1777  * later checks.
1778  */
1779  aheadRequests |= LOCKBIT_ON(lockmode);
1780  }
1781  }
1782 }
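/*
 * Illustrative sketch (hypothetical, not part of proc.c): the wake-up scan
 * above, reduced to bitmask arithmetic over an invented two-mode lock table.
 * A waiter is woken only if its mode conflicts neither with what is already
 * granted nor with the requests of earlier waiters that must keep sleeping.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_SHARED		1
#define DEMO_EXCLUSIVE	2
#define DEMO_LOCKBIT(mode)	((uint32_t) 1 << (mode))

static const uint32_t demo_conflictTab[] = {
	[DEMO_SHARED] = DEMO_LOCKBIT(DEMO_EXCLUSIVE),
	[DEMO_EXCLUSIVE] = DEMO_LOCKBIT(DEMO_SHARED) | DEMO_LOCKBIT(DEMO_EXCLUSIVE),
};

typedef struct
{
	int			pid;
	int			waitLockMode;
} DemoWaiter;

static void
demo_lock_wakeup(uint32_t grantedMask, DemoWaiter *queue, int nwaiters)
{
	uint32_t	aheadRequests = 0;

	for (int i = 0; i < nwaiters; i++)
	{
		int			mode = queue[i].waitLockMode;

		if ((demo_conflictTab[mode] & (grantedMask | aheadRequests)) == 0)
		{
			printf("wake pid %d (mode %d)\n", queue[i].pid, mode);
			grantedMask |= DEMO_LOCKBIT(mode);	/* the lock is granted to it */
		}
		else
			aheadRequests |= DEMO_LOCKBIT(mode);	/* it stays asleep */
	}
}

int
main(void)
{
	DemoWaiter	queue[] = {
		{101, DEMO_EXCLUSIVE},
		{102, DEMO_SHARED},
		{103, DEMO_SHARED},
	};

	/* lock fully released: only the exclusive waiter at the head is woken */
	demo_lock_wakeup(0, queue, 3);
	return 0;
}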
1783 
1784 /*
1785  * CheckDeadLock
1786  *
 1787  * We only get to this routine if DEADLOCK_TIMEOUT fired while waiting for a
 1788  * lock to be released by some other process. Check if there's a deadlock; if
 1789  * not, just return. (But signal ProcSleep to log a message if
1790  * log_lock_waits is true.) If we have a real deadlock, remove ourselves from
1791  * the lock's wait queue and signal an error to ProcSleep.
1792  */
1793 static void
1795 {
1796  int i;
1797 
1798  /*
1799  * Acquire exclusive lock on the entire shared lock data structures. Must
1800  * grab LWLocks in partition-number order to avoid LWLock deadlock.
1801  *
1802  * Note that the deadlock check interrupt had better not be enabled
1803  * anywhere that this process itself holds lock partition locks, else this
1804  * will wait forever. Also note that LWLockAcquire creates a critical
1805  * section, so that this routine cannot be interrupted by cancel/die
1806  * interrupts.
1807  */
1808  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
1810 
1811  /*
1812  * Check to see if we've been awoken by anyone in the interim.
1813  *
1814  * If we have, we can return and resume our transaction -- happy day.
1815  * Before we are awoken the process releasing the lock grants it to us so
1816  * we know that we don't have to wait anymore.
1817  *
1818  * We check by looking to see if we've been unlinked from the wait queue.
1819  * This is safe because we hold the lock partition lock.
1820  */
1821  if (MyProc->links.prev == NULL ||
1822  MyProc->links.next == NULL)
1823  goto check_done;
1824 
1825 #ifdef LOCK_DEBUG
1826  if (Debug_deadlocks)
1827  DumpAllLocks();
1828 #endif
1829 
1830  /* Run the deadlock check, and set deadlock_state for use by ProcSleep */
1832 
1834  {
1835  /*
1836  * Oops. We have a deadlock.
1837  *
1838  * Get this process out of wait state. (Note: we could do this more
1839  * efficiently by relying on lockAwaited, but use this coding to
1840  * preserve the flexibility to kill some other transaction than the
1841  * one detecting the deadlock.)
1842  *
1843  * RemoveFromWaitQueue sets MyProc->waitStatus to
1844  * PROC_WAIT_STATUS_ERROR, so ProcSleep will report an error after we
1845  * return from the signal handler.
1846  */
1847  Assert(MyProc->waitLock != NULL);
1849 
1850  /*
1851  * We're done here. Transaction abort caused by the error that
1852  * ProcSleep will raise will cause any other locks we hold to be
1853  * released, thus allowing other processes to wake up; we don't need
1854  * to do that here. NOTE: an exception is that releasing locks we
1855  * hold doesn't consider the possibility of waiters that were blocked
1856  * behind us on the lock we just failed to get, and might now be
1857  * wakable because we're not in front of them anymore. However,
1858  * RemoveFromWaitQueue took care of waking up any such processes.
1859  */
1860  }
1861 
1862  /*
1863  * And release locks. We do this in reverse order for two reasons: (1)
1864  * Anyone else who needs more than one of the locks will be trying to lock
1865  * them in increasing order; we don't want to release the other process
1866  * until it can get all the locks it needs. (2) This avoids O(N^2)
1867  * behavior inside LWLockRelease.
1868  */
1869 check_done:
1870  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
1872 }
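/*
 * Illustrative sketch (hypothetical, not part of proc.c): the lock-ordering
 * rule the function above relies on -- every process acquires the partition
 * locks in ascending index order, so no circular wait among them is possible.
 * Plain pthread mutexes stand in for the LWLock partitions.
 */
#include <pthread.h>

#define DEMO_NUM_PARTITIONS 16

static pthread_mutex_t demo_partition_locks[DEMO_NUM_PARTITIONS];

/* call once at startup */
static void
demo_init_partitions(void)
{
	for (int i = 0; i < DEMO_NUM_PARTITIONS; i++)
		pthread_mutex_init(&demo_partition_locks[i], NULL);
}

static void
demo_lock_all_partitions(void)
{
	/* always ascending order, in every process */
	for (int i = 0; i < DEMO_NUM_PARTITIONS; i++)
		pthread_mutex_lock(&demo_partition_locks[i]);
}

static void
demo_unlock_all_partitions(void)
{
	/* release in reverse order, mirroring the code above */
	for (int i = DEMO_NUM_PARTITIONS; --i >= 0;)
		pthread_mutex_unlock(&demo_partition_locks[i]);
}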
1873 
1874 /*
1875  * CheckDeadLockAlert - Handle the expiry of deadlock_timeout.
1876  *
1877  * NB: Runs inside a signal handler, be careful.
1878  */
1879 void
1881 {
1882  int save_errno = errno;
1883 
1884  got_deadlock_timeout = true;
1885 
1886  /*
1887  * Have to set the latch again, even if handle_sig_alarm already did. Back
1888  * then got_deadlock_timeout wasn't yet set... It's unlikely that this
1889  * ever would be a problem, but setting a set latch again is cheap.
1890  *
1891  * Note that, when this function runs inside procsignal_sigusr1_handler(),
1892  * the handler function sets the latch again after the latch is set here.
1893  */
1894  SetLatch(MyLatch);
1895  errno = save_errno;
1896 }
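/*
 * Illustrative sketch (hypothetical, not part of proc.c): the signal-handler
 * discipline used by CheckDeadLockAlert() above -- set a volatile
 * sig_atomic_t flag, preserve errno, and let the main loop do the real work.
 * alarm() and sleep() stand in for the timeout and latch machinery.
 */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t demo_timeout_fired = 0;

static void
demo_alarm_handler(int signo)
{
	int			save_errno = errno;	/* handlers must not clobber errno */

	demo_timeout_fired = 1;
	errno = save_errno;
}

int
main(void)
{
	signal(SIGALRM, demo_alarm_handler);
	alarm(1);					/* "deadlock_timeout" expires in one second */

	/* a real backend would wait on its latch instead of polling */
	while (!demo_timeout_fired)
		sleep(1);

	printf("timeout fired; run the expensive deadlock check now\n");
	return 0;
}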
1897 
1898 /*
1899  * ProcWaitForSignal - wait for a signal from another backend.
1900  *
1901  * As this uses the generic process latch the caller has to be robust against
1902  * unrelated wakeups: Always check that the desired state has occurred, and
1903  * wait again if not.
1904  */
1905 void
1906 ProcWaitForSignal(uint32 wait_event_info)
1907 {
1908  (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
1909  wait_event_info);
1910  ResetLatch(MyLatch);
1911  CHECK_FOR_INTERRUPTS();
1912 }
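
A minimal usage sketch of the contract described above; condition_is_met() and wait_for_my_event() are hypothetical caller-side names, not part of this file:

    /* Hypothetical waiter: the latch can be set for unrelated reasons,
     * so re-check the desired shared state after every wakeup. */
    static bool condition_is_met(void);     /* caller-defined check */

    static void
    wait_for_my_event(uint32 wait_event_info)
    {
        while (!condition_is_met())
            ProcWaitForSignal(wait_event_info);
    }
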
1913 
1914 /*
1915  * ProcSendSignal - set the latch of a backend identified by ProcNumber
1916  */
1917 void
1918 ProcSendSignal(ProcNumber procNumber)
1919 {
1920  if (procNumber < 0 || procNumber >= ProcGlobal->allProcCount)
1921  elog(ERROR, "procNumber out of range");
1922 
1923  SetLatch(&ProcGlobal->allProcs[procNumber].procLatch);
1924 }
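
The sending side pairs with ProcWaitForSignal(): it first makes the awaited state visible in shared memory and only then sets the target backend's latch. A hypothetical sketch (wake_waiting_backend() and the shared-state update are placeholders):

    static void
    wake_waiting_backend(ProcNumber waiter)
    {
        /* ... update the shared state that the waiter's check inspects ... */

        /* Wake the waiter; harmless if it was not actually sleeping. */
        ProcSendSignal(waiter);
    }
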
1925 
1926 /*
1927  * BecomeLockGroupLeader - designate process as lock group leader
1928  *
1929  * Once this function has returned, other processes can join the lock group
1930  * by calling BecomeLockGroupMember.
1931  */
1932 void
1933 BecomeLockGroupLeader(void)
1934 {
1935  LWLock *leader_lwlock;
1936 
1937  /* If we already did it, we don't need to do it again. */
1938  if (MyProc->lockGroupLeader == MyProc)
1939  return;
1940 
1941  /* We had better not be a follower. */
1942  Assert(MyProc->lockGroupLeader == NULL);
1943 
1944  /* Create single-member group, containing only ourselves. */
1945  leader_lwlock = LockHashPartitionLockByProc(MyProc);
1946  LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
1947  MyProc->lockGroupLeader = MyProc;
1948  dlist_init(&MyProc->lockGroupMembers);
1949  LWLockRelease(leader_lwlock);
1950 }
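
A hedged sketch of the leader side of the lock-group protocol: the coordinating process becomes group leader before launching its helpers and advertises its PGPROC and PID so they can join. shared_hdr is a hypothetical shared-memory structure, not something defined in this file:

    /* Hypothetical coordinator, before starting helper processes. */
    BecomeLockGroupLeader();

    /* Publish the interlock values the helpers will pass to
     * BecomeLockGroupMember(); how they reach the helpers is up to the
     * caller (e.g. a shared-memory segment both sides can read). */
    shared_hdr->leader_pgproc = MyProc;
    shared_hdr->leader_pid = MyProcPid;
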
1951 
1952 /*
1953  * BecomeLockGroupMember - designate process as lock group member
1954  *
1955  * This is pretty straightforward except for the possibility that the leader
1956  * whose group we're trying to join might exit before we manage to do so;
1957  * and the PGPROC might get recycled for an unrelated process. To avoid
1958  * that, we require the caller to pass the PID of the intended PGPROC as
1959  * an interlock. Returns true if we successfully join the intended lock
1960  * group, and false if not.
1961  */
1962 bool
1963 BecomeLockGroupMember(PGPROC *leader, int pid)
1964 {
1965  LWLock *leader_lwlock;
1966  bool ok = false;
1967 
1968  /* Group leader can't become member of group */
1969  Assert(MyProc != leader);
1970 
1971  /* Can't already be a member of a group */
1972  Assert(MyProc->lockGroupLeader == NULL);
1973 
1974  /* PID must be valid. */
1975  Assert(pid != 0);
1976 
1977  /*
1978  * Get lock protecting the group fields. Note LockHashPartitionLockByProc
1979  * calculates the proc number based on the PGPROC slot without looking at
1980  * its contents, so we will acquire the correct lock even if the leader
1981  * PGPROC is in process of being recycled.
1982  */
1983  leader_lwlock = LockHashPartitionLockByProc(leader);
1984  LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
1985 
1986  /* Is this the leader we're looking for? */
1987  if (leader->pid == pid && leader->lockGroupLeader == leader)
1988  {
1989  /* OK, join the group */
1990  ok = true;
1991  MyProc->lockGroupLeader = leader;
1992  dlist_push_tail(&leader->lockGroupMembers, &MyProc->lockGroupLink);
1993  }
1994  LWLockRelease(leader_lwlock);
1995 
1996  return ok;
1997 }
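
And the matching member side, again with shared_hdr as a hypothetical structure carrying the values the leader published; the PID acts as the interlock described above, and a false return means the leader has already exited:

    /* Hypothetical helper process, early in its startup. */
    if (!BecomeLockGroupMember(shared_hdr->leader_pgproc,
                               shared_hdr->leader_pid))
        ereport(ERROR,
                (errmsg("could not join lock group of launching process")));
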
static void pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
Definition: atomics.h:485
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:221
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:239
static void pg_atomic_init_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
Definition: atomics.h:453
int AutovacuumLauncherPid
Definition: autovacuum.c:314
int autovacuum_max_workers
Definition: autovacuum.c:118
void TimestampDifference(TimestampTz start_time, TimestampTz stop_time, long *secs, int *microsecs)
Definition: timestamp.c:1720
bool TimestampDifferenceExceeds(TimestampTz start_time, TimestampTz stop_time, int msec)
Definition: timestamp.c:1780
TimestampTz GetCurrentTimestamp(void)
Definition: timestamp.c:1644
Datum now(PG_FUNCTION_ARGS)
Definition: timestamp.c:1608
unsigned int uint32
Definition: c.h:506
#define MAXALIGN(LEN)
Definition: c.h:802
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:185
#define Assert(condition)
Definition: c.h:849
unsigned char uint8
Definition: c.h:504
#define MemSet(start, val, len)
Definition: c.h:1011
uint32 TransactionId
Definition: c.h:643
size_t Size
Definition: c.h:596
#define TRANSACTION_STATUS_IN_PROGRESS
Definition: clog.h:27
bool ConditionVariableCancelSleep(void)
int64 TimestampTz
Definition: timestamp.h:39
PGPROC * GetBlockingAutoVacuumPgproc(void)
Definition: deadlock.c:287
void RememberSimpleDeadLock(PGPROC *proc1, LOCKMODE lockmode, LOCK *lock, PGPROC *proc2)
Definition: deadlock.c:1144
void InitDeadLockChecking(void)
Definition: deadlock.c:143
DeadLockState DeadLockCheck(PGPROC *proc)
Definition: deadlock.c:217
int errmsg_internal(const char *fmt,...)
Definition: elog.c:1157
bool message_level_is_interesting(int elevel)
Definition: elog.c:272
int errcode(int sqlerrcode)
Definition: elog.c:853
int errdetail_log_plural(const char *fmt_singular, const char *fmt_plural, unsigned long n,...)
Definition: elog.c:1272
int errmsg(const char *fmt,...)
Definition: elog.c:1070
int errdetail_log(const char *fmt,...)
Definition: elog.c:1251
#define LOG
Definition: elog.h:31
#define FATAL
Definition: elog.h:41
#define WARNING
Definition: elog.h:36
#define PANIC
Definition: elog.h:42
#define DEBUG1
Definition: elog.h:30
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:225
#define ereport(elevel,...)
Definition: elog.h:149
int MyProcPid
Definition: globals.c:46
ProcNumber MyProcNumber
Definition: globals.c:89
bool IsUnderPostmaster
Definition: globals.c:119
int MaxConnections
Definition: globals.c:142
int MaxBackends
Definition: globals.c:145
struct Latch * MyLatch
Definition: globals.c:62
int max_worker_processes
Definition: globals.c:143
#define dlist_foreach(iter, lhead)
Definition: ilist.h:623
static void dlist_init(dlist_head *head)
Definition: ilist.h:314
static void dclist_push_tail(dclist_head *head, dlist_node *node)
Definition: ilist.h:709
static void dlist_delete(dlist_node *node)
Definition: ilist.h:405
static bool dclist_is_empty(const dclist_head *head)
Definition: ilist.h:682
static bool dlist_node_is_detached(const dlist_node *node)
Definition: ilist.h:525
static dlist_node * dlist_pop_head_node(dlist_head *head)
Definition: ilist.h:450
static void dlist_push_head(dlist_head *head, dlist_node *node)
Definition: ilist.h:347
static bool dlist_is_empty(const dlist_head *head)
Definition: ilist.h:336
static void dlist_push_tail(dlist_head *head, dlist_node *node)
Definition: ilist.h:364
static void dclist_delete_from_thoroughly(dclist_head *head, dlist_node *node)
Definition: ilist.h:776
static void dclist_insert_before(dclist_head *head, dlist_node *before, dlist_node *node)
Definition: ilist.h:745
#define dclist_foreach_modify(iter, lhead)
Definition: ilist.h:973
static void dlist_node_init(dlist_node *node)
Definition: ilist.h:325
#define dlist_container(type, membername, ptr)
Definition: ilist.h:593
#define dclist_foreach(iter, lhead)
Definition: ilist.h:970
void on_shmem_exit(pg_on_exit_callback function, Datum arg)
Definition: ipc.c:365
int j
Definition: isn.c:74
int i
Definition: isn.c:73
void OwnLatch(Latch *latch)
Definition: latch.c:463
void DisownLatch(Latch *latch)
Definition: latch.c:489
void InitSharedLatch(Latch *latch)
Definition: latch.c:430
void SetLatch(Latch *latch)
Definition: latch.c:632
void ResetLatch(Latch *latch)
Definition: latch.c:724
int WaitLatch(Latch *latch, int wakeEvents, long timeout, uint32 wait_event_info)
Definition: latch.c:517
#define WL_EXIT_ON_PM_DEATH
Definition: latch.h:132
#define WL_LATCH_SET
Definition: latch.h:127
void DescribeLockTag(StringInfo buf, const LOCKTAG *tag)
Definition: lmgr.c:1233
VirtualTransactionId * GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
Definition: lock.c:2979
void GrantAwaitedLock(void)
Definition: lock.c:1838
void GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
Definition: lock.c:1607
void RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
Definition: lock.c:1957
void LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
Definition: lock.c:2218
void AbortStrongLockAcquire(void)
Definition: lock.c:1809
const char * GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
Definition: lock.c:4152
int FastPathLockGroupsPerBackend
Definition: lock.c:201
uint32 LockTagHashCode(const LOCKTAG *locktag)
Definition: lock.c:553
bool LockCheckConflicts(LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock, PROCLOCK *proclock)
Definition: lock.c:1478
#define DEFAULT_LOCKMETHOD
Definition: lock.h:125
#define LockHashPartitionLock(hashcode)
Definition: lock.h:526
#define USER_LOCKMETHOD
Definition: lock.h:126
#define InvalidLocalTransactionId
Definition: lock.h:65
DeadLockState
Definition: lock.h:509
@ DS_HARD_DEADLOCK
Definition: lock.h:513
@ DS_BLOCKED_BY_AUTOVACUUM
Definition: lock.h:514
@ DS_NO_DEADLOCK
Definition: lock.h:511
@ DS_NOT_YET_CHECKED
Definition: lock.h:510
@ DS_SOFT_DEADLOCK
Definition: lock.h:512
#define LOCKBIT_ON(lockmode)
Definition: lock.h:84
#define LockHashPartitionLockByProc(leader_pgproc)
Definition: lock.h:541
#define LockHashPartitionLockByIndex(i)
Definition: lock.h:529
int LOCKMODE
Definition: lockdefs.h:26
#define AccessExclusiveLock
Definition: lockdefs.h:43
int LOCKMASK
Definition: lockdefs.h:25
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1168
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1781
void LWLockReleaseAll(void)
Definition: lwlock.c:1876
void LWLockInitialize(LWLock *lock, int tranche_id)
Definition: lwlock.c:707
void InitLWLockAccess(void)
Definition: lwlock.c:557
@ LW_WS_NOT_WAITING
Definition: lwlock.h:30
#define NUM_LOCK_PARTITIONS
Definition: lwlock.h:97
@ LWTRANCHE_LOCK_FASTPATH
Definition: lwlock.h:190
@ LW_SHARED
Definition: lwlock.h:115
@ LW_EXCLUSIVE
Definition: lwlock.h:114
void pfree(void *pointer)
Definition: mcxt.c:1521
#define RESUME_INTERRUPTS()
Definition: miscadmin.h:135
#define AmAutoVacuumWorkerProcess()
Definition: miscadmin.h:372
#define AmBackgroundWorkerProcess()
Definition: miscadmin.h:373
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:122
#define AmWalSenderProcess()
Definition: miscadmin.h:374
#define AmLogicalSlotSyncWorkerProcess()
Definition: miscadmin.h:375
#define HOLD_INTERRUPTS()
Definition: miscadmin.h:133
#define AmAutoVacuumLauncherProcess()
Definition: miscadmin.h:371
void SwitchToSharedLatch(void)
Definition: miscinit.c:221
void SwitchBackToLocalLatch(void)
Definition: miscinit.c:248
void * arg
static char * buf
Definition: pg_test_fsync.c:73
void RegisterPostmasterChildActive(void)
Definition: pmsignal.c:329
void PGSemaphoreReset(PGSemaphore sema)
Definition: posix_sema.c:294
PGSemaphore PGSemaphoreCreate(void)
Definition: posix_sema.c:261
uintptr_t Datum
Definition: postgres.h:64
static Datum Int32GetDatum(int32 X)
Definition: postgres.h:212
static int32 DatumGetInt32(Datum X)
Definition: postgres.h:202
#define NON_EXEC_STATIC
Definition: postgres.h:576
#define InvalidOid
Definition: postgres_ext.h:36
unsigned int Oid
Definition: postgres_ext.h:31
#define NUM_AUXILIARY_PROCS
Definition: proc.h:444
#define PROC_VACUUM_FOR_WRAPAROUND
Definition: proc.h:60
#define FP_LOCK_SLOTS_PER_GROUP
Definition: proc.h:84
#define GetNumberFromPGProc(proc)
Definition: proc.h:433
ProcWaitStatus
Definition: proc.h:123
@ PROC_WAIT_STATUS_OK
Definition: proc.h:124
@ PROC_WAIT_STATUS_WAITING
Definition: proc.h:125
@ PROC_WAIT_STATUS_ERROR
Definition: proc.h:126
#define PROC_IS_AUTOVACUUM
Definition: proc.h:57
void ProcArrayAdd(PGPROC *proc)
Definition: procarray.c:468
void ProcArrayRemove(PGPROC *proc, TransactionId latestXid)
Definition: procarray.c:565
#define INVALID_PROC_NUMBER
Definition: procnumber.h:26
int ProcNumber
Definition: procnumber.h:24
@ PROCSIG_RECOVERY_CONFLICT_LOCK
Definition: procsignal.h:44
void set_spins_per_delay(int shared_spins_per_delay)
Definition: s_lock.c:208
int update_spins_per_delay(int shared_spins_per_delay)
Definition: s_lock.c:219
#define DEFAULT_SPINS_PER_DELAY
Definition: s_lock.h:714
void * ShmemAlloc(Size size)
Definition: shmem.c:152
Size add_size(Size s1, Size s2)
Definition: shmem.c:493
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:387
Size mul_size(Size s1, Size s2)
Definition: shmem.c:510
static pg_noinline void Size size
Definition: slab.c:607
#define SpinLockInit(lock)
Definition: spin.h:57
#define SpinLockRelease(lock)
Definition: spin.h:61
#define SpinLockAcquire(lock)
Definition: spin.h:59
void ProcSendSignal(ProcNumber procNumber)
Definition: proc.c:1918
bool log_lock_waits
Definition: proc.c:64
int IdleSessionTimeout
Definition: proc.c:63
PGPROC * MyProc
Definition: proc.c:67
Size ProcGlobalShmemSize(void)
Definition: proc.c:101
void ProcWakeup(PGPROC *proc, ProcWaitStatus waitStatus)
Definition: proc.c:1718
bool IsWaitingForLock(void)
Definition: proc.c:760
int StatementTimeout
Definition: proc.c:59
bool HaveNFreeProcs(int n, int *nfree)
Definition: proc.c:734
static void RemoveProcFromArray(int code, Datum arg)
Definition: proc.c:870
void InitAuxiliaryProcess(void)
Definition: proc.c:570
PGPROC * PreparedXactProcs
Definition: proc.c:81
static DeadLockState deadlock_state
Definition: proc.c:86
int IdleInTransactionSessionTimeout
Definition: proc.c:61
ProcWaitStatus ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable, bool dontWait)
Definition: proc.c:1106
NON_EXEC_STATIC PGPROC * AuxiliaryProcs
Definition: proc.c:80
int GetStartupBufferPinWaitBufId(void)
Definition: proc.c:718
int DeadlockTimeout
Definition: proc.c:58
int TransactionTimeout
Definition: proc.c:62
void ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
Definition: proc.c:1746
PROC_HDR * ProcGlobal
Definition: proc.c:79
static void CheckDeadLock(void)
Definition: proc.c:1794
NON_EXEC_STATIC slock_t * ProcStructLock
Definition: proc.c:76
int ProcGlobalSemas(void)
Definition: proc.c:134
void ProcReleaseLocks(bool isCommit)
Definition: proc.c:853
void LockErrorCleanup(void)
Definition: proc.c:777
bool BecomeLockGroupMember(PGPROC *leader, int pid)
Definition: proc.c:1963
void BecomeLockGroupLeader(void)
Definition: proc.c:1933
static LOCALLOCK * lockAwaited
Definition: proc.c:84
PGPROC * AuxiliaryPidGetProc(int pid)
Definition: proc.c:1058
static void ProcKill(int code, Datum arg)
Definition: proc.c:881
void InitProcess(void)
Definition: proc.c:343
void CheckDeadLockAlert(void)
Definition: proc.c:1880
void InitProcessPhase2(void)
Definition: proc.c:535
void InitProcGlobal(void)
Definition: proc.c:169
static volatile sig_atomic_t got_deadlock_timeout
Definition: proc.c:89
void SetStartupBufferPinWaitBufId(int bufid)
Definition: proc.c:706
void ProcWaitForSignal(uint32 wait_event_info)
Definition: proc.c:1906
int LockTimeout
Definition: proc.c:60
static void AuxiliaryProcKill(int code, Datum arg)
Definition: proc.c:1007
void CheckRecoveryConflictDeadlock(void)
Definition: standby.c:904
bool log_recovery_conflict_waits
Definition: standby.c:41
void LogRecoveryConflict(ProcSignalReason reason, TimestampTz wait_start, TimestampTz now, VirtualTransactionId *wait_list, bool still_waiting)
Definition: standby.c:273
void ResolveRecoveryConflictWithLock(LOCKTAG locktag, bool logging_conflict)
Definition: standby.c:622
void appendStringInfo(StringInfo str, const char *fmt,...)
Definition: stringinfo.c:97
void initStringInfo(StringInfo str)
Definition: stringinfo.c:59
TimeoutId id
Definition: timeout.h:71
TimeoutType type
Definition: timeout.h:61
TimeoutId id
Definition: timeout.h:60
LOCKTAG lock
Definition: lock.h:410
LOCKMODE mode
Definition: lock.h:411
uint32 hashcode
Definition: lock.h:432
LOCK * lock
Definition: lock.h:433
PROCLOCK * proclock
Definition: lock.h:434
LOCALLOCKTAG tag
Definition: lock.h:429
Definition: lock.h:165
uint8 locktag_type
Definition: lock.h:170
uint8 locktag_lockmethodid
Definition: lock.h:171
Definition: lock.h:309
LOCKTAG tag
Definition: lock.h:311
dclist_head waitProcs
Definition: lock.h:317
LOCKMASK waitMask
Definition: lock.h:315
dlist_head procLocks
Definition: lock.h:316
Definition: lwlock.h:42
const LOCKMASK * conflictTab
Definition: lock.h:111
Definition: proc.h:162
LWLock fpInfoLock
Definition: proc.h:293
TransactionId xmin
Definition: proc.h:177
bool procArrayGroupMember
Definition: proc.h:269
LocalTransactionId lxid
Definition: proc.h:200
PROCLOCK * waitProcLock
Definition: proc.h:233
XLogRecPtr clogGroupMemberLsn
Definition: proc.h:289
pg_atomic_uint32 procArrayGroupNext
Definition: proc.h:271
uint8 lwWaitMode
Definition: proc.h:224
dlist_head lockGroupMembers
Definition: proc.h:305
uint32 wait_event_info
Definition: proc.h:279
dlist_head * procgloballist
Definition: proc.h:164
Oid * fpRelId
Definition: proc.h:295
uint8 statusFlags
Definition: proc.h:242
bool recoveryConflictPending
Definition: proc.h:220
TransactionId clogGroupMemberXid
Definition: proc.h:284
Oid databaseId
Definition: proc.h:207
int64 clogGroupMemberPage
Definition: proc.h:287
bool clogGroupMember
Definition: proc.h:282
uint64 * fpLockBits
Definition: proc.h:294
pg_atomic_uint64 waitStart
Definition: proc.h:237
bool fpVXIDLock
Definition: proc.h:296
ProcNumber procNumber
Definition: proc.h:195
int pid
Definition: proc.h:182
XLogRecPtr waitLSN
Definition: proc.h:252
dlist_node syncRepLinks
Definition: proc.h:254
bool isBackgroundWorker
Definition: proc.h:213
int syncRepState
Definition: proc.h:253
pg_atomic_uint32 clogGroupNext
Definition: proc.h:283
dlist_node lockGroupLink
Definition: proc.h:306
XidStatus clogGroupMemberXidStatus
Definition: proc.h:285
int pgxactoff
Definition: proc.h:184
LOCK * waitLock
Definition: proc.h:232
TransactionId xid
Definition: proc.h:172
LOCKMODE waitLockMode
Definition: proc.h:234
int delayChkptFlags
Definition: proc.h:240
struct PGPROC::@119 vxid
PGPROC * lockGroupLeader
Definition: proc.h:304
LocalTransactionId fpLocalTransactionId
Definition: proc.h:297
TransactionId procArrayGroupMemberXid
Definition: proc.h:277
LOCKMASK heldLocks
Definition: proc.h:235
PGSemaphore sem
Definition: proc.h:166
dlist_head myProcLocks[NUM_LOCK_PARTITIONS]
Definition: proc.h:261
Oid roleId
Definition: proc.h:208
ProcWaitStatus waitStatus
Definition: proc.h:167
Oid tempNamespaceId
Definition: proc.h:210
dlist_node links
Definition: proc.h:163
uint8 lwWaiting
Definition: proc.h:223
Latch procLatch
Definition: proc.h:169
PGPROC * myProc
Definition: lock.h:366
Definition: lock.h:370
LOCKMASK holdMask
Definition: lock.h:376
PGPROC * groupLeader
Definition: lock.h:375
PROCLOCKTAG tag
Definition: lock.h:372
Definition: proc.h:382
uint8 * statusFlags
Definition: proc.h:399
XidCacheStatus * subxidStates
Definition: proc.h:393
dlist_head autovacFreeProcs
Definition: proc.h:406
Latch * walwriterLatch
Definition: proc.h:416
dlist_head freeProcs
Definition: proc.h:404
int startupBufferPinWaitBufId
Definition: proc.h:422
PGPROC * allProcs
Definition: proc.h:384
pg_atomic_uint32 clogGroupFirst
Definition: proc.h:414
int spins_per_delay
Definition: proc.h:420
TransactionId * xids
Definition: proc.h:387
Latch * checkpointerLatch
Definition: proc.h:418
dlist_head walsenderFreeProcs
Definition: proc.h:410
dlist_head bgworkerFreeProcs
Definition: proc.h:408
pg_atomic_uint32 procArrayGroupFirst
Definition: proc.h:412
uint32 allProcCount
Definition: proc.h:402
dlist_node * cur
Definition: ilist.h:179
dlist_node * cur
Definition: ilist.h:200
dlist_node * next
Definition: ilist.h:140
dlist_node * prev
Definition: ilist.h:139
Definition: type.h:95
void SyncRepCleanupAtProcExit(void)
Definition: syncrep.c:373
#define SYNC_REP_NOT_WAITING
Definition: syncrep.h:30
void enable_timeout_after(TimeoutId id, int delay_ms)
Definition: timeout.c:560
TimestampTz get_timeout_start_time(TimeoutId id)
Definition: timeout.c:813
void disable_timeout(TimeoutId id, bool keep_indicator)
Definition: timeout.c:685
void enable_timeouts(const EnableTimeoutParams *timeouts, int count)
Definition: timeout.c:630
void disable_timeouts(const DisableTimeoutParams *timeouts, int count)
Definition: timeout.c:718
@ LOCK_TIMEOUT
Definition: timeout.h:28
@ DEADLOCK_TIMEOUT
Definition: timeout.h:27
@ TMPARAM_AFTER
Definition: timeout.h:53
#define InvalidTransactionId
Definition: transam.h:31
int max_prepared_xacts
Definition: twophase.c:115
void pgstat_set_wait_event_storage(uint32 *wait_event_info)
Definition: wait_event.c:350
void pgstat_reset_wait_event_storage(void)
Definition: wait_event.c:362
#define PG_WAIT_LOCK
Definition: wait_event.h:19
void WaitLSNCleanup(void)
Definition: waitlsn.c:204
int max_wal_senders
Definition: walsender.c:121
#define kill(pid, sig)
Definition: win32_port.h:503
#define SIGUSR2
Definition: win32_port.h:181
bool RecoveryInProgress(void)
Definition: xlog.c:6333
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28
bool InRecovery
Definition: xlogutils.c:50
#define InHotStandby
Definition: xlogutils.h:60
static struct link * links
Definition: zic.c:299