proc.c
1 /*-------------------------------------------------------------------------
2  *
3  * proc.c
4  * routines to manage per-process shared memory data structure
5  *
6  * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/storage/lmgr/proc.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 /*
16  * Interface (a):
17  * ProcSleep(), ProcWakeup(),
18  *
19  * Waiting for a lock causes the backend to be put to sleep. Whoever releases
20  * the lock wakes the process up again (and gives it an error code so it knows
21  * whether it was awoken on an error condition).
22  *
23  * Interface (b):
24  *
25  * ProcReleaseLocks -- frees the locks associated with current transaction
26  *
27  * ProcKill -- destroys the shared memory state (and locks)
28  * associated with the process.
29  */
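/*
 * Illustrative sketch (not part of proc.c): how a backend typically reaches
 * interface (a).  LockAcquire() calls ProcSleep() when it must wait; the
 * backend that releases the lock calls ProcLockWakeup()/ProcWakeup(), which
 * set our latch; and at transaction end interface (b)'s ProcReleaseLocks()
 * frees whatever we still hold.  A hypothetical caller, for orientation only:
 *
 *     LOCKTAG  tag;
 *
 *     SET_LOCKTAG_RELATION(tag, MyDatabaseId, relid);
 *     (void) LockAcquire(&tag, AccessExclusiveLock, false, false);
 *     ... do work; ProcReleaseLocks() drops the lock at commit/abort ...
 */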
30 #include "postgres.h"
31 
32 #include <signal.h>
33 #include <unistd.h>
34 #include <sys/time.h>
35 
36 #include "access/transam.h"
37 #include "access/twophase.h"
38 #include "access/xlogutils.h"
39 #include "miscadmin.h"
40 #include "pgstat.h"
41 #include "postmaster/autovacuum.h"
42 #include "replication/slotsync.h"
43 #include "replication/syncrep.h"
45 #include "storage/ipc.h"
46 #include "storage/lmgr.h"
47 #include "storage/pmsignal.h"
48 #include "storage/proc.h"
49 #include "storage/procarray.h"
50 #include "storage/procsignal.h"
51 #include "storage/spin.h"
52 #include "storage/standby.h"
53 #include "utils/timeout.h"
54 #include "utils/timestamp.h"
55 
56 /* GUC variables */
57 int DeadlockTimeout = 1000;
59 int LockTimeout = 0;
63 bool log_lock_waits = false;
64 
65 /* Pointer to this process's PGPROC struct, if any */
66 PGPROC *MyProc = NULL;
67 
68 /*
69  * This spinlock protects the freelist of recycled PGPROC structures.
70  * We cannot use an LWLock because the LWLock manager depends on already
71  * having a PGPROC and a wait semaphore! But these structures are touched
72  * relatively infrequently (only at backend startup or shutdown) and not for
73  * very long, so a spinlock is okay.
74  */
76 
77 /* Pointers to shared-memory structures */
81 
82 /* If we are waiting for a lock, this points to the associated LOCALLOCK */
83 static LOCALLOCK *lockAwaited = NULL;
84 
86 
87 /* Is a deadlock check pending? */
88 static volatile sig_atomic_t got_deadlock_timeout;
89 
90 static void RemoveProcFromArray(int code, Datum arg);
91 static void ProcKill(int code, Datum arg);
92 static void AuxiliaryProcKill(int code, Datum arg);
93 static void CheckDeadLock(void);
94 
95 
96 /*
97  * Report shared-memory space needed by InitProcGlobal.
98  */
99 Size
100 ProcGlobalShmemSize(void)
101 {
102  Size size = 0;
103  Size TotalProcs =
104  add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
105 
106  /* ProcGlobal */
107  size = add_size(size, sizeof(PROC_HDR));
108  size = add_size(size, mul_size(TotalProcs, sizeof(PGPROC)));
109  size = add_size(size, sizeof(slock_t));
110 
111  size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->xids)));
112  size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->subxidStates)));
113  size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->statusFlags)));
114 
115  return size;
116 }
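/*
 * Illustrative breakdown (not part of proc.c): the estimate above is
 *
 *     sizeof(PROC_HDR)
 *   + TotalProcs * sizeof(PGPROC)
 *   + sizeof(slock_t)
 *   + TotalProcs * (sizeof(*xids) + sizeof(*subxidStates) + sizeof(*statusFlags))
 *
 * where TotalProcs counts every backend slot plus auxiliary processes and
 * prepared transactions, mirroring the allocations made in InitProcGlobal().
 */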
117 
118 /*
119  * Report number of semaphores needed by InitProcGlobal.
120  */
121 int
122 ProcGlobalSemas(void)
123 {
124  /*
125  * We need a sema per backend (including autovacuum), plus one for each
126  * auxiliary process.
127  */
128  return MaxBackends + NUM_AUXILIARY_PROCS;
129 }
130 
131 /*
132  * InitProcGlobal -
133  * Initialize the global process table during postmaster or standalone
134  * backend startup.
135  *
136  * We also create all the per-process semaphores we will need to support
137  * the requested number of backends. We used to allocate semaphores
138  * only when backends were actually started up, but that is bad because
139  * it lets Postgres fail under load --- a lot of Unix systems are
140  * (mis)configured with small limits on the number of semaphores, and
141  * running out when trying to start another backend is a common failure.
142  * So, now we grab enough semaphores to support the desired max number
143  * of backends immediately at initialization --- if the sysadmin has set
144  * MaxConnections, max_worker_processes, max_wal_senders, or
145  * autovacuum_max_workers higher than his kernel will support, he'll
146  * find out sooner rather than later.
147  *
148  * Another reason for creating semaphores here is that the semaphore
149  * implementation typically requires us to create semaphores in the
150  * postmaster, not in backends.
151  *
152  * Note: this is NOT called by individual backends under a postmaster,
153  * not even in the EXEC_BACKEND case. The ProcGlobal and AuxiliaryProcs
154  * pointers must be propagated specially for EXEC_BACKEND operation.
155  */
156 void
157 InitProcGlobal(void)
158 {
159  PGPROC *procs;
160  int i,
161  j;
162  bool found;
164 
165  /* Create the ProcGlobal shared structure */
166  ProcGlobal = (PROC_HDR *)
167  ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
168  Assert(!found);
169 
170  /*
171  * Initialize the data structures.
172  */
179  ProcGlobal->walwriterLatch = NULL;
183 
184  /*
185  * Create and initialize all the PGPROC structures we'll need. There are
186  * six separate consumers: (1) normal backends, (2) autovacuum workers
187  * and the autovacuum launcher, (3) background workers, (4) walsenders,
188  * (5) auxiliary processes, and (6) prepared transactions. Each PGPROC structure is
189  * dedicated to exactly one of these purposes, and they do not move
190  * between groups.
191  */
192  procs = (PGPROC *) ShmemAlloc(TotalProcs * sizeof(PGPROC));
193  MemSet(procs, 0, TotalProcs * sizeof(PGPROC));
194  ProcGlobal->allProcs = procs;
195  /* XXX allProcCount isn't really all of them; it excludes prepared xacts */
197 
198  /*
199  * Allocate arrays mirroring PGPROC fields in a dense manner. See
200  * PROC_HDR.
201  *
202  * XXX: It might make sense to increase padding for these arrays, given
203  * how hotly they are accessed.
204  */
205  ProcGlobal->xids =
206  (TransactionId *) ShmemAlloc(TotalProcs * sizeof(*ProcGlobal->xids));
207  MemSet(ProcGlobal->xids, 0, TotalProcs * sizeof(*ProcGlobal->xids));
209  MemSet(ProcGlobal->subxidStates, 0, TotalProcs * sizeof(*ProcGlobal->subxidStates));
210  ProcGlobal->statusFlags = (uint8 *) ShmemAlloc(TotalProcs * sizeof(*ProcGlobal->statusFlags));
211  MemSet(ProcGlobal->statusFlags, 0, TotalProcs * sizeof(*ProcGlobal->statusFlags));
212 
213  for (i = 0; i < TotalProcs; i++)
214  {
215  PGPROC *proc = &procs[i];
216 
217  /* Common initialization for all PGPROCs, regardless of type. */
218 
219  /*
220  * Set up per-PGPROC semaphore, latch, and fpInfoLock. Prepared xact
221  * dummy PGPROCs don't need these though - they're never associated
222  * with a real process
223  */
225  {
226  proc->sem = PGSemaphoreCreate();
227  InitSharedLatch(&(proc->procLatch));
229  }
230 
231  /*
232  * Newly created PGPROCs for normal backends, autovacuum and bgworkers
233  * must be queued up on the appropriate free list. Because there can
234  * only ever be a small, fixed number of auxiliary processes, no free
235  * list is used in that case; InitAuxiliaryProcess() instead uses a
236  * linear search. PGPROCs for prepared transactions are added to a
237  * free list by TwoPhaseShmemInit().
238  */
239  if (i < MaxConnections)
240  {
241  /* PGPROC for normal backend, add to freeProcs list */
244  }
245  else if (i < MaxConnections + autovacuum_max_workers + 1)
246  {
247  /* PGPROC for AV launcher/worker, add to autovacFreeProcs list */
250  }
252  {
253  /* PGPROC for bgworker, add to bgworkerFreeProcs list */
256  }
257  else if (i < MaxBackends)
258  {
259  /* PGPROC for walsender, add to walsenderFreeProcs list */
262  }
263 
264  /* Initialize myProcLocks[] shared memory queues. */
265  for (j = 0; j < NUM_LOCK_PARTITIONS; j++)
266  dlist_init(&(proc->myProcLocks[j]));
267 
268  /* Initialize lockGroupMembers list. */
270 
271  /*
272  * Initialize the atomic variables, otherwise, it won't be safe to
273  * access them for backends that aren't currently in use.
274  */
277  pg_atomic_init_u64(&(proc->waitStart), 0);
278  }
279 
280  /*
281  * Save pointers to the blocks of PGPROC structures reserved for auxiliary
282  * processes and prepared transactions.
283  */
284  AuxiliaryProcs = &procs[MaxBackends];
286 
287  /* Create ProcStructLock spinlock, too */
288  ProcStructLock = (slock_t *) ShmemAlloc(sizeof(slock_t));
290 }
291 
292 /*
293  * InitProcess -- initialize a per-process PGPROC entry for this backend
294  */
295 void
296 InitProcess(void)
297 {
298  dlist_head *procgloballist;
299 
300  /*
301  * ProcGlobal should be set up already (if we are a backend, we inherit
302  * this by fork() or EXEC_BACKEND mechanism from the postmaster).
303  */
304  if (ProcGlobal == NULL)
305  elog(PANIC, "proc header uninitialized");
306 
307  if (MyProc != NULL)
308  elog(ERROR, "you already exist");
309 
310  /* Decide which list should supply our PGPROC. */
312  procgloballist = &ProcGlobal->autovacFreeProcs;
313  else if (AmBackgroundWorkerProcess())
314  procgloballist = &ProcGlobal->bgworkerFreeProcs;
315  else if (AmWalSenderProcess())
316  procgloballist = &ProcGlobal->walsenderFreeProcs;
317  else
318  procgloballist = &ProcGlobal->freeProcs;
319 
320  /*
321  * Try to get a proc struct from the appropriate free list. If this
322  * fails, we must be out of PGPROC structures (not to mention semaphores).
323  *
324  * While we are holding the ProcStructLock, also copy the current shared
325  * estimate of spins_per_delay to local storage.
326  */
328 
330 
331  if (!dlist_is_empty(procgloballist))
332  {
333  MyProc = (PGPROC *) dlist_pop_head_node(procgloballist);
335  }
336  else
337  {
338  /*
339  * If we reach here, all the PGPROCs are in use. This is one of the
340  * possible places to detect "too many backends", so give the standard
341  * error message. XXX do we need to give a different failure message
342  * in the autovacuum case?
343  */
345  if (AmWalSenderProcess())
346  ereport(FATAL,
347  (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
348  errmsg("number of requested standby connections exceeds max_wal_senders (currently %d)",
349  max_wal_senders)));
350  ereport(FATAL,
351  (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
352  errmsg("sorry, too many clients already")));
353  }
355 
356  /*
357  * Cross-check that the PGPROC is of the type we expect; if this were not
358  * the case, it would get returned to the wrong list.
359  */
360  Assert(MyProc->procgloballist == procgloballist);
361 
362  /*
363  * Now that we have a PGPROC, mark ourselves as an active postmaster
364  * child; this is so that the postmaster can detect it if we exit without
365  * cleaning up. (XXX autovac launcher currently doesn't participate in
366  * this; it probably should.)
367  *
368  * Slot sync worker also does not participate in it, see comments atop
369  * 'struct bkend' in postmaster.c.
370  */
374 
375  /*
376  * Initialize all fields of MyProc, except for those previously
377  * initialized by InitProcGlobal.
378  */
381  MyProc->fpVXIDLock = false;
385  MyProc->pid = MyProcPid;
388  /* databaseId and roleId will be filled in later */
393  MyProc->delayChkptFlags = 0;
394  MyProc->statusFlags = 0;
395  /* NB -- autovac launcher intentionally does not set IS_AUTOVACUUM */
399  MyProc->lwWaitMode = 0;
400  MyProc->waitLock = NULL;
401  MyProc->waitProcLock = NULL;
403 #ifdef USE_ASSERT_CHECKING
404  {
405  int i;
406 
407  /* Last process should have released all locks. */
408  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
410  }
411 #endif
413 
414  /* Initialize fields for sync rep */
415  MyProc->waitLSN = 0;
418 
419  /* Initialize fields for group XID clearing. */
420  MyProc->procArrayGroupMember = false;
423 
424  /* Check that group locking fields are in a proper initial state. */
425  Assert(MyProc->lockGroupLeader == NULL);
427 
428  /* Initialize wait event information. */
429  MyProc->wait_event_info = 0;
430 
431  /* Initialize fields for group transaction status update. */
432  MyProc->clogGroupMember = false;
438 
439  /*
440  * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
441  * on it. That allows us to repoint the process latch, which so far
442  * points to the process-local one, to the shared one.
443  */
446 
447  /* now that we have a proc, report wait events to shared memory */
449 
450  /*
451  * We might be reusing a semaphore that belonged to a failed process. So
452  * be careful and reinitialize its value here. (This is not strictly
453  * necessary anymore, but seems like a good idea for cleanliness.)
454  */
456 
457  /*
458  * Arrange to clean up at backend exit.
459  */
461 
462  /*
463  * Now that we have a PGPROC, we could try to acquire locks, so initialize
464  * local state needed for LWLocks, and the deadlock checker.
465  */
468 
469 #ifdef EXEC_BACKEND
470 
471  /*
472  * Initialize backend-local pointers to all the shared data structures.
473  * (We couldn't do this until now because it needs LWLocks.)
474  */
475  if (IsUnderPostmaster)
476  AttachSharedMemoryStructs();
477 #endif
478 }
479 
480 /*
481  * InitProcessPhase2 -- make MyProc visible in the shared ProcArray.
482  *
483  * This is separate from InitProcess because we can't acquire LWLocks until
484  * we've created a PGPROC, but in the EXEC_BACKEND case ProcArrayAdd won't
485  * work until after we've done AttachSharedMemoryStructs.
486  */
487 void
488 InitProcessPhase2(void)
489 {
490  Assert(MyProc != NULL);
491 
492  /*
493  * Add our PGPROC to the PGPROC array in shared memory.
494  */
496 
497  /*
498  * Arrange to clean that up at backend exit.
499  */
501 }
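/*
 * Illustrative sketch (not part of proc.c): the startup ordering implied by
 * the two functions above.  Simplified from the real InitPostgres() path;
 * error handling and EXEC_BACKEND details are omitted.
 */
static void
example_backend_startup(void)
{
	/* Get a PGPROC and semaphore; after this, LWLocks may be acquired. */
	InitProcess();

	/* Only now can this backend be added to the shared ProcArray. */
	InitProcessPhase2();
}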
502 
503 /*
504  * InitAuxiliaryProcess -- create a PGPROC entry for an auxiliary process
505  *
506  * This is called by bgwriter and similar processes so that they will have a
507  * MyProc value that's real enough to let them wait for LWLocks. The PGPROC
508  * and sema that are assigned are one of the extra ones created during
509  * InitProcGlobal.
510  *
511  * Auxiliary processes are presently not expected to wait for real (lockmgr)
512  * locks, so we need not set up the deadlock checker. They are never added
513  * to the ProcArray or the sinval messaging mechanism, either. They also
514  * don't get a VXID assigned, since this is only useful when we actually
515  * hold lockmgr locks.
516  *
517  * Startup process however uses locks but never waits for them in the
518  * normal backend sense. Startup process also takes part in sinval messaging
519  * as a sendOnly process, so never reads messages from sinval queue. So
520  * Startup process does have a VXID and does show up in pg_locks.
521  */
522 void
523 InitAuxiliaryProcess(void)
524 {
525  PGPROC *auxproc;
526  int proctype;
527 
528  /*
529  * ProcGlobal should be set up already (if we are a backend, we inherit
530  * this by fork() or EXEC_BACKEND mechanism from the postmaster).
531  */
532  if (ProcGlobal == NULL || AuxiliaryProcs == NULL)
533  elog(PANIC, "proc header uninitialized");
534 
535  if (MyProc != NULL)
536  elog(ERROR, "you already exist");
537 
538  /*
539  * We use the ProcStructLock to protect assignment and releasing of
540  * AuxiliaryProcs entries.
541  *
542  * While we are holding the ProcStructLock, also copy the current shared
543  * estimate of spins_per_delay to local storage.
544  */
546 
548 
549  /*
550  * Find a free auxproc ... *big* trouble if there isn't one ...
551  */
552  for (proctype = 0; proctype < NUM_AUXILIARY_PROCS; proctype++)
553  {
554  auxproc = &AuxiliaryProcs[proctype];
555  if (auxproc->pid == 0)
556  break;
557  }
558  if (proctype >= NUM_AUXILIARY_PROCS)
559  {
561  elog(FATAL, "all AuxiliaryProcs are in use");
562  }
563 
564  /* Mark auxiliary proc as in use by me */
565  /* use volatile pointer to prevent code rearrangement */
566  ((volatile PGPROC *) auxproc)->pid = MyProcPid;
567 
569 
570  MyProc = auxproc;
572 
573  /*
574  * Initialize all fields of MyProc, except for those previously
575  * initialized by InitProcGlobal.
576  */
579  MyProc->fpVXIDLock = false;
589  MyProc->delayChkptFlags = 0;
590  MyProc->statusFlags = 0;
592  MyProc->lwWaitMode = 0;
593  MyProc->waitLock = NULL;
594  MyProc->waitProcLock = NULL;
596 #ifdef USE_ASSERT_CHECKING
597  {
598  int i;
599 
600  /* Last process should have released all locks. */
601  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
603  }
604 #endif
605 
606  /*
607  * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
608  * on it. That allows us to repoint the process latch, which so far
609  * points to the process-local one, to the shared one.
610  */
613 
614  /* now that we have a proc, report wait events to shared memory */
616 
617  /* Check that group locking fields are in a proper initial state. */
618  Assert(MyProc->lockGroupLeader == NULL);
620 
621  /*
622  * We might be reusing a semaphore that belonged to a failed process. So
623  * be careful and reinitialize its value here. (This is not strictly
624  * necessary anymore, but seems like a good idea for cleanliness.)
625  */
627 
628  /*
629  * Arrange to clean up at process exit.
630  */
632 
633  /*
634  * Now that we have a PGPROC, we could try to acquire lightweight locks.
635  * Initialize local state needed for them. (Heavyweight locks cannot be
636  * acquired in aux processes.)
637  */
639 
640 #ifdef EXEC_BACKEND
641 
642  /*
643  * Initialize backend-local pointers to all the shared data structures.
644  * (We couldn't do this until now because it needs LWLocks.)
645  */
646  if (IsUnderPostmaster)
647  AttachSharedMemoryStructs();
648 #endif
649 }
650 
651 /*
652  * Used from bufmgr to share the value of the buffer that Startup waits on,
653  * or to reset the value to "not waiting" (-1). This allows processing
654  * of recovery conflicts for buffer pins. The value is set before backends
655  * look at it, so no locking is required, especially since the assignment
656  * is an atomic integer store.
657  */
658 void
659 SetStartupBufferPinWaitBufId(int bufid)
660 {
661  /* use volatile pointer to prevent code rearrangement */
662  volatile PROC_HDR *procglobal = ProcGlobal;
663 
664  procglobal->startupBufferPinWaitBufId = bufid;
665 }
666 
667 /*
668  * Used by backends when they receive a request to check for buffer pin waits.
669  */
670 int
671 GetStartupBufferPinWaitBufId(void)
672 {
673  /* use volatile pointer to prevent code rearrangement */
674  volatile PROC_HDR *procglobal = ProcGlobal;
675 
676  return procglobal->startupBufferPinWaitBufId;
677 }
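/*
 * Illustrative sketch (not part of proc.c): how bufmgr-side code might pair
 * the two functions above.  The real logic lives in LockBufferForCleanup()
 * and HoldingBufferPinThatDelaysRecovery(); the helper and the (buffer - 1)
 * convention shown here are a simplified reading of that code.
 */
static bool
example_holding_conflicting_pin(Buffer my_pinned_buffer)
{
	int			bufid = GetStartupBufferPinWaitBufId();

	/* Startup advertises the buffer it waits on, or -1 when not waiting. */
	return (bufid >= 0 && bufid == my_pinned_buffer - 1);
}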
678 
679 /*
680  * Check whether there are at least N free PGPROC objects. If false is
681  * returned, *nfree will be set to the number of free PGPROC objects.
682  * Otherwise, *nfree will be set to n.
683  *
684  * Note: this is designed on the assumption that N will generally be small.
685  */
686 bool
687 HaveNFreeProcs(int n, int *nfree)
688 {
689  dlist_iter iter;
690 
691  Assert(n > 0);
692  Assert(nfree);
693 
695 
696  *nfree = 0;
698  {
699  (*nfree)++;
700  if (*nfree == n)
701  break;
702  }
703 
705 
706  return (*nfree == n);
707 }
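/*
 * Illustrative sketch (not part of proc.c): a typical caller uses this to
 * enforce reserved connection slots during backend startup.  The parameter
 * and message below are illustrative, not the actual InitPostgres() code.
 */
static void
example_enforce_reserved_slots(int reserved_slots)
{
	int			nfree;

	if (reserved_slots > 0 && !HaveNFreeProcs(reserved_slots, &nfree))
		ereport(FATAL,
				(errcode(ERRCODE_TOO_MANY_CONNECTIONS),
				 errmsg("remaining connection slots are reserved")));
}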
708 
709 /*
710  * Check if the current process is awaiting a lock.
711  */
712 bool
713 IsWaitingForLock(void)
714 {
715  if (lockAwaited == NULL)
716  return false;
717 
718  return true;
719 }
720 
721 /*
722  * Cancel any pending wait for lock, when aborting a transaction, and revert
723  * any strong lock count acquisition for a lock being acquired.
724  *
725  * (Normally, this would only happen if we accept a cancel/die
726  * interrupt while waiting; but an ereport(ERROR) before or during the lock
727  * wait is within the realm of possibility, too.)
728  */
729 void
730 LockErrorCleanup(void)
731 {
732  LWLock *partitionLock;
733  DisableTimeoutParams timeouts[2];
734 
735  HOLD_INTERRUPTS();
736 
738 
739  /* Nothing to do if we weren't waiting for a lock */
740  if (lockAwaited == NULL)
741  {
743  return;
744  }
745 
746  /*
747  * Turn off the deadlock and lock timeout timers, if they are still
748  * running (see ProcSleep). Note we must preserve the LOCK_TIMEOUT
749  * indicator flag, since this function is executed before
750  * ProcessInterrupts when responding to SIGINT; else we'd lose the
751  * knowledge that the SIGINT came from a lock timeout and not an external
752  * source.
753  */
754  timeouts[0].id = DEADLOCK_TIMEOUT;
755  timeouts[0].keep_indicator = false;
756  timeouts[1].id = LOCK_TIMEOUT;
757  timeouts[1].keep_indicator = true;
758  disable_timeouts(timeouts, 2);
759 
760  /* Unlink myself from the wait queue, if on it (might not be anymore!) */
761  partitionLock = LockHashPartitionLock(lockAwaited->hashcode);
762  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
763 
765  {
766  /* We could not have been granted the lock yet */
768  }
769  else
770  {
771  /*
772  * Somebody kicked us off the lock queue already. Perhaps they
773  * granted us the lock, or perhaps they detected a deadlock. If they
774  * did grant us the lock, we'd better remember it in our local lock
775  * table.
776  */
779  }
780 
781  lockAwaited = NULL;
782 
783  LWLockRelease(partitionLock);
784 
786 }
787 
788 
789 /*
790  * ProcReleaseLocks() -- release locks associated with current transaction
791  * at main transaction commit or abort
792  *
793  * At main transaction commit, we release standard locks except session locks.
794  * At main transaction abort, we release all locks including session locks.
795  *
796  * Advisory locks are released only if they are transaction-level;
797  * session-level holds remain, whether this is a commit or not.
798  *
799  * At subtransaction commit, we don't release any locks (so this func is not
800  * needed at all); we will defer the releasing to the parent transaction.
801  * At subtransaction abort, we release all locks held by the subtransaction;
802  * this is implemented by retail releasing of the locks under control of
803  * the ResourceOwner mechanism.
804  */
805 void
806 ProcReleaseLocks(bool isCommit)
807 {
808  if (!MyProc)
809  return;
810  /* If waiting, get off wait queue (should only be needed after error) */
812  /* Release standard locks, including session-level if aborting */
814  /* Release transaction-level advisory locks */
816 }
817 
818 
819 /*
820  * RemoveProcFromArray() -- Remove this process from the shared ProcArray.
821  */
822 static void
823 RemoveProcFromArray(int code, Datum arg)
824 {
825  Assert(MyProc != NULL);
827 }
828 
829 /*
830  * ProcKill() -- Destroy the per-proc data structure for
831  * this process. Release any of its held LW locks.
832  */
833 static void
834 ProcKill(int code, Datum arg)
835 {
836  PGPROC *proc;
837  dlist_head *procgloballist;
838 
839  Assert(MyProc != NULL);
840 
841  /* not safe if forked by system(), etc. */
842  if (MyProc->pid != (int) getpid())
843  elog(PANIC, "ProcKill() called in child process");
844 
845  /* Make sure we're out of the sync rep lists */
847 
848 #ifdef USE_ASSERT_CHECKING
849  {
850  int i;
851 
852  /* Last process should have released all locks. */
853  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
855  }
856 #endif
857 
858  /*
859  * Release any LW locks I am holding. There really shouldn't be any, but
860  * it's cheap to check again before we cut the knees off the LWLock
861  * facility by releasing our PGPROC ...
862  */
864 
865  /* Cancel any pending condition variable sleep, too */
867 
868  /*
869  * Detach from any lock group of which we are a member. If the leader
870  * exits before all other group members, its PGPROC will remain allocated
871  * until the last group process exits; that process must return the
872  * leader's PGPROC to the appropriate list.
873  */
874  if (MyProc->lockGroupLeader != NULL)
875  {
876  PGPROC *leader = MyProc->lockGroupLeader;
877  LWLock *leader_lwlock = LockHashPartitionLockByProc(leader);
878 
879  LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
882  if (dlist_is_empty(&leader->lockGroupMembers))
883  {
884  leader->lockGroupLeader = NULL;
885  if (leader != MyProc)
886  {
887  procgloballist = leader->procgloballist;
888 
889  /* Leader exited first; return its PGPROC. */
891  dlist_push_head(procgloballist, &leader->links);
893  }
894  }
895  else if (leader != MyProc)
896  MyProc->lockGroupLeader = NULL;
897  LWLockRelease(leader_lwlock);
898  }
899 
900  /*
901  * Reset MyLatch to the process local one. This is so that signal
902  * handlers et al can continue using the latch after the shared latch
903  * isn't ours anymore.
904  *
905  * Similarly, stop reporting wait events to MyProc->wait_event_info.
906  *
907  * After that clear MyProc and disown the shared latch.
908  */
911 
912  proc = MyProc;
913  MyProc = NULL;
915  DisownLatch(&proc->procLatch);
916 
917  /* Mark the proc no longer in use */
918  proc->pid = 0;
921 
922  procgloballist = proc->procgloballist;
924 
925  /*
926  * If we're still a member of a locking group, that means we're a leader
927  * which has somehow exited before its children. The last remaining child
928  * will release our PGPROC. Otherwise, release it now.
929  */
930  if (proc->lockGroupLeader == NULL)
931  {
932  /* Since lockGroupLeader is NULL, lockGroupMembers should be empty. */
934 
935  /* Return PGPROC structure (and semaphore) to appropriate freelist */
936  dlist_push_tail(procgloballist, &proc->links);
937  }
938 
939  /* Update shared estimate of spins_per_delay */
941 
943 
944  /*
945  * This process is no longer present in shared memory in any meaningful
946  * way, so tell the postmaster we've cleaned up acceptably well. (XXX
947  * autovac launcher should be included here someday)
948  *
949  * Slot sync worker is also not a postmaster child, so skip this shared
950  * memory related processing here.
951  */
955 
956  /* wake autovac launcher if needed -- see comments in FreeWorkerInfo */
957  if (AutovacuumLauncherPid != 0)
959 }
960 
961 /*
962  * AuxiliaryProcKill() -- Cut-down version of ProcKill for auxiliary
963  * processes (bgwriter, etc). The PGPROC and sema are not released, only
964  * marked as not-in-use.
965  */
966 static void
967 AuxiliaryProcKill(int code, Datum arg)
968 {
969  int proctype = DatumGetInt32(arg);
970  PGPROC *auxproc PG_USED_FOR_ASSERTS_ONLY;
971  PGPROC *proc;
972 
973  Assert(proctype >= 0 && proctype < NUM_AUXILIARY_PROCS);
974 
975  /* not safe if forked by system(), etc. */
976  if (MyProc->pid != (int) getpid())
977  elog(PANIC, "AuxiliaryProcKill() called in child process");
978 
979  auxproc = &AuxiliaryProcs[proctype];
980 
981  Assert(MyProc == auxproc);
982 
983  /* Release any LW locks I am holding (see notes above) */
985 
986  /* Cancel any pending condition variable sleep, too */
988 
989  /* look at the equivalent ProcKill() code for comments */
992 
993  proc = MyProc;
994  MyProc = NULL;
996  DisownLatch(&proc->procLatch);
997 
999 
1000  /* Mark auxiliary proc no longer in use */
1001  proc->pid = 0;
1003  proc->vxid.lxid = InvalidTransactionId;
1004 
1005  /* Update shared estimate of spins_per_delay */
1007 
1009 }
1010 
1011 /*
1012  * AuxiliaryPidGetProc -- get PGPROC for an auxiliary process
1013  * given its PID
1014  *
1015  * Returns NULL if not found.
1016  */
1017 PGPROC *
1018 AuxiliaryPidGetProc(int pid)
1019 {
1020  PGPROC *result = NULL;
1021  int index;
1022 
1023  if (pid == 0) /* never match dummy PGPROCs */
1024  return NULL;
1025 
1026  for (index = 0; index < NUM_AUXILIARY_PROCS; index++)
1027  {
1028  PGPROC *proc = &AuxiliaryProcs[index];
1029 
1030  if (proc->pid == pid)
1031  {
1032  result = proc;
1033  break;
1034  }
1035  }
1036  return result;
1037 }
1038 
1039 
1040 /*
1041  * ProcSleep -- put a process to sleep on the specified lock
1042  *
1043  * Caller must have set MyProc->heldLocks to reflect locks already held
1044  * on the lockable object by this process (under all XIDs).
1045  *
1046  * It's not actually guaranteed that we need to wait when this function is
1047  * called, because it could be that when we try to find a position at which
1048  * to insert ourselves into the wait queue, we discover that we must be inserted
1049  * ahead of everyone who wants a lock that conflicts with ours. In that case,
1050  * we get the lock immediately. Because of this, it's sensible for this function
1051  * to have a dontWait argument, despite the name.
1052  *
1053  * The lock table's partition lock must be held at entry, and will be held
1054  * at exit.
1055  *
1056  * Result: PROC_WAIT_STATUS_OK if we acquired the lock, PROC_WAIT_STATUS_ERROR
1057  * if not (if dontWait = true, this is a deadlock; if dontWait = false, we
1058  * would have had to wait).
1059  *
1060  * ASSUME: that no one will fiddle with the queue until after
1061  * we release the partition lock.
1062  *
1063  * NOTES: The process queue is now a priority queue for locking.
1064  */
1065 ProcWaitStatus
1066 ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable, bool dontWait)
1067 {
1068  LOCKMODE lockmode = locallock->tag.mode;
1069  LOCK *lock = locallock->lock;
1070  PROCLOCK *proclock = locallock->proclock;
1071  uint32 hashcode = locallock->hashcode;
1072  LWLock *partitionLock = LockHashPartitionLock(hashcode);
1073  dclist_head *waitQueue = &lock->waitProcs;
1074  PGPROC *insert_before = NULL;
1075  LOCKMASK myHeldLocks = MyProc->heldLocks;
1076  TimestampTz standbyWaitStart = 0;
1077  bool early_deadlock = false;
1078  bool allow_autovacuum_cancel = true;
1079  bool logged_recovery_conflict = false;
1080  ProcWaitStatus myWaitStatus;
1081  PGPROC *leader = MyProc->lockGroupLeader;
1082 
1083  /*
1084  * If group locking is in use, locks held by members of my locking group
1085  * need to be included in myHeldLocks. This is not required for the
1086  * relation extension lock, which conflicts even among group members.
1087  * However, including them in myHeldLocks gives group members priority in
1088  * acquiring those locks over other backends that are also trying to
1089  * acquire them. OTOH, we could avoid giving group members priority for
1090  * that kind of lock, but there doesn't appear to be a clear advantage
1091  * in doing so.
1092  */
1093  if (leader != NULL)
1094  {
1095  dlist_iter iter;
1096 
1097  dlist_foreach(iter, &lock->procLocks)
1098  {
1099  PROCLOCK *otherproclock;
1100 
1101  otherproclock = dlist_container(PROCLOCK, lockLink, iter.cur);
1102 
1103  if (otherproclock->groupLeader == leader)
1104  myHeldLocks |= otherproclock->holdMask;
1105  }
1106  }
1107 
1108  /*
1109  * Determine where to add myself in the wait queue.
1110  *
1111  * Normally I should go at the end of the queue. However, if I already
1112  * hold locks that conflict with the request of any previous waiter, put
1113  * myself in the queue just in front of the first such waiter. This is not
1114  * a necessary step, since deadlock detection would move me to before that
1115  * waiter anyway; but it's relatively cheap to detect such a conflict
1116  * immediately, and avoid delaying till deadlock timeout.
1117  *
1118  * Special case: if I find I should go in front of some waiter, check to
1119  * see if I conflict with already-held locks or the requests before that
1120  * waiter. If not, then just grant myself the requested lock immediately.
1121  * This is the same as the test for immediate grant in LockAcquire, except
1122  * we are only considering the part of the wait queue before my insertion
1123  * point.
1124  */
1125  if (myHeldLocks != 0 && !dclist_is_empty(waitQueue))
1126  {
1127  LOCKMASK aheadRequests = 0;
1128  dlist_iter iter;
1129 
1130  dclist_foreach(iter, waitQueue)
1131  {
1132  PGPROC *proc = dlist_container(PGPROC, links, iter.cur);
1133 
1134  /*
1135  * If we're part of the same locking group as this waiter, its
1136  * locks neither conflict with ours nor contribute to
1137  * aheadRequests.
1138  */
1139  if (leader != NULL && leader == proc->lockGroupLeader)
1140  continue;
1141 
1142  /* Must he wait for me? */
1143  if (lockMethodTable->conflictTab[proc->waitLockMode] & myHeldLocks)
1144  {
1145  /* Must I wait for him ? */
1146  if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
1147  {
1148  /*
1149  * Yes, so we have a deadlock. Easiest way to clean up
1150  * correctly is to call RemoveFromWaitQueue(), but we
1151  * can't do that until we are *on* the wait queue. So, set
1152  * a flag to check below, and break out of loop. Also,
1153  * record deadlock info for later message.
1154  */
1155  RememberSimpleDeadLock(MyProc, lockmode, lock, proc);
1156  early_deadlock = true;
1157  break;
1158  }
1159  /* I must go before this waiter. Check special case. */
1160  if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
1161  !LockCheckConflicts(lockMethodTable, lockmode, lock,
1162  proclock))
1163  {
1164  /* Skip the wait and just grant myself the lock. */
1165  GrantLock(lock, proclock, lockmode);
1166  GrantAwaitedLock();
1167  return PROC_WAIT_STATUS_OK;
1168  }
1169 
1170  /* Put myself into wait queue before conflicting process */
1171  insert_before = proc;
1172  break;
1173  }
1174  /* Nope, so advance to next waiter */
1175  aheadRequests |= LOCKBIT_ON(proc->waitLockMode);
1176  }
1177  }
1178 
1179  /*
1180  * At this point we know that we'd really need to sleep. If we've been
1181  * commanded not to do that, bail out.
1182  */
1183  if (dontWait)
1184  return PROC_WAIT_STATUS_ERROR;
1185 
1186  /*
1187  * Insert self into queue, at the position determined above.
1188  */
1189  if (insert_before)
1190  dclist_insert_before(waitQueue, &insert_before->links, &MyProc->links);
1191  else
1192  dclist_push_tail(waitQueue, &MyProc->links);
1193 
1194  lock->waitMask |= LOCKBIT_ON(lockmode);
1195 
1196  /* Set up wait information in PGPROC object, too */
1197  MyProc->waitLock = lock;
1198  MyProc->waitProcLock = proclock;
1199  MyProc->waitLockMode = lockmode;
1200 
1202 
1203  /*
1204  * If we detected deadlock, give up without waiting. This must agree with
1205  * CheckDeadLock's recovery code.
1206  */
1207  if (early_deadlock)
1208  {
1209  RemoveFromWaitQueue(MyProc, hashcode);
1210  return PROC_WAIT_STATUS_ERROR;
1211  }
1212 
1213  /* mark that we are waiting for a lock */
1214  lockAwaited = locallock;
1215 
1216  /*
1217  * Release the lock table's partition lock.
1218  *
1219  * NOTE: this may also cause us to exit critical-section state, possibly
1220  * allowing a cancel/die interrupt to be accepted. This is OK because we
1221  * have recorded the fact that we are waiting for a lock, and so
1222  * LockErrorCleanup will clean up if cancel/die happens.
1223  */
1224  LWLockRelease(partitionLock);
1225 
1226  /*
1227  * Also, now that we will successfully clean up after an ereport, it's
1228  * safe to check to see if there's a buffer pin deadlock against the
1229  * Startup process. Of course, that's only necessary if we're doing Hot
1230  * Standby and are not the Startup process ourselves.
1231  */
1232  if (RecoveryInProgress() && !InRecovery)
1234 
1235  /* Reset deadlock_state before enabling the timeout handler */
1237  got_deadlock_timeout = false;
1238 
1239  /*
1240  * Set timer so we can wake up after awhile and check for a deadlock. If a
1241  * deadlock is detected, the handler sets MyProc->waitStatus =
1242  * PROC_WAIT_STATUS_ERROR, allowing us to know that we must report failure
1243  * rather than success.
1244  *
1245  * By delaying the check until we've waited for a bit, we can avoid
1246  * running the rather expensive deadlock-check code in most cases.
1247  *
1248  * If LockTimeout is set, also enable the timeout for that. We can save a
1249  * few cycles by enabling both timeout sources in one call.
1250  *
1251  * If InHotStandby we set lock waits slightly later for clarity with other
1252  * code.
1253  */
1254  if (!InHotStandby)
1255  {
1256  if (LockTimeout > 0)
1257  {
1258  EnableTimeoutParams timeouts[2];
1259 
1260  timeouts[0].id = DEADLOCK_TIMEOUT;
1261  timeouts[0].type = TMPARAM_AFTER;
1262  timeouts[0].delay_ms = DeadlockTimeout;
1263  timeouts[1].id = LOCK_TIMEOUT;
1264  timeouts[1].type = TMPARAM_AFTER;
1265  timeouts[1].delay_ms = LockTimeout;
1266  enable_timeouts(timeouts, 2);
1267  }
1268  else
1270 
1271  /*
1272  * Use the current time obtained for the deadlock timeout timer as
1273  * waitStart (i.e., the time when this process started waiting for the
1274  * lock). Since fetching the current time again would add overhead, we
1275  * reuse the already-obtained timestamp instead.
1276  *
1277  * Note that waitStart is updated without holding the lock table's
1278  * partition lock, to avoid the overhead of an additional lock
1279  * acquisition. This can cause "waitstart" in pg_locks to become NULL
1280  * for a very short period of time after the wait started even though
1281  * "granted" is false. This is OK in practice because we can assume
1282  * that users are likely to look at "waitstart" when waiting for the
1283  * lock for a long time.
1284  */
1287  }
1288  else if (log_recovery_conflict_waits)
1289  {
1290  /*
1291  * Set the wait start timestamp if logging is enabled and in hot
1292  * standby.
1293  */
1294  standbyWaitStart = GetCurrentTimestamp();
1295  }
1296 
1297  /*
1298  * If somebody wakes us between LWLockRelease and WaitLatch, the latch
1299  * will not wait. But a set latch does not necessarily mean that the lock
1300  * is free now, as there are many other sources for latch sets than
1301  * somebody releasing the lock.
1302  *
1303  * We process interrupts whenever the latch has been set, so cancel/die
1304  * interrupts are processed quickly. This means we must not mind losing
1305  * control to a cancel/die interrupt here. We don't, because we have no
1306  * shared-state-change work to do after being granted the lock (the
1307  * grantor did it all). We do have to worry about canceling the deadlock
1308  * timeout and updating the locallock table, but if we lose control to an
1309  * error, LockErrorCleanup will fix that up.
1310  */
1311  do
1312  {
1313  if (InHotStandby)
1314  {
1315  bool maybe_log_conflict =
1316  (standbyWaitStart != 0 && !logged_recovery_conflict);
1317 
1318  /* Set a timer and wait for that or for the lock to be granted */
1320  maybe_log_conflict);
1321 
1322  /*
1323  * Emit the log message if the startup process is waiting longer
1324  * than deadlock_timeout for recovery conflict on lock.
1325  */
1326  if (maybe_log_conflict)
1327  {
1328  TimestampTz now = GetCurrentTimestamp();
1329 
1330  if (TimestampDifferenceExceeds(standbyWaitStart, now,
1331  DeadlockTimeout))
1332  {
1333  VirtualTransactionId *vxids;
1334  int cnt;
1335 
1336  vxids = GetLockConflicts(&locallock->tag.lock,
1337  AccessExclusiveLock, &cnt);
1338 
1339  /*
1340  * Log the recovery conflict and the list of PIDs of
1341  * backends holding the conflicting lock. Note that we do
1342  * logging even if there are no such backends right now
1343  * because the startup process here has already waited
1344  * longer than deadlock_timeout.
1345  */
1347  standbyWaitStart, now,
1348  cnt > 0 ? vxids : NULL, true);
1349  logged_recovery_conflict = true;
1350  }
1351  }
1352  }
1353  else
1354  {
1356  PG_WAIT_LOCK | locallock->tag.lock.locktag_type);
1358  /* check for deadlocks first, as that's probably log-worthy */
1360  {
1361  CheckDeadLock();
1362  got_deadlock_timeout = false;
1363  }
1365  }
1366 
1367  /*
1368  * waitStatus could change from PROC_WAIT_STATUS_WAITING to something
1369  * else asynchronously. Read it just once per loop to prevent
1370  * surprising behavior (such as missing log messages).
1371  */
1372  myWaitStatus = *((volatile ProcWaitStatus *) &MyProc->waitStatus);
1373 
1374  /*
1375  * If we are not deadlocked, but are waiting on an autovacuum-induced
1376  * task, send a signal to interrupt it.
1377  */
1378  if (deadlock_state == DS_BLOCKED_BY_AUTOVACUUM && allow_autovacuum_cancel)
1379  {
1380  PGPROC *autovac = GetBlockingAutoVacuumPgproc();
1381  uint8 statusFlags;
1382  uint8 lockmethod_copy;
1383  LOCKTAG locktag_copy;
1384 
1385  /*
1386  * Grab info we need, then release lock immediately. Note this
1387  * coding means that there is a tiny chance that the process
1388  * terminates its current transaction and starts a different one
1389  * before we have a chance to send the signal; the worst possible
1390  * consequence is that a for-wraparound vacuum is canceled. But
1391  * that could happen in any case unless we were to do kill() with
1392  * the lock held, which is much more undesirable.
1393  */
1394  LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
1395  statusFlags = ProcGlobal->statusFlags[autovac->pgxactoff];
1396  lockmethod_copy = lock->tag.locktag_lockmethodid;
1397  locktag_copy = lock->tag;
1398  LWLockRelease(ProcArrayLock);
1399 
1400  /*
1401  * Only do it if the worker is not working to protect against Xid
1402  * wraparound.
1403  */
1404  if ((statusFlags & PROC_IS_AUTOVACUUM) &&
1405  !(statusFlags & PROC_VACUUM_FOR_WRAPAROUND))
1406  {
1407  int pid = autovac->pid;
1408 
1409  /* report the case, if configured to do so */
1411  {
1412  StringInfoData locktagbuf;
1413  StringInfoData logbuf; /* errdetail for server log */
1414 
1415  initStringInfo(&locktagbuf);
1416  initStringInfo(&logbuf);
1417  DescribeLockTag(&locktagbuf, &locktag_copy);
1418  appendStringInfo(&logbuf,
1419  "Process %d waits for %s on %s.",
1420  MyProcPid,
1421  GetLockmodeName(lockmethod_copy, lockmode),
1422  locktagbuf.data);
1423 
1424  ereport(DEBUG1,
1425  (errmsg_internal("sending cancel to blocking autovacuum PID %d",
1426  pid),
1427  errdetail_log("%s", logbuf.data)));
1428 
1429  pfree(locktagbuf.data);
1430  pfree(logbuf.data);
1431  }
1432 
1433  /* send the autovacuum worker Back to Old Kent Road */
1434  if (kill(pid, SIGINT) < 0)
1435  {
1436  /*
1437  * There's a race condition here: once we release the
1438  * ProcArrayLock, it's possible for the autovac worker to
1439  * close up shop and exit before we can do the kill().
1440  * Therefore, we do not whinge about no-such-process.
1441  * Other errors such as EPERM could conceivably happen if
1442  * the kernel recycles the PID fast enough, but such cases
1443  * seem improbable enough that it's probably best to issue
1444  * a warning if we see some other errno.
1445  */
1446  if (errno != ESRCH)
1447  ereport(WARNING,
1448  (errmsg("could not send signal to process %d: %m",
1449  pid)));
1450  }
1451  }
1452 
1453  /* prevent signal from being sent again more than once */
1454  allow_autovacuum_cancel = false;
1455  }
1456 
1457  /*
1458  * If awoken after the deadlock check interrupt has run, and
1459  * log_lock_waits is on, then report about the wait.
1460  */
1462  {
1464  lock_waiters_sbuf,
1465  lock_holders_sbuf;
1466  const char *modename;
1467  long secs;
1468  int usecs;
1469  long msecs;
1470  dlist_iter proc_iter;
1471  PROCLOCK *curproclock;
1472  bool first_holder = true,
1473  first_waiter = true;
1474  int lockHoldersNum = 0;
1475 
1476  initStringInfo(&buf);
1477  initStringInfo(&lock_waiters_sbuf);
1478  initStringInfo(&lock_holders_sbuf);
1479 
1480  DescribeLockTag(&buf, &locallock->tag.lock);
1481  modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
1482  lockmode);
1483  TimestampDifference(get_timeout_start_time(DEADLOCK_TIMEOUT),
1484  GetCurrentTimestamp(),
1485  &secs, &usecs);
1486  msecs = secs * 1000 + usecs / 1000;
1487  usecs = usecs % 1000;
1488 
1489  /*
1490  * we loop over the lock's procLocks to gather a list of all
1491  * holders and waiters. Thus we will be able to provide more
1492  * detailed information for lock debugging purposes.
1493  *
1494  * lock->procLocks contains all processes which hold or wait for
1495  * this lock.
1496  */
1497 
1498  LWLockAcquire(partitionLock, LW_SHARED);
1499 
1500  dlist_foreach(proc_iter, &lock->procLocks)
1501  {
1502  curproclock =
1503  dlist_container(PROCLOCK, lockLink, proc_iter.cur);
1504 
1505  /*
1506  * we are a waiter if myProc->waitProcLock == curproclock; we
1507  * are a holder if it is NULL or something different
1508  */
1509  if (curproclock->tag.myProc->waitProcLock == curproclock)
1510  {
1511  if (first_waiter)
1512  {
1513  appendStringInfo(&lock_waiters_sbuf, "%d",
1514  curproclock->tag.myProc->pid);
1515  first_waiter = false;
1516  }
1517  else
1518  appendStringInfo(&lock_waiters_sbuf, ", %d",
1519  curproclock->tag.myProc->pid);
1520  }
1521  else
1522  {
1523  if (first_holder)
1524  {
1525  appendStringInfo(&lock_holders_sbuf, "%d",
1526  curproclock->tag.myProc->pid);
1527  first_holder = false;
1528  }
1529  else
1530  appendStringInfo(&lock_holders_sbuf, ", %d",
1531  curproclock->tag.myProc->pid);
1532 
1533  lockHoldersNum++;
1534  }
1535  }
1536 
1537  LWLockRelease(partitionLock);
1538 
1540  ereport(LOG,
1541  (errmsg("process %d avoided deadlock for %s on %s by rearranging queue order after %ld.%03d ms",
1542  MyProcPid, modename, buf.data, msecs, usecs),
1543  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1544  "Processes holding the lock: %s. Wait queue: %s.",
1545  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1546  else if (deadlock_state == DS_HARD_DEADLOCK)
1547  {
1548  /*
1549  * This message is a bit redundant with the error that will be
1550  * reported subsequently, but in some cases the error report
1551  * might not make it to the log (eg, if it's caught by an
1552  * exception handler), and we want to ensure all long-wait
1553  * events get logged.
1554  */
1555  ereport(LOG,
1556  (errmsg("process %d detected deadlock while waiting for %s on %s after %ld.%03d ms",
1557  MyProcPid, modename, buf.data, msecs, usecs),
1558  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1559  "Processes holding the lock: %s. Wait queue: %s.",
1560  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1561  }
1562 
1563  if (myWaitStatus == PROC_WAIT_STATUS_WAITING)
1564  ereport(LOG,
1565  (errmsg("process %d still waiting for %s on %s after %ld.%03d ms",
1566  MyProcPid, modename, buf.data, msecs, usecs),
1567  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1568  "Processes holding the lock: %s. Wait queue: %s.",
1569  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1570  else if (myWaitStatus == PROC_WAIT_STATUS_OK)
1571  ereport(LOG,
1572  (errmsg("process %d acquired %s on %s after %ld.%03d ms",
1573  MyProcPid, modename, buf.data, msecs, usecs)));
1574  else
1575  {
1576  Assert(myWaitStatus == PROC_WAIT_STATUS_ERROR);
1577 
1578  /*
1579  * Currently, the deadlock checker always kicks its own
1580  * process, which means that we'll only see
1581  * PROC_WAIT_STATUS_ERROR when deadlock_state ==
1582  * DS_HARD_DEADLOCK, and there's no need to print redundant
1583  * messages. But for completeness and future-proofing, print
1584  * a message if it looks like someone else kicked us off the
1585  * lock.
1586  */
1588  ereport(LOG,
1589  (errmsg("process %d failed to acquire %s on %s after %ld.%03d ms",
1590  MyProcPid, modename, buf.data, msecs, usecs),
1591  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1592  "Processes holding the lock: %s. Wait queue: %s.",
1593  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1594  }
1595 
1596  /*
1597  * At this point we might still need to wait for the lock. Reset
1598  * state so we don't print the above messages again.
1599  */
1601 
1602  pfree(buf.data);
1603  pfree(lock_holders_sbuf.data);
1604  pfree(lock_waiters_sbuf.data);
1605  }
1606  } while (myWaitStatus == PROC_WAIT_STATUS_WAITING);
1607 
1608  /*
1609  * Disable the timers, if they are still running. As in LockErrorCleanup,
1610  * we must preserve the LOCK_TIMEOUT indicator flag: if a lock timeout has
1611  * already caused QueryCancelPending to become set, we want the cancel to
1612  * be reported as a lock timeout, not a user cancel.
1613  */
1614  if (!InHotStandby)
1615  {
1616  if (LockTimeout > 0)
1617  {
1618  DisableTimeoutParams timeouts[2];
1619 
1620  timeouts[0].id = DEADLOCK_TIMEOUT;
1621  timeouts[0].keep_indicator = false;
1622  timeouts[1].id = LOCK_TIMEOUT;
1623  timeouts[1].keep_indicator = true;
1624  disable_timeouts(timeouts, 2);
1625  }
1626  else
1628  }
1629 
1630  /*
1631  * Emit the log message if recovery conflict on lock was resolved but the
1632  * startup process waited longer than deadlock_timeout for it.
1633  */
1634  if (InHotStandby && logged_recovery_conflict)
1636  standbyWaitStart, GetCurrentTimestamp(),
1637  NULL, false);
1638 
1639  /*
1640  * Re-acquire the lock table's partition lock. We have to do this to hold
1641  * off cancel/die interrupts before we can mess with lockAwaited (else we
1642  * might have a missed or duplicated locallock update).
1643  */
1644  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
1645 
1646  /*
1647  * We no longer want LockErrorCleanup to do anything.
1648  */
1649  lockAwaited = NULL;
1650 
1651  /*
1652  * If we got the lock, be sure to remember it in the locallock table.
1653  */
1655  GrantAwaitedLock();
1656 
1657  /*
1658  * We don't have to do anything else, because the awaker did all the
1659  * necessary update of the lock table and MyProc.
1660  */
1661  return MyProc->waitStatus;
1662 }
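/*
 * Illustrative sketch (not part of proc.c): how a caller in lock.c consumes
 * the result.  Simplified; the real WaitOnLock() also maintains LOCALLOCK
 * and resource-owner bookkeeping around this call.
 */
static void
example_wait_on_lock(LOCALLOCK *locallock, LockMethod lockMethodTable)
{
	if (ProcSleep(locallock, lockMethodTable, false) == PROC_WAIT_STATUS_ERROR)
		DeadLockReport();		/* ereports the deadlock and aborts */
}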
1663 
1664 
1665 /*
1666  * ProcWakeup -- wake up a process by setting its latch.
1667  *
1668  * Also remove the process from the wait queue and set its links invalid.
1669  *
1670  * The appropriate lock partition lock must be held by caller.
1671  *
1672  * XXX: presently, this code is only used for the "success" case, and only
1673  * works correctly for that case. To clean up in failure case, would need
1674  * to twiddle the lock's request counts too --- see RemoveFromWaitQueue.
1675  * Hence, in practice the waitStatus parameter must be PROC_WAIT_STATUS_OK.
1676  */
1677 void
1678 ProcWakeup(PGPROC *proc, ProcWaitStatus waitStatus)
1679 {
1680  if (dlist_node_is_detached(&proc->links))
1681  return;
1682 
1684 
1685  /* Remove process from wait queue */
1687 
1688  /* Clean up process' state and pass it the ok/fail signal */
1689  proc->waitLock = NULL;
1690  proc->waitProcLock = NULL;
1691  proc->waitStatus = waitStatus;
1693 
1694  /* And awaken it */
1695  SetLatch(&proc->procLatch);
1696 }
1697 
1698 /*
1699  * ProcLockWakeup -- routine for waking up processes when a lock is
1700  * released (or a prior waiter is aborted). Scan all waiters
1701  * for lock, waken any that are no longer blocked.
1702  *
1703  * The appropriate lock partition lock must be held by caller.
1704  */
1705 void
1706 ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
1707 {
1708  dclist_head *waitQueue = &lock->waitProcs;
1709  LOCKMASK aheadRequests = 0;
1710  dlist_mutable_iter miter;
1711 
1712  if (dclist_is_empty(waitQueue))
1713  return;
1714 
1715  dclist_foreach_modify(miter, waitQueue)
1716  {
1717  PGPROC *proc = dlist_container(PGPROC, links, miter.cur);
1718  LOCKMODE lockmode = proc->waitLockMode;
1719 
1720  /*
1721  * Waken if (a) doesn't conflict with requests of earlier waiters, and
1722  * (b) doesn't conflict with already-held locks.
1723  */
1724  if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
1725  !LockCheckConflicts(lockMethodTable, lockmode, lock,
1726  proc->waitProcLock))
1727  {
1728  /* OK to waken */
1729  GrantLock(lock, proc->waitProcLock, lockmode);
1730  /* removes proc from the lock's waiting process queue */
1732  }
1733  else
1734  {
1735  /*
1736  * Lock conflicts: Don't wake, but remember requested mode for
1737  * later checks.
1738  */
1739  aheadRequests |= LOCKBIT_ON(lockmode);
1740  }
1741  }
1742 }
1743 
1744 /*
1745  * CheckDeadLock
1746  *
1747  * We only get to this routine, if DEADLOCK_TIMEOUT fired while waiting for a
1748  * lock to be released by some other process. Check if there's a deadlock; if
1749  * not, just return. (But signal ProcSleep to log a message, if
1750  * log_lock_waits is true.) If we have a real deadlock, remove ourselves from
1751  * the lock's wait queue and signal an error to ProcSleep.
1752  */
1753 static void
1754 CheckDeadLock(void)
1755 {
1756  int i;
1757 
1758  /*
1759  * Acquire exclusive lock on the entire shared lock data structures. Must
1760  * grab LWLocks in partition-number order to avoid LWLock deadlock.
1761  *
1762  * Note that the deadlock check interrupt had better not be enabled
1763  * anywhere that this process itself holds lock partition locks, else this
1764  * will wait forever. Also note that LWLockAcquire creates a critical
1765  * section, so that this routine cannot be interrupted by cancel/die
1766  * interrupts.
1767  */
1768  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
1770 
1771  /*
1772  * Check to see if we've been awoken by anyone in the interim.
1773  *
1774  * If we have, we can return and resume our transaction -- happy day.
1775  * Before we are awoken the process releasing the lock grants it to us so
1776  * we know that we don't have to wait anymore.
1777  *
1778  * We check by looking to see if we've been unlinked from the wait queue.
1779  * This is safe because we hold the lock partition lock.
1780  */
1781  if (MyProc->links.prev == NULL ||
1782  MyProc->links.next == NULL)
1783  goto check_done;
1784 
1785 #ifdef LOCK_DEBUG
1786  if (Debug_deadlocks)
1787  DumpAllLocks();
1788 #endif
1789 
1790  /* Run the deadlock check, and set deadlock_state for use by ProcSleep */
1792 
1794  {
1795  /*
1796  * Oops. We have a deadlock.
1797  *
1798  * Get this process out of wait state. (Note: we could do this more
1799  * efficiently by relying on lockAwaited, but use this coding to
1800  * preserve the flexibility to kill some other transaction than the
1801  * one detecting the deadlock.)
1802  *
1803  * RemoveFromWaitQueue sets MyProc->waitStatus to
1804  * PROC_WAIT_STATUS_ERROR, so ProcSleep will report an error after we
1805  * return from the signal handler.
1806  */
1807  Assert(MyProc->waitLock != NULL);
1809 
1810  /*
1811  * We're done here. Transaction abort caused by the error that
1812  * ProcSleep will raise will cause any other locks we hold to be
1813  * released, thus allowing other processes to wake up; we don't need
1814  * to do that here. NOTE: an exception is that releasing locks we
1815  * hold doesn't consider the possibility of waiters that were blocked
1816  * behind us on the lock we just failed to get, and might now be
1817  * wakable because we're not in front of them anymore. However,
1818  * RemoveFromWaitQueue took care of waking up any such processes.
1819  */
1820  }
1821 
1822  /*
1823  * And release locks. We do this in reverse order for two reasons: (1)
1824  * Anyone else who needs more than one of the locks will be trying to lock
1825  * them in increasing order; we don't want to release the other process
1826  * until it can get all the locks it needs. (2) This avoids O(N^2)
1827  * behavior inside LWLockRelease.
1828  */
1829 check_done:
1830  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
1832 }
1833 
1834 /*
1835  * CheckDeadLockAlert - Handle the expiry of deadlock_timeout.
1836  *
1837  * NB: Runs inside a signal handler, be careful.
1838  */
1839 void
1840 CheckDeadLockAlert(void)
1841 {
1842  int save_errno = errno;
1843 
1844  got_deadlock_timeout = true;
1845 
1846  /*
1847  * Have to set the latch again, even if handle_sig_alarm already did. Back
1848  * then got_deadlock_timeout wasn't yet set... It's unlikely that this
1849  * ever would be a problem, but setting a set latch again is cheap.
1850  *
1851  * Note that, when this function runs inside procsignal_sigusr1_handler(),
1852  * the handler function sets the latch again after the latch is set here.
1853  */
1854  SetLatch(MyLatch);
1855  errno = save_errno;
1856 }
1857 
1858 /*
1859  * ProcWaitForSignal - wait for a signal from another backend.
1860  *
1861  * As this uses the generic process latch the caller has to be robust against
1862  * unrelated wakeups: Always check that the desired state has occurred, and
1863  * wait again if not.
1864  */
1865 void
1866 ProcWaitForSignal(uint32 wait_event_info)
1867 {
1868  WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
1869  wait_event_info);
1870  ResetLatch(MyLatch);
1871  CHECK_FOR_INTERRUPTS();
1872 }
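/*
 * Illustrative sketch (not part of proc.c): the recheck-and-wait-again loop
 * callers are expected to use.  my_condition_satisfied() is a hypothetical
 * stand-in for the caller's shared-state check, and PG_WAIT_EXTENSION is
 * just one possible wait_event_info value.
 */
static void
example_wait_for_condition(void)
{
	while (!my_condition_satisfied())
		ProcWaitForSignal(PG_WAIT_EXTENSION);
}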
1873 
1874 /*
1875  * ProcSendSignal - set the latch of a backend identified by ProcNumber
1876  */
1877 void
1878 ProcSendSignal(ProcNumber procNumber)
1879 {
1880  if (procNumber < 0 || procNumber >= ProcGlobal->allProcCount)
1881  elog(ERROR, "procNumber out of range");
1882 
1883  SetLatch(&ProcGlobal->allProcs[procNumber].procLatch);
1884 }
1885 
1886 /*
1887  * BecomeLockGroupLeader - designate process as lock group leader
1888  *
1889  * Once this function has returned, other processes can join the lock group
1890  * by calling BecomeLockGroupMember.
1891  */
1892 void
1893 BecomeLockGroupLeader(void)
1894 {
1895  LWLock *leader_lwlock;
1896 
1897  /* If we already did it, we don't need to do it again. */
1898  if (MyProc->lockGroupLeader == MyProc)
1899  return;
1900 
1901  /* We had better not be a follower. */
1902  Assert(MyProc->lockGroupLeader == NULL);
1903 
1904  /* Create single-member group, containing only ourselves. */
1905  leader_lwlock = LockHashPartitionLockByProc(MyProc);
1906  LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
1909  LWLockRelease(leader_lwlock);
1910 }
1911 
1912 /*
1913  * BecomeLockGroupMember - designate process as lock group member
1914  *
1915  * This is pretty straightforward except for the possibility that the leader
1916  * whose group we're trying to join might exit before we manage to do so;
1917  * and the PGPROC might get recycled for an unrelated process. To avoid
1918  * that, we require the caller to pass the PID of the intended PGPROC as
1919  * an interlock. Returns true if we successfully join the intended lock
1920  * group, and false if not.
1921  */
1922 bool
1923 BecomeLockGroupMember(PGPROC *leader, int pid)
1924 {
1925  LWLock *leader_lwlock;
1926  bool ok = false;
1927 
1928  /* Group leader can't become member of group */
1929  Assert(MyProc != leader);
1930 
1931  /* Can't already be a member of a group */
1932  Assert(MyProc->lockGroupLeader == NULL);
1933 
1934  /* PID must be valid. */
1935  Assert(pid != 0);
1936 
1937  /*
1938  * Get lock protecting the group fields. Note LockHashPartitionLockByProc
1939  * calculates the proc number based on the PGPROC slot without looking at
1940  * its contents, so we will acquire the correct lock even if the leader
1941  * PGPROC is in process of being recycled.
1942  */
1943  leader_lwlock = LockHashPartitionLockByProc(leader);
1944  LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
1945 
1946  /* Is this the leader we're looking for? */
1947  if (leader->pid == pid && leader->lockGroupLeader == leader)
1948  {
1949  /* OK, join the group */
1950  ok = true;
1951  MyProc->lockGroupLeader = leader;
1952  dlist_push_tail(&leader->lockGroupMembers, &MyProc->lockGroupLink);
1953  }
1954  LWLockRelease(leader_lwlock);
1955 
1956  return ok;
1957 }
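As a usage illustration, a leader that intends to share heavyweight locks with its workers (as parallel query does) first calls BecomeLockGroupLeader and then advertises both its PGPROC pointer and its PID in shared memory; each worker passes that pair to BecomeLockGroupMember so the PID interlock can detect a leader that has already exited. The structure and function names below are assumptions for this sketch, not code from proc.c.

	typedef struct GroupSetupShared
	{
		PGPROC	   *leader_pgproc;	/* leader's PGPROC, valid while PID matches */
		int			leader_pid;		/* interlock against PGPROC recycling */
	} GroupSetupShared;

	/* Leader, before launching workers. */
	static void
	GroupLeaderSetup(GroupSetupShared *shared)
	{
		BecomeLockGroupLeader();
		shared->leader_pgproc = MyProc;
		shared->leader_pid = MyProcPid;
	}

	/* Worker, during startup. */
	static void
	GroupWorkerJoin(GroupSetupShared *shared)
	{
		if (!BecomeLockGroupMember(shared->leader_pgproc, shared->leader_pid))
			elog(ERROR, "lock group leader is no longer running");
	}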