PostgreSQL Source Code (git master)
proc.c
1 /*-------------------------------------------------------------------------
2  *
3  * proc.c
4  * routines to manage per-process shared memory data structure
5  *
6  * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/storage/lmgr/proc.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 /*
16  * Interface (a):
17  * ProcSleep(), ProcWakeup(),
18  *
19  * Waiting for a lock causes the backend to be put to sleep. Whoever releases
20  * the lock wakes the process up again (and gives it an error code so it knows
21  * whether it was awoken on an error condition).
22  *
23  * Interface (b):
24  *
25  * ProcReleaseLocks -- frees the locks associated with current transaction
26  *
27  * ProcKill -- destroys the shared memory state (and locks)
28  * associated with the process.
29  */
30 #include "postgres.h"
31 
32 #include <signal.h>
33 #include <unistd.h>
34 #include <sys/time.h>
35 
36 #include "access/transam.h"
37 #include "access/twophase.h"
38 #include "access/xlogutils.h"
39 #include "miscadmin.h"
40 #include "pgstat.h"
41 #include "postmaster/autovacuum.h"
42 #include "replication/slot.h"
43 #include "replication/slotsync.h"
44 #include "replication/syncrep.h"
45 #include "replication/walsender.h"
47 #include "storage/ipc.h"
48 #include "storage/lmgr.h"
49 #include "storage/pmsignal.h"
50 #include "storage/proc.h"
51 #include "storage/procarray.h"
52 #include "storage/procsignal.h"
53 #include "storage/spin.h"
54 #include "storage/standby.h"
55 #include "utils/timeout.h"
56 #include "utils/timestamp.h"
57 
58 /* GUC variables */
59 int DeadlockTimeout = 1000;
61 int LockTimeout = 0;
65 bool log_lock_waits = false;
66 
67 /* Pointer to this process's PGPROC struct, if any */
68 PGPROC *MyProc = NULL;
70 
71 /*
72  * This spinlock protects the freelist of recycled PGPROC structures.
73  * We cannot use an LWLock because the LWLock manager depends on already
74  * having a PGPROC and a wait semaphore! But these structures are touched
75  * relatively infrequently (only at backend startup or shutdown) and not for
76  * very long, so a spinlock is okay.
77  */
79 
80 /* Pointers to shared-memory structures */
84 
85 /* If we are waiting for a lock, this points to the associated LOCALLOCK */
86 static LOCALLOCK *lockAwaited = NULL;
87 
89 
90 /* Is a deadlock check pending? */
91 static volatile sig_atomic_t got_deadlock_timeout;
92 
93 static void RemoveProcFromArray(int code, Datum arg);
94 static void ProcKill(int code, Datum arg);
95 static void AuxiliaryProcKill(int code, Datum arg);
96 static void CheckDeadLock(void);
97 
98 
99 /*
100  * Report shared-memory space needed by InitProcGlobal.
101  */
102 Size
103 ProcGlobalShmemSize(void)
104 {
105  Size size = 0;
106  Size TotalProcs =
108 
109  /* ProcGlobal */
110  size = add_size(size, sizeof(PROC_HDR));
111  size = add_size(size, mul_size(TotalProcs, sizeof(PGPROC)));
112  size = add_size(size, sizeof(slock_t));
113 
114  size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->xids)));
115  size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->subxidStates)));
116  size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->statusFlags)));
117 
118  return size;
119 }
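/*
 * Illustrative sketch (not part of proc.c): a caller accumulating total
 * shared-memory requirements is expected to fold this estimate in with the
 * overflow-safe add_size(), much as the shmem sizing code elsewhere in the
 * server does.  The function below and its name are only an assumed example
 * of that pattern.
 */
static Size
example_total_shmem_size(void)
{
	Size		size = 0;

	/* ProcGlobal's contribution, as computed above */
	size = add_size(size, ProcGlobalShmemSize());

	/* ... add_size() the estimates of other shared-memory subsystems ... */

	return size;
}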
120 
121 /*
122  * Report number of semaphores needed by InitProcGlobal.
123  */
124 int
125 ProcGlobalSemas(void)
126 {
127  /*
128  * We need a sema per backend (including autovacuum), plus one for each
129  * auxiliary process.
130  */
131  return MaxBackends + NUM_AUXILIARY_PROCS;
132 }
133 
134 /*
135  * InitProcGlobal -
136  * Initialize the global process table during postmaster or standalone
137  * backend startup.
138  *
139  * We also create all the per-process semaphores we will need to support
140  * the requested number of backends. We used to allocate semaphores
141  * only when backends were actually started up, but that is bad because
142  * it lets Postgres fail under load --- a lot of Unix systems are
143  * (mis)configured with small limits on the number of semaphores, and
144  * running out when trying to start another backend is a common failure.
145  * So, now we grab enough semaphores to support the desired max number
146  * of backends immediately at initialization --- if the sysadmin has set
147  * MaxConnections, max_worker_processes, max_wal_senders, or
148  * autovacuum_max_workers higher than his kernel will support, he'll
149  * find out sooner rather than later.
150  *
151  * Another reason for creating semaphores here is that the semaphore
152  * implementation typically requires us to create semaphores in the
153  * postmaster, not in backends.
154  *
155  * Note: this is NOT called by individual backends under a postmaster,
156  * not even in the EXEC_BACKEND case. The ProcGlobal and AuxiliaryProcs
157  * pointers must be propagated specially for EXEC_BACKEND operation.
158  */
159 void
160 InitProcGlobal(void)
161 {
162  PGPROC *procs;
163  int i,
164  j;
165  bool found;
167 
168  /* Create the ProcGlobal shared structure */
169  ProcGlobal = (PROC_HDR *)
170  ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
171  Assert(!found);
172 
173  /*
174  * Initialize the data structures.
175  */
182  ProcGlobal->walwriterLatch = NULL;
186 
187  /*
188  * Create and initialize all the PGPROC structures we'll need. There are
189  * five separate consumers: (1) normal backends, (2) autovacuum workers
190  * and the autovacuum launcher, (3) background workers, (4) auxiliary
191  * processes, and (5) prepared transactions. Each PGPROC structure is
192  * dedicated to exactly one of these purposes, and they do not move
193  * between groups.
194  */
195  procs = (PGPROC *) ShmemAlloc(TotalProcs * sizeof(PGPROC));
196  MemSet(procs, 0, TotalProcs * sizeof(PGPROC));
197  ProcGlobal->allProcs = procs;
198  /* XXX allProcCount isn't really all of them; it excludes prepared xacts */
200 
201  /*
202  * Allocate arrays mirroring PGPROC fields in a dense manner. See
203  * PROC_HDR.
204  *
205  * XXX: It might make sense to increase padding for these arrays, given
206  * how hotly they are accessed.
207  */
208  ProcGlobal->xids =
209  (TransactionId *) ShmemAlloc(TotalProcs * sizeof(*ProcGlobal->xids));
210  MemSet(ProcGlobal->xids, 0, TotalProcs * sizeof(*ProcGlobal->xids));
212  MemSet(ProcGlobal->subxidStates, 0, TotalProcs * sizeof(*ProcGlobal->subxidStates));
213  ProcGlobal->statusFlags = (uint8 *) ShmemAlloc(TotalProcs * sizeof(*ProcGlobal->statusFlags));
214  MemSet(ProcGlobal->statusFlags, 0, TotalProcs * sizeof(*ProcGlobal->statusFlags));
215 
216  for (i = 0; i < TotalProcs; i++)
217  {
218  PGPROC *proc = &procs[i];
219 
220  /* Common initialization for all PGPROCs, regardless of type. */
221 
222  /*
223  * Set up per-PGPROC semaphore, latch, and fpInfoLock. Prepared xact
224  * dummy PGPROCs don't need these though - they're never associated
225  * with a real process
226  */
228  {
229  proc->sem = PGSemaphoreCreate();
230  InitSharedLatch(&(proc->procLatch));
232  }
233 
234  /*
235  * Newly created PGPROCs for normal backends, autovacuum and bgworkers
236  * must be queued up on the appropriate free list. Because there can
237  * only ever be a small, fixed number of auxiliary processes, no free
238  * list is used in that case; InitAuxiliaryProcess() instead uses a
239  * linear search. PGPROCs for prepared transactions are added to a
240  * free list by TwoPhaseShmemInit().
241  */
242  if (i < MaxConnections)
243  {
244  /* PGPROC for normal backend, add to freeProcs list */
247  }
248  else if (i < MaxConnections + autovacuum_max_workers + 1)
249  {
250  /* PGPROC for AV launcher/worker, add to autovacFreeProcs list */
253  }
255  {
256  /* PGPROC for bgworker, add to bgworkerFreeProcs list */
259  }
260  else if (i < MaxBackends)
261  {
262  /* PGPROC for walsender, add to walsenderFreeProcs list */
265  }
266 
267  /* Initialize myProcLocks[] shared memory queues. */
268  for (j = 0; j < NUM_LOCK_PARTITIONS; j++)
269  dlist_init(&(proc->myProcLocks[j]));
270 
271  /* Initialize lockGroupMembers list. */
273 
274  /*
275  * Initialize the atomic variables, otherwise, it won't be safe to
276  * access them for backends that aren't currently in use.
277  */
280  pg_atomic_init_u64(&(proc->waitStart), 0);
281  }
282 
283  /*
284  * Save pointers to the blocks of PGPROC structures reserved for auxiliary
285  * processes and prepared transactions.
286  */
287  AuxiliaryProcs = &procs[MaxBackends];
289 
290  /* Create ProcStructLock spinlock, too */
291  ProcStructLock = (slock_t *) ShmemAlloc(sizeof(slock_t));
293 }
294 
295 /*
296  * InitProcess -- initialize a per-process PGPROC entry for this backend
297  */
298 void
299 InitProcess(void)
300 {
301  dlist_head *procgloballist;
302 
303  /*
304  * ProcGlobal should be set up already (if we are a backend, we inherit
305  * this by fork() or EXEC_BACKEND mechanism from the postmaster).
306  */
307  if (ProcGlobal == NULL)
308  elog(PANIC, "proc header uninitialized");
309 
310  if (MyProc != NULL)
311  elog(ERROR, "you already exist");
312 
313  /* Decide which list should supply our PGPROC. */
315  procgloballist = &ProcGlobal->autovacFreeProcs;
316  else if (IsBackgroundWorker)
317  procgloballist = &ProcGlobal->bgworkerFreeProcs;
318  else if (am_walsender)
319  procgloballist = &ProcGlobal->walsenderFreeProcs;
320  else
321  procgloballist = &ProcGlobal->freeProcs;
322 
323  /*
324  * Try to get a proc struct from the appropriate free list. If this
325  * fails, we must be out of PGPROC structures (not to mention semaphores).
326  *
327  * While we are holding the ProcStructLock, also copy the current shared
328  * estimate of spins_per_delay to local storage.
329  */
331 
333 
334  if (!dlist_is_empty(procgloballist))
335  {
336  MyProc = (PGPROC *) dlist_pop_head_node(procgloballist);
338  }
339  else
340  {
341  /*
342  * If we reach here, all the PGPROCs are in use. This is one of the
343  * possible places to detect "too many backends", so give the standard
344  * error message. XXX do we need to give a different failure message
345  * in the autovacuum case?
346  */
348  if (am_walsender)
349  ereport(FATAL,
350  (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
351  errmsg("number of requested standby connections exceeds max_wal_senders (currently %d)",
352  max_wal_senders)));
353  ereport(FATAL,
354  (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
355  errmsg("sorry, too many clients already")));
356  }
358 
359  /*
360  * Cross-check that the PGPROC is of the type we expect; if this were not
361  * the case, it would get returned to the wrong list.
362  */
363  Assert(MyProc->procgloballist == procgloballist);
364 
365  /*
366  * Now that we have a PGPROC, mark ourselves as an active postmaster
367  * child; this is so that the postmaster can detect it if we exit without
368  * cleaning up. (XXX autovac launcher currently doesn't participate in
369  * this; it probably should.)
370  *
371  * Slot sync worker also does not participate in it, see comments atop
372  * 'struct bkend' in postmaster.c.
373  */
377 
378  /*
379  * Initialize all fields of MyProc, except for those previously
380  * initialized by InitProcGlobal.
381  */
385  MyProc->fpVXIDLock = false;
389  MyProc->pid = MyProcPid;
390  /* backendId, databaseId and roleId will be filled in later */
396  MyProc->delayChkptFlags = 0;
397  MyProc->statusFlags = 0;
398  /* NB -- autovac launcher intentionally does not set IS_AUTOVACUUM */
402  MyProc->lwWaitMode = 0;
403  MyProc->waitLock = NULL;
404  MyProc->waitProcLock = NULL;
406 #ifdef USE_ASSERT_CHECKING
407  {
408  int i;
409 
410  /* Last process should have released all locks. */
411  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
413  }
414 #endif
416 
417  /* Initialize fields for sync rep */
418  MyProc->waitLSN = 0;
421 
422  /* Initialize fields for group XID clearing. */
423  MyProc->procArrayGroupMember = false;
426 
427  /* Check that group locking fields are in a proper initial state. */
428  Assert(MyProc->lockGroupLeader == NULL);
430 
431  /* Initialize wait event information. */
432  MyProc->wait_event_info = 0;
433 
434  /* Initialize fields for group transaction status update. */
435  MyProc->clogGroupMember = false;
441 
442  /*
443  * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
444  * on it. That allows us to repoint the process latch, which so far
445  * points to the process-local one, to the shared one.
446  */
449 
450  /* now that we have a proc, report wait events to shared memory */
452 
453  /*
454  * We might be reusing a semaphore that belonged to a failed process. So
455  * be careful and reinitialize its value here. (This is not strictly
456  * necessary anymore, but seems like a good idea for cleanliness.)
457  */
459 
460  /*
461  * Arrange to clean up at backend exit.
462  */
464 
465  /*
466  * Now that we have a PGPROC, we could try to acquire locks, so initialize
467  * local state needed for LWLocks, and the deadlock checker.
468  */
471 
472 #ifdef EXEC_BACKEND
473 
474  /*
475  * Initialize backend-local pointers to all the shared data structures.
476  * (We couldn't do this until now because it needs LWLocks.)
477  */
478  if (IsUnderPostmaster)
479  AttachSharedMemoryStructs();
480 #endif
481 }
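/*
 * Illustrative sketch (not part of proc.c): the ordering constraint between
 * InitProcess() and InitProcessPhase2() described in the comments above and
 * below.  The real calling sequence lives in the backend initialization code
 * and performs many more steps; only the ordering is shown, as an assumption.
 */
static void
example_backend_startup(void)
{
	/* First obtain a PGPROC, semaphore, and latch ... */
	InitProcess();

	/*
	 * ... then, once LWLocks are usable (and, for EXEC_BACKEND, shared
	 * memory structs are attached), become visible in the ProcArray.
	 */
	InitProcessPhase2();
}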
482 
483 /*
484  * InitProcessPhase2 -- make MyProc visible in the shared ProcArray.
485  *
486  * This is separate from InitProcess because we can't acquire LWLocks until
487  * we've created a PGPROC, but in the EXEC_BACKEND case ProcArrayAdd won't
488  * work until after we've done AttachSharedMemoryStructs.
489  */
490 void
491 InitProcessPhase2(void)
492 {
493  Assert(MyProc != NULL);
494 
495  /*
496  * Add our PGPROC to the PGPROC array in shared memory.
497  */
499 
500  /*
501  * Arrange to clean that up at backend exit.
502  */
504 }
505 
506 /*
507  * InitAuxiliaryProcess -- create a PGPROC entry for an auxiliary process
508  *
509  * This is called by bgwriter and similar processes so that they will have a
510  * MyProc value that's real enough to let them wait for LWLocks. The PGPROC
511  * and sema that are assigned are one of the extra ones created during
512  * InitProcGlobal.
513  *
514  * Auxiliary processes are presently not expected to wait for real (lockmgr)
515  * locks, so we need not set up the deadlock checker. They are never added
516  * to the ProcArray or the sinval messaging mechanism, either. They also
517  * don't get a VXID assigned, since this is only useful when we actually
518  * hold lockmgr locks.
519  *
520  * Startup process however uses locks but never waits for them in the
521  * normal backend sense. Startup process also takes part in sinval messaging
522  * as a sendOnly process, so never reads messages from sinval queue. So
523  * Startup process does have a VXID and does show up in pg_locks.
524  */
525 void
526 InitAuxiliaryProcess(void)
527 {
528  PGPROC *auxproc;
529  int proctype;
530 
531  /*
532  * ProcGlobal should be set up already (if we are a backend, we inherit
533  * this by fork() or EXEC_BACKEND mechanism from the postmaster).
534  */
535  if (ProcGlobal == NULL || AuxiliaryProcs == NULL)
536  elog(PANIC, "proc header uninitialized");
537 
538  if (MyProc != NULL)
539  elog(ERROR, "you already exist");
540 
541  /*
542  * We use the ProcStructLock to protect assignment and releasing of
543  * AuxiliaryProcs entries.
544  *
545  * While we are holding the ProcStructLock, also copy the current shared
546  * estimate of spins_per_delay to local storage.
547  */
549 
551 
552  /*
553  * Find a free auxproc ... *big* trouble if there isn't one ...
554  */
555  for (proctype = 0; proctype < NUM_AUXILIARY_PROCS; proctype++)
556  {
557  auxproc = &AuxiliaryProcs[proctype];
558  if (auxproc->pid == 0)
559  break;
560  }
561  if (proctype >= NUM_AUXILIARY_PROCS)
562  {
564  elog(FATAL, "all AuxiliaryProcs are in use");
565  }
566 
567  /* Mark auxiliary proc as in use by me */
568  /* use volatile pointer to prevent code rearrangement */
569  ((volatile PGPROC *) auxproc)->pid = MyProcPid;
570 
571  MyProc = auxproc;
572 
574 
576 
577  /*
578  * Initialize all fields of MyProc, except for those previously
579  * initialized by InitProcGlobal.
580  */
584  MyProc->fpVXIDLock = false;
593  MyProc->delayChkptFlags = 0;
594  MyProc->statusFlags = 0;
596  MyProc->lwWaitMode = 0;
597  MyProc->waitLock = NULL;
598  MyProc->waitProcLock = NULL;
600 #ifdef USE_ASSERT_CHECKING
601  {
602  int i;
603 
604  /* Last process should have released all locks. */
605  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
607  }
608 #endif
609 
610  /*
611  * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
612  * on it. That allows us to repoint the process latch, which so far
613  * points to the process-local one, to the shared one.
614  */
617 
618  /* now that we have a proc, report wait events to shared memory */
620 
621  /* Check that group locking fields are in a proper initial state. */
622  Assert(MyProc->lockGroupLeader == NULL);
624 
625  /*
626  * We might be reusing a semaphore that belonged to a failed process. So
627  * be careful and reinitialize its value here. (This is not strictly
628  * necessary anymore, but seems like a good idea for cleanliness.)
629  */
631 
632  /*
633  * Arrange to clean up at process exit.
634  */
636 
637  /*
638  * Now that we have a PGPROC, we could try to acquire lightweight locks.
639  * Initialize local state needed for them. (Heavyweight locks cannot be
640  * acquired in aux processes.)
641  */
643 
644 #ifdef EXEC_BACKEND
645 
646  /*
647  * Initialize backend-local pointers to all the shared data structures.
648  * (We couldn't do this until now because it needs LWLocks.)
649  */
650  if (IsUnderPostmaster)
651  AttachSharedMemoryStructs();
652 #endif
653 }
654 
655 /*
656  * Used from bufmgr to share the value of the buffer that Startup waits on,
657  * or to reset the value to "not waiting" (-1). This allows processing
658  * of recovery conflicts for buffer pins. Set is made before backends look
659  * at this value, so locking not required, especially since the set is
660  * an atomic integer set operation.
661  */
662 void
663 SetStartupBufferPinWaitBufId(int bufid)
664 {
665  /* use volatile pointer to prevent code rearrangement */
666  volatile PROC_HDR *procglobal = ProcGlobal;
667 
668  procglobal->startupBufferPinWaitBufId = bufid;
669 }
670 
671 /*
672  * Used by backends when they receive a request to check for buffer pin waits.
673  */
674 int
675 GetStartupBufferPinWaitBufId(void)
676 {
677  /* use volatile pointer to prevent code rearrangement */
678  volatile PROC_HDR *procglobal = ProcGlobal;
679 
680  return procglobal->startupBufferPinWaitBufId;
681 }
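/*
 * Illustrative sketch (not part of proc.c): how the Startup process is
 * expected to use the two functions above around a buffer-pin wait, per the
 * comments; the real caller is in the buffer manager, and bufid here is just
 * a placeholder.
 */
static void
example_startup_buffer_pin_wait(int bufid)
{
	/* Publish the buffer we are about to wait on ... */
	SetStartupBufferPinWaitBufId(bufid);

	/* ... wait for the buffer's pin count to drop (elided) ... */

	/* ... then reset to "not waiting". */
	SetStartupBufferPinWaitBufId(-1);
}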
682 
683 /*
684  * Check whether there are at least N free PGPROC objects. If false is
685  * returned, *nfree will be set to the number of free PGPROC objects.
686  * Otherwise, *nfree will be set to n.
687  *
688  * Note: this is designed on the assumption that N will generally be small.
689  */
690 bool
691 HaveNFreeProcs(int n, int *nfree)
692 {
693  dlist_iter iter;
694 
695  Assert(n > 0);
696  Assert(nfree);
697 
699 
700  *nfree = 0;
702  {
703  (*nfree)++;
704  if (*nfree == n)
705  break;
706  }
707 
709 
710  return (*nfree == n);
711 }
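/*
 * Illustrative sketch (not part of proc.c): a typical use of HaveNFreeProcs()
 * is a reserved-connections style check at connection startup.  The real
 * check lives in the backend startup code; the parameter name and message
 * below are only stand-ins.
 */
static void
example_check_reserved_slots(int reserved_slots)
{
	int			nfree;

	if (!HaveNFreeProcs(reserved_slots, &nfree))
		ereport(FATAL,
				(errcode(ERRCODE_TOO_MANY_CONNECTIONS),
				 errmsg("remaining connection slots are reserved (%d free)",
						nfree)));
}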
712 
713 /*
714  * Check if the current process is awaiting a lock.
715  */
716 bool
717 IsWaitingForLock(void)
718 {
719  if (lockAwaited == NULL)
720  return false;
721 
722  return true;
723 }
724 
725 /*
726  * Cancel any pending wait for lock, when aborting a transaction, and revert
727  * any strong lock count acquisition for a lock being acquired.
728  *
729  * (Normally, this would only happen if we accept a cancel/die
730  * interrupt while waiting; but an ereport(ERROR) before or during the lock
731  * wait is within the realm of possibility, too.)
732  */
733 void
734 LockErrorCleanup(void)
735 {
736  LWLock *partitionLock;
737  DisableTimeoutParams timeouts[2];
738 
739  HOLD_INTERRUPTS();
740 
742 
743  /* Nothing to do if we weren't waiting for a lock */
744  if (lockAwaited == NULL)
745  {
747  return;
748  }
749 
750  /*
751  * Turn off the deadlock and lock timeout timers, if they are still
752  * running (see ProcSleep). Note we must preserve the LOCK_TIMEOUT
753  * indicator flag, since this function is executed before
754  * ProcessInterrupts when responding to SIGINT; else we'd lose the
755  * knowledge that the SIGINT came from a lock timeout and not an external
756  * source.
757  */
758  timeouts[0].id = DEADLOCK_TIMEOUT;
759  timeouts[0].keep_indicator = false;
760  timeouts[1].id = LOCK_TIMEOUT;
761  timeouts[1].keep_indicator = true;
762  disable_timeouts(timeouts, 2);
763 
764  /* Unlink myself from the wait queue, if on it (might not be anymore!) */
765  partitionLock = LockHashPartitionLock(lockAwaited->hashcode);
766  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
767 
769  {
770  /* We could not have been granted the lock yet */
772  }
773  else
774  {
775  /*
776  * Somebody kicked us off the lock queue already. Perhaps they
777  * granted us the lock, or perhaps they detected a deadlock. If they
778  * did grant us the lock, we'd better remember it in our local lock
779  * table.
780  */
783  }
784 
785  lockAwaited = NULL;
786 
787  LWLockRelease(partitionLock);
788 
790 }
791 
792 
793 /*
794  * ProcReleaseLocks() -- release locks associated with current transaction
795  * at main transaction commit or abort
796  *
797  * At main transaction commit, we release standard locks except session locks.
798  * At main transaction abort, we release all locks including session locks.
799  *
800  * Advisory locks are released only if they are transaction-level;
801  * session-level holds remain, whether this is a commit or not.
802  *
803  * At subtransaction commit, we don't release any locks (so this func is not
804  * needed at all); we will defer the releasing to the parent transaction.
805  * At subtransaction abort, we release all locks held by the subtransaction;
806  * this is implemented by retail releasing of the locks under control of
807  * the ResourceOwner mechanism.
808  */
809 void
810 ProcReleaseLocks(bool isCommit)
811 {
812  if (!MyProc)
813  return;
814  /* If waiting, get off wait queue (should only be needed after error) */
816  /* Release standard locks, including session-level if aborting */
818  /* Release transaction-level advisory locks */
820 }
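/*
 * Illustrative sketch (not part of proc.c): main-transaction end is expected
 * to call ProcReleaseLocks() with isCommit reflecting the outcome, as the
 * comment above describes.  The real callers are in the transaction-management
 * code; this wrapper is only an assumed example.
 */
static void
example_transaction_end(bool isCommit)
{
	/* Release this transaction's regular and advisory locks as appropriate */
	ProcReleaseLocks(isCommit);
}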
821 
822 
823 /*
824  * RemoveProcFromArray() -- Remove this process from the shared ProcArray.
825  */
826 static void
827 RemoveProcFromArray(int code, Datum arg)
828 {
829  Assert(MyProc != NULL);
831 }
832 
833 /*
834  * ProcKill() -- Destroy the per-proc data structure for
835  * this process. Release any of its held LW locks.
836  */
837 static void
838 ProcKill(int code, Datum arg)
839 {
840  PGPROC *proc;
841  dlist_head *procgloballist;
842 
843  Assert(MyProc != NULL);
844 
845  /* not safe if forked by system(), etc. */
846  if (MyProc->pid != (int) getpid())
847  elog(PANIC, "ProcKill() called in child process");
848 
849  /* Make sure we're out of the sync rep lists */
851 
852 #ifdef USE_ASSERT_CHECKING
853  {
854  int i;
855 
856  /* Last process should have released all locks. */
857  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
859  }
860 #endif
861 
862  /*
863  * Release any LW locks I am holding. There really shouldn't be any, but
864  * it's cheap to check again before we cut the knees off the LWLock
865  * facility by releasing our PGPROC ...
866  */
868 
869  /* Cancel any pending condition variable sleep, too */
871 
872  /*
873  * Detach from any lock group of which we are a member. If the leader
874  * exits before all other group members, its PGPROC will remain allocated
875  * until the last group process exits; that process must return the
876  * leader's PGPROC to the appropriate list.
877  */
878  if (MyProc->lockGroupLeader != NULL)
879  {
880  PGPROC *leader = MyProc->lockGroupLeader;
881  LWLock *leader_lwlock = LockHashPartitionLockByProc(leader);
882 
883  LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
886  if (dlist_is_empty(&leader->lockGroupMembers))
887  {
888  leader->lockGroupLeader = NULL;
889  if (leader != MyProc)
890  {
891  procgloballist = leader->procgloballist;
892 
893  /* Leader exited first; return its PGPROC. */
895  dlist_push_head(procgloballist, &leader->links);
897  }
898  }
899  else if (leader != MyProc)
900  MyProc->lockGroupLeader = NULL;
901  LWLockRelease(leader_lwlock);
902  }
903 
904  /*
905  * Reset MyLatch to the process local one. This is so that signal
906  * handlers et al can continue using the latch after the shared latch
907  * isn't ours anymore.
908  *
909  * Similarly, stop reporting wait events to MyProc->wait_event_info.
910  *
911  * After that clear MyProc and disown the shared latch.
912  */
915 
916  proc = MyProc;
917  MyProc = NULL;
919  DisownLatch(&proc->procLatch);
920 
921  procgloballist = proc->procgloballist;
923 
924  /*
925  * If we're still a member of a locking group, that means we're a leader
926  * which has somehow exited before its children. The last remaining child
927  * will release our PGPROC. Otherwise, release it now.
928  */
929  if (proc->lockGroupLeader == NULL)
930  {
931  /* Since lockGroupLeader is NULL, lockGroupMembers should be empty. */
933 
934  /* Return PGPROC structure (and semaphore) to appropriate freelist */
935  dlist_push_tail(procgloballist, &proc->links);
936  }
937 
938  /* Update shared estimate of spins_per_delay */
940 
942 
943  /*
944  * This process is no longer present in shared memory in any meaningful
945  * way, so tell the postmaster we've cleaned up acceptably well. (XXX
946  * autovac launcher should be included here someday)
947  *
948  * Slot sync worker is also not a postmaster child, so skip this shared
949  * memory related processing here.
950  */
954 
955  /* wake autovac launcher if needed -- see comments in FreeWorkerInfo */
956  if (AutovacuumLauncherPid != 0)
958 }
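/*
 * Illustrative sketch (not part of proc.c's own flow at this point):
 * ProcKill() has the standard (int code, Datum arg) exit-callback signature
 * and is registered from InitProcess() ("Arrange to clean up at backend
 * exit") through the shmem-exit hook machinery in storage/ipc.h, roughly:
 */
static void
example_register_proc_cleanup(void)
{
	on_shmem_exit(ProcKill, 0);
}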
959 
960 /*
961  * AuxiliaryProcKill() -- Cut-down version of ProcKill for auxiliary
962  * processes (bgwriter, etc). The PGPROC and sema are not released, only
963  * marked as not-in-use.
964  */
965 static void
966 AuxiliaryProcKill(int code, Datum arg)
967 {
968  int proctype = DatumGetInt32(arg);
970  PGPROC *proc;
971 
972  Assert(proctype >= 0 && proctype < NUM_AUXILIARY_PROCS);
973 
974  /* not safe if forked by system(), etc. */
975  if (MyProc->pid != (int) getpid())
976  elog(PANIC, "AuxiliaryProcKill() called in child process");
977 
978  auxproc = &AuxiliaryProcs[proctype];
979 
980  Assert(MyProc == auxproc);
981 
982  /* Release any LW locks I am holding (see notes above) */
984 
985  /* Cancel any pending condition variable sleep, too */
987 
988  /* look at the equivalent ProcKill() code for comments */
991 
992  proc = MyProc;
993  MyProc = NULL;
995  DisownLatch(&proc->procLatch);
996 
998 
999  /* Mark auxiliary proc no longer in use */
1000  proc->pid = 0;
1001 
1002  /* Update shared estimate of spins_per_delay */
1004 
1006 }
1007 
1008 /*
1009  * AuxiliaryPidGetProc -- get PGPROC for an auxiliary process
1010  * given its PID
1011  *
1012  * Returns NULL if not found.
1013  */
1014 PGPROC *
1015 AuxiliaryPidGetProc(int pid)
1016 {
1017  PGPROC *result = NULL;
1018  int index;
1019 
1020  if (pid == 0) /* never match dummy PGPROCs */
1021  return NULL;
1022 
1023  for (index = 0; index < NUM_AUXILIARY_PROCS; index++)
1024  {
1025  PGPROC *proc = &AuxiliaryProcs[index];
1026 
1027  if (proc->pid == pid)
1028  {
1029  result = proc;
1030  break;
1031  }
1032  }
1033  return result;
1034 }
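/*
 * Illustrative sketch (not part of proc.c): callers must cope with a NULL
 * result, since the PID may belong to a regular backend or to no live
 * process at all.  The helper name is hypothetical.
 */
static bool
example_is_auxiliary_pid(int pid)
{
	return AuxiliaryPidGetProc(pid) != NULL;
}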
1035 
1036 
1037 /*
1038  * ProcSleep -- put a process to sleep on the specified lock
1039  *
1040  * Caller must have set MyProc->heldLocks to reflect locks already held
1041  * on the lockable object by this process (under all XIDs).
1042  *
1043  * The lock table's partition lock must be held at entry, and will be held
1044  * at exit.
1045  *
1046  * Result: PROC_WAIT_STATUS_OK if we acquired the lock, PROC_WAIT_STATUS_ERROR if not (deadlock).
1047  *
1048  * ASSUME: that no one will fiddle with the queue until after
1049  * we release the partition lock.
1050  *
1051  * NOTES: The process queue is now a priority queue for locking.
1052  */
1053 ProcWaitStatus
1054 ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
1055 {
1056  LOCKMODE lockmode = locallock->tag.mode;
1057  LOCK *lock = locallock->lock;
1058  PROCLOCK *proclock = locallock->proclock;
1059  uint32 hashcode = locallock->hashcode;
1060  LWLock *partitionLock = LockHashPartitionLock(hashcode);
1061  dclist_head *waitQueue = &lock->waitProcs;
1062  PGPROC *insert_before = NULL;
1063  LOCKMASK myHeldLocks = MyProc->heldLocks;
1064  TimestampTz standbyWaitStart = 0;
1065  bool early_deadlock = false;
1066  bool allow_autovacuum_cancel = true;
1067  bool logged_recovery_conflict = false;
1068  ProcWaitStatus myWaitStatus;
1069  PGPROC *leader = MyProc->lockGroupLeader;
1070 
1071  /*
1072  * If group locking is in use, locks held by members of my locking group
1073  * need to be included in myHeldLocks. This is not required for relation
1074  * extension locks, which conflict even among group members. However,
1075  * including them in myHeldLocks gives group members priority in acquiring
1076  * those locks compared to other backends that are also trying to acquire
1077  * them. OTOH, we could avoid giving group members priority for that kind
1078  * of lock, but there doesn't appear to be any clear advantage in doing
1079  * so.
1080  */
1081  if (leader != NULL)
1082  {
1083  dlist_iter iter;
1084 
1085  dlist_foreach(iter, &lock->procLocks)
1086  {
1087  PROCLOCK *otherproclock;
1088 
1089  otherproclock = dlist_container(PROCLOCK, lockLink, iter.cur);
1090 
1091  if (otherproclock->groupLeader == leader)
1092  myHeldLocks |= otherproclock->holdMask;
1093  }
1094  }
1095 
1096  /*
1097  * Determine where to add myself in the wait queue.
1098  *
1099  * Normally I should go at the end of the queue. However, if I already
1100  * hold locks that conflict with the request of any previous waiter, put
1101  * myself in the queue just in front of the first such waiter. This is not
1102  * a necessary step, since deadlock detection would move me to before that
1103  * waiter anyway; but it's relatively cheap to detect such a conflict
1104  * immediately, and avoid delaying till deadlock timeout.
1105  *
1106  * Special case: if I find I should go in front of some waiter, check to
1107  * see if I conflict with already-held locks or the requests before that
1108  * waiter. If not, then just grant myself the requested lock immediately.
1109  * This is the same as the test for immediate grant in LockAcquire, except
1110  * we are only considering the part of the wait queue before my insertion
1111  * point.
1112  */
1113  if (myHeldLocks != 0 && !dclist_is_empty(waitQueue))
1114  {
1115  LOCKMASK aheadRequests = 0;
1116  dlist_iter iter;
1117 
1118  dclist_foreach(iter, waitQueue)
1119  {
1120  PGPROC *proc = dlist_container(PGPROC, links, iter.cur);
1121 
1122  /*
1123  * If we're part of the same locking group as this waiter, its
1124  * locks neither conflict with ours nor contribute to
1125  * aheadRequests.
1126  */
1127  if (leader != NULL && leader == proc->lockGroupLeader)
1128  continue;
1129 
1130  /* Must he wait for me? */
1131  if (lockMethodTable->conflictTab[proc->waitLockMode] & myHeldLocks)
1132  {
1133  /* Must I wait for him ? */
1134  if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
1135  {
1136  /*
1137  * Yes, so we have a deadlock. Easiest way to clean up
1138  * correctly is to call RemoveFromWaitQueue(), but we
1139  * can't do that until we are *on* the wait queue. So, set
1140  * a flag to check below, and break out of loop. Also,
1141  * record deadlock info for later message.
1142  */
1143  RememberSimpleDeadLock(MyProc, lockmode, lock, proc);
1144  early_deadlock = true;
1145  break;
1146  }
1147  /* I must go before this waiter. Check special case. */
1148  if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
1149  !LockCheckConflicts(lockMethodTable, lockmode, lock,
1150  proclock))
1151  {
1152  /* Skip the wait and just grant myself the lock. */
1153  GrantLock(lock, proclock, lockmode);
1154  GrantAwaitedLock();
1155  return PROC_WAIT_STATUS_OK;
1156  }
1157 
1158  /* Put myself into wait queue before conflicting process */
1159  insert_before = proc;
1160  break;
1161  }
1162  /* Nope, so advance to next waiter */
1163  aheadRequests |= LOCKBIT_ON(proc->waitLockMode);
1164  }
1165  }
1166 
1167  /*
1168  * Insert self into queue, at the position determined above.
1169  */
1170  if (insert_before)
1171  dclist_insert_before(waitQueue, &insert_before->links, &MyProc->links);
1172  else
1173  dclist_push_tail(waitQueue, &MyProc->links);
1174 
1175  lock->waitMask |= LOCKBIT_ON(lockmode);
1176 
1177  /* Set up wait information in PGPROC object, too */
1178  MyProc->waitLock = lock;
1179  MyProc->waitProcLock = proclock;
1180  MyProc->waitLockMode = lockmode;
1181 
1183 
1184  /*
1185  * If we detected deadlock, give up without waiting. This must agree with
1186  * CheckDeadLock's recovery code.
1187  */
1188  if (early_deadlock)
1189  {
1190  RemoveFromWaitQueue(MyProc, hashcode);
1191  return PROC_WAIT_STATUS_ERROR;
1192  }
1193 
1194  /* mark that we are waiting for a lock */
1195  lockAwaited = locallock;
1196 
1197  /*
1198  * Release the lock table's partition lock.
1199  *
1200  * NOTE: this may also cause us to exit critical-section state, possibly
1201  * allowing a cancel/die interrupt to be accepted. This is OK because we
1202  * have recorded the fact that we are waiting for a lock, and so
1203  * LockErrorCleanup will clean up if cancel/die happens.
1204  */
1205  LWLockRelease(partitionLock);
1206 
1207  /*
1208  * Also, now that we will successfully clean up after an ereport, it's
1209  * safe to check to see if there's a buffer pin deadlock against the
1210  * Startup process. Of course, that's only necessary if we're doing Hot
1211  * Standby and are not the Startup process ourselves.
1212  */
1213  if (RecoveryInProgress() && !InRecovery)
1215 
1216  /* Reset deadlock_state before enabling the timeout handler */
1217  deadlock_state = DS_NOT_YET_CHECKED;
1218  got_deadlock_timeout = false;
1219 
1220  /*
1221  * Set timer so we can wake up after awhile and check for a deadlock. If a
1222  * deadlock is detected, the handler sets MyProc->waitStatus =
1223  * PROC_WAIT_STATUS_ERROR, allowing us to know that we must report failure
1224  * rather than success.
1225  *
1226  * By delaying the check until we've waited for a bit, we can avoid
1227  * running the rather expensive deadlock-check code in most cases.
1228  *
1229  * If LockTimeout is set, also enable the timeout for that. We can save a
1230  * few cycles by enabling both timeout sources in one call.
1231  *
1232  * If InHotStandby we set lock waits slightly later for clarity with other
1233  * code.
1234  */
1235  if (!InHotStandby)
1236  {
1237  if (LockTimeout > 0)
1238  {
1239  EnableTimeoutParams timeouts[2];
1240 
1241  timeouts[0].id = DEADLOCK_TIMEOUT;
1242  timeouts[0].type = TMPARAM_AFTER;
1243  timeouts[0].delay_ms = DeadlockTimeout;
1244  timeouts[1].id = LOCK_TIMEOUT;
1245  timeouts[1].type = TMPARAM_AFTER;
1246  timeouts[1].delay_ms = LockTimeout;
1247  enable_timeouts(timeouts, 2);
1248  }
1249  else
1251 
1252  /*
1253  * Use the current time obtained for the deadlock timeout timer as
1254  * waitStart (i.e., the time when this process started waiting for the
1255  * lock). Since fetching the current time again would add overhead, we
1256  * simply reuse the timestamp already obtained for the timer.
1257  *
1258  * Note that waitStart is updated without holding the lock table's
1259  * partition lock, to avoid the overhead by additional lock
1260  * acquisition. This can cause "waitstart" in pg_locks to become NULL
1261  * for a very short period of time after the wait started even though
1262  * "granted" is false. This is OK in practice because we can assume
1263  * that users are likely to look at "waitstart" when waiting for the
1264  * lock for a long time.
1265  */
1268  }
1269  else if (log_recovery_conflict_waits)
1270  {
1271  /*
1272  * Set the wait start timestamp if logging is enabled and in hot
1273  * standby.
1274  */
1275  standbyWaitStart = GetCurrentTimestamp();
1276  }
1277 
1278  /*
1279  * If somebody wakes us between LWLockRelease and WaitLatch, the latch
1280  * will not wait. But a set latch does not necessarily mean that the lock
1281  * is free now, as there are many other sources for latch sets than
1282  * somebody releasing the lock.
1283  *
1284  * We process interrupts whenever the latch has been set, so cancel/die
1285  * interrupts are processed quickly. This means we must not mind losing
1286  * control to a cancel/die interrupt here. We don't, because we have no
1287  * shared-state-change work to do after being granted the lock (the
1288  * grantor did it all). We do have to worry about canceling the deadlock
1289  * timeout and updating the locallock table, but if we lose control to an
1290  * error, LockErrorCleanup will fix that up.
1291  */
1292  do
1293  {
1294  if (InHotStandby)
1295  {
1296  bool maybe_log_conflict =
1297  (standbyWaitStart != 0 && !logged_recovery_conflict);
1298 
1299  /* Set a timer and wait for that or for the lock to be granted */
1301  maybe_log_conflict);
1302 
1303  /*
1304  * Emit the log message if the startup process is waiting longer
1305  * than deadlock_timeout for recovery conflict on lock.
1306  */
1307  if (maybe_log_conflict)
1308  {
1310 
1311  if (TimestampDifferenceExceeds(standbyWaitStart, now,
1312  DeadlockTimeout))
1313  {
1314  VirtualTransactionId *vxids;
1315  int cnt;
1316 
1317  vxids = GetLockConflicts(&locallock->tag.lock,
1318  AccessExclusiveLock, &cnt);
1319 
1320  /*
1321  * Log the recovery conflict and the list of PIDs of
1322  * backends holding the conflicting lock. Note that we do
1323  * logging even if there are no such backends right now
1324  * because the startup process here has already waited
1325  * longer than deadlock_timeout.
1326  */
1328  standbyWaitStart, now,
1329  cnt > 0 ? vxids : NULL, true);
1330  logged_recovery_conflict = true;
1331  }
1332  }
1333  }
1334  else
1335  {
1337  PG_WAIT_LOCK | locallock->tag.lock.locktag_type);
1339  /* check for deadlocks first, as that's probably log-worthy */
1341  {
1342  CheckDeadLock();
1343  got_deadlock_timeout = false;
1344  }
1346  }
1347 
1348  /*
1349  * waitStatus could change from PROC_WAIT_STATUS_WAITING to something
1350  * else asynchronously. Read it just once per loop to prevent
1351  * surprising behavior (such as missing log messages).
1352  */
1353  myWaitStatus = *((volatile ProcWaitStatus *) &MyProc->waitStatus);
1354 
1355  /*
1356  * If we are not deadlocked, but are waiting on an autovacuum-induced
1357  * task, send a signal to interrupt it.
1358  */
1359  if (deadlock_state == DS_BLOCKED_BY_AUTOVACUUM && allow_autovacuum_cancel)
1360  {
1361  PGPROC *autovac = GetBlockingAutoVacuumPgproc();
1362  uint8 statusFlags;
1363  uint8 lockmethod_copy;
1364  LOCKTAG locktag_copy;
1365 
1366  /*
1367  * Grab info we need, then release lock immediately. Note this
1368  * coding means that there is a tiny chance that the process
1369  * terminates its current transaction and starts a different one
1370  * before we have a chance to send the signal; the worst possible
1371  * consequence is that a for-wraparound vacuum is canceled. But
1372  * that could happen in any case unless we were to do kill() with
1373  * the lock held, which is much more undesirable.
1374  */
1375  LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
1376  statusFlags = ProcGlobal->statusFlags[autovac->pgxactoff];
1377  lockmethod_copy = lock->tag.locktag_lockmethodid;
1378  locktag_copy = lock->tag;
1379  LWLockRelease(ProcArrayLock);
1380 
1381  /*
1382  * Only do it if the worker is not working to protect against Xid
1383  * wraparound.
1384  */
1385  if ((statusFlags & PROC_IS_AUTOVACUUM) &&
1386  !(statusFlags & PROC_VACUUM_FOR_WRAPAROUND))
1387  {
1388  int pid = autovac->pid;
1389 
1390  /* report the case, if configured to do so */
1392  {
1393  StringInfoData locktagbuf;
1394  StringInfoData logbuf; /* errdetail for server log */
1395 
1396  initStringInfo(&locktagbuf);
1397  initStringInfo(&logbuf);
1398  DescribeLockTag(&locktagbuf, &locktag_copy);
1399  appendStringInfo(&logbuf,
1400  "Process %d waits for %s on %s.",
1401  MyProcPid,
1402  GetLockmodeName(lockmethod_copy, lockmode),
1403  locktagbuf.data);
1404 
1405  ereport(DEBUG1,
1406  (errmsg_internal("sending cancel to blocking autovacuum PID %d",
1407  pid),
1408  errdetail_log("%s", logbuf.data)));
1409 
1410  pfree(locktagbuf.data);
1411  pfree(logbuf.data);
1412  }
1413 
1414  /* send the autovacuum worker Back to Old Kent Road */
1415  if (kill(pid, SIGINT) < 0)
1416  {
1417  /*
1418  * There's a race condition here: once we release the
1419  * ProcArrayLock, it's possible for the autovac worker to
1420  * close up shop and exit before we can do the kill().
1421  * Therefore, we do not whinge about no-such-process.
1422  * Other errors such as EPERM could conceivably happen if
1423  * the kernel recycles the PID fast enough, but such cases
1424  * seem improbable enough that it's probably best to issue
1425  * a warning if we see some other errno.
1426  */
1427  if (errno != ESRCH)
1428  ereport(WARNING,
1429  (errmsg("could not send signal to process %d: %m",
1430  pid)));
1431  }
1432  }
1433 
1434  /* prevent signal from being sent again more than once */
1435  allow_autovacuum_cancel = false;
1436  }
1437 
1438  /*
1439  * If awoken after the deadlock check interrupt has run, and
1440  * log_lock_waits is on, then report about the wait.
1441  */
1443  {
1445  lock_waiters_sbuf,
1446  lock_holders_sbuf;
1447  const char *modename;
1448  long secs;
1449  int usecs;
1450  long msecs;
1451  dlist_iter proc_iter;
1452  PROCLOCK *curproclock;
1453  bool first_holder = true,
1454  first_waiter = true;
1455  int lockHoldersNum = 0;
1456 
1457  initStringInfo(&buf);
1458  initStringInfo(&lock_waiters_sbuf);
1459  initStringInfo(&lock_holders_sbuf);
1460 
1461  DescribeLockTag(&buf, &locallock->tag.lock);
1462  modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
1463  lockmode);
1466  &secs, &usecs);
1467  msecs = secs * 1000 + usecs / 1000;
1468  usecs = usecs % 1000;
1469 
1470  /*
1471  * we loop over the lock's procLocks to gather a list of all
1472  * holders and waiters. Thus we will be able to provide more
1473  * detailed information for lock debugging purposes.
1474  *
1475  * lock->procLocks contains all processes which hold or wait for
1476  * this lock.
1477  */
1478 
1479  LWLockAcquire(partitionLock, LW_SHARED);
1480 
1481  dlist_foreach(proc_iter, &lock->procLocks)
1482  {
1483  curproclock =
1484  dlist_container(PROCLOCK, lockLink, proc_iter.cur);
1485 
1486  /*
1487  * we are a waiter if myProc->waitProcLock == curproclock; we
1488  * are a holder if it is NULL or something different
1489  */
1490  if (curproclock->tag.myProc->waitProcLock == curproclock)
1491  {
1492  if (first_waiter)
1493  {
1494  appendStringInfo(&lock_waiters_sbuf, "%d",
1495  curproclock->tag.myProc->pid);
1496  first_waiter = false;
1497  }
1498  else
1499  appendStringInfo(&lock_waiters_sbuf, ", %d",
1500  curproclock->tag.myProc->pid);
1501  }
1502  else
1503  {
1504  if (first_holder)
1505  {
1506  appendStringInfo(&lock_holders_sbuf, "%d",
1507  curproclock->tag.myProc->pid);
1508  first_holder = false;
1509  }
1510  else
1511  appendStringInfo(&lock_holders_sbuf, ", %d",
1512  curproclock->tag.myProc->pid);
1513 
1514  lockHoldersNum++;
1515  }
1516  }
1517 
1518  LWLockRelease(partitionLock);
1519 
1520  if (deadlock_state == DS_SOFT_DEADLOCK)
1521  ereport(LOG,
1522  (errmsg("process %d avoided deadlock for %s on %s by rearranging queue order after %ld.%03d ms",
1523  MyProcPid, modename, buf.data, msecs, usecs),
1524  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1525  "Processes holding the lock: %s. Wait queue: %s.",
1526  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1527  else if (deadlock_state == DS_HARD_DEADLOCK)
1528  {
1529  /*
1530  * This message is a bit redundant with the error that will be
1531  * reported subsequently, but in some cases the error report
1532  * might not make it to the log (eg, if it's caught by an
1533  * exception handler), and we want to ensure all long-wait
1534  * events get logged.
1535  */
1536  ereport(LOG,
1537  (errmsg("process %d detected deadlock while waiting for %s on %s after %ld.%03d ms",
1538  MyProcPid, modename, buf.data, msecs, usecs),
1539  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1540  "Processes holding the lock: %s. Wait queue: %s.",
1541  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1542  }
1543 
1544  if (myWaitStatus == PROC_WAIT_STATUS_WAITING)
1545  ereport(LOG,
1546  (errmsg("process %d still waiting for %s on %s after %ld.%03d ms",
1547  MyProcPid, modename, buf.data, msecs, usecs),
1548  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1549  "Processes holding the lock: %s. Wait queue: %s.",
1550  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1551  else if (myWaitStatus == PROC_WAIT_STATUS_OK)
1552  ereport(LOG,
1553  (errmsg("process %d acquired %s on %s after %ld.%03d ms",
1554  MyProcPid, modename, buf.data, msecs, usecs)));
1555  else
1556  {
1557  Assert(myWaitStatus == PROC_WAIT_STATUS_ERROR);
1558 
1559  /*
1560  * Currently, the deadlock checker always kicks its own
1561  * process, which means that we'll only see
1562  * PROC_WAIT_STATUS_ERROR when deadlock_state ==
1563  * DS_HARD_DEADLOCK, and there's no need to print redundant
1564  * messages. But for completeness and future-proofing, print
1565  * a message if it looks like someone else kicked us off the
1566  * lock.
1567  */
1569  ereport(LOG,
1570  (errmsg("process %d failed to acquire %s on %s after %ld.%03d ms",
1571  MyProcPid, modename, buf.data, msecs, usecs),
1572  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1573  "Processes holding the lock: %s. Wait queue: %s.",
1574  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1575  }
1576 
1577  /*
1578  * At this point we might still need to wait for the lock. Reset
1579  * state so we don't print the above messages again.
1580  */
1582 
1583  pfree(buf.data);
1584  pfree(lock_holders_sbuf.data);
1585  pfree(lock_waiters_sbuf.data);
1586  }
1587  } while (myWaitStatus == PROC_WAIT_STATUS_WAITING);
1588 
1589  /*
1590  * Disable the timers, if they are still running. As in LockErrorCleanup,
1591  * we must preserve the LOCK_TIMEOUT indicator flag: if a lock timeout has
1592  * already caused QueryCancelPending to become set, we want the cancel to
1593  * be reported as a lock timeout, not a user cancel.
1594  */
1595  if (!InHotStandby)
1596  {
1597  if (LockTimeout > 0)
1598  {
1599  DisableTimeoutParams timeouts[2];
1600 
1601  timeouts[0].id = DEADLOCK_TIMEOUT;
1602  timeouts[0].keep_indicator = false;
1603  timeouts[1].id = LOCK_TIMEOUT;
1604  timeouts[1].keep_indicator = true;
1605  disable_timeouts(timeouts, 2);
1606  }
1607  else
1609  }
1610 
1611  /*
1612  * Emit the log message if recovery conflict on lock was resolved but the
1613  * startup process waited longer than deadlock_timeout for it.
1614  */
1615  if (InHotStandby && logged_recovery_conflict)
1617  standbyWaitStart, GetCurrentTimestamp(),
1618  NULL, false);
1619 
1620  /*
1621  * Re-acquire the lock table's partition lock. We have to do this to hold
1622  * off cancel/die interrupts before we can mess with lockAwaited (else we
1623  * might have a missed or duplicated locallock update).
1624  */
1625  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
1626 
1627  /*
1628  * We no longer want LockErrorCleanup to do anything.
1629  */
1630  lockAwaited = NULL;
1631 
1632  /*
1633  * If we got the lock, be sure to remember it in the locallock table.
1634  */
1636  GrantAwaitedLock();
1637 
1638  /*
1639  * We don't have to do anything else, because the awaker did all the
1640  * necessary update of the lock table and MyProc.
1641  */
1642  return MyProc->waitStatus;
1643 }
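/*
 * Illustrative sketch (not part of proc.c): the caller-side contract of
 * ProcSleep() as stated in its header comment.  The real caller is the lock
 * manager (WaitOnLock() in lock.c), whose error handling and bookkeeping are
 * omitted here; this only shows the lock-held-at-entry-and-exit rule and the
 * result check, under that assumption.
 */
static void
example_wait_for_lock(LOCALLOCK *locallock, LockMethod lockMethodTable)
{
	LWLock	   *partitionLock = LockHashPartitionLock(locallock->hashcode);

	/* ProcSleep() must be entered with the partition lock held ... */
	LWLockAcquire(partitionLock, LW_EXCLUSIVE);

	/* ... it sleeps on MyProc's latch until awoken by ProcWakeup() ... */
	if (ProcSleep(locallock, lockMethodTable) != PROC_WAIT_STATUS_OK)
	{
		/* deadlock detected; caller reports the error (elided) */
	}

	/* ... and it returns with the partition lock still held. */
	LWLockRelease(partitionLock);
}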
1644 
1645 
1646 /*
1647  * ProcWakeup -- wake up a process by setting its latch.
1648  *
1649  * Also remove the process from the wait queue and set its links invalid.
1650  *
1651  * The appropriate lock partition lock must be held by caller.
1652  *
1653  * XXX: presently, this code is only used for the "success" case, and only
1654  * works correctly for that case. To clean up in failure case, would need
1655  * to twiddle the lock's request counts too --- see RemoveFromWaitQueue.
1656  * Hence, in practice the waitStatus parameter must be PROC_WAIT_STATUS_OK.
1657  */
1658 void
1659 ProcWakeup(PGPROC *proc, ProcWaitStatus waitStatus)
1660 {
1661  if (dlist_node_is_detached(&proc->links))
1662  return;
1663 
1665 
1666  /* Remove process from wait queue */
1668 
1669  /* Clean up process' state and pass it the ok/fail signal */
1670  proc->waitLock = NULL;
1671  proc->waitProcLock = NULL;
1672  proc->waitStatus = waitStatus;
1674 
1675  /* And awaken it */
1676  SetLatch(&proc->procLatch);
1677 }
1678 
1679 /*
1680  * ProcLockWakeup -- routine for waking up processes when a lock is
1681  * released (or a prior waiter is aborted). Scan all waiters
1682  * for lock, waken any that are no longer blocked.
1683  *
1684  * The appropriate lock partition lock must be held by caller.
1685  */
1686 void
1687 ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
1688 {
1689  dclist_head *waitQueue = &lock->waitProcs;
1690  LOCKMASK aheadRequests = 0;
1691  dlist_mutable_iter miter;
1692 
1693  if (dclist_is_empty(waitQueue))
1694  return;
1695 
1696  dclist_foreach_modify(miter, waitQueue)
1697  {
1698  PGPROC *proc = dlist_container(PGPROC, links, miter.cur);
1699  LOCKMODE lockmode = proc->waitLockMode;
1700 
1701  /*
1702  * Waken if (a) doesn't conflict with requests of earlier waiters, and
1703  * (b) doesn't conflict with already-held locks.
1704  */
1705  if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
1706  !LockCheckConflicts(lockMethodTable, lockmode, lock,
1707  proc->waitProcLock))
1708  {
1709  /* OK to waken */
1710  GrantLock(lock, proc->waitProcLock, lockmode);
1711  /* removes proc from the lock's waiting process queue */
1713  }
1714  else
1715  {
1716  /*
1717  * Lock conflicts: Don't wake, but remember requested mode for
1718  * later checks.
1719  */
1720  aheadRequests |= LOCKBIT_ON(lockmode);
1721  }
1722  }
1723 }
1724 
1725 /*
1726  * CheckDeadLock
1727  *
1728  * We only get to this routine, if DEADLOCK_TIMEOUT fired while waiting for a
1729  * lock to be released by some other process. Check if there's a deadlock; if
1730  * not, just return. (But signal ProcSleep to log a message, if
1731  * log_lock_waits is true.) If we have a real deadlock, remove ourselves from
1732  * the lock's wait queue and signal an error to ProcSleep.
1733  */
1734 static void
1735 CheckDeadLock(void)
1736 {
1737  int i;
1738 
1739  /*
1740  * Acquire exclusive lock on the entire shared lock data structures. Must
1741  * grab LWLocks in partition-number order to avoid LWLock deadlock.
1742  *
1743  * Note that the deadlock check interrupt had better not be enabled
1744  * anywhere that this process itself holds lock partition locks, else this
1745  * will wait forever. Also note that LWLockAcquire creates a critical
1746  * section, so that this routine cannot be interrupted by cancel/die
1747  * interrupts.
1748  */
1749  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
1751 
1752  /*
1753  * Check to see if we've been awoken by anyone in the interim.
1754  *
1755  * If we have, we can return and resume our transaction -- happy day.
1756  * Before we are awoken the process releasing the lock grants it to us so
1757  * we know that we don't have to wait anymore.
1758  *
1759  * We check by looking to see if we've been unlinked from the wait queue.
1760  * This is safe because we hold the lock partition lock.
1761  */
1762  if (MyProc->links.prev == NULL ||
1763  MyProc->links.next == NULL)
1764  goto check_done;
1765 
1766 #ifdef LOCK_DEBUG
1767  if (Debug_deadlocks)
1768  DumpAllLocks();
1769 #endif
1770 
1771  /* Run the deadlock check, and set deadlock_state for use by ProcSleep */
1773 
1775  {
1776  /*
1777  * Oops. We have a deadlock.
1778  *
1779  * Get this process out of wait state. (Note: we could do this more
1780  * efficiently by relying on lockAwaited, but use this coding to
1781  * preserve the flexibility to kill some other transaction than the
1782  * one detecting the deadlock.)
1783  *
1784  * RemoveFromWaitQueue sets MyProc->waitStatus to
1785  * PROC_WAIT_STATUS_ERROR, so ProcSleep will report an error after we
1786  * return from the signal handler.
1787  */
1788  Assert(MyProc->waitLock != NULL);
1790 
1791  /*
1792  * We're done here. Transaction abort caused by the error that
1793  * ProcSleep will raise will cause any other locks we hold to be
1794  * released, thus allowing other processes to wake up; we don't need
1795  * to do that here. NOTE: an exception is that releasing locks we
1796  * hold doesn't consider the possibility of waiters that were blocked
1797  * behind us on the lock we just failed to get, and might now be
1798  * wakable because we're not in front of them anymore. However,
1799  * RemoveFromWaitQueue took care of waking up any such processes.
1800  */
1801  }
1802 
1803  /*
1804  * And release locks. We do this in reverse order for two reasons: (1)
1805  * Anyone else who needs more than one of the locks will be trying to lock
1806  * them in increasing order; we don't want to release the other process
1807  * until it can get all the locks it needs. (2) This avoids O(N^2)
1808  * behavior inside LWLockRelease.
1809  */
1810 check_done:
1811  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
1813 }
1814 
1815 /*
1816  * CheckDeadLockAlert - Handle the expiry of deadlock_timeout.
1817  *
1818  * NB: Runs inside a signal handler, be careful.
1819  */
1820 void
1821 CheckDeadLockAlert(void)
1822 {
1823  int save_errno = errno;
1824 
1825  got_deadlock_timeout = true;
1826 
1827  /*
1828  * Have to set the latch again, even if handle_sig_alarm already did. Back
1829  * then got_deadlock_timeout wasn't yet set... It's unlikely that this
1830  * ever would be a problem, but setting a set latch again is cheap.
1831  *
1832  * Note that, when this function runs inside procsignal_sigusr1_handler(),
1833  * the handler function sets the latch again after the latch is set here.
1834  */
1835  SetLatch(MyLatch);
1836  errno = save_errno;
1837 }
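/*
 * Illustrative sketch (not part of proc.c): CheckDeadLockAlert() is the
 * handler for DEADLOCK_TIMEOUT and is expected to be registered during
 * backend startup via the timeout framework in utils/timeout.h, roughly as
 * below; the real registration happens in the backend initialization code.
 */
static void
example_register_deadlock_timeout(void)
{
	RegisterTimeout(DEADLOCK_TIMEOUT, CheckDeadLockAlert);
}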
1838 
1839 /*
1840  * ProcWaitForSignal - wait for a signal from another backend.
1841  *
1842  * As this uses the generic process latch the caller has to be robust against
1843  * unrelated wakeups: Always check that the desired state has occurred, and
1844  * wait again if not.
1845  */
1846 void
1847 ProcWaitForSignal(uint32 wait_event_info)
1848 {
1850  wait_event_info);
1853 }
1854 
1855 /*
1856  * ProcSendSignal - set the latch of a backend identified by pgprocno
1857  */
1858 void
1859 ProcSendSignal(int pgprocno)
1860 {
1861  if (pgprocno < 0 || pgprocno >= ProcGlobal->allProcCount)
1862  elog(ERROR, "pgprocno out of range");
1863 
1864  SetLatch(&ProcGlobal->allProcs[pgprocno].procLatch);
1865 }
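/*
 * Illustrative sketch (not part of proc.c): because the process latch can be
 * set for many unrelated reasons, a waiter must re-check its condition in a
 * loop, as the ProcWaitForSignal() comment says.  my_condition_satisfied()
 * is a placeholder for the caller's own shared-state check, and the waker is
 * assumed to know the waiter's pgprocno.
 */
extern bool my_condition_satisfied(void);	/* hypothetical */

static void
example_wait_for_condition(uint32 wait_event_info)
{
	while (!my_condition_satisfied())
		ProcWaitForSignal(wait_event_info);
}

/* The waking side simply sets the waiter's latch via its pgprocno. */
static void
example_wake_waiter(int waiter_pgprocno)
{
	ProcSendSignal(waiter_pgprocno);
}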
1866 
1867 /*
1868  * BecomeLockGroupLeader - designate process as lock group leader
1869  *
1870  * Once this function has returned, other processes can join the lock group
1871  * by calling BecomeLockGroupMember.
1872  */
1873 void
1874 BecomeLockGroupLeader(void)
1875 {
1876  LWLock *leader_lwlock;
1877 
1878  /* If we already did it, we don't need to do it again. */
1879  if (MyProc->lockGroupLeader == MyProc)
1880  return;
1881 
1882  /* We had better not be a follower. */
1883  Assert(MyProc->lockGroupLeader == NULL);
1884 
1885  /* Create single-member group, containing only ourselves. */
1886  leader_lwlock = LockHashPartitionLockByProc(MyProc);
1887  LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
1890  LWLockRelease(leader_lwlock);
1891 }
1892 
1893 /*
1894  * BecomeLockGroupMember - designate process as lock group member
1895  *
1896  * This is pretty straightforward except for the possibility that the leader
1897  * whose group we're trying to join might exit before we manage to do so;
1898  * and the PGPROC might get recycled for an unrelated process. To avoid
1899  * that, we require the caller to pass the PID of the intended PGPROC as
1900  * an interlock. Returns true if we successfully join the intended lock
1901  * group, and false if not.
1902  */
1903 bool
1904 BecomeLockGroupMember(PGPROC *leader, int pid)
1905 {
1906  LWLock *leader_lwlock;
1907  bool ok = false;
1908 
1909  /* Group leader can't become member of group */
1910  Assert(MyProc != leader);
1911 
1912  /* Can't already be a member of a group */
1913  Assert(MyProc->lockGroupLeader == NULL);
1914 
1915  /* PID must be valid. */
1916  Assert(pid != 0);
1917 
1918  /*
1919  * Get lock protecting the group fields. Note LockHashPartitionLockByProc
1920  * calculates the proc number based on the PGPROC slot without looking at
1921  * its contents, so we will acquire the correct lock even if the leader
1922  * PGPROC is in process of being recycled.
1923  */
1924  leader_lwlock = LockHashPartitionLockByProc(leader);
1925  LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
1926 
1927  /* Is this the leader we're looking for? */
1928  if (leader->pid == pid && leader->lockGroupLeader == leader)
1929  {
1930  /* OK, join the group */
1931  ok = true;
1932  MyProc->lockGroupLeader = leader;
1933  dlist_push_tail(&leader->lockGroupMembers, &MyProc->lockGroupLink);
1934  }
1935  LWLockRelease(leader_lwlock);
1936 
1937  return ok;
1938 }
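
A hedged sketch of the expected call sequence; leader_proc, leader_pid, and the error handling are illustrative (in core PostgreSQL the parallel-query machinery conveys the leader's PGPROC and PID to workers through shared memory):

	/* Leader, before launching any workers: */
	BecomeLockGroupLeader();

	/* Each worker, with the leader's PGPROC and PID read from shared memory: */
	if (!BecomeLockGroupMember(leader_proc, leader_pid))
	{
		/* The leader already exited and its PGPROC may have been recycled
		 * for an unrelated process; do not join, just bail out. */
		proc_exit(1);
	}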