PostgreSQL Source Code (git master)
proc.c
1 /*-------------------------------------------------------------------------
2  *
3  * proc.c
4  * routines to manage per-process shared memory data structure
5  *
6  * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/storage/lmgr/proc.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 /*
16  * Interface (a):
17  * ProcSleep(), ProcWakeup(),
18  * ProcQueueAlloc() -- create a shm queue for sleeping processes
19  * ProcQueueInit() -- create a queue without allocing memory
20  *
21  * Waiting for a lock causes the backend to be put to sleep. Whoever releases
22  * the lock wakes the process up again (and gives it an error code so it knows
23  * whether it was awoken on an error condition).
24  *
25  * Interface (b):
26  *
27  * ProcReleaseLocks -- frees the locks associated with current transaction
28  *
29  * ProcKill -- destroys the shared memory state (and locks)
30  * associated with the process.
31  */
32 #include "postgres.h"
33 
34 #include <signal.h>
35 #include <unistd.h>
36 #include <sys/time.h>
37 
38 #include "access/transam.h"
39 #include "access/twophase.h"
40 #include "access/xlogutils.h"
41 #include "miscadmin.h"
42 #include "pgstat.h"
43 #include "postmaster/autovacuum.h"
44 #include "replication/slot.h"
45 #include "replication/syncrep.h"
46 #include "replication/walsender.h"
47 #include "storage/condition_variable.h"
48 #include "storage/ipc.h"
49 #include "storage/lmgr.h"
50 #include "storage/pmsignal.h"
51 #include "storage/proc.h"
52 #include "storage/procarray.h"
53 #include "storage/procsignal.h"
54 #include "storage/spin.h"
55 #include "storage/standby.h"
56 #include "utils/timeout.h"
57 #include "utils/timestamp.h"
58 
59 /* GUC variables */
60 int DeadlockTimeout = 1000;
62 int LockTimeout = 0;
65 bool log_lock_waits = false;
66 
67 /* Pointer to this process's PGPROC struct, if any */
68 PGPROC *MyProc = NULL;
69 
70 /*
71  * This spinlock protects the freelist of recycled PGPROC structures.
72  * We cannot use an LWLock because the LWLock manager depends on already
73  * having a PGPROC and a wait semaphore! But these structures are touched
74  * relatively infrequently (only at backend startup or shutdown) and not for
75  * very long, so a spinlock is okay.
76  */
77 NON_EXEC_STATIC slock_t *ProcStructLock = NULL;
78 
79 /* Pointers to shared-memory structures */
80 PROC_HDR *ProcGlobal = NULL;
81 NON_EXEC_STATIC PGPROC *AuxiliaryProcs = NULL;
82 PGPROC *PreparedXactProcs = NULL;
83 
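A minimal illustrative sketch (not part of proc.c) of the freelist handling that InitProcess and ProcKill perform under ProcStructLock; the helper name pop_free_pgproc is hypothetical and error handling is omitted:

    static PGPROC *
    pop_free_pgproc(PGPROC *volatile *list)
    {
        PGPROC *proc;

        SpinLockAcquire(ProcStructLock);
        proc = *list;                               /* head of the free list */
        if (proc != NULL)
            *list = (PGPROC *) proc->links.next;    /* unlink it from the list */
        SpinLockRelease(ProcStructLock);
        return proc;
    }
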
84 /* If we are waiting for a lock, this points to the associated LOCALLOCK */
85 static LOCALLOCK *lockAwaited = NULL;
86 
87 static DeadLockState deadlock_state = DS_NOT_YET_CHECKED;
88 
89 /* Is a deadlock check pending? */
90 static volatile sig_atomic_t got_deadlock_timeout;
91 
92 static void RemoveProcFromArray(int code, Datum arg);
93 static void ProcKill(int code, Datum arg);
94 static void AuxiliaryProcKill(int code, Datum arg);
95 static void CheckDeadLock(void);
96 
97 
98 /*
99  * Report shared-memory space needed by InitProcGlobal.
100  */
101 Size
102 ProcGlobalShmemSize(void)
103 {
104  Size size = 0;
105  Size TotalProcs =
106  add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
107 
108  /* ProcGlobal */
109  size = add_size(size, sizeof(PROC_HDR));
110  size = add_size(size, mul_size(TotalProcs, sizeof(PGPROC)));
111  size = add_size(size, sizeof(slock_t));
112 
113  size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->xids)));
114  size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->subxidStates)));
115  size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->statusFlags)));
116 
117  return size;
118 }
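The same overflow-aware sizing idiom, shown as a hypothetical module might apply it to its own shared state (illustrative only; MyModuleShmemSize and nslots are made-up names). add_size() and mul_size() raise an error on overflow rather than silently wrapping:

    static Size
    MyModuleShmemSize(int nslots)
    {
        Size size = 0;

        size = add_size(size, sizeof(int));                          /* fixed header */
        size = add_size(size, mul_size(nslots, sizeof(PGPROC *)));   /* per-slot pointers */
        return size;
    }
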
119 
120 /*
121  * Report number of semaphores needed by InitProcGlobal.
122  */
123 int
124 ProcGlobalSemas(void)
125 {
126  /*
127  * We need a sema per backend (including autovacuum), plus one for each
128  * auxiliary process.
129  */
130  return MaxBackends + NUM_AUXILIARY_PROCS;
131 }
132 
133 /*
134  * InitProcGlobal -
135  * Initialize the global process table during postmaster or standalone
136  * backend startup.
137  *
138  * We also create all the per-process semaphores we will need to support
139  * the requested number of backends. We used to allocate semaphores
140  * only when backends were actually started up, but that is bad because
141  * it lets Postgres fail under load --- a lot of Unix systems are
142  * (mis)configured with small limits on the number of semaphores, and
143  * running out when trying to start another backend is a common failure.
144  * So, now we grab enough semaphores to support the desired max number
145  * of backends immediately at initialization --- if the sysadmin has set
146  * MaxConnections, max_worker_processes, max_wal_senders, or
147  * autovacuum_max_workers higher than his kernel will support, he'll
148  * find out sooner rather than later.
149  *
150  * Another reason for creating semaphores here is that the semaphore
151  * implementation typically requires us to create semaphores in the
152  * postmaster, not in backends.
153  *
154  * Note: this is NOT called by individual backends under a postmaster,
155  * not even in the EXEC_BACKEND case. The ProcGlobal and AuxiliaryProcs
156  * pointers must be propagated specially for EXEC_BACKEND operation.
157  */
158 void
159 InitProcGlobal(void)
160 {
161  PGPROC *procs;
162  int i,
163  j;
164  bool found;
166 
167  /* Create the ProcGlobal shared structure */
168  ProcGlobal = (PROC_HDR *)
169  ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
170  Assert(!found);
171 
172  /*
173  * Initialize the data structures.
174  */
176  ProcGlobal->freeProcs = NULL;
181  ProcGlobal->walwriterLatch = NULL;
185 
186  /*
187  * Create and initialize all the PGPROC structures we'll need. There are
188  * five separate consumers: (1) normal backends, (2) autovacuum workers
189  * and the autovacuum launcher, (3) background workers, (4) auxiliary
190  * processes, and (5) prepared transactions. Each PGPROC structure is
191  * dedicated to exactly one of these purposes, and they do not move
192  * between groups.
193  */
194  procs = (PGPROC *) ShmemAlloc(TotalProcs * sizeof(PGPROC));
195  MemSet(procs, 0, TotalProcs * sizeof(PGPROC));
196  ProcGlobal->allProcs = procs;
197  /* XXX allProcCount isn't really all of them; it excludes prepared xacts */
199 
200  /*
201  * Allocate arrays mirroring PGPROC fields in a dense manner. See
202  * PROC_HDR.
203  *
204  * XXX: It might make sense to increase padding for these arrays, given
205  * how hotly they are accessed.
206  */
207  ProcGlobal->xids =
208  (TransactionId *) ShmemAlloc(TotalProcs * sizeof(*ProcGlobal->xids));
209  MemSet(ProcGlobal->xids, 0, TotalProcs * sizeof(*ProcGlobal->xids));
211  MemSet(ProcGlobal->subxidStates, 0, TotalProcs * sizeof(*ProcGlobal->subxidStates));
212  ProcGlobal->statusFlags = (uint8 *) ShmemAlloc(TotalProcs * sizeof(*ProcGlobal->statusFlags));
213  MemSet(ProcGlobal->statusFlags, 0, TotalProcs * sizeof(*ProcGlobal->statusFlags));
214 
215  for (i = 0; i < TotalProcs; i++)
216  {
217  /* Common initialization for all PGPROCs, regardless of type. */
218 
219  /*
220  * Set up per-PGPROC semaphore, latch, and fpInfoLock. Prepared xact
221  * dummy PGPROCs don't need these though - they're never associated
222  * with a real process
223  */
224  if (i < MaxBackends + NUM_AUXILIARY_PROCS)
225  {
226  procs[i].sem = PGSemaphoreCreate();
227  InitSharedLatch(&(procs[i].procLatch));
228  LWLockInitialize(&(procs[i].fpInfoLock), LWTRANCHE_LOCK_FASTPATH);
229  }
230  procs[i].pgprocno = i;
231 
232  /*
233  * Newly created PGPROCs for normal backends, autovacuum and bgworkers
234  * must be queued up on the appropriate free list. Because there can
235  * only ever be a small, fixed number of auxiliary processes, no free
236  * list is used in that case; InitAuxiliaryProcess() instead uses a
237  * linear search. PGPROCs for prepared transactions are added to a
238  * free list by TwoPhaseShmemInit().
239  */
240  if (i < MaxConnections)
241  {
242  /* PGPROC for normal backend, add to freeProcs list */
243  procs[i].links.next = (SHM_QUEUE *) ProcGlobal->freeProcs;
244  ProcGlobal->freeProcs = &procs[i];
246  }
247  else if (i < MaxConnections + autovacuum_max_workers + 1)
248  {
249  /* PGPROC for AV launcher/worker, add to autovacFreeProcs list */
251  ProcGlobal->autovacFreeProcs = &procs[i];
253  }
254  else if (i < MaxConnections + autovacuum_max_workers + 1 + max_worker_processes)
255  {
256  /* PGPROC for bgworker, add to bgworkerFreeProcs list */
258  ProcGlobal->bgworkerFreeProcs = &procs[i];
260  }
261  else if (i < MaxBackends)
262  {
263  /* PGPROC for walsender, add to walsenderFreeProcs list */
265  ProcGlobal->walsenderFreeProcs = &procs[i];
267  }
268 
269  /* Initialize myProcLocks[] shared memory queues. */
270  for (j = 0; j < NUM_LOCK_PARTITIONS; j++)
271  SHMQueueInit(&(procs[i].myProcLocks[j]));
272 
273  /* Initialize lockGroupMembers list. */
274  dlist_init(&procs[i].lockGroupMembers);
275 
276  /*
277  * Initialize the atomic variables, otherwise, it won't be safe to
278  * access them for backends that aren't currently in use.
279  */
280  pg_atomic_init_u32(&(procs[i].procArrayGroupNext), INVALID_PGPROCNO);
281  pg_atomic_init_u32(&(procs[i].clogGroupNext), INVALID_PGPROCNO);
282  pg_atomic_init_u64(&(procs[i].waitStart), 0);
283  }
284 
285  /*
286  * Save pointers to the blocks of PGPROC structures reserved for auxiliary
287  * processes and prepared transactions.
288  */
289  AuxiliaryProcs = &procs[MaxBackends];
291 
292  /* Create ProcStructLock spinlock, too */
293  ProcStructLock = (slock_t *) ShmemAlloc(sizeof(slock_t));
295 }
296 
297 /*
298  * InitProcess -- initialize a per-process data structure for this backend
299  */
300 void
301 InitProcess(void)
302 {
303  PGPROC *volatile *procgloballist;
304 
305  /*
306  * ProcGlobal should be set up already (if we are a backend, we inherit
307  * this by fork() or EXEC_BACKEND mechanism from the postmaster).
308  */
309  if (ProcGlobal == NULL)
310  elog(PANIC, "proc header uninitialized");
311 
312  if (MyProc != NULL)
313  elog(ERROR, "you already exist");
314 
315  /* Decide which list should supply our PGPROC. */
317  procgloballist = &ProcGlobal->autovacFreeProcs;
318  else if (IsBackgroundWorker)
319  procgloballist = &ProcGlobal->bgworkerFreeProcs;
320  else if (am_walsender)
321  procgloballist = &ProcGlobal->walsenderFreeProcs;
322  else
323  procgloballist = &ProcGlobal->freeProcs;
324 
325  /*
326  * Try to get a proc struct from the appropriate free list. If this
327  * fails, we must be out of PGPROC structures (not to mention semaphores).
328  *
329  * While we are holding the ProcStructLock, also copy the current shared
330  * estimate of spins_per_delay to local storage.
331  */
333 
335 
336  MyProc = *procgloballist;
337 
338  if (MyProc != NULL)
339  {
340  *procgloballist = (PGPROC *) MyProc->links.next;
342  }
343  else
344  {
345  /*
346  * If we reach here, all the PGPROCs are in use. This is one of the
347  * possible places to detect "too many backends", so give the standard
348  * error message. XXX do we need to give a different failure message
349  * in the autovacuum case?
350  */
352  if (am_walsender)
353  ereport(FATAL,
354  (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
355  errmsg("number of requested standby connections exceeds max_wal_senders (currently %d)",
356  max_wal_senders)));
357  ereport(FATAL,
358  (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
359  errmsg("sorry, too many clients already")));
360  }
361 
362  /*
363  * Cross-check that the PGPROC is of the type we expect; if this were not
364  * the case, it would get returned to the wrong list.
365  */
366  Assert(MyProc->procgloballist == procgloballist);
367 
368  /*
369  * Now that we have a PGPROC, mark ourselves as an active postmaster
370  * child; this is so that the postmaster can detect it if we exit without
371  * cleaning up. (XXX autovac launcher currently doesn't participate in
372  * this; it probably should.)
373  */
376 
377  /*
378  * Initialize all fields of MyProc, except for those previously
379  * initialized by InitProcGlobal.
380  */
384  MyProc->fpVXIDLock = false;
388  MyProc->pid = MyProcPid;
389  /* backendId, databaseId and roleId will be filled in later */
395  MyProc->delayChkpt = false;
396  MyProc->statusFlags = 0;
397  /* NB -- autovac launcher intentionally does not set IS_AUTOVACUUM */
400  MyProc->lwWaiting = false;
401  MyProc->lwWaitMode = 0;
402  MyProc->waitLock = NULL;
403  MyProc->waitProcLock = NULL;
405 #ifdef USE_ASSERT_CHECKING
406  {
407  int i;
408 
409  /* Last process should have released all locks. */
410  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
412  }
413 #endif
415 
416  /* Initialize fields for sync rep */
417  MyProc->waitLSN = 0;
420 
421  /* Initialize fields for group XID clearing. */
422  MyProc->procArrayGroupMember = false;
425 
426  /* Check that group locking fields are in a proper initial state. */
427  Assert(MyProc->lockGroupLeader == NULL);
429 
430  /* Initialize wait event information. */
431  MyProc->wait_event_info = 0;
432 
433  /* Initialize fields for group transaction status update. */
434  MyProc->clogGroupMember = false;
440 
441  /*
442  * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
443  * on it. That allows us to repoint the process latch, which so far
444  * points to process local one, to the shared one.
445  */
448 
449  /* now that we have a proc, report wait events to shared memory */
451 
452  /*
453  * We might be reusing a semaphore that belonged to a failed process. So
454  * be careful and reinitialize its value here. (This is not strictly
455  * necessary anymore, but seems like a good idea for cleanliness.)
456  */
458 
459  /*
460  * Arrange to clean up at backend exit.
461  */
463 
464  /*
465  * Now that we have a PGPROC, we could try to acquire locks, so initialize
466  * local state needed for LWLocks, and the deadlock checker.
467  */
470 }
471 
472 /*
473  * InitProcessPhase2 -- make MyProc visible in the shared ProcArray.
474  *
475  * This is separate from InitProcess because we can't acquire LWLocks until
476  * we've created a PGPROC, but in the EXEC_BACKEND case ProcArrayAdd won't
477  * work until after we've done CreateSharedMemoryAndSemaphores.
478  */
479 void
480 InitProcessPhase2(void)
481 {
482  Assert(MyProc != NULL);
483 
484  /*
485  * Add our PGPROC to the PGPROC array in shared memory.
486  */
488 
489  /*
490  * Arrange to clean that up at backend exit.
491  */
493 }
494 
495 /*
496  * InitAuxiliaryProcess -- create a per-auxiliary-process data structure
497  *
498  * This is called by bgwriter and similar processes so that they will have a
499  * MyProc value that's real enough to let them wait for LWLocks. The PGPROC
500  * and sema that are assigned are one of the extra ones created during
501  * InitProcGlobal.
502  *
503  * Auxiliary processes are presently not expected to wait for real (lockmgr)
504  * locks, so we need not set up the deadlock checker. They are never added
505  * to the ProcArray or the sinval messaging mechanism, either. They also
506  * don't get a VXID assigned, since this is only useful when we actually
507  * hold lockmgr locks.
508  *
509  * Startup process however uses locks but never waits for them in the
510  * normal backend sense. Startup process also takes part in sinval messaging
511  * as a sendOnly process, so never reads messages from sinval queue. So
512  * Startup process does have a VXID and does show up in pg_locks.
513  */
514 void
515 InitAuxiliaryProcess(void)
516 {
517  PGPROC *auxproc;
518  int proctype;
519 
520  /*
521  * ProcGlobal should be set up already (if we are a backend, we inherit
522  * this by fork() or EXEC_BACKEND mechanism from the postmaster).
523  */
524  if (ProcGlobal == NULL || AuxiliaryProcs == NULL)
525  elog(PANIC, "proc header uninitialized");
526 
527  if (MyProc != NULL)
528  elog(ERROR, "you already exist");
529 
530  /*
531  * We use the ProcStructLock to protect assignment and releasing of
532  * AuxiliaryProcs entries.
533  *
534  * While we are holding the ProcStructLock, also copy the current shared
535  * estimate of spins_per_delay to local storage.
536  */
538 
540 
541  /*
542  * Find a free auxproc ... *big* trouble if there isn't one ...
543  */
544  for (proctype = 0; proctype < NUM_AUXILIARY_PROCS; proctype++)
545  {
546  auxproc = &AuxiliaryProcs[proctype];
547  if (auxproc->pid == 0)
548  break;
549  }
550  if (proctype >= NUM_AUXILIARY_PROCS)
551  {
553  elog(FATAL, "all AuxiliaryProcs are in use");
554  }
555 
556  /* Mark auxiliary proc as in use by me */
557  /* use volatile pointer to prevent code rearrangement */
558  ((volatile PGPROC *) auxproc)->pid = MyProcPid;
559 
560  MyProc = auxproc;
561 
563 
564  /*
565  * Initialize all fields of MyProc, except for those previously
566  * initialized by InitProcGlobal.
567  */
571  MyProc->fpVXIDLock = false;
580  MyProc->delayChkpt = false;
581  MyProc->statusFlags = 0;
582  MyProc->lwWaiting = false;
583  MyProc->lwWaitMode = 0;
584  MyProc->waitLock = NULL;
585  MyProc->waitProcLock = NULL;
587 #ifdef USE_ASSERT_CHECKING
588  {
589  int i;
590 
591  /* Last process should have released all locks. */
592  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
594  }
595 #endif
596 
597  /*
598  * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
599  * on it. That allows us to repoint the process latch, which so far
600  * points to process local one, to the shared one.
601  */
604 
605  /* now that we have a proc, report wait events to shared memory */
607 
608  /* Check that group locking fields are in a proper initial state. */
609  Assert(MyProc->lockGroupLeader == NULL);
611 
612  /*
613  * We might be reusing a semaphore that belonged to a failed process. So
614  * be careful and reinitialize its value here. (This is not strictly
615  * necessary anymore, but seems like a good idea for cleanliness.)
616  */
618 
619  /*
620  * Arrange to clean up at process exit.
621  */
623 }
624 
625 /*
626  * Used from bufmgr to share the value of the buffer that Startup waits on,
627  * or to reset the value to "not waiting" (-1). This allows processing
628  * of recovery conflicts for buffer pins. Set is made before backends look
629  * at this value, so locking not required, especially since the set is
630  * an atomic integer set operation.
631  */
632 void
633 SetStartupBufferPinWaitBufId(int bufid)
634 {
635  /* use volatile pointer to prevent code rearrangement */
636  volatile PROC_HDR *procglobal = ProcGlobal;
637 
638  procglobal->startupBufferPinWaitBufId = bufid;
639 }
640 
641 /*
642  * Used by backends when they receive a request to check for buffer pin waits.
643  */
644 int
645 GetStartupBufferPinWaitBufId(void)
646 {
647  /* use volatile pointer to prevent code rearrangement */
648  volatile PROC_HDR *procglobal = ProcGlobal;
649 
650  return procglobal->startupBufferPinWaitBufId;
651 }
652 
653 /*
654  * Check whether there are at least N free PGPROC objects.
655  *
656  * Note: this is designed on the assumption that N will generally be small.
657  */
658 bool
659 HaveNFreeProcs(int n)
660 {
661  PGPROC *proc;
662 
664 
665  proc = ProcGlobal->freeProcs;
666 
667  while (n > 0 && proc != NULL)
668  {
669  proc = (PGPROC *) proc->links.next;
670  n--;
671  }
672 
674 
675  return (n <= 0);
676 }
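A hedged usage sketch (not part of proc.c): the connection-startup check that keeps some slots in reserve for superusers looks roughly like this, where reserved_slots and am_superuser stand in for the real configuration variable and role check:

    /* refuse an ordinary connection if it would eat into the reserved slots */
    if (!am_superuser && reserved_slots > 0 && !HaveNFreeProcs(reserved_slots))
        ereport(FATAL,
                (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
                 errmsg("remaining connection slots are reserved for superuser connections")));
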
677 
678 /*
679  * Check if the current process is awaiting a lock.
680  */
681 bool
682 IsWaitingForLock(void)
683 {
684  if (lockAwaited == NULL)
685  return false;
686 
687  return true;
688 }
689 
690 /*
691  * Cancel any pending wait for lock, when aborting a transaction, and revert
692  * any strong lock count acquisition for a lock being acquired.
693  *
694  * (Normally, this would only happen if we accept a cancel/die
695  * interrupt while waiting; but an ereport(ERROR) before or during the lock
696  * wait is within the realm of possibility, too.)
697  */
698 void
699 LockErrorCleanup(void)
700 {
701  LWLock *partitionLock;
702  DisableTimeoutParams timeouts[2];
703 
704  HOLD_INTERRUPTS();
705 
707 
708  /* Nothing to do if we weren't waiting for a lock */
709  if (lockAwaited == NULL)
710  {
712  return;
713  }
714 
715  /*
716  * Turn off the deadlock and lock timeout timers, if they are still
717  * running (see ProcSleep). Note we must preserve the LOCK_TIMEOUT
718  * indicator flag, since this function is executed before
719  * ProcessInterrupts when responding to SIGINT; else we'd lose the
720  * knowledge that the SIGINT came from a lock timeout and not an external
721  * source.
722  */
723  timeouts[0].id = DEADLOCK_TIMEOUT;
724  timeouts[0].keep_indicator = false;
725  timeouts[1].id = LOCK_TIMEOUT;
726  timeouts[1].keep_indicator = true;
727  disable_timeouts(timeouts, 2);
728 
729  /* Unlink myself from the wait queue, if on it (might not be anymore!) */
730  partitionLock = LockHashPartitionLock(lockAwaited->hashcode);
731  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
732 
733  if (MyProc->links.next != NULL)
734  {
735  /* We could not have been granted the lock yet */
737  }
738  else
739  {
740  /*
741  * Somebody kicked us off the lock queue already. Perhaps they
742  * granted us the lock, or perhaps they detected a deadlock. If they
743  * did grant us the lock, we'd better remember it in our local lock
744  * table.
745  */
748  }
749 
750  lockAwaited = NULL;
751 
752  LWLockRelease(partitionLock);
753 
755 }
756 
757 
758 /*
759  * ProcReleaseLocks() -- release locks associated with current transaction
760  * at main transaction commit or abort
761  *
762  * At main transaction commit, we release standard locks except session locks.
763  * At main transaction abort, we release all locks including session locks.
764  *
765  * Advisory locks are released only if they are transaction-level;
766  * session-level holds remain, whether this is a commit or not.
767  *
768  * At subtransaction commit, we don't release any locks (so this func is not
769  * needed at all); we will defer the releasing to the parent transaction.
770  * At subtransaction abort, we release all locks held by the subtransaction;
771  * this is implemented by retail releasing of the locks under control of
772  * the ResourceOwner mechanism.
773  */
774 void
775 ProcReleaseLocks(bool isCommit)
776 {
777  if (!MyProc)
778  return;
779  /* If waiting, get off wait queue (should only be needed after error) */
781  /* Release standard locks, including session-level if aborting */
783  /* Release transaction-level advisory locks */
785 }
786 
787 
788 /*
789  * RemoveProcFromArray() -- Remove this process from the shared ProcArray.
790  */
791 static void
792 RemoveProcFromArray(int code, Datum arg)
793 {
794  Assert(MyProc != NULL);
796 }
797 
798 /*
799  * ProcKill() -- Destroy the per-proc data structure for
800  * this process. Release any of its held LW locks.
801  */
802 static void
803 ProcKill(int code, Datum arg)
804 {
805  PGPROC *proc;
806  PGPROC *volatile *procgloballist;
807 
808  Assert(MyProc != NULL);
809 
810  /* Make sure we're out of the sync rep lists */
812 
813 #ifdef USE_ASSERT_CHECKING
814  {
815  int i;
816 
817  /* Last process should have released all locks. */
818  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
820  }
821 #endif
822 
823  /*
824  * Release any LW locks I am holding. There really shouldn't be any, but
825  * it's cheap to check again before we cut the knees off the LWLock
826  * facility by releasing our PGPROC ...
827  */
829 
830  /* Cancel any pending condition variable sleep, too */
832 
833  /* Make sure active replication slots are released */
834  if (MyReplicationSlot != NULL)
836 
837  /* Also cleanup all the temporary slots. */
839 
840  /*
841  * Detach from any lock group of which we are a member. If the leader
842  * exits before all other group members, its PGPROC will remain allocated
843  * until the last group process exits; that process must return the
844  * leader's PGPROC to the appropriate list.
845  */
846  if (MyProc->lockGroupLeader != NULL)
847  {
848  PGPROC *leader = MyProc->lockGroupLeader;
849  LWLock *leader_lwlock = LockHashPartitionLockByProc(leader);
850 
851  LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
854  if (dlist_is_empty(&leader->lockGroupMembers))
855  {
856  leader->lockGroupLeader = NULL;
857  if (leader != MyProc)
858  {
859  procgloballist = leader->procgloballist;
860 
861  /* Leader exited first; return its PGPROC. */
863  leader->links.next = (SHM_QUEUE *) *procgloballist;
864  *procgloballist = leader;
866  }
867  }
868  else if (leader != MyProc)
869  MyProc->lockGroupLeader = NULL;
870  LWLockRelease(leader_lwlock);
871  }
872 
873  /*
874  * Reset MyLatch to the process local one. This is so that signal
875  * handlers et al can continue using the latch after the shared latch
876  * isn't ours anymore.
877  *
878  * Similarly, stop reporting wait events to MyProc->wait_event_info.
879  *
880  * After that clear MyProc and disown the shared latch.
881  */
884 
885  proc = MyProc;
886  MyProc = NULL;
887  DisownLatch(&proc->procLatch);
888 
889  procgloballist = proc->procgloballist;
891 
892  /*
893  * If we're still a member of a locking group, that means we're a leader
894  * which has somehow exited before its children. The last remaining child
895  * will release our PGPROC. Otherwise, release it now.
896  */
897  if (proc->lockGroupLeader == NULL)
898  {
899  /* Since lockGroupLeader is NULL, lockGroupMembers should be empty. */
901 
902  /* Return PGPROC structure (and semaphore) to appropriate freelist */
903  proc->links.next = (SHM_QUEUE *) *procgloballist;
904  *procgloballist = proc;
905  }
906 
907  /* Update shared estimate of spins_per_delay */
909 
911 
912  /*
913  * This process is no longer present in shared memory in any meaningful
914  * way, so tell the postmaster we've cleaned up acceptably well. (XXX
915  * autovac launcher should be included here someday)
916  */
919 
920  /* wake autovac launcher if needed -- see comments in FreeWorkerInfo */
921  if (AutovacuumLauncherPid != 0)
923 }
924 
925 /*
926  * AuxiliaryProcKill() -- Cut-down version of ProcKill for auxiliary
927  * processes (bgwriter, etc). The PGPROC and sema are not released, only
928  * marked as not-in-use.
929  */
930 static void
931 AuxiliaryProcKill(int code, Datum arg)
932 {
933  int proctype = DatumGetInt32(arg);
934  PGPROC *auxproc PG_USED_FOR_ASSERTS_ONLY;
935  PGPROC *proc;
936 
937  Assert(proctype >= 0 && proctype < NUM_AUXILIARY_PROCS);
938 
939  auxproc = &AuxiliaryProcs[proctype];
940 
941  Assert(MyProc == auxproc);
942 
943  /* Release any LW locks I am holding (see notes above) */
945 
946  /* Cancel any pending condition variable sleep, too */
948 
949  /* look at the equivalent ProcKill() code for comments */
952 
953  proc = MyProc;
954  MyProc = NULL;
955  DisownLatch(&proc->procLatch);
956 
958 
959  /* Mark auxiliary proc no longer in use */
960  proc->pid = 0;
961 
962  /* Update shared estimate of spins_per_delay */
964 
966 }
967 
968 /*
969  * AuxiliaryPidGetProc -- get PGPROC for an auxiliary process
970  * given its PID
971  *
972  * Returns NULL if not found.
973  */
974 PGPROC *
975 AuxiliaryPidGetProc(int pid)
976 {
977  PGPROC *result = NULL;
978  int index;
979 
980  if (pid == 0) /* never match dummy PGPROCs */
981  return NULL;
982 
983  for (index = 0; index < NUM_AUXILIARY_PROCS; index++)
984  {
985  PGPROC *proc = &AuxiliaryProcs[index];
986 
987  if (proc->pid == pid)
988  {
989  result = proc;
990  break;
991  }
992  }
993  return result;
994 }
995 
996 /*
997  * ProcQueue package: routines for putting processes to sleep
998  * and waking them up
999  */
1000 
1001 /*
1002  * ProcQueueAlloc -- alloc/attach to a shared memory process queue
1003  *
1004  * Returns: a pointer to the queue
1005  * Side Effects: Initializes the queue if it wasn't there before
1006  */
1007 #ifdef NOT_USED
1008 PROC_QUEUE *
1009 ProcQueueAlloc(const char *name)
1010 {
1011  PROC_QUEUE *queue;
1012  bool found;
1013 
1014  queue = (PROC_QUEUE *)
1015  ShmemInitStruct(name, sizeof(PROC_QUEUE), &found);
1016 
1017  if (!found)
1018  ProcQueueInit(queue);
1019 
1020  return queue;
1021 }
1022 #endif
1023 
1024 /*
1025  * ProcQueueInit -- initialize a shared memory process queue
1026  */
1027 void
1028 ProcQueueInit(PROC_QUEUE *queue)
1029 {
1030  SHMQueueInit(&(queue->links));
1031  queue->size = 0;
1032 }
1033 
1034 
1035 /*
1036  * ProcSleep -- put a process to sleep on the specified lock
1037  *
1038  * Caller must have set MyProc->heldLocks to reflect locks already held
1039  * on the lockable object by this process (under all XIDs).
1040  *
1041  * The lock table's partition lock must be held at entry, and will be held
1042  * at exit.
1043  *
1044  * Result: PROC_WAIT_STATUS_OK if we acquired the lock, PROC_WAIT_STATUS_ERROR if not (deadlock).
1045  *
1046  * ASSUME: that no one will fiddle with the queue until after
1047  * we release the partition lock.
1048  *
1049  * NOTES: The process queue is now a priority queue for locking.
1050  */
1051 ProcWaitStatus
1052 ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
1053 {
1054  LOCKMODE lockmode = locallock->tag.mode;
1055  LOCK *lock = locallock->lock;
1056  PROCLOCK *proclock = locallock->proclock;
1057  uint32 hashcode = locallock->hashcode;
1058  LWLock *partitionLock = LockHashPartitionLock(hashcode);
1059  PROC_QUEUE *waitQueue = &(lock->waitProcs);
1060  LOCKMASK myHeldLocks = MyProc->heldLocks;
1061  TimestampTz standbyWaitStart = 0;
1062  bool early_deadlock = false;
1063  bool allow_autovacuum_cancel = true;
1064  bool logged_recovery_conflict = false;
1065  ProcWaitStatus myWaitStatus;
1066  PGPROC *proc;
1067  PGPROC *leader = MyProc->lockGroupLeader;
1068  int i;
1069 
1070  /*
1071  * If group locking is in use, locks held by members of my locking group
1072  * need to be included in myHeldLocks. This is not required for relation
1073  * extension or page locks which conflict among group members. However,
1074  * including them in myHeldLocks will give group members the priority to
1075  * get those locks as compared to other backends which are also trying to
1076  * acquire those locks. OTOH, we can avoid giving priority to group
1077  * members for that kind of locks, but there doesn't appear to be a clear
1078  * advantage of the same.
1079  */
1080  if (leader != NULL)
1081  {
1082  SHM_QUEUE *procLocks = &(lock->procLocks);
1083  PROCLOCK *otherproclock;
1084 
1085  otherproclock = (PROCLOCK *)
1086  SHMQueueNext(procLocks, procLocks, offsetof(PROCLOCK, lockLink));
1087  while (otherproclock != NULL)
1088  {
1089  if (otherproclock->groupLeader == leader)
1090  myHeldLocks |= otherproclock->holdMask;
1091  otherproclock = (PROCLOCK *)
1092  SHMQueueNext(procLocks, &otherproclock->lockLink,
1093  offsetof(PROCLOCK, lockLink));
1094  }
1095  }
1096 
1097  /*
1098  * Determine where to add myself in the wait queue.
1099  *
1100  * Normally I should go at the end of the queue. However, if I already
1101  * hold locks that conflict with the request of any previous waiter, put
1102  * myself in the queue just in front of the first such waiter. This is not
1103  * a necessary step, since deadlock detection would move me to before that
1104  * waiter anyway; but it's relatively cheap to detect such a conflict
1105  * immediately, and avoid delaying till deadlock timeout.
1106  *
1107  * Special case: if I find I should go in front of some waiter, check to
1108  * see if I conflict with already-held locks or the requests before that
1109  * waiter. If not, then just grant myself the requested lock immediately.
1110  * This is the same as the test for immediate grant in LockAcquire, except
1111  * we are only considering the part of the wait queue before my insertion
1112  * point.
1113  */
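 /*
  * Worked example (editorial, not part of proc.c): suppose this backend
  * already holds RowExclusiveLock on a relation and now requests
  * AccessExclusiveLock, while another backend is already queued waiting for
  * AccessExclusiveLock on the same relation.  That waiter's request
  * conflicts with the RowExclusiveLock we hold, so we are inserted ahead of
  * it rather than at the tail.  If that waiter in turn held a lock
  * conflicting with our new request, neither of us could ever be granted,
  * and the early-deadlock path below reports the deadlock immediately
  * instead of waiting out deadlock_timeout.
  */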
1114  if (myHeldLocks != 0)
1115  {
1116  LOCKMASK aheadRequests = 0;
1117 
1118  proc = (PGPROC *) waitQueue->links.next;
1119  for (i = 0; i < waitQueue->size; i++)
1120  {
1121  /*
1122  * If we're part of the same locking group as this waiter, its
1123  * locks neither conflict with ours nor contribute to
1124  * aheadRequests.
1125  */
1126  if (leader != NULL && leader == proc->lockGroupLeader)
1127  {
1128  proc = (PGPROC *) proc->links.next;
1129  continue;
1130  }
1131  /* Must he wait for me? */
1132  if (lockMethodTable->conflictTab[proc->waitLockMode] & myHeldLocks)
1133  {
1134  /* Must I wait for him ? */
1135  if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
1136  {
1137  /*
1138  * Yes, so we have a deadlock. Easiest way to clean up
1139  * correctly is to call RemoveFromWaitQueue(), but we
1140  * can't do that until we are *on* the wait queue. So, set
1141  * a flag to check below, and break out of loop. Also,
1142  * record deadlock info for later message.
1143  */
1144  RememberSimpleDeadLock(MyProc, lockmode, lock, proc);
1145  early_deadlock = true;
1146  break;
1147  }
1148  /* I must go before this waiter. Check special case. */
1149  if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
1150  !LockCheckConflicts(lockMethodTable, lockmode, lock,
1151  proclock))
1152  {
1153  /* Skip the wait and just grant myself the lock. */
1154  GrantLock(lock, proclock, lockmode);
1155  GrantAwaitedLock();
1156  return PROC_WAIT_STATUS_OK;
1157  }
1158  /* Break out of loop to put myself before him */
1159  break;
1160  }
1161  /* Nope, so advance to next waiter */
1162  aheadRequests |= LOCKBIT_ON(proc->waitLockMode);
1163  proc = (PGPROC *) proc->links.next;
1164  }
1165 
1166  /*
1167  * If we fall out of loop normally, proc points to waitQueue head, so
1168  * we will insert at tail of queue as desired.
1169  */
1170  }
1171  else
1172  {
1173  /* I hold no locks, so I can't push in front of anyone. */
1174  proc = (PGPROC *) &(waitQueue->links);
1175  }
1176 
1177  /*
1178  * Insert self into queue, ahead of the given proc (or at tail of queue).
1179  */
1180  SHMQueueInsertBefore(&(proc->links), &(MyProc->links));
1181  waitQueue->size++;
1182 
1183  lock->waitMask |= LOCKBIT_ON(lockmode);
1184 
1185  /* Set up wait information in PGPROC object, too */
1186  MyProc->waitLock = lock;
1187  MyProc->waitProcLock = proclock;
1188  MyProc->waitLockMode = lockmode;
1189 
1191 
1192  /*
1193  * If we detected deadlock, give up without waiting. This must agree with
1194  * CheckDeadLock's recovery code.
1195  */
1196  if (early_deadlock)
1197  {
1198  RemoveFromWaitQueue(MyProc, hashcode);
1199  return PROC_WAIT_STATUS_ERROR;
1200  }
1201 
1202  /* mark that we are waiting for a lock */
1203  lockAwaited = locallock;
1204 
1205  /*
1206  * Release the lock table's partition lock.
1207  *
1208  * NOTE: this may also cause us to exit critical-section state, possibly
1209  * allowing a cancel/die interrupt to be accepted. This is OK because we
1210  * have recorded the fact that we are waiting for a lock, and so
1211  * LockErrorCleanup will clean up if cancel/die happens.
1212  */
1213  LWLockRelease(partitionLock);
1214 
1215  /*
1216  * Also, now that we will successfully clean up after an ereport, it's
1217  * safe to check to see if there's a buffer pin deadlock against the
1218  * Startup process. Of course, that's only necessary if we're doing Hot
1219  * Standby and are not the Startup process ourselves.
1220  */
1221  if (RecoveryInProgress() && !InRecovery)
1223 
1224  /* Reset deadlock_state before enabling the timeout handler */
1225  deadlock_state = DS_NOT_YET_CHECKED;
1226  got_deadlock_timeout = false;
1227 
1228  /*
1229  * Set timer so we can wake up after awhile and check for a deadlock. If a
1230  * deadlock is detected, the handler sets MyProc->waitStatus =
1231  * PROC_WAIT_STATUS_ERROR, allowing us to know that we must report failure
1232  * rather than success.
1233  *
1234  * By delaying the check until we've waited for a bit, we can avoid
1235  * running the rather expensive deadlock-check code in most cases.
1236  *
1237  * If LockTimeout is set, also enable the timeout for that. We can save a
1238  * few cycles by enabling both timeout sources in one call.
1239  *
1240  * If InHotStandby we set lock waits slightly later for clarity with other
1241  * code.
1242  */
1243  if (!InHotStandby)
1244  {
1245  if (LockTimeout > 0)
1246  {
1247  EnableTimeoutParams timeouts[2];
1248 
1249  timeouts[0].id = DEADLOCK_TIMEOUT;
1250  timeouts[0].type = TMPARAM_AFTER;
1251  timeouts[0].delay_ms = DeadlockTimeout;
1252  timeouts[1].id = LOCK_TIMEOUT;
1253  timeouts[1].type = TMPARAM_AFTER;
1254  timeouts[1].delay_ms = LockTimeout;
1255  enable_timeouts(timeouts, 2);
1256  }
1257  else
1259 
1260  /*
1261  * Use the current time obtained for the deadlock timeout timer as
1262  * waitStart (i.e., the time when this process started waiting for the
1263  * lock). Since getting the current time newly can cause overhead, we
1264  * reuse the already-obtained time to avoid that overhead.
1265  *
1266  * Note that waitStart is updated without holding the lock table's
1267  * partition lock, to avoid the overhead by additional lock
1268  * acquisition. This can cause "waitstart" in pg_locks to become NULL
1269  * for a very short period of time after the wait started even though
1270  * "granted" is false. This is OK in practice because we can assume
1271  * that users are likely to look at "waitstart" when waiting for the
1272  * lock for a long time.
1273  */
1276  }
1277  else if (log_recovery_conflict_waits)
1278  {
1279  /*
1280  * Set the wait start timestamp if logging is enabled and in hot
1281  * standby.
1282  */
1283  standbyWaitStart = GetCurrentTimestamp();
1284  }
1285 
1286  /*
1287  * If somebody wakes us between LWLockRelease and WaitLatch, the latch
1288  * will not wait. But a set latch does not necessarily mean that the lock
1289  * is free now, as there are many other sources for latch sets than
1290  * somebody releasing the lock.
1291  *
1292  * We process interrupts whenever the latch has been set, so cancel/die
1293  * interrupts are processed quickly. This means we must not mind losing
1294  * control to a cancel/die interrupt here. We don't, because we have no
1295  * shared-state-change work to do after being granted the lock (the
1296  * grantor did it all). We do have to worry about canceling the deadlock
1297  * timeout and updating the locallock table, but if we lose control to an
1298  * error, LockErrorCleanup will fix that up.
1299  */
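 /*
  * Editorial sketch (not part of proc.c): the wait loop below is an instance
  * of the canonical latch pattern.  Reduced to its skeleton, with
  * condition_satisfied() standing in for the waitStatus re-check:
  *
  *     for (;;)
  *     {
  *         (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH,
  *                          -1L, PG_WAIT_LOCK);
  *         ResetLatch(MyLatch);
  *         CHECK_FOR_INTERRUPTS();
  *         if (condition_satisfied())
  *             break;
  *     }
  *
  * The latch must be reset before re-checking the condition; a set that
  * arrives after the reset either makes the check succeed or leaves the
  * latch set so the next WaitLatch() returns immediately.
  */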
1300  do
1301  {
1302  if (InHotStandby)
1303  {
1304  bool maybe_log_conflict =
1305  (standbyWaitStart != 0 && !logged_recovery_conflict);
1306 
1307  /* Set a timer and wait for that or for the lock to be granted */
1309  maybe_log_conflict);
1310 
1311  /*
1312  * Emit the log message if the startup process is waiting longer
1313  * than deadlock_timeout for recovery conflict on lock.
1314  */
1315  if (maybe_log_conflict)
1316  {
1318 
1319  if (TimestampDifferenceExceeds(standbyWaitStart, now,
1320  DeadlockTimeout))
1321  {
1322  VirtualTransactionId *vxids;
1323  int cnt;
1324 
1325  vxids = GetLockConflicts(&locallock->tag.lock,
1326  AccessExclusiveLock, &cnt);
1327 
1328  /*
1329  * Log the recovery conflict and the list of PIDs of
1330  * backends holding the conflicting lock. Note that we do
1331  * logging even if there are no such backends right now
1332  * because the startup process here has already waited
1333  * longer than deadlock_timeout.
1334  */
1336  standbyWaitStart, now,
1337  cnt > 0 ? vxids : NULL, true);
1338  logged_recovery_conflict = true;
1339  }
1340  }
1341  }
1342  else
1343  {
1345  PG_WAIT_LOCK | locallock->tag.lock.locktag_type);
1347  /* check for deadlocks first, as that's probably log-worthy */
1349  {
1350  CheckDeadLock();
1351  got_deadlock_timeout = false;
1352  }
1354  }
1355 
1356  /*
1357  * waitStatus could change from PROC_WAIT_STATUS_WAITING to something
1358  * else asynchronously. Read it just once per loop to prevent
1359  * surprising behavior (such as missing log messages).
1360  */
1361  myWaitStatus = *((volatile ProcWaitStatus *) &MyProc->waitStatus);
1362 
1363  /*
1364  * If we are not deadlocked, but are waiting on an autovacuum-induced
1365  * task, send a signal to interrupt it.
1366  */
1367  if (deadlock_state == DS_BLOCKED_BY_AUTOVACUUM && allow_autovacuum_cancel)
1368  {
1369  PGPROC *autovac = GetBlockingAutoVacuumPgproc();
1370  uint8 statusFlags;
1371  uint8 lockmethod_copy;
1372  LOCKTAG locktag_copy;
1373 
1374  /*
1375  * Grab info we need, then release lock immediately. Note this
1376  * coding means that there is a tiny chance that the process
1377  * terminates its current transaction and starts a different one
1378  * before we have a chance to send the signal; the worst possible
1379  * consequence is that a for-wraparound vacuum is cancelled. But
1380  * that could happen in any case unless we were to do kill() with
1381  * the lock held, which is much more undesirable.
1382  */
1383  LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
1384  statusFlags = ProcGlobal->statusFlags[autovac->pgxactoff];
1385  lockmethod_copy = lock->tag.locktag_lockmethodid;
1386  locktag_copy = lock->tag;
1387  LWLockRelease(ProcArrayLock);
1388 
1389  /*
1390  * Only do it if the worker is not working to protect against Xid
1391  * wraparound.
1392  */
1393  if ((statusFlags & PROC_IS_AUTOVACUUM) &&
1394  !(statusFlags & PROC_VACUUM_FOR_WRAPAROUND))
1395  {
1396  int pid = autovac->pid;
1397 
1398  /* report the case, if configured to do so */
1400  {
1401  StringInfoData locktagbuf;
1402  StringInfoData logbuf; /* errdetail for server log */
1403 
1404  initStringInfo(&locktagbuf);
1405  initStringInfo(&logbuf);
1406  DescribeLockTag(&locktagbuf, &locktag_copy);
1407  appendStringInfo(&logbuf,
1408  "Process %d waits for %s on %s.",
1409  MyProcPid,
1410  GetLockmodeName(lockmethod_copy, lockmode),
1411  locktagbuf.data);
1412 
1413  ereport(DEBUG1,
1414  (errmsg_internal("sending cancel to blocking autovacuum PID %d",
1415  pid),
1416  errdetail_log("%s", logbuf.data)));
1417 
1418  pfree(locktagbuf.data);
1419  pfree(logbuf.data);
1420  }
1421 
1422  /* send the autovacuum worker Back to Old Kent Road */
1423  if (kill(pid, SIGINT) < 0)
1424  {
1425  /*
1426  * There's a race condition here: once we release the
1427  * ProcArrayLock, it's possible for the autovac worker to
1428  * close up shop and exit before we can do the kill().
1429  * Therefore, we do not whinge about no-such-process.
1430  * Other errors such as EPERM could conceivably happen if
1431  * the kernel recycles the PID fast enough, but such cases
1432  * seem improbable enough that it's probably best to issue
1433  * a warning if we see some other errno.
1434  */
1435  if (errno != ESRCH)
1436  ereport(WARNING,
1437  (errmsg("could not send signal to process %d: %m",
1438  pid)));
1439  }
1440  }
1441 
1442  /* prevent signal from being sent again more than once */
1443  allow_autovacuum_cancel = false;
1444  }
1445 
1446  /*
1447  * If awoken after the deadlock check interrupt has run, and
1448  * log_lock_waits is on, then report about the wait.
1449  */
1451  {
1453  lock_waiters_sbuf,
1454  lock_holders_sbuf;
1455  const char *modename;
1456  long secs;
1457  int usecs;
1458  long msecs;
1459  SHM_QUEUE *procLocks;
1460  PROCLOCK *proclock;
1461  bool first_holder = true,
1462  first_waiter = true;
1463  int lockHoldersNum = 0;
1464 
1465  initStringInfo(&buf);
1466  initStringInfo(&lock_waiters_sbuf);
1467  initStringInfo(&lock_holders_sbuf);
1468 
1469  DescribeLockTag(&buf, &locallock->tag.lock);
1470  modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
1471  lockmode);
1474  &secs, &usecs);
1475  msecs = secs * 1000 + usecs / 1000;
1476  usecs = usecs % 1000;
1477 
1478  /*
1479  * we loop over the lock's procLocks to gather a list of all
1480  * holders and waiters. Thus we will be able to provide more
1481  * detailed information for lock debugging purposes.
1482  *
1483  * lock->procLocks contains all processes which hold or wait for
1484  * this lock.
1485  */
1486 
1487  LWLockAcquire(partitionLock, LW_SHARED);
1488 
1489  procLocks = &(lock->procLocks);
1490  proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
1491  offsetof(PROCLOCK, lockLink));
1492 
1493  while (proclock)
1494  {
1495  /*
1496  * we are a waiter if myProc->waitProcLock == proclock; we are
1497  * a holder if it is NULL or something different
1498  */
1499  if (proclock->tag.myProc->waitProcLock == proclock)
1500  {
1501  if (first_waiter)
1502  {
1503  appendStringInfo(&lock_waiters_sbuf, "%d",
1504  proclock->tag.myProc->pid);
1505  first_waiter = false;
1506  }
1507  else
1508  appendStringInfo(&lock_waiters_sbuf, ", %d",
1509  proclock->tag.myProc->pid);
1510  }
1511  else
1512  {
1513  if (first_holder)
1514  {
1515  appendStringInfo(&lock_holders_sbuf, "%d",
1516  proclock->tag.myProc->pid);
1517  first_holder = false;
1518  }
1519  else
1520  appendStringInfo(&lock_holders_sbuf, ", %d",
1521  proclock->tag.myProc->pid);
1522 
1523  lockHoldersNum++;
1524  }
1525 
1526  proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
1527  offsetof(PROCLOCK, lockLink));
1528  }
1529 
1530  LWLockRelease(partitionLock);
1531 
1533  ereport(LOG,
1534  (errmsg("process %d avoided deadlock for %s on %s by rearranging queue order after %ld.%03d ms",
1535  MyProcPid, modename, buf.data, msecs, usecs),
1536  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1537  "Processes holding the lock: %s. Wait queue: %s.",
1538  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1539  else if (deadlock_state == DS_HARD_DEADLOCK)
1540  {
1541  /*
1542  * This message is a bit redundant with the error that will be
1543  * reported subsequently, but in some cases the error report
1544  * might not make it to the log (eg, if it's caught by an
1545  * exception handler), and we want to ensure all long-wait
1546  * events get logged.
1547  */
1548  ereport(LOG,
1549  (errmsg("process %d detected deadlock while waiting for %s on %s after %ld.%03d ms",
1550  MyProcPid, modename, buf.data, msecs, usecs),
1551  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1552  "Processes holding the lock: %s. Wait queue: %s.",
1553  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1554  }
1555 
1556  if (myWaitStatus == PROC_WAIT_STATUS_WAITING)
1557  ereport(LOG,
1558  (errmsg("process %d still waiting for %s on %s after %ld.%03d ms",
1559  MyProcPid, modename, buf.data, msecs, usecs),
1560  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1561  "Processes holding the lock: %s. Wait queue: %s.",
1562  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1563  else if (myWaitStatus == PROC_WAIT_STATUS_OK)
1564  ereport(LOG,
1565  (errmsg("process %d acquired %s on %s after %ld.%03d ms",
1566  MyProcPid, modename, buf.data, msecs, usecs)));
1567  else
1568  {
1569  Assert(myWaitStatus == PROC_WAIT_STATUS_ERROR);
1570 
1571  /*
1572  * Currently, the deadlock checker always kicks its own
1573  * process, which means that we'll only see
1574  * PROC_WAIT_STATUS_ERROR when deadlock_state ==
1575  * DS_HARD_DEADLOCK, and there's no need to print redundant
1576  * messages. But for completeness and future-proofing, print
1577  * a message if it looks like someone else kicked us off the
1578  * lock.
1579  */
1581  ereport(LOG,
1582  (errmsg("process %d failed to acquire %s on %s after %ld.%03d ms",
1583  MyProcPid, modename, buf.data, msecs, usecs),
1584  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1585  "Processes holding the lock: %s. Wait queue: %s.",
1586  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1587  }
1588 
1589  /*
1590  * At this point we might still need to wait for the lock. Reset
1591  * state so we don't print the above messages again.
1592  */
1594 
1595  pfree(buf.data);
1596  pfree(lock_holders_sbuf.data);
1597  pfree(lock_waiters_sbuf.data);
1598  }
1599  } while (myWaitStatus == PROC_WAIT_STATUS_WAITING);
1600 
1601  /*
1602  * Disable the timers, if they are still running. As in LockErrorCleanup,
1603  * we must preserve the LOCK_TIMEOUT indicator flag: if a lock timeout has
1604  * already caused QueryCancelPending to become set, we want the cancel to
1605  * be reported as a lock timeout, not a user cancel.
1606  */
1607  if (!InHotStandby)
1608  {
1609  if (LockTimeout > 0)
1610  {
1611  DisableTimeoutParams timeouts[2];
1612 
1613  timeouts[0].id = DEADLOCK_TIMEOUT;
1614  timeouts[0].keep_indicator = false;
1615  timeouts[1].id = LOCK_TIMEOUT;
1616  timeouts[1].keep_indicator = true;
1617  disable_timeouts(timeouts, 2);
1618  }
1619  else
1621  }
1622 
1623  /*
1624  * Emit the log message if recovery conflict on lock was resolved but the
1625  * startup process waited longer than deadlock_timeout for it.
1626  */
1627  if (InHotStandby && logged_recovery_conflict)
1629  standbyWaitStart, GetCurrentTimestamp(),
1630  NULL, false);
1631 
1632  /*
1633  * Re-acquire the lock table's partition lock. We have to do this to hold
1634  * off cancel/die interrupts before we can mess with lockAwaited (else we
1635  * might have a missed or duplicated locallock update).
1636  */
1637  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
1638 
1639  /*
1640  * We no longer want LockErrorCleanup to do anything.
1641  */
1642  lockAwaited = NULL;
1643 
1644  /*
1645  * If we got the lock, be sure to remember it in the locallock table.
1646  */
1648  GrantAwaitedLock();
1649 
1650  /*
1651  * We don't have to do anything else, because the awaker did all the
1652  * necessary update of the lock table and MyProc.
1653  */
1654  return MyProc->waitStatus;
1655 }
1656 
1657 
1658 /*
1659  * ProcWakeup -- wake up a process by setting its latch.
1660  *
1661  * Also remove the process from the wait queue and set its links invalid.
1662  * RETURN: the next process in the wait queue.
1663  *
1664  * The appropriate lock partition lock must be held by caller.
1665  *
1666  * XXX: presently, this code is only used for the "success" case, and only
1667  * works correctly for that case. To clean up in failure case, would need
1668  * to twiddle the lock's request counts too --- see RemoveFromWaitQueue.
1669  * Hence, in practice the waitStatus parameter must be PROC_WAIT_STATUS_OK.
1670  */
1671 PGPROC *
1672 ProcWakeup(PGPROC *proc, ProcWaitStatus waitStatus)
1673 {
1674  PGPROC *retProc;
1675 
1676  /* Proc should be sleeping ... */
1677  if (proc->links.prev == NULL ||
1678  proc->links.next == NULL)
1679  return NULL;
1681 
1682  /* Save next process before we zap the list link */
1683  retProc = (PGPROC *) proc->links.next;
1684 
1685  /* Remove process from wait queue */
1686  SHMQueueDelete(&(proc->links));
1687  (proc->waitLock->waitProcs.size)--;
1688 
1689  /* Clean up process' state and pass it the ok/fail signal */
1690  proc->waitLock = NULL;
1691  proc->waitProcLock = NULL;
1692  proc->waitStatus = waitStatus;
1694 
1695  /* And awaken it */
1696  SetLatch(&proc->procLatch);
1697 
1698  return retProc;
1699 }
1700 
1701 /*
1702  * ProcLockWakeup -- routine for waking up processes when a lock is
1703  * released (or a prior waiter is aborted). Scan all waiters
1704  * for lock, waken any that are no longer blocked.
1705  *
1706  * The appropriate lock partition lock must be held by caller.
1707  */
1708 void
1709 ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
1710 {
1711  PROC_QUEUE *waitQueue = &(lock->waitProcs);
1712  int queue_size = waitQueue->size;
1713  PGPROC *proc;
1714  LOCKMASK aheadRequests = 0;
1715 
1716  Assert(queue_size >= 0);
1717 
1718  if (queue_size == 0)
1719  return;
1720 
1721  proc = (PGPROC *) waitQueue->links.next;
1722 
1723  while (queue_size-- > 0)
1724  {
1725  LOCKMODE lockmode = proc->waitLockMode;
1726 
1727  /*
1728  * Waken if (a) doesn't conflict with requests of earlier waiters, and
1729  * (b) doesn't conflict with already-held locks.
1730  */
1731  if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
1732  !LockCheckConflicts(lockMethodTable, lockmode, lock,
1733  proc->waitProcLock))
1734  {
1735  /* OK to waken */
1736  GrantLock(lock, proc->waitProcLock, lockmode);
1737  proc = ProcWakeup(proc, PROC_WAIT_STATUS_OK);
1738 
1739  /*
1740  * ProcWakeup removes proc from the lock's waiting process queue
1741  * and returns the next proc in chain; don't use proc's next-link,
1742  * because it's been cleared.
1743  */
1744  }
1745  else
1746  {
1747  /*
1748  * Cannot wake this guy. Remember his request for later checks.
1749  */
1750  aheadRequests |= LOCKBIT_ON(lockmode);
1751  proc = (PGPROC *) proc->links.next;
1752  }
1753  }
1754 
1755  Assert(waitQueue->size >= 0);
1756 }
1757 
1758 /*
1759  * CheckDeadLock
1760  *
1761  * We only get to this routine, if DEADLOCK_TIMEOUT fired while waiting for a
1762  * lock to be released by some other process. Check if there's a deadlock; if
1763  * not, just return. (But signal ProcSleep to log a message, if
1764  * log_lock_waits is true.) If we have a real deadlock, remove ourselves from
1765  * the lock's wait queue and signal an error to ProcSleep.
1766  */
1767 static void
1768 CheckDeadLock(void)
1769 {
1770  int i;
1771 
1772  /*
1773  * Acquire exclusive lock on the entire shared lock data structures. Must
1774  * grab LWLocks in partition-number order to avoid LWLock deadlock.
1775  *
1776  * Note that the deadlock check interrupt had better not be enabled
1777  * anywhere that this process itself holds lock partition locks, else this
1778  * will wait forever. Also note that LWLockAcquire creates a critical
1779  * section, so that this routine cannot be interrupted by cancel/die
1780  * interrupts.
1781  */
1782  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
1783  LWLockAcquire(LockHashPartitionLockByIndex(i), LW_EXCLUSIVE);
1784 
1785  /*
1786  * Check to see if we've been awoken by anyone in the interim.
1787  *
1788  * If we have, we can return and resume our transaction -- happy day.
1789  * Before we are awoken the process releasing the lock grants it to us so
1790  * we know that we don't have to wait anymore.
1791  *
1792  * We check by looking to see if we've been unlinked from the wait queue.
1793  * This is safe because we hold the lock partition lock.
1794  */
1795  if (MyProc->links.prev == NULL ||
1796  MyProc->links.next == NULL)
1797  goto check_done;
1798 
1799 #ifdef LOCK_DEBUG
1800  if (Debug_deadlocks)
1801  DumpAllLocks();
1802 #endif
1803 
1804  /* Run the deadlock check, and set deadlock_state for use by ProcSleep */
1806 
1808  {
1809  /*
1810  * Oops. We have a deadlock.
1811  *
1812  * Get this process out of wait state. (Note: we could do this more
1813  * efficiently by relying on lockAwaited, but use this coding to
1814  * preserve the flexibility to kill some other transaction than the
1815  * one detecting the deadlock.)
1816  *
1817  * RemoveFromWaitQueue sets MyProc->waitStatus to
1818  * PROC_WAIT_STATUS_ERROR, so ProcSleep will report an error after we
1819  * return from the signal handler.
1820  */
1821  Assert(MyProc->waitLock != NULL);
1822  RemoveFromWaitQueue(MyProc, LockTagHashCode(&(MyProc->waitLock->tag)));
1823 
1824  /*
1825  * We're done here. Transaction abort caused by the error that
1826  * ProcSleep will raise will cause any other locks we hold to be
1827  * released, thus allowing other processes to wake up; we don't need
1828  * to do that here. NOTE: an exception is that releasing locks we
1829  * hold doesn't consider the possibility of waiters that were blocked
1830  * behind us on the lock we just failed to get, and might now be
1831  * wakable because we're not in front of them anymore. However,
1832  * RemoveFromWaitQueue took care of waking up any such processes.
1833  */
1834  }
1835 
1836  /*
1837  * And release locks. We do this in reverse order for two reasons: (1)
1838  * Anyone else who needs more than one of the locks will be trying to lock
1839  * them in increasing order; we don't want to release the other process
1840  * until it can get all the locks it needs. (2) This avoids O(N^2)
1841  * behavior inside LWLockRelease.
1842  */
1843 check_done:
1844  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
1845  LWLockRelease(LockHashPartitionLockByIndex(i));
1846 }
1847 
1848 /*
1849  * CheckDeadLockAlert - Handle the expiry of deadlock_timeout.
1850  *
1851  * NB: Runs inside a signal handler, be careful.
1852  */
1853 void
1854 CheckDeadLockAlert(void)
1855 {
1856  int save_errno = errno;
1857 
1858  got_deadlock_timeout = true;
1859 
1860  /*
1861  * Have to set the latch again, even if handle_sig_alarm already did. Back
1862  * then got_deadlock_timeout wasn't yet set... It's unlikely that this
1863  * ever would be a problem, but setting a set latch again is cheap.
1864  *
1865  * Note that, when this function runs inside procsignal_sigusr1_handler(),
1866  * the handler function sets the latch again after the latch is set here.
1867  */
1868  SetLatch(MyLatch);
1869  errno = save_errno;
1870 }
1871 
1872 /*
1873  * ProcWaitForSignal - wait for a signal from another backend.
1874  *
1875  * As this uses the generic process latch the caller has to be robust against
1876  * unrelated wakeups: Always check that the desired state has occurred, and
1877  * wait again if not.
1878  */
1879 void
1880 ProcWaitForSignal(uint32 wait_event_info)
1881 {
1883  wait_event_info);
1886 }
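A hedged usage sketch (not part of proc.c): callers of ProcWaitForSignal must treat every wakeup as potentially spurious and loop on their own condition. Here shared->work_done is a hypothetical flag another backend sets before calling ProcSendSignal(); PG_WAIT_EXTENSION is just an example wait-event class:

    for (;;)
    {
        if (shared->work_done)      /* the state we are actually waiting for */
            break;
        ProcWaitForSignal(PG_WAIT_EXTENSION);
    }
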
1887 
1888 /*
1889  * ProcSendSignal - set the latch of a backend identified by pgprocno
1890  */
1891 void
1892 ProcSendSignal(int pgprocno)
1893 {
1894  if (pgprocno < 0 || pgprocno >= ProcGlobal->allProcCount)
1895  elog(ERROR, "pgprocno out of range");
1896 
1897  SetLatch(&ProcGlobal->allProcs[pgprocno].procLatch);
1898 }
1899 
1900 /*
1901  * BecomeLockGroupLeader - designate process as lock group leader
1902  *
1903  * Once this function has returned, other processes can join the lock group
1904  * by calling BecomeLockGroupMember.
1905  */
1906 void
1907 BecomeLockGroupLeader(void)
1908 {
1909  LWLock *leader_lwlock;
1910 
1911  /* If we already did it, we don't need to do it again. */
1912  if (MyProc->lockGroupLeader == MyProc)
1913  return;
1914 
1915  /* We had better not be a follower. */
1916  Assert(MyProc->lockGroupLeader == NULL);
1917 
1918  /* Create single-member group, containing only ourselves. */
1919  leader_lwlock = LockHashPartitionLockByProc(MyProc);
1920  LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
1921  MyProc->lockGroupLeader = MyProc;
1922  dlist_push_head(&MyProc->lockGroupMembers, &MyProc->lockGroupLink);
1923  LWLockRelease(leader_lwlock);
1924 }
1925 
1926 /*
1927  * BecomeLockGroupMember - designate process as lock group member
1928  *
1929  * This is pretty straightforward except for the possibility that the leader
1930  * whose group we're trying to join might exit before we manage to do so;
1931  * and the PGPROC might get recycled for an unrelated process. To avoid
1932  * that, we require the caller to pass the PID of the intended PGPROC as
1933  * an interlock. Returns true if we successfully join the intended lock
1934  * group, and false if not.
1935  */
1936 bool
1937 BecomeLockGroupMember(PGPROC *leader, int pid)
1938 {
1939  LWLock *leader_lwlock;
1940  bool ok = false;
1941 
1942  /* Group leader can't become member of group */
1943  Assert(MyProc != leader);
1944 
1945  /* Can't already be a member of a group */
1946  Assert(MyProc->lockGroupLeader == NULL);
1947 
1948  /* PID must be valid. */
1949  Assert(pid != 0);
1950 
1951  /*
1952  * Get lock protecting the group fields. Note LockHashPartitionLockByProc
1953  * accesses leader->pgprocno in a PGPROC that might be free. This is safe
1954  * because all PGPROCs' pgprocno fields are set during shared memory
1955  * initialization and never change thereafter; so we will acquire the
1956  * correct lock even if the leader PGPROC is in process of being recycled.
1957  */
1958  leader_lwlock = LockHashPartitionLockByProc(leader);
1959  LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
1960 
1961  /* Is this the leader we're looking for? */
1962  if (leader->pid == pid && leader->lockGroupLeader == leader)
1963  {
1964  /* OK, join the group */
1965  ok = true;
1966  MyProc->lockGroupLeader = leader;
1967  dlist_push_tail(&leader->lockGroupMembers, &MyProc->lockGroupLink);
1968  }
1969  LWLockRelease(leader_lwlock);
1970 
1971  return ok;
1972 }
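
/*
 * Hypothetical usage sketch (not part of proc.c) of the lock-group handshake:
 * the leader calls BecomeLockGroupLeader() and publishes its PGPROC pointer
 * and PID through some shared channel; the worker then calls
 * BecomeLockGroupMember() with that pair and must handle a false result,
 * which means the leader exited (and its PGPROC may have been recycled)
 * before the worker could join.  The shared_leader_* variables stand in for
 * that channel; in PostgreSQL itself, parallel workers obtain this
 * information from their shared startup state.
 */
static PGPROC *shared_leader_proc;	/* hypothetical shared channel */
static int	shared_leader_pid;

static void
LeaderSideSketch(void)
{
	BecomeLockGroupLeader();
	shared_leader_proc = MyProc;
	shared_leader_pid = MyProcPid;
}

static void
WorkerSideSketch(void)
{
	if (!BecomeLockGroupMember(shared_leader_proc, shared_leader_pid))
		ereport(ERROR,
				(errmsg("lock group leader exited before this worker could join")));
}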