PostgreSQL Source Code  git master
proc.c
1 /*-------------------------------------------------------------------------
2  *
3  * proc.c
4  * routines to manage per-process shared memory data structure
5  *
6  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/storage/lmgr/proc.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 /*
16  * Interface (a):
17  * ProcSleep(), ProcWakeup(),
18  * ProcQueueAlloc() -- create a shm queue for sleeping processes
19  * ProcQueueInit() -- create a queue without allocing memory
20  *
21  * Waiting for a lock causes the backend to be put to sleep. Whoever releases
22  * the lock wakes the process up again (and gives it an error code so it knows
23  * whether it was awoken on an error condition).
24  *
25  * Interface (b):
26  *
27  * ProcReleaseLocks -- frees the locks associated with current transaction
28  *
29  * ProcKill -- destroys the shared memory state (and locks)
30  * associated with the process.
31  */
32 #include "postgres.h"
33 
34 #include <signal.h>
35 #include <unistd.h>
36 #include <sys/time.h>
37 
38 #include "access/transam.h"
39 #include "access/twophase.h"
40 #include "access/xact.h"
41 #include "miscadmin.h"
42 #include "pgstat.h"
43 #include "postmaster/autovacuum.h"
44 #include "replication/slot.h"
45 #include "replication/syncrep.h"
46 #include "replication/walsender.h"
47 #include "storage/condition_variable.h"
48 #include "storage/ipc.h"
49 #include "storage/lmgr.h"
50 #include "storage/pmsignal.h"
51 #include "storage/proc.h"
52 #include "storage/procarray.h"
53 #include "storage/procsignal.h"
54 #include "storage/spin.h"
55 #include "storage/standby.h"
56 #include "utils/timeout.h"
57 #include "utils/timestamp.h"
58 
59 /* GUC variables */
60 int DeadlockTimeout = 1000;
61 int StatementTimeout = 0;
62 int LockTimeout = 0;
63 int IdleInTransactionSessionTimeout = 0;
64 bool log_lock_waits = false;
65 
66 /* Pointer to this process's PGPROC struct, if any */
67 PGPROC *MyProc = NULL;
68 
69 /*
70  * This spinlock protects the freelist of recycled PGPROC structures.
71  * We cannot use an LWLock because the LWLock manager depends on already
72  * having a PGPROC and a wait semaphore! But these structures are touched
73  * relatively infrequently (only at backend startup or shutdown) and not for
74  * very long, so a spinlock is okay.
75  */
76 NON_EXEC_STATIC slock_t *ProcStructLock = NULL;
77 
78 /* Pointers to shared-memory structures */
79 PROC_HDR *ProcGlobal = NULL;
80 NON_EXEC_STATIC PGPROC *AuxiliaryProcs = NULL;
81 PGPROC *PreparedXactProcs = NULL;
82 
83 /* If we are waiting for a lock, this points to the associated LOCALLOCK */
84 static LOCALLOCK *lockAwaited = NULL;
85 
86 static DeadLockState deadlock_state = DS_NOT_YET_CHECKED;
87 
88 /* Is a deadlock check pending? */
89 static volatile sig_atomic_t got_deadlock_timeout;
90 
91 static void RemoveProcFromArray(int code, Datum arg);
92 static void ProcKill(int code, Datum arg);
93 static void AuxiliaryProcKill(int code, Datum arg);
94 static void CheckDeadLock(void);
95 
96 
97 /*
98  * Report shared-memory space needed by InitProcGlobal.
99  */
100 Size
101 ProcGlobalShmemSize(void)
102 {
103  Size size = 0;
104  Size TotalProcs =
105 add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
106 
107  /* ProcGlobal */
108  size = add_size(size, sizeof(PROC_HDR));
109  size = add_size(size, mul_size(TotalProcs, sizeof(PGPROC)));
110  size = add_size(size, sizeof(slock_t));
111 
112  size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->xids)));
113  size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->subxidStates)));
114  size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->statusFlags)));
115 
116  return size;
117 }
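/*
 * Illustrative sketch, not part of proc.c: the overflow-checked
 * add_size()/mul_size() accumulation used by ProcGlobalShmemSize() above is
 * the standard way to build any shared-memory size estimate.  A hypothetical
 * extension could reserve space the same way; MyExtShmemSize and
 * my_ext_nslots are invented names for illustration only.
 */
#include "postgres.h"
#include "storage/shmem.h"

static Size
MyExtShmemSize(Size my_ext_nslots)
{
	Size		size = 0;

	size = add_size(size, sizeof(int));	/* a small fixed header */
	size = add_size(size, mul_size(my_ext_nslots, sizeof(void *)));
	return size;
}

/* A _PG_init() could then call: RequestAddinShmemSpace(MyExtShmemSize(64)); */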
118 
119 /*
120  * Report number of semaphores needed by InitProcGlobal.
121  */
122 int
123 ProcGlobalSemas(void)
124 {
125  /*
126  * We need a sema per backend (including autovacuum), plus one for each
127  * auxiliary process.
128  */
129 return MaxBackends + NUM_AUXILIARY_PROCS;
130 }
131 
132 /*
133  * InitProcGlobal -
134  * Initialize the global process table during postmaster or standalone
135  * backend startup.
136  *
137  * We also create all the per-process semaphores we will need to support
138  * the requested number of backends. We used to allocate semaphores
139  * only when backends were actually started up, but that is bad because
140  * it lets Postgres fail under load --- a lot of Unix systems are
141  * (mis)configured with small limits on the number of semaphores, and
142  * running out when trying to start another backend is a common failure.
143  * So, now we grab enough semaphores to support the desired max number
144  * of backends immediately at initialization --- if the sysadmin has set
145  * MaxConnections, max_worker_processes, max_wal_senders, or
146  * autovacuum_max_workers higher than his kernel will support, he'll
147  * find out sooner rather than later.
148  *
149  * Another reason for creating semaphores here is that the semaphore
150  * implementation typically requires us to create semaphores in the
151  * postmaster, not in backends.
152  *
153  * Note: this is NOT called by individual backends under a postmaster,
154  * not even in the EXEC_BACKEND case. The ProcGlobal and AuxiliaryProcs
155  * pointers must be propagated specially for EXEC_BACKEND operation.
156  */
157 void
158 InitProcGlobal(void)
159 {
160  PGPROC *procs;
161  int i,
162  j;
163  bool found;
164 uint32 TotalProcs = MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts;
165 
166  /* Create the ProcGlobal shared structure */
167  ProcGlobal = (PROC_HDR *)
168  ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
169  Assert(!found);
170 
171  /*
172  * Initialize the data structures.
173  */
174 ProcGlobal->spins_per_delay = DEFAULT_SPINS_PER_DELAY;
175  ProcGlobal->freeProcs = NULL;
176  ProcGlobal->autovacFreeProcs = NULL;
177  ProcGlobal->bgworkerFreeProcs = NULL;
178  ProcGlobal->walsenderFreeProcs = NULL;
179  ProcGlobal->startupProc = NULL;
180  ProcGlobal->startupProcPid = 0;
181  ProcGlobal->startupBufferPinWaitBufId = -1;
182  ProcGlobal->walwriterLatch = NULL;
183  ProcGlobal->checkpointerLatch = NULL;
184 pg_atomic_init_u32(&ProcGlobal->procArrayGroupFirst, INVALID_PGPROCNO);
185 pg_atomic_init_u32(&ProcGlobal->clogGroupFirst, INVALID_PGPROCNO);
186 
187  /*
188  * Create and initialize all the PGPROC structures we'll need. There are
189  * five separate consumers: (1) normal backends, (2) autovacuum workers
190  * and the autovacuum launcher, (3) background workers, (4) auxiliary
191  * processes, and (5) prepared transactions. Each PGPROC structure is
192  * dedicated to exactly one of these purposes, and they do not move
193  * between groups.
194  */
195  procs = (PGPROC *) ShmemAlloc(TotalProcs * sizeof(PGPROC));
196  MemSet(procs, 0, TotalProcs * sizeof(PGPROC));
197  ProcGlobal->allProcs = procs;
198  /* XXX allProcCount isn't really all of them; it excludes prepared xacts */
199 ProcGlobal->allProcCount = MaxBackends + NUM_AUXILIARY_PROCS;
200 
201  /*
202  * Allocate arrays mirroring PGPROC fields in a dense manner. See
203  * PROC_HDR.
204  *
205  * XXX: It might make sense to increase padding for these arrays, given
206  * how hotly they are accessed.
207  */
208  ProcGlobal->xids =
209  (TransactionId *) ShmemAlloc(TotalProcs * sizeof(*ProcGlobal->xids));
210  MemSet(ProcGlobal->xids, 0, TotalProcs * sizeof(*ProcGlobal->xids));
211  ProcGlobal->subxidStates = (XidCacheStatus *) ShmemAlloc(TotalProcs * sizeof(*ProcGlobal->subxidStates));
212  MemSet(ProcGlobal->subxidStates, 0, TotalProcs * sizeof(*ProcGlobal->subxidStates));
213  ProcGlobal->statusFlags = (uint8 *) ShmemAlloc(TotalProcs * sizeof(*ProcGlobal->statusFlags));
214  MemSet(ProcGlobal->statusFlags, 0, TotalProcs * sizeof(*ProcGlobal->statusFlags));
215 
216  for (i = 0; i < TotalProcs; i++)
217  {
218  /* Common initialization for all PGPROCs, regardless of type. */
219 
220  /*
221  * Set up per-PGPROC semaphore, latch, and fpInfoLock. Prepared xact
222  * dummy PGPROCs don't need these though - they're never associated
223  * with a real process
224  */
225  if (i < MaxBackends + NUM_AUXILIARY_PROCS)
226  {
227  procs[i].sem = PGSemaphoreCreate();
228  InitSharedLatch(&(procs[i].procLatch));
229  LWLockInitialize(&(procs[i].fpInfoLock), LWTRANCHE_LOCK_FASTPATH);
230  }
231  procs[i].pgprocno = i;
232 
233  /*
234  * Newly created PGPROCs for normal backends, autovacuum and bgworkers
235  * must be queued up on the appropriate free list. Because there can
236  * only ever be a small, fixed number of auxiliary processes, no free
237  * list is used in that case; InitAuxiliaryProcess() instead uses a
238  * linear search. PGPROCs for prepared transactions are added to a
239  * free list by TwoPhaseShmemInit().
240  */
241  if (i < MaxConnections)
242  {
243  /* PGPROC for normal backend, add to freeProcs list */
244  procs[i].links.next = (SHM_QUEUE *) ProcGlobal->freeProcs;
245  ProcGlobal->freeProcs = &procs[i];
246  procs[i].procgloballist = &ProcGlobal->freeProcs;
247  }
248  else if (i < MaxConnections + autovacuum_max_workers + 1)
249  {
250  /* PGPROC for AV launcher/worker, add to autovacFreeProcs list */
251  procs[i].links.next = (SHM_QUEUE *) ProcGlobal->autovacFreeProcs;
252  ProcGlobal->autovacFreeProcs = &procs[i];
253  procs[i].procgloballist = &ProcGlobal->autovacFreeProcs;
254  }
255 else if (i < MaxConnections + autovacuum_max_workers + 1 + max_worker_processes)
256  {
257  /* PGPROC for bgworker, add to bgworkerFreeProcs list */
258  procs[i].links.next = (SHM_QUEUE *) ProcGlobal->bgworkerFreeProcs;
259  ProcGlobal->bgworkerFreeProcs = &procs[i];
260  procs[i].procgloballist = &ProcGlobal->bgworkerFreeProcs;
261  }
262  else if (i < MaxBackends)
263  {
264  /* PGPROC for walsender, add to walsenderFreeProcs list */
265  procs[i].links.next = (SHM_QUEUE *) ProcGlobal->walsenderFreeProcs;
266  ProcGlobal->walsenderFreeProcs = &procs[i];
267  procs[i].procgloballist = &ProcGlobal->walsenderFreeProcs;
268  }
269 
270  /* Initialize myProcLocks[] shared memory queues. */
271  for (j = 0; j < NUM_LOCK_PARTITIONS; j++)
272  SHMQueueInit(&(procs[i].myProcLocks[j]));
273 
274  /* Initialize lockGroupMembers list. */
275  dlist_init(&procs[i].lockGroupMembers);
276 
277  /*
278  * Initialize the atomic variables, otherwise, it won't be safe to
279  * access them for backends that aren't currently in use.
280  */
281  pg_atomic_init_u32(&(procs[i].procArrayGroupNext), INVALID_PGPROCNO);
282  pg_atomic_init_u32(&(procs[i].clogGroupNext), INVALID_PGPROCNO);
283  }
284 
285  /*
286  * Save pointers to the blocks of PGPROC structures reserved for auxiliary
287  * processes and prepared transactions.
288  */
289  AuxiliaryProcs = &procs[MaxBackends];
290  PreparedXactProcs = &procs[MaxBackends + NUM_AUXILIARY_PROCS];
291 
292  /* Create ProcStructLock spinlock, too */
293  ProcStructLock = (slock_t *) ShmemAlloc(sizeof(slock_t));
294 SpinLockInit(ProcStructLock);
295 }
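/*
 * Illustrative sketch, not part of proc.c: the PGPROC free lists built above
 * are intrusive singly-linked lists, reusing each element's links.next field
 * as the list pointer while the element is idle.  Reduced to plain C (names
 * invented for illustration), the push/pop pattern looks like this:
 */
typedef struct DemoProc
{
	struct DemoProc *next;		/* plays the role of links.next */
	int			payload;
} DemoProc;

static DemoProc *demo_free_list = NULL;

static void
demo_push(DemoProc *p)
{
	/* same shape as: procs[i].links.next = (SHM_QUEUE *) ProcGlobal->freeProcs */
	p->next = demo_free_list;
	demo_free_list = p;
}

static DemoProc *
demo_pop(void)
{
	DemoProc   *p = demo_free_list;

	/* same shape as: *procgloballist = (PGPROC *) MyProc->links.next */
	if (p != NULL)
		demo_free_list = p->next;
	return p;
}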
296 
297 /*
298  * InitProcess -- initialize a per-process data structure for this backend
299  */
300 void
301 InitProcess(void)
302 {
303  PGPROC *volatile *procgloballist;
304 
305  /*
306  * ProcGlobal should be set up already (if we are a backend, we inherit
307  * this by fork() or EXEC_BACKEND mechanism from the postmaster).
308  */
309  if (ProcGlobal == NULL)
310  elog(PANIC, "proc header uninitialized");
311 
312  if (MyProc != NULL)
313  elog(ERROR, "you already exist");
314 
315  /* Decide which list should supply our PGPROC. */
316 if (IsAnyAutoVacuumProcess())
317  procgloballist = &ProcGlobal->autovacFreeProcs;
318  else if (IsBackgroundWorker)
319  procgloballist = &ProcGlobal->bgworkerFreeProcs;
320  else if (am_walsender)
321  procgloballist = &ProcGlobal->walsenderFreeProcs;
322  else
323  procgloballist = &ProcGlobal->freeProcs;
324 
325  /*
326  * Try to get a proc struct from the appropriate free list. If this
327  * fails, we must be out of PGPROC structures (not to mention semaphores).
328  *
329  * While we are holding the ProcStructLock, also copy the current shared
330  * estimate of spins_per_delay to local storage.
331  */
332 SpinLockAcquire(ProcStructLock);
333 
334 set_spins_per_delay(ProcGlobal->spins_per_delay);
335 
336  MyProc = *procgloballist;
337 
338  if (MyProc != NULL)
339  {
340  *procgloballist = (PGPROC *) MyProc->links.next;
341 SpinLockRelease(ProcStructLock);
342  }
343  else
344  {
345  /*
346  * If we reach here, all the PGPROCs are in use. This is one of the
347  * possible places to detect "too many backends", so give the standard
348  * error message. XXX do we need to give a different failure message
349  * in the autovacuum case?
350  */
351 SpinLockRelease(ProcStructLock);
352  if (am_walsender)
353  ereport(FATAL,
354  (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
355  errmsg("number of requested standby connections exceeds max_wal_senders (currently %d)",
356  max_wal_senders)));
357  ereport(FATAL,
358  (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
359  errmsg("sorry, too many clients already")));
360  }
361 
362  /*
363  * Cross-check that the PGPROC is of the type we expect; if this were not
364  * the case, it would get returned to the wrong list.
365  */
366  Assert(MyProc->procgloballist == procgloballist);
367 
368  /*
369  * Now that we have a PGPROC, mark ourselves as an active postmaster
370  * child; this is so that the postmaster can detect it if we exit without
371  * cleaning up. (XXX autovac launcher currently doesn't participate in
372  * this; it probably should.)
373  */
374 if (IsUnderPostmaster && !IsAutoVacuumLauncherProcess())
375 MarkPostmasterChildActive();
376 
377  /*
378  * Initialize all fields of MyProc, except for those previously
379  * initialized by InitProcGlobal.
380  */
381  SHMQueueElemInit(&(MyProc->links));
382 MyProc->waitStatus = PROC_WAIT_STATUS_OK;
383 MyProc->lxid = InvalidLocalTransactionId;
384  MyProc->fpVXIDLock = false;
385 MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
386  MyProc->xid = InvalidTransactionId;
387  MyProc->xmin = InvalidTransactionId;
388  MyProc->pid = MyProcPid;
389  /* backendId, databaseId and roleId will be filled in later */
390  MyProc->backendId = InvalidBackendId;
391  MyProc->databaseId = InvalidOid;
392  MyProc->roleId = InvalidOid;
393  MyProc->tempNamespaceId = InvalidOid;
394 MyProc->isBackgroundWorker = IsBackgroundWorker;
395  MyProc->delayChkpt = false;
396  MyProc->statusFlags = 0;
397  /* NB -- autovac launcher intentionally does not set IS_AUTOVACUUM */
398 if (IsAutoVacuumWorkerProcess())
399  MyProc->statusFlags |= PROC_IS_AUTOVACUUM;
400  MyProc->lwWaiting = false;
401  MyProc->lwWaitMode = 0;
402  MyProc->waitLock = NULL;
403  MyProc->waitProcLock = NULL;
404 #ifdef USE_ASSERT_CHECKING
405  {
406  int i;
407 
408  /* Last process should have released all locks. */
409  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
410  Assert(SHMQueueEmpty(&(MyProc->myProcLocks[i])));
411  }
412 #endif
413  MyProc->recoveryConflictPending = false;
414 
415  /* Initialize fields for sync rep */
416  MyProc->waitLSN = 0;
417 MyProc->syncRepState = SYNC_REP_NOT_WAITING;
418  SHMQueueElemInit(&(MyProc->syncRepLinks));
419 
420  /* Initialize fields for group XID clearing. */
421  MyProc->procArrayGroupMember = false;
422 MyProc->procArrayGroupMemberXid = InvalidTransactionId;
423 Assert(pg_atomic_read_u32(&MyProc->procArrayGroupNext) == INVALID_PGPROCNO);
424 
425  /* Check that group locking fields are in a proper initial state. */
426  Assert(MyProc->lockGroupLeader == NULL);
427 Assert(dlist_is_empty(&MyProc->lockGroupMembers));
428 
429  /* Initialize wait event information. */
430  MyProc->wait_event_info = 0;
431 
432  /* Initialize fields for group transaction status update. */
433  MyProc->clogGroupMember = false;
434 MyProc->clogGroupMemberXid = InvalidTransactionId;
435 MyProc->clogGroupMemberXidStatus = TRANSACTION_STATUS_IN_PROGRESS;
436  MyProc->clogGroupMemberPage = -1;
437 MyProc->clogGroupMemberLsn = InvalidXLogRecPtr;
438 Assert(pg_atomic_read_u32(&MyProc->clogGroupNext) == INVALID_PGPROCNO);
439 
440  /*
441  * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
442  * on it. That allows us to repoint the process latch, which so far
443  * points to process local one, to the shared one.
444  */
445  OwnLatch(&MyProc->procLatch);
446 SwitchToSharedLatch();
447 
448  /*
449  * We might be reusing a semaphore that belonged to a failed process. So
450  * be careful and reinitialize its value here. (This is not strictly
451  * necessary anymore, but seems like a good idea for cleanliness.)
452  */
453  PGSemaphoreReset(MyProc->sem);
454 
455  /*
456  * Arrange to clean up at backend exit.
457  */
458 on_shmem_exit(ProcKill, 0);
459 
460  /*
461  * Now that we have a PGPROC, we could try to acquire locks, so initialize
462  * local state needed for LWLocks, and the deadlock checker.
463  */
464 InitLWLockAccess();
465 InitDeadLockChecking();
466 }
467 
468 /*
469  * InitProcessPhase2 -- make MyProc visible in the shared ProcArray.
470  *
471  * This is separate from InitProcess because we can't acquire LWLocks until
472  * we've created a PGPROC, but in the EXEC_BACKEND case ProcArrayAdd won't
473  * work until after we've done CreateSharedMemoryAndSemaphores.
474  */
475 void
476 InitProcessPhase2(void)
477 {
478  Assert(MyProc != NULL);
479 
480  /*
481  * Add our PGPROC to the PGPROC array in shared memory.
482  */
483  ProcArrayAdd(MyProc);
484 
485  /*
486  * Arrange to clean that up at backend exit.
487  */
488 on_shmem_exit(RemoveProcFromArray, 0);
489 }
490 
491 /*
492  * InitAuxiliaryProcess -- create a per-auxiliary-process data structure
493  *
494  * This is called by bgwriter and similar processes so that they will have a
495  * MyProc value that's real enough to let them wait for LWLocks. The PGPROC
496  * and sema that are assigned are one of the extra ones created during
497  * InitProcGlobal.
498  *
499  * Auxiliary processes are presently not expected to wait for real (lockmgr)
500  * locks, so we need not set up the deadlock checker. They are never added
501  * to the ProcArray or the sinval messaging mechanism, either. They also
502  * don't get a VXID assigned, since this is only useful when we actually
503  * hold lockmgr locks.
504  *
505  * Startup process however uses locks but never waits for them in the
506  * normal backend sense. Startup process also takes part in sinval messaging
507  * as a sendOnly process, so never reads messages from sinval queue. So
508  * Startup process does have a VXID and does show up in pg_locks.
509  */
510 void
511 InitAuxiliaryProcess(void)
512 {
513  PGPROC *auxproc;
514  int proctype;
515 
516  /*
517  * ProcGlobal should be set up already (if we are a backend, we inherit
518  * this by fork() or EXEC_BACKEND mechanism from the postmaster).
519  */
520  if (ProcGlobal == NULL || AuxiliaryProcs == NULL)
521  elog(PANIC, "proc header uninitialized");
522 
523  if (MyProc != NULL)
524  elog(ERROR, "you already exist");
525 
526  /*
527  * We use the ProcStructLock to protect assignment and releasing of
528  * AuxiliaryProcs entries.
529  *
530  * While we are holding the ProcStructLock, also copy the current shared
531  * estimate of spins_per_delay to local storage.
532  */
533 SpinLockAcquire(ProcStructLock);
534 
535 set_spins_per_delay(ProcGlobal->spins_per_delay);
536 
537  /*
538  * Find a free auxproc ... *big* trouble if there isn't one ...
539  */
540  for (proctype = 0; proctype < NUM_AUXILIARY_PROCS; proctype++)
541  {
542  auxproc = &AuxiliaryProcs[proctype];
543  if (auxproc->pid == 0)
544  break;
545  }
546  if (proctype >= NUM_AUXILIARY_PROCS)
547  {
548 SpinLockRelease(ProcStructLock);
549  elog(FATAL, "all AuxiliaryProcs are in use");
550  }
551 
552  /* Mark auxiliary proc as in use by me */
553  /* use volatile pointer to prevent code rearrangement */
554  ((volatile PGPROC *) auxproc)->pid = MyProcPid;
555 
556  MyProc = auxproc;
557 
558 SpinLockRelease(ProcStructLock);
559 
560  /*
561  * Initialize all fields of MyProc, except for those previously
562  * initialized by InitProcGlobal.
563  */
564  SHMQueueElemInit(&(MyProc->links));
565 MyProc->waitStatus = PROC_WAIT_STATUS_OK;
566 MyProc->lxid = InvalidLocalTransactionId;
567  MyProc->fpVXIDLock = false;
568 MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
569  MyProc->xid = InvalidTransactionId;
570  MyProc->xmin = InvalidTransactionId;
571  MyProc->backendId = InvalidBackendId;
572  MyProc->databaseId = InvalidOid;
573  MyProc->roleId = InvalidOid;
574  MyProc->tempNamespaceId = InvalidOid;
575 MyProc->isBackgroundWorker = IsBackgroundWorker;
576  MyProc->delayChkpt = false;
577  MyProc->statusFlags = 0;
578  MyProc->lwWaiting = false;
579  MyProc->lwWaitMode = 0;
580  MyProc->waitLock = NULL;
581  MyProc->waitProcLock = NULL;
582 #ifdef USE_ASSERT_CHECKING
583  {
584  int i;
585 
586  /* Last process should have released all locks. */
587  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
588  Assert(SHMQueueEmpty(&(MyProc->myProcLocks[i])));
589  }
590 #endif
591 
592  /*
593  * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
594  * on it. That allows us to repoint the process latch, which so far
595  * points to process local one, to the shared one.
596  */
597  OwnLatch(&MyProc->procLatch);
598 SwitchToSharedLatch();
599 
600  /* Check that group locking fields are in a proper initial state. */
601  Assert(MyProc->lockGroupLeader == NULL);
602 Assert(dlist_is_empty(&MyProc->lockGroupMembers));
603 
604  /*
605  * We might be reusing a semaphore that belonged to a failed process. So
606  * be careful and reinitialize its value here. (This is not strictly
607  * necessary anymore, but seems like a good idea for cleanliness.)
608  */
609  PGSemaphoreReset(MyProc->sem);
610 
611  /*
612  * Arrange to clean up at process exit.
613  */
614 on_shmem_exit(AuxiliaryProcKill, Int32GetDatum(proctype));
615 }
616 
617 /*
618  * Record the PID and PGPROC structures for the Startup process, for use in
619  * ProcSendSignal(). See comments there for further explanation.
620  */
621 void
622 PublishStartupProcessInformation(void)
623 {
624 SpinLockAcquire(ProcStructLock);
625 
626  ProcGlobal->startupProc = MyProc;
627  ProcGlobal->startupProcPid = MyProcPid;
628 
629 SpinLockRelease(ProcStructLock);
630 }
631 
632 /*
633  * Used from bufmgr to share the value of the buffer that Startup waits on,
634  * or to reset the value to "not waiting" (-1). This allows processing
635  * of recovery conflicts for buffer pins. Set is made before backends look
636  * at this value, so locking not required, especially since the set is
637  * an atomic integer set operation.
638  */
639 void
640 SetStartupBufferPinWaitBufId(int bufid)
641 {
642  /* use volatile pointer to prevent code rearrangement */
643  volatile PROC_HDR *procglobal = ProcGlobal;
644 
645  procglobal->startupBufferPinWaitBufId = bufid;
646 }
647 
648 /*
649  * Used by backends when they receive a request to check for buffer pin waits.
650  */
651 int
652 GetStartupBufferPinWaitBufId(void)
653 {
654  /* use volatile pointer to prevent code rearrangement */
655  volatile PROC_HDR *procglobal = ProcGlobal;
656 
657  return procglobal->startupBufferPinWaitBufId;
658 }
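/*
 * Illustrative sketch, not part of proc.c: SetStartupBufferPinWaitBufId() and
 * GetStartupBufferPinWaitBufId() form a small handshake with the buffer
 * manager.  The startup process publishes the buffer it is blocked on before
 * it sleeps; a backend dropping its pin can check that value and wake the
 * startup process.  demo_maybe_wake_startup is an invented name and the real
 * logic lives in the buffer manager, so treat this only as a sketch.
 */
static void
demo_maybe_wake_startup(int just_unpinned_bufid, int startup_pid)
{
	if (GetStartupBufferPinWaitBufId() == just_unpinned_bufid)
		ProcSendSignal(startup_pid);	/* see ProcSendSignal() below */
}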
659 
660 /*
661  * Check whether there are at least N free PGPROC objects.
662  *
663  * Note: this is designed on the assumption that N will generally be small.
664  */
665 bool
666 HaveNFreeProcs(int n)
667 {
668  PGPROC *proc;
669 
670 SpinLockAcquire(ProcStructLock);
671 
672  proc = ProcGlobal->freeProcs;
673 
674  while (n > 0 && proc != NULL)
675  {
676  proc = (PGPROC *) proc->links.next;
677  n--;
678  }
679 
680 SpinLockRelease(ProcStructLock);
681 
682  return (n <= 0);
683 }
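/*
 * Illustrative sketch, not part of proc.c: HaveNFreeProcs() exists so that a
 * caller can keep some PGPROC slots in reserve for privileged connections.
 * Roughly, connection-time code can reject an ordinary session when fewer
 * than the reserved number of free slots remain; the names and message below
 * are invented for illustration (the real check lives outside this file).
 */
static void
demo_check_reserved_slots(int reserved_slots, bool is_privileged)
{
	if (!is_privileged && reserved_slots > 0 &&
		!HaveNFreeProcs(reserved_slots))
		ereport(FATAL,
				(errcode(ERRCODE_TOO_MANY_CONNECTIONS),
				 errmsg("remaining connection slots are reserved")));
}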
684 
685 /*
686  * Check if the current process is awaiting a lock.
687  */
688 bool
689 IsWaitingForLock(void)
690 {
691  if (lockAwaited == NULL)
692  return false;
693 
694  return true;
695 }
696 
697 /*
698  * Cancel any pending wait for lock, when aborting a transaction, and revert
699  * any strong lock count acquisition for a lock being acquired.
700  *
701  * (Normally, this would only happen if we accept a cancel/die
702  * interrupt while waiting; but an ereport(ERROR) before or during the lock
703  * wait is within the realm of possibility, too.)
704  */
705 void
706 LockErrorCleanup(void)
707 {
708  LWLock *partitionLock;
709  DisableTimeoutParams timeouts[2];
710 
711  HOLD_INTERRUPTS();
712 
713 AbortStrongLockAcquire();
714 
715  /* Nothing to do if we weren't waiting for a lock */
716  if (lockAwaited == NULL)
717  {
718 RESUME_INTERRUPTS();
719  return;
720  }
721 
722  /*
723  * Turn off the deadlock and lock timeout timers, if they are still
724  * running (see ProcSleep). Note we must preserve the LOCK_TIMEOUT
725  * indicator flag, since this function is executed before
726  * ProcessInterrupts when responding to SIGINT; else we'd lose the
727  * knowledge that the SIGINT came from a lock timeout and not an external
728  * source.
729  */
730  timeouts[0].id = DEADLOCK_TIMEOUT;
731  timeouts[0].keep_indicator = false;
732  timeouts[1].id = LOCK_TIMEOUT;
733  timeouts[1].keep_indicator = true;
734  disable_timeouts(timeouts, 2);
735 
736  /* Unlink myself from the wait queue, if on it (might not be anymore!) */
737  partitionLock = LockHashPartitionLock(lockAwaited->hashcode);
738  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
739 
740  if (MyProc->links.next != NULL)
741  {
742  /* We could not have been granted the lock yet */
743  RemoveFromWaitQueue(MyProc, lockAwaited->hashcode);
744  }
745  else
746  {
747  /*
748  * Somebody kicked us off the lock queue already. Perhaps they
749  * granted us the lock, or perhaps they detected a deadlock. If they
750  * did grant us the lock, we'd better remember it in our local lock
751  * table.
752  */
753  if (MyProc->waitStatus == PROC_WAIT_STATUS_OK)
754 GrantAwaitedLock();
755  }
756 
757  lockAwaited = NULL;
758 
759  LWLockRelease(partitionLock);
760 
761 RESUME_INTERRUPTS();
762 }
763 
764 
765 /*
766  * ProcReleaseLocks() -- release locks associated with current transaction
767  * at main transaction commit or abort
768  *
769  * At main transaction commit, we release standard locks except session locks.
770  * At main transaction abort, we release all locks including session locks.
771  *
772  * Advisory locks are released only if they are transaction-level;
773  * session-level holds remain, whether this is a commit or not.
774  *
775  * At subtransaction commit, we don't release any locks (so this func is not
776  * needed at all); we will defer the releasing to the parent transaction.
777  * At subtransaction abort, we release all locks held by the subtransaction;
778  * this is implemented by retail releasing of the locks under control of
779  * the ResourceOwner mechanism.
780  */
781 void
782 ProcReleaseLocks(bool isCommit)
783 {
784  if (!MyProc)
785  return;
786  /* If waiting, get off wait queue (should only be needed after error) */
787 LockErrorCleanup();
788  /* Release standard locks, including session-level if aborting */
789 LockReleaseAll(DEFAULT_LOCKMETHOD, !isCommit);
790  /* Release transaction-level advisory locks */
791 LockReleaseAll(USER_LOCKMETHOD, false);
792 }
793 
794 
795 /*
796  * RemoveProcFromArray() -- Remove this process from the shared ProcArray.
797  */
798 static void
799 RemoveProcFromArray(int code, Datum arg)
800 {
801  Assert(MyProc != NULL);
802 ProcArrayRemove(MyProc, InvalidTransactionId);
803 }
804 
805 /*
806  * ProcKill() -- Destroy the per-proc data structure for
807  * this process. Release any of its held LW locks.
808  */
809 static void
810 ProcKill(int code, Datum arg)
811 {
812  PGPROC *proc;
813  PGPROC *volatile *procgloballist;
814 
815  Assert(MyProc != NULL);
816 
817  /* Make sure we're out of the sync rep lists */
818 SyncRepCleanupAtProcExit();
819 
820 #ifdef USE_ASSERT_CHECKING
821  {
822  int i;
823 
824  /* Last process should have released all locks. */
825  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
826  Assert(SHMQueueEmpty(&(MyProc->myProcLocks[i])));
827  }
828 #endif
829 
830  /*
831  * Release any LW locks I am holding. There really shouldn't be any, but
832  * it's cheap to check again before we cut the knees off the LWLock
833  * facility by releasing our PGPROC ...
834  */
835 LWLockReleaseAll();
836 
837  /* Cancel any pending condition variable sleep, too */
838 ConditionVariableCancelSleep();
839 
840  /* Make sure active replication slots are released */
841  if (MyReplicationSlot != NULL)
842 ReplicationSlotRelease();
843 
844  /* Also cleanup all the temporary slots. */
845 ReplicationSlotCleanup();
846 
847  /*
848  * Detach from any lock group of which we are a member. If the leader
849  * exits before all other group members, its PGPROC will remain allocated
850  * until the last group process exits; that process must return the
851  * leader's PGPROC to the appropriate list.
852  */
853  if (MyProc->lockGroupLeader != NULL)
854  {
855  PGPROC *leader = MyProc->lockGroupLeader;
856  LWLock *leader_lwlock = LockHashPartitionLockByProc(leader);
857 
858  LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
860  dlist_delete(&MyProc->lockGroupLink);
861  if (dlist_is_empty(&leader->lockGroupMembers))
862  {
863  leader->lockGroupLeader = NULL;
864  if (leader != MyProc)
865  {
866  procgloballist = leader->procgloballist;
867 
868  /* Leader exited first; return its PGPROC. */
869 SpinLockAcquire(ProcStructLock);
870  leader->links.next = (SHM_QUEUE *) *procgloballist;
871  *procgloballist = leader;
872 SpinLockRelease(ProcStructLock);
873  }
874  }
875  else if (leader != MyProc)
876  MyProc->lockGroupLeader = NULL;
877  LWLockRelease(leader_lwlock);
878  }
879 
880  /*
881  * Reset MyLatch to the process local one. This is so that signal
882  * handlers et al can continue using the latch after the shared latch
883  * isn't ours anymore. After that clear MyProc and disown the shared
884  * latch.
885  */
886 SwitchBackToLocalLatch();
887  proc = MyProc;
888  MyProc = NULL;
889  DisownLatch(&proc->procLatch);
890 
891  procgloballist = proc->procgloballist;
892 SpinLockAcquire(ProcStructLock);
893 
894  /*
895  * If we're still a member of a locking group, that means we're a leader
896  * which has somehow exited before its children. The last remaining child
897  * will release our PGPROC. Otherwise, release it now.
898  */
899  if (proc->lockGroupLeader == NULL)
900  {
901  /* Since lockGroupLeader is NULL, lockGroupMembers should be empty. */
902 Assert(dlist_is_empty(&proc->lockGroupMembers));
903 
904  /* Return PGPROC structure (and semaphore) to appropriate freelist */
905  proc->links.next = (SHM_QUEUE *) *procgloballist;
906  *procgloballist = proc;
907  }
908 
909  /* Update shared estimate of spins_per_delay */
910  ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);
911 
912 SpinLockRelease(ProcStructLock);
913 
914  /*
915  * This process is no longer present in shared memory in any meaningful
916  * way, so tell the postmaster we've cleaned up acceptably well. (XXX
917  * autovac launcher should be included here someday)
918  */
919 if (IsUnderPostmaster && !IsAutoVacuumLauncherProcess())
920 MarkPostmasterChildInactive();
921 
922  /* wake autovac launcher if needed -- see comments in FreeWorkerInfo */
923  if (AutovacuumLauncherPid != 0)
924 kill(AutovacuumLauncherPid, SIGUSR2);
925 }
926 
927 /*
928  * AuxiliaryProcKill() -- Cut-down version of ProcKill for auxiliary
929  * processes (bgwriter, etc). The PGPROC and sema are not released, only
930  * marked as not-in-use.
931  */
932 static void
933 AuxiliaryProcKill(int code, Datum arg)
934 {
935  int proctype = DatumGetInt32(arg);
937  PGPROC *proc;
938 
939  Assert(proctype >= 0 && proctype < NUM_AUXILIARY_PROCS);
940 
941  auxproc = &AuxiliaryProcs[proctype];
942 
943  Assert(MyProc == auxproc);
944 
945  /* Release any LW locks I am holding (see notes above) */
946 LWLockReleaseAll();
947 
948  /* Cancel any pending condition variable sleep, too */
949 ConditionVariableCancelSleep();
950 
951  /*
952  * Reset MyLatch to the process local one. This is so that signal
953  * handlers et al can continue using the latch after the shared latch
954  * isn't ours anymore. After that clear MyProc and disown the shared
955  * latch.
956  */
957 SwitchBackToLocalLatch();
958  proc = MyProc;
959  MyProc = NULL;
960  DisownLatch(&proc->procLatch);
961 
962 SpinLockAcquire(ProcStructLock);
963 
964  /* Mark auxiliary proc no longer in use */
965  proc->pid = 0;
966 
967  /* Update shared estimate of spins_per_delay */
968  ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);
969 
970 SpinLockRelease(ProcStructLock);
971 }
972 
973 /*
974  * AuxiliaryPidGetProc -- get PGPROC for an auxiliary process
975  * given its PID
976  *
977  * Returns NULL if not found.
978  */
979 PGPROC *
980 AuxiliaryPidGetProc(int pid)
981 {
982  PGPROC *result = NULL;
983  int index;
984 
985  if (pid == 0) /* never match dummy PGPROCs */
986  return NULL;
987 
988  for (index = 0; index < NUM_AUXILIARY_PROCS; index++)
989  {
990  PGPROC *proc = &AuxiliaryProcs[index];
991 
992  if (proc->pid == pid)
993  {
994  result = proc;
995  break;
996  }
997  }
998  return result;
999 }
1000 
1001 /*
1002  * ProcQueue package: routines for putting processes to sleep
1003  * and waking them up
1004  */
1005 
1006 /*
1007  * ProcQueueAlloc -- alloc/attach to a shared memory process queue
1008  *
1009  * Returns: a pointer to the queue
1010  * Side Effects: Initializes the queue if it wasn't there before
1011  */
1012 #ifdef NOT_USED
1013 PROC_QUEUE *
1014 ProcQueueAlloc(const char *name)
1015 {
1016  PROC_QUEUE *queue;
1017  bool found;
1018 
1019  queue = (PROC_QUEUE *)
1020  ShmemInitStruct(name, sizeof(PROC_QUEUE), &found);
1021 
1022  if (!found)
1023  ProcQueueInit(queue);
1024 
1025  return queue;
1026 }
1027 #endif
1028 
1029 /*
1030  * ProcQueueInit -- initialize a shared memory process queue
1031  */
1032 void
1033 ProcQueueInit(PROC_QUEUE *queue)
1034 {
1035  SHMQueueInit(&(queue->links));
1036  queue->size = 0;
1037 }
1038 
1039 
1040 /*
1041  * ProcSleep -- put a process to sleep on the specified lock
1042  *
1043  * Caller must have set MyProc->heldLocks to reflect locks already held
1044  * on the lockable object by this process (under all XIDs).
1045  *
1046  * The lock table's partition lock must be held at entry, and will be held
1047  * at exit.
1048  *
1049  * Result: PROC_WAIT_STATUS_OK if we acquired the lock, PROC_WAIT_STATUS_ERROR if not (deadlock).
1050  *
1051  * ASSUME: that no one will fiddle with the queue until after
1052  * we release the partition lock.
1053  *
1054  * NOTES: The process queue is now a priority queue for locking.
1055  */
1056 ProcWaitStatus
1057 ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
1058 {
1059  LOCKMODE lockmode = locallock->tag.mode;
1060  LOCK *lock = locallock->lock;
1061  PROCLOCK *proclock = locallock->proclock;
1062  uint32 hashcode = locallock->hashcode;
1063  LWLock *partitionLock = LockHashPartitionLock(hashcode);
1064  PROC_QUEUE *waitQueue = &(lock->waitProcs);
1065  LOCKMASK myHeldLocks = MyProc->heldLocks;
1066  bool early_deadlock = false;
1067  bool allow_autovacuum_cancel = true;
1068  ProcWaitStatus myWaitStatus;
1069  PGPROC *proc;
1070  PGPROC *leader = MyProc->lockGroupLeader;
1071  int i;
1072 
1073  /*
1074  * If group locking is in use, locks held by members of my locking group
1075  * need to be included in myHeldLocks. This is not required for relation
1076  * extension or page locks which conflict among group members. However,
1077  * including them in myHeldLocks will give group members the priority to
1078  * get those locks as compared to other backends which are also trying to
1079  * acquire those locks. OTOH, we can avoid giving priority to group
1080  * members for that kind of locks, but there doesn't appear to be a clear
1081  * advantage of the same.
1082  */
1083  if (leader != NULL)
1084  {
1085  SHM_QUEUE *procLocks = &(lock->procLocks);
1086  PROCLOCK *otherproclock;
1087 
1088  otherproclock = (PROCLOCK *)
1089  SHMQueueNext(procLocks, procLocks, offsetof(PROCLOCK, lockLink));
1090  while (otherproclock != NULL)
1091  {
1092  if (otherproclock->groupLeader == leader)
1093  myHeldLocks |= otherproclock->holdMask;
1094  otherproclock = (PROCLOCK *)
1095  SHMQueueNext(procLocks, &otherproclock->lockLink,
1096  offsetof(PROCLOCK, lockLink));
1097  }
1098  }
1099 
1100  /*
1101  * Determine where to add myself in the wait queue.
1102  *
1103  * Normally I should go at the end of the queue. However, if I already
1104  * hold locks that conflict with the request of any previous waiter, put
1105  * myself in the queue just in front of the first such waiter. This is not
1106  * a necessary step, since deadlock detection would move me to before that
1107  * waiter anyway; but it's relatively cheap to detect such a conflict
1108  * immediately, and avoid delaying till deadlock timeout.
1109  *
1110  * Special case: if I find I should go in front of some waiter, check to
1111  * see if I conflict with already-held locks or the requests before that
1112  * waiter. If not, then just grant myself the requested lock immediately.
1113  * This is the same as the test for immediate grant in LockAcquire, except
1114  * we are only considering the part of the wait queue before my insertion
1115  * point.
1116  */
1117  if (myHeldLocks != 0)
1118  {
1119  LOCKMASK aheadRequests = 0;
1120 
1121  proc = (PGPROC *) waitQueue->links.next;
1122  for (i = 0; i < waitQueue->size; i++)
1123  {
1124  /*
1125  * If we're part of the same locking group as this waiter, its
1126  * locks neither conflict with ours nor contribute to
1127  * aheadRequests.
1128  */
1129  if (leader != NULL && leader == proc->lockGroupLeader)
1130  {
1131  proc = (PGPROC *) proc->links.next;
1132  continue;
1133  }
1134  /* Must he wait for me? */
1135  if (lockMethodTable->conflictTab[proc->waitLockMode] & myHeldLocks)
1136  {
1137  /* Must I wait for him ? */
1138  if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
1139  {
1140  /*
1141  * Yes, so we have a deadlock. Easiest way to clean up
1142  * correctly is to call RemoveFromWaitQueue(), but we
1143  * can't do that until we are *on* the wait queue. So, set
1144  * a flag to check below, and break out of loop. Also,
1145  * record deadlock info for later message.
1146  */
1147  RememberSimpleDeadLock(MyProc, lockmode, lock, proc);
1148  early_deadlock = true;
1149  break;
1150  }
1151  /* I must go before this waiter. Check special case. */
1152  if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
1153  !LockCheckConflicts(lockMethodTable, lockmode, lock,
1154  proclock))
1155  {
1156  /* Skip the wait and just grant myself the lock. */
1157  GrantLock(lock, proclock, lockmode);
1158  GrantAwaitedLock();
1159  return PROC_WAIT_STATUS_OK;
1160  }
1161  /* Break out of loop to put myself before him */
1162  break;
1163  }
1164  /* Nope, so advance to next waiter */
1165  aheadRequests |= LOCKBIT_ON(proc->waitLockMode);
1166  proc = (PGPROC *) proc->links.next;
1167  }
1168 
1169  /*
1170  * If we fall out of loop normally, proc points to waitQueue head, so
1171  * we will insert at tail of queue as desired.
1172  */
1173  }
1174  else
1175  {
1176  /* I hold no locks, so I can't push in front of anyone. */
1177  proc = (PGPROC *) &(waitQueue->links);
1178  }
1179 
1180  /*
1181  * Insert self into queue, ahead of the given proc (or at tail of queue).
1182  */
1183  SHMQueueInsertBefore(&(proc->links), &(MyProc->links));
1184  waitQueue->size++;
1185 
1186  lock->waitMask |= LOCKBIT_ON(lockmode);
1187 
1188  /* Set up wait information in PGPROC object, too */
1189  MyProc->waitLock = lock;
1190  MyProc->waitProcLock = proclock;
1191  MyProc->waitLockMode = lockmode;
1192 
1193 MyProc->waitStatus = PROC_WAIT_STATUS_WAITING;
1194 
1195  /*
1196  * If we detected deadlock, give up without waiting. This must agree with
1197  * CheckDeadLock's recovery code.
1198  */
1199  if (early_deadlock)
1200  {
1201  RemoveFromWaitQueue(MyProc, hashcode);
1202  return PROC_WAIT_STATUS_ERROR;
1203  }
1204 
1205  /* mark that we are waiting for a lock */
1206  lockAwaited = locallock;
1207 
1208  /*
1209  * Release the lock table's partition lock.
1210  *
1211  * NOTE: this may also cause us to exit critical-section state, possibly
1212  * allowing a cancel/die interrupt to be accepted. This is OK because we
1213  * have recorded the fact that we are waiting for a lock, and so
1214  * LockErrorCleanup will clean up if cancel/die happens.
1215  */
1216  LWLockRelease(partitionLock);
1217 
1218  /*
1219  * Also, now that we will successfully clean up after an ereport, it's
1220  * safe to check to see if there's a buffer pin deadlock against the
1221  * Startup process. Of course, that's only necessary if we're doing Hot
1222  * Standby and are not the Startup process ourselves.
1223  */
1224  if (RecoveryInProgress() && !InRecovery)
1225 CheckRecoveryConflictDeadlock();
1226 
1227  /* Reset deadlock_state before enabling the timeout handler */
1228 deadlock_state = DS_NOT_YET_CHECKED;
1229  got_deadlock_timeout = false;
1230 
1231  /*
1232  * Set timer so we can wake up after awhile and check for a deadlock. If a
1233  * deadlock is detected, the handler sets MyProc->waitStatus =
1234  * PROC_WAIT_STATUS_ERROR, allowing us to know that we must report failure rather
1235  * than success.
1236  *
1237  * By delaying the check until we've waited for a bit, we can avoid
1238  * running the rather expensive deadlock-check code in most cases.
1239  *
1240  * If LockTimeout is set, also enable the timeout for that. We can save a
1241  * few cycles by enabling both timeout sources in one call.
1242  *
1243  * If InHotStandby we set lock waits slightly later for clarity with other
1244  * code.
1245  */
1246  if (!InHotStandby)
1247  {
1248  if (LockTimeout > 0)
1249  {
1250  EnableTimeoutParams timeouts[2];
1251 
1252  timeouts[0].id = DEADLOCK_TIMEOUT;
1253  timeouts[0].type = TMPARAM_AFTER;
1254  timeouts[0].delay_ms = DeadlockTimeout;
1255  timeouts[1].id = LOCK_TIMEOUT;
1256  timeouts[1].type = TMPARAM_AFTER;
1257  timeouts[1].delay_ms = LockTimeout;
1258  enable_timeouts(timeouts, 2);
1259  }
1260  else
1261 enable_timeout_after(DEADLOCK_TIMEOUT, DeadlockTimeout);
1262  }
1263 
1264  /*
1265  * If somebody wakes us between LWLockRelease and WaitLatch, the latch
1266  * will not wait. But a set latch does not necessarily mean that the lock
1267  * is free now, as there are many other sources for latch sets than
1268  * somebody releasing the lock.
1269  *
1270  * We process interrupts whenever the latch has been set, so cancel/die
1271  * interrupts are processed quickly. This means we must not mind losing
1272  * control to a cancel/die interrupt here. We don't, because we have no
1273  * shared-state-change work to do after being granted the lock (the
1274  * grantor did it all). We do have to worry about canceling the deadlock
1275  * timeout and updating the locallock table, but if we lose control to an
1276  * error, LockErrorCleanup will fix that up.
1277  */
1278  do
1279  {
1280  if (InHotStandby)
1281  {
1282  /* Set a timer and wait for that or for the Lock to be granted */
1283 ResolveRecoveryConflictWithLock(locallock->tag.lock);
1284  }
1285  else
1286  {
1287 (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
1288  PG_WAIT_LOCK | locallock->tag.lock.locktag_type);
1289 ResetLatch(MyLatch);
1290  /* check for deadlocks first, as that's probably log-worthy */
1291 if (got_deadlock_timeout)
1292  {
1293  CheckDeadLock();
1294  got_deadlock_timeout = false;
1295  }
1296 CHECK_FOR_INTERRUPTS();
1297  }
1298 
1299  /*
1300  * waitStatus could change from PROC_WAIT_STATUS_WAITING to something else
1301  * asynchronously. Read it just once per loop to prevent surprising
1302  * behavior (such as missing log messages).
1303  */
1304  myWaitStatus = *((volatile ProcWaitStatus *) &MyProc->waitStatus);
1305 
1306  /*
1307  * If we are not deadlocked, but are waiting on an autovacuum-induced
1308  * task, send a signal to interrupt it.
1309  */
1310  if (deadlock_state == DS_BLOCKED_BY_AUTOVACUUM && allow_autovacuum_cancel)
1311  {
1312  PGPROC *autovac = GetBlockingAutoVacuumPgproc();
1313  uint8 statusFlags;
1314  uint8 lockmethod_copy;
1315  LOCKTAG locktag_copy;
1316 
1317  /*
1318  * Grab info we need, then release lock immediately. Note this
1319  * coding means that there is a tiny chance that the process
1320  * terminates its current transaction and starts a different one
1321  * before we have a chance to send the signal; the worst possible
1322  * consequence is that a for-wraparound vacuum is cancelled. But
1323  * that could happen in any case unless we were to do kill() with
1324  * the lock held, which is much more undesirable.
1325  */
1326  LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
1327  statusFlags = ProcGlobal->statusFlags[autovac->pgxactoff];
1328  lockmethod_copy = lock->tag.locktag_lockmethodid;
1329  locktag_copy = lock->tag;
1330  LWLockRelease(ProcArrayLock);
1331 
1332  /*
1333  * Only do it if the worker is not working to protect against Xid
1334  * wraparound.
1335  */
1336  if ((statusFlags & PROC_IS_AUTOVACUUM) &&
1337  !(statusFlags & PROC_VACUUM_FOR_WRAPAROUND))
1338  {
1339  int pid = autovac->pid;
1340 
1341  /* report the case, if configured to do so */
1343  {
1344  StringInfoData locktagbuf;
1345  StringInfoData logbuf; /* errdetail for server log */
1346 
1347  initStringInfo(&locktagbuf);
1348  initStringInfo(&logbuf);
1349  DescribeLockTag(&locktagbuf, &locktag_copy);
1350  appendStringInfo(&logbuf,
1351  _("Process %d waits for %s on %s."),
1352  MyProcPid,
1353  GetLockmodeName(lockmethod_copy, lockmode),
1354  locktagbuf.data);
1355 
1356  ereport(DEBUG1,
1357  (errmsg("sending cancel to blocking autovacuum PID %d",
1358  pid),
1359  errdetail_log("%s", logbuf.data)));
1360 
1361  pfree(locktagbuf.data);
1362  pfree(logbuf.data);
1363  }
1364 
1365  /* send the autovacuum worker Back to Old Kent Road */
1366  if (kill(pid, SIGINT) < 0)
1367  {
1368  /*
1369  * There's a race condition here: once we release the
1370  * ProcArrayLock, it's possible for the autovac worker to
1371  * close up shop and exit before we can do the kill().
1372  * Therefore, we do not whinge about no-such-process.
1373  * Other errors such as EPERM could conceivably happen if
1374  * the kernel recycles the PID fast enough, but such cases
1375  * seem improbable enough that it's probably best to issue
1376  * a warning if we see some other errno.
1377  */
1378  if (errno != ESRCH)
1379  ereport(WARNING,
1380  (errmsg("could not send signal to process %d: %m",
1381  pid)));
1382  }
1383  }
1384 
1385  /* prevent signal from being sent again more than once */
1386  allow_autovacuum_cancel = false;
1387  }
1388 
1389  /*
1390  * If awoken after the deadlock check interrupt has run, and
1391  * log_lock_waits is on, then report about the wait.
1392  */
1393 if (log_lock_waits && deadlock_state != DS_NOT_YET_CHECKED)
1394  {
1395 StringInfoData buf,
1396  lock_waiters_sbuf,
1397  lock_holders_sbuf;
1398  const char *modename;
1399  long secs;
1400  int usecs;
1401  long msecs;
1402  SHM_QUEUE *procLocks;
1403  PROCLOCK *proclock;
1404  bool first_holder = true,
1405  first_waiter = true;
1406  int lockHoldersNum = 0;
1407 
1408  initStringInfo(&buf);
1409  initStringInfo(&lock_waiters_sbuf);
1410  initStringInfo(&lock_holders_sbuf);
1411 
1412  DescribeLockTag(&buf, &locallock->tag.lock);
1413  modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
1414  lockmode);
1415 TimestampDifference(get_timeout_start_time(DEADLOCK_TIMEOUT),
1416 GetCurrentTimestamp(),
1417  &secs, &usecs);
1418  msecs = secs * 1000 + usecs / 1000;
1419  usecs = usecs % 1000;
1420 
1421  /*
1422  * we loop over the lock's procLocks to gather a list of all
1423  * holders and waiters. Thus we will be able to provide more
1424  * detailed information for lock debugging purposes.
1425  *
1426  * lock->procLocks contains all processes which hold or wait for
1427  * this lock.
1428  */
1429 
1430  LWLockAcquire(partitionLock, LW_SHARED);
1431 
1432  procLocks = &(lock->procLocks);
1433  proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
1434  offsetof(PROCLOCK, lockLink));
1435 
1436  while (proclock)
1437  {
1438  /*
1439  * we are a waiter if myProc->waitProcLock == proclock; we are
1440  * a holder if it is NULL or something different
1441  */
1442  if (proclock->tag.myProc->waitProcLock == proclock)
1443  {
1444  if (first_waiter)
1445  {
1446  appendStringInfo(&lock_waiters_sbuf, "%d",
1447  proclock->tag.myProc->pid);
1448  first_waiter = false;
1449  }
1450  else
1451  appendStringInfo(&lock_waiters_sbuf, ", %d",
1452  proclock->tag.myProc->pid);
1453  }
1454  else
1455  {
1456  if (first_holder)
1457  {
1458  appendStringInfo(&lock_holders_sbuf, "%d",
1459  proclock->tag.myProc->pid);
1460  first_holder = false;
1461  }
1462  else
1463  appendStringInfo(&lock_holders_sbuf, ", %d",
1464  proclock->tag.myProc->pid);
1465 
1466  lockHoldersNum++;
1467  }
1468 
1469  proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
1470  offsetof(PROCLOCK, lockLink));
1471  }
1472 
1473  LWLockRelease(partitionLock);
1474 
1475 if (deadlock_state == DS_SOFT_DEADLOCK)
1476  ereport(LOG,
1477  (errmsg("process %d avoided deadlock for %s on %s by rearranging queue order after %ld.%03d ms",
1478  MyProcPid, modename, buf.data, msecs, usecs),
1479  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1480  "Processes holding the lock: %s. Wait queue: %s.",
1481  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1482  else if (deadlock_state == DS_HARD_DEADLOCK)
1483  {
1484  /*
1485  * This message is a bit redundant with the error that will be
1486  * reported subsequently, but in some cases the error report
1487  * might not make it to the log (eg, if it's caught by an
1488  * exception handler), and we want to ensure all long-wait
1489  * events get logged.
1490  */
1491  ereport(LOG,
1492  (errmsg("process %d detected deadlock while waiting for %s on %s after %ld.%03d ms",
1493  MyProcPid, modename, buf.data, msecs, usecs),
1494  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1495  "Processes holding the lock: %s. Wait queue: %s.",
1496  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1497  }
1498 
1499  if (myWaitStatus == PROC_WAIT_STATUS_WAITING)
1500  ereport(LOG,
1501  (errmsg("process %d still waiting for %s on %s after %ld.%03d ms",
1502  MyProcPid, modename, buf.data, msecs, usecs),
1503  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1504  "Processes holding the lock: %s. Wait queue: %s.",
1505  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1506  else if (myWaitStatus == PROC_WAIT_STATUS_OK)
1507  ereport(LOG,
1508  (errmsg("process %d acquired %s on %s after %ld.%03d ms",
1509  MyProcPid, modename, buf.data, msecs, usecs)));
1510  else
1511  {
1512  Assert(myWaitStatus == PROC_WAIT_STATUS_ERROR);
1513 
1514  /*
1515  * Currently, the deadlock checker always kicks its own
1516  * process, which means that we'll only see PROC_WAIT_STATUS_ERROR when
1517  * deadlock_state == DS_HARD_DEADLOCK, and there's no need to
1518  * print redundant messages. But for completeness and
1519  * future-proofing, print a message if it looks like someone
1520  * else kicked us off the lock.
1521  */
1522 if (deadlock_state != DS_HARD_DEADLOCK)
1523  ereport(LOG,
1524  (errmsg("process %d failed to acquire %s on %s after %ld.%03d ms",
1525  MyProcPid, modename, buf.data, msecs, usecs),
1526  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1527  "Processes holding the lock: %s. Wait queue: %s.",
1528  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1529  }
1530 
1531  /*
1532  * At this point we might still need to wait for the lock. Reset
1533  * state so we don't print the above messages again.
1534  */
1535 deadlock_state = DS_NO_DEADLOCK;
1536 
1537  pfree(buf.data);
1538  pfree(lock_holders_sbuf.data);
1539  pfree(lock_waiters_sbuf.data);
1540  }
1541  } while (myWaitStatus == PROC_WAIT_STATUS_WAITING);
1542 
1543  /*
1544  * Disable the timers, if they are still running. As in LockErrorCleanup,
1545  * we must preserve the LOCK_TIMEOUT indicator flag: if a lock timeout has
1546  * already caused QueryCancelPending to become set, we want the cancel to
1547  * be reported as a lock timeout, not a user cancel.
1548  */
1549  if (!InHotStandby)
1550  {
1551  if (LockTimeout > 0)
1552  {
1553  DisableTimeoutParams timeouts[2];
1554 
1555  timeouts[0].id = DEADLOCK_TIMEOUT;
1556  timeouts[0].keep_indicator = false;
1557  timeouts[1].id = LOCK_TIMEOUT;
1558  timeouts[1].keep_indicator = true;
1559  disable_timeouts(timeouts, 2);
1560  }
1561  else
1562 disable_timeout(DEADLOCK_TIMEOUT, false);
1563  }
1564 
1565  /*
1566  * Re-acquire the lock table's partition lock. We have to do this to hold
1567  * off cancel/die interrupts before we can mess with lockAwaited (else we
1568  * might have a missed or duplicated locallock update).
1569  */
1570  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
1571 
1572  /*
1573  * We no longer want LockErrorCleanup to do anything.
1574  */
1575  lockAwaited = NULL;
1576 
1577  /*
1578  * If we got the lock, be sure to remember it in the locallock table.
1579  */
1580  if (MyProc->waitStatus == PROC_WAIT_STATUS_OK)
1581  GrantAwaitedLock();
1582 
1583  /*
1584  * We don't have to do anything else, because the awaker did all the
1585  * necessary update of the lock table and MyProc.
1586  */
1587  return MyProc->waitStatus;
1588 }
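/*
 * Illustrative sketch, not part of proc.c: the caller-side contract of
 * ProcSleep().  The lock table's partition lock must already be held and is
 * still held when ProcSleep() returns; a PROC_WAIT_STATUS_ERROR result means
 * the deadlock detector chose this backend as the victim.  demo_wait_on_lock
 * is an invented name standing in for the real caller in lock.c.
 */
static void
demo_wait_on_lock(LOCALLOCK *locallock, LockMethod lockMethodTable)
{
	/* caller already holds LockHashPartitionLock(locallock->hashcode) */
	if (ProcSleep(locallock, lockMethodTable) != PROC_WAIT_STATUS_OK)
		DeadLockReport();		/* raises ERROR with deadlock details */
}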
1589 
1590 
1591 /*
1592  * ProcWakeup -- wake up a process by setting its latch.
1593  *
1594  * Also remove the process from the wait queue and set its links invalid.
1595  * RETURN: the next process in the wait queue.
1596  *
1597  * The appropriate lock partition lock must be held by caller.
1598  *
1599  * XXX: presently, this code is only used for the "success" case, and only
1600  * works correctly for that case. To clean up in failure case, would need
1601  * to twiddle the lock's request counts too --- see RemoveFromWaitQueue.
1602  * Hence, in practice the waitStatus parameter must be PROC_WAIT_STATUS_OK.
1603  */
1604 PGPROC *
1605 ProcWakeup(PGPROC *proc, ProcWaitStatus waitStatus)
1606 {
1607  PGPROC *retProc;
1608 
1609  /* Proc should be sleeping ... */
1610  if (proc->links.prev == NULL ||
1611  proc->links.next == NULL)
1612  return NULL;
1614 
1615  /* Save next process before we zap the list link */
1616  retProc = (PGPROC *) proc->links.next;
1617 
1618  /* Remove process from wait queue */
1619  SHMQueueDelete(&(proc->links));
1620  (proc->waitLock->waitProcs.size)--;
1621 
1622  /* Clean up process' state and pass it the ok/fail signal */
1623  proc->waitLock = NULL;
1624  proc->waitProcLock = NULL;
1625  proc->waitStatus = waitStatus;
1626 
1627  /* And awaken it */
1628  SetLatch(&proc->procLatch);
1629 
1630  return retProc;
1631 }
1632 
1633 /*
1634  * ProcLockWakeup -- routine for waking up processes when a lock is
1635  * released (or a prior waiter is aborted). Scan all waiters
1636  * for lock, waken any that are no longer blocked.
1637  *
1638  * The appropriate lock partition lock must be held by caller.
1639  */
1640 void
1641 ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
1642 {
1643  PROC_QUEUE *waitQueue = &(lock->waitProcs);
1644  int queue_size = waitQueue->size;
1645  PGPROC *proc;
1646  LOCKMASK aheadRequests = 0;
1647 
1648  Assert(queue_size >= 0);
1649 
1650  if (queue_size == 0)
1651  return;
1652 
1653  proc = (PGPROC *) waitQueue->links.next;
1654 
1655  while (queue_size-- > 0)
1656  {
1657  LOCKMODE lockmode = proc->waitLockMode;
1658 
1659  /*
1660  * Waken if (a) doesn't conflict with requests of earlier waiters, and
1661  * (b) doesn't conflict with already-held locks.
1662  */
1663  if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
1664  !LockCheckConflicts(lockMethodTable, lockmode, lock,
1665  proc->waitProcLock))
1666  {
1667  /* OK to waken */
1668  GrantLock(lock, proc->waitProcLock, lockmode);
1669  proc = ProcWakeup(proc, PROC_WAIT_STATUS_OK);
1670 
1671  /*
1672  * ProcWakeup removes proc from the lock's waiting process queue
1673  * and returns the next proc in chain; don't use proc's next-link,
1674  * because it's been cleared.
1675  */
1676  }
1677  else
1678  {
1679  /*
1680  * Cannot wake this guy. Remember his request for later checks.
1681  */
1682  aheadRequests |= LOCKBIT_ON(lockmode);
1683  proc = (PGPROC *) proc->links.next;
1684  }
1685  }
1686 
1687  Assert(waitQueue->size >= 0);
1688 }
1689 
1690 /*
1691  * CheckDeadLock
1692  *
1693  * We only get to this routine, if DEADLOCK_TIMEOUT fired while waiting for a
1694  * lock to be released by some other process. Check if there's a deadlock; if
1695  * not, just return. (But signal ProcSleep to log a message, if
1696  * log_lock_waits is true.) If we have a real deadlock, remove ourselves from
1697  * the lock's wait queue and signal an error to ProcSleep.
1698  */
1699 static void
1700 CheckDeadLock(void)
1701 {
1702  int i;
1703 
1704  /*
1705  * Acquire exclusive lock on the entire shared lock data structures. Must
1706  * grab LWLocks in partition-number order to avoid LWLock deadlock.
1707  *
1708  * Note that the deadlock check interrupt had better not be enabled
1709  * anywhere that this process itself holds lock partition locks, else this
1710  * will wait forever. Also note that LWLockAcquire creates a critical
1711  * section, so that this routine cannot be interrupted by cancel/die
1712  * interrupts.
1713  */
1714  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
1715 LWLockAcquire(LockHashPartitionLockByIndex(i), LW_EXCLUSIVE);
1716 
1717  /*
1718  * Check to see if we've been awoken by anyone in the interim.
1719  *
1720  * If we have, we can return and resume our transaction -- happy day.
1721  * Before we are awoken the process releasing the lock grants it to us so
1722  * we know that we don't have to wait anymore.
1723  *
1724  * We check by looking to see if we've been unlinked from the wait queue.
1725  * This is safe because we hold the lock partition lock.
1726  */
1727  if (MyProc->links.prev == NULL ||
1728  MyProc->links.next == NULL)
1729  goto check_done;
1730 
1731 #ifdef LOCK_DEBUG
1732  if (Debug_deadlocks)
1733  DumpAllLocks();
1734 #endif
1735 
1736  /* Run the deadlock check, and set deadlock_state for use by ProcSleep */
1737  deadlock_state = DeadLockCheck(MyProc);
1738 
1740  {
1741  /*
1742  * Oops. We have a deadlock.
1743  *
1744  * Get this process out of wait state. (Note: we could do this more
1745  * efficiently by relying on lockAwaited, but use this coding to
1746  * preserve the flexibility to kill some other transaction than the
1747  * one detecting the deadlock.)
1748  *
1749  * RemoveFromWaitQueue sets MyProc->waitStatus to PROC_WAIT_STATUS_ERROR, so
1750  * ProcSleep will report an error after we return from the signal
1751  * handler.
1752  */
1753  Assert(MyProc->waitLock != NULL);
1754  RemoveFromWaitQueue(MyProc, LockTagHashCode(&(MyProc->waitLock->tag)));
1755 
1756  /*
1757  * We're done here. Transaction abort caused by the error that
1758  * ProcSleep will raise will cause any other locks we hold to be
1759  * released, thus allowing other processes to wake up; we don't need
1760  * to do that here. NOTE: an exception is that releasing locks we
1761  * hold doesn't consider the possibility of waiters that were blocked
1762  * behind us on the lock we just failed to get, and might now be
1763  * wakable because we're not in front of them anymore. However,
1764  * RemoveFromWaitQueue took care of waking up any such processes.
1765  */
1766  }
1767 
1768  /*
1769  * And release locks. We do this in reverse order for two reasons: (1)
1770  * Anyone else who needs more than one of the locks will be trying to lock
1771  * them in increasing order; we don't want to release the other process
1772  * until it can get all the locks it needs. (2) This avoids O(N^2)
1773  * behavior inside LWLockRelease.
1774  */
1775 check_done:
1776  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
1777 LWLockRelease(LockHashPartitionLockByIndex(i));
1778 }
1779 
1780 /*
1781  * CheckDeadLockAlert - Handle the expiry of deadlock_timeout.
1782  *
1783  * NB: Runs inside a signal handler, be careful.
1784  */
1785 void
1786 CheckDeadLockAlert(void)
1787 {
1788  int save_errno = errno;
1789 
1790  got_deadlock_timeout = true;
1791 
1792  /*
1793  * Have to set the latch again, even if handle_sig_alarm already did. Back
1794  * then got_deadlock_timeout wasn't yet set... It's unlikely that this
1795  * ever would be a problem, but setting a set latch again is cheap.
1796  */
1797  SetLatch(MyLatch);
1798  errno = save_errno;
1799 }
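/*
 * Illustrative sketch, not part of proc.c: CheckDeadLockAlert() only works if
 * it has been registered as the DEADLOCK_TIMEOUT handler, which happens once
 * during backend initialization (outside this file).  The registration is a
 * single call to the timeout facility; the helper name below is invented.
 */
static void
demo_register_deadlock_timeout(void)
{
	(void) RegisterTimeout(DEADLOCK_TIMEOUT, CheckDeadLockAlert);
}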
1800 
1801 /*
1802  * ProcWaitForSignal - wait for a signal from another backend.
1803  *
1804  * As this uses the generic process latch the caller has to be robust against
1805  * unrelated wakeups: Always check that the desired state has occurred, and
1806  * wait again if not.
1807  */
1808 void
1809 ProcWaitForSignal(uint32 wait_event_info)
1810 {
1811 (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
1812  wait_event_info);
1813 ResetLatch(MyLatch);
1814 CHECK_FOR_INTERRUPTS();
1815 }
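/*
 * Illustrative sketch, not part of proc.c: because ProcWaitForSignal() uses
 * the general-purpose process latch, callers must tolerate spurious wakeups
 * by re-checking their condition in a loop, as the comment above says.
 * demo_condition_met is a placeholder for whatever shared-state test the
 * caller really needs; PG_WAIT_EXTENSION is just one possible wait event.
 */
static void
demo_wait_until(bool (*demo_condition_met) (void))
{
	while (!demo_condition_met())
		ProcWaitForSignal(PG_WAIT_EXTENSION);
}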
1816 
1817 /*
1818  * ProcSendSignal - send a signal to a backend identified by PID
1819  */
1820 void
1821 ProcSendSignal(int pid)
1822 {
1823  PGPROC *proc = NULL;
1824 
1825  if (RecoveryInProgress())
1826  {
1827 SpinLockAcquire(ProcStructLock);
1827  SpinLockAcquire(ProcStructLock);
1828 
1829  /*
1830  * Check to see whether it is the Startup process we wish to signal.
1831  * This call is made by the buffer manager when it wishes to wake up a
1832  * process that has been waiting for a pin so that it can obtain a
1833  * cleanup lock using LockBufferForCleanup(). Startup is not a normal
1834  * backend, so BackendPidGetProc() will not return any pid at all. So
1835  * we remember the information for this special case.
1836  */
1837  if (pid == ProcGlobal->startupProcPid)
1838  proc = ProcGlobal->startupProc;
1839 
1839 
1840  SpinLockRelease(ProcStructLock);
1841  }
1842 
1843  if (proc == NULL)
1844  proc = BackendPidGetProc(pid);
1845 
1846  if (proc != NULL)
1847  {
1848  SetLatch(&proc->procLatch);
1849  }
1850 }
1851 
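A hedged sketch of the sender side that pairs with a ProcWaitForSignal() loop like the one above: the waiting backend advertises its PID in shared memory (shared->waiter_pid is a placeholder for that, loosely modelled on the buffer manager's pin-count-waiter field), and whoever makes the condition true calls ProcSendSignal().

    /* Waker side (sketch): the condition just became true; wake the backend
     * whose PID the waiter previously stored in shared memory. */
    make_condition_true(shared);            /* hypothetical helper */
    ProcSendSignal(shared->waiter_pid);     /* placeholder field */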
1852 /*
1853  * BecomeLockGroupLeader - designate process as lock group leader
1854  *
1855  * Once this function has returned, other processes can join the lock group
1856  * by calling BecomeLockGroupMember.
1857  */
1858 void
1859 BecomeLockGroupLeader(void)
1860 {
1861  LWLock *leader_lwlock;
1862 
1863  /* If we already did it, we don't need to do it again. */
1864  if (MyProc->lockGroupLeader == MyProc)
1865  return;
1866 
1867  /* We had better not be a follower. */
1868  Assert(MyProc->lockGroupLeader == NULL);
1869 
1870  /* Create single-member group, containing only ourselves. */
1871  leader_lwlock = LockHashPartitionLockByProc(MyProc);
1872  LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
1873  MyProc->lockGroupLeader = MyProc;
1874  dlist_push_head(&MyProc->lockGroupMembers, &MyProc->lockGroupLink);
1875  LWLockRelease(leader_lwlock);
1876 }
1877 
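For illustration only, the leader side of the pattern this enables (parallel query does essentially this): form the group before launching workers, then pass identity to them through shared memory; shared_state and its fields are placeholders.

    /* Leader (sketch): become the group leader, then advertise PGPROC and PID
     * so that workers can later call BecomeLockGroupMember() with them. */
    BecomeLockGroupLeader();
    shared_state->leader_pgproc = MyProc;    /* hypothetical shared struct */
    shared_state->leader_pid = MyProcPid;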
1878 /*
1879  * BecomeLockGroupMember - designate process as lock group member
1880  *
1881  * This is pretty straightforward except for the possibility that the leader
1882  * whose group we're trying to join might exit before we manage to do so;
1883  * and the PGPROC might get recycled for an unrelated process. To avoid
1884  * that, we require the caller to pass the PID of the intended PGPROC as
1885  * an interlock. Returns true if we successfully join the intended lock
1886  * group, and false if not.
1887  */
1888 bool
1889 BecomeLockGroupMember(PGPROC *leader, int pid)
1890 {
1891  LWLock *leader_lwlock;
1892  bool ok = false;
1893 
1894  /* Group leader can't become member of group */
1895  Assert(MyProc != leader);
1896 
1897  /* Can't already be a member of a group */
1898  Assert(MyProc->lockGroupLeader == NULL);
1899 
1900  /* PID must be valid. */
1901  Assert(pid != 0);
1902 
1903  /*
1904  * Get lock protecting the group fields. Note LockHashPartitionLockByProc
1905  * accesses leader->pgprocno in a PGPROC that might be free. This is safe
1906  * because all PGPROCs' pgprocno fields are set during shared memory
1907  * initialization and never change thereafter; so we will acquire the
1908  * correct lock even if the leader PGPROC is in process of being recycled.
1909  */
1910  leader_lwlock = LockHashPartitionLockByProc(leader);
1911  LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
1912 
1913  /* Is this the leader we're looking for? */
1914  if (leader->pid == pid && leader->lockGroupLeader == leader)
1915  {
1916  /* OK, join the group */
1917  ok = true;
1918  MyProc->lockGroupLeader = leader;
1919  dlist_push_tail(&leader->lockGroupMembers, &MyProc->lockGroupLink);
1920  }
1921  LWLockRelease(leader_lwlock);
1922 
1923  return ok;
1924 }